Merge
author Andreas Stewering-Bone <ab@igh-essen.com>
Wed, 13 Apr 2011 22:06:28 +0200
changeset 2060 8b67602f5161
parent 2059 ab0b96ac18bb (current diff)
parent 2048 25cc3a272cad (diff)
child 2066 b544025bd696
Merge
configure.ac
devices/8139too-2.6.33-ethercat.c
devices/8139too-2.6.33-orig.c
include/ecrt.h
master/master.c
master/module.c
--- a/.hgignore	Wed Mar 23 08:06:58 2011 +0100
+++ b/.hgignore	Wed Apr 13 22:06:28 2011 +0200
@@ -136,3 +136,5 @@
 tool/Makefile.in
 tool/TAGS
 tool/ethercat
+debuild.log
+
--- a/.hgtags	Wed Mar 23 08:06:58 2011 +0100
+++ b/.hgtags	Wed Apr 13 22:06:28 2011 +0200
@@ -1,1 +1,2 @@
 b6cfd85db58e116ea155a52a584e863fc6ad0eff version-1.3.2
+ce2fae4d3c4fae6a3cb6d8a91a3df9bbb272ff82 Stable
--- a/configure.ac	Wed Mar 23 08:06:58 2011 +0100
+++ b/configure.ac	Wed Apr 13 22:06:28 2011 +0200
@@ -580,6 +580,31 @@
 fi
 
 #------------------------------------------------------------------------------
+# Use 2 datagrams (payload+last-byte) when sending to mailbox (reduces frame size)
+#------------------------------------------------------------------------------
+
+AC_ARG_ENABLE([mboxframesize],
+    AS_HELP_STRING([--enable-mboxframesize],
+                   [Reduced frame size when sending to mailbox, uses 2 datagrams (default: no)]),
+    [
+        case "${enableval}" in
+            yes) mboxframesize=1
+                ;;
+            no) mboxframesize=0
+                ;;
+            *) AC_MSG_ERROR([Invalid value for --enable-mboxframesize])
+                ;;
+        esac
+    ],
+    [mboxframesize=0]
+)
+
+if test "x${mboxframesize}" = "x1"; then
+    AC_DEFINE([EC_REDUCE_MBOXFRAMESIZE], [1], [Reduced frame size when sending to mailbox])
+fi
+
+
+#------------------------------------------------------------------------------
 # Read alias address from register
 #------------------------------------------------------------------------------
 
@@ -603,6 +628,7 @@
 AC_DEFINE([EC_REGALIAS], [1], [Read alias addresses from register])
 fi
 
+
 #------------------------------------------------------------------------------
 # Command-line tool
 #-----------------------------------------------------------------------------
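The configure.ac hunk above adds an --enable-mboxframesize switch that results in an AC_DEFINE of EC_REDUCE_MBOXFRAMESIZE. The master sources that actually test this define are not part of this excerpt; the following is only a minimal user-space sketch, with a hypothetical helper name, of how code might branch on it:

```c
#include <stdio.h>

/* Hypothetical sketch only -- the real master code using this define is not
 * shown in this changeset excerpt.  Branches on EC_REDUCE_MBOXFRAMESIZE,
 * which ./configure --enable-mboxframesize turns into an AC_DEFINE. */
static unsigned int mbox_datagram_count(void)
{
#ifdef EC_REDUCE_MBOXFRAMESIZE
    return 2; /* payload datagram plus a separate last-byte datagram */
#else
    return 1; /* one datagram spanning the full configured mailbox size */
#endif
}

int main(void)
{
    printf("datagrams per mailbox send: %u\n", mbox_datagram_count());
    return 0;
}
```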
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/devices/e1000/e1000-2.6.31-ethercat.h	Wed Apr 13 22:06:28 2011 +0200
@@ -0,0 +1,357 @@
+/*******************************************************************************
+
+  Intel PRO/1000 Linux driver
+  Copyright(c) 1999 - 2006 Intel Corporation.
+
+  This program is free software; you can redistribute it and/or modify it
+  under the terms and conditions of the GNU General Public License,
+  version 2, as published by the Free Software Foundation.
+
+  This program is distributed in the hope it will be useful, but WITHOUT
+  ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+  FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
+  more details.
+
+  You should have received a copy of the GNU General Public License along with
+  this program; if not, write to the Free Software Foundation, Inc.,
+  51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
+
+  The full GNU General Public License is included in this distribution in
+  the file called "COPYING".
+
+  Contact Information:
+  Linux NICS <linux.nics@intel.com>
+  e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
+  Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
+
+*******************************************************************************/
+
+
+/* Linux PRO/1000 Ethernet Driver main header file */
+
+#ifndef _E1000_H_
+#define _E1000_H_
+
+#include <linux/stddef.h>
+#include <linux/module.h>
+#include <linux/types.h>
+#include <asm/byteorder.h>
+#include <linux/init.h>
+#include <linux/mm.h>
+#include <linux/errno.h>
+#include <linux/ioport.h>
+#include <linux/pci.h>
+#include <linux/kernel.h>
+#include <linux/netdevice.h>
+#include <linux/etherdevice.h>
+#include <linux/skbuff.h>
+#include <linux/delay.h>
+#include <linux/timer.h>
+#include <linux/slab.h>
+#include <linux/vmalloc.h>
+#include <linux/interrupt.h>
+#include <linux/string.h>
+#include <linux/pagemap.h>
+#include <linux/dma-mapping.h>
+#include <linux/bitops.h>
+#include <asm/io.h>
+#include <asm/irq.h>
+#include <linux/capability.h>
+#include <linux/in.h>
+#include <linux/ip.h>
+#include <linux/ipv6.h>
+#include <linux/tcp.h>
+#include <linux/udp.h>
+#include <net/pkt_sched.h>
+#include <linux/list.h>
+#include <linux/reboot.h>
+#include <net/checksum.h>
+#include <linux/mii.h>
+#include <linux/ethtool.h>
+#include <linux/if_vlan.h>
+#include "../ecdev.h"
+
+
+#define BAR_0		0
+#define BAR_1		1
+#define BAR_5		5
+
+#define INTEL_E1000_ETHERNET_DEVICE(device_id) {\
+	PCI_DEVICE(PCI_VENDOR_ID_INTEL, device_id)}
+
+struct e1000_adapter;
+
+#include "e1000_hw-2.6.31-ethercat.h"
+
+#ifdef DBG
+#define E1000_DBG(args...) printk(KERN_DEBUG "ec_e1000: " args)
+#else
+#define E1000_DBG(args...)
+#endif
+
+#define E1000_ERR(args...) printk(KERN_ERR "ec_e1000: " args)
+
+#define PFX "ec_e1000: "
+
+#define DPRINTK(nlevel, klevel, fmt, args...)				\
+do {									\
+	if (NETIF_MSG_##nlevel & adapter->msg_enable)			\
+		printk(KERN_##klevel PFX "%s: %s: " fmt,		\
+		       adapter->netdev->name, __func__, ##args);	\
+} while (0)
+
+#define E1000_MAX_INTR 10
+
+/* TX/RX descriptor defines */
+#define E1000_DEFAULT_TXD                  256
+#define E1000_MAX_TXD                      256
+#define E1000_MIN_TXD                       80
+#define E1000_MAX_82544_TXD               4096
+
+#define E1000_DEFAULT_RXD                  256
+#define E1000_MAX_RXD                      256
+#define E1000_MIN_RXD                       80
+#define E1000_MAX_82544_RXD               4096
+
+/* this is the size past which hardware will drop packets when setting LPE=0 */
+#define MAXIMUM_ETHERNET_VLAN_SIZE 1522
+
+/* Supported Rx Buffer Sizes */
+#define E1000_RXBUFFER_128   128    /* Used for packet split */
+#define E1000_RXBUFFER_256   256    /* Used for packet split */
+#define E1000_RXBUFFER_512   512
+#define E1000_RXBUFFER_1024  1024
+#define E1000_RXBUFFER_2048  2048
+#define E1000_RXBUFFER_4096  4096
+#define E1000_RXBUFFER_8192  8192
+#define E1000_RXBUFFER_16384 16384
+
+/* SmartSpeed delimiters */
+#define E1000_SMARTSPEED_DOWNSHIFT 3
+#define E1000_SMARTSPEED_MAX       15
+
+/* Packet Buffer allocations */
+#define E1000_PBA_BYTES_SHIFT 0xA
+#define E1000_TX_HEAD_ADDR_SHIFT 7
+#define E1000_PBA_TX_MASK 0xFFFF0000
+
+/* Flow Control Watermarks */
+#define E1000_FC_HIGH_DIFF 0x1638  /* High: 5688 bytes below Rx FIFO size */
+#define E1000_FC_LOW_DIFF 0x1640   /* Low:  5696 bytes below Rx FIFO size */
+
+#define E1000_FC_PAUSE_TIME 0x0680 /* 858 usec */
+
+/* How many Tx Descriptors do we need to call netif_wake_queue ? */
+#define E1000_TX_QUEUE_WAKE	16
+/* How many Rx Buffers do we bundle into one write to the hardware ? */
+#define E1000_RX_BUFFER_WRITE	16	/* Must be power of 2 */
+
+#define AUTO_ALL_MODES            0
+#define E1000_EEPROM_82544_APM    0x0004
+#define E1000_EEPROM_ICH8_APME    0x0004
+#define E1000_EEPROM_APME         0x0400
+
+#ifndef E1000_MASTER_SLAVE
+/* Switch to override PHY master/slave setting */
+#define E1000_MASTER_SLAVE	e1000_ms_hw_default
+#endif
+
+#define E1000_MNG_VLAN_NONE (-1)
+
+/* wrapper around a pointer to a socket buffer,
+ * so a DMA handle can be stored along with the buffer */
+struct e1000_buffer {
+	struct sk_buff *skb;
+	dma_addr_t dma;
+	unsigned long time_stamp;
+	u16 length;
+	u16 next_to_watch;
+};
+
+struct e1000_tx_ring {
+	/* pointer to the descriptor ring memory */
+	void *desc;
+	/* physical address of the descriptor ring */
+	dma_addr_t dma;
+	/* length of descriptor ring in bytes */
+	unsigned int size;
+	/* number of descriptors in the ring */
+	unsigned int count;
+	/* next descriptor to associate a buffer with */
+	unsigned int next_to_use;
+	/* next descriptor to check for DD status bit */
+	unsigned int next_to_clean;
+	/* array of buffer information structs */
+	struct e1000_buffer *buffer_info;
+
+	u16 tdh;
+	u16 tdt;
+	bool last_tx_tso;
+};
+
+struct e1000_rx_ring {
+	/* pointer to the descriptor ring memory */
+	void *desc;
+	/* physical address of the descriptor ring */
+	dma_addr_t dma;
+	/* length of descriptor ring in bytes */
+	unsigned int size;
+	/* number of descriptors in the ring */
+	unsigned int count;
+	/* next descriptor to associate a buffer with */
+	unsigned int next_to_use;
+	/* next descriptor to check for DD status bit */
+	unsigned int next_to_clean;
+	/* array of buffer information structs */
+	struct e1000_buffer *buffer_info;
+
+	/* cpu for rx queue */
+	int cpu;
+
+	u16 rdh;
+	u16 rdt;
+};
+
+#define E1000_DESC_UNUSED(R)						\
+	((((R)->next_to_clean > (R)->next_to_use)			\
+	  ? 0 : (R)->count) + (R)->next_to_clean - (R)->next_to_use - 1)
+
+#define E1000_RX_DESC_EXT(R, i)						\
+	(&(((union e1000_rx_desc_extended *)((R).desc))[i]))
+#define E1000_GET_DESC(R, i, type)	(&(((struct type *)((R).desc))[i]))
+#define E1000_RX_DESC(R, i)		E1000_GET_DESC(R, i, e1000_rx_desc)
+#define E1000_TX_DESC(R, i)		E1000_GET_DESC(R, i, e1000_tx_desc)
+#define E1000_CONTEXT_DESC(R, i)	E1000_GET_DESC(R, i, e1000_context_desc)
+
+/* board specific private data structure */
+
+struct e1000_adapter {
+	struct timer_list tx_fifo_stall_timer;
+	struct timer_list watchdog_timer;
+	struct timer_list phy_info_timer;
+	struct vlan_group *vlgrp;
+	u16 mng_vlan_id;
+	u32 bd_number;
+	u32 rx_buffer_len;
+	u32 wol;
+	u32 smartspeed;
+	u32 en_mng_pt;
+	u16 link_speed;
+	u16 link_duplex;
+	spinlock_t stats_lock;
+	unsigned int total_tx_bytes;
+	unsigned int total_tx_packets;
+	unsigned int total_rx_bytes;
+	unsigned int total_rx_packets;
+	/* Interrupt Throttle Rate */
+	u32 itr;
+	u32 itr_setting;
+	u16 tx_itr;
+	u16 rx_itr;
+
+	struct work_struct reset_task;
+	u8 fc_autoneg;
+
+	struct timer_list blink_timer;
+	unsigned long led_status;
+
+	/* TX */
+	struct e1000_tx_ring *tx_ring;      /* One per active queue */
+	unsigned int restart_queue;
+	unsigned long tx_queue_len;
+	u32 txd_cmd;
+	u32 tx_int_delay;
+	u32 tx_abs_int_delay;
+	u32 gotcl;
+	u64 gotcl_old;
+	u64 tpt_old;
+	u64 colc_old;
+	u32 tx_timeout_count;
+	u32 tx_fifo_head;
+	u32 tx_head_addr;
+	u32 tx_fifo_size;
+	u8  tx_timeout_factor;
+	atomic_t tx_fifo_stall;
+	bool pcix_82544;
+	bool detect_tx_hung;
+
+	/* RX */
+	bool (*clean_rx)(struct e1000_adapter *adapter,
+			 struct e1000_rx_ring *rx_ring,
+			 int *work_done, int work_to_do);
+	void (*alloc_rx_buf)(struct e1000_adapter *adapter,
+			     struct e1000_rx_ring *rx_ring,
+			     int cleaned_count);
+	struct e1000_rx_ring *rx_ring;      /* One per active queue */
+	struct napi_struct napi;
+
+	int num_tx_queues;
+	int num_rx_queues;
+
+	u64 hw_csum_err;
+	u64 hw_csum_good;
+	u64 rx_hdr_split;
+	u32 alloc_rx_buff_failed;
+	u32 rx_int_delay;
+	u32 rx_abs_int_delay;
+	bool rx_csum;
+	u32 gorcl;
+	u64 gorcl_old;
+
+	/* OS defined structs */
+	struct net_device *netdev;
+	struct pci_dev *pdev;
+	struct net_device_stats net_stats;
+
+	/* structs defined in e1000_hw.h */
+	struct e1000_hw hw;
+	struct e1000_hw_stats stats;
+	struct e1000_phy_info phy_info;
+	struct e1000_phy_stats phy_stats;
+
+	u32 test_icr;
+	struct e1000_tx_ring test_tx_ring;
+	struct e1000_rx_ring test_rx_ring;
+
+	int msg_enable;
+	bool have_msi;
+
+	/* to not mess up cache alignment, always add to the bottom */
+	bool tso_force;
+	bool smart_power_down;	/* phy smart power down */
+	bool quad_port_a;
+	unsigned long flags;
+	u32 eeprom_wol;
+
+	/* for ioport free */
+	int bars;
+	int need_ioport;
+
+	ec_device_t *ecdev;
+	unsigned long ec_watchdog_jiffies;
+};
+
+enum e1000_state_t {
+	__E1000_TESTING,
+	__E1000_RESETTING,
+	__E1000_DOWN
+};
+
+extern char e1000_driver_name[];
+extern const char e1000_driver_version[];
+
+extern int e1000_up(struct e1000_adapter *adapter);
+extern void e1000_down(struct e1000_adapter *adapter);
+extern void e1000_reinit_locked(struct e1000_adapter *adapter);
+extern void e1000_reset(struct e1000_adapter *adapter);
+extern int e1000_set_spd_dplx(struct e1000_adapter *adapter, u16 spddplx);
+extern int e1000_setup_all_rx_resources(struct e1000_adapter *adapter);
+extern int e1000_setup_all_tx_resources(struct e1000_adapter *adapter);
+extern void e1000_free_all_rx_resources(struct e1000_adapter *adapter);
+extern void e1000_free_all_tx_resources(struct e1000_adapter *adapter);
+extern void e1000_update_stats(struct e1000_adapter *adapter);
+extern void e1000_power_up_phy(struct e1000_adapter *);
+extern void e1000_set_ethtool_ops(struct net_device *netdev);
+extern void e1000_check_options(struct e1000_adapter *adapter);
+
+#endif /* _E1000_H_ */
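Compared with the vanilla header that follows, the EtherCAT variant above includes ../ecdev.h and appends ec_device_t *ecdev and ec_watchdog_jiffies to struct e1000_adapter. The ethtool hunks later in this changeset guard runtime-reconfiguration paths on that pointer; below is a minimal stand-in sketch of that guard (types and names are illustrative, not the real ones from ecdev.h):

```c
#include <errno.h>
#include <stddef.h>

/* Stand-in types for illustration only; the real ec_device_t comes from ecdev.h. */
typedef struct ec_device ec_device_t;
struct adapter_sketch { ec_device_t *ecdev; };

/* Mirrors the guard added to e1000_set_pauseparam() and friends further down:
 * while the EtherCAT master owns the port (ecdev != NULL), reconfiguration
 * through ethtool is refused with -EBUSY. */
static int sketch_ethtool_handler(struct adapter_sketch *adapter)
{
    if (adapter->ecdev)
        return -EBUSY;
    /* ... normal ethtool handling would follow here ... */
    return 0;
}

int main(void)
{
    struct adapter_sketch a = { .ecdev = NULL }; /* port not claimed by EtherCAT */
    return sketch_ethtool_handler(&a);           /* returns 0 */
}
```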
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/devices/e1000/e1000-2.6.31-orig.h	Wed Apr 13 22:06:28 2011 +0200
@@ -0,0 +1,352 @@
+/*******************************************************************************
+
+  Intel PRO/1000 Linux driver
+  Copyright(c) 1999 - 2006 Intel Corporation.
+
+  This program is free software; you can redistribute it and/or modify it
+  under the terms and conditions of the GNU General Public License,
+  version 2, as published by the Free Software Foundation.
+
+  This program is distributed in the hope it will be useful, but WITHOUT
+  ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+  FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
+  more details.
+
+  You should have received a copy of the GNU General Public License along with
+  this program; if not, write to the Free Software Foundation, Inc.,
+  51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
+
+  The full GNU General Public License is included in this distribution in
+  the file called "COPYING".
+
+  Contact Information:
+  Linux NICS <linux.nics@intel.com>
+  e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
+  Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
+
+*******************************************************************************/
+
+
+/* Linux PRO/1000 Ethernet Driver main header file */
+
+#ifndef _E1000_H_
+#define _E1000_H_
+
+#include <linux/stddef.h>
+#include <linux/module.h>
+#include <linux/types.h>
+#include <asm/byteorder.h>
+#include <linux/init.h>
+#include <linux/mm.h>
+#include <linux/errno.h>
+#include <linux/ioport.h>
+#include <linux/pci.h>
+#include <linux/kernel.h>
+#include <linux/netdevice.h>
+#include <linux/etherdevice.h>
+#include <linux/skbuff.h>
+#include <linux/delay.h>
+#include <linux/timer.h>
+#include <linux/slab.h>
+#include <linux/vmalloc.h>
+#include <linux/interrupt.h>
+#include <linux/string.h>
+#include <linux/pagemap.h>
+#include <linux/dma-mapping.h>
+#include <linux/bitops.h>
+#include <asm/io.h>
+#include <asm/irq.h>
+#include <linux/capability.h>
+#include <linux/in.h>
+#include <linux/ip.h>
+#include <linux/ipv6.h>
+#include <linux/tcp.h>
+#include <linux/udp.h>
+#include <net/pkt_sched.h>
+#include <linux/list.h>
+#include <linux/reboot.h>
+#include <net/checksum.h>
+#include <linux/mii.h>
+#include <linux/ethtool.h>
+#include <linux/if_vlan.h>
+
+#define BAR_0		0
+#define BAR_1		1
+#define BAR_5		5
+
+#define INTEL_E1000_ETHERNET_DEVICE(device_id) {\
+	PCI_DEVICE(PCI_VENDOR_ID_INTEL, device_id)}
+
+struct e1000_adapter;
+
+#include "e1000_hw.h"
+
+#ifdef DBG
+#define E1000_DBG(args...) printk(KERN_DEBUG "e1000: " args)
+#else
+#define E1000_DBG(args...)
+#endif
+
+#define E1000_ERR(args...) printk(KERN_ERR "e1000: " args)
+
+#define PFX "e1000: "
+
+#define DPRINTK(nlevel, klevel, fmt, args...)				\
+do {									\
+	if (NETIF_MSG_##nlevel & adapter->msg_enable)			\
+		printk(KERN_##klevel PFX "%s: %s: " fmt,		\
+		       adapter->netdev->name, __func__, ##args);	\
+} while (0)
+
+#define E1000_MAX_INTR 10
+
+/* TX/RX descriptor defines */
+#define E1000_DEFAULT_TXD                  256
+#define E1000_MAX_TXD                      256
+#define E1000_MIN_TXD                       80
+#define E1000_MAX_82544_TXD               4096
+
+#define E1000_DEFAULT_RXD                  256
+#define E1000_MAX_RXD                      256
+#define E1000_MIN_RXD                       80
+#define E1000_MAX_82544_RXD               4096
+
+/* this is the size past which hardware will drop packets when setting LPE=0 */
+#define MAXIMUM_ETHERNET_VLAN_SIZE 1522
+
+/* Supported Rx Buffer Sizes */
+#define E1000_RXBUFFER_128   128    /* Used for packet split */
+#define E1000_RXBUFFER_256   256    /* Used for packet split */
+#define E1000_RXBUFFER_512   512
+#define E1000_RXBUFFER_1024  1024
+#define E1000_RXBUFFER_2048  2048
+#define E1000_RXBUFFER_4096  4096
+#define E1000_RXBUFFER_8192  8192
+#define E1000_RXBUFFER_16384 16384
+
+/* SmartSpeed delimiters */
+#define E1000_SMARTSPEED_DOWNSHIFT 3
+#define E1000_SMARTSPEED_MAX       15
+
+/* Packet Buffer allocations */
+#define E1000_PBA_BYTES_SHIFT 0xA
+#define E1000_TX_HEAD_ADDR_SHIFT 7
+#define E1000_PBA_TX_MASK 0xFFFF0000
+
+/* Flow Control Watermarks */
+#define E1000_FC_HIGH_DIFF 0x1638  /* High: 5688 bytes below Rx FIFO size */
+#define E1000_FC_LOW_DIFF 0x1640   /* Low:  5696 bytes below Rx FIFO size */
+
+#define E1000_FC_PAUSE_TIME 0x0680 /* 858 usec */
+
+/* How many Tx Descriptors do we need to call netif_wake_queue ? */
+#define E1000_TX_QUEUE_WAKE	16
+/* How many Rx Buffers do we bundle into one write to the hardware ? */
+#define E1000_RX_BUFFER_WRITE	16	/* Must be power of 2 */
+
+#define AUTO_ALL_MODES            0
+#define E1000_EEPROM_82544_APM    0x0004
+#define E1000_EEPROM_ICH8_APME    0x0004
+#define E1000_EEPROM_APME         0x0400
+
+#ifndef E1000_MASTER_SLAVE
+/* Switch to override PHY master/slave setting */
+#define E1000_MASTER_SLAVE	e1000_ms_hw_default
+#endif
+
+#define E1000_MNG_VLAN_NONE (-1)
+
+/* wrapper around a pointer to a socket buffer,
+ * so a DMA handle can be stored along with the buffer */
+struct e1000_buffer {
+	struct sk_buff *skb;
+	dma_addr_t dma;
+	unsigned long time_stamp;
+	u16 length;
+	u16 next_to_watch;
+};
+
+struct e1000_tx_ring {
+	/* pointer to the descriptor ring memory */
+	void *desc;
+	/* physical address of the descriptor ring */
+	dma_addr_t dma;
+	/* length of descriptor ring in bytes */
+	unsigned int size;
+	/* number of descriptors in the ring */
+	unsigned int count;
+	/* next descriptor to associate a buffer with */
+	unsigned int next_to_use;
+	/* next descriptor to check for DD status bit */
+	unsigned int next_to_clean;
+	/* array of buffer information structs */
+	struct e1000_buffer *buffer_info;
+
+	u16 tdh;
+	u16 tdt;
+	bool last_tx_tso;
+};
+
+struct e1000_rx_ring {
+	/* pointer to the descriptor ring memory */
+	void *desc;
+	/* physical address of the descriptor ring */
+	dma_addr_t dma;
+	/* length of descriptor ring in bytes */
+	unsigned int size;
+	/* number of descriptors in the ring */
+	unsigned int count;
+	/* next descriptor to associate a buffer with */
+	unsigned int next_to_use;
+	/* next descriptor to check for DD status bit */
+	unsigned int next_to_clean;
+	/* array of buffer information structs */
+	struct e1000_buffer *buffer_info;
+
+	/* cpu for rx queue */
+	int cpu;
+
+	u16 rdh;
+	u16 rdt;
+};
+
+#define E1000_DESC_UNUSED(R)						\
+	((((R)->next_to_clean > (R)->next_to_use)			\
+	  ? 0 : (R)->count) + (R)->next_to_clean - (R)->next_to_use - 1)
+
+#define E1000_RX_DESC_EXT(R, i)						\
+	(&(((union e1000_rx_desc_extended *)((R).desc))[i]))
+#define E1000_GET_DESC(R, i, type)	(&(((struct type *)((R).desc))[i]))
+#define E1000_RX_DESC(R, i)		E1000_GET_DESC(R, i, e1000_rx_desc)
+#define E1000_TX_DESC(R, i)		E1000_GET_DESC(R, i, e1000_tx_desc)
+#define E1000_CONTEXT_DESC(R, i)	E1000_GET_DESC(R, i, e1000_context_desc)
+
+/* board specific private data structure */
+
+struct e1000_adapter {
+	struct timer_list tx_fifo_stall_timer;
+	struct timer_list watchdog_timer;
+	struct timer_list phy_info_timer;
+	struct vlan_group *vlgrp;
+	u16 mng_vlan_id;
+	u32 bd_number;
+	u32 rx_buffer_len;
+	u32 wol;
+	u32 smartspeed;
+	u32 en_mng_pt;
+	u16 link_speed;
+	u16 link_duplex;
+	spinlock_t stats_lock;
+	unsigned int total_tx_bytes;
+	unsigned int total_tx_packets;
+	unsigned int total_rx_bytes;
+	unsigned int total_rx_packets;
+	/* Interrupt Throttle Rate */
+	u32 itr;
+	u32 itr_setting;
+	u16 tx_itr;
+	u16 rx_itr;
+
+	struct work_struct reset_task;
+	u8 fc_autoneg;
+
+	struct timer_list blink_timer;
+	unsigned long led_status;
+
+	/* TX */
+	struct e1000_tx_ring *tx_ring;      /* One per active queue */
+	unsigned int restart_queue;
+	unsigned long tx_queue_len;
+	u32 txd_cmd;
+	u32 tx_int_delay;
+	u32 tx_abs_int_delay;
+	u32 gotcl;
+	u64 gotcl_old;
+	u64 tpt_old;
+	u64 colc_old;
+	u32 tx_timeout_count;
+	u32 tx_fifo_head;
+	u32 tx_head_addr;
+	u32 tx_fifo_size;
+	u8  tx_timeout_factor;
+	atomic_t tx_fifo_stall;
+	bool pcix_82544;
+	bool detect_tx_hung;
+
+	/* RX */
+	bool (*clean_rx)(struct e1000_adapter *adapter,
+			 struct e1000_rx_ring *rx_ring,
+			 int *work_done, int work_to_do);
+	void (*alloc_rx_buf)(struct e1000_adapter *adapter,
+			     struct e1000_rx_ring *rx_ring,
+			     int cleaned_count);
+	struct e1000_rx_ring *rx_ring;      /* One per active queue */
+	struct napi_struct napi;
+
+	int num_tx_queues;
+	int num_rx_queues;
+
+	u64 hw_csum_err;
+	u64 hw_csum_good;
+	u64 rx_hdr_split;
+	u32 alloc_rx_buff_failed;
+	u32 rx_int_delay;
+	u32 rx_abs_int_delay;
+	bool rx_csum;
+	u32 gorcl;
+	u64 gorcl_old;
+
+	/* OS defined structs */
+	struct net_device *netdev;
+	struct pci_dev *pdev;
+	struct net_device_stats net_stats;
+
+	/* structs defined in e1000_hw.h */
+	struct e1000_hw hw;
+	struct e1000_hw_stats stats;
+	struct e1000_phy_info phy_info;
+	struct e1000_phy_stats phy_stats;
+
+	u32 test_icr;
+	struct e1000_tx_ring test_tx_ring;
+	struct e1000_rx_ring test_rx_ring;
+
+	int msg_enable;
+	bool have_msi;
+
+	/* to not mess up cache alignment, always add to the bottom */
+	bool tso_force;
+	bool smart_power_down;	/* phy smart power down */
+	bool quad_port_a;
+	unsigned long flags;
+	u32 eeprom_wol;
+
+	/* for ioport free */
+	int bars;
+	int need_ioport;
+};
+
+enum e1000_state_t {
+	__E1000_TESTING,
+	__E1000_RESETTING,
+	__E1000_DOWN
+};
+
+extern char e1000_driver_name[];
+extern const char e1000_driver_version[];
+
+extern int e1000_up(struct e1000_adapter *adapter);
+extern void e1000_down(struct e1000_adapter *adapter);
+extern void e1000_reinit_locked(struct e1000_adapter *adapter);
+extern void e1000_reset(struct e1000_adapter *adapter);
+extern int e1000_set_spd_dplx(struct e1000_adapter *adapter, u16 spddplx);
+extern int e1000_setup_all_rx_resources(struct e1000_adapter *adapter);
+extern int e1000_setup_all_tx_resources(struct e1000_adapter *adapter);
+extern void e1000_free_all_rx_resources(struct e1000_adapter *adapter);
+extern void e1000_free_all_tx_resources(struct e1000_adapter *adapter);
+extern void e1000_update_stats(struct e1000_adapter *adapter);
+extern void e1000_power_up_phy(struct e1000_adapter *);
+extern void e1000_set_ethtool_ops(struct net_device *netdev);
+extern void e1000_check_options(struct e1000_adapter *adapter);
+
+#endif /* _E1000_H_ */
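Both header copies define the same descriptor-ring bookkeeping, including E1000_DESC_UNUSED. A minimal user-space sketch of that arithmetic, with illustrative ring values, is:

```c
#include <stdio.h>

/* User-space restatement of the E1000_DESC_UNUSED() arithmetic from the
 * headers above: free slots between next_to_use and next_to_clean, minus
 * one descriptor that is always kept unused.  Values are illustrative. */
struct ring_sketch { unsigned int count, next_to_use, next_to_clean; };

static unsigned int desc_unused(const struct ring_sketch *r)
{
    return ((r->next_to_clean > r->next_to_use ? 0 : r->count)
            + r->next_to_clean - r->next_to_use - 1);
}

int main(void)
{
    struct ring_sketch r = { .count = 256, .next_to_use = 10, .next_to_clean = 4 };
    printf("unused descriptors: %u\n", desc_unused(&r)); /* 256 + 4 - 10 - 1 = 249 */
    return 0;
}
```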
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/devices/e1000/e1000_ethtool-2.6.31-ethercat.c	Wed Apr 13 22:06:28 2011 +0200
@@ -0,0 +1,2009 @@
+/*******************************************************************************
+
+  Intel PRO/1000 Linux driver
+  Copyright(c) 1999 - 2006 Intel Corporation.
+
+  This program is free software; you can redistribute it and/or modify it
+  under the terms and conditions of the GNU General Public License,
+  version 2, as published by the Free Software Foundation.
+
+  This program is distributed in the hope it will be useful, but WITHOUT
+  ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+  FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
+  more details.
+
+  You should have received a copy of the GNU General Public License along with
+  this program; if not, write to the Free Software Foundation, Inc.,
+  51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
+
+  The full GNU General Public License is included in this distribution in
+  the file called "COPYING".
+
+  Contact Information:
+  Linux NICS <linux.nics@intel.com>
+  e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
+  Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
+
+*******************************************************************************/
+
+/* ethtool support for e1000 */
+
+#include "e1000-2.6.31-ethercat.h"
+#include <asm/uaccess.h>
+
+struct e1000_stats {
+	char stat_string[ETH_GSTRING_LEN];
+	int sizeof_stat;
+	int stat_offset;
+};
+
+#define E1000_STAT(m) FIELD_SIZEOF(struct e1000_adapter, m), \
+		      offsetof(struct e1000_adapter, m)
+static const struct e1000_stats e1000_gstrings_stats[] = {
+	{ "rx_packets", E1000_STAT(stats.gprc) },
+	{ "tx_packets", E1000_STAT(stats.gptc) },
+	{ "rx_bytes", E1000_STAT(stats.gorcl) },
+	{ "tx_bytes", E1000_STAT(stats.gotcl) },
+	{ "rx_broadcast", E1000_STAT(stats.bprc) },
+	{ "tx_broadcast", E1000_STAT(stats.bptc) },
+	{ "rx_multicast", E1000_STAT(stats.mprc) },
+	{ "tx_multicast", E1000_STAT(stats.mptc) },
+	{ "rx_errors", E1000_STAT(stats.rxerrc) },
+	{ "tx_errors", E1000_STAT(stats.txerrc) },
+	{ "tx_dropped", E1000_STAT(net_stats.tx_dropped) },
+	{ "multicast", E1000_STAT(stats.mprc) },
+	{ "collisions", E1000_STAT(stats.colc) },
+	{ "rx_length_errors", E1000_STAT(stats.rlerrc) },
+	{ "rx_over_errors", E1000_STAT(net_stats.rx_over_errors) },
+	{ "rx_crc_errors", E1000_STAT(stats.crcerrs) },
+	{ "rx_frame_errors", E1000_STAT(net_stats.rx_frame_errors) },
+	{ "rx_no_buffer_count", E1000_STAT(stats.rnbc) },
+	{ "rx_missed_errors", E1000_STAT(stats.mpc) },
+	{ "tx_aborted_errors", E1000_STAT(stats.ecol) },
+	{ "tx_carrier_errors", E1000_STAT(stats.tncrs) },
+	{ "tx_fifo_errors", E1000_STAT(net_stats.tx_fifo_errors) },
+	{ "tx_heartbeat_errors", E1000_STAT(net_stats.tx_heartbeat_errors) },
+	{ "tx_window_errors", E1000_STAT(stats.latecol) },
+	{ "tx_abort_late_coll", E1000_STAT(stats.latecol) },
+	{ "tx_deferred_ok", E1000_STAT(stats.dc) },
+	{ "tx_single_coll_ok", E1000_STAT(stats.scc) },
+	{ "tx_multi_coll_ok", E1000_STAT(stats.mcc) },
+	{ "tx_timeout_count", E1000_STAT(tx_timeout_count) },
+	{ "tx_restart_queue", E1000_STAT(restart_queue) },
+	{ "rx_long_length_errors", E1000_STAT(stats.roc) },
+	{ "rx_short_length_errors", E1000_STAT(stats.ruc) },
+	{ "rx_align_errors", E1000_STAT(stats.algnerrc) },
+	{ "tx_tcp_seg_good", E1000_STAT(stats.tsctc) },
+	{ "tx_tcp_seg_failed", E1000_STAT(stats.tsctfc) },
+	{ "rx_flow_control_xon", E1000_STAT(stats.xonrxc) },
+	{ "rx_flow_control_xoff", E1000_STAT(stats.xoffrxc) },
+	{ "tx_flow_control_xon", E1000_STAT(stats.xontxc) },
+	{ "tx_flow_control_xoff", E1000_STAT(stats.xofftxc) },
+	{ "rx_long_byte_count", E1000_STAT(stats.gorcl) },
+	{ "rx_csum_offload_good", E1000_STAT(hw_csum_good) },
+	{ "rx_csum_offload_errors", E1000_STAT(hw_csum_err) },
+	{ "rx_header_split", E1000_STAT(rx_hdr_split) },
+	{ "alloc_rx_buff_failed", E1000_STAT(alloc_rx_buff_failed) },
+	{ "tx_smbus", E1000_STAT(stats.mgptc) },
+	{ "rx_smbus", E1000_STAT(stats.mgprc) },
+	{ "dropped_smbus", E1000_STAT(stats.mgpdc) },
+};
+
+#define E1000_QUEUE_STATS_LEN 0
+#define E1000_GLOBAL_STATS_LEN ARRAY_SIZE(e1000_gstrings_stats)
+#define E1000_STATS_LEN (E1000_GLOBAL_STATS_LEN + E1000_QUEUE_STATS_LEN)
+static const char e1000_gstrings_test[][ETH_GSTRING_LEN] = {
+	"Register test  (offline)", "Eeprom test    (offline)",
+	"Interrupt test (offline)", "Loopback test  (offline)",
+	"Link test   (on/offline)"
+};
+#define E1000_TEST_LEN	ARRAY_SIZE(e1000_gstrings_test)
+
+static int e1000_get_settings(struct net_device *netdev,
+			      struct ethtool_cmd *ecmd)
+{
+	struct e1000_adapter *adapter = netdev_priv(netdev);
+	struct e1000_hw *hw = &adapter->hw;
+
+	if (hw->media_type == e1000_media_type_copper) {
+
+		ecmd->supported = (SUPPORTED_10baseT_Half |
+		                   SUPPORTED_10baseT_Full |
+		                   SUPPORTED_100baseT_Half |
+		                   SUPPORTED_100baseT_Full |
+		                   SUPPORTED_1000baseT_Full|
+		                   SUPPORTED_Autoneg |
+		                   SUPPORTED_TP);
+		if (hw->phy_type == e1000_phy_ife)
+			ecmd->supported &= ~SUPPORTED_1000baseT_Full;
+		ecmd->advertising = ADVERTISED_TP;
+
+		if (hw->autoneg == 1) {
+			ecmd->advertising |= ADVERTISED_Autoneg;
+			/* the e1000 autoneg seems to match ethtool nicely */
+			ecmd->advertising |= hw->autoneg_advertised;
+		}
+
+		ecmd->port = PORT_TP;
+		ecmd->phy_address = hw->phy_addr;
+
+		if (hw->mac_type == e1000_82543)
+			ecmd->transceiver = XCVR_EXTERNAL;
+		else
+			ecmd->transceiver = XCVR_INTERNAL;
+
+	} else {
+		ecmd->supported   = (SUPPORTED_1000baseT_Full |
+				     SUPPORTED_FIBRE |
+				     SUPPORTED_Autoneg);
+
+		ecmd->advertising = (ADVERTISED_1000baseT_Full |
+				     ADVERTISED_FIBRE |
+				     ADVERTISED_Autoneg);
+
+		ecmd->port = PORT_FIBRE;
+
+		if (hw->mac_type >= e1000_82545)
+			ecmd->transceiver = XCVR_INTERNAL;
+		else
+			ecmd->transceiver = XCVR_EXTERNAL;
+	}
+
+	if (er32(STATUS) & E1000_STATUS_LU) {
+
+		e1000_get_speed_and_duplex(hw, &adapter->link_speed,
+		                                   &adapter->link_duplex);
+		ecmd->speed = adapter->link_speed;
+
+		/* unfortunatly FULL_DUPLEX != DUPLEX_FULL
+		 *          and HALF_DUPLEX != DUPLEX_HALF */
+
+		if (adapter->link_duplex == FULL_DUPLEX)
+			ecmd->duplex = DUPLEX_FULL;
+		else
+			ecmd->duplex = DUPLEX_HALF;
+	} else {
+		ecmd->speed = -1;
+		ecmd->duplex = -1;
+	}
+
+	ecmd->autoneg = ((hw->media_type == e1000_media_type_fiber) ||
+			 hw->autoneg) ? AUTONEG_ENABLE : AUTONEG_DISABLE;
+	return 0;
+}
+
+static int e1000_set_settings(struct net_device *netdev,
+			      struct ethtool_cmd *ecmd)
+{
+	struct e1000_adapter *adapter = netdev_priv(netdev);
+	struct e1000_hw *hw = &adapter->hw;
+
+	/* When SoL/IDER sessions are active, autoneg/speed/duplex
+	 * cannot be changed */
+	if (e1000_check_phy_reset_block(hw)) {
+		DPRINTK(DRV, ERR, "Cannot change link characteristics "
+		        "when SoL/IDER is active.\n");
+		return -EINVAL;
+	}
+
+	if (adapter->ecdev)
+		return -EBUSY;
+
+	while (test_and_set_bit(__E1000_RESETTING, &adapter->flags))
+		msleep(1);
+
+	if (ecmd->autoneg == AUTONEG_ENABLE) {
+		hw->autoneg = 1;
+		if (hw->media_type == e1000_media_type_fiber)
+			hw->autoneg_advertised = ADVERTISED_1000baseT_Full |
+				     ADVERTISED_FIBRE |
+				     ADVERTISED_Autoneg;
+		else
+			hw->autoneg_advertised = ecmd->advertising |
+			                         ADVERTISED_TP |
+			                         ADVERTISED_Autoneg;
+		ecmd->advertising = hw->autoneg_advertised;
+	} else
+		if (e1000_set_spd_dplx(adapter, ecmd->speed + ecmd->duplex)) {
+			clear_bit(__E1000_RESETTING, &adapter->flags);
+			return -EINVAL;
+		}
+
+	/* reset the link */
+
+	if (netif_running(adapter->netdev)) {
+		e1000_down(adapter);
+		e1000_up(adapter);
+	} else
+		e1000_reset(adapter);
+
+	clear_bit(__E1000_RESETTING, &adapter->flags);
+	return 0;
+}
+
+static void e1000_get_pauseparam(struct net_device *netdev,
+				 struct ethtool_pauseparam *pause)
+{
+	struct e1000_adapter *adapter = netdev_priv(netdev);
+	struct e1000_hw *hw = &adapter->hw;
+
+	pause->autoneg =
+		(adapter->fc_autoneg ? AUTONEG_ENABLE : AUTONEG_DISABLE);
+
+	if (hw->fc == E1000_FC_RX_PAUSE)
+		pause->rx_pause = 1;
+	else if (hw->fc == E1000_FC_TX_PAUSE)
+		pause->tx_pause = 1;
+	else if (hw->fc == E1000_FC_FULL) {
+		pause->rx_pause = 1;
+		pause->tx_pause = 1;
+	}
+}
+
+static int e1000_set_pauseparam(struct net_device *netdev,
+				struct ethtool_pauseparam *pause)
+{
+	struct e1000_adapter *adapter = netdev_priv(netdev);
+	struct e1000_hw *hw = &adapter->hw;
+	int retval = 0;
+
+	if (adapter->ecdev)
+		return -EBUSY;
+
+	adapter->fc_autoneg = pause->autoneg;
+
+	while (test_and_set_bit(__E1000_RESETTING, &adapter->flags))
+		msleep(1);
+
+	if (pause->rx_pause && pause->tx_pause)
+		hw->fc = E1000_FC_FULL;
+	else if (pause->rx_pause && !pause->tx_pause)
+		hw->fc = E1000_FC_RX_PAUSE;
+	else if (!pause->rx_pause && pause->tx_pause)
+		hw->fc = E1000_FC_TX_PAUSE;
+	else if (!pause->rx_pause && !pause->tx_pause)
+		hw->fc = E1000_FC_NONE;
+
+	hw->original_fc = hw->fc;
+
+	if (adapter->fc_autoneg == AUTONEG_ENABLE) {
+		if (netif_running(adapter->netdev)) {
+			e1000_down(adapter);
+			e1000_up(adapter);
+		} else
+			e1000_reset(adapter);
+	} else
+		retval = ((hw->media_type == e1000_media_type_fiber) ?
+			  e1000_setup_link(hw) : e1000_force_mac_fc(hw));
+
+	clear_bit(__E1000_RESETTING, &adapter->flags);
+	return retval;
+}
+
+static u32 e1000_get_rx_csum(struct net_device *netdev)
+{
+	struct e1000_adapter *adapter = netdev_priv(netdev);
+	return adapter->rx_csum;
+}
+
+static int e1000_set_rx_csum(struct net_device *netdev, u32 data)
+{
+	struct e1000_adapter *adapter = netdev_priv(netdev);
+
+	if (adapter->ecdev)
+		return -EBUSY;
+
+	adapter->rx_csum = data;
+
+	if (netif_running(netdev))
+		e1000_reinit_locked(adapter);
+	else
+		e1000_reset(adapter);
+	return 0;
+}
+
+static u32 e1000_get_tx_csum(struct net_device *netdev)
+{
+	return (netdev->features & NETIF_F_HW_CSUM) != 0;
+}
+
+static int e1000_set_tx_csum(struct net_device *netdev, u32 data)
+{
+	struct e1000_adapter *adapter = netdev_priv(netdev);
+	struct e1000_hw *hw = &adapter->hw;
+
+	if (hw->mac_type < e1000_82543) {
+		if (!data)
+			return -EINVAL;
+		return 0;
+	}
+
+	if (data)
+		netdev->features |= NETIF_F_HW_CSUM;
+	else
+		netdev->features &= ~NETIF_F_HW_CSUM;
+
+	return 0;
+}
+
+static int e1000_set_tso(struct net_device *netdev, u32 data)
+{
+	struct e1000_adapter *adapter = netdev_priv(netdev);
+	struct e1000_hw *hw = &adapter->hw;
+
+	if ((hw->mac_type < e1000_82544) ||
+	    (hw->mac_type == e1000_82547))
+		return data ? -EINVAL : 0;
+
+	if (data)
+		netdev->features |= NETIF_F_TSO;
+	else
+		netdev->features &= ~NETIF_F_TSO;
+
+	if (data && (adapter->hw.mac_type > e1000_82547_rev_2))
+		netdev->features |= NETIF_F_TSO6;
+	else
+		netdev->features &= ~NETIF_F_TSO6;
+
+	DPRINTK(PROBE, INFO, "TSO is %s\n", data ? "Enabled" : "Disabled");
+	adapter->tso_force = true;
+	return 0;
+}
+
+static u32 e1000_get_msglevel(struct net_device *netdev)
+{
+	struct e1000_adapter *adapter = netdev_priv(netdev);
+	return adapter->msg_enable;
+}
+
+static void e1000_set_msglevel(struct net_device *netdev, u32 data)
+{
+	struct e1000_adapter *adapter = netdev_priv(netdev);
+	adapter->msg_enable = data;
+}
+
+static int e1000_get_regs_len(struct net_device *netdev)
+{
+#define E1000_REGS_LEN 32
+	return E1000_REGS_LEN * sizeof(u32);
+}
+
+static void e1000_get_regs(struct net_device *netdev, struct ethtool_regs *regs,
+			   void *p)
+{
+	struct e1000_adapter *adapter = netdev_priv(netdev);
+	struct e1000_hw *hw = &adapter->hw;
+	u32 *regs_buff = p;
+	u16 phy_data;
+
+	memset(p, 0, E1000_REGS_LEN * sizeof(u32));
+
+	regs->version = (1 << 24) | (hw->revision_id << 16) | hw->device_id;
+
+	regs_buff[0]  = er32(CTRL);
+	regs_buff[1]  = er32(STATUS);
+
+	regs_buff[2]  = er32(RCTL);
+	regs_buff[3]  = er32(RDLEN);
+	regs_buff[4]  = er32(RDH);
+	regs_buff[5]  = er32(RDT);
+	regs_buff[6]  = er32(RDTR);
+
+	regs_buff[7]  = er32(TCTL);
+	regs_buff[8]  = er32(TDLEN);
+	regs_buff[9]  = er32(TDH);
+	regs_buff[10] = er32(TDT);
+	regs_buff[11] = er32(TIDV);
+
+	regs_buff[12] = hw->phy_type;  /* PHY type (IGP=1, M88=0) */
+	if (hw->phy_type == e1000_phy_igp) {
+		e1000_write_phy_reg(hw, IGP01E1000_PHY_PAGE_SELECT,
+				    IGP01E1000_PHY_AGC_A);
+		e1000_read_phy_reg(hw, IGP01E1000_PHY_AGC_A &
+				   IGP01E1000_PHY_PAGE_SELECT, &phy_data);
+		regs_buff[13] = (u32)phy_data; /* cable length */
+		e1000_write_phy_reg(hw, IGP01E1000_PHY_PAGE_SELECT,
+				    IGP01E1000_PHY_AGC_B);
+		e1000_read_phy_reg(hw, IGP01E1000_PHY_AGC_B &
+				   IGP01E1000_PHY_PAGE_SELECT, &phy_data);
+		regs_buff[14] = (u32)phy_data; /* cable length */
+		e1000_write_phy_reg(hw, IGP01E1000_PHY_PAGE_SELECT,
+				    IGP01E1000_PHY_AGC_C);
+		e1000_read_phy_reg(hw, IGP01E1000_PHY_AGC_C &
+				   IGP01E1000_PHY_PAGE_SELECT, &phy_data);
+		regs_buff[15] = (u32)phy_data; /* cable length */
+		e1000_write_phy_reg(hw, IGP01E1000_PHY_PAGE_SELECT,
+				    IGP01E1000_PHY_AGC_D);
+		e1000_read_phy_reg(hw, IGP01E1000_PHY_AGC_D &
+				   IGP01E1000_PHY_PAGE_SELECT, &phy_data);
+		regs_buff[16] = (u32)phy_data; /* cable length */
+		regs_buff[17] = 0; /* extended 10bt distance (not needed) */
+		e1000_write_phy_reg(hw, IGP01E1000_PHY_PAGE_SELECT, 0x0);
+		e1000_read_phy_reg(hw, IGP01E1000_PHY_PORT_STATUS &
+				   IGP01E1000_PHY_PAGE_SELECT, &phy_data);
+		regs_buff[18] = (u32)phy_data; /* cable polarity */
+		e1000_write_phy_reg(hw, IGP01E1000_PHY_PAGE_SELECT,
+				    IGP01E1000_PHY_PCS_INIT_REG);
+		e1000_read_phy_reg(hw, IGP01E1000_PHY_PCS_INIT_REG &
+				   IGP01E1000_PHY_PAGE_SELECT, &phy_data);
+		regs_buff[19] = (u32)phy_data; /* cable polarity */
+		regs_buff[20] = 0; /* polarity correction enabled (always) */
+		regs_buff[22] = 0; /* phy receive errors (unavailable) */
+		regs_buff[23] = regs_buff[18]; /* mdix mode */
+		e1000_write_phy_reg(hw, IGP01E1000_PHY_PAGE_SELECT, 0x0);
+	} else {
+		e1000_read_phy_reg(hw, M88E1000_PHY_SPEC_STATUS, &phy_data);
+		regs_buff[13] = (u32)phy_data; /* cable length */
+		regs_buff[14] = 0;  /* Dummy (to align w/ IGP phy reg dump) */
+		regs_buff[15] = 0;  /* Dummy (to align w/ IGP phy reg dump) */
+		regs_buff[16] = 0;  /* Dummy (to align w/ IGP phy reg dump) */
+		e1000_read_phy_reg(hw, M88E1000_PHY_SPEC_CTRL, &phy_data);
+		regs_buff[17] = (u32)phy_data; /* extended 10bt distance */
+		regs_buff[18] = regs_buff[13]; /* cable polarity */
+		regs_buff[19] = 0;  /* Dummy (to align w/ IGP phy reg dump) */
+		regs_buff[20] = regs_buff[17]; /* polarity correction */
+		/* phy receive errors */
+		regs_buff[22] = adapter->phy_stats.receive_errors;
+		regs_buff[23] = regs_buff[13]; /* mdix mode */
+	}
+	regs_buff[21] = adapter->phy_stats.idle_errors;  /* phy idle errors */
+	e1000_read_phy_reg(hw, PHY_1000T_STATUS, &phy_data);
+	regs_buff[24] = (u32)phy_data;  /* phy local receiver status */
+	regs_buff[25] = regs_buff[24];  /* phy remote receiver status */
+	if (hw->mac_type >= e1000_82540 &&
+	    hw->mac_type < e1000_82571 &&
+	    hw->media_type == e1000_media_type_copper) {
+		regs_buff[26] = er32(MANC);
+	}
+}
+
+static int e1000_get_eeprom_len(struct net_device *netdev)
+{
+	struct e1000_adapter *adapter = netdev_priv(netdev);
+	struct e1000_hw *hw = &adapter->hw;
+
+	return hw->eeprom.word_size * 2;
+}
+
+static int e1000_get_eeprom(struct net_device *netdev,
+			    struct ethtool_eeprom *eeprom, u8 *bytes)
+{
+	struct e1000_adapter *adapter = netdev_priv(netdev);
+	struct e1000_hw *hw = &adapter->hw;
+	u16 *eeprom_buff;
+	int first_word, last_word;
+	int ret_val = 0;
+	u16 i;
+
+	if (eeprom->len == 0)
+		return -EINVAL;
+
+	eeprom->magic = hw->vendor_id | (hw->device_id << 16);
+
+	first_word = eeprom->offset >> 1;
+	last_word = (eeprom->offset + eeprom->len - 1) >> 1;
+
+	eeprom_buff = kmalloc(sizeof(u16) *
+			(last_word - first_word + 1), GFP_KERNEL);
+	if (!eeprom_buff)
+		return -ENOMEM;
+
+	if (hw->eeprom.type == e1000_eeprom_spi)
+		ret_val = e1000_read_eeprom(hw, first_word,
+					    last_word - first_word + 1,
+					    eeprom_buff);
+	else {
+		for (i = 0; i < last_word - first_word + 1; i++) {
+			ret_val = e1000_read_eeprom(hw, first_word + i, 1,
+						    &eeprom_buff[i]);
+			if (ret_val)
+				break;
+		}
+	}
+
+	/* Device's eeprom is always little-endian, word addressable */
+	for (i = 0; i < last_word - first_word + 1; i++)
+		le16_to_cpus(&eeprom_buff[i]);
+
+	memcpy(bytes, (u8 *)eeprom_buff + (eeprom->offset & 1),
+			eeprom->len);
+	kfree(eeprom_buff);
+
+	return ret_val;
+}
+
+static int e1000_set_eeprom(struct net_device *netdev,
+			    struct ethtool_eeprom *eeprom, u8 *bytes)
+{
+	struct e1000_adapter *adapter = netdev_priv(netdev);
+	struct e1000_hw *hw = &adapter->hw;
+	u16 *eeprom_buff;
+	void *ptr;
+	int max_len, first_word, last_word, ret_val = 0;
+	u16 i;
+
+	if (eeprom->len == 0)
+		return -EOPNOTSUPP;
+
+	if (eeprom->magic != (hw->vendor_id | (hw->device_id << 16)))
+		return -EFAULT;
+
+	max_len = hw->eeprom.word_size * 2;
+
+	first_word = eeprom->offset >> 1;
+	last_word = (eeprom->offset + eeprom->len - 1) >> 1;
+	eeprom_buff = kmalloc(max_len, GFP_KERNEL);
+	if (!eeprom_buff)
+		return -ENOMEM;
+
+	ptr = (void *)eeprom_buff;
+
+	if (eeprom->offset & 1) {
+		/* need read/modify/write of first changed EEPROM word */
+		/* only the second byte of the word is being modified */
+		ret_val = e1000_read_eeprom(hw, first_word, 1,
+					    &eeprom_buff[0]);
+		ptr++;
+	}
+	if (((eeprom->offset + eeprom->len) & 1) && (ret_val == 0)) {
+		/* need read/modify/write of last changed EEPROM word */
+		/* only the first byte of the word is being modified */
+		ret_val = e1000_read_eeprom(hw, last_word, 1,
+		                  &eeprom_buff[last_word - first_word]);
+	}
+
+	/* Device's eeprom is always little-endian, word addressable */
+	for (i = 0; i < last_word - first_word + 1; i++)
+		le16_to_cpus(&eeprom_buff[i]);
+
+	memcpy(ptr, bytes, eeprom->len);
+
+	for (i = 0; i < last_word - first_word + 1; i++)
+		eeprom_buff[i] = cpu_to_le16(eeprom_buff[i]);
+
+	ret_val = e1000_write_eeprom(hw, first_word,
+				     last_word - first_word + 1, eeprom_buff);
+
+	/* Update the checksum over the first part of the EEPROM if needed
+	 * and flush shadow RAM for 82573 conrollers */
+	if ((ret_val == 0) && ((first_word <= EEPROM_CHECKSUM_REG) ||
+				(hw->mac_type == e1000_82573)))
+		e1000_update_eeprom_checksum(hw);
+
+	kfree(eeprom_buff);
+	return ret_val;
+}
+
+static void e1000_get_drvinfo(struct net_device *netdev,
+			      struct ethtool_drvinfo *drvinfo)
+{
+	struct e1000_adapter *adapter = netdev_priv(netdev);
+	struct e1000_hw *hw = &adapter->hw;
+	char firmware_version[32];
+	u16 eeprom_data;
+
+	strncpy(drvinfo->driver,  e1000_driver_name, 32);
+	strncpy(drvinfo->version, e1000_driver_version, 32);
+
+	/* EEPROM image version # is reported as firmware version # for
+	 * 8257{1|2|3} controllers */
+	e1000_read_eeprom(hw, 5, 1, &eeprom_data);
+	switch (hw->mac_type) {
+	case e1000_82571:
+	case e1000_82572:
+	case e1000_82573:
+	case e1000_80003es2lan:
+	case e1000_ich8lan:
+		sprintf(firmware_version, "%d.%d-%d",
+			(eeprom_data & 0xF000) >> 12,
+			(eeprom_data & 0x0FF0) >> 4,
+			eeprom_data & 0x000F);
+		break;
+	default:
+		sprintf(firmware_version, "N/A");
+	}
+
+	strncpy(drvinfo->fw_version, firmware_version, 32);
+	strncpy(drvinfo->bus_info, pci_name(adapter->pdev), 32);
+	drvinfo->regdump_len = e1000_get_regs_len(netdev);
+	drvinfo->eedump_len = e1000_get_eeprom_len(netdev);
+}
+
+static void e1000_get_ringparam(struct net_device *netdev,
+				struct ethtool_ringparam *ring)
+{
+	struct e1000_adapter *adapter = netdev_priv(netdev);
+	struct e1000_hw *hw = &adapter->hw;
+	e1000_mac_type mac_type = hw->mac_type;
+	struct e1000_tx_ring *txdr = adapter->tx_ring;
+	struct e1000_rx_ring *rxdr = adapter->rx_ring;
+
+	ring->rx_max_pending = (mac_type < e1000_82544) ? E1000_MAX_RXD :
+		E1000_MAX_82544_RXD;
+	ring->tx_max_pending = (mac_type < e1000_82544) ? E1000_MAX_TXD :
+		E1000_MAX_82544_TXD;
+	ring->rx_mini_max_pending = 0;
+	ring->rx_jumbo_max_pending = 0;
+	ring->rx_pending = rxdr->count;
+	ring->tx_pending = txdr->count;
+	ring->rx_mini_pending = 0;
+	ring->rx_jumbo_pending = 0;
+}
+
+static int e1000_set_ringparam(struct net_device *netdev,
+			       struct ethtool_ringparam *ring)
+{
+	struct e1000_adapter *adapter = netdev_priv(netdev);
+	struct e1000_hw *hw = &adapter->hw;
+	e1000_mac_type mac_type = hw->mac_type;
+	struct e1000_tx_ring *txdr, *tx_old;
+	struct e1000_rx_ring *rxdr, *rx_old;
+	int i, err;
+
+	if (adapter->ecdev)
+		return -EBUSY;
+
+	if ((ring->rx_mini_pending) || (ring->rx_jumbo_pending))
+		return -EINVAL;
+
+	while (test_and_set_bit(__E1000_RESETTING, &adapter->flags))
+		msleep(1);
+
+	if (netif_running(adapter->netdev))
+		e1000_down(adapter);
+
+	tx_old = adapter->tx_ring;
+	rx_old = adapter->rx_ring;
+
+	err = -ENOMEM;
+	txdr = kcalloc(adapter->num_tx_queues, sizeof(struct e1000_tx_ring), GFP_KERNEL);
+	if (!txdr)
+		goto err_alloc_tx;
+
+	rxdr = kcalloc(adapter->num_rx_queues, sizeof(struct e1000_rx_ring), GFP_KERNEL);
+	if (!rxdr)
+		goto err_alloc_rx;
+
+	adapter->tx_ring = txdr;
+	adapter->rx_ring = rxdr;
+
+	rxdr->count = max(ring->rx_pending,(u32)E1000_MIN_RXD);
+	rxdr->count = min(rxdr->count,(u32)(mac_type < e1000_82544 ?
+		E1000_MAX_RXD : E1000_MAX_82544_RXD));
+	rxdr->count = ALIGN(rxdr->count, REQ_RX_DESCRIPTOR_MULTIPLE);
+
+	txdr->count = max(ring->tx_pending,(u32)E1000_MIN_TXD);
+	txdr->count = min(txdr->count,(u32)(mac_type < e1000_82544 ?
+		E1000_MAX_TXD : E1000_MAX_82544_TXD));
+	txdr->count = ALIGN(txdr->count, REQ_TX_DESCRIPTOR_MULTIPLE);
+
+	for (i = 0; i < adapter->num_tx_queues; i++)
+		txdr[i].count = txdr->count;
+	for (i = 0; i < adapter->num_rx_queues; i++)
+		rxdr[i].count = rxdr->count;
+
+	if (netif_running(adapter->netdev)) {
+		/* Try to get new resources before deleting old */
+		err = e1000_setup_all_rx_resources(adapter);
+		if (err)
+			goto err_setup_rx;
+		err = e1000_setup_all_tx_resources(adapter);
+		if (err)
+			goto err_setup_tx;
+
+		/* save the new, restore the old in order to free it,
+		 * then restore the new back again */
+
+		adapter->rx_ring = rx_old;
+		adapter->tx_ring = tx_old;
+		e1000_free_all_rx_resources(adapter);
+		e1000_free_all_tx_resources(adapter);
+		kfree(tx_old);
+		kfree(rx_old);
+		adapter->rx_ring = rxdr;
+		adapter->tx_ring = txdr;
+		err = e1000_up(adapter);
+		if (err)
+			goto err_setup;
+	}
+
+	clear_bit(__E1000_RESETTING, &adapter->flags);
+	return 0;
+err_setup_tx:
+	e1000_free_all_rx_resources(adapter);
+err_setup_rx:
+	adapter->rx_ring = rx_old;
+	adapter->tx_ring = tx_old;
+	kfree(rxdr);
+err_alloc_rx:
+	kfree(txdr);
+err_alloc_tx:
+	e1000_up(adapter);
+err_setup:
+	clear_bit(__E1000_RESETTING, &adapter->flags);
+	return err;
+}
+
+static bool reg_pattern_test(struct e1000_adapter *adapter, u64 *data, int reg,
+			     u32 mask, u32 write)
+{
+	struct e1000_hw *hw = &adapter->hw;
+	static const u32 test[] =
+		{0x5A5A5A5A, 0xA5A5A5A5, 0x00000000, 0xFFFFFFFF};
+	u8 __iomem *address = hw->hw_addr + reg;
+	u32 read;
+	int i;
+
+	for (i = 0; i < ARRAY_SIZE(test); i++) {
+		writel(write & test[i], address);
+		read = readl(address);
+		if (read != (write & test[i] & mask)) {
+			DPRINTK(DRV, ERR, "pattern test reg %04X failed: "
+				"got 0x%08X expected 0x%08X\n",
+				reg, read, (write & test[i] & mask));
+			*data = reg;
+			return true;
+		}
+	}
+	return false;
+}
+
+static bool reg_set_and_check(struct e1000_adapter *adapter, u64 *data, int reg,
+			      u32 mask, u32 write)
+{
+	struct e1000_hw *hw = &adapter->hw;
+	u8 __iomem *address = hw->hw_addr + reg;
+	u32 read;
+
+	writel(write & mask, address);
+	read = readl(address);
+	if ((read & mask) != (write & mask)) {
+		DPRINTK(DRV, ERR, "set/check reg %04X test failed: "
+			"got 0x%08X expected 0x%08X\n",
+			reg, (read & mask), (write & mask));
+		*data = reg;
+		return true;
+	}
+	return false;
+}
+
+#define REG_PATTERN_TEST(reg, mask, write)			     \
+	do {							     \
+		if (reg_pattern_test(adapter, data,		     \
+			     (hw->mac_type >= e1000_82543)   \
+			     ? E1000_##reg : E1000_82542_##reg,	     \
+			     mask, write))			     \
+			return 1;				     \
+	} while (0)
+
+#define REG_SET_AND_CHECK(reg, mask, write)			     \
+	do {							     \
+		if (reg_set_and_check(adapter, data,		     \
+			      (hw->mac_type >= e1000_82543)  \
+			      ? E1000_##reg : E1000_82542_##reg,     \
+			      mask, write))			     \
+			return 1;				     \
+	} while (0)
+
+static int e1000_reg_test(struct e1000_adapter *adapter, u64 *data)
+{
+	u32 value, before, after;
+	u32 i, toggle;
+	struct e1000_hw *hw = &adapter->hw;
+
+	/* The status register is Read Only, so a write should fail.
+	 * Some bits that get toggled are ignored.
+	 */
+	switch (hw->mac_type) {
+	/* there are several bits on newer hardware that are r/w */
+	case e1000_82571:
+	case e1000_82572:
+	case e1000_80003es2lan:
+		toggle = 0x7FFFF3FF;
+		break;
+	case e1000_82573:
+	case e1000_ich8lan:
+		toggle = 0x7FFFF033;
+		break;
+	default:
+		toggle = 0xFFFFF833;
+		break;
+	}
+
+	before = er32(STATUS);
+	value = (er32(STATUS) & toggle);
+	ew32(STATUS, toggle);
+	after = er32(STATUS) & toggle;
+	if (value != after) {
+		DPRINTK(DRV, ERR, "failed STATUS register test got: "
+		        "0x%08X expected: 0x%08X\n", after, value);
+		*data = 1;
+		return 1;
+	}
+	/* restore previous status */
+	ew32(STATUS, before);
+
+	if (hw->mac_type != e1000_ich8lan) {
+		REG_PATTERN_TEST(FCAL, 0xFFFFFFFF, 0xFFFFFFFF);
+		REG_PATTERN_TEST(FCAH, 0x0000FFFF, 0xFFFFFFFF);
+		REG_PATTERN_TEST(FCT, 0x0000FFFF, 0xFFFFFFFF);
+		REG_PATTERN_TEST(VET, 0x0000FFFF, 0xFFFFFFFF);
+	}
+
+	REG_PATTERN_TEST(RDTR, 0x0000FFFF, 0xFFFFFFFF);
+	REG_PATTERN_TEST(RDBAH, 0xFFFFFFFF, 0xFFFFFFFF);
+	REG_PATTERN_TEST(RDLEN, 0x000FFF80, 0x000FFFFF);
+	REG_PATTERN_TEST(RDH, 0x0000FFFF, 0x0000FFFF);
+	REG_PATTERN_TEST(RDT, 0x0000FFFF, 0x0000FFFF);
+	REG_PATTERN_TEST(FCRTH, 0x0000FFF8, 0x0000FFF8);
+	REG_PATTERN_TEST(FCTTV, 0x0000FFFF, 0x0000FFFF);
+	REG_PATTERN_TEST(TIPG, 0x3FFFFFFF, 0x3FFFFFFF);
+	REG_PATTERN_TEST(TDBAH, 0xFFFFFFFF, 0xFFFFFFFF);
+	REG_PATTERN_TEST(TDLEN, 0x000FFF80, 0x000FFFFF);
+
+	REG_SET_AND_CHECK(RCTL, 0xFFFFFFFF, 0x00000000);
+
+	before = (hw->mac_type == e1000_ich8lan ?
+	          0x06C3B33E : 0x06DFB3FE);
+	REG_SET_AND_CHECK(RCTL, before, 0x003FFFFB);
+	REG_SET_AND_CHECK(TCTL, 0xFFFFFFFF, 0x00000000);
+
+	if (hw->mac_type >= e1000_82543) {
+
+		REG_SET_AND_CHECK(RCTL, before, 0xFFFFFFFF);
+		REG_PATTERN_TEST(RDBAL, 0xFFFFFFF0, 0xFFFFFFFF);
+		if (hw->mac_type != e1000_ich8lan)
+			REG_PATTERN_TEST(TXCW, 0xC000FFFF, 0x0000FFFF);
+		REG_PATTERN_TEST(TDBAL, 0xFFFFFFF0, 0xFFFFFFFF);
+		REG_PATTERN_TEST(TIDV, 0x0000FFFF, 0x0000FFFF);
+		value = (hw->mac_type == e1000_ich8lan ?
+		         E1000_RAR_ENTRIES_ICH8LAN : E1000_RAR_ENTRIES);
+		for (i = 0; i < value; i++) {
+			REG_PATTERN_TEST(RA + (((i << 1) + 1) << 2), 0x8003FFFF,
+			                 0xFFFFFFFF);
+		}
+
+	} else {
+
+		REG_SET_AND_CHECK(RCTL, 0xFFFFFFFF, 0x01FFFFFF);
+		REG_PATTERN_TEST(RDBAL, 0xFFFFF000, 0xFFFFFFFF);
+		REG_PATTERN_TEST(TXCW, 0x0000FFFF, 0x0000FFFF);
+		REG_PATTERN_TEST(TDBAL, 0xFFFFF000, 0xFFFFFFFF);
+
+	}
+
+	value = (hw->mac_type == e1000_ich8lan ?
+			E1000_MC_TBL_SIZE_ICH8LAN : E1000_MC_TBL_SIZE);
+	for (i = 0; i < value; i++)
+		REG_PATTERN_TEST(MTA + (i << 2), 0xFFFFFFFF, 0xFFFFFFFF);
+
+	*data = 0;
+	return 0;
+}
+
+static int e1000_eeprom_test(struct e1000_adapter *adapter, u64 *data)
+{
+	struct e1000_hw *hw = &adapter->hw;
+	u16 temp;
+	u16 checksum = 0;
+	u16 i;
+
+	*data = 0;
+	/* Read and add up the contents of the EEPROM */
+	for (i = 0; i < (EEPROM_CHECKSUM_REG + 1); i++) {
+		if ((e1000_read_eeprom(hw, i, 1, &temp)) < 0) {
+			*data = 1;
+			break;
+		}
+		checksum += temp;
+	}
+
+	/* If Checksum is not Correct return error else test passed */
+	if ((checksum != (u16)EEPROM_SUM) && !(*data))
+		*data = 2;
+
+	return *data;
+}
+
+static irqreturn_t e1000_test_intr(int irq, void *data)
+{
+	struct net_device *netdev = (struct net_device *)data;
+	struct e1000_adapter *adapter = netdev_priv(netdev);
+	struct e1000_hw *hw = &adapter->hw;
+
+	adapter->test_icr |= er32(ICR);
+
+	return IRQ_HANDLED;
+}
+
+static int e1000_intr_test(struct e1000_adapter *adapter, u64 *data)
+{
+	struct net_device *netdev = adapter->netdev;
+	u32 mask, i = 0;
+	bool shared_int = true;
+	u32 irq = adapter->pdev->irq;
+	struct e1000_hw *hw = &adapter->hw;
+
+	*data = 0;
+
+	/* NOTE: we don't test MSI interrupts here, yet */
+	/* Hook up test interrupt handler just for this test */
+	if (!request_irq(irq, &e1000_test_intr, IRQF_PROBE_SHARED, netdev->name,
+	                 netdev))
+		shared_int = false;
+	else if (request_irq(irq, &e1000_test_intr, IRQF_SHARED,
+	         netdev->name, netdev)) {
+		*data = 1;
+		return -1;
+	}
+	DPRINTK(HW, INFO, "testing %s interrupt\n",
+	        (shared_int ? "shared" : "unshared"));
+
+	/* Disable all the interrupts */
+	ew32(IMC, 0xFFFFFFFF);
+	msleep(10);
+
+	/* Test each interrupt */
+	for (; i < 10; i++) {
+
+		if (hw->mac_type == e1000_ich8lan && i == 8)
+			continue;
+
+		/* Interrupt to test */
+		mask = 1 << i;
+
+		if (!shared_int) {
+			/* Disable the interrupt to be reported in
+			 * the cause register and then force the same
+			 * interrupt and see if one gets posted.  If
+			 * an interrupt was posted to the bus, the
+			 * test failed.
+			 */
+			adapter->test_icr = 0;
+			ew32(IMC, mask);
+			ew32(ICS, mask);
+			msleep(10);
+
+			if (adapter->test_icr & mask) {
+				*data = 3;
+				break;
+			}
+		}
+
+		/* Enable the interrupt to be reported in
+		 * the cause register and then force the same
+		 * interrupt and see if one gets posted.  If
+		 * an interrupt was not posted to the bus, the
+		 * test failed.
+		 */
+		adapter->test_icr = 0;
+		ew32(IMS, mask);
+		ew32(ICS, mask);
+		msleep(10);
+
+		if (!(adapter->test_icr & mask)) {
+			*data = 4;
+			break;
+		}
+
+		if (!shared_int) {
+			/* Disable the other interrupts to be reported in
+			 * the cause register and then force the other
+			 * interrupts and see if any get posted.  If
+			 * an interrupt was posted to the bus, the
+			 * test failed.
+			 */
+			adapter->test_icr = 0;
+			ew32(IMC, ~mask & 0x00007FFF);
+			ew32(ICS, ~mask & 0x00007FFF);
+			msleep(10);
+
+			if (adapter->test_icr) {
+				*data = 5;
+				break;
+			}
+		}
+	}
+
+	/* Disable all the interrupts */
+	ew32(IMC, 0xFFFFFFFF);
+	msleep(10);
+
+	/* Unhook test interrupt handler */
+	free_irq(irq, netdev);
+
+	return *data;
+}
+
+static void e1000_free_desc_rings(struct e1000_adapter *adapter)
+{
+	struct e1000_tx_ring *txdr = &adapter->test_tx_ring;
+	struct e1000_rx_ring *rxdr = &adapter->test_rx_ring;
+	struct pci_dev *pdev = adapter->pdev;
+	int i;
+
+	if (txdr->desc && txdr->buffer_info) {
+		for (i = 0; i < txdr->count; i++) {
+			if (txdr->buffer_info[i].dma)
+				pci_unmap_single(pdev, txdr->buffer_info[i].dma,
+						 txdr->buffer_info[i].length,
+						 PCI_DMA_TODEVICE);
+			if (txdr->buffer_info[i].skb)
+				dev_kfree_skb(txdr->buffer_info[i].skb);
+		}
+	}
+
+	if (rxdr->desc && rxdr->buffer_info) {
+		for (i = 0; i < rxdr->count; i++) {
+			if (rxdr->buffer_info[i].dma)
+				pci_unmap_single(pdev, rxdr->buffer_info[i].dma,
+						 rxdr->buffer_info[i].length,
+						 PCI_DMA_FROMDEVICE);
+			if (rxdr->buffer_info[i].skb)
+				dev_kfree_skb(rxdr->buffer_info[i].skb);
+		}
+	}
+
+	if (txdr->desc) {
+		pci_free_consistent(pdev, txdr->size, txdr->desc, txdr->dma);
+		txdr->desc = NULL;
+	}
+	if (rxdr->desc) {
+		pci_free_consistent(pdev, rxdr->size, rxdr->desc, rxdr->dma);
+		rxdr->desc = NULL;
+	}
+
+	kfree(txdr->buffer_info);
+	txdr->buffer_info = NULL;
+	kfree(rxdr->buffer_info);
+	rxdr->buffer_info = NULL;
+
+	return;
+}
+
+static int e1000_setup_desc_rings(struct e1000_adapter *adapter)
+{
+	struct e1000_hw *hw = &adapter->hw;
+	struct e1000_tx_ring *txdr = &adapter->test_tx_ring;
+	struct e1000_rx_ring *rxdr = &adapter->test_rx_ring;
+	struct pci_dev *pdev = adapter->pdev;
+	u32 rctl;
+	int i, ret_val;
+
+	/* Setup Tx descriptor ring and Tx buffers */
+
+	if (!txdr->count)
+		txdr->count = E1000_DEFAULT_TXD;
+
+	txdr->buffer_info = kcalloc(txdr->count, sizeof(struct e1000_buffer),
+				    GFP_KERNEL);
+	if (!txdr->buffer_info) {
+		ret_val = 1;
+		goto err_nomem;
+	}
+
+	txdr->size = txdr->count * sizeof(struct e1000_tx_desc);
+	txdr->size = ALIGN(txdr->size, 4096);
+	txdr->desc = pci_alloc_consistent(pdev, txdr->size, &txdr->dma);
+	if (!txdr->desc) {
+		ret_val = 2;
+		goto err_nomem;
+	}
+	memset(txdr->desc, 0, txdr->size);
+	txdr->next_to_use = txdr->next_to_clean = 0;
+
+	ew32(TDBAL, ((u64)txdr->dma & 0x00000000FFFFFFFF));
+	ew32(TDBAH, ((u64)txdr->dma >> 32));
+	ew32(TDLEN, txdr->count * sizeof(struct e1000_tx_desc));
+	ew32(TDH, 0);
+	ew32(TDT, 0);
+	ew32(TCTL, E1000_TCTL_PSP | E1000_TCTL_EN |
+	     E1000_COLLISION_THRESHOLD << E1000_CT_SHIFT |
+	     E1000_FDX_COLLISION_DISTANCE << E1000_COLD_SHIFT);
+
+	for (i = 0; i < txdr->count; i++) {
+		struct e1000_tx_desc *tx_desc = E1000_TX_DESC(*txdr, i);
+		struct sk_buff *skb;
+		unsigned int size = 1024;
+
+		skb = alloc_skb(size, GFP_KERNEL);
+		if (!skb) {
+			ret_val = 3;
+			goto err_nomem;
+		}
+		skb_put(skb, size);
+		txdr->buffer_info[i].skb = skb;
+		txdr->buffer_info[i].length = skb->len;
+		txdr->buffer_info[i].dma =
+			pci_map_single(pdev, skb->data, skb->len,
+				       PCI_DMA_TODEVICE);
+		tx_desc->buffer_addr = cpu_to_le64(txdr->buffer_info[i].dma);
+		tx_desc->lower.data = cpu_to_le32(skb->len);
+		tx_desc->lower.data |= cpu_to_le32(E1000_TXD_CMD_EOP |
+						   E1000_TXD_CMD_IFCS |
+						   E1000_TXD_CMD_RPS);
+		tx_desc->upper.data = 0;
+	}
+
+	/* Setup Rx descriptor ring and Rx buffers */
+
+	if (!rxdr->count)
+		rxdr->count = E1000_DEFAULT_RXD;
+
+	rxdr->buffer_info = kcalloc(rxdr->count, sizeof(struct e1000_buffer),
+				    GFP_KERNEL);
+	if (!rxdr->buffer_info) {
+		ret_val = 4;
+		goto err_nomem;
+	}
+
+	rxdr->size = rxdr->count * sizeof(struct e1000_rx_desc);
+	rxdr->desc = pci_alloc_consistent(pdev, rxdr->size, &rxdr->dma);
+	if (!rxdr->desc) {
+		ret_val = 5;
+		goto err_nomem;
+	}
+	memset(rxdr->desc, 0, rxdr->size);
+	rxdr->next_to_use = rxdr->next_to_clean = 0;
+
+	rctl = er32(RCTL);
+	ew32(RCTL, rctl & ~E1000_RCTL_EN);
+	ew32(RDBAL, ((u64)rxdr->dma & 0xFFFFFFFF));
+	ew32(RDBAH, ((u64)rxdr->dma >> 32));
+	ew32(RDLEN, rxdr->size);
+	ew32(RDH, 0);
+	ew32(RDT, 0);
+	rctl = E1000_RCTL_EN | E1000_RCTL_BAM | E1000_RCTL_SZ_2048 |
+		E1000_RCTL_LBM_NO | E1000_RCTL_RDMTS_HALF |
+		(hw->mc_filter_type << E1000_RCTL_MO_SHIFT);
+	ew32(RCTL, rctl);
+
+	for (i = 0; i < rxdr->count; i++) {
+		struct e1000_rx_desc *rx_desc = E1000_RX_DESC(*rxdr, i);
+		struct sk_buff *skb;
+
+		skb = alloc_skb(E1000_RXBUFFER_2048 + NET_IP_ALIGN, GFP_KERNEL);
+		if (!skb) {
+			ret_val = 6;
+			goto err_nomem;
+		}
+		skb_reserve(skb, NET_IP_ALIGN);
+		rxdr->buffer_info[i].skb = skb;
+		rxdr->buffer_info[i].length = E1000_RXBUFFER_2048;
+		rxdr->buffer_info[i].dma =
+			pci_map_single(pdev, skb->data, E1000_RXBUFFER_2048,
+				       PCI_DMA_FROMDEVICE);
+		rx_desc->buffer_addr = cpu_to_le64(rxdr->buffer_info[i].dma);
+		memset(skb->data, 0x00, skb->len);
+	}
+
+	return 0;
+
+err_nomem:
+	e1000_free_desc_rings(adapter);
+	return ret_val;
+}
+
+static void e1000_phy_disable_receiver(struct e1000_adapter *adapter)
+{
+	struct e1000_hw *hw = &adapter->hw;
+
+	/* Write out to PHY registers 29 and 30 to disable the Receiver. */
+	e1000_write_phy_reg(hw, 29, 0x001F);
+	e1000_write_phy_reg(hw, 30, 0x8FFC);
+	e1000_write_phy_reg(hw, 29, 0x001A);
+	e1000_write_phy_reg(hw, 30, 0x8FF0);
+}
+
+static void e1000_phy_reset_clk_and_crs(struct e1000_adapter *adapter)
+{
+	struct e1000_hw *hw = &adapter->hw;
+	u16 phy_reg;
+
+	/* Because we reset the PHY above, we need to re-force TX_CLK in the
+	 * Extended PHY Specific Control Register to 25MHz clock.  This
+	 * value defaults back to a 2.5MHz clock when the PHY is reset.
+	 */
+	e1000_read_phy_reg(hw, M88E1000_EXT_PHY_SPEC_CTRL, &phy_reg);
+	phy_reg |= M88E1000_EPSCR_TX_CLK_25;
+	e1000_write_phy_reg(hw,
+		M88E1000_EXT_PHY_SPEC_CTRL, phy_reg);
+
+	/* In addition, because of the s/w reset above, we need to enable
+	 * CRS on TX.  This must be set for both full and half duplex
+	 * operation.
+	 */
+	e1000_read_phy_reg(hw, M88E1000_PHY_SPEC_CTRL, &phy_reg);
+	phy_reg |= M88E1000_PSCR_ASSERT_CRS_ON_TX;
+	e1000_write_phy_reg(hw,
+		M88E1000_PHY_SPEC_CTRL, phy_reg);
+}
+
+static int e1000_nonintegrated_phy_loopback(struct e1000_adapter *adapter)
+{
+	struct e1000_hw *hw = &adapter->hw;
+	u32 ctrl_reg;
+	u16 phy_reg;
+
+	/* Setup the Device Control Register for PHY loopback test. */
+
+	ctrl_reg = er32(CTRL);
+	ctrl_reg |= (E1000_CTRL_ILOS |		/* Invert Loss-Of-Signal */
+		     E1000_CTRL_FRCSPD |	/* Set the Force Speed Bit */
+		     E1000_CTRL_FRCDPX |	/* Set the Force Duplex Bit */
+		     E1000_CTRL_SPD_1000 |	/* Force Speed to 1000 */
+		     E1000_CTRL_FD);		/* Force Duplex to FULL */
+
+	ew32(CTRL, ctrl_reg);
+
+	/* Read the PHY Specific Control Register (0x10) */
+	e1000_read_phy_reg(hw, M88E1000_PHY_SPEC_CTRL, &phy_reg);
+
+	/* Clear Auto-Crossover bits in PHY Specific Control Register
+	 * (bits 6:5).
+	 */
+	phy_reg &= ~M88E1000_PSCR_AUTO_X_MODE;
+	e1000_write_phy_reg(hw, M88E1000_PHY_SPEC_CTRL, phy_reg);
+
+	/* Perform software reset on the PHY */
+	e1000_phy_reset(hw);
+
+	/* Have to setup TX_CLK and TX_CRS after software reset */
+	e1000_phy_reset_clk_and_crs(adapter);
+
+	e1000_write_phy_reg(hw, PHY_CTRL, 0x8100);
+
+	/* Wait for reset to complete. */
+	udelay(500);
+
+	/* Have to setup TX_CLK and TX_CRS after software reset */
+	e1000_phy_reset_clk_and_crs(adapter);
+
+	/* Write out to PHY registers 29 and 30 to disable the Receiver. */
+	e1000_phy_disable_receiver(adapter);
+
+	/* Set the loopback bit in the PHY control register. */
+	e1000_read_phy_reg(hw, PHY_CTRL, &phy_reg);
+	phy_reg |= MII_CR_LOOPBACK;
+	e1000_write_phy_reg(hw, PHY_CTRL, phy_reg);
+
+	/* Setup TX_CLK and TX_CRS one more time. */
+	e1000_phy_reset_clk_and_crs(adapter);
+
+	/* Check Phy Configuration */
+	e1000_read_phy_reg(hw, PHY_CTRL, &phy_reg);
+	if (phy_reg != 0x4100)
+		 return 9;
+
+	e1000_read_phy_reg(hw, M88E1000_EXT_PHY_SPEC_CTRL, &phy_reg);
+	if (phy_reg != 0x0070)
+		return 10;
+
+	e1000_read_phy_reg(hw, 29, &phy_reg);
+	if (phy_reg != 0x001A)
+		return 11;
+
+	return 0;
+}
+
+static int e1000_integrated_phy_loopback(struct e1000_adapter *adapter)
+{
+	struct e1000_hw *hw = &adapter->hw;
+	u32 ctrl_reg = 0;
+	u32 stat_reg = 0;
+
+	hw->autoneg = false;
+
+	if (hw->phy_type == e1000_phy_m88) {
+		/* Auto-MDI/MDIX Off */
+		e1000_write_phy_reg(hw,
+				    M88E1000_PHY_SPEC_CTRL, 0x0808);
+		/* reset to update Auto-MDI/MDIX */
+		e1000_write_phy_reg(hw, PHY_CTRL, 0x9140);
+		/* autoneg off */
+		e1000_write_phy_reg(hw, PHY_CTRL, 0x8140);
+	} else if (hw->phy_type == e1000_phy_gg82563)
+		e1000_write_phy_reg(hw,
+		                    GG82563_PHY_KMRN_MODE_CTRL,
+		                    0x1CC);
+
+	ctrl_reg = er32(CTRL);
+
+	if (hw->phy_type == e1000_phy_ife) {
+		/* force 100, set loopback */
+		e1000_write_phy_reg(hw, PHY_CTRL, 0x6100);
+
+		/* Now set up the MAC to the same speed/duplex as the PHY. */
+		ctrl_reg &= ~E1000_CTRL_SPD_SEL; /* Clear the speed sel bits */
+		ctrl_reg |= (E1000_CTRL_FRCSPD | /* Set the Force Speed Bit */
+			     E1000_CTRL_FRCDPX | /* Set the Force Duplex Bit */
+			     E1000_CTRL_SPD_100 |/* Force Speed to 100 */
+			     E1000_CTRL_FD);	 /* Force Duplex to FULL */
+	} else {
+		/* force 1000, set loopback */
+		e1000_write_phy_reg(hw, PHY_CTRL, 0x4140);
+
+		/* Now set up the MAC to the same speed/duplex as the PHY. */
+		ctrl_reg = er32(CTRL);
+		ctrl_reg &= ~E1000_CTRL_SPD_SEL; /* Clear the speed sel bits */
+		ctrl_reg |= (E1000_CTRL_FRCSPD | /* Set the Force Speed Bit */
+			     E1000_CTRL_FRCDPX | /* Set the Force Duplex Bit */
+			     E1000_CTRL_SPD_1000 |/* Force Speed to 1000 */
+			     E1000_CTRL_FD);	 /* Force Duplex to FULL */
+	}
+
+	if (hw->media_type == e1000_media_type_copper &&
+	   hw->phy_type == e1000_phy_m88)
+		ctrl_reg |= E1000_CTRL_ILOS; /* Invert Loss of Signal */
+	else {
+		/* Set the ILOS bit on the fiber Nic if half
+		 * duplex link is detected. */
+		stat_reg = er32(STATUS);
+		if ((stat_reg & E1000_STATUS_FD) == 0)
+			ctrl_reg |= (E1000_CTRL_ILOS | E1000_CTRL_SLU);
+	}
+
+	ew32(CTRL, ctrl_reg);
+
+	/* Disable the receiver on the PHY so when a cable is plugged in, the
+	 * PHY does not begin to autoneg when a cable is reconnected to the NIC.
+	 */
+	if (hw->phy_type == e1000_phy_m88)
+		e1000_phy_disable_receiver(adapter);
+
+	udelay(500);
+
+	return 0;
+}
+
+static int e1000_set_phy_loopback(struct e1000_adapter *adapter)
+{
+	struct e1000_hw *hw = &adapter->hw;
+	u16 phy_reg = 0;
+	u16 count = 0;
+
+	switch (hw->mac_type) {
+	case e1000_82543:
+		if (hw->media_type == e1000_media_type_copper) {
+			/* Attempt to setup Loopback mode on Non-integrated PHY.
+			 * Some PHY registers get corrupted at random, so
+			 * attempt this 10 times.
+			 */
+			while (e1000_nonintegrated_phy_loopback(adapter) &&
+			      count++ < 10);
+			if (count < 11)
+				return 0;
+		}
+		break;
+
+	case e1000_82544:
+	case e1000_82540:
+	case e1000_82545:
+	case e1000_82545_rev_3:
+	case e1000_82546:
+	case e1000_82546_rev_3:
+	case e1000_82541:
+	case e1000_82541_rev_2:
+	case e1000_82547:
+	case e1000_82547_rev_2:
+	case e1000_82571:
+	case e1000_82572:
+	case e1000_82573:
+	case e1000_80003es2lan:
+	case e1000_ich8lan:
+		return e1000_integrated_phy_loopback(adapter);
+		break;
+
+	default:
+		/* Default PHY loopback work is to read the MII
+		 * control register and assert bit 14 (loopback mode).
+		 */
+		e1000_read_phy_reg(hw, PHY_CTRL, &phy_reg);
+		phy_reg |= MII_CR_LOOPBACK;
+		e1000_write_phy_reg(hw, PHY_CTRL, phy_reg);
+		return 0;
+		break;
+	}
+
+	return 8;
+}
+
+static int e1000_setup_loopback_test(struct e1000_adapter *adapter)
+{
+	struct e1000_hw *hw = &adapter->hw;
+	u32 rctl;
+
+	if (hw->media_type == e1000_media_type_fiber ||
+	    hw->media_type == e1000_media_type_internal_serdes) {
+		switch (hw->mac_type) {
+		case e1000_82545:
+		case e1000_82546:
+		case e1000_82545_rev_3:
+		case e1000_82546_rev_3:
+			return e1000_set_phy_loopback(adapter);
+			break;
+		case e1000_82571:
+		case e1000_82572:
+#define E1000_SERDES_LB_ON 0x410
+			e1000_set_phy_loopback(adapter);
+			ew32(SCTL, E1000_SERDES_LB_ON);
+			msleep(10);
+			return 0;
+			break;
+		default:
+			rctl = er32(RCTL);
+			rctl |= E1000_RCTL_LBM_TCVR;
+			ew32(RCTL, rctl);
+			return 0;
+		}
+	} else if (hw->media_type == e1000_media_type_copper)
+		return e1000_set_phy_loopback(adapter);
+
+	return 7;
+}
+
+static void e1000_loopback_cleanup(struct e1000_adapter *adapter)
+{
+	struct e1000_hw *hw = &adapter->hw;
+	u32 rctl;
+	u16 phy_reg;
+
+	rctl = er32(RCTL);
+	rctl &= ~(E1000_RCTL_LBM_TCVR | E1000_RCTL_LBM_MAC);
+	ew32(RCTL, rctl);
+
+	switch (hw->mac_type) {
+	case e1000_82571:
+	case e1000_82572:
+		if (hw->media_type == e1000_media_type_fiber ||
+		    hw->media_type == e1000_media_type_internal_serdes) {
+#define E1000_SERDES_LB_OFF 0x400
+			ew32(SCTL, E1000_SERDES_LB_OFF);
+			msleep(10);
+			break;
+		}
+		/* Fall Through */
+	case e1000_82545:
+	case e1000_82546:
+	case e1000_82545_rev_3:
+	case e1000_82546_rev_3:
+	default:
+		hw->autoneg = true;
+		if (hw->phy_type == e1000_phy_gg82563)
+			e1000_write_phy_reg(hw,
+					    GG82563_PHY_KMRN_MODE_CTRL,
+					    0x180);
+		e1000_read_phy_reg(hw, PHY_CTRL, &phy_reg);
+		if (phy_reg & MII_CR_LOOPBACK) {
+			phy_reg &= ~MII_CR_LOOPBACK;
+			e1000_write_phy_reg(hw, PHY_CTRL, phy_reg);
+			e1000_phy_reset(hw);
+		}
+		break;
+	}
+}
+
+static void e1000_create_lbtest_frame(struct sk_buff *skb,
+				      unsigned int frame_size)
+{
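+	/* Fill the buffer with 0xFF, overwrite the second half with 0xAA and
+	 * drop in two marker bytes that e1000_check_lbtest_frame() looks for. */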
+	memset(skb->data, 0xFF, frame_size);
+	frame_size &= ~1;
+	memset(&skb->data[frame_size / 2], 0xAA, frame_size / 2 - 1);
+	memset(&skb->data[frame_size / 2 + 10], 0xBE, 1);
+	memset(&skb->data[frame_size / 2 + 12], 0xAF, 1);
+}
+
+static int e1000_check_lbtest_frame(struct sk_buff *skb,
+				    unsigned int frame_size)
+{
+	frame_size &= ~1;
+	if (*(skb->data + 3) == 0xFF) {
+		if ((*(skb->data + frame_size / 2 + 10) == 0xBE) &&
+		   (*(skb->data + frame_size / 2 + 12) == 0xAF)) {
+			return 0;
+		}
+	}
+	return 13;
+}
+
+static int e1000_run_loopback_test(struct e1000_adapter *adapter)
+{
+	struct e1000_hw *hw = &adapter->hw;
+	struct e1000_tx_ring *txdr = &adapter->test_tx_ring;
+	struct e1000_rx_ring *rxdr = &adapter->test_rx_ring;
+	struct pci_dev *pdev = adapter->pdev;
+	int i, j, k, l, lc, good_cnt, ret_val=0;
+	unsigned long time;
+
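+	/* Make every receive descriptor available to the hardware. */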
+	ew32(RDT, rxdr->count - 1);
+
+	/* Calculate the loop count based on the largest descriptor ring
+	 * The idea is to wrap the largest ring a number of times using 64
+	 * send/receive pairs during each loop
+	 */
+
+	if (rxdr->count <= txdr->count)
+		lc = ((txdr->count / 64) * 2) + 1;
+	else
+		lc = ((rxdr->count / 64) * 2) + 1;
+
+	k = l = 0;
+	for (j = 0; j <= lc; j++) { /* loop count loop */
+		for (i = 0; i < 64; i++) { /* send the packets */
+			e1000_create_lbtest_frame(txdr->buffer_info[i].skb,
+					1024);
+			pci_dma_sync_single_for_device(pdev,
+					txdr->buffer_info[k].dma,
+					txdr->buffer_info[k].length,
+					PCI_DMA_TODEVICE);
+			if (unlikely(++k == txdr->count)) k = 0;
+		}
+		ew32(TDT, k);
+		msleep(200);
+		time = jiffies; /* set the start time for the receive */
+		good_cnt = 0;
+		do { /* receive the sent packets */
+			pci_dma_sync_single_for_cpu(pdev,
+					rxdr->buffer_info[l].dma,
+					rxdr->buffer_info[l].length,
+					PCI_DMA_FROMDEVICE);
+
+			ret_val = e1000_check_lbtest_frame(
+					rxdr->buffer_info[l].skb,
+					1024);
+			if (!ret_val)
+				good_cnt++;
+			if (unlikely(++l == rxdr->count)) l = 0;
+			/* time + 20 msecs (200 msecs on 2.4) is more than
+			 * enough time to complete the receives, if it's
+			 * exceeded, break and error off
+			 */
+		} while (good_cnt < 64 && jiffies < (time + 20));
+		if (good_cnt != 64) {
+			ret_val = 13; /* ret_val is the same as mis-compare */
+			break;
+		}
+		if (jiffies >= (time + 2)) {
+			ret_val = 14; /* error code for time out error */
+			break;
+		}
+	} /* end loop count loop */
+	return ret_val;
+}
+
+static int e1000_loopback_test(struct e1000_adapter *adapter, u64 *data)
+{
+	struct e1000_hw *hw = &adapter->hw;
+
+	/* PHY loopback cannot be performed if SoL/IDER
+	 * sessions are active */
+	if (e1000_check_phy_reset_block(hw)) {
+		DPRINTK(DRV, ERR, "Cannot do PHY loopback test "
+		        "when SoL/IDER is active.\n");
+		*data = 0;
+		goto out;
+	}
+
+	*data = e1000_setup_desc_rings(adapter);
+	if (*data)
+		goto out;
+	*data = e1000_setup_loopback_test(adapter);
+	if (*data)
+		goto err_loopback;
+	*data = e1000_run_loopback_test(adapter);
+	e1000_loopback_cleanup(adapter);
+
+err_loopback:
+	e1000_free_desc_rings(adapter);
+out:
+	return *data;
+}
+
+static int e1000_link_test(struct e1000_adapter *adapter, u64 *data)
+{
+	struct e1000_hw *hw = &adapter->hw;
+	*data = 0;
+	if (hw->media_type == e1000_media_type_internal_serdes) {
+		int i = 0;
+		hw->serdes_link_down = true;
+
+		/* On some blade server designs, link establishment
+		 * could take as long as 2-3 minutes */
+		do {
+			e1000_check_for_link(hw);
+			if (!hw->serdes_link_down)
+				return *data;
+			msleep(20);
+		} while (i++ < 3750);
+
+		*data = 1;
+	} else {
+		e1000_check_for_link(hw);
+		if (hw->autoneg)  /* if auto_neg is set wait for it */
+			msleep(4000);
+
+		if (!(er32(STATUS) & E1000_STATUS_LU)) {
+			*data = 1;
+		}
+	}
+	return *data;
+}
+
+static int e1000_get_sset_count(struct net_device *netdev, int sset)
+{
+	switch (sset) {
+	case ETH_SS_TEST:
+		return E1000_TEST_LEN;
+	case ETH_SS_STATS:
+		return E1000_STATS_LEN;
+	default:
+		return -EOPNOTSUPP;
+	}
+}
+
+static void e1000_diag_test(struct net_device *netdev,
+			    struct ethtool_test *eth_test, u64 *data)
+{
+	struct e1000_adapter *adapter = netdev_priv(netdev);
+	struct e1000_hw *hw = &adapter->hw;
+	bool if_running;
+
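+	/* No self-test while the device is attached to the EtherCAT master. */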
+	if (adapter->ecdev)
+		return;
+
+	if_running = netif_running(netdev);
+
+	set_bit(__E1000_TESTING, &adapter->flags);
+	if (eth_test->flags == ETH_TEST_FL_OFFLINE) {
+		/* Offline tests */
+
+		/* save speed, duplex, autoneg settings */
+		u16 autoneg_advertised = hw->autoneg_advertised;
+		u8 forced_speed_duplex = hw->forced_speed_duplex;
+		u8 autoneg = hw->autoneg;
+
+		DPRINTK(HW, INFO, "offline testing starting\n");
+
+		/* Link test performed before hardware reset so autoneg doesn't
+		 * interfere with test result */
+		if (e1000_link_test(adapter, &data[4]))
+			eth_test->flags |= ETH_TEST_FL_FAILED;
+
+		if (if_running)
+			/* indicate we're in test mode */
+			dev_close(netdev);
+		else
+			e1000_reset(adapter);
+
+		if (e1000_reg_test(adapter, &data[0]))
+			eth_test->flags |= ETH_TEST_FL_FAILED;
+
+		e1000_reset(adapter);
+		if (e1000_eeprom_test(adapter, &data[1]))
+			eth_test->flags |= ETH_TEST_FL_FAILED;
+
+		e1000_reset(adapter);
+		if (e1000_intr_test(adapter, &data[2]))
+			eth_test->flags |= ETH_TEST_FL_FAILED;
+
+		e1000_reset(adapter);
+		/* make sure the phy is powered up */
+		e1000_power_up_phy(adapter);
+		if (e1000_loopback_test(adapter, &data[3]))
+			eth_test->flags |= ETH_TEST_FL_FAILED;
+
+		/* restore speed, duplex, autoneg settings */
+		hw->autoneg_advertised = autoneg_advertised;
+		hw->forced_speed_duplex = forced_speed_duplex;
+		hw->autoneg = autoneg;
+
+		e1000_reset(adapter);
+		clear_bit(__E1000_TESTING, &adapter->flags);
+		if (if_running)
+			dev_open(netdev);
+	} else {
+		DPRINTK(HW, INFO, "online testing starting\n");
+		/* Online tests */
+		if (e1000_link_test(adapter, &data[4]))
+			eth_test->flags |= ETH_TEST_FL_FAILED;
+
+		/* Online tests aren't run; pass by default */
+		data[0] = 0;
+		data[1] = 0;
+		data[2] = 0;
+		data[3] = 0;
+
+		clear_bit(__E1000_TESTING, &adapter->flags);
+	}
+	msleep_interruptible(4 * 1000);
+}
+
+static int e1000_wol_exclusion(struct e1000_adapter *adapter,
+			       struct ethtool_wolinfo *wol)
+{
+	struct e1000_hw *hw = &adapter->hw;
+	int retval = 1; /* fail by default */
+
+	switch (hw->device_id) {
+	case E1000_DEV_ID_82542:
+	case E1000_DEV_ID_82543GC_FIBER:
+	case E1000_DEV_ID_82543GC_COPPER:
+	case E1000_DEV_ID_82544EI_FIBER:
+	case E1000_DEV_ID_82546EB_QUAD_COPPER:
+	case E1000_DEV_ID_82545EM_FIBER:
+	case E1000_DEV_ID_82545EM_COPPER:
+	case E1000_DEV_ID_82546GB_QUAD_COPPER:
+	case E1000_DEV_ID_82546GB_PCIE:
+	case E1000_DEV_ID_82571EB_SERDES_QUAD:
+		/* these don't support WoL at all */
+		wol->supported = 0;
+		break;
+	case E1000_DEV_ID_82546EB_FIBER:
+	case E1000_DEV_ID_82546GB_FIBER:
+	case E1000_DEV_ID_82571EB_FIBER:
+	case E1000_DEV_ID_82571EB_SERDES:
+	case E1000_DEV_ID_82571EB_COPPER:
+		/* Wake events not supported on port B */
+		if (er32(STATUS) & E1000_STATUS_FUNC_1) {
+			wol->supported = 0;
+			break;
+		}
+		/* return success for non excluded adapter ports */
+		retval = 0;
+		break;
+	case E1000_DEV_ID_82571EB_QUAD_COPPER:
+	case E1000_DEV_ID_82571EB_QUAD_FIBER:
+	case E1000_DEV_ID_82571EB_QUAD_COPPER_LOWPROFILE:
+	case E1000_DEV_ID_82571PT_QUAD_COPPER:
+	case E1000_DEV_ID_82546GB_QUAD_COPPER_KSP3:
+		/* quad port adapters only support WoL on port A */
+		if (!adapter->quad_port_a) {
+			wol->supported = 0;
+			break;
+		}
+		/* return success for non excluded adapter ports */
+		retval = 0;
+		break;
+	default:
+		/* dual port cards only support WoL on port A from now on
+		 * unless it was enabled in the eeprom for port B
+		 * so exclude FUNC_1 ports from having WoL enabled */
+		if (er32(STATUS) & E1000_STATUS_FUNC_1 &&
+		    !adapter->eeprom_wol) {
+			wol->supported = 0;
+			break;
+		}
+
+		retval = 0;
+	}
+
+	return retval;
+}
+
+static void e1000_get_wol(struct net_device *netdev,
+			  struct ethtool_wolinfo *wol)
+{
+	struct e1000_adapter *adapter = netdev_priv(netdev);
+	struct e1000_hw *hw = &adapter->hw;
+
+	wol->supported = WAKE_UCAST | WAKE_MCAST |
+	                 WAKE_BCAST | WAKE_MAGIC;
+	wol->wolopts = 0;
+
+	/* this function will set ->supported = 0 and return 1 if wol is not
+	 * supported by this hardware */
+	if (e1000_wol_exclusion(adapter, wol) ||
+	    !device_can_wakeup(&adapter->pdev->dev))
+		return;
+
+	/* apply any specific unsupported masks here */
+	switch (hw->device_id) {
+	case E1000_DEV_ID_82546GB_QUAD_COPPER_KSP3:
+		/* KSP3 does not support UCAST wake-ups */
+		wol->supported &= ~WAKE_UCAST;
+
+		if (adapter->wol & E1000_WUFC_EX)
+			DPRINTK(DRV, ERR, "Interface does not support "
+		        "directed (unicast) frame wake-up packets\n");
+		break;
+	default:
+		break;
+	}
+
+	if (adapter->wol & E1000_WUFC_EX)
+		wol->wolopts |= WAKE_UCAST;
+	if (adapter->wol & E1000_WUFC_MC)
+		wol->wolopts |= WAKE_MCAST;
+	if (adapter->wol & E1000_WUFC_BC)
+		wol->wolopts |= WAKE_BCAST;
+	if (adapter->wol & E1000_WUFC_MAG)
+		wol->wolopts |= WAKE_MAGIC;
+
+	return;
+}
+
+static int e1000_set_wol(struct net_device *netdev, struct ethtool_wolinfo *wol)
+{
+	struct e1000_adapter *adapter = netdev_priv(netdev);
+	struct e1000_hw *hw = &adapter->hw;
+
+	if (wol->wolopts & (WAKE_PHY | WAKE_ARP | WAKE_MAGICSECURE))
+		return -EOPNOTSUPP;
+
+	if (e1000_wol_exclusion(adapter, wol) ||
+	    !device_can_wakeup(&adapter->pdev->dev))
+		return wol->wolopts ? -EOPNOTSUPP : 0;
+
+	switch (hw->device_id) {
+	case E1000_DEV_ID_82546GB_QUAD_COPPER_KSP3:
+		if (wol->wolopts & WAKE_UCAST) {
+			DPRINTK(DRV, ERR, "Interface does not support "
+		        "directed (unicast) frame wake-up packets\n");
+			return -EOPNOTSUPP;
+		}
+		break;
+	default:
+		break;
+	}
+
+	/* these settings will always override what we currently have */
+	adapter->wol = 0;
+
+	if (wol->wolopts & WAKE_UCAST)
+		adapter->wol |= E1000_WUFC_EX;
+	if (wol->wolopts & WAKE_MCAST)
+		adapter->wol |= E1000_WUFC_MC;
+	if (wol->wolopts & WAKE_BCAST)
+		adapter->wol |= E1000_WUFC_BC;
+	if (wol->wolopts & WAKE_MAGIC)
+		adapter->wol |= E1000_WUFC_MAG;
+
+	device_set_wakeup_enable(&adapter->pdev->dev, adapter->wol);
+
+	return 0;
+}
+
+/* toggle LED 4 times per second = 2 "blinks" per second */
+#define E1000_ID_INTERVAL	(HZ/4)
+
+/* bit defines for adapter->led_status */
+#define E1000_LED_ON		0
+
+static void e1000_led_blink_callback(unsigned long data)
+{
+	struct e1000_adapter *adapter = (struct e1000_adapter *) data;
+	struct e1000_hw *hw = &adapter->hw;
+
+	if (test_and_change_bit(E1000_LED_ON, &adapter->led_status))
+		e1000_led_off(hw);
+	else
+		e1000_led_on(hw);
+
+	mod_timer(&adapter->blink_timer, jiffies + E1000_ID_INTERVAL);
+}
+
+static int e1000_phys_id(struct net_device *netdev, u32 data)
+{
+	struct e1000_adapter *adapter = netdev_priv(netdev);
+	struct e1000_hw *hw = &adapter->hw;
+
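+	/* A duration of zero means blink until the caller interrupts us. */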
+	if (!data)
+		data = INT_MAX;
+
+	if (hw->mac_type < e1000_82571) {
+		if (!adapter->blink_timer.function) {
+			init_timer(&adapter->blink_timer);
+			adapter->blink_timer.function = e1000_led_blink_callback;
+			adapter->blink_timer.data = (unsigned long)adapter;
+		}
+		e1000_setup_led(hw);
+		mod_timer(&adapter->blink_timer, jiffies);
+		msleep_interruptible(data * 1000);
+		del_timer_sync(&adapter->blink_timer);
+	} else if (hw->phy_type == e1000_phy_ife) {
+		if (!adapter->blink_timer.function) {
+			init_timer(&adapter->blink_timer);
+			adapter->blink_timer.function = e1000_led_blink_callback;
+			adapter->blink_timer.data = (unsigned long)adapter;
+		}
+		mod_timer(&adapter->blink_timer, jiffies);
+		msleep_interruptible(data * 1000);
+		del_timer_sync(&adapter->blink_timer);
+		e1000_write_phy_reg(&(adapter->hw), IFE_PHY_SPECIAL_CONTROL_LED, 0);
+	} else {
+		e1000_blink_led_start(hw);
+		msleep_interruptible(data * 1000);
+	}
+
+	e1000_led_off(hw);
+	clear_bit(E1000_LED_ON, &adapter->led_status);
+	e1000_cleanup_led(hw);
+
+	return 0;
+}
+
+static int e1000_nway_reset(struct net_device *netdev)
+{
+	struct e1000_adapter *adapter = netdev_priv(netdev);
+
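+	/* Link renegotiation is refused while the EtherCAT master uses the device. */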
+	if (adapter->ecdev)
+		return -EBUSY;
+
+	if (netif_running(netdev))
+		e1000_reinit_locked(adapter);
+	return 0;
+}
+
+static void e1000_get_ethtool_stats(struct net_device *netdev,
+				    struct ethtool_stats *stats, u64 *data)
+{
+	struct e1000_adapter *adapter = netdev_priv(netdev);
+	int i;
+
+	e1000_update_stats(adapter);
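+	/* Each table entry records an offset into the adapter structure; copy
+	 * the value as u64 or u32 according to the stored field size. */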
+	for (i = 0; i < E1000_GLOBAL_STATS_LEN; i++) {
+		char *p = (char *)adapter+e1000_gstrings_stats[i].stat_offset;
+		data[i] = (e1000_gstrings_stats[i].sizeof_stat ==
+			sizeof(u64)) ? *(u64 *)p : *(u32 *)p;
+	}
+/*	BUG_ON(i != E1000_STATS_LEN); */
+}
+
+static void e1000_get_strings(struct net_device *netdev, u32 stringset,
+			      u8 *data)
+{
+	u8 *p = data;
+	int i;
+
+	switch (stringset) {
+	case ETH_SS_TEST:
+		memcpy(data, *e1000_gstrings_test,
+			sizeof(e1000_gstrings_test));
+		break;
+	case ETH_SS_STATS:
+		for (i = 0; i < E1000_GLOBAL_STATS_LEN; i++) {
+			memcpy(p, e1000_gstrings_stats[i].stat_string,
+			       ETH_GSTRING_LEN);
+			p += ETH_GSTRING_LEN;
+		}
+/*		BUG_ON(p - data != E1000_STATS_LEN * ETH_GSTRING_LEN); */
+		break;
+	}
+}
+
+static const struct ethtool_ops e1000_ethtool_ops = {
+	.get_settings           = e1000_get_settings,
+	.set_settings           = e1000_set_settings,
+	.get_drvinfo            = e1000_get_drvinfo,
+	.get_regs_len           = e1000_get_regs_len,
+	.get_regs               = e1000_get_regs,
+	.get_wol                = e1000_get_wol,
+	.set_wol                = e1000_set_wol,
+	.get_msglevel           = e1000_get_msglevel,
+	.set_msglevel           = e1000_set_msglevel,
+	.nway_reset             = e1000_nway_reset,
+	.get_link               = ethtool_op_get_link,
+	.get_eeprom_len         = e1000_get_eeprom_len,
+	.get_eeprom             = e1000_get_eeprom,
+	.set_eeprom             = e1000_set_eeprom,
+	.get_ringparam          = e1000_get_ringparam,
+	.set_ringparam          = e1000_set_ringparam,
+	.get_pauseparam         = e1000_get_pauseparam,
+	.set_pauseparam         = e1000_set_pauseparam,
+	.get_rx_csum            = e1000_get_rx_csum,
+	.set_rx_csum            = e1000_set_rx_csum,
+	.get_tx_csum            = e1000_get_tx_csum,
+	.set_tx_csum            = e1000_set_tx_csum,
+	.set_sg                 = ethtool_op_set_sg,
+	.set_tso                = e1000_set_tso,
+	.self_test              = e1000_diag_test,
+	.get_strings            = e1000_get_strings,
+	.phys_id                = e1000_phys_id,
+	.get_ethtool_stats      = e1000_get_ethtool_stats,
+	.get_sset_count		= e1000_get_sset_count,
+};
+
+void e1000_set_ethtool_ops(struct net_device *netdev)
+{
+	SET_ETHTOOL_OPS(netdev, &e1000_ethtool_ops);
+}
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/devices/e1000/e1000_ethtool-2.6.31-orig.c	Wed Apr 13 22:06:28 2011 +0200
@@ -0,0 +1,1987 @@
+/*******************************************************************************
+
+  Intel PRO/1000 Linux driver
+  Copyright(c) 1999 - 2006 Intel Corporation.
+
+  This program is free software; you can redistribute it and/or modify it
+  under the terms and conditions of the GNU General Public License,
+  version 2, as published by the Free Software Foundation.
+
+  This program is distributed in the hope it will be useful, but WITHOUT
+  ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+  FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
+  more details.
+
+  You should have received a copy of the GNU General Public License along with
+  this program; if not, write to the Free Software Foundation, Inc.,
+  51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
+
+  The full GNU General Public License is included in this distribution in
+  the file called "COPYING".
+
+  Contact Information:
+  Linux NICS <linux.nics@intel.com>
+  e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
+  Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
+
+*******************************************************************************/
+
+/* ethtool support for e1000 */
+
+#include "e1000.h"
+#include <asm/uaccess.h>
+
+struct e1000_stats {
+	char stat_string[ETH_GSTRING_LEN];
+	int sizeof_stat;
+	int stat_offset;
+};
+
+#define E1000_STAT(m) FIELD_SIZEOF(struct e1000_adapter, m), \
+		      offsetof(struct e1000_adapter, m)
+static const struct e1000_stats e1000_gstrings_stats[] = {
+	{ "rx_packets", E1000_STAT(stats.gprc) },
+	{ "tx_packets", E1000_STAT(stats.gptc) },
+	{ "rx_bytes", E1000_STAT(stats.gorcl) },
+	{ "tx_bytes", E1000_STAT(stats.gotcl) },
+	{ "rx_broadcast", E1000_STAT(stats.bprc) },
+	{ "tx_broadcast", E1000_STAT(stats.bptc) },
+	{ "rx_multicast", E1000_STAT(stats.mprc) },
+	{ "tx_multicast", E1000_STAT(stats.mptc) },
+	{ "rx_errors", E1000_STAT(stats.rxerrc) },
+	{ "tx_errors", E1000_STAT(stats.txerrc) },
+	{ "tx_dropped", E1000_STAT(net_stats.tx_dropped) },
+	{ "multicast", E1000_STAT(stats.mprc) },
+	{ "collisions", E1000_STAT(stats.colc) },
+	{ "rx_length_errors", E1000_STAT(stats.rlerrc) },
+	{ "rx_over_errors", E1000_STAT(net_stats.rx_over_errors) },
+	{ "rx_crc_errors", E1000_STAT(stats.crcerrs) },
+	{ "rx_frame_errors", E1000_STAT(net_stats.rx_frame_errors) },
+	{ "rx_no_buffer_count", E1000_STAT(stats.rnbc) },
+	{ "rx_missed_errors", E1000_STAT(stats.mpc) },
+	{ "tx_aborted_errors", E1000_STAT(stats.ecol) },
+	{ "tx_carrier_errors", E1000_STAT(stats.tncrs) },
+	{ "tx_fifo_errors", E1000_STAT(net_stats.tx_fifo_errors) },
+	{ "tx_heartbeat_errors", E1000_STAT(net_stats.tx_heartbeat_errors) },
+	{ "tx_window_errors", E1000_STAT(stats.latecol) },
+	{ "tx_abort_late_coll", E1000_STAT(stats.latecol) },
+	{ "tx_deferred_ok", E1000_STAT(stats.dc) },
+	{ "tx_single_coll_ok", E1000_STAT(stats.scc) },
+	{ "tx_multi_coll_ok", E1000_STAT(stats.mcc) },
+	{ "tx_timeout_count", E1000_STAT(tx_timeout_count) },
+	{ "tx_restart_queue", E1000_STAT(restart_queue) },
+	{ "rx_long_length_errors", E1000_STAT(stats.roc) },
+	{ "rx_short_length_errors", E1000_STAT(stats.ruc) },
+	{ "rx_align_errors", E1000_STAT(stats.algnerrc) },
+	{ "tx_tcp_seg_good", E1000_STAT(stats.tsctc) },
+	{ "tx_tcp_seg_failed", E1000_STAT(stats.tsctfc) },
+	{ "rx_flow_control_xon", E1000_STAT(stats.xonrxc) },
+	{ "rx_flow_control_xoff", E1000_STAT(stats.xoffrxc) },
+	{ "tx_flow_control_xon", E1000_STAT(stats.xontxc) },
+	{ "tx_flow_control_xoff", E1000_STAT(stats.xofftxc) },
+	{ "rx_long_byte_count", E1000_STAT(stats.gorcl) },
+	{ "rx_csum_offload_good", E1000_STAT(hw_csum_good) },
+	{ "rx_csum_offload_errors", E1000_STAT(hw_csum_err) },
+	{ "rx_header_split", E1000_STAT(rx_hdr_split) },
+	{ "alloc_rx_buff_failed", E1000_STAT(alloc_rx_buff_failed) },
+	{ "tx_smbus", E1000_STAT(stats.mgptc) },
+	{ "rx_smbus", E1000_STAT(stats.mgprc) },
+	{ "dropped_smbus", E1000_STAT(stats.mgpdc) },
+};
+
+#define E1000_QUEUE_STATS_LEN 0
+#define E1000_GLOBAL_STATS_LEN ARRAY_SIZE(e1000_gstrings_stats)
+#define E1000_STATS_LEN (E1000_GLOBAL_STATS_LEN + E1000_QUEUE_STATS_LEN)
+static const char e1000_gstrings_test[][ETH_GSTRING_LEN] = {
+	"Register test  (offline)", "Eeprom test    (offline)",
+	"Interrupt test (offline)", "Loopback test  (offline)",
+	"Link test   (on/offline)"
+};
+#define E1000_TEST_LEN	ARRAY_SIZE(e1000_gstrings_test)
+
+static int e1000_get_settings(struct net_device *netdev,
+			      struct ethtool_cmd *ecmd)
+{
+	struct e1000_adapter *adapter = netdev_priv(netdev);
+	struct e1000_hw *hw = &adapter->hw;
+
+	if (hw->media_type == e1000_media_type_copper) {
+
+		ecmd->supported = (SUPPORTED_10baseT_Half |
+		                   SUPPORTED_10baseT_Full |
+		                   SUPPORTED_100baseT_Half |
+		                   SUPPORTED_100baseT_Full |
+		                   SUPPORTED_1000baseT_Full|
+		                   SUPPORTED_Autoneg |
+		                   SUPPORTED_TP);
+		if (hw->phy_type == e1000_phy_ife)
+			ecmd->supported &= ~SUPPORTED_1000baseT_Full;
+		ecmd->advertising = ADVERTISED_TP;
+
+		if (hw->autoneg == 1) {
+			ecmd->advertising |= ADVERTISED_Autoneg;
+			/* the e1000 autoneg seems to match ethtool nicely */
+			ecmd->advertising |= hw->autoneg_advertised;
+		}
+
+		ecmd->port = PORT_TP;
+		ecmd->phy_address = hw->phy_addr;
+
+		if (hw->mac_type == e1000_82543)
+			ecmd->transceiver = XCVR_EXTERNAL;
+		else
+			ecmd->transceiver = XCVR_INTERNAL;
+
+	} else {
+		ecmd->supported   = (SUPPORTED_1000baseT_Full |
+				     SUPPORTED_FIBRE |
+				     SUPPORTED_Autoneg);
+
+		ecmd->advertising = (ADVERTISED_1000baseT_Full |
+				     ADVERTISED_FIBRE |
+				     ADVERTISED_Autoneg);
+
+		ecmd->port = PORT_FIBRE;
+
+		if (hw->mac_type >= e1000_82545)
+			ecmd->transceiver = XCVR_INTERNAL;
+		else
+			ecmd->transceiver = XCVR_EXTERNAL;
+	}
+
+	if (er32(STATUS) & E1000_STATUS_LU) {
+
+		e1000_get_speed_and_duplex(hw, &adapter->link_speed,
+		                                   &adapter->link_duplex);
+		ecmd->speed = adapter->link_speed;
+
+		/* unfortunately FULL_DUPLEX != DUPLEX_FULL
+		 *          and HALF_DUPLEX != DUPLEX_HALF */
+
+		if (adapter->link_duplex == FULL_DUPLEX)
+			ecmd->duplex = DUPLEX_FULL;
+		else
+			ecmd->duplex = DUPLEX_HALF;
+	} else {
+		ecmd->speed = -1;
+		ecmd->duplex = -1;
+	}
+
+	ecmd->autoneg = ((hw->media_type == e1000_media_type_fiber) ||
+			 hw->autoneg) ? AUTONEG_ENABLE : AUTONEG_DISABLE;
+	return 0;
+}
+
+static int e1000_set_settings(struct net_device *netdev,
+			      struct ethtool_cmd *ecmd)
+{
+	struct e1000_adapter *adapter = netdev_priv(netdev);
+	struct e1000_hw *hw = &adapter->hw;
+
+	/* When SoL/IDER sessions are active, autoneg/speed/duplex
+	 * cannot be changed */
+	if (e1000_check_phy_reset_block(hw)) {
+		DPRINTK(DRV, ERR, "Cannot change link characteristics "
+		        "when SoL/IDER is active.\n");
+		return -EINVAL;
+	}
+
+	while (test_and_set_bit(__E1000_RESETTING, &adapter->flags))
+		msleep(1);
+
+	if (ecmd->autoneg == AUTONEG_ENABLE) {
+		hw->autoneg = 1;
+		if (hw->media_type == e1000_media_type_fiber)
+			hw->autoneg_advertised = ADVERTISED_1000baseT_Full |
+				     ADVERTISED_FIBRE |
+				     ADVERTISED_Autoneg;
+		else
+			hw->autoneg_advertised = ecmd->advertising |
+			                         ADVERTISED_TP |
+			                         ADVERTISED_Autoneg;
+		ecmd->advertising = hw->autoneg_advertised;
+	} else
+		if (e1000_set_spd_dplx(adapter, ecmd->speed + ecmd->duplex)) {
+			clear_bit(__E1000_RESETTING, &adapter->flags);
+			return -EINVAL;
+		}
+
+	/* reset the link */
+
+	if (netif_running(adapter->netdev)) {
+		e1000_down(adapter);
+		e1000_up(adapter);
+	} else
+		e1000_reset(adapter);
+
+	clear_bit(__E1000_RESETTING, &adapter->flags);
+	return 0;
+}
+
+static void e1000_get_pauseparam(struct net_device *netdev,
+				 struct ethtool_pauseparam *pause)
+{
+	struct e1000_adapter *adapter = netdev_priv(netdev);
+	struct e1000_hw *hw = &adapter->hw;
+
+	pause->autoneg =
+		(adapter->fc_autoneg ? AUTONEG_ENABLE : AUTONEG_DISABLE);
+
+	if (hw->fc == E1000_FC_RX_PAUSE)
+		pause->rx_pause = 1;
+	else if (hw->fc == E1000_FC_TX_PAUSE)
+		pause->tx_pause = 1;
+	else if (hw->fc == E1000_FC_FULL) {
+		pause->rx_pause = 1;
+		pause->tx_pause = 1;
+	}
+}
+
+static int e1000_set_pauseparam(struct net_device *netdev,
+				struct ethtool_pauseparam *pause)
+{
+	struct e1000_adapter *adapter = netdev_priv(netdev);
+	struct e1000_hw *hw = &adapter->hw;
+	int retval = 0;
+
+	adapter->fc_autoneg = pause->autoneg;
+
+	while (test_and_set_bit(__E1000_RESETTING, &adapter->flags))
+		msleep(1);
+
+	if (pause->rx_pause && pause->tx_pause)
+		hw->fc = E1000_FC_FULL;
+	else if (pause->rx_pause && !pause->tx_pause)
+		hw->fc = E1000_FC_RX_PAUSE;
+	else if (!pause->rx_pause && pause->tx_pause)
+		hw->fc = E1000_FC_TX_PAUSE;
+	else if (!pause->rx_pause && !pause->tx_pause)
+		hw->fc = E1000_FC_NONE;
+
+	hw->original_fc = hw->fc;
+
+	if (adapter->fc_autoneg == AUTONEG_ENABLE) {
+		if (netif_running(adapter->netdev)) {
+			e1000_down(adapter);
+			e1000_up(adapter);
+		} else
+			e1000_reset(adapter);
+	} else
+		retval = ((hw->media_type == e1000_media_type_fiber) ?
+			  e1000_setup_link(hw) : e1000_force_mac_fc(hw));
+
+	clear_bit(__E1000_RESETTING, &adapter->flags);
+	return retval;
+}
+
+static u32 e1000_get_rx_csum(struct net_device *netdev)
+{
+	struct e1000_adapter *adapter = netdev_priv(netdev);
+	return adapter->rx_csum;
+}
+
+static int e1000_set_rx_csum(struct net_device *netdev, u32 data)
+{
+	struct e1000_adapter *adapter = netdev_priv(netdev);
+	adapter->rx_csum = data;
+
+	if (netif_running(netdev))
+		e1000_reinit_locked(adapter);
+	else
+		e1000_reset(adapter);
+	return 0;
+}
+
+static u32 e1000_get_tx_csum(struct net_device *netdev)
+{
+	return (netdev->features & NETIF_F_HW_CSUM) != 0;
+}
+
+static int e1000_set_tx_csum(struct net_device *netdev, u32 data)
+{
+	struct e1000_adapter *adapter = netdev_priv(netdev);
+	struct e1000_hw *hw = &adapter->hw;
+
+	if (hw->mac_type < e1000_82543) {
+		if (!data)
+			return -EINVAL;
+		return 0;
+	}
+
+	if (data)
+		netdev->features |= NETIF_F_HW_CSUM;
+	else
+		netdev->features &= ~NETIF_F_HW_CSUM;
+
+	return 0;
+}
+
+static int e1000_set_tso(struct net_device *netdev, u32 data)
+{
+	struct e1000_adapter *adapter = netdev_priv(netdev);
+	struct e1000_hw *hw = &adapter->hw;
+
+	if ((hw->mac_type < e1000_82544) ||
+	    (hw->mac_type == e1000_82547))
+		return data ? -EINVAL : 0;
+
+	if (data)
+		netdev->features |= NETIF_F_TSO;
+	else
+		netdev->features &= ~NETIF_F_TSO;
+
+	if (data && (adapter->hw.mac_type > e1000_82547_rev_2))
+		netdev->features |= NETIF_F_TSO6;
+	else
+		netdev->features &= ~NETIF_F_TSO6;
+
+	DPRINTK(PROBE, INFO, "TSO is %s\n", data ? "Enabled" : "Disabled");
+	adapter->tso_force = true;
+	return 0;
+}
+
+static u32 e1000_get_msglevel(struct net_device *netdev)
+{
+	struct e1000_adapter *adapter = netdev_priv(netdev);
+	return adapter->msg_enable;
+}
+
+static void e1000_set_msglevel(struct net_device *netdev, u32 data)
+{
+	struct e1000_adapter *adapter = netdev_priv(netdev);
+	adapter->msg_enable = data;
+}
+
+static int e1000_get_regs_len(struct net_device *netdev)
+{
+#define E1000_REGS_LEN 32
+	return E1000_REGS_LEN * sizeof(u32);
+}
+
+static void e1000_get_regs(struct net_device *netdev, struct ethtool_regs *regs,
+			   void *p)
+{
+	struct e1000_adapter *adapter = netdev_priv(netdev);
+	struct e1000_hw *hw = &adapter->hw;
+	u32 *regs_buff = p;
+	u16 phy_data;
+
+	memset(p, 0, E1000_REGS_LEN * sizeof(u32));
+
+	regs->version = (1 << 24) | (hw->revision_id << 16) | hw->device_id;
+
+	regs_buff[0]  = er32(CTRL);
+	regs_buff[1]  = er32(STATUS);
+
+	regs_buff[2]  = er32(RCTL);
+	regs_buff[3]  = er32(RDLEN);
+	regs_buff[4]  = er32(RDH);
+	regs_buff[5]  = er32(RDT);
+	regs_buff[6]  = er32(RDTR);
+
+	regs_buff[7]  = er32(TCTL);
+	regs_buff[8]  = er32(TDLEN);
+	regs_buff[9]  = er32(TDH);
+	regs_buff[10] = er32(TDT);
+	regs_buff[11] = er32(TIDV);
+
+	regs_buff[12] = hw->phy_type;  /* PHY type (IGP=1, M88=0) */
+	if (hw->phy_type == e1000_phy_igp) {
+		e1000_write_phy_reg(hw, IGP01E1000_PHY_PAGE_SELECT,
+				    IGP01E1000_PHY_AGC_A);
+		e1000_read_phy_reg(hw, IGP01E1000_PHY_AGC_A &
+				   IGP01E1000_PHY_PAGE_SELECT, &phy_data);
+		regs_buff[13] = (u32)phy_data; /* cable length */
+		e1000_write_phy_reg(hw, IGP01E1000_PHY_PAGE_SELECT,
+				    IGP01E1000_PHY_AGC_B);
+		e1000_read_phy_reg(hw, IGP01E1000_PHY_AGC_B &
+				   IGP01E1000_PHY_PAGE_SELECT, &phy_data);
+		regs_buff[14] = (u32)phy_data; /* cable length */
+		e1000_write_phy_reg(hw, IGP01E1000_PHY_PAGE_SELECT,
+				    IGP01E1000_PHY_AGC_C);
+		e1000_read_phy_reg(hw, IGP01E1000_PHY_AGC_C &
+				   IGP01E1000_PHY_PAGE_SELECT, &phy_data);
+		regs_buff[15] = (u32)phy_data; /* cable length */
+		e1000_write_phy_reg(hw, IGP01E1000_PHY_PAGE_SELECT,
+				    IGP01E1000_PHY_AGC_D);
+		e1000_read_phy_reg(hw, IGP01E1000_PHY_AGC_D &
+				   IGP01E1000_PHY_PAGE_SELECT, &phy_data);
+		regs_buff[16] = (u32)phy_data; /* cable length */
+		regs_buff[17] = 0; /* extended 10bt distance (not needed) */
+		e1000_write_phy_reg(hw, IGP01E1000_PHY_PAGE_SELECT, 0x0);
+		e1000_read_phy_reg(hw, IGP01E1000_PHY_PORT_STATUS &
+				   IGP01E1000_PHY_PAGE_SELECT, &phy_data);
+		regs_buff[18] = (u32)phy_data; /* cable polarity */
+		e1000_write_phy_reg(hw, IGP01E1000_PHY_PAGE_SELECT,
+				    IGP01E1000_PHY_PCS_INIT_REG);
+		e1000_read_phy_reg(hw, IGP01E1000_PHY_PCS_INIT_REG &
+				   IGP01E1000_PHY_PAGE_SELECT, &phy_data);
+		regs_buff[19] = (u32)phy_data; /* cable polarity */
+		regs_buff[20] = 0; /* polarity correction enabled (always) */
+		regs_buff[22] = 0; /* phy receive errors (unavailable) */
+		regs_buff[23] = regs_buff[18]; /* mdix mode */
+		e1000_write_phy_reg(hw, IGP01E1000_PHY_PAGE_SELECT, 0x0);
+	} else {
+		e1000_read_phy_reg(hw, M88E1000_PHY_SPEC_STATUS, &phy_data);
+		regs_buff[13] = (u32)phy_data; /* cable length */
+		regs_buff[14] = 0;  /* Dummy (to align w/ IGP phy reg dump) */
+		regs_buff[15] = 0;  /* Dummy (to align w/ IGP phy reg dump) */
+		regs_buff[16] = 0;  /* Dummy (to align w/ IGP phy reg dump) */
+		e1000_read_phy_reg(hw, M88E1000_PHY_SPEC_CTRL, &phy_data);
+		regs_buff[17] = (u32)phy_data; /* extended 10bt distance */
+		regs_buff[18] = regs_buff[13]; /* cable polarity */
+		regs_buff[19] = 0;  /* Dummy (to align w/ IGP phy reg dump) */
+		regs_buff[20] = regs_buff[17]; /* polarity correction */
+		/* phy receive errors */
+		regs_buff[22] = adapter->phy_stats.receive_errors;
+		regs_buff[23] = regs_buff[13]; /* mdix mode */
+	}
+	regs_buff[21] = adapter->phy_stats.idle_errors;  /* phy idle errors */
+	e1000_read_phy_reg(hw, PHY_1000T_STATUS, &phy_data);
+	regs_buff[24] = (u32)phy_data;  /* phy local receiver status */
+	regs_buff[25] = regs_buff[24];  /* phy remote receiver status */
+	if (hw->mac_type >= e1000_82540 &&
+	    hw->mac_type < e1000_82571 &&
+	    hw->media_type == e1000_media_type_copper) {
+		regs_buff[26] = er32(MANC);
+	}
+}
+
+static int e1000_get_eeprom_len(struct net_device *netdev)
+{
+	struct e1000_adapter *adapter = netdev_priv(netdev);
+	struct e1000_hw *hw = &adapter->hw;
+
+	return hw->eeprom.word_size * 2;
+}
+
+static int e1000_get_eeprom(struct net_device *netdev,
+			    struct ethtool_eeprom *eeprom, u8 *bytes)
+{
+	struct e1000_adapter *adapter = netdev_priv(netdev);
+	struct e1000_hw *hw = &adapter->hw;
+	u16 *eeprom_buff;
+	int first_word, last_word;
+	int ret_val = 0;
+	u16 i;
+
+	if (eeprom->len == 0)
+		return -EINVAL;
+
+	eeprom->magic = hw->vendor_id | (hw->device_id << 16);
+
+	first_word = eeprom->offset >> 1;
+	last_word = (eeprom->offset + eeprom->len - 1) >> 1;
+
+	eeprom_buff = kmalloc(sizeof(u16) *
+			(last_word - first_word + 1), GFP_KERNEL);
+	if (!eeprom_buff)
+		return -ENOMEM;
+
+	if (hw->eeprom.type == e1000_eeprom_spi)
+		ret_val = e1000_read_eeprom(hw, first_word,
+					    last_word - first_word + 1,
+					    eeprom_buff);
+	else {
+		for (i = 0; i < last_word - first_word + 1; i++) {
+			ret_val = e1000_read_eeprom(hw, first_word + i, 1,
+						    &eeprom_buff[i]);
+			if (ret_val)
+				break;
+		}
+	}
+
+	/* Device's eeprom is always little-endian, word addressable */
+	for (i = 0; i < last_word - first_word + 1; i++)
+		le16_to_cpus(&eeprom_buff[i]);
+
+	memcpy(bytes, (u8 *)eeprom_buff + (eeprom->offset & 1),
+			eeprom->len);
+	kfree(eeprom_buff);
+
+	return ret_val;
+}
+
+static int e1000_set_eeprom(struct net_device *netdev,
+			    struct ethtool_eeprom *eeprom, u8 *bytes)
+{
+	struct e1000_adapter *adapter = netdev_priv(netdev);
+	struct e1000_hw *hw = &adapter->hw;
+	u16 *eeprom_buff;
+	void *ptr;
+	int max_len, first_word, last_word, ret_val = 0;
+	u16 i;
+
+	if (eeprom->len == 0)
+		return -EOPNOTSUPP;
+
+	if (eeprom->magic != (hw->vendor_id | (hw->device_id << 16)))
+		return -EFAULT;
+
+	max_len = hw->eeprom.word_size * 2;
+
+	first_word = eeprom->offset >> 1;
+	last_word = (eeprom->offset + eeprom->len - 1) >> 1;
+	eeprom_buff = kmalloc(max_len, GFP_KERNEL);
+	if (!eeprom_buff)
+		return -ENOMEM;
+
+	ptr = (void *)eeprom_buff;
+
+	if (eeprom->offset & 1) {
+		/* need read/modify/write of first changed EEPROM word */
+		/* only the second byte of the word is being modified */
+		ret_val = e1000_read_eeprom(hw, first_word, 1,
+					    &eeprom_buff[0]);
+		ptr++;
+	}
+	if (((eeprom->offset + eeprom->len) & 1) && (ret_val == 0)) {
+		/* need read/modify/write of last changed EEPROM word */
+		/* only the first byte of the word is being modified */
+		ret_val = e1000_read_eeprom(hw, last_word, 1,
+		                  &eeprom_buff[last_word - first_word]);
+	}
+
+	/* Device's eeprom is always little-endian, word addressable */
+	for (i = 0; i < last_word - first_word + 1; i++)
+		le16_to_cpus(&eeprom_buff[i]);
+
+	memcpy(ptr, bytes, eeprom->len);
+
+	for (i = 0; i < last_word - first_word + 1; i++)
+		eeprom_buff[i] = cpu_to_le16(eeprom_buff[i]);
+
+	ret_val = e1000_write_eeprom(hw, first_word,
+				     last_word - first_word + 1, eeprom_buff);
+
+	/* Update the checksum over the first part of the EEPROM if needed
+	 * and flush shadow RAM for 82573 controllers */
+	if ((ret_val == 0) && ((first_word <= EEPROM_CHECKSUM_REG) ||
+				(hw->mac_type == e1000_82573)))
+		e1000_update_eeprom_checksum(hw);
+
+	kfree(eeprom_buff);
+	return ret_val;
+}
+
+static void e1000_get_drvinfo(struct net_device *netdev,
+			      struct ethtool_drvinfo *drvinfo)
+{
+	struct e1000_adapter *adapter = netdev_priv(netdev);
+	struct e1000_hw *hw = &adapter->hw;
+	char firmware_version[32];
+	u16 eeprom_data;
+
+	strncpy(drvinfo->driver,  e1000_driver_name, 32);
+	strncpy(drvinfo->version, e1000_driver_version, 32);
+
+	/* EEPROM image version # is reported as firmware version # for
+	 * 8257{1|2|3} controllers */
+	e1000_read_eeprom(hw, 5, 1, &eeprom_data);
+	switch (hw->mac_type) {
+	case e1000_82571:
+	case e1000_82572:
+	case e1000_82573:
+	case e1000_80003es2lan:
+	case e1000_ich8lan:
+		sprintf(firmware_version, "%d.%d-%d",
+			(eeprom_data & 0xF000) >> 12,
+			(eeprom_data & 0x0FF0) >> 4,
+			eeprom_data & 0x000F);
+		break;
+	default:
+		sprintf(firmware_version, "N/A");
+	}
+
+	strncpy(drvinfo->fw_version, firmware_version, 32);
+	strncpy(drvinfo->bus_info, pci_name(adapter->pdev), 32);
+	drvinfo->regdump_len = e1000_get_regs_len(netdev);
+	drvinfo->eedump_len = e1000_get_eeprom_len(netdev);
+}
+
+static void e1000_get_ringparam(struct net_device *netdev,
+				struct ethtool_ringparam *ring)
+{
+	struct e1000_adapter *adapter = netdev_priv(netdev);
+	struct e1000_hw *hw = &adapter->hw;
+	e1000_mac_type mac_type = hw->mac_type;
+	struct e1000_tx_ring *txdr = adapter->tx_ring;
+	struct e1000_rx_ring *rxdr = adapter->rx_ring;
+
+	ring->rx_max_pending = (mac_type < e1000_82544) ? E1000_MAX_RXD :
+		E1000_MAX_82544_RXD;
+	ring->tx_max_pending = (mac_type < e1000_82544) ? E1000_MAX_TXD :
+		E1000_MAX_82544_TXD;
+	ring->rx_mini_max_pending = 0;
+	ring->rx_jumbo_max_pending = 0;
+	ring->rx_pending = rxdr->count;
+	ring->tx_pending = txdr->count;
+	ring->rx_mini_pending = 0;
+	ring->rx_jumbo_pending = 0;
+}
+
+static int e1000_set_ringparam(struct net_device *netdev,
+			       struct ethtool_ringparam *ring)
+{
+	struct e1000_adapter *adapter = netdev_priv(netdev);
+	struct e1000_hw *hw = &adapter->hw;
+	e1000_mac_type mac_type = hw->mac_type;
+	struct e1000_tx_ring *txdr, *tx_old;
+	struct e1000_rx_ring *rxdr, *rx_old;
+	int i, err;
+
+	if ((ring->rx_mini_pending) || (ring->rx_jumbo_pending))
+		return -EINVAL;
+
+	while (test_and_set_bit(__E1000_RESETTING, &adapter->flags))
+		msleep(1);
+
+	if (netif_running(adapter->netdev))
+		e1000_down(adapter);
+
+	tx_old = adapter->tx_ring;
+	rx_old = adapter->rx_ring;
+
+	err = -ENOMEM;
+	txdr = kcalloc(adapter->num_tx_queues, sizeof(struct e1000_tx_ring), GFP_KERNEL);
+	if (!txdr)
+		goto err_alloc_tx;
+
+	rxdr = kcalloc(adapter->num_rx_queues, sizeof(struct e1000_rx_ring), GFP_KERNEL);
+	if (!rxdr)
+		goto err_alloc_rx;
+
+	adapter->tx_ring = txdr;
+	adapter->rx_ring = rxdr;
+
+	rxdr->count = max(ring->rx_pending,(u32)E1000_MIN_RXD);
+	rxdr->count = min(rxdr->count,(u32)(mac_type < e1000_82544 ?
+		E1000_MAX_RXD : E1000_MAX_82544_RXD));
+	rxdr->count = ALIGN(rxdr->count, REQ_RX_DESCRIPTOR_MULTIPLE);
+
+	txdr->count = max(ring->tx_pending,(u32)E1000_MIN_TXD);
+	txdr->count = min(txdr->count,(u32)(mac_type < e1000_82544 ?
+		E1000_MAX_TXD : E1000_MAX_82544_TXD));
+	txdr->count = ALIGN(txdr->count, REQ_TX_DESCRIPTOR_MULTIPLE);
+
+	for (i = 0; i < adapter->num_tx_queues; i++)
+		txdr[i].count = txdr->count;
+	for (i = 0; i < adapter->num_rx_queues; i++)
+		rxdr[i].count = rxdr->count;
+
+	if (netif_running(adapter->netdev)) {
+		/* Try to get new resources before deleting old */
+		err = e1000_setup_all_rx_resources(adapter);
+		if (err)
+			goto err_setup_rx;
+		err = e1000_setup_all_tx_resources(adapter);
+		if (err)
+			goto err_setup_tx;
+
+		/* save the new, restore the old in order to free it,
+		 * then restore the new back again */
+
+		adapter->rx_ring = rx_old;
+		adapter->tx_ring = tx_old;
+		e1000_free_all_rx_resources(adapter);
+		e1000_free_all_tx_resources(adapter);
+		kfree(tx_old);
+		kfree(rx_old);
+		adapter->rx_ring = rxdr;
+		adapter->tx_ring = txdr;
+		err = e1000_up(adapter);
+		if (err)
+			goto err_setup;
+	}
+
+	clear_bit(__E1000_RESETTING, &adapter->flags);
+	return 0;
+err_setup_tx:
+	e1000_free_all_rx_resources(adapter);
+err_setup_rx:
+	adapter->rx_ring = rx_old;
+	adapter->tx_ring = tx_old;
+	kfree(rxdr);
+err_alloc_rx:
+	kfree(txdr);
+err_alloc_tx:
+	e1000_up(adapter);
+err_setup:
+	clear_bit(__E1000_RESETTING, &adapter->flags);
+	return err;
+}
+
+static bool reg_pattern_test(struct e1000_adapter *adapter, u64 *data, int reg,
+			     u32 mask, u32 write)
+{
+	struct e1000_hw *hw = &adapter->hw;
+	static const u32 test[] =
+		{0x5A5A5A5A, 0xA5A5A5A5, 0x00000000, 0xFFFFFFFF};
+	u8 __iomem *address = hw->hw_addr + reg;
+	u32 read;
+	int i;
+
+	for (i = 0; i < ARRAY_SIZE(test); i++) {
+		writel(write & test[i], address);
+		read = readl(address);
+		if (read != (write & test[i] & mask)) {
+			DPRINTK(DRV, ERR, "pattern test reg %04X failed: "
+				"got 0x%08X expected 0x%08X\n",
+				reg, read, (write & test[i] & mask));
+			*data = reg;
+			return true;
+		}
+	}
+	return false;
+}
+
+static bool reg_set_and_check(struct e1000_adapter *adapter, u64 *data, int reg,
+			      u32 mask, u32 write)
+{
+	struct e1000_hw *hw = &adapter->hw;
+	u8 __iomem *address = hw->hw_addr + reg;
+	u32 read;
+
+	writel(write & mask, address);
+	read = readl(address);
+	if ((read & mask) != (write & mask)) {
+		DPRINTK(DRV, ERR, "set/check reg %04X test failed: "
+			"got 0x%08X expected 0x%08X\n",
+			reg, (read & mask), (write & mask));
+		*data = reg;
+		return true;
+	}
+	return false;
+}
+
+#define REG_PATTERN_TEST(reg, mask, write)			     \
+	do {							     \
+		if (reg_pattern_test(adapter, data,		     \
+			     (hw->mac_type >= e1000_82543)   \
+			     ? E1000_##reg : E1000_82542_##reg,	     \
+			     mask, write))			     \
+			return 1;				     \
+	} while (0)
+
+#define REG_SET_AND_CHECK(reg, mask, write)			     \
+	do {							     \
+		if (reg_set_and_check(adapter, data,		     \
+			      (hw->mac_type >= e1000_82543)  \
+			      ? E1000_##reg : E1000_82542_##reg,     \
+			      mask, write))			     \
+			return 1;				     \
+	} while (0)
+
+static int e1000_reg_test(struct e1000_adapter *adapter, u64 *data)
+{
+	u32 value, before, after;
+	u32 i, toggle;
+	struct e1000_hw *hw = &adapter->hw;
+
+	/* The status register is Read Only, so a write should fail.
+	 * Some bits that get toggled are ignored.
+	 */
+	switch (hw->mac_type) {
+	/* there are several bits on newer hardware that are r/w */
+	case e1000_82571:
+	case e1000_82572:
+	case e1000_80003es2lan:
+		toggle = 0x7FFFF3FF;
+		break;
+	case e1000_82573:
+	case e1000_ich8lan:
+		toggle = 0x7FFFF033;
+		break;
+	default:
+		toggle = 0xFFFFF833;
+		break;
+	}
+
+	before = er32(STATUS);
+	value = (er32(STATUS) & toggle);
+	ew32(STATUS, toggle);
+	after = er32(STATUS) & toggle;
+	if (value != after) {
+		DPRINTK(DRV, ERR, "failed STATUS register test got: "
+		        "0x%08X expected: 0x%08X\n", after, value);
+		*data = 1;
+		return 1;
+	}
+	/* restore previous status */
+	ew32(STATUS, before);
+
+	if (hw->mac_type != e1000_ich8lan) {
+		REG_PATTERN_TEST(FCAL, 0xFFFFFFFF, 0xFFFFFFFF);
+		REG_PATTERN_TEST(FCAH, 0x0000FFFF, 0xFFFFFFFF);
+		REG_PATTERN_TEST(FCT, 0x0000FFFF, 0xFFFFFFFF);
+		REG_PATTERN_TEST(VET, 0x0000FFFF, 0xFFFFFFFF);
+	}
+
+	REG_PATTERN_TEST(RDTR, 0x0000FFFF, 0xFFFFFFFF);
+	REG_PATTERN_TEST(RDBAH, 0xFFFFFFFF, 0xFFFFFFFF);
+	REG_PATTERN_TEST(RDLEN, 0x000FFF80, 0x000FFFFF);
+	REG_PATTERN_TEST(RDH, 0x0000FFFF, 0x0000FFFF);
+	REG_PATTERN_TEST(RDT, 0x0000FFFF, 0x0000FFFF);
+	REG_PATTERN_TEST(FCRTH, 0x0000FFF8, 0x0000FFF8);
+	REG_PATTERN_TEST(FCTTV, 0x0000FFFF, 0x0000FFFF);
+	REG_PATTERN_TEST(TIPG, 0x3FFFFFFF, 0x3FFFFFFF);
+	REG_PATTERN_TEST(TDBAH, 0xFFFFFFFF, 0xFFFFFFFF);
+	REG_PATTERN_TEST(TDLEN, 0x000FFF80, 0x000FFFFF);
+
+	REG_SET_AND_CHECK(RCTL, 0xFFFFFFFF, 0x00000000);
+
+	before = (hw->mac_type == e1000_ich8lan ?
+	          0x06C3B33E : 0x06DFB3FE);
+	REG_SET_AND_CHECK(RCTL, before, 0x003FFFFB);
+	REG_SET_AND_CHECK(TCTL, 0xFFFFFFFF, 0x00000000);
+
+	if (hw->mac_type >= e1000_82543) {
+
+		REG_SET_AND_CHECK(RCTL, before, 0xFFFFFFFF);
+		REG_PATTERN_TEST(RDBAL, 0xFFFFFFF0, 0xFFFFFFFF);
+		if (hw->mac_type != e1000_ich8lan)
+			REG_PATTERN_TEST(TXCW, 0xC000FFFF, 0x0000FFFF);
+		REG_PATTERN_TEST(TDBAL, 0xFFFFFFF0, 0xFFFFFFFF);
+		REG_PATTERN_TEST(TIDV, 0x0000FFFF, 0x0000FFFF);
+		value = (hw->mac_type == e1000_ich8lan ?
+		         E1000_RAR_ENTRIES_ICH8LAN : E1000_RAR_ENTRIES);
+		for (i = 0; i < value; i++) {
+			REG_PATTERN_TEST(RA + (((i << 1) + 1) << 2), 0x8003FFFF,
+			                 0xFFFFFFFF);
+		}
+
+	} else {
+
+		REG_SET_AND_CHECK(RCTL, 0xFFFFFFFF, 0x01FFFFFF);
+		REG_PATTERN_TEST(RDBAL, 0xFFFFF000, 0xFFFFFFFF);
+		REG_PATTERN_TEST(TXCW, 0x0000FFFF, 0x0000FFFF);
+		REG_PATTERN_TEST(TDBAL, 0xFFFFF000, 0xFFFFFFFF);
+
+	}
+
+	value = (hw->mac_type == e1000_ich8lan ?
+			E1000_MC_TBL_SIZE_ICH8LAN : E1000_MC_TBL_SIZE);
+	for (i = 0; i < value; i++)
+		REG_PATTERN_TEST(MTA + (i << 2), 0xFFFFFFFF, 0xFFFFFFFF);
+
+	*data = 0;
+	return 0;
+}
+
+static int e1000_eeprom_test(struct e1000_adapter *adapter, u64 *data)
+{
+	struct e1000_hw *hw = &adapter->hw;
+	u16 temp;
+	u16 checksum = 0;
+	u16 i;
+
+	*data = 0;
+	/* Read and add up the contents of the EEPROM */
+	for (i = 0; i < (EEPROM_CHECKSUM_REG + 1); i++) {
+		if ((e1000_read_eeprom(hw, i, 1, &temp)) < 0) {
+			*data = 1;
+			break;
+		}
+		checksum += temp;
+	}
+
+	/* If Checksum is not Correct return error else test passed */
+	if ((checksum != (u16)EEPROM_SUM) && !(*data))
+		*data = 2;
+
+	return *data;
+}
+
+static irqreturn_t e1000_test_intr(int irq, void *data)
+{
+	struct net_device *netdev = (struct net_device *)data;
+	struct e1000_adapter *adapter = netdev_priv(netdev);
+	struct e1000_hw *hw = &adapter->hw;
+
+	adapter->test_icr |= er32(ICR);
+
+	return IRQ_HANDLED;
+}
+
+static int e1000_intr_test(struct e1000_adapter *adapter, u64 *data)
+{
+	struct net_device *netdev = adapter->netdev;
+	u32 mask, i = 0;
+	bool shared_int = true;
+	u32 irq = adapter->pdev->irq;
+	struct e1000_hw *hw = &adapter->hw;
+
+	*data = 0;
+
+	/* NOTE: we don't test MSI interrupts here, yet */
+	/* Hook up test interrupt handler just for this test */
+	if (!request_irq(irq, &e1000_test_intr, IRQF_PROBE_SHARED, netdev->name,
+	                 netdev))
+		shared_int = false;
+	else if (request_irq(irq, &e1000_test_intr, IRQF_SHARED,
+	         netdev->name, netdev)) {
+		*data = 1;
+		return -1;
+	}
+	DPRINTK(HW, INFO, "testing %s interrupt\n",
+	        (shared_int ? "shared" : "unshared"));
+
+	/* Disable all the interrupts */
+	ew32(IMC, 0xFFFFFFFF);
+	msleep(10);
+
+	/* Test each interrupt */
+	for (; i < 10; i++) {
+
+		if (hw->mac_type == e1000_ich8lan && i == 8)
+			continue;
+
+		/* Interrupt to test */
+		mask = 1 << i;
+
+		if (!shared_int) {
+			/* Disable the interrupt to be reported in
+			 * the cause register and then force the same
+			 * interrupt and see if one gets posted.  If
+			 * an interrupt was posted to the bus, the
+			 * test failed.
+			 */
+			adapter->test_icr = 0;
+			ew32(IMC, mask);
+			ew32(ICS, mask);
+			msleep(10);
+
+			if (adapter->test_icr & mask) {
+				*data = 3;
+				break;
+			}
+		}
+
+		/* Enable the interrupt to be reported in
+		 * the cause register and then force the same
+		 * interrupt and see if one gets posted.  If
+		 * an interrupt was not posted to the bus, the
+		 * test failed.
+		 */
+		adapter->test_icr = 0;
+		ew32(IMS, mask);
+		ew32(ICS, mask);
+		msleep(10);
+
+		if (!(adapter->test_icr & mask)) {
+			*data = 4;
+			break;
+		}
+
+		if (!shared_int) {
+			/* Disable the other interrupts to be reported in
+			 * the cause register and then force the other
+			 * interrupts and see if any get posted.  If
+			 * an interrupt was posted to the bus, the
+			 * test failed.
+			 */
+			adapter->test_icr = 0;
+			ew32(IMC, ~mask & 0x00007FFF);
+			ew32(ICS, ~mask & 0x00007FFF);
+			msleep(10);
+
+			if (adapter->test_icr) {
+				*data = 5;
+				break;
+			}
+		}
+	}
+
+	/* Disable all the interrupts */
+	ew32(IMC, 0xFFFFFFFF);
+	msleep(10);
+
+	/* Unhook test interrupt handler */
+	free_irq(irq, netdev);
+
+	return *data;
+}
+
+static void e1000_free_desc_rings(struct e1000_adapter *adapter)
+{
+	struct e1000_tx_ring *txdr = &adapter->test_tx_ring;
+	struct e1000_rx_ring *rxdr = &adapter->test_rx_ring;
+	struct pci_dev *pdev = adapter->pdev;
+	int i;
+
+	if (txdr->desc && txdr->buffer_info) {
+		for (i = 0; i < txdr->count; i++) {
+			if (txdr->buffer_info[i].dma)
+				pci_unmap_single(pdev, txdr->buffer_info[i].dma,
+						 txdr->buffer_info[i].length,
+						 PCI_DMA_TODEVICE);
+			if (txdr->buffer_info[i].skb)
+				dev_kfree_skb(txdr->buffer_info[i].skb);
+		}
+	}
+
+	if (rxdr->desc && rxdr->buffer_info) {
+		for (i = 0; i < rxdr->count; i++) {
+			if (rxdr->buffer_info[i].dma)
+				pci_unmap_single(pdev, rxdr->buffer_info[i].dma,
+						 rxdr->buffer_info[i].length,
+						 PCI_DMA_FROMDEVICE);
+			if (rxdr->buffer_info[i].skb)
+				dev_kfree_skb(rxdr->buffer_info[i].skb);
+		}
+	}
+
+	if (txdr->desc) {
+		pci_free_consistent(pdev, txdr->size, txdr->desc, txdr->dma);
+		txdr->desc = NULL;
+	}
+	if (rxdr->desc) {
+		pci_free_consistent(pdev, rxdr->size, rxdr->desc, rxdr->dma);
+		rxdr->desc = NULL;
+	}
+
+	kfree(txdr->buffer_info);
+	txdr->buffer_info = NULL;
+	kfree(rxdr->buffer_info);
+	rxdr->buffer_info = NULL;
+
+	return;
+}
+
+static int e1000_setup_desc_rings(struct e1000_adapter *adapter)
+{
+	struct e1000_hw *hw = &adapter->hw;
+	struct e1000_tx_ring *txdr = &adapter->test_tx_ring;
+	struct e1000_rx_ring *rxdr = &adapter->test_rx_ring;
+	struct pci_dev *pdev = adapter->pdev;
+	u32 rctl;
+	int i, ret_val;
+
+	/* Setup Tx descriptor ring and Tx buffers */
+
+	if (!txdr->count)
+		txdr->count = E1000_DEFAULT_TXD;
+
+	txdr->buffer_info = kcalloc(txdr->count, sizeof(struct e1000_buffer),
+				    GFP_KERNEL);
+	if (!txdr->buffer_info) {
+		ret_val = 1;
+		goto err_nomem;
+	}
+
+	txdr->size = txdr->count * sizeof(struct e1000_tx_desc);
+	txdr->size = ALIGN(txdr->size, 4096);
+	txdr->desc = pci_alloc_consistent(pdev, txdr->size, &txdr->dma);
+	if (!txdr->desc) {
+		ret_val = 2;
+		goto err_nomem;
+	}
+	memset(txdr->desc, 0, txdr->size);
+	txdr->next_to_use = txdr->next_to_clean = 0;
+
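+	/* Program the Tx ring base address, length and head/tail, then
+	 * enable the transmitter */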
+	ew32(TDBAL, ((u64)txdr->dma & 0x00000000FFFFFFFF));
+	ew32(TDBAH, ((u64)txdr->dma >> 32));
+	ew32(TDLEN, txdr->count * sizeof(struct e1000_tx_desc));
+	ew32(TDH, 0);
+	ew32(TDT, 0);
+	ew32(TCTL, E1000_TCTL_PSP | E1000_TCTL_EN |
+	     E1000_COLLISION_THRESHOLD << E1000_CT_SHIFT |
+	     E1000_FDX_COLLISION_DISTANCE << E1000_COLD_SHIFT);
+
+	for (i = 0; i < txdr->count; i++) {
+		struct e1000_tx_desc *tx_desc = E1000_TX_DESC(*txdr, i);
+		struct sk_buff *skb;
+		unsigned int size = 1024;
+
+		skb = alloc_skb(size, GFP_KERNEL);
+		if (!skb) {
+			ret_val = 3;
+			goto err_nomem;
+		}
+		skb_put(skb, size);
+		txdr->buffer_info[i].skb = skb;
+		txdr->buffer_info[i].length = skb->len;
+		txdr->buffer_info[i].dma =
+			pci_map_single(pdev, skb->data, skb->len,
+				       PCI_DMA_TODEVICE);
+		tx_desc->buffer_addr = cpu_to_le64(txdr->buffer_info[i].dma);
+		tx_desc->lower.data = cpu_to_le32(skb->len);
+		tx_desc->lower.data |= cpu_to_le32(E1000_TXD_CMD_EOP |
+						   E1000_TXD_CMD_IFCS |
+						   E1000_TXD_CMD_RPS);
+		tx_desc->upper.data = 0;
+	}
+
+	/* Setup Rx descriptor ring and Rx buffers */
+
+	if (!rxdr->count)
+		rxdr->count = E1000_DEFAULT_RXD;
+
+	rxdr->buffer_info = kcalloc(rxdr->count, sizeof(struct e1000_buffer),
+				    GFP_KERNEL);
+	if (!rxdr->buffer_info) {
+		ret_val = 4;
+		goto err_nomem;
+	}
+
+	rxdr->size = rxdr->count * sizeof(struct e1000_rx_desc);
+	rxdr->desc = pci_alloc_consistent(pdev, rxdr->size, &rxdr->dma);
+	if (!rxdr->desc) {
+		ret_val = 5;
+		goto err_nomem;
+	}
+	memset(rxdr->desc, 0, rxdr->size);
+	rxdr->next_to_use = rxdr->next_to_clean = 0;
+
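+	/* Disable receives while the Rx ring registers are programmed, then
+	 * re-enable with the desired receive control settings */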
+	rctl = er32(RCTL);
+	ew32(RCTL, rctl & ~E1000_RCTL_EN);
+	ew32(RDBAL, ((u64)rxdr->dma & 0xFFFFFFFF));
+	ew32(RDBAH, ((u64)rxdr->dma >> 32));
+	ew32(RDLEN, rxdr->size);
+	ew32(RDH, 0);
+	ew32(RDT, 0);
+	rctl = E1000_RCTL_EN | E1000_RCTL_BAM | E1000_RCTL_SZ_2048 |
+		E1000_RCTL_LBM_NO | E1000_RCTL_RDMTS_HALF |
+		(hw->mc_filter_type << E1000_RCTL_MO_SHIFT);
+	ew32(RCTL, rctl);
+
+	for (i = 0; i < rxdr->count; i++) {
+		struct e1000_rx_desc *rx_desc = E1000_RX_DESC(*rxdr, i);
+		struct sk_buff *skb;
+
+		skb = alloc_skb(E1000_RXBUFFER_2048 + NET_IP_ALIGN, GFP_KERNEL);
+		if (!skb) {
+			ret_val = 6;
+			goto err_nomem;
+		}
+		skb_reserve(skb, NET_IP_ALIGN);
+		rxdr->buffer_info[i].skb = skb;
+		rxdr->buffer_info[i].length = E1000_RXBUFFER_2048;
+		rxdr->buffer_info[i].dma =
+			pci_map_single(pdev, skb->data, E1000_RXBUFFER_2048,
+				       PCI_DMA_FROMDEVICE);
+		rx_desc->buffer_addr = cpu_to_le64(rxdr->buffer_info[i].dma);
+		memset(skb->data, 0x00, skb->len);
+	}
+
+	return 0;
+
+err_nomem:
+	e1000_free_desc_rings(adapter);
+	return ret_val;
+}
+
+static void e1000_phy_disable_receiver(struct e1000_adapter *adapter)
+{
+	struct e1000_hw *hw = &adapter->hw;
+
+	/* Write out to PHY registers 29 and 30 to disable the Receiver. */
+	e1000_write_phy_reg(hw, 29, 0x001F);
+	e1000_write_phy_reg(hw, 30, 0x8FFC);
+	e1000_write_phy_reg(hw, 29, 0x001A);
+	e1000_write_phy_reg(hw, 30, 0x8FF0);
+}
+
+static void e1000_phy_reset_clk_and_crs(struct e1000_adapter *adapter)
+{
+	struct e1000_hw *hw = &adapter->hw;
+	u16 phy_reg;
+
+	/* Because we reset the PHY above, we need to re-force TX_CLK in the
+	 * Extended PHY Specific Control Register to 25MHz clock.  This
+	 * value defaults back to a 2.5MHz clock when the PHY is reset.
+	 */
+	e1000_read_phy_reg(hw, M88E1000_EXT_PHY_SPEC_CTRL, &phy_reg);
+	phy_reg |= M88E1000_EPSCR_TX_CLK_25;
+	e1000_write_phy_reg(hw,
+		M88E1000_EXT_PHY_SPEC_CTRL, phy_reg);
+
+	/* In addition, because of the s/w reset above, we need to enable
+	 * CRS on TX.  This must be set for both full and half duplex
+	 * operation.
+	 */
+	e1000_read_phy_reg(hw, M88E1000_PHY_SPEC_CTRL, &phy_reg);
+	phy_reg |= M88E1000_PSCR_ASSERT_CRS_ON_TX;
+	e1000_write_phy_reg(hw,
+		M88E1000_PHY_SPEC_CTRL, phy_reg);
+}
+
+static int e1000_nonintegrated_phy_loopback(struct e1000_adapter *adapter)
+{
+	struct e1000_hw *hw = &adapter->hw;
+	u32 ctrl_reg;
+	u16 phy_reg;
+
+	/* Setup the Device Control Register for PHY loopback test. */
+
+	ctrl_reg = er32(CTRL);
+	ctrl_reg |= (E1000_CTRL_ILOS |		/* Invert Loss-Of-Signal */
+		     E1000_CTRL_FRCSPD |	/* Set the Force Speed Bit */
+		     E1000_CTRL_FRCDPX |	/* Set the Force Duplex Bit */
+		     E1000_CTRL_SPD_1000 |	/* Force Speed to 1000 */
+		     E1000_CTRL_FD);		/* Force Duplex to FULL */
+
+	ew32(CTRL, ctrl_reg);
+
+	/* Read the PHY Specific Control Register (0x10) */
+	e1000_read_phy_reg(hw, M88E1000_PHY_SPEC_CTRL, &phy_reg);
+
+	/* Clear Auto-Crossover bits in PHY Specific Control Register
+	 * (bits 6:5).
+	 */
+	phy_reg &= ~M88E1000_PSCR_AUTO_X_MODE;
+	e1000_write_phy_reg(hw, M88E1000_PHY_SPEC_CTRL, phy_reg);
+
+	/* Perform software reset on the PHY */
+	e1000_phy_reset(hw);
+
+	/* Have to setup TX_CLK and TX_CRS after software reset */
+	e1000_phy_reset_clk_and_crs(adapter);
+
+	e1000_write_phy_reg(hw, PHY_CTRL, 0x8100);
+
+	/* Wait for reset to complete. */
+	udelay(500);
+
+	/* Have to setup TX_CLK and TX_CRS after software reset */
+	e1000_phy_reset_clk_and_crs(adapter);
+
+	/* Write out to PHY registers 29 and 30 to disable the Receiver. */
+	e1000_phy_disable_receiver(adapter);
+
+	/* Set the loopback bit in the PHY control register. */
+	e1000_read_phy_reg(hw, PHY_CTRL, &phy_reg);
+	phy_reg |= MII_CR_LOOPBACK;
+	e1000_write_phy_reg(hw, PHY_CTRL, phy_reg);
+
+	/* Setup TX_CLK and TX_CRS one more time. */
+	e1000_phy_reset_clk_and_crs(adapter);
+
+	/* Check Phy Configuration */
+	e1000_read_phy_reg(hw, PHY_CTRL, &phy_reg);
+	if (phy_reg != 0x4100)
+		 return 9;
+
+	e1000_read_phy_reg(hw, M88E1000_EXT_PHY_SPEC_CTRL, &phy_reg);
+	if (phy_reg != 0x0070)
+		return 10;
+
+	e1000_read_phy_reg(hw, 29, &phy_reg);
+	if (phy_reg != 0x001A)
+		return 11;
+
+	return 0;
+}
+
+static int e1000_integrated_phy_loopback(struct e1000_adapter *adapter)
+{
+	struct e1000_hw *hw = &adapter->hw;
+	u32 ctrl_reg = 0;
+	u32 stat_reg = 0;
+
+	hw->autoneg = false;
+
+	if (hw->phy_type == e1000_phy_m88) {
+		/* Auto-MDI/MDIX Off */
+		e1000_write_phy_reg(hw,
+				    M88E1000_PHY_SPEC_CTRL, 0x0808);
+		/* reset to update Auto-MDI/MDIX */
+		e1000_write_phy_reg(hw, PHY_CTRL, 0x9140);
+		/* autoneg off */
+		e1000_write_phy_reg(hw, PHY_CTRL, 0x8140);
+	} else if (hw->phy_type == e1000_phy_gg82563)
+		e1000_write_phy_reg(hw,
+		                    GG82563_PHY_KMRN_MODE_CTRL,
+		                    0x1CC);
+
+	ctrl_reg = er32(CTRL);
+
+	if (hw->phy_type == e1000_phy_ife) {
+		/* force 100, set loopback */
+		e1000_write_phy_reg(hw, PHY_CTRL, 0x6100);
+
+		/* Now set up the MAC to the same speed/duplex as the PHY. */
+		ctrl_reg &= ~E1000_CTRL_SPD_SEL; /* Clear the speed sel bits */
+		ctrl_reg |= (E1000_CTRL_FRCSPD | /* Set the Force Speed Bit */
+			     E1000_CTRL_FRCDPX | /* Set the Force Duplex Bit */
+			     E1000_CTRL_SPD_100 |/* Force Speed to 100 */
+			     E1000_CTRL_FD);	 /* Force Duplex to FULL */
+	} else {
+		/* force 1000, set loopback */
+		e1000_write_phy_reg(hw, PHY_CTRL, 0x4140);
+
+		/* Now set up the MAC to the same speed/duplex as the PHY. */
+		ctrl_reg = er32(CTRL);
+		ctrl_reg &= ~E1000_CTRL_SPD_SEL; /* Clear the speed sel bits */
+		ctrl_reg |= (E1000_CTRL_FRCSPD | /* Set the Force Speed Bit */
+			     E1000_CTRL_FRCDPX | /* Set the Force Duplex Bit */
+			     E1000_CTRL_SPD_1000 |/* Force Speed to 1000 */
+			     E1000_CTRL_FD);	 /* Force Duplex to FULL */
+	}
+
+	if (hw->media_type == e1000_media_type_copper &&
+	   hw->phy_type == e1000_phy_m88)
+		ctrl_reg |= E1000_CTRL_ILOS; /* Invert Loss of Signal */
+	else {
+		/* Set the ILOS bit on the fiber NIC if a half-
+		 * duplex link is detected. */
+		stat_reg = er32(STATUS);
+		if ((stat_reg & E1000_STATUS_FD) == 0)
+			ctrl_reg |= (E1000_CTRL_ILOS | E1000_CTRL_SLU);
+	}
+
+	ew32(CTRL, ctrl_reg);
+
+	/* Disable the receiver on the PHY so that the PHY does not begin to
+	 * autonegotiate when a cable is reconnected to the NIC.
+	 */
+	if (hw->phy_type == e1000_phy_m88)
+		e1000_phy_disable_receiver(adapter);
+
+	udelay(500);
+
+	return 0;
+}
+
+static int e1000_set_phy_loopback(struct e1000_adapter *adapter)
+{
+	struct e1000_hw *hw = &adapter->hw;
+	u16 phy_reg = 0;
+	u16 count = 0;
+
+	switch (hw->mac_type) {
+	case e1000_82543:
+		if (hw->media_type == e1000_media_type_copper) {
+			/* Attempt to setup Loopback mode on Non-integrated PHY.
+			 * Some PHY registers get corrupted at random, so
+			 * attempt this 10 times.
+			 */
+			while (e1000_nonintegrated_phy_loopback(adapter) &&
+			      count++ < 10);
+			if (count < 11)
+				return 0;
+		}
+		break;
+
+	case e1000_82544:
+	case e1000_82540:
+	case e1000_82545:
+	case e1000_82545_rev_3:
+	case e1000_82546:
+	case e1000_82546_rev_3:
+	case e1000_82541:
+	case e1000_82541_rev_2:
+	case e1000_82547:
+	case e1000_82547_rev_2:
+	case e1000_82571:
+	case e1000_82572:
+	case e1000_82573:
+	case e1000_80003es2lan:
+	case e1000_ich8lan:
+		return e1000_integrated_phy_loopback(adapter);
+		break;
+
+	default:
+		/* The default PHY loopback method is to read the MII
+		 * control register and assert bit 14 (loopback mode).
+		 */
+		e1000_read_phy_reg(hw, PHY_CTRL, &phy_reg);
+		phy_reg |= MII_CR_LOOPBACK;
+		e1000_write_phy_reg(hw, PHY_CTRL, phy_reg);
+		return 0;
+		break;
+	}
+
+	return 8;
+}
+
+static int e1000_setup_loopback_test(struct e1000_adapter *adapter)
+{
+	struct e1000_hw *hw = &adapter->hw;
+	u32 rctl;
+
+	if (hw->media_type == e1000_media_type_fiber ||
+	    hw->media_type == e1000_media_type_internal_serdes) {
+		switch (hw->mac_type) {
+		case e1000_82545:
+		case e1000_82546:
+		case e1000_82545_rev_3:
+		case e1000_82546_rev_3:
+			return e1000_set_phy_loopback(adapter);
+			break;
+		case e1000_82571:
+		case e1000_82572:
+#define E1000_SERDES_LB_ON 0x410
+			e1000_set_phy_loopback(adapter);
+			ew32(SCTL, E1000_SERDES_LB_ON);
+			msleep(10);
+			return 0;
+			break;
+		default:
+			rctl = er32(RCTL);
+			rctl |= E1000_RCTL_LBM_TCVR;
+			ew32(RCTL, rctl);
+			return 0;
+		}
+	} else if (hw->media_type == e1000_media_type_copper)
+		return e1000_set_phy_loopback(adapter);
+
+	return 7;
+}
+
+static void e1000_loopback_cleanup(struct e1000_adapter *adapter)
+{
+	struct e1000_hw *hw = &adapter->hw;
+	u32 rctl;
+	u16 phy_reg;
+
+	rctl = er32(RCTL);
+	rctl &= ~(E1000_RCTL_LBM_TCVR | E1000_RCTL_LBM_MAC);
+	ew32(RCTL, rctl);
+
+	switch (hw->mac_type) {
+	case e1000_82571:
+	case e1000_82572:
+		if (hw->media_type == e1000_media_type_fiber ||
+		    hw->media_type == e1000_media_type_internal_serdes) {
+#define E1000_SERDES_LB_OFF 0x400
+			ew32(SCTL, E1000_SERDES_LB_OFF);
+			msleep(10);
+			break;
+		}
+		/* Fall Through */
+	case e1000_82545:
+	case e1000_82546:
+	case e1000_82545_rev_3:
+	case e1000_82546_rev_3:
+	default:
+		hw->autoneg = true;
+		if (hw->phy_type == e1000_phy_gg82563)
+			e1000_write_phy_reg(hw,
+					    GG82563_PHY_KMRN_MODE_CTRL,
+					    0x180);
+		e1000_read_phy_reg(hw, PHY_CTRL, &phy_reg);
+		if (phy_reg & MII_CR_LOOPBACK) {
+			phy_reg &= ~MII_CR_LOOPBACK;
+			e1000_write_phy_reg(hw, PHY_CTRL, phy_reg);
+			e1000_phy_reset(hw);
+		}
+		break;
+	}
+}
+
+static void e1000_create_lbtest_frame(struct sk_buff *skb,
+				      unsigned int frame_size)
+{
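+	/* Fill the frame with a recognizable pattern: all 0xFF, second half
+	 * 0xAA, plus two marker bytes checked on the receive side */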
+	memset(skb->data, 0xFF, frame_size);
+	frame_size &= ~1;
+	memset(&skb->data[frame_size / 2], 0xAA, frame_size / 2 - 1);
+	memset(&skb->data[frame_size / 2 + 10], 0xBE, 1);
+	memset(&skb->data[frame_size / 2 + 12], 0xAF, 1);
+}
+
+static int e1000_check_lbtest_frame(struct sk_buff *skb,
+				    unsigned int frame_size)
+{
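+	/* Verify the pattern written by e1000_create_lbtest_frame() */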
+	frame_size &= ~1;
+	if (*(skb->data + 3) == 0xFF) {
+		if ((*(skb->data + frame_size / 2 + 10) == 0xBE) &&
+		   (*(skb->data + frame_size / 2 + 12) == 0xAF)) {
+			return 0;
+		}
+	}
+	return 13;
+}
+
+static int e1000_run_loopback_test(struct e1000_adapter *adapter)
+{
+	struct e1000_hw *hw = &adapter->hw;
+	struct e1000_tx_ring *txdr = &adapter->test_tx_ring;
+	struct e1000_rx_ring *rxdr = &adapter->test_rx_ring;
+	struct pci_dev *pdev = adapter->pdev;
+	int i, j, k, l, lc, good_cnt, ret_val=0;
+	unsigned long time;
+
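+	/* Make all Rx descriptors available to the hardware */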
+	ew32(RDT, rxdr->count - 1);
+
+	/* Calculate the loop count based on the largest descriptor ring.
+	 * The idea is to wrap the largest ring a number of times using 64
+	 * send/receive pairs during each loop.
+	 */
+
+	if (rxdr->count <= txdr->count)
+		lc = ((txdr->count / 64) * 2) + 1;
+	else
+		lc = ((rxdr->count / 64) * 2) + 1;
+
+	k = l = 0;
+	for (j = 0; j <= lc; j++) { /* loop count loop */
+		for (i = 0; i < 64; i++) { /* send the packets */
+			e1000_create_lbtest_frame(txdr->buffer_info[i].skb,
+					1024);
+			pci_dma_sync_single_for_device(pdev,
+					txdr->buffer_info[k].dma,
+				    	txdr->buffer_info[k].length,
+				    	PCI_DMA_TODEVICE);
+			if (unlikely(++k == txdr->count)) k = 0;
+		}
+		ew32(TDT, k);
+		msleep(200);
+		time = jiffies; /* set the start time for the receive */
+		good_cnt = 0;
+		do { /* receive the sent packets */
+			pci_dma_sync_single_for_cpu(pdev,
+					rxdr->buffer_info[l].dma,
+				    	rxdr->buffer_info[l].length,
+				    	PCI_DMA_FROMDEVICE);
+
+			ret_val = e1000_check_lbtest_frame(
+					rxdr->buffer_info[l].skb,
+				   	1024);
+			if (!ret_val)
+				good_cnt++;
+			if (unlikely(++l == rxdr->count)) l = 0;
+			/* time + 20 msecs (200 msecs on 2.4) is more than
+			 * enough time to complete the receives; if it's
+			 * exceeded, break out and report an error
+			 */
+		} while (good_cnt < 64 && jiffies < (time + 20));
+		if (good_cnt != 64) {
+			ret_val = 13; /* ret_val is the same as mis-compare */
+			break;
+		}
+		if (jiffies >= (time + 2)) {
+			ret_val = 14; /* error code for time out error */
+			break;
+		}
+	} /* end loop count loop */
+	return ret_val;
+}
+
+static int e1000_loopback_test(struct e1000_adapter *adapter, u64 *data)
+{
+	struct e1000_hw *hw = &adapter->hw;
+
+	/* PHY loopback cannot be performed if SoL/IDER
+	 * sessions are active */
+	if (e1000_check_phy_reset_block(hw)) {
+		DPRINTK(DRV, ERR, "Cannot do PHY loopback test "
+		        "when SoL/IDER is active.\n");
+		*data = 0;
+		goto out;
+	}
+
+	*data = e1000_setup_desc_rings(adapter);
+	if (*data)
+		goto out;
+	*data = e1000_setup_loopback_test(adapter);
+	if (*data)
+		goto err_loopback;
+	*data = e1000_run_loopback_test(adapter);
+	e1000_loopback_cleanup(adapter);
+
+err_loopback:
+	e1000_free_desc_rings(adapter);
+out:
+	return *data;
+}
+
+static int e1000_link_test(struct e1000_adapter *adapter, u64 *data)
+{
+	struct e1000_hw *hw = &adapter->hw;
+	*data = 0;
+	if (hw->media_type == e1000_media_type_internal_serdes) {
+		int i = 0;
+		hw->serdes_link_down = true;
+
+		/* On some blade server designs, link establishment
+		 * could take as long as 2-3 minutes */
+		do {
+			e1000_check_for_link(hw);
+			if (!hw->serdes_link_down)
+				return *data;
+			msleep(20);
+		} while (i++ < 3750);
+
+		*data = 1;
+	} else {
+		e1000_check_for_link(hw);
+		if (hw->autoneg)  /* if autoneg is set, wait for it to complete */
+			msleep(4000);
+
+		if (!(er32(STATUS) & E1000_STATUS_LU)) {
+			*data = 1;
+		}
+	}
+	return *data;
+}
+
+static int e1000_get_sset_count(struct net_device *netdev, int sset)
+{
+	switch (sset) {
+	case ETH_SS_TEST:
+		return E1000_TEST_LEN;
+	case ETH_SS_STATS:
+		return E1000_STATS_LEN;
+	default:
+		return -EOPNOTSUPP;
+	}
+}
+
+static void e1000_diag_test(struct net_device *netdev,
+			    struct ethtool_test *eth_test, u64 *data)
+{
+	struct e1000_adapter *adapter = netdev_priv(netdev);
+	struct e1000_hw *hw = &adapter->hw;
+	bool if_running = netif_running(netdev);
+
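+	/* Flag that a self-test is in progress */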
+	set_bit(__E1000_TESTING, &adapter->flags);
+	if (eth_test->flags == ETH_TEST_FL_OFFLINE) {
+		/* Offline tests */
+
+		/* save speed, duplex, autoneg settings */
+		u16 autoneg_advertised = hw->autoneg_advertised;
+		u8 forced_speed_duplex = hw->forced_speed_duplex;
+		u8 autoneg = hw->autoneg;
+
+		DPRINTK(HW, INFO, "offline testing starting\n");
+
+		/* Link test performed before hardware reset so autoneg doesn't
+		 * interfere with test result */
+		if (e1000_link_test(adapter, &data[4]))
+			eth_test->flags |= ETH_TEST_FL_FAILED;
+
+		if (if_running)
+			/* indicate we're in test mode */
+			dev_close(netdev);
+		else
+			e1000_reset(adapter);
+
+		if (e1000_reg_test(adapter, &data[0]))
+			eth_test->flags |= ETH_TEST_FL_FAILED;
+
+		e1000_reset(adapter);
+		if (e1000_eeprom_test(adapter, &data[1]))
+			eth_test->flags |= ETH_TEST_FL_FAILED;
+
+		e1000_reset(adapter);
+		if (e1000_intr_test(adapter, &data[2]))
+			eth_test->flags |= ETH_TEST_FL_FAILED;
+
+		e1000_reset(adapter);
+		/* make sure the phy is powered up */
+		e1000_power_up_phy(adapter);
+		if (e1000_loopback_test(adapter, &data[3]))
+			eth_test->flags |= ETH_TEST_FL_FAILED;
+
+		/* restore speed, duplex, autoneg settings */
+		hw->autoneg_advertised = autoneg_advertised;
+		hw->forced_speed_duplex = forced_speed_duplex;
+		hw->autoneg = autoneg;
+
+		e1000_reset(adapter);
+		clear_bit(__E1000_TESTING, &adapter->flags);
+		if (if_running)
+			dev_open(netdev);
+	} else {
+		DPRINTK(HW, INFO, "online testing starting\n");
+		/* Online tests */
+		if (e1000_link_test(adapter, &data[4]))
+			eth_test->flags |= ETH_TEST_FL_FAILED;
+
+		/* Online tests aren't run; pass by default */
+		data[0] = 0;
+		data[1] = 0;
+		data[2] = 0;
+		data[3] = 0;
+
+		clear_bit(__E1000_TESTING, &adapter->flags);
+	}
+	msleep_interruptible(4 * 1000);
+}
+
+static int e1000_wol_exclusion(struct e1000_adapter *adapter,
+			       struct ethtool_wolinfo *wol)
+{
+	struct e1000_hw *hw = &adapter->hw;
+	int retval = 1; /* fail by default */
+
+	switch (hw->device_id) {
+	case E1000_DEV_ID_82542:
+	case E1000_DEV_ID_82543GC_FIBER:
+	case E1000_DEV_ID_82543GC_COPPER:
+	case E1000_DEV_ID_82544EI_FIBER:
+	case E1000_DEV_ID_82546EB_QUAD_COPPER:
+	case E1000_DEV_ID_82545EM_FIBER:
+	case E1000_DEV_ID_82545EM_COPPER:
+	case E1000_DEV_ID_82546GB_QUAD_COPPER:
+	case E1000_DEV_ID_82546GB_PCIE:
+	case E1000_DEV_ID_82571EB_SERDES_QUAD:
+		/* these don't support WoL at all */
+		wol->supported = 0;
+		break;
+	case E1000_DEV_ID_82546EB_FIBER:
+	case E1000_DEV_ID_82546GB_FIBER:
+	case E1000_DEV_ID_82571EB_FIBER:
+	case E1000_DEV_ID_82571EB_SERDES:
+	case E1000_DEV_ID_82571EB_COPPER:
+		/* Wake events not supported on port B */
+		if (er32(STATUS) & E1000_STATUS_FUNC_1) {
+			wol->supported = 0;
+			break;
+		}
+		/* return success for non-excluded adapter ports */
+		retval = 0;
+		break;
+	case E1000_DEV_ID_82571EB_QUAD_COPPER:
+	case E1000_DEV_ID_82571EB_QUAD_FIBER:
+	case E1000_DEV_ID_82571EB_QUAD_COPPER_LOWPROFILE:
+	case E1000_DEV_ID_82571PT_QUAD_COPPER:
+	case E1000_DEV_ID_82546GB_QUAD_COPPER_KSP3:
+		/* quad port adapters only support WoL on port A */
+		if (!adapter->quad_port_a) {
+			wol->supported = 0;
+			break;
+		}
+		/* return success for non-excluded adapter ports */
+		retval = 0;
+		break;
+	default:
+		/* Dual port cards only support WoL on port A from now on,
+		 * unless it was enabled in the EEPROM for port B,
+		 * so exclude FUNC_1 ports from having WoL enabled */
+		if (er32(STATUS) & E1000_STATUS_FUNC_1 &&
+		    !adapter->eeprom_wol) {
+			wol->supported = 0;
+			break;
+		}
+
+		retval = 0;
+	}
+
+	return retval;
+}
+
+static void e1000_get_wol(struct net_device *netdev,
+			  struct ethtool_wolinfo *wol)
+{
+	struct e1000_adapter *adapter = netdev_priv(netdev);
+	struct e1000_hw *hw = &adapter->hw;
+
+	wol->supported = WAKE_UCAST | WAKE_MCAST |
+	                 WAKE_BCAST | WAKE_MAGIC;
+	wol->wolopts = 0;
+
+	/* this function will set ->supported = 0 and return 1 if wol is not
+	 * supported by this hardware */
+	if (e1000_wol_exclusion(adapter, wol) ||
+	    !device_can_wakeup(&adapter->pdev->dev))
+		return;
+
+	/* apply any specific unsupported masks here */
+	switch (hw->device_id) {
+	case E1000_DEV_ID_82546GB_QUAD_COPPER_KSP3:
+		/* KSP3 does not support UCAST wake-ups */
+		wol->supported &= ~WAKE_UCAST;
+
+		if (adapter->wol & E1000_WUFC_EX)
+			DPRINTK(DRV, ERR, "Interface does not support "
+		        "directed (unicast) frame wake-up packets\n");
+		break;
+	default:
+		break;
+	}
+
+	if (adapter->wol & E1000_WUFC_EX)
+		wol->wolopts |= WAKE_UCAST;
+	if (adapter->wol & E1000_WUFC_MC)
+		wol->wolopts |= WAKE_MCAST;
+	if (adapter->wol & E1000_WUFC_BC)
+		wol->wolopts |= WAKE_BCAST;
+	if (adapter->wol & E1000_WUFC_MAG)
+		wol->wolopts |= WAKE_MAGIC;
+
+	return;
+}
+
+static int e1000_set_wol(struct net_device *netdev, struct ethtool_wolinfo *wol)
+{
+	struct e1000_adapter *adapter = netdev_priv(netdev);
+	struct e1000_hw *hw = &adapter->hw;
+
+	if (wol->wolopts & (WAKE_PHY | WAKE_ARP | WAKE_MAGICSECURE))
+		return -EOPNOTSUPP;
+
+	if (e1000_wol_exclusion(adapter, wol) ||
+	    !device_can_wakeup(&adapter->pdev->dev))
+		return wol->wolopts ? -EOPNOTSUPP : 0;
+
+	switch (hw->device_id) {
+	case E1000_DEV_ID_82546GB_QUAD_COPPER_KSP3:
+		if (wol->wolopts & WAKE_UCAST) {
+			DPRINTK(DRV, ERR, "Interface does not support "
+		        "directed (unicast) frame wake-up packets\n");
+			return -EOPNOTSUPP;
+		}
+		break;
+	default:
+		break;
+	}
+
+	/* these settings will always override what we currently have */
+	adapter->wol = 0;
+
+	if (wol->wolopts & WAKE_UCAST)
+		adapter->wol |= E1000_WUFC_EX;
+	if (wol->wolopts & WAKE_MCAST)
+		adapter->wol |= E1000_WUFC_MC;
+	if (wol->wolopts & WAKE_BCAST)
+		adapter->wol |= E1000_WUFC_BC;
+	if (wol->wolopts & WAKE_MAGIC)
+		adapter->wol |= E1000_WUFC_MAG;
+
+	device_set_wakeup_enable(&adapter->pdev->dev, adapter->wol);
+
+	return 0;
+}
+
+/* toggle LED 4 times per second = 2 "blinks" per second */
+#define E1000_ID_INTERVAL	(HZ/4)
+
+/* bit defines for adapter->led_status */
+#define E1000_LED_ON		0
+
+static void e1000_led_blink_callback(unsigned long data)
+{
+	struct e1000_adapter *adapter = (struct e1000_adapter *) data;
+	struct e1000_hw *hw = &adapter->hw;
+
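+	/* Toggle the LED and re-arm the timer for the next blink interval */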
+	if (test_and_change_bit(E1000_LED_ON, &adapter->led_status))
+		e1000_led_off(hw);
+	else
+		e1000_led_on(hw);
+
+	mod_timer(&adapter->blink_timer, jiffies + E1000_ID_INTERVAL);
+}
+
+static int e1000_phys_id(struct net_device *netdev, u32 data)
+{
+	struct e1000_adapter *adapter = netdev_priv(netdev);
+	struct e1000_hw *hw = &adapter->hw;
+
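+	/* A zero argument means "blink indefinitely"; use a very long delay */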
+	if (!data)
+		data = INT_MAX;
+
+	if (hw->mac_type < e1000_82571) {
+		if (!adapter->blink_timer.function) {
+			init_timer(&adapter->blink_timer);
+			adapter->blink_timer.function = e1000_led_blink_callback;
+			adapter->blink_timer.data = (unsigned long)adapter;
+		}
+		e1000_setup_led(hw);
+		mod_timer(&adapter->blink_timer, jiffies);
+		msleep_interruptible(data * 1000);
+		del_timer_sync(&adapter->blink_timer);
+	} else if (hw->phy_type == e1000_phy_ife) {
+		if (!adapter->blink_timer.function) {
+			init_timer(&adapter->blink_timer);
+			adapter->blink_timer.function = e1000_led_blink_callback;
+			adapter->blink_timer.data = (unsigned long)adapter;
+		}
+		mod_timer(&adapter->blink_timer, jiffies);
+		msleep_interruptible(data * 1000);
+		del_timer_sync(&adapter->blink_timer);
+		e1000_write_phy_reg(&(adapter->hw), IFE_PHY_SPECIAL_CONTROL_LED, 0);
+	} else {
+		e1000_blink_led_start(hw);
+		msleep_interruptible(data * 1000);
+	}
+
+	e1000_led_off(hw);
+	clear_bit(E1000_LED_ON, &adapter->led_status);
+	e1000_cleanup_led(hw);
+
+	return 0;
+}
+
+static int e1000_nway_reset(struct net_device *netdev)
+{
+	struct e1000_adapter *adapter = netdev_priv(netdev);
+	if (netif_running(netdev))
+		e1000_reinit_locked(adapter);
+	return 0;
+}
+
+static void e1000_get_ethtool_stats(struct net_device *netdev,
+				    struct ethtool_stats *stats, u64 *data)
+{
+	struct e1000_adapter *adapter = netdev_priv(netdev);
+	int i;
+
+	e1000_update_stats(adapter);
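+	/* Copy each statistic out of the adapter structure using the offset
+	 * and size recorded in e1000_gstrings_stats */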
+	for (i = 0; i < E1000_GLOBAL_STATS_LEN; i++) {
+		char *p = (char *)adapter+e1000_gstrings_stats[i].stat_offset;
+		data[i] = (e1000_gstrings_stats[i].sizeof_stat ==
+			sizeof(u64)) ? *(u64 *)p : *(u32 *)p;
+	}
+/*	BUG_ON(i != E1000_STATS_LEN); */
+}
+
+static void e1000_get_strings(struct net_device *netdev, u32 stringset,
+			      u8 *data)
+{
+	u8 *p = data;
+	int i;
+
+	switch (stringset) {
+	case ETH_SS_TEST:
+		memcpy(data, *e1000_gstrings_test,
+			sizeof(e1000_gstrings_test));
+		break;
+	case ETH_SS_STATS:
+		for (i = 0; i < E1000_GLOBAL_STATS_LEN; i++) {
+			memcpy(p, e1000_gstrings_stats[i].stat_string,
+			       ETH_GSTRING_LEN);
+			p += ETH_GSTRING_LEN;
+		}
+/*		BUG_ON(p - data != E1000_STATS_LEN * ETH_GSTRING_LEN); */
+		break;
+	}
+}
+
+static const struct ethtool_ops e1000_ethtool_ops = {
+	.get_settings           = e1000_get_settings,
+	.set_settings           = e1000_set_settings,
+	.get_drvinfo            = e1000_get_drvinfo,
+	.get_regs_len           = e1000_get_regs_len,
+	.get_regs               = e1000_get_regs,
+	.get_wol                = e1000_get_wol,
+	.set_wol                = e1000_set_wol,
+	.get_msglevel           = e1000_get_msglevel,
+	.set_msglevel           = e1000_set_msglevel,
+	.nway_reset             = e1000_nway_reset,
+	.get_link               = ethtool_op_get_link,
+	.get_eeprom_len         = e1000_get_eeprom_len,
+	.get_eeprom             = e1000_get_eeprom,
+	.set_eeprom             = e1000_set_eeprom,
+	.get_ringparam          = e1000_get_ringparam,
+	.set_ringparam          = e1000_set_ringparam,
+	.get_pauseparam         = e1000_get_pauseparam,
+	.set_pauseparam         = e1000_set_pauseparam,
+	.get_rx_csum            = e1000_get_rx_csum,
+	.set_rx_csum            = e1000_set_rx_csum,
+	.get_tx_csum            = e1000_get_tx_csum,
+	.set_tx_csum            = e1000_set_tx_csum,
+	.set_sg                 = ethtool_op_set_sg,
+	.set_tso                = e1000_set_tso,
+	.self_test              = e1000_diag_test,
+	.get_strings            = e1000_get_strings,
+	.phys_id                = e1000_phys_id,
+	.get_ethtool_stats      = e1000_get_ethtool_stats,
+	.get_sset_count		= e1000_get_sset_count,
+};
+
+void e1000_set_ethtool_ops(struct net_device *netdev)
+{
+	SET_ETHTOOL_OPS(netdev, &e1000_ethtool_ops);
+}
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/devices/e1000/e1000_hw-2.6.31-ethercat.c	Wed Apr 13 22:06:28 2011 +0200
@@ -0,0 +1,8878 @@
+/*******************************************************************************
+
+  Intel PRO/1000 Linux driver
+  Copyright(c) 1999 - 2006 Intel Corporation.
+
+  This program is free software; you can redistribute it and/or modify it
+  under the terms and conditions of the GNU General Public License,
+  version 2, as published by the Free Software Foundation.
+
+  This program is distributed in the hope it will be useful, but WITHOUT
+  ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+  FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
+  more details.
+
+  You should have received a copy of the GNU General Public License along with
+  this program; if not, write to the Free Software Foundation, Inc.,
+  51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
+
+  The full GNU General Public License is included in this distribution in
+  the file called "COPYING".
+
+  Contact Information:
+  Linux NICS <linux.nics@intel.com>
+  e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
+  Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
+
+*******************************************************************************/
+
+/* e1000_hw.c
+ * Shared functions for accessing and configuring the MAC
+ */
+
+
+#include "e1000_hw-2.6.31-ethercat.h"
+
+static s32 e1000_swfw_sync_acquire(struct e1000_hw *hw, u16 mask);
+static void e1000_swfw_sync_release(struct e1000_hw *hw, u16 mask);
+static s32 e1000_read_kmrn_reg(struct e1000_hw *hw, u32 reg_addr, u16 *data);
+static s32 e1000_write_kmrn_reg(struct e1000_hw *hw, u32 reg_addr, u16 data);
+static s32 e1000_get_software_semaphore(struct e1000_hw *hw);
+static void e1000_release_software_semaphore(struct e1000_hw *hw);
+
+static u8 e1000_arc_subsystem_valid(struct e1000_hw *hw);
+static s32 e1000_check_downshift(struct e1000_hw *hw);
+static s32 e1000_check_polarity(struct e1000_hw *hw,
+				e1000_rev_polarity *polarity);
+static void e1000_clear_hw_cntrs(struct e1000_hw *hw);
+static void e1000_clear_vfta(struct e1000_hw *hw);
+static s32 e1000_commit_shadow_ram(struct e1000_hw *hw);
+static s32 e1000_config_dsp_after_link_change(struct e1000_hw *hw,
+					      bool link_up);
+static s32 e1000_config_fc_after_link_up(struct e1000_hw *hw);
+static s32 e1000_detect_gig_phy(struct e1000_hw *hw);
+static s32 e1000_erase_ich8_4k_segment(struct e1000_hw *hw, u32 bank);
+static s32 e1000_get_auto_rd_done(struct e1000_hw *hw);
+static s32 e1000_get_cable_length(struct e1000_hw *hw, u16 *min_length,
+				  u16 *max_length);
+static s32 e1000_get_hw_eeprom_semaphore(struct e1000_hw *hw);
+static s32 e1000_get_phy_cfg_done(struct e1000_hw *hw);
+static s32 e1000_get_software_flag(struct e1000_hw *hw);
+static s32 e1000_ich8_cycle_init(struct e1000_hw *hw);
+static s32 e1000_ich8_flash_cycle(struct e1000_hw *hw, u32 timeout);
+static s32 e1000_id_led_init(struct e1000_hw *hw);
+static s32 e1000_init_lcd_from_nvm_config_region(struct e1000_hw *hw,
+						 u32 cnf_base_addr,
+						 u32 cnf_size);
+static s32 e1000_init_lcd_from_nvm(struct e1000_hw *hw);
+static void e1000_init_rx_addrs(struct e1000_hw *hw);
+static void e1000_initialize_hardware_bits(struct e1000_hw *hw);
+static bool e1000_is_onboard_nvm_eeprom(struct e1000_hw *hw);
+static s32 e1000_kumeran_lock_loss_workaround(struct e1000_hw *hw);
+static s32 e1000_mng_enable_host_if(struct e1000_hw *hw);
+static s32 e1000_mng_host_if_write(struct e1000_hw *hw, u8 *buffer, u16 length,
+				   u16 offset, u8 *sum);
+static s32 e1000_mng_write_cmd_header(struct e1000_hw* hw,
+				      struct e1000_host_mng_command_header
+				      *hdr);
+static s32 e1000_mng_write_commit(struct e1000_hw *hw);
+static s32 e1000_phy_ife_get_info(struct e1000_hw *hw,
+				  struct e1000_phy_info *phy_info);
+static s32 e1000_phy_igp_get_info(struct e1000_hw *hw,
+				  struct e1000_phy_info *phy_info);
+static s32 e1000_read_eeprom_eerd(struct e1000_hw *hw, u16 offset, u16 words,
+				  u16 *data);
+static s32 e1000_write_eeprom_eewr(struct e1000_hw *hw, u16 offset, u16 words,
+				   u16 *data);
+static s32 e1000_poll_eerd_eewr_done(struct e1000_hw *hw, int eerd);
+static s32 e1000_phy_m88_get_info(struct e1000_hw *hw,
+				  struct e1000_phy_info *phy_info);
+static void e1000_put_hw_eeprom_semaphore(struct e1000_hw *hw);
+static s32 e1000_read_ich8_byte(struct e1000_hw *hw, u32 index, u8 *data);
+static s32 e1000_verify_write_ich8_byte(struct e1000_hw *hw, u32 index,
+					u8 byte);
+static s32 e1000_write_ich8_byte(struct e1000_hw *hw, u32 index, u8 byte);
+static s32 e1000_read_ich8_word(struct e1000_hw *hw, u32 index, u16 *data);
+static s32 e1000_read_ich8_data(struct e1000_hw *hw, u32 index, u32 size,
+				u16 *data);
+static s32 e1000_write_ich8_data(struct e1000_hw *hw, u32 index, u32 size,
+				 u16 data);
+static s32 e1000_read_eeprom_ich8(struct e1000_hw *hw, u16 offset, u16 words,
+				  u16 *data);
+static s32 e1000_write_eeprom_ich8(struct e1000_hw *hw, u16 offset, u16 words,
+				   u16 *data);
+static void e1000_release_software_flag(struct e1000_hw *hw);
+static s32 e1000_set_d3_lplu_state(struct e1000_hw *hw, bool active);
+static s32 e1000_set_d0_lplu_state(struct e1000_hw *hw, bool active);
+static s32 e1000_set_pci_ex_no_snoop(struct e1000_hw *hw, u32 no_snoop);
+static void e1000_set_pci_express_master_disable(struct e1000_hw *hw);
+static s32 e1000_wait_autoneg(struct e1000_hw *hw);
+static void e1000_write_reg_io(struct e1000_hw *hw, u32 offset, u32 value);
+static s32 e1000_set_phy_type(struct e1000_hw *hw);
+static void e1000_phy_init_script(struct e1000_hw *hw);
+static s32 e1000_setup_copper_link(struct e1000_hw *hw);
+static s32 e1000_setup_fiber_serdes_link(struct e1000_hw *hw);
+static s32 e1000_adjust_serdes_amplitude(struct e1000_hw *hw);
+static s32 e1000_phy_force_speed_duplex(struct e1000_hw *hw);
+static s32 e1000_config_mac_to_phy(struct e1000_hw *hw);
+static void e1000_raise_mdi_clk(struct e1000_hw *hw, u32 *ctrl);
+static void e1000_lower_mdi_clk(struct e1000_hw *hw, u32 *ctrl);
+static void e1000_shift_out_mdi_bits(struct e1000_hw *hw, u32 data,
+				     u16 count);
+static u16 e1000_shift_in_mdi_bits(struct e1000_hw *hw);
+static s32 e1000_phy_reset_dsp(struct e1000_hw *hw);
+static s32 e1000_write_eeprom_spi(struct e1000_hw *hw, u16 offset,
+                                      u16 words, u16 *data);
+static s32 e1000_write_eeprom_microwire(struct e1000_hw *hw, u16 offset,
+					u16 words, u16 *data);
+static s32 e1000_spi_eeprom_ready(struct e1000_hw *hw);
+static void e1000_raise_ee_clk(struct e1000_hw *hw, u32 *eecd);
+static void e1000_lower_ee_clk(struct e1000_hw *hw, u32 *eecd);
+static void e1000_shift_out_ee_bits(struct e1000_hw *hw, u16 data, u16 count);
+static s32 e1000_write_phy_reg_ex(struct e1000_hw *hw, u32 reg_addr,
+				  u16 phy_data);
+static s32 e1000_read_phy_reg_ex(struct e1000_hw *hw,u32 reg_addr,
+				 u16 *phy_data);
+static u16 e1000_shift_in_ee_bits(struct e1000_hw *hw, u16 count);
+static s32 e1000_acquire_eeprom(struct e1000_hw *hw);
+static void e1000_release_eeprom(struct e1000_hw *hw);
+static void e1000_standby_eeprom(struct e1000_hw *hw);
+static s32 e1000_set_vco_speed(struct e1000_hw *hw);
+static s32 e1000_polarity_reversal_workaround(struct e1000_hw *hw);
+static s32 e1000_set_phy_mode(struct e1000_hw *hw);
+static s32 e1000_host_if_read_cookie(struct e1000_hw *hw, u8 *buffer);
+static u8 e1000_calculate_mng_checksum(char *buffer, u32 length);
+static s32 e1000_configure_kmrn_for_10_100(struct e1000_hw *hw, u16 duplex);
+static s32 e1000_configure_kmrn_for_1000(struct e1000_hw *hw);
+static s32 e1000_do_read_eeprom(struct e1000_hw *hw, u16 offset, u16 words, u16 *data);
+static s32 e1000_do_write_eeprom(struct e1000_hw *hw, u16 offset, u16 words, u16 *data);
+
+/* IGP cable length table */
+static const
+u16 e1000_igp_cable_length_table[IGP01E1000_AGC_LENGTH_TABLE_SIZE] =
+    { 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5,
+      5, 10, 10, 10, 10, 10, 10, 10, 20, 20, 20, 20, 20, 25, 25, 25,
+      25, 25, 25, 25, 30, 30, 30, 30, 40, 40, 40, 40, 40, 40, 40, 40,
+      40, 50, 50, 50, 50, 50, 50, 50, 60, 60, 60, 60, 60, 60, 60, 60,
+      60, 70, 70, 70, 70, 70, 70, 80, 80, 80, 80, 80, 80, 90, 90, 90,
+      90, 90, 90, 90, 90, 90, 100, 100, 100, 100, 100, 100, 100, 100, 100, 100,
+      100, 100, 100, 100, 110, 110, 110, 110, 110, 110, 110, 110, 110, 110, 110, 110,
+      110, 110, 110, 110, 110, 110, 120, 120, 120, 120, 120, 120, 120, 120, 120, 120};
+
+static const
+u16 e1000_igp_2_cable_length_table[IGP02E1000_AGC_LENGTH_TABLE_SIZE] =
+    { 0, 0, 0, 0, 0, 0, 0, 0, 3, 5, 8, 11, 13, 16, 18, 21,
+      0, 0, 0, 3, 6, 10, 13, 16, 19, 23, 26, 29, 32, 35, 38, 41,
+      6, 10, 14, 18, 22, 26, 30, 33, 37, 41, 44, 48, 51, 54, 58, 61,
+      21, 26, 31, 35, 40, 44, 49, 53, 57, 61, 65, 68, 72, 75, 79, 82,
+      40, 45, 51, 56, 61, 66, 70, 75, 79, 83, 87, 91, 94, 98, 101, 104,
+      60, 66, 72, 77, 82, 87, 92, 96, 100, 104, 108, 111, 114, 117, 119, 121,
+      83, 89, 95, 100, 105, 109, 113, 116, 119, 122, 124,
+      104, 109, 114, 118, 121, 124};
+
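+/* Serializes EEPROM accesses */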
+static DEFINE_SPINLOCK(e1000_eeprom_lock);
+
+/******************************************************************************
+ * Set the phy type member in the hw struct.
+ *
+ * hw - Struct containing variables accessed by shared code
+ *****************************************************************************/
+static s32 e1000_set_phy_type(struct e1000_hw *hw)
+{
+    DEBUGFUNC("e1000_set_phy_type");
+
+    if (hw->mac_type == e1000_undefined)
+        return -E1000_ERR_PHY_TYPE;
+
+    switch (hw->phy_id) {
+    case M88E1000_E_PHY_ID:
+    case M88E1000_I_PHY_ID:
+    case M88E1011_I_PHY_ID:
+    case M88E1111_I_PHY_ID:
+        hw->phy_type = e1000_phy_m88;
+        break;
+    case IGP01E1000_I_PHY_ID:
+        if (hw->mac_type == e1000_82541 ||
+            hw->mac_type == e1000_82541_rev_2 ||
+            hw->mac_type == e1000_82547 ||
+            hw->mac_type == e1000_82547_rev_2) {
+            hw->phy_type = e1000_phy_igp;
+            break;
+        }
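+        /* Fall Through */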
+    case IGP03E1000_E_PHY_ID:
+        hw->phy_type = e1000_phy_igp_3;
+        break;
+    case IFE_E_PHY_ID:
+    case IFE_PLUS_E_PHY_ID:
+    case IFE_C_E_PHY_ID:
+        hw->phy_type = e1000_phy_ife;
+        break;
+    case GG82563_E_PHY_ID:
+        if (hw->mac_type == e1000_80003es2lan) {
+            hw->phy_type = e1000_phy_gg82563;
+            break;
+        }
+        /* Fall Through */
+    default:
+        /* Should never have loaded on this device */
+        hw->phy_type = e1000_phy_undefined;
+        return -E1000_ERR_PHY_TYPE;
+    }
+
+    return E1000_SUCCESS;
+}
+
+/******************************************************************************
+ * IGP phy init script - initializes the GbE PHY
+ *
+ * hw - Struct containing variables accessed by shared code
+ *****************************************************************************/
+static void e1000_phy_init_script(struct e1000_hw *hw)
+{
+    u32 ret_val;
+    u16 phy_saved_data;
+
+    DEBUGFUNC("e1000_phy_init_script");
+
+    if (hw->phy_init_script) {
+        msleep(20);
+
+        /* Save off the current value of register 0x2F5B to be restored at
+         * the end of this routine. */
+        ret_val = e1000_read_phy_reg(hw, 0x2F5B, &phy_saved_data);
+
+        /* Disable the PHY transmitter */
+        e1000_write_phy_reg(hw, 0x2F5B, 0x0003);
+
+        msleep(20);
+
+        e1000_write_phy_reg(hw, 0x0000, 0x0140);
+
+        msleep(5);
+
+        switch (hw->mac_type) {
+        case e1000_82541:
+        case e1000_82547:
+            e1000_write_phy_reg(hw, 0x1F95, 0x0001);
+
+            e1000_write_phy_reg(hw, 0x1F71, 0xBD21);
+
+            e1000_write_phy_reg(hw, 0x1F79, 0x0018);
+
+            e1000_write_phy_reg(hw, 0x1F30, 0x1600);
+
+            e1000_write_phy_reg(hw, 0x1F31, 0x0014);
+
+            e1000_write_phy_reg(hw, 0x1F32, 0x161C);
+
+            e1000_write_phy_reg(hw, 0x1F94, 0x0003);
+
+            e1000_write_phy_reg(hw, 0x1F96, 0x003F);
+
+            e1000_write_phy_reg(hw, 0x2010, 0x0008);
+            break;
+
+        case e1000_82541_rev_2:
+        case e1000_82547_rev_2:
+            e1000_write_phy_reg(hw, 0x1F73, 0x0099);
+            break;
+        default:
+            break;
+        }
+
+        e1000_write_phy_reg(hw, 0x0000, 0x3300);
+
+        msleep(20);
+
+        /* Now enable the transmitter */
+        e1000_write_phy_reg(hw, 0x2F5B, phy_saved_data);
+
+        if (hw->mac_type == e1000_82547) {
+            u16 fused, fine, coarse;
+
+            /* Move to analog registers page */
+            e1000_read_phy_reg(hw, IGP01E1000_ANALOG_SPARE_FUSE_STATUS, &fused);
+
+            if (!(fused & IGP01E1000_ANALOG_SPARE_FUSE_ENABLED)) {
+                e1000_read_phy_reg(hw, IGP01E1000_ANALOG_FUSE_STATUS, &fused);
+
+                fine = fused & IGP01E1000_ANALOG_FUSE_FINE_MASK;
+                coarse = fused & IGP01E1000_ANALOG_FUSE_COARSE_MASK;
+
+                if (coarse > IGP01E1000_ANALOG_FUSE_COARSE_THRESH) {
+                    coarse -= IGP01E1000_ANALOG_FUSE_COARSE_10;
+                    fine -= IGP01E1000_ANALOG_FUSE_FINE_1;
+                } else if (coarse == IGP01E1000_ANALOG_FUSE_COARSE_THRESH)
+                    fine -= IGP01E1000_ANALOG_FUSE_FINE_10;
+
+                fused = (fused & IGP01E1000_ANALOG_FUSE_POLY_MASK) |
+                        (fine & IGP01E1000_ANALOG_FUSE_FINE_MASK) |
+                        (coarse & IGP01E1000_ANALOG_FUSE_COARSE_MASK);
+
+                e1000_write_phy_reg(hw, IGP01E1000_ANALOG_FUSE_CONTROL, fused);
+                e1000_write_phy_reg(hw, IGP01E1000_ANALOG_FUSE_BYPASS,
+                                    IGP01E1000_ANALOG_FUSE_ENABLE_SW_CONTROL);
+            }
+        }
+    }
+}
+
+/******************************************************************************
+ * Set the mac type member in the hw struct.
+ *
+ * hw - Struct containing variables accessed by shared code
+ *****************************************************************************/
+s32 e1000_set_mac_type(struct e1000_hw *hw)
+{
+	DEBUGFUNC("e1000_set_mac_type");
+
+	switch (hw->device_id) {
+	case E1000_DEV_ID_82542:
+		switch (hw->revision_id) {
+		case E1000_82542_2_0_REV_ID:
+			hw->mac_type = e1000_82542_rev2_0;
+			break;
+		case E1000_82542_2_1_REV_ID:
+			hw->mac_type = e1000_82542_rev2_1;
+			break;
+		default:
+			/* Invalid 82542 revision ID */
+			return -E1000_ERR_MAC_TYPE;
+		}
+		break;
+	case E1000_DEV_ID_82543GC_FIBER:
+	case E1000_DEV_ID_82543GC_COPPER:
+		hw->mac_type = e1000_82543;
+		break;
+	case E1000_DEV_ID_82544EI_COPPER:
+	case E1000_DEV_ID_82544EI_FIBER:
+	case E1000_DEV_ID_82544GC_COPPER:
+	case E1000_DEV_ID_82544GC_LOM:
+		hw->mac_type = e1000_82544;
+		break;
+	case E1000_DEV_ID_82540EM:
+	case E1000_DEV_ID_82540EM_LOM:
+	case E1000_DEV_ID_82540EP:
+	case E1000_DEV_ID_82540EP_LOM:
+	case E1000_DEV_ID_82540EP_LP:
+		hw->mac_type = e1000_82540;
+		break;
+	case E1000_DEV_ID_82545EM_COPPER:
+	case E1000_DEV_ID_82545EM_FIBER:
+		hw->mac_type = e1000_82545;
+		break;
+	case E1000_DEV_ID_82545GM_COPPER:
+	case E1000_DEV_ID_82545GM_FIBER:
+	case E1000_DEV_ID_82545GM_SERDES:
+		hw->mac_type = e1000_82545_rev_3;
+		break;
+	case E1000_DEV_ID_82546EB_COPPER:
+	case E1000_DEV_ID_82546EB_FIBER:
+	case E1000_DEV_ID_82546EB_QUAD_COPPER:
+		hw->mac_type = e1000_82546;
+		break;
+	case E1000_DEV_ID_82546GB_COPPER:
+	case E1000_DEV_ID_82546GB_FIBER:
+	case E1000_DEV_ID_82546GB_SERDES:
+	case E1000_DEV_ID_82546GB_PCIE:
+	case E1000_DEV_ID_82546GB_QUAD_COPPER:
+	case E1000_DEV_ID_82546GB_QUAD_COPPER_KSP3:
+		hw->mac_type = e1000_82546_rev_3;
+		break;
+	case E1000_DEV_ID_82541EI:
+	case E1000_DEV_ID_82541EI_MOBILE:
+	case E1000_DEV_ID_82541ER_LOM:
+		hw->mac_type = e1000_82541;
+		break;
+	case E1000_DEV_ID_82541ER:
+	case E1000_DEV_ID_82541GI:
+	case E1000_DEV_ID_82541GI_LF:
+	case E1000_DEV_ID_82541GI_MOBILE:
+		hw->mac_type = e1000_82541_rev_2;
+		break;
+	case E1000_DEV_ID_82547EI:
+	case E1000_DEV_ID_82547EI_MOBILE:
+		hw->mac_type = e1000_82547;
+		break;
+	case E1000_DEV_ID_82547GI:
+		hw->mac_type = e1000_82547_rev_2;
+		break;
+	case E1000_DEV_ID_82571EB_COPPER:
+	case E1000_DEV_ID_82571EB_FIBER:
+	case E1000_DEV_ID_82571EB_SERDES:
+	case E1000_DEV_ID_82571EB_SERDES_DUAL:
+	case E1000_DEV_ID_82571EB_SERDES_QUAD:
+	case E1000_DEV_ID_82571EB_QUAD_COPPER:
+	case E1000_DEV_ID_82571PT_QUAD_COPPER:
+	case E1000_DEV_ID_82571EB_QUAD_FIBER:
+	case E1000_DEV_ID_82571EB_QUAD_COPPER_LOWPROFILE:
+		hw->mac_type = e1000_82571;
+		break;
+	case E1000_DEV_ID_82572EI_COPPER:
+	case E1000_DEV_ID_82572EI_FIBER:
+	case E1000_DEV_ID_82572EI_SERDES:
+	case E1000_DEV_ID_82572EI:
+		hw->mac_type = e1000_82572;
+		break;
+	case E1000_DEV_ID_82573E:
+	case E1000_DEV_ID_82573E_IAMT:
+	case E1000_DEV_ID_82573L:
+		hw->mac_type = e1000_82573;
+		break;
+	case E1000_DEV_ID_80003ES2LAN_COPPER_SPT:
+	case E1000_DEV_ID_80003ES2LAN_SERDES_SPT:
+	case E1000_DEV_ID_80003ES2LAN_COPPER_DPT:
+	case E1000_DEV_ID_80003ES2LAN_SERDES_DPT:
+		hw->mac_type = e1000_80003es2lan;
+		break;
+	case E1000_DEV_ID_ICH8_IGP_M_AMT:
+	case E1000_DEV_ID_ICH8_IGP_AMT:
+	case E1000_DEV_ID_ICH8_IGP_C:
+	case E1000_DEV_ID_ICH8_IFE:
+	case E1000_DEV_ID_ICH8_IFE_GT:
+	case E1000_DEV_ID_ICH8_IFE_G:
+	case E1000_DEV_ID_ICH8_IGP_M:
+		hw->mac_type = e1000_ich8lan;
+		break;
+	default:
+		/* Should never have loaded on this device */
+		return -E1000_ERR_MAC_TYPE;
+	}
+
+	switch (hw->mac_type) {
+	case e1000_ich8lan:
+		hw->swfwhw_semaphore_present = true;
+		hw->asf_firmware_present = true;
+		break;
+	case e1000_80003es2lan:
+		hw->swfw_sync_present = true;
+		/* fall through */
+	case e1000_82571:
+	case e1000_82572:
+	case e1000_82573:
+		hw->eeprom_semaphore_present = true;
+		/* fall through */
+	case e1000_82541:
+	case e1000_82547:
+	case e1000_82541_rev_2:
+	case e1000_82547_rev_2:
+		hw->asf_firmware_present = true;
+		break;
+	default:
+		break;
+	}
+
+	/* The 82543 chip does not count tx_carrier_errors properly in
+	 * FD mode
+	 */
+	if (hw->mac_type == e1000_82543)
+		hw->bad_tx_carr_stats_fd = true;
+
+	/* capable of receiving management packets to the host */
+	if (hw->mac_type >= e1000_82571)
+		hw->has_manc2h = true;
+
+	/* On rare occasions, ESB2 systems would end up started without
+	 * the RX unit being turned on.
+	 */
+	if (hw->mac_type == e1000_80003es2lan)
+		hw->rx_needs_kicking = true;
+
+	if (hw->mac_type > e1000_82544)
+		hw->has_smbus = true;
+
+	return E1000_SUCCESS;
+}
+
+/*****************************************************************************
+ * Set media type and TBI compatibility.
+ *
+ * hw - Struct containing variables accessed by shared code
+ * **************************************************************************/
+void e1000_set_media_type(struct e1000_hw *hw)
+{
+    u32 status;
+
+    DEBUGFUNC("e1000_set_media_type");
+
+    if (hw->mac_type != e1000_82543) {
+        /* tbi_compatibility is only valid on 82543 */
+        hw->tbi_compatibility_en = false;
+    }
+
+    switch (hw->device_id) {
+    case E1000_DEV_ID_82545GM_SERDES:
+    case E1000_DEV_ID_82546GB_SERDES:
+    case E1000_DEV_ID_82571EB_SERDES:
+    case E1000_DEV_ID_82571EB_SERDES_DUAL:
+    case E1000_DEV_ID_82571EB_SERDES_QUAD:
+    case E1000_DEV_ID_82572EI_SERDES:
+    case E1000_DEV_ID_80003ES2LAN_SERDES_DPT:
+        hw->media_type = e1000_media_type_internal_serdes;
+        break;
+    default:
+        switch (hw->mac_type) {
+        case e1000_82542_rev2_0:
+        case e1000_82542_rev2_1:
+            hw->media_type = e1000_media_type_fiber;
+            break;
+        case e1000_ich8lan:
+        case e1000_82573:
+            /* The STATUS_TBIMODE bit is reserved or reused for this
+             * device.
+             */
+            hw->media_type = e1000_media_type_copper;
+            break;
+        default:
+            status = er32(STATUS);
+            if (status & E1000_STATUS_TBIMODE) {
+                hw->media_type = e1000_media_type_fiber;
+                /* tbi_compatibility not valid on fiber */
+                hw->tbi_compatibility_en = false;
+            } else {
+                hw->media_type = e1000_media_type_copper;
+            }
+            break;
+        }
+    }
+}
+
+/******************************************************************************
+ * Reset the transmit and receive units; mask and clear all interrupts.
+ *
+ * hw - Struct containing variables accessed by shared code
+ *****************************************************************************/
+s32 e1000_reset_hw(struct e1000_hw *hw)
+{
+    u32 ctrl;
+    u32 ctrl_ext;
+    u32 icr;
+    u32 manc;
+    u32 led_ctrl;
+    u32 timeout;
+    u32 extcnf_ctrl;
+    s32 ret_val;
+
+    DEBUGFUNC("e1000_reset_hw");
+
+    /* For 82542 (rev 2.0), disable MWI before issuing a device reset */
+    if (hw->mac_type == e1000_82542_rev2_0) {
+        DEBUGOUT("Disabling MWI on 82542 rev 2.0\n");
+        e1000_pci_clear_mwi(hw);
+    }
+
+    if (hw->bus_type == e1000_bus_type_pci_express) {
+        /* Prevent the PCI-E bus from sticking if there is no TLP connection
+         * on the last TLP read/write transaction when MAC is reset.
+         */
+        if (e1000_disable_pciex_master(hw) != E1000_SUCCESS) {
+            DEBUGOUT("PCI-E Master disable polling has failed.\n");
+        }
+    }
+
+    /* Clear interrupt mask to stop board from generating interrupts */
+    DEBUGOUT("Masking off all interrupts\n");
+    ew32(IMC, 0xffffffff);
+
+    /* Disable the Transmit and Receive units.  Then delay to allow
+     * any pending transactions to complete before we hit the MAC with
+     * the global reset.
+     */
+    ew32(RCTL, 0);
+    ew32(TCTL, E1000_TCTL_PSP);
+    E1000_WRITE_FLUSH();
+
+    /* The tbi_compatibility_on Flag must be cleared when Rctl is cleared. */
+    hw->tbi_compatibility_on = false;
+
+    /* Delay to allow any outstanding PCI transactions to complete before
+     * resetting the device
+     */
+    msleep(10);
+
+    ctrl = er32(CTRL);
+
+    /* Must reset the PHY before resetting the MAC */
+    if ((hw->mac_type == e1000_82541) || (hw->mac_type == e1000_82547)) {
+        ew32(CTRL, (ctrl | E1000_CTRL_PHY_RST));
+        msleep(5);
+    }
+
+    /* Must acquire the MDIO ownership before MAC reset.
+     * Ownership defaults to firmware after a reset. */
+    if (hw->mac_type == e1000_82573) {
+        timeout = 10;
+
+        extcnf_ctrl = er32(EXTCNF_CTRL);
+        extcnf_ctrl |= E1000_EXTCNF_CTRL_MDIO_SW_OWNERSHIP;
+
+        do {
+            ew32(EXTCNF_CTRL, extcnf_ctrl);
+            extcnf_ctrl = er32(EXTCNF_CTRL);
+
+            if (extcnf_ctrl & E1000_EXTCNF_CTRL_MDIO_SW_OWNERSHIP)
+                break;
+            else
+                extcnf_ctrl |= E1000_EXTCNF_CTRL_MDIO_SW_OWNERSHIP;
+
+            msleep(2);
+            timeout--;
+        } while (timeout);
+    }
+
+    /* Workaround for ICH8 bit corruption issue in FIFO memory */
+    if (hw->mac_type == e1000_ich8lan) {
+        /* Set Tx and Rx buffer allocation to 8k apiece. */
+        ew32(PBA, E1000_PBA_8K);
+        /* Set Packet Buffer Size to 16k. */
+        ew32(PBS, E1000_PBS_16K);
+    }
+
+    /* Issue a global reset to the MAC.  This will reset the chip's
+     * transmit, receive, DMA, and link units.  It will not affect
+     * the current PCI configuration.  The global reset bit is self-
+     * clearing, and should clear within a microsecond.
+     */
+    DEBUGOUT("Issuing a global reset to MAC\n");
+
+    switch (hw->mac_type) {
+        case e1000_82544:
+        case e1000_82540:
+        case e1000_82545:
+        case e1000_82546:
+        case e1000_82541:
+        case e1000_82541_rev_2:
+            /* These controllers can't ack the 64-bit write when issuing the
+             * reset, so use IO-mapping as a workaround to issue the reset */
+            E1000_WRITE_REG_IO(hw, CTRL, (ctrl | E1000_CTRL_RST));
+            break;
+        case e1000_82545_rev_3:
+        case e1000_82546_rev_3:
+            /* Reset is performed on a shadow of the control register */
+            ew32(CTRL_DUP, (ctrl | E1000_CTRL_RST));
+            break;
+        case e1000_ich8lan:
+            if (!hw->phy_reset_disable &&
+                e1000_check_phy_reset_block(hw) == E1000_SUCCESS) {
+                /* e1000_ich8lan PHY HW reset requires MAC CORE reset
+                 * at the same time to make sure the interface between
+                 * MAC and the external PHY is reset.
+                 */
+                ctrl |= E1000_CTRL_PHY_RST;
+            }
+
+            e1000_get_software_flag(hw);
+            ew32(CTRL, (ctrl | E1000_CTRL_RST));
+            msleep(5);
+            break;
+        default:
+            ew32(CTRL, (ctrl | E1000_CTRL_RST));
+            break;
+    }
+
+    /* After MAC reset, force reload of EEPROM to restore power-on settings to
+     * device.  Later controllers reload the EEPROM automatically, so just wait
+     * for reload to complete.
+     */
+    switch (hw->mac_type) {
+        case e1000_82542_rev2_0:
+        case e1000_82542_rev2_1:
+        case e1000_82543:
+        case e1000_82544:
+            /* Wait for reset to complete */
+            udelay(10);
+            ctrl_ext = er32(CTRL_EXT);
+            ctrl_ext |= E1000_CTRL_EXT_EE_RST;
+            ew32(CTRL_EXT, ctrl_ext);
+            E1000_WRITE_FLUSH();
+            /* Wait for EEPROM reload */
+            msleep(2);
+            break;
+        case e1000_82541:
+        case e1000_82541_rev_2:
+        case e1000_82547:
+        case e1000_82547_rev_2:
+            /* Wait for EEPROM reload */
+            msleep(20);
+            break;
+        case e1000_82573:
+            if (!e1000_is_onboard_nvm_eeprom(hw)) {
+                udelay(10);
+                ctrl_ext = er32(CTRL_EXT);
+                ctrl_ext |= E1000_CTRL_EXT_EE_RST;
+                ew32(CTRL_EXT, ctrl_ext);
+                E1000_WRITE_FLUSH();
+            }
+            /* fall through */
+        default:
+            /* Auto read done will delay 5ms or poll based on mac type */
+            ret_val = e1000_get_auto_rd_done(hw);
+            if (ret_val)
+                return ret_val;
+            break;
+    }
+
+    /* Disable HW ARPs on ASF enabled adapters */
+    if (hw->mac_type >= e1000_82540 && hw->mac_type <= e1000_82547_rev_2) {
+        manc = er32(MANC);
+        manc &= ~(E1000_MANC_ARP_EN);
+        ew32(MANC, manc);
+    }
+
+    if ((hw->mac_type == e1000_82541) || (hw->mac_type == e1000_82547)) {
+        e1000_phy_init_script(hw);
+
+        /* Configure activity LED after PHY reset */
+        led_ctrl = er32(LEDCTL);
+        led_ctrl &= IGP_ACTIVITY_LED_MASK;
+        led_ctrl |= (IGP_ACTIVITY_LED_ENABLE | IGP_LED3_MODE);
+        ew32(LEDCTL, led_ctrl);
+    }
+
+    /* Clear interrupt mask to stop board from generating interrupts */
+    DEBUGOUT("Masking off all interrupts\n");
+    ew32(IMC, 0xffffffff);
+
+    /* Clear any pending interrupt events. */
+    icr = er32(ICR);
+
+    /* If MWI was previously enabled, reenable it. */
+    if (hw->mac_type == e1000_82542_rev2_0) {
+        if (hw->pci_cmd_word & PCI_COMMAND_INVALIDATE)
+            e1000_pci_set_mwi(hw);
+    }
+
+    if (hw->mac_type == e1000_ich8lan) {
+        u32 kab = er32(KABGTXD);
+        kab |= E1000_KABGTXD_BGSQLBIAS;
+        ew32(KABGTXD, kab);
+    }
+
+    return E1000_SUCCESS;
+}
+
+/******************************************************************************
+ *
+ * Initialize a number of hardware-dependent bits
+ *
+ * hw: Struct containing variables accessed by shared code
+ *
+ * This function contains hardware limitation workarounds for PCI-E adapters
+ *
+ *****************************************************************************/
+static void e1000_initialize_hardware_bits(struct e1000_hw *hw)
+{
+    if ((hw->mac_type >= e1000_82571) && (!hw->initialize_hw_bits_disable)) {
+        /* Settings common to all PCI-express silicon */
+        u32 reg_ctrl, reg_ctrl_ext;
+        u32 reg_tarc0, reg_tarc1;
+        u32 reg_tctl;
+        u32 reg_txdctl, reg_txdctl1;
+
+        /* link autonegotiation/sync workarounds */
+        reg_tarc0 = er32(TARC0);
+        reg_tarc0 &= ~((1 << 30)|(1 << 29)|(1 << 28)|(1 << 27));
+
+        /* Enable not-done TX descriptor counting */
+        reg_txdctl = er32(TXDCTL);
+        reg_txdctl |= E1000_TXDCTL_COUNT_DESC;
+        ew32(TXDCTL, reg_txdctl);
+        reg_txdctl1 = er32(TXDCTL1);
+        reg_txdctl1 |= E1000_TXDCTL_COUNT_DESC;
+        ew32(TXDCTL1, reg_txdctl1);
+
+        switch (hw->mac_type) {
+            case e1000_82571:
+            case e1000_82572:
+                /* Clear PHY TX compatible mode bits */
+                reg_tarc1 = er32(TARC1);
+                reg_tarc1 &= ~((1 << 30)|(1 << 29));
+
+                /* link autonegotiation/sync workarounds */
+                reg_tarc0 |= ((1 << 26)|(1 << 25)|(1 << 24)|(1 << 23));
+
+                /* TX ring control fixes */
+                reg_tarc1 |= ((1 << 26)|(1 << 25)|(1 << 24));
+
+                /* Multiple read bit is reversed polarity */
+                reg_tctl = er32(TCTL);
+                if (reg_tctl & E1000_TCTL_MULR)
+                    reg_tarc1 &= ~(1 << 28);
+                else
+                    reg_tarc1 |= (1 << 28);
+
+                ew32(TARC1, reg_tarc1);
+                break;
+            case e1000_82573:
+                reg_ctrl_ext = er32(CTRL_EXT);
+                reg_ctrl_ext &= ~(1 << 23);
+                reg_ctrl_ext |= (1 << 22);
+
+                /* TX byte count fix */
+                reg_ctrl = er32(CTRL);
+                reg_ctrl &= ~(1 << 29);
+
+                ew32(CTRL_EXT, reg_ctrl_ext);
+                ew32(CTRL, reg_ctrl);
+                break;
+            case e1000_80003es2lan:
+                /* improve small packet performance for fiber/serdes */
+                if ((hw->media_type == e1000_media_type_fiber) ||
+                    (hw->media_type == e1000_media_type_internal_serdes)) {
+                    reg_tarc0 &= ~(1 << 20);
+                }
+
+                /* Multiple read bit is reversed polarity */
+                reg_tctl = er32(TCTL);
+                reg_tarc1 = er32(TARC1);
+                if (reg_tctl & E1000_TCTL_MULR)
+                    reg_tarc1 &= ~(1 << 28);
+                else
+                    reg_tarc1 |= (1 << 28);
+
+                ew32(TARC1, reg_tarc1);
+                break;
+            case e1000_ich8lan:
+                /* Reduce concurrent DMA requests to 3 from 4 */
+                if ((hw->revision_id < 3) ||
+                    ((hw->device_id != E1000_DEV_ID_ICH8_IGP_M_AMT) &&
+                     (hw->device_id != E1000_DEV_ID_ICH8_IGP_M)))
+                    reg_tarc0 |= ((1 << 29)|(1 << 28));
+
+                reg_ctrl_ext = er32(CTRL_EXT);
+                reg_ctrl_ext |= (1 << 22);
+                ew32(CTRL_EXT, reg_ctrl_ext);
+
+                /* workaround TX hang with TSO=on */
+                reg_tarc0 |= ((1 << 27)|(1 << 26)|(1 << 24)|(1 << 23));
+
+                /* Multiple read bit is reversed polarity */
+                reg_tctl = er32(TCTL);
+                reg_tarc1 = er32(TARC1);
+                if (reg_tctl & E1000_TCTL_MULR)
+                    reg_tarc1 &= ~(1 << 28);
+                else
+                    reg_tarc1 |= (1 << 28);
+
+                /* workaround TX hang with TSO=on */
+                reg_tarc1 |= ((1 << 30)|(1 << 26)|(1 << 24));
+
+                ew32(TARC1, reg_tarc1);
+                break;
+            default:
+                break;
+        }
+
+        ew32(TARC0, reg_tarc0);
+    }
+}
+
+/******************************************************************************
+ * Performs basic configuration of the adapter.
+ *
+ * hw - Struct containing variables accessed by shared code
+ *
+ * Assumes that the controller has previously been reset and is in a
+ * post-reset uninitialized state. Initializes the receive address registers,
+ * multicast table, and VLAN filter table. Calls routines to setup link
+ * configuration and flow control settings. Clears all on-chip counters. Leaves
+ * the transmit and receive units disabled and uninitialized.
+ *****************************************************************************/
+s32 e1000_init_hw(struct e1000_hw *hw)
+{
+    u32 ctrl;
+    u32 i;
+    s32 ret_val;
+    u32 mta_size;
+    u32 reg_data;
+    u32 ctrl_ext;
+
+    DEBUGFUNC("e1000_init_hw");
+
+    /* force full DMA clock frequency for 10/100 on ICH8 A0-B0 */
+    if ((hw->mac_type == e1000_ich8lan) &&
+        ((hw->revision_id < 3) ||
+         ((hw->device_id != E1000_DEV_ID_ICH8_IGP_M_AMT) &&
+          (hw->device_id != E1000_DEV_ID_ICH8_IGP_M)))) {
+            reg_data = er32(STATUS);
+            reg_data &= ~0x80000000;
+            ew32(STATUS, reg_data);
+    }
+
+    /* Initialize Identification LED */
+    ret_val = e1000_id_led_init(hw);
+    if (ret_val) {
+        DEBUGOUT("Error Initializing Identification LED\n");
+        return ret_val;
+    }
+
+    /* Set the media type and TBI compatibility */
+    e1000_set_media_type(hw);
+
+    /* Must be called after e1000_set_media_type because media_type is used */
+    e1000_initialize_hardware_bits(hw);
+
+    /* Disabling VLAN filtering. */
+    DEBUGOUT("Initializing the IEEE VLAN\n");
+    /* VET hardcoded to standard value and VFTA removed in ICH8 LAN */
+    if (hw->mac_type != e1000_ich8lan) {
+        if (hw->mac_type < e1000_82545_rev_3)
+            ew32(VET, 0);
+        e1000_clear_vfta(hw);
+    }
+
+    /* For 82542 (rev 2.0), disable MWI and put the receiver into reset */
+    if (hw->mac_type == e1000_82542_rev2_0) {
+        DEBUGOUT("Disabling MWI on 82542 rev 2.0\n");
+        e1000_pci_clear_mwi(hw);
+        ew32(RCTL, E1000_RCTL_RST);
+        E1000_WRITE_FLUSH();
+        msleep(5);
+    }
+
+    /* Setup the receive address. This involves initializing all of the Receive
+     * Address Registers (RARs 0 - 15).
+     */
+    e1000_init_rx_addrs(hw);
+
+    /* For 82542 (rev 2.0), take the receiver out of reset and enable MWI */
+    if (hw->mac_type == e1000_82542_rev2_0) {
+        ew32(RCTL, 0);
+        E1000_WRITE_FLUSH();
+        msleep(1);
+        if (hw->pci_cmd_word & PCI_COMMAND_INVALIDATE)
+            e1000_pci_set_mwi(hw);
+    }
+
+    /* Zero out the Multicast HASH table */
+    DEBUGOUT("Zeroing the MTA\n");
+    mta_size = E1000_MC_TBL_SIZE;
+    if (hw->mac_type == e1000_ich8lan)
+        mta_size = E1000_MC_TBL_SIZE_ICH8LAN;
+    for (i = 0; i < mta_size; i++) {
+        E1000_WRITE_REG_ARRAY(hw, MTA, i, 0);
+        /* use write flush to prevent Memory Write Block (MWB) from
+         * occurring when accessing our register space */
+        E1000_WRITE_FLUSH();
+    }
+
+    /* Set the PCI priority bit correctly in the CTRL register.  This
+     * determines if the adapter gives priority to receives, or if it
+     * gives equal priority to transmits and receives.  Valid only on
+     * 82542 and 82543 silicon.
+     */
+    if (hw->dma_fairness && hw->mac_type <= e1000_82543) {
+        ctrl = er32(CTRL);
+        ew32(CTRL, ctrl | E1000_CTRL_PRIOR);
+    }
+
+    switch (hw->mac_type) {
+    case e1000_82545_rev_3:
+    case e1000_82546_rev_3:
+        break;
+    default:
+        /* Workaround for PCI-X problem when BIOS sets MMRBC incorrectly. */
+        if (hw->bus_type == e1000_bus_type_pcix &&
+            e1000_pcix_get_mmrbc(hw) > 2048)
+            e1000_pcix_set_mmrbc(hw, 2048);
+        break;
+    }
+
+    /* More time needed for PHY to initialize */
+    if (hw->mac_type == e1000_ich8lan)
+        msleep(15);
+
+    /* Call a subroutine to configure the link and setup flow control. */
+    ret_val = e1000_setup_link(hw);
+
+    /* Set the transmit descriptor write-back policy */
+    if (hw->mac_type > e1000_82544) {
+        ctrl = er32(TXDCTL);
+        ctrl = (ctrl & ~E1000_TXDCTL_WTHRESH) | E1000_TXDCTL_FULL_TX_DESC_WB;
+        ew32(TXDCTL, ctrl);
+    }
+
+    if (hw->mac_type == e1000_82573) {
+        e1000_enable_tx_pkt_filtering(hw);
+    }
+
+    switch (hw->mac_type) {
+    default:
+        break;
+    case e1000_80003es2lan:
+        /* Enable retransmit on late collisions */
+        reg_data = er32(TCTL);
+        reg_data |= E1000_TCTL_RTLC;
+        ew32(TCTL, reg_data);
+
+        /* Configure Gigabit Carry Extend Padding */
+        reg_data = er32(TCTL_EXT);
+        reg_data &= ~E1000_TCTL_EXT_GCEX_MASK;
+        reg_data |= DEFAULT_80003ES2LAN_TCTL_EXT_GCEX;
+        ew32(TCTL_EXT, reg_data);
+
+        /* Configure Transmit Inter-Packet Gap */
+        reg_data = er32(TIPG);
+        reg_data &= ~E1000_TIPG_IPGT_MASK;
+        reg_data |= DEFAULT_80003ES2LAN_TIPG_IPGT_1000;
+        ew32(TIPG, reg_data);
+
+        reg_data = E1000_READ_REG_ARRAY(hw, FFLT, 0x0001);
+        reg_data &= ~0x00100000;
+        E1000_WRITE_REG_ARRAY(hw, FFLT, 0x0001, reg_data);
+        /* Fall through */
+    case e1000_82571:
+    case e1000_82572:
+    case e1000_ich8lan:
+        ctrl = er32(TXDCTL1);
+        ctrl = (ctrl & ~E1000_TXDCTL_WTHRESH) | E1000_TXDCTL_FULL_TX_DESC_WB;
+        ew32(TXDCTL1, ctrl);
+        break;
+    }
+
+    if (hw->mac_type == e1000_82573) {
+        u32 gcr = er32(GCR);
+        gcr |= E1000_GCR_L1_ACT_WITHOUT_L0S_RX;
+        ew32(GCR, gcr);
+    }
+
+    /* Clear all of the statistics registers (clear on read).  It is
+     * important that we do this after we have tried to establish link
+     * because the symbol error count will increment wildly if there
+     * is no link.
+     */
+    e1000_clear_hw_cntrs(hw);
+
+    /* ICH8 No-snoop bits are opposite polarity.
+     * Set to snoop by default after reset. */
+    if (hw->mac_type == e1000_ich8lan)
+        e1000_set_pci_ex_no_snoop(hw, PCI_EX_82566_SNOOP_ALL);
+
+    if (hw->device_id == E1000_DEV_ID_82546GB_QUAD_COPPER ||
+        hw->device_id == E1000_DEV_ID_82546GB_QUAD_COPPER_KSP3) {
+        ctrl_ext = er32(CTRL_EXT);
+        /* Relaxed ordering must be disabled to avoid a parity
+         * error crash in a PCI slot. */
+        ctrl_ext |= E1000_CTRL_EXT_RO_DIS;
+        ew32(CTRL_EXT, ctrl_ext);
+    }
+
+    return ret_val;
+}
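+
+/* Usage sketch (illustrative only): as the header comment above states,
+ * e1000_init_hw() assumes the controller has already been reset.  In the
+ * usual e1000 structure the caller's reset path looks roughly like the lines
+ * below; the helper name e1000_reset_hw() is taken from the upstream e1000
+ * driver and is not visible in this hunk:
+ *
+ *     e1000_reset_hw(hw);                      // bring MAC/PHY to a post-reset state
+ *     if (e1000_init_hw(hw) != E1000_SUCCESS)  // RARs, MTA, VLAN filter, link, flow control
+ *         DEBUGOUT("Hardware Init Failed\n");
+ */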
+
+/******************************************************************************
+ * Adjust SERDES output amplitude based on EEPROM setting.
+ *
+ * hw - Struct containing variables accessed by shared code.
+ *****************************************************************************/
+static s32 e1000_adjust_serdes_amplitude(struct e1000_hw *hw)
+{
+    u16 eeprom_data;
+    s32  ret_val;
+
+    DEBUGFUNC("e1000_adjust_serdes_amplitude");
+
+    if (hw->media_type != e1000_media_type_internal_serdes)
+        return E1000_SUCCESS;
+
+    switch (hw->mac_type) {
+    case e1000_82545_rev_3:
+    case e1000_82546_rev_3:
+        break;
+    default:
+        return E1000_SUCCESS;
+    }
+
+    ret_val = e1000_read_eeprom(hw, EEPROM_SERDES_AMPLITUDE, 1, &eeprom_data);
+    if (ret_val) {
+        return ret_val;
+    }
+
+    if (eeprom_data != EEPROM_RESERVED_WORD) {
+        /* Adjust SERDES output amplitude only. */
+        eeprom_data &= EEPROM_SERDES_AMPLITUDE_MASK;
+        ret_val = e1000_write_phy_reg(hw, M88E1000_PHY_EXT_CTRL, eeprom_data);
+        if (ret_val)
+            return ret_val;
+    }
+
+    return E1000_SUCCESS;
+}
+
+/******************************************************************************
+ * Configures flow control and link settings.
+ *
+ * hw - Struct containing variables accessed by shared code
+ *
+ * Determines which flow control settings to use. Calls the appropriate media-
+ * specific link configuration function. Configures the flow control settings.
+ * Assuming the adapter has a valid link partner, a valid link should be
+ * established. Assumes the hardware has previously been reset and the
+ * transmitter and receiver are not enabled.
+ *****************************************************************************/
+s32 e1000_setup_link(struct e1000_hw *hw)
+{
+    u32 ctrl_ext;
+    s32 ret_val;
+    u16 eeprom_data;
+
+    DEBUGFUNC("e1000_setup_link");
+
+    /* In the case of the phy reset being blocked, we already have a link.
+     * We do not have to set it up again. */
+    if (e1000_check_phy_reset_block(hw))
+        return E1000_SUCCESS;
+
+    /* Read and store word 0x0F of the EEPROM. This word contains bits
+     * that determine the hardware's default PAUSE (flow control) mode,
+     * a bit that determines whether the HW defaults to enabling or
+     * disabling auto-negotiation, and the direction of the
+     * SW defined pins. If there is no SW over-ride of the flow
+     * control setting, then the variable hw->fc will
+     * be initialized based on a value in the EEPROM.
+     */
+    if (hw->fc == E1000_FC_DEFAULT) {
+        switch (hw->mac_type) {
+        case e1000_ich8lan:
+        case e1000_82573:
+            hw->fc = E1000_FC_FULL;
+            break;
+        default:
+            ret_val = e1000_read_eeprom(hw, EEPROM_INIT_CONTROL2_REG,
+                                        1, &eeprom_data);
+            if (ret_val) {
+                DEBUGOUT("EEPROM Read Error\n");
+                return -E1000_ERR_EEPROM;
+            }
+            if ((eeprom_data & EEPROM_WORD0F_PAUSE_MASK) == 0)
+                hw->fc = E1000_FC_NONE;
+            else if ((eeprom_data & EEPROM_WORD0F_PAUSE_MASK) ==
+                    EEPROM_WORD0F_ASM_DIR)
+                hw->fc = E1000_FC_TX_PAUSE;
+            else
+                hw->fc = E1000_FC_FULL;
+            break;
+        }
+    }
+
+    /* We want to save off the original Flow Control configuration just
+     * in case we get disconnected and then reconnected into a different
+     * hub or switch with different Flow Control capabilities.
+     */
+    if (hw->mac_type == e1000_82542_rev2_0)
+        hw->fc &= (~E1000_FC_TX_PAUSE);
+
+    if ((hw->mac_type < e1000_82543) && (hw->report_tx_early == 1))
+        hw->fc &= (~E1000_FC_RX_PAUSE);
+
+    hw->original_fc = hw->fc;
+
+    DEBUGOUT1("After fix-ups FlowControl is now = %x\n", hw->fc);
+
+    /* Take the 4 bits from EEPROM word 0x0F that determine the initial
+     * polarity value for the SW controlled pins, and setup the
+     * Extended Device Control reg with that info.
+     * This is needed because one of the SW controlled pins is used for
+     * signal detection.  So this should be done before e1000_setup_pcs_link()
+     * or e1000_phy_setup() is called.
+     */
+    if (hw->mac_type == e1000_82543) {
+        ret_val = e1000_read_eeprom(hw, EEPROM_INIT_CONTROL2_REG,
+                                    1, &eeprom_data);
+        if (ret_val) {
+            DEBUGOUT("EEPROM Read Error\n");
+            return -E1000_ERR_EEPROM;
+        }
+        ctrl_ext = ((eeprom_data & EEPROM_WORD0F_SWPDIO_EXT) <<
+                    SWDPIO__EXT_SHIFT);
+        ew32(CTRL_EXT, ctrl_ext);
+    }
+
+    /* Call the necessary subroutine to configure the link. */
+    ret_val = (hw->media_type == e1000_media_type_copper) ?
+              e1000_setup_copper_link(hw) :
+              e1000_setup_fiber_serdes_link(hw);
+
+    /* Initialize the flow control address, type, and PAUSE timer
+     * registers to their default values.  This is done even if flow
+     * control is disabled, because it does not hurt anything to
+     * initialize these registers.
+     */
+    DEBUGOUT("Initializing the Flow Control address, type and timer regs\n");
+
+    /* FCAL/H and FCT are hardcoded to standard values in e1000_ich8lan. */
+    if (hw->mac_type != e1000_ich8lan) {
+        ew32(FCT, FLOW_CONTROL_TYPE);
+        ew32(FCAH, FLOW_CONTROL_ADDRESS_HIGH);
+        ew32(FCAL, FLOW_CONTROL_ADDRESS_LOW);
+    }
+
+    ew32(FCTTV, hw->fc_pause_time);
+
+    /* Set the flow control receive threshold registers.  Normally,
+     * these registers will be set to a default threshold that may be
+     * adjusted later by the driver's runtime code.  However, if the
+     * ability to transmit pause frames is not enabled, then these
+     * registers will be set to 0.
+     */
+    if (!(hw->fc & E1000_FC_TX_PAUSE)) {
+        ew32(FCRTL, 0);
+        ew32(FCRTH, 0);
+    } else {
+        /* We need to set up the Receive Threshold high and low water marks
+         * as well as (optionally) enabling the transmission of XON frames.
+         */
+        if (hw->fc_send_xon) {
+            ew32(FCRTL, (hw->fc_low_water | E1000_FCRTL_XONE));
+            ew32(FCRTH, hw->fc_high_water);
+        } else {
+            ew32(FCRTL, hw->fc_low_water);
+            ew32(FCRTH, hw->fc_high_water);
+        }
+    }
+    return ret_val;
+}
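+
+/* Usage sketch (illustrative only): callers that want to override the
+ * flow-control default set hw->fc before calling e1000_setup_link(); leaving
+ * it at E1000_FC_DEFAULT makes the function read EEPROM word 0x0F instead
+ * (or use E1000_FC_FULL on 82573/ICH8).  Forcing symmetric flow control
+ * might look like:
+ *
+ *     hw->fc = E1000_FC_FULL;              // advertise both Rx and Tx PAUSE
+ *     ret_val = e1000_setup_link(hw);      // also programs FCT/FCAH/FCAL and FCTTV
+ *
+ * The call site in this file is e1000_init_hw() above.
+ */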
+
+/******************************************************************************
+ * Sets up link for a fiber based or serdes based adapter
+ *
+ * hw - Struct containing variables accessed by shared code
+ *
+ * Manipulates Physical Coding Sublayer functions in order to configure
+ * link. Assumes the hardware has been previously reset and the transmitter
+ * and receiver are not enabled.
+ *****************************************************************************/
+static s32 e1000_setup_fiber_serdes_link(struct e1000_hw *hw)
+{
+    u32 ctrl;
+    u32 status;
+    u32 txcw = 0;
+    u32 i;
+    u32 signal = 0;
+    s32 ret_val;
+
+    DEBUGFUNC("e1000_setup_fiber_serdes_link");
+
+    /* On 82571 and 82572 Fiber connections, SerDes loopback mode persists
+     * until explicitly turned off or a power cycle is performed.  A read to
+     * the register does not indicate its status.  Therefore, we ensure
+     * loopback mode is disabled during initialization.
+     */
+    if (hw->mac_type == e1000_82571 || hw->mac_type == e1000_82572)
+        ew32(SCTL, E1000_DISABLE_SERDES_LOOPBACK);
+
+    /* On adapters with a MAC newer than 82544, SWDP 1 will be
+     * set when the optics detect a signal. On older adapters, it will be
+     * cleared when there is a signal.  This applies to fiber media only.
+     * If we're on serdes media, adjust the output amplitude to the value
+     * set in the EEPROM.
+     */
+    ctrl = er32(CTRL);
+    if (hw->media_type == e1000_media_type_fiber)
+        signal = (hw->mac_type > e1000_82544) ? E1000_CTRL_SWDPIN1 : 0;
+
+    ret_val = e1000_adjust_serdes_amplitude(hw);
+    if (ret_val)
+        return ret_val;
+
+    /* Take the link out of reset */
+    ctrl &= ~(E1000_CTRL_LRST);
+
+    /* Adjust VCO speed to improve BER performance */
+    ret_val = e1000_set_vco_speed(hw);
+    if (ret_val)
+        return ret_val;
+
+    e1000_config_collision_dist(hw);
+
+    /* Check for a software override of the flow control settings, and setup
+     * the device accordingly.  If auto-negotiation is enabled, then software
+     * will have to set the "PAUSE" bits to the correct value in the Transmit
+     * Config Word Register (TXCW) and re-start auto-negotiation.  However, if
+     * auto-negotiation is disabled, then software will have to manually
+     * configure the two flow control enable bits in the CTRL register.
+     *
+     * The possible values of the "fc" parameter are:
+     *      0:  Flow control is completely disabled
+     *      1:  Rx flow control is enabled (we can receive pause frames, but
+     *          not send pause frames).
+     *      2:  Tx flow control is enabled (we can send pause frames but we do
+     *          not support receiving pause frames).
+     *      3:  Both Rx and TX flow control (symmetric) are enabled.
+     */
+    switch (hw->fc) {
+    case E1000_FC_NONE:
+        /* Flow control is completely disabled by a software over-ride. */
+        txcw = (E1000_TXCW_ANE | E1000_TXCW_FD);
+        break;
+    case E1000_FC_RX_PAUSE:
+        /* RX Flow control is enabled and TX Flow control is disabled by a
+         * software over-ride. Since there really isn't a way to advertise
+         * that we are capable of RX Pause ONLY, we will advertise that we
+         * support both symmetric and asymmetric RX PAUSE. Later, we will
+         * disable the adapter's ability to send PAUSE frames.
+         */
+        txcw = (E1000_TXCW_ANE | E1000_TXCW_FD | E1000_TXCW_PAUSE_MASK);
+        break;
+    case E1000_FC_TX_PAUSE:
+        /* TX Flow control is enabled, and RX Flow control is disabled, by a
+         * software over-ride.
+         */
+        txcw = (E1000_TXCW_ANE | E1000_TXCW_FD | E1000_TXCW_ASM_DIR);
+        break;
+    case E1000_FC_FULL:
+        /* Flow control (both RX and TX) is enabled by a software over-ride. */
+        txcw = (E1000_TXCW_ANE | E1000_TXCW_FD | E1000_TXCW_PAUSE_MASK);
+        break;
+    default:
+        DEBUGOUT("Flow control param set incorrectly\n");
+        return -E1000_ERR_CONFIG;
+        break;
+    }
+
+    /* Since auto-negotiation is enabled, take the link out of reset (the link
+     * will be in reset, because we previously reset the chip). This will
+     * restart auto-negotiation.  If auto-negotiation is successful then the
+     * link-up status bit will be set and the flow control enable bits (RFCE
+     * and TFCE) will be set according to their negotiated value.
+     */
+    DEBUGOUT("Auto-negotiation enabled\n");
+
+    ew32(TXCW, txcw);
+    ew32(CTRL, ctrl);
+    E1000_WRITE_FLUSH();
+
+    hw->txcw = txcw;
+    msleep(1);
+
+    /* If we have a signal (the cable is plugged in) then poll for a "Link-Up"
+     * indication in the Device Status Register.  Time-out if a link isn't
+     * seen in 500 milliseconds (Auto-negotiation should complete in
+     * less than 500 milliseconds even if the other end is doing it in SW).
+     * For internal serdes, we just assume a signal is present, then poll.
+     */
+    if (hw->media_type == e1000_media_type_internal_serdes ||
+       (er32(CTRL) & E1000_CTRL_SWDPIN1) == signal) {
+        DEBUGOUT("Looking for Link\n");
+        for (i = 0; i < (LINK_UP_TIMEOUT / 10); i++) {
+            msleep(10);
+            status = er32(STATUS);
+            if (status & E1000_STATUS_LU) break;
+        }
+        if (i == (LINK_UP_TIMEOUT / 10)) {
+            DEBUGOUT("Never got a valid link from auto-neg!!!\n");
+            hw->autoneg_failed = 1;
+            /* AutoNeg failed to achieve a link, so we'll call
+             * e1000_check_for_link. This routine will force the link up if
+             * we detect a signal. This will allow us to communicate with
+             * non-autonegotiating link partners.
+             */
+            ret_val = e1000_check_for_link(hw);
+            if (ret_val) {
+                DEBUGOUT("Error while checking for link\n");
+                return ret_val;
+            }
+            hw->autoneg_failed = 0;
+        } else {
+            hw->autoneg_failed = 0;
+            DEBUGOUT("Valid Link Found\n");
+        }
+    } else {
+        DEBUGOUT("No Signal Detected\n");
+    }
+    return E1000_SUCCESS;
+}
+
+/******************************************************************************
+* Make sure we have a valid PHY and change PHY mode before link setup.
+*
+* hw - Struct containing variables accessed by shared code
+******************************************************************************/
+static s32 e1000_copper_link_preconfig(struct e1000_hw *hw)
+{
+    u32 ctrl;
+    s32 ret_val;
+    u16 phy_data;
+
+    DEBUGFUNC("e1000_copper_link_preconfig");
+
+    ctrl = er32(CTRL);
+    /* With 82543, we need to force speed and duplex on the MAC equal to what
+     * the PHY speed and duplex configuration is. In addition, we need to
+     * perform a hardware reset on the PHY to take it out of reset.
+     */
+    if (hw->mac_type > e1000_82543) {
+        ctrl |= E1000_CTRL_SLU;
+        ctrl &= ~(E1000_CTRL_FRCSPD | E1000_CTRL_FRCDPX);
+        ew32(CTRL, ctrl);
+    } else {
+        ctrl |= (E1000_CTRL_FRCSPD | E1000_CTRL_FRCDPX | E1000_CTRL_SLU);
+        ew32(CTRL, ctrl);
+        ret_val = e1000_phy_hw_reset(hw);
+        if (ret_val)
+            return ret_val;
+    }
+
+    /* Make sure we have a valid PHY */
+    ret_val = e1000_detect_gig_phy(hw);
+    if (ret_val) {
+        DEBUGOUT("Error, did not detect valid phy.\n");
+        return ret_val;
+    }
+    DEBUGOUT1("Phy ID = %x \n", hw->phy_id);
+
+    /* Set PHY to class A mode (if necessary) */
+    ret_val = e1000_set_phy_mode(hw);
+    if (ret_val)
+        return ret_val;
+
+    if ((hw->mac_type == e1000_82545_rev_3) ||
+       (hw->mac_type == e1000_82546_rev_3)) {
+        ret_val = e1000_read_phy_reg(hw, M88E1000_PHY_SPEC_CTRL, &phy_data);
+        phy_data |= 0x00000008;
+        ret_val = e1000_write_phy_reg(hw, M88E1000_PHY_SPEC_CTRL, phy_data);
+    }
+
+    if (hw->mac_type <= e1000_82543 ||
+        hw->mac_type == e1000_82541 || hw->mac_type == e1000_82547 ||
+        hw->mac_type == e1000_82541_rev_2 || hw->mac_type == e1000_82547_rev_2)
+        hw->phy_reset_disable = false;
+
+    return E1000_SUCCESS;
+}
+
+/********************************************************************
+* Copper link setup for e1000_phy_igp series.
+*
+* hw - Struct containing variables accessed by shared code
+*********************************************************************/
+static s32 e1000_copper_link_igp_setup(struct e1000_hw *hw)
+{
+    u32 led_ctrl;
+    s32 ret_val;
+    u16 phy_data;
+
+    DEBUGFUNC("e1000_copper_link_igp_setup");
+
+    if (hw->phy_reset_disable)
+        return E1000_SUCCESS;
+
+    ret_val = e1000_phy_reset(hw);
+    if (ret_val) {
+        DEBUGOUT("Error Resetting the PHY\n");
+        return ret_val;
+    }
+
+    /* Wait 15ms for MAC to configure PHY from eeprom settings */
+    msleep(15);
+    if (hw->mac_type != e1000_ich8lan) {
+        /* Configure activity LED after PHY reset */
+        led_ctrl = er32(LEDCTL);
+        led_ctrl &= IGP_ACTIVITY_LED_MASK;
+        led_ctrl |= (IGP_ACTIVITY_LED_ENABLE | IGP_LED3_MODE);
+        ew32(LEDCTL, led_ctrl);
+    }
+
+    /* The NVM settings will configure LPLU in D3 for IGP2 and IGP3 PHYs */
+    if (hw->phy_type == e1000_phy_igp) {
+        /* disable lplu d3 during driver init */
+        ret_val = e1000_set_d3_lplu_state(hw, false);
+        if (ret_val) {
+            DEBUGOUT("Error Disabling LPLU D3\n");
+            return ret_val;
+        }
+    }
+
+    /* disable lplu d0 during driver init */
+    ret_val = e1000_set_d0_lplu_state(hw, false);
+    if (ret_val) {
+        DEBUGOUT("Error Disabling LPLU D0\n");
+        return ret_val;
+    }
+    /* Configure mdi-mdix settings */
+    ret_val = e1000_read_phy_reg(hw, IGP01E1000_PHY_PORT_CTRL, &phy_data);
+    if (ret_val)
+        return ret_val;
+
+    if ((hw->mac_type == e1000_82541) || (hw->mac_type == e1000_82547)) {
+        hw->dsp_config_state = e1000_dsp_config_disabled;
+        /* Force MDI for earlier revs of the IGP PHY */
+        phy_data &= ~(IGP01E1000_PSCR_AUTO_MDIX | IGP01E1000_PSCR_FORCE_MDI_MDIX);
+        hw->mdix = 1;
+
+    } else {
+        hw->dsp_config_state = e1000_dsp_config_enabled;
+        phy_data &= ~IGP01E1000_PSCR_AUTO_MDIX;
+
+        switch (hw->mdix) {
+        case 1:
+            phy_data &= ~IGP01E1000_PSCR_FORCE_MDI_MDIX;
+            break;
+        case 2:
+            phy_data |= IGP01E1000_PSCR_FORCE_MDI_MDIX;
+            break;
+        case 0:
+        default:
+            phy_data |= IGP01E1000_PSCR_AUTO_MDIX;
+            break;
+        }
+    }
+    ret_val = e1000_write_phy_reg(hw, IGP01E1000_PHY_PORT_CTRL, phy_data);
+    if (ret_val)
+        return ret_val;
+
+    /* set auto-master slave resolution settings */
+    if (hw->autoneg) {
+        e1000_ms_type phy_ms_setting = hw->master_slave;
+
+        if (hw->ffe_config_state == e1000_ffe_config_active)
+            hw->ffe_config_state = e1000_ffe_config_enabled;
+
+        if (hw->dsp_config_state == e1000_dsp_config_activated)
+            hw->dsp_config_state = e1000_dsp_config_enabled;
+
+        /* When autonegotiation advertisement is only 1000Mbps then we
+         * should disable SmartSpeed and enable Auto MasterSlave
+         * resolution as hardware default. */
+        if (hw->autoneg_advertised == ADVERTISE_1000_FULL) {
+            /* Disable SmartSpeed */
+            ret_val = e1000_read_phy_reg(hw, IGP01E1000_PHY_PORT_CONFIG,
+                                         &phy_data);
+            if (ret_val)
+                return ret_val;
+            phy_data &= ~IGP01E1000_PSCFR_SMART_SPEED;
+            ret_val = e1000_write_phy_reg(hw, IGP01E1000_PHY_PORT_CONFIG,
+                                          phy_data);
+            if (ret_val)
+                return ret_val;
+            /* Set auto Master/Slave resolution process */
+            ret_val = e1000_read_phy_reg(hw, PHY_1000T_CTRL, &phy_data);
+            if (ret_val)
+                return ret_val;
+            phy_data &= ~CR_1000T_MS_ENABLE;
+            ret_val = e1000_write_phy_reg(hw, PHY_1000T_CTRL, phy_data);
+            if (ret_val)
+                return ret_val;
+        }
+
+        ret_val = e1000_read_phy_reg(hw, PHY_1000T_CTRL, &phy_data);
+        if (ret_val)
+            return ret_val;
+
+        /* load defaults for future use */
+        hw->original_master_slave = (phy_data & CR_1000T_MS_ENABLE) ?
+                                        ((phy_data & CR_1000T_MS_VALUE) ?
+                                         e1000_ms_force_master :
+                                         e1000_ms_force_slave) :
+                                         e1000_ms_auto;
+
+        switch (phy_ms_setting) {
+        case e1000_ms_force_master:
+            phy_data |= (CR_1000T_MS_ENABLE | CR_1000T_MS_VALUE);
+            break;
+        case e1000_ms_force_slave:
+            phy_data |= CR_1000T_MS_ENABLE;
+            phy_data &= ~(CR_1000T_MS_VALUE);
+            break;
+        case e1000_ms_auto:
+            phy_data &= ~CR_1000T_MS_ENABLE;
+        default:
+            break;
+        }
+        ret_val = e1000_write_phy_reg(hw, PHY_1000T_CTRL, phy_data);
+        if (ret_val)
+            return ret_val;
+    }
+
+    return E1000_SUCCESS;
+}
+
+/********************************************************************
+* Copper link setup for e1000_phy_gg82563 series.
+*
+* hw - Struct containing variables accessed by shared code
+*********************************************************************/
+static s32 e1000_copper_link_ggp_setup(struct e1000_hw *hw)
+{
+    s32 ret_val;
+    u16 phy_data;
+    u32 reg_data;
+
+    DEBUGFUNC("e1000_copper_link_ggp_setup");
+
+    if (!hw->phy_reset_disable) {
+
+        /* Enable CRS on TX for half-duplex operation. */
+        ret_val = e1000_read_phy_reg(hw, GG82563_PHY_MAC_SPEC_CTRL,
+                                     &phy_data);
+        if (ret_val)
+            return ret_val;
+
+        phy_data |= GG82563_MSCR_ASSERT_CRS_ON_TX;
+        /* Use 25MHz for both link down and 1000BASE-T for Tx clock */
+        phy_data |= GG82563_MSCR_TX_CLK_1000MBPS_25MHZ;
+
+        ret_val = e1000_write_phy_reg(hw, GG82563_PHY_MAC_SPEC_CTRL,
+                                      phy_data);
+        if (ret_val)
+            return ret_val;
+
+        /* Options:
+         *   MDI/MDI-X = 0 (default)
+         *   0 - Auto for all speeds
+         *   1 - MDI mode
+         *   2 - MDI-X mode
+         *   3 - Auto for 1000Base-T only (MDI-X for 10/100Base-T modes)
+         */
+        ret_val = e1000_read_phy_reg(hw, GG82563_PHY_SPEC_CTRL, &phy_data);
+        if (ret_val)
+            return ret_val;
+
+        phy_data &= ~GG82563_PSCR_CROSSOVER_MODE_MASK;
+
+        switch (hw->mdix) {
+        case 1:
+            phy_data |= GG82563_PSCR_CROSSOVER_MODE_MDI;
+            break;
+        case 2:
+            phy_data |= GG82563_PSCR_CROSSOVER_MODE_MDIX;
+            break;
+        case 0:
+        default:
+            phy_data |= GG82563_PSCR_CROSSOVER_MODE_AUTO;
+            break;
+        }
+
+        /* Options:
+         *   disable_polarity_correction = 0 (default)
+         *       Automatic Correction for Reversed Cable Polarity
+         *   0 - Disabled
+         *   1 - Enabled
+         */
+        phy_data &= ~GG82563_PSCR_POLARITY_REVERSAL_DISABLE;
+        if (hw->disable_polarity_correction == 1)
+            phy_data |= GG82563_PSCR_POLARITY_REVERSAL_DISABLE;
+        ret_val = e1000_write_phy_reg(hw, GG82563_PHY_SPEC_CTRL, phy_data);
+
+        if (ret_val)
+            return ret_val;
+
+        /* SW Reset the PHY so all changes take effect */
+        ret_val = e1000_phy_reset(hw);
+        if (ret_val) {
+            DEBUGOUT("Error Resetting the PHY\n");
+            return ret_val;
+        }
+    } /* phy_reset_disable */
+
+    if (hw->mac_type == e1000_80003es2lan) {
+        /* Bypass RX and TX FIFOs */
+        ret_val = e1000_write_kmrn_reg(hw, E1000_KUMCTRLSTA_OFFSET_FIFO_CTRL,
+                                       E1000_KUMCTRLSTA_FIFO_CTRL_RX_BYPASS |
+                                       E1000_KUMCTRLSTA_FIFO_CTRL_TX_BYPASS);
+        if (ret_val)
+            return ret_val;
+
+        ret_val = e1000_read_phy_reg(hw, GG82563_PHY_SPEC_CTRL_2, &phy_data);
+        if (ret_val)
+            return ret_val;
+
+        phy_data &= ~GG82563_PSCR2_REVERSE_AUTO_NEG;
+        ret_val = e1000_write_phy_reg(hw, GG82563_PHY_SPEC_CTRL_2, phy_data);
+
+        if (ret_val)
+            return ret_val;
+
+        reg_data = er32(CTRL_EXT);
+        reg_data &= ~(E1000_CTRL_EXT_LINK_MODE_MASK);
+        ew32(CTRL_EXT, reg_data);
+
+        ret_val = e1000_read_phy_reg(hw, GG82563_PHY_PWR_MGMT_CTRL,
+                                          &phy_data);
+        if (ret_val)
+            return ret_val;
+
+        /* Do not init these registers when the HW is in IAMT mode, since the
+         * firmware will have already initialized them.  We only initialize
+         * them if the HW is not in IAMT mode.
+         */
+        if (!e1000_check_mng_mode(hw)) {
+            /* Enable Electrical Idle on the PHY */
+            phy_data |= GG82563_PMCR_ENABLE_ELECTRICAL_IDLE;
+            ret_val = e1000_write_phy_reg(hw, GG82563_PHY_PWR_MGMT_CTRL,
+                                          phy_data);
+            if (ret_val)
+                return ret_val;
+
+            ret_val = e1000_read_phy_reg(hw, GG82563_PHY_KMRN_MODE_CTRL,
+                                         &phy_data);
+            if (ret_val)
+                return ret_val;
+
+            phy_data &= ~GG82563_KMCR_PASS_FALSE_CARRIER;
+            ret_val = e1000_write_phy_reg(hw, GG82563_PHY_KMRN_MODE_CTRL,
+                                          phy_data);
+
+            if (ret_val)
+                return ret_val;
+        }
+
+        /* Workaround: Disable padding in Kumeran interface in the MAC
+         * and in the PHY to avoid CRC errors.
+         */
+        ret_val = e1000_read_phy_reg(hw, GG82563_PHY_INBAND_CTRL,
+                                     &phy_data);
+        if (ret_val)
+            return ret_val;
+        phy_data |= GG82563_ICR_DIS_PADDING;
+        ret_val = e1000_write_phy_reg(hw, GG82563_PHY_INBAND_CTRL,
+                                      phy_data);
+        if (ret_val)
+            return ret_val;
+    }
+
+    return E1000_SUCCESS;
+}
+
+/********************************************************************
+* Copper link setup for e1000_phy_m88 series.
+*
+* hw - Struct containing variables accessed by shared code
+*********************************************************************/
+static s32 e1000_copper_link_mgp_setup(struct e1000_hw *hw)
+{
+    s32 ret_val;
+    u16 phy_data;
+
+    DEBUGFUNC("e1000_copper_link_mgp_setup");
+
+    if (hw->phy_reset_disable)
+        return E1000_SUCCESS;
+
+    /* Enable CRS on TX. This must be set for half-duplex operation. */
+    ret_val = e1000_read_phy_reg(hw, M88E1000_PHY_SPEC_CTRL, &phy_data);
+    if (ret_val)
+        return ret_val;
+
+    phy_data |= M88E1000_PSCR_ASSERT_CRS_ON_TX;
+
+    /* Options:
+     *   MDI/MDI-X = 0 (default)
+     *   0 - Auto for all speeds
+     *   1 - MDI mode
+     *   2 - MDI-X mode
+     *   3 - Auto for 1000Base-T only (MDI-X for 10/100Base-T modes)
+     */
+    phy_data &= ~M88E1000_PSCR_AUTO_X_MODE;
+
+    switch (hw->mdix) {
+    case 1:
+        phy_data |= M88E1000_PSCR_MDI_MANUAL_MODE;
+        break;
+    case 2:
+        phy_data |= M88E1000_PSCR_MDIX_MANUAL_MODE;
+        break;
+    case 3:
+        phy_data |= M88E1000_PSCR_AUTO_X_1000T;
+        break;
+    case 0:
+    default:
+        phy_data |= M88E1000_PSCR_AUTO_X_MODE;
+        break;
+    }
+
+    /* Options:
+     *   disable_polarity_correction = 0 (default)
+     *       Automatic Correction for Reversed Cable Polarity
+     *   0 - Disabled
+     *   1 - Enabled
+     */
+    phy_data &= ~M88E1000_PSCR_POLARITY_REVERSAL;
+    if (hw->disable_polarity_correction == 1)
+        phy_data |= M88E1000_PSCR_POLARITY_REVERSAL;
+    ret_val = e1000_write_phy_reg(hw, M88E1000_PHY_SPEC_CTRL, phy_data);
+    if (ret_val)
+        return ret_val;
+
+    if (hw->phy_revision < M88E1011_I_REV_4) {
+        /* Force TX_CLK in the Extended PHY Specific Control Register
+         * to 25MHz clock.
+         */
+        ret_val = e1000_read_phy_reg(hw, M88E1000_EXT_PHY_SPEC_CTRL, &phy_data);
+        if (ret_val)
+            return ret_val;
+
+        phy_data |= M88E1000_EPSCR_TX_CLK_25;
+
+        if ((hw->phy_revision == E1000_REVISION_2) &&
+            (hw->phy_id == M88E1111_I_PHY_ID)) {
+            /* Vidalia Phy, set the downshift counter to 5x */
+            phy_data &= ~(M88EC018_EPSCR_DOWNSHIFT_COUNTER_MASK);
+            phy_data |= M88EC018_EPSCR_DOWNSHIFT_COUNTER_5X;
+            ret_val = e1000_write_phy_reg(hw,
+                                        M88E1000_EXT_PHY_SPEC_CTRL, phy_data);
+            if (ret_val)
+                return ret_val;
+        } else {
+            /* Configure Master and Slave downshift values */
+            phy_data &= ~(M88E1000_EPSCR_MASTER_DOWNSHIFT_MASK |
+                              M88E1000_EPSCR_SLAVE_DOWNSHIFT_MASK);
+            phy_data |= (M88E1000_EPSCR_MASTER_DOWNSHIFT_1X |
+                             M88E1000_EPSCR_SLAVE_DOWNSHIFT_1X);
+            ret_val = e1000_write_phy_reg(hw,
+                                        M88E1000_EXT_PHY_SPEC_CTRL, phy_data);
+            if (ret_val)
+                return ret_val;
+        }
+    }
+
+    /* SW Reset the PHY so all changes take effect */
+    ret_val = e1000_phy_reset(hw);
+    if (ret_val) {
+        DEBUGOUT("Error Resetting the PHY\n");
+        return ret_val;
+    }
+
+    return E1000_SUCCESS;
+}
+
+/********************************************************************
+* Setup auto-negotiation and flow control advertisements,
+* and then perform auto-negotiation.
+*
+* hw - Struct containing variables accessed by shared code
+*********************************************************************/
+static s32 e1000_copper_link_autoneg(struct e1000_hw *hw)
+{
+    s32 ret_val;
+    u16 phy_data;
+
+    DEBUGFUNC("e1000_copper_link_autoneg");
+
+    /* Perform some bounds checking on the hw->autoneg_advertised
+     * parameter.  If this variable is zero, then set it to the default.
+     */
+    hw->autoneg_advertised &= AUTONEG_ADVERTISE_SPEED_DEFAULT;
+
+    /* If autoneg_advertised is zero, we assume it was not defaulted
+     * by the calling code so we set to advertise full capability.
+     */
+    if (hw->autoneg_advertised == 0)
+        hw->autoneg_advertised = AUTONEG_ADVERTISE_SPEED_DEFAULT;
+
+    /* IFE phy only supports 10/100 */
+    if (hw->phy_type == e1000_phy_ife)
+        hw->autoneg_advertised &= AUTONEG_ADVERTISE_10_100_ALL;
+
+    DEBUGOUT("Reconfiguring auto-neg advertisement params\n");
+    ret_val = e1000_phy_setup_autoneg(hw);
+    if (ret_val) {
+        DEBUGOUT("Error Setting up Auto-Negotiation\n");
+        return ret_val;
+    }
+    DEBUGOUT("Restarting Auto-Neg\n");
+
+    /* Restart auto-negotiation by setting the Auto Neg Enable bit and
+     * the Auto Neg Restart bit in the PHY control register.
+     */
+    ret_val = e1000_read_phy_reg(hw, PHY_CTRL, &phy_data);
+    if (ret_val)
+        return ret_val;
+
+    phy_data |= (MII_CR_AUTO_NEG_EN | MII_CR_RESTART_AUTO_NEG);
+    ret_val = e1000_write_phy_reg(hw, PHY_CTRL, phy_data);
+    if (ret_val)
+        return ret_val;
+
+    /* Does the user want to wait for Auto-Neg to complete here, or
+     * check at a later time (for example, callback routine).
+     */
+    if (hw->wait_autoneg_complete) {
+        ret_val = e1000_wait_autoneg(hw);
+        if (ret_val) {
+            DEBUGOUT("Error while waiting for autoneg to complete\n");
+            return ret_val;
+        }
+    }
+
+    hw->get_link_status = true;
+
+    return E1000_SUCCESS;
+}
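+
+/* Note (descriptive sketch): with hw->wait_autoneg_complete set, the function
+ * above blocks in e1000_wait_autoneg() until negotiation finishes or times
+ * out; with it cleared, it returns right after restarting auto-negotiation
+ * and the driver is expected to pick up the result later, e.g. through
+ * e1000_check_for_link() from a periodic path outside this file.
+ */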
+
+/******************************************************************************
+* Config the MAC and the PHY after link is up.
+*   1) Set up the MAC to the current PHY speed/duplex
+*      if we are on 82543.  If we
+*      are on newer silicon, we only need to configure
+*      collision distance in the Transmit Control Register.
+*   2) Set up flow control on the MAC to that established with
+*      the link partner.
+*   3) Config DSP to improve Gigabit link quality for some PHY revisions.
+*
+* hw - Struct containing variables accessed by shared code
+******************************************************************************/
+static s32 e1000_copper_link_postconfig(struct e1000_hw *hw)
+{
+    s32 ret_val;
+    DEBUGFUNC("e1000_copper_link_postconfig");
+
+    if (hw->mac_type >= e1000_82544) {
+        e1000_config_collision_dist(hw);
+    } else {
+        ret_val = e1000_config_mac_to_phy(hw);
+        if (ret_val) {
+            DEBUGOUT("Error configuring MAC to PHY settings\n");
+            return ret_val;
+        }
+    }
+    ret_val = e1000_config_fc_after_link_up(hw);
+    if (ret_val) {
+        DEBUGOUT("Error Configuring Flow Control\n");
+        return ret_val;
+    }
+
+    /* Config DSP to improve Giga link quality */
+    if (hw->phy_type == e1000_phy_igp) {
+        ret_val = e1000_config_dsp_after_link_change(hw, true);
+        if (ret_val) {
+            DEBUGOUT("Error Configuring DSP after link up\n");
+            return ret_val;
+        }
+    }
+
+    return E1000_SUCCESS;
+}
+
+/******************************************************************************
+* Detects which PHY is present and setup the speed and duplex
+*
+* hw - Struct containing variables accessed by shared code
+******************************************************************************/
+static s32 e1000_setup_copper_link(struct e1000_hw *hw)
+{
+    s32 ret_val;
+    u16 i;
+    u16 phy_data;
+    u16 reg_data;
+
+    DEBUGFUNC("e1000_setup_copper_link");
+
+    switch (hw->mac_type) {
+    case e1000_80003es2lan:
+    case e1000_ich8lan:
+        /* Set the mac to wait the maximum time between each
+         * iteration and increase the max iterations when
+         * polling the phy; this fixes erroneous timeouts at 10Mbps. */
+        ret_val = e1000_write_kmrn_reg(hw, GG82563_REG(0x34, 4), 0xFFFF);
+        if (ret_val)
+            return ret_val;
+        ret_val = e1000_read_kmrn_reg(hw, GG82563_REG(0x34, 9), &reg_data);
+        if (ret_val)
+            return ret_val;
+        reg_data |= 0x3F;
+        ret_val = e1000_write_kmrn_reg(hw, GG82563_REG(0x34, 9), reg_data);
+        if (ret_val)
+            return ret_val;
+    default:
+        break;
+    }
+
+    /* Check if it is a valid PHY and set PHY mode if necessary. */
+    ret_val = e1000_copper_link_preconfig(hw);
+    if (ret_val)
+        return ret_val;
+
+    switch (hw->mac_type) {
+    case e1000_80003es2lan:
+        /* Kumeran registers are written-only */
+        reg_data = E1000_KUMCTRLSTA_INB_CTRL_LINK_STATUS_TX_TIMEOUT_DEFAULT;
+        reg_data |= E1000_KUMCTRLSTA_INB_CTRL_DIS_PADDING;
+        ret_val = e1000_write_kmrn_reg(hw, E1000_KUMCTRLSTA_OFFSET_INB_CTRL,
+                                       reg_data);
+        if (ret_val)
+            return ret_val;
+        break;
+    default:
+        break;
+    }
+
+    if (hw->phy_type == e1000_phy_igp ||
+        hw->phy_type == e1000_phy_igp_3 ||
+        hw->phy_type == e1000_phy_igp_2) {
+        ret_val = e1000_copper_link_igp_setup(hw);
+        if (ret_val)
+            return ret_val;
+    } else if (hw->phy_type == e1000_phy_m88) {
+        ret_val = e1000_copper_link_mgp_setup(hw);
+        if (ret_val)
+            return ret_val;
+    } else if (hw->phy_type == e1000_phy_gg82563) {
+        ret_val = e1000_copper_link_ggp_setup(hw);
+        if (ret_val)
+            return ret_val;
+    }
+
+    if (hw->autoneg) {
+        /* Setup autoneg and flow control advertisement
+         * and perform autonegotiation */
+        ret_val = e1000_copper_link_autoneg(hw);
+        if (ret_val)
+            return ret_val;
+    } else {
+        /* PHY will be set to 10H, 10F, 100H, or 100F
+         * depending on value from forced_speed_duplex. */
+        DEBUGOUT("Forcing speed and duplex\n");
+        ret_val = e1000_phy_force_speed_duplex(hw);
+        if (ret_val) {
+            DEBUGOUT("Error Forcing Speed and Duplex\n");
+            return ret_val;
+        }
+    }
+
+    /* Check link status. Wait up to 100 microseconds for link to become
+     * valid.
+     */
+    for (i = 0; i < 10; i++) {
+        ret_val = e1000_read_phy_reg(hw, PHY_STATUS, &phy_data);
+        if (ret_val)
+            return ret_val;
+        ret_val = e1000_read_phy_reg(hw, PHY_STATUS, &phy_data);
+        if (ret_val)
+            return ret_val;
+
+        if (phy_data & MII_SR_LINK_STATUS) {
+            /* Config the MAC and PHY after link is up */
+            ret_val = e1000_copper_link_postconfig(hw);
+            if (ret_val)
+                return ret_val;
+
+            DEBUGOUT("Valid link established!!!\n");
+            return E1000_SUCCESS;
+        }
+        udelay(10);
+    }
+
+    DEBUGOUT("Unable to establish link!!!\n");
+    return E1000_SUCCESS;
+}
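+
+/* Flow summary (descriptive only) for e1000_setup_copper_link():
+ *   1) e1000_copper_link_preconfig()  - detect the PHY and set its mode;
+ *   2) the IGP/M88/GG82563-specific setup helper selected by hw->phy_type;
+ *   3) e1000_copper_link_autoneg() or e1000_phy_force_speed_duplex(),
+ *      depending on hw->autoneg;
+ *   4) a short poll of PHY_STATUS; once MII_SR_LINK_STATUS is seen,
+ *      e1000_copper_link_postconfig() finishes MAC and flow-control setup.
+ * The function returns E1000_SUCCESS even when no link is established within
+ * the polling window, so callers must still check link state later.
+ */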
+
+/******************************************************************************
+* Configure the MAC-to-PHY interface for 10/100Mbps
+*
+* hw - Struct containing variables accessed by shared code
+******************************************************************************/
+static s32 e1000_configure_kmrn_for_10_100(struct e1000_hw *hw, u16 duplex)
+{
+    s32 ret_val = E1000_SUCCESS;
+    u32 tipg;
+    u16 reg_data;
+
+    DEBUGFUNC("e1000_configure_kmrn_for_10_100");
+
+    reg_data = E1000_KUMCTRLSTA_HD_CTRL_10_100_DEFAULT;
+    ret_val = e1000_write_kmrn_reg(hw, E1000_KUMCTRLSTA_OFFSET_HD_CTRL,
+                                   reg_data);
+    if (ret_val)
+        return ret_val;
+
+    /* Configure Transmit Inter-Packet Gap */
+    tipg = er32(TIPG);
+    tipg &= ~E1000_TIPG_IPGT_MASK;
+    tipg |= DEFAULT_80003ES2LAN_TIPG_IPGT_10_100;
+    ew32(TIPG, tipg);
+
+    ret_val = e1000_read_phy_reg(hw, GG82563_PHY_KMRN_MODE_CTRL, &reg_data);
+
+    if (ret_val)
+        return ret_val;
+
+    if (duplex == HALF_DUPLEX)
+        reg_data |= GG82563_KMCR_PASS_FALSE_CARRIER;
+    else
+        reg_data &= ~GG82563_KMCR_PASS_FALSE_CARRIER;
+
+    ret_val = e1000_write_phy_reg(hw, GG82563_PHY_KMRN_MODE_CTRL, reg_data);
+
+    return ret_val;
+}
+
+static s32 e1000_configure_kmrn_for_1000(struct e1000_hw *hw)
+{
+    s32 ret_val = E1000_SUCCESS;
+    u16 reg_data;
+    u32 tipg;
+
+    DEBUGFUNC("e1000_configure_kmrn_for_1000");
+
+    reg_data = E1000_KUMCTRLSTA_HD_CTRL_1000_DEFAULT;
+    ret_val = e1000_write_kmrn_reg(hw, E1000_KUMCTRLSTA_OFFSET_HD_CTRL,
+                                   reg_data);
+    if (ret_val)
+        return ret_val;
+
+    /* Configure Transmit Inter-Packet Gap */
+    tipg = er32(TIPG);
+    tipg &= ~E1000_TIPG_IPGT_MASK;
+    tipg |= DEFAULT_80003ES2LAN_TIPG_IPGT_1000;
+    ew32(TIPG, tipg);
+
+    ret_val = e1000_read_phy_reg(hw, GG82563_PHY_KMRN_MODE_CTRL, &reg_data);
+
+    if (ret_val)
+        return ret_val;
+
+    reg_data &= ~GG82563_KMCR_PASS_FALSE_CARRIER;
+    ret_val = e1000_write_phy_reg(hw, GG82563_PHY_KMRN_MODE_CTRL, reg_data);
+
+    return ret_val;
+}
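+
+/* Note (descriptive): the two Kumeran helpers above differ in the HD_CTRL
+ * default written over the Kumeran interface, the TIPG IPGT value
+ * (DEFAULT_80003ES2LAN_TIPG_IPGT_10_100 vs. _1000), and whether
+ * GG82563_KMCR_PASS_FALSE_CARRIER may remain set (only for half duplex at
+ * 10/100).  They are meant to be called once the negotiated speed is known;
+ * the call sites are elsewhere in this driver and not visible in this hunk.
+ */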
+
+/******************************************************************************
+* Configures PHY autoneg and flow control advertisement settings
+*
+* hw - Struct containing variables accessed by shared code
+******************************************************************************/
+s32 e1000_phy_setup_autoneg(struct e1000_hw *hw)
+{
+    s32 ret_val;
+    u16 mii_autoneg_adv_reg;
+    u16 mii_1000t_ctrl_reg;
+
+    DEBUGFUNC("e1000_phy_setup_autoneg");
+
+    /* Read the MII Auto-Neg Advertisement Register (Address 4). */
+    ret_val = e1000_read_phy_reg(hw, PHY_AUTONEG_ADV, &mii_autoneg_adv_reg);
+    if (ret_val)
+        return ret_val;
+
+    if (hw->phy_type != e1000_phy_ife) {
+        /* Read the MII 1000Base-T Control Register (Address 9). */
+        ret_val = e1000_read_phy_reg(hw, PHY_1000T_CTRL, &mii_1000t_ctrl_reg);
+        if (ret_val)
+            return ret_val;
+    } else
+        mii_1000t_ctrl_reg = 0;
+
+    /* Need to parse both autoneg_advertised and fc and set up
+     * the appropriate PHY registers.  First we will parse for
+     * autoneg_advertised software override.  Since we can advertise
+     * a plethora of combinations, we need to check each bit
+     * individually.
+     */
+
+    /* First we clear all the 10/100 mb speed bits in the Auto-Neg
+     * Advertisement Register (Address 4) and the 1000 mb speed bits in
+     * the 1000Base-T Control Register (Address 9).
+     */
+    mii_autoneg_adv_reg &= ~REG4_SPEED_MASK;
+    mii_1000t_ctrl_reg &= ~REG9_SPEED_MASK;
+
+    DEBUGOUT1("autoneg_advertised %x\n", hw->autoneg_advertised);
+
+    /* Do we want to advertise 10 Mb Half Duplex? */
+    if (hw->autoneg_advertised & ADVERTISE_10_HALF) {
+        DEBUGOUT("Advertise 10mb Half duplex\n");
+        mii_autoneg_adv_reg |= NWAY_AR_10T_HD_CAPS;
+    }
+
+    /* Do we want to advertise 10 Mb Full Duplex? */
+    if (hw->autoneg_advertised & ADVERTISE_10_FULL) {
+        DEBUGOUT("Advertise 10mb Full duplex\n");
+        mii_autoneg_adv_reg |= NWAY_AR_10T_FD_CAPS;
+    }
+
+    /* Do we want to advertise 100 Mb Half Duplex? */
+    if (hw->autoneg_advertised & ADVERTISE_100_HALF) {
+        DEBUGOUT("Advertise 100mb Half duplex\n");
+        mii_autoneg_adv_reg |= NWAY_AR_100TX_HD_CAPS;
+    }
+
+    /* Do we want to advertise 100 Mb Full Duplex? */
+    if (hw->autoneg_advertised & ADVERTISE_100_FULL) {
+        DEBUGOUT("Advertise 100mb Full duplex\n");
+        mii_autoneg_adv_reg |= NWAY_AR_100TX_FD_CAPS;
+    }
+
+    /* We do not allow the Phy to advertise 1000 Mb Half Duplex */
+    if (hw->autoneg_advertised & ADVERTISE_1000_HALF) {
+        DEBUGOUT("Advertise 1000mb Half duplex requested, request denied!\n");
+    }
+
+    /* Do we want to advertise 1000 Mb Full Duplex? */
+    if (hw->autoneg_advertised & ADVERTISE_1000_FULL) {
+        DEBUGOUT("Advertise 1000mb Full duplex\n");
+        mii_1000t_ctrl_reg |= CR_1000T_FD_CAPS;
+        if (hw->phy_type == e1000_phy_ife) {
+            DEBUGOUT("e1000_phy_ife is a 10/100 PHY. Gigabit speed is not supported.\n");
+        }
+    }
+
+    /* Check for a software override of the flow control settings, and
+     * setup the PHY advertisement registers accordingly.  If
+     * auto-negotiation is enabled, then software will have to set the
+     * "PAUSE" bits to the correct value in the Auto-Negotiation
+     * Advertisement Register (PHY_AUTONEG_ADV) and re-start auto-negotiation.
+     *
+     * The possible values of the "fc" parameter are:
+     *      0:  Flow control is completely disabled
+     *      1:  Rx flow control is enabled (we can receive pause frames
+     *          but not send pause frames).
+     *      2:  Tx flow control is enabled (we can send pause frames
+     *          but we do not support receiving pause frames).
+     *      3:  Both Rx and TX flow control (symmetric) are enabled.
+     *  other:  No software override.  The flow control configuration
+     *          in the EEPROM is used.
+     */
+    switch (hw->fc) {
+    case E1000_FC_NONE: /* 0 */
+        /* Flow control (RX & TX) is completely disabled by a
+         * software over-ride.
+         */
+        mii_autoneg_adv_reg &= ~(NWAY_AR_ASM_DIR | NWAY_AR_PAUSE);
+        break;
+    case E1000_FC_RX_PAUSE: /* 1 */
+        /* RX Flow control is enabled, and TX Flow control is
+         * disabled, by a software over-ride.
+         */
+        /* Since there really isn't a way to advertise that we are
+         * capable of RX Pause ONLY, we will advertise that we
+         * support both symmetric and asymmetric RX PAUSE.  Later
+         * (in e1000_config_fc_after_link_up) we will disable the
+         * hw's ability to send PAUSE frames.
+         */
+        mii_autoneg_adv_reg |= (NWAY_AR_ASM_DIR | NWAY_AR_PAUSE);
+        break;
+    case E1000_FC_TX_PAUSE: /* 2 */
+        /* TX Flow control is enabled, and RX Flow control is
+         * disabled, by a software over-ride.
+         */
+        mii_autoneg_adv_reg |= NWAY_AR_ASM_DIR;
+        mii_autoneg_adv_reg &= ~NWAY_AR_PAUSE;
+        break;
+    case E1000_FC_FULL: /* 3 */
+        /* Flow control (both RX and TX) is enabled by a software
+         * over-ride.
+         */
+        mii_autoneg_adv_reg |= (NWAY_AR_ASM_DIR | NWAY_AR_PAUSE);
+        break;
+    default:
+        DEBUGOUT("Flow control param set incorrectly\n");
+        return -E1000_ERR_CONFIG;
+    }
+
+    ret_val = e1000_write_phy_reg(hw, PHY_AUTONEG_ADV, mii_autoneg_adv_reg);
+    if (ret_val)
+        return ret_val;
+
+    DEBUGOUT1("Auto-Neg Advertising %x\n", mii_autoneg_adv_reg);
+
+    if (hw->phy_type != e1000_phy_ife) {
+        ret_val = e1000_write_phy_reg(hw, PHY_1000T_CTRL, mii_1000t_ctrl_reg);
+        if (ret_val)
+            return ret_val;
+    }
+
+    return E1000_SUCCESS;
+}
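+
+/* Example (illustrative sketch): a caller that wants to advertise only
+ * 100 Mb/s and 1000 Mb/s full duplex would request
+ *
+ *     hw->autoneg_advertised = ADVERTISE_100_FULL | ADVERTISE_1000_FULL;
+ *
+ * which e1000_phy_setup_autoneg() above maps to NWAY_AR_100TX_FD_CAPS in the
+ * MII advertisement register and CR_1000T_FD_CAPS in the 1000Base-T control
+ * register, plus the PAUSE bits derived from hw->fc.
+ */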
+
+/******************************************************************************
+* Force PHY speed and duplex settings to hw->forced_speed_duplex
+*
+* hw - Struct containing variables accessed by shared code
+******************************************************************************/
+static s32 e1000_phy_force_speed_duplex(struct e1000_hw *hw)
+{
+    u32 ctrl;
+    s32 ret_val;
+    u16 mii_ctrl_reg;
+    u16 mii_status_reg;
+    u16 phy_data;
+    u16 i;
+
+    DEBUGFUNC("e1000_phy_force_speed_duplex");
+
+    /* Turn off Flow control if we are forcing speed and duplex. */
+    hw->fc = E1000_FC_NONE;
+
+    DEBUGOUT1("hw->fc = %d\n", hw->fc);
+
+    /* Read the Device Control Register. */
+    ctrl = er32(CTRL);
+
+    /* Set the bits to Force Speed and Duplex in the Device Ctrl Reg. */
+    ctrl |= (E1000_CTRL_FRCSPD | E1000_CTRL_FRCDPX);
+    ctrl &= ~(DEVICE_SPEED_MASK);
+
+    /* Clear the Auto Speed Detect Enable bit. */
+    ctrl &= ~E1000_CTRL_ASDE;
+
+    /* Read the MII Control Register. */
+    ret_val = e1000_read_phy_reg(hw, PHY_CTRL, &mii_ctrl_reg);
+    if (ret_val)
+        return ret_val;
+
+    /* We need to disable autoneg in order to force link and duplex. */
+
+    mii_ctrl_reg &= ~MII_CR_AUTO_NEG_EN;
+
+    /* Are we forcing Full or Half Duplex? */
+    if (hw->forced_speed_duplex == e1000_100_full ||
+        hw->forced_speed_duplex == e1000_10_full) {
+        /* We want to force full duplex so we SET the full duplex bits in the
+         * Device and MII Control Registers.
+         */
+        ctrl |= E1000_CTRL_FD;
+        mii_ctrl_reg |= MII_CR_FULL_DUPLEX;
+        DEBUGOUT("Full Duplex\n");
+    } else {
+        /* We want to force half duplex so we CLEAR the full duplex bits in
+         * the Device and MII Control Registers.
+         */
+        ctrl &= ~E1000_CTRL_FD;
+        mii_ctrl_reg &= ~MII_CR_FULL_DUPLEX;
+        DEBUGOUT("Half Duplex\n");
+    }
+
+    /* Are we forcing 100Mbps??? */
+    if (hw->forced_speed_duplex == e1000_100_full ||
+       hw->forced_speed_duplex == e1000_100_half) {
+        /* Set the 100Mb bit and turn off the 1000Mb and 10Mb bits. */
+        ctrl |= E1000_CTRL_SPD_100;
+        mii_ctrl_reg |= MII_CR_SPEED_100;
+        mii_ctrl_reg &= ~(MII_CR_SPEED_1000 | MII_CR_SPEED_10);
+        DEBUGOUT("Forcing 100mb ");
+    } else {
+        /* Set the 10Mb bit and turn off the 1000Mb and 100Mb bits. */
+        ctrl &= ~(E1000_CTRL_SPD_1000 | E1000_CTRL_SPD_100);
+        mii_ctrl_reg |= MII_CR_SPEED_10;
+        mii_ctrl_reg &= ~(MII_CR_SPEED_1000 | MII_CR_SPEED_100);
+        DEBUGOUT("Forcing 10mb ");
+    }
+
+    e1000_config_collision_dist(hw);
+
+    /* Write the configured values back to the Device Control Reg. */
+    ew32(CTRL, ctrl);
+
+    if ((hw->phy_type == e1000_phy_m88) ||
+        (hw->phy_type == e1000_phy_gg82563)) {
+        ret_val = e1000_read_phy_reg(hw, M88E1000_PHY_SPEC_CTRL, &phy_data);
+        if (ret_val)
+            return ret_val;
+
+        /* Clear Auto-Crossover to force MDI manually. M88E1000 requires MDI
+         * forced whenever speed or duplex are forced.
+         */
+        phy_data &= ~M88E1000_PSCR_AUTO_X_MODE;
+        ret_val = e1000_write_phy_reg(hw, M88E1000_PHY_SPEC_CTRL, phy_data);
+        if (ret_val)
+            return ret_val;
+
+        DEBUGOUT1("M88E1000 PSCR: %x \n", phy_data);
+
+        /* Need to reset the PHY or these changes will be ignored */
+        mii_ctrl_reg |= MII_CR_RESET;
+
+    /* Disable MDI-X support for 10/100 */
+    } else if (hw->phy_type == e1000_phy_ife) {
+        ret_val = e1000_read_phy_reg(hw, IFE_PHY_MDIX_CONTROL, &phy_data);
+        if (ret_val)
+            return ret_val;
+
+        phy_data &= ~IFE_PMC_AUTO_MDIX;
+        phy_data &= ~IFE_PMC_FORCE_MDIX;
+
+        ret_val = e1000_write_phy_reg(hw, IFE_PHY_MDIX_CONTROL, phy_data);
+        if (ret_val)
+            return ret_val;
+
+    } else {
+        /* Clear Auto-Crossover to force MDI manually.  IGP requires MDI
+         * forced whenever speed or duplex are forced.
+         */
+        ret_val = e1000_read_phy_reg(hw, IGP01E1000_PHY_PORT_CTRL, &phy_data);
+        if (ret_val)
+            return ret_val;
+
+        phy_data &= ~IGP01E1000_PSCR_AUTO_MDIX;
+        phy_data &= ~IGP01E1000_PSCR_FORCE_MDI_MDIX;
+
+        ret_val = e1000_write_phy_reg(hw, IGP01E1000_PHY_PORT_CTRL, phy_data);
+        if (ret_val)
+            return ret_val;
+    }
+
+    /* Write back the modified PHY MII control register. */
+    ret_val = e1000_write_phy_reg(hw, PHY_CTRL, mii_ctrl_reg);
+    if (ret_val)
+        return ret_val;
+
+    udelay(1);
+
+    /* The wait_autoneg_complete flag may be a little misleading here.
+     * Since we are forcing speed and duplex, Auto-Neg is not enabled.
+     * But we do want to delay for a period while forcing only so we
+     * don't generate false No Link messages.  So we will wait here
+     * only if the user has set wait_autoneg_complete to 1, which is
+     * the default.
+     */
+    if (hw->wait_autoneg_complete) {
+        /* We will wait for autoneg to complete. */
+        DEBUGOUT("Waiting for forced speed/duplex link.\n");
+        mii_status_reg = 0;
+
+        /* We will wait for autoneg to complete or 4.5 seconds to expire. */
+        for (i = PHY_FORCE_TIME; i > 0; i--) {
+            /* Read the MII Status Register and wait for Auto-Neg Complete bit
+             * to be set.
+             */
+            ret_val = e1000_read_phy_reg(hw, PHY_STATUS, &mii_status_reg);
+            if (ret_val)
+                return ret_val;
+
+            ret_val = e1000_read_phy_reg(hw, PHY_STATUS, &mii_status_reg);
+            if (ret_val)
+                return ret_val;
+
+            if (mii_status_reg & MII_SR_LINK_STATUS) break;
+            msleep(100);
+        }
+        if ((i == 0) &&
+           ((hw->phy_type == e1000_phy_m88) ||
+            (hw->phy_type == e1000_phy_gg82563))) {
+            /* We didn't get link.  Reset the DSP and wait again for link. */
+            ret_val = e1000_phy_reset_dsp(hw);
+            if (ret_val) {
+                DEBUGOUT("Error Resetting PHY DSP\n");
+                return ret_val;
+            }
+        }
+        /* This loop will early-out if the link condition has been met.  */
+        for (i = PHY_FORCE_TIME; i > 0; i--) {
+            if (mii_status_reg & MII_SR_LINK_STATUS) break;
+            msleep(100);
+            /* Read the MII Status Register and wait for Auto-Neg Complete bit
+             * to be set.
+             */
+            ret_val = e1000_read_phy_reg(hw, PHY_STATUS, &mii_status_reg);
+            if (ret_val)
+                return ret_val;
+
+            ret_val = e1000_read_phy_reg(hw, PHY_STATUS, &mii_status_reg);
+            if (ret_val)
+                return ret_val;
+        }
+    }
+
+    if (hw->phy_type == e1000_phy_m88) {
+        /* Because we reset the PHY above, we need to re-force TX_CLK in the
+         * Extended PHY Specific Control Register to 25MHz clock.  This value
+         * defaults back to a 2.5MHz clock when the PHY is reset.
+         */
+        ret_val = e1000_read_phy_reg(hw, M88E1000_EXT_PHY_SPEC_CTRL, &phy_data);
+        if (ret_val)
+            return ret_val;
+
+        phy_data |= M88E1000_EPSCR_TX_CLK_25;
+        ret_val = e1000_write_phy_reg(hw, M88E1000_EXT_PHY_SPEC_CTRL, phy_data);
+        if (ret_val)
+            return ret_val;
+
+        /* In addition, because of the s/w reset above, we need to enable CRS on
+         * TX.  This must be set for both full and half duplex operation.
+         */
+        ret_val = e1000_read_phy_reg(hw, M88E1000_PHY_SPEC_CTRL, &phy_data);
+        if (ret_val)
+            return ret_val;
+
+        phy_data |= M88E1000_PSCR_ASSERT_CRS_ON_TX;
+        ret_val = e1000_write_phy_reg(hw, M88E1000_PHY_SPEC_CTRL, phy_data);
+        if (ret_val)
+            return ret_val;
+
+        if ((hw->mac_type == e1000_82544 || hw->mac_type == e1000_82543) &&
+            (!hw->autoneg) && (hw->forced_speed_duplex == e1000_10_full ||
+             hw->forced_speed_duplex == e1000_10_half)) {
+            ret_val = e1000_polarity_reversal_workaround(hw);
+            if (ret_val)
+                return ret_val;
+        }
+    } else if (hw->phy_type == e1000_phy_gg82563) {
+        /* The TX_CLK of the Extended PHY Specific Control Register defaults
+         * to 2.5MHz on a reset.  We need to re-force it back to 25MHz, if
+         * we're not in a forced 10/duplex configuration. */
+        ret_val = e1000_read_phy_reg(hw, GG82563_PHY_MAC_SPEC_CTRL, &phy_data);
+        if (ret_val)
+            return ret_val;
+
+        phy_data &= ~GG82563_MSCR_TX_CLK_MASK;
+        if ((hw->forced_speed_duplex == e1000_10_full) ||
+            (hw->forced_speed_duplex == e1000_10_half))
+            phy_data |= GG82563_MSCR_TX_CLK_10MBPS_2_5MHZ;
+        else
+            phy_data |= GG82563_MSCR_TX_CLK_100MBPS_25MHZ;
+
+        /* Also due to the reset, we need to enable CRS on Tx. */
+        phy_data |= GG82563_MSCR_ASSERT_CRS_ON_TX;
+
+        ret_val = e1000_write_phy_reg(hw, GG82563_PHY_MAC_SPEC_CTRL, phy_data);
+        if (ret_val)
+            return ret_val;
+    }
+    return E1000_SUCCESS;
+}
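+
+/* Summary of the forcing sequence implemented above:
+ *  1. disable flow control (hw->fc = E1000_FC_NONE);
+ *  2. set FRCSPD/FRCDPX and clear ASDE in the Device Control register;
+ *  3. clear auto-negotiation and force speed/duplex in the MII control
+ *     register, mirroring the same settings in CTRL;
+ *  4. disable auto-MDI-X in the PHY-specific register for the PHY type;
+ *  5. optionally poll PHY_STATUS for link (wait_autoneg_complete);
+ *  6. apply PHY-specific fixups (TX_CLK back to 25MHz, CRS on TX, and the
+ *     polarity reversal workaround for 82543/82544 forced to 10Mb).
+ */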
+
+/******************************************************************************
+* Sets the collision distance in the Transmit Control register
+*
+* hw - Struct containing variables accessed by shared code
+*
+* Link should have been established previously.
+******************************************************************************/
+void e1000_config_collision_dist(struct e1000_hw *hw)
+{
+    u32 tctl, coll_dist;
+
+    DEBUGFUNC("e1000_config_collision_dist");
+
+    if (hw->mac_type < e1000_82543)
+        coll_dist = E1000_COLLISION_DISTANCE_82542;
+    else
+        coll_dist = E1000_COLLISION_DISTANCE;
+
+    tctl = er32(TCTL);
+
+    tctl &= ~E1000_TCTL_COLD;
+    tctl |= coll_dist << E1000_COLD_SHIFT;
+
+    ew32(TCTL, tctl);
+    E1000_WRITE_FLUSH();
+}
+
+/******************************************************************************
+* Sets MAC speed and duplex settings to reflect those of the PHY
+*
+* hw - Struct containing variables accessed by shared code
+*
+* Reads the PHY-specific status register to determine the speed and duplex
+* negotiated by the PHY and mirrors them in the Device Control register.
+******************************************************************************/
+static s32 e1000_config_mac_to_phy(struct e1000_hw *hw)
+{
+    u32 ctrl;
+    s32 ret_val;
+    u16 phy_data;
+
+    DEBUGFUNC("e1000_config_mac_to_phy");
+
+    /* 82544 or newer MAC, Auto Speed Detection takes care of
+     * MAC speed/duplex configuration. */
+    if (hw->mac_type >= e1000_82544)
+        return E1000_SUCCESS;
+
+    /* Read the Device Control Register and set the bits to Force Speed
+     * and Duplex.
+     */
+    ctrl = er32(CTRL);
+    ctrl |= (E1000_CTRL_FRCSPD | E1000_CTRL_FRCDPX);
+    ctrl &= ~(E1000_CTRL_SPD_SEL | E1000_CTRL_ILOS);
+
+    /* Set up duplex in the Device Control and Transmit Control
+     * registers depending on negotiated values.
+     */
+    ret_val = e1000_read_phy_reg(hw, M88E1000_PHY_SPEC_STATUS, &phy_data);
+    if (ret_val)
+        return ret_val;
+
+    if (phy_data & M88E1000_PSSR_DPLX)
+        ctrl |= E1000_CTRL_FD;
+    else
+        ctrl &= ~E1000_CTRL_FD;
+
+    e1000_config_collision_dist(hw);
+
+    /* Set up speed in the Device Control register depending on
+     * negotiated values.
+     */
+    if ((phy_data & M88E1000_PSSR_SPEED) == M88E1000_PSSR_1000MBS)
+        ctrl |= E1000_CTRL_SPD_1000;
+    else if ((phy_data & M88E1000_PSSR_SPEED) == M88E1000_PSSR_100MBS)
+        ctrl |= E1000_CTRL_SPD_100;
+
+    /* Write the configured values back to the Device Control Reg. */
+    ew32(CTRL, ctrl);
+    return E1000_SUCCESS;
+}
+
+/******************************************************************************
+ * Forces the MAC's flow control settings.
+ *
+ * hw - Struct containing variables accessed by shared code
+ *
+ * Sets the TFCE and RFCE bits in the device control register to reflect
+ * the adapter settings. TFCE and RFCE need to be explicitly set by
+ * software when a Copper PHY is used because autonegotiation is managed
+ * by the PHY rather than the MAC. Software must also configure these
+ * bits when link is forced on a fiber connection.
+ *****************************************************************************/
+s32 e1000_force_mac_fc(struct e1000_hw *hw)
+{
+    u32 ctrl;
+
+    DEBUGFUNC("e1000_force_mac_fc");
+
+    /* Get the current configuration of the Device Control Register */
+    ctrl = er32(CTRL);
+
+    /* Because we didn't get link via the internal auto-negotiation
+     * mechanism (we either forced link or we got link via PHY
+     * auto-neg), we have to manually enable/disable transmit and
+     * receive flow control.
+     *
+     * The "Case" statement below enables/disable flow control
+     * according to the "hw->fc" parameter.
+     *
+     * The possible values of the "fc" parameter are:
+     *      0:  Flow control is completely disabled
+     *      1:  Rx flow control is enabled (we can receive pause
+     *          frames but not send pause frames).
+     *      2:  Tx flow control is enabled (we can send pause frames
+     *          but we do not receive pause frames).
+     *      3:  Both Rx and TX flow control (symmetric) is enabled.
+     *  other:  No other values should be possible at this point.
+     */
+
+    switch (hw->fc) {
+    case E1000_FC_NONE:
+        ctrl &= (~(E1000_CTRL_TFCE | E1000_CTRL_RFCE));
+        break;
+    case E1000_FC_RX_PAUSE:
+        ctrl &= (~E1000_CTRL_TFCE);
+        ctrl |= E1000_CTRL_RFCE;
+        break;
+    case E1000_FC_TX_PAUSE:
+        ctrl &= (~E1000_CTRL_RFCE);
+        ctrl |= E1000_CTRL_TFCE;
+        break;
+    case E1000_FC_FULL:
+        ctrl |= (E1000_CTRL_TFCE | E1000_CTRL_RFCE);
+        break;
+    default:
+        DEBUGOUT("Flow control param set incorrectly\n");
+        return -E1000_ERR_CONFIG;
+    }
+
+    /* Disable TX Flow Control for 82542 (rev 2.0) */
+    if (hw->mac_type == e1000_82542_rev2_0)
+        ctrl &= (~E1000_CTRL_TFCE);
+
+    ew32(CTRL, ctrl);
+    return E1000_SUCCESS;
+}
+
+/******************************************************************************
+ * Configures flow control settings after link is established
+ *
+ * hw - Struct containing variables accessed by shared code
+ *
+ * Should be called immediately after a valid link has been established.
+ * Forces MAC flow control settings if link was forced. When in MII/GMII mode
+ * and autonegotiation is enabled, the MAC flow control settings will be set
+ * based on the flow control negotiated by the PHY. In TBI mode, the TFCE
+ * and RFCE bits will be automatically set to the negotiated flow control mode.
+ *****************************************************************************/
+static s32 e1000_config_fc_after_link_up(struct e1000_hw *hw)
+{
+    s32 ret_val;
+    u16 mii_status_reg;
+    u16 mii_nway_adv_reg;
+    u16 mii_nway_lp_ability_reg;
+    u16 speed;
+    u16 duplex;
+
+    DEBUGFUNC("e1000_config_fc_after_link_up");
+
+    /* Check for the case where we have fiber media and auto-neg failed
+     * so we had to force link.  In this case, we need to force the
+     * configuration of the MAC to match the "fc" parameter.
+     */
+    if (((hw->media_type == e1000_media_type_fiber) && (hw->autoneg_failed)) ||
+        ((hw->media_type == e1000_media_type_internal_serdes) &&
+         (hw->autoneg_failed)) ||
+        ((hw->media_type == e1000_media_type_copper) && (!hw->autoneg))) {
+        ret_val = e1000_force_mac_fc(hw);
+        if (ret_val) {
+            DEBUGOUT("Error forcing flow control settings\n");
+            return ret_val;
+        }
+    }
+
+    /* Check for the case where we have copper media and auto-neg is
+     * enabled.  In this case, we need to check and see if Auto-Neg
+     * has completed, and if so, how the PHY and link partner has
+     * flow control configured.
+     */
+    if ((hw->media_type == e1000_media_type_copper) && hw->autoneg) {
+        /* Read the MII Status Register and check to see if AutoNeg
+         * has completed.  We read this twice because this reg has
+         * some "sticky" (latched) bits.
+         */
+        ret_val = e1000_read_phy_reg(hw, PHY_STATUS, &mii_status_reg);
+        if (ret_val)
+            return ret_val;
+        ret_val = e1000_read_phy_reg(hw, PHY_STATUS, &mii_status_reg);
+        if (ret_val)
+            return ret_val;
+
+        if (mii_status_reg & MII_SR_AUTONEG_COMPLETE) {
+            /* The AutoNeg process has completed, so we now need to
+             * read both the Auto Negotiation Advertisement Register
+             * (Address 4) and the Auto_Negotiation Base Page Ability
+             * Register (Address 5) to determine how flow control was
+             * negotiated.
+             */
+            ret_val = e1000_read_phy_reg(hw, PHY_AUTONEG_ADV,
+                                         &mii_nway_adv_reg);
+            if (ret_val)
+                return ret_val;
+            ret_val = e1000_read_phy_reg(hw, PHY_LP_ABILITY,
+                                         &mii_nway_lp_ability_reg);
+            if (ret_val)
+                return ret_val;
+
+            /* Two bits in the Auto Negotiation Advertisement Register
+             * (Address 4) and two bits in the Auto Negotiation Base
+             * Page Ability Register (Address 5) determine flow control
+             * for both the PHY and the link partner.  The following
+             * table, taken out of the IEEE 802.3ab/D6.0 dated March 25,
+             * 1999, describes these PAUSE resolution bits and how flow
+             * control is determined based upon these settings.
+             * NOTE:  DC = Don't Care
+             *
+             *   LOCAL DEVICE  |   LINK PARTNER
+             * PAUSE | ASM_DIR | PAUSE | ASM_DIR | NIC Resolution
+             *-------|---------|-------|---------|--------------------
+             *   0   |    0    |  DC   |   DC    | E1000_FC_NONE
+             *   0   |    1    |   0   |   DC    | E1000_FC_NONE
+             *   0   |    1    |   1   |    0    | E1000_FC_NONE
+             *   0   |    1    |   1   |    1    | E1000_FC_TX_PAUSE
+             *   1   |    0    |   0   |   DC    | E1000_FC_NONE
+             *   1   |   DC    |   1   |   DC    | E1000_FC_FULL
+             *   1   |    1    |   0   |    0    | E1000_FC_NONE
+             *   1   |    1    |   0   |    1    | E1000_FC_RX_PAUSE
+             *
+             */
+            /* Are both PAUSE bits set to 1?  If so, this implies
+             * Symmetric Flow Control is enabled at both ends.  The
+             * ASM_DIR bits are irrelevant per the spec.
+             *
+             * For Symmetric Flow Control:
+             *
+             *   LOCAL DEVICE  |   LINK PARTNER
+             * PAUSE | ASM_DIR | PAUSE | ASM_DIR | Result
+             *-------|---------|-------|---------|--------------------
+             *   1   |   DC    |   1   |   DC    | E1000_FC_FULL
+             *
+             */
+            if ((mii_nway_adv_reg & NWAY_AR_PAUSE) &&
+                (mii_nway_lp_ability_reg & NWAY_LPAR_PAUSE)) {
+                /* Now we need to check if the user selected RX ONLY
+                 * for pause frames.  In this case, we had to advertise
+                 * FULL flow control because we could not advertise RX
+                 * ONLY. Hence, we must now check to see if we need to
+                 * turn OFF the TRANSMISSION of PAUSE frames.
+                 */
+                if (hw->original_fc == E1000_FC_FULL) {
+                    hw->fc = E1000_FC_FULL;
+                    DEBUGOUT("Flow Control = FULL.\n");
+                } else {
+                    hw->fc = E1000_FC_RX_PAUSE;
+                    DEBUGOUT("Flow Control = RX PAUSE frames only.\n");
+                }
+            }
+            /* For receiving PAUSE frames ONLY.
+             *
+             *   LOCAL DEVICE  |   LINK PARTNER
+             * PAUSE | ASM_DIR | PAUSE | ASM_DIR | Result
+             *-------|---------|-------|---------|--------------------
+             *   0   |    1    |   1   |    1    | E1000_FC_TX_PAUSE
+             *
+             */
+            else if (!(mii_nway_adv_reg & NWAY_AR_PAUSE) &&
+                     (mii_nway_adv_reg & NWAY_AR_ASM_DIR) &&
+                     (mii_nway_lp_ability_reg & NWAY_LPAR_PAUSE) &&
+                     (mii_nway_lp_ability_reg & NWAY_LPAR_ASM_DIR)) {
+                hw->fc = E1000_FC_TX_PAUSE;
+                DEBUGOUT("Flow Control = TX PAUSE frames only.\n");
+            }
+            /* For transmitting PAUSE frames ONLY.
+             *
+             *   LOCAL DEVICE  |   LINK PARTNER
+             * PAUSE | ASM_DIR | PAUSE | ASM_DIR | Result
+             *-------|---------|-------|---------|--------------------
+             *   1   |    1    |   0   |    1    | E1000_FC_RX_PAUSE
+             *
+             */
+            else if ((mii_nway_adv_reg & NWAY_AR_PAUSE) &&
+                     (mii_nway_adv_reg & NWAY_AR_ASM_DIR) &&
+                     !(mii_nway_lp_ability_reg & NWAY_LPAR_PAUSE) &&
+                     (mii_nway_lp_ability_reg & NWAY_LPAR_ASM_DIR)) {
+                hw->fc = E1000_FC_RX_PAUSE;
+                DEBUGOUT("Flow Control = RX PAUSE frames only.\n");
+            }
+            /* Per the IEEE spec, at this point flow control should be
+             * disabled.  However, we want to consider that we could
+             * be connected to a legacy switch that doesn't advertise
+             * desired flow control, but can be forced on the link
+             * partner.  So if we advertised no flow control, that is
+             * what we will resolve to.  If we advertised some kind of
+             * receive capability (Rx Pause Only or Full Flow Control)
+             * and the link partner advertised none, we will configure
+             * ourselves to enable Rx Flow Control only.  We can do
+             * this safely for two reasons:  If the link partner really
+             * didn't want flow control enabled, and we enable Rx, no
+             * harm done since we won't be receiving any PAUSE frames
+             * anyway.  If the intent on the link partner was to have
+             * flow control enabled, then by us enabling RX only, we
+             * can at least receive pause frames and process them.
+             * This is a good idea because in most cases, since we are
+             * predominantly a server NIC, we are more likely to be
+             * asked to delay transmission of packets than to ask our
+             * link partner to pause transmission of frames.
+             */
+            else if ((hw->original_fc == E1000_FC_NONE ||
+                      hw->original_fc == E1000_FC_TX_PAUSE) ||
+                      hw->fc_strict_ieee) {
+                hw->fc = E1000_FC_NONE;
+                DEBUGOUT("Flow Control = NONE.\n");
+            } else {
+                hw->fc = E1000_FC_RX_PAUSE;
+                DEBUGOUT("Flow Control = RX PAUSE frames only.\n");
+            }
+
+            /* Now we need to do one last check...  If we auto-
+             * negotiated to HALF DUPLEX, flow control should not be
+             * enabled per IEEE 802.3 spec.
+             */
+            ret_val = e1000_get_speed_and_duplex(hw, &speed, &duplex);
+            if (ret_val) {
+                DEBUGOUT("Error getting link speed and duplex\n");
+                return ret_val;
+            }
+
+            if (duplex == HALF_DUPLEX)
+                hw->fc = E1000_FC_NONE;
+
+            /* Now we call a subroutine to actually force the MAC
+             * controller to use the correct flow control settings.
+             */
+            ret_val = e1000_force_mac_fc(hw);
+            if (ret_val) {
+                DEBUGOUT("Error forcing flow control settings\n");
+                return ret_val;
+            }
+        } else {
+            DEBUGOUT("Copper PHY and Auto Neg has not completed.\n");
+        }
+    }
+    return E1000_SUCCESS;
+}
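+
+/* Compact restatement of the PAUSE resolution implemented above
+ * (adv = mii_nway_adv_reg, lp = mii_nway_lp_ability_reg):
+ *
+ *   adv.PAUSE && lp.PAUSE                    -> FULL, or RX_PAUSE if we
+ *                                               originally asked for Rx only;
+ *   !adv.PAUSE && adv.ASM_DIR &&
+ *    lp.PAUSE && lp.ASM_DIR                  -> TX_PAUSE;
+ *   adv.PAUSE && adv.ASM_DIR &&
+ *    !lp.PAUSE && lp.ASM_DIR                 -> RX_PAUSE;
+ *   otherwise                                -> NONE if we originally asked
+ *     for NONE or TX_PAUSE (or in strict IEEE mode), else RX_PAUSE (the
+ *     legacy-switch case described above).
+ * A half-duplex result then forces the final setting back to NONE.
+ */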
+
+/******************************************************************************
+ * Checks to see if the link status of the hardware has changed.
+ *
+ * hw - Struct containing variables accessed by shared code
+ *
+ * Called by any function that needs to check the link status of the adapter.
+ *****************************************************************************/
+s32 e1000_check_for_link(struct e1000_hw *hw)
+{
+    u32 rxcw = 0;
+    u32 ctrl;
+    u32 status;
+    u32 rctl;
+    u32 icr;
+    u32 signal = 0;
+    s32 ret_val;
+    u16 phy_data;
+
+    DEBUGFUNC("e1000_check_for_link");
+
+    ctrl = er32(CTRL);
+    status = er32(STATUS);
+
+    /* On adapters with a MAC newer than 82544, SW Definable pin 1 will be
+     * set when the optics detect a signal. On older adapters, it will be
+     * cleared when there is a signal.  This applies to fiber media only.
+     */
+    if ((hw->media_type == e1000_media_type_fiber) ||
+        (hw->media_type == e1000_media_type_internal_serdes)) {
+        rxcw = er32(RXCW);
+
+        if (hw->media_type == e1000_media_type_fiber) {
+            signal = (hw->mac_type > e1000_82544) ? E1000_CTRL_SWDPIN1 : 0;
+            if (status & E1000_STATUS_LU)
+                hw->get_link_status = false;
+        }
+    }
+
+    /* If we have a copper PHY then we only want to go out to the PHY
+     * registers to see if Auto-Neg has completed and/or if our link
+     * status has changed.  The get_link_status flag will be set if we
+     * receive a Link Status Change interrupt or we have Rx Sequence
+     * Errors.
+     */
+    if ((hw->media_type == e1000_media_type_copper) && hw->get_link_status) {
+        /* First we want to see if the MII Status Register reports
+         * link.  If so, then we want to get the current speed/duplex
+         * of the PHY.
+         * Read the register twice since the link bit is sticky.
+         */
+        ret_val = e1000_read_phy_reg(hw, PHY_STATUS, &phy_data);
+        if (ret_val)
+            return ret_val;
+        ret_val = e1000_read_phy_reg(hw, PHY_STATUS, &phy_data);
+        if (ret_val)
+            return ret_val;
+
+        if (phy_data & MII_SR_LINK_STATUS) {
+            hw->get_link_status = false;
+            /* Check if there was DownShift, must be checked immediately after
+             * link-up */
+            e1000_check_downshift(hw);
+
+            /* If we are on 82544 or 82543 silicon and speed/duplex
+             * are forced to 10H or 10F, then we will implement the polarity
+             * reversal workaround.  We disable interrupts first, and upon
+             * returning, restore the device's interrupt state to its previous
+             * value except for the link status change interrupt which will
+             * happen due to the execution of this workaround.
+             */
+
+            if ((hw->mac_type == e1000_82544 || hw->mac_type == e1000_82543) &&
+                (!hw->autoneg) &&
+                (hw->forced_speed_duplex == e1000_10_full ||
+                 hw->forced_speed_duplex == e1000_10_half)) {
+                ew32(IMC, 0xffffffff);
+                ret_val = e1000_polarity_reversal_workaround(hw);
+                icr = er32(ICR);
+                ew32(ICS, (icr & ~E1000_ICS_LSC));
+                ew32(IMS, IMS_ENABLE_MASK);
+            }
+
+        } else {
+            /* No link detected */
+            e1000_config_dsp_after_link_change(hw, false);
+            return 0;
+        }
+
+        /* If we are forcing speed/duplex, then we simply return since
+         * we have already determined whether we have link or not.
+         */
+        if (!hw->autoneg) return -E1000_ERR_CONFIG;
+
+        /* optimize the dsp settings for the igp phy */
+        e1000_config_dsp_after_link_change(hw, true);
+
+        /* We have a M88E1000 PHY and Auto-Neg is enabled.  If we
+         * have Si on board that is 82544 or newer, Auto
+         * Speed Detection takes care of MAC speed/duplex
+         * configuration.  So we only need to configure Collision
+         * Distance in the MAC.  Otherwise, we need to force
+         * speed/duplex on the MAC to the current PHY speed/duplex
+         * settings.
+         */
+        if (hw->mac_type >= e1000_82544)
+            e1000_config_collision_dist(hw);
+        else {
+            ret_val = e1000_config_mac_to_phy(hw);
+            if (ret_val) {
+                DEBUGOUT("Error configuring MAC to PHY settings\n");
+                return ret_val;
+            }
+        }
+
+        /* Configure Flow Control now that Auto-Neg has completed. First, we
+         * need to restore the desired flow control settings because we may
+         * have had to re-autoneg with a different link partner.
+         */
+        ret_val = e1000_config_fc_after_link_up(hw);
+        if (ret_val) {
+            DEBUGOUT("Error configuring flow control\n");
+            return ret_val;
+        }
+
+        /* At this point we know that we are on copper and we have
+         * auto-negotiated link.  These are conditions for checking the link
+         * partner capability register.  We use the link speed to determine if
+         * TBI compatibility needs to be turned on or off.  If the link is not
+         * at gigabit speed, then TBI compatibility is not needed.  If we are
+         * at gigabit speed, we turn on TBI compatibility.
+         */
+        if (hw->tbi_compatibility_en) {
+            u16 speed, duplex;
+            ret_val = e1000_get_speed_and_duplex(hw, &speed, &duplex);
+            if (ret_val) {
+                DEBUGOUT("Error getting link speed and duplex\n");
+                return ret_val;
+            }
+            if (speed != SPEED_1000) {
+                /* If link speed is not set to gigabit speed, we do not need
+                 * to enable TBI compatibility.
+                 */
+                if (hw->tbi_compatibility_on) {
+                    /* If we were previously in this mode, turn it off. */
+                    rctl = er32(RCTL);
+                    rctl &= ~E1000_RCTL_SBP;
+                    ew32(RCTL, rctl);
+                    hw->tbi_compatibility_on = false;
+                }
+            } else {
+                /* If TBI compatibility was previously off, turn it on. For
+                 * compatibility with a TBI link partner, we will store bad
+                 * packets. Some frames have an additional byte on the end and
+                 * will look like CRC errors to the hardware.
+                 */
+                if (!hw->tbi_compatibility_on) {
+                    hw->tbi_compatibility_on = true;
+                    rctl = er32(RCTL);
+                    rctl |= E1000_RCTL_SBP;
+                    ew32(RCTL, rctl);
+                }
+            }
+        }
+    }
+    /* If we don't have link (auto-negotiation failed or link partner cannot
+     * auto-negotiate), the cable is plugged in (we have signal), and our
+     * link partner is not trying to auto-negotiate with us (we are receiving
+     * idles or data), we need to force link up. We also need to give
+     * auto-negotiation time to complete, in case the cable was just plugged
+     * in. The autoneg_failed flag does this.
+     */
+    else if ((((hw->media_type == e1000_media_type_fiber) &&
+              ((ctrl & E1000_CTRL_SWDPIN1) == signal)) ||
+              (hw->media_type == e1000_media_type_internal_serdes)) &&
+              (!(status & E1000_STATUS_LU)) &&
+              (!(rxcw & E1000_RXCW_C))) {
+        if (hw->autoneg_failed == 0) {
+            hw->autoneg_failed = 1;
+            return 0;
+        }
+        DEBUGOUT("NOT RXing /C/, disable AutoNeg and force link.\n");
+
+        /* Disable auto-negotiation in the TXCW register */
+        ew32(TXCW, (hw->txcw & ~E1000_TXCW_ANE));
+
+        /* Force link-up and also force full-duplex. */
+        ctrl = er32(CTRL);
+        ctrl |= (E1000_CTRL_SLU | E1000_CTRL_FD);
+        ew32(CTRL, ctrl);
+
+        /* Configure Flow Control after forcing link up. */
+        ret_val = e1000_config_fc_after_link_up(hw);
+        if (ret_val) {
+            DEBUGOUT("Error configuring flow control\n");
+            return ret_val;
+        }
+    }
+    /* If we are forcing link and we are receiving /C/ ordered sets, re-enable
+     * auto-negotiation in the TXCW register and disable forced link in the
+     * Device Control register in an attempt to auto-negotiate with our link
+     * partner.
+     */
+    else if (((hw->media_type == e1000_media_type_fiber) ||
+              (hw->media_type == e1000_media_type_internal_serdes)) &&
+              (ctrl & E1000_CTRL_SLU) && (rxcw & E1000_RXCW_C)) {
+        DEBUGOUT("RXing /C/, enable AutoNeg and stop forcing link.\n");
+        ew32(TXCW, hw->txcw);
+        ew32(CTRL, (ctrl & ~E1000_CTRL_SLU));
+
+        hw->serdes_link_down = false;
+    }
+    /* If we force link for non-auto-negotiation switch, check link status
+     * based on MAC synchronization for internal serdes media type.
+     */
+    else if ((hw->media_type == e1000_media_type_internal_serdes) &&
+             !(E1000_TXCW_ANE & er32(TXCW))) {
+        /* SYNCH bit and IV bit are sticky. */
+        udelay(10);
+        if (E1000_RXCW_SYNCH & er32(RXCW)) {
+            if (!(rxcw & E1000_RXCW_IV)) {
+                hw->serdes_link_down = false;
+                DEBUGOUT("SERDES: Link is up.\n");
+            }
+        } else {
+            hw->serdes_link_down = true;
+            DEBUGOUT("SERDES: Link is down.\n");
+        }
+    }
+    if ((hw->media_type == e1000_media_type_internal_serdes) &&
+        (E1000_TXCW_ANE & er32(TXCW))) {
+        hw->serdes_link_down = !(E1000_STATUS_LU & er32(STATUS));
+    }
+    return E1000_SUCCESS;
+}
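+
+/* Overview of the link-check paths above:
+ *  - copper with get_link_status set: poll PHY_STATUS (twice, latched bit),
+ *    then fix up the MAC (collision distance or e1000_config_mac_to_phy),
+ *    flow control, and TBI compatibility at gigabit speed;
+ *  - fiber/serdes with no link, a signal present and no /C/ ordered sets:
+ *    disable auto-negotiation in TXCW and force link up;
+ *  - fiber/serdes with forced link while /C/ is being received: re-enable
+ *    auto-negotiation and stop forcing link;
+ *  - serdes without auto-negotiation: derive link state from RXCW SYNCH/IV.
+ */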
+
+/******************************************************************************
+ * Detects the current speed and duplex settings of the hardware.
+ *
+ * hw - Struct containing variables accessed by shared code
+ * speed - Speed of the connection
+ * duplex - Duplex setting of the connection
+ *****************************************************************************/
+s32 e1000_get_speed_and_duplex(struct e1000_hw *hw, u16 *speed, u16 *duplex)
+{
+    u32 status;
+    s32 ret_val;
+    u16 phy_data;
+
+    DEBUGFUNC("e1000_get_speed_and_duplex");
+
+    if (hw->mac_type >= e1000_82543) {
+        status = er32(STATUS);
+        if (status & E1000_STATUS_SPEED_1000) {
+            *speed = SPEED_1000;
+            DEBUGOUT("1000 Mbs, ");
+        } else if (status & E1000_STATUS_SPEED_100) {
+            *speed = SPEED_100;
+            DEBUGOUT("100 Mbs, ");
+        } else {
+            *speed = SPEED_10;
+            DEBUGOUT("10 Mbs, ");
+        }
+
+        if (status & E1000_STATUS_FD) {
+            *duplex = FULL_DUPLEX;
+            DEBUGOUT("Full Duplex\n");
+        } else {
+            *duplex = HALF_DUPLEX;
+            DEBUGOUT(" Half Duplex\n");
+        }
+    } else {
+        DEBUGOUT("1000 Mbs, Full Duplex\n");
+        *speed = SPEED_1000;
+        *duplex = FULL_DUPLEX;
+    }
+
+    /* IGP01 PHY may advertise full duplex operation after speed downgrade even
+     * if it is operating at half duplex.  Here we set the duplex settings to
+     * match the duplex in the link partner's capabilities.
+     */
+    if (hw->phy_type == e1000_phy_igp && hw->speed_downgraded) {
+        ret_val = e1000_read_phy_reg(hw, PHY_AUTONEG_EXP, &phy_data);
+        if (ret_val)
+            return ret_val;
+
+        if (!(phy_data & NWAY_ER_LP_NWAY_CAPS))
+            *duplex = HALF_DUPLEX;
+        else {
+            ret_val = e1000_read_phy_reg(hw, PHY_LP_ABILITY, &phy_data);
+            if (ret_val)
+                return ret_val;
+            if ((*speed == SPEED_100 && !(phy_data & NWAY_LPAR_100TX_FD_CAPS)) ||
+               (*speed == SPEED_10 && !(phy_data & NWAY_LPAR_10T_FD_CAPS)))
+                *duplex = HALF_DUPLEX;
+        }
+    }
+
+    if ((hw->mac_type == e1000_80003es2lan) &&
+        (hw->media_type == e1000_media_type_copper)) {
+        if (*speed == SPEED_1000)
+            ret_val = e1000_configure_kmrn_for_1000(hw);
+        else
+            ret_val = e1000_configure_kmrn_for_10_100(hw, *duplex);
+        if (ret_val)
+            return ret_val;
+    }
+
+    if ((hw->phy_type == e1000_phy_igp_3) && (*speed == SPEED_1000)) {
+        ret_val = e1000_kumeran_lock_loss_workaround(hw);
+        if (ret_val)
+            return ret_val;
+    }
+
+    return E1000_SUCCESS;
+}
+
+/******************************************************************************
+* Blocks until autoneg completes or times out (~4.5 seconds)
+*
+* hw - Struct containing variables accessed by shared code
+******************************************************************************/
+static s32 e1000_wait_autoneg(struct e1000_hw *hw)
+{
+    s32 ret_val;
+    u16 i;
+    u16 phy_data;
+
+    DEBUGFUNC("e1000_wait_autoneg");
+    DEBUGOUT("Waiting for Auto-Neg to complete.\n");
+
+    /* We will wait for autoneg to complete or 4.5 seconds to expire. */
+    for (i = PHY_AUTO_NEG_TIME; i > 0; i--) {
+        /* Read the MII Status Register and wait for Auto-Neg
+         * Complete bit to be set.
+         */
+        ret_val = e1000_read_phy_reg(hw, PHY_STATUS, &phy_data);
+        if (ret_val)
+            return ret_val;
+        ret_val = e1000_read_phy_reg(hw, PHY_STATUS, &phy_data);
+        if (ret_val)
+            return ret_val;
+        if (phy_data & MII_SR_AUTONEG_COMPLETE) {
+            return E1000_SUCCESS;
+        }
+        msleep(100);
+    }
+    return E1000_SUCCESS;
+}
+
+/******************************************************************************
+* Raises the Management Data Clock
+*
+* hw - Struct containing variables accessed by shared code
+* ctrl - Device control register's current value
+******************************************************************************/
+static void e1000_raise_mdi_clk(struct e1000_hw *hw, u32 *ctrl)
+{
+    /* Raise the clock input to the Management Data Clock (by setting the MDC
+     * bit), and then delay 10 microseconds.
+     */
+    ew32(CTRL, (*ctrl | E1000_CTRL_MDC));
+    E1000_WRITE_FLUSH();
+    udelay(10);
+}
+
+/******************************************************************************
+* Lowers the Management Data Clock
+*
+* hw - Struct containing variables accessed by shared code
+* ctrl - Device control register's current value
+******************************************************************************/
+static void e1000_lower_mdi_clk(struct e1000_hw *hw, u32 *ctrl)
+{
+    /* Lower the clock input to the Management Data Clock (by clearing the MDC
+     * bit), and then delay 10 microseconds.
+     */
+    ew32(CTRL, (*ctrl & ~E1000_CTRL_MDC));
+    E1000_WRITE_FLUSH();
+    udelay(10);
+}
+
+/******************************************************************************
+* Shifts data bits out to the PHY
+*
+* hw - Struct containing variables accessed by shared code
+* data - Data to send out to the PHY
+* count - Number of bits to shift out
+*
+* Bits are shifted out in MSB to LSB order.
+******************************************************************************/
+static void e1000_shift_out_mdi_bits(struct e1000_hw *hw, u32 data, u16 count)
+{
+    u32 ctrl;
+    u32 mask;
+
+    /* We need to shift "count" number of bits out to the PHY. So, the value
+     * in the "data" parameter will be shifted out to the PHY one bit at a
+     * time. In order to do this, "data" must be broken down into bits.
+     */
+    mask = 0x01;
+    mask <<= (count - 1);
+
+    ctrl = er32(CTRL);
+
+    /* Set MDIO_DIR and MDC_DIR direction bits to be used as output pins. */
+    ctrl |= (E1000_CTRL_MDIO_DIR | E1000_CTRL_MDC_DIR);
+
+    while (mask) {
+        /* A "1" is shifted out to the PHY by setting the MDIO bit to "1" and
+         * then raising and lowering the Management Data Clock. A "0" is
+         * shifted out to the PHY by setting the MDIO bit to "0" and then
+         * raising and lowering the clock.
+         */
+        if (data & mask)
+            ctrl |= E1000_CTRL_MDIO;
+        else
+            ctrl &= ~E1000_CTRL_MDIO;
+
+        ew32(CTRL, ctrl);
+        E1000_WRITE_FLUSH();
+
+        udelay(10);
+
+        e1000_raise_mdi_clk(hw, &ctrl);
+        e1000_lower_mdi_clk(hw, &ctrl);
+
+        mask = mask >> 1;
+    }
+}
+
+/******************************************************************************
+* Shifts data bits in from the PHY
+*
+* hw - Struct containing variables accessed by shared code
+*
+* Bits are shifted in in MSB to LSB order.
+******************************************************************************/
+static u16 e1000_shift_in_mdi_bits(struct e1000_hw *hw)
+{
+    u32 ctrl;
+    u16 data = 0;
+    u8 i;
+
+    /* In order to read a register from the PHY, we need to shift in a total
+     * of 18 bits from the PHY. The first two bit (turnaround) times are used
+     * to avoid contention on the MDIO pin when a read operation is performed.
+     * These two bits are ignored by us and thrown away. Bits are "shifted in"
+     * by raising the input to the Management Data Clock (setting the MDC bit),
+     * and then reading the value of the MDIO bit.
+     */
+    ctrl = er32(CTRL);
+
+    /* Clear MDIO_DIR (SWDPIO1) to indicate this bit is to be used as input. */
+    ctrl &= ~E1000_CTRL_MDIO_DIR;
+    ctrl &= ~E1000_CTRL_MDIO;
+
+    ew32(CTRL, ctrl);
+    E1000_WRITE_FLUSH();
+
+    /* Raise and Lower the clock before reading in the data. This accounts for
+     * the turnaround bits. The first clock occurred when we clocked out the
+     * last bit of the Register Address.
+     */
+    e1000_raise_mdi_clk(hw, &ctrl);
+    e1000_lower_mdi_clk(hw, &ctrl);
+
+    for (data = 0, i = 0; i < 16; i++) {
+        data = data << 1;
+        e1000_raise_mdi_clk(hw, &ctrl);
+        ctrl = er32(CTRL);
+        /* Check to see if we shifted in a "1". */
+        if (ctrl & E1000_CTRL_MDIO)
+            data |= 1;
+        e1000_lower_mdi_clk(hw, &ctrl);
+    }
+
+    e1000_raise_mdi_clk(hw, &ctrl);
+    e1000_lower_mdi_clk(hw, &ctrl);
+
+    return data;
+}
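+
+/* The two shift routines above bit-bang the MDIO management interface
+ * through the Device Control register: MDIO_DIR/MDC_DIR select the pin
+ * direction, the MDIO bit carries the data, and every bit is clocked by
+ * raising and lowering MDC with 10us settling delays
+ * (e1000_raise_mdi_clk()/e1000_lower_mdi_clk()).  Only pre-82544 MACs use
+ * this path; newer MACs go through the MDIC register instead.
+ */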
+
+static s32 e1000_swfw_sync_acquire(struct e1000_hw *hw, u16 mask)
+{
+    u32 swfw_sync = 0;
+    u32 swmask = mask;
+    u32 fwmask = mask << 16;
+    s32 timeout = 200;
+
+    DEBUGFUNC("e1000_swfw_sync_acquire");
+
+    if (hw->swfwhw_semaphore_present)
+        return e1000_get_software_flag(hw);
+
+    if (!hw->swfw_sync_present)
+        return e1000_get_hw_eeprom_semaphore(hw);
+
+    while (timeout) {
+            if (e1000_get_hw_eeprom_semaphore(hw))
+                return -E1000_ERR_SWFW_SYNC;
+
+            swfw_sync = er32(SW_FW_SYNC);
+            if (!(swfw_sync & (fwmask | swmask))) {
+                break;
+            }
+
+            /* firmware currently using resource (fwmask) */
+            /* or other software thread currently using resource (swmask) */
+            e1000_put_hw_eeprom_semaphore(hw);
+            mdelay(5);
+            timeout--;
+    }
+
+    if (!timeout) {
+        DEBUGOUT("Driver can't access resource, SW_FW_SYNC timeout.\n");
+        return -E1000_ERR_SWFW_SYNC;
+    }
+
+    swfw_sync |= swmask;
+    ew32(SW_FW_SYNC, swfw_sync);
+
+    e1000_put_hw_eeprom_semaphore(hw);
+    return E1000_SUCCESS;
+}
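+
+/* Locking scheme used above: when the SW_FW_SYNC register is present, the
+ * hardware EEPROM semaphore only protects the read-modify-write of
+ * SW_FW_SYNC itself; ownership of the underlying resource (e.g. a PHY) is
+ * the per-resource bit (swmask for the driver, mask << 16 for firmware)
+ * left set in SW_FW_SYNC until e1000_swfw_sync_release() clears it.  MACs
+ * without SW_FW_SYNC fall back to the software flag or to the EEPROM
+ * semaphore alone.
+ */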
+
+static void e1000_swfw_sync_release(struct e1000_hw *hw, u16 mask)
+{
+    u32 swfw_sync;
+    u32 swmask = mask;
+
+    DEBUGFUNC("e1000_swfw_sync_release");
+
+    if (hw->swfwhw_semaphore_present) {
+        e1000_release_software_flag(hw);
+        return;
+    }
+
+    if (!hw->swfw_sync_present) {
+        e1000_put_hw_eeprom_semaphore(hw);
+        return;
+    }
+
+    /* if (e1000_get_hw_eeprom_semaphore(hw))
+     *    return -E1000_ERR_SWFW_SYNC; */
+    while (e1000_get_hw_eeprom_semaphore(hw) != E1000_SUCCESS);
+        /* empty */
+
+    swfw_sync = er32(SW_FW_SYNC);
+    swfw_sync &= ~swmask;
+    ew32(SW_FW_SYNC, swfw_sync);
+
+    e1000_put_hw_eeprom_semaphore(hw);
+}
+
+/*****************************************************************************
+* Reads the value from a PHY register; if the register is on a specific
+* non-zero page, sets the page first.
+* hw - Struct containing variables accessed by shared code
+* reg_addr - address of the PHY register to read
+******************************************************************************/
+s32 e1000_read_phy_reg(struct e1000_hw *hw, u32 reg_addr, u16 *phy_data)
+{
+    u32 ret_val;
+    u16 swfw;
+
+    DEBUGFUNC("e1000_read_phy_reg");
+
+    if ((hw->mac_type == e1000_80003es2lan) &&
+        (er32(STATUS) & E1000_STATUS_FUNC_1)) {
+        swfw = E1000_SWFW_PHY1_SM;
+    } else {
+        swfw = E1000_SWFW_PHY0_SM;
+    }
+    if (e1000_swfw_sync_acquire(hw, swfw))
+        return -E1000_ERR_SWFW_SYNC;
+
+    if ((hw->phy_type == e1000_phy_igp ||
+        hw->phy_type == e1000_phy_igp_3 ||
+        hw->phy_type == e1000_phy_igp_2) &&
+       (reg_addr > MAX_PHY_MULTI_PAGE_REG)) {
+        ret_val = e1000_write_phy_reg_ex(hw, IGP01E1000_PHY_PAGE_SELECT,
+                                         (u16)reg_addr);
+        if (ret_val) {
+            e1000_swfw_sync_release(hw, swfw);
+            return ret_val;
+        }
+    } else if (hw->phy_type == e1000_phy_gg82563) {
+        if (((reg_addr & MAX_PHY_REG_ADDRESS) > MAX_PHY_MULTI_PAGE_REG) ||
+            (hw->mac_type == e1000_80003es2lan)) {
+            /* Select Configuration Page */
+            if ((reg_addr & MAX_PHY_REG_ADDRESS) < GG82563_MIN_ALT_REG) {
+                ret_val = e1000_write_phy_reg_ex(hw, GG82563_PHY_PAGE_SELECT,
+                          (u16)((u16)reg_addr >> GG82563_PAGE_SHIFT));
+            } else {
+                /* Use Alternative Page Select register to access
+                 * registers 30 and 31
+                 */
+                ret_val = e1000_write_phy_reg_ex(hw,
+                                                 GG82563_PHY_PAGE_SELECT_ALT,
+                          (u16)((u16)reg_addr >> GG82563_PAGE_SHIFT));
+            }
+
+            if (ret_val) {
+                e1000_swfw_sync_release(hw, swfw);
+                return ret_val;
+            }
+        }
+    }
+
+    ret_val = e1000_read_phy_reg_ex(hw, MAX_PHY_REG_ADDRESS & reg_addr,
+                                    phy_data);
+
+    e1000_swfw_sync_release(hw, swfw);
+    return ret_val;
+}
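+
+/* Paging scheme used above (and mirrored in e1000_write_phy_reg): for IGP
+ * family PHYs, register addresses above MAX_PHY_MULTI_PAGE_REG are reached
+ * by first writing the full address to IGP01E1000_PHY_PAGE_SELECT; for the
+ * GG82563, the page (reg_addr >> GG82563_PAGE_SHIFT) is written to
+ * GG82563_PHY_PAGE_SELECT, or to the alternative page-select register when
+ * the in-page address is at or above GG82563_MIN_ALT_REG (registers 30/31).
+ */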
+
+static s32 e1000_read_phy_reg_ex(struct e1000_hw *hw, u32 reg_addr,
+				 u16 *phy_data)
+{
+    u32 i;
+    u32 mdic = 0;
+    const u32 phy_addr = 1;
+
+    DEBUGFUNC("e1000_read_phy_reg_ex");
+
+    if (reg_addr > MAX_PHY_REG_ADDRESS) {
+        DEBUGOUT1("PHY Address %d is out of range\n", reg_addr);
+        return -E1000_ERR_PARAM;
+    }
+
+    if (hw->mac_type > e1000_82543) {
+        /* Set up Op-code, Phy Address, and register address in the MDI
+         * Control register.  The MAC will take care of interfacing with the
+         * PHY to retrieve the desired data.
+         */
+        mdic = ((reg_addr << E1000_MDIC_REG_SHIFT) |
+                (phy_addr << E1000_MDIC_PHY_SHIFT) |
+                (E1000_MDIC_OP_READ));
+
+        ew32(MDIC, mdic);
+
+        /* Poll the ready bit to see if the MDI read completed */
+        for (i = 0; i < 64; i++) {
+            udelay(50);
+            mdic = er32(MDIC);
+            if (mdic & E1000_MDIC_READY) break;
+        }
+        if (!(mdic & E1000_MDIC_READY)) {
+            DEBUGOUT("MDI Read did not complete\n");
+            return -E1000_ERR_PHY;
+        }
+        if (mdic & E1000_MDIC_ERROR) {
+            DEBUGOUT("MDI Error\n");
+            return -E1000_ERR_PHY;
+        }
+        *phy_data = (u16)mdic;
+    } else {
+        /* We must first send a preamble through the MDIO pin to signal the
+         * beginning of an MII instruction.  This is done by sending 32
+         * consecutive "1" bits.
+         */
+        e1000_shift_out_mdi_bits(hw, PHY_PREAMBLE, PHY_PREAMBLE_SIZE);
+
+        /* Now combine the next few fields that are required for a read
+         * operation.  We use this method instead of calling the
+         * e1000_shift_out_mdi_bits routine five different times. The format of
+         * a MII read instruction consists of a shift out of 14 bits and is
+         * defined as follows:
+         *    <Preamble><SOF><Op Code><Phy Addr><Reg Addr>
+         * followed by a shift in of 18 bits.  The first two bits shifted in
+         * are TurnAround bits used to avoid contention on the MDIO pin when a
+         * READ operation is performed.  These two bits are thrown away
+         * followed by a shift in of 16 bits which contains the desired data.
+         */
+        mdic = ((reg_addr) | (phy_addr << 5) |
+                (PHY_OP_READ << 10) | (PHY_SOF << 12));
+
+        e1000_shift_out_mdi_bits(hw, mdic, 14);
+
+        /* Now that we've shifted out the read command to the MII, we need to
+         * "shift in" the 16-bit value (18 total bits) of the requested PHY
+         * register address.
+         */
+        *phy_data = e1000_shift_in_mdi_bits(hw);
+    }
+    return E1000_SUCCESS;
+}
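+
+/* Layout of the 14-bit read command shifted out above (MSB first), as
+ * implied by the shift amounts used, assuming the standard 2-bit SOF and
+ * opcode fields:
+ *
+ *   bits 13:12 PHY_SOF, 11:10 PHY_OP_READ, 9:5 phy_addr, 4:0 reg_addr
+ *
+ * followed by 2 turnaround bits and 16 data bits shifted in from the PHY.
+ */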
+
+/******************************************************************************
+* Writes a value to a PHY register
+*
+* hw - Struct containing variables accessed by shared code
+* reg_addr - address of the PHY register to write
+* data - data to write to the PHY
+******************************************************************************/
+s32 e1000_write_phy_reg(struct e1000_hw *hw, u32 reg_addr, u16 phy_data)
+{
+    u32 ret_val;
+    u16 swfw;
+
+    DEBUGFUNC("e1000_write_phy_reg");
+
+    if ((hw->mac_type == e1000_80003es2lan) &&
+        (er32(STATUS) & E1000_STATUS_FUNC_1)) {
+        swfw = E1000_SWFW_PHY1_SM;
+    } else {
+        swfw = E1000_SWFW_PHY0_SM;
+    }
+    if (e1000_swfw_sync_acquire(hw, swfw))
+        return -E1000_ERR_SWFW_SYNC;
+
+    if ((hw->phy_type == e1000_phy_igp ||
+        hw->phy_type == e1000_phy_igp_3 ||
+        hw->phy_type == e1000_phy_igp_2) &&
+       (reg_addr > MAX_PHY_MULTI_PAGE_REG)) {
+        ret_val = e1000_write_phy_reg_ex(hw, IGP01E1000_PHY_PAGE_SELECT,
+                                         (u16)reg_addr);
+        if (ret_val) {
+            e1000_swfw_sync_release(hw, swfw);
+            return ret_val;
+        }
+    } else if (hw->phy_type == e1000_phy_gg82563) {
+        if (((reg_addr & MAX_PHY_REG_ADDRESS) > MAX_PHY_MULTI_PAGE_REG) ||
+            (hw->mac_type == e1000_80003es2lan)) {
+            /* Select Configuration Page */
+            if ((reg_addr & MAX_PHY_REG_ADDRESS) < GG82563_MIN_ALT_REG) {
+                ret_val = e1000_write_phy_reg_ex(hw, GG82563_PHY_PAGE_SELECT,
+                          (u16)((u16)reg_addr >> GG82563_PAGE_SHIFT));
+            } else {
+                /* Use Alternative Page Select register to access
+                 * registers 30 and 31
+                 */
+                ret_val = e1000_write_phy_reg_ex(hw,
+                                                 GG82563_PHY_PAGE_SELECT_ALT,
+                          (u16)((u16)reg_addr >> GG82563_PAGE_SHIFT));
+            }
+
+            if (ret_val) {
+                e1000_swfw_sync_release(hw, swfw);
+                return ret_val;
+            }
+        }
+    }
+
+    ret_val = e1000_write_phy_reg_ex(hw, MAX_PHY_REG_ADDRESS & reg_addr,
+                                     phy_data);
+
+    e1000_swfw_sync_release(hw, swfw);
+    return ret_val;
+}
+
+static s32 e1000_write_phy_reg_ex(struct e1000_hw *hw, u32 reg_addr,
+				  u16 phy_data)
+{
+    u32 i;
+    u32 mdic = 0;
+    const u32 phy_addr = 1;
+
+    DEBUGFUNC("e1000_write_phy_reg_ex");
+
+    if (reg_addr > MAX_PHY_REG_ADDRESS) {
+        DEBUGOUT1("PHY Address %d is out of range\n", reg_addr);
+        return -E1000_ERR_PARAM;
+    }
+
+    if (hw->mac_type > e1000_82543) {
+        /* Set up Op-code, Phy Address, register address, and data intended
+         * for the PHY register in the MDI Control register.  The MAC will take
+         * care of interfacing with the PHY to send the desired data.
+         */
+        mdic = (((u32)phy_data) |
+                (reg_addr << E1000_MDIC_REG_SHIFT) |
+                (phy_addr << E1000_MDIC_PHY_SHIFT) |
+                (E1000_MDIC_OP_WRITE));
+
+        ew32(MDIC, mdic);
+
+        /* Poll the ready bit to see if the MDI write completed */
+        for (i = 0; i < 641; i++) {
+            udelay(5);
+            mdic = er32(MDIC);
+            if (mdic & E1000_MDIC_READY) break;
+        }
+        if (!(mdic & E1000_MDIC_READY)) {
+            DEBUGOUT("MDI Write did not complete\n");
+            return -E1000_ERR_PHY;
+        }
+    } else {
+        /* We'll need to use the SW defined pins to shift the write command
+         * out to the PHY. We first send a preamble to the PHY to signal the
+         * beginning of the MII instruction.  This is done by sending 32
+         * consecutive "1" bits.
+         */
+        e1000_shift_out_mdi_bits(hw, PHY_PREAMBLE, PHY_PREAMBLE_SIZE);
+
+        /* Now combine the remaining required fields that will indicate a
+         * write operation. We use this method instead of calling the
+         * e1000_shift_out_mdi_bits routine for each field in the command. The
+         * format of a MII write instruction is as follows:
+         * <Preamble><SOF><Op Code><Phy Addr><Reg Addr><Turnaround><Data>.
+         */
+        mdic = ((PHY_TURNAROUND) | (reg_addr << 2) | (phy_addr << 7) |
+                (PHY_OP_WRITE << 12) | (PHY_SOF << 14));
+        mdic <<= 16;
+        mdic |= (u32)phy_data;
+
+        e1000_shift_out_mdi_bits(hw, mdic, 32);
+    }
+
+    return E1000_SUCCESS;
+}
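+
+/* Layout of the 32-bit write frame shifted out above (MSB first), as
+ * implied by the shift amounts used, assuming the standard 2-bit SOF,
+ * opcode and turnaround fields:
+ *
+ *   bits 31:30 PHY_SOF, 29:28 PHY_OP_WRITE, 27:23 phy_addr,
+ *        22:18 reg_addr, 17:16 PHY_TURNAROUND, 15:0 phy_data
+ */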
+
+static s32 e1000_read_kmrn_reg(struct e1000_hw *hw, u32 reg_addr, u16 *data)
+{
+    u32 reg_val;
+    u16 swfw;
+    DEBUGFUNC("e1000_read_kmrn_reg");
+
+    if ((hw->mac_type == e1000_80003es2lan) &&
+        (er32(STATUS) & E1000_STATUS_FUNC_1)) {
+        swfw = E1000_SWFW_PHY1_SM;
+    } else {
+        swfw = E1000_SWFW_PHY0_SM;
+    }
+    if (e1000_swfw_sync_acquire(hw, swfw))
+        return -E1000_ERR_SWFW_SYNC;
+
+    /* Write register address */
+    reg_val = ((reg_addr << E1000_KUMCTRLSTA_OFFSET_SHIFT) &
+              E1000_KUMCTRLSTA_OFFSET) |
+              E1000_KUMCTRLSTA_REN;
+    ew32(KUMCTRLSTA, reg_val);
+    udelay(2);
+
+    /* Read the data returned */
+    reg_val = er32(KUMCTRLSTA);
+    *data = (u16)reg_val;
+
+    e1000_swfw_sync_release(hw, swfw);
+    return E1000_SUCCESS;
+}
+
+static s32 e1000_write_kmrn_reg(struct e1000_hw *hw, u32 reg_addr, u16 data)
+{
+    u32 reg_val;
+    u16 swfw;
+    DEBUGFUNC("e1000_write_kmrn_reg");
+
+    if ((hw->mac_type == e1000_80003es2lan) &&
+        (er32(STATUS) & E1000_STATUS_FUNC_1)) {
+        swfw = E1000_SWFW_PHY1_SM;
+    } else {
+        swfw = E1000_SWFW_PHY0_SM;
+    }
+    if (e1000_swfw_sync_acquire(hw, swfw))
+        return -E1000_ERR_SWFW_SYNC;
+
+    reg_val = ((reg_addr << E1000_KUMCTRLSTA_OFFSET_SHIFT) &
+              E1000_KUMCTRLSTA_OFFSET) | data;
+    ew32(KUMCTRLSTA, reg_val);
+    udelay(2);
+
+    e1000_swfw_sync_release(hw, swfw);
+    return E1000_SUCCESS;
+}
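+
+/* Kumeran register access pattern used by the two helpers above: the target
+ * offset is placed in the E1000_KUMCTRLSTA_OFFSET field of KUMCTRLSTA; a
+ * read additionally sets E1000_KUMCTRLSTA_REN and, after a short delay,
+ * returns the result in the low 16 bits of KUMCTRLSTA, while a write
+ * carries its data in those same low 16 bits.
+ */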
+
+/******************************************************************************
+* Returns the PHY to the power-on reset state
+*
+* hw - Struct containing variables accessed by shared code
+******************************************************************************/
+s32 e1000_phy_hw_reset(struct e1000_hw *hw)
+{
+    u32 ctrl, ctrl_ext;
+    u32 led_ctrl;
+    s32 ret_val;
+    u16 swfw;
+
+    DEBUGFUNC("e1000_phy_hw_reset");
+
+    /* If the PHY reset is blocked, it is not an error; we simply return
+     * success without performing the reset. */
+    ret_val = e1000_check_phy_reset_block(hw);
+    if (ret_val)
+        return E1000_SUCCESS;
+
+    DEBUGOUT("Resetting Phy...\n");
+
+    if (hw->mac_type > e1000_82543) {
+        if ((hw->mac_type == e1000_80003es2lan) &&
+            (er32(STATUS) & E1000_STATUS_FUNC_1)) {
+            swfw = E1000_SWFW_PHY1_SM;
+        } else {
+            swfw = E1000_SWFW_PHY0_SM;
+        }
+        if (e1000_swfw_sync_acquire(hw, swfw)) {
+            DEBUGOUT("Unable to acquire swfw sync\n");
+            return -E1000_ERR_SWFW_SYNC;
+        }
+        /* Read the device control register and assert the E1000_CTRL_PHY_RST
+         * bit. Then, take it out of reset.
+         * For pre-e1000_82571 hardware, we delay for 10ms between the assert
+         * and deassert.  For e1000_82571 hardware and later, we instead delay
+         * for 100us between and 10ms after the deassertion.
+         */
+        ctrl = er32(CTRL);
+        ew32(CTRL, ctrl | E1000_CTRL_PHY_RST);
+        E1000_WRITE_FLUSH();
+
+        if (hw->mac_type < e1000_82571)
+            msleep(10);
+        else
+            udelay(100);
+
+        ew32(CTRL, ctrl);
+        E1000_WRITE_FLUSH();
+
+        if (hw->mac_type >= e1000_82571)
+            mdelay(10);
+
+        e1000_swfw_sync_release(hw, swfw);
+    } else {
+        /* Read the Extended Device Control Register, assert the PHY_RESET_DIR
+         * bit to put the PHY into reset. Then, take it out of reset.
+         */
+        ctrl_ext = er32(CTRL_EXT);
+        ctrl_ext |= E1000_CTRL_EXT_SDP4_DIR;
+        ctrl_ext &= ~E1000_CTRL_EXT_SDP4_DATA;
+        ew32(CTRL_EXT, ctrl_ext);
+        E1000_WRITE_FLUSH();
+        msleep(10);
+        ctrl_ext |= E1000_CTRL_EXT_SDP4_DATA;
+        ew32(CTRL_EXT, ctrl_ext);
+        E1000_WRITE_FLUSH();
+    }
+    udelay(150);
+
+    if ((hw->mac_type == e1000_82541) || (hw->mac_type == e1000_82547)) {
+        /* Configure activity LED after PHY reset */
+        led_ctrl = er32(LEDCTL);
+        led_ctrl &= IGP_ACTIVITY_LED_MASK;
+        led_ctrl |= (IGP_ACTIVITY_LED_ENABLE | IGP_LED3_MODE);
+        ew32(LEDCTL, led_ctrl);
+    }
+
+    /* Wait for FW to finish PHY configuration. */
+    ret_val = e1000_get_phy_cfg_done(hw);
+    if (ret_val != E1000_SUCCESS)
+        return ret_val;
+    e1000_release_software_semaphore(hw);
+
+    if ((hw->mac_type == e1000_ich8lan) && (hw->phy_type == e1000_phy_igp_3))
+        ret_val = e1000_init_lcd_from_nvm(hw);
+
+    return ret_val;
+}
+
+/******************************************************************************
+* Resets the PHY
+*
+* hw - Struct containing variables accessed by shared code
+*
+* Sets bit 15 of the MII Control register
+******************************************************************************/
+s32 e1000_phy_reset(struct e1000_hw *hw)
+{
+    s32 ret_val;
+    u16 phy_data;
+
+    DEBUGFUNC("e1000_phy_reset");
+
+    /* If the PHY reset is blocked, it is not an error; we simply return
+     * success without performing the reset. */
+    ret_val = e1000_check_phy_reset_block(hw);
+    if (ret_val)
+        return E1000_SUCCESS;
+
+    switch (hw->phy_type) {
+    case e1000_phy_igp:
+    case e1000_phy_igp_2:
+    case e1000_phy_igp_3:
+    case e1000_phy_ife:
+        ret_val = e1000_phy_hw_reset(hw);
+        if (ret_val)
+            return ret_val;
+        break;
+    default:
+        ret_val = e1000_read_phy_reg(hw, PHY_CTRL, &phy_data);
+        if (ret_val)
+            return ret_val;
+
+        phy_data |= MII_CR_RESET;
+        ret_val = e1000_write_phy_reg(hw, PHY_CTRL, phy_data);
+        if (ret_val)
+            return ret_val;
+
+        udelay(1);
+        break;
+    }
+
+    if (hw->phy_type == e1000_phy_igp || hw->phy_type == e1000_phy_igp_2)
+        e1000_phy_init_script(hw);
+
+    return E1000_SUCCESS;
+}
+
+/******************************************************************************
+* Work-around for 82566 power-down: on D3 entry-
+* 1) disable gigabit link
+* 2) write VR power-down enable
+* 3) read it back
+* if successful continue, else issue LCD reset and repeat
+*
+* hw - struct containing variables accessed by shared code
+******************************************************************************/
+void e1000_phy_powerdown_workaround(struct e1000_hw *hw)
+{
+    s32 reg;
+    u16 phy_data;
+    s32 retry = 0;
+
+    DEBUGFUNC("e1000_phy_powerdown_workaround");
+
+    if (hw->phy_type != e1000_phy_igp_3)
+        return;
+
+    do {
+        /* Disable link */
+        reg = er32(PHY_CTRL);
+        ew32(PHY_CTRL, reg | E1000_PHY_CTRL_GBE_DISABLE |
+                        E1000_PHY_CTRL_NOND0A_GBE_DISABLE);
+
+        /* Write VR power-down enable - bits 9:8 should be 10b */
+        e1000_read_phy_reg(hw, IGP3_VR_CTRL, &phy_data);
+        phy_data |= (1 << 9);
+        phy_data &= ~(1 << 8);
+        e1000_write_phy_reg(hw, IGP3_VR_CTRL, phy_data);
+
+        /* Read it back and test */
+        e1000_read_phy_reg(hw, IGP3_VR_CTRL, &phy_data);
+        if (((phy_data & IGP3_VR_CTRL_MODE_MASK) == IGP3_VR_CTRL_MODE_SHUT) || retry)
+            break;
+
+        /* Issue PHY reset and repeat at most one more time */
+        reg = er32(CTRL);
+        ew32(CTRL, reg | E1000_CTRL_PHY_RST);
+        retry++;
+    } while (retry);
+
+    return;
+
+}
+
+/******************************************************************************
+* Work-around for 82566 Kumeran PCS lock loss:
+* On link status change (i.e. PCI reset, speed change) and link is up and
+* speed is gigabit-
+* 0) if workaround is optionally disabled do nothing
+* 1) wait 1ms for Kumeran link to come up
+* 2) check Kumeran Diagnostic register PCS lock loss bit
+* 3) if not set the link is locked (all is good), otherwise...
+* 4) reset the PHY
+* 5) repeat up to 10 times
+* Note: this is only called for IGP3 copper when speed is 1gb.
+*
+* hw - struct containing variables accessed by shared code
+******************************************************************************/
+static s32 e1000_kumeran_lock_loss_workaround(struct e1000_hw *hw)
+{
+    s32 ret_val;
+    s32 reg;
+    s32 cnt;
+    u16 phy_data;
+
+    if (hw->kmrn_lock_loss_workaround_disabled)
+        return E1000_SUCCESS;
+
+    /* Make sure link is up before proceeding.  If not just return.
+     * Attempting this while link is negotiating fouled up link
+     * stability */
+    ret_val = e1000_read_phy_reg(hw, PHY_STATUS, &phy_data);
+    ret_val = e1000_read_phy_reg(hw, PHY_STATUS, &phy_data);
+
+    if (phy_data & MII_SR_LINK_STATUS) {
+        for (cnt = 0; cnt < 10; cnt++) {
+            /* read once to clear */
+            ret_val = e1000_read_phy_reg(hw, IGP3_KMRN_DIAG, &phy_data);
+            if (ret_val)
+                return ret_val;
+            /* and again to get new status */
+            ret_val = e1000_read_phy_reg(hw, IGP3_KMRN_DIAG, &phy_data);
+            if (ret_val)
+                return ret_val;
+
+            /* check for PCS lock */
+            if (!(phy_data & IGP3_KMRN_DIAG_PCS_LOCK_LOSS))
+                return E1000_SUCCESS;
+
+            /* Issue PHY reset */
+            e1000_phy_hw_reset(hw);
+            mdelay(5);
+        }
+        /* Disable GigE link negotiation */
+        reg = er32(PHY_CTRL);
+        ew32(PHY_CTRL, reg | E1000_PHY_CTRL_GBE_DISABLE |
+                        E1000_PHY_CTRL_NOND0A_GBE_DISABLE);
+
+        /* unable to acquire PCS lock */
+        return E1000_ERR_PHY;
+    }
+
+    return E1000_SUCCESS;
+}
+
+/******************************************************************************
+* Probes the expected PHY address for known PHY IDs
+*
+* hw - Struct containing variables accessed by shared code
+******************************************************************************/
+static s32 e1000_detect_gig_phy(struct e1000_hw *hw)
+{
+    s32 phy_init_status, ret_val;
+    u16 phy_id_high, phy_id_low;
+    bool match = false;
+
+    DEBUGFUNC("e1000_detect_gig_phy");
+
+    if (hw->phy_id != 0)
+        return E1000_SUCCESS;
+
+    /* The 82571 firmware may still be configuring the PHY.  In this
+     * case, we cannot access the PHY until the configuration is done.  So
+     * we explicitly set the PHY values. */
+    if (hw->mac_type == e1000_82571 ||
+        hw->mac_type == e1000_82572) {
+        hw->phy_id = IGP01E1000_I_PHY_ID;
+        hw->phy_type = e1000_phy_igp_2;
+        return E1000_SUCCESS;
+    }
+
+    /* ESB-2 PHY reads require e1000_phy_gg82563 to be set because of a work-
+     * around that forces PHY page 0 to be set or the reads fail.  The rest of
+     * the code in this routine uses e1000_read_phy_reg to read the PHY ID.
+     * So for ESB-2 we need to have this set so our reads won't fail.  If the
+     * attached PHY is not an e1000_phy_gg82563, the routines below will figure
+     * this out as well. */
+    if (hw->mac_type == e1000_80003es2lan)
+        hw->phy_type = e1000_phy_gg82563;
+
+    /* Read the PHY ID Registers to identify which PHY is onboard. */
+    ret_val = e1000_read_phy_reg(hw, PHY_ID1, &phy_id_high);
+    if (ret_val)
+        return ret_val;
+
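+    /* PHY_ID1 supplies the upper 16 bits of the identifier; PHY_ID2 (read
+     * below) supplies the lower bits, with the silicon revision kept in the
+     * low nibble that PHY_REVISION_MASK masks off. */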
+    hw->phy_id = (u32)(phy_id_high << 16);
+    udelay(20);
+    ret_val = e1000_read_phy_reg(hw, PHY_ID2, &phy_id_low);
+    if (ret_val)
+        return ret_val;
+
+    hw->phy_id |= (u32)(phy_id_low & PHY_REVISION_MASK);
+    hw->phy_revision = (u32)phy_id_low & ~PHY_REVISION_MASK;
+
+    switch (hw->mac_type) {
+    case e1000_82543:
+        if (hw->phy_id == M88E1000_E_PHY_ID) match = true;
+        break;
+    case e1000_82544:
+        if (hw->phy_id == M88E1000_I_PHY_ID) match = true;
+        break;
+    case e1000_82540:
+    case e1000_82545:
+    case e1000_82545_rev_3:
+    case e1000_82546:
+    case e1000_82546_rev_3:
+        if (hw->phy_id == M88E1011_I_PHY_ID) match = true;
+        break;
+    case e1000_82541:
+    case e1000_82541_rev_2:
+    case e1000_82547:
+    case e1000_82547_rev_2:
+        if (hw->phy_id == IGP01E1000_I_PHY_ID) match = true;
+        break;
+    case e1000_82573:
+        if (hw->phy_id == M88E1111_I_PHY_ID) match = true;
+        break;
+    case e1000_80003es2lan:
+        if (hw->phy_id == GG82563_E_PHY_ID) match = true;
+        break;
+    case e1000_ich8lan:
+        if (hw->phy_id == IGP03E1000_E_PHY_ID) match = true;
+        if (hw->phy_id == IFE_E_PHY_ID) match = true;
+        if (hw->phy_id == IFE_PLUS_E_PHY_ID) match = true;
+        if (hw->phy_id == IFE_C_E_PHY_ID) match = true;
+        break;
+    default:
+        DEBUGOUT1("Invalid MAC type %d\n", hw->mac_type);
+        return -E1000_ERR_CONFIG;
+    }
+    phy_init_status = e1000_set_phy_type(hw);
+
+    if ((match) && (phy_init_status == E1000_SUCCESS)) {
+        DEBUGOUT1("PHY ID 0x%X detected\n", hw->phy_id);
+        return E1000_SUCCESS;
+    }
+    DEBUGOUT1("Invalid PHY ID 0x%X\n", hw->phy_id);
+    return -E1000_ERR_PHY;
+}
+
+/******************************************************************************
+* Resets the PHY's DSP
+*
+* hw - Struct containing variables accessed by shared code
+******************************************************************************/
+static s32 e1000_phy_reset_dsp(struct e1000_hw *hw)
+{
+    s32 ret_val;
+    DEBUGFUNC("e1000_phy_reset_dsp");
+
+    do {
+        if (hw->phy_type != e1000_phy_gg82563) {
+            ret_val = e1000_write_phy_reg(hw, 29, 0x001d);
+            if (ret_val) break;
+        }
+        ret_val = e1000_write_phy_reg(hw, 30, 0x00c1);
+        if (ret_val) break;
+        ret_val = e1000_write_phy_reg(hw, 30, 0x0000);
+        if (ret_val) break;
+        ret_val = E1000_SUCCESS;
+    } while (0);
+
+    return ret_val;
+}
+
+/******************************************************************************
+* Get PHY information from various PHY registers for igp PHY only.
+*
+* hw - Struct containing variables accessed by shared code
+* phy_info - PHY information structure
+******************************************************************************/
+static s32 e1000_phy_igp_get_info(struct e1000_hw *hw,
+				  struct e1000_phy_info *phy_info)
+{
+    s32 ret_val;
+    u16 phy_data, min_length, max_length, average;
+    e1000_rev_polarity polarity;
+
+    DEBUGFUNC("e1000_phy_igp_get_info");
+
+    /* The downshift status is checked only once, after link is established,
+     * and it is stored in the hw->speed_downgraded parameter. */
+    phy_info->downshift = (e1000_downshift)hw->speed_downgraded;
+
+    /* IGP01E1000 does not need to support it. */
+    phy_info->extended_10bt_distance = e1000_10bt_ext_dist_enable_normal;
+
+    /* IGP01E1000 always corrects polarity reversal */
+    phy_info->polarity_correction = e1000_polarity_reversal_enabled;
+
+    /* Check polarity status */
+    ret_val = e1000_check_polarity(hw, &polarity);
+    if (ret_val)
+        return ret_val;
+
+    phy_info->cable_polarity = polarity;
+
+    ret_val = e1000_read_phy_reg(hw, IGP01E1000_PHY_PORT_STATUS, &phy_data);
+    if (ret_val)
+        return ret_val;
+
+    phy_info->mdix_mode = (e1000_auto_x_mode)((phy_data & IGP01E1000_PSSR_MDIX) >>
+                          IGP01E1000_PSSR_MDIX_SHIFT);
+
+    if ((phy_data & IGP01E1000_PSSR_SPEED_MASK) ==
+       IGP01E1000_PSSR_SPEED_1000MBPS) {
+        /* Local/Remote Receiver Information are only valid at 1000 Mbps */
+        ret_val = e1000_read_phy_reg(hw, PHY_1000T_STATUS, &phy_data);
+        if (ret_val)
+            return ret_val;
+
+        phy_info->local_rx = ((phy_data & SR_1000T_LOCAL_RX_STATUS) >>
+                             SR_1000T_LOCAL_RX_STATUS_SHIFT) ?
+                             e1000_1000t_rx_status_ok : e1000_1000t_rx_status_not_ok;
+        phy_info->remote_rx = ((phy_data & SR_1000T_REMOTE_RX_STATUS) >>
+                              SR_1000T_REMOTE_RX_STATUS_SHIFT) ?
+                              e1000_1000t_rx_status_ok : e1000_1000t_rx_status_not_ok;
+
+        /* Get cable length */
+        ret_val = e1000_get_cable_length(hw, &min_length, &max_length);
+        if (ret_val)
+            return ret_val;
+
+        /* Translate to old method */
+        average = (max_length + min_length) / 2;
+
+        if (average <= e1000_igp_cable_length_50)
+            phy_info->cable_length = e1000_cable_length_50;
+        else if (average <= e1000_igp_cable_length_80)
+            phy_info->cable_length = e1000_cable_length_50_80;
+        else if (average <= e1000_igp_cable_length_110)
+            phy_info->cable_length = e1000_cable_length_80_110;
+        else if (average <= e1000_igp_cable_length_140)
+            phy_info->cable_length = e1000_cable_length_110_140;
+        else
+            phy_info->cable_length = e1000_cable_length_140;
+    }
+
+    return E1000_SUCCESS;
+}
+
+/******************************************************************************
+* Get PHY information from various PHY registers for ife PHY only.
+*
+* hw - Struct containing variables accessed by shared code
+* phy_info - PHY information structure
+******************************************************************************/
+static s32 e1000_phy_ife_get_info(struct e1000_hw *hw,
+				  struct e1000_phy_info *phy_info)
+{
+    s32 ret_val;
+    u16 phy_data;
+    e1000_rev_polarity polarity;
+
+    DEBUGFUNC("e1000_phy_ife_get_info");
+
+    phy_info->downshift = (e1000_downshift)hw->speed_downgraded;
+    phy_info->extended_10bt_distance = e1000_10bt_ext_dist_enable_normal;
+
+    ret_val = e1000_read_phy_reg(hw, IFE_PHY_SPECIAL_CONTROL, &phy_data);
+    if (ret_val)
+        return ret_val;
+    phy_info->polarity_correction =
+                        ((phy_data & IFE_PSC_AUTO_POLARITY_DISABLE) >>
+                        IFE_PSC_AUTO_POLARITY_DISABLE_SHIFT) ?
+                        e1000_polarity_reversal_disabled : e1000_polarity_reversal_enabled;
+
+    if (phy_info->polarity_correction == e1000_polarity_reversal_enabled) {
+        ret_val = e1000_check_polarity(hw, &polarity);
+        if (ret_val)
+            return ret_val;
+    } else {
+        /* Polarity is forced. */
+        polarity = ((phy_data & IFE_PSC_FORCE_POLARITY) >>
+                     IFE_PSC_FORCE_POLARITY_SHIFT) ?
+                     e1000_rev_polarity_reversed : e1000_rev_polarity_normal;
+    }
+    phy_info->cable_polarity = polarity;
+
+    ret_val = e1000_read_phy_reg(hw, IFE_PHY_MDIX_CONTROL, &phy_data);
+    if (ret_val)
+        return ret_val;
+
+    phy_info->mdix_mode = (e1000_auto_x_mode)
+                     ((phy_data & (IFE_PMC_AUTO_MDIX | IFE_PMC_FORCE_MDIX)) >>
+                     IFE_PMC_MDIX_MODE_SHIFT);
+
+    return E1000_SUCCESS;
+}
+
+/******************************************************************************
+* Get PHY information from various PHY registers for m88 PHY only.
+*
+* hw - Struct containing variables accessed by shared code
+* phy_info - PHY information structure
+******************************************************************************/
+static s32 e1000_phy_m88_get_info(struct e1000_hw *hw,
+				  struct e1000_phy_info *phy_info)
+{
+    s32 ret_val;
+    u16 phy_data;
+    e1000_rev_polarity polarity;
+
+    DEBUGFUNC("e1000_phy_m88_get_info");
+
+    /* The downshift status is checked only once, after link is established,
+     * and it is stored in the hw->speed_downgraded parameter. */
+    phy_info->downshift = (e1000_downshift)hw->speed_downgraded;
+
+    ret_val = e1000_read_phy_reg(hw, M88E1000_PHY_SPEC_CTRL, &phy_data);
+    if (ret_val)
+        return ret_val;
+
+    phy_info->extended_10bt_distance =
+        ((phy_data & M88E1000_PSCR_10BT_EXT_DIST_ENABLE) >>
+        M88E1000_PSCR_10BT_EXT_DIST_ENABLE_SHIFT) ?
+        e1000_10bt_ext_dist_enable_lower : e1000_10bt_ext_dist_enable_normal;
+
+    phy_info->polarity_correction =
+        ((phy_data & M88E1000_PSCR_POLARITY_REVERSAL) >>
+        M88E1000_PSCR_POLARITY_REVERSAL_SHIFT) ?
+        e1000_polarity_reversal_disabled : e1000_polarity_reversal_enabled;
+
+    /* Check polarity status */
+    ret_val = e1000_check_polarity(hw, &polarity);
+    if (ret_val)
+        return ret_val;
+    phy_info->cable_polarity = polarity;
+
+    ret_val = e1000_read_phy_reg(hw, M88E1000_PHY_SPEC_STATUS, &phy_data);
+    if (ret_val)
+        return ret_val;
+
+    phy_info->mdix_mode = (e1000_auto_x_mode)((phy_data & M88E1000_PSSR_MDIX) >>
+                          M88E1000_PSSR_MDIX_SHIFT);
+
+    if ((phy_data & M88E1000_PSSR_SPEED) == M88E1000_PSSR_1000MBS) {
+        /* Cable Length Estimation and Local/Remote Receiver Information
+         * are only valid at 1000 Mbps.
+         */
+        if (hw->phy_type != e1000_phy_gg82563) {
+            phy_info->cable_length = (e1000_cable_length)((phy_data & M88E1000_PSSR_CABLE_LENGTH) >>
+                                      M88E1000_PSSR_CABLE_LENGTH_SHIFT);
+        } else {
+            ret_val = e1000_read_phy_reg(hw, GG82563_PHY_DSP_DISTANCE,
+                                         &phy_data);
+            if (ret_val)
+                return ret_val;
+
+            phy_info->cable_length = (e1000_cable_length)(phy_data & GG82563_DSPD_CABLE_LENGTH);
+        }
+
+        ret_val = e1000_read_phy_reg(hw, PHY_1000T_STATUS, &phy_data);
+        if (ret_val)
+            return ret_val;
+
+        phy_info->local_rx = ((phy_data & SR_1000T_LOCAL_RX_STATUS) >>
+                             SR_1000T_LOCAL_RX_STATUS_SHIFT) ?
+                             e1000_1000t_rx_status_ok : e1000_1000t_rx_status_not_ok;
+        phy_info->remote_rx = ((phy_data & SR_1000T_REMOTE_RX_STATUS) >>
+                              SR_1000T_REMOTE_RX_STATUS_SHIFT) ?
+                              e1000_1000t_rx_status_ok : e1000_1000t_rx_status_not_ok;
+
+    }
+
+    return E1000_SUCCESS;
+}
+
+/******************************************************************************
+* Get PHY information from various PHY registers
+*
+* hw - Struct containing variables accessed by shared code
+* phy_info - PHY information structure
+******************************************************************************/
+s32 e1000_phy_get_info(struct e1000_hw *hw, struct e1000_phy_info *phy_info)
+{
+    s32 ret_val;
+    u16 phy_data;
+
+    DEBUGFUNC("e1000_phy_get_info");
+
+    phy_info->cable_length = e1000_cable_length_undefined;
+    phy_info->extended_10bt_distance = e1000_10bt_ext_dist_enable_undefined;
+    phy_info->cable_polarity = e1000_rev_polarity_undefined;
+    phy_info->downshift = e1000_downshift_undefined;
+    phy_info->polarity_correction = e1000_polarity_reversal_undefined;
+    phy_info->mdix_mode = e1000_auto_x_mode_undefined;
+    phy_info->local_rx = e1000_1000t_rx_status_undefined;
+    phy_info->remote_rx = e1000_1000t_rx_status_undefined;
+
+    if (hw->media_type != e1000_media_type_copper) {
+        DEBUGOUT("PHY info is only valid for copper media\n");
+        return -E1000_ERR_CONFIG;
+    }
+
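+    /* The link status bit in PHY_STATUS is latched low, so read the register
+     * twice; the second read reflects the current link state. */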
+    ret_val = e1000_read_phy_reg(hw, PHY_STATUS, &phy_data);
+    if (ret_val)
+        return ret_val;
+
+    ret_val = e1000_read_phy_reg(hw, PHY_STATUS, &phy_data);
+    if (ret_val)
+        return ret_val;
+
+    if ((phy_data & MII_SR_LINK_STATUS) != MII_SR_LINK_STATUS) {
+        DEBUGOUT("PHY info is only valid if link is up\n");
+        return -E1000_ERR_CONFIG;
+    }
+
+    if (hw->phy_type == e1000_phy_igp ||
+        hw->phy_type == e1000_phy_igp_3 ||
+        hw->phy_type == e1000_phy_igp_2)
+        return e1000_phy_igp_get_info(hw, phy_info);
+    else if (hw->phy_type == e1000_phy_ife)
+        return e1000_phy_ife_get_info(hw, phy_info);
+    else
+        return e1000_phy_m88_get_info(hw, phy_info);
+}
+
+s32 e1000_validate_mdi_setting(struct e1000_hw *hw)
+{
+    DEBUGFUNC("e1000_validate_mdi_settings");
+
+    if (!hw->autoneg && (hw->mdix == 0 || hw->mdix == 3)) {
+        DEBUGOUT("Invalid MDI setting detected\n");
+        hw->mdix = 1;
+        return -E1000_ERR_CONFIG;
+    }
+    return E1000_SUCCESS;
+}
+
+
+/******************************************************************************
+ * Sets up eeprom variables in the hw struct.  Must be called after mac_type
+ * is configured.  Additionally, if this is ICH8, the flash controller GbE
+ * registers must be mapped, or this will crash.
+ *
+ * hw - Struct containing variables accessed by shared code
+ *****************************************************************************/
+s32 e1000_init_eeprom_params(struct e1000_hw *hw)
+{
+    struct e1000_eeprom_info *eeprom = &hw->eeprom;
+    u32 eecd = er32(EECD);
+    s32 ret_val = E1000_SUCCESS;
+    u16 eeprom_size;
+
+    DEBUGFUNC("e1000_init_eeprom_params");
+
+    switch (hw->mac_type) {
+    case e1000_82542_rev2_0:
+    case e1000_82542_rev2_1:
+    case e1000_82543:
+    case e1000_82544:
+        eeprom->type = e1000_eeprom_microwire;
+        eeprom->word_size = 64;
+        eeprom->opcode_bits = 3;
+        eeprom->address_bits = 6;
+        eeprom->delay_usec = 50;
+        eeprom->use_eerd = false;
+        eeprom->use_eewr = false;
+        break;
+    case e1000_82540:
+    case e1000_82545:
+    case e1000_82545_rev_3:
+    case e1000_82546:
+    case e1000_82546_rev_3:
+        eeprom->type = e1000_eeprom_microwire;
+        eeprom->opcode_bits = 3;
+        eeprom->delay_usec = 50;
+        if (eecd & E1000_EECD_SIZE) {
+            eeprom->word_size = 256;
+            eeprom->address_bits = 8;
+        } else {
+            eeprom->word_size = 64;
+            eeprom->address_bits = 6;
+        }
+        eeprom->use_eerd = false;
+        eeprom->use_eewr = false;
+        break;
+    case e1000_82541:
+    case e1000_82541_rev_2:
+    case e1000_82547:
+    case e1000_82547_rev_2:
+        if (eecd & E1000_EECD_TYPE) {
+            eeprom->type = e1000_eeprom_spi;
+            eeprom->opcode_bits = 8;
+            eeprom->delay_usec = 1;
+            if (eecd & E1000_EECD_ADDR_BITS) {
+                eeprom->page_size = 32;
+                eeprom->address_bits = 16;
+            } else {
+                eeprom->page_size = 8;
+                eeprom->address_bits = 8;
+            }
+        } else {
+            eeprom->type = e1000_eeprom_microwire;
+            eeprom->opcode_bits = 3;
+            eeprom->delay_usec = 50;
+            if (eecd & E1000_EECD_ADDR_BITS) {
+                eeprom->word_size = 256;
+                eeprom->address_bits = 8;
+            } else {
+                eeprom->word_size = 64;
+                eeprom->address_bits = 6;
+            }
+        }
+        eeprom->use_eerd = false;
+        eeprom->use_eewr = false;
+        break;
+    case e1000_82571:
+    case e1000_82572:
+        eeprom->type = e1000_eeprom_spi;
+        eeprom->opcode_bits = 8;
+        eeprom->delay_usec = 1;
+        if (eecd & E1000_EECD_ADDR_BITS) {
+            eeprom->page_size = 32;
+            eeprom->address_bits = 16;
+        } else {
+            eeprom->page_size = 8;
+            eeprom->address_bits = 8;
+        }
+        eeprom->use_eerd = false;
+        eeprom->use_eewr = false;
+        break;
+    case e1000_82573:
+        eeprom->type = e1000_eeprom_spi;
+        eeprom->opcode_bits = 8;
+        eeprom->delay_usec = 1;
+        if (eecd & E1000_EECD_ADDR_BITS) {
+            eeprom->page_size = 32;
+            eeprom->address_bits = 16;
+        } else {
+            eeprom->page_size = 8;
+            eeprom->address_bits = 8;
+        }
+        eeprom->use_eerd = true;
+        eeprom->use_eewr = true;
+        if (!e1000_is_onboard_nvm_eeprom(hw)) {
+            eeprom->type = e1000_eeprom_flash;
+            eeprom->word_size = 2048;
+
+            /* Ensure that the Autonomous FLASH update bit is cleared due to
+             * Flash update issue on parts which use a FLASH for NVM. */
+            eecd &= ~E1000_EECD_AUPDEN;
+            ew32(EECD, eecd);
+        }
+        break;
+    case e1000_80003es2lan:
+        eeprom->type = e1000_eeprom_spi;
+        eeprom->opcode_bits = 8;
+        eeprom->delay_usec = 1;
+        if (eecd & E1000_EECD_ADDR_BITS) {
+            eeprom->page_size = 32;
+            eeprom->address_bits = 16;
+        } else {
+            eeprom->page_size = 8;
+            eeprom->address_bits = 8;
+        }
+        eeprom->use_eerd = true;
+        eeprom->use_eewr = false;
+        break;
+    case e1000_ich8lan:
+        {
+        s32  i = 0;
+        u32 flash_size = E1000_READ_ICH_FLASH_REG(hw, ICH_FLASH_GFPREG);
+
+        eeprom->type = e1000_eeprom_ich8;
+        eeprom->use_eerd = false;
+        eeprom->use_eewr = false;
+        eeprom->word_size = E1000_SHADOW_RAM_WORDS;
+
+        /* Zero the shadow RAM structure. But don't load it from NVM
+         * so as to save time for driver init */
+        if (hw->eeprom_shadow_ram != NULL) {
+            for (i = 0; i < E1000_SHADOW_RAM_WORDS; i++) {
+                hw->eeprom_shadow_ram[i].modified = false;
+                hw->eeprom_shadow_ram[i].eeprom_word = 0xFFFF;
+            }
+        }
+
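+        /* GFPREG describes the GbE region of the flash in units of sectors:
+         * bits 12:0 (ICH_GFPREG_BASE_MASK) hold the first sector and bits
+         * 28:16 the last one.  From this, derive the byte offset of the
+         * region and the size of one of its two banks in 16-bit words. */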
+        hw->flash_base_addr = (flash_size & ICH_GFPREG_BASE_MASK) *
+                              ICH_FLASH_SECTOR_SIZE;
+
+        hw->flash_bank_size = ((flash_size >> 16) & ICH_GFPREG_BASE_MASK) + 1;
+        hw->flash_bank_size -= (flash_size & ICH_GFPREG_BASE_MASK);
+
+        hw->flash_bank_size *= ICH_FLASH_SECTOR_SIZE;
+
+        hw->flash_bank_size /= 2 * sizeof(u16);
+
+        break;
+        }
+    default:
+        break;
+    }
+
+    if (eeprom->type == e1000_eeprom_spi) {
+        /* eeprom_size will be an enum [0..8] that maps to eeprom sizes 128B to
+         * 32KB (incremented by powers of 2).
+         */
+        if (hw->mac_type <= e1000_82547_rev_2) {
+            /* Set to default value for initial eeprom read. */
+            eeprom->word_size = 64;
+            ret_val = e1000_read_eeprom(hw, EEPROM_CFG, 1, &eeprom_size);
+            if (ret_val)
+                return ret_val;
+            eeprom_size = (eeprom_size & EEPROM_SIZE_MASK) >> EEPROM_SIZE_SHIFT;
+            /* 256B eeprom size was not supported in earlier hardware, so we
+             * bump eeprom_size up one to ensure that "1" (which maps to 256B)
+             * is never the result used in the shifting logic below. */
+            if (eeprom_size)
+                eeprom_size++;
+        } else {
+            eeprom_size = (u16)((eecd & E1000_EECD_SIZE_EX_MASK) >>
+                          E1000_EECD_SIZE_EX_SHIFT);
+        }
+
+        eeprom->word_size = 1 << (eeprom_size + EEPROM_WORD_SIZE_SHIFT);
+    }
+    return ret_val;
+}
+
+/******************************************************************************
+ * Raises the EEPROM's clock input.
+ *
+ * hw - Struct containing variables accessed by shared code
+ * eecd - EECD's current value
+ *****************************************************************************/
+static void e1000_raise_ee_clk(struct e1000_hw *hw, u32 *eecd)
+{
+    /* Raise the clock input to the EEPROM (by setting the SK bit), and then
+     * wait <delay> microseconds.
+     */
+    *eecd = *eecd | E1000_EECD_SK;
+    ew32(EECD, *eecd);
+    E1000_WRITE_FLUSH();
+    udelay(hw->eeprom.delay_usec);
+}
+
+/******************************************************************************
+ * Lowers the EEPROM's clock input.
+ *
+ * hw - Struct containing variables accessed by shared code
+ * eecd - EECD's current value
+ *****************************************************************************/
+static void e1000_lower_ee_clk(struct e1000_hw *hw, u32 *eecd)
+{
+    /* Lower the clock input to the EEPROM (by clearing the SK bit), and then
+     * wait 50 microseconds.
+     */
+    *eecd = *eecd & ~E1000_EECD_SK;
+    ew32(EECD, *eecd);
+    E1000_WRITE_FLUSH();
+    udelay(hw->eeprom.delay_usec);
+}
+
+/******************************************************************************
+ * Shift data bits out to the EEPROM.
+ *
+ * hw - Struct containing variables accessed by shared code
+ * data - data to send to the EEPROM
+ * count - number of bits to shift out
+ *****************************************************************************/
+static void e1000_shift_out_ee_bits(struct e1000_hw *hw, u16 data, u16 count)
+{
+    struct e1000_eeprom_info *eeprom = &hw->eeprom;
+    u32 eecd;
+    u32 mask;
+
+    /* We need to shift "count" bits out to the EEPROM. So, value in the
+     * "data" parameter will be shifted out to the EEPROM one bit at a time.
+     * In order to do this, "data" must be broken down into bits.
+     */
+    mask = 0x01 << (count - 1);
+    eecd = er32(EECD);
+    if (eeprom->type == e1000_eeprom_microwire) {
+        eecd &= ~E1000_EECD_DO;
+    } else if (eeprom->type == e1000_eeprom_spi) {
+        eecd |= E1000_EECD_DO;
+    }
+    do {
+        /* A "1" is shifted out to the EEPROM by setting bit "DI" to a "1",
+         * and then raising and then lowering the clock (the SK bit controls
+         * the clock input to the EEPROM).  A "0" is shifted out to the EEPROM
+         * by setting "DI" to "0" and then raising and then lowering the clock.
+         */
+        eecd &= ~E1000_EECD_DI;
+
+        if (data & mask)
+            eecd |= E1000_EECD_DI;
+
+        ew32(EECD, eecd);
+        E1000_WRITE_FLUSH();
+
+        udelay(eeprom->delay_usec);
+
+        e1000_raise_ee_clk(hw, &eecd);
+        e1000_lower_ee_clk(hw, &eecd);
+
+        mask = mask >> 1;
+
+    } while (mask);
+
+    /* We leave the "DI" bit set to "0" when we leave this routine. */
+    eecd &= ~E1000_EECD_DI;
+    ew32(EECD, eecd);
+}
+
+/******************************************************************************
+ * Shift data bits in from the EEPROM
+ *
+ * hw - Struct containing variables accessed by shared code
+ *****************************************************************************/
+static u16 e1000_shift_in_ee_bits(struct e1000_hw *hw, u16 count)
+{
+    u32 eecd;
+    u32 i;
+    u16 data;
+
+    /* In order to read a register from the EEPROM, we need to shift 'count'
+     * bits in from the EEPROM. Bits are "shifted in" by raising the clock
+     * input to the EEPROM (setting the SK bit), and then reading the value of
+     * the "DO" bit.  During this "shifting in" process the "DI" bit should
+     * always be clear.
+     */
+
+    eecd = er32(EECD);
+
+    eecd &= ~(E1000_EECD_DO | E1000_EECD_DI);
+    data = 0;
+
+    for (i = 0; i < count; i++) {
+        data = data << 1;
+        e1000_raise_ee_clk(hw, &eecd);
+
+        eecd = er32(EECD);
+
+        eecd &= ~(E1000_EECD_DI);
+        if (eecd & E1000_EECD_DO)
+            data |= 1;
+
+        e1000_lower_ee_clk(hw, &eecd);
+    }
+
+    return data;
+}
+
+/******************************************************************************
+ * Prepares EEPROM for access
+ *
+ * hw - Struct containing variables accessed by shared code
+ *
+ * Lowers EEPROM clock. Clears input pin. Sets the chip select pin. This
+ * function should be called before issuing a command to the EEPROM.
+ *****************************************************************************/
+static s32 e1000_acquire_eeprom(struct e1000_hw *hw)
+{
+    struct e1000_eeprom_info *eeprom = &hw->eeprom;
+    u32 eecd, i=0;
+
+    DEBUGFUNC("e1000_acquire_eeprom");
+
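+    /* Take the software/firmware EEPROM semaphore first so that firmware or
+     * the other port cannot access the NVM while we bit-bang it. */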
+    if (e1000_swfw_sync_acquire(hw, E1000_SWFW_EEP_SM))
+        return -E1000_ERR_SWFW_SYNC;
+    eecd = er32(EECD);
+
+    if (hw->mac_type != e1000_82573) {
+        /* Request EEPROM Access */
+        if (hw->mac_type > e1000_82544) {
+            eecd |= E1000_EECD_REQ;
+            ew32(EECD, eecd);
+            eecd = er32(EECD);
+            while ((!(eecd & E1000_EECD_GNT)) &&
+                  (i < E1000_EEPROM_GRANT_ATTEMPTS)) {
+                i++;
+                udelay(5);
+                eecd = er32(EECD);
+            }
+            if (!(eecd & E1000_EECD_GNT)) {
+                eecd &= ~E1000_EECD_REQ;
+                ew32(EECD, eecd);
+                DEBUGOUT("Could not acquire EEPROM grant\n");
+                e1000_swfw_sync_release(hw, E1000_SWFW_EEP_SM);
+                return -E1000_ERR_EEPROM;
+            }
+        }
+    }
+
+    /* Setup EEPROM for Read/Write */
+
+    if (eeprom->type == e1000_eeprom_microwire) {
+        /* Clear SK and DI */
+        eecd &= ~(E1000_EECD_DI | E1000_EECD_SK);
+        ew32(EECD, eecd);
+
+        /* Set CS */
+        eecd |= E1000_EECD_CS;
+        ew32(EECD, eecd);
+    } else if (eeprom->type == e1000_eeprom_spi) {
+        /* Clear SK and CS */
+        eecd &= ~(E1000_EECD_CS | E1000_EECD_SK);
+        ew32(EECD, eecd);
+        udelay(1);
+    }
+
+    return E1000_SUCCESS;
+}
+
+/******************************************************************************
+ * Returns EEPROM to a "standby" state
+ *
+ * hw - Struct containing variables accessed by shared code
+ *****************************************************************************/
+static void e1000_standby_eeprom(struct e1000_hw *hw)
+{
+    struct e1000_eeprom_info *eeprom = &hw->eeprom;
+    u32 eecd;
+
+    eecd = er32(EECD);
+
+    if (eeprom->type == e1000_eeprom_microwire) {
+        eecd &= ~(E1000_EECD_CS | E1000_EECD_SK);
+        ew32(EECD, eecd);
+        E1000_WRITE_FLUSH();
+        udelay(eeprom->delay_usec);
+
+        /* Clock high */
+        eecd |= E1000_EECD_SK;
+        ew32(EECD, eecd);
+        E1000_WRITE_FLUSH();
+        udelay(eeprom->delay_usec);
+
+        /* Select EEPROM */
+        eecd |= E1000_EECD_CS;
+        ew32(EECD, eecd);
+        E1000_WRITE_FLUSH();
+        udelay(eeprom->delay_usec);
+
+        /* Clock low */
+        eecd &= ~E1000_EECD_SK;
+        ew32(EECD, eecd);
+        E1000_WRITE_FLUSH();
+        udelay(eeprom->delay_usec);
+    } else if (eeprom->type == e1000_eeprom_spi) {
+        /* Toggle CS to flush commands */
+        eecd |= E1000_EECD_CS;
+        ew32(EECD, eecd);
+        E1000_WRITE_FLUSH();
+        udelay(eeprom->delay_usec);
+        eecd &= ~E1000_EECD_CS;
+        ew32(EECD, eecd);
+        E1000_WRITE_FLUSH();
+        udelay(eeprom->delay_usec);
+    }
+}
+
+/******************************************************************************
+ * Terminates a command by inverting the EEPROM's chip select pin
+ *
+ * hw - Struct containing variables accessed by shared code
+ *****************************************************************************/
+static void e1000_release_eeprom(struct e1000_hw *hw)
+{
+    u32 eecd;
+
+    DEBUGFUNC("e1000_release_eeprom");
+
+    eecd = er32(EECD);
+
+    if (hw->eeprom.type == e1000_eeprom_spi) {
+        eecd |= E1000_EECD_CS;  /* Pull CS high */
+        eecd &= ~E1000_EECD_SK; /* Lower SCK */
+
+        ew32(EECD, eecd);
+
+        udelay(hw->eeprom.delay_usec);
+    } else if (hw->eeprom.type == e1000_eeprom_microwire) {
+        /* cleanup eeprom */
+
+        /* CS on Microwire is active-high */
+        eecd &= ~(E1000_EECD_CS | E1000_EECD_DI);
+
+        ew32(EECD, eecd);
+
+        /* Rising edge of clock */
+        eecd |= E1000_EECD_SK;
+        ew32(EECD, eecd);
+        E1000_WRITE_FLUSH();
+        udelay(hw->eeprom.delay_usec);
+
+        /* Falling edge of clock */
+        eecd &= ~E1000_EECD_SK;
+        ew32(EECD, eecd);
+        E1000_WRITE_FLUSH();
+        udelay(hw->eeprom.delay_usec);
+    }
+
+    /* Stop requesting EEPROM access */
+    if (hw->mac_type > e1000_82544) {
+        eecd &= ~E1000_EECD_REQ;
+        ew32(EECD, eecd);
+    }
+
+    e1000_swfw_sync_release(hw, E1000_SWFW_EEP_SM);
+}
+
+/******************************************************************************
+ * Waits for the SPI EEPROM to finish its previous command and become ready.
+ *
+ * hw - Struct containing variables accessed by shared code
+ *****************************************************************************/
+static s32 e1000_spi_eeprom_ready(struct e1000_hw *hw)
+{
+    u16 retry_count = 0;
+    u8 spi_stat_reg;
+
+    DEBUGFUNC("e1000_spi_eeprom_ready");
+
+    /* Read "Status Register" repeatedly until the LSB is cleared.  The
+     * EEPROM will signal that the command has been completed by clearing
+     * bit 0 of the internal status register.  If it's not cleared within
+     * 5 milliseconds, then error out.
+     */
+    retry_count = 0;
+    do {
+        e1000_shift_out_ee_bits(hw, EEPROM_RDSR_OPCODE_SPI,
+                                hw->eeprom.opcode_bits);
+        spi_stat_reg = (u8)e1000_shift_in_ee_bits(hw, 8);
+        if (!(spi_stat_reg & EEPROM_STATUS_RDY_SPI))
+            break;
+
+        udelay(5);
+        retry_count += 5;
+
+        e1000_standby_eeprom(hw);
+    } while (retry_count < EEPROM_MAX_RETRY_SPI);
+
+    /* ATMEL SPI write time could vary from 0-20mSec on 3.3V devices (and
+     * only 0-5mSec on 5V devices)
+     */
+    if (retry_count >= EEPROM_MAX_RETRY_SPI) {
+        DEBUGOUT("SPI EEPROM Status error\n");
+        return -E1000_ERR_EEPROM;
+    }
+
+    return E1000_SUCCESS;
+}
+
+/******************************************************************************
+ * Reads a 16 bit word from the EEPROM.
+ *
+ * hw - Struct containing variables accessed by shared code
+ * offset - offset of  word in the EEPROM to read
+ * data - word read from the EEPROM
+ * words - number of words to read
+ *****************************************************************************/
+s32 e1000_read_eeprom(struct e1000_hw *hw, u16 offset, u16 words, u16 *data)
+{
+    s32 ret;
+    spin_lock(&e1000_eeprom_lock);
+    ret = e1000_do_read_eeprom(hw, offset, words, data);
+    spin_unlock(&e1000_eeprom_lock);
+    return ret;
+}
+
+static s32 e1000_do_read_eeprom(struct e1000_hw *hw, u16 offset, u16 words, u16 *data)
+{
+    struct e1000_eeprom_info *eeprom = &hw->eeprom;
+    u32 i = 0;
+
+    DEBUGFUNC("e1000_read_eeprom");
+
+    /* If eeprom is not yet detected, do so now */
+    if (eeprom->word_size == 0)
+        e1000_init_eeprom_params(hw);
+
+    /* A check for invalid values:  offset too large, too many words, and not
+     * enough words.
+     */
+    if ((offset >= eeprom->word_size) || (words > eeprom->word_size - offset) ||
+       (words == 0)) {
+        DEBUGOUT2("\"words\" parameter out of bounds. Words = %d, size = %d\n", offset, eeprom->word_size);
+        return -E1000_ERR_EEPROM;
+    }
+
+    /* EEPROM's that don't use EERD to read require us to bit-bang the SPI
+     * directly. In this case, we need to acquire the EEPROM so that
+     * FW or other port software does not interrupt.
+     */
+    if (e1000_is_onboard_nvm_eeprom(hw) && !hw->eeprom.use_eerd) {
+        /* Prepare the EEPROM for bit-bang reading */
+        if (e1000_acquire_eeprom(hw) != E1000_SUCCESS)
+            return -E1000_ERR_EEPROM;
+    }
+
+    /* EERD register EEPROM access requires no eeprom acquire/release */
+    if (eeprom->use_eerd)
+        return e1000_read_eeprom_eerd(hw, offset, words, data);
+
+    /* ICH EEPROM access is done via the ICH flash controller */
+    if (eeprom->type == e1000_eeprom_ich8)
+        return e1000_read_eeprom_ich8(hw, offset, words, data);
+
+    /* Set up the SPI or Microwire EEPROM for bit-bang reading.  We have
+     * acquired the EEPROM at this point, so any returns should release it */
+    if (eeprom->type == e1000_eeprom_spi) {
+        u16 word_in;
+        u8 read_opcode = EEPROM_READ_OPCODE_SPI;
+
+        if (e1000_spi_eeprom_ready(hw)) {
+            e1000_release_eeprom(hw);
+            return -E1000_ERR_EEPROM;
+        }
+
+        e1000_standby_eeprom(hw);
+
+        /* Some SPI eeproms use the 8th address bit embedded in the opcode */
+        if ((eeprom->address_bits == 8) && (offset >= 128))
+            read_opcode |= EEPROM_A8_OPCODE_SPI;
+
+        /* Send the READ command (opcode + addr)  */
+        e1000_shift_out_ee_bits(hw, read_opcode, eeprom->opcode_bits);
+        e1000_shift_out_ee_bits(hw, (u16)(offset*2), eeprom->address_bits);
+
+        /* Read the data.  The address of the eeprom internally increments with
+         * each byte (spi) being read, saving on the overhead of eeprom setup
+         * and tear-down.  The address counter will roll over if reading beyond
+         * the size of the eeprom, thus allowing the entire memory to be read
+         * starting from any offset. */
+        for (i = 0; i < words; i++) {
+            word_in = e1000_shift_in_ee_bits(hw, 16);
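+            /* The word arrives high byte first from the serial stream; swap
+             * the bytes into the order the rest of the driver expects. */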
+            data[i] = (word_in >> 8) | (word_in << 8);
+        }
+    } else if (eeprom->type == e1000_eeprom_microwire) {
+        for (i = 0; i < words; i++) {
+            /* Send the READ command (opcode + addr)  */
+            e1000_shift_out_ee_bits(hw, EEPROM_READ_OPCODE_MICROWIRE,
+                                    eeprom->opcode_bits);
+            e1000_shift_out_ee_bits(hw, (u16)(offset + i),
+                                    eeprom->address_bits);
+
+            /* Read the data.  For microwire, each word requires the overhead
+             * of eeprom setup and tear-down. */
+            data[i] = e1000_shift_in_ee_bits(hw, 16);
+            e1000_standby_eeprom(hw);
+        }
+    }
+
+    /* End this read operation */
+    e1000_release_eeprom(hw);
+
+    return E1000_SUCCESS;
+}
+
+/******************************************************************************
+ * Reads a 16 bit word from the EEPROM using the EERD register.
+ *
+ * hw - Struct containing variables accessed by shared code
+ * offset - offset of  word in the EEPROM to read
+ * data - word read from the EEPROM
+ * words - number of words to read
+ *****************************************************************************/
+static s32 e1000_read_eeprom_eerd(struct e1000_hw *hw, u16 offset, u16 words,
+				  u16 *data)
+{
+    u32 i, eerd = 0;
+    s32 error = 0;
+
+    for (i = 0; i < words; i++) {
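+        /* Program the word address and the START bit into EERD; the hardware
+         * sets the DONE bit and latches the data once the read completes. */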
+        eerd = ((offset+i) << E1000_EEPROM_RW_ADDR_SHIFT) +
+                         E1000_EEPROM_RW_REG_START;
+
+        ew32(EERD, eerd);
+        error = e1000_poll_eerd_eewr_done(hw, E1000_EEPROM_POLL_READ);
+
+        if (error) {
+            break;
+        }
+        data[i] = (er32(EERD) >> E1000_EEPROM_RW_REG_DATA);
+
+    }
+
+    return error;
+}
+
+/******************************************************************************
+ * Writes 16 bit words to the EEPROM using the EEWR register.
+ *
+ * hw - Struct containing variables accessed by shared code
+ * offset - offset of word in the EEPROM to write to
+ * data - words to be written to the EEPROM
+ * words - number of words to write
+ *****************************************************************************/
+static s32 e1000_write_eeprom_eewr(struct e1000_hw *hw, u16 offset, u16 words,
+				   u16 *data)
+{
+    u32    register_value = 0;
+    u32    i              = 0;
+    s32     error          = 0;
+
+    if (e1000_swfw_sync_acquire(hw, E1000_SWFW_EEP_SM))
+        return -E1000_ERR_SWFW_SYNC;
+
+    for (i = 0; i < words; i++) {
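+        /* Pack the data word, the word address and the START bit into one
+         * EEWR value; the controller is polled for DONE before and after the
+         * write is issued. */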
+        register_value = (data[i] << E1000_EEPROM_RW_REG_DATA) |
+                         ((offset+i) << E1000_EEPROM_RW_ADDR_SHIFT) |
+                         E1000_EEPROM_RW_REG_START;
+
+        error = e1000_poll_eerd_eewr_done(hw, E1000_EEPROM_POLL_WRITE);
+        if (error) {
+            break;
+        }
+
+        ew32(EEWR, register_value);
+
+        error = e1000_poll_eerd_eewr_done(hw, E1000_EEPROM_POLL_WRITE);
+
+        if (error) {
+            break;
+        }
+    }
+
+    e1000_swfw_sync_release(hw, E1000_SWFW_EEP_SM);
+    return error;
+}
+
+/******************************************************************************
+ * Polls the DONE bit of the EERD or EEWR register to determine when the access is done.
+ *
+ * hw - Struct containing variables accessed by shared code
+ *****************************************************************************/
+static s32 e1000_poll_eerd_eewr_done(struct e1000_hw *hw, int eerd)
+{
+    u32 attempts = 100000;
+    u32 i, reg = 0;
+    s32 done = E1000_ERR_EEPROM;
+
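+    /* Poll every 5 us, giving the access roughly 500 ms to complete. */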
+    for (i = 0; i < attempts; i++) {
+        if (eerd == E1000_EEPROM_POLL_READ)
+            reg = er32(EERD);
+        else
+            reg = er32(EEWR);
+
+        if (reg & E1000_EEPROM_RW_REG_DONE) {
+            done = E1000_SUCCESS;
+            break;
+        }
+        udelay(5);
+    }
+
+    return done;
+}
+
+/***************************************************************************
+* Description:     Determines if the onboard NVM is FLASH or EEPROM.
+*
+* hw - Struct containing variables accessed by shared code
+****************************************************************************/
+static bool e1000_is_onboard_nvm_eeprom(struct e1000_hw *hw)
+{
+    u32 eecd = 0;
+
+    DEBUGFUNC("e1000_is_onboard_nvm_eeprom");
+
+    if (hw->mac_type == e1000_ich8lan)
+        return false;
+
+    if (hw->mac_type == e1000_82573) {
+        eecd = er32(EECD);
+
+        /* Isolate bits 15 & 16 */
+        eecd = ((eecd >> 15) & 0x03);
+
+        /* If both bits are set, device is Flash type */
+        if (eecd == 0x03) {
+            return false;
+        }
+    }
+    return true;
+}
+
+/******************************************************************************
+ * Verifies that the EEPROM has a valid checksum
+ *
+ * hw - Struct containing variables accessed by shared code
+ *
+ * Reads the first 64 16 bit words of the EEPROM and sums the values read.
+ * If the sum of the 64 16 bit words is 0xBABA, the EEPROM's checksum is
+ * valid.
+ *****************************************************************************/
+s32 e1000_validate_eeprom_checksum(struct e1000_hw *hw)
+{
+    u16 checksum = 0;
+    u16 i, eeprom_data;
+
+    DEBUGFUNC("e1000_validate_eeprom_checksum");
+
+    if ((hw->mac_type == e1000_82573) && !e1000_is_onboard_nvm_eeprom(hw)) {
+        /* Check bit 4 of word 10h.  If it is 0, firmware is done updating
+         * 10h-12h.  Checksum may need to be fixed. */
+        e1000_read_eeprom(hw, 0x10, 1, &eeprom_data);
+        if ((eeprom_data & 0x10) == 0) {
+            /* Read 0x23 and check bit 15.  This bit is a 1 when the checksum
+             * has already been fixed.  If the checksum is still wrong and this
+             * bit is a 1, we need to return bad checksum.  Otherwise, we need
+             * to set this bit to a 1 and update the checksum. */
+            e1000_read_eeprom(hw, 0x23, 1, &eeprom_data);
+            if ((eeprom_data & 0x8000) == 0) {
+                eeprom_data |= 0x8000;
+                e1000_write_eeprom(hw, 0x23, 1, &eeprom_data);
+                e1000_update_eeprom_checksum(hw);
+            }
+        }
+    }
+
+    if (hw->mac_type == e1000_ich8lan) {
+        /* Drivers must allocate the shadow ram structure for the
+         * EEPROM checksum to be updated.  Otherwise, this bit as well
+         * as the checksum must both be set correctly for this
+         * validation to pass.
+         */
+        e1000_read_eeprom(hw, 0x19, 1, &eeprom_data);
+        if ((eeprom_data & 0x40) == 0) {
+            eeprom_data |= 0x40;
+            e1000_write_eeprom(hw, 0x19, 1, &eeprom_data);
+            e1000_update_eeprom_checksum(hw);
+        }
+    }
+
+    for (i = 0; i < (EEPROM_CHECKSUM_REG + 1); i++) {
+        if (e1000_read_eeprom(hw, i, 1, &eeprom_data) < 0) {
+            DEBUGOUT("EEPROM Read Error\n");
+            return -E1000_ERR_EEPROM;
+        }
+        checksum += eeprom_data;
+    }
+
+    if (checksum == (u16)EEPROM_SUM)
+        return E1000_SUCCESS;
+    else {
+        DEBUGOUT("EEPROM Checksum Invalid\n");
+        return -E1000_ERR_EEPROM;
+    }
+}
+
+/******************************************************************************
+ * Calculates the EEPROM checksum and writes it to the EEPROM
+ *
+ * hw - Struct containing variables accessed by shared code
+ *
+ * Sums the first 63 16 bit words of the EEPROM. Subtracts the sum from 0xBABA.
+ * Writes the difference to word offset 63 of the EEPROM.
+ *****************************************************************************/
+s32 e1000_update_eeprom_checksum(struct e1000_hw *hw)
+{
+    u32 ctrl_ext;
+    u16 checksum = 0;
+    u16 i, eeprom_data;
+
+    DEBUGFUNC("e1000_update_eeprom_checksum");
+
+    for (i = 0; i < EEPROM_CHECKSUM_REG; i++) {
+        if (e1000_read_eeprom(hw, i, 1, &eeprom_data) < 0) {
+            DEBUGOUT("EEPROM Read Error\n");
+            return -E1000_ERR_EEPROM;
+        }
+        checksum += eeprom_data;
+    }
+    checksum = (u16)EEPROM_SUM - checksum;
+    if (e1000_write_eeprom(hw, EEPROM_CHECKSUM_REG, 1, &checksum) < 0) {
+        DEBUGOUT("EEPROM Write Error\n");
+        return -E1000_ERR_EEPROM;
+    } else if (hw->eeprom.type == e1000_eeprom_flash) {
+        e1000_commit_shadow_ram(hw);
+    } else if (hw->eeprom.type == e1000_eeprom_ich8) {
+        e1000_commit_shadow_ram(hw);
+        /* Reload the EEPROM, or else modifications will not appear
+         * until after next adapter reset. */
+        ctrl_ext = er32(CTRL_EXT);
+        ctrl_ext |= E1000_CTRL_EXT_EE_RST;
+        ew32(CTRL_EXT, ctrl_ext);
+        msleep(10);
+    }
+    return E1000_SUCCESS;
+}
+
+/******************************************************************************
+ * Parent function for writing words to the different EEPROM types.
+ *
+ * hw - Struct containing variables accessed by shared code
+ * offset - offset within the EEPROM to be written to
+ * words - number of words to write
+ * data - 16 bit word to be written to the EEPROM
+ *
+ * If e1000_update_eeprom_checksum is not called after this function, the
+ * EEPROM will most likely contain an invalid checksum.
+ *****************************************************************************/
+s32 e1000_write_eeprom(struct e1000_hw *hw, u16 offset, u16 words, u16 *data)
+{
+    s32 ret;
+    spin_lock(&e1000_eeprom_lock);
+    ret = e1000_do_write_eeprom(hw, offset, words, data);
+    spin_unlock(&e1000_eeprom_lock);
+    return ret;
+}
+
+
+static s32 e1000_do_write_eeprom(struct e1000_hw *hw, u16 offset, u16 words, u16 *data)
+{
+    struct e1000_eeprom_info *eeprom = &hw->eeprom;
+    s32 status = 0;
+
+    DEBUGFUNC("e1000_write_eeprom");
+
+    /* If eeprom is not yet detected, do so now */
+    if (eeprom->word_size == 0)
+        e1000_init_eeprom_params(hw);
+
+    /* A check for invalid values:  offset too large, too many words, and not
+     * enough words.
+     */
+    if ((offset >= eeprom->word_size) || (words > eeprom->word_size - offset) ||
+       (words == 0)) {
+        DEBUGOUT("\"words\" parameter out of bounds\n");
+        return -E1000_ERR_EEPROM;
+    }
+
+    /* 82573 writes only through eewr */
+    if (eeprom->use_eewr)
+        return e1000_write_eeprom_eewr(hw, offset, words, data);
+
+    if (eeprom->type == e1000_eeprom_ich8)
+        return e1000_write_eeprom_ich8(hw, offset, words, data);
+
+    /* Prepare the EEPROM for writing  */
+    if (e1000_acquire_eeprom(hw) != E1000_SUCCESS)
+        return -E1000_ERR_EEPROM;
+
+    if (eeprom->type == e1000_eeprom_microwire) {
+        status = e1000_write_eeprom_microwire(hw, offset, words, data);
+    } else {
+        status = e1000_write_eeprom_spi(hw, offset, words, data);
+        msleep(10);
+    }
+
+    /* Done with writing */
+    e1000_release_eeprom(hw);
+
+    return status;
+}
+
+/******************************************************************************
+ * Writes a 16 bit word to a given offset in an SPI EEPROM.
+ *
+ * hw - Struct containing variables accessed by shared code
+ * offset - offset within the EEPROM to be written to
+ * words - number of words to write
+ * data - pointer to array of 16 bit words to be written to the EEPROM
+ *
+ *****************************************************************************/
+static s32 e1000_write_eeprom_spi(struct e1000_hw *hw, u16 offset, u16 words,
+				  u16 *data)
+{
+    struct e1000_eeprom_info *eeprom = &hw->eeprom;
+    u16 widx = 0;
+
+    DEBUGFUNC("e1000_write_eeprom_spi");
+
+    while (widx < words) {
+        u8 write_opcode = EEPROM_WRITE_OPCODE_SPI;
+
+        if (e1000_spi_eeprom_ready(hw)) return -E1000_ERR_EEPROM;
+
+        e1000_standby_eeprom(hw);
+
+        /*  Send the WRITE ENABLE command (8 bit opcode )  */
+        e1000_shift_out_ee_bits(hw, EEPROM_WREN_OPCODE_SPI,
+                                    eeprom->opcode_bits);
+
+        e1000_standby_eeprom(hw);
+
+        /* Some SPI eeproms use the 8th address bit embedded in the opcode */
+        if ((eeprom->address_bits == 8) && (offset >= 128))
+            write_opcode |= EEPROM_A8_OPCODE_SPI;
+
+        /* Send the Write command (8-bit opcode + addr) */
+        e1000_shift_out_ee_bits(hw, write_opcode, eeprom->opcode_bits);
+
+        e1000_shift_out_ee_bits(hw, (u16)((offset + widx)*2),
+                                eeprom->address_bits);
+
+        /* Send the data */
+
+        /* Loop to allow for up to whole page write (32 bytes) of eeprom */
+        while (widx < words) {
+            u16 word_out = data[widx];
+            word_out = (word_out >> 8) | (word_out << 8);
+            e1000_shift_out_ee_bits(hw, word_out, 16);
+            widx++;
+
+            /* Some larger eeprom sizes are capable of a 32-byte PAGE WRITE
+             * operation, while the smaller eeproms are capable of an 8-byte
+             * PAGE WRITE operation.  Break the inner loop to pass new address
+             */
+            if ((((offset + widx)*2) % eeprom->page_size) == 0) {
+                e1000_standby_eeprom(hw);
+                break;
+            }
+        }
+    }
+
+    return E1000_SUCCESS;
+}
+
+/******************************************************************************
+ * Writes a 16 bit word to a given offset in a Microwire EEPROM.
+ *
+ * hw - Struct containing variables accessed by shared code
+ * offset - offset within the EEPROM to be written to
+ * words - number of words to write
+ * data - pointer to array of 16 bit words to be written to the EEPROM
+ *
+ *****************************************************************************/
+static s32 e1000_write_eeprom_microwire(struct e1000_hw *hw, u16 offset,
+					u16 words, u16 *data)
+{
+    struct e1000_eeprom_info *eeprom = &hw->eeprom;
+    u32 eecd;
+    u16 words_written = 0;
+    u16 i = 0;
+
+    DEBUGFUNC("e1000_write_eeprom_microwire");
+
+    /* Send the write enable command to the EEPROM (3-bit opcode plus
+     * 6/8-bit dummy address beginning with 11).  It's less work to include
+     * the 11 of the dummy address as part of the opcode than it is to shift
+     * it over the correct number of bits for the address.  This puts the
+     * EEPROM into write/erase mode.
+     */
+    e1000_shift_out_ee_bits(hw, EEPROM_EWEN_OPCODE_MICROWIRE,
+                            (u16)(eeprom->opcode_bits + 2));
+
+    e1000_shift_out_ee_bits(hw, 0, (u16)(eeprom->address_bits - 2));
+
+    /* Prepare the EEPROM */
+    e1000_standby_eeprom(hw);
+
+    while (words_written < words) {
+        /* Send the Write command (3-bit opcode + addr) */
+        e1000_shift_out_ee_bits(hw, EEPROM_WRITE_OPCODE_MICROWIRE,
+                                eeprom->opcode_bits);
+
+        e1000_shift_out_ee_bits(hw, (u16)(offset + words_written),
+                                eeprom->address_bits);
+
+        /* Send the data */
+        e1000_shift_out_ee_bits(hw, data[words_written], 16);
+
+        /* Toggle the CS line.  This in effect tells the EEPROM to execute
+         * the previous command.
+         */
+        e1000_standby_eeprom(hw);
+
+        /* Read DO repeatedly until it is high (equal to '1').  The EEPROM will
+         * signal that the command has been completed by raising the DO signal.
+         * If DO does not go high in 10 milliseconds, then error out.
+         */
+        for (i = 0; i < 200; i++) {
+            eecd = er32(EECD);
+            if (eecd & E1000_EECD_DO) break;
+            udelay(50);
+        }
+        if (i == 200) {
+            DEBUGOUT("EEPROM Write did not complete\n");
+            return -E1000_ERR_EEPROM;
+        }
+
+        /* Recover from write */
+        e1000_standby_eeprom(hw);
+
+        words_written++;
+    }
+
+    /* Send the write disable command to the EEPROM (3-bit opcode plus
+     * 6/8-bit dummy address beginning with 10).  It's less work to include
+     * the 10 of the dummy address as part of the opcode than it is to shift
+     * it over the correct number of bits for the address.  This takes the
+     * EEPROM out of write/erase mode.
+     */
+    e1000_shift_out_ee_bits(hw, EEPROM_EWDS_OPCODE_MICROWIRE,
+                            (u16)(eeprom->opcode_bits + 2));
+
+    e1000_shift_out_ee_bits(hw, 0, (u16)(eeprom->address_bits - 2));
+
+    return E1000_SUCCESS;
+}
+
+/******************************************************************************
+ * Flushes the cached eeprom to NVM. This is done by copying the modified
+ * values from the eeprom cache, and the unmodified values from the currently
+ * active bank, into the new bank.
+ *
+ * hw - Struct containing variables accessed by shared code
+ *****************************************************************************/
+static s32 e1000_commit_shadow_ram(struct e1000_hw *hw)
+{
+    u32 attempts = 100000;
+    u32 eecd = 0;
+    u32 flop = 0;
+    u32 i = 0;
+    s32 error = E1000_SUCCESS;
+    u32 old_bank_offset = 0;
+    u32 new_bank_offset = 0;
+    u8 low_byte = 0;
+    u8 high_byte = 0;
+    bool sector_write_failed = false;
+
+    if (hw->mac_type == e1000_82573) {
+        /* The flop register will be used to determine if flash type is STM */
+        flop = er32(FLOP);
+        for (i=0; i < attempts; i++) {
+            eecd = er32(EECD);
+            if ((eecd & E1000_EECD_FLUPD) == 0) {
+                break;
+            }
+            udelay(5);
+        }
+
+        if (i == attempts) {
+            return -E1000_ERR_EEPROM;
+        }
+
+        /* If STM opcode located in bits 15:8 of flop, reset firmware */
+        if ((flop & 0xFF00) == E1000_STM_OPCODE) {
+            ew32(HICR, E1000_HICR_FW_RESET);
+        }
+
+        /* Perform the flash update */
+        ew32(EECD, eecd | E1000_EECD_FLUPD);
+
+        for (i=0; i < attempts; i++) {
+            eecd = er32(EECD);
+            if ((eecd & E1000_EECD_FLUPD) == 0) {
+                break;
+            }
+            udelay(5);
+        }
+
+        if (i == attempts) {
+            return -E1000_ERR_EEPROM;
+        }
+    }
+
+    if (hw->mac_type == e1000_ich8lan && hw->eeprom_shadow_ram != NULL) {
+        /* We're writing to the opposite bank so if we're on bank 1,
+         * write to bank 0 etc.  We also need to erase the segment that
+         * is going to be written */
+        if (!(er32(EECD) & E1000_EECD_SEC1VAL)) {
+            new_bank_offset = hw->flash_bank_size * 2;
+            old_bank_offset = 0;
+            e1000_erase_ich8_4k_segment(hw, 1);
+        } else {
+            old_bank_offset = hw->flash_bank_size * 2;
+            new_bank_offset = 0;
+            e1000_erase_ich8_4k_segment(hw, 0);
+        }
+
+        sector_write_failed = false;
+        /* Loop for every byte in the shadow RAM,
+         * which is in units of words. */
+        for (i = 0; i < E1000_SHADOW_RAM_WORDS; i++) {
+            /* Determine whether to write the value stored
+             * in the other NVM bank or a modified value stored
+             * in the shadow RAM */
+            if (hw->eeprom_shadow_ram[i].modified) {
+                low_byte = (u8)hw->eeprom_shadow_ram[i].eeprom_word;
+                udelay(100);
+                error = e1000_verify_write_ich8_byte(hw,
+                            (i << 1) + new_bank_offset, low_byte);
+
+                if (error != E1000_SUCCESS)
+                    sector_write_failed = true;
+                else {
+                    high_byte =
+                        (u8)(hw->eeprom_shadow_ram[i].eeprom_word >> 8);
+                    udelay(100);
+                }
+            } else {
+                e1000_read_ich8_byte(hw, (i << 1) + old_bank_offset,
+                                     &low_byte);
+                udelay(100);
+                error = e1000_verify_write_ich8_byte(hw,
+                            (i << 1) + new_bank_offset, low_byte);
+
+                if (error != E1000_SUCCESS)
+                    sector_write_failed = true;
+                else {
+                    e1000_read_ich8_byte(hw, (i << 1) + old_bank_offset + 1,
+                                         &high_byte);
+                    udelay(100);
+                }
+            }
+
+            /* If the write of the low byte was successful, go ahead and
+             * write the high byte while checking to make sure that if it
+             * is the signature byte, then it is handled properly */
+            if (!sector_write_failed) {
+                /* If the word is 0x13, then make sure the signature bits
+                 * (15:14) are 11b until the commit has completed.
+                 * This will allow us to write 10b which indicates the
+                 * signature is valid.  We want to do this after the write
+                 * has completed so that we don't mark the segment valid
+                 * while the write is still in progress */
+                if (i == E1000_ICH_NVM_SIG_WORD)
+                    high_byte = E1000_ICH_NVM_SIG_MASK | high_byte;
+
+                error = e1000_verify_write_ich8_byte(hw,
+                            (i << 1) + new_bank_offset + 1, high_byte);
+                if (error != E1000_SUCCESS)
+                    sector_write_failed = true;
+
+            } else {
+                /* If the write failed then break from the loop and
+                 * return an error */
+                break;
+            }
+        }
+
+        /* Don't bother writing the segment valid bits if sector
+         * programming failed. */
+        if (!sector_write_failed) {
+            /* Finally validate the new segment by setting bits 15:14
+             * to 10b in word 0x13.  This can be done without an
+             * erase as well since these bits are 11 to start with
+             * and we need to change bit 14 to 0b */
+            e1000_read_ich8_byte(hw,
+                                 E1000_ICH_NVM_SIG_WORD * 2 + 1 + new_bank_offset,
+                                 &high_byte);
+            high_byte &= 0xBF;
+            error = e1000_verify_write_ich8_byte(hw,
+                        E1000_ICH_NVM_SIG_WORD * 2 + 1 + new_bank_offset, high_byte);
+            /* And invalidate the previously valid segment by setting
+             * its signature word (0x13) high_byte to 0b. This can be
+             * done without an erase because flash erase sets all bits
+             * to 1's. We can write 1's to 0's without an erase */
+            if (error == E1000_SUCCESS) {
+                error = e1000_verify_write_ich8_byte(hw,
+                            E1000_ICH_NVM_SIG_WORD * 2 + 1 + old_bank_offset, 0);
+            }
+
+            /* Clear the now unused entries in the cache */
+            for (i = 0; i < E1000_SHADOW_RAM_WORDS; i++) {
+                hw->eeprom_shadow_ram[i].modified = false;
+                hw->eeprom_shadow_ram[i].eeprom_word = 0xFFFF;
+            }
+        }
+    }
+
+    return error;
+}
+
+/******************************************************************************
+ * Reads the adapter's MAC address from the EEPROM and inverts the LSB for the
+ * second function of dual function devices
+ *
+ * hw - Struct containing variables accessed by shared code
+ *****************************************************************************/
+s32 e1000_read_mac_addr(struct e1000_hw *hw)
+{
+    u16 offset;
+    u16 eeprom_data, i;
+
+    DEBUGFUNC("e1000_read_mac_addr");
+
+    for (i = 0; i < NODE_ADDRESS_SIZE; i += 2) {
+        offset = i >> 1;
+        if (e1000_read_eeprom(hw, offset, 1, &eeprom_data) < 0) {
+            DEBUGOUT("EEPROM Read Error\n");
+            return -E1000_ERR_EEPROM;
+        }
+        hw->perm_mac_addr[i] = (u8)(eeprom_data & 0x00FF);
+        hw->perm_mac_addr[i+1] = (u8)(eeprom_data >> 8);
+    }
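+    /* Each EEPROM word holds two address bytes in little-endian order; a
+     * (hypothetical) word 0x2301 at offset 0, for example, would yield
+     * perm_mac_addr[0] = 0x01 and perm_mac_addr[1] = 0x23.  On the second
+     * port of the dual-port parts below, bit 0 of the last byte is then
+     * inverted so that each port gets a unique address. */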
+
+    switch (hw->mac_type) {
+    default:
+        break;
+    case e1000_82546:
+    case e1000_82546_rev_3:
+    case e1000_82571:
+    case e1000_80003es2lan:
+        if (er32(STATUS) & E1000_STATUS_FUNC_1)
+            hw->perm_mac_addr[5] ^= 0x01;
+        break;
+    }
+
+    for (i = 0; i < NODE_ADDRESS_SIZE; i++)
+        hw->mac_addr[i] = hw->perm_mac_addr[i];
+    return E1000_SUCCESS;
+}
+
+/******************************************************************************
+ * Initializes receive address filters.
+ *
+ * hw - Struct containing variables accessed by shared code
+ *
+ * Places the MAC address in receive address register 0 and clears the rest
+ * of the receive address registers. Clears the multicast table. Assumes
+ * the receiver is in reset when the routine is called.
+ *****************************************************************************/
+static void e1000_init_rx_addrs(struct e1000_hw *hw)
+{
+    u32 i;
+    u32 rar_num;
+
+    DEBUGFUNC("e1000_init_rx_addrs");
+
+    /* Setup the receive address. */
+    DEBUGOUT("Programming MAC Address into RAR[0]\n");
+
+    e1000_rar_set(hw, hw->mac_addr, 0);
+
+    rar_num = E1000_RAR_ENTRIES;
+
+    /* Reserve a spot for the Locally Administered Address to work around
+     * an 82571 issue in which a reset on one port will reload the MAC on
+     * the other port. */
+    if ((hw->mac_type == e1000_82571) && (hw->laa_is_present))
+        rar_num -= 1;
+    if (hw->mac_type == e1000_ich8lan)
+        rar_num = E1000_RAR_ENTRIES_ICH8LAN;
+
+    /* Zero out the remaining receive addresses. */
+    DEBUGOUT("Clearing RAR[1-15]\n");
+    for (i = 1; i < rar_num; i++) {
+        E1000_WRITE_REG_ARRAY(hw, RA, (i << 1), 0);
+        E1000_WRITE_FLUSH();
+        E1000_WRITE_REG_ARRAY(hw, RA, ((i << 1) + 1), 0);
+        E1000_WRITE_FLUSH();
+    }
+}
+
+/******************************************************************************
+ * Hashes an address to determine its location in the multicast table
+ *
+ * hw - Struct containing variables accessed by shared code
+ * mc_addr - the multicast address to hash
+ *****************************************************************************/
+u32 e1000_hash_mc_addr(struct e1000_hw *hw, u8 *mc_addr)
+{
+    u32 hash_value = 0;
+
+    /* The portion of the address that is used for the hash table is
+     * determined by the mc_filter_type setting.
+     */
+    switch (hw->mc_filter_type) {
+    /* [0] [1] [2] [3] [4] [5]
+     * 01  AA  00  12  34  56
+     * LSB                 MSB
+     */
+    case 0:
+        if (hw->mac_type == e1000_ich8lan) {
+            /* [47:38] i.e. 0x158 for above example address */
+            hash_value = ((mc_addr[4] >> 6) | (((u16)mc_addr[5]) << 2));
+        } else {
+            /* [47:36] i.e. 0x563 for above example address */
+            hash_value = ((mc_addr[4] >> 4) | (((u16)mc_addr[5]) << 4));
+        }
+        break;
+    case 1:
+        if (hw->mac_type == e1000_ich8lan) {
+            /* [46:37] i.e. 0x2B1 for above example address */
+            hash_value = ((mc_addr[4] >> 5) | (((u16)mc_addr[5]) << 3));
+        } else {
+            /* [46:35] i.e. 0xAC6 for above example address */
+            hash_value = ((mc_addr[4] >> 3) | (((u16)mc_addr[5]) << 5));
+        }
+        break;
+    case 2:
+        if (hw->mac_type == e1000_ich8lan) {
+            /*[45:36] i.e. 0x163 for above example address */
+            hash_value = ((mc_addr[4] >> 4) | (((u16)mc_addr[5]) << 4));
+        } else {
+            /* [45:34] i.e. 0x5D8 for above example address */
+            hash_value = ((mc_addr[4] >> 2) | (((u16)mc_addr[5]) << 6));
+        }
+        break;
+    case 3:
+        if (hw->mac_type == e1000_ich8lan) {
+            /* [43:34] i.e. 0x18D for above example address */
+            hash_value = ((mc_addr[4] >> 2) | (((u16)mc_addr[5]) << 6));
+        } else {
+            /* [43:32] i.e. 0x634 for above example address */
+            hash_value = ((mc_addr[4]) | (((u16)mc_addr[5]) << 8));
+        }
+        break;
+    }
+
+    hash_value &= 0xFFF;
+    if (hw->mac_type == e1000_ich8lan)
+        hash_value &= 0x3FF;
+
+    return hash_value;
+}
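+
+/* A caller typically pairs e1000_hash_mc_addr() with e1000_mta_set(), e.g.:
+ *
+ *     hash_value = e1000_hash_mc_addr(hw, mc_addr);
+ *     e1000_mta_set(hw, hash_value);
+ *
+ * so that frames sent to mc_addr pass the multicast filter. */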
+
+/******************************************************************************
+ * Sets the bit in the multicast table corresponding to the hash value.
+ *
+ * hw - Struct containing variables accessed by shared code
+ * hash_value - Multicast address hash value
+ *****************************************************************************/
+void e1000_mta_set(struct e1000_hw *hw, u32 hash_value)
+{
+    u32 hash_bit, hash_reg;
+    u32 mta;
+    u32 temp;
+
+    /* The MTA is a register array of 128 32-bit registers.
+     * It is treated like an array of 4096 bits.  We want to set
+     * bit BitArray[hash_value]. So we figure out what register
+     * the bit is in, read it, OR in the new bit, then write
+     * back the new value.  The register is determined by the
+     * upper 7 bits of the hash value and the bit within that
+     * register is determined by the lower 5 bits of the value.
+     */
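+    /* For example, the hash value 0x563 from the mc_filter_type 0 example in
+     * e1000_hash_mc_addr() gives hash_reg = 0x563 >> 5 = 0x2B and
+     * hash_bit = 0x563 & 0x1F = 0x03, i.e. bit 3 of MTA[0x2B]. */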
+    hash_reg = (hash_value >> 5) & 0x7F;
+    if (hw->mac_type == e1000_ich8lan)
+        hash_reg &= 0x1F;
+
+    hash_bit = hash_value & 0x1F;
+
+    mta = E1000_READ_REG_ARRAY(hw, MTA, hash_reg);
+
+    mta |= (1 << hash_bit);
+
+    /* If we are on an 82544 and we are trying to write an odd offset
+     * in the MTA, save off the previous entry before writing and
+     * restore the old value after writing.
+     */
+    if ((hw->mac_type == e1000_82544) && ((hash_reg & 0x1) == 1)) {
+        temp = E1000_READ_REG_ARRAY(hw, MTA, (hash_reg - 1));
+        E1000_WRITE_REG_ARRAY(hw, MTA, hash_reg, mta);
+        E1000_WRITE_FLUSH();
+        E1000_WRITE_REG_ARRAY(hw, MTA, (hash_reg - 1), temp);
+        E1000_WRITE_FLUSH();
+    } else {
+        E1000_WRITE_REG_ARRAY(hw, MTA, hash_reg, mta);
+        E1000_WRITE_FLUSH();
+    }
+}
+
+/******************************************************************************
+ * Puts an ethernet address into a receive address register.
+ *
+ * hw - Struct containing variables accessed by shared code
+ * addr - Address to put into receive address register
+ * index - Receive address register to write
+ *****************************************************************************/
+void e1000_rar_set(struct e1000_hw *hw, u8 *addr, u32 index)
+{
+    u32 rar_low, rar_high;
+
+    /* HW expects these in little endian so we reverse the byte order
+     * from network order (big endian) to little endian
+     */
+    rar_low = ((u32)addr[0] | ((u32)addr[1] << 8) |
+               ((u32)addr[2] << 16) | ((u32)addr[3] << 24));
+    rar_high = ((u32)addr[4] | ((u32)addr[5] << 8));
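+    /* For a (hypothetical) address 00:1B:21:3C:4D:5E this packs to
+     * rar_low = 0x3C211B00 and rar_high = 0x00005E4D, before the Address
+     * Valid bit is OR'd into rar_high below. */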
+
+    /* Disable Rx and flush all Rx frames before enabling RSS to avoid Rx
+     * unit hang.
+     *
+     * Description:
+     * If there are any Rx frames queued up or otherwise present in the HW
+     * before RSS is enabled, and then we enable RSS, the HW Rx unit will
+     * hang.  To work around this issue, we have to disable receives and
+     * flush out all Rx frames before we enable RSS. To do so, we redirect
+     * all Rx traffic to manageability and then reset the HW.
+     * This flushes away Rx frames, and (since the redirection to
+     * manageability persists across resets) keeps new ones from coming in
+     * while we work.  Then, we clear the Address Valid AV bit for all MAC
+     * addresses and undo the re-direction to manageability.
+     * Now, frames are coming in again, but the MAC won't accept them, so
+     * far so good.  We now proceed to initialize RSS (if necessary) and
+     * configure the Rx unit.  Last, we re-enable the AV bits and continue
+     * on our merry way.
+     */
+    switch (hw->mac_type) {
+    case e1000_82571:
+    case e1000_82572:
+    case e1000_80003es2lan:
+        if (hw->leave_av_bit_off)
+            break;
+    default:
+        /* Indicate to hardware the Address is Valid. */
+        rar_high |= E1000_RAH_AV;
+        break;
+    }
+
+    E1000_WRITE_REG_ARRAY(hw, RA, (index << 1), rar_low);
+    E1000_WRITE_FLUSH();
+    E1000_WRITE_REG_ARRAY(hw, RA, ((index << 1) + 1), rar_high);
+    E1000_WRITE_FLUSH();
+}
+
+/******************************************************************************
+ * Writes a value to the specified offset in the VLAN filter table.
+ *
+ * hw - Struct containing variables accessed by shared code
+ * offset - Offset in VLAN filter table to write
+ * value - Value to write into VLAN filter table
+ *****************************************************************************/
+void e1000_write_vfta(struct e1000_hw *hw, u32 offset, u32 value)
+{
+    u32 temp;
+
+    if (hw->mac_type == e1000_ich8lan)
+        return;
+
+    if ((hw->mac_type == e1000_82544) && ((offset & 0x1) == 1)) {
+        temp = E1000_READ_REG_ARRAY(hw, VFTA, (offset - 1));
+        E1000_WRITE_REG_ARRAY(hw, VFTA, offset, value);
+        E1000_WRITE_FLUSH();
+        E1000_WRITE_REG_ARRAY(hw, VFTA, (offset - 1), temp);
+        E1000_WRITE_FLUSH();
+    } else {
+        E1000_WRITE_REG_ARRAY(hw, VFTA, offset, value);
+        E1000_WRITE_FLUSH();
+    }
+}
+
+/******************************************************************************
+ * Clears the VLAN filter table
+ *
+ * hw - Struct containing variables accessed by shared code
+ *****************************************************************************/
+static void e1000_clear_vfta(struct e1000_hw *hw)
+{
+    u32 offset;
+    u32 vfta_value = 0;
+    u32 vfta_offset = 0;
+    u32 vfta_bit_in_reg = 0;
+
+    if (hw->mac_type == e1000_ich8lan)
+        return;
+
+    if (hw->mac_type == e1000_82573) {
+        if (hw->mng_cookie.vlan_id != 0) {
+            /* The VFTA is a 4096-bit field, each bit identifying a single
+             * VLAN ID.  The following operations determine which 32-bit entry
+             * (i.e. offset) into the array holds the bit of the manageability
+             * unit's VLAN ID, and which bit within that entry it is. */
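+            /* With 32-bit entries this works out to offset = vlan_id >> 5 and
+             * bit = vlan_id & 0x1F; e.g. a (hypothetical) manageability VLAN
+             * ID of 100 would be preserved as bit 4 of VFTA[3]. */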
+            vfta_offset = (hw->mng_cookie.vlan_id >>
+                           E1000_VFTA_ENTRY_SHIFT) &
+                          E1000_VFTA_ENTRY_MASK;
+            vfta_bit_in_reg = 1 << (hw->mng_cookie.vlan_id &
+                                    E1000_VFTA_ENTRY_BIT_SHIFT_MASK);
+        }
+    }
+    for (offset = 0; offset < E1000_VLAN_FILTER_TBL_SIZE; offset++) {
+        /* If the offset we want to clear is the same offset of the
+         * manageability VLAN ID, then clear all bits except that of the
+         * manageability unit */
+        vfta_value = (offset == vfta_offset) ? vfta_bit_in_reg : 0;
+        E1000_WRITE_REG_ARRAY(hw, VFTA, offset, vfta_value);
+        E1000_WRITE_FLUSH();
+    }
+}
+
+static s32 e1000_id_led_init(struct e1000_hw *hw)
+{
+    u32 ledctl;
+    const u32 ledctl_mask = 0x000000FF;
+    const u32 ledctl_on = E1000_LEDCTL_MODE_LED_ON;
+    const u32 ledctl_off = E1000_LEDCTL_MODE_LED_OFF;
+    u16 eeprom_data, i, temp;
+    const u16 led_mask = 0x0F;
+
+    DEBUGFUNC("e1000_id_led_init");
+
+    if (hw->mac_type < e1000_82540) {
+        /* Nothing to do */
+        return E1000_SUCCESS;
+    }
+
+    ledctl = er32(LEDCTL);
+    hw->ledctl_default = ledctl;
+    hw->ledctl_mode1 = hw->ledctl_default;
+    hw->ledctl_mode2 = hw->ledctl_default;
+
+    if (e1000_read_eeprom(hw, EEPROM_ID_LED_SETTINGS, 1, &eeprom_data) < 0) {
+        DEBUGOUT("EEPROM Read Error\n");
+        return -E1000_ERR_EEPROM;
+    }
+
+    if ((hw->mac_type == e1000_82573) &&
+        (eeprom_data == ID_LED_RESERVED_82573))
+        eeprom_data = ID_LED_DEFAULT_82573;
+    else if ((eeprom_data == ID_LED_RESERVED_0000) ||
+            (eeprom_data == ID_LED_RESERVED_FFFF)) {
+        if (hw->mac_type == e1000_ich8lan)
+            eeprom_data = ID_LED_DEFAULT_ICH8LAN;
+        else
+            eeprom_data = ID_LED_DEFAULT;
+    }
+
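+    /* Each 4-bit field of eeprom_data selects the behaviour of one LED (one
+     * byte of LEDCTL).  For example, a nibble of ID_LED_ON1_OFF2 forces that
+     * LED on in ledctl_mode1 and off in ledctl_mode2. */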
+    for (i = 0; i < 4; i++) {
+        temp = (eeprom_data >> (i << 2)) & led_mask;
+        switch (temp) {
+        case ID_LED_ON1_DEF2:
+        case ID_LED_ON1_ON2:
+        case ID_LED_ON1_OFF2:
+            hw->ledctl_mode1 &= ~(ledctl_mask << (i << 3));
+            hw->ledctl_mode1 |= ledctl_on << (i << 3);
+            break;
+        case ID_LED_OFF1_DEF2:
+        case ID_LED_OFF1_ON2:
+        case ID_LED_OFF1_OFF2:
+            hw->ledctl_mode1 &= ~(ledctl_mask << (i << 3));
+            hw->ledctl_mode1 |= ledctl_off << (i << 3);
+            break;
+        default:
+            /* Do nothing */
+            break;
+        }
+        switch (temp) {
+        case ID_LED_DEF1_ON2:
+        case ID_LED_ON1_ON2:
+        case ID_LED_OFF1_ON2:
+            hw->ledctl_mode2 &= ~(ledctl_mask << (i << 3));
+            hw->ledctl_mode2 |= ledctl_on << (i << 3);
+            break;
+        case ID_LED_DEF1_OFF2:
+        case ID_LED_ON1_OFF2:
+        case ID_LED_OFF1_OFF2:
+            hw->ledctl_mode2 &= ~(ledctl_mask << (i << 3));
+            hw->ledctl_mode2 |= ledctl_off << (i << 3);
+            break;
+        default:
+            /* Do nothing */
+            break;
+        }
+    }
+    return E1000_SUCCESS;
+}
+
+/******************************************************************************
+ * Prepares SW controllable LED for use and saves the current state of the LED.
+ *
+ * hw - Struct containing variables accessed by shared code
+ *****************************************************************************/
+s32 e1000_setup_led(struct e1000_hw *hw)
+{
+    u32 ledctl;
+    s32 ret_val = E1000_SUCCESS;
+
+    DEBUGFUNC("e1000_setup_led");
+
+    switch (hw->mac_type) {
+    case e1000_82542_rev2_0:
+    case e1000_82542_rev2_1:
+    case e1000_82543:
+    case e1000_82544:
+        /* No setup necessary */
+        break;
+    case e1000_82541:
+    case e1000_82547:
+    case e1000_82541_rev_2:
+    case e1000_82547_rev_2:
+        /* Turn off PHY Smart Power Down (if enabled) */
+        ret_val = e1000_read_phy_reg(hw, IGP01E1000_GMII_FIFO,
+                                     &hw->phy_spd_default);
+        if (ret_val)
+            return ret_val;
+        ret_val = e1000_write_phy_reg(hw, IGP01E1000_GMII_FIFO,
+                                      (u16)(hw->phy_spd_default &
+                                      ~IGP01E1000_GMII_SPD));
+        if (ret_val)
+            return ret_val;
+        /* Fall Through */
+    default:
+        if (hw->media_type == e1000_media_type_fiber) {
+            ledctl = er32(LEDCTL);
+            /* Save current LEDCTL settings */
+            hw->ledctl_default = ledctl;
+            /* Turn off LED0 */
+            ledctl &= ~(E1000_LEDCTL_LED0_IVRT |
+                        E1000_LEDCTL_LED0_BLINK |
+                        E1000_LEDCTL_LED0_MODE_MASK);
+            ledctl |= (E1000_LEDCTL_MODE_LED_OFF <<
+                       E1000_LEDCTL_LED0_MODE_SHIFT);
+            ew32(LEDCTL, ledctl);
+        } else if (hw->media_type == e1000_media_type_copper)
+            ew32(LEDCTL, hw->ledctl_mode1);
+        break;
+    }
+
+    return E1000_SUCCESS;
+}
+
+
+/******************************************************************************
+ * Used on 82571 and later silicon that has LED blink bits.
+ * Callers must use their own timer and should have already called
+ * e1000_id_led_init().
+ * Call e1000_cleanup_led() to stop blinking.
+ *
+ * hw - Struct containing variables accessed by shared code
+ *****************************************************************************/
+s32 e1000_blink_led_start(struct e1000_hw *hw)
+{
+    s16  i;
+    u32 ledctl_blink = 0;
+
+    DEBUGFUNC("e1000_id_led_blink_on");
+
+    if (hw->mac_type < e1000_82571) {
+        /* Nothing to do */
+        return E1000_SUCCESS;
+    }
+    if (hw->media_type == e1000_media_type_fiber) {
+        /* always blink LED0 for PCI-E fiber */
+        ledctl_blink = E1000_LEDCTL_LED0_BLINK |
+                     (E1000_LEDCTL_MODE_LED_ON << E1000_LEDCTL_LED0_MODE_SHIFT);
+    } else {
+        /* set the blink bit for each LED that's "on" (0x0E) in ledctl_mode2 */
+        ledctl_blink = hw->ledctl_mode2;
+        for (i=0; i < 4; i++)
+            if (((hw->ledctl_mode2 >> (i * 8)) & 0xFF) ==
+                E1000_LEDCTL_MODE_LED_ON)
+                ledctl_blink |= (E1000_LEDCTL_LED0_BLINK << (i * 8));
+    }
+
+    ew32(LEDCTL, ledctl_blink);
+
+    return E1000_SUCCESS;
+}
+
+/******************************************************************************
+ * Restores the saved state of the SW controllable LED.
+ *
+ * hw - Struct containing variables accessed by shared code
+ *****************************************************************************/
+s32 e1000_cleanup_led(struct e1000_hw *hw)
+{
+    s32 ret_val = E1000_SUCCESS;
+
+    DEBUGFUNC("e1000_cleanup_led");
+
+    switch (hw->mac_type) {
+    case e1000_82542_rev2_0:
+    case e1000_82542_rev2_1:
+    case e1000_82543:
+    case e1000_82544:
+        /* No cleanup necessary */
+        break;
+    case e1000_82541:
+    case e1000_82547:
+    case e1000_82541_rev_2:
+    case e1000_82547_rev_2:
+        /* Turn on PHY Smart Power Down (if previously enabled) */
+        ret_val = e1000_write_phy_reg(hw, IGP01E1000_GMII_FIFO,
+                                      hw->phy_spd_default);
+        if (ret_val)
+            return ret_val;
+        /* Fall Through */
+    default:
+        if (hw->phy_type == e1000_phy_ife) {
+            e1000_write_phy_reg(hw, IFE_PHY_SPECIAL_CONTROL_LED, 0);
+            break;
+        }
+        /* Restore LEDCTL settings */
+        ew32(LEDCTL, hw->ledctl_default);
+        break;
+    }
+
+    return E1000_SUCCESS;
+}
+
+/******************************************************************************
+ * Turns on the software controllable LED
+ *
+ * hw - Struct containing variables accessed by shared code
+ *****************************************************************************/
+s32 e1000_led_on(struct e1000_hw *hw)
+{
+    u32 ctrl = er32(CTRL);
+
+    DEBUGFUNC("e1000_led_on");
+
+    switch (hw->mac_type) {
+    case e1000_82542_rev2_0:
+    case e1000_82542_rev2_1:
+    case e1000_82543:
+        /* Set SW Definable Pin 0 to turn on the LED */
+        ctrl |= E1000_CTRL_SWDPIN0;
+        ctrl |= E1000_CTRL_SWDPIO0;
+        break;
+    case e1000_82544:
+        if (hw->media_type == e1000_media_type_fiber) {
+            /* Set SW Definable Pin 0 to turn on the LED */
+            ctrl |= E1000_CTRL_SWDPIN0;
+            ctrl |= E1000_CTRL_SWDPIO0;
+        } else {
+            /* Clear SW Definable Pin 0 to turn on the LED */
+            ctrl &= ~E1000_CTRL_SWDPIN0;
+            ctrl |= E1000_CTRL_SWDPIO0;
+        }
+        break;
+    default:
+        if (hw->media_type == e1000_media_type_fiber) {
+            /* Clear SW Definable Pin 0 to turn on the LED */
+            ctrl &= ~E1000_CTRL_SWDPIN0;
+            ctrl |= E1000_CTRL_SWDPIO0;
+        } else if (hw->phy_type == e1000_phy_ife) {
+            e1000_write_phy_reg(hw, IFE_PHY_SPECIAL_CONTROL_LED,
+                 (IFE_PSCL_PROBE_MODE | IFE_PSCL_PROBE_LEDS_ON));
+        } else if (hw->media_type == e1000_media_type_copper) {
+            ew32(LEDCTL, hw->ledctl_mode2);
+            return E1000_SUCCESS;
+        }
+        break;
+    }
+
+    ew32(CTRL, ctrl);
+
+    return E1000_SUCCESS;
+}
+
+/******************************************************************************
+ * Turns off the software controllable LED
+ *
+ * hw - Struct containing variables accessed by shared code
+ *****************************************************************************/
+s32 e1000_led_off(struct e1000_hw *hw)
+{
+    u32 ctrl = er32(CTRL);
+
+    DEBUGFUNC("e1000_led_off");
+
+    switch (hw->mac_type) {
+    case e1000_82542_rev2_0:
+    case e1000_82542_rev2_1:
+    case e1000_82543:
+        /* Clear SW Definable Pin 0 to turn off the LED */
+        ctrl &= ~E1000_CTRL_SWDPIN0;
+        ctrl |= E1000_CTRL_SWDPIO0;
+        break;
+    case e1000_82544:
+        if (hw->media_type == e1000_media_type_fiber) {
+            /* Clear SW Definable Pin 0 to turn off the LED */
+            ctrl &= ~E1000_CTRL_SWDPIN0;
+            ctrl |= E1000_CTRL_SWDPIO0;
+        } else {
+            /* Set SW Definable Pin 0 to turn off the LED */
+            ctrl |= E1000_CTRL_SWDPIN0;
+            ctrl |= E1000_CTRL_SWDPIO0;
+        }
+        break;
+    default:
+        if (hw->media_type == e1000_media_type_fiber) {
+            /* Set SW Definable Pin 0 to turn off the LED */
+            ctrl |= E1000_CTRL_SWDPIN0;
+            ctrl |= E1000_CTRL_SWDPIO0;
+        } else if (hw->phy_type == e1000_phy_ife) {
+            e1000_write_phy_reg(hw, IFE_PHY_SPECIAL_CONTROL_LED,
+                 (IFE_PSCL_PROBE_MODE | IFE_PSCL_PROBE_LEDS_OFF));
+        } else if (hw->media_type == e1000_media_type_copper) {
+            ew32(LEDCTL, hw->ledctl_mode1);
+            return E1000_SUCCESS;
+        }
+        break;
+    }
+
+    ew32(CTRL, ctrl);
+
+    return E1000_SUCCESS;
+}
+
+/******************************************************************************
+ * Clears all hardware statistics counters.
+ *
+ * hw - Struct containing variables accessed by shared code
+ *****************************************************************************/
+static void e1000_clear_hw_cntrs(struct e1000_hw *hw)
+{
+    volatile u32 temp;
+
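+    /* The statistics registers are clear-on-read, so simply reading each one
+     * into the unused volatile temp resets the hardware counter. */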
+    temp = er32(CRCERRS);
+    temp = er32(SYMERRS);
+    temp = er32(MPC);
+    temp = er32(SCC);
+    temp = er32(ECOL);
+    temp = er32(MCC);
+    temp = er32(LATECOL);
+    temp = er32(COLC);
+    temp = er32(DC);
+    temp = er32(SEC);
+    temp = er32(RLEC);
+    temp = er32(XONRXC);
+    temp = er32(XONTXC);
+    temp = er32(XOFFRXC);
+    temp = er32(XOFFTXC);
+    temp = er32(FCRUC);
+
+    if (hw->mac_type != e1000_ich8lan) {
+        temp = er32(PRC64);
+        temp = er32(PRC127);
+        temp = er32(PRC255);
+        temp = er32(PRC511);
+        temp = er32(PRC1023);
+        temp = er32(PRC1522);
+    }
+
+    temp = er32(GPRC);
+    temp = er32(BPRC);
+    temp = er32(MPRC);
+    temp = er32(GPTC);
+    temp = er32(GORCL);
+    temp = er32(GORCH);
+    temp = er32(GOTCL);
+    temp = er32(GOTCH);
+    temp = er32(RNBC);
+    temp = er32(RUC);
+    temp = er32(RFC);
+    temp = er32(ROC);
+    temp = er32(RJC);
+    temp = er32(TORL);
+    temp = er32(TORH);
+    temp = er32(TOTL);
+    temp = er32(TOTH);
+    temp = er32(TPR);
+    temp = er32(TPT);
+
+    if (hw->mac_type != e1000_ich8lan) {
+        temp = er32(PTC64);
+        temp = er32(PTC127);
+        temp = er32(PTC255);
+        temp = er32(PTC511);
+        temp = er32(PTC1023);
+        temp = er32(PTC1522);
+    }
+
+    temp = er32(MPTC);
+    temp = er32(BPTC);
+
+    if (hw->mac_type < e1000_82543) return;
+
+    temp = er32(ALGNERRC);
+    temp = er32(RXERRC);
+    temp = er32(TNCRS);
+    temp = er32(CEXTERR);
+    temp = er32(TSCTC);
+    temp = er32(TSCTFC);
+
+    if (hw->mac_type <= e1000_82544) return;
+
+    temp = er32(MGTPRC);
+    temp = er32(MGTPDC);
+    temp = er32(MGTPTC);
+
+    if (hw->mac_type <= e1000_82547_rev_2) return;
+
+    temp = er32(IAC);
+    temp = er32(ICRXOC);
+
+    if (hw->mac_type == e1000_ich8lan) return;
+
+    temp = er32(ICRXPTC);
+    temp = er32(ICRXATC);
+    temp = er32(ICTXPTC);
+    temp = er32(ICTXATC);
+    temp = er32(ICTXQEC);
+    temp = er32(ICTXQMTC);
+    temp = er32(ICRXDMTC);
+}
+
+/******************************************************************************
+ * Resets Adaptive IFS to its default state.
+ *
+ * hw - Struct containing variables accessed by shared code
+ *
+ * Call this after e1000_init_hw. You may override the IFS defaults by setting
+ * hw->ifs_params_forced to true. However, you must initialize
+ * hw->current_ifs_val, ifs_min_val, ifs_max_val, ifs_step_size, and ifs_ratio
+ * before calling this function.
+ *****************************************************************************/
+void e1000_reset_adaptive(struct e1000_hw *hw)
+{
+    DEBUGFUNC("e1000_reset_adaptive");
+
+    if (hw->adaptive_ifs) {
+        if (!hw->ifs_params_forced) {
+            hw->current_ifs_val = 0;
+            hw->ifs_min_val = IFS_MIN;
+            hw->ifs_max_val = IFS_MAX;
+            hw->ifs_step_size = IFS_STEP;
+            hw->ifs_ratio = IFS_RATIO;
+        }
+        hw->in_ifs_mode = false;
+        ew32(AIT, 0);
+    } else {
+        DEBUGOUT("Not in Adaptive IFS mode!\n");
+    }
+}
+
+/******************************************************************************
+ * Called during the callback/watchdog routine to update IFS value based on
+ * the ratio of transmits to collisions.
+ *
+ * hw - Struct containing variables accessed by shared code; uses
+ *      hw->tx_packet_delta and hw->collision_delta, the transmit and
+ *      collision counts accumulated since the last callback
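+ *
+ * For example, if ifs_ratio were 4, the adaptive throttle would engage once
+ * collisions exceed one quarter of the transmits in the interval (and more
+ * than MIN_NUM_XMITS packets were sent), raising AIT from ifs_min_val in
+ * ifs_step_size increments up to ifs_max_val.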
+ *****************************************************************************/
+void e1000_update_adaptive(struct e1000_hw *hw)
+{
+    DEBUGFUNC("e1000_update_adaptive");
+
+    if (hw->adaptive_ifs) {
+        if ((hw->collision_delta * hw->ifs_ratio) > hw->tx_packet_delta) {
+            if (hw->tx_packet_delta > MIN_NUM_XMITS) {
+                hw->in_ifs_mode = true;
+                if (hw->current_ifs_val < hw->ifs_max_val) {
+                    if (hw->current_ifs_val == 0)
+                        hw->current_ifs_val = hw->ifs_min_val;
+                    else
+                        hw->current_ifs_val += hw->ifs_step_size;
+                    ew32(AIT, hw->current_ifs_val);
+                }
+            }
+        } else {
+            if (hw->in_ifs_mode && (hw->tx_packet_delta <= MIN_NUM_XMITS)) {
+                hw->current_ifs_val = 0;
+                hw->in_ifs_mode = false;
+                ew32(AIT, 0);
+            }
+        }
+    } else {
+        DEBUGOUT("Not in Adaptive IFS mode!\n");
+    }
+}
+
+/******************************************************************************
+ * Adjusts the statistic counters when a frame is accepted by TBI_ACCEPT
+ *
+ * hw - Struct containing variables accessed by shared code
+ * stats - Struct containing the statistics counters to adjust
+ * frame_len - The length of the frame in question
+ * mac_addr - The Ethernet destination address of the frame in question
+ *****************************************************************************/
+void e1000_tbi_adjust_stats(struct e1000_hw *hw, struct e1000_hw_stats *stats,
+			    u32 frame_len, u8 *mac_addr)
+{
+    u64 carry_bit;
+
+    /* First adjust the frame length. */
+    frame_len--;
+    /* We need to adjust the statistics counters, since the hardware
+     * counters overcount this packet as a CRC error and undercount
+     * the packet as a good packet
+     */
+    /* This packet should not be counted as a CRC error.    */
+    stats->crcerrs--;
+    /* This packet does count as a Good Packet Received.    */
+    stats->gprc++;
+
+    /* Adjust the Good Octets received counters             */
+    carry_bit = 0x80000000 & stats->gorcl;
+    stats->gorcl += frame_len;
+    /* If the high bit of Gorcl (the low 32 bits of the Good Octets
+     * Received Count) was one before the addition,
+     * AND it is zero after, then we lost the carry out, so we need to
+     * add one to Gorch (Good Octets Received Count High).
+     * This could be simplified if all environments supported
+     * 64-bit integers.
+     */
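+    /* For example, if gorcl were 0xFFFFFF00 and frame_len 0x200, the carry
+     * bit is set before the addition and gorcl wraps to 0x00000100 after it,
+     * so gorch is incremented to preserve the 64-bit total. */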
+    if (carry_bit && ((stats->gorcl & 0x80000000) == 0))
+        stats->gorch++;
+    /* Is this a broadcast or multicast?  Check broadcast first,
+     * since the test for a multicast frame will test positive on
+     * a broadcast frame.
+     */
+    if ((mac_addr[0] == (u8)0xff) && (mac_addr[1] == (u8)0xff))
+        /* Broadcast packet */
+        stats->bprc++;
+    else if (*mac_addr & 0x01)
+        /* Multicast packet */
+        stats->mprc++;
+
+    if (frame_len == hw->max_frame_size) {
+        /* In this case, the hardware has overcounted the number of
+         * oversize frames.
+         */
+        if (stats->roc > 0)
+            stats->roc--;
+    }
+
+    /* Adjust the bin counters when the extra byte put the frame in the
+     * wrong bin. Remember that the frame_len was adjusted above.
+     */
+    if (frame_len == 64) {
+        stats->prc64++;
+        stats->prc127--;
+    } else if (frame_len == 127) {
+        stats->prc127++;
+        stats->prc255--;
+    } else if (frame_len == 255) {
+        stats->prc255++;
+        stats->prc511--;
+    } else if (frame_len == 511) {
+        stats->prc511++;
+        stats->prc1023--;
+    } else if (frame_len == 1023) {
+        stats->prc1023++;
+        stats->prc1522--;
+    } else if (frame_len == 1522) {
+        stats->prc1522++;
+    }
+}
+
+/******************************************************************************
+ * Gets the current PCI bus type, speed, and width of the hardware
+ *
+ * hw - Struct containing variables accessed by shared code
+ *****************************************************************************/
+void e1000_get_bus_info(struct e1000_hw *hw)
+{
+    s32 ret_val;
+    u16 pci_ex_link_status;
+    u32 status;
+
+    switch (hw->mac_type) {
+    case e1000_82542_rev2_0:
+    case e1000_82542_rev2_1:
+        hw->bus_type = e1000_bus_type_pci;
+        hw->bus_speed = e1000_bus_speed_unknown;
+        hw->bus_width = e1000_bus_width_unknown;
+        break;
+    case e1000_82571:
+    case e1000_82572:
+    case e1000_82573:
+    case e1000_80003es2lan:
+        hw->bus_type = e1000_bus_type_pci_express;
+        hw->bus_speed = e1000_bus_speed_2500;
+        ret_val = e1000_read_pcie_cap_reg(hw,
+                                      PCI_EX_LINK_STATUS,
+                                      &pci_ex_link_status);
+        if (ret_val)
+            hw->bus_width = e1000_bus_width_unknown;
+        else
+            hw->bus_width = (pci_ex_link_status & PCI_EX_LINK_WIDTH_MASK) >>
+                          PCI_EX_LINK_WIDTH_SHIFT;
+        break;
+    case e1000_ich8lan:
+        hw->bus_type = e1000_bus_type_pci_express;
+        hw->bus_speed = e1000_bus_speed_2500;
+        hw->bus_width = e1000_bus_width_pciex_1;
+        break;
+    default:
+        status = er32(STATUS);
+        hw->bus_type = (status & E1000_STATUS_PCIX_MODE) ?
+                       e1000_bus_type_pcix : e1000_bus_type_pci;
+
+        if (hw->device_id == E1000_DEV_ID_82546EB_QUAD_COPPER) {
+            hw->bus_speed = (hw->bus_type == e1000_bus_type_pci) ?
+                            e1000_bus_speed_66 : e1000_bus_speed_120;
+        } else if (hw->bus_type == e1000_bus_type_pci) {
+            hw->bus_speed = (status & E1000_STATUS_PCI66) ?
+                            e1000_bus_speed_66 : e1000_bus_speed_33;
+        } else {
+            switch (status & E1000_STATUS_PCIX_SPEED) {
+            case E1000_STATUS_PCIX_SPEED_66:
+                hw->bus_speed = e1000_bus_speed_66;
+                break;
+            case E1000_STATUS_PCIX_SPEED_100:
+                hw->bus_speed = e1000_bus_speed_100;
+                break;
+            case E1000_STATUS_PCIX_SPEED_133:
+                hw->bus_speed = e1000_bus_speed_133;
+                break;
+            default:
+                hw->bus_speed = e1000_bus_speed_reserved;
+                break;
+            }
+        }
+        hw->bus_width = (status & E1000_STATUS_BUS64) ?
+                        e1000_bus_width_64 : e1000_bus_width_32;
+        break;
+    }
+}
+
+/******************************************************************************
+ * Writes a value to one of the device's registers using port I/O (as opposed to
+ * memory mapped I/O). Only 82544 and newer devices support port I/O.
+ *
+ * hw - Struct containing variables accessed by shared code
+ * offset - offset to write to
+ * value - value to write
+ *****************************************************************************/
+static void e1000_write_reg_io(struct e1000_hw *hw, u32 offset, u32 value)
+{
+    unsigned long io_addr = hw->io_base;
+    unsigned long io_data = hw->io_base + 4;
+
+    e1000_io_write(hw, io_addr, offset);
+    e1000_io_write(hw, io_data, value);
+}
+
+/******************************************************************************
+ * Estimates the cable length.
+ *
+ * hw - Struct containing variables accessed by shared code
+ * min_length - The estimated minimum length
+ * max_length - The estimated maximum length
+ *
+ * returns: - E1000_ERR_XXX
+ *            E1000_SUCCESS
+ *
+ * This function always returns a ranged length (minimum & maximum).
+ * For M88 PHYs, this function maps the single value returned from the
+ * register onto a minimum/maximum range.
+ * For IGP PHYs, the function calculates the range from the AGC registers.
+ *****************************************************************************/
+static s32 e1000_get_cable_length(struct e1000_hw *hw, u16 *min_length,
+				  u16 *max_length)
+{
+    s32 ret_val;
+    u16 agc_value = 0;
+    u16 i, phy_data;
+    u16 cable_length;
+
+    DEBUGFUNC("e1000_get_cable_length");
+
+    *min_length = *max_length = 0;
+
+    /* Use old method for Phy older than IGP */
+    if (hw->phy_type == e1000_phy_m88) {
+
+        ret_val = e1000_read_phy_reg(hw, M88E1000_PHY_SPEC_STATUS,
+                                     &phy_data);
+        if (ret_val)
+            return ret_val;
+        cable_length = (phy_data & M88E1000_PSSR_CABLE_LENGTH) >>
+                       M88E1000_PSSR_CABLE_LENGTH_SHIFT;
+
+        /* Convert the enum value to ranged values */
+        switch (cable_length) {
+        case e1000_cable_length_50:
+            *min_length = 0;
+            *max_length = e1000_igp_cable_length_50;
+            break;
+        case e1000_cable_length_50_80:
+            *min_length = e1000_igp_cable_length_50;
+            *max_length = e1000_igp_cable_length_80;
+            break;
+        case e1000_cable_length_80_110:
+            *min_length = e1000_igp_cable_length_80;
+            *max_length = e1000_igp_cable_length_110;
+            break;
+        case e1000_cable_length_110_140:
+            *min_length = e1000_igp_cable_length_110;
+            *max_length = e1000_igp_cable_length_140;
+            break;
+        case e1000_cable_length_140:
+            *min_length = e1000_igp_cable_length_140;
+            *max_length = e1000_igp_cable_length_170;
+            break;
+        default:
+            return -E1000_ERR_PHY;
+            break;
+        }
+    } else if (hw->phy_type == e1000_phy_gg82563) {
+        ret_val = e1000_read_phy_reg(hw, GG82563_PHY_DSP_DISTANCE,
+                                     &phy_data);
+        if (ret_val)
+            return ret_val;
+        cable_length = phy_data & GG82563_DSPD_CABLE_LENGTH;
+
+        switch (cable_length) {
+        case e1000_gg_cable_length_60:
+            *min_length = 0;
+            *max_length = e1000_igp_cable_length_60;
+            break;
+        case e1000_gg_cable_length_60_115:
+            *min_length = e1000_igp_cable_length_60;
+            *max_length = e1000_igp_cable_length_115;
+            break;
+        case e1000_gg_cable_length_115_150:
+            *min_length = e1000_igp_cable_length_115;
+            *max_length = e1000_igp_cable_length_150;
+            break;
+        case e1000_gg_cable_length_150:
+            *min_length = e1000_igp_cable_length_150;
+            *max_length = e1000_igp_cable_length_180;
+            break;
+        default:
+            return -E1000_ERR_PHY;
+            break;
+        }
+    } else if (hw->phy_type == e1000_phy_igp) { /* For IGP PHY */
+        u16 cur_agc_value;
+        u16 min_agc_value = IGP01E1000_AGC_LENGTH_TABLE_SIZE;
+        u16 agc_reg_array[IGP01E1000_PHY_CHANNEL_NUM] =
+                                                         {IGP01E1000_PHY_AGC_A,
+                                                          IGP01E1000_PHY_AGC_B,
+                                                          IGP01E1000_PHY_AGC_C,
+                                                          IGP01E1000_PHY_AGC_D};
+        /* Read the AGC registers for all channels */
+        for (i = 0; i < IGP01E1000_PHY_CHANNEL_NUM; i++) {
+
+            ret_val = e1000_read_phy_reg(hw, agc_reg_array[i], &phy_data);
+            if (ret_val)
+                return ret_val;
+
+            cur_agc_value = phy_data >> IGP01E1000_AGC_LENGTH_SHIFT;
+
+            /* Value bound check. */
+            if ((cur_agc_value >= IGP01E1000_AGC_LENGTH_TABLE_SIZE - 1) ||
+                (cur_agc_value == 0))
+                return -E1000_ERR_PHY;
+
+            agc_value += cur_agc_value;
+
+            /* Update minimal AGC value. */
+            if (min_agc_value > cur_agc_value)
+                min_agc_value = cur_agc_value;
+        }
+
+        /* Remove the minimal AGC result for length < 50m */
+        if (agc_value < IGP01E1000_PHY_CHANNEL_NUM * e1000_igp_cable_length_50) {
+            agc_value -= min_agc_value;
+
+            /* Get the average length of the remaining 3 channels */
+            agc_value /= (IGP01E1000_PHY_CHANNEL_NUM - 1);
+        } else {
+            /* Get the average length of all the 4 channels. */
+            agc_value /= IGP01E1000_PHY_CHANNEL_NUM;
+        }
+
+        /* Set the range of the calculated length. */
+        *min_length = ((e1000_igp_cable_length_table[agc_value] -
+                       IGP01E1000_AGC_RANGE) > 0) ?
+                       (e1000_igp_cable_length_table[agc_value] -
+                       IGP01E1000_AGC_RANGE) : 0;
+        *max_length = e1000_igp_cable_length_table[agc_value] +
+                      IGP01E1000_AGC_RANGE;
+    } else if (hw->phy_type == e1000_phy_igp_2 ||
+               hw->phy_type == e1000_phy_igp_3) {
+        u16 cur_agc_index, max_agc_index = 0;
+        u16 min_agc_index = IGP02E1000_AGC_LENGTH_TABLE_SIZE - 1;
+        u16 agc_reg_array[IGP02E1000_PHY_CHANNEL_NUM] =
+                                                         {IGP02E1000_PHY_AGC_A,
+                                                          IGP02E1000_PHY_AGC_B,
+                                                          IGP02E1000_PHY_AGC_C,
+                                                          IGP02E1000_PHY_AGC_D};
+        /* Read the AGC registers for all channels */
+        for (i = 0; i < IGP02E1000_PHY_CHANNEL_NUM; i++) {
+            ret_val = e1000_read_phy_reg(hw, agc_reg_array[i], &phy_data);
+            if (ret_val)
+                return ret_val;
+
+            /* Getting bits 15:9, which represent the combination of coarse and
+             * fine gain values.  The result is a number that can be put into
+             * the lookup table to obtain the approximate cable length. */
+            cur_agc_index = (phy_data >> IGP02E1000_AGC_LENGTH_SHIFT) &
+                            IGP02E1000_AGC_LENGTH_MASK;
+
+            /* Array index bound check. */
+            if ((cur_agc_index >= IGP02E1000_AGC_LENGTH_TABLE_SIZE) ||
+                (cur_agc_index == 0))
+                return -E1000_ERR_PHY;
+
+            /* Remove min & max AGC values from calculation. */
+            if (e1000_igp_2_cable_length_table[min_agc_index] >
+                e1000_igp_2_cable_length_table[cur_agc_index])
+                min_agc_index = cur_agc_index;
+            if (e1000_igp_2_cable_length_table[max_agc_index] <
+                e1000_igp_2_cable_length_table[cur_agc_index])
+                max_agc_index = cur_agc_index;
+
+            agc_value += e1000_igp_2_cable_length_table[cur_agc_index];
+        }
+
+        agc_value -= (e1000_igp_2_cable_length_table[min_agc_index] +
+                      e1000_igp_2_cable_length_table[max_agc_index]);
+        agc_value /= (IGP02E1000_PHY_CHANNEL_NUM - 2);
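+        /* For example, (hypothetical) table lookups of 40, 50, 60 and 110 m
+         * drop the 40 and 110 extremes and average the remaining two to
+         * 55 m, reported below as 55 +/- IGP02E1000_AGC_RANGE. */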
+
+        /* Calculate cable length with the error range of +/- 10 meters. */
+        *min_length = ((agc_value - IGP02E1000_AGC_RANGE) > 0) ?
+                       (agc_value - IGP02E1000_AGC_RANGE) : 0;
+        *max_length = agc_value + IGP02E1000_AGC_RANGE;
+    }
+
+    return E1000_SUCCESS;
+}
+
+/******************************************************************************
+ * Check the cable polarity
+ *
+ * hw - Struct containing variables accessed by shared code
+ * polarity - output parameter : e1000_rev_polarity_normal - not reversed
+ *                               e1000_rev_polarity_reversed - reversed.
+ *
+ * returns: - E1000_ERR_XXX
+ *            E1000_SUCCESS
+ *
+ * For PHYs older than IGP, this function simply reads the polarity bit in the
+ * PHY Status register.  For IGP PHYs, this bit is valid only if link speed is
+ * 10 Mbps.  If the link speed is 100 Mbps there is no polarity so this bit will
+ * return 0.  If the link speed is 1000 Mbps the polarity status is in the
+ * IGP01E1000_PHY_PCS_INIT_REG.
+ *****************************************************************************/
+static s32 e1000_check_polarity(struct e1000_hw *hw,
+				e1000_rev_polarity *polarity)
+{
+    s32 ret_val;
+    u16 phy_data;
+
+    DEBUGFUNC("e1000_check_polarity");
+
+    if ((hw->phy_type == e1000_phy_m88) ||
+        (hw->phy_type == e1000_phy_gg82563)) {
+        /* return the Polarity bit in the Status register. */
+        ret_val = e1000_read_phy_reg(hw, M88E1000_PHY_SPEC_STATUS,
+                                     &phy_data);
+        if (ret_val)
+            return ret_val;
+        *polarity = ((phy_data & M88E1000_PSSR_REV_POLARITY) >>
+                     M88E1000_PSSR_REV_POLARITY_SHIFT) ?
+                     e1000_rev_polarity_reversed : e1000_rev_polarity_normal;
+
+    } else if (hw->phy_type == e1000_phy_igp ||
+              hw->phy_type == e1000_phy_igp_3 ||
+              hw->phy_type == e1000_phy_igp_2) {
+        /* Read the Status register to check the speed */
+        ret_val = e1000_read_phy_reg(hw, IGP01E1000_PHY_PORT_STATUS,
+                                     &phy_data);
+        if (ret_val)
+            return ret_val;
+
+        /* If speed is 1000 Mbps, must read the IGP01E1000_PHY_PCS_INIT_REG to
+         * find the polarity status */
+        if ((phy_data & IGP01E1000_PSSR_SPEED_MASK) ==
+           IGP01E1000_PSSR_SPEED_1000MBPS) {
+
+            /* Read the GIG initialization PCS register (0x00B4) */
+            ret_val = e1000_read_phy_reg(hw, IGP01E1000_PHY_PCS_INIT_REG,
+                                         &phy_data);
+            if (ret_val)
+                return ret_val;
+
+            /* Check the polarity bits */
+            *polarity = (phy_data & IGP01E1000_PHY_POLARITY_MASK) ?
+                         e1000_rev_polarity_reversed : e1000_rev_polarity_normal;
+        } else {
+            /* For 10 Mbps, read the polarity bit in the status register. (for
+             * 100 Mbps this bit is always 0) */
+            *polarity = (phy_data & IGP01E1000_PSSR_POLARITY_REVERSED) ?
+                         e1000_rev_polarity_reversed : e1000_rev_polarity_normal;
+        }
+    } else if (hw->phy_type == e1000_phy_ife) {
+        ret_val = e1000_read_phy_reg(hw, IFE_PHY_EXTENDED_STATUS_CONTROL,
+                                     &phy_data);
+        if (ret_val)
+            return ret_val;
+        *polarity = ((phy_data & IFE_PESC_POLARITY_REVERSED) >>
+                     IFE_PESC_POLARITY_REVERSED_SHIFT) ?
+                     e1000_rev_polarity_reversed : e1000_rev_polarity_normal;
+    }
+    return E1000_SUCCESS;
+}
+
+/******************************************************************************
+ * Check if Downshift occurred
+ *
+ * hw - Struct containing variables accessed by shared code
+ * Sets hw->speed_downgraded : 0 - No Downshift occurred.
+ *                             1 - Downshift occurred.
+ *
+ * returns: - E1000_ERR_XXX
+ *            E1000_SUCCESS
+ *
+ * For PHYs older than IGP, this function reads the Downshift bit in the PHY
+ * Specific Status register.  For IGP PHYs, it reads the Downgrade bit in the
+ * Link Health register.  In IGP this bit is latched high, so the driver must
+ * read it immediately after link is established.
+ *****************************************************************************/
+static s32 e1000_check_downshift(struct e1000_hw *hw)
+{
+    s32 ret_val;
+    u16 phy_data;
+
+    DEBUGFUNC("e1000_check_downshift");
+
+    if (hw->phy_type == e1000_phy_igp ||
+        hw->phy_type == e1000_phy_igp_3 ||
+        hw->phy_type == e1000_phy_igp_2) {
+        ret_val = e1000_read_phy_reg(hw, IGP01E1000_PHY_LINK_HEALTH,
+                                     &phy_data);
+        if (ret_val)
+            return ret_val;
+
+        hw->speed_downgraded = (phy_data & IGP01E1000_PLHR_SS_DOWNGRADE) ? 1 : 0;
+    } else if ((hw->phy_type == e1000_phy_m88) ||
+               (hw->phy_type == e1000_phy_gg82563)) {
+        ret_val = e1000_read_phy_reg(hw, M88E1000_PHY_SPEC_STATUS,
+                                     &phy_data);
+        if (ret_val)
+            return ret_val;
+
+        hw->speed_downgraded = (phy_data & M88E1000_PSSR_DOWNSHIFT) >>
+                               M88E1000_PSSR_DOWNSHIFT_SHIFT;
+    } else if (hw->phy_type == e1000_phy_ife) {
+        /* e1000_phy_ife supports 10/100 speed only */
+        hw->speed_downgraded = false;
+    }
+
+    return E1000_SUCCESS;
+}
+
+/*****************************************************************************
+ *
+ * 82541_rev_2 & 82547_rev_2 have the capability to configure the DSP when a
+ * gigabit link is achieved to improve link quality.
+ *
+ * hw: Struct containing variables accessed by shared code
+ *
+ * returns: - E1000_ERR_PHY if it fails to read/write the PHY
+ *            E1000_SUCCESS in any other case.
+ *
+ ****************************************************************************/
+
+static s32 e1000_config_dsp_after_link_change(struct e1000_hw *hw, bool link_up)
+{
+    s32 ret_val;
+    u16 phy_data, phy_saved_data, speed, duplex, i;
+    u16 dsp_reg_array[IGP01E1000_PHY_CHANNEL_NUM] =
+                                        {IGP01E1000_PHY_AGC_PARAM_A,
+                                        IGP01E1000_PHY_AGC_PARAM_B,
+                                        IGP01E1000_PHY_AGC_PARAM_C,
+                                        IGP01E1000_PHY_AGC_PARAM_D};
+    u16 min_length, max_length;
+
+    DEBUGFUNC("e1000_config_dsp_after_link_change");
+
+    if (hw->phy_type != e1000_phy_igp)
+        return E1000_SUCCESS;
+
+    if (link_up) {
+        ret_val = e1000_get_speed_and_duplex(hw, &speed, &duplex);
+        if (ret_val) {
+            DEBUGOUT("Error getting link speed and duplex\n");
+            return ret_val;
+        }
+
+        if (speed == SPEED_1000) {
+
+            ret_val = e1000_get_cable_length(hw, &min_length, &max_length);
+            if (ret_val)
+                return ret_val;
+
+            if ((hw->dsp_config_state == e1000_dsp_config_enabled) &&
+                min_length >= e1000_igp_cable_length_50) {
+
+                for (i = 0; i < IGP01E1000_PHY_CHANNEL_NUM; i++) {
+                    ret_val = e1000_read_phy_reg(hw, dsp_reg_array[i],
+                                                 &phy_data);
+                    if (ret_val)
+                        return ret_val;
+
+                    phy_data &= ~IGP01E1000_PHY_EDAC_MU_INDEX;
+
+                    ret_val = e1000_write_phy_reg(hw, dsp_reg_array[i],
+                                                  phy_data);
+                    if (ret_val)
+                        return ret_val;
+                }
+                hw->dsp_config_state = e1000_dsp_config_activated;
+            }
+
+            if ((hw->ffe_config_state == e1000_ffe_config_enabled) &&
+               (min_length < e1000_igp_cable_length_50)) {
+
+                u16 ffe_idle_err_timeout = FFE_IDLE_ERR_COUNT_TIMEOUT_20;
+                u32 idle_errs = 0;
+
+                /* clear previous idle error counts */
+                ret_val = e1000_read_phy_reg(hw, PHY_1000T_STATUS,
+                                             &phy_data);
+                if (ret_val)
+                    return ret_val;
+
+                for (i = 0; i < ffe_idle_err_timeout; i++) {
+                    udelay(1000);
+                    ret_val = e1000_read_phy_reg(hw, PHY_1000T_STATUS,
+                                                 &phy_data);
+                    if (ret_val)
+                        return ret_val;
+
+                    idle_errs += (phy_data & SR_1000T_IDLE_ERROR_CNT);
+                    if (idle_errs > SR_1000T_PHY_EXCESSIVE_IDLE_ERR_COUNT) {
+                        hw->ffe_config_state = e1000_ffe_config_active;
+
+                        ret_val = e1000_write_phy_reg(hw,
+                                    IGP01E1000_PHY_DSP_FFE,
+                                    IGP01E1000_PHY_DSP_FFE_CM_CP);
+                        if (ret_val)
+                            return ret_val;
+                        break;
+                    }
+
+                    if (idle_errs)
+                        ffe_idle_err_timeout = FFE_IDLE_ERR_COUNT_TIMEOUT_100;
+                }
+            }
+        }
+    } else {
+        if (hw->dsp_config_state == e1000_dsp_config_activated) {
+            /* Save off the current value of register 0x2F5B to be restored at
+             * the end of the routines. */
+            ret_val = e1000_read_phy_reg(hw, 0x2F5B, &phy_saved_data);
+
+            if (ret_val)
+                return ret_val;
+
+            /* Disable the PHY transmitter */
+            ret_val = e1000_write_phy_reg(hw, 0x2F5B, 0x0003);
+
+            if (ret_val)
+                return ret_val;
+
+            mdelay(20);
+
+            ret_val = e1000_write_phy_reg(hw, 0x0000,
+                                          IGP01E1000_IEEE_FORCE_GIGA);
+            if (ret_val)
+                return ret_val;
+            for (i = 0; i < IGP01E1000_PHY_CHANNEL_NUM; i++) {
+                ret_val = e1000_read_phy_reg(hw, dsp_reg_array[i], &phy_data);
+                if (ret_val)
+                    return ret_val;
+
+                phy_data &= ~IGP01E1000_PHY_EDAC_MU_INDEX;
+                phy_data |=  IGP01E1000_PHY_EDAC_SIGN_EXT_9_BITS;
+
+                ret_val = e1000_write_phy_reg(hw,dsp_reg_array[i], phy_data);
+                if (ret_val)
+                    return ret_val;
+            }
+
+            ret_val = e1000_write_phy_reg(hw, 0x0000,
+                                          IGP01E1000_IEEE_RESTART_AUTONEG);
+            if (ret_val)
+                return ret_val;
+
+            mdelay(20);
+
+            /* Now enable the transmitter */
+            ret_val = e1000_write_phy_reg(hw, 0x2F5B, phy_saved_data);
+
+            if (ret_val)
+                return ret_val;
+
+            hw->dsp_config_state = e1000_dsp_config_enabled;
+        }
+
+        if (hw->ffe_config_state == e1000_ffe_config_active) {
+            /* Save off the current value of register 0x2F5B to be restored at
+             * the end of the routines. */
+            ret_val = e1000_read_phy_reg(hw, 0x2F5B, &phy_saved_data);
+
+            if (ret_val)
+                return ret_val;
+
+            /* Disable the PHY transmitter */
+            ret_val = e1000_write_phy_reg(hw, 0x2F5B, 0x0003);
+
+            if (ret_val)
+                return ret_val;
+
+            mdelay(20);
+
+            ret_val = e1000_write_phy_reg(hw, 0x0000,
+                                          IGP01E1000_IEEE_FORCE_GIGA);
+            if (ret_val)
+                return ret_val;
+            ret_val = e1000_write_phy_reg(hw, IGP01E1000_PHY_DSP_FFE,
+                                          IGP01E1000_PHY_DSP_FFE_DEFAULT);
+            if (ret_val)
+                return ret_val;
+
+            ret_val = e1000_write_phy_reg(hw, 0x0000,
+                                          IGP01E1000_IEEE_RESTART_AUTONEG);
+            if (ret_val)
+                return ret_val;
+
+            mdelay(20);
+
+            /* Now enable the transmitter */
+            ret_val = e1000_write_phy_reg(hw, 0x2F5B, phy_saved_data);
+
+            if (ret_val)
+                return ret_val;
+
+            hw->ffe_config_state = e1000_ffe_config_enabled;
+        }
+    }
+    return E1000_SUCCESS;
+}
+
+/*****************************************************************************
+ * Set PHY to class A mode
+ * Assumes the following operations will follow to enable the new class mode.
+ *  1. Do a PHY soft reset
+ *  2. Restart auto-negotiation or force link.
+ *
+ * hw - Struct containing variables accessed by shared code
+ ****************************************************************************/
+static s32 e1000_set_phy_mode(struct e1000_hw *hw)
+{
+    s32 ret_val;
+    u16 eeprom_data;
+
+    DEBUGFUNC("e1000_set_phy_mode");
+
+    if ((hw->mac_type == e1000_82545_rev_3) &&
+        (hw->media_type == e1000_media_type_copper)) {
+        ret_val = e1000_read_eeprom(hw, EEPROM_PHY_CLASS_WORD, 1, &eeprom_data);
+        if (ret_val) {
+            return ret_val;
+        }
+
+        if ((eeprom_data != EEPROM_RESERVED_WORD) &&
+            (eeprom_data & EEPROM_PHY_CLASS_A)) {
+            ret_val = e1000_write_phy_reg(hw, M88E1000_PHY_PAGE_SELECT, 0x000B);
+            if (ret_val)
+                return ret_val;
+            ret_val = e1000_write_phy_reg(hw, M88E1000_PHY_GEN_CONTROL, 0x8104);
+            if (ret_val)
+                return ret_val;
+
+            hw->phy_reset_disable = false;
+        }
+    }
+
+    return E1000_SUCCESS;
+}
+
+/*****************************************************************************
+ *
+ * This function sets the LPLU state according to the active flag.  When
+ * activating LPLU this function also disables SmartSpeed and vice versa.
+ * LPLU will not be activated unless the device autonegotiation advertisement
+ * meets standards of either 10, 10/100 or 10/100/1000 at all duplexes.
+ * hw: Struct containing variables accessed by shared code
+ * active - true to enable LPLU, false to disable LPLU.
+ *
+ * returns: - E1000_ERR_PHY if the PHY read/write fails
+ *            E1000_SUCCESS in any other case.
+ *
+ ****************************************************************************/
+
+static s32 e1000_set_d3_lplu_state(struct e1000_hw *hw, bool active)
+{
+    u32 phy_ctrl = 0;
+    s32 ret_val;
+    u16 phy_data;
+    DEBUGFUNC("e1000_set_d3_lplu_state");
+
+    if (hw->phy_type != e1000_phy_igp && hw->phy_type != e1000_phy_igp_2
+        && hw->phy_type != e1000_phy_igp_3)
+        return E1000_SUCCESS;
+
+    /* During driver activity LPLU should not be used or it will attain link
+     * from the lowest speeds starting from 10Mbps. The capability is used for
+     * Dx transitions and states */
+    if (hw->mac_type == e1000_82541_rev_2 || hw->mac_type == e1000_82547_rev_2) {
+        ret_val = e1000_read_phy_reg(hw, IGP01E1000_GMII_FIFO, &phy_data);
+        if (ret_val)
+            return ret_val;
+    } else if (hw->mac_type == e1000_ich8lan) {
+        /* MAC writes into PHY register based on the state transition
+         * and start auto-negotiation. SW driver can overwrite the settings
+         * in CSR PHY power control E1000_PHY_CTRL register. */
+        phy_ctrl = er32(PHY_CTRL);
+    } else {
+        ret_val = e1000_read_phy_reg(hw, IGP02E1000_PHY_POWER_MGMT, &phy_data);
+        if (ret_val)
+            return ret_val;
+    }
+
+    if (!active) {
+        if (hw->mac_type == e1000_82541_rev_2 ||
+            hw->mac_type == e1000_82547_rev_2) {
+            phy_data &= ~IGP01E1000_GMII_FLEX_SPD;
+            ret_val = e1000_write_phy_reg(hw, IGP01E1000_GMII_FIFO, phy_data);
+            if (ret_val)
+                return ret_val;
+        } else {
+            if (hw->mac_type == e1000_ich8lan) {
+                phy_ctrl &= ~E1000_PHY_CTRL_NOND0A_LPLU;
+                ew32(PHY_CTRL, phy_ctrl);
+            } else {
+                phy_data &= ~IGP02E1000_PM_D3_LPLU;
+                ret_val = e1000_write_phy_reg(hw, IGP02E1000_PHY_POWER_MGMT,
+                                              phy_data);
+                if (ret_val)
+                    return ret_val;
+            }
+        }
+
+        /* LPLU and SmartSpeed are mutually exclusive.  LPLU is used during
+         * Dx states where the power conservation is most important.  During
+         * driver activity we should enable SmartSpeed, so performance is
+         * maintained. */
+        if (hw->smart_speed == e1000_smart_speed_on) {
+            ret_val = e1000_read_phy_reg(hw, IGP01E1000_PHY_PORT_CONFIG,
+                                         &phy_data);
+            if (ret_val)
+                return ret_val;
+
+            phy_data |= IGP01E1000_PSCFR_SMART_SPEED;
+            ret_val = e1000_write_phy_reg(hw, IGP01E1000_PHY_PORT_CONFIG,
+                                          phy_data);
+            if (ret_val)
+                return ret_val;
+        } else if (hw->smart_speed == e1000_smart_speed_off) {
+            ret_val = e1000_read_phy_reg(hw, IGP01E1000_PHY_PORT_CONFIG,
+                                         &phy_data);
+            if (ret_val)
+                return ret_val;
+
+            phy_data &= ~IGP01E1000_PSCFR_SMART_SPEED;
+            ret_val = e1000_write_phy_reg(hw, IGP01E1000_PHY_PORT_CONFIG,
+                                          phy_data);
+            if (ret_val)
+                return ret_val;
+        }
+
+    } else if ((hw->autoneg_advertised == AUTONEG_ADVERTISE_SPEED_DEFAULT) ||
+               (hw->autoneg_advertised == AUTONEG_ADVERTISE_10_ALL ) ||
+               (hw->autoneg_advertised == AUTONEG_ADVERTISE_10_100_ALL)) {
+
+        if (hw->mac_type == e1000_82541_rev_2 ||
+            hw->mac_type == e1000_82547_rev_2) {
+            phy_data |= IGP01E1000_GMII_FLEX_SPD;
+            ret_val = e1000_write_phy_reg(hw, IGP01E1000_GMII_FIFO, phy_data);
+            if (ret_val)
+                return ret_val;
+        } else {
+            if (hw->mac_type == e1000_ich8lan) {
+                phy_ctrl |= E1000_PHY_CTRL_NOND0A_LPLU;
+                ew32(PHY_CTRL, phy_ctrl);
+            } else {
+                phy_data |= IGP02E1000_PM_D3_LPLU;
+                ret_val = e1000_write_phy_reg(hw, IGP02E1000_PHY_POWER_MGMT,
+                                              phy_data);
+                if (ret_val)
+                    return ret_val;
+            }
+        }
+
+        /* When LPLU is enabled we should disable SmartSpeed */
+        ret_val = e1000_read_phy_reg(hw, IGP01E1000_PHY_PORT_CONFIG, &phy_data);
+        if (ret_val)
+            return ret_val;
+
+        phy_data &= ~IGP01E1000_PSCFR_SMART_SPEED;
+        ret_val = e1000_write_phy_reg(hw, IGP01E1000_PHY_PORT_CONFIG, phy_data);
+        if (ret_val)
+            return ret_val;
+
+    }
+    return E1000_SUCCESS;
+}
+
+/*****************************************************************************
+ *
+ * This function sets the LPLU D0 state according to the active flag.  When
+ * activating LPLU this function also disables SmartSpeed and vice versa.
+ * LPLU will not be activated unless the device autonegotiation advertisement
+ * meets standards of either 10, 10/100 or 10/100/1000 at all duplexes.
+ * hw: Struct containing variables accessed by shared code
+ * active - true to enable LPLU, false to disable LPLU.
+ *
+ * returns: - E1000_ERR_PHY if the PHY read/write fails
+ *            E1000_SUCCESS in any other case.
+ *
+ ****************************************************************************/
+
+static s32 e1000_set_d0_lplu_state(struct e1000_hw *hw, bool active)
+{
+    u32 phy_ctrl = 0;
+    s32 ret_val;
+    u16 phy_data;
+    DEBUGFUNC("e1000_set_d0_lplu_state");
+
+    if (hw->mac_type <= e1000_82547_rev_2)
+        return E1000_SUCCESS;
+
+    if (hw->mac_type == e1000_ich8lan) {
+        phy_ctrl = er32(PHY_CTRL);
+    } else {
+        ret_val = e1000_read_phy_reg(hw, IGP02E1000_PHY_POWER_MGMT, &phy_data);
+        if (ret_val)
+            return ret_val;
+    }
+
+    if (!active) {
+        if (hw->mac_type == e1000_ich8lan) {
+            phy_ctrl &= ~E1000_PHY_CTRL_D0A_LPLU;
+            ew32(PHY_CTRL, phy_ctrl);
+        } else {
+            phy_data &= ~IGP02E1000_PM_D0_LPLU;
+            ret_val = e1000_write_phy_reg(hw, IGP02E1000_PHY_POWER_MGMT, phy_data);
+            if (ret_val)
+                return ret_val;
+        }
+
+        /* LPLU and SmartSpeed are mutually exclusive.  LPLU is used during
+         * Dx states where the power conservation is most important.  During
+         * driver activity we should enable SmartSpeed, so performance is
+         * maintained. */
+        if (hw->smart_speed == e1000_smart_speed_on) {
+            ret_val = e1000_read_phy_reg(hw, IGP01E1000_PHY_PORT_CONFIG,
+                                         &phy_data);
+            if (ret_val)
+                return ret_val;
+
+            phy_data |= IGP01E1000_PSCFR_SMART_SPEED;
+            ret_val = e1000_write_phy_reg(hw, IGP01E1000_PHY_PORT_CONFIG,
+                                          phy_data);
+            if (ret_val)
+                return ret_val;
+        } else if (hw->smart_speed == e1000_smart_speed_off) {
+            ret_val = e1000_read_phy_reg(hw, IGP01E1000_PHY_PORT_CONFIG,
+                                         &phy_data);
+            if (ret_val)
+                return ret_val;
+
+            phy_data &= ~IGP01E1000_PSCFR_SMART_SPEED;
+            ret_val = e1000_write_phy_reg(hw, IGP01E1000_PHY_PORT_CONFIG,
+                                          phy_data);
+            if (ret_val)
+                return ret_val;
+        }
+
+
+    } else {
+
+        if (hw->mac_type == e1000_ich8lan) {
+            phy_ctrl |= E1000_PHY_CTRL_D0A_LPLU;
+            ew32(PHY_CTRL, phy_ctrl);
+        } else {
+            phy_data |= IGP02E1000_PM_D0_LPLU;
+            ret_val = e1000_write_phy_reg(hw, IGP02E1000_PHY_POWER_MGMT, phy_data);
+            if (ret_val)
+                return ret_val;
+        }
+
+        /* When LPLU is enabled we should disable SmartSpeed */
+        ret_val = e1000_read_phy_reg(hw, IGP01E1000_PHY_PORT_CONFIG, &phy_data);
+        if (ret_val)
+            return ret_val;
+
+        phy_data &= ~IGP01E1000_PSCFR_SMART_SPEED;
+        ret_val = e1000_write_phy_reg(hw, IGP01E1000_PHY_PORT_CONFIG, phy_data);
+        if (ret_val)
+            return ret_val;
+
+    }
+    return E1000_SUCCESS;
+}
+
+/******************************************************************************
+ * Change VCO speed register to improve Bit Error Rate performance of SERDES.
+ *
+ * hw - Struct containing variables accessed by shared code
+ *****************************************************************************/
+static s32 e1000_set_vco_speed(struct e1000_hw *hw)
+{
+    s32  ret_val;
+    u16 default_page = 0;
+    u16 phy_data;
+
+    DEBUGFUNC("e1000_set_vco_speed");
+
+    switch (hw->mac_type) {
+    case e1000_82545_rev_3:
+    case e1000_82546_rev_3:
+       break;
+    default:
+        return E1000_SUCCESS;
+    }
+
+    /* Set PHY register 30, page 5, bit 8 to 0 */
+
+    ret_val = e1000_read_phy_reg(hw, M88E1000_PHY_PAGE_SELECT, &default_page);
+    if (ret_val)
+        return ret_val;
+
+    ret_val = e1000_write_phy_reg(hw, M88E1000_PHY_PAGE_SELECT, 0x0005);
+    if (ret_val)
+        return ret_val;
+
+    ret_val = e1000_read_phy_reg(hw, M88E1000_PHY_GEN_CONTROL, &phy_data);
+    if (ret_val)
+        return ret_val;
+
+    phy_data &= ~M88E1000_PHY_VCO_REG_BIT8;
+    ret_val = e1000_write_phy_reg(hw, M88E1000_PHY_GEN_CONTROL, phy_data);
+    if (ret_val)
+        return ret_val;
+
+    /* Set PHY register 30, page 4, bit 11 to 1 */
+
+    ret_val = e1000_write_phy_reg(hw, M88E1000_PHY_PAGE_SELECT, 0x0004);
+    if (ret_val)
+        return ret_val;
+
+    ret_val = e1000_read_phy_reg(hw, M88E1000_PHY_GEN_CONTROL, &phy_data);
+    if (ret_val)
+        return ret_val;
+
+    phy_data |= M88E1000_PHY_VCO_REG_BIT11;
+    ret_val = e1000_write_phy_reg(hw, M88E1000_PHY_GEN_CONTROL, phy_data);
+    if (ret_val)
+        return ret_val;
+
+    ret_val = e1000_write_phy_reg(hw, M88E1000_PHY_PAGE_SELECT, default_page);
+    if (ret_val)
+        return ret_val;
+
+    return E1000_SUCCESS;
+}
+
+
+/*****************************************************************************
+ * This function reads the cookie from ARC RAM.
+ *
+ * returns: - E1000_SUCCESS.
+ ****************************************************************************/
+static s32 e1000_host_if_read_cookie(struct e1000_hw *hw, u8 *buffer)
+{
+    u8 i;
+    u32 offset = E1000_MNG_DHCP_COOKIE_OFFSET;
+    u8 length = E1000_MNG_DHCP_COOKIE_LENGTH;
+
+    length = (length >> 2);
+    offset = (offset >> 2);
+
+    for (i = 0; i < length; i++) {
+        *((u32 *)buffer + i) =
+            E1000_READ_REG_ARRAY_DWORD(hw, HOST_IF, offset + i);
+    }
+    return E1000_SUCCESS;
+}
+
+
+/*****************************************************************************
+ * This function checks whether the HOST IF is enabled for command operation
+ * and also checks whether the previous command has completed.
+ * It busy-waits if the previous command has not completed.
+ *
+ * returns: - E1000_ERR_HOST_INTERFACE_COMMAND if the interface is not ready
+ *            or on timeout
+ *          - E1000_SUCCESS for success.
+ ****************************************************************************/
+static s32 e1000_mng_enable_host_if(struct e1000_hw *hw)
+{
+    u32 hicr;
+    u8 i;
+
+    /* Check that the host interface is enabled. */
+    hicr = er32(HICR);
+    if ((hicr & E1000_HICR_EN) == 0) {
+        DEBUGOUT("E1000_HOST_EN bit disabled.\n");
+        return -E1000_ERR_HOST_INTERFACE_COMMAND;
+    }
+    /* check the previous command is completed */
+    for (i = 0; i < E1000_MNG_DHCP_COMMAND_TIMEOUT; i++) {
+        hicr = er32(HICR);
+        if (!(hicr & E1000_HICR_C))
+            break;
+        mdelay(1);
+    }
+
+    if (i == E1000_MNG_DHCP_COMMAND_TIMEOUT) {
+        DEBUGOUT("Previous command timeout failed .\n");
+        return -E1000_ERR_HOST_INTERFACE_COMMAND;
+    }
+    return E1000_SUCCESS;
+}
+
+/*****************************************************************************
+ * This function writes the buffer content at the given offset on the host
+ * interface.  It handles alignment so that the writes are done in the most
+ * efficient way.  It also accumulates the sum of the buffer contents in the
+ * *sum parameter.
+ *
+ * returns  - E1000_SUCCESS for success.
+ ****************************************************************************/
+static s32 e1000_mng_host_if_write(struct e1000_hw *hw, u8 *buffer, u16 length,
+				   u16 offset, u8 *sum)
+{
+    u8 *tmp;
+    u8 *bufptr = buffer;
+    u32 data = 0;
+    u16 remaining, i, j, prev_bytes;
+
+    /* sum is only the sum of the data; it is not a checksum */
+
+    if (length == 0 || offset + length > E1000_HI_MAX_MNG_DATA_LENGTH) {
+        return -E1000_ERR_PARAM;
+    }
+
+    tmp = (u8 *)&data;
+    prev_bytes = offset & 0x3;
+    offset &= 0xFFFC;
+    offset >>= 2;
+
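+    /* If the given offset is not DWORD aligned, merge the leading bytes of
+     * the buffer into the existing DWORD at that offset before writing it
+     * back, so that only whole DWORDs are written to the host interface. */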
+    if (prev_bytes) {
+        data = E1000_READ_REG_ARRAY_DWORD(hw, HOST_IF, offset);
+        for (j = prev_bytes; j < sizeof(u32); j++) {
+            *(tmp + j) = *bufptr++;
+            *sum += *(tmp + j);
+        }
+        E1000_WRITE_REG_ARRAY_DWORD(hw, HOST_IF, offset, data);
+        length -= j - prev_bytes;
+        offset++;
+    }
+
+    remaining = length & 0x3;
+    length -= remaining;
+
+    /* Calculate length in DWORDs */
+    length >>= 2;
+
+    /* The device driver writes the relevant command block into the
+     * ram area. */
+    for (i = 0; i < length; i++) {
+        for (j = 0; j < sizeof(u32); j++) {
+            *(tmp + j) = *bufptr++;
+            *sum += *(tmp + j);
+        }
+
+        E1000_WRITE_REG_ARRAY_DWORD(hw, HOST_IF, offset + i, data);
+    }
+    if (remaining) {
+        for (j = 0; j < sizeof(u32); j++) {
+            if (j < remaining)
+                *(tmp + j) = *bufptr++;
+            else
+                *(tmp + j) = 0;
+
+            *sum += *(tmp + j);
+        }
+        E1000_WRITE_REG_ARRAY_DWORD(hw, HOST_IF, offset + i, data);
+    }
+
+    return E1000_SUCCESS;
+}
+
+
+/*****************************************************************************
+ * This function writes the command header after performing the checksum calculation.
+ *
+ * returns  - E1000_SUCCESS for success.
+ ****************************************************************************/
+static s32 e1000_mng_write_cmd_header(struct e1000_hw *hw,
+				      struct e1000_host_mng_command_header *hdr)
+{
+    u16 i;
+    u8 sum;
+    u8 *buffer;
+
+    /* Write the whole command header structure which includes sum of
+     * the buffer */
+
+    u16 length = sizeof(struct e1000_host_mng_command_header);
+
+    sum = hdr->checksum;
+    hdr->checksum = 0;
+
+    buffer = (u8 *)hdr;
+    i = length;
+    while (i--)
+        sum += buffer[i];
+
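+    /* Store the two's complement of the accumulated sum (payload bytes plus
+     * header bytes) so that the complete command block sums to zero. */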
+    hdr->checksum = 0 - sum;
+
+    length >>= 2;
+    /* The device driver writes the relevant command block into the ram area. */
+    for (i = 0; i < length; i++) {
+        E1000_WRITE_REG_ARRAY_DWORD(hw, HOST_IF, i, *((u32 *)hdr + i));
+        E1000_WRITE_FLUSH();
+    }
+
+    return E1000_SUCCESS;
+}
+
+
+/*****************************************************************************
+ * This function indicates to the ARC that a new command is pending, which
+ * completes one write operation by the driver.
+ *
+ * returns  - E1000_SUCCESS for success.
+ ****************************************************************************/
+static s32 e1000_mng_write_commit(struct e1000_hw *hw)
+{
+    u32 hicr;
+
+    hicr = er32(HICR);
+    /* Setting this bit tells the ARC that a new command is pending. */
+    ew32(HICR, hicr | E1000_HICR_C);
+
+    return E1000_SUCCESS;
+}
+
+
+/*****************************************************************************
+ * This function checks the mode of the firmware.
+ *
+ * returns  - true when the firmware mode is IAMT, false otherwise.
+ ****************************************************************************/
+bool e1000_check_mng_mode(struct e1000_hw *hw)
+{
+    u32 fwsm;
+
+    fwsm = er32(FWSM);
+
+    if (hw->mac_type == e1000_ich8lan) {
+        if ((fwsm & E1000_FWSM_MODE_MASK) ==
+            (E1000_MNG_ICH_IAMT_MODE << E1000_FWSM_MODE_SHIFT))
+            return true;
+    } else if ((fwsm & E1000_FWSM_MODE_MASK) ==
+               (E1000_MNG_IAMT_MODE << E1000_FWSM_MODE_SHIFT))
+        return true;
+
+    return false;
+}
+
+
+/*****************************************************************************
+ * This function writes the DHCP info.
+ ****************************************************************************/
+s32 e1000_mng_write_dhcp_info(struct e1000_hw *hw, u8 *buffer, u16 length)
+{
+    s32 ret_val;
+    struct e1000_host_mng_command_header hdr;
+
+    hdr.command_id = E1000_MNG_DHCP_TX_PAYLOAD_CMD;
+    hdr.command_length = length;
+    hdr.reserved1 = 0;
+    hdr.reserved2 = 0;
+    hdr.checksum = 0;
+
+    ret_val = e1000_mng_enable_host_if(hw);
+    if (ret_val == E1000_SUCCESS) {
+        ret_val = e1000_mng_host_if_write(hw, buffer, length, sizeof(hdr),
+                                          &(hdr.checksum));
+        if (ret_val == E1000_SUCCESS) {
+            ret_val = e1000_mng_write_cmd_header(hw, &hdr);
+            if (ret_val == E1000_SUCCESS)
+                ret_val = e1000_mng_write_commit(hw);
+        }
+    }
+    return ret_val;
+}
+
+
+/*****************************************************************************
+ * This function calculates the checksum.
+ *
+ * returns  - checksum of buffer contents.
+ ****************************************************************************/
+static u8 e1000_calculate_mng_checksum(char *buffer, u32 length)
+{
+    u8 sum = 0;
+    u32 i;
+
+    if (!buffer)
+        return 0;
+
+    for (i=0; i < length; i++)
+        sum += buffer[i];
+
+    return (u8)(0 - sum);
+}
+
+/*****************************************************************************
+ * This function checks whether tx pkt filtering needs to be enabled or not.
+ *
+ * returns  - true if packet filtering is needed, false otherwise.
+ ****************************************************************************/
+bool e1000_enable_tx_pkt_filtering(struct e1000_hw *hw)
+{
+    /* called in init as well as watchdog timer functions */
+
+    s32 ret_val, checksum;
+    bool tx_filter = false;
+    struct e1000_host_mng_dhcp_cookie *hdr = &(hw->mng_cookie);
+    u8 *buffer = (u8 *) &(hw->mng_cookie);
+
+    if (e1000_check_mng_mode(hw)) {
+        ret_val = e1000_mng_enable_host_if(hw);
+        if (ret_val == E1000_SUCCESS) {
+            ret_val = e1000_host_if_read_cookie(hw, buffer);
+            if (ret_val == E1000_SUCCESS) {
+                checksum = hdr->checksum;
+                hdr->checksum = 0;
+                if ((hdr->signature == E1000_IAMT_SIGNATURE) &&
+                    checksum == e1000_calculate_mng_checksum((char *)buffer,
+                                               E1000_MNG_DHCP_COOKIE_LENGTH)) {
+                    if (hdr->status &
+                        E1000_MNG_DHCP_COOKIE_STATUS_PARSING_SUPPORT)
+                        tx_filter = true;
+                } else
+                    tx_filter = true;
+            } else
+                tx_filter = true;
+        }
+    }
+
+    hw->tx_pkt_filtering = tx_filter;
+    return tx_filter;
+}
+
+/******************************************************************************
+ * Verifies whether the hardware needs to allow ARPs to be processed by the host
+ *
+ * hw - Struct containing variables accessed by shared code
+ *
+ * returns: - true/false
+ *
+ *****************************************************************************/
+u32 e1000_enable_mng_pass_thru(struct e1000_hw *hw)
+{
+    u32 manc;
+    u32 fwsm, factps;
+
+    if (hw->asf_firmware_present) {
+        manc = er32(MANC);
+
+        if (!(manc & E1000_MANC_RCV_TCO_EN) ||
+            !(manc & E1000_MANC_EN_MAC_ADDR_FILTER))
+            return false;
+        if (e1000_arc_subsystem_valid(hw)) {
+            fwsm = er32(FWSM);
+            factps = er32(FACTPS);
+
+            if ((((fwsm & E1000_FWSM_MODE_MASK) >> E1000_FWSM_MODE_SHIFT) ==
+                   e1000_mng_mode_pt) && !(factps & E1000_FACTPS_MNGCG))
+                return true;
+        } else
+            if ((manc & E1000_MANC_SMBUS_EN) && !(manc & E1000_MANC_ASF_EN))
+                return true;
+    }
+    return false;
+}
+
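+/******************************************************************************
+ * Works around a polarity reversal issue on forced 10F/10H links by disabling
+ * the PHY transmitter, waiting for the link to drop, then re-enabling the
+ * transmitter and waiting for the link to return.
+ *
+ * hw - Struct containing variables accessed by shared code
+ *****************************************************************************/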
+static s32 e1000_polarity_reversal_workaround(struct e1000_hw *hw)
+{
+    s32 ret_val;
+    u16 mii_status_reg;
+    u16 i;
+
+    /* Polarity reversal workaround for forced 10F/10H links. */
+
+    /* Disable the transmitter on the PHY */
+
+    ret_val = e1000_write_phy_reg(hw, M88E1000_PHY_PAGE_SELECT, 0x0019);
+    if (ret_val)
+        return ret_val;
+    ret_val = e1000_write_phy_reg(hw, M88E1000_PHY_GEN_CONTROL, 0xFFFF);
+    if (ret_val)
+        return ret_val;
+
+    ret_val = e1000_write_phy_reg(hw, M88E1000_PHY_PAGE_SELECT, 0x0000);
+    if (ret_val)
+        return ret_val;
+
+    /* This loop will early-out if the NO link condition has been met. */
+    for (i = PHY_FORCE_TIME; i > 0; i--) {
+        /* Read the MII Status Register and wait for Link Status bit
+         * to be clear.
+         */
+
+        ret_val = e1000_read_phy_reg(hw, PHY_STATUS, &mii_status_reg);
+        if (ret_val)
+            return ret_val;
+
+        ret_val = e1000_read_phy_reg(hw, PHY_STATUS, &mii_status_reg);
+        if (ret_val)
+            return ret_val;
+
+        if ((mii_status_reg & ~MII_SR_LINK_STATUS) == 0) break;
+        mdelay(100);
+    }
+
+    /* Recommended delay time after link has been lost */
+    mdelay(1000);
+
+    /* Now we will re-enable the transmitter on the PHY */
+
+    ret_val = e1000_write_phy_reg(hw, M88E1000_PHY_PAGE_SELECT, 0x0019);
+    if (ret_val)
+        return ret_val;
+    mdelay(50);
+    ret_val = e1000_write_phy_reg(hw, M88E1000_PHY_GEN_CONTROL, 0xFFF0);
+    if (ret_val)
+        return ret_val;
+    mdelay(50);
+    ret_val = e1000_write_phy_reg(hw, M88E1000_PHY_GEN_CONTROL, 0xFF00);
+    if (ret_val)
+        return ret_val;
+    mdelay(50);
+    ret_val = e1000_write_phy_reg(hw, M88E1000_PHY_GEN_CONTROL, 0x0000);
+    if (ret_val)
+        return ret_val;
+
+    ret_val = e1000_write_phy_reg(hw, M88E1000_PHY_PAGE_SELECT, 0x0000);
+    if (ret_val)
+        return ret_val;
+
+    /* This loop will early-out if the link condition has been met. */
+    for (i = PHY_FORCE_TIME; i > 0; i--) {
+        /* Read the MII Status Register and wait for Link Status bit
+         * to be set.
+         */
+
+        ret_val = e1000_read_phy_reg(hw, PHY_STATUS, &mii_status_reg);
+        if (ret_val)
+            return ret_val;
+
+        ret_val = e1000_read_phy_reg(hw, PHY_STATUS, &mii_status_reg);
+        if (ret_val)
+            return ret_val;
+
+        if (mii_status_reg & MII_SR_LINK_STATUS) break;
+        mdelay(100);
+    }
+    return E1000_SUCCESS;
+}
+
+/***************************************************************************
+ *
+ * Disables PCI-Express master access.
+ *
+ * hw: Struct containing variables accessed by shared code
+ *
+ * returns: - none.
+ *
+ ***************************************************************************/
+static void e1000_set_pci_express_master_disable(struct e1000_hw *hw)
+{
+    u32 ctrl;
+
+    DEBUGFUNC("e1000_set_pci_express_master_disable");
+
+    if (hw->bus_type != e1000_bus_type_pci_express)
+        return;
+
+    ctrl = er32(CTRL);
+    ctrl |= E1000_CTRL_GIO_MASTER_DISABLE;
+    ew32(CTRL, ctrl);
+}
+
+/*******************************************************************************
+ *
+ * Disables PCI-Express master access and verifies there are no pending requests
+ *
+ * hw: Struct containing variables accessed by shared code
+ *
+ * returns: - E1000_ERR_MASTER_REQUESTS_PENDING if master disable bit hasn't
+ *            caused the master requests to be disabled.
+ *            E1000_SUCCESS master requests disabled.
+ *
+ ******************************************************************************/
+s32 e1000_disable_pciex_master(struct e1000_hw *hw)
+{
+    s32 timeout = MASTER_DISABLE_TIMEOUT;   /* 80ms */
+
+    DEBUGFUNC("e1000_disable_pciex_master");
+
+    if (hw->bus_type != e1000_bus_type_pci_express)
+        return E1000_SUCCESS;
+
+    e1000_set_pci_express_master_disable(hw);
+
+    while (timeout) {
+        if (!(er32(STATUS) & E1000_STATUS_GIO_MASTER_ENABLE))
+            break;
+        else
+            udelay(100);
+        timeout--;
+    }
+
+    if (!timeout) {
+        DEBUGOUT("Master requests are pending.\n");
+        return -E1000_ERR_MASTER_REQUESTS_PENDING;
+    }
+
+    return E1000_SUCCESS;
+}
+
+/*******************************************************************************
+ *
+ * Check for EEPROM Auto Read bit done.
+ *
+ * hw: Struct containing variables accessed by shared code
+ *
+ * returns: - E1000_ERR_RESET if fail to reset MAC
+ *            E1000_SUCCESS at any other case.
+ *
+ ******************************************************************************/
+static s32 e1000_get_auto_rd_done(struct e1000_hw *hw)
+{
+    s32 timeout = AUTO_READ_DONE_TIMEOUT;
+
+    DEBUGFUNC("e1000_get_auto_rd_done");
+
+    switch (hw->mac_type) {
+    default:
+        msleep(5);
+        break;
+    case e1000_82571:
+    case e1000_82572:
+    case e1000_82573:
+    case e1000_80003es2lan:
+    case e1000_ich8lan:
+        while (timeout) {
+            if (er32(EECD) & E1000_EECD_AUTO_RD)
+                break;
+            else msleep(1);
+            timeout--;
+        }
+
+        if (!timeout) {
+            DEBUGOUT("Auto read by HW from EEPROM has not completed.\n");
+            return -E1000_ERR_RESET;
+        }
+        break;
+    }
+
+    /* PHY configuration from NVM starts just after EECD_AUTO_RD is set high.
+     * Need to wait for PHY configuration completion before accessing NVM
+     * and PHY. */
+    if (hw->mac_type == e1000_82573)
+        msleep(25);
+
+    return E1000_SUCCESS;
+}
+
+/***************************************************************************
+ * Checks if the PHY configuration is done
+ *
+ * hw: Struct containing variables accessed by shared code
+ *
+ * returns: - E1000_ERR_RESET if fail to reset MAC
+ *            E1000_SUCCESS at any other case.
+ *
+ ***************************************************************************/
+static s32 e1000_get_phy_cfg_done(struct e1000_hw *hw)
+{
+    s32 timeout = PHY_CFG_TIMEOUT;
+    u32 cfg_mask = E1000_EEPROM_CFG_DONE;
+
+    DEBUGFUNC("e1000_get_phy_cfg_done");
+
+    switch (hw->mac_type) {
+    default:
+        mdelay(10);
+        break;
+    case e1000_80003es2lan:
+        /* Separate *_CFG_DONE_* bit for each port */
+        if (er32(STATUS) & E1000_STATUS_FUNC_1)
+            cfg_mask = E1000_EEPROM_CFG_DONE_PORT_1;
+        /* Fall Through */
+    case e1000_82571:
+    case e1000_82572:
+        while (timeout) {
+            if (er32(EEMNGCTL) & cfg_mask)
+                break;
+            else
+                msleep(1);
+            timeout--;
+        }
+        if (!timeout) {
+            DEBUGOUT("MNG configuration cycle has not completed.\n");
+            return -E1000_ERR_RESET;
+        }
+        break;
+    }
+
+    return E1000_SUCCESS;
+}
+
+/***************************************************************************
+ *
+ * Uses the combination of SMBI and SWESMBI semaphore bits when resetting the
+ * adapter or accessing the EEPROM.
+ *
+ * hw: Struct containing variables accessed by shared code
+ *
+ * returns: - E1000_ERR_EEPROM if fail to access EEPROM.
+ *            E1000_SUCCESS at any other case.
+ *
+ ***************************************************************************/
+static s32 e1000_get_hw_eeprom_semaphore(struct e1000_hw *hw)
+{
+    s32 timeout;
+    u32 swsm;
+
+    DEBUGFUNC("e1000_get_hw_eeprom_semaphore");
+
+    if (!hw->eeprom_semaphore_present)
+        return E1000_SUCCESS;
+
+    if (hw->mac_type == e1000_80003es2lan) {
+        /* Get the SW semaphore. */
+        if (e1000_get_software_semaphore(hw) != E1000_SUCCESS)
+            return -E1000_ERR_EEPROM;
+    }
+
+    /* Get the FW semaphore. */
+    timeout = hw->eeprom.word_size + 1;
+    while (timeout) {
+        swsm = er32(SWSM);
+        swsm |= E1000_SWSM_SWESMBI;
+        ew32(SWSM, swsm);
+        /* if we managed to set the bit we got the semaphore. */
+        swsm = er32(SWSM);
+        if (swsm & E1000_SWSM_SWESMBI)
+            break;
+
+        udelay(50);
+        timeout--;
+    }
+
+    if (!timeout) {
+        /* Release semaphores */
+        e1000_put_hw_eeprom_semaphore(hw);
+        DEBUGOUT("Driver can't access the Eeprom - SWESMBI bit is set.\n");
+        return -E1000_ERR_EEPROM;
+    }
+
+    return E1000_SUCCESS;
+}
+
+/***************************************************************************
+ * This function clears HW semaphore bits.
+ *
+ * hw: Struct containing variables accessed by shared code
+ *
+ * returns: - None.
+ *
+ ***************************************************************************/
+static void e1000_put_hw_eeprom_semaphore(struct e1000_hw *hw)
+{
+    u32 swsm;
+
+    DEBUGFUNC("e1000_put_hw_eeprom_semaphore");
+
+    if (!hw->eeprom_semaphore_present)
+        return;
+
+    swsm = er32(SWSM);
+    if (hw->mac_type == e1000_80003es2lan) {
+        /* Release both semaphores. */
+        swsm &= ~(E1000_SWSM_SMBI | E1000_SWSM_SWESMBI);
+    } else
+        swsm &= ~(E1000_SWSM_SWESMBI);
+    ew32(SWSM, swsm);
+}
+
+/***************************************************************************
+ *
+ * Obtaining software semaphore bit (SMBI) before resetting PHY.
+ *
+ * hw: Struct containing variables accessed by shared code
+ *
+ * returns: - E1000_ERR_RESET if fail to obtain semaphore.
+ *            E1000_SUCCESS at any other case.
+ *
+ ***************************************************************************/
+static s32 e1000_get_software_semaphore(struct e1000_hw *hw)
+{
+    s32 timeout = hw->eeprom.word_size + 1;
+    u32 swsm;
+
+    DEBUGFUNC("e1000_get_software_semaphore");
+
+    if (hw->mac_type != e1000_80003es2lan) {
+        return E1000_SUCCESS;
+    }
+
+    while (timeout) {
+        swsm = er32(SWSM);
+        /* If SMBI bit cleared, it is now set and we hold the semaphore */
+        if (!(swsm & E1000_SWSM_SMBI))
+            break;
+        mdelay(1);
+        timeout--;
+    }
+
+    if (!timeout) {
+        DEBUGOUT("Driver can't access device - SMBI bit is set.\n");
+        return -E1000_ERR_RESET;
+    }
+
+    return E1000_SUCCESS;
+}
+
+/***************************************************************************
+ *
+ * Release semaphore bit (SMBI).
+ *
+ * hw: Struct containing variables accessed by shared code
+ *
+ ***************************************************************************/
+static void e1000_release_software_semaphore(struct e1000_hw *hw)
+{
+    u32 swsm;
+
+    DEBUGFUNC("e1000_release_software_semaphore");
+
+    if (hw->mac_type != e1000_80003es2lan) {
+        return;
+    }
+
+    swsm = er32(SWSM);
+    /* Release the SW semaphores.*/
+    swsm &= ~E1000_SWSM_SMBI;
+    ew32(SWSM, swsm);
+}
+
+/******************************************************************************
+ * Checks if PHY reset is blocked due to SOL/IDER session, for example.
+ * Returning E1000_BLK_PHY_RESET isn't necessarily an error.  But it's up to
+ * the caller to figure out how to deal with it.
+ *
+ * hw - Struct containing variables accessed by shared code
+ *
+ * returns: - E1000_BLK_PHY_RESET
+ *            E1000_SUCCESS
+ *
+ *****************************************************************************/
+s32 e1000_check_phy_reset_block(struct e1000_hw *hw)
+{
+    u32 manc = 0;
+    u32 fwsm = 0;
+
+    if (hw->mac_type == e1000_ich8lan) {
+        fwsm = er32(FWSM);
+        return (fwsm & E1000_FWSM_RSPCIPHY) ? E1000_SUCCESS
+                                            : E1000_BLK_PHY_RESET;
+    }
+
+    if (hw->mac_type > e1000_82547_rev_2)
+        manc = er32(MANC);
+    return (manc & E1000_MANC_BLK_PHY_RST_ON_IDE) ?
+        E1000_BLK_PHY_RESET : E1000_SUCCESS;
+}
+
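+/******************************************************************************
+ * Checks whether the manageability (ARC) subsystem registers may safely be
+ * accessed, based on the manageability mode reported in the FWSM register.
+ *
+ * hw - Struct containing variables accessed by shared code
+ *
+ * returns: - true if the registers may be accessed, false otherwise.
+ *****************************************************************************/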
+static u8 e1000_arc_subsystem_valid(struct e1000_hw *hw)
+{
+    u32 fwsm;
+
+    /* On 8257x silicon, registers in the range of 0x8800 - 0x8FFC
+     * may not be provided a DMA clock when no manageability features are
+     * enabled.  We do not want to perform any reads/writes to these registers
+     * if this is the case.  We read FWSM to determine the manageability mode.
+     */
+    switch (hw->mac_type) {
+    case e1000_82571:
+    case e1000_82572:
+    case e1000_82573:
+    case e1000_80003es2lan:
+        fwsm = er32(FWSM);
+        if ((fwsm & E1000_FWSM_MODE_MASK) != 0)
+            return true;
+        break;
+    case e1000_ich8lan:
+        return true;
+    default:
+        break;
+    }
+    return false;
+}
+
+
+/******************************************************************************
+ * Configure PCI-Ex no-snoop
+ *
+ * hw - Struct containing variables accessed by shared code.
+ * no_snoop - Bitmap of no-snoop events.
+ *
+ * returns: E1000_SUCCESS
+ *
+ *****************************************************************************/
+static s32 e1000_set_pci_ex_no_snoop(struct e1000_hw *hw, u32 no_snoop)
+{
+    u32 gcr_reg = 0;
+
+    DEBUGFUNC("e1000_set_pci_ex_no_snoop");
+
+    if (hw->bus_type == e1000_bus_type_unknown)
+        e1000_get_bus_info(hw);
+
+    if (hw->bus_type != e1000_bus_type_pci_express)
+        return E1000_SUCCESS;
+
+    if (no_snoop) {
+        gcr_reg = er32(GCR);
+        gcr_reg &= ~(PCI_EX_NO_SNOOP_ALL);
+        gcr_reg |= no_snoop;
+        ew32(GCR, gcr_reg);
+    }
+    if (hw->mac_type == e1000_ich8lan) {
+        u32 ctrl_ext;
+
+        ew32(GCR, PCI_EX_82566_SNOOP_ALL);
+
+        ctrl_ext = er32(CTRL_EXT);
+        ctrl_ext |= E1000_CTRL_EXT_RO_DIS;
+        ew32(CTRL_EXT, ctrl_ext);
+    }
+
+    return E1000_SUCCESS;
+}
+
+/***************************************************************************
+ *
+ * Get software semaphore FLAG bit (SWFLAG).
+ * SWFLAG is used to synchronize the access to all shared resource between
+ * SW, FW and HW.
+ *
+ * hw: Struct containing variables accessed by shared code
+ *
+ ***************************************************************************/
+static s32 e1000_get_software_flag(struct e1000_hw *hw)
+{
+    s32 timeout = PHY_CFG_TIMEOUT;
+    u32 extcnf_ctrl;
+
+    DEBUGFUNC("e1000_get_software_flag");
+
+    if (hw->mac_type == e1000_ich8lan) {
+        while (timeout) {
+            extcnf_ctrl = er32(EXTCNF_CTRL);
+            extcnf_ctrl |= E1000_EXTCNF_CTRL_SWFLAG;
+            ew32(EXTCNF_CTRL, extcnf_ctrl);
+
+            extcnf_ctrl = er32(EXTCNF_CTRL);
+            if (extcnf_ctrl & E1000_EXTCNF_CTRL_SWFLAG)
+                break;
+            mdelay(1);
+            timeout--;
+        }
+
+        if (!timeout) {
+            DEBUGOUT("FW or HW locks the resource too long.\n");
+            return -E1000_ERR_CONFIG;
+        }
+    }
+
+    return E1000_SUCCESS;
+}
+
+/***************************************************************************
+ *
+ * Release software semaphore FLAG bit (SWFLAG).
+ * SWFLAG is used to synchronize the access to all shared resource between
+ * SW, FW and HW.
+ *
+ * hw: Struct containing variables accessed by shared code
+ *
+ ***************************************************************************/
+static void e1000_release_software_flag(struct e1000_hw *hw)
+{
+    u32 extcnf_ctrl;
+
+    DEBUGFUNC("e1000_release_software_flag");
+
+    if (hw->mac_type == e1000_ich8lan) {
+        extcnf_ctrl= er32(EXTCNF_CTRL);
+        extcnf_ctrl &= ~E1000_EXTCNF_CTRL_SWFLAG;
+        ew32(EXTCNF_CTRL, extcnf_ctrl);
+    }
+
+    return;
+}
+
+/******************************************************************************
+ * Reads a 16 bit word or words from the EEPROM using the ICH8's flash access
+ * register.
+ *
+ * hw - Struct containing variables accessed by shared code
+ * offset - offset of word in the EEPROM to read
+ * data - word read from the EEPROM
+ * words - number of words to read
+ *****************************************************************************/
+static s32 e1000_read_eeprom_ich8(struct e1000_hw *hw, u16 offset, u16 words,
+				  u16 *data)
+{
+    s32  error = E1000_SUCCESS;
+    u32 flash_bank = 0;
+    u32 act_offset = 0;
+    u32 bank_offset = 0;
+    u16 word = 0;
+    u16 i = 0;
+
+    /* We need to know which is the valid flash bank.  In the event
+     * that we didn't allocate eeprom_shadow_ram, we may not be
+     * managing flash_bank.  So it cannot be trusted and needs
+     * to be updated with each read.
+     */
+    /* Value of bit 22 corresponds to the flash bank we're on. */
+    flash_bank = (er32(EECD) & E1000_EECD_SEC1VAL) ? 1 : 0;
+
+    /* Adjust offset appropriately if we're on bank 1 - adjust for word size */
+    bank_offset = flash_bank * (hw->flash_bank_size * 2);
+
+    error = e1000_get_software_flag(hw);
+    if (error != E1000_SUCCESS)
+        return error;
+
+    for (i = 0; i < words; i++) {
+        if (hw->eeprom_shadow_ram != NULL &&
+            hw->eeprom_shadow_ram[offset+i].modified) {
+            data[i] = hw->eeprom_shadow_ram[offset+i].eeprom_word;
+        } else {
+            /* The NVM part needs a byte offset, hence * 2 */
+            act_offset = bank_offset + ((offset + i) * 2);
+            error = e1000_read_ich8_word(hw, act_offset, &word);
+            if (error != E1000_SUCCESS)
+                break;
+            data[i] = word;
+        }
+    }
+
+    e1000_release_software_flag(hw);
+
+    return error;
+}
+
+/******************************************************************************
+ * Writes a 16 bit word or words to the EEPROM using the ICH8's flash access
+ * register.  Actually, writes are written to the shadow ram cache in the hw
+ * structure hw->eeprom_shadow_ram.  e1000_commit_shadow_ram flushes this to
+ * the NVM, which occurs when the NVM checksum is updated.
+ *
+ * hw - Struct containing variables accessed by shared code
+ * offset - offset of word in the EEPROM to write
+ * words - number of words to write
+ * data - words to write to the EEPROM
+ *****************************************************************************/
+static s32 e1000_write_eeprom_ich8(struct e1000_hw *hw, u16 offset, u16 words,
+				   u16 *data)
+{
+    u32 i = 0;
+    s32 error = E1000_SUCCESS;
+
+    error = e1000_get_software_flag(hw);
+    if (error != E1000_SUCCESS)
+        return error;
+
+    /* A driver can write to the NVM only if it has eeprom_shadow_ram
+     * allocated.  Subsequent reads to the modified words are read from
+     * this cached structure as well.  Writes will only go into this
+     * cached structure unless it's followed by a call to
+     * e1000_update_eeprom_checksum() where it will commit the changes
+     * and clear the "modified" field.
+     */
+    if (hw->eeprom_shadow_ram != NULL) {
+        for (i = 0; i < words; i++) {
+            if ((offset + i) < E1000_SHADOW_RAM_WORDS) {
+                hw->eeprom_shadow_ram[offset+i].modified = true;
+                hw->eeprom_shadow_ram[offset+i].eeprom_word = data[i];
+            } else {
+                error = -E1000_ERR_EEPROM;
+                break;
+            }
+        }
+    } else {
+        /* Drivers have the option to not allocate eeprom_shadow_ram as long
+         * as they don't perform any NVM writes.  An attempt in doing so
+         * will result in this error.
+         */
+        error = -E1000_ERR_EEPROM;
+    }
+
+    e1000_release_software_flag(hw);
+
+    return error;
+}
+
+/******************************************************************************
+ * This function does initial flash setup so that a new read/write/erase cycle
+ * can be started.
+ *
+ * hw - The pointer to the hw structure
+ ****************************************************************************/
+static s32 e1000_ich8_cycle_init(struct e1000_hw *hw)
+{
+    union ich8_hws_flash_status hsfsts;
+    s32 error = E1000_ERR_EEPROM;
+    s32 i     = 0;
+
+    DEBUGFUNC("e1000_ich8_cycle_init");
+
+    hsfsts.regval = E1000_READ_ICH_FLASH_REG16(hw, ICH_FLASH_HSFSTS);
+
+    /* Check the Flash Descriptor Valid bit in Hw status */
+    if (hsfsts.hsf_status.fldesvalid == 0) {
+        DEBUGOUT("Flash descriptor invalid.  SW Sequencing must be used.");
+        return error;
+    }
+
+    /* Clear FCERR in Hw status by writing 1 */
+    /* Clear DAEL in Hw status by writing a 1 */
+    hsfsts.hsf_status.flcerr = 1;
+    hsfsts.hsf_status.dael = 1;
+
+    E1000_WRITE_ICH_FLASH_REG16(hw, ICH_FLASH_HSFSTS, hsfsts.regval);
+
+    /* Either we should have a hardware SPI cycle-in-progress bit to check
+     * against in order to start a new cycle, or the FDONE bit should be
+     * changed in the hardware so that it is 1 after a hardware reset, which
+     * can then be used as an indication of whether a cycle is in progress or
+     * has been completed.  We should also have some software semaphore
+     * mechanism to guard FDONE or the cycle-in-progress bit so that access
+     * to those bits by two threads can be serialized, or some way so that
+     * two threads don't start a cycle at the same time. */
+
+    if (hsfsts.hsf_status.flcinprog == 0) {
+        /* There is no cycle running at present, so we can start a cycle */
+        /* Begin by setting Flash Cycle Done. */
+        hsfsts.hsf_status.flcdone = 1;
+        E1000_WRITE_ICH_FLASH_REG16(hw, ICH_FLASH_HSFSTS, hsfsts.regval);
+        error = E1000_SUCCESS;
+    } else {
+        /* otherwise poll for some time so the current cycle has a chance
+         * to end before giving up. */
+        for (i = 0; i < ICH_FLASH_COMMAND_TIMEOUT; i++) {
+            hsfsts.regval = E1000_READ_ICH_FLASH_REG16(hw, ICH_FLASH_HSFSTS);
+            if (hsfsts.hsf_status.flcinprog == 0) {
+                error = E1000_SUCCESS;
+                break;
+            }
+            udelay(1);
+        }
+        if (error == E1000_SUCCESS) {
+            /* The previous cycle ended within the timeout,
+             * now set the Flash Cycle Done. */
+            hsfsts.hsf_status.flcdone = 1;
+            E1000_WRITE_ICH_FLASH_REG16(hw, ICH_FLASH_HSFSTS, hsfsts.regval);
+        } else {
+            DEBUGOUT("Flash controller busy, cannot get access");
+        }
+    }
+    return error;
+}
+
+/******************************************************************************
+ * This function starts a flash cycle and waits for its completion
+ *
+ * hw - The pointer to the hw structure
+ ****************************************************************************/
+static s32 e1000_ich8_flash_cycle(struct e1000_hw *hw, u32 timeout)
+{
+    union ich8_hws_flash_ctrl hsflctl;
+    union ich8_hws_flash_status hsfsts;
+    s32 error = E1000_ERR_EEPROM;
+    u32 i = 0;
+
+    /* Start a cycle by writing 1 in Flash Cycle Go in Hw Flash Control */
+    hsflctl.regval = E1000_READ_ICH_FLASH_REG16(hw, ICH_FLASH_HSFCTL);
+    hsflctl.hsf_ctrl.flcgo = 1;
+    E1000_WRITE_ICH_FLASH_REG16(hw, ICH_FLASH_HSFCTL, hsflctl.regval);
+
+    /* wait till FDONE bit is set to 1 */
+    do {
+        hsfsts.regval = E1000_READ_ICH_FLASH_REG16(hw, ICH_FLASH_HSFSTS);
+        if (hsfsts.hsf_status.flcdone == 1)
+            break;
+        udelay(1);
+        i++;
+    } while (i < timeout);
+    if (hsfsts.hsf_status.flcdone == 1 && hsfsts.hsf_status.flcerr == 0) {
+        error = E1000_SUCCESS;
+    }
+    return error;
+}
+
+/******************************************************************************
+ * Reads a byte or word from the NVM using the ICH8 flash access registers.
+ *
+ * hw - The pointer to the hw structure
+ * index - The index of the byte or word to read.
+ * size - Size of data to read, 1=byte 2=word
+ * data - Pointer to the word to store the value read.
+ *****************************************************************************/
+static s32 e1000_read_ich8_data(struct e1000_hw *hw, u32 index, u32 size,
+				u16 *data)
+{
+    union ich8_hws_flash_status hsfsts;
+    union ich8_hws_flash_ctrl hsflctl;
+    u32 flash_linear_address;
+    u32 flash_data = 0;
+    s32 error = -E1000_ERR_EEPROM;
+    s32 count = 0;
+
+    DEBUGFUNC("e1000_read_ich8_data");
+
+    if (size < 1  || size > 2 || data == NULL ||
+        index > ICH_FLASH_LINEAR_ADDR_MASK)
+        return error;
+
+    flash_linear_address = (ICH_FLASH_LINEAR_ADDR_MASK & index) +
+                           hw->flash_base_addr;
+
+    do {
+        udelay(1);
+        /* Steps */
+        error = e1000_ich8_cycle_init(hw);
+        if (error != E1000_SUCCESS)
+            break;
+
+        hsflctl.regval = E1000_READ_ICH_FLASH_REG16(hw, ICH_FLASH_HSFCTL);
+        /* 0b/1b corresponds to 1 or 2 byte size, respectively. */
+        hsflctl.hsf_ctrl.fldbcount = size - 1;
+        hsflctl.hsf_ctrl.flcycle = ICH_CYCLE_READ;
+        E1000_WRITE_ICH_FLASH_REG16(hw, ICH_FLASH_HSFCTL, hsflctl.regval);
+
+        /* Write the last 24 bits of index into Flash Linear address field in
+         * Flash Address */
+        /* TODO: TBD maybe check the index against the size of flash */
+
+        E1000_WRITE_ICH_FLASH_REG(hw, ICH_FLASH_FADDR, flash_linear_address);
+
+        error = e1000_ich8_flash_cycle(hw, ICH_FLASH_COMMAND_TIMEOUT);
+
+        /* Check if FCERR is set to 1; if so, clear it and try the whole
+         * sequence a few more times, else read in (shift in) the Flash Data0,
+         * least significant byte first. */
+        if (error == E1000_SUCCESS) {
+            flash_data = E1000_READ_ICH_FLASH_REG(hw, ICH_FLASH_FDATA0);
+            if (size == 1) {
+                *data = (u8)(flash_data & 0x000000FF);
+            } else if (size == 2) {
+                *data = (u16)(flash_data & 0x0000FFFF);
+            }
+            break;
+        } else {
+            /* If we've gotten here, then things are probably completely hosed,
+             * but if the error condition is detected, it won't hurt to give
+             * it another try...ICH_FLASH_CYCLE_REPEAT_COUNT times.
+             */
+            hsfsts.regval = E1000_READ_ICH_FLASH_REG16(hw, ICH_FLASH_HSFSTS);
+            if (hsfsts.hsf_status.flcerr == 1) {
+                /* Repeat for some time before giving up. */
+                continue;
+            } else if (hsfsts.hsf_status.flcdone == 0) {
+                DEBUGOUT("Timeout error - flash cycle did not complete.");
+                break;
+            }
+        }
+    } while (count++ < ICH_FLASH_CYCLE_REPEAT_COUNT);
+
+    return error;
+}
+
+/******************************************************************************
+ * Writes one or two bytes to the NVM using the ICH8 flash access registers.
+ *
+ * hw - The pointer to the hw structure
+ * index - The index of the byte/word to write.
+ * size - Size of data to write, 1=byte 2=word
+ * data - The byte(s) to write to the NVM.
+ *****************************************************************************/
+static s32 e1000_write_ich8_data(struct e1000_hw *hw, u32 index, u32 size,
+				 u16 data)
+{
+    union ich8_hws_flash_status hsfsts;
+    union ich8_hws_flash_ctrl hsflctl;
+    u32 flash_linear_address;
+    u32 flash_data = 0;
+    s32 error = -E1000_ERR_EEPROM;
+    s32 count = 0;
+
+    DEBUGFUNC("e1000_write_ich8_data");
+
+    if (size < 1  || size > 2 || data > size * 0xff ||
+        index > ICH_FLASH_LINEAR_ADDR_MASK)
+        return error;
+
+    flash_linear_address = (ICH_FLASH_LINEAR_ADDR_MASK & index) +
+                           hw->flash_base_addr;
+
+    do {
+        udelay(1);
+        /* Steps */
+        error = e1000_ich8_cycle_init(hw);
+        if (error != E1000_SUCCESS)
+            break;
+
+        hsflctl.regval = E1000_READ_ICH_FLASH_REG16(hw, ICH_FLASH_HSFCTL);
+        /* 0b/1b corresponds to 1 or 2 byte size, respectively. */
+        hsflctl.hsf_ctrl.fldbcount = size - 1;
+        hsflctl.hsf_ctrl.flcycle = ICH_CYCLE_WRITE;
+        E1000_WRITE_ICH_FLASH_REG16(hw, ICH_FLASH_HSFCTL, hsflctl.regval);
+
+        /* Write the last 24 bits of index into Flash Linear address field in
+         * Flash Address */
+        E1000_WRITE_ICH_FLASH_REG(hw, ICH_FLASH_FADDR, flash_linear_address);
+
+        if (size == 1)
+            flash_data = (u32)data & 0x00FF;
+        else
+            flash_data = (u32)data;
+
+        E1000_WRITE_ICH_FLASH_REG(hw, ICH_FLASH_FDATA0, flash_data);
+
+        /* Check if FCERR is set to 1; if so, clear it and try the whole
+         * sequence a few more times, else we are done */
+        error = e1000_ich8_flash_cycle(hw, ICH_FLASH_COMMAND_TIMEOUT);
+        if (error == E1000_SUCCESS) {
+            break;
+        } else {
+            /* If we're here, then things are most likely completely hosed,
+             * but if the error condition is detected, it won't hurt to give
+             * it another try...ICH_FLASH_CYCLE_REPEAT_COUNT times.
+             */
+            hsfsts.regval = E1000_READ_ICH_FLASH_REG16(hw, ICH_FLASH_HSFSTS);
+            if (hsfsts.hsf_status.flcerr == 1) {
+                /* Repeat for some time before giving up. */
+                continue;
+            } else if (hsfsts.hsf_status.flcdone == 0) {
+                DEBUGOUT("Timeout error - flash cycle did not complete.");
+                break;
+            }
+        }
+    } while (count++ < ICH_FLASH_CYCLE_REPEAT_COUNT);
+
+    return error;
+}
+
+/******************************************************************************
+ * Reads a single byte from the NVM using the ICH8 flash access registers.
+ *
+ * hw - pointer to e1000_hw structure
+ * index - The index of the byte to read.
+ * data - Pointer to a byte to store the value read.
+ *****************************************************************************/
+static s32 e1000_read_ich8_byte(struct e1000_hw *hw, u32 index, u8 *data)
+{
+    s32 status = E1000_SUCCESS;
+    u16 word = 0;
+
+    status = e1000_read_ich8_data(hw, index, 1, &word);
+    if (status == E1000_SUCCESS) {
+        *data = (u8)word;
+    }
+
+    return status;
+}
+
+/******************************************************************************
+ * Writes a single byte to the NVM using the ICH8 flash access registers.
+ * If the write fails, the write is retried a number of times before giving
+ * up.
+ *
+ * hw - pointer to e1000_hw structure
+ * index - The index of the byte to write.
+ * byte - The byte to write to the NVM.
+ *****************************************************************************/
+static s32 e1000_verify_write_ich8_byte(struct e1000_hw *hw, u32 index, u8 byte)
+{
+    s32 error = E1000_SUCCESS;
+    s32 program_retries = 0;
+
+    DEBUGOUT2("Byte := %2.2X Offset := %d\n", byte, index);
+
+    error = e1000_write_ich8_byte(hw, index, byte);
+
+    if (error != E1000_SUCCESS) {
+        for (program_retries = 0; program_retries < 100; program_retries++) {
+            DEBUGOUT2("Retrying \t Byte := %2.2X Offset := %d\n", byte, index);
+            error = e1000_write_ich8_byte(hw, index, byte);
+            udelay(100);
+            if (error == E1000_SUCCESS)
+                break;
+        }
+    }
+
+    if (program_retries == 100)
+        error = E1000_ERR_EEPROM;
+
+    return error;
+}
+
+/******************************************************************************
+ * Writes a single byte to the NVM using the ICH8 flash access registers.
+ *
+ * hw - pointer to e1000_hw structure
+ * index - The index of the byte to write.
+ * data - The byte to write to the NVM.
+ *****************************************************************************/
+static s32 e1000_write_ich8_byte(struct e1000_hw *hw, u32 index, u8 data)
+{
+    s32 status = E1000_SUCCESS;
+    u16 word = (u16)data;
+
+    status = e1000_write_ich8_data(hw, index, 1, word);
+
+    return status;
+}
+
+/******************************************************************************
+ * Reads a word from the NVM using the ICH8 flash access registers.
+ *
+ * hw - pointer to e1000_hw structure
+ * index - The starting byte index of the word to read.
+ * data - Pointer to a word to store the value read.
+ *****************************************************************************/
+static s32 e1000_read_ich8_word(struct e1000_hw *hw, u32 index, u16 *data)
+{
+    s32 status = E1000_SUCCESS;
+    status = e1000_read_ich8_data(hw, index, 2, data);
+    return status;
+}
+
+/******************************************************************************
+ * Erases the bank specified. Each bank may be a 4, 8 or 64k block. Banks are 0
+ * based.
+ *
+ * hw - pointer to e1000_hw structure
+ * bank - 0 for first bank, 1 for second bank
+ *
+ * Note that this function may actually erase as much as 8 or 64 KBytes.  The
+ * amount of NVM used in each bank is a *minimum* of 4 KBytes, but in fact the
+ * bank size may be 4, 8 or 64 KBytes
+ *****************************************************************************/
+static s32 e1000_erase_ich8_4k_segment(struct e1000_hw *hw, u32 bank)
+{
+    union ich8_hws_flash_status hsfsts;
+    union ich8_hws_flash_ctrl hsflctl;
+    u32 flash_linear_address;
+    s32  count = 0;
+    s32  error = E1000_ERR_EEPROM;
+    s32  iteration;
+    s32  sub_sector_size = 0;
+    s32  bank_size;
+    s32  j = 0;
+    s32  error_flag = 0;
+
+    hsfsts.regval = E1000_READ_ICH_FLASH_REG16(hw, ICH_FLASH_HSFSTS);
+
+    /* Determine HW Sector size: Read BERASE bits of Hw flash Status register */
+    /* 00: The Hw sector is 256 bytes, hence we need to erase 16
+     *     consecutive sectors.  The start index for the nth Hw sector can be
+     *     calculated as bank * 4096 + n * 256
+     * 01: The Hw sector is 4K bytes, hence we need to erase 1 sector.
+     *     The start index for the nth Hw sector can be calculated
+     *     as bank * 4096
+     * 10: The HW sector is 8K bytes
+     * 11: The Hw sector size is 64K bytes */
+    if (hsfsts.hsf_status.berasesz == 0x0) {
+        /* Hw sector size 256 */
+        sub_sector_size = ICH_FLASH_SEG_SIZE_256;
+        bank_size = ICH_FLASH_SECTOR_SIZE;
+        iteration = ICH_FLASH_SECTOR_SIZE / ICH_FLASH_SEG_SIZE_256;
+    } else if (hsfsts.hsf_status.berasesz == 0x1) {
+        bank_size = ICH_FLASH_SEG_SIZE_4K;
+        iteration = 1;
+    } else if (hsfsts.hsf_status.berasesz == 0x3) {
+        bank_size = ICH_FLASH_SEG_SIZE_64K;
+        iteration = 1;
+    } else {
+        return error;
+    }
+
+    for (j = 0; j < iteration ; j++) {
+        do {
+            count++;
+            /* Steps */
+            error = e1000_ich8_cycle_init(hw);
+            if (error != E1000_SUCCESS) {
+                error_flag = 1;
+                break;
+            }
+
+            /* Write a value 11 (block Erase) in Flash Cycle field in Hw flash
+             * Control */
+            hsflctl.regval = E1000_READ_ICH_FLASH_REG16(hw, ICH_FLASH_HSFCTL);
+            hsflctl.hsf_ctrl.flcycle = ICH_CYCLE_ERASE;
+            E1000_WRITE_ICH_FLASH_REG16(hw, ICH_FLASH_HSFCTL, hsflctl.regval);
+
+            /* Write the last 24 bits of an index within the block into Flash
+             * Linear address field in Flash Address.  This probably needs to
+             * be calculated here based off the on-chip erase sector size and
+             * the software bank size (4, 8 or 64 KBytes) */
+            flash_linear_address = bank * bank_size + j * sub_sector_size;
+            flash_linear_address += hw->flash_base_addr;
+            flash_linear_address &= ICH_FLASH_LINEAR_ADDR_MASK;
+
+            E1000_WRITE_ICH_FLASH_REG(hw, ICH_FLASH_FADDR, flash_linear_address);
+
+            error = e1000_ich8_flash_cycle(hw, ICH_FLASH_ERASE_TIMEOUT);
+            /* Check if FCERR is set to 1.  If 1, clear it and retry the whole
+             * sequence a few more times; otherwise we are done */
+            if (error == E1000_SUCCESS) {
+                break;
+            } else {
+                hsfsts.regval = E1000_READ_ICH_FLASH_REG16(hw, ICH_FLASH_HSFSTS);
+                if (hsfsts.hsf_status.flcerr == 1) {
+                    /* repeat for some time before giving up */
+                    continue;
+                } else if (hsfsts.hsf_status.flcdone == 0) {
+                    error_flag = 1;
+                    break;
+                }
+            }
+        } while ((count < ICH_FLASH_CYCLE_REPEAT_COUNT) && !error_flag);
+        if (error_flag == 1)
+            break;
+    }
+    if (error_flag != 1)
+        error = E1000_SUCCESS;
+    return error;
+}
+
+static s32 e1000_init_lcd_from_nvm_config_region(struct e1000_hw *hw,
+						 u32 cnf_base_addr,
+						 u32 cnf_size)
+{
+    u32 ret_val = E1000_SUCCESS;
+    u16 word_addr, reg_data, reg_addr;
+    u16 i;
+
+    /* cnf_base_addr is in DWORD */
+    word_addr = (u16)(cnf_base_addr << 1);
+
+    /* cnf_size is given as a number of dwords */
+    for (i = 0; i < cnf_size; i++) {
+        ret_val = e1000_read_eeprom(hw, (word_addr + i*2), 1, &reg_data);
+        if (ret_val)
+            return ret_val;
+
+        ret_val = e1000_read_eeprom(hw, (word_addr + i*2 + 1), 1, &reg_addr);
+        if (ret_val)
+            return ret_val;
+
+        ret_val = e1000_get_software_flag(hw);
+        if (ret_val != E1000_SUCCESS)
+            return ret_val;
+
+        ret_val = e1000_write_phy_reg_ex(hw, (u32)reg_addr, reg_data);
+
+        e1000_release_software_flag(hw);
+    }
+
+    return ret_val;
+}
+
+
+/******************************************************************************
+ * This function initializes the PHY from the NVM on ICH8 platforms. This
+ * is needed due to an issue where the NVM configuration is not properly
+ * autoloaded after power transitions. Therefore, after each PHY reset, we
+ * will load the configuration data out of the NVM manually.
+ *
+ * hw: Struct containing variables accessed by shared code
+ *****************************************************************************/
+static s32 e1000_init_lcd_from_nvm(struct e1000_hw *hw)
+{
+    u32 reg_data, cnf_base_addr, cnf_size, ret_val, loop;
+
+    if (hw->phy_type != e1000_phy_igp_3)
+          return E1000_SUCCESS;
+
+    /* Check if SW needs to configure the PHY */
+    reg_data = er32(FEXTNVM);
+    if (!(reg_data & FEXTNVM_SW_CONFIG))
+        return E1000_SUCCESS;
+
+    /* Wait for basic configuration to complete before proceeding */
+    loop = 0;
+    do {
+        reg_data = er32(STATUS) & E1000_STATUS_LAN_INIT_DONE;
+        udelay(100);
+        loop++;
+    } while ((!reg_data) && (loop < 50));
+
+    /* Clear the Init Done bit for the next init event */
+    reg_data = er32(STATUS);
+    reg_data &= ~E1000_STATUS_LAN_INIT_DONE;
+    ew32(STATUS, reg_data);
+
+    /* Make sure HW does not configure LCD from PHY extended configuration
+       before SW configuration */
+    reg_data = er32(EXTCNF_CTRL);
+    if ((reg_data & E1000_EXTCNF_CTRL_LCD_WRITE_ENABLE) == 0x0000) {
+        reg_data = er32(EXTCNF_SIZE);
+        cnf_size = reg_data & E1000_EXTCNF_SIZE_EXT_PCIE_LENGTH;
+        cnf_size >>= 16;
+        if (cnf_size) {
+            reg_data = er32(EXTCNF_CTRL);
+            cnf_base_addr = reg_data & E1000_EXTCNF_CTRL_EXT_CNF_POINTER;
+            /* cnf_base_addr is in DWORD */
+            cnf_base_addr >>= 16;
+
+            /* Configure LCD from extended configuration region. */
+            ret_val = e1000_init_lcd_from_nvm_config_region(hw, cnf_base_addr,
+                                                            cnf_size);
+            if (ret_val)
+                return ret_val;
+        }
+    }
+
+    return E1000_SUCCESS;
+}
+
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/devices/e1000/e1000_hw-2.6.31-ethercat.h	Wed Apr 13 22:06:28 2011 +0200
@@ -0,0 +1,3406 @@
+/*******************************************************************************
+
+  Intel PRO/1000 Linux driver
+  Copyright(c) 1999 - 2006 Intel Corporation.
+
+  This program is free software; you can redistribute it and/or modify it
+  under the terms and conditions of the GNU General Public License,
+  version 2, as published by the Free Software Foundation.
+
+  This program is distributed in the hope it will be useful, but WITHOUT
+  ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+  FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
+  more details.
+
+  You should have received a copy of the GNU General Public License along with
+  this program; if not, write to the Free Software Foundation, Inc.,
+  51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
+
+  The full GNU General Public License is included in this distribution in
+  the file called "COPYING".
+
+  Contact Information:
+  Linux NICS <linux.nics@intel.com>
+  e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
+  Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
+
+*******************************************************************************/
+
+/* e1000_hw.h
+ * Structures, enums, and macros for the MAC
+ */
+
+#ifndef _E1000_HW_H_
+#define _E1000_HW_H_
+
+#include "e1000_osdep-2.6.31-ethercat.h"
+
+
+/* Forward declarations of structures used by the shared code */
+struct e1000_hw;
+struct e1000_hw_stats;
+
+/* Enumerated types specific to the e1000 hardware */
+/* Media Access Controllers */
+typedef enum {
+    e1000_undefined = 0,
+    e1000_82542_rev2_0,
+    e1000_82542_rev2_1,
+    e1000_82543,
+    e1000_82544,
+    e1000_82540,
+    e1000_82545,
+    e1000_82545_rev_3,
+    e1000_82546,
+    e1000_82546_rev_3,
+    e1000_82541,
+    e1000_82541_rev_2,
+    e1000_82547,
+    e1000_82547_rev_2,
+    e1000_82571,
+    e1000_82572,
+    e1000_82573,
+    e1000_80003es2lan,
+    e1000_ich8lan,
+    e1000_num_macs
+} e1000_mac_type;
+
+typedef enum {
+    e1000_eeprom_uninitialized = 0,
+    e1000_eeprom_spi,
+    e1000_eeprom_microwire,
+    e1000_eeprom_flash,
+    e1000_eeprom_ich8,
+    e1000_eeprom_none, /* No NVM support */
+    e1000_num_eeprom_types
+} e1000_eeprom_type;
+
+/* Media Types */
+typedef enum {
+    e1000_media_type_copper = 0,
+    e1000_media_type_fiber = 1,
+    e1000_media_type_internal_serdes = 2,
+    e1000_num_media_types
+} e1000_media_type;
+
+typedef enum {
+    e1000_10_half = 0,
+    e1000_10_full = 1,
+    e1000_100_half = 2,
+    e1000_100_full = 3
+} e1000_speed_duplex_type;
+
+/* Flow Control Settings */
+typedef enum {
+    E1000_FC_NONE = 0,
+    E1000_FC_RX_PAUSE = 1,
+    E1000_FC_TX_PAUSE = 2,
+    E1000_FC_FULL = 3,
+    E1000_FC_DEFAULT = 0xFF
+} e1000_fc_type;
+
+struct e1000_shadow_ram {
+    u16 eeprom_word;
+    bool modified;
+};
+
+/* PCI bus types */
+typedef enum {
+    e1000_bus_type_unknown = 0,
+    e1000_bus_type_pci,
+    e1000_bus_type_pcix,
+    e1000_bus_type_pci_express,
+    e1000_bus_type_reserved
+} e1000_bus_type;
+
+/* PCI bus speeds */
+typedef enum {
+    e1000_bus_speed_unknown = 0,
+    e1000_bus_speed_33,
+    e1000_bus_speed_66,
+    e1000_bus_speed_100,
+    e1000_bus_speed_120,
+    e1000_bus_speed_133,
+    e1000_bus_speed_2500,
+    e1000_bus_speed_reserved
+} e1000_bus_speed;
+
+/* PCI bus widths */
+typedef enum {
+    e1000_bus_width_unknown = 0,
+    /* These PCIe values should literally match the possible return values
+     * from config space */
+    e1000_bus_width_pciex_1 = 1,
+    e1000_bus_width_pciex_2 = 2,
+    e1000_bus_width_pciex_4 = 4,
+    e1000_bus_width_32,
+    e1000_bus_width_64,
+    e1000_bus_width_reserved
+} e1000_bus_width;
+
+/* PHY status info structure and supporting enums */
+typedef enum {
+    e1000_cable_length_50 = 0,
+    e1000_cable_length_50_80,
+    e1000_cable_length_80_110,
+    e1000_cable_length_110_140,
+    e1000_cable_length_140,
+    e1000_cable_length_undefined = 0xFF
+} e1000_cable_length;
+
+typedef enum {
+    e1000_gg_cable_length_60 = 0,
+    e1000_gg_cable_length_60_115 = 1,
+    e1000_gg_cable_length_115_150 = 2,
+    e1000_gg_cable_length_150 = 4
+} e1000_gg_cable_length;
+
+typedef enum {
+    e1000_igp_cable_length_10  = 10,
+    e1000_igp_cable_length_20  = 20,
+    e1000_igp_cable_length_30  = 30,
+    e1000_igp_cable_length_40  = 40,
+    e1000_igp_cable_length_50  = 50,
+    e1000_igp_cable_length_60  = 60,
+    e1000_igp_cable_length_70  = 70,
+    e1000_igp_cable_length_80  = 80,
+    e1000_igp_cable_length_90  = 90,
+    e1000_igp_cable_length_100 = 100,
+    e1000_igp_cable_length_110 = 110,
+    e1000_igp_cable_length_115 = 115,
+    e1000_igp_cable_length_120 = 120,
+    e1000_igp_cable_length_130 = 130,
+    e1000_igp_cable_length_140 = 140,
+    e1000_igp_cable_length_150 = 150,
+    e1000_igp_cable_length_160 = 160,
+    e1000_igp_cable_length_170 = 170,
+    e1000_igp_cable_length_180 = 180
+} e1000_igp_cable_length;
+
+typedef enum {
+    e1000_10bt_ext_dist_enable_normal = 0,
+    e1000_10bt_ext_dist_enable_lower,
+    e1000_10bt_ext_dist_enable_undefined = 0xFF
+} e1000_10bt_ext_dist_enable;
+
+typedef enum {
+    e1000_rev_polarity_normal = 0,
+    e1000_rev_polarity_reversed,
+    e1000_rev_polarity_undefined = 0xFF
+} e1000_rev_polarity;
+
+typedef enum {
+    e1000_downshift_normal = 0,
+    e1000_downshift_activated,
+    e1000_downshift_undefined = 0xFF
+} e1000_downshift;
+
+typedef enum {
+    e1000_smart_speed_default = 0,
+    e1000_smart_speed_on,
+    e1000_smart_speed_off
+} e1000_smart_speed;
+
+typedef enum {
+    e1000_polarity_reversal_enabled = 0,
+    e1000_polarity_reversal_disabled,
+    e1000_polarity_reversal_undefined = 0xFF
+} e1000_polarity_reversal;
+
+typedef enum {
+    e1000_auto_x_mode_manual_mdi = 0,
+    e1000_auto_x_mode_manual_mdix,
+    e1000_auto_x_mode_auto1,
+    e1000_auto_x_mode_auto2,
+    e1000_auto_x_mode_undefined = 0xFF
+} e1000_auto_x_mode;
+
+typedef enum {
+    e1000_1000t_rx_status_not_ok = 0,
+    e1000_1000t_rx_status_ok,
+    e1000_1000t_rx_status_undefined = 0xFF
+} e1000_1000t_rx_status;
+
+typedef enum {
+    e1000_phy_m88 = 0,
+    e1000_phy_igp,
+    e1000_phy_igp_2,
+    e1000_phy_gg82563,
+    e1000_phy_igp_3,
+    e1000_phy_ife,
+    e1000_phy_undefined = 0xFF
+} e1000_phy_type;
+
+typedef enum {
+    e1000_ms_hw_default = 0,
+    e1000_ms_force_master,
+    e1000_ms_force_slave,
+    e1000_ms_auto
+} e1000_ms_type;
+
+typedef enum {
+    e1000_ffe_config_enabled = 0,
+    e1000_ffe_config_active,
+    e1000_ffe_config_blocked
+} e1000_ffe_config;
+
+typedef enum {
+    e1000_dsp_config_disabled = 0,
+    e1000_dsp_config_enabled,
+    e1000_dsp_config_activated,
+    e1000_dsp_config_undefined = 0xFF
+} e1000_dsp_config;
+
+struct e1000_phy_info {
+    e1000_cable_length cable_length;
+    e1000_10bt_ext_dist_enable extended_10bt_distance;
+    e1000_rev_polarity cable_polarity;
+    e1000_downshift downshift;
+    e1000_polarity_reversal polarity_correction;
+    e1000_auto_x_mode mdix_mode;
+    e1000_1000t_rx_status local_rx;
+    e1000_1000t_rx_status remote_rx;
+};
+
+struct e1000_phy_stats {
+    u32 idle_errors;
+    u32 receive_errors;
+};
+
+struct e1000_eeprom_info {
+    e1000_eeprom_type type;
+    u16 word_size;
+    u16 opcode_bits;
+    u16 address_bits;
+    u16 delay_usec;
+    u16 page_size;
+    bool use_eerd;
+    bool use_eewr;
+};
+
+/* Flex ASF Information */
+#define E1000_HOST_IF_MAX_SIZE  2048
+
+typedef enum {
+    e1000_byte_align = 0,
+    e1000_word_align = 1,
+    e1000_dword_align = 2
+} e1000_align_type;
+
+
+
+/* Error Codes */
+#define E1000_SUCCESS      0
+#define E1000_ERR_EEPROM   1
+#define E1000_ERR_PHY      2
+#define E1000_ERR_CONFIG   3
+#define E1000_ERR_PARAM    4
+#define E1000_ERR_MAC_TYPE 5
+#define E1000_ERR_PHY_TYPE 6
+#define E1000_ERR_RESET   9
+#define E1000_ERR_MASTER_REQUESTS_PENDING 10
+#define E1000_ERR_HOST_INTERFACE_COMMAND 11
+#define E1000_BLK_PHY_RESET   12
+#define E1000_ERR_SWFW_SYNC 13
+
+#define E1000_BYTE_SWAP_WORD(_value) ((((_value) & 0x00ff) << 8) | \
+                                     (((_value) & 0xff00) >> 8))
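+/* For illustration: E1000_BYTE_SWAP_WORD(0x1234) evaluates to 0x3412
+ * (the two bytes of the 16-bit word are exchanged). */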
+
+/* Function prototypes */
+/* Initialization */
+s32 e1000_reset_hw(struct e1000_hw *hw);
+s32 e1000_init_hw(struct e1000_hw *hw);
+s32 e1000_set_mac_type(struct e1000_hw *hw);
+void e1000_set_media_type(struct e1000_hw *hw);
+
+/* Link Configuration */
+s32 e1000_setup_link(struct e1000_hw *hw);
+s32 e1000_phy_setup_autoneg(struct e1000_hw *hw);
+void e1000_config_collision_dist(struct e1000_hw *hw);
+s32 e1000_check_for_link(struct e1000_hw *hw);
+s32 e1000_get_speed_and_duplex(struct e1000_hw *hw, u16 *speed, u16 *duplex);
+s32 e1000_force_mac_fc(struct e1000_hw *hw);
+
+/* PHY */
+s32 e1000_read_phy_reg(struct e1000_hw *hw, u32 reg_addr, u16 *phy_data);
+s32 e1000_write_phy_reg(struct e1000_hw *hw, u32 reg_addr, u16 data);
+s32 e1000_phy_hw_reset(struct e1000_hw *hw);
+s32 e1000_phy_reset(struct e1000_hw *hw);
+s32 e1000_phy_get_info(struct e1000_hw *hw, struct e1000_phy_info *phy_info);
+s32 e1000_validate_mdi_setting(struct e1000_hw *hw);
+
+void e1000_phy_powerdown_workaround(struct e1000_hw *hw);
+
+/* EEPROM Functions */
+s32 e1000_init_eeprom_params(struct e1000_hw *hw);
+
+/* MNG HOST IF functions */
+u32 e1000_enable_mng_pass_thru(struct e1000_hw *hw);
+
+#define E1000_MNG_DHCP_TX_PAYLOAD_CMD   64
+#define E1000_HI_MAX_MNG_DATA_LENGTH    0x6F8   /* Host Interface data length */
+
+#define E1000_MNG_DHCP_COMMAND_TIMEOUT  10      /* Time in ms to process MNG command */
+#define E1000_MNG_DHCP_COOKIE_OFFSET    0x6F0   /* Cookie offset */
+#define E1000_MNG_DHCP_COOKIE_LENGTH    0x10    /* Cookie length */
+#define E1000_MNG_IAMT_MODE             0x3
+#define E1000_MNG_ICH_IAMT_MODE         0x2
+#define E1000_IAMT_SIGNATURE            0x544D4149 /* Intel(R) Active Management Technology signature */
+
+#define E1000_MNG_DHCP_COOKIE_STATUS_PARSING_SUPPORT 0x1 /* DHCP parsing enabled */
+#define E1000_MNG_DHCP_COOKIE_STATUS_VLAN_SUPPORT    0x2 /* VLAN support enabled */
+#define E1000_VFTA_ENTRY_SHIFT                       0x5
+#define E1000_VFTA_ENTRY_MASK                        0x7F
+#define E1000_VFTA_ENTRY_BIT_SHIFT_MASK              0x1F
+
+struct e1000_host_mng_command_header {
+    u8 command_id;
+    u8 checksum;
+    u16 reserved1;
+    u16 reserved2;
+    u16 command_length;
+};
+
+struct e1000_host_mng_command_info {
+    struct e1000_host_mng_command_header command_header;  /* Command Head/Command Result Head has 4 bytes */
+    u8 command_data[E1000_HI_MAX_MNG_DATA_LENGTH];   /* Command data can be of length 0..0x658 */
+};
+#ifdef __BIG_ENDIAN
+struct e1000_host_mng_dhcp_cookie{
+    u32 signature;
+    u16 vlan_id;
+    u8 reserved0;
+    u8 status;
+    u32 reserved1;
+    u8 checksum;
+    u8 reserved3;
+    u16 reserved2;
+};
+#else
+struct e1000_host_mng_dhcp_cookie{
+    u32 signature;
+    u8 status;
+    u8 reserved0;
+    u16 vlan_id;
+    u32 reserved1;
+    u16 reserved2;
+    u8 reserved3;
+    u8 checksum;
+};
+#endif
+
+s32 e1000_mng_write_dhcp_info(struct e1000_hw *hw, u8 *buffer,
+                                  u16 length);
+bool e1000_check_mng_mode(struct e1000_hw *hw);
+bool e1000_enable_tx_pkt_filtering(struct e1000_hw *hw);
+s32 e1000_read_eeprom(struct e1000_hw *hw, u16 reg, u16 words, u16 *data);
+s32 e1000_validate_eeprom_checksum(struct e1000_hw *hw);
+s32 e1000_update_eeprom_checksum(struct e1000_hw *hw);
+s32 e1000_write_eeprom(struct e1000_hw *hw, u16 reg, u16 words, u16 *data);
+s32 e1000_read_mac_addr(struct e1000_hw * hw);
+
+/* Filters (multicast, vlan, receive) */
+u32 e1000_hash_mc_addr(struct e1000_hw *hw, u8 * mc_addr);
+void e1000_mta_set(struct e1000_hw *hw, u32 hash_value);
+void e1000_rar_set(struct e1000_hw *hw, u8 * mc_addr, u32 rar_index);
+void e1000_write_vfta(struct e1000_hw *hw, u32 offset, u32 value);
+
+/* LED functions */
+s32 e1000_setup_led(struct e1000_hw *hw);
+s32 e1000_cleanup_led(struct e1000_hw *hw);
+s32 e1000_led_on(struct e1000_hw *hw);
+s32 e1000_led_off(struct e1000_hw *hw);
+s32 e1000_blink_led_start(struct e1000_hw *hw);
+
+/* Adaptive IFS Functions */
+
+/* Everything else */
+void e1000_reset_adaptive(struct e1000_hw *hw);
+void e1000_update_adaptive(struct e1000_hw *hw);
+void e1000_tbi_adjust_stats(struct e1000_hw *hw, struct e1000_hw_stats *stats, u32 frame_len, u8 * mac_addr);
+void e1000_get_bus_info(struct e1000_hw *hw);
+void e1000_pci_set_mwi(struct e1000_hw *hw);
+void e1000_pci_clear_mwi(struct e1000_hw *hw);
+s32 e1000_read_pcie_cap_reg(struct e1000_hw *hw, u32 reg, u16 *value);
+void e1000_pcix_set_mmrbc(struct e1000_hw *hw, int mmrbc);
+int e1000_pcix_get_mmrbc(struct e1000_hw *hw);
+/* Port I/O is only supported on 82544 and newer */
+void e1000_io_write(struct e1000_hw *hw, unsigned long port, u32 value);
+s32 e1000_disable_pciex_master(struct e1000_hw *hw);
+s32 e1000_check_phy_reset_block(struct e1000_hw *hw);
+
+
+#define E1000_READ_REG_IO(a, reg) \
+    e1000_read_reg_io((a), E1000_##reg)
+#define E1000_WRITE_REG_IO(a, reg, val) \
+    e1000_write_reg_io((a), E1000_##reg, val)
+
+/* PCI Device IDs */
+#define E1000_DEV_ID_82542               0x1000
+#define E1000_DEV_ID_82543GC_FIBER       0x1001
+#define E1000_DEV_ID_82543GC_COPPER      0x1004
+#define E1000_DEV_ID_82544EI_COPPER      0x1008
+#define E1000_DEV_ID_82544EI_FIBER       0x1009
+#define E1000_DEV_ID_82544GC_COPPER      0x100C
+#define E1000_DEV_ID_82544GC_LOM         0x100D
+#define E1000_DEV_ID_82540EM             0x100E
+#define E1000_DEV_ID_82540EM_LOM         0x1015
+#define E1000_DEV_ID_82540EP_LOM         0x1016
+#define E1000_DEV_ID_82540EP             0x1017
+#define E1000_DEV_ID_82540EP_LP          0x101E
+#define E1000_DEV_ID_82545EM_COPPER      0x100F
+#define E1000_DEV_ID_82545EM_FIBER       0x1011
+#define E1000_DEV_ID_82545GM_COPPER      0x1026
+#define E1000_DEV_ID_82545GM_FIBER       0x1027
+#define E1000_DEV_ID_82545GM_SERDES      0x1028
+#define E1000_DEV_ID_82546EB_COPPER      0x1010
+#define E1000_DEV_ID_82546EB_FIBER       0x1012
+#define E1000_DEV_ID_82546EB_QUAD_COPPER 0x101D
+#define E1000_DEV_ID_82541EI             0x1013
+#define E1000_DEV_ID_82541EI_MOBILE      0x1018
+#define E1000_DEV_ID_82541ER_LOM         0x1014
+#define E1000_DEV_ID_82541ER             0x1078
+#define E1000_DEV_ID_82547GI             0x1075
+#define E1000_DEV_ID_82541GI             0x1076
+#define E1000_DEV_ID_82541GI_MOBILE      0x1077
+#define E1000_DEV_ID_82541GI_LF          0x107C
+#define E1000_DEV_ID_82546GB_COPPER      0x1079
+#define E1000_DEV_ID_82546GB_FIBER       0x107A
+#define E1000_DEV_ID_82546GB_SERDES      0x107B
+#define E1000_DEV_ID_82546GB_PCIE        0x108A
+#define E1000_DEV_ID_82546GB_QUAD_COPPER 0x1099
+#define E1000_DEV_ID_82547EI             0x1019
+#define E1000_DEV_ID_82547EI_MOBILE      0x101A
+#define E1000_DEV_ID_82571EB_COPPER      0x105E
+#define E1000_DEV_ID_82571EB_FIBER       0x105F
+#define E1000_DEV_ID_82571EB_SERDES      0x1060
+#define E1000_DEV_ID_82571EB_QUAD_COPPER 0x10A4
+#define E1000_DEV_ID_82571PT_QUAD_COPPER 0x10D5
+#define E1000_DEV_ID_82571EB_QUAD_FIBER  0x10A5
+#define E1000_DEV_ID_82571EB_QUAD_COPPER_LOWPROFILE  0x10BC
+#define E1000_DEV_ID_82571EB_SERDES_DUAL 0x10D9
+#define E1000_DEV_ID_82571EB_SERDES_QUAD 0x10DA
+#define E1000_DEV_ID_82572EI_COPPER      0x107D
+#define E1000_DEV_ID_82572EI_FIBER       0x107E
+#define E1000_DEV_ID_82572EI_SERDES      0x107F
+#define E1000_DEV_ID_82572EI             0x10B9
+#define E1000_DEV_ID_82573E              0x108B
+#define E1000_DEV_ID_82573E_IAMT         0x108C
+#define E1000_DEV_ID_82573L              0x109A
+#define E1000_DEV_ID_82546GB_QUAD_COPPER_KSP3 0x10B5
+#define E1000_DEV_ID_80003ES2LAN_COPPER_DPT     0x1096
+#define E1000_DEV_ID_80003ES2LAN_SERDES_DPT     0x1098
+#define E1000_DEV_ID_80003ES2LAN_COPPER_SPT     0x10BA
+#define E1000_DEV_ID_80003ES2LAN_SERDES_SPT     0x10BB
+
+#define E1000_DEV_ID_ICH8_IGP_M_AMT      0x1049
+#define E1000_DEV_ID_ICH8_IGP_AMT        0x104A
+#define E1000_DEV_ID_ICH8_IGP_C          0x104B
+#define E1000_DEV_ID_ICH8_IFE            0x104C
+#define E1000_DEV_ID_ICH8_IFE_GT         0x10C4
+#define E1000_DEV_ID_ICH8_IFE_G          0x10C5
+#define E1000_DEV_ID_ICH8_IGP_M          0x104D
+
+
+#define NODE_ADDRESS_SIZE 6
+#define ETH_LENGTH_OF_ADDRESS 6
+
+/* MAC decode size is 128K - This is the size of BAR0 */
+#define MAC_DECODE_SIZE (128 * 1024)
+
+#define E1000_82542_2_0_REV_ID 2
+#define E1000_82542_2_1_REV_ID 3
+#define E1000_REVISION_0       0
+#define E1000_REVISION_1       1
+#define E1000_REVISION_2       2
+#define E1000_REVISION_3       3
+
+#define SPEED_10    10
+#define SPEED_100   100
+#define SPEED_1000  1000
+#define HALF_DUPLEX 1
+#define FULL_DUPLEX 2
+
+/* The sizes (in bytes) of an Ethernet packet */
+#define ENET_HEADER_SIZE             14
+#define MAXIMUM_ETHERNET_FRAME_SIZE  1518 /* With FCS */
+#define MINIMUM_ETHERNET_FRAME_SIZE  64   /* With FCS */
+#define ETHERNET_FCS_SIZE            4
+#define MAXIMUM_ETHERNET_PACKET_SIZE \
+    (MAXIMUM_ETHERNET_FRAME_SIZE - ETHERNET_FCS_SIZE)
+#define MINIMUM_ETHERNET_PACKET_SIZE \
+    (MINIMUM_ETHERNET_FRAME_SIZE - ETHERNET_FCS_SIZE)
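+/* For illustration: with the 4-byte FCS excluded,
+ * MAXIMUM_ETHERNET_PACKET_SIZE is 1518 - 4 = 1514 and
+ * MINIMUM_ETHERNET_PACKET_SIZE is 64 - 4 = 60. */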
+#define CRC_LENGTH                   ETHERNET_FCS_SIZE
+#define MAX_JUMBO_FRAME_SIZE         0x3F00
+
+
+/* 802.1q VLAN Packet Sizes */
+#define VLAN_TAG_SIZE  4     /* 802.3ac tag (not DMAed) */
+
+/* Ethertype field values */
+#define ETHERNET_IEEE_VLAN_TYPE 0x8100  /* 802.3ac packet */
+#define ETHERNET_IP_TYPE        0x0800  /* IP packets */
+#define ETHERNET_ARP_TYPE       0x0806  /* Address Resolution Protocol (ARP) */
+
+/* Packet Header defines */
+#define IP_PROTOCOL_TCP    6
+#define IP_PROTOCOL_UDP    0x11
+
+/* This defines the bits that are set in the Interrupt Mask
+ * Set/Read Register.  Each bit is documented below:
+ *   o RXDMT0 = Receive Descriptor Minimum Threshold hit (ring 0)
+ *   o RXSEQ  = Receive Sequence Error
+ */
+#define POLL_IMS_ENABLE_MASK ( \
+    E1000_IMS_RXDMT0 |         \
+    E1000_IMS_RXSEQ)
+
+/* This defines the bits that are set in the Interrupt Mask
+ * Set/Read Register.  Each bit is documented below:
+ *   o RXT0   = Receiver Timer Interrupt (ring 0)
+ *   o TXDW   = Transmit Descriptor Written Back
+ *   o RXDMT0 = Receive Descriptor Minimum Threshold hit (ring 0)
+ *   o RXSEQ  = Receive Sequence Error
+ *   o LSC    = Link Status Change
+ */
+#define IMS_ENABLE_MASK ( \
+    E1000_IMS_RXT0   |    \
+    E1000_IMS_TXDW   |    \
+    E1000_IMS_RXDMT0 |    \
+    E1000_IMS_RXSEQ  |    \
+    E1000_IMS_LSC)
+
+/* Additional interrupts need to be handled for e1000_ich8lan:
+    DSW = The FW changed the status of the DISSW bit in FWSM
+    PHYINT = The LAN connected device generates an interrupt
+    EPRST = Manageability reset event */
+#define IMS_ICH8LAN_ENABLE_MASK (\
+    E1000_IMS_DSW   | \
+    E1000_IMS_PHYINT | \
+    E1000_IMS_EPRST)
+
+/* Number of high/low register pairs in the RAR. The RAR (Receive Address
+ * Registers) holds the directed and multicast addresses that we monitor. We
+ * reserve one of these spots for our directed address, allowing us room for
+ * E1000_RAR_ENTRIES - 1 multicast addresses.
+ */
+#define E1000_RAR_ENTRIES 15
+
+#define E1000_RAR_ENTRIES_ICH8LAN  6
+
+#define MIN_NUMBER_OF_DESCRIPTORS  8
+#define MAX_NUMBER_OF_DESCRIPTORS  0xFFF8
+
+/* Receive Descriptor */
+struct e1000_rx_desc {
+    __le64 buffer_addr; /* Address of the descriptor's data buffer */
+    __le16 length;     /* Length of data DMAed into data buffer */
+    __le16 csum;       /* Packet checksum */
+    u8 status;      /* Descriptor status */
+    u8 errors;      /* Descriptor Errors */
+    __le16 special;
+};
+
+/* Receive Descriptor - Extended */
+union e1000_rx_desc_extended {
+    struct {
+        __le64 buffer_addr;
+        __le64 reserved;
+    } read;
+    struct {
+        struct {
+            __le32 mrq;              /* Multiple Rx Queues */
+            union {
+                __le32 rss;          /* RSS Hash */
+                struct {
+                    __le16 ip_id;    /* IP id */
+                    __le16 csum;     /* Packet Checksum */
+                } csum_ip;
+            } hi_dword;
+        } lower;
+        struct {
+            __le32 status_error;     /* ext status/error */
+            __le16 length;
+            __le16 vlan;             /* VLAN tag */
+        } upper;
+    } wb;  /* writeback */
+};
+
+#define MAX_PS_BUFFERS 4
+/* Receive Descriptor - Packet Split */
+union e1000_rx_desc_packet_split {
+    struct {
+        /* one buffer for protocol header(s), three data buffers */
+        __le64 buffer_addr[MAX_PS_BUFFERS];
+    } read;
+    struct {
+        struct {
+            __le32 mrq;              /* Multiple Rx Queues */
+            union {
+                __le32 rss;          /* RSS Hash */
+                struct {
+                    __le16 ip_id;    /* IP id */
+                    __le16 csum;     /* Packet Checksum */
+                } csum_ip;
+            } hi_dword;
+        } lower;
+        struct {
+            __le32 status_error;     /* ext status/error */
+            __le16 length0;          /* length of buffer 0 */
+            __le16 vlan;             /* VLAN tag */
+        } middle;
+        struct {
+            __le16 header_status;
+            __le16 length[3];        /* length of buffers 1-3 */
+        } upper;
+        __le64 reserved;
+    } wb; /* writeback */
+};
+
+/* Receive Descriptor bit definitions */
+#define E1000_RXD_STAT_DD       0x01    /* Descriptor Done */
+#define E1000_RXD_STAT_EOP      0x02    /* End of Packet */
+#define E1000_RXD_STAT_IXSM     0x04    /* Ignore checksum */
+#define E1000_RXD_STAT_VP       0x08    /* IEEE VLAN Packet */
+#define E1000_RXD_STAT_UDPCS    0x10    /* UDP xsum calculated */
+#define E1000_RXD_STAT_TCPCS    0x20    /* TCP xsum calculated */
+#define E1000_RXD_STAT_IPCS     0x40    /* IP xsum calculated */
+#define E1000_RXD_STAT_PIF      0x80    /* passed in-exact filter */
+#define E1000_RXD_STAT_IPIDV    0x200   /* IP identification valid */
+#define E1000_RXD_STAT_UDPV     0x400   /* Valid UDP checksum */
+#define E1000_RXD_STAT_ACK      0x8000  /* ACK Packet indication */
+#define E1000_RXD_ERR_CE        0x01    /* CRC Error */
+#define E1000_RXD_ERR_SE        0x02    /* Symbol Error */
+#define E1000_RXD_ERR_SEQ       0x04    /* Sequence Error */
+#define E1000_RXD_ERR_CXE       0x10    /* Carrier Extension Error */
+#define E1000_RXD_ERR_TCPE      0x20    /* TCP/UDP Checksum Error */
+#define E1000_RXD_ERR_IPE       0x40    /* IP Checksum Error */
+#define E1000_RXD_ERR_RXE       0x80    /* Rx Data Error */
+#define E1000_RXD_SPC_VLAN_MASK 0x0FFF  /* VLAN ID is in lower 12 bits */
+#define E1000_RXD_SPC_PRI_MASK  0xE000  /* Priority is in upper 3 bits */
+#define E1000_RXD_SPC_PRI_SHIFT 13
+#define E1000_RXD_SPC_CFI_MASK  0x1000  /* CFI is bit 12 */
+#define E1000_RXD_SPC_CFI_SHIFT 12
+
+#define E1000_RXDEXT_STATERR_CE    0x01000000
+#define E1000_RXDEXT_STATERR_SE    0x02000000
+#define E1000_RXDEXT_STATERR_SEQ   0x04000000
+#define E1000_RXDEXT_STATERR_CXE   0x10000000
+#define E1000_RXDEXT_STATERR_TCPE  0x20000000
+#define E1000_RXDEXT_STATERR_IPE   0x40000000
+#define E1000_RXDEXT_STATERR_RXE   0x80000000
+
+#define E1000_RXDPS_HDRSTAT_HDRSP        0x00008000
+#define E1000_RXDPS_HDRSTAT_HDRLEN_MASK  0x000003FF
+
+/* mask to determine if packets should be dropped due to frame errors */
+#define E1000_RXD_ERR_FRAME_ERR_MASK ( \
+    E1000_RXD_ERR_CE  |                \
+    E1000_RXD_ERR_SE  |                \
+    E1000_RXD_ERR_SEQ |                \
+    E1000_RXD_ERR_CXE |                \
+    E1000_RXD_ERR_RXE)
+
+
+/* Same mask, but for extended and packet split descriptors */
+#define E1000_RXDEXT_ERR_FRAME_ERR_MASK ( \
+    E1000_RXDEXT_STATERR_CE  |            \
+    E1000_RXDEXT_STATERR_SE  |            \
+    E1000_RXDEXT_STATERR_SEQ |            \
+    E1000_RXDEXT_STATERR_CXE |            \
+    E1000_RXDEXT_STATERR_RXE)
+
+
+/* Transmit Descriptor */
+struct e1000_tx_desc {
+    __le64 buffer_addr;       /* Address of the descriptor's data buffer */
+    union {
+        __le32 data;
+        struct {
+            __le16 length;    /* Data buffer length */
+            u8 cso;        /* Checksum offset */
+            u8 cmd;        /* Descriptor control */
+        } flags;
+    } lower;
+    union {
+        __le32 data;
+        struct {
+            u8 status;     /* Descriptor status */
+            u8 css;        /* Checksum start */
+            __le16 special;
+        } fields;
+    } upper;
+};
+
+/* Transmit Descriptor bit definitions */
+#define E1000_TXD_DTYP_D     0x00100000 /* Data Descriptor */
+#define E1000_TXD_DTYP_C     0x00000000 /* Context Descriptor */
+#define E1000_TXD_POPTS_IXSM 0x01       /* Insert IP checksum */
+#define E1000_TXD_POPTS_TXSM 0x02       /* Insert TCP/UDP checksum */
+#define E1000_TXD_CMD_EOP    0x01000000 /* End of Packet */
+#define E1000_TXD_CMD_IFCS   0x02000000 /* Insert FCS (Ethernet CRC) */
+#define E1000_TXD_CMD_IC     0x04000000 /* Insert Checksum */
+#define E1000_TXD_CMD_RS     0x08000000 /* Report Status */
+#define E1000_TXD_CMD_RPS    0x10000000 /* Report Packet Sent */
+#define E1000_TXD_CMD_DEXT   0x20000000 /* Descriptor extension (0 = legacy) */
+#define E1000_TXD_CMD_VLE    0x40000000 /* Add VLAN tag */
+#define E1000_TXD_CMD_IDE    0x80000000 /* Enable Tidv register */
+#define E1000_TXD_STAT_DD    0x00000001 /* Descriptor Done */
+#define E1000_TXD_STAT_EC    0x00000002 /* Excess Collisions */
+#define E1000_TXD_STAT_LC    0x00000004 /* Late Collisions */
+#define E1000_TXD_STAT_TU    0x00000008 /* Transmit underrun */
+#define E1000_TXD_CMD_TCP    0x01000000 /* TCP packet */
+#define E1000_TXD_CMD_IP     0x02000000 /* IP packet */
+#define E1000_TXD_CMD_TSE    0x04000000 /* TCP Seg enable */
+#define E1000_TXD_STAT_TC    0x00000004 /* Tx Underrun */
+
+/* Offload Context Descriptor */
+struct e1000_context_desc {
+    union {
+        __le32 ip_config;
+        struct {
+            u8 ipcss;      /* IP checksum start */
+            u8 ipcso;      /* IP checksum offset */
+            __le16 ipcse;     /* IP checksum end */
+        } ip_fields;
+    } lower_setup;
+    union {
+        __le32 tcp_config;
+        struct {
+            u8 tucss;      /* TCP checksum start */
+            u8 tucso;      /* TCP checksum offset */
+            __le16 tucse;     /* TCP checksum end */
+        } tcp_fields;
+    } upper_setup;
+    __le32 cmd_and_length;    /* */
+    union {
+        __le32 data;
+        struct {
+            u8 status;     /* Descriptor status */
+            u8 hdr_len;    /* Header length */
+            __le16 mss;       /* Maximum segment size */
+        } fields;
+    } tcp_seg_setup;
+};
+
+/* Offload data descriptor */
+struct e1000_data_desc {
+    __le64 buffer_addr;       /* Address of the descriptor's buffer address */
+    union {
+        __le32 data;
+        struct {
+            __le16 length;    /* Data buffer length */
+            u8 typ_len_ext;        /* */
+            u8 cmd;        /* */
+        } flags;
+    } lower;
+    union {
+        __le32 data;
+        struct {
+            u8 status;     /* Descriptor status */
+            u8 popts;      /* Packet Options */
+            __le16 special;   /* */
+        } fields;
+    } upper;
+};
+
+/* Filters */
+#define E1000_NUM_UNICAST          16   /* Unicast filter entries */
+#define E1000_MC_TBL_SIZE          128  /* Multicast Filter Table (4096 bits) */
+#define E1000_VLAN_FILTER_TBL_SIZE 128  /* VLAN Filter Table (4096 bits) */
+
+#define E1000_NUM_UNICAST_ICH8LAN  7
+#define E1000_MC_TBL_SIZE_ICH8LAN  32
+
+
+/* Receive Address Register */
+struct e1000_rar {
+    volatile __le32 low;      /* receive address low */
+    volatile __le32 high;     /* receive address high */
+};
+
+/* Number of entries in the Multicast Table Array (MTA). */
+#define E1000_NUM_MTA_REGISTERS 128
+#define E1000_NUM_MTA_REGISTERS_ICH8LAN 32
+
+/* IPv4 Address Table Entry */
+struct e1000_ipv4_at_entry {
+    volatile u32 ipv4_addr;        /* IP Address (RW) */
+    volatile u32 reserved;
+};
+
+/* Four wakeup IP addresses are supported */
+#define E1000_WAKEUP_IP_ADDRESS_COUNT_MAX 4
+#define E1000_IP4AT_SIZE                  E1000_WAKEUP_IP_ADDRESS_COUNT_MAX
+#define E1000_IP4AT_SIZE_ICH8LAN          3
+#define E1000_IP6AT_SIZE                  1
+
+/* IPv6 Address Table Entry */
+struct e1000_ipv6_at_entry {
+    volatile u8 ipv6_addr[16];
+};
+
+/* Flexible Filter Length Table Entry */
+struct e1000_fflt_entry {
+    volatile u32 length;   /* Flexible Filter Length (RW) */
+    volatile u32 reserved;
+};
+
+/* Flexible Filter Mask Table Entry */
+struct e1000_ffmt_entry {
+    volatile u32 mask;     /* Flexible Filter Mask (RW) */
+    volatile u32 reserved;
+};
+
+/* Flexible Filter Value Table Entry */
+struct e1000_ffvt_entry {
+    volatile u32 value;    /* Flexible Filter Value (RW) */
+    volatile u32 reserved;
+};
+
+/* Four Flexible Filters are supported */
+#define E1000_FLEXIBLE_FILTER_COUNT_MAX 4
+
+/* Each Flexible Filter is at most 128 (0x80) bytes in length */
+#define E1000_FLEXIBLE_FILTER_SIZE_MAX  128
+
+#define E1000_FFLT_SIZE E1000_FLEXIBLE_FILTER_COUNT_MAX
+#define E1000_FFMT_SIZE E1000_FLEXIBLE_FILTER_SIZE_MAX
+#define E1000_FFVT_SIZE E1000_FLEXIBLE_FILTER_SIZE_MAX
+
+#define E1000_DISABLE_SERDES_LOOPBACK   0x0400
+
+/* Register Set. (82543, 82544)
+ *
+ * Registers are defined to be 32 bits and should be accessed as 32 bit values.
+ * These registers are physically located on the NIC, but are mapped into the
+ * host memory address space.
+ *
+ * RW - register is both readable and writable
+ * RO - register is read only
+ * WO - register is write only
+ * R/clr - register is read only and is cleared when read
+ * A - register array
+ */
+#define E1000_CTRL     0x00000  /* Device Control - RW */
+#define E1000_CTRL_DUP 0x00004  /* Device Control Duplicate (Shadow) - RW */
+#define E1000_STATUS   0x00008  /* Device Status - RO */
+#define E1000_EECD     0x00010  /* EEPROM/Flash Control - RW */
+#define E1000_EERD     0x00014  /* EEPROM Read - RW */
+#define E1000_CTRL_EXT 0x00018  /* Extended Device Control - RW */
+#define E1000_FLA      0x0001C  /* Flash Access - RW */
+#define E1000_MDIC     0x00020  /* MDI Control - RW */
+#define E1000_SCTL     0x00024  /* SerDes Control - RW */
+#define E1000_FEXTNVM  0x00028  /* Future Extended NVM register */
+#define E1000_FCAL     0x00028  /* Flow Control Address Low - RW */
+#define E1000_FCAH     0x0002C  /* Flow Control Address High -RW */
+#define E1000_FCT      0x00030  /* Flow Control Type - RW */
+#define E1000_VET      0x00038  /* VLAN Ether Type - RW */
+#define E1000_ICR      0x000C0  /* Interrupt Cause Read - R/clr */
+#define E1000_ITR      0x000C4  /* Interrupt Throttling Rate - RW */
+#define E1000_ICS      0x000C8  /* Interrupt Cause Set - WO */
+#define E1000_IMS      0x000D0  /* Interrupt Mask Set - RW */
+#define E1000_IMC      0x000D8  /* Interrupt Mask Clear - WO */
+#define E1000_IAM      0x000E0  /* Interrupt Acknowledge Auto Mask */
+#define E1000_RCTL     0x00100  /* RX Control - RW */
+#define E1000_RDTR1    0x02820  /* RX Delay Timer (1) - RW */
+#define E1000_RDBAL1   0x02900  /* RX Descriptor Base Address Low (1) - RW */
+#define E1000_RDBAH1   0x02904  /* RX Descriptor Base Address High (1) - RW */
+#define E1000_RDLEN1   0x02908  /* RX Descriptor Length (1) - RW */
+#define E1000_RDH1     0x02910  /* RX Descriptor Head (1) - RW */
+#define E1000_RDT1     0x02918  /* RX Descriptor Tail (1) - RW */
+#define E1000_FCTTV    0x00170  /* Flow Control Transmit Timer Value - RW */
+#define E1000_TXCW     0x00178  /* TX Configuration Word - RW */
+#define E1000_RXCW     0x00180  /* RX Configuration Word - RO */
+#define E1000_TCTL     0x00400  /* TX Control - RW */
+#define E1000_TCTL_EXT 0x00404  /* Extended TX Control - RW */
+#define E1000_TIPG     0x00410  /* TX Inter-packet gap -RW */
+#define E1000_TBT      0x00448  /* TX Burst Timer - RW */
+#define E1000_AIT      0x00458  /* Adaptive Interframe Spacing Throttle - RW */
+#define E1000_LEDCTL   0x00E00  /* LED Control - RW */
+#define E1000_EXTCNF_CTRL  0x00F00  /* Extended Configuration Control */
+#define E1000_EXTCNF_SIZE  0x00F08  /* Extended Configuration Size */
+#define E1000_PHY_CTRL     0x00F10  /* PHY Control Register in CSR */
+#define FEXTNVM_SW_CONFIG  0x0001
+#define E1000_PBA      0x01000  /* Packet Buffer Allocation - RW */
+#define E1000_PBS      0x01008  /* Packet Buffer Size */
+#define E1000_EEMNGCTL 0x01010  /* MNG EEprom Control */
+#define E1000_FLASH_UPDATES 1000
+#define E1000_EEARBC   0x01024  /* EEPROM Auto Read Bus Control */
+#define E1000_FLASHT   0x01028  /* FLASH Timer Register */
+#define E1000_EEWR     0x0102C  /* EEPROM Write Register - RW */
+#define E1000_FLSWCTL  0x01030  /* FLASH control register */
+#define E1000_FLSWDATA 0x01034  /* FLASH data register */
+#define E1000_FLSWCNT  0x01038  /* FLASH Access Counter */
+#define E1000_FLOP     0x0103C  /* FLASH Opcode Register */
+#define E1000_ERT      0x02008  /* Early Rx Threshold - RW */
+#define E1000_FCRTL    0x02160  /* Flow Control Receive Threshold Low - RW */
+#define E1000_FCRTH    0x02168  /* Flow Control Receive Threshold High - RW */
+#define E1000_PSRCTL   0x02170  /* Packet Split Receive Control - RW */
+#define E1000_RDBAL    0x02800  /* RX Descriptor Base Address Low - RW */
+#define E1000_RDBAH    0x02804  /* RX Descriptor Base Address High - RW */
+#define E1000_RDLEN    0x02808  /* RX Descriptor Length - RW */
+#define E1000_RDH      0x02810  /* RX Descriptor Head - RW */
+#define E1000_RDT      0x02818  /* RX Descriptor Tail - RW */
+#define E1000_RDTR     0x02820  /* RX Delay Timer - RW */
+#define E1000_RDBAL0   E1000_RDBAL /* RX Desc Base Address Low (0) - RW */
+#define E1000_RDBAH0   E1000_RDBAH /* RX Desc Base Address High (0) - RW */
+#define E1000_RDLEN0   E1000_RDLEN /* RX Desc Length (0) - RW */
+#define E1000_RDH0     E1000_RDH   /* RX Desc Head (0) - RW */
+#define E1000_RDT0     E1000_RDT   /* RX Desc Tail (0) - RW */
+#define E1000_RDTR0    E1000_RDTR  /* RX Delay Timer (0) - RW */
+#define E1000_RXDCTL   0x02828  /* RX Descriptor Control queue 0 - RW */
+#define E1000_RXDCTL1  0x02928  /* RX Descriptor Control queue 1 - RW */
+#define E1000_RADV     0x0282C  /* RX Interrupt Absolute Delay Timer - RW */
+#define E1000_RSRPD    0x02C00  /* RX Small Packet Detect - RW */
+#define E1000_RAID     0x02C08  /* Receive Ack Interrupt Delay - RW */
+#define E1000_TXDMAC   0x03000  /* TX DMA Control - RW */
+#define E1000_KABGTXD  0x03004  /* AFE Band Gap Transmit Ref Data */
+#define E1000_TDFH     0x03410  /* TX Data FIFO Head - RW */
+#define E1000_TDFT     0x03418  /* TX Data FIFO Tail - RW */
+#define E1000_TDFHS    0x03420  /* TX Data FIFO Head Saved - RW */
+#define E1000_TDFTS    0x03428  /* TX Data FIFO Tail Saved - RW */
+#define E1000_TDFPC    0x03430  /* TX Data FIFO Packet Count - RW */
+#define E1000_TDBAL    0x03800  /* TX Descriptor Base Address Low - RW */
+#define E1000_TDBAH    0x03804  /* TX Descriptor Base Address High - RW */
+#define E1000_TDLEN    0x03808  /* TX Descriptor Length - RW */
+#define E1000_TDH      0x03810  /* TX Descriptor Head - RW */
+#define E1000_TDT      0x03818  /* TX Descriptor Tail - RW */
+#define E1000_TIDV     0x03820  /* TX Interrupt Delay Value - RW */
+#define E1000_TXDCTL   0x03828  /* TX Descriptor Control - RW */
+#define E1000_TADV     0x0382C  /* TX Interrupt Absolute Delay Val - RW */
+#define E1000_TSPMT    0x03830  /* TCP Segmentation PAD & Min Threshold - RW */
+#define E1000_TARC0    0x03840  /* TX Arbitration Count (0) */
+#define E1000_TDBAL1   0x03900  /* TX Desc Base Address Low (1) - RW */
+#define E1000_TDBAH1   0x03904  /* TX Desc Base Address High (1) - RW */
+#define E1000_TDLEN1   0x03908  /* TX Desc Length (1) - RW */
+#define E1000_TDH1     0x03910  /* TX Desc Head (1) - RW */
+#define E1000_TDT1     0x03918  /* TX Desc Tail (1) - RW */
+#define E1000_TXDCTL1  0x03928  /* TX Descriptor Control (1) - RW */
+#define E1000_TARC1    0x03940  /* TX Arbitration Count (1) */
+#define E1000_CRCERRS  0x04000  /* CRC Error Count - R/clr */
+#define E1000_ALGNERRC 0x04004  /* Alignment Error Count - R/clr */
+#define E1000_SYMERRS  0x04008  /* Symbol Error Count - R/clr */
+#define E1000_RXERRC   0x0400C  /* Receive Error Count - R/clr */
+#define E1000_MPC      0x04010  /* Missed Packet Count - R/clr */
+#define E1000_SCC      0x04014  /* Single Collision Count - R/clr */
+#define E1000_ECOL     0x04018  /* Excessive Collision Count - R/clr */
+#define E1000_MCC      0x0401C  /* Multiple Collision Count - R/clr */
+#define E1000_LATECOL  0x04020  /* Late Collision Count - R/clr */
+#define E1000_COLC     0x04028  /* Collision Count - R/clr */
+#define E1000_DC       0x04030  /* Defer Count - R/clr */
+#define E1000_TNCRS    0x04034  /* TX-No CRS - R/clr */
+#define E1000_SEC      0x04038  /* Sequence Error Count - R/clr */
+#define E1000_CEXTERR  0x0403C  /* Carrier Extension Error Count - R/clr */
+#define E1000_RLEC     0x04040  /* Receive Length Error Count - R/clr */
+#define E1000_XONRXC   0x04048  /* XON RX Count - R/clr */
+#define E1000_XONTXC   0x0404C  /* XON TX Count - R/clr */
+#define E1000_XOFFRXC  0x04050  /* XOFF RX Count - R/clr */
+#define E1000_XOFFTXC  0x04054  /* XOFF TX Count - R/clr */
+#define E1000_FCRUC    0x04058  /* Flow Control RX Unsupported Count- R/clr */
+#define E1000_PRC64    0x0405C  /* Packets RX (64 bytes) - R/clr */
+#define E1000_PRC127   0x04060  /* Packets RX (65-127 bytes) - R/clr */
+#define E1000_PRC255   0x04064  /* Packets RX (128-255 bytes) - R/clr */
+#define E1000_PRC511   0x04068  /* Packets RX (255-511 bytes) - R/clr */
+#define E1000_PRC1023  0x0406C  /* Packets RX (512-1023 bytes) - R/clr */
+#define E1000_PRC1522  0x04070  /* Packets RX (1024-1522 bytes) - R/clr */
+#define E1000_GPRC     0x04074  /* Good Packets RX Count - R/clr */
+#define E1000_BPRC     0x04078  /* Broadcast Packets RX Count - R/clr */
+#define E1000_MPRC     0x0407C  /* Multicast Packets RX Count - R/clr */
+#define E1000_GPTC     0x04080  /* Good Packets TX Count - R/clr */
+#define E1000_GORCL    0x04088  /* Good Octets RX Count Low - R/clr */
+#define E1000_GORCH    0x0408C  /* Good Octets RX Count High - R/clr */
+#define E1000_GOTCL    0x04090  /* Good Octets TX Count Low - R/clr */
+#define E1000_GOTCH    0x04094  /* Good Octets TX Count High - R/clr */
+#define E1000_RNBC     0x040A0  /* RX No Buffers Count - R/clr */
+#define E1000_RUC      0x040A4  /* RX Undersize Count - R/clr */
+#define E1000_RFC      0x040A8  /* RX Fragment Count - R/clr */
+#define E1000_ROC      0x040AC  /* RX Oversize Count - R/clr */
+#define E1000_RJC      0x040B0  /* RX Jabber Count - R/clr */
+#define E1000_MGTPRC   0x040B4  /* Management Packets RX Count - R/clr */
+#define E1000_MGTPDC   0x040B8  /* Management Packets Dropped Count - R/clr */
+#define E1000_MGTPTC   0x040BC  /* Management Packets TX Count - R/clr */
+#define E1000_TORL     0x040C0  /* Total Octets RX Low - R/clr */
+#define E1000_TORH     0x040C4  /* Total Octets RX High - R/clr */
+#define E1000_TOTL     0x040C8  /* Total Octets TX Low - R/clr */
+#define E1000_TOTH     0x040CC  /* Total Octets TX High - R/clr */
+#define E1000_TPR      0x040D0  /* Total Packets RX - R/clr */
+#define E1000_TPT      0x040D4  /* Total Packets TX - R/clr */
+#define E1000_PTC64    0x040D8  /* Packets TX (64 bytes) - R/clr */
+#define E1000_PTC127   0x040DC  /* Packets TX (65-127 bytes) - R/clr */
+#define E1000_PTC255   0x040E0  /* Packets TX (128-255 bytes) - R/clr */
+#define E1000_PTC511   0x040E4  /* Packets TX (256-511 bytes) - R/clr */
+#define E1000_PTC1023  0x040E8  /* Packets TX (512-1023 bytes) - R/clr */
+#define E1000_PTC1522  0x040EC  /* Packets TX (1024-1522 Bytes) - R/clr */
+#define E1000_MPTC     0x040F0  /* Multicast Packets TX Count - R/clr */
+#define E1000_BPTC     0x040F4  /* Broadcast Packets TX Count - R/clr */
+#define E1000_TSCTC    0x040F8  /* TCP Segmentation Context TX - R/clr */
+#define E1000_TSCTFC   0x040FC  /* TCP Segmentation Context TX Fail - R/clr */
+#define E1000_IAC      0x04100  /* Interrupt Assertion Count */
+#define E1000_ICRXPTC  0x04104  /* Interrupt Cause Rx Packet Timer Expire Count */
+#define E1000_ICRXATC  0x04108  /* Interrupt Cause Rx Absolute Timer Expire Count */
+#define E1000_ICTXPTC  0x0410C  /* Interrupt Cause Tx Packet Timer Expire Count */
+#define E1000_ICTXATC  0x04110  /* Interrupt Cause Tx Absolute Timer Expire Count */
+#define E1000_ICTXQEC  0x04118  /* Interrupt Cause Tx Queue Empty Count */
+#define E1000_ICTXQMTC 0x0411C  /* Interrupt Cause Tx Queue Minimum Threshold Count */
+#define E1000_ICRXDMTC 0x04120  /* Interrupt Cause Rx Descriptor Minimum Threshold Count */
+#define E1000_ICRXOC   0x04124  /* Interrupt Cause Receiver Overrun Count */
+#define E1000_RXCSUM   0x05000  /* RX Checksum Control - RW */
+#define E1000_RFCTL    0x05008  /* Receive Filter Control*/
+#define E1000_MTA      0x05200  /* Multicast Table Array - RW Array */
+#define E1000_RA       0x05400  /* Receive Address - RW Array */
+#define E1000_VFTA     0x05600  /* VLAN Filter Table Array - RW Array */
+#define E1000_WUC      0x05800  /* Wakeup Control - RW */
+#define E1000_WUFC     0x05808  /* Wakeup Filter Control - RW */
+#define E1000_WUS      0x05810  /* Wakeup Status - RO */
+#define E1000_MANC     0x05820  /* Management Control - RW */
+#define E1000_IPAV     0x05838  /* IP Address Valid - RW */
+#define E1000_IP4AT    0x05840  /* IPv4 Address Table - RW Array */
+#define E1000_IP6AT    0x05880  /* IPv6 Address Table - RW Array */
+#define E1000_WUPL     0x05900  /* Wakeup Packet Length - RW */
+#define E1000_WUPM     0x05A00  /* Wakeup Packet Memory - RO A */
+#define E1000_FFLT     0x05F00  /* Flexible Filter Length Table - RW Array */
+#define E1000_HOST_IF  0x08800  /* Host Interface */
+#define E1000_FFMT     0x09000  /* Flexible Filter Mask Table - RW Array */
+#define E1000_FFVT     0x09800  /* Flexible Filter Value Table - RW Array */
+
+#define E1000_KUMCTRLSTA 0x00034 /* MAC-PHY interface - RW */
+#define E1000_MDPHYA     0x0003C  /* PHY address - RW */
+#define E1000_MANC2H     0x05860  /* Management Control To Host - RW */
+#define E1000_SW_FW_SYNC 0x05B5C /* Software-Firmware Synchronization - RW */
+
+#define E1000_GCR       0x05B00 /* PCI-Ex Control */
+#define E1000_GSCL_1    0x05B10 /* PCI-Ex Statistic Control #1 */
+#define E1000_GSCL_2    0x05B14 /* PCI-Ex Statistic Control #2 */
+#define E1000_GSCL_3    0x05B18 /* PCI-Ex Statistic Control #3 */
+#define E1000_GSCL_4    0x05B1C /* PCI-Ex Statistic Control #4 */
+#define E1000_FACTPS    0x05B30 /* Function Active and Power State to MNG */
+#define E1000_SWSM      0x05B50 /* SW Semaphore */
+#define E1000_FWSM      0x05B54 /* FW Semaphore */
+#define E1000_FFLT_DBG  0x05F04 /* Debug Register */
+#define E1000_HICR      0x08F00 /* Host Interface Control */
+
+/* RSS registers */
+#define E1000_CPUVEC    0x02C10 /* CPU Vector Register - RW */
+#define E1000_MRQC      0x05818 /* Multiple Receive Control - RW */
+#define E1000_RETA      0x05C00 /* Redirection Table - RW Array */
+#define E1000_RSSRK     0x05C80 /* RSS Random Key - RW Array */
+#define E1000_RSSIM     0x05864 /* RSS Interrupt Mask */
+#define E1000_RSSIR     0x05868 /* RSS Interrupt Request */
+/* Register Set (82542)
+ *
+ * Some of the 82542 registers are located at different offsets than they are
+ * in more current versions of the 8254x. Despite the difference in location,
+ * the registers function in the same manner.
+ */
+#define E1000_82542_CTRL     E1000_CTRL
+#define E1000_82542_CTRL_DUP E1000_CTRL_DUP
+#define E1000_82542_STATUS   E1000_STATUS
+#define E1000_82542_EECD     E1000_EECD
+#define E1000_82542_EERD     E1000_EERD
+#define E1000_82542_CTRL_EXT E1000_CTRL_EXT
+#define E1000_82542_FLA      E1000_FLA
+#define E1000_82542_MDIC     E1000_MDIC
+#define E1000_82542_SCTL     E1000_SCTL
+#define E1000_82542_FEXTNVM  E1000_FEXTNVM
+#define E1000_82542_FCAL     E1000_FCAL
+#define E1000_82542_FCAH     E1000_FCAH
+#define E1000_82542_FCT      E1000_FCT
+#define E1000_82542_VET      E1000_VET
+#define E1000_82542_RA       0x00040
+#define E1000_82542_ICR      E1000_ICR
+#define E1000_82542_ITR      E1000_ITR
+#define E1000_82542_ICS      E1000_ICS
+#define E1000_82542_IMS      E1000_IMS
+#define E1000_82542_IMC      E1000_IMC
+#define E1000_82542_RCTL     E1000_RCTL
+#define E1000_82542_RDTR     0x00108
+#define E1000_82542_RDBAL    0x00110
+#define E1000_82542_RDBAH    0x00114
+#define E1000_82542_RDLEN    0x00118
+#define E1000_82542_RDH      0x00120
+#define E1000_82542_RDT      0x00128
+#define E1000_82542_RDTR0    E1000_82542_RDTR
+#define E1000_82542_RDBAL0   E1000_82542_RDBAL
+#define E1000_82542_RDBAH0   E1000_82542_RDBAH
+#define E1000_82542_RDLEN0   E1000_82542_RDLEN
+#define E1000_82542_RDH0     E1000_82542_RDH
+#define E1000_82542_RDT0     E1000_82542_RDT
+#define E1000_82542_SRRCTL(_n) (0x280C + ((_n) << 8)) /* Split and Replication
+                                                       * RX Control - RW */
+#define E1000_82542_DCA_RXCTRL(_n) (0x02814 + ((_n) << 8))
+#define E1000_82542_RDBAH3   0x02B04 /* RX Desc Base High Queue 3 - RW */
+#define E1000_82542_RDBAL3   0x02B00 /* RX Desc Low Queue 3 - RW */
+#define E1000_82542_RDLEN3   0x02B08 /* RX Desc Length Queue 3 - RW */
+#define E1000_82542_RDH3     0x02B10 /* RX Desc Head Queue 3 - RW */
+#define E1000_82542_RDT3     0x02B18 /* RX Desc Tail Queue 3 - RW */
+#define E1000_82542_RDBAL2   0x02A00 /* RX Desc Base Low Queue 2 - RW */
+#define E1000_82542_RDBAH2   0x02A04 /* RX Desc Base High Queue 2 - RW */
+#define E1000_82542_RDLEN2   0x02A08 /* RX Desc Length Queue 2 - RW */
+#define E1000_82542_RDH2     0x02A10 /* RX Desc Head Queue 2 - RW */
+#define E1000_82542_RDT2     0x02A18 /* RX Desc Tail Queue 2 - RW */
+#define E1000_82542_RDTR1    0x00130
+#define E1000_82542_RDBAL1   0x00138
+#define E1000_82542_RDBAH1   0x0013C
+#define E1000_82542_RDLEN1   0x00140
+#define E1000_82542_RDH1     0x00148
+#define E1000_82542_RDT1     0x00150
+#define E1000_82542_FCRTH    0x00160
+#define E1000_82542_FCRTL    0x00168
+#define E1000_82542_FCTTV    E1000_FCTTV
+#define E1000_82542_TXCW     E1000_TXCW
+#define E1000_82542_RXCW     E1000_RXCW
+#define E1000_82542_MTA      0x00200
+#define E1000_82542_TCTL     E1000_TCTL
+#define E1000_82542_TCTL_EXT E1000_TCTL_EXT
+#define E1000_82542_TIPG     E1000_TIPG
+#define E1000_82542_TDBAL    0x00420
+#define E1000_82542_TDBAH    0x00424
+#define E1000_82542_TDLEN    0x00428
+#define E1000_82542_TDH      0x00430
+#define E1000_82542_TDT      0x00438
+#define E1000_82542_TIDV     0x00440
+#define E1000_82542_TBT      E1000_TBT
+#define E1000_82542_AIT      E1000_AIT
+#define E1000_82542_VFTA     0x00600
+#define E1000_82542_LEDCTL   E1000_LEDCTL
+#define E1000_82542_PBA      E1000_PBA
+#define E1000_82542_PBS      E1000_PBS
+#define E1000_82542_EEMNGCTL E1000_EEMNGCTL
+#define E1000_82542_EEARBC   E1000_EEARBC
+#define E1000_82542_FLASHT   E1000_FLASHT
+#define E1000_82542_EEWR     E1000_EEWR
+#define E1000_82542_FLSWCTL  E1000_FLSWCTL
+#define E1000_82542_FLSWDATA E1000_FLSWDATA
+#define E1000_82542_FLSWCNT  E1000_FLSWCNT
+#define E1000_82542_FLOP     E1000_FLOP
+#define E1000_82542_EXTCNF_CTRL  E1000_EXTCNF_CTRL
+#define E1000_82542_EXTCNF_SIZE  E1000_EXTCNF_SIZE
+#define E1000_82542_PHY_CTRL E1000_PHY_CTRL
+#define E1000_82542_ERT      E1000_ERT
+#define E1000_82542_RXDCTL   E1000_RXDCTL
+#define E1000_82542_RXDCTL1  E1000_RXDCTL1
+#define E1000_82542_RADV     E1000_RADV
+#define E1000_82542_RSRPD    E1000_RSRPD
+#define E1000_82542_TXDMAC   E1000_TXDMAC
+#define E1000_82542_KABGTXD  E1000_KABGTXD
+#define E1000_82542_TDFHS    E1000_TDFHS
+#define E1000_82542_TDFTS    E1000_TDFTS
+#define E1000_82542_TDFPC    E1000_TDFPC
+#define E1000_82542_TXDCTL   E1000_TXDCTL
+#define E1000_82542_TADV     E1000_TADV
+#define E1000_82542_TSPMT    E1000_TSPMT
+#define E1000_82542_CRCERRS  E1000_CRCERRS
+#define E1000_82542_ALGNERRC E1000_ALGNERRC
+#define E1000_82542_SYMERRS  E1000_SYMERRS
+#define E1000_82542_RXERRC   E1000_RXERRC
+#define E1000_82542_MPC      E1000_MPC
+#define E1000_82542_SCC      E1000_SCC
+#define E1000_82542_ECOL     E1000_ECOL
+#define E1000_82542_MCC      E1000_MCC
+#define E1000_82542_LATECOL  E1000_LATECOL
+#define E1000_82542_COLC     E1000_COLC
+#define E1000_82542_DC       E1000_DC
+#define E1000_82542_TNCRS    E1000_TNCRS
+#define E1000_82542_SEC      E1000_SEC
+#define E1000_82542_CEXTERR  E1000_CEXTERR
+#define E1000_82542_RLEC     E1000_RLEC
+#define E1000_82542_XONRXC   E1000_XONRXC
+#define E1000_82542_XONTXC   E1000_XONTXC
+#define E1000_82542_XOFFRXC  E1000_XOFFRXC
+#define E1000_82542_XOFFTXC  E1000_XOFFTXC
+#define E1000_82542_FCRUC    E1000_FCRUC
+#define E1000_82542_PRC64    E1000_PRC64
+#define E1000_82542_PRC127   E1000_PRC127
+#define E1000_82542_PRC255   E1000_PRC255
+#define E1000_82542_PRC511   E1000_PRC511
+#define E1000_82542_PRC1023  E1000_PRC1023
+#define E1000_82542_PRC1522  E1000_PRC1522
+#define E1000_82542_GPRC     E1000_GPRC
+#define E1000_82542_BPRC     E1000_BPRC
+#define E1000_82542_MPRC     E1000_MPRC
+#define E1000_82542_GPTC     E1000_GPTC
+#define E1000_82542_GORCL    E1000_GORCL
+#define E1000_82542_GORCH    E1000_GORCH
+#define E1000_82542_GOTCL    E1000_GOTCL
+#define E1000_82542_GOTCH    E1000_GOTCH
+#define E1000_82542_RNBC     E1000_RNBC
+#define E1000_82542_RUC      E1000_RUC
+#define E1000_82542_RFC      E1000_RFC
+#define E1000_82542_ROC      E1000_ROC
+#define E1000_82542_RJC      E1000_RJC
+#define E1000_82542_MGTPRC   E1000_MGTPRC
+#define E1000_82542_MGTPDC   E1000_MGTPDC
+#define E1000_82542_MGTPTC   E1000_MGTPTC
+#define E1000_82542_TORL     E1000_TORL
+#define E1000_82542_TORH     E1000_TORH
+#define E1000_82542_TOTL     E1000_TOTL
+#define E1000_82542_TOTH     E1000_TOTH
+#define E1000_82542_TPR      E1000_TPR
+#define E1000_82542_TPT      E1000_TPT
+#define E1000_82542_PTC64    E1000_PTC64
+#define E1000_82542_PTC127   E1000_PTC127
+#define E1000_82542_PTC255   E1000_PTC255
+#define E1000_82542_PTC511   E1000_PTC511
+#define E1000_82542_PTC1023  E1000_PTC1023
+#define E1000_82542_PTC1522  E1000_PTC1522
+#define E1000_82542_MPTC     E1000_MPTC
+#define E1000_82542_BPTC     E1000_BPTC
+#define E1000_82542_TSCTC    E1000_TSCTC
+#define E1000_82542_TSCTFC   E1000_TSCTFC
+#define E1000_82542_RXCSUM   E1000_RXCSUM
+#define E1000_82542_WUC      E1000_WUC
+#define E1000_82542_WUFC     E1000_WUFC
+#define E1000_82542_WUS      E1000_WUS
+#define E1000_82542_MANC     E1000_MANC
+#define E1000_82542_IPAV     E1000_IPAV
+#define E1000_82542_IP4AT    E1000_IP4AT
+#define E1000_82542_IP6AT    E1000_IP6AT
+#define E1000_82542_WUPL     E1000_WUPL
+#define E1000_82542_WUPM     E1000_WUPM
+#define E1000_82542_FFLT     E1000_FFLT
+#define E1000_82542_TDFH     0x08010
+#define E1000_82542_TDFT     0x08018
+#define E1000_82542_FFMT     E1000_FFMT
+#define E1000_82542_FFVT     E1000_FFVT
+#define E1000_82542_HOST_IF  E1000_HOST_IF
+#define E1000_82542_IAM         E1000_IAM
+#define E1000_82542_EEMNGCTL    E1000_EEMNGCTL
+#define E1000_82542_PSRCTL      E1000_PSRCTL
+#define E1000_82542_RAID        E1000_RAID
+#define E1000_82542_TARC0       E1000_TARC0
+#define E1000_82542_TDBAL1      E1000_TDBAL1
+#define E1000_82542_TDBAH1      E1000_TDBAH1
+#define E1000_82542_TDLEN1      E1000_TDLEN1
+#define E1000_82542_TDH1        E1000_TDH1
+#define E1000_82542_TDT1        E1000_TDT1
+#define E1000_82542_TXDCTL1     E1000_TXDCTL1
+#define E1000_82542_TARC1       E1000_TARC1
+#define E1000_82542_RFCTL       E1000_RFCTL
+#define E1000_82542_GCR         E1000_GCR
+#define E1000_82542_GSCL_1      E1000_GSCL_1
+#define E1000_82542_GSCL_2      E1000_GSCL_2
+#define E1000_82542_GSCL_3      E1000_GSCL_3
+#define E1000_82542_GSCL_4      E1000_GSCL_4
+#define E1000_82542_FACTPS      E1000_FACTPS
+#define E1000_82542_SWSM        E1000_SWSM
+#define E1000_82542_FWSM        E1000_FWSM
+#define E1000_82542_FFLT_DBG    E1000_FFLT_DBG
+#define E1000_82542_IAC         E1000_IAC
+#define E1000_82542_ICRXPTC     E1000_ICRXPTC
+#define E1000_82542_ICRXATC     E1000_ICRXATC
+#define E1000_82542_ICTXPTC     E1000_ICTXPTC
+#define E1000_82542_ICTXATC     E1000_ICTXATC
+#define E1000_82542_ICTXQEC     E1000_ICTXQEC
+#define E1000_82542_ICTXQMTC    E1000_ICTXQMTC
+#define E1000_82542_ICRXDMTC    E1000_ICRXDMTC
+#define E1000_82542_ICRXOC      E1000_ICRXOC
+#define E1000_82542_HICR        E1000_HICR
+
+#define E1000_82542_CPUVEC      E1000_CPUVEC
+#define E1000_82542_MRQC        E1000_MRQC
+#define E1000_82542_RETA        E1000_RETA
+#define E1000_82542_RSSRK       E1000_RSSRK
+#define E1000_82542_RSSIM       E1000_RSSIM
+#define E1000_82542_RSSIR       E1000_RSSIR
+#define E1000_82542_KUMCTRLSTA  E1000_KUMCTRLSTA
+#define E1000_82542_SW_FW_SYNC  E1000_SW_FW_SYNC
+#define E1000_82542_MANC2H      E1000_MANC2H
+
+/* Statistics counters collected by the MAC */
+struct e1000_hw_stats {
+	u64		crcerrs;
+	u64		algnerrc;
+	u64		symerrs;
+	u64		rxerrc;
+	u64		txerrc;
+	u64		mpc;
+	u64		scc;
+	u64		ecol;
+	u64		mcc;
+	u64		latecol;
+	u64		colc;
+	u64		dc;
+	u64		tncrs;
+	u64		sec;
+	u64		cexterr;
+	u64		rlec;
+	u64		xonrxc;
+	u64		xontxc;
+	u64		xoffrxc;
+	u64		xofftxc;
+	u64		fcruc;
+	u64		prc64;
+	u64		prc127;
+	u64		prc255;
+	u64		prc511;
+	u64		prc1023;
+	u64		prc1522;
+	u64		gprc;
+	u64		bprc;
+	u64		mprc;
+	u64		gptc;
+	u64		gorcl;
+	u64		gorch;
+	u64		gotcl;
+	u64		gotch;
+	u64		rnbc;
+	u64		ruc;
+	u64		rfc;
+	u64		roc;
+	u64		rlerrc;
+	u64		rjc;
+	u64		mgprc;
+	u64		mgpdc;
+	u64		mgptc;
+	u64		torl;
+	u64		torh;
+	u64		totl;
+	u64		toth;
+	u64		tpr;
+	u64		tpt;
+	u64		ptc64;
+	u64		ptc127;
+	u64		ptc255;
+	u64		ptc511;
+	u64		ptc1023;
+	u64		ptc1522;
+	u64		mptc;
+	u64		bptc;
+	u64		tsctc;
+	u64		tsctfc;
+	u64		iac;
+	u64		icrxptc;
+	u64		icrxatc;
+	u64		ictxptc;
+	u64		ictxatc;
+	u64		ictxqec;
+	u64		ictxqmtc;
+	u64		icrxdmtc;
+	u64		icrxoc;
+};
+
+/* Structure containing variables used by the shared code (e1000_hw.c) */
+struct e1000_hw {
+	u8 __iomem		*hw_addr;
+	u8 __iomem		*flash_address;
+	e1000_mac_type		mac_type;
+	e1000_phy_type		phy_type;
+	u32		phy_init_script;
+	e1000_media_type	media_type;
+	void			*back;
+	struct e1000_shadow_ram	*eeprom_shadow_ram;
+	u32		flash_bank_size;
+	u32		flash_base_addr;
+	e1000_fc_type		fc;
+	e1000_bus_speed		bus_speed;
+	e1000_bus_width		bus_width;
+	e1000_bus_type		bus_type;
+	struct e1000_eeprom_info eeprom;
+	e1000_ms_type		master_slave;
+	e1000_ms_type		original_master_slave;
+	e1000_ffe_config	ffe_config_state;
+	u32		asf_firmware_present;
+	u32		eeprom_semaphore_present;
+	u32		swfw_sync_present;
+	u32		swfwhw_semaphore_present;
+	unsigned long		io_base;
+	u32		phy_id;
+	u32		phy_revision;
+	u32		phy_addr;
+	u32		original_fc;
+	u32		txcw;
+	u32		autoneg_failed;
+	u32		max_frame_size;
+	u32		min_frame_size;
+	u32		mc_filter_type;
+	u32		num_mc_addrs;
+	u32		collision_delta;
+	u32		tx_packet_delta;
+	u32		ledctl_default;
+	u32		ledctl_mode1;
+	u32		ledctl_mode2;
+	bool			tx_pkt_filtering;
+	struct e1000_host_mng_dhcp_cookie mng_cookie;
+	u16		phy_spd_default;
+	u16		autoneg_advertised;
+	u16		pci_cmd_word;
+	u16		fc_high_water;
+	u16		fc_low_water;
+	u16		fc_pause_time;
+	u16		current_ifs_val;
+	u16		ifs_min_val;
+	u16		ifs_max_val;
+	u16		ifs_step_size;
+	u16		ifs_ratio;
+	u16		device_id;
+	u16		vendor_id;
+	u16		subsystem_id;
+	u16		subsystem_vendor_id;
+	u8			revision_id;
+	u8			autoneg;
+	u8			mdix;
+	u8			forced_speed_duplex;
+	u8			wait_autoneg_complete;
+	u8			dma_fairness;
+	u8			mac_addr[NODE_ADDRESS_SIZE];
+	u8			perm_mac_addr[NODE_ADDRESS_SIZE];
+	bool			disable_polarity_correction;
+	bool			speed_downgraded;
+	e1000_smart_speed	smart_speed;
+	e1000_dsp_config	dsp_config_state;
+	bool			get_link_status;
+	bool			serdes_link_down;
+	bool			tbi_compatibility_en;
+	bool			tbi_compatibility_on;
+	bool			laa_is_present;
+	bool			phy_reset_disable;
+	bool			initialize_hw_bits_disable;
+	bool			fc_send_xon;
+	bool			fc_strict_ieee;
+	bool			report_tx_early;
+	bool			adaptive_ifs;
+	bool			ifs_params_forced;
+	bool			in_ifs_mode;
+	bool			mng_reg_access_disabled;
+	bool			leave_av_bit_off;
+	bool			kmrn_lock_loss_workaround_disabled;
+	bool			bad_tx_carr_stats_fd;
+	bool			has_manc2h;
+	bool			rx_needs_kicking;
+	bool			has_smbus;
+};
+
+
+#define E1000_EEPROM_SWDPIN0   0x0001   /* SWDPIN 0 EEPROM Value */
+#define E1000_EEPROM_LED_LOGIC 0x0020   /* Led Logic Word */
+#define E1000_EEPROM_RW_REG_DATA   16   /* Offset to data in EEPROM read/write registers */
+#define E1000_EEPROM_RW_REG_DONE   2    /* Offset to READ/WRITE done bit */
+#define E1000_EEPROM_RW_REG_START  1    /* First bit for telling part to start operation */
+#define E1000_EEPROM_RW_ADDR_SHIFT 2    /* Shift to the address bits */
+#define E1000_EEPROM_POLL_WRITE    1    /* Flag for polling for write complete */
+#define E1000_EEPROM_POLL_READ     0    /* Flag for polling for read complete */
+/* Register Bit Masks */
+/* Device Control */
+#define E1000_CTRL_FD       0x00000001  /* Full duplex.0=half; 1=full */
+#define E1000_CTRL_BEM      0x00000002  /* Endian Mode.0=little,1=big */
+#define E1000_CTRL_PRIOR    0x00000004  /* Priority on PCI. 0=rx,1=fair */
+#define E1000_CTRL_GIO_MASTER_DISABLE 0x00000004 /*Blocks new Master requests */
+#define E1000_CTRL_LRST     0x00000008  /* Link reset. 0=normal,1=reset */
+#define E1000_CTRL_TME      0x00000010  /* Test mode. 0=normal,1=test */
+#define E1000_CTRL_SLE      0x00000020  /* Serial Link on 0=dis,1=en */
+#define E1000_CTRL_ASDE     0x00000020  /* Auto-speed detect enable */
+#define E1000_CTRL_SLU      0x00000040  /* Set link up (Force Link) */
+#define E1000_CTRL_ILOS     0x00000080  /* Invert Loss-Of Signal */
+#define E1000_CTRL_SPD_SEL  0x00000300  /* Speed Select Mask */
+#define E1000_CTRL_SPD_10   0x00000000  /* Force 10Mb */
+#define E1000_CTRL_SPD_100  0x00000100  /* Force 100Mb */
+#define E1000_CTRL_SPD_1000 0x00000200  /* Force 1Gb */
+#define E1000_CTRL_BEM32    0x00000400  /* Big Endian 32 mode */
+#define E1000_CTRL_FRCSPD   0x00000800  /* Force Speed */
+#define E1000_CTRL_FRCDPX   0x00001000  /* Force Duplex */
+#define E1000_CTRL_D_UD_EN  0x00002000  /* Dock/Undock enable */
+#define E1000_CTRL_D_UD_POLARITY 0x00004000 /* Defined polarity of Dock/Undock indication in SDP[0] */
+#define E1000_CTRL_FORCE_PHY_RESET 0x00008000 /* Reset both PHY ports, through PHYRST_N pin */
+#define E1000_CTRL_EXT_LINK_EN 0x00010000 /* enable link status from external LINK_0 and LINK_1 pins */
+#define E1000_CTRL_SWDPIN0  0x00040000  /* SWDPIN 0 value */
+#define E1000_CTRL_SWDPIN1  0x00080000  /* SWDPIN 1 value */
+#define E1000_CTRL_SWDPIN2  0x00100000  /* SWDPIN 2 value */
+#define E1000_CTRL_SWDPIN3  0x00200000  /* SWDPIN 3 value */
+#define E1000_CTRL_SWDPIO0  0x00400000  /* SWDPIN 0 Input or output */
+#define E1000_CTRL_SWDPIO1  0x00800000  /* SWDPIN 1 input or output */
+#define E1000_CTRL_SWDPIO2  0x01000000  /* SWDPIN 2 input or output */
+#define E1000_CTRL_SWDPIO3  0x02000000  /* SWDPIN 3 input or output */
+#define E1000_CTRL_RST      0x04000000  /* Global reset */
+#define E1000_CTRL_RFCE     0x08000000  /* Receive Flow Control enable */
+#define E1000_CTRL_TFCE     0x10000000  /* Transmit flow control enable */
+#define E1000_CTRL_RTE      0x20000000  /* Routing tag enable */
+#define E1000_CTRL_VME      0x40000000  /* IEEE VLAN mode enable */
+#define E1000_CTRL_PHY_RST  0x80000000  /* PHY Reset */
+#define E1000_CTRL_SW2FW_INT 0x02000000  /* Initiate an interrupt to manageability engine */
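+/* Illustrative sketch only (not taken from the driver sources): forcing
+ * 100 Mb/s full duplex through this register would typically clear
+ * E1000_CTRL_SPD_SEL and then set
+ *   E1000_CTRL_SLU | E1000_CTRL_FRCSPD | E1000_CTRL_FRCDPX |
+ *   E1000_CTRL_SPD_100 | E1000_CTRL_FD
+ * before writing CTRL back.
+ */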
+
+/* Device Status */
+#define E1000_STATUS_FD         0x00000001      /* Full duplex.0=half,1=full */
+#define E1000_STATUS_LU         0x00000002      /* Link up.0=no,1=link */
+#define E1000_STATUS_FUNC_MASK  0x0000000C      /* PCI Function Mask */
+#define E1000_STATUS_FUNC_SHIFT 2
+#define E1000_STATUS_FUNC_0     0x00000000      /* Function 0 */
+#define E1000_STATUS_FUNC_1     0x00000004      /* Function 1 */
+#define E1000_STATUS_TXOFF      0x00000010      /* transmission paused */
+#define E1000_STATUS_TBIMODE    0x00000020      /* TBI mode */
+#define E1000_STATUS_SPEED_MASK 0x000000C0
+#define E1000_STATUS_SPEED_10   0x00000000      /* Speed 10Mb/s */
+#define E1000_STATUS_SPEED_100  0x00000040      /* Speed 100Mb/s */
+#define E1000_STATUS_SPEED_1000 0x00000080      /* Speed 1000Mb/s */
+#define E1000_STATUS_LAN_INIT_DONE 0x00000200   /* Lan Init Completion
+                                                   by EEPROM/Flash */
+#define E1000_STATUS_ASDV       0x00000300      /* Auto speed detect value */
+#define E1000_STATUS_DOCK_CI    0x00000800      /* Change in Dock/Undock state. Clear on write '0'. */
+#define E1000_STATUS_GIO_MASTER_ENABLE 0x00080000 /* Status of Master requests. */
+#define E1000_STATUS_MTXCKOK    0x00000400      /* MTX clock running OK */
+#define E1000_STATUS_PCI66      0x00000800      /* In 66Mhz slot */
+#define E1000_STATUS_BUS64      0x00001000      /* In 64 bit slot */
+#define E1000_STATUS_PCIX_MODE  0x00002000      /* PCI-X mode */
+#define E1000_STATUS_PCIX_SPEED 0x0000C000      /* PCI-X bus speed */
+#define E1000_STATUS_BMC_SKU_0  0x00100000 /* BMC USB redirect disabled */
+#define E1000_STATUS_BMC_SKU_1  0x00200000 /* BMC SRAM disabled */
+#define E1000_STATUS_BMC_SKU_2  0x00400000 /* BMC SDRAM disabled */
+#define E1000_STATUS_BMC_CRYPTO 0x00800000 /* BMC crypto disabled */
+#define E1000_STATUS_BMC_LITE   0x01000000 /* BMC external code execution disabled */
+#define E1000_STATUS_RGMII_ENABLE 0x02000000 /* RGMII disabled */
+#define E1000_STATUS_FUSE_8       0x04000000
+#define E1000_STATUS_FUSE_9       0x08000000
+#define E1000_STATUS_SERDES0_DIS  0x10000000 /* SERDES disabled on port 0 */
+#define E1000_STATUS_SERDES1_DIS  0x20000000 /* SERDES disabled on port 1 */
+
+/* Constants used to interpret the masked PCI-X bus speed. */
+#define E1000_STATUS_PCIX_SPEED_66  0x00000000 /* PCI-X bus speed  50-66 MHz */
+#define E1000_STATUS_PCIX_SPEED_100 0x00004000 /* PCI-X bus speed  66-100 MHz */
+#define E1000_STATUS_PCIX_SPEED_133 0x00008000 /* PCI-X bus speed 100-133 MHz */
+
+/* EEPROM/Flash Control */
+#define E1000_EECD_SK        0x00000001 /* EEPROM Clock */
+#define E1000_EECD_CS        0x00000002 /* EEPROM Chip Select */
+#define E1000_EECD_DI        0x00000004 /* EEPROM Data In */
+#define E1000_EECD_DO        0x00000008 /* EEPROM Data Out */
+#define E1000_EECD_FWE_MASK  0x00000030
+#define E1000_EECD_FWE_DIS   0x00000010 /* Disable FLASH writes */
+#define E1000_EECD_FWE_EN    0x00000020 /* Enable FLASH writes */
+#define E1000_EECD_FWE_SHIFT 4
+#define E1000_EECD_REQ       0x00000040 /* EEPROM Access Request */
+#define E1000_EECD_GNT       0x00000080 /* EEPROM Access Grant */
+#define E1000_EECD_PRES      0x00000100 /* EEPROM Present */
+#define E1000_EECD_SIZE      0x00000200 /* EEPROM Size (0=64 word 1=256 word) */
+#define E1000_EECD_ADDR_BITS 0x00000400 /* EEPROM Addressing bits based on type
+                                         * (0-small, 1-large) */
+#define E1000_EECD_TYPE      0x00002000 /* EEPROM Type (1-SPI, 0-Microwire) */
+#ifndef E1000_EEPROM_GRANT_ATTEMPTS
+#define E1000_EEPROM_GRANT_ATTEMPTS 1000 /* EEPROM # attempts to gain grant */
+#endif
+#define E1000_EECD_AUTO_RD          0x00000200  /* EEPROM Auto Read done */
+#define E1000_EECD_SIZE_EX_MASK     0x00007800  /* EEPROM Size */
+#define E1000_EECD_SIZE_EX_SHIFT    11
+#define E1000_EECD_NVADDS    0x00018000 /* NVM Address Size */
+#define E1000_EECD_SELSHAD   0x00020000 /* Select Shadow RAM */
+#define E1000_EECD_INITSRAM  0x00040000 /* Initialize Shadow RAM */
+#define E1000_EECD_FLUPD     0x00080000 /* Update FLASH */
+#define E1000_EECD_AUPDEN    0x00100000 /* Enable Autonomous FLASH update */
+#define E1000_EECD_SHADV     0x00200000 /* Shadow RAM Data Valid */
+#define E1000_EECD_SEC1VAL   0x00400000 /* Sector One Valid */
+#define E1000_EECD_SECVAL_SHIFT      22
+#define E1000_STM_OPCODE     0xDB00
+#define E1000_HICR_FW_RESET  0xC0
+
+#define E1000_SHADOW_RAM_WORDS     2048
+#define E1000_ICH_NVM_SIG_WORD     0x13
+#define E1000_ICH_NVM_SIG_MASK     0xC0
+
+/* EEPROM Read */
+#define E1000_EERD_START      0x00000001 /* Start Read */
+#define E1000_EERD_DONE       0x00000010 /* Read Done */
+#define E1000_EERD_ADDR_SHIFT 8
+#define E1000_EERD_ADDR_MASK  0x0000FF00 /* Read Address */
+#define E1000_EERD_DATA_SHIFT 16
+#define E1000_EERD_DATA_MASK  0xFFFF0000 /* Read Data */
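+/* Illustrative sketch only (not taken from the driver sources; "offset"
+ * and "eerd" are placeholder names): a word read through EERD would
+ * typically write
+ *   (offset << E1000_EERD_ADDR_SHIFT) | E1000_EERD_START,
+ * poll for E1000_EERD_DONE, then take the result from
+ *   (eerd & E1000_EERD_DATA_MASK) >> E1000_EERD_DATA_SHIFT.
+ */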
+
+/* SPI EEPROM Status Register */
+#define EEPROM_STATUS_RDY_SPI  0x01
+#define EEPROM_STATUS_WEN_SPI  0x02
+#define EEPROM_STATUS_BP0_SPI  0x04
+#define EEPROM_STATUS_BP1_SPI  0x08
+#define EEPROM_STATUS_WPEN_SPI 0x80
+
+/* Extended Device Control */
+#define E1000_CTRL_EXT_GPI0_EN   0x00000001 /* Maps SDP4 to GPI0 */
+#define E1000_CTRL_EXT_GPI1_EN   0x00000002 /* Maps SDP5 to GPI1 */
+#define E1000_CTRL_EXT_PHYINT_EN E1000_CTRL_EXT_GPI1_EN
+#define E1000_CTRL_EXT_GPI2_EN   0x00000004 /* Maps SDP6 to GPI2 */
+#define E1000_CTRL_EXT_GPI3_EN   0x00000008 /* Maps SDP7 to GPI3 */
+#define E1000_CTRL_EXT_SDP4_DATA 0x00000010 /* Value of SW Definable Pin 4 */
+#define E1000_CTRL_EXT_SDP5_DATA 0x00000020 /* Value of SW Definable Pin 5 */
+#define E1000_CTRL_EXT_PHY_INT   E1000_CTRL_EXT_SDP5_DATA
+#define E1000_CTRL_EXT_SDP6_DATA 0x00000040 /* Value of SW Definable Pin 6 */
+#define E1000_CTRL_EXT_SDP7_DATA 0x00000080 /* Value of SW Definable Pin 7 */
+#define E1000_CTRL_EXT_SDP4_DIR  0x00000100 /* Direction of SDP4 0=in 1=out */
+#define E1000_CTRL_EXT_SDP5_DIR  0x00000200 /* Direction of SDP5 0=in 1=out */
+#define E1000_CTRL_EXT_SDP6_DIR  0x00000400 /* Direction of SDP6 0=in 1=out */
+#define E1000_CTRL_EXT_SDP7_DIR  0x00000800 /* Direction of SDP7 0=in 1=out */
+#define E1000_CTRL_EXT_ASDCHK    0x00001000 /* Initiate an ASD sequence */
+#define E1000_CTRL_EXT_EE_RST    0x00002000 /* Reinitialize from EEPROM */
+#define E1000_CTRL_EXT_IPS       0x00004000 /* Invert Power State */
+#define E1000_CTRL_EXT_SPD_BYPS  0x00008000 /* Speed Select Bypass */
+#define E1000_CTRL_EXT_RO_DIS    0x00020000 /* Relaxed Ordering disable */
+#define E1000_CTRL_EXT_LINK_MODE_MASK 0x00C00000
+#define E1000_CTRL_EXT_LINK_MODE_GMII 0x00000000
+#define E1000_CTRL_EXT_LINK_MODE_TBI  0x00C00000
+#define E1000_CTRL_EXT_LINK_MODE_KMRN 0x00000000
+#define E1000_CTRL_EXT_LINK_MODE_SERDES  0x00C00000
+#define E1000_CTRL_EXT_LINK_MODE_SGMII   0x00800000
+#define E1000_CTRL_EXT_WR_WMARK_MASK  0x03000000
+#define E1000_CTRL_EXT_WR_WMARK_256   0x00000000
+#define E1000_CTRL_EXT_WR_WMARK_320   0x01000000
+#define E1000_CTRL_EXT_WR_WMARK_384   0x02000000
+#define E1000_CTRL_EXT_WR_WMARK_448   0x03000000
+#define E1000_CTRL_EXT_DRV_LOAD       0x10000000 /* Driver loaded bit for FW */
+#define E1000_CTRL_EXT_IAME           0x08000000 /* Interrupt acknowledge Auto-mask */
+#define E1000_CTRL_EXT_INT_TIMER_CLR  0x20000000 /* Clear Interrupt timers after IMS clear */
+#define E1000_CRTL_EXT_PB_PAREN       0x01000000 /* packet buffer parity error detection enabled */
+#define E1000_CTRL_EXT_DF_PAREN       0x02000000 /* descriptor FIFO parity error detection enable */
+#define E1000_CTRL_EXT_GHOST_PAREN    0x40000000
+
+/* MDI Control */
+#define E1000_MDIC_DATA_MASK 0x0000FFFF
+#define E1000_MDIC_REG_MASK  0x001F0000
+#define E1000_MDIC_REG_SHIFT 16
+#define E1000_MDIC_PHY_MASK  0x03E00000
+#define E1000_MDIC_PHY_SHIFT 21
+#define E1000_MDIC_OP_WRITE  0x04000000
+#define E1000_MDIC_OP_READ   0x08000000
+#define E1000_MDIC_READY     0x10000000
+#define E1000_MDIC_INT_EN    0x20000000
+#define E1000_MDIC_ERROR     0x40000000
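+/* Illustrative sketch only (not taken from the driver sources; "reg_addr"
+ * and "phy_addr" are placeholder names): a PHY register read would
+ * typically be started by writing
+ *   ((reg_addr << E1000_MDIC_REG_SHIFT) & E1000_MDIC_REG_MASK) |
+ *   ((phy_addr << E1000_MDIC_PHY_SHIFT) & E1000_MDIC_PHY_MASK) |
+ *   E1000_MDIC_OP_READ
+ * and completed by polling E1000_MDIC_READY, checking E1000_MDIC_ERROR
+ * and masking the result with E1000_MDIC_DATA_MASK.
+ */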
+
+#define E1000_KUMCTRLSTA_MASK           0x0000FFFF
+#define E1000_KUMCTRLSTA_OFFSET         0x001F0000
+#define E1000_KUMCTRLSTA_OFFSET_SHIFT   16
+#define E1000_KUMCTRLSTA_REN            0x00200000
+
+#define E1000_KUMCTRLSTA_OFFSET_FIFO_CTRL      0x00000000
+#define E1000_KUMCTRLSTA_OFFSET_CTRL           0x00000001
+#define E1000_KUMCTRLSTA_OFFSET_INB_CTRL       0x00000002
+#define E1000_KUMCTRLSTA_OFFSET_DIAG           0x00000003
+#define E1000_KUMCTRLSTA_OFFSET_TIMEOUTS       0x00000004
+#define E1000_KUMCTRLSTA_OFFSET_INB_PARAM      0x00000009
+#define E1000_KUMCTRLSTA_OFFSET_HD_CTRL        0x00000010
+#define E1000_KUMCTRLSTA_OFFSET_M2P_SERDES     0x0000001E
+#define E1000_KUMCTRLSTA_OFFSET_M2P_MODES      0x0000001F
+
+/* FIFO Control */
+#define E1000_KUMCTRLSTA_FIFO_CTRL_RX_BYPASS   0x00000008
+#define E1000_KUMCTRLSTA_FIFO_CTRL_TX_BYPASS   0x00000800
+
+/* In-Band Control */
+#define E1000_KUMCTRLSTA_INB_CTRL_LINK_STATUS_TX_TIMEOUT_DEFAULT    0x00000500
+#define E1000_KUMCTRLSTA_INB_CTRL_DIS_PADDING  0x00000010
+
+/* Half-Duplex Control */
+#define E1000_KUMCTRLSTA_HD_CTRL_10_100_DEFAULT 0x00000004
+#define E1000_KUMCTRLSTA_HD_CTRL_1000_DEFAULT  0x00000000
+
+#define E1000_KUMCTRLSTA_OFFSET_K0S_CTRL       0x0000001E
+
+#define E1000_KUMCTRLSTA_DIAG_FELPBK           0x2000
+#define E1000_KUMCTRLSTA_DIAG_NELPBK           0x1000
+
+#define E1000_KUMCTRLSTA_K0S_100_EN            0x2000
+#define E1000_KUMCTRLSTA_K0S_GBE_EN            0x1000
+#define E1000_KUMCTRLSTA_K0S_ENTRY_LATENCY_MASK   0x0003
+
+#define E1000_KABGTXD_BGSQLBIAS                0x00050000
+
+#define E1000_PHY_CTRL_SPD_EN                  0x00000001
+#define E1000_PHY_CTRL_D0A_LPLU                0x00000002
+#define E1000_PHY_CTRL_NOND0A_LPLU             0x00000004
+#define E1000_PHY_CTRL_NOND0A_GBE_DISABLE      0x00000008
+#define E1000_PHY_CTRL_GBE_DISABLE             0x00000040
+#define E1000_PHY_CTRL_B2B_EN                  0x00000080
+
+/* LED Control */
+#define E1000_LEDCTL_LED0_MODE_MASK       0x0000000F
+#define E1000_LEDCTL_LED0_MODE_SHIFT      0
+#define E1000_LEDCTL_LED0_BLINK_RATE      0x0000020
+#define E1000_LEDCTL_LED0_IVRT            0x00000040
+#define E1000_LEDCTL_LED0_BLINK           0x00000080
+#define E1000_LEDCTL_LED1_MODE_MASK       0x00000F00
+#define E1000_LEDCTL_LED1_MODE_SHIFT      8
+#define E1000_LEDCTL_LED1_BLINK_RATE      0x0002000
+#define E1000_LEDCTL_LED1_IVRT            0x00004000
+#define E1000_LEDCTL_LED1_BLINK           0x00008000
+#define E1000_LEDCTL_LED2_MODE_MASK       0x000F0000
+#define E1000_LEDCTL_LED2_MODE_SHIFT      16
+#define E1000_LEDCTL_LED2_BLINK_RATE      0x00200000
+#define E1000_LEDCTL_LED2_IVRT            0x00400000
+#define E1000_LEDCTL_LED2_BLINK           0x00800000
+#define E1000_LEDCTL_LED3_MODE_MASK       0x0F000000
+#define E1000_LEDCTL_LED3_MODE_SHIFT      24
+#define E1000_LEDCTL_LED3_BLINK_RATE      0x20000000
+#define E1000_LEDCTL_LED3_IVRT            0x40000000
+#define E1000_LEDCTL_LED3_BLINK           0x80000000
+
+#define E1000_LEDCTL_MODE_LINK_10_1000  0x0
+#define E1000_LEDCTL_MODE_LINK_100_1000 0x1
+#define E1000_LEDCTL_MODE_LINK_UP       0x2
+#define E1000_LEDCTL_MODE_ACTIVITY      0x3
+#define E1000_LEDCTL_MODE_LINK_ACTIVITY 0x4
+#define E1000_LEDCTL_MODE_LINK_10       0x5
+#define E1000_LEDCTL_MODE_LINK_100      0x6
+#define E1000_LEDCTL_MODE_LINK_1000     0x7
+#define E1000_LEDCTL_MODE_PCIX_MODE     0x8
+#define E1000_LEDCTL_MODE_FULL_DUPLEX   0x9
+#define E1000_LEDCTL_MODE_COLLISION     0xA
+#define E1000_LEDCTL_MODE_BUS_SPEED     0xB
+#define E1000_LEDCTL_MODE_BUS_SIZE      0xC
+#define E1000_LEDCTL_MODE_PAUSED        0xD
+#define E1000_LEDCTL_MODE_LED_ON        0xE
+#define E1000_LEDCTL_MODE_LED_OFF       0xF
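+/* Illustrative sketch only (not taken from the driver sources): putting
+ * LED0 into activity mode would typically combine the mask, shift and
+ * mode values above, e.g.
+ *   ledctl = (ledctl & ~E1000_LEDCTL_LED0_MODE_MASK) |
+ *            (E1000_LEDCTL_MODE_ACTIVITY << E1000_LEDCTL_LED0_MODE_SHIFT);
+ */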
+
+/* Receive Address */
+#define E1000_RAH_AV  0x80000000        /* Receive address valid */
+
+/* Interrupt Cause Read */
+#define E1000_ICR_TXDW          0x00000001 /* Transmit desc written back */
+#define E1000_ICR_TXQE          0x00000002 /* Transmit Queue empty */
+#define E1000_ICR_LSC           0x00000004 /* Link Status Change */
+#define E1000_ICR_RXSEQ         0x00000008 /* rx sequence error */
+#define E1000_ICR_RXDMT0        0x00000010 /* rx desc min. threshold (0) */
+#define E1000_ICR_RXO           0x00000040 /* rx overrun */
+#define E1000_ICR_RXT0          0x00000080 /* rx timer intr (ring 0) */
+#define E1000_ICR_MDAC          0x00000200 /* MDIO access complete */
+#define E1000_ICR_RXCFG         0x00000400 /* RX /c/ ordered set */
+#define E1000_ICR_GPI_EN0       0x00000800 /* GP Int 0 */
+#define E1000_ICR_GPI_EN1       0x00001000 /* GP Int 1 */
+#define E1000_ICR_GPI_EN2       0x00002000 /* GP Int 2 */
+#define E1000_ICR_GPI_EN3       0x00004000 /* GP Int 3 */
+#define E1000_ICR_TXD_LOW       0x00008000
+#define E1000_ICR_SRPD          0x00010000
+#define E1000_ICR_ACK           0x00020000 /* Receive Ack frame */
+#define E1000_ICR_MNG           0x00040000 /* Manageability event */
+#define E1000_ICR_DOCK          0x00080000 /* Dock/Undock */
+#define E1000_ICR_INT_ASSERTED  0x80000000 /* If this bit asserted, the driver should claim the interrupt */
+#define E1000_ICR_RXD_FIFO_PAR0 0x00100000 /* queue 0 Rx descriptor FIFO parity error */
+#define E1000_ICR_TXD_FIFO_PAR0 0x00200000 /* queue 0 Tx descriptor FIFO parity error */
+#define E1000_ICR_HOST_ARB_PAR  0x00400000 /* host arb read buffer parity error */
+#define E1000_ICR_PB_PAR        0x00800000 /* packet buffer parity error */
+#define E1000_ICR_RXD_FIFO_PAR1 0x01000000 /* queue 1 Rx descriptor FIFO parity error */
+#define E1000_ICR_TXD_FIFO_PAR1 0x02000000 /* queue 1 Tx descriptor FIFO parity error */
+#define E1000_ICR_ALL_PARITY    0x03F00000 /* all parity error bits */
+#define E1000_ICR_DSW           0x00000020 /* FW changed the status of DISSW bit in the FWSM */
+#define E1000_ICR_PHYINT        0x00001000 /* LAN connected device generates an interrupt */
+#define E1000_ICR_EPRST         0x00100000 /* ME hardware reset occurs */
+
+/* Interrupt Cause Set */
+#define E1000_ICS_TXDW      E1000_ICR_TXDW      /* Transmit desc written back */
+#define E1000_ICS_TXQE      E1000_ICR_TXQE      /* Transmit Queue empty */
+#define E1000_ICS_LSC       E1000_ICR_LSC       /* Link Status Change */
+#define E1000_ICS_RXSEQ     E1000_ICR_RXSEQ     /* rx sequence error */
+#define E1000_ICS_RXDMT0    E1000_ICR_RXDMT0    /* rx desc min. threshold */
+#define E1000_ICS_RXO       E1000_ICR_RXO       /* rx overrun */
+#define E1000_ICS_RXT0      E1000_ICR_RXT0      /* rx timer intr */
+#define E1000_ICS_MDAC      E1000_ICR_MDAC      /* MDIO access complete */
+#define E1000_ICS_RXCFG     E1000_ICR_RXCFG     /* RX /c/ ordered set */
+#define E1000_ICS_GPI_EN0   E1000_ICR_GPI_EN0   /* GP Int 0 */
+#define E1000_ICS_GPI_EN1   E1000_ICR_GPI_EN1   /* GP Int 1 */
+#define E1000_ICS_GPI_EN2   E1000_ICR_GPI_EN2   /* GP Int 2 */
+#define E1000_ICS_GPI_EN3   E1000_ICR_GPI_EN3   /* GP Int 3 */
+#define E1000_ICS_TXD_LOW   E1000_ICR_TXD_LOW
+#define E1000_ICS_SRPD      E1000_ICR_SRPD
+#define E1000_ICS_ACK       E1000_ICR_ACK       /* Receive Ack frame */
+#define E1000_ICS_MNG       E1000_ICR_MNG       /* Manageability event */
+#define E1000_ICS_DOCK      E1000_ICR_DOCK      /* Dock/Undock */
+#define E1000_ICS_RXD_FIFO_PAR0 E1000_ICR_RXD_FIFO_PAR0 /* queue 0 Rx descriptor FIFO parity error */
+#define E1000_ICS_TXD_FIFO_PAR0 E1000_ICR_TXD_FIFO_PAR0 /* queue 0 Tx descriptor FIFO parity error */
+#define E1000_ICS_HOST_ARB_PAR  E1000_ICR_HOST_ARB_PAR  /* host arb read buffer parity error */
+#define E1000_ICS_PB_PAR        E1000_ICR_PB_PAR        /* packet buffer parity error */
+#define E1000_ICS_RXD_FIFO_PAR1 E1000_ICR_RXD_FIFO_PAR1 /* queue 1 Rx descriptor FIFO parity error */
+#define E1000_ICS_TXD_FIFO_PAR1 E1000_ICR_TXD_FIFO_PAR1 /* queue 1 Tx descriptor FIFO parity error */
+#define E1000_ICS_DSW       E1000_ICR_DSW
+#define E1000_ICS_PHYINT    E1000_ICR_PHYINT
+#define E1000_ICS_EPRST     E1000_ICR_EPRST
+
+/* Interrupt Mask Set */
+#define E1000_IMS_TXDW      E1000_ICR_TXDW      /* Transmit desc written back */
+#define E1000_IMS_TXQE      E1000_ICR_TXQE      /* Transmit Queue empty */
+#define E1000_IMS_LSC       E1000_ICR_LSC       /* Link Status Change */
+#define E1000_IMS_RXSEQ     E1000_ICR_RXSEQ     /* rx sequence error */
+#define E1000_IMS_RXDMT0    E1000_ICR_RXDMT0    /* rx desc min. threshold */
+#define E1000_IMS_RXO       E1000_ICR_RXO       /* rx overrun */
+#define E1000_IMS_RXT0      E1000_ICR_RXT0      /* rx timer intr */
+#define E1000_IMS_MDAC      E1000_ICR_MDAC      /* MDIO access complete */
+#define E1000_IMS_RXCFG     E1000_ICR_RXCFG     /* RX /c/ ordered set */
+#define E1000_IMS_GPI_EN0   E1000_ICR_GPI_EN0   /* GP Int 0 */
+#define E1000_IMS_GPI_EN1   E1000_ICR_GPI_EN1   /* GP Int 1 */
+#define E1000_IMS_GPI_EN2   E1000_ICR_GPI_EN2   /* GP Int 2 */
+#define E1000_IMS_GPI_EN3   E1000_ICR_GPI_EN3   /* GP Int 3 */
+#define E1000_IMS_TXD_LOW   E1000_ICR_TXD_LOW
+#define E1000_IMS_SRPD      E1000_ICR_SRPD
+#define E1000_IMS_ACK       E1000_ICR_ACK       /* Receive Ack frame */
+#define E1000_IMS_MNG       E1000_ICR_MNG       /* Manageability event */
+#define E1000_IMS_DOCK      E1000_ICR_DOCK      /* Dock/Undock */
+#define E1000_IMS_RXD_FIFO_PAR0 E1000_ICR_RXD_FIFO_PAR0 /* queue 0 Rx descriptor FIFO parity error */
+#define E1000_IMS_TXD_FIFO_PAR0 E1000_ICR_TXD_FIFO_PAR0 /* queue 0 Tx descriptor FIFO parity error */
+#define E1000_IMS_HOST_ARB_PAR  E1000_ICR_HOST_ARB_PAR  /* host arb read buffer parity error */
+#define E1000_IMS_PB_PAR        E1000_ICR_PB_PAR        /* packet buffer parity error */
+#define E1000_IMS_RXD_FIFO_PAR1 E1000_ICR_RXD_FIFO_PAR1 /* queue 1 Rx descriptor FIFO parity error */
+#define E1000_IMS_TXD_FIFO_PAR1 E1000_ICR_TXD_FIFO_PAR1 /* queue 1 Tx descriptor FIFO parity error */
+#define E1000_IMS_DSW       E1000_ICR_DSW
+#define E1000_IMS_PHYINT    E1000_ICR_PHYINT
+#define E1000_IMS_EPRST     E1000_ICR_EPRST
+
+/* Interrupt Mask Clear */
+#define E1000_IMC_TXDW      E1000_ICR_TXDW      /* Transmit desc written back */
+#define E1000_IMC_TXQE      E1000_ICR_TXQE      /* Transmit Queue empty */
+#define E1000_IMC_LSC       E1000_ICR_LSC       /* Link Status Change */
+#define E1000_IMC_RXSEQ     E1000_ICR_RXSEQ     /* rx sequence error */
+#define E1000_IMC_RXDMT0    E1000_ICR_RXDMT0    /* rx desc min. threshold */
+#define E1000_IMC_RXO       E1000_ICR_RXO       /* rx overrun */
+#define E1000_IMC_RXT0      E1000_ICR_RXT0      /* rx timer intr */
+#define E1000_IMC_MDAC      E1000_ICR_MDAC      /* MDIO access complete */
+#define E1000_IMC_RXCFG     E1000_ICR_RXCFG     /* RX /c/ ordered set */
+#define E1000_IMC_GPI_EN0   E1000_ICR_GPI_EN0   /* GP Int 0 */
+#define E1000_IMC_GPI_EN1   E1000_ICR_GPI_EN1   /* GP Int 1 */
+#define E1000_IMC_GPI_EN2   E1000_ICR_GPI_EN2   /* GP Int 2 */
+#define E1000_IMC_GPI_EN3   E1000_ICR_GPI_EN3   /* GP Int 3 */
+#define E1000_IMC_TXD_LOW   E1000_ICR_TXD_LOW
+#define E1000_IMC_SRPD      E1000_ICR_SRPD
+#define E1000_IMC_ACK       E1000_ICR_ACK       /* Receive Ack frame */
+#define E1000_IMC_MNG       E1000_ICR_MNG       /* Manageability event */
+#define E1000_IMC_DOCK      E1000_ICR_DOCK      /* Dock/Undock */
+#define E1000_IMC_RXD_FIFO_PAR0 E1000_ICR_RXD_FIFO_PAR0 /* queue 0 Rx descriptor FIFO parity error */
+#define E1000_IMC_TXD_FIFO_PAR0 E1000_ICR_TXD_FIFO_PAR0 /* queue 0 Tx descriptor FIFO parity error */
+#define E1000_IMC_HOST_ARB_PAR  E1000_ICR_HOST_ARB_PAR  /* host arb read buffer parity error */
+#define E1000_IMC_PB_PAR        E1000_ICR_PB_PAR        /* packet buffer parity error */
+#define E1000_IMC_RXD_FIFO_PAR1 E1000_ICR_RXD_FIFO_PAR1 /* queue 1 Rx descriptor FIFO parity error */
+#define E1000_IMC_TXD_FIFO_PAR1 E1000_ICR_TXD_FIFO_PAR1 /* queue 1 Tx descriptor FIFO parity error */
+#define E1000_IMC_DSW       E1000_ICR_DSW
+#define E1000_IMC_PHYINT    E1000_ICR_PHYINT
+#define E1000_IMC_EPRST     E1000_ICR_EPRST
+
+/* Receive Control */
+#define E1000_RCTL_RST            0x00000001    /* Software reset */
+#define E1000_RCTL_EN             0x00000002    /* enable */
+#define E1000_RCTL_SBP            0x00000004    /* store bad packet */
+#define E1000_RCTL_UPE            0x00000008    /* unicast promiscuous enable */
+#define E1000_RCTL_MPE            0x00000010    /* multicast promiscuous enab */
+#define E1000_RCTL_LPE            0x00000020    /* long packet enable */
+#define E1000_RCTL_LBM_NO         0x00000000    /* no loopback mode */
+#define E1000_RCTL_LBM_MAC        0x00000040    /* MAC loopback mode */
+#define E1000_RCTL_LBM_SLP        0x00000080    /* serial link loopback mode */
+#define E1000_RCTL_LBM_TCVR       0x000000C0    /* tcvr loopback mode */
+#define E1000_RCTL_DTYP_MASK      0x00000C00    /* Descriptor type mask */
+#define E1000_RCTL_DTYP_PS        0x00000400    /* Packet Split descriptor */
+#define E1000_RCTL_RDMTS_HALF     0x00000000    /* rx desc min threshold size */
+#define E1000_RCTL_RDMTS_QUAT     0x00000100    /* rx desc min threshold size */
+#define E1000_RCTL_RDMTS_EIGTH    0x00000200    /* rx desc min threshold size */
+#define E1000_RCTL_MO_SHIFT       12            /* multicast offset shift */
+#define E1000_RCTL_MO_0           0x00000000    /* multicast offset 11:0 */
+#define E1000_RCTL_MO_1           0x00001000    /* multicast offset 12:1 */
+#define E1000_RCTL_MO_2           0x00002000    /* multicast offset 13:2 */
+#define E1000_RCTL_MO_3           0x00003000    /* multicast offset 15:4 */
+#define E1000_RCTL_MDR            0x00004000    /* multicast desc ring 0 */
+#define E1000_RCTL_BAM            0x00008000    /* broadcast enable */
+/* these buffer sizes are valid if E1000_RCTL_BSEX is 0 */
+#define E1000_RCTL_SZ_2048        0x00000000    /* rx buffer size 2048 */
+#define E1000_RCTL_SZ_1024        0x00010000    /* rx buffer size 1024 */
+#define E1000_RCTL_SZ_512         0x00020000    /* rx buffer size 512 */
+#define E1000_RCTL_SZ_256         0x00030000    /* rx buffer size 256 */
+/* these buffer sizes are valid if E1000_RCTL_BSEX is 1 */
+#define E1000_RCTL_SZ_16384       0x00010000    /* rx buffer size 16384 */
+#define E1000_RCTL_SZ_8192        0x00020000    /* rx buffer size 8192 */
+#define E1000_RCTL_SZ_4096        0x00030000    /* rx buffer size 4096 */
+#define E1000_RCTL_VFE            0x00040000    /* vlan filter enable */
+#define E1000_RCTL_CFIEN          0x00080000    /* canonical form enable */
+#define E1000_RCTL_CFI            0x00100000    /* canonical form indicator */
+#define E1000_RCTL_DPF            0x00400000    /* discard pause frames */
+#define E1000_RCTL_PMCF           0x00800000    /* pass MAC control frames */
+#define E1000_RCTL_BSEX           0x02000000    /* Buffer size extension */
+#define E1000_RCTL_SECRC          0x04000000    /* Strip Ethernet CRC */
+#define E1000_RCTL_FLXBUF_MASK    0x78000000    /* Flexible buffer size */
+#define E1000_RCTL_FLXBUF_SHIFT   27            /* Flexible buffer shift */
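+/* Illustrative sketch only (not taken from the driver sources): selecting
+ * 4096-byte receive buffers would typically clear the previous size bits
+ * and set the extended-size encoding together with the extension bit:
+ *   rctl |= E1000_RCTL_SZ_4096 | E1000_RCTL_BSEX;
+ * (The E1000_RCTL_SZ_* encodings are only valid for the BSEX value
+ * stated above each group.)
+ */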
+
+/* Use byte values for the following shift parameters
+ * Usage:
+ *     psrctl |= (((ROUNDUP(value0, 128) >> E1000_PSRCTL_BSIZE0_SHIFT) &
+ *                  E1000_PSRCTL_BSIZE0_MASK) |
+ *                ((ROUNDUP(value1, 1024) >> E1000_PSRCTL_BSIZE1_SHIFT) &
+ *                  E1000_PSRCTL_BSIZE1_MASK) |
+ *                ((ROUNDUP(value2, 1024) << E1000_PSRCTL_BSIZE2_SHIFT) &
+ *                  E1000_PSRCTL_BSIZE2_MASK) |
+ *                ((ROUNDUP(value3, 1024) << E1000_PSRCTL_BSIZE3_SHIFT) &
+ *                  E1000_PSRCTL_BSIZE3_MASK))
+ * where value0 = [128..16256],  default=256
+ *       value1 = [1024..64512], default=4096
+ *       value2 = [0..64512],    default=4096
+ *       value3 = [0..64512],    default=0
+ */
+
+#define E1000_PSRCTL_BSIZE0_MASK   0x0000007F
+#define E1000_PSRCTL_BSIZE1_MASK   0x00003F00
+#define E1000_PSRCTL_BSIZE2_MASK   0x003F0000
+#define E1000_PSRCTL_BSIZE3_MASK   0x3F000000
+
+#define E1000_PSRCTL_BSIZE0_SHIFT  7            /* Shift _right_ 7 */
+#define E1000_PSRCTL_BSIZE1_SHIFT  2            /* Shift _right_ 2 */
+#define E1000_PSRCTL_BSIZE2_SHIFT  6            /* Shift _left_ 6 */
+#define E1000_PSRCTL_BSIZE3_SHIFT 14            /* Shift _left_ 14 */
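+/* Worked example (illustrative only) of the usage comment above with the
+ * documented defaults value0=256, value1=4096, value2=4096, value3=0:
+ *   256  >> 7  = 0x00000002   (BSIZE0)
+ *   4096 >> 2  = 0x00000400   (BSIZE1)
+ *   4096 << 6  = 0x00040000   (BSIZE2)
+ *   0    << 14 = 0x00000000   (BSIZE3)
+ * giving psrctl = 0x00040402.
+ */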
+
+/* SW_FW_SYNC definitions */
+#define E1000_SWFW_EEP_SM     0x0001
+#define E1000_SWFW_PHY0_SM    0x0002
+#define E1000_SWFW_PHY1_SM    0x0004
+#define E1000_SWFW_MAC_CSR_SM 0x0008
+
+/* Receive Descriptor */
+#define E1000_RDT_DELAY 0x0000ffff      /* Delay timer (1=1024us) */
+#define E1000_RDT_FPDB  0x80000000      /* Flush descriptor block */
+#define E1000_RDLEN_LEN 0x0007ff80      /* descriptor length */
+#define E1000_RDH_RDH   0x0000ffff      /* receive descriptor head */
+#define E1000_RDT_RDT   0x0000ffff      /* receive descriptor tail */
+
+/* Flow Control */
+#define E1000_FCRTH_RTH  0x0000FFF8     /* Mask Bits[15:3] for RTH */
+#define E1000_FCRTH_XFCE 0x80000000     /* External Flow Control Enable */
+#define E1000_FCRTL_RTL  0x0000FFF8     /* Mask Bits[15:3] for RTL */
+#define E1000_FCRTL_XONE 0x80000000     /* Enable XON frame transmission */
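+/* Illustrative sketch only (not taken from the driver sources;
+ * "fc_low_water" and "fc_high_water" refer to the fields of struct
+ * e1000_hw above): when XON frames are desired the low-water mark would
+ * typically be written as
+ *   (fc_low_water & E1000_FCRTL_RTL) | E1000_FCRTL_XONE
+ * and the high-water mark as (fc_high_water & E1000_FCRTH_RTH).
+ */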
+
+/* Header split receive */
+#define E1000_RFCTL_ISCSI_DIS           0x00000001
+#define E1000_RFCTL_ISCSI_DWC_MASK      0x0000003E
+#define E1000_RFCTL_ISCSI_DWC_SHIFT     1
+#define E1000_RFCTL_NFSW_DIS            0x00000040
+#define E1000_RFCTL_NFSR_DIS            0x00000080
+#define E1000_RFCTL_NFS_VER_MASK        0x00000300
+#define E1000_RFCTL_NFS_VER_SHIFT       8
+#define E1000_RFCTL_IPV6_DIS            0x00000400
+#define E1000_RFCTL_IPV6_XSUM_DIS       0x00000800
+#define E1000_RFCTL_ACK_DIS             0x00001000
+#define E1000_RFCTL_ACKD_DIS            0x00002000
+#define E1000_RFCTL_IPFRSP_DIS          0x00004000
+#define E1000_RFCTL_EXTEN               0x00008000
+#define E1000_RFCTL_IPV6_EX_DIS         0x00010000
+#define E1000_RFCTL_NEW_IPV6_EXT_DIS    0x00020000
+
+/* Receive Descriptor Control */
+#define E1000_RXDCTL_PTHRESH 0x0000003F /* RXDCTL Prefetch Threshold */
+#define E1000_RXDCTL_HTHRESH 0x00003F00 /* RXDCTL Host Threshold */
+#define E1000_RXDCTL_WTHRESH 0x003F0000 /* RXDCTL Writeback Threshold */
+#define E1000_RXDCTL_GRAN    0x01000000 /* RXDCTL Granularity */
+
+/* Transmit Descriptor Control */
+#define E1000_TXDCTL_PTHRESH 0x0000003F /* TXDCTL Prefetch Threshold */
+#define E1000_TXDCTL_HTHRESH 0x00003F00 /* TXDCTL Host Threshold */
+#define E1000_TXDCTL_WTHRESH 0x003F0000 /* TXDCTL Writeback Threshold */
+#define E1000_TXDCTL_GRAN    0x01000000 /* TXDCTL Granularity */
+#define E1000_TXDCTL_LWTHRESH 0xFE000000 /* TXDCTL Low Threshold */
+#define E1000_TXDCTL_FULL_TX_DESC_WB 0x01010000 /* GRAN=1, WTHRESH=1 */
+#define E1000_TXDCTL_COUNT_DESC 0x00400000 /* Enable the counting of desc.
+                                              still to be processed. */
+/* Transmit Configuration Word */
+#define E1000_TXCW_FD         0x00000020        /* TXCW full duplex */
+#define E1000_TXCW_HD         0x00000040        /* TXCW half duplex */
+#define E1000_TXCW_PAUSE      0x00000080        /* TXCW sym pause request */
+#define E1000_TXCW_ASM_DIR    0x00000100        /* TXCW astm pause direction */
+#define E1000_TXCW_PAUSE_MASK 0x00000180        /* TXCW pause request mask */
+#define E1000_TXCW_RF         0x00003000        /* TXCW remote fault */
+#define E1000_TXCW_NP         0x00008000        /* TXCW next page */
+#define E1000_TXCW_CW         0x0000ffff        /* TxConfigWord mask */
+#define E1000_TXCW_TXC        0x40000000        /* Transmit Config control */
+#define E1000_TXCW_ANE        0x80000000        /* Auto-neg enable */
+
+/* Receive Configuration Word */
+#define E1000_RXCW_CW    0x0000ffff     /* RxConfigWord mask */
+#define E1000_RXCW_NC    0x04000000     /* Receive config no carrier */
+#define E1000_RXCW_IV    0x08000000     /* Receive config invalid */
+#define E1000_RXCW_CC    0x10000000     /* Receive config change */
+#define E1000_RXCW_C     0x20000000     /* Receive config */
+#define E1000_RXCW_SYNCH 0x40000000     /* Receive config synch */
+#define E1000_RXCW_ANC   0x80000000     /* Auto-neg complete */
+
+/* Transmit Control */
+#define E1000_TCTL_RST    0x00000001    /* software reset */
+#define E1000_TCTL_EN     0x00000002    /* enable tx */
+#define E1000_TCTL_BCE    0x00000004    /* busy check enable */
+#define E1000_TCTL_PSP    0x00000008    /* pad short packets */
+#define E1000_TCTL_CT     0x00000ff0    /* collision threshold */
+#define E1000_TCTL_COLD   0x003ff000    /* collision distance */
+#define E1000_TCTL_SWXOFF 0x00400000    /* SW Xoff transmission */
+#define E1000_TCTL_PBE    0x00800000    /* Packet Burst Enable */
+#define E1000_TCTL_RTLC   0x01000000    /* Re-transmit on late collision */
+#define E1000_TCTL_NRTU   0x02000000    /* No Re-transmit on underrun */
+#define E1000_TCTL_MULR   0x10000000    /* Multiple request support */
+/* Extended Transmit Control */
+#define E1000_TCTL_EXT_BST_MASK  0x000003FF /* Backoff Slot Time */
+#define E1000_TCTL_EXT_GCEX_MASK 0x000FFC00 /* Gigabit Carry Extend Padding */
+
+#define DEFAULT_80003ES2LAN_TCTL_EXT_GCEX   0x00010000
+
+/* Receive Checksum Control */
+#define E1000_RXCSUM_PCSS_MASK 0x000000FF   /* Packet Checksum Start */
+#define E1000_RXCSUM_IPOFL     0x00000100   /* IPv4 checksum offload */
+#define E1000_RXCSUM_TUOFL     0x00000200   /* TCP / UDP checksum offload */
+#define E1000_RXCSUM_IPV6OFL   0x00000400   /* IPv6 checksum offload */
+#define E1000_RXCSUM_IPPCSE    0x00001000   /* IP payload checksum enable */
+#define E1000_RXCSUM_PCSD      0x00002000   /* packet checksum disabled */
+
+/* Multiple Receive Queue Control */
+#define E1000_MRQC_ENABLE_MASK              0x00000003
+#define E1000_MRQC_ENABLE_RSS_2Q            0x00000001
+#define E1000_MRQC_ENABLE_RSS_INT           0x00000004
+#define E1000_MRQC_RSS_FIELD_MASK           0xFFFF0000
+#define E1000_MRQC_RSS_FIELD_IPV4_TCP       0x00010000
+#define E1000_MRQC_RSS_FIELD_IPV4           0x00020000
+#define E1000_MRQC_RSS_FIELD_IPV6_TCP_EX    0x00040000
+#define E1000_MRQC_RSS_FIELD_IPV6_EX        0x00080000
+#define E1000_MRQC_RSS_FIELD_IPV6           0x00100000
+#define E1000_MRQC_RSS_FIELD_IPV6_TCP       0x00200000
+
+/* Definitions for power management and wakeup registers */
+/* Wake Up Control */
+#define E1000_WUC_APME       0x00000001 /* APM Enable */
+#define E1000_WUC_PME_EN     0x00000002 /* PME Enable */
+#define E1000_WUC_PME_STATUS 0x00000004 /* PME Status */
+#define E1000_WUC_APMPME     0x00000008 /* Assert PME on APM Wakeup */
+#define E1000_WUC_SPM        0x80000000 /* Enable SPM */
+
+/* Wake Up Filter Control */
+#define E1000_WUFC_LNKC 0x00000001 /* Link Status Change Wakeup Enable */
+#define E1000_WUFC_MAG  0x00000002 /* Magic Packet Wakeup Enable */
+#define E1000_WUFC_EX   0x00000004 /* Directed Exact Wakeup Enable */
+#define E1000_WUFC_MC   0x00000008 /* Directed Multicast Wakeup Enable */
+#define E1000_WUFC_BC   0x00000010 /* Broadcast Wakeup Enable */
+#define E1000_WUFC_ARP  0x00000020 /* ARP Request Packet Wakeup Enable */
+#define E1000_WUFC_IPV4 0x00000040 /* Directed IPv4 Packet Wakeup Enable */
+#define E1000_WUFC_IPV6 0x00000080 /* Directed IPv6 Packet Wakeup Enable */
+#define E1000_WUFC_IGNORE_TCO      0x00008000 /* Ignore WakeOn TCO packets */
+#define E1000_WUFC_FLX0 0x00010000 /* Flexible Filter 0 Enable */
+#define E1000_WUFC_FLX1 0x00020000 /* Flexible Filter 1 Enable */
+#define E1000_WUFC_FLX2 0x00040000 /* Flexible Filter 2 Enable */
+#define E1000_WUFC_FLX3 0x00080000 /* Flexible Filter 3 Enable */
+#define E1000_WUFC_ALL_FILTERS 0x000F00FF /* Mask for all wakeup filters */
+#define E1000_WUFC_FLX_OFFSET 16       /* Offset to the Flexible Filters bits */
+#define E1000_WUFC_FLX_FILTERS 0x000F0000 /* Mask for the 4 flexible filters */
+
+/* Wake Up Status */
+#define E1000_WUS_LNKC 0x00000001 /* Link Status Changed */
+#define E1000_WUS_MAG  0x00000002 /* Magic Packet Received */
+#define E1000_WUS_EX   0x00000004 /* Directed Exact Received */
+#define E1000_WUS_MC   0x00000008 /* Directed Multicast Received */
+#define E1000_WUS_BC   0x00000010 /* Broadcast Received */
+#define E1000_WUS_ARP  0x00000020 /* ARP Request Packet Received */
+#define E1000_WUS_IPV4 0x00000040 /* Directed IPv4 Packet Wakeup Received */
+#define E1000_WUS_IPV6 0x00000080 /* Directed IPv6 Packet Wakeup Received */
+#define E1000_WUS_FLX0 0x00010000 /* Flexible Filter 0 Match */
+#define E1000_WUS_FLX1 0x00020000 /* Flexible Filter 1 Match */
+#define E1000_WUS_FLX2 0x00040000 /* Flexible Filter 2 Match */
+#define E1000_WUS_FLX3 0x00080000 /* Flexible Filter 3 Match */
+#define E1000_WUS_FLX_FILTERS 0x000F0000 /* Mask for the 4 flexible filters */
+
+/* Management Control */
+#define E1000_MANC_SMBUS_EN      0x00000001 /* SMBus Enabled - RO */
+#define E1000_MANC_ASF_EN        0x00000002 /* ASF Enabled - RO */
+#define E1000_MANC_R_ON_FORCE    0x00000004 /* Reset on Force TCO - RO */
+#define E1000_MANC_RMCP_EN       0x00000100 /* Enable RMCP 026Fh Filtering */
+#define E1000_MANC_0298_EN       0x00000200 /* Enable RMCP 0298h Filtering */
+#define E1000_MANC_IPV4_EN       0x00000400 /* Enable IPv4 */
+#define E1000_MANC_IPV6_EN       0x00000800 /* Enable IPv6 */
+#define E1000_MANC_SNAP_EN       0x00001000 /* Accept LLC/SNAP */
+#define E1000_MANC_ARP_EN        0x00002000 /* Enable ARP Request Filtering */
+#define E1000_MANC_NEIGHBOR_EN   0x00004000 /* Enable Neighbor Discovery
+                                             * Filtering */
+#define E1000_MANC_ARP_RES_EN    0x00008000 /* Enable ARP response Filtering */
+#define E1000_MANC_TCO_RESET     0x00010000 /* TCO Reset Occurred */
+#define E1000_MANC_RCV_TCO_EN    0x00020000 /* Receive TCO Packets Enabled */
+#define E1000_MANC_REPORT_STATUS 0x00040000 /* Status Reporting Enabled */
+#define E1000_MANC_RCV_ALL       0x00080000 /* Receive All Enabled */
+#define E1000_MANC_BLK_PHY_RST_ON_IDE   0x00040000 /* Block phy resets */
+#define E1000_MANC_EN_MAC_ADDR_FILTER   0x00100000 /* Enable MAC address
+                                                    * filtering */
+#define E1000_MANC_EN_MNG2HOST   0x00200000 /* Enable MNG packets to host
+                                             * memory */
+#define E1000_MANC_EN_IP_ADDR_FILTER    0x00400000 /* Enable IP address
+                                                    * filtering */
+#define E1000_MANC_EN_XSUM_FILTER   0x00800000 /* Enable checksum filtering */
+#define E1000_MANC_BR_EN         0x01000000 /* Enable broadcast filtering */
+#define E1000_MANC_SMB_REQ       0x01000000 /* SMBus Request */
+#define E1000_MANC_SMB_GNT       0x02000000 /* SMBus Grant */
+#define E1000_MANC_SMB_CLK_IN    0x04000000 /* SMBus Clock In */
+#define E1000_MANC_SMB_DATA_IN   0x08000000 /* SMBus Data In */
+#define E1000_MANC_SMB_DATA_OUT  0x10000000 /* SMBus Data Out */
+#define E1000_MANC_SMB_CLK_OUT   0x20000000 /* SMBus Clock Out */
+
+#define E1000_MANC_SMB_DATA_OUT_SHIFT  28 /* SMBus Data Out Shift */
+#define E1000_MANC_SMB_CLK_OUT_SHIFT   29 /* SMBus Clock Out Shift */
+
+/* SW Semaphore Register */
+#define E1000_SWSM_SMBI         0x00000001 /* Driver Semaphore bit */
+#define E1000_SWSM_SWESMBI      0x00000002 /* FW Semaphore bit */
+#define E1000_SWSM_WMNG         0x00000004 /* Wake MNG Clock */
+#define E1000_SWSM_DRV_LOAD     0x00000008 /* Driver Loaded Bit */
+
+/* FW Semaphore Register */
+#define E1000_FWSM_MODE_MASK    0x0000000E /* FW mode */
+#define E1000_FWSM_MODE_SHIFT            1
+#define E1000_FWSM_FW_VALID     0x00008000 /* FW established a valid mode */
+
+#define E1000_FWSM_RSPCIPHY        0x00000040 /* Reset PHY on PCI reset */
+#define E1000_FWSM_DISSW           0x10000000 /* FW disable SW Write Access */
+#define E1000_FWSM_SKUSEL_MASK     0x60000000 /* LAN SKU select */
+#define E1000_FWSM_SKUEL_SHIFT     29
+#define E1000_FWSM_SKUSEL_EMB      0x0 /* Embedded SKU */
+#define E1000_FWSM_SKUSEL_CONS     0x1 /* Consumer SKU */
+#define E1000_FWSM_SKUSEL_PERF_100 0x2 /* Perf & Corp 10/100 SKU */
+#define E1000_FWSM_SKUSEL_PERF_GBE 0x3 /* Perf & Corp GbE SKU */
+
+/* FFLT Debug Register */
+#define E1000_FFLT_DBG_INVC     0x00100000 /* Invalid /C/ code handling */
+
+typedef enum {
+    e1000_mng_mode_none     = 0,
+    e1000_mng_mode_asf,
+    e1000_mng_mode_pt,
+    e1000_mng_mode_ipmi,
+    e1000_mng_mode_host_interface_only
+} e1000_mng_mode;
+
+/* Host Interface Control Register */
+#define E1000_HICR_EN           0x00000001  /* Enable Bit - RO */
+#define E1000_HICR_C            0x00000002  /* Driver sets this bit when done
+                                             * to put command in RAM */
+#define E1000_HICR_SV           0x00000004  /* Status Validity */
+#define E1000_HICR_FWR          0x00000080  /* FW reset. Set by the Host */
+
+/* Host Interface Command Interface - Address range 0x8800-0x8EFF */
+#define E1000_HI_MAX_DATA_LENGTH         252 /* Host Interface data length */
+#define E1000_HI_MAX_BLOCK_BYTE_LENGTH  1792 /* Number of bytes in range */
+#define E1000_HI_MAX_BLOCK_DWORD_LENGTH  448 /* Number of dwords in range */
+#define E1000_HI_COMMAND_TIMEOUT         500 /* Time in ms to process HI command */
+
+struct e1000_host_command_header {
+    u8 command_id;
+    u8 command_length;
+    u8 command_options;   /* I/F bits for command, status for return */
+    u8 checksum;
+};
+struct e1000_host_command_info {
+    struct e1000_host_command_header command_header;  /* Command header / command result header (4 bytes) */
+    u8 command_data[E1000_HI_MAX_DATA_LENGTH];   /* Command data, 0..252 bytes */
+};
+
+/* Host SMB register #0 */
+#define E1000_HSMC0R_CLKIN      0x00000001  /* SMB Clock in */
+#define E1000_HSMC0R_DATAIN     0x00000002  /* SMB Data in */
+#define E1000_HSMC0R_DATAOUT    0x00000004  /* SMB Data out */
+#define E1000_HSMC0R_CLKOUT     0x00000008  /* SMB Clock out */
+
+/* Host SMB register #1 */
+#define E1000_HSMC1R_CLKIN      E1000_HSMC0R_CLKIN
+#define E1000_HSMC1R_DATAIN     E1000_HSMC0R_DATAIN
+#define E1000_HSMC1R_DATAOUT    E1000_HSMC0R_DATAOUT
+#define E1000_HSMC1R_CLKOUT     E1000_HSMC0R_CLKOUT
+
+/* FW Status Register */
+#define E1000_FWSTS_FWS_MASK    0x000000FF  /* FW Status */
+
+/* Wake Up Packet Length */
+#define E1000_WUPL_LENGTH_MASK 0x0FFF   /* Only the lower 12 bits are valid */
+
+#define E1000_MDALIGN          4096
+
+/* PCI-Ex registers*/
+
+/* PCI-Ex Control Register */
+#define E1000_GCR_RXD_NO_SNOOP          0x00000001
+#define E1000_GCR_RXDSCW_NO_SNOOP       0x00000002
+#define E1000_GCR_RXDSCR_NO_SNOOP       0x00000004
+#define E1000_GCR_TXD_NO_SNOOP          0x00000008
+#define E1000_GCR_TXDSCW_NO_SNOOP       0x00000010
+#define E1000_GCR_TXDSCR_NO_SNOOP       0x00000020
+
+#define PCI_EX_NO_SNOOP_ALL (E1000_GCR_RXD_NO_SNOOP         | \
+                             E1000_GCR_RXDSCW_NO_SNOOP      | \
+                             E1000_GCR_RXDSCR_NO_SNOOP      | \
+                             E1000_GCR_TXD_NO_SNOOP         | \
+                             E1000_GCR_TXDSCW_NO_SNOOP      | \
+                             E1000_GCR_TXDSCR_NO_SNOOP)
+
+#define PCI_EX_82566_SNOOP_ALL PCI_EX_NO_SNOOP_ALL
+
+#define E1000_GCR_L1_ACT_WITHOUT_L0S_RX 0x08000000
+/* Function Active and Power State to MNG */
+#define E1000_FACTPS_FUNC0_POWER_STATE_MASK         0x00000003
+#define E1000_FACTPS_LAN0_VALID                     0x00000004
+#define E1000_FACTPS_FUNC0_AUX_EN                   0x00000008
+#define E1000_FACTPS_FUNC1_POWER_STATE_MASK         0x000000C0
+#define E1000_FACTPS_FUNC1_POWER_STATE_SHIFT        6
+#define E1000_FACTPS_LAN1_VALID                     0x00000100
+#define E1000_FACTPS_FUNC1_AUX_EN                   0x00000200
+#define E1000_FACTPS_FUNC2_POWER_STATE_MASK         0x00003000
+#define E1000_FACTPS_FUNC2_POWER_STATE_SHIFT        12
+#define E1000_FACTPS_IDE_ENABLE                     0x00004000
+#define E1000_FACTPS_FUNC2_AUX_EN                   0x00008000
+#define E1000_FACTPS_FUNC3_POWER_STATE_MASK         0x000C0000
+#define E1000_FACTPS_FUNC3_POWER_STATE_SHIFT        18
+#define E1000_FACTPS_SP_ENABLE                      0x00100000
+#define E1000_FACTPS_FUNC3_AUX_EN                   0x00200000
+#define E1000_FACTPS_FUNC4_POWER_STATE_MASK         0x03000000
+#define E1000_FACTPS_FUNC4_POWER_STATE_SHIFT        24
+#define E1000_FACTPS_IPMI_ENABLE                    0x04000000
+#define E1000_FACTPS_FUNC4_AUX_EN                   0x08000000
+#define E1000_FACTPS_MNGCG                          0x20000000
+#define E1000_FACTPS_LAN_FUNC_SEL                   0x40000000
+#define E1000_FACTPS_PM_STATE_CHANGED               0x80000000
+
+/* PCI-Ex Config Space */
+#define PCI_EX_LINK_STATUS           0x12
+#define PCI_EX_LINK_WIDTH_MASK       0x3F0
+#define PCI_EX_LINK_WIDTH_SHIFT      4
+
+/* EEPROM Commands - Microwire */
+#define EEPROM_READ_OPCODE_MICROWIRE  0x6  /* EEPROM read opcode */
+#define EEPROM_WRITE_OPCODE_MICROWIRE 0x5  /* EEPROM write opcode */
+#define EEPROM_ERASE_OPCODE_MICROWIRE 0x7  /* EEPROM erase opcode */
+#define EEPROM_EWEN_OPCODE_MICROWIRE  0x13 /* EEPROM erase/write enable */
+#define EEPROM_EWDS_OPCODE_MICROWIRE  0x10 /* EEPROM erase/write disable */
+
+/* EEPROM Commands - SPI */
+#define EEPROM_MAX_RETRY_SPI        5000 /* Max wait of 5ms, for RDY signal */
+#define EEPROM_READ_OPCODE_SPI      0x03  /* EEPROM read opcode */
+#define EEPROM_WRITE_OPCODE_SPI     0x02  /* EEPROM write opcode */
+#define EEPROM_A8_OPCODE_SPI        0x08  /* opcode bit-3 = address bit-8 */
+#define EEPROM_WREN_OPCODE_SPI      0x06  /* EEPROM set Write Enable latch */
+#define EEPROM_WRDI_OPCODE_SPI      0x04  /* EEPROM reset Write Enable latch */
+#define EEPROM_RDSR_OPCODE_SPI      0x05  /* EEPROM read Status register */
+#define EEPROM_WRSR_OPCODE_SPI      0x01  /* EEPROM write Status register */
+#define EEPROM_ERASE4K_OPCODE_SPI   0x20  /* EEPROM ERASE 4KB */
+#define EEPROM_ERASE64K_OPCODE_SPI  0xD8  /* EEPROM ERASE 64KB */
+#define EEPROM_ERASE256_OPCODE_SPI  0xDB  /* EEPROM ERASE 256B */
+
+/* EEPROM Size definitions */
+#define EEPROM_WORD_SIZE_SHIFT  6
+#define EEPROM_SIZE_SHIFT       10
+#define EEPROM_SIZE_MASK        0x1C00
+
+/* EEPROM Word Offsets */
+#define EEPROM_COMPAT                 0x0003
+#define EEPROM_ID_LED_SETTINGS        0x0004
+#define EEPROM_VERSION                0x0005
+#define EEPROM_SERDES_AMPLITUDE       0x0006 /* For SERDES output amplitude adjustment. */
+#define EEPROM_PHY_CLASS_WORD         0x0007
+#define EEPROM_INIT_CONTROL1_REG      0x000A
+#define EEPROM_INIT_CONTROL2_REG      0x000F
+#define EEPROM_SWDEF_PINS_CTRL_PORT_1 0x0010
+#define EEPROM_INIT_CONTROL3_PORT_B   0x0014
+#define EEPROM_INIT_3GIO_3            0x001A
+#define EEPROM_SWDEF_PINS_CTRL_PORT_0 0x0020
+#define EEPROM_INIT_CONTROL3_PORT_A   0x0024
+#define EEPROM_CFG                    0x0012
+#define EEPROM_FLASH_VERSION          0x0032
+#define EEPROM_CHECKSUM_REG           0x003F
+
+#define E1000_EEPROM_CFG_DONE         0x00040000   /* MNG config cycle done */
+#define E1000_EEPROM_CFG_DONE_PORT_1  0x00080000   /* ...for second port */
+
+/* Word definitions for ID LED Settings */
+#define ID_LED_RESERVED_0000 0x0000
+#define ID_LED_RESERVED_FFFF 0xFFFF
+#define ID_LED_RESERVED_82573  0xF746
+#define ID_LED_DEFAULT_82573   0x1811
+#define ID_LED_DEFAULT       ((ID_LED_OFF1_ON2 << 12) | \
+                              (ID_LED_OFF1_OFF2 << 8) | \
+                              (ID_LED_DEF1_DEF2 << 4) | \
+                              (ID_LED_DEF1_DEF2))
+#define ID_LED_DEFAULT_ICH8LAN  ((ID_LED_DEF1_DEF2 << 12) | \
+                                 (ID_LED_DEF1_OFF2 <<  8) | \
+                                 (ID_LED_DEF1_ON2  <<  4) | \
+                                 (ID_LED_DEF1_DEF2))
+#define ID_LED_DEF1_DEF2     0x1
+#define ID_LED_DEF1_ON2      0x2
+#define ID_LED_DEF1_OFF2     0x3
+#define ID_LED_ON1_DEF2      0x4
+#define ID_LED_ON1_ON2       0x5
+#define ID_LED_ON1_OFF2      0x6
+#define ID_LED_OFF1_DEF2     0x7
+#define ID_LED_OFF1_ON2      0x8
+#define ID_LED_OFF1_OFF2     0x9
+
+#define IGP_ACTIVITY_LED_MASK   0xFFFFF0FF
+#define IGP_ACTIVITY_LED_ENABLE 0x0300
+#define IGP_LED3_MODE           0x07000000
+
+
+/* Mask bits for SERDES amplitude adjustment in Word 6 of the EEPROM */
+#define EEPROM_SERDES_AMPLITUDE_MASK  0x000F
+
+/* Mask bit for PHY class in Word 7 of the EEPROM */
+#define EEPROM_PHY_CLASS_A   0x8000
+
+/* Mask bits for fields in Word 0x0a of the EEPROM */
+#define EEPROM_WORD0A_ILOS   0x0010
+#define EEPROM_WORD0A_SWDPIO 0x01E0
+#define EEPROM_WORD0A_LRST   0x0200
+#define EEPROM_WORD0A_FD     0x0400
+#define EEPROM_WORD0A_66MHZ  0x0800
+
+/* Mask bits for fields in Word 0x0f of the EEPROM */
+#define EEPROM_WORD0F_PAUSE_MASK 0x3000
+#define EEPROM_WORD0F_PAUSE      0x1000
+#define EEPROM_WORD0F_ASM_DIR    0x2000
+#define EEPROM_WORD0F_ANE        0x0800
+#define EEPROM_WORD0F_SWPDIO_EXT 0x00F0
+#define EEPROM_WORD0F_LPLU       0x0001
+
+/* Mask bits for fields in Word 0x10/0x20 of the EEPROM */
+#define EEPROM_WORD1020_GIGA_DISABLE         0x0010
+#define EEPROM_WORD1020_GIGA_DISABLE_NON_D0A 0x0008
+
+/* Mask bits for fields in Word 0x1a of the EEPROM */
+#define EEPROM_WORD1A_ASPM_MASK  0x000C
+
+/* For checksumming, the sum of all words in the EEPROM should equal 0xBABA. */
+#define EEPROM_SUM 0xBABA
+
+/* EEPROM Map defines (WORD OFFSETS)*/
+#define EEPROM_NODE_ADDRESS_BYTE_0 0
+#define EEPROM_PBA_BYTE_1          8
+
+#define EEPROM_RESERVED_WORD          0xFFFF
+
+/* EEPROM Map Sizes (Byte Counts) */
+#define PBA_SIZE 4
+
+/* Collision related configuration parameters */
+#define E1000_COLLISION_THRESHOLD       15
+#define E1000_CT_SHIFT                  4
+/* Collision distance is a 0-based value that applies to
+ * half-duplex-capable hardware only. */
+#define E1000_COLLISION_DISTANCE        63
+#define E1000_COLLISION_DISTANCE_82542  64
+#define E1000_FDX_COLLISION_DISTANCE    E1000_COLLISION_DISTANCE
+#define E1000_HDX_COLLISION_DISTANCE    E1000_COLLISION_DISTANCE
+#define E1000_COLD_SHIFT                12
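+/* Illustrative sketch only (not taken from the driver sources): these
+ * values would typically be merged into TCTL as
+ *   tctl = (tctl & ~E1000_TCTL_COLD) |
+ *          (E1000_COLLISION_DISTANCE << E1000_COLD_SHIFT);
+ * with the collision threshold placed into E1000_TCTL_CT via
+ *   E1000_COLLISION_THRESHOLD << E1000_CT_SHIFT.
+ */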
+
+/* Number of Transmit and Receive Descriptors must be a multiple of 8 */
+#define REQ_TX_DESCRIPTOR_MULTIPLE  8
+#define REQ_RX_DESCRIPTOR_MULTIPLE  8
+
+/* Default values for the transmit IPG register */
+#define DEFAULT_82542_TIPG_IPGT        10
+#define DEFAULT_82543_TIPG_IPGT_FIBER  9
+#define DEFAULT_82543_TIPG_IPGT_COPPER 8
+
+#define E1000_TIPG_IPGT_MASK  0x000003FF
+#define E1000_TIPG_IPGR1_MASK 0x000FFC00
+#define E1000_TIPG_IPGR2_MASK 0x3FF00000
+
+#define DEFAULT_82542_TIPG_IPGR1 2
+#define DEFAULT_82543_TIPG_IPGR1 8
+#define E1000_TIPG_IPGR1_SHIFT  10
+
+#define DEFAULT_82542_TIPG_IPGR2 10
+#define DEFAULT_82543_TIPG_IPGR2 6
+#define DEFAULT_80003ES2LAN_TIPG_IPGR2 7
+#define E1000_TIPG_IPGR2_SHIFT  20
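+/* Illustrative sketch only (not taken from the driver sources; "ipgt",
+ * "ipgr1" and "ipgr2" are placeholder names): a TIPG value would
+ * typically be composed from the defaults in this block as
+ *   ipgt | (ipgr1 << E1000_TIPG_IPGR1_SHIFT) |
+ *          (ipgr2 << E1000_TIPG_IPGR2_SHIFT)
+ */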
+
+#define DEFAULT_80003ES2LAN_TIPG_IPGT_10_100 0x00000009
+#define DEFAULT_80003ES2LAN_TIPG_IPGT_1000   0x00000008
+#define E1000_TXDMAC_DPP 0x00000001
+
+/* Adaptive IFS defines */
+#define TX_THRESHOLD_START     8
+#define TX_THRESHOLD_INCREMENT 10
+#define TX_THRESHOLD_DECREMENT 1
+#define TX_THRESHOLD_STOP      190
+#define TX_THRESHOLD_DISABLE   0
+#define TX_THRESHOLD_TIMER_MS  10000
+#define MIN_NUM_XMITS          1000
+#define IFS_MAX                80
+#define IFS_STEP               10
+#define IFS_MIN                40
+#define IFS_RATIO              4
+
+/* Extended Configuration Control and Size */
+#define E1000_EXTCNF_CTRL_PCIE_WRITE_ENABLE 0x00000001
+#define E1000_EXTCNF_CTRL_PHY_WRITE_ENABLE  0x00000002
+#define E1000_EXTCNF_CTRL_D_UD_ENABLE       0x00000004
+#define E1000_EXTCNF_CTRL_D_UD_LATENCY      0x00000008
+#define E1000_EXTCNF_CTRL_D_UD_OWNER        0x00000010
+#define E1000_EXTCNF_CTRL_MDIO_SW_OWNERSHIP 0x00000020
+#define E1000_EXTCNF_CTRL_MDIO_HW_OWNERSHIP 0x00000040
+#define E1000_EXTCNF_CTRL_EXT_CNF_POINTER   0x0FFF0000
+
+#define E1000_EXTCNF_SIZE_EXT_PHY_LENGTH    0x000000FF
+#define E1000_EXTCNF_SIZE_EXT_DOCK_LENGTH   0x0000FF00
+#define E1000_EXTCNF_SIZE_EXT_PCIE_LENGTH   0x00FF0000
+#define E1000_EXTCNF_CTRL_LCD_WRITE_ENABLE  0x00000001
+#define E1000_EXTCNF_CTRL_SWFLAG            0x00000020
+
+/* PBA constants */
+#define E1000_PBA_8K 0x0008    /* 8KB, default Rx allocation */
+#define E1000_PBA_12K 0x000C    /* 12KB, default Rx allocation */
+#define E1000_PBA_16K 0x0010    /* 16KB, default TX allocation */
+#define E1000_PBA_20K 0x0014
+#define E1000_PBA_22K 0x0016
+#define E1000_PBA_24K 0x0018
+#define E1000_PBA_30K 0x001E
+#define E1000_PBA_32K 0x0020
+#define E1000_PBA_34K 0x0022
+#define E1000_PBA_38K 0x0026
+#define E1000_PBA_40K 0x0028
+#define E1000_PBA_48K 0x0030    /* 48KB, default RX allocation */
+
+#define E1000_PBS_16K E1000_PBA_16K
+
+/* Flow Control Constants */
+#define FLOW_CONTROL_ADDRESS_LOW  0x00C28001
+#define FLOW_CONTROL_ADDRESS_HIGH 0x00000100
+#define FLOW_CONTROL_TYPE         0x8808
+
+/* The historical defaults for the flow control values are given below. */
+#define FC_DEFAULT_HI_THRESH        (0x8000)    /* 32KB */
+#define FC_DEFAULT_LO_THRESH        (0x4000)    /* 16KB */
+#define FC_DEFAULT_TX_TIMER         (0x100)     /* ~130 us */
+
+/* PCIX Config space */
+#define PCIX_COMMAND_REGISTER    0xE6
+#define PCIX_STATUS_REGISTER_LO  0xE8
+#define PCIX_STATUS_REGISTER_HI  0xEA
+
+#define PCIX_COMMAND_MMRBC_MASK      0x000C
+#define PCIX_COMMAND_MMRBC_SHIFT     0x2
+#define PCIX_STATUS_HI_MMRBC_MASK    0x0060
+#define PCIX_STATUS_HI_MMRBC_SHIFT   0x5
+#define PCIX_STATUS_HI_MMRBC_4K      0x3
+#define PCIX_STATUS_HI_MMRBC_2K      0x2
+
+
+/* Number of bits required to shift right the "pause" bits from the
+ * EEPROM (bits 13:12) to the "pause" (bits 8:7) field in the TXCW register.
+ */
+#define PAUSE_SHIFT 5
+
+/* Number of bits required to shift left the "SWDPIO" bits from the
+ * EEPROM (bits 8:5) to the "SWDPIO" (bits 25:22) field in the CTRL register.
+ */
+#define SWDPIO_SHIFT 17
+
+/* Number of bits required to shift left the "SWDPIO_EXT" bits from the
+ * EEPROM word F (bits 7:4) to bits 11:8 of the Extended CTRL register.
+ */
+#define SWDPIO__EXT_SHIFT 4
+
+/* Number of bits required to shift left the "ILOS" bit from the EEPROM
+ * (bit 4) to the "ILOS" (bit 7) field in the CTRL register.
+ */
+#define ILOS_SHIFT  3
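+
+/* Worked example (illustrative only): with PAUSE_SHIFT == 5, the EEPROM
+ * "pause" field in bits 13:12 lands in TXCW bits 8:7 via a right shift;
+ * eeprom_word below is a hypothetical variable holding the EEPROM word.
+ *
+ *     txcw_pause_bits = (eeprom_word & 0x3000) >> PAUSE_SHIFT;   // now at bits 8:7
+ */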
+
+
+#define RECEIVE_BUFFER_ALIGN_SIZE  (256)
+
+/* Number of milliseconds we wait for auto-negotiation to complete */
+#define LINK_UP_TIMEOUT             500
+
+/* Number of 100-microsecond intervals we wait for PCI Express master disable */
+#define MASTER_DISABLE_TIMEOUT      800
+/* Number of milliseconds we wait for the EEPROM auto read bit done after MAC reset */
+#define AUTO_READ_DONE_TIMEOUT      10
+/* Number of milliseconds we wait for PHY configuration done after MAC reset */
+#define PHY_CFG_TIMEOUT             100
+
+#define E1000_TX_BUFFER_SIZE ((u32)1514)
+
+/* The carrier extension symbol, as received by the NIC. */
+#define CARRIER_EXTENSION   0x0F
+
+/* TBI_ACCEPT macro definition:
+ *
+ * This macro requires:
+ *      adapter = a pointer to struct e1000_hw
+ *      status = the 8 bit status field of the RX descriptor with EOP set
+ *      error = the 8 bit error field of the RX descriptor with EOP set
+ *      length = the sum of all the length fields of the RX descriptors that
+ *               make up the current frame
+ *      last_byte = the last byte of the frame DMAed by the hardware
+ *      max_frame_length = the maximum frame length we want to accept.
+ *      min_frame_length = the minimum frame length we want to accept.
+ *
+ * This macro is a conditional that should be used in the interrupt
+ * handler's Rx processing routine when RxErrors have been detected.
+ *
+ * Typical use:
+ *  ...
+ *  if (TBI_ACCEPT) {
+ *      accept_frame = true;
+ *      e1000_tbi_adjust_stats(adapter, MacAddress);
+ *      frame_length--;
+ *  } else {
+ *      accept_frame = false;
+ *  }
+ *  ...
+ */
+
+#define TBI_ACCEPT(adapter, status, errors, length, last_byte) \
+    ((adapter)->tbi_compatibility_on && \
+     (((errors) & E1000_RXD_ERR_FRAME_ERR_MASK) == E1000_RXD_ERR_CE) && \
+     ((last_byte) == CARRIER_EXTENSION) && \
+     (((status) & E1000_RXD_STAT_VP) ? \
+          (((length) > ((adapter)->min_frame_size - VLAN_TAG_SIZE)) && \
+           ((length) <= ((adapter)->max_frame_size + 1))) : \
+          (((length) > (adapter)->min_frame_size) && \
+           ((length) <= ((adapter)->max_frame_size + VLAN_TAG_SIZE + 1)))))
+
+
+/* Structures, enums, and macros for the PHY */
+
+/* Bit definitions for the Management Data IO (MDIO) and Management Data
+ * Clock (MDC) pins in the Device Control Register.
+ */
+#define E1000_CTRL_PHY_RESET_DIR  E1000_CTRL_SWDPIO0
+#define E1000_CTRL_PHY_RESET      E1000_CTRL_SWDPIN0
+#define E1000_CTRL_MDIO_DIR       E1000_CTRL_SWDPIO2
+#define E1000_CTRL_MDIO           E1000_CTRL_SWDPIN2
+#define E1000_CTRL_MDC_DIR        E1000_CTRL_SWDPIO3
+#define E1000_CTRL_MDC            E1000_CTRL_SWDPIN3
+#define E1000_CTRL_PHY_RESET_DIR4 E1000_CTRL_EXT_SDP4_DIR
+#define E1000_CTRL_PHY_RESET4     E1000_CTRL_EXT_SDP4_DATA
+
+/* PHY 1000 MII Register/Bit Definitions */
+/* PHY Registers defined by IEEE */
+#define PHY_CTRL         0x00 /* Control Register */
+#define PHY_STATUS       0x01 /* Status Register */
+#define PHY_ID1          0x02 /* Phy Id Reg (word 1) */
+#define PHY_ID2          0x03 /* Phy Id Reg (word 2) */
+#define PHY_AUTONEG_ADV  0x04 /* Autoneg Advertisement */
+#define PHY_LP_ABILITY   0x05 /* Link Partner Ability (Base Page) */
+#define PHY_AUTONEG_EXP  0x06 /* Autoneg Expansion Reg */
+#define PHY_NEXT_PAGE_TX 0x07 /* Next Page TX */
+#define PHY_LP_NEXT_PAGE 0x08 /* Link Partner Next Page */
+#define PHY_1000T_CTRL   0x09 /* 1000Base-T Control Reg */
+#define PHY_1000T_STATUS 0x0A /* 1000Base-T Status Reg */
+#define PHY_EXT_STATUS   0x0F /* Extended Status Reg */
+
+#define MAX_PHY_REG_ADDRESS        0x1F  /* 5 bit address bus (0-0x1F) */
+#define MAX_PHY_MULTI_PAGE_REG     0xF   /* Registers equal on all pages */
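+
+/* Sketch (an assumption based on the paged layout used by these PHYs, not
+ * code from this header): register offsets above MAX_PHY_MULTI_PAGE_REG
+ * differ per page, so a page select write is expected first, e.g. for an
+ * IGP PHY; write_phy() stands in for the low-level MDIO write.
+ *
+ *     if (reg_addr > MAX_PHY_MULTI_PAGE_REG)
+ *         write_phy(IGP01E1000_PHY_PAGE_SELECT, (u16)reg_addr);
+ *     write_phy(reg_addr & MAX_PHY_REG_ADDRESS, data);
+ */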
+
+/* M88E1000 Specific Registers */
+#define M88E1000_PHY_SPEC_CTRL     0x10  /* PHY Specific Control Register */
+#define M88E1000_PHY_SPEC_STATUS   0x11  /* PHY Specific Status Register */
+#define M88E1000_INT_ENABLE        0x12  /* Interrupt Enable Register */
+#define M88E1000_INT_STATUS        0x13  /* Interrupt Status Register */
+#define M88E1000_EXT_PHY_SPEC_CTRL 0x14  /* Extended PHY Specific Control */
+#define M88E1000_RX_ERR_CNTR       0x15  /* Receive Error Counter */
+
+#define M88E1000_PHY_EXT_CTRL      0x1A  /* PHY extend control register */
+#define M88E1000_PHY_PAGE_SELECT   0x1D  /* Reg 29 for page number setting */
+#define M88E1000_PHY_GEN_CONTROL   0x1E  /* Its meaning depends on reg 29 */
+#define M88E1000_PHY_VCO_REG_BIT8  0x100 /* Bits 8 & 11 are adjusted for */
+#define M88E1000_PHY_VCO_REG_BIT11 0x800    /* improved BER performance */
+
+#define IGP01E1000_IEEE_REGS_PAGE  0x0000
+#define IGP01E1000_IEEE_RESTART_AUTONEG 0x3300
+#define IGP01E1000_IEEE_FORCE_GIGA      0x0140
+
+/* IGP01E1000 Specific Registers */
+#define IGP01E1000_PHY_PORT_CONFIG 0x10 /* PHY Specific Port Config Register */
+#define IGP01E1000_PHY_PORT_STATUS 0x11 /* PHY Specific Status Register */
+#define IGP01E1000_PHY_PORT_CTRL   0x12 /* PHY Specific Control Register */
+#define IGP01E1000_PHY_LINK_HEALTH 0x13 /* PHY Link Health Register */
+#define IGP01E1000_GMII_FIFO       0x14 /* GMII FIFO Register */
+#define IGP01E1000_PHY_CHANNEL_QUALITY 0x15 /* PHY Channel Quality Register */
+#define IGP02E1000_PHY_POWER_MGMT      0x19
+#define IGP01E1000_PHY_PAGE_SELECT     0x1F /* PHY Page Select Core Register */
+
+/* IGP01E1000 AGC Registers - stores the cable length values*/
+#define IGP01E1000_PHY_AGC_A        0x1172
+#define IGP01E1000_PHY_AGC_B        0x1272
+#define IGP01E1000_PHY_AGC_C        0x1472
+#define IGP01E1000_PHY_AGC_D        0x1872
+
+/* IGP02E1000 AGC Registers for cable length values */
+#define IGP02E1000_PHY_AGC_A        0x11B1
+#define IGP02E1000_PHY_AGC_B        0x12B1
+#define IGP02E1000_PHY_AGC_C        0x14B1
+#define IGP02E1000_PHY_AGC_D        0x18B1
+
+/* IGP01E1000 DSP Reset Register */
+#define IGP01E1000_PHY_DSP_RESET   0x1F33
+#define IGP01E1000_PHY_DSP_SET     0x1F71
+#define IGP01E1000_PHY_DSP_FFE     0x1F35
+
+#define IGP01E1000_PHY_CHANNEL_NUM    4
+#define IGP02E1000_PHY_CHANNEL_NUM    4
+
+#define IGP01E1000_PHY_AGC_PARAM_A    0x1171
+#define IGP01E1000_PHY_AGC_PARAM_B    0x1271
+#define IGP01E1000_PHY_AGC_PARAM_C    0x1471
+#define IGP01E1000_PHY_AGC_PARAM_D    0x1871
+
+#define IGP01E1000_PHY_EDAC_MU_INDEX        0xC000
+#define IGP01E1000_PHY_EDAC_SIGN_EXT_9_BITS 0x8000
+
+#define IGP01E1000_PHY_ANALOG_TX_STATE      0x2890
+#define IGP01E1000_PHY_ANALOG_CLASS_A       0x2000
+#define IGP01E1000_PHY_FORCE_ANALOG_ENABLE  0x0004
+#define IGP01E1000_PHY_DSP_FFE_CM_CP        0x0069
+
+#define IGP01E1000_PHY_DSP_FFE_DEFAULT      0x002A
+/* IGP01E1000 PCS Initialization register - stores the polarity status when
+ * speed = 1000 Mbps. */
+#define IGP01E1000_PHY_PCS_INIT_REG  0x00B4
+#define IGP01E1000_PHY_PCS_CTRL_REG  0x00B5
+
+#define IGP01E1000_ANALOG_REGS_PAGE  0x20C0
+
+/* Bits...
+ * 15-5: page
+ * 4-0: register offset
+ */
+#define GG82563_PAGE_SHIFT        5
+#define GG82563_REG(page, reg)    \
+        (((page) << GG82563_PAGE_SHIFT) | ((reg) & MAX_PHY_REG_ADDRESS))
+#define GG82563_MIN_ALT_REG       30
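+
+/* Worked example (illustrative only):
+ *     GG82563_REG(193, 16) == (193 << 5) | 16 == 0x1830
+ * i.e. the page number occupies bits 15:5 and the register offset the
+ * low five bits, matching the layout described above. */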
+
+/* GG82563 Specific Registers */
+#define GG82563_PHY_SPEC_CTRL           \
+        GG82563_REG(0, 16) /* PHY Specific Control */
+#define GG82563_PHY_SPEC_STATUS         \
+        GG82563_REG(0, 17) /* PHY Specific Status */
+#define GG82563_PHY_INT_ENABLE          \
+        GG82563_REG(0, 18) /* Interrupt Enable */
+#define GG82563_PHY_SPEC_STATUS_2       \
+        GG82563_REG(0, 19) /* PHY Specific Status 2 */
+#define GG82563_PHY_RX_ERR_CNTR         \
+        GG82563_REG(0, 21) /* Receive Error Counter */
+#define GG82563_PHY_PAGE_SELECT         \
+        GG82563_REG(0, 22) /* Page Select */
+#define GG82563_PHY_SPEC_CTRL_2         \
+        GG82563_REG(0, 26) /* PHY Specific Control 2 */
+#define GG82563_PHY_PAGE_SELECT_ALT     \
+        GG82563_REG(0, 29) /* Alternate Page Select */
+#define GG82563_PHY_TEST_CLK_CTRL       \
+        GG82563_REG(0, 30) /* Test Clock Control (use reg. 29 to select) */
+
+#define GG82563_PHY_MAC_SPEC_CTRL       \
+        GG82563_REG(2, 21) /* MAC Specific Control Register */
+#define GG82563_PHY_MAC_SPEC_CTRL_2     \
+        GG82563_REG(2, 26) /* MAC Specific Control 2 */
+
+#define GG82563_PHY_DSP_DISTANCE    \
+        GG82563_REG(5, 26) /* DSP Distance */
+
+/* Page 193 - Port Control Registers */
+#define GG82563_PHY_KMRN_MODE_CTRL   \
+        GG82563_REG(193, 16) /* Kumeran Mode Control */
+#define GG82563_PHY_PORT_RESET          \
+        GG82563_REG(193, 17) /* Port Reset */
+#define GG82563_PHY_REVISION_ID         \
+        GG82563_REG(193, 18) /* Revision ID */
+#define GG82563_PHY_DEVICE_ID           \
+        GG82563_REG(193, 19) /* Device ID */
+#define GG82563_PHY_PWR_MGMT_CTRL       \
+        GG82563_REG(193, 20) /* Power Management Control */
+#define GG82563_PHY_RATE_ADAPT_CTRL     \
+        GG82563_REG(193, 25) /* Rate Adaptation Control */
+
+/* Page 194 - KMRN Registers */
+#define GG82563_PHY_KMRN_FIFO_CTRL_STAT \
+        GG82563_REG(194, 16) /* FIFO's Control/Status */
+#define GG82563_PHY_KMRN_CTRL           \
+        GG82563_REG(194, 17) /* Control */
+#define GG82563_PHY_INBAND_CTRL         \
+        GG82563_REG(194, 18) /* Inband Control */
+#define GG82563_PHY_KMRN_DIAGNOSTIC     \
+        GG82563_REG(194, 19) /* Diagnostic */
+#define GG82563_PHY_ACK_TIMEOUTS        \
+        GG82563_REG(194, 20) /* Acknowledge Timeouts */
+#define GG82563_PHY_ADV_ABILITY         \
+        GG82563_REG(194, 21) /* Advertised Ability */
+#define GG82563_PHY_LINK_PARTNER_ADV_ABILITY \
+        GG82563_REG(194, 23) /* Link Partner Advertised Ability */
+#define GG82563_PHY_ADV_NEXT_PAGE       \
+        GG82563_REG(194, 24) /* Advertised Next Page */
+#define GG82563_PHY_LINK_PARTNER_ADV_NEXT_PAGE \
+        GG82563_REG(194, 25) /* Link Partner Advertised Next page */
+#define GG82563_PHY_KMRN_MISC           \
+        GG82563_REG(194, 26) /* Misc. */
+
+/* PHY Control Register */
+#define MII_CR_SPEED_SELECT_MSB 0x0040  /* bits 6,13: 10=1000, 01=100, 00=10 */
+#define MII_CR_COLL_TEST_ENABLE 0x0080  /* Collision test enable */
+#define MII_CR_FULL_DUPLEX      0x0100  /* FDX =1, half duplex =0 */
+#define MII_CR_RESTART_AUTO_NEG 0x0200  /* Restart auto negotiation */
+#define MII_CR_ISOLATE          0x0400  /* Isolate PHY from MII */
+#define MII_CR_POWER_DOWN       0x0800  /* Power down */
+#define MII_CR_AUTO_NEG_EN      0x1000  /* Auto Neg Enable */
+#define MII_CR_SPEED_SELECT_LSB 0x2000  /* bits 6,13: 10=1000, 01=100, 00=10 */
+#define MII_CR_LOOPBACK         0x4000  /* 0 = normal, 1 = loopback */
+#define MII_CR_RESET            0x8000  /* 0 = normal, 1 = PHY reset */
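+
+/* Example (illustrative only): per the speed encoding above, forcing
+ * 100 Mb/s full duplex means setting the LSB speed-select bit together
+ * with the full-duplex bit:
+ *
+ *     phy_ctrl = MII_CR_SPEED_SELECT_LSB | MII_CR_FULL_DUPLEX;   // 0x2100
+ */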
+
+/* PHY Status Register */
+#define MII_SR_EXTENDED_CAPS     0x0001 /* Extended register capabilities */
+#define MII_SR_JABBER_DETECT     0x0002 /* Jabber Detected */
+#define MII_SR_LINK_STATUS       0x0004 /* Link Status 1 = link */
+#define MII_SR_AUTONEG_CAPS      0x0008 /* Auto Neg Capable */
+#define MII_SR_REMOTE_FAULT      0x0010 /* Remote Fault Detect */
+#define MII_SR_AUTONEG_COMPLETE  0x0020 /* Auto Neg Complete */
+#define MII_SR_PREAMBLE_SUPPRESS 0x0040 /* Preamble may be suppressed */
+#define MII_SR_EXTENDED_STATUS   0x0100 /* Ext. status info in Reg 0x0F */
+#define MII_SR_100T2_HD_CAPS     0x0200 /* 100T2 Half Duplex Capable */
+#define MII_SR_100T2_FD_CAPS     0x0400 /* 100T2 Full Duplex Capable */
+#define MII_SR_10T_HD_CAPS       0x0800 /* 10T   Half Duplex Capable */
+#define MII_SR_10T_FD_CAPS       0x1000 /* 10T   Full Duplex Capable */
+#define MII_SR_100X_HD_CAPS      0x2000 /* 100X  Half Duplex Capable */
+#define MII_SR_100X_FD_CAPS      0x4000 /* 100X  Full Duplex Capable */
+#define MII_SR_100T4_CAPS        0x8000 /* 100T4 Capable */
+
+/* Autoneg Advertisement Register */
+#define NWAY_AR_SELECTOR_FIELD 0x0001   /* indicates IEEE 802.3 CSMA/CD */
+#define NWAY_AR_10T_HD_CAPS    0x0020   /* 10T   Half Duplex Capable */
+#define NWAY_AR_10T_FD_CAPS    0x0040   /* 10T   Full Duplex Capable */
+#define NWAY_AR_100TX_HD_CAPS  0x0080   /* 100TX Half Duplex Capable */
+#define NWAY_AR_100TX_FD_CAPS  0x0100   /* 100TX Full Duplex Capable */
+#define NWAY_AR_100T4_CAPS     0x0200   /* 100T4 Capable */
+#define NWAY_AR_PAUSE          0x0400   /* Pause operation desired */
+#define NWAY_AR_ASM_DIR        0x0800   /* Asymmetric Pause Direction bit */
+#define NWAY_AR_REMOTE_FAULT   0x2000   /* Remote Fault detected */
+#define NWAY_AR_NEXT_PAGE      0x8000   /* Next Page ability supported */
+
+/* Link Partner Ability Register (Base Page) */
+#define NWAY_LPAR_SELECTOR_FIELD 0x0000 /* LP protocol selector field */
+#define NWAY_LPAR_10T_HD_CAPS    0x0020 /* LP is 10T   Half Duplex Capable */
+#define NWAY_LPAR_10T_FD_CAPS    0x0040 /* LP is 10T   Full Duplex Capable */
+#define NWAY_LPAR_100TX_HD_CAPS  0x0080 /* LP is 100TX Half Duplex Capable */
+#define NWAY_LPAR_100TX_FD_CAPS  0x0100 /* LP is 100TX Full Duplex Capable */
+#define NWAY_LPAR_100T4_CAPS     0x0200 /* LP is 100T4 Capable */
+#define NWAY_LPAR_PAUSE          0x0400 /* LP Pause operation desired */
+#define NWAY_LPAR_ASM_DIR        0x0800 /* LP Asymmetric Pause Direction bit */
+#define NWAY_LPAR_REMOTE_FAULT   0x2000 /* LP has detected Remote Fault */
+#define NWAY_LPAR_ACKNOWLEDGE    0x4000 /* LP has rx'd link code word */
+#define NWAY_LPAR_NEXT_PAGE      0x8000 /* Next Page ability supported */
+
+/* Autoneg Expansion Register */
+#define NWAY_ER_LP_NWAY_CAPS      0x0001 /* LP has Auto Neg Capability */
+#define NWAY_ER_PAGE_RXD          0x0002 /* New Page received from LP */
+#define NWAY_ER_NEXT_PAGE_CAPS    0x0004 /* Local device is Next Page able */
+#define NWAY_ER_LP_NEXT_PAGE_CAPS 0x0008 /* LP is Next Page able */
+#define NWAY_ER_PAR_DETECT_FAULT  0x0010 /* Parallel Detection Fault detected */
+
+/* Next Page TX Register */
+#define NPTX_MSG_CODE_FIELD 0x0001 /* NP msg code or unformatted data */
+#define NPTX_TOGGLE         0x0800 /* Toggles between exchanges
+                                    * of different NP
+                                    */
+#define NPTX_ACKNOWLDGE2    0x1000 /* 1 = will comply with msg
+                                    * 0 = cannot comply with msg
+                                    */
+#define NPTX_MSG_PAGE       0x2000 /* formatted(1)/unformatted(0) pg */
+#define NPTX_NEXT_PAGE      0x8000 /* 1 = addition NP will follow
+                                    * 0 = sending last NP
+                                    */
+
+/* Link Partner Next Page Register */
+#define LP_RNPR_MSG_CODE_FIELD 0x0001 /* NP msg code or unformatted data */
+#define LP_RNPR_TOGGLE         0x0800 /* Toggles between exchanges
+                                       * of different NP
+                                       */
+#define LP_RNPR_ACKNOWLDGE2    0x1000 /* 1 = will comply with msg
+                                       * 0 = cannot comply with msg
+                                       */
+#define LP_RNPR_MSG_PAGE       0x2000  /* formatted(1)/unformatted(0) pg */
+#define LP_RNPR_ACKNOWLDGE     0x4000  /* 1 = ACK / 0 = NO ACK */
+#define LP_RNPR_NEXT_PAGE      0x8000  /* 1 = addition NP will follow
+                                        * 0 = sending last NP
+                                        */
+
+/* 1000BASE-T Control Register */
+#define CR_1000T_ASYM_PAUSE      0x0080 /* Advertise asymmetric pause bit */
+#define CR_1000T_HD_CAPS         0x0100 /* Advertise 1000T HD capability */
+#define CR_1000T_FD_CAPS         0x0200 /* Advertise 1000T FD capability  */
+#define CR_1000T_REPEATER_DTE    0x0400 /* 1=Repeater/switch device port */
+                                        /* 0=DTE device */
+#define CR_1000T_MS_VALUE        0x0800 /* 1=Configure PHY as Master */
+                                        /* 0=Configure PHY as Slave */
+#define CR_1000T_MS_ENABLE       0x1000 /* 1=Master/Slave manual config value */
+                                        /* 0=Automatic Master/Slave config */
+#define CR_1000T_TEST_MODE_NORMAL 0x0000 /* Normal Operation */
+#define CR_1000T_TEST_MODE_1     0x2000 /* Transmit Waveform test */
+#define CR_1000T_TEST_MODE_2     0x4000 /* Master Transmit Jitter test */
+#define CR_1000T_TEST_MODE_3     0x6000 /* Slave Transmit Jitter test */
+#define CR_1000T_TEST_MODE_4     0x8000 /* Transmitter Distortion test */
+
+/* 1000BASE-T Status Register */
+#define SR_1000T_IDLE_ERROR_CNT   0x00FF /* Num idle errors since last read */
+#define SR_1000T_ASYM_PAUSE_DIR   0x0100 /* LP asymmetric pause direction bit */
+#define SR_1000T_LP_HD_CAPS       0x0400 /* LP is 1000T HD capable */
+#define SR_1000T_LP_FD_CAPS       0x0800 /* LP is 1000T FD capable */
+#define SR_1000T_REMOTE_RX_STATUS 0x1000 /* Remote receiver OK */
+#define SR_1000T_LOCAL_RX_STATUS  0x2000 /* Local receiver OK */
+#define SR_1000T_MS_CONFIG_RES    0x4000 /* 1=Local TX is Master, 0=Slave */
+#define SR_1000T_MS_CONFIG_FAULT  0x8000 /* Master/Slave config fault */
+#define SR_1000T_REMOTE_RX_STATUS_SHIFT          12
+#define SR_1000T_LOCAL_RX_STATUS_SHIFT           13
+#define SR_1000T_PHY_EXCESSIVE_IDLE_ERR_COUNT    5
+#define FFE_IDLE_ERR_COUNT_TIMEOUT_20            20
+#define FFE_IDLE_ERR_COUNT_TIMEOUT_100           100
+
+/* Extended Status Register */
+#define IEEE_ESR_1000T_HD_CAPS 0x1000 /* 1000T HD capable */
+#define IEEE_ESR_1000T_FD_CAPS 0x2000 /* 1000T FD capable */
+#define IEEE_ESR_1000X_HD_CAPS 0x4000 /* 1000X HD capable */
+#define IEEE_ESR_1000X_FD_CAPS 0x8000 /* 1000X FD capable */
+
+#define PHY_TX_POLARITY_MASK   0x0100 /* register 10h bit 8 (polarity bit) */
+#define PHY_TX_NORMAL_POLARITY 0      /* register 10h bit 8 (normal polarity) */
+
+#define AUTO_POLARITY_DISABLE  0x0010 /* register 11h bit 4 */
+                                      /* (0=enable, 1=disable) */
+
+/* M88E1000 PHY Specific Control Register */
+#define M88E1000_PSCR_JABBER_DISABLE    0x0001 /* 1=Jabber Function disabled */
+#define M88E1000_PSCR_POLARITY_REVERSAL 0x0002 /* 1=Polarity Reversal enabled */
+#define M88E1000_PSCR_SQE_TEST          0x0004 /* 1=SQE Test enabled */
+#define M88E1000_PSCR_CLK125_DISABLE    0x0010 /* 1=CLK125 low,
+                                                * 0=CLK125 toggling
+                                                */
+#define M88E1000_PSCR_MDI_MANUAL_MODE  0x0000  /* MDI Crossover Mode bits 6:5 */
+                                               /* Manual MDI configuration */
+#define M88E1000_PSCR_MDIX_MANUAL_MODE 0x0020  /* Manual MDIX configuration */
+#define M88E1000_PSCR_AUTO_X_1000T     0x0040  /* 1000BASE-T: Auto crossover,
+                                                *  100BASE-TX/10BASE-T:
+                                                *  MDI Mode
+                                                */
+#define M88E1000_PSCR_AUTO_X_MODE      0x0060  /* Auto crossover enabled
+                                                * all speeds.
+                                                */
+#define M88E1000_PSCR_10BT_EXT_DIST_ENABLE 0x0080
+                                        /* 1=Enable Extended 10BASE-T distance
+                                         * (Lower 10BASE-T RX Threshold)
+                                         * 0=Normal 10BASE-T RX Threshold */
+#define M88E1000_PSCR_MII_5BIT_ENABLE      0x0100
+                                        /* 1=5-Bit interface in 100BASE-TX
+                                         * 0=MII interface in 100BASE-TX */
+#define M88E1000_PSCR_SCRAMBLER_DISABLE    0x0200 /* 1=Scrambler disable */
+#define M88E1000_PSCR_FORCE_LINK_GOOD      0x0400 /* 1=Force link good */
+#define M88E1000_PSCR_ASSERT_CRS_ON_TX     0x0800 /* 1=Assert CRS on Transmit */
+
+#define M88E1000_PSCR_POLARITY_REVERSAL_SHIFT    1
+#define M88E1000_PSCR_AUTO_X_MODE_SHIFT          5
+#define M88E1000_PSCR_10BT_EXT_DIST_ENABLE_SHIFT 7
+
+/* M88E1000 PHY Specific Status Register */
+#define M88E1000_PSSR_JABBER             0x0001 /* 1=Jabber */
+#define M88E1000_PSSR_REV_POLARITY       0x0002 /* 1=Polarity reversed */
+#define M88E1000_PSSR_DOWNSHIFT          0x0020 /* 1=Downshifted */
+#define M88E1000_PSSR_MDIX               0x0040 /* 1=MDIX; 0=MDI */
+#define M88E1000_PSSR_CABLE_LENGTH       0x0380 /* 0=<50M;1=50-80M;2=80-110M;
+                                            * 3=110-140M;4=>140M */
+#define M88E1000_PSSR_LINK               0x0400 /* 1=Link up, 0=Link down */
+#define M88E1000_PSSR_SPD_DPLX_RESOLVED  0x0800 /* 1=Speed & Duplex resolved */
+#define M88E1000_PSSR_PAGE_RCVD          0x1000 /* 1=Page received */
+#define M88E1000_PSSR_DPLX               0x2000 /* 1=Duplex 0=Half Duplex */
+#define M88E1000_PSSR_SPEED              0xC000 /* Speed, bits 14:15 */
+#define M88E1000_PSSR_10MBS              0x0000 /* 00=10Mbs */
+#define M88E1000_PSSR_100MBS             0x4000 /* 01=100Mbs */
+#define M88E1000_PSSR_1000MBS            0x8000 /* 10=1000Mbs */
+
+#define M88E1000_PSSR_REV_POLARITY_SHIFT 1
+#define M88E1000_PSSR_DOWNSHIFT_SHIFT    5
+#define M88E1000_PSSR_MDIX_SHIFT         6
+#define M88E1000_PSSR_CABLE_LENGTH_SHIFT 7
+
+/* M88E1000 Extended PHY Specific Control Register */
+#define M88E1000_EPSCR_FIBER_LOOPBACK 0x4000 /* 1=Fiber loopback */
+#define M88E1000_EPSCR_DOWN_NO_IDLE   0x8000 /* 1=Lost lock detect enabled.
+                                              * Will assert lost lock and bring
+                                              * link down if idle not seen
+                                              * within 1ms in 1000BASE-T
+                                              */
+/* Number of times we will attempt to autonegotiate before downshifting if we
+ * are the master */
+#define M88E1000_EPSCR_MASTER_DOWNSHIFT_MASK 0x0C00
+#define M88E1000_EPSCR_MASTER_DOWNSHIFT_1X   0x0000
+#define M88E1000_EPSCR_MASTER_DOWNSHIFT_2X   0x0400
+#define M88E1000_EPSCR_MASTER_DOWNSHIFT_3X   0x0800
+#define M88E1000_EPSCR_MASTER_DOWNSHIFT_4X   0x0C00
+/* Number of times we will attempt to autonegotiate before downshifting if we
+ * are the slave */
+#define M88E1000_EPSCR_SLAVE_DOWNSHIFT_MASK  0x0300
+#define M88E1000_EPSCR_SLAVE_DOWNSHIFT_DIS   0x0000
+#define M88E1000_EPSCR_SLAVE_DOWNSHIFT_1X    0x0100
+#define M88E1000_EPSCR_SLAVE_DOWNSHIFT_2X    0x0200
+#define M88E1000_EPSCR_SLAVE_DOWNSHIFT_3X    0x0300
+#define M88E1000_EPSCR_TX_CLK_2_5     0x0060 /* 2.5 MHz TX_CLK */
+#define M88E1000_EPSCR_TX_CLK_25      0x0070 /* 25  MHz TX_CLK */
+#define M88E1000_EPSCR_TX_CLK_0       0x0000 /* NO  TX_CLK */
+
+/* M88EC018 Rev 2 specific DownShift settings */
+#define M88EC018_EPSCR_DOWNSHIFT_COUNTER_MASK  0x0E00
+#define M88EC018_EPSCR_DOWNSHIFT_COUNTER_1X    0x0000
+#define M88EC018_EPSCR_DOWNSHIFT_COUNTER_2X    0x0200
+#define M88EC018_EPSCR_DOWNSHIFT_COUNTER_3X    0x0400
+#define M88EC018_EPSCR_DOWNSHIFT_COUNTER_4X    0x0600
+#define M88EC018_EPSCR_DOWNSHIFT_COUNTER_5X    0x0800
+#define M88EC018_EPSCR_DOWNSHIFT_COUNTER_6X    0x0A00
+#define M88EC018_EPSCR_DOWNSHIFT_COUNTER_7X    0x0C00
+#define M88EC018_EPSCR_DOWNSHIFT_COUNTER_8X    0x0E00
+
+/* IGP01E1000 Specific Port Config Register - R/W */
+#define IGP01E1000_PSCFR_AUTO_MDIX_PAR_DETECT  0x0010
+#define IGP01E1000_PSCFR_PRE_EN                0x0020
+#define IGP01E1000_PSCFR_SMART_SPEED           0x0080
+#define IGP01E1000_PSCFR_DISABLE_TPLOOPBACK    0x0100
+#define IGP01E1000_PSCFR_DISABLE_JABBER        0x0400
+#define IGP01E1000_PSCFR_DISABLE_TRANSMIT      0x2000
+
+/* IGP01E1000 Specific Port Status Register - R/O */
+#define IGP01E1000_PSSR_AUTONEG_FAILED         0x0001 /* RO LH SC */
+#define IGP01E1000_PSSR_POLARITY_REVERSED      0x0002
+#define IGP01E1000_PSSR_CABLE_LENGTH           0x007C
+#define IGP01E1000_PSSR_FULL_DUPLEX            0x0200
+#define IGP01E1000_PSSR_LINK_UP                0x0400
+#define IGP01E1000_PSSR_MDIX                   0x0800
+#define IGP01E1000_PSSR_SPEED_MASK             0xC000 /* speed bits mask */
+#define IGP01E1000_PSSR_SPEED_10MBPS           0x4000
+#define IGP01E1000_PSSR_SPEED_100MBPS          0x8000
+#define IGP01E1000_PSSR_SPEED_1000MBPS         0xC000
+#define IGP01E1000_PSSR_CABLE_LENGTH_SHIFT     0x0002 /* shift right 2 */
+#define IGP01E1000_PSSR_MDIX_SHIFT             0x000B /* shift right 11 */
+
+/* IGP01E1000 Specific Port Control Register - R/W */
+#define IGP01E1000_PSCR_TP_LOOPBACK            0x0010
+#define IGP01E1000_PSCR_CORRECT_NC_SCMBLR      0x0200
+#define IGP01E1000_PSCR_TEN_CRS_SELECT         0x0400
+#define IGP01E1000_PSCR_FLIP_CHIP              0x0800
+#define IGP01E1000_PSCR_AUTO_MDIX              0x1000
+#define IGP01E1000_PSCR_FORCE_MDI_MDIX         0x2000 /* 0-MDI, 1-MDIX */
+
+/* IGP01E1000 Specific Port Link Health Register */
+#define IGP01E1000_PLHR_SS_DOWNGRADE           0x8000
+#define IGP01E1000_PLHR_GIG_SCRAMBLER_ERROR    0x4000
+#define IGP01E1000_PLHR_MASTER_FAULT           0x2000
+#define IGP01E1000_PLHR_MASTER_RESOLUTION      0x1000
+#define IGP01E1000_PLHR_GIG_REM_RCVR_NOK       0x0800 /* LH */
+#define IGP01E1000_PLHR_IDLE_ERROR_CNT_OFLOW   0x0400 /* LH */
+#define IGP01E1000_PLHR_DATA_ERR_1             0x0200 /* LH */
+#define IGP01E1000_PLHR_DATA_ERR_0             0x0100
+#define IGP01E1000_PLHR_AUTONEG_FAULT          0x0040
+#define IGP01E1000_PLHR_AUTONEG_ACTIVE         0x0010
+#define IGP01E1000_PLHR_VALID_CHANNEL_D        0x0008
+#define IGP01E1000_PLHR_VALID_CHANNEL_C        0x0004
+#define IGP01E1000_PLHR_VALID_CHANNEL_B        0x0002
+#define IGP01E1000_PLHR_VALID_CHANNEL_A        0x0001
+
+/* IGP01E1000 Channel Quality Register */
+#define IGP01E1000_MSE_CHANNEL_D        0x000F
+#define IGP01E1000_MSE_CHANNEL_C        0x00F0
+#define IGP01E1000_MSE_CHANNEL_B        0x0F00
+#define IGP01E1000_MSE_CHANNEL_A        0xF000
+
+#define IGP02E1000_PM_SPD                         0x0001  /* Smart Power Down */
+#define IGP02E1000_PM_D3_LPLU                     0x0004  /* Enable LPLU in non-D0a modes */
+#define IGP02E1000_PM_D0_LPLU                     0x0002  /* Enable LPLU in D0a mode */
+
+/* IGP01E1000 DSP reset macros */
+#define DSP_RESET_ENABLE     0x0
+#define DSP_RESET_DISABLE    0x2
+#define E1000_MAX_DSP_RESETS 10
+
+/* IGP01E1000 & IGP02E1000 AGC Registers */
+
+#define IGP01E1000_AGC_LENGTH_SHIFT 7         /* Coarse - 13:11, Fine - 10:7 */
+#define IGP02E1000_AGC_LENGTH_SHIFT 9         /* Coarse - 15:13, Fine - 12:9 */
+
+/* IGP02E1000 AGC Register Length 9-bit mask */
+#define IGP02E1000_AGC_LENGTH_MASK  0x7F
+
+/* 7 bits (3 Coarse + 4 Fine) --> 128 possible values */
+#define IGP01E1000_AGC_LENGTH_TABLE_SIZE 128
+#define IGP02E1000_AGC_LENGTH_TABLE_SIZE 113
+
+/* The precision error of the cable length is +/- 10 meters */
+#define IGP01E1000_AGC_RANGE    10
+#define IGP02E1000_AGC_RANGE    15
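+
+/* Worked example (illustrative only): for IGP01E1000 the 7-bit length
+ * index sits in AGC register bits 13:7, so a raw AGC reading of 0x0C80
+ * gives index (0x0C80 >> IGP01E1000_AGC_LENGTH_SHIFT) == 25, which is
+ * then looked up in the driver's IGP cable length table. */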
+
+/* IGP01E1000 PCS Initialization register */
+/* bits 6:3 in the PCS registers store the channel polarity */
+#define IGP01E1000_PHY_POLARITY_MASK    0x0078
+
+/* IGP01E1000 GMII FIFO Register */
+#define IGP01E1000_GMII_FLEX_SPD               0x10 /* Enable flexible speed
+                                                     * on Link-Up */
+#define IGP01E1000_GMII_SPD                    0x20 /* Enable SPD */
+
+/* IGP01E1000 Analog Register */
+#define IGP01E1000_ANALOG_SPARE_FUSE_STATUS       0x20D1
+#define IGP01E1000_ANALOG_FUSE_STATUS             0x20D0
+#define IGP01E1000_ANALOG_FUSE_CONTROL            0x20DC
+#define IGP01E1000_ANALOG_FUSE_BYPASS             0x20DE
+
+#define IGP01E1000_ANALOG_FUSE_POLY_MASK            0xF000
+#define IGP01E1000_ANALOG_FUSE_FINE_MASK            0x0F80
+#define IGP01E1000_ANALOG_FUSE_COARSE_MASK          0x0070
+#define IGP01E1000_ANALOG_SPARE_FUSE_ENABLED        0x0100
+#define IGP01E1000_ANALOG_FUSE_ENABLE_SW_CONTROL    0x0002
+
+#define IGP01E1000_ANALOG_FUSE_COARSE_THRESH        0x0040
+#define IGP01E1000_ANALOG_FUSE_COARSE_10            0x0010
+#define IGP01E1000_ANALOG_FUSE_FINE_1               0x0080
+#define IGP01E1000_ANALOG_FUSE_FINE_10              0x0500
+
+/* GG82563 PHY Specific Control Register (Page 0, Register 16) */
+#define GG82563_PSCR_DISABLE_JABBER             0x0001 /* 1=Disable Jabber */
+#define GG82563_PSCR_POLARITY_REVERSAL_DISABLE  0x0002 /* 1=Polarity Reversal Disabled */
+#define GG82563_PSCR_POWER_DOWN                 0x0004 /* 1=Power Down */
+#define GG82563_PSCR_COPPER_TRANSMITER_DISABLE  0x0008 /* 1=Transmitter Disabled */
+#define GG82563_PSCR_CROSSOVER_MODE_MASK        0x0060
+#define GG82563_PSCR_CROSSOVER_MODE_MDI         0x0000 /* 00=Manual MDI configuration */
+#define GG82563_PSCR_CROSSOVER_MODE_MDIX        0x0020 /* 01=Manual MDIX configuration */
+#define GG82563_PSCR_CROSSOVER_MODE_AUTO        0x0060 /* 11=Automatic crossover */
+#define GG82563_PSCR_ENALBE_EXTENDED_DISTANCE   0x0080 /* 1=Enable Extended Distance */
+#define GG82563_PSCR_ENERGY_DETECT_MASK         0x0300
+#define GG82563_PSCR_ENERGY_DETECT_OFF          0x0000 /* 00,01=Off */
+#define GG82563_PSCR_ENERGY_DETECT_RX           0x0200 /* 10=Sense on Rx only (Energy Detect) */
+#define GG82563_PSCR_ENERGY_DETECT_RX_TM        0x0300 /* 11=Sense and Tx NLP */
+#define GG82563_PSCR_FORCE_LINK_GOOD            0x0400 /* 1=Force Link Good */
+#define GG82563_PSCR_DOWNSHIFT_ENABLE           0x0800 /* 1=Enable Downshift */
+#define GG82563_PSCR_DOWNSHIFT_COUNTER_MASK     0x7000
+#define GG82563_PSCR_DOWNSHIFT_COUNTER_SHIFT    12
+
+/* PHY Specific Status Register (Page 0, Register 17) */
+#define GG82563_PSSR_JABBER                0x0001 /* 1=Jabber */
+#define GG82563_PSSR_POLARITY              0x0002 /* 1=Polarity Reversed */
+#define GG82563_PSSR_LINK                  0x0008 /* 1=Link is Up */
+#define GG82563_PSSR_ENERGY_DETECT         0x0010 /* 1=Sleep, 0=Active */
+#define GG82563_PSSR_DOWNSHIFT             0x0020 /* 1=Downshift */
+#define GG82563_PSSR_CROSSOVER_STATUS      0x0040 /* 1=MDIX, 0=MDI */
+#define GG82563_PSSR_RX_PAUSE_ENABLED      0x0100 /* 1=Receive Pause Enabled */
+#define GG82563_PSSR_TX_PAUSE_ENABLED      0x0200 /* 1=Transmit Pause Enabled */
+#define GG82563_PSSR_LINK_UP               0x0400 /* 1=Link Up */
+#define GG82563_PSSR_SPEED_DUPLEX_RESOLVED 0x0800 /* 1=Resolved */
+#define GG82563_PSSR_PAGE_RECEIVED         0x1000 /* 1=Page Received */
+#define GG82563_PSSR_DUPLEX                0x2000 /* 1=Full-Duplex */
+#define GG82563_PSSR_SPEED_MASK            0xC000
+#define GG82563_PSSR_SPEED_10MBPS          0x0000 /* 00=10Mbps */
+#define GG82563_PSSR_SPEED_100MBPS         0x4000 /* 01=100Mbps */
+#define GG82563_PSSR_SPEED_1000MBPS        0x8000 /* 10=1000Mbps */
+
+/* PHY Specific Status Register 2 (Page 0, Register 19) */
+#define GG82563_PSSR2_JABBER                0x0001 /* 1=Jabber */
+#define GG82563_PSSR2_POLARITY_CHANGED      0x0002 /* 1=Polarity Changed */
+#define GG82563_PSSR2_ENERGY_DETECT_CHANGED 0x0010 /* 1=Energy Detect Changed */
+#define GG82563_PSSR2_DOWNSHIFT_INTERRUPT   0x0020 /* 1=Downshift Detected */
+#define GG82563_PSSR2_MDI_CROSSOVER_CHANGE  0x0040 /* 1=Crossover Changed */
+#define GG82563_PSSR2_FALSE_CARRIER         0x0100 /* 1=False Carrier */
+#define GG82563_PSSR2_SYMBOL_ERROR          0x0200 /* 1=Symbol Error */
+#define GG82563_PSSR2_LINK_STATUS_CHANGED   0x0400 /* 1=Link Status Changed */
+#define GG82563_PSSR2_AUTO_NEG_COMPLETED    0x0800 /* 1=Auto-Neg Completed */
+#define GG82563_PSSR2_PAGE_RECEIVED         0x1000 /* 1=Page Received */
+#define GG82563_PSSR2_DUPLEX_CHANGED        0x2000 /* 1=Duplex Changed */
+#define GG82563_PSSR2_SPEED_CHANGED         0x4000 /* 1=Speed Changed */
+#define GG82563_PSSR2_AUTO_NEG_ERROR        0x8000 /* 1=Auto-Neg Error */
+
+/* PHY Specific Control Register 2 (Page 0, Register 26) */
+#define GG82563_PSCR2_10BT_POLARITY_FORCE           0x0002 /* 1=Force Negative Polarity */
+#define GG82563_PSCR2_1000MB_TEST_SELECT_MASK       0x000C
+#define GG82563_PSCR2_1000MB_TEST_SELECT_NORMAL     0x0000 /* 00,01=Normal Operation */
+#define GG82563_PSCR2_1000MB_TEST_SELECT_112NS      0x0008 /* 10=Select 112ns Sequence */
+#define GG82563_PSCR2_1000MB_TEST_SELECT_16NS       0x000C /* 11=Select 16ns Sequence */
+#define GG82563_PSCR2_REVERSE_AUTO_NEG              0x2000 /* 1=Reverse Auto-Negotiation */
+#define GG82563_PSCR2_1000BT_DISABLE                0x4000 /* 1=Disable 1000BASE-T */
+#define GG82563_PSCR2_TRANSMITER_TYPE_MASK          0x8000
+#define GG82563_PSCR2_TRANSMITTER_TYPE_CLASS_B      0x0000 /* 0=Class B */
+#define GG82563_PSCR2_TRANSMITTER_TYPE_CLASS_A      0x8000 /* 1=Class A */
+
+/* MAC Specific Control Register (Page 2, Register 21) */
+/* Tx clock speed for Link Down and 1000BASE-T for the following speeds */
+#define GG82563_MSCR_TX_CLK_MASK                    0x0007
+#define GG82563_MSCR_TX_CLK_10MBPS_2_5MHZ           0x0004
+#define GG82563_MSCR_TX_CLK_100MBPS_25MHZ           0x0005
+#define GG82563_MSCR_TX_CLK_1000MBPS_2_5MHZ         0x0006
+#define GG82563_MSCR_TX_CLK_1000MBPS_25MHZ          0x0007
+
+#define GG82563_MSCR_ASSERT_CRS_ON_TX               0x0010 /* 1=Assert */
+
+/* DSP Distance Register (Page 5, Register 26) */
+#define GG82563_DSPD_CABLE_LENGTH               0x0007 /* 0 = <50M;
+                                                          1 = 50-80M;
+                                                          2 = 80-110M;
+                                                          3 = 110-140M;
+                                                          4 = >140M */
+
+/* Kumeran Mode Control Register (Page 193, Register 16) */
+#define GG82563_KMCR_PHY_LEDS_EN                    0x0020 /* 1=PHY LEDs, 0=Kumeran Inband LEDs */
+#define GG82563_KMCR_FORCE_LINK_UP                  0x0040 /* 1=Force Link Up */
+#define GG82563_KMCR_SUPPRESS_SGMII_EPD_EXT         0x0080
+#define GG82563_KMCR_MDIO_BUS_SPEED_SELECT_MASK     0x0400
+#define GG82563_KMCR_MDIO_BUS_SPEED_SELECT          0x0400 /* 1=6.25MHz, 0=0.8MHz */
+#define GG82563_KMCR_PASS_FALSE_CARRIER             0x0800
+
+/* Power Management Control Register (Page 193, Register 20) */
+#define GG82563_PMCR_ENABLE_ELECTRICAL_IDLE         0x0001 /* 1=Enable SERDES Electrical Idle */
+#define GG82563_PMCR_DISABLE_PORT                   0x0002 /* 1=Disable Port */
+#define GG82563_PMCR_DISABLE_SERDES                 0x0004 /* 1=Disable SERDES */
+#define GG82563_PMCR_REVERSE_AUTO_NEG               0x0008 /* 1=Enable Reverse Auto-Negotiation */
+#define GG82563_PMCR_DISABLE_1000_NON_D0            0x0010 /* 1=Disable 1000Mbps Auto-Neg in non D0 */
+#define GG82563_PMCR_DISABLE_1000                   0x0020 /* 1=Disable 1000Mbps Auto-Neg Always */
+#define GG82563_PMCR_REVERSE_AUTO_NEG_D0A           0x0040 /* 1=Enable D0a Reverse Auto-Negotiation */
+#define GG82563_PMCR_FORCE_POWER_STATE              0x0080 /* 1=Force Power State */
+#define GG82563_PMCR_PROGRAMMED_POWER_STATE_MASK    0x0300
+#define GG82563_PMCR_PROGRAMMED_POWER_STATE_DR      0x0000 /* 00=Dr */
+#define GG82563_PMCR_PROGRAMMED_POWER_STATE_D0U     0x0100 /* 01=D0u */
+#define GG82563_PMCR_PROGRAMMED_POWER_STATE_D0A     0x0200 /* 10=D0a */
+#define GG82563_PMCR_PROGRAMMED_POWER_STATE_D3      0x0300 /* 11=D3 */
+
+/* In-Band Control Register (Page 194, Register 18) */
+#define GG82563_ICR_DIS_PADDING                     0x0010 /* Disable Padding Use */
+
+
+/* Bit definitions for valid PHY IDs. */
+/* I = Integrated
+ * E = External
+ */
+#define M88_VENDOR         0x0141
+#define M88E1000_E_PHY_ID  0x01410C50
+#define M88E1000_I_PHY_ID  0x01410C30
+#define M88E1011_I_PHY_ID  0x01410C20
+#define IGP01E1000_I_PHY_ID  0x02A80380
+#define M88E1000_12_PHY_ID M88E1000_E_PHY_ID
+#define M88E1000_14_PHY_ID M88E1000_E_PHY_ID
+#define M88E1011_I_REV_4   0x04
+#define M88E1111_I_PHY_ID  0x01410CC0
+#define L1LXT971A_PHY_ID   0x001378E0
+#define GG82563_E_PHY_ID   0x01410CA0
+
+
+/* Bits...
+ * 15-5: page
+ * 4-0: register offset
+ */
+#define PHY_PAGE_SHIFT        5
+#define PHY_REG(page, reg)    \
+        (((page) << PHY_PAGE_SHIFT) | ((reg) & MAX_PHY_REG_ADDRESS))
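+
+/* Worked example (illustrative only):
+ *     PHY_REG(770, 17) == (770 << PHY_PAGE_SHIFT) | 17 == 0x6051
+ * so IGP3_KMRN_POWER_MNG_CTRL below resolves to page 770, offset 17. */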
+
+#define IGP3_PHY_PORT_CTRL           \
+        PHY_REG(769, 17) /* Port General Configuration */
+#define IGP3_PHY_RATE_ADAPT_CTRL \
+        PHY_REG(769, 25) /* Rate Adapter Control Register */
+
+#define IGP3_KMRN_FIFO_CTRL_STATS \
+        PHY_REG(770, 16) /* KMRN FIFO's control/status register */
+#define IGP3_KMRN_POWER_MNG_CTRL \
+        PHY_REG(770, 17) /* KMRN Power Management Control Register */
+#define IGP3_KMRN_INBAND_CTRL \
+        PHY_REG(770, 18) /* KMRN Inband Control Register */
+#define IGP3_KMRN_DIAG \
+        PHY_REG(770, 19) /* KMRN Diagnostic register */
+#define IGP3_KMRN_DIAG_PCS_LOCK_LOSS 0x0002 /* RX PCS is not synced */
+#define IGP3_KMRN_ACK_TIMEOUT \
+        PHY_REG(770, 20) /* KMRN Acknowledge Timeouts register */
+
+#define IGP3_VR_CTRL \
+        PHY_REG(776, 18) /* Voltage regulator control register */
+#define IGP3_VR_CTRL_MODE_SHUT       0x0200 /* Enter powerdown, shutdown VRs */
+#define IGP3_VR_CTRL_MODE_MASK       0x0300 /* Shutdown VR Mask */
+
+#define IGP3_CAPABILITY \
+        PHY_REG(776, 19) /* IGP3 Capability Register */
+
+/* Capabilities for SKU Control  */
+#define IGP3_CAP_INITIATE_TEAM       0x0001 /* Able to initiate a team */
+#define IGP3_CAP_WFM                 0x0002 /* Support WoL and PXE */
+#define IGP3_CAP_ASF                 0x0004 /* Support ASF */
+#define IGP3_CAP_LPLU                0x0008 /* Support Low Power Link Up */
+#define IGP3_CAP_DC_AUTO_SPEED       0x0010 /* Support AC/DC Auto Link Speed */
+#define IGP3_CAP_SPD                 0x0020 /* Support Smart Power Down */
+#define IGP3_CAP_MULT_QUEUE          0x0040 /* Support 2 tx & 2 rx queues */
+#define IGP3_CAP_RSS                 0x0080 /* Support RSS */
+#define IGP3_CAP_8021PQ              0x0100 /* Support 802.1Q & 802.1p */
+#define IGP3_CAP_AMT_CB              0x0200 /* Support active manageability and circuit breaker */
+
+#define IGP3_PPC_JORDAN_EN           0x0001
+#define IGP3_PPC_JORDAN_GIGA_SPEED   0x0002
+
+#define IGP3_KMRN_PMC_EE_IDLE_LINK_DIS         0x0001
+#define IGP3_KMRN_PMC_K0S_ENTRY_LATENCY_MASK   0x001E
+#define IGP3_KMRN_PMC_K0S_MODE1_EN_GIGA        0x0020
+#define IGP3_KMRN_PMC_K0S_MODE1_EN_100         0x0040
+
+#define IGP3E1000_PHY_MISC_CTRL                0x1B   /* Misc. Ctrl register */
+#define IGP3_PHY_MISC_DUPLEX_MANUAL_SET        0x1000 /* Duplex Manual Set */
+
+#define IGP3_KMRN_EXT_CTRL  PHY_REG(770, 18)
+#define IGP3_KMRN_EC_DIS_INBAND    0x0080
+
+#define IGP03E1000_E_PHY_ID  0x02A80390
+#define IFE_E_PHY_ID         0x02A80330 /* 10/100 PHY */
+#define IFE_PLUS_E_PHY_ID    0x02A80320
+#define IFE_C_E_PHY_ID       0x02A80310
+
+#define IFE_PHY_EXTENDED_STATUS_CONTROL   0x10  /* 100BaseTx Extended Status, Control and Address */
+#define IFE_PHY_SPECIAL_CONTROL           0x11  /* 100BaseTx PHY special control register */
+#define IFE_PHY_RCV_FALSE_CARRIER         0x13  /* 100BaseTx Receive False Carrier Counter */
+#define IFE_PHY_RCV_DISCONNECT            0x14  /* 100BaseTx Receive Disconnect Counter */
+#define IFE_PHY_RCV_ERROT_FRAME           0x15  /* 100BaseTx Receive Error Frame Counter */
+#define IFE_PHY_RCV_SYMBOL_ERR            0x16  /* Receive Symbol Error Counter */
+#define IFE_PHY_PREM_EOF_ERR              0x17  /* 100BaseTx Receive Premature End Of Frame Error Counter */
+#define IFE_PHY_RCV_EOF_ERR               0x18  /* 10BaseT Receive End Of Frame Error Counter */
+#define IFE_PHY_TX_JABBER_DETECT          0x19  /* 10BaseT Transmit Jabber Detect Counter */
+#define IFE_PHY_EQUALIZER                 0x1A  /* PHY Equalizer Control and Status */
+#define IFE_PHY_SPECIAL_CONTROL_LED       0x1B  /* PHY special control and LED configuration */
+#define IFE_PHY_MDIX_CONTROL              0x1C  /* MDI/MDI-X Control register */
+#define IFE_PHY_HWI_CONTROL               0x1D  /* Hardware Integrity Control (HWI) */
+
+#define IFE_PESC_REDUCED_POWER_DOWN_DISABLE  0x2000  /* Default 1 = Disable auto reduced power down */
+#define IFE_PESC_100BTX_POWER_DOWN           0x0400  /* Indicates the power state of 100BASE-TX */
+#define IFE_PESC_10BTX_POWER_DOWN            0x0200  /* Indicates the power state of 10BASE-T */
+#define IFE_PESC_POLARITY_REVERSED           0x0100  /* Indicates 10BASE-T polarity */
+#define IFE_PESC_PHY_ADDR_MASK               0x007C  /* Bit 6:2 for sampled PHY address */
+#define IFE_PESC_SPEED                       0x0002  /* Auto-negotiation speed result 1=100Mbs, 0=10Mbs */
+#define IFE_PESC_DUPLEX                      0x0001  /* Auto-negotiation duplex result 1=Full, 0=Half */
+#define IFE_PESC_POLARITY_REVERSED_SHIFT     8
+
+#define IFE_PSC_DISABLE_DYNAMIC_POWER_DOWN   0x0100  /* 1 = Dynamic Power Down disabled */
+#define IFE_PSC_FORCE_POLARITY               0x0020  /* 1=Reversed Polarity, 0=Normal */
+#define IFE_PSC_AUTO_POLARITY_DISABLE        0x0010  /* 1=Auto Polarity Disabled, 0=Enabled */
+#define IFE_PSC_JABBER_FUNC_DISABLE          0x0001  /* 1=Jabber Disabled, 0=Normal Jabber Operation */
+#define IFE_PSC_FORCE_POLARITY_SHIFT         5
+#define IFE_PSC_AUTO_POLARITY_DISABLE_SHIFT  4
+
+#define IFE_PMC_AUTO_MDIX                    0x0080  /* 1=enable MDI/MDI-X feature, default 0=disabled */
+#define IFE_PMC_FORCE_MDIX                   0x0040  /* 1=force MDIX-X, 0=force MDI */
+#define IFE_PMC_MDIX_STATUS                  0x0020  /* 1=MDI-X, 0=MDI */
+#define IFE_PMC_AUTO_MDIX_COMPLETE           0x0010  /* Resolution algorithm is completed */
+#define IFE_PMC_MDIX_MODE_SHIFT              6
+#define IFE_PHC_MDIX_RESET_ALL_MASK          0x0000  /* Disable auto MDI-X */
+
+#define IFE_PHC_HWI_ENABLE                   0x8000  /* Enable the HWI feature */
+#define IFE_PHC_ABILITY_CHECK                0x4000  /* 1= Test Passed, 0=failed */
+#define IFE_PHC_TEST_EXEC                    0x2000  /* PHY launch test pulses on the wire */
+#define IFE_PHC_HIGHZ                        0x0200  /* 1 = Open Circuit */
+#define IFE_PHC_LOWZ                         0x0400  /* 1 = Short Circuit */
+#define IFE_PHC_LOW_HIGH_Z_MASK              0x0600  /* Mask for indication type of problem on the line */
+#define IFE_PHC_DISTANCE_MASK                0x01FF  /* Mask for distance to the cable problem, in 80cm granularity */
+#define IFE_PHC_RESET_ALL_MASK               0x0000  /* Disable HWI */
+#define IFE_PSCL_PROBE_MODE                  0x0020  /* LED Probe mode */
+#define IFE_PSCL_PROBE_LEDS_OFF              0x0006  /* Force LEDs 0 and 2 off */
+#define IFE_PSCL_PROBE_LEDS_ON               0x0007  /* Force LEDs 0 and 2 on */
+
+#define ICH_FLASH_COMMAND_TIMEOUT            5000    /* 5000 uSecs - adjusted */
+#define ICH_FLASH_ERASE_TIMEOUT              3000000 /* Up to 3 seconds - worst case */
+#define ICH_FLASH_CYCLE_REPEAT_COUNT         10      /* 10 cycles */
+#define ICH_FLASH_SEG_SIZE_256               256
+#define ICH_FLASH_SEG_SIZE_4K                4096
+#define ICH_FLASH_SEG_SIZE_64K               65536
+
+#define ICH_CYCLE_READ                       0x0
+#define ICH_CYCLE_RESERVED                   0x1
+#define ICH_CYCLE_WRITE                      0x2
+#define ICH_CYCLE_ERASE                      0x3
+
+#define ICH_FLASH_GFPREG   0x0000
+#define ICH_FLASH_HSFSTS   0x0004
+#define ICH_FLASH_HSFCTL   0x0006
+#define ICH_FLASH_FADDR    0x0008
+#define ICH_FLASH_FDATA0   0x0010
+#define ICH_FLASH_FRACC    0x0050
+#define ICH_FLASH_FREG0    0x0054
+#define ICH_FLASH_FREG1    0x0058
+#define ICH_FLASH_FREG2    0x005C
+#define ICH_FLASH_FREG3    0x0060
+#define ICH_FLASH_FPR0     0x0074
+#define ICH_FLASH_FPR1     0x0078
+#define ICH_FLASH_SSFSTS   0x0090
+#define ICH_FLASH_SSFCTL   0x0092
+#define ICH_FLASH_PREOP    0x0094
+#define ICH_FLASH_OPTYPE   0x0096
+#define ICH_FLASH_OPMENU   0x0098
+
+#define ICH_FLASH_REG_MAPSIZE      0x00A0
+#define ICH_FLASH_SECTOR_SIZE      4096
+#define ICH_GFPREG_BASE_MASK       0x1FFF
+#define ICH_FLASH_LINEAR_ADDR_MASK 0x00FFFFFF
+
+/* ICH8 GbE Flash Hardware Sequencing Flash Status Register bit breakdown */
+/* Offset 04h HSFSTS */
+union ich8_hws_flash_status {
+    struct ich8_hsfsts {
+#ifdef __BIG_ENDIAN
+        u16 reserved2      :6;
+        u16 fldesvalid     :1;
+        u16 flockdn        :1;
+        u16 flcdone        :1;
+        u16 flcerr         :1;
+        u16 dael           :1;
+        u16 berasesz       :2;
+        u16 flcinprog      :1;
+        u16 reserved1      :2;
+#else
+        u16 flcdone        :1;   /* bit 0 Flash Cycle Done */
+        u16 flcerr         :1;   /* bit 1 Flash Cycle Error */
+        u16 dael           :1;   /* bit 2 Direct Access error Log */
+        u16 berasesz       :2;   /* bit 4:3 Block/Sector Erase Size */
+        u16 flcinprog      :1;   /* bit 5 flash SPI cycle in Progress */
+        u16 reserved1      :2;   /* bit 7:6 Reserved */
+        u16 reserved2      :6;   /* bit 13:8 Reserved */
+        u16 fldesvalid     :1;   /* bit 14 Flash Descriptor Valid */
+        u16 flockdn        :1;   /* bit 15 Flash Configuration Lock-Down */
+#endif
+    } hsf_status;
+    u16 regval;
+};
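+
+/* Usage sketch (illustrative only, not driver code): the union lets the
+ * 16-bit HSFSTS register be read once into regval and then inspected
+ * field by field; read_flash_reg16() is a placeholder for the real
+ * register accessor.
+ *
+ *     union ich8_hws_flash_status hsfsts;
+ *     hsfsts.regval = read_flash_reg16(ICH_FLASH_HSFSTS);
+ *     if (hsfsts.hsf_status.flcinprog)
+ *         ;  // a flash cycle is still in progress
+ *     else if (hsfsts.hsf_status.flcdone && !hsfsts.hsf_status.flcerr)
+ *         ;  // previous cycle finished without error
+ */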
+
+/* ICH8 GbE Flash Hardware Sequencing Flash control Register bit breakdown */
+/* Offset 06h FLCTL */
+union ich8_hws_flash_ctrl {
+    struct ich8_hsflctl {
+#ifdef __BIG_ENDIAN
+        u16 fldbcount      :2;
+        u16 flockdn        :6;
+        u16 flcgo          :1;
+        u16 flcycle        :2;
+        u16 reserved       :5;
+#else
+        u16 flcgo          :1;   /* 0 Flash Cycle Go */
+        u16 flcycle        :2;   /* 2:1 Flash Cycle */
+        u16 reserved       :5;   /* 7:3 Reserved  */
+        u16 fldbcount      :2;   /* 9:8 Flash Data Byte Count */
+        u16 flockdn        :6;   /* 15:10 Reserved */
+#endif
+    } hsf_ctrl;
+    u16 regval;
+};
+
+/* ICH8 Flash Region Access Permissions */
+union ich8_hws_flash_regacc {
+    struct ich8_flracc {
+#ifdef __BIG_ENDIAN
+        u32 gmwag          :8;
+        u32 gmrag          :8;
+        u32 grwa           :8;
+        u32 grra           :8;
+#else
+        u32 grra           :8;   /* 0:7 GbE region Read Access */
+        u32 grwa           :8;   /* 8:15 GbE region Write Access */
+        u32 gmrag          :8;   /* 23:16 GbE Master Read Access Grant  */
+        u32 gmwag          :8;   /* 31:24 GbE Master Write Access Grant */
+#endif
+    } hsf_flregacc;
+    u16 regval;
+};
+
+/* Miscellaneous PHY bit definitions. */
+#define PHY_PREAMBLE        0xFFFFFFFF
+#define PHY_SOF             0x01
+#define PHY_OP_READ         0x02
+#define PHY_OP_WRITE        0x01
+#define PHY_TURNAROUND      0x02
+#define PHY_PREAMBLE_SIZE   32
+#define MII_CR_SPEED_1000   0x0040
+#define MII_CR_SPEED_100    0x2000
+#define MII_CR_SPEED_10     0x0000
+#define E1000_PHY_ADDRESS   0x01
+#define PHY_AUTO_NEG_TIME   45  /* 4.5 Seconds */
+#define PHY_FORCE_TIME      20  /* 2.0 Seconds */
+#define PHY_REVISION_MASK   0xFFFFFFF0
+#define DEVICE_SPEED_MASK   0x00000300  /* Device Ctrl Reg Speed Mask */
+#define REG4_SPEED_MASK     0x01E0
+#define REG9_SPEED_MASK     0x0300
+#define ADVERTISE_10_HALF   0x0001
+#define ADVERTISE_10_FULL   0x0002
+#define ADVERTISE_100_HALF  0x0004
+#define ADVERTISE_100_FULL  0x0008
+#define ADVERTISE_1000_HALF 0x0010
+#define ADVERTISE_1000_FULL 0x0020
+#define AUTONEG_ADVERTISE_SPEED_DEFAULT 0x002F  /* Everything but 1000-Half */
+#define AUTONEG_ADVERTISE_10_100_ALL    0x000F /* All 10/100 speeds*/
+#define AUTONEG_ADVERTISE_10_ALL        0x0003 /* 10Mbps Full & Half speeds*/
+
+#endif /* _E1000_HW_H_ */
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/devices/e1000/e1000_hw-2.6.31-orig.c	Wed Apr 13 22:06:28 2011 +0200
@@ -0,0 +1,8878 @@
+/*******************************************************************************
+
+  Intel PRO/1000 Linux driver
+  Copyright(c) 1999 - 2006 Intel Corporation.
+
+  This program is free software; you can redistribute it and/or modify it
+  under the terms and conditions of the GNU General Public License,
+  version 2, as published by the Free Software Foundation.
+
+  This program is distributed in the hope it will be useful, but WITHOUT
+  ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+  FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
+  more details.
+
+  You should have received a copy of the GNU General Public License along with
+  this program; if not, write to the Free Software Foundation, Inc.,
+  51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
+
+  The full GNU General Public License is included in this distribution in
+  the file called "COPYING".
+
+  Contact Information:
+  Linux NICS <linux.nics@intel.com>
+  e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
+  Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
+
+*******************************************************************************/
+
+/* e1000_hw.c
+ * Shared functions for accessing and configuring the MAC
+ */
+
+
+#include "e1000_hw.h"
+
+static s32 e1000_swfw_sync_acquire(struct e1000_hw *hw, u16 mask);
+static void e1000_swfw_sync_release(struct e1000_hw *hw, u16 mask);
+static s32 e1000_read_kmrn_reg(struct e1000_hw *hw, u32 reg_addr, u16 *data);
+static s32 e1000_write_kmrn_reg(struct e1000_hw *hw, u32 reg_addr, u16 data);
+static s32 e1000_get_software_semaphore(struct e1000_hw *hw);
+static void e1000_release_software_semaphore(struct e1000_hw *hw);
+
+static u8 e1000_arc_subsystem_valid(struct e1000_hw *hw);
+static s32 e1000_check_downshift(struct e1000_hw *hw);
+static s32 e1000_check_polarity(struct e1000_hw *hw,
+				e1000_rev_polarity *polarity);
+static void e1000_clear_hw_cntrs(struct e1000_hw *hw);
+static void e1000_clear_vfta(struct e1000_hw *hw);
+static s32 e1000_commit_shadow_ram(struct e1000_hw *hw);
+static s32 e1000_config_dsp_after_link_change(struct e1000_hw *hw,
+					      bool link_up);
+static s32 e1000_config_fc_after_link_up(struct e1000_hw *hw);
+static s32 e1000_detect_gig_phy(struct e1000_hw *hw);
+static s32 e1000_erase_ich8_4k_segment(struct e1000_hw *hw, u32 bank);
+static s32 e1000_get_auto_rd_done(struct e1000_hw *hw);
+static s32 e1000_get_cable_length(struct e1000_hw *hw, u16 *min_length,
+				  u16 *max_length);
+static s32 e1000_get_hw_eeprom_semaphore(struct e1000_hw *hw);
+static s32 e1000_get_phy_cfg_done(struct e1000_hw *hw);
+static s32 e1000_get_software_flag(struct e1000_hw *hw);
+static s32 e1000_ich8_cycle_init(struct e1000_hw *hw);
+static s32 e1000_ich8_flash_cycle(struct e1000_hw *hw, u32 timeout);
+static s32 e1000_id_led_init(struct e1000_hw *hw);
+static s32 e1000_init_lcd_from_nvm_config_region(struct e1000_hw *hw,
+						 u32 cnf_base_addr,
+						 u32 cnf_size);
+static s32 e1000_init_lcd_from_nvm(struct e1000_hw *hw);
+static void e1000_init_rx_addrs(struct e1000_hw *hw);
+static void e1000_initialize_hardware_bits(struct e1000_hw *hw);
+static bool e1000_is_onboard_nvm_eeprom(struct e1000_hw *hw);
+static s32 e1000_kumeran_lock_loss_workaround(struct e1000_hw *hw);
+static s32 e1000_mng_enable_host_if(struct e1000_hw *hw);
+static s32 e1000_mng_host_if_write(struct e1000_hw *hw, u8 *buffer, u16 length,
+				   u16 offset, u8 *sum);
+static s32 e1000_mng_write_cmd_header(struct e1000_hw* hw,
+				      struct e1000_host_mng_command_header
+				      *hdr);
+static s32 e1000_mng_write_commit(struct e1000_hw *hw);
+static s32 e1000_phy_ife_get_info(struct e1000_hw *hw,
+				  struct e1000_phy_info *phy_info);
+static s32 e1000_phy_igp_get_info(struct e1000_hw *hw,
+				  struct e1000_phy_info *phy_info);
+static s32 e1000_read_eeprom_eerd(struct e1000_hw *hw, u16 offset, u16 words,
+				  u16 *data);
+static s32 e1000_write_eeprom_eewr(struct e1000_hw *hw, u16 offset, u16 words,
+				   u16 *data);
+static s32 e1000_poll_eerd_eewr_done(struct e1000_hw *hw, int eerd);
+static s32 e1000_phy_m88_get_info(struct e1000_hw *hw,
+				  struct e1000_phy_info *phy_info);
+static void e1000_put_hw_eeprom_semaphore(struct e1000_hw *hw);
+static s32 e1000_read_ich8_byte(struct e1000_hw *hw, u32 index, u8 *data);
+static s32 e1000_verify_write_ich8_byte(struct e1000_hw *hw, u32 index,
+					u8 byte);
+static s32 e1000_write_ich8_byte(struct e1000_hw *hw, u32 index, u8 byte);
+static s32 e1000_read_ich8_word(struct e1000_hw *hw, u32 index, u16 *data);
+static s32 e1000_read_ich8_data(struct e1000_hw *hw, u32 index, u32 size,
+				u16 *data);
+static s32 e1000_write_ich8_data(struct e1000_hw *hw, u32 index, u32 size,
+				 u16 data);
+static s32 e1000_read_eeprom_ich8(struct e1000_hw *hw, u16 offset, u16 words,
+				  u16 *data);
+static s32 e1000_write_eeprom_ich8(struct e1000_hw *hw, u16 offset, u16 words,
+				   u16 *data);
+static void e1000_release_software_flag(struct e1000_hw *hw);
+static s32 e1000_set_d3_lplu_state(struct e1000_hw *hw, bool active);
+static s32 e1000_set_d0_lplu_state(struct e1000_hw *hw, bool active);
+static s32 e1000_set_pci_ex_no_snoop(struct e1000_hw *hw, u32 no_snoop);
+static void e1000_set_pci_express_master_disable(struct e1000_hw *hw);
+static s32 e1000_wait_autoneg(struct e1000_hw *hw);
+static void e1000_write_reg_io(struct e1000_hw *hw, u32 offset, u32 value);
+static s32 e1000_set_phy_type(struct e1000_hw *hw);
+static void e1000_phy_init_script(struct e1000_hw *hw);
+static s32 e1000_setup_copper_link(struct e1000_hw *hw);
+static s32 e1000_setup_fiber_serdes_link(struct e1000_hw *hw);
+static s32 e1000_adjust_serdes_amplitude(struct e1000_hw *hw);
+static s32 e1000_phy_force_speed_duplex(struct e1000_hw *hw);
+static s32 e1000_config_mac_to_phy(struct e1000_hw *hw);
+static void e1000_raise_mdi_clk(struct e1000_hw *hw, u32 *ctrl);
+static void e1000_lower_mdi_clk(struct e1000_hw *hw, u32 *ctrl);
+static void e1000_shift_out_mdi_bits(struct e1000_hw *hw, u32 data,
+				     u16 count);
+static u16 e1000_shift_in_mdi_bits(struct e1000_hw *hw);
+static s32 e1000_phy_reset_dsp(struct e1000_hw *hw);
+static s32 e1000_write_eeprom_spi(struct e1000_hw *hw, u16 offset,
+                                      u16 words, u16 *data);
+static s32 e1000_write_eeprom_microwire(struct e1000_hw *hw, u16 offset,
+					u16 words, u16 *data);
+static s32 e1000_spi_eeprom_ready(struct e1000_hw *hw);
+static void e1000_raise_ee_clk(struct e1000_hw *hw, u32 *eecd);
+static void e1000_lower_ee_clk(struct e1000_hw *hw, u32 *eecd);
+static void e1000_shift_out_ee_bits(struct e1000_hw *hw, u16 data, u16 count);
+static s32 e1000_write_phy_reg_ex(struct e1000_hw *hw, u32 reg_addr,
+				  u16 phy_data);
+static s32 e1000_read_phy_reg_ex(struct e1000_hw *hw,u32 reg_addr,
+				 u16 *phy_data);
+static u16 e1000_shift_in_ee_bits(struct e1000_hw *hw, u16 count);
+static s32 e1000_acquire_eeprom(struct e1000_hw *hw);
+static void e1000_release_eeprom(struct e1000_hw *hw);
+static void e1000_standby_eeprom(struct e1000_hw *hw);
+static s32 e1000_set_vco_speed(struct e1000_hw *hw);
+static s32 e1000_polarity_reversal_workaround(struct e1000_hw *hw);
+static s32 e1000_set_phy_mode(struct e1000_hw *hw);
+static s32 e1000_host_if_read_cookie(struct e1000_hw *hw, u8 *buffer);
+static u8 e1000_calculate_mng_checksum(char *buffer, u32 length);
+static s32 e1000_configure_kmrn_for_10_100(struct e1000_hw *hw, u16 duplex);
+static s32 e1000_configure_kmrn_for_1000(struct e1000_hw *hw);
+static s32 e1000_do_read_eeprom(struct e1000_hw *hw, u16 offset, u16 words, u16 *data);
+static s32 e1000_do_write_eeprom(struct e1000_hw *hw, u16 offset, u16 words, u16 *data);
+
+/* IGP cable length table */
+static const
+u16 e1000_igp_cable_length_table[IGP01E1000_AGC_LENGTH_TABLE_SIZE] =
+    { 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5,
+      5, 10, 10, 10, 10, 10, 10, 10, 20, 20, 20, 20, 20, 25, 25, 25,
+      25, 25, 25, 25, 30, 30, 30, 30, 40, 40, 40, 40, 40, 40, 40, 40,
+      40, 50, 50, 50, 50, 50, 50, 50, 60, 60, 60, 60, 60, 60, 60, 60,
+      60, 70, 70, 70, 70, 70, 70, 80, 80, 80, 80, 80, 80, 90, 90, 90,
+      90, 90, 90, 90, 90, 90, 100, 100, 100, 100, 100, 100, 100, 100, 100, 100,
+      100, 100, 100, 100, 110, 110, 110, 110, 110, 110, 110, 110, 110, 110, 110, 110,
+      110, 110, 110, 110, 110, 110, 120, 120, 120, 120, 120, 120, 120, 120, 120, 120};
+
+static const
+u16 e1000_igp_2_cable_length_table[IGP02E1000_AGC_LENGTH_TABLE_SIZE] =
+    { 0, 0, 0, 0, 0, 0, 0, 0, 3, 5, 8, 11, 13, 16, 18, 21,
+      0, 0, 0, 3, 6, 10, 13, 16, 19, 23, 26, 29, 32, 35, 38, 41,
+      6, 10, 14, 18, 22, 26, 30, 33, 37, 41, 44, 48, 51, 54, 58, 61,
+      21, 26, 31, 35, 40, 44, 49, 53, 57, 61, 65, 68, 72, 75, 79, 82,
+      40, 45, 51, 56, 61, 66, 70, 75, 79, 83, 87, 91, 94, 98, 101, 104,
+      60, 66, 72, 77, 82, 87, 92, 96, 100, 104, 108, 111, 114, 117, 119, 121,
+      83, 89, 95, 100, 105, 109, 113, 116, 119, 122, 124,
+      104, 109, 114, 118, 121, 124};
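+
+/* Note: the cable length estimation code indexes these tables with the
+ * masked AGC register readings; the entries are approximate cable lengths
+ * in meters. */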
+
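+/* Protects EEPROM accesses: e1000_read_eeprom()/e1000_write_eeprom() take
+ * this lock around the e1000_do_read_eeprom()/e1000_do_write_eeprom()
+ * helpers declared above. */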
+static DEFINE_SPINLOCK(e1000_eeprom_lock);
+
+/******************************************************************************
+ * Set the phy type member in the hw struct.
+ *
+ * hw - Struct containing variables accessed by shared code
+ *****************************************************************************/
+static s32 e1000_set_phy_type(struct e1000_hw *hw)
+{
+    DEBUGFUNC("e1000_set_phy_type");
+
+    if (hw->mac_type == e1000_undefined)
+        return -E1000_ERR_PHY_TYPE;
+
+    switch (hw->phy_id) {
+    case M88E1000_E_PHY_ID:
+    case M88E1000_I_PHY_ID:
+    case M88E1011_I_PHY_ID:
+    case M88E1111_I_PHY_ID:
+        hw->phy_type = e1000_phy_m88;
+        break;
+    case IGP01E1000_I_PHY_ID:
+        if (hw->mac_type == e1000_82541 ||
+            hw->mac_type == e1000_82541_rev_2 ||
+            hw->mac_type == e1000_82547 ||
+            hw->mac_type == e1000_82547_rev_2) {
+            hw->phy_type = e1000_phy_igp;
+            break;
+        }
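+        /* Fall Through */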
+    case IGP03E1000_E_PHY_ID:
+        hw->phy_type = e1000_phy_igp_3;
+        break;
+    case IFE_E_PHY_ID:
+    case IFE_PLUS_E_PHY_ID:
+    case IFE_C_E_PHY_ID:
+        hw->phy_type = e1000_phy_ife;
+        break;
+    case GG82563_E_PHY_ID:
+        if (hw->mac_type == e1000_80003es2lan) {
+            hw->phy_type = e1000_phy_gg82563;
+            break;
+        }
+        /* Fall Through */
+    default:
+        /* Should never have loaded on this device */
+        hw->phy_type = e1000_phy_undefined;
+        return -E1000_ERR_PHY_TYPE;
+    }
+
+    return E1000_SUCCESS;
+}
+
+/******************************************************************************
+ * IGP phy init script - initializes the GbE PHY
+ *
+ * hw - Struct containing variables accessed by shared code
+ *****************************************************************************/
+static void e1000_phy_init_script(struct e1000_hw *hw)
+{
+    u32 ret_val;
+    u16 phy_saved_data;
+
+    DEBUGFUNC("e1000_phy_init_script");
+
+    if (hw->phy_init_script) {
+        msleep(20);
+
+        /* Save off the current value of register 0x2F5B to be restored at
+         * the end of this routine. */
+        ret_val = e1000_read_phy_reg(hw, 0x2F5B, &phy_saved_data);
+
+        /* Disable the PHY transmitter */
+        e1000_write_phy_reg(hw, 0x2F5B, 0x0003);
+
+        msleep(20);
+
+        e1000_write_phy_reg(hw, 0x0000, 0x0140);
+
+        msleep(5);
+
+        switch (hw->mac_type) {
+        case e1000_82541:
+        case e1000_82547:
+            e1000_write_phy_reg(hw, 0x1F95, 0x0001);
+
+            e1000_write_phy_reg(hw, 0x1F71, 0xBD21);
+
+            e1000_write_phy_reg(hw, 0x1F79, 0x0018);
+
+            e1000_write_phy_reg(hw, 0x1F30, 0x1600);
+
+            e1000_write_phy_reg(hw, 0x1F31, 0x0014);
+
+            e1000_write_phy_reg(hw, 0x1F32, 0x161C);
+
+            e1000_write_phy_reg(hw, 0x1F94, 0x0003);
+
+            e1000_write_phy_reg(hw, 0x1F96, 0x003F);
+
+            e1000_write_phy_reg(hw, 0x2010, 0x0008);
+            break;
+
+        case e1000_82541_rev_2:
+        case e1000_82547_rev_2:
+            e1000_write_phy_reg(hw, 0x1F73, 0x0099);
+            break;
+        default:
+            break;
+        }
+
+        e1000_write_phy_reg(hw, 0x0000, 0x3300);
+
+        msleep(20);
+
+        /* Now enable the transmitter */
+        e1000_write_phy_reg(hw, 0x2F5B, phy_saved_data);
+
+        if (hw->mac_type == e1000_82547) {
+            u16 fused, fine, coarse;
+
+            /* Move to analog registers page */
+            e1000_read_phy_reg(hw, IGP01E1000_ANALOG_SPARE_FUSE_STATUS, &fused);
+
+            if (!(fused & IGP01E1000_ANALOG_SPARE_FUSE_ENABLED)) {
+                e1000_read_phy_reg(hw, IGP01E1000_ANALOG_FUSE_STATUS, &fused);
+
+                fine = fused & IGP01E1000_ANALOG_FUSE_FINE_MASK;
+                coarse = fused & IGP01E1000_ANALOG_FUSE_COARSE_MASK;
+
+                if (coarse > IGP01E1000_ANALOG_FUSE_COARSE_THRESH) {
+                    coarse -= IGP01E1000_ANALOG_FUSE_COARSE_10;
+                    fine -= IGP01E1000_ANALOG_FUSE_FINE_1;
+                } else if (coarse == IGP01E1000_ANALOG_FUSE_COARSE_THRESH)
+                    fine -= IGP01E1000_ANALOG_FUSE_FINE_10;
+
+                fused = (fused & IGP01E1000_ANALOG_FUSE_POLY_MASK) |
+                        (fine & IGP01E1000_ANALOG_FUSE_FINE_MASK) |
+                        (coarse & IGP01E1000_ANALOG_FUSE_COARSE_MASK);
+
+                e1000_write_phy_reg(hw, IGP01E1000_ANALOG_FUSE_CONTROL, fused);
+                e1000_write_phy_reg(hw, IGP01E1000_ANALOG_FUSE_BYPASS,
+                                    IGP01E1000_ANALOG_FUSE_ENABLE_SW_CONTROL);
+            }
+        }
+    }
+}
+
+/******************************************************************************
+ * Set the mac type member in the hw struct.
+ *
+ * hw - Struct containing variables accessed by shared code
+ *****************************************************************************/
+s32 e1000_set_mac_type(struct e1000_hw *hw)
+{
+	DEBUGFUNC("e1000_set_mac_type");
+
+	switch (hw->device_id) {
+	case E1000_DEV_ID_82542:
+		switch (hw->revision_id) {
+		case E1000_82542_2_0_REV_ID:
+			hw->mac_type = e1000_82542_rev2_0;
+			break;
+		case E1000_82542_2_1_REV_ID:
+			hw->mac_type = e1000_82542_rev2_1;
+			break;
+		default:
+			/* Invalid 82542 revision ID */
+			return -E1000_ERR_MAC_TYPE;
+		}
+		break;
+	case E1000_DEV_ID_82543GC_FIBER:
+	case E1000_DEV_ID_82543GC_COPPER:
+		hw->mac_type = e1000_82543;
+		break;
+	case E1000_DEV_ID_82544EI_COPPER:
+	case E1000_DEV_ID_82544EI_FIBER:
+	case E1000_DEV_ID_82544GC_COPPER:
+	case E1000_DEV_ID_82544GC_LOM:
+		hw->mac_type = e1000_82544;
+		break;
+	case E1000_DEV_ID_82540EM:
+	case E1000_DEV_ID_82540EM_LOM:
+	case E1000_DEV_ID_82540EP:
+	case E1000_DEV_ID_82540EP_LOM:
+	case E1000_DEV_ID_82540EP_LP:
+		hw->mac_type = e1000_82540;
+		break;
+	case E1000_DEV_ID_82545EM_COPPER:
+	case E1000_DEV_ID_82545EM_FIBER:
+		hw->mac_type = e1000_82545;
+		break;
+	case E1000_DEV_ID_82545GM_COPPER:
+	case E1000_DEV_ID_82545GM_FIBER:
+	case E1000_DEV_ID_82545GM_SERDES:
+		hw->mac_type = e1000_82545_rev_3;
+		break;
+	case E1000_DEV_ID_82546EB_COPPER:
+	case E1000_DEV_ID_82546EB_FIBER:
+	case E1000_DEV_ID_82546EB_QUAD_COPPER:
+		hw->mac_type = e1000_82546;
+		break;
+	case E1000_DEV_ID_82546GB_COPPER:
+	case E1000_DEV_ID_82546GB_FIBER:
+	case E1000_DEV_ID_82546GB_SERDES:
+	case E1000_DEV_ID_82546GB_PCIE:
+	case E1000_DEV_ID_82546GB_QUAD_COPPER:
+	case E1000_DEV_ID_82546GB_QUAD_COPPER_KSP3:
+		hw->mac_type = e1000_82546_rev_3;
+		break;
+	case E1000_DEV_ID_82541EI:
+	case E1000_DEV_ID_82541EI_MOBILE:
+	case E1000_DEV_ID_82541ER_LOM:
+		hw->mac_type = e1000_82541;
+		break;
+	case E1000_DEV_ID_82541ER:
+	case E1000_DEV_ID_82541GI:
+	case E1000_DEV_ID_82541GI_LF:
+	case E1000_DEV_ID_82541GI_MOBILE:
+		hw->mac_type = e1000_82541_rev_2;
+		break;
+	case E1000_DEV_ID_82547EI:
+	case E1000_DEV_ID_82547EI_MOBILE:
+		hw->mac_type = e1000_82547;
+		break;
+	case E1000_DEV_ID_82547GI:
+		hw->mac_type = e1000_82547_rev_2;
+		break;
+	case E1000_DEV_ID_82571EB_COPPER:
+	case E1000_DEV_ID_82571EB_FIBER:
+	case E1000_DEV_ID_82571EB_SERDES:
+	case E1000_DEV_ID_82571EB_SERDES_DUAL:
+	case E1000_DEV_ID_82571EB_SERDES_QUAD:
+	case E1000_DEV_ID_82571EB_QUAD_COPPER:
+	case E1000_DEV_ID_82571PT_QUAD_COPPER:
+	case E1000_DEV_ID_82571EB_QUAD_FIBER:
+	case E1000_DEV_ID_82571EB_QUAD_COPPER_LOWPROFILE:
+		hw->mac_type = e1000_82571;
+		break;
+	case E1000_DEV_ID_82572EI_COPPER:
+	case E1000_DEV_ID_82572EI_FIBER:
+	case E1000_DEV_ID_82572EI_SERDES:
+	case E1000_DEV_ID_82572EI:
+		hw->mac_type = e1000_82572;
+		break;
+	case E1000_DEV_ID_82573E:
+	case E1000_DEV_ID_82573E_IAMT:
+	case E1000_DEV_ID_82573L:
+		hw->mac_type = e1000_82573;
+		break;
+	case E1000_DEV_ID_80003ES2LAN_COPPER_SPT:
+	case E1000_DEV_ID_80003ES2LAN_SERDES_SPT:
+	case E1000_DEV_ID_80003ES2LAN_COPPER_DPT:
+	case E1000_DEV_ID_80003ES2LAN_SERDES_DPT:
+		hw->mac_type = e1000_80003es2lan;
+		break;
+	case E1000_DEV_ID_ICH8_IGP_M_AMT:
+	case E1000_DEV_ID_ICH8_IGP_AMT:
+	case E1000_DEV_ID_ICH8_IGP_C:
+	case E1000_DEV_ID_ICH8_IFE:
+	case E1000_DEV_ID_ICH8_IFE_GT:
+	case E1000_DEV_ID_ICH8_IFE_G:
+	case E1000_DEV_ID_ICH8_IGP_M:
+		hw->mac_type = e1000_ich8lan;
+		break;
+	default:
+		/* Should never have loaded on this device */
+		return -E1000_ERR_MAC_TYPE;
+	}
+
+	switch (hw->mac_type) {
+	case e1000_ich8lan:
+		hw->swfwhw_semaphore_present = true;
+		hw->asf_firmware_present = true;
+		break;
+	case e1000_80003es2lan:
+		hw->swfw_sync_present = true;
+		/* fall through */
+	case e1000_82571:
+	case e1000_82572:
+	case e1000_82573:
+		hw->eeprom_semaphore_present = true;
+		/* fall through */
+	case e1000_82541:
+	case e1000_82547:
+	case e1000_82541_rev_2:
+	case e1000_82547_rev_2:
+		hw->asf_firmware_present = true;
+		break;
+	default:
+		break;
+	}
+
+	/* The 82543 chip does not count tx_carrier_errors properly in
+	 * FD mode
+	 */
+	if (hw->mac_type == e1000_82543)
+		hw->bad_tx_carr_stats_fd = true;
+
+	/* capable of receiving management packets to the host */
+	if (hw->mac_type >= e1000_82571)
+		hw->has_manc2h = true;
+
+	/* On rare occasions, ESB2 systems end up starting without
+	 * the RX unit being turned on.
+	 */
+	if (hw->mac_type == e1000_80003es2lan)
+		hw->rx_needs_kicking = true;
+
+	if (hw->mac_type > e1000_82544)
+		hw->has_smbus = true;
+
+	return E1000_SUCCESS;
+}
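+
+/* Note: callers outside this file (the driver core) typically run
+ * e1000_set_mac_type() first, then e1000_set_media_type(), and later
+ * e1000_reset_hw() followed by e1000_init_hw() when bringing the
+ * interface up. */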
+
+/*****************************************************************************
+ * Set media type and TBI compatibility.
+ *
+ * hw - Struct containing variables accessed by shared code
+ * **************************************************************************/
+void e1000_set_media_type(struct e1000_hw *hw)
+{
+    u32 status;
+
+    DEBUGFUNC("e1000_set_media_type");
+
+    if (hw->mac_type != e1000_82543) {
+        /* tbi_compatibility is only valid on 82543 */
+        hw->tbi_compatibility_en = false;
+    }
+
+    switch (hw->device_id) {
+    case E1000_DEV_ID_82545GM_SERDES:
+    case E1000_DEV_ID_82546GB_SERDES:
+    case E1000_DEV_ID_82571EB_SERDES:
+    case E1000_DEV_ID_82571EB_SERDES_DUAL:
+    case E1000_DEV_ID_82571EB_SERDES_QUAD:
+    case E1000_DEV_ID_82572EI_SERDES:
+    case E1000_DEV_ID_80003ES2LAN_SERDES_DPT:
+        hw->media_type = e1000_media_type_internal_serdes;
+        break;
+    default:
+        switch (hw->mac_type) {
+        case e1000_82542_rev2_0:
+        case e1000_82542_rev2_1:
+            hw->media_type = e1000_media_type_fiber;
+            break;
+        case e1000_ich8lan:
+        case e1000_82573:
+            /* The STATUS_TBIMODE bit is reserved or reused for this
+             * device.
+             */
+            hw->media_type = e1000_media_type_copper;
+            break;
+        default:
+            status = er32(STATUS);
+            if (status & E1000_STATUS_TBIMODE) {
+                hw->media_type = e1000_media_type_fiber;
+                /* tbi_compatibility not valid on fiber */
+                hw->tbi_compatibility_en = false;
+            } else {
+                hw->media_type = e1000_media_type_copper;
+            }
+            break;
+        }
+    }
+}
+
+/******************************************************************************
+ * Reset the transmit and receive units; mask and clear all interrupts.
+ *
+ * hw - Struct containing variables accessed by shared code
+ *****************************************************************************/
+s32 e1000_reset_hw(struct e1000_hw *hw)
+{
+    u32 ctrl;
+    u32 ctrl_ext;
+    u32 icr;
+    u32 manc;
+    u32 led_ctrl;
+    u32 timeout;
+    u32 extcnf_ctrl;
+    s32 ret_val;
+
+    DEBUGFUNC("e1000_reset_hw");
+
+    /* For 82542 (rev 2.0), disable MWI before issuing a device reset */
+    if (hw->mac_type == e1000_82542_rev2_0) {
+        DEBUGOUT("Disabling MWI on 82542 rev 2.0\n");
+        e1000_pci_clear_mwi(hw);
+    }
+
+    if (hw->bus_type == e1000_bus_type_pci_express) {
+        /* Prevent the PCI-E bus from sticking if there is no TLP connection
+         * on the last TLP read/write transaction when MAC is reset.
+         */
+        if (e1000_disable_pciex_master(hw) != E1000_SUCCESS) {
+            DEBUGOUT("PCI-E Master disable polling has failed.\n");
+        }
+    }
+
+    /* Clear interrupt mask to stop board from generating interrupts */
+    DEBUGOUT("Masking off all interrupts\n");
+    ew32(IMC, 0xffffffff);
+
+    /* Disable the Transmit and Receive units.  Then delay to allow
+     * any pending transactions to complete before we hit the MAC with
+     * the global reset.
+     */
+    ew32(RCTL, 0);
+    ew32(TCTL, E1000_TCTL_PSP);
+    E1000_WRITE_FLUSH();
+
+    /* The tbi_compatibility_on flag must be cleared when RCTL is cleared. */
+    hw->tbi_compatibility_on = false;
+
+    /* Delay to allow any outstanding PCI transactions to complete before
+     * resetting the device
+     */
+    msleep(10);
+
+    ctrl = er32(CTRL);
+
+    /* Must reset the PHY before resetting the MAC */
+    if ((hw->mac_type == e1000_82541) || (hw->mac_type == e1000_82547)) {
+        ew32(CTRL, (ctrl | E1000_CTRL_PHY_RST));
+        msleep(5);
+    }
+
+    /* Must acquire the MDIO ownership before MAC reset.
+     * Ownership defaults to firmware after a reset. */
+    if (hw->mac_type == e1000_82573) {
+        timeout = 10;
+
+        extcnf_ctrl = er32(EXTCNF_CTRL);
+        extcnf_ctrl |= E1000_EXTCNF_CTRL_MDIO_SW_OWNERSHIP;
+
+        do {
+            ew32(EXTCNF_CTRL, extcnf_ctrl);
+            extcnf_ctrl = er32(EXTCNF_CTRL);
+
+            if (extcnf_ctrl & E1000_EXTCNF_CTRL_MDIO_SW_OWNERSHIP)
+                break;
+            else
+                extcnf_ctrl |= E1000_EXTCNF_CTRL_MDIO_SW_OWNERSHIP;
+
+            msleep(2);
+            timeout--;
+        } while (timeout);
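+        /* If the loop above times out, MDIO ownership simply remains with
+         * the firmware and the reset below is issued anyway. */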
+    }
+
+    /* Workaround for ICH8 bit corruption issue in FIFO memory */
+    if (hw->mac_type == e1000_ich8lan) {
+        /* Set Tx and Rx buffer allocation to 8k apiece. */
+        ew32(PBA, E1000_PBA_8K);
+        /* Set Packet Buffer Size to 16k. */
+        ew32(PBS, E1000_PBS_16K);
+    }
+
+    /* Issue a global reset to the MAC.  This will reset the chip's
+     * transmit, receive, DMA, and link units.  It will not affect
+     * the current PCI configuration.  The global reset bit is self-
+     * clearing, and should clear within a microsecond.
+     */
+    DEBUGOUT("Issuing a global reset to MAC\n");
+
+    switch (hw->mac_type) {
+        case e1000_82544:
+        case e1000_82540:
+        case e1000_82545:
+        case e1000_82546:
+        case e1000_82541:
+        case e1000_82541_rev_2:
+            /* These controllers can't ack the 64-bit write when issuing the
+             * reset, so use IO-mapping as a workaround to issue the reset */
+            E1000_WRITE_REG_IO(hw, CTRL, (ctrl | E1000_CTRL_RST));
+            break;
+        case e1000_82545_rev_3:
+        case e1000_82546_rev_3:
+            /* Reset is performed on a shadow of the control register */
+            ew32(CTRL_DUP, (ctrl | E1000_CTRL_RST));
+            break;
+        case e1000_ich8lan:
+            if (!hw->phy_reset_disable &&
+                e1000_check_phy_reset_block(hw) == E1000_SUCCESS) {
+                /* e1000_ich8lan PHY HW reset requires MAC CORE reset
+                 * at the same time to make sure the interface between
+                 * MAC and the external PHY is reset.
+                 */
+                ctrl |= E1000_CTRL_PHY_RST;
+            }
+
+            e1000_get_software_flag(hw);
+            ew32(CTRL, (ctrl | E1000_CTRL_RST));
+            msleep(5);
+            break;
+        default:
+            ew32(CTRL, (ctrl | E1000_CTRL_RST));
+            break;
+    }
+
+    /* After MAC reset, force reload of EEPROM to restore power-on settings to
+     * device.  Later controllers reload the EEPROM automatically, so just wait
+     * for reload to complete.
+     */
+    switch (hw->mac_type) {
+        case e1000_82542_rev2_0:
+        case e1000_82542_rev2_1:
+        case e1000_82543:
+        case e1000_82544:
+            /* Wait for reset to complete */
+            udelay(10);
+            ctrl_ext = er32(CTRL_EXT);
+            ctrl_ext |= E1000_CTRL_EXT_EE_RST;
+            ew32(CTRL_EXT, ctrl_ext);
+            E1000_WRITE_FLUSH();
+            /* Wait for EEPROM reload */
+            msleep(2);
+            break;
+        case e1000_82541:
+        case e1000_82541_rev_2:
+        case e1000_82547:
+        case e1000_82547_rev_2:
+            /* Wait for EEPROM reload */
+            msleep(20);
+            break;
+        case e1000_82573:
+            if (!e1000_is_onboard_nvm_eeprom(hw)) {
+                udelay(10);
+                ctrl_ext = er32(CTRL_EXT);
+                ctrl_ext |= E1000_CTRL_EXT_EE_RST;
+                ew32(CTRL_EXT, ctrl_ext);
+                E1000_WRITE_FLUSH();
+            }
+            /* fall through */
+        default:
+            /* Auto read done will delay 5ms or poll based on mac type */
+            ret_val = e1000_get_auto_rd_done(hw);
+            if (ret_val)
+                return ret_val;
+            break;
+    }
+
+    /* Disable HW ARPs on ASF enabled adapters */
+    if (hw->mac_type >= e1000_82540 && hw->mac_type <= e1000_82547_rev_2) {
+        manc = er32(MANC);
+        manc &= ~(E1000_MANC_ARP_EN);
+        ew32(MANC, manc);
+    }
+
+    if ((hw->mac_type == e1000_82541) || (hw->mac_type == e1000_82547)) {
+        e1000_phy_init_script(hw);
+
+        /* Configure activity LED after PHY reset */
+        led_ctrl = er32(LEDCTL);
+        led_ctrl &= IGP_ACTIVITY_LED_MASK;
+        led_ctrl |= (IGP_ACTIVITY_LED_ENABLE | IGP_LED3_MODE);
+        ew32(LEDCTL, led_ctrl);
+    }
+
+    /* Clear interrupt mask to stop board from generating interrupts */
+    DEBUGOUT("Masking off all interrupts\n");
+    ew32(IMC, 0xffffffff);
+
+    /* Clear any pending interrupt events. */
+    icr = er32(ICR);
+
+    /* If MWI was previously enabled, reenable it. */
+    if (hw->mac_type == e1000_82542_rev2_0) {
+        if (hw->pci_cmd_word & PCI_COMMAND_INVALIDATE)
+            e1000_pci_set_mwi(hw);
+    }
+
+    if (hw->mac_type == e1000_ich8lan) {
+        u32 kab = er32(KABGTXD);
+        kab |= E1000_KABGTXD_BGSQLBIAS;
+        ew32(KABGTXD, kab);
+    }
+
+    return E1000_SUCCESS;
+}
+
+/******************************************************************************
+ *
+ * Initialize a number of hardware-dependent bits
+ *
+ * hw: Struct containing variables accessed by shared code
+ *
+ * This function contains hardware limitation workarounds for PCI-E adapters
+ *
+ *****************************************************************************/
+static void e1000_initialize_hardware_bits(struct e1000_hw *hw)
+{
+    if ((hw->mac_type >= e1000_82571) && (!hw->initialize_hw_bits_disable)) {
+        /* Settings common to all PCI-express silicon */
+        u32 reg_ctrl, reg_ctrl_ext;
+        u32 reg_tarc0, reg_tarc1;
+        u32 reg_tctl;
+        u32 reg_txdctl, reg_txdctl1;
+
+        /* link autonegotiation/sync workarounds */
+        reg_tarc0 = er32(TARC0);
+        reg_tarc0 &= ~((1 << 30)|(1 << 29)|(1 << 28)|(1 << 27));
+
+        /* Enable not-done TX descriptor counting */
+        reg_txdctl = er32(TXDCTL);
+        reg_txdctl |= E1000_TXDCTL_COUNT_DESC;
+        ew32(TXDCTL, reg_txdctl);
+        reg_txdctl1 = er32(TXDCTL1);
+        reg_txdctl1 |= E1000_TXDCTL_COUNT_DESC;
+        ew32(TXDCTL1, reg_txdctl1);
+
+        switch (hw->mac_type) {
+            case e1000_82571:
+            case e1000_82572:
+                /* Clear PHY TX compatible mode bits */
+                reg_tarc1 = er32(TARC1);
+                reg_tarc1 &= ~((1 << 30)|(1 << 29));
+
+                /* link autonegotiation/sync workarounds */
+                reg_tarc0 |= ((1 << 26)|(1 << 25)|(1 << 24)|(1 << 23));
+
+                /* TX ring control fixes */
+                reg_tarc1 |= ((1 << 26)|(1 << 25)|(1 << 24));
+
+                /* Multiple read bit is reversed polarity */
+                reg_tctl = er32(TCTL);
+                if (reg_tctl & E1000_TCTL_MULR)
+                    reg_tarc1 &= ~(1 << 28);
+                else
+                    reg_tarc1 |= (1 << 28);
+
+                ew32(TARC1, reg_tarc1);
+                break;
+            case e1000_82573:
+                reg_ctrl_ext = er32(CTRL_EXT);
+                reg_ctrl_ext &= ~(1 << 23);
+                reg_ctrl_ext |= (1 << 22);
+
+                /* TX byte count fix */
+                reg_ctrl = er32(CTRL);
+                reg_ctrl &= ~(1 << 29);
+
+                ew32(CTRL_EXT, reg_ctrl_ext);
+                ew32(CTRL, reg_ctrl);
+                break;
+            case e1000_80003es2lan:
+                /* improve small packet performance for fiber/serdes */
+                if ((hw->media_type == e1000_media_type_fiber) ||
+                    (hw->media_type == e1000_media_type_internal_serdes)) {
+                    reg_tarc0 &= ~(1 << 20);
+                }
+
+                /* Multiple read bit is reversed polarity */
+                reg_tctl = er32(TCTL);
+                reg_tarc1 = er32(TARC1);
+                if (reg_tctl & E1000_TCTL_MULR)
+                    reg_tarc1 &= ~(1 << 28);
+                else
+                    reg_tarc1 |= (1 << 28);
+
+                ew32(TARC1, reg_tarc1);
+                break;
+            case e1000_ich8lan:
+                /* Reduce concurrent DMA requests to 3 from 4 */
+                if ((hw->revision_id < 3) ||
+                    ((hw->device_id != E1000_DEV_ID_ICH8_IGP_M_AMT) &&
+                     (hw->device_id != E1000_DEV_ID_ICH8_IGP_M)))
+                    reg_tarc0 |= ((1 << 29)|(1 << 28));
+
+                reg_ctrl_ext = er32(CTRL_EXT);
+                reg_ctrl_ext |= (1 << 22);
+                ew32(CTRL_EXT, reg_ctrl_ext);
+
+                /* workaround TX hang with TSO=on */
+                reg_tarc0 |= ((1 << 27)|(1 << 26)|(1 << 24)|(1 << 23));
+
+                /* Multiple read bit is reversed polarity */
+                reg_tctl = er32(TCTL);
+                reg_tarc1 = er32(TARC1);
+                if (reg_tctl & E1000_TCTL_MULR)
+                    reg_tarc1 &= ~(1 << 28);
+                else
+                    reg_tarc1 |= (1 << 28);
+
+                /* workaround TX hang with TSO=on */
+                reg_tarc1 |= ((1 << 30)|(1 << 26)|(1 << 24));
+
+                ew32(TARC1, reg_tarc1);
+                break;
+            default:
+                break;
+        }
+
+        ew32(TARC0, reg_tarc0);
+    }
+}
+
+/******************************************************************************
+ * Performs basic configuration of the adapter.
+ *
+ * hw - Struct containing variables accessed by shared code
+ *
+ * Assumes that the controller has previously been reset and is in a
+ * post-reset uninitialized state. Initializes the receive address registers,
+ * multicast table, and VLAN filter table. Calls routines to setup link
+ * configuration and flow control settings. Clears all on-chip counters. Leaves
+ * the transmit and receive units disabled and uninitialized.
+ *****************************************************************************/
+s32 e1000_init_hw(struct e1000_hw *hw)
+{
+    u32 ctrl;
+    u32 i;
+    s32 ret_val;
+    u32 mta_size;
+    u32 reg_data;
+    u32 ctrl_ext;
+
+    DEBUGFUNC("e1000_init_hw");
+
+    /* force full DMA clock frequency for 10/100 on ICH8 A0-B0 */
+    if ((hw->mac_type == e1000_ich8lan) &&
+        ((hw->revision_id < 3) ||
+         ((hw->device_id != E1000_DEV_ID_ICH8_IGP_M_AMT) &&
+          (hw->device_id != E1000_DEV_ID_ICH8_IGP_M)))) {
+            reg_data = er32(STATUS);
+            reg_data &= ~0x80000000;
+            ew32(STATUS, reg_data);
+    }
+
+    /* Initialize Identification LED */
+    ret_val = e1000_id_led_init(hw);
+    if (ret_val) {
+        DEBUGOUT("Error Initializing Identification LED\n");
+        return ret_val;
+    }
+
+    /* Set the media type and TBI compatibility */
+    e1000_set_media_type(hw);
+
+    /* Must be called after e1000_set_media_type because media_type is used */
+    e1000_initialize_hardware_bits(hw);
+
+    /* Disabling VLAN filtering. */
+    DEBUGOUT("Initializing the IEEE VLAN\n");
+    /* VET hardcoded to standard value and VFTA removed in ICH8 LAN */
+    if (hw->mac_type != e1000_ich8lan) {
+        if (hw->mac_type < e1000_82545_rev_3)
+            ew32(VET, 0);
+        e1000_clear_vfta(hw);
+    }
+
+    /* For 82542 (rev 2.0), disable MWI and put the receiver into reset */
+    if (hw->mac_type == e1000_82542_rev2_0) {
+        DEBUGOUT("Disabling MWI on 82542 rev 2.0\n");
+        e1000_pci_clear_mwi(hw);
+        ew32(RCTL, E1000_RCTL_RST);
+        E1000_WRITE_FLUSH();
+        msleep(5);
+    }
+
+    /* Setup the receive address. This involves initializing all of the Receive
+     * Address Registers (RARs 0 - 15).
+     */
+    e1000_init_rx_addrs(hw);
+
+    /* For 82542 (rev 2.0), take the receiver out of reset and enable MWI */
+    if (hw->mac_type == e1000_82542_rev2_0) {
+        ew32(RCTL, 0);
+        E1000_WRITE_FLUSH();
+        msleep(1);
+        if (hw->pci_cmd_word & PCI_COMMAND_INVALIDATE)
+            e1000_pci_set_mwi(hw);
+    }
+
+    /* Zero out the Multicast HASH table */
+    DEBUGOUT("Zeroing the MTA\n");
+    mta_size = E1000_MC_TBL_SIZE;
+    if (hw->mac_type == e1000_ich8lan)
+        mta_size = E1000_MC_TBL_SIZE_ICH8LAN;
+    for (i = 0; i < mta_size; i++) {
+        E1000_WRITE_REG_ARRAY(hw, MTA, i, 0);
+        /* use write flush to prevent Memory Write Block (MWB) from
+         * occurring when accessing our register space */
+        E1000_WRITE_FLUSH();
+    }
+
+    /* Set the PCI priority bit correctly in the CTRL register.  This
+     * determines if the adapter gives priority to receives, or if it
+     * gives equal priority to transmits and receives.  Valid only on
+     * 82542 and 82543 silicon.
+     */
+    if (hw->dma_fairness && hw->mac_type <= e1000_82543) {
+        ctrl = er32(CTRL);
+        ew32(CTRL, ctrl | E1000_CTRL_PRIOR);
+    }
+
+    switch (hw->mac_type) {
+    case e1000_82545_rev_3:
+    case e1000_82546_rev_3:
+        break;
+    default:
+        /* Workaround for PCI-X problem when BIOS sets MMRBC incorrectly. */
+	if (hw->bus_type == e1000_bus_type_pcix && e1000_pcix_get_mmrbc(hw) > 2048)
+		e1000_pcix_set_mmrbc(hw, 2048);
+	break;
+    }
+
+    /* More time needed for PHY to initialize */
+    if (hw->mac_type == e1000_ich8lan)
+        msleep(15);
+
+    /* Call a subroutine to configure the link and setup flow control. */
+    ret_val = e1000_setup_link(hw);
+
+    /* Set the transmit descriptor write-back policy */
+    if (hw->mac_type > e1000_82544) {
+        ctrl = er32(TXDCTL);
+        ctrl = (ctrl & ~E1000_TXDCTL_WTHRESH) | E1000_TXDCTL_FULL_TX_DESC_WB;
+        ew32(TXDCTL, ctrl);
+    }
+
+    if (hw->mac_type == e1000_82573) {
+        e1000_enable_tx_pkt_filtering(hw);
+    }
+
+    switch (hw->mac_type) {
+    default:
+        break;
+    case e1000_80003es2lan:
+        /* Enable retransmit on late collisions */
+        reg_data = er32(TCTL);
+        reg_data |= E1000_TCTL_RTLC;
+        ew32(TCTL, reg_data);
+
+        /* Configure Gigabit Carry Extend Padding */
+        reg_data = er32(TCTL_EXT);
+        reg_data &= ~E1000_TCTL_EXT_GCEX_MASK;
+        reg_data |= DEFAULT_80003ES2LAN_TCTL_EXT_GCEX;
+        ew32(TCTL_EXT, reg_data);
+
+        /* Configure Transmit Inter-Packet Gap */
+        reg_data = er32(TIPG);
+        reg_data &= ~E1000_TIPG_IPGT_MASK;
+        reg_data |= DEFAULT_80003ES2LAN_TIPG_IPGT_1000;
+        ew32(TIPG, reg_data);
+
+        reg_data = E1000_READ_REG_ARRAY(hw, FFLT, 0x0001);
+        reg_data &= ~0x00100000;
+        E1000_WRITE_REG_ARRAY(hw, FFLT, 0x0001, reg_data);
+        /* Fall through */
+    case e1000_82571:
+    case e1000_82572:
+    case e1000_ich8lan:
+        ctrl = er32(TXDCTL1);
+        ctrl = (ctrl & ~E1000_TXDCTL_WTHRESH) | E1000_TXDCTL_FULL_TX_DESC_WB;
+        ew32(TXDCTL1, ctrl);
+        break;
+    }
+
+    if (hw->mac_type == e1000_82573) {
+        u32 gcr = er32(GCR);
+        gcr |= E1000_GCR_L1_ACT_WITHOUT_L0S_RX;
+        ew32(GCR, gcr);
+    }
+
+    /* Clear all of the statistics registers (clear on read).  It is
+     * important that we do this after we have tried to establish link
+     * because the symbol error count will increment wildly if there
+     * is no link.
+     */
+    e1000_clear_hw_cntrs(hw);
+
+    /* ICH8 No-snoop bits are opposite polarity.
+     * Set to snoop by default after reset. */
+    if (hw->mac_type == e1000_ich8lan)
+        e1000_set_pci_ex_no_snoop(hw, PCI_EX_82566_SNOOP_ALL);
+
+    if (hw->device_id == E1000_DEV_ID_82546GB_QUAD_COPPER ||
+        hw->device_id == E1000_DEV_ID_82546GB_QUAD_COPPER_KSP3) {
+        ctrl_ext = er32(CTRL_EXT);
+        /* Relaxed ordering must be disabled to avoid a parity
+         * error crash in a PCI slot. */
+        ctrl_ext |= E1000_CTRL_EXT_RO_DIS;
+        ew32(CTRL_EXT, ctrl_ext);
+    }
+
+    return ret_val;
+}
+
+/******************************************************************************
+ * Adjust SERDES output amplitude based on EEPROM setting.
+ *
+ * hw - Struct containing variables accessed by shared code.
+ *****************************************************************************/
+static s32 e1000_adjust_serdes_amplitude(struct e1000_hw *hw)
+{
+    u16 eeprom_data;
+    s32  ret_val;
+
+    DEBUGFUNC("e1000_adjust_serdes_amplitude");
+
+    if (hw->media_type != e1000_media_type_internal_serdes)
+        return E1000_SUCCESS;
+
+    switch (hw->mac_type) {
+    case e1000_82545_rev_3:
+    case e1000_82546_rev_3:
+        break;
+    default:
+        return E1000_SUCCESS;
+    }
+
+    ret_val = e1000_read_eeprom(hw, EEPROM_SERDES_AMPLITUDE, 1, &eeprom_data);
+    if (ret_val) {
+        return ret_val;
+    }
+
+    if (eeprom_data != EEPROM_RESERVED_WORD) {
+        /* Adjust SERDES output amplitude only. */
+        eeprom_data &= EEPROM_SERDES_AMPLITUDE_MASK;
+        ret_val = e1000_write_phy_reg(hw, M88E1000_PHY_EXT_CTRL, eeprom_data);
+        if (ret_val)
+            return ret_val;
+    }
+
+    return E1000_SUCCESS;
+}
+
+/******************************************************************************
+ * Configures flow control and link settings.
+ *
+ * hw - Struct containing variables accessed by shared code
+ *
+ * Determines which flow control settings to use. Calls the appropriate media-
+ * specific link configuration function. Configures the flow control settings.
+ * Assuming the adapter has a valid link partner, a valid link should be
+ * established. Assumes the hardware has previously been reset and the
+ * transmitter and receiver are not enabled.
+ *****************************************************************************/
+s32 e1000_setup_link(struct e1000_hw *hw)
+{
+    u32 ctrl_ext;
+    s32 ret_val;
+    u16 eeprom_data;
+
+    DEBUGFUNC("e1000_setup_link");
+
+    /* In the case of the phy reset being blocked, we already have a link.
+     * We do not have to set it up again. */
+    if (e1000_check_phy_reset_block(hw))
+        return E1000_SUCCESS;
+
+    /* Read and store word 0x0F of the EEPROM. This word contains bits
+     * that determine the hardware's default PAUSE (flow control) mode,
+     * a bit that determines whether the HW defaults to enabling or
+     * disabling auto-negotiation, and the direction of the
+     * SW defined pins. If there is no SW over-ride of the flow
+     * control setting, then the variable hw->fc will
+     * be initialized based on a value in the EEPROM.
+     */
+    if (hw->fc == E1000_FC_DEFAULT) {
+        switch (hw->mac_type) {
+        case e1000_ich8lan:
+        case e1000_82573:
+            hw->fc = E1000_FC_FULL;
+            break;
+        default:
+            ret_val = e1000_read_eeprom(hw, EEPROM_INIT_CONTROL2_REG,
+                                        1, &eeprom_data);
+            if (ret_val) {
+                DEBUGOUT("EEPROM Read Error\n");
+                return -E1000_ERR_EEPROM;
+            }
+            if ((eeprom_data & EEPROM_WORD0F_PAUSE_MASK) == 0)
+                hw->fc = E1000_FC_NONE;
+            else if ((eeprom_data & EEPROM_WORD0F_PAUSE_MASK) ==
+                    EEPROM_WORD0F_ASM_DIR)
+                hw->fc = E1000_FC_TX_PAUSE;
+            else
+                hw->fc = E1000_FC_FULL;
+            break;
+        }
+    }
+
+    /* We want to save off the original Flow Control configuration just
+     * in case we get disconnected and then reconnected into a different
+     * hub or switch with different Flow Control capabilities.
+     */
+    if (hw->mac_type == e1000_82542_rev2_0)
+        hw->fc &= (~E1000_FC_TX_PAUSE);
+
+    if ((hw->mac_type < e1000_82543) && (hw->report_tx_early == 1))
+        hw->fc &= (~E1000_FC_RX_PAUSE);
+
+    hw->original_fc = hw->fc;
+
+    DEBUGOUT1("After fix-ups FlowControl is now = %x\n", hw->fc);
+
+    /* Take the 4 bits from EEPROM word 0x0F that determine the initial
+     * polarity value for the SW controlled pins, and setup the
+     * Extended Device Control reg with that info.
+     * This is needed because one of the SW controlled pins is used for
+     * signal detection.  So this should be done before e1000_setup_pcs_link()
+     * or e1000_phy_setup() is called.
+     */
+    if (hw->mac_type == e1000_82543) {
+        ret_val = e1000_read_eeprom(hw, EEPROM_INIT_CONTROL2_REG,
+                                    1, &eeprom_data);
+        if (ret_val) {
+            DEBUGOUT("EEPROM Read Error\n");
+            return -E1000_ERR_EEPROM;
+        }
+        ctrl_ext = ((eeprom_data & EEPROM_WORD0F_SWPDIO_EXT) <<
+                    SWDPIO__EXT_SHIFT);
+        ew32(CTRL_EXT, ctrl_ext);
+    }
+
+    /* Call the necessary subroutine to configure the link. */
+    ret_val = (hw->media_type == e1000_media_type_copper) ?
+              e1000_setup_copper_link(hw) :
+              e1000_setup_fiber_serdes_link(hw);
+
+    /* Initialize the flow control address, type, and PAUSE timer
+     * registers to their default values.  This is done even if flow
+     * control is disabled, because it does not hurt anything to
+     * initialize these registers.
+     */
+    DEBUGOUT("Initializing the Flow Control address, type and timer regs\n");
+
+    /* FCAL/H and FCT are hardcoded to standard values in e1000_ich8lan. */
+    if (hw->mac_type != e1000_ich8lan) {
+        ew32(FCT, FLOW_CONTROL_TYPE);
+        ew32(FCAH, FLOW_CONTROL_ADDRESS_HIGH);
+        ew32(FCAL, FLOW_CONTROL_ADDRESS_LOW);
+    }
+
+    ew32(FCTTV, hw->fc_pause_time);
+
+    /* Set the flow control receive threshold registers.  Normally,
+     * these registers will be set to a default threshold that may be
+     * adjusted later by the driver's runtime code.  However, if the
+     * ability to transmit pause frames is not enabled, then these
+     * registers will be set to 0.
+     */
+    if (!(hw->fc & E1000_FC_TX_PAUSE)) {
+        ew32(FCRTL, 0);
+        ew32(FCRTH, 0);
+    } else {
+        /* We need to set up the Receive Threshold high and low water marks
+         * as well as (optionally) enabling the transmission of XON frames.
+         */
+        if (hw->fc_send_xon) {
+            ew32(FCRTL, (hw->fc_low_water | E1000_FCRTL_XONE));
+            ew32(FCRTH, hw->fc_high_water);
+        } else {
+            ew32(FCRTL, hw->fc_low_water);
+            ew32(FCRTH, hw->fc_high_water);
+        }
+    }
+    return ret_val;
+}
+
+/******************************************************************************
+ * Sets up link for a fiber based or serdes based adapter
+ *
+ * hw - Struct containing variables accessed by shared code
+ *
+ * Manipulates Physical Coding Sublayer functions in order to configure
+ * link. Assumes the hardware has been previously reset and the transmitter
+ * and receiver are not enabled.
+ *****************************************************************************/
+static s32 e1000_setup_fiber_serdes_link(struct e1000_hw *hw)
+{
+    u32 ctrl;
+    u32 status;
+    u32 txcw = 0;
+    u32 i;
+    u32 signal = 0;
+    s32 ret_val;
+
+    DEBUGFUNC("e1000_setup_fiber_serdes_link");
+
+    /* On 82571 and 82572 Fiber connections, SerDes loopback mode persists
+     * until explicitly turned off or a power cycle is performed.  A read to
+     * the register does not indicate its status.  Therefore, we ensure
+     * loopback mode is disabled during initialization.
+     */
+    if (hw->mac_type == e1000_82571 || hw->mac_type == e1000_82572)
+        ew32(SCTL, E1000_DISABLE_SERDES_LOOPBACK);
+
+    /* On adapters with a MAC newer than 82544, SWDP 1 will be
+     * set when the optics detect a signal. On older adapters, it will be
+     * cleared when there is a signal.  This applies to fiber media only.
+     * If we're on serdes media, adjust the output amplitude to value
+     * set in the EEPROM.
+     */
+    ctrl = er32(CTRL);
+    if (hw->media_type == e1000_media_type_fiber)
+        signal = (hw->mac_type > e1000_82544) ? E1000_CTRL_SWDPIN1 : 0;
+
+    ret_val = e1000_adjust_serdes_amplitude(hw);
+    if (ret_val)
+        return ret_val;
+
+    /* Take the link out of reset */
+    ctrl &= ~(E1000_CTRL_LRST);
+
+    /* Adjust VCO speed to improve BER performance */
+    ret_val = e1000_set_vco_speed(hw);
+    if (ret_val)
+        return ret_val;
+
+    e1000_config_collision_dist(hw);
+
+    /* Check for a software override of the flow control settings, and setup
+     * the device accordingly.  If auto-negotiation is enabled, then software
+     * will have to set the "PAUSE" bits to the correct value in the Transmit
+     * Config Word Register (TXCW) and re-start auto-negotiation.  However, if
+     * auto-negotiation is disabled, then software will have to manually
+     * configure the two flow control enable bits in the CTRL register.
+     *
+     * The possible values of the "fc" parameter are:
+     *      0:  Flow control is completely disabled
+     *      1:  Rx flow control is enabled (we can receive pause frames, but
+     *          not send pause frames).
+     *      2:  Tx flow control is enabled (we can send pause frames but we do
+     *          not support receiving pause frames).
+     *      3:  Both Rx and TX flow control (symmetric) are enabled.
+     */
+    switch (hw->fc) {
+    case E1000_FC_NONE:
+        /* Flow control is completely disabled by a software over-ride. */
+        txcw = (E1000_TXCW_ANE | E1000_TXCW_FD);
+        break;
+    case E1000_FC_RX_PAUSE:
+        /* RX Flow control is enabled and TX Flow control is disabled by a
+         * software over-ride. Since there really isn't a way to advertise
+         * that we are capable of RX Pause ONLY, we will advertise that we
+         * support both symmetric and asymmetric RX PAUSE. Later, we will
+         * disable the adapter's ability to send PAUSE frames.
+         */
+        txcw = (E1000_TXCW_ANE | E1000_TXCW_FD | E1000_TXCW_PAUSE_MASK);
+        break;
+    case E1000_FC_TX_PAUSE:
+        /* TX Flow control is enabled, and RX Flow control is disabled, by a
+         * software over-ride.
+         */
+        txcw = (E1000_TXCW_ANE | E1000_TXCW_FD | E1000_TXCW_ASM_DIR);
+        break;
+    case E1000_FC_FULL:
+        /* Flow control (both RX and TX) is enabled by a software over-ride. */
+        txcw = (E1000_TXCW_ANE | E1000_TXCW_FD | E1000_TXCW_PAUSE_MASK);
+        break;
+    default:
+        DEBUGOUT("Flow control param set incorrectly\n");
+        return -E1000_ERR_CONFIG;
+        break;
+    }
+
+    /* Since auto-negotiation is enabled, take the link out of reset (the link
+     * will be in reset, because we previously reset the chip). This will
+     * restart auto-negotiation.  If auto-negotiation is successful then the
+     * link-up status bit will be set and the flow control enable bits (RFCE
+     * and TFCE) will be set according to their negotiated value.
+     */
+    DEBUGOUT("Auto-negotiation enabled\n");
+
+    ew32(TXCW, txcw);
+    ew32(CTRL, ctrl);
+    E1000_WRITE_FLUSH();
+
+    hw->txcw = txcw;
+    msleep(1);
+
+    /* If we have a signal (the cable is plugged in) then poll for a "Link-Up"
+     * indication in the Device Status Register.  Time-out if a link isn't
+     * seen in 500 milliseconds (Auto-negotiation should complete in
+     * less than 500 milliseconds even if the other end is doing it in SW).
+     * For internal serdes, we just assume a signal is present, then poll.
+     */
+    if (hw->media_type == e1000_media_type_internal_serdes ||
+       (er32(CTRL) & E1000_CTRL_SWDPIN1) == signal) {
+        DEBUGOUT("Looking for Link\n");
+        for (i = 0; i < (LINK_UP_TIMEOUT / 10); i++) {
+            msleep(10);
+            status = er32(STATUS);
+            if (status & E1000_STATUS_LU) break;
+        }
+        if (i == (LINK_UP_TIMEOUT / 10)) {
+            DEBUGOUT("Never got a valid link from auto-neg!!!\n");
+            hw->autoneg_failed = 1;
+            /* AutoNeg failed to achieve a link, so we'll call
+             * e1000_check_for_link. This routine will force the link up if
+             * we detect a signal. This will allow us to communicate with
+             * non-autonegotiating link partners.
+             */
+            ret_val = e1000_check_for_link(hw);
+            if (ret_val) {
+                DEBUGOUT("Error while checking for link\n");
+                return ret_val;
+            }
+            hw->autoneg_failed = 0;
+        } else {
+            hw->autoneg_failed = 0;
+            DEBUGOUT("Valid Link Found\n");
+        }
+    } else {
+        DEBUGOUT("No Signal Detected\n");
+    }
+    return E1000_SUCCESS;
+}
+
+/******************************************************************************
+* Make sure we have a valid PHY and change PHY mode before link setup.
+*
+* hw - Struct containing variables accessed by shared code
+******************************************************************************/
+static s32 e1000_copper_link_preconfig(struct e1000_hw *hw)
+{
+    u32 ctrl;
+    s32 ret_val;
+    u16 phy_data;
+
+    DEBUGFUNC("e1000_copper_link_preconfig");
+
+    ctrl = er32(CTRL);
+    /* With 82543, we need to force speed and duplex on the MAC equal to what
+     * the PHY speed and duplex configuration is. In addition, we need to
+     * perform a hardware reset on the PHY to take it out of reset.
+     */
+    if (hw->mac_type > e1000_82543) {
+        ctrl |= E1000_CTRL_SLU;
+        ctrl &= ~(E1000_CTRL_FRCSPD | E1000_CTRL_FRCDPX);
+        ew32(CTRL, ctrl);
+    } else {
+        ctrl |= (E1000_CTRL_FRCSPD | E1000_CTRL_FRCDPX | E1000_CTRL_SLU);
+        ew32(CTRL, ctrl);
+        ret_val = e1000_phy_hw_reset(hw);
+        if (ret_val)
+            return ret_val;
+    }
+
+    /* Make sure we have a valid PHY */
+    ret_val = e1000_detect_gig_phy(hw);
+    if (ret_val) {
+        DEBUGOUT("Error, did not detect valid phy.\n");
+        return ret_val;
+    }
+    DEBUGOUT1("Phy ID = %x \n", hw->phy_id);
+
+    /* Set PHY to class A mode (if necessary) */
+    ret_val = e1000_set_phy_mode(hw);
+    if (ret_val)
+        return ret_val;
+
+    if ((hw->mac_type == e1000_82545_rev_3) ||
+       (hw->mac_type == e1000_82546_rev_3)) {
+        ret_val = e1000_read_phy_reg(hw, M88E1000_PHY_SPEC_CTRL, &phy_data);
+        phy_data |= 0x00000008;
+        ret_val = e1000_write_phy_reg(hw, M88E1000_PHY_SPEC_CTRL, phy_data);
+    }
+
+    if (hw->mac_type <= e1000_82543 ||
+        hw->mac_type == e1000_82541 || hw->mac_type == e1000_82547 ||
+        hw->mac_type == e1000_82541_rev_2 || hw->mac_type == e1000_82547_rev_2)
+        hw->phy_reset_disable = false;
+
+    return E1000_SUCCESS;
+}
+
+
+/********************************************************************
+* Copper link setup for e1000_phy_igp series.
+*
+* hw - Struct containing variables accessed by shared code
+*********************************************************************/
+static s32 e1000_copper_link_igp_setup(struct e1000_hw *hw)
+{
+    u32 led_ctrl;
+    s32 ret_val;
+    u16 phy_data;
+
+    DEBUGFUNC("e1000_copper_link_igp_setup");
+
+    if (hw->phy_reset_disable)
+        return E1000_SUCCESS;
+
+    ret_val = e1000_phy_reset(hw);
+    if (ret_val) {
+        DEBUGOUT("Error Resetting the PHY\n");
+        return ret_val;
+    }
+
+    /* Wait 15ms for MAC to configure PHY from eeprom settings */
+    msleep(15);
+    if (hw->mac_type != e1000_ich8lan) {
+        /* Configure activity LED after PHY reset */
+        led_ctrl = er32(LEDCTL);
+        led_ctrl &= IGP_ACTIVITY_LED_MASK;
+        led_ctrl |= (IGP_ACTIVITY_LED_ENABLE | IGP_LED3_MODE);
+        ew32(LEDCTL, led_ctrl);
+    }
+
+    /* The NVM settings will configure LPLU in D3 for IGP2 and IGP3 PHYs */
+    if (hw->phy_type == e1000_phy_igp) {
+        /* disable lplu d3 during driver init */
+        ret_val = e1000_set_d3_lplu_state(hw, false);
+        if (ret_val) {
+            DEBUGOUT("Error Disabling LPLU D3\n");
+            return ret_val;
+        }
+    }
+
+    /* disable lplu d0 during driver init */
+    ret_val = e1000_set_d0_lplu_state(hw, false);
+    if (ret_val) {
+        DEBUGOUT("Error Disabling LPLU D0\n");
+        return ret_val;
+    }
+    /* Configure mdi-mdix settings */
+    ret_val = e1000_read_phy_reg(hw, IGP01E1000_PHY_PORT_CTRL, &phy_data);
+    if (ret_val)
+        return ret_val;
+
+    if ((hw->mac_type == e1000_82541) || (hw->mac_type == e1000_82547)) {
+        hw->dsp_config_state = e1000_dsp_config_disabled;
+        /* Force MDI for earlier revs of the IGP PHY */
+        phy_data &= ~(IGP01E1000_PSCR_AUTO_MDIX | IGP01E1000_PSCR_FORCE_MDI_MDIX);
+        hw->mdix = 1;
+
+    } else {
+        hw->dsp_config_state = e1000_dsp_config_enabled;
+        phy_data &= ~IGP01E1000_PSCR_AUTO_MDIX;
+
+        switch (hw->mdix) {
+        case 1:
+            phy_data &= ~IGP01E1000_PSCR_FORCE_MDI_MDIX;
+            break;
+        case 2:
+            phy_data |= IGP01E1000_PSCR_FORCE_MDI_MDIX;
+            break;
+        case 0:
+        default:
+            phy_data |= IGP01E1000_PSCR_AUTO_MDIX;
+            break;
+        }
+    }
+    ret_val = e1000_write_phy_reg(hw, IGP01E1000_PHY_PORT_CTRL, phy_data);
+    if (ret_val)
+        return ret_val;
+
+    /* set auto-master slave resolution settings */
+    if (hw->autoneg) {
+        e1000_ms_type phy_ms_setting = hw->master_slave;
+
+        if (hw->ffe_config_state == e1000_ffe_config_active)
+            hw->ffe_config_state = e1000_ffe_config_enabled;
+
+        if (hw->dsp_config_state == e1000_dsp_config_activated)
+            hw->dsp_config_state = e1000_dsp_config_enabled;
+
+        /* When autonegotiation advertisement is only 1000Mbps then we
+         * should disable SmartSpeed and enable Auto MasterSlave
+         * resolution as hardware default. */
+        if (hw->autoneg_advertised == ADVERTISE_1000_FULL) {
+            /* Disable SmartSpeed */
+            ret_val = e1000_read_phy_reg(hw, IGP01E1000_PHY_PORT_CONFIG,
+                                         &phy_data);
+            if (ret_val)
+                return ret_val;
+            phy_data &= ~IGP01E1000_PSCFR_SMART_SPEED;
+            ret_val = e1000_write_phy_reg(hw, IGP01E1000_PHY_PORT_CONFIG,
+                                          phy_data);
+            if (ret_val)
+                return ret_val;
+            /* Set auto Master/Slave resolution process */
+            ret_val = e1000_read_phy_reg(hw, PHY_1000T_CTRL, &phy_data);
+            if (ret_val)
+                return ret_val;
+            phy_data &= ~CR_1000T_MS_ENABLE;
+            ret_val = e1000_write_phy_reg(hw, PHY_1000T_CTRL, phy_data);
+            if (ret_val)
+                return ret_val;
+        }
+
+        ret_val = e1000_read_phy_reg(hw, PHY_1000T_CTRL, &phy_data);
+        if (ret_val)
+            return ret_val;
+
+        /* load defaults for future use */
+        hw->original_master_slave = (phy_data & CR_1000T_MS_ENABLE) ?
+                                        ((phy_data & CR_1000T_MS_VALUE) ?
+                                         e1000_ms_force_master :
+                                         e1000_ms_force_slave) :
+                                         e1000_ms_auto;
+
+        switch (phy_ms_setting) {
+        case e1000_ms_force_master:
+            phy_data |= (CR_1000T_MS_ENABLE | CR_1000T_MS_VALUE);
+            break;
+        case e1000_ms_force_slave:
+            phy_data |= CR_1000T_MS_ENABLE;
+            phy_data &= ~(CR_1000T_MS_VALUE);
+            break;
+        case e1000_ms_auto:
+            phy_data &= ~CR_1000T_MS_ENABLE;
+        default:
+            break;
+        }
+        ret_val = e1000_write_phy_reg(hw, PHY_1000T_CTRL, phy_data);
+        if (ret_val)
+            return ret_val;
+    }
+
+    return E1000_SUCCESS;
+}
+
+/********************************************************************
+* Copper link setup for e1000_phy_gg82563 series.
+*
+* hw - Struct containing variables accessed by shared code
+*********************************************************************/
+static s32 e1000_copper_link_ggp_setup(struct e1000_hw *hw)
+{
+    s32 ret_val;
+    u16 phy_data;
+    u32 reg_data;
+
+    DEBUGFUNC("e1000_copper_link_ggp_setup");
+
+    if (!hw->phy_reset_disable) {
+
+        /* Enable CRS on TX for half-duplex operation. */
+        ret_val = e1000_read_phy_reg(hw, GG82563_PHY_MAC_SPEC_CTRL,
+                                     &phy_data);
+        if (ret_val)
+            return ret_val;
+
+        phy_data |= GG82563_MSCR_ASSERT_CRS_ON_TX;
+        /* Use 25MHz for both link down and 1000BASE-T for Tx clock */
+        phy_data |= GG82563_MSCR_TX_CLK_1000MBPS_25MHZ;
+
+        ret_val = e1000_write_phy_reg(hw, GG82563_PHY_MAC_SPEC_CTRL,
+                                      phy_data);
+        if (ret_val)
+            return ret_val;
+
+        /* Options:
+         *   MDI/MDI-X = 0 (default)
+         *   0 - Auto for all speeds
+         *   1 - MDI mode
+         *   2 - MDI-X mode
+         *   3 - Auto for 1000Base-T only (MDI-X for 10/100Base-T modes)
+         */
+        ret_val = e1000_read_phy_reg(hw, GG82563_PHY_SPEC_CTRL, &phy_data);
+        if (ret_val)
+            return ret_val;
+
+        phy_data &= ~GG82563_PSCR_CROSSOVER_MODE_MASK;
+
+        switch (hw->mdix) {
+        case 1:
+            phy_data |= GG82563_PSCR_CROSSOVER_MODE_MDI;
+            break;
+        case 2:
+            phy_data |= GG82563_PSCR_CROSSOVER_MODE_MDIX;
+            break;
+        case 0:
+        default:
+            phy_data |= GG82563_PSCR_CROSSOVER_MODE_AUTO;
+            break;
+        }
+
+        /* Options:
+         *   disable_polarity_correction = 0 (default)
+         *       Automatic Correction for Reversed Cable Polarity
+         *   0 - Disabled
+         *   1 - Enabled
+         */
+        phy_data &= ~GG82563_PSCR_POLARITY_REVERSAL_DISABLE;
+        if (hw->disable_polarity_correction == 1)
+            phy_data |= GG82563_PSCR_POLARITY_REVERSAL_DISABLE;
+        ret_val = e1000_write_phy_reg(hw, GG82563_PHY_SPEC_CTRL, phy_data);
+
+        if (ret_val)
+            return ret_val;
+
+        /* SW Reset the PHY so all changes take effect */
+        ret_val = e1000_phy_reset(hw);
+        if (ret_val) {
+            DEBUGOUT("Error Resetting the PHY\n");
+            return ret_val;
+        }
+    } /* phy_reset_disable */
+
+    if (hw->mac_type == e1000_80003es2lan) {
+        /* Bypass RX and TX FIFO's */
+        ret_val = e1000_write_kmrn_reg(hw, E1000_KUMCTRLSTA_OFFSET_FIFO_CTRL,
+                                       E1000_KUMCTRLSTA_FIFO_CTRL_RX_BYPASS |
+                                       E1000_KUMCTRLSTA_FIFO_CTRL_TX_BYPASS);
+        if (ret_val)
+            return ret_val;
+
+        ret_val = e1000_read_phy_reg(hw, GG82563_PHY_SPEC_CTRL_2, &phy_data);
+        if (ret_val)
+            return ret_val;
+
+        phy_data &= ~GG82563_PSCR2_REVERSE_AUTO_NEG;
+        ret_val = e1000_write_phy_reg(hw, GG82563_PHY_SPEC_CTRL_2, phy_data);
+
+        if (ret_val)
+            return ret_val;
+
+        reg_data = er32(CTRL_EXT);
+        reg_data &= ~(E1000_CTRL_EXT_LINK_MODE_MASK);
+        ew32(CTRL_EXT, reg_data);
+
+        ret_val = e1000_read_phy_reg(hw, GG82563_PHY_PWR_MGMT_CTRL,
+                                          &phy_data);
+        if (ret_val)
+            return ret_val;
+
+        /* Do not init these registers when the HW is in IAMT mode, since the
+         * firmware will have already initialized them.  We only initialize
+         * them if the HW is not in IAMT mode.
+         */
+        if (!e1000_check_mng_mode(hw)) {
+            /* Enable Electrical Idle on the PHY */
+            phy_data |= GG82563_PMCR_ENABLE_ELECTRICAL_IDLE;
+            ret_val = e1000_write_phy_reg(hw, GG82563_PHY_PWR_MGMT_CTRL,
+                                          phy_data);
+            if (ret_val)
+                return ret_val;
+
+            ret_val = e1000_read_phy_reg(hw, GG82563_PHY_KMRN_MODE_CTRL,
+                                         &phy_data);
+            if (ret_val)
+                return ret_val;
+
+            phy_data &= ~GG82563_KMCR_PASS_FALSE_CARRIER;
+            ret_val = e1000_write_phy_reg(hw, GG82563_PHY_KMRN_MODE_CTRL,
+                                          phy_data);
+
+            if (ret_val)
+                return ret_val;
+        }
+
+        /* Workaround: Disable padding in Kumeran interface in the MAC
+         * and in the PHY to avoid CRC errors.
+         */
+        ret_val = e1000_read_phy_reg(hw, GG82563_PHY_INBAND_CTRL,
+                                     &phy_data);
+        if (ret_val)
+            return ret_val;
+        phy_data |= GG82563_ICR_DIS_PADDING;
+        ret_val = e1000_write_phy_reg(hw, GG82563_PHY_INBAND_CTRL,
+                                      phy_data);
+        if (ret_val)
+            return ret_val;
+    }
+
+    return E1000_SUCCESS;
+}
+
+/********************************************************************
+* Copper link setup for e1000_phy_m88 series.
+*
+* hw - Struct containing variables accessed by shared code
+*********************************************************************/
+static s32 e1000_copper_link_mgp_setup(struct e1000_hw *hw)
+{
+    s32 ret_val;
+    u16 phy_data;
+
+    DEBUGFUNC("e1000_copper_link_mgp_setup");
+
+    if (hw->phy_reset_disable)
+        return E1000_SUCCESS;
+
+    /* Enable CRS on TX. This must be set for half-duplex operation. */
+    ret_val = e1000_read_phy_reg(hw, M88E1000_PHY_SPEC_CTRL, &phy_data);
+    if (ret_val)
+        return ret_val;
+
+    phy_data |= M88E1000_PSCR_ASSERT_CRS_ON_TX;
+
+    /* Options:
+     *   MDI/MDI-X = 0 (default)
+     *   0 - Auto for all speeds
+     *   1 - MDI mode
+     *   2 - MDI-X mode
+     *   3 - Auto for 1000Base-T only (MDI-X for 10/100Base-T modes)
+     */
+    phy_data &= ~M88E1000_PSCR_AUTO_X_MODE;
+
+    switch (hw->mdix) {
+    case 1:
+        phy_data |= M88E1000_PSCR_MDI_MANUAL_MODE;
+        break;
+    case 2:
+        phy_data |= M88E1000_PSCR_MDIX_MANUAL_MODE;
+        break;
+    case 3:
+        phy_data |= M88E1000_PSCR_AUTO_X_1000T;
+        break;
+    case 0:
+    default:
+        phy_data |= M88E1000_PSCR_AUTO_X_MODE;
+        break;
+    }
+
+    /* Options:
+     *   disable_polarity_correction = 0 (default)
+     *       Automatic Correction for Reversed Cable Polarity
+     *   0 - Disabled
+     *   1 - Enabled
+     */
+    phy_data &= ~M88E1000_PSCR_POLARITY_REVERSAL;
+    if (hw->disable_polarity_correction == 1)
+        phy_data |= M88E1000_PSCR_POLARITY_REVERSAL;
+    ret_val = e1000_write_phy_reg(hw, M88E1000_PHY_SPEC_CTRL, phy_data);
+    if (ret_val)
+        return ret_val;
+
+    if (hw->phy_revision < M88E1011_I_REV_4) {
+        /* Force TX_CLK in the Extended PHY Specific Control Register
+         * to 25MHz clock.
+         */
+        ret_val = e1000_read_phy_reg(hw, M88E1000_EXT_PHY_SPEC_CTRL, &phy_data);
+        if (ret_val)
+            return ret_val;
+
+        phy_data |= M88E1000_EPSCR_TX_CLK_25;
+
+        if ((hw->phy_revision == E1000_REVISION_2) &&
+            (hw->phy_id == M88E1111_I_PHY_ID)) {
+            /* Vidalia Phy, set the downshift counter to 5x */
+            phy_data &= ~(M88EC018_EPSCR_DOWNSHIFT_COUNTER_MASK);
+            phy_data |= M88EC018_EPSCR_DOWNSHIFT_COUNTER_5X;
+            ret_val = e1000_write_phy_reg(hw,
+                                        M88E1000_EXT_PHY_SPEC_CTRL, phy_data);
+            if (ret_val)
+                return ret_val;
+        } else {
+            /* Configure Master and Slave downshift values */
+            phy_data &= ~(M88E1000_EPSCR_MASTER_DOWNSHIFT_MASK |
+                              M88E1000_EPSCR_SLAVE_DOWNSHIFT_MASK);
+            phy_data |= (M88E1000_EPSCR_MASTER_DOWNSHIFT_1X |
+                             M88E1000_EPSCR_SLAVE_DOWNSHIFT_1X);
+            ret_val = e1000_write_phy_reg(hw,
+                                        M88E1000_EXT_PHY_SPEC_CTRL, phy_data);
+            if (ret_val)
+               return ret_val;
+        }
+    }
+
+    /* SW Reset the PHY so all changes take effect */
+    ret_val = e1000_phy_reset(hw);
+    if (ret_val) {
+        DEBUGOUT("Error Resetting the PHY\n");
+        return ret_val;
+    }
+
+    return E1000_SUCCESS;
+}
+
+/********************************************************************
+* Setup auto-negotiation and flow control advertisements,
+* and then perform auto-negotiation.
+*
+* hw - Struct containing variables accessed by shared code
+*********************************************************************/
+static s32 e1000_copper_link_autoneg(struct e1000_hw *hw)
+{
+    s32 ret_val;
+    u16 phy_data;
+
+    DEBUGFUNC("e1000_copper_link_autoneg");
+
+    /* Perform some bounds checking on the hw->autoneg_advertised
+     * parameter.  If this variable is zero, then set it to the default.
+     */
+    hw->autoneg_advertised &= AUTONEG_ADVERTISE_SPEED_DEFAULT;
+
+    /* If autoneg_advertised is zero, we assume it was not defaulted
+     * by the calling code so we set to advertise full capability.
+     */
+    if (hw->autoneg_advertised == 0)
+        hw->autoneg_advertised = AUTONEG_ADVERTISE_SPEED_DEFAULT;
+
+    /* IFE phy only supports 10/100 */
+    if (hw->phy_type == e1000_phy_ife)
+        hw->autoneg_advertised &= AUTONEG_ADVERTISE_10_100_ALL;
+
+    DEBUGOUT("Reconfiguring auto-neg advertisement params\n");
+    ret_val = e1000_phy_setup_autoneg(hw);
+    if (ret_val) {
+        DEBUGOUT("Error Setting up Auto-Negotiation\n");
+        return ret_val;
+    }
+    DEBUGOUT("Restarting Auto-Neg\n");
+
+    /* Restart auto-negotiation by setting the Auto Neg Enable bit and
+     * the Auto Neg Restart bit in the PHY control register.
+     */
+    ret_val = e1000_read_phy_reg(hw, PHY_CTRL, &phy_data);
+    if (ret_val)
+        return ret_val;
+
+    phy_data |= (MII_CR_AUTO_NEG_EN | MII_CR_RESTART_AUTO_NEG);
+    ret_val = e1000_write_phy_reg(hw, PHY_CTRL, phy_data);
+    if (ret_val)
+        return ret_val;
+
+    /* Does the user want to wait for Auto-Neg to complete here, or
+     * check at a later time (for example, callback routine).
+     */
+    if (hw->wait_autoneg_complete) {
+        ret_val = e1000_wait_autoneg(hw);
+        if (ret_val) {
+            DEBUGOUT("Error while waiting for autoneg to complete\n");
+            return ret_val;
+        }
+    }
+
+    hw->get_link_status = true;
+
+    return E1000_SUCCESS;
+}
+
+/******************************************************************************
+* Config the MAC and the PHY after link is up.
+*   1) Set up the MAC to the current PHY speed/duplex if we are on 82543.
+*      If we are on newer silicon, we only need to configure the collision
+*      distance in the Transmit Control Register.
+*   2) Set up flow control on the MAC to that established with
+*      the link partner.
+*   3) Config DSP to improve Gigabit link quality for some PHY revisions.
+*
+* hw - Struct containing variables accessed by shared code
+******************************************************************************/
+static s32 e1000_copper_link_postconfig(struct e1000_hw *hw)
+{
+    s32 ret_val;
+    DEBUGFUNC("e1000_copper_link_postconfig");
+
+    if (hw->mac_type >= e1000_82544) {
+        e1000_config_collision_dist(hw);
+    } else {
+        ret_val = e1000_config_mac_to_phy(hw);
+        if (ret_val) {
+            DEBUGOUT("Error configuring MAC to PHY settings\n");
+            return ret_val;
+        }
+    }
+    ret_val = e1000_config_fc_after_link_up(hw);
+    if (ret_val) {
+        DEBUGOUT("Error Configuring Flow Control\n");
+        return ret_val;
+    }
+
+    /* Config DSP to improve Giga link quality */
+    if (hw->phy_type == e1000_phy_igp) {
+        ret_val = e1000_config_dsp_after_link_change(hw, true);
+        if (ret_val) {
+            DEBUGOUT("Error Configuring DSP after link up\n");
+            return ret_val;
+        }
+    }
+
+    return E1000_SUCCESS;
+}
+
+/******************************************************************************
+* Detects which PHY is present and setup the speed and duplex
+*
+* hw - Struct containing variables accessed by shared code
+******************************************************************************/
+static s32 e1000_setup_copper_link(struct e1000_hw *hw)
+{
+    s32 ret_val;
+    u16 i;
+    u16 phy_data;
+    u16 reg_data;
+
+    DEBUGFUNC("e1000_setup_copper_link");
+
+    switch (hw->mac_type) {
+    case e1000_80003es2lan:
+    case e1000_ich8lan:
+        /* Set the mac to wait the maximum time between each
+         * iteration and increase the max iterations when
+         * polling the phy; this fixes erroneous timeouts at 10Mbps. */
+        ret_val = e1000_write_kmrn_reg(hw, GG82563_REG(0x34, 4), 0xFFFF);
+        if (ret_val)
+            return ret_val;
+        ret_val = e1000_read_kmrn_reg(hw, GG82563_REG(0x34, 9), &reg_data);
+        if (ret_val)
+            return ret_val;
+        reg_data |= 0x3F;
+        ret_val = e1000_write_kmrn_reg(hw, GG82563_REG(0x34, 9), reg_data);
+        if (ret_val)
+            return ret_val;
+    default:
+        break;
+    }
+
+    /* Check if it is a valid PHY and set PHY mode if necessary. */
+    ret_val = e1000_copper_link_preconfig(hw);
+    if (ret_val)
+        return ret_val;
+
+    switch (hw->mac_type) {
+    case e1000_80003es2lan:
+        /* Kumeran registers are write-only */
+        reg_data = E1000_KUMCTRLSTA_INB_CTRL_LINK_STATUS_TX_TIMEOUT_DEFAULT;
+        reg_data |= E1000_KUMCTRLSTA_INB_CTRL_DIS_PADDING;
+        ret_val = e1000_write_kmrn_reg(hw, E1000_KUMCTRLSTA_OFFSET_INB_CTRL,
+                                       reg_data);
+        if (ret_val)
+            return ret_val;
+        break;
+    default:
+        break;
+    }
+
+    if (hw->phy_type == e1000_phy_igp ||
+        hw->phy_type == e1000_phy_igp_3 ||
+        hw->phy_type == e1000_phy_igp_2) {
+        ret_val = e1000_copper_link_igp_setup(hw);
+        if (ret_val)
+            return ret_val;
+    } else if (hw->phy_type == e1000_phy_m88) {
+        ret_val = e1000_copper_link_mgp_setup(hw);
+        if (ret_val)
+            return ret_val;
+    } else if (hw->phy_type == e1000_phy_gg82563) {
+        ret_val = e1000_copper_link_ggp_setup(hw);
+        if (ret_val)
+            return ret_val;
+    }
+
+    if (hw->autoneg) {
+        /* Setup autoneg and flow control advertisement
+         * and perform autonegotiation */
+        ret_val = e1000_copper_link_autoneg(hw);
+        if (ret_val)
+            return ret_val;
+    } else {
+        /* PHY will be set to 10H, 10F, 100H, or 100F
+         * depending on the value of forced_speed_duplex. */
+        DEBUGOUT("Forcing speed and duplex\n");
+        ret_val = e1000_phy_force_speed_duplex(hw);
+        if (ret_val) {
+            DEBUGOUT("Error Forcing Speed and Duplex\n");
+            return ret_val;
+        }
+    }
+
+    /* Check link status. Wait up to 100 microseconds for link to become
+     * valid.
+     */
+    for (i = 0; i < 10; i++) {
+        ret_val = e1000_read_phy_reg(hw, PHY_STATUS, &phy_data);
+        if (ret_val)
+            return ret_val;
+        ret_val = e1000_read_phy_reg(hw, PHY_STATUS, &phy_data);
+        if (ret_val)
+            return ret_val;
+
+        if (phy_data & MII_SR_LINK_STATUS) {
+            /* Config the MAC and PHY after link is up */
+            ret_val = e1000_copper_link_postconfig(hw);
+            if (ret_val)
+                return ret_val;
+
+            DEBUGOUT("Valid link established!!!\n");
+            return E1000_SUCCESS;
+        }
+        udelay(10);
+    }
+
+    DEBUGOUT("Unable to establish link!!!\n");
+    return E1000_SUCCESS;
+}
+
+/******************************************************************************
+* Configure the MAC-to-PHY interface for 10/100Mbps
+*
+* hw - Struct containing variables accessed by shared code
+******************************************************************************/
+static s32 e1000_configure_kmrn_for_10_100(struct e1000_hw *hw, u16 duplex)
+{
+    s32 ret_val = E1000_SUCCESS;
+    u32 tipg;
+    u16 reg_data;
+
+    DEBUGFUNC("e1000_configure_kmrn_for_10_100");
+
+    reg_data = E1000_KUMCTRLSTA_HD_CTRL_10_100_DEFAULT;
+    ret_val = e1000_write_kmrn_reg(hw, E1000_KUMCTRLSTA_OFFSET_HD_CTRL,
+                                   reg_data);
+    if (ret_val)
+        return ret_val;
+
+    /* Configure Transmit Inter-Packet Gap */
+    tipg = er32(TIPG);
+    tipg &= ~E1000_TIPG_IPGT_MASK;
+    tipg |= DEFAULT_80003ES2LAN_TIPG_IPGT_10_100;
+    ew32(TIPG, tipg);
+
+    ret_val = e1000_read_phy_reg(hw, GG82563_PHY_KMRN_MODE_CTRL, &reg_data);
+
+    if (ret_val)
+        return ret_val;
+
+    if (duplex == HALF_DUPLEX)
+        reg_data |= GG82563_KMCR_PASS_FALSE_CARRIER;
+    else
+        reg_data &= ~GG82563_KMCR_PASS_FALSE_CARRIER;
+
+    ret_val = e1000_write_phy_reg(hw, GG82563_PHY_KMRN_MODE_CTRL, reg_data);
+
+    return ret_val;
+}
+
+static s32 e1000_configure_kmrn_for_1000(struct e1000_hw *hw)
+{
+    s32 ret_val = E1000_SUCCESS;
+    u16 reg_data;
+    u32 tipg;
+
+    DEBUGFUNC("e1000_configure_kmrn_for_1000");
+
+    reg_data = E1000_KUMCTRLSTA_HD_CTRL_1000_DEFAULT;
+    ret_val = e1000_write_kmrn_reg(hw, E1000_KUMCTRLSTA_OFFSET_HD_CTRL,
+                                   reg_data);
+    if (ret_val)
+        return ret_val;
+
+    /* Configure Transmit Inter-Packet Gap */
+    tipg = er32(TIPG);
+    tipg &= ~E1000_TIPG_IPGT_MASK;
+    tipg |= DEFAULT_80003ES2LAN_TIPG_IPGT_1000;
+    ew32(TIPG, tipg);
+
+    ret_val = e1000_read_phy_reg(hw, GG82563_PHY_KMRN_MODE_CTRL, &reg_data);
+
+    if (ret_val)
+        return ret_val;
+
+    reg_data &= ~GG82563_KMCR_PASS_FALSE_CARRIER;
+    ret_val = e1000_write_phy_reg(hw, GG82563_PHY_KMRN_MODE_CTRL, reg_data);
+
+    return ret_val;
+}
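+
+/* Note (based on the two helpers above): the 10/100 and 1000 Mb/s variants
+ * differ only in the KUMCTRLSTA HD_CTRL default and the TIPG IPGT value they
+ * program; in addition, the 10/100 variant enables
+ * GG82563_KMCR_PASS_FALSE_CARRIER for half-duplex links, while the 1000 Mb/s
+ * variant always clears it.
+ */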
+
+/******************************************************************************
+* Configures PHY autoneg and flow control advertisement settings
+*
+* hw - Struct containing variables accessed by shared code
+******************************************************************************/
+s32 e1000_phy_setup_autoneg(struct e1000_hw *hw)
+{
+    s32 ret_val;
+    u16 mii_autoneg_adv_reg;
+    u16 mii_1000t_ctrl_reg;
+
+    DEBUGFUNC("e1000_phy_setup_autoneg");
+
+    /* Read the MII Auto-Neg Advertisement Register (Address 4). */
+    ret_val = e1000_read_phy_reg(hw, PHY_AUTONEG_ADV, &mii_autoneg_adv_reg);
+    if (ret_val)
+        return ret_val;
+
+    if (hw->phy_type != e1000_phy_ife) {
+        /* Read the MII 1000Base-T Control Register (Address 9). */
+        ret_val = e1000_read_phy_reg(hw, PHY_1000T_CTRL, &mii_1000t_ctrl_reg);
+        if (ret_val)
+            return ret_val;
+    } else {
+        mii_1000t_ctrl_reg = 0;
+    }
+
+    /* Need to parse both autoneg_advertised and fc and set up
+     * the appropriate PHY registers.  First we will parse for
+     * autoneg_advertised software override.  Since we can advertise
+     * a plethora of combinations, we need to check each bit
+     * individually.
+     */
+
+    /* First we clear all the 10/100 mb speed bits in the Auto-Neg
+     * Advertisement Register (Address 4) and the 1000 mb speed bits in
+     * the  1000Base-T Control Register (Address 9).
+     */
+    mii_autoneg_adv_reg &= ~REG4_SPEED_MASK;
+    mii_1000t_ctrl_reg &= ~REG9_SPEED_MASK;
+
+    DEBUGOUT1("autoneg_advertised %x\n", hw->autoneg_advertised);
+
+    /* Do we want to advertise 10 Mb Half Duplex? */
+    if (hw->autoneg_advertised & ADVERTISE_10_HALF) {
+        DEBUGOUT("Advertise 10mb Half duplex\n");
+        mii_autoneg_adv_reg |= NWAY_AR_10T_HD_CAPS;
+    }
+
+    /* Do we want to advertise 10 Mb Full Duplex? */
+    if (hw->autoneg_advertised & ADVERTISE_10_FULL) {
+        DEBUGOUT("Advertise 10mb Full duplex\n");
+        mii_autoneg_adv_reg |= NWAY_AR_10T_FD_CAPS;
+    }
+
+    /* Do we want to advertise 100 Mb Half Duplex? */
+    if (hw->autoneg_advertised & ADVERTISE_100_HALF) {
+        DEBUGOUT("Advertise 100mb Half duplex\n");
+        mii_autoneg_adv_reg |= NWAY_AR_100TX_HD_CAPS;
+    }
+
+    /* Do we want to advertise 100 Mb Full Duplex? */
+    if (hw->autoneg_advertised & ADVERTISE_100_FULL) {
+        DEBUGOUT("Advertise 100mb Full duplex\n");
+        mii_autoneg_adv_reg |= NWAY_AR_100TX_FD_CAPS;
+    }
+
+    /* We do not allow the Phy to advertise 1000 Mb Half Duplex */
+    if (hw->autoneg_advertised & ADVERTISE_1000_HALF) {
+        DEBUGOUT("Advertise 1000mb Half duplex requested, request denied!\n");
+    }
+
+    /* Do we want to advertise 1000 Mb Full Duplex? */
+    if (hw->autoneg_advertised & ADVERTISE_1000_FULL) {
+        DEBUGOUT("Advertise 1000mb Full duplex\n");
+        mii_1000t_ctrl_reg |= CR_1000T_FD_CAPS;
+        if (hw->phy_type == e1000_phy_ife) {
+            DEBUGOUT("e1000_phy_ife is a 10/100 PHY. Gigabit speed is not supported.\n");
+        }
+    }
+
+    /* Check for a software override of the flow control settings, and
+     * setup the PHY advertisement registers accordingly.  If
+     * auto-negotiation is enabled, then software will have to set the
+     * "PAUSE" bits to the correct value in the Auto-Negotiation
+     * Advertisement Register (PHY_AUTONEG_ADV) and re-start auto-negotiation.
+     *
+     * The possible values of the "fc" parameter are:
+     *      0:  Flow control is completely disabled
+     *      1:  Rx flow control is enabled (we can receive pause frames
+     *          but not send pause frames).
+     *      2:  Tx flow control is enabled (we can send pause frames
+     *          but we do not support receiving pause frames).
+     *      3:  Both Rx and TX flow control (symmetric) are enabled.
+     *  other:  No software override.  The flow control configuration
+     *          in the EEPROM is used.
+     */
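+    /* Worked example (illustrative): for hw->fc == E1000_FC_RX_PAUSE we
+     * advertise both NWAY_AR_PAUSE and NWAY_AR_ASM_DIR below, because
+     * "Rx pause only" cannot be advertised directly; the transmit side is
+     * switched off again later in e1000_config_fc_after_link_up(). */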
+    switch (hw->fc) {
+    case E1000_FC_NONE: /* 0 */
+        /* Flow control (RX & TX) is completely disabled by a
+         * software over-ride.
+         */
+        mii_autoneg_adv_reg &= ~(NWAY_AR_ASM_DIR | NWAY_AR_PAUSE);
+        break;
+    case E1000_FC_RX_PAUSE: /* 1 */
+        /* RX Flow control is enabled, and TX Flow control is
+         * disabled, by a software over-ride.
+         */
+        /* Since there really isn't a way to advertise that we are
+         * capable of RX Pause ONLY, we will advertise that we
+         * support both symmetric and asymmetric RX PAUSE.  Later
+         * (in e1000_config_fc_after_link_up) we will disable the
+         * hw's ability to send PAUSE frames.
+         */
+        mii_autoneg_adv_reg |= (NWAY_AR_ASM_DIR | NWAY_AR_PAUSE);
+        break;
+    case E1000_FC_TX_PAUSE: /* 2 */
+        /* TX Flow control is enabled, and RX Flow control is
+         * disabled, by a software over-ride.
+         */
+        mii_autoneg_adv_reg |= NWAY_AR_ASM_DIR;
+        mii_autoneg_adv_reg &= ~NWAY_AR_PAUSE;
+        break;
+    case E1000_FC_FULL: /* 3 */
+        /* Flow control (both RX and TX) is enabled by a software
+         * over-ride.
+         */
+        mii_autoneg_adv_reg |= (NWAY_AR_ASM_DIR | NWAY_AR_PAUSE);
+        break;
+    default:
+        DEBUGOUT("Flow control param set incorrectly\n");
+        return -E1000_ERR_CONFIG;
+    }
+
+    ret_val = e1000_write_phy_reg(hw, PHY_AUTONEG_ADV, mii_autoneg_adv_reg);
+    if (ret_val)
+        return ret_val;
+
+    DEBUGOUT1("Auto-Neg Advertising %x\n", mii_autoneg_adv_reg);
+
+    if (hw->phy_type != e1000_phy_ife) {
+        ret_val = e1000_write_phy_reg(hw, PHY_1000T_CTRL, mii_1000t_ctrl_reg);
+        if (ret_val)
+            return ret_val;
+    }
+
+    return E1000_SUCCESS;
+}
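+
+/* Usage sketch (illustrative, mirroring e1000_copper_link_autoneg() above):
+ *
+ *     ret_val = e1000_phy_setup_autoneg(hw);
+ *     if (!ret_val) {
+ *         e1000_read_phy_reg(hw, PHY_CTRL, &phy_data);
+ *         phy_data |= (MII_CR_AUTO_NEG_EN | MII_CR_RESTART_AUTO_NEG);
+ *         e1000_write_phy_reg(hw, PHY_CTRL, phy_data);
+ *     }
+ */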
+
+/******************************************************************************
+* Force PHY speed and duplex settings to hw->forced_speed_duplex
+*
+* hw - Struct containing variables accessed by shared code
+******************************************************************************/
+static s32 e1000_phy_force_speed_duplex(struct e1000_hw *hw)
+{
+    u32 ctrl;
+    s32 ret_val;
+    u16 mii_ctrl_reg;
+    u16 mii_status_reg;
+    u16 phy_data;
+    u16 i;
+
+    DEBUGFUNC("e1000_phy_force_speed_duplex");
+
+    /* Turn off Flow control if we are forcing speed and duplex. */
+    hw->fc = E1000_FC_NONE;
+
+    DEBUGOUT1("hw->fc = %d\n", hw->fc);
+
+    /* Read the Device Control Register. */
+    ctrl = er32(CTRL);
+
+    /* Set the bits to Force Speed and Duplex in the Device Ctrl Reg. */
+    ctrl |= (E1000_CTRL_FRCSPD | E1000_CTRL_FRCDPX);
+    ctrl &= ~(DEVICE_SPEED_MASK);
+
+    /* Clear the Auto Speed Detect Enable bit. */
+    ctrl &= ~E1000_CTRL_ASDE;
+
+    /* Read the MII Control Register. */
+    ret_val = e1000_read_phy_reg(hw, PHY_CTRL, &mii_ctrl_reg);
+    if (ret_val)
+        return ret_val;
+
+    /* We need to disable autoneg in order to force link and duplex. */
+
+    mii_ctrl_reg &= ~MII_CR_AUTO_NEG_EN;
+
+    /* Are we forcing Full or Half Duplex? */
+    if (hw->forced_speed_duplex == e1000_100_full ||
+        hw->forced_speed_duplex == e1000_10_full) {
+        /* We want to force full duplex so we SET the full duplex bits in the
+         * Device and MII Control Registers.
+         */
+        ctrl |= E1000_CTRL_FD;
+        mii_ctrl_reg |= MII_CR_FULL_DUPLEX;
+        DEBUGOUT("Full Duplex\n");
+    } else {
+        /* We want to force half duplex so we CLEAR the full duplex bits in
+         * the Device and MII Control Registers.
+         */
+        ctrl &= ~E1000_CTRL_FD;
+        mii_ctrl_reg &= ~MII_CR_FULL_DUPLEX;
+        DEBUGOUT("Half Duplex\n");
+    }
+
+    /* Are we forcing 100Mbps??? */
+    if (hw->forced_speed_duplex == e1000_100_full ||
+       hw->forced_speed_duplex == e1000_100_half) {
+        /* Set the 100Mb bit and turn off the 1000Mb and 10Mb bits. */
+        ctrl |= E1000_CTRL_SPD_100;
+        mii_ctrl_reg |= MII_CR_SPEED_100;
+        mii_ctrl_reg &= ~(MII_CR_SPEED_1000 | MII_CR_SPEED_10);
+        DEBUGOUT("Forcing 100mb ");
+    } else {
+        /* Set the 10Mb bit and turn off the 1000Mb and 100Mb bits. */
+        ctrl &= ~(E1000_CTRL_SPD_1000 | E1000_CTRL_SPD_100);
+        mii_ctrl_reg |= MII_CR_SPEED_10;
+        mii_ctrl_reg &= ~(MII_CR_SPEED_1000 | MII_CR_SPEED_100);
+        DEBUGOUT("Forcing 10mb ");
+    }
+
+    e1000_config_collision_dist(hw);
+
+    /* Write the configured values back to the Device Control Reg. */
+    ew32(CTRL, ctrl);
+
+    if ((hw->phy_type == e1000_phy_m88) ||
+        (hw->phy_type == e1000_phy_gg82563)) {
+        ret_val = e1000_read_phy_reg(hw, M88E1000_PHY_SPEC_CTRL, &phy_data);
+        if (ret_val)
+            return ret_val;
+
+        /* Clear Auto-Crossover to force MDI manually. M88E1000 requires MDI
+         * forced whenever speed or duplex are forced.
+         */
+        phy_data &= ~M88E1000_PSCR_AUTO_X_MODE;
+        ret_val = e1000_write_phy_reg(hw, M88E1000_PHY_SPEC_CTRL, phy_data);
+        if (ret_val)
+            return ret_val;
+
+        DEBUGOUT1("M88E1000 PSCR: %x \n", phy_data);
+
+        /* Need to reset the PHY or these changes will be ignored */
+        mii_ctrl_reg |= MII_CR_RESET;
+
+    /* Disable MDI-X support for 10/100 */
+    } else if (hw->phy_type == e1000_phy_ife) {
+        ret_val = e1000_read_phy_reg(hw, IFE_PHY_MDIX_CONTROL, &phy_data);
+        if (ret_val)
+            return ret_val;
+
+        phy_data &= ~IFE_PMC_AUTO_MDIX;
+        phy_data &= ~IFE_PMC_FORCE_MDIX;
+
+        ret_val = e1000_write_phy_reg(hw, IFE_PHY_MDIX_CONTROL, phy_data);
+        if (ret_val)
+            return ret_val;
+
+    } else {
+        /* Clear Auto-Crossover to force MDI manually.  IGP requires MDI
+         * forced whenever speed or duplex are forced.
+         */
+        ret_val = e1000_read_phy_reg(hw, IGP01E1000_PHY_PORT_CTRL, &phy_data);
+        if (ret_val)
+            return ret_val;
+
+        phy_data &= ~IGP01E1000_PSCR_AUTO_MDIX;
+        phy_data &= ~IGP01E1000_PSCR_FORCE_MDI_MDIX;
+
+        ret_val = e1000_write_phy_reg(hw, IGP01E1000_PHY_PORT_CTRL, phy_data);
+        if (ret_val)
+            return ret_val;
+    }
+
+    /* Write back the modified PHY MII control register. */
+    ret_val = e1000_write_phy_reg(hw, PHY_CTRL, mii_ctrl_reg);
+    if (ret_val)
+        return ret_val;
+
+    udelay(1);
+
+    /* The wait_autoneg_complete flag may be a little misleading here.
+     * Since we are forcing speed and duplex, Auto-Neg is not enabled.
+     * But we do want to delay for a period while forcing only so we
+     * don't generate false No Link messages.  So we will wait here
+     * only if the user has set wait_autoneg_complete to 1, which is
+     * the default.
+     */
+    if (hw->wait_autoneg_complete) {
+        /* We will wait for autoneg to complete. */
+        DEBUGOUT("Waiting for forced speed/duplex link.\n");
+        mii_status_reg = 0;
+
+        /* We will wait for autoneg to complete or 4.5 seconds to expire. */
+        for (i = PHY_FORCE_TIME; i > 0; i--) {
+            /* Read the MII Status Register and wait for Auto-Neg Complete bit
+             * to be set.
+             */
+            ret_val = e1000_read_phy_reg(hw, PHY_STATUS, &mii_status_reg);
+            if (ret_val)
+                return ret_val;
+
+            ret_val = e1000_read_phy_reg(hw, PHY_STATUS, &mii_status_reg);
+            if (ret_val)
+                return ret_val;
+
+            if (mii_status_reg & MII_SR_LINK_STATUS) break;
+            msleep(100);
+        }
+        if ((i == 0) &&
+           ((hw->phy_type == e1000_phy_m88) ||
+            (hw->phy_type == e1000_phy_gg82563))) {
+            /* We didn't get link.  Reset the DSP and wait again for link. */
+            ret_val = e1000_phy_reset_dsp(hw);
+            if (ret_val) {
+                DEBUGOUT("Error Resetting PHY DSP\n");
+                return ret_val;
+            }
+        }
+        /* This loop will early-out if the link condition has been met.  */
+        for (i = PHY_FORCE_TIME; i > 0; i--) {
+            if (mii_status_reg & MII_SR_LINK_STATUS) break;
+            msleep(100);
+            /* Read the MII Status Register and wait for Auto-Neg Complete bit
+             * to be set.
+             */
+            ret_val = e1000_read_phy_reg(hw, PHY_STATUS, &mii_status_reg);
+            if (ret_val)
+                return ret_val;
+
+            ret_val = e1000_read_phy_reg(hw, PHY_STATUS, &mii_status_reg);
+            if (ret_val)
+                return ret_val;
+        }
+    }
+
+    if (hw->phy_type == e1000_phy_m88) {
+        /* Because we reset the PHY above, we need to re-force TX_CLK in the
+         * Extended PHY Specific Control Register to 25MHz clock.  This value
+         * defaults back to a 2.5MHz clock when the PHY is reset.
+         */
+        ret_val = e1000_read_phy_reg(hw, M88E1000_EXT_PHY_SPEC_CTRL, &phy_data);
+        if (ret_val)
+            return ret_val;
+
+        phy_data |= M88E1000_EPSCR_TX_CLK_25;
+        ret_val = e1000_write_phy_reg(hw, M88E1000_EXT_PHY_SPEC_CTRL, phy_data);
+        if (ret_val)
+            return ret_val;
+
+        /* In addition, because of the s/w reset above, we need to enable CRS on
+         * TX.  This must be set for both full and half duplex operation.
+         */
+        ret_val = e1000_read_phy_reg(hw, M88E1000_PHY_SPEC_CTRL, &phy_data);
+        if (ret_val)
+            return ret_val;
+
+        phy_data |= M88E1000_PSCR_ASSERT_CRS_ON_TX;
+        ret_val = e1000_write_phy_reg(hw, M88E1000_PHY_SPEC_CTRL, phy_data);
+        if (ret_val)
+            return ret_val;
+
+        if ((hw->mac_type == e1000_82544 || hw->mac_type == e1000_82543) &&
+            (!hw->autoneg) && (hw->forced_speed_duplex == e1000_10_full ||
+             hw->forced_speed_duplex == e1000_10_half)) {
+            ret_val = e1000_polarity_reversal_workaround(hw);
+            if (ret_val)
+                return ret_val;
+        }
+    } else if (hw->phy_type == e1000_phy_gg82563) {
+        /* The TX_CLK of the Extended PHY Specific Control Register defaults
+         * to 2.5MHz on a reset.  We need to re-force it back to 25MHz unless
+         * we are in a forced 10 Mb/s (half- or full-duplex) configuration. */
+        ret_val = e1000_read_phy_reg(hw, GG82563_PHY_MAC_SPEC_CTRL, &phy_data);
+        if (ret_val)
+            return ret_val;
+
+        phy_data &= ~GG82563_MSCR_TX_CLK_MASK;
+        if ((hw->forced_speed_duplex == e1000_10_full) ||
+            (hw->forced_speed_duplex == e1000_10_half))
+            phy_data |= GG82563_MSCR_TX_CLK_10MBPS_2_5MHZ;
+        else
+            phy_data |= GG82563_MSCR_TX_CLK_100MBPS_25MHZ;
+
+        /* Also due to the reset, we need to enable CRS on Tx. */
+        phy_data |= GG82563_MSCR_ASSERT_CRS_ON_TX;
+
+        ret_val = e1000_write_phy_reg(hw, GG82563_PHY_MAC_SPEC_CTRL, phy_data);
+        if (ret_val)
+            return ret_val;
+    }
+    return E1000_SUCCESS;
+}
+
+/******************************************************************************
+* Sets the collision distance in the Transmit Control register
+*
+* hw - Struct containing variables accessed by shared code
+*
+* Link should have been established previously.
+******************************************************************************/
+void e1000_config_collision_dist(struct e1000_hw *hw)
+{
+    u32 tctl, coll_dist;
+
+    DEBUGFUNC("e1000_config_collision_dist");
+
+    if (hw->mac_type < e1000_82543)
+        coll_dist = E1000_COLLISION_DISTANCE_82542;
+    else
+        coll_dist = E1000_COLLISION_DISTANCE;
+
+    tctl = er32(TCTL);
+
+    tctl &= ~E1000_TCTL_COLD;
+    tctl |= coll_dist << E1000_COLD_SHIFT;
+
+    ew32(TCTL, tctl);
+    E1000_WRITE_FLUSH();
+}
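+
+/* Note (illustrative): only the COLD (collision distance) field of TCTL is
+ * modified by the read-modify-write above; all other transmit-control bits
+ * are preserved.  The E1000_WRITE_FLUSH() is assumed to force the posted
+ * register write out before the caller continues.
+ */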
+
+/******************************************************************************
+* Sets MAC speed and duplex settings to reflect those in the PHY
+*
+* hw - Struct containing variables accessed by shared code
+*
+* Reads the M88E1000 PHY specific status register to obtain the negotiated
+* speed and duplex, and programs the Device Control register to match.
+******************************************************************************/
+static s32 e1000_config_mac_to_phy(struct e1000_hw *hw)
+{
+    u32 ctrl;
+    s32 ret_val;
+    u16 phy_data;
+
+    DEBUGFUNC("e1000_config_mac_to_phy");
+
+    /* 82544 or newer MAC, Auto Speed Detection takes care of
+    * MAC speed/duplex configuration.*/
+    if (hw->mac_type >= e1000_82544)
+        return E1000_SUCCESS;
+
+    /* Read the Device Control Register and set the bits to Force Speed
+     * and Duplex.
+     */
+    ctrl = er32(CTRL);
+    ctrl |= (E1000_CTRL_FRCSPD | E1000_CTRL_FRCDPX);
+    ctrl &= ~(E1000_CTRL_SPD_SEL | E1000_CTRL_ILOS);
+
+    /* Set up duplex in the Device Control and Transmit Control
+     * registers depending on negotiated values.
+     */
+    ret_val = e1000_read_phy_reg(hw, M88E1000_PHY_SPEC_STATUS, &phy_data);
+    if (ret_val)
+        return ret_val;
+
+    if (phy_data & M88E1000_PSSR_DPLX)
+        ctrl |= E1000_CTRL_FD;
+    else
+        ctrl &= ~E1000_CTRL_FD;
+
+    e1000_config_collision_dist(hw);
+
+    /* Set up speed in the Device Control register depending on
+     * negotiated values.
+     */
+    if ((phy_data & M88E1000_PSSR_SPEED) == M88E1000_PSSR_1000MBS)
+        ctrl |= E1000_CTRL_SPD_1000;
+    else if ((phy_data & M88E1000_PSSR_SPEED) == M88E1000_PSSR_100MBS)
+        ctrl |= E1000_CTRL_SPD_100;
+
+    /* Write the configured values back to the Device Control Reg. */
+    ew32(CTRL, ctrl);
+    return E1000_SUCCESS;
+}
+
+/******************************************************************************
+ * Forces the MAC's flow control settings.
+ *
+ * hw - Struct containing variables accessed by shared code
+ *
+ * Sets the TFCE and RFCE bits in the device control register to reflect
+ * the adapter settings. TFCE and RFCE need to be explicitly set by
+ * software when a Copper PHY is used because autonegotiation is managed
+ * by the PHY rather than the MAC. Software must also configure these
+ * bits when link is forced on a fiber connection.
+ *****************************************************************************/
+s32 e1000_force_mac_fc(struct e1000_hw *hw)
+{
+    u32 ctrl;
+
+    DEBUGFUNC("e1000_force_mac_fc");
+
+    /* Get the current configuration of the Device Control Register */
+    ctrl = er32(CTRL);
+
+    /* Because we didn't get link via the internal auto-negotiation
+     * mechanism (we either forced link or we got link via PHY
+     * auto-neg), we have to manually enable/disable transmit and
+     * receive flow control.
+     *
+     * The "case" statement below enables/disables flow control
+     * according to the "hw->fc" parameter.
+     *
+     * The possible values of the "fc" parameter are:
+     *      0:  Flow control is completely disabled
+     *      1:  Rx flow control is enabled (we can receive pause
+     *          frames but not send pause frames).
+     *      2:  Tx flow control is enabled (we can send pause frames
+     *          but we do not receive pause frames).
+     *      3:  Both Rx and TX flow control (symmetric) are enabled.
+     *  other:  No other values should be possible at this point.
+     */
+
+    switch (hw->fc) {
+    case E1000_FC_NONE:
+        ctrl &= (~(E1000_CTRL_TFCE | E1000_CTRL_RFCE));
+        break;
+    case E1000_FC_RX_PAUSE:
+        ctrl &= (~E1000_CTRL_TFCE);
+        ctrl |= E1000_CTRL_RFCE;
+        break;
+    case E1000_FC_TX_PAUSE:
+        ctrl &= (~E1000_CTRL_RFCE);
+        ctrl |= E1000_CTRL_TFCE;
+        break;
+    case E1000_FC_FULL:
+        ctrl |= (E1000_CTRL_TFCE | E1000_CTRL_RFCE);
+        break;
+    default:
+        DEBUGOUT("Flow control param set incorrectly\n");
+        return -E1000_ERR_CONFIG;
+    }
+
+    /* Disable TX Flow Control for 82542 (rev 2.0) */
+    if (hw->mac_type == e1000_82542_rev2_0)
+        ctrl &= (~E1000_CTRL_TFCE);
+
+    ew32(CTRL, ctrl);
+    return E1000_SUCCESS;
+}
+
+/******************************************************************************
+ * Configures flow control settings after link is established
+ *
+ * hw - Struct containing variables accessed by shared code
+ *
+ * Should be called immediately after a valid link has been established.
+ * Forces MAC flow control settings if link was forced. When in MII/GMII mode
+ * and autonegotiation is enabled, the MAC flow control settings will be set
+ * based on the flow control negotiated by the PHY. In TBI mode, the TFCE
+ * and RFCE bits will be automatically set to the negotiated flow control mode.
+ *****************************************************************************/
+static s32 e1000_config_fc_after_link_up(struct e1000_hw *hw)
+{
+    s32 ret_val;
+    u16 mii_status_reg;
+    u16 mii_nway_adv_reg;
+    u16 mii_nway_lp_ability_reg;
+    u16 speed;
+    u16 duplex;
+
+    DEBUGFUNC("e1000_config_fc_after_link_up");
+
+    /* Check for the case where we have fiber media and auto-neg failed
+     * so we had to force link.  In this case, we need to force the
+     * configuration of the MAC to match the "fc" parameter.
+     */
+    if (((hw->media_type == e1000_media_type_fiber) && (hw->autoneg_failed)) ||
+        ((hw->media_type == e1000_media_type_internal_serdes) &&
+         (hw->autoneg_failed)) ||
+        ((hw->media_type == e1000_media_type_copper) && (!hw->autoneg))) {
+        ret_val = e1000_force_mac_fc(hw);
+        if (ret_val) {
+            DEBUGOUT("Error forcing flow control settings\n");
+            return ret_val;
+        }
+    }
+
+    /* Check for the case where we have copper media and auto-neg is
+     * enabled.  In this case, we need to check and see if Auto-Neg
+     * has completed, and if so, how the PHY and link partner has
+     * flow control configured.
+     */
+    if ((hw->media_type == e1000_media_type_copper) && hw->autoneg) {
+        /* Read the MII Status Register and check to see if AutoNeg
+         * has completed.  We read this twice because this reg has
+         * some "sticky" (latched) bits.
+         */
+        ret_val = e1000_read_phy_reg(hw, PHY_STATUS, &mii_status_reg);
+        if (ret_val)
+            return ret_val;
+        ret_val = e1000_read_phy_reg(hw, PHY_STATUS, &mii_status_reg);
+        if (ret_val)
+            return ret_val;
+
+        if (mii_status_reg & MII_SR_AUTONEG_COMPLETE) {
+            /* The AutoNeg process has completed, so we now need to
+             * read both the Auto Negotiation Advertisement Register
+             * (Address 4) and the Auto_Negotiation Base Page Ability
+             * Register (Address 5) to determine how flow control was
+             * negotiated.
+             */
+            ret_val = e1000_read_phy_reg(hw, PHY_AUTONEG_ADV,
+                                         &mii_nway_adv_reg);
+            if (ret_val)
+                return ret_val;
+            ret_val = e1000_read_phy_reg(hw, PHY_LP_ABILITY,
+                                         &mii_nway_lp_ability_reg);
+            if (ret_val)
+                return ret_val;
+
+            /* Two bits in the Auto Negotiation Advertisement Register
+             * (Address 4) and two bits in the Auto Negotiation Base
+             * Page Ability Register (Address 5) determine flow control
+             * for both the PHY and the link partner.  The following
+             * table, taken out of the IEEE 802.3ab/D6.0 dated March 25,
+             * 1999, describes these PAUSE resolution bits and how flow
+             * control is determined based upon these settings.
+             * NOTE:  DC = Don't Care
+             *
+             *   LOCAL DEVICE  |   LINK PARTNER
+             * PAUSE | ASM_DIR | PAUSE | ASM_DIR | NIC Resolution
+             *-------|---------|-------|---------|--------------------
+             *   0   |    0    |  DC   |   DC    | E1000_FC_NONE
+             *   0   |    1    |   0   |   DC    | E1000_FC_NONE
+             *   0   |    1    |   1   |    0    | E1000_FC_NONE
+             *   0   |    1    |   1   |    1    | E1000_FC_TX_PAUSE
+             *   1   |    0    |   0   |   DC    | E1000_FC_NONE
+             *   1   |   DC    |   1   |   DC    | E1000_FC_FULL
+             *   1   |    1    |   0   |    0    | E1000_FC_NONE
+             *   1   |    1    |   0   |    1    | E1000_FC_RX_PAUSE
+             *
+             */
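+            /* Worked example (illustrative): if we advertised E1000_FC_FULL
+             * (PAUSE = 1, ASM_DIR = 1) and the partner replied with
+             * PAUSE = 0, ASM_DIR = 1, the table above resolves to
+             * E1000_FC_RX_PAUSE, which is handled by the third branch below.
+             */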
+            /* Are both PAUSE bits set to 1?  If so, this implies
+             * Symmetric Flow Control is enabled at both ends.  The
+             * ASM_DIR bits are irrelevant per the spec.
+             *
+             * For Symmetric Flow Control:
+             *
+             *   LOCAL DEVICE  |   LINK PARTNER
+             * PAUSE | ASM_DIR | PAUSE | ASM_DIR | Result
+             *-------|---------|-------|---------|--------------------
+             *   1   |   DC    |   1   |   DC    | E1000_FC_FULL
+             *
+             */
+            if ((mii_nway_adv_reg & NWAY_AR_PAUSE) &&
+                (mii_nway_lp_ability_reg & NWAY_LPAR_PAUSE)) {
+                /* Now we need to check if the user selected RX-only
+                 * pause frames.  In this case, we had to advertise
+                 * FULL flow control because we could not advertise RX
+                 * ONLY. Hence, we must now check to see if we need to
+                 * turn OFF the TRANSMISSION of PAUSE frames.
+                 */
+                if (hw->original_fc == E1000_FC_FULL) {
+                    hw->fc = E1000_FC_FULL;
+                    DEBUGOUT("Flow Control = FULL.\n");
+                } else {
+                    hw->fc = E1000_FC_RX_PAUSE;
+                    DEBUGOUT("Flow Control = RX PAUSE frames only.\n");
+                }
+            }
+            /* For receiving PAUSE frames ONLY.
+             *
+             *   LOCAL DEVICE  |   LINK PARTNER
+             * PAUSE | ASM_DIR | PAUSE | ASM_DIR | Result
+             *-------|---------|-------|---------|--------------------
+             *   0   |    1    |   1   |    1    | E1000_FC_TX_PAUSE
+             *
+             */
+            else if (!(mii_nway_adv_reg & NWAY_AR_PAUSE) &&
+                     (mii_nway_adv_reg & NWAY_AR_ASM_DIR) &&
+                     (mii_nway_lp_ability_reg & NWAY_LPAR_PAUSE) &&
+                     (mii_nway_lp_ability_reg & NWAY_LPAR_ASM_DIR)) {
+                hw->fc = E1000_FC_TX_PAUSE;
+                DEBUGOUT("Flow Control = TX PAUSE frames only.\n");
+            }
+            /* For transmitting PAUSE frames ONLY.
+             *
+             *   LOCAL DEVICE  |   LINK PARTNER
+             * PAUSE | ASM_DIR | PAUSE | ASM_DIR | Result
+             *-------|---------|-------|---------|--------------------
+             *   1   |    1    |   0   |    1    | E1000_FC_RX_PAUSE
+             *
+             */
+            else if ((mii_nway_adv_reg & NWAY_AR_PAUSE) &&
+                     (mii_nway_adv_reg & NWAY_AR_ASM_DIR) &&
+                     !(mii_nway_lp_ability_reg & NWAY_LPAR_PAUSE) &&
+                     (mii_nway_lp_ability_reg & NWAY_LPAR_ASM_DIR)) {
+                hw->fc = E1000_FC_RX_PAUSE;
+                DEBUGOUT("Flow Control = RX PAUSE frames only.\n");
+            }
+            /* Per the IEEE spec, at this point flow control should be
+             * disabled.  However, we want to consider that we could
+             * be connected to a legacy switch that doesn't advertise
+             * desired flow control, but can be forced on the link
+             * partner.  So if we advertised no flow control, that is
+             * what we will resolve to.  If we advertised some kind of
+             * receive capability (Rx Pause Only or Full Flow Control)
+             * and the link partner advertised none, we will configure
+             * ourselves to enable Rx Flow Control only.  We can do
+             * this safely for two reasons:  If the link partner really
+             * didn't want flow control enabled, and we enable Rx, no
+             * harm done since we won't be receiving any PAUSE frames
+             * anyway.  If the intent on the link partner was to have
+             * flow control enabled, then by us enabling RX only, we
+             * can at least receive pause frames and process them.
+             * This is a good idea because in most cases, since we are
+             * predominantly a server NIC, more often than not we will
+             * be asked to delay transmission of packets rather than to ask
+             * our link partner to pause transmission of frames.
+             */
+            else if ((hw->original_fc == E1000_FC_NONE ||
+                      hw->original_fc == E1000_FC_TX_PAUSE) ||
+                      hw->fc_strict_ieee) {
+                hw->fc = E1000_FC_NONE;
+                DEBUGOUT("Flow Control = NONE.\n");
+            } else {
+                hw->fc = E1000_FC_RX_PAUSE;
+                DEBUGOUT("Flow Control = RX PAUSE frames only.\n");
+            }
+
+            /* Now we need to do one last check...  If we auto-
+             * negotiated to HALF DUPLEX, flow control should not be
+             * enabled per IEEE 802.3 spec.
+             */
+            ret_val = e1000_get_speed_and_duplex(hw, &speed, &duplex);
+            if (ret_val) {
+                DEBUGOUT("Error getting link speed and duplex\n");
+                return ret_val;
+            }
+
+            if (duplex == HALF_DUPLEX)
+                hw->fc = E1000_FC_NONE;
+
+            /* Now we call a subroutine to actually force the MAC
+             * controller to use the correct flow control settings.
+             */
+            ret_val = e1000_force_mac_fc(hw);
+            if (ret_val) {
+                DEBUGOUT("Error forcing flow control settings\n");
+                return ret_val;
+            }
+        } else {
+            DEBUGOUT("Copper PHY and Auto Neg has not completed.\n");
+        }
+    }
+    return E1000_SUCCESS;
+}
+
+/******************************************************************************
+ * Checks to see if the link status of the hardware has changed.
+ *
+ * hw - Struct containing variables accessed by shared code
+ *
+ * Called by any function that needs to check the link status of the adapter.
+ *****************************************************************************/
+s32 e1000_check_for_link(struct e1000_hw *hw)
+{
+    u32 rxcw = 0;
+    u32 ctrl;
+    u32 status;
+    u32 rctl;
+    u32 icr;
+    u32 signal = 0;
+    s32 ret_val;
+    u16 phy_data;
+
+    DEBUGFUNC("e1000_check_for_link");
+
+    ctrl = er32(CTRL);
+    status = er32(STATUS);
+
+    /* On adapters with a MAC newer than 82544, SW Definable pin 1 will be
+     * set when the optics detect a signal. On older adapters, it will be
+     * cleared when there is a signal.  This applies to fiber media only.
+     */
+    if ((hw->media_type == e1000_media_type_fiber) ||
+        (hw->media_type == e1000_media_type_internal_serdes)) {
+        rxcw = er32(RXCW);
+
+        if (hw->media_type == e1000_media_type_fiber) {
+            signal = (hw->mac_type > e1000_82544) ? E1000_CTRL_SWDPIN1 : 0;
+            if (status & E1000_STATUS_LU)
+                hw->get_link_status = false;
+        }
+    }
+
+    /* If we have a copper PHY then we only want to go out to the PHY
+     * registers to see if Auto-Neg has completed and/or if our link
+     * status has changed.  The get_link_status flag will be set if we
+     * receive a Link Status Change interrupt or we have Rx Sequence
+     * Errors.
+     */
+    if ((hw->media_type == e1000_media_type_copper) && hw->get_link_status) {
+        /* First we want to see if the MII Status Register reports
+         * link.  If so, then we want to get the current speed/duplex
+         * of the PHY.
+         * Read the register twice since the link bit is sticky.
+         */
+        ret_val = e1000_read_phy_reg(hw, PHY_STATUS, &phy_data);
+        if (ret_val)
+            return ret_val;
+        ret_val = e1000_read_phy_reg(hw, PHY_STATUS, &phy_data);
+        if (ret_val)
+            return ret_val;
+
+        if (phy_data & MII_SR_LINK_STATUS) {
+            hw->get_link_status = false;
+            /* Check if there was DownShift, must be checked immediately after
+             * link-up */
+            e1000_check_downshift(hw);
+
+            /* If we are on 82544 or 82543 silicon and speed/duplex
+             * are forced to 10H or 10F, then we will implement the polarity
+             * reversal workaround.  We disable interrupts first, and upon
+             * returning, restore the device's interrupt state to its previous
+             * value, except for the link status change interrupt, which will
+             * happen due to the execution of this workaround.
+             */
+
+            if ((hw->mac_type == e1000_82544 || hw->mac_type == e1000_82543) &&
+                (!hw->autoneg) &&
+                (hw->forced_speed_duplex == e1000_10_full ||
+                 hw->forced_speed_duplex == e1000_10_half)) {
+                ew32(IMC, 0xffffffff);
+                ret_val = e1000_polarity_reversal_workaround(hw);
+                icr = er32(ICR);
+                ew32(ICS, (icr & ~E1000_ICS_LSC));
+                ew32(IMS, IMS_ENABLE_MASK);
+            }
+
+        } else {
+            /* No link detected */
+            e1000_config_dsp_after_link_change(hw, false);
+            return 0;
+        }
+
+        /* If we are forcing speed/duplex, then we simply return since
+         * we have already determined whether we have link or not.
+         */
+        if (!hw->autoneg) return -E1000_ERR_CONFIG;
+
+        /* optimize the dsp settings for the igp phy */
+        e1000_config_dsp_after_link_change(hw, true);
+
+        /* We have a M88E1000 PHY and Auto-Neg is enabled.  If we
+         * have Si on board that is 82544 or newer, Auto
+         * Speed Detection takes care of MAC speed/duplex
+         * configuration.  So we only need to configure Collision
+         * Distance in the MAC.  Otherwise, we need to force
+         * speed/duplex on the MAC to the current PHY speed/duplex
+         * settings.
+         */
+        if (hw->mac_type >= e1000_82544)
+            e1000_config_collision_dist(hw);
+        else {
+            ret_val = e1000_config_mac_to_phy(hw);
+            if (ret_val) {
+                DEBUGOUT("Error configuring MAC to PHY settings\n");
+                return ret_val;
+            }
+        }
+
+        /* Configure Flow Control now that Auto-Neg has completed. First, we
+         * need to restore the desired flow control settings because we may
+         * have had to re-autoneg with a different link partner.
+         */
+        ret_val = e1000_config_fc_after_link_up(hw);
+        if (ret_val) {
+            DEBUGOUT("Error configuring flow control\n");
+            return ret_val;
+        }
+
+        /* At this point we know that we are on copper and we have
+         * auto-negotiated link.  These are conditions for checking the link
+         * partner capability register.  We use the link speed to determine if
+         * TBI compatibility needs to be turned on or off.  If the link is not
+         * at gigabit speed, then TBI compatibility is not needed.  If we are
+         * at gigabit speed, we turn on TBI compatibility.
+         */
+        if (hw->tbi_compatibility_en) {
+            u16 speed, duplex;
+            ret_val = e1000_get_speed_and_duplex(hw, &speed, &duplex);
+            if (ret_val) {
+                DEBUGOUT("Error getting link speed and duplex\n");
+                return ret_val;
+            }
+            if (speed != SPEED_1000) {
+                /* If link speed is not set to gigabit speed, we do not need
+                 * to enable TBI compatibility.
+                 */
+                if (hw->tbi_compatibility_on) {
+                    /* If we were previously in this mode, turn it off. */
+                    rctl = er32(RCTL);
+                    rctl &= ~E1000_RCTL_SBP;
+                    ew32(RCTL, rctl);
+                    hw->tbi_compatibility_on = false;
+                }
+            } else {
+                /* If TBI compatibility was previously off, turn it on. For
+                 * compatibility with a TBI link partner, we will store bad
+                 * packets. Some frames have an additional byte on the end and
+                 * will look like CRC errors to the hardware.
+                 */
+                if (!hw->tbi_compatibility_on) {
+                    hw->tbi_compatibility_on = true;
+                    rctl = er32(RCTL);
+                    rctl |= E1000_RCTL_SBP;
+                    ew32(RCTL, rctl);
+                }
+            }
+        }
+    }
+    /* If we don't have link (auto-negotiation failed or link partner cannot
+     * auto-negotiate), the cable is plugged in (we have signal), and our
+     * link partner is not trying to auto-negotiate with us (we are receiving
+     * idles or data), we need to force link up. We also need to give
+     * auto-negotiation time to complete, in case the cable was just plugged
+     * in. The autoneg_failed flag does this.
+     */
+    else if ((((hw->media_type == e1000_media_type_fiber) &&
+              ((ctrl & E1000_CTRL_SWDPIN1) == signal)) ||
+              (hw->media_type == e1000_media_type_internal_serdes)) &&
+              (!(status & E1000_STATUS_LU)) &&
+              (!(rxcw & E1000_RXCW_C))) {
+        if (hw->autoneg_failed == 0) {
+            hw->autoneg_failed = 1;
+            return 0;
+        }
+        DEBUGOUT("NOT RXing /C/, disable AutoNeg and force link.\n");
+
+        /* Disable auto-negotiation in the TXCW register */
+        ew32(TXCW, (hw->txcw & ~E1000_TXCW_ANE));
+
+        /* Force link-up and also force full-duplex. */
+        ctrl = er32(CTRL);
+        ctrl |= (E1000_CTRL_SLU | E1000_CTRL_FD);
+        ew32(CTRL, ctrl);
+
+        /* Configure Flow Control after forcing link up. */
+        ret_val = e1000_config_fc_after_link_up(hw);
+        if (ret_val) {
+            DEBUGOUT("Error configuring flow control\n");
+            return ret_val;
+        }
+    }
+    /* If we are forcing link and we are receiving /C/ ordered sets, re-enable
+     * auto-negotiation in the TXCW register and disable forced link in the
+     * Device Control register in an attempt to auto-negotiate with our link
+     * partner.
+     */
+    else if (((hw->media_type == e1000_media_type_fiber) ||
+              (hw->media_type == e1000_media_type_internal_serdes)) &&
+              (ctrl & E1000_CTRL_SLU) && (rxcw & E1000_RXCW_C)) {
+        DEBUGOUT("RXing /C/, enable AutoNeg and stop forcing link.\n");
+        ew32(TXCW, hw->txcw);
+        ew32(CTRL, (ctrl & ~E1000_CTRL_SLU));
+
+        hw->serdes_link_down = false;
+    }
+    /* If we force link for non-auto-negotiation switch, check link status
+     * based on MAC synchronization for internal serdes media type.
+     */
+    else if ((hw->media_type == e1000_media_type_internal_serdes) &&
+             !(E1000_TXCW_ANE & er32(TXCW))) {
+        /* SYNCH bit and IV bit are sticky. */
+        udelay(10);
+        if (E1000_RXCW_SYNCH & er32(RXCW)) {
+            if (!(rxcw & E1000_RXCW_IV)) {
+                hw->serdes_link_down = false;
+                DEBUGOUT("SERDES: Link is up.\n");
+            }
+        } else {
+            hw->serdes_link_down = true;
+            DEBUGOUT("SERDES: Link is down.\n");
+        }
+    }
+    if ((hw->media_type == e1000_media_type_internal_serdes) &&
+        (E1000_TXCW_ANE & er32(TXCW))) {
+        hw->serdes_link_down = !(E1000_STATUS_LU & er32(STATUS));
+    }
+    return E1000_SUCCESS;
+}
+
+/******************************************************************************
+ * Detects the current speed and duplex settings of the hardware.
+ *
+ * hw - Struct containing variables accessed by shared code
+ * speed - Speed of the connection
+ * duplex - Duplex setting of the connection
+ *****************************************************************************/
+s32 e1000_get_speed_and_duplex(struct e1000_hw *hw, u16 *speed, u16 *duplex)
+{
+    u32 status;
+    s32 ret_val;
+    u16 phy_data;
+
+    DEBUGFUNC("e1000_get_speed_and_duplex");
+
+    if (hw->mac_type >= e1000_82543) {
+        status = er32(STATUS);
+        if (status & E1000_STATUS_SPEED_1000) {
+            *speed = SPEED_1000;
+            DEBUGOUT("1000 Mbs, ");
+        } else if (status & E1000_STATUS_SPEED_100) {
+            *speed = SPEED_100;
+            DEBUGOUT("100 Mbs, ");
+        } else {
+            *speed = SPEED_10;
+            DEBUGOUT("10 Mbs, ");
+        }
+
+        if (status & E1000_STATUS_FD) {
+            *duplex = FULL_DUPLEX;
+            DEBUGOUT("Full Duplex\n");
+        } else {
+            *duplex = HALF_DUPLEX;
+            DEBUGOUT(" Half Duplex\n");
+        }
+    } else {
+        DEBUGOUT("1000 Mbs, Full Duplex\n");
+        *speed = SPEED_1000;
+        *duplex = FULL_DUPLEX;
+    }
+
+    /* IGP01 PHY may advertise full duplex operation after speed downgrade even
+     * if it is operating at half duplex.  Here we set the duplex settings to
+     * match the duplex in the link partner's capabilities.
+     */
+    if (hw->phy_type == e1000_phy_igp && hw->speed_downgraded) {
+        ret_val = e1000_read_phy_reg(hw, PHY_AUTONEG_EXP, &phy_data);
+        if (ret_val)
+            return ret_val;
+
+        if (!(phy_data & NWAY_ER_LP_NWAY_CAPS))
+            *duplex = HALF_DUPLEX;
+        else {
+            ret_val = e1000_read_phy_reg(hw, PHY_LP_ABILITY, &phy_data);
+            if (ret_val)
+                return ret_val;
+            if ((*speed == SPEED_100 && !(phy_data & NWAY_LPAR_100TX_FD_CAPS)) ||
+               (*speed == SPEED_10 && !(phy_data & NWAY_LPAR_10T_FD_CAPS)))
+                *duplex = HALF_DUPLEX;
+        }
+    }
+
+    if ((hw->mac_type == e1000_80003es2lan) &&
+        (hw->media_type == e1000_media_type_copper)) {
+        if (*speed == SPEED_1000)
+            ret_val = e1000_configure_kmrn_for_1000(hw);
+        else
+            ret_val = e1000_configure_kmrn_for_10_100(hw, *duplex);
+        if (ret_val)
+            return ret_val;
+    }
+
+    if ((hw->phy_type == e1000_phy_igp_3) && (*speed == SPEED_1000)) {
+        ret_val = e1000_kumeran_lock_loss_workaround(hw);
+        if (ret_val)
+            return ret_val;
+    }
+
+    return E1000_SUCCESS;
+}
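+
+/* Usage sketch (illustrative):
+ *
+ *     u16 speed, duplex;
+ *
+ *     if (e1000_get_speed_and_duplex(hw, &speed, &duplex) == E1000_SUCCESS) {
+ *         ... speed is SPEED_10, SPEED_100 or SPEED_1000,
+ *             duplex is HALF_DUPLEX or FULL_DUPLEX ...
+ *     }
+ */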
+
+/******************************************************************************
+* Blocks until autoneg completes or times out (~4.5 seconds)
+*
+* hw - Struct containing variables accessed by shared code
+******************************************************************************/
+static s32 e1000_wait_autoneg(struct e1000_hw *hw)
+{
+    s32 ret_val;
+    u16 i;
+    u16 phy_data;
+
+    DEBUGFUNC("e1000_wait_autoneg");
+    DEBUGOUT("Waiting for Auto-Neg to complete.\n");
+
+    /* We will wait for autoneg to complete or 4.5 seconds to expire. */
+    for (i = PHY_AUTO_NEG_TIME; i > 0; i--) {
+        /* Read the MII Status Register and wait for Auto-Neg
+         * Complete bit to be set.
+         */
+        ret_val = e1000_read_phy_reg(hw, PHY_STATUS, &phy_data);
+        if (ret_val)
+            return ret_val;
+        ret_val = e1000_read_phy_reg(hw, PHY_STATUS, &phy_data);
+        if (ret_val)
+            return ret_val;
+        if (phy_data & MII_SR_AUTONEG_COMPLETE) {
+            return E1000_SUCCESS;
+        }
+        msleep(100);
+    }
+    return E1000_SUCCESS;
+}
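+
+/* Note: with msleep(100) between polls, the loop above waits at most
+ * PHY_AUTO_NEG_TIME * 100 ms, i.e. the ~4.5 seconds quoted in the header
+ * comment (assuming PHY_AUTO_NEG_TIME == 45 as in the shared e1000 headers).
+ */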
+
+/******************************************************************************
+* Raises the Management Data Clock
+*
+* hw - Struct containing variables accessed by shared code
+* ctrl - Device control register's current value
+******************************************************************************/
+static void e1000_raise_mdi_clk(struct e1000_hw *hw, u32 *ctrl)
+{
+    /* Raise the clock input to the Management Data Clock (by setting the MDC
+     * bit), and then delay 10 microseconds.
+     */
+    ew32(CTRL, (*ctrl | E1000_CTRL_MDC));
+    E1000_WRITE_FLUSH();
+    udelay(10);
+}
+
+/******************************************************************************
+* Lowers the Management Data Clock
+*
+* hw - Struct containing variables accessed by shared code
+* ctrl - Device control register's current value
+******************************************************************************/
+static void e1000_lower_mdi_clk(struct e1000_hw *hw, u32 *ctrl)
+{
+    /* Lower the clock input to the Management Data Clock (by clearing the MDC
+     * bit), and then delay 10 microseconds.
+     */
+    ew32(CTRL, (*ctrl & ~E1000_CTRL_MDC));
+    E1000_WRITE_FLUSH();
+    udelay(10);
+}
+
+/******************************************************************************
+* Shifts data bits out to the PHY
+*
+* hw - Struct containing variables accessed by shared code
+* data - Data to send out to the PHY
+* count - Number of bits to shift out
+*
+* Bits are shifted out in MSB to LSB order.
+******************************************************************************/
+static void e1000_shift_out_mdi_bits(struct e1000_hw *hw, u32 data, u16 count)
+{
+    u32 ctrl;
+    u32 mask;
+
+    /* We need to shift "count" number of bits out to the PHY. So, the value
+     * in the "data" parameter will be shifted out to the PHY one bit at a
+     * time. In order to do this, "data" must be broken down into bits.
+     */
+    mask = 0x01;
+    mask <<= (count - 1);
+
+    ctrl = er32(CTRL);
+
+    /* Set MDIO_DIR and MDC_DIR direction bits to be used as output pins. */
+    ctrl |= (E1000_CTRL_MDIO_DIR | E1000_CTRL_MDC_DIR);
+
+    while (mask) {
+        /* A "1" is shifted out to the PHY by setting the MDIO bit to "1" and
+         * then raising and lowering the Management Data Clock. A "0" is
+         * shifted out to the PHY by setting the MDIO bit to "0" and then
+         * raising and lowering the clock.
+         */
+        if (data & mask)
+            ctrl |= E1000_CTRL_MDIO;
+        else
+            ctrl &= ~E1000_CTRL_MDIO;
+
+        ew32(CTRL, ctrl);
+        E1000_WRITE_FLUSH();
+
+        udelay(10);
+
+        e1000_raise_mdi_clk(hw, &ctrl);
+        e1000_lower_mdi_clk(hw, &ctrl);
+
+        mask = mask >> 1;
+    }
+}
+
+/******************************************************************************
+* Shifts data bits in from the PHY
+*
+* hw - Struct containing variables accessed by shared code
+*
+* Bits are shifted in, MSB to LSB.
+******************************************************************************/
+static u16 e1000_shift_in_mdi_bits(struct e1000_hw *hw)
+{
+    u32 ctrl;
+    u16 data = 0;
+    u8 i;
+
+    /* In order to read a register from the PHY, we need to shift in a total
+     * of 18 bits from the PHY. The first two bit (turnaround) times are used
+     * to avoid contention on the MDIO pin when a read operation is performed.
+     * These two bits are ignored by us and thrown away. Bits are "shifted in"
+     * by raising the input to the Management Data Clock (setting the MDC bit),
+     * and then reading the value of the MDIO bit.
+     */
+    ctrl = er32(CTRL);
+
+    /* Clear MDIO_DIR (SWDPIO1) to indicate this bit is to be used as input. */
+    ctrl &= ~E1000_CTRL_MDIO_DIR;
+    ctrl &= ~E1000_CTRL_MDIO;
+
+    ew32(CTRL, ctrl);
+    E1000_WRITE_FLUSH();
+
+    /* Raise and Lower the clock before reading in the data. This accounts for
+     * the turnaround bits. The first clock occurred when we clocked out the
+     * last bit of the Register Address.
+     */
+    e1000_raise_mdi_clk(hw, &ctrl);
+    e1000_lower_mdi_clk(hw, &ctrl);
+
+    for (data = 0, i = 0; i < 16; i++) {
+        data = data << 1;
+        e1000_raise_mdi_clk(hw, &ctrl);
+        ctrl = er32(CTRL);
+        /* Check to see if we shifted in a "1". */
+        if (ctrl & E1000_CTRL_MDIO)
+            data |= 1;
+        e1000_lower_mdi_clk(hw, &ctrl);
+    }
+
+    e1000_raise_mdi_clk(hw, &ctrl);
+    e1000_lower_mdi_clk(hw, &ctrl);
+
+    return data;
+}
+
+static s32 e1000_swfw_sync_acquire(struct e1000_hw *hw, u16 mask)
+{
+    u32 swfw_sync = 0;
+    u32 swmask = mask;
+    u32 fwmask = mask << 16;
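+    /* SW_FW_SYNC keeps the software ownership flags in its low 16 bits and
+     * the matching firmware flags in the high 16 bits, so the firmware mask
+     * is simply the software mask shifted up by 16. */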
+    s32 timeout = 200;
+
+    DEBUGFUNC("e1000_swfw_sync_acquire");
+
+    if (hw->swfwhw_semaphore_present)
+        return e1000_get_software_flag(hw);
+
+    if (!hw->swfw_sync_present)
+        return e1000_get_hw_eeprom_semaphore(hw);
+
+    while (timeout) {
+        if (e1000_get_hw_eeprom_semaphore(hw))
+            return -E1000_ERR_SWFW_SYNC;
+
+        swfw_sync = er32(SW_FW_SYNC);
+        if (!(swfw_sync & (fwmask | swmask))) {
+            break;
+        }
+
+        /* firmware currently using resource (fwmask) */
+        /* or other software thread currently using resource (swmask) */
+        e1000_put_hw_eeprom_semaphore(hw);
+        mdelay(5);
+        timeout--;
+    }
+
+    if (!timeout) {
+        DEBUGOUT("Driver can't access resource, SW_FW_SYNC timeout.\n");
+        return -E1000_ERR_SWFW_SYNC;
+    }
+
+    swfw_sync |= swmask;
+    ew32(SW_FW_SYNC, swfw_sync);
+
+    e1000_put_hw_eeprom_semaphore(hw);
+    return E1000_SUCCESS;
+}
+
+static void e1000_swfw_sync_release(struct e1000_hw *hw, u16 mask)
+{
+    u32 swfw_sync;
+    u32 swmask = mask;
+
+    DEBUGFUNC("e1000_swfw_sync_release");
+
+    if (hw->swfwhw_semaphore_present) {
+        e1000_release_software_flag(hw);
+        return;
+    }
+
+    if (!hw->swfw_sync_present) {
+        e1000_put_hw_eeprom_semaphore(hw);
+        return;
+    }
+
+    /* if (e1000_get_hw_eeprom_semaphore(hw))
+     *    return -E1000_ERR_SWFW_SYNC; */
+    while (e1000_get_hw_eeprom_semaphore(hw) != E1000_SUCCESS)
+        ; /* spin until the EEPROM semaphore is acquired */
+
+    swfw_sync = er32(SW_FW_SYNC);
+    swfw_sync &= ~swmask;
+    ew32(SW_FW_SYNC, swfw_sync);
+
+    e1000_put_hw_eeprom_semaphore(hw);
+}
+
+/*****************************************************************************
+* Reads the value from a PHY register. If the register is on a specific
+* non-zero page, the page is selected first.
+* hw - Struct containing variables accessed by shared code
+* reg_addr - address of the PHY register to read
+******************************************************************************/
+s32 e1000_read_phy_reg(struct e1000_hw *hw, u32 reg_addr, u16 *phy_data)
+{
+    u32 ret_val;
+    u16 swfw;
+
+    DEBUGFUNC("e1000_read_phy_reg");
+
+    if ((hw->mac_type == e1000_80003es2lan) &&
+        (er32(STATUS) & E1000_STATUS_FUNC_1)) {
+        swfw = E1000_SWFW_PHY1_SM;
+    } else {
+        swfw = E1000_SWFW_PHY0_SM;
+    }
+    if (e1000_swfw_sync_acquire(hw, swfw))
+        return -E1000_ERR_SWFW_SYNC;
+
+    if ((hw->phy_type == e1000_phy_igp ||
+        hw->phy_type == e1000_phy_igp_3 ||
+        hw->phy_type == e1000_phy_igp_2) &&
+       (reg_addr > MAX_PHY_MULTI_PAGE_REG)) {
+        ret_val = e1000_write_phy_reg_ex(hw, IGP01E1000_PHY_PAGE_SELECT,
+                                         (u16)reg_addr);
+        if (ret_val) {
+            e1000_swfw_sync_release(hw, swfw);
+            return ret_val;
+        }
+    } else if (hw->phy_type == e1000_phy_gg82563) {
+        if (((reg_addr & MAX_PHY_REG_ADDRESS) > MAX_PHY_MULTI_PAGE_REG) ||
+            (hw->mac_type == e1000_80003es2lan)) {
+            /* Select Configuration Page */
+            if ((reg_addr & MAX_PHY_REG_ADDRESS) < GG82563_MIN_ALT_REG) {
+                ret_val = e1000_write_phy_reg_ex(hw, GG82563_PHY_PAGE_SELECT,
+                          (u16)((u16)reg_addr >> GG82563_PAGE_SHIFT));
+            } else {
+                /* Use Alternative Page Select register to access
+                 * registers 30 and 31
+                 */
+                ret_val = e1000_write_phy_reg_ex(hw,
+                                                 GG82563_PHY_PAGE_SELECT_ALT,
+                          (u16)((u16)reg_addr >> GG82563_PAGE_SHIFT));
+            }
+
+            if (ret_val) {
+                e1000_swfw_sync_release(hw, swfw);
+                return ret_val;
+            }
+        }
+    }
+
+    ret_val = e1000_read_phy_reg_ex(hw, MAX_PHY_REG_ADDRESS & reg_addr,
+                                    phy_data);
+
+    e1000_swfw_sync_release(hw, swfw);
+    return ret_val;
+}
+
+static s32 e1000_read_phy_reg_ex(struct e1000_hw *hw, u32 reg_addr,
+				 u16 *phy_data)
+{
+    u32 i;
+    u32 mdic = 0;
+    const u32 phy_addr = 1;
+
+    DEBUGFUNC("e1000_read_phy_reg_ex");
+
+    if (reg_addr > MAX_PHY_REG_ADDRESS) {
+        DEBUGOUT1("PHY Address %d is out of range\n", reg_addr);
+        return -E1000_ERR_PARAM;
+    }
+
+    if (hw->mac_type > e1000_82543) {
+        /* Set up Op-code, Phy Address, and register address in the MDI
+         * Control register.  The MAC will take care of interfacing with the
+         * PHY to retrieve the desired data.
+         */
+        mdic = ((reg_addr << E1000_MDIC_REG_SHIFT) |
+                (phy_addr << E1000_MDIC_PHY_SHIFT) |
+                (E1000_MDIC_OP_READ));
+
+        ew32(MDIC, mdic);
+
+        /* Poll the ready bit to see if the MDI read completed */
+        for (i = 0; i < 64; i++) {
+            udelay(50);
+            mdic = er32(MDIC);
+            if (mdic & E1000_MDIC_READY) break;
+        }
+        if (!(mdic & E1000_MDIC_READY)) {
+            DEBUGOUT("MDI Read did not complete\n");
+            return -E1000_ERR_PHY;
+        }
+        if (mdic & E1000_MDIC_ERROR) {
+            DEBUGOUT("MDI Error\n");
+            return -E1000_ERR_PHY;
+        }
+        *phy_data = (u16)mdic;
+    } else {
+        /* We must first send a preamble through the MDIO pin to signal the
+         * beginning of an MII instruction.  This is done by sending 32
+         * consecutive "1" bits.
+         */
+        e1000_shift_out_mdi_bits(hw, PHY_PREAMBLE, PHY_PREAMBLE_SIZE);
+
+        /* Now combine the next few fields that are required for a read
+         * operation.  We use this method instead of calling the
+         * e1000_shift_out_mdi_bits routine five different times. The format of
+         * a MII read instruction consists of a shift out of 14 bits and is
+         * defined as follows:
+         *    <Preamble><SOF><Op Code><Phy Addr><Reg Addr>
+         * followed by a shift in of 18 bits.  The first two bits shifted in
+         * are TurnAround bits used to avoid contention on the MDIO pin when a
+         * READ operation is performed.  These two bits are thrown away
+         * followed by a shift in of 16 bits which contains the desired data.
+         */
+        mdic = ((reg_addr) | (phy_addr << 5) |
+                (PHY_OP_READ << 10) | (PHY_SOF << 12));
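+        /* The resulting 14-bit frame, shifted out MSB first below, is laid
+         * out as: bits 13:12 SOF, bits 11:10 opcode (read), bits 9:5 PHY
+         * address, bits 4:0 register address. */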
+
+        e1000_shift_out_mdi_bits(hw, mdic, 14);
+
+        /* Now that we've shifted out the read command to the MII, we need to
+         * "shift in" the 16-bit value (18 total bits) of the requested PHY
+         * register.
+         */
+        *phy_data = e1000_shift_in_mdi_bits(hw);
+    }
+    return E1000_SUCCESS;
+}
+
+/******************************************************************************
+* Writes a value to a PHY register
+*
+* hw - Struct containing variables accessed by shared code
+* reg_addr - address of the PHY register to write
+* data - data to write to the PHY
+******************************************************************************/
+s32 e1000_write_phy_reg(struct e1000_hw *hw, u32 reg_addr, u16 phy_data)
+{
+    u32 ret_val;
+    u16 swfw;
+
+    DEBUGFUNC("e1000_write_phy_reg");
+
+    if ((hw->mac_type == e1000_80003es2lan) &&
+        (er32(STATUS) & E1000_STATUS_FUNC_1)) {
+        swfw = E1000_SWFW_PHY1_SM;
+    } else {
+        swfw = E1000_SWFW_PHY0_SM;
+    }
+    if (e1000_swfw_sync_acquire(hw, swfw))
+        return -E1000_ERR_SWFW_SYNC;
+
+    if ((hw->phy_type == e1000_phy_igp ||
+        hw->phy_type == e1000_phy_igp_3 ||
+        hw->phy_type == e1000_phy_igp_2) &&
+       (reg_addr > MAX_PHY_MULTI_PAGE_REG)) {
+        ret_val = e1000_write_phy_reg_ex(hw, IGP01E1000_PHY_PAGE_SELECT,
+                                         (u16)reg_addr);
+        if (ret_val) {
+            e1000_swfw_sync_release(hw, swfw);
+            return ret_val;
+        }
+    } else if (hw->phy_type == e1000_phy_gg82563) {
+        if (((reg_addr & MAX_PHY_REG_ADDRESS) > MAX_PHY_MULTI_PAGE_REG) ||
+            (hw->mac_type == e1000_80003es2lan)) {
+            /* Select Configuration Page */
+            if ((reg_addr & MAX_PHY_REG_ADDRESS) < GG82563_MIN_ALT_REG) {
+                ret_val = e1000_write_phy_reg_ex(hw, GG82563_PHY_PAGE_SELECT,
+                          (u16)((u16)reg_addr >> GG82563_PAGE_SHIFT));
+            } else {
+                /* Use Alternative Page Select register to access
+                 * registers 30 and 31
+                 */
+                ret_val = e1000_write_phy_reg_ex(hw,
+                                                 GG82563_PHY_PAGE_SELECT_ALT,
+                          (u16)((u16)reg_addr >> GG82563_PAGE_SHIFT));
+            }
+
+            if (ret_val) {
+                e1000_swfw_sync_release(hw, swfw);
+                return ret_val;
+            }
+        }
+    }
+
+    ret_val = e1000_write_phy_reg_ex(hw, MAX_PHY_REG_ADDRESS & reg_addr,
+                                     phy_data);
+
+    e1000_swfw_sync_release(hw, swfw);
+    return ret_val;
+}
+
+static s32 e1000_write_phy_reg_ex(struct e1000_hw *hw, u32 reg_addr,
+				  u16 phy_data)
+{
+    u32 i;
+    u32 mdic = 0;
+    const u32 phy_addr = 1;
+
+    DEBUGFUNC("e1000_write_phy_reg_ex");
+
+    if (reg_addr > MAX_PHY_REG_ADDRESS) {
+        DEBUGOUT1("PHY Address %d is out of range\n", reg_addr);
+        return -E1000_ERR_PARAM;
+    }
+
+    if (hw->mac_type > e1000_82543) {
+        /* Set up Op-code, Phy Address, register address, and data intended
+         * for the PHY register in the MDI Control register.  The MAC will take
+         * care of interfacing with the PHY to send the desired data.
+         */
+        mdic = (((u32)phy_data) |
+                (reg_addr << E1000_MDIC_REG_SHIFT) |
+                (phy_addr << E1000_MDIC_PHY_SHIFT) |
+                (E1000_MDIC_OP_WRITE));
+
+        ew32(MDIC, mdic);
+
+        /* Poll the ready bit to see if the MDI write completed */
+        for (i = 0; i < 641; i++) {
+            udelay(5);
+            mdic = er32(MDIC);
+            if (mdic & E1000_MDIC_READY) break;
+        }
+        if (!(mdic & E1000_MDIC_READY)) {
+            DEBUGOUT("MDI Write did not complete\n");
+            return -E1000_ERR_PHY;
+        }
+    } else {
+        /* We'll need to use the SW defined pins to shift the write command
+         * out to the PHY. We first send a preamble to the PHY to signal the
+         * beginning of the MII instruction.  This is done by sending 32
+         * consecutive "1" bits.
+         */
+        e1000_shift_out_mdi_bits(hw, PHY_PREAMBLE, PHY_PREAMBLE_SIZE);
+
+        /* Now combine the remaining required fields that will indicate a
+         * write operation. We use this method instead of calling the
+         * e1000_shift_out_mdi_bits routine for each field in the command. The
+         * format of a MII write instruction is as follows:
+         * <Preamble><SOF><Op Code><Phy Addr><Reg Addr><Turnaround><Data>.
+         */
+        mdic = ((PHY_TURNAROUND) | (reg_addr << 2) | (phy_addr << 7) |
+                (PHY_OP_WRITE << 12) | (PHY_SOF << 14));
+        mdic <<= 16;
+        mdic |= (u32)phy_data;
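+        /* The 32-bit frame shifted out below carries the command in its
+         * upper half (bits 31:30 SOF, 29:28 opcode (write), 27:23 PHY
+         * address, 22:18 register address, 17:16 turnaround) and the 16
+         * data bits in the lower half. */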
+
+        e1000_shift_out_mdi_bits(hw, mdic, 32);
+    }
+
+    return E1000_SUCCESS;
+}
+
+static s32 e1000_read_kmrn_reg(struct e1000_hw *hw, u32 reg_addr, u16 *data)
+{
+    u32 reg_val;
+    u16 swfw;
+    DEBUGFUNC("e1000_read_kmrn_reg");
+
+    if ((hw->mac_type == e1000_80003es2lan) &&
+        (er32(STATUS) & E1000_STATUS_FUNC_1)) {
+        swfw = E1000_SWFW_PHY1_SM;
+    } else {
+        swfw = E1000_SWFW_PHY0_SM;
+    }
+    if (e1000_swfw_sync_acquire(hw, swfw))
+        return -E1000_ERR_SWFW_SYNC;
+
+    /* Write register address */
+    reg_val = ((reg_addr << E1000_KUMCTRLSTA_OFFSET_SHIFT) &
+              E1000_KUMCTRLSTA_OFFSET) |
+              E1000_KUMCTRLSTA_REN;
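+    /* The OFFSET field selects the Kumeran register and REN requests a read;
+     * after the write and a short delay the value can be sampled below. */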
+    ew32(KUMCTRLSTA, reg_val);
+    udelay(2);
+
+    /* Read the data returned */
+    reg_val = er32(KUMCTRLSTA);
+    *data = (u16)reg_val;
+
+    e1000_swfw_sync_release(hw, swfw);
+    return E1000_SUCCESS;
+}
+
+static s32 e1000_write_kmrn_reg(struct e1000_hw *hw, u32 reg_addr, u16 data)
+{
+    u32 reg_val;
+    u16 swfw;
+    DEBUGFUNC("e1000_write_kmrn_reg");
+
+    if ((hw->mac_type == e1000_80003es2lan) &&
+        (er32(STATUS) & E1000_STATUS_FUNC_1)) {
+        swfw = E1000_SWFW_PHY1_SM;
+    } else {
+        swfw = E1000_SWFW_PHY0_SM;
+    }
+    if (e1000_swfw_sync_acquire(hw, swfw))
+        return -E1000_ERR_SWFW_SYNC;
+
+    reg_val = ((reg_addr << E1000_KUMCTRLSTA_OFFSET_SHIFT) &
+              E1000_KUMCTRLSTA_OFFSET) | data;
+    ew32(KUMCTRLSTA, reg_val);
+    udelay(2);
+
+    e1000_swfw_sync_release(hw, swfw);
+    return E1000_SUCCESS;
+}
+
+/******************************************************************************
+* Returns the PHY to the power-on reset state
+*
+* hw - Struct containing variables accessed by shared code
+******************************************************************************/
+s32 e1000_phy_hw_reset(struct e1000_hw *hw)
+{
+    u32 ctrl, ctrl_ext;
+    u32 led_ctrl;
+    s32 ret_val;
+    u16 swfw;
+
+    DEBUGFUNC("e1000_phy_hw_reset");
+
+    /* If the PHY reset is blocked, it is not an error; we simply return
+     * success without performing the reset. */
+    ret_val = e1000_check_phy_reset_block(hw);
+    if (ret_val)
+        return E1000_SUCCESS;
+
+    DEBUGOUT("Resetting Phy...\n");
+
+    if (hw->mac_type > e1000_82543) {
+        if ((hw->mac_type == e1000_80003es2lan) &&
+            (er32(STATUS) & E1000_STATUS_FUNC_1)) {
+            swfw = E1000_SWFW_PHY1_SM;
+        } else {
+            swfw = E1000_SWFW_PHY0_SM;
+        }
+        if (e1000_swfw_sync_acquire(hw, swfw)) {
+            DEBUGOUT("Unable to acquire swfw sync\n");
+            return -E1000_ERR_SWFW_SYNC;
+        }
+        /* Read the device control register and assert the E1000_CTRL_PHY_RST
+         * bit. Then, take it out of reset.
+         * For pre-e1000_82571 hardware, we delay for 10ms between the assert
+         * and deassert.  For e1000_82571 hardware and later, we instead delay
+         * for 100us between and 10ms after the deassertion.
+         */
+        ctrl = er32(CTRL);
+        ew32(CTRL, ctrl | E1000_CTRL_PHY_RST);
+        E1000_WRITE_FLUSH();
+
+        if (hw->mac_type < e1000_82571)
+            msleep(10);
+        else
+            udelay(100);
+
+        ew32(CTRL, ctrl);
+        E1000_WRITE_FLUSH();
+
+        if (hw->mac_type >= e1000_82571)
+            mdelay(10);
+
+        e1000_swfw_sync_release(hw, swfw);
+    } else {
+        /* Read the Extended Device Control Register, assert the PHY_RESET_DIR
+         * bit to put the PHY into reset. Then, take it out of reset.
+         */
+        ctrl_ext = er32(CTRL_EXT);
+        ctrl_ext |= E1000_CTRL_EXT_SDP4_DIR;
+        ctrl_ext &= ~E1000_CTRL_EXT_SDP4_DATA;
+        ew32(CTRL_EXT, ctrl_ext);
+        E1000_WRITE_FLUSH();
+        msleep(10);
+        ctrl_ext |= E1000_CTRL_EXT_SDP4_DATA;
+        ew32(CTRL_EXT, ctrl_ext);
+        E1000_WRITE_FLUSH();
+    }
+    udelay(150);
+
+    if ((hw->mac_type == e1000_82541) || (hw->mac_type == e1000_82547)) {
+        /* Configure activity LED after PHY reset */
+        led_ctrl = er32(LEDCTL);
+        led_ctrl &= IGP_ACTIVITY_LED_MASK;
+        led_ctrl |= (IGP_ACTIVITY_LED_ENABLE | IGP_LED3_MODE);
+        ew32(LEDCTL, led_ctrl);
+    }
+
+    /* Wait for FW to finish PHY configuration. */
+    ret_val = e1000_get_phy_cfg_done(hw);
+    if (ret_val != E1000_SUCCESS)
+        return ret_val;
+    e1000_release_software_semaphore(hw);
+
+    if ((hw->mac_type == e1000_ich8lan) && (hw->phy_type == e1000_phy_igp_3))
+        ret_val = e1000_init_lcd_from_nvm(hw);
+
+    return ret_val;
+}
+
+/******************************************************************************
+* Resets the PHY
+*
+* hw - Struct containing variables accessed by shared code
+*
+* Sets bit 15 of the MII Control register
+******************************************************************************/
+s32 e1000_phy_reset(struct e1000_hw *hw)
+{
+    s32 ret_val;
+    u16 phy_data;
+
+    DEBUGFUNC("e1000_phy_reset");
+
+    /* If the PHY reset is blocked, it is not an error; we simply return
+     * success without performing the reset. */
+    ret_val = e1000_check_phy_reset_block(hw);
+    if (ret_val)
+        return E1000_SUCCESS;
+
+    switch (hw->phy_type) {
+    case e1000_phy_igp:
+    case e1000_phy_igp_2:
+    case e1000_phy_igp_3:
+    case e1000_phy_ife:
+        ret_val = e1000_phy_hw_reset(hw);
+        if (ret_val)
+            return ret_val;
+        break;
+    default:
+        ret_val = e1000_read_phy_reg(hw, PHY_CTRL, &phy_data);
+        if (ret_val)
+            return ret_val;
+
+        phy_data |= MII_CR_RESET;
+        ret_val = e1000_write_phy_reg(hw, PHY_CTRL, phy_data);
+        if (ret_val)
+            return ret_val;
+
+        udelay(1);
+        break;
+    }
+
+    if (hw->phy_type == e1000_phy_igp || hw->phy_type == e1000_phy_igp_2)
+        e1000_phy_init_script(hw);
+
+    return E1000_SUCCESS;
+}
+
+/******************************************************************************
+* Work-around for 82566 power-down: on D3 entry-
+* 1) disable gigabit link
+* 2) write VR power-down enable
+* 3) read it back
+* if successful continue, else issue LCD reset and repeat
+*
+* hw - struct containing variables accessed by shared code
+******************************************************************************/
+void e1000_phy_powerdown_workaround(struct e1000_hw *hw)
+{
+    s32 reg;
+    u16 phy_data;
+    s32 retry = 0;
+
+    DEBUGFUNC("e1000_phy_powerdown_workaround");
+
+    if (hw->phy_type != e1000_phy_igp_3)
+        return;
+
+    do {
+        /* Disable link */
+        reg = er32(PHY_CTRL);
+        ew32(PHY_CTRL, reg | E1000_PHY_CTRL_GBE_DISABLE |
+                        E1000_PHY_CTRL_NOND0A_GBE_DISABLE);
+
+        /* Write VR power-down enable - bits 9:8 should be 10b */
+        e1000_read_phy_reg(hw, IGP3_VR_CTRL, &phy_data);
+        phy_data |= (1 << 9);
+        phy_data &= ~(1 << 8);
+        e1000_write_phy_reg(hw, IGP3_VR_CTRL, phy_data);
+
+        /* Read it back and test */
+        e1000_read_phy_reg(hw, IGP3_VR_CTRL, &phy_data);
+        if (((phy_data & IGP3_VR_CTRL_MODE_MASK) == IGP3_VR_CTRL_MODE_SHUT) || retry)
+            break;
+
+        /* Issue PHY reset and repeat at most one more time */
+        reg = er32(CTRL);
+        ew32(CTRL, reg | E1000_CTRL_PHY_RST);
+        retry++;
+    } while (retry);
+
+    return;
+
+}
+
+/******************************************************************************
+* Work-around for 82566 Kumeran PCS lock loss:
+* On link status change (i.e. PCI reset, speed change) and link is up and
+* speed is gigabit-
+* 0) if workaround is optionally disabled do nothing
+* 1) wait 1ms for Kumeran link to come up
+* 2) check Kumeran Diagnostic register PCS lock loss bit
+* 3) if not set the link is locked (all is good), otherwise...
+* 4) reset the PHY
+* 5) repeat up to 10 times
+* Note: this is only called for IGP3 copper when speed is 1gb.
+*
+* hw - struct containing variables accessed by shared code
+******************************************************************************/
+static s32 e1000_kumeran_lock_loss_workaround(struct e1000_hw *hw)
+{
+    s32 ret_val;
+    s32 reg;
+    s32 cnt;
+    u16 phy_data;
+
+    if (hw->kmrn_lock_loss_workaround_disabled)
+        return E1000_SUCCESS;
+
+    /* Make sure link is up before proceeding.  If not just return.
+     * Attempting this while link is negotiating fouled up link
+     * stability */
+    ret_val = e1000_read_phy_reg(hw, PHY_STATUS, &phy_data);
+    ret_val = e1000_read_phy_reg(hw, PHY_STATUS, &phy_data);
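+    /* The link status bit is latched low, so the register is read twice:
+     * the first read clears any stale latched value and the second reflects
+     * the current link state. */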
+
+    if (phy_data & MII_SR_LINK_STATUS) {
+        for (cnt = 0; cnt < 10; cnt++) {
+            /* read once to clear */
+            ret_val = e1000_read_phy_reg(hw, IGP3_KMRN_DIAG, &phy_data);
+            if (ret_val)
+                return ret_val;
+            /* and again to get new status */
+            ret_val = e1000_read_phy_reg(hw, IGP3_KMRN_DIAG, &phy_data);
+            if (ret_val)
+                return ret_val;
+
+            /* check for PCS lock */
+            if (!(phy_data & IGP3_KMRN_DIAG_PCS_LOCK_LOSS))
+                return E1000_SUCCESS;
+
+            /* Issue PHY reset */
+            e1000_phy_hw_reset(hw);
+            mdelay(5);
+        }
+        /* Disable GigE link negotiation */
+        reg = er32(PHY_CTRL);
+        ew32(PHY_CTRL, reg | E1000_PHY_CTRL_GBE_DISABLE |
+                        E1000_PHY_CTRL_NOND0A_GBE_DISABLE);
+
+        /* unable to acquire PCS lock */
+        return E1000_ERR_PHY;
+    }
+
+    return E1000_SUCCESS;
+}
+
+/******************************************************************************
+* Probes the expected PHY address for known PHY IDs
+*
+* hw - Struct containing variables accessed by shared code
+******************************************************************************/
+static s32 e1000_detect_gig_phy(struct e1000_hw *hw)
+{
+    s32 phy_init_status, ret_val;
+    u16 phy_id_high, phy_id_low;
+    bool match = false;
+
+    DEBUGFUNC("e1000_detect_gig_phy");
+
+    if (hw->phy_id != 0)
+        return E1000_SUCCESS;
+
+    /* The 82571 firmware may still be configuring the PHY.  In this
+     * case, we cannot access the PHY until the configuration is done.  So
+     * we explicitly set the PHY values. */
+    if (hw->mac_type == e1000_82571 ||
+        hw->mac_type == e1000_82572) {
+        hw->phy_id = IGP01E1000_I_PHY_ID;
+        hw->phy_type = e1000_phy_igp_2;
+        return E1000_SUCCESS;
+    }
+
+    /* ESB-2 PHY reads require e1000_phy_gg82563 to be set because of a work-
+     * around that forces PHY page 0 to be set or the reads fail.  The rest of
+     * the code in this routine uses e1000_read_phy_reg to read the PHY ID.
+     * So for ESB-2 we need to have this set so our reads won't fail.  If the
+     * attached PHY is not an e1000_phy_gg82563, the routines below will figure
+     * this out as well. */
+    if (hw->mac_type == e1000_80003es2lan)
+        hw->phy_type = e1000_phy_gg82563;
+
+    /* Read the PHY ID Registers to identify which PHY is onboard. */
+    ret_val = e1000_read_phy_reg(hw, PHY_ID1, &phy_id_high);
+    if (ret_val)
+        return ret_val;
+
+    hw->phy_id = (u32)(phy_id_high << 16);
+    udelay(20);
+    ret_val = e1000_read_phy_reg(hw, PHY_ID2, &phy_id_low);
+    if (ret_val)
+        return ret_val;
+
+    hw->phy_id |= (u32)(phy_id_low & PHY_REVISION_MASK);
+    hw->phy_revision = (u32)phy_id_low & ~PHY_REVISION_MASK;
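+    /* The low bits of PHY_ID2 hold the silicon revision; they are masked out
+     * of phy_id and kept separately in phy_revision. */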
+
+    switch (hw->mac_type) {
+    case e1000_82543:
+        if (hw->phy_id == M88E1000_E_PHY_ID) match = true;
+        break;
+    case e1000_82544:
+        if (hw->phy_id == M88E1000_I_PHY_ID) match = true;
+        break;
+    case e1000_82540:
+    case e1000_82545:
+    case e1000_82545_rev_3:
+    case e1000_82546:
+    case e1000_82546_rev_3:
+        if (hw->phy_id == M88E1011_I_PHY_ID) match = true;
+        break;
+    case e1000_82541:
+    case e1000_82541_rev_2:
+    case e1000_82547:
+    case e1000_82547_rev_2:
+        if (hw->phy_id == IGP01E1000_I_PHY_ID) match = true;
+        break;
+    case e1000_82573:
+        if (hw->phy_id == M88E1111_I_PHY_ID) match = true;
+        break;
+    case e1000_80003es2lan:
+        if (hw->phy_id == GG82563_E_PHY_ID) match = true;
+        break;
+    case e1000_ich8lan:
+        if (hw->phy_id == IGP03E1000_E_PHY_ID) match = true;
+        if (hw->phy_id == IFE_E_PHY_ID) match = true;
+        if (hw->phy_id == IFE_PLUS_E_PHY_ID) match = true;
+        if (hw->phy_id == IFE_C_E_PHY_ID) match = true;
+        break;
+    default:
+        DEBUGOUT1("Invalid MAC type %d\n", hw->mac_type);
+        return -E1000_ERR_CONFIG;
+    }
+    phy_init_status = e1000_set_phy_type(hw);
+
+    if ((match) && (phy_init_status == E1000_SUCCESS)) {
+        DEBUGOUT1("PHY ID 0x%X detected\n", hw->phy_id);
+        return E1000_SUCCESS;
+    }
+    DEBUGOUT1("Invalid PHY ID 0x%X\n", hw->phy_id);
+    return -E1000_ERR_PHY;
+}
+
+/******************************************************************************
+* Resets the PHY's DSP
+*
+* hw - Struct containing variables accessed by shared code
+******************************************************************************/
+static s32 e1000_phy_reset_dsp(struct e1000_hw *hw)
+{
+    s32 ret_val;
+    DEBUGFUNC("e1000_phy_reset_dsp");
+
+    do {
+        if (hw->phy_type != e1000_phy_gg82563) {
+            ret_val = e1000_write_phy_reg(hw, 29, 0x001d);
+            if (ret_val) break;
+        }
+        ret_val = e1000_write_phy_reg(hw, 30, 0x00c1);
+        if (ret_val) break;
+        ret_val = e1000_write_phy_reg(hw, 30, 0x0000);
+        if (ret_val) break;
+        ret_val = E1000_SUCCESS;
+    } while (0);
+
+    return ret_val;
+}
+
+/******************************************************************************
+* Get PHY information from various PHY registers for igp PHY only.
+*
+* hw - Struct containing variables accessed by shared code
+* phy_info - PHY information structure
+******************************************************************************/
+static s32 e1000_phy_igp_get_info(struct e1000_hw *hw,
+				  struct e1000_phy_info *phy_info)
+{
+    s32 ret_val;
+    u16 phy_data, min_length, max_length, average;
+    e1000_rev_polarity polarity;
+
+    DEBUGFUNC("e1000_phy_igp_get_info");
+
+    /* The downshift status is checked only once, after link is established,
+     * and is stored in the hw->speed_downgraded parameter. */
+    phy_info->downshift = (e1000_downshift)hw->speed_downgraded;
+
+    /* Extended 10BASE-T distance is not applicable to the IGP01E1000. */
+    phy_info->extended_10bt_distance = e1000_10bt_ext_dist_enable_normal;
+
+    /* The IGP01E1000 always corrects polarity reversal */
+    phy_info->polarity_correction = e1000_polarity_reversal_enabled;
+
+    /* Check polarity status */
+    ret_val = e1000_check_polarity(hw, &polarity);
+    if (ret_val)
+        return ret_val;
+
+    phy_info->cable_polarity = polarity;
+
+    ret_val = e1000_read_phy_reg(hw, IGP01E1000_PHY_PORT_STATUS, &phy_data);
+    if (ret_val)
+        return ret_val;
+
+    phy_info->mdix_mode = (e1000_auto_x_mode)((phy_data & IGP01E1000_PSSR_MDIX) >>
+                          IGP01E1000_PSSR_MDIX_SHIFT);
+
+    if ((phy_data & IGP01E1000_PSSR_SPEED_MASK) ==
+       IGP01E1000_PSSR_SPEED_1000MBPS) {
+        /* Local/Remote Receiver Information are only valid at 1000 Mbps */
+        ret_val = e1000_read_phy_reg(hw, PHY_1000T_STATUS, &phy_data);
+        if (ret_val)
+            return ret_val;
+
+        phy_info->local_rx = ((phy_data & SR_1000T_LOCAL_RX_STATUS) >>
+                             SR_1000T_LOCAL_RX_STATUS_SHIFT) ?
+                             e1000_1000t_rx_status_ok : e1000_1000t_rx_status_not_ok;
+        phy_info->remote_rx = ((phy_data & SR_1000T_REMOTE_RX_STATUS) >>
+                              SR_1000T_REMOTE_RX_STATUS_SHIFT) ?
+                              e1000_1000t_rx_status_ok : e1000_1000t_rx_status_not_ok;
+
+        /* Get cable length */
+        ret_val = e1000_get_cable_length(hw, &min_length, &max_length);
+        if (ret_val)
+            return ret_val;
+
+        /* Translate to old method */
+        average = (max_length + min_length) / 2;
+
+        if (average <= e1000_igp_cable_length_50)
+            phy_info->cable_length = e1000_cable_length_50;
+        else if (average <= e1000_igp_cable_length_80)
+            phy_info->cable_length = e1000_cable_length_50_80;
+        else if (average <= e1000_igp_cable_length_110)
+            phy_info->cable_length = e1000_cable_length_80_110;
+        else if (average <= e1000_igp_cable_length_140)
+            phy_info->cable_length = e1000_cable_length_110_140;
+        else
+            phy_info->cable_length = e1000_cable_length_140;
+    }
+
+    return E1000_SUCCESS;
+}
+
+/******************************************************************************
+* Get PHY information from various PHY registers for ife PHY only.
+*
+* hw - Struct containing variables accessed by shared code
+* phy_info - PHY information structure
+******************************************************************************/
+static s32 e1000_phy_ife_get_info(struct e1000_hw *hw,
+				  struct e1000_phy_info *phy_info)
+{
+    s32 ret_val;
+    u16 phy_data;
+    e1000_rev_polarity polarity;
+
+    DEBUGFUNC("e1000_phy_ife_get_info");
+
+    phy_info->downshift = (e1000_downshift)hw->speed_downgraded;
+    phy_info->extended_10bt_distance = e1000_10bt_ext_dist_enable_normal;
+
+    ret_val = e1000_read_phy_reg(hw, IFE_PHY_SPECIAL_CONTROL, &phy_data);
+    if (ret_val)
+        return ret_val;
+    phy_info->polarity_correction =
+                        ((phy_data & IFE_PSC_AUTO_POLARITY_DISABLE) >>
+                        IFE_PSC_AUTO_POLARITY_DISABLE_SHIFT) ?
+                        e1000_polarity_reversal_disabled : e1000_polarity_reversal_enabled;
+
+    if (phy_info->polarity_correction == e1000_polarity_reversal_enabled) {
+        ret_val = e1000_check_polarity(hw, &polarity);
+        if (ret_val)
+            return ret_val;
+    } else {
+        /* Polarity is forced. */
+        polarity = ((phy_data & IFE_PSC_FORCE_POLARITY) >>
+                     IFE_PSC_FORCE_POLARITY_SHIFT) ?
+                     e1000_rev_polarity_reversed : e1000_rev_polarity_normal;
+    }
+    phy_info->cable_polarity = polarity;
+
+    ret_val = e1000_read_phy_reg(hw, IFE_PHY_MDIX_CONTROL, &phy_data);
+    if (ret_val)
+        return ret_val;
+
+    phy_info->mdix_mode = (e1000_auto_x_mode)
+                     ((phy_data & (IFE_PMC_AUTO_MDIX | IFE_PMC_FORCE_MDIX)) >>
+                     IFE_PMC_MDIX_MODE_SHIFT);
+
+    return E1000_SUCCESS;
+}
+
+/******************************************************************************
+* Get PHY information from various PHY registers for the M88 PHY only.
+*
+* hw - Struct containing variables accessed by shared code
+* phy_info - PHY information structure
+******************************************************************************/
+static s32 e1000_phy_m88_get_info(struct e1000_hw *hw,
+				  struct e1000_phy_info *phy_info)
+{
+    s32 ret_val;
+    u16 phy_data;
+    e1000_rev_polarity polarity;
+
+    DEBUGFUNC("e1000_phy_m88_get_info");
+
+    /* The downshift status is checked only once, after link is established,
+     * and is stored in the hw->speed_downgraded parameter. */
+    phy_info->downshift = (e1000_downshift)hw->speed_downgraded;
+
+    ret_val = e1000_read_phy_reg(hw, M88E1000_PHY_SPEC_CTRL, &phy_data);
+    if (ret_val)
+        return ret_val;
+
+    phy_info->extended_10bt_distance =
+        ((phy_data & M88E1000_PSCR_10BT_EXT_DIST_ENABLE) >>
+        M88E1000_PSCR_10BT_EXT_DIST_ENABLE_SHIFT) ?
+        e1000_10bt_ext_dist_enable_lower : e1000_10bt_ext_dist_enable_normal;
+
+    phy_info->polarity_correction =
+        ((phy_data & M88E1000_PSCR_POLARITY_REVERSAL) >>
+        M88E1000_PSCR_POLARITY_REVERSAL_SHIFT) ?
+        e1000_polarity_reversal_disabled : e1000_polarity_reversal_enabled;
+
+    /* Check polarity status */
+    ret_val = e1000_check_polarity(hw, &polarity);
+    if (ret_val)
+        return ret_val;
+    phy_info->cable_polarity = polarity;
+
+    ret_val = e1000_read_phy_reg(hw, M88E1000_PHY_SPEC_STATUS, &phy_data);
+    if (ret_val)
+        return ret_val;
+
+    phy_info->mdix_mode = (e1000_auto_x_mode)((phy_data & M88E1000_PSSR_MDIX) >>
+                          M88E1000_PSSR_MDIX_SHIFT);
+
+    if ((phy_data & M88E1000_PSSR_SPEED) == M88E1000_PSSR_1000MBS) {
+        /* Cable Length Estimation and Local/Remote Receiver Information
+         * are only valid at 1000 Mbps.
+         */
+        if (hw->phy_type != e1000_phy_gg82563) {
+            phy_info->cable_length = (e1000_cable_length)((phy_data & M88E1000_PSSR_CABLE_LENGTH) >>
+                                      M88E1000_PSSR_CABLE_LENGTH_SHIFT);
+        } else {
+            ret_val = e1000_read_phy_reg(hw, GG82563_PHY_DSP_DISTANCE,
+                                         &phy_data);
+            if (ret_val)
+                return ret_val;
+
+            phy_info->cable_length = (e1000_cable_length)(phy_data & GG82563_DSPD_CABLE_LENGTH);
+        }
+
+        ret_val = e1000_read_phy_reg(hw, PHY_1000T_STATUS, &phy_data);
+        if (ret_val)
+            return ret_val;
+
+        phy_info->local_rx = ((phy_data & SR_1000T_LOCAL_RX_STATUS) >>
+                             SR_1000T_LOCAL_RX_STATUS_SHIFT) ?
+                             e1000_1000t_rx_status_ok : e1000_1000t_rx_status_not_ok;
+        phy_info->remote_rx = ((phy_data & SR_1000T_REMOTE_RX_STATUS) >>
+                              SR_1000T_REMOTE_RX_STATUS_SHIFT) ?
+                              e1000_1000t_rx_status_ok : e1000_1000t_rx_status_not_ok;
+
+    }
+
+    return E1000_SUCCESS;
+}
+
+/******************************************************************************
+* Get PHY information from various PHY registers
+*
+* hw - Struct containing variables accessed by shared code
+* phy_info - PHY information structure
+******************************************************************************/
+s32 e1000_phy_get_info(struct e1000_hw *hw, struct e1000_phy_info *phy_info)
+{
+    s32 ret_val;
+    u16 phy_data;
+
+    DEBUGFUNC("e1000_phy_get_info");
+
+    phy_info->cable_length = e1000_cable_length_undefined;
+    phy_info->extended_10bt_distance = e1000_10bt_ext_dist_enable_undefined;
+    phy_info->cable_polarity = e1000_rev_polarity_undefined;
+    phy_info->downshift = e1000_downshift_undefined;
+    phy_info->polarity_correction = e1000_polarity_reversal_undefined;
+    phy_info->mdix_mode = e1000_auto_x_mode_undefined;
+    phy_info->local_rx = e1000_1000t_rx_status_undefined;
+    phy_info->remote_rx = e1000_1000t_rx_status_undefined;
+
+    if (hw->media_type != e1000_media_type_copper) {
+        DEBUGOUT("PHY info is only valid for copper media\n");
+        return -E1000_ERR_CONFIG;
+    }
+
+    ret_val = e1000_read_phy_reg(hw, PHY_STATUS, &phy_data);
+    if (ret_val)
+        return ret_val;
+
+    ret_val = e1000_read_phy_reg(hw, PHY_STATUS, &phy_data);
+    if (ret_val)
+        return ret_val;
+
+    if ((phy_data & MII_SR_LINK_STATUS) != MII_SR_LINK_STATUS) {
+        DEBUGOUT("PHY info is only valid if link is up\n");
+        return -E1000_ERR_CONFIG;
+    }
+
+    if (hw->phy_type == e1000_phy_igp ||
+        hw->phy_type == e1000_phy_igp_3 ||
+        hw->phy_type == e1000_phy_igp_2)
+        return e1000_phy_igp_get_info(hw, phy_info);
+    else if (hw->phy_type == e1000_phy_ife)
+        return e1000_phy_ife_get_info(hw, phy_info);
+    else
+        return e1000_phy_m88_get_info(hw, phy_info);
+}
+
+s32 e1000_validate_mdi_setting(struct e1000_hw *hw)
+{
+    DEBUGFUNC("e1000_validate_mdi_settings");
+
+    if (!hw->autoneg && (hw->mdix == 0 || hw->mdix == 3)) {
+        DEBUGOUT("Invalid MDI setting detected\n");
+        hw->mdix = 1;
+        return -E1000_ERR_CONFIG;
+    }
+    return E1000_SUCCESS;
+}
+
+
+/******************************************************************************
+ * Sets up eeprom variables in the hw struct.  Must be called after mac_type
+ * is configured.  Additionally, if this is ICH8, the flash controller GbE
+ * registers must be mapped, or this will crash.
+ *
+ * hw - Struct containing variables accessed by shared code
+ *****************************************************************************/
+s32 e1000_init_eeprom_params(struct e1000_hw *hw)
+{
+    struct e1000_eeprom_info *eeprom = &hw->eeprom;
+    u32 eecd = er32(EECD);
+    s32 ret_val = E1000_SUCCESS;
+    u16 eeprom_size;
+
+    DEBUGFUNC("e1000_init_eeprom_params");
+
+    switch (hw->mac_type) {
+    case e1000_82542_rev2_0:
+    case e1000_82542_rev2_1:
+    case e1000_82543:
+    case e1000_82544:
+        eeprom->type = e1000_eeprom_microwire;
+        eeprom->word_size = 64;
+        eeprom->opcode_bits = 3;
+        eeprom->address_bits = 6;
+        eeprom->delay_usec = 50;
+        eeprom->use_eerd = false;
+        eeprom->use_eewr = false;
+        break;
+    case e1000_82540:
+    case e1000_82545:
+    case e1000_82545_rev_3:
+    case e1000_82546:
+    case e1000_82546_rev_3:
+        eeprom->type = e1000_eeprom_microwire;
+        eeprom->opcode_bits = 3;
+        eeprom->delay_usec = 50;
+        if (eecd & E1000_EECD_SIZE) {
+            eeprom->word_size = 256;
+            eeprom->address_bits = 8;
+        } else {
+            eeprom->word_size = 64;
+            eeprom->address_bits = 6;
+        }
+        eeprom->use_eerd = false;
+        eeprom->use_eewr = false;
+        break;
+    case e1000_82541:
+    case e1000_82541_rev_2:
+    case e1000_82547:
+    case e1000_82547_rev_2:
+        if (eecd & E1000_EECD_TYPE) {
+            eeprom->type = e1000_eeprom_spi;
+            eeprom->opcode_bits = 8;
+            eeprom->delay_usec = 1;
+            if (eecd & E1000_EECD_ADDR_BITS) {
+                eeprom->page_size = 32;
+                eeprom->address_bits = 16;
+            } else {
+                eeprom->page_size = 8;
+                eeprom->address_bits = 8;
+            }
+        } else {
+            eeprom->type = e1000_eeprom_microwire;
+            eeprom->opcode_bits = 3;
+            eeprom->delay_usec = 50;
+            if (eecd & E1000_EECD_ADDR_BITS) {
+                eeprom->word_size = 256;
+                eeprom->address_bits = 8;
+            } else {
+                eeprom->word_size = 64;
+                eeprom->address_bits = 6;
+            }
+        }
+        eeprom->use_eerd = false;
+        eeprom->use_eewr = false;
+        break;
+    case e1000_82571:
+    case e1000_82572:
+        eeprom->type = e1000_eeprom_spi;
+        eeprom->opcode_bits = 8;
+        eeprom->delay_usec = 1;
+        if (eecd & E1000_EECD_ADDR_BITS) {
+            eeprom->page_size = 32;
+            eeprom->address_bits = 16;
+        } else {
+            eeprom->page_size = 8;
+            eeprom->address_bits = 8;
+        }
+        eeprom->use_eerd = false;
+        eeprom->use_eewr = false;
+        break;
+    case e1000_82573:
+        eeprom->type = e1000_eeprom_spi;
+        eeprom->opcode_bits = 8;
+        eeprom->delay_usec = 1;
+        if (eecd & E1000_EECD_ADDR_BITS) {
+            eeprom->page_size = 32;
+            eeprom->address_bits = 16;
+        } else {
+            eeprom->page_size = 8;
+            eeprom->address_bits = 8;
+        }
+        eeprom->use_eerd = true;
+        eeprom->use_eewr = true;
+        if (!e1000_is_onboard_nvm_eeprom(hw)) {
+            eeprom->type = e1000_eeprom_flash;
+            eeprom->word_size = 2048;
+
+            /* Ensure that the Autonomous FLASH update bit is cleared due to
+             * Flash update issue on parts which use a FLASH for NVM. */
+            eecd &= ~E1000_EECD_AUPDEN;
+            ew32(EECD, eecd);
+        }
+        break;
+    case e1000_80003es2lan:
+        eeprom->type = e1000_eeprom_spi;
+        eeprom->opcode_bits = 8;
+        eeprom->delay_usec = 1;
+        if (eecd & E1000_EECD_ADDR_BITS) {
+            eeprom->page_size = 32;
+            eeprom->address_bits = 16;
+        } else {
+            eeprom->page_size = 8;
+            eeprom->address_bits = 8;
+        }
+        eeprom->use_eerd = true;
+        eeprom->use_eewr = false;
+        break;
+    case e1000_ich8lan:
+        {
+        s32  i = 0;
+        u32 flash_size = E1000_READ_ICH_FLASH_REG(hw, ICH_FLASH_GFPREG);
+
+        eeprom->type = e1000_eeprom_ich8;
+        eeprom->use_eerd = false;
+        eeprom->use_eewr = false;
+        eeprom->word_size = E1000_SHADOW_RAM_WORDS;
+
+        /* Zero the shadow RAM structure. But don't load it from NVM
+         * so as to save time for driver init */
+        if (hw->eeprom_shadow_ram != NULL) {
+            for (i = 0; i < E1000_SHADOW_RAM_WORDS; i++) {
+                hw->eeprom_shadow_ram[i].modified = false;
+                hw->eeprom_shadow_ram[i].eeprom_word = 0xFFFF;
+            }
+        }
+
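+        /* GFPREG describes the GbE flash region in sector units: the low
+         * field is the first sector and the high field the last.  The region
+         * holds two NVM banks, so the per-bank size in 16-bit words is the
+         * region's byte count divided by 2 banks and by sizeof(u16). */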
+        hw->flash_base_addr = (flash_size & ICH_GFPREG_BASE_MASK) *
+                              ICH_FLASH_SECTOR_SIZE;
+
+        hw->flash_bank_size = ((flash_size >> 16) & ICH_GFPREG_BASE_MASK) + 1;
+        hw->flash_bank_size -= (flash_size & ICH_GFPREG_BASE_MASK);
+
+        hw->flash_bank_size *= ICH_FLASH_SECTOR_SIZE;
+
+        hw->flash_bank_size /= 2 * sizeof(u16);
+
+        break;
+        }
+    default:
+        break;
+    }
+
+    if (eeprom->type == e1000_eeprom_spi) {
+        /* eeprom_size will be an enum [0..8] that maps to eeprom sizes 128B to
+         * 32KB (incremented by powers of 2).
+         */
+        if (hw->mac_type <= e1000_82547_rev_2) {
+            /* Set to default value for initial eeprom read. */
+            eeprom->word_size = 64;
+            ret_val = e1000_read_eeprom(hw, EEPROM_CFG, 1, &eeprom_size);
+            if (ret_val)
+                return ret_val;
+            eeprom_size = (eeprom_size & EEPROM_SIZE_MASK) >> EEPROM_SIZE_SHIFT;
+            /* 256B eeprom size was not supported in earlier hardware, so we
+             * bump eeprom_size up one to ensure that "1" (which maps to 256B)
+             * is never the result used in the shifting logic below. */
+            if (eeprom_size)
+                eeprom_size++;
+        } else {
+            eeprom_size = (u16)((eecd & E1000_EECD_SIZE_EX_MASK) >>
+                          E1000_EECD_SIZE_EX_SHIFT);
+        }
+
+        eeprom->word_size = 1 << (eeprom_size + EEPROM_WORD_SIZE_SHIFT);
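+        /* A size code of n thus yields 1 << (n + EEPROM_WORD_SIZE_SHIFT)
+         * 16-bit words; with the 128B-to-32KB range noted above this runs
+         * from 64 words (code 0) up to 16K words (code 8). */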
+    }
+    return ret_val;
+}
+
+/******************************************************************************
+ * Raises the EEPROM's clock input.
+ *
+ * hw - Struct containing variables accessed by shared code
+ * eecd - EECD's current value
+ *****************************************************************************/
+static void e1000_raise_ee_clk(struct e1000_hw *hw, u32 *eecd)
+{
+    /* Raise the clock input to the EEPROM (by setting the SK bit), and then
+     * wait <delay> microseconds.
+     */
+    *eecd = *eecd | E1000_EECD_SK;
+    ew32(EECD, *eecd);
+    E1000_WRITE_FLUSH();
+    udelay(hw->eeprom.delay_usec);
+}
+
+/******************************************************************************
+ * Lowers the EEPROM's clock input.
+ *
+ * hw - Struct containing variables accessed by shared code
+ * eecd - EECD's current value
+ *****************************************************************************/
+static void e1000_lower_ee_clk(struct e1000_hw *hw, u32 *eecd)
+{
+    /* Lower the clock input to the EEPROM (by clearing the SK bit), and then
+     * wait 50 microseconds.
+     */
+    *eecd = *eecd & ~E1000_EECD_SK;
+    ew32(EECD, *eecd);
+    E1000_WRITE_FLUSH();
+    udelay(hw->eeprom.delay_usec);
+}
+
+/******************************************************************************
+ * Shift data bits out to the EEPROM.
+ *
+ * hw - Struct containing variables accessed by shared code
+ * data - data to send to the EEPROM
+ * count - number of bits to shift out
+ *****************************************************************************/
+static void e1000_shift_out_ee_bits(struct e1000_hw *hw, u16 data, u16 count)
+{
+    struct e1000_eeprom_info *eeprom = &hw->eeprom;
+    u32 eecd;
+    u32 mask;
+
+    /* We need to shift "count" bits out to the EEPROM. So, value in the
+     * "data" parameter will be shifted out to the EEPROM one bit at a time.
+     * In order to do this, "data" must be broken down into bits.
+     */
+    mask = 0x01 << (count - 1);
+    eecd = er32(EECD);
+    if (eeprom->type == e1000_eeprom_microwire) {
+        eecd &= ~E1000_EECD_DO;
+    } else if (eeprom->type == e1000_eeprom_spi) {
+        eecd |= E1000_EECD_DO;
+    }
+    do {
+        /* A "1" is shifted out to the EEPROM by setting bit "DI" to a "1",
+         * and then raising and then lowering the clock (the SK bit controls
+         * the clock input to the EEPROM).  A "0" is shifted out to the EEPROM
+         * by setting "DI" to "0" and then raising and then lowering the clock.
+         */
+        eecd &= ~E1000_EECD_DI;
+
+        if (data & mask)
+            eecd |= E1000_EECD_DI;
+
+        ew32(EECD, eecd);
+        E1000_WRITE_FLUSH();
+
+        udelay(eeprom->delay_usec);
+
+        e1000_raise_ee_clk(hw, &eecd);
+        e1000_lower_ee_clk(hw, &eecd);
+
+        mask = mask >> 1;
+
+    } while (mask);
+
+    /* We leave the "DI" bit set to "0" when we leave this routine. */
+    eecd &= ~E1000_EECD_DI;
+    ew32(EECD, eecd);
+}
+
+/******************************************************************************
+ * Shift data bits in from the EEPROM
+ *
+ * hw - Struct containing variables accessed by shared code
+ *****************************************************************************/
+static u16 e1000_shift_in_ee_bits(struct e1000_hw *hw, u16 count)
+{
+    u32 eecd;
+    u32 i;
+    u16 data;
+
+    /* In order to read a register from the EEPROM, we need to shift 'count'
+     * bits in from the EEPROM. Bits are "shifted in" by raising the clock
+     * input to the EEPROM (setting the SK bit), and then reading the value of
+     * the "DO" bit.  During this "shifting in" process the "DI" bit should
+     * always be clear.
+     */
+
+    eecd = er32(EECD);
+
+    eecd &= ~(E1000_EECD_DO | E1000_EECD_DI);
+    data = 0;
+
+    for (i = 0; i < count; i++) {
+        data = data << 1;
+        e1000_raise_ee_clk(hw, &eecd);
+
+        eecd = er32(EECD);
+
+        eecd &= ~(E1000_EECD_DI);
+        if (eecd & E1000_EECD_DO)
+            data |= 1;
+
+        e1000_lower_ee_clk(hw, &eecd);
+    }
+
+    return data;
+}
+
+/******************************************************************************
+ * Prepares EEPROM for access
+ *
+ * hw - Struct containing variables accessed by shared code
+ *
+ * Lowers EEPROM clock. Clears input pin. Sets the chip select pin. This
+ * function should be called before issuing a command to the EEPROM.
+ *****************************************************************************/
+static s32 e1000_acquire_eeprom(struct e1000_hw *hw)
+{
+    struct e1000_eeprom_info *eeprom = &hw->eeprom;
+    u32 eecd, i = 0;
+
+    DEBUGFUNC("e1000_acquire_eeprom");
+
+    if (e1000_swfw_sync_acquire(hw, E1000_SWFW_EEP_SM))
+        return -E1000_ERR_SWFW_SYNC;
+    eecd = er32(EECD);
+
+    if (hw->mac_type != e1000_82573) {
+        /* Request EEPROM Access */
+        if (hw->mac_type > e1000_82544) {
+            eecd |= E1000_EECD_REQ;
+            ew32(EECD, eecd);
+            eecd = er32(EECD);
+            while ((!(eecd & E1000_EECD_GNT)) &&
+                  (i < E1000_EEPROM_GRANT_ATTEMPTS)) {
+                i++;
+                udelay(5);
+                eecd = er32(EECD);
+            }
+            if (!(eecd & E1000_EECD_GNT)) {
+                eecd &= ~E1000_EECD_REQ;
+                ew32(EECD, eecd);
+                DEBUGOUT("Could not acquire EEPROM grant\n");
+                e1000_swfw_sync_release(hw, E1000_SWFW_EEP_SM);
+                return -E1000_ERR_EEPROM;
+            }
+        }
+    }
+
+    /* Setup EEPROM for Read/Write */
+
+    if (eeprom->type == e1000_eeprom_microwire) {
+        /* Clear SK and DI */
+        eecd &= ~(E1000_EECD_DI | E1000_EECD_SK);
+        ew32(EECD, eecd);
+
+        /* Set CS */
+        eecd |= E1000_EECD_CS;
+        ew32(EECD, eecd);
+    } else if (eeprom->type == e1000_eeprom_spi) {
+        /* Clear SK and CS */
+        eecd &= ~(E1000_EECD_CS | E1000_EECD_SK);
+        ew32(EECD, eecd);
+        udelay(1);
+    }
+
+    return E1000_SUCCESS;
+}
+
+/******************************************************************************
+ * Returns EEPROM to a "standby" state
+ *
+ * hw - Struct containing variables accessed by shared code
+ *****************************************************************************/
+static void e1000_standby_eeprom(struct e1000_hw *hw)
+{
+    struct e1000_eeprom_info *eeprom = &hw->eeprom;
+    u32 eecd;
+
+    eecd = er32(EECD);
+
+    if (eeprom->type == e1000_eeprom_microwire) {
+        eecd &= ~(E1000_EECD_CS | E1000_EECD_SK);
+        ew32(EECD, eecd);
+        E1000_WRITE_FLUSH();
+        udelay(eeprom->delay_usec);
+
+        /* Clock high */
+        eecd |= E1000_EECD_SK;
+        ew32(EECD, eecd);
+        E1000_WRITE_FLUSH();
+        udelay(eeprom->delay_usec);
+
+        /* Select EEPROM */
+        eecd |= E1000_EECD_CS;
+        ew32(EECD, eecd);
+        E1000_WRITE_FLUSH();
+        udelay(eeprom->delay_usec);
+
+        /* Clock low */
+        eecd &= ~E1000_EECD_SK;
+        ew32(EECD, eecd);
+        E1000_WRITE_FLUSH();
+        udelay(eeprom->delay_usec);
+    } else if (eeprom->type == e1000_eeprom_spi) {
+        /* Toggle CS to flush commands */
+        eecd |= E1000_EECD_CS;
+        ew32(EECD, eecd);
+        E1000_WRITE_FLUSH();
+        udelay(eeprom->delay_usec);
+        eecd &= ~E1000_EECD_CS;
+        ew32(EECD, eecd);
+        E1000_WRITE_FLUSH();
+        udelay(eeprom->delay_usec);
+    }
+}
+
+/******************************************************************************
+ * Terminates a command by inverting the EEPROM's chip select pin
+ *
+ * hw - Struct containing variables accessed by shared code
+ *****************************************************************************/
+static void e1000_release_eeprom(struct e1000_hw *hw)
+{
+    u32 eecd;
+
+    DEBUGFUNC("e1000_release_eeprom");
+
+    eecd = er32(EECD);
+
+    if (hw->eeprom.type == e1000_eeprom_spi) {
+        eecd |= E1000_EECD_CS;  /* Pull CS high */
+        eecd &= ~E1000_EECD_SK; /* Lower SCK */
+
+        ew32(EECD, eecd);
+
+        udelay(hw->eeprom.delay_usec);
+    } else if (hw->eeprom.type == e1000_eeprom_microwire) {
+        /* cleanup eeprom */
+
+        /* CS on Microwire is active-high */
+        eecd &= ~(E1000_EECD_CS | E1000_EECD_DI);
+
+        ew32(EECD, eecd);
+
+        /* Rising edge of clock */
+        eecd |= E1000_EECD_SK;
+        ew32(EECD, eecd);
+        E1000_WRITE_FLUSH();
+        udelay(hw->eeprom.delay_usec);
+
+        /* Falling edge of clock */
+        eecd &= ~E1000_EECD_SK;
+        ew32(EECD, eecd);
+        E1000_WRITE_FLUSH();
+        udelay(hw->eeprom.delay_usec);
+    }
+
+    /* Stop requesting EEPROM access */
+    if (hw->mac_type > e1000_82544) {
+        eecd &= ~E1000_EECD_REQ;
+        ew32(EECD, eecd);
+    }
+
+    e1000_swfw_sync_release(hw, E1000_SWFW_EEP_SM);
+}
+
+/******************************************************************************
+ * Polls the SPI EEPROM's status register until it reports ready, or times out.
+ *
+ * hw - Struct containing variables accessed by shared code
+ *****************************************************************************/
+static s32 e1000_spi_eeprom_ready(struct e1000_hw *hw)
+{
+    u16 retry_count = 0;
+    u8 spi_stat_reg;
+
+    DEBUGFUNC("e1000_spi_eeprom_ready");
+
+    /* Read "Status Register" repeatedly until the LSB is cleared.  The
+     * EEPROM will signal that the command has been completed by clearing
+     * bit 0 of the internal status register.  If it's not cleared within
+     * 5 milliseconds, then error out.
+     */
+    retry_count = 0;
+    do {
+        e1000_shift_out_ee_bits(hw, EEPROM_RDSR_OPCODE_SPI,
+                                hw->eeprom.opcode_bits);
+        spi_stat_reg = (u8)e1000_shift_in_ee_bits(hw, 8);
+        if (!(spi_stat_reg & EEPROM_STATUS_RDY_SPI))
+            break;
+
+        udelay(5);
+        retry_count += 5;
+
+        e1000_standby_eeprom(hw);
+    } while (retry_count < EEPROM_MAX_RETRY_SPI);
+
+    /* ATMEL SPI write time could vary from 0-20mSec on 3.3V devices (and
+     * only 0-5mSec on 5V devices)
+     */
+    if (retry_count >= EEPROM_MAX_RETRY_SPI) {
+        DEBUGOUT("SPI EEPROM Status error\n");
+        return -E1000_ERR_EEPROM;
+    }
+
+    return E1000_SUCCESS;
+}
+
+/******************************************************************************
+ * Reads one or more 16 bit words from the EEPROM.
+ *
+ * hw - Struct containing variables accessed by shared code
+ * offset - offset of the first word in the EEPROM to read
+ * words - number of words to read
+ * data - words read from the EEPROM
+ *****************************************************************************/
+s32 e1000_read_eeprom(struct e1000_hw *hw, u16 offset, u16 words, u16 *data)
+{
+    s32 ret;
+    spin_lock(&e1000_eeprom_lock);
+    ret = e1000_do_read_eeprom(hw, offset, words, data);
+    spin_unlock(&e1000_eeprom_lock);
+    return ret;
+}
+
+static s32 e1000_do_read_eeprom(struct e1000_hw *hw, u16 offset, u16 words, u16 *data)
+{
+    struct e1000_eeprom_info *eeprom = &hw->eeprom;
+    u32 i = 0;
+
+    DEBUGFUNC("e1000_read_eeprom");
+
+    /* If eeprom is not yet detected, do so now */
+    if (eeprom->word_size == 0)
+        e1000_init_eeprom_params(hw);
+
+    /* A check for invalid values:  offset too large, too many words, and not
+     * enough words.
+     */
+    if ((offset >= eeprom->word_size) || (words > eeprom->word_size - offset) ||
+       (words == 0)) {
+        DEBUGOUT2("\"words\" parameter out of bounds. offset = %d, size = %d\n", offset, eeprom->word_size);
+        return -E1000_ERR_EEPROM;
+    }
+
+    /* EEPROM's that don't use EERD to read require us to bit-bang the SPI
+     * directly. In this case, we need to acquire the EEPROM so that
+     * FW or other port software does not interrupt.
+     */
+    if (e1000_is_onboard_nvm_eeprom(hw) && !hw->eeprom.use_eerd) {
+        /* Prepare the EEPROM for bit-bang reading */
+        if (e1000_acquire_eeprom(hw) != E1000_SUCCESS)
+            return -E1000_ERR_EEPROM;
+    }
+
+    /* Eerd register EEPROM access requires no eeprom acquire/release */
+    if (eeprom->use_eerd)
+        return e1000_read_eeprom_eerd(hw, offset, words, data);
+
+    /* ICH EEPROM access is done via the ICH flash controller */
+    if (eeprom->type == e1000_eeprom_ich8)
+        return e1000_read_eeprom_ich8(hw, offset, words, data);
+
+    /* Set up the SPI or Microwire EEPROM for bit-bang reading.  We have
+     * acquired the EEPROM at this point, so any returns should release it */
+    if (eeprom->type == e1000_eeprom_spi) {
+        u16 word_in;
+        u8 read_opcode = EEPROM_READ_OPCODE_SPI;
+
+        if (e1000_spi_eeprom_ready(hw)) {
+            e1000_release_eeprom(hw);
+            return -E1000_ERR_EEPROM;
+        }
+
+        e1000_standby_eeprom(hw);
+
+        /* Some SPI eeproms use the 8th address bit embedded in the opcode */
+        if ((eeprom->address_bits == 8) && (offset >= 128))
+            read_opcode |= EEPROM_A8_OPCODE_SPI;
+
+        /* Send the READ command (opcode + addr)  */
+        e1000_shift_out_ee_bits(hw, read_opcode, eeprom->opcode_bits);
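+        /* The SPI EEPROM is byte addressed, so double the word offset to get
+         * the byte address that is shifted out. */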
+        e1000_shift_out_ee_bits(hw, (u16)(offset*2), eeprom->address_bits);
+
+        /* Read the data.  The address of the eeprom internally increments with
+         * each byte (spi) being read, saving on the overhead of eeprom setup
+         * and tear-down.  The address counter will roll over if reading beyond
+         * the size of the eeprom, thus allowing the entire memory to be read
+         * starting from any offset. */
+        for (i = 0; i < words; i++) {
+            word_in = e1000_shift_in_ee_bits(hw, 16);
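+            /* The 16 bits arrive MSB first, so the byte at the lower EEPROM
+             * address lands in the upper half; swap bytes to form the word. */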
+            data[i] = (word_in >> 8) | (word_in << 8);
+        }
+    } else if (eeprom->type == e1000_eeprom_microwire) {
+        for (i = 0; i < words; i++) {
+            /* Send the READ command (opcode + addr)  */
+            e1000_shift_out_ee_bits(hw, EEPROM_READ_OPCODE_MICROWIRE,
+                                    eeprom->opcode_bits);
+            e1000_shift_out_ee_bits(hw, (u16)(offset + i),
+                                    eeprom->address_bits);
+
+            /* Read the data.  For microwire, each word requires the overhead
+             * of eeprom setup and tear-down. */
+            data[i] = e1000_shift_in_ee_bits(hw, 16);
+            e1000_standby_eeprom(hw);
+        }
+    }
+
+    /* End this read operation */
+    e1000_release_eeprom(hw);
+
+    return E1000_SUCCESS;
+}
+
+/******************************************************************************
+ * Reads one or more 16 bit words from the EEPROM using the EERD register.
+ *
+ * hw - Struct containing variables accessed by shared code
+ * offset - offset of the first word in the EEPROM to read
+ * data - buffer that receives the words read from the EEPROM
+ * words - number of words to read
+ *****************************************************************************/
+static s32 e1000_read_eeprom_eerd(struct e1000_hw *hw, u16 offset, u16 words,
+				  u16 *data)
+{
+    u32 i, eerd = 0;
+    s32 error = 0;
+
+    for (i = 0; i < words; i++) {
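+        /* Encode the word address and set the START bit to kick off the read */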
+        eerd = ((offset+i) << E1000_EEPROM_RW_ADDR_SHIFT) +
+                         E1000_EEPROM_RW_REG_START;
+
+        ew32(EERD, eerd);
+        error = e1000_poll_eerd_eewr_done(hw, E1000_EEPROM_POLL_READ);
+
+        if (error) {
+            break;
+        }
+        data[i] = (er32(EERD) >> E1000_EEPROM_RW_REG_DATA);
+
+    }
+
+    return error;
+}
+
+/******************************************************************************
+ * Writes one or more 16 bit words to the EEPROM using the EEWR register.
+ *
+ * hw - Struct containing variables accessed by shared code
+ * offset - offset of the first word in the EEPROM to write
+ * data - words to be written to the EEPROM
+ * words - number of words to write
+ *****************************************************************************/
+static s32 e1000_write_eeprom_eewr(struct e1000_hw *hw, u16 offset, u16 words,
+				   u16 *data)
+{
+    u32    register_value = 0;
+    u32    i              = 0;
+    s32     error          = 0;
+
+    if (e1000_swfw_sync_acquire(hw, E1000_SWFW_EEP_SM))
+        return -E1000_ERR_SWFW_SYNC;
+
+    for (i = 0; i < words; i++) {
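+        /* Pack the data word, the word address, and the START bit into the
+         * EEWR register layout. */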
+        register_value = (data[i] << E1000_EEPROM_RW_REG_DATA) |
+                         ((offset+i) << E1000_EEPROM_RW_ADDR_SHIFT) |
+                         E1000_EEPROM_RW_REG_START;
+
+        error = e1000_poll_eerd_eewr_done(hw, E1000_EEPROM_POLL_WRITE);
+        if (error) {
+            break;
+        }
+
+        ew32(EEWR, register_value);
+
+        error = e1000_poll_eerd_eewr_done(hw, E1000_EEPROM_POLL_WRITE);
+
+        if (error) {
+            break;
+        }
+    }
+
+    e1000_swfw_sync_release(hw, E1000_SWFW_EEP_SM);
+    return error;
+}
+
+/******************************************************************************
+ * Polls the status bit (bit 1) of the EERD or EEWR register to determine
+ * when the read or write is done.
+ *
+ * hw - Struct containing variables accessed by shared code
+ *****************************************************************************/
+static s32 e1000_poll_eerd_eewr_done(struct e1000_hw *hw, int eerd)
+{
+    u32 attempts = 100000;
+    u32 i, reg = 0;
+    s32 done = E1000_ERR_EEPROM;
+
+    for (i = 0; i < attempts; i++) {
+        if (eerd == E1000_EEPROM_POLL_READ)
+            reg = er32(EERD);
+        else
+            reg = er32(EEWR);
+
+        if (reg & E1000_EEPROM_RW_REG_DONE) {
+            done = E1000_SUCCESS;
+            break;
+        }
+        udelay(5);
+    }
+
+    return done;
+}
+
+/***************************************************************************
+* Description:     Determines if the onboard NVM is FLASH or EEPROM.
+*
+* hw - Struct containing variables accessed by shared code
+****************************************************************************/
+static bool e1000_is_onboard_nvm_eeprom(struct e1000_hw *hw)
+{
+    u32 eecd = 0;
+
+    DEBUGFUNC("e1000_is_onboard_nvm_eeprom");
+
+    if (hw->mac_type == e1000_ich8lan)
+        return false;
+
+    if (hw->mac_type == e1000_82573) {
+        eecd = er32(EECD);
+
+        /* Isolate bits 15 & 16 */
+        eecd = ((eecd >> 15) & 0x03);
+
+        /* If both bits are set, device is Flash type */
+        if (eecd == 0x03) {
+            return false;
+        }
+    }
+    return true;
+}
+
+/******************************************************************************
+ * Verifies that the EEPROM has a valid checksum
+ *
+ * hw - Struct containing variables accessed by shared code
+ *
+ * Reads the first 64 16 bit words of the EEPROM and sums the values read.
+ * If the sum of the 64 16 bit words is 0xBABA, the EEPROM's checksum is
+ * valid.
+ *****************************************************************************/
+s32 e1000_validate_eeprom_checksum(struct e1000_hw *hw)
+{
+    u16 checksum = 0;
+    u16 i, eeprom_data;
+
+    DEBUGFUNC("e1000_validate_eeprom_checksum");
+
+    if ((hw->mac_type == e1000_82573) && !e1000_is_onboard_nvm_eeprom(hw)) {
+        /* Check bit 4 of word 10h.  If it is 0, firmware is done updating
+         * 10h-12h.  Checksum may need to be fixed. */
+        e1000_read_eeprom(hw, 0x10, 1, &eeprom_data);
+        if ((eeprom_data & 0x10) == 0) {
+            /* Read 0x23 and check bit 15.  This bit is a 1 when the checksum
+             * has already been fixed.  If the checksum is still wrong and this
+             * bit is a 1, we need to return bad checksum.  Otherwise, we need
+             * to set this bit to a 1 and update the checksum. */
+            e1000_read_eeprom(hw, 0x23, 1, &eeprom_data);
+            if ((eeprom_data & 0x8000) == 0) {
+                eeprom_data |= 0x8000;
+                e1000_write_eeprom(hw, 0x23, 1, &eeprom_data);
+                e1000_update_eeprom_checksum(hw);
+            }
+        }
+    }
+
+    if (hw->mac_type == e1000_ich8lan) {
+        /* Drivers must allocate the shadow ram structure for the
+         * EEPROM checksum to be updated.  Otherwise, this bit as well
+         * as the checksum must both be set correctly for this
+         * validation to pass.
+         */
+        e1000_read_eeprom(hw, 0x19, 1, &eeprom_data);
+        if ((eeprom_data & 0x40) == 0) {
+            eeprom_data |= 0x40;
+            e1000_write_eeprom(hw, 0x19, 1, &eeprom_data);
+            e1000_update_eeprom_checksum(hw);
+        }
+    }
+
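+    /* Sum the first 64 words, including the stored checksum word; a valid
+     * image sums to EEPROM_SUM (0xBABA). */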
+    for (i = 0; i < (EEPROM_CHECKSUM_REG + 1); i++) {
+        if (e1000_read_eeprom(hw, i, 1, &eeprom_data) < 0) {
+            DEBUGOUT("EEPROM Read Error\n");
+            return -E1000_ERR_EEPROM;
+        }
+        checksum += eeprom_data;
+    }
+
+    if (checksum == (u16)EEPROM_SUM)
+        return E1000_SUCCESS;
+    else {
+        DEBUGOUT("EEPROM Checksum Invalid\n");
+        return -E1000_ERR_EEPROM;
+    }
+}
+
+/******************************************************************************
+ * Calculates the EEPROM checksum and writes it to the EEPROM
+ *
+ * hw - Struct containing variables accessed by shared code
+ *
+ * Sums the first 63 16 bit words of the EEPROM. Subtracts the sum from 0xBABA.
+ * Writes the difference to word offset 63 of the EEPROM.
+ *****************************************************************************/
+s32 e1000_update_eeprom_checksum(struct e1000_hw *hw)
+{
+    u32 ctrl_ext;
+    u16 checksum = 0;
+    u16 i, eeprom_data;
+
+    DEBUGFUNC("e1000_update_eeprom_checksum");
+
+    for (i = 0; i < EEPROM_CHECKSUM_REG; i++) {
+        if (e1000_read_eeprom(hw, i, 1, &eeprom_data) < 0) {
+            DEBUGOUT("EEPROM Read Error\n");
+            return -E1000_ERR_EEPROM;
+        }
+        checksum += eeprom_data;
+    }
+    checksum = (u16)EEPROM_SUM - checksum;
+    if (e1000_write_eeprom(hw, EEPROM_CHECKSUM_REG, 1, &checksum) < 0) {
+        DEBUGOUT("EEPROM Write Error\n");
+        return -E1000_ERR_EEPROM;
+    } else if (hw->eeprom.type == e1000_eeprom_flash) {
+        e1000_commit_shadow_ram(hw);
+    } else if (hw->eeprom.type == e1000_eeprom_ich8) {
+        e1000_commit_shadow_ram(hw);
+        /* Reload the EEPROM, or else modifications will not appear
+         * until after the next adapter reset. */
+        ctrl_ext = er32(CTRL_EXT);
+        ctrl_ext |= E1000_CTRL_EXT_EE_RST;
+        ew32(CTRL_EXT, ctrl_ext);
+        msleep(10);
+    }
+    return E1000_SUCCESS;
+}
+
+/******************************************************************************
+ * Parent function for writing words to the different EEPROM types.
+ *
+ * hw - Struct containing variables accessed by shared code
+ * offset - offset within the EEPROM to be written to
+ * words - number of words to write
+ * data - 16 bit words to be written to the EEPROM
+ *
+ * If e1000_update_eeprom_checksum is not called after this function, the
+ * EEPROM will most likely contain an invalid checksum.
+ *****************************************************************************/
+s32 e1000_write_eeprom(struct e1000_hw *hw, u16 offset, u16 words, u16 *data)
+{
+    s32 ret;
+    spin_lock(&e1000_eeprom_lock);
+    ret = e1000_do_write_eeprom(hw, offset, words, data);
+    spin_unlock(&e1000_eeprom_lock);
+    return ret;
+}
+
+
+static s32 e1000_do_write_eeprom(struct e1000_hw *hw, u16 offset, u16 words, u16 *data)
+{
+    struct e1000_eeprom_info *eeprom = &hw->eeprom;
+    s32 status = 0;
+
+    DEBUGFUNC("e1000_write_eeprom");
+
+    /* If eeprom is not yet detected, do so now */
+    if (eeprom->word_size == 0)
+        e1000_init_eeprom_params(hw);
+
+    /* A check for invalid values:  offset too large, too many words, and not
+     * enough words.
+     */
+    if ((offset >= eeprom->word_size) || (words > eeprom->word_size - offset) ||
+       (words == 0)) {
+        DEBUGOUT("\"words\" parameter out of bounds\n");
+        return -E1000_ERR_EEPROM;
+    }
+
+    /* 82573 writes only through eewr */
+    if (eeprom->use_eewr)
+        return e1000_write_eeprom_eewr(hw, offset, words, data);
+
+    if (eeprom->type == e1000_eeprom_ich8)
+        return e1000_write_eeprom_ich8(hw, offset, words, data);
+
+    /* Prepare the EEPROM for writing  */
+    if (e1000_acquire_eeprom(hw) != E1000_SUCCESS)
+        return -E1000_ERR_EEPROM;
+
+    if (eeprom->type == e1000_eeprom_microwire) {
+        status = e1000_write_eeprom_microwire(hw, offset, words, data);
+    } else {
+        status = e1000_write_eeprom_spi(hw, offset, words, data);
+        msleep(10);
+    }
+
+    /* Done with writing */
+    e1000_release_eeprom(hw);
+
+    return status;
+}
+
+/******************************************************************************
+ * Writes one or more 16 bit words to a given offset in an SPI EEPROM.
+ *
+ * hw - Struct containing variables accessed by shared code
+ * offset - offset within the EEPROM to be written to
+ * words - number of words to write
+ * data - pointer to array of 16 bit words to be written to the EEPROM
+ *
+ *****************************************************************************/
+static s32 e1000_write_eeprom_spi(struct e1000_hw *hw, u16 offset, u16 words,
+				  u16 *data)
+{
+    struct e1000_eeprom_info *eeprom = &hw->eeprom;
+    u16 widx = 0;
+
+    DEBUGFUNC("e1000_write_eeprom_spi");
+
+    while (widx < words) {
+        u8 write_opcode = EEPROM_WRITE_OPCODE_SPI;
+
+        if (e1000_spi_eeprom_ready(hw)) return -E1000_ERR_EEPROM;
+
+        e1000_standby_eeprom(hw);
+
+        /*  Send the WRITE ENABLE command (8 bit opcode )  */
+        e1000_shift_out_ee_bits(hw, EEPROM_WREN_OPCODE_SPI,
+                                    eeprom->opcode_bits);
+
+        e1000_standby_eeprom(hw);
+
+        /* Some SPI eeproms use the 8th address bit embedded in the opcode */
+        if ((eeprom->address_bits == 8) && (offset >= 128))
+            write_opcode |= EEPROM_A8_OPCODE_SPI;
+
+        /* Send the Write command (8-bit opcode + addr) */
+        e1000_shift_out_ee_bits(hw, write_opcode, eeprom->opcode_bits);
+
+        e1000_shift_out_ee_bits(hw, (u16)((offset + widx)*2),
+                                eeprom->address_bits);
+
+        /* Send the data */
+
+        /* Loop to allow for up to whole page write (32 bytes) of eeprom */
+        while (widx < words) {
+            u16 word_out = data[widx];
+            word_out = (word_out >> 8) | (word_out << 8);
+            e1000_shift_out_ee_bits(hw, word_out, 16);
+            widx++;
+
+            /* Some larger eeprom sizes are capable of a 32-byte PAGE WRITE
+             * operation, while the smaller eeproms are capable of an 8-byte
+             * PAGE WRITE operation.  Break the inner loop to pass new address
+             */
+            if ((((offset + widx)*2) % eeprom->page_size) == 0) {
+                e1000_standby_eeprom(hw);
+                break;
+            }
+        }
+    }
+
+    return E1000_SUCCESS;
+}
+
+/******************************************************************************
+ * Writes one or more 16 bit words to a given offset in a Microwire EEPROM.
+ *
+ * hw - Struct containing variables accessed by shared code
+ * offset - offset within the EEPROM to be written to
+ * words - number of words to write
+ * data - pointer to array of 16 bit words to be written to the EEPROM
+ *
+ *****************************************************************************/
+static s32 e1000_write_eeprom_microwire(struct e1000_hw *hw, u16 offset,
+					u16 words, u16 *data)
+{
+    struct e1000_eeprom_info *eeprom = &hw->eeprom;
+    u32 eecd;
+    u16 words_written = 0;
+    u16 i = 0;
+
+    DEBUGFUNC("e1000_write_eeprom_microwire");
+
+    /* Send the write enable command to the EEPROM (3-bit opcode plus
+     * 6/8-bit dummy address beginning with 11).  It's less work to include
+     * the 11 of the dummy address as part of the opcode than it is to shift
+     * it over the correct number of bits for the address.  This puts the
+     * EEPROM into write/erase mode.
+     */
+    e1000_shift_out_ee_bits(hw, EEPROM_EWEN_OPCODE_MICROWIRE,
+                            (u16)(eeprom->opcode_bits + 2));
+
+    e1000_shift_out_ee_bits(hw, 0, (u16)(eeprom->address_bits - 2));
+
+    /* Prepare the EEPROM */
+    e1000_standby_eeprom(hw);
+
+    while (words_written < words) {
+        /* Send the Write command (3-bit opcode + addr) */
+        e1000_shift_out_ee_bits(hw, EEPROM_WRITE_OPCODE_MICROWIRE,
+                                eeprom->opcode_bits);
+
+        e1000_shift_out_ee_bits(hw, (u16)(offset + words_written),
+                                eeprom->address_bits);
+
+        /* Send the data */
+        e1000_shift_out_ee_bits(hw, data[words_written], 16);
+
+        /* Toggle the CS line.  This in effect tells the EEPROM to execute
+         * the previous command.
+         */
+        e1000_standby_eeprom(hw);
+
+        /* Read DO repeatedly until it is high (equal to '1').  The EEPROM will
+         * signal that the command has been completed by raising the DO signal.
+         * If DO does not go high in 10 milliseconds, then error out.
+         */
+        for (i = 0; i < 200; i++) {
+            eecd = er32(EECD);
+            if (eecd & E1000_EECD_DO) break;
+            udelay(50);
+        }
+        if (i == 200) {
+            DEBUGOUT("EEPROM Write did not complete\n");
+            return -E1000_ERR_EEPROM;
+        }
+
+        /* Recover from write */
+        e1000_standby_eeprom(hw);
+
+        words_written++;
+    }
+
+    /* Send the write disable command to the EEPROM (3-bit opcode plus
+     * 6/8-bit dummy address beginning with 10).  It's less work to include
+     * the 10 of the dummy address as part of the opcode than it is to shift
+     * it over the correct number of bits for the address.  This takes the
+     * EEPROM out of write/erase mode.
+     */
+    e1000_shift_out_ee_bits(hw, EEPROM_EWDS_OPCODE_MICROWIRE,
+                            (u16)(eeprom->opcode_bits + 2));
+
+    e1000_shift_out_ee_bits(hw, 0, (u16)(eeprom->address_bits - 2));
+
+    return E1000_SUCCESS;
+}
+
+/******************************************************************************
+ * Flushes the cached eeprom to NVM. This is done by saving the modified values
+ * in the eeprom cache and the non modified values in the currently active bank
+ * to the new bank.
+ *
+ * hw - Struct containing variables accessed by shared code
+ *****************************************************************************/
+static s32 e1000_commit_shadow_ram(struct e1000_hw *hw)
+{
+    u32 attempts = 100000;
+    u32 eecd = 0;
+    u32 flop = 0;
+    u32 i = 0;
+    s32 error = E1000_SUCCESS;
+    u32 old_bank_offset = 0;
+    u32 new_bank_offset = 0;
+    u8 low_byte = 0;
+    u8 high_byte = 0;
+    bool sector_write_failed = false;
+
+    if (hw->mac_type == e1000_82573) {
+        /* The flop register will be used to determine if flash type is STM */
+        flop = er32(FLOP);
+        for (i=0; i < attempts; i++) {
+            eecd = er32(EECD);
+            if ((eecd & E1000_EECD_FLUPD) == 0) {
+                break;
+            }
+            udelay(5);
+        }
+
+        if (i == attempts) {
+            return -E1000_ERR_EEPROM;
+        }
+
+        /* If STM opcode located in bits 15:8 of flop, reset firmware */
+        if ((flop & 0xFF00) == E1000_STM_OPCODE) {
+            ew32(HICR, E1000_HICR_FW_RESET);
+        }
+
+        /* Perform the flash update */
+        ew32(EECD, eecd | E1000_EECD_FLUPD);
+
+        for (i=0; i < attempts; i++) {
+            eecd = er32(EECD);
+            if ((eecd & E1000_EECD_FLUPD) == 0) {
+                break;
+            }
+            udelay(5);
+        }
+
+        if (i == attempts) {
+            return -E1000_ERR_EEPROM;
+        }
+    }
+
+    if (hw->mac_type == e1000_ich8lan && hw->eeprom_shadow_ram != NULL) {
+        /* We're writing to the opposite bank so if we're on bank 1,
+         * write to bank 0 etc.  We also need to erase the segment that
+         * is going to be written */
+        if (!(er32(EECD) & E1000_EECD_SEC1VAL)) {
+            new_bank_offset = hw->flash_bank_size * 2;
+            old_bank_offset = 0;
+            e1000_erase_ich8_4k_segment(hw, 1);
+        } else {
+            old_bank_offset = hw->flash_bank_size * 2;
+            new_bank_offset = 0;
+            e1000_erase_ich8_4k_segment(hw, 0);
+        }
+
+        sector_write_failed = false;
+        /* Loop over every word in the shadow RAM; each word is written
+         * to the new bank as two separate bytes. */
+        for (i = 0; i < E1000_SHADOW_RAM_WORDS; i++) {
+            /* Determine whether to write the value stored
+             * in the other NVM bank or a modified value stored
+             * in the shadow RAM */
+            if (hw->eeprom_shadow_ram[i].modified) {
+                low_byte = (u8)hw->eeprom_shadow_ram[i].eeprom_word;
+                udelay(100);
+                error = e1000_verify_write_ich8_byte(hw,
+                            (i << 1) + new_bank_offset, low_byte);
+
+                if (error != E1000_SUCCESS)
+                    sector_write_failed = true;
+                else {
+                    high_byte =
+                        (u8)(hw->eeprom_shadow_ram[i].eeprom_word >> 8);
+                    udelay(100);
+                }
+            } else {
+                e1000_read_ich8_byte(hw, (i << 1) + old_bank_offset,
+                                     &low_byte);
+                udelay(100);
+                error = e1000_verify_write_ich8_byte(hw,
+                            (i << 1) + new_bank_offset, low_byte);
+
+                if (error != E1000_SUCCESS)
+                    sector_write_failed = true;
+                else {
+                    e1000_read_ich8_byte(hw, (i << 1) + old_bank_offset + 1,
+                                         &high_byte);
+                    udelay(100);
+                }
+            }
+
+            /* If the write of the low byte was successful, go ahead and
+             * write the high byte while checking to make sure that if it
+             * is the signature byte, then it is handled properly */
+            if (!sector_write_failed) {
+                /* If the word is 0x13, then make sure the signature bits
+                 * (15:14) are 11b until the commit has completed.
+                 * This will allow us to write 10b which indicates the
+                 * signature is valid.  We want to do this after the write
+                 * has completed so that we don't mark the segment valid
+                 * while the write is still in progress */
+                if (i == E1000_ICH_NVM_SIG_WORD)
+                    high_byte = E1000_ICH_NVM_SIG_MASK | high_byte;
+
+                error = e1000_verify_write_ich8_byte(hw,
+                            (i << 1) + new_bank_offset + 1, high_byte);
+                if (error != E1000_SUCCESS)
+                    sector_write_failed = true;
+
+            } else {
+                /* If the write failed then break from the loop and
+                 * return an error */
+                break;
+            }
+        }
+
+        /* Don't bother writing the segment valid bits if sector
+         * programming failed. */
+        if (!sector_write_failed) {
+            /* Finally, validate the new segment by setting bits 15:14
+             * to 10b in word 0x13. This can be done without an erase
+             * because these bits start out as 11b and we only need to
+             * change bit 14 to 0b. */
+            e1000_read_ich8_byte(hw,
+                                 E1000_ICH_NVM_SIG_WORD * 2 + 1 + new_bank_offset,
+                                 &high_byte);
+            high_byte &= 0xBF;
+            error = e1000_verify_write_ich8_byte(hw,
+                        E1000_ICH_NVM_SIG_WORD * 2 + 1 + new_bank_offset, high_byte);
+            /* And invalidate the previously valid segment by setting
+             * its signature word (0x13) high_byte to 0b. This can be
+             * done without an erase because flash erase sets all bits
+             * to 1's. We can write 1's to 0's without an erase */
+            if (error == E1000_SUCCESS) {
+                error = e1000_verify_write_ich8_byte(hw,
+                            E1000_ICH_NVM_SIG_WORD * 2 + 1 + old_bank_offset, 0);
+            }
+
+            /* Clear the now-unused entries in the cache */
+            for (i = 0; i < E1000_SHADOW_RAM_WORDS; i++) {
+                hw->eeprom_shadow_ram[i].modified = false;
+                hw->eeprom_shadow_ram[i].eeprom_word = 0xFFFF;
+            }
+        }
+    }
+
+    return error;
+}
+
+/******************************************************************************
+ * Reads the adapter's MAC address from the EEPROM and inverts the LSB for the
+ * second function of dual function devices
+ *
+ * hw - Struct containing variables accessed by shared code
+ *****************************************************************************/
+s32 e1000_read_mac_addr(struct e1000_hw *hw)
+{
+    u16 offset;
+    u16 eeprom_data, i;
+
+    DEBUGFUNC("e1000_read_mac_addr");
+
+    for (i = 0; i < NODE_ADDRESS_SIZE; i += 2) {
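+        /* Each EEPROM word holds two MAC address bytes; the low byte of the
+         * word supplies the lower-numbered byte of the address. */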
+        offset = i >> 1;
+        if (e1000_read_eeprom(hw, offset, 1, &eeprom_data) < 0) {
+            DEBUGOUT("EEPROM Read Error\n");
+            return -E1000_ERR_EEPROM;
+        }
+        hw->perm_mac_addr[i] = (u8)(eeprom_data & 0x00FF);
+        hw->perm_mac_addr[i+1] = (u8)(eeprom_data >> 8);
+    }
+
+    switch (hw->mac_type) {
+    default:
+        break;
+    case e1000_82546:
+    case e1000_82546_rev_3:
+    case e1000_82571:
+    case e1000_80003es2lan:
+        if (er32(STATUS) & E1000_STATUS_FUNC_1)
+            hw->perm_mac_addr[5] ^= 0x01;
+        break;
+    }
+
+    for (i = 0; i < NODE_ADDRESS_SIZE; i++)
+        hw->mac_addr[i] = hw->perm_mac_addr[i];
+    return E1000_SUCCESS;
+}
+
+/******************************************************************************
+ * Initializes receive address filters.
+ *
+ * hw - Struct containing variables accessed by shared code
+ *
+ * Places the MAC address in receive address register 0 and clears the rest
+ * of the receive address registers. Clears the multicast table. Assumes
+ * the receiver is in reset when the routine is called.
+ *****************************************************************************/
+static void e1000_init_rx_addrs(struct e1000_hw *hw)
+{
+    u32 i;
+    u32 rar_num;
+
+    DEBUGFUNC("e1000_init_rx_addrs");
+
+    /* Setup the receive address. */
+    DEBUGOUT("Programming MAC Address into RAR[0]\n");
+
+    e1000_rar_set(hw, hw->mac_addr, 0);
+
+    rar_num = E1000_RAR_ENTRIES;
+
+    /* Reserve a spot for the Locally Administered Address to work around
+     * an 82571 issue in which a reset on one port will reload the MAC on
+     * the other port. */
+    if ((hw->mac_type == e1000_82571) && (hw->laa_is_present))
+        rar_num -= 1;
+    if (hw->mac_type == e1000_ich8lan)
+        rar_num = E1000_RAR_ENTRIES_ICH8LAN;
+
+    /* Zero out the other 15 receive addresses. */
+    DEBUGOUT("Clearing RAR[1-15]\n");
+    for (i = 1; i < rar_num; i++) {
+        E1000_WRITE_REG_ARRAY(hw, RA, (i << 1), 0);
+        E1000_WRITE_FLUSH();
+        E1000_WRITE_REG_ARRAY(hw, RA, ((i << 1) + 1), 0);
+        E1000_WRITE_FLUSH();
+    }
+}
+
+/******************************************************************************
+ * Hashes an address to determine its location in the multicast table
+ *
+ * hw - Struct containing variables accessed by shared code
+ * mc_addr - the multicast address to hash
+ *****************************************************************************/
+u32 e1000_hash_mc_addr(struct e1000_hw *hw, u8 *mc_addr)
+{
+    u32 hash_value = 0;
+
+    /* The portion of the address that is used for the hash table is
+     * determined by the mc_filter_type setting.
+     */
+    switch (hw->mc_filter_type) {
+    /* [0] [1] [2] [3] [4] [5]
+     * 01  AA  00  12  34  56
+     * LSB                 MSB
+     */
+    case 0:
+        if (hw->mac_type == e1000_ich8lan) {
+            /* [47:38] i.e. 0x158 for above example address */
+            hash_value = ((mc_addr[4] >> 6) | (((u16)mc_addr[5]) << 2));
+        } else {
+            /* [47:36] i.e. 0x563 for above example address */
+            hash_value = ((mc_addr[4] >> 4) | (((u16)mc_addr[5]) << 4));
+        }
+        break;
+    case 1:
+        if (hw->mac_type == e1000_ich8lan) {
+            /* [46:37] i.e. 0x2B1 for above example address */
+            hash_value = ((mc_addr[4] >> 5) | (((u16)mc_addr[5]) << 3));
+        } else {
+            /* [46:35] i.e. 0xAC6 for above example address */
+            hash_value = ((mc_addr[4] >> 3) | (((u16)mc_addr[5]) << 5));
+        }
+        break;
+    case 2:
+        if (hw->mac_type == e1000_ich8lan) {
+            /*[45:36] i.e. 0x163 for above example address */
+            hash_value = ((mc_addr[4] >> 4) | (((u16)mc_addr[5]) << 4));
+        } else {
+            /* [45:34] i.e. 0x5D8 for above example address */
+            hash_value = ((mc_addr[4] >> 2) | (((u16)mc_addr[5]) << 6));
+        }
+        break;
+    case 3:
+        if (hw->mac_type == e1000_ich8lan) {
+            /* [43:34] i.e. 0x18D for above example address */
+            hash_value = ((mc_addr[4] >> 2) | (((u16)mc_addr[5]) << 6));
+        } else {
+            /* [43:32] i.e. 0x634 for above example address */
+            hash_value = ((mc_addr[4]) | (((u16)mc_addr[5]) << 8));
+        }
+        break;
+    }
+
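+    /* Clamp the hash to the size of the multicast table: 4096 bits
+     * normally, 1024 bits on ICH8. */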
+    hash_value &= 0xFFF;
+    if (hw->mac_type == e1000_ich8lan)
+        hash_value &= 0x3FF;
+
+    return hash_value;
+}
+
+/******************************************************************************
+ * Sets the bit in the multicast table corresponding to the hash value.
+ *
+ * hw - Struct containing variables accessed by shared code
+ * hash_value - Multicast address hash value
+ *****************************************************************************/
+void e1000_mta_set(struct e1000_hw *hw, u32 hash_value)
+{
+    u32 hash_bit, hash_reg;
+    u32 mta;
+    u32 temp;
+
+    /* The MTA is a register array of 128 32-bit registers.
+     * It is treated like an array of 4096 bits.  We want to set
+     * bit BitArray[hash_value]. So we figure out what register
+     * the bit is in, read it, OR in the new bit, then write
+     * back the new value.  The register is determined by the
+     * upper 7 bits of the hash value and the bit within that
+     * register is determined by the lower 5 bits of the value.
+     */
+    hash_reg = (hash_value >> 5) & 0x7F;
+    if (hw->mac_type == e1000_ich8lan)
+        hash_reg &= 0x1F;
+
+    hash_bit = hash_value & 0x1F;
+
+    mta = E1000_READ_REG_ARRAY(hw, MTA, hash_reg);
+
+    mta |= (1 << hash_bit);
+
+    /* If we are on an 82544 and we are trying to write an odd offset
+     * in the MTA, save off the previous entry before writing and
+     * restore the old value after writing.
+     */
+    if ((hw->mac_type == e1000_82544) && ((hash_reg & 0x1) == 1)) {
+        temp = E1000_READ_REG_ARRAY(hw, MTA, (hash_reg - 1));
+        E1000_WRITE_REG_ARRAY(hw, MTA, hash_reg, mta);
+        E1000_WRITE_FLUSH();
+        E1000_WRITE_REG_ARRAY(hw, MTA, (hash_reg - 1), temp);
+        E1000_WRITE_FLUSH();
+    } else {
+        E1000_WRITE_REG_ARRAY(hw, MTA, hash_reg, mta);
+        E1000_WRITE_FLUSH();
+    }
+}
+
+/******************************************************************************
+ * Puts an ethernet address into a receive address register.
+ *
+ * hw - Struct containing variables accessed by shared code
+ * addr - Address to put into receive address register
+ * index - Receive address register to write
+ *****************************************************************************/
+void e1000_rar_set(struct e1000_hw *hw, u8 *addr, u32 index)
+{
+    u32 rar_low, rar_high;
+
+    /* HW expects these in little endian so we reverse the byte order
+     * from network order (big endian) to little endian
+     */
+    rar_low = ((u32)addr[0] | ((u32)addr[1] << 8) |
+               ((u32)addr[2] << 16) | ((u32)addr[3] << 24));
+    rar_high = ((u32)addr[4] | ((u32)addr[5] << 8));
+
+    /* Disable Rx and flush all Rx frames before enabling RSS to avoid Rx
+     * unit hang.
+     *
+     * Description:
+     * If there are any Rx frames queued up or otherwise present in the HW
+     * before RSS is enabled, and then we enable RSS, the HW Rx unit will
+     * hang.  To work around this issue, we have to disable receives and
+     * flush out all Rx frames before we enable RSS. To do so, we
+     * redirect all Rx traffic to manageability and then reset the HW.
+     * This flushes away Rx frames, and (since the redirections to
+     * manageability persist across resets) keeps new ones from coming in
+     * while we work.  Then, we clear the Address Valid AV bit for all MAC
+     * addresses and undo the re-direction to manageability.
+     * Now, frames are coming in again, but the MAC won't accept them, so
+     * far so good.  We now proceed to initialize RSS (if necessary) and
+     * configure the Rx unit.  Last, we re-enable the AV bits and continue
+     * on our merry way.
+     */
+    switch (hw->mac_type) {
+    case e1000_82571:
+    case e1000_82572:
+    case e1000_80003es2lan:
+        if (hw->leave_av_bit_off)
+            break;
+    default:
+        /* Indicate to hardware the Address is Valid. */
+        rar_high |= E1000_RAH_AV;
+        break;
+    }
+
+    E1000_WRITE_REG_ARRAY(hw, RA, (index << 1), rar_low);
+    E1000_WRITE_FLUSH();
+    E1000_WRITE_REG_ARRAY(hw, RA, ((index << 1) + 1), rar_high);
+    E1000_WRITE_FLUSH();
+}
+
+/******************************************************************************
+ * Writes a value to the specified offset in the VLAN filter table.
+ *
+ * hw - Struct containing variables accessed by shared code
+ * offset - Offset in VLAN filter table to write
+ * value - Value to write into VLAN filter table
+ *****************************************************************************/
+void e1000_write_vfta(struct e1000_hw *hw, u32 offset, u32 value)
+{
+    u32 temp;
+
+    if (hw->mac_type == e1000_ich8lan)
+        return;
+
+    if ((hw->mac_type == e1000_82544) && ((offset & 0x1) == 1)) {
+        temp = E1000_READ_REG_ARRAY(hw, VFTA, (offset - 1));
+        E1000_WRITE_REG_ARRAY(hw, VFTA, offset, value);
+        E1000_WRITE_FLUSH();
+        E1000_WRITE_REG_ARRAY(hw, VFTA, (offset - 1), temp);
+        E1000_WRITE_FLUSH();
+    } else {
+        E1000_WRITE_REG_ARRAY(hw, VFTA, offset, value);
+        E1000_WRITE_FLUSH();
+    }
+}
+
+/******************************************************************************
+ * Clears the VLAN filter table
+ *
+ * hw - Struct containing variables accessed by shared code
+ *****************************************************************************/
+static void e1000_clear_vfta(struct e1000_hw *hw)
+{
+    u32 offset;
+    u32 vfta_value = 0;
+    u32 vfta_offset = 0;
+    u32 vfta_bit_in_reg = 0;
+
+    if (hw->mac_type == e1000_ich8lan)
+        return;
+
+    if (hw->mac_type == e1000_82573) {
+        if (hw->mng_cookie.vlan_id != 0) {
+            /* The VFTA is a 4096b bit-field, each identifying a single VLAN
+             * ID.  The following operations determine which 32b entry
+             * (i.e. offset) into the array we want to set the VLAN ID
+             * (i.e. bit) of the manageability unit. */
+            vfta_offset = (hw->mng_cookie.vlan_id >>
+                           E1000_VFTA_ENTRY_SHIFT) &
+                          E1000_VFTA_ENTRY_MASK;
+            vfta_bit_in_reg = 1 << (hw->mng_cookie.vlan_id &
+                                    E1000_VFTA_ENTRY_BIT_SHIFT_MASK);
+        }
+    }
+    for (offset = 0; offset < E1000_VLAN_FILTER_TBL_SIZE; offset++) {
+        /* If the offset we want to clear is the same offset of the
+         * manageability VLAN ID, then clear all bits except that of the
+         * manageability unit */
+        vfta_value = (offset == vfta_offset) ? vfta_bit_in_reg : 0;
+        E1000_WRITE_REG_ARRAY(hw, VFTA, offset, vfta_value);
+        E1000_WRITE_FLUSH();
+    }
+}
+
+static s32 e1000_id_led_init(struct e1000_hw *hw)
+{
+    u32 ledctl;
+    const u32 ledctl_mask = 0x000000FF;
+    const u32 ledctl_on = E1000_LEDCTL_MODE_LED_ON;
+    const u32 ledctl_off = E1000_LEDCTL_MODE_LED_OFF;
+    u16 eeprom_data, i, temp;
+    const u16 led_mask = 0x0F;
+
+    DEBUGFUNC("e1000_id_led_init");
+
+    if (hw->mac_type < e1000_82540) {
+        /* Nothing to do */
+        return E1000_SUCCESS;
+    }
+
+    ledctl = er32(LEDCTL);
+    hw->ledctl_default = ledctl;
+    hw->ledctl_mode1 = hw->ledctl_default;
+    hw->ledctl_mode2 = hw->ledctl_default;
+
+    if (e1000_read_eeprom(hw, EEPROM_ID_LED_SETTINGS, 1, &eeprom_data) < 0) {
+        DEBUGOUT("EEPROM Read Error\n");
+        return -E1000_ERR_EEPROM;
+    }
+
+    if ((hw->mac_type == e1000_82573) &&
+        (eeprom_data == ID_LED_RESERVED_82573))
+        eeprom_data = ID_LED_DEFAULT_82573;
+    else if ((eeprom_data == ID_LED_RESERVED_0000) ||
+            (eeprom_data == ID_LED_RESERVED_FFFF)) {
+        if (hw->mac_type == e1000_ich8lan)
+            eeprom_data = ID_LED_DEFAULT_ICH8LAN;
+        else
+            eeprom_data = ID_LED_DEFAULT;
+    }
+
+    for (i = 0; i < 4; i++) {
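+        /* Each of the four LEDs has a 4-bit setting in the EEPROM word and
+         * an 8-bit mode field in the LEDCTL register. */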
+        temp = (eeprom_data >> (i << 2)) & led_mask;
+        switch (temp) {
+        case ID_LED_ON1_DEF2:
+        case ID_LED_ON1_ON2:
+        case ID_LED_ON1_OFF2:
+            hw->ledctl_mode1 &= ~(ledctl_mask << (i << 3));
+            hw->ledctl_mode1 |= ledctl_on << (i << 3);
+            break;
+        case ID_LED_OFF1_DEF2:
+        case ID_LED_OFF1_ON2:
+        case ID_LED_OFF1_OFF2:
+            hw->ledctl_mode1 &= ~(ledctl_mask << (i << 3));
+            hw->ledctl_mode1 |= ledctl_off << (i << 3);
+            break;
+        default:
+            /* Do nothing */
+            break;
+        }
+        switch (temp) {
+        case ID_LED_DEF1_ON2:
+        case ID_LED_ON1_ON2:
+        case ID_LED_OFF1_ON2:
+            hw->ledctl_mode2 &= ~(ledctl_mask << (i << 3));
+            hw->ledctl_mode2 |= ledctl_on << (i << 3);
+            break;
+        case ID_LED_DEF1_OFF2:
+        case ID_LED_ON1_OFF2:
+        case ID_LED_OFF1_OFF2:
+            hw->ledctl_mode2 &= ~(ledctl_mask << (i << 3));
+            hw->ledctl_mode2 |= ledctl_off << (i << 3);
+            break;
+        default:
+            /* Do nothing */
+            break;
+        }
+    }
+    return E1000_SUCCESS;
+}
+
+/******************************************************************************
+ * Prepares SW controllable LED for use and saves the current state of the LED.
+ *
+ * hw - Struct containing variables accessed by shared code
+ *****************************************************************************/
+s32 e1000_setup_led(struct e1000_hw *hw)
+{
+    u32 ledctl;
+    s32 ret_val = E1000_SUCCESS;
+
+    DEBUGFUNC("e1000_setup_led");
+
+    switch (hw->mac_type) {
+    case e1000_82542_rev2_0:
+    case e1000_82542_rev2_1:
+    case e1000_82543:
+    case e1000_82544:
+        /* No setup necessary */
+        break;
+    case e1000_82541:
+    case e1000_82547:
+    case e1000_82541_rev_2:
+    case e1000_82547_rev_2:
+        /* Turn off PHY Smart Power Down (if enabled) */
+        ret_val = e1000_read_phy_reg(hw, IGP01E1000_GMII_FIFO,
+                                     &hw->phy_spd_default);
+        if (ret_val)
+            return ret_val;
+        ret_val = e1000_write_phy_reg(hw, IGP01E1000_GMII_FIFO,
+                                      (u16)(hw->phy_spd_default &
+                                      ~IGP01E1000_GMII_SPD));
+        if (ret_val)
+            return ret_val;
+        /* Fall Through */
+    default:
+        if (hw->media_type == e1000_media_type_fiber) {
+            ledctl = er32(LEDCTL);
+            /* Save current LEDCTL settings */
+            hw->ledctl_default = ledctl;
+            /* Turn off LED0 */
+            ledctl &= ~(E1000_LEDCTL_LED0_IVRT |
+                        E1000_LEDCTL_LED0_BLINK |
+                        E1000_LEDCTL_LED0_MODE_MASK);
+            ledctl |= (E1000_LEDCTL_MODE_LED_OFF <<
+                       E1000_LEDCTL_LED0_MODE_SHIFT);
+            ew32(LEDCTL, ledctl);
+        } else if (hw->media_type == e1000_media_type_copper)
+            ew32(LEDCTL, hw->ledctl_mode1);
+        break;
+    }
+
+    return E1000_SUCCESS;
+}
+
+
+/******************************************************************************
+ * Used on 82571 and later Si that has LED blink bits.
+ * Callers must use their own timer and should have already called
+ * e1000_id_led_init()
+ * Call e1000_cleanup_led() to stop blinking
+ *
+ * hw - Struct containing variables accessed by shared code
+ *****************************************************************************/
+s32 e1000_blink_led_start(struct e1000_hw *hw)
+{
+    s16  i;
+    u32 ledctl_blink = 0;
+
+    DEBUGFUNC("e1000_id_led_blink_on");
+
+    if (hw->mac_type < e1000_82571) {
+        /* Nothing to do */
+        return E1000_SUCCESS;
+    }
+    if (hw->media_type == e1000_media_type_fiber) {
+        /* always blink LED0 for PCI-E fiber */
+        ledctl_blink = E1000_LEDCTL_LED0_BLINK |
+                     (E1000_LEDCTL_MODE_LED_ON << E1000_LEDCTL_LED0_MODE_SHIFT);
+    } else {
+        /* set the blink bit for each LED that's "on" (0x0E) in ledctl_mode2 */
+        ledctl_blink = hw->ledctl_mode2;
+        for (i=0; i < 4; i++)
+            if (((hw->ledctl_mode2 >> (i * 8)) & 0xFF) ==
+                E1000_LEDCTL_MODE_LED_ON)
+                ledctl_blink |= (E1000_LEDCTL_LED0_BLINK << (i * 8));
+    }
+
+    ew32(LEDCTL, ledctl_blink);
+
+    return E1000_SUCCESS;
+}
+
+/******************************************************************************
+ * Restores the saved state of the SW controllable LED.
+ *
+ * hw - Struct containing variables accessed by shared code
+ *****************************************************************************/
+s32 e1000_cleanup_led(struct e1000_hw *hw)
+{
+    s32 ret_val = E1000_SUCCESS;
+
+    DEBUGFUNC("e1000_cleanup_led");
+
+    switch (hw->mac_type) {
+    case e1000_82542_rev2_0:
+    case e1000_82542_rev2_1:
+    case e1000_82543:
+    case e1000_82544:
+        /* No cleanup necessary */
+        break;
+    case e1000_82541:
+    case e1000_82547:
+    case e1000_82541_rev_2:
+    case e1000_82547_rev_2:
+        /* Turn on PHY Smart Power Down (if previously enabled) */
+        ret_val = e1000_write_phy_reg(hw, IGP01E1000_GMII_FIFO,
+                                      hw->phy_spd_default);
+        if (ret_val)
+            return ret_val;
+        /* Fall Through */
+    default:
+        if (hw->phy_type == e1000_phy_ife) {
+            e1000_write_phy_reg(hw, IFE_PHY_SPECIAL_CONTROL_LED, 0);
+            break;
+        }
+        /* Restore LEDCTL settings */
+        ew32(LEDCTL, hw->ledctl_default);
+        break;
+    }
+
+    return E1000_SUCCESS;
+}
+
+/******************************************************************************
+ * Turns on the software controllable LED
+ *
+ * hw - Struct containing variables accessed by shared code
+ *****************************************************************************/
+s32 e1000_led_on(struct e1000_hw *hw)
+{
+    u32 ctrl = er32(CTRL);
+
+    DEBUGFUNC("e1000_led_on");
+
+    switch (hw->mac_type) {
+    case e1000_82542_rev2_0:
+    case e1000_82542_rev2_1:
+    case e1000_82543:
+        /* Set SW Definable Pin 0 to turn on the LED */
+        ctrl |= E1000_CTRL_SWDPIN0;
+        ctrl |= E1000_CTRL_SWDPIO0;
+        break;
+    case e1000_82544:
+        if (hw->media_type == e1000_media_type_fiber) {
+            /* Set SW Definable Pin 0 to turn on the LED */
+            ctrl |= E1000_CTRL_SWDPIN0;
+            ctrl |= E1000_CTRL_SWDPIO0;
+        } else {
+            /* Clear SW Definable Pin 0 to turn on the LED */
+            ctrl &= ~E1000_CTRL_SWDPIN0;
+            ctrl |= E1000_CTRL_SWDPIO0;
+        }
+        break;
+    default:
+        if (hw->media_type == e1000_media_type_fiber) {
+            /* Clear SW Definable Pin 0 to turn on the LED */
+            ctrl &= ~E1000_CTRL_SWDPIN0;
+            ctrl |= E1000_CTRL_SWDPIO0;
+        } else if (hw->phy_type == e1000_phy_ife) {
+            e1000_write_phy_reg(hw, IFE_PHY_SPECIAL_CONTROL_LED,
+                 (IFE_PSCL_PROBE_MODE | IFE_PSCL_PROBE_LEDS_ON));
+        } else if (hw->media_type == e1000_media_type_copper) {
+            ew32(LEDCTL, hw->ledctl_mode2);
+            return E1000_SUCCESS;
+        }
+        break;
+    }
+
+    ew32(CTRL, ctrl);
+
+    return E1000_SUCCESS;
+}
+
+/******************************************************************************
+ * Turns off the software controllable LED
+ *
+ * hw - Struct containing variables accessed by shared code
+ *****************************************************************************/
+s32 e1000_led_off(struct e1000_hw *hw)
+{
+    u32 ctrl = er32(CTRL);
+
+    DEBUGFUNC("e1000_led_off");
+
+    switch (hw->mac_type) {
+    case e1000_82542_rev2_0:
+    case e1000_82542_rev2_1:
+    case e1000_82543:
+        /* Clear SW Definable Pin 0 to turn off the LED */
+        ctrl &= ~E1000_CTRL_SWDPIN0;
+        ctrl |= E1000_CTRL_SWDPIO0;
+        break;
+    case e1000_82544:
+        if (hw->media_type == e1000_media_type_fiber) {
+            /* Clear SW Definable Pin 0 to turn off the LED */
+            ctrl &= ~E1000_CTRL_SWDPIN0;
+            ctrl |= E1000_CTRL_SWDPIO0;
+        } else {
+            /* Set SW Definable Pin 0 to turn off the LED */
+            ctrl |= E1000_CTRL_SWDPIN0;
+            ctrl |= E1000_CTRL_SWDPIO0;
+        }
+        break;
+    default:
+        if (hw->media_type == e1000_media_type_fiber) {
+            /* Set SW Definable Pin 0 to turn off the LED */
+            ctrl |= E1000_CTRL_SWDPIN0;
+            ctrl |= E1000_CTRL_SWDPIO0;
+        } else if (hw->phy_type == e1000_phy_ife) {
+            e1000_write_phy_reg(hw, IFE_PHY_SPECIAL_CONTROL_LED,
+                 (IFE_PSCL_PROBE_MODE | IFE_PSCL_PROBE_LEDS_OFF));
+        } else if (hw->media_type == e1000_media_type_copper) {
+            ew32(LEDCTL, hw->ledctl_mode1);
+            return E1000_SUCCESS;
+        }
+        break;
+    }
+
+    ew32(CTRL, ctrl);
+
+    return E1000_SUCCESS;
+}
+
+/******************************************************************************
+ * Clears all hardware statistics counters.
+ *
+ * hw - Struct containing variables accessed by shared code
+ *****************************************************************************/
+static void e1000_clear_hw_cntrs(struct e1000_hw *hw)
+{
+    volatile u32 temp;
+
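+    /* These statistics registers are clear-on-read; reading each one
+     * resets its counter. */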
+    temp = er32(CRCERRS);
+    temp = er32(SYMERRS);
+    temp = er32(MPC);
+    temp = er32(SCC);
+    temp = er32(ECOL);
+    temp = er32(MCC);
+    temp = er32(LATECOL);
+    temp = er32(COLC);
+    temp = er32(DC);
+    temp = er32(SEC);
+    temp = er32(RLEC);
+    temp = er32(XONRXC);
+    temp = er32(XONTXC);
+    temp = er32(XOFFRXC);
+    temp = er32(XOFFTXC);
+    temp = er32(FCRUC);
+
+    if (hw->mac_type != e1000_ich8lan) {
+        temp = er32(PRC64);
+        temp = er32(PRC127);
+        temp = er32(PRC255);
+        temp = er32(PRC511);
+        temp = er32(PRC1023);
+        temp = er32(PRC1522);
+    }
+
+    temp = er32(GPRC);
+    temp = er32(BPRC);
+    temp = er32(MPRC);
+    temp = er32(GPTC);
+    temp = er32(GORCL);
+    temp = er32(GORCH);
+    temp = er32(GOTCL);
+    temp = er32(GOTCH);
+    temp = er32(RNBC);
+    temp = er32(RUC);
+    temp = er32(RFC);
+    temp = er32(ROC);
+    temp = er32(RJC);
+    temp = er32(TORL);
+    temp = er32(TORH);
+    temp = er32(TOTL);
+    temp = er32(TOTH);
+    temp = er32(TPR);
+    temp = er32(TPT);
+
+    if (hw->mac_type != e1000_ich8lan) {
+        temp = er32(PTC64);
+        temp = er32(PTC127);
+        temp = er32(PTC255);
+        temp = er32(PTC511);
+        temp = er32(PTC1023);
+        temp = er32(PTC1522);
+    }
+
+    temp = er32(MPTC);
+    temp = er32(BPTC);
+
+    if (hw->mac_type < e1000_82543) return;
+
+    temp = er32(ALGNERRC);
+    temp = er32(RXERRC);
+    temp = er32(TNCRS);
+    temp = er32(CEXTERR);
+    temp = er32(TSCTC);
+    temp = er32(TSCTFC);
+
+    if (hw->mac_type <= e1000_82544) return;
+
+    temp = er32(MGTPRC);
+    temp = er32(MGTPDC);
+    temp = er32(MGTPTC);
+
+    if (hw->mac_type <= e1000_82547_rev_2) return;
+
+    temp = er32(IAC);
+    temp = er32(ICRXOC);
+
+    if (hw->mac_type == e1000_ich8lan) return;
+
+    temp = er32(ICRXPTC);
+    temp = er32(ICRXATC);
+    temp = er32(ICTXPTC);
+    temp = er32(ICTXATC);
+    temp = er32(ICTXQEC);
+    temp = er32(ICTXQMTC);
+    temp = er32(ICRXDMTC);
+}
+
+/******************************************************************************
+ * Resets Adaptive IFS to its default state.
+ *
+ * hw - Struct containing variables accessed by shared code
+ *
+ * Call this after e1000_init_hw. You may override the IFS defaults by setting
+ * hw->ifs_params_forced to true. However, you must initialize
+ * hw->current_ifs_val, ifs_min_val, ifs_max_val, ifs_step_size, and ifs_ratio
+ * before calling this function.
+ *****************************************************************************/
+void e1000_reset_adaptive(struct e1000_hw *hw)
+{
+    DEBUGFUNC("e1000_reset_adaptive");
+
+    if (hw->adaptive_ifs) {
+        if (!hw->ifs_params_forced) {
+            hw->current_ifs_val = 0;
+            hw->ifs_min_val = IFS_MIN;
+            hw->ifs_max_val = IFS_MAX;
+            hw->ifs_step_size = IFS_STEP;
+            hw->ifs_ratio = IFS_RATIO;
+        }
+        hw->in_ifs_mode = false;
+        ew32(AIT, 0);
+    } else {
+        DEBUGOUT("Not in Adaptive IFS mode!\n");
+    }
+}
+
+/******************************************************************************
+ * Called during the callback/watchdog routine to update IFS value based on
+ * the ratio of transmits to collisions.
+ *
+ * hw - Struct containing variables accessed by shared code
+ * The transmit and collision counts since the last callback are taken from
+ * hw->tx_packet_delta and hw->collision_delta.
+ *****************************************************************************/
+void e1000_update_adaptive(struct e1000_hw *hw)
+{
+    DEBUGFUNC("e1000_update_adaptive");
+
+    if (hw->adaptive_ifs) {
+        if ((hw->collision_delta * hw->ifs_ratio) > hw->tx_packet_delta) {
+            if (hw->tx_packet_delta > MIN_NUM_XMITS) {
+                hw->in_ifs_mode = true;
+                if (hw->current_ifs_val < hw->ifs_max_val) {
+                    if (hw->current_ifs_val == 0)
+                        hw->current_ifs_val = hw->ifs_min_val;
+                    else
+                        hw->current_ifs_val += hw->ifs_step_size;
+                    ew32(AIT, hw->current_ifs_val);
+                }
+            }
+        } else {
+            if (hw->in_ifs_mode && (hw->tx_packet_delta <= MIN_NUM_XMITS)) {
+                hw->current_ifs_val = 0;
+                hw->in_ifs_mode = false;
+                ew32(AIT, 0);
+            }
+        }
+    } else {
+        DEBUGOUT("Not in Adaptive IFS mode!\n");
+    }
+}
+
+/******************************************************************************
+ * Adjusts the statistic counters when a frame is accepted by TBI_ACCEPT
+ *
+ * hw - Struct containing variables accessed by shared code
+ * frame_len - The length of the frame in question
+ * mac_addr - The Ethernet destination address of the frame in question
+ *****************************************************************************/
+void e1000_tbi_adjust_stats(struct e1000_hw *hw, struct e1000_hw_stats *stats,
+			    u32 frame_len, u8 *mac_addr)
+{
+    u64 carry_bit;
+
+    /* First adjust the frame length. */
+    frame_len--;
+    /* We need to adjust the statistics counters, since the hardware
+     * counters overcount this packet as a CRC error and undercount
+     * the packet as a good packet
+     */
+    /* This packet should not be counted as a CRC error.    */
+    stats->crcerrs--;
+    /* This packet does count as a Good Packet Received.    */
+    stats->gprc++;
+
+    /* Adjust the Good Octets received counters             */
+    carry_bit = 0x80000000 & stats->gorcl;
+    stats->gorcl += frame_len;
+    /* If the high bit of Gorcl (the low 32 bits of the Good Octets
+     * Received Count) was one before the addition,
+     * AND it is zero after, then we lost the carry out,
+     * need to add one to Gorch (Good Octets Received Count High).
+     * This could be simplified if all environments supported
+     * 64-bit integers.
+     */
+    if (carry_bit && ((stats->gorcl & 0x80000000) == 0))
+        stats->gorch++;
+    /* Is this a broadcast or multicast?  Check broadcast first,
+     * since the test for a multicast frame will test positive on
+     * a broadcast frame.
+     */
+    if ((mac_addr[0] == (u8)0xff) && (mac_addr[1] == (u8)0xff))
+        /* Broadcast packet */
+        stats->bprc++;
+    else if (*mac_addr & 0x01)
+        /* Multicast packet */
+        stats->mprc++;
+
+    if (frame_len == hw->max_frame_size) {
+        /* In this case, the hardware has overcounted the number of
+         * oversize frames.
+         */
+        if (stats->roc > 0)
+            stats->roc--;
+    }
+
+    /* Adjust the bin counters when the extra byte put the frame in the
+     * wrong bin. Remember that the frame_len was adjusted above.
+     */
+    if (frame_len == 64) {
+        stats->prc64++;
+        stats->prc127--;
+    } else if (frame_len == 127) {
+        stats->prc127++;
+        stats->prc255--;
+    } else if (frame_len == 255) {
+        stats->prc255++;
+        stats->prc511--;
+    } else if (frame_len == 511) {
+        stats->prc511++;
+        stats->prc1023--;
+    } else if (frame_len == 1023) {
+        stats->prc1023++;
+        stats->prc1522--;
+    } else if (frame_len == 1522) {
+        stats->prc1522++;
+    }
+}
+
+/******************************************************************************
+ * Gets the current PCI bus type, speed, and width of the hardware
+ *
+ * hw - Struct containing variables accessed by shared code
+ *****************************************************************************/
+void e1000_get_bus_info(struct e1000_hw *hw)
+{
+    s32 ret_val;
+    u16 pci_ex_link_status;
+    u32 status;
+
+    switch (hw->mac_type) {
+    case e1000_82542_rev2_0:
+    case e1000_82542_rev2_1:
+        hw->bus_type = e1000_bus_type_pci;
+        hw->bus_speed = e1000_bus_speed_unknown;
+        hw->bus_width = e1000_bus_width_unknown;
+        break;
+    case e1000_82571:
+    case e1000_82572:
+    case e1000_82573:
+    case e1000_80003es2lan:
+        hw->bus_type = e1000_bus_type_pci_express;
+        hw->bus_speed = e1000_bus_speed_2500;
+        ret_val = e1000_read_pcie_cap_reg(hw,
+                                      PCI_EX_LINK_STATUS,
+                                      &pci_ex_link_status);
+        if (ret_val)
+            hw->bus_width = e1000_bus_width_unknown;
+        else
+            hw->bus_width = (pci_ex_link_status & PCI_EX_LINK_WIDTH_MASK) >>
+                          PCI_EX_LINK_WIDTH_SHIFT;
+        break;
+    case e1000_ich8lan:
+        hw->bus_type = e1000_bus_type_pci_express;
+        hw->bus_speed = e1000_bus_speed_2500;
+        hw->bus_width = e1000_bus_width_pciex_1;
+        break;
+    default:
+        status = er32(STATUS);
+        hw->bus_type = (status & E1000_STATUS_PCIX_MODE) ?
+                       e1000_bus_type_pcix : e1000_bus_type_pci;
+
+        if (hw->device_id == E1000_DEV_ID_82546EB_QUAD_COPPER) {
+            hw->bus_speed = (hw->bus_type == e1000_bus_type_pci) ?
+                            e1000_bus_speed_66 : e1000_bus_speed_120;
+        } else if (hw->bus_type == e1000_bus_type_pci) {
+            hw->bus_speed = (status & E1000_STATUS_PCI66) ?
+                            e1000_bus_speed_66 : e1000_bus_speed_33;
+        } else {
+            switch (status & E1000_STATUS_PCIX_SPEED) {
+            case E1000_STATUS_PCIX_SPEED_66:
+                hw->bus_speed = e1000_bus_speed_66;
+                break;
+            case E1000_STATUS_PCIX_SPEED_100:
+                hw->bus_speed = e1000_bus_speed_100;
+                break;
+            case E1000_STATUS_PCIX_SPEED_133:
+                hw->bus_speed = e1000_bus_speed_133;
+                break;
+            default:
+                hw->bus_speed = e1000_bus_speed_reserved;
+                break;
+            }
+        }
+        hw->bus_width = (status & E1000_STATUS_BUS64) ?
+                        e1000_bus_width_64 : e1000_bus_width_32;
+        break;
+    }
+}
+
+/******************************************************************************
+ * Writes a value to one of the device's registers using port I/O (as opposed
+ * to memory-mapped I/O). Only 82544 and newer devices support port I/O.
+ *
+ * hw - Struct containing variables accessed by shared code
+ * offset - offset to write to
+ * value - value to write
+ *****************************************************************************/
+static void e1000_write_reg_io(struct e1000_hw *hw, u32 offset, u32 value)
+{
+    unsigned long io_addr = hw->io_base;
+    unsigned long io_data = hw->io_base + 4;
+
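+    /* The device exposes an indirect I/O window: the register offset is
+     * written to the address port (io_base) and the value to the data port
+     * (io_base + 4). */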
+    e1000_io_write(hw, io_addr, offset);
+    e1000_io_write(hw, io_data, value);
+}
+
+/******************************************************************************
+ * Estimates the cable length.
+ *
+ * hw - Struct containing variables accessed by shared code
+ * min_length - The estimated minimum length
+ * max_length - The estimated maximum length
+ *
+ * returns: - E1000_ERR_XXX
+ *            E1000_SUCCESS
+ *
+ * This function always returns a ranged length (minimum & maximum).
+ * For M88 PHYs, it maps the single value read from the register onto a
+ * minimum and maximum range.
+ * For IGP PHYs, it calculates the range from the AGC registers.
+ *****************************************************************************/
+static s32 e1000_get_cable_length(struct e1000_hw *hw, u16 *min_length,
+				  u16 *max_length)
+{
+    s32 ret_val;
+    u16 agc_value = 0;
+    u16 i, phy_data;
+    u16 cable_length;
+
+    DEBUGFUNC("e1000_get_cable_length");
+
+    *min_length = *max_length = 0;
+
+    /* Use the old method for PHYs older than IGP */
+    if (hw->phy_type == e1000_phy_m88) {
+
+        ret_val = e1000_read_phy_reg(hw, M88E1000_PHY_SPEC_STATUS,
+                                     &phy_data);
+        if (ret_val)
+            return ret_val;
+        cable_length = (phy_data & M88E1000_PSSR_CABLE_LENGTH) >>
+                       M88E1000_PSSR_CABLE_LENGTH_SHIFT;
+
+        /* Convert the enum value to ranged values */
+        switch (cable_length) {
+        case e1000_cable_length_50:
+            *min_length = 0;
+            *max_length = e1000_igp_cable_length_50;
+            break;
+        case e1000_cable_length_50_80:
+            *min_length = e1000_igp_cable_length_50;
+            *max_length = e1000_igp_cable_length_80;
+            break;
+        case e1000_cable_length_80_110:
+            *min_length = e1000_igp_cable_length_80;
+            *max_length = e1000_igp_cable_length_110;
+            break;
+        case e1000_cable_length_110_140:
+            *min_length = e1000_igp_cable_length_110;
+            *max_length = e1000_igp_cable_length_140;
+            break;
+        case e1000_cable_length_140:
+            *min_length = e1000_igp_cable_length_140;
+            *max_length = e1000_igp_cable_length_170;
+            break;
+        default:
+            return -E1000_ERR_PHY;
+            break;
+        }
+    } else if (hw->phy_type == e1000_phy_gg82563) {
+        ret_val = e1000_read_phy_reg(hw, GG82563_PHY_DSP_DISTANCE,
+                                     &phy_data);
+        if (ret_val)
+            return ret_val;
+        cable_length = phy_data & GG82563_DSPD_CABLE_LENGTH;
+
+        switch (cable_length) {
+        case e1000_gg_cable_length_60:
+            *min_length = 0;
+            *max_length = e1000_igp_cable_length_60;
+            break;
+        case e1000_gg_cable_length_60_115:
+            *min_length = e1000_igp_cable_length_60;
+            *max_length = e1000_igp_cable_length_115;
+            break;
+        case e1000_gg_cable_length_115_150:
+            *min_length = e1000_igp_cable_length_115;
+            *max_length = e1000_igp_cable_length_150;
+            break;
+        case e1000_gg_cable_length_150:
+            *min_length = e1000_igp_cable_length_150;
+            *max_length = e1000_igp_cable_length_180;
+            break;
+        default:
+            return -E1000_ERR_PHY;
+            break;
+        }
+    } else if (hw->phy_type == e1000_phy_igp) { /* For IGP PHY */
+        u16 cur_agc_value;
+        u16 min_agc_value = IGP01E1000_AGC_LENGTH_TABLE_SIZE;
+        u16 agc_reg_array[IGP01E1000_PHY_CHANNEL_NUM] =
+                                                         {IGP01E1000_PHY_AGC_A,
+                                                          IGP01E1000_PHY_AGC_B,
+                                                          IGP01E1000_PHY_AGC_C,
+                                                          IGP01E1000_PHY_AGC_D};
+        /* Read the AGC registers for all channels */
+        for (i = 0; i < IGP01E1000_PHY_CHANNEL_NUM; i++) {
+
+            ret_val = e1000_read_phy_reg(hw, agc_reg_array[i], &phy_data);
+            if (ret_val)
+                return ret_val;
+
+            cur_agc_value = phy_data >> IGP01E1000_AGC_LENGTH_SHIFT;
+
+            /* Value bound check. */
+            if ((cur_agc_value >= IGP01E1000_AGC_LENGTH_TABLE_SIZE - 1) ||
+                (cur_agc_value == 0))
+                return -E1000_ERR_PHY;
+
+            agc_value += cur_agc_value;
+
+            /* Update minimal AGC value. */
+            if (min_agc_value > cur_agc_value)
+                min_agc_value = cur_agc_value;
+        }
+
+        /* Remove the minimal AGC result for length < 50m */
+        if (agc_value < IGP01E1000_PHY_CHANNEL_NUM * e1000_igp_cable_length_50) {
+            agc_value -= min_agc_value;
+
+            /* Get the average length of the remaining 3 channels */
+            agc_value /= (IGP01E1000_PHY_CHANNEL_NUM - 1);
+        } else {
+            /* Get the average length of all the 4 channels. */
+            agc_value /= IGP01E1000_PHY_CHANNEL_NUM;
+        }
+
+        /* Set the range of the calculated length. */
+        *min_length = ((e1000_igp_cable_length_table[agc_value] -
+                       IGP01E1000_AGC_RANGE) > 0) ?
+                       (e1000_igp_cable_length_table[agc_value] -
+                       IGP01E1000_AGC_RANGE) : 0;
+        *max_length = e1000_igp_cable_length_table[agc_value] +
+                      IGP01E1000_AGC_RANGE;
+    } else if (hw->phy_type == e1000_phy_igp_2 ||
+               hw->phy_type == e1000_phy_igp_3) {
+        u16 cur_agc_index, max_agc_index = 0;
+        u16 min_agc_index = IGP02E1000_AGC_LENGTH_TABLE_SIZE - 1;
+        u16 agc_reg_array[IGP02E1000_PHY_CHANNEL_NUM] =
+                                                         {IGP02E1000_PHY_AGC_A,
+                                                          IGP02E1000_PHY_AGC_B,
+                                                          IGP02E1000_PHY_AGC_C,
+                                                          IGP02E1000_PHY_AGC_D};
+        /* Read the AGC registers for all channels */
+        for (i = 0; i < IGP02E1000_PHY_CHANNEL_NUM; i++) {
+            ret_val = e1000_read_phy_reg(hw, agc_reg_array[i], &phy_data);
+            if (ret_val)
+                return ret_val;
+
+            /* Get bits 15:9, which represent the combination of coarse and
+             * fine gain values.  The result is a number that can be put into
+             * the lookup table to obtain the approximate cable length. */
+            cur_agc_index = (phy_data >> IGP02E1000_AGC_LENGTH_SHIFT) &
+                            IGP02E1000_AGC_LENGTH_MASK;
+
+            /* Array index bound check. */
+            if ((cur_agc_index >= IGP02E1000_AGC_LENGTH_TABLE_SIZE) ||
+                (cur_agc_index == 0))
+                return -E1000_ERR_PHY;
+
+            /* Remove min & max AGC values from calculation. */
+            if (e1000_igp_2_cable_length_table[min_agc_index] >
+                e1000_igp_2_cable_length_table[cur_agc_index])
+                min_agc_index = cur_agc_index;
+            if (e1000_igp_2_cable_length_table[max_agc_index] <
+                e1000_igp_2_cable_length_table[cur_agc_index])
+                max_agc_index = cur_agc_index;
+
+            agc_value += e1000_igp_2_cable_length_table[cur_agc_index];
+        }
+
+        agc_value -= (e1000_igp_2_cable_length_table[min_agc_index] +
+                      e1000_igp_2_cable_length_table[max_agc_index]);
+        agc_value /= (IGP02E1000_PHY_CHANNEL_NUM - 2);
+
+        /* Calculate cable length with the error range of +/- 10 meters. */
+        *min_length = ((agc_value - IGP02E1000_AGC_RANGE) > 0) ?
+                       (agc_value - IGP02E1000_AGC_RANGE) : 0;
+        *max_length = agc_value + IGP02E1000_AGC_RANGE;
+    }
+
+    return E1000_SUCCESS;
+}
+
+/******************************************************************************
+ * Check the cable polarity
+ *
+ * hw - Struct containing variables accessed by shared code
+ * polarity - output parameter : 0 - Polarity is not reversed
+ *                               1 - Polarity is reversed.
+ *
+ * returns: - E1000_ERR_XXX
+ *            E1000_SUCCESS
+ *
+ * For phy's older than IGP, this function simply reads the polarity bit in the
+ * Phy Status register.  For IGP phy's, this bit is valid only if link speed is
+ * 10 Mbps.  If the link speed is 100 Mbps, there is no polarity status, so
+ * this bit returns 0.  If the link speed is 1000 Mbps, the polarity status is
+ * in the IGP01E1000_PHY_PCS_INIT_REG.
+ *****************************************************************************/
+static s32 e1000_check_polarity(struct e1000_hw *hw,
+				e1000_rev_polarity *polarity)
+{
+    s32 ret_val;
+    u16 phy_data;
+
+    DEBUGFUNC("e1000_check_polarity");
+
+    if ((hw->phy_type == e1000_phy_m88) ||
+        (hw->phy_type == e1000_phy_gg82563)) {
+        /* return the Polarity bit in the Status register. */
+        ret_val = e1000_read_phy_reg(hw, M88E1000_PHY_SPEC_STATUS,
+                                     &phy_data);
+        if (ret_val)
+            return ret_val;
+        *polarity = ((phy_data & M88E1000_PSSR_REV_POLARITY) >>
+                     M88E1000_PSSR_REV_POLARITY_SHIFT) ?
+                     e1000_rev_polarity_reversed : e1000_rev_polarity_normal;
+
+    } else if (hw->phy_type == e1000_phy_igp ||
+              hw->phy_type == e1000_phy_igp_3 ||
+              hw->phy_type == e1000_phy_igp_2) {
+        /* Read the Status register to check the speed */
+        ret_val = e1000_read_phy_reg(hw, IGP01E1000_PHY_PORT_STATUS,
+                                     &phy_data);
+        if (ret_val)
+            return ret_val;
+
+        /* If speed is 1000 Mbps, must read the IGP01E1000_PHY_PCS_INIT_REG to
+         * find the polarity status */
+        if ((phy_data & IGP01E1000_PSSR_SPEED_MASK) ==
+           IGP01E1000_PSSR_SPEED_1000MBPS) {
+
+            /* Read the GIG initialization PCS register (0x00B4) */
+            ret_val = e1000_read_phy_reg(hw, IGP01E1000_PHY_PCS_INIT_REG,
+                                         &phy_data);
+            if (ret_val)
+                return ret_val;
+
+            /* Check the polarity bits */
+            *polarity = (phy_data & IGP01E1000_PHY_POLARITY_MASK) ?
+                         e1000_rev_polarity_reversed : e1000_rev_polarity_normal;
+        } else {
+            /* For 10 Mbps, read the polarity bit in the status register. (for
+             * 100 Mbps this bit is always 0) */
+            *polarity = (phy_data & IGP01E1000_PSSR_POLARITY_REVERSED) ?
+                         e1000_rev_polarity_reversed : e1000_rev_polarity_normal;
+        }
+    } else if (hw->phy_type == e1000_phy_ife) {
+        ret_val = e1000_read_phy_reg(hw, IFE_PHY_EXTENDED_STATUS_CONTROL,
+                                     &phy_data);
+        if (ret_val)
+            return ret_val;
+        *polarity = ((phy_data & IFE_PESC_POLARITY_REVERSED) >>
+                     IFE_PESC_POLARITY_REVERSED_SHIFT) ?
+                     e1000_rev_polarity_reversed : e1000_rev_polarity_normal;
+    }
+    return E1000_SUCCESS;
+}
+
+/******************************************************************************
+ * Check if Downshift occurred
+ *
+ * hw - Struct containing variables accessed by shared code
+ * downshift - output parameter : 0 - No Downshift occurred.
+ *                                1 - Downshift occurred.
+ *
+ * returns: - E1000_ERR_XXX
+ *            E1000_SUCCESS
+ *
+ * For phy's older than IGP, this function reads the Downshift bit in the Phy
+ * Specific Status register.  For IGP phy's, it reads the Downgrade bit in the
+ * Link Health register.  In IGP this bit is latched high, so the driver must
+ * read it immediately after link is established.
+ *****************************************************************************/
+static s32 e1000_check_downshift(struct e1000_hw *hw)
+{
+    s32 ret_val;
+    u16 phy_data;
+
+    DEBUGFUNC("e1000_check_downshift");
+
+    if (hw->phy_type == e1000_phy_igp ||
+        hw->phy_type == e1000_phy_igp_3 ||
+        hw->phy_type == e1000_phy_igp_2) {
+        ret_val = e1000_read_phy_reg(hw, IGP01E1000_PHY_LINK_HEALTH,
+                                     &phy_data);
+        if (ret_val)
+            return ret_val;
+
+        hw->speed_downgraded = (phy_data & IGP01E1000_PLHR_SS_DOWNGRADE) ? 1 : 0;
+    } else if ((hw->phy_type == e1000_phy_m88) ||
+               (hw->phy_type == e1000_phy_gg82563)) {
+        ret_val = e1000_read_phy_reg(hw, M88E1000_PHY_SPEC_STATUS,
+                                     &phy_data);
+        if (ret_val)
+            return ret_val;
+
+        hw->speed_downgraded = (phy_data & M88E1000_PSSR_DOWNSHIFT) >>
+                               M88E1000_PSSR_DOWNSHIFT_SHIFT;
+    } else if (hw->phy_type == e1000_phy_ife) {
+        /* e1000_phy_ife supports 10/100 speed only */
+        hw->speed_downgraded = false;
+    }
+
+    return E1000_SUCCESS;
+}
+
+/*****************************************************************************
+ *
+ * 82541_rev_2 & 82547_rev_2 have the capability to configure the DSP when a
+ * gigabit link is achieved to improve link quality.
+ *
+ * hw: Struct containing variables accessed by shared code
+ *
+ * returns: - E1000_ERR_PHY if fail to read/write the PHY
+ *            E1000_SUCCESS at any other case.
+ *
+ ****************************************************************************/
+
+static s32 e1000_config_dsp_after_link_change(struct e1000_hw *hw, bool link_up)
+{
+    s32 ret_val;
+    u16 phy_data, phy_saved_data, speed, duplex, i;
+    u16 dsp_reg_array[IGP01E1000_PHY_CHANNEL_NUM] =
+                                        {IGP01E1000_PHY_AGC_PARAM_A,
+                                        IGP01E1000_PHY_AGC_PARAM_B,
+                                        IGP01E1000_PHY_AGC_PARAM_C,
+                                        IGP01E1000_PHY_AGC_PARAM_D};
+    u16 min_length, max_length;
+
+    DEBUGFUNC("e1000_config_dsp_after_link_change");
+
+    if (hw->phy_type != e1000_phy_igp)
+        return E1000_SUCCESS;
+
+    if (link_up) {
+        ret_val = e1000_get_speed_and_duplex(hw, &speed, &duplex);
+        if (ret_val) {
+            DEBUGOUT("Error getting link speed and duplex\n");
+            return ret_val;
+        }
+
+        if (speed == SPEED_1000) {
+
+            ret_val = e1000_get_cable_length(hw, &min_length, &max_length);
+            if (ret_val)
+                return ret_val;
+
+            if ((hw->dsp_config_state == e1000_dsp_config_enabled) &&
+                min_length >= e1000_igp_cable_length_50) {
+
+                for (i = 0; i < IGP01E1000_PHY_CHANNEL_NUM; i++) {
+                    ret_val = e1000_read_phy_reg(hw, dsp_reg_array[i],
+                                                 &phy_data);
+                    if (ret_val)
+                        return ret_val;
+
+                    phy_data &= ~IGP01E1000_PHY_EDAC_MU_INDEX;
+
+                    ret_val = e1000_write_phy_reg(hw, dsp_reg_array[i],
+                                                  phy_data);
+                    if (ret_val)
+                        return ret_val;
+                }
+                hw->dsp_config_state = e1000_dsp_config_activated;
+            }
+
+            if ((hw->ffe_config_state == e1000_ffe_config_enabled) &&
+               (min_length < e1000_igp_cable_length_50)) {
+
+                u16 ffe_idle_err_timeout = FFE_IDLE_ERR_COUNT_TIMEOUT_20;
+                u32 idle_errs = 0;
+
+                /* clear previous idle error counts */
+                ret_val = e1000_read_phy_reg(hw, PHY_1000T_STATUS,
+                                             &phy_data);
+                if (ret_val)
+                    return ret_val;
+
+                for (i = 0; i < ffe_idle_err_timeout; i++) {
+                    udelay(1000);
+                    ret_val = e1000_read_phy_reg(hw, PHY_1000T_STATUS,
+                                                 &phy_data);
+                    if (ret_val)
+                        return ret_val;
+
+                    idle_errs += (phy_data & SR_1000T_IDLE_ERROR_CNT);
+                    if (idle_errs > SR_1000T_PHY_EXCESSIVE_IDLE_ERR_COUNT) {
+                        hw->ffe_config_state = e1000_ffe_config_active;
+
+                        ret_val = e1000_write_phy_reg(hw,
+                                    IGP01E1000_PHY_DSP_FFE,
+                                    IGP01E1000_PHY_DSP_FFE_CM_CP);
+                        if (ret_val)
+                            return ret_val;
+                        break;
+                    }
+
+                    if (idle_errs)
+                        ffe_idle_err_timeout = FFE_IDLE_ERR_COUNT_TIMEOUT_100;
+                }
+            }
+        }
+    } else {
+        if (hw->dsp_config_state == e1000_dsp_config_activated) {
+            /* Save off the current value of register 0x2F5B to be restored at
+             * the end of the routines. */
+            ret_val = e1000_read_phy_reg(hw, 0x2F5B, &phy_saved_data);
+
+            if (ret_val)
+                return ret_val;
+
+            /* Disable the PHY transmitter */
+            ret_val = e1000_write_phy_reg(hw, 0x2F5B, 0x0003);
+
+            if (ret_val)
+                return ret_val;
+
+            mdelay(20);
+
+            ret_val = e1000_write_phy_reg(hw, 0x0000,
+                                          IGP01E1000_IEEE_FORCE_GIGA);
+            if (ret_val)
+                return ret_val;
+            for (i = 0; i < IGP01E1000_PHY_CHANNEL_NUM; i++) {
+                ret_val = e1000_read_phy_reg(hw, dsp_reg_array[i], &phy_data);
+                if (ret_val)
+                    return ret_val;
+
+                phy_data &= ~IGP01E1000_PHY_EDAC_MU_INDEX;
+                phy_data |=  IGP01E1000_PHY_EDAC_SIGN_EXT_9_BITS;
+
+                ret_val = e1000_write_phy_reg(hw,dsp_reg_array[i], phy_data);
+                if (ret_val)
+                    return ret_val;
+            }
+
+            ret_val = e1000_write_phy_reg(hw, 0x0000,
+                                          IGP01E1000_IEEE_RESTART_AUTONEG);
+            if (ret_val)
+                return ret_val;
+
+            mdelay(20);
+
+            /* Now enable the transmitter */
+            ret_val = e1000_write_phy_reg(hw, 0x2F5B, phy_saved_data);
+
+            if (ret_val)
+                return ret_val;
+
+            hw->dsp_config_state = e1000_dsp_config_enabled;
+        }
+
+        if (hw->ffe_config_state == e1000_ffe_config_active) {
+            /* Save off the current value of register 0x2F5B to be restored at
+             * the end of the routines. */
+            ret_val = e1000_read_phy_reg(hw, 0x2F5B, &phy_saved_data);
+
+            if (ret_val)
+                return ret_val;
+
+            /* Disable the PHY transmitter */
+            ret_val = e1000_write_phy_reg(hw, 0x2F5B, 0x0003);
+
+            if (ret_val)
+                return ret_val;
+
+            mdelay(20);
+
+            ret_val = e1000_write_phy_reg(hw, 0x0000,
+                                          IGP01E1000_IEEE_FORCE_GIGA);
+            if (ret_val)
+                return ret_val;
+            ret_val = e1000_write_phy_reg(hw, IGP01E1000_PHY_DSP_FFE,
+                                          IGP01E1000_PHY_DSP_FFE_DEFAULT);
+            if (ret_val)
+                return ret_val;
+
+            ret_val = e1000_write_phy_reg(hw, 0x0000,
+                                          IGP01E1000_IEEE_RESTART_AUTONEG);
+            if (ret_val)
+                return ret_val;
+
+            mdelay(20);
+
+            /* Now enable the transmitter */
+            ret_val = e1000_write_phy_reg(hw, 0x2F5B, phy_saved_data);
+
+            if (ret_val)
+                return ret_val;
+
+            hw->ffe_config_state = e1000_ffe_config_enabled;
+        }
+    }
+    return E1000_SUCCESS;
+}
+
+/*****************************************************************************
+ * Set PHY to class A mode
+ * Assumes the following operations will follow to enable the new class mode.
+ *  1. Do a PHY soft reset
+ *  2. Restart auto-negotiation or force link.
+ *
+ * hw - Struct containing variables accessed by shared code
+ ****************************************************************************/
+static s32 e1000_set_phy_mode(struct e1000_hw *hw)
+{
+    s32 ret_val;
+    u16 eeprom_data;
+
+    DEBUGFUNC("e1000_set_phy_mode");
+
+    if ((hw->mac_type == e1000_82545_rev_3) &&
+        (hw->media_type == e1000_media_type_copper)) {
+        ret_val = e1000_read_eeprom(hw, EEPROM_PHY_CLASS_WORD, 1, &eeprom_data);
+        if (ret_val) {
+            return ret_val;
+        }
+
+        if ((eeprom_data != EEPROM_RESERVED_WORD) &&
+            (eeprom_data & EEPROM_PHY_CLASS_A)) {
+            ret_val = e1000_write_phy_reg(hw, M88E1000_PHY_PAGE_SELECT, 0x000B);
+            if (ret_val)
+                return ret_val;
+            ret_val = e1000_write_phy_reg(hw, M88E1000_PHY_GEN_CONTROL, 0x8104);
+            if (ret_val)
+                return ret_val;
+
+            hw->phy_reset_disable = false;
+        }
+    }
+
+    return E1000_SUCCESS;
+}
+
+/*****************************************************************************
+ *
+ * This function sets the LPLU state according to the active flag.  When
+ * activating LPLU this function also disables smart speed and vice versa.
+ * LPLU will not be activated unless the device autonegotiation advertisement
+ * meets standards of either 10 or 10/100 or 10/100/1000 at all duplexes.
+ * hw: Struct containing variables accessed by shared code
+ * active - true to enable LPLU, false to disable LPLU.
+ *
+ * returns: - E1000_ERR_PHY if fail to read/write the PHY
+ *            E1000_SUCCESS at any other case.
+ *
+ ****************************************************************************/
+
+static s32 e1000_set_d3_lplu_state(struct e1000_hw *hw, bool active)
+{
+    u32 phy_ctrl = 0;
+    s32 ret_val;
+    u16 phy_data;
+    DEBUGFUNC("e1000_set_d3_lplu_state");
+
+    if (hw->phy_type != e1000_phy_igp && hw->phy_type != e1000_phy_igp_2
+        && hw->phy_type != e1000_phy_igp_3)
+        return E1000_SUCCESS;
+
+    /* During driver activity LPLU should not be used or it will attain link
+     * from the lowest speeds starting from 10Mbps. The capability is used for
+     * Dx transitions and states */
+    if (hw->mac_type == e1000_82541_rev_2 || hw->mac_type == e1000_82547_rev_2) {
+        ret_val = e1000_read_phy_reg(hw, IGP01E1000_GMII_FIFO, &phy_data);
+        if (ret_val)
+            return ret_val;
+    } else if (hw->mac_type == e1000_ich8lan) {
+        /* The MAC writes into the PHY register based on the state transition
+         * and starts auto-negotiation.  The SW driver can overwrite the
+         * settings in the CSR PHY power control register E1000_PHY_CTRL. */
+        phy_ctrl = er32(PHY_CTRL);
+    } else {
+        ret_val = e1000_read_phy_reg(hw, IGP02E1000_PHY_POWER_MGMT, &phy_data);
+        if (ret_val)
+            return ret_val;
+    }
+
+    if (!active) {
+        if (hw->mac_type == e1000_82541_rev_2 ||
+            hw->mac_type == e1000_82547_rev_2) {
+            phy_data &= ~IGP01E1000_GMII_FLEX_SPD;
+            ret_val = e1000_write_phy_reg(hw, IGP01E1000_GMII_FIFO, phy_data);
+            if (ret_val)
+                return ret_val;
+        } else {
+            if (hw->mac_type == e1000_ich8lan) {
+                phy_ctrl &= ~E1000_PHY_CTRL_NOND0A_LPLU;
+                ew32(PHY_CTRL, phy_ctrl);
+            } else {
+                phy_data &= ~IGP02E1000_PM_D3_LPLU;
+                ret_val = e1000_write_phy_reg(hw, IGP02E1000_PHY_POWER_MGMT,
+                                              phy_data);
+                if (ret_val)
+                    return ret_val;
+            }
+        }
+
+        /* LPLU and SmartSpeed are mutually exclusive.  LPLU is used during
+         * Dx states where the power conservation is most important.  During
+         * driver activity we should enable SmartSpeed, so performance is
+         * maintained. */
+        if (hw->smart_speed == e1000_smart_speed_on) {
+            ret_val = e1000_read_phy_reg(hw, IGP01E1000_PHY_PORT_CONFIG,
+                                         &phy_data);
+            if (ret_val)
+                return ret_val;
+
+            phy_data |= IGP01E1000_PSCFR_SMART_SPEED;
+            ret_val = e1000_write_phy_reg(hw, IGP01E1000_PHY_PORT_CONFIG,
+                                          phy_data);
+            if (ret_val)
+                return ret_val;
+        } else if (hw->smart_speed == e1000_smart_speed_off) {
+            ret_val = e1000_read_phy_reg(hw, IGP01E1000_PHY_PORT_CONFIG,
+                                         &phy_data);
+            if (ret_val)
+                return ret_val;
+
+            phy_data &= ~IGP01E1000_PSCFR_SMART_SPEED;
+            ret_val = e1000_write_phy_reg(hw, IGP01E1000_PHY_PORT_CONFIG,
+                                          phy_data);
+            if (ret_val)
+                return ret_val;
+        }
+
+    } else if ((hw->autoneg_advertised == AUTONEG_ADVERTISE_SPEED_DEFAULT) ||
+               (hw->autoneg_advertised == AUTONEG_ADVERTISE_10_ALL ) ||
+               (hw->autoneg_advertised == AUTONEG_ADVERTISE_10_100_ALL)) {
+
+        if (hw->mac_type == e1000_82541_rev_2 ||
+            hw->mac_type == e1000_82547_rev_2) {
+            phy_data |= IGP01E1000_GMII_FLEX_SPD;
+            ret_val = e1000_write_phy_reg(hw, IGP01E1000_GMII_FIFO, phy_data);
+            if (ret_val)
+                return ret_val;
+        } else {
+            if (hw->mac_type == e1000_ich8lan) {
+                phy_ctrl |= E1000_PHY_CTRL_NOND0A_LPLU;
+                ew32(PHY_CTRL, phy_ctrl);
+            } else {
+                phy_data |= IGP02E1000_PM_D3_LPLU;
+                ret_val = e1000_write_phy_reg(hw, IGP02E1000_PHY_POWER_MGMT,
+                                              phy_data);
+                if (ret_val)
+                    return ret_val;
+            }
+        }
+
+        /* When LPLU is enabled we should disable SmartSpeed */
+        ret_val = e1000_read_phy_reg(hw, IGP01E1000_PHY_PORT_CONFIG, &phy_data);
+        if (ret_val)
+            return ret_val;
+
+        phy_data &= ~IGP01E1000_PSCFR_SMART_SPEED;
+        ret_val = e1000_write_phy_reg(hw, IGP01E1000_PHY_PORT_CONFIG, phy_data);
+        if (ret_val)
+            return ret_val;
+
+    }
+    return E1000_SUCCESS;
+}
+
+/*****************************************************************************
+ *
+ * This function sets the LPLU D0 state according to the active flag.  When
+ * activating LPLU this function also disables smart speed and vice versa.
+ * LPLU will not be activated unless the device autonegotiation advertisement
+ * meets standards of either 10 or 10/100 or 10/100/1000 at all duplexes.
+ * hw: Struct containing variables accessed by shared code
+ * active - true to enable LPLU, false to disable LPLU.
+ *
+ * returns: - E1000_ERR_PHY if fail to read/write the PHY
+ *            E1000_SUCCESS at any other case.
+ *
+ ****************************************************************************/
+
+static s32 e1000_set_d0_lplu_state(struct e1000_hw *hw, bool active)
+{
+    u32 phy_ctrl = 0;
+    s32 ret_val;
+    u16 phy_data;
+    DEBUGFUNC("e1000_set_d0_lplu_state");
+
+    if (hw->mac_type <= e1000_82547_rev_2)
+        return E1000_SUCCESS;
+
+    if (hw->mac_type == e1000_ich8lan) {
+        phy_ctrl = er32(PHY_CTRL);
+    } else {
+        ret_val = e1000_read_phy_reg(hw, IGP02E1000_PHY_POWER_MGMT, &phy_data);
+        if (ret_val)
+            return ret_val;
+    }
+
+    if (!active) {
+        if (hw->mac_type == e1000_ich8lan) {
+            phy_ctrl &= ~E1000_PHY_CTRL_D0A_LPLU;
+            ew32(PHY_CTRL, phy_ctrl);
+        } else {
+            phy_data &= ~IGP02E1000_PM_D0_LPLU;
+            ret_val = e1000_write_phy_reg(hw, IGP02E1000_PHY_POWER_MGMT, phy_data);
+            if (ret_val)
+                return ret_val;
+        }
+
+        /* LPLU and SmartSpeed are mutually exclusive.  LPLU is used during
+         * Dx states where the power conservation is most important.  During
+         * driver activity we should enable SmartSpeed, so performance is
+         * maintained. */
+        if (hw->smart_speed == e1000_smart_speed_on) {
+            ret_val = e1000_read_phy_reg(hw, IGP01E1000_PHY_PORT_CONFIG,
+                                         &phy_data);
+            if (ret_val)
+                return ret_val;
+
+            phy_data |= IGP01E1000_PSCFR_SMART_SPEED;
+            ret_val = e1000_write_phy_reg(hw, IGP01E1000_PHY_PORT_CONFIG,
+                                          phy_data);
+            if (ret_val)
+                return ret_val;
+        } else if (hw->smart_speed == e1000_smart_speed_off) {
+            ret_val = e1000_read_phy_reg(hw, IGP01E1000_PHY_PORT_CONFIG,
+                                         &phy_data);
+            if (ret_val)
+                return ret_val;
+
+            phy_data &= ~IGP01E1000_PSCFR_SMART_SPEED;
+            ret_val = e1000_write_phy_reg(hw, IGP01E1000_PHY_PORT_CONFIG,
+                                          phy_data);
+            if (ret_val)
+                return ret_val;
+        }
+
+
+    } else {
+
+        if (hw->mac_type == e1000_ich8lan) {
+            phy_ctrl |= E1000_PHY_CTRL_D0A_LPLU;
+            ew32(PHY_CTRL, phy_ctrl);
+        } else {
+            phy_data |= IGP02E1000_PM_D0_LPLU;
+            ret_val = e1000_write_phy_reg(hw, IGP02E1000_PHY_POWER_MGMT, phy_data);
+            if (ret_val)
+                return ret_val;
+        }
+
+        /* When LPLU is enabled we should disable SmartSpeed */
+        ret_val = e1000_read_phy_reg(hw, IGP01E1000_PHY_PORT_CONFIG, &phy_data);
+        if (ret_val)
+            return ret_val;
+
+        phy_data &= ~IGP01E1000_PSCFR_SMART_SPEED;
+        ret_val = e1000_write_phy_reg(hw, IGP01E1000_PHY_PORT_CONFIG, phy_data);
+        if (ret_val)
+            return ret_val;
+
+    }
+    return E1000_SUCCESS;
+}
+
+/******************************************************************************
+ * Change VCO speed register to improve Bit Error Rate performance of SERDES.
+ *
+ * hw - Struct containing variables accessed by shared code
+ *****************************************************************************/
+static s32 e1000_set_vco_speed(struct e1000_hw *hw)
+{
+    s32  ret_val;
+    u16 default_page = 0;
+    u16 phy_data;
+
+    DEBUGFUNC("e1000_set_vco_speed");
+
+    switch (hw->mac_type) {
+    case e1000_82545_rev_3:
+    case e1000_82546_rev_3:
+       break;
+    default:
+        return E1000_SUCCESS;
+    }
+
+    /* Set PHY register 30, page 5, bit 8 to 0 */
+
+    ret_val = e1000_read_phy_reg(hw, M88E1000_PHY_PAGE_SELECT, &default_page);
+    if (ret_val)
+        return ret_val;
+
+    ret_val = e1000_write_phy_reg(hw, M88E1000_PHY_PAGE_SELECT, 0x0005);
+    if (ret_val)
+        return ret_val;
+
+    ret_val = e1000_read_phy_reg(hw, M88E1000_PHY_GEN_CONTROL, &phy_data);
+    if (ret_val)
+        return ret_val;
+
+    phy_data &= ~M88E1000_PHY_VCO_REG_BIT8;
+    ret_val = e1000_write_phy_reg(hw, M88E1000_PHY_GEN_CONTROL, phy_data);
+    if (ret_val)
+        return ret_val;
+
+    /* Set PHY register 30, page 4, bit 11 to 1 */
+
+    ret_val = e1000_write_phy_reg(hw, M88E1000_PHY_PAGE_SELECT, 0x0004);
+    if (ret_val)
+        return ret_val;
+
+    ret_val = e1000_read_phy_reg(hw, M88E1000_PHY_GEN_CONTROL, &phy_data);
+    if (ret_val)
+        return ret_val;
+
+    phy_data |= M88E1000_PHY_VCO_REG_BIT11;
+    ret_val = e1000_write_phy_reg(hw, M88E1000_PHY_GEN_CONTROL, phy_data);
+    if (ret_val)
+        return ret_val;
+
+    ret_val = e1000_write_phy_reg(hw, M88E1000_PHY_PAGE_SELECT, default_page);
+    if (ret_val)
+        return ret_val;
+
+    return E1000_SUCCESS;
+}
+
+
+/*****************************************************************************
+ * This function reads the cookie from ARC RAM.
+ *
+ * returns: - E1000_SUCCESS.
+ ****************************************************************************/
+static s32 e1000_host_if_read_cookie(struct e1000_hw *hw, u8 *buffer)
+{
+    u8 i;
+    u32 offset = E1000_MNG_DHCP_COOKIE_OFFSET;
+    u8 length = E1000_MNG_DHCP_COOKIE_LENGTH;
+
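+    /* Convert the byte offset and length into dword units for the HOST_IF
+     * register-array reads below. */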
+    length = (length >> 2);
+    offset = (offset >> 2);
+
+    for (i = 0; i < length; i++) {
+        *((u32 *)buffer + i) =
+            E1000_READ_REG_ARRAY_DWORD(hw, HOST_IF, offset + i);
+    }
+    return E1000_SUCCESS;
+}
+
+
+/*****************************************************************************
+ * This function checks whether the host interface is enabled for command
+ * operation and whether the previous command has completed.
+ * It busy-waits if the previous command has not completed.
+ *
+ * returns: - E1000_ERR_HOST_INTERFACE_COMMAND if the interface is not ready
+ *            or on timeout
+ *          - E1000_SUCCESS for success.
+ ****************************************************************************/
+static s32 e1000_mng_enable_host_if(struct e1000_hw *hw)
+{
+    u32 hicr;
+    u8 i;
+
+    /* Check that the host interface is enabled. */
+    hicr = er32(HICR);
+    if ((hicr & E1000_HICR_EN) == 0) {
+        DEBUGOUT("E1000_HOST_EN bit disabled.\n");
+        return -E1000_ERR_HOST_INTERFACE_COMMAND;
+    }
+    /* check the previous command is completed */
+    for (i = 0; i < E1000_MNG_DHCP_COMMAND_TIMEOUT; i++) {
+        hicr = er32(HICR);
+        if (!(hicr & E1000_HICR_C))
+            break;
+        mdelay(1);
+    }
+
+    if (i == E1000_MNG_DHCP_COMMAND_TIMEOUT) {
+        DEBUGOUT("Previous command timeout failed .\n");
+        return -E1000_ERR_HOST_INTERFACE_COMMAND;
+    }
+    return E1000_SUCCESS;
+}
+
+/*****************************************************************************
+ * This function writes the buffer content at the given offset on the host
+ * interface.  It handles alignment so that the writes are done in the most
+ * efficient way.  It also accumulates the sum of the buffer bytes into the
+ * *sum parameter.
+ *
+ * returns  - E1000_SUCCESS for success.
+ ****************************************************************************/
+static s32 e1000_mng_host_if_write(struct e1000_hw *hw, u8 *buffer, u16 length,
+				   u16 offset, u8 *sum)
+{
+    u8 *tmp;
+    u8 *bufptr = buffer;
+    u32 data = 0;
+    u16 remaining, i, j, prev_bytes;
+
+    /* *sum is only the running sum of the data; it is not a checksum. */
+
+    if (length == 0 || offset + length > E1000_HI_MAX_MNG_DATA_LENGTH) {
+        return -E1000_ERR_PARAM;
+    }
+
+    tmp = (u8 *)&data;
+    prev_bytes = offset & 0x3;
+    offset &= 0xFFFC;
+    offset >>= 2;
+
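+    /* If the offset is not dword-aligned, read-modify-write the first dword:
+     * keep the existing leading bytes and fill the remainder from the buffer. */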
+    if (prev_bytes) {
+        data = E1000_READ_REG_ARRAY_DWORD(hw, HOST_IF, offset);
+        for (j = prev_bytes; j < sizeof(u32); j++) {
+            *(tmp + j) = *bufptr++;
+            *sum += *(tmp + j);
+        }
+        E1000_WRITE_REG_ARRAY_DWORD(hw, HOST_IF, offset, data);
+        length -= j - prev_bytes;
+        offset++;
+    }
+
+    remaining = length & 0x3;
+    length -= remaining;
+
+    /* Calculate length in DWORDs */
+    length >>= 2;
+
+    /* The device driver writes the relevant command block into the
+     * ram area. */
+    for (i = 0; i < length; i++) {
+        for (j = 0; j < sizeof(u32); j++) {
+            *(tmp + j) = *bufptr++;
+            *sum += *(tmp + j);
+        }
+
+        E1000_WRITE_REG_ARRAY_DWORD(hw, HOST_IF, offset + i, data);
+    }
+    if (remaining) {
+        for (j = 0; j < sizeof(u32); j++) {
+            if (j < remaining)
+                *(tmp + j) = *bufptr++;
+            else
+                *(tmp + j) = 0;
+
+            *sum += *(tmp + j);
+        }
+        E1000_WRITE_REG_ARRAY_DWORD(hw, HOST_IF, offset + i, data);
+    }
+
+    return E1000_SUCCESS;
+}
+
+
+/*****************************************************************************
+ * This function writes the command header after performing the checksum calculation.
+ *
+ * returns  - E1000_SUCCESS for success.
+ ****************************************************************************/
+static s32 e1000_mng_write_cmd_header(struct e1000_hw *hw,
+				      struct e1000_host_mng_command_header *hdr)
+{
+    u16 i;
+    u8 sum;
+    u8 *buffer;
+
+    /* Write the whole command header structure which includes sum of
+     * the buffer */
+
+    u16 length = sizeof(struct e1000_host_mng_command_header);
+
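+    /* On entry hdr->checksum holds the running sum of the payload bytes
+     * (accumulated by e1000_mng_host_if_write).  Include it, zero the field,
+     * sum the remaining header bytes, and store the two's complement so that
+     * header plus payload sum to zero (mod 256). */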
+    sum = hdr->checksum;
+    hdr->checksum = 0;
+
+    buffer = (u8 *)hdr;
+    i = length;
+    while (i--)
+        sum += buffer[i];
+
+    hdr->checksum = 0 - sum;
+
+    length >>= 2;
+    /* The device driver writes the relevant command block into the ram area. */
+    for (i = 0; i < length; i++) {
+        E1000_WRITE_REG_ARRAY_DWORD(hw, HOST_IF, i, *((u32 *)hdr + i));
+        E1000_WRITE_FLUSH();
+    }
+
+    return E1000_SUCCESS;
+}
+
+
+/*****************************************************************************
+ * This function indicates to the ARC that a new command is pending, which
+ * completes one write operation by the driver.
+ *
+ * returns  - E1000_SUCCESS for success.
+ ****************************************************************************/
+static s32 e1000_mng_write_commit(struct e1000_hw *hw)
+{
+    u32 hicr;
+
+    hicr = er32(HICR);
+    /* Setting this bit tells the ARC that a new command is pending. */
+    ew32(HICR, hicr | E1000_HICR_C);
+
+    return E1000_SUCCESS;
+}
+
+
+/*****************************************************************************
+ * This function checks the mode of the firmware.
+ *
+ * returns  - true when the firmware is in IAMT mode, false otherwise.
+ ****************************************************************************/
+bool e1000_check_mng_mode(struct e1000_hw *hw)
+{
+    u32 fwsm;
+
+    fwsm = er32(FWSM);
+
+    if (hw->mac_type == e1000_ich8lan) {
+        if ((fwsm & E1000_FWSM_MODE_MASK) ==
+            (E1000_MNG_ICH_IAMT_MODE << E1000_FWSM_MODE_SHIFT))
+            return true;
+    } else if ((fwsm & E1000_FWSM_MODE_MASK) ==
+               (E1000_MNG_IAMT_MODE << E1000_FWSM_MODE_SHIFT))
+        return true;
+
+    return false;
+}
+
+
+/*****************************************************************************
+ * This function writes the DHCP information.
+ ****************************************************************************/
+s32 e1000_mng_write_dhcp_info(struct e1000_hw *hw, u8 *buffer, u16 length)
+{
+    s32 ret_val;
+    struct e1000_host_mng_command_header hdr;
+
+    hdr.command_id = E1000_MNG_DHCP_TX_PAYLOAD_CMD;
+    hdr.command_length = length;
+    hdr.reserved1 = 0;
+    hdr.reserved2 = 0;
+    hdr.checksum = 0;
+
+    ret_val = e1000_mng_enable_host_if(hw);
+    if (ret_val == E1000_SUCCESS) {
+        ret_val = e1000_mng_host_if_write(hw, buffer, length, sizeof(hdr),
+                                          &(hdr.checksum));
+        if (ret_val == E1000_SUCCESS) {
+            ret_val = e1000_mng_write_cmd_header(hw, &hdr);
+            if (ret_val == E1000_SUCCESS)
+                ret_val = e1000_mng_write_commit(hw);
+        }
+    }
+    return ret_val;
+}
+
+
+/*****************************************************************************
+ * This function calculates the checksum.
+ *
+ * returns  - checksum of buffer contents.
+ ****************************************************************************/
+static u8 e1000_calculate_mng_checksum(char *buffer, u32 length)
+{
+    u8 sum = 0;
+    u32 i;
+
+    if (!buffer)
+        return 0;
+
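+    /* 8-bit two's-complement checksum: the returned value makes the buffer
+     * bytes plus the checksum sum to zero (mod 256). */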
+    for (i=0; i < length; i++)
+        sum += buffer[i];
+
+    return (u8)(0 - sum);
+}
+
+/*****************************************************************************
+ * This function checks whether tx pkt filtering needs to be enabled or not.
+ *
+ * returns  - true if tx packet filtering is enabled, false otherwise.
+ ****************************************************************************/
+bool e1000_enable_tx_pkt_filtering(struct e1000_hw *hw)
+{
+    /* called in init as well as watchdog timer functions */
+
+    s32 ret_val, checksum;
+    bool tx_filter = false;
+    struct e1000_host_mng_dhcp_cookie *hdr = &(hw->mng_cookie);
+    u8 *buffer = (u8 *) &(hw->mng_cookie);
+
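+    /* When the firmware is in management mode, read the DHCP cookie and
+     * validate its signature and checksum; if the cookie cannot be read or
+     * fails validation, err on the side of enabling filtering. */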
+    if (e1000_check_mng_mode(hw)) {
+        ret_val = e1000_mng_enable_host_if(hw);
+        if (ret_val == E1000_SUCCESS) {
+            ret_val = e1000_host_if_read_cookie(hw, buffer);
+            if (ret_val == E1000_SUCCESS) {
+                checksum = hdr->checksum;
+                hdr->checksum = 0;
+                if ((hdr->signature == E1000_IAMT_SIGNATURE) &&
+                    checksum == e1000_calculate_mng_checksum((char *)buffer,
+                                               E1000_MNG_DHCP_COOKIE_LENGTH)) {
+                    if (hdr->status &
+                        E1000_MNG_DHCP_COOKIE_STATUS_PARSING_SUPPORT)
+                        tx_filter = true;
+                } else
+                    tx_filter = true;
+            } else
+                tx_filter = true;
+        }
+    }
+
+    hw->tx_pkt_filtering = tx_filter;
+    return tx_filter;
+}
+
+/******************************************************************************
+ * Verifies whether the hardware needs to allow ARPs to be processed by the host
+ *
+ * hw - Struct containing variables accessed by shared code
+ *
+ * returns: - true/false
+ *
+ *****************************************************************************/
+u32 e1000_enable_mng_pass_thru(struct e1000_hw *hw)
+{
+    u32 manc;
+    u32 fwsm, factps;
+
+    if (hw->asf_firmware_present) {
+        manc = er32(MANC);
+
+        if (!(manc & E1000_MANC_RCV_TCO_EN) ||
+            !(manc & E1000_MANC_EN_MAC_ADDR_FILTER))
+            return false;
+        if (e1000_arc_subsystem_valid(hw)) {
+            fwsm = er32(FWSM);
+            factps = er32(FACTPS);
+
+            if ((((fwsm & E1000_FWSM_MODE_MASK) >> E1000_FWSM_MODE_SHIFT) ==
+                   e1000_mng_mode_pt) && !(factps & E1000_FACTPS_MNGCG))
+                return true;
+        } else
+            if ((manc & E1000_MANC_SMBUS_EN) && !(manc & E1000_MANC_ASF_EN))
+                return true;
+    }
+    return false;
+}
+
+static s32 e1000_polarity_reversal_workaround(struct e1000_hw *hw)
+{
+    s32 ret_val;
+    u16 mii_status_reg;
+    u16 i;
+
+    /* Polarity reversal workaround for forced 10F/10H links. */
+
+    /* Disable the transmitter on the PHY */
+
+    ret_val = e1000_write_phy_reg(hw, M88E1000_PHY_PAGE_SELECT, 0x0019);
+    if (ret_val)
+        return ret_val;
+    ret_val = e1000_write_phy_reg(hw, M88E1000_PHY_GEN_CONTROL, 0xFFFF);
+    if (ret_val)
+        return ret_val;
+
+    ret_val = e1000_write_phy_reg(hw, M88E1000_PHY_PAGE_SELECT, 0x0000);
+    if (ret_val)
+        return ret_val;
+
+    /* This loop will early-out if the no-link condition has been met. */
+    for (i = PHY_FORCE_TIME; i > 0; i--) {
+        /* Read the MII Status Register and wait for Link Status bit
+         * to be clear.
+         */
+
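+        /* Read the register twice, presumably because the MII link-status
+         * bit is latched; the second read reflects the current link state. */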
+        ret_val = e1000_read_phy_reg(hw, PHY_STATUS, &mii_status_reg);
+        if (ret_val)
+            return ret_val;
+
+        ret_val = e1000_read_phy_reg(hw, PHY_STATUS, &mii_status_reg);
+        if (ret_val)
+            return ret_val;
+
+        if ((mii_status_reg & ~MII_SR_LINK_STATUS) == 0) break;
+        mdelay(100);
+    }
+
+    /* Recommended delay time after link has been lost */
+    mdelay(1000);
+
+    /* Now we will re-enable the transmitter on the PHY */
+
+    ret_val = e1000_write_phy_reg(hw, M88E1000_PHY_PAGE_SELECT, 0x0019);
+    if (ret_val)
+        return ret_val;
+    mdelay(50);
+    ret_val = e1000_write_phy_reg(hw, M88E1000_PHY_GEN_CONTROL, 0xFFF0);
+    if (ret_val)
+        return ret_val;
+    mdelay(50);
+    ret_val = e1000_write_phy_reg(hw, M88E1000_PHY_GEN_CONTROL, 0xFF00);
+    if (ret_val)
+        return ret_val;
+    mdelay(50);
+    ret_val = e1000_write_phy_reg(hw, M88E1000_PHY_GEN_CONTROL, 0x0000);
+    if (ret_val)
+        return ret_val;
+
+    ret_val = e1000_write_phy_reg(hw, M88E1000_PHY_PAGE_SELECT, 0x0000);
+    if (ret_val)
+        return ret_val;
+
+    /* This loop will early-out if the link condition has been met. */
+    for (i = PHY_FORCE_TIME; i > 0; i--) {
+        /* Read the MII Status Register and wait for Link Status bit
+         * to be set.
+         */
+
+        ret_val = e1000_read_phy_reg(hw, PHY_STATUS, &mii_status_reg);
+        if (ret_val)
+            return ret_val;
+
+        ret_val = e1000_read_phy_reg(hw, PHY_STATUS, &mii_status_reg);
+        if (ret_val)
+            return ret_val;
+
+        if (mii_status_reg & MII_SR_LINK_STATUS) break;
+        mdelay(100);
+    }
+    return E1000_SUCCESS;
+}
+
+/***************************************************************************
+ *
+ * Disables PCI-Express master access.
+ *
+ * hw: Struct containing variables accessed by shared code
+ *
+ * returns: - none.
+ *
+ ***************************************************************************/
+static void e1000_set_pci_express_master_disable(struct e1000_hw *hw)
+{
+    u32 ctrl;
+
+    DEBUGFUNC("e1000_set_pci_express_master_disable");
+
+    if (hw->bus_type != e1000_bus_type_pci_express)
+        return;
+
+    ctrl = er32(CTRL);
+    ctrl |= E1000_CTRL_GIO_MASTER_DISABLE;
+    ew32(CTRL, ctrl);
+}
+
+/*******************************************************************************
+ *
+ * Disables PCI-Express master access and verifies there are no pending requests
+ *
+ * hw: Struct containing variables accessed by shared code
+ *
+ * returns: - E1000_ERR_MASTER_REQUESTS_PENDING if master disable bit hasn't
+ *            caused the master requests to be disabled.
+ *            E1000_SUCCESS master requests disabled.
+ *
+ ******************************************************************************/
+s32 e1000_disable_pciex_master(struct e1000_hw *hw)
+{
+    s32 timeout = MASTER_DISABLE_TIMEOUT;   /* 80ms */
+
+    DEBUGFUNC("e1000_disable_pciex_master");
+
+    if (hw->bus_type != e1000_bus_type_pci_express)
+        return E1000_SUCCESS;
+
+    e1000_set_pci_express_master_disable(hw);
+
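+    /* Poll STATUS until the hardware clears GIO_MASTER_ENABLE, which
+     * indicates that no master requests are pending any more. */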
+    while (timeout) {
+        if (!(er32(STATUS) & E1000_STATUS_GIO_MASTER_ENABLE))
+            break;
+        else
+            udelay(100);
+        timeout--;
+    }
+
+    if (!timeout) {
+        DEBUGOUT("Master requests are pending.\n");
+        return -E1000_ERR_MASTER_REQUESTS_PENDING;
+    }
+
+    return E1000_SUCCESS;
+}
+
+/*******************************************************************************
+ *
+ * Check for EEPROM Auto Read bit done.
+ *
+ * hw: Struct containing variables accessed by shared code
+ *
+ * returns: - E1000_ERR_RESET if fail to reset MAC
+ *            E1000_SUCCESS at any other case.
+ *
+ ******************************************************************************/
+static s32 e1000_get_auto_rd_done(struct e1000_hw *hw)
+{
+    s32 timeout = AUTO_READ_DONE_TIMEOUT;
+
+    DEBUGFUNC("e1000_get_auto_rd_done");
+
+    switch (hw->mac_type) {
+    default:
+        msleep(5);
+        break;
+    case e1000_82571:
+    case e1000_82572:
+    case e1000_82573:
+    case e1000_80003es2lan:
+    case e1000_ich8lan:
+        while (timeout) {
+            if (er32(EECD) & E1000_EECD_AUTO_RD)
+                break;
+            else msleep(1);
+            timeout--;
+        }
+
+        if (!timeout) {
+            DEBUGOUT("Auto read by HW from EEPROM has not completed.\n");
+            return -E1000_ERR_RESET;
+        }
+        break;
+    }
+
+    /* PHY configuration from NVM only starts after EECD_AUTO_RD is set.
+     * We need to wait for the PHY configuration to complete before accessing
+     * the NVM and the PHY. */
+    if (hw->mac_type == e1000_82573)
+        msleep(25);
+
+    return E1000_SUCCESS;
+}
+
+/***************************************************************************
+ * Checks if the PHY configuration is done
+ *
+ * hw: Struct containing variables accessed by shared code
+ *
+ * returns: - E1000_ERR_RESET if fail to reset MAC
+ *            E1000_SUCCESS at any other case.
+ *
+ ***************************************************************************/
+static s32 e1000_get_phy_cfg_done(struct e1000_hw *hw)
+{
+    s32 timeout = PHY_CFG_TIMEOUT;
+    u32 cfg_mask = E1000_EEPROM_CFG_DONE;
+
+    DEBUGFUNC("e1000_get_phy_cfg_done");
+
+    switch (hw->mac_type) {
+    default:
+        mdelay(10);
+        break;
+    case e1000_80003es2lan:
+        /* Separate *_CFG_DONE_* bit for each port */
+        if (er32(STATUS) & E1000_STATUS_FUNC_1)
+            cfg_mask = E1000_EEPROM_CFG_DONE_PORT_1;
+        /* Fall Through */
+    case e1000_82571:
+    case e1000_82572:
+        while (timeout) {
+            if (er32(EEMNGCTL) & cfg_mask)
+                break;
+            else
+                msleep(1);
+            timeout--;
+        }
+        if (!timeout) {
+            DEBUGOUT("MNG configuration cycle has not completed.\n");
+            return -E1000_ERR_RESET;
+        }
+        break;
+    }
+
+    return E1000_SUCCESS;
+}
+
+/***************************************************************************
+ *
+ * Obtains the combination of the SMBI and SWESMBI semaphore bits before
+ * resetting the adapter or accessing the EEPROM.
+ *
+ * hw: Struct containing variables accessed by shared code
+ *
+ * returns: - E1000_ERR_EEPROM if fail to access EEPROM.
+ *            E1000_SUCCESS at any other case.
+ *
+ ***************************************************************************/
+static s32 e1000_get_hw_eeprom_semaphore(struct e1000_hw *hw)
+{
+    s32 timeout;
+    u32 swsm;
+
+    DEBUGFUNC("e1000_get_hw_eeprom_semaphore");
+
+    if (!hw->eeprom_semaphore_present)
+        return E1000_SUCCESS;
+
+    if (hw->mac_type == e1000_80003es2lan) {
+        /* Get the SW semaphore. */
+        if (e1000_get_software_semaphore(hw) != E1000_SUCCESS)
+            return -E1000_ERR_EEPROM;
+    }
+
+    /* Get the FW semaphore. */
+    timeout = hw->eeprom.word_size + 1;
+    while (timeout) {
+        swsm = er32(SWSM);
+        swsm |= E1000_SWSM_SWESMBI;
+        ew32(SWSM, swsm);
+        /* if we managed to set the bit we got the semaphore. */
+        swsm = er32(SWSM);
+        if (swsm & E1000_SWSM_SWESMBI)
+            break;
+
+        udelay(50);
+        timeout--;
+    }
+
+    if (!timeout) {
+        /* Release semaphores */
+        e1000_put_hw_eeprom_semaphore(hw);
+        DEBUGOUT("Driver can't access the Eeprom - SWESMBI bit is set.\n");
+        return -E1000_ERR_EEPROM;
+    }
+
+    return E1000_SUCCESS;
+}
+
+/***************************************************************************
+ * This function clears HW semaphore bits.
+ *
+ * hw: Struct containing variables accessed by shared code
+ *
+ * returns: - None.
+ *
+ ***************************************************************************/
+static void e1000_put_hw_eeprom_semaphore(struct e1000_hw *hw)
+{
+    u32 swsm;
+
+    DEBUGFUNC("e1000_put_hw_eeprom_semaphore");
+
+    if (!hw->eeprom_semaphore_present)
+        return;
+
+    swsm = er32(SWSM);
+    if (hw->mac_type == e1000_80003es2lan) {
+        /* Release both semaphores. */
+        swsm &= ~(E1000_SWSM_SMBI | E1000_SWSM_SWESMBI);
+    } else
+        swsm &= ~(E1000_SWSM_SWESMBI);
+    ew32(SWSM, swsm);
+}
+
+/***************************************************************************
+ *
+ * Obtaining software semaphore bit (SMBI) before resetting PHY.
+ *
+ * hw: Struct containing variables accessed by shared code
+ *
+ * returns: - E1000_ERR_RESET if fail to obtain semaphore.
+ *            E1000_SUCCESS at any other case.
+ *
+ ***************************************************************************/
+static s32 e1000_get_software_semaphore(struct e1000_hw *hw)
+{
+    s32 timeout = hw->eeprom.word_size + 1;
+    u32 swsm;
+
+    DEBUGFUNC("e1000_get_software_semaphore");
+
+    if (hw->mac_type != e1000_80003es2lan) {
+        return E1000_SUCCESS;
+    }
+
+    while (timeout) {
+        swsm = er32(SWSM);
+        /* If SMBI bit cleared, it is now set and we hold the semaphore */
+        if (!(swsm & E1000_SWSM_SMBI))
+            break;
+        mdelay(1);
+        timeout--;
+    }
+
+    if (!timeout) {
+        DEBUGOUT("Driver can't access device - SMBI bit is set.\n");
+        return -E1000_ERR_RESET;
+    }
+
+    return E1000_SUCCESS;
+}
+
+/***************************************************************************
+ *
+ * Release semaphore bit (SMBI).
+ *
+ * hw: Struct containing variables accessed by shared code
+ *
+ ***************************************************************************/
+static void e1000_release_software_semaphore(struct e1000_hw *hw)
+{
+    u32 swsm;
+
+    DEBUGFUNC("e1000_release_software_semaphore");
+
+    if (hw->mac_type != e1000_80003es2lan) {
+        return;
+    }
+
+    swsm = er32(SWSM);
+    /* Release the SW semaphores.*/
+    swsm &= ~E1000_SWSM_SMBI;
+    ew32(SWSM, swsm);
+}
+
+/******************************************************************************
+ * Checks if PHY reset is blocked due to SOL/IDER session, for example.
+ * Returning E1000_BLK_PHY_RESET isn't necessarily an error.  But it's up to
+ * the caller to figure out how to deal with it.
+ *
+ * hw - Struct containing variables accessed by shared code
+ *
+ * returns: - E1000_BLK_PHY_RESET
+ *            E1000_SUCCESS
+ *
+ *****************************************************************************/
+s32 e1000_check_phy_reset_block(struct e1000_hw *hw)
+{
+    u32 manc = 0;
+    u32 fwsm = 0;
+
+    if (hw->mac_type == e1000_ich8lan) {
+        fwsm = er32(FWSM);
+        return (fwsm & E1000_FWSM_RSPCIPHY) ? E1000_SUCCESS
+                                            : E1000_BLK_PHY_RESET;
+    }
+
+    if (hw->mac_type > e1000_82547_rev_2)
+        manc = er32(MANC);
+    return (manc & E1000_MANC_BLK_PHY_RST_ON_IDE) ?
+        E1000_BLK_PHY_RESET : E1000_SUCCESS;
+}
+
+static u8 e1000_arc_subsystem_valid(struct e1000_hw *hw)
+{
+    u32 fwsm;
+
+    /* On 8257x silicon, registers in the range of 0x8800 - 0x8FFC
+     * may not be provided a DMA clock when no manageability features are
+     * enabled.  We do not want to perform any reads/writes to these registers
+     * if this is the case.  We read FWSM to determine the manageability mode.
+     */
+    switch (hw->mac_type) {
+    case e1000_82571:
+    case e1000_82572:
+    case e1000_82573:
+    case e1000_80003es2lan:
+        fwsm = er32(FWSM);
+        if ((fwsm & E1000_FWSM_MODE_MASK) != 0)
+            return true;
+        break;
+    case e1000_ich8lan:
+        return true;
+    default:
+        break;
+    }
+    return false;
+}
+
+
+/******************************************************************************
+ * Configure PCI-Ex no-snoop
+ *
+ * hw - Struct containing variables accessed by shared code.
+ * no_snoop - Bitmap of no-snoop events.
+ *
+ * returns: E1000_SUCCESS
+ *
+ *****************************************************************************/
+static s32 e1000_set_pci_ex_no_snoop(struct e1000_hw *hw, u32 no_snoop)
+{
+    u32 gcr_reg = 0;
+
+    DEBUGFUNC("e1000_set_pci_ex_no_snoop");
+
+    if (hw->bus_type == e1000_bus_type_unknown)
+        e1000_get_bus_info(hw);
+
+    if (hw->bus_type != e1000_bus_type_pci_express)
+        return E1000_SUCCESS;
+
+    if (no_snoop) {
+        gcr_reg = er32(GCR);
+        gcr_reg &= ~(PCI_EX_NO_SNOOP_ALL);
+        gcr_reg |= no_snoop;
+        ew32(GCR, gcr_reg);
+    }
+    if (hw->mac_type == e1000_ich8lan) {
+        u32 ctrl_ext;
+
+        ew32(GCR, PCI_EX_82566_SNOOP_ALL);
+
+        ctrl_ext = er32(CTRL_EXT);
+        ctrl_ext |= E1000_CTRL_EXT_RO_DIS;
+        ew32(CTRL_EXT, ctrl_ext);
+    }
+
+    return E1000_SUCCESS;
+}
+
+/***************************************************************************
+ *
+ * Get software semaphore FLAG bit (SWFLAG).
+ * SWFLAG is used to synchronize access to all shared resources between
+ * SW, FW and HW.
+ *
+ * hw: Struct containing variables accessed by shared code
+ *
+ ***************************************************************************/
+static s32 e1000_get_software_flag(struct e1000_hw *hw)
+{
+    s32 timeout = PHY_CFG_TIMEOUT;
+    u32 extcnf_ctrl;
+
+    DEBUGFUNC("e1000_get_software_flag");
+
+    if (hw->mac_type == e1000_ich8lan) {
+        while (timeout) {
+            extcnf_ctrl = er32(EXTCNF_CTRL);
+            extcnf_ctrl |= E1000_EXTCNF_CTRL_SWFLAG;
+            ew32(EXTCNF_CTRL, extcnf_ctrl);
+
+            extcnf_ctrl = er32(EXTCNF_CTRL);
+            if (extcnf_ctrl & E1000_EXTCNF_CTRL_SWFLAG)
+                break;
+            mdelay(1);
+            timeout--;
+        }
+
+        if (!timeout) {
+            DEBUGOUT("FW or HW locks the resource too long.\n");
+            return -E1000_ERR_CONFIG;
+        }
+    }
+
+    return E1000_SUCCESS;
+}
+
+/***************************************************************************
+ *
+ * Release software semaphore FLAG bit (SWFLAG).
+ * SWFLAG is used to synchronize access to all shared resources between
+ * SW, FW and HW.
+ *
+ * hw: Struct containing variables accessed by shared code
+ *
+ ***************************************************************************/
+static void e1000_release_software_flag(struct e1000_hw *hw)
+{
+    u32 extcnf_ctrl;
+
+    DEBUGFUNC("e1000_release_software_flag");
+
+    if (hw->mac_type == e1000_ich8lan) {
+        extcnf_ctrl = er32(EXTCNF_CTRL);
+        extcnf_ctrl &= ~E1000_EXTCNF_CTRL_SWFLAG;
+        ew32(EXTCNF_CTRL, extcnf_ctrl);
+    }
+
+    return;
+}
+
+/******************************************************************************
+ * Reads a 16 bit word or words from the EEPROM using the ICH8's flash access
+ * register.
+ *
+ * hw - Struct containing variables accessed by shared code
+ * offset - offset of word in the EEPROM to read
+ * data - word read from the EEPROM
+ * words - number of words to read
+ *****************************************************************************/
+static s32 e1000_read_eeprom_ich8(struct e1000_hw *hw, u16 offset, u16 words,
+				  u16 *data)
+{
+    s32  error = E1000_SUCCESS;
+    u32 flash_bank = 0;
+    u32 act_offset = 0;
+    u32 bank_offset = 0;
+    u16 word = 0;
+    u16 i = 0;
+
+    /* We need to know which is the valid flash bank.  In the event
+     * that we didn't allocate eeprom_shadow_ram, we may not be
+     * managing flash_bank.  So it cannot be trusted and needs
+     * to be updated with each read.
+     */
+    /* Value of bit 22 corresponds to the flash bank we're on. */
+    flash_bank = (er32(EECD) & E1000_EECD_SEC1VAL) ? 1 : 0;
+
+    /* Adjust offset appropriately if we're on bank 1 - adjust for word size */
+    bank_offset = flash_bank * (hw->flash_bank_size * 2);
+
+    error = e1000_get_software_flag(hw);
+    if (error != E1000_SUCCESS)
+        return error;
+
+    for (i = 0; i < words; i++) {
+        if (hw->eeprom_shadow_ram != NULL &&
+            hw->eeprom_shadow_ram[offset+i].modified) {
+            data[i] = hw->eeprom_shadow_ram[offset+i].eeprom_word;
+        } else {
+            /* The NVM part needs a byte offset, hence * 2 */
+            act_offset = bank_offset + ((offset + i) * 2);
+            error = e1000_read_ich8_word(hw, act_offset, &word);
+            if (error != E1000_SUCCESS)
+                break;
+            data[i] = word;
+        }
+    }
+
+    e1000_release_software_flag(hw);
+
+    return error;
+}
+
+/******************************************************************************
+ * Writes a 16 bit word or words to the EEPROM using the ICH8's flash access
+ * register.  Actually, writes are written to the shadow ram cache in the hw
+ * structure hw->eeprom_shadow_ram.  e1000_commit_shadow_ram flushes this to
+ * the NVM, which occurs when the NVM checksum is updated.
+ *
+ * hw - Struct containing variables accessed by shared code
+ * offset - offset of word in the EEPROM to write
+ * words - number of words to write
+ * data - words to write to the EEPROM
+ *****************************************************************************/
+static s32 e1000_write_eeprom_ich8(struct e1000_hw *hw, u16 offset, u16 words,
+				   u16 *data)
+{
+    u32 i = 0;
+    s32 error = E1000_SUCCESS;
+
+    error = e1000_get_software_flag(hw);
+    if (error != E1000_SUCCESS)
+        return error;
+
+    /* A driver can write to the NVM only if it has eeprom_shadow_ram
+     * allocated.  Subsequent reads to the modified words are read from
+     * this cached structure as well.  Writes will only go into this
+     * cached structure unless it's followed by a call to
+     * e1000_update_eeprom_checksum() where it will commit the changes
+     * and clear the "modified" field.
+     */
+    if (hw->eeprom_shadow_ram != NULL) {
+        for (i = 0; i < words; i++) {
+            if ((offset + i) < E1000_SHADOW_RAM_WORDS) {
+                hw->eeprom_shadow_ram[offset+i].modified = true;
+                hw->eeprom_shadow_ram[offset+i].eeprom_word = data[i];
+            } else {
+                error = -E1000_ERR_EEPROM;
+                break;
+            }
+        }
+    } else {
+        /* Drivers have the option to not allocate eeprom_shadow_ram as long
+         * as they don't perform any NVM writes.  An attempt to do so
+         * will result in this error.
+         */
+        error = -E1000_ERR_EEPROM;
+    }
+
+    e1000_release_software_flag(hw);
+
+    return error;
+}
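+
+/* Illustration of the expected calling order (not an additional API): per the
+ * comment above, words written on the ICH8 path are only staged in
+ * hw->eeprom_shadow_ram and are committed to the NVM when the checksum is
+ * updated.  Using the entry points declared in the e1000_hw header, with a
+ * hypothetical offset and value:
+ *
+ *   u16 value = 0x1234;
+ *   if (e1000_write_eeprom(hw, 0x10, 1, &value) == E1000_SUCCESS)
+ *       e1000_update_eeprom_checksum(hw);
+ *
+ * As noted above, updating the checksum is what commits the shadow RAM to the
+ * NVM and clears the "modified" flags.
+ */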
+
+/******************************************************************************
+ * This function does initial flash setup so that a new read/write/erase cycle
+ * can be started.
+ *
+ * hw - The pointer to the hw structure
+ ****************************************************************************/
+static s32 e1000_ich8_cycle_init(struct e1000_hw *hw)
+{
+    union ich8_hws_flash_status hsfsts;
+    s32 error = E1000_ERR_EEPROM;
+    s32 i     = 0;
+
+    DEBUGFUNC("e1000_ich8_cycle_init");
+
+    hsfsts.regval = E1000_READ_ICH_FLASH_REG16(hw, ICH_FLASH_HSFSTS);
+
+    /* Check that the Flash Descriptor Valid bit is set in HW status */
+    if (hsfsts.hsf_status.fldesvalid == 0) {
+        DEBUGOUT("Flash descriptor invalid.  SW Sequencing must be used.");
+        return error;
+    }
+
+    /* Clear FCERR in Hw status by writing 1 */
+    /* Clear DAEL in Hw status by writing a 1 */
+    hsfsts.hsf_status.flcerr = 1;
+    hsfsts.hsf_status.dael = 1;
+
+    E1000_WRITE_ICH_FLASH_REG16(hw, ICH_FLASH_HSFSTS, hsfsts.regval);
+
+    /* Either we should have a hardware SPI cycle-in-progress bit to check
+     * against in order to start a new cycle, or the FDONE bit should be set
+     * to 1 by the hardware after a hardware reset, so that it can be used to
+     * indicate whether a cycle is in progress or has completed.  We should
+     * also have some software semaphore mechanism to guard FDONE or the
+     * cycle-in-progress bit, so that access to those bits by two threads is
+     * serialized, or some other way to keep two threads from starting a
+     * cycle at the same time. */
+
+    if (hsfsts.hsf_status.flcinprog == 0) {
+        /* There is no cycle running at present, so we can start a cycle */
+        /* Begin by setting Flash Cycle Done. */
+        hsfsts.hsf_status.flcdone = 1;
+        E1000_WRITE_ICH_FLASH_REG16(hw, ICH_FLASH_HSFSTS, hsfsts.regval);
+        error = E1000_SUCCESS;
+    } else {
+        /* Otherwise, poll for some time so the current cycle has a chance
+         * to end before giving up. */
+        for (i = 0; i < ICH_FLASH_COMMAND_TIMEOUT; i++) {
+            hsfsts.regval = E1000_READ_ICH_FLASH_REG16(hw, ICH_FLASH_HSFSTS);
+            if (hsfsts.hsf_status.flcinprog == 0) {
+                error = E1000_SUCCESS;
+                break;
+            }
+            udelay(1);
+        }
+        if (error == E1000_SUCCESS) {
+            /* The previous cycle has ended; now set the Flash Cycle Done
+             * bit. */
+            hsfsts.hsf_status.flcdone = 1;
+            E1000_WRITE_ICH_FLASH_REG16(hw, ICH_FLASH_HSFSTS, hsfsts.regval);
+        } else {
+            DEBUGOUT("Flash controller busy, cannot get access");
+        }
+    }
+    return error;
+}
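+
+/* Summary, for illustration, of how the routines below use this helper: a
+ * read, write or erase first calls e1000_ich8_cycle_init(), then programs
+ * ICH_FLASH_HSFCTL (data byte count and cycle type), writes the linear
+ * address to ICH_FLASH_FADDR, and finally runs the cycle:
+ *
+ *   error = e1000_ich8_cycle_init(hw);
+ *   ... set hsflctl.hsf_ctrl.fldbcount / flcycle, write ICH_FLASH_HSFCTL ...
+ *   E1000_WRITE_ICH_FLASH_REG(hw, ICH_FLASH_FADDR, flash_linear_address);
+ *   error = e1000_ich8_flash_cycle(hw, ICH_FLASH_COMMAND_TIMEOUT);
+ *
+ * This mirrors e1000_read_ich8_data() and e1000_write_ich8_data(); it is not
+ * a separate interface.
+ */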
+
+/******************************************************************************
+ * This function starts a flash cycle and waits for its completion
+ *
+ * hw - The pointer to the hw structure
+ ****************************************************************************/
+static s32 e1000_ich8_flash_cycle(struct e1000_hw *hw, u32 timeout)
+{
+    union ich8_hws_flash_ctrl hsflctl;
+    union ich8_hws_flash_status hsfsts;
+    s32 error = E1000_ERR_EEPROM;
+    u32 i = 0;
+
+    /* Start a cycle by writing 1 in Flash Cycle Go in Hw Flash Control */
+    hsflctl.regval = E1000_READ_ICH_FLASH_REG16(hw, ICH_FLASH_HSFCTL);
+    hsflctl.hsf_ctrl.flcgo = 1;
+    E1000_WRITE_ICH_FLASH_REG16(hw, ICH_FLASH_HSFCTL, hsflctl.regval);
+
+    /* wait till FDONE bit is set to 1 */
+    do {
+        hsfsts.regval = E1000_READ_ICH_FLASH_REG16(hw, ICH_FLASH_HSFSTS);
+        if (hsfsts.hsf_status.flcdone == 1)
+            break;
+        udelay(1);
+        i++;
+    } while (i < timeout);
+    if (hsfsts.hsf_status.flcdone == 1 && hsfsts.hsf_status.flcerr == 0) {
+        error = E1000_SUCCESS;
+    }
+    return error;
+}
+
+/******************************************************************************
+ * Reads a byte or word from the NVM using the ICH8 flash access registers.
+ *
+ * hw - The pointer to the hw structure
+ * index - The index of the byte or word to read.
+ * size - Size of data to read, 1=byte 2=word
+ * data - Pointer to the word to store the value read.
+ *****************************************************************************/
+static s32 e1000_read_ich8_data(struct e1000_hw *hw, u32 index, u32 size,
+				u16 *data)
+{
+    union ich8_hws_flash_status hsfsts;
+    union ich8_hws_flash_ctrl hsflctl;
+    u32 flash_linear_address;
+    u32 flash_data = 0;
+    s32 error = -E1000_ERR_EEPROM;
+    s32 count = 0;
+
+    DEBUGFUNC("e1000_read_ich8_data");
+
+    if (size < 1  || size > 2 || data == NULL ||
+        index > ICH_FLASH_LINEAR_ADDR_MASK)
+        return error;
+
+    flash_linear_address = (ICH_FLASH_LINEAR_ADDR_MASK & index) +
+                           hw->flash_base_addr;
+
+    do {
+        udelay(1);
+        /* Steps */
+        error = e1000_ich8_cycle_init(hw);
+        if (error != E1000_SUCCESS)
+            break;
+
+        hsflctl.regval = E1000_READ_ICH_FLASH_REG16(hw, ICH_FLASH_HSFCTL);
+        /* 0b/1b corresponds to 1 or 2 byte size, respectively. */
+        hsflctl.hsf_ctrl.fldbcount = size - 1;
+        hsflctl.hsf_ctrl.flcycle = ICH_CYCLE_READ;
+        E1000_WRITE_ICH_FLASH_REG16(hw, ICH_FLASH_HSFCTL, hsflctl.regval);
+
+        /* Write the last 24 bits of index into Flash Linear address field in
+         * Flash Address */
+        /* TODO: TBD maybe check the index against the size of flash */
+
+        E1000_WRITE_ICH_FLASH_REG(hw, ICH_FLASH_FADDR, flash_linear_address);
+
+        error = e1000_ich8_flash_cycle(hw, ICH_FLASH_COMMAND_TIMEOUT);
+
+        /* If FCERR is set to 1, clear it and try the whole sequence a few
+         * more times; otherwise read in (shift in) the Flash Data0 register,
+         * least significant byte first. */
+        if (error == E1000_SUCCESS) {
+            flash_data = E1000_READ_ICH_FLASH_REG(hw, ICH_FLASH_FDATA0);
+            if (size == 1) {
+                *data = (u8)(flash_data & 0x000000FF);
+            } else if (size == 2) {
+                *data = (u16)(flash_data & 0x0000FFFF);
+            }
+            break;
+        } else {
+            /* If we've gotten here, then things are probably completely hosed,
+             * but if the error condition is detected, it won't hurt to give
+             * it another try...ICH_FLASH_CYCLE_REPEAT_COUNT times.
+             */
+            hsfsts.regval = E1000_READ_ICH_FLASH_REG16(hw, ICH_FLASH_HSFSTS);
+            if (hsfsts.hsf_status.flcerr == 1) {
+                /* Repeat for some time before giving up. */
+                continue;
+            } else if (hsfsts.hsf_status.flcdone == 0) {
+                DEBUGOUT("Timeout error - flash cycle did not complete.");
+                break;
+            }
+        }
+    } while (count++ < ICH_FLASH_CYCLE_REPEAT_COUNT);
+
+    return error;
+}
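+
+/* Worked example with hypothetical values: for index = 0x2A06 and
+ * hw->flash_base_addr = 0x1000, and assuming ICH_FLASH_LINEAR_ADDR_MASK keeps
+ * the low 24 bits as the comment above states, the address written to
+ * ICH_FLASH_FADDR is 0x2A06 + 0x1000 = 0x3A06; a 2-byte read then returns the
+ * word at that flash offset, least significant byte first.
+ */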
+
+/******************************************************************************
+ * Writes one or two bytes to the NVM using the ICH8 flash access registers.
+ *
+ * hw - The pointer to the hw structure
+ * index - The index of the byte/word to write.
+ * size - Size of data to write, 1=byte 2=word
+ * data - The byte(s) to write to the NVM.
+ *****************************************************************************/
+static s32 e1000_write_ich8_data(struct e1000_hw *hw, u32 index, u32 size,
+				 u16 data)
+{
+    union ich8_hws_flash_status hsfsts;
+    union ich8_hws_flash_ctrl hsflctl;
+    u32 flash_linear_address;
+    u32 flash_data = 0;
+    s32 error = -E1000_ERR_EEPROM;
+    s32 count = 0;
+
+    DEBUGFUNC("e1000_write_ich8_data");
+
+    if (size < 1  || size > 2 || data > size * 0xff ||
+        index > ICH_FLASH_LINEAR_ADDR_MASK)
+        return error;
+
+    flash_linear_address = (ICH_FLASH_LINEAR_ADDR_MASK & index) +
+                           hw->flash_base_addr;
+
+    do {
+        udelay(1);
+        /* Steps */
+        error = e1000_ich8_cycle_init(hw);
+        if (error != E1000_SUCCESS)
+            break;
+
+        hsflctl.regval = E1000_READ_ICH_FLASH_REG16(hw, ICH_FLASH_HSFCTL);
+        /* 0b/1b corresponds to 1 or 2 byte size, respectively. */
+        hsflctl.hsf_ctrl.fldbcount = size - 1;
+        hsflctl.hsf_ctrl.flcycle = ICH_CYCLE_WRITE;
+        E1000_WRITE_ICH_FLASH_REG16(hw, ICH_FLASH_HSFCTL, hsflctl.regval);
+
+        /* Write the last 24 bits of index into Flash Linear address field in
+         * Flash Address */
+        E1000_WRITE_ICH_FLASH_REG(hw, ICH_FLASH_FADDR, flash_linear_address);
+
+        if (size == 1)
+            flash_data = (u32)data & 0x00FF;
+        else
+            flash_data = (u32)data;
+
+        E1000_WRITE_ICH_FLASH_REG(hw, ICH_FLASH_FDATA0, flash_data);
+
+        /* If FCERR is set to 1, clear it and try the whole sequence a few
+         * more times; otherwise we are done. */
+        error = e1000_ich8_flash_cycle(hw, ICH_FLASH_COMMAND_TIMEOUT);
+        if (error == E1000_SUCCESS) {
+            break;
+        } else {
+            /* If we're here, then things are most likely completely hosed,
+             * but if the error condition is detected, it won't hurt to give
+             * it another try...ICH_FLASH_CYCLE_REPEAT_COUNT times.
+             */
+            hsfsts.regval = E1000_READ_ICH_FLASH_REG16(hw, ICH_FLASH_HSFSTS);
+            if (hsfsts.hsf_status.flcerr == 1) {
+                /* Repeat for some time before giving up. */
+                continue;
+            } else if (hsfsts.hsf_status.flcdone == 0) {
+                DEBUGOUT("Timeout error - flash cycle did not complete.");
+                break;
+            }
+        }
+    } while (count++ < ICH_FLASH_CYCLE_REPEAT_COUNT);
+
+    return error;
+}
+
+/******************************************************************************
+ * Reads a single byte from the NVM using the ICH8 flash access registers.
+ *
+ * hw - pointer to e1000_hw structure
+ * index - The index of the byte to read.
+ * data - Pointer to a byte to store the value read.
+ *****************************************************************************/
+static s32 e1000_read_ich8_byte(struct e1000_hw *hw, u32 index, u8 *data)
+{
+    s32 status = E1000_SUCCESS;
+    u16 word = 0;
+
+    status = e1000_read_ich8_data(hw, index, 1, &word);
+    if (status == E1000_SUCCESS) {
+        *data = (u8)word;
+    }
+
+    return status;
+}
+
+/******************************************************************************
+ * Writes a single byte to the NVM using the ICH8 flash access registers.
+ * Performs verification by reading back the value and then going through
+ * a retry algorithm before giving up.
+ *
+ * hw - pointer to e1000_hw structure
+ * index - The index of the byte to write.
+ * byte - The byte to write to the NVM.
+ *****************************************************************************/
+static s32 e1000_verify_write_ich8_byte(struct e1000_hw *hw, u32 index, u8 byte)
+{
+    s32 error = E1000_SUCCESS;
+    s32 program_retries = 0;
+
+    DEBUGOUT2("Byte := %2.2X Offset := %d\n", byte, index);
+
+    error = e1000_write_ich8_byte(hw, index, byte);
+
+    if (error != E1000_SUCCESS) {
+        for (program_retries = 0; program_retries < 100; program_retries++) {
+            DEBUGOUT2("Retrying \t Byte := %2.2X Offset := %d\n", byte, index);
+            error = e1000_write_ich8_byte(hw, index, byte);
+            udelay(100);
+            if (error == E1000_SUCCESS)
+                break;
+        }
+    }
+
+    if (program_retries == 100)
+        error = E1000_ERR_EEPROM;
+
+    return error;
+}
+
+/******************************************************************************
+ * Writes a single byte to the NVM using the ICH8 flash access registers.
+ *
+ * hw - pointer to e1000_hw structure
+ * index - The index of the byte to write.
+ * data - The byte to write to the NVM.
+ *****************************************************************************/
+static s32 e1000_write_ich8_byte(struct e1000_hw *hw, u32 index, u8 data)
+{
+    s32 status = E1000_SUCCESS;
+    u16 word = (u16)data;
+
+    status = e1000_write_ich8_data(hw, index, 1, word);
+
+    return status;
+}
+
+/******************************************************************************
+ * Reads a word from the NVM using the ICH8 flash access registers.
+ *
+ * hw - pointer to e1000_hw structure
+ * index - The starting byte index of the word to read.
+ * data - Pointer to a word to store the value read.
+ *****************************************************************************/
+static s32 e1000_read_ich8_word(struct e1000_hw *hw, u32 index, u16 *data)
+{
+    s32 status = E1000_SUCCESS;
+    status = e1000_read_ich8_data(hw, index, 2, data);
+    return status;
+}
+
+/******************************************************************************
+ * Erases the bank specified. Each bank may be a 4, 8 or 64k block. Banks are 0
+ * based.
+ *
+ * hw - pointer to e1000_hw structure
+ * bank - 0 for first bank, 1 for second bank
+ *
+ * Note that this function may actually erase as much as 8 or 64 KBytes.  The
+ * amount of NVM used in each bank is a *minimum* of 4 KBytes, but in fact the
+ * bank size may be 4, 8 or 64 KBytes
+ *****************************************************************************/
+static s32 e1000_erase_ich8_4k_segment(struct e1000_hw *hw, u32 bank)
+{
+    union ich8_hws_flash_status hsfsts;
+    union ich8_hws_flash_ctrl hsflctl;
+    u32 flash_linear_address;
+    s32  count = 0;
+    s32  error = E1000_ERR_EEPROM;
+    s32  iteration;
+    s32  sub_sector_size = 0;
+    s32  bank_size;
+    s32  j = 0;
+    s32  error_flag = 0;
+
+    hsfsts.regval = E1000_READ_ICH_FLASH_REG16(hw, ICH_FLASH_HSFSTS);
+
+    /* Determine HW Sector size: Read BERASE bits of Hw flash Status register */
+    /* 00: The Hw sector is 256 bytes, hence we need to erase 16
+     *     consecutive sectors.  The start index for the nth Hw sector can be
+     *     calculated as bank * 4096 + n * 256
+     * 01: The Hw sector is 4K bytes, hence we need to erase 1 sector.
+     *     The start index for the nth Hw sector can be calculated
+     *     as bank * 4096
+     * 10: The HW sector is 8K bytes
+     * 11: The Hw sector size is 64K bytes */
+    if (hsfsts.hsf_status.berasesz == 0x0) {
+        /* Hw sector size 256 */
+        sub_sector_size = ICH_FLASH_SEG_SIZE_256;
+        bank_size = ICH_FLASH_SECTOR_SIZE;
+        iteration = ICH_FLASH_SECTOR_SIZE / ICH_FLASH_SEG_SIZE_256;
+    } else if (hsfsts.hsf_status.berasesz == 0x1) {
+        bank_size = ICH_FLASH_SEG_SIZE_4K;
+        iteration = 1;
+    } else if (hsfsts.hsf_status.berasesz == 0x3) {
+        bank_size = ICH_FLASH_SEG_SIZE_64K;
+        iteration = 1;
+    } else {
+        return error;
+    }
+
+    for (j = 0; j < iteration ; j++) {
+        do {
+            count++;
+            /* Steps */
+            error = e1000_ich8_cycle_init(hw);
+            if (error != E1000_SUCCESS) {
+                error_flag = 1;
+                break;
+            }
+
+            /* Write a value 11 (block Erase) in Flash Cycle field in Hw flash
+             * Control */
+            hsflctl.regval = E1000_READ_ICH_FLASH_REG16(hw, ICH_FLASH_HSFCTL);
+            hsflctl.hsf_ctrl.flcycle = ICH_CYCLE_ERASE;
+            E1000_WRITE_ICH_FLASH_REG16(hw, ICH_FLASH_HSFCTL, hsflctl.regval);
+
+            /* Write the last 24 bits of an index within the block into Flash
+             * Linear address field in Flash Address.  This probably needs to
+             * be calculated here based off the on-chip erase sector size and
+             * the software bank size (4, 8 or 64 KBytes) */
+            flash_linear_address = bank * bank_size + j * sub_sector_size;
+            flash_linear_address += hw->flash_base_addr;
+            flash_linear_address &= ICH_FLASH_LINEAR_ADDR_MASK;
+
+            E1000_WRITE_ICH_FLASH_REG(hw, ICH_FLASH_FADDR, flash_linear_address);
+
+            error = e1000_ich8_flash_cycle(hw, ICH_FLASH_ERASE_TIMEOUT);
+            /* If FCERR is set to 1, clear it and try the whole sequence a
+             * few more times; otherwise we are done. */
+            if (error == E1000_SUCCESS) {
+                break;
+            } else {
+                hsfsts.regval = E1000_READ_ICH_FLASH_REG16(hw, ICH_FLASH_HSFSTS);
+                if (hsfsts.hsf_status.flcerr == 1) {
+                    /* repeat for some time before giving up */
+                    continue;
+                } else if (hsfsts.hsf_status.flcdone == 0) {
+                    error_flag = 1;
+                    break;
+                }
+            }
+        } while ((count < ICH_FLASH_CYCLE_REPEAT_COUNT) && !error_flag);
+        if (error_flag == 1)
+            break;
+    }
+    if (error_flag != 1)
+        error = E1000_SUCCESS;
+    return error;
+}
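+
+/* Worked example with hypothetical values: for berasesz == 00 the bank is
+ * erased as 16 segments of 256 bytes, so with bank = 1 and j = 2 the linear
+ * address before adding hw->flash_base_addr is 1 * 4096 + 2 * 256 = 4608
+ * (0x1200), matching the "bank * 4096 + n * 256" rule described above.  This
+ * assumes ICH_FLASH_SECTOR_SIZE is 4 KBytes and ICH_FLASH_SEG_SIZE_256 is
+ * 256 bytes.
+ */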
+
+static s32 e1000_init_lcd_from_nvm_config_region(struct e1000_hw *hw,
+						 u32 cnf_base_addr,
+						 u32 cnf_size)
+{
+    u32 ret_val = E1000_SUCCESS;
+    u16 word_addr, reg_data, reg_addr;
+    u16 i;
+
+    /* cnf_base_addr is in DWORD */
+    word_addr = (u16)(cnf_base_addr << 1);
+
+    /* cnf_size is returned in size of dwords */
+    for (i = 0; i < cnf_size; i++) {
+        ret_val = e1000_read_eeprom(hw, (word_addr + i*2), 1, &reg_data);
+        if (ret_val)
+            return ret_val;
+
+        ret_val = e1000_read_eeprom(hw, (word_addr + i*2 + 1), 1, &reg_addr);
+        if (ret_val)
+            return ret_val;
+
+        ret_val = e1000_get_software_flag(hw);
+        if (ret_val != E1000_SUCCESS)
+            return ret_val;
+
+        ret_val = e1000_write_phy_reg_ex(hw, (u32)reg_addr, reg_data);
+
+        e1000_release_software_flag(hw);
+    }
+
+    return ret_val;
+}
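+
+/* For illustration: the configuration region read above consists of pairs of
+ * 16-bit words, (register data, register address), starting at word address
+ * cnf_base_addr * 2 (the base is given in dwords).  With a hypothetical
+ * cnf_base_addr of 0x20 and cnf_size of 2 dwords, the loop reads word pairs
+ * at 0x40/0x41 and 0x42/0x43 and writes each data word to the PHY register
+ * named by the address word that follows it.
+ */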
+
+
+/******************************************************************************
+ * This function initializes the PHY from the NVM on ICH8 platforms. This
+ * is needed due to an issue where the NVM configuration is not properly
+ * autoloaded after power transitions. Therefore, after each PHY reset, we
+ * will load the configuration data out of the NVM manually.
+ *
+ * hw: Struct containing variables accessed by shared code
+ *****************************************************************************/
+static s32 e1000_init_lcd_from_nvm(struct e1000_hw *hw)
+{
+    u32 reg_data, cnf_base_addr, cnf_size, ret_val, loop;
+
+    if (hw->phy_type != e1000_phy_igp_3)
+        return E1000_SUCCESS;
+
+    /* Check if SW needs to configure the PHY */
+    reg_data = er32(FEXTNVM);
+    if (!(reg_data & FEXTNVM_SW_CONFIG))
+        return E1000_SUCCESS;
+
+    /* Wait for basic configuration to complete before proceeding */
+    loop = 0;
+    do {
+        reg_data = er32(STATUS) & E1000_STATUS_LAN_INIT_DONE;
+        udelay(100);
+        loop++;
+    } while ((!reg_data) && (loop < 50));
+
+    /* Clear the Init Done bit for the next init event */
+    reg_data = er32(STATUS);
+    reg_data &= ~E1000_STATUS_LAN_INIT_DONE;
+    ew32(STATUS, reg_data);
+
+    /* Make sure HW does not configure LCD from PHY extended configuration
+       before SW configuration */
+    reg_data = er32(EXTCNF_CTRL);
+    if ((reg_data & E1000_EXTCNF_CTRL_LCD_WRITE_ENABLE) == 0x0000) {
+        reg_data = er32(EXTCNF_SIZE);
+        cnf_size = reg_data & E1000_EXTCNF_SIZE_EXT_PCIE_LENGTH;
+        cnf_size >>= 16;
+        if (cnf_size) {
+            reg_data = er32(EXTCNF_CTRL);
+            cnf_base_addr = reg_data & E1000_EXTCNF_CTRL_EXT_CNF_POINTER;
+            /* cnf_base_addr is in DWORD */
+            cnf_base_addr >>= 16;
+
+            /* Configure LCD from extended configuration region. */
+            ret_val = e1000_init_lcd_from_nvm_config_region(hw, cnf_base_addr,
+                                                            cnf_size);
+            if (ret_val)
+                return ret_val;
+        }
+    }
+
+    return E1000_SUCCESS;
+}
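+
+/* For illustration with hypothetical register contents: if the masked
+ * EXTCNF_SIZE value above is 0x00020000, the shift by 16 gives a cnf_size of
+ * 2 dwords; the pointer field of EXTCNF_CTRL is extracted the same way and
+ * passed to e1000_init_lcd_from_nvm_config_region() as a dword base address.
+ */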
+
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/devices/e1000/e1000_hw-2.6.31-orig.h	Wed Apr 13 22:06:28 2011 +0200
@@ -0,0 +1,3406 @@
+/*******************************************************************************
+
+  Intel PRO/1000 Linux driver
+  Copyright(c) 1999 - 2006 Intel Corporation.
+
+  This program is free software; you can redistribute it and/or modify it
+  under the terms and conditions of the GNU General Public License,
+  version 2, as published by the Free Software Foundation.
+
+  This program is distributed in the hope it will be useful, but WITHOUT
+  ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+  FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
+  more details.
+
+  You should have received a copy of the GNU General Public License along with
+  this program; if not, write to the Free Software Foundation, Inc.,
+  51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
+
+  The full GNU General Public License is included in this distribution in
+  the file called "COPYING".
+
+  Contact Information:
+  Linux NICS <linux.nics@intel.com>
+  e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
+  Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
+
+*******************************************************************************/
+
+/* e1000_hw.h
+ * Structures, enums, and macros for the MAC
+ */
+
+#ifndef _E1000_HW_H_
+#define _E1000_HW_H_
+
+#include "e1000_osdep.h"
+
+
+/* Forward declarations of structures used by the shared code */
+struct e1000_hw;
+struct e1000_hw_stats;
+
+/* Enumerated types specific to the e1000 hardware */
+/* Media Access Controllers */
+typedef enum {
+    e1000_undefined = 0,
+    e1000_82542_rev2_0,
+    e1000_82542_rev2_1,
+    e1000_82543,
+    e1000_82544,
+    e1000_82540,
+    e1000_82545,
+    e1000_82545_rev_3,
+    e1000_82546,
+    e1000_82546_rev_3,
+    e1000_82541,
+    e1000_82541_rev_2,
+    e1000_82547,
+    e1000_82547_rev_2,
+    e1000_82571,
+    e1000_82572,
+    e1000_82573,
+    e1000_80003es2lan,
+    e1000_ich8lan,
+    e1000_num_macs
+} e1000_mac_type;
+
+typedef enum {
+    e1000_eeprom_uninitialized = 0,
+    e1000_eeprom_spi,
+    e1000_eeprom_microwire,
+    e1000_eeprom_flash,
+    e1000_eeprom_ich8,
+    e1000_eeprom_none, /* No NVM support */
+    e1000_num_eeprom_types
+} e1000_eeprom_type;
+
+/* Media Types */
+typedef enum {
+    e1000_media_type_copper = 0,
+    e1000_media_type_fiber = 1,
+    e1000_media_type_internal_serdes = 2,
+    e1000_num_media_types
+} e1000_media_type;
+
+typedef enum {
+    e1000_10_half = 0,
+    e1000_10_full = 1,
+    e1000_100_half = 2,
+    e1000_100_full = 3
+} e1000_speed_duplex_type;
+
+/* Flow Control Settings */
+typedef enum {
+    E1000_FC_NONE = 0,
+    E1000_FC_RX_PAUSE = 1,
+    E1000_FC_TX_PAUSE = 2,
+    E1000_FC_FULL = 3,
+    E1000_FC_DEFAULT = 0xFF
+} e1000_fc_type;
+
+struct e1000_shadow_ram {
+    u16 eeprom_word;
+    bool modified;
+};
+
+/* PCI bus types */
+typedef enum {
+    e1000_bus_type_unknown = 0,
+    e1000_bus_type_pci,
+    e1000_bus_type_pcix,
+    e1000_bus_type_pci_express,
+    e1000_bus_type_reserved
+} e1000_bus_type;
+
+/* PCI bus speeds */
+typedef enum {
+    e1000_bus_speed_unknown = 0,
+    e1000_bus_speed_33,
+    e1000_bus_speed_66,
+    e1000_bus_speed_100,
+    e1000_bus_speed_120,
+    e1000_bus_speed_133,
+    e1000_bus_speed_2500,
+    e1000_bus_speed_reserved
+} e1000_bus_speed;
+
+/* PCI bus widths */
+typedef enum {
+    e1000_bus_width_unknown = 0,
+    /* These PCIe values should literally match the possible return values
+     * from config space */
+    e1000_bus_width_pciex_1 = 1,
+    e1000_bus_width_pciex_2 = 2,
+    e1000_bus_width_pciex_4 = 4,
+    e1000_bus_width_32,
+    e1000_bus_width_64,
+    e1000_bus_width_reserved
+} e1000_bus_width;
+
+/* PHY status info structure and supporting enums */
+typedef enum {
+    e1000_cable_length_50 = 0,
+    e1000_cable_length_50_80,
+    e1000_cable_length_80_110,
+    e1000_cable_length_110_140,
+    e1000_cable_length_140,
+    e1000_cable_length_undefined = 0xFF
+} e1000_cable_length;
+
+typedef enum {
+    e1000_gg_cable_length_60 = 0,
+    e1000_gg_cable_length_60_115 = 1,
+    e1000_gg_cable_length_115_150 = 2,
+    e1000_gg_cable_length_150 = 4
+} e1000_gg_cable_length;
+
+typedef enum {
+    e1000_igp_cable_length_10  = 10,
+    e1000_igp_cable_length_20  = 20,
+    e1000_igp_cable_length_30  = 30,
+    e1000_igp_cable_length_40  = 40,
+    e1000_igp_cable_length_50  = 50,
+    e1000_igp_cable_length_60  = 60,
+    e1000_igp_cable_length_70  = 70,
+    e1000_igp_cable_length_80  = 80,
+    e1000_igp_cable_length_90  = 90,
+    e1000_igp_cable_length_100 = 100,
+    e1000_igp_cable_length_110 = 110,
+    e1000_igp_cable_length_115 = 115,
+    e1000_igp_cable_length_120 = 120,
+    e1000_igp_cable_length_130 = 130,
+    e1000_igp_cable_length_140 = 140,
+    e1000_igp_cable_length_150 = 150,
+    e1000_igp_cable_length_160 = 160,
+    e1000_igp_cable_length_170 = 170,
+    e1000_igp_cable_length_180 = 180
+} e1000_igp_cable_length;
+
+typedef enum {
+    e1000_10bt_ext_dist_enable_normal = 0,
+    e1000_10bt_ext_dist_enable_lower,
+    e1000_10bt_ext_dist_enable_undefined = 0xFF
+} e1000_10bt_ext_dist_enable;
+
+typedef enum {
+    e1000_rev_polarity_normal = 0,
+    e1000_rev_polarity_reversed,
+    e1000_rev_polarity_undefined = 0xFF
+} e1000_rev_polarity;
+
+typedef enum {
+    e1000_downshift_normal = 0,
+    e1000_downshift_activated,
+    e1000_downshift_undefined = 0xFF
+} e1000_downshift;
+
+typedef enum {
+    e1000_smart_speed_default = 0,
+    e1000_smart_speed_on,
+    e1000_smart_speed_off
+} e1000_smart_speed;
+
+typedef enum {
+    e1000_polarity_reversal_enabled = 0,
+    e1000_polarity_reversal_disabled,
+    e1000_polarity_reversal_undefined = 0xFF
+} e1000_polarity_reversal;
+
+typedef enum {
+    e1000_auto_x_mode_manual_mdi = 0,
+    e1000_auto_x_mode_manual_mdix,
+    e1000_auto_x_mode_auto1,
+    e1000_auto_x_mode_auto2,
+    e1000_auto_x_mode_undefined = 0xFF
+} e1000_auto_x_mode;
+
+typedef enum {
+    e1000_1000t_rx_status_not_ok = 0,
+    e1000_1000t_rx_status_ok,
+    e1000_1000t_rx_status_undefined = 0xFF
+} e1000_1000t_rx_status;
+
+typedef enum {
+    e1000_phy_m88 = 0,
+    e1000_phy_igp,
+    e1000_phy_igp_2,
+    e1000_phy_gg82563,
+    e1000_phy_igp_3,
+    e1000_phy_ife,
+    e1000_phy_undefined = 0xFF
+} e1000_phy_type;
+
+typedef enum {
+    e1000_ms_hw_default = 0,
+    e1000_ms_force_master,
+    e1000_ms_force_slave,
+    e1000_ms_auto
+} e1000_ms_type;
+
+typedef enum {
+    e1000_ffe_config_enabled = 0,
+    e1000_ffe_config_active,
+    e1000_ffe_config_blocked
+} e1000_ffe_config;
+
+typedef enum {
+    e1000_dsp_config_disabled = 0,
+    e1000_dsp_config_enabled,
+    e1000_dsp_config_activated,
+    e1000_dsp_config_undefined = 0xFF
+} e1000_dsp_config;
+
+struct e1000_phy_info {
+    e1000_cable_length cable_length;
+    e1000_10bt_ext_dist_enable extended_10bt_distance;
+    e1000_rev_polarity cable_polarity;
+    e1000_downshift downshift;
+    e1000_polarity_reversal polarity_correction;
+    e1000_auto_x_mode mdix_mode;
+    e1000_1000t_rx_status local_rx;
+    e1000_1000t_rx_status remote_rx;
+};
+
+struct e1000_phy_stats {
+    u32 idle_errors;
+    u32 receive_errors;
+};
+
+struct e1000_eeprom_info {
+    e1000_eeprom_type type;
+    u16 word_size;
+    u16 opcode_bits;
+    u16 address_bits;
+    u16 delay_usec;
+    u16 page_size;
+    bool use_eerd;
+    bool use_eewr;
+};
+
+/* Flex ASF Information */
+#define E1000_HOST_IF_MAX_SIZE  2048
+
+typedef enum {
+    e1000_byte_align = 0,
+    e1000_word_align = 1,
+    e1000_dword_align = 2
+} e1000_align_type;
+
+
+
+/* Error Codes */
+#define E1000_SUCCESS      0
+#define E1000_ERR_EEPROM   1
+#define E1000_ERR_PHY      2
+#define E1000_ERR_CONFIG   3
+#define E1000_ERR_PARAM    4
+#define E1000_ERR_MAC_TYPE 5
+#define E1000_ERR_PHY_TYPE 6
+#define E1000_ERR_RESET   9
+#define E1000_ERR_MASTER_REQUESTS_PENDING 10
+#define E1000_ERR_HOST_INTERFACE_COMMAND 11
+#define E1000_BLK_PHY_RESET   12
+#define E1000_ERR_SWFW_SYNC 13
+
+#define E1000_BYTE_SWAP_WORD(_value) ((((_value) & 0x00ff) << 8) | \
+                                     (((_value) & 0xff00) >> 8))
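+
+/* For example (illustrative value only): E1000_BYTE_SWAP_WORD(0x1234)
+ * evaluates to 0x3412. */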
+
+/* Function prototypes */
+/* Initialization */
+s32 e1000_reset_hw(struct e1000_hw *hw);
+s32 e1000_init_hw(struct e1000_hw *hw);
+s32 e1000_set_mac_type(struct e1000_hw *hw);
+void e1000_set_media_type(struct e1000_hw *hw);
+
+/* Link Configuration */
+s32 e1000_setup_link(struct e1000_hw *hw);
+s32 e1000_phy_setup_autoneg(struct e1000_hw *hw);
+void e1000_config_collision_dist(struct e1000_hw *hw);
+s32 e1000_check_for_link(struct e1000_hw *hw);
+s32 e1000_get_speed_and_duplex(struct e1000_hw *hw, u16 *speed, u16 *duplex);
+s32 e1000_force_mac_fc(struct e1000_hw *hw);
+
+/* PHY */
+s32 e1000_read_phy_reg(struct e1000_hw *hw, u32 reg_addr, u16 *phy_data);
+s32 e1000_write_phy_reg(struct e1000_hw *hw, u32 reg_addr, u16 data);
+s32 e1000_phy_hw_reset(struct e1000_hw *hw);
+s32 e1000_phy_reset(struct e1000_hw *hw);
+s32 e1000_phy_get_info(struct e1000_hw *hw, struct e1000_phy_info *phy_info);
+s32 e1000_validate_mdi_setting(struct e1000_hw *hw);
+
+void e1000_phy_powerdown_workaround(struct e1000_hw *hw);
+
+/* EEPROM Functions */
+s32 e1000_init_eeprom_params(struct e1000_hw *hw);
+
+/* MNG HOST IF functions */
+u32 e1000_enable_mng_pass_thru(struct e1000_hw *hw);
+
+#define E1000_MNG_DHCP_TX_PAYLOAD_CMD   64
+#define E1000_HI_MAX_MNG_DATA_LENGTH    0x6F8   /* Host Interface data length */
+
+#define E1000_MNG_DHCP_COMMAND_TIMEOUT  10      /* Time in ms to process MNG command */
+#define E1000_MNG_DHCP_COOKIE_OFFSET    0x6F0   /* Cookie offset */
+#define E1000_MNG_DHCP_COOKIE_LENGTH    0x10    /* Cookie length */
+#define E1000_MNG_IAMT_MODE             0x3
+#define E1000_MNG_ICH_IAMT_MODE         0x2
+#define E1000_IAMT_SIGNATURE            0x544D4149 /* Intel(R) Active Management Technology signature */
+
+#define E1000_MNG_DHCP_COOKIE_STATUS_PARSING_SUPPORT 0x1 /* DHCP parsing enabled */
+#define E1000_MNG_DHCP_COOKIE_STATUS_VLAN_SUPPORT    0x2 /* DHCP VLAN support enabled */
+#define E1000_VFTA_ENTRY_SHIFT                       0x5
+#define E1000_VFTA_ENTRY_MASK                        0x7F
+#define E1000_VFTA_ENTRY_BIT_SHIFT_MASK              0x1F
+
+struct e1000_host_mng_command_header {
+    u8 command_id;
+    u8 checksum;
+    u16 reserved1;
+    u16 reserved2;
+    u16 command_length;
+};
+
+struct e1000_host_mng_command_info {
+    struct e1000_host_mng_command_header command_header;  /* Command Head/Command Result Head has 4 bytes */
+    u8 command_data[E1000_HI_MAX_MNG_DATA_LENGTH];   /* Command data can be of length 0..0x658 */
+};
+#ifdef __BIG_ENDIAN
+struct e1000_host_mng_dhcp_cookie{
+    u32 signature;
+    u16 vlan_id;
+    u8 reserved0;
+    u8 status;
+    u32 reserved1;
+    u8 checksum;
+    u8 reserved3;
+    u16 reserved2;
+};
+#else
+struct e1000_host_mng_dhcp_cookie{
+    u32 signature;
+    u8 status;
+    u8 reserved0;
+    u16 vlan_id;
+    u32 reserved1;
+    u16 reserved2;
+    u8 reserved3;
+    u8 checksum;
+};
+#endif
+
+s32 e1000_mng_write_dhcp_info(struct e1000_hw *hw, u8 *buffer,
+                                  u16 length);
+bool e1000_check_mng_mode(struct e1000_hw *hw);
+bool e1000_enable_tx_pkt_filtering(struct e1000_hw *hw);
+s32 e1000_read_eeprom(struct e1000_hw *hw, u16 reg, u16 words, u16 *data);
+s32 e1000_validate_eeprom_checksum(struct e1000_hw *hw);
+s32 e1000_update_eeprom_checksum(struct e1000_hw *hw);
+s32 e1000_write_eeprom(struct e1000_hw *hw, u16 reg, u16 words, u16 *data);
+s32 e1000_read_mac_addr(struct e1000_hw * hw);
+
+/* Filters (multicast, vlan, receive) */
+u32 e1000_hash_mc_addr(struct e1000_hw *hw, u8 * mc_addr);
+void e1000_mta_set(struct e1000_hw *hw, u32 hash_value);
+void e1000_rar_set(struct e1000_hw *hw, u8 * mc_addr, u32 rar_index);
+void e1000_write_vfta(struct e1000_hw *hw, u32 offset, u32 value);
+
+/* LED functions */
+s32 e1000_setup_led(struct e1000_hw *hw);
+s32 e1000_cleanup_led(struct e1000_hw *hw);
+s32 e1000_led_on(struct e1000_hw *hw);
+s32 e1000_led_off(struct e1000_hw *hw);
+s32 e1000_blink_led_start(struct e1000_hw *hw);
+
+/* Adaptive IFS Functions */
+
+/* Everything else */
+void e1000_reset_adaptive(struct e1000_hw *hw);
+void e1000_update_adaptive(struct e1000_hw *hw);
+void e1000_tbi_adjust_stats(struct e1000_hw *hw, struct e1000_hw_stats *stats, u32 frame_len, u8 * mac_addr);
+void e1000_get_bus_info(struct e1000_hw *hw);
+void e1000_pci_set_mwi(struct e1000_hw *hw);
+void e1000_pci_clear_mwi(struct e1000_hw *hw);
+s32 e1000_read_pcie_cap_reg(struct e1000_hw *hw, u32 reg, u16 *value);
+void e1000_pcix_set_mmrbc(struct e1000_hw *hw, int mmrbc);
+int e1000_pcix_get_mmrbc(struct e1000_hw *hw);
+/* Port I/O is only supported on 82544 and newer */
+void e1000_io_write(struct e1000_hw *hw, unsigned long port, u32 value);
+s32 e1000_disable_pciex_master(struct e1000_hw *hw);
+s32 e1000_check_phy_reset_block(struct e1000_hw *hw);
+
+
+#define E1000_READ_REG_IO(a, reg) \
+    e1000_read_reg_io((a), E1000_##reg)
+#define E1000_WRITE_REG_IO(a, reg, val) \
+    e1000_write_reg_io((a), E1000_##reg, val)
+
+/* PCI Device IDs */
+#define E1000_DEV_ID_82542               0x1000
+#define E1000_DEV_ID_82543GC_FIBER       0x1001
+#define E1000_DEV_ID_82543GC_COPPER      0x1004
+#define E1000_DEV_ID_82544EI_COPPER      0x1008
+#define E1000_DEV_ID_82544EI_FIBER       0x1009
+#define E1000_DEV_ID_82544GC_COPPER      0x100C
+#define E1000_DEV_ID_82544GC_LOM         0x100D
+#define E1000_DEV_ID_82540EM             0x100E
+#define E1000_DEV_ID_82540EM_LOM         0x1015
+#define E1000_DEV_ID_82540EP_LOM         0x1016
+#define E1000_DEV_ID_82540EP             0x1017
+#define E1000_DEV_ID_82540EP_LP          0x101E
+#define E1000_DEV_ID_82545EM_COPPER      0x100F
+#define E1000_DEV_ID_82545EM_FIBER       0x1011
+#define E1000_DEV_ID_82545GM_COPPER      0x1026
+#define E1000_DEV_ID_82545GM_FIBER       0x1027
+#define E1000_DEV_ID_82545GM_SERDES      0x1028
+#define E1000_DEV_ID_82546EB_COPPER      0x1010
+#define E1000_DEV_ID_82546EB_FIBER       0x1012
+#define E1000_DEV_ID_82546EB_QUAD_COPPER 0x101D
+#define E1000_DEV_ID_82541EI             0x1013
+#define E1000_DEV_ID_82541EI_MOBILE      0x1018
+#define E1000_DEV_ID_82541ER_LOM         0x1014
+#define E1000_DEV_ID_82541ER             0x1078
+#define E1000_DEV_ID_82547GI             0x1075
+#define E1000_DEV_ID_82541GI             0x1076
+#define E1000_DEV_ID_82541GI_MOBILE      0x1077
+#define E1000_DEV_ID_82541GI_LF          0x107C
+#define E1000_DEV_ID_82546GB_COPPER      0x1079
+#define E1000_DEV_ID_82546GB_FIBER       0x107A
+#define E1000_DEV_ID_82546GB_SERDES      0x107B
+#define E1000_DEV_ID_82546GB_PCIE        0x108A
+#define E1000_DEV_ID_82546GB_QUAD_COPPER 0x1099
+#define E1000_DEV_ID_82547EI             0x1019
+#define E1000_DEV_ID_82547EI_MOBILE      0x101A
+#define E1000_DEV_ID_82571EB_COPPER      0x105E
+#define E1000_DEV_ID_82571EB_FIBER       0x105F
+#define E1000_DEV_ID_82571EB_SERDES      0x1060
+#define E1000_DEV_ID_82571EB_QUAD_COPPER 0x10A4
+#define E1000_DEV_ID_82571PT_QUAD_COPPER 0x10D5
+#define E1000_DEV_ID_82571EB_QUAD_FIBER  0x10A5
+#define E1000_DEV_ID_82571EB_QUAD_COPPER_LOWPROFILE  0x10BC
+#define E1000_DEV_ID_82571EB_SERDES_DUAL 0x10D9
+#define E1000_DEV_ID_82571EB_SERDES_QUAD 0x10DA
+#define E1000_DEV_ID_82572EI_COPPER      0x107D
+#define E1000_DEV_ID_82572EI_FIBER       0x107E
+#define E1000_DEV_ID_82572EI_SERDES      0x107F
+#define E1000_DEV_ID_82572EI             0x10B9
+#define E1000_DEV_ID_82573E              0x108B
+#define E1000_DEV_ID_82573E_IAMT         0x108C
+#define E1000_DEV_ID_82573L              0x109A
+#define E1000_DEV_ID_82546GB_QUAD_COPPER_KSP3 0x10B5
+#define E1000_DEV_ID_80003ES2LAN_COPPER_DPT     0x1096
+#define E1000_DEV_ID_80003ES2LAN_SERDES_DPT     0x1098
+#define E1000_DEV_ID_80003ES2LAN_COPPER_SPT     0x10BA
+#define E1000_DEV_ID_80003ES2LAN_SERDES_SPT     0x10BB
+
+#define E1000_DEV_ID_ICH8_IGP_M_AMT      0x1049
+#define E1000_DEV_ID_ICH8_IGP_AMT        0x104A
+#define E1000_DEV_ID_ICH8_IGP_C          0x104B
+#define E1000_DEV_ID_ICH8_IFE            0x104C
+#define E1000_DEV_ID_ICH8_IFE_GT         0x10C4
+#define E1000_DEV_ID_ICH8_IFE_G          0x10C5
+#define E1000_DEV_ID_ICH8_IGP_M          0x104D
+
+
+#define NODE_ADDRESS_SIZE 6
+#define ETH_LENGTH_OF_ADDRESS 6
+
+/* MAC decode size is 128K - This is the size of BAR0 */
+#define MAC_DECODE_SIZE (128 * 1024)
+
+#define E1000_82542_2_0_REV_ID 2
+#define E1000_82542_2_1_REV_ID 3
+#define E1000_REVISION_0       0
+#define E1000_REVISION_1       1
+#define E1000_REVISION_2       2
+#define E1000_REVISION_3       3
+
+#define SPEED_10    10
+#define SPEED_100   100
+#define SPEED_1000  1000
+#define HALF_DUPLEX 1
+#define FULL_DUPLEX 2
+
+/* The sizes (in bytes) of an Ethernet packet */
+#define ENET_HEADER_SIZE             14
+#define MAXIMUM_ETHERNET_FRAME_SIZE  1518 /* With FCS */
+#define MINIMUM_ETHERNET_FRAME_SIZE  64   /* With FCS */
+#define ETHERNET_FCS_SIZE            4
+#define MAXIMUM_ETHERNET_PACKET_SIZE \
+    (MAXIMUM_ETHERNET_FRAME_SIZE - ETHERNET_FCS_SIZE)
+#define MINIMUM_ETHERNET_PACKET_SIZE \
+    (MINIMUM_ETHERNET_FRAME_SIZE - ETHERNET_FCS_SIZE)
+#define CRC_LENGTH                   ETHERNET_FCS_SIZE
+#define MAX_JUMBO_FRAME_SIZE         0x3F00
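+
+/* With the values above, MAXIMUM_ETHERNET_PACKET_SIZE works out to
+ * 1518 - 4 = 1514 bytes and MINIMUM_ETHERNET_PACKET_SIZE to 64 - 4 = 60
+ * bytes, i.e. the frame sizes without the trailing FCS. */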
+
+
+/* 802.1q VLAN Packet Sizes */
+#define VLAN_TAG_SIZE  4     /* 802.3ac tag (not DMAed) */
+
+/* Ethertype field values */
+#define ETHERNET_IEEE_VLAN_TYPE 0x8100  /* 802.3ac packet */
+#define ETHERNET_IP_TYPE        0x0800  /* IP packets */
+#define ETHERNET_ARP_TYPE       0x0806  /* Address Resolution Protocol (ARP) */
+
+/* Packet Header defines */
+#define IP_PROTOCOL_TCP    6
+#define IP_PROTOCOL_UDP    0x11
+
+/* This defines the bits that are set in the Interrupt Mask
+ * Set/Read Register.  Each bit is documented below:
+ *   o RXDMT0 = Receive Descriptor Minimum Threshold hit (ring 0)
+ *   o RXSEQ  = Receive Sequence Error
+ */
+#define POLL_IMS_ENABLE_MASK ( \
+    E1000_IMS_RXDMT0 |         \
+    E1000_IMS_RXSEQ)
+
+/* This defines the bits that are set in the Interrupt Mask
+ * Set/Read Register.  Each bit is documented below:
+ *   o RXT0   = Receiver Timer Interrupt (ring 0)
+ *   o TXDW   = Transmit Descriptor Written Back
+ *   o RXDMT0 = Receive Descriptor Minimum Threshold hit (ring 0)
+ *   o RXSEQ  = Receive Sequence Error
+ *   o LSC    = Link Status Change
+ */
+#define IMS_ENABLE_MASK ( \
+    E1000_IMS_RXT0   |    \
+    E1000_IMS_TXDW   |    \
+    E1000_IMS_RXDMT0 |    \
+    E1000_IMS_RXSEQ  |    \
+    E1000_IMS_LSC)
+
+/* Additional interrupts need to be handled for e1000_ich8lan:
+    DSW = The FW changed the status of the DISSW bit in FWSM
+    PHYINT = The LAN connected device generates an interrupt
+    EPRST = Manageability reset event */
+#define IMS_ICH8LAN_ENABLE_MASK (\
+    E1000_IMS_DSW   | \
+    E1000_IMS_PHYINT | \
+    E1000_IMS_EPRST)
+
+/* Number of high/low register pairs in the RAR. The RAR (Receive Address
+ * Registers) holds the directed and multicast addresses that we monitor. We
+ * reserve one of these spots for our directed address, allowing us room for
+ * E1000_RAR_ENTRIES - 1 multicast addresses.
+ */
+#define E1000_RAR_ENTRIES 15
+
+#define E1000_RAR_ENTRIES_ICH8LAN  6
+
+#define MIN_NUMBER_OF_DESCRIPTORS  8
+#define MAX_NUMBER_OF_DESCRIPTORS  0xFFF8
+
+/* Receive Descriptor */
+struct e1000_rx_desc {
+    __le64 buffer_addr; /* Address of the descriptor's data buffer */
+    __le16 length;     /* Length of data DMAed into data buffer */
+    __le16 csum;       /* Packet checksum */
+    u8 status;      /* Descriptor status */
+    u8 errors;      /* Descriptor Errors */
+    __le16 special;
+};
+
+/* Receive Descriptor - Extended */
+union e1000_rx_desc_extended {
+    struct {
+        __le64 buffer_addr;
+        __le64 reserved;
+    } read;
+    struct {
+        struct {
+            __le32 mrq;              /* Multiple Rx Queues */
+            union {
+                __le32 rss;          /* RSS Hash */
+                struct {
+                    __le16 ip_id;    /* IP id */
+                    __le16 csum;     /* Packet Checksum */
+                } csum_ip;
+            } hi_dword;
+        } lower;
+        struct {
+            __le32 status_error;     /* ext status/error */
+            __le16 length;
+            __le16 vlan;             /* VLAN tag */
+        } upper;
+    } wb;  /* writeback */
+};
+
+#define MAX_PS_BUFFERS 4
+/* Receive Descriptor - Packet Split */
+union e1000_rx_desc_packet_split {
+    struct {
+        /* one buffer for protocol header(s), three data buffers */
+        __le64 buffer_addr[MAX_PS_BUFFERS];
+    } read;
+    struct {
+        struct {
+            __le32 mrq;              /* Multiple Rx Queues */
+            union {
+                __le32 rss;          /* RSS Hash */
+                struct {
+                    __le16 ip_id;    /* IP id */
+                    __le16 csum;     /* Packet Checksum */
+                } csum_ip;
+            } hi_dword;
+        } lower;
+        struct {
+            __le32 status_error;     /* ext status/error */
+            __le16 length0;          /* length of buffer 0 */
+            __le16 vlan;             /* VLAN tag */
+        } middle;
+        struct {
+            __le16 header_status;
+            __le16 length[3];        /* length of buffers 1-3 */
+        } upper;
+        __le64 reserved;
+    } wb; /* writeback */
+};
+
+/* Receive Descriptor bit definitions */
+#define E1000_RXD_STAT_DD       0x01    /* Descriptor Done */
+#define E1000_RXD_STAT_EOP      0x02    /* End of Packet */
+#define E1000_RXD_STAT_IXSM     0x04    /* Ignore checksum */
+#define E1000_RXD_STAT_VP       0x08    /* IEEE VLAN Packet */
+#define E1000_RXD_STAT_UDPCS    0x10    /* UDP xsum calculated */
+#define E1000_RXD_STAT_TCPCS    0x20    /* TCP xsum calculated */
+#define E1000_RXD_STAT_IPCS     0x40    /* IP xsum calculated */
+#define E1000_RXD_STAT_PIF      0x80    /* passed in-exact filter */
+#define E1000_RXD_STAT_IPIDV    0x200   /* IP identification valid */
+#define E1000_RXD_STAT_UDPV     0x400   /* Valid UDP checksum */
+#define E1000_RXD_STAT_ACK      0x8000  /* ACK Packet indication */
+#define E1000_RXD_ERR_CE        0x01    /* CRC Error */
+#define E1000_RXD_ERR_SE        0x02    /* Symbol Error */
+#define E1000_RXD_ERR_SEQ       0x04    /* Sequence Error */
+#define E1000_RXD_ERR_CXE       0x10    /* Carrier Extension Error */
+#define E1000_RXD_ERR_TCPE      0x20    /* TCP/UDP Checksum Error */
+#define E1000_RXD_ERR_IPE       0x40    /* IP Checksum Error */
+#define E1000_RXD_ERR_RXE       0x80    /* Rx Data Error */
+#define E1000_RXD_SPC_VLAN_MASK 0x0FFF  /* VLAN ID is in lower 12 bits */
+#define E1000_RXD_SPC_PRI_MASK  0xE000  /* Priority is in upper 3 bits */
+#define E1000_RXD_SPC_PRI_SHIFT 13
+#define E1000_RXD_SPC_CFI_MASK  0x1000  /* CFI is bit 12 */
+#define E1000_RXD_SPC_CFI_SHIFT 12
+
+#define E1000_RXDEXT_STATERR_CE    0x01000000
+#define E1000_RXDEXT_STATERR_SE    0x02000000
+#define E1000_RXDEXT_STATERR_SEQ   0x04000000
+#define E1000_RXDEXT_STATERR_CXE   0x10000000
+#define E1000_RXDEXT_STATERR_TCPE  0x20000000
+#define E1000_RXDEXT_STATERR_IPE   0x40000000
+#define E1000_RXDEXT_STATERR_RXE   0x80000000
+
+#define E1000_RXDPS_HDRSTAT_HDRSP        0x00008000
+#define E1000_RXDPS_HDRSTAT_HDRLEN_MASK  0x000003FF
+
+/* mask to determine if packets should be dropped due to frame errors */
+#define E1000_RXD_ERR_FRAME_ERR_MASK ( \
+    E1000_RXD_ERR_CE  |                \
+    E1000_RXD_ERR_SE  |                \
+    E1000_RXD_ERR_SEQ |                \
+    E1000_RXD_ERR_CXE |                \
+    E1000_RXD_ERR_RXE)
+
+
+/* Same mask, but for extended and packet split descriptors */
+#define E1000_RXDEXT_ERR_FRAME_ERR_MASK ( \
+    E1000_RXDEXT_STATERR_CE  |            \
+    E1000_RXDEXT_STATERR_SE  |            \
+    E1000_RXDEXT_STATERR_SEQ |            \
+    E1000_RXDEXT_STATERR_CXE |            \
+    E1000_RXDEXT_STATERR_RXE)
+
+
+/* Transmit Descriptor */
+struct e1000_tx_desc {
+    __le64 buffer_addr;       /* Address of the descriptor's data buffer */
+    union {
+        __le32 data;
+        struct {
+            __le16 length;    /* Data buffer length */
+            u8 cso;        /* Checksum offset */
+            u8 cmd;        /* Descriptor control */
+        } flags;
+    } lower;
+    union {
+        __le32 data;
+        struct {
+            u8 status;     /* Descriptor status */
+            u8 css;        /* Checksum start */
+            __le16 special;
+        } fields;
+    } upper;
+};
+
+/* Transmit Descriptor bit definitions */
+#define E1000_TXD_DTYP_D     0x00100000 /* Data Descriptor */
+#define E1000_TXD_DTYP_C     0x00000000 /* Context Descriptor */
+#define E1000_TXD_POPTS_IXSM 0x01       /* Insert IP checksum */
+#define E1000_TXD_POPTS_TXSM 0x02       /* Insert TCP/UDP checksum */
+#define E1000_TXD_CMD_EOP    0x01000000 /* End of Packet */
+#define E1000_TXD_CMD_IFCS   0x02000000 /* Insert FCS (Ethernet CRC) */
+#define E1000_TXD_CMD_IC     0x04000000 /* Insert Checksum */
+#define E1000_TXD_CMD_RS     0x08000000 /* Report Status */
+#define E1000_TXD_CMD_RPS    0x10000000 /* Report Packet Sent */
+#define E1000_TXD_CMD_DEXT   0x20000000 /* Descriptor extension (0 = legacy) */
+#define E1000_TXD_CMD_VLE    0x40000000 /* Add VLAN tag */
+#define E1000_TXD_CMD_IDE    0x80000000 /* Enable Tidv register */
+#define E1000_TXD_STAT_DD    0x00000001 /* Descriptor Done */
+#define E1000_TXD_STAT_EC    0x00000002 /* Excess Collisions */
+#define E1000_TXD_STAT_LC    0x00000004 /* Late Collisions */
+#define E1000_TXD_STAT_TU    0x00000008 /* Transmit underrun */
+#define E1000_TXD_CMD_TCP    0x01000000 /* TCP packet */
+#define E1000_TXD_CMD_IP     0x02000000 /* IP packet */
+#define E1000_TXD_CMD_TSE    0x04000000 /* TCP Seg enable */
+#define E1000_TXD_STAT_TC    0x00000004 /* Tx Underrun */
+
+/* Offload Context Descriptor */
+struct e1000_context_desc {
+    union {
+        __le32 ip_config;
+        struct {
+            u8 ipcss;      /* IP checksum start */
+            u8 ipcso;      /* IP checksum offset */
+            __le16 ipcse;     /* IP checksum end */
+        } ip_fields;
+    } lower_setup;
+    union {
+        __le32 tcp_config;
+        struct {
+            u8 tucss;      /* TCP checksum start */
+            u8 tucso;      /* TCP checksum offset */
+            __le16 tucse;     /* TCP checksum end */
+        } tcp_fields;
+    } upper_setup;
+    __le32 cmd_and_length;    /* */
+    union {
+        __le32 data;
+        struct {
+            u8 status;     /* Descriptor status */
+            u8 hdr_len;    /* Header length */
+            __le16 mss;       /* Maximum segment size */
+        } fields;
+    } tcp_seg_setup;
+};
+
+/* Offload data descriptor */
+struct e1000_data_desc {
+    __le64 buffer_addr;       /* Address of the descriptor's buffer address */
+    union {
+        __le32 data;
+        struct {
+            __le16 length;    /* Data buffer length */
+            u8 typ_len_ext;        /* */
+            u8 cmd;        /* */
+        } flags;
+    } lower;
+    union {
+        __le32 data;
+        struct {
+            u8 status;     /* Descriptor status */
+            u8 popts;      /* Packet Options */
+            __le16 special;   /* */
+        } fields;
+    } upper;
+};
+
+/* Filters */
+#define E1000_NUM_UNICAST          16   /* Unicast filter entries */
+#define E1000_MC_TBL_SIZE          128  /* Multicast Filter Table (4096 bits) */
+#define E1000_VLAN_FILTER_TBL_SIZE 128  /* VLAN Filter Table (4096 bits) */
+
+#define E1000_NUM_UNICAST_ICH8LAN  7
+#define E1000_MC_TBL_SIZE_ICH8LAN  32
+
+
+/* Receive Address Register */
+struct e1000_rar {
+    volatile __le32 low;      /* receive address low */
+    volatile __le32 high;     /* receive address high */
+};
+
+/* Number of entries in the Multicast Table Array (MTA). */
+#define E1000_NUM_MTA_REGISTERS 128
+#define E1000_NUM_MTA_REGISTERS_ICH8LAN 32
+
+/* IPv4 Address Table Entry */
+struct e1000_ipv4_at_entry {
+    volatile u32 ipv4_addr;        /* IP Address (RW) */
+    volatile u32 reserved;
+};
+
+/* Four wakeup IP addresses are supported */
+#define E1000_WAKEUP_IP_ADDRESS_COUNT_MAX 4
+#define E1000_IP4AT_SIZE                  E1000_WAKEUP_IP_ADDRESS_COUNT_MAX
+#define E1000_IP4AT_SIZE_ICH8LAN          3
+#define E1000_IP6AT_SIZE                  1
+
+/* IPv6 Address Table Entry */
+struct e1000_ipv6_at_entry {
+    volatile u8 ipv6_addr[16];
+};
+
+/* Flexible Filter Length Table Entry */
+struct e1000_fflt_entry {
+    volatile u32 length;   /* Flexible Filter Length (RW) */
+    volatile u32 reserved;
+};
+
+/* Flexible Filter Mask Table Entry */
+struct e1000_ffmt_entry {
+    volatile u32 mask;     /* Flexible Filter Mask (RW) */
+    volatile u32 reserved;
+};
+
+/* Flexible Filter Value Table Entry */
+struct e1000_ffvt_entry {
+    volatile u32 value;    /* Flexible Filter Value (RW) */
+    volatile u32 reserved;
+};
+
+/* Four Flexible Filters are supported */
+#define E1000_FLEXIBLE_FILTER_COUNT_MAX 4
+
+/* Each Flexible Filter is at most 128 (0x80) bytes in length */
+#define E1000_FLEXIBLE_FILTER_SIZE_MAX  128
+
+#define E1000_FFLT_SIZE E1000_FLEXIBLE_FILTER_COUNT_MAX
+#define E1000_FFMT_SIZE E1000_FLEXIBLE_FILTER_SIZE_MAX
+#define E1000_FFVT_SIZE E1000_FLEXIBLE_FILTER_SIZE_MAX
+
+#define E1000_DISABLE_SERDES_LOOPBACK   0x0400
+
+/* Register Set. (82543, 82544)
+ *
+ * Registers are defined to be 32 bits and should be accessed as 32-bit values.
+ * These registers are physically located on the NIC, but are mapped into the
+ * host memory address space.
+ *
+ * RW - register is both readable and writable
+ * RO - register is read only
+ * WO - register is write only
+ * R/clr - register is read only and is cleared when read
+ * A - register array
+ */
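+/* Illustrative sketch (not from the original Intel sources): since these
+ * registers are memory-mapped and 32 bits wide, they are normally accessed
+ * with 32-bit MMIO helpers; `hw->hw_addr` below is assumed to be the
+ * ioremap()ed base of the register BAR:
+ *
+ *     u32 status = readl(hw->hw_addr + E1000_STATUS);
+ *     writel(readl(hw->hw_addr + E1000_CTRL) | E1000_CTRL_SLU,
+ *            hw->hw_addr + E1000_CTRL);
+ *
+ * The driver itself wraps such accesses in its own macros; the lines above
+ * only show the 32-bit access pattern.
+ */
+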
+#define E1000_CTRL     0x00000  /* Device Control - RW */
+#define E1000_CTRL_DUP 0x00004  /* Device Control Duplicate (Shadow) - RW */
+#define E1000_STATUS   0x00008  /* Device Status - RO */
+#define E1000_EECD     0x00010  /* EEPROM/Flash Control - RW */
+#define E1000_EERD     0x00014  /* EEPROM Read - RW */
+#define E1000_CTRL_EXT 0x00018  /* Extended Device Control - RW */
+#define E1000_FLA      0x0001C  /* Flash Access - RW */
+#define E1000_MDIC     0x00020  /* MDI Control - RW */
+#define E1000_SCTL     0x00024  /* SerDes Control - RW */
+#define E1000_FEXTNVM  0x00028  /* Future Extended NVM register */
+#define E1000_FCAL     0x00028  /* Flow Control Address Low - RW */
+#define E1000_FCAH     0x0002C  /* Flow Control Address High - RW */
+#define E1000_FCT      0x00030  /* Flow Control Type - RW */
+#define E1000_VET      0x00038  /* VLAN Ether Type - RW */
+#define E1000_ICR      0x000C0  /* Interrupt Cause Read - R/clr */
+#define E1000_ITR      0x000C4  /* Interrupt Throttling Rate - RW */
+#define E1000_ICS      0x000C8  /* Interrupt Cause Set - WO */
+#define E1000_IMS      0x000D0  /* Interrupt Mask Set - RW */
+#define E1000_IMC      0x000D8  /* Interrupt Mask Clear - WO */
+#define E1000_IAM      0x000E0  /* Interrupt Acknowledge Auto Mask */
+#define E1000_RCTL     0x00100  /* RX Control - RW */
+#define E1000_RDTR1    0x02820  /* RX Delay Timer (1) - RW */
+#define E1000_RDBAL1   0x02900  /* RX Descriptor Base Address Low (1) - RW */
+#define E1000_RDBAH1   0x02904  /* RX Descriptor Base Address High (1) - RW */
+#define E1000_RDLEN1   0x02908  /* RX Descriptor Length (1) - RW */
+#define E1000_RDH1     0x02910  /* RX Descriptor Head (1) - RW */
+#define E1000_RDT1     0x02918  /* RX Descriptor Tail (1) - RW */
+#define E1000_FCTTV    0x00170  /* Flow Control Transmit Timer Value - RW */
+#define E1000_TXCW     0x00178  /* TX Configuration Word - RW */
+#define E1000_RXCW     0x00180  /* RX Configuration Word - RO */
+#define E1000_TCTL     0x00400  /* TX Control - RW */
+#define E1000_TCTL_EXT 0x00404  /* Extended TX Control - RW */
+#define E1000_TIPG     0x00410  /* TX Inter-packet gap - RW */
+#define E1000_TBT      0x00448  /* TX Burst Timer - RW */
+#define E1000_AIT      0x00458  /* Adaptive Interframe Spacing Throttle - RW */
+#define E1000_LEDCTL   0x00E00  /* LED Control - RW */
+#define E1000_EXTCNF_CTRL  0x00F00  /* Extended Configuration Control */
+#define E1000_EXTCNF_SIZE  0x00F08  /* Extended Configuration Size */
+#define E1000_PHY_CTRL     0x00F10  /* PHY Control Register in CSR */
+#define FEXTNVM_SW_CONFIG  0x0001
+#define E1000_PBA      0x01000  /* Packet Buffer Allocation - RW */
+#define E1000_PBS      0x01008  /* Packet Buffer Size */
+#define E1000_EEMNGCTL 0x01010  /* MNG EEprom Control */
+#define E1000_FLASH_UPDATES 1000
+#define E1000_EEARBC   0x01024  /* EEPROM Auto Read Bus Control */
+#define E1000_FLASHT   0x01028  /* FLASH Timer Register */
+#define E1000_EEWR     0x0102C  /* EEPROM Write Register - RW */
+#define E1000_FLSWCTL  0x01030  /* FLASH control register */
+#define E1000_FLSWDATA 0x01034  /* FLASH data register */
+#define E1000_FLSWCNT  0x01038  /* FLASH Access Counter */
+#define E1000_FLOP     0x0103C  /* FLASH Opcode Register */
+#define E1000_ERT      0x02008  /* Early Rx Threshold - RW */
+#define E1000_FCRTL    0x02160  /* Flow Control Receive Threshold Low - RW */
+#define E1000_FCRTH    0x02168  /* Flow Control Receive Threshold High - RW */
+#define E1000_PSRCTL   0x02170  /* Packet Split Receive Control - RW */
+#define E1000_RDBAL    0x02800  /* RX Descriptor Base Address Low - RW */
+#define E1000_RDBAH    0x02804  /* RX Descriptor Base Address High - RW */
+#define E1000_RDLEN    0x02808  /* RX Descriptor Length - RW */
+#define E1000_RDH      0x02810  /* RX Descriptor Head - RW */
+#define E1000_RDT      0x02818  /* RX Descriptor Tail - RW */
+#define E1000_RDTR     0x02820  /* RX Delay Timer - RW */
+#define E1000_RDBAL0   E1000_RDBAL /* RX Desc Base Address Low (0) - RW */
+#define E1000_RDBAH0   E1000_RDBAH /* RX Desc Base Address High (0) - RW */
+#define E1000_RDLEN0   E1000_RDLEN /* RX Desc Length (0) - RW */
+#define E1000_RDH0     E1000_RDH   /* RX Desc Head (0) - RW */
+#define E1000_RDT0     E1000_RDT   /* RX Desc Tail (0) - RW */
+#define E1000_RDTR0    E1000_RDTR  /* RX Delay Timer (0) - RW */
+#define E1000_RXDCTL   0x02828  /* RX Descriptor Control queue 0 - RW */
+#define E1000_RXDCTL1  0x02928  /* RX Descriptor Control queue 1 - RW */
+#define E1000_RADV     0x0282C  /* RX Interrupt Absolute Delay Timer - RW */
+#define E1000_RSRPD    0x02C00  /* RX Small Packet Detect - RW */
+#define E1000_RAID     0x02C08  /* Receive Ack Interrupt Delay - RW */
+#define E1000_TXDMAC   0x03000  /* TX DMA Control - RW */
+#define E1000_KABGTXD  0x03004  /* AFE Band Gap Transmit Ref Data */
+#define E1000_TDFH     0x03410  /* TX Data FIFO Head - RW */
+#define E1000_TDFT     0x03418  /* TX Data FIFO Tail - RW */
+#define E1000_TDFHS    0x03420  /* TX Data FIFO Head Saved - RW */
+#define E1000_TDFTS    0x03428  /* TX Data FIFO Tail Saved - RW */
+#define E1000_TDFPC    0x03430  /* TX Data FIFO Packet Count - RW */
+#define E1000_TDBAL    0x03800  /* TX Descriptor Base Address Low - RW */
+#define E1000_TDBAH    0x03804  /* TX Descriptor Base Address High - RW */
+#define E1000_TDLEN    0x03808  /* TX Descriptor Length - RW */
+#define E1000_TDH      0x03810  /* TX Descriptor Head - RW */
+#define E1000_TDT      0x03818  /* TX Descriptor Tail - RW */
+#define E1000_TIDV     0x03820  /* TX Interrupt Delay Value - RW */
+#define E1000_TXDCTL   0x03828  /* TX Descriptor Control - RW */
+#define E1000_TADV     0x0382C  /* TX Interrupt Absolute Delay Val - RW */
+#define E1000_TSPMT    0x03830  /* TCP Segmentation PAD & Min Threshold - RW */
+#define E1000_TARC0    0x03840  /* TX Arbitration Count (0) */
+#define E1000_TDBAL1   0x03900  /* TX Desc Base Address Low (1) - RW */
+#define E1000_TDBAH1   0x03904  /* TX Desc Base Address High (1) - RW */
+#define E1000_TDLEN1   0x03908  /* TX Desc Length (1) - RW */
+#define E1000_TDH1     0x03910  /* TX Desc Head (1) - RW */
+#define E1000_TDT1     0x03918  /* TX Desc Tail (1) - RW */
+#define E1000_TXDCTL1  0x03928  /* TX Descriptor Control (1) - RW */
+#define E1000_TARC1    0x03940  /* TX Arbitration Count (1) */
+#define E1000_CRCERRS  0x04000  /* CRC Error Count - R/clr */
+#define E1000_ALGNERRC 0x04004  /* Alignment Error Count - R/clr */
+#define E1000_SYMERRS  0x04008  /* Symbol Error Count - R/clr */
+#define E1000_RXERRC   0x0400C  /* Receive Error Count - R/clr */
+#define E1000_MPC      0x04010  /* Missed Packet Count - R/clr */
+#define E1000_SCC      0x04014  /* Single Collision Count - R/clr */
+#define E1000_ECOL     0x04018  /* Excessive Collision Count - R/clr */
+#define E1000_MCC      0x0401C  /* Multiple Collision Count - R/clr */
+#define E1000_LATECOL  0x04020  /* Late Collision Count - R/clr */
+#define E1000_COLC     0x04028  /* Collision Count - R/clr */
+#define E1000_DC       0x04030  /* Defer Count - R/clr */
+#define E1000_TNCRS    0x04034  /* TX-No CRS - R/clr */
+#define E1000_SEC      0x04038  /* Sequence Error Count - R/clr */
+#define E1000_CEXTERR  0x0403C  /* Carrier Extension Error Count - R/clr */
+#define E1000_RLEC     0x04040  /* Receive Length Error Count - R/clr */
+#define E1000_XONRXC   0x04048  /* XON RX Count - R/clr */
+#define E1000_XONTXC   0x0404C  /* XON TX Count - R/clr */
+#define E1000_XOFFRXC  0x04050  /* XOFF RX Count - R/clr */
+#define E1000_XOFFTXC  0x04054  /* XOFF TX Count - R/clr */
+#define E1000_FCRUC    0x04058  /* Flow Control RX Unsupported Count- R/clr */
+#define E1000_PRC64    0x0405C  /* Packets RX (64 bytes) - R/clr */
+#define E1000_PRC127   0x04060  /* Packets RX (65-127 bytes) - R/clr */
+#define E1000_PRC255   0x04064  /* Packets RX (128-255 bytes) - R/clr */
+#define E1000_PRC511   0x04068  /* Packets RX (256-511 bytes) - R/clr */
+#define E1000_PRC1023  0x0406C  /* Packets RX (512-1023 bytes) - R/clr */
+#define E1000_PRC1522  0x04070  /* Packets RX (1024-1522 bytes) - R/clr */
+#define E1000_GPRC     0x04074  /* Good Packets RX Count - R/clr */
+#define E1000_BPRC     0x04078  /* Broadcast Packets RX Count - R/clr */
+#define E1000_MPRC     0x0407C  /* Multicast Packets RX Count - R/clr */
+#define E1000_GPTC     0x04080  /* Good Packets TX Count - R/clr */
+#define E1000_GORCL    0x04088  /* Good Octets RX Count Low - R/clr */
+#define E1000_GORCH    0x0408C  /* Good Octets RX Count High - R/clr */
+#define E1000_GOTCL    0x04090  /* Good Octets TX Count Low - R/clr */
+#define E1000_GOTCH    0x04094  /* Good Octets TX Count High - R/clr */
+#define E1000_RNBC     0x040A0  /* RX No Buffers Count - R/clr */
+#define E1000_RUC      0x040A4  /* RX Undersize Count - R/clr */
+#define E1000_RFC      0x040A8  /* RX Fragment Count - R/clr */
+#define E1000_ROC      0x040AC  /* RX Oversize Count - R/clr */
+#define E1000_RJC      0x040B0  /* RX Jabber Count - R/clr */
+#define E1000_MGTPRC   0x040B4  /* Management Packets RX Count - R/clr */
+#define E1000_MGTPDC   0x040B8  /* Management Packets Dropped Count - R/clr */
+#define E1000_MGTPTC   0x040BC  /* Management Packets TX Count - R/clr */
+#define E1000_TORL     0x040C0  /* Total Octets RX Low - R/clr */
+#define E1000_TORH     0x040C4  /* Total Octets RX High - R/clr */
+#define E1000_TOTL     0x040C8  /* Total Octets TX Low - R/clr */
+#define E1000_TOTH     0x040CC  /* Total Octets TX High - R/clr */
+#define E1000_TPR      0x040D0  /* Total Packets RX - R/clr */
+#define E1000_TPT      0x040D4  /* Total Packets TX - R/clr */
+#define E1000_PTC64    0x040D8  /* Packets TX (64 bytes) - R/clr */
+#define E1000_PTC127   0x040DC  /* Packets TX (65-127 bytes) - R/clr */
+#define E1000_PTC255   0x040E0  /* Packets TX (128-255 bytes) - R/clr */
+#define E1000_PTC511   0x040E4  /* Packets TX (256-511 bytes) - R/clr */
+#define E1000_PTC1023  0x040E8  /* Packets TX (512-1023 bytes) - R/clr */
+#define E1000_PTC1522  0x040EC  /* Packets TX (1024-1522 Bytes) - R/clr */
+#define E1000_MPTC     0x040F0  /* Multicast Packets TX Count - R/clr */
+#define E1000_BPTC     0x040F4  /* Broadcast Packets TX Count - R/clr */
+#define E1000_TSCTC    0x040F8  /* TCP Segmentation Context TX - R/clr */
+#define E1000_TSCTFC   0x040FC  /* TCP Segmentation Context TX Fail - R/clr */
+#define E1000_IAC      0x04100  /* Interrupt Assertion Count */
+#define E1000_ICRXPTC  0x04104  /* Interrupt Cause Rx Packet Timer Expire Count */
+#define E1000_ICRXATC  0x04108  /* Interrupt Cause Rx Absolute Timer Expire Count */
+#define E1000_ICTXPTC  0x0410C  /* Interrupt Cause Tx Packet Timer Expire Count */
+#define E1000_ICTXATC  0x04110  /* Interrupt Cause Tx Absolute Timer Expire Count */
+#define E1000_ICTXQEC  0x04118  /* Interrupt Cause Tx Queue Empty Count */
+#define E1000_ICTXQMTC 0x0411C  /* Interrupt Cause Tx Queue Minimum Threshold Count */
+#define E1000_ICRXDMTC 0x04120  /* Interrupt Cause Rx Descriptor Minimum Threshold Count */
+#define E1000_ICRXOC   0x04124  /* Interrupt Cause Receiver Overrun Count */
+#define E1000_RXCSUM   0x05000  /* RX Checksum Control - RW */
+#define E1000_RFCTL    0x05008  /* Receive Filter Control*/
+#define E1000_MTA      0x05200  /* Multicast Table Array - RW Array */
+#define E1000_RA       0x05400  /* Receive Address - RW Array */
+#define E1000_VFTA     0x05600  /* VLAN Filter Table Array - RW Array */
+#define E1000_WUC      0x05800  /* Wakeup Control - RW */
+#define E1000_WUFC     0x05808  /* Wakeup Filter Control - RW */
+#define E1000_WUS      0x05810  /* Wakeup Status - RO */
+#define E1000_MANC     0x05820  /* Management Control - RW */
+#define E1000_IPAV     0x05838  /* IP Address Valid - RW */
+#define E1000_IP4AT    0x05840  /* IPv4 Address Table - RW Array */
+#define E1000_IP6AT    0x05880  /* IPv6 Address Table - RW Array */
+#define E1000_WUPL     0x05900  /* Wakeup Packet Length - RW */
+#define E1000_WUPM     0x05A00  /* Wakeup Packet Memory - RO A */
+#define E1000_FFLT     0x05F00  /* Flexible Filter Length Table - RW Array */
+#define E1000_HOST_IF  0x08800  /* Host Interface */
+#define E1000_FFMT     0x09000  /* Flexible Filter Mask Table - RW Array */
+#define E1000_FFVT     0x09800  /* Flexible Filter Value Table - RW Array */
+
+#define E1000_KUMCTRLSTA 0x00034 /* MAC-PHY interface - RW */
+#define E1000_MDPHYA     0x0003C  /* PHY address - RW */
+#define E1000_MANC2H     0x05860  /* Management Control To Host - RW */
+#define E1000_SW_FW_SYNC 0x05B5C /* Software-Firmware Synchronization - RW */
+
+#define E1000_GCR       0x05B00 /* PCI-Ex Control */
+#define E1000_GSCL_1    0x05B10 /* PCI-Ex Statistic Control #1 */
+#define E1000_GSCL_2    0x05B14 /* PCI-Ex Statistic Control #2 */
+#define E1000_GSCL_3    0x05B18 /* PCI-Ex Statistic Control #3 */
+#define E1000_GSCL_4    0x05B1C /* PCI-Ex Statistic Control #4 */
+#define E1000_FACTPS    0x05B30 /* Function Active and Power State to MNG */
+#define E1000_SWSM      0x05B50 /* SW Semaphore */
+#define E1000_FWSM      0x05B54 /* FW Semaphore */
+#define E1000_FFLT_DBG  0x05F04 /* Debug Register */
+#define E1000_HICR      0x08F00 /* Host Interface Control */
+
+/* RSS registers */
+#define E1000_CPUVEC    0x02C10 /* CPU Vector Register - RW */
+#define E1000_MRQC      0x05818 /* Multiple Receive Control - RW */
+#define E1000_RETA      0x05C00 /* Redirection Table - RW Array */
+#define E1000_RSSRK     0x05C80 /* RSS Random Key - RW Array */
+#define E1000_RSSIM     0x05864 /* RSS Interrupt Mask */
+#define E1000_RSSIR     0x05868 /* RSS Interrupt Request */
+/* Register Set (82542)
+ *
+ * Some of the 82542 registers are located at different offsets than they are
+ * in more current versions of the 8254x. Despite the difference in location,
+ * the registers function in the same manner.
+ */
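+/* Illustrative sketch (not from the original Intel sources): code that must
+ * run on both the 82542 and later parts can select the offset at run time,
+ * e.g. for the RX Delay Timer (e1000_82542_rev2_0/rev2_1 are assumed here to
+ * be members of the e1000_mac_type enum declared earlier in this header):
+ *
+ *     u32 rdtr = (hw->mac_type == e1000_82542_rev2_0 ||
+ *                 hw->mac_type == e1000_82542_rev2_1) ?
+ *                E1000_82542_RDTR : E1000_RDTR;
+ */
+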
+#define E1000_82542_CTRL     E1000_CTRL
+#define E1000_82542_CTRL_DUP E1000_CTRL_DUP
+#define E1000_82542_STATUS   E1000_STATUS
+#define E1000_82542_EECD     E1000_EECD
+#define E1000_82542_EERD     E1000_EERD
+#define E1000_82542_CTRL_EXT E1000_CTRL_EXT
+#define E1000_82542_FLA      E1000_FLA
+#define E1000_82542_MDIC     E1000_MDIC
+#define E1000_82542_SCTL     E1000_SCTL
+#define E1000_82542_FEXTNVM  E1000_FEXTNVM
+#define E1000_82542_FCAL     E1000_FCAL
+#define E1000_82542_FCAH     E1000_FCAH
+#define E1000_82542_FCT      E1000_FCT
+#define E1000_82542_VET      E1000_VET
+#define E1000_82542_RA       0x00040
+#define E1000_82542_ICR      E1000_ICR
+#define E1000_82542_ITR      E1000_ITR
+#define E1000_82542_ICS      E1000_ICS
+#define E1000_82542_IMS      E1000_IMS
+#define E1000_82542_IMC      E1000_IMC
+#define E1000_82542_RCTL     E1000_RCTL
+#define E1000_82542_RDTR     0x00108
+#define E1000_82542_RDBAL    0x00110
+#define E1000_82542_RDBAH    0x00114
+#define E1000_82542_RDLEN    0x00118
+#define E1000_82542_RDH      0x00120
+#define E1000_82542_RDT      0x00128
+#define E1000_82542_RDTR0    E1000_82542_RDTR
+#define E1000_82542_RDBAL0   E1000_82542_RDBAL
+#define E1000_82542_RDBAH0   E1000_82542_RDBAH
+#define E1000_82542_RDLEN0   E1000_82542_RDLEN
+#define E1000_82542_RDH0     E1000_82542_RDH
+#define E1000_82542_RDT0     E1000_82542_RDT
+#define E1000_82542_SRRCTL(_n) (0x280C + ((_n) << 8)) /* Split and Replication
+                                                       * RX Control - RW */
+#define E1000_82542_DCA_RXCTRL(_n) (0x02814 + ((_n) << 8))
+#define E1000_82542_RDBAH3   0x02B04 /* RX Desc Base High Queue 3 - RW */
+#define E1000_82542_RDBAL3   0x02B00 /* RX Desc Low Queue 3 - RW */
+#define E1000_82542_RDLEN3   0x02B08 /* RX Desc Length Queue 3 - RW */
+#define E1000_82542_RDH3     0x02B10 /* RX Desc Head Queue 3 - RW */
+#define E1000_82542_RDT3     0x02B18 /* RX Desc Tail Queue 3 - RW */
+#define E1000_82542_RDBAL2   0x02A00 /* RX Desc Base Low Queue 2 - RW */
+#define E1000_82542_RDBAH2   0x02A04 /* RX Desc Base High Queue 2 - RW */
+#define E1000_82542_RDLEN2   0x02A08 /* RX Desc Length Queue 2 - RW */
+#define E1000_82542_RDH2     0x02A10 /* RX Desc Head Queue 2 - RW */
+#define E1000_82542_RDT2     0x02A18 /* RX Desc Tail Queue 2 - RW */
+#define E1000_82542_RDTR1    0x00130
+#define E1000_82542_RDBAL1   0x00138
+#define E1000_82542_RDBAH1   0x0013C
+#define E1000_82542_RDLEN1   0x00140
+#define E1000_82542_RDH1     0x00148
+#define E1000_82542_RDT1     0x00150
+#define E1000_82542_FCRTH    0x00160
+#define E1000_82542_FCRTL    0x00168
+#define E1000_82542_FCTTV    E1000_FCTTV
+#define E1000_82542_TXCW     E1000_TXCW
+#define E1000_82542_RXCW     E1000_RXCW
+#define E1000_82542_MTA      0x00200
+#define E1000_82542_TCTL     E1000_TCTL
+#define E1000_82542_TCTL_EXT E1000_TCTL_EXT
+#define E1000_82542_TIPG     E1000_TIPG
+#define E1000_82542_TDBAL    0x00420
+#define E1000_82542_TDBAH    0x00424
+#define E1000_82542_TDLEN    0x00428
+#define E1000_82542_TDH      0x00430
+#define E1000_82542_TDT      0x00438
+#define E1000_82542_TIDV     0x00440
+#define E1000_82542_TBT      E1000_TBT
+#define E1000_82542_AIT      E1000_AIT
+#define E1000_82542_VFTA     0x00600
+#define E1000_82542_LEDCTL   E1000_LEDCTL
+#define E1000_82542_PBA      E1000_PBA
+#define E1000_82542_PBS      E1000_PBS
+#define E1000_82542_EEMNGCTL E1000_EEMNGCTL
+#define E1000_82542_EEARBC   E1000_EEARBC
+#define E1000_82542_FLASHT   E1000_FLASHT
+#define E1000_82542_EEWR     E1000_EEWR
+#define E1000_82542_FLSWCTL  E1000_FLSWCTL
+#define E1000_82542_FLSWDATA E1000_FLSWDATA
+#define E1000_82542_FLSWCNT  E1000_FLSWCNT
+#define E1000_82542_FLOP     E1000_FLOP
+#define E1000_82542_EXTCNF_CTRL  E1000_EXTCNF_CTRL
+#define E1000_82542_EXTCNF_SIZE  E1000_EXTCNF_SIZE
+#define E1000_82542_PHY_CTRL E1000_PHY_CTRL
+#define E1000_82542_ERT      E1000_ERT
+#define E1000_82542_RXDCTL   E1000_RXDCTL
+#define E1000_82542_RXDCTL1  E1000_RXDCTL1
+#define E1000_82542_RADV     E1000_RADV
+#define E1000_82542_RSRPD    E1000_RSRPD
+#define E1000_82542_TXDMAC   E1000_TXDMAC
+#define E1000_82542_KABGTXD  E1000_KABGTXD
+#define E1000_82542_TDFHS    E1000_TDFHS
+#define E1000_82542_TDFTS    E1000_TDFTS
+#define E1000_82542_TDFPC    E1000_TDFPC
+#define E1000_82542_TXDCTL   E1000_TXDCTL
+#define E1000_82542_TADV     E1000_TADV
+#define E1000_82542_TSPMT    E1000_TSPMT
+#define E1000_82542_CRCERRS  E1000_CRCERRS
+#define E1000_82542_ALGNERRC E1000_ALGNERRC
+#define E1000_82542_SYMERRS  E1000_SYMERRS
+#define E1000_82542_RXERRC   E1000_RXERRC
+#define E1000_82542_MPC      E1000_MPC
+#define E1000_82542_SCC      E1000_SCC
+#define E1000_82542_ECOL     E1000_ECOL
+#define E1000_82542_MCC      E1000_MCC
+#define E1000_82542_LATECOL  E1000_LATECOL
+#define E1000_82542_COLC     E1000_COLC
+#define E1000_82542_DC       E1000_DC
+#define E1000_82542_TNCRS    E1000_TNCRS
+#define E1000_82542_SEC      E1000_SEC
+#define E1000_82542_CEXTERR  E1000_CEXTERR
+#define E1000_82542_RLEC     E1000_RLEC
+#define E1000_82542_XONRXC   E1000_XONRXC
+#define E1000_82542_XONTXC   E1000_XONTXC
+#define E1000_82542_XOFFRXC  E1000_XOFFRXC
+#define E1000_82542_XOFFTXC  E1000_XOFFTXC
+#define E1000_82542_FCRUC    E1000_FCRUC
+#define E1000_82542_PRC64    E1000_PRC64
+#define E1000_82542_PRC127   E1000_PRC127
+#define E1000_82542_PRC255   E1000_PRC255
+#define E1000_82542_PRC511   E1000_PRC511
+#define E1000_82542_PRC1023  E1000_PRC1023
+#define E1000_82542_PRC1522  E1000_PRC1522
+#define E1000_82542_GPRC     E1000_GPRC
+#define E1000_82542_BPRC     E1000_BPRC
+#define E1000_82542_MPRC     E1000_MPRC
+#define E1000_82542_GPTC     E1000_GPTC
+#define E1000_82542_GORCL    E1000_GORCL
+#define E1000_82542_GORCH    E1000_GORCH
+#define E1000_82542_GOTCL    E1000_GOTCL
+#define E1000_82542_GOTCH    E1000_GOTCH
+#define E1000_82542_RNBC     E1000_RNBC
+#define E1000_82542_RUC      E1000_RUC
+#define E1000_82542_RFC      E1000_RFC
+#define E1000_82542_ROC      E1000_ROC
+#define E1000_82542_RJC      E1000_RJC
+#define E1000_82542_MGTPRC   E1000_MGTPRC
+#define E1000_82542_MGTPDC   E1000_MGTPDC
+#define E1000_82542_MGTPTC   E1000_MGTPTC
+#define E1000_82542_TORL     E1000_TORL
+#define E1000_82542_TORH     E1000_TORH
+#define E1000_82542_TOTL     E1000_TOTL
+#define E1000_82542_TOTH     E1000_TOTH
+#define E1000_82542_TPR      E1000_TPR
+#define E1000_82542_TPT      E1000_TPT
+#define E1000_82542_PTC64    E1000_PTC64
+#define E1000_82542_PTC127   E1000_PTC127
+#define E1000_82542_PTC255   E1000_PTC255
+#define E1000_82542_PTC511   E1000_PTC511
+#define E1000_82542_PTC1023  E1000_PTC1023
+#define E1000_82542_PTC1522  E1000_PTC1522
+#define E1000_82542_MPTC     E1000_MPTC
+#define E1000_82542_BPTC     E1000_BPTC
+#define E1000_82542_TSCTC    E1000_TSCTC
+#define E1000_82542_TSCTFC   E1000_TSCTFC
+#define E1000_82542_RXCSUM   E1000_RXCSUM
+#define E1000_82542_WUC      E1000_WUC
+#define E1000_82542_WUFC     E1000_WUFC
+#define E1000_82542_WUS      E1000_WUS
+#define E1000_82542_MANC     E1000_MANC
+#define E1000_82542_IPAV     E1000_IPAV
+#define E1000_82542_IP4AT    E1000_IP4AT
+#define E1000_82542_IP6AT    E1000_IP6AT
+#define E1000_82542_WUPL     E1000_WUPL
+#define E1000_82542_WUPM     E1000_WUPM
+#define E1000_82542_FFLT     E1000_FFLT
+#define E1000_82542_TDFH     0x08010
+#define E1000_82542_TDFT     0x08018
+#define E1000_82542_FFMT     E1000_FFMT
+#define E1000_82542_FFVT     E1000_FFVT
+#define E1000_82542_HOST_IF  E1000_HOST_IF
+#define E1000_82542_IAM         E1000_IAM
+#define E1000_82542_EEMNGCTL    E1000_EEMNGCTL
+#define E1000_82542_PSRCTL      E1000_PSRCTL
+#define E1000_82542_RAID        E1000_RAID
+#define E1000_82542_TARC0       E1000_TARC0
+#define E1000_82542_TDBAL1      E1000_TDBAL1
+#define E1000_82542_TDBAH1      E1000_TDBAH1
+#define E1000_82542_TDLEN1      E1000_TDLEN1
+#define E1000_82542_TDH1        E1000_TDH1
+#define E1000_82542_TDT1        E1000_TDT1
+#define E1000_82542_TXDCTL1     E1000_TXDCTL1
+#define E1000_82542_TARC1       E1000_TARC1
+#define E1000_82542_RFCTL       E1000_RFCTL
+#define E1000_82542_GCR         E1000_GCR
+#define E1000_82542_GSCL_1      E1000_GSCL_1
+#define E1000_82542_GSCL_2      E1000_GSCL_2
+#define E1000_82542_GSCL_3      E1000_GSCL_3
+#define E1000_82542_GSCL_4      E1000_GSCL_4
+#define E1000_82542_FACTPS      E1000_FACTPS
+#define E1000_82542_SWSM        E1000_SWSM
+#define E1000_82542_FWSM        E1000_FWSM
+#define E1000_82542_FFLT_DBG    E1000_FFLT_DBG
+#define E1000_82542_IAC         E1000_IAC
+#define E1000_82542_ICRXPTC     E1000_ICRXPTC
+#define E1000_82542_ICRXATC     E1000_ICRXATC
+#define E1000_82542_ICTXPTC     E1000_ICTXPTC
+#define E1000_82542_ICTXATC     E1000_ICTXATC
+#define E1000_82542_ICTXQEC     E1000_ICTXQEC
+#define E1000_82542_ICTXQMTC    E1000_ICTXQMTC
+#define E1000_82542_ICRXDMTC    E1000_ICRXDMTC
+#define E1000_82542_ICRXOC      E1000_ICRXOC
+#define E1000_82542_HICR        E1000_HICR
+
+#define E1000_82542_CPUVEC      E1000_CPUVEC
+#define E1000_82542_MRQC        E1000_MRQC
+#define E1000_82542_RETA        E1000_RETA
+#define E1000_82542_RSSRK       E1000_RSSRK
+#define E1000_82542_RSSIM       E1000_RSSIM
+#define E1000_82542_RSSIR       E1000_RSSIR
+#define E1000_82542_KUMCTRLSTA E1000_KUMCTRLSTA
+#define E1000_82542_SW_FW_SYNC E1000_SW_FW_SYNC
+#define E1000_82542_MANC2H      E1000_MANC2H
+
+/* Statistics counters collected by the MAC */
+struct e1000_hw_stats {
+	u64		crcerrs;
+	u64		algnerrc;
+	u64		symerrs;
+	u64		rxerrc;
+	u64		txerrc;
+	u64		mpc;
+	u64		scc;
+	u64		ecol;
+	u64		mcc;
+	u64		latecol;
+	u64		colc;
+	u64		dc;
+	u64		tncrs;
+	u64		sec;
+	u64		cexterr;
+	u64		rlec;
+	u64		xonrxc;
+	u64		xontxc;
+	u64		xoffrxc;
+	u64		xofftxc;
+	u64		fcruc;
+	u64		prc64;
+	u64		prc127;
+	u64		prc255;
+	u64		prc511;
+	u64		prc1023;
+	u64		prc1522;
+	u64		gprc;
+	u64		bprc;
+	u64		mprc;
+	u64		gptc;
+	u64		gorcl;
+	u64		gorch;
+	u64		gotcl;
+	u64		gotch;
+	u64		rnbc;
+	u64		ruc;
+	u64		rfc;
+	u64		roc;
+	u64		rlerrc;
+	u64		rjc;
+	u64		mgprc;
+	u64		mgpdc;
+	u64		mgptc;
+	u64		torl;
+	u64		torh;
+	u64		totl;
+	u64		toth;
+	u64		tpr;
+	u64		tpt;
+	u64		ptc64;
+	u64		ptc127;
+	u64		ptc255;
+	u64		ptc511;
+	u64		ptc1023;
+	u64		ptc1522;
+	u64		mptc;
+	u64		bptc;
+	u64		tsctc;
+	u64		tsctfc;
+	u64		iac;
+	u64		icrxptc;
+	u64		icrxatc;
+	u64		ictxptc;
+	u64		ictxatc;
+	u64		ictxqec;
+	u64		ictxqmtc;
+	u64		icrxdmtc;
+	u64		icrxoc;
+};
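+
+/* Illustrative sketch (not from the original Intel sources): the fields above
+ * shadow the R/clr statistics registers, which reset to zero on every read,
+ * so a driver typically accumulates them periodically instead of reading them
+ * on demand (`stats` and `hw` are illustrative locals):
+ *
+ *     stats->crcerrs += readl(hw->hw_addr + E1000_CRCERRS);
+ *     stats->mpc     += readl(hw->hw_addr + E1000_MPC);
+ *     stats->gprc    += readl(hw->hw_addr + E1000_GPRC);
+ */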
+
+/* Structure containing variables used by the shared code (e1000_hw.c) */
+struct e1000_hw {
+	u8 __iomem		*hw_addr;
+	u8 __iomem		*flash_address;
+	e1000_mac_type		mac_type;
+	e1000_phy_type		phy_type;
+	u32		phy_init_script;
+	e1000_media_type	media_type;
+	void			*back;
+	struct e1000_shadow_ram	*eeprom_shadow_ram;
+	u32		flash_bank_size;
+	u32		flash_base_addr;
+	e1000_fc_type		fc;
+	e1000_bus_speed		bus_speed;
+	e1000_bus_width		bus_width;
+	e1000_bus_type		bus_type;
+	struct e1000_eeprom_info eeprom;
+	e1000_ms_type		master_slave;
+	e1000_ms_type		original_master_slave;
+	e1000_ffe_config	ffe_config_state;
+	u32		asf_firmware_present;
+	u32		eeprom_semaphore_present;
+	u32		swfw_sync_present;
+	u32		swfwhw_semaphore_present;
+	unsigned long		io_base;
+	u32		phy_id;
+	u32		phy_revision;
+	u32		phy_addr;
+	u32		original_fc;
+	u32		txcw;
+	u32		autoneg_failed;
+	u32		max_frame_size;
+	u32		min_frame_size;
+	u32		mc_filter_type;
+	u32		num_mc_addrs;
+	u32		collision_delta;
+	u32		tx_packet_delta;
+	u32		ledctl_default;
+	u32		ledctl_mode1;
+	u32		ledctl_mode2;
+	bool			tx_pkt_filtering;
+	struct e1000_host_mng_dhcp_cookie mng_cookie;
+	u16		phy_spd_default;
+	u16		autoneg_advertised;
+	u16		pci_cmd_word;
+	u16		fc_high_water;
+	u16		fc_low_water;
+	u16		fc_pause_time;
+	u16		current_ifs_val;
+	u16		ifs_min_val;
+	u16		ifs_max_val;
+	u16		ifs_step_size;
+	u16		ifs_ratio;
+	u16		device_id;
+	u16		vendor_id;
+	u16		subsystem_id;
+	u16		subsystem_vendor_id;
+	u8			revision_id;
+	u8			autoneg;
+	u8			mdix;
+	u8			forced_speed_duplex;
+	u8			wait_autoneg_complete;
+	u8			dma_fairness;
+	u8			mac_addr[NODE_ADDRESS_SIZE];
+	u8			perm_mac_addr[NODE_ADDRESS_SIZE];
+	bool			disable_polarity_correction;
+	bool			speed_downgraded;
+	e1000_smart_speed	smart_speed;
+	e1000_dsp_config	dsp_config_state;
+	bool			get_link_status;
+	bool			serdes_link_down;
+	bool			tbi_compatibility_en;
+	bool			tbi_compatibility_on;
+	bool			laa_is_present;
+	bool			phy_reset_disable;
+	bool			initialize_hw_bits_disable;
+	bool			fc_send_xon;
+	bool			fc_strict_ieee;
+	bool			report_tx_early;
+	bool			adaptive_ifs;
+	bool			ifs_params_forced;
+	bool			in_ifs_mode;
+	bool			mng_reg_access_disabled;
+	bool			leave_av_bit_off;
+	bool			kmrn_lock_loss_workaround_disabled;
+	bool			bad_tx_carr_stats_fd;
+	bool			has_manc2h;
+	bool			rx_needs_kicking;
+	bool			has_smbus;
+};
+
+
+#define E1000_EEPROM_SWDPIN0   0x0001   /* SWDPIN 0 EEPROM Value */
+#define E1000_EEPROM_LED_LOGIC 0x0020   /* Led Logic Word */
+#define E1000_EEPROM_RW_REG_DATA   16   /* Offset to data in EEPROM read/write registers */
+#define E1000_EEPROM_RW_REG_DONE   2    /* Offset to READ/WRITE done bit */
+#define E1000_EEPROM_RW_REG_START  1    /* First bit for telling part to start operation */
+#define E1000_EEPROM_RW_ADDR_SHIFT 2    /* Shift to the address bits */
+#define E1000_EEPROM_POLL_WRITE    1    /* Flag for polling for write complete */
+#define E1000_EEPROM_POLL_READ     0    /* Flag for polling for read complete */
+/* Register Bit Masks */
+/* Device Control */
+#define E1000_CTRL_FD       0x00000001  /* Full duplex. 0=half; 1=full */
+#define E1000_CTRL_BEM      0x00000002  /* Endian Mode. 0=little, 1=big */
+#define E1000_CTRL_PRIOR    0x00000004  /* Priority on PCI. 0=rx,1=fair */
+#define E1000_CTRL_GIO_MASTER_DISABLE 0x00000004 /*Blocks new Master requests */
+#define E1000_CTRL_LRST     0x00000008  /* Link reset. 0=normal,1=reset */
+#define E1000_CTRL_TME      0x00000010  /* Test mode. 0=normal,1=test */
+#define E1000_CTRL_SLE      0x00000020  /* Serial Link on 0=dis,1=en */
+#define E1000_CTRL_ASDE     0x00000020  /* Auto-speed detect enable */
+#define E1000_CTRL_SLU      0x00000040  /* Set link up (Force Link) */
+#define E1000_CTRL_ILOS     0x00000080  /* Invert Loss-Of Signal */
+#define E1000_CTRL_SPD_SEL  0x00000300  /* Speed Select Mask */
+#define E1000_CTRL_SPD_10   0x00000000  /* Force 10Mb */
+#define E1000_CTRL_SPD_100  0x00000100  /* Force 100Mb */
+#define E1000_CTRL_SPD_1000 0x00000200  /* Force 1Gb */
+#define E1000_CTRL_BEM32    0x00000400  /* Big Endian 32 mode */
+#define E1000_CTRL_FRCSPD   0x00000800  /* Force Speed */
+#define E1000_CTRL_FRCDPX   0x00001000  /* Force Duplex */
+#define E1000_CTRL_D_UD_EN  0x00002000  /* Dock/Undock enable */
+#define E1000_CTRL_D_UD_POLARITY 0x00004000 /* Defined polarity of Dock/Undock indication in SDP[0] */
+#define E1000_CTRL_FORCE_PHY_RESET 0x00008000 /* Reset both PHY ports, through PHYRST_N pin */
+#define E1000_CTRL_EXT_LINK_EN 0x00010000 /* enable link status from external LINK_0 and LINK_1 pins */
+#define E1000_CTRL_SWDPIN0  0x00040000  /* SWDPIN 0 value */
+#define E1000_CTRL_SWDPIN1  0x00080000  /* SWDPIN 1 value */
+#define E1000_CTRL_SWDPIN2  0x00100000  /* SWDPIN 2 value */
+#define E1000_CTRL_SWDPIN3  0x00200000  /* SWDPIN 3 value */
+#define E1000_CTRL_SWDPIO0  0x00400000  /* SWDPIN 0 Input or output */
+#define E1000_CTRL_SWDPIO1  0x00800000  /* SWDPIN 1 input or output */
+#define E1000_CTRL_SWDPIO2  0x01000000  /* SWDPIN 2 input or output */
+#define E1000_CTRL_SWDPIO3  0x02000000  /* SWDPIN 3 input or output */
+#define E1000_CTRL_RST      0x04000000  /* Global reset */
+#define E1000_CTRL_RFCE     0x08000000  /* Receive Flow Control enable */
+#define E1000_CTRL_TFCE     0x10000000  /* Transmit flow control enable */
+#define E1000_CTRL_RTE      0x20000000  /* Routing tag enable */
+#define E1000_CTRL_VME      0x40000000  /* IEEE VLAN mode enable */
+#define E1000_CTRL_PHY_RST  0x80000000  /* PHY Reset */
+#define E1000_CTRL_SW2FW_INT 0x02000000  /* Initiate an interrupt to manageability engine */
+
+/* Device Status */
+#define E1000_STATUS_FD         0x00000001      /* Full duplex. 0=half, 1=full */
+#define E1000_STATUS_LU         0x00000002      /* Link up. 0=no, 1=link */
+#define E1000_STATUS_FUNC_MASK  0x0000000C      /* PCI Function Mask */
+#define E1000_STATUS_FUNC_SHIFT 2
+#define E1000_STATUS_FUNC_0     0x00000000      /* Function 0 */
+#define E1000_STATUS_FUNC_1     0x00000004      /* Function 1 */
+#define E1000_STATUS_TXOFF      0x00000010      /* transmission paused */
+#define E1000_STATUS_TBIMODE    0x00000020      /* TBI mode */
+#define E1000_STATUS_SPEED_MASK 0x000000C0
+#define E1000_STATUS_SPEED_10   0x00000000      /* Speed 10Mb/s */
+#define E1000_STATUS_SPEED_100  0x00000040      /* Speed 100Mb/s */
+#define E1000_STATUS_SPEED_1000 0x00000080      /* Speed 1000Mb/s */
+#define E1000_STATUS_LAN_INIT_DONE 0x00000200   /* Lan Init Completion
+                                                   by EEPROM/Flash */
+#define E1000_STATUS_ASDV       0x00000300      /* Auto speed detect value */
+#define E1000_STATUS_DOCK_CI    0x00000800      /* Change in Dock/Undock state. Clear on write '0'. */
+#define E1000_STATUS_GIO_MASTER_ENABLE 0x00080000 /* Status of Master requests. */
+#define E1000_STATUS_MTXCKOK    0x00000400      /* MTX clock running OK */
+#define E1000_STATUS_PCI66      0x00000800      /* In 66 MHz slot */
+#define E1000_STATUS_BUS64      0x00001000      /* In 64 bit slot */
+#define E1000_STATUS_PCIX_MODE  0x00002000      /* PCI-X mode */
+#define E1000_STATUS_PCIX_SPEED 0x0000C000      /* PCI-X bus speed */
+#define E1000_STATUS_BMC_SKU_0  0x00100000 /* BMC USB redirect disabled */
+#define E1000_STATUS_BMC_SKU_1  0x00200000 /* BMC SRAM disabled */
+#define E1000_STATUS_BMC_SKU_2  0x00400000 /* BMC SDRAM disabled */
+#define E1000_STATUS_BMC_CRYPTO 0x00800000 /* BMC crypto disabled */
+#define E1000_STATUS_BMC_LITE   0x01000000 /* BMC external code execution disabled */
+#define E1000_STATUS_RGMII_ENABLE 0x02000000 /* RGMII disabled */
+#define E1000_STATUS_FUSE_8       0x04000000
+#define E1000_STATUS_FUSE_9       0x08000000
+#define E1000_STATUS_SERDES0_DIS  0x10000000 /* SERDES disabled on port 0 */
+#define E1000_STATUS_SERDES1_DIS  0x20000000 /* SERDES disabled on port 1 */
+
+/* Constants used to interpret the masked PCI-X bus speed. */
+#define E1000_STATUS_PCIX_SPEED_66  0x00000000 /* PCI-X bus speed  50-66 MHz */
+#define E1000_STATUS_PCIX_SPEED_100 0x00004000 /* PCI-X bus speed  66-100 MHz */
+#define E1000_STATUS_PCIX_SPEED_133 0x00008000 /* PCI-X bus speed 100-133 MHz */
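+
+/* Illustrative sketch (not from the original Intel sources): decoding the
+ * masked PCI-X speed field from the Device Status register (`bus_mhz` is an
+ * illustrative local):
+ *
+ *     switch (readl(hw->hw_addr + E1000_STATUS) & E1000_STATUS_PCIX_SPEED) {
+ *     case E1000_STATUS_PCIX_SPEED_66:  bus_mhz =  66; break;
+ *     case E1000_STATUS_PCIX_SPEED_100: bus_mhz = 100; break;
+ *     case E1000_STATUS_PCIX_SPEED_133: bus_mhz = 133; break;
+ *     }
+ */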
+
+/* EEPROM/Flash Control */
+#define E1000_EECD_SK        0x00000001 /* EEPROM Clock */
+#define E1000_EECD_CS        0x00000002 /* EEPROM Chip Select */
+#define E1000_EECD_DI        0x00000004 /* EEPROM Data In */
+#define E1000_EECD_DO        0x00000008 /* EEPROM Data Out */
+#define E1000_EECD_FWE_MASK  0x00000030
+#define E1000_EECD_FWE_DIS   0x00000010 /* Disable FLASH writes */
+#define E1000_EECD_FWE_EN    0x00000020 /* Enable FLASH writes */
+#define E1000_EECD_FWE_SHIFT 4
+#define E1000_EECD_REQ       0x00000040 /* EEPROM Access Request */
+#define E1000_EECD_GNT       0x00000080 /* EEPROM Access Grant */
+#define E1000_EECD_PRES      0x00000100 /* EEPROM Present */
+#define E1000_EECD_SIZE      0x00000200 /* EEPROM Size (0=64 word 1=256 word) */
+#define E1000_EECD_ADDR_BITS 0x00000400 /* EEPROM Addressing bits based on type
+                                         * (0-small, 1-large) */
+#define E1000_EECD_TYPE      0x00002000 /* EEPROM Type (1-SPI, 0-Microwire) */
+#ifndef E1000_EEPROM_GRANT_ATTEMPTS
+#define E1000_EEPROM_GRANT_ATTEMPTS 1000 /* EEPROM # attempts to gain grant */
+#endif
+#define E1000_EECD_AUTO_RD          0x00000200  /* EEPROM Auto Read done */
+#define E1000_EECD_SIZE_EX_MASK     0x00007800  /* EEPROM Size */
+#define E1000_EECD_SIZE_EX_SHIFT    11
+#define E1000_EECD_NVADDS    0x00018000 /* NVM Address Size */
+#define E1000_EECD_SELSHAD   0x00020000 /* Select Shadow RAM */
+#define E1000_EECD_INITSRAM  0x00040000 /* Initialize Shadow RAM */
+#define E1000_EECD_FLUPD     0x00080000 /* Update FLASH */
+#define E1000_EECD_AUPDEN    0x00100000 /* Enable Autonomous FLASH update */
+#define E1000_EECD_SHADV     0x00200000 /* Shadow RAM Data Valid */
+#define E1000_EECD_SEC1VAL   0x00400000 /* Sector One Valid */
+#define E1000_EECD_SECVAL_SHIFT      22
+#define E1000_STM_OPCODE     0xDB00
+#define E1000_HICR_FW_RESET  0xC0
+
+#define E1000_SHADOW_RAM_WORDS     2048
+#define E1000_ICH_NVM_SIG_WORD     0x13
+#define E1000_ICH_NVM_SIG_MASK     0xC0
+
+/* EEPROM Read */
+#define E1000_EERD_START      0x00000001 /* Start Read */
+#define E1000_EERD_DONE       0x00000010 /* Read Done */
+#define E1000_EERD_ADDR_SHIFT 8
+#define E1000_EERD_ADDR_MASK  0x0000FF00 /* Read Address */
+#define E1000_EERD_DATA_SHIFT 16
+#define E1000_EERD_DATA_MASK  0xFFFF0000 /* Read Data */
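+
+/* Illustrative sketch (not from the original Intel sources): a polled EEPROM
+ * word read through the EERD register, using the bits above (`offset`, `eerd`
+ * and `word` are illustrative locals; real code also bounds the poll loop):
+ *
+ *     writel((offset << E1000_EERD_ADDR_SHIFT) | E1000_EERD_START,
+ *            hw->hw_addr + E1000_EERD);
+ *     do {
+ *         eerd = readl(hw->hw_addr + E1000_EERD);
+ *     } while (!(eerd & E1000_EERD_DONE));
+ *     word = (eerd & E1000_EERD_DATA_MASK) >> E1000_EERD_DATA_SHIFT;
+ */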
+
+/* SPI EEPROM Status Register */
+#define EEPROM_STATUS_RDY_SPI  0x01
+#define EEPROM_STATUS_WEN_SPI  0x02
+#define EEPROM_STATUS_BP0_SPI  0x04
+#define EEPROM_STATUS_BP1_SPI  0x08
+#define EEPROM_STATUS_WPEN_SPI 0x80
+
+/* Extended Device Control */
+#define E1000_CTRL_EXT_GPI0_EN   0x00000001 /* Maps SDP4 to GPI0 */
+#define E1000_CTRL_EXT_GPI1_EN   0x00000002 /* Maps SDP5 to GPI1 */
+#define E1000_CTRL_EXT_PHYINT_EN E1000_CTRL_EXT_GPI1_EN
+#define E1000_CTRL_EXT_GPI2_EN   0x00000004 /* Maps SDP6 to GPI2 */
+#define E1000_CTRL_EXT_GPI3_EN   0x00000008 /* Maps SDP7 to GPI3 */
+#define E1000_CTRL_EXT_SDP4_DATA 0x00000010 /* Value of SW Definable Pin 4 */
+#define E1000_CTRL_EXT_SDP5_DATA 0x00000020 /* Value of SW Definable Pin 5 */
+#define E1000_CTRL_EXT_PHY_INT   E1000_CTRL_EXT_SDP5_DATA
+#define E1000_CTRL_EXT_SDP6_DATA 0x00000040 /* Value of SW Definable Pin 6 */
+#define E1000_CTRL_EXT_SDP7_DATA 0x00000080 /* Value of SW Definable Pin 7 */
+#define E1000_CTRL_EXT_SDP4_DIR  0x00000100 /* Direction of SDP4 0=in 1=out */
+#define E1000_CTRL_EXT_SDP5_DIR  0x00000200 /* Direction of SDP5 0=in 1=out */
+#define E1000_CTRL_EXT_SDP6_DIR  0x00000400 /* Direction of SDP6 0=in 1=out */
+#define E1000_CTRL_EXT_SDP7_DIR  0x00000800 /* Direction of SDP7 0=in 1=out */
+#define E1000_CTRL_EXT_ASDCHK    0x00001000 /* Initiate an ASD sequence */
+#define E1000_CTRL_EXT_EE_RST    0x00002000 /* Reinitialize from EEPROM */
+#define E1000_CTRL_EXT_IPS       0x00004000 /* Invert Power State */
+#define E1000_CTRL_EXT_SPD_BYPS  0x00008000 /* Speed Select Bypass */
+#define E1000_CTRL_EXT_RO_DIS    0x00020000 /* Relaxed Ordering disable */
+#define E1000_CTRL_EXT_LINK_MODE_MASK 0x00C00000
+#define E1000_CTRL_EXT_LINK_MODE_GMII 0x00000000
+#define E1000_CTRL_EXT_LINK_MODE_TBI  0x00C00000
+#define E1000_CTRL_EXT_LINK_MODE_KMRN 0x00000000
+#define E1000_CTRL_EXT_LINK_MODE_SERDES  0x00C00000
+#define E1000_CTRL_EXT_LINK_MODE_SGMII   0x00800000
+#define E1000_CTRL_EXT_WR_WMARK_MASK  0x03000000
+#define E1000_CTRL_EXT_WR_WMARK_256   0x00000000
+#define E1000_CTRL_EXT_WR_WMARK_320   0x01000000
+#define E1000_CTRL_EXT_WR_WMARK_384   0x02000000
+#define E1000_CTRL_EXT_WR_WMARK_448   0x03000000
+#define E1000_CTRL_EXT_DRV_LOAD       0x10000000 /* Driver loaded bit for FW */
+#define E1000_CTRL_EXT_IAME           0x08000000 /* Interrupt acknowledge Auto-mask */
+#define E1000_CTRL_EXT_INT_TIMER_CLR  0x20000000 /* Clear Interrupt timers after IMS clear */
+#define E1000_CRTL_EXT_PB_PAREN       0x01000000 /* packet buffer parity error detection enabled */
+#define E1000_CTRL_EXT_DF_PAREN       0x02000000 /* descriptor FIFO parity error detection enable */
+#define E1000_CTRL_EXT_GHOST_PAREN    0x40000000
+
+/* MDI Control */
+#define E1000_MDIC_DATA_MASK 0x0000FFFF
+#define E1000_MDIC_REG_MASK  0x001F0000
+#define E1000_MDIC_REG_SHIFT 16
+#define E1000_MDIC_PHY_MASK  0x03E00000
+#define E1000_MDIC_PHY_SHIFT 21
+#define E1000_MDIC_OP_WRITE  0x04000000
+#define E1000_MDIC_OP_READ   0x08000000
+#define E1000_MDIC_READY     0x10000000
+#define E1000_MDIC_INT_EN    0x20000000
+#define E1000_MDIC_ERROR     0x40000000
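+
+/* Illustrative sketch (not from the original Intel sources): composing an MDIC
+ * read of PHY register `reg` on PHY address `phy`, then polling for completion
+ * (`reg`, `phy`, `mdic` and `phy_data` are illustrative locals; real code also
+ * bounds the poll loop):
+ *
+ *     writel((reg << E1000_MDIC_REG_SHIFT) | (phy << E1000_MDIC_PHY_SHIFT) |
+ *            E1000_MDIC_OP_READ, hw->hw_addr + E1000_MDIC);
+ *     do {
+ *         mdic = readl(hw->hw_addr + E1000_MDIC);
+ *     } while (!(mdic & E1000_MDIC_READY));
+ *     if (!(mdic & E1000_MDIC_ERROR))
+ *         phy_data = mdic & E1000_MDIC_DATA_MASK;
+ */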
+
+#define E1000_KUMCTRLSTA_MASK           0x0000FFFF
+#define E1000_KUMCTRLSTA_OFFSET         0x001F0000
+#define E1000_KUMCTRLSTA_OFFSET_SHIFT   16
+#define E1000_KUMCTRLSTA_REN            0x00200000
+
+#define E1000_KUMCTRLSTA_OFFSET_FIFO_CTRL      0x00000000
+#define E1000_KUMCTRLSTA_OFFSET_CTRL           0x00000001
+#define E1000_KUMCTRLSTA_OFFSET_INB_CTRL       0x00000002
+#define E1000_KUMCTRLSTA_OFFSET_DIAG           0x00000003
+#define E1000_KUMCTRLSTA_OFFSET_TIMEOUTS       0x00000004
+#define E1000_KUMCTRLSTA_OFFSET_INB_PARAM      0x00000009
+#define E1000_KUMCTRLSTA_OFFSET_HD_CTRL        0x00000010
+#define E1000_KUMCTRLSTA_OFFSET_M2P_SERDES     0x0000001E
+#define E1000_KUMCTRLSTA_OFFSET_M2P_MODES      0x0000001F
+
+/* FIFO Control */
+#define E1000_KUMCTRLSTA_FIFO_CTRL_RX_BYPASS   0x00000008
+#define E1000_KUMCTRLSTA_FIFO_CTRL_TX_BYPASS   0x00000800
+
+/* In-Band Control */
+#define E1000_KUMCTRLSTA_INB_CTRL_LINK_STATUS_TX_TIMEOUT_DEFAULT    0x00000500
+#define E1000_KUMCTRLSTA_INB_CTRL_DIS_PADDING  0x00000010
+
+/* Half-Duplex Control */
+#define E1000_KUMCTRLSTA_HD_CTRL_10_100_DEFAULT 0x00000004
+#define E1000_KUMCTRLSTA_HD_CTRL_1000_DEFAULT  0x00000000
+
+#define E1000_KUMCTRLSTA_OFFSET_K0S_CTRL       0x0000001E
+
+#define E1000_KUMCTRLSTA_DIAG_FELPBK           0x2000
+#define E1000_KUMCTRLSTA_DIAG_NELPBK           0x1000
+
+#define E1000_KUMCTRLSTA_K0S_100_EN            0x2000
+#define E1000_KUMCTRLSTA_K0S_GBE_EN            0x1000
+#define E1000_KUMCTRLSTA_K0S_ENTRY_LATENCY_MASK   0x0003
+
+#define E1000_KABGTXD_BGSQLBIAS                0x00050000
+
+#define E1000_PHY_CTRL_SPD_EN                  0x00000001
+#define E1000_PHY_CTRL_D0A_LPLU                0x00000002
+#define E1000_PHY_CTRL_NOND0A_LPLU             0x00000004
+#define E1000_PHY_CTRL_NOND0A_GBE_DISABLE      0x00000008
+#define E1000_PHY_CTRL_GBE_DISABLE             0x00000040
+#define E1000_PHY_CTRL_B2B_EN                  0x00000080
+
+/* LED Control */
+#define E1000_LEDCTL_LED0_MODE_MASK       0x0000000F
+#define E1000_LEDCTL_LED0_MODE_SHIFT      0
+#define E1000_LEDCTL_LED0_BLINK_RATE      0x0000020
+#define E1000_LEDCTL_LED0_IVRT            0x00000040
+#define E1000_LEDCTL_LED0_BLINK           0x00000080
+#define E1000_LEDCTL_LED1_MODE_MASK       0x00000F00
+#define E1000_LEDCTL_LED1_MODE_SHIFT      8
+#define E1000_LEDCTL_LED1_BLINK_RATE      0x0002000
+#define E1000_LEDCTL_LED1_IVRT            0x00004000
+#define E1000_LEDCTL_LED1_BLINK           0x00008000
+#define E1000_LEDCTL_LED2_MODE_MASK       0x000F0000
+#define E1000_LEDCTL_LED2_MODE_SHIFT      16
+#define E1000_LEDCTL_LED2_BLINK_RATE      0x00200000
+#define E1000_LEDCTL_LED2_IVRT            0x00400000
+#define E1000_LEDCTL_LED2_BLINK           0x00800000
+#define E1000_LEDCTL_LED3_MODE_MASK       0x0F000000
+#define E1000_LEDCTL_LED3_MODE_SHIFT      24
+#define E1000_LEDCTL_LED3_BLINK_RATE      0x20000000
+#define E1000_LEDCTL_LED3_IVRT            0x40000000
+#define E1000_LEDCTL_LED3_BLINK           0x80000000
+
+#define E1000_LEDCTL_MODE_LINK_10_1000  0x0
+#define E1000_LEDCTL_MODE_LINK_100_1000 0x1
+#define E1000_LEDCTL_MODE_LINK_UP       0x2
+#define E1000_LEDCTL_MODE_ACTIVITY      0x3
+#define E1000_LEDCTL_MODE_LINK_ACTIVITY 0x4
+#define E1000_LEDCTL_MODE_LINK_10       0x5
+#define E1000_LEDCTL_MODE_LINK_100      0x6
+#define E1000_LEDCTL_MODE_LINK_1000     0x7
+#define E1000_LEDCTL_MODE_PCIX_MODE     0x8
+#define E1000_LEDCTL_MODE_FULL_DUPLEX   0x9
+#define E1000_LEDCTL_MODE_COLLISION     0xA
+#define E1000_LEDCTL_MODE_BUS_SPEED     0xB
+#define E1000_LEDCTL_MODE_BUS_SIZE      0xC
+#define E1000_LEDCTL_MODE_PAUSED        0xD
+#define E1000_LEDCTL_MODE_LED_ON        0xE
+#define E1000_LEDCTL_MODE_LED_OFF       0xF
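+
+/* Illustrative sketch (not from the original Intel sources): selecting the
+ * ACTIVITY mode for LED0 while leaving the other LED fields untouched:
+ *
+ *     ledctl = readl(hw->hw_addr + E1000_LEDCTL);
+ *     ledctl &= ~E1000_LEDCTL_LED0_MODE_MASK;
+ *     ledctl |= E1000_LEDCTL_MODE_ACTIVITY << E1000_LEDCTL_LED0_MODE_SHIFT;
+ *     writel(ledctl, hw->hw_addr + E1000_LEDCTL);
+ */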
+
+/* Receive Address */
+#define E1000_RAH_AV  0x80000000        /* Receive address valid */
+
+/* Interrupt Cause Read */
+#define E1000_ICR_TXDW          0x00000001 /* Transmit desc written back */
+#define E1000_ICR_TXQE          0x00000002 /* Transmit Queue empty */
+#define E1000_ICR_LSC           0x00000004 /* Link Status Change */
+#define E1000_ICR_RXSEQ         0x00000008 /* rx sequence error */
+#define E1000_ICR_RXDMT0        0x00000010 /* rx desc min. threshold (0) */
+#define E1000_ICR_RXO           0x00000040 /* rx overrun */
+#define E1000_ICR_RXT0          0x00000080 /* rx timer intr (ring 0) */
+#define E1000_ICR_MDAC          0x00000200 /* MDIO access complete */
+#define E1000_ICR_RXCFG         0x00000400 /* RX /c/ ordered set */
+#define E1000_ICR_GPI_EN0       0x00000800 /* GP Int 0 */
+#define E1000_ICR_GPI_EN1       0x00001000 /* GP Int 1 */
+#define E1000_ICR_GPI_EN2       0x00002000 /* GP Int 2 */
+#define E1000_ICR_GPI_EN3       0x00004000 /* GP Int 3 */
+#define E1000_ICR_TXD_LOW       0x00008000
+#define E1000_ICR_SRPD          0x00010000
+#define E1000_ICR_ACK           0x00020000 /* Receive Ack frame */
+#define E1000_ICR_MNG           0x00040000 /* Manageability event */
+#define E1000_ICR_DOCK          0x00080000 /* Dock/Undock */
+#define E1000_ICR_INT_ASSERTED  0x80000000 /* If this bit asserted, the driver should claim the interrupt */
+#define E1000_ICR_RXD_FIFO_PAR0 0x00100000 /* queue 0 Rx descriptor FIFO parity error */
+#define E1000_ICR_TXD_FIFO_PAR0 0x00200000 /* queue 0 Tx descriptor FIFO parity error */
+#define E1000_ICR_HOST_ARB_PAR  0x00400000 /* host arb read buffer parity error */
+#define E1000_ICR_PB_PAR        0x00800000 /* packet buffer parity error */
+#define E1000_ICR_RXD_FIFO_PAR1 0x01000000 /* queue 1 Rx descriptor FIFO parity error */
+#define E1000_ICR_TXD_FIFO_PAR1 0x02000000 /* queue 1 Tx descriptor FIFO parity error */
+#define E1000_ICR_ALL_PARITY    0x03F00000 /* all parity error bits */
+#define E1000_ICR_DSW           0x00000020 /* FW changed the status of DISSW bit in the FWSM */
+#define E1000_ICR_PHYINT        0x00001000 /* LAN connected device generates an interrupt */
+#define E1000_ICR_EPRST         0x00100000 /* ME hardware reset occurs */
+
+/* Interrupt Cause Set */
+#define E1000_ICS_TXDW      E1000_ICR_TXDW      /* Transmit desc written back */
+#define E1000_ICS_TXQE      E1000_ICR_TXQE      /* Transmit Queue empty */
+#define E1000_ICS_LSC       E1000_ICR_LSC       /* Link Status Change */
+#define E1000_ICS_RXSEQ     E1000_ICR_RXSEQ     /* rx sequence error */
+#define E1000_ICS_RXDMT0    E1000_ICR_RXDMT0    /* rx desc min. threshold */
+#define E1000_ICS_RXO       E1000_ICR_RXO       /* rx overrun */
+#define E1000_ICS_RXT0      E1000_ICR_RXT0      /* rx timer intr */
+#define E1000_ICS_MDAC      E1000_ICR_MDAC      /* MDIO access complete */
+#define E1000_ICS_RXCFG     E1000_ICR_RXCFG     /* RX /c/ ordered set */
+#define E1000_ICS_GPI_EN0   E1000_ICR_GPI_EN0   /* GP Int 0 */
+#define E1000_ICS_GPI_EN1   E1000_ICR_GPI_EN1   /* GP Int 1 */
+#define E1000_ICS_GPI_EN2   E1000_ICR_GPI_EN2   /* GP Int 2 */
+#define E1000_ICS_GPI_EN3   E1000_ICR_GPI_EN3   /* GP Int 3 */
+#define E1000_ICS_TXD_LOW   E1000_ICR_TXD_LOW
+#define E1000_ICS_SRPD      E1000_ICR_SRPD
+#define E1000_ICS_ACK       E1000_ICR_ACK       /* Receive Ack frame */
+#define E1000_ICS_MNG       E1000_ICR_MNG       /* Manageability event */
+#define E1000_ICS_DOCK      E1000_ICR_DOCK      /* Dock/Undock */
+#define E1000_ICS_RXD_FIFO_PAR0 E1000_ICR_RXD_FIFO_PAR0 /* queue 0 Rx descriptor FIFO parity error */
+#define E1000_ICS_TXD_FIFO_PAR0 E1000_ICR_TXD_FIFO_PAR0 /* queue 0 Tx descriptor FIFO parity error */
+#define E1000_ICS_HOST_ARB_PAR  E1000_ICR_HOST_ARB_PAR  /* host arb read buffer parity error */
+#define E1000_ICS_PB_PAR        E1000_ICR_PB_PAR        /* packet buffer parity error */
+#define E1000_ICS_RXD_FIFO_PAR1 E1000_ICR_RXD_FIFO_PAR1 /* queue 1 Rx descriptor FIFO parity error */
+#define E1000_ICS_TXD_FIFO_PAR1 E1000_ICR_TXD_FIFO_PAR1 /* queue 1 Tx descriptor FIFO parity error */
+#define E1000_ICS_DSW       E1000_ICR_DSW
+#define E1000_ICS_PHYINT    E1000_ICR_PHYINT
+#define E1000_ICS_EPRST     E1000_ICR_EPRST
+
+/* Interrupt Mask Set */
+#define E1000_IMS_TXDW      E1000_ICR_TXDW      /* Transmit desc written back */
+#define E1000_IMS_TXQE      E1000_ICR_TXQE      /* Transmit Queue empty */
+#define E1000_IMS_LSC       E1000_ICR_LSC       /* Link Status Change */
+#define E1000_IMS_RXSEQ     E1000_ICR_RXSEQ     /* rx sequence error */
+#define E1000_IMS_RXDMT0    E1000_ICR_RXDMT0    /* rx desc min. threshold */
+#define E1000_IMS_RXO       E1000_ICR_RXO       /* rx overrun */
+#define E1000_IMS_RXT0      E1000_ICR_RXT0      /* rx timer intr */
+#define E1000_IMS_MDAC      E1000_ICR_MDAC      /* MDIO access complete */
+#define E1000_IMS_RXCFG     E1000_ICR_RXCFG     /* RX /c/ ordered set */
+#define E1000_IMS_GPI_EN0   E1000_ICR_GPI_EN0   /* GP Int 0 */
+#define E1000_IMS_GPI_EN1   E1000_ICR_GPI_EN1   /* GP Int 1 */
+#define E1000_IMS_GPI_EN2   E1000_ICR_GPI_EN2   /* GP Int 2 */
+#define E1000_IMS_GPI_EN3   E1000_ICR_GPI_EN3   /* GP Int 3 */
+#define E1000_IMS_TXD_LOW   E1000_ICR_TXD_LOW
+#define E1000_IMS_SRPD      E1000_ICR_SRPD
+#define E1000_IMS_ACK       E1000_ICR_ACK       /* Receive Ack frame */
+#define E1000_IMS_MNG       E1000_ICR_MNG       /* Manageability event */
+#define E1000_IMS_DOCK      E1000_ICR_DOCK      /* Dock/Undock */
+#define E1000_IMS_RXD_FIFO_PAR0 E1000_ICR_RXD_FIFO_PAR0 /* queue 0 Rx descriptor FIFO parity error */
+#define E1000_IMS_TXD_FIFO_PAR0 E1000_ICR_TXD_FIFO_PAR0 /* queue 0 Tx descriptor FIFO parity error */
+#define E1000_IMS_HOST_ARB_PAR  E1000_ICR_HOST_ARB_PAR  /* host arb read buffer parity error */
+#define E1000_IMS_PB_PAR        E1000_ICR_PB_PAR        /* packet buffer parity error */
+#define E1000_IMS_RXD_FIFO_PAR1 E1000_ICR_RXD_FIFO_PAR1 /* queue 1 Rx descriptor FIFO parity error */
+#define E1000_IMS_TXD_FIFO_PAR1 E1000_ICR_TXD_FIFO_PAR1 /* queue 1 Tx descriptor FIFO parity error */
+#define E1000_IMS_DSW       E1000_ICR_DSW
+#define E1000_IMS_PHYINT    E1000_ICR_PHYINT
+#define E1000_IMS_EPRST     E1000_ICR_EPRST
+
+/* Interrupt Mask Clear */
+#define E1000_IMC_TXDW      E1000_ICR_TXDW      /* Transmit desc written back */
+#define E1000_IMC_TXQE      E1000_ICR_TXQE      /* Transmit Queue empty */
+#define E1000_IMC_LSC       E1000_ICR_LSC       /* Link Status Change */
+#define E1000_IMC_RXSEQ     E1000_ICR_RXSEQ     /* rx sequence error */
+#define E1000_IMC_RXDMT0    E1000_ICR_RXDMT0    /* rx desc min. threshold */
+#define E1000_IMC_RXO       E1000_ICR_RXO       /* rx overrun */
+#define E1000_IMC_RXT0      E1000_ICR_RXT0      /* rx timer intr */
+#define E1000_IMC_MDAC      E1000_ICR_MDAC      /* MDIO access complete */
+#define E1000_IMC_RXCFG     E1000_ICR_RXCFG     /* RX /c/ ordered set */
+#define E1000_IMC_GPI_EN0   E1000_ICR_GPI_EN0   /* GP Int 0 */
+#define E1000_IMC_GPI_EN1   E1000_ICR_GPI_EN1   /* GP Int 1 */
+#define E1000_IMC_GPI_EN2   E1000_ICR_GPI_EN2   /* GP Int 2 */
+#define E1000_IMC_GPI_EN3   E1000_ICR_GPI_EN3   /* GP Int 3 */
+#define E1000_IMC_TXD_LOW   E1000_ICR_TXD_LOW
+#define E1000_IMC_SRPD      E1000_ICR_SRPD
+#define E1000_IMC_ACK       E1000_ICR_ACK       /* Receive Ack frame */
+#define E1000_IMC_MNG       E1000_ICR_MNG       /* Manageability event */
+#define E1000_IMC_DOCK      E1000_ICR_DOCK      /* Dock/Undock */
+#define E1000_IMC_RXD_FIFO_PAR0 E1000_ICR_RXD_FIFO_PAR0 /* queue 0 Rx descriptor FIFO parity error */
+#define E1000_IMC_TXD_FIFO_PAR0 E1000_ICR_TXD_FIFO_PAR0 /* queue 0 Tx descriptor FIFO parity error */
+#define E1000_IMC_HOST_ARB_PAR  E1000_ICR_HOST_ARB_PAR  /* host arb read buffer parity error */
+#define E1000_IMC_PB_PAR        E1000_ICR_PB_PAR        /* packet buffer parity error */
+#define E1000_IMC_RXD_FIFO_PAR1 E1000_ICR_RXD_FIFO_PAR1 /* queue 1 Rx descriptor FIFO parity error */
+#define E1000_IMC_TXD_FIFO_PAR1 E1000_ICR_TXD_FIFO_PAR1 /* queue 1 Tx descriptor FIFO parity error */
+#define E1000_IMC_DSW       E1000_ICR_DSW
+#define E1000_IMC_PHYINT    E1000_ICR_PHYINT
+#define E1000_IMC_EPRST     E1000_ICR_EPRST
+
+/* Receive Control */
+#define E1000_RCTL_RST            0x00000001    /* Software reset */
+#define E1000_RCTL_EN             0x00000002    /* enable */
+#define E1000_RCTL_SBP            0x00000004    /* store bad packet */
+#define E1000_RCTL_UPE            0x00000008    /* unicast promiscuous enable */
+#define E1000_RCTL_MPE            0x00000010    /* multicast promiscuous enable */
+#define E1000_RCTL_LPE            0x00000020    /* long packet enable */
+#define E1000_RCTL_LBM_NO         0x00000000    /* no loopback mode */
+#define E1000_RCTL_LBM_MAC        0x00000040    /* MAC loopback mode */
+#define E1000_RCTL_LBM_SLP        0x00000080    /* serial link loopback mode */
+#define E1000_RCTL_LBM_TCVR       0x000000C0    /* tcvr loopback mode */
+#define E1000_RCTL_DTYP_MASK      0x00000C00    /* Descriptor type mask */
+#define E1000_RCTL_DTYP_PS        0x00000400    /* Packet Split descriptor */
+#define E1000_RCTL_RDMTS_HALF     0x00000000    /* rx desc min threshold size */
+#define E1000_RCTL_RDMTS_QUAT     0x00000100    /* rx desc min threshold size */
+#define E1000_RCTL_RDMTS_EIGTH    0x00000200    /* rx desc min threshold size */
+#define E1000_RCTL_MO_SHIFT       12            /* multicast offset shift */
+#define E1000_RCTL_MO_0           0x00000000    /* multicast offset 11:0 */
+#define E1000_RCTL_MO_1           0x00001000    /* multicast offset 12:1 */
+#define E1000_RCTL_MO_2           0x00002000    /* multicast offset 13:2 */
+#define E1000_RCTL_MO_3           0x00003000    /* multicast offset 15:4 */
+#define E1000_RCTL_MDR            0x00004000    /* multicast desc ring 0 */
+#define E1000_RCTL_BAM            0x00008000    /* broadcast enable */
+/* these buffer sizes are valid if E1000_RCTL_BSEX is 0 */
+#define E1000_RCTL_SZ_2048        0x00000000    /* rx buffer size 2048 */
+#define E1000_RCTL_SZ_1024        0x00010000    /* rx buffer size 1024 */
+#define E1000_RCTL_SZ_512         0x00020000    /* rx buffer size 512 */
+#define E1000_RCTL_SZ_256         0x00030000    /* rx buffer size 256 */
+/* these buffer sizes are valid if E1000_RCTL_BSEX is 1 */
+#define E1000_RCTL_SZ_16384       0x00010000    /* rx buffer size 16384 */
+#define E1000_RCTL_SZ_8192        0x00020000    /* rx buffer size 8192 */
+#define E1000_RCTL_SZ_4096        0x00030000    /* rx buffer size 4096 */
+#define E1000_RCTL_VFE            0x00040000    /* vlan filter enable */
+#define E1000_RCTL_CFIEN          0x00080000    /* canonical form enable */
+#define E1000_RCTL_CFI            0x00100000    /* canonical form indicator */
+#define E1000_RCTL_DPF            0x00400000    /* discard pause frames */
+#define E1000_RCTL_PMCF           0x00800000    /* pass MAC control frames */
+#define E1000_RCTL_BSEX           0x02000000    /* Buffer size extension */
+#define E1000_RCTL_SECRC          0x04000000    /* Strip Ethernet CRC */
+#define E1000_RCTL_FLXBUF_MASK    0x78000000    /* Flexible buffer size */
+#define E1000_RCTL_FLXBUF_SHIFT   27            /* Flexible buffer shift */
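+
+/* Illustrative sketch (not from the original Intel sources): the buffer-size
+ * encodings above are only meaningful together with the BSEX bit, e.g. to
+ * select a 4096-byte receive buffer (`rctl` is an illustrative local):
+ *
+ *     rctl &= ~(0x00030000 | E1000_RCTL_BSEX);   // clear size field and BSEX
+ *     rctl |= E1000_RCTL_SZ_4096 | E1000_RCTL_BSEX;
+ */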
+
+/* Use byte values for the following shift parameters
+ * Usage:
+ *     psrctl |= (((ROUNDUP(value0, 128) >> E1000_PSRCTL_BSIZE0_SHIFT) &
+ *                  E1000_PSRCTL_BSIZE0_MASK) |
+ *                ((ROUNDUP(value1, 1024) >> E1000_PSRCTL_BSIZE1_SHIFT) &
+ *                  E1000_PSRCTL_BSIZE1_MASK) |
+ *                ((ROUNDUP(value2, 1024) << E1000_PSRCTL_BSIZE2_SHIFT) &
+ *                  E1000_PSRCTL_BSIZE2_MASK) |
+ *                ((ROUNDUP(value3, 1024) << E1000_PSRCTL_BSIZE3_SHIFT) &
+ *                  E1000_PSRCTL_BSIZE3_MASK));
+ * where value0 = [128..16256],  default=256
+ *       value1 = [1024..64512], default=4096
+ *       value2 = [0..64512],    default=4096
+ *       value3 = [0..64512],    default=0
+ */
+
+#define E1000_PSRCTL_BSIZE0_MASK   0x0000007F
+#define E1000_PSRCTL_BSIZE1_MASK   0x00003F00
+#define E1000_PSRCTL_BSIZE2_MASK   0x003F0000
+#define E1000_PSRCTL_BSIZE3_MASK   0x3F000000
+
+#define E1000_PSRCTL_BSIZE0_SHIFT  7            /* Shift _right_ 7 */
+#define E1000_PSRCTL_BSIZE1_SHIFT  2            /* Shift _right_ 2 */
+#define E1000_PSRCTL_BSIZE2_SHIFT  6            /* Shift _left_ 6 */
+#define E1000_PSRCTL_BSIZE3_SHIFT 14            /* Shift _left_ 14 */
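+
+/* Worked example (not from the original Intel sources): for the documented
+ * defaults (value0=256, value1=4096, value2=4096, value3=0) the expression
+ * above evaluates to 0x02 | 0x0400 | 0x040000 | 0 = 0x00040402. */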
+
+/* SW_W_SYNC definitions */
+#define E1000_SWFW_EEP_SM     0x0001
+#define E1000_SWFW_PHY0_SM    0x0002
+#define E1000_SWFW_PHY1_SM    0x0004
+#define E1000_SWFW_MAC_CSR_SM 0x0008
+
+/* Receive Descriptor */
+#define E1000_RDT_DELAY 0x0000ffff      /* Delay timer (1=1024us) */
+#define E1000_RDT_FPDB  0x80000000      /* Flush descriptor block */
+#define E1000_RDLEN_LEN 0x0007ff80      /* descriptor length */
+#define E1000_RDH_RDH   0x0000ffff      /* receive descriptor head */
+#define E1000_RDT_RDT   0x0000ffff      /* receive descriptor tail */
+
+/* Flow Control */
+#define E1000_FCRTH_RTH  0x0000FFF8     /* Mask Bits[15:3] for RTH */
+#define E1000_FCRTH_XFCE 0x80000000     /* External Flow Control Enable */
+#define E1000_FCRTL_RTL  0x0000FFF8     /* Mask Bits[15:3] for RTL */
+#define E1000_FCRTL_XONE 0x80000000     /* Enable XON frame transmission */
+
+/* Header split receive */
+#define E1000_RFCTL_ISCSI_DIS           0x00000001
+#define E1000_RFCTL_ISCSI_DWC_MASK      0x0000003E
+#define E1000_RFCTL_ISCSI_DWC_SHIFT     1
+#define E1000_RFCTL_NFSW_DIS            0x00000040
+#define E1000_RFCTL_NFSR_DIS            0x00000080
+#define E1000_RFCTL_NFS_VER_MASK        0x00000300
+#define E1000_RFCTL_NFS_VER_SHIFT       8
+#define E1000_RFCTL_IPV6_DIS            0x00000400
+#define E1000_RFCTL_IPV6_XSUM_DIS       0x00000800
+#define E1000_RFCTL_ACK_DIS             0x00001000
+#define E1000_RFCTL_ACKD_DIS            0x00002000
+#define E1000_RFCTL_IPFRSP_DIS          0x00004000
+#define E1000_RFCTL_EXTEN               0x00008000
+#define E1000_RFCTL_IPV6_EX_DIS         0x00010000
+#define E1000_RFCTL_NEW_IPV6_EXT_DIS    0x00020000
+
+/* Receive Descriptor Control */
+#define E1000_RXDCTL_PTHRESH 0x0000003F /* RXDCTL Prefetch Threshold */
+#define E1000_RXDCTL_HTHRESH 0x00003F00 /* RXDCTL Host Threshold */
+#define E1000_RXDCTL_WTHRESH 0x003F0000 /* RXDCTL Writeback Threshold */
+#define E1000_RXDCTL_GRAN    0x01000000 /* RXDCTL Granularity */
+
+/* Transmit Descriptor Control */
+#define E1000_TXDCTL_PTHRESH 0x0000003F /* TXDCTL Prefetch Threshold */
+#define E1000_TXDCTL_HTHRESH 0x00003F00 /* TXDCTL Host Threshold */
+#define E1000_TXDCTL_WTHRESH 0x003F0000 /* TXDCTL Writeback Threshold */
+#define E1000_TXDCTL_GRAN    0x01000000 /* TXDCTL Granularity */
+#define E1000_TXDCTL_LWTHRESH 0xFE000000 /* TXDCTL Low Threshold */
+#define E1000_TXDCTL_FULL_TX_DESC_WB 0x01010000 /* GRAN=1, WTHRESH=1 */
+#define E1000_TXDCTL_COUNT_DESC 0x00400000 /* Enable the counting of desc.
+                                              still to be processed. */
+/* Transmit Configuration Word */
+#define E1000_TXCW_FD         0x00000020        /* TXCW full duplex */
+#define E1000_TXCW_HD         0x00000040        /* TXCW half duplex */
+#define E1000_TXCW_PAUSE      0x00000080        /* TXCW sym pause request */
+#define E1000_TXCW_ASM_DIR    0x00000100        /* TXCW astm pause direction */
+#define E1000_TXCW_PAUSE_MASK 0x00000180        /* TXCW pause request mask */
+#define E1000_TXCW_RF         0x00003000        /* TXCW remote fault */
+#define E1000_TXCW_NP         0x00008000        /* TXCW next page */
+#define E1000_TXCW_CW         0x0000ffff        /* TxConfigWord mask */
+#define E1000_TXCW_TXC        0x40000000        /* Transmit Config control */
+#define E1000_TXCW_ANE        0x80000000        /* Auto-neg enable */
+
+/* Receive Configuration Word */
+#define E1000_RXCW_CW    0x0000ffff     /* RxConfigWord mask */
+#define E1000_RXCW_NC    0x04000000     /* Receive config no carrier */
+#define E1000_RXCW_IV    0x08000000     /* Receive config invalid */
+#define E1000_RXCW_CC    0x10000000     /* Receive config change */
+#define E1000_RXCW_C     0x20000000     /* Receive config */
+#define E1000_RXCW_SYNCH 0x40000000     /* Receive config synch */
+#define E1000_RXCW_ANC   0x80000000     /* Auto-neg complete */
+
+/* Transmit Control */
+#define E1000_TCTL_RST    0x00000001    /* software reset */
+#define E1000_TCTL_EN     0x00000002    /* enable tx */
+#define E1000_TCTL_BCE    0x00000004    /* busy check enable */
+#define E1000_TCTL_PSP    0x00000008    /* pad short packets */
+#define E1000_TCTL_CT     0x00000ff0    /* collision threshold */
+#define E1000_TCTL_COLD   0x003ff000    /* collision distance */
+#define E1000_TCTL_SWXOFF 0x00400000    /* SW Xoff transmission */
+#define E1000_TCTL_PBE    0x00800000    /* Packet Burst Enable */
+#define E1000_TCTL_RTLC   0x01000000    /* Re-transmit on late collision */
+#define E1000_TCTL_NRTU   0x02000000    /* No Re-transmit on underrun */
+#define E1000_TCTL_MULR   0x10000000    /* Multiple request support */
+/* Extended Transmit Control */
+#define E1000_TCTL_EXT_BST_MASK  0x000003FF /* Backoff Slot Time */
+#define E1000_TCTL_EXT_GCEX_MASK 0x000FFC00 /* Gigabit Carry Extend Padding */
+
+#define DEFAULT_80003ES2LAN_TCTL_EXT_GCEX   0x00010000
+
+/* Receive Checksum Control */
+#define E1000_RXCSUM_PCSS_MASK 0x000000FF   /* Packet Checksum Start */
+#define E1000_RXCSUM_IPOFL     0x00000100   /* IPv4 checksum offload */
+#define E1000_RXCSUM_TUOFL     0x00000200   /* TCP / UDP checksum offload */
+#define E1000_RXCSUM_IPV6OFL   0x00000400   /* IPv6 checksum offload */
+#define E1000_RXCSUM_IPPCSE    0x00001000   /* IP payload checksum enable */
+#define E1000_RXCSUM_PCSD      0x00002000   /* packet checksum disabled */
+
+/* Multiple Receive Queue Control */
+#define E1000_MRQC_ENABLE_MASK              0x00000003
+#define E1000_MRQC_ENABLE_RSS_2Q            0x00000001
+#define E1000_MRQC_ENABLE_RSS_INT           0x00000004
+#define E1000_MRQC_RSS_FIELD_MASK           0xFFFF0000
+#define E1000_MRQC_RSS_FIELD_IPV4_TCP       0x00010000
+#define E1000_MRQC_RSS_FIELD_IPV4           0x00020000
+#define E1000_MRQC_RSS_FIELD_IPV6_TCP_EX    0x00040000
+#define E1000_MRQC_RSS_FIELD_IPV6_EX        0x00080000
+#define E1000_MRQC_RSS_FIELD_IPV6           0x00100000
+#define E1000_MRQC_RSS_FIELD_IPV6_TCP       0x00200000
+
+/* Definitions for power management and wakeup registers */
+/* Wake Up Control */
+#define E1000_WUC_APME       0x00000001 /* APM Enable */
+#define E1000_WUC_PME_EN     0x00000002 /* PME Enable */
+#define E1000_WUC_PME_STATUS 0x00000004 /* PME Status */
+#define E1000_WUC_APMPME     0x00000008 /* Assert PME on APM Wakeup */
+#define E1000_WUC_SPM        0x80000000 /* Enable SPM */
+
+/* Wake Up Filter Control */
+#define E1000_WUFC_LNKC 0x00000001 /* Link Status Change Wakeup Enable */
+#define E1000_WUFC_MAG  0x00000002 /* Magic Packet Wakeup Enable */
+#define E1000_WUFC_EX   0x00000004 /* Directed Exact Wakeup Enable */
+#define E1000_WUFC_MC   0x00000008 /* Directed Multicast Wakeup Enable */
+#define E1000_WUFC_BC   0x00000010 /* Broadcast Wakeup Enable */
+#define E1000_WUFC_ARP  0x00000020 /* ARP Request Packet Wakeup Enable */
+#define E1000_WUFC_IPV4 0x00000040 /* Directed IPv4 Packet Wakeup Enable */
+#define E1000_WUFC_IPV6 0x00000080 /* Directed IPv6 Packet Wakeup Enable */
+#define E1000_WUFC_IGNORE_TCO      0x00008000 /* Ignore WakeOn TCO packets */
+#define E1000_WUFC_FLX0 0x00010000 /* Flexible Filter 0 Enable */
+#define E1000_WUFC_FLX1 0x00020000 /* Flexible Filter 1 Enable */
+#define E1000_WUFC_FLX2 0x00040000 /* Flexible Filter 2 Enable */
+#define E1000_WUFC_FLX3 0x00080000 /* Flexible Filter 3 Enable */
+#define E1000_WUFC_ALL_FILTERS 0x000F00FF /* Mask for all wakeup filters */
+#define E1000_WUFC_FLX_OFFSET 16       /* Offset to the Flexible Filters bits */
+#define E1000_WUFC_FLX_FILTERS 0x000F0000 /* Mask for the 4 flexible filters */
+
+/* Wake Up Status */
+#define E1000_WUS_LNKC 0x00000001 /* Link Status Changed */
+#define E1000_WUS_MAG  0x00000002 /* Magic Packet Received */
+#define E1000_WUS_EX   0x00000004 /* Directed Exact Received */
+#define E1000_WUS_MC   0x00000008 /* Directed Multicast Received */
+#define E1000_WUS_BC   0x00000010 /* Broadcast Received */
+#define E1000_WUS_ARP  0x00000020 /* ARP Request Packet Received */
+#define E1000_WUS_IPV4 0x00000040 /* Directed IPv4 Packet Wakeup Received */
+#define E1000_WUS_IPV6 0x00000080 /* Directed IPv6 Packet Wakeup Received */
+#define E1000_WUS_FLX0 0x00010000 /* Flexible Filter 0 Match */
+#define E1000_WUS_FLX1 0x00020000 /* Flexible Filter 1 Match */
+#define E1000_WUS_FLX2 0x00040000 /* Flexible Filter 2 Match */
+#define E1000_WUS_FLX3 0x00080000 /* Flexible Filter 3 Match */
+#define E1000_WUS_FLX_FILTERS 0x000F0000 /* Mask for the 4 flexible filters */
+
+/* Management Control */
+#define E1000_MANC_SMBUS_EN      0x00000001 /* SMBus Enabled - RO */
+#define E1000_MANC_ASF_EN        0x00000002 /* ASF Enabled - RO */
+#define E1000_MANC_R_ON_FORCE    0x00000004 /* Reset on Force TCO - RO */
+#define E1000_MANC_RMCP_EN       0x00000100 /* Enable RMCP 026Fh Filtering */
+#define E1000_MANC_0298_EN       0x00000200 /* Enable RMCP 0298h Filtering */
+#define E1000_MANC_IPV4_EN       0x00000400 /* Enable IPv4 */
+#define E1000_MANC_IPV6_EN       0x00000800 /* Enable IPv6 */
+#define E1000_MANC_SNAP_EN       0x00001000 /* Accept LLC/SNAP */
+#define E1000_MANC_ARP_EN        0x00002000 /* Enable ARP Request Filtering */
+#define E1000_MANC_NEIGHBOR_EN   0x00004000 /* Enable Neighbor Discovery
+                                             * Filtering */
+#define E1000_MANC_ARP_RES_EN    0x00008000 /* Enable ARP response Filtering */
+#define E1000_MANC_TCO_RESET     0x00010000 /* TCO Reset Occurred */
+#define E1000_MANC_RCV_TCO_EN    0x00020000 /* Receive TCO Packets Enabled */
+#define E1000_MANC_REPORT_STATUS 0x00040000 /* Status Reporting Enabled */
+#define E1000_MANC_RCV_ALL       0x00080000 /* Receive All Enabled */
+#define E1000_MANC_BLK_PHY_RST_ON_IDE   0x00040000 /* Block phy resets */
+#define E1000_MANC_EN_MAC_ADDR_FILTER   0x00100000 /* Enable MAC address
+                                                    * filtering */
+#define E1000_MANC_EN_MNG2HOST   0x00200000 /* Enable MNG packets to host
+                                             * memory */
+#define E1000_MANC_EN_IP_ADDR_FILTER    0x00400000 /* Enable IP address
+                                                    * filtering */
+#define E1000_MANC_EN_XSUM_FILTER   0x00800000 /* Enable checksum filtering */
+#define E1000_MANC_BR_EN         0x01000000 /* Enable broadcast filtering */
+#define E1000_MANC_SMB_REQ       0x01000000 /* SMBus Request */
+#define E1000_MANC_SMB_GNT       0x02000000 /* SMBus Grant */
+#define E1000_MANC_SMB_CLK_IN    0x04000000 /* SMBus Clock In */
+#define E1000_MANC_SMB_DATA_IN   0x08000000 /* SMBus Data In */
+#define E1000_MANC_SMB_DATA_OUT  0x10000000 /* SMBus Data Out */
+#define E1000_MANC_SMB_CLK_OUT   0x20000000 /* SMBus Clock Out */
+
+#define E1000_MANC_SMB_DATA_OUT_SHIFT  28 /* SMBus Data Out Shift */
+#define E1000_MANC_SMB_CLK_OUT_SHIFT   29 /* SMBus Clock Out Shift */
+
+/* SW Semaphore Register */
+#define E1000_SWSM_SMBI         0x00000001 /* Driver Semaphore bit */
+#define E1000_SWSM_SWESMBI      0x00000002 /* FW Semaphore bit */
+#define E1000_SWSM_WMNG         0x00000004 /* Wake MNG Clock */
+#define E1000_SWSM_DRV_LOAD     0x00000008 /* Driver Loaded Bit */
+
+/* FW Semaphore Register */
+#define E1000_FWSM_MODE_MASK    0x0000000E /* FW mode */
+#define E1000_FWSM_MODE_SHIFT            1
+#define E1000_FWSM_FW_VALID     0x00008000 /* FW established a valid mode */
+
+#define E1000_FWSM_RSPCIPHY        0x00000040 /* Reset PHY on PCI reset */
+#define E1000_FWSM_DISSW           0x10000000 /* FW disable SW Write Access */
+#define E1000_FWSM_SKUSEL_MASK     0x60000000 /* LAN SKU select */
+#define E1000_FWSM_SKUEL_SHIFT     29
+#define E1000_FWSM_SKUSEL_EMB      0x0 /* Embedded SKU */
+#define E1000_FWSM_SKUSEL_CONS     0x1 /* Consumer SKU */
+#define E1000_FWSM_SKUSEL_PERF_100 0x2 /* Perf & Corp 10/100 SKU */
+#define E1000_FWSM_SKUSEL_PERF_GBE 0x3 /* Perf & Corp GbE SKU */
+
+/* FFLT Debug Register */
+#define E1000_FFLT_DBG_INVC     0x00100000 /* Invalid /C/ code handling */
+
+typedef enum {
+    e1000_mng_mode_none     = 0,
+    e1000_mng_mode_asf,
+    e1000_mng_mode_pt,
+    e1000_mng_mode_ipmi,
+    e1000_mng_mode_host_interface_only
+} e1000_mng_mode;
+
+/* Host Interface Control Register */
+#define E1000_HICR_EN           0x00000001  /* Enable Bit - RO */
+#define E1000_HICR_C            0x00000002  /* Driver sets this bit when done
+                                             * to put command in RAM */
+#define E1000_HICR_SV           0x00000004  /* Status Validity */
+#define E1000_HICR_FWR          0x00000080  /* FW reset. Set by the Host */
+
+/* Host Interface Command Interface - Address range 0x8800-0x8EFF */
+#define E1000_HI_MAX_DATA_LENGTH         252 /* Host Interface data length */
+#define E1000_HI_MAX_BLOCK_BYTE_LENGTH  1792 /* Number of bytes in range */
+#define E1000_HI_MAX_BLOCK_DWORD_LENGTH  448 /* Number of dwords in range */
+#define E1000_HI_COMMAND_TIMEOUT         500 /* Time in ms to process HI command */
+
+struct e1000_host_command_header {
+    u8 command_id;
+    u8 command_length;
+    u8 command_options;   /* I/F bits for command, status for return */
+    u8 checksum;
+};
+struct e1000_host_command_info {
+    struct e1000_host_command_header command_header;  /* Command header / command result header (4 bytes) */
+    u8 command_data[E1000_HI_MAX_DATA_LENGTH];   /* Command data, 0..252 bytes */
+};
+
+/* Host SMB register #0 */
+#define E1000_HSMC0R_CLKIN      0x00000001  /* SMB Clock in */
+#define E1000_HSMC0R_DATAIN     0x00000002  /* SMB Data in */
+#define E1000_HSMC0R_DATAOUT    0x00000004  /* SMB Data out */
+#define E1000_HSMC0R_CLKOUT     0x00000008  /* SMB Clock out */
+
+/* Host SMB register #1 */
+#define E1000_HSMC1R_CLKIN      E1000_HSMC0R_CLKIN
+#define E1000_HSMC1R_DATAIN     E1000_HSMC0R_DATAIN
+#define E1000_HSMC1R_DATAOUT    E1000_HSMC0R_DATAOUT
+#define E1000_HSMC1R_CLKOUT     E1000_HSMC0R_CLKOUT
+
+/* FW Status Register */
+#define E1000_FWSTS_FWS_MASK    0x000000FF  /* FW Status */
+
+/* Wake Up Packet Length */
+#define E1000_WUPL_LENGTH_MASK 0x0FFF   /* Only the lower 12 bits are valid */
+
+#define E1000_MDALIGN          4096
+
+/* PCI-Ex registers*/
+
+/* PCI-Ex Control Register */
+#define E1000_GCR_RXD_NO_SNOOP          0x00000001
+#define E1000_GCR_RXDSCW_NO_SNOOP       0x00000002
+#define E1000_GCR_RXDSCR_NO_SNOOP       0x00000004
+#define E1000_GCR_TXD_NO_SNOOP          0x00000008
+#define E1000_GCR_TXDSCW_NO_SNOOP       0x00000010
+#define E1000_GCR_TXDSCR_NO_SNOOP       0x00000020
+
+#define PCI_EX_NO_SNOOP_ALL (E1000_GCR_RXD_NO_SNOOP         | \
+                             E1000_GCR_RXDSCW_NO_SNOOP      | \
+                             E1000_GCR_RXDSCR_NO_SNOOP      | \
+                             E1000_GCR_TXD_NO_SNOOP         | \
+                             E1000_GCR_TXDSCW_NO_SNOOP      | \
+                             E1000_GCR_TXDSCR_NO_SNOOP)
+
+#define PCI_EX_82566_SNOOP_ALL PCI_EX_NO_SNOOP_ALL
+
+#define E1000_GCR_L1_ACT_WITHOUT_L0S_RX 0x08000000
+/* Function Active and Power State to MNG */
+#define E1000_FACTPS_FUNC0_POWER_STATE_MASK         0x00000003
+#define E1000_FACTPS_LAN0_VALID                     0x00000004
+#define E1000_FACTPS_FUNC0_AUX_EN                   0x00000008
+#define E1000_FACTPS_FUNC1_POWER_STATE_MASK         0x000000C0
+#define E1000_FACTPS_FUNC1_POWER_STATE_SHIFT        6
+#define E1000_FACTPS_LAN1_VALID                     0x00000100
+#define E1000_FACTPS_FUNC1_AUX_EN                   0x00000200
+#define E1000_FACTPS_FUNC2_POWER_STATE_MASK         0x00003000
+#define E1000_FACTPS_FUNC2_POWER_STATE_SHIFT        12
+#define E1000_FACTPS_IDE_ENABLE                     0x00004000
+#define E1000_FACTPS_FUNC2_AUX_EN                   0x00008000
+#define E1000_FACTPS_FUNC3_POWER_STATE_MASK         0x000C0000
+#define E1000_FACTPS_FUNC3_POWER_STATE_SHIFT        18
+#define E1000_FACTPS_SP_ENABLE                      0x00100000
+#define E1000_FACTPS_FUNC3_AUX_EN                   0x00200000
+#define E1000_FACTPS_FUNC4_POWER_STATE_MASK         0x03000000
+#define E1000_FACTPS_FUNC4_POWER_STATE_SHIFT        24
+#define E1000_FACTPS_IPMI_ENABLE                    0x04000000
+#define E1000_FACTPS_FUNC4_AUX_EN                   0x08000000
+#define E1000_FACTPS_MNGCG                          0x20000000
+#define E1000_FACTPS_LAN_FUNC_SEL                   0x40000000
+#define E1000_FACTPS_PM_STATE_CHANGED               0x80000000
+
+/* PCI-Ex Config Space */
+#define PCI_EX_LINK_STATUS           0x12
+#define PCI_EX_LINK_WIDTH_MASK       0x3F0
+#define PCI_EX_LINK_WIDTH_SHIFT      4
+
+/* EEPROM Commands - Microwire */
+#define EEPROM_READ_OPCODE_MICROWIRE  0x6  /* EEPROM read opcode */
+#define EEPROM_WRITE_OPCODE_MICROWIRE 0x5  /* EEPROM write opcode */
+#define EEPROM_ERASE_OPCODE_MICROWIRE 0x7  /* EEPROM erase opcode */
+#define EEPROM_EWEN_OPCODE_MICROWIRE  0x13 /* EEPROM erase/write enable */
+#define EEPROM_EWDS_OPCODE_MICROWIRE  0x10 /* EEPROM erase/write disable */
+
+/* EEPROM Commands - SPI */
+#define EEPROM_MAX_RETRY_SPI        5000 /* Max wait of 5ms, for RDY signal */
+#define EEPROM_READ_OPCODE_SPI      0x03  /* EEPROM read opcode */
+#define EEPROM_WRITE_OPCODE_SPI     0x02  /* EEPROM write opcode */
+#define EEPROM_A8_OPCODE_SPI        0x08  /* opcode bit-3 = address bit-8 */
+#define EEPROM_WREN_OPCODE_SPI      0x06  /* EEPROM set Write Enable latch */
+#define EEPROM_WRDI_OPCODE_SPI      0x04  /* EEPROM reset Write Enable latch */
+#define EEPROM_RDSR_OPCODE_SPI      0x05  /* EEPROM read Status register */
+#define EEPROM_WRSR_OPCODE_SPI      0x01  /* EEPROM write Status register */
+#define EEPROM_ERASE4K_OPCODE_SPI   0x20  /* EEPROM ERASE 4KB */
+#define EEPROM_ERASE64K_OPCODE_SPI  0xD8  /* EEPROM ERASE 64KB */
+#define EEPROM_ERASE256_OPCODE_SPI  0xDB  /* EEPROM ERASE 256B */
+
+/* EEPROM Size definitions */
+#define EEPROM_WORD_SIZE_SHIFT  6
+#define EEPROM_SIZE_SHIFT       10
+#define EEPROM_SIZE_MASK        0x1C00
+
+/* EEPROM Word Offsets */
+#define EEPROM_COMPAT                 0x0003
+#define EEPROM_ID_LED_SETTINGS        0x0004
+#define EEPROM_VERSION                0x0005
+#define EEPROM_SERDES_AMPLITUDE       0x0006 /* For SERDES output amplitude adjustment. */
+#define EEPROM_PHY_CLASS_WORD         0x0007
+#define EEPROM_INIT_CONTROL1_REG      0x000A
+#define EEPROM_INIT_CONTROL2_REG      0x000F
+#define EEPROM_SWDEF_PINS_CTRL_PORT_1 0x0010
+#define EEPROM_INIT_CONTROL3_PORT_B   0x0014
+#define EEPROM_INIT_3GIO_3            0x001A
+#define EEPROM_SWDEF_PINS_CTRL_PORT_0 0x0020
+#define EEPROM_INIT_CONTROL3_PORT_A   0x0024
+#define EEPROM_CFG                    0x0012
+#define EEPROM_FLASH_VERSION          0x0032
+#define EEPROM_CHECKSUM_REG           0x003F
+
+#define E1000_EEPROM_CFG_DONE         0x00040000   /* MNG config cycle done */
+#define E1000_EEPROM_CFG_DONE_PORT_1  0x00080000   /* ...for second port */
+
+/* Word definitions for ID LED Settings */
+#define ID_LED_RESERVED_0000 0x0000
+#define ID_LED_RESERVED_FFFF 0xFFFF
+#define ID_LED_RESERVED_82573  0xF746
+#define ID_LED_DEFAULT_82573   0x1811
+#define ID_LED_DEFAULT       ((ID_LED_OFF1_ON2 << 12) | \
+                              (ID_LED_OFF1_OFF2 << 8) | \
+                              (ID_LED_DEF1_DEF2 << 4) | \
+                              (ID_LED_DEF1_DEF2))
+#define ID_LED_DEFAULT_ICH8LAN  ((ID_LED_DEF1_DEF2 << 12) | \
+                                 (ID_LED_DEF1_OFF2 <<  8) | \
+                                 (ID_LED_DEF1_ON2  <<  4) | \
+                                 (ID_LED_DEF1_DEF2))
+#define ID_LED_DEF1_DEF2     0x1
+#define ID_LED_DEF1_ON2      0x2
+#define ID_LED_DEF1_OFF2     0x3
+#define ID_LED_ON1_DEF2      0x4
+#define ID_LED_ON1_ON2       0x5
+#define ID_LED_ON1_OFF2      0x6
+#define ID_LED_OFF1_DEF2     0x7
+#define ID_LED_OFF1_ON2      0x8
+#define ID_LED_OFF1_OFF2     0x9
+
+#define IGP_ACTIVITY_LED_MASK   0xFFFFF0FF
+#define IGP_ACTIVITY_LED_ENABLE 0x0300
+#define IGP_LED3_MODE           0x07000000
+
+
+/* Mask bits for SERDES amplitude adjustment in Word 6 of the EEPROM */
+#define EEPROM_SERDES_AMPLITUDE_MASK  0x000F
+
+/* Mask bit for PHY class in Word 7 of the EEPROM */
+#define EEPROM_PHY_CLASS_A   0x8000
+
+/* Mask bits for fields in Word 0x0a of the EEPROM */
+#define EEPROM_WORD0A_ILOS   0x0010
+#define EEPROM_WORD0A_SWDPIO 0x01E0
+#define EEPROM_WORD0A_LRST   0x0200
+#define EEPROM_WORD0A_FD     0x0400
+#define EEPROM_WORD0A_66MHZ  0x0800
+
+/* Mask bits for fields in Word 0x0f of the EEPROM */
+#define EEPROM_WORD0F_PAUSE_MASK 0x3000
+#define EEPROM_WORD0F_PAUSE      0x1000
+#define EEPROM_WORD0F_ASM_DIR    0x2000
+#define EEPROM_WORD0F_ANE        0x0800
+#define EEPROM_WORD0F_SWPDIO_EXT 0x00F0
+#define EEPROM_WORD0F_LPLU       0x0001
+
+/* Mask bits for fields in Word 0x10/0x20 of the EEPROM */
+#define EEPROM_WORD1020_GIGA_DISABLE         0x0010
+#define EEPROM_WORD1020_GIGA_DISABLE_NON_D0A 0x0008
+
+/* Mask bits for fields in Word 0x1a of the EEPROM */
+#define EEPROM_WORD1A_ASPM_MASK  0x000C
+
+/* For checksumming, the sum of all words in the EEPROM should equal 0xBABA. */
+#define EEPROM_SUM 0xBABA
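+
+/* Illustrative sketch (not part of the original driver): checking the rule
+ * above by summing EEPROM words 0x00 through EEPROM_CHECKSUM_REG (the
+ * checksum word itself is included in the sum).  read_eeprom_word() is a
+ * hypothetical helper returning the given EEPROM word:
+ *
+ *     u16 checksum = 0;
+ *     u16 i;
+ *     for (i = 0; i <= EEPROM_CHECKSUM_REG; i++)
+ *         checksum += read_eeprom_word(i);
+ *     if (checksum != (u16)EEPROM_SUM)
+ *         handle_bad_checksum();       (hypothetical error path)
+ */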
+
+/* EEPROM Map defines (WORD OFFSETS)*/
+#define EEPROM_NODE_ADDRESS_BYTE_0 0
+#define EEPROM_PBA_BYTE_1          8
+
+#define EEPROM_RESERVED_WORD          0xFFFF
+
+/* EEPROM Map Sizes (Byte Counts) */
+#define PBA_SIZE 4
+
+/* Collision related configuration parameters */
+#define E1000_COLLISION_THRESHOLD       15
+#define E1000_CT_SHIFT                  4
+/* Collision distance is a 0-based value that applies to
+ * half-duplex-capable hardware only. */
+#define E1000_COLLISION_DISTANCE        63
+#define E1000_COLLISION_DISTANCE_82542  64
+#define E1000_FDX_COLLISION_DISTANCE    E1000_COLLISION_DISTANCE
+#define E1000_HDX_COLLISION_DISTANCE    E1000_COLLISION_DISTANCE
+#define E1000_COLD_SHIFT                12
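+
+/* Illustrative sketch (not part of the original driver): placing the
+ * collision threshold and distance into TCTL with the shifts above;
+ * "tctl" is a hypothetical local holding the register value:
+ *
+ *     tctl &= ~(E1000_TCTL_CT | E1000_TCTL_COLD);
+ *     tctl |= E1000_COLLISION_THRESHOLD << E1000_CT_SHIFT;    (bits 11:4)
+ *     tctl |= E1000_COLLISION_DISTANCE << E1000_COLD_SHIFT;   (bits 21:12)
+ */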
+
+/* Number of Transmit and Receive Descriptors must be a multiple of 8 */
+#define REQ_TX_DESCRIPTOR_MULTIPLE  8
+#define REQ_RX_DESCRIPTOR_MULTIPLE  8
+
+/* Default values for the transmit IPG register */
+#define DEFAULT_82542_TIPG_IPGT        10
+#define DEFAULT_82543_TIPG_IPGT_FIBER  9
+#define DEFAULT_82543_TIPG_IPGT_COPPER 8
+
+#define E1000_TIPG_IPGT_MASK  0x000003FF
+#define E1000_TIPG_IPGR1_MASK 0x000FFC00
+#define E1000_TIPG_IPGR2_MASK 0x3FF00000
+
+#define DEFAULT_82542_TIPG_IPGR1 2
+#define DEFAULT_82543_TIPG_IPGR1 8
+#define E1000_TIPG_IPGR1_SHIFT  10
+
+#define DEFAULT_82542_TIPG_IPGR2 10
+#define DEFAULT_82543_TIPG_IPGR2 6
+#define DEFAULT_80003ES2LAN_TIPG_IPGR2 7
+#define E1000_TIPG_IPGR2_SHIFT  20
+
+#define DEFAULT_80003ES2LAN_TIPG_IPGT_10_100 0x00000009
+#define DEFAULT_80003ES2LAN_TIPG_IPGT_1000   0x00000008
+#define E1000_TXDMAC_DPP 0x00000001
+
+/* Adaptive IFS defines */
+#define TX_THRESHOLD_START     8
+#define TX_THRESHOLD_INCREMENT 10
+#define TX_THRESHOLD_DECREMENT 1
+#define TX_THRESHOLD_STOP      190
+#define TX_THRESHOLD_DISABLE   0
+#define TX_THRESHOLD_TIMER_MS  10000
+#define MIN_NUM_XMITS          1000
+#define IFS_MAX                80
+#define IFS_STEP               10
+#define IFS_MIN                40
+#define IFS_RATIO              4
+
+/* Extended Configuration Control and Size */
+#define E1000_EXTCNF_CTRL_PCIE_WRITE_ENABLE 0x00000001
+#define E1000_EXTCNF_CTRL_PHY_WRITE_ENABLE  0x00000002
+#define E1000_EXTCNF_CTRL_D_UD_ENABLE       0x00000004
+#define E1000_EXTCNF_CTRL_D_UD_LATENCY      0x00000008
+#define E1000_EXTCNF_CTRL_D_UD_OWNER        0x00000010
+#define E1000_EXTCNF_CTRL_MDIO_SW_OWNERSHIP 0x00000020
+#define E1000_EXTCNF_CTRL_MDIO_HW_OWNERSHIP 0x00000040
+#define E1000_EXTCNF_CTRL_EXT_CNF_POINTER   0x0FFF0000
+
+#define E1000_EXTCNF_SIZE_EXT_PHY_LENGTH    0x000000FF
+#define E1000_EXTCNF_SIZE_EXT_DOCK_LENGTH   0x0000FF00
+#define E1000_EXTCNF_SIZE_EXT_PCIE_LENGTH   0x00FF0000
+#define E1000_EXTCNF_CTRL_LCD_WRITE_ENABLE  0x00000001
+#define E1000_EXTCNF_CTRL_SWFLAG            0x00000020
+
+/* PBA constants */
+#define E1000_PBA_8K 0x0008    /* 8KB, default Rx allocation */
+#define E1000_PBA_12K 0x000C    /* 12KB, default Rx allocation */
+#define E1000_PBA_16K 0x0010    /* 16KB, default TX allocation */
+#define E1000_PBA_20K 0x0014
+#define E1000_PBA_22K 0x0016
+#define E1000_PBA_24K 0x0018
+#define E1000_PBA_30K 0x001E
+#define E1000_PBA_32K 0x0020
+#define E1000_PBA_34K 0x0022
+#define E1000_PBA_38K 0x0026
+#define E1000_PBA_40K 0x0028
+#define E1000_PBA_48K 0x0030    /* 48KB, default RX allocation */
+
+#define E1000_PBS_16K E1000_PBA_16K
+
+/* Flow Control Constants */
+#define FLOW_CONTROL_ADDRESS_LOW  0x00C28001
+#define FLOW_CONTROL_ADDRESS_HIGH 0x00000100
+#define FLOW_CONTROL_TYPE         0x8808
+
+/* The historical defaults for the flow control values are given below. */
+#define FC_DEFAULT_HI_THRESH        (0x8000)    /* 32KB */
+#define FC_DEFAULT_LO_THRESH        (0x4000)    /* 16KB */
+#define FC_DEFAULT_TX_TIMER         (0x100)     /* ~130 us */
+
+/* PCIX Config space */
+#define PCIX_COMMAND_REGISTER    0xE6
+#define PCIX_STATUS_REGISTER_LO  0xE8
+#define PCIX_STATUS_REGISTER_HI  0xEA
+
+#define PCIX_COMMAND_MMRBC_MASK      0x000C
+#define PCIX_COMMAND_MMRBC_SHIFT     0x2
+#define PCIX_STATUS_HI_MMRBC_MASK    0x0060
+#define PCIX_STATUS_HI_MMRBC_SHIFT   0x5
+#define PCIX_STATUS_HI_MMRBC_4K      0x3
+#define PCIX_STATUS_HI_MMRBC_2K      0x2
+
+
+/* Number of bits required to shift right the "pause" bits from the
+ * EEPROM (bits 13:12) to the "pause" (bits 8:7) field in the TXCW register.
+ */
+#define PAUSE_SHIFT 5
+
+/* Number of bits required to shift left the "SWDPIO" bits from the
+ * EEPROM (bits 8:5) to the "SWDPIO" (bits 25:22) field in the CTRL register.
+ */
+#define SWDPIO_SHIFT 17
+
+/* Number of bits required to shift left the "SWDPIO_EXT" bits from the
+ * EEPROM word F (bits 7:4) to the bits 11:8 of The Extended CTRL register.
+ */
+#define SWDPIO__EXT_SHIFT 4
+
+/* Number of bits required to shift left the "ILOS" bit from the EEPROM
+ * (bit 4) to the "ILOS" (bit 7) field in the CTRL register.
+ */
+#define ILOS_SHIFT  3
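+
+/* Illustrative sketch (not part of the original driver): the PAUSE_SHIFT
+ * relocation described above, using hypothetical locals "eeprom_data"
+ * (EEPROM word 0x0F) and "txcw":
+ *
+ *     txcw |= (eeprom_data & EEPROM_WORD0F_PAUSE_MASK) >> PAUSE_SHIFT;
+ *
+ * 0x3000 >> 5 == 0x0180 == E1000_TXCW_PAUSE_MASK, i.e. EEPROM bits 13:12
+ * end up in TXCW bits 8:7.
+ */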
+
+
+#define RECEIVE_BUFFER_ALIGN_SIZE  (256)
+
+/* Number of milliseconds we wait for auto-negotiation to complete */
+#define LINK_UP_TIMEOUT             500
+
+/* Number of 100 microseconds we wait for PCI Express master disable */
+#define MASTER_DISABLE_TIMEOUT      800
+/* Number of milliseconds we wait for EEPROM auto read bit done after MAC reset */
+#define AUTO_READ_DONE_TIMEOUT      10
+/* Number of milliseconds we wait for PHY configuration done after MAC reset */
+#define PHY_CFG_TIMEOUT             100
+
+#define E1000_TX_BUFFER_SIZE ((u32)1514)
+
+/* The carrier extension symbol, as received by the NIC. */
+#define CARRIER_EXTENSION   0x0F
+
+/* TBI_ACCEPT macro definition:
+ *
+ * This macro requires:
+ *      adapter = a pointer to struct e1000_hw
+ *      status = the 8 bit status field of the RX descriptor with EOP set
+ *      errors = the 8 bit error field of the RX descriptor with EOP set
+ *      length = the sum of all the length fields of the RX descriptors that
+ *               make up the current frame
+ *      last_byte = the last byte of the frame DMAed by the hardware
+ *      max_frame_length = the maximum frame length we want to accept.
+ *      min_frame_length = the minimum frame length we want to accept.
+ *
+ * This macro is a conditional that should be used in the interrupt
+ * handler's Rx processing routine when RxErrors have been detected.
+ *
+ * Typical use:
+ *  ...
+ *  if (TBI_ACCEPT(adapter, status, errors, length, last_byte)) {
+ *      accept_frame = true;
+ *      e1000_tbi_adjust_stats(adapter, MacAddress);
+ *      frame_length--;
+ *  } else {
+ *      accept_frame = false;
+ *  }
+ *  ...
+ */
+
+#define TBI_ACCEPT(adapter, status, errors, length, last_byte) \
+    ((adapter)->tbi_compatibility_on && \
+     (((errors) & E1000_RXD_ERR_FRAME_ERR_MASK) == E1000_RXD_ERR_CE) && \
+     ((last_byte) == CARRIER_EXTENSION) && \
+     (((status) & E1000_RXD_STAT_VP) ? \
+          (((length) > ((adapter)->min_frame_size - VLAN_TAG_SIZE)) && \
+           ((length) <= ((adapter)->max_frame_size + 1))) : \
+          (((length) > (adapter)->min_frame_size) && \
+           ((length) <= ((adapter)->max_frame_size + VLAN_TAG_SIZE + 1)))))
+
+
+/* Structures, enums, and macros for the PHY */
+
+/* Bit definitions for the Management Data IO (MDIO) and Management Data
+ * Clock (MDC) pins in the Device Control Register.
+ */
+#define E1000_CTRL_PHY_RESET_DIR  E1000_CTRL_SWDPIO0
+#define E1000_CTRL_PHY_RESET      E1000_CTRL_SWDPIN0
+#define E1000_CTRL_MDIO_DIR       E1000_CTRL_SWDPIO2
+#define E1000_CTRL_MDIO           E1000_CTRL_SWDPIN2
+#define E1000_CTRL_MDC_DIR        E1000_CTRL_SWDPIO3
+#define E1000_CTRL_MDC            E1000_CTRL_SWDPIN3
+#define E1000_CTRL_PHY_RESET_DIR4 E1000_CTRL_EXT_SDP4_DIR
+#define E1000_CTRL_PHY_RESET4     E1000_CTRL_EXT_SDP4_DATA
+
+/* PHY 1000 MII Register/Bit Definitions */
+/* PHY Registers defined by IEEE */
+#define PHY_CTRL         0x00 /* Control Register */
+#define PHY_STATUS       0x01 /* Status Register */
+#define PHY_ID1          0x02 /* Phy Id Reg (word 1) */
+#define PHY_ID2          0x03 /* Phy Id Reg (word 2) */
+#define PHY_AUTONEG_ADV  0x04 /* Autoneg Advertisement */
+#define PHY_LP_ABILITY   0x05 /* Link Partner Ability (Base Page) */
+#define PHY_AUTONEG_EXP  0x06 /* Autoneg Expansion Reg */
+#define PHY_NEXT_PAGE_TX 0x07 /* Next Page TX */
+#define PHY_LP_NEXT_PAGE 0x08 /* Link Partner Next Page */
+#define PHY_1000T_CTRL   0x09 /* 1000Base-T Control Reg */
+#define PHY_1000T_STATUS 0x0A /* 1000Base-T Status Reg */
+#define PHY_EXT_STATUS   0x0F /* Extended Status Reg */
+
+#define MAX_PHY_REG_ADDRESS        0x1F  /* 5 bit address bus (0-0x1F) */
+#define MAX_PHY_MULTI_PAGE_REG     0xF   /* Registers equal on all pages */
+
+/* M88E1000 Specific Registers */
+#define M88E1000_PHY_SPEC_CTRL     0x10  /* PHY Specific Control Register */
+#define M88E1000_PHY_SPEC_STATUS   0x11  /* PHY Specific Status Register */
+#define M88E1000_INT_ENABLE        0x12  /* Interrupt Enable Register */
+#define M88E1000_INT_STATUS        0x13  /* Interrupt Status Register */
+#define M88E1000_EXT_PHY_SPEC_CTRL 0x14  /* Extended PHY Specific Control */
+#define M88E1000_RX_ERR_CNTR       0x15  /* Receive Error Counter */
+
+#define M88E1000_PHY_EXT_CTRL      0x1A  /* PHY extend control register */
+#define M88E1000_PHY_PAGE_SELECT   0x1D  /* Reg 29 for page number setting */
+#define M88E1000_PHY_GEN_CONTROL   0x1E  /* Its meaning depends on reg 29 */
+#define M88E1000_PHY_VCO_REG_BIT8  0x100 /* Bits 8 & 11 are adjusted for */
+#define M88E1000_PHY_VCO_REG_BIT11 0x800    /* improved BER performance */
+
+#define IGP01E1000_IEEE_REGS_PAGE  0x0000
+#define IGP01E1000_IEEE_RESTART_AUTONEG 0x3300
+#define IGP01E1000_IEEE_FORCE_GIGA      0x0140
+
+/* IGP01E1000 Specific Registers */
+#define IGP01E1000_PHY_PORT_CONFIG 0x10 /* PHY Specific Port Config Register */
+#define IGP01E1000_PHY_PORT_STATUS 0x11 /* PHY Specific Status Register */
+#define IGP01E1000_PHY_PORT_CTRL   0x12 /* PHY Specific Control Register */
+#define IGP01E1000_PHY_LINK_HEALTH 0x13 /* PHY Link Health Register */
+#define IGP01E1000_GMII_FIFO       0x14 /* GMII FIFO Register */
+#define IGP01E1000_PHY_CHANNEL_QUALITY 0x15 /* PHY Channel Quality Register */
+#define IGP02E1000_PHY_POWER_MGMT      0x19
+#define IGP01E1000_PHY_PAGE_SELECT     0x1F /* PHY Page Select Core Register */
+
+/* IGP01E1000 AGC Registers - stores the cable length values*/
+#define IGP01E1000_PHY_AGC_A        0x1172
+#define IGP01E1000_PHY_AGC_B        0x1272
+#define IGP01E1000_PHY_AGC_C        0x1472
+#define IGP01E1000_PHY_AGC_D        0x1872
+
+/* IGP02E1000 AGC Registers for cable length values */
+#define IGP02E1000_PHY_AGC_A        0x11B1
+#define IGP02E1000_PHY_AGC_B        0x12B1
+#define IGP02E1000_PHY_AGC_C        0x14B1
+#define IGP02E1000_PHY_AGC_D        0x18B1
+
+/* IGP01E1000 DSP Reset Register */
+#define IGP01E1000_PHY_DSP_RESET   0x1F33
+#define IGP01E1000_PHY_DSP_SET     0x1F71
+#define IGP01E1000_PHY_DSP_FFE     0x1F35
+
+#define IGP01E1000_PHY_CHANNEL_NUM    4
+#define IGP02E1000_PHY_CHANNEL_NUM    4
+
+#define IGP01E1000_PHY_AGC_PARAM_A    0x1171
+#define IGP01E1000_PHY_AGC_PARAM_B    0x1271
+#define IGP01E1000_PHY_AGC_PARAM_C    0x1471
+#define IGP01E1000_PHY_AGC_PARAM_D    0x1871
+
+#define IGP01E1000_PHY_EDAC_MU_INDEX        0xC000
+#define IGP01E1000_PHY_EDAC_SIGN_EXT_9_BITS 0x8000
+
+#define IGP01E1000_PHY_ANALOG_TX_STATE      0x2890
+#define IGP01E1000_PHY_ANALOG_CLASS_A       0x2000
+#define IGP01E1000_PHY_FORCE_ANALOG_ENABLE  0x0004
+#define IGP01E1000_PHY_DSP_FFE_CM_CP        0x0069
+
+#define IGP01E1000_PHY_DSP_FFE_DEFAULT      0x002A
+/* IGP01E1000 PCS Initialization register - stores the polarity status when
+ * speed = 1000 Mbps. */
+#define IGP01E1000_PHY_PCS_INIT_REG  0x00B4
+#define IGP01E1000_PHY_PCS_CTRL_REG  0x00B5
+
+#define IGP01E1000_ANALOG_REGS_PAGE  0x20C0
+
+/* Bits...
+ * 15-5: page
+ * 4-0: register offset
+ */
+#define GG82563_PAGE_SHIFT        5
+#define GG82563_REG(page, reg)    \
+        (((page) << GG82563_PAGE_SHIFT) | ((reg) & MAX_PHY_REG_ADDRESS))
+#define GG82563_MIN_ALT_REG       30
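+
+/* For example, GG82563_REG(193, 16) (the Kumeran Mode Control register
+ * defined below) expands to (193 << 5) | (16 & 0x1F) = 0x1830. */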
+
+/* GG82563 Specific Registers */
+#define GG82563_PHY_SPEC_CTRL           \
+        GG82563_REG(0, 16) /* PHY Specific Control */
+#define GG82563_PHY_SPEC_STATUS         \
+        GG82563_REG(0, 17) /* PHY Specific Status */
+#define GG82563_PHY_INT_ENABLE          \
+        GG82563_REG(0, 18) /* Interrupt Enable */
+#define GG82563_PHY_SPEC_STATUS_2       \
+        GG82563_REG(0, 19) /* PHY Specific Status 2 */
+#define GG82563_PHY_RX_ERR_CNTR         \
+        GG82563_REG(0, 21) /* Receive Error Counter */
+#define GG82563_PHY_PAGE_SELECT         \
+        GG82563_REG(0, 22) /* Page Select */
+#define GG82563_PHY_SPEC_CTRL_2         \
+        GG82563_REG(0, 26) /* PHY Specific Control 2 */
+#define GG82563_PHY_PAGE_SELECT_ALT     \
+        GG82563_REG(0, 29) /* Alternate Page Select */
+#define GG82563_PHY_TEST_CLK_CTRL       \
+        GG82563_REG(0, 30) /* Test Clock Control (use reg. 29 to select) */
+
+#define GG82563_PHY_MAC_SPEC_CTRL       \
+        GG82563_REG(2, 21) /* MAC Specific Control Register */
+#define GG82563_PHY_MAC_SPEC_CTRL_2     \
+        GG82563_REG(2, 26) /* MAC Specific Control 2 */
+
+#define GG82563_PHY_DSP_DISTANCE    \
+        GG82563_REG(5, 26) /* DSP Distance */
+
+/* Page 193 - Port Control Registers */
+#define GG82563_PHY_KMRN_MODE_CTRL   \
+        GG82563_REG(193, 16) /* Kumeran Mode Control */
+#define GG82563_PHY_PORT_RESET          \
+        GG82563_REG(193, 17) /* Port Reset */
+#define GG82563_PHY_REVISION_ID         \
+        GG82563_REG(193, 18) /* Revision ID */
+#define GG82563_PHY_DEVICE_ID           \
+        GG82563_REG(193, 19) /* Device ID */
+#define GG82563_PHY_PWR_MGMT_CTRL       \
+        GG82563_REG(193, 20) /* Power Management Control */
+#define GG82563_PHY_RATE_ADAPT_CTRL     \
+        GG82563_REG(193, 25) /* Rate Adaptation Control */
+
+/* Page 194 - KMRN Registers */
+#define GG82563_PHY_KMRN_FIFO_CTRL_STAT \
+        GG82563_REG(194, 16) /* FIFO's Control/Status */
+#define GG82563_PHY_KMRN_CTRL           \
+        GG82563_REG(194, 17) /* Control */
+#define GG82563_PHY_INBAND_CTRL         \
+        GG82563_REG(194, 18) /* Inband Control */
+#define GG82563_PHY_KMRN_DIAGNOSTIC     \
+        GG82563_REG(194, 19) /* Diagnostic */
+#define GG82563_PHY_ACK_TIMEOUTS        \
+        GG82563_REG(194, 20) /* Acknowledge Timeouts */
+#define GG82563_PHY_ADV_ABILITY         \
+        GG82563_REG(194, 21) /* Advertised Ability */
+#define GG82563_PHY_LINK_PARTNER_ADV_ABILITY \
+        GG82563_REG(194, 23) /* Link Partner Advertised Ability */
+#define GG82563_PHY_ADV_NEXT_PAGE       \
+        GG82563_REG(194, 24) /* Advertised Next Page */
+#define GG82563_PHY_LINK_PARTNER_ADV_NEXT_PAGE \
+        GG82563_REG(194, 25) /* Link Partner Advertised Next page */
+#define GG82563_PHY_KMRN_MISC           \
+        GG82563_REG(194, 26) /* Misc. */
+
+/* PHY Control Register */
+#define MII_CR_SPEED_SELECT_MSB 0x0040  /* bits 6,13: 10=1000, 01=100, 00=10 */
+#define MII_CR_COLL_TEST_ENABLE 0x0080  /* Collision test enable */
+#define MII_CR_FULL_DUPLEX      0x0100  /* FDX =1, half duplex =0 */
+#define MII_CR_RESTART_AUTO_NEG 0x0200  /* Restart auto negotiation */
+#define MII_CR_ISOLATE          0x0400  /* Isolate PHY from MII */
+#define MII_CR_POWER_DOWN       0x0800  /* Power down */
+#define MII_CR_AUTO_NEG_EN      0x1000  /* Auto Neg Enable */
+#define MII_CR_SPEED_SELECT_LSB 0x2000  /* bits 6,13: 10=1000, 01=100, 00=10 */
+#define MII_CR_LOOPBACK         0x4000  /* 0 = normal, 1 = loopback */
+#define MII_CR_RESET            0x8000  /* 0 = normal, 1 = PHY reset */
+
+/* PHY Status Register */
+#define MII_SR_EXTENDED_CAPS     0x0001 /* Extended register capabilities */
+#define MII_SR_JABBER_DETECT     0x0002 /* Jabber Detected */
+#define MII_SR_LINK_STATUS       0x0004 /* Link Status 1 = link */
+#define MII_SR_AUTONEG_CAPS      0x0008 /* Auto Neg Capable */
+#define MII_SR_REMOTE_FAULT      0x0010 /* Remote Fault Detect */
+#define MII_SR_AUTONEG_COMPLETE  0x0020 /* Auto Neg Complete */
+#define MII_SR_PREAMBLE_SUPPRESS 0x0040 /* Preamble may be suppressed */
+#define MII_SR_EXTENDED_STATUS   0x0100 /* Ext. status info in Reg 0x0F */
+#define MII_SR_100T2_HD_CAPS     0x0200 /* 100T2 Half Duplex Capable */
+#define MII_SR_100T2_FD_CAPS     0x0400 /* 100T2 Full Duplex Capable */
+#define MII_SR_10T_HD_CAPS       0x0800 /* 10T   Half Duplex Capable */
+#define MII_SR_10T_FD_CAPS       0x1000 /* 10T   Full Duplex Capable */
+#define MII_SR_100X_HD_CAPS      0x2000 /* 100X  Half Duplex Capable */
+#define MII_SR_100X_FD_CAPS      0x4000 /* 100X  Full Duplex Capable */
+#define MII_SR_100T4_CAPS        0x8000 /* 100T4 Capable */
+
+/* Autoneg Advertisement Register */
+#define NWAY_AR_SELECTOR_FIELD 0x0001   /* indicates IEEE 802.3 CSMA/CD */
+#define NWAY_AR_10T_HD_CAPS    0x0020   /* 10T   Half Duplex Capable */
+#define NWAY_AR_10T_FD_CAPS    0x0040   /* 10T   Full Duplex Capable */
+#define NWAY_AR_100TX_HD_CAPS  0x0080   /* 100TX Half Duplex Capable */
+#define NWAY_AR_100TX_FD_CAPS  0x0100   /* 100TX Full Duplex Capable */
+#define NWAY_AR_100T4_CAPS     0x0200   /* 100T4 Capable */
+#define NWAY_AR_PAUSE          0x0400   /* Pause operation desired */
+#define NWAY_AR_ASM_DIR        0x0800   /* Asymmetric Pause Direction bit */
+#define NWAY_AR_REMOTE_FAULT   0x2000   /* Remote Fault detected */
+#define NWAY_AR_NEXT_PAGE      0x8000   /* Next Page ability supported */
+
+/* Link Partner Ability Register (Base Page) */
+#define NWAY_LPAR_SELECTOR_FIELD 0x0000 /* LP protocol selector field */
+#define NWAY_LPAR_10T_HD_CAPS    0x0020 /* LP is 10T   Half Duplex Capable */
+#define NWAY_LPAR_10T_FD_CAPS    0x0040 /* LP is 10T   Full Duplex Capable */
+#define NWAY_LPAR_100TX_HD_CAPS  0x0080 /* LP is 100TX Half Duplex Capable */
+#define NWAY_LPAR_100TX_FD_CAPS  0x0100 /* LP is 100TX Full Duplex Capable */
+#define NWAY_LPAR_100T4_CAPS     0x0200 /* LP is 100T4 Capable */
+#define NWAY_LPAR_PAUSE          0x0400 /* LP Pause operation desired */
+#define NWAY_LPAR_ASM_DIR        0x0800 /* LP Asymmetric Pause Direction bit */
+#define NWAY_LPAR_REMOTE_FAULT   0x2000 /* LP has detected Remote Fault */
+#define NWAY_LPAR_ACKNOWLEDGE    0x4000 /* LP has rx'd link code word */
+#define NWAY_LPAR_NEXT_PAGE      0x8000 /* Next Page ability supported */
+
+/* Autoneg Expansion Register */
+#define NWAY_ER_LP_NWAY_CAPS      0x0001 /* LP has Auto Neg Capability */
+#define NWAY_ER_PAGE_RXD          0x0002 /* New Page has been received */
+#define NWAY_ER_NEXT_PAGE_CAPS    0x0004 /* Local device is Next Page able */
+#define NWAY_ER_LP_NEXT_PAGE_CAPS 0x0008 /* LP is Next Page able */
+#define NWAY_ER_PAR_DETECT_FAULT  0x0010 /* Parallel Detection Fault */
+
+/* Next Page TX Register */
+#define NPTX_MSG_CODE_FIELD 0x0001 /* NP msg code or unformatted data */
+#define NPTX_TOGGLE         0x0800 /* Toggles between exchanges
+                                    * of different NP
+                                    */
+#define NPTX_ACKNOWLDGE2    0x1000 /* 1 = will comply with msg
+                                    * 0 = cannot comply with msg
+                                    */
+#define NPTX_MSG_PAGE       0x2000 /* formatted(1)/unformatted(0) pg */
+#define NPTX_NEXT_PAGE      0x8000 /* 1 = addition NP will follow
+                                    * 0 = sending last NP
+                                    */
+
+/* Link Partner Next Page Register */
+#define LP_RNPR_MSG_CODE_FIELD 0x0001 /* NP msg code or unformatted data */
+#define LP_RNPR_TOGGLE         0x0800 /* Toggles between exchanges
+                                       * of different NP
+                                       */
+#define LP_RNPR_ACKNOWLDGE2    0x1000 /* 1 = will comply with msg
+                                       * 0 = cannot comply with msg
+                                       */
+#define LP_RNPR_MSG_PAGE       0x2000  /* formatted(1)/unformatted(0) pg */
+#define LP_RNPR_ACKNOWLDGE     0x4000  /* 1 = ACK / 0 = NO ACK */
+#define LP_RNPR_NEXT_PAGE      0x8000  /* 1 = addition NP will follow
+                                        * 0 = sending last NP
+                                        */
+
+/* 1000BASE-T Control Register */
+#define CR_1000T_ASYM_PAUSE      0x0080 /* Advertise asymmetric pause bit */
+#define CR_1000T_HD_CAPS         0x0100 /* Advertise 1000T HD capability */
+#define CR_1000T_FD_CAPS         0x0200 /* Advertise 1000T FD capability  */
+#define CR_1000T_REPEATER_DTE    0x0400 /* 1=Repeater/switch device port */
+                                        /* 0=DTE device */
+#define CR_1000T_MS_VALUE        0x0800 /* 1=Configure PHY as Master */
+                                        /* 0=Configure PHY as Slave */
+#define CR_1000T_MS_ENABLE       0x1000 /* 1=Master/Slave manual config value */
+                                        /* 0=Automatic Master/Slave config */
+#define CR_1000T_TEST_MODE_NORMAL 0x0000 /* Normal Operation */
+#define CR_1000T_TEST_MODE_1     0x2000 /* Transmit Waveform test */
+#define CR_1000T_TEST_MODE_2     0x4000 /* Master Transmit Jitter test */
+#define CR_1000T_TEST_MODE_3     0x6000 /* Slave Transmit Jitter test */
+#define CR_1000T_TEST_MODE_4     0x8000 /* Transmitter Distortion test */
+
+/* 1000BASE-T Status Register */
+#define SR_1000T_IDLE_ERROR_CNT   0x00FF /* Num idle errors since last read */
+#define SR_1000T_ASYM_PAUSE_DIR   0x0100 /* LP asymmetric pause direction bit */
+#define SR_1000T_LP_HD_CAPS       0x0400 /* LP is 1000T HD capable */
+#define SR_1000T_LP_FD_CAPS       0x0800 /* LP is 1000T FD capable */
+#define SR_1000T_REMOTE_RX_STATUS 0x1000 /* Remote receiver OK */
+#define SR_1000T_LOCAL_RX_STATUS  0x2000 /* Local receiver OK */
+#define SR_1000T_MS_CONFIG_RES    0x4000 /* 1=Local TX is Master, 0=Slave */
+#define SR_1000T_MS_CONFIG_FAULT  0x8000 /* Master/Slave config fault */
+#define SR_1000T_REMOTE_RX_STATUS_SHIFT          12
+#define SR_1000T_LOCAL_RX_STATUS_SHIFT           13
+#define SR_1000T_PHY_EXCESSIVE_IDLE_ERR_COUNT    5
+#define FFE_IDLE_ERR_COUNT_TIMEOUT_20            20
+#define FFE_IDLE_ERR_COUNT_TIMEOUT_100           100
+
+/* Extended Status Register */
+#define IEEE_ESR_1000T_HD_CAPS 0x1000 /* 1000T HD capable */
+#define IEEE_ESR_1000T_FD_CAPS 0x2000 /* 1000T FD capable */
+#define IEEE_ESR_1000X_HD_CAPS 0x4000 /* 1000X HD capable */
+#define IEEE_ESR_1000X_FD_CAPS 0x8000 /* 1000X FD capable */
+
+#define PHY_TX_POLARITY_MASK   0x0100 /* register 10h bit 8 (polarity bit) */
+#define PHY_TX_NORMAL_POLARITY 0      /* register 10h bit 8 (normal polarity) */
+
+#define AUTO_POLARITY_DISABLE  0x0010 /* register 11h bit 4 */
+                                      /* (0=enable, 1=disable) */
+
+/* M88E1000 PHY Specific Control Register */
+#define M88E1000_PSCR_JABBER_DISABLE    0x0001 /* 1=Jabber Function disabled */
+#define M88E1000_PSCR_POLARITY_REVERSAL 0x0002 /* 1=Polarity Reversal enabled */
+#define M88E1000_PSCR_SQE_TEST          0x0004 /* 1=SQE Test enabled */
+#define M88E1000_PSCR_CLK125_DISABLE    0x0010 /* 1=CLK125 low,
+                                                * 0=CLK125 toggling
+                                                */
+#define M88E1000_PSCR_MDI_MANUAL_MODE  0x0000  /* MDI Crossover Mode bits 6:5 */
+                                               /* Manual MDI configuration */
+#define M88E1000_PSCR_MDIX_MANUAL_MODE 0x0020  /* Manual MDIX configuration */
+#define M88E1000_PSCR_AUTO_X_1000T     0x0040  /* 1000BASE-T: Auto crossover,
+                                                *  100BASE-TX/10BASE-T:
+                                                *  MDI Mode
+                                                */
+#define M88E1000_PSCR_AUTO_X_MODE      0x0060  /* Auto crossover enabled
+                                                * all speeds.
+                                                */
+#define M88E1000_PSCR_10BT_EXT_DIST_ENABLE 0x0080
+                                        /* 1=Enable Extended 10BASE-T distance
+                                         * (Lower 10BASE-T RX Threshold)
+                                         * 0=Normal 10BASE-T RX Threshold */
+#define M88E1000_PSCR_MII_5BIT_ENABLE      0x0100
+                                        /* 1=5-Bit interface in 100BASE-TX
+                                         * 0=MII interface in 100BASE-TX */
+#define M88E1000_PSCR_SCRAMBLER_DISABLE    0x0200 /* 1=Scrambler disable */
+#define M88E1000_PSCR_FORCE_LINK_GOOD      0x0400 /* 1=Force link good */
+#define M88E1000_PSCR_ASSERT_CRS_ON_TX     0x0800 /* 1=Assert CRS on Transmit */
+
+#define M88E1000_PSCR_POLARITY_REVERSAL_SHIFT    1
+#define M88E1000_PSCR_AUTO_X_MODE_SHIFT          5
+#define M88E1000_PSCR_10BT_EXT_DIST_ENABLE_SHIFT 7
+
+/* M88E1000 PHY Specific Status Register */
+#define M88E1000_PSSR_JABBER             0x0001 /* 1=Jabber */
+#define M88E1000_PSSR_REV_POLARITY       0x0002 /* 1=Polarity reversed */
+#define M88E1000_PSSR_DOWNSHIFT          0x0020 /* 1=Downshifted */
+#define M88E1000_PSSR_MDIX               0x0040 /* 1=MDIX; 0=MDI */
+#define M88E1000_PSSR_CABLE_LENGTH       0x0380 /* 0=<50M;1=50-80M;2=80-110M;
+                                            * 3=110-140M;4=>140M */
+#define M88E1000_PSSR_LINK               0x0400 /* 1=Link up, 0=Link down */
+#define M88E1000_PSSR_SPD_DPLX_RESOLVED  0x0800 /* 1=Speed & Duplex resolved */
+#define M88E1000_PSSR_PAGE_RCVD          0x1000 /* 1=Page received */
+#define M88E1000_PSSR_DPLX               0x2000 /* 1=Duplex 0=Half Duplex */
+#define M88E1000_PSSR_SPEED              0xC000 /* Speed, bits 14:15 */
+#define M88E1000_PSSR_10MBS              0x0000 /* 00=10Mbs */
+#define M88E1000_PSSR_100MBS             0x4000 /* 01=100Mbs */
+#define M88E1000_PSSR_1000MBS            0x8000 /* 10=1000Mbs */
+
+#define M88E1000_PSSR_REV_POLARITY_SHIFT 1
+#define M88E1000_PSSR_DOWNSHIFT_SHIFT    5
+#define M88E1000_PSSR_MDIX_SHIFT         6
+#define M88E1000_PSSR_CABLE_LENGTH_SHIFT 7
+
+/* M88E1000 Extended PHY Specific Control Register */
+#define M88E1000_EPSCR_FIBER_LOOPBACK 0x4000 /* 1=Fiber loopback */
+#define M88E1000_EPSCR_DOWN_NO_IDLE   0x8000 /* 1=Lost lock detect enabled.
+                                              * Will assert lost lock and bring
+                                              * link down if idle not seen
+                                              * within 1ms in 1000BASE-T
+                                              */
+/* Number of times we will attempt to autonegotiate before downshifting if we
+ * are the master */
+#define M88E1000_EPSCR_MASTER_DOWNSHIFT_MASK 0x0C00
+#define M88E1000_EPSCR_MASTER_DOWNSHIFT_1X   0x0000
+#define M88E1000_EPSCR_MASTER_DOWNSHIFT_2X   0x0400
+#define M88E1000_EPSCR_MASTER_DOWNSHIFT_3X   0x0800
+#define M88E1000_EPSCR_MASTER_DOWNSHIFT_4X   0x0C00
+/* Number of times we will attempt to autonegotiate before downshifting if we
+ * are the slave */
+#define M88E1000_EPSCR_SLAVE_DOWNSHIFT_MASK  0x0300
+#define M88E1000_EPSCR_SLAVE_DOWNSHIFT_DIS   0x0000
+#define M88E1000_EPSCR_SLAVE_DOWNSHIFT_1X    0x0100
+#define M88E1000_EPSCR_SLAVE_DOWNSHIFT_2X    0x0200
+#define M88E1000_EPSCR_SLAVE_DOWNSHIFT_3X    0x0300
+#define M88E1000_EPSCR_TX_CLK_2_5     0x0060 /* 2.5 MHz TX_CLK */
+#define M88E1000_EPSCR_TX_CLK_25      0x0070 /* 25  MHz TX_CLK */
+#define M88E1000_EPSCR_TX_CLK_0       0x0000 /* NO  TX_CLK */
+
+/* M88EC018 Rev 2 specific DownShift settings */
+#define M88EC018_EPSCR_DOWNSHIFT_COUNTER_MASK  0x0E00
+#define M88EC018_EPSCR_DOWNSHIFT_COUNTER_1X    0x0000
+#define M88EC018_EPSCR_DOWNSHIFT_COUNTER_2X    0x0200
+#define M88EC018_EPSCR_DOWNSHIFT_COUNTER_3X    0x0400
+#define M88EC018_EPSCR_DOWNSHIFT_COUNTER_4X    0x0600
+#define M88EC018_EPSCR_DOWNSHIFT_COUNTER_5X    0x0800
+#define M88EC018_EPSCR_DOWNSHIFT_COUNTER_6X    0x0A00
+#define M88EC018_EPSCR_DOWNSHIFT_COUNTER_7X    0x0C00
+#define M88EC018_EPSCR_DOWNSHIFT_COUNTER_8X    0x0E00
+
+/* IGP01E1000 Specific Port Config Register - R/W */
+#define IGP01E1000_PSCFR_AUTO_MDIX_PAR_DETECT  0x0010
+#define IGP01E1000_PSCFR_PRE_EN                0x0020
+#define IGP01E1000_PSCFR_SMART_SPEED           0x0080
+#define IGP01E1000_PSCFR_DISABLE_TPLOOPBACK    0x0100
+#define IGP01E1000_PSCFR_DISABLE_JABBER        0x0400
+#define IGP01E1000_PSCFR_DISABLE_TRANSMIT      0x2000
+
+/* IGP01E1000 Specific Port Status Register - R/O */
+#define IGP01E1000_PSSR_AUTONEG_FAILED         0x0001 /* RO LH SC */
+#define IGP01E1000_PSSR_POLARITY_REVERSED      0x0002
+#define IGP01E1000_PSSR_CABLE_LENGTH           0x007C
+#define IGP01E1000_PSSR_FULL_DUPLEX            0x0200
+#define IGP01E1000_PSSR_LINK_UP                0x0400
+#define IGP01E1000_PSSR_MDIX                   0x0800
+#define IGP01E1000_PSSR_SPEED_MASK             0xC000 /* speed bits mask */
+#define IGP01E1000_PSSR_SPEED_10MBPS           0x4000
+#define IGP01E1000_PSSR_SPEED_100MBPS          0x8000
+#define IGP01E1000_PSSR_SPEED_1000MBPS         0xC000
+#define IGP01E1000_PSSR_CABLE_LENGTH_SHIFT     0x0002 /* shift right 2 */
+#define IGP01E1000_PSSR_MDIX_SHIFT             0x000B /* shift right 11 */
+
+/* IGP01E1000 Specific Port Control Register - R/W */
+#define IGP01E1000_PSCR_TP_LOOPBACK            0x0010
+#define IGP01E1000_PSCR_CORRECT_NC_SCMBLR      0x0200
+#define IGP01E1000_PSCR_TEN_CRS_SELECT         0x0400
+#define IGP01E1000_PSCR_FLIP_CHIP              0x0800
+#define IGP01E1000_PSCR_AUTO_MDIX              0x1000
+#define IGP01E1000_PSCR_FORCE_MDI_MDIX         0x2000 /* 0-MDI, 1-MDIX */
+
+/* IGP01E1000 Specific Port Link Health Register */
+#define IGP01E1000_PLHR_SS_DOWNGRADE           0x8000
+#define IGP01E1000_PLHR_GIG_SCRAMBLER_ERROR    0x4000
+#define IGP01E1000_PLHR_MASTER_FAULT           0x2000
+#define IGP01E1000_PLHR_MASTER_RESOLUTION      0x1000
+#define IGP01E1000_PLHR_GIG_REM_RCVR_NOK       0x0800 /* LH */
+#define IGP01E1000_PLHR_IDLE_ERROR_CNT_OFLOW   0x0400 /* LH */
+#define IGP01E1000_PLHR_DATA_ERR_1             0x0200 /* LH */
+#define IGP01E1000_PLHR_DATA_ERR_0             0x0100
+#define IGP01E1000_PLHR_AUTONEG_FAULT          0x0040
+#define IGP01E1000_PLHR_AUTONEG_ACTIVE         0x0010
+#define IGP01E1000_PLHR_VALID_CHANNEL_D        0x0008
+#define IGP01E1000_PLHR_VALID_CHANNEL_C        0x0004
+#define IGP01E1000_PLHR_VALID_CHANNEL_B        0x0002
+#define IGP01E1000_PLHR_VALID_CHANNEL_A        0x0001
+
+/* IGP01E1000 Channel Quality Register */
+#define IGP01E1000_MSE_CHANNEL_D        0x000F
+#define IGP01E1000_MSE_CHANNEL_C        0x00F0
+#define IGP01E1000_MSE_CHANNEL_B        0x0F00
+#define IGP01E1000_MSE_CHANNEL_A        0xF000
+
+#define IGP02E1000_PM_SPD                         0x0001  /* Smart Power Down */
+#define IGP02E1000_PM_D3_LPLU                     0x0004  /* Enable LPLU in non-D0a modes */
+#define IGP02E1000_PM_D0_LPLU                     0x0002  /* Enable LPLU in D0a mode */
+
+/* IGP01E1000 DSP reset macros */
+#define DSP_RESET_ENABLE     0x0
+#define DSP_RESET_DISABLE    0x2
+#define E1000_MAX_DSP_RESETS 10
+
+/* IGP01E1000 & IGP02E1000 AGC Registers */
+
+#define IGP01E1000_AGC_LENGTH_SHIFT 7         /* Coarse - 13:11, Fine - 10:7 */
+#define IGP02E1000_AGC_LENGTH_SHIFT 9         /* Coarse - 15:13, Fine - 12:9 */
+
+/* IGP02E1000 AGC Register Length 9-bit mask */
+#define IGP02E1000_AGC_LENGTH_MASK  0x7F
+
+/* 7 bits (3 Coarse + 4 Fine) --> 128 possible values */
+#define IGP01E1000_AGC_LENGTH_TABLE_SIZE 128
+#define IGP02E1000_AGC_LENGTH_TABLE_SIZE 113
+
+/* The precision error of the cable length is +/- 10 meters */
+#define IGP01E1000_AGC_RANGE    10
+#define IGP02E1000_AGC_RANGE    15
+
+/* IGP01E1000 PCS Initialization register */
+/* bits 3:6 in the PCS registers stores the channels polarity */
+#define IGP01E1000_PHY_POLARITY_MASK    0x0078
+
+/* IGP01E1000 GMII FIFO Register */
+#define IGP01E1000_GMII_FLEX_SPD               0x10 /* Enable flexible speed
+                                                     * on Link-Up */
+#define IGP01E1000_GMII_SPD                    0x20 /* Enable SPD */
+
+/* IGP01E1000 Analog Register */
+#define IGP01E1000_ANALOG_SPARE_FUSE_STATUS       0x20D1
+#define IGP01E1000_ANALOG_FUSE_STATUS             0x20D0
+#define IGP01E1000_ANALOG_FUSE_CONTROL            0x20DC
+#define IGP01E1000_ANALOG_FUSE_BYPASS             0x20DE
+
+#define IGP01E1000_ANALOG_FUSE_POLY_MASK            0xF000
+#define IGP01E1000_ANALOG_FUSE_FINE_MASK            0x0F80
+#define IGP01E1000_ANALOG_FUSE_COARSE_MASK          0x0070
+#define IGP01E1000_ANALOG_SPARE_FUSE_ENABLED        0x0100
+#define IGP01E1000_ANALOG_FUSE_ENABLE_SW_CONTROL    0x0002
+
+#define IGP01E1000_ANALOG_FUSE_COARSE_THRESH        0x0040
+#define IGP01E1000_ANALOG_FUSE_COARSE_10            0x0010
+#define IGP01E1000_ANALOG_FUSE_FINE_1               0x0080
+#define IGP01E1000_ANALOG_FUSE_FINE_10              0x0500
+
+/* GG82563 PHY Specific Control Register (Page 0, Register 16) */
+#define GG82563_PSCR_DISABLE_JABBER             0x0001 /* 1=Disable Jabber */
+#define GG82563_PSCR_POLARITY_REVERSAL_DISABLE  0x0002 /* 1=Polarity Reversal Disabled */
+#define GG82563_PSCR_POWER_DOWN                 0x0004 /* 1=Power Down */
+#define GG82563_PSCR_COPPER_TRANSMITER_DISABLE  0x0008 /* 1=Transmitter Disabled */
+#define GG82563_PSCR_CROSSOVER_MODE_MASK        0x0060
+#define GG82563_PSCR_CROSSOVER_MODE_MDI         0x0000 /* 00=Manual MDI configuration */
+#define GG82563_PSCR_CROSSOVER_MODE_MDIX        0x0020 /* 01=Manual MDIX configuration */
+#define GG82563_PSCR_CROSSOVER_MODE_AUTO        0x0060 /* 11=Automatic crossover */
+#define GG82563_PSCR_ENALBE_EXTENDED_DISTANCE   0x0080 /* 1=Enable Extended Distance */
+#define GG82563_PSCR_ENERGY_DETECT_MASK         0x0300
+#define GG82563_PSCR_ENERGY_DETECT_OFF          0x0000 /* 00,01=Off */
+#define GG82563_PSCR_ENERGY_DETECT_RX           0x0200 /* 10=Sense on Rx only (Energy Detect) */
+#define GG82563_PSCR_ENERGY_DETECT_RX_TM        0x0300 /* 11=Sense and Tx NLP */
+#define GG82563_PSCR_FORCE_LINK_GOOD            0x0400 /* 1=Force Link Good */
+#define GG82563_PSCR_DOWNSHIFT_ENABLE           0x0800 /* 1=Enable Downshift */
+#define GG82563_PSCR_DOWNSHIFT_COUNTER_MASK     0x7000
+#define GG82563_PSCR_DOWNSHIFT_COUNTER_SHIFT    12
+
+/* PHY Specific Status Register (Page 0, Register 17) */
+#define GG82563_PSSR_JABBER                0x0001 /* 1=Jabber */
+#define GG82563_PSSR_POLARITY              0x0002 /* 1=Polarity Reversed */
+#define GG82563_PSSR_LINK                  0x0008 /* 1=Link is Up */
+#define GG82563_PSSR_ENERGY_DETECT         0x0010 /* 1=Sleep, 0=Active */
+#define GG82563_PSSR_DOWNSHIFT             0x0020 /* 1=Downshift */
+#define GG82563_PSSR_CROSSOVER_STATUS      0x0040 /* 1=MDIX, 0=MDI */
+#define GG82563_PSSR_RX_PAUSE_ENABLED      0x0100 /* 1=Receive Pause Enabled */
+#define GG82563_PSSR_TX_PAUSE_ENABLED      0x0200 /* 1=Transmit Pause Enabled */
+#define GG82563_PSSR_LINK_UP               0x0400 /* 1=Link Up */
+#define GG82563_PSSR_SPEED_DUPLEX_RESOLVED 0x0800 /* 1=Resolved */
+#define GG82563_PSSR_PAGE_RECEIVED         0x1000 /* 1=Page Received */
+#define GG82563_PSSR_DUPLEX                0x2000 /* 1-Full-Duplex */
+#define GG82563_PSSR_SPEED_MASK            0xC000
+#define GG82563_PSSR_SPEED_10MBPS          0x0000 /* 00=10Mbps */
+#define GG82563_PSSR_SPEED_100MBPS         0x4000 /* 01=100Mbps */
+#define GG82563_PSSR_SPEED_1000MBPS        0x8000 /* 10=1000Mbps */
+
+/* PHY Specific Status Register 2 (Page 0, Register 19) */
+#define GG82563_PSSR2_JABBER                0x0001 /* 1=Jabber */
+#define GG82563_PSSR2_POLARITY_CHANGED      0x0002 /* 1=Polarity Changed */
+#define GG82563_PSSR2_ENERGY_DETECT_CHANGED 0x0010 /* 1=Energy Detect Changed */
+#define GG82563_PSSR2_DOWNSHIFT_INTERRUPT   0x0020 /* 1=Downshift Detected */
+#define GG82563_PSSR2_MDI_CROSSOVER_CHANGE  0x0040 /* 1=Crossover Changed */
+#define GG82563_PSSR2_FALSE_CARRIER         0x0100 /* 1=False Carrier */
+#define GG82563_PSSR2_SYMBOL_ERROR          0x0200 /* 1=Symbol Error */
+#define GG82563_PSSR2_LINK_STATUS_CHANGED   0x0400 /* 1=Link Status Changed */
+#define GG82563_PSSR2_AUTO_NEG_COMPLETED    0x0800 /* 1=Auto-Neg Completed */
+#define GG82563_PSSR2_PAGE_RECEIVED         0x1000 /* 1=Page Received */
+#define GG82563_PSSR2_DUPLEX_CHANGED        0x2000 /* 1=Duplex Changed */
+#define GG82563_PSSR2_SPEED_CHANGED         0x4000 /* 1=Speed Changed */
+#define GG82563_PSSR2_AUTO_NEG_ERROR        0x8000 /* 1=Auto-Neg Error */
+
+/* PHY Specific Control Register 2 (Page 0, Register 26) */
+#define GG82563_PSCR2_10BT_POLARITY_FORCE           0x0002 /* 1=Force Negative Polarity */
+#define GG82563_PSCR2_1000MB_TEST_SELECT_MASK       0x000C
+#define GG82563_PSCR2_1000MB_TEST_SELECT_NORMAL     0x0000 /* 00,01=Normal Operation */
+#define GG82563_PSCR2_1000MB_TEST_SELECT_112NS      0x0008 /* 10=Select 112ns Sequence */
+#define GG82563_PSCR2_1000MB_TEST_SELECT_16NS       0x000C /* 11=Select 16ns Sequence */
+#define GG82563_PSCR2_REVERSE_AUTO_NEG              0x2000 /* 1=Reverse Auto-Negotiation */
+#define GG82563_PSCR2_1000BT_DISABLE                0x4000 /* 1=Disable 1000BASE-T */
+#define GG82563_PSCR2_TRANSMITER_TYPE_MASK          0x8000
+#define GG82563_PSCR2_TRANSMITTER_TYPE_CLASS_B      0x0000 /* 0=Class B */
+#define GG82563_PSCR2_TRANSMITTER_TYPE_CLASS_A      0x8000 /* 1=Class A */
+
+/* MAC Specific Control Register (Page 2, Register 21) */
+/* Tx clock speed for Link Down and 1000BASE-T for the following speeds */
+#define GG82563_MSCR_TX_CLK_MASK                    0x0007
+#define GG82563_MSCR_TX_CLK_10MBPS_2_5MHZ           0x0004
+#define GG82563_MSCR_TX_CLK_100MBPS_25MHZ           0x0005
+#define GG82563_MSCR_TX_CLK_1000MBPS_2_5MHZ         0x0006
+#define GG82563_MSCR_TX_CLK_1000MBPS_25MHZ          0x0007
+
+#define GG82563_MSCR_ASSERT_CRS_ON_TX               0x0010 /* 1=Assert */
+
+/* DSP Distance Register (Page 5, Register 26) */
+#define GG82563_DSPD_CABLE_LENGTH               0x0007 /* 0 = <50M;
+                                                          1 = 50-80M;
+                                                          2 = 80-110M;
+                                                          3 = 110-140M;
+                                                          4 = >140M */
+
+/* Kumeran Mode Control Register (Page 193, Register 16) */
+#define GG82563_KMCR_PHY_LEDS_EN                    0x0020 /* 1=PHY LEDs, 0=Kumeran Inband LEDs */
+#define GG82563_KMCR_FORCE_LINK_UP                  0x0040 /* 1=Force Link Up */
+#define GG82563_KMCR_SUPPRESS_SGMII_EPD_EXT         0x0080
+#define GG82563_KMCR_MDIO_BUS_SPEED_SELECT_MASK     0x0400
+#define GG82563_KMCR_MDIO_BUS_SPEED_SELECT          0x0400 /* 1=6.25MHz, 0=0.8MHz */
+#define GG82563_KMCR_PASS_FALSE_CARRIER             0x0800
+
+/* Power Management Control Register (Page 193, Register 20) */
+#define GG82563_PMCR_ENABLE_ELECTRICAL_IDLE         0x0001 /* 1=Enable SERDES Electrical Idle */
+#define GG82563_PMCR_DISABLE_PORT                   0x0002 /* 1=Disable Port */
+#define GG82563_PMCR_DISABLE_SERDES                 0x0004 /* 1=Disable SERDES */
+#define GG82563_PMCR_REVERSE_AUTO_NEG               0x0008 /* 1=Enable Reverse Auto-Negotiation */
+#define GG82563_PMCR_DISABLE_1000_NON_D0            0x0010 /* 1=Disable 1000Mbps Auto-Neg in non D0 */
+#define GG82563_PMCR_DISABLE_1000                   0x0020 /* 1=Disable 1000Mbps Auto-Neg Always */
+#define GG82563_PMCR_REVERSE_AUTO_NEG_D0A           0x0040 /* 1=Enable D0a Reverse Auto-Negotiation */
+#define GG82563_PMCR_FORCE_POWER_STATE              0x0080 /* 1=Force Power State */
+#define GG82563_PMCR_PROGRAMMED_POWER_STATE_MASK    0x0300
+#define GG82563_PMCR_PROGRAMMED_POWER_STATE_DR      0x0000 /* 00=Dr */
+#define GG82563_PMCR_PROGRAMMED_POWER_STATE_D0U     0x0100 /* 01=D0u */
+#define GG82563_PMCR_PROGRAMMED_POWER_STATE_D0A     0x0200 /* 10=D0a */
+#define GG82563_PMCR_PROGRAMMED_POWER_STATE_D3      0x0300 /* 11=D3 */
+
+/* In-Band Control Register (Page 194, Register 18) */
+#define GG82563_ICR_DIS_PADDING                     0x0010 /* Disable Padding Use */
+
+
+/* Bit definitions for valid PHY IDs. */
+/* I = Integrated
+ * E = External
+ */
+#define M88_VENDOR         0x0141
+#define M88E1000_E_PHY_ID  0x01410C50
+#define M88E1000_I_PHY_ID  0x01410C30
+#define M88E1011_I_PHY_ID  0x01410C20
+#define IGP01E1000_I_PHY_ID  0x02A80380
+#define M88E1000_12_PHY_ID M88E1000_E_PHY_ID
+#define M88E1000_14_PHY_ID M88E1000_E_PHY_ID
+#define M88E1011_I_REV_4   0x04
+#define M88E1111_I_PHY_ID  0x01410CC0
+#define L1LXT971A_PHY_ID   0x001378E0
+#define GG82563_E_PHY_ID   0x01410CA0
+
+
+/* Bits...
+ * 15-5: page
+ * 4-0: register offset
+ */
+#define PHY_PAGE_SHIFT        5
+#define PHY_REG(page, reg)    \
+        (((page) << PHY_PAGE_SHIFT) | ((reg) & MAX_PHY_REG_ADDRESS))
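+/* Worked example, for illustration only: assuming MAX_PHY_REG_ADDRESS is the
+ * usual 0x1F (5-bit register offset), PHY_REG(769, 17) evaluates to
+ * (769 << 5) | (17 & 0x1F) = 0x6020 | 0x11 = 0x6031, i.e. page 769, offset 17. */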
+
+#define IGP3_PHY_PORT_CTRL           \
+        PHY_REG(769, 17) /* Port General Configuration */
+#define IGP3_PHY_RATE_ADAPT_CTRL \
+        PHY_REG(769, 25) /* Rate Adapter Control Register */
+
+#define IGP3_KMRN_FIFO_CTRL_STATS \
+        PHY_REG(770, 16) /* KMRN FIFO's control/status register */
+#define IGP3_KMRN_POWER_MNG_CTRL \
+        PHY_REG(770, 17) /* KMRN Power Management Control Register */
+#define IGP3_KMRN_INBAND_CTRL \
+        PHY_REG(770, 18) /* KMRN Inband Control Register */
+#define IGP3_KMRN_DIAG \
+        PHY_REG(770, 19) /* KMRN Diagnostic register */
+#define IGP3_KMRN_DIAG_PCS_LOCK_LOSS 0x0002 /* RX PCS is not synced */
+#define IGP3_KMRN_ACK_TIMEOUT \
+        PHY_REG(770, 20) /* KMRN Acknowledge Timeouts register */
+
+#define IGP3_VR_CTRL \
+        PHY_REG(776, 18) /* Voltage regulator control register */
+#define IGP3_VR_CTRL_MODE_SHUT       0x0200 /* Enter powerdown, shutdown VRs */
+#define IGP3_VR_CTRL_MODE_MASK       0x0300 /* Shutdown VR Mask */
+
+#define IGP3_CAPABILITY \
+        PHY_REG(776, 19) /* IGP3 Capability Register */
+
+/* Capabilities for SKU Control  */
+#define IGP3_CAP_INITIATE_TEAM       0x0001 /* Able to initiate a team */
+#define IGP3_CAP_WFM                 0x0002 /* Support WoL and PXE */
+#define IGP3_CAP_ASF                 0x0004 /* Support ASF */
+#define IGP3_CAP_LPLU                0x0008 /* Support Low Power Link Up */
+#define IGP3_CAP_DC_AUTO_SPEED       0x0010 /* Support AC/DC Auto Link Speed */
+#define IGP3_CAP_SPD                 0x0020 /* Support Smart Power Down */
+#define IGP3_CAP_MULT_QUEUE          0x0040 /* Support 2 tx & 2 rx queues */
+#define IGP3_CAP_RSS                 0x0080 /* Support RSS */
+#define IGP3_CAP_8021PQ              0x0100 /* Support 802.1Q & 802.1p */
+#define IGP3_CAP_AMT_CB              0x0200 /* Support active manageability and circuit breaker */
+
+#define IGP3_PPC_JORDAN_EN           0x0001
+#define IGP3_PPC_JORDAN_GIGA_SPEED   0x0002
+
+#define IGP3_KMRN_PMC_EE_IDLE_LINK_DIS         0x0001
+#define IGP3_KMRN_PMC_K0S_ENTRY_LATENCY_MASK   0x001E
+#define IGP3_KMRN_PMC_K0S_MODE1_EN_GIGA        0x0020
+#define IGP3_KMRN_PMC_K0S_MODE1_EN_100         0x0040
+
+#define IGP3E1000_PHY_MISC_CTRL                0x1B   /* Misc. Ctrl register */
+#define IGP3_PHY_MISC_DUPLEX_MANUAL_SET        0x1000 /* Duplex Manual Set */
+
+#define IGP3_KMRN_EXT_CTRL  PHY_REG(770, 18)
+#define IGP3_KMRN_EC_DIS_INBAND    0x0080
+
+#define IGP03E1000_E_PHY_ID  0x02A80390
+#define IFE_E_PHY_ID         0x02A80330 /* 10/100 PHY */
+#define IFE_PLUS_E_PHY_ID    0x02A80320
+#define IFE_C_E_PHY_ID       0x02A80310
+
+#define IFE_PHY_EXTENDED_STATUS_CONTROL   0x10  /* 100BaseTx Extended Status, Control and Address */
+#define IFE_PHY_SPECIAL_CONTROL           0x11  /* 100BaseTx PHY special control register */
+#define IFE_PHY_RCV_FALSE_CARRIER         0x13  /* 100BaseTx Receive False Carrier Counter */
+#define IFE_PHY_RCV_DISCONNECT            0x14  /* 100BaseTx Receive Disconnect Counter */
+#define IFE_PHY_RCV_ERROT_FRAME           0x15  /* 100BaseTx Receive Error Frame Counter */
+#define IFE_PHY_RCV_SYMBOL_ERR            0x16  /* Receive Symbol Error Counter */
+#define IFE_PHY_PREM_EOF_ERR              0x17  /* 100BaseTx Receive Premature End Of Frame Error Counter */
+#define IFE_PHY_RCV_EOF_ERR               0x18  /* 10BaseT Receive End Of Frame Error Counter */
+#define IFE_PHY_TX_JABBER_DETECT          0x19  /* 10BaseT Transmit Jabber Detect Counter */
+#define IFE_PHY_EQUALIZER                 0x1A  /* PHY Equalizer Control and Status */
+#define IFE_PHY_SPECIAL_CONTROL_LED       0x1B  /* PHY special control and LED configuration */
+#define IFE_PHY_MDIX_CONTROL              0x1C  /* MDI/MDI-X Control register */
+#define IFE_PHY_HWI_CONTROL               0x1D  /* Hardware Integrity Control (HWI) */
+
+#define IFE_PESC_REDUCED_POWER_DOWN_DISABLE  0x2000  /* Default 1 = Disable auto reduced power down */
+#define IFE_PESC_100BTX_POWER_DOWN           0x0400  /* Indicates the power state of 100BASE-TX */
+#define IFE_PESC_10BTX_POWER_DOWN            0x0200  /* Indicates the power state of 10BASE-T */
+#define IFE_PESC_POLARITY_REVERSED           0x0100  /* Indicates 10BASE-T polarity */
+#define IFE_PESC_PHY_ADDR_MASK               0x007C  /* Bit 6:2 for sampled PHY address */
+#define IFE_PESC_SPEED                       0x0002  /* Auto-negotiation speed result 1=100Mbs, 0=10Mbs */
+#define IFE_PESC_DUPLEX                      0x0001  /* Auto-negotiation duplex result 1=Full, 0=Half */
+#define IFE_PESC_POLARITY_REVERSED_SHIFT     8
+
+#define IFE_PSC_DISABLE_DYNAMIC_POWER_DOWN   0x0100  /* 1 = Dynamic Power Down disabled */
+#define IFE_PSC_FORCE_POLARITY               0x0020  /* 1=Reversed Polarity, 0=Normal */
+#define IFE_PSC_AUTO_POLARITY_DISABLE        0x0010  /* 1=Auto Polarity Disabled, 0=Enabled */
+#define IFE_PSC_JABBER_FUNC_DISABLE          0x0001  /* 1=Jabber Disabled, 0=Normal Jabber Operation */
+#define IFE_PSC_FORCE_POLARITY_SHIFT         5
+#define IFE_PSC_AUTO_POLARITY_DISABLE_SHIFT  4
+
+#define IFE_PMC_AUTO_MDIX                    0x0080  /* 1=enable MDI/MDI-X feature, default 0=disabled */
+#define IFE_PMC_FORCE_MDIX                   0x0040  /* 1=force MDIX-X, 0=force MDI */
+#define IFE_PMC_MDIX_STATUS                  0x0020  /* 1=MDI-X, 0=MDI */
+#define IFE_PMC_AUTO_MDIX_COMPLETE           0x0010  /* Resolution algorithm is completed */
+#define IFE_PMC_MDIX_MODE_SHIFT              6
+#define IFE_PHC_MDIX_RESET_ALL_MASK          0x0000  /* Disable auto MDI-X */
+
+#define IFE_PHC_HWI_ENABLE                   0x8000  /* Enable the HWI feature */
+#define IFE_PHC_ABILITY_CHECK                0x4000  /* 1= Test Passed, 0=failed */
+#define IFE_PHC_TEST_EXEC                    0x2000  /* PHY launch test pulses on the wire */
+#define IFE_PHC_HIGHZ                        0x0200  /* 1 = Open Circuit */
+#define IFE_PHC_LOWZ                         0x0400  /* 1 = Short Circuit */
+#define IFE_PHC_LOW_HIGH_Z_MASK              0x0600  /* Mask for indication type of problem on the line */
+#define IFE_PHC_DISTANCE_MASK                0x01FF  /* Mask for distance to the cable problem, in 80cm granularity */
+#define IFE_PHC_RESET_ALL_MASK               0x0000  /* Disable HWI */
+#define IFE_PSCL_PROBE_MODE                  0x0020  /* LED Probe mode */
+#define IFE_PSCL_PROBE_LEDS_OFF              0x0006  /* Force LEDs 0 and 2 off */
+#define IFE_PSCL_PROBE_LEDS_ON               0x0007  /* Force LEDs 0 and 2 on */
+
+#define ICH_FLASH_COMMAND_TIMEOUT            5000    /* 5000 uSecs - adjusted */
+#define ICH_FLASH_ERASE_TIMEOUT              3000000 /* Up to 3 seconds - worst case */
+#define ICH_FLASH_CYCLE_REPEAT_COUNT         10      /* 10 cycles */
+#define ICH_FLASH_SEG_SIZE_256               256
+#define ICH_FLASH_SEG_SIZE_4K                4096
+#define ICH_FLASH_SEG_SIZE_64K               65536
+
+#define ICH_CYCLE_READ                       0x0
+#define ICH_CYCLE_RESERVED                   0x1
+#define ICH_CYCLE_WRITE                      0x2
+#define ICH_CYCLE_ERASE                      0x3
+
+#define ICH_FLASH_GFPREG   0x0000
+#define ICH_FLASH_HSFSTS   0x0004
+#define ICH_FLASH_HSFCTL   0x0006
+#define ICH_FLASH_FADDR    0x0008
+#define ICH_FLASH_FDATA0   0x0010
+#define ICH_FLASH_FRACC    0x0050
+#define ICH_FLASH_FREG0    0x0054
+#define ICH_FLASH_FREG1    0x0058
+#define ICH_FLASH_FREG2    0x005C
+#define ICH_FLASH_FREG3    0x0060
+#define ICH_FLASH_FPR0     0x0074
+#define ICH_FLASH_FPR1     0x0078
+#define ICH_FLASH_SSFSTS   0x0090
+#define ICH_FLASH_SSFCTL   0x0092
+#define ICH_FLASH_PREOP    0x0094
+#define ICH_FLASH_OPTYPE   0x0096
+#define ICH_FLASH_OPMENU   0x0098
+
+#define ICH_FLASH_REG_MAPSIZE      0x00A0
+#define ICH_FLASH_SECTOR_SIZE      4096
+#define ICH_GFPREG_BASE_MASK       0x1FFF
+#define ICH_FLASH_LINEAR_ADDR_MASK 0x00FFFFFF
+
+/* ICH8 GbE Flash Hardware Sequencing Flash Status Register bit breakdown */
+/* Offset 04h HSFSTS */
+union ich8_hws_flash_status {
+    struct ich8_hsfsts {
+#ifdef __BIG_ENDIAN
+        u16 reserved2      :6;
+        u16 fldesvalid     :1;
+        u16 flockdn        :1;
+        u16 flcdone        :1;
+        u16 flcerr         :1;
+        u16 dael           :1;
+        u16 berasesz       :2;
+        u16 flcinprog      :1;
+        u16 reserved1      :2;
+#else
+        u16 flcdone        :1;   /* bit 0 Flash Cycle Done */
+        u16 flcerr         :1;   /* bit 1 Flash Cycle Error */
+        u16 dael           :1;   /* bit 2 Direct Access error Log */
+        u16 berasesz       :2;   /* bit 4:3 Block/Sector Erase Size */
+        u16 flcinprog      :1;   /* bit 5 flash SPI cycle in Progress */
+        u16 reserved1      :2;   /* bit 7:6 Reserved */
+        u16 reserved2      :6;   /* bit 13:8 Reserved */
+        u16 fldesvalid     :1;   /* bit 14 Flash Descriptor Valid */
+        u16 flockdn        :1;   /* bit 15 Flash Configuration Lock-Down */
+#endif
+    } hsf_status;
+    u16 regval;
+};
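+/* Illustrative usage sketch (assumes the driver's 16-bit ICH flash register
+ * accessor; adjust to the local helper names if they differ):
+ *
+ *   union ich8_hws_flash_status hsfsts;
+ *   hsfsts.regval = E1000_READ_ICH_FLASH_REG16(hw, ICH_FLASH_HSFSTS);
+ *   if (hsfsts.hsf_status.flcdone && !hsfsts.hsf_status.flcerr)
+ *           ...;  (last flash cycle completed without error)
+ */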
+
+/* ICH8 GbE Flash Hardware Sequencing Flash control Register bit breakdown */
+/* Offset 06h FLCTL */
+union ich8_hws_flash_ctrl {
+    struct ich8_hsflctl {
+#ifdef __BIG_ENDIAN
+        u16 fldbcount      :2;
+        u16 flockdn        :6;
+        u16 flcgo          :1;
+        u16 flcycle        :2;
+        u16 reserved       :5;
+#else
+        u16 flcgo          :1;   /* 0 Flash Cycle Go */
+        u16 flcycle        :2;   /* 2:1 Flash Cycle */
+        u16 reserved       :5;   /* 7:3 Reserved  */
+        u16 fldbcount      :2;   /* 9:8 Flash Data Byte Count */
+        u16 flockdn        :6;   /* 15:10 Reserved */
+#endif
+    } hsf_ctrl;
+    u16 regval;
+};
+
+/* ICH8 Flash Region Access Permissions */
+union ich8_hws_flash_regacc {
+    struct ich8_flracc {
+#ifdef __BIG_ENDIAN
+        u32 gmwag          :8;
+        u32 gmrag          :8;
+        u32 grwa           :8;
+        u32 grra           :8;
+#else
+        u32 grra           :8;   /* 0:7 GbE region Read Access */
+        u32 grwa           :8;   /* 8:15 GbE region Write Access */
+        u32 gmrag          :8;   /* 23:16 GbE Master Read Access Grant  */
+        u32 gmwag          :8;   /* 31:24 GbE Master Write Access Grant */
+#endif
+    } hsf_flregacc;
+    u16 regval;
+};
+
+/* Miscellaneous PHY bit definitions. */
+#define PHY_PREAMBLE        0xFFFFFFFF
+#define PHY_SOF             0x01
+#define PHY_OP_READ         0x02
+#define PHY_OP_WRITE        0x01
+#define PHY_TURNAROUND      0x02
+#define PHY_PREAMBLE_SIZE   32
+#define MII_CR_SPEED_1000   0x0040
+#define MII_CR_SPEED_100    0x2000
+#define MII_CR_SPEED_10     0x0000
+#define E1000_PHY_ADDRESS   0x01
+#define PHY_AUTO_NEG_TIME   45  /* 4.5 Seconds */
+#define PHY_FORCE_TIME      20  /* 2.0 Seconds */
+#define PHY_REVISION_MASK   0xFFFFFFF0
+#define DEVICE_SPEED_MASK   0x00000300  /* Device Ctrl Reg Speed Mask */
+#define REG4_SPEED_MASK     0x01E0
+#define REG9_SPEED_MASK     0x0300
+#define ADVERTISE_10_HALF   0x0001
+#define ADVERTISE_10_FULL   0x0002
+#define ADVERTISE_100_HALF  0x0004
+#define ADVERTISE_100_FULL  0x0008
+#define ADVERTISE_1000_HALF 0x0010
+#define ADVERTISE_1000_FULL 0x0020
+#define AUTONEG_ADVERTISE_SPEED_DEFAULT 0x002F  /* Everything but 1000-Half */
+#define AUTONEG_ADVERTISE_10_100_ALL    0x000F /* All 10/100 speeds*/
+#define AUTONEG_ADVERTISE_10_ALL        0x0003 /* 10Mbps Full & Half speeds*/
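+/* For reference: 0x002F = ADVERTISE_10_HALF | ADVERTISE_10_FULL |
+ * ADVERTISE_100_HALF | ADVERTISE_100_FULL | ADVERTISE_1000_FULL, and
+ * 0x000F and 0x0003 are the corresponding 10/100 and 10-only subsets. */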
+
+#endif /* _E1000_HW_H_ */
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/devices/e1000/e1000_main-2.6.31-ethercat.c	Wed Apr 13 22:06:28 2011 +0200
@@ -0,0 +1,5027 @@
+/*******************************************************************************
+
+  Intel PRO/1000 Linux driver
+  Copyright(c) 1999 - 2006 Intel Corporation.
+
+  This program is free software; you can redistribute it and/or modify it
+  under the terms and conditions of the GNU General Public License,
+  version 2, as published by the Free Software Foundation.
+
+  This program is distributed in the hope it will be useful, but WITHOUT
+  ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+  FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
+  more details.
+
+  You should have received a copy of the GNU General Public License along with
+  this program; if not, write to the Free Software Foundation, Inc.,
+  51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
+
+  The full GNU General Public License is included in this distribution in
+  the file called "COPYING".
+
+  Contact Information:
+  Linux NICS <linux.nics@intel.com>
+  e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
+  Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
+
+  vim: noexpandtab
+
+*******************************************************************************/
+
+#include "e1000-2.6.31-ethercat.h"
+#include <net/ip6_checksum.h>
+
+char e1000_driver_name[] = "ec_e1000";
+static char e1000_driver_string[] = "Intel(R) PRO/1000 Network Driver";
+#define DRV_VERSION "7.3.21-k3-NAPI"
+const char e1000_driver_version[] = DRV_VERSION;
+static const char e1000_copyright[] = "Copyright (c) 1999-2006 Intel Corporation.";
+
+/* e1000_pci_tbl - PCI Device ID Table
+ *
+ * Last entry must be all 0s
+ *
+ * Macro expands to...
+ *   {PCI_DEVICE(PCI_VENDOR_ID_INTEL, device_id)}
+ */
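+/* For example, INTEL_E1000_ETHERNET_DEVICE(0x100E) expands to
+ * {PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x100E)}, i.e. vendor 0x8086, device 0x100E. */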
+static struct pci_device_id e1000_pci_tbl[] = {
+	INTEL_E1000_ETHERNET_DEVICE(0x1000),
+	INTEL_E1000_ETHERNET_DEVICE(0x1001),
+	INTEL_E1000_ETHERNET_DEVICE(0x1004),
+	INTEL_E1000_ETHERNET_DEVICE(0x1008),
+	INTEL_E1000_ETHERNET_DEVICE(0x1009),
+	INTEL_E1000_ETHERNET_DEVICE(0x100C),
+	INTEL_E1000_ETHERNET_DEVICE(0x100D),
+	INTEL_E1000_ETHERNET_DEVICE(0x100E),
+	INTEL_E1000_ETHERNET_DEVICE(0x100F),
+	INTEL_E1000_ETHERNET_DEVICE(0x1010),
+	INTEL_E1000_ETHERNET_DEVICE(0x1011),
+	INTEL_E1000_ETHERNET_DEVICE(0x1012),
+	INTEL_E1000_ETHERNET_DEVICE(0x1013),
+	INTEL_E1000_ETHERNET_DEVICE(0x1014),
+	INTEL_E1000_ETHERNET_DEVICE(0x1015),
+	INTEL_E1000_ETHERNET_DEVICE(0x1016),
+	INTEL_E1000_ETHERNET_DEVICE(0x1017),
+	INTEL_E1000_ETHERNET_DEVICE(0x1018),
+	INTEL_E1000_ETHERNET_DEVICE(0x1019),
+	INTEL_E1000_ETHERNET_DEVICE(0x101A),
+	INTEL_E1000_ETHERNET_DEVICE(0x101D),
+	INTEL_E1000_ETHERNET_DEVICE(0x101E),
+	INTEL_E1000_ETHERNET_DEVICE(0x1026),
+	INTEL_E1000_ETHERNET_DEVICE(0x1027),
+	INTEL_E1000_ETHERNET_DEVICE(0x1028),
+	INTEL_E1000_ETHERNET_DEVICE(0x1075),
+	INTEL_E1000_ETHERNET_DEVICE(0x1076),
+	INTEL_E1000_ETHERNET_DEVICE(0x1077),
+	INTEL_E1000_ETHERNET_DEVICE(0x1078),
+	INTEL_E1000_ETHERNET_DEVICE(0x1079),
+	INTEL_E1000_ETHERNET_DEVICE(0x107A),
+	INTEL_E1000_ETHERNET_DEVICE(0x107B),
+	INTEL_E1000_ETHERNET_DEVICE(0x107C),
+	INTEL_E1000_ETHERNET_DEVICE(0x108A),
+	INTEL_E1000_ETHERNET_DEVICE(0x1099),
+	INTEL_E1000_ETHERNET_DEVICE(0x10B5),
+	/* required last entry */
+	{0,}
+};
+
+// do not auto-load driver
+// MODULE_DEVICE_TABLE(pci, e1000_pci_tbl);
+
+int e1000_up(struct e1000_adapter *adapter);
+void e1000_down(struct e1000_adapter *adapter);
+void e1000_reinit_locked(struct e1000_adapter *adapter);
+void e1000_reset(struct e1000_adapter *adapter);
+int e1000_set_spd_dplx(struct e1000_adapter *adapter, u16 spddplx);
+int e1000_setup_all_tx_resources(struct e1000_adapter *adapter);
+int e1000_setup_all_rx_resources(struct e1000_adapter *adapter);
+void e1000_free_all_tx_resources(struct e1000_adapter *adapter);
+void e1000_free_all_rx_resources(struct e1000_adapter *adapter);
+static int e1000_setup_tx_resources(struct e1000_adapter *adapter,
+                             struct e1000_tx_ring *txdr);
+static int e1000_setup_rx_resources(struct e1000_adapter *adapter,
+                             struct e1000_rx_ring *rxdr);
+static void e1000_free_tx_resources(struct e1000_adapter *adapter,
+                             struct e1000_tx_ring *tx_ring);
+static void e1000_free_rx_resources(struct e1000_adapter *adapter,
+                             struct e1000_rx_ring *rx_ring);
+void e1000_update_stats(struct e1000_adapter *adapter);
+
+static int e1000_init_module(void);
+static void e1000_exit_module(void);
+static int e1000_probe(struct pci_dev *pdev, const struct pci_device_id *ent);
+static void __devexit e1000_remove(struct pci_dev *pdev);
+static int e1000_alloc_queues(struct e1000_adapter *adapter);
+static int e1000_sw_init(struct e1000_adapter *adapter);
+static int e1000_open(struct net_device *netdev);
+static int e1000_close(struct net_device *netdev);
+static void e1000_configure_tx(struct e1000_adapter *adapter);
+static void e1000_configure_rx(struct e1000_adapter *adapter);
+static void e1000_setup_rctl(struct e1000_adapter *adapter);
+static void e1000_clean_all_tx_rings(struct e1000_adapter *adapter);
+static void e1000_clean_all_rx_rings(struct e1000_adapter *adapter);
+static void e1000_clean_tx_ring(struct e1000_adapter *adapter,
+                                struct e1000_tx_ring *tx_ring);
+static void e1000_clean_rx_ring(struct e1000_adapter *adapter,
+                                struct e1000_rx_ring *rx_ring);
+static void e1000_set_rx_mode(struct net_device *netdev);
+static void e1000_update_phy_info(unsigned long data);
+static void e1000_watchdog(unsigned long data);
+static void e1000_82547_tx_fifo_stall(unsigned long data);
+static int e1000_xmit_frame(struct sk_buff *skb, struct net_device *netdev);
+static struct net_device_stats * e1000_get_stats(struct net_device *netdev);
+static int e1000_change_mtu(struct net_device *netdev, int new_mtu);
+static int e1000_set_mac(struct net_device *netdev, void *p);
+void ec_poll(struct net_device *);
+static irqreturn_t e1000_intr(int irq, void *data);
+static irqreturn_t e1000_intr_msi(int irq, void *data);
+static bool e1000_clean_tx_irq(struct e1000_adapter *adapter,
+			       struct e1000_tx_ring *tx_ring);
+static int e1000_clean(struct napi_struct *napi, int budget);
+static bool e1000_clean_rx_irq(struct e1000_adapter *adapter,
+			       struct e1000_rx_ring *rx_ring,
+			       int *work_done, int work_to_do);
+static void e1000_alloc_rx_buffers(struct e1000_adapter *adapter,
+                                   struct e1000_rx_ring *rx_ring,
+				   int cleaned_count);
+static int e1000_ioctl(struct net_device *netdev, struct ifreq *ifr, int cmd);
+static int e1000_mii_ioctl(struct net_device *netdev, struct ifreq *ifr,
+			   int cmd);
+static void e1000_enter_82542_rst(struct e1000_adapter *adapter);
+static void e1000_leave_82542_rst(struct e1000_adapter *adapter);
+static void e1000_tx_timeout(struct net_device *dev);
+static void e1000_reset_task(struct work_struct *work);
+static void e1000_smartspeed(struct e1000_adapter *adapter);
+static int e1000_82547_fifo_workaround(struct e1000_adapter *adapter,
+                                       struct sk_buff *skb);
+
+static void e1000_vlan_rx_register(struct net_device *netdev, struct vlan_group *grp);
+static void e1000_vlan_rx_add_vid(struct net_device *netdev, u16 vid);
+static void e1000_vlan_rx_kill_vid(struct net_device *netdev, u16 vid);
+static void e1000_restore_vlan(struct e1000_adapter *adapter);
+
+#ifdef CONFIG_PM
+static int e1000_suspend(struct pci_dev *pdev, pm_message_t state);
+static int e1000_resume(struct pci_dev *pdev);
+#endif
+static void e1000_shutdown(struct pci_dev *pdev);
+
+#ifdef CONFIG_NET_POLL_CONTROLLER
+/* for netdump / net console */
+static void e1000_netpoll (struct net_device *netdev);
+#endif
+
+#define COPYBREAK_DEFAULT 256
+static unsigned int copybreak __read_mostly = COPYBREAK_DEFAULT;
+module_param(copybreak, uint, 0644);
+MODULE_PARM_DESC(copybreak,
+	"Maximum size of packet that is copied to a new buffer on receive");
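+/* Example (module name assumed for the EtherCAT build): loading with
+ * "modprobe ec_e1000 copybreak=128" makes the receive path copy small frames
+ * (below 128 bytes) into a freshly allocated skb instead of the full buffer. */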
+
+static pci_ers_result_t e1000_io_error_detected(struct pci_dev *pdev,
+                     pci_channel_state_t state);
+static pci_ers_result_t e1000_io_slot_reset(struct pci_dev *pdev);
+static void e1000_io_resume(struct pci_dev *pdev);
+
+static struct pci_error_handlers e1000_err_handler = {
+	.error_detected = e1000_io_error_detected,
+	.slot_reset = e1000_io_slot_reset,
+	.resume = e1000_io_resume,
+};
+
+static struct pci_driver e1000_driver = {
+	.name     = e1000_driver_name,
+	.id_table = e1000_pci_tbl,
+	.probe    = e1000_probe,
+	.remove   = __devexit_p(e1000_remove),
+#ifdef CONFIG_PM
+	/* Power Management Hooks */
+	.suspend  = e1000_suspend,
+	.resume   = e1000_resume,
+#endif
+	.shutdown = e1000_shutdown,
+	.err_handler = &e1000_err_handler
+};
+
+MODULE_AUTHOR("Florian Pose <fp@igh-essen.com>");
+MODULE_DESCRIPTION("EtherCAT-capable Intel(R) PRO/1000 Network Driver");
+MODULE_LICENSE("GPL");
+MODULE_VERSION(DRV_VERSION);
+
+static int debug = NETIF_MSG_DRV | NETIF_MSG_PROBE;
+module_param(debug, int, 0);
+MODULE_PARM_DESC(debug, "Debug level (0=none,...,16=all)");
+
+/**
+ * e1000_init_module - Driver Registration Routine
+ *
+ * e1000_init_module is the first routine called when the driver is
+ * loaded. All it does is register with the PCI subsystem.
+ **/
+
+static int __init e1000_init_module(void)
+{
+	int ret;
+	printk(KERN_INFO "%s - version %s\n",
+	       e1000_driver_string, e1000_driver_version);
+
+	printk(KERN_INFO "%s\n", e1000_copyright);
+
+	ret = pci_register_driver(&e1000_driver);
+	if (copybreak != COPYBREAK_DEFAULT) {
+		if (copybreak == 0)
+			printk(KERN_INFO "e1000: copybreak disabled\n");
+		else
+			printk(KERN_INFO "e1000: copybreak enabled for "
+			       "packets <= %u bytes\n", copybreak);
+	}
+	return ret;
+}
+
+module_init(e1000_init_module);
+
+/**
+ * e1000_exit_module - Driver Exit Cleanup Routine
+ *
+ * e1000_exit_module is called just before the driver is removed
+ * from memory.
+ **/
+
+static void __exit e1000_exit_module(void)
+{
+	pci_unregister_driver(&e1000_driver);
+}
+
+module_exit(e1000_exit_module);
+
+static int e1000_request_irq(struct e1000_adapter *adapter)
+{
+	struct e1000_hw *hw = &adapter->hw;
+	struct net_device *netdev = adapter->netdev;
+	irq_handler_t handler = e1000_intr;
+	int irq_flags = IRQF_SHARED;
+	int err;
+
+	if (adapter->ecdev)
+		return 0;
+
+	if (hw->mac_type >= e1000_82571) {
+		adapter->have_msi = !pci_enable_msi(adapter->pdev);
+		if (adapter->have_msi) {
+			handler = e1000_intr_msi;
+			irq_flags = 0;
+		}
+	}
+
+	err = request_irq(adapter->pdev->irq, handler, irq_flags, netdev->name,
+	                  netdev);
+	if (err) {
+		if (adapter->have_msi)
+			pci_disable_msi(adapter->pdev);
+		DPRINTK(PROBE, ERR,
+		        "Unable to allocate interrupt Error: %d\n", err);
+	}
+
+	return err;
+}
+
+static void e1000_free_irq(struct e1000_adapter *adapter)
+{
+	struct net_device *netdev = adapter->netdev;
+
+	if (adapter->ecdev)
+		return;
+
+	free_irq(adapter->pdev->irq, netdev);
+
+	if (adapter->have_msi)
+		pci_disable_msi(adapter->pdev);
+}
+
+/**
+ * e1000_irq_disable - Mask off interrupt generation on the NIC
+ * @adapter: board private structure
+ **/
+
+static void e1000_irq_disable(struct e1000_adapter *adapter)
+{
+	struct e1000_hw *hw = &adapter->hw;
+
+	if (adapter->ecdev)
+		return;
+
+	ew32(IMC, ~0);
+	E1000_WRITE_FLUSH();
+	synchronize_irq(adapter->pdev->irq);
+}
+
+/**
+ * e1000_irq_enable - Enable default interrupt generation settings
+ * @adapter: board private structure
+ **/
+
+static void e1000_irq_enable(struct e1000_adapter *adapter)
+{
+	struct e1000_hw *hw = &adapter->hw;
+
+	if (adapter->ecdev)
+		return;
+
+	ew32(IMS, IMS_ENABLE_MASK);
+	E1000_WRITE_FLUSH();
+}
+
+static void e1000_update_mng_vlan(struct e1000_adapter *adapter)
+{
+	struct e1000_hw *hw = &adapter->hw;
+	struct net_device *netdev = adapter->netdev;
+	u16 vid = hw->mng_cookie.vlan_id;
+	u16 old_vid = adapter->mng_vlan_id;
+	if (adapter->vlgrp) {
+		if (!vlan_group_get_device(adapter->vlgrp, vid)) {
+			if (hw->mng_cookie.status &
+				E1000_MNG_DHCP_COOKIE_STATUS_VLAN_SUPPORT) {
+				e1000_vlan_rx_add_vid(netdev, vid);
+				adapter->mng_vlan_id = vid;
+			} else
+				adapter->mng_vlan_id = E1000_MNG_VLAN_NONE;
+
+			if ((old_vid != (u16)E1000_MNG_VLAN_NONE) &&
+					(vid != old_vid) &&
+			    !vlan_group_get_device(adapter->vlgrp, old_vid))
+				e1000_vlan_rx_kill_vid(netdev, old_vid);
+		} else
+			adapter->mng_vlan_id = vid;
+	}
+}
+
+/**
+ * e1000_release_hw_control - release control of the h/w to f/w
+ * @adapter: address of board private structure
+ *
+ * e1000_release_hw_control resets {CTRL_EXT|FWSM}:DRV_LOAD bit.
+ * For ASF and Pass Through versions of f/w this means that the
+ * driver is no longer loaded. For AMT version (only with 82573)
+ * of the f/w this means that the network i/f is closed.
+ *
+ **/
+
+static void e1000_release_hw_control(struct e1000_adapter *adapter)
+{
+	u32 ctrl_ext;
+	u32 swsm;
+	struct e1000_hw *hw = &adapter->hw;
+
+	/* Let firmware take over control of h/w */
+	switch (hw->mac_type) {
+	case e1000_82573:
+		swsm = er32(SWSM);
+		ew32(SWSM, swsm & ~E1000_SWSM_DRV_LOAD);
+		break;
+	case e1000_82571:
+	case e1000_82572:
+	case e1000_80003es2lan:
+	case e1000_ich8lan:
+		ctrl_ext = er32(CTRL_EXT);
+		ew32(CTRL_EXT, ctrl_ext & ~E1000_CTRL_EXT_DRV_LOAD);
+		break;
+	default:
+		break;
+	}
+}
+
+/**
+ * e1000_get_hw_control - get control of the h/w from f/w
+ * @adapter: address of board private structure
+ *
+ * e1000_get_hw_control sets {CTRL_EXT|FWSM}:DRV_LOAD bit.
+ * For ASF and Pass Through versions of f/w this means that
+ * the driver is loaded. For AMT version (only with 82573)
+ * of the f/w this means that the network i/f is open.
+ *
+ **/
+
+static void e1000_get_hw_control(struct e1000_adapter *adapter)
+{
+	u32 ctrl_ext;
+	u32 swsm;
+	struct e1000_hw *hw = &adapter->hw;
+
+	/* Let firmware know the driver has taken over */
+	switch (hw->mac_type) {
+	case e1000_82573:
+		swsm = er32(SWSM);
+		ew32(SWSM, swsm | E1000_SWSM_DRV_LOAD);
+		break;
+	case e1000_82571:
+	case e1000_82572:
+	case e1000_80003es2lan:
+	case e1000_ich8lan:
+		ctrl_ext = er32(CTRL_EXT);
+		ew32(CTRL_EXT, ctrl_ext | E1000_CTRL_EXT_DRV_LOAD);
+		break;
+	default:
+		break;
+	}
+}
+
+static void e1000_init_manageability(struct e1000_adapter *adapter)
+{
+	struct e1000_hw *hw = &adapter->hw;
+
+	if (adapter->en_mng_pt) {
+		u32 manc = er32(MANC);
+
+		/* disable hardware interception of ARP */
+		manc &= ~(E1000_MANC_ARP_EN);
+
+		/* enable receiving management packets to the host */
+		/* this will probably generate destination unreachable messages
+		 * from the host OS, but the packets will be handled on SMBUS */
+		if (hw->has_manc2h) {
+			u32 manc2h = er32(MANC2H);
+
+			manc |= E1000_MANC_EN_MNG2HOST;
+#define E1000_MNG2HOST_PORT_623 (1 << 5)
+#define E1000_MNG2HOST_PORT_664 (1 << 6)
+			manc2h |= E1000_MNG2HOST_PORT_623;
+			manc2h |= E1000_MNG2HOST_PORT_664;
+			ew32(MANC2H, manc2h);
+		}
+
+		ew32(MANC, manc);
+	}
+}
+
+static void e1000_release_manageability(struct e1000_adapter *adapter)
+{
+	struct e1000_hw *hw = &adapter->hw;
+
+	if (adapter->en_mng_pt) {
+		u32 manc = er32(MANC);
+
+		/* re-enable hardware interception of ARP */
+		manc |= E1000_MANC_ARP_EN;
+
+		if (hw->has_manc2h)
+			manc &= ~E1000_MANC_EN_MNG2HOST;
+
+		/* don't explicitly have to mess with MANC2H since
+		 * MANC has an enable/disable that gates MANC2H */
+
+		ew32(MANC, manc);
+	}
+}
+
+/**
+ * e1000_configure - configure the hardware for RX and TX
+ * @adapter: board private structure
+ **/
+static void e1000_configure(struct e1000_adapter *adapter)
+{
+	struct net_device *netdev = adapter->netdev;
+	int i;
+
+	e1000_set_rx_mode(netdev);
+
+	e1000_restore_vlan(adapter);
+	e1000_init_manageability(adapter);
+
+	e1000_configure_tx(adapter);
+	e1000_setup_rctl(adapter);
+	e1000_configure_rx(adapter);
+	/* call E1000_DESC_UNUSED which always leaves
+	 * at least 1 descriptor unused to make sure
+	 * next_to_use != next_to_clean */
+	for (i = 0; i < adapter->num_rx_queues; i++) {
+		struct e1000_rx_ring *ring = &adapter->rx_ring[i];
+		if (adapter->ecdev) {
+			/* fill rx ring completely! */
+			adapter->alloc_rx_buf(adapter, ring, ring->count);
+		} else {
+			/* this one leaves the last ring element unallocated! */
+			adapter->alloc_rx_buf(adapter, ring,
+					E1000_DESC_UNUSED(ring));
+		}
+	}
+
+	adapter->tx_queue_len = netdev->tx_queue_len;
+}
+
+int e1000_up(struct e1000_adapter *adapter)
+{
+	struct e1000_hw *hw = &adapter->hw;
+
+	/* hardware has been reset, we need to reload some things */
+	e1000_configure(adapter);
+
+	clear_bit(__E1000_DOWN, &adapter->flags);
+
+	if (!adapter->ecdev) {
+		napi_enable(&adapter->napi);
+
+		e1000_irq_enable(adapter);
+
+		netif_wake_queue(adapter->netdev);
+
+		/* fire a link change interrupt to start the watchdog */
+		ew32(ICS, E1000_ICS_LSC);
+	}
+	return 0;
+}
+
+/**
+ * e1000_power_up_phy - restore link in case the phy was powered down
+ * @adapter: address of board private structure
+ *
+ * The phy may be powered down to save power and turn off link when the
+ * driver is unloaded and wake on lan is not enabled (among others)
+ * *** this routine MUST be followed by a call to e1000_reset ***
+ *
+ **/
+
+void e1000_power_up_phy(struct e1000_adapter *adapter)
+{
+	struct e1000_hw *hw = &adapter->hw;
+	u16 mii_reg = 0;
+
+	/* Just clear the power down bit to wake the phy back up */
+	if (hw->media_type == e1000_media_type_copper) {
+		/* according to the manual, the phy will retain its
+		 * settings across a power-down/up cycle */
+		e1000_read_phy_reg(hw, PHY_CTRL, &mii_reg);
+		mii_reg &= ~MII_CR_POWER_DOWN;
+		e1000_write_phy_reg(hw, PHY_CTRL, mii_reg);
+	}
+}
+
+static void e1000_power_down_phy(struct e1000_adapter *adapter)
+{
+	struct e1000_hw *hw = &adapter->hw;
+
+	/* Power down the PHY so no link is implied when interface is down *
+	 * The PHY cannot be powered down if any of the following is true *
+	 * (a) WoL is enabled
+	 * (b) AMT is active
+	 * (c) SoL/IDER session is active */
+	if (!adapter->wol && hw->mac_type >= e1000_82540 &&
+	   hw->media_type == e1000_media_type_copper) {
+		u16 mii_reg = 0;
+
+		switch (hw->mac_type) {
+		case e1000_82540:
+		case e1000_82545:
+		case e1000_82545_rev_3:
+		case e1000_82546:
+		case e1000_82546_rev_3:
+		case e1000_82541:
+		case e1000_82541_rev_2:
+		case e1000_82547:
+		case e1000_82547_rev_2:
+			if (er32(MANC) & E1000_MANC_SMBUS_EN)
+				goto out;
+			break;
+		case e1000_82571:
+		case e1000_82572:
+		case e1000_82573:
+		case e1000_80003es2lan:
+		case e1000_ich8lan:
+			if (e1000_check_mng_mode(hw) ||
+			    e1000_check_phy_reset_block(hw))
+				goto out;
+			break;
+		default:
+			goto out;
+		}
+		e1000_read_phy_reg(hw, PHY_CTRL, &mii_reg);
+		mii_reg |= MII_CR_POWER_DOWN;
+		e1000_write_phy_reg(hw, PHY_CTRL, mii_reg);
+		mdelay(1);
+	}
+out:
+	return;
+}
+
+void e1000_down(struct e1000_adapter *adapter)
+{
+	struct e1000_hw *hw = &adapter->hw;
+	struct net_device *netdev = adapter->netdev;
+	u32 rctl, tctl;
+
+	/* signal that we're down so the interrupt handler does not
+	 * reschedule our watchdog timer */
+	set_bit(__E1000_DOWN, &adapter->flags);
+
+	/* disable receives in the hardware */
+	rctl = er32(RCTL);
+	ew32(RCTL, rctl & ~E1000_RCTL_EN);
+
+	if (!adapter->ecdev) {
+		/* flush and sleep below */
+		/* can be netif_tx_disable when NETIF_F_LLTX is removed */
+		netif_stop_queue(netdev);
+	}
+
+	/* disable transmits in the hardware */
+	tctl = er32(TCTL);
+	tctl &= ~E1000_TCTL_EN;
+	ew32(TCTL, tctl);
+	/* flush both disables and wait for them to finish */
+	E1000_WRITE_FLUSH();
+	msleep(10);
+
+	if (!adapter->ecdev) {
+		napi_disable(&adapter->napi);
+
+		e1000_irq_disable(adapter);
+
+		del_timer_sync(&adapter->tx_fifo_stall_timer);
+		del_timer_sync(&adapter->watchdog_timer);
+		del_timer_sync(&adapter->phy_info_timer);
+	}
+
+	netdev->tx_queue_len = adapter->tx_queue_len;
+	adapter->link_speed = 0;
+	adapter->link_duplex = 0;
+	if (!adapter->ecdev) {
+		netif_carrier_off(netdev);
+	}
+
+	e1000_reset(adapter);
+	e1000_clean_all_tx_rings(adapter);
+	e1000_clean_all_rx_rings(adapter);
+}
+
+void e1000_reinit_locked(struct e1000_adapter *adapter)
+{
+	WARN_ON(in_interrupt());
+	while (test_and_set_bit(__E1000_RESETTING, &adapter->flags))
+		msleep(1);
+	e1000_down(adapter);
+	e1000_up(adapter);
+	clear_bit(__E1000_RESETTING, &adapter->flags);
+}
+
+void e1000_reset(struct e1000_adapter *adapter)
+{
+	struct e1000_hw *hw = &adapter->hw;
+	u32 pba = 0, tx_space, min_tx_space, min_rx_space;
+	u16 fc_high_water_mark = E1000_FC_HIGH_DIFF;
+	bool legacy_pba_adjust = false;
+
+	/* Repartition Pba for greater than 9k mtu
+	 * To take effect CTRL.RST is required.
+	 */
+
+	switch (hw->mac_type) {
+	case e1000_82542_rev2_0:
+	case e1000_82542_rev2_1:
+	case e1000_82543:
+	case e1000_82544:
+	case e1000_82540:
+	case e1000_82541:
+	case e1000_82541_rev_2:
+		legacy_pba_adjust = true;
+		pba = E1000_PBA_48K;
+		break;
+	case e1000_82545:
+	case e1000_82545_rev_3:
+	case e1000_82546:
+	case e1000_82546_rev_3:
+		pba = E1000_PBA_48K;
+		break;
+	case e1000_82547:
+	case e1000_82547_rev_2:
+		legacy_pba_adjust = true;
+		pba = E1000_PBA_30K;
+		break;
+	case e1000_82571:
+	case e1000_82572:
+	case e1000_80003es2lan:
+		pba = E1000_PBA_38K;
+		break;
+	case e1000_82573:
+		pba = E1000_PBA_20K;
+		break;
+	case e1000_ich8lan:
+		pba = E1000_PBA_8K;
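+		/* fall through: nothing further to set up for ich8lan */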
+	case e1000_undefined:
+	case e1000_num_macs:
+		break;
+	}
+
+	if (legacy_pba_adjust) {
+		if (adapter->netdev->mtu > E1000_RXBUFFER_8192)
+			pba -= 8; /* allocate more FIFO for Tx */
+
+		if (hw->mac_type == e1000_82547) {
+			adapter->tx_fifo_head = 0;
+			adapter->tx_head_addr = pba << E1000_TX_HEAD_ADDR_SHIFT;
+			adapter->tx_fifo_size =
+				(E1000_PBA_40K - pba) << E1000_PBA_BYTES_SHIFT;
+			atomic_set(&adapter->tx_fifo_stall, 0);
+		}
+	} else if (hw->max_frame_size > MAXIMUM_ETHERNET_FRAME_SIZE) {
+		/* adjust PBA for jumbo frames */
+		ew32(PBA, pba);
+
+		/* To maintain wire speed transmits, the Tx FIFO should be
+		 * large enough to accommodate two full transmit packets,
+		 * rounded up to the next 1KB and expressed in KB.  Likewise,
+		 * the Rx FIFO should be large enough to accommodate at least
+		 * one full receive packet and is similarly rounded up and
+		 * expressed in KB. */
+		pba = er32(PBA);
+		/* upper 16 bits has Tx packet buffer allocation size in KB */
+		tx_space = pba >> 16;
+		/* lower 16 bits has Rx packet buffer allocation size in KB */
+		pba &= 0xffff;
+		/* don't include ethernet FCS because hardware appends/strips */
+		min_rx_space = adapter->netdev->mtu + ENET_HEADER_SIZE +
+		               VLAN_TAG_SIZE;
+		min_tx_space = min_rx_space;
+		min_tx_space *= 2;
+		min_tx_space = ALIGN(min_tx_space, 1024);
+		min_tx_space >>= 10;
+		min_rx_space = ALIGN(min_rx_space, 1024);
+		min_rx_space >>= 10;
+
+		/* If current Tx allocation is less than the min Tx FIFO size,
+		 * and the min Tx FIFO size is less than the current Rx FIFO
+		 * allocation, take space away from current Rx allocation */
+		if (tx_space < min_tx_space &&
+		    ((min_tx_space - tx_space) < pba)) {
+			pba = pba - (min_tx_space - tx_space);
+
+			/* PCI/PCIx hardware has PBA alignment constraints */
+			switch (hw->mac_type) {
+			case e1000_82545 ... e1000_82546_rev_3:
+				pba &= ~(E1000_PBA_8K - 1);
+				break;
+			default:
+				break;
+			}
+
+			/* if short on rx space, rx wins and must trump tx
+			 * adjustment or use Early Receive if available */
+			if (pba < min_rx_space) {
+				switch (hw->mac_type) {
+				case e1000_82573:
+					/* ERT enabled in e1000_configure_rx */
+					break;
+				default:
+					pba = min_rx_space;
+					break;
+				}
+			}
+		}
+	}
+
+	ew32(PBA, pba);
+
+	/* flow control settings */
+	/* Set the FC high water mark to 90% of the FIFO size.
+	 * Required to clear last 3 LSB */
+	fc_high_water_mark = ((pba * 9216)/10) & 0xFFF8;
+	/* We can't use 90% on small FIFOs because the remainder
+	 * would be less than 1 full frame.  In this case, we size
+	 * it to allow at least a full frame above the high water
+	 *  mark. */
+	if (pba < E1000_PBA_16K)
+		fc_high_water_mark = (pba * 1024) - 1600;
+
+	hw->fc_high_water = fc_high_water_mark;
+	hw->fc_low_water = fc_high_water_mark - 8;
+	if (hw->mac_type == e1000_80003es2lan)
+		hw->fc_pause_time = 0xFFFF;
+	else
+		hw->fc_pause_time = E1000_FC_PAUSE_TIME;
+	hw->fc_send_xon = 1;
+	hw->fc = hw->original_fc;
+
+	/* Allow time for pending master requests to run */
+	e1000_reset_hw(hw);
+	if (hw->mac_type >= e1000_82544)
+		ew32(WUC, 0);
+
+	if (e1000_init_hw(hw))
+		DPRINTK(PROBE, ERR, "Hardware Error\n");
+	e1000_update_mng_vlan(adapter);
+
+	/* if (adapter->hwflags & HWFLAGS_PHY_PWR_BIT) { */
+	if (hw->mac_type >= e1000_82544 &&
+	    hw->mac_type <= e1000_82547_rev_2 &&
+	    hw->autoneg == 1 &&
+	    hw->autoneg_advertised == ADVERTISE_1000_FULL) {
+		u32 ctrl = er32(CTRL);
+		/* clear phy power management bit if we are in gig only mode,
+		 * which if enabled will attempt negotiation to 100Mb, which
+		 * can cause a loss of link at power off or driver unload */
+		ctrl &= ~E1000_CTRL_SWDPIN3;
+		ew32(CTRL, ctrl);
+	}
+
+	/* Enable h/w to recognize an 802.1Q VLAN Ethernet packet */
+	ew32(VET, ETHERNET_IEEE_VLAN_TYPE);
+
+	e1000_reset_adaptive(hw);
+	e1000_phy_get_info(hw, &adapter->phy_info);
+
+	if (!adapter->smart_power_down &&
+	    (hw->mac_type == e1000_82571 ||
+	     hw->mac_type == e1000_82572)) {
+		u16 phy_data = 0;
+		/* speed up time to link by disabling smart power down, ignore
+		 * the return value of this function because there is nothing
+		 * different we would do if it failed */
+		e1000_read_phy_reg(hw, IGP02E1000_PHY_POWER_MGMT,
+		                   &phy_data);
+		phy_data &= ~IGP02E1000_PM_SPD;
+		e1000_write_phy_reg(hw, IGP02E1000_PHY_POWER_MGMT,
+		                    phy_data);
+	}
+
+	e1000_release_manageability(adapter);
+}
+
+/**
+ * e1000_dump_eeprom - Dump the eeprom for users having checksum issues
+ **/
+static void e1000_dump_eeprom(struct e1000_adapter *adapter)
+{
+	struct net_device *netdev = adapter->netdev;
+	struct ethtool_eeprom eeprom;
+	const struct ethtool_ops *ops = netdev->ethtool_ops;
+	u8 *data;
+	int i;
+	u16 csum_old, csum_new = 0;
+
+	eeprom.len = ops->get_eeprom_len(netdev);
+	eeprom.offset = 0;
+
+	data = kmalloc(eeprom.len, GFP_KERNEL);
+	if (!data) {
+		printk(KERN_ERR "Unable to allocate memory to dump EEPROM"
+		       " data\n");
+		return;
+	}
+
+	ops->get_eeprom(netdev, &eeprom, data);
+
+	csum_old = (data[EEPROM_CHECKSUM_REG * 2]) +
+		   (data[EEPROM_CHECKSUM_REG * 2 + 1] << 8);
+	for (i = 0; i < EEPROM_CHECKSUM_REG * 2; i += 2)
+		csum_new += data[i] + (data[i + 1] << 8);
+	csum_new = EEPROM_SUM - csum_new;
+
+	printk(KERN_ERR "/*********************/\n");
+	printk(KERN_ERR "Current EEPROM Checksum : 0x%04x\n", csum_old);
+	printk(KERN_ERR "Calculated              : 0x%04x\n", csum_new);
+
+	printk(KERN_ERR "Offset    Values\n");
+	printk(KERN_ERR "========  ======\n");
+	print_hex_dump(KERN_ERR, "", DUMP_PREFIX_OFFSET, 16, 1, data, 128, 0);
+
+	printk(KERN_ERR "Include this output when contacting your support "
+	       "provider.\n");
+	printk(KERN_ERR "This is not a software error! Something bad "
+	       "happened to your hardware or\n");
+	printk(KERN_ERR "EEPROM image. Ignoring this "
+	       "problem could result in further problems,\n");
+	printk(KERN_ERR "possibly loss of data, corruption or system hangs!\n");
+	printk(KERN_ERR "The MAC Address will be reset to 00:00:00:00:00:00, "
+	       "which is invalid\n");
+	printk(KERN_ERR "and requires you to set the proper MAC "
+	       "address manually before continuing\n");
+	printk(KERN_ERR "to enable this network device.\n");
+	printk(KERN_ERR "Please inspect the EEPROM dump and report the issue "
+	       "to your hardware vendor\n");
+	printk(KERN_ERR "or Intel Customer Support.\n");
+	printk(KERN_ERR "/*********************/\n");
+
+	kfree(data);
+}
+
+/**
+ * e1000_is_need_ioport - determine if an adapter needs ioport resources or not
+ * @pdev: PCI device information struct
+ *
+ * Return true if an adapter needs ioport resources
+ **/
+static int e1000_is_need_ioport(struct pci_dev *pdev)
+{
+	switch (pdev->device) {
+	case E1000_DEV_ID_82540EM:
+	case E1000_DEV_ID_82540EM_LOM:
+	case E1000_DEV_ID_82540EP:
+	case E1000_DEV_ID_82540EP_LOM:
+	case E1000_DEV_ID_82540EP_LP:
+	case E1000_DEV_ID_82541EI:
+	case E1000_DEV_ID_82541EI_MOBILE:
+	case E1000_DEV_ID_82541ER:
+	case E1000_DEV_ID_82541ER_LOM:
+	case E1000_DEV_ID_82541GI:
+	case E1000_DEV_ID_82541GI_LF:
+	case E1000_DEV_ID_82541GI_MOBILE:
+	case E1000_DEV_ID_82544EI_COPPER:
+	case E1000_DEV_ID_82544EI_FIBER:
+	case E1000_DEV_ID_82544GC_COPPER:
+	case E1000_DEV_ID_82544GC_LOM:
+	case E1000_DEV_ID_82545EM_COPPER:
+	case E1000_DEV_ID_82545EM_FIBER:
+	case E1000_DEV_ID_82546EB_COPPER:
+	case E1000_DEV_ID_82546EB_FIBER:
+	case E1000_DEV_ID_82546EB_QUAD_COPPER:
+		return true;
+	default:
+		return false;
+	}
+}
+
+static const struct net_device_ops e1000_netdev_ops = {
+	.ndo_open		= e1000_open,
+	.ndo_stop		= e1000_close,
+	.ndo_start_xmit		= e1000_xmit_frame,
+	.ndo_get_stats		= e1000_get_stats,
+	.ndo_set_rx_mode	= e1000_set_rx_mode,
+	.ndo_set_mac_address	= e1000_set_mac,
+	.ndo_tx_timeout 	= e1000_tx_timeout,
+	.ndo_change_mtu		= e1000_change_mtu,
+	.ndo_do_ioctl		= e1000_ioctl,
+	.ndo_validate_addr	= eth_validate_addr,
+
+	.ndo_vlan_rx_register	= e1000_vlan_rx_register,
+	.ndo_vlan_rx_add_vid	= e1000_vlan_rx_add_vid,
+	.ndo_vlan_rx_kill_vid	= e1000_vlan_rx_kill_vid,
+#ifdef CONFIG_NET_POLL_CONTROLLER
+	.ndo_poll_controller	= e1000_netpoll,
+#endif
+};
+
+/**
+ * e1000_probe - Device Initialization Routine
+ * @pdev: PCI device information struct
+ * @ent: entry in e1000_pci_tbl
+ *
+ * Returns 0 on success, negative on failure
+ *
+ * e1000_probe initializes an adapter identified by a pci_dev structure.
+ * The OS initialization, configuring of the adapter private structure,
+ * and a hardware reset occur.
+ **/
+static int __devinit e1000_probe(struct pci_dev *pdev,
+				 const struct pci_device_id *ent)
+{
+	struct net_device *netdev;
+	struct e1000_adapter *adapter;
+	struct e1000_hw *hw;
+
+	static int cards_found = 0;
+	static int global_quad_port_a = 0; /* global ksp3 port a indication */
+	int i, err, pci_using_dac;
+	u16 eeprom_data = 0;
+	u16 eeprom_apme_mask = E1000_EEPROM_APME;
+	int bars, need_ioport;
+
+	/* do not allocate ioport bars when not needed */
+	need_ioport = e1000_is_need_ioport(pdev);
+	if (need_ioport) {
+		bars = pci_select_bars(pdev, IORESOURCE_MEM | IORESOURCE_IO);
+		err = pci_enable_device(pdev);
+	} else {
+		bars = pci_select_bars(pdev, IORESOURCE_MEM);
+		err = pci_enable_device_mem(pdev);
+	}
+	if (err)
+		return err;
+
+	if (!pci_set_dma_mask(pdev, DMA_BIT_MASK(64)) &&
+	    !pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64))) {
+		pci_using_dac = 1;
+	} else {
+		err = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
+		if (err) {
+			err = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(32));
+			if (err) {
+				E1000_ERR("No usable DMA configuration, "
+					  "aborting\n");
+				goto err_dma;
+			}
+		}
+		pci_using_dac = 0;
+	}
+
+	err = pci_request_selected_regions(pdev, bars, e1000_driver_name);
+	if (err)
+		goto err_pci_reg;
+
+	pci_set_master(pdev);
+
+	err = -ENOMEM;
+	netdev = alloc_etherdev(sizeof(struct e1000_adapter));
+	if (!netdev)
+		goto err_alloc_etherdev;
+
+	SET_NETDEV_DEV(netdev, &pdev->dev);
+
+	pci_set_drvdata(pdev, netdev);
+	adapter = netdev_priv(netdev);
+	adapter->netdev = netdev;
+	adapter->pdev = pdev;
+	adapter->msg_enable = (1 << debug) - 1;
+	adapter->bars = bars;
+	adapter->need_ioport = need_ioport;
+
+	hw = &adapter->hw;
+	hw->back = adapter;
+
+	err = -EIO;
+	hw->hw_addr = pci_ioremap_bar(pdev, BAR_0);
+	if (!hw->hw_addr)
+		goto err_ioremap;
+
+	if (adapter->need_ioport) {
+		for (i = BAR_1; i <= BAR_5; i++) {
+			if (pci_resource_len(pdev, i) == 0)
+				continue;
+			if (pci_resource_flags(pdev, i) & IORESOURCE_IO) {
+				hw->io_base = pci_resource_start(pdev, i);
+				break;
+			}
+		}
+	}
+
+	netdev->netdev_ops = &e1000_netdev_ops;
+	e1000_set_ethtool_ops(netdev);
+	netdev->watchdog_timeo = 5 * HZ;
+	netif_napi_add(netdev, &adapter->napi, e1000_clean, 64);
+
+	strncpy(netdev->name, pci_name(pdev), sizeof(netdev->name) - 1);
+
+	adapter->bd_number = cards_found;
+
+	/* setup the private structure */
+
+	err = e1000_sw_init(adapter);
+	if (err)
+		goto err_sw_init;
+
+	err = -EIO;
+	/* Flash BAR mapping must happen after e1000_sw_init
+	 * because it depends on mac_type */
+	if ((hw->mac_type == e1000_ich8lan) &&
+	   (pci_resource_flags(pdev, 1) & IORESOURCE_MEM)) {
+		hw->flash_address = pci_ioremap_bar(pdev, 1);
+		if (!hw->flash_address)
+			goto err_flashmap;
+	}
+
+	if (e1000_check_phy_reset_block(hw))
+		DPRINTK(PROBE, INFO, "PHY reset is blocked due to SOL/IDER session.\n");
+
+	if (hw->mac_type >= e1000_82543) {
+		netdev->features = NETIF_F_SG |
+				   NETIF_F_HW_CSUM |
+				   NETIF_F_HW_VLAN_TX |
+				   NETIF_F_HW_VLAN_RX |
+				   NETIF_F_HW_VLAN_FILTER;
+		if (hw->mac_type == e1000_ich8lan)
+			netdev->features &= ~NETIF_F_HW_VLAN_FILTER;
+	}
+
+	if ((hw->mac_type >= e1000_82544) &&
+	   (hw->mac_type != e1000_82547))
+		netdev->features |= NETIF_F_TSO;
+
+	if (hw->mac_type > e1000_82547_rev_2)
+		netdev->features |= NETIF_F_TSO6;
+	if (pci_using_dac)
+		netdev->features |= NETIF_F_HIGHDMA;
+
+	netdev->vlan_features |= NETIF_F_TSO;
+	netdev->vlan_features |= NETIF_F_TSO6;
+	netdev->vlan_features |= NETIF_F_HW_CSUM;
+	netdev->vlan_features |= NETIF_F_SG;
+
+	adapter->en_mng_pt = e1000_enable_mng_pass_thru(hw);
+
+	/* initialize eeprom parameters */
+	if (e1000_init_eeprom_params(hw)) {
+		E1000_ERR("EEPROM initialization failed\n");
+		goto err_eeprom;
+	}
+
+	/* before reading the EEPROM, reset the controller to
+	 * put the device in a known good starting state */
+
+	e1000_reset_hw(hw);
+
+	/* make sure the EEPROM is good */
+	if (e1000_validate_eeprom_checksum(hw) < 0) {
+		DPRINTK(PROBE, ERR, "The EEPROM Checksum Is Not Valid\n");
+		e1000_dump_eeprom(adapter);
+		/*
+		 * set MAC address to all zeroes to invalidate and temporarily
+		 * disable this device for the user. This blocks regular
+		 * traffic while still permitting ethtool ioctls from reaching
+		 * the hardware as well as allowing the user to run the
+		 * interface after manually setting a hw addr using
+		 * `ip link set <iface> address <mac>`
+		 */
+		memset(hw->mac_addr, 0, netdev->addr_len);
+	} else {
+		/* copy the MAC address out of the EEPROM */
+		if (e1000_read_mac_addr(hw))
+			DPRINTK(PROBE, ERR, "EEPROM Read Error\n");
+	}
+	/* don't block initialization here due to bad MAC address */
+	memcpy(netdev->dev_addr, hw->mac_addr, netdev->addr_len);
+	memcpy(netdev->perm_addr, hw->mac_addr, netdev->addr_len);
+
+	if (!is_valid_ether_addr(netdev->perm_addr))
+		DPRINTK(PROBE, ERR, "Invalid MAC Address\n");
+
+	e1000_get_bus_info(hw);
+
+	init_timer(&adapter->tx_fifo_stall_timer);
+	adapter->tx_fifo_stall_timer.function = &e1000_82547_tx_fifo_stall;
+	adapter->tx_fifo_stall_timer.data = (unsigned long)adapter;
+
+	init_timer(&adapter->watchdog_timer);
+	adapter->watchdog_timer.function = &e1000_watchdog;
+	adapter->watchdog_timer.data = (unsigned long) adapter;
+
+	init_timer(&adapter->phy_info_timer);
+	adapter->phy_info_timer.function = &e1000_update_phy_info;
+	adapter->phy_info_timer.data = (unsigned long)adapter;
+
+	INIT_WORK(&adapter->reset_task, e1000_reset_task);
+
+	e1000_check_options(adapter);
+
+	/* Initial Wake on LAN setting
+	 * If APM wake is enabled in the EEPROM,
+	 * enable the ACPI Magic Packet filter
+	 */
+
+	switch (hw->mac_type) {
+	case e1000_82542_rev2_0:
+	case e1000_82542_rev2_1:
+	case e1000_82543:
+		break;
+	case e1000_82544:
+		e1000_read_eeprom(hw,
+			EEPROM_INIT_CONTROL2_REG, 1, &eeprom_data);
+		eeprom_apme_mask = E1000_EEPROM_82544_APM;
+		break;
+	case e1000_ich8lan:
+		e1000_read_eeprom(hw,
+			EEPROM_INIT_CONTROL1_REG, 1, &eeprom_data);
+		eeprom_apme_mask = E1000_EEPROM_ICH8_APME;
+		break;
+	case e1000_82546:
+	case e1000_82546_rev_3:
+	case e1000_82571:
+	case e1000_80003es2lan:
+		if (er32(STATUS) & E1000_STATUS_FUNC_1){
+			e1000_read_eeprom(hw,
+				EEPROM_INIT_CONTROL3_PORT_B, 1, &eeprom_data);
+			break;
+		}
+		/* Fall Through */
+	default:
+		e1000_read_eeprom(hw,
+			EEPROM_INIT_CONTROL3_PORT_A, 1, &eeprom_data);
+		break;
+	}
+	if (eeprom_data & eeprom_apme_mask)
+		adapter->eeprom_wol |= E1000_WUFC_MAG;
+
+	/* now that we have the eeprom settings, apply the special cases
+	 * where the eeprom may be wrong or the board simply won't support
+	 * wake on lan on a particular port */
+	switch (pdev->device) {
+	case E1000_DEV_ID_82546GB_PCIE:
+		adapter->eeprom_wol = 0;
+		break;
+	case E1000_DEV_ID_82546EB_FIBER:
+	case E1000_DEV_ID_82546GB_FIBER:
+	case E1000_DEV_ID_82571EB_FIBER:
+		/* Wake events only supported on port A for dual fiber
+		 * regardless of eeprom setting */
+		if (er32(STATUS) & E1000_STATUS_FUNC_1)
+			adapter->eeprom_wol = 0;
+		break;
+	case E1000_DEV_ID_82546GB_QUAD_COPPER_KSP3:
+	case E1000_DEV_ID_82571EB_QUAD_COPPER:
+	case E1000_DEV_ID_82571EB_QUAD_FIBER:
+	case E1000_DEV_ID_82571EB_QUAD_COPPER_LOWPROFILE:
+	case E1000_DEV_ID_82571PT_QUAD_COPPER:
+		/* if quad port adapter, disable WoL on all but port A */
+		if (global_quad_port_a != 0)
+			adapter->eeprom_wol = 0;
+		else
+			adapter->quad_port_a = 1;
+		/* Reset for multiple quad port adapters */
+		if (++global_quad_port_a == 4)
+			global_quad_port_a = 0;
+		break;
+	}
+
+	/* initialize the wol settings based on the eeprom settings */
+	adapter->wol = adapter->eeprom_wol;
+	device_set_wakeup_enable(&adapter->pdev->dev, adapter->wol);
+
+	/* print bus type/speed/width info */
+	DPRINTK(PROBE, INFO, "(PCI%s:%s:%s) ",
+		((hw->bus_type == e1000_bus_type_pcix) ? "-X" :
+		 (hw->bus_type == e1000_bus_type_pci_express ? " Express":"")),
+		((hw->bus_speed == e1000_bus_speed_2500) ? "2.5Gb/s" :
+		 (hw->bus_speed == e1000_bus_speed_133) ? "133MHz" :
+		 (hw->bus_speed == e1000_bus_speed_120) ? "120MHz" :
+		 (hw->bus_speed == e1000_bus_speed_100) ? "100MHz" :
+		 (hw->bus_speed == e1000_bus_speed_66) ? "66MHz" : "33MHz"),
+		((hw->bus_width == e1000_bus_width_64) ? "64-bit" :
+		 (hw->bus_width == e1000_bus_width_pciex_4) ? "Width x4" :
+		 (hw->bus_width == e1000_bus_width_pciex_1) ? "Width x1" :
+		 "32-bit"));
+
+	printk("%pM\n", netdev->dev_addr);
+
+	if (hw->bus_type == e1000_bus_type_pci_express) {
+		DPRINTK(PROBE, WARNING, "This device (id %04x:%04x) will no "
+			"longer be supported by this driver in the future.\n",
+			pdev->vendor, pdev->device);
+		DPRINTK(PROBE, WARNING, "please use the \"e1000e\" "
+			"driver instead.\n");
+	}
+
+	/* reset the hardware with the new settings */
+	e1000_reset(adapter);
+
+	/* If the controller is 82573 and f/w is AMT, do not set
+	 * DRV_LOAD until the interface is up.  For all other cases,
+	 * let the f/w know that the h/w is now under the control
+	 * of the driver. */
+	if (hw->mac_type != e1000_82573 ||
+	    !e1000_check_mng_mode(hw))
+		e1000_get_hw_control(adapter);
+
+ 	// offer device to EtherCAT master module
+	adapter->ecdev = ecdev_offer(netdev, ec_poll, THIS_MODULE);
+	if (adapter->ecdev) {
+		if (ecdev_open(adapter->ecdev)) {
+			ecdev_withdraw(adapter->ecdev);
+			goto err_register;
+		}
+	} else {
+		strcpy(netdev->name, "eth%d");
+		err = register_netdev(netdev);
+		if (err)
+			goto err_register;
+
+		/* carrier off reporting is important to ethtool even BEFORE open */
+		netif_carrier_off(netdev);
+	}
+
+	DPRINTK(PROBE, INFO, "Intel(R) PRO/1000 Network Connection\n");
+
+	cards_found++;
+	return 0;
+
+err_register:
+	e1000_release_hw_control(adapter);
+err_eeprom:
+	if (!e1000_check_phy_reset_block(hw))
+		e1000_phy_hw_reset(hw);
+
+	if (hw->flash_address)
+		iounmap(hw->flash_address);
+err_flashmap:
+	kfree(adapter->tx_ring);
+	kfree(adapter->rx_ring);
+err_sw_init:
+	iounmap(hw->hw_addr);
+err_ioremap:
+	free_netdev(netdev);
+err_alloc_etherdev:
+	pci_release_selected_regions(pdev, bars);
+err_pci_reg:
+err_dma:
+	pci_disable_device(pdev);
+	return err;
+}
+
+/**
+ * e1000_remove - Device Removal Routine
+ * @pdev: PCI device information struct
+ *
+ * e1000_remove is called by the PCI subsystem to alert the driver
+ * that it should release a PCI device.  This could be caused by a
+ * Hot-Plug event, or because the driver is going to be removed from
+ * memory.
+ **/
+
+static void __devexit e1000_remove(struct pci_dev *pdev)
+{
+	struct net_device *netdev = pci_get_drvdata(pdev);
+	struct e1000_adapter *adapter = netdev_priv(netdev);
+	struct e1000_hw *hw = &adapter->hw;
+
+	cancel_work_sync(&adapter->reset_task);
+
+	e1000_release_manageability(adapter);
+
+	/* Release control of h/w to f/w.  If f/w is AMT enabled, this
+	 * would have already happened in close and is redundant. */
+	e1000_release_hw_control(adapter);
+
+	if (adapter->ecdev) {
+		ecdev_close(adapter->ecdev);
+		ecdev_withdraw(adapter->ecdev);
+	} else {
+		unregister_netdev(netdev);
+	}
+
+	if (!e1000_check_phy_reset_block(hw))
+		e1000_phy_hw_reset(hw);
+
+	kfree(adapter->tx_ring);
+	kfree(adapter->rx_ring);
+
+	iounmap(hw->hw_addr);
+	if (hw->flash_address)
+		iounmap(hw->flash_address);
+	pci_release_selected_regions(pdev, adapter->bars);
+
+	free_netdev(netdev);
+
+	pci_disable_device(pdev);
+}
+
+/**
+ * e1000_sw_init - Initialize general software structures (struct e1000_adapter)
+ * @adapter: board private structure to initialize
+ *
+ * e1000_sw_init initializes the Adapter private data structure.
+ * Fields are initialized based on PCI device information and
+ * OS network device settings (MTU size).
+ **/
+
+static int __devinit e1000_sw_init(struct e1000_adapter *adapter)
+{
+	struct e1000_hw *hw = &adapter->hw;
+	struct net_device *netdev = adapter->netdev;
+	struct pci_dev *pdev = adapter->pdev;
+
+	/* PCI config space info */
+
+	hw->vendor_id = pdev->vendor;
+	hw->device_id = pdev->device;
+	hw->subsystem_vendor_id = pdev->subsystem_vendor;
+	hw->subsystem_id = pdev->subsystem_device;
+	hw->revision_id = pdev->revision;
+
+	pci_read_config_word(pdev, PCI_COMMAND, &hw->pci_cmd_word);
+
+	adapter->rx_buffer_len = MAXIMUM_ETHERNET_VLAN_SIZE;
+	hw->max_frame_size = netdev->mtu +
+			     ENET_HEADER_SIZE + ETHERNET_FCS_SIZE;
+	hw->min_frame_size = MINIMUM_ETHERNET_FRAME_SIZE;
+
+	/* identify the MAC */
+
+	if (e1000_set_mac_type(hw)) {
+		DPRINTK(PROBE, ERR, "Unknown MAC Type\n");
+		return -EIO;
+	}
+
+	switch (hw->mac_type) {
+	default:
+		break;
+	case e1000_82541:
+	case e1000_82547:
+	case e1000_82541_rev_2:
+	case e1000_82547_rev_2:
+		hw->phy_init_script = 1;
+		break;
+	}
+
+	e1000_set_media_type(hw);
+
+	hw->wait_autoneg_complete = false;
+	hw->tbi_compatibility_en = true;
+	hw->adaptive_ifs = true;
+
+	/* Copper options */
+
+	if (hw->media_type == e1000_media_type_copper) {
+		hw->mdix = AUTO_ALL_MODES;
+		hw->disable_polarity_correction = false;
+		hw->master_slave = E1000_MASTER_SLAVE;
+	}
+
+	adapter->num_tx_queues = 1;
+	adapter->num_rx_queues = 1;
+
+	if (e1000_alloc_queues(adapter)) {
+		DPRINTK(PROBE, ERR, "Unable to allocate memory for queues\n");
+		return -ENOMEM;
+	}
+
+	/* Explicitly disable IRQ since the NIC can be in any state. */
+	e1000_irq_disable(adapter);
+
+	spin_lock_init(&adapter->stats_lock);
+
+	set_bit(__E1000_DOWN, &adapter->flags);
+
+	return 0;
+}
+
+/**
+ * e1000_alloc_queues - Allocate memory for all rings
+ * @adapter: board private structure to initialize
+ *
+ * We allocate one ring per queue at run-time since we don't know the
+ * number of queues at compile-time.
+ **/
+
+static int __devinit e1000_alloc_queues(struct e1000_adapter *adapter)
+{
+	adapter->tx_ring = kcalloc(adapter->num_tx_queues,
+	                           sizeof(struct e1000_tx_ring), GFP_KERNEL);
+	if (!adapter->tx_ring)
+		return -ENOMEM;
+
+	adapter->rx_ring = kcalloc(adapter->num_rx_queues,
+	                           sizeof(struct e1000_rx_ring), GFP_KERNEL);
+	if (!adapter->rx_ring) {
+		kfree(adapter->tx_ring);
+		return -ENOMEM;
+	}
+
+	return E1000_SUCCESS;
+}
+
+/**
+ * e1000_open - Called when a network interface is made active
+ * @netdev: network interface device structure
+ *
+ * Returns 0 on success, negative value on failure
+ *
+ * The open entry point is called when a network interface is made
+ * active by the system (IFF_UP).  At this point all resources needed
+ * for transmit and receive operations are allocated, the interrupt
+ * handler is registered with the OS, the watchdog timer is started,
+ * and the stack is notified that the interface is ready.
+ **/
+
+static int e1000_open(struct net_device *netdev)
+{
+	struct e1000_adapter *adapter = netdev_priv(netdev);
+	struct e1000_hw *hw = &adapter->hw;
+	int err;
+
+	/* disallow open during test */
+	if (test_bit(__E1000_TESTING, &adapter->flags))
+		return -EBUSY;
+
+	netif_carrier_off(netdev);
+
+	/* allocate transmit descriptors */
+	err = e1000_setup_all_tx_resources(adapter);
+	if (err)
+		goto err_setup_tx;
+
+	/* allocate receive descriptors */
+	err = e1000_setup_all_rx_resources(adapter);
+	if (err)
+		goto err_setup_rx;
+
+	e1000_power_up_phy(adapter);
+
+	adapter->mng_vlan_id = E1000_MNG_VLAN_NONE;
+	if ((hw->mng_cookie.status &
+			  E1000_MNG_DHCP_COOKIE_STATUS_VLAN_SUPPORT)) {
+		e1000_update_mng_vlan(adapter);
+	}
+
+	/* If AMT is enabled, let the firmware know that the network
+	 * interface is now open */
+	if (hw->mac_type == e1000_82573 &&
+	    e1000_check_mng_mode(hw))
+		e1000_get_hw_control(adapter);
+
+	/* before we allocate an interrupt, we must be ready to handle it.
+	 * Setting DEBUG_SHIRQ in the kernel makes it fire an interrupt
+	 * as soon as we call pci_request_irq, so we have to setup our
+	 * clean_rx handler before we do so.  */
+	e1000_configure(adapter);
+
+	err = e1000_request_irq(adapter);
+	if (err)
+		goto err_req_irq;
+
+	/* From here on the code is the same as e1000_up() */
+	clear_bit(__E1000_DOWN, &adapter->flags);
+
+	napi_enable(&adapter->napi);
+
+	e1000_irq_enable(adapter);
+
+	netif_start_queue(netdev);
+
+	/* fire a link status change interrupt to start the watchdog */
+	ew32(ICS, E1000_ICS_LSC);
+
+	return E1000_SUCCESS;
+
+err_req_irq:
+	e1000_release_hw_control(adapter);
+	e1000_power_down_phy(adapter);
+	e1000_free_all_rx_resources(adapter);
+err_setup_rx:
+	e1000_free_all_tx_resources(adapter);
+err_setup_tx:
+	e1000_reset(adapter);
+
+	return err;
+}
+
+/**
+ * e1000_close - Disables a network interface
+ * @netdev: network interface device structure
+ *
+ * Returns 0, this is not allowed to fail
+ *
+ * The close entry point is called when an interface is de-activated
+ * by the OS.  The hardware is still under the drivers control, but
+ * needs to be disabled.  A global MAC reset is issued to stop the
+ * hardware, and all transmit and receive resources are freed.
+ **/
+
+static int e1000_close(struct net_device *netdev)
+{
+	struct e1000_adapter *adapter = netdev_priv(netdev);
+	struct e1000_hw *hw = &adapter->hw;
+
+	WARN_ON(test_bit(__E1000_RESETTING, &adapter->flags));
+	e1000_down(adapter);
+	e1000_power_down_phy(adapter);
+	e1000_free_irq(adapter);
+
+	e1000_free_all_tx_resources(adapter);
+	e1000_free_all_rx_resources(adapter);
+
+	/* kill manageability vlan ID if supported, but not if a vlan with
+	 * the same ID is registered on the host OS (let 8021q kill it) */
+	if ((hw->mng_cookie.status &
+			  E1000_MNG_DHCP_COOKIE_STATUS_VLAN_SUPPORT) &&
+	     !(adapter->vlgrp &&
+	       vlan_group_get_device(adapter->vlgrp, adapter->mng_vlan_id))) {
+		e1000_vlan_rx_kill_vid(netdev, adapter->mng_vlan_id);
+	}
+
+	/* If AMT is enabled, let the firmware know that the network
+	 * interface is now closed */
+	if (hw->mac_type == e1000_82573 &&
+	    e1000_check_mng_mode(hw))
+		e1000_release_hw_control(adapter);
+
+	return 0;
+}
+
+/**
+ * e1000_check_64k_bound - check that memory doesn't cross 64kB boundary
+ * @adapter: address of board private structure
+ * @start: address of beginning of memory
+ * @len: length of memory
+ **/
+static bool e1000_check_64k_bound(struct e1000_adapter *adapter, void *start,
+				  unsigned long len)
+{
+	struct e1000_hw *hw = &adapter->hw;
+	unsigned long begin = (unsigned long)start;
+	unsigned long end = begin + len;
+
+	/* First rev 82545 and 82546 must not allow any memory
+	 * write location to cross a 64k boundary due to errata 23 */
+	if (hw->mac_type == e1000_82545 ||
+	    hw->mac_type == e1000_82546) {
+		return ((begin ^ (end - 1)) >> 16) == 0;
+	}
+
+	return true;
+}
+
+/**
+ * e1000_setup_tx_resources - allocate Tx resources (Descriptors)
+ * @adapter: board private structure
+ * @txdr:    tx descriptor ring (for a specific queue) to setup
+ *
+ * Return 0 on success, negative on failure
+ **/
+
+static int e1000_setup_tx_resources(struct e1000_adapter *adapter,
+				    struct e1000_tx_ring *txdr)
+{
+	struct pci_dev *pdev = adapter->pdev;
+	int size;
+
+	size = sizeof(struct e1000_buffer) * txdr->count;
+	txdr->buffer_info = vmalloc(size);
+	if (!txdr->buffer_info) {
+		DPRINTK(PROBE, ERR,
+		"Unable to allocate memory for the transmit descriptor ring\n");
+		return -ENOMEM;
+	}
+	memset(txdr->buffer_info, 0, size);
+
+	/* round up to nearest 4K */
+
+	txdr->size = txdr->count * sizeof(struct e1000_tx_desc);
+	txdr->size = ALIGN(txdr->size, 4096);
+
+	txdr->desc = pci_alloc_consistent(pdev, txdr->size, &txdr->dma);
+	if (!txdr->desc) {
+setup_tx_desc_die:
+		vfree(txdr->buffer_info);
+		DPRINTK(PROBE, ERR,
+		"Unable to allocate memory for the transmit descriptor ring\n");
+		return -ENOMEM;
+	}
+
+	/* Fix for errata 23, can't cross 64kB boundary */
+	if (!e1000_check_64k_bound(adapter, txdr->desc, txdr->size)) {
+		void *olddesc = txdr->desc;
+		dma_addr_t olddma = txdr->dma;
+		DPRINTK(TX_ERR, ERR, "txdr align check failed: %u bytes "
+				     "at %p\n", txdr->size, txdr->desc);
+		/* Try again, without freeing the previous */
+		txdr->desc = pci_alloc_consistent(pdev, txdr->size, &txdr->dma);
+		/* Failed allocation, critical failure */
+		if (!txdr->desc) {
+			pci_free_consistent(pdev, txdr->size, olddesc, olddma);
+			goto setup_tx_desc_die;
+		}
+
+		if (!e1000_check_64k_bound(adapter, txdr->desc, txdr->size)) {
+			/* give up */
+			pci_free_consistent(pdev, txdr->size, txdr->desc,
+					    txdr->dma);
+			pci_free_consistent(pdev, txdr->size, olddesc, olddma);
+			DPRINTK(PROBE, ERR,
+				"Unable to allocate aligned memory "
+				"for the transmit descriptor ring\n");
+			vfree(txdr->buffer_info);
+			return -ENOMEM;
+		} else {
+			/* Free old allocation, new allocation was successful */
+			pci_free_consistent(pdev, txdr->size, olddesc, olddma);
+		}
+	}
+	memset(txdr->desc, 0, txdr->size);
+
+	txdr->next_to_use = 0;
+	txdr->next_to_clean = 0;
+
+	return 0;
+}
+
+/**
+ * e1000_setup_all_tx_resources - wrapper to allocate Tx resources
+ * 				  (Descriptors) for all queues
+ * @adapter: board private structure
+ *
+ * Return 0 on success, negative on failure
+ **/
+
+int e1000_setup_all_tx_resources(struct e1000_adapter *adapter)
+{
+	int i, err = 0;
+
+	for (i = 0; i < adapter->num_tx_queues; i++) {
+		err = e1000_setup_tx_resources(adapter, &adapter->tx_ring[i]);
+		if (err) {
+			DPRINTK(PROBE, ERR,
+				"Allocation for Tx Queue %u failed\n", i);
+			for (i-- ; i >= 0; i--)
+				e1000_free_tx_resources(adapter,
+							&adapter->tx_ring[i]);
+			break;
+		}
+	}
+
+	return err;
+}
+
+/**
+ * e1000_configure_tx - Configure 8254x Transmit Unit after Reset
+ * @adapter: board private structure
+ *
+ * Configure the Tx unit of the MAC after a reset.
+ **/
+
+static void e1000_configure_tx(struct e1000_adapter *adapter)
+{
+	u64 tdba;
+	struct e1000_hw *hw = &adapter->hw;
+	u32 tdlen, tctl, tipg, tarc;
+	u32 ipgr1, ipgr2;
+
+	/* Setup the HW Tx Head and Tail descriptor pointers */
+
+	switch (adapter->num_tx_queues) {
+	case 1:
+	default:
+		tdba = adapter->tx_ring[0].dma;
+		tdlen = adapter->tx_ring[0].count *
+			sizeof(struct e1000_tx_desc);
+		ew32(TDLEN, tdlen);
+		ew32(TDBAH, (tdba >> 32));
+		ew32(TDBAL, (tdba & 0x00000000ffffffffULL));
+		ew32(TDT, 0);
+		ew32(TDH, 0);
+		adapter->tx_ring[0].tdh = ((hw->mac_type >= e1000_82543) ? E1000_TDH : E1000_82542_TDH);
+		adapter->tx_ring[0].tdt = ((hw->mac_type >= e1000_82543) ? E1000_TDT : E1000_82542_TDT);
+		break;
+	}
+
+	/* Set the default values for the Tx Inter Packet Gap timer */
+	if (hw->mac_type <= e1000_82547_rev_2 &&
+	    (hw->media_type == e1000_media_type_fiber ||
+	     hw->media_type == e1000_media_type_internal_serdes))
+		tipg = DEFAULT_82543_TIPG_IPGT_FIBER;
+	else
+		tipg = DEFAULT_82543_TIPG_IPGT_COPPER;
+
+	switch (hw->mac_type) {
+	case e1000_82542_rev2_0:
+	case e1000_82542_rev2_1:
+		tipg = DEFAULT_82542_TIPG_IPGT;
+		ipgr1 = DEFAULT_82542_TIPG_IPGR1;
+		ipgr2 = DEFAULT_82542_TIPG_IPGR2;
+		break;
+	case e1000_80003es2lan:
+		ipgr1 = DEFAULT_82543_TIPG_IPGR1;
+		ipgr2 = DEFAULT_80003ES2LAN_TIPG_IPGR2;
+		break;
+	default:
+		ipgr1 = DEFAULT_82543_TIPG_IPGR1;
+		ipgr2 = DEFAULT_82543_TIPG_IPGR2;
+		break;
+	}
+	tipg |= ipgr1 << E1000_TIPG_IPGR1_SHIFT;
+	tipg |= ipgr2 << E1000_TIPG_IPGR2_SHIFT;
+	ew32(TIPG, tipg);
+
+	/* Set the Tx Interrupt Delay register */
+
+	ew32(TIDV, adapter->tx_int_delay);
+	if (hw->mac_type >= e1000_82540)
+		ew32(TADV, adapter->tx_abs_int_delay);
+
+	/* Program the Transmit Control Register */
+
+	tctl = er32(TCTL);
+	tctl &= ~E1000_TCTL_CT;
+	tctl |= E1000_TCTL_PSP | E1000_TCTL_RTLC |
+		(E1000_COLLISION_THRESHOLD << E1000_CT_SHIFT);
+
+	if (hw->mac_type == e1000_82571 || hw->mac_type == e1000_82572) {
+		tarc = er32(TARC0);
+		/* set the speed mode bit, we'll clear it if we're not at
+		 * gigabit link later */
+		tarc |= (1 << 21);
+		ew32(TARC0, tarc);
+	} else if (hw->mac_type == e1000_80003es2lan) {
+		tarc = er32(TARC0);
+		tarc |= 1;
+		ew32(TARC0, tarc);
+		tarc = er32(TARC1);
+		tarc |= 1;
+		ew32(TARC1, tarc);
+	}
+
+	e1000_config_collision_dist(hw);
+
+	/* Setup Transmit Descriptor Settings for eop descriptor */
+	adapter->txd_cmd = E1000_TXD_CMD_EOP | E1000_TXD_CMD_IFCS;
+
+	/* only set IDE if we are delaying interrupts using the timers */
+	if (adapter->tx_int_delay)
+		adapter->txd_cmd |= E1000_TXD_CMD_IDE;
+
+	if (hw->mac_type < e1000_82543)
+		adapter->txd_cmd |= E1000_TXD_CMD_RPS;
+	else
+		adapter->txd_cmd |= E1000_TXD_CMD_RS;
+
+	/* Cache if we're 82544 running in PCI-X because we'll
+	 * need this to apply a workaround later in the send path. */
+	if (hw->mac_type == e1000_82544 &&
+	    hw->bus_type == e1000_bus_type_pcix)
+		adapter->pcix_82544 = 1;
+
+	ew32(TCTL, tctl);
+
+}
+
+/**
+ * e1000_setup_rx_resources - allocate Rx resources (Descriptors)
+ * @adapter: board private structure
+ * @rxdr:    rx descriptor ring (for a specific queue) to setup
+ *
+ * Returns 0 on success, negative on failure
+ **/
+
+static int e1000_setup_rx_resources(struct e1000_adapter *adapter,
+				    struct e1000_rx_ring *rxdr)
+{
+	struct e1000_hw *hw = &adapter->hw;
+	struct pci_dev *pdev = adapter->pdev;
+	int size, desc_len;
+
+	size = sizeof(struct e1000_buffer) * rxdr->count;
+	rxdr->buffer_info = vmalloc(size);
+	if (!rxdr->buffer_info) {
+		DPRINTK(PROBE, ERR,
+		"Unable to allocate memory for the receive descriptor ring\n");
+		return -ENOMEM;
+	}
+	memset(rxdr->buffer_info, 0, size);
+
+	if (hw->mac_type <= e1000_82547_rev_2)
+		desc_len = sizeof(struct e1000_rx_desc);
+	else
+		desc_len = sizeof(union e1000_rx_desc_packet_split);
+
+	/* Round up to nearest 4K */
+
+	rxdr->size = rxdr->count * desc_len;
+	rxdr->size = ALIGN(rxdr->size, 4096);
+
+	rxdr->desc = pci_alloc_consistent(pdev, rxdr->size, &rxdr->dma);
+
+	if (!rxdr->desc) {
+		DPRINTK(PROBE, ERR,
+		"Unable to allocate memory for the receive descriptor ring\n");
+setup_rx_desc_die:
+		vfree(rxdr->buffer_info);
+		return -ENOMEM;
+	}
+
+	/* Fix for errata 23, can't cross 64kB boundary */
+	if (!e1000_check_64k_bound(adapter, rxdr->desc, rxdr->size)) {
+		void *olddesc = rxdr->desc;
+		dma_addr_t olddma = rxdr->dma;
+		DPRINTK(RX_ERR, ERR, "rxdr align check failed: %u bytes "
+				     "at %p\n", rxdr->size, rxdr->desc);
+		/* Try again, without freeing the previous */
+		rxdr->desc = pci_alloc_consistent(pdev, rxdr->size, &rxdr->dma);
+		/* Failed allocation, critical failure */
+		if (!rxdr->desc) {
+			pci_free_consistent(pdev, rxdr->size, olddesc, olddma);
+			DPRINTK(PROBE, ERR,
+				"Unable to allocate memory "
+				"for the receive descriptor ring\n");
+			goto setup_rx_desc_die;
+		}
+
+		if (!e1000_check_64k_bound(adapter, rxdr->desc, rxdr->size)) {
+			/* give up */
+			pci_free_consistent(pdev, rxdr->size, rxdr->desc,
+					    rxdr->dma);
+			pci_free_consistent(pdev, rxdr->size, olddesc, olddma);
+			DPRINTK(PROBE, ERR,
+				"Unable to allocate aligned memory "
+				"for the receive descriptor ring\n");
+			goto setup_rx_desc_die;
+		} else {
+			/* Free old allocation, new allocation was successful */
+			pci_free_consistent(pdev, rxdr->size, olddesc, olddma);
+		}
+	}
+	memset(rxdr->desc, 0, rxdr->size);
+
+	rxdr->next_to_clean = 0;
+	rxdr->next_to_use = 0;
+
+	return 0;
+}
+
+/**
+ * e1000_setup_all_rx_resources - wrapper to allocate Rx resources
+ * 				  (Descriptors) for all queues
+ * @adapter: board private structure
+ *
+ * Return 0 on success, negative on failure
+ **/
+
+int e1000_setup_all_rx_resources(struct e1000_adapter *adapter)
+{
+	int i, err = 0;
+
+	for (i = 0; i < adapter->num_rx_queues; i++) {
+		err = e1000_setup_rx_resources(adapter, &adapter->rx_ring[i]);
+		if (err) {
+			DPRINTK(PROBE, ERR,
+				"Allocation for Rx Queue %u failed\n", i);
+			for (i-- ; i >= 0; i--)
+				e1000_free_rx_resources(adapter,
+							&adapter->rx_ring[i]);
+			break;
+		}
+	}
+
+	return err;
+}
+
+/**
+ * e1000_setup_rctl - configure the receive control registers
+ * @adapter: Board private structure
+ **/
+static void e1000_setup_rctl(struct e1000_adapter *adapter)
+{
+	struct e1000_hw *hw = &adapter->hw;
+	u32 rctl;
+
+	rctl = er32(RCTL);
+
+	rctl &= ~(3 << E1000_RCTL_MO_SHIFT);
+
+	rctl |= E1000_RCTL_EN | E1000_RCTL_BAM |
+		E1000_RCTL_LBM_NO | E1000_RCTL_RDMTS_HALF |
+		(hw->mc_filter_type << E1000_RCTL_MO_SHIFT);
+
+	if (hw->tbi_compatibility_on == 1)
+		rctl |= E1000_RCTL_SBP;
+	else
+		rctl &= ~E1000_RCTL_SBP;
+
+	if (adapter->netdev->mtu <= ETH_DATA_LEN)
+		rctl &= ~E1000_RCTL_LPE;
+	else
+		rctl |= E1000_RCTL_LPE;
+
+	/* Setup buffer sizes */
+	rctl &= ~E1000_RCTL_SZ_4096;
+	rctl |= E1000_RCTL_BSEX;
+	switch (adapter->rx_buffer_len) {
+		case E1000_RXBUFFER_256:
+			rctl |= E1000_RCTL_SZ_256;
+			rctl &= ~E1000_RCTL_BSEX;
+			break;
+		case E1000_RXBUFFER_512:
+			rctl |= E1000_RCTL_SZ_512;
+			rctl &= ~E1000_RCTL_BSEX;
+			break;
+		case E1000_RXBUFFER_1024:
+			rctl |= E1000_RCTL_SZ_1024;
+			rctl &= ~E1000_RCTL_BSEX;
+			break;
+		case E1000_RXBUFFER_2048:
+		default:
+			rctl |= E1000_RCTL_SZ_2048;
+			rctl &= ~E1000_RCTL_BSEX;
+			break;
+		case E1000_RXBUFFER_4096:
+			rctl |= E1000_RCTL_SZ_4096;
+			break;
+		case E1000_RXBUFFER_8192:
+			rctl |= E1000_RCTL_SZ_8192;
+			break;
+		case E1000_RXBUFFER_16384:
+			rctl |= E1000_RCTL_SZ_16384;
+			break;
+	}
+
+	ew32(RCTL, rctl);
+}
+
+/**
+ * e1000_configure_rx - Configure 8254x Receive Unit after Reset
+ * @adapter: board private structure
+ *
+ * Configure the Rx unit of the MAC after a reset.
+ **/
+
+static void e1000_configure_rx(struct e1000_adapter *adapter)
+{
+	u64 rdba;
+	struct e1000_hw *hw = &adapter->hw;
+	u32 rdlen, rctl, rxcsum, ctrl_ext;
+
+	rdlen = adapter->rx_ring[0].count *
+		sizeof(struct e1000_rx_desc);
+	adapter->clean_rx = e1000_clean_rx_irq;
+	adapter->alloc_rx_buf = e1000_alloc_rx_buffers;
+
+	/* disable receives while setting up the descriptors */
+	rctl = er32(RCTL);
+	ew32(RCTL, rctl & ~E1000_RCTL_EN);
+
+	/* set the Receive Delay Timer Register */
+	ew32(RDTR, adapter->rx_int_delay);
+
+	if (hw->mac_type >= e1000_82540) {
+		ew32(RADV, adapter->rx_abs_int_delay);
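+		/* the ITR register counts in 256 ns units, so convert the
+		 * requested interrupt rate (interrupts/s) into an interval */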
+		if (adapter->itr_setting != 0)
+			ew32(ITR, 1000000000 / (adapter->itr * 256));
+	}
+
+	if (hw->mac_type >= e1000_82571) {
+		ctrl_ext = er32(CTRL_EXT);
+		/* Reset delay timers after every interrupt */
+		ctrl_ext |= E1000_CTRL_EXT_INT_TIMER_CLR;
+		/* Auto-Mask interrupts upon ICR access */
+		ctrl_ext |= E1000_CTRL_EXT_IAME;
+		ew32(IAM, 0xffffffff);
+		ew32(CTRL_EXT, ctrl_ext);
+		E1000_WRITE_FLUSH();
+	}
+
+	/* Setup the HW Rx Head and Tail Descriptor Pointers and
+	 * the Base and Length of the Rx Descriptor Ring */
+	switch (adapter->num_rx_queues) {
+	case 1:
+	default:
+		rdba = adapter->rx_ring[0].dma;
+		ew32(RDLEN, rdlen);
+		ew32(RDBAH, (rdba >> 32));
+		ew32(RDBAL, (rdba & 0x00000000ffffffffULL));
+		ew32(RDT, 0);
+		ew32(RDH, 0);
+		adapter->rx_ring[0].rdh = ((hw->mac_type >= e1000_82543) ? E1000_RDH : E1000_82542_RDH);
+		adapter->rx_ring[0].rdt = ((hw->mac_type >= e1000_82543) ? E1000_RDT : E1000_82542_RDT);
+		break;
+	}
+
+	/* Enable 82543 Receive Checksum Offload for TCP and UDP */
+	if (hw->mac_type >= e1000_82543) {
+		rxcsum = er32(RXCSUM);
+		if (adapter->rx_csum)
+			rxcsum |= E1000_RXCSUM_TUOFL;
+		else
+			/* don't need to clear IPPCSE as it defaults to 0 */
+			rxcsum &= ~E1000_RXCSUM_TUOFL;
+		ew32(RXCSUM, rxcsum);
+	}
+
+	/* Enable Receives */
+	ew32(RCTL, rctl);
+}
+
+/**
+ * e1000_free_tx_resources - Free Tx Resources per Queue
+ * @adapter: board private structure
+ * @tx_ring: Tx descriptor ring for a specific queue
+ *
+ * Free all transmit software resources
+ **/
+
+static void e1000_free_tx_resources(struct e1000_adapter *adapter,
+				    struct e1000_tx_ring *tx_ring)
+{
+	struct pci_dev *pdev = adapter->pdev;
+
+	e1000_clean_tx_ring(adapter, tx_ring);
+
+	vfree(tx_ring->buffer_info);
+	tx_ring->buffer_info = NULL;
+
+	pci_free_consistent(pdev, tx_ring->size, tx_ring->desc, tx_ring->dma);
+
+	tx_ring->desc = NULL;
+}
+
+/**
+ * e1000_free_all_tx_resources - Free Tx Resources for All Queues
+ * @adapter: board private structure
+ *
+ * Free all transmit software resources
+ **/
+
+void e1000_free_all_tx_resources(struct e1000_adapter *adapter)
+{
+	int i;
+
+	for (i = 0; i < adapter->num_tx_queues; i++)
+		e1000_free_tx_resources(adapter, &adapter->tx_ring[i]);
+}
+
+static void e1000_unmap_and_free_tx_resource(struct e1000_adapter *adapter,
+					     struct e1000_buffer *buffer_info)
+{
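+	/* buffers of an EtherCAT-claimed device are owned by the master
+	 * and must not be unmapped or freed here */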
+	if (adapter->ecdev)
+		return;
+
+	buffer_info->dma = 0;
+	if (buffer_info->skb) {
+		skb_dma_unmap(&adapter->pdev->dev, buffer_info->skb,
+		              DMA_TO_DEVICE);
+		dev_kfree_skb_any(buffer_info->skb);
+		buffer_info->skb = NULL;
+	}
+	buffer_info->time_stamp = 0;
+	/* buffer_info must be completely set up in the transmit path */
+}
+
+/**
+ * e1000_clean_tx_ring - Free Tx Buffers
+ * @adapter: board private structure
+ * @tx_ring: ring to be cleaned
+ **/
+
+static void e1000_clean_tx_ring(struct e1000_adapter *adapter,
+				struct e1000_tx_ring *tx_ring)
+{
+	struct e1000_hw *hw = &adapter->hw;
+	struct e1000_buffer *buffer_info;
+	unsigned long size;
+	unsigned int i;
+
+	/* Free all the Tx ring sk_buffs */
+
+	for (i = 0; i < tx_ring->count; i++) {
+		buffer_info = &tx_ring->buffer_info[i];
+		e1000_unmap_and_free_tx_resource(adapter, buffer_info);
+	}
+
+	size = sizeof(struct e1000_buffer) * tx_ring->count;
+	memset(tx_ring->buffer_info, 0, size);
+
+	/* Zero out the descriptor ring */
+
+	memset(tx_ring->desc, 0, tx_ring->size);
+
+	tx_ring->next_to_use = 0;
+	tx_ring->next_to_clean = 0;
+	tx_ring->last_tx_tso = 0;
+
+	writel(0, hw->hw_addr + tx_ring->tdh);
+	writel(0, hw->hw_addr + tx_ring->tdt);
+}
+
+/**
+ * e1000_clean_all_tx_rings - Free Tx Buffers for all queues
+ * @adapter: board private structure
+ **/
+
+static void e1000_clean_all_tx_rings(struct e1000_adapter *adapter)
+{
+	int i;
+
+	for (i = 0; i < adapter->num_tx_queues; i++)
+		e1000_clean_tx_ring(adapter, &adapter->tx_ring[i]);
+}
+
+/**
+ * e1000_free_rx_resources - Free Rx Resources
+ * @adapter: board private structure
+ * @rx_ring: ring to clean the resources from
+ *
+ * Free all receive software resources
+ **/
+
+static void e1000_free_rx_resources(struct e1000_adapter *adapter,
+				    struct e1000_rx_ring *rx_ring)
+{
+	struct pci_dev *pdev = adapter->pdev;
+
+	e1000_clean_rx_ring(adapter, rx_ring);
+
+	vfree(rx_ring->buffer_info);
+	rx_ring->buffer_info = NULL;
+
+	pci_free_consistent(pdev, rx_ring->size, rx_ring->desc, rx_ring->dma);
+
+	rx_ring->desc = NULL;
+}
+
+/**
+ * e1000_free_all_rx_resources - Free Rx Resources for All Queues
+ * @adapter: board private structure
+ *
+ * Free all receive software resources
+ **/
+
+void e1000_free_all_rx_resources(struct e1000_adapter *adapter)
+{
+	int i;
+
+	for (i = 0; i < adapter->num_rx_queues; i++)
+		e1000_free_rx_resources(adapter, &adapter->rx_ring[i]);
+}
+
+/**
+ * e1000_clean_rx_ring - Free Rx Buffers per Queue
+ * @adapter: board private structure
+ * @rx_ring: ring to free buffers from
+ **/
+
+static void e1000_clean_rx_ring(struct e1000_adapter *adapter,
+				struct e1000_rx_ring *rx_ring)
+{
+	struct e1000_hw *hw = &adapter->hw;
+	struct e1000_buffer *buffer_info;
+	struct pci_dev *pdev = adapter->pdev;
+	unsigned long size;
+	unsigned int i;
+
+	/* Free all the Rx ring sk_buffs */
+	for (i = 0; i < rx_ring->count; i++) {
+		buffer_info = &rx_ring->buffer_info[i];
+		if (buffer_info->dma) {
+			pci_unmap_single(pdev,
+					 buffer_info->dma,
+					 buffer_info->length,
+					 PCI_DMA_FROMDEVICE);
+		}
+
+		buffer_info->dma = 0;
+
+		if (buffer_info->skb) {
+			dev_kfree_skb(buffer_info->skb);
+			buffer_info->skb = NULL;
+		}
+	}
+
+	size = sizeof(struct e1000_buffer) * rx_ring->count;
+	memset(rx_ring->buffer_info, 0, size);
+
+	/* Zero out the descriptor ring */
+
+	memset(rx_ring->desc, 0, rx_ring->size);
+
+	rx_ring->next_to_clean = 0;
+	rx_ring->next_to_use = 0;
+
+	writel(0, hw->hw_addr + rx_ring->rdh);
+	writel(0, hw->hw_addr + rx_ring->rdt);
+}
+
+/**
+ * e1000_clean_all_rx_rings - Free Rx Buffers for all queues
+ * @adapter: board private structure
+ **/
+
+static void e1000_clean_all_rx_rings(struct e1000_adapter *adapter)
+{
+	int i;
+
+	for (i = 0; i < adapter->num_rx_queues; i++)
+		e1000_clean_rx_ring(adapter, &adapter->rx_ring[i]);
+}
+
+/* The 82542 2.0 (revision 2) needs to have the receive unit in reset
+ * and memory write and invalidate disabled for certain operations
+ */
+static void e1000_enter_82542_rst(struct e1000_adapter *adapter)
+{
+	struct e1000_hw *hw = &adapter->hw;
+	struct net_device *netdev = adapter->netdev;
+	u32 rctl;
+
+	e1000_pci_clear_mwi(hw);
+
+	rctl = er32(RCTL);
+	rctl |= E1000_RCTL_RST;
+	ew32(RCTL, rctl);
+	E1000_WRITE_FLUSH();
+	mdelay(5);
+
+	if (!adapter->ecdev && netif_running(netdev))
+		e1000_clean_all_rx_rings(adapter);
+}
+
+static void e1000_leave_82542_rst(struct e1000_adapter *adapter)
+{
+	struct e1000_hw *hw = &adapter->hw;
+	struct net_device *netdev = adapter->netdev;
+	u32 rctl;
+
+	rctl = er32(RCTL);
+	rctl &= ~E1000_RCTL_RST;
+	ew32(RCTL, rctl);
+	E1000_WRITE_FLUSH();
+	mdelay(5);
+
+	if (hw->pci_cmd_word & PCI_COMMAND_INVALIDATE)
+		e1000_pci_set_mwi(hw);
+
+	if (adapter->ecdev || netif_running(netdev)) {
+		/* No need to loop, because 82542 supports only 1 queue */
+		struct e1000_rx_ring *ring = &adapter->rx_ring[0];
+		e1000_configure_rx(adapter);
+		if (adapter->ecdev) {
+			/* fill rx ring completely! */
+			adapter->alloc_rx_buf(adapter, ring, ring->count);
+		} else {
+			/* this one leaves the last ring element unallocated! */
+			adapter->alloc_rx_buf(adapter, ring, E1000_DESC_UNUSED(ring));
+		}
+
+	}
+}
+
+/**
+ * e1000_set_mac - Change the Ethernet Address of the NIC
+ * @netdev: network interface device structure
+ * @p: pointer to an address structure
+ *
+ * Returns 0 on success, negative on failure
+ **/
+
+static int e1000_set_mac(struct net_device *netdev, void *p)
+{
+	struct e1000_adapter *adapter = netdev_priv(netdev);
+	struct e1000_hw *hw = &adapter->hw;
+	struct sockaddr *addr = p;
+
+	if (!is_valid_ether_addr(addr->sa_data))
+		return -EADDRNOTAVAIL;
+
+	/* 82542 2.0 needs to be in reset to write receive address registers */
+
+	if (hw->mac_type == e1000_82542_rev2_0)
+		e1000_enter_82542_rst(adapter);
+
+	memcpy(netdev->dev_addr, addr->sa_data, netdev->addr_len);
+	memcpy(hw->mac_addr, addr->sa_data, netdev->addr_len);
+
+	e1000_rar_set(hw, hw->mac_addr, 0);
+
+	/* With 82571 controllers, LAA may be overwritten (with the default)
+	 * due to controller reset from the other port. */
+	if (hw->mac_type == e1000_82571) {
+		/* activate the work around */
+		hw->laa_is_present = 1;
+
+		/* Hold a copy of the LAA in RAR[14]. This is done so that
+		 * between the time RAR[0] gets clobbered and the time it
+		 * gets fixed (in e1000_watchdog), the actual LAA is in one
+		 * of the RARs and no incoming packets directed to this port
+		 * are dropped. Eventually the LAA will be in RAR[0] and
+		 * RAR[14]. */
+		e1000_rar_set(hw, hw->mac_addr,
+					E1000_RAR_ENTRIES - 1);
+	}
+
+	if (hw->mac_type == e1000_82542_rev2_0)
+		e1000_leave_82542_rst(adapter);
+
+	return 0;
+}
+
+/**
+ * e1000_set_rx_mode - Secondary Unicast, Multicast and Promiscuous mode set
+ * @netdev: network interface device structure
+ *
+ * The set_rx_mode entry point is called whenever the unicast or multicast
+ * address lists or the network interface flags are updated. This routine is
+ * responsible for configuring the hardware for proper unicast, multicast,
+ * promiscuous mode, and all-multi behavior.
+ **/
+
+static void e1000_set_rx_mode(struct net_device *netdev)
+{
+	struct e1000_adapter *adapter = netdev_priv(netdev);
+	struct e1000_hw *hw = &adapter->hw;
+	struct netdev_hw_addr *ha;
+	bool use_uc = false;
+	struct dev_addr_list *mc_ptr;
+	u32 rctl;
+	u32 hash_value;
+	int i, rar_entries = E1000_RAR_ENTRIES;
+	int mta_reg_count = (hw->mac_type == e1000_ich8lan) ?
+				E1000_NUM_MTA_REGISTERS_ICH8LAN :
+				E1000_NUM_MTA_REGISTERS;
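+	/* GFP_ATOMIC: the rx_mode handler may run with the address list
+	 * lock held */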
+	u32 *mcarray = kcalloc(mta_reg_count, sizeof(u32), GFP_ATOMIC);
+
+	if (!mcarray) {
+		DPRINTK(PROBE, ERR, "memory allocation failed\n");
+		return;
+	}
+
+	if (hw->mac_type == e1000_ich8lan)
+		rar_entries = E1000_RAR_ENTRIES_ICH8LAN;
+
+	/* reserve RAR[14] for LAA over-write work-around */
+	if (hw->mac_type == e1000_82571)
+		rar_entries--;
+
+	/* Check for Promiscuous and All Multicast modes */
+
+	rctl = er32(RCTL);
+
+	if (netdev->flags & IFF_PROMISC) {
+		rctl |= (E1000_RCTL_UPE | E1000_RCTL_MPE);
+		rctl &= ~E1000_RCTL_VFE;
+	} else {
+		if (netdev->flags & IFF_ALLMULTI) {
+			rctl |= E1000_RCTL_MPE;
+		} else {
+			rctl &= ~E1000_RCTL_MPE;
+		}
+		if (adapter->hw.mac_type != e1000_ich8lan)
+			rctl |= E1000_RCTL_VFE;
+	}
+
+	if (netdev->uc.count > rar_entries - 1) {
+		rctl |= E1000_RCTL_UPE;
+	} else if (!(netdev->flags & IFF_PROMISC)) {
+		rctl &= ~E1000_RCTL_UPE;
+		use_uc = true;
+	}
+
+	ew32(RCTL, rctl);
+
+	/* 82542 2.0 needs to be in reset to write receive address registers */
+
+	if (hw->mac_type == e1000_82542_rev2_0)
+		e1000_enter_82542_rst(adapter);
+
+	/* load the first 14 addresses into the exact filters 1-14. Unicast
+	 * addresses take precedence to avoid disabling unicast filtering
+	 * when possible.
+	 *
+	 * RAR 0 is used for the station MAC address.
+	 * If there are fewer than 14 addresses, go ahead and clear the filters
+	 * -- with 82571 controllers only entries 0-13 are filled here
+	 */
+	i = 1;
+	if (use_uc)
+		list_for_each_entry(ha, &netdev->uc.list, list) {
+			if (i == rar_entries)
+				break;
+			e1000_rar_set(hw, ha->addr, i++);
+		}
+
+	WARN_ON(i == rar_entries);
+
+	mc_ptr = netdev->mc_list;
+
+	for (; i < rar_entries; i++) {
+		if (mc_ptr) {
+			e1000_rar_set(hw, mc_ptr->da_addr, i);
+			mc_ptr = mc_ptr->next;
+		} else {
+			E1000_WRITE_REG_ARRAY(hw, RA, i << 1, 0);
+			E1000_WRITE_FLUSH();
+			E1000_WRITE_REG_ARRAY(hw, RA, (i << 1) + 1, 0);
+			E1000_WRITE_FLUSH();
+		}
+	}
+
+	/* load any remaining addresses into the hash table */
+
+	for (; mc_ptr; mc_ptr = mc_ptr->next) {
+		u32 hash_reg, hash_bit, mta;
+		hash_value = e1000_hash_mc_addr(hw, mc_ptr->da_addr);
+		hash_reg = (hash_value >> 5) & 0x7F;
+		hash_bit = hash_value & 0x1F;
+		mta = (1 << hash_bit);
+		mcarray[hash_reg] |= mta;
+	}
+
+	/* write the hash table completely; write from the bottom up to avoid
+	 * both stupid write-combining chipsets and flushing each write */
+	for (i = mta_reg_count - 1; i >= 0 ; i--) {
+		/*
+		 * The 82544 has an erratum where writing odd offsets
+		 * overwrites the previous even offset, but writing
+		 * backwards over the range solves the issue by always
+		 * writing the odd offset first.
+		 */
+		E1000_WRITE_REG_ARRAY(hw, MTA, i, mcarray[i]);
+	}
+	E1000_WRITE_FLUSH();
+
+	if (hw->mac_type == e1000_82542_rev2_0)
+		e1000_leave_82542_rst(adapter);
+
+	kfree(mcarray);
+}
+
+/* Need to wait a few seconds after link up to get diagnostic information from
+ * the phy */
+
+static void e1000_update_phy_info(unsigned long data)
+{
+	struct e1000_adapter *adapter = (struct e1000_adapter *)data;
+	struct e1000_hw *hw = &adapter->hw;
+	e1000_phy_get_info(hw, &adapter->phy_info);
+}
+
+/**
+ * e1000_82547_tx_fifo_stall - Timer Call-back
+ * @data: pointer to adapter cast into an unsigned long
+ **/
+
+static void e1000_82547_tx_fifo_stall(unsigned long data)
+{
+	struct e1000_adapter *adapter = (struct e1000_adapter *)data;
+	struct e1000_hw *hw = &adapter->hw;
+	struct net_device *netdev = adapter->netdev;
+	u32 tctl;
+
+	if (atomic_read(&adapter->tx_fifo_stall)) {
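+		/* reset the FIFO pointers only once both the descriptor ring
+		 * and the on-chip Tx FIFO have completely drained */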
+		if ((er32(TDT) == er32(TDH)) &&
+		   (er32(TDFT) == er32(TDFH)) &&
+		   (er32(TDFTS) == er32(TDFHS))) {
+			tctl = er32(TCTL);
+			ew32(TCTL, tctl & ~E1000_TCTL_EN);
+			ew32(TDFT, adapter->tx_head_addr);
+			ew32(TDFH, adapter->tx_head_addr);
+			ew32(TDFTS, adapter->tx_head_addr);
+			ew32(TDFHS, adapter->tx_head_addr);
+			ew32(TCTL, tctl);
+			E1000_WRITE_FLUSH();
+
+			adapter->tx_fifo_head = 0;
+			atomic_set(&adapter->tx_fifo_stall, 0);
+			if (!adapter->ecdev) 
+				netif_wake_queue(netdev);
+		} else {
+			if (!adapter->ecdev)
+				mod_timer(&adapter->tx_fifo_stall_timer, jiffies + 1);
+		}
+	}
+}
+
+/**
+ * e1000_watchdog - Timer Call-back
+ * @data: pointer to adapter cast into an unsigned long
+ **/
+static void e1000_watchdog(unsigned long data)
+{
+	struct e1000_adapter *adapter = (struct e1000_adapter *)data;
+	struct e1000_hw *hw = &adapter->hw;
+	struct net_device *netdev = adapter->netdev;
+	struct e1000_tx_ring *txdr = adapter->tx_ring;
+	u32 link, tctl;
+	s32 ret_val;
+
+	ret_val = e1000_check_for_link(hw);
+	if ((ret_val == E1000_ERR_PHY) &&
+	    (hw->phy_type == e1000_phy_igp_3) &&
+	    (er32(CTRL) & E1000_PHY_CTRL_GBE_DISABLE)) {
+		/* See e1000_kumeran_lock_loss_workaround() */
+		DPRINTK(LINK, INFO,
+			"Gigabit has been disabled, downgrading speed\n");
+	}
+
+	if (hw->mac_type == e1000_82573) {
+		e1000_enable_tx_pkt_filtering(hw);
+		if (adapter->mng_vlan_id != hw->mng_cookie.vlan_id)
+			e1000_update_mng_vlan(adapter);
+	}
+
+	if ((hw->media_type == e1000_media_type_internal_serdes) &&
+	   !(er32(TXCW) & E1000_TXCW_ANE))
+		link = !hw->serdes_link_down;
+	else
+		link = er32(STATUS) & E1000_STATUS_LU;
+
+	if (link) {
+		if ( (adapter->ecdev && !ecdev_get_link(adapter->ecdev))
+				|| (!adapter->ecdev && !netif_carrier_ok(netdev)) ) {
+			u32 ctrl;
+			bool txb2b = true;
+			e1000_get_speed_and_duplex(hw,
+			                           &adapter->link_speed,
+			                           &adapter->link_duplex);
+
+			ctrl = er32(CTRL);
+			printk(KERN_INFO "e1000: %s NIC Link is Up %d Mbps %s, "
+			       "Flow Control: %s\n",
+			       netdev->name,
+			       adapter->link_speed,
+			       adapter->link_duplex == FULL_DUPLEX ?
+			        "Full Duplex" : "Half Duplex",
+			        ((ctrl & E1000_CTRL_TFCE) && (ctrl &
+			        E1000_CTRL_RFCE)) ? "RX/TX" : ((ctrl &
+			        E1000_CTRL_RFCE) ? "RX" : ((ctrl &
+			        E1000_CTRL_TFCE) ? "TX" : "None" )));
+
+			/* tweak tx_queue_len according to speed/duplex
+			 * and adjust the timeout factor */
+			netdev->tx_queue_len = adapter->tx_queue_len;
+			adapter->tx_timeout_factor = 1;
+			switch (adapter->link_speed) {
+			case SPEED_10:
+				txb2b = false;
+				netdev->tx_queue_len = 10;
+				adapter->tx_timeout_factor = 8;
+				break;
+			case SPEED_100:
+				txb2b = false;
+				netdev->tx_queue_len = 100;
+				/* maybe add some timeout factor ? */
+				break;
+			}
+
+			if ((hw->mac_type == e1000_82571 ||
+			     hw->mac_type == e1000_82572) &&
+			    !txb2b) {
+				u32 tarc0;
+				tarc0 = er32(TARC0);
+				tarc0 &= ~(1 << 21);
+				ew32(TARC0, tarc0);
+			}
+
+			/* disable TSO for pcie and 10/100 speeds, to avoid
+			 * some hardware issues */
+			if (!adapter->tso_force &&
+			    hw->bus_type == e1000_bus_type_pci_express){
+				switch (adapter->link_speed) {
+				case SPEED_10:
+				case SPEED_100:
+					DPRINTK(PROBE,INFO,
+				        "10/100 speed: disabling TSO\n");
+					netdev->features &= ~NETIF_F_TSO;
+					netdev->features &= ~NETIF_F_TSO6;
+					break;
+				case SPEED_1000:
+					netdev->features |= NETIF_F_TSO;
+					netdev->features |= NETIF_F_TSO6;
+					break;
+				default:
+					/* oops */
+					break;
+				}
+			}
+
+			/* enable transmits in the hardware, need to do this
+			 * after setting TARC0 */
+			tctl = er32(TCTL);
+			tctl |= E1000_TCTL_EN;
+			ew32(TCTL, tctl);
+
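+			/* report the new link state to the EtherCAT master if
+			 * the device is claimed, otherwise notify the stack */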
+			if (adapter->ecdev) {
+				ecdev_set_link(adapter->ecdev, 1);
+			} else {
+				netif_carrier_on(netdev);
+				mod_timer(&adapter->phy_info_timer, round_jiffies(jiffies + 2 * HZ));
+			}
+			adapter->smartspeed = 0;
+		} else {
+			/* make sure the receive unit is started */
+			if (hw->rx_needs_kicking) {
+				u32 rctl = er32(RCTL);
+				ew32(RCTL, rctl | E1000_RCTL_EN);
+			}
+		}
+	} else {
+		if ( (adapter->ecdev && ecdev_get_link(adapter->ecdev))
+				|| (!adapter->ecdev && netif_carrier_ok(netdev)) ) {
+			adapter->link_speed = 0;
+			adapter->link_duplex = 0;
+			printk(KERN_INFO "e1000: %s NIC Link is Down\n",
+			       netdev->name);
+			if (adapter->ecdev) {
+				ecdev_set_link(adapter->ecdev, 0);
+			} else {
+				netif_carrier_off(netdev);
+				mod_timer(&adapter->phy_info_timer, round_jiffies(jiffies + 2 * HZ));
+			}
+
+			/* 80003ES2LAN workaround --
+			 * as part of the packet buffer work-around on a link
+			 * down event, disable receives in the ISR and
+			 * reset the device here in the watchdog
+			 */
+			if (hw->mac_type == e1000_80003es2lan)
+				/* reset device */
+				schedule_work(&adapter->reset_task);
+		}
+
+		e1000_smartspeed(adapter);
+	}
+
+	e1000_update_stats(adapter);
+
+	hw->tx_packet_delta = adapter->stats.tpt - adapter->tpt_old;
+	adapter->tpt_old = adapter->stats.tpt;
+	hw->collision_delta = adapter->stats.colc - adapter->colc_old;
+	adapter->colc_old = adapter->stats.colc;
+
+	adapter->gorcl = adapter->stats.gorcl - adapter->gorcl_old;
+	adapter->gorcl_old = adapter->stats.gorcl;
+	adapter->gotcl = adapter->stats.gotcl - adapter->gotcl_old;
+	adapter->gotcl_old = adapter->stats.gotcl;
+
+	e1000_update_adaptive(hw);
+
+	if (!adapter->ecdev && !netif_carrier_ok(netdev)) {
+		if (E1000_DESC_UNUSED(txdr) + 1 < txdr->count) {
+			/* We've lost link, so the controller stops DMA,
+			 * but we've got queued Tx work that's never going
+			 * to get done, so reset controller to flush Tx.
+			 * (Do the reset outside of interrupt context). */
+			adapter->tx_timeout_count++;
+			schedule_work(&adapter->reset_task);
+			/* return immediately since reset is imminent */
+			return;
+		}
+	}
+
+	/* Cause software interrupt to ensure rx ring is cleaned */
+	ew32(ICS, E1000_ICS_RXDMT0);
+
+	/* Force detection of hung controller every watchdog period */
+	if (!adapter->ecdev) adapter->detect_tx_hung = true;
+
+	/* With 82571 controllers, LAA may be overwritten due to controller
+	 * reset from the other port. Set the appropriate LAA in RAR[0] */
+	if (hw->mac_type == e1000_82571 && hw->laa_is_present)
+		e1000_rar_set(hw, hw->mac_addr, 0);
+
+	/* Reset the timer */
+	if (!adapter->ecdev)
+		mod_timer(&adapter->watchdog_timer, round_jiffies(jiffies + 2 * HZ));
+}
+
+enum latency_range {
+	lowest_latency = 0,
+	low_latency = 1,
+	bulk_latency = 2,
+	latency_invalid = 255
+};
+
+/**
+ * e1000_update_itr - update the dynamic ITR value based on statistics
+ *      Stores a new ITR value based on packets and byte
+ *      counts during the last interrupt.  The advantage of per interrupt
+ *      computation is faster updates and more accurate ITR for the current
+ *      traffic pattern.  Constants in this function were computed
+ *      based on theoretical maximum wire speed and thresholds were set based
+ *      on testing data as well as attempting to minimize response time
+ *      while increasing bulk throughput.
+ *      this functionality is controlled by the InterruptThrottleRate module
+ *      parameter (see e1000_param.c)
+ * @adapter: pointer to adapter
+ * @itr_setting: current adapter->itr
+ * @packets: the number of packets during this measurement interval
+ * @bytes: the number of bytes during this measurement interval
+ **/
+static unsigned int e1000_update_itr(struct e1000_adapter *adapter,
+				     u16 itr_setting, int packets, int bytes)
+{
+	unsigned int retval = itr_setting;
+	struct e1000_hw *hw = &adapter->hw;
+
+	if (unlikely(hw->mac_type < e1000_82540))
+		goto update_itr_done;
+
+	if (packets == 0)
+		goto update_itr_done;
+
+	switch (itr_setting) {
+	case lowest_latency:
+		/* jumbo frames get bulk treatment */
+		if (bytes/packets > 8000)
+			retval = bulk_latency;
+		else if ((packets < 5) && (bytes > 512))
+			retval = low_latency;
+		break;
+	case low_latency:  /* 50 usec aka 20000 ints/s */
+		if (bytes > 10000) {
+			/* jumbo frames need bulk latency setting */
+			if (bytes/packets > 8000)
+				retval = bulk_latency;
+			else if ((packets < 10) || ((bytes/packets) > 1200))
+				retval = bulk_latency;
+			else if ((packets > 35))
+				retval = lowest_latency;
+		} else if (bytes/packets > 2000)
+			retval = bulk_latency;
+		else if (packets <= 2 && bytes < 512)
+			retval = lowest_latency;
+		break;
+	case bulk_latency: /* 250 usec aka 4000 ints/s */
+		if (bytes > 25000) {
+			if (packets > 35)
+				retval = low_latency;
+		} else if (bytes < 6000) {
+			retval = low_latency;
+		}
+		break;
+	}
+
+update_itr_done:
+	return retval;
+}
+
+static void e1000_set_itr(struct e1000_adapter *adapter)
+{
+	struct e1000_hw *hw = &adapter->hw;
+	u16 current_itr;
+	u32 new_itr = adapter->itr;
+
+	if (unlikely(hw->mac_type < e1000_82540))
+		return;
+
+	/* for non-gigabit speeds, just fix the interrupt rate at 4000 */
+	if (unlikely(adapter->link_speed != SPEED_1000)) {
+		current_itr = 0;
+		new_itr = 4000;
+		goto set_itr_now;
+	}
+
+	adapter->tx_itr = e1000_update_itr(adapter,
+	                            adapter->tx_itr,
+	                            adapter->total_tx_packets,
+	                            adapter->total_tx_bytes);
+	/* conservative mode (itr 3) eliminates the lowest_latency setting */
+	if (adapter->itr_setting == 3 && adapter->tx_itr == lowest_latency)
+		adapter->tx_itr = low_latency;
+
+	adapter->rx_itr = e1000_update_itr(adapter,
+	                            adapter->rx_itr,
+	                            adapter->total_rx_packets,
+	                            adapter->total_rx_bytes);
+	/* conservative mode (itr 3) eliminates the lowest_latency setting */
+	if (adapter->itr_setting == 3 && adapter->rx_itr == lowest_latency)
+		adapter->rx_itr = low_latency;
+
+	current_itr = max(adapter->rx_itr, adapter->tx_itr);
+
+	switch (current_itr) {
+	/* counts and packets in update_itr are dependent on these numbers */
+	case lowest_latency:
+		new_itr = 70000;
+		break;
+	case low_latency:
+		new_itr = 20000; /* aka hwitr = ~200 */
+		break;
+	case bulk_latency:
+		new_itr = 4000;
+		break;
+	default:
+		break;
+	}
+
+set_itr_now:
+	if (new_itr != adapter->itr) {
+		/* this attempts to bias the interrupt rate towards Bulk
+		 * by adding intermediate steps when interrupt rate is
+		 * increasing */
+		new_itr = new_itr > adapter->itr ?
+		             min(adapter->itr + (new_itr >> 2), new_itr) :
+		             new_itr;
+		adapter->itr = new_itr;
+		ew32(ITR, 1000000000 / (new_itr * 256));
+	}
+
+	return;
+}
+
+#define E1000_TX_FLAGS_CSUM		0x00000001
+#define E1000_TX_FLAGS_VLAN		0x00000002
+#define E1000_TX_FLAGS_TSO		0x00000004
+#define E1000_TX_FLAGS_IPV4		0x00000008
+#define E1000_TX_FLAGS_VLAN_MASK	0xffff0000
+#define E1000_TX_FLAGS_VLAN_SHIFT	16
+
+static int e1000_tso(struct e1000_adapter *adapter,
+		     struct e1000_tx_ring *tx_ring, struct sk_buff *skb)
+{
+	struct e1000_context_desc *context_desc;
+	struct e1000_buffer *buffer_info;
+	unsigned int i;
+	u32 cmd_length = 0;
+	u16 ipcse = 0, tucse, mss;
+	u8 ipcss, ipcso, tucss, tucso, hdr_len;
+	int err;
+
+	if (skb_is_gso(skb)) {
+		if (skb_header_cloned(skb)) {
+			err = pskb_expand_head(skb, 0, 0, GFP_ATOMIC);
+			if (err)
+				return err;
+		}
+
+		hdr_len = skb_transport_offset(skb) + tcp_hdrlen(skb);
+		mss = skb_shinfo(skb)->gso_size;
+		if (skb->protocol == htons(ETH_P_IP)) {
+			struct iphdr *iph = ip_hdr(skb);
+			iph->tot_len = 0;
+			iph->check = 0;
+			tcp_hdr(skb)->check = ~csum_tcpudp_magic(iph->saddr,
+								 iph->daddr, 0,
+								 IPPROTO_TCP,
+								 0);
+			cmd_length = E1000_TXD_CMD_IP;
+			ipcse = skb_transport_offset(skb) - 1;
+		} else if (skb->protocol == htons(ETH_P_IPV6)) {
+			ipv6_hdr(skb)->payload_len = 0;
+			tcp_hdr(skb)->check =
+				~csum_ipv6_magic(&ipv6_hdr(skb)->saddr,
+						 &ipv6_hdr(skb)->daddr,
+						 0, IPPROTO_TCP, 0);
+			ipcse = 0;
+		}
+		ipcss = skb_network_offset(skb);
+		ipcso = (void *)&(ip_hdr(skb)->check) - (void *)skb->data;
+		tucss = skb_transport_offset(skb);
+		tucso = (void *)&(tcp_hdr(skb)->check) - (void *)skb->data;
+		tucse = 0;
+
+		cmd_length |= (E1000_TXD_CMD_DEXT | E1000_TXD_CMD_TSE |
+			       E1000_TXD_CMD_TCP | (skb->len - (hdr_len)));
+
+		i = tx_ring->next_to_use;
+		context_desc = E1000_CONTEXT_DESC(*tx_ring, i);
+		buffer_info = &tx_ring->buffer_info[i];
+
+		context_desc->lower_setup.ip_fields.ipcss  = ipcss;
+		context_desc->lower_setup.ip_fields.ipcso  = ipcso;
+		context_desc->lower_setup.ip_fields.ipcse  = cpu_to_le16(ipcse);
+		context_desc->upper_setup.tcp_fields.tucss = tucss;
+		context_desc->upper_setup.tcp_fields.tucso = tucso;
+		context_desc->upper_setup.tcp_fields.tucse = cpu_to_le16(tucse);
+		context_desc->tcp_seg_setup.fields.mss     = cpu_to_le16(mss);
+		context_desc->tcp_seg_setup.fields.hdr_len = hdr_len;
+		context_desc->cmd_and_length = cpu_to_le32(cmd_length);
+
+		buffer_info->time_stamp = jiffies;
+		buffer_info->next_to_watch = i;
+
+		if (++i == tx_ring->count) i = 0;
+		tx_ring->next_to_use = i;
+
+		return true;
+	}
+	return false;
+}
+
+static bool e1000_tx_csum(struct e1000_adapter *adapter,
+			  struct e1000_tx_ring *tx_ring, struct sk_buff *skb)
+{
+	struct e1000_context_desc *context_desc;
+	struct e1000_buffer *buffer_info;
+	unsigned int i;
+	u8 css;
+	u32 cmd_len = E1000_TXD_CMD_DEXT;
+
+	if (skb->ip_summed != CHECKSUM_PARTIAL)
+		return false;
+
+	switch (skb->protocol) {
+	case cpu_to_be16(ETH_P_IP):
+		if (ip_hdr(skb)->protocol == IPPROTO_TCP)
+			cmd_len |= E1000_TXD_CMD_TCP;
+		break;
+	case cpu_to_be16(ETH_P_IPV6):
+		/* XXX not handling all IPV6 headers */
+		if (ipv6_hdr(skb)->nexthdr == IPPROTO_TCP)
+			cmd_len |= E1000_TXD_CMD_TCP;
+		break;
+	default:
+		if (unlikely(net_ratelimit()))
+			DPRINTK(DRV, WARNING,
+			        "checksum_partial proto=%x!\n", skb->protocol);
+		break;
+	}
+
+	css = skb_transport_offset(skb);
+
+	i = tx_ring->next_to_use;
+	buffer_info = &tx_ring->buffer_info[i];
+	context_desc = E1000_CONTEXT_DESC(*tx_ring, i);
+
+	context_desc->lower_setup.ip_config = 0;
+	context_desc->upper_setup.tcp_fields.tucss = css;
+	context_desc->upper_setup.tcp_fields.tucso =
+		css + skb->csum_offset;
+	context_desc->upper_setup.tcp_fields.tucse = 0;
+	context_desc->tcp_seg_setup.data = 0;
+	context_desc->cmd_and_length = cpu_to_le32(cmd_len);
+
+	buffer_info->time_stamp = jiffies;
+	buffer_info->next_to_watch = i;
+
+	if (unlikely(++i == tx_ring->count)) i = 0;
+	tx_ring->next_to_use = i;
+
+	return true;
+}
+
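+/* each Tx data descriptor can carry at most 2^12 = 4096 bytes */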
+#define E1000_MAX_TXD_PWR	12
+#define E1000_MAX_DATA_PER_TXD	(1<<E1000_MAX_TXD_PWR)
+
+static int e1000_tx_map(struct e1000_adapter *adapter,
+			struct e1000_tx_ring *tx_ring,
+			struct sk_buff *skb, unsigned int first,
+			unsigned int max_per_txd, unsigned int nr_frags,
+			unsigned int mss)
+{
+	struct e1000_hw *hw = &adapter->hw;
+	struct e1000_buffer *buffer_info;
+	unsigned int len = skb_headlen(skb);
+	unsigned int offset, size, count = 0, i;
+	unsigned int f;
+	dma_addr_t *map;
+
+	i = tx_ring->next_to_use;
+
+	if (skb_dma_map(&adapter->pdev->dev, skb, DMA_TO_DEVICE)) {
+		dev_err(&adapter->pdev->dev, "TX DMA map failed\n");
+		return 0;
+	}
+
+	map = skb_shinfo(skb)->dma_maps;
+	offset = 0;
+
+	while (len) {
+		buffer_info = &tx_ring->buffer_info[i];
+		size = min(len, max_per_txd);
+		/* Workaround for Controller erratum --
+		 * descriptor for non-tso packet in a linear SKB that follows a
+		 * tso gets written back prematurely before the data is fully
+		 * DMA'd to the controller */
+		if (!skb->data_len && tx_ring->last_tx_tso &&
+		    !skb_is_gso(skb)) {
+			tx_ring->last_tx_tso = 0;
+			size -= 4;
+		}
+
+		/* Workaround for premature desc write-backs
+		 * in TSO mode.  Append 4-byte sentinel desc */
+		if (unlikely(mss && !nr_frags && size == len && size > 8))
+			size -= 4;
+		/* work-around for errata 10, which applies
+		 * to all controllers in PCI-X mode.
+		 * The fix is to make sure that the first descriptor of a
+		 * packet is smaller than 2048 - 16 - 16 (or 2016) bytes
+		 */
+		if (unlikely((hw->bus_type == e1000_bus_type_pcix) &&
+		                (size > 2015) && count == 0))
+		        size = 2015;
+
+		/* Workaround for potential 82544 hang in PCI-X.  Avoid
+		 * terminating buffers within evenly-aligned dwords. */
+		if (unlikely(adapter->pcix_82544 &&
+		   !((unsigned long)(skb->data + offset + size - 1) & 4) &&
+		   size > 4))
+			size -= 4;
+
+		buffer_info->length = size;
+		buffer_info->dma = skb_shinfo(skb)->dma_head + offset;
+		buffer_info->time_stamp = jiffies;
+		buffer_info->next_to_watch = i;
+
+		len -= size;
+		offset += size;
+		count++;
+		if (len) {
+			i++;
+			if (unlikely(i == tx_ring->count))
+				i = 0;
+		}
+	}
+
+	for (f = 0; f < nr_frags; f++) {
+		struct skb_frag_struct *frag;
+
+		frag = &skb_shinfo(skb)->frags[f];
+		len = frag->size;
+		offset = 0;
+
+		while (len) {
+			i++;
+			if (unlikely(i == tx_ring->count))
+				i = 0;
+
+			buffer_info = &tx_ring->buffer_info[i];
+			size = min(len, max_per_txd);
+			/* Workaround for premature desc write-backs
+			 * in TSO mode.  Append 4-byte sentinel desc */
+			if (unlikely(mss && f == (nr_frags-1) && size == len && size > 8))
+				size -= 4;
+			/* Workaround for potential 82544 hang in PCI-X.
+			 * Avoid terminating buffers within evenly-aligned
+			 * dwords. */
+			if (unlikely(adapter->pcix_82544 &&
+			   !((unsigned long)(frag->page+offset+size-1) & 4) &&
+			   size > 4))
+				size -= 4;
+
+			buffer_info->length = size;
+			buffer_info->dma = map[f] + offset;
+			buffer_info->time_stamp = jiffies;
+			buffer_info->next_to_watch = i;
+
+			len -= size;
+			offset += size;
+			count++;
+		}
+	}
+
+	tx_ring->buffer_info[i].skb = skb;
+	tx_ring->buffer_info[first].next_to_watch = i;
+
+	return count;
+}
+
+static void e1000_tx_queue(struct e1000_adapter *adapter,
+			   struct e1000_tx_ring *tx_ring, int tx_flags,
+			   int count)
+{
+	struct e1000_hw *hw = &adapter->hw;
+	struct e1000_tx_desc *tx_desc = NULL;
+	struct e1000_buffer *buffer_info;
+	u32 txd_upper = 0, txd_lower = E1000_TXD_CMD_IFCS;
+	unsigned int i;
+
+	if (likely(tx_flags & E1000_TX_FLAGS_TSO)) {
+		txd_lower |= E1000_TXD_CMD_DEXT | E1000_TXD_DTYP_D |
+		             E1000_TXD_CMD_TSE;
+		txd_upper |= E1000_TXD_POPTS_TXSM << 8;
+
+		if (likely(tx_flags & E1000_TX_FLAGS_IPV4))
+			txd_upper |= E1000_TXD_POPTS_IXSM << 8;
+	}
+
+	if (likely(tx_flags & E1000_TX_FLAGS_CSUM)) {
+		txd_lower |= E1000_TXD_CMD_DEXT | E1000_TXD_DTYP_D;
+		txd_upper |= E1000_TXD_POPTS_TXSM << 8;
+	}
+
+	if (unlikely(tx_flags & E1000_TX_FLAGS_VLAN)) {
+		txd_lower |= E1000_TXD_CMD_VLE;
+		txd_upper |= (tx_flags & E1000_TX_FLAGS_VLAN_MASK);
+	}
+
+	i = tx_ring->next_to_use;
+
+	while (count--) {
+		buffer_info = &tx_ring->buffer_info[i];
+		tx_desc = E1000_TX_DESC(*tx_ring, i);
+		tx_desc->buffer_addr = cpu_to_le64(buffer_info->dma);
+		tx_desc->lower.data =
+			cpu_to_le32(txd_lower | buffer_info->length);
+		tx_desc->upper.data = cpu_to_le32(txd_upper);
+		if (unlikely(++i == tx_ring->count)) i = 0;
+	}
+
+	tx_desc->lower.data |= cpu_to_le32(adapter->txd_cmd);
+
+	/* Force memory writes to complete before letting h/w
+	 * know there are new descriptors to fetch.  (Only
+	 * applicable for weak-ordered memory model archs,
+	 * such as IA-64). */
+	wmb();
+
+	tx_ring->next_to_use = i;
+	writel(i, hw->hw_addr + tx_ring->tdt);
+	/* we need this if more than one processor can write to our tail
+	 * at a time; it synchronizes IO on IA64/Altix systems */
+	mmiowb();
+}
+
+/**
+ * 82547 workaround to avoid controller hang in half-duplex environment.
+ * The workaround is to avoid queuing a large packet that would span
+ * the internal Tx FIFO ring boundary by notifying the stack to resend
+ * the packet at a later time.  This gives the Tx FIFO an opportunity to
+ * flush all packets.  When that occurs, we reset the Tx FIFO pointers
+ * to the beginning of the Tx FIFO.
+ **/
+
+#define E1000_FIFO_HDR			0x10
+#define E1000_82547_PAD_LEN		0x3E0
+
+static int e1000_82547_fifo_workaround(struct e1000_adapter *adapter,
+				       struct sk_buff *skb)
+{
+	u32 fifo_space = adapter->tx_fifo_size - adapter->tx_fifo_head;
+	u32 skb_fifo_len = skb->len + E1000_FIFO_HDR;
+
+	skb_fifo_len = ALIGN(skb_fifo_len, E1000_FIFO_HDR);
+
+	if (adapter->link_duplex != HALF_DUPLEX)
+		goto no_fifo_stall_required;
+
+	if (atomic_read(&adapter->tx_fifo_stall))
+		return 1;
+
+	if (skb_fifo_len >= (E1000_82547_PAD_LEN + fifo_space)) {
+		atomic_set(&adapter->tx_fifo_stall, 1);
+		return 1;
+	}
+
+no_fifo_stall_required:
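+	/* account for this packet's padded footprint in the Tx FIFO,
+	 * wrapping around the FIFO size if necessary */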
+	adapter->tx_fifo_head += skb_fifo_len;
+	if (adapter->tx_fifo_head >= adapter->tx_fifo_size)
+		adapter->tx_fifo_head -= adapter->tx_fifo_size;
+	return 0;
+}
+
+#define MINIMUM_DHCP_PACKET_SIZE 282
+static int e1000_transfer_dhcp_info(struct e1000_adapter *adapter,
+				    struct sk_buff *skb)
+{
+	struct e1000_hw *hw =  &adapter->hw;
+	u16 length, offset;
+	if (vlan_tx_tag_present(skb)) {
+		if (!((vlan_tx_tag_get(skb) == hw->mng_cookie.vlan_id) &&
+			( hw->mng_cookie.status &
+			  E1000_MNG_DHCP_COOKIE_STATUS_VLAN_SUPPORT)) )
+			return 0;
+	}
+	if (skb->len > MINIMUM_DHCP_PACKET_SIZE) {
+		struct ethhdr *eth = (struct ethhdr *)skb->data;
+		if ((htons(ETH_P_IP) == eth->h_proto)) {
+			const struct iphdr *ip =
+				(struct iphdr *)((u8 *)skb->data+14);
+			if (IPPROTO_UDP == ip->protocol) {
+				struct udphdr *udp =
+					(struct udphdr *)((u8 *)ip +
+						(ip->ihl << 2));
+				if (ntohs(udp->dest) == 67) {
+					offset = (u8 *)udp + 8 - skb->data;
+					length = skb->len - offset;
+
+					return e1000_mng_write_dhcp_info(hw,
+							(u8 *)udp + 8,
+							length);
+				}
+			}
+		}
+	}
+	return 0;
+}
+
+static int __e1000_maybe_stop_tx(struct net_device *netdev, int size)
+{
+	struct e1000_adapter *adapter = netdev_priv(netdev);
+	struct e1000_tx_ring *tx_ring = adapter->tx_ring;
+
+	netif_stop_queue(netdev);
+	/* Herbert's original patch had:
+	 *  smp_mb__after_netif_stop_queue();
+	 * but since that doesn't exist yet, just open code it. */
+	smp_mb();
+
+	/* We need to check again in a case another CPU has just
+	 * made room available. */
+	if (likely(E1000_DESC_UNUSED(tx_ring) < size))
+		return -EBUSY;
+
+	/* A reprieve! */
+	netif_start_queue(netdev);
+	++adapter->restart_queue;
+	return 0;
+}
+
+static int e1000_maybe_stop_tx(struct net_device *netdev,
+                               struct e1000_tx_ring *tx_ring, int size)
+{
+	if (likely(E1000_DESC_UNUSED(tx_ring) >= size))
+		return 0;
+	return __e1000_maybe_stop_tx(netdev, size);
+}
+
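+/* worst-case number of descriptors needed to map S bytes when each
+ * descriptor carries at most 2^X bytes */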
+#define TXD_USE_COUNT(S, X) (((S) >> (X)) + 1 )
+static int e1000_xmit_frame(struct sk_buff *skb, struct net_device *netdev)
+{
+	struct e1000_adapter *adapter = netdev_priv(netdev);
+	struct e1000_hw *hw = &adapter->hw;
+	struct e1000_tx_ring *tx_ring;
+	unsigned int first, max_per_txd = E1000_MAX_DATA_PER_TXD;
+	unsigned int max_txd_pwr = E1000_MAX_TXD_PWR;
+	unsigned int tx_flags = 0;
+	unsigned int len = skb->len - skb->data_len;
+	unsigned int nr_frags = 0;
+	unsigned int mss = 0;
+	int count = 0;
+	int tso;
+	unsigned int f;
+
+	/* This goes back to the question of how to logically map a tx queue
+	 * to a flow.  Right now, performance is impacted slightly negatively
+	 * if using multiple tx queues.  If the stack breaks away from a
+	 * single qdisc implementation, we can look at this again. */
+	tx_ring = adapter->tx_ring;
+
+	if (unlikely(skb->len <= 0)) {
+		if (!adapter->ecdev)
+			dev_kfree_skb_any(skb);
+		return NETDEV_TX_OK;
+	}
+
+	/* 82571 and newer doesn't need the workaround that limited descriptor
+	 * length to 4kB */
+	if (hw->mac_type >= e1000_82571)
+		max_per_txd = 8192;
+
+	mss = skb_shinfo(skb)->gso_size;
+	/* The controller does a simple calculation to
+	 * make sure there is enough room in the FIFO before
+	 * initiating the DMA for each buffer.  The calc is:
+	 * 4 = ceil(buffer len/mss).  To make sure we don't
+	 * overrun the FIFO, adjust the max buffer len if mss
+	 * drops. */
+	if (mss) {
+		u8 hdr_len;
+		max_per_txd = min(mss << 2, max_per_txd);
+		max_txd_pwr = fls(max_per_txd) - 1;
+
+		/* TSO Workaround for 82571/2/3 Controllers -- if skb->data
+		* points to just header, pull a few bytes of payload from
+		* frags into skb->data */
+		hdr_len = skb_transport_offset(skb) + tcp_hdrlen(skb);
+		if (skb->data_len && hdr_len == len) {
+			switch (hw->mac_type) {
+				unsigned int pull_size;
+			case e1000_82544:
+				/* Make sure we have room to chop off 4 bytes,
+				 * and that the end alignment will work out to
+				 * this hardware's requirements.
+				 * NOTE: this is a TSO-only workaround;
+				 * if the end byte alignment is not correct,
+				 * move us into the next dword */
+				if ((unsigned long)(skb_tail_pointer(skb) - 1) & 4)
+					break;
+				/* fall through */
+			case e1000_82571:
+			case e1000_82572:
+			case e1000_82573:
+			case e1000_ich8lan:
+				pull_size = min((unsigned int)4, skb->data_len);
+				if (!__pskb_pull_tail(skb, pull_size)) {
+					DPRINTK(DRV, ERR,
+						"__pskb_pull_tail failed.\n");
+					dev_kfree_skb_any(skb);
+					return NETDEV_TX_OK;
+				}
+				len = skb->len - skb->data_len;
+				break;
+			default:
+				/* do nothing */
+				break;
+			}
+		}
+	}
+
+	/* reserve a descriptor for the offload context */
+	if ((mss) || (skb->ip_summed == CHECKSUM_PARTIAL))
+		count++;
+	count++;
+
+	/* Controller Erratum workaround */
+	if (!skb->data_len && tx_ring->last_tx_tso && !skb_is_gso(skb))
+		count++;
+
+	count += TXD_USE_COUNT(len, max_txd_pwr);
+
+	if (adapter->pcix_82544)
+		count++;
+
+	/* Work-around for errata 10: it applies to all controllers
+	 * in PCI-X mode, so add one more descriptor to the count
+	 */
+	if (unlikely((hw->bus_type == e1000_bus_type_pcix) &&
+			(len > 2015)))
+		count++;
+
+	nr_frags = skb_shinfo(skb)->nr_frags;
+	for (f = 0; f < nr_frags; f++)
+		count += TXD_USE_COUNT(skb_shinfo(skb)->frags[f].size,
+				       max_txd_pwr);
+	if (adapter->pcix_82544)
+		count += nr_frags;
+
+
+	if (hw->tx_pkt_filtering &&
+	    (hw->mac_type == e1000_82573))
+		e1000_transfer_dhcp_info(adapter, skb);
+
+	/* need: count + 2 desc gap to keep tail from touching
+	 * head, otherwise try next time */
+	if (unlikely(e1000_maybe_stop_tx(netdev, tx_ring, count + 2)))
+		return NETDEV_TX_BUSY;
+
+	if (unlikely(hw->mac_type == e1000_82547)) {
+		if (unlikely(e1000_82547_fifo_workaround(adapter, skb))) {
+			if (!adapter->ecdev) {
+				netif_stop_queue(netdev);
+				mod_timer(&adapter->tx_fifo_stall_timer, jiffies + 1);
+			}
+			return NETDEV_TX_BUSY;
+		}
+	}
+
+	if (unlikely(adapter->vlgrp && vlan_tx_tag_present(skb))) {
+		tx_flags |= E1000_TX_FLAGS_VLAN;
+		tx_flags |= (vlan_tx_tag_get(skb) << E1000_TX_FLAGS_VLAN_SHIFT);
+	}
+
+	first = tx_ring->next_to_use;
+
+	tso = e1000_tso(adapter, tx_ring, skb);
+	if (tso < 0) {
+		if (!adapter->ecdev) {
+			dev_kfree_skb_any(skb);
+		}
+		return NETDEV_TX_OK;
+	}
+
+	if (likely(tso)) {
+		tx_ring->last_tx_tso = 1;
+		tx_flags |= E1000_TX_FLAGS_TSO;
+	} else if (likely(e1000_tx_csum(adapter, tx_ring, skb)))
+		tx_flags |= E1000_TX_FLAGS_CSUM;
+
+	/* The old method was to assume an IPv4 packet by default if TSO was
+	 * enabled.  Since 82571 hardware supports TSO for IPv6 as well, we can
+	 * no longer make that assumption and must check the protocol. */
+	if (likely(skb->protocol == htons(ETH_P_IP)))
+		tx_flags |= E1000_TX_FLAGS_IPV4;
+
+	count = e1000_tx_map(adapter, tx_ring, skb, first, max_per_txd,
+	                     nr_frags, mss);
+
+	if (count) {
+		e1000_tx_queue(adapter, tx_ring, tx_flags, count);
+		if (!adapter->ecdev) {
+			/* Make sure there is space in the ring for the next send. */
+			e1000_maybe_stop_tx(netdev, tx_ring, MAX_SKB_FRAGS + 2);
+		}
+
+	} else {
+		if (!adapter->ecdev) dev_kfree_skb_any(skb);
+		tx_ring->buffer_info[first].time_stamp = 0;
+		tx_ring->next_to_use = first;
+	}
+
+	return NETDEV_TX_OK;
+}
+
+/**
+ * e1000_tx_timeout - Respond to a Tx Hang
+ * @netdev: network interface device structure
+ **/
+
+static void e1000_tx_timeout(struct net_device *netdev)
+{
+	struct e1000_adapter *adapter = netdev_priv(netdev);
+
+	/* Do the reset outside of interrupt context */
+	adapter->tx_timeout_count++;
+	schedule_work(&adapter->reset_task);
+}
+
+static void e1000_reset_task(struct work_struct *work)
+{
+	struct e1000_adapter *adapter =
+		container_of(work, struct e1000_adapter, reset_task);
+
+	e1000_reinit_locked(adapter);
+}
+
+/**
+ * e1000_get_stats - Get System Network Statistics
+ * @netdev: network interface device structure
+ *
+ * Returns the address of the device statistics structure.
+ * The statistics are actually updated from the timer callback.
+ **/
+
+static struct net_device_stats *e1000_get_stats(struct net_device *netdev)
+{
+	struct e1000_adapter *adapter = netdev_priv(netdev);
+
+	/* only return the current stats */
+	return &adapter->net_stats;
+}
+
+/**
+ * e1000_change_mtu - Change the Maximum Transfer Unit
+ * @netdev: network interface device structure
+ * @new_mtu: new value for maximum frame size
+ *
+ * Returns 0 on success, negative on failure
+ **/
+
+static int e1000_change_mtu(struct net_device *netdev, int new_mtu)
+{
+	struct e1000_adapter *adapter = netdev_priv(netdev);
+	struct e1000_hw *hw = &adapter->hw;
+	int max_frame = new_mtu + ENET_HEADER_SIZE + ETHERNET_FCS_SIZE;
+	u16 eeprom_data = 0;
+
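+	/* the MTU cannot be changed while the device is claimed by the
+	 * EtherCAT master */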
+	if (adapter->ecdev)
+		return -EBUSY;
+
+	if ((max_frame < MINIMUM_ETHERNET_FRAME_SIZE) ||
+	    (max_frame > MAX_JUMBO_FRAME_SIZE)) {
+		DPRINTK(PROBE, ERR, "Invalid MTU setting\n");
+		return -EINVAL;
+	}
+
+	/* Adapter-specific max frame size limits. */
+	switch (hw->mac_type) {
+	case e1000_undefined ... e1000_82542_rev2_1:
+	case e1000_ich8lan:
+		if (max_frame > MAXIMUM_ETHERNET_FRAME_SIZE) {
+			DPRINTK(PROBE, ERR, "Jumbo Frames not supported.\n");
+			return -EINVAL;
+		}
+		break;
+	case e1000_82573:
+		/* Jumbo Frames not supported if:
+		 * - this is not an 82573L device
+		 * - ASPM is enabled in any way (0x1A bits 3:2) */
+		e1000_read_eeprom(hw, EEPROM_INIT_3GIO_3, 1,
+		                  &eeprom_data);
+		if ((hw->device_id != E1000_DEV_ID_82573L) ||
+		    (eeprom_data & EEPROM_WORD1A_ASPM_MASK)) {
+			if (max_frame > MAXIMUM_ETHERNET_FRAME_SIZE) {
+				DPRINTK(PROBE, ERR,
+			            	"Jumbo Frames not supported.\n");
+				return -EINVAL;
+			}
+			break;
+		}
+		/* ERT will be enabled later to enable wire speed receives */
+
+		/* fall through to get support */
+	case e1000_82571:
+	case e1000_82572:
+	case e1000_80003es2lan:
+#define MAX_STD_JUMBO_FRAME_SIZE 9234
+		if (max_frame > MAX_STD_JUMBO_FRAME_SIZE) {
+			DPRINTK(PROBE, ERR, "MTU > 9216 not supported.\n");
+			return -EINVAL;
+		}
+		break;
+	default:
+		/* Capable of supporting up to MAX_JUMBO_FRAME_SIZE limit. */
+		break;
+	}
+
+	/* NOTE: netdev_alloc_skb reserves 16 bytes, and typically NET_IP_ALIGN
+	 * means we reserve 2 more; this pushes us to allocate from the next
+	 * larger slab size,
+	 * i.e. RXBUFFER_2048 --> size-4096 slab */
+
+	if (max_frame <= E1000_RXBUFFER_256)
+		adapter->rx_buffer_len = E1000_RXBUFFER_256;
+	else if (max_frame <= E1000_RXBUFFER_512)
+		adapter->rx_buffer_len = E1000_RXBUFFER_512;
+	else if (max_frame <= E1000_RXBUFFER_1024)
+		adapter->rx_buffer_len = E1000_RXBUFFER_1024;
+	else if (max_frame <= E1000_RXBUFFER_2048)
+		adapter->rx_buffer_len = E1000_RXBUFFER_2048;
+	else if (max_frame <= E1000_RXBUFFER_4096)
+		adapter->rx_buffer_len = E1000_RXBUFFER_4096;
+	else if (max_frame <= E1000_RXBUFFER_8192)
+		adapter->rx_buffer_len = E1000_RXBUFFER_8192;
+	else if (max_frame <= E1000_RXBUFFER_16384)
+		adapter->rx_buffer_len = E1000_RXBUFFER_16384;
+
+	/* adjust allocation if LPE protects us, and we aren't using SBP */
+	if (!hw->tbi_compatibility_on &&
+	    ((max_frame == MAXIMUM_ETHERNET_FRAME_SIZE) ||
+	     (max_frame == MAXIMUM_ETHERNET_VLAN_SIZE)))
+		adapter->rx_buffer_len = MAXIMUM_ETHERNET_VLAN_SIZE;
+
+	netdev->mtu = new_mtu;
+	hw->max_frame_size = max_frame;
+
+	if (netif_running(netdev))
+		e1000_reinit_locked(adapter);
+
+	return 0;
+}
+
+/**
+ * e1000_update_stats - Update the board statistics counters
+ * @adapter: board private structure
+ **/
+
+void e1000_update_stats(struct e1000_adapter *adapter)
+{
+	struct e1000_hw *hw = &adapter->hw;
+	struct pci_dev *pdev = adapter->pdev;
+	unsigned long flags = 0;
+	u16 phy_tmp;
+
+#define PHY_IDLE_ERROR_COUNT_MASK 0x00FF
+
+	/*
+	 * Prevent stats update while adapter is being reset, or if the pci
+	 * connection is down.
+	 */
+	if (adapter->link_speed == 0)
+		return;
+	if (pci_channel_offline(pdev))
+		return;
+
+	if (!adapter->ecdev)
+		spin_lock_irqsave(&adapter->stats_lock, flags);
+
+	/* these counters are modified from e1000_tbi_adjust_stats,
+	 * called from the interrupt context, so they must only
+	 * be written while holding adapter->stats_lock
+	 */
+
+	adapter->stats.crcerrs += er32(CRCERRS);
+	adapter->stats.gprc += er32(GPRC);
+	adapter->stats.gorcl += er32(GORCL);
+	adapter->stats.gorch += er32(GORCH);
+	adapter->stats.bprc += er32(BPRC);
+	adapter->stats.mprc += er32(MPRC);
+	adapter->stats.roc += er32(ROC);
+
+	if (hw->mac_type != e1000_ich8lan) {
+		adapter->stats.prc64 += er32(PRC64);
+		adapter->stats.prc127 += er32(PRC127);
+		adapter->stats.prc255 += er32(PRC255);
+		adapter->stats.prc511 += er32(PRC511);
+		adapter->stats.prc1023 += er32(PRC1023);
+		adapter->stats.prc1522 += er32(PRC1522);
+	}
+
+	adapter->stats.symerrs += er32(SYMERRS);
+	adapter->stats.mpc += er32(MPC);
+	adapter->stats.scc += er32(SCC);
+	adapter->stats.ecol += er32(ECOL);
+	adapter->stats.mcc += er32(MCC);
+	adapter->stats.latecol += er32(LATECOL);
+	adapter->stats.dc += er32(DC);
+	adapter->stats.sec += er32(SEC);
+	adapter->stats.rlec += er32(RLEC);
+	adapter->stats.xonrxc += er32(XONRXC);
+	adapter->stats.xontxc += er32(XONTXC);
+	adapter->stats.xoffrxc += er32(XOFFRXC);
+	adapter->stats.xofftxc += er32(XOFFTXC);
+	adapter->stats.fcruc += er32(FCRUC);
+	adapter->stats.gptc += er32(GPTC);
+	adapter->stats.gotcl += er32(GOTCL);
+	adapter->stats.gotch += er32(GOTCH);
+	adapter->stats.rnbc += er32(RNBC);
+	adapter->stats.ruc += er32(RUC);
+	adapter->stats.rfc += er32(RFC);
+	adapter->stats.rjc += er32(RJC);
+	adapter->stats.torl += er32(TORL);
+	adapter->stats.torh += er32(TORH);
+	adapter->stats.totl += er32(TOTL);
+	adapter->stats.toth += er32(TOTH);
+	adapter->stats.tpr += er32(TPR);
+
+	if (hw->mac_type != e1000_ich8lan) {
+		adapter->stats.ptc64 += er32(PTC64);
+		adapter->stats.ptc127 += er32(PTC127);
+		adapter->stats.ptc255 += er32(PTC255);
+		adapter->stats.ptc511 += er32(PTC511);
+		adapter->stats.ptc1023 += er32(PTC1023);
+		adapter->stats.ptc1522 += er32(PTC1522);
+	}
+
+	adapter->stats.mptc += er32(MPTC);
+	adapter->stats.bptc += er32(BPTC);
+
+	/* used for adaptive IFS */
+
+	hw->tx_packet_delta = er32(TPT);
+	adapter->stats.tpt += hw->tx_packet_delta;
+	hw->collision_delta = er32(COLC);
+	adapter->stats.colc += hw->collision_delta;
+
+	if (hw->mac_type >= e1000_82543) {
+		adapter->stats.algnerrc += er32(ALGNERRC);
+		adapter->stats.rxerrc += er32(RXERRC);
+		adapter->stats.tncrs += er32(TNCRS);
+		adapter->stats.cexterr += er32(CEXTERR);
+		adapter->stats.tsctc += er32(TSCTC);
+		adapter->stats.tsctfc += er32(TSCTFC);
+	}
+	if (hw->mac_type > e1000_82547_rev_2) {
+		adapter->stats.iac += er32(IAC);
+		adapter->stats.icrxoc += er32(ICRXOC);
+
+		if (hw->mac_type != e1000_ich8lan) {
+			adapter->stats.icrxptc += er32(ICRXPTC);
+			adapter->stats.icrxatc += er32(ICRXATC);
+			adapter->stats.ictxptc += er32(ICTXPTC);
+			adapter->stats.ictxatc += er32(ICTXATC);
+			adapter->stats.ictxqec += er32(ICTXQEC);
+			adapter->stats.ictxqmtc += er32(ICTXQMTC);
+			adapter->stats.icrxdmtc += er32(ICRXDMTC);
+		}
+	}
+
+	/* Fill out the OS statistics structure */
+	adapter->net_stats.multicast = adapter->stats.mprc;
+	adapter->net_stats.collisions = adapter->stats.colc;
+
+	/* Rx Errors */
+
+	/* RLEC on some newer hardware can be incorrect so build
+	* our own version based on RUC and ROC */
+	adapter->net_stats.rx_errors = adapter->stats.rxerrc +
+		adapter->stats.crcerrs + adapter->stats.algnerrc +
+		adapter->stats.ruc + adapter->stats.roc +
+		adapter->stats.cexterr;
+	adapter->stats.rlerrc = adapter->stats.ruc + adapter->stats.roc;
+	adapter->net_stats.rx_length_errors = adapter->stats.rlerrc;
+	adapter->net_stats.rx_crc_errors = adapter->stats.crcerrs;
+	adapter->net_stats.rx_frame_errors = adapter->stats.algnerrc;
+	adapter->net_stats.rx_missed_errors = adapter->stats.mpc;
+
+	/* Tx Errors */
+	adapter->stats.txerrc = adapter->stats.ecol + adapter->stats.latecol;
+	adapter->net_stats.tx_errors = adapter->stats.txerrc;
+	adapter->net_stats.tx_aborted_errors = adapter->stats.ecol;
+	adapter->net_stats.tx_window_errors = adapter->stats.latecol;
+	adapter->net_stats.tx_carrier_errors = adapter->stats.tncrs;
+	if (hw->bad_tx_carr_stats_fd &&
+	    adapter->link_duplex == FULL_DUPLEX) {
+		adapter->net_stats.tx_carrier_errors = 0;
+		adapter->stats.tncrs = 0;
+	}
+
+	/* Tx Dropped needs to be maintained elsewhere */
+
+	/* Phy Stats */
+	if (hw->media_type == e1000_media_type_copper) {
+		if ((adapter->link_speed == SPEED_1000) &&
+		   (!e1000_read_phy_reg(hw, PHY_1000T_STATUS, &phy_tmp))) {
+			phy_tmp &= PHY_IDLE_ERROR_COUNT_MASK;
+			adapter->phy_stats.idle_errors += phy_tmp;
+		}
+
+		if ((hw->mac_type <= e1000_82546) &&
+		   (hw->phy_type == e1000_phy_m88) &&
+		   !e1000_read_phy_reg(hw, M88E1000_RX_ERR_CNTR, &phy_tmp))
+			adapter->phy_stats.receive_errors += phy_tmp;
+	}
+
+	/* Management Stats */
+	if (hw->has_smbus) {
+		adapter->stats.mgptc += er32(MGTPTC);
+		adapter->stats.mgprc += er32(MGTPRC);
+		adapter->stats.mgpdc += er32(MGTPDC);
+	}
+
+	if (!adapter->ecdev)
+		spin_unlock_irqrestore(&adapter->stats_lock, flags);
+}
+
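+/**
+ * ec_poll - EtherCAT poll routine
+ * @netdev: network interface device structure
+ *
+ * Called cyclically in place of the hardware interrupt when the device is
+ * used by the EtherCAT master: runs the watchdog at most every two seconds
+ * and then processes RX/TX via the interrupt routine.
+ **/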
+void ec_poll(struct net_device *netdev)
+{
+	struct e1000_adapter *adapter = netdev_priv(netdev);
+	if (jiffies - adapter->ec_watchdog_jiffies >= 2 * HZ) {
+		e1000_watchdog((unsigned long) adapter);
+		adapter->ec_watchdog_jiffies = jiffies;
+	}
+#ifdef CONFIG_PCI_MSI
+	e1000_intr_msi(0, netdev);
+#else
+	e1000_intr(0, netdev);
+#endif
+}
+
+/**
+ * e1000_intr_msi - Interrupt Handler
+ * @irq: interrupt number
+ * @data: pointer to a network interface device structure
+ **/
+
+static irqreturn_t e1000_intr_msi(int irq, void *data)
+{
+	struct net_device *netdev = data;
+	struct e1000_adapter *adapter = netdev_priv(netdev);
+	struct e1000_hw *hw = &adapter->hw;
+	u32 icr = er32(ICR);
+
+	if (adapter->ecdev) {
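+		/* EtherCAT: no NAPI scheduling here; clean the RX and TX rings
+		 * directly, bounded by E1000_MAX_INTR passes */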
+		int i, ec_work_done = 0;
+		for (i = 0; i < E1000_MAX_INTR; i++) {
+			if (unlikely(!adapter->clean_rx(adapter, adapter->rx_ring,
+						&ec_work_done, 100) &
+					!e1000_clean_tx_irq(adapter, adapter->tx_ring))) {
+				break;
+			}
+		}
+	} else {
+		/* in NAPI mode, reading ICR disables interrupts using IAM */
+
+		if ( !adapter->ecdev && (icr & (E1000_ICR_RXSEQ | E1000_ICR_LSC)) ) {
+			hw->get_link_status = 1;
+			/* 80003ES2LAN workaround-- For packet buffer work-around on
+			 * link down event; disable receives here in the ISR and reset
+			 * adapter in watchdog */
+			if (netif_carrier_ok(netdev) &&
+			    (hw->mac_type == e1000_80003es2lan)) {
+				/* disable receives */
+				u32 rctl = er32(RCTL);
+				ew32(RCTL, rctl & ~E1000_RCTL_EN);
+			}
+			/* guard against interrupt when we're going down */
+			if (!test_bit(__E1000_DOWN, &adapter->flags))
+				mod_timer(&adapter->watchdog_timer, jiffies + 1);
+		}
+
+		if (likely(napi_schedule_prep(&adapter->napi))) {
+			adapter->total_tx_bytes = 0;
+			adapter->total_tx_packets = 0;
+			adapter->total_rx_bytes = 0;
+			adapter->total_rx_packets = 0;
+			__napi_schedule(&adapter->napi);
+		} else
+			e1000_irq_enable(adapter);
+	}
+
+	return IRQ_HANDLED;
+}
+
+/**
+ * e1000_intr - Interrupt Handler
+ * @irq: interrupt number
+ * @data: pointer to a network interface device structure
+ **/
+
+static irqreturn_t e1000_intr(int irq, void *data)
+{
+	struct net_device *netdev = data;
+	struct e1000_adapter *adapter = netdev_priv(netdev);
+	struct e1000_hw *hw = &adapter->hw;
+	u32 rctl, icr = er32(ICR);
+
+	if (unlikely((!icr) || test_bit(__E1000_DOWN, &adapter->flags)))
+		return IRQ_NONE;  /* Not our interrupt */
+
+	/* IMS will not auto-mask if INT_ASSERTED is not set, and if it is
+	 * not set, then the adapter didn't send an interrupt */
+	if (unlikely(hw->mac_type >= e1000_82571 &&
+	             !(icr & E1000_ICR_INT_ASSERTED)))
+		return IRQ_NONE;
+
+	/* Interrupt Auto-Mask...upon reading ICR, interrupts are masked.  No
+	 * need for the IMC write */
+
+	if (unlikely(icr & (E1000_ICR_RXSEQ | E1000_ICR_LSC))) {
+		hw->get_link_status = 1;
+		/* 80003ES2LAN workaround--
+		 * For packet buffer work-around on link down event;
+		 * disable receives here in the ISR and
+		 * reset adapter in watchdog
+		 */
+		if (netif_carrier_ok(netdev) &&
+		    (hw->mac_type == e1000_80003es2lan)) {
+			/* disable receives */
+			rctl = er32(RCTL);
+			ew32(RCTL, rctl & ~E1000_RCTL_EN);
+		}
+		/* guard against interrupt when we're going down */
+		if (!test_bit(__E1000_DOWN, &adapter->flags))
+			mod_timer(&adapter->watchdog_timer, jiffies + 1);
+	}
+
+	if (unlikely(hw->mac_type < e1000_82571)) {
+		/* disable interrupts, without the synchronize_irq bit */
+		ew32(IMC, ~0);
+		E1000_WRITE_FLUSH();
+	}
+	if (likely(napi_schedule_prep(&adapter->napi))) {
+		adapter->total_tx_bytes = 0;
+		adapter->total_tx_packets = 0;
+		adapter->total_rx_bytes = 0;
+		adapter->total_rx_packets = 0;
+		__napi_schedule(&adapter->napi);
+	} else {
+		/* this really should not happen! if it does it is basically a
+		 * bug, but not a hard error, so enable ints and continue */
+		if (!test_bit(__E1000_DOWN, &adapter->flags))
+			e1000_irq_enable(adapter);
+	}
+
+	return IRQ_HANDLED;
+}
+
+/**
+ * e1000_clean - NAPI Rx polling callback
+ * @adapter: board private structure
+ * EtherCAT: never called
+ **/
+static int e1000_clean(struct napi_struct *napi, int budget)
+{
+	struct e1000_adapter *adapter = container_of(napi, struct e1000_adapter, napi);
+	struct net_device *poll_dev = adapter->netdev;
+	int tx_cleaned = 0, work_done = 0;
+
+	adapter = netdev_priv(poll_dev);
+
+	tx_cleaned = e1000_clean_tx_irq(adapter, &adapter->tx_ring[0]);
+
+	adapter->clean_rx(adapter, &adapter->rx_ring[0],
+	                  &work_done, budget);
+
+	if (!tx_cleaned)
+		work_done = budget;
+
+	/* If budget not fully consumed, exit the polling mode */
+	if (work_done < budget) {
+		if (likely(adapter->itr_setting & 3))
+			e1000_set_itr(adapter);
+		napi_complete(napi);
+		if (!test_bit(__E1000_DOWN, &adapter->flags))
+			e1000_irq_enable(adapter);
+	}
+
+	return work_done;
+}
+
+/**
+ * e1000_clean_tx_irq - Reclaim resources after transmit completes
+ * @adapter: board private structure
+ **/
+static bool e1000_clean_tx_irq(struct e1000_adapter *adapter,
+			       struct e1000_tx_ring *tx_ring)
+{
+	struct e1000_hw *hw = &adapter->hw;
+	struct net_device *netdev = adapter->netdev;
+	struct e1000_tx_desc *tx_desc, *eop_desc;
+	struct e1000_buffer *buffer_info;
+	unsigned int i, eop;
+	unsigned int count = 0;
+	unsigned int total_tx_bytes=0, total_tx_packets=0;
+
+	i = tx_ring->next_to_clean;
+	eop = tx_ring->buffer_info[i].next_to_watch;
+	eop_desc = E1000_TX_DESC(*tx_ring, eop);
+
+	while ((eop_desc->upper.data & cpu_to_le32(E1000_TXD_STAT_DD)) &&
+	       (count < tx_ring->count)) {
+		bool cleaned = false;
+		for ( ; !cleaned; count++) {
+			tx_desc = E1000_TX_DESC(*tx_ring, i);
+			buffer_info = &tx_ring->buffer_info[i];
+			cleaned = (i == eop);
+
+			if (cleaned) {
+				struct sk_buff *skb = buffer_info->skb;
+				unsigned int segs, bytecount;
+				segs = skb_shinfo(skb)->gso_segs ?: 1;
+				/* multiply data chunks by size of headers */
+				bytecount = ((segs - 1) * skb_headlen(skb)) +
+				            skb->len;
+				total_tx_packets += segs;
+				total_tx_bytes += bytecount;
+			}
+			e1000_unmap_and_free_tx_resource(adapter, buffer_info);
+			tx_desc->upper.data = 0;
+
+			if (unlikely(++i == tx_ring->count)) i = 0;
+		}
+
+		eop = tx_ring->buffer_info[i].next_to_watch;
+		eop_desc = E1000_TX_DESC(*tx_ring, eop);
+	}
+
+	tx_ring->next_to_clean = i;
+
+#define TX_WAKE_THRESHOLD 32
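+	/* restart a stopped queue only in non-EtherCAT operation and only once
+	 * at least TX_WAKE_THRESHOLD descriptors are free again */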
+	if (!adapter->ecdev && unlikely(count && netif_carrier_ok(netdev) &&
+		     E1000_DESC_UNUSED(tx_ring) >= TX_WAKE_THRESHOLD)) {
+		/* Make sure that anybody stopping the queue after this
+		 * sees the new next_to_clean.
+		 */
+		smp_mb();
+		if (netif_queue_stopped(netdev)) {
+			netif_wake_queue(netdev);
+			++adapter->restart_queue;
+		}
+	}
+
+	if (!adapter->ecdev && adapter->detect_tx_hung) {
+		/* Detect a transmit hang in hardware, this serializes the
+		 * check with the clearing of time_stamp and movement of i */
+		adapter->detect_tx_hung = false;
+		if (tx_ring->buffer_info[i].time_stamp &&
+		    time_after(jiffies, tx_ring->buffer_info[i].time_stamp +
+		               (adapter->tx_timeout_factor * HZ))
+		    && !(er32(STATUS) & E1000_STATUS_TXOFF)) {
+
+			/* detected Tx unit hang */
+			DPRINTK(DRV, ERR, "Detected Tx Unit Hang\n"
+					"  Tx Queue             <%lu>\n"
+					"  TDH                  <%x>\n"
+					"  TDT                  <%x>\n"
+					"  next_to_use          <%x>\n"
+					"  next_to_clean        <%x>\n"
+					"buffer_info[next_to_clean]\n"
+					"  time_stamp           <%lx>\n"
+					"  next_to_watch        <%x>\n"
+					"  jiffies              <%lx>\n"
+					"  next_to_watch.status <%x>\n",
+				(unsigned long)((tx_ring - adapter->tx_ring) /
+					sizeof(struct e1000_tx_ring)),
+				readl(hw->hw_addr + tx_ring->tdh),
+				readl(hw->hw_addr + tx_ring->tdt),
+				tx_ring->next_to_use,
+				tx_ring->next_to_clean,
+				tx_ring->buffer_info[i].time_stamp,
+				eop,
+				jiffies,
+				eop_desc->upper.fields.status);
+			netif_stop_queue(netdev);
+		}
+	}
+	adapter->total_tx_bytes += total_tx_bytes;
+	adapter->total_tx_packets += total_tx_packets;
+	adapter->net_stats.tx_bytes += total_tx_bytes;
+	adapter->net_stats.tx_packets += total_tx_packets;
+	return (count < tx_ring->count);
+}
+
+/**
+ * e1000_rx_checksum - Receive Checksum Offload for 82543
+ * @adapter:     board private structure
+ * @status_err:  receive descriptor status and error fields
+ * @csum:        receive descriptor csum field
+ * @sk_buff:     socket buffer with received data
+ **/
+
+static void e1000_rx_checksum(struct e1000_adapter *adapter, u32 status_err,
+			      u32 csum, struct sk_buff *skb)
+{
+	struct e1000_hw *hw = &adapter->hw;
+	u16 status = (u16)status_err;
+	u8 errors = (u8)(status_err >> 24);
+	skb->ip_summed = CHECKSUM_NONE;
+
+	/* 82543 or newer only */
+	if (unlikely(hw->mac_type < e1000_82543)) return;
+	/* Ignore Checksum bit is set */
+	if (unlikely(status & E1000_RXD_STAT_IXSM)) return;
+	/* TCP/UDP checksum error bit is set */
+	if (unlikely(errors & E1000_RXD_ERR_TCPE)) {
+		/* let the stack verify checksum errors */
+		adapter->hw_csum_err++;
+		return;
+	}
+	/* TCP/UDP Checksum has not been calculated */
+	if (hw->mac_type <= e1000_82547_rev_2) {
+		if (!(status & E1000_RXD_STAT_TCPCS))
+			return;
+	} else {
+		if (!(status & (E1000_RXD_STAT_TCPCS | E1000_RXD_STAT_UDPCS)))
+			return;
+	}
+	/* It must be a TCP or UDP packet with a valid checksum */
+	if (likely(status & E1000_RXD_STAT_TCPCS)) {
+		/* TCP checksum is good */
+		skb->ip_summed = CHECKSUM_UNNECESSARY;
+	} else if (hw->mac_type > e1000_82547_rev_2) {
+		/* IP fragment with UDP payload */
+		/* Hardware complements the payload checksum, so we undo it
+		 * and then put the value in host order for further stack use.
+		 */
+		__sum16 sum = (__force __sum16)htons(csum);
+		skb->csum = csum_unfold(~sum);
+		skb->ip_summed = CHECKSUM_COMPLETE;
+	}
+	adapter->hw_csum_good++;
+}
+
+/**
+ * e1000_clean_rx_irq - Send received data up the network stack; legacy
+ * @adapter: board private structure
+ **/
+static bool e1000_clean_rx_irq(struct e1000_adapter *adapter,
+			       struct e1000_rx_ring *rx_ring,
+			       int *work_done, int work_to_do)
+{
+	struct e1000_hw *hw = &adapter->hw;
+	struct net_device *netdev = adapter->netdev;
+	struct pci_dev *pdev = adapter->pdev;
+	struct e1000_rx_desc *rx_desc, *next_rxd;
+	struct e1000_buffer *buffer_info, *next_buffer;
+	unsigned long flags;
+	u32 length;
+	u8 last_byte;
+	unsigned int i;
+	int cleaned_count = 0;
+	bool cleaned = false;
+	unsigned int total_rx_bytes=0, total_rx_packets=0;
+
+	i = rx_ring->next_to_clean;
+	rx_desc = E1000_RX_DESC(*rx_ring, i);
+	buffer_info = &rx_ring->buffer_info[i];
+
+	while (rx_desc->status & E1000_RXD_STAT_DD) {
+		struct sk_buff *skb;
+		u8 status;
+
+		if (*work_done >= work_to_do)
+			break;
+		(*work_done)++;
+
+		status = rx_desc->status;
+		skb = buffer_info->skb;
+		if (!adapter->ecdev) buffer_info->skb = NULL;
+
+		prefetch(skb->data - NET_IP_ALIGN);
+
+		if (++i == rx_ring->count) i = 0;
+		next_rxd = E1000_RX_DESC(*rx_ring, i);
+		prefetch(next_rxd);
+
+		next_buffer = &rx_ring->buffer_info[i];
+
+		cleaned = true;
+		cleaned_count++;
+		pci_unmap_single(pdev,
+		                 buffer_info->dma,
+		                 buffer_info->length,
+		                 PCI_DMA_FROMDEVICE);
+		buffer_info->dma = 0;
+
+		length = le16_to_cpu(rx_desc->length);
+		/* !EOP means multiple descriptors were used to store a single
+		 * packet; also make sure the frame isn't just the CRC */
+		if (unlikely(!(status & E1000_RXD_STAT_EOP) || (length <= 4))) {
+			/* All receives must fit into a single buffer */
+			E1000_DBG("%s: Receive packet consumed multiple"
+				  " buffers\n", netdev->name);
+			/* recycle */
+			buffer_info->skb = skb;
+			goto next_desc;
+		}
+
+		if (!adapter->ecdev &&
+				unlikely(rx_desc->errors & E1000_RXD_ERR_FRAME_ERR_MASK)) {
+			last_byte = *(skb->data + length - 1);
+			if (TBI_ACCEPT(hw, status, rx_desc->errors, length,
+				       last_byte)) {
+				spin_lock_irqsave(&adapter->stats_lock, flags);
+				e1000_tbi_adjust_stats(hw, &adapter->stats,
+				                       length, skb->data);
+				spin_unlock_irqrestore(&adapter->stats_lock,
+				                       flags);
+				length--;
+			} else {
+				/* recycle */
+				buffer_info->skb = skb;
+				goto next_desc;
+			}
+		}
+
+		/* adjust length to remove Ethernet CRC, this must be
+		 * done after the TBI_ACCEPT workaround above */
+		length -= 4;
+
+		/* probably a little skewed due to removing CRC */
+		total_rx_bytes += length;
+		total_rx_packets++;
+
+		/* code added for copybreak, this should improve
+		 * performance for small packets with large amounts
+		 * of reassembly being done in the stack */
+		if (!adapter->ecdev && length < copybreak) {
+			struct sk_buff *new_skb =
+			    netdev_alloc_skb(netdev, length + NET_IP_ALIGN);
+			if (new_skb) {
+				skb_reserve(new_skb, NET_IP_ALIGN);
+				skb_copy_to_linear_data_offset(new_skb,
+							       -NET_IP_ALIGN,
+							       (skb->data -
+							        NET_IP_ALIGN),
+							       (length +
+							        NET_IP_ALIGN));
+				/* save the skb in buffer_info as good */
+				buffer_info->skb = skb;
+				skb = new_skb;
+			}
+			/* else just continue with the old one */
+		}
+		/* end copybreak code */
+		skb_put(skb, length);
+
+		/* Receive Checksum Offload */
+		e1000_rx_checksum(adapter,
+				  (u32)(status) |
+				  ((u32)(rx_desc->errors) << 24),
+				  le16_to_cpu(rx_desc->csum), skb);
+
+		if (adapter->ecdev) {
+			ecdev_receive(adapter->ecdev, skb->data, length);
+
+			// No need to detect link status as
+			// long as frames are received: Reset watchdog.
+			adapter->ec_watchdog_jiffies = jiffies;
+		} else {
+			skb->protocol = eth_type_trans(skb, netdev);
+
+			if (unlikely(adapter->vlgrp &&
+				    (status & E1000_RXD_STAT_VP))) {
+				vlan_hwaccel_receive_skb(skb, adapter->vlgrp,
+							 le16_to_cpu(rx_desc->special));
+			} else {
+				netif_receive_skb(skb);
+			}
+		}
+
+next_desc:
+		rx_desc->status = 0;
+
+		/* return some buffers to hardware, one at a time is too slow */
+		if (unlikely(cleaned_count >= E1000_RX_BUFFER_WRITE)) {
+			adapter->alloc_rx_buf(adapter, rx_ring, cleaned_count);
+			cleaned_count = 0;
+		}
+
+		/* use prefetched values */
+		rx_desc = next_rxd;
+		buffer_info = next_buffer;
+	}
+	rx_ring->next_to_clean = i;
+
+	cleaned_count = E1000_DESC_UNUSED(rx_ring);
+	if (cleaned_count)
+		adapter->alloc_rx_buf(adapter, rx_ring, cleaned_count);
+
+	adapter->total_rx_packets += total_rx_packets;
+	adapter->total_rx_bytes += total_rx_bytes;
+	adapter->net_stats.rx_bytes += total_rx_bytes;
+	adapter->net_stats.rx_packets += total_rx_packets;
+	return cleaned;
+}
+
+/**
+ * e1000_alloc_rx_buffers - Replace used receive buffers; legacy & extended
+ * @adapter: address of board private structure
+ **/
+
+static void e1000_alloc_rx_buffers(struct e1000_adapter *adapter,
+				   struct e1000_rx_ring *rx_ring,
+				   int cleaned_count)
+{
+	struct e1000_hw *hw = &adapter->hw;
+	struct net_device *netdev = adapter->netdev;
+	struct pci_dev *pdev = adapter->pdev;
+	struct e1000_rx_desc *rx_desc;
+	struct e1000_buffer *buffer_info;
+	struct sk_buff *skb;
+	unsigned int i;
+	unsigned int bufsz = adapter->rx_buffer_len + NET_IP_ALIGN;
+
+	i = rx_ring->next_to_use;
+	buffer_info = &rx_ring->buffer_info[i];
+
+	while (cleaned_count--) {
+		skb = buffer_info->skb;
+		if (skb) {
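+			/* a recycled skb from the cleanup path: reset its
+			 * length and go straight to the DMA mapping */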
+			skb_trim(skb, 0);
+			goto map_skb;
+		}
+
+		skb = netdev_alloc_skb(netdev, bufsz);
+		if (unlikely(!skb)) {
+			/* Better luck next round */
+			adapter->alloc_rx_buff_failed++;
+			break;
+		}
+
+		/* Fix for errata 23, can't cross 64kB boundary */
+		if (!e1000_check_64k_bound(adapter, skb->data, bufsz)) {
+			struct sk_buff *oldskb = skb;
+			DPRINTK(RX_ERR, ERR, "skb align check failed: %u bytes "
+					     "at %p\n", bufsz, skb->data);
+			/* Try again, without freeing the previous */
+			skb = netdev_alloc_skb(netdev, bufsz);
+			/* Failed allocation, critical failure */
+			if (!skb) {
+				dev_kfree_skb(oldskb);
+				break;
+			}
+
+			if (!e1000_check_64k_bound(adapter, skb->data, bufsz)) {
+				/* give up */
+				dev_kfree_skb(skb);
+				dev_kfree_skb(oldskb);
+				break; /* while !buffer_info->skb */
+			}
+
+			/* Use new allocation */
+			dev_kfree_skb(oldskb);
+		}
+		/* Make buffer alignment 2 beyond a 16 byte boundary;
+		 * this will result in a 16 byte aligned IP header after
+		 * the 14 byte MAC header is removed
+		 */
+		skb_reserve(skb, NET_IP_ALIGN);
+
+		buffer_info->skb = skb;
+		buffer_info->length = adapter->rx_buffer_len;
+map_skb:
+		buffer_info->dma = pci_map_single(pdev,
+						  skb->data,
+						  adapter->rx_buffer_len,
+						  PCI_DMA_FROMDEVICE);
+
+		/* Fix for errata 23, can't cross 64kB boundary */
+		if (!e1000_check_64k_bound(adapter,
+					(void *)(unsigned long)buffer_info->dma,
+					adapter->rx_buffer_len)) {
+			DPRINTK(RX_ERR, ERR,
+				"dma align check failed: %u bytes at %p\n",
+				adapter->rx_buffer_len,
+				(void *)(unsigned long)buffer_info->dma);
+			if (!adapter->ecdev) {
+				dev_kfree_skb(skb);
+				buffer_info->skb = NULL;
+			}
+
+			pci_unmap_single(pdev, buffer_info->dma,
+					 adapter->rx_buffer_len,
+					 PCI_DMA_FROMDEVICE);
+			buffer_info->dma = 0;
+
+			break; /* while !buffer_info->skb */
+		}
+		rx_desc = E1000_RX_DESC(*rx_ring, i);
+		rx_desc->buffer_addr = cpu_to_le64(buffer_info->dma);
+
+		if (unlikely(++i == rx_ring->count))
+			i = 0;
+		buffer_info = &rx_ring->buffer_info[i];
+	}
+
+	if (likely(rx_ring->next_to_use != i)) {
+		rx_ring->next_to_use = i;
+		if (unlikely(i-- == 0))
+			i = (rx_ring->count - 1);
+
+		/* Force memory writes to complete before letting h/w
+		 * know there are new descriptors to fetch.  (Only
+		 * applicable for weak-ordered memory model archs,
+		 * such as IA-64). */
+		wmb();
+		writel(i, hw->hw_addr + rx_ring->rdt);
+	}
+}
+
+/**
+ * e1000_smartspeed - Workaround for SmartSpeed on 82541 and 82547 controllers.
+ * @adapter: board private structure
+ **/
+
+static void e1000_smartspeed(struct e1000_adapter *adapter)
+{
+	struct e1000_hw *hw = &adapter->hw;
+	u16 phy_status;
+	u16 phy_ctrl;
+
+	if ((hw->phy_type != e1000_phy_igp) || !hw->autoneg ||
+	   !(hw->autoneg_advertised & ADVERTISE_1000_FULL))
+		return;
+
+	if (adapter->smartspeed == 0) {
+		/* If Master/Slave config fault is asserted twice,
+		 * we assume back-to-back */
+		e1000_read_phy_reg(hw, PHY_1000T_STATUS, &phy_status);
+		if (!(phy_status & SR_1000T_MS_CONFIG_FAULT)) return;
+		e1000_read_phy_reg(hw, PHY_1000T_STATUS, &phy_status);
+		if (!(phy_status & SR_1000T_MS_CONFIG_FAULT)) return;
+		e1000_read_phy_reg(hw, PHY_1000T_CTRL, &phy_ctrl);
+		if (phy_ctrl & CR_1000T_MS_ENABLE) {
+			phy_ctrl &= ~CR_1000T_MS_ENABLE;
+			e1000_write_phy_reg(hw, PHY_1000T_CTRL,
+					    phy_ctrl);
+			adapter->smartspeed++;
+			if (!e1000_phy_setup_autoneg(hw) &&
+			   !e1000_read_phy_reg(hw, PHY_CTRL,
+				   	       &phy_ctrl)) {
+				phy_ctrl |= (MII_CR_AUTO_NEG_EN |
+					     MII_CR_RESTART_AUTO_NEG);
+				e1000_write_phy_reg(hw, PHY_CTRL,
+						    phy_ctrl);
+			}
+		}
+		return;
+	} else if (adapter->smartspeed == E1000_SMARTSPEED_DOWNSHIFT) {
+		/* If still no link, perhaps using 2/3 pair cable */
+		e1000_read_phy_reg(hw, PHY_1000T_CTRL, &phy_ctrl);
+		phy_ctrl |= CR_1000T_MS_ENABLE;
+		e1000_write_phy_reg(hw, PHY_1000T_CTRL, phy_ctrl);
+		if (!e1000_phy_setup_autoneg(hw) &&
+		   !e1000_read_phy_reg(hw, PHY_CTRL, &phy_ctrl)) {
+			phy_ctrl |= (MII_CR_AUTO_NEG_EN |
+				     MII_CR_RESTART_AUTO_NEG);
+			e1000_write_phy_reg(hw, PHY_CTRL, phy_ctrl);
+		}
+	}
+	/* Restart process after E1000_SMARTSPEED_MAX iterations */
+	if (adapter->smartspeed++ == E1000_SMARTSPEED_MAX)
+		adapter->smartspeed = 0;
+}
+
+/**
+ * e1000_ioctl - handle device-specific ioctls
+ * @netdev: network interface device structure
+ * @ifr: pointer to the interface request structure
+ * @cmd: ioctl command
+ **/
+
+static int e1000_ioctl(struct net_device *netdev, struct ifreq *ifr, int cmd)
+{
+	switch (cmd) {
+	case SIOCGMIIPHY:
+	case SIOCGMIIREG:
+	case SIOCSMIIREG:
+		return e1000_mii_ioctl(netdev, ifr, cmd);
+	default:
+		return -EOPNOTSUPP;
+	}
+}
+
+/**
+ * e1000_mii_ioctl - handle MII ioctls (SIOCGMIIPHY, SIOCGMIIREG, SIOCSMIIREG)
+ * @netdev: network interface device structure
+ * @ifr: pointer to the interface request structure
+ * @cmd: ioctl command
+ **/
+
+static int e1000_mii_ioctl(struct net_device *netdev, struct ifreq *ifr,
+			   int cmd)
+{
+	struct e1000_adapter *adapter = netdev_priv(netdev);
+	struct e1000_hw *hw = &adapter->hw;
+	struct mii_ioctl_data *data = if_mii(ifr);
+	int retval;
+	u16 mii_reg;
+	u16 spddplx;
+	unsigned long flags;
+
+	if (hw->media_type != e1000_media_type_copper)
+		return -EOPNOTSUPP;
+
+	switch (cmd) {
+	case SIOCGMIIPHY:
+		data->phy_id = hw->phy_addr;
+		break;
+	case SIOCGMIIREG:
+		if (!capable(CAP_NET_ADMIN) || adapter->ecdev)
+			return -EPERM;
+		spin_lock_irqsave(&adapter->stats_lock, flags);
+		if (e1000_read_phy_reg(hw, data->reg_num & 0x1F,
+				   &data->val_out)) {
+			spin_unlock_irqrestore(&adapter->stats_lock, flags);
+			return -EIO;
+		}
+		spin_unlock_irqrestore(&adapter->stats_lock, flags);
+		break;
+	case SIOCSMIIREG:
+		if (!capable(CAP_NET_ADMIN) || adapter->ecdev)
+			return -EPERM;
+		if (data->reg_num & ~(0x1F))
+			return -EFAULT;
+		mii_reg = data->val_in;
+		spin_lock_irqsave(&adapter->stats_lock, flags);
+		if (e1000_write_phy_reg(hw, data->reg_num,
+					mii_reg)) {
+			spin_unlock_irqrestore(&adapter->stats_lock, flags);
+			return -EIO;
+		}
+		spin_unlock_irqrestore(&adapter->stats_lock, flags);
+		if (hw->media_type == e1000_media_type_copper) {
+			switch (data->reg_num) {
+			case PHY_CTRL:
+				if (mii_reg & MII_CR_POWER_DOWN)
+					break;
+				if (mii_reg & MII_CR_AUTO_NEG_EN) {
+					hw->autoneg = 1;
+					hw->autoneg_advertised = 0x2F;
+				} else {
+					if (mii_reg & 0x40)
+						spddplx = SPEED_1000;
+					else if (mii_reg & 0x2000)
+						spddplx = SPEED_100;
+					else
+						spddplx = SPEED_10;
+					spddplx += (mii_reg & 0x100)
+						   ? DUPLEX_FULL :
+						   DUPLEX_HALF;
+					retval = e1000_set_spd_dplx(adapter,
+								    spddplx);
+					if (retval)
+						return retval;
+				}
+				if (netif_running(adapter->netdev))
+					e1000_reinit_locked(adapter);
+				else
+					e1000_reset(adapter);
+				break;
+			case M88E1000_PHY_SPEC_CTRL:
+			case M88E1000_EXT_PHY_SPEC_CTRL:
+				if (e1000_phy_reset(hw))
+					return -EIO;
+				break;
+			}
+		} else {
+			switch (data->reg_num) {
+			case PHY_CTRL:
+				if (mii_reg & MII_CR_POWER_DOWN)
+					break;
+				if (netif_running(adapter->netdev))
+					e1000_reinit_locked(adapter);
+				else
+					e1000_reset(adapter);
+				break;
+			}
+		}
+		break;
+	default:
+		return -EOPNOTSUPP;
+	}
+	return E1000_SUCCESS;
+}
+
+void e1000_pci_set_mwi(struct e1000_hw *hw)
+{
+	struct e1000_adapter *adapter = hw->back;
+	int ret_val = pci_set_mwi(adapter->pdev);
+
+	if (ret_val)
+		DPRINTK(PROBE, ERR, "Error in setting MWI\n");
+}
+
+void e1000_pci_clear_mwi(struct e1000_hw *hw)
+{
+	struct e1000_adapter *adapter = hw->back;
+
+	pci_clear_mwi(adapter->pdev);
+}
+
+int e1000_pcix_get_mmrbc(struct e1000_hw *hw)
+{
+	struct e1000_adapter *adapter = hw->back;
+	return pcix_get_mmrbc(adapter->pdev);
+}
+
+void e1000_pcix_set_mmrbc(struct e1000_hw *hw, int mmrbc)
+{
+	struct e1000_adapter *adapter = hw->back;
+	pcix_set_mmrbc(adapter->pdev, mmrbc);
+}
+
+s32 e1000_read_pcie_cap_reg(struct e1000_hw *hw, u32 reg, u16 *value)
+{
+	struct e1000_adapter *adapter = hw->back;
+	u16 cap_offset;
+
+	cap_offset = pci_find_capability(adapter->pdev, PCI_CAP_ID_EXP);
+	if (!cap_offset)
+		return -E1000_ERR_CONFIG;
+
+	pci_read_config_word(adapter->pdev, cap_offset + reg, value);
+
+	return E1000_SUCCESS;
+}
+
+void e1000_io_write(struct e1000_hw *hw, unsigned long port, u32 value)
+{
+	outl(value, port);
+}
+
+static void e1000_vlan_rx_register(struct net_device *netdev,
+				   struct vlan_group *grp)
+{
+	struct e1000_adapter *adapter = netdev_priv(netdev);
+	struct e1000_hw *hw = &adapter->hw;
+	u32 ctrl, rctl;
+
+	if (!test_bit(__E1000_DOWN, &adapter->flags))
+		e1000_irq_disable(adapter);
+	adapter->vlgrp = grp;
+
+	if (grp) {
+		/* enable VLAN tag insert/strip */
+		ctrl = er32(CTRL);
+		ctrl |= E1000_CTRL_VME;
+		ew32(CTRL, ctrl);
+
+		if (adapter->hw.mac_type != e1000_ich8lan) {
+			/* enable VLAN receive filtering */
+			rctl = er32(RCTL);
+			rctl &= ~E1000_RCTL_CFIEN;
+			ew32(RCTL, rctl);
+			e1000_update_mng_vlan(adapter);
+		}
+	} else {
+		/* disable VLAN tag insert/strip */
+		ctrl = er32(CTRL);
+		ctrl &= ~E1000_CTRL_VME;
+		ew32(CTRL, ctrl);
+
+		if (adapter->hw.mac_type != e1000_ich8lan) {
+			if (adapter->mng_vlan_id !=
+			    (u16)E1000_MNG_VLAN_NONE) {
+				e1000_vlan_rx_kill_vid(netdev,
+				                       adapter->mng_vlan_id);
+				adapter->mng_vlan_id = E1000_MNG_VLAN_NONE;
+			}
+		}
+	}
+
+	if (!test_bit(__E1000_DOWN, &adapter->flags))
+		e1000_irq_enable(adapter);
+}
+
+static void e1000_vlan_rx_add_vid(struct net_device *netdev, u16 vid)
+{
+	struct e1000_adapter *adapter = netdev_priv(netdev);
+	struct e1000_hw *hw = &adapter->hw;
+	u32 vfta, index;
+
+	if ((hw->mng_cookie.status &
+	     E1000_MNG_DHCP_COOKIE_STATUS_VLAN_SUPPORT) &&
+	    (vid == adapter->mng_vlan_id))
+		return;
+	/* add VID to filter table */
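+	/* the VLAN filter table is 128 32-bit entries: bits 11:5 of the VID
+	 * select the entry, bits 4:0 the bit within it */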
+	index = (vid >> 5) & 0x7F;
+	vfta = E1000_READ_REG_ARRAY(hw, VFTA, index);
+	vfta |= (1 << (vid & 0x1F));
+	e1000_write_vfta(hw, index, vfta);
+}
+
+static void e1000_vlan_rx_kill_vid(struct net_device *netdev, u16 vid)
+{
+	struct e1000_adapter *adapter = netdev_priv(netdev);
+	struct e1000_hw *hw = &adapter->hw;
+	u32 vfta, index;
+
+	if (!test_bit(__E1000_DOWN, &adapter->flags))
+		e1000_irq_disable(adapter);
+	vlan_group_set_device(adapter->vlgrp, vid, NULL);
+	if (!test_bit(__E1000_DOWN, &adapter->flags))
+		e1000_irq_enable(adapter);
+
+	if ((hw->mng_cookie.status &
+	     E1000_MNG_DHCP_COOKIE_STATUS_VLAN_SUPPORT) &&
+	    (vid == adapter->mng_vlan_id)) {
+		/* release control to f/w */
+		e1000_release_hw_control(adapter);
+		return;
+	}
+
+	/* remove VID from filter table */
+	index = (vid >> 5) & 0x7F;
+	vfta = E1000_READ_REG_ARRAY(hw, VFTA, index);
+	vfta &= ~(1 << (vid & 0x1F));
+	e1000_write_vfta(hw, index, vfta);
+}
+
+static void e1000_restore_vlan(struct e1000_adapter *adapter)
+{
+	e1000_vlan_rx_register(adapter->netdev, adapter->vlgrp);
+
+	if (adapter->vlgrp) {
+		u16 vid;
+		for (vid = 0; vid < VLAN_GROUP_ARRAY_LEN; vid++) {
+			if (!vlan_group_get_device(adapter->vlgrp, vid))
+				continue;
+			e1000_vlan_rx_add_vid(adapter->netdev, vid);
+		}
+	}
+}
+
+int e1000_set_spd_dplx(struct e1000_adapter *adapter, u16 spddplx)
+{
+	struct e1000_hw *hw = &adapter->hw;
+
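+	/* spddplx encodes the requested setting as SPEED_xxx + DUPLEX_xxx,
+	 * matching the case labels below */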
+	hw->autoneg = 0;
+
+	/* Fiber NICs only allow 1000 Mbps full duplex */
+	if ((hw->media_type == e1000_media_type_fiber) &&
+		spddplx != (SPEED_1000 + DUPLEX_FULL)) {
+		DPRINTK(PROBE, ERR, "Unsupported Speed/Duplex configuration\n");
+		return -EINVAL;
+	}
+
+	switch (spddplx) {
+	case SPEED_10 + DUPLEX_HALF:
+		hw->forced_speed_duplex = e1000_10_half;
+		break;
+	case SPEED_10 + DUPLEX_FULL:
+		hw->forced_speed_duplex = e1000_10_full;
+		break;
+	case SPEED_100 + DUPLEX_HALF:
+		hw->forced_speed_duplex = e1000_100_half;
+		break;
+	case SPEED_100 + DUPLEX_FULL:
+		hw->forced_speed_duplex = e1000_100_full;
+		break;
+	case SPEED_1000 + DUPLEX_FULL:
+		hw->autoneg = 1;
+		hw->autoneg_advertised = ADVERTISE_1000_FULL;
+		break;
+	case SPEED_1000 + DUPLEX_HALF: /* not supported */
+	default:
+		DPRINTK(PROBE, ERR, "Unsupported Speed/Duplex configuration\n");
+		return -EINVAL;
+	}
+	return 0;
+}
+
+static int __e1000_shutdown(struct pci_dev *pdev, bool *enable_wake)
+{
+	struct net_device *netdev = pci_get_drvdata(pdev);
+	struct e1000_adapter *adapter = netdev_priv(netdev);
+	struct e1000_hw *hw = &adapter->hw;
+	u32 ctrl, ctrl_ext, rctl, status;
+	u32 wufc = adapter->wol;
+#ifdef CONFIG_PM
+	int retval = 0;
+#endif
+
+	if (adapter->ecdev)
+		return -EBUSY;
+
+	netif_device_detach(netdev);
+
+	if (netif_running(netdev)) {
+		WARN_ON(test_bit(__E1000_RESETTING, &adapter->flags));
+		e1000_down(adapter);
+	}
+
+#ifdef CONFIG_PM
+	retval = pci_save_state(pdev);
+	if (retval)
+		return retval;
+#endif
+
+	status = er32(STATUS);
+	if (status & E1000_STATUS_LU)
+		wufc &= ~E1000_WUFC_LNKC;
+
+	if (wufc) {
+		e1000_setup_rctl(adapter);
+		e1000_set_rx_mode(netdev);
+
+		/* turn on all-multi mode if wake on multicast is enabled */
+		if (wufc & E1000_WUFC_MC) {
+			rctl = er32(RCTL);
+			rctl |= E1000_RCTL_MPE;
+			ew32(RCTL, rctl);
+		}
+
+		if (hw->mac_type >= e1000_82540) {
+			ctrl = er32(CTRL);
+			/* advertise wake from D3Cold */
+			#define E1000_CTRL_ADVD3WUC 0x00100000
+			/* phy power management enable */
+			#define E1000_CTRL_EN_PHY_PWR_MGMT 0x00200000
+			ctrl |= E1000_CTRL_ADVD3WUC |
+				E1000_CTRL_EN_PHY_PWR_MGMT;
+			ew32(CTRL, ctrl);
+		}
+
+		if (hw->media_type == e1000_media_type_fiber ||
+		   hw->media_type == e1000_media_type_internal_serdes) {
+			/* keep the laser running in D3 */
+			ctrl_ext = er32(CTRL_EXT);
+			ctrl_ext |= E1000_CTRL_EXT_SDP7_DATA;
+			ew32(CTRL_EXT, ctrl_ext);
+		}
+
+		/* Allow time for pending master requests to run */
+		e1000_disable_pciex_master(hw);
+
+		ew32(WUC, E1000_WUC_PME_EN);
+		ew32(WUFC, wufc);
+	} else {
+		ew32(WUC, 0);
+		ew32(WUFC, 0);
+	}
+
+	e1000_release_manageability(adapter);
+
+	*enable_wake = !!wufc;
+
+	/* make sure adapter isn't asleep if manageability is enabled */
+	if (adapter->en_mng_pt)
+		*enable_wake = true;
+
+	if (hw->phy_type == e1000_phy_igp_3)
+		e1000_phy_powerdown_workaround(hw);
+
+	if (netif_running(netdev))
+		e1000_free_irq(adapter);
+
+	/* Release control of h/w to f/w.  If f/w is AMT enabled, this
+	 * would have already happened in close and is redundant. */
+	e1000_release_hw_control(adapter);
+
+	pci_disable_device(pdev);
+
+	return 0;
+}
+
+#ifdef CONFIG_PM
+static int e1000_suspend(struct pci_dev *pdev, pm_message_t state)
+{
+	int retval;
+	bool wake;
+
+	retval = __e1000_shutdown(pdev, &wake);
+	if (retval)
+		return retval;
+
+	if (wake) {
+		pci_prepare_to_sleep(pdev);
+	} else {
+		pci_wake_from_d3(pdev, false);
+		pci_set_power_state(pdev, PCI_D3hot);
+	}
+
+	return 0;
+}
+
+static int e1000_resume(struct pci_dev *pdev)
+{
+	struct net_device *netdev = pci_get_drvdata(pdev);
+	struct e1000_adapter *adapter = netdev_priv(netdev);
+	struct e1000_hw *hw = &adapter->hw;
+	u32 err;
+
+	if (adapter->ecdev)
+		return -EBUSY;
+
+	pci_set_power_state(pdev, PCI_D0);
+	pci_restore_state(pdev);
+
+	if (adapter->need_ioport)
+		err = pci_enable_device(pdev);
+	else
+		err = pci_enable_device_mem(pdev);
+	if (err) {
+		printk(KERN_ERR "e1000: Cannot enable PCI device from suspend\n");
+		return err;
+	}
+	pci_set_master(pdev);
+
+	pci_enable_wake(pdev, PCI_D3hot, 0);
+	pci_enable_wake(pdev, PCI_D3cold, 0);
+
+	if (netif_running(netdev)) {
+		err = e1000_request_irq(adapter);
+		if (err)
+			return err;
+	}
+
+	e1000_power_up_phy(adapter);
+	e1000_reset(adapter);
+	ew32(WUS, ~0);
+
+	e1000_init_manageability(adapter);
+
+	if (netif_running(netdev))
+		e1000_up(adapter);
+
+	if (!adapter->ecdev) netif_device_attach(netdev);
+
+	/* If the controller is 82573 and f/w is AMT, do not set
+	 * DRV_LOAD until the interface is up.  For all other cases,
+	 * let the f/w know that the h/w is now under the control
+	 * of the driver. */
+	if (hw->mac_type != e1000_82573 ||
+	    !e1000_check_mng_mode(hw))
+		e1000_get_hw_control(adapter);
+
+	return 0;
+}
+#endif
+
+static void e1000_shutdown(struct pci_dev *pdev)
+{
+	bool wake;
+
+	__e1000_shutdown(pdev, &wake);
+
+	if (system_state == SYSTEM_POWER_OFF) {
+		pci_wake_from_d3(pdev, wake);
+		pci_set_power_state(pdev, PCI_D3hot);
+	}
+}
+
+#ifdef CONFIG_NET_POLL_CONTROLLER
+/*
+ * Polling 'interrupt' - used by things like netconsole to send skbs
+ * without having to re-enable interrupts. It's not called while
+ * the interrupt routine is executing.
+ */
+static void e1000_netpoll(struct net_device *netdev)
+{
+	struct e1000_adapter *adapter = netdev_priv(netdev);
+
+	disable_irq(adapter->pdev->irq);
+	e1000_intr(adapter->pdev->irq, netdev);
+	enable_irq(adapter->pdev->irq);
+}
+#endif
+
+/**
+ * e1000_io_error_detected - called when PCI error is detected
+ * @pdev: Pointer to PCI device
+ * @state: The current PCI connection state
+ *
+ * This function is called after a PCI bus error affecting
+ * this device has been detected.
+ */
+static pci_ers_result_t e1000_io_error_detected(struct pci_dev *pdev,
+						pci_channel_state_t state)
+{
+	struct net_device *netdev = pci_get_drvdata(pdev);
+	struct e1000_adapter *adapter = netdev_priv(netdev);
+
+	netif_device_detach(netdev);
+
+	if (state == pci_channel_io_perm_failure)
+		return PCI_ERS_RESULT_DISCONNECT;
+
+	if (netif_running(netdev))
+		e1000_down(adapter);
+	pci_disable_device(pdev);
+
+	/* Request a slot reset. */
+	return PCI_ERS_RESULT_NEED_RESET;
+}
+
+/**
+ * e1000_io_slot_reset - called after the pci bus has been reset.
+ * @pdev: Pointer to PCI device
+ *
+ * Restart the card from scratch, as if from a cold boot. Implementation
+ * resembles the first half of the e1000_resume routine.
+ */
+static pci_ers_result_t e1000_io_slot_reset(struct pci_dev *pdev)
+{
+	struct net_device *netdev = pci_get_drvdata(pdev);
+	struct e1000_adapter *adapter = netdev_priv(netdev);
+	struct e1000_hw *hw = &adapter->hw;
+	int err;
+
+	if (adapter->need_ioport)
+		err = pci_enable_device(pdev);
+	else
+		err = pci_enable_device_mem(pdev);
+	if (err) {
+		printk(KERN_ERR "e1000: Cannot re-enable PCI device after reset.\n");
+		return PCI_ERS_RESULT_DISCONNECT;
+	}
+	pci_set_master(pdev);
+
+	pci_enable_wake(pdev, PCI_D3hot, 0);
+	pci_enable_wake(pdev, PCI_D3cold, 0);
+
+	e1000_reset(adapter);
+	ew32(WUS, ~0);
+
+	return PCI_ERS_RESULT_RECOVERED;
+}
+
+/**
+ * e1000_io_resume - called when traffic can start flowing again.
+ * @pdev: Pointer to PCI device
+ *
+ * This callback is called when the error recovery driver tells us that
+ * it's OK to resume normal operation. Implementation resembles the
+ * second half of the e1000_resume routine.
+ */
+static void e1000_io_resume(struct pci_dev *pdev)
+{
+	struct net_device *netdev = pci_get_drvdata(pdev);
+	struct e1000_adapter *adapter = netdev_priv(netdev);
+	struct e1000_hw *hw = &adapter->hw;
+
+	e1000_init_manageability(adapter);
+
+	if (netif_running(netdev)) {
+		if (e1000_up(adapter)) {
+			printk("e1000: can't bring device back up after reset\n");
+			return;
+		}
+	}
+
+	netif_device_attach(netdev);
+
+	/* If the controller is 82573 and f/w is AMT, do not set
+	 * DRV_LOAD until the interface is up.  For all other cases,
+	 * let the f/w know that the h/w is now under the control
+	 * of the driver. */
+	if (hw->mac_type != e1000_82573 ||
+	    !e1000_check_mng_mode(hw))
+		e1000_get_hw_control(adapter);
+
+}
+
+/* e1000_main.c */
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/devices/e1000/e1000_main-2.6.31-orig.c	Wed Apr 13 22:06:28 2011 +0200
@@ -0,0 +1,4905 @@
+/*******************************************************************************
+
+  Intel PRO/1000 Linux driver
+  Copyright(c) 1999 - 2006 Intel Corporation.
+
+  This program is free software; you can redistribute it and/or modify it
+  under the terms and conditions of the GNU General Public License,
+  version 2, as published by the Free Software Foundation.
+
+  This program is distributed in the hope it will be useful, but WITHOUT
+  ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+  FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
+  more details.
+
+  You should have received a copy of the GNU General Public License along with
+  this program; if not, write to the Free Software Foundation, Inc.,
+  51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
+
+  The full GNU General Public License is included in this distribution in
+  the file called "COPYING".
+
+  Contact Information:
+  Linux NICS <linux.nics@intel.com>
+  e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
+  Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
+
+*******************************************************************************/
+
+#include "e1000.h"
+#include <net/ip6_checksum.h>
+
+char e1000_driver_name[] = "e1000";
+static char e1000_driver_string[] = "Intel(R) PRO/1000 Network Driver";
+#define DRV_VERSION "7.3.21-k3-NAPI"
+const char e1000_driver_version[] = DRV_VERSION;
+static const char e1000_copyright[] = "Copyright (c) 1999-2006 Intel Corporation.";
+
+/* e1000_pci_tbl - PCI Device ID Table
+ *
+ * Last entry must be all 0s
+ *
+ * Macro expands to...
+ *   {PCI_DEVICE(PCI_VENDOR_ID_INTEL, device_id)}
+ */
+static struct pci_device_id e1000_pci_tbl[] = {
+	INTEL_E1000_ETHERNET_DEVICE(0x1000),
+	INTEL_E1000_ETHERNET_DEVICE(0x1001),
+	INTEL_E1000_ETHERNET_DEVICE(0x1004),
+	INTEL_E1000_ETHERNET_DEVICE(0x1008),
+	INTEL_E1000_ETHERNET_DEVICE(0x1009),
+	INTEL_E1000_ETHERNET_DEVICE(0x100C),
+	INTEL_E1000_ETHERNET_DEVICE(0x100D),
+	INTEL_E1000_ETHERNET_DEVICE(0x100E),
+	INTEL_E1000_ETHERNET_DEVICE(0x100F),
+	INTEL_E1000_ETHERNET_DEVICE(0x1010),
+	INTEL_E1000_ETHERNET_DEVICE(0x1011),
+	INTEL_E1000_ETHERNET_DEVICE(0x1012),
+	INTEL_E1000_ETHERNET_DEVICE(0x1013),
+	INTEL_E1000_ETHERNET_DEVICE(0x1014),
+	INTEL_E1000_ETHERNET_DEVICE(0x1015),
+	INTEL_E1000_ETHERNET_DEVICE(0x1016),
+	INTEL_E1000_ETHERNET_DEVICE(0x1017),
+	INTEL_E1000_ETHERNET_DEVICE(0x1018),
+	INTEL_E1000_ETHERNET_DEVICE(0x1019),
+	INTEL_E1000_ETHERNET_DEVICE(0x101A),
+	INTEL_E1000_ETHERNET_DEVICE(0x101D),
+	INTEL_E1000_ETHERNET_DEVICE(0x101E),
+	INTEL_E1000_ETHERNET_DEVICE(0x1026),
+	INTEL_E1000_ETHERNET_DEVICE(0x1027),
+	INTEL_E1000_ETHERNET_DEVICE(0x1028),
+	INTEL_E1000_ETHERNET_DEVICE(0x1075),
+	INTEL_E1000_ETHERNET_DEVICE(0x1076),
+	INTEL_E1000_ETHERNET_DEVICE(0x1077),
+	INTEL_E1000_ETHERNET_DEVICE(0x1078),
+	INTEL_E1000_ETHERNET_DEVICE(0x1079),
+	INTEL_E1000_ETHERNET_DEVICE(0x107A),
+	INTEL_E1000_ETHERNET_DEVICE(0x107B),
+	INTEL_E1000_ETHERNET_DEVICE(0x107C),
+	INTEL_E1000_ETHERNET_DEVICE(0x108A),
+	INTEL_E1000_ETHERNET_DEVICE(0x1099),
+	INTEL_E1000_ETHERNET_DEVICE(0x10B5),
+	/* required last entry */
+	{0,}
+};
+
+MODULE_DEVICE_TABLE(pci, e1000_pci_tbl);
+
+int e1000_up(struct e1000_adapter *adapter);
+void e1000_down(struct e1000_adapter *adapter);
+void e1000_reinit_locked(struct e1000_adapter *adapter);
+void e1000_reset(struct e1000_adapter *adapter);
+int e1000_set_spd_dplx(struct e1000_adapter *adapter, u16 spddplx);
+int e1000_setup_all_tx_resources(struct e1000_adapter *adapter);
+int e1000_setup_all_rx_resources(struct e1000_adapter *adapter);
+void e1000_free_all_tx_resources(struct e1000_adapter *adapter);
+void e1000_free_all_rx_resources(struct e1000_adapter *adapter);
+static int e1000_setup_tx_resources(struct e1000_adapter *adapter,
+                             struct e1000_tx_ring *txdr);
+static int e1000_setup_rx_resources(struct e1000_adapter *adapter,
+                             struct e1000_rx_ring *rxdr);
+static void e1000_free_tx_resources(struct e1000_adapter *adapter,
+                             struct e1000_tx_ring *tx_ring);
+static void e1000_free_rx_resources(struct e1000_adapter *adapter,
+                             struct e1000_rx_ring *rx_ring);
+void e1000_update_stats(struct e1000_adapter *adapter);
+
+static int e1000_init_module(void);
+static void e1000_exit_module(void);
+static int e1000_probe(struct pci_dev *pdev, const struct pci_device_id *ent);
+static void __devexit e1000_remove(struct pci_dev *pdev);
+static int e1000_alloc_queues(struct e1000_adapter *adapter);
+static int e1000_sw_init(struct e1000_adapter *adapter);
+static int e1000_open(struct net_device *netdev);
+static int e1000_close(struct net_device *netdev);
+static void e1000_configure_tx(struct e1000_adapter *adapter);
+static void e1000_configure_rx(struct e1000_adapter *adapter);
+static void e1000_setup_rctl(struct e1000_adapter *adapter);
+static void e1000_clean_all_tx_rings(struct e1000_adapter *adapter);
+static void e1000_clean_all_rx_rings(struct e1000_adapter *adapter);
+static void e1000_clean_tx_ring(struct e1000_adapter *adapter,
+                                struct e1000_tx_ring *tx_ring);
+static void e1000_clean_rx_ring(struct e1000_adapter *adapter,
+                                struct e1000_rx_ring *rx_ring);
+static void e1000_set_rx_mode(struct net_device *netdev);
+static void e1000_update_phy_info(unsigned long data);
+static void e1000_watchdog(unsigned long data);
+static void e1000_82547_tx_fifo_stall(unsigned long data);
+static int e1000_xmit_frame(struct sk_buff *skb, struct net_device *netdev);
+static struct net_device_stats * e1000_get_stats(struct net_device *netdev);
+static int e1000_change_mtu(struct net_device *netdev, int new_mtu);
+static int e1000_set_mac(struct net_device *netdev, void *p);
+static irqreturn_t e1000_intr(int irq, void *data);
+static irqreturn_t e1000_intr_msi(int irq, void *data);
+static bool e1000_clean_tx_irq(struct e1000_adapter *adapter,
+			       struct e1000_tx_ring *tx_ring);
+static int e1000_clean(struct napi_struct *napi, int budget);
+static bool e1000_clean_rx_irq(struct e1000_adapter *adapter,
+			       struct e1000_rx_ring *rx_ring,
+			       int *work_done, int work_to_do);
+static void e1000_alloc_rx_buffers(struct e1000_adapter *adapter,
+                                   struct e1000_rx_ring *rx_ring,
+				   int cleaned_count);
+static int e1000_ioctl(struct net_device *netdev, struct ifreq *ifr, int cmd);
+static int e1000_mii_ioctl(struct net_device *netdev, struct ifreq *ifr,
+			   int cmd);
+static void e1000_enter_82542_rst(struct e1000_adapter *adapter);
+static void e1000_leave_82542_rst(struct e1000_adapter *adapter);
+static void e1000_tx_timeout(struct net_device *dev);
+static void e1000_reset_task(struct work_struct *work);
+static void e1000_smartspeed(struct e1000_adapter *adapter);
+static int e1000_82547_fifo_workaround(struct e1000_adapter *adapter,
+                                       struct sk_buff *skb);
+
+static void e1000_vlan_rx_register(struct net_device *netdev, struct vlan_group *grp);
+static void e1000_vlan_rx_add_vid(struct net_device *netdev, u16 vid);
+static void e1000_vlan_rx_kill_vid(struct net_device *netdev, u16 vid);
+static void e1000_restore_vlan(struct e1000_adapter *adapter);
+
+#ifdef CONFIG_PM
+static int e1000_suspend(struct pci_dev *pdev, pm_message_t state);
+static int e1000_resume(struct pci_dev *pdev);
+#endif
+static void e1000_shutdown(struct pci_dev *pdev);
+
+#ifdef CONFIG_NET_POLL_CONTROLLER
+/* for netdump / net console */
+static void e1000_netpoll (struct net_device *netdev);
+#endif
+
+#define COPYBREAK_DEFAULT 256
+static unsigned int copybreak __read_mostly = COPYBREAK_DEFAULT;
+module_param(copybreak, uint, 0644);
+MODULE_PARM_DESC(copybreak,
+	"Maximum size of packet that is copied to a new buffer on receive");
+
+static pci_ers_result_t e1000_io_error_detected(struct pci_dev *pdev,
+                     pci_channel_state_t state);
+static pci_ers_result_t e1000_io_slot_reset(struct pci_dev *pdev);
+static void e1000_io_resume(struct pci_dev *pdev);
+
+static struct pci_error_handlers e1000_err_handler = {
+	.error_detected = e1000_io_error_detected,
+	.slot_reset = e1000_io_slot_reset,
+	.resume = e1000_io_resume,
+};
+
+static struct pci_driver e1000_driver = {
+	.name     = e1000_driver_name,
+	.id_table = e1000_pci_tbl,
+	.probe    = e1000_probe,
+	.remove   = __devexit_p(e1000_remove),
+#ifdef CONFIG_PM
+	/* Power Management Hooks */
+	.suspend  = e1000_suspend,
+	.resume   = e1000_resume,
+#endif
+	.shutdown = e1000_shutdown,
+	.err_handler = &e1000_err_handler
+};
+
+MODULE_AUTHOR("Intel Corporation, <linux.nics@intel.com>");
+MODULE_DESCRIPTION("Intel(R) PRO/1000 Network Driver");
+MODULE_LICENSE("GPL");
+MODULE_VERSION(DRV_VERSION);
+
+static int debug = NETIF_MSG_DRV | NETIF_MSG_PROBE;
+module_param(debug, int, 0);
+MODULE_PARM_DESC(debug, "Debug level (0=none,...,16=all)");
+
+/**
+ * e1000_init_module - Driver Registration Routine
+ *
+ * e1000_init_module is the first routine called when the driver is
+ * loaded. All it does is register with the PCI subsystem.
+ **/
+
+static int __init e1000_init_module(void)
+{
+	int ret;
+	printk(KERN_INFO "%s - version %s\n",
+	       e1000_driver_string, e1000_driver_version);
+
+	printk(KERN_INFO "%s\n", e1000_copyright);
+
+	ret = pci_register_driver(&e1000_driver);
+	if (copybreak != COPYBREAK_DEFAULT) {
+		if (copybreak == 0)
+			printk(KERN_INFO "e1000: copybreak disabled\n");
+		else
+			printk(KERN_INFO "e1000: copybreak enabled for "
+			       "packets <= %u bytes\n", copybreak);
+	}
+	return ret;
+}
+
+module_init(e1000_init_module);
+
+/**
+ * e1000_exit_module - Driver Exit Cleanup Routine
+ *
+ * e1000_exit_module is called just before the driver is removed
+ * from memory.
+ **/
+
+static void __exit e1000_exit_module(void)
+{
+	pci_unregister_driver(&e1000_driver);
+}
+
+module_exit(e1000_exit_module);
+
+static int e1000_request_irq(struct e1000_adapter *adapter)
+{
+	struct e1000_hw *hw = &adapter->hw;
+	struct net_device *netdev = adapter->netdev;
+	irq_handler_t handler = e1000_intr;
+	int irq_flags = IRQF_SHARED;
+	int err;
+
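+	/* try MSI on 82571 and newer MACs; otherwise use a shared legacy
+	 * interrupt line */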
+	if (hw->mac_type >= e1000_82571) {
+		adapter->have_msi = !pci_enable_msi(adapter->pdev);
+		if (adapter->have_msi) {
+			handler = e1000_intr_msi;
+			irq_flags = 0;
+		}
+	}
+
+	err = request_irq(adapter->pdev->irq, handler, irq_flags, netdev->name,
+	                  netdev);
+	if (err) {
+		if (adapter->have_msi)
+			pci_disable_msi(adapter->pdev);
+		DPRINTK(PROBE, ERR,
+		        "Unable to allocate interrupt Error: %d\n", err);
+	}
+
+	return err;
+}
+
+static void e1000_free_irq(struct e1000_adapter *adapter)
+{
+	struct net_device *netdev = adapter->netdev;
+
+	free_irq(adapter->pdev->irq, netdev);
+
+	if (adapter->have_msi)
+		pci_disable_msi(adapter->pdev);
+}
+
+/**
+ * e1000_irq_disable - Mask off interrupt generation on the NIC
+ * @adapter: board private structure
+ **/
+
+static void e1000_irq_disable(struct e1000_adapter *adapter)
+{
+	struct e1000_hw *hw = &adapter->hw;
+
+	ew32(IMC, ~0);
+	E1000_WRITE_FLUSH();
+	synchronize_irq(adapter->pdev->irq);
+}
+
+/**
+ * e1000_irq_enable - Enable default interrupt generation settings
+ * @adapter: board private structure
+ **/
+
+static void e1000_irq_enable(struct e1000_adapter *adapter)
+{
+	struct e1000_hw *hw = &adapter->hw;
+
+	ew32(IMS, IMS_ENABLE_MASK);
+	E1000_WRITE_FLUSH();
+}
+
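+/**
+ * e1000_update_mng_vlan - keep the manageability VLAN registered
+ * @adapter: board private structure
+ *
+ * If a VLAN group is present, register the VLAN ID advertised in the
+ * firmware's DHCP cookie (when VLAN support is indicated) and drop the
+ * previously tracked ID once it has changed.
+ **/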
+static void e1000_update_mng_vlan(struct e1000_adapter *adapter)
+{
+	struct e1000_hw *hw = &adapter->hw;
+	struct net_device *netdev = adapter->netdev;
+	u16 vid = hw->mng_cookie.vlan_id;
+	u16 old_vid = adapter->mng_vlan_id;
+	if (adapter->vlgrp) {
+		if (!vlan_group_get_device(adapter->vlgrp, vid)) {
+			if (hw->mng_cookie.status &
+				E1000_MNG_DHCP_COOKIE_STATUS_VLAN_SUPPORT) {
+				e1000_vlan_rx_add_vid(netdev, vid);
+				adapter->mng_vlan_id = vid;
+			} else
+				adapter->mng_vlan_id = E1000_MNG_VLAN_NONE;
+
+			if ((old_vid != (u16)E1000_MNG_VLAN_NONE) &&
+					(vid != old_vid) &&
+			    !vlan_group_get_device(adapter->vlgrp, old_vid))
+				e1000_vlan_rx_kill_vid(netdev, old_vid);
+		} else
+			adapter->mng_vlan_id = vid;
+	}
+}
+
+/**
+ * e1000_release_hw_control - release control of the h/w to f/w
+ * @adapter: address of board private structure
+ *
+ * e1000_release_hw_control resets {CTRL_EXT|FWSM}:DRV_LOAD bit.
+ * For ASF and Pass Through versions of f/w this means that the
+ * driver is no longer loaded. For AMT version (only with 82573)
+ * of the f/w this means that the network i/f is closed.
+ *
+ **/
+
+static void e1000_release_hw_control(struct e1000_adapter *adapter)
+{
+	u32 ctrl_ext;
+	u32 swsm;
+	struct e1000_hw *hw = &adapter->hw;
+
+	/* Let firmware take over control of h/w */
+	switch (hw->mac_type) {
+	case e1000_82573:
+		swsm = er32(SWSM);
+		ew32(SWSM, swsm & ~E1000_SWSM_DRV_LOAD);
+		break;
+	case e1000_82571:
+	case e1000_82572:
+	case e1000_80003es2lan:
+	case e1000_ich8lan:
+		ctrl_ext = er32(CTRL_EXT);
+		ew32(CTRL_EXT, ctrl_ext & ~E1000_CTRL_EXT_DRV_LOAD);
+		break;
+	default:
+		break;
+	}
+}
+
+/**
+ * e1000_get_hw_control - get control of the h/w from f/w
+ * @adapter: address of board private structure
+ *
+ * e1000_get_hw_control sets {CTRL_EXT|FWSM}:DRV_LOAD bit.
+ * For ASF and Pass Through versions of f/w this means that
+ * the driver is loaded. For AMT version (only with 82573)
+ * of the f/w this means that the network i/f is open.
+ *
+ **/
+
+static void e1000_get_hw_control(struct e1000_adapter *adapter)
+{
+	u32 ctrl_ext;
+	u32 swsm;
+	struct e1000_hw *hw = &adapter->hw;
+
+	/* Let firmware know the driver has taken over */
+	switch (hw->mac_type) {
+	case e1000_82573:
+		swsm = er32(SWSM);
+		ew32(SWSM, swsm | E1000_SWSM_DRV_LOAD);
+		break;
+	case e1000_82571:
+	case e1000_82572:
+	case e1000_80003es2lan:
+	case e1000_ich8lan:
+		ctrl_ext = er32(CTRL_EXT);
+		ew32(CTRL_EXT, ctrl_ext | E1000_CTRL_EXT_DRV_LOAD);
+		break;
+	default:
+		break;
+	}
+}
+
+static void e1000_init_manageability(struct e1000_adapter *adapter)
+{
+	struct e1000_hw *hw = &adapter->hw;
+
+	if (adapter->en_mng_pt) {
+		u32 manc = er32(MANC);
+
+		/* disable hardware interception of ARP */
+		manc &= ~(E1000_MANC_ARP_EN);
+
+		/* enable receiving management packets to the host */
+		/* this will probably generate destination unreachable messages
+		 * from the host OS, but the packets will be handled on SMBUS */
+		if (hw->has_manc2h) {
+			u32 manc2h = er32(MANC2H);
+
+			manc |= E1000_MANC_EN_MNG2HOST;
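+			/* forward management (RMCP) traffic on ports 623 and
+			 * 664 to the host */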
+#define E1000_MNG2HOST_PORT_623 (1 << 5)
+#define E1000_MNG2HOST_PORT_664 (1 << 6)
+			manc2h |= E1000_MNG2HOST_PORT_623;
+			manc2h |= E1000_MNG2HOST_PORT_664;
+			ew32(MANC2H, manc2h);
+		}
+
+		ew32(MANC, manc);
+	}
+}
+
+static void e1000_release_manageability(struct e1000_adapter *adapter)
+{
+	struct e1000_hw *hw = &adapter->hw;
+
+	if (adapter->en_mng_pt) {
+		u32 manc = er32(MANC);
+
+		/* re-enable hardware interception of ARP */
+		manc |= E1000_MANC_ARP_EN;
+
+		if (hw->has_manc2h)
+			manc &= ~E1000_MANC_EN_MNG2HOST;
+
+		/* we don't have to explicitly touch MANC2H since
+		 * MANC has an enable/disable bit that gates MANC2H */
+
+		ew32(MANC, manc);
+	}
+}
+
+/**
+ * e1000_configure - configure the hardware for RX and TX
+ * @adapter: board private structure
+ **/
+static void e1000_configure(struct e1000_adapter *adapter)
+{
+	struct net_device *netdev = adapter->netdev;
+	int i;
+
+	e1000_set_rx_mode(netdev);
+
+	e1000_restore_vlan(adapter);
+	e1000_init_manageability(adapter);
+
+	e1000_configure_tx(adapter);
+	e1000_setup_rctl(adapter);
+	e1000_configure_rx(adapter);
+	/* call E1000_DESC_UNUSED which always leaves
+	 * at least 1 descriptor unused to make sure
+	 * next_to_use != next_to_clean */
+	for (i = 0; i < adapter->num_rx_queues; i++) {
+		struct e1000_rx_ring *ring = &adapter->rx_ring[i];
+		adapter->alloc_rx_buf(adapter, ring,
+		                      E1000_DESC_UNUSED(ring));
+	}
+
+	adapter->tx_queue_len = netdev->tx_queue_len;
+}
+
+int e1000_up(struct e1000_adapter *adapter)
+{
+	struct e1000_hw *hw = &adapter->hw;
+
+	/* hardware has been reset, we need to reload some things */
+	e1000_configure(adapter);
+
+	clear_bit(__E1000_DOWN, &adapter->flags);
+
+	napi_enable(&adapter->napi);
+
+	e1000_irq_enable(adapter);
+
+	netif_wake_queue(adapter->netdev);
+
+	/* fire a link change interrupt to start the watchdog */
+	ew32(ICS, E1000_ICS_LSC);
+	return 0;
+}
+
+/**
+ * e1000_power_up_phy - restore link in case the phy was powered down
+ * @adapter: address of board private structure
+ *
+ * The phy may be powered down to save power and turn off link when the
+ * driver is unloaded and wake on lan is not enabled (among others)
+ * *** this routine MUST be followed by a call to e1000_reset ***
+ *
+ **/
+
+void e1000_power_up_phy(struct e1000_adapter *adapter)
+{
+	struct e1000_hw *hw = &adapter->hw;
+	u16 mii_reg = 0;
+
+	/* Just clear the power down bit to wake the phy back up */
+	if (hw->media_type == e1000_media_type_copper) {
+		/* according to the manual, the phy will retain its
+		 * settings across a power-down/up cycle */
+		e1000_read_phy_reg(hw, PHY_CTRL, &mii_reg);
+		mii_reg &= ~MII_CR_POWER_DOWN;
+		e1000_write_phy_reg(hw, PHY_CTRL, mii_reg);
+	}
+}
+
+static void e1000_power_down_phy(struct e1000_adapter *adapter)
+{
+	struct e1000_hw *hw = &adapter->hw;
+
+	/* Power down the PHY so no link is implied when interface is down *
+	 * The PHY cannot be powered down if any of the following is true *
+	 * (a) WoL is enabled
+	 * (b) AMT is active
+	 * (c) SoL/IDER session is active */
+	if (!adapter->wol && hw->mac_type >= e1000_82540 &&
+	   hw->media_type == e1000_media_type_copper) {
+		u16 mii_reg = 0;
+
+		switch (hw->mac_type) {
+		case e1000_82540:
+		case e1000_82545:
+		case e1000_82545_rev_3:
+		case e1000_82546:
+		case e1000_82546_rev_3:
+		case e1000_82541:
+		case e1000_82541_rev_2:
+		case e1000_82547:
+		case e1000_82547_rev_2:
+			if (er32(MANC) & E1000_MANC_SMBUS_EN)
+				goto out;
+			break;
+		case e1000_82571:
+		case e1000_82572:
+		case e1000_82573:
+		case e1000_80003es2lan:
+		case e1000_ich8lan:
+			if (e1000_check_mng_mode(hw) ||
+			    e1000_check_phy_reset_block(hw))
+				goto out;
+			break;
+		default:
+			goto out;
+		}
+		e1000_read_phy_reg(hw, PHY_CTRL, &mii_reg);
+		mii_reg |= MII_CR_POWER_DOWN;
+		e1000_write_phy_reg(hw, PHY_CTRL, mii_reg);
+		mdelay(1);
+	}
+out:
+	return;
+}
+
+void e1000_down(struct e1000_adapter *adapter)
+{
+	struct e1000_hw *hw = &adapter->hw;
+	struct net_device *netdev = adapter->netdev;
+	u32 rctl, tctl;
+
+	/* signal that we're down so the interrupt handler does not
+	 * reschedule our watchdog timer */
+	set_bit(__E1000_DOWN, &adapter->flags);
+
+	/* disable receives in the hardware */
+	rctl = er32(RCTL);
+	ew32(RCTL, rctl & ~E1000_RCTL_EN);
+	/* flush and sleep below */
+
+	/* can be netif_tx_disable when NETIF_F_LLTX is removed */
+	netif_stop_queue(netdev);
+
+	/* disable transmits in the hardware */
+	tctl = er32(TCTL);
+	tctl &= ~E1000_TCTL_EN;
+	ew32(TCTL, tctl);
+	/* flush both disables and wait for them to finish */
+	E1000_WRITE_FLUSH();
+	msleep(10);
+
+	napi_disable(&adapter->napi);
+
+	e1000_irq_disable(adapter);
+
+	del_timer_sync(&adapter->tx_fifo_stall_timer);
+	del_timer_sync(&adapter->watchdog_timer);
+	del_timer_sync(&adapter->phy_info_timer);
+
+	netdev->tx_queue_len = adapter->tx_queue_len;
+	adapter->link_speed = 0;
+	adapter->link_duplex = 0;
+	netif_carrier_off(netdev);
+
+	e1000_reset(adapter);
+	e1000_clean_all_tx_rings(adapter);
+	e1000_clean_all_rx_rings(adapter);
+}
+
+void e1000_reinit_locked(struct e1000_adapter *adapter)
+{
+	WARN_ON(in_interrupt());
+	while (test_and_set_bit(__E1000_RESETTING, &adapter->flags))
+		msleep(1);
+	e1000_down(adapter);
+	e1000_up(adapter);
+	clear_bit(__E1000_RESETTING, &adapter->flags);
+}
+
+void e1000_reset(struct e1000_adapter *adapter)
+{
+	struct e1000_hw *hw = &adapter->hw;
+	u32 pba = 0, tx_space, min_tx_space, min_rx_space;
+	u16 fc_high_water_mark = E1000_FC_HIGH_DIFF;
+	bool legacy_pba_adjust = false;
+
+	/* Repartition Pba for greater than 9k mtu
+	 * To take effect CTRL.RST is required.
+	 */
+
+	switch (hw->mac_type) {
+	case e1000_82542_rev2_0:
+	case e1000_82542_rev2_1:
+	case e1000_82543:
+	case e1000_82544:
+	case e1000_82540:
+	case e1000_82541:
+	case e1000_82541_rev_2:
+		legacy_pba_adjust = true;
+		pba = E1000_PBA_48K;
+		break;
+	case e1000_82545:
+	case e1000_82545_rev_3:
+	case e1000_82546:
+	case e1000_82546_rev_3:
+		pba = E1000_PBA_48K;
+		break;
+	case e1000_82547:
+	case e1000_82547_rev_2:
+		legacy_pba_adjust = true;
+		pba = E1000_PBA_30K;
+		break;
+	case e1000_82571:
+	case e1000_82572:
+	case e1000_80003es2lan:
+		pba = E1000_PBA_38K;
+		break;
+	case e1000_82573:
+		pba = E1000_PBA_20K;
+		break;
+	case e1000_ich8lan:
+		pba = E1000_PBA_8K;
+	case e1000_undefined:
+	case e1000_num_macs:
+		break;
+	}
+
+	if (legacy_pba_adjust) {
+		if (adapter->netdev->mtu > E1000_RXBUFFER_8192)
+			pba -= 8; /* allocate more FIFO for Tx */
+
+		if (hw->mac_type == e1000_82547) {
+			adapter->tx_fifo_head = 0;
+			adapter->tx_head_addr = pba << E1000_TX_HEAD_ADDR_SHIFT;
+			adapter->tx_fifo_size =
+				(E1000_PBA_40K - pba) << E1000_PBA_BYTES_SHIFT;
+			atomic_set(&adapter->tx_fifo_stall, 0);
+		}
+	} else if (hw->max_frame_size > MAXIMUM_ETHERNET_FRAME_SIZE) {
+		/* adjust PBA for jumbo frames */
+		ew32(PBA, pba);
+
+		/* To maintain wire speed transmits, the Tx FIFO should be
+		 * large enough to accommodate two full transmit packets,
+		 * rounded up to the next 1KB and expressed in KB.  Likewise,
+		 * the Rx FIFO should be large enough to accommodate at least
+		 * one full receive packet and is similarly rounded up and
+		 * expressed in KB. */
+		pba = er32(PBA);
+		/* upper 16 bits hold the Tx packet buffer allocation size in KB */
+		tx_space = pba >> 16;
+		/* lower 16 bits hold the Rx packet buffer allocation size in KB */
+		pba &= 0xffff;
+		/* don't include ethernet FCS because hardware appends/strips */
+		min_rx_space = adapter->netdev->mtu + ENET_HEADER_SIZE +
+		               VLAN_TAG_SIZE;
+		min_tx_space = min_rx_space;
+		min_tx_space *= 2;
+		min_tx_space = ALIGN(min_tx_space, 1024);
+		min_tx_space >>= 10;
+		min_rx_space = ALIGN(min_rx_space, 1024);
+		min_rx_space >>= 10;
+
+		/* If current Tx allocation is less than the min Tx FIFO size,
+		 * and the min Tx FIFO size is less than the current Rx FIFO
+		 * allocation, take space away from current Rx allocation */
+		if (tx_space < min_tx_space &&
+		    ((min_tx_space - tx_space) < pba)) {
+			pba = pba - (min_tx_space - tx_space);
+
+			/* PCI/PCIx hardware has PBA alignment constraints */
+			switch (hw->mac_type) {
+			case e1000_82545 ... e1000_82546_rev_3:
+				pba &= ~(E1000_PBA_8K - 1);
+				break;
+			default:
+				break;
+			}
+
+			/* if short on rx space, rx wins and must trump tx
+			 * adjustment or use Early Receive if available */
+			if (pba < min_rx_space) {
+				switch (hw->mac_type) {
+				case e1000_82573:
+					/* ERT enabled in e1000_configure_rx */
+					break;
+				default:
+					pba = min_rx_space;
+					break;
+				}
+			}
+		}
+	}
+
+	ew32(PBA, pba);
+
+	/* flow control settings */
+	/* Set the FC high water mark to 90% of the FIFO size.
+	 * Required to clear last 3 LSB */
+	fc_high_water_mark = ((pba * 9216)/10) & 0xFFF8;
+	/* We can't use 90% on small FIFOs because the remainder
+	 * would be less than 1 full frame.  In this case, we size
+	 * it to allow at least a full frame above the high water
+	 *  mark. */
+	if (pba < E1000_PBA_16K)
+		fc_high_water_mark = (pba * 1024) - 1600;
+
+	hw->fc_high_water = fc_high_water_mark;
+	hw->fc_low_water = fc_high_water_mark - 8;
+	if (hw->mac_type == e1000_80003es2lan)
+		hw->fc_pause_time = 0xFFFF;
+	else
+		hw->fc_pause_time = E1000_FC_PAUSE_TIME;
+	hw->fc_send_xon = 1;
+	hw->fc = hw->original_fc;
+
+	/* Allow time for pending master requests to run */
+	e1000_reset_hw(hw);
+	if (hw->mac_type >= e1000_82544)
+		ew32(WUC, 0);
+
+	if (e1000_init_hw(hw))
+		DPRINTK(PROBE, ERR, "Hardware Error\n");
+	e1000_update_mng_vlan(adapter);
+
+	/* if (adapter->hwflags & HWFLAGS_PHY_PWR_BIT) { */
+	if (hw->mac_type >= e1000_82544 &&
+	    hw->mac_type <= e1000_82547_rev_2 &&
+	    hw->autoneg == 1 &&
+	    hw->autoneg_advertised == ADVERTISE_1000_FULL) {
+		u32 ctrl = er32(CTRL);
+		/* clear phy power management bit if we are in gig only mode,
+		 * which if enabled will attempt negotiation to 100Mb, which
+		 * can cause a loss of link at power off or driver unload */
+		ctrl &= ~E1000_CTRL_SWDPIN3;
+		ew32(CTRL, ctrl);
+	}
+
+	/* Enable h/w to recognize an 802.1Q VLAN Ethernet packet */
+	ew32(VET, ETHERNET_IEEE_VLAN_TYPE);
+
+	e1000_reset_adaptive(hw);
+	e1000_phy_get_info(hw, &adapter->phy_info);
+
+	if (!adapter->smart_power_down &&
+	    (hw->mac_type == e1000_82571 ||
+	     hw->mac_type == e1000_82572)) {
+		u16 phy_data = 0;
+		/* speed up time to link by disabling smart power down, ignore
+		 * the return value of this function because there is nothing
+		 * different we would do if it failed */
+		e1000_read_phy_reg(hw, IGP02E1000_PHY_POWER_MGMT,
+		                   &phy_data);
+		phy_data &= ~IGP02E1000_PM_SPD;
+		e1000_write_phy_reg(hw, IGP02E1000_PHY_POWER_MGMT,
+		                    phy_data);
+	}
+
+	e1000_release_manageability(adapter);
+}
+
+/**
+ *  Dump the eeprom for users having checksum issues
+ **/
+static void e1000_dump_eeprom(struct e1000_adapter *adapter)
+{
+	struct net_device *netdev = adapter->netdev;
+	struct ethtool_eeprom eeprom;
+	const struct ethtool_ops *ops = netdev->ethtool_ops;
+	u8 *data;
+	int i;
+	u16 csum_old, csum_new = 0;
+
+	eeprom.len = ops->get_eeprom_len(netdev);
+	eeprom.offset = 0;
+
+	data = kmalloc(eeprom.len, GFP_KERNEL);
+	if (!data) {
+		printk(KERN_ERR "Unable to allocate memory to dump EEPROM"
+		       " data\n");
+		return;
+	}
+
+	ops->get_eeprom(netdev, &eeprom, data);
+
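+	/* read back the stored checksum word, then recompute the 16-bit word
+	 * sum over the checksummed region to derive what the checksum word
+	 * should have been */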
+	csum_old = (data[EEPROM_CHECKSUM_REG * 2]) +
+		   (data[EEPROM_CHECKSUM_REG * 2 + 1] << 8);
+	for (i = 0; i < EEPROM_CHECKSUM_REG * 2; i += 2)
+		csum_new += data[i] + (data[i + 1] << 8);
+	csum_new = EEPROM_SUM - csum_new;
+
+	printk(KERN_ERR "/*********************/\n");
+	printk(KERN_ERR "Current EEPROM Checksum : 0x%04x\n", csum_old);
+	printk(KERN_ERR "Calculated              : 0x%04x\n", csum_new);
+
+	printk(KERN_ERR "Offset    Values\n");
+	printk(KERN_ERR "========  ======\n");
+	print_hex_dump(KERN_ERR, "", DUMP_PREFIX_OFFSET, 16, 1, data, 128, 0);
+
+	printk(KERN_ERR "Include this output when contacting your support "
+	       "provider.\n");
+	printk(KERN_ERR "This is not a software error! Something bad "
+	       "happened to your hardware or\n");
+	printk(KERN_ERR "EEPROM image. Ignoring this "
+	       "problem could result in further problems,\n");
+	printk(KERN_ERR "possibly loss of data, corruption or system hangs!\n");
+	printk(KERN_ERR "The MAC Address will be reset to 00:00:00:00:00:00, "
+	       "which is invalid\n");
+	printk(KERN_ERR "and requires you to set the proper MAC "
+	       "address manually before continuing\n");
+	printk(KERN_ERR "to enable this network device.\n");
+	printk(KERN_ERR "Please inspect the EEPROM dump and report the issue "
+	       "to your hardware vendor\n");
+	printk(KERN_ERR "or Intel Customer Support.\n");
+	printk(KERN_ERR "/*********************/\n");
+
+	kfree(data);
+}
+
+/**
+ * e1000_is_need_ioport - determine if an adapter needs ioport resources or not
+ * @pdev: PCI device information struct
+ *
+ * Return true if an adapter needs ioport resources
+ **/
+static int e1000_is_need_ioport(struct pci_dev *pdev)
+{
+	switch (pdev->device) {
+	case E1000_DEV_ID_82540EM:
+	case E1000_DEV_ID_82540EM_LOM:
+	case E1000_DEV_ID_82540EP:
+	case E1000_DEV_ID_82540EP_LOM:
+	case E1000_DEV_ID_82540EP_LP:
+	case E1000_DEV_ID_82541EI:
+	case E1000_DEV_ID_82541EI_MOBILE:
+	case E1000_DEV_ID_82541ER:
+	case E1000_DEV_ID_82541ER_LOM:
+	case E1000_DEV_ID_82541GI:
+	case E1000_DEV_ID_82541GI_LF:
+	case E1000_DEV_ID_82541GI_MOBILE:
+	case E1000_DEV_ID_82544EI_COPPER:
+	case E1000_DEV_ID_82544EI_FIBER:
+	case E1000_DEV_ID_82544GC_COPPER:
+	case E1000_DEV_ID_82544GC_LOM:
+	case E1000_DEV_ID_82545EM_COPPER:
+	case E1000_DEV_ID_82545EM_FIBER:
+	case E1000_DEV_ID_82546EB_COPPER:
+	case E1000_DEV_ID_82546EB_FIBER:
+	case E1000_DEV_ID_82546EB_QUAD_COPPER:
+		return true;
+	default:
+		return false;
+	}
+}
+
+static const struct net_device_ops e1000_netdev_ops = {
+	.ndo_open		= e1000_open,
+	.ndo_stop		= e1000_close,
+	.ndo_start_xmit		= e1000_xmit_frame,
+	.ndo_get_stats		= e1000_get_stats,
+	.ndo_set_rx_mode	= e1000_set_rx_mode,
+	.ndo_set_mac_address	= e1000_set_mac,
+	.ndo_tx_timeout 	= e1000_tx_timeout,
+	.ndo_change_mtu		= e1000_change_mtu,
+	.ndo_do_ioctl		= e1000_ioctl,
+	.ndo_validate_addr	= eth_validate_addr,
+
+	.ndo_vlan_rx_register	= e1000_vlan_rx_register,
+	.ndo_vlan_rx_add_vid	= e1000_vlan_rx_add_vid,
+	.ndo_vlan_rx_kill_vid	= e1000_vlan_rx_kill_vid,
+#ifdef CONFIG_NET_POLL_CONTROLLER
+	.ndo_poll_controller	= e1000_netpoll,
+#endif
+};
+
+/**
+ * e1000_probe - Device Initialization Routine
+ * @pdev: PCI device information struct
+ * @ent: entry in e1000_pci_tbl
+ *
+ * Returns 0 on success, negative on failure
+ *
+ * e1000_probe initializes an adapter identified by a pci_dev structure.
+ * The OS initialization, configuring of the adapter private structure,
+ * and a hardware reset occur.
+ **/
+static int __devinit e1000_probe(struct pci_dev *pdev,
+				 const struct pci_device_id *ent)
+{
+	struct net_device *netdev;
+	struct e1000_adapter *adapter;
+	struct e1000_hw *hw;
+
+	static int cards_found = 0;
+	static int global_quad_port_a = 0; /* global ksp3 port a indication */
+	int i, err, pci_using_dac;
+	u16 eeprom_data = 0;
+	u16 eeprom_apme_mask = E1000_EEPROM_APME;
+	int bars, need_ioport;
+
+	/* do not allocate ioport bars when not needed */
+	need_ioport = e1000_is_need_ioport(pdev);
+	if (need_ioport) {
+		bars = pci_select_bars(pdev, IORESOURCE_MEM | IORESOURCE_IO);
+		err = pci_enable_device(pdev);
+	} else {
+		bars = pci_select_bars(pdev, IORESOURCE_MEM);
+		err = pci_enable_device_mem(pdev);
+	}
+	if (err)
+		return err;
+
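+	/* prefer 64-bit DMA; fall back to a 32-bit mask if the platform
+	 * cannot provide it */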
+	if (!pci_set_dma_mask(pdev, DMA_BIT_MASK(64)) &&
+	    !pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64))) {
+		pci_using_dac = 1;
+	} else {
+		err = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
+		if (err) {
+			err = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(32));
+			if (err) {
+				E1000_ERR("No usable DMA configuration, "
+					  "aborting\n");
+				goto err_dma;
+			}
+		}
+		pci_using_dac = 0;
+	}
+
+	err = pci_request_selected_regions(pdev, bars, e1000_driver_name);
+	if (err)
+		goto err_pci_reg;
+
+	pci_set_master(pdev);
+
+	err = -ENOMEM;
+	netdev = alloc_etherdev(sizeof(struct e1000_adapter));
+	if (!netdev)
+		goto err_alloc_etherdev;
+
+	SET_NETDEV_DEV(netdev, &pdev->dev);
+
+	pci_set_drvdata(pdev, netdev);
+	adapter = netdev_priv(netdev);
+	adapter->netdev = netdev;
+	adapter->pdev = pdev;
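+	/* expand the numeric debug level into a NETIF_MSG_* bitmask enabling
+	 * every message type below that level */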
+	adapter->msg_enable = (1 << debug) - 1;
+	adapter->bars = bars;
+	adapter->need_ioport = need_ioport;
+
+	hw = &adapter->hw;
+	hw->back = adapter;
+
+	err = -EIO;
+	hw->hw_addr = pci_ioremap_bar(pdev, BAR_0);
+	if (!hw->hw_addr)
+		goto err_ioremap;
+
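+	/* if this adapter needs I/O port access, locate the first I/O BAR
+	 * and record its base address */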
+	if (adapter->need_ioport) {
+		for (i = BAR_1; i <= BAR_5; i++) {
+			if (pci_resource_len(pdev, i) == 0)
+				continue;
+			if (pci_resource_flags(pdev, i) & IORESOURCE_IO) {
+				hw->io_base = pci_resource_start(pdev, i);
+				break;
+			}
+		}
+	}
+
+	netdev->netdev_ops = &e1000_netdev_ops;
+	e1000_set_ethtool_ops(netdev);
+	netdev->watchdog_timeo = 5 * HZ;
+	netif_napi_add(netdev, &adapter->napi, e1000_clean, 64);
+
+	strncpy(netdev->name, pci_name(pdev), sizeof(netdev->name) - 1);
+
+	adapter->bd_number = cards_found;
+
+	/* setup the private structure */
+
+	err = e1000_sw_init(adapter);
+	if (err)
+		goto err_sw_init;
+
+	err = -EIO;
+	/* Flash BAR mapping must happen after e1000_sw_init
+	 * because it depends on mac_type */
+	if ((hw->mac_type == e1000_ich8lan) &&
+	   (pci_resource_flags(pdev, 1) & IORESOURCE_MEM)) {
+		hw->flash_address = pci_ioremap_bar(pdev, 1);
+		if (!hw->flash_address)
+			goto err_flashmap;
+	}
+
+	if (e1000_check_phy_reset_block(hw))
+		DPRINTK(PROBE, INFO, "PHY reset is blocked due to SOL/IDER session.\n");
+
+	if (hw->mac_type >= e1000_82543) {
+		netdev->features = NETIF_F_SG |
+				   NETIF_F_HW_CSUM |
+				   NETIF_F_HW_VLAN_TX |
+				   NETIF_F_HW_VLAN_RX |
+				   NETIF_F_HW_VLAN_FILTER;
+		if (hw->mac_type == e1000_ich8lan)
+			netdev->features &= ~NETIF_F_HW_VLAN_FILTER;
+	}
+
+	if ((hw->mac_type >= e1000_82544) &&
+	   (hw->mac_type != e1000_82547))
+		netdev->features |= NETIF_F_TSO;
+
+	if (hw->mac_type > e1000_82547_rev_2)
+		netdev->features |= NETIF_F_TSO6;
+	if (pci_using_dac)
+		netdev->features |= NETIF_F_HIGHDMA;
+
+	netdev->vlan_features |= NETIF_F_TSO;
+	netdev->vlan_features |= NETIF_F_TSO6;
+	netdev->vlan_features |= NETIF_F_HW_CSUM;
+	netdev->vlan_features |= NETIF_F_SG;
+
+	adapter->en_mng_pt = e1000_enable_mng_pass_thru(hw);
+
+	/* initialize eeprom parameters */
+	if (e1000_init_eeprom_params(hw)) {
+		E1000_ERR("EEPROM initialization failed\n");
+		goto err_eeprom;
+	}
+
+	/* before reading the EEPROM, reset the controller to
+	 * put the device in a known good starting state */
+
+	e1000_reset_hw(hw);
+
+	/* make sure the EEPROM is good */
+	if (e1000_validate_eeprom_checksum(hw) < 0) {
+		DPRINTK(PROBE, ERR, "The EEPROM Checksum Is Not Valid\n");
+		e1000_dump_eeprom(adapter);
+		/*
+		 * set MAC address to all zeroes to invalidate and temporarily
+		 * disable this device for the user. This blocks regular
+		 * traffic while still permitting ethtool ioctls from reaching
+		 * the hardware as well as allowing the user to run the
+		 * interface after manually setting a hw addr using
+		 * `ip link set address`
+		 */
+		memset(hw->mac_addr, 0, netdev->addr_len);
+	} else {
+		/* copy the MAC address out of the EEPROM */
+		if (e1000_read_mac_addr(hw))
+			DPRINTK(PROBE, ERR, "EEPROM Read Error\n");
+	}
+	/* don't block initialization here due to bad MAC address */
+	memcpy(netdev->dev_addr, hw->mac_addr, netdev->addr_len);
+	memcpy(netdev->perm_addr, hw->mac_addr, netdev->addr_len);
+
+	if (!is_valid_ether_addr(netdev->perm_addr))
+		DPRINTK(PROBE, ERR, "Invalid MAC Address\n");
+
+	e1000_get_bus_info(hw);
+
+	init_timer(&adapter->tx_fifo_stall_timer);
+	adapter->tx_fifo_stall_timer.function = &e1000_82547_tx_fifo_stall;
+	adapter->tx_fifo_stall_timer.data = (unsigned long)adapter;
+
+	init_timer(&adapter->watchdog_timer);
+	adapter->watchdog_timer.function = &e1000_watchdog;
+	adapter->watchdog_timer.data = (unsigned long) adapter;
+
+	init_timer(&adapter->phy_info_timer);
+	adapter->phy_info_timer.function = &e1000_update_phy_info;
+	adapter->phy_info_timer.data = (unsigned long)adapter;
+
+	INIT_WORK(&adapter->reset_task, e1000_reset_task);
+
+	e1000_check_options(adapter);
+
+	/* Initial Wake on LAN setting
+	 * If APM wake is enabled in the EEPROM,
+	 * enable the ACPI Magic Packet filter
+	 */
+
+	switch (hw->mac_type) {
+	case e1000_82542_rev2_0:
+	case e1000_82542_rev2_1:
+	case e1000_82543:
+		break;
+	case e1000_82544:
+		e1000_read_eeprom(hw,
+			EEPROM_INIT_CONTROL2_REG, 1, &eeprom_data);
+		eeprom_apme_mask = E1000_EEPROM_82544_APM;
+		break;
+	case e1000_ich8lan:
+		e1000_read_eeprom(hw,
+			EEPROM_INIT_CONTROL1_REG, 1, &eeprom_data);
+		eeprom_apme_mask = E1000_EEPROM_ICH8_APME;
+		break;
+	case e1000_82546:
+	case e1000_82546_rev_3:
+	case e1000_82571:
+	case e1000_80003es2lan:
+		if (er32(STATUS) & E1000_STATUS_FUNC_1){
+			e1000_read_eeprom(hw,
+				EEPROM_INIT_CONTROL3_PORT_B, 1, &eeprom_data);
+			break;
+		}
+		/* Fall Through */
+	default:
+		e1000_read_eeprom(hw,
+			EEPROM_INIT_CONTROL3_PORT_A, 1, &eeprom_data);
+		break;
+	}
+	if (eeprom_data & eeprom_apme_mask)
+		adapter->eeprom_wol |= E1000_WUFC_MAG;
+
+	/* now that we have the eeprom settings, apply the special cases
+	 * where the eeprom may be wrong or the board simply won't support
+	 * wake on lan on a particular port */
+	switch (pdev->device) {
+	case E1000_DEV_ID_82546GB_PCIE:
+		adapter->eeprom_wol = 0;
+		break;
+	case E1000_DEV_ID_82546EB_FIBER:
+	case E1000_DEV_ID_82546GB_FIBER:
+	case E1000_DEV_ID_82571EB_FIBER:
+		/* Wake events only supported on port A for dual fiber
+		 * regardless of eeprom setting */
+		if (er32(STATUS) & E1000_STATUS_FUNC_1)
+			adapter->eeprom_wol = 0;
+		break;
+	case E1000_DEV_ID_82546GB_QUAD_COPPER_KSP3:
+	case E1000_DEV_ID_82571EB_QUAD_COPPER:
+	case E1000_DEV_ID_82571EB_QUAD_FIBER:
+	case E1000_DEV_ID_82571EB_QUAD_COPPER_LOWPROFILE:
+	case E1000_DEV_ID_82571PT_QUAD_COPPER:
+		/* if quad port adapter, disable WoL on all but port A */
+		if (global_quad_port_a != 0)
+			adapter->eeprom_wol = 0;
+		else
+			adapter->quad_port_a = 1;
+		/* Reset for multiple quad port adapters */
+		if (++global_quad_port_a == 4)
+			global_quad_port_a = 0;
+		break;
+	}
+
+	/* initialize the wol settings based on the eeprom settings */
+	adapter->wol = adapter->eeprom_wol;
+	device_set_wakeup_enable(&adapter->pdev->dev, adapter->wol);
+
+	/* print bus type/speed/width info */
+	DPRINTK(PROBE, INFO, "(PCI%s:%s:%s) ",
+		((hw->bus_type == e1000_bus_type_pcix) ? "-X" :
+		 (hw->bus_type == e1000_bus_type_pci_express ? " Express":"")),
+		((hw->bus_speed == e1000_bus_speed_2500) ? "2.5Gb/s" :
+		 (hw->bus_speed == e1000_bus_speed_133) ? "133MHz" :
+		 (hw->bus_speed == e1000_bus_speed_120) ? "120MHz" :
+		 (hw->bus_speed == e1000_bus_speed_100) ? "100MHz" :
+		 (hw->bus_speed == e1000_bus_speed_66) ? "66MHz" : "33MHz"),
+		((hw->bus_width == e1000_bus_width_64) ? "64-bit" :
+		 (hw->bus_width == e1000_bus_width_pciex_4) ? "Width x4" :
+		 (hw->bus_width == e1000_bus_width_pciex_1) ? "Width x1" :
+		 "32-bit"));
+
+	printk("%pM\n", netdev->dev_addr);
+
+	if (hw->bus_type == e1000_bus_type_pci_express) {
+		DPRINTK(PROBE, WARNING, "This device (id %04x:%04x) will no "
+			"longer be supported by this driver in the future.\n",
+			pdev->vendor, pdev->device);
+		DPRINTK(PROBE, WARNING, "please use the \"e1000e\" "
+			"driver instead.\n");
+	}
+
+	/* reset the hardware with the new settings */
+	e1000_reset(adapter);
+
+	/* If the controller is 82573 and f/w is AMT, do not set
+	 * DRV_LOAD until the interface is up.  For all other cases,
+	 * let the f/w know that the h/w is now under the control
+	 * of the driver. */
+	if (hw->mac_type != e1000_82573 ||
+	    !e1000_check_mng_mode(hw))
+		e1000_get_hw_control(adapter);
+
+	strcpy(netdev->name, "eth%d");
+	err = register_netdev(netdev);
+	if (err)
+		goto err_register;
+
+	/* carrier off reporting is important to ethtool even BEFORE open */
+	netif_carrier_off(netdev);
+
+	DPRINTK(PROBE, INFO, "Intel(R) PRO/1000 Network Connection\n");
+
+	cards_found++;
+	return 0;
+
+err_register:
+	e1000_release_hw_control(adapter);
+err_eeprom:
+	if (!e1000_check_phy_reset_block(hw))
+		e1000_phy_hw_reset(hw);
+
+	if (hw->flash_address)
+		iounmap(hw->flash_address);
+err_flashmap:
+	kfree(adapter->tx_ring);
+	kfree(adapter->rx_ring);
+err_sw_init:
+	iounmap(hw->hw_addr);
+err_ioremap:
+	free_netdev(netdev);
+err_alloc_etherdev:
+	pci_release_selected_regions(pdev, bars);
+err_pci_reg:
+err_dma:
+	pci_disable_device(pdev);
+	return err;
+}
+
+/**
+ * e1000_remove - Device Removal Routine
+ * @pdev: PCI device information struct
+ *
+ * e1000_remove is called by the PCI subsystem to alert the driver
+ * that it should release a PCI device.  This could be caused by a
+ * Hot-Plug event, or because the driver is going to be removed from
+ * memory.
+ **/
+
+static void __devexit e1000_remove(struct pci_dev *pdev)
+{
+	struct net_device *netdev = pci_get_drvdata(pdev);
+	struct e1000_adapter *adapter = netdev_priv(netdev);
+	struct e1000_hw *hw = &adapter->hw;
+
+	cancel_work_sync(&adapter->reset_task);
+
+	e1000_release_manageability(adapter);
+
+	/* Release control of h/w to f/w.  If f/w is AMT enabled, this
+	 * would have already happened in close and is redundant. */
+	e1000_release_hw_control(adapter);
+
+	unregister_netdev(netdev);
+
+	if (!e1000_check_phy_reset_block(hw))
+		e1000_phy_hw_reset(hw);
+
+	kfree(adapter->tx_ring);
+	kfree(adapter->rx_ring);
+
+	iounmap(hw->hw_addr);
+	if (hw->flash_address)
+		iounmap(hw->flash_address);
+	pci_release_selected_regions(pdev, adapter->bars);
+
+	free_netdev(netdev);
+
+	pci_disable_device(pdev);
+}
+
+/**
+ * e1000_sw_init - Initialize general software structures (struct e1000_adapter)
+ * @adapter: board private structure to initialize
+ *
+ * e1000_sw_init initializes the Adapter private data structure.
+ * Fields are initialized based on PCI device information and
+ * OS network device settings (MTU size).
+ **/
+
+static int __devinit e1000_sw_init(struct e1000_adapter *adapter)
+{
+	struct e1000_hw *hw = &adapter->hw;
+	struct net_device *netdev = adapter->netdev;
+	struct pci_dev *pdev = adapter->pdev;
+
+	/* PCI config space info */
+
+	hw->vendor_id = pdev->vendor;
+	hw->device_id = pdev->device;
+	hw->subsystem_vendor_id = pdev->subsystem_vendor;
+	hw->subsystem_id = pdev->subsystem_device;
+	hw->revision_id = pdev->revision;
+
+	pci_read_config_word(pdev, PCI_COMMAND, &hw->pci_cmd_word);
+
+	adapter->rx_buffer_len = MAXIMUM_ETHERNET_VLAN_SIZE;
+	hw->max_frame_size = netdev->mtu +
+			     ENET_HEADER_SIZE + ETHERNET_FCS_SIZE;
+	hw->min_frame_size = MINIMUM_ETHERNET_FRAME_SIZE;
+
+	/* identify the MAC */
+
+	if (e1000_set_mac_type(hw)) {
+		DPRINTK(PROBE, ERR, "Unknown MAC Type\n");
+		return -EIO;
+	}
+
+	switch (hw->mac_type) {
+	default:
+		break;
+	case e1000_82541:
+	case e1000_82547:
+	case e1000_82541_rev_2:
+	case e1000_82547_rev_2:
+		hw->phy_init_script = 1;
+		break;
+	}
+
+	e1000_set_media_type(hw);
+
+	hw->wait_autoneg_complete = false;
+	hw->tbi_compatibility_en = true;
+	hw->adaptive_ifs = true;
+
+	/* Copper options */
+
+	if (hw->media_type == e1000_media_type_copper) {
+		hw->mdix = AUTO_ALL_MODES;
+		hw->disable_polarity_correction = false;
+		hw->master_slave = E1000_MASTER_SLAVE;
+	}
+
+	adapter->num_tx_queues = 1;
+	adapter->num_rx_queues = 1;
+
+	if (e1000_alloc_queues(adapter)) {
+		DPRINTK(PROBE, ERR, "Unable to allocate memory for queues\n");
+		return -ENOMEM;
+	}
+
+	/* Explicitly disable IRQ since the NIC can be in any state. */
+	e1000_irq_disable(adapter);
+
+	spin_lock_init(&adapter->stats_lock);
+
+	set_bit(__E1000_DOWN, &adapter->flags);
+
+	return 0;
+}
+
+/**
+ * e1000_alloc_queues - Allocate memory for all rings
+ * @adapter: board private structure to initialize
+ *
+ * We allocate one ring per queue at run-time since we don't know the
+ * number of queues at compile-time.
+ **/
+
+static int __devinit e1000_alloc_queues(struct e1000_adapter *adapter)
+{
+	adapter->tx_ring = kcalloc(adapter->num_tx_queues,
+	                           sizeof(struct e1000_tx_ring), GFP_KERNEL);
+	if (!adapter->tx_ring)
+		return -ENOMEM;
+
+	adapter->rx_ring = kcalloc(adapter->num_rx_queues,
+	                           sizeof(struct e1000_rx_ring), GFP_KERNEL);
+	if (!adapter->rx_ring) {
+		kfree(adapter->tx_ring);
+		return -ENOMEM;
+	}
+
+	return E1000_SUCCESS;
+}
+
+/**
+ * e1000_open - Called when a network interface is made active
+ * @netdev: network interface device structure
+ *
+ * Returns 0 on success, negative value on failure
+ *
+ * The open entry point is called when a network interface is made
+ * active by the system (IFF_UP).  At this point all resources needed
+ * for transmit and receive operations are allocated, the interrupt
+ * handler is registered with the OS, the watchdog timer is started,
+ * and the stack is notified that the interface is ready.
+ **/
+
+static int e1000_open(struct net_device *netdev)
+{
+	struct e1000_adapter *adapter = netdev_priv(netdev);
+	struct e1000_hw *hw = &adapter->hw;
+	int err;
+
+	/* disallow open during test */
+	if (test_bit(__E1000_TESTING, &adapter->flags))
+		return -EBUSY;
+
+	netif_carrier_off(netdev);
+
+	/* allocate transmit descriptors */
+	err = e1000_setup_all_tx_resources(adapter);
+	if (err)
+		goto err_setup_tx;
+
+	/* allocate receive descriptors */
+	err = e1000_setup_all_rx_resources(adapter);
+	if (err)
+		goto err_setup_rx;
+
+	e1000_power_up_phy(adapter);
+
+	adapter->mng_vlan_id = E1000_MNG_VLAN_NONE;
+	if ((hw->mng_cookie.status &
+			  E1000_MNG_DHCP_COOKIE_STATUS_VLAN_SUPPORT)) {
+		e1000_update_mng_vlan(adapter);
+	}
+
+	/* If AMT is enabled, let the firmware know that the network
+	 * interface is now open */
+	if (hw->mac_type == e1000_82573 &&
+	    e1000_check_mng_mode(hw))
+		e1000_get_hw_control(adapter);
+
+	/* before we allocate an interrupt, we must be ready to handle it.
+	 * Setting DEBUG_SHIRQ in the kernel makes it fire an interrupt
+	 * as soon as we call request_irq, so we have to set up our
+	 * clean_rx handler before we do so.  */
+	e1000_configure(adapter);
+
+	err = e1000_request_irq(adapter);
+	if (err)
+		goto err_req_irq;
+
+	/* From here on the code is the same as e1000_up() */
+	clear_bit(__E1000_DOWN, &adapter->flags);
+
+	napi_enable(&adapter->napi);
+
+	e1000_irq_enable(adapter);
+
+	netif_start_queue(netdev);
+
+	/* fire a link status change interrupt to start the watchdog */
+	ew32(ICS, E1000_ICS_LSC);
+
+	return E1000_SUCCESS;
+
+err_req_irq:
+	e1000_release_hw_control(adapter);
+	e1000_power_down_phy(adapter);
+	e1000_free_all_rx_resources(adapter);
+err_setup_rx:
+	e1000_free_all_tx_resources(adapter);
+err_setup_tx:
+	e1000_reset(adapter);
+
+	return err;
+}
+
+/**
+ * e1000_close - Disables a network interface
+ * @netdev: network interface device structure
+ *
+ * Returns 0, this is not allowed to fail
+ *
+ * The close entry point is called when an interface is de-activated
+ * by the OS.  The hardware is still under the driver's control, but
+ * needs to be disabled.  A global MAC reset is issued to stop the
+ * hardware, and all transmit and receive resources are freed.
+ **/
+
+static int e1000_close(struct net_device *netdev)
+{
+	struct e1000_adapter *adapter = netdev_priv(netdev);
+	struct e1000_hw *hw = &adapter->hw;
+
+	WARN_ON(test_bit(__E1000_RESETTING, &adapter->flags));
+	e1000_down(adapter);
+	e1000_power_down_phy(adapter);
+	e1000_free_irq(adapter);
+
+	e1000_free_all_tx_resources(adapter);
+	e1000_free_all_rx_resources(adapter);
+
+	/* kill manageability vlan ID if supported, but not if a vlan with
+	 * the same ID is registered on the host OS (let 8021q kill it) */
+	if ((hw->mng_cookie.status &
+			  E1000_MNG_DHCP_COOKIE_STATUS_VLAN_SUPPORT) &&
+	     !(adapter->vlgrp &&
+	       vlan_group_get_device(adapter->vlgrp, adapter->mng_vlan_id))) {
+		e1000_vlan_rx_kill_vid(netdev, adapter->mng_vlan_id);
+	}
+
+	/* If AMT is enabled, let the firmware know that the network
+	 * interface is now closed */
+	if (hw->mac_type == e1000_82573 &&
+	    e1000_check_mng_mode(hw))
+		e1000_release_hw_control(adapter);
+
+	return 0;
+}
+
+/**
+ * e1000_check_64k_bound - check that memory doesn't cross 64kB boundary
+ * @adapter: address of board private structure
+ * @start: address of beginning of memory
+ * @len: length of memory
+ **/
+static bool e1000_check_64k_bound(struct e1000_adapter *adapter, void *start,
+				  unsigned long len)
+{
+	struct e1000_hw *hw = &adapter->hw;
+	unsigned long begin = (unsigned long)start;
+	unsigned long end = begin + len;
+
+	/* First-revision 82545 and 82546 parts must not allow any memory
+	 * write location to cross a 64k boundary due to errata 23 */
+	if (hw->mac_type == e1000_82545 ||
+	    hw->mac_type == e1000_82546) {
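+		/* begin and (end - 1) share the same upper address bits only
+		 * when the range stays inside a single 64k region */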
+		return ((begin ^ (end - 1)) >> 16) != 0 ? false : true;
+	}
+
+	return true;
+}
+
+/**
+ * e1000_setup_tx_resources - allocate Tx resources (Descriptors)
+ * @adapter: board private structure
+ * @txdr:    tx descriptor ring (for a specific queue) to setup
+ *
+ * Return 0 on success, negative on failure
+ **/
+
+static int e1000_setup_tx_resources(struct e1000_adapter *adapter,
+				    struct e1000_tx_ring *txdr)
+{
+	struct pci_dev *pdev = adapter->pdev;
+	int size;
+
+	size = sizeof(struct e1000_buffer) * txdr->count;
+	txdr->buffer_info = vmalloc(size);
+	if (!txdr->buffer_info) {
+		DPRINTK(PROBE, ERR,
+		"Unable to allocate memory for the transmit descriptor ring\n");
+		return -ENOMEM;
+	}
+	memset(txdr->buffer_info, 0, size);
+
+	/* round up to nearest 4K */
+
+	txdr->size = txdr->count * sizeof(struct e1000_tx_desc);
+	txdr->size = ALIGN(txdr->size, 4096);
+
+	txdr->desc = pci_alloc_consistent(pdev, txdr->size, &txdr->dma);
+	if (!txdr->desc) {
+setup_tx_desc_die:
+		vfree(txdr->buffer_info);
+		DPRINTK(PROBE, ERR,
+		"Unable to allocate memory for the transmit descriptor ring\n");
+		return -ENOMEM;
+	}
+
+	/* Fix for errata 23, can't cross 64kB boundary */
+	if (!e1000_check_64k_bound(adapter, txdr->desc, txdr->size)) {
+		void *olddesc = txdr->desc;
+		dma_addr_t olddma = txdr->dma;
+		DPRINTK(TX_ERR, ERR, "txdr align check failed: %u bytes "
+				     "at %p\n", txdr->size, txdr->desc);
+		/* Try again, without freeing the previous */
+		txdr->desc = pci_alloc_consistent(pdev, txdr->size, &txdr->dma);
+		/* Failed allocation, critical failure */
+		if (!txdr->desc) {
+			pci_free_consistent(pdev, txdr->size, olddesc, olddma);
+			goto setup_tx_desc_die;
+		}
+
+		if (!e1000_check_64k_bound(adapter, txdr->desc, txdr->size)) {
+			/* give up */
+			pci_free_consistent(pdev, txdr->size, txdr->desc,
+					    txdr->dma);
+			pci_free_consistent(pdev, txdr->size, olddesc, olddma);
+			DPRINTK(PROBE, ERR,
+				"Unable to allocate aligned memory "
+				"for the transmit descriptor ring\n");
+			vfree(txdr->buffer_info);
+			return -ENOMEM;
+		} else {
+			/* Free old allocation, new allocation was successful */
+			pci_free_consistent(pdev, txdr->size, olddesc, olddma);
+		}
+	}
+	memset(txdr->desc, 0, txdr->size);
+
+	txdr->next_to_use = 0;
+	txdr->next_to_clean = 0;
+
+	return 0;
+}
+
+/**
+ * e1000_setup_all_tx_resources - wrapper to allocate Tx resources
+ * 				  (Descriptors) for all queues
+ * @adapter: board private structure
+ *
+ * Return 0 on success, negative on failure
+ **/
+
+int e1000_setup_all_tx_resources(struct e1000_adapter *adapter)
+{
+	int i, err = 0;
+
+	for (i = 0; i < adapter->num_tx_queues; i++) {
+		err = e1000_setup_tx_resources(adapter, &adapter->tx_ring[i]);
+		if (err) {
+			DPRINTK(PROBE, ERR,
+				"Allocation for Tx Queue %u failed\n", i);
+			for (i-- ; i >= 0; i--)
+				e1000_free_tx_resources(adapter,
+							&adapter->tx_ring[i]);
+			break;
+		}
+	}
+
+	return err;
+}
+
+/**
+ * e1000_configure_tx - Configure 8254x Transmit Unit after Reset
+ * @adapter: board private structure
+ *
+ * Configure the Tx unit of the MAC after a reset.
+ **/
+
+static void e1000_configure_tx(struct e1000_adapter *adapter)
+{
+	u64 tdba;
+	struct e1000_hw *hw = &adapter->hw;
+	u32 tdlen, tctl, tipg, tarc;
+	u32 ipgr1, ipgr2;
+
+	/* Setup the HW Tx Head and Tail descriptor pointers */
+
+	switch (adapter->num_tx_queues) {
+	case 1:
+	default:
+		tdba = adapter->tx_ring[0].dma;
+		tdlen = adapter->tx_ring[0].count *
+			sizeof(struct e1000_tx_desc);
+		ew32(TDLEN, tdlen);
+		ew32(TDBAH, (tdba >> 32));
+		ew32(TDBAL, (tdba & 0x00000000ffffffffULL));
+		ew32(TDT, 0);
+		ew32(TDH, 0);
+		adapter->tx_ring[0].tdh = ((hw->mac_type >= e1000_82543) ? E1000_TDH : E1000_82542_TDH);
+		adapter->tx_ring[0].tdt = ((hw->mac_type >= e1000_82543) ? E1000_TDT : E1000_82542_TDT);
+		break;
+	}
+
+	/* Set the default values for the Tx Inter Packet Gap timer */
+	if (hw->mac_type <= e1000_82547_rev_2 &&
+	    (hw->media_type == e1000_media_type_fiber ||
+	     hw->media_type == e1000_media_type_internal_serdes))
+		tipg = DEFAULT_82543_TIPG_IPGT_FIBER;
+	else
+		tipg = DEFAULT_82543_TIPG_IPGT_COPPER;
+
+	switch (hw->mac_type) {
+	case e1000_82542_rev2_0:
+	case e1000_82542_rev2_1:
+		tipg = DEFAULT_82542_TIPG_IPGT;
+		ipgr1 = DEFAULT_82542_TIPG_IPGR1;
+		ipgr2 = DEFAULT_82542_TIPG_IPGR2;
+		break;
+	case e1000_80003es2lan:
+		ipgr1 = DEFAULT_82543_TIPG_IPGR1;
+		ipgr2 = DEFAULT_80003ES2LAN_TIPG_IPGR2;
+		break;
+	default:
+		ipgr1 = DEFAULT_82543_TIPG_IPGR1;
+		ipgr2 = DEFAULT_82543_TIPG_IPGR2;
+		break;
+	}
+	tipg |= ipgr1 << E1000_TIPG_IPGR1_SHIFT;
+	tipg |= ipgr2 << E1000_TIPG_IPGR2_SHIFT;
+	ew32(TIPG, tipg);
+
+	/* Set the Tx Interrupt Delay register */
+
+	ew32(TIDV, adapter->tx_int_delay);
+	if (hw->mac_type >= e1000_82540)
+		ew32(TADV, adapter->tx_abs_int_delay);
+
+	/* Program the Transmit Control Register */
+
+	tctl = er32(TCTL);
+	tctl &= ~E1000_TCTL_CT;
+	tctl |= E1000_TCTL_PSP | E1000_TCTL_RTLC |
+		(E1000_COLLISION_THRESHOLD << E1000_CT_SHIFT);
+
+	if (hw->mac_type == e1000_82571 || hw->mac_type == e1000_82572) {
+		tarc = er32(TARC0);
+		/* set the speed mode bit, we'll clear it if we're not at
+		 * gigabit link later */
+		tarc |= (1 << 21);
+		ew32(TARC0, tarc);
+	} else if (hw->mac_type == e1000_80003es2lan) {
+		tarc = er32(TARC0);
+		tarc |= 1;
+		ew32(TARC0, tarc);
+		tarc = er32(TARC1);
+		tarc |= 1;
+		ew32(TARC1, tarc);
+	}
+
+	e1000_config_collision_dist(hw);
+
+	/* Setup Transmit Descriptor Settings for eop descriptor */
+	adapter->txd_cmd = E1000_TXD_CMD_EOP | E1000_TXD_CMD_IFCS;
+
+	/* only set IDE if we are delaying interrupts using the timers */
+	if (adapter->tx_int_delay)
+		adapter->txd_cmd |= E1000_TXD_CMD_IDE;
+
+	if (hw->mac_type < e1000_82543)
+		adapter->txd_cmd |= E1000_TXD_CMD_RPS;
+	else
+		adapter->txd_cmd |= E1000_TXD_CMD_RS;
+
+	/* Cache if we're 82544 running in PCI-X because we'll
+	 * need this to apply a workaround later in the send path. */
+	if (hw->mac_type == e1000_82544 &&
+	    hw->bus_type == e1000_bus_type_pcix)
+		adapter->pcix_82544 = 1;
+
+	ew32(TCTL, tctl);
+
+}
+
+/**
+ * e1000_setup_rx_resources - allocate Rx resources (Descriptors)
+ * @adapter: board private structure
+ * @rxdr:    rx descriptor ring (for a specific queue) to setup
+ *
+ * Returns 0 on success, negative on failure
+ **/
+
+static int e1000_setup_rx_resources(struct e1000_adapter *adapter,
+				    struct e1000_rx_ring *rxdr)
+{
+	struct e1000_hw *hw = &adapter->hw;
+	struct pci_dev *pdev = adapter->pdev;
+	int size, desc_len;
+
+	size = sizeof(struct e1000_buffer) * rxdr->count;
+	rxdr->buffer_info = vmalloc(size);
+	if (!rxdr->buffer_info) {
+		DPRINTK(PROBE, ERR,
+		"Unable to allocate memory for the receive descriptor ring\n");
+		return -ENOMEM;
+	}
+	memset(rxdr->buffer_info, 0, size);
+
+	if (hw->mac_type <= e1000_82547_rev_2)
+		desc_len = sizeof(struct e1000_rx_desc);
+	else
+		desc_len = sizeof(union e1000_rx_desc_packet_split);
+
+	/* Round up to nearest 4K */
+
+	rxdr->size = rxdr->count * desc_len;
+	rxdr->size = ALIGN(rxdr->size, 4096);
+
+	rxdr->desc = pci_alloc_consistent(pdev, rxdr->size, &rxdr->dma);
+
+	if (!rxdr->desc) {
+		DPRINTK(PROBE, ERR,
+		"Unable to allocate memory for the receive descriptor ring\n");
+setup_rx_desc_die:
+		vfree(rxdr->buffer_info);
+		return -ENOMEM;
+	}
+
+	/* Fix for errata 23, can't cross 64kB boundary */
+	if (!e1000_check_64k_bound(adapter, rxdr->desc, rxdr->size)) {
+		void *olddesc = rxdr->desc;
+		dma_addr_t olddma = rxdr->dma;
+		DPRINTK(RX_ERR, ERR, "rxdr align check failed: %u bytes "
+				     "at %p\n", rxdr->size, rxdr->desc);
+		/* Try again, without freeing the previous */
+		rxdr->desc = pci_alloc_consistent(pdev, rxdr->size, &rxdr->dma);
+		/* Failed allocation, critical failure */
+		if (!rxdr->desc) {
+			pci_free_consistent(pdev, rxdr->size, olddesc, olddma);
+			DPRINTK(PROBE, ERR,
+				"Unable to allocate memory "
+				"for the receive descriptor ring\n");
+			goto setup_rx_desc_die;
+		}
+
+		if (!e1000_check_64k_bound(adapter, rxdr->desc, rxdr->size)) {
+			/* give up */
+			pci_free_consistent(pdev, rxdr->size, rxdr->desc,
+					    rxdr->dma);
+			pci_free_consistent(pdev, rxdr->size, olddesc, olddma);
+			DPRINTK(PROBE, ERR,
+				"Unable to allocate aligned memory "
+				"for the receive descriptor ring\n");
+			goto setup_rx_desc_die;
+		} else {
+			/* Free old allocation, new allocation was successful */
+			pci_free_consistent(pdev, rxdr->size, olddesc, olddma);
+		}
+	}
+	memset(rxdr->desc, 0, rxdr->size);
+
+	rxdr->next_to_clean = 0;
+	rxdr->next_to_use = 0;
+
+	return 0;
+}
+
+/**
+ * e1000_setup_all_rx_resources - wrapper to allocate Rx resources
+ * 				  (Descriptors) for all queues
+ * @adapter: board private structure
+ *
+ * Return 0 on success, negative on failure
+ **/
+
+int e1000_setup_all_rx_resources(struct e1000_adapter *adapter)
+{
+	int i, err = 0;
+
+	for (i = 0; i < adapter->num_rx_queues; i++) {
+		err = e1000_setup_rx_resources(adapter, &adapter->rx_ring[i]);
+		if (err) {
+			DPRINTK(PROBE, ERR,
+				"Allocation for Rx Queue %u failed\n", i);
+			for (i-- ; i >= 0; i--)
+				e1000_free_rx_resources(adapter,
+							&adapter->rx_ring[i]);
+			break;
+		}
+	}
+
+	return err;
+}
+
+/**
+ * e1000_setup_rctl - configure the receive control registers
+ * @adapter: Board private structure
+ **/
+static void e1000_setup_rctl(struct e1000_adapter *adapter)
+{
+	struct e1000_hw *hw = &adapter->hw;
+	u32 rctl;
+
+	rctl = er32(RCTL);
+
+	rctl &= ~(3 << E1000_RCTL_MO_SHIFT);
+
+	rctl |= E1000_RCTL_EN | E1000_RCTL_BAM |
+		E1000_RCTL_LBM_NO | E1000_RCTL_RDMTS_HALF |
+		(hw->mc_filter_type << E1000_RCTL_MO_SHIFT);
+
+	if (hw->tbi_compatibility_on == 1)
+		rctl |= E1000_RCTL_SBP;
+	else
+		rctl &= ~E1000_RCTL_SBP;
+
+	if (adapter->netdev->mtu <= ETH_DATA_LEN)
+		rctl &= ~E1000_RCTL_LPE;
+	else
+		rctl |= E1000_RCTL_LPE;
+
+	/* Setup buffer sizes */
+	rctl &= ~E1000_RCTL_SZ_4096;
+	rctl |= E1000_RCTL_BSEX;
+	switch (adapter->rx_buffer_len) {
+		case E1000_RXBUFFER_256:
+			rctl |= E1000_RCTL_SZ_256;
+			rctl &= ~E1000_RCTL_BSEX;
+			break;
+		case E1000_RXBUFFER_512:
+			rctl |= E1000_RCTL_SZ_512;
+			rctl &= ~E1000_RCTL_BSEX;
+			break;
+		case E1000_RXBUFFER_1024:
+			rctl |= E1000_RCTL_SZ_1024;
+			rctl &= ~E1000_RCTL_BSEX;
+			break;
+		case E1000_RXBUFFER_2048:
+		default:
+			rctl |= E1000_RCTL_SZ_2048;
+			rctl &= ~E1000_RCTL_BSEX;
+			break;
+		case E1000_RXBUFFER_4096:
+			rctl |= E1000_RCTL_SZ_4096;
+			break;
+		case E1000_RXBUFFER_8192:
+			rctl |= E1000_RCTL_SZ_8192;
+			break;
+		case E1000_RXBUFFER_16384:
+			rctl |= E1000_RCTL_SZ_16384;
+			break;
+	}
+
+	ew32(RCTL, rctl);
+}
+
+/**
+ * e1000_configure_rx - Configure 8254x Receive Unit after Reset
+ * @adapter: board private structure
+ *
+ * Configure the Rx unit of the MAC after a reset.
+ **/
+
+static void e1000_configure_rx(struct e1000_adapter *adapter)
+{
+	u64 rdba;
+	struct e1000_hw *hw = &adapter->hw;
+	u32 rdlen, rctl, rxcsum, ctrl_ext;
+
+	rdlen = adapter->rx_ring[0].count *
+		sizeof(struct e1000_rx_desc);
+	adapter->clean_rx = e1000_clean_rx_irq;
+	adapter->alloc_rx_buf = e1000_alloc_rx_buffers;
+
+	/* disable receives while setting up the descriptors */
+	rctl = er32(RCTL);
+	ew32(RCTL, rctl & ~E1000_RCTL_EN);
+
+	/* set the Receive Delay Timer Register */
+	ew32(RDTR, adapter->rx_int_delay);
+
+	if (hw->mac_type >= e1000_82540) {
+		ew32(RADV, adapter->rx_abs_int_delay);
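+		/* the ITR register throttles interrupts in 256 ns units, so
+		 * convert the interrupts-per-second setting accordingly */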
+		if (adapter->itr_setting != 0)
+			ew32(ITR, 1000000000 / (adapter->itr * 256));
+	}
+
+	if (hw->mac_type >= e1000_82571) {
+		ctrl_ext = er32(CTRL_EXT);
+		/* Reset delay timers after every interrupt */
+		ctrl_ext |= E1000_CTRL_EXT_INT_TIMER_CLR;
+		/* Auto-Mask interrupts upon ICR access */
+		ctrl_ext |= E1000_CTRL_EXT_IAME;
+		ew32(IAM, 0xffffffff);
+		ew32(CTRL_EXT, ctrl_ext);
+		E1000_WRITE_FLUSH();
+	}
+
+	/* Setup the HW Rx Head and Tail Descriptor Pointers and
+	 * the Base and Length of the Rx Descriptor Ring */
+	switch (adapter->num_rx_queues) {
+	case 1:
+	default:
+		rdba = adapter->rx_ring[0].dma;
+		ew32(RDLEN, rdlen);
+		ew32(RDBAH, (rdba >> 32));
+		ew32(RDBAL, (rdba & 0x00000000ffffffffULL));
+		ew32(RDT, 0);
+		ew32(RDH, 0);
+		adapter->rx_ring[0].rdh = ((hw->mac_type >= e1000_82543) ? E1000_RDH : E1000_82542_RDH);
+		adapter->rx_ring[0].rdt = ((hw->mac_type >= e1000_82543) ? E1000_RDT : E1000_82542_RDT);
+		break;
+	}
+
+	/* Enable 82543 Receive Checksum Offload for TCP and UDP */
+	if (hw->mac_type >= e1000_82543) {
+		rxcsum = er32(RXCSUM);
+		if (adapter->rx_csum)
+			rxcsum |= E1000_RXCSUM_TUOFL;
+		else
+			/* don't need to clear IPPCSE as it defaults to 0 */
+			rxcsum &= ~E1000_RXCSUM_TUOFL;
+		ew32(RXCSUM, rxcsum);
+	}
+
+	/* Enable Receives */
+	ew32(RCTL, rctl);
+}
+
+/**
+ * e1000_free_tx_resources - Free Tx Resources per Queue
+ * @adapter: board private structure
+ * @tx_ring: Tx descriptor ring for a specific queue
+ *
+ * Free all transmit software resources
+ **/
+
+static void e1000_free_tx_resources(struct e1000_adapter *adapter,
+				    struct e1000_tx_ring *tx_ring)
+{
+	struct pci_dev *pdev = adapter->pdev;
+
+	e1000_clean_tx_ring(adapter, tx_ring);
+
+	vfree(tx_ring->buffer_info);
+	tx_ring->buffer_info = NULL;
+
+	pci_free_consistent(pdev, tx_ring->size, tx_ring->desc, tx_ring->dma);
+
+	tx_ring->desc = NULL;
+}
+
+/**
+ * e1000_free_all_tx_resources - Free Tx Resources for All Queues
+ * @adapter: board private structure
+ *
+ * Free all transmit software resources
+ **/
+
+void e1000_free_all_tx_resources(struct e1000_adapter *adapter)
+{
+	int i;
+
+	for (i = 0; i < adapter->num_tx_queues; i++)
+		e1000_free_tx_resources(adapter, &adapter->tx_ring[i]);
+}
+
+static void e1000_unmap_and_free_tx_resource(struct e1000_adapter *adapter,
+					     struct e1000_buffer *buffer_info)
+{
+	buffer_info->dma = 0;
+	if (buffer_info->skb) {
+		skb_dma_unmap(&adapter->pdev->dev, buffer_info->skb,
+		              DMA_TO_DEVICE);
+		dev_kfree_skb_any(buffer_info->skb);
+		buffer_info->skb = NULL;
+	}
+	buffer_info->time_stamp = 0;
+	/* buffer_info must be completely set up in the transmit path */
+}
+
+/**
+ * e1000_clean_tx_ring - Free Tx Buffers
+ * @adapter: board private structure
+ * @tx_ring: ring to be cleaned
+ **/
+
+static void e1000_clean_tx_ring(struct e1000_adapter *adapter,
+				struct e1000_tx_ring *tx_ring)
+{
+	struct e1000_hw *hw = &adapter->hw;
+	struct e1000_buffer *buffer_info;
+	unsigned long size;
+	unsigned int i;
+
+	/* Free all the Tx ring sk_buffs */
+
+	for (i = 0; i < tx_ring->count; i++) {
+		buffer_info = &tx_ring->buffer_info[i];
+		e1000_unmap_and_free_tx_resource(adapter, buffer_info);
+	}
+
+	size = sizeof(struct e1000_buffer) * tx_ring->count;
+	memset(tx_ring->buffer_info, 0, size);
+
+	/* Zero out the descriptor ring */
+
+	memset(tx_ring->desc, 0, tx_ring->size);
+
+	tx_ring->next_to_use = 0;
+	tx_ring->next_to_clean = 0;
+	tx_ring->last_tx_tso = 0;
+
+	writel(0, hw->hw_addr + tx_ring->tdh);
+	writel(0, hw->hw_addr + tx_ring->tdt);
+}
+
+/**
+ * e1000_clean_all_tx_rings - Free Tx Buffers for all queues
+ * @adapter: board private structure
+ **/
+
+static void e1000_clean_all_tx_rings(struct e1000_adapter *adapter)
+{
+	int i;
+
+	for (i = 0; i < adapter->num_tx_queues; i++)
+		e1000_clean_tx_ring(adapter, &adapter->tx_ring[i]);
+}
+
+/**
+ * e1000_free_rx_resources - Free Rx Resources
+ * @adapter: board private structure
+ * @rx_ring: ring to clean the resources from
+ *
+ * Free all receive software resources
+ **/
+
+static void e1000_free_rx_resources(struct e1000_adapter *adapter,
+				    struct e1000_rx_ring *rx_ring)
+{
+	struct pci_dev *pdev = adapter->pdev;
+
+	e1000_clean_rx_ring(adapter, rx_ring);
+
+	vfree(rx_ring->buffer_info);
+	rx_ring->buffer_info = NULL;
+
+	pci_free_consistent(pdev, rx_ring->size, rx_ring->desc, rx_ring->dma);
+
+	rx_ring->desc = NULL;
+}
+
+/**
+ * e1000_free_all_rx_resources - Free Rx Resources for All Queues
+ * @adapter: board private structure
+ *
+ * Free all receive software resources
+ **/
+
+void e1000_free_all_rx_resources(struct e1000_adapter *adapter)
+{
+	int i;
+
+	for (i = 0; i < adapter->num_rx_queues; i++)
+		e1000_free_rx_resources(adapter, &adapter->rx_ring[i]);
+}
+
+/**
+ * e1000_clean_rx_ring - Free Rx Buffers per Queue
+ * @adapter: board private structure
+ * @rx_ring: ring to free buffers from
+ **/
+
+static void e1000_clean_rx_ring(struct e1000_adapter *adapter,
+				struct e1000_rx_ring *rx_ring)
+{
+	struct e1000_hw *hw = &adapter->hw;
+	struct e1000_buffer *buffer_info;
+	struct pci_dev *pdev = adapter->pdev;
+	unsigned long size;
+	unsigned int i;
+
+	/* Free all the Rx ring sk_buffs */
+	for (i = 0; i < rx_ring->count; i++) {
+		buffer_info = &rx_ring->buffer_info[i];
+		if (buffer_info->dma) {
+			pci_unmap_single(pdev,
+					 buffer_info->dma,
+					 buffer_info->length,
+					 PCI_DMA_FROMDEVICE);
+		}
+
+		buffer_info->dma = 0;
+
+		if (buffer_info->skb) {
+			dev_kfree_skb(buffer_info->skb);
+			buffer_info->skb = NULL;
+		}
+	}
+
+	size = sizeof(struct e1000_buffer) * rx_ring->count;
+	memset(rx_ring->buffer_info, 0, size);
+
+	/* Zero out the descriptor ring */
+
+	memset(rx_ring->desc, 0, rx_ring->size);
+
+	rx_ring->next_to_clean = 0;
+	rx_ring->next_to_use = 0;
+
+	writel(0, hw->hw_addr + rx_ring->rdh);
+	writel(0, hw->hw_addr + rx_ring->rdt);
+}
+
+/**
+ * e1000_clean_all_rx_rings - Free Rx Buffers for all queues
+ * @adapter: board private structure
+ **/
+
+static void e1000_clean_all_rx_rings(struct e1000_adapter *adapter)
+{
+	int i;
+
+	for (i = 0; i < adapter->num_rx_queues; i++)
+		e1000_clean_rx_ring(adapter, &adapter->rx_ring[i]);
+}
+
+/* The 82542 2.0 (revision 2) needs to have the receive unit in reset
+ * and memory write and invalidate disabled for certain operations
+ */
+static void e1000_enter_82542_rst(struct e1000_adapter *adapter)
+{
+	struct e1000_hw *hw = &adapter->hw;
+	struct net_device *netdev = adapter->netdev;
+	u32 rctl;
+
+	e1000_pci_clear_mwi(hw);
+
+	rctl = er32(RCTL);
+	rctl |= E1000_RCTL_RST;
+	ew32(RCTL, rctl);
+	E1000_WRITE_FLUSH();
+	mdelay(5);
+
+	if (netif_running(netdev))
+		e1000_clean_all_rx_rings(adapter);
+}
+
+static void e1000_leave_82542_rst(struct e1000_adapter *adapter)
+{
+	struct e1000_hw *hw = &adapter->hw;
+	struct net_device *netdev = adapter->netdev;
+	u32 rctl;
+
+	rctl = er32(RCTL);
+	rctl &= ~E1000_RCTL_RST;
+	ew32(RCTL, rctl);
+	E1000_WRITE_FLUSH();
+	mdelay(5);
+
+	if (hw->pci_cmd_word & PCI_COMMAND_INVALIDATE)
+		e1000_pci_set_mwi(hw);
+
+	if (netif_running(netdev)) {
+		/* No need to loop, because 82542 supports only 1 queue */
+		struct e1000_rx_ring *ring = &adapter->rx_ring[0];
+		e1000_configure_rx(adapter);
+		adapter->alloc_rx_buf(adapter, ring, E1000_DESC_UNUSED(ring));
+	}
+}
+
+/**
+ * e1000_set_mac - Change the Ethernet Address of the NIC
+ * @netdev: network interface device structure
+ * @p: pointer to an address structure
+ *
+ * Returns 0 on success, negative on failure
+ **/
+
+static int e1000_set_mac(struct net_device *netdev, void *p)
+{
+	struct e1000_adapter *adapter = netdev_priv(netdev);
+	struct e1000_hw *hw = &adapter->hw;
+	struct sockaddr *addr = p;
+
+	if (!is_valid_ether_addr(addr->sa_data))
+		return -EADDRNOTAVAIL;
+
+	/* 82542 2.0 needs to be in reset to write receive address registers */
+
+	if (hw->mac_type == e1000_82542_rev2_0)
+		e1000_enter_82542_rst(adapter);
+
+	memcpy(netdev->dev_addr, addr->sa_data, netdev->addr_len);
+	memcpy(hw->mac_addr, addr->sa_data, netdev->addr_len);
+
+	e1000_rar_set(hw, hw->mac_addr, 0);
+
+	/* With 82571 controllers, LAA may be overwritten (with the default)
+	 * due to controller reset from the other port. */
+	if (hw->mac_type == e1000_82571) {
+		/* activate the work around */
+		hw->laa_is_present = 1;
+
+		/* Hold a copy of the LAA in RAR[14]. This is done so that
+		 * between the time RAR[0] gets clobbered and the time it
+		 * gets fixed (in e1000_watchdog), the actual LAA is in one
+		 * of the RARs and no incoming packets directed to this port
+		 * are dropped. Eventually the LAA will be in RAR[0] and
+		 * RAR[14] */
+		e1000_rar_set(hw, hw->mac_addr,
+					E1000_RAR_ENTRIES - 1);
+	}
+
+	if (hw->mac_type == e1000_82542_rev2_0)
+		e1000_leave_82542_rst(adapter);
+
+	return 0;
+}
+
+/**
+ * e1000_set_rx_mode - Secondary Unicast, Multicast and Promiscuous mode set
+ * @netdev: network interface device structure
+ *
+ * The set_rx_mode entry point is called whenever the unicast or multicast
+ * address lists or the network interface flags are updated. This routine is
+ * responsible for configuring the hardware for proper unicast, multicast,
+ * promiscuous mode, and all-multi behavior.
+ **/
+
+static void e1000_set_rx_mode(struct net_device *netdev)
+{
+	struct e1000_adapter *adapter = netdev_priv(netdev);
+	struct e1000_hw *hw = &adapter->hw;
+	struct netdev_hw_addr *ha;
+	bool use_uc = false;
+	struct dev_addr_list *mc_ptr;
+	u32 rctl;
+	u32 hash_value;
+	int i, rar_entries = E1000_RAR_ENTRIES;
+	int mta_reg_count = (hw->mac_type == e1000_ich8lan) ?
+				E1000_NUM_MTA_REGISTERS_ICH8LAN :
+				E1000_NUM_MTA_REGISTERS;
+	u32 *mcarray = kcalloc(mta_reg_count, sizeof(u32), GFP_ATOMIC);
+
+	if (!mcarray) {
+		DPRINTK(PROBE, ERR, "memory allocation failed\n");
+		return;
+	}
+
+	if (hw->mac_type == e1000_ich8lan)
+		rar_entries = E1000_RAR_ENTRIES_ICH8LAN;
+
+	/* reserve RAR[14] for LAA over-write work-around */
+	if (hw->mac_type == e1000_82571)
+		rar_entries--;
+
+	/* Check for Promiscuous and All Multicast modes */
+
+	rctl = er32(RCTL);
+
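+	/* RCTL bits used below: UPE = unicast promiscuous enable,
+	 * MPE = multicast promiscuous enable, VFE = VLAN filter enable */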
+	if (netdev->flags & IFF_PROMISC) {
+		rctl |= (E1000_RCTL_UPE | E1000_RCTL_MPE);
+		rctl &= ~E1000_RCTL_VFE;
+	} else {
+		if (netdev->flags & IFF_ALLMULTI) {
+			rctl |= E1000_RCTL_MPE;
+		} else {
+			rctl &= ~E1000_RCTL_MPE;
+		}
+		if (adapter->hw.mac_type != e1000_ich8lan)
+			rctl |= E1000_RCTL_VFE;
+	}
+
+	if (netdev->uc.count > rar_entries - 1) {
+		rctl |= E1000_RCTL_UPE;
+	} else if (!(netdev->flags & IFF_PROMISC)) {
+		rctl &= ~E1000_RCTL_UPE;
+		use_uc = true;
+	}
+
+	ew32(RCTL, rctl);
+
+	/* 82542 2.0 needs to be in reset to write receive address registers */
+
+	if (hw->mac_type == e1000_82542_rev2_0)
+		e1000_enter_82542_rst(adapter);
+
+	/* load the first 14 addresses into the exact filters 1-14. Unicast
+	 * addresses take precedence to avoid disabling unicast filtering
+	 * when possible.
+	 *
+	 * RAR 0 is used for the station MAC address.
+	 * If there are not 14 addresses, go ahead and clear the filters
+	 * -- with 82571 controllers only 0-13 entries are filled here
+	 */
+	i = 1;
+	if (use_uc)
+		list_for_each_entry(ha, &netdev->uc.list, list) {
+			if (i == rar_entries)
+				break;
+			e1000_rar_set(hw, ha->addr, i++);
+		}
+
+	WARN_ON(i == rar_entries);
+
+	mc_ptr = netdev->mc_list;
+
+	for (; i < rar_entries; i++) {
+		if (mc_ptr) {
+			e1000_rar_set(hw, mc_ptr->da_addr, i);
+			mc_ptr = mc_ptr->next;
+		} else {
+			E1000_WRITE_REG_ARRAY(hw, RA, i << 1, 0);
+			E1000_WRITE_FLUSH();
+			E1000_WRITE_REG_ARRAY(hw, RA, (i << 1) + 1, 0);
+			E1000_WRITE_FLUSH();
+		}
+	}
+
+	/* load any remaining addresses into the hash table */
+
+	for (; mc_ptr; mc_ptr = mc_ptr->next) {
+		u32 hash_reg, hash_bit, mta;
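+		/* the upper bits of the hash select one of the 32-bit MTA
+		 * registers, the low 5 bits select the bit within it */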
+		hash_value = e1000_hash_mc_addr(hw, mc_ptr->da_addr);
+		hash_reg = (hash_value >> 5) & 0x7F;
+		hash_bit = hash_value & 0x1F;
+		mta = (1 << hash_bit);
+		mcarray[hash_reg] |= mta;
+	}
+
+	/* write the hash table completely, write from bottom to avoid
+	 * both stupid write combining chipsets, and flushing each write */
+	for (i = mta_reg_count - 1; i >= 0 ; i--) {
+		/*
+		 * The 82544 has an erratum where writing odd
+		 * offsets overwrites the previous even offset, but writing
+		 * backwards over the range solves the issue by always
+		 * writing the odd offset first
+		 */
+		E1000_WRITE_REG_ARRAY(hw, MTA, i, mcarray[i]);
+	}
+	E1000_WRITE_FLUSH();
+
+	if (hw->mac_type == e1000_82542_rev2_0)
+		e1000_leave_82542_rst(adapter);
+
+	kfree(mcarray);
+}
+
+/* Need to wait a few seconds after link up to get diagnostic information from
+ * the phy */
+
+static void e1000_update_phy_info(unsigned long data)
+{
+	struct e1000_adapter *adapter = (struct e1000_adapter *)data;
+	struct e1000_hw *hw = &adapter->hw;
+	e1000_phy_get_info(hw, &adapter->phy_info);
+}
+
+/**
+ * e1000_82547_tx_fifo_stall - Timer Call-back
+ * @data: pointer to adapter cast into an unsigned long
+ **/
+
+static void e1000_82547_tx_fifo_stall(unsigned long data)
+{
+	struct e1000_adapter *adapter = (struct e1000_adapter *)data;
+	struct e1000_hw *hw = &adapter->hw;
+	struct net_device *netdev = adapter->netdev;
+	u32 tctl;
+
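+	/* Once the stall flag is set, wait until the Tx descriptor ring and
+	 * the internal Tx FIFO have fully drained (head equals tail), then
+	 * reset the FIFO pointers and restart the queue */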
+	if (atomic_read(&adapter->tx_fifo_stall)) {
+		if ((er32(TDT) == er32(TDH)) &&
+		   (er32(TDFT) == er32(TDFH)) &&
+		   (er32(TDFTS) == er32(TDFHS))) {
+			tctl = er32(TCTL);
+			ew32(TCTL, tctl & ~E1000_TCTL_EN);
+			ew32(TDFT, adapter->tx_head_addr);
+			ew32(TDFH, adapter->tx_head_addr);
+			ew32(TDFTS, adapter->tx_head_addr);
+			ew32(TDFHS, adapter->tx_head_addr);
+			ew32(TCTL, tctl);
+			E1000_WRITE_FLUSH();
+
+			adapter->tx_fifo_head = 0;
+			atomic_set(&adapter->tx_fifo_stall, 0);
+			netif_wake_queue(netdev);
+		} else {
+			mod_timer(&adapter->tx_fifo_stall_timer, jiffies + 1);
+		}
+	}
+}
+
+/**
+ * e1000_watchdog - Timer Call-back
+ * @data: pointer to adapter cast into an unsigned long
+ **/
+static void e1000_watchdog(unsigned long data)
+{
+	struct e1000_adapter *adapter = (struct e1000_adapter *)data;
+	struct e1000_hw *hw = &adapter->hw;
+	struct net_device *netdev = adapter->netdev;
+	struct e1000_tx_ring *txdr = adapter->tx_ring;
+	u32 link, tctl;
+	s32 ret_val;
+
+	ret_val = e1000_check_for_link(hw);
+	if ((ret_val == E1000_ERR_PHY) &&
+	    (hw->phy_type == e1000_phy_igp_3) &&
+	    (er32(CTRL) & E1000_PHY_CTRL_GBE_DISABLE)) {
+		/* See e1000_kumeran_lock_loss_workaround() */
+		DPRINTK(LINK, INFO,
+			"Gigabit has been disabled, downgrading speed\n");
+	}
+
+	if (hw->mac_type == e1000_82573) {
+		e1000_enable_tx_pkt_filtering(hw);
+		if (adapter->mng_vlan_id != hw->mng_cookie.vlan_id)
+			e1000_update_mng_vlan(adapter);
+	}
+
+	if ((hw->media_type == e1000_media_type_internal_serdes) &&
+	   !(er32(TXCW) & E1000_TXCW_ANE))
+		link = !hw->serdes_link_down;
+	else
+		link = er32(STATUS) & E1000_STATUS_LU;
+
+	if (link) {
+		if (!netif_carrier_ok(netdev)) {
+			u32 ctrl;
+			bool txb2b = true;
+			e1000_get_speed_and_duplex(hw,
+			                           &adapter->link_speed,
+			                           &adapter->link_duplex);
+
+			ctrl = er32(CTRL);
+			printk(KERN_INFO "e1000: %s NIC Link is Up %d Mbps %s, "
+			       "Flow Control: %s\n",
+			       netdev->name,
+			       adapter->link_speed,
+			       adapter->link_duplex == FULL_DUPLEX ?
+			        "Full Duplex" : "Half Duplex",
+			        ((ctrl & E1000_CTRL_TFCE) && (ctrl &
+			        E1000_CTRL_RFCE)) ? "RX/TX" : ((ctrl &
+			        E1000_CTRL_RFCE) ? "RX" : ((ctrl &
+			        E1000_CTRL_TFCE) ? "TX" : "None" )));
+
+			/* tweak tx_queue_len according to speed/duplex
+			 * and adjust the timeout factor */
+			netdev->tx_queue_len = adapter->tx_queue_len;
+			adapter->tx_timeout_factor = 1;
+			switch (adapter->link_speed) {
+			case SPEED_10:
+				txb2b = false;
+				netdev->tx_queue_len = 10;
+				adapter->tx_timeout_factor = 8;
+				break;
+			case SPEED_100:
+				txb2b = false;
+				netdev->tx_queue_len = 100;
+				/* maybe add some timeout factor ? */
+				break;
+			}
+
+			if ((hw->mac_type == e1000_82571 ||
+			     hw->mac_type == e1000_82572) &&
+			    !txb2b) {
+				u32 tarc0;
+				tarc0 = er32(TARC0);
+				tarc0 &= ~(1 << 21);
+				ew32(TARC0, tarc0);
+			}
+
+			/* disable TSO for pcie and 10/100 speeds, to avoid
+			 * some hardware issues */
+			if (!adapter->tso_force &&
+			    hw->bus_type == e1000_bus_type_pci_express){
+				switch (adapter->link_speed) {
+				case SPEED_10:
+				case SPEED_100:
+					DPRINTK(PROBE, INFO,
+						"10/100 speed: disabling TSO\n");
+					netdev->features &= ~NETIF_F_TSO;
+					netdev->features &= ~NETIF_F_TSO6;
+					break;
+				case SPEED_1000:
+					netdev->features |= NETIF_F_TSO;
+					netdev->features |= NETIF_F_TSO6;
+					break;
+				default:
+					/* oops */
+					break;
+				}
+			}
+
+			/* enable transmits in the hardware, need to do this
+			 * after setting TARC0 */
+			tctl = er32(TCTL);
+			tctl |= E1000_TCTL_EN;
+			ew32(TCTL, tctl);
+
+			netif_carrier_on(netdev);
+			mod_timer(&adapter->phy_info_timer, round_jiffies(jiffies + 2 * HZ));
+			adapter->smartspeed = 0;
+		} else {
+			/* make sure the receive unit is started */
+			if (hw->rx_needs_kicking) {
+				u32 rctl = er32(RCTL);
+				ew32(RCTL, rctl | E1000_RCTL_EN);
+			}
+		}
+	} else {
+		if (netif_carrier_ok(netdev)) {
+			adapter->link_speed = 0;
+			adapter->link_duplex = 0;
+			printk(KERN_INFO "e1000: %s NIC Link is Down\n",
+			       netdev->name);
+			netif_carrier_off(netdev);
+			mod_timer(&adapter->phy_info_timer, round_jiffies(jiffies + 2 * HZ));
+
+			/* 80003ES2LAN workaround--
+			 * For packet buffer work-around on link down event;
+			 * disable receives in the ISR and
+			 * reset device here in the watchdog
+			 */
+			if (hw->mac_type == e1000_80003es2lan)
+				/* reset device */
+				schedule_work(&adapter->reset_task);
+		}
+
+		e1000_smartspeed(adapter);
+	}
+
+	e1000_update_stats(adapter);
+
+	hw->tx_packet_delta = adapter->stats.tpt - adapter->tpt_old;
+	adapter->tpt_old = adapter->stats.tpt;
+	hw->collision_delta = adapter->stats.colc - adapter->colc_old;
+	adapter->colc_old = adapter->stats.colc;
+
+	adapter->gorcl = adapter->stats.gorcl - adapter->gorcl_old;
+	adapter->gorcl_old = adapter->stats.gorcl;
+	adapter->gotcl = adapter->stats.gotcl - adapter->gotcl_old;
+	adapter->gotcl_old = adapter->stats.gotcl;
+
+	e1000_update_adaptive(hw);
+
+	if (!netif_carrier_ok(netdev)) {
+		if (E1000_DESC_UNUSED(txdr) + 1 < txdr->count) {
+			/* We've lost link, so the controller stops DMA,
+			 * but we've got queued Tx work that's never going
+			 * to get done, so reset controller to flush Tx.
+			 * (Do the reset outside of interrupt context). */
+			adapter->tx_timeout_count++;
+			schedule_work(&adapter->reset_task);
+			/* return immediately since reset is imminent */
+			return;
+		}
+	}
+
+	/* Cause software interrupt to ensure rx ring is cleaned */
+	ew32(ICS, E1000_ICS_RXDMT0);
+
+	/* Force detection of hung controller every watchdog period */
+	adapter->detect_tx_hung = true;
+
+	/* With 82571 controllers, LAA may be overwritten due to controller
+	 * reset from the other port. Set the appropriate LAA in RAR[0] */
+	if (hw->mac_type == e1000_82571 && hw->laa_is_present)
+		e1000_rar_set(hw, hw->mac_addr, 0);
+
+	/* Reset the timer */
+	mod_timer(&adapter->watchdog_timer, round_jiffies(jiffies + 2 * HZ));
+}
+
+enum latency_range {
+	lowest_latency = 0,
+	low_latency = 1,
+	bulk_latency = 2,
+	latency_invalid = 255
+};
+
+/**
+ * e1000_update_itr - update the dynamic ITR value based on statistics
+ * @adapter: pointer to adapter
+ * @itr_setting: current adapter->itr
+ * @packets: the number of packets during this measurement interval
+ * @bytes: the number of bytes during this measurement interval
+ *
+ *      Stores a new ITR value based on packets and byte
+ *      counts during the last interrupt.  The advantage of per interrupt
+ *      computation is faster updates and more accurate ITR for the current
+ *      traffic pattern.  Constants in this function were computed
+ *      based on theoretical maximum wire speed and thresholds were set based
+ *      on testing data as well as attempting to minimize response time
+ *      while increasing bulk throughput.
+ *      This functionality is controlled by the InterruptThrottleRate module
+ *      parameter (see e1000_param.c)
+ **/
+static unsigned int e1000_update_itr(struct e1000_adapter *adapter,
+				     u16 itr_setting, int packets, int bytes)
+{
+	unsigned int retval = itr_setting;
+	struct e1000_hw *hw = &adapter->hw;
+
+	if (unlikely(hw->mac_type < e1000_82540))
+		goto update_itr_done;
+
+	if (packets == 0)
+		goto update_itr_done;
+
+	switch (itr_setting) {
+	case lowest_latency:
+		/* jumbo frames get bulk treatment */
+		if (bytes/packets > 8000)
+			retval = bulk_latency;
+		else if ((packets < 5) && (bytes > 512))
+			retval = low_latency;
+		break;
+	case low_latency:  /* 50 usec aka 20000 ints/s */
+		if (bytes > 10000) {
+			/* jumbo frames need bulk latency setting */
+			if (bytes/packets > 8000)
+				retval = bulk_latency;
+			else if ((packets < 10) || ((bytes/packets) > 1200))
+				retval = bulk_latency;
+			else if ((packets > 35))
+				retval = lowest_latency;
+		} else if (bytes/packets > 2000)
+			retval = bulk_latency;
+		else if (packets <= 2 && bytes < 512)
+			retval = lowest_latency;
+		break;
+	case bulk_latency: /* 250 usec aka 4000 ints/s */
+		if (bytes > 25000) {
+			if (packets > 35)
+				retval = low_latency;
+		} else if (bytes < 6000) {
+			retval = low_latency;
+		}
+		break;
+	}
+
+update_itr_done:
+	return retval;
+}
+
+static void e1000_set_itr(struct e1000_adapter *adapter)
+{
+	struct e1000_hw *hw = &adapter->hw;
+	u16 current_itr;
+	u32 new_itr = adapter->itr;
+
+	if (unlikely(hw->mac_type < e1000_82540))
+		return;
+
+	/* for non-gigabit speeds, just fix the interrupt rate at 4000 */
+	if (unlikely(adapter->link_speed != SPEED_1000)) {
+		current_itr = 0;
+		new_itr = 4000;
+		goto set_itr_now;
+	}
+
+	adapter->tx_itr = e1000_update_itr(adapter,
+	                            adapter->tx_itr,
+	                            adapter->total_tx_packets,
+	                            adapter->total_tx_bytes);
+	/* conservative mode (itr 3) eliminates the lowest_latency setting */
+	if (adapter->itr_setting == 3 && adapter->tx_itr == lowest_latency)
+		adapter->tx_itr = low_latency;
+
+	adapter->rx_itr = e1000_update_itr(adapter,
+	                            adapter->rx_itr,
+	                            adapter->total_rx_packets,
+	                            adapter->total_rx_bytes);
+	/* conservative mode (itr 3) eliminates the lowest_latency setting */
+	if (adapter->itr_setting == 3 && adapter->rx_itr == lowest_latency)
+		adapter->rx_itr = low_latency;
+
+	current_itr = max(adapter->rx_itr, adapter->tx_itr);
+
+	switch (current_itr) {
+	/* counts and packets in update_itr are dependent on these numbers */
+	case lowest_latency:
+		new_itr = 70000;
+		break;
+	case low_latency:
+		new_itr = 20000; /* aka hwitr = ~200 */
+		break;
+	case bulk_latency:
+		new_itr = 4000;
+		break;
+	default:
+		break;
+	}
+
+set_itr_now:
+	if (new_itr != adapter->itr) {
+		/* this attempts to bias the interrupt rate towards Bulk
+		 * by adding intermediate steps when interrupt rate is
+		 * increasing */
+		new_itr = new_itr > adapter->itr ?
+		             min(adapter->itr + (new_itr >> 2), new_itr) :
+		             new_itr;
+		adapter->itr = new_itr;
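+		/* convert interrupts/sec into the ITR register's interval,
+		 * which is expressed in units of 256 nanoseconds */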
+		ew32(ITR, 1000000000 / (new_itr * 256));
+	}
+
+	return;
+}
+
+#define E1000_TX_FLAGS_CSUM		0x00000001
+#define E1000_TX_FLAGS_VLAN		0x00000002
+#define E1000_TX_FLAGS_TSO		0x00000004
+#define E1000_TX_FLAGS_IPV4		0x00000008
+#define E1000_TX_FLAGS_VLAN_MASK	0xffff0000
+#define E1000_TX_FLAGS_VLAN_SHIFT	16
+
+static int e1000_tso(struct e1000_adapter *adapter,
+		     struct e1000_tx_ring *tx_ring, struct sk_buff *skb)
+{
+	struct e1000_context_desc *context_desc;
+	struct e1000_buffer *buffer_info;
+	unsigned int i;
+	u32 cmd_length = 0;
+	u16 ipcse = 0, tucse, mss;
+	u8 ipcss, ipcso, tucss, tucso, hdr_len;
+	int err;
+
+	if (skb_is_gso(skb)) {
+		if (skb_header_cloned(skb)) {
+			err = pskb_expand_head(skb, 0, 0, GFP_ATOMIC);
+			if (err)
+				return err;
+		}
+
+		hdr_len = skb_transport_offset(skb) + tcp_hdrlen(skb);
+		mss = skb_shinfo(skb)->gso_size;
+		if (skb->protocol == htons(ETH_P_IP)) {
+			struct iphdr *iph = ip_hdr(skb);
+			iph->tot_len = 0;
+			iph->check = 0;
+			tcp_hdr(skb)->check = ~csum_tcpudp_magic(iph->saddr,
+								 iph->daddr, 0,
+								 IPPROTO_TCP,
+								 0);
+			cmd_length = E1000_TXD_CMD_IP;
+			ipcse = skb_transport_offset(skb) - 1;
+		} else if (skb->protocol == htons(ETH_P_IPV6)) {
+			ipv6_hdr(skb)->payload_len = 0;
+			tcp_hdr(skb)->check =
+				~csum_ipv6_magic(&ipv6_hdr(skb)->saddr,
+						 &ipv6_hdr(skb)->daddr,
+						 0, IPPROTO_TCP, 0);
+			ipcse = 0;
+		}
+		ipcss = skb_network_offset(skb);
+		ipcso = (void *)&(ip_hdr(skb)->check) - (void *)skb->data;
+		tucss = skb_transport_offset(skb);
+		tucso = (void *)&(tcp_hdr(skb)->check) - (void *)skb->data;
+		tucse = 0;
+
+		cmd_length |= (E1000_TXD_CMD_DEXT | E1000_TXD_CMD_TSE |
+			       E1000_TXD_CMD_TCP | (skb->len - (hdr_len)));
+
+		i = tx_ring->next_to_use;
+		context_desc = E1000_CONTEXT_DESC(*tx_ring, i);
+		buffer_info = &tx_ring->buffer_info[i];
+
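+		/* the context descriptor carries the checksum start/offset/end
+		 * positions for the IP and TCP headers (ipcss/ipcso/ipcse,
+		 * tucss/tucso/tucse) plus MSS and header length for TSO */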
+		context_desc->lower_setup.ip_fields.ipcss  = ipcss;
+		context_desc->lower_setup.ip_fields.ipcso  = ipcso;
+		context_desc->lower_setup.ip_fields.ipcse  = cpu_to_le16(ipcse);
+		context_desc->upper_setup.tcp_fields.tucss = tucss;
+		context_desc->upper_setup.tcp_fields.tucso = tucso;
+		context_desc->upper_setup.tcp_fields.tucse = cpu_to_le16(tucse);
+		context_desc->tcp_seg_setup.fields.mss     = cpu_to_le16(mss);
+		context_desc->tcp_seg_setup.fields.hdr_len = hdr_len;
+		context_desc->cmd_and_length = cpu_to_le32(cmd_length);
+
+		buffer_info->time_stamp = jiffies;
+		buffer_info->next_to_watch = i;
+
+		if (++i == tx_ring->count) i = 0;
+		tx_ring->next_to_use = i;
+
+		return true;
+	}
+	return false;
+}
+
+static bool e1000_tx_csum(struct e1000_adapter *adapter,
+			  struct e1000_tx_ring *tx_ring, struct sk_buff *skb)
+{
+	struct e1000_context_desc *context_desc;
+	struct e1000_buffer *buffer_info;
+	unsigned int i;
+	u8 css;
+	u32 cmd_len = E1000_TXD_CMD_DEXT;
+
+	if (skb->ip_summed != CHECKSUM_PARTIAL)
+		return false;
+
+	switch (skb->protocol) {
+	case cpu_to_be16(ETH_P_IP):
+		if (ip_hdr(skb)->protocol == IPPROTO_TCP)
+			cmd_len |= E1000_TXD_CMD_TCP;
+		break;
+	case cpu_to_be16(ETH_P_IPV6):
+		/* XXX not handling all IPV6 headers */
+		if (ipv6_hdr(skb)->nexthdr == IPPROTO_TCP)
+			cmd_len |= E1000_TXD_CMD_TCP;
+		break;
+	default:
+		if (unlikely(net_ratelimit()))
+			DPRINTK(DRV, WARNING,
+			        "checksum_partial proto=%x!\n", skb->protocol);
+		break;
+	}
+
+	css = skb_transport_offset(skb);
+
+	i = tx_ring->next_to_use;
+	buffer_info = &tx_ring->buffer_info[i];
+	context_desc = E1000_CONTEXT_DESC(*tx_ring, i);
+
+	context_desc->lower_setup.ip_config = 0;
+	context_desc->upper_setup.tcp_fields.tucss = css;
+	context_desc->upper_setup.tcp_fields.tucso =
+		css + skb->csum_offset;
+	context_desc->upper_setup.tcp_fields.tucse = 0;
+	context_desc->tcp_seg_setup.data = 0;
+	context_desc->cmd_and_length = cpu_to_le32(cmd_len);
+
+	buffer_info->time_stamp = jiffies;
+	buffer_info->next_to_watch = i;
+
+	if (unlikely(++i == tx_ring->count)) i = 0;
+	tx_ring->next_to_use = i;
+
+	return true;
+}
+
+#define E1000_MAX_TXD_PWR	12
+#define E1000_MAX_DATA_PER_TXD	(1<<E1000_MAX_TXD_PWR)
+
+static int e1000_tx_map(struct e1000_adapter *adapter,
+			struct e1000_tx_ring *tx_ring,
+			struct sk_buff *skb, unsigned int first,
+			unsigned int max_per_txd, unsigned int nr_frags,
+			unsigned int mss)
+{
+	struct e1000_hw *hw = &adapter->hw;
+	struct e1000_buffer *buffer_info;
+	unsigned int len = skb_headlen(skb);
+	unsigned int offset, size, count = 0, i;
+	unsigned int f;
+	dma_addr_t *map;
+
+	i = tx_ring->next_to_use;
+
+	if (skb_dma_map(&adapter->pdev->dev, skb, DMA_TO_DEVICE)) {
+		dev_err(&adapter->pdev->dev, "TX DMA map failed\n");
+		return 0;
+	}
+
+	map = skb_shinfo(skb)->dma_maps;
+	offset = 0;
+
+	while (len) {
+		buffer_info = &tx_ring->buffer_info[i];
+		size = min(len, max_per_txd);
+		/* Workaround for Controller erratum --
+		 * descriptor for non-tso packet in a linear SKB that follows a
+		 * tso gets written back prematurely before the data is fully
+		 * DMA'd to the controller */
+		if (!skb->data_len && tx_ring->last_tx_tso &&
+		    !skb_is_gso(skb)) {
+			tx_ring->last_tx_tso = 0;
+			size -= 4;
+		}
+
+		/* Workaround for premature desc write-backs
+		 * in TSO mode.  Append 4-byte sentinel desc */
+		if (unlikely(mss && !nr_frags && size == len && size > 8))
+			size -= 4;
+		/* Work-around for errata 10; it applies to all controllers
+		 * in PCI-X mode.
+		 * The fix is to make sure that the first descriptor of a
+		 * packet is smaller than 2048 - 16 - 16 (or 2016) bytes
+		 */
+		if (unlikely((hw->bus_type == e1000_bus_type_pcix) &&
+		                (size > 2015) && count == 0))
+		        size = 2015;
+
+		/* Workaround for potential 82544 hang in PCI-X.  Avoid
+		 * terminating buffers within evenly-aligned dwords. */
+		if (unlikely(adapter->pcix_82544 &&
+		   !((unsigned long)(skb->data + offset + size - 1) & 4) &&
+		   size > 4))
+			size -= 4;
+
+		buffer_info->length = size;
+		buffer_info->dma = skb_shinfo(skb)->dma_head + offset;
+		buffer_info->time_stamp = jiffies;
+		buffer_info->next_to_watch = i;
+
+		len -= size;
+		offset += size;
+		count++;
+		if (len) {
+			i++;
+			if (unlikely(i == tx_ring->count))
+				i = 0;
+		}
+	}
+
+	for (f = 0; f < nr_frags; f++) {
+		struct skb_frag_struct *frag;
+
+		frag = &skb_shinfo(skb)->frags[f];
+		len = frag->size;
+		offset = 0;
+
+		while (len) {
+			i++;
+			if (unlikely(i == tx_ring->count))
+				i = 0;
+
+			buffer_info = &tx_ring->buffer_info[i];
+			size = min(len, max_per_txd);
+			/* Workaround for premature desc write-backs
+			 * in TSO mode.  Append 4-byte sentinel desc */
+			if (unlikely(mss && f == (nr_frags-1) && size == len && size > 8))
+				size -= 4;
+			/* Workaround for potential 82544 hang in PCI-X.
+			 * Avoid terminating buffers within evenly-aligned
+			 * dwords. */
+			if (unlikely(adapter->pcix_82544 &&
+			   !((unsigned long)(frag->page+offset+size-1) & 4) &&
+			   size > 4))
+				size -= 4;
+
+			buffer_info->length = size;
+			buffer_info->dma = map[f] + offset;
+			buffer_info->time_stamp = jiffies;
+			buffer_info->next_to_watch = i;
+
+			len -= size;
+			offset += size;
+			count++;
+		}
+	}
+
+	tx_ring->buffer_info[i].skb = skb;
+	tx_ring->buffer_info[first].next_to_watch = i;
+
+	return count;
+}
+
+static void e1000_tx_queue(struct e1000_adapter *adapter,
+			   struct e1000_tx_ring *tx_ring, int tx_flags,
+			   int count)
+{
+	struct e1000_hw *hw = &adapter->hw;
+	struct e1000_tx_desc *tx_desc = NULL;
+	struct e1000_buffer *buffer_info;
+	u32 txd_upper = 0, txd_lower = E1000_TXD_CMD_IFCS;
+	unsigned int i;
+
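+	/* Build the per-descriptor command/option bits: DEXT selects the
+	 * extended descriptor format, TSE enables TCP segmentation, VLE
+	 * inserts the VLAN tag, and the POPTS bits request IP/TCP checksum
+	 * insertion; IFCS (set above) asks hardware to append the FCS */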
+	if (likely(tx_flags & E1000_TX_FLAGS_TSO)) {
+		txd_lower |= E1000_TXD_CMD_DEXT | E1000_TXD_DTYP_D |
+		             E1000_TXD_CMD_TSE;
+		txd_upper |= E1000_TXD_POPTS_TXSM << 8;
+
+		if (likely(tx_flags & E1000_TX_FLAGS_IPV4))
+			txd_upper |= E1000_TXD_POPTS_IXSM << 8;
+	}
+
+	if (likely(tx_flags & E1000_TX_FLAGS_CSUM)) {
+		txd_lower |= E1000_TXD_CMD_DEXT | E1000_TXD_DTYP_D;
+		txd_upper |= E1000_TXD_POPTS_TXSM << 8;
+	}
+
+	if (unlikely(tx_flags & E1000_TX_FLAGS_VLAN)) {
+		txd_lower |= E1000_TXD_CMD_VLE;
+		txd_upper |= (tx_flags & E1000_TX_FLAGS_VLAN_MASK);
+	}
+
+	i = tx_ring->next_to_use;
+
+	while (count--) {
+		buffer_info = &tx_ring->buffer_info[i];
+		tx_desc = E1000_TX_DESC(*tx_ring, i);
+		tx_desc->buffer_addr = cpu_to_le64(buffer_info->dma);
+		tx_desc->lower.data =
+			cpu_to_le32(txd_lower | buffer_info->length);
+		tx_desc->upper.data = cpu_to_le32(txd_upper);
+		if (unlikely(++i == tx_ring->count)) i = 0;
+	}
+
+	tx_desc->lower.data |= cpu_to_le32(adapter->txd_cmd);
+
+	/* Force memory writes to complete before letting h/w
+	 * know there are new descriptors to fetch.  (Only
+	 * applicable for weak-ordered memory model archs,
+	 * such as IA-64). */
+	wmb();
+
+	tx_ring->next_to_use = i;
+	writel(i, hw->hw_addr + tx_ring->tdt);
+	/* we need this if more than one processor can write to our tail
+	 * at a time; it synchronizes IO on IA64/Altix systems */
+	mmiowb();
+}
+
+/**
+ * 82547 workaround to avoid controller hang in half-duplex environment.
+ * The workaround is to avoid queuing a large packet that would span
+ * the internal Tx FIFO ring boundary by notifying the stack to resend
+ * the packet at a later time.  This gives the Tx FIFO an opportunity to
+ * flush all packets.  When that occurs, we reset the Tx FIFO pointers
+ * to the beginning of the Tx FIFO.
+ **/
+
+#define E1000_FIFO_HDR			0x10
+#define E1000_82547_PAD_LEN		0x3E0
+
+static int e1000_82547_fifo_workaround(struct e1000_adapter *adapter,
+				       struct sk_buff *skb)
+{
+	u32 fifo_space = adapter->tx_fifo_size - adapter->tx_fifo_head;
+	u32 skb_fifo_len = skb->len + E1000_FIFO_HDR;
+
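+	/* round the frame (plus FIFO header) up to the FIFO's 16-byte
+	 * granularity before checking the remaining FIFO space */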
+	skb_fifo_len = ALIGN(skb_fifo_len, E1000_FIFO_HDR);
+
+	if (adapter->link_duplex != HALF_DUPLEX)
+		goto no_fifo_stall_required;
+
+	if (atomic_read(&adapter->tx_fifo_stall))
+		return 1;
+
+	if (skb_fifo_len >= (E1000_82547_PAD_LEN + fifo_space)) {
+		atomic_set(&adapter->tx_fifo_stall, 1);
+		return 1;
+	}
+
+no_fifo_stall_required:
+	adapter->tx_fifo_head += skb_fifo_len;
+	if (adapter->tx_fifo_head >= adapter->tx_fifo_size)
+		adapter->tx_fifo_head -= adapter->tx_fifo_size;
+	return 0;
+}
+
+#define MINIMUM_DHCP_PACKET_SIZE 282
+static int e1000_transfer_dhcp_info(struct e1000_adapter *adapter,
+				    struct sk_buff *skb)
+{
+	struct e1000_hw *hw =  &adapter->hw;
+	u16 length, offset;
+	if (vlan_tx_tag_present(skb)) {
+		if (!((vlan_tx_tag_get(skb) == hw->mng_cookie.vlan_id) &&
+			( hw->mng_cookie.status &
+			  E1000_MNG_DHCP_COOKIE_STATUS_VLAN_SUPPORT)) )
+			return 0;
+	}
+	if (skb->len > MINIMUM_DHCP_PACKET_SIZE) {
+		struct ethhdr *eth = (struct ethhdr *)skb->data;
+		if ((htons(ETH_P_IP) == eth->h_proto)) {
+			const struct iphdr *ip =
+				(struct iphdr *)((u8 *)skb->data+14);
+			if (IPPROTO_UDP == ip->protocol) {
+				struct udphdr *udp =
+					(struct udphdr *)((u8 *)ip +
+						(ip->ihl << 2));
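+				/* UDP destination port 67 is the DHCP/BOOTP
+				 * server port; skip the 8-byte UDP header and
+				 * pass the DHCP payload to the manageability
+				 * interface */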
+				if (ntohs(udp->dest) == 67) {
+					offset = (u8 *)udp + 8 - skb->data;
+					length = skb->len - offset;
+
+					return e1000_mng_write_dhcp_info(hw,
+							(u8 *)udp + 8,
+							length);
+				}
+			}
+		}
+	}
+	return 0;
+}
+
+static int __e1000_maybe_stop_tx(struct net_device *netdev, int size)
+{
+	struct e1000_adapter *adapter = netdev_priv(netdev);
+	struct e1000_tx_ring *tx_ring = adapter->tx_ring;
+
+	netif_stop_queue(netdev);
+	/* Herbert's original patch had:
+	 *  smp_mb__after_netif_stop_queue();
+	 * but since that doesn't exist yet, just open code it. */
+	smp_mb();
+
+	/* We need to check again in case another CPU has just
+	 * made room available. */
+	if (likely(E1000_DESC_UNUSED(tx_ring) < size))
+		return -EBUSY;
+
+	/* A reprieve! */
+	netif_start_queue(netdev);
+	++adapter->restart_queue;
+	return 0;
+}
+
+static int e1000_maybe_stop_tx(struct net_device *netdev,
+                               struct e1000_tx_ring *tx_ring, int size)
+{
+	if (likely(E1000_DESC_UNUSED(tx_ring) >= size))
+		return 0;
+	return __e1000_maybe_stop_tx(netdev, size);
+}
+
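+/* Number of descriptors needed for S bytes when each descriptor can hold at
+ * most 2^X bytes; the +1 over-estimates by one when S is an exact multiple */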
+#define TXD_USE_COUNT(S, X) (((S) >> (X)) + 1 )
+static int e1000_xmit_frame(struct sk_buff *skb, struct net_device *netdev)
+{
+	struct e1000_adapter *adapter = netdev_priv(netdev);
+	struct e1000_hw *hw = &adapter->hw;
+	struct e1000_tx_ring *tx_ring;
+	unsigned int first, max_per_txd = E1000_MAX_DATA_PER_TXD;
+	unsigned int max_txd_pwr = E1000_MAX_TXD_PWR;
+	unsigned int tx_flags = 0;
+	unsigned int len = skb->len - skb->data_len;
+	unsigned int nr_frags;
+	unsigned int mss;
+	int count = 0;
+	int tso;
+	unsigned int f;
+
+	/* This goes back to the question of how to logically map a tx queue
+	 * to a flow.  Right now, performance is impacted slightly negatively
+	 * if using multiple tx queues.  If the stack breaks away from a
+	 * single qdisc implementation, we can look at this again. */
+	tx_ring = adapter->tx_ring;
+
+	if (unlikely(skb->len <= 0)) {
+		dev_kfree_skb_any(skb);
+		return NETDEV_TX_OK;
+	}
+
+	/* 82571 and newer don't need the workaround that limited descriptor
+	 * length to 4kB */
+	if (hw->mac_type >= e1000_82571)
+		max_per_txd = 8192;
+
+	mss = skb_shinfo(skb)->gso_size;
+	/* The controller does a simple calculation to
+	 * make sure there is enough room in the FIFO before
+	 * initiating the DMA for each buffer.  The calc is:
+	 * 4 = ceil(buffer len/mss).  To make sure we don't
+	 * overrun the FIFO, adjust the max buffer len if mss
+	 * drops. */
+	if (mss) {
+		u8 hdr_len;
+		max_per_txd = min(mss << 2, max_per_txd);
+		max_txd_pwr = fls(max_per_txd) - 1;
+
+		/* TSO Workaround for 82571/2/3 Controllers -- if skb->data
+		 * points to just the header, pull a few bytes of payload from
+		 * frags into skb->data */
+		hdr_len = skb_transport_offset(skb) + tcp_hdrlen(skb);
+		if (skb->data_len && hdr_len == len) {
+			switch (hw->mac_type) {
+				unsigned int pull_size;
+			case e1000_82544:
+				/* Make sure we have room to chop off 4 bytes,
+				 * and that the end alignment will work out to
+				 * this hardware's requirements.
+				 * NOTE: this is a TSO-only workaround;
+				 * if the end byte alignment is not correct,
+				 * move us into the next dword */
+				if ((unsigned long)(skb_tail_pointer(skb) - 1) & 4)
+					break;
+				/* fall through */
+			case e1000_82571:
+			case e1000_82572:
+			case e1000_82573:
+			case e1000_ich8lan:
+				pull_size = min((unsigned int)4, skb->data_len);
+				if (!__pskb_pull_tail(skb, pull_size)) {
+					DPRINTK(DRV, ERR,
+						"__pskb_pull_tail failed.\n");
+					dev_kfree_skb_any(skb);
+					return NETDEV_TX_OK;
+				}
+				len = skb->len - skb->data_len;
+				break;
+			default:
+				/* do nothing */
+				break;
+			}
+		}
+	}
+
+	/* reserve a descriptor for the offload context */
+	if ((mss) || (skb->ip_summed == CHECKSUM_PARTIAL))
+		count++;
+	count++;
+
+	/* Controller Erratum workaround */
+	if (!skb->data_len && tx_ring->last_tx_tso && !skb_is_gso(skb))
+		count++;
+
+	count += TXD_USE_COUNT(len, max_txd_pwr);
+
+	if (adapter->pcix_82544)
+		count++;
+
+	/* Work-around for errata 10; it applies to all controllers
+	 * in PCI-X mode, so add one more descriptor to the count
+	 */
+	if (unlikely((hw->bus_type == e1000_bus_type_pcix) &&
+			(len > 2015)))
+		count++;
+
+	nr_frags = skb_shinfo(skb)->nr_frags;
+	for (f = 0; f < nr_frags; f++)
+		count += TXD_USE_COUNT(skb_shinfo(skb)->frags[f].size,
+				       max_txd_pwr);
+	if (adapter->pcix_82544)
+		count += nr_frags;
+
+
+	if (hw->tx_pkt_filtering &&
+	    (hw->mac_type == e1000_82573))
+		e1000_transfer_dhcp_info(adapter, skb);
+
+	/* need: count + 2 desc gap to keep tail from touching
+	 * head, otherwise try next time */
+	if (unlikely(e1000_maybe_stop_tx(netdev, tx_ring, count + 2)))
+		return NETDEV_TX_BUSY;
+
+	if (unlikely(hw->mac_type == e1000_82547)) {
+		if (unlikely(e1000_82547_fifo_workaround(adapter, skb))) {
+			netif_stop_queue(netdev);
+			mod_timer(&adapter->tx_fifo_stall_timer, jiffies + 1);
+			return NETDEV_TX_BUSY;
+		}
+	}
+
+	if (unlikely(adapter->vlgrp && vlan_tx_tag_present(skb))) {
+		tx_flags |= E1000_TX_FLAGS_VLAN;
+		tx_flags |= (vlan_tx_tag_get(skb) << E1000_TX_FLAGS_VLAN_SHIFT);
+	}
+
+	first = tx_ring->next_to_use;
+
+	tso = e1000_tso(adapter, tx_ring, skb);
+	if (tso < 0) {
+		dev_kfree_skb_any(skb);
+		return NETDEV_TX_OK;
+	}
+
+	if (likely(tso)) {
+		tx_ring->last_tx_tso = 1;
+		tx_flags |= E1000_TX_FLAGS_TSO;
+	} else if (likely(e1000_tx_csum(adapter, tx_ring, skb)))
+		tx_flags |= E1000_TX_FLAGS_CSUM;
+
+	/* The old method was to assume an IPv4 packet by default if TSO was
+	 * enabled. 82571 hardware supports TSO for IPv6 as well, so we can
+	 * no longer make that assumption; check the protocol instead. */
+	if (likely(skb->protocol == htons(ETH_P_IP)))
+		tx_flags |= E1000_TX_FLAGS_IPV4;
+
+	count = e1000_tx_map(adapter, tx_ring, skb, first, max_per_txd,
+	                     nr_frags, mss);
+
+	if (count) {
+		e1000_tx_queue(adapter, tx_ring, tx_flags, count);
+		/* Make sure there is space in the ring for the next send. */
+		e1000_maybe_stop_tx(netdev, tx_ring, MAX_SKB_FRAGS + 2);
+
+	} else {
+		dev_kfree_skb_any(skb);
+		tx_ring->buffer_info[first].time_stamp = 0;
+		tx_ring->next_to_use = first;
+	}
+
+	return NETDEV_TX_OK;
+}
+
+/**
+ * e1000_tx_timeout - Respond to a Tx Hang
+ * @netdev: network interface device structure
+ **/
+
+static void e1000_tx_timeout(struct net_device *netdev)
+{
+	struct e1000_adapter *adapter = netdev_priv(netdev);
+
+	/* Do the reset outside of interrupt context */
+	adapter->tx_timeout_count++;
+	schedule_work(&adapter->reset_task);
+}
+
+static void e1000_reset_task(struct work_struct *work)
+{
+	struct e1000_adapter *adapter =
+		container_of(work, struct e1000_adapter, reset_task);
+
+	e1000_reinit_locked(adapter);
+}
+
+/**
+ * e1000_get_stats - Get System Network Statistics
+ * @netdev: network interface device structure
+ *
+ * Returns the address of the device statistics structure.
+ * The statistics are actually updated from the timer callback.
+ **/
+
+static struct net_device_stats *e1000_get_stats(struct net_device *netdev)
+{
+	struct e1000_adapter *adapter = netdev_priv(netdev);
+
+	/* only return the current stats */
+	return &adapter->net_stats;
+}
+
+/**
+ * e1000_change_mtu - Change the Maximum Transfer Unit
+ * @netdev: network interface device structure
+ * @new_mtu: new value for maximum frame size
+ *
+ * Returns 0 on success, negative on failure
+ **/
+
+static int e1000_change_mtu(struct net_device *netdev, int new_mtu)
+{
+	struct e1000_adapter *adapter = netdev_priv(netdev);
+	struct e1000_hw *hw = &adapter->hw;
+	int max_frame = new_mtu + ENET_HEADER_SIZE + ETHERNET_FCS_SIZE;
+	u16 eeprom_data = 0;
+
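+	/* max_frame is the requested MTU plus the Ethernet header and FCS;
+	 * reject anything below the minimum frame size or above the
+	 * controller's jumbo frame limit */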
+	if ((max_frame < MINIMUM_ETHERNET_FRAME_SIZE) ||
+	    (max_frame > MAX_JUMBO_FRAME_SIZE)) {
+		DPRINTK(PROBE, ERR, "Invalid MTU setting\n");
+		return -EINVAL;
+	}
+
+	/* Adapter-specific max frame size limits. */
+	switch (hw->mac_type) {
+	case e1000_undefined ... e1000_82542_rev2_1:
+	case e1000_ich8lan:
+		if (max_frame > MAXIMUM_ETHERNET_FRAME_SIZE) {
+			DPRINTK(PROBE, ERR, "Jumbo Frames not supported.\n");
+			return -EINVAL;
+		}
+		break;
+	case e1000_82573:
+		/* Jumbo Frames not supported if:
+		 * - this is not an 82573L device
+		 * - ASPM is enabled in any way (0x1A bits 3:2) */
+		e1000_read_eeprom(hw, EEPROM_INIT_3GIO_3, 1,
+		                  &eeprom_data);
+		if ((hw->device_id != E1000_DEV_ID_82573L) ||
+		    (eeprom_data & EEPROM_WORD1A_ASPM_MASK)) {
+			if (max_frame > MAXIMUM_ETHERNET_FRAME_SIZE) {
+				DPRINTK(PROBE, ERR,
+					"Jumbo Frames not supported.\n");
+				return -EINVAL;
+			}
+			break;
+		}
+		/* ERT will be enabled later to enable wire speed receives */
+
+		/* fall through to get support */
+	case e1000_82571:
+	case e1000_82572:
+	case e1000_80003es2lan:
+#define MAX_STD_JUMBO_FRAME_SIZE 9234
+		if (max_frame > MAX_STD_JUMBO_FRAME_SIZE) {
+			DPRINTK(PROBE, ERR, "MTU > 9216 not supported.\n");
+			return -EINVAL;
+		}
+		break;
+	default:
+		/* Capable of supporting up to MAX_JUMBO_FRAME_SIZE limit. */
+		break;
+	}
+
+	/* NOTE: netdev_alloc_skb reserves 16 bytes, and typically NET_IP_ALIGN
+	 * means we reserve 2 more; this pushes us to allocate from the next
+	 * larger slab size, i.e. RXBUFFER_2048 --> size-4096 slab */
+
+	if (max_frame <= E1000_RXBUFFER_256)
+		adapter->rx_buffer_len = E1000_RXBUFFER_256;
+	else if (max_frame <= E1000_RXBUFFER_512)
+		adapter->rx_buffer_len = E1000_RXBUFFER_512;
+	else if (max_frame <= E1000_RXBUFFER_1024)
+		adapter->rx_buffer_len = E1000_RXBUFFER_1024;
+	else if (max_frame <= E1000_RXBUFFER_2048)
+		adapter->rx_buffer_len = E1000_RXBUFFER_2048;
+	else if (max_frame <= E1000_RXBUFFER_4096)
+		adapter->rx_buffer_len = E1000_RXBUFFER_4096;
+	else if (max_frame <= E1000_RXBUFFER_8192)
+		adapter->rx_buffer_len = E1000_RXBUFFER_8192;
+	else if (max_frame <= E1000_RXBUFFER_16384)
+		adapter->rx_buffer_len = E1000_RXBUFFER_16384;
+
+	/* adjust allocation if LPE protects us, and we aren't using SBP */
+	if (!hw->tbi_compatibility_on &&
+	    ((max_frame == MAXIMUM_ETHERNET_FRAME_SIZE) ||
+	     (max_frame == MAXIMUM_ETHERNET_VLAN_SIZE)))
+		adapter->rx_buffer_len = MAXIMUM_ETHERNET_VLAN_SIZE;
+
+	netdev->mtu = new_mtu;
+	hw->max_frame_size = max_frame;
+
+	if (netif_running(netdev))
+		e1000_reinit_locked(adapter);
+
+	return 0;
+}
+
+/**
+ * e1000_update_stats - Update the board statistics counters
+ * @adapter: board private structure
+ **/
+
+void e1000_update_stats(struct e1000_adapter *adapter)
+{
+	struct e1000_hw *hw = &adapter->hw;
+	struct pci_dev *pdev = adapter->pdev;
+	unsigned long flags;
+	u16 phy_tmp;
+
+#define PHY_IDLE_ERROR_COUNT_MASK 0x00FF
+
+	/*
+	 * Prevent stats update while adapter is being reset, or if the pci
+	 * connection is down.
+	 */
+	if (adapter->link_speed == 0)
+		return;
+	if (pci_channel_offline(pdev))
+		return;
+
+	spin_lock_irqsave(&adapter->stats_lock, flags);
+
+	/* these counters are modified from e1000_tbi_adjust_stats,
+	 * called from the interrupt context, so they must only
+	 * be written while holding adapter->stats_lock
+	 */
+
+	adapter->stats.crcerrs += er32(CRCERRS);
+	adapter->stats.gprc += er32(GPRC);
+	adapter->stats.gorcl += er32(GORCL);
+	adapter->stats.gorch += er32(GORCH);
+	adapter->stats.bprc += er32(BPRC);
+	adapter->stats.mprc += er32(MPRC);
+	adapter->stats.roc += er32(ROC);
+
+	if (hw->mac_type != e1000_ich8lan) {
+		adapter->stats.prc64 += er32(PRC64);
+		adapter->stats.prc127 += er32(PRC127);
+		adapter->stats.prc255 += er32(PRC255);
+		adapter->stats.prc511 += er32(PRC511);
+		adapter->stats.prc1023 += er32(PRC1023);
+		adapter->stats.prc1522 += er32(PRC1522);
+	}
+
+	adapter->stats.symerrs += er32(SYMERRS);
+	adapter->stats.mpc += er32(MPC);
+	adapter->stats.scc += er32(SCC);
+	adapter->stats.ecol += er32(ECOL);
+	adapter->stats.mcc += er32(MCC);
+	adapter->stats.latecol += er32(LATECOL);
+	adapter->stats.dc += er32(DC);
+	adapter->stats.sec += er32(SEC);
+	adapter->stats.rlec += er32(RLEC);
+	adapter->stats.xonrxc += er32(XONRXC);
+	adapter->stats.xontxc += er32(XONTXC);
+	adapter->stats.xoffrxc += er32(XOFFRXC);
+	adapter->stats.xofftxc += er32(XOFFTXC);
+	adapter->stats.fcruc += er32(FCRUC);
+	adapter->stats.gptc += er32(GPTC);
+	adapter->stats.gotcl += er32(GOTCL);
+	adapter->stats.gotch += er32(GOTCH);
+	adapter->stats.rnbc += er32(RNBC);
+	adapter->stats.ruc += er32(RUC);
+	adapter->stats.rfc += er32(RFC);
+	adapter->stats.rjc += er32(RJC);
+	adapter->stats.torl += er32(TORL);
+	adapter->stats.torh += er32(TORH);
+	adapter->stats.totl += er32(TOTL);
+	adapter->stats.toth += er32(TOTH);
+	adapter->stats.tpr += er32(TPR);
+
+	if (hw->mac_type != e1000_ich8lan) {
+		adapter->stats.ptc64 += er32(PTC64);
+		adapter->stats.ptc127 += er32(PTC127);
+		adapter->stats.ptc255 += er32(PTC255);
+		adapter->stats.ptc511 += er32(PTC511);
+		adapter->stats.ptc1023 += er32(PTC1023);
+		adapter->stats.ptc1522 += er32(PTC1522);
+	}
+
+	adapter->stats.mptc += er32(MPTC);
+	adapter->stats.bptc += er32(BPTC);
+
+	/* used for adaptive IFS */
+
+	hw->tx_packet_delta = er32(TPT);
+	adapter->stats.tpt += hw->tx_packet_delta;
+	hw->collision_delta = er32(COLC);
+	adapter->stats.colc += hw->collision_delta;
+
+	if (hw->mac_type >= e1000_82543) {
+		adapter->stats.algnerrc += er32(ALGNERRC);
+		adapter->stats.rxerrc += er32(RXERRC);
+		adapter->stats.tncrs += er32(TNCRS);
+		adapter->stats.cexterr += er32(CEXTERR);
+		adapter->stats.tsctc += er32(TSCTC);
+		adapter->stats.tsctfc += er32(TSCTFC);
+	}
+	if (hw->mac_type > e1000_82547_rev_2) {
+		adapter->stats.iac += er32(IAC);
+		adapter->stats.icrxoc += er32(ICRXOC);
+
+		if (hw->mac_type != e1000_ich8lan) {
+			adapter->stats.icrxptc += er32(ICRXPTC);
+			adapter->stats.icrxatc += er32(ICRXATC);
+			adapter->stats.ictxptc += er32(ICTXPTC);
+			adapter->stats.ictxatc += er32(ICTXATC);
+			adapter->stats.ictxqec += er32(ICTXQEC);
+			adapter->stats.ictxqmtc += er32(ICTXQMTC);
+			adapter->stats.icrxdmtc += er32(ICRXDMTC);
+		}
+	}
+
+	/* Fill out the OS statistics structure */
+	adapter->net_stats.multicast = adapter->stats.mprc;
+	adapter->net_stats.collisions = adapter->stats.colc;
+
+	/* Rx Errors */
+
+	/* RLEC on some newer hardware can be incorrect so build
+	* our own version based on RUC and ROC */
+	adapter->net_stats.rx_errors = adapter->stats.rxerrc +
+		adapter->stats.crcerrs + adapter->stats.algnerrc +
+		adapter->stats.ruc + adapter->stats.roc +
+		adapter->stats.cexterr;
+	adapter->stats.rlerrc = adapter->stats.ruc + adapter->stats.roc;
+	adapter->net_stats.rx_length_errors = adapter->stats.rlerrc;
+	adapter->net_stats.rx_crc_errors = adapter->stats.crcerrs;
+	adapter->net_stats.rx_frame_errors = adapter->stats.algnerrc;
+	adapter->net_stats.rx_missed_errors = adapter->stats.mpc;
+
+	/* Tx Errors */
+	adapter->stats.txerrc = adapter->stats.ecol + adapter->stats.latecol;
+	adapter->net_stats.tx_errors = adapter->stats.txerrc;
+	adapter->net_stats.tx_aborted_errors = adapter->stats.ecol;
+	adapter->net_stats.tx_window_errors = adapter->stats.latecol;
+	adapter->net_stats.tx_carrier_errors = adapter->stats.tncrs;
+	if (hw->bad_tx_carr_stats_fd &&
+	    adapter->link_duplex == FULL_DUPLEX) {
+		adapter->net_stats.tx_carrier_errors = 0;
+		adapter->stats.tncrs = 0;
+	}
+
+	/* Tx Dropped needs to be maintained elsewhere */
+
+	/* Phy Stats */
+	if (hw->media_type == e1000_media_type_copper) {
+		if ((adapter->link_speed == SPEED_1000) &&
+		   (!e1000_read_phy_reg(hw, PHY_1000T_STATUS, &phy_tmp))) {
+			phy_tmp &= PHY_IDLE_ERROR_COUNT_MASK;
+			adapter->phy_stats.idle_errors += phy_tmp;
+		}
+
+		if ((hw->mac_type <= e1000_82546) &&
+		   (hw->phy_type == e1000_phy_m88) &&
+		   !e1000_read_phy_reg(hw, M88E1000_RX_ERR_CNTR, &phy_tmp))
+			adapter->phy_stats.receive_errors += phy_tmp;
+	}
+
+	/* Management Stats */
+	if (hw->has_smbus) {
+		adapter->stats.mgptc += er32(MGTPTC);
+		adapter->stats.mgprc += er32(MGTPRC);
+		adapter->stats.mgpdc += er32(MGTPDC);
+	}
+
+	spin_unlock_irqrestore(&adapter->stats_lock, flags);
+}
+
+/**
+ * e1000_intr_msi - Interrupt Handler
+ * @irq: interrupt number
+ * @data: pointer to a network interface device structure
+ **/
+
+static irqreturn_t e1000_intr_msi(int irq, void *data)
+{
+	struct net_device *netdev = data;
+	struct e1000_adapter *adapter = netdev_priv(netdev);
+	struct e1000_hw *hw = &adapter->hw;
+	u32 icr = er32(ICR);
+
+	/* in NAPI mode, reading ICR disables interrupts using IAM */
+
+	if (icr & (E1000_ICR_RXSEQ | E1000_ICR_LSC)) {
+		hw->get_link_status = 1;
+		/* 80003ES2LAN workaround-- For packet buffer work-around on
+		 * link down event; disable receives here in the ISR and reset
+		 * adapter in watchdog */
+		if (netif_carrier_ok(netdev) &&
+		    (hw->mac_type == e1000_80003es2lan)) {
+			/* disable receives */
+			u32 rctl = er32(RCTL);
+			ew32(RCTL, rctl & ~E1000_RCTL_EN);
+		}
+		/* guard against interrupt when we're going down */
+		if (!test_bit(__E1000_DOWN, &adapter->flags))
+			mod_timer(&adapter->watchdog_timer, jiffies + 1);
+	}
+
+	if (likely(napi_schedule_prep(&adapter->napi))) {
+		adapter->total_tx_bytes = 0;
+		adapter->total_tx_packets = 0;
+		adapter->total_rx_bytes = 0;
+		adapter->total_rx_packets = 0;
+		__napi_schedule(&adapter->napi);
+	} else
+		e1000_irq_enable(adapter);
+
+	return IRQ_HANDLED;
+}
+
+/**
+ * e1000_intr - Interrupt Handler
+ * @irq: interrupt number
+ * @data: pointer to a network interface device structure
+ **/
+
+static irqreturn_t e1000_intr(int irq, void *data)
+{
+	struct net_device *netdev = data;
+	struct e1000_adapter *adapter = netdev_priv(netdev);
+	struct e1000_hw *hw = &adapter->hw;
+	u32 rctl, icr = er32(ICR);
+
+	if (unlikely((!icr) || test_bit(__E1000_DOWN, &adapter->flags)))
+		return IRQ_NONE;  /* Not our interrupt */
+
+	/* IMS will not auto-mask if INT_ASSERTED is not set, and if it is
+	 * not set, then the adapter didn't send an interrupt */
+	if (unlikely(hw->mac_type >= e1000_82571 &&
+	             !(icr & E1000_ICR_INT_ASSERTED)))
+		return IRQ_NONE;
+
+	/* Interrupt Auto-Mask...upon reading ICR, interrupts are masked.  No
+	 * need for the IMC write */
+
+	if (unlikely(icr & (E1000_ICR_RXSEQ | E1000_ICR_LSC))) {
+		hw->get_link_status = 1;
+		/* 80003ES2LAN workaround--
+		 * For packet buffer work-around on link down event;
+		 * disable receives here in the ISR and
+		 * reset adapter in watchdog
+		 */
+		if (netif_carrier_ok(netdev) &&
+		    (hw->mac_type == e1000_80003es2lan)) {
+			/* disable receives */
+			rctl = er32(RCTL);
+			ew32(RCTL, rctl & ~E1000_RCTL_EN);
+		}
+		/* guard against interrupt when we're going down */
+		if (!test_bit(__E1000_DOWN, &adapter->flags))
+			mod_timer(&adapter->watchdog_timer, jiffies + 1);
+	}
+
+	if (unlikely(hw->mac_type < e1000_82571)) {
+		/* disable interrupts, without the synchronize_irq bit */
+		ew32(IMC, ~0);
+		E1000_WRITE_FLUSH();
+	}
+	if (likely(napi_schedule_prep(&adapter->napi))) {
+		adapter->total_tx_bytes = 0;
+		adapter->total_tx_packets = 0;
+		adapter->total_rx_bytes = 0;
+		adapter->total_rx_packets = 0;
+		__napi_schedule(&adapter->napi);
+	} else {
+		/* this really should not happen! if it does it is basically a
+		 * bug, but not a hard error, so enable ints and continue */
+		if (!test_bit(__E1000_DOWN, &adapter->flags))
+			e1000_irq_enable(adapter);
+	}
+
+	return IRQ_HANDLED;
+}
+
+/**
+ * e1000_clean - NAPI Rx polling callback
+ * @adapter: board private structure
+ **/
+static int e1000_clean(struct napi_struct *napi, int budget)
+{
+	struct e1000_adapter *adapter = container_of(napi, struct e1000_adapter, napi);
+	struct net_device *poll_dev = adapter->netdev;
+	int tx_cleaned = 0, work_done = 0;
+
+	adapter = netdev_priv(poll_dev);
+
+	tx_cleaned = e1000_clean_tx_irq(adapter, &adapter->tx_ring[0]);
+
+	adapter->clean_rx(adapter, &adapter->rx_ring[0],
+	                  &work_done, budget);
+
+	if (!tx_cleaned)
+		work_done = budget;
+
+	/* If budget not fully consumed, exit the polling mode */
+	if (work_done < budget) {
+		if (likely(adapter->itr_setting & 3))
+			e1000_set_itr(adapter);
+		napi_complete(napi);
+		if (!test_bit(__E1000_DOWN, &adapter->flags))
+			e1000_irq_enable(adapter);
+	}
+
+	return work_done;
+}
+
+/**
+ * e1000_clean_tx_irq - Reclaim resources after transmit completes
+ * @adapter: board private structure
+ **/
+static bool e1000_clean_tx_irq(struct e1000_adapter *adapter,
+			       struct e1000_tx_ring *tx_ring)
+{
+	struct e1000_hw *hw = &adapter->hw;
+	struct net_device *netdev = adapter->netdev;
+	struct e1000_tx_desc *tx_desc, *eop_desc;
+	struct e1000_buffer *buffer_info;
+	unsigned int i, eop;
+	unsigned int count = 0;
+	unsigned int total_tx_bytes=0, total_tx_packets=0;
+
+	i = tx_ring->next_to_clean;
+	eop = tx_ring->buffer_info[i].next_to_watch;
+	eop_desc = E1000_TX_DESC(*tx_ring, eop);
+
+	while ((eop_desc->upper.data & cpu_to_le32(E1000_TXD_STAT_DD)) &&
+	       (count < tx_ring->count)) {
+		bool cleaned = false;
+		for ( ; !cleaned; count++) {
+			tx_desc = E1000_TX_DESC(*tx_ring, i);
+			buffer_info = &tx_ring->buffer_info[i];
+			cleaned = (i == eop);
+
+			if (cleaned) {
+				struct sk_buff *skb = buffer_info->skb;
+				unsigned int segs, bytecount;
+				segs = skb_shinfo(skb)->gso_segs ?: 1;
+				/* account for the header bytes replicated in
+				 * each TSO segment beyond the first */
+				bytecount = ((segs - 1) * skb_headlen(skb)) +
+				            skb->len;
+				total_tx_packets += segs;
+				total_tx_bytes += bytecount;
+			}
+			e1000_unmap_and_free_tx_resource(adapter, buffer_info);
+			tx_desc->upper.data = 0;
+
+			if (unlikely(++i == tx_ring->count)) i = 0;
+		}
+
+		eop = tx_ring->buffer_info[i].next_to_watch;
+		eop_desc = E1000_TX_DESC(*tx_ring, eop);
+	}
+
+	tx_ring->next_to_clean = i;
+
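+/* Only wake a stopped queue once a reasonable number of descriptors are free
+ * again, to avoid ping-ponging between stop and wake */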
+#define TX_WAKE_THRESHOLD 32
+	if (unlikely(count && netif_carrier_ok(netdev) &&
+		     E1000_DESC_UNUSED(tx_ring) >= TX_WAKE_THRESHOLD)) {
+		/* Make sure that anybody stopping the queue after this
+		 * sees the new next_to_clean.
+		 */
+		smp_mb();
+		if (netif_queue_stopped(netdev)) {
+			netif_wake_queue(netdev);
+			++adapter->restart_queue;
+		}
+	}
+
+	if (adapter->detect_tx_hung) {
+		/* Detect a transmit hang in hardware; this serializes the
+		 * check with the clearing of time_stamp and movement of i */
+		adapter->detect_tx_hung = false;
+		if (tx_ring->buffer_info[i].time_stamp &&
+		    time_after(jiffies, tx_ring->buffer_info[i].time_stamp +
+		               (adapter->tx_timeout_factor * HZ))
+		    && !(er32(STATUS) & E1000_STATUS_TXOFF)) {
+
+			/* detected Tx unit hang */
+			DPRINTK(DRV, ERR, "Detected Tx Unit Hang\n"
+					"  Tx Queue             <%lu>\n"
+					"  TDH                  <%x>\n"
+					"  TDT                  <%x>\n"
+					"  next_to_use          <%x>\n"
+					"  next_to_clean        <%x>\n"
+					"buffer_info[next_to_clean]\n"
+					"  time_stamp           <%lx>\n"
+					"  next_to_watch        <%x>\n"
+					"  jiffies              <%lx>\n"
+					"  next_to_watch.status <%x>\n",
+				(unsigned long)((tx_ring - adapter->tx_ring) /
+					sizeof(struct e1000_tx_ring)),
+				readl(hw->hw_addr + tx_ring->tdh),
+				readl(hw->hw_addr + tx_ring->tdt),
+				tx_ring->next_to_use,
+				tx_ring->next_to_clean,
+				tx_ring->buffer_info[i].time_stamp,
+				eop,
+				jiffies,
+				eop_desc->upper.fields.status);
+			netif_stop_queue(netdev);
+		}
+	}
+	adapter->total_tx_bytes += total_tx_bytes;
+	adapter->total_tx_packets += total_tx_packets;
+	adapter->net_stats.tx_bytes += total_tx_bytes;
+	adapter->net_stats.tx_packets += total_tx_packets;
+	return (count < tx_ring->count);
+}
+
+/**
+ * e1000_rx_checksum - Receive Checksum Offload for 82543
+ * @adapter:     board private structure
+ * @status_err:  receive descriptor status and error fields
+ * @csum:        receive descriptor csum field
+ * @sk_buff:     socket buffer with received data
+ **/
+
+static void e1000_rx_checksum(struct e1000_adapter *adapter, u32 status_err,
+			      u32 csum, struct sk_buff *skb)
+{
+	struct e1000_hw *hw = &adapter->hw;
+	u16 status = (u16)status_err;
+	u8 errors = (u8)(status_err >> 24);
+	skb->ip_summed = CHECKSUM_NONE;
+
+	/* 82543 or newer only */
+	if (unlikely(hw->mac_type < e1000_82543)) return;
+	/* Ignore Checksum bit is set */
+	if (unlikely(status & E1000_RXD_STAT_IXSM)) return;
+	/* TCP/UDP checksum error bit is set */
+	if (unlikely(errors & E1000_RXD_ERR_TCPE)) {
+		/* let the stack verify checksum errors */
+		adapter->hw_csum_err++;
+		return;
+	}
+	/* TCP/UDP Checksum has not been calculated */
+	if (hw->mac_type <= e1000_82547_rev_2) {
+		if (!(status & E1000_RXD_STAT_TCPCS))
+			return;
+	} else {
+		if (!(status & (E1000_RXD_STAT_TCPCS | E1000_RXD_STAT_UDPCS)))
+			return;
+	}
+	/* It must be a TCP or UDP packet with a valid checksum */
+	if (likely(status & E1000_RXD_STAT_TCPCS)) {
+		/* TCP checksum is good */
+		skb->ip_summed = CHECKSUM_UNNECESSARY;
+	} else if (hw->mac_type > e1000_82547_rev_2) {
+		/* IP fragment with UDP payload */
+		/* Hardware complements the payload checksum, so we undo it
+		 * and then put the value in host order for further stack use.
+		 */
+		__sum16 sum = (__force __sum16)htons(csum);
+		skb->csum = csum_unfold(~sum);
+		skb->ip_summed = CHECKSUM_COMPLETE;
+	}
+	adapter->hw_csum_good++;
+}
+
+/**
+ * e1000_clean_rx_irq - Send received data up the network stack; legacy
+ * @adapter: board private structure
+ **/
+static bool e1000_clean_rx_irq(struct e1000_adapter *adapter,
+			       struct e1000_rx_ring *rx_ring,
+			       int *work_done, int work_to_do)
+{
+	struct e1000_hw *hw = &adapter->hw;
+	struct net_device *netdev = adapter->netdev;
+	struct pci_dev *pdev = adapter->pdev;
+	struct e1000_rx_desc *rx_desc, *next_rxd;
+	struct e1000_buffer *buffer_info, *next_buffer;
+	unsigned long flags;
+	u32 length;
+	u8 last_byte;
+	unsigned int i;
+	int cleaned_count = 0;
+	bool cleaned = false;
+	unsigned int total_rx_bytes=0, total_rx_packets=0;
+
+	i = rx_ring->next_to_clean;
+	rx_desc = E1000_RX_DESC(*rx_ring, i);
+	buffer_info = &rx_ring->buffer_info[i];
+
+	while (rx_desc->status & E1000_RXD_STAT_DD) {
+		struct sk_buff *skb;
+		u8 status;
+
+		if (*work_done >= work_to_do)
+			break;
+		(*work_done)++;
+
+		status = rx_desc->status;
+		skb = buffer_info->skb;
+		buffer_info->skb = NULL;
+
+		prefetch(skb->data - NET_IP_ALIGN);
+
+		if (++i == rx_ring->count) i = 0;
+		next_rxd = E1000_RX_DESC(*rx_ring, i);
+		prefetch(next_rxd);
+
+		next_buffer = &rx_ring->buffer_info[i];
+
+		cleaned = true;
+		cleaned_count++;
+		pci_unmap_single(pdev,
+		                 buffer_info->dma,
+		                 buffer_info->length,
+		                 PCI_DMA_FROMDEVICE);
+		buffer_info->dma = 0;
+
+		length = le16_to_cpu(rx_desc->length);
+		/* !EOP means multiple descriptors were used to store a single
+		 * packet; also make sure the frame isn't just the CRC */
+		if (unlikely(!(status & E1000_RXD_STAT_EOP) || (length <= 4))) {
+			/* All receives must fit into a single buffer */
+			E1000_DBG("%s: Receive packet consumed multiple"
+				  " buffers\n", netdev->name);
+			/* recycle */
+			buffer_info->skb = skb;
+			goto next_desc;
+		}
+
+		if (unlikely(rx_desc->errors & E1000_RXD_ERR_FRAME_ERR_MASK)) {
+			last_byte = *(skb->data + length - 1);
+			if (TBI_ACCEPT(hw, status, rx_desc->errors, length,
+				       last_byte)) {
+				spin_lock_irqsave(&adapter->stats_lock, flags);
+				e1000_tbi_adjust_stats(hw, &adapter->stats,
+				                       length, skb->data);
+				spin_unlock_irqrestore(&adapter->stats_lock,
+				                       flags);
+				length--;
+			} else {
+				/* recycle */
+				buffer_info->skb = skb;
+				goto next_desc;
+			}
+		}
+
+		/* adjust length to remove Ethernet CRC, this must be
+		 * done after the TBI_ACCEPT workaround above */
+		length -= 4;
+
+		/* probably a little skewed due to removing CRC */
+		total_rx_bytes += length;
+		total_rx_packets++;
+
+		/* code added for copybreak, this should improve
+		 * performance for small packets with large amounts
+		 * of reassembly being done in the stack */
+		if (length < copybreak) {
+			struct sk_buff *new_skb =
+			    netdev_alloc_skb(netdev, length + NET_IP_ALIGN);
+			if (new_skb) {
+				skb_reserve(new_skb, NET_IP_ALIGN);
+				skb_copy_to_linear_data_offset(new_skb,
+							       -NET_IP_ALIGN,
+							       (skb->data -
+							        NET_IP_ALIGN),
+							       (length +
+							        NET_IP_ALIGN));
+				/* save the skb in buffer_info as good */
+				buffer_info->skb = skb;
+				skb = new_skb;
+			}
+			/* else just continue with the old one */
+		}
+		/* end copybreak code */
+		skb_put(skb, length);
+
+		/* Receive Checksum Offload */
+		e1000_rx_checksum(adapter,
+				  (u32)(status) |
+				  ((u32)(rx_desc->errors) << 24),
+				  le16_to_cpu(rx_desc->csum), skb);
+
+		skb->protocol = eth_type_trans(skb, netdev);
+
+		if (unlikely(adapter->vlgrp &&
+			    (status & E1000_RXD_STAT_VP))) {
+			vlan_hwaccel_receive_skb(skb, adapter->vlgrp,
+						 le16_to_cpu(rx_desc->special));
+		} else {
+			netif_receive_skb(skb);
+		}
+
+next_desc:
+		rx_desc->status = 0;
+
+		/* return some buffers to hardware, one at a time is too slow */
+		if (unlikely(cleaned_count >= E1000_RX_BUFFER_WRITE)) {
+			adapter->alloc_rx_buf(adapter, rx_ring, cleaned_count);
+			cleaned_count = 0;
+		}
+
+		/* use prefetched values */
+		rx_desc = next_rxd;
+		buffer_info = next_buffer;
+	}
+	rx_ring->next_to_clean = i;
+
+	cleaned_count = E1000_DESC_UNUSED(rx_ring);
+	if (cleaned_count)
+		adapter->alloc_rx_buf(adapter, rx_ring, cleaned_count);
+
+	adapter->total_rx_packets += total_rx_packets;
+	adapter->total_rx_bytes += total_rx_bytes;
+	adapter->net_stats.rx_bytes += total_rx_bytes;
+	adapter->net_stats.rx_packets += total_rx_packets;
+	return cleaned;
+}
+
+/**
+ * e1000_alloc_rx_buffers - Replace used receive buffers; legacy & extended
+ * @adapter: address of board private structure
+ * @rx_ring: ring to refill with receive buffers
+ * @cleaned_count: number of buffers to allocate
+ **/
+
+static void e1000_alloc_rx_buffers(struct e1000_adapter *adapter,
+				   struct e1000_rx_ring *rx_ring,
+				   int cleaned_count)
+{
+	struct e1000_hw *hw = &adapter->hw;
+	struct net_device *netdev = adapter->netdev;
+	struct pci_dev *pdev = adapter->pdev;
+	struct e1000_rx_desc *rx_desc;
+	struct e1000_buffer *buffer_info;
+	struct sk_buff *skb;
+	unsigned int i;
+	unsigned int bufsz = adapter->rx_buffer_len + NET_IP_ALIGN;
+
+	i = rx_ring->next_to_use;
+	buffer_info = &rx_ring->buffer_info[i];
+
+	while (cleaned_count--) {
+		skb = buffer_info->skb;
+		if (skb) {
+			skb_trim(skb, 0);
+			goto map_skb;
+		}
+
+		skb = netdev_alloc_skb(netdev, bufsz);
+		if (unlikely(!skb)) {
+			/* Better luck next round */
+			adapter->alloc_rx_buff_failed++;
+			break;
+		}
+
+		/* Fix for errata 23, can't cross 64kB boundary */
+		if (!e1000_check_64k_bound(adapter, skb->data, bufsz)) {
+			struct sk_buff *oldskb = skb;
+			DPRINTK(RX_ERR, ERR, "skb align check failed: %u bytes "
+					     "at %p\n", bufsz, skb->data);
+			/* Try again, without freeing the previous */
+			skb = netdev_alloc_skb(netdev, bufsz);
+			/* Failed allocation, critical failure */
+			if (!skb) {
+				dev_kfree_skb(oldskb);
+				break;
+			}
+
+			if (!e1000_check_64k_bound(adapter, skb->data, bufsz)) {
+				/* give up */
+				dev_kfree_skb(skb);
+				dev_kfree_skb(oldskb);
+				break; /* while !buffer_info->skb */
+			}
+
+			/* Use new allocation */
+			dev_kfree_skb(oldskb);
+		}
+		/* Make buffer alignment 2 beyond a 16 byte boundary
+		 * this will result in a 16 byte aligned IP header after
+		 * the 14 byte MAC header is removed
+		 */
+		skb_reserve(skb, NET_IP_ALIGN);
+
+		buffer_info->skb = skb;
+		buffer_info->length = adapter->rx_buffer_len;
+map_skb:
+		buffer_info->dma = pci_map_single(pdev,
+						  skb->data,
+						  adapter->rx_buffer_len,
+						  PCI_DMA_FROMDEVICE);
+
+		/* Fix for errata 23, can't cross 64kB boundary */
+		if (!e1000_check_64k_bound(adapter,
+					(void *)(unsigned long)buffer_info->dma,
+					adapter->rx_buffer_len)) {
+			DPRINTK(RX_ERR, ERR,
+				"dma align check failed: %u bytes at %p\n",
+				adapter->rx_buffer_len,
+				(void *)(unsigned long)buffer_info->dma);
+			dev_kfree_skb(skb);
+			buffer_info->skb = NULL;
+
+			pci_unmap_single(pdev, buffer_info->dma,
+					 adapter->rx_buffer_len,
+					 PCI_DMA_FROMDEVICE);
+			buffer_info->dma = 0;
+
+			break; /* while !buffer_info->skb */
+		}
+		rx_desc = E1000_RX_DESC(*rx_ring, i);
+		rx_desc->buffer_addr = cpu_to_le64(buffer_info->dma);
+
+		if (unlikely(++i == rx_ring->count))
+			i = 0;
+		buffer_info = &rx_ring->buffer_info[i];
+	}
+
+	if (likely(rx_ring->next_to_use != i)) {
+		rx_ring->next_to_use = i;
+		if (unlikely(i-- == 0))
+			i = (rx_ring->count - 1);
+
+		/* Force memory writes to complete before letting h/w
+		 * know there are new descriptors to fetch.  (Only
+		 * applicable for weak-ordered memory model archs,
+		 * such as IA-64). */
+		wmb();
+		writel(i, hw->hw_addr + rx_ring->rdt);
+	}
+}
+
+/**
+ * e1000_smartspeed - Workaround for SmartSpeed on 82541 and 82547 controllers.
+ * @adapter: board private structure
+ **/
+
+static void e1000_smartspeed(struct e1000_adapter *adapter)
+{
+	struct e1000_hw *hw = &adapter->hw;
+	u16 phy_status;
+	u16 phy_ctrl;
+
+	if ((hw->phy_type != e1000_phy_igp) || !hw->autoneg ||
+	   !(hw->autoneg_advertised & ADVERTISE_1000_FULL))
+		return;
+
+	if (adapter->smartspeed == 0) {
+		/* If Master/Slave config fault is asserted twice,
+		 * we assume back-to-back */
+		e1000_read_phy_reg(hw, PHY_1000T_STATUS, &phy_status);
+		if (!(phy_status & SR_1000T_MS_CONFIG_FAULT)) return;
+		e1000_read_phy_reg(hw, PHY_1000T_STATUS, &phy_status);
+		if (!(phy_status & SR_1000T_MS_CONFIG_FAULT)) return;
+		e1000_read_phy_reg(hw, PHY_1000T_CTRL, &phy_ctrl);
+		if (phy_ctrl & CR_1000T_MS_ENABLE) {
+			phy_ctrl &= ~CR_1000T_MS_ENABLE;
+			e1000_write_phy_reg(hw, PHY_1000T_CTRL,
+					    phy_ctrl);
+			adapter->smartspeed++;
+			if (!e1000_phy_setup_autoneg(hw) &&
+			   !e1000_read_phy_reg(hw, PHY_CTRL,
+				   	       &phy_ctrl)) {
+				phy_ctrl |= (MII_CR_AUTO_NEG_EN |
+					     MII_CR_RESTART_AUTO_NEG);
+				e1000_write_phy_reg(hw, PHY_CTRL,
+						    phy_ctrl);
+			}
+		}
+		return;
+	} else if (adapter->smartspeed == E1000_SMARTSPEED_DOWNSHIFT) {
+		/* If still no link, perhaps using 2/3 pair cable */
+		e1000_read_phy_reg(hw, PHY_1000T_CTRL, &phy_ctrl);
+		phy_ctrl |= CR_1000T_MS_ENABLE;
+		e1000_write_phy_reg(hw, PHY_1000T_CTRL, phy_ctrl);
+		if (!e1000_phy_setup_autoneg(hw) &&
+		   !e1000_read_phy_reg(hw, PHY_CTRL, &phy_ctrl)) {
+			phy_ctrl |= (MII_CR_AUTO_NEG_EN |
+				     MII_CR_RESTART_AUTO_NEG);
+			e1000_write_phy_reg(hw, PHY_CTRL, phy_ctrl);
+		}
+	}
+	/* Restart process after E1000_SMARTSPEED_MAX iterations */
+	if (adapter->smartspeed++ == E1000_SMARTSPEED_MAX)
+		adapter->smartspeed = 0;
+}
+
+/**
+ * e1000_ioctl - entry point for device ioctl calls
+ * @netdev: network interface device structure
+ * @ifr: interface request structure
+ * @cmd: ioctl command to execute
+ **/
+
+static int e1000_ioctl(struct net_device *netdev, struct ifreq *ifr, int cmd)
+{
+	switch (cmd) {
+	case SIOCGMIIPHY:
+	case SIOCGMIIREG:
+	case SIOCSMIIREG:
+		return e1000_mii_ioctl(netdev, ifr, cmd);
+	default:
+		return -EOPNOTSUPP;
+	}
+}
+
+/**
+ * e1000_mii_ioctl - read or write a PHY register via the MII ioctls
+ * @netdev: network interface device structure
+ * @ifr: interface request structure carrying the MII data
+ * @cmd: SIOCGMIIPHY, SIOCGMIIREG or SIOCSMIIREG
+ **/
+
+static int e1000_mii_ioctl(struct net_device *netdev, struct ifreq *ifr,
+			   int cmd)
+{
+	struct e1000_adapter *adapter = netdev_priv(netdev);
+	struct e1000_hw *hw = &adapter->hw;
+	struct mii_ioctl_data *data = if_mii(ifr);
+	int retval;
+	u16 mii_reg;
+	u16 spddplx;
+	unsigned long flags;
+
+	if (hw->media_type != e1000_media_type_copper)
+		return -EOPNOTSUPP;
+
+	switch (cmd) {
+	case SIOCGMIIPHY:
+		data->phy_id = hw->phy_addr;
+		break;
+	case SIOCGMIIREG:
+		if (!capable(CAP_NET_ADMIN))
+			return -EPERM;
+		spin_lock_irqsave(&adapter->stats_lock, flags);
+		if (e1000_read_phy_reg(hw, data->reg_num & 0x1F,
+				   &data->val_out)) {
+			spin_unlock_irqrestore(&adapter->stats_lock, flags);
+			return -EIO;
+		}
+		spin_unlock_irqrestore(&adapter->stats_lock, flags);
+		break;
+	case SIOCSMIIREG:
+		if (!capable(CAP_NET_ADMIN))
+			return -EPERM;
+		if (data->reg_num & ~(0x1F))
+			return -EFAULT;
+		mii_reg = data->val_in;
+		spin_lock_irqsave(&adapter->stats_lock, flags);
+		if (e1000_write_phy_reg(hw, data->reg_num,
+					mii_reg)) {
+			spin_unlock_irqrestore(&adapter->stats_lock, flags);
+			return -EIO;
+		}
+		spin_unlock_irqrestore(&adapter->stats_lock, flags);
+		if (hw->media_type == e1000_media_type_copper) {
+			switch (data->reg_num) {
+			case PHY_CTRL:
+				if (mii_reg & MII_CR_POWER_DOWN)
+					break;
+				if (mii_reg & MII_CR_AUTO_NEG_EN) {
+					hw->autoneg = 1;
+					hw->autoneg_advertised = 0x2F;
+				} else {
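+					/* no autoneg: decode the forced mode
+					 * from the MII control word (bits 6
+					 * and 13 select the speed, bit 8 the
+					 * duplex) */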
+					if (mii_reg & 0x40)
+						spddplx = SPEED_1000;
+					else if (mii_reg & 0x2000)
+						spddplx = SPEED_100;
+					else
+						spddplx = SPEED_10;
+					spddplx += (mii_reg & 0x100)
+						   ? DUPLEX_FULL :
+						   DUPLEX_HALF;
+					retval = e1000_set_spd_dplx(adapter,
+								    spddplx);
+					if (retval)
+						return retval;
+				}
+				if (netif_running(adapter->netdev))
+					e1000_reinit_locked(adapter);
+				else
+					e1000_reset(adapter);
+				break;
+			case M88E1000_PHY_SPEC_CTRL:
+			case M88E1000_EXT_PHY_SPEC_CTRL:
+				if (e1000_phy_reset(hw))
+					return -EIO;
+				break;
+			}
+		} else {
+			switch (data->reg_num) {
+			case PHY_CTRL:
+				if (mii_reg & MII_CR_POWER_DOWN)
+					break;
+				if (netif_running(adapter->netdev))
+					e1000_reinit_locked(adapter);
+				else
+					e1000_reset(adapter);
+				break;
+			}
+		}
+		break;
+	default:
+		return -EOPNOTSUPP;
+	}
+	return E1000_SUCCESS;
+}
+
+void e1000_pci_set_mwi(struct e1000_hw *hw)
+{
+	struct e1000_adapter *adapter = hw->back;
+	int ret_val = pci_set_mwi(adapter->pdev);
+
+	if (ret_val)
+		DPRINTK(PROBE, ERR, "Error in setting MWI\n");
+}
+
+void e1000_pci_clear_mwi(struct e1000_hw *hw)
+{
+	struct e1000_adapter *adapter = hw->back;
+
+	pci_clear_mwi(adapter->pdev);
+}
+
+int e1000_pcix_get_mmrbc(struct e1000_hw *hw)
+{
+	struct e1000_adapter *adapter = hw->back;
+	return pcix_get_mmrbc(adapter->pdev);
+}
+
+void e1000_pcix_set_mmrbc(struct e1000_hw *hw, int mmrbc)
+{
+	struct e1000_adapter *adapter = hw->back;
+	pcix_set_mmrbc(adapter->pdev, mmrbc);
+}
+
+s32 e1000_read_pcie_cap_reg(struct e1000_hw *hw, u32 reg, u16 *value)
+{
+	struct e1000_adapter *adapter = hw->back;
+	u16 cap_offset;
+
+	cap_offset = pci_find_capability(adapter->pdev, PCI_CAP_ID_EXP);
+	if (!cap_offset)
+		return -E1000_ERR_CONFIG;
+
+	pci_read_config_word(adapter->pdev, cap_offset + reg, value);
+
+	return E1000_SUCCESS;
+}
+
+void e1000_io_write(struct e1000_hw *hw, unsigned long port, u32 value)
+{
+	outl(value, port);
+}
+
+static void e1000_vlan_rx_register(struct net_device *netdev,
+				   struct vlan_group *grp)
+{
+	struct e1000_adapter *adapter = netdev_priv(netdev);
+	struct e1000_hw *hw = &adapter->hw;
+	u32 ctrl, rctl;
+
+	if (!test_bit(__E1000_DOWN, &adapter->flags))
+		e1000_irq_disable(adapter);
+	adapter->vlgrp = grp;
+
+	if (grp) {
+		/* enable VLAN tag insert/strip */
+		ctrl = er32(CTRL);
+		ctrl |= E1000_CTRL_VME;
+		ew32(CTRL, ctrl);
+
+		if (adapter->hw.mac_type != e1000_ich8lan) {
+			/* enable VLAN receive filtering */
+			rctl = er32(RCTL);
+			rctl &= ~E1000_RCTL_CFIEN;
+			ew32(RCTL, rctl);
+			e1000_update_mng_vlan(adapter);
+		}
+	} else {
+		/* disable VLAN tag insert/strip */
+		ctrl = er32(CTRL);
+		ctrl &= ~E1000_CTRL_VME;
+		ew32(CTRL, ctrl);
+
+		if (adapter->hw.mac_type != e1000_ich8lan) {
+			if (adapter->mng_vlan_id !=
+			    (u16)E1000_MNG_VLAN_NONE) {
+				e1000_vlan_rx_kill_vid(netdev,
+				                       adapter->mng_vlan_id);
+				adapter->mng_vlan_id = E1000_MNG_VLAN_NONE;
+			}
+		}
+	}
+
+	if (!test_bit(__E1000_DOWN, &adapter->flags))
+		e1000_irq_enable(adapter);
+}
+
+static void e1000_vlan_rx_add_vid(struct net_device *netdev, u16 vid)
+{
+	struct e1000_adapter *adapter = netdev_priv(netdev);
+	struct e1000_hw *hw = &adapter->hw;
+	u32 vfta, index;
+
+	if ((hw->mng_cookie.status &
+	     E1000_MNG_DHCP_COOKIE_STATUS_VLAN_SUPPORT) &&
+	    (vid == adapter->mng_vlan_id))
+		return;
+	/* add VID to filter table */
+	index = (vid >> 5) & 0x7F;
+	vfta = E1000_READ_REG_ARRAY(hw, VFTA, index);
+	vfta |= (1 << (vid & 0x1F));
+	e1000_write_vfta(hw, index, vfta);
+}
+
+static void e1000_vlan_rx_kill_vid(struct net_device *netdev, u16 vid)
+{
+	struct e1000_adapter *adapter = netdev_priv(netdev);
+	struct e1000_hw *hw = &adapter->hw;
+	u32 vfta, index;
+
+	if (!test_bit(__E1000_DOWN, &adapter->flags))
+		e1000_irq_disable(adapter);
+	vlan_group_set_device(adapter->vlgrp, vid, NULL);
+	if (!test_bit(__E1000_DOWN, &adapter->flags))
+		e1000_irq_enable(adapter);
+
+	if ((hw->mng_cookie.status &
+	     E1000_MNG_DHCP_COOKIE_STATUS_VLAN_SUPPORT) &&
+	    (vid == adapter->mng_vlan_id)) {
+		/* release control to f/w */
+		e1000_release_hw_control(adapter);
+		return;
+	}
+
+	/* remove VID from filter table */
+	index = (vid >> 5) & 0x7F;
+	vfta = E1000_READ_REG_ARRAY(hw, VFTA, index);
+	vfta &= ~(1 << (vid & 0x1F));
+	e1000_write_vfta(hw, index, vfta);
+}
+
+static void e1000_restore_vlan(struct e1000_adapter *adapter)
+{
+	e1000_vlan_rx_register(adapter->netdev, adapter->vlgrp);
+
+	if (adapter->vlgrp) {
+		u16 vid;
+		for (vid = 0; vid < VLAN_GROUP_ARRAY_LEN; vid++) {
+			if (!vlan_group_get_device(adapter->vlgrp, vid))
+				continue;
+			e1000_vlan_rx_add_vid(adapter->netdev, vid);
+		}
+	}
+}
+
+int e1000_set_spd_dplx(struct e1000_adapter *adapter, u16 spddplx)
+{
+	struct e1000_hw *hw = &adapter->hw;
+
+	hw->autoneg = 0;
+
+	/* Fiber NICs only allow 1000 Mbps Full duplex */
+	if ((hw->media_type == e1000_media_type_fiber) &&
+		spddplx != (SPEED_1000 + DUPLEX_FULL)) {
+		DPRINTK(PROBE, ERR, "Unsupported Speed/Duplex configuration\n");
+		return -EINVAL;
+	}
+
+	switch (spddplx) {
+	case SPEED_10 + DUPLEX_HALF:
+		hw->forced_speed_duplex = e1000_10_half;
+		break;
+	case SPEED_10 + DUPLEX_FULL:
+		hw->forced_speed_duplex = e1000_10_full;
+		break;
+	case SPEED_100 + DUPLEX_HALF:
+		hw->forced_speed_duplex = e1000_100_half;
+		break;
+	case SPEED_100 + DUPLEX_FULL:
+		hw->forced_speed_duplex = e1000_100_full;
+		break;
+	case SPEED_1000 + DUPLEX_FULL:
+		hw->autoneg = 1;
+		hw->autoneg_advertised = ADVERTISE_1000_FULL;
+		break;
+	case SPEED_1000 + DUPLEX_HALF: /* not supported */
+	default:
+		DPRINTK(PROBE, ERR, "Unsupported Speed/Duplex configuration\n");
+		return -EINVAL;
+	}
+	return 0;
+}
+
+static int __e1000_shutdown(struct pci_dev *pdev, bool *enable_wake)
+{
+	struct net_device *netdev = pci_get_drvdata(pdev);
+	struct e1000_adapter *adapter = netdev_priv(netdev);
+	struct e1000_hw *hw = &adapter->hw;
+	u32 ctrl, ctrl_ext, rctl, status;
+	u32 wufc = adapter->wol;
+#ifdef CONFIG_PM
+	int retval = 0;
+#endif
+
+	netif_device_detach(netdev);
+
+	if (netif_running(netdev)) {
+		WARN_ON(test_bit(__E1000_RESETTING, &adapter->flags));
+		e1000_down(adapter);
+	}
+
+#ifdef CONFIG_PM
+	retval = pci_save_state(pdev);
+	if (retval)
+		return retval;
+#endif
+
+	status = er32(STATUS);
+	if (status & E1000_STATUS_LU)
+		wufc &= ~E1000_WUFC_LNKC;
+
+	if (wufc) {
+		e1000_setup_rctl(adapter);
+		e1000_set_rx_mode(netdev);
+
+		/* turn on all-multi mode if wake on multicast is enabled */
+		if (wufc & E1000_WUFC_MC) {
+			rctl = er32(RCTL);
+			rctl |= E1000_RCTL_MPE;
+			ew32(RCTL, rctl);
+		}
+
+		if (hw->mac_type >= e1000_82540) {
+			ctrl = er32(CTRL);
+			/* advertise wake from D3Cold */
+			#define E1000_CTRL_ADVD3WUC 0x00100000
+			/* phy power management enable */
+			#define E1000_CTRL_EN_PHY_PWR_MGMT 0x00200000
+			ctrl |= E1000_CTRL_ADVD3WUC |
+				E1000_CTRL_EN_PHY_PWR_MGMT;
+			ew32(CTRL, ctrl);
+		}
+
+		if (hw->media_type == e1000_media_type_fiber ||
+		   hw->media_type == e1000_media_type_internal_serdes) {
+			/* keep the laser running in D3 */
+			ctrl_ext = er32(CTRL_EXT);
+			ctrl_ext |= E1000_CTRL_EXT_SDP7_DATA;
+			ew32(CTRL_EXT, ctrl_ext);
+		}
+
+		/* Allow time for pending master requests to run */
+		e1000_disable_pciex_master(hw);
+
+		ew32(WUC, E1000_WUC_PME_EN);
+		ew32(WUFC, wufc);
+	} else {
+		ew32(WUC, 0);
+		ew32(WUFC, 0);
+	}
+
+	e1000_release_manageability(adapter);
+
+	*enable_wake = !!wufc;
+
+	/* make sure adapter isn't asleep if manageability is enabled */
+	if (adapter->en_mng_pt)
+		*enable_wake = true;
+
+	if (hw->phy_type == e1000_phy_igp_3)
+		e1000_phy_powerdown_workaround(hw);
+
+	if (netif_running(netdev))
+		e1000_free_irq(adapter);
+
+	/* Release control of h/w to f/w.  If f/w is AMT enabled, this
+	 * would have already happened in close and is redundant. */
+	e1000_release_hw_control(adapter);
+
+	pci_disable_device(pdev);
+
+	return 0;
+}
+
+#ifdef CONFIG_PM
+static int e1000_suspend(struct pci_dev *pdev, pm_message_t state)
+{
+	int retval;
+	bool wake;
+
+	retval = __e1000_shutdown(pdev, &wake);
+	if (retval)
+		return retval;
+
+	if (wake) {
+		pci_prepare_to_sleep(pdev);
+	} else {
+		pci_wake_from_d3(pdev, false);
+		pci_set_power_state(pdev, PCI_D3hot);
+	}
+
+	return 0;
+}
+
+static int e1000_resume(struct pci_dev *pdev)
+{
+	struct net_device *netdev = pci_get_drvdata(pdev);
+	struct e1000_adapter *adapter = netdev_priv(netdev);
+	struct e1000_hw *hw = &adapter->hw;
+	u32 err;
+
+	pci_set_power_state(pdev, PCI_D0);
+	pci_restore_state(pdev);
+
+	if (adapter->need_ioport)
+		err = pci_enable_device(pdev);
+	else
+		err = pci_enable_device_mem(pdev);
+	if (err) {
+		printk(KERN_ERR "e1000: Cannot enable PCI device from suspend\n");
+		return err;
+	}
+	pci_set_master(pdev);
+
+	pci_enable_wake(pdev, PCI_D3hot, 0);
+	pci_enable_wake(pdev, PCI_D3cold, 0);
+
+	if (netif_running(netdev)) {
+		err = e1000_request_irq(adapter);
+		if (err)
+			return err;
+	}
+
+	e1000_power_up_phy(adapter);
+	e1000_reset(adapter);
+	ew32(WUS, ~0);
+
+	e1000_init_manageability(adapter);
+
+	if (netif_running(netdev))
+		e1000_up(adapter);
+
+	netif_device_attach(netdev);
+
+	/* If the controller is 82573 and f/w is AMT, do not set
+	 * DRV_LOAD until the interface is up.  For all other cases,
+	 * let the f/w know that the h/w is now under the control
+	 * of the driver. */
+	if (hw->mac_type != e1000_82573 ||
+	    !e1000_check_mng_mode(hw))
+		e1000_get_hw_control(adapter);
+
+	return 0;
+}
+#endif
+
+static void e1000_shutdown(struct pci_dev *pdev)
+{
+	bool wake;
+
+	__e1000_shutdown(pdev, &wake);
+
+	if (system_state == SYSTEM_POWER_OFF) {
+		pci_wake_from_d3(pdev, wake);
+		pci_set_power_state(pdev, PCI_D3hot);
+	}
+}
+
+#ifdef CONFIG_NET_POLL_CONTROLLER
+/*
+ * Polling 'interrupt' - used by things like netconsole to send skbs
+ * without having to re-enable interrupts. It's not called while
+ * the interrupt routine is executing.
+ */
+static void e1000_netpoll(struct net_device *netdev)
+{
+	struct e1000_adapter *adapter = netdev_priv(netdev);
+
+	disable_irq(adapter->pdev->irq);
+	e1000_intr(adapter->pdev->irq, netdev);
+	enable_irq(adapter->pdev->irq);
+}
+#endif
+
+/**
+ * e1000_io_error_detected - called when PCI error is detected
+ * @pdev: Pointer to PCI device
+ * @state: The current PCI connection state
+ *
+ * This function is called after a PCI bus error affecting
+ * this device has been detected.
+ */
+static pci_ers_result_t e1000_io_error_detected(struct pci_dev *pdev,
+						pci_channel_state_t state)
+{
+	struct net_device *netdev = pci_get_drvdata(pdev);
+	struct e1000_adapter *adapter = netdev_priv(netdev);
+
+	netif_device_detach(netdev);
+
+	if (state == pci_channel_io_perm_failure)
+		return PCI_ERS_RESULT_DISCONNECT;
+
+	if (netif_running(netdev))
+		e1000_down(adapter);
+	pci_disable_device(pdev);
+
+	/* Request a slot reset. */
+	return PCI_ERS_RESULT_NEED_RESET;
+}
+
+/**
+ * e1000_io_slot_reset - called after the pci bus has been reset.
+ * @pdev: Pointer to PCI device
+ *
+ * Restart the card from scratch, as if from a cold-boot. Implementation
+ * resembles the first-half of the e1000_resume routine.
+ */
+static pci_ers_result_t e1000_io_slot_reset(struct pci_dev *pdev)
+{
+	struct net_device *netdev = pci_get_drvdata(pdev);
+	struct e1000_adapter *adapter = netdev_priv(netdev);
+	struct e1000_hw *hw = &adapter->hw;
+	int err;
+
+	if (adapter->need_ioport)
+		err = pci_enable_device(pdev);
+	else
+		err = pci_enable_device_mem(pdev);
+	if (err) {
+		printk(KERN_ERR "e1000: Cannot re-enable PCI device after reset.\n");
+		return PCI_ERS_RESULT_DISCONNECT;
+	}
+	pci_set_master(pdev);
+
+	pci_enable_wake(pdev, PCI_D3hot, 0);
+	pci_enable_wake(pdev, PCI_D3cold, 0);
+
+	e1000_reset(adapter);
+	ew32(WUS, ~0);
+
+	return PCI_ERS_RESULT_RECOVERED;
+}
+
+/**
+ * e1000_io_resume - called when traffic can start flowing again.
+ * @pdev: Pointer to PCI device
+ *
+ * This callback is called when the error recovery driver tells us that
+ * it's OK to resume normal operation. Implementation resembles the
+ * second-half of the e1000_resume routine.
+ */
+static void e1000_io_resume(struct pci_dev *pdev)
+{
+	struct net_device *netdev = pci_get_drvdata(pdev);
+	struct e1000_adapter *adapter = netdev_priv(netdev);
+	struct e1000_hw *hw = &adapter->hw;
+
+	e1000_init_manageability(adapter);
+
+	if (netif_running(netdev)) {
+		if (e1000_up(adapter)) {
+			printk(KERN_ERR "e1000: can't bring device back up after reset\n");
+			return;
+		}
+	}
+
+	netif_device_attach(netdev);
+
+	/* If the controller is 82573 and f/w is AMT, do not set
+	 * DRV_LOAD until the interface is up.  For all other cases,
+	 * let the f/w know that the h/w is now under the control
+	 * of the driver. */
+	if (hw->mac_type != e1000_82573 ||
+	    !e1000_check_mng_mode(hw))
+		e1000_get_hw_control(adapter);
+
+}
+
+/* e1000_main.c */
--- a/devices/e1000/e1000_main-2.6.33-ethercat.c	Wed Mar 23 08:06:58 2011 +0100
+++ b/devices/e1000/e1000_main-2.6.33-ethercat.c	Wed Apr 13 22:06:28 2011 +0200
@@ -3457,7 +3457,7 @@
 	if (unlikely((!icr) || test_bit(__E1000_DOWN, &adapter->flags)))
 		return IRQ_NONE;  /* Not our interrupt */
 
-	if (!adapter->ecdev && unlikely(icr & (E1000_ICR_RXSEQ | E1000_ICR_LSC))) {
+	if (unlikely(icr & (E1000_ICR_RXSEQ | E1000_ICR_LSC))) {
 		hw->get_link_status = 1;
 		/* guard against interrupt when we're going down */
 		if (!test_bit(__E1000_DOWN, &adapter->flags))
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/devices/e1000/e1000_osdep-2.6.31-ethercat.h	Wed Apr 13 22:06:28 2011 +0200
@@ -0,0 +1,113 @@
+/*******************************************************************************
+
+  Intel PRO/1000 Linux driver
+  Copyright(c) 1999 - 2006 Intel Corporation.
+
+  This program is free software; you can redistribute it and/or modify it
+  under the terms and conditions of the GNU General Public License,
+  version 2, as published by the Free Software Foundation.
+
+  This program is distributed in the hope it will be useful, but WITHOUT
+  ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+  FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
+  more details.
+
+  You should have received a copy of the GNU General Public License along with
+  this program; if not, write to the Free Software Foundation, Inc.,
+  51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
+
+  The full GNU General Public License is included in this distribution in
+  the file called "COPYING".
+
+  Contact Information:
+  Linux NICS <linux.nics@intel.com>
+  e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
+  Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
+
+*******************************************************************************/
+
+
+/* glue for the OS independent part of e1000
+ * includes register access macros
+ */
+
+#ifndef _E1000_OSDEP_H_
+#define _E1000_OSDEP_H_
+
+#include <linux/types.h>
+#include <linux/pci.h>
+#include <linux/delay.h>
+#include <asm/io.h>
+#include <linux/interrupt.h>
+#include <linux/sched.h>
+
+#ifdef DBG
+#define DEBUGOUT(S)		printk(KERN_DEBUG S "\n")
+#define DEBUGOUT1(S, A...)	printk(KERN_DEBUG S "\n", A)
+#else
+#define DEBUGOUT(S)
+#define DEBUGOUT1(S, A...)
+#endif
+
+#define DEBUGFUNC(F) DEBUGOUT(F "\n")
+#define DEBUGOUT2 DEBUGOUT1
+#define DEBUGOUT3 DEBUGOUT2
+#define DEBUGOUT7 DEBUGOUT3
+
+
+#define er32(reg)							\
+	(readl(hw->hw_addr + ((hw->mac_type >= e1000_82543)		\
+			       ? E1000_##reg : E1000_82542_##reg)))
+
+#define ew32(reg, value)						\
+	(writel((value), (hw->hw_addr + ((hw->mac_type >= e1000_82543)	\
+					 ? E1000_##reg : E1000_82542_##reg))))
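+
+/* Illustrative use, with a local "struct e1000_hw *hw" in scope as in the
+ * driver proper:
+ *
+ *   u32 status = er32(STATUS);
+ *   ew32(CTRL, ctrl | E1000_CTRL_RST);
+ *
+ * Both macros resolve to readl()/writel() on hw->hw_addr plus the
+ * 82542-specific register offset on pre-82543 MACs, or the common
+ * E1000_* offset otherwise. */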
+
+#define E1000_WRITE_REG_ARRAY(a, reg, offset, value) ( \
+    writel((value), ((a)->hw_addr + \
+        (((a)->mac_type >= e1000_82543) ? E1000_##reg : E1000_82542_##reg) + \
+        ((offset) << 2))))
+
+#define E1000_READ_REG_ARRAY(a, reg, offset) ( \
+    readl((a)->hw_addr + \
+        (((a)->mac_type >= e1000_82543) ? E1000_##reg : E1000_82542_##reg) + \
+        ((offset) << 2)))
+
+#define E1000_READ_REG_ARRAY_DWORD E1000_READ_REG_ARRAY
+#define E1000_WRITE_REG_ARRAY_DWORD E1000_WRITE_REG_ARRAY
+
+#define E1000_WRITE_REG_ARRAY_WORD(a, reg, offset, value) ( \
+    writew((value), ((a)->hw_addr + \
+        (((a)->mac_type >= e1000_82543) ? E1000_##reg : E1000_82542_##reg) + \
+        ((offset) << 1))))
+
+#define E1000_READ_REG_ARRAY_WORD(a, reg, offset) ( \
+    readw((a)->hw_addr + \
+        (((a)->mac_type >= e1000_82543) ? E1000_##reg : E1000_82542_##reg) + \
+        ((offset) << 1)))
+
+#define E1000_WRITE_REG_ARRAY_BYTE(a, reg, offset, value) ( \
+    writeb((value), ((a)->hw_addr + \
+        (((a)->mac_type >= e1000_82543) ? E1000_##reg : E1000_82542_##reg) + \
+        (offset))))
+
+#define E1000_READ_REG_ARRAY_BYTE(a, reg, offset) ( \
+    readb((a)->hw_addr + \
+        (((a)->mac_type >= e1000_82543) ? E1000_##reg : E1000_82542_##reg) + \
+        (offset)))
+
+#define E1000_WRITE_FLUSH() er32(STATUS)
+
+#define E1000_WRITE_ICH_FLASH_REG(a, reg, value) ( \
+    writel((value), ((a)->flash_address + reg)))
+
+#define E1000_READ_ICH_FLASH_REG(a, reg) ( \
+    readl((a)->flash_address + reg))
+
+#define E1000_WRITE_ICH_FLASH_REG16(a, reg, value) ( \
+    writew((value), ((a)->flash_address + reg)))
+
+#define E1000_READ_ICH_FLASH_REG16(a, reg) ( \
+    readw((a)->flash_address + reg))
+
+#endif /* _E1000_OSDEP_H_ */
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/devices/e1000/e1000_osdep-2.6.31-orig.h	Wed Apr 13 22:06:28 2011 +0200
@@ -0,0 +1,113 @@
+/*******************************************************************************
+
+  Intel PRO/1000 Linux driver
+  Copyright(c) 1999 - 2006 Intel Corporation.
+
+  This program is free software; you can redistribute it and/or modify it
+  under the terms and conditions of the GNU General Public License,
+  version 2, as published by the Free Software Foundation.
+
+  This program is distributed in the hope it will be useful, but WITHOUT
+  ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+  FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
+  more details.
+
+  You should have received a copy of the GNU General Public License along with
+  this program; if not, write to the Free Software Foundation, Inc.,
+  51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
+
+  The full GNU General Public License is included in this distribution in
+  the file called "COPYING".
+
+  Contact Information:
+  Linux NICS <linux.nics@intel.com>
+  e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
+  Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
+
+*******************************************************************************/
+
+
+/* glue for the OS independent part of e1000
+ * includes register access macros
+ */
+
+#ifndef _E1000_OSDEP_H_
+#define _E1000_OSDEP_H_
+
+#include <linux/types.h>
+#include <linux/pci.h>
+#include <linux/delay.h>
+#include <asm/io.h>
+#include <linux/interrupt.h>
+#include <linux/sched.h>
+
+#ifdef DBG
+#define DEBUGOUT(S)		printk(KERN_DEBUG S "\n")
+#define DEBUGOUT1(S, A...)	printk(KERN_DEBUG S "\n", A)
+#else
+#define DEBUGOUT(S)
+#define DEBUGOUT1(S, A...)
+#endif
+
+#define DEBUGFUNC(F) DEBUGOUT(F "\n")
+#define DEBUGOUT2 DEBUGOUT1
+#define DEBUGOUT3 DEBUGOUT2
+#define DEBUGOUT7 DEBUGOUT3
+
+
+#define er32(reg)							\
+	(readl(hw->hw_addr + ((hw->mac_type >= e1000_82543)		\
+			       ? E1000_##reg : E1000_82542_##reg)))
+
+#define ew32(reg, value)						\
+	(writel((value), (hw->hw_addr + ((hw->mac_type >= e1000_82543)	\
+					 ? E1000_##reg : E1000_82542_##reg))))
+
+#define E1000_WRITE_REG_ARRAY(a, reg, offset, value) ( \
+    writel((value), ((a)->hw_addr + \
+        (((a)->mac_type >= e1000_82543) ? E1000_##reg : E1000_82542_##reg) + \
+        ((offset) << 2))))
+
+#define E1000_READ_REG_ARRAY(a, reg, offset) ( \
+    readl((a)->hw_addr + \
+        (((a)->mac_type >= e1000_82543) ? E1000_##reg : E1000_82542_##reg) + \
+        ((offset) << 2)))
+
+#define E1000_READ_REG_ARRAY_DWORD E1000_READ_REG_ARRAY
+#define E1000_WRITE_REG_ARRAY_DWORD E1000_WRITE_REG_ARRAY
+
+#define E1000_WRITE_REG_ARRAY_WORD(a, reg, offset, value) ( \
+    writew((value), ((a)->hw_addr + \
+        (((a)->mac_type >= e1000_82543) ? E1000_##reg : E1000_82542_##reg) + \
+        ((offset) << 1))))
+
+#define E1000_READ_REG_ARRAY_WORD(a, reg, offset) ( \
+    readw((a)->hw_addr + \
+        (((a)->mac_type >= e1000_82543) ? E1000_##reg : E1000_82542_##reg) + \
+        ((offset) << 1)))
+
+#define E1000_WRITE_REG_ARRAY_BYTE(a, reg, offset, value) ( \
+    writeb((value), ((a)->hw_addr + \
+        (((a)->mac_type >= e1000_82543) ? E1000_##reg : E1000_82542_##reg) + \
+        (offset))))
+
+#define E1000_READ_REG_ARRAY_BYTE(a, reg, offset) ( \
+    readb((a)->hw_addr + \
+        (((a)->mac_type >= e1000_82543) ? E1000_##reg : E1000_82542_##reg) + \
+        (offset)))
+
+#define E1000_WRITE_FLUSH() er32(STATUS)
+
+#define E1000_WRITE_ICH_FLASH_REG(a, reg, value) ( \
+    writel((value), ((a)->flash_address + reg)))
+
+#define E1000_READ_ICH_FLASH_REG(a, reg) ( \
+    readl((a)->flash_address + reg))
+
+#define E1000_WRITE_ICH_FLASH_REG16(a, reg, value) ( \
+    writew((value), ((a)->flash_address + reg)))
+
+#define E1000_READ_ICH_FLASH_REG16(a, reg) ( \
+    readw((a)->flash_address + reg))
+
+#endif /* _E1000_OSDEP_H_ */
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/devices/e1000/e1000_param-2.6.31-ethercat.c	Wed Apr 13 22:06:28 2011 +0200
@@ -0,0 +1,792 @@
+/*******************************************************************************
+
+  Intel PRO/1000 Linux driver
+  Copyright(c) 1999 - 2006 Intel Corporation.
+
+  This program is free software; you can redistribute it and/or modify it
+  under the terms and conditions of the GNU General Public License,
+  version 2, as published by the Free Software Foundation.
+
+  This program is distributed in the hope it will be useful, but WITHOUT
+  ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+  FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
+  more details.
+
+  You should have received a copy of the GNU General Public License along with
+  this program; if not, write to the Free Software Foundation, Inc.,
+  51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
+
+  The full GNU General Public License is included in this distribution in
+  the file called "COPYING".
+
+  Contact Information:
+  Linux NICS <linux.nics@intel.com>
+  e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
+  Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
+
+*******************************************************************************/
+
+#include "e1000-2.6.31-ethercat.h"
+
+/* This is the only thing that needs to be changed to adjust the
+ * maximum number of ports that the driver can manage.
+ */
+
+#define E1000_MAX_NIC 32
+
+#define OPTION_UNSET   -1
+#define OPTION_DISABLED 0
+#define OPTION_ENABLED  1
+
+/* All parameters are treated the same, as an integer array of values.
+ * This macro just reduces the need to repeat the same declaration code
+ * over and over (plus this helps to avoid typo bugs).
+ */
+
+#define E1000_PARAM_INIT { [0 ... E1000_MAX_NIC] = OPTION_UNSET }
+#define E1000_PARAM(X, desc) \
+	static int __devinitdata X[E1000_MAX_NIC+1] = E1000_PARAM_INIT; \
+	static unsigned int num_##X; \
+	module_param_array_named(X, X, int, &num_##X, 0); \
+	MODULE_PARM_DESC(X, desc);
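+
+/* For example, E1000_PARAM(TxDescriptors, "Number of transmit descriptors")
+ * expands roughly to
+ *
+ *   static int __devinitdata TxDescriptors[E1000_MAX_NIC + 1] =
+ *           { [0 ... E1000_MAX_NIC] = OPTION_UNSET };
+ *   static unsigned int num_TxDescriptors;
+ *   module_param_array_named(TxDescriptors, TxDescriptors, int,
+ *                            &num_TxDescriptors, 0);
+ *   MODULE_PARM_DESC(TxDescriptors, "Number of transmit descriptors");
+ *
+ * so every option becomes a per-board array that can be given on the module
+ * command line, e.g. TxDescriptors=4096,256 for the first two boards. */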
+
+/* Transmit Descriptor Count
+ *
+ * Valid Range: 80-256 for 82542 and 82543 gigabit ethernet controllers
+ * Valid Range: 80-4096 for 82544 and newer
+ *
+ * Default Value: 256
+ */
+E1000_PARAM(TxDescriptors, "Number of transmit descriptors");
+
+/* Receive Descriptor Count
+ *
+ * Valid Range: 80-256 for 82542 and 82543 gigabit ethernet controllers
+ * Valid Range: 80-4096 for 82544 and newer
+ *
+ * Default Value: 256
+ */
+E1000_PARAM(RxDescriptors, "Number of receive descriptors");
+
+/* User Specified Speed Override
+ *
+ * Valid Range: 0, 10, 100, 1000
+ *  - 0    - auto-negotiate at all supported speeds
+ *  - 10   - only link at 10 Mbps
+ *  - 100  - only link at 100 Mbps
+ *  - 1000 - only link at 1000 Mbps
+ *
+ * Default Value: 0
+ */
+E1000_PARAM(Speed, "Speed setting");
+
+/* User Specified Duplex Override
+ *
+ * Valid Range: 0-2
+ *  - 0 - auto-negotiate for duplex
+ *  - 1 - only link at half duplex
+ *  - 2 - only link at full duplex
+ *
+ * Default Value: 0
+ */
+E1000_PARAM(Duplex, "Duplex setting");
+
+/* Auto-negotiation Advertisement Override
+ *
+ * Valid Range: 0x01-0x0F, 0x20-0x2F (copper); 0x20 (fiber)
+ *
+ * The AutoNeg value is a bit mask describing which speed and duplex
+ * combinations should be advertised during auto-negotiation.
+ * The supported speed and duplex modes are listed below
+ *
+ * Bit           7     6     5      4      3     2     1      0
+ * Speed (Mbps)  N/A   N/A   1000   N/A    100   100   10     10
+ * Duplex                    Full          Full  Half  Full   Half
+ *
+ * Default Value: 0x2F (copper); 0x20 (fiber)
+ */
+E1000_PARAM(AutoNeg, "Advertised auto-negotiation setting");
+#define AUTONEG_ADV_DEFAULT  0x2F
+#define AUTONEG_ADV_MASK     0x2F
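+/* e.g. the default of 0x2F sets bits 0-3 and 5, i.e. it advertises
+ * 10/Half, 10/Full, 100/Half, 100/Full and 1000/Full */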
+
+/* User Specified Flow Control Override
+ *
+ * Valid Range: 0-3
+ *  - 0 - No Flow Control
+ *  - 1 - Rx only, respond to PAUSE frames but do not generate them
+ *  - 2 - Tx only, generate PAUSE frames but ignore them on receive
+ *  - 3 - Full Flow Control Support
+ *
+ * Default Value: Read flow control settings from the EEPROM
+ */
+E1000_PARAM(FlowControl, "Flow Control setting");
+#define FLOW_CONTROL_DEFAULT FLOW_CONTROL_FULL
+
+/* XsumRX - Receive Checksum Offload Enable/Disable
+ *
+ * Valid Range: 0, 1
+ *  - 0 - disables all checksum offload
+ *  - 1 - enables receive IP/TCP/UDP checksum offload
+ *        on 82543 and newer -based NICs
+ *
+ * Default Value: 1
+ */
+E1000_PARAM(XsumRX, "Disable or enable Receive Checksum offload");
+
+/* Transmit Interrupt Delay in units of 1.024 microseconds
+ *  Tx interrupt delay needs to typically be set to something non zero
+ *
+ * Valid Range: 0-65535
+ */
+E1000_PARAM(TxIntDelay, "Transmit Interrupt Delay");
+#define DEFAULT_TIDV                   8
+#define MAX_TXDELAY               0xFFFF
+#define MIN_TXDELAY                    0
+
+/* Transmit Absolute Interrupt Delay in units of 1.024 microseconds
+ *
+ * Valid Range: 0-65535
+ */
+E1000_PARAM(TxAbsIntDelay, "Transmit Absolute Interrupt Delay");
+#define DEFAULT_TADV                  32
+#define MAX_TXABSDELAY            0xFFFF
+#define MIN_TXABSDELAY                 0
+
+/* Receive Interrupt Delay in units of 1.024 microseconds
+ *   hardware will likely hang if you set this to anything but zero.
+ *
+ * Valid Range: 0-65535
+ */
+E1000_PARAM(RxIntDelay, "Receive Interrupt Delay");
+#define DEFAULT_RDTR                   0
+#define MAX_RXDELAY               0xFFFF
+#define MIN_RXDELAY                    0
+
+/* Receive Absolute Interrupt Delay in units of 1.024 microseconds
+ *
+ * Valid Range: 0-65535
+ */
+E1000_PARAM(RxAbsIntDelay, "Receive Absolute Interrupt Delay");
+#define DEFAULT_RADV                   8
+#define MAX_RXABSDELAY            0xFFFF
+#define MIN_RXABSDELAY                 0
+
+/* Interrupt Throttle Rate (interrupts/sec)
+ *
+ * Valid Range: 100-100000 (0=off, 1=dynamic, 3=dynamic conservative)
+ */
+E1000_PARAM(InterruptThrottleRate, "Interrupt Throttling Rate");
+#define DEFAULT_ITR                    3
+#define MAX_ITR                   100000
+#define MIN_ITR                      100
+
+/* Enable Smart Power Down of the PHY
+ *
+ * Valid Range: 0, 1
+ *
+ * Default Value: 0 (disabled)
+ */
+E1000_PARAM(SmartPowerDownEnable, "Enable PHY smart power down");
+
+/* Enable Kumeran Lock Loss workaround
+ *
+ * Valid Range: 0, 1
+ *
+ * Default Value: 1 (enabled)
+ */
+E1000_PARAM(KumeranLockLoss, "Enable Kumeran lock loss workaround");
+
+struct e1000_option {
+	enum { enable_option, range_option, list_option } type;
+	const char *name;
+	const char *err;
+	int def;
+	union {
+		struct { /* range_option info */
+			int min;
+			int max;
+		} r;
+		struct { /* list_option info */
+			int nr;
+			const struct e1000_opt_list { int i; char *str; } *p;
+		} l;
+	} arg;
+};
+
+static int __devinit e1000_validate_option(unsigned int *value,
+					   const struct e1000_option *opt,
+					   struct e1000_adapter *adapter)
+{
+	if (*value == OPTION_UNSET) {
+		*value = opt->def;
+		return 0;
+	}
+
+	switch (opt->type) {
+	case enable_option:
+		switch (*value) {
+		case OPTION_ENABLED:
+			DPRINTK(PROBE, INFO, "%s Enabled\n", opt->name);
+			return 0;
+		case OPTION_DISABLED:
+			DPRINTK(PROBE, INFO, "%s Disabled\n", opt->name);
+			return 0;
+		}
+		break;
+	case range_option:
+		if (*value >= opt->arg.r.min && *value <= opt->arg.r.max) {
+			DPRINTK(PROBE, INFO,
+					"%s set to %i\n", opt->name, *value);
+			return 0;
+		}
+		break;
+	case list_option: {
+		int i;
+		const struct e1000_opt_list *ent;
+
+		for (i = 0; i < opt->arg.l.nr; i++) {
+			ent = &opt->arg.l.p[i];
+			if (*value == ent->i) {
+				if (ent->str[0] != '\0')
+					DPRINTK(PROBE, INFO, "%s\n", ent->str);
+				return 0;
+			}
+		}
+	}
+		break;
+	default:
+		BUG();
+	}
+
+	DPRINTK(PROBE, INFO, "Invalid %s value specified (%i) %s\n",
+	       opt->name, *value, opt->err);
+	*value = opt->def;
+	return -1;
+}
+
+static void e1000_check_fiber_options(struct e1000_adapter *adapter);
+static void e1000_check_copper_options(struct e1000_adapter *adapter);
+
+/**
+ * e1000_check_options - Range Checking for Command Line Parameters
+ * @adapter: board private structure
+ *
+ * This routine checks all command line parameters for valid user
+ * input.  If an invalid value is given, or if no user specified
+ * value exists, a default value is used.  The final value is stored
+ * in a variable in the adapter structure.
+ **/
+
+void __devinit e1000_check_options(struct e1000_adapter *adapter)
+{
+	struct e1000_option opt;
+	int bd = adapter->bd_number;
+
+	if (bd >= E1000_MAX_NIC) {
+		DPRINTK(PROBE, NOTICE,
+		       "Warning: no configuration for board #%i\n", bd);
+		DPRINTK(PROBE, NOTICE, "Using defaults for all values\n");
+	}
+
+	{ /* Transmit Descriptor Count */
+		struct e1000_tx_ring *tx_ring = adapter->tx_ring;
+		int i;
+		e1000_mac_type mac_type = adapter->hw.mac_type;
+
+		opt = (struct e1000_option) {
+			.type = range_option,
+			.name = "Transmit Descriptors",
+			.err  = "using default of "
+				__MODULE_STRING(E1000_DEFAULT_TXD),
+			.def  = E1000_DEFAULT_TXD,
+			.arg  = { .r = {
+				.min = E1000_MIN_TXD,
+				.max = mac_type < e1000_82544 ? E1000_MAX_TXD : E1000_MAX_82544_TXD
+				}}
+		};
+
+		if (num_TxDescriptors > bd) {
+			tx_ring->count = TxDescriptors[bd];
+			e1000_validate_option(&tx_ring->count, &opt, adapter);
+			tx_ring->count = ALIGN(tx_ring->count,
+						REQ_TX_DESCRIPTOR_MULTIPLE);
+		} else {
+			tx_ring->count = opt.def;
+		}
+		for (i = 0; i < adapter->num_tx_queues; i++)
+			tx_ring[i].count = tx_ring->count;
+	}
+	{ /* Receive Descriptor Count */
+		struct e1000_rx_ring *rx_ring = adapter->rx_ring;
+		int i;
+		e1000_mac_type mac_type = adapter->hw.mac_type;
+
+		opt = (struct e1000_option) {
+			.type = range_option,
+			.name = "Receive Descriptors",
+			.err  = "using default of "
+				__MODULE_STRING(E1000_DEFAULT_RXD),
+			.def  = E1000_DEFAULT_RXD,
+			.arg  = { .r = {
+				.min = E1000_MIN_RXD,
+				.max = mac_type < e1000_82544 ? E1000_MAX_RXD : E1000_MAX_82544_RXD
+			}}
+		};
+
+		if (num_RxDescriptors > bd) {
+			rx_ring->count = RxDescriptors[bd];
+			e1000_validate_option(&rx_ring->count, &opt, adapter);
+			rx_ring->count = ALIGN(rx_ring->count,
+						REQ_RX_DESCRIPTOR_MULTIPLE);
+		} else {
+			rx_ring->count = opt.def;
+		}
+		for (i = 0; i < adapter->num_rx_queues; i++)
+			rx_ring[i].count = rx_ring->count;
+	}
+	{ /* Checksum Offload Enable/Disable */
+		opt = (struct e1000_option) {
+			.type = enable_option,
+			.name = "Checksum Offload",
+			.err  = "defaulting to Enabled",
+			.def  = OPTION_ENABLED
+		};
+
+		if (num_XsumRX > bd) {
+			unsigned int rx_csum = XsumRX[bd];
+			e1000_validate_option(&rx_csum, &opt, adapter);
+			adapter->rx_csum = rx_csum;
+		} else {
+			adapter->rx_csum = opt.def;
+		}
+	}
+	{ /* Flow Control */
+
+		struct e1000_opt_list fc_list[] =
+			{{ E1000_FC_NONE,    "Flow Control Disabled" },
+			 { E1000_FC_RX_PAUSE,"Flow Control Receive Only" },
+			 { E1000_FC_TX_PAUSE,"Flow Control Transmit Only" },
+			 { E1000_FC_FULL,    "Flow Control Enabled" },
+			 { E1000_FC_DEFAULT, "Flow Control Hardware Default" }};
+
+		opt = (struct e1000_option) {
+			.type = list_option,
+			.name = "Flow Control",
+			.err  = "reading default settings from EEPROM",
+			.def  = E1000_FC_DEFAULT,
+			.arg  = { .l = { .nr = ARRAY_SIZE(fc_list),
+					 .p = fc_list }}
+		};
+
+		if (num_FlowControl > bd) {
+			unsigned int fc = FlowControl[bd];
+			e1000_validate_option(&fc, &opt, adapter);
+			adapter->hw.fc = adapter->hw.original_fc = fc;
+		} else {
+			adapter->hw.fc = adapter->hw.original_fc = opt.def;
+		}
+	}
+	{ /* Transmit Interrupt Delay */
+		opt = (struct e1000_option) {
+			.type = range_option,
+			.name = "Transmit Interrupt Delay",
+			.err  = "using default of " __MODULE_STRING(DEFAULT_TIDV),
+			.def  = DEFAULT_TIDV,
+			.arg  = { .r = { .min = MIN_TXDELAY,
+					 .max = MAX_TXDELAY }}
+		};
+
+		if (num_TxIntDelay > bd) {
+			adapter->tx_int_delay = TxIntDelay[bd];
+			e1000_validate_option(&adapter->tx_int_delay, &opt,
+			                      adapter);
+		} else {
+			adapter->tx_int_delay = opt.def;
+		}
+	}
+	{ /* Transmit Absolute Interrupt Delay */
+		opt = (struct e1000_option) {
+			.type = range_option,
+			.name = "Transmit Absolute Interrupt Delay",
+			.err  = "using default of " __MODULE_STRING(DEFAULT_TADV),
+			.def  = DEFAULT_TADV,
+			.arg  = { .r = { .min = MIN_TXABSDELAY,
+					 .max = MAX_TXABSDELAY }}
+		};
+
+		if (num_TxAbsIntDelay > bd) {
+			adapter->tx_abs_int_delay = TxAbsIntDelay[bd];
+			e1000_validate_option(&adapter->tx_abs_int_delay, &opt,
+			                      adapter);
+		} else {
+			adapter->tx_abs_int_delay = opt.def;
+		}
+	}
+	{ /* Receive Interrupt Delay */
+		opt = (struct e1000_option) {
+			.type = range_option,
+			.name = "Receive Interrupt Delay",
+			.err  = "using default of " __MODULE_STRING(DEFAULT_RDTR),
+			.def  = DEFAULT_RDTR,
+			.arg  = { .r = { .min = MIN_RXDELAY,
+					 .max = MAX_RXDELAY }}
+		};
+
+		if (num_RxIntDelay > bd) {
+			adapter->rx_int_delay = RxIntDelay[bd];
+			e1000_validate_option(&adapter->rx_int_delay, &opt,
+			                      adapter);
+		} else {
+			adapter->rx_int_delay = opt.def;
+		}
+	}
+	{ /* Receive Absolute Interrupt Delay */
+		opt = (struct e1000_option) {
+			.type = range_option,
+			.name = "Receive Absolute Interrupt Delay",
+			.err  = "using default of " __MODULE_STRING(DEFAULT_RADV),
+			.def  = DEFAULT_RADV,
+			.arg  = { .r = { .min = MIN_RXABSDELAY,
+					 .max = MAX_RXABSDELAY }}
+		};
+
+		if (num_RxAbsIntDelay > bd) {
+			adapter->rx_abs_int_delay = RxAbsIntDelay[bd];
+			e1000_validate_option(&adapter->rx_abs_int_delay, &opt,
+			                      adapter);
+		} else {
+			adapter->rx_abs_int_delay = opt.def;
+		}
+	}
+	{ /* Interrupt Throttling Rate */
+		opt = (struct e1000_option) {
+			.type = range_option,
+			.name = "Interrupt Throttling Rate (ints/sec)",
+			.err  = "using default of " __MODULE_STRING(DEFAULT_ITR),
+			.def  = DEFAULT_ITR,
+			.arg  = { .r = { .min = MIN_ITR,
+					 .max = MAX_ITR }}
+		};
+
+		if (num_InterruptThrottleRate > bd) {
+			adapter->itr = InterruptThrottleRate[bd];
+			switch (adapter->itr) {
+			case 0:
+				DPRINTK(PROBE, INFO, "%s turned off\n",
+				        opt.name);
+				break;
+			case 1:
+				DPRINTK(PROBE, INFO, "%s set to dynamic mode\n",
+					opt.name);
+				adapter->itr_setting = adapter->itr;
+				adapter->itr = 20000;
+				break;
+			case 3:
+				DPRINTK(PROBE, INFO,
+				        "%s set to dynamic conservative mode\n",
+					opt.name);
+				adapter->itr_setting = adapter->itr;
+				adapter->itr = 20000;
+				break;
+			default:
+				e1000_validate_option(&adapter->itr, &opt,
+				        adapter);
+				/* save the setting, because the dynamic bits change itr */
+				/* clear the lower two bits because they are
+				 * used as control */
+				adapter->itr_setting = adapter->itr & ~3;
+				break;
+			}
+		} else {
+			adapter->itr_setting = opt.def;
+			adapter->itr = 20000;
+		}
+	}
+	{ /* Smart Power Down */
+		opt = (struct e1000_option) {
+			.type = enable_option,
+			.name = "PHY Smart Power Down",
+			.err  = "defaulting to Disabled",
+			.def  = OPTION_DISABLED
+		};
+
+		if (num_SmartPowerDownEnable > bd) {
+			unsigned int spd = SmartPowerDownEnable[bd];
+			e1000_validate_option(&spd, &opt, adapter);
+			adapter->smart_power_down = spd;
+		} else {
+			adapter->smart_power_down = opt.def;
+		}
+	}
+	{ /* Kumeran Lock Loss Workaround */
+		opt = (struct e1000_option) {
+			.type = enable_option,
+			.name = "Kumeran Lock Loss Workaround",
+			.err  = "defaulting to Enabled",
+			.def  = OPTION_ENABLED
+		};
+
+		if (num_KumeranLockLoss > bd) {
+			unsigned int kmrn_lock_loss = KumeranLockLoss[bd];
+			e1000_validate_option(&kmrn_lock_loss, &opt, adapter);
+			adapter->hw.kmrn_lock_loss_workaround_disabled = !kmrn_lock_loss;
+		} else {
+			adapter->hw.kmrn_lock_loss_workaround_disabled = !opt.def;
+		}
+	}
+
+	switch (adapter->hw.media_type) {
+	case e1000_media_type_fiber:
+	case e1000_media_type_internal_serdes:
+		e1000_check_fiber_options(adapter);
+		break;
+	case e1000_media_type_copper:
+		e1000_check_copper_options(adapter);
+		break;
+	default:
+		BUG();
+	}
+}
+
+/**
+ * e1000_check_fiber_options - Range Checking for Link Options, Fiber Version
+ * @adapter: board private structure
+ *
+ * Handles speed and duplex options on fiber adapters
+ **/
+
+static void __devinit e1000_check_fiber_options(struct e1000_adapter *adapter)
+{
+	int bd = adapter->bd_number;
+	if (num_Speed > bd) {
+		DPRINTK(PROBE, INFO, "Speed not valid for fiber adapters, "
+		       "parameter ignored\n");
+	}
+
+	if (num_Duplex > bd) {
+		DPRINTK(PROBE, INFO, "Duplex not valid for fiber adapters, "
+		       "parameter ignored\n");
+	}
+
+	if ((num_AutoNeg > bd) && (AutoNeg[bd] != 0x20)) {
+		DPRINTK(PROBE, INFO, "AutoNeg other than 1000/Full is "
+				 "not valid for fiber adapters, "
+				 "parameter ignored\n");
+	}
+}
+
+/**
+ * e1000_check_copper_options - Range Checking for Link Options, Copper Version
+ * @adapter: board private structure
+ *
+ * Handles speed and duplex options on copper adapters
+ **/
+
+static void __devinit e1000_check_copper_options(struct e1000_adapter *adapter)
+{
+	struct e1000_option opt;
+	unsigned int speed, dplx, an;
+	int bd = adapter->bd_number;
+
+	{ /* Speed */
+		static const struct e1000_opt_list speed_list[] = {
+			{          0, "" },
+			{   SPEED_10, "" },
+			{  SPEED_100, "" },
+			{ SPEED_1000, "" }};
+
+		opt = (struct e1000_option) {
+			.type = list_option,
+			.name = "Speed",
+			.err  = "parameter ignored",
+			.def  = 0,
+			.arg  = { .l = { .nr = ARRAY_SIZE(speed_list),
+					 .p = speed_list }}
+		};
+
+		if (num_Speed > bd) {
+			speed = Speed[bd];
+			e1000_validate_option(&speed, &opt, adapter);
+		} else {
+			speed = opt.def;
+		}
+	}
+	{ /* Duplex */
+		static const struct e1000_opt_list dplx_list[] = {
+			{           0, "" },
+			{ HALF_DUPLEX, "" },
+			{ FULL_DUPLEX, "" }};
+
+		opt = (struct e1000_option) {
+			.type = list_option,
+			.name = "Duplex",
+			.err  = "parameter ignored",
+			.def  = 0,
+			.arg  = { .l = { .nr = ARRAY_SIZE(dplx_list),
+					 .p = dplx_list }}
+		};
+
+		if (e1000_check_phy_reset_block(&adapter->hw)) {
+			DPRINTK(PROBE, INFO,
+				"Link active due to SoL/IDER Session. "
+			        "Speed/Duplex/AutoNeg parameter ignored.\n");
+			return;
+		}
+		if (num_Duplex > bd) {
+			dplx = Duplex[bd];
+			e1000_validate_option(&dplx, &opt, adapter);
+		} else {
+			dplx = opt.def;
+		}
+	}
+
+	if ((num_AutoNeg > bd) && (speed != 0 || dplx != 0)) {
+		DPRINTK(PROBE, INFO,
+		       "AutoNeg specified along with Speed or Duplex, "
+		       "parameter ignored\n");
+		adapter->hw.autoneg_advertised = AUTONEG_ADV_DEFAULT;
+	} else { /* Autoneg */
+		static const struct e1000_opt_list an_list[] =
+			#define AA "AutoNeg advertising "
+			{{ 0x01, AA "10/HD" },
+			 { 0x02, AA "10/FD" },
+			 { 0x03, AA "10/FD, 10/HD" },
+			 { 0x04, AA "100/HD" },
+			 { 0x05, AA "100/HD, 10/HD" },
+			 { 0x06, AA "100/HD, 10/FD" },
+			 { 0x07, AA "100/HD, 10/FD, 10/HD" },
+			 { 0x08, AA "100/FD" },
+			 { 0x09, AA "100/FD, 10/HD" },
+			 { 0x0a, AA "100/FD, 10/FD" },
+			 { 0x0b, AA "100/FD, 10/FD, 10/HD" },
+			 { 0x0c, AA "100/FD, 100/HD" },
+			 { 0x0d, AA "100/FD, 100/HD, 10/HD" },
+			 { 0x0e, AA "100/FD, 100/HD, 10/FD" },
+			 { 0x0f, AA "100/FD, 100/HD, 10/FD, 10/HD" },
+			 { 0x20, AA "1000/FD" },
+			 { 0x21, AA "1000/FD, 10/HD" },
+			 { 0x22, AA "1000/FD, 10/FD" },
+			 { 0x23, AA "1000/FD, 10/FD, 10/HD" },
+			 { 0x24, AA "1000/FD, 100/HD" },
+			 { 0x25, AA "1000/FD, 100/HD, 10/HD" },
+			 { 0x26, AA "1000/FD, 100/HD, 10/FD" },
+			 { 0x27, AA "1000/FD, 100/HD, 10/FD, 10/HD" },
+			 { 0x28, AA "1000/FD, 100/FD" },
+			 { 0x29, AA "1000/FD, 100/FD, 10/HD" },
+			 { 0x2a, AA "1000/FD, 100/FD, 10/FD" },
+			 { 0x2b, AA "1000/FD, 100/FD, 10/FD, 10/HD" },
+			 { 0x2c, AA "1000/FD, 100/FD, 100/HD" },
+			 { 0x2d, AA "1000/FD, 100/FD, 100/HD, 10/HD" },
+			 { 0x2e, AA "1000/FD, 100/FD, 100/HD, 10/FD" },
+			 { 0x2f, AA "1000/FD, 100/FD, 100/HD, 10/FD, 10/HD" }};
+
+		opt = (struct e1000_option) {
+			.type = list_option,
+			.name = "AutoNeg",
+			.err  = "parameter ignored",
+			.def  = AUTONEG_ADV_DEFAULT,
+			.arg  = { .l = { .nr = ARRAY_SIZE(an_list),
+					 .p = an_list }}
+		};
+
+		if (num_AutoNeg > bd) {
+			an = AutoNeg[bd];
+			e1000_validate_option(&an, &opt, adapter);
+		} else {
+			an = opt.def;
+		}
+		adapter->hw.autoneg_advertised = an;
+	}
+
+	switch (speed + dplx) {
+	case 0:
+		adapter->hw.autoneg = adapter->fc_autoneg = 1;
+		if ((num_Speed > bd) && (speed != 0 || dplx != 0))
+			DPRINTK(PROBE, INFO,
+			       "Speed and duplex autonegotiation enabled\n");
+		break;
+	case HALF_DUPLEX:
+		DPRINTK(PROBE, INFO, "Half Duplex specified without Speed\n");
+		DPRINTK(PROBE, INFO, "Using Autonegotiation at "
+			"Half Duplex only\n");
+		adapter->hw.autoneg = adapter->fc_autoneg = 1;
+		adapter->hw.autoneg_advertised = ADVERTISE_10_HALF |
+		                                 ADVERTISE_100_HALF;
+		break;
+	case FULL_DUPLEX:
+		DPRINTK(PROBE, INFO, "Full Duplex specified without Speed\n");
+		DPRINTK(PROBE, INFO, "Using Autonegotiation at "
+			"Full Duplex only\n");
+		adapter->hw.autoneg = adapter->fc_autoneg = 1;
+		adapter->hw.autoneg_advertised = ADVERTISE_10_FULL |
+		                                 ADVERTISE_100_FULL |
+		                                 ADVERTISE_1000_FULL;
+		break;
+	case SPEED_10:
+		DPRINTK(PROBE, INFO, "10 Mbps Speed specified "
+			"without Duplex\n");
+		DPRINTK(PROBE, INFO, "Using Autonegotiation at 10 Mbps only\n");
+		adapter->hw.autoneg = adapter->fc_autoneg = 1;
+		adapter->hw.autoneg_advertised = ADVERTISE_10_HALF |
+		                                 ADVERTISE_10_FULL;
+		break;
+	case SPEED_10 + HALF_DUPLEX:
+		DPRINTK(PROBE, INFO, "Forcing to 10 Mbps Half Duplex\n");
+		adapter->hw.autoneg = adapter->fc_autoneg = 0;
+		adapter->hw.forced_speed_duplex = e1000_10_half;
+		adapter->hw.autoneg_advertised = 0;
+		break;
+	case SPEED_10 + FULL_DUPLEX:
+		DPRINTK(PROBE, INFO, "Forcing to 10 Mbps Full Duplex\n");
+		adapter->hw.autoneg = adapter->fc_autoneg = 0;
+		adapter->hw.forced_speed_duplex = e1000_10_full;
+		adapter->hw.autoneg_advertised = 0;
+		break;
+	case SPEED_100:
+		DPRINTK(PROBE, INFO, "100 Mbps Speed specified "
+			"without Duplex\n");
+		DPRINTK(PROBE, INFO, "Using Autonegotiation at "
+			"100 Mbps only\n");
+		adapter->hw.autoneg = adapter->fc_autoneg = 1;
+		adapter->hw.autoneg_advertised = ADVERTISE_100_HALF |
+		                                 ADVERTISE_100_FULL;
+		break;
+	case SPEED_100 + HALF_DUPLEX:
+		DPRINTK(PROBE, INFO, "Forcing to 100 Mbps Half Duplex\n");
+		adapter->hw.autoneg = adapter->fc_autoneg = 0;
+		adapter->hw.forced_speed_duplex = e1000_100_half;
+		adapter->hw.autoneg_advertised = 0;
+		break;
+	case SPEED_100 + FULL_DUPLEX:
+		DPRINTK(PROBE, INFO, "Forcing to 100 Mbps Full Duplex\n");
+		adapter->hw.autoneg = adapter->fc_autoneg = 0;
+		adapter->hw.forced_speed_duplex = e1000_100_full;
+		adapter->hw.autoneg_advertised = 0;
+		break;
+	case SPEED_1000:
+		DPRINTK(PROBE, INFO, "1000 Mbps Speed specified without "
+			"Duplex\n");
+		goto full_duplex_only;
+	case SPEED_1000 + HALF_DUPLEX:
+		DPRINTK(PROBE, INFO,
+			"Half Duplex is not supported at 1000 Mbps\n");
+		/* fall through */
+	case SPEED_1000 + FULL_DUPLEX:
+full_duplex_only:
+		DPRINTK(PROBE, INFO,
+		       "Using Autonegotiation at 1000 Mbps Full Duplex only\n");
+		adapter->hw.autoneg = adapter->fc_autoneg = 1;
+		adapter->hw.autoneg_advertised = ADVERTISE_1000_FULL;
+		break;
+	default:
+		BUG();
+	}
+
+	/* Speed, AutoNeg and MDI/MDI-X must all play nice */
+	if (e1000_validate_mdi_setting(&(adapter->hw)) < 0) {
+		DPRINTK(PROBE, INFO,
+			"Speed, AutoNeg and MDI-X specifications are "
+			"incompatible. Setting MDI-X to a compatible value.\n");
+	}
+}
+
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/devices/e1000/e1000_param-2.6.31-orig.c	Wed Apr 13 22:06:28 2011 +0200
@@ -0,0 +1,792 @@
+/*******************************************************************************
+
+  Intel PRO/1000 Linux driver
+  Copyright(c) 1999 - 2006 Intel Corporation.
+
+  This program is free software; you can redistribute it and/or modify it
+  under the terms and conditions of the GNU General Public License,
+  version 2, as published by the Free Software Foundation.
+
+  This program is distributed in the hope it will be useful, but WITHOUT
+  ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+  FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
+  more details.
+
+  You should have received a copy of the GNU General Public License along with
+  this program; if not, write to the Free Software Foundation, Inc.,
+  51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
+
+  The full GNU General Public License is included in this distribution in
+  the file called "COPYING".
+
+  Contact Information:
+  Linux NICS <linux.nics@intel.com>
+  e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
+  Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
+
+*******************************************************************************/
+
+#include "e1000.h"
+
+/* This is the only thing that needs to be changed to adjust the
+ * maximum number of ports that the driver can manage.
+ */
+
+#define E1000_MAX_NIC 32
+
+#define OPTION_UNSET   -1
+#define OPTION_DISABLED 0
+#define OPTION_ENABLED  1
+
+/* All parameters are treated the same, as an integer array of values.
+ * This macro just reduces the need to repeat the same declaration code
+ * over and over (plus this helps to avoid typo bugs).
+ */
+
+#define E1000_PARAM_INIT { [0 ... E1000_MAX_NIC] = OPTION_UNSET }
+#define E1000_PARAM(X, desc) \
+	static int __devinitdata X[E1000_MAX_NIC+1] = E1000_PARAM_INIT; \
+	static unsigned int num_##X; \
+	module_param_array_named(X, X, int, &num_##X, 0); \
+	MODULE_PARM_DESC(X, desc);
+
+/* Transmit Descriptor Count
+ *
+ * Valid Range: 80-256 for 82542 and 82543 gigabit ethernet controllers
+ * Valid Range: 80-4096 for 82544 and newer
+ *
+ * Default Value: 256
+ */
+E1000_PARAM(TxDescriptors, "Number of transmit descriptors");
+
+/* Receive Descriptor Count
+ *
+ * Valid Range: 80-256 for 82542 and 82543 gigabit ethernet controllers
+ * Valid Range: 80-4096 for 82544 and newer
+ *
+ * Default Value: 256
+ */
+E1000_PARAM(RxDescriptors, "Number of receive descriptors");
+
+/* User Specified Speed Override
+ *
+ * Valid Range: 0, 10, 100, 1000
+ *  - 0    - auto-negotiate at all supported speeds
+ *  - 10   - only link at 10 Mbps
+ *  - 100  - only link at 100 Mbps
+ *  - 1000 - only link at 1000 Mbps
+ *
+ * Default Value: 0
+ */
+E1000_PARAM(Speed, "Speed setting");
+
+/* User Specified Duplex Override
+ *
+ * Valid Range: 0-2
+ *  - 0 - auto-negotiate for duplex
+ *  - 1 - only link at half duplex
+ *  - 2 - only link at full duplex
+ *
+ * Default Value: 0
+ */
+E1000_PARAM(Duplex, "Duplex setting");
+
+/* Auto-negotiation Advertisement Override
+ *
+ * Valid Range: 0x01-0x0F, 0x20-0x2F (copper); 0x20 (fiber)
+ *
+ * The AutoNeg value is a bit mask describing which speed and duplex
+ * combinations should be advertised during auto-negotiation.
+ * The supported speed and duplex modes are listed below
+ *
+ * Bit           7     6     5      4      3     2     1      0
+ * Speed (Mbps)  N/A   N/A   1000   N/A    100   100   10     10
+ * Duplex                    Full          Full  Half  Full   Half
+ *
+ * Default Value: 0x2F (copper); 0x20 (fiber)
+ */
+E1000_PARAM(AutoNeg, "Advertised auto-negotiation setting");
+#define AUTONEG_ADV_DEFAULT  0x2F
+#define AUTONEG_ADV_MASK     0x2F
+
+/* User Specified Flow Control Override
+ *
+ * Valid Range: 0-3
+ *  - 0 - No Flow Control
+ *  - 1 - Rx only, respond to PAUSE frames but do not generate them
+ *  - 2 - Tx only, generate PAUSE frames but ignore them on receive
+ *  - 3 - Full Flow Control Support
+ *
+ * Default Value: Read flow control settings from the EEPROM
+ */
+E1000_PARAM(FlowControl, "Flow Control setting");
+#define FLOW_CONTROL_DEFAULT FLOW_CONTROL_FULL
+
+/* XsumRX - Receive Checksum Offload Enable/Disable
+ *
+ * Valid Range: 0, 1
+ *  - 0 - disables all checksum offload
+ *  - 1 - enables receive IP/TCP/UDP checksum offload
+ *        on 82543 and newer -based NICs
+ *
+ * Default Value: 1
+ */
+E1000_PARAM(XsumRX, "Disable or enable Receive Checksum offload");
+
+/* Transmit Interrupt Delay in units of 1.024 microseconds
+ *  Tx interrupt delay needs to typically be set to something non zero
+ *
+ * Valid Range: 0-65535
+ */
+E1000_PARAM(TxIntDelay, "Transmit Interrupt Delay");
+#define DEFAULT_TIDV                   8
+#define MAX_TXDELAY               0xFFFF
+#define MIN_TXDELAY                    0
+
+/* Transmit Absolute Interrupt Delay in units of 1.024 microseconds
+ *
+ * Valid Range: 0-65535
+ */
+E1000_PARAM(TxAbsIntDelay, "Transmit Absolute Interrupt Delay");
+#define DEFAULT_TADV                  32
+#define MAX_TXABSDELAY            0xFFFF
+#define MIN_TXABSDELAY                 0
+
+/* Receive Interrupt Delay in units of 1.024 microseconds
+ *   hardware will likely hang if you set this to anything but zero.
+ *
+ * Valid Range: 0-65535
+ */
+E1000_PARAM(RxIntDelay, "Receive Interrupt Delay");
+#define DEFAULT_RDTR                   0
+#define MAX_RXDELAY               0xFFFF
+#define MIN_RXDELAY                    0
+
+/* Receive Absolute Interrupt Delay in units of 1.024 microseconds
+ *
+ * Valid Range: 0-65535
+ */
+E1000_PARAM(RxAbsIntDelay, "Receive Absolute Interrupt Delay");
+#define DEFAULT_RADV                   8
+#define MAX_RXABSDELAY            0xFFFF
+#define MIN_RXABSDELAY                 0
+
+/* Interrupt Throttle Rate (interrupts/sec)
+ *
+ * Valid Range: 100-100000 (0=off, 1=dynamic, 3=dynamic conservative)
+ */
+E1000_PARAM(InterruptThrottleRate, "Interrupt Throttling Rate");
+#define DEFAULT_ITR                    3
+#define MAX_ITR                   100000
+#define MIN_ITR                      100
+
+/* Enable Smart Power Down of the PHY
+ *
+ * Valid Range: 0, 1
+ *
+ * Default Value: 0 (disabled)
+ */
+E1000_PARAM(SmartPowerDownEnable, "Enable PHY smart power down");
+
+/* Enable Kumeran Lock Loss workaround
+ *
+ * Valid Range: 0, 1
+ *
+ * Default Value: 1 (enabled)
+ */
+E1000_PARAM(KumeranLockLoss, "Enable Kumeran lock loss workaround");
+
+struct e1000_option {
+	enum { enable_option, range_option, list_option } type;
+	const char *name;
+	const char *err;
+	int def;
+	union {
+		struct { /* range_option info */
+			int min;
+			int max;
+		} r;
+		struct { /* list_option info */
+			int nr;
+			const struct e1000_opt_list { int i; char *str; } *p;
+		} l;
+	} arg;
+};
+
+static int __devinit e1000_validate_option(unsigned int *value,
+					   const struct e1000_option *opt,
+					   struct e1000_adapter *adapter)
+{
+	if (*value == OPTION_UNSET) {
+		*value = opt->def;
+		return 0;
+	}
+
+	switch (opt->type) {
+	case enable_option:
+		switch (*value) {
+		case OPTION_ENABLED:
+			DPRINTK(PROBE, INFO, "%s Enabled\n", opt->name);
+			return 0;
+		case OPTION_DISABLED:
+			DPRINTK(PROBE, INFO, "%s Disabled\n", opt->name);
+			return 0;
+		}
+		break;
+	case range_option:
+		if (*value >= opt->arg.r.min && *value <= opt->arg.r.max) {
+			DPRINTK(PROBE, INFO,
+					"%s set to %i\n", opt->name, *value);
+			return 0;
+		}
+		break;
+	case list_option: {
+		int i;
+		const struct e1000_opt_list *ent;
+
+		for (i = 0; i < opt->arg.l.nr; i++) {
+			ent = &opt->arg.l.p[i];
+			if (*value == ent->i) {
+				if (ent->str[0] != '\0')
+					DPRINTK(PROBE, INFO, "%s\n", ent->str);
+				return 0;
+			}
+		}
+	}
+		break;
+	default:
+		BUG();
+	}
+
+	DPRINTK(PROBE, INFO, "Invalid %s value specified (%i) %s\n",
+	       opt->name, *value, opt->err);
+	*value = opt->def;
+	return -1;
+}
+
+static void e1000_check_fiber_options(struct e1000_adapter *adapter);
+static void e1000_check_copper_options(struct e1000_adapter *adapter);
+
+/**
+ * e1000_check_options - Range Checking for Command Line Parameters
+ * @adapter: board private structure
+ *
+ * This routine checks all command line parameters for valid user
+ * input.  If an invalid value is given, or if no user specified
+ * value exists, a default value is used.  The final value is stored
+ * in a variable in the adapter structure.
+ **/
+
+void __devinit e1000_check_options(struct e1000_adapter *adapter)
+{
+	struct e1000_option opt;
+	int bd = adapter->bd_number;
+
+	if (bd >= E1000_MAX_NIC) {
+		DPRINTK(PROBE, NOTICE,
+		       "Warning: no configuration for board #%i\n", bd);
+		DPRINTK(PROBE, NOTICE, "Using defaults for all values\n");
+	}
+
+	{ /* Transmit Descriptor Count */
+		struct e1000_tx_ring *tx_ring = adapter->tx_ring;
+		int i;
+		e1000_mac_type mac_type = adapter->hw.mac_type;
+
+		opt = (struct e1000_option) {
+			.type = range_option,
+			.name = "Transmit Descriptors",
+			.err  = "using default of "
+				__MODULE_STRING(E1000_DEFAULT_TXD),
+			.def  = E1000_DEFAULT_TXD,
+			.arg  = { .r = {
+				.min = E1000_MIN_TXD,
+				.max = mac_type < e1000_82544 ? E1000_MAX_TXD : E1000_MAX_82544_TXD
+				}}
+		};
+
+		if (num_TxDescriptors > bd) {
+			tx_ring->count = TxDescriptors[bd];
+			e1000_validate_option(&tx_ring->count, &opt, adapter);
+			tx_ring->count = ALIGN(tx_ring->count,
+						REQ_TX_DESCRIPTOR_MULTIPLE);
+		} else {
+			tx_ring->count = opt.def;
+		}
+		for (i = 0; i < adapter->num_tx_queues; i++)
+			tx_ring[i].count = tx_ring->count;
+	}
+	{ /* Receive Descriptor Count */
+		struct e1000_rx_ring *rx_ring = adapter->rx_ring;
+		int i;
+		e1000_mac_type mac_type = adapter->hw.mac_type;
+
+		opt = (struct e1000_option) {
+			.type = range_option,
+			.name = "Receive Descriptors",
+			.err  = "using default of "
+				__MODULE_STRING(E1000_DEFAULT_RXD),
+			.def  = E1000_DEFAULT_RXD,
+			.arg  = { .r = {
+				.min = E1000_MIN_RXD,
+				.max = mac_type < e1000_82544 ? E1000_MAX_RXD : E1000_MAX_82544_RXD
+			}}
+		};
+
+		if (num_RxDescriptors > bd) {
+			rx_ring->count = RxDescriptors[bd];
+			e1000_validate_option(&rx_ring->count, &opt, adapter);
+			rx_ring->count = ALIGN(rx_ring->count,
+						REQ_RX_DESCRIPTOR_MULTIPLE);
+		} else {
+			rx_ring->count = opt.def;
+		}
+		for (i = 0; i < adapter->num_rx_queues; i++)
+			rx_ring[i].count = rx_ring->count;
+	}
+	{ /* Checksum Offload Enable/Disable */
+		opt = (struct e1000_option) {
+			.type = enable_option,
+			.name = "Checksum Offload",
+			.err  = "defaulting to Enabled",
+			.def  = OPTION_ENABLED
+		};
+
+		if (num_XsumRX > bd) {
+			unsigned int rx_csum = XsumRX[bd];
+			e1000_validate_option(&rx_csum, &opt, adapter);
+			adapter->rx_csum = rx_csum;
+		} else {
+			adapter->rx_csum = opt.def;
+		}
+	}
+	{ /* Flow Control */
+
+		struct e1000_opt_list fc_list[] =
+			{{ E1000_FC_NONE,    "Flow Control Disabled" },
+			 { E1000_FC_RX_PAUSE,"Flow Control Receive Only" },
+			 { E1000_FC_TX_PAUSE,"Flow Control Transmit Only" },
+			 { E1000_FC_FULL,    "Flow Control Enabled" },
+			 { E1000_FC_DEFAULT, "Flow Control Hardware Default" }};
+
+		opt = (struct e1000_option) {
+			.type = list_option,
+			.name = "Flow Control",
+			.err  = "reading default settings from EEPROM",
+			.def  = E1000_FC_DEFAULT,
+			.arg  = { .l = { .nr = ARRAY_SIZE(fc_list),
+					 .p = fc_list }}
+		};
+
+		if (num_FlowControl > bd) {
+			unsigned int fc = FlowControl[bd];
+			e1000_validate_option(&fc, &opt, adapter);
+			adapter->hw.fc = adapter->hw.original_fc = fc;
+		} else {
+			adapter->hw.fc = adapter->hw.original_fc = opt.def;
+		}
+	}
+	{ /* Transmit Interrupt Delay */
+		opt = (struct e1000_option) {
+			.type = range_option,
+			.name = "Transmit Interrupt Delay",
+			.err  = "using default of " __MODULE_STRING(DEFAULT_TIDV),
+			.def  = DEFAULT_TIDV,
+			.arg  = { .r = { .min = MIN_TXDELAY,
+					 .max = MAX_TXDELAY }}
+		};
+
+		if (num_TxIntDelay > bd) {
+			adapter->tx_int_delay = TxIntDelay[bd];
+			e1000_validate_option(&adapter->tx_int_delay, &opt,
+			                      adapter);
+		} else {
+			adapter->tx_int_delay = opt.def;
+		}
+	}
+	{ /* Transmit Absolute Interrupt Delay */
+		opt = (struct e1000_option) {
+			.type = range_option,
+			.name = "Transmit Absolute Interrupt Delay",
+			.err  = "using default of " __MODULE_STRING(DEFAULT_TADV),
+			.def  = DEFAULT_TADV,
+			.arg  = { .r = { .min = MIN_TXABSDELAY,
+					 .max = MAX_TXABSDELAY }}
+		};
+
+		if (num_TxAbsIntDelay > bd) {
+			adapter->tx_abs_int_delay = TxAbsIntDelay[bd];
+			e1000_validate_option(&adapter->tx_abs_int_delay, &opt,
+			                      adapter);
+		} else {
+			adapter->tx_abs_int_delay = opt.def;
+		}
+	}
+	{ /* Receive Interrupt Delay */
+		opt = (struct e1000_option) {
+			.type = range_option,
+			.name = "Receive Interrupt Delay",
+			.err  = "using default of " __MODULE_STRING(DEFAULT_RDTR),
+			.def  = DEFAULT_RDTR,
+			.arg  = { .r = { .min = MIN_RXDELAY,
+					 .max = MAX_RXDELAY }}
+		};
+
+		if (num_RxIntDelay > bd) {
+			adapter->rx_int_delay = RxIntDelay[bd];
+			e1000_validate_option(&adapter->rx_int_delay, &opt,
+			                      adapter);
+		} else {
+			adapter->rx_int_delay = opt.def;
+		}
+	}
+	{ /* Receive Absolute Interrupt Delay */
+		opt = (struct e1000_option) {
+			.type = range_option,
+			.name = "Receive Absolute Interrupt Delay",
+			.err  = "using default of " __MODULE_STRING(DEFAULT_RADV),
+			.def  = DEFAULT_RADV,
+			.arg  = { .r = { .min = MIN_RXABSDELAY,
+					 .max = MAX_RXABSDELAY }}
+		};
+
+		if (num_RxAbsIntDelay > bd) {
+			adapter->rx_abs_int_delay = RxAbsIntDelay[bd];
+			e1000_validate_option(&adapter->rx_abs_int_delay, &opt,
+			                      adapter);
+		} else {
+			adapter->rx_abs_int_delay = opt.def;
+		}
+	}
+	{ /* Interrupt Throttling Rate */
+		opt = (struct e1000_option) {
+			.type = range_option,
+			.name = "Interrupt Throttling Rate (ints/sec)",
+			.err  = "using default of " __MODULE_STRING(DEFAULT_ITR),
+			.def  = DEFAULT_ITR,
+			.arg  = { .r = { .min = MIN_ITR,
+					 .max = MAX_ITR }}
+		};
+
+		if (num_InterruptThrottleRate > bd) {
+			adapter->itr = InterruptThrottleRate[bd];
+			switch (adapter->itr) {
+			case 0:
+				DPRINTK(PROBE, INFO, "%s turned off\n",
+				        opt.name);
+				break;
+			case 1:
+				DPRINTK(PROBE, INFO, "%s set to dynamic mode\n",
+					opt.name);
+				adapter->itr_setting = adapter->itr;
+				adapter->itr = 20000;
+				break;
+			case 3:
+				DPRINTK(PROBE, INFO,
+				        "%s set to dynamic conservative mode\n",
+					opt.name);
+				adapter->itr_setting = adapter->itr;
+				adapter->itr = 20000;
+				break;
+			default:
+				e1000_validate_option(&adapter->itr, &opt,
+				        adapter);
+				/* save the setting, because the dynamic bits change itr */
+				/* clear the lower two bits because they are
+				 * used as control */
+				adapter->itr_setting = adapter->itr & ~3;
+				break;
+			}
+		} else {
+			adapter->itr_setting = opt.def;
+			adapter->itr = 20000;
+		}
+	}
+	{ /* Smart Power Down */
+		opt = (struct e1000_option) {
+			.type = enable_option,
+			.name = "PHY Smart Power Down",
+			.err  = "defaulting to Disabled",
+			.def  = OPTION_DISABLED
+		};
+
+		if (num_SmartPowerDownEnable > bd) {
+			unsigned int spd = SmartPowerDownEnable[bd];
+			e1000_validate_option(&spd, &opt, adapter);
+			adapter->smart_power_down = spd;
+		} else {
+			adapter->smart_power_down = opt.def;
+		}
+	}
+	{ /* Kumeran Lock Loss Workaround */
+		opt = (struct e1000_option) {
+			.type = enable_option,
+			.name = "Kumeran Lock Loss Workaround",
+			.err  = "defaulting to Enabled",
+			.def  = OPTION_ENABLED
+		};
+
+		if (num_KumeranLockLoss > bd) {
+			unsigned int kmrn_lock_loss = KumeranLockLoss[bd];
+			e1000_validate_option(&kmrn_lock_loss, &opt, adapter);
+			adapter->hw.kmrn_lock_loss_workaround_disabled = !kmrn_lock_loss;
+		} else {
+			adapter->hw.kmrn_lock_loss_workaround_disabled = !opt.def;
+		}
+	}
+
+	switch (adapter->hw.media_type) {
+	case e1000_media_type_fiber:
+	case e1000_media_type_internal_serdes:
+		e1000_check_fiber_options(adapter);
+		break;
+	case e1000_media_type_copper:
+		e1000_check_copper_options(adapter);
+		break;
+	default:
+		BUG();
+	}
+}
+
+/**
+ * e1000_check_fiber_options - Range Checking for Link Options, Fiber Version
+ * @adapter: board private structure
+ *
+ * Handles speed and duplex options on fiber adapters
+ **/
+
+static void __devinit e1000_check_fiber_options(struct e1000_adapter *adapter)
+{
+	int bd = adapter->bd_number;
+	if (num_Speed > bd) {
+		DPRINTK(PROBE, INFO, "Speed not valid for fiber adapters, "
+		       "parameter ignored\n");
+	}
+
+	if (num_Duplex > bd) {
+		DPRINTK(PROBE, INFO, "Duplex not valid for fiber adapters, "
+		       "parameter ignored\n");
+	}
+
+	if ((num_AutoNeg > bd) && (AutoNeg[bd] != 0x20)) {
+		DPRINTK(PROBE, INFO, "AutoNeg other than 1000/Full is "
+				 "not valid for fiber adapters, "
+				 "parameter ignored\n");
+	}
+}
+
+/**
+ * e1000_check_copper_options - Range Checking for Link Options, Copper Version
+ * @adapter: board private structure
+ *
+ * Handles speed and duplex options on copper adapters
+ **/
+
+static void __devinit e1000_check_copper_options(struct e1000_adapter *adapter)
+{
+	struct e1000_option opt;
+	unsigned int speed, dplx, an;
+	int bd = adapter->bd_number;
+
+	{ /* Speed */
+		static const struct e1000_opt_list speed_list[] = {
+			{          0, "" },
+			{   SPEED_10, "" },
+			{  SPEED_100, "" },
+			{ SPEED_1000, "" }};
+
+		opt = (struct e1000_option) {
+			.type = list_option,
+			.name = "Speed",
+			.err  = "parameter ignored",
+			.def  = 0,
+			.arg  = { .l = { .nr = ARRAY_SIZE(speed_list),
+					 .p = speed_list }}
+		};
+
+		if (num_Speed > bd) {
+			speed = Speed[bd];
+			e1000_validate_option(&speed, &opt, adapter);
+		} else {
+			speed = opt.def;
+		}
+	}
+	{ /* Duplex */
+		static const struct e1000_opt_list dplx_list[] = {
+			{           0, "" },
+			{ HALF_DUPLEX, "" },
+			{ FULL_DUPLEX, "" }};
+
+		opt = (struct e1000_option) {
+			.type = list_option,
+			.name = "Duplex",
+			.err  = "parameter ignored",
+			.def  = 0,
+			.arg  = { .l = { .nr = ARRAY_SIZE(dplx_list),
+					 .p = dplx_list }}
+		};
+
+		if (e1000_check_phy_reset_block(&adapter->hw)) {
+			DPRINTK(PROBE, INFO,
+				"Link active due to SoL/IDER Session. "
+			        "Speed/Duplex/AutoNeg parameter ignored.\n");
+			return;
+		}
+		if (num_Duplex > bd) {
+			dplx = Duplex[bd];
+			e1000_validate_option(&dplx, &opt, adapter);
+		} else {
+			dplx = opt.def;
+		}
+	}
+
+	if ((num_AutoNeg > bd) && (speed != 0 || dplx != 0)) {
+		DPRINTK(PROBE, INFO,
+		       "AutoNeg specified along with Speed or Duplex, "
+		       "parameter ignored\n");
+		adapter->hw.autoneg_advertised = AUTONEG_ADV_DEFAULT;
+	} else { /* Autoneg */
+		static const struct e1000_opt_list an_list[] =
+			#define AA "AutoNeg advertising "
+			{{ 0x01, AA "10/HD" },
+			 { 0x02, AA "10/FD" },
+			 { 0x03, AA "10/FD, 10/HD" },
+			 { 0x04, AA "100/HD" },
+			 { 0x05, AA "100/HD, 10/HD" },
+			 { 0x06, AA "100/HD, 10/FD" },
+			 { 0x07, AA "100/HD, 10/FD, 10/HD" },
+			 { 0x08, AA "100/FD" },
+			 { 0x09, AA "100/FD, 10/HD" },
+			 { 0x0a, AA "100/FD, 10/FD" },
+			 { 0x0b, AA "100/FD, 10/FD, 10/HD" },
+			 { 0x0c, AA "100/FD, 100/HD" },
+			 { 0x0d, AA "100/FD, 100/HD, 10/HD" },
+			 { 0x0e, AA "100/FD, 100/HD, 10/FD" },
+			 { 0x0f, AA "100/FD, 100/HD, 10/FD, 10/HD" },
+			 { 0x20, AA "1000/FD" },
+			 { 0x21, AA "1000/FD, 10/HD" },
+			 { 0x22, AA "1000/FD, 10/FD" },
+			 { 0x23, AA "1000/FD, 10/FD, 10/HD" },
+			 { 0x24, AA "1000/FD, 100/HD" },
+			 { 0x25, AA "1000/FD, 100/HD, 10/HD" },
+			 { 0x26, AA "1000/FD, 100/HD, 10/FD" },
+			 { 0x27, AA "1000/FD, 100/HD, 10/FD, 10/HD" },
+			 { 0x28, AA "1000/FD, 100/FD" },
+			 { 0x29, AA "1000/FD, 100/FD, 10/HD" },
+			 { 0x2a, AA "1000/FD, 100/FD, 10/FD" },
+			 { 0x2b, AA "1000/FD, 100/FD, 10/FD, 10/HD" },
+			 { 0x2c, AA "1000/FD, 100/FD, 100/HD" },
+			 { 0x2d, AA "1000/FD, 100/FD, 100/HD, 10/HD" },
+			 { 0x2e, AA "1000/FD, 100/FD, 100/HD, 10/FD" },
+			 { 0x2f, AA "1000/FD, 100/FD, 100/HD, 10/FD, 10/HD" }};
+
+		opt = (struct e1000_option) {
+			.type = list_option,
+			.name = "AutoNeg",
+			.err  = "parameter ignored",
+			.def  = AUTONEG_ADV_DEFAULT,
+			.arg  = { .l = { .nr = ARRAY_SIZE(an_list),
+					 .p = an_list }}
+		};
+
+		if (num_AutoNeg > bd) {
+			an = AutoNeg[bd];
+			e1000_validate_option(&an, &opt, adapter);
+		} else {
+			an = opt.def;
+		}
+		adapter->hw.autoneg_advertised = an;
+	}
+
+	switch (speed + dplx) {
+	case 0:
+		adapter->hw.autoneg = adapter->fc_autoneg = 1;
+		if ((num_Speed > bd) && (speed != 0 || dplx != 0))
+			DPRINTK(PROBE, INFO,
+			       "Speed and duplex autonegotiation enabled\n");
+		break;
+	case HALF_DUPLEX:
+		DPRINTK(PROBE, INFO, "Half Duplex specified without Speed\n");
+		DPRINTK(PROBE, INFO, "Using Autonegotiation at "
+			"Half Duplex only\n");
+		adapter->hw.autoneg = adapter->fc_autoneg = 1;
+		adapter->hw.autoneg_advertised = ADVERTISE_10_HALF |
+		                                 ADVERTISE_100_HALF;
+		break;
+	case FULL_DUPLEX:
+		DPRINTK(PROBE, INFO, "Full Duplex specified without Speed\n");
+		DPRINTK(PROBE, INFO, "Using Autonegotiation at "
+			"Full Duplex only\n");
+		adapter->hw.autoneg = adapter->fc_autoneg = 1;
+		adapter->hw.autoneg_advertised = ADVERTISE_10_FULL |
+		                                 ADVERTISE_100_FULL |
+		                                 ADVERTISE_1000_FULL;
+		break;
+	case SPEED_10:
+		DPRINTK(PROBE, INFO, "10 Mbps Speed specified "
+			"without Duplex\n");
+		DPRINTK(PROBE, INFO, "Using Autonegotiation at 10 Mbps only\n");
+		adapter->hw.autoneg = adapter->fc_autoneg = 1;
+		adapter->hw.autoneg_advertised = ADVERTISE_10_HALF |
+		                                 ADVERTISE_10_FULL;
+		break;
+	case SPEED_10 + HALF_DUPLEX:
+		DPRINTK(PROBE, INFO, "Forcing to 10 Mbps Half Duplex\n");
+		adapter->hw.autoneg = adapter->fc_autoneg = 0;
+		adapter->hw.forced_speed_duplex = e1000_10_half;
+		adapter->hw.autoneg_advertised = 0;
+		break;
+	case SPEED_10 + FULL_DUPLEX:
+		DPRINTK(PROBE, INFO, "Forcing to 10 Mbps Full Duplex\n");
+		adapter->hw.autoneg = adapter->fc_autoneg = 0;
+		adapter->hw.forced_speed_duplex = e1000_10_full;
+		adapter->hw.autoneg_advertised = 0;
+		break;
+	case SPEED_100:
+		DPRINTK(PROBE, INFO, "100 Mbps Speed specified "
+			"without Duplex\n");
+		DPRINTK(PROBE, INFO, "Using Autonegotiation at "
+			"100 Mbps only\n");
+		adapter->hw.autoneg = adapter->fc_autoneg = 1;
+		adapter->hw.autoneg_advertised = ADVERTISE_100_HALF |
+		                                 ADVERTISE_100_FULL;
+		break;
+	case SPEED_100 + HALF_DUPLEX:
+		DPRINTK(PROBE, INFO, "Forcing to 100 Mbps Half Duplex\n");
+		adapter->hw.autoneg = adapter->fc_autoneg = 0;
+		adapter->hw.forced_speed_duplex = e1000_100_half;
+		adapter->hw.autoneg_advertised = 0;
+		break;
+	case SPEED_100 + FULL_DUPLEX:
+		DPRINTK(PROBE, INFO, "Forcing to 100 Mbps Full Duplex\n");
+		adapter->hw.autoneg = adapter->fc_autoneg = 0;
+		adapter->hw.forced_speed_duplex = e1000_100_full;
+		adapter->hw.autoneg_advertised = 0;
+		break;
+	case SPEED_1000:
+		DPRINTK(PROBE, INFO, "1000 Mbps Speed specified without "
+			"Duplex\n");
+		goto full_duplex_only;
+	case SPEED_1000 + HALF_DUPLEX:
+		DPRINTK(PROBE, INFO,
+			"Half Duplex is not supported at 1000 Mbps\n");
+		/* fall through */
+	case SPEED_1000 + FULL_DUPLEX:
+full_duplex_only:
+		DPRINTK(PROBE, INFO,
+		       "Using Autonegotiation at 1000 Mbps Full Duplex only\n");
+		adapter->hw.autoneg = adapter->fc_autoneg = 1;
+		adapter->hw.autoneg_advertised = ADVERTISE_1000_FULL;
+		break;
+	default:
+		BUG();
+	}
+
+	/* Speed, AutoNeg and MDI/MDI-X must all play nice */
+	if (e1000_validate_mdi_setting(&(adapter->hw)) < 0) {
+		DPRINTK(PROBE, INFO,
+			"Speed, AutoNeg and MDI-X specifications are "
+			"incompatible. Setting MDI-X to a compatible value.\n");
+	}
+}
+
--- a/examples/dc_rtai/dc_rtai_sample.c	Wed Mar 23 08:06:58 2011 +0100
+++ b/examples/dc_rtai/dc_rtai_sample.c	Wed Apr 13 22:06:28 2011 +0200
@@ -229,8 +229,8 @@
         }
         ecrt_master_sync_slave_clocks(master);
         ecrt_domain_queue(domain1);
+        rt_sem_signal(&master_sem);
         ecrt_master_send(master);
-        rt_sem_signal(&master_sem);
 
         rt_task_wait_period();
     }
@@ -238,30 +238,18 @@
 
 /*****************************************************************************/
 
-void send_callback(void *cb_data)
+void request_lock_callback(void *cb_data)
 {
     ec_master_t *m = (ec_master_t *) cb_data;
-
-    // too close to the next real time cycle: deny access...
-    if (get_cycles() - t_last_cycle <= t_critical) {
-        rt_sem_wait(&master_sem);
-        ecrt_master_send_ext(m);
-        rt_sem_signal(&master_sem);
-    }
-}
-
-/*****************************************************************************/
-
-void receive_callback(void *cb_data)
+    rt_sem_wait(&master_sem);
+}
+
+/*****************************************************************************/
+
+void release_lock_callback(void *cb_data)
 {
     ec_master_t *m = (ec_master_t *) cb_data;
-
-    // too close to the next real time cycle: deny access...
-    if (get_cycles() - t_last_cycle <= t_critical) {
-        rt_sem_wait(&master_sem);
-        ecrt_master_receive(m);
-        rt_sem_signal(&master_sem);
-    }
+    rt_sem_signal(&master_sem);
 }
 
 /*****************************************************************************/
@@ -285,7 +273,7 @@
         goto out_return;
     }
 
-    ecrt_master_callbacks(master, send_callback, receive_callback, master);
+    ecrt_master_callbacks(master, request_lock_callback, release_lock_callback, master);
 
     printk(KERN_INFO PFX "Registering domain...\n");
     if (!(domain1 = ecrt_master_create_domain(master))) {
--- a/examples/mini/mini.c	Wed Mar 23 08:06:58 2011 +0100
+++ b/examples/mini/mini.c	Wed Apr 13 22:06:28 2011 +0200
@@ -346,10 +346,9 @@
     EC_WRITE_U8(domain1_pd + off_dig_out, blink ? 0x06 : 0x09);
 
     // send process data
-    down(&master_sem);
     ecrt_domain_queue(domain1);
+    up(&master_sem);
     ecrt_master_send(master);
-    up(&master_sem);
 
     // restart timer
     timer.expires += HZ / FREQUENCY;
@@ -358,21 +357,17 @@
 
 /*****************************************************************************/
 
-void send_callback(void *cb_data)
+void request_lock_callback(void *cb_data)
 {
     ec_master_t *m = (ec_master_t *) cb_data;
     down(&master_sem);
-    ecrt_master_send_ext(m);
-    up(&master_sem);
-}
-
-/*****************************************************************************/
-
-void receive_callback(void *cb_data)
+}
+
+/*****************************************************************************/
+
+void release_lock_callback(void *cb_data)
 {
     ec_master_t *m = (ec_master_t *) cb_data;
-    down(&master_sem);
-    ecrt_master_receive(m);
     up(&master_sem);
 }
 
@@ -398,7 +393,7 @@
     }
 
     sema_init(&master_sem, 1);
-    ecrt_master_callbacks(master, send_callback, receive_callback, master);
+    ecrt_master_callbacks(master, request_lock_callback, release_lock_callback, master);
 
     printk(KERN_INFO PFX "Registering domain...\n");
     if (!(domain1 = ecrt_master_create_domain(master))) {
--- a/examples/rtai/rtai_sample.c	Wed Mar 23 08:06:58 2011 +0100
+++ b/examples/rtai/rtai_sample.c	Wed Apr 13 22:06:28 2011 +0200
@@ -234,8 +234,8 @@
 
         rt_sem_wait(&master_sem);
         ecrt_domain_queue(domain1);
+        rt_sem_signal(&master_sem);
         ecrt_master_send(master);
-        rt_sem_signal(&master_sem);
 
         rt_task_wait_period();
     }
@@ -243,30 +243,18 @@
 
 /*****************************************************************************/
 
-void send_callback(void *cb_data)
+void request_lock_callback(void *cb_data)
 {
     ec_master_t *m = (ec_master_t *) cb_data;
-
-    // too close to the next real time cycle: deny access...
-    if (get_cycles() - t_last_cycle <= t_critical) {
-        rt_sem_wait(&master_sem);
-        ecrt_master_send_ext(m);
-        rt_sem_signal(&master_sem);
-    }
-}
-
-/*****************************************************************************/
-
-void receive_callback(void *cb_data)
+    rt_sem_wait(&master_sem);
+}
+
+/*****************************************************************************/
+
+void release_lock_callback(void *cb_data)
 {
     ec_master_t *m = (ec_master_t *) cb_data;
-
-    // too close to the next real time cycle: deny access...
-    if (get_cycles() - t_last_cycle <= t_critical) {
-        rt_sem_wait(&master_sem);
-        ecrt_master_receive(m);
-        rt_sem_signal(&master_sem);
-    }
+    rt_sem_signal(&master_sem);
 }
 
 /*****************************************************************************/
@@ -292,7 +280,7 @@
         goto out_return;
     }
 
-    ecrt_master_callbacks(master, send_callback, receive_callback, master);
+    ecrt_master_callbacks(master, request_lock_callback, release_lock_callback, master);
 
     printk(KERN_INFO PFX "Registering domain...\n");
     if (!(domain1 = ecrt_master_create_domain(master))) {
--- a/examples/tty/tty.c	Wed Mar 23 08:06:58 2011 +0100
+++ b/examples/tty/tty.c	Wed Apr 13 22:06:28 2011 +0200
@@ -141,8 +141,8 @@
     // send process data
     down(&master_sem);
     ecrt_domain_queue(domain1);
+    up(&master_sem);
     ecrt_master_send(master);
-    up(&master_sem);
 
     // restart timer
     timer.expires += HZ / FREQUENCY;
@@ -151,21 +151,17 @@
 
 /*****************************************************************************/
 
-void send_callback(void *cb_data)
+void request_lock_callback(void *cb_data)
 {
     ec_master_t *m = (ec_master_t *) cb_data;
     down(&master_sem);
-    ecrt_master_send_ext(m);
-    up(&master_sem);
-}
-
-/*****************************************************************************/
-
-void receive_callback(void *cb_data)
+}
+
+/*****************************************************************************/
+
+void release_lock_callback(void *cb_data)
 {
     ec_master_t *m = (ec_master_t *) cb_data;
-    down(&master_sem);
-    ecrt_master_receive(m);
     up(&master_sem);
 }
 
@@ -186,7 +182,7 @@
     }
 
     sema_init(&master_sem, 1);
-    ecrt_master_callbacks(master, send_callback, receive_callback, master);
+    ecrt_master_callbacks(master, request_lock_callback, release_lock_callback, master);
 
     printk(KERN_INFO PFX "Registering domain...\n");
     if (!(domain1 = ecrt_master_create_domain(master))) {
--- a/include/ecrt.h	Wed Mar 23 08:06:58 2011 +0100
+++ b/include/ecrt.h	Wed Apr 13 22:06:28 2011 +0200
@@ -5,7 +5,7 @@
  *  Copyright (C) 2006-2008  Florian Pose, Ingenieurgemeinschaft IgH
  *
  *  This file is part of the IgH EtherCAT master userspace library.
- *  
+ *
  *  The IgH EtherCAT master userspace library is free software; you can
  *  redistribute it and/or modify it under the terms of the GNU Lesser General
  *  Public License as published by the Free Software Foundation; version 2.1
@@ -19,9 +19,9 @@
  *  You should have received a copy of the GNU Lesser General Public License
  *  along with the IgH EtherCAT master userspace library. If not, see
  *  <http://www.gnu.org/licenses/>.
- *  
+ *
  *  ---
- *  
+ *
  *  The license mentioned above concerns the source code only. Using the
  *  EtherCAT technology and brand is only permitted in compliance with the
  *  industrial property and similar rights of Beckhoff Automation GmbH.
@@ -47,11 +47,9 @@
  *   ecrt_master_sync_slave_clocks() for offset and drift compensation. The
  *   EC_TIMEVAL2NANO() macro can be used for epoch time conversion, while the
  *   ecrt_master_sync_monitor_queue() and ecrt_master_sync_monitor_process()
- *   methods can be used to monitor the synchrony. 
+ *   methods can be used to monitor the synchrony.
  * - Improved the callback mechanism. ecrt_master_callbacks() now takes two
- *   callback functions for sending and receiving datagrams.
- *   ecrt_master_send_ext() is used to execute the sending of non-application
- *   datagrams.
+ *   callback functions for locking and unlocking the fsm datagram queue.
  * - Added watchdog configuration (method ecrt_slave_config_watchdog(),
  *   #ec_watchdog_mode_t, \a watchdog_mode parameter in ec_sync_info_t and
  *   ecrt_slave_config_sync_manager()).
@@ -79,6 +77,9 @@
  *   and ecrt_master_read_idn() and ecrt_master_write_idn() to read/write IDNs
  *   ad-hoc via the user-space library.
  * - Added ecrt_master_reset() to initiate retrying to configure slaves.
+ * - Added support for overlapping PDOs which allows inputs to use the same
+ *   space as outputs on the frame. This reduces the frame length.
+ *
  *
  * @{
  */
@@ -136,6 +137,9 @@
  */
 #define EC_MAX_STRING_LENGTH 64
 
+/** Maximum number of slave ports. */
+#define EC_MAX_PORTS 4
+
 /** Timeval to nanoseconds conversion.
  *
  * This macro converts a Unix epoch time to EtherCAT DC time.
@@ -148,7 +152,7 @@
     (((TV).tv_sec - 946684800ULL) * 1000000000ULL + (TV).tv_usec * 1000ULL)
 
 /******************************************************************************
- * Data types 
+ * Data types
  *****************************************************************************/
 
 struct ec_master;
@@ -193,7 +197,7 @@
 /** Slave configuration state.
  *
  * This is used as an output parameter of ecrt_slave_config_state().
- * 
+ *
  * \see ecrt_slave_config_state().
  */
 typedef struct  {
@@ -221,12 +225,33 @@
 typedef struct {
    unsigned int slave_count; /**< Number of slaves in the bus. */
    unsigned int link_up : 1; /**< \a true, if the network link is up. */
-   uint8_t scan_busy; /**< \a true, while the master is scanning the bus */   
+   uint8_t scan_busy; /**< \a true, while the master is scanning the bus */
    uint64_t app_time; /**< Application time. */
 } ec_master_info_t;
 
 /*****************************************************************************/
 
+/** EtherCAT slave port descriptor.
+ */
+typedef enum {
+    EC_PORT_NOT_IMPLEMENTED, /**< Port is not implemented. */
+    EC_PORT_NOT_CONFIGURED, /**< Port is not configured. */
+    EC_PORT_EBUS, /**< Port is an e-bus. */
+    EC_PORT_MII /**< Port is a mii. */
+} ec_slave_port_desc_t;
+
+/*****************************************************************************/
+
+/** EtherCAT slave port information.
+ */
+typedef struct {
+    uint8_t link_up; /**< Link detected. */
+    uint8_t loop_closed; /**< Loop closed. */
+    uint8_t signal_detected; /**< Detected signal on RX port. */
+} ec_slave_port_link_t;
+
+/*****************************************************************************/
+
 /** Slave information.
  *
  * This is used as an output parameter of ecrt_master_get_slave().
@@ -241,6 +266,13 @@
     uint32_t serial_number; /**< Serial-Number stored on the slave. */
     uint16_t alias; /**< The slaves alias if not equal to 0. */
     int16_t current_on_ebus; /**< Used current in mA. */
+    struct {
+        ec_slave_port_desc_t desc;
+        ec_slave_port_link_t link;
+        uint32_t receive_time;
+        uint16_t next_slave;
+        uint32_t delay_to_next_dc;
+    } ports[EC_MAX_PORTS];
     uint8_t al_state; /**< Current state of the slave. */
     uint8_t error_flag; /**< Error flag for that slave. */
     uint8_t sync_count; /**< Number of sync managers. */
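A hedged userspace sketch of reading the new per-port data back (dump_ports() is a hypothetical helper; ecrt_master_get_slave() returns 0 on success, and the field copy mirrors the loop added to lib/master.c further below):

    #include <stdio.h>
    #include "ecrt.h"

    static void dump_ports(ec_master_t *master, uint16_t slave_position)
    {
        ec_slave_info_t info;
        unsigned int i;

        if (ecrt_master_get_slave(master, slave_position, &info))
            return; /* query failed */

        for (i = 0; i < EC_MAX_PORTS; i++) {
            if (info.ports[i].desc == EC_PORT_NOT_IMPLEMENTED)
                continue; /* slave has fewer physical ports */
            printf("port %u: link_up=%u next_slave=%u delay_to_next_dc=%u\n",
                    i, info.ports[i].link.link_up,
                    info.ports[i].next_slave,
                    info.ports[i].delay_to_next_dc);
        }
    }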
@@ -312,9 +344,9 @@
 /*****************************************************************************/
 
 /** PDO configuration information.
- * 
+ *
  * This is the data type of the \a pdos field in ec_sync_info_t.
- * 
+ *
  * \see ecrt_slave_config_pdos().
  */
 typedef struct {
@@ -363,7 +395,7 @@
     uint8_t subindex; /**< PDO entry subindex. */
     unsigned int *offset; /**< Pointer to a variable to store the PDO entry's
                        (byte-)offset in the process data. */
-    unsigned int *bit_position; /**< Pointer to a variable to store a bit 
+    unsigned int *bit_position; /**< Pointer to a variable to store a bit
                                   position (0-7) within the \a offset. Can be
                                   NULL, in which case an error is raised if the
                                   PDO entry does not byte-align. */
@@ -409,7 +441,7 @@
 unsigned int ecrt_version_magic(void);
 
 /** Requests an EtherCAT master for realtime operation.
- * 
+ *
  * Before an application can access an EtherCAT master, it has to reserve one
  * for exclusive use.
  *
@@ -496,27 +528,18 @@
 
 /** Sets the locking callbacks.
  *
- * For concurrent master access, i. e. if other instances than the application
- * want to send and receive datagrams on the bus, the application has to
- * provide a callback mechanism. This method takes two function pointers as
- * its parameters. Asynchronous master access (like EoE processing) is only
- * possible if the callbacks have been set.
- *
- * The task of the send callback (\a send_cb) is to decide, if the bus is
- * currently accessible and whether or not to call the ecrt_master_send_ext()
- * method.
- *
- * The task of the receive callback (\a receive_cb) is to decide, if a call to
- * ecrt_master_receive() is allowed and to execute it respectively.
+ * For concurrent master access, the application has to provide a locking
+ * mechanism. The method takes two function pointers and a data value as
+ * its parameters.
+ * The arbitrary \a cb_data value is passed as an argument to every callback.
+ *
  */
 void ecrt_master_callbacks(
         ec_master_t *master, /**< EtherCAT master */
-        void (*send_cb)(void *), /**< Datagram sending callback. */
-        void (*receive_cb)(void *), /**< Receive callback. */
-        void *cb_data /**< Arbitraty pointer passed to the callback functions.
-                       */
-        );
-
+        void (*lock_cb)(void *), /**< Lock function. */
+        void (*unlock_cb)(void *), /**< Unlock function. */
+        void *cb_data /**< Arbitrary user data. */
+        );
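A minimal kernel-module sketch of the new contract, matching what the example applications above were converted to (master_sem and the master pointer are assumed to be set up by the application, as in examples/mini/mini.c):

    #include <linux/semaphore.h>
    #include "ecrt.h"

    static struct semaphore master_sem;
    static ec_master_t *master; /* obtained via ecrt_request_master() */

    static void request_lock_callback(void *cb_data)
    {
        down(&master_sem); /* block until the cyclic task is done */
    }

    static void release_lock_callback(void *cb_data)
    {
        up(&master_sem);
    }

    static void setup_callbacks(void)
    {
        sema_init(&master_sem, 1);
        ecrt_master_callbacks(master, request_lock_callback,
                release_lock_callback, master);
    }

The cyclic task then takes the same semaphore around ecrt_master_receive() and ecrt_domain_queue() and releases it before ecrt_master_send(), as the example hunks above show.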
 
 /** Returns domain structure pointer
  *
@@ -832,15 +855,6 @@
         ec_master_t *master /**< EtherCAT master. */
         );
 
-/** Sends non-application datagrams.
- *
- * This method has to be called in the send callback function passed via
- * ecrt_master_callbacks() to allow the sending of non-application datagrams.
- */
-void ecrt_master_send_ext(
-        ec_master_t *master /**< EtherCAT master. */
-        );
-
 /** Reads the current master state.
  *
  * Stores the master state information in the given \a state structure.
@@ -850,17 +864,29 @@
         ec_master_state_t *state /**< Structure to store the information. */
         );
 
+/** Reads the current master state and the al_state of all configured slaves.
+ *
+ * Use this function instead of ecrt_master_state() if there are unused
+ * slaves on the bus.
+ * Stores the master state information in the given \a state structure.
+ * \see ecrt_master_state()
+ */
+void ecrt_master_configured_slaves_state(
+        const ec_master_t *master, /**< EtherCAT master. */
+        ec_master_state_t *state /**< Structure to store the information. */
+        );
+
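A usage sketch, assuming the usual ec_master_state_t fields (slaves_responding, al_states, link_up) known from ecrt_master_state(); the struct itself is not part of this hunk:

    #include <stdio.h>
    #include "ecrt.h"

    static void check_configured_slaves(const ec_master_t *master)
    {
        ec_master_state_t ms;

        ecrt_master_configured_slaves_state(master, &ms);
        if (!ms.link_up)
            fprintf(stderr, "network link is down\n");
        if (ms.al_states != 0x08) /* some configured slave is not in OP */
            fprintf(stderr, "AL states of configured slaves: 0x%x\n",
                    ms.al_states);
    }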
 /** Sets the application time.
  *
  * The master has to know the application's time when operating slaves with
  * distributed clocks. The time is not incremented by the master itself, so
  * this method has to be called cyclically.
- * 
+ *
  * The time is used when setting the slaves' <tt>System Time Offset</tt> and
  * <tt>Cyclic Operation Start Time</tt> registers and when synchronizing the
  * DC reference clock to the application time via
  * ecrt_master_sync_reference_clock().
- * 
+ *
  * The time is defined as nanoseconds from 2000-01-01 00:00. Converting an
  * epoch time can be done with the EC_TIMEVAL2NANO() macro.
  */
@@ -952,6 +978,17 @@
                                      */
         );
 
+/** Configure whether a slave allows overlapping PDOs.
+ *
+ * Overlapping PDOs allow inputs to use the same space as outputs on the frame.
+ * This reduces the frame length.
+ */
+void ecrt_slave_config_overlapping_pdos(
+        ec_slave_config_t *sc, /**< Slave configuration. */
+        uint8_t allow_overlapping_pdos /**< Allow overlapping PDOs */
+        );
+
+
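Usage is a single call on the slave configuration before ecrt_master_activate(); a sketch (VENDOR_ID and PRODUCT_CODE are placeholders for the slave's identity):

    static int configure_slave(ec_master_t *master)
    {
        ec_slave_config_t *sc;

        sc = ecrt_master_slave_config(master, 0, 0, VENDOR_ID, PRODUCT_CODE);
        if (!sc)
            return -1; /* configuration could not be created */

        /* let this slave's input PDOs share frame space with its outputs */
        ecrt_slave_config_overlapping_pdos(sc, 1);
        return 0;
    }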
 /** Add a PDO to a sync manager's PDO assignment.
  *
  * \see ecrt_slave_config_pdos()
@@ -969,7 +1006,7 @@
  * This can be called before assigning PDOs via
  * ecrt_slave_config_pdo_assign_add(), to clear the default assignment of a
  * sync manager.
- * 
+ *
  * \see ecrt_slave_config_pdos()
  */
 void ecrt_slave_config_pdo_assign_clear(
@@ -1023,28 +1060,28 @@
  *     {0x3101, 1,  8}, // status
  *     {0x3101, 2, 16}  // value
  * };
- * 
+ *
  * ec_pdo_entry_info_t el3162_channel2[] = {
  *     {0x3102, 1,  8}, // status
  *     {0x3102, 2, 16}  // value
  * };
- * 
+ *
  * ec_pdo_info_t el3162_pdos[] = {
  *     {0x1A00, 2, el3162_channel1},
  *     {0x1A01, 2, el3162_channel2}
  * };
- * 
+ *
  * ec_sync_info_t el3162_syncs[] = {
  *     {2, EC_DIR_OUTPUT},
  *     {3, EC_DIR_INPUT, 2, el3162_pdos},
  *     {0xff}
  * };
- * 
+ *
  * if (ecrt_slave_config_pdos(sc_ana_in, EC_END, el3162_syncs)) {
  *     // handle error
  * }
  * \endcode
- * 
+ *
  * The next example shows, how to configure the PDO assignment only. The
  * entries for each assigned PDO are taken from the PDO's default mapping.
  * Please note, that PDO entry registration will fail, if the PDO
@@ -1055,11 +1092,11 @@
  *     {0x1600}, // Channel 1
  *     {0x1601}  // Channel 2
  * };
- * 
+ *
  * ec_sync_info_t syncs[] = {
  *     {3, EC_DIR_INPUT, 2, pdos},
  * };
- * 
+ *
  * if (ecrt_slave_config_pdos(slave_config_ana_in, 1, syncs)) {
  *     // handle error
  * }
@@ -1101,7 +1138,7 @@
         uint16_t entry_index, /**< Index of the PDO entry to register. */
         uint8_t entry_subindex, /**< Subindex of the PDO entry to register. */
         ec_domain_t *domain, /**< Domain. */
-        unsigned int *bit_position /**< Optional address if bit addressing 
+        unsigned int *bit_position /**< Optional address if bit addressing
                                  is desired */
         );
 
--- a/lib/master.c	Wed Mar 23 08:06:58 2011 +0100
+++ b/lib/master.c	Wed Apr 13 22:06:28 2011 +0200
@@ -1,11 +1,11 @@
 /******************************************************************************
- *  
+ *
  *  $Id$
- *  
+ *
  *  Copyright (C) 2006-2009  Florian Pose, Ingenieurgemeinschaft IgH
- *  
+ *
  *  This file is part of the IgH EtherCAT master userspace library.
- *  
+ *
  *  The IgH EtherCAT master userspace library is free software; you can
  *  redistribute it and/or modify it under the terms of the GNU Lesser General
  *  Public License as published by the Free Software Foundation; version 2.1
@@ -19,9 +19,9 @@
  *  You should have received a copy of the GNU Lesser General Public License
  *  along with the IgH EtherCAT master userspace library. If not, see
  *  <http://www.gnu.org/licenses/>.
- *  
+ *
  *  ---
- *  
+ *
  *  The license mentioned above concerns the source code only. Using the
  *  EtherCAT technology and brand is only permitted in compliance with the
  *  industrial property and similar rights of Beckhoff Automation GmbH.
@@ -118,12 +118,12 @@
         fprintf(stderr, "Failed to allocate memory.\n");
         return 0;
     }
-    
+
     index = ioctl(master->fd, EC_IOCTL_CREATE_DOMAIN, NULL);
     if (index == -1) {
         fprintf(stderr, "Failed to create domain: %s\n", strerror(errno));
         free(domain);
-        return 0; 
+        return 0;
     }
 
     domain->next = NULL;
@@ -166,17 +166,17 @@
         fprintf(stderr, "Failed to allocate memory.\n");
         return 0;
     }
-    
+
     data.alias = alias;
     data.position = position;
     data.vendor_id = vendor_id;
     data.product_code = product_code;
-    
+
     if (ioctl(master->fd, EC_IOCTL_CREATE_SLAVE_CONFIG, &data) == -1) {
         fprintf(stderr, "Failed to create slave config: %s\n",
                 strerror(errno));
         free(sc);
-        return 0; 
+        return 0;
     }
 
     sc->next = NULL;
@@ -216,7 +216,7 @@
         ec_slave_info_t *slave_info)
 {
     ec_ioctl_slave_t data;
-    int index;
+    int index, i;
 
     data.position = slave_position;
 
@@ -232,6 +232,15 @@
     slave_info->serial_number = data.serial_number;
     slave_info->alias = data.alias;
     slave_info->current_on_ebus = data.current_on_ebus;
+    for (i = 0; i < EC_MAX_PORTS; i++) {
+        slave_info->ports[i].desc = data.ports[i].desc;
+        slave_info->ports[i].link.link_up = data.ports[i].link.link_up;
+        slave_info->ports[i].link.loop_closed = data.ports[i].link.loop_closed;
+        slave_info->ports[i].link.signal_detected = data.ports[i].link.signal_detected;
+        slave_info->ports[i].receive_time = data.ports[i].receive_time;
+        slave_info->ports[i].next_slave = data.ports[i].next_slave;
+        slave_info->ports[i].delay_to_next_dc = data.ports[i].delay_to_next_dc;
+    }
     slave_info->al_state = data.al_state;
     slave_info->error_flag = data.error_flag;
     slave_info->sync_count = data.sync_count;
@@ -455,7 +464,7 @@
         }
 
         // Access the mapped region to cause the initial page fault
-        printf("pd: %x\n", master->process_data[0]);
+        memset(master->process_data, 0, master->process_data_size);
     }
 
     return 0;
@@ -515,6 +524,16 @@
 
 /*****************************************************************************/
 
+void ecrt_master_configured_slaves_state(const ec_master_t *master,
+                                         ec_master_state_t *state)
+{
+    if (ioctl(master->fd, EC_IOCTL_MASTER_SC_STATE, state) == -1) {
+        fprintf(stderr, "Failed to get master state: %s\n", strerror(errno));
+    }
+}
+
+/*****************************************************************************/
+
 void ecrt_master_application_time(ec_master_t *master, uint64_t app_time)
 {
     ec_ioctl_app_time_t data;
--- a/lib/slave_config.c	Wed Mar 23 08:06:58 2011 +0100
+++ b/lib/slave_config.c	Wed Apr 13 22:06:28 2011 +0200
@@ -110,6 +110,23 @@
 
 /*****************************************************************************/
 
+void ecrt_slave_config_overlapping_pdos(ec_slave_config_t *sc,
+        uint8_t allow_overlapping_pdos)
+{
+    ec_ioctl_config_t data;
+
+    memset(&data, 0x00, sizeof(ec_ioctl_config_t));
+    data.config_index = sc->index;
+    data.allow_overlapping_pdos = allow_overlapping_pdos;
+
+    if (ioctl(sc->master->fd, EC_IOCTL_SC_OVERLAPPING_IO, &data) == -1) {
+        fprintf(stderr, "Failed to config overlapping PDOs: %s\n",
+                strerror(errno));
+    }
+}
+
+/*****************************************************************************/
+
 int ecrt_slave_config_pdo_assign_add(ec_slave_config_t *sc,
         uint8_t sync_index, uint16_t pdo_index)
 {
@@ -415,7 +432,7 @@
     if (size) {
         req->data = malloc(size);
         if (!req->data) {
-            fprintf(stderr, "Failed to allocate %u bytes of SDO data"
+            fprintf(stderr, "Failed to allocate %zu bytes of SDO data"
                     " memory.\n", size);
             free(req);
             return 0;
@@ -484,7 +501,7 @@
     if (size) {
         voe->data = malloc(size);
         if (!voe->data) {
-            fprintf(stderr, "Failed to allocate %u bytes of VoE data"
+            fprintf(stderr, "Failed to allocate %zu bytes of VoE data"
                     " memory.\n", size);
             free(voe);
             return 0;
--- a/master/cdev.c	Wed Mar 23 08:06:58 2011 +0100
+++ b/master/cdev.c	Wed Apr 13 22:06:28 2011 +0200
@@ -186,7 +186,7 @@
     ec_ioctl_master_t data;
     unsigned int i;
 
-    if (down_interruptible(&master->master_sem))
+    if (ec_mutex_lock_interruptible(&master->master_mutex))
         return -EINTR;
     data.slave_count = master->slave_count;
     data.config_count = ec_master_config_count(master);
@@ -197,9 +197,9 @@
     data.phase = (uint8_t) master->phase;
     data.active = (uint8_t) master->active;
     data.scan_busy = master->scan_busy;
-    up(&master->master_sem);
-
-    if (down_interruptible(&master->device_sem))
+    ec_mutex_unlock(&master->master_mutex);
+
+    if (ec_mutex_lock_interruptible(&master->device_mutex))
         return -EINTR;
 
     if (master->main_device.dev) {
@@ -242,7 +242,7 @@
         data.devices[1].loss_rates[i] = master->backup_device.loss_rates[i];
     }
 
-    up(&master->device_sem);
+    ec_mutex_unlock(&master->device_mutex);
 
     data.app_time = master->app_time;
     data.ref_clock =
@@ -271,12 +271,12 @@
         return -EFAULT;
     }
 
-    if (down_interruptible(&master->master_sem))
+    if (ec_mutex_lock_interruptible(&master->master_mutex))
         return -EINTR;
 
     if (!(slave = ec_master_find_slave_const(
                     master, 0, data.position))) {
-        up(&master->master_sem);
+        ec_mutex_unlock(&master->master_mutex);
         EC_MASTER_ERR(master, "Slave %u does not exist!\n", data.position);
         return -EINVAL;
     }
@@ -330,7 +330,7 @@
     ec_cdev_strcpy(data.order, slave->sii.order);
     ec_cdev_strcpy(data.name, slave->sii.name);
 
-    up(&master->master_sem);
+    ec_mutex_unlock(&master->master_mutex);
 
     if (copy_to_user((void __user *) arg, &data, sizeof(data)))
         return -EFAULT;
@@ -355,19 +355,19 @@
         return -EFAULT;
     }
 
-    if (down_interruptible(&master->master_sem))
+    if (ec_mutex_lock_interruptible(&master->master_mutex))
         return -EINTR;
 
     if (!(slave = ec_master_find_slave_const(
                     master, 0, data.slave_position))) {
-        up(&master->master_sem);
+        ec_mutex_unlock(&master->master_mutex);
         EC_MASTER_ERR(master, "Slave %u does not exist!\n",
                 data.slave_position);
         return -EINVAL;
     }
 
     if (data.sync_index >= slave->sii.sync_count) {
-        up(&master->master_sem);
+        ec_mutex_unlock(&master->master_mutex);
         EC_SLAVE_ERR(slave, "Sync manager %u does not exist!\n",
                 data.sync_index);
         return -EINVAL;
@@ -381,7 +381,7 @@
     data.enable = sync->enable;
     data.pdo_count = ec_pdo_list_count(&sync->pdos);
 
-    up(&master->master_sem);
+    ec_mutex_unlock(&master->master_mutex);
 
     if (copy_to_user((void __user *) arg, &data, sizeof(data)))
         return -EFAULT;
@@ -407,19 +407,19 @@
         return -EFAULT;
     }
 
-    if (down_interruptible(&master->master_sem))
+    if (ec_mutex_lock_interruptible(&master->master_mutex))
         return -EINTR;
 
     if (!(slave = ec_master_find_slave_const(
                     master, 0, data.slave_position))) {
-        up(&master->master_sem);
+        ec_mutex_unlock(&master->master_mutex);
         EC_MASTER_ERR(master, "Slave %u does not exist!\n",
                 data.slave_position);
         return -EINVAL;
     }
 
     if (data.sync_index >= slave->sii.sync_count) {
-        up(&master->master_sem);
+        ec_mutex_unlock(&master->master_mutex);
         EC_SLAVE_ERR(slave, "Sync manager %u does not exist!\n",
                 data.sync_index);
         return -EINVAL;
@@ -428,7 +428,7 @@
     sync = &slave->sii.syncs[data.sync_index];
     if (!(pdo = ec_pdo_list_find_pdo_by_pos_const(
                     &sync->pdos, data.pdo_pos))) {
-        up(&master->master_sem);
+        ec_mutex_unlock(&master->master_mutex);
         EC_SLAVE_ERR(slave, "Sync manager %u does not contain a PDO with "
                 "position %u!\n", data.sync_index, data.pdo_pos);
         return -EINVAL;
@@ -438,7 +438,7 @@
     data.entry_count = ec_pdo_entry_count(pdo);
     ec_cdev_strcpy(data.name, pdo->name);
 
-    up(&master->master_sem);
+    ec_mutex_unlock(&master->master_mutex);
 
     if (copy_to_user((void __user *) arg, &data, sizeof(data)))
         return -EFAULT;
@@ -465,19 +465,19 @@
         return -EFAULT;
     }
 
-    if (down_interruptible(&master->master_sem))
+    if (ec_mutex_lock_interruptible(&master->master_mutex))
         return -EINTR;
 
     if (!(slave = ec_master_find_slave_const(
                     master, 0, data.slave_position))) {
-        up(&master->master_sem);
+        ec_mutex_unlock(&master->master_mutex);
         EC_MASTER_ERR(master, "Slave %u does not exist!\n",
                 data.slave_position);
         return -EINVAL;
     }
 
     if (data.sync_index >= slave->sii.sync_count) {
-        up(&master->master_sem);
+        ec_mutex_unlock(&master->master_mutex);
         EC_SLAVE_ERR(slave, "Sync manager %u does not exist!\n",
                 data.sync_index);
         return -EINVAL;
@@ -486,7 +486,7 @@
     sync = &slave->sii.syncs[data.sync_index];
     if (!(pdo = ec_pdo_list_find_pdo_by_pos_const(
                     &sync->pdos, data.pdo_pos))) {
-        up(&master->master_sem);
+        ec_mutex_unlock(&master->master_mutex);
         EC_SLAVE_ERR(slave, "Sync manager %u does not contain a PDO with "
                 "position %u!\n", data.sync_index, data.pdo_pos);
         return -EINVAL;
@@ -494,7 +494,7 @@
 
     if (!(entry = ec_pdo_find_entry_by_pos_const(
                     pdo, data.entry_pos))) {
-        up(&master->master_sem);
+        ec_mutex_unlock(&master->master_mutex);
         EC_SLAVE_ERR(slave, "PDO 0x%04X does not contain an entry with "
                 "position %u!\n", data.pdo_pos, data.entry_pos);
         return -EINVAL;
@@ -505,7 +505,7 @@
     data.bit_length = entry->bit_length;
     ec_cdev_strcpy(data.name, entry->name);
 
-    up(&master->master_sem);
+    ec_mutex_unlock(&master->master_mutex);
 
     if (copy_to_user((void __user *) arg, &data, sizeof(data)))
         return -EFAULT;
@@ -529,22 +529,23 @@
         return -EFAULT;
     }
 
-    if (down_interruptible(&master->master_sem))
+    if (ec_mutex_lock_interruptible(&master->master_mutex))
         return -EINTR;
 
     if (!(domain = ec_master_find_domain_const(master, data.index))) {
-        up(&master->master_sem);
+        ec_mutex_unlock(&master->master_mutex);
         EC_MASTER_ERR(master, "Domain %u does not exist!\n", data.index);
         return -EINVAL;
     }
 
     data.data_size = domain->data_size;
+    data.tx_size = domain->tx_size;
     data.logical_base_address = domain->logical_base_address;
     data.working_counter = domain->working_counter;
     data.expected_working_counter = domain->expected_working_counter;
     data.fmmu_count = ec_domain_fmmu_count(domain);
 
-    up(&master->master_sem);
+    ec_mutex_unlock(&master->master_mutex);
 
     if (copy_to_user((void __user *) arg, &data, sizeof(data)))
         return -EFAULT;
@@ -569,18 +570,18 @@
         return -EFAULT;
     }
 
-    if (down_interruptible(&master->master_sem))
+    if (ec_mutex_lock_interruptible(&master->master_mutex))
         return -EINTR;
 
     if (!(domain = ec_master_find_domain_const(master, data.domain_index))) {
-        up(&master->master_sem);
+        ec_mutex_unlock(&master->master_mutex);
         EC_MASTER_ERR(master, "Domain %u does not exist!\n",
                 data.domain_index);
         return -EINVAL;
     }
 
     if (!(fmmu = ec_domain_find_fmmu(domain, data.fmmu_index))) {
-        up(&master->master_sem);
+        ec_mutex_unlock(&master->master_mutex);
         EC_MASTER_ERR(master, "Domain %u has less than %u"
                 " fmmu configurations.\n",
                 data.domain_index, data.fmmu_index + 1);
@@ -592,9 +593,10 @@
     data.sync_index = fmmu->sync_index;
     data.dir = fmmu->dir;
     data.logical_address = fmmu->logical_start_address;
+    data.domain_address = fmmu->domain_address;
     data.data_size = fmmu->data_size;
 
-    up(&master->master_sem);
+    ec_mutex_unlock(&master->master_mutex);
 
     if (copy_to_user((void __user *) arg, &data, sizeof(data)))
         return -EFAULT;
@@ -618,18 +620,18 @@
         return -EFAULT;
     }
 
-    if (down_interruptible(&master->master_sem))
+    if (ec_mutex_lock_interruptible(&master->master_mutex))
         return -EINTR;
 
     if (!(domain = ec_master_find_domain_const(master, data.domain_index))) {
-        up(&master->master_sem);
+        ec_mutex_unlock(&master->master_mutex);
         EC_MASTER_ERR(master, "Domain %u does not exist!\n",
                 data.domain_index);
         return -EINVAL;
     }
 
     if (domain->data_size != data.data_size) {
-        up(&master->master_sem);
+        ec_mutex_unlock(&master->master_mutex);
         EC_MASTER_ERR(master, "Data size mismatch %u/%zu!\n",
                 data.data_size, domain->data_size);
         return -EFAULT;
@@ -637,11 +639,11 @@
 
     if (copy_to_user((void __user *) data.target, domain->data,
                 domain->data_size)) {
-        up(&master->master_sem);
-        return -EFAULT;
-    }
-
-    up(&master->master_sem);
+        ec_mutex_unlock(&master->master_mutex);
+        return -EFAULT;
+    }
+
+    ec_mutex_unlock(&master->master_mutex);
     return 0;
 }
 
@@ -686,12 +688,12 @@
         return -EFAULT;
     }
 
-    if (down_interruptible(&master->master_sem))
+    if (ec_mutex_lock_interruptible(&master->master_mutex))
         return -EINTR;
 
     if (!(slave = ec_master_find_slave(
                     master, 0, data.slave_position))) {
-        up(&master->master_sem);
+        ec_mutex_unlock(&master->master_mutex);
         EC_MASTER_ERR(master, "Slave %u does not exist!\n",
                 data.slave_position);
         return -EINVAL;
@@ -699,7 +701,7 @@
 
     ec_slave_request_state(slave, data.al_state);
 
-    up(&master->master_sem);
+    ec_mutex_unlock(&master->master_mutex);
     return 0;
 }
 
@@ -720,12 +722,12 @@
         return -EFAULT;
     }
 
-    if (down_interruptible(&master->master_sem))
+    if (ec_mutex_lock_interruptible(&master->master_mutex))
         return -EINTR;
 
     if (!(slave = ec_master_find_slave_const(
                     master, 0, data.slave_position))) {
-        up(&master->master_sem);
+        ec_mutex_unlock(&master->master_mutex);
         EC_MASTER_ERR(master, "Slave %u does not exist!\n",
                 data.slave_position);
         return -EINVAL;
@@ -733,7 +735,7 @@
 
     if (!(sdo = ec_slave_get_sdo_by_pos_const(
                     slave, data.sdo_position))) {
-        up(&master->master_sem);
+        ec_mutex_unlock(&master->master_mutex);
         EC_SLAVE_ERR(slave, "SDO %u does not exist!\n", data.sdo_position);
         return -EINVAL;
     }
@@ -742,7 +744,7 @@
     data.max_subindex = sdo->max_subindex;
     ec_cdev_strcpy(data.name, sdo->name);
 
-    up(&master->master_sem);
+    ec_mutex_unlock(&master->master_mutex);
 
     if (copy_to_user((void __user *) arg, &data, sizeof(data)))
         return -EFAULT;
@@ -768,12 +770,12 @@
         return -EFAULT;
     }
 
-    if (down_interruptible(&master->master_sem))
+    if (ec_mutex_lock_interruptible(&master->master_mutex))
         return -EINTR;
 
     if (!(slave = ec_master_find_slave_const(
                     master, 0, data.slave_position))) {
-        up(&master->master_sem);
+        ec_mutex_unlock(&master->master_mutex);
         EC_MASTER_ERR(master, "Slave %u does not exist!\n",
                 data.slave_position);
         return -EINVAL;
@@ -782,14 +784,14 @@
     if (data.sdo_spec <= 0) {
         if (!(sdo = ec_slave_get_sdo_by_pos_const(
                         slave, -data.sdo_spec))) {
-            up(&master->master_sem);
+            ec_mutex_unlock(&master->master_mutex);
             EC_SLAVE_ERR(slave, "SDO %u does not exist!\n", -data.sdo_spec);
             return -EINVAL;
         }
     } else {
         if (!(sdo = ec_slave_get_sdo_const(
                         slave, data.sdo_spec))) {
-            up(&master->master_sem);
+            ec_mutex_unlock(&master->master_mutex);
             EC_SLAVE_ERR(slave, "SDO 0x%04X does not exist!\n",
                     data.sdo_spec);
             return -EINVAL;
@@ -798,7 +800,7 @@
 
     if (!(entry = ec_sdo_get_entry_const(
                     sdo, data.sdo_entry_subindex))) {
-        up(&master->master_sem);
+        ec_mutex_unlock(&master->master_mutex);
         EC_SLAVE_ERR(slave, "SDO entry 0x%04X:%02X does not exist!\n",
                 sdo->index, data.sdo_entry_subindex);
         return -EINVAL;
@@ -820,7 +822,7 @@
         entry->write_access[EC_SDO_ENTRY_ACCESS_OP];
     ec_cdev_strcpy(data.description, entry->description);
 
-    up(&master->master_sem);
+    ec_mutex_unlock(&master->master_mutex);
 
     if (copy_to_user((void __user *) arg, &data, sizeof(data)))
         return -EFAULT;
@@ -838,78 +840,74 @@
         )
 {
     ec_ioctl_slave_sdo_upload_t data;
-    ec_master_sdo_request_t request;
+    ec_master_sdo_request_t* request;
     int retval;
 
     if (copy_from_user(&data, (void __user *) arg, sizeof(data))) {
         return -EFAULT;
     }
 
-    ec_sdo_request_init(&request.req);
-    ec_sdo_request_address(&request.req,
-            data.sdo_index, data.sdo_entry_subindex);
-    ecrt_sdo_request_read(&request.req);
-
-    if (down_interruptible(&master->master_sem))
-        return -EINTR;
-
-    if (!(request.slave = ec_master_find_slave(
+    request = kmalloc(sizeof(*request), GFP_KERNEL);
+    if (!request)
+        return -ENOMEM;
+    kref_init(&request->refcount);
+
+    ec_sdo_request_init(&request->req);
+    ec_sdo_request_address(&request->req,
+        data.sdo_index, data.sdo_entry_subindex);
+    ecrt_sdo_request_read(&request->req);
+
+    if (ec_mutex_lock_interruptible(&master->master_mutex))  {
+        kref_put(&request->refcount,ec_master_sdo_request_release);
+        return -EINTR;
+    }
+    if (!(request->slave = ec_master_find_slave(
                     master, 0, data.slave_position))) {
-        up(&master->master_sem);
-        ec_sdo_request_clear(&request.req);
+        ec_mutex_unlock(&master->master_mutex);
         EC_MASTER_ERR(master, "Slave %u does not exist!\n",
                 data.slave_position);
-        return -EINVAL;
-    }
-
-    EC_SLAVE_DBG(request.slave, 1, "Schedule SDO upload request.\n");
+        kref_put(&request->refcount,ec_master_sdo_request_release);
+        return -EINVAL;
+    }
+
+    EC_SLAVE_DBG(request->slave, 1, "Schedule SDO upload request %p.\n",request);
 
     // schedule request.
-    list_add_tail(&request.list, &request.slave->slave_sdo_requests);
-
-    up(&master->master_sem);
+    kref_get(&request->refcount);
+    list_add_tail(&request->list, &request->slave->slave_sdo_requests);
+
+    ec_mutex_unlock(&master->master_mutex);
 
     // wait for processing through FSM
-    if (wait_event_interruptible(request.slave->sdo_queue,
-                request.req.state != EC_INT_REQUEST_QUEUED)) {
+    if (wait_event_interruptible(request->slave->sdo_queue,
+          ((request->req.state == EC_INT_REQUEST_SUCCESS) || (request->req.state == EC_INT_REQUEST_FAILURE)))) {
         // interrupted by signal
-        down(&master->master_sem);
-        if (request.req.state == EC_INT_REQUEST_QUEUED) {
-            list_del(&request.list);
-            up(&master->master_sem);
-            ec_sdo_request_clear(&request.req);
-            return -EINTR;
-        }
-        // request already processing: interrupt not possible.
-        up(&master->master_sem);
-    }
-
-    // wait until master FSM has finished processing
-    wait_event(request.slave->sdo_queue,
-            request.req.state != EC_INT_REQUEST_BUSY);
-
-    EC_SLAVE_DBG(request.slave, 1, "Finished SDO upload request.\n");
-
-    data.abort_code = request.req.abort_code;
-
-    if (request.req.state != EC_INT_REQUEST_SUCCESS) {
+        kref_put(&request->refcount,ec_master_sdo_request_release);
+        return -EINTR;
+    }
+
+    EC_SLAVE_DBG(request->slave, 1, "Finished SDO upload request %p.\n",request);
+
+    data.abort_code = request->req.abort_code;
+
+    if (request->req.state != EC_INT_REQUEST_SUCCESS) {
         data.data_size = 0;
-        if (request.req.errno) {
-            retval = -request.req.errno;
+        if (request->req.errno) {
+            retval = -request->req.errno;
         } else {
             retval = -EIO;
         }
     } else {
-        if (request.req.data_size > data.target_size) {
+        if (request->req.data_size > data.target_size) {
             EC_MASTER_ERR(master, "Buffer too small.\n");
-            ec_sdo_request_clear(&request.req);
+            kref_put(&request->refcount,ec_master_sdo_request_release);
             return -EOVERFLOW;
         }
-        data.data_size = request.req.data_size;
+        data.data_size = request->req.data_size;
 
         if (copy_to_user((void __user *) data.target,
-                    request.req.data, data.data_size)) {
-            ec_sdo_request_clear(&request.req);
+                    request->req.data, data.data_size)) {
+            kref_put(&request->refcount,ec_master_sdo_request_release);
             return -EFAULT;
         }
         retval = 0;
@@ -919,7 +917,7 @@
         retval = -EFAULT;
     }
 
-    ec_sdo_request_clear(&request.req);
+    kref_put(&request->refcount,ec_master_sdo_request_release);
     return retval;
 }
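The SDO transfer requests are no longer stack objects cleared with ec_sdo_request_clear() on every exit path; they are kmalloc()ed and reference-counted: kref_init() creates the ioctl path's reference, kref_get() before list_add_tail() adds one for the master FSM, and each return path drops the caller's reference with kref_put(). The last reference triggers the release callback, so an interrupted caller no longer has to dequeue the request under the lock. A minimal sketch of such a callback, assuming the real ec_master_sdo_request_release() (defined elsewhere in this changeset) only clears the embedded request and frees the allocation:

#include <linux/kref.h>
#include <linux/slab.h>
#include "master.h" /* ec_master_sdo_request_t; assumed location of the type */

void ec_master_sdo_request_release(struct kref *ref)
{
    ec_master_sdo_request_t *request =
        container_of(ref, ec_master_sdo_request_t, refcount);

    ec_sdo_request_clear(&request->req); /* frees the SDO data buffer */
    kfree(request);
}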
 
@@ -933,7 +931,7 @@
         )
 {
     ec_ioctl_slave_sdo_download_t data;
-    ec_master_sdo_request_t request;
+    ec_master_sdo_request_t* request;
     int retval;
 
     if (copy_from_user(&data, (void __user *) arg, sizeof(data))) {
@@ -946,67 +944,63 @@
         return -EINVAL;
     }
 
-    ec_sdo_request_init(&request.req);
-    ec_sdo_request_address(&request.req,
+    request = kmalloc(sizeof(*request), GFP_KERNEL);
+    if (!request)
+        return -ENOMEM;
+    kref_init(&request->refcount);
+
+    ec_sdo_request_init(&request->req);
+    ec_sdo_request_address(&request->req,
             data.sdo_index, data.sdo_entry_subindex);
-    if (ec_sdo_request_alloc(&request.req, data.data_size)) {
-        ec_sdo_request_clear(&request.req);
+    if (ec_sdo_request_alloc(&request->req, data.data_size)) {
+        kref_put(&request->refcount,ec_master_sdo_request_release);
         return -ENOMEM;
     }
-    if (copy_from_user(request.req.data,
+    if (copy_from_user(request->req.data,
                 (void __user *) data.data, data.data_size)) {
-        ec_sdo_request_clear(&request.req);
-        return -EFAULT;
-    }
-    request.req.data_size = data.data_size;
-    ecrt_sdo_request_write(&request.req);
-
-    if (down_interruptible(&master->master_sem))
-        return -EINTR;
-
-    if (!(request.slave = ec_master_find_slave(
+        kref_put(&request->refcount,ec_master_sdo_request_release);
+        return -EFAULT;
+    }
+    request->req.data_size = data.data_size;
+    ecrt_sdo_request_write(&request->req);
+
+    if (ec_mutex_lock_interruptible(&master->master_mutex)) {
+        kref_put(&request->refcount,ec_master_sdo_request_release);
+        return -EINTR;
+    }
+    if (!(request->slave = ec_master_find_slave(
                     master, 0, data.slave_position))) {
-        up(&master->master_sem);
+        ec_mutex_unlock(&master->master_mutex);
         EC_MASTER_ERR(master, "Slave %u does not exist!\n",
                 data.slave_position);
-        ec_sdo_request_clear(&request.req);
+        kref_put(&request->refcount,ec_master_sdo_request_release);
         return -EINVAL;
     }
     
-    EC_SLAVE_DBG(request.slave, 1, "Schedule SDO download request.\n");
+    EC_SLAVE_DBG(request->slave, 1, "Schedule SDO download request %p.\n",request);
 
     // schedule request.
-    list_add_tail(&request.list, &request.slave->slave_sdo_requests);
-
-    up(&master->master_sem);
+    kref_get(&request->refcount);
+    list_add_tail(&request->list, &request->slave->slave_sdo_requests);
+
+    ec_mutex_unlock(&master->master_mutex);
 
     // wait for processing through FSM
-    if (wait_event_interruptible(request.slave->sdo_queue,
-                request.req.state != EC_INT_REQUEST_QUEUED)) {
+    if (wait_event_interruptible(request->slave->sdo_queue,
+       ((request->req.state == EC_INT_REQUEST_SUCCESS) || (request->req.state == EC_INT_REQUEST_FAILURE)))) {
         // interrupted by signal
-        down(&master->master_sem);
-        if (request.req.state == EC_INT_REQUEST_QUEUED) {
-            list_del(&request.list);
-            up(&master->master_sem);
-            ec_sdo_request_clear(&request.req);
-            return -EINTR;
-        }
-        // request already processing: interrupt not possible.
-        up(&master->master_sem);
-    }
-
-    // wait until master FSM has finished processing
-    wait_event(request.slave->sdo_queue,
-            request.req.state != EC_INT_REQUEST_BUSY);
-
-    EC_SLAVE_DBG(request.slave, 1, "Finished SDO download request.\n");
-
-    data.abort_code = request.req.abort_code;
-
-    if (request.req.state == EC_INT_REQUEST_SUCCESS) {
+        kref_put(&request->refcount,ec_master_sdo_request_release);
+        return -EINTR;
+    }
+
+    EC_SLAVE_DBG(request->slave, 1, "Finished SDO download request %p.\n",request);
+
+    data.abort_code = request->req.abort_code;
+
+    if (request->req.state == EC_INT_REQUEST_SUCCESS) {
         retval = 0;
-    } else if (request.req.errno) {
-        retval = -request.req.errno;
+    } else if (request->req.errno) {
+        retval = -request->req.errno;
     } else {
         retval = -EIO;
     }
@@ -1015,7 +1009,7 @@
         retval = -EFAULT;
     }
 
-    ec_sdo_request_clear(&request.req);
+    kref_put(&request->refcount,ec_master_sdo_request_release);
     return retval;
 }
 
@@ -1036,12 +1030,12 @@
         return -EFAULT;
     }
 
-    if (down_interruptible(&master->master_sem))
+    if (ec_mutex_lock_interruptible(&master->master_mutex))
         return -EINTR;
 
     if (!(slave = ec_master_find_slave_const(
                     master, 0, data.slave_position))) {
-        up(&master->master_sem);
+        ec_mutex_unlock(&master->master_mutex);
         EC_MASTER_ERR(master, "Slave %u does not exist!\n",
                 data.slave_position);
         return -EINVAL;
@@ -1049,7 +1043,7 @@
 
     if (!data.nwords
             || data.offset + data.nwords > slave->sii_nwords) {
-        up(&master->master_sem);
+        ec_mutex_unlock(&master->master_mutex);
         EC_SLAVE_ERR(slave, "Invalid SII read offset/size %u/%u for slave SII"
                 " size %zu!\n", data.offset, data.nwords, slave->sii_nwords);
         return -EINVAL;
@@ -1061,7 +1055,7 @@
     else
         retval = 0;
 
-    up(&master->master_sem);
+    ec_mutex_unlock(&master->master_mutex);
     return retval;
 }
 
@@ -1078,7 +1072,8 @@
     ec_slave_t *slave;
     unsigned int byte_size;
     uint16_t *words;
-    ec_sii_write_request_t request;
+    ec_sii_write_request_t* request;
+    int retval;
 
     if (copy_from_user(&data, (void __user *) arg, sizeof(data))) {
         return -EFAULT;
@@ -1094,58 +1089,57 @@
         return -ENOMEM;
     }
 
+    request = kmalloc(sizeof(*request), GFP_KERNEL);
+    if (!request)
+        return -ENOMEM;
+    kref_init(&request->refcount);
+    // init SII write request
+    INIT_LIST_HEAD(&request->list);
+    request->words = words; // now "owned" by request, see ec_master_sii_write_request_release
+    request->offset = data.offset;
+    request->nwords = data.nwords;
+
     if (copy_from_user(words,
                 (void __user *) data.words, byte_size)) {
-        kfree(words);
-        return -EFAULT;
-    }
-
-    if (down_interruptible(&master->master_sem))
-        return -EINTR;
-
+        kref_put(&request->refcount,ec_master_sii_write_request_release);
+        return -EFAULT;
+    }
+
+    if (ec_mutex_lock_interruptible(&master->master_mutex)) {
+        kref_put(&request->refcount,ec_master_sii_write_request_release);
+        return -EINTR;
+    }
     if (!(slave = ec_master_find_slave(
                     master, 0, data.slave_position))) {
-        up(&master->master_sem);
+        ec_mutex_unlock(&master->master_mutex);
         EC_MASTER_ERR(master, "Slave %u does not exist!\n",
                 data.slave_position);
-        kfree(words);
-        return -EINVAL;
-    }
-
-    // init SII write request
-    INIT_LIST_HEAD(&request.list);
-    request.slave = slave;
-    request.words = words;
-    request.offset = data.offset;
-    request.nwords = data.nwords;
-    request.state = EC_INT_REQUEST_QUEUED;
+        kref_put(&request->refcount,ec_master_sii_write_request_release);
+        return -EINVAL;
+    }
+
+    request->slave = slave;
+    request->state = EC_INT_REQUEST_QUEUED;
 
     // schedule SII write request.
-    list_add_tail(&request.list, &master->sii_requests);
-
-    up(&master->master_sem);
+    list_add_tail(&request->list, &master->sii_requests);
+    kref_get(&request->refcount);
+
+    ec_mutex_unlock(&master->master_mutex);
 
     // wait for processing through FSM
     if (wait_event_interruptible(master->sii_queue,
-                request.state != EC_INT_REQUEST_QUEUED)) {
-        // interrupted by signal
-        down(&master->master_sem);
-        if (request.state == EC_INT_REQUEST_QUEUED) {
-            // abort request
-            list_del(&request.list);
-            up(&master->master_sem);
-            kfree(words);
-            return -EINTR;
-        }
-        up(&master->master_sem);
-    }
-
-    // wait until master FSM has finished processing
-    wait_event(master->sii_queue, request.state != EC_INT_REQUEST_BUSY);
-
-    kfree(words);
-
-    return request.state == EC_INT_REQUEST_SUCCESS ? 0 : -EIO;
+          ((request->state == EC_INT_REQUEST_SUCCESS) || (request->state == EC_INT_REQUEST_FAILURE)))) {
+           // interrupted by signal
+           kref_put(&request->refcount,ec_master_sii_write_request_release);
+           return -EINTR;
+    }
+
+    retval = request->state == EC_INT_REQUEST_SUCCESS ? 0 : -EIO;
+    kref_put(&request->refcount,ec_master_sii_write_request_release);
+
+    return retval;
 }
 
 /*****************************************************************************/
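In the SII write handler above (and the register read/write handlers that follow), the payload buffer is handed over to the reference-counted request, as the "owned by request" comments note, so the explicit kfree(words) and kfree(contents) calls disappear from the error paths. A plausible shape for such a release callback, assuming the real ec_master_sii_write_request_release() (not shown in this hunk) does nothing more than free the payload and the request:

#include <linux/kref.h>
#include <linux/slab.h>
/* ec_sii_write_request_t comes from the master/slave headers (not shown here). */

void ec_master_sii_write_request_release(struct kref *ref)
{
    ec_sii_write_request_t *request =
        container_of(ref, ec_sii_write_request_t, refcount);

    kfree(request->words); /* buffer handed over by the ioctl handler */
    kfree(request);
}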
@@ -1160,7 +1154,8 @@
     ec_ioctl_slave_reg_t data;
     ec_slave_t *slave;
     uint8_t *contents;
-    ec_reg_request_t request;
+    ec_reg_request_t* request;
+    int retval;
 
     if (copy_from_user(&data, (void __user *) arg, sizeof(data))) {
         return -EFAULT;
@@ -1175,56 +1170,58 @@
         return -ENOMEM;
     }
 
-    if (down_interruptible(&master->master_sem))
-        return -EINTR;
-
+    request = kmalloc(sizeof(*request), GFP_KERNEL);
+    if (!request)
+        return -ENOMEM;
+    kref_init(&request->refcount);
+
+    // init register request
+    INIT_LIST_HEAD(&request->list);
+    request->dir = EC_DIR_INPUT;
+    request->data = contents;   // now "owned" by request, see ec_master_reg_request_release
+    request->offset = data.offset;
+    request->length = data.length;
+
+    if (ec_mutex_lock_interruptible(&master->master_mutex)) {
+        kref_put(&request->refcount,ec_master_reg_request_release);
+        return -EINTR;
+    }
     if (!(slave = ec_master_find_slave(
                     master, 0, data.slave_position))) {
-        up(&master->master_sem);
+        ec_mutex_unlock(&master->master_mutex);
         EC_MASTER_ERR(master, "Slave %u does not exist!\n",
                 data.slave_position);
-        return -EINVAL;
-    }
-
-    // init register request
-    INIT_LIST_HEAD(&request.list);
-    request.slave = slave;
-    request.dir = EC_DIR_INPUT;
-    request.data = contents;
-    request.offset = data.offset;
-    request.length = data.length;
-    request.state = EC_INT_REQUEST_QUEUED;
+        kref_put(&request->refcount,ec_master_reg_request_release);
+        return -EINVAL;
+    }
+
+    request->slave = slave;
+    request->state = EC_INT_REQUEST_QUEUED;
 
     // schedule request.
-    list_add_tail(&request.list, &master->reg_requests);
-
-    up(&master->master_sem);
+    list_add_tail(&request->list, &master->reg_requests);
+    kref_get(&request->refcount);
+
+    ec_mutex_unlock(&master->master_mutex);
 
     // wait for processing through FSM
     if (wait_event_interruptible(master->reg_queue,
-                request.state != EC_INT_REQUEST_QUEUED)) {
-        // interrupted by signal
-        down(&master->master_sem);
-        if (request.state == EC_INT_REQUEST_QUEUED) {
-            // abort request
-            list_del(&request.list);
-            up(&master->master_sem);
-            kfree(contents);
-            return -EINTR;
+          ((request->state == EC_INT_REQUEST_SUCCESS) || (request->state == EC_INT_REQUEST_FAILURE)))) {
+           // interrupted by signal
+           kref_put(&request->refcount,ec_master_reg_request_release);
+           return -EINTR;
+    }
+
+    if (request->state == EC_INT_REQUEST_SUCCESS) {
+        if (copy_to_user((void __user *) data.data, request->data, data.length)) {
+            kref_put(&request->refcount,ec_master_reg_request_release);
+            return -EFAULT;
         }
-        up(&master->master_sem);
-    }
-
-    // wait until master FSM has finished processing
-    wait_event(master->reg_queue, request.state != EC_INT_REQUEST_BUSY);
-
-    if (request.state == EC_INT_REQUEST_SUCCESS) {
-        if (copy_to_user((void __user *) data.data, contents, data.length))
-            return -EFAULT;
-    }
-    kfree(contents);
-
-    return request.state == EC_INT_REQUEST_SUCCESS ? 0 : -EIO;
+    }
+    retval = request->state == EC_INT_REQUEST_SUCCESS ? 0 : -EIO;
+
+    kref_put(&request->refcount,ec_master_reg_request_release);
+    return retval;
 }
 
 /*****************************************************************************/
@@ -1239,7 +1236,8 @@
     ec_ioctl_slave_reg_t data;
     ec_slave_t *slave;
     uint8_t *contents;
-    ec_reg_request_t request;
+    ec_reg_request_t* request;
+    int retval;
 
     if (copy_from_user(&data, (void __user *) arg, sizeof(data))) {
         return -EFAULT;
@@ -1259,53 +1257,52 @@
         return -EFAULT;
     }
 
-    if (down_interruptible(&master->master_sem))
-        return -EINTR;
+    request = kmalloc(sizeof(*request), GFP_KERNEL);
+    if (!request)
+        return -ENOMEM;
+    kref_init(&request->refcount);
+    // init register request
+    INIT_LIST_HEAD(&request->list);
+    request->dir = EC_DIR_OUTPUT;
+    request->data = contents; // now "owned" by request, see ec_master_reg_request_release
+    request->offset = data.offset;
+    request->length = data.length;
+
+    if (ec_mutex_lock_interruptible(&master->master_mutex)) {
+        kref_put(&request->refcount,ec_master_reg_request_release);
+        return -EINTR;
+    }
 
     if (!(slave = ec_master_find_slave(
                     master, 0, data.slave_position))) {
-        up(&master->master_sem);
+        ec_mutex_unlock(&master->master_mutex);
         EC_MASTER_ERR(master, "Slave %u does not exist!\n",
                 data.slave_position);
-        kfree(contents);
-        return -EINVAL;
-    }
-
-    // init register request
-    INIT_LIST_HEAD(&request.list);
-    request.slave = slave;
-    request.dir = EC_DIR_OUTPUT;
-    request.data = contents;
-    request.offset = data.offset;
-    request.length = data.length;
-    request.state = EC_INT_REQUEST_QUEUED;
+        kref_put(&request->refcount,ec_master_reg_request_release);
+        return -EINVAL;
+    }
+
+    request->slave = slave;
+    request->state = EC_INT_REQUEST_QUEUED;
 
     // schedule request.
-    list_add_tail(&request.list, &master->reg_requests);
-
-    up(&master->master_sem);
+    list_add_tail(&request->list, &master->reg_requests);
+    kref_get(&request->refcount);
+
+    ec_mutex_unlock(&master->master_mutex);
 
     // wait for processing through FSM
     if (wait_event_interruptible(master->reg_queue,
-                request.state != EC_INT_REQUEST_QUEUED)) {
-        // interrupted by signal
-        down(&master->master_sem);
-        if (request.state == EC_INT_REQUEST_QUEUED) {
-            // abort request
-            list_del(&request.list);
-            up(&master->master_sem);
-            kfree(contents);
-            return -EINTR;
-        }
-        up(&master->master_sem);
-    }
-
-    // wait until master FSM has finished processing
-    wait_event(master->reg_queue, request.state != EC_INT_REQUEST_BUSY);
-
-    kfree(contents);
-
-    return request.state == EC_INT_REQUEST_SUCCESS ? 0 : -EIO;
+          ((request->state == EC_INT_REQUEST_SUCCESS) || (request->state == EC_INT_REQUEST_FAILURE)))) {
+           // interrupted by signal
+           kref_put(&request->refcount,ec_master_reg_request_release);
+           return -EINTR;
+    }
+
+    retval = request->state == EC_INT_REQUEST_SUCCESS ? 0 : -EIO;
+    kref_put(&request->refcount,ec_master_reg_request_release);
+    return retval;
 }
 
 /*****************************************************************************/
@@ -1325,12 +1322,12 @@
         return -EFAULT;
     }
 
-    if (down_interruptible(&master->master_sem))
+    if (ec_mutex_lock_interruptible(&master->master_mutex))
         return -EINTR;
 
     if (!(sc = ec_master_get_config_const(
                     master, data.config_index))) {
-        up(&master->master_sem);
+        ec_mutex_unlock(&master->master_mutex);
         EC_MASTER_ERR(master, "Slave config %u does not exist!\n",
                 data.config_index);
         return -EINVAL;
@@ -1356,7 +1353,7 @@
         data.dc_sync[i] = sc->dc_sync[i];
     }
 
-    up(&master->master_sem);
+    ec_mutex_unlock(&master->master_mutex);
 
     if (copy_to_user((void __user *) arg, &data, sizeof(data)))
         return -EFAULT;
@@ -1387,12 +1384,12 @@
         return -EINVAL;
     }
 
-    if (down_interruptible(&master->master_sem))
+    if (ec_mutex_lock_interruptible(&master->master_mutex))
         return -EINTR;
 
     if (!(sc = ec_master_get_config_const(
                     master, data.config_index))) {
-        up(&master->master_sem);
+        ec_mutex_unlock(&master->master_mutex);
         EC_MASTER_ERR(master, "Slave config %u does not exist!\n",
                 data.config_index);
         return -EINVAL;
@@ -1401,7 +1398,7 @@
     if (!(pdo = ec_pdo_list_find_pdo_by_pos_const(
                     &sc->sync_configs[data.sync_index].pdos,
                     data.pdo_pos))) {
-        up(&master->master_sem);
+        ec_mutex_unlock(&master->master_mutex);
         EC_MASTER_ERR(master, "Invalid PDO position!\n");
         return -EINVAL;
     }
@@ -1410,7 +1407,7 @@
     data.entry_count = ec_pdo_entry_count(pdo);
     ec_cdev_strcpy(data.name, pdo->name);
 
-    up(&master->master_sem);
+    ec_mutex_unlock(&master->master_mutex);
 
     if (copy_to_user((void __user *) arg, &data, sizeof(data)))
         return -EFAULT;
@@ -1442,12 +1439,12 @@
         return -EINVAL;
     }
 
-    if (down_interruptible(&master->master_sem))
+    if (ec_mutex_lock_interruptible(&master->master_mutex))
         return -EINTR;
 
     if (!(sc = ec_master_get_config_const(
                     master, data.config_index))) {
-        up(&master->master_sem);
+        ec_mutex_unlock(&master->master_mutex);
         EC_MASTER_ERR(master, "Slave config %u does not exist!\n",
                 data.config_index);
         return -EINVAL;
@@ -1456,14 +1453,14 @@
     if (!(pdo = ec_pdo_list_find_pdo_by_pos_const(
                     &sc->sync_configs[data.sync_index].pdos,
                     data.pdo_pos))) {
-        up(&master->master_sem);
+        ec_mutex_unlock(&master->master_mutex);
         EC_MASTER_ERR(master, "Invalid PDO position!\n");
         return -EINVAL;
     }
 
     if (!(entry = ec_pdo_find_entry_by_pos_const(
                     pdo, data.entry_pos))) {
-        up(&master->master_sem);
+        ec_mutex_unlock(&master->master_mutex);
         EC_MASTER_ERR(master, "Entry not found!\n");
         return -EINVAL;
     }
@@ -1473,7 +1470,7 @@
     data.bit_length = entry->bit_length;
     ec_cdev_strcpy(data.name, entry->name);
 
-    up(&master->master_sem);
+    ec_mutex_unlock(&master->master_mutex);
 
     if (copy_to_user((void __user *) arg, &data, sizeof(data)))
         return -EFAULT;
@@ -1498,12 +1495,12 @@
         return -EFAULT;
     }
 
-    if (down_interruptible(&master->master_sem))
+    if (ec_mutex_lock_interruptible(&master->master_mutex))
         return -EINTR;
 
     if (!(sc = ec_master_get_config_const(
                     master, data.config_index))) {
-        up(&master->master_sem);
+        ec_mutex_unlock(&master->master_mutex);
         EC_MASTER_ERR(master, "Slave config %u does not exist!\n",
                 data.config_index);
         return -EINVAL;
@@ -1511,7 +1508,7 @@
 
     if (!(req = ec_slave_config_get_sdo_by_pos_const(
                     sc, data.sdo_pos))) {
-        up(&master->master_sem);
+        ec_mutex_unlock(&master->master_mutex);
         EC_MASTER_ERR(master, "Invalid SDO position!\n");
         return -EINVAL;
     }
@@ -1522,7 +1519,7 @@
     memcpy(&data.data, req->data,
             min((u32) data.size, (u32) EC_MAX_SDO_DATA_SIZE));
 
-    up(&master->master_sem);
+    ec_mutex_unlock(&master->master_mutex);
 
     if (copy_to_user((void __user *) arg, &data, sizeof(data)))
         return -EFAULT;
@@ -1547,12 +1544,12 @@
         return -EFAULT;
     }
 
-    if (down_interruptible(&master->master_sem))
+    if (ec_mutex_lock_interruptible(&master->master_mutex))
         return -EINTR;
 
     if (!(sc = ec_master_get_config_const(
                     master, data.config_index))) {
-        up(&master->master_sem);
+        ec_mutex_unlock(&master->master_mutex);
         EC_MASTER_ERR(master, "Slave config %u does not exist!\n",
                 data.config_index);
         return -EINVAL;
@@ -1560,7 +1557,7 @@
 
     if (!(req = ec_slave_config_get_idn_by_pos_const(
                     sc, data.idn_pos))) {
-        up(&master->master_sem);
+        ec_mutex_unlock(&master->master_mutex);
         EC_MASTER_ERR(master, "Invalid IDN position!\n");
         return -EINVAL;
     }
@@ -1572,7 +1569,7 @@
     memcpy(&data.data, req->data,
             min((u32) data.size, (u32) EC_MAX_IDN_DATA_SIZE));
 
-    up(&master->master_sem);
+    ec_mutex_unlock(&master->master_mutex);
 
     if (copy_to_user((void __user *) arg, &data, sizeof(data)))
         return -EFAULT;
@@ -1598,11 +1595,11 @@
         return -EFAULT;
     }
 
-    if (down_interruptible(&master->master_sem))
+    if (ec_mutex_lock_interruptible(&master->master_mutex))
         return -EINTR;
 
     if (!(eoe = ec_master_get_eoe_handler_const(master, data.eoe_index))) {
-        up(&master->master_sem);
+        ec_mutex_unlock(&master->master_mutex);
         EC_MASTER_ERR(master, "EoE handler %u does not exist!\n",
                 data.eoe_index);
         return -EINVAL;
@@ -1615,14 +1612,14 @@
     }
     snprintf(data.name, EC_DATAGRAM_NAME_SIZE, eoe->dev->name);
     data.open = eoe->opened;
-    data.rx_bytes = eoe->stats.tx_bytes;
+    data.tx_bytes = eoe->stats.tx_bytes;
+    data.tx_rate = eoe->tx_rate;
+    data.rx_bytes = eoe->stats.rx_bytes;
     data.rx_rate = eoe->tx_rate;
-    data.tx_bytes = eoe->stats.rx_bytes;
-    data.tx_rate = eoe->tx_rate;
     data.tx_queued_frames = eoe->tx_queued_frames;
     data.tx_queue_size = eoe->tx_queue_size;
 
-    up(&master->master_sem);
+    ec_mutex_unlock(&master->master_mutex);
 
     if (copy_to_user((void __user *) arg, &data, sizeof(data)))
         return -EFAULT;
@@ -1704,7 +1701,7 @@
 
     data.config_index = 0;
 
-    if (down_interruptible(&master->master_sem))
+    if (ec_mutex_lock_interruptible(&master->master_mutex))
         return -EINTR;
 
     list_for_each_entry(entry, &master->configs, list) {
@@ -1713,7 +1710,7 @@
         data.config_index++;
     }
 
-    up(&master->master_sem);
+    ec_mutex_unlock(&master->master_mutex);
 
     if (copy_to_user((void __user *) arg, &data, sizeof(data)))
         return -EFAULT;
@@ -1742,14 +1739,14 @@
     
     priv->process_data_size = 0;
 
-    if (down_interruptible(&master->master_sem))
+    if (ec_mutex_lock_interruptible(&master->master_mutex))
         return -EINTR;
 
     list_for_each_entry(domain, &master->domains, list) {
         priv->process_data_size += ecrt_domain_size(domain);
     }
     
-    up(&master->master_sem);
+    ec_mutex_unlock(&master->master_mutex);
 
     if (priv->process_data_size) {
         priv->process_data = vmalloc(priv->process_data_size);
@@ -1767,9 +1764,6 @@
         }
     }
 
-    ecrt_master_callbacks(master, ec_master_internal_send_cb,
-            ec_master_internal_receive_cb, master);
-
     ret = ecrt_master_activate(master);
     if (ret < 0)
         return ret;
@@ -1815,10 +1809,10 @@
         return -EFAULT;
     }
 
-    if (down_interruptible(&master->master_sem))
+    if (ec_mutex_lock_interruptible(&master->master_mutex))
         return -EINTR;
     ec_master_set_send_interval(master,send_interval);
-    up(&master->master_sem);
+    ec_mutex_unlock(&master->master_mutex);
 
     return 0;
 }
@@ -1837,9 +1831,9 @@
     if (unlikely(!priv->requested))
         return -EPERM;
 
-    down(&master->io_sem);
+    ec_mutex_lock(&master->io_mutex);
     ecrt_master_send(master);
-    up(&master->io_sem);
+    ec_mutex_unlock(&master->io_mutex);
     return 0;
 }
 
@@ -1856,9 +1850,9 @@
     if (unlikely(!priv->requested))
         return -EPERM;
 
-    down(&master->io_sem);
+    ec_mutex_lock(&master->io_mutex);
     ecrt_master_receive(master);
-    up(&master->io_sem);
+    ec_mutex_unlock(&master->io_mutex);
     return 0;
 }
 
@@ -1887,6 +1881,29 @@
 
 /*****************************************************************************/
 
+/** Get the master state, aggregated over all configured slaves.
+ */
+int ec_cdev_ioctl_master_sc_state(
+        ec_master_t *master, /**< EtherCAT master. */
+        unsigned long arg, /**< ioctl() argument. */
+        ec_cdev_priv_t *priv /**< Private data structure of file handle. */
+        )
+{
+    ec_master_state_t data;
+
+    if (unlikely(!priv->requested))
+        return -EPERM;
+
+    ecrt_master_configured_slaves_state(master, &data);
+
+    if (copy_to_user((void __user *) arg, &data, sizeof(data)))
+        return -EFAULT;
+
+    return 0;
+}
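This new handler copies an ec_master_state_t aggregated over the configured slaves straight to the user pointer and, like the other runtime ioctls, requires the master to have been requested first (the priv->requested check). A userspace sketch; the command macro name is hypothetical, since the actual ioctl number lives in master/ioctl.h, which is not part of this hunk:

#include <stdio.h>
#include <sys/ioctl.h>
#include "ecrt.h"  /* ec_master_state_t */
/* EC_IOCTL_MASTER_SC_STATE: hypothetical name for the new ioctl command */

/* master_fd: descriptor of an already requested master (the handler
 * returns -EPERM otherwise). */
int print_configured_slaves_state(int master_fd)
{
    ec_master_state_t state;

    if (ioctl(master_fd, EC_IOCTL_MASTER_SC_STATE, &state) == -1) {
        perror("EC_IOCTL_MASTER_SC_STATE");
        return -1;
    }
    printf("%u slaves responding\n", state.slaves_responding);
    return 0;
}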
+
+/*****************************************************************************/
+
 /** Get the master state.
  */
 int ec_cdev_ioctl_app_time(
@@ -1921,9 +1938,9 @@
     if (unlikely(!priv->requested))
         return -EPERM;
 
-    down(&master->io_sem);
+    ec_mutex_lock(&master->io_mutex);
     ecrt_master_sync_reference_clock(master);
-    up(&master->io_sem);
+    ec_mutex_unlock(&master->io_mutex);
     return 0;
 }
 
@@ -1940,9 +1957,9 @@
     if (unlikely(!priv->requested))
         return -EPERM;
 
-    down(&master->io_sem);
+    ec_mutex_lock(&master->io_mutex);
     ecrt_master_sync_slave_clocks(master);
-    up(&master->io_sem);
+    ec_mutex_unlock(&master->io_mutex);
     return 0;
 }
 
@@ -1959,9 +1976,9 @@
     if (unlikely(!priv->requested))
         return -EPERM;
 
-    down(&master->io_sem);
+    ec_mutex_lock(&master->io_mutex);
     ecrt_master_sync_monitor_queue(master);
-    up(&master->io_sem);
+    ec_mutex_unlock(&master->io_mutex);
     return 0;
 }
 
@@ -1980,9 +1997,9 @@
     if (unlikely(!priv->requested))
         return -EPERM;
 
-    down(&master->io_sem);
+    ec_mutex_lock(&master->io_mutex);
     time_diff = ecrt_master_sync_monitor_process(master);
-    up(&master->io_sem);
+    ec_mutex_unlock(&master->io_mutex);
 
     if (copy_to_user((void __user *) arg, &time_diff, sizeof(time_diff)))
         return -EFAULT;
@@ -2003,9 +2020,9 @@
     if (unlikely(!priv->requested))
         return -EPERM;
 
-    down(&master->master_sem);
+    ec_mutex_lock(&master->master_mutex);
     ecrt_master_reset(master);
-    up(&master->master_sem);
+    ec_mutex_unlock(&master->master_mutex);
     return 0;
 }
 
@@ -2034,7 +2051,7 @@
         goto out_return;
     }
 
-    if (down_interruptible(&master->master_sem)) {
+    if (ec_mutex_lock_interruptible(&master->master_mutex)) {
         ret = -EINTR;
         goto out_return;
     }
@@ -2055,7 +2072,7 @@
     }
 
 out_up:
-    up(&master->master_sem);
+    ec_mutex_unlock(&master->master_mutex);
 out_return:
     return ret;
 }
@@ -2084,7 +2101,7 @@
         goto out_return;
     }
 
-    if (down_interruptible(&master->master_sem)) {
+    if (ec_mutex_lock_interruptible(&master->master_mutex)) {
         ret = -EINTR;
         goto out_return;
     }
@@ -2098,11 +2115,55 @@
             data.watchdog_divider, data.watchdog_intervals);
 
 out_up:
-    up(&master->master_sem);
+    ec_mutex_unlock(&master->master_mutex);
 out_return:
     return ret;
 }
 
+
+/*****************************************************************************/
+
+/** Configure whether a slave allows overlapping PDOs.
+ */
+int ec_cdev_ioctl_sc_allow_overlapping_pdos(
+        ec_master_t *master, /**< EtherCAT master. */
+        unsigned long arg, /**< ioctl() argument. */
+        ec_cdev_priv_t *priv /**< Private data structure of file handle. */
+        )
+{
+    ec_ioctl_config_t data;
+    ec_slave_config_t *sc;
+    int ret = 0;
+
+    if (unlikely(!priv->requested)) {
+        ret = -EPERM;
+        goto out_return;
+    }
+
+    if (copy_from_user(&data, (void __user *) arg, sizeof(data))) {
+        ret = -EFAULT;
+        goto out_return;
+    }
+
+    if (ec_mutex_lock_interruptible(&master->master_mutex)) {
+        ret = -EINTR;
+        goto out_return;
+    }
+
+    if (!(sc = ec_master_get_config(master, data.config_index))) {
+        ret = -ENOENT;
+        goto out_up;
+    }
+
+    ecrt_slave_config_overlapping_pdos(sc,
+            data.allow_overlapping_pdos);
+
+out_up:
+    ec_mutex_unlock(&master->master_mutex);
+out_return:
+    return ret;
+}
+
 /*****************************************************************************/
 
 /** Add a PDO to the assignment.
@@ -2122,15 +2183,15 @@
     if (copy_from_user(&data, (void __user *) arg, sizeof(data)))
         return -EFAULT;
 
-    if (down_interruptible(&master->master_sem))
+    if (ec_mutex_lock_interruptible(&master->master_mutex))
         return -EINTR;
 
     if (!(sc = ec_master_get_config(master, data.config_index))) {
-        up(&master->master_sem);
+        ec_mutex_unlock(&master->master_mutex);
         return -ENOENT;
     }
 
-    up(&master->master_sem); // FIXME
+    ec_mutex_unlock(&master->master_mutex); // FIXME
 
     return ecrt_slave_config_pdo_assign_add(sc, data.sync_index, data.index);
 }
@@ -2154,15 +2215,15 @@
     if (copy_from_user(&data, (void __user *) arg, sizeof(data)))
         return -EFAULT;
 
-    if (down_interruptible(&master->master_sem))
+    if (ec_mutex_lock_interruptible(&master->master_mutex))
         return -EINTR;
 
     if (!(sc = ec_master_get_config(master, data.config_index))) {
-        up(&master->master_sem);
+        ec_mutex_unlock(&master->master_mutex);
         return -ENOENT;
     }
 
-    up(&master->master_sem); // FIXME
+    ec_mutex_unlock(&master->master_mutex); // FIXME
 
     ecrt_slave_config_pdo_assign_clear(sc, data.sync_index);
     return 0;
@@ -2187,15 +2248,15 @@
     if (copy_from_user(&data, (void __user *) arg, sizeof(data)))
         return -EFAULT;
 
-    if (down_interruptible(&master->master_sem))
+    if (ec_mutex_lock_interruptible(&master->master_mutex))
         return -EINTR;
 
     if (!(sc = ec_master_get_config(master, data.config_index))) {
-        up(&master->master_sem);
+        ec_mutex_unlock(&master->master_mutex);
         return -ENOENT;
     }
 
-    up(&master->master_sem); // FIXME
+    ec_mutex_unlock(&master->master_mutex); // FIXME
 
     return ecrt_slave_config_pdo_mapping_add(sc, data.pdo_index,
             data.entry_index, data.entry_subindex, data.entry_bit_length);
@@ -2220,15 +2281,15 @@
     if (copy_from_user(&data, (void __user *) arg, sizeof(data)))
         return -EFAULT;
 
-    if (down_interruptible(&master->master_sem))
+    if (ec_mutex_lock_interruptible(&master->master_mutex))
         return -EINTR;
 
     if (!(sc = ec_master_get_config(master, data.config_index))) {
-        up(&master->master_sem);
+        ec_mutex_unlock(&master->master_mutex);
         return -ENOENT;
     }
 
-    up(&master->master_sem); // FIXME
+    ec_mutex_unlock(&master->master_mutex); // FIXME
 
     ecrt_slave_config_pdo_mapping_clear(sc, data.index);
     return 0;
@@ -2255,20 +2316,20 @@
     if (copy_from_user(&data, (void __user *) arg, sizeof(data)))
         return -EFAULT;
 
-    if (down_interruptible(&master->master_sem))
+    if (ec_mutex_lock_interruptible(&master->master_mutex))
         return -EINTR;
 
     if (!(sc = ec_master_get_config(master, data.config_index))) {
-        up(&master->master_sem);
+        ec_mutex_unlock(&master->master_mutex);
         return -ENOENT;
     }
 
     if (!(domain = ec_master_find_domain(master, data.domain_index))) {
-        up(&master->master_sem);
+        ec_mutex_unlock(&master->master_mutex);
         return -ENOENT;
     }
 
-    up(&master->master_sem); // FIXME
+    ec_mutex_unlock(&master->master_mutex); // FIXME
 
     ret = ecrt_slave_config_reg_pdo_entry(sc, data.entry_index,
             data.entry_subindex, domain, &data.bit_position);
@@ -2298,11 +2359,11 @@
     if (copy_from_user(&data, (void __user *) arg, sizeof(data)))
         return -EFAULT;
 
-    if (down_interruptible(&master->master_sem))
+    if (ec_mutex_lock_interruptible(&master->master_mutex))
         return -EINTR;
 
     if (!(sc = ec_master_get_config(master, data.config_index))) {
-        up(&master->master_sem);
+        ec_mutex_unlock(&master->master_mutex);
         return -ENOENT;
     }
 
@@ -2312,7 +2373,7 @@
             data.dc_sync[1].cycle_time,
             data.dc_sync[1].shift_time);
 
-    up(&master->master_sem);
+    ec_mutex_unlock(&master->master_mutex);
 
     return 0;
 }
@@ -2350,18 +2411,18 @@
         return -EFAULT;
     }
 
-    if (down_interruptible(&master->master_sem)) {
+    if (ec_mutex_lock_interruptible(&master->master_mutex)) {
         kfree(sdo_data);
         return -EINTR;
     }
 
     if (!(sc = ec_master_get_config(master, data.config_index))) {
-        up(&master->master_sem);
+        ec_mutex_unlock(&master->master_mutex);
         kfree(sdo_data);
         return -ENOENT;
     }
 
-    up(&master->master_sem); // FIXME
+    ec_mutex_unlock(&master->master_mutex); // FIXME
 
     if (data.complete_access) {
         ret = ecrt_slave_config_complete_sdo(sc,
@@ -2397,12 +2458,12 @@
 
     data.request_index = 0;
 
-    if (down_interruptible(&master->master_sem))
+    if (ec_mutex_lock_interruptible(&master->master_mutex))
         return -EINTR;
 
     sc = ec_master_get_config(master, data.config_index);
     if (!sc) {
-        up(&master->master_sem);
+        ec_mutex_unlock(&master->master_mutex);
         return -ENOENT;
     }
 
@@ -2410,7 +2471,7 @@
         data.request_index++;
     }
 
-    up(&master->master_sem);
+    ec_mutex_unlock(&master->master_mutex);
 
     req = ecrt_slave_config_create_sdo_request_err(sc, data.sdo_index,
             data.sdo_subindex, data.size);
@@ -2446,12 +2507,12 @@
 
     data.voe_index = 0;
 
-    if (down_interruptible(&master->master_sem))
+    if (ec_mutex_lock_interruptible(&master->master_mutex))
         return -EINTR;
 
     sc = ec_master_get_config(master, data.config_index);
     if (!sc) {
-        up(&master->master_sem);
+        ec_mutex_unlock(&master->master_mutex);
         return -ENOENT;
     }
 
@@ -2459,7 +2520,7 @@
         data.voe_index++;
     }
 
-    up(&master->master_sem);
+    ec_mutex_unlock(&master->master_mutex);
 
     voe = ecrt_slave_config_create_voe_handler_err(sc, data.size);
     if (IS_ERR(voe))
@@ -2492,17 +2553,17 @@
         return -EFAULT;
     }
 
-    if (down_interruptible(&master->master_sem))
+    if (ec_mutex_lock_interruptible(&master->master_mutex))
         return -EINTR;
 
     if (!(sc = ec_master_get_config_const(master, data.config_index))) {
-        up(&master->master_sem);
+        ec_mutex_unlock(&master->master_mutex);
         return -ENOENT;
     }
 
     ecrt_slave_config_state(sc, &state);
 
-    up(&master->master_sem);
+    ec_mutex_unlock(&master->master_mutex);
 
     if (copy_to_user((void __user *) data.state, &state, sizeof(state)))
         return -EFAULT;
@@ -2543,18 +2604,18 @@
         return -EFAULT;
     }
 
-    if (down_interruptible(&master->master_sem)) {
+    if (ec_mutex_lock_interruptible(&master->master_mutex)) {
         kfree(data);
         return -EINTR;
     }
 
     if (!(sc = ec_master_get_config(master, ioctl.config_index))) {
-        up(&master->master_sem);
+        ec_mutex_unlock(&master->master_mutex);
         kfree(data);
         return -ENOENT;
     }
 
-    up(&master->master_sem); // FIXME
+    ec_mutex_unlock(&master->master_mutex); // FIXME
 
     ret = ecrt_slave_config_idn(
             sc, ioctl.drive_no, ioctl.idn, ioctl.al_state, data, ioctl.size);
@@ -2578,19 +2639,19 @@
     if (unlikely(!priv->requested))
         return -EPERM;
 
-    if (down_interruptible(&master->master_sem)) {
+    if (ec_mutex_lock_interruptible(&master->master_mutex)) {
         return -EINTR;
     }
 
     list_for_each_entry(domain, &master->domains, list) {
         if (domain->index == arg) {
-            up(&master->master_sem);
+            ec_mutex_unlock(&master->master_mutex);
             return offset;
         }
         offset += ecrt_domain_size(domain);
     }
 
-    up(&master->master_sem);
+    ec_mutex_unlock(&master->master_mutex);
     return -ENOENT;
 }
 
@@ -2609,16 +2670,16 @@
     if (unlikely(!priv->requested))
         return -EPERM;
 
-    if (down_interruptible(&master->master_sem))
+    if (ec_mutex_lock_interruptible(&master->master_mutex))
         return -EINTR;
 
     if (!(domain = ec_master_find_domain(master, arg))) {
-        up(&master->master_sem);
+        ec_mutex_unlock(&master->master_mutex);
         return -ENOENT;
     }
 
     ecrt_domain_process(domain);
-    up(&master->master_sem);
+    ec_mutex_unlock(&master->master_mutex);
     return 0;
 }
 
@@ -2637,16 +2698,16 @@
     if (unlikely(!priv->requested))
         return -EPERM;
 
-    if (down_interruptible(&master->master_sem))
+    if (ec_mutex_lock_interruptible(&master->master_mutex))
         return -EINTR;
 
     if (!(domain = ec_master_find_domain(master, arg))) {
-        up(&master->master_sem);
+        ec_mutex_unlock(&master->master_mutex);
         return -ENOENT;
     }
 
     ecrt_domain_queue(domain);
-    up(&master->master_sem);
+    ec_mutex_unlock(&master->master_mutex);
     return 0;
 }
 
@@ -2671,17 +2732,17 @@
         return -EFAULT;
     }
 
-    if (down_interruptible(&master->master_sem))
+    if (ec_mutex_lock_interruptible(&master->master_mutex))
         return -EINTR;
 
     if (!(domain = ec_master_find_domain_const(master, data.domain_index))) {
-        up(&master->master_sem);
+        ec_mutex_unlock(&master->master_mutex);
         return -ENOENT;
     }
 
     ecrt_domain_state(domain, &state);
 
-    up(&master->master_sem);
+    ec_mutex_unlock(&master->master_mutex);
 
     if (copy_to_user((void __user *) data.state, &state, sizeof(state)))
         return -EFAULT;
@@ -2709,20 +2770,20 @@
     if (copy_from_user(&data, (void __user *) arg, sizeof(data)))
         return -EFAULT;
 
-    if (down_interruptible(&master->master_sem))
+    if (ec_mutex_lock_interruptible(&master->master_mutex))
         return -EINTR;
 
     if (!(sc = ec_master_get_config(master, data.config_index))) {
-        up(&master->master_sem);
+        ec_mutex_unlock(&master->master_mutex);
         return -ENOENT;
     }
 
     if (!(req = ec_slave_config_find_sdo_request(sc, data.request_index))) {
-        up(&master->master_sem);
+        ec_mutex_unlock(&master->master_mutex);
         return -ENOENT;
     }
 
-    up(&master->master_sem);
+    ec_mutex_unlock(&master->master_mutex);
 
     ecrt_sdo_request_timeout(req, data.timeout);
     return 0;
@@ -2748,16 +2809,16 @@
     if (copy_from_user(&data, (void __user *) arg, sizeof(data)))
         return -EFAULT;
 
-    if (down_interruptible(&master->master_sem))
+    if (ec_mutex_lock_interruptible(&master->master_mutex))
         return -EINTR;
 
     if (!(sc = ec_master_get_config(master, data.config_index))) {
-        up(&master->master_sem);
+        ec_mutex_unlock(&master->master_mutex);
         return -ENOENT;
     }
 
     if (!(req = ec_slave_config_find_sdo_request(sc, data.request_index))) {
-        up(&master->master_sem);
+        ec_mutex_unlock(&master->master_mutex);
         return -ENOENT;
     }
 
@@ -2767,7 +2828,7 @@
     else
         data.size = 0;
 
-    up(&master->master_sem);
+    ec_mutex_unlock(&master->master_mutex);
 
     if (copy_to_user((void __user *) arg, &data, sizeof(data)))
         return -EFAULT;
@@ -2795,20 +2856,20 @@
     if (copy_from_user(&data, (void __user *) arg, sizeof(data)))
         return -EFAULT;
 
-    if (down_interruptible(&master->master_sem))
+    if (ec_mutex_lock_interruptible(&master->master_mutex))
         return -EINTR;
 
     if (!(sc = ec_master_get_config(master, data.config_index))) {
-        up(&master->master_sem);
+        ec_mutex_unlock(&master->master_mutex);
         return -ENOENT;
     }
 
     if (!(req = ec_slave_config_find_sdo_request(sc, data.request_index))) {
-        up(&master->master_sem);
+        ec_mutex_unlock(&master->master_mutex);
         return -ENOENT;
     }
 
-    up(&master->master_sem);
+    ec_mutex_unlock(&master->master_mutex);
 
     ecrt_sdo_request_read(req);
     return 0;
@@ -2840,20 +2901,20 @@
         return -EINVAL;
     }
 
-    if (down_interruptible(&master->master_sem))
+    if (ec_mutex_lock_interruptible(&master->master_mutex))
         return -EINTR;
 
     if (!(sc = ec_master_get_config(master, data.config_index))) {
-        up(&master->master_sem);
+        ec_mutex_unlock(&master->master_mutex);
         return -ENOENT;
     }
 
     if (!(req = ec_slave_config_find_sdo_request(sc, data.request_index))) {
-        up(&master->master_sem);
+        ec_mutex_unlock(&master->master_mutex);
         return -ENOENT;
     }
 
-    up(&master->master_sem);
+    ec_mutex_unlock(&master->master_mutex);
 
     ret = ec_sdo_request_alloc(req, data.size);
     if (ret)
@@ -2887,20 +2948,20 @@
     if (copy_from_user(&data, (void __user *) arg, sizeof(data)))
         return -EFAULT;
 
-    if (down_interruptible(&master->master_sem))
+    if (ec_mutex_lock_interruptible(&master->master_mutex))
         return -EINTR;
 
     if (!(sc = ec_master_get_config(master, data.config_index))) {
-        up(&master->master_sem);
+        ec_mutex_unlock(&master->master_mutex);
         return -ENOENT;
     }
 
     if (!(req = ec_slave_config_find_sdo_request(sc, data.request_index))) {
-        up(&master->master_sem);
+        ec_mutex_unlock(&master->master_mutex);
         return -ENOENT;
     }
 
-    up(&master->master_sem);
+    ec_mutex_unlock(&master->master_mutex);
 
     if (copy_to_user((void __user *) data.data, ecrt_sdo_request_data(req),
                 ecrt_sdo_request_data_size(req)))
@@ -2937,20 +2998,20 @@
     if (get_user(vendor_type, data.vendor_type))
         return -EFAULT;
 
-    if (down_interruptible(&master->master_sem))
+    if (ec_mutex_lock_interruptible(&master->master_mutex))
         return -EINTR;
 
     if (!(sc = ec_master_get_config(master, data.config_index))) {
-        up(&master->master_sem);
+        ec_mutex_unlock(&master->master_mutex);
         return -ENOENT;
     }
 
     if (!(voe = ec_slave_config_find_voe_handler(sc, data.voe_index))) {
-        up(&master->master_sem);
+        ec_mutex_unlock(&master->master_mutex);
         return -ENOENT;
     }
 
-    up(&master->master_sem);
+    ec_mutex_unlock(&master->master_mutex);
 
     ecrt_voe_handler_send_header(voe, vendor_id, vendor_type);
     return 0;
@@ -2978,22 +3039,22 @@
     if (copy_from_user(&data, (void __user *) arg, sizeof(data)))
         return -EFAULT;
 
-    if (down_interruptible(&master->master_sem))
+    if (ec_mutex_lock_interruptible(&master->master_mutex))
         return -EINTR;
 
     if (!(sc = ec_master_get_config(master, data.config_index))) {
-        up(&master->master_sem);
+        ec_mutex_unlock(&master->master_mutex);
         return -ENOENT;
     }
 
     if (!(voe = ec_slave_config_find_voe_handler(sc, data.voe_index))) {
-        up(&master->master_sem);
+        ec_mutex_unlock(&master->master_mutex);
         return -ENOENT;
     }
 
     ecrt_voe_handler_received_header(voe, &vendor_id, &vendor_type);
 
-    up(&master->master_sem);
+    ec_mutex_unlock(&master->master_mutex);
 
     if (likely(data.vendor_id))
         if (put_user(vendor_id, data.vendor_id))
@@ -3026,20 +3087,20 @@
     if (copy_from_user(&data, (void __user *) arg, sizeof(data)))
         return -EFAULT;
 
-    if (down_interruptible(&master->master_sem))
+    if (ec_mutex_lock_interruptible(&master->master_mutex))
         return -EINTR;
 
     if (!(sc = ec_master_get_config(master, data.config_index))) {
-        up(&master->master_sem);
+        ec_mutex_unlock(&master->master_mutex);
         return -ENOENT;
     }
 
     if (!(voe = ec_slave_config_find_voe_handler(sc, data.voe_index))) {
-        up(&master->master_sem);
+        ec_mutex_unlock(&master->master_mutex);
         return -ENOENT;
     }
 
-    up(&master->master_sem);
+    ec_mutex_unlock(&master->master_mutex);
 
     ecrt_voe_handler_read(voe);
     return 0;
@@ -3065,20 +3126,20 @@
     if (copy_from_user(&data, (void __user *) arg, sizeof(data)))
         return -EFAULT;
 
-    if (down_interruptible(&master->master_sem))
+    if (ec_mutex_lock_interruptible(&master->master_mutex))
         return -EINTR;
 
     if (!(sc = ec_master_get_config(master, data.config_index))) {
-        up(&master->master_sem);
+        ec_mutex_unlock(&master->master_mutex);
         return -ENOENT;
     }
 
     if (!(voe = ec_slave_config_find_voe_handler(sc, data.voe_index))) {
-        up(&master->master_sem);
+        ec_mutex_unlock(&master->master_mutex);
         return -ENOENT;
     }
 
-    up(&master->master_sem);
+    ec_mutex_unlock(&master->master_mutex);
 
     ecrt_voe_handler_read_nosync(voe);
     return 0;
@@ -3104,20 +3165,20 @@
     if (copy_from_user(&data, (void __user *) arg, sizeof(data)))
         return -EFAULT;
 
-    if (down_interruptible(&master->master_sem))
+    if (ec_mutex_lock_interruptible(&master->master_mutex))
         return -EINTR;
 
     if (!(sc = ec_master_get_config(master, data.config_index))) {
-        up(&master->master_sem);
+        ec_mutex_unlock(&master->master_mutex);
         return -ENOENT;
     }
 
     if (!(voe = ec_slave_config_find_voe_handler(sc, data.voe_index))) {
-        up(&master->master_sem);
+        ec_mutex_unlock(&master->master_mutex);
         return -ENOENT;
     }
 
-    up(&master->master_sem);
+    ec_mutex_unlock(&master->master_mutex);
 
     if (data.size) {
         if (data.size > ec_voe_handler_mem_size(voe))
@@ -3152,20 +3213,20 @@
     if (copy_from_user(&data, (void __user *) arg, sizeof(data)))
         return -EFAULT;
 
-    if (down_interruptible(&master->master_sem))
+    if (ec_mutex_lock_interruptible(&master->master_mutex))
         return -EINTR;
 
     if (!(sc = ec_master_get_config(master, data.config_index))) {
-        up(&master->master_sem);
+        ec_mutex_unlock(&master->master_mutex);
         return -ENOENT;
     }
 
     if (!(voe = ec_slave_config_find_voe_handler(sc, data.voe_index))) {
-        up(&master->master_sem);
+        ec_mutex_unlock(&master->master_mutex);
         return -ENOENT;
     }
 
-    up(&master->master_sem);
+    ec_mutex_unlock(&master->master_mutex);
 
     data.state = ecrt_voe_handler_execute(voe);
     if (data.state == EC_REQUEST_SUCCESS && voe->dir == EC_DIR_INPUT)
@@ -3199,20 +3260,20 @@
     if (copy_from_user(&data, (void __user *) arg, sizeof(data)))
         return -EFAULT;
 
-    if (down_interruptible(&master->master_sem))
+    if (ec_mutex_lock_interruptible(&master->master_mutex))
         return -EINTR;
 
     if (!(sc = ec_master_get_config(master, data.config_index))) {
-        up(&master->master_sem);
+        ec_mutex_unlock(&master->master_mutex);
         return -ENOENT;
     }
 
     if (!(voe = ec_slave_config_find_voe_handler(sc, data.voe_index))) {
-        up(&master->master_sem);
+        ec_mutex_unlock(&master->master_mutex);
         return -ENOENT;
     }
 
-    up(&master->master_sem);
+    ec_mutex_unlock(&master->master_mutex);
 
     if (copy_to_user((void __user *) data.data, ecrt_voe_handler_data(voe),
                 ecrt_voe_handler_data_size(voe)))
@@ -3231,74 +3292,70 @@
         )
 {
     ec_ioctl_slave_foe_t data;
-    ec_master_foe_request_t request;
+    ec_master_foe_request_t* request;
     int retval;
 
     if (copy_from_user(&data, (void __user *) arg, sizeof(data))) {
         return -EFAULT;
     }
 
-    ec_foe_request_init(&request.req, data.file_name);
-    ec_foe_request_read(&request.req);
-    ec_foe_request_alloc(&request.req, 10000); // FIXME
-
-    if (down_interruptible(&master->master_sem))
-        return -EINTR;
-
-    if (!(request.slave = ec_master_find_slave(
+    request = kmalloc(sizeof(*request), GFP_KERNEL);
+    if (!request)
+        return -ENOMEM;
+    kref_init(&request->refcount);
+
+    ec_foe_request_init(&request->req, data.file_name);
+    ec_foe_request_read(&request->req);
+    ec_foe_request_alloc(&request->req, 10000); // FIXME
+
+    if (ec_mutex_lock_interruptible(&master->master_mutex))  {
+        kref_put(&request->refcount,ec_master_foe_request_release);
+        return -EINTR;
+    }
+    if (!(request->slave = ec_master_find_slave(
                     master, 0, data.slave_position))) {
-        up(&master->master_sem);
-        ec_foe_request_clear(&request.req);
+        ec_mutex_unlock(&master->master_mutex);
+        kref_put(&request->refcount,ec_master_foe_request_release);
         EC_MASTER_ERR(master, "Slave %u does not exist!\n",
                 data.slave_position);
         return -EINVAL;
     }
 
     // schedule request.
-    list_add_tail(&request.list, &request.slave->foe_requests);
-
-    up(&master->master_sem);
-
-    EC_SLAVE_DBG(request.slave, 1, "Scheduled FoE read request.\n");
+    list_add_tail(&request->list, &request->slave->foe_requests);
+    kref_get(&request->refcount);
+
+    ec_mutex_unlock(&master->master_mutex);
+
+    EC_SLAVE_DBG(request->slave, 1, "Scheduled FoE read request %p.\n",request);
 
     // wait for processing through FSM
-    if (wait_event_interruptible(request.slave->foe_queue,
-                request.req.state != EC_INT_REQUEST_QUEUED)) {
+    if (wait_event_interruptible(request->slave->foe_queue,
+          ((request->req.state == EC_INT_REQUEST_SUCCESS) || (request->req.state == EC_INT_REQUEST_FAILURE)))) {
         // interrupted by signal
-        down(&master->master_sem);
-        if (request.req.state == EC_INT_REQUEST_QUEUED) {
-            list_del(&request.list);
-            up(&master->master_sem);
-            ec_foe_request_clear(&request.req);
-            return -EINTR;
-        }
-        // request already processing: interrupt not possible.
-        up(&master->master_sem);
-    }
-
-    // wait until master FSM has finished processing
-    wait_event(request.slave->foe_queue,
-            request.req.state != EC_INT_REQUEST_BUSY);
-
-    data.result = request.req.result;
-    data.error_code = request.req.error_code;
-
-    EC_SLAVE_DBG(request.slave, 1, "Read %zd bytes via FoE"
-            " (result = 0x%x).\n", request.req.data_size, request.req.result);
-
-    if (request.req.state != EC_INT_REQUEST_SUCCESS) {
+        kref_put(&request->refcount,ec_master_foe_request_release);
+        return -EINTR;
+    }
+
+    data.result = request->req.result;
+    data.error_code = request->req.error_code;
+
+    EC_SLAVE_DBG(request->slave, 1, "Read %zd bytes via FoE"
+            " (result = 0x%x).\n", request->req.data_size, request->req.result);
+
+    if (request->req.state != EC_INT_REQUEST_SUCCESS) {
         data.data_size = 0;
         retval = -EIO;
     } else {
-        if (request.req.data_size > data.buffer_size) {
+        if (request->req.data_size > data.buffer_size) {
             EC_MASTER_ERR(master, "Buffer too small.\n");
-            ec_foe_request_clear(&request.req);
+            kref_put(&request->refcount,ec_master_foe_request_release);
             return -EOVERFLOW;
         }
-        data.data_size = request.req.data_size;
+        data.data_size = request->req.data_size;
         if (copy_to_user((void __user *) data.buffer,
-                    request.req.buffer, data.data_size)) {
-            ec_foe_request_clear(&request.req);
+                    request->req.buffer, data.data_size)) {
+            kref_put(&request->refcount,ec_master_foe_request_release);
             return -EFAULT;
         }
         retval = 0;
@@ -3308,9 +3365,8 @@
         retval = -EFAULT;
     }
 
-    EC_SLAVE_DBG(request.slave, 1, "Finished FoE read request.\n");
-
-    ec_foe_request_clear(&request.req);
+    EC_SLAVE_DBG(request->slave, 1, "Finished FoE read request %p.\n",request);
+    kref_put(&request->refcount,ec_master_foe_request_release);
 
     return retval;
 }
@@ -3325,79 +3381,73 @@
         )
 {
     ec_ioctl_slave_foe_t data;
-    ec_master_foe_request_t request;
+    ec_master_foe_request_t* request;
     int retval;
 
     if (copy_from_user(&data, (void __user *) arg, sizeof(data))) {
         return -EFAULT;
     }
 
-    INIT_LIST_HEAD(&request.list);
-
-    ec_foe_request_init(&request.req, data.file_name);
-
-    if (ec_foe_request_alloc(&request.req, data.buffer_size)) {
-        ec_foe_request_clear(&request.req);
+    request = kmalloc(sizeof(*request), GFP_KERNEL);
+    if (!request)
         return -ENOMEM;
-    }
-    if (copy_from_user(request.req.buffer,
+    kref_init(&request->refcount);
+
+    INIT_LIST_HEAD(&request->list);
+
+    ec_foe_request_init(&request->req, data.file_name);
+
+    if (ec_foe_request_alloc(&request->req, data.buffer_size)) {
+        kref_put(&request->refcount,ec_master_foe_request_release);
+        return -ENOMEM;
+    }
+    if (copy_from_user(request->req.buffer,
                 (void __user *) data.buffer, data.buffer_size)) {
-        ec_foe_request_clear(&request.req);
-        return -EFAULT;
-    }
-    request.req.data_size = data.buffer_size;
-    ec_foe_request_write(&request.req);
-
-    if (down_interruptible(&master->master_sem))
-        return -EINTR;
-
-    if (!(request.slave = ec_master_find_slave(
+        kref_put(&request->refcount,ec_master_foe_request_release);
+        return -EFAULT;
+    }
+    request->req.data_size = data.buffer_size;
+    ec_foe_request_write(&request->req);
+
+    if (ec_mutex_lock_interruptible(&master->master_mutex)) {
+        kref_put(&request->refcount,ec_master_foe_request_release);
+        return -EINTR;
+    }
+
+    if (!(request->slave = ec_master_find_slave(
                     master, 0, data.slave_position))) {
-        up(&master->master_sem);
+        ec_mutex_unlock(&master->master_mutex);
         EC_MASTER_ERR(master, "Slave %u does not exist!\n",
                 data.slave_position);
-        ec_foe_request_clear(&request.req);
-        return -EINVAL;
-    }
-
-    EC_SLAVE_DBG(request.slave, 1, "Scheduling FoE write request.\n");
+        kref_put(&request->refcount,ec_master_foe_request_release);
+        return -EINVAL;
+    }
+
+    EC_SLAVE_DBG(request->slave, 1, "Scheduling FoE write request %p.\n",request);
 
     // schedule FoE write request.
-    list_add_tail(&request.list, &request.slave->foe_requests);
-
-    up(&master->master_sem);
+    list_add_tail(&request->list, &request->slave->foe_requests);
+    kref_get(&request->refcount);
+
+    ec_mutex_unlock(&master->master_mutex);
 
     // wait for processing through FSM
-    if (wait_event_interruptible(request.slave->foe_queue,
-                request.req.state != EC_INT_REQUEST_QUEUED)) {
+    if (wait_event_interruptible(request->slave->foe_queue,
+       ((request->req.state == EC_INT_REQUEST_SUCCESS) || (request->req.state == EC_INT_REQUEST_FAILURE)))) {
         // interrupted by signal
-        down(&master->master_sem);
-        if (request.req.state == EC_INT_REQUEST_QUEUED) {
-            // abort request
-            list_del(&request.list);
-            up(&master->master_sem);
-            ec_foe_request_clear(&request.req);
-            return -EINTR;
-        }
-        up(&master->master_sem);
-    }
-
-    // wait until master FSM has finished processing
-    wait_event(request.slave->foe_queue,
-            request.req.state != EC_INT_REQUEST_BUSY);
-
-    data.result = request.req.result;
-    data.error_code = request.req.error_code;
-
-    retval = request.req.state == EC_INT_REQUEST_SUCCESS ? 0 : -EIO;
+        kref_put(&request->refcount,ec_master_foe_request_release);
+        return -EINTR;
+    }
+
+    data.result = request->req.result;
+    data.error_code = request->req.error_code;
+
+    retval = request->req.state == EC_INT_REQUEST_SUCCESS ? 0 : -EIO;
 
     if (__copy_to_user((void __user *) arg, &data, sizeof(data))) {
         retval = -EFAULT;
     }
 
-    ec_foe_request_clear(&request.req);
-
-    EC_SLAVE_DBG(request.slave, 1, "Finished FoE write request.\n");
+    EC_SLAVE_DBG(request->slave, 1, "Finished FoE write request %p.\n",request);
+    kref_put(&request->refcount,ec_master_foe_request_release);
 
     return retval;
 }
@@ -3670,6 +3720,8 @@
             return ec_cdev_ioctl_receive(master, arg, priv);
         case EC_IOCTL_MASTER_STATE:
             return ec_cdev_ioctl_master_state(master, arg, priv);
+        case EC_IOCTL_MASTER_SC_STATE:
+            return ec_cdev_ioctl_master_sc_state(master, arg, priv);
         case EC_IOCTL_APP_TIME:
             if (!(filp->f_mode & FMODE_WRITE))
                 return -EPERM;
@@ -3702,6 +3754,10 @@
             if (!(filp->f_mode & FMODE_WRITE))
                 return -EPERM;
             return ec_cdev_ioctl_sc_watchdog(master, arg, priv);
+        case EC_IOCTL_SC_OVERLAPPING_IO:
+            if (!(filp->f_mode & FMODE_WRITE))
+                return -EPERM;
+            return ec_cdev_ioctl_sc_allow_overlapping_pdos(master, arg, priv);
         case EC_IOCTL_SC_ADD_PDO:
             if (!(filp->f_mode & FMODE_WRITE))
                 return -EPERM;
--- a/master/datagram.c	Wed Mar 23 08:06:58 2011 +0100
+++ b/master/datagram.c	Wed Apr 13 22:06:28 2011 +0200
@@ -87,13 +87,17 @@
  */
 void ec_datagram_init(ec_datagram_t *datagram /**< EtherCAT datagram. */)
 {
+    INIT_LIST_HEAD(&datagram->list); // mark as not on any domain list
     INIT_LIST_HEAD(&datagram->queue); // mark as unqueued
+    INIT_LIST_HEAD(&datagram->fsm_queue); // mark as not on the FSM queue
+    INIT_LIST_HEAD(&datagram->sent); // mark as not on the sent list
     datagram->type = EC_DATAGRAM_NONE;
     memset(datagram->address, 0x00, EC_ADDR_LEN);
     datagram->data = NULL;
     datagram->data_origin = EC_ORIG_INTERNAL;
     datagram->mem_size = 0;
     datagram->data_size = 0;
+    datagram->domain = NULL;
     datagram->index = 0x00;
     datagram->working_counter = 0x0000;
     datagram->state = EC_DATAGRAM_INIT;
@@ -130,6 +134,9 @@
  */
 void ec_datagram_unqueue(ec_datagram_t *datagram /**< EtherCAT datagram. */)
 {
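+    // a datagram may be linked into the FSM queue and/or the master
+    // send/receive queue; remove it from whichever lists it is on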
+    if (!list_empty(&datagram->fsm_queue)) {
+        list_del_init(&datagram->fsm_queue);
+    }
     if (!list_empty(&datagram->queue)) {
         list_del_init(&datagram->queue);
     }
--- a/master/datagram.h	Wed Mar 23 08:06:58 2011 +0100
+++ b/master/datagram.h	Wed Apr 13 22:06:28 2011 +0200
@@ -86,7 +86,8 @@
  */
 typedef struct {
     struct list_head list; /**< Needed by domain datagram lists. */
-    struct list_head queue; /**< Master datagram queue item. */
+    struct list_head queue; /**< Master datagram send-receive queue item. */
+    struct list_head fsm_queue; /**< Master datagram fsm queue item. */
     struct list_head sent; /**< Master list item for sent datagrams. */
     ec_datagram_type_t type; /**< Datagram type (APRD, BWR, etc.). */
     uint8_t address[EC_ADDR_LEN]; /**< Recipient address. */
@@ -94,6 +95,7 @@
     ec_origin_t data_origin; /**< Origin of the \a data memory. */
     size_t mem_size; /**< Datagram \a data memory size. */
     size_t data_size; /**< Size of the data in \a data. */
+    ec_domain_t *domain; /**< Owning domain (may be null for non-domain datagrams) */
     uint8_t index; /**< Index (set by master). */
     uint16_t working_counter; /**< Working counter. */
     ec_datagram_state_t state; /**< State. */
--- a/master/device.c	Wed Mar 23 08:06:58 2011 +0100
+++ b/master/device.c	Wed Apr 13 22:06:28 2011 +0200
@@ -501,9 +501,9 @@
     ec_mac_print(device->dev->dev_addr, str);
     EC_MASTER_INFO(master, "Releasing main device %s.\n", str);
     
-    down(&master->device_sem);
+    ec_mutex_lock(&master->device_mutex);
     ec_device_detach(device);
-    up(&master->device_sem);
+    ec_mutex_unlock(&master->device_mutex);
 }
 
 /*****************************************************************************/
--- a/master/domain.c	Wed Mar 23 08:06:58 2011 +0100
+++ b/master/domain.c	Wed Apr 13 22:06:28 2011 +0200
@@ -60,6 +60,7 @@
     domain->index = index;
     INIT_LIST_HEAD(&domain->fmmu_configs);
     domain->data_size = 0;
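+    // tx_size counts the bytes actually transmitted on the bus; it can
+    // differ from data_size, e.g. when input and output PDOs overlap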
+    domain->tx_size = 0;
     domain->data = NULL;
     domain->data_origin = EC_ORIG_INTERNAL;
     domain->logical_base_address = 0x00000000;
@@ -113,6 +114,7 @@
     fmmu->domain = domain;
 
     domain->data_size += fmmu->data_size;
+    domain->tx_size += fmmu->tx_size;
     list_add_tail(&fmmu->list, &domain->fmmu_configs);
 
     EC_MASTER_DBG(domain->master, 1, "Domain %u:"
@@ -179,6 +181,7 @@
 
     ec_datagram_zero(datagram);
     list_add_tail(&datagram->list, &domain->datagrams);
+    datagram->domain = domain;
     return 0;
 }
 
@@ -238,6 +241,7 @@
     list_for_each_entry(fmmu, &domain->fmmu_configs, list) {
         // Correct logical FMMU address
         fmmu->logical_start_address += base_address;
+        fmmu->domain_address += base_address;
 
         // Increment Input/Output counter to determine datagram types
         // and calculate expected working counters
@@ -249,7 +253,7 @@
 
         // If the current FMMU's data do not fit in the current datagram,
         // allocate a new one.
-        if (datagram_size + fmmu->data_size > EC_MAX_DATA_SIZE) {
+        if (datagram_size + fmmu->tx_size > EC_MAX_DATA_SIZE) {
             ret = ec_domain_add_datagram(domain,
                     domain->logical_base_address + datagram_offset,
                     datagram_size, domain->data + datagram_offset,
@@ -267,7 +271,7 @@
             }
         }
 
-        datagram_size += fmmu->data_size;
+        datagram_size += fmmu->tx_size;
     }
 
     // Allocate last datagram, if data are left (this is also the case if the
@@ -377,14 +381,14 @@
     EC_MASTER_DBG(domain->master, 1, "ecrt_domain_external_memory("
             "domain = 0x%p, mem = 0x%p)\n", domain, mem);
 
-    down(&domain->master->master_sem);
+    ec_mutex_lock(&domain->master->master_mutex);
 
     ec_domain_clear_data(domain);
 
     domain->data = mem;
     domain->data_origin = EC_ORIG_EXTERNAL;
 
-    up(&domain->master->master_sem);
+    ec_mutex_unlock(&domain->master->master_mutex);
 }
 
 /*****************************************************************************/
--- a/master/domain.h	Wed Mar 23 08:06:58 2011 +0100
+++ b/master/domain.h	Wed Apr 13 22:06:28 2011 +0200
@@ -59,6 +59,7 @@
 
     struct list_head fmmu_configs; /**< FMMU configurations contained. */
     size_t data_size; /**< Size of the process data. */
+    size_t tx_size; /**< Size of the transmitted data. */
     uint8_t *data; /**< Memory for the process data. */
     ec_origin_t data_origin; /**< Origin of the \a data memory. */
     uint32_t logical_base_address; /**< Logical offset address of the
--- a/master/ethernet.c	Wed Mar 23 08:06:58 2011 +0100
+++ b/master/ethernet.c	Wed Apr 13 22:06:28 2011 +0200
@@ -111,6 +111,7 @@
     eoe->slave = slave;
 
     ec_datagram_init(&eoe->datagram);
+    ec_mbox_init(&eoe->mbox,&eoe->datagram);
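+    // the mailbox wrapper manages the datagram used for EoE mailbox
+    // traffic from here on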
     eoe->queue_datagram = 0;
     eoe->state = ec_eoe_state_rx_start;
     eoe->opened = 0;
@@ -122,7 +123,7 @@
     eoe->tx_queue_size = EC_EOE_TX_QUEUE_SIZE;
     eoe->tx_queued_frames = 0;
 
-    sema_init(&eoe->tx_queue_sem, 1);
+    ec_mutex_init(&eoe->tx_queue_mutex);
     eoe->tx_frame_number = 0xFF;
     memset(&eoe->stats, 0, sizeof(struct net_device_stats));
 
@@ -220,6 +221,7 @@
 
     free_netdev(eoe->dev);
 
+    ec_mbox_clear(&eoe->mbox);
     ec_datagram_clear(&eoe->datagram);
 }
 
@@ -231,7 +233,7 @@
 {
     ec_eoe_frame_t *frame, *next;
 
-    down(&eoe->tx_queue_sem);
+    ec_mutex_lock(&eoe->tx_queue_mutex);
 
     list_for_each_entry_safe(frame, next, &eoe->tx_queue, queue) {
         list_del(&frame->queue);
@@ -240,7 +242,7 @@
     }
     eoe->tx_queued_frames = 0;
 
-    up(&eoe->tx_queue_sem);
+    ec_mutex_unlock(&eoe->tx_queue_mutex);
 }
 
 /*****************************************************************************/
@@ -294,7 +296,7 @@
     printk("\n");
 #endif
 
-    data = ec_slave_mbox_prepare_send(eoe->slave, &eoe->datagram,
+    data = ec_slave_mbox_prepare_send(eoe->slave, &eoe->mbox,
             0x02, current_size + 4);
     if (IS_ERR(data))
         return PTR_ERR(data);
@@ -323,7 +325,9 @@
         return;
 
     // if the datagram was not sent, or is not yet received, skip this cycle
-    if (eoe->queue_datagram || eoe->datagram.state == EC_DATAGRAM_SENT)
+    if (eoe->queue_datagram ||
+        ec_mbox_is_datagram_state(&eoe->mbox,EC_DATAGRAM_QUEUED) ||
+        ec_mbox_is_datagram_state(&eoe->mbox,EC_DATAGRAM_SENT))
         return;
 
     // call state function
@@ -348,7 +352,7 @@
 void ec_eoe_queue(ec_eoe_t *eoe /**< EoE handler */)
 {
    if (eoe->queue_datagram) {
-       ec_master_queue_datagram_ext(eoe->slave->master, &eoe->datagram);
+       ec_master_mbox_queue_datagrams(eoe->slave->master, &eoe->mbox);
        eoe->queue_datagram = 0;
    }
 }
@@ -394,7 +398,7 @@
         return;
     }
 
-    ec_slave_mbox_prepare_check(eoe->slave, &eoe->datagram);
+    ec_slave_mbox_prepare_check(eoe->slave, &eoe->mbox);
     eoe->queue_datagram = 1;
     eoe->state = ec_eoe_state_rx_check;
 }
@@ -408,7 +412,7 @@
  */
 void ec_eoe_state_rx_check(ec_eoe_t *eoe /**< EoE handler */)
 {
-    if (eoe->datagram.state != EC_DATAGRAM_RECEIVED) {
+    if (!ec_mbox_is_datagram_state(&eoe->mbox,EC_DATAGRAM_RECEIVED)) {
         eoe->stats.rx_errors++;
 #if EOE_DEBUG_LEVEL >= 1
         EC_SLAVE_WARN(eoe->slave, "Failed to receive mbox"
@@ -418,14 +422,14 @@
         return;
     }
 
-    if (!ec_slave_mbox_check(&eoe->datagram)) {
+    if (!ec_slave_mbox_check(&eoe->mbox)) {
         eoe->rx_idle = 1;
         eoe->state = ec_eoe_state_tx_start;
         return;
     }
 
     eoe->rx_idle = 0;
-    ec_slave_mbox_prepare_fetch(eoe->slave, &eoe->datagram);
+    ec_slave_mbox_prepare_fetch(eoe->slave, &eoe->mbox);
     eoe->queue_datagram = 1;
     eoe->state = ec_eoe_state_rx_fetch;
 }
@@ -447,7 +451,7 @@
     unsigned int i;
 #endif
 
-    if (eoe->datagram.state != EC_DATAGRAM_RECEIVED) {
+    if (!ec_mbox_is_datagram_state(&eoe->mbox,EC_DATAGRAM_RECEIVED)) {
         eoe->stats.rx_errors++;
 #if EOE_DEBUG_LEVEL >= 1
         EC_SLAVE_WARN(eoe->slave, "Failed to receive mbox"
@@ -457,7 +461,7 @@
         return;
     }
 
-    data = ec_slave_mbox_fetch(eoe->slave, &eoe->datagram,
+    data = ec_slave_mbox_fetch(eoe->slave, &eoe->mbox,
             &mbox_prot, &rec_size);
     if (IS_ERR(data)) {
         eoe->stats.rx_errors++;
@@ -620,10 +624,10 @@
         return;
     }
 
-    down(&eoe->tx_queue_sem);
+    ec_mutex_lock(&eoe->tx_queue_mutex);
 
     if (!eoe->tx_queued_frames || list_empty(&eoe->tx_queue)) {
-        up(&eoe->tx_queue_sem);
+        ec_mutex_unlock(&eoe->tx_queue_mutex);
         eoe->tx_idle = 1;
         // no data available.
         // start a new receive immediately.
@@ -644,7 +648,7 @@
     }
 
     eoe->tx_queued_frames--;
-    up(&eoe->tx_queue_sem);
+    ec_mutex_unlock(&eoe->tx_queue_mutex);
 
     eoe->tx_idle = 0;
 
@@ -684,7 +688,7 @@
  */
 void ec_eoe_state_tx_sent(ec_eoe_t *eoe /**< EoE handler */)
 {
-    if (eoe->datagram.state != EC_DATAGRAM_RECEIVED) {
+    if (!ec_mbox_is_datagram_state(&eoe->mbox,EC_DATAGRAM_RECEIVED)) {
         if (eoe->tries) {
             eoe->tries--; // try again
             eoe->queue_datagram = 1;
@@ -700,7 +704,7 @@
         return;
     }
 
-    if (eoe->datagram.working_counter != 1) {
+    if (!ec_mbox_is_datagram_wc(&eoe->mbox,1)) {
         if (eoe->tries) {
             eoe->tries--; // try again
             eoe->queue_datagram = 1;
@@ -812,14 +816,14 @@
 
     frame->skb = skb;
 
-    down(&eoe->tx_queue_sem);
+    ec_mutex_lock(&eoe->tx_queue_mutex);
     list_add_tail(&frame->queue, &eoe->tx_queue);
     eoe->tx_queued_frames++;
     if (eoe->tx_queued_frames == eoe->tx_queue_size) {
         netif_stop_queue(dev);
         eoe->tx_queue_active = 0;
     }
-    up(&eoe->tx_queue_sem);
+    ec_mutex_unlock(&eoe->tx_queue_mutex);
 
 #if EOE_DEBUG_LEVEL >= 2
     EC_SLAVE_DBG(eoe->slave, 0, "EoE %s TX queued frame"
--- a/master/ethernet.h	Wed Mar 23 08:06:58 2011 +0100
+++ b/master/ethernet.h	Wed Apr 13 22:06:28 2011 +0200
@@ -73,6 +73,7 @@
     struct list_head list; /**< list item */
     ec_slave_t *slave; /**< pointer to the corresponding slave */
     ec_datagram_t datagram; /**< datagram */
+    ec_mailbox_t mbox; /**< mailbox */
     unsigned int queue_datagram; /**< the datagram is ready for queuing */
     void (*state)(ec_eoe_t *); /**< state function for the state machine */
     struct net_device *dev; /**< net_device for virtual ethernet device */
@@ -92,7 +93,7 @@
     unsigned int tx_queue_size; /**< Transmit queue size. */
     unsigned int tx_queue_active; /**< kernel netif queue started */
     unsigned int tx_queued_frames; /**< number of frames in the queue */
-    struct semaphore tx_queue_sem; /**< Semaphore for the send queue. */
+    struct ec_mutex_t tx_queue_mutex; /**< Mutex for the send queue. */
     ec_eoe_frame_t *tx_frame; /**< current TX frame */
     uint8_t tx_frame_number; /**< number of the transmitted frame */
     uint8_t tx_fragment_number; /**< number of the fragment */
--- a/master/fmmu_config.c	Wed Mar 23 08:06:58 2011 +0100
+++ b/master/fmmu_config.c	Wed Apr 13 22:06:28 2011 +0200
@@ -43,14 +43,12 @@
 
 /** FMMU configuration constructor.
  *
- * Inits an FMMU configuration, sets the logical start address and adds the
- * process data size for the mapped PDOs of the given direction to the domain
- * data size.
+ * Inits an FMMU configuration and determines the process data size of the
+ * mapped PDOs for the given direction.
  */
 void ec_fmmu_config_init(
         ec_fmmu_config_t *fmmu, /**< EtherCAT FMMU configuration. */
         ec_slave_config_t *sc, /**< EtherCAT slave configuration. */
-        ec_domain_t *domain, /**< EtherCAT domain. */
         uint8_t sync_index, /**< Sync manager index to use. */
         ec_direction_t dir /**< PDO direction. */
         )
@@ -59,14 +57,32 @@
     fmmu->sc = sc;
     fmmu->sync_index = sync_index;
     fmmu->dir = dir;
-
-    fmmu->logical_start_address = domain->data_size;
     fmmu->data_size = ec_pdo_list_total_size(
             &sc->sync_configs[sync_index].pdos);
+}
 
+
+/*****************************************************************************/
+
+/** Sets the FMMU's domain.
+ *
+ * Sets the domain, the domain address, the logical start address and the
+ * size of the transmitted data, and adds the FMMU configuration to the
+ * domain.
+ */
+void ec_fmmu_config_domain(
+        ec_fmmu_config_t *fmmu, /**< EtherCAT FMMU configuration. */
+        ec_domain_t *domain, /**< EtherCAT domain. */
+        uint32_t logical_start_address, /**< FMMU logical start address. */
+        size_t tx_size /**< Size of the transmitted data. */
+        )
+{
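+    // the domain address is the FMMU's offset in the domain's process
+    // data (the domain data size before this FMMU is added); the logical
+    // start address on the bus is supplied by the caller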
+    fmmu->domain = domain;
+    fmmu->domain_address = domain->data_size;
+    fmmu->logical_start_address = logical_start_address;
+    fmmu->tx_size = tx_size;
     ec_domain_add_fmmu_config(domain, fmmu);
 }
 
+
 /*****************************************************************************/
 
 /** Initializes an FMMU configuration page.
@@ -79,9 +95,11 @@
         uint8_t *data /**> Configuration page memory. */
         )
 {
-    EC_CONFIG_DBG(fmmu->sc, 1, "FMMU: LogAddr 0x%08X, Size %3u,"
+    EC_CONFIG_DBG(fmmu->sc, 1, "FMMU: LogAddr 0x%08X, DomAddr 0x%08X,"
+            " Size %3u, Tx %3u"
             " PhysAddr 0x%04X, SM%u, Dir %s\n",
-            fmmu->logical_start_address, fmmu->data_size,
+            fmmu->logical_start_address, fmmu->domain_address,
+            fmmu->data_size, fmmu->tx_size,
             sync->physical_start_address, fmmu->sync_index,
             fmmu->dir == EC_DIR_INPUT ? "in" : "out");
 
--- a/master/fmmu_config.h	Wed Mar 23 08:06:58 2011 +0100
+++ b/master/fmmu_config.h	Wed Apr 13 22:06:28 2011 +0200
@@ -50,13 +50,18 @@
     uint8_t sync_index; /**< Index of sync manager to use. */
     ec_direction_t dir; /**< FMMU direction. */
     uint32_t logical_start_address; /**< Logical start address. */
+    size_t tx_size; /**< Transmitted (bus) size. */
+    uint32_t domain_address; /**< Domain start address. */
     unsigned int data_size; /**< Covered PDO size. */
 } ec_fmmu_config_t;
 
 /*****************************************************************************/
 
 void ec_fmmu_config_init(ec_fmmu_config_t *, ec_slave_config_t *,
-        ec_domain_t *, uint8_t, ec_direction_t);
+        uint8_t, ec_direction_t);
+
+void ec_fmmu_config_domain(ec_fmmu_config_t *, ec_domain_t *,
+        uint32_t, size_t);
 
 void ec_fmmu_config_page(const ec_fmmu_config_t *, const ec_sync_t *,
         uint8_t *);
--- a/master/fsm_coe.c	Wed Mar 23 08:06:58 2011 +0100
+++ b/master/fsm_coe.c	Wed Apr 13 22:06:28 2011 +0200
@@ -168,11 +168,11 @@
 */
 
 void ec_fsm_coe_init(ec_fsm_coe_t *fsm, /**< finite state machine */
-                     ec_datagram_t *datagram /**< datagram */
+                     ec_mailbox_t *mbox /**< mailbox */
                      )
 {
     fsm->state = NULL;
-    fsm->datagram = datagram;
+    fsm->mbox = mbox;
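+    // the FSM reaches its datagram only through the mailbox wrapper;
+    // datagram state and working-counter checks go through the
+    // ec_mbox_is_datagram_* helpers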
 }
 
 /*****************************************************************************/
@@ -286,7 +286,7 @@
 
 void ec_fsm_coe_dict_start(ec_fsm_coe_t *fsm /**< finite state machine */)
 {
-    ec_datagram_t *datagram = fsm->datagram;
+    ec_mailbox_t *mbox = fsm->mbox;
     ec_slave_t *slave = fsm->slave;
     uint8_t *data;
 
@@ -303,7 +303,7 @@
         return;
     }
 
-    data = ec_slave_mbox_prepare_send(slave, datagram, 0x03, 8);
+    data = ec_slave_mbox_prepare_send(slave, mbox, 0x03, 8);
     if (IS_ERR(data)) {
         fsm->state = ec_fsm_coe_error;
         return;
@@ -328,13 +328,14 @@
 
 void ec_fsm_coe_dict_request(ec_fsm_coe_t *fsm /**< finite state machine */)
 {
-    ec_datagram_t *datagram = fsm->datagram;
+    ec_mailbox_t *mbox = fsm->mbox;
+    ec_datagram_t *datagram = mbox->datagram;
     ec_slave_t *slave = fsm->slave;
 
-    if (datagram->state == EC_DATAGRAM_TIMED_OUT && fsm->retries--)
+    if (ec_mbox_is_datagram_state(mbox,EC_DATAGRAM_TIMED_OUT) && fsm->retries--)
         return; // FIXME: request again?
 
-    if (datagram->state != EC_DATAGRAM_RECEIVED) {
+    if (!ec_mbox_is_datagram_state(mbox,EC_DATAGRAM_RECEIVED)) {
         fsm->state = ec_fsm_coe_error;
         EC_SLAVE_ERR(slave, "Failed to receive CoE dictionary"
                 " request datagram: ");
@@ -342,7 +343,7 @@
         return;
     }
 
-    if (datagram->working_counter != 1) {
+    if (!ec_mbox_is_datagram_wc(mbox,1)) {
         fsm->state = ec_fsm_coe_error;
         EC_SLAVE_ERR(slave, "Reception of CoE dictionary request failed: ");
         ec_datagram_print_wc_error(datagram);
@@ -351,7 +352,7 @@
 
     fsm->jiffies_start = datagram->jiffies_sent;
 
-    ec_slave_mbox_prepare_check(slave, datagram); // can not fail.
+    ec_slave_mbox_prepare_check(slave, mbox); // can not fail.
     fsm->retries = EC_FSM_RETRIES;
     fsm->state = ec_fsm_coe_dict_check;
 }
@@ -364,20 +365,21 @@
 
 void ec_fsm_coe_dict_check(ec_fsm_coe_t *fsm /**< finite state machine */)
 {
-    ec_datagram_t *datagram = fsm->datagram;
+    ec_mailbox_t *mbox = fsm->mbox;
+    ec_datagram_t *datagram = mbox->datagram;
     ec_slave_t *slave = fsm->slave;
 
-    if (datagram->state == EC_DATAGRAM_TIMED_OUT && fsm->retries--)
-        return;
-
-    if (datagram->state != EC_DATAGRAM_RECEIVED) {
+    if (ec_mbox_is_datagram_state(mbox,EC_DATAGRAM_TIMED_OUT) && fsm->retries--)
+        return;
+
+    if (!ec_mbox_is_datagram_state(mbox,EC_DATAGRAM_RECEIVED)) {
         fsm->state = ec_fsm_coe_error;
         EC_SLAVE_ERR(slave, "Failed to receive CoE mailbox check datagram: ");
         ec_datagram_print_state(datagram);
         return;
     }
 
-    if (datagram->working_counter != 1) {
+    if (!ec_mbox_is_datagram_wc(mbox,1)) {
         fsm->state = ec_fsm_coe_error;
         EC_SLAVE_ERR(slave,"Reception of CoE mailbox check"
                 " datagram failed: ");
@@ -385,7 +387,7 @@
         return;
     }
 
-    if (!ec_slave_mbox_check(datagram)) {
+    if (!ec_slave_mbox_check(mbox)) {
         unsigned long diff_ms =
             (datagram->jiffies_received - fsm->jiffies_start) * 1000 / HZ;
         if (diff_ms >= EC_FSM_COE_DICT_TIMEOUT) {
@@ -395,13 +397,13 @@
             return;
         }
 
-        ec_slave_mbox_prepare_check(slave, datagram); // can not fail.
+        ec_slave_mbox_prepare_check(slave, mbox); // can not fail.
         fsm->retries = EC_FSM_RETRIES;
         return;
     }
 
     // Fetch response
-    ec_slave_mbox_prepare_fetch(slave, datagram); // can not fail.
+    ec_slave_mbox_prepare_fetch(slave, mbox); // can not fail.
     fsm->retries = EC_FSM_RETRIES;
     fsm->state = ec_fsm_coe_dict_response;
 }
@@ -415,18 +417,21 @@
 
 void ec_fsm_coe_dict_response(ec_fsm_coe_t *fsm /**< finite state machine */)
 {
-    ec_datagram_t *datagram = fsm->datagram;
+    ec_mailbox_t *mbox = fsm->mbox;
+    ec_datagram_t *datagram = mbox->datagram;
     ec_slave_t *slave = fsm->slave;
     uint8_t *data, mbox_prot;
     size_t rec_size;
     unsigned int sdo_count, i;
     uint16_t sdo_index, fragments_left;
     ec_sdo_t *sdo;
-
-    if (datagram->state == EC_DATAGRAM_TIMED_OUT && fsm->retries--)
+    bool first_segment;
+    size_t index_list_offset;
+
+    if (ec_mbox_is_datagram_state(mbox,EC_DATAGRAM_TIMED_OUT) && fsm->retries--)
         return; // FIXME: request again?
 
-    if (datagram->state != EC_DATAGRAM_RECEIVED) {
+    if (!ec_mbox_is_datagram_state(mbox,EC_DATAGRAM_RECEIVED)) {
         fsm->state = ec_fsm_coe_error;
         EC_SLAVE_ERR(slave, "Failed to receive CoE dictionary"
                 " response datagram: ");
@@ -434,14 +439,14 @@
         return;
     }
 
-    if (datagram->working_counter != 1) {
+    if (!ec_mbox_is_datagram_wc(mbox,1)) {
         fsm->state = ec_fsm_coe_error;
         EC_SLAVE_ERR(slave, "Reception of CoE dictionary response failed: ");
         ec_datagram_print_wc_error(datagram);
         return;
     }
 
-    data = ec_slave_mbox_fetch(slave, datagram, &mbox_prot, &rec_size);
+    data = ec_slave_mbox_fetch(slave, mbox, &mbox_prot, &rec_size);
     if (IS_ERR(data)) {
         fsm->state = ec_fsm_coe_error;
         return;
@@ -456,7 +461,7 @@
 
     if (ec_fsm_coe_check_emergency(fsm, data, rec_size)) {
         // check for CoE response again
-        ec_slave_mbox_prepare_check(slave, datagram); // can not fail.
+        ec_slave_mbox_prepare_check(slave, mbox); // can not fail.
         fsm->retries = EC_FSM_RETRIES;
         fsm->state = ec_fsm_coe_dict_check;
         return;
@@ -490,7 +495,7 @@
                     " Retrying...\n");
             ec_print_data(data, rec_size);
         }
-        ec_slave_mbox_prepare_check(slave, datagram); // can not fail.
+        ec_slave_mbox_prepare_check(slave, mbox); // can not fail.
         fsm->retries = EC_FSM_RETRIES;
         fsm->state = ec_fsm_coe_dict_check;
         return;
@@ -503,10 +508,13 @@
         return;
     }
 
-    sdo_count = (rec_size - 8) / 2;
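+    // follow-up fragments of the OD list response have a 2 byte shorter
+    // header than the first fragment, so the index list starts at a
+    // different offset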
+    first_segment = list_empty(&slave->sdo_dictionary);
+    index_list_offset = first_segment ? 8 : 6;
+
+    sdo_count = (rec_size - index_list_offset) / 2;
 
     for (i = 0; i < sdo_count; i++) {
-        sdo_index = EC_READ_U16(data + 8 + i * 2);
+        sdo_index = EC_READ_U16(data + index_list_offset + i * 2);
         if (!sdo_index) {
             EC_SLAVE_DBG(slave, 1, "SDO dictionary contains index 0x0000.\n");
             continue;
@@ -531,7 +539,7 @@
     if (EC_READ_U8(data + 2) & 0x80 || fragments_left) {
         // more messages waiting. check again.
         fsm->jiffies_start = datagram->jiffies_sent;
-        ec_slave_mbox_prepare_check(slave, datagram); // can not fail.
+        ec_slave_mbox_prepare_check(slave, mbox); // can not fail.
         fsm->retries = EC_FSM_RETRIES;
         fsm->state = ec_fsm_coe_dict_check;
         return;
@@ -546,7 +554,7 @@
     // fetch SDO descriptions
     fsm->sdo = list_entry(slave->sdo_dictionary.next, ec_sdo_t, list);
 
-    data = ec_slave_mbox_prepare_send(slave, datagram, 0x03, 8);
+    data = ec_slave_mbox_prepare_send(slave, mbox, 0x03, 8);
     if (IS_ERR(data)) {
         fsm->state = ec_fsm_coe_error;
         return;
@@ -571,13 +579,14 @@
 
 void ec_fsm_coe_dict_desc_request(ec_fsm_coe_t *fsm /**< finite state machine */)
 {
-    ec_datagram_t *datagram = fsm->datagram;
+    ec_mailbox_t *mbox = fsm->mbox;
+    ec_datagram_t *datagram = mbox->datagram;
     ec_slave_t *slave = fsm->slave;
 
-    if (datagram->state == EC_DATAGRAM_TIMED_OUT && fsm->retries--)
+    if (ec_mbox_is_datagram_state(mbox,EC_DATAGRAM_TIMED_OUT) && fsm->retries--)
         return; // FIXME: check for response first?
 
-    if (datagram->state != EC_DATAGRAM_RECEIVED) {
+    if (!ec_mbox_is_datagram_state(mbox,EC_DATAGRAM_RECEIVED)) {
         fsm->state = ec_fsm_coe_error;
         EC_SLAVE_ERR(slave, "Failed to receive CoE SDO"
                 " description request datagram: ");
@@ -585,7 +594,7 @@
         return;
     }
 
-    if (datagram->working_counter != 1) {
+    if (!ec_mbox_is_datagram_wc(mbox,1)) {
         fsm->state = ec_fsm_coe_error;
         EC_SLAVE_ERR(slave, "Reception of CoE SDO description"
                 " request failed: ");
@@ -595,7 +604,7 @@
 
     fsm->jiffies_start = datagram->jiffies_sent;
 
-    ec_slave_mbox_prepare_check(slave, datagram); // can not fail.
+    ec_slave_mbox_prepare_check(slave, mbox); // can not fail.
     fsm->retries = EC_FSM_RETRIES;
     fsm->state = ec_fsm_coe_dict_desc_check;
 }
@@ -608,20 +617,21 @@
 
 void ec_fsm_coe_dict_desc_check(ec_fsm_coe_t *fsm /**< finite state machine */)
 {
-    ec_datagram_t *datagram = fsm->datagram;
+    ec_mailbox_t *mbox = fsm->mbox;
+    ec_datagram_t *datagram = mbox->datagram;
     ec_slave_t *slave = fsm->slave;
 
-    if (datagram->state == EC_DATAGRAM_TIMED_OUT && fsm->retries--)
-        return;
-
-    if (datagram->state != EC_DATAGRAM_RECEIVED) {
+    if (ec_mbox_is_datagram_state(mbox,EC_DATAGRAM_TIMED_OUT) && fsm->retries--)
+        return;
+
+    if (!ec_mbox_is_datagram_state(mbox,EC_DATAGRAM_RECEIVED)) {
         fsm->state = ec_fsm_coe_error;
         EC_SLAVE_ERR(slave, "Failed to receive CoE mailbox check datagram: ");
         ec_datagram_print_state(datagram);
         return;
     }
 
-    if (datagram->working_counter != 1) {
+    if (!ec_mbox_is_datagram_wc(mbox,1)) {
         fsm->state = ec_fsm_coe_error;
         EC_SLAVE_ERR(slave, "Reception of CoE mailbox check"
                 " datagram failed: ");
@@ -629,7 +639,7 @@
         return;
     }
 
-    if (!ec_slave_mbox_check(datagram)) {
+    if (!ec_slave_mbox_check(mbox)) {
         unsigned long diff_ms =
             (datagram->jiffies_received - fsm->jiffies_start) * 1000 / HZ;
         if (diff_ms >= EC_FSM_COE_DICT_TIMEOUT) {
@@ -640,13 +650,13 @@
             return;
         }
 
-        ec_slave_mbox_prepare_check(slave, datagram); // can not fail.
+        ec_slave_mbox_prepare_check(slave, mbox); // can not fail.
         fsm->retries = EC_FSM_RETRIES;
         return;
     }
 
     // Fetch response
-    ec_slave_mbox_prepare_fetch(slave, datagram); // can not fail.
+    ec_slave_mbox_prepare_fetch(slave, mbox); // can not fail.
     fsm->retries = EC_FSM_RETRIES;
     fsm->state = ec_fsm_coe_dict_desc_response;
 }
@@ -661,16 +671,17 @@
 void ec_fsm_coe_dict_desc_response(ec_fsm_coe_t *fsm
                                    /**< finite state machine */)
 {
-    ec_datagram_t *datagram = fsm->datagram;
+    ec_mailbox_t *mbox = fsm->mbox;
+    ec_datagram_t *datagram = mbox->datagram;
     ec_slave_t *slave = fsm->slave;
     ec_sdo_t *sdo = fsm->sdo;
     uint8_t *data, mbox_prot;
     size_t rec_size, name_size;
 
-    if (datagram->state == EC_DATAGRAM_TIMED_OUT && fsm->retries--)
+    if (ec_mbox_is_datagram_state(mbox,EC_DATAGRAM_TIMED_OUT) && fsm->retries--)
         return; // FIXME: request again?
 
-    if (datagram->state != EC_DATAGRAM_RECEIVED) {
+    if (!ec_mbox_is_datagram_state(mbox,EC_DATAGRAM_RECEIVED)) {
         fsm->state = ec_fsm_coe_error;
         EC_SLAVE_ERR(slave, "Failed to receive CoE SDO description"
                 " response datagram: ");
@@ -678,7 +689,7 @@
         return;
     }
 
-    if (datagram->working_counter != 1) {
+    if (!ec_mbox_is_datagram_wc(mbox,1)) {
         fsm->state = ec_fsm_coe_error;
         EC_SLAVE_ERR(slave, "Reception of CoE SDO description"
                 " response failed: ");
@@ -686,7 +697,7 @@
         return;
     }
 
-    data = ec_slave_mbox_fetch(slave, datagram, &mbox_prot, &rec_size);
+    data = ec_slave_mbox_fetch(slave, mbox, &mbox_prot, &rec_size);
     if (IS_ERR(data)) {
         fsm->state = ec_fsm_coe_error;
         return;
@@ -701,7 +712,7 @@
 
     if (ec_fsm_coe_check_emergency(fsm, data, rec_size)) {
         // check for CoE response again
-        ec_slave_mbox_prepare_check(slave, datagram); // can not fail.
+        ec_slave_mbox_prepare_check(slave, mbox); // can not fail.
         fsm->retries = EC_FSM_RETRIES;
         fsm->state = ec_fsm_coe_dict_desc_check;
         return;
@@ -739,7 +750,7 @@
             ec_print_data(data, rec_size);
         }
         // check for CoE response again
-        ec_slave_mbox_prepare_check(slave, datagram); // can not fail.
+        ec_slave_mbox_prepare_check(slave, mbox); // can not fail.
         fsm->retries = EC_FSM_RETRIES;
         fsm->state = ec_fsm_coe_dict_desc_check;
         return;
@@ -777,7 +788,7 @@
 
     fsm->subindex = 0;
 
-    data = ec_slave_mbox_prepare_send(slave, datagram, 0x03, 10);
+    data = ec_slave_mbox_prepare_send(slave, mbox, 0x03, 10);
     if (IS_ERR(data)) {
         fsm->state = ec_fsm_coe_error;
         return;
@@ -805,13 +816,14 @@
 void ec_fsm_coe_dict_entry_request(ec_fsm_coe_t *fsm
                                    /**< finite state machine */)
 {
-    ec_datagram_t *datagram = fsm->datagram;
+    ec_mailbox_t *mbox = fsm->mbox;
+    ec_datagram_t *datagram = mbox->datagram;
     ec_slave_t *slave = fsm->slave;
 
-    if (datagram->state == EC_DATAGRAM_TIMED_OUT && fsm->retries--)
+    if (ec_mbox_is_datagram_state(mbox,EC_DATAGRAM_TIMED_OUT) && fsm->retries--)
         return; // FIXME: check for response first?
 
-    if (datagram->state != EC_DATAGRAM_RECEIVED) {
+    if (!ec_mbox_is_datagram_state(mbox,EC_DATAGRAM_RECEIVED)) {
         fsm->state = ec_fsm_coe_error;
         EC_SLAVE_ERR(slave, "Failed to receive CoE SDO entry"
                 " request datagram: ");
@@ -819,7 +831,7 @@
         return;
     }
 
-    if (datagram->working_counter != 1) {
+    if (!ec_mbox_is_datagram_wc(mbox,1)) {
         fsm->state = ec_fsm_coe_error;
         EC_SLAVE_ERR(slave, "Reception of CoE SDO entry request failed: ");
         ec_datagram_print_wc_error(datagram);
@@ -828,7 +840,7 @@
 
     fsm->jiffies_start = datagram->jiffies_sent;
 
-    ec_slave_mbox_prepare_check(slave, datagram); // can not fail
+    ec_slave_mbox_prepare_check(slave, mbox); // can not fail
     fsm->retries = EC_FSM_RETRIES;
     fsm->state = ec_fsm_coe_dict_entry_check;
 }
@@ -842,20 +854,21 @@
 void ec_fsm_coe_dict_entry_check(ec_fsm_coe_t *fsm
                                  /**< finite state machine */)
 {
-    ec_datagram_t *datagram = fsm->datagram;
+    ec_mailbox_t *mbox = fsm->mbox;
+    ec_datagram_t *datagram = mbox->datagram;
     ec_slave_t *slave = fsm->slave;
 
-    if (datagram->state == EC_DATAGRAM_TIMED_OUT && fsm->retries--)
-        return;
-
-    if (datagram->state != EC_DATAGRAM_RECEIVED) {
+    if (ec_mbox_is_datagram_state(mbox,EC_DATAGRAM_TIMED_OUT) && fsm->retries--)
+        return;
+
+    if (!ec_mbox_is_datagram_state(mbox,EC_DATAGRAM_RECEIVED)) {
         fsm->state = ec_fsm_coe_error;
         EC_SLAVE_ERR(slave, "Failed to receive CoE mailbox check datagram: ");
         ec_datagram_print_state(datagram);
         return;
     }
 
-    if (datagram->working_counter != 1) {
+    if (!ec_mbox_is_datagram_wc(mbox,1)) {
         fsm->state = ec_fsm_coe_error;
         EC_SLAVE_ERR(slave, "Reception of CoE mailbox check"
                 " datagram failed: ");
@@ -863,7 +876,7 @@
         return;
     }
 
-    if (!ec_slave_mbox_check(datagram)) {
+    if (!ec_slave_mbox_check(mbox)) {
         unsigned long diff_ms =
             (datagram->jiffies_received - fsm->jiffies_start) * 1000 / HZ;
         if (diff_ms >= EC_FSM_COE_DICT_TIMEOUT) {
@@ -874,13 +887,13 @@
             return;
         }
 
-        ec_slave_mbox_prepare_check(slave, datagram); // can not fail.
+        ec_slave_mbox_prepare_check(slave, mbox); // can not fail.
         fsm->retries = EC_FSM_RETRIES;
         return;
     }
 
     // Fetch response
-    ec_slave_mbox_prepare_fetch(slave, datagram); // can not fail.
+    ec_slave_mbox_prepare_fetch(slave, mbox); // can not fail.
     fsm->retries = EC_FSM_RETRIES;
     fsm->state = ec_fsm_coe_dict_entry_response;
 }
@@ -895,7 +908,8 @@
 void ec_fsm_coe_dict_entry_response(ec_fsm_coe_t *fsm
                                     /**< finite state machine */)
 {
-    ec_datagram_t *datagram = fsm->datagram;
+    ec_mailbox_t *mbox = fsm->mbox;
+    ec_datagram_t *datagram = mbox->datagram;
     ec_slave_t *slave = fsm->slave;
     ec_sdo_t *sdo = fsm->sdo;
     uint8_t *data, mbox_prot;
@@ -903,10 +917,10 @@
     ec_sdo_entry_t *entry;
     u16 word;
 
-    if (datagram->state == EC_DATAGRAM_TIMED_OUT && fsm->retries--)
+    if (ec_mbox_is_datagram_state(mbox,EC_DATAGRAM_TIMED_OUT) && fsm->retries--)
         return; // FIXME: request again?
 
-    if (datagram->state != EC_DATAGRAM_RECEIVED) {
+    if (!ec_mbox_is_datagram_state(mbox,EC_DATAGRAM_RECEIVED)) {
         fsm->state = ec_fsm_coe_error;
         EC_SLAVE_ERR(slave, "Failed to receive CoE SDO"
                 " description response datagram: ");
@@ -914,7 +928,7 @@
         return;
     }
 
-    if (datagram->working_counter != 1) {
+    if (!ec_mbox_is_datagram_wc(mbox,1)) {
         fsm->state = ec_fsm_coe_error;
         EC_SLAVE_ERR(slave, "Reception of CoE SDO description"
                 " response failed: ");
@@ -922,7 +936,7 @@
         return;
     }
 
-    data = ec_slave_mbox_fetch(slave, datagram, &mbox_prot, &rec_size);
+    data = ec_slave_mbox_fetch(slave, mbox, &mbox_prot, &rec_size);
     if (IS_ERR(data)) {
         fsm->state = ec_fsm_coe_error;
         return;
@@ -937,7 +951,7 @@
 
     if (ec_fsm_coe_check_emergency(fsm, data, rec_size)) {
         // check for CoE response again
-        ec_slave_mbox_prepare_check(slave, datagram); // can not fail.
+        ec_slave_mbox_prepare_check(slave, mbox); // can not fail.
         fsm->retries = EC_FSM_RETRIES;
         fsm->state = ec_fsm_coe_dict_entry_check;
         return;
@@ -978,7 +992,7 @@
             ec_print_data(data, rec_size);
         }
         // check for CoE response again
-        ec_slave_mbox_prepare_check(slave, datagram); // can not fail.
+        ec_slave_mbox_prepare_check(slave, mbox); // can not fail.
         fsm->retries = EC_FSM_RETRIES;
         fsm->state = ec_fsm_coe_dict_entry_check;
         return;
@@ -1030,7 +1044,7 @@
     if (fsm->subindex < sdo->max_subindex) {
         fsm->subindex++;
 
-        data = ec_slave_mbox_prepare_send(slave, datagram, 0x03, 10);
+        data = ec_slave_mbox_prepare_send(slave, mbox, 0x03, 10);
         if (IS_ERR(data)) {
             fsm->state = ec_fsm_coe_error;
             return;
@@ -1053,7 +1067,7 @@
     if (fsm->sdo->list.next != &slave->sdo_dictionary) {
         fsm->sdo = list_entry(fsm->sdo->list.next, ec_sdo_t, list);
 
-        data = ec_slave_mbox_prepare_send(slave, datagram, 0x03, 8);
+        data = ec_slave_mbox_prepare_send(slave, mbox, 0x03, 8);
         if (IS_ERR(data)) {
             fsm->state = ec_fsm_coe_error;
             return;
@@ -1083,7 +1097,7 @@
         ec_fsm_coe_t *fsm /**< finite state machine */
         )
 {
-    ec_datagram_t *datagram = fsm->datagram;
+    ec_mailbox_t *mbox = fsm->mbox;
     ec_slave_t *slave = fsm->slave;
     ec_sdo_request_t *request = fsm->request;
     uint8_t *data;
@@ -1117,7 +1131,7 @@
     }
 
     if (request->data_size <= 4) { // use expedited transfer type
-        data = ec_slave_mbox_prepare_send(slave, datagram, 0x03,
+        data = ec_slave_mbox_prepare_send(slave, mbox, 0x03,
                 EC_COE_DOWN_REQ_HEADER_SIZE);
         if (IS_ERR(data)) {
             request->errno = PTR_ERR(data);
@@ -1159,7 +1173,7 @@
             data_size = required_data_size;
         }
 
-        data = ec_slave_mbox_prepare_send(slave, datagram, 0x03,
+        data = ec_slave_mbox_prepare_send(slave, mbox, 0x03,
                 data_size);
         if (IS_ERR(data)) {
             request->errno = PTR_ERR(data);
@@ -1208,14 +1222,15 @@
 
 void ec_fsm_coe_down_request(ec_fsm_coe_t *fsm /**< finite state machine */)
 {
-    ec_datagram_t *datagram = fsm->datagram;
+    ec_mailbox_t *mbox = fsm->mbox;
+    ec_datagram_t *datagram = mbox->datagram;
     ec_slave_t *slave = fsm->slave;
     unsigned long diff_ms;
 
-    if (datagram->state == EC_DATAGRAM_TIMED_OUT && fsm->retries--)
+    if (ec_mbox_is_datagram_state(mbox,EC_DATAGRAM_TIMED_OUT) && fsm->retries--)
         return; // FIXME: check for response first?
 
-    if (datagram->state != EC_DATAGRAM_RECEIVED) {
+    if (!ec_mbox_is_datagram_state(mbox,EC_DATAGRAM_RECEIVED)) {
         fsm->request->errno = EIO;
         fsm->state = ec_fsm_coe_error;
         EC_SLAVE_ERR(slave, "Failed to receive CoE download"
@@ -1226,8 +1241,8 @@
 
     diff_ms = (jiffies - fsm->request->jiffies_sent) * 1000 / HZ;
 
-    if (datagram->working_counter != 1) {
-        if (!datagram->working_counter) {
+    if (!ec_mbox_is_datagram_wc(mbox,1)) {
+        if (ec_mbox_is_datagram_wc(mbox,0)) {
             if (diff_ms < fsm->request->response_timeout) {
 #if DEBUG_RETRIES
                 EC_SLAVE_DBG(slave, 1, "Slave did not respond to SDO"
@@ -1256,7 +1271,7 @@
 
     fsm->jiffies_start = datagram->jiffies_sent;
 
-    ec_slave_mbox_prepare_check(slave, datagram); // can not fail.
+    ec_slave_mbox_prepare_check(slave, mbox); // can not fail.
     fsm->retries = EC_FSM_RETRIES;
     fsm->state = ec_fsm_coe_down_check;
 }
@@ -1267,13 +1282,14 @@
  */
 void ec_fsm_coe_down_check(ec_fsm_coe_t *fsm /**< finite state machine */)
 {
-    ec_datagram_t *datagram = fsm->datagram;
+    ec_mailbox_t *mbox = fsm->mbox;
+    ec_datagram_t *datagram = mbox->datagram;
     ec_slave_t *slave = fsm->slave;
 
-    if (datagram->state == EC_DATAGRAM_TIMED_OUT && fsm->retries--)
-        return;
-
-    if (datagram->state != EC_DATAGRAM_RECEIVED) {
+    if (ec_mbox_is_datagram_state(mbox,EC_DATAGRAM_TIMED_OUT) && fsm->retries--)
+        return;
+
+    if (!ec_mbox_is_datagram_state(mbox,EC_DATAGRAM_RECEIVED)) {
         fsm->request->errno = EIO;
         fsm->state = ec_fsm_coe_error;
         EC_SLAVE_ERR(slave, "Failed to receive CoE mailbox check"
@@ -1282,7 +1298,7 @@
         return;
     }
 
-    if (datagram->working_counter != 1) {
+    if (!ec_mbox_is_datagram_wc(mbox,1)) {
         fsm->request->errno = EIO;
         fsm->state = ec_fsm_coe_error;
         EC_SLAVE_ERR(slave, "Reception of CoE mailbox check"
@@ -1291,7 +1307,7 @@
         return;
     }
 
-    if (!ec_slave_mbox_check(datagram)) {
+    if (!ec_slave_mbox_check(mbox)) {
         unsigned long diff_ms =
             (datagram->jiffies_received - fsm->jiffies_start) * 1000 / HZ;
         if (diff_ms >= fsm->request->response_timeout) {
@@ -1303,13 +1319,13 @@
             return;
         }
 
-        ec_slave_mbox_prepare_check(slave, datagram); // can not fail.
+        ec_slave_mbox_prepare_check(slave, mbox); // can not fail.
         fsm->retries = EC_FSM_RETRIES;
         return;
     }
 
     // Fetch response
-    ec_slave_mbox_prepare_fetch(slave, datagram); // can not fail.
+    ec_slave_mbox_prepare_fetch(slave, mbox); // can not fail.
     fsm->retries = EC_FSM_RETRIES;
     fsm->state = ec_fsm_coe_down_response;
 }
@@ -1322,7 +1338,7 @@
         ec_fsm_coe_t *fsm /**< finite state machine */
         )
 {
-    ec_datagram_t *datagram = fsm->datagram;
+    ec_mailbox_t *mbox = fsm->mbox;
     ec_slave_t *slave = fsm->slave;
     ec_sdo_request_t *request = fsm->request;
     size_t max_segment_size =
@@ -1349,7 +1365,7 @@
             + EC_COE_DOWN_SEG_MIN_DATA_SIZE;
     }
 
-    data = ec_slave_mbox_prepare_send(slave, datagram, 0x03,
+    data = ec_slave_mbox_prepare_send(slave, mbox, 0x03,
             data_size);
     if (IS_ERR(data)) {
         request->errno = PTR_ERR(data);
@@ -1389,16 +1405,17 @@
 
 void ec_fsm_coe_down_response(ec_fsm_coe_t *fsm /**< finite state machine */)
 {
-    ec_datagram_t *datagram = fsm->datagram;
+    ec_mailbox_t *mbox = fsm->mbox;
+    ec_datagram_t *datagram = mbox->datagram;
     ec_slave_t *slave = fsm->slave;
     uint8_t *data, mbox_prot;
     size_t rec_size;
     ec_sdo_request_t *request = fsm->request;
 
-    if (datagram->state == EC_DATAGRAM_TIMED_OUT && fsm->retries--)
+    if (ec_mbox_is_datagram_state(mbox,EC_DATAGRAM_TIMED_OUT) && fsm->retries--)
         return; // FIXME: request again?
 
-    if (datagram->state != EC_DATAGRAM_RECEIVED) {
+    if (!ec_mbox_is_datagram_state(mbox,EC_DATAGRAM_RECEIVED)) {
         request->errno = EIO;
         fsm->state = ec_fsm_coe_error;
         EC_SLAVE_ERR(slave, "Failed to receive CoE download"
@@ -1407,7 +1424,7 @@
         return;
     }
 
-    if (datagram->working_counter != 1) {
+    if (!ec_mbox_is_datagram_wc(mbox,1)) {
         request->errno = EIO;
         fsm->state = ec_fsm_coe_error;
         EC_SLAVE_ERR(slave, "Reception of CoE download response failed: ");
@@ -1415,7 +1432,7 @@
         return;
     }
 
-    data = ec_slave_mbox_fetch(slave, datagram, &mbox_prot, &rec_size);
+    data = ec_slave_mbox_fetch(slave, mbox, &mbox_prot, &rec_size);
     if (IS_ERR(data)) {
         request->errno = PTR_ERR(data);
         fsm->state = ec_fsm_coe_error;
@@ -1432,7 +1449,7 @@
 
     if (ec_fsm_coe_check_emergency(fsm, data, rec_size)) {
         // check for CoE response again
-        ec_slave_mbox_prepare_check(slave, datagram); // can not fail.
+        ec_slave_mbox_prepare_check(slave, mbox); // can not fail.
         fsm->retries = EC_FSM_RETRIES;
         fsm->state = ec_fsm_coe_down_check;
         return;
@@ -1484,7 +1501,7 @@
             ec_print_data(data, rec_size);
         }
         // check for CoE response again
-        ec_slave_mbox_prepare_check(slave, datagram); // can not fail.
+        ec_slave_mbox_prepare_check(slave, mbox); // can not fail.
         fsm->retries = EC_FSM_RETRIES;
         fsm->state = ec_fsm_coe_down_check;
         return;
@@ -1506,13 +1523,14 @@
 
 void ec_fsm_coe_down_seg_check(ec_fsm_coe_t *fsm /**< finite state machine */)
 {
-    ec_datagram_t *datagram = fsm->datagram;
+    ec_mailbox_t *mbox = fsm->mbox;
+    ec_datagram_t *datagram = mbox->datagram;
     ec_slave_t *slave = fsm->slave;
 
-    if (datagram->state == EC_DATAGRAM_TIMED_OUT && fsm->retries--)
-        return;
-
-    if (datagram->state != EC_DATAGRAM_RECEIVED) {
+    if (ec_mbox_is_datagram_state(mbox,EC_DATAGRAM_TIMED_OUT) && fsm->retries--)
+        return;
+
+    if (!ec_mbox_is_datagram_state(mbox,EC_DATAGRAM_RECEIVED)) {
         fsm->request->errno = EIO;
         fsm->state = ec_fsm_coe_error;
         EC_SLAVE_ERR(slave, "Failed to receive CoE mailbox check datagram: ");
@@ -1520,7 +1538,7 @@
         return;
     }
 
-    if (datagram->working_counter != 1) {
+    if (!ec_mbox_is_datagram_wc(mbox,1)) {
         fsm->request->errno = EIO;
         fsm->state = ec_fsm_coe_error;
         EC_SLAVE_ERR(slave, "Reception of CoE mailbox segment check"
@@ -1529,7 +1547,7 @@
         return;
     }
 
-    if (!ec_slave_mbox_check(datagram)) {
+    if (!ec_slave_mbox_check(mbox)) {
         unsigned long diff_ms =
             (datagram->jiffies_received - fsm->jiffies_start) * 1000 / HZ;
         if (diff_ms >= fsm->request->response_timeout) {
@@ -1540,13 +1558,13 @@
             return;
         }
 
-        ec_slave_mbox_prepare_check(slave, datagram); // can not fail.
+        ec_slave_mbox_prepare_check(slave, mbox); // can not fail.
         fsm->retries = EC_FSM_RETRIES;
         return;
     }
 
     // Fetch response
-    ec_slave_mbox_prepare_fetch(slave, datagram); // can not fail.
+    ec_slave_mbox_prepare_fetch(slave, mbox); // can not fail.
     fsm->retries = EC_FSM_RETRIES;
     fsm->state = ec_fsm_coe_down_seg_response;
 }
@@ -1562,16 +1580,17 @@
         ec_fsm_coe_t *fsm /**< finite state machine */
         )
 {
-    ec_datagram_t *datagram = fsm->datagram;
+    ec_mailbox_t *mbox = fsm->mbox;
+    ec_datagram_t *datagram = mbox->datagram;
     ec_slave_t *slave = fsm->slave;
     uint8_t *data, mbox_prot;
     size_t rec_size;
     ec_sdo_request_t *request = fsm->request;
 
-    if (datagram->state == EC_DATAGRAM_TIMED_OUT && fsm->retries--)
+    if (ec_mbox_is_datagram_state(mbox,EC_DATAGRAM_TIMED_OUT) && fsm->retries--)
         return; // FIXME: request again?
 
-    if (datagram->state != EC_DATAGRAM_RECEIVED) {
+    if (!ec_mbox_is_datagram_state(mbox,EC_DATAGRAM_RECEIVED)) {
         request->errno = EIO;
         fsm->state = ec_fsm_coe_error;
         EC_SLAVE_ERR(slave, "Failed to receive CoE download response"
@@ -1580,7 +1599,7 @@
         return;
     }
 
-    if (datagram->working_counter != 1) {
+    if (!ec_mbox_is_datagram_wc(mbox,1)) {
         request->errno = EIO;
         fsm->state = ec_fsm_coe_error;
         EC_SLAVE_ERR(slave, "Reception of CoE download response failed: ");
@@ -1588,7 +1607,7 @@
         return;
     }
 
-    data = ec_slave_mbox_fetch(slave, datagram, &mbox_prot, &rec_size);
+    data = ec_slave_mbox_fetch(slave, mbox, &mbox_prot, &rec_size);
     if (IS_ERR(data)) {
         request->errno = PTR_ERR(data);
         fsm->state = ec_fsm_coe_error;
@@ -1605,7 +1624,7 @@
 
     if (ec_fsm_coe_check_emergency(fsm, data, rec_size)) {
         // check for CoE response again
-        ec_slave_mbox_prepare_check(slave, datagram); // can not fail.
+        ec_slave_mbox_prepare_check(slave, mbox); // can not fail.
         fsm->retries = EC_FSM_RETRIES;
         fsm->state = ec_fsm_coe_down_check;
         return;
@@ -1655,7 +1674,7 @@
             ec_print_data(data, rec_size);
         }
         // check for CoE response again
-        ec_slave_mbox_prepare_check(slave, datagram); // can not fail.
+        ec_slave_mbox_prepare_check(slave, mbox); // can not fail.
         fsm->retries = EC_FSM_RETRIES;
         fsm->state = ec_fsm_coe_down_seg_check;
         return;
@@ -1686,7 +1705,7 @@
 
 void ec_fsm_coe_up_start(ec_fsm_coe_t *fsm /**< finite state machine */)
 {
-    ec_datagram_t *datagram = fsm->datagram;
+    ec_mailbox_t *mbox = fsm->mbox;
     ec_slave_t *slave = fsm->slave;
     ec_master_t *master = slave->master;
     ec_sdo_request_t *request = fsm->request;
@@ -1702,7 +1721,7 @@
         return;
     }
 
-    data = ec_slave_mbox_prepare_send(slave, datagram, 0x03, 10);
+    data = ec_slave_mbox_prepare_send(slave, mbox, 0x03, 10);
     if (IS_ERR(data)) {
         request->errno = PTR_ERR(data);
         fsm->state = ec_fsm_coe_error;
@@ -1734,14 +1753,15 @@
 
 void ec_fsm_coe_up_request(ec_fsm_coe_t *fsm /**< finite state machine */)
 {
-    ec_datagram_t *datagram = fsm->datagram;
+    ec_mailbox_t *mbox = fsm->mbox;
+    ec_datagram_t *datagram = mbox->datagram;
     ec_slave_t *slave = fsm->slave;
     unsigned long diff_ms;
 
-    if (datagram->state == EC_DATAGRAM_TIMED_OUT && fsm->retries--)
+    if (ec_mbox_is_datagram_state(mbox,EC_DATAGRAM_TIMED_OUT) && fsm->retries--)
         return; // FIXME: check for response first?
 
-    if (datagram->state != EC_DATAGRAM_RECEIVED) {
+    if (!ec_mbox_is_datagram_state(mbox,EC_DATAGRAM_RECEIVED)) {
         fsm->request->errno = EIO;
         fsm->state = ec_fsm_coe_error;
         EC_SLAVE_ERR(slave, "Failed to receive CoE upload request: ");
@@ -1751,8 +1771,8 @@
 
     diff_ms = (jiffies - fsm->request->jiffies_sent) * 1000 / HZ;
 
-    if (datagram->working_counter != 1) {
-        if (!datagram->working_counter) {
+    if (!ec_mbox_is_datagram_wc(mbox,1)) {
+        if (ec_mbox_is_datagram_wc(mbox,0)) {
             if (diff_ms < fsm->request->response_timeout) {
 #if DEBUG_RETRIES
                 EC_SLAVE_DBG(slave, 1, "Slave did not respond to"
@@ -1781,7 +1801,7 @@
 
     fsm->jiffies_start = datagram->jiffies_sent;
 
-    ec_slave_mbox_prepare_check(slave, datagram); // can not fail.
+    ec_slave_mbox_prepare_check(slave, mbox); // can not fail.
     fsm->retries = EC_FSM_RETRIES;
     fsm->state = ec_fsm_coe_up_check;
 }
@@ -1794,13 +1814,14 @@
 
 void ec_fsm_coe_up_check(ec_fsm_coe_t *fsm /**< finite state machine */)
 {
-    ec_datagram_t *datagram = fsm->datagram;
+    ec_mailbox_t *mbox = fsm->mbox;
+    ec_datagram_t *datagram = mbox->datagram;
     ec_slave_t *slave = fsm->slave;
 
-    if (datagram->state == EC_DATAGRAM_TIMED_OUT && fsm->retries--)
-        return;
-
-    if (datagram->state != EC_DATAGRAM_RECEIVED) {
+    if (ec_mbox_is_datagram_state(mbox,EC_DATAGRAM_TIMED_OUT) && fsm->retries--)
+        return;
+
+    if (!ec_mbox_is_datagram_state(mbox,EC_DATAGRAM_RECEIVED)) {
         fsm->request->errno = EIO;
         fsm->state = ec_fsm_coe_error;
         EC_SLAVE_ERR(slave, "Failed to receive CoE mailbox check datagram: ");
@@ -1808,7 +1829,7 @@
         return;
     }
 
-    if (datagram->working_counter != 1) {
+    if (!ec_mbox_is_datagram_wc(mbox,1)) {
         fsm->request->errno = EIO;
         fsm->state = ec_fsm_coe_error;
         EC_SLAVE_ERR(slave, "Reception of CoE mailbox check"
@@ -1817,7 +1838,7 @@
         return;
     }
 
-    if (!ec_slave_mbox_check(datagram)) {
+    if (!ec_slave_mbox_check(mbox)) {
         unsigned long diff_ms =
             (datagram->jiffies_received - fsm->jiffies_start) * 1000 / HZ;
         if (diff_ms >= fsm->request->response_timeout) {
@@ -1829,13 +1850,13 @@
             return;
         }
 
-        ec_slave_mbox_prepare_check(slave, datagram); // can not fail.
+        ec_slave_mbox_prepare_check(slave, mbox); // can not fail.
         fsm->retries = EC_FSM_RETRIES;
         return;
     }
 
     // Fetch response
-    ec_slave_mbox_prepare_fetch(slave, datagram); // can not fail.
+    ec_slave_mbox_prepare_fetch(slave, mbox); // can not fail.
     fsm->retries = EC_FSM_RETRIES;
     fsm->state = ec_fsm_coe_up_response;
 }
@@ -1849,7 +1870,7 @@
         )
 {
     uint8_t *data =
-        ec_slave_mbox_prepare_send(fsm->slave, fsm->datagram, 0x03, 10);
+        ec_slave_mbox_prepare_send(fsm->slave, fsm->mbox, 0x03, 10);
     if (IS_ERR(data)) {
         fsm->request->errno = PTR_ERR(data);
         fsm->state = ec_fsm_coe_error;
@@ -1876,7 +1897,8 @@
 
 void ec_fsm_coe_up_response(ec_fsm_coe_t *fsm /**< finite state machine */)
 {
-    ec_datagram_t *datagram = fsm->datagram;
+    ec_mailbox_t *mbox = fsm->mbox;
+    ec_datagram_t *datagram = mbox->datagram;
     ec_slave_t *slave = fsm->slave;
     ec_master_t *master = slave->master;
     uint16_t rec_index;
@@ -1886,10 +1908,10 @@
     unsigned int expedited, size_specified;
     int ret;
 
-    if (datagram->state == EC_DATAGRAM_TIMED_OUT && fsm->retries--)
+    if (ec_mbox_is_datagram_state(mbox,EC_DATAGRAM_TIMED_OUT) && fsm->retries--)
         return; // FIXME: request again?
 
-    if (datagram->state != EC_DATAGRAM_RECEIVED) {
+    if (!ec_mbox_is_datagram_state(mbox,EC_DATAGRAM_RECEIVED)) {
         request->errno = EIO;
         fsm->state = ec_fsm_coe_error;
         EC_SLAVE_ERR(slave, "Failed to receive CoE upload response"
@@ -1898,7 +1920,7 @@
         return;
     }
 
-    if (datagram->working_counter != 1) {
+    if (!ec_mbox_is_datagram_wc(mbox,1)) {
         request->errno = EIO;
         fsm->state = ec_fsm_coe_error;
         EC_SLAVE_ERR(slave, "Reception of CoE upload response failed: ");
@@ -1906,7 +1928,7 @@
         return;
     }
 
-    data = ec_slave_mbox_fetch(slave, datagram, &mbox_prot, &rec_size);
+    data = ec_slave_mbox_fetch(slave, mbox, &mbox_prot, &rec_size);
     if (IS_ERR(data)) {
         request->errno = PTR_ERR(data);
         fsm->state = ec_fsm_coe_error;
@@ -1928,7 +1950,7 @@
 
     if (ec_fsm_coe_check_emergency(fsm, data, rec_size)) {
         // check for CoE response again
-        ec_slave_mbox_prepare_check(slave, datagram); // can not fail.
+        ec_slave_mbox_prepare_check(slave, mbox); // can not fail.
         fsm->retries = EC_FSM_RETRIES;
         fsm->state = ec_fsm_coe_up_check;
         return;
@@ -1979,7 +2001,7 @@
         ec_print_data(data, rec_size);
 
         // check for CoE response again
-        ec_slave_mbox_prepare_check(slave, datagram); // can not fail.
+        ec_slave_mbox_prepare_check(slave, mbox); // can not fail.
         fsm->retries = EC_FSM_RETRIES;
         fsm->state = ec_fsm_coe_up_check;
         return;
@@ -2075,13 +2097,14 @@
 
 void ec_fsm_coe_up_seg_request(ec_fsm_coe_t *fsm /**< finite state machine */)
 {
-    ec_datagram_t *datagram = fsm->datagram;
+    ec_mailbox_t *mbox = fsm->mbox;
+    ec_datagram_t *datagram = mbox->datagram;
     ec_slave_t *slave = fsm->slave;
 
-    if (datagram->state == EC_DATAGRAM_TIMED_OUT && fsm->retries--)
+    if (ec_mbox_is_datagram_state(mbox,EC_DATAGRAM_TIMED_OUT) && fsm->retries--)
         return; // FIXME: check for response first?
 
-    if (datagram->state != EC_DATAGRAM_RECEIVED) {
+    if (!ec_mbox_is_datagram_state(mbox,EC_DATAGRAM_RECEIVED)) {
         fsm->request->errno = EIO;
         fsm->state = ec_fsm_coe_error;
         EC_SLAVE_ERR(slave, "Failed to receive CoE upload segment"
@@ -2090,7 +2113,7 @@
         return;
     }
 
-    if (datagram->working_counter != 1) {
+    if (!ec_mbox_is_datagram_wc(mbox,1)) {
         fsm->request->errno = EIO;
         fsm->state = ec_fsm_coe_error;
         EC_SLAVE_ERR(slave, "Reception of CoE upload segment"
@@ -2101,7 +2124,7 @@
 
     fsm->jiffies_start = datagram->jiffies_sent;
 
-    ec_slave_mbox_prepare_check(slave, datagram); // can not fail.
+    ec_slave_mbox_prepare_check(slave, mbox); // can not fail.
     fsm->retries = EC_FSM_RETRIES;
     fsm->state = ec_fsm_coe_up_seg_check;
 }
@@ -2114,13 +2137,14 @@
 
 void ec_fsm_coe_up_seg_check(ec_fsm_coe_t *fsm /**< finite state machine */)
 {
-    ec_datagram_t *datagram = fsm->datagram;
+    ec_mailbox_t *mbox = fsm->mbox;
+    ec_datagram_t *datagram = mbox->datagram;
     ec_slave_t *slave = fsm->slave;
 
-    if (datagram->state == EC_DATAGRAM_TIMED_OUT && fsm->retries--)
-        return;
-
-    if (datagram->state != EC_DATAGRAM_RECEIVED) {
+    if (ec_mbox_is_datagram_state(mbox,EC_DATAGRAM_TIMED_OUT) && fsm->retries--)
+        return;
+
+    if (!ec_mbox_is_datagram_state(mbox,EC_DATAGRAM_RECEIVED)) {
         fsm->request->errno = EIO;
         fsm->state = ec_fsm_coe_error;
         EC_SLAVE_ERR(slave, "Failed to receive CoE mailbox check"
@@ -2129,7 +2153,7 @@
         return;
     }
 
-    if (datagram->working_counter != 1) {
+    if (!ec_mbox_is_datagram_wc(mbox,1)) {
         fsm->request->errno = EIO;
         fsm->state = ec_fsm_coe_error;
         EC_SLAVE_ERR(slave, "Reception of CoE mailbox check datagram"
@@ -2138,7 +2162,7 @@
         return;
     }
 
-    if (!ec_slave_mbox_check(datagram)) {
+    if (!ec_slave_mbox_check(mbox)) {
         unsigned long diff_ms =
             (datagram->jiffies_received - fsm->jiffies_start) * 1000 / HZ;
         if (diff_ms >= fsm->request->response_timeout) {
@@ -2149,13 +2173,13 @@
             return;
         }
 
-        ec_slave_mbox_prepare_check(slave, datagram); // can not fail.
+        ec_slave_mbox_prepare_check(slave, mbox); // can not fail.
         fsm->retries = EC_FSM_RETRIES;
         return;
     }
 
     // Fetch response
-    ec_slave_mbox_prepare_fetch(slave, datagram); // can not fail.
+    ec_slave_mbox_prepare_fetch(slave, mbox); // can not fail.
     fsm->retries = EC_FSM_RETRIES;
     fsm->state = ec_fsm_coe_up_seg_response;
 }
@@ -2169,7 +2193,8 @@
 
 void ec_fsm_coe_up_seg_response(ec_fsm_coe_t *fsm /**< finite state machine */)
 {
-    ec_datagram_t *datagram = fsm->datagram;
+    ec_mailbox_t *mbox = fsm->mbox;
+    ec_datagram_t *datagram = mbox->datagram;
     ec_slave_t *slave = fsm->slave;
     ec_master_t *master = slave->master;
     uint8_t *data, mbox_prot;
@@ -2177,10 +2202,10 @@
     ec_sdo_request_t *request = fsm->request;
     unsigned int last_segment;
 
-    if (datagram->state == EC_DATAGRAM_TIMED_OUT && fsm->retries--)
+    if (ec_mbox_is_datagram_state(mbox,EC_DATAGRAM_TIMED_OUT) && fsm->retries--)
         return; // FIXME: request again?
 
-    if (datagram->state != EC_DATAGRAM_RECEIVED) {
+    if (!ec_mbox_is_datagram_state(mbox,EC_DATAGRAM_RECEIVED)) {
         request->errno = EIO;
         fsm->state = ec_fsm_coe_error;
         EC_SLAVE_ERR(slave, "Failed to receive CoE upload segment"
@@ -2189,7 +2214,7 @@
         return;
     }
 
-    if (datagram->working_counter != 1) {
+    if (!ec_mbox_is_datagram_wc(mbox,1)) {
         request->errno = EIO;
         fsm->state = ec_fsm_coe_error;
         EC_SLAVE_ERR(slave, "Reception of CoE upload segment"
@@ -2198,7 +2223,7 @@
         return;
     }
 
-    data = ec_slave_mbox_fetch(slave, datagram, &mbox_prot, &rec_size);
+    data = ec_slave_mbox_fetch(slave, mbox, &mbox_prot, &rec_size);
     if (IS_ERR(data)) {
         request->errno = PTR_ERR(data);
         fsm->state = ec_fsm_coe_error;
@@ -2220,7 +2245,7 @@
 
     if (ec_fsm_coe_check_emergency(fsm, data, rec_size)) {
         // check for CoE response again
-        ec_slave_mbox_prepare_check(slave, datagram); // can not fail.
+        ec_slave_mbox_prepare_check(slave, mbox); // can not fail.
         fsm->retries = EC_FSM_RETRIES;
         fsm->state = ec_fsm_coe_up_seg_check;
         return;
@@ -2253,7 +2278,7 @@
             ec_print_data(data, rec_size);
         }
         // check for CoE response again
-        ec_slave_mbox_prepare_check(slave, datagram); // can not fail.
+        ec_slave_mbox_prepare_check(slave, mbox); // can not fail.
         fsm->retries = EC_FSM_RETRIES;
         fsm->state = ec_fsm_coe_up_seg_check;
         return;
--- a/master/fsm_coe.h	Wed Mar 23 08:06:58 2011 +0100
+++ b/master/fsm_coe.h	Wed Apr 13 22:06:28 2011 +0200
@@ -39,6 +39,7 @@
 
 #include "globals.h"
 #include "datagram.h"
+#include "mailbox.h"
 #include "slave.h"
 #include "sdo.h"
 #include "sdo_request.h"
@@ -51,7 +52,7 @@
  */
 struct ec_fsm_coe {
     ec_slave_t *slave; /**< slave the FSM runs on */
-    ec_datagram_t *datagram; /**< datagram used in the state machine */
+    ec_mailbox_t *mbox; /**< mailbox used in the state machine */
     unsigned int retries; /**< retries upon datagram timeout */
 
     void (*state)(ec_fsm_coe_t *); /**< CoE state function */
@@ -67,7 +68,7 @@
 
 /*****************************************************************************/
 
-void ec_fsm_coe_init(ec_fsm_coe_t *, ec_datagram_t *);
+void ec_fsm_coe_init(ec_fsm_coe_t *, ec_mailbox_t *);
 void ec_fsm_coe_clear(ec_fsm_coe_t *);
 
 void ec_fsm_coe_dictionary(ec_fsm_coe_t *, ec_slave_t *);
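
The CoE state machine above now works on an ec_mailbox_t instead of a bare ec_datagram_t. The wrapper and the ec_mbox_is_datagram_state()/ec_mbox_is_datagram_wc() helpers are declared in master/mailbox.h, which is not part of this excerpt; the following is a minimal sketch of how those helpers are assumed to behave, matching the way the hunks above call them (the exact struct layout is an assumption):

/* Minimal sketch, assuming the mailbox simply wraps the datagram it owns;
 * the real definition lives in master/mailbox.h. */
typedef struct {
    ec_datagram_t *datagram; /* datagram carrying the mailbox payload */
    /* further fields (e.g. a second datagram for the reduced mailbox
     * frame size option) are omitted here */
} ec_mailbox_t;

/* True if the mailbox' datagram is in the given state. */
static inline int ec_mbox_is_datagram_state(ec_mailbox_t *mbox,
        ec_datagram_state_t state)
{
    return mbox->datagram->state == state;
}

/* True if the mailbox' datagram returned the given working counter. */
static inline int ec_mbox_is_datagram_wc(ec_mailbox_t *mbox,
        uint16_t working_counter)
{
    return mbox->datagram->working_counter == working_counter;
}
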
--- a/master/fsm_foe.c	Wed Mar 23 08:06:58 2011 +0100
+++ b/master/fsm_foe.c	Wed Apr 13 22:06:28 2011 +0200
@@ -107,11 +107,11 @@
 /** Constructor.
  */
 void ec_fsm_foe_init(ec_fsm_foe_t *fsm, /**< finite state machine */
-                     ec_datagram_t *datagram /**< datagram */
+                     ec_mailbox_t *mbox /**< mailbox */
                      )
 {
-    fsm->state     = NULL;
-    fsm->datagram  = datagram;
+    fsm->state = NULL;
+    fsm->mbox = mbox;
 }
 
 /*****************************************************************************/
@@ -207,7 +207,7 @@
     }
 
     data = ec_slave_mbox_prepare_send(fsm->slave,
-            fsm->datagram, EC_MBOX_TYPE_FILEACCESS,
+            fsm->mbox, EC_MBOX_TYPE_FILEACCESS,
             current_size + EC_FOE_HEADER_SIZE);
     if (IS_ERR(data))
         return -1;
@@ -238,7 +238,7 @@
 
     current_size = fsm->tx_filename_len;
 
-    data = ec_slave_mbox_prepare_send(fsm->slave, fsm->datagram,
+    data = ec_slave_mbox_prepare_send(fsm->slave, fsm->mbox,
             EC_MBOX_TYPE_FILEACCESS, current_size + EC_FOE_HEADER_SIZE);
     if (IS_ERR(data))
         return -1;
@@ -308,21 +308,22 @@
         ec_fsm_foe_t *fsm /**< FoE statemachine. */
         )
 {
-    ec_datagram_t *datagram = fsm->datagram;
+    ec_mailbox_t *mbox = fsm->mbox;
+    ec_datagram_t *datagram = mbox->datagram;
     ec_slave_t *slave = fsm->slave;
 
 #ifdef DEBUG_FOE
     printk("ec_fsm_foe_ack_check()\n");
 #endif
 
-    if (datagram->state != EC_DATAGRAM_RECEIVED) {
+    if (!ec_mbox_is_datagram_state(mbox,EC_DATAGRAM_RECEIVED)) {
         ec_foe_set_rx_error(fsm, FOE_RECEIVE_ERROR);
         EC_SLAVE_ERR(slave, "Failed to receive FoE mailbox check datagram: ");
         ec_datagram_print_state(datagram);
         return;
     }
 
-    if (datagram->working_counter != 1) {
+    if (!ec_mbox_is_datagram_wc(mbox,1)) {
         // slave did not put anything in the mailbox yet
         ec_foe_set_rx_error(fsm, FOE_WC_ERROR);
         EC_SLAVE_ERR(slave, "Reception of FoE mailbox check datagram"
@@ -331,7 +332,7 @@
         return;
     }
 
-    if (!ec_slave_mbox_check(datagram)) {
+    if (!ec_slave_mbox_check(mbox)) {
         unsigned long diff_ms =
             (datagram->jiffies_received - fsm->jiffies_start) * 1000 / HZ;
         if (diff_ms >= EC_FSM_FOE_TIMEOUT) {
@@ -340,13 +341,13 @@
             return;
         }
 
-        ec_slave_mbox_prepare_check(slave, datagram); // can not fail.
+        ec_slave_mbox_prepare_check(slave, mbox); // can not fail.
         fsm->retries = EC_FSM_RETRIES;
         return;
     }
 
     // Fetch response
-    ec_slave_mbox_prepare_fetch(slave, datagram); // can not fail.
+    ec_slave_mbox_prepare_fetch(slave, mbox); // can not fail.
 
     fsm->retries = EC_FSM_RETRIES;
     fsm->state = ec_fsm_foe_state_ack_read;
@@ -360,7 +361,8 @@
         ec_fsm_foe_t *fsm /**< FoE statemachine. */
         )
 {
-    ec_datagram_t *datagram = fsm->datagram;
+    ec_mailbox_t *mbox = fsm->mbox;
+    ec_datagram_t *datagram = mbox->datagram;
     ec_slave_t *slave = fsm->slave;
     uint8_t *data, mbox_prot;
     uint8_t opCode;
@@ -370,21 +372,21 @@
     printk("ec_fsm_foe_ack_read()\n");
 #endif
 
-    if (datagram->state != EC_DATAGRAM_RECEIVED) {
+    if (!ec_mbox_is_datagram_state(mbox,EC_DATAGRAM_RECEIVED)) {
         ec_foe_set_rx_error(fsm, FOE_RECEIVE_ERROR);
         EC_SLAVE_ERR(slave, "Failed to receive FoE ack response datagram: ");
         ec_datagram_print_state(datagram);
         return;
     }
 
-    if (datagram->working_counter != 1) {
+    if (!ec_mbox_is_datagram_wc(mbox,1)) {
         ec_foe_set_rx_error(fsm, FOE_WC_ERROR);
         EC_SLAVE_ERR(slave, "Reception of FoE ack response failed: ");
         ec_datagram_print_wc_error(datagram);
         return;
     }
 
-    if (!(data = ec_slave_mbox_fetch(fsm->slave, datagram,
+    if (!(data = ec_slave_mbox_fetch(fsm->slave, fsm->mbox,
                     &mbox_prot, &rec_size))) {
         ec_foe_set_tx_error(fsm, FOE_PROT_ERROR);
         return;
@@ -440,21 +442,22 @@
         ec_fsm_foe_t *fsm /**< FoE statemachine. */
         )
 {
-    ec_datagram_t *datagram = fsm->datagram;
+    ec_mailbox_t *mbox = fsm->mbox;
+    ec_datagram_t *datagram = mbox->datagram;
     ec_slave_t *slave = fsm->slave;
 
 #ifdef DEBUG_FOE
     printk("ec_foe_state_sent_wrq()\n");
 #endif
 
-    if (datagram->state != EC_DATAGRAM_RECEIVED) {
+    if (!ec_mbox_is_datagram_state(mbox,EC_DATAGRAM_RECEIVED)) {
         ec_foe_set_rx_error(fsm, FOE_RECEIVE_ERROR);
         EC_SLAVE_ERR(slave, "Failed to send FoE WRQ: ");
         ec_datagram_print_state(datagram);
         return;
     }
 
-    if (datagram->working_counter != 1) {
+    if (!ec_mbox_is_datagram_wc(mbox,1)) {
         // slave did not put anything in the mailbox yet
         ec_foe_set_rx_error(fsm, FOE_WC_ERROR);
         EC_SLAVE_ERR(slave, "Reception of FoE WRQ failed: ");
@@ -464,7 +467,7 @@
 
     fsm->jiffies_start = datagram->jiffies_sent;
 
-    ec_slave_mbox_prepare_check(fsm->slave, datagram); // can not fail.
+    ec_slave_mbox_prepare_check(fsm->slave, fsm->mbox); // can not fail.
 
     fsm->retries = EC_FSM_RETRIES;
     fsm->state = ec_fsm_foe_state_ack_check;
@@ -481,28 +484,29 @@
         ec_fsm_foe_t *fsm /**< Foe statemachine. */
         )
 {
-    ec_datagram_t *datagram = fsm->datagram;
+    ec_mailbox_t *mbox = fsm->mbox;
+    ec_datagram_t *datagram = mbox->datagram;
     ec_slave_t *slave = fsm->slave;
 
 #ifdef DEBUG_FOE
     printk("ec_fsm_foe_state_data_sent()\n");
 #endif
 
-    if (fsm->datagram->state != EC_DATAGRAM_RECEIVED) {
+    if (!ec_mbox_is_datagram_state(mbox,EC_DATAGRAM_RECEIVED)) {
         ec_foe_set_tx_error(fsm, FOE_RECEIVE_ERROR);
         EC_SLAVE_ERR(slave, "Failed to receive FoE ack response datagram: ");
         ec_datagram_print_state(datagram);
         return;
     }
 
-    if (fsm->datagram->working_counter != 1) {
+    if (!ec_mbox_is_datagram_wc(mbox,1)) {
         ec_foe_set_tx_error(fsm, FOE_WC_ERROR);
         EC_SLAVE_ERR(slave, "Reception of FoE data send failed: ");
         ec_datagram_print_wc_error(datagram);
         return;
     }
 
-    ec_slave_mbox_prepare_check(fsm->slave, fsm->datagram);
+    ec_slave_mbox_prepare_check(slave, mbox);
     fsm->jiffies_start = jiffies;
     fsm->retries = EC_FSM_RETRIES;
     fsm->state = ec_fsm_foe_state_ack_check;
@@ -519,7 +523,7 @@
 
     current_size = fsm->rx_filename_len;
 
-    data = ec_slave_mbox_prepare_send(fsm->slave, fsm->datagram,
+    data = ec_slave_mbox_prepare_send(fsm->slave, fsm->mbox,
             EC_MBOX_TYPE_FILEACCESS, current_size + EC_FOE_HEADER_SIZE);
     if (IS_ERR(data))
         return -1;
@@ -546,7 +550,7 @@
 {
     uint8_t *data;
 
-    data = ec_slave_mbox_prepare_send(foe->slave, foe->datagram,
+    data = ec_slave_mbox_prepare_send(foe->slave, foe->mbox,
             EC_MBOX_TYPE_FILEACCESS, EC_FOE_HEADER_SIZE);
     if (IS_ERR(data))
         return -1;
@@ -568,21 +572,22 @@
         ec_fsm_foe_t *fsm /**< FoE statemachine. */
         )
 {
-    ec_datagram_t *datagram = fsm->datagram;
+    ec_mailbox_t *mbox = fsm->mbox;
+    ec_datagram_t *datagram = mbox->datagram;
     ec_slave_t *slave = fsm->slave;
 
 #ifdef DEBUG_FOE
     printk("ec_foe_state_rrq_sent()\n");
 #endif
 
-    if (datagram->state != EC_DATAGRAM_RECEIVED) {
+    if (!ec_mbox_is_datagram_state(mbox,EC_DATAGRAM_RECEIVED)) {
         ec_foe_set_rx_error(fsm, FOE_RECEIVE_ERROR);
         EC_SLAVE_ERR(slave, "Failed to send FoE RRQ: ");
         ec_datagram_print_state(datagram);
         return;
     }
 
-    if (datagram->working_counter != 1) {
+    if (!ec_mbox_is_datagram_wc(mbox,1)) {
         // slave did not put anything in the mailbox yet
         ec_foe_set_rx_error(fsm, FOE_WC_ERROR);
         EC_SLAVE_ERR(slave, "Reception of FoE RRQ failed: ");
@@ -592,7 +597,7 @@
 
     fsm->jiffies_start = datagram->jiffies_sent;
 
-    ec_slave_mbox_prepare_check(fsm->slave, datagram); // can not fail.
+    ec_slave_mbox_prepare_check(fsm->slave, fsm->mbox); // can not fail.
 
     fsm->retries = EC_FSM_RETRIES;
     fsm->state = ec_fsm_foe_state_data_check;
@@ -657,28 +662,29 @@
         ec_fsm_foe_t *fsm /**< FoE statemachine. */
         )
 {
-    ec_datagram_t *datagram = fsm->datagram;
+    ec_mailbox_t *mbox = fsm->mbox;
+    ec_datagram_t *datagram = mbox->datagram;
     ec_slave_t *slave = fsm->slave;
 
 #ifdef DEBUG_FOE
     printk("ec_fsm_foe_state_data_check()\n");
 #endif
 
-    if (datagram->state != EC_DATAGRAM_RECEIVED) {
+    if (!ec_mbox_is_datagram_state(mbox,EC_DATAGRAM_RECEIVED)) {
         ec_foe_set_rx_error(fsm, FOE_RECEIVE_ERROR);
         EC_SLAVE_ERR(slave, "Failed to send FoE DATA READ: ");
         ec_datagram_print_state(datagram);
         return;
     }
 
-    if (datagram->working_counter != 1) {
+    if (!ec_mbox_is_datagram_wc(mbox,1)) {
         ec_foe_set_rx_error(fsm, FOE_WC_ERROR);
         EC_SLAVE_ERR(slave, "Reception of FoE DATA READ: ");
         ec_datagram_print_wc_error(datagram);
         return;
     }
 
-    if (!ec_slave_mbox_check(datagram)) {
+    if (!ec_slave_mbox_check(mbox)) {
         unsigned long diff_ms =
             (datagram->jiffies_received - fsm->jiffies_start) * 1000 / HZ;
         if (diff_ms >= EC_FSM_FOE_TIMEOUT) {
@@ -687,13 +693,13 @@
             return;
         }
 
-        ec_slave_mbox_prepare_check(slave, datagram); // can not fail.
+        ec_slave_mbox_prepare_check(slave, mbox); // can not fail.
         fsm->retries = EC_FSM_RETRIES;
         return;
     }
 
     // Fetch response
-    ec_slave_mbox_prepare_fetch(slave, datagram); // can not fail.
+    ec_slave_mbox_prepare_fetch(slave, mbox); // can not fail.
 
     fsm->retries = EC_FSM_RETRIES;
     fsm->state = ec_fsm_foe_state_data_read;
@@ -711,28 +717,29 @@
     size_t rec_size;
     uint8_t *data, opCode, packet_no, mbox_prot;
 
-    ec_datagram_t *datagram = fsm->datagram;
+    ec_mailbox_t *mbox = fsm->mbox;
+    ec_datagram_t *datagram = mbox->datagram;
     ec_slave_t *slave = fsm->slave;
 
 #ifdef DEBUG_FOE
     printk("ec_fsm_foe_state_data_read()\n");
 #endif
 
-    if (datagram->state != EC_DATAGRAM_RECEIVED) {
+    if (!ec_mbox_is_datagram_state(mbox,EC_DATAGRAM_RECEIVED)) {
         ec_foe_set_rx_error(fsm, FOE_RECEIVE_ERROR);
         EC_SLAVE_ERR(slave, "Failed to receive FoE DATA READ datagram: ");
         ec_datagram_print_state(datagram);
         return;
     }
 
-    if (datagram->working_counter != 1) {
+    if (!ec_mbox_is_datagram_wc(mbox,1)) {
         ec_foe_set_rx_error(fsm, FOE_WC_ERROR);
         EC_SLAVE_ERR(slave, "Reception of FoE DATA READ failed: ");
         ec_datagram_print_wc_error(datagram);
         return;
     }
 
-    if (!(data = ec_slave_mbox_fetch(slave, datagram, &mbox_prot, &rec_size))) {
+    if (!(data = ec_slave_mbox_fetch(slave, mbox, &mbox_prot, &rec_size))) {
         ec_foe_set_rx_error(fsm, FOE_MBOX_FETCH_ERROR);
         return;
     }
@@ -831,21 +838,22 @@
         ec_fsm_foe_t *fsm /**< FoE statemachine. */
         )
 {
-    ec_datagram_t *datagram = fsm->datagram;
+    ec_mailbox_t *mbox = fsm->mbox;
+    ec_datagram_t *datagram = mbox->datagram;
     ec_slave_t *slave = fsm->slave;
 
 #ifdef DEBUG_FOE
     printk("ec_foe_state_sent_ack()\n");
 #endif
 
-    if (datagram->state != EC_DATAGRAM_RECEIVED) {
+    if (!ec_mbox_is_datagram_state(mbox,EC_DATAGRAM_RECEIVED)) {
         ec_foe_set_rx_error(fsm, FOE_RECEIVE_ERROR);
         EC_SLAVE_ERR(slave, "Failed to send FoE ACK: ");
         ec_datagram_print_state(datagram);
         return;
     }
 
-    if (datagram->working_counter != 1) {
+    if (!ec_mbox_is_datagram_wc(mbox,1)) {
         // slave did not put anything into the mailbox yet
         ec_foe_set_rx_error(fsm, FOE_WC_ERROR);
         EC_SLAVE_ERR(slave, "Reception of FoE ACK failed: ");
@@ -855,7 +863,7 @@
 
     fsm->jiffies_start = datagram->jiffies_sent;
 
-    ec_slave_mbox_prepare_check(fsm->slave, datagram); // can not fail.
+    ec_slave_mbox_prepare_check(fsm->slave, fsm->mbox); // can not fail.
 
     if (fsm->rx_last_packet) {
         fsm->rx_expected_packet_no = 0;
--- a/master/fsm_foe.h	Wed Mar 23 08:06:58 2011 +0100
+++ b/master/fsm_foe.h	Wed Apr 13 22:06:28 2011 +0200
@@ -51,7 +51,7 @@
  */
 struct ec_fsm_foe {
     ec_slave_t *slave; /**< slave the FSM runs on */
-    ec_datagram_t *datagram; /**< datagram used in the state machine */
+    ec_mailbox_t *mbox; /**< mailbox used in the state machine */
     unsigned int retries; /**< retries upon datagram timeout */
 
     void (*state)(ec_fsm_foe_t *); /**< FoE state function */
@@ -80,7 +80,7 @@
 
 /*****************************************************************************/
 
-void ec_fsm_foe_init(ec_fsm_foe_t *, ec_datagram_t *);
+void ec_fsm_foe_init(ec_fsm_foe_t *, ec_mailbox_t *);
 void ec_fsm_foe_clear(ec_fsm_foe_t *);
 
 int ec_fsm_foe_exec(ec_fsm_foe_t *);
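
With the signature change above, callers hand the FoE state machine a mailbox at init time. A short usage sketch, assuming a per-slave mailbox object (the field name slave->mbox is an assumption; the real wiring is done in fsm_slave.c further below):

/* Usage sketch only; 'slave->mbox' is an assumed field name. */
ec_fsm_foe_t fsm_foe;

ec_fsm_foe_init(&fsm_foe, &slave->mbox);

/* per cycle: run the FSM and queue its mailbox datagrams while a
 * transfer is in progress */
if (ec_fsm_foe_exec(&fsm_foe)) {
    ec_slave_mbox_queue_datagrams(slave, &slave->mbox);
}
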
--- a/master/fsm_master.c	Wed Mar 23 08:06:58 2011 +0100
+++ b/master/fsm_master.c	Wed Apr 13 22:06:28 2011 +0200
@@ -48,7 +48,11 @@
 
 /** Time difference [ns] to tolerate without setting a new system time offset.
  */
+#ifdef EC_HAVE_CYCLES
+#define EC_SYSTEM_TIME_TOLERANCE_NS 10000
+#else
 #define EC_SYSTEM_TIME_TOLERANCE_NS 100000000
+#endif
 
 /*****************************************************************************/
 
@@ -81,6 +85,7 @@
 {
     fsm->master = master;
     fsm->datagram = datagram;
+    fsm->mbox = &master->fsm_mbox;
     fsm->state = ec_fsm_master_state_start;
     fsm->idle = 0;
     fsm->link_state = 0;
@@ -89,7 +94,7 @@
     fsm->slave_states = EC_SLAVE_STATE_UNKNOWN;
 
     // init sub-state-machines
-    ec_fsm_coe_init(&fsm->fsm_coe, fsm->datagram);
+    ec_fsm_coe_init(&fsm->fsm_coe, fsm->mbox);
     ec_fsm_pdo_init(&fsm->fsm_pdo, &fsm->fsm_coe);
     ec_fsm_change_init(&fsm->fsm_change, fsm->datagram);
     ec_fsm_slave_config_init(&fsm->fsm_slave_config, fsm->datagram,
@@ -129,12 +134,11 @@
         ec_fsm_master_t *fsm /**< Master state machine. */
         )
 {
-    if (fsm->datagram->state == EC_DATAGRAM_SENT
-        || fsm->datagram->state == EC_DATAGRAM_QUEUED) {
+    if (ec_mbox_is_datagram_state(fsm->mbox,EC_DATAGRAM_QUEUED)
+        || ec_mbox_is_datagram_state(fsm->mbox,EC_DATAGRAM_SENT)) {
         // datagram was not sent or received yet.
         return 0;
     }
-
     fsm->state(fsm);
     return 1;
 }
@@ -211,10 +215,6 @@
         EC_MASTER_DBG(master, 1, "Master state machine detected "
                 "link down. Clearing slave list.\n");
 
-#ifdef EC_EOE
-        ec_master_eoe_stop(master);
-        ec_master_clear_eoe_handlers(master);
-#endif
         ec_master_clear_slaves(master);
         fsm->slave_states = 0x00;
     }
@@ -238,12 +238,12 @@
     }
 
     if (fsm->rescan_required) {
-        down(&master->scan_sem);
+        ec_mutex_lock(&master->scan_mutex);
         if (!master->allow_scan) {
-            up(&master->scan_sem);
+            ec_mutex_unlock(&master->scan_mutex);
         } else {
             master->scan_busy = 1;
-            up(&master->scan_sem);
+            ec_mutex_unlock(&master->scan_mutex);
 
             // clear all slaves and scan the bus
             fsm->rescan_required = 0;
@@ -251,7 +251,6 @@
             fsm->scan_jiffies = jiffies;
 
 #ifdef EC_EOE
-            ec_master_eoe_stop(master);
             ec_master_clear_eoe_handlers(master);
 #endif
             ec_master_clear_slaves(master);
@@ -392,6 +391,7 @@
                     "datagram size (%zu)!\n", request->length,
                     fsm->datagram->mem_size);
             request->state = EC_INT_REQUEST_FAILURE;
+            kref_put(&request->refcount,ec_master_reg_request_release);
             wake_up(&master->reg_queue);
             continue;
         }
@@ -571,12 +571,12 @@
                 || slave->force_config) && !slave->error_flag) {
 
         // Start slave configuration, if it is allowed.
-        down(&master->config_sem);
+        ec_mutex_lock(&master->config_mutex);
         if (!master->allow_config) {
-            up(&master->config_sem);
+            ec_mutex_unlock(&master->config_mutex);
         } else {
             master->config_busy = 1;
-            up(&master->config_sem);
+            ec_mutex_unlock(&master->config_mutex);
 
             if (master->debug_level) {
                 char old_state[EC_STATE_STRING_SIZE],
@@ -802,11 +802,6 @@
     // Attach slave configurations
     ec_master_attach_slave_configs(master);
 
-#ifdef EC_EOE
-    // check if EoE processing has to be started
-    ec_master_eoe_start(master);
-#endif
-
     if (master->slave_count) {
         fsm->slave = master->slaves; // begin with first slave
         ec_fsm_master_enter_write_system_times(fsm);
@@ -889,26 +884,27 @@
         ec_fsm_master_t *fsm, /**< Master state machine. */
         u64 system_time, /**< System time register. */
         u64 old_offset, /**< Time offset register. */
-        unsigned long jiffies_since_read /**< Jiffies for correction. */
+        u64 correction /**< Time correction [ns]. */
         )
 {
     ec_slave_t *slave = fsm->slave;
-    u32 correction, system_time32, old_offset32, new_offset;
+    u32 correction32, system_time32, old_offset32, new_offset;
     s32 time_diff;
 
-    system_time32 = (u32) system_time;
-    old_offset32 = (u32) old_offset;
-
-    // correct read system time by elapsed time since read operation
-    correction = jiffies_since_read * 1000 / HZ * 1000000;
-    system_time32 += correction;
-    time_diff = (u32) slave->master->app_time - system_time32;
+    system_time32 = (u32) system_time;
+    // correct the read system time by the time elapsed between the read
+    // operation and the setting of the application start time
+    correction32 = (u32) correction;
+    system_time32 -= correction32;
+    old_offset32 = (u32) old_offset;
+
+    time_diff = (u32) slave->master->app_start_time - system_time32;
 
     EC_SLAVE_DBG(slave, 1, "DC system time offset calculation:"
             " system_time=%u (corrected with %u),"
-            " app_time=%llu, diff=%i\n",
-            system_time32, correction,
-            slave->master->app_time, time_diff);
+            " app_start_time=%llu, diff=%i\n",
+            system_time32, correction32,
+            slave->master->app_start_time, time_diff);
 
     if (EC_ABS(time_diff) > EC_SYSTEM_TIME_TOLERANCE_NS) {
         new_offset = time_diff + old_offset32;
@@ -929,23 +925,23 @@
         ec_fsm_master_t *fsm, /**< Master state machine. */
         u64 system_time, /**< System time register. */
         u64 old_offset, /**< Time offset register. */
-        unsigned long jiffies_since_read /**< Jiffies for correction. */
+        u64 correction /**< Time correction [ns]. */
         )
 {
     ec_slave_t *slave = fsm->slave;
-    u64 new_offset, correction;
+    u64 new_offset;
     s64 time_diff;
 
-    // correct read system time by elapsed time since read operation
-    correction = (u64) (jiffies_since_read * 1000 / HZ) * 1000000;
-    system_time += correction;
-    time_diff = fsm->slave->master->app_time - system_time;
+    // correct the read system time by the time elapsed between the read
+    // operation and the setting of the application start time
+    system_time -= correction;
+    time_diff = fsm->slave->master->app_start_time - system_time;
 
     EC_SLAVE_DBG(slave, 1, "DC system time offset calculation:"
             " system_time=%llu (corrected with %llu),"
-            " app_time=%llu, diff=%lli\n",
+            " app_start_time=%llu, diff=%lli\n",
             system_time, correction,
-            slave->master->app_time, time_diff);
+            slave->master->app_start_time, time_diff);
 
     if (EC_ABS(time_diff) > EC_SYSTEM_TIME_TOLERANCE_NS) {
         new_offset = time_diff + old_offset;
@@ -969,8 +965,7 @@
 {
     ec_datagram_t *datagram = fsm->datagram;
     ec_slave_t *slave = fsm->slave;
-    u64 system_time, old_offset, new_offset;
-    unsigned long jiffies_since_read;
+    u64 system_time, old_offset, new_offset, correction;
 
     if (datagram->state == EC_DATAGRAM_TIMED_OUT && fsm->retries--)
         return;
@@ -993,14 +988,25 @@
 
     system_time = EC_READ_U64(datagram->data);     // 0x0910
     old_offset = EC_READ_U64(datagram->data + 16); // 0x0920
-    jiffies_since_read = jiffies - datagram->jiffies_sent;
+    /* correct the read system time by the time elapsed between the read
+       operation and the setting of the application start time */
+#ifdef EC_HAVE_CYCLES
+    correction =
+        (datagram->cycles_sent - slave->master->dc_cycles_app_start_time)
+        * 1000000LL;
+    do_div(correction, cpu_khz);
+#else
+    correction =
+        (u64) ((datagram->jiffies_sent - slave->master->dc_jiffies_app_start_time) * 1000 / HZ)
+        * 1000000;
+#endif
 
     if (slave->base_dc_range == EC_DC_32) {
         new_offset = ec_fsm_master_dc_offset32(fsm,
-                system_time, old_offset, jiffies_since_read);
+                system_time, old_offset, correction);
     } else {
         new_offset = ec_fsm_master_dc_offset64(fsm,
-                system_time, old_offset, jiffies_since_read);
+                system_time, old_offset, correction);
     }
 
     // set DC system time offset and transmission delay
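
The correction term above converts the interval between capturing the application start time and sending the datagram into nanoseconds, so the slave's system time can be compared against app_start_time rather than a jiffies-based estimate. A small worked sketch of the EC_HAVE_CYCLES branch (cpu_khz is the kernel's TSC frequency in kHz):

/* Sketch of the cycles-to-nanoseconds conversion used above. */
u64 cycles_elapsed = datagram->cycles_sent
        - slave->master->dc_cycles_app_start_time;
u64 correction_ns = cycles_elapsed * 1000000LL;
do_div(correction_ns, cpu_khz);
/* Example: 2,400,000 elapsed cycles on a 2.4 GHz CPU (cpu_khz == 2400000)
 * yield 2,400,000 * 1e6 / 2,400,000 = 1,000,000 ns = 1 ms. */
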
@@ -1063,6 +1069,7 @@
     if (!ec_fsm_sii_success(&fsm->fsm_sii)) {
         EC_SLAVE_ERR(slave, "Failed to write SII data.\n");
         request->state = EC_INT_REQUEST_FAILURE;
+        kref_put(&request->refcount,ec_master_sii_write_request_release);
         wake_up(&master->sii_queue);
         ec_fsm_master_restart(fsm);
         return;
@@ -1091,6 +1098,7 @@
     // TODO: Evaluate other SII contents!
 
     request->state = EC_INT_REQUEST_SUCCESS;
+    kref_put(&request->refcount,ec_master_sii_write_request_release);
     wake_up(&master->sii_queue);
 
     // check for another SII write request
@@ -1184,6 +1192,7 @@
                 " request datagram: ");
         ec_datagram_print_state(datagram);
         request->state = EC_INT_REQUEST_FAILURE;
+        kref_put(&request->refcount,ec_master_reg_request_release);
         wake_up(&master->reg_queue);
         ec_fsm_master_restart(fsm);
         return;
@@ -1198,6 +1207,7 @@
                 EC_MASTER_ERR(master, "Failed to allocate %zu bytes"
                         " of memory for register data.\n", request->length);
                 request->state = EC_INT_REQUEST_FAILURE;
+                kref_put(&request->refcount,ec_master_reg_request_release);
                 wake_up(&master->reg_queue);
                 ec_fsm_master_restart(fsm);
                 return;
@@ -1212,6 +1222,7 @@
         EC_MASTER_ERR(master, "Register request failed.\n");
     }
 
+    kref_put(&request->refcount,ec_master_reg_request_release);
     wake_up(&master->reg_queue);
 
     // check for another register request
@@ -1222,3 +1233,72 @@
 }
 
 /*****************************************************************************/
+
+/** Called by kref_put() when the SII write request's refcount reaches zero.
+ *
+ */
+void ec_master_sii_write_request_release(struct kref *ref)
+{
+    ec_sii_write_request_t *request = container_of(ref, ec_sii_write_request_t, refcount);
+    if (request->slave)
+        EC_SLAVE_DBG(request->slave, 1, "Releasing SII write request %p.\n",request);
+    kfree(request->words);
+    kfree(request);
+}
+
+/*****************************************************************************/
+
+/** Called by kref_put() when the register request's refcount reaches zero.
+ *
+ */
+void ec_master_reg_request_release(struct kref *ref)
+{
+    ec_reg_request_t *request = container_of(ref, ec_reg_request_t, refcount);
+    if (request->slave)
+        EC_SLAVE_DBG(request->slave, 1, "Releasing reg request %p.\n",request);
+    if (request->data)
+        kfree(request->data);
+    kfree(request);
+}
+
+/*****************************************************************************/
+
+/** Called by kref_put() when the SDO request's refcount reaches zero.
+ *
+ */
+void ec_master_sdo_request_release(struct kref *ref)
+{
+    ec_master_sdo_request_t *request = container_of(ref, ec_master_sdo_request_t, refcount);
+    if (request->slave)
+        EC_SLAVE_DBG(request->slave, 1, "Releasing SDO request %p.\n",request);
+    ec_sdo_request_clear(&request->req);
+    kfree(request);
+}
+
+/*****************************************************************************/
+
+/** Called by kref_put() when the FoE request's refcount reaches zero.
+ *
+ */
+void ec_master_foe_request_release(struct kref *ref)
+{
+    ec_master_foe_request_t *request = container_of(ref, ec_master_foe_request_t, refcount);
+    if (request->slave)
+        EC_SLAVE_DBG(request->slave, 1, "Releasing FoE request %p.\n",request);
+    ec_foe_request_clear(&request->req);
+    kfree(request);
+}
+
+/*****************************************************************************/
+
+/** Called by kref_put() when the SoE request's refcount reaches zero.
+ *
+ */
+void ec_master_soe_request_release(struct kref *ref)
+{
+    ec_master_soe_request_t *request = container_of(ref, ec_master_soe_request_t, refcount);
+    if (request->slave)
+        EC_SLAVE_DBG(request->slave, 1, "Releasing SoE request %p.\n",request);
+    ec_soe_request_clear(&request->req);
+    kfree(request);
+}
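
The release callbacks above are the kref counterpart to kref_get()/kref_put() calls at the places where requests are queued and processed. A minimal lifecycle sketch for an SDO request, assuming the request is allocated and queued roughly like this (the allocation site itself is outside this excerpt, and the list name below is an assumption):

/* Lifecycle sketch, not the changeset's actual allocation code. */
ec_master_sdo_request_t *request;

request = kmalloc(sizeof(*request), GFP_KERNEL);
if (!request)
    return -ENOMEM;

ec_sdo_request_init(&request->req);
kref_init(&request->refcount);      /* reference held by the waiter */

kref_get(&request->refcount);       /* second reference for the slave FSM */
list_add_tail(&request->list, &slave->sdo_requests); /* list name assumed */

/* ... the FSM and the waiter each drop their reference when done;
 * the last kref_put() invokes the release function defined above: */
kref_put(&request->refcount, ec_master_sdo_request_release);
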
--- a/master/fsm_master.h	Wed Mar 23 08:06:58 2011 +0100
+++ b/master/fsm_master.h	Wed Apr 13 22:06:28 2011 +0200
@@ -39,6 +39,7 @@
 
 #include "globals.h"
 #include "datagram.h"
+#include "mailbox.h"
 #include "foe_request.h"
 #include "sdo_request.h"
 #include "soe_request.h"
@@ -57,8 +58,11 @@
     size_t nwords; /**< Number of words. */
     const uint16_t *words; /**< Pointer to the data words. */
     ec_internal_request_state_t state; /**< State of the request. */
+    struct kref refcount;
 } ec_sii_write_request_t;
 
+void ec_master_sii_write_request_release(struct kref *);
+
 /*****************************************************************************/
 
 /** Register request.
@@ -71,8 +75,11 @@
     size_t length; /**< Number of bytes. */
     uint8_t *data; /**< Data to write / memory for read data. */
     ec_internal_request_state_t state; /**< State of the request. */
+    struct kref refcount;
 } ec_reg_request_t;
 
+void ec_master_reg_request_release(struct kref *);
+
 /*****************************************************************************/
 
 /** Slave/SDO request record for master's SDO request list.
@@ -81,8 +88,11 @@
     struct list_head list; /**< List element. */
     ec_slave_t *slave; /**< Slave. */
     ec_sdo_request_t req; /**< SDO request. */
+    struct kref refcount;
 } ec_master_sdo_request_t;
 
+void ec_master_sdo_request_release(struct kref *);
+
 /*****************************************************************************/
 
 /** FoE request.
@@ -91,8 +101,11 @@
     struct list_head list; /**< List head. */
     ec_slave_t *slave; /**< EtherCAT slave. */
     ec_foe_request_t req; /**< FoE request. */
+    struct kref refcount;
 } ec_master_foe_request_t;
 
+void ec_master_foe_request_release(struct kref *);
+
 /*****************************************************************************/
 
 /** SoE request.
@@ -101,8 +114,11 @@
     struct list_head list; /**< List head. */
     ec_slave_t *slave; /**< EtherCAT slave. */
     ec_soe_request_t req; /**< SoE request. */
+    struct kref refcount;
 } ec_master_soe_request_t;
 
+void ec_master_soe_request_release(struct kref *);
+
 /*****************************************************************************/
 
 typedef struct ec_fsm_master ec_fsm_master_t; /**< \see ec_fsm_master */
@@ -112,6 +128,7 @@
 struct ec_fsm_master {
     ec_master_t *master; /**< master the FSM runs on */
     ec_datagram_t *datagram; /**< datagram used in the state machine */
+    ec_mailbox_t* mbox; /**< mailbox used in the CoE state machine */
     unsigned int retries; /**< retries on datagram timeout. */
 
     void (*state)(ec_fsm_master_t *); /**< master state function */
--- a/master/fsm_slave.c	Wed Mar 23 08:06:58 2011 +0100
+++ b/master/fsm_slave.c	Wed Apr 13 22:06:28 2011 +0200
@@ -57,21 +57,21 @@
 void ec_fsm_slave_init(
         ec_fsm_slave_t *fsm, /**< Slave state machine. */
         ec_slave_t *slave, /**< EtherCAT slave. */
-        ec_datagram_t *datagram /**< Datagram object to use. */
+        ec_mailbox_t *mbox /**< Mailbox object to use. */
         )
 {
     fsm->slave = slave;
-    fsm->datagram = datagram;
-    fsm->datagram->data_size = 0;
+    fsm->mbox = mbox;
+    slave->datagram.data_size = 0;
 
     EC_SLAVE_DBG(slave, 1, "Init FSM.\n");
 
     fsm->state = ec_fsm_slave_state_idle;
 
     // init sub-state-machines
-    ec_fsm_coe_init(&fsm->fsm_coe, fsm->datagram);
-    ec_fsm_foe_init(&fsm->fsm_foe, fsm->datagram);
-    ec_fsm_soe_init(&fsm->fsm_soe, fsm->datagram);
+    ec_fsm_coe_init(&fsm->fsm_coe, fsm->mbox);
+    ec_fsm_foe_init(&fsm->fsm_foe, fsm->mbox);
+    ec_fsm_soe_init(&fsm->fsm_soe, fsm->mbox);
 }
 
 /*****************************************************************************/
@@ -94,19 +94,21 @@
  *
  * If the state machine's datagram is not sent or received yet, the execution
  * of the state machine is delayed to the next cycle.
- */
-void ec_fsm_slave_exec(
-        ec_fsm_slave_t *fsm /**< Slave state machine. */
-        )
-{
-    if (fsm->datagram->state == EC_DATAGRAM_SENT
-        || fsm->datagram->state == EC_DATAGRAM_QUEUED) {
+ *
+ * \return true if the state machine was executed.
+ */
+int ec_fsm_slave_exec(
+        ec_fsm_slave_t *fsm /**< Slave state machine. */
+        )
+{
+    if (ec_mbox_is_datagram_state(fsm->mbox,EC_DATAGRAM_QUEUED)
+        || ec_mbox_is_datagram_state(fsm->mbox,EC_DATAGRAM_SENT)) {
         // datagram was not sent or received yet.
-        return;
+        return 0;
     }
 
     fsm->state(fsm);
-    return;
+    return 1;
 }
 
 /*****************************************************************************/
@@ -176,9 +178,10 @@
 
         list_del_init(&request->list); // dequeue
         if (slave->current_state & EC_SLAVE_STATE_ACK_ERR) {
-            EC_SLAVE_WARN(slave, "Aborting SDO request,"
-                    " slave has error flag set.\n");
+            EC_SLAVE_WARN(slave, "Aborting SDO request %p,"
+                    " slave has error flag set.\n",request);
             request->req.state = EC_INT_REQUEST_FAILURE;
+            kref_put(&request->refcount,ec_master_sdo_request_release);
             wake_up(&slave->sdo_queue);
             fsm->sdo_request = NULL;
             fsm->state = ec_fsm_slave_state_idle;
@@ -186,8 +189,9 @@
         }
 
         if (slave->current_state == EC_SLAVE_STATE_INIT) {
-            EC_SLAVE_WARN(slave, "Aborting SDO request, slave is in INIT.\n");
+            EC_SLAVE_WARN(slave, "Aborting SDO request %p, slave is in INIT.\n",request);
             request->req.state = EC_INT_REQUEST_FAILURE;
+            kref_put(&request->refcount,ec_master_sdo_request_release);
             wake_up(&slave->sdo_queue);
             fsm->sdo_request = NULL;
             fsm->state = ec_fsm_slave_state_idle;
@@ -197,14 +201,14 @@
         request->req.state = EC_INT_REQUEST_BUSY;
 
         // Found pending SDO request. Execute it!
-        EC_SLAVE_DBG(slave, 1, "Processing SDO request...\n");
+        EC_SLAVE_DBG(slave, 1, "Processing SDO request %p...\n",request);
 
         // Start SDO transfer
-        fsm->sdo_request = &request->req;
+        fsm->sdo_request = request;
         fsm->state = ec_fsm_slave_state_sdo_request;
         ec_fsm_coe_transfer(&fsm->fsm_coe, slave, &request->req);
         ec_fsm_coe_exec(&fsm->fsm_coe); // execute immediately
-        ec_master_queue_external_datagram(fsm->slave->master,fsm->datagram);
+        ec_slave_mbox_queue_datagrams(slave, fsm->mbox);
         return 1;
     }
     return 0;
@@ -219,26 +223,28 @@
         )
 {
     ec_slave_t *slave = fsm->slave;
-    ec_sdo_request_t *request = fsm->sdo_request;
+    ec_master_sdo_request_t *request = fsm->sdo_request;
 
     if (ec_fsm_coe_exec(&fsm->fsm_coe))
     {
-        ec_master_queue_external_datagram(fsm->slave->master,fsm->datagram);
+        ec_slave_mbox_queue_datagrams(slave, fsm->mbox);
         return;
     }
     if (!ec_fsm_coe_success(&fsm->fsm_coe)) {
-        EC_SLAVE_ERR(slave, "Failed to process SDO request.\n");
-        request->state = EC_INT_REQUEST_FAILURE;
+        EC_SLAVE_ERR(slave, "Failed to process SDO request %p.\n",request);
+        request->req.state = EC_INT_REQUEST_FAILURE;
+        kref_put(&request->refcount,ec_master_sdo_request_release);
         wake_up(&slave->sdo_queue);
         fsm->sdo_request = NULL;
         fsm->state = ec_fsm_slave_state_idle;
         return;
     }
 
-    EC_SLAVE_DBG(slave, 1, "Finished SDO request.\n");
+    EC_SLAVE_DBG(slave, 1, "Finished SDO request %p.\n",request);
 
     // SDO request finished
-    request->state = EC_INT_REQUEST_SUCCESS;
+    request->req.state = EC_INT_REQUEST_SUCCESS;
+    kref_put(&request->refcount,ec_master_sdo_request_release);
     wake_up(&slave->sdo_queue);
 
     fsm->sdo_request = NULL;
@@ -261,10 +267,11 @@
     // search the first request to be processed
     list_for_each_entry_safe(request, next, &slave->foe_requests, list) {
         if (slave->current_state & EC_SLAVE_STATE_ACK_ERR) {
-            EC_SLAVE_WARN(slave, "Aborting FOE request,"
-                    " slave has error flag set.\n");
+            EC_SLAVE_WARN(slave, "Aborting FOE request %p,"
+                    " slave has error flag set.\n",request);
             request->req.state = EC_INT_REQUEST_FAILURE;
-            wake_up(&slave->sdo_queue);
+            kref_put(&request->refcount,ec_master_foe_request_release);
+            wake_up(&slave->foe_queue);
             fsm->sdo_request = NULL;
             fsm->state = ec_fsm_slave_state_idle;
             return 0;
@@ -272,13 +279,13 @@
         list_del_init(&request->list); // dequeue
         request->req.state = EC_INT_REQUEST_BUSY;
 
-        EC_SLAVE_DBG(slave, 1, "Processing FoE request.\n");
-
-        fsm->foe_request = &request->req;
+        EC_SLAVE_DBG(slave, 1, "Processing FoE request %p.\n",request);
+
+        fsm->foe_request = request;
         fsm->state = ec_fsm_slave_state_foe_request;
         ec_fsm_foe_transfer(&fsm->fsm_foe, slave, &request->req);
         ec_fsm_foe_exec(&fsm->fsm_foe);
-        ec_master_queue_external_datagram(fsm->slave->master,fsm->datagram);
+        ec_slave_mbox_queue_datagrams(slave, fsm->mbox);
         return 1;
     }
     return 0;
@@ -293,17 +300,18 @@
         )
 {
     ec_slave_t *slave = fsm->slave;
-    ec_foe_request_t *request = fsm->foe_request;
+    ec_master_foe_request_t *request = fsm->foe_request;
 
     if (ec_fsm_foe_exec(&fsm->fsm_foe))
     {
-        ec_master_queue_external_datagram(fsm->slave->master,fsm->datagram);
+        ec_slave_mbox_queue_datagrams(slave, fsm->mbox);
         return;
     }
 
     if (!ec_fsm_foe_success(&fsm->fsm_foe)) {
-        EC_SLAVE_ERR(slave, "Failed to handle FoE request.\n");
-        request->state = EC_INT_REQUEST_FAILURE;
+        EC_SLAVE_ERR(slave, "Failed to handle FoE request %p.\n",request);
+        request->req.state = EC_INT_REQUEST_FAILURE;
+        kref_put(&request->refcount,ec_master_foe_request_release);
         wake_up(&slave->foe_queue);
         fsm->foe_request = NULL;
         fsm->state = ec_fsm_slave_state_idle;
@@ -311,10 +319,11 @@
     }
 
     // finished transferring FoE
-    EC_SLAVE_DBG(slave, 1, "Successfully transferred %zu bytes of FoE"
-            " data.\n", request->data_size);
-
-    request->state = EC_INT_REQUEST_SUCCESS;
+    EC_SLAVE_DBG(slave, 1, "FoE request %p successfully transferred %zu bytes.\n",
+            request,request->req.data_size);
+
+    request->req.state = EC_INT_REQUEST_SUCCESS;
+    kref_put(&request->refcount,ec_master_foe_request_release);
     wake_up(&slave->foe_queue);
 
     fsm->foe_request = NULL;
@@ -332,16 +341,17 @@
         )
 {
     ec_slave_t *slave = fsm->slave;
-    ec_master_soe_request_t *req, *next;
+    ec_master_soe_request_t *request, *next;
 
     // search the first request to be processed
-    list_for_each_entry_safe(req, next, &slave->soe_requests, list) {
-
-        list_del_init(&req->list); // dequeue
+    list_for_each_entry_safe(request, next, &slave->soe_requests, list) {
+
+        list_del_init(&request->list); // dequeue
         if (slave->current_state & EC_SLAVE_STATE_ACK_ERR) {
             EC_SLAVE_WARN(slave, "Aborting SoE request,"
                     " slave has error flag set.\n");
-            req->req.state = EC_INT_REQUEST_FAILURE;
+            request->req.state = EC_INT_REQUEST_FAILURE;
+            kref_put(&request->refcount,ec_master_soe_request_release);
             wake_up(&slave->soe_queue);
             fsm->state = ec_fsm_slave_state_idle;
             return 0;
@@ -349,23 +359,24 @@
 
         if (slave->current_state == EC_SLAVE_STATE_INIT) {
             EC_SLAVE_WARN(slave, "Aborting SoE request, slave is in INIT.\n");
-            req->req.state = EC_INT_REQUEST_FAILURE;
+            request->req.state = EC_INT_REQUEST_FAILURE;
+            kref_put(&request->refcount,ec_master_soe_request_release);
             wake_up(&slave->soe_queue);
             fsm->state = ec_fsm_slave_state_idle;
             return 0;
         }
 
-        req->req.state = EC_INT_REQUEST_BUSY;
+        request->req.state = EC_INT_REQUEST_BUSY;
 
         // Found pending request. Execute it!
         EC_SLAVE_DBG(slave, 1, "Processing SoE request...\n");
 
         // Start SoE transfer
-        fsm->soe_request = &req->req;
+        fsm->soe_request = request;
         fsm->state = ec_fsm_slave_state_soe_request;
-        ec_fsm_soe_transfer(&fsm->fsm_soe, slave, &req->req);
+        ec_fsm_soe_transfer(&fsm->fsm_soe, slave, &request->req);
         ec_fsm_soe_exec(&fsm->fsm_soe); // execute immediately
-        ec_master_queue_external_datagram(fsm->slave->master, fsm->datagram);
+        ec_slave_mbox_queue_datagrams(slave, fsm->mbox);
         return 1;
     }
     return 0;
@@ -380,16 +391,17 @@
         )
 {
     ec_slave_t *slave = fsm->slave;
-    ec_soe_request_t *request = fsm->soe_request;
+    ec_master_soe_request_t *request = fsm->soe_request;
 
     if (ec_fsm_soe_exec(&fsm->fsm_soe)) {
-        ec_master_queue_external_datagram(fsm->slave->master, fsm->datagram);
+        ec_slave_mbox_queue_datagrams(slave, fsm->mbox);
         return;
     }
 
     if (!ec_fsm_soe_success(&fsm->fsm_soe)) {
         EC_SLAVE_ERR(slave, "Failed to process SoE request.\n");
-        request->state = EC_INT_REQUEST_FAILURE;
+        request->req.state = EC_INT_REQUEST_FAILURE;
+        kref_put(&request->refcount,ec_master_soe_request_release);
         wake_up(&slave->soe_queue);
         fsm->soe_request = NULL;
         fsm->state = ec_fsm_slave_state_idle;
@@ -399,7 +411,8 @@
     EC_SLAVE_DBG(slave, 1, "Finished SoE request.\n");
 
     // SoE request finished
-    request->state = EC_INT_REQUEST_SUCCESS;
+    request->req.state = EC_INT_REQUEST_SUCCESS;
+    kref_put(&request->refcount,ec_master_soe_request_release);
     wake_up(&slave->soe_queue);
 
     fsm->soe_request = NULL;
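
ec_fsm_slave_exec() now reports whether the state machine actually ran, which lets the caller decide per cycle whether anything was done for the slave. A sketch of the assumed call site (the real caller lives in master.c, outside this excerpt; the iteration over master->slaves follows the array layout used elsewhere in the master):

/* Caller sketch; the exact call site is an assumption. */
ec_slave_t *slave;

for (slave = master->slaves;
        slave < master->slaves + master->slave_count; slave++) {
    if (!ec_fsm_slave_exec(&slave->fsm)) {
        continue; /* datagram still queued or sent; try again next cycle */
    }
    /* FSM ran; any datagrams it prepared were already queued via
     * ec_slave_mbox_queue_datagrams() inside the state functions. */
}
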
--- a/master/fsm_slave.h	Wed Mar 23 08:06:58 2011 +0100
+++ b/master/fsm_slave.h	Wed Apr 13 22:06:28 2011 +0200
@@ -42,6 +42,7 @@
 #include "fsm_coe.h"
 #include "fsm_foe.h"
 #include "fsm_soe.h"
+#include "fsm_master.h"
 
 typedef struct ec_fsm_slave ec_fsm_slave_t; /**< \see ec_fsm_slave */
 
@@ -49,13 +50,13 @@
  */
 struct ec_fsm_slave {
     ec_slave_t *slave; /**< slave the FSM runs on */
-    ec_datagram_t *datagram; /**< datagram used in the state machine */
+    ec_mailbox_t *mbox; /**< mailbox used in the state machine */
 
     void (*state)(ec_fsm_slave_t *); /**< master state function */
-    ec_sdo_request_t *sdo_request; /**< SDO request to process. */
-    ec_foe_request_t *foe_request; /**< FoE request to process. */
+    ec_master_sdo_request_t *sdo_request; /**< SDO request to process. */
+    ec_master_foe_request_t *foe_request; /**< FoE request to process. */
     off_t foe_index; /**< index to FoE write request data */
-    ec_soe_request_t *soe_request; /**< SoE request to process. */
+    ec_master_soe_request_t *soe_request; /**< SoE request to process. */
 
     ec_fsm_coe_t fsm_coe; /**< CoE state machine */
     ec_fsm_foe_t fsm_foe; /**< FoE state machine */
@@ -64,10 +65,10 @@
 
 /*****************************************************************************/
 
-void ec_fsm_slave_init(ec_fsm_slave_t *, ec_slave_t *, ec_datagram_t *);
+void ec_fsm_slave_init(ec_fsm_slave_t *, ec_slave_t *, ec_mailbox_t *);
 void ec_fsm_slave_clear(ec_fsm_slave_t *);
 
-void ec_fsm_slave_exec(ec_fsm_slave_t *);
+int ec_fsm_slave_exec(ec_fsm_slave_t *);
 void ec_fsm_slave_ready(ec_fsm_slave_t *);
 
 /*****************************************************************************/
--- a/master/fsm_slave_config.c	Wed Mar 23 08:06:58 2011 +0100
+++ b/master/fsm_slave_config.c	Wed Apr 13 22:06:28 2011 +0200
@@ -177,8 +177,8 @@
         ec_fsm_slave_config_t *fsm /**< slave state machine */
         )
 {
-    if (fsm->datagram->state == EC_DATAGRAM_SENT
-        || fsm->datagram->state == EC_DATAGRAM_QUEUED) {
+    if (fsm->datagram->state == EC_DATAGRAM_QUEUED
+        || fsm->datagram->state == EC_DATAGRAM_SENT) {
         // datagram was not sent or received yet.
         return ec_fsm_slave_config_running(fsm);
     }
@@ -738,8 +738,7 @@
             ec_soe_request_write(&fsm->soe_request_copy);
             ec_fsm_soe_transfer(fsm_soe, fsm->slave, &fsm->soe_request_copy);
             ec_fsm_soe_exec(fsm_soe); // execute immediately
-            ec_master_queue_external_datagram(slave->master,
-                    fsm_soe->datagram);
+            ec_slave_mbox_queue_datagrams(slave, fsm_soe->mbox);
             return;
         }
     }
@@ -760,7 +759,7 @@
     ec_fsm_soe_t *fsm_soe = &slave->fsm.fsm_soe;
 
     if (ec_fsm_soe_exec(fsm_soe)) {
-        ec_master_queue_external_datagram(slave->master, fsm_soe->datagram);
+        ec_slave_mbox_queue_datagrams(slave, fsm_soe->mbox);
         return;
     }
 
@@ -785,8 +784,7 @@
             ec_soe_request_write(&fsm->soe_request_copy);
             ec_fsm_soe_transfer(fsm_soe, fsm->slave, &fsm->soe_request_copy);
             ec_fsm_soe_exec(fsm_soe); // execute immediately
-            ec_master_queue_external_datagram(slave->master,
-                    fsm_soe->datagram);
+            ec_slave_mbox_queue_datagrams(slave, fsm_soe->mbox);
             return;
         }
     }
@@ -1248,7 +1246,7 @@
     abs_sync_diff = EC_READ_U32(datagram->data) & 0x7fffffff;
     diff_ms = (datagram->jiffies_received - fsm->jiffies_start) * 1000 / HZ;
 
-    if (abs_sync_diff > EC_DC_MAX_SYNC_DIFF_NS) {
+    if (abs_sync_diff > EC_DC_MAX_SYNC_DIFF_NS) {
 
         if (diff_ms >= EC_DC_SYNC_WAIT_MS) {
             EC_SLAVE_WARN(slave, "Slave did not sync after %lu ms.\n",
@@ -1454,8 +1452,7 @@
             ec_soe_request_write(&fsm->soe_request_copy);
             ec_fsm_soe_transfer(fsm_soe, fsm->slave, &fsm->soe_request_copy);
             ec_fsm_soe_exec(fsm_soe); // execute immediately
-            ec_master_queue_external_datagram(slave->master,
-                    fsm_soe->datagram);
+            ec_slave_mbox_queue_datagrams(slave, fsm_soe->mbox);
             return;
         }
     }
@@ -1476,7 +1473,7 @@
     ec_fsm_soe_t *fsm_soe = &slave->fsm.fsm_soe;
 
     if (ec_fsm_soe_exec(fsm_soe)) {
-        ec_master_queue_external_datagram(slave->master, fsm_soe->datagram);
+        ec_slave_mbox_queue_datagrams(slave, fsm_soe->mbox);
         return;
     }
 
@@ -1501,8 +1498,7 @@
             ec_soe_request_write(&fsm->soe_request_copy);
             ec_fsm_soe_transfer(fsm_soe, fsm->slave, &fsm->soe_request_copy);
             ec_fsm_soe_exec(fsm_soe); // execute immediately
-            ec_master_queue_external_datagram(slave->master,
-                    fsm_soe->datagram);
+            ec_slave_mbox_queue_datagrams(slave, fsm_soe->mbox);
             return;
         }
     }
--- a/master/fsm_slave_scan.c	Wed Mar 23 08:06:58 2011 +0100
+++ b/master/fsm_slave_scan.c	Wed Apr 13 22:06:28 2011 +0200
@@ -137,8 +137,8 @@
 
 int ec_fsm_slave_scan_exec(ec_fsm_slave_scan_t *fsm /**< slave state machine */)
 {
-    if (fsm->datagram->state == EC_DATAGRAM_SENT
-        || fsm->datagram->state == EC_DATAGRAM_QUEUED) {
+    if (fsm->datagram->state == EC_DATAGRAM_QUEUED
+        || fsm->datagram->state == EC_DATAGRAM_SENT) {
         // datagram was not sent or received yet.
         return ec_fsm_slave_scan_running(fsm);
     }
--- a/master/fsm_soe.c	Wed Mar 23 08:06:58 2011 +0100
+++ b/master/fsm_soe.c	Wed Apr 13 22:06:28 2011 +0200
@@ -106,11 +106,11 @@
  */
 void ec_fsm_soe_init(
         ec_fsm_soe_t *fsm, /**< finite state machine */
-        ec_datagram_t *datagram /**< datagram */
+        ec_mailbox_t *mbox /**< mailbox */
         )
 {
     fsm->state = NULL;
-    fsm->datagram = datagram;
+    fsm->mbox = mbox;
 }
 
 /*****************************************************************************/
@@ -195,7 +195,7 @@
  */
 void ec_fsm_soe_read_start(ec_fsm_soe_t *fsm /**< finite state machine */)
 {
-    ec_datagram_t *datagram = fsm->datagram;
+    ec_mailbox_t *mbox = fsm->mbox;
     ec_slave_t *slave = fsm->slave;
     ec_master_t *master = slave->master;
     ec_soe_request_t *request = fsm->request;
@@ -211,7 +211,7 @@
         return;
     }
 
-    data = ec_slave_mbox_prepare_send(slave, datagram, EC_MBOX_TYPE_SOE,
+    data = ec_slave_mbox_prepare_send(slave, mbox, EC_MBOX_TYPE_SOE,
             EC_SOE_SIZE);
     if (IS_ERR(data)) {
         fsm->state = ec_fsm_soe_error;
@@ -240,25 +240,25 @@
  */
 void ec_fsm_soe_read_request(ec_fsm_soe_t *fsm /**< finite state machine */)
 {
-    ec_datagram_t *datagram = fsm->datagram;
+    ec_mailbox_t *mbox = fsm->mbox;
     ec_slave_t *slave = fsm->slave;
     unsigned long diff_ms;
 
-    if (datagram->state == EC_DATAGRAM_TIMED_OUT && fsm->retries--)
+    if (ec_mbox_is_datagram_state(mbox,EC_DATAGRAM_TIMED_OUT) && fsm->retries--)
         return; // FIXME: check for response first?
 
-    if (datagram->state != EC_DATAGRAM_RECEIVED) {
+    if (!ec_mbox_is_datagram_state(mbox,EC_DATAGRAM_RECEIVED)) {
         fsm->state = ec_fsm_soe_error;
         EC_SLAVE_ERR(slave, "Failed to receive SoE read request: ");
-        ec_datagram_print_state(datagram);
+        ec_datagram_print_state(mbox->datagram);
         ec_fsm_soe_print_error(fsm);
         return;
     }
 
     diff_ms = (jiffies - fsm->request->jiffies_sent) * 1000 / HZ;
 
-    if (datagram->working_counter != 1) {
-        if (!datagram->working_counter) {
+    if (!ec_mbox_is_datagram_wc(mbox,1)) {
+        if (ec_mbox_is_datagram_wc(mbox,0)) {
             if (diff_ms < EC_SOE_RESPONSE_TIMEOUT) {
                 // no response; send request datagram again
                 return;
@@ -267,13 +267,13 @@
         fsm->state = ec_fsm_soe_error;
         EC_SLAVE_ERR(slave, "Reception of SoE read request"
                 " failed after %lu ms: ", diff_ms);
-        ec_datagram_print_wc_error(datagram);
-        ec_fsm_soe_print_error(fsm);
-        return;
-    }
-
-    fsm->jiffies_start = datagram->jiffies_sent;
-    ec_slave_mbox_prepare_check(slave, datagram); // can not fail.
+        ec_datagram_print_wc_error(mbox->datagram);
+        ec_fsm_soe_print_error(fsm);
+        return;
+    }
+
+    fsm->jiffies_start = mbox->datagram->jiffies_sent;
+    ec_slave_mbox_prepare_check(slave, mbox); // can not fail.
     fsm->retries = EC_FSM_RETRIES;
     fsm->state = ec_fsm_soe_read_check;
 }
@@ -284,32 +284,32 @@
  */
 void ec_fsm_soe_read_check(ec_fsm_soe_t *fsm /**< finite state machine */)
 {
-    ec_datagram_t *datagram = fsm->datagram;
-    ec_slave_t *slave = fsm->slave;
-
-    if (datagram->state == EC_DATAGRAM_TIMED_OUT && fsm->retries--)
-        return;
-
-    if (datagram->state != EC_DATAGRAM_RECEIVED) {
+    ec_mailbox_t *mbox = fsm->mbox;
+    ec_slave_t *slave = fsm->slave;
+
+    if (ec_mbox_is_datagram_state(mbox,EC_DATAGRAM_TIMED_OUT) && fsm->retries--)
+        return;
+
+    if (!ec_mbox_is_datagram_state(mbox,EC_DATAGRAM_RECEIVED)) {
         fsm->state = ec_fsm_soe_error;
         EC_SLAVE_ERR(slave, "Failed to receive SoE mailbox check datagram: ");
-        ec_datagram_print_state(datagram);
-        ec_fsm_soe_print_error(fsm);
-        return;
-    }
-
-    if (datagram->working_counter != 1) {
+        ec_datagram_print_state(mbox->datagram);
+        ec_fsm_soe_print_error(fsm);
+        return;
+    }
+
+    if (!ec_mbox_is_datagram_wc(mbox,1)) {
         fsm->state = ec_fsm_soe_error;
         EC_SLAVE_ERR(slave, "Reception of SoE mailbox check"
                 " datagram failed: ");
-        ec_datagram_print_wc_error(datagram);
-        ec_fsm_soe_print_error(fsm);
-        return;
-    }
-
-    if (!ec_slave_mbox_check(datagram)) {
+        ec_datagram_print_wc_error(mbox->datagram);
+        ec_fsm_soe_print_error(fsm);
+        return;
+    }
+
+    if (!ec_slave_mbox_check(mbox)) {
         unsigned long diff_ms =
-            (datagram->jiffies_received - fsm->jiffies_start) * 1000 / HZ;
+            (mbox->datagram->jiffies_received - fsm->jiffies_start) * 1000 / HZ;
         if (diff_ms >= EC_SOE_RESPONSE_TIMEOUT) {
             fsm->state = ec_fsm_soe_error;
             EC_SLAVE_ERR(slave, "Timeout after %lu ms while waiting for"
@@ -318,13 +318,13 @@
             return;
         }
 
-        ec_slave_mbox_prepare_check(slave, datagram); // can not fail.
+        ec_slave_mbox_prepare_check(slave, mbox); // can not fail.
         fsm->retries = EC_FSM_RETRIES;
         return;
     }
 
     // Fetch response
-    ec_slave_mbox_prepare_fetch(slave, datagram); // can not fail.
+    ec_slave_mbox_prepare_fetch(slave, mbox); // can not fail.
     fsm->retries = EC_FSM_RETRIES;
     fsm->state = ec_fsm_soe_read_response;
 }
@@ -335,7 +335,7 @@
  */
 void ec_fsm_soe_read_response(ec_fsm_soe_t *fsm /**< finite state machine */)
 {
-    ec_datagram_t *datagram = fsm->datagram;
+    ec_mailbox_t *mbox = fsm->mbox;
     ec_slave_t *slave = fsm->slave;
     ec_master_t *master = slave->master;
     uint8_t *data, mbox_prot, header, opcode, incomplete, error_flag,
@@ -343,26 +343,26 @@
     size_t rec_size, data_size;
     ec_soe_request_t *req = fsm->request;
 
-    if (datagram->state == EC_DATAGRAM_TIMED_OUT && fsm->retries--)
+    if (ec_mbox_is_datagram_state(mbox,EC_DATAGRAM_TIMED_OUT) && fsm->retries--)
         return; // FIXME: request again?
 
-    if (datagram->state != EC_DATAGRAM_RECEIVED) {
+    if (!ec_mbox_is_datagram_state(mbox,EC_DATAGRAM_RECEIVED)) {
         fsm->state = ec_fsm_soe_error;
         EC_SLAVE_ERR(slave, "Failed to receive SoE read response datagram: ");
-        ec_datagram_print_state(datagram);
-        ec_fsm_soe_print_error(fsm);
-        return;
-    }
-
-    if (datagram->working_counter != 1) {
+        ec_datagram_print_state(mbox->datagram);
+        ec_fsm_soe_print_error(fsm);
+        return;
+    }
+
+    if (!ec_mbox_is_datagram_wc(mbox,1)) {
         fsm->state = ec_fsm_soe_error;
         EC_SLAVE_ERR(slave, "Reception of SoE read response failed: ");
-        ec_datagram_print_wc_error(datagram);
-        ec_fsm_soe_print_error(fsm);
-        return;
-    }
-
-    data = ec_slave_mbox_fetch(slave, datagram, &mbox_prot, &rec_size);
+        ec_datagram_print_wc_error(mbox->datagram);
+        ec_fsm_soe_print_error(fsm);
+        return;
+    }
+
+    data = ec_slave_mbox_fetch(slave, mbox, &mbox_prot, &rec_size);
     if (IS_ERR(data)) {
         fsm->state = ec_fsm_soe_error;
         ec_fsm_soe_print_error(fsm);
@@ -435,8 +435,8 @@
     if (incomplete) {
         EC_SLAVE_DBG(slave, 1, "SoE data incomplete. Waiting for fragment"
                 " at offset %zu.\n", req->data_size);
-        fsm->jiffies_start = datagram->jiffies_sent;
-        ec_slave_mbox_prepare_check(slave, datagram); // can not fail.
+        fsm->jiffies_start = mbox->datagram->jiffies_sent;
+        ec_slave_mbox_prepare_check(slave, mbox); // can not fail.
         fsm->retries = EC_FSM_RETRIES;
         fsm->state = ec_fsm_soe_read_check;
     } else {
@@ -459,7 +459,7 @@
         ec_fsm_soe_t *fsm /**< finite state machine */
         )
 {
-    ec_datagram_t *datagram = fsm->datagram;
+    ec_mailbox_t *mbox = fsm->mbox;
     ec_slave_t *slave = fsm->slave;
     ec_master_t *master = slave->master;
     ec_soe_request_t *req = fsm->request;
@@ -485,7 +485,7 @@
         fragments_left++;
     }
 
-    data = ec_slave_mbox_prepare_send(slave, datagram, EC_MBOX_TYPE_SOE,
+    data = ec_slave_mbox_prepare_send(slave, mbox, EC_MBOX_TYPE_SOE,
             EC_SOE_SIZE + fragment_size);
     if (IS_ERR(data)) {
         fsm->state = ec_fsm_soe_error;
@@ -539,25 +539,25 @@
  */
 void ec_fsm_soe_write_request(ec_fsm_soe_t *fsm /**< finite state machine */)
 {
-    ec_datagram_t *datagram = fsm->datagram;
+    ec_mailbox_t *mbox = fsm->mbox;
     ec_slave_t *slave = fsm->slave;
     unsigned long diff_ms;
 
-    if (datagram->state == EC_DATAGRAM_TIMED_OUT && fsm->retries--)
+    if (ec_mbox_is_datagram_state(mbox,EC_DATAGRAM_TIMED_OUT) && fsm->retries--)
         return; // FIXME: check for response first?
 
-    if (datagram->state != EC_DATAGRAM_RECEIVED) {
+    if (!ec_mbox_is_datagram_state(mbox,EC_DATAGRAM_RECEIVED)) {
         fsm->state = ec_fsm_soe_error;
         EC_SLAVE_ERR(slave, "Failed to receive SoE write request: ");
-        ec_datagram_print_state(datagram);
+        ec_datagram_print_state(mbox->datagram);
         ec_fsm_soe_print_error(fsm);
         return;
     }
 
     diff_ms = (jiffies - fsm->request->jiffies_sent) * 1000 / HZ;
 
-    if (datagram->working_counter != 1) {
-        if (!datagram->working_counter) {
+    if (!ec_mbox_is_datagram_wc(mbox,1)) {
+        if (ec_mbox_is_datagram_wc(mbox,0)) {
             if (diff_ms < EC_SOE_RESPONSE_TIMEOUT) {
                 // no response; send request datagram again
                 return;
@@ -566,14 +566,14 @@
         fsm->state = ec_fsm_soe_error;
         EC_SLAVE_ERR(slave, "Reception of SoE write request"
                 " failed after %lu ms: ", diff_ms);
-        ec_datagram_print_wc_error(datagram);
-        ec_fsm_soe_print_error(fsm);
-        return;
-    }
-
-    fsm->jiffies_start = datagram->jiffies_sent;
-
-    ec_slave_mbox_prepare_check(slave, datagram); // can not fail.
+        ec_datagram_print_wc_error(mbox->datagram);
+        ec_fsm_soe_print_error(fsm);
+        return;
+    }
+
+    fsm->jiffies_start = mbox->datagram->jiffies_sent;
+
+    ec_slave_mbox_prepare_check(slave, mbox); // can not fail.
     fsm->retries = EC_FSM_RETRIES;
     fsm->state = ec_fsm_soe_write_check;
 }
@@ -584,25 +584,25 @@
  */
 void ec_fsm_soe_write_check(ec_fsm_soe_t *fsm /**< finite state machine */)
 {
-    ec_datagram_t *datagram = fsm->datagram;
+    ec_mailbox_t *mbox = fsm->mbox;
     ec_slave_t *slave = fsm->slave;
     ec_soe_request_t *req = fsm->request;
 
-    if (datagram->state == EC_DATAGRAM_TIMED_OUT && fsm->retries--)
-        return;
-
-    if (datagram->state != EC_DATAGRAM_RECEIVED) {
+    if (ec_mbox_is_datagram_state(mbox,EC_DATAGRAM_TIMED_OUT) && fsm->retries--)
+        return;
+
+    if (!ec_mbox_is_datagram_state(mbox,EC_DATAGRAM_RECEIVED)) {
         fsm->state = ec_fsm_soe_error;
         EC_SLAVE_ERR(slave, "Failed to receive SoE write request datagram: ");
-        ec_datagram_print_state(datagram);
-        ec_fsm_soe_print_error(fsm);
-        return;
-    }
-
-    if (datagram->working_counter != 1) {
+        ec_datagram_print_state(mbox->datagram);
+        ec_fsm_soe_print_error(fsm);
+        return;
+    }
+
+    if (!ec_mbox_is_datagram_wc(mbox,1)) {
         fsm->state = ec_fsm_soe_error;
         EC_SLAVE_ERR(slave, "Reception of SoE write request datagram: ");
-        ec_datagram_print_wc_error(datagram);
+        ec_datagram_print_wc_error(mbox->datagram);
         ec_fsm_soe_print_error(fsm);
         return;
     }
@@ -610,9 +610,9 @@
     if (fsm->offset < req->data_size) {
         ec_fsm_soe_write_next_fragment(fsm);
     } else {
-        if (!ec_slave_mbox_check(datagram)) {
+        if (!ec_slave_mbox_check(mbox)) {
             unsigned long diff_ms =
-                (datagram->jiffies_received - fsm->jiffies_start) * 1000 / HZ;
+                (mbox->datagram->jiffies_received - fsm->jiffies_start) * 1000 / HZ;
             if (diff_ms >= EC_SOE_RESPONSE_TIMEOUT) {
                 fsm->state = ec_fsm_soe_error;
                 EC_SLAVE_ERR(slave, "Timeout after %lu ms while waiting"
@@ -621,13 +621,13 @@
                 return;
             }
 
-            ec_slave_mbox_prepare_check(slave, datagram); // can not fail.
+            ec_slave_mbox_prepare_check(slave, mbox); // can not fail.
             fsm->retries = EC_FSM_RETRIES;
             return;
         }
 
         // Fetch response
-        ec_slave_mbox_prepare_fetch(slave, datagram); // can not fail.
+        ec_slave_mbox_prepare_fetch(slave, mbox); // can not fail.
         fsm->retries = EC_FSM_RETRIES;
         fsm->state = ec_fsm_soe_write_response;
     }
@@ -639,7 +639,7 @@
  */
 void ec_fsm_soe_write_response(ec_fsm_soe_t *fsm /**< finite state machine */)
 {
-    ec_datagram_t *datagram = fsm->datagram;
+    ec_mailbox_t *mbox = fsm->mbox;
     ec_slave_t *slave = fsm->slave;
     ec_master_t *master = slave->master;
     ec_soe_request_t *req = fsm->request;
@@ -647,27 +647,27 @@
     uint16_t idn;
     size_t rec_size;
 
-    if (datagram->state == EC_DATAGRAM_TIMED_OUT && fsm->retries--)
+    if (ec_mbox_is_datagram_state(mbox,EC_DATAGRAM_TIMED_OUT) && fsm->retries--)
         return; // FIXME: request again?
 
-    if (datagram->state != EC_DATAGRAM_RECEIVED) {
+    if (!ec_mbox_is_datagram_state(mbox,EC_DATAGRAM_RECEIVED)) {
         fsm->state = ec_fsm_soe_error;
         EC_SLAVE_ERR(slave, "Failed to receive SoE write"
                 " response datagram: ");
-        ec_datagram_print_state(datagram);
-        ec_fsm_soe_print_error(fsm);
-        return;
-    }
-
-    if (datagram->working_counter != 1) {
+        ec_datagram_print_state(mbox->datagram);
+        ec_fsm_soe_print_error(fsm);
+        return;
+    }
+
+    if (!ec_mbox_is_datagram_wc(mbox,1)) {
         fsm->state = ec_fsm_soe_error;
         EC_SLAVE_ERR(slave, "Reception of SoE write response failed: ");
-        ec_datagram_print_wc_error(datagram);
-        ec_fsm_soe_print_error(fsm);
-        return;
-    }
-
-    data = ec_slave_mbox_fetch(slave, datagram, &mbox_prot, &rec_size);
+        ec_datagram_print_wc_error(mbox->datagram);
+        ec_fsm_soe_print_error(fsm);
+        return;
+    }
+
+    data = ec_slave_mbox_fetch(slave, mbox, &mbox_prot, &rec_size);
     if (IS_ERR(data)) {
         fsm->state = ec_fsm_soe_error;
         ec_fsm_soe_print_error(fsm);
--- a/master/fsm_soe.h	Wed Mar 23 08:06:58 2011 +0100
+++ b/master/fsm_soe.h	Wed Apr 13 22:06:28 2011 +0200
@@ -50,7 +50,7 @@
  */
 struct ec_fsm_soe {
     ec_slave_t *slave; /**< slave the FSM runs on */
-    ec_datagram_t *datagram; /**< datagram used in the state machine */
+    ec_mailbox_t *mbox; /**< mailbox used in the state machine */
     unsigned int retries; /**< retries upon datagram timeout */
 
     void (*state)(ec_fsm_soe_t *); /**< CoE state function */
@@ -61,7 +61,7 @@
 
 /*****************************************************************************/
 
-void ec_fsm_soe_init(ec_fsm_soe_t *, ec_datagram_t *);
+void ec_fsm_soe_init(ec_fsm_soe_t *, ec_mailbox_t *);
 void ec_fsm_soe_clear(ec_fsm_soe_t *);
 
 void ec_fsm_soe_transfer(ec_fsm_soe_t *, ec_slave_t *, ec_soe_request_t *);
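
A note on the fsm_soe.h hunk above: the SoE state machine no longer holds a bare
ec_datagram_t but an ec_mailbox_t. A minimal sketch of how a caller wires this up
after the change, mirroring the ec_mbox_init(&master->fsm_mbox, &master->fsm_datagram)
call added to master.c further down; the local variable names are illustrative only:

    ec_datagram_t datagram;
    ec_mailbox_t  mbox;
    ec_fsm_soe_t  fsm_soe;

    ec_datagram_init(&datagram);
    ec_mbox_init(&mbox, &datagram);     /* the mailbox wraps the payload datagram */
    ec_fsm_soe_init(&fsm_soe, &mbox);   /* the FSM now drives the mailbox */
    /* ... ec_fsm_soe_transfer(&fsm_soe, slave, request); ... */
    ec_fsm_soe_clear(&fsm_soe);
    ec_mbox_clear(&mbox);
    ec_datagram_clear(&datagram);
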
--- a/master/globals.h	Wed Mar 23 08:06:58 2011 +0100
+++ b/master/globals.h	Wed Apr 13 22:06:28 2011 +0200
@@ -39,6 +39,17 @@
 #include "../globals.h"
 #include "../include/ecrt.h"
 
+#ifdef __KERNEL__
+#include <linux/version.h>
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,24)
+#include <linux/rtmutex.h>
+#endif  // KERNEL_VERSION(2,6,24)
+#endif // __KERNEL__
+
+#ifdef CONFIG_TRACING
+//#define USE_TRACE_PRINTK
+#endif
+
 /******************************************************************************
  * EtherCAT master
  *****************************************************************************/
@@ -46,8 +57,8 @@
 /** Datagram timeout in microseconds. */
 #define EC_IO_TIMEOUT 500
 
-/** SDO injection timeout in microseconds. */
-#define EC_SDO_INJECTION_TIMEOUT 10000
+/** FSM injection timeout in microseconds. */
+#define EC_FSM_INJECTION_TIMEOUT 10000
 
 /** Time to send a byte in nanoseconds.
  *
@@ -97,9 +108,6 @@
 /** Word offset of first SII category. */
 #define EC_FIRST_SII_CATEGORY_OFFSET 0x40
 
-/** Maximum number of slave ports. */
-#define EC_MAX_PORTS 4
-
 /** Size of a sync manager configuration page. */
 #define EC_SYNC_PAGE_SIZE 8
 
@@ -173,23 +181,6 @@
     uint8_t enable_not_lrw : 1; /**< Slave does not support LRW. */
 } ec_sii_general_flags_t;
 
-/** EtherCAT slave port descriptor.
- */
-typedef enum {
-    EC_PORT_NOT_IMPLEMENTED,
-    EC_PORT_NOT_CONFIGURED,
-    EC_PORT_EBUS,
-    EC_PORT_MII
-} ec_slave_port_desc_t;
-
-/** EtherCAT slave port information.
- */
-typedef struct {
-    uint8_t link_up; /**< Link detected. */
-    uint8_t loop_closed; /**< Loop closed. */
-    uint8_t signal_detected; /**< Detected signal on RX port. */
-} ec_slave_port_link_t;
-
 /** EtherCAT slave distributed clocks range.
  */
 typedef enum {
@@ -325,4 +316,60 @@
 
 /*****************************************************************************/
 
+/*****************************************************************************/
+
+#ifdef __KERNEL__
+
+/** Mutual exclusion helpers.
+ * Mapped to rt_mutex on kernels >= 2.6.24 and to semaphores on older kernels.
+ */
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,24)
+#define ec_mutex_t rt_mutex
+static inline void ec_mutex_init(struct ec_mutex_t *mutex)
+{
+    rt_mutex_init(mutex);
+}
+static inline void ec_mutex_lock(struct ec_mutex_t *mutex)
+{
+    rt_mutex_lock(mutex);
+}
+static inline int ec_mutex_trylock(struct ec_mutex_t *mutex)
+{
+    return rt_mutex_trylock(mutex);
+}
+static inline int ec_mutex_lock_interruptible(struct ec_mutex_t *mutex)
+{
+    return rt_mutex_lock_interruptible(mutex,0);
+}
+static inline void ec_mutex_unlock(struct ec_mutex_t *mutex)
+{
+    rt_mutex_unlock(mutex);
+}
+#else   // < KERNEL_VERSION(2,6,24)
+#define ec_mutex_t semaphore
+static inline void ec_mutex_init(struct ec_mutex_t *sem)
+{
+    sema_init(sem, 1);
+}
+static inline void ec_mutex_lock(struct ec_mutex_t *sem)
+{
+    down(sem);
+}
+static inline int ec_mutex_trylock(struct ec_mutex_t *sem)
+{
+    down(sem);
+    return 1;
+}
+static inline int ec_mutex_lock_interruptible(struct ec_mutex_t *sem)
+{
+    return down_interruptible(sem);
+}
+static inline void ec_mutex_unlock(struct ec_mutex_t *sem)
+{
+    up(sem);
+}
+
+#endif // KERNEL_VERSION(2,6,24)
+#endif // __KERNEL__
+
 #endif
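
The ec_mutex_* wrappers above map to rt_mutex on kernels 2.6.24 and later and fall
back to semaphores on older kernels (note that the semaphore variant of
ec_mutex_trylock() blocks and always reports success). A minimal usage sketch; the
structure and field names are illustrative, but the call pattern matches the
master.c changes below:

    struct my_ctx {
        struct ec_mutex_t io_mutex; /* rt_mutex or semaphore, depending on kernel */
    };

    static void my_ctx_init(struct my_ctx *ctx)
    {
        ec_mutex_init(&ctx->io_mutex);
    }

    static void my_ctx_io_cycle(struct my_ctx *ctx)
    {
        ec_mutex_lock(&ctx->io_mutex);
        /* ... receive and send frames ... */
        ec_mutex_unlock(&ctx->io_mutex);
    }
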
--- a/master/ioctl.h	Wed Mar 23 08:06:58 2011 +0100
+++ b/master/ioctl.h	Wed Apr 13 22:06:28 2011 +0200
@@ -56,7 +56,7 @@
  *
  * Increment this when changing the ioctl interface!
  */
-#define EC_IOCTL_VERSION_MAGIC 11
+#define EC_IOCTL_VERSION_MAGIC 12
 
 // Command-line tool
 #define EC_IOCTL_MODULE                EC_IOR(0x00, ec_ioctl_module_t)
@@ -137,6 +137,8 @@
 #define EC_IOCTL_VOE_EXEC             EC_IOWR(0x47, ec_ioctl_voe_t)
 #define EC_IOCTL_VOE_DATA             EC_IOWR(0x48, ec_ioctl_voe_t)
 #define EC_IOCTL_SET_SEND_INTERVAL     EC_IOW(0x49, size_t)
+#define EC_IOCTL_MASTER_SC_STATE        EC_IOR(0x50, ec_master_state_t)
+#define EC_IOCTL_SC_OVERLAPPING_IO      EC_IOW(0x51, ec_ioctl_config_t)
 
 /*****************************************************************************/
 
@@ -278,6 +280,7 @@
 
     // outputs
     uint32_t data_size;
+    uint32_t tx_size;
     uint32_t logical_base_address;
     uint16_t working_counter;
     uint16_t expected_working_counter;
@@ -297,6 +300,7 @@
     uint8_t sync_index;
     ec_direction_t dir;
     uint32_t logical_address;
+    uint32_t domain_address;
     uint32_t data_size;
 } ec_ioctl_domain_fmmu_t;
 
@@ -459,6 +463,7 @@
     } syncs[EC_MAX_SYNC_MANAGERS];
     uint16_t watchdog_divider;
     uint16_t watchdog_intervals;
+    uint8_t allow_overlapping_pdos;
     uint32_t sdo_count;
     uint32_t idn_count;
     int32_t slave_position;
--- a/master/mailbox.c	Wed Mar 23 08:06:58 2011 +0100
+++ b/master/mailbox.c	Wed Apr 13 22:06:28 2011 +0200
@@ -37,10 +37,84 @@
 #include <linux/slab.h>
 #include <linux/delay.h>
 
+#include "slave.h"
 #include "mailbox.h"
 #include "datagram.h"
 #include "master.h"
 
+
+/*****************************************************************************/
+
+/**
+   Mailbox constructor.
+*/
+
+void ec_mbox_init(ec_mailbox_t* mbox, /**< mailbox */
+                        ec_datagram_t* datagram  /**< Datagram used for the mailbox content. */
+                        )
+{
+    mbox->datagram = datagram;
+#ifdef EC_REDUCE_MBOXFRAMESIZE
+    ec_datagram_init(&mbox->end_datagram);
+#endif
+}
+
+
+/*****************************************************************************/
+
+/**
+   Clears mailbox datagrams.
+*/
+
+void ec_mbox_clear(ec_mailbox_t* mbox /**< mailbox */
+                         )
+{
+#ifdef EC_REDUCE_MBOXFRAMESIZE
+    ec_datagram_clear(&mbox->end_datagram);
+#endif
+}
+
+
+/*****************************************************************************/
+
+/**
+   Queues the mailbox datagrams of a slave as request FSM datagrams.
+*/
+
+void  ec_slave_mbox_queue_datagrams(const ec_slave_t* slave, /**< slave */
+                                    ec_mailbox_t* mbox /**< mailbox */
+                                    )
+{
+    ec_master_queue_request_fsm_datagram(slave->master, mbox->datagram);
+#ifdef EC_REDUCE_MBOXFRAMESIZE
+    if (mbox->end_datagram.type != EC_DATAGRAM_NONE)
+    {
+        ec_master_queue_request_fsm_datagram(slave->master, &mbox->end_datagram);
+    }
+#endif
+}
+
+
+/*****************************************************************************/
+
+/**
+   Queues the mailbox datagrams of the master.
+*/
+
+void  ec_master_mbox_queue_datagrams(ec_master_t* master, /**< master */
+                                    ec_mailbox_t* mbox /**< mailbox */
+                                    )
+{
+    ec_master_queue_fsm_datagram(master, mbox->datagram);
+#ifdef EC_REDUCE_MBOXFRAMESIZE
+    if (mbox->end_datagram.type != EC_DATAGRAM_NONE)
+    {
+        ec_master_queue_fsm_datagram(master, &mbox->end_datagram);
+    }
+#endif
+}
+
+
 /*****************************************************************************/
 
 /**
@@ -48,12 +122,13 @@
    \return Pointer to mailbox datagram data, or ERR_PTR() code.
 */
 
-uint8_t *ec_slave_mbox_prepare_send(const ec_slave_t *slave, /**< slave */
-                                    ec_datagram_t *datagram, /**< datagram */
+uint8_t *ec_slave_mbox_prepare_send(const ec_slave_t* slave, /**< slave */
+                                    ec_mailbox_t* mbox, /**< mailbox */
                                     uint8_t type, /**< mailbox protocol */
                                     size_t size /**< size of the data */
                                     )
 {
+    ec_datagram_t* datagram = mbox->datagram;
     size_t total_size;
     int ret;
 
@@ -72,8 +147,13 @@
     }
 
     ret = ec_datagram_fpwr(datagram, slave->station_address,
-            slave->configured_rx_mailbox_offset,
-            slave->configured_rx_mailbox_size);
+                           slave->configured_rx_mailbox_offset,
+#ifdef EC_REDUCE_MBOXFRAMESIZE
+                           total_size
+#else
+                           slave->configured_rx_mailbox_size
+#endif
+                           );
     if (ret)
         return ERR_PTR(ret);
 
@@ -82,6 +162,17 @@
     EC_WRITE_U8 (datagram->data + 4, 0x00); // channel & priority
     EC_WRITE_U8 (datagram->data + 5, type); // underlying protocol type
 
+#ifdef EC_REDUCE_MBOXFRAMESIZE
+    /* in order to fulfil the ESC's mailbox protocol,
+       at least the last byte of the mailbox must be written */
+    if (total_size < slave->configured_rx_mailbox_size) {
+        ret = ec_datagram_fpwr(&mbox->end_datagram, slave->station_address,
+            slave->configured_rx_mailbox_offset+slave->configured_rx_mailbox_size-1,
+            1);
+        if (ret)
+            return ERR_PTR(ret);
+    }
+#endif
     return datagram->data + EC_MBOX_HEADER_SIZE;
 }
 
@@ -93,15 +184,19 @@
    \return 0 in case of success, else < 0
 */
 
-int ec_slave_mbox_prepare_check(const ec_slave_t *slave, /**< slave */
-                                ec_datagram_t *datagram /**< datagram */
+int ec_slave_mbox_prepare_check(const ec_slave_t* slave, /**< slave */
+                                ec_mailbox_t* mbox /**< mailbox */
                                 )
 {
+    ec_datagram_t* datagram = mbox->datagram;
     int ret = ec_datagram_fprd(datagram, slave->station_address, 0x808, 8);
     if (ret)
         return ret;
 
     ec_datagram_zero(datagram);
+#ifdef EC_REDUCE_MBOXFRAMESIZE
+    mbox->end_datagram.type = EC_DATAGRAM_NONE;
+#endif
     return 0;
 }
 
@@ -112,9 +207,9 @@
    \return 0 in case of success, else < 0
 */
 
-int ec_slave_mbox_check(const ec_datagram_t *datagram /**< datagram */)
-{
-    return EC_READ_U8(datagram->data + 5) & 8 ? 1 : 0;
+int ec_slave_mbox_check(ec_mailbox_t* mbox /**< mailbox */)
+{
+    return EC_READ_U8(mbox->datagram->data + 5) & 8 ? 1 : 0;
 }
 
 /*****************************************************************************/
@@ -124,10 +219,11 @@
    \return 0 in case of success, else < 0
 */
 
-int ec_slave_mbox_prepare_fetch(const ec_slave_t *slave, /**< slave */
-                                ec_datagram_t *datagram /**< datagram */
+int ec_slave_mbox_prepare_fetch(const ec_slave_t* slave, /**< slave */
+                                ec_mailbox_t* mbox /**< mailbox */
                                 )
 {
+    ec_datagram_t* datagram = mbox->datagram;
     int ret = ec_datagram_fprd(datagram, slave->station_address,
             slave->configured_tx_mailbox_offset,
             slave->configured_tx_mailbox_size);
@@ -135,6 +231,9 @@
         return ret;
 
     ec_datagram_zero(datagram);
+#ifdef EC_REDUCE_MBOXFRAMESIZE
+    mbox->end_datagram.type = EC_DATAGRAM_NONE;
+#endif
     return 0;
 }
 
@@ -162,12 +261,13 @@
  *
  * \return Pointer to the received data, or ERR_PTR() code.
  */
-uint8_t *ec_slave_mbox_fetch(const ec_slave_t *slave, /**< slave */
-                             ec_datagram_t *datagram, /**< datagram */
+uint8_t *ec_slave_mbox_fetch(const ec_slave_t* slave, /**< slave */
+                             ec_mailbox_t* mbox, /**< mailbox */
                              uint8_t *type, /**< expected mailbox protocol */
                              size_t *size /**< size of the received data */
                              )
 {
+    ec_datagram_t* datagram = mbox->datagram;
     size_t data_size;
 
     data_size = EC_READ_U16(datagram->data);
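
A worked example of the frame-size reduction implemented in
ec_slave_mbox_prepare_send() above (illustrative figures; EC_MBOX_HEADER_SIZE is
assumed to be the usual 6-byte mailbox header):

    /* RX mailbox configured with 256 bytes, request payload of 8 bytes:
     *   total_size = EC_MBOX_HEADER_SIZE + 8 = 14 bytes
     *
     * Without EC_REDUCE_MBOXFRAMESIZE the FPWR datagram always carries
     * configured_rx_mailbox_size = 256 bytes.  With it, a 14-byte FPWR is sent
     * plus a second 1-byte FPWR to the last mailbox byte (offset
     * configured_rx_mailbox_offset + 255), which satisfies the ESC's
     * requirement that the mailbox be written up to its last byte.  The frame
     * payload shrinks by roughly 256 - (14 + 1) = 241 bytes, minus the
     * header/footer overhead of the additional datagram. */
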
--- a/master/mailbox.h	Wed Mar 23 08:06:58 2011 +0100
+++ b/master/mailbox.h	Wed Apr 13 22:06:28 2011 +0200
@@ -37,7 +37,8 @@
 #ifndef __EC_MAILBOX_H__
 #define __EC_MAILBOX_H__
 
-#include "slave.h"
+#include "globals.h"
+#include "datagram.h"
 
 /*****************************************************************************/
 
@@ -47,12 +48,53 @@
 
 /*****************************************************************************/
 
-uint8_t *ec_slave_mbox_prepare_send(const ec_slave_t *, ec_datagram_t *,
+/** EtherCAT slave mailbox.
+ */
+struct ec_mailbox
+{
+    ec_datagram_t* datagram;    /**< Datagram used for the mailbox content. */
+#ifdef EC_REDUCE_MBOXFRAMESIZE
+    ec_datagram_t end_datagram; /**< Datagram used for writing the end byte to the mailbox. */
+#endif
+};
+typedef struct ec_mailbox ec_mailbox_t; /**< \see ec_mailbox. */
+
+/*****************************************************************************/
+
+void     ec_mbox_init(ec_mailbox_t *, ec_datagram_t*);
+void     ec_mbox_clear(ec_mailbox_t*);
+/**
+   Checks whether the mailbox datagrams have the given state.
+*/
+static inline int ec_mbox_is_datagram_state(ec_mailbox_t*mbox,
+                                            ec_datagram_state_t state) {
+    return (mbox->datagram->state == state)
+#ifdef EC_REDUCE_MBOXFRAMESIZE
+         && (mbox->end_datagram.type == EC_DATAGRAM_NONE || mbox->end_datagram.state == state)
+#endif
+    ;
+}
+
+/**
+   Checks whether the mailbox datagrams have the given working counter.
+*/
+static inline int ec_mbox_is_datagram_wc(ec_mailbox_t*mbox,
+                                         size_t wc) {
+    return (mbox->datagram->working_counter == wc)
+#ifdef EC_REDUCE_MBOXFRAMESIZE
+         && (mbox->end_datagram.type == EC_DATAGRAM_NONE || mbox->end_datagram.working_counter == wc)
+#endif
+         ;
+}
+
+void     ec_slave_mbox_queue_datagrams(const ec_slave_t*,ec_mailbox_t*);
+void     ec_master_mbox_queue_datagrams(ec_master_t*, ec_mailbox_t*);
+uint8_t *ec_slave_mbox_prepare_send(const ec_slave_t*,ec_mailbox_t *,
                                     uint8_t, size_t);
-int      ec_slave_mbox_prepare_check(const ec_slave_t *, ec_datagram_t *);
-int      ec_slave_mbox_check(const ec_datagram_t *);
-int      ec_slave_mbox_prepare_fetch(const ec_slave_t *, ec_datagram_t *);
-uint8_t *ec_slave_mbox_fetch(const ec_slave_t *, ec_datagram_t *,
+int      ec_slave_mbox_prepare_check(const ec_slave_t*,ec_mailbox_t *);
+int      ec_slave_mbox_check(ec_mailbox_t *);
+int      ec_slave_mbox_prepare_fetch(const ec_slave_t*,ec_mailbox_t *);
+uint8_t *ec_slave_mbox_fetch(const ec_slave_t*,ec_mailbox_t *,
                              uint8_t *, size_t *);
 
 /*****************************************************************************/
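
A minimal sketch of how the state machines in this changeset use the new mailbox
API for the check/fetch half of a transfer (compare the fsm_soe.c hunks above;
error handling and the datagram round trips are omitted, and the surrounding fsm
and slave variables are assumed to exist):

    ec_mailbox_t *mbox = fsm->mbox;
    uint8_t *data, mbox_prot;
    size_t rec_size;

    ec_slave_mbox_prepare_check(slave, mbox);     /* read the mailbox sync manager */
    /* ... queue the mailbox datagrams (e.g. via ec_slave_mbox_queue_datagrams())
       and wait for reception ... */
    if (ec_mbox_is_datagram_state(mbox, EC_DATAGRAM_RECEIVED)
            && ec_mbox_is_datagram_wc(mbox, 1)
            && ec_slave_mbox_check(mbox)) {
        ec_slave_mbox_prepare_fetch(slave, mbox); /* read the TX mailbox */
        /* ... second round trip ... */
        data = ec_slave_mbox_fetch(slave, mbox, &mbox_prot, &rec_size);
        /* data now points at rec_size bytes of mailbox payload, and mbox_prot
           holds the mailbox protocol type */
    }
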
--- a/master/master.c	Wed Mar 23 08:06:58 2011 +0100
+++ b/master/master.c	Wed Apr 13 22:06:28 2011 +0200
@@ -56,9 +56,13 @@
 
 /*****************************************************************************/
 
-/** Set to 1 to enable external datagram injection debugging.
- */
+/** Set to 1 to enable fsm datagram injection debugging.
+ */
+#ifdef USE_TRACE_PRINTK
+#define DEBUG_INJECT 1
+#else
 #define DEBUG_INJECT 0
+#endif
 
 #ifdef EC_HAVE_CYCLES
 
@@ -66,9 +70,9 @@
  */
 static cycles_t timeout_cycles;
 
-/** Timeout for external datagram injection [cycles].
- */
-static cycles_t ext_injection_timeout_cycles;
+/** Timeout for fsm datagram injection [cycles].
+ */
+static cycles_t fsm_injection_timeout_cycles;
 
 #else
 
@@ -76,9 +80,9 @@
  */
 static unsigned long timeout_jiffies;
 
-/** Timeout for external datagram injection [jiffies].
- */
-static unsigned long ext_injection_timeout_jiffies;
+/** Timeout for fsm datagram injection [jiffies].
+ */
+static unsigned long fsm_injection_timeout_jiffies;
 
 #endif
 
@@ -89,7 +93,7 @@
 static int ec_master_idle_thread(void *);
 static int ec_master_operation_thread(void *);
 #ifdef EC_EOE
-static int ec_master_eoe_thread(void *);
+static int ec_master_eoe_processing(ec_master_t *);
 #endif
 void ec_master_find_dc_ref_clock(ec_master_t *);
 
@@ -101,11 +105,11 @@
 {
 #ifdef EC_HAVE_CYCLES
     timeout_cycles = (cycles_t) EC_IO_TIMEOUT /* us */ * (cpu_khz / 1000);
-    ext_injection_timeout_cycles = (cycles_t) EC_SDO_INJECTION_TIMEOUT /* us */ * (cpu_khz / 1000);
+    fsm_injection_timeout_cycles = (cycles_t) EC_FSM_INJECTION_TIMEOUT /* us */ * (cpu_khz / 1000);
 #else
     // one jiffy may always elapse between time measurement
     timeout_jiffies = max(EC_IO_TIMEOUT * HZ / 1000000, 1);
-    ext_injection_timeout_jiffies = max(EC_SDO_INJECTION_TIMEOUT * HZ / 1000000, 1);
+    fsm_injection_timeout_jiffies = max(EC_FSM_INJECTION_TIMEOUT * HZ / 1000000, 1);
 #endif
 }
 
@@ -130,12 +134,12 @@
     master->index = index;
     master->reserved = 0;
 
-    sema_init(&master->master_sem, 1);
+    ec_mutex_init(&master->master_mutex);
 
     master->main_mac = main_mac;
     master->backup_mac = backup_mac;
 
-    sema_init(&master->device_sem, 1);
+    ec_mutex_init(&master->device_mutex);
 
     master->phase = EC_ORPHANED;
     master->active = 0;
@@ -145,30 +149,32 @@
 
     master->slaves = NULL;
     master->slave_count = 0;
-    
+
     INIT_LIST_HEAD(&master->configs);
 
     master->app_time = 0ULL;
+#ifdef EC_HAVE_CYCLES
+    master->dc_cycles_app_start_time = 0;
+#endif
+    master->dc_jiffies_app_start_time = 0;
     master->app_start_time = 0ULL;
     master->has_app_time = 0;
 
     master->scan_busy = 0;
     master->allow_scan = 1;
-    sema_init(&master->scan_sem, 1);
+    ec_mutex_init(&master->scan_mutex);
     init_waitqueue_head(&master->scan_queue);
 
     master->config_busy = 0;
     master->allow_config = 1;
-    sema_init(&master->config_sem, 1);
+    ec_mutex_init(&master->config_mutex);
     init_waitqueue_head(&master->config_queue);
     
     INIT_LIST_HEAD(&master->datagram_queue);
     master->datagram_index = 0;
 
-    INIT_LIST_HEAD(&master->ext_datagram_queue);
-    sema_init(&master->ext_queue_sem, 1);
-
-    INIT_LIST_HEAD(&master->external_datagram_queue);
+    ec_mutex_init(&master->fsm_queue_mutex);
+    INIT_LIST_HEAD(&master->fsm_datagram_queue);
     
     // send interval in IDLE phase
     ec_master_set_send_interval(master, 1000000 / HZ);
@@ -188,13 +194,13 @@
     INIT_LIST_HEAD(&master->eoe_handlers);
 #endif
 
-    sema_init(&master->io_sem, 1);
-    master->send_cb = NULL;
-    master->receive_cb = NULL;
-    master->cb_data = NULL;
-    master->app_send_cb = NULL;
-    master->app_receive_cb = NULL;
-    master->app_cb_data = NULL;
+    ec_mutex_init(&master->io_mutex);
+    master->fsm_queue_lock_cb = NULL;
+    master->fsm_queue_unlock_cb = NULL;
+    master->fsm_queue_locking_data = NULL;
+    master->app_fsm_queue_lock_cb = NULL;
+    master->app_fsm_queue_unlock_cb = NULL;
+    master->app_fsm_queue_locking_data = NULL;
 
     INIT_LIST_HEAD(&master->sii_requests);
     init_waitqueue_head(&master->sii_queue);
@@ -222,6 +228,7 @@
     }
 
     // create state machine object
+    ec_mbox_init(&master->fsm_mbox,&master->fsm_datagram);
     ec_fsm_master_init(&master->fsm, master, &master->fsm_datagram);
 
     // init reference sync datagram
@@ -323,7 +330,7 @@
 #endif
 
     ec_cdev_clear(&master->cdev);
-    
+
 #ifdef EC_EOE
     ec_master_clear_eoe_handlers(master);
 #endif
@@ -335,6 +342,7 @@
     ec_datagram_clear(&master->sync_datagram);
     ec_datagram_clear(&master->ref_sync_datagram);
     ec_fsm_master_clear(&master->fsm);
+    ec_mbox_clear(&master->fsm_mbox);
     ec_datagram_clear(&master->fsm_datagram);
     ec_device_clear(&master->backup_device);
     ec_device_clear(&master->main_device);
@@ -395,6 +403,7 @@
         EC_MASTER_WARN(master, "Discarding SII request, slave %u about"
                 " to be deleted.\n", request->slave->ring_position);
         request->state = EC_INT_REQUEST_FAILURE;
+        kref_put(&request->refcount,ec_master_sii_write_request_release);
         wake_up(&master->sii_queue);
     }
 
@@ -405,14 +414,18 @@
         EC_MASTER_WARN(master, "Discarding register request, slave %u"
                 " about to be deleted.\n", request->slave->ring_position);
         request->state = EC_INT_REQUEST_FAILURE;
+        kref_put(&request->refcount,ec_master_reg_request_release);
         wake_up(&master->reg_queue);
     }
 
+    // we must lock the io_mutex here because the slaves' fsm_datagrams will be unqueued
+    ec_mutex_lock(&master->io_mutex);
     for (slave = master->slaves;
             slave < master->slaves + master->slave_count;
             slave++) {
         ec_slave_clear(slave);
     }
+    ec_mutex_unlock(&master->io_mutex);
 
     if (master->slaves) {
         kfree(master->slaves);
@@ -430,11 +443,14 @@
 {
     ec_domain_t *domain, *next;
 
+    // we must lock the io_mutex here because the domains' datagrams will be unqueued
+    ec_mutex_lock(&master->io_mutex);
     list_for_each_entry_safe(domain, next, &master->domains, list) {
         list_del(&domain->list);
         ec_domain_clear(domain);
         kfree(domain);
     }
+    ec_mutex_unlock(&master->io_mutex);
 }
 
 /*****************************************************************************/
@@ -445,38 +461,10 @@
         ec_master_t *master /**< EtherCAT master. */
         )
 {
-    down(&master->master_sem);
+    ec_mutex_lock(&master->master_mutex);
     ec_master_clear_domains(master);
     ec_master_clear_slave_configs(master);
-    up(&master->master_sem);
-}
-
-/*****************************************************************************/
-
-/** Internal sending callback.
- */
-void ec_master_internal_send_cb(
-        void *cb_data /**< Callback data. */
-        )
-{
-    ec_master_t *master = (ec_master_t *) cb_data;
-    down(&master->io_sem);
-    ecrt_master_send_ext(master);
-    up(&master->io_sem);
-}
-
-/*****************************************************************************/
-
-/** Internal receiving callback.
- */
-void ec_master_internal_receive_cb(
-        void *cb_data /**< Callback data. */
-        )
-{
-    ec_master_t *master = (ec_master_t *) cb_data;
-    down(&master->io_sem);
-    ecrt_master_receive(master);
-    up(&master->io_sem);
+    ec_mutex_unlock(&master->master_mutex);
 }
 
 /*****************************************************************************/
@@ -546,9 +534,9 @@
 
     EC_MASTER_DBG(master, 1, "ORPHANED -> IDLE.\n");
 
-    master->send_cb = ec_master_internal_send_cb;
-    master->receive_cb = ec_master_internal_receive_cb;
-    master->cb_data = master;
+    master->fsm_queue_lock_cb = NULL;
+    master->fsm_queue_unlock_cb = NULL;
+    master->fsm_queue_locking_data = NULL;
 
     master->phase = EC_IDLE;
     ret = ec_master_thread_start(master, ec_master_idle_thread,
@@ -569,14 +557,11 @@
 
     master->phase = EC_ORPHANED;
     
-#ifdef EC_EOE
-    ec_master_eoe_stop(master);
-#endif
     ec_master_thread_stop(master);
 
-    down(&master->master_sem);
+    ec_mutex_lock(&master->master_mutex);
     ec_master_clear_slaves(master);
-    up(&master->master_sem);
+    ec_mutex_unlock(&master->master_mutex);
 }
 
 /*****************************************************************************/
@@ -593,10 +578,10 @@
 
     EC_MASTER_DBG(master, 1, "IDLE -> OPERATION.\n");
 
-    down(&master->config_sem);
+    ec_mutex_lock(&master->config_mutex);
     master->allow_config = 0; // temporarily disable slave configuration
     if (master->config_busy) {
-        up(&master->config_sem);
+        ec_mutex_unlock(&master->config_mutex);
 
         // wait for slave configuration to complete
         ret = wait_event_interruptible(master->config_queue,
@@ -610,15 +595,15 @@
         EC_MASTER_DBG(master, 1, "Waiting for pending slave"
                 " configuration returned.\n");
     } else {
-        up(&master->config_sem);
-    }
-
-    down(&master->scan_sem);
+        ec_mutex_unlock(&master->config_mutex);
+    }
+
+    ec_mutex_lock(&master->scan_mutex);
     master->allow_scan = 0; // 'lock' the slave list
     if (!master->scan_busy) {
-        up(&master->scan_sem);
+        ec_mutex_unlock(&master->scan_mutex);
     } else {
-        up(&master->scan_sem);
+        ec_mutex_unlock(&master->scan_mutex);
 
         // wait for slave scan to complete
         ret = wait_event_interruptible(master->scan_queue, !master->scan_busy);
@@ -648,9 +633,9 @@
 #endif
 
     master->phase = EC_OPERATION;
-    master->app_send_cb = NULL;
-    master->app_receive_cb = NULL;
-    master->app_cb_data = NULL;
+    master->app_fsm_queue_lock_cb = NULL;
+    master->app_fsm_queue_unlock_cb = NULL;
+    master->app_fsm_queue_locking_data = NULL;
     return ret;
     
 out_allow:
@@ -680,27 +665,40 @@
 
 /*****************************************************************************/
 
-/** Injects external datagrams that fit into the datagram queue.
- */
-void ec_master_inject_external_datagrams(
+/** Injects fsm datagrams that fit into the datagram queue.
+ */
+void ec_master_inject_fsm_datagrams(
         ec_master_t *master /**< EtherCAT master */
         )
 {
-    ec_datagram_t *datagram, *n;
+    ec_datagram_t *datagram, *next;
     size_t queue_size = 0;
 
+    if (master->fsm_queue_lock_cb)
+        master->fsm_queue_lock_cb(master->fsm_queue_locking_data);
+    if (ec_mutex_trylock(&master->fsm_queue_mutex) == 0) {
+           if (master->fsm_queue_unlock_cb)
+               master->fsm_queue_unlock_cb(master->fsm_queue_locking_data);
+           return;
+    }
+    if (list_empty(&master->fsm_datagram_queue)) {
+        ec_mutex_unlock(&master->fsm_queue_mutex);
+        if (master->fsm_queue_unlock_cb)
+            master->fsm_queue_unlock_cb(master->fsm_queue_locking_data);
+        return;
+    }
     list_for_each_entry(datagram, &master->datagram_queue, queue) {
         queue_size += datagram->data_size;
     }
 
-    list_for_each_entry_safe(datagram, n, &master->external_datagram_queue,
-            queue) {
+    list_for_each_entry_safe(datagram, next, &master->fsm_datagram_queue,
+            fsm_queue) {
         queue_size += datagram->data_size;
         if (queue_size <= master->max_queue_size) {
-            list_del_init(&datagram->queue);
+            list_del_init(&datagram->fsm_queue);
 #if DEBUG_INJECT
-            EC_MASTER_DBG(master, 0, "Injecting external datagram %08x"
-                    " size=%u, queue_size=%u\n", (unsigned int) datagram,
+            EC_MASTER_DBG(master, 2, "Injecting fsm datagram %p"
+                    " size=%zu, queue_size=%zu\n", datagram,
                     datagram->data_size, queue_size);
 #endif
 #ifdef EC_HAVE_CYCLES
@@ -711,9 +709,9 @@
         }
         else {
             if (datagram->data_size > master->max_queue_size) {
-                list_del_init(&datagram->queue);
+                list_del_init(&datagram->fsm_queue);
                 datagram->state = EC_DATAGRAM_ERROR;
-                EC_MASTER_ERR(master, "External datagram %p is too large,"
+                EC_MASTER_ERR(master, "Fsm datagram %p is too large,"
                         " size=%zu, max_queue_size=%zu\n",
                         datagram, datagram->data_size,
                         master->max_queue_size);
@@ -722,15 +720,15 @@
                 cycles_t cycles_now = get_cycles();
 
                 if (cycles_now - datagram->cycles_sent
-                        > ext_injection_timeout_cycles)
+                        > fsm_injection_timeout_cycles)
 #else
                 if (jiffies - datagram->jiffies_sent
-                        > ext_injection_timeout_jiffies)
+                        > fsm_injection_timeout_jiffies)
 #endif
                 {
                     unsigned int time_us;
 
-                    list_del_init(&datagram->queue);
+                    list_del_init(&datagram->fsm_queue);
                     datagram->state = EC_DATAGRAM_ERROR;
 #ifdef EC_HAVE_CYCLES
                     time_us = (unsigned int)
@@ -741,21 +739,24 @@
                         ((jiffies - datagram->jiffies_sent) * 1000000 / HZ);
 #endif
                     EC_MASTER_ERR(master, "Timeout %u us: Injecting"
-                            " external datagram %p size=%zu,"
+                            " fsm datagram %p size=%zu,"
                             " max_queue_size=%zu\n", time_us, datagram,
                             datagram->data_size, master->max_queue_size);
                 }
 #if DEBUG_INJECT
                 else {
-                    EC_MASTER_DBG(master, 0, "Deferred injecting"
-                            " of external datagram %p"
-                            " size=%u, queue_size=%u\n",
+                    EC_MASTER_DBG(master, 2, "Deferred injecting"
+                            " of fsm datagram %p"
+                            " size=%zu, queue_size=%zu\n",
                             datagram, datagram->data_size, queue_size);
                 }
 #endif
             }
         }
     }
+    ec_mutex_unlock(&master->fsm_queue_mutex);
+    if (master->fsm_queue_unlock_cb)
+        master->fsm_queue_unlock_cb(master->fsm_queue_locking_data);
 }
 
 /*****************************************************************************/
@@ -776,40 +777,59 @@
 
 /*****************************************************************************/
 
-/** Places an external datagram in the sdo datagram queue.
- */
-void ec_master_queue_external_datagram(
+/** Places a request (SDO/FoE/SoE/EoE) fsm datagram in the fsm datagram queue.
+ */
+void ec_master_queue_request_fsm_datagram(
         ec_master_t *master, /**< EtherCAT master */
         ec_datagram_t *datagram /**< datagram */
         )
 {
+    ec_master_queue_fsm_datagram(master,datagram);
+    master->fsm.idle = 0;   // pump the bus as fast as possible
+}
+
+/*****************************************************************************/
+
+/** Places an fsm datagram in the fsm datagram queue.
+ */
+void ec_master_queue_fsm_datagram(
+        ec_master_t *master, /**< EtherCAT master */
+        ec_datagram_t *datagram /**< datagram */
+        )
+{
     ec_datagram_t *queued_datagram;
 
-    down(&master->io_sem);
+    if (master->fsm_queue_lock_cb)
+        master->fsm_queue_lock_cb(master->fsm_queue_locking_data);
+    ec_mutex_lock(&master->fsm_queue_mutex);
 
     // check, if the datagram is already queued
-    list_for_each_entry(queued_datagram, &master->external_datagram_queue,
-            queue) {
+    list_for_each_entry(queued_datagram, &master->fsm_datagram_queue,
+            fsm_queue) {
         if (queued_datagram == datagram) {
             datagram->state = EC_DATAGRAM_QUEUED;
+            ec_mutex_unlock(&master->fsm_queue_mutex);
+            if (master->fsm_queue_unlock_cb)
+                master->fsm_queue_unlock_cb(master->fsm_queue_locking_data);
             return;
         }
     }
 
 #if DEBUG_INJECT
-    EC_MASTER_DBG(master, 0, "Requesting external datagram %p size=%u\n",
+    EC_MASTER_DBG(master, 2, "Requesting fsm datagram %p size=%zu\n",
             datagram, datagram->data_size);
 #endif
 
-    list_add_tail(&datagram->queue, &master->external_datagram_queue);
+    list_add_tail(&datagram->fsm_queue, &master->fsm_datagram_queue);
     datagram->state = EC_DATAGRAM_QUEUED;
 #ifdef EC_HAVE_CYCLES
     datagram->cycles_sent = get_cycles();
 #endif
     datagram->jiffies_sent = jiffies;
 
-    master->fsm.idle = 0;
-    up(&master->io_sem);
+    ec_mutex_unlock(&master->fsm_queue_mutex);
+    if (master->fsm_queue_unlock_cb)
+        master->fsm_queue_unlock_cb(master->fsm_queue_locking_data);
 }
 
 /*****************************************************************************/
@@ -839,19 +859,6 @@
     datagram->state = EC_DATAGRAM_QUEUED;
 }
 
-/*****************************************************************************/
-
-/** Places a datagram in the non-application datagram queue.
- */
-void ec_master_queue_datagram_ext(
-        ec_master_t *master, /**< EtherCAT master */
-        ec_datagram_t *datagram /**< datagram */
-        )
-{
-    down(&master->ext_queue_sem);
-    list_add_tail(&datagram->queue, &master->ext_datagram_queue);
-    up(&master->ext_queue_sem);
-}
 
 /*****************************************************************************/
 
@@ -862,7 +869,7 @@
 {
     ec_datagram_t *datagram, *next;
     size_t datagram_size;
-    uint8_t *frame_data, *cur_data;
+    uint8_t *frame_data, *cur_data, *frame_datagram_data;
     void *follows_word;
 #ifdef EC_HAVE_CYCLES
     cycles_t cycles_start, cycles_sent, cycles_end;
@@ -870,6 +877,7 @@
     unsigned long jiffies_sent;
     unsigned int frame_count, more_datagrams_waiting;
     struct list_head sent_datagrams;
+    ec_fmmu_config_t* domain_fmmu;
 
 #ifdef EC_HAVE_CYCLES
     cycles_start = get_cycles();
@@ -901,8 +909,8 @@
             list_add_tail(&datagram->sent, &sent_datagrams);
             datagram->index = master->datagram_index++;
 
-            EC_MASTER_DBG(master, 2, "adding datagram 0x%02X\n",
-                    datagram->index);
+            EC_MASTER_DBG(master, 2, "adding datagram %p i=0x%02X size=%zu\n",datagram,
+                    datagram->index,datagram_size);
 
             // set "datagram following" flag in previous frame
             if (follows_word)
@@ -918,7 +926,28 @@
             cur_data += EC_DATAGRAM_HEADER_SIZE;
 
             // EtherCAT datagram data
-            memcpy(cur_data, datagram->data, datagram->data_size);
+            frame_datagram_data = cur_data;
+            if (datagram->domain) {
+                unsigned int datagram_address = EC_READ_U32(datagram->address);
+                int i = 0;
+                uint8_t *domain_data = datagram->data;
+                list_for_each_entry(domain_fmmu, &datagram->domain->fmmu_configs, list) {
+                    if (domain_fmmu->dir == EC_DIR_OUTPUT ) {
+                        unsigned int frame_offset = domain_fmmu->logical_start_address-datagram_address;
+                        memcpy(frame_datagram_data+frame_offset, domain_data, domain_fmmu->data_size);
+                        if (unlikely(master->debug_level > 1)) {
+                            EC_DBG("sending dg %p i=0x%02X fmmu %u fp=%u dp=%zu size=%u\n",
+                                   datagram,datagram->index, i,frame_offset,domain_data-datagram->data,domain_fmmu->data_size);
+                            ec_print_data(domain_data, domain_fmmu->data_size);
+                        }
+                    }
+                    domain_data += domain_fmmu->data_size;
+                    ++i;
+                }
+            }
+            else {
+                memcpy(frame_datagram_data, datagram->data, datagram->data_size);
+            }
             cur_data += datagram->data_size;
 
             // EtherCAT datagram footer
@@ -988,8 +1017,9 @@
     size_t frame_size, data_size;
     uint8_t datagram_type, datagram_index;
     unsigned int cmd_follows, matched;
-    const uint8_t *cur_data;
+    const uint8_t *cur_data, *frame_datagram_data;
     ec_datagram_t *datagram;
+    ec_fmmu_config_t* domain_fmmu;
 
     if (unlikely(size < EC_FRAME_HEADER_SIZE)) {
         if (master->debug_level) {
@@ -1072,9 +1102,30 @@
             cur_data += data_size + EC_DATAGRAM_FOOTER_SIZE;
             continue;
         }
-
-        // copy received data into the datagram memory
-        memcpy(datagram->data, cur_data, data_size);
+        frame_datagram_data = cur_data;
+        if (datagram->domain) {
+            size_t datagram_address = EC_READ_U32(datagram->address);
+            int i = 0;
+            uint8_t *domain_data = datagram->data;
+            list_for_each_entry(domain_fmmu, &datagram->domain->fmmu_configs, list) {
+                if (domain_fmmu->dir == EC_DIR_INPUT ) {
+                    unsigned int frame_offset = domain_fmmu->logical_start_address-datagram_address;
+                    memcpy(domain_data, frame_datagram_data+frame_offset, domain_fmmu->data_size);
+                    if (unlikely(master->debug_level > 1)) {
+                        EC_DBG("receiving dg %p i=0x%02X fmmu %u fp=%u dp=%zu size=%u\n",
+                               datagram,datagram->index, i,
+                               frame_offset,domain_data-datagram->data,domain_fmmu->data_size);
+                        ec_print_data(domain_data, domain_fmmu->data_size);
+                    }
+                }
+                domain_data += domain_fmmu->data_size;
+                ++i;
+            }
+        }
+        else {
+            // copy received data into the datagram memory
+            memcpy(datagram->data, frame_datagram_data, data_size);
+        }
         cur_data += data_size;
 
         // set the datagram's working counter
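
The send and receive hunks above replace the flat memcpy of domain data with a
per-FMMU scatter/gather copy keyed on each FMMU's logical start address. A small
worked example with illustrative numbers, assuming the overlapping-PDO layout this
changeset enables via allow_overlapping_pdos:

    /* Domain datagram at logical address 0x0000 with two FMMUs:
     *   FMMU0: EC_DIR_OUTPUT, logical_start_address = 0x0000, data_size = 4
     *   FMMU1: EC_DIR_INPUT,  logical_start_address = 0x0000, data_size = 4
     *
     * datagram->data keeps both images back to back (8 bytes), while the frame
     * only carries the overlapped logical range (4 bytes).  On send, only FMMU0
     * is copied out at frame_offset = 0x0000 - 0x0000 = 0; on receive, only
     * FMMU1 is copied back from the same frame offset into its own slice of
     * datagram->data (domain_data has advanced past FMMU0's 4 bytes by then). */
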
@@ -1087,6 +1138,8 @@
         datagram->cycles_received = master->main_device.cycles_poll;
 #endif
         datagram->jiffies_received = master->main_device.jiffies_poll;
+        EC_MASTER_DBG(master, 2, "removing datagram %p i=0x%02X\n",datagram,
+                datagram->index);
         list_del_init(&datagram->queue);
     }
 }
@@ -1208,9 +1261,7 @@
 {
     ec_master_t *master = (ec_master_t *) priv_data;
     ec_slave_t *slave = NULL;
-    int fsm_exec;
     size_t sent_bytes;
-
     // send interval in IDLE phase
     ec_master_set_send_interval(master, 1000000 / HZ); 
 
@@ -1222,32 +1273,33 @@
         ec_datagram_output_stats(&master->fsm_datagram);
 
         // receive
-        down(&master->io_sem);
+        ec_mutex_lock(&master->io_mutex);
         ecrt_master_receive(master);
-        up(&master->io_sem);
-
-        fsm_exec = 0;
+        ec_mutex_unlock(&master->io_mutex);
+
         // execute master & slave state machines
-        if (down_interruptible(&master->master_sem))
+        if (ec_mutex_lock_interruptible(&master->master_mutex))
             break;
-        fsm_exec = ec_fsm_master_exec(&master->fsm);
+        if (ec_fsm_master_exec(&master->fsm)) {
+            ec_master_mbox_queue_datagrams(master, &master->fsm_mbox);
+        }
         for (slave = master->slaves;
                 slave < master->slaves + master->slave_count;
                 slave++) {
-            ec_fsm_slave_exec(&slave->fsm);
-        }
-        up(&master->master_sem);
+            ec_fsm_slave_exec(&slave->fsm); // may queue datagram in fsm queue
+        }
+#if defined(EC_EOE)
+        if (!ec_master_eoe_processing(master))
+            master->fsm.idle = 0;  // pump the bus as fast as possible
+#endif
+        ec_mutex_unlock(&master->master_mutex);
 
         // queue and send
-        down(&master->io_sem);
-        if (fsm_exec) {
-            ec_master_queue_datagram(master, &master->fsm_datagram);
-        }
-        ec_master_inject_external_datagrams(master);
+        ec_mutex_lock(&master->io_mutex);
         ecrt_master_send(master);
         sent_bytes = master->main_device.tx_skb[
             master->main_device.tx_ring_index]->len;
-        up(&master->io_sem);
+        ec_mutex_unlock(&master->io_mutex);
 
         if (ec_fsm_master_idle(&master->fsm)) {
 #ifdef EC_USE_HRTIMER
@@ -1278,7 +1330,6 @@
 {
     ec_master_t *master = (ec_master_t *) priv_data;
     ec_slave_t *slave = NULL;
-    int fsm_exec;
 
     EC_MASTER_DBG(master, 1, "Operation thread running"
             " with fsm interval = %u us, max data size=%zu\n",
@@ -1287,26 +1338,23 @@
     while (!kthread_should_stop()) {
         ec_datagram_output_stats(&master->fsm_datagram);
 
-        if (master->injection_seq_rt == master->injection_seq_fsm) {
-            // output statistics
-            ec_master_output_stats(master);
-
-            fsm_exec = 0;
-            // execute master & slave state machines
-            if (down_interruptible(&master->master_sem))
-                break;
-            fsm_exec += ec_fsm_master_exec(&master->fsm);
-            for (slave = master->slaves;
-                    slave < master->slaves + master->slave_count;
-                    slave++) {
-                ec_fsm_slave_exec(&slave->fsm);
-            }
-            up(&master->master_sem);
-
-            // inject datagrams (let the rt thread queue them, see ecrt_master_send)
-            if (fsm_exec)
-                master->injection_seq_fsm++;
-        }
+        // output statistics
+        ec_master_output_stats(master);
+
+        // execute master & slave state machines
+        if (ec_mutex_lock_interruptible(&master->master_mutex))
+            break;
+        if (ec_fsm_master_exec(&master->fsm))
+            ec_master_mbox_queue_datagrams(master, &master->fsm_mbox);
+        for (slave = master->slaves;
+                slave < master->slaves + master->slave_count;
+                slave++) {
+            ec_fsm_slave_exec(&slave->fsm); // may queue datagram in fsm queue
+        }
+#if defined(EC_EOE)
+        ec_master_eoe_processing(master);
+#endif
+        ec_mutex_unlock(&master->master_mutex);
 
 #ifdef EC_USE_HRTIMER
         // the op thread should not work faster than the sending RT thread
@@ -1329,119 +1377,48 @@
 /*****************************************************************************/
 
 #ifdef EC_EOE
-/** Starts Ethernet over EtherCAT processing on demand.
- */
-void ec_master_eoe_start(ec_master_t *master /**< EtherCAT master */)
-{
-    struct sched_param param = { .sched_priority = 0 };
-
-    if (master->eoe_thread) {
-        EC_MASTER_WARN(master, "EoE already running!\n");
-        return;
-    }
-
-    if (list_empty(&master->eoe_handlers))
-        return;
-
-    if (!master->send_cb || !master->receive_cb) {
-        EC_MASTER_WARN(master, "No EoE processing"
-                " because of missing callbacks!\n");
-        return;
-    }
-
-    EC_MASTER_INFO(master, "Starting EoE thread.\n");
-    master->eoe_thread = kthread_run(ec_master_eoe_thread, master,
-            "EtherCAT-EoE");
-    if (IS_ERR(master->eoe_thread)) {
-        int err = (int) PTR_ERR(master->eoe_thread);
-        EC_MASTER_ERR(master, "Failed to start EoE thread (error %i)!\n",
-                err);
-        master->eoe_thread = NULL;
-        return;
-    }
-
-    sched_setscheduler(master->eoe_thread, SCHED_NORMAL, &param);
-    set_user_nice(master->eoe_thread, 0);
-}
-
-/*****************************************************************************/
-
-/** Stops the Ethernet over EtherCAT processing.
- */
-void ec_master_eoe_stop(ec_master_t *master /**< EtherCAT master */)
-{
-    if (master->eoe_thread) {
-        EC_MASTER_INFO(master, "Stopping EoE thread.\n");
-
-        kthread_stop(master->eoe_thread);
-        master->eoe_thread = NULL;
-        EC_MASTER_INFO(master, "EoE thread exited.\n");
-    }
-}
 
 /*****************************************************************************/
 
 /** Does the Ethernet over EtherCAT processing.
  */
-static int ec_master_eoe_thread(void *priv_data)
-{
-    ec_master_t *master = (ec_master_t *) priv_data;
+static int ec_master_eoe_processing(ec_master_t *master)
+{
     ec_eoe_t *eoe;
     unsigned int none_open, sth_to_send, all_idle;
-
-    EC_MASTER_DBG(master, 1, "EoE thread running.\n");
-
-    while (!kthread_should_stop()) {
-        none_open = 1;
-        all_idle = 1;
-
+    none_open = 1;
+    all_idle = 1;
+
+    list_for_each_entry(eoe, &master->eoe_handlers, list) {
+        if (ec_eoe_is_open(eoe)) {
+            none_open = 0;
+            break;
+        }
+    }
+    if (none_open)
+        return all_idle;
+
+    // actual EoE processing
+    sth_to_send = 0;
+    list_for_each_entry(eoe, &master->eoe_handlers, list) {
+        ec_eoe_run(eoe);
+        if (eoe->queue_datagram) {
+            sth_to_send = 1;
+        }
+        if (!ec_eoe_is_idle(eoe)) {
+            all_idle = 0;
+        }
+    }
+
+    if (sth_to_send) {
         list_for_each_entry(eoe, &master->eoe_handlers, list) {
-            if (ec_eoe_is_open(eoe)) {
-                none_open = 0;
-                break;
-            }
-        }
-        if (none_open)
-            goto schedule;
-
-        // receive datagrams
-        master->receive_cb(master->cb_data);
-
-        // actual EoE processing
-        sth_to_send = 0;
-        list_for_each_entry(eoe, &master->eoe_handlers, list) {
-            ec_eoe_run(eoe);
-            if (eoe->queue_datagram) {
-                sth_to_send = 1;
-            }
-            if (!ec_eoe_is_idle(eoe)) {
-                all_idle = 0;
-            }
-        }
-
-        if (sth_to_send) {
-            list_for_each_entry(eoe, &master->eoe_handlers, list) {
-                ec_eoe_queue(eoe);
-            }
-            // (try to) send datagrams
-            down(&master->ext_queue_sem);
-            master->send_cb(master->cb_data);
-            up(&master->ext_queue_sem);
-        }
-
-schedule:
-        if (all_idle) {
-            set_current_state(TASK_INTERRUPTIBLE);
-            schedule_timeout(1);
-        } else {
-            schedule();
-        }
-    }
-    
-    EC_MASTER_DBG(master, 1, "EoE thread exiting...\n");
-    return 0;
-}
-#endif
+            ec_eoe_queue(eoe);
+        }
+    }
+    return all_idle;
+}
+
+#endif // EC_EOE
 
 /*****************************************************************************/
 
@@ -1781,7 +1758,8 @@
 
     slave->ports[0].next_slave = port0_slave;
 
-    for (i = 1; i < EC_MAX_PORTS; i++) {
+    i = 3;
+    while (i != 0) {
         if (!slave->ports[i].link.loop_closed) {
             *slave_position = *slave_position + 1;
             if (*slave_position < master->slave_count) {
@@ -1794,6 +1772,14 @@
                 return -1;
             }
         }
+        switch (i)
+        {
+        case 0: i = 3; break;
+        case 1: i = 2; break;
+        case 3: i = 1; break;
+        case 2:
+        default:i = 0; break;
+        }
     }
 
     return 0;
@@ -1907,7 +1893,7 @@
         return ERR_PTR(-ENOMEM);
     }
 
-    down(&master->master_sem);
+    ec_mutex_lock(&master->master_mutex);
 
     if (list_empty(&master->domains)) {
         index = 0;
@@ -1919,7 +1905,7 @@
     ec_domain_init(domain, master, index);
     list_add_tail(&domain->list, &master->domains);
 
-    up(&master->master_sem);
+    ec_mutex_unlock(&master->master_mutex);
 
     EC_MASTER_DBG(master, 1, "Created domain %u.\n", domain->index);
 
@@ -1943,9 +1929,6 @@
     uint32_t domain_offset;
     ec_domain_t *domain;
     int ret;
-#ifdef EC_EOE
-    int eoe_was_running;
-#endif
 
     EC_MASTER_DBG(master, 1, "ecrt_master_activate(master = 0x%p)\n", master);
 
@@ -1954,44 +1937,35 @@
         return 0;
     }
 
-    down(&master->master_sem);
+    ec_mutex_lock(&master->master_mutex);
 
     // finish all domains
     domain_offset = 0;
     list_for_each_entry(domain, &master->domains, list) {
         ret = ec_domain_finish(domain, domain_offset);
         if (ret < 0) {
-            up(&master->master_sem);
+            ec_mutex_unlock(&master->master_mutex);
             EC_MASTER_ERR(master, "Failed to finish domain 0x%p!\n", domain);
             return ret;
         }
         domain_offset += domain->data_size;
     }
     
-    up(&master->master_sem);
+    ec_mutex_unlock(&master->master_mutex);
 
     // restart EoE process and master thread with new locking
 
     ec_master_thread_stop(master);
-#ifdef EC_EOE
-    eoe_was_running = master->eoe_thread != NULL;
-    ec_master_eoe_stop(master);
-#endif
 
     EC_MASTER_DBG(master, 1, "FSM datagram is %p.\n", &master->fsm_datagram);
 
     master->injection_seq_fsm = 0;
     master->injection_seq_rt = 0;
 
-    master->send_cb = master->app_send_cb;
-    master->receive_cb = master->app_receive_cb;
-    master->cb_data = master->app_cb_data;
+    master->fsm_queue_lock_cb = master->app_fsm_queue_lock_cb;
+    master->fsm_queue_unlock_cb = master->app_fsm_queue_unlock_cb;
+    master->fsm_queue_locking_data = master->app_fsm_queue_locking_data;
     
-#ifdef EC_EOE
-    if (eoe_was_running) {
-        ec_master_eoe_start(master);
-    }
-#endif
     ret = ec_master_thread_start(master, ec_master_operation_thread,
                 "EtherCAT-OP");
     if (ret < 0) {
@@ -2016,7 +1990,7 @@
     ec_slave_t *slave;
 #ifdef EC_EOE
     ec_eoe_t *eoe;
-    int eoe_was_running;
+    int is_eoe_slave;
 #endif
 
     EC_MASTER_DBG(master, 1, "%s(master = 0x%p)\n", __func__, master);
@@ -2027,14 +2001,10 @@
     }
 
     ec_master_thread_stop(master);
-#ifdef EC_EOE
-    eoe_was_running = master->eoe_thread != NULL;
-    ec_master_eoe_stop(master);
-#endif
     
-    master->send_cb = ec_master_internal_send_cb;
-    master->receive_cb = ec_master_internal_receive_cb;
-    master->cb_data = master;
+    master->fsm_queue_lock_cb = NULL;
+    master->fsm_queue_unlock_cb= NULL;
+    master->fsm_queue_locking_data = NULL;
     
     ec_master_clear_config(master);
 
@@ -2042,32 +2012,35 @@
             slave < master->slaves + master->slave_count;
             slave++) {
 
-        // set states for all slaves
+        // set state to PREOP for all but eoe slaves
+#ifdef EC_EOE
+        is_eoe_slave = 0;
+        // ... but leave EoE slaves in OP
+        list_for_each_entry(eoe, &master->eoe_handlers, list) {
+            if (slave == eoe->slave && ec_eoe_is_open(eoe))
+                is_eoe_slave = 1;
+        }
+        if (!is_eoe_slave) {
+            ec_slave_request_state(slave, EC_SLAVE_STATE_PREOP);
+            // mark for reconfiguration, because the master could have no
+            // possibility for a reconfiguration between two sequential operation
+            // phases.
+            slave->force_config = 1;
+        }
+#else
         ec_slave_request_state(slave, EC_SLAVE_STATE_PREOP);
-
         // mark for reconfiguration, because the master could have no
         // possibility for a reconfiguration between two sequential operation
         // phases.
         slave->force_config = 1;
-    }
-
-#ifdef EC_EOE
-    // ... but leave EoE slaves in OP
-    list_for_each_entry(eoe, &master->eoe_handlers, list) {
-        if (ec_eoe_is_open(eoe))
-            ec_slave_request_state(eoe->slave, EC_SLAVE_STATE_OP);
-    }
-#endif
+#endif
+
+    }
 
     master->app_time = 0ULL;
     master->app_start_time = 0ULL;
     master->has_app_time = 0;
 
-#ifdef EC_EOE
-    if (eoe_was_running) {
-        ec_master_eoe_start(master);
-    }
-#endif
     if (ec_master_thread_start(master, ec_master_idle_thread,
                 "EtherCAT-IDLE"))
         EC_MASTER_WARN(master, "Failed to restart master thread!\n");
@@ -2081,18 +2054,13 @@
 
 void ecrt_master_send(ec_master_t *master)
 {
-    ec_datagram_t *datagram, *n;
-
-    if (master->injection_seq_rt != master->injection_seq_fsm) {
-        // inject datagrams produced by master & slave FSMs
-        ec_master_queue_datagram(master, &master->fsm_datagram);
-        master->injection_seq_rt = master->injection_seq_fsm;
-    }
-    ec_master_inject_external_datagrams(master);
+    ec_datagram_t *datagram, *next;
+
+    ec_master_inject_fsm_datagrams(master);
 
     if (unlikely(!master->main_device.link_state)) {
         // link is down, no datagram can be sent
-        list_for_each_entry_safe(datagram, n, &master->datagram_queue, queue) {
+        list_for_each_entry_safe(datagram, next, &master->datagram_queue, queue) {
             datagram->state = EC_DATAGRAM_ERROR;
             list_del_init(&datagram->queue);
         }
@@ -2151,20 +2119,6 @@
     }
 }
 
-/*****************************************************************************/
-
-void ecrt_master_send_ext(ec_master_t *master)
-{
-    ec_datagram_t *datagram, *next;
-
-    list_for_each_entry_safe(datagram, next, &master->ext_datagram_queue,
-            queue) {
-        list_del(&datagram->queue);
-        ec_master_queue_datagram(master, datagram);
-    }
-
-    ecrt_master_send(master);
-}
 
 /*****************************************************************************/
 
@@ -2213,14 +2167,14 @@
         ec_slave_config_init(sc, master,
                 alias, position, vendor_id, product_code);
 
-        down(&master->master_sem);
+        ec_mutex_lock(&master->master_mutex);
 
         // try to find the addressed slave
         ec_slave_config_attach(sc);
         ec_slave_config_load_default_sync_config(sc);
         list_add_tail(&sc->list, &master->configs);
 
-        up(&master->master_sem);
+        ec_mutex_unlock(&master->master_mutex);
     }
 
     return sc;
@@ -2258,7 +2212,7 @@
 {
     const ec_slave_t *slave;
 
-    if (down_interruptible(&master->master_sem)) {
+    if (ec_mutex_lock_interruptible(&master->master_mutex)) {
         return -EINTR;
     }
 
@@ -2281,7 +2235,7 @@
         slave_info->name[0] = 0;
     }
 
-    up(&master->master_sem);
+    ec_mutex_unlock(&master->master_mutex);
 
     return 0;
 }
@@ -2289,16 +2243,18 @@
 /*****************************************************************************/
 
 void ecrt_master_callbacks(ec_master_t *master,
-        void (*send_cb)(void *), void (*receive_cb)(void *), void *cb_data)
-{
-    EC_MASTER_DBG(master, 1, "ecrt_master_callbacks(master = 0x%p,"
-            " send_cb = 0x%p, receive_cb = 0x%p, cb_data = 0x%p)\n",
-            master, send_cb, receive_cb, cb_data);
-
-    master->app_send_cb = send_cb;
-    master->app_receive_cb = receive_cb;
-    master->app_cb_data = cb_data;
-}
+                           void (*lock_cb)(void *), void (*unlock_cb)(void *),
+                           void *cb_data)
+{
+    EC_MASTER_DBG(master, 1, "ecrt_master_callbacks(master = %p, "
+                            "lock_cb = %p, unlock_cb = %p, cb_data = %p)\n",
+                            master, lock_cb, unlock_cb, cb_data);
+
+    master->app_fsm_queue_lock_cb = lock_cb;
+    master->app_fsm_queue_unlock_cb = unlock_cb;
+    master->app_fsm_queue_locking_data = cb_data;
+}
+
 
 /*****************************************************************************/
 
@@ -2311,12 +2267,36 @@
 
 /*****************************************************************************/
 
+void ecrt_master_configured_slaves_state(const ec_master_t *master, ec_master_state_t *state)
+{
+    const ec_slave_config_t *sc;
+    ec_slave_config_state_t sc_state;
+
+    // collect al_states of all configured online slaves
+    state->al_states = 0;
+    list_for_each_entry(sc, &master->configs, list) {
+        ecrt_slave_config_state(sc,&sc_state);
+        if (sc_state.online)
+            state->al_states |= sc_state.al_state;
+    }
+
+    state->slaves_responding = master->fsm.slaves_responding;
+    state->link_up = master->main_device.link_state;
+}
+
+/*****************************************************************************/
+
 void ecrt_master_application_time(ec_master_t *master, uint64_t app_time)
 {
     master->app_time = app_time;
 
     if (unlikely(!master->has_app_time)) {
-        master->app_start_time = app_time;
+        EC_MASTER_DBG(master, 1, "set application start time = %llu\n", app_time);
+        master->app_start_time = app_time;
+#ifdef EC_HAVE_CYCLES
+        master->dc_cycles_app_start_time = get_cycles();
+#endif
+        master->dc_jiffies_app_start_time = jiffies;
         master->has_app_time = 1;
     }
 }
@@ -2362,7 +2342,7 @@
         uint8_t drive_no, uint16_t idn, uint8_t *data, size_t data_size,
         uint16_t *error_code)
 {
-    ec_master_soe_request_t request;
+    ec_master_soe_request_t* request;
     int retval;
 
     if (drive_no > 7) {
@@ -2370,63 +2350,61 @@
         return -EINVAL;
     }
 
-    INIT_LIST_HEAD(&request.list);
-    ec_soe_request_init(&request.req);
-    ec_soe_request_set_drive_no(&request.req, drive_no);
-    ec_soe_request_set_idn(&request.req, idn);
-
-    if (ec_soe_request_alloc(&request.req, data_size)) {
-        ec_soe_request_clear(&request.req);
+    request = kmalloc(sizeof(*request), GFP_KERNEL);
+    if (!request)
         return -ENOMEM;
-    }
-
-    memcpy(request.req.data, data, data_size);
-    request.req.data_size = data_size;
-    ec_soe_request_write(&request.req);
-
-    if (down_interruptible(&master->master_sem))
+    kref_init(&request->refcount);
+
+    INIT_LIST_HEAD(&request->list);
+    ec_soe_request_init(&request->req);
+    ec_soe_request_set_drive_no(&request->req, drive_no);
+    ec_soe_request_set_idn(&request->req, idn);
+
+    if (ec_soe_request_alloc(&request->req, data_size)) {
+        ec_soe_request_clear(&request->req);
+        kref_put(&request->refcount,ec_master_soe_request_release);
+        return -ENOMEM;
+    }
+
+    memcpy(request->req.data, data, data_size);
+    request->req.data_size = data_size;
+    ec_soe_request_write(&request->req);
+
+    if (ec_mutex_lock_interruptible(&master->master_mutex)) {
+        kref_put(&request->refcount,ec_master_soe_request_release);
         return -EINTR;
-
-    if (!(request.slave = ec_master_find_slave(
+    }
+
+    if (!(request->slave = ec_master_find_slave(
                     master, 0, slave_position))) {
-        up(&master->master_sem);
+        ec_mutex_unlock(&master->master_mutex);
         EC_MASTER_ERR(master, "Slave %u does not exist!\n",
                 slave_position);
-        ec_soe_request_clear(&request.req);
+        kref_put(&request->refcount,ec_master_soe_request_release);
         return -EINVAL;
     }
 
-    EC_SLAVE_DBG(request.slave, 1, "Scheduling SoE write request.\n");
+    EC_SLAVE_DBG(request->slave, 1, "Scheduling SoE write request %p.\n", request);
 
     // schedule SoE write request.
-    list_add_tail(&request.list, &request.slave->soe_requests);
-
-    up(&master->master_sem);
+    list_add_tail(&request->list, &request->slave->soe_requests);
+    kref_get(&request->refcount);
+
+    ec_mutex_unlock(&master->master_mutex);
 
     // wait for processing through FSM
-    if (wait_event_interruptible(request.slave->soe_queue,
-                request.req.state != EC_INT_REQUEST_QUEUED)) {
-        // interrupted by signal
-        down(&master->master_sem);
-        if (request.req.state == EC_INT_REQUEST_QUEUED) {
-            // abort request
-            list_del(&request.list);
-            up(&master->master_sem);
-            ec_soe_request_clear(&request.req);
-            return -EINTR;
-        }
-        up(&master->master_sem);
-    }
-
-    // wait until master FSM has finished processing
-    wait_event(request.slave->soe_queue,
-            request.req.state != EC_INT_REQUEST_BUSY);
+    if (wait_event_interruptible(request->slave->soe_queue,
+          ((request->req.state == EC_INT_REQUEST_SUCCESS) || (request->req.state == EC_INT_REQUEST_FAILURE)))) {
+           // interrupted by signal
+           kref_put(&request->refcount,ec_master_soe_request_release);
+           return -EINTR;
+    }
 
     if (error_code) {
-        *error_code = request.req.error_code;
-    }
-    retval = request.req.state == EC_INT_REQUEST_SUCCESS ? 0 : -EIO;
-    ec_soe_request_clear(&request.req);
+        *error_code = request->req.error_code;
+    }
+    retval = request->req.state == EC_INT_REQUEST_SUCCESS ? 0 : -EIO;
+    kref_put(&request->refcount,ec_master_soe_request_release);
 
     return retval;
 }
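
The SoE requests above are now allocated on the heap and reference-counted with a kref, so a caller interrupted by a signal can drop its reference and return while the master FSM still holds a valid one; the request is only freed when the last reference is put. The release callback itself is not part of this hunk, so the following is only a sketch of the shape the call sites assume:

    /* Assumed shape of the release callback passed to kref_put() above;
     * the real ec_master_soe_request_release() is defined elsewhere. */
    void ec_master_soe_request_release(struct kref *ref)
    {
        ec_master_soe_request_t *request =
            container_of(ref, ec_master_soe_request_t, refcount);

        ec_soe_request_clear(&request->req); /* frees the request data buffer */
        kfree(request);
    }
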
@@ -2437,78 +2415,76 @@
         uint8_t drive_no, uint16_t idn, uint8_t *target, size_t target_size,
         size_t *result_size, uint16_t *error_code)
 {
-    ec_master_soe_request_t request;
+    ec_master_soe_request_t* request;
 
     if (drive_no > 7) {
         EC_MASTER_ERR(master, "Invalid drive number!\n");
         return -EINVAL;
     }
 
-    INIT_LIST_HEAD(&request.list);
-    ec_soe_request_init(&request.req);
-    ec_soe_request_set_drive_no(&request.req, drive_no);
-    ec_soe_request_set_idn(&request.req, idn);
-    ec_soe_request_read(&request.req);
-
-    if (down_interruptible(&master->master_sem))
+    request = kmalloc(sizeof(*request), GFP_KERNEL);
+    if (!request)
+        return -ENOMEM;
+    kref_init(&request->refcount);
+
+    INIT_LIST_HEAD(&request->list);
+    ec_soe_request_init(&request->req);
+    ec_soe_request_set_drive_no(&request->req, drive_no);
+    ec_soe_request_set_idn(&request->req, idn);
+    ec_soe_request_read(&request->req);
+
+    if (ec_mutex_lock_interruptible(&master->master_mutex)) {
+        kref_put(&request->refcount,ec_master_soe_request_release);
         return -EINTR;
-
-    if (!(request.slave = ec_master_find_slave(master, 0, slave_position))) {
-        up(&master->master_sem);
-        ec_soe_request_clear(&request.req);
+    }
+
+    if (!(request->slave = ec_master_find_slave(master, 0, slave_position))) {
+        ec_mutex_unlock(&master->master_mutex);
+        kref_put(&request->refcount,ec_master_soe_request_release);
         EC_MASTER_ERR(master, "Slave %u does not exist!\n", slave_position);
         return -EINVAL;
     }
 
     // schedule request.
-    list_add_tail(&request.list, &request.slave->soe_requests);
-
-    up(&master->master_sem);
-
-    EC_SLAVE_DBG(request.slave, 1, "Scheduled SoE read request.\n");
+    list_add_tail(&request->list, &request->slave->soe_requests);
+    kref_get(&request->refcount);
+
+    ec_mutex_unlock(&master->master_mutex);
+
+    EC_SLAVE_DBG(request->slave, 1, "Scheduled SoE read request %p.\n",request);
 
     // wait for processing through FSM
-    if (wait_event_interruptible(request.slave->soe_queue,
-                request.req.state != EC_INT_REQUEST_QUEUED)) {
-        // interrupted by signal
-        down(&master->master_sem);
-        if (request.req.state == EC_INT_REQUEST_QUEUED) {
-            list_del(&request.list);
-            up(&master->master_sem);
-            ec_soe_request_clear(&request.req);
-            return -EINTR;
-        }
-        // request already processing: interrupt not possible.
-        up(&master->master_sem);
-    }
-
-    // wait until master FSM has finished processing
-    wait_event(request.slave->soe_queue,
-            request.req.state != EC_INT_REQUEST_BUSY);
+    if (wait_event_interruptible(request->slave->soe_queue,
+          ((request->req.state == EC_INT_REQUEST_SUCCESS) || (request->req.state == EC_INT_REQUEST_FAILURE)))) {
+           // interrupted by signal
+           kref_put(&request->refcount,ec_master_soe_request_release);
+           return -EINTR;
+    }
 
     if (error_code) {
-        *error_code = request.req.error_code;
-    }
-
-    EC_SLAVE_DBG(request.slave, 1, "Read %zd bytes via SoE.\n",
-            request.req.data_size);
-
-    if (request.req.state != EC_INT_REQUEST_SUCCESS) {
+        *error_code = request->req.error_code;
+    }
+
+    EC_SLAVE_DBG(request->slave, 1, "SoE request %p read %zd bytes via SoE.\n",
+            request,request->req.data_size);
+
+    if (request->req.state != EC_INT_REQUEST_SUCCESS) {
         if (result_size) {
             *result_size = 0;
         }
-        ec_soe_request_clear(&request.req);
+        kref_put(&request->refcount,ec_master_soe_request_release);
         return -EIO;
     } else {
-        if (request.req.data_size > target_size) {
+        if (request->req.data_size > target_size) {
             EC_MASTER_ERR(master, "Buffer too small.\n");
-            ec_soe_request_clear(&request.req);
+            kref_put(&request->refcount,ec_master_soe_request_release);
             return -EOVERFLOW;
         }
         if (result_size) {
-            *result_size = request.req.data_size;
-        }
-        memcpy(target, request.req.data, request.req.data_size);
+            *result_size = request->req.data_size;
+        }
+        memcpy(target, request->req.data, request->req.data_size);
+        kref_put(&request->refcount,ec_master_soe_request_release);
         return 0;
     }
 }
@@ -2534,7 +2510,6 @@
 EXPORT_SYMBOL(ecrt_master_activate);
 EXPORT_SYMBOL(ecrt_master_deactivate);
 EXPORT_SYMBOL(ecrt_master_send);
-EXPORT_SYMBOL(ecrt_master_send_ext);
 EXPORT_SYMBOL(ecrt_master_receive);
 EXPORT_SYMBOL(ecrt_master_callbacks);
 EXPORT_SYMBOL(ecrt_master);
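
With this change the application no longer passes send/receive callbacks; it hands the master a lock/unlock pair that guards the FSM datagram queue. A minimal in-kernel usage sketch, assuming the application can simply use a kernel mutex (a real-time application would instead wrap the locking primitives of its RT framework):

    #include <linux/mutex.h>
    #include "ecrt.h"

    static DEFINE_MUTEX(app_fsm_queue_lock);

    static void app_lock(void *data)   { mutex_lock(data); }
    static void app_unlock(void *data) { mutex_unlock(data); }

    /* during application start-up, before ecrt_master_activate(): */
    ecrt_master_callbacks(master, app_lock, app_unlock, &app_fsm_queue_lock);
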
--- a/master/master.h	Wed Mar 23 08:06:58 2011 +0100
+++ b/master/master.h	Wed Apr 13 22:06:28 2011 +0200
@@ -43,18 +43,13 @@
 #include <linux/wait.h>
 #include <linux/kthread.h>
 
-#if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 27)
-#include <linux/semaphore.h>
-#else
-#include <asm/semaphore.h>
-#endif
-
 #include "device.h"
 #include "domain.h"
 #include "ethernet.h"
 #include "fsm_master.h"
 #include "cdev.h"
 
+
 /*****************************************************************************/
 
 /** Convenience macro for printing master-specific information to syslog.
@@ -66,8 +61,16 @@
  * \param fmt format string (like in printf())
  * \param args arguments (optional)
  */
+#ifdef USE_TRACE_PRINTK
+#define EC_MASTER_INFO(master, fmt, args...) \
+    do { \
+        __trace_printk(_THIS_IP_,"EtherCAT %u: " fmt, master->index, ##args); \
+        printk(KERN_INFO "EtherCAT %u: " fmt, master->index, ##args);   \
+    } while (0)
+#else
 #define EC_MASTER_INFO(master, fmt, args...) \
     printk(KERN_INFO "EtherCAT %u: " fmt, master->index, ##args)
+#endif
 
 /** Convenience macro for printing master-specific errors to syslog.
  *
@@ -78,8 +81,16 @@
  * \param fmt format string (like in printf())
  * \param args arguments (optional)
  */
+#ifdef USE_TRACE_PRINTK
+#define EC_MASTER_ERR(master, fmt, args...) \
+    do { \
+        __trace_printk(_THIS_IP_,"EtherCAT ERROR %u: " fmt, master->index, ##args); \
+        printk(KERN_ERR "EtherCAT ERROR %u: " fmt, master->index, ##args); \
+    } while (0)
+#else
 #define EC_MASTER_ERR(master, fmt, args...) \
     printk(KERN_ERR "EtherCAT ERROR %u: " fmt, master->index, ##args)
+#endif
 
 /** Convenience macro for printing master-specific warnings to syslog.
  *
@@ -90,8 +101,16 @@
  * \param fmt format string (like in printf())
  * \param args arguments (optional)
  */
+#ifdef USE_TRACE_PRINTK
+#define EC_MASTER_WARN(master, fmt, args...) \
+    do { \
+        __trace_printk(_THIS_IP_,"EtherCAT WARNING %u: " fmt, master->index, ##args); \
+        printk(KERN_WARNING "EtherCAT WARNING %u: " fmt, master->index, ##args);    \
+    } while (0)
+#else
 #define EC_MASTER_WARN(master, fmt, args...) \
     printk(KERN_WARNING "EtherCAT WARNING %u: " fmt, master->index, ##args)
+#endif
 
 /** Convenience macro for printing master-specific debug messages to syslog.
  *
@@ -102,13 +121,25 @@
  * \param fmt format string (like in printf())
  * \param args arguments (optional)
  */
+#ifdef USE_TRACE_PRINTK
 #define EC_MASTER_DBG(master, level, fmt, args...) \
     do { \
+        __trace_printk(_THIS_IP_,"EtherCAT DEBUG%u %u: " fmt, \
+            level,master->index, ##args); \
         if (master->debug_level >= level) { \
             printk(KERN_DEBUG "EtherCAT DEBUG %u: " fmt, \
                     master->index, ##args); \
         } \
     } while (0)
+#else
+#define EC_MASTER_DBG(master, level, fmt, args...) \
+    do { \
+        if (master->debug_level >= level) { \
+            printk(KERN_DEBUG "EtherCAT DEBUG %u: " fmt, \
+                    master->index, ##args); \
+        } \
+    } while (0)
+#endif
 
 /*****************************************************************************/
 
@@ -152,16 +183,17 @@
     struct class_device *class_device; /**< Master class device. */
 #endif
 
-    struct semaphore master_sem; /**< Master semaphore. */
+    struct ec_mutex_t master_mutex; /**< Master mutex. */
 
     ec_device_t main_device; /**< EtherCAT main device. */
     const uint8_t *main_mac; /**< MAC address of main device. */
     ec_device_t backup_device; /**< EtherCAT backup device. */
     const uint8_t *backup_mac; /**< MAC address of backup device. */
-    struct semaphore device_sem; /**< Device semaphore. */
+    struct ec_mutex_t device_mutex; /**< Device mutex. */
 
     ec_fsm_master_t fsm; /**< Master state machine. */
     ec_datagram_t fsm_datagram; /**< Datagram used for state machines. */
+    ec_mailbox_t fsm_mbox; /**< Mailbox used for state machines. */
     ec_master_phase_t phase; /**< Master phase. */
     unsigned int active; /**< Master has been activated. */
     unsigned int config_changed; /**< The configuration changed. */
@@ -187,10 +219,14 @@
     ec_datagram_t sync_mon_datagram; /**< Datagram used for DC synchronisation
                                        monitoring. */
     ec_slave_t *dc_ref_clock; /**< DC reference clock slave. */
-    
+#ifdef EC_HAVE_CYCLES
+    cycles_t dc_cycles_app_start_time; /**< Cycles at application start time. */
+#endif
+    unsigned long dc_jiffies_app_start_time; /**< Jiffies at application
+                                                 start time. */
     unsigned int scan_busy; /**< Current scan state. */
     unsigned int allow_scan; /**< \a True, if slave scanning is allowed. */
-    struct semaphore scan_sem; /**< Semaphore protecting the \a scan_busy
+    struct ec_mutex_t scan_mutex; /**< Mutex protecting the \a scan_busy
                                  variable and the \a allow_scan flag. */
     wait_queue_head_t scan_queue; /**< Queue for processes that wait for
                                     slave scanning. */
@@ -198,7 +234,7 @@
     unsigned int config_busy; /**< State of slave configuration. */
     unsigned int allow_config; /**< \a True, if slave configuration is
                                  allowed. */
-    struct semaphore config_sem; /**< Semaphore protecting the \a config_busy
+    struct ec_mutex_t config_mutex; /**< Mutex protecting the \a config_busy
                                    variable and the allow_config flag. */
     wait_queue_head_t config_queue; /**< Queue for processes that wait for
                                       slave configuration. */
@@ -206,12 +242,10 @@
     struct list_head datagram_queue; /**< Datagram queue. */
     uint8_t datagram_index; /**< Current datagram index. */
 
-    struct list_head ext_datagram_queue; /**< Queue for non-application
-                                           datagrams. */
-    struct semaphore ext_queue_sem; /**< Semaphore protecting the \a
-                                      ext_datagram_queue. */
-
-    struct list_head external_datagram_queue; /**< External Datagram queue. */
+    struct ec_mutex_t fsm_queue_mutex; /**< Mutex protecting the \a
+                                      fsm_datagram_queue. */
+    struct list_head fsm_datagram_queue; /**< FSM datagram queue. */
+
     unsigned int send_interval; /**< Interval between calls to ecrt_master_send */
     size_t max_queue_size; /**< Maximum size of datagram queue */
 
@@ -225,16 +259,14 @@
     struct list_head eoe_handlers; /**< Ethernet over EtherCAT handlers. */
 #endif
 
-    struct semaphore io_sem; /**< Semaphore used in \a IDLE phase. */
-
-    void (*send_cb)(void *); /**< Current send datagrams callback. */
-    void (*receive_cb)(void *); /**< Current receive datagrams callback. */
-    void *cb_data; /**< Current callback data. */
-    void (*app_send_cb)(void *); /**< Application's send datagrams
-                                          callback. */
-    void (*app_receive_cb)(void *); /**< Application's receive datagrams
-                                      callback. */
-    void *app_cb_data; /**< Application callback data. */
+    struct ec_mutex_t io_mutex; /**< Mutex used in \a IDLE phase. */
+
+    void (*fsm_queue_lock_cb)(void *); /**< FSM queue lock callback. */
+    void (*fsm_queue_unlock_cb)(void *); /**< FSM queue unlock callback. */
+    void *fsm_queue_locking_data; /**< Data parameter of fsm queue locking callbacks. */
+    void (*app_fsm_queue_lock_cb)(void *); /**< App's FSM queue lock callback. */
+    void (*app_fsm_queue_unlock_cb)(void *); /**< App's FSM queue unlock callback. */
+    void *app_fsm_queue_locking_data; /**< App's data parameter of fsm queue locking callbacks. */
 
     struct list_head sii_requests; /**< SII write requests. */
     wait_queue_head_t sii_queue; /**< Wait queue for SII
@@ -260,18 +292,12 @@
 int ec_master_enter_operation_phase(ec_master_t *);
 void ec_master_leave_operation_phase(ec_master_t *);
 
-#ifdef EC_EOE
-// EoE
-void ec_master_eoe_start(ec_master_t *);
-void ec_master_eoe_stop(ec_master_t *);
-#endif
-
 // datagram IO
 void ec_master_receive_datagrams(ec_master_t *, const uint8_t *, size_t);
 void ec_master_queue_datagram(ec_master_t *, ec_datagram_t *);
-void ec_master_queue_datagram_ext(ec_master_t *, ec_datagram_t *);
-void ec_master_queue_external_datagram(ec_master_t *, ec_datagram_t *);
-void ec_master_inject_external_datagrams(ec_master_t *);
+void ec_master_queue_request_fsm_datagram(ec_master_t *, ec_datagram_t *);
+void ec_master_queue_fsm_datagram(ec_master_t *, ec_datagram_t *);
+void ec_master_inject_fsm_datagrams(ec_master_t *);
 
 // misc.
 void ec_master_set_send_interval(ec_master_t *, unsigned int);
@@ -308,9 +334,6 @@
 void ec_master_calc_dc(ec_master_t *);
 void ec_master_request_op(ec_master_t *);
 
-void ec_master_internal_send_cb(void *);
-void ec_master_internal_receive_cb(void *);
-
-/*****************************************************************************/
-
-#endif
+/*****************************************************************************/
+
+#endif
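
When USE_TRACE_PRINTK is defined, each log macro above writes the message both to the kernel trace ring buffer (via __trace_printk()) and to the syslog; the debug variant additionally records messages that the debug_level filter would suppress on the console. The pattern in isolation, as a sketch with a made-up prefix:

    /* Dual-output logging: trace buffer (precise timestamps, cheap) plus
     * syslog.  Messages can be read back from
     * /sys/kernel/debug/tracing/trace while tracing is enabled. */
    #define MY_INFO(fmt, args...) \
        do { \
            __trace_printk(_THIS_IP_, "mydrv: " fmt, ##args); \
            printk(KERN_INFO "mydrv: " fmt, ##args); \
        } while (0)
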
--- a/master/module.c	Wed Mar 23 08:06:58 2011 +0100
+++ b/master/module.c	Wed Apr 13 22:06:28 2011 +0200
@@ -61,7 +61,7 @@
 static unsigned int debug_level;  /**< Debug level parameter. */
 
 static ec_master_t *masters; /**< Array of masters. */
-static struct semaphore master_sem; /**< Master semaphore. */
+static struct ec_mutex_t master_mutex; /**< Master mutex. */
 
 dev_t device_number; /**< Device number for master cdevs. */
 struct class *class; /**< Device class. */
@@ -101,7 +101,7 @@
 
     EC_INFO("Master driver %s\n", EC_MASTER_VERSION);
 
-    sema_init(&master_sem, 1);
+    ec_mutex_init(&master_mutex);
 
     if (master_count) {
         if (alloc_chrdev_region(&device_number,
@@ -468,9 +468,9 @@
     for (i = 0; i < master_count; i++) {
         master = &masters[i];
 
-        down(&master->device_sem);
+        ec_mutex_lock(&master->device_mutex);
         if (master->main_device.dev) { // master already has a device
-            up(&master->device_sem);
+            ec_mutex_unlock(&master->device_mutex);
             continue;
         }
             
@@ -481,14 +481,14 @@
                     str, master->index);
 
             ec_device_attach(&master->main_device, net_dev, poll, module);
-            up(&master->device_sem);
+            ec_mutex_unlock(&master->device_mutex);
             
             snprintf(net_dev->name, IFNAMSIZ, "ec%u", master->index);
 
             return &master->main_device; // offer accepted
         }
         else {
-            up(&master->device_sem);
+            ec_mutex_unlock(&master->device_mutex);
 
             if (master->debug_level) {
                 ec_mac_print(net_dev->dev_addr, str);
@@ -524,40 +524,40 @@
     }
     master = &masters[master_index];
 
-    if (down_interruptible(&master_sem)) {
+    if (ec_mutex_lock_interruptible(&master_mutex)) {
         errptr = ERR_PTR(-EINTR);
         goto out_return;
     }
 
     if (master->reserved) {
-        up(&master_sem);
+        ec_mutex_unlock(&master_mutex);
         EC_MASTER_ERR(master, "Master already in use!\n");
         errptr = ERR_PTR(-EBUSY);
         goto out_return;
     }
     master->reserved = 1;
-    up(&master_sem);
-
-    if (down_interruptible(&master->device_sem)) {
+    ec_mutex_unlock(&master_mutex);
+
+    if (ec_mutex_lock_interruptible(&master->device_mutex)) {
         errptr = ERR_PTR(-EINTR);
         goto out_release;
     }
     
     if (master->phase != EC_IDLE) {
-        up(&master->device_sem);
+        ec_mutex_unlock(&master->device_mutex);
         EC_MASTER_ERR(master, "Master still waiting for devices!\n");
         errptr = ERR_PTR(-ENODEV);
         goto out_release;
     }
 
     if (!try_module_get(master->main_device.module)) {
-        up(&master->device_sem);
+        ec_mutex_unlock(&master->device_mutex);
         EC_ERR("Device module is unloading!\n");
         errptr = ERR_PTR(-ENODEV);
         goto out_release;
     }
 
-    up(&master->device_sem);
+    ec_mutex_unlock(&master->device_mutex);
 
     if (ec_master_enter_operation_phase(master)) {
         EC_MASTER_ERR(master, "Failed to enter OPERATION phase!\n");
--- a/master/slave.c	Wed Mar 23 08:06:58 2011 +0100
+++ b/master/slave.c	Wed Apr 13 22:06:28 2011 +0200
@@ -159,19 +159,20 @@
     INIT_LIST_HEAD(&slave->soe_requests);
     init_waitqueue_head(&slave->soe_queue);
 
-    // init state machine datagram
-    ec_datagram_init(&slave->fsm_datagram);
-    snprintf(slave->fsm_datagram.name, EC_DATAGRAM_NAME_SIZE,
+    // init datagram
+    ec_datagram_init(&slave->datagram);
+    snprintf(slave->datagram.name, EC_DATAGRAM_NAME_SIZE,
             "slave%u-fsm", slave->ring_position);
-    ret = ec_datagram_prealloc(&slave->fsm_datagram, EC_MAX_DATA_SIZE);
+    ret = ec_datagram_prealloc(&slave->datagram, EC_MAX_DATA_SIZE);
     if (ret < 0) {
-        ec_datagram_clear(&slave->fsm_datagram);
+        ec_datagram_clear(&slave->datagram);
         EC_SLAVE_ERR(slave, "Failed to allocate FSM datagram.\n");
         return;
     }
+    ec_mbox_init(&slave->mbox,&slave->datagram);
 
     // create state machine object
-    ec_fsm_slave_init(&slave->fsm, slave, &slave->fsm_datagram);
+    ec_fsm_slave_init(&slave->fsm, slave, &slave->mbox);
 }
 
 /*****************************************************************************/
@@ -194,9 +195,10 @@
             list_entry(slave->slave_sdo_requests.next,
                 ec_master_sdo_request_t, list);
         list_del_init(&request->list); // dequeue
-        EC_SLAVE_WARN(slave, "Discarding SDO request,"
-                " slave about to be deleted.\n");
+        EC_SLAVE_WARN(slave, "Discarding SDO request %p,"
+                " slave about to be deleted.\n",request);
         request->req.state = EC_INT_REQUEST_FAILURE;
+        kref_put(&request->refcount,ec_master_sdo_request_release);
         wake_up(&slave->sdo_queue);
     }
 
@@ -208,6 +210,7 @@
         EC_SLAVE_WARN(slave, "Discarding FoE request,"
                 " slave about to be deleted.\n");
         request->req.state = EC_INT_REQUEST_FAILURE;
+        kref_put(&request->refcount,ec_master_foe_request_release);
         wake_up(&slave->foe_queue);
     }
 
@@ -219,6 +222,7 @@
         EC_SLAVE_WARN(slave, "Discarding SoE request,"
                 " slave about to be deleted.\n");
         request->req.state = EC_INT_REQUEST_FAILURE;
+        kref_put(&request->refcount,ec_master_soe_request_release);
         wake_up(&slave->soe_queue);
     }
 
@@ -252,7 +256,7 @@
     if (slave->sii_words)
         kfree(slave->sii_words);
     ec_fsm_slave_clear(&slave->fsm);
-    ec_datagram_clear(&slave->fsm_datagram);
+    ec_mbox_clear(&slave->mbox);
 }
 
 /*****************************************************************************/
@@ -803,6 +807,59 @@
 
 /*****************************************************************************/
 
+/** Returns the previous connected port for a given port.
+ */
+
+unsigned int ec_slave_get_previous_port(
+    ec_slave_t *slave, /**< EtherCAT slave. */
+    unsigned int i /**< Port index */
+    )
+{
+    do
+    {
+        switch (i)
+        {
+        case 0: i = 2; break;
+        case 1: i = 3; break;
+        case 2: i = 1; break;
+        case 3:
+        default:i = 0; break;
+        }
+        if (slave->ports[i].next_slave)
+            return i;
+    } while (i);
+    return 0;
+}
+
+/*****************************************************************************/
+
+/** Returns the next connected port for a given port.
+ */
+
+unsigned int ec_slave_get_next_port(
+    ec_slave_t *slave, /**< EtherCAT slave. */
+    unsigned int i /**< Port index */
+    )
+{
+    do
+    {
+        switch (i)
+        {
+        case 0: i = 3; break;
+        case 1: i = 2; break;
+        case 3: i = 1; break;
+        case 2:
+        default:i = 0; break;
+        }
+        if (slave->ports[i].next_slave)
+            return i;
+    } while (i);
+    return 0;
+}
+
+
+/*****************************************************************************/
+
 /** Calculates the sum of round-trip-times of connected ports 1-3.
  */
 uint32_t ec_slave_calc_rtt_sum(
@@ -810,13 +867,11 @@
         )
 {
     uint32_t rtt_sum = 0, rtt;
-    unsigned int i;
-    
-    for (i = 1; i < EC_MAX_PORTS; i++) {
-        if (slave->ports[i].next_slave) {
-            rtt = slave->ports[i].receive_time - slave->ports[i - 1].receive_time;
-            rtt_sum += rtt;
-        }
+    unsigned int i = ec_slave_get_next_port(slave,0);
+    while (i != 0) {
+        rtt = slave->ports[i].receive_time - slave->ports[ec_slave_get_previous_port(slave,i)].receive_time;
+        rtt_sum += rtt;
+        i = ec_slave_get_next_port(slave,i);
     }
 
     return rtt_sum;
@@ -830,20 +885,21 @@
         ec_slave_t *slave /**< EtherCAT slave. */
         )
 {
+    unsigned int i;
     ec_slave_t *dc_slave = NULL;
 
     if (slave->base_dc_supported) {
         dc_slave = slave;
     } else {
-        unsigned int i;
-
-        for (i = 1; i < EC_MAX_PORTS; i++) {
+        i = ec_slave_get_next_port(slave,0);
+        while (i != 0) {
             ec_slave_t *next = slave->ports[i].next_slave;
             if (next) {
                 dc_slave = ec_slave_find_next_dc_slave(next);
                 if (dc_slave)
                     break;
             }
+            i = ec_slave_get_next_port(slave,i);
         }
     }
 
@@ -859,32 +915,30 @@
         )
 {
     unsigned int i;
-    ec_slave_t *next, *next_dc;
+    ec_slave_t *next_dc;
     uint32_t rtt, next_rtt_sum;
 
     if (!slave->base_dc_supported)
         return;
 
-    for (i = 1; i < EC_MAX_PORTS; i++) {
-        next = slave->ports[i].next_slave;
-        if (!next)
-            continue;
-        next_dc = ec_slave_find_next_dc_slave(next);
-        if (!next_dc)
-            continue;
-
-        rtt = slave->ports[i].receive_time - slave->ports[i - 1].receive_time;
-        next_rtt_sum = ec_slave_calc_rtt_sum(next_dc);
-
-        slave->ports[i].delay_to_next_dc = (rtt - next_rtt_sum) / 2; // FIXME
-        next_dc->ports[0].delay_to_next_dc = (rtt - next_rtt_sum) / 2;
+    i = ec_slave_get_next_port(slave,0);
+    while (i != 0) {
+        next_dc = ec_slave_find_next_dc_slave(slave->ports[i].next_slave);
+        if (next_dc) {
+            rtt = slave->ports[i].receive_time - slave->ports[ec_slave_get_previous_port(slave,i)].receive_time;
+            next_rtt_sum = ec_slave_calc_rtt_sum(next_dc);
+
+            slave->ports[i].delay_to_next_dc = (rtt - next_rtt_sum) / 2; // FIXME
+            next_dc->ports[0].delay_to_next_dc = (rtt - next_rtt_sum) / 2;
 
 #if 0
-        EC_SLAVE_DBG(slave, 1, "delay %u:%u rtt=%u"
-                " next_rtt_sum=%u delay=%u\n",
-                slave->ring_position, i, rtt, next_rtt_sum,
-                slave->ports[i].delay_to_next_dc);
+            EC_SLAVE_DBG(slave, 1, "delay %u:%u rtt=%u"
+                    " next_rtt_sum=%u delay=%u\n",
+                    slave->ring_position, i, rtt, next_rtt_sum,
+                    slave->ports[i].delay_to_next_dc);
 #endif
+        }
+        i = ec_slave_get_next_port(slave,i);
     }
 }
 
@@ -898,28 +952,26 @@
         )
 {
     unsigned int i;
-    ec_slave_t *next, *next_dc;
-
-#if 0
+    ec_slave_t *next_dc;
+
+#if 1
     EC_SLAVE_DBG(slave, 1, "%u\n", *delay);
 #endif
 
     slave->transmission_delay = *delay;
 
-    for (i = 1; i < EC_MAX_PORTS; i++) {
+    i = ec_slave_get_next_port(slave,0);
+    while (i != 0) {
         ec_slave_port_t *port = &slave->ports[i];
-        next = port->next_slave;
-        if (!next)
-            continue;
-        next_dc = ec_slave_find_next_dc_slave(next);
-        if (!next_dc)
-            continue;
-
-        *delay = *delay + port->delay_to_next_dc;
+        next_dc = ec_slave_find_next_dc_slave(port->next_slave);
+        if (next_dc) {
+            *delay = *delay + port->delay_to_next_dc;
 #if 0
-        EC_SLAVE_DBG(slave, 1, "%u:%u %u\n", slave->ring_position, i, *delay);
+            EC_SLAVE_DBG(slave, 1, "%u:%u %u\n", slave->ring_position, i, *delay);
 #endif
-        ec_slave_calc_transmission_delays_rec(next_dc, delay);
+            ec_slave_calc_transmission_delays_rec(next_dc, delay);
+        }
+        i = ec_slave_get_next_port(slave,i);
     }
 
     *delay = *delay + slave->ports[0].delay_to_next_dc;
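
ec_slave_get_next_port() and ec_slave_get_previous_port() walk the ports in the order a frame passes through the slave (0, 3, 1, 2), skipping ports without a connected slave; this is what the delay calculations above now iterate over instead of the plain 1..EC_MAX_PORTS loops. A worked example under an assumed topology:

    /* Slave with downstream slaves on ports 1 and 2 only (port 0 points
     * towards the master), per-port receive times t0, t1, t2:
     *
     *   ec_slave_get_next_port(slave, 0) -> 1   (port 3 skipped: not connected)
     *   ec_slave_get_next_port(slave, 1) -> 2
     *   ec_slave_get_next_port(slave, 2) -> 0   (loop terminates)
     *
     * ec_slave_calc_rtt_sum() therefore adds
     *   (t1 - t0) + (t2 - t1) = t2 - t0,
     * pairing each connected port with its previous connected port rather
     * than with the numerically preceding one. */
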
--- a/master/slave.h	Wed Mar 23 08:06:58 2011 +0100
+++ b/master/slave.h	Wed Apr 13 22:06:28 2011 +0200
@@ -46,6 +46,7 @@
 #include "sync.h"
 #include "sdo.h"
 #include "fsm_slave.h"
+#include "mailbox.h"
 
 /*****************************************************************************/
 
@@ -59,9 +60,19 @@
  * \param fmt format string (like in printf())
  * \param args arguments (optional)
  */
+#ifdef USE_TRACE_PRINTK
+#define EC_SLAVE_INFO(slave, fmt, args...) \
+    do { \
+        __trace_printk(_THIS_IP_,"EtherCAT %u-%u: " fmt, slave->master->index, \
+                slave->ring_position, ##args); \
+        printk(KERN_INFO "EtherCAT %u-%u: " fmt, slave->master->index, \
+                slave->ring_position, ##args);  \
+    } while (0)
+#else
 #define EC_SLAVE_INFO(slave, fmt, args...) \
     printk(KERN_INFO "EtherCAT %u-%u: " fmt, slave->master->index, \
             slave->ring_position, ##args)
+#endif
 
 /** Convenience macro for printing slave-specific errors to syslog.
  *
@@ -73,9 +84,19 @@
  * \param fmt format string (like in printf())
  * \param args arguments (optional)
  */
+#ifdef USE_TRACE_PRINTK
+#define EC_SLAVE_ERR(slave, fmt, args...) \
+    do { \
+        __trace_printk(_THIS_IP_,"EtherCAT ERROR %u-%u: " fmt, slave->master->index, \
+                slave->ring_position, ##args); \
+        printk(KERN_ERR "EtherCAT ERROR %u-%u: " fmt, slave->master->index, \
+                slave->ring_position, ##args);  \
+    } while (0)
+#else
 #define EC_SLAVE_ERR(slave, fmt, args...) \
     printk(KERN_ERR "EtherCAT ERROR %u-%u: " fmt, slave->master->index, \
             slave->ring_position, ##args)
+#endif
 
 /** Convenience macro for printing slave-specific warnings to syslog.
  *
@@ -87,9 +108,19 @@
  * \param fmt format string (like in printf())
  * \param args arguments (optional)
  */
+#ifdef USE_TRACE_PRINTK
+#define EC_SLAVE_WARN(slave, fmt, args...) \
+    do { \
+        __trace_printk(_THIS_IP_,"EtherCAT WARNING %u-%u: " fmt, \
+                slave->master->index, slave->ring_position, ##args); \
+        printk(KERN_WARNING "EtherCAT WARNING %u-%u: " fmt, \
+                slave->master->index, slave->ring_position, ##args);    \
+    } while (0)
+#else
 #define EC_SLAVE_WARN(slave, fmt, args...) \
     printk(KERN_WARNING "EtherCAT WARNING %u-%u: " fmt, \
             slave->master->index, slave->ring_position, ##args)
+#endif
 
 /** Convenience macro for printing slave-specific debug messages to syslog.
  *
@@ -101,13 +132,25 @@
  * \param fmt format string (like in printf())
  * \param args arguments (optional)
  */
+#ifdef USE_TRACE_PRINTK
 #define EC_SLAVE_DBG(slave, level, fmt, args...) \
     do { \
+        __trace_printk(_THIS_IP_,"EtherCAT DEBUG%u %u-%u: " fmt, \
+            level,slave->master->index, slave->ring_position, ##args); \
         if (slave->master->debug_level >= level) { \
             printk(KERN_DEBUG "EtherCAT DEBUG %u-%u: " fmt, \
                     slave->master->index, slave->ring_position, ##args); \
         } \
     } while (0)
+#else
+#define EC_SLAVE_DBG(slave, level, fmt, args...) \
+    do { \
+        if (slave->master->debug_level >= level) { \
+            printk(KERN_DEBUG "EtherCAT DEBUG %u-%u: " fmt, \
+                    slave->master->index, slave->ring_position, ##args); \
+        } \
+    } while (0)
+#endif
 
 /*****************************************************************************/
 
@@ -232,7 +275,8 @@
     wait_queue_head_t soe_queue; /**< Wait queue for SoE requests from user
                                    space. */
     ec_fsm_slave_t fsm; /**< Slave state machine. */
-    ec_datagram_t fsm_datagram; /**< Datagram used for state machines. */
+    ec_datagram_t datagram; /**< Datagram used for data transfers. */
+    ec_mailbox_t mbox; /**< Mailbox used for data transfers. */
 };
 
 /*****************************************************************************/
--- a/master/slave_config.c	Wed Mar 23 08:06:58 2011 +0100
+++ b/master/slave_config.c	Wed Apr 13 22:06:28 2011 +0200
@@ -70,7 +70,7 @@
     sc->product_code = product_code;
     sc->watchdog_divider = 0; // use default
     sc->watchdog_intervals = 0; // use default
-
+    sc->allow_overlapping_pdos = 0; // not allowed by default
     sc->slave = NULL;
 
     for (i = 0; i < EC_MAX_SYNC_MANAGERS; i++)
@@ -164,12 +164,15 @@
 {
     unsigned int i;
     ec_fmmu_config_t *fmmu;
+    ec_fmmu_config_t *prev_fmmu;
+    uint32_t fmmu_logical_start_address;
+    size_t tx_size, old_prev_tx_size;
 
     // FMMU configuration already prepared?
     for (i = 0; i < sc->used_fmmus; i++) {
         fmmu = &sc->fmmu_configs[i];
         if (fmmu->domain == domain && fmmu->sync_index == sync_index)
-            return fmmu->logical_start_address;
+            return fmmu->domain_address;
     }
 
     if (sc->used_fmmus == EC_MAX_FMMUS) {
@@ -177,13 +180,29 @@
         return -EOVERFLOW;
     }
 
-    fmmu = &sc->fmmu_configs[sc->used_fmmus++];
-
-    down(&sc->master->master_sem);
-    ec_fmmu_config_init(fmmu, sc, domain, sync_index, dir);
-    up(&sc->master->master_sem);
-
-    return fmmu->logical_start_address;
+    fmmu = &sc->fmmu_configs[sc->used_fmmus];
+
+    ec_mutex_lock(&sc->master->master_mutex);
+    ec_fmmu_config_init(fmmu, sc, sync_index, dir);
+    fmmu_logical_start_address = domain->tx_size;
+    tx_size = fmmu->data_size;
+    if (sc->allow_overlapping_pdos && sc->used_fmmus > 0) {
+        prev_fmmu = &sc->fmmu_configs[sc->used_fmmus-1];
+        if (fmmu->dir != prev_fmmu->dir && prev_fmmu->tx_size != 0) {
+            // prev fmmu has opposite direction
+            // and is not already paired with prev-prev fmmu
+            old_prev_tx_size = prev_fmmu->tx_size;
+            prev_fmmu->tx_size = max(fmmu->data_size,prev_fmmu->data_size);
+            domain->tx_size += prev_fmmu->tx_size - old_prev_tx_size;
+            tx_size = 0;
+            fmmu_logical_start_address = prev_fmmu->logical_start_address;
+        }
+    }
+    ec_fmmu_config_domain(fmmu,domain,fmmu_logical_start_address,tx_size);
+    ec_mutex_unlock(&sc->master->master_mutex);
+
+    ++sc->used_fmmus;
+    return fmmu->domain_address;
 }
 
 /*****************************************************************************/
@@ -227,8 +246,6 @@
     slave->config = sc;
     sc->slave = slave;
 
-    ec_slave_request_state(slave, EC_SLAVE_STATE_OP);
-
     EC_CONFIG_DBG(sc, 1, "Attached slave %u.\n", slave->ring_position);
 
     return 0;
@@ -494,6 +511,18 @@
 
 /*****************************************************************************/
 
+void ecrt_slave_config_overlapping_pdos(ec_slave_config_t *sc,
+        uint8_t allow_overlapping_pdos)
+{
+    if (sc->master->debug_level)
+        EC_DBG("%s(sc = 0x%p, allow_overlapping_pdos = %u)\n",
+                __func__, sc, allow_overlapping_pdos);
+
+    sc->allow_overlapping_pdos = allow_overlapping_pdos;
+}
+
+/*****************************************************************************/
+
 int ecrt_slave_config_pdo_assign_add(ec_slave_config_t *sc,
         uint8_t sync_index, uint16_t pdo_index)
 {
@@ -507,18 +536,18 @@
         return -EINVAL;
     }
 
-    down(&sc->master->master_sem);
+    ec_mutex_lock(&sc->master->master_mutex);
 
     pdo = ec_pdo_list_add_pdo(&sc->sync_configs[sync_index].pdos, pdo_index);
     if (IS_ERR(pdo)) {
-        up(&sc->master->master_sem);
+        ec_mutex_unlock(&sc->master->master_mutex);
         return PTR_ERR(pdo);
     }
     pdo->sync_index = sync_index;
 
     ec_slave_config_load_default_mapping(sc, pdo);
 
-    up(&sc->master->master_sem);
+    ec_mutex_unlock(&sc->master->master_mutex);
     return 0;
 }
 
@@ -535,9 +564,9 @@
         return;
     }
 
-    down(&sc->master->master_sem);
+    ec_mutex_lock(&sc->master->master_mutex);
     ec_pdo_list_clear_pdos(&sc->sync_configs[sync_index].pdos);
-    up(&sc->master->master_sem);
+    ec_mutex_unlock(&sc->master->master_mutex);
 }
 
 /*****************************************************************************/
@@ -563,10 +592,10 @@
             break;
 
     if (pdo) {
-        down(&sc->master->master_sem);
+        ec_mutex_lock(&sc->master->master_mutex);
         entry = ec_pdo_add_entry(pdo, entry_index, entry_subindex,
                 entry_bit_length);
-        up(&sc->master->master_sem);
+        ec_mutex_unlock(&sc->master->master_mutex);
         if (IS_ERR(entry))
             retval = PTR_ERR(entry);
     } else {
@@ -594,9 +623,9 @@
             break;
 
     if (pdo) {
-        down(&sc->master->master_sem);
+        ec_mutex_lock(&sc->master->master_mutex);
         ec_pdo_clear_entries(pdo);
-        up(&sc->master->master_sem);
+        ec_mutex_unlock(&sc->master->master_mutex);
     } else {
         EC_CONFIG_WARN(sc, "PDO 0x%04X is not assigned.\n", pdo_index);
     }
@@ -685,10 +714,6 @@
     ec_pdo_entry_t *entry;
     int sync_offset;
 
-    EC_CONFIG_DBG(sc, 1, "%s(sc = 0x%p, index = 0x%04X, "
-            "subindex = 0x%02X, domain = 0x%p, bit_position = 0x%p)\n",
-            __func__, sc, index, subindex, domain, bit_position);
-
     for (sync_index = 0; sync_index < EC_MAX_SYNC_MANAGERS; sync_index++) {
         sync_config = &sc->sync_configs[sync_index];
         bit_offset = 0;
@@ -712,7 +737,11 @@
                     if (sync_offset < 0)
                         return sync_offset;
 
-                    return sync_offset + bit_offset / 8;
+                    EC_CONFIG_DBG(sc, 1, "%s(index = 0x%04X, "
+                                  "subindex = 0x%02X, domain = %u, bytepos=%u, bitpos=%u)\n",
+                                  __func__, index, subindex,
+                                  domain->index, sync_offset + bit_offset / 8, bit_pos);
+                    return sync_offset + bit_offset / 8;
                 }
             }
         }
@@ -776,9 +805,9 @@
         return ret;
     }
         
-    down(&sc->master->master_sem);
+    ec_mutex_lock(&sc->master->master_mutex);
     list_add_tail(&req->list, &sc->sdo_configs);
-    up(&sc->master->master_sem);
+    ec_mutex_unlock(&sc->master->master_mutex);
     return 0;
 }
 
@@ -861,9 +890,9 @@
         return ret;
     }
         
-    down(&sc->master->master_sem);
+    ec_mutex_lock(&sc->master->master_mutex);
     list_add_tail(&req->list, &sc->sdo_configs);
-    up(&sc->master->master_sem);
+    ec_mutex_unlock(&sc->master->master_mutex);
     return 0;
 }
 
@@ -902,9 +931,9 @@
     memset(req->data, 0x00, size);
     req->data_size = size;
     
-    down(&sc->master->master_sem);
+    ec_mutex_lock(&sc->master->master_mutex);
     list_add_tail(&req->list, &sc->sdo_requests);
-    up(&sc->master->master_sem);
+    ec_mutex_unlock(&sc->master->master_mutex);
 
     return req; 
 }
@@ -944,9 +973,9 @@
         return ERR_PTR(ret);
     }
 
-    down(&sc->master->master_sem);
+    ec_mutex_lock(&sc->master->master_mutex);
     list_add_tail(&voe->list, &sc->voe_handlers);
-    up(&sc->master->master_sem);
+    ec_mutex_unlock(&sc->master->master_mutex);
 
     return voe; 
 }
@@ -1027,9 +1056,9 @@
         return ret;
     }
         
-    down(&sc->master->master_sem);
+    ec_mutex_lock(&sc->master->master_mutex);
     list_add_tail(&req->list, &sc->soe_configs);
-    up(&sc->master->master_sem);
+    ec_mutex_unlock(&sc->master->master_mutex);
     return 0;
 }
 
@@ -1039,6 +1068,7 @@
 
 EXPORT_SYMBOL(ecrt_slave_config_sync_manager);
 EXPORT_SYMBOL(ecrt_slave_config_watchdog);
+EXPORT_SYMBOL(ecrt_slave_config_overlapping_pdos);
 EXPORT_SYMBOL(ecrt_slave_config_pdo_assign_add);
 EXPORT_SYMBOL(ecrt_slave_config_pdo_assign_clear);
 EXPORT_SYMBOL(ecrt_slave_config_pdo_mapping_add);
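
The new ecrt_slave_config_overlapping_pdos() call lets an input FMMU share logical frame space with the preceding output FMMU of the same slave configuration, so the pair consumes max(output size, input size) bytes of the domain frame instead of their sum. A usage sketch; VENDOR_ID, PRODUCT_CODE and the PDO sizes (8 bytes out, 6 bytes in) are assumptions for the example:

    ec_slave_config_t *sc = ecrt_master_slave_config(master, 0, 0,
            VENDOR_ID, PRODUCT_CODE);
    ecrt_slave_config_overlapping_pdos(sc, 1);

    /* Effect on the domain image once both sync managers are registered:
     *   without overlapping: tx_size grows by 8 + 6 = 14 bytes
     *   with overlapping:    the input FMMU reuses the output FMMU's
     *                        logical start address, tx_size grows by
     *                        max(8, 6) = 8 bytes                        */
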
--- a/master/slave_config.h	Wed Mar 23 08:06:58 2011 +0100
+++ b/master/slave_config.h	Wed Apr 13 22:06:28 2011 +0200
@@ -126,7 +126,8 @@
                                  intervals (see spec. reg. 0x0400). */
     uint16_t watchdog_intervals; /**< Process data watchdog intervals (see
                                    spec. reg. 0x0420). */
-
+    uint8_t allow_overlapping_pdos; /**< Allow input PDOs to use the same
+                                      frame space as output PDOs. */
     ec_slave_t *slave; /**< Slave pointer. This is \a NULL, if the slave is
                          offline. */
 
--- a/master/voe_handler.c	Wed Mar 23 08:06:58 2011 +0100
+++ b/master/voe_handler.c	Wed Apr 13 22:06:28 2011 +0200
@@ -88,6 +88,7 @@
     voe->request_state = EC_INT_REQUEST_INIT;
 
     ec_datagram_init(&voe->datagram);
+    ec_mbox_init(&voe->mbox,&voe->datagram);
     return ec_datagram_prealloc(&voe->datagram,
             size + EC_MBOX_HEADER_SIZE + EC_VOE_HEADER_SIZE);
 }
@@ -100,6 +101,7 @@
         ec_voe_handler_t *voe /**< VoE handler. */
         )
 {
+    ec_mbox_clear(&voe->mbox);
     ec_datagram_clear(&voe->datagram);
 }
 
@@ -191,7 +193,7 @@
     if (voe->config->slave) { // FIXME locking?
         voe->state(voe);
         if (voe->request_state == EC_INT_REQUEST_BUSY)
-            ec_master_queue_datagram(voe->config->master, &voe->datagram);
+            ec_slave_mbox_queue_datagrams(voe->config->slave,&voe->mbox);
     } else {
         voe->state = ec_voe_handler_state_error;
         voe->request_state = EC_INT_REQUEST_FAILURE;
@@ -224,7 +226,7 @@
         return;
     }
 
-    data = ec_slave_mbox_prepare_send(slave, &voe->datagram,
+    data = ec_slave_mbox_prepare_send(slave, &voe->mbox,
             EC_MBOX_TYPE_VOE, EC_VOE_HEADER_SIZE + voe->data_size);
     if (IS_ERR(data)) {
         voe->state = ec_voe_handler_state_error;
@@ -247,22 +249,22 @@
  */
 void ec_voe_handler_state_write_response(ec_voe_handler_t *voe)
 {
-    ec_datagram_t *datagram = &voe->datagram;
-    ec_slave_t *slave = voe->config->slave;
-
-    if (datagram->state == EC_DATAGRAM_TIMED_OUT && voe->retries--)
-        return;
-
-    if (datagram->state != EC_DATAGRAM_RECEIVED) {
+    ec_mailbox_t *mbox = &voe->mbox;
+    ec_slave_t *slave = voe->config->slave;
+
+    if (ec_mbox_is_datagram_state(mbox,EC_DATAGRAM_TIMED_OUT) && voe->retries--)
+        return;
+
+    if (!ec_mbox_is_datagram_state(mbox,EC_DATAGRAM_RECEIVED)) {
         voe->state = ec_voe_handler_state_error;
         voe->request_state = EC_INT_REQUEST_FAILURE;
         EC_SLAVE_ERR(slave, "Failed to receive VoE write request datagram: ");
-        ec_datagram_print_state(datagram);
-        return;
-    }
-
-    if (datagram->working_counter != 1) {
-        if (!datagram->working_counter) {
+        ec_datagram_print_state(mbox->datagram);
+        return;
+    }
+
+    if (!ec_mbox_is_datagram_wc(mbox,1)) {
+        if (ec_mbox_is_datagram_wc(mbox,0)) {
             unsigned long diff_ms =
                 (jiffies - voe->jiffies_start) * 1000 / HZ;
             if (diff_ms < EC_VOE_RESPONSE_TIMEOUT) {
@@ -276,7 +278,7 @@
         voe->state = ec_voe_handler_state_error;
         voe->request_state = EC_INT_REQUEST_FAILURE;
         EC_SLAVE_ERR(slave, "Reception of VoE write request failed: ");
-        ec_datagram_print_wc_error(datagram);
+        ec_datagram_print_wc_error(mbox->datagram);
         return;
     }
 
@@ -292,7 +294,7 @@
  */
 void ec_voe_handler_state_read_start(ec_voe_handler_t *voe)
 {
-    ec_datagram_t *datagram = &voe->datagram;
+    ec_mailbox_t *mbox = &voe->mbox;
     ec_slave_t *slave = voe->config->slave;
 
     EC_SLAVE_DBG(slave, 1, "Reading VoE data.\n");
@@ -304,7 +306,7 @@
         return;
     }
 
-    ec_slave_mbox_prepare_check(slave, datagram); // can not fail.
+    ec_slave_mbox_prepare_check(slave, mbox); // can not fail.
 
     voe->jiffies_start = jiffies;
     voe->retries = EC_FSM_RETRIES;
@@ -317,32 +319,32 @@
  */
 void ec_voe_handler_state_read_check(ec_voe_handler_t *voe)
 {
-    ec_datagram_t *datagram = &voe->datagram;
-    ec_slave_t *slave = voe->config->slave;
-
-    if (datagram->state == EC_DATAGRAM_TIMED_OUT && voe->retries--)
-        return;
-
-    if (datagram->state != EC_DATAGRAM_RECEIVED) {
+    ec_mailbox_t *mbox = &voe->mbox;
+    ec_slave_t *slave = voe->config->slave;
+
+    if (ec_mbox_is_datagram_state(mbox,EC_DATAGRAM_TIMED_OUT) && voe->retries--)
+        return;
+
+    if (!ec_mbox_is_datagram_state(mbox,EC_DATAGRAM_RECEIVED)) {
         voe->state = ec_voe_handler_state_error;
         voe->request_state = EC_INT_REQUEST_FAILURE;
         EC_SLAVE_ERR(slave, "Failed to receive VoE mailbox check datagram: ");
-        ec_datagram_print_state(datagram);
-        return;
-    }
-
-    if (datagram->working_counter != 1) {
+        ec_datagram_print_state(mbox->datagram);
+        return;
+    }
+
+    if (!ec_mbox_is_datagram_wc(mbox,1)) {
         voe->state = ec_voe_handler_state_error;
         voe->request_state = EC_INT_REQUEST_FAILURE;
         EC_SLAVE_ERR(slave, "Reception of VoE mailbox check"
                 " datagram failed: ");
-        ec_datagram_print_wc_error(datagram);
-        return;
-    }
-
-    if (!ec_slave_mbox_check(datagram)) {
+        ec_datagram_print_wc_error(mbox->datagram);
+        return;
+    }
+
+    if (!ec_slave_mbox_check(mbox)) {
         unsigned long diff_ms =
-            (datagram->jiffies_received - voe->jiffies_start) * 1000 / HZ;
+            (mbox->datagram->jiffies_received - voe->jiffies_start) * 1000 / HZ;
         if (diff_ms >= EC_VOE_RESPONSE_TIMEOUT) {
             voe->state = ec_voe_handler_state_error;
             voe->request_state = EC_INT_REQUEST_FAILURE;
@@ -350,13 +352,13 @@
             return;
         }
 
-        ec_slave_mbox_prepare_check(slave, datagram); // can not fail.
+        ec_slave_mbox_prepare_check(slave, mbox); // can not fail.
         voe->retries = EC_FSM_RETRIES;
         return;
     }
 
     // Fetch response
-    ec_slave_mbox_prepare_fetch(slave, datagram); // can not fail.
+    ec_slave_mbox_prepare_fetch(slave, mbox); // can not fail.
     voe->retries = EC_FSM_RETRIES;
     voe->state = ec_voe_handler_state_read_response;
 }
@@ -367,32 +369,32 @@
  */
 void ec_voe_handler_state_read_response(ec_voe_handler_t *voe)
 {
-    ec_datagram_t *datagram = &voe->datagram;
+    ec_mailbox_t *mbox = &voe->mbox;
     ec_slave_t *slave = voe->config->slave;
     ec_master_t *master = voe->config->master;
     uint8_t *data, mbox_prot;
     size_t rec_size;
 
-    if (datagram->state == EC_DATAGRAM_TIMED_OUT && voe->retries--)
-        return;
-
-    if (datagram->state != EC_DATAGRAM_RECEIVED) {
+    if (ec_mbox_is_datagram_state(mbox,EC_DATAGRAM_TIMED_OUT) && voe->retries--)
+        return;
+
+    if (!ec_mbox_is_datagram_state(mbox,EC_DATAGRAM_RECEIVED)) {
         voe->state = ec_voe_handler_state_error;
         voe->request_state = EC_INT_REQUEST_FAILURE;
         EC_SLAVE_ERR(slave, "Failed to receive VoE read datagram: ");
-        ec_datagram_print_state(datagram);
-        return;
-    }
-
-    if (datagram->working_counter != 1) {
+        ec_datagram_print_state(mbox->datagram);
+        return;
+    }
+
+    if (!ec_mbox_is_datagram_wc(mbox,1)) {
         voe->state = ec_voe_handler_state_error;
         voe->request_state = EC_INT_REQUEST_FAILURE;
         EC_SLAVE_ERR(slave, "Reception of VoE read response failed: ");
-        ec_datagram_print_wc_error(datagram);
-        return;
-    }
-
-    data = ec_slave_mbox_fetch(slave, datagram, &mbox_prot, &rec_size);
+        ec_datagram_print_wc_error(mbox->datagram);
+        return;
+    }
+
+    data = ec_slave_mbox_fetch(slave, mbox, &mbox_prot, &rec_size);
     if (IS_ERR(data)) {
         voe->state = ec_voe_handler_state_error;
         voe->request_state = EC_INT_REQUEST_FAILURE;
@@ -432,7 +434,7 @@
  */
 void ec_voe_handler_state_read_nosync_start(ec_voe_handler_t *voe)
 {
-    ec_datagram_t *datagram = &voe->datagram;
+    ec_mailbox_t *mbox = &voe->mbox;
     ec_slave_t *slave = voe->config->slave;
 
     EC_SLAVE_DBG(slave, 1, "Reading VoE data.\n");
@@ -444,7 +446,7 @@
         return;
     }
 
-    ec_slave_mbox_prepare_fetch(slave, datagram); // can not fail.
+    ec_slave_mbox_prepare_fetch(slave, mbox); // can not fail.
 
     voe->jiffies_start = jiffies;
     voe->retries = EC_FSM_RETRIES;
@@ -458,39 +460,39 @@
  */
 void ec_voe_handler_state_read_nosync_response(ec_voe_handler_t *voe)
 {
-    ec_datagram_t *datagram = &voe->datagram;
+    ec_mailbox_t *mbox = &voe->mbox;
     ec_slave_t *slave = voe->config->slave;
     ec_master_t *master = voe->config->master;
     uint8_t *data, mbox_prot;
     size_t rec_size;
 
-    if (datagram->state == EC_DATAGRAM_TIMED_OUT && voe->retries--)
-        return;
-
-    if (datagram->state != EC_DATAGRAM_RECEIVED) {
+    if (ec_mbox_is_datagram_state(mbox, EC_DATAGRAM_TIMED_OUT) && voe->retries--)
+        return;
+
+    if (!ec_mbox_is_datagram_state(mbox, EC_DATAGRAM_RECEIVED)) {
         voe->state = ec_voe_handler_state_error;
         voe->request_state = EC_INT_REQUEST_FAILURE;
         EC_SLAVE_ERR(slave, "Failed to receive VoE read datagram: ");
-        ec_datagram_print_state(datagram);
-        return;
-    }
-
-    if (datagram->working_counter == 0) {
+        ec_datagram_print_state(mbox->datagram);
+        return;
+    }
+
+    if (ec_mbox_is_datagram_wc(mbox, 0)) {
         voe->state = ec_voe_handler_state_error;
         voe->request_state = EC_INT_REQUEST_FAILURE;
         EC_SLAVE_DBG(slave, 1, "Slave did not send VoE data.\n");
         return;
     }
 
-    if (datagram->working_counter != 1) {
+    if (!ec_mbox_is_datagram_wc(mbox, 1)) {
         voe->state = ec_voe_handler_state_error;
         voe->request_state = EC_INT_REQUEST_FAILURE;
         EC_SLAVE_WARN(slave, "Reception of VoE read response failed: ");
-        ec_datagram_print_wc_error(datagram);
-        return;
-    }
-
-    if (!(data = ec_slave_mbox_fetch(slave, datagram,
+        ec_datagram_print_wc_error(mbox->datagram);
+        return;
+    }
+
+    if (!(data = ec_slave_mbox_fetch(slave, mbox,
                     &mbox_prot, &rec_size))) {
         voe->state = ec_voe_handler_state_error;
         voe->request_state = EC_INT_REQUEST_FAILURE;
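
For orientation: the VoE states above now go through a mailbox wrapper instead of touching the datagram directly. The ec_mailbox_t type and the ec_mbox_is_datagram_* predicates are not defined in this hunk; the sketch below only illustrates the behaviour they appear to provide (testing the state and working counter of the datagram carried by the wrapper). Type, field and parameter names here are assumptions, not the committed definitions elsewhere in the master sources.

    /* Illustrative sketch only: assumed shape of the mailbox wrapper and of
     * the predicates called by the VoE state machine above. ec_datagram_t
     * and ec_datagram_state_t come from the master sources. */
    typedef struct {
        ec_datagram_t *datagram; /* datagram currently carrying mailbox traffic */
    } ec_mailbox_t;

    /* Non-zero if the wrapped datagram is in the given state. */
    static inline int ec_mbox_is_datagram_state(const ec_mailbox_t *mbox,
            ec_datagram_state_t state)
    {
        return mbox->datagram->state == state;
    }

    /* Non-zero if the wrapped datagram returned the given working counter. */
    static inline int ec_mbox_is_datagram_wc(const ec_mailbox_t *mbox,
            uint16_t wc)
    {
        return mbox->datagram->working_counter == wc;
    }
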
--- a/master/voe_handler.h	Wed Mar 23 08:06:58 2011 +0100
+++ b/master/voe_handler.h	Wed Apr 13 22:06:28 2011 +0200
@@ -50,6 +50,7 @@
     struct list_head list; /**< List item. */
     ec_slave_config_t *config; /**< Parent slave configuration. */
     ec_datagram_t datagram; /**< State machine datagram. */
+    ec_mailbox_t mbox; /**< State machine mailbox. */
     uint32_t vendor_id; /**< Vendor ID for the header. */
     uint16_t vendor_type; /**< Vendor type for the header. */
     size_t data_size; /**< Size of VoE data. */
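
The handler thus keeps its existing datagram and additionally carries the new mailbox wrapper. How the two are tied together is not visible in this hunk; a plausible, purely hypothetical wiring in the handler's init code (assuming the wrapper shape sketched above) would be:

    /* Hypothetical sketch -- not the committed init code. The mailbox
     * wrapper is assumed to simply reference the handler's own datagram. */
    voe->mbox.datagram = &voe->datagram;
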
--- a/tool/CommandDomains.cpp	Wed Mar 23 08:06:58 2011 +0100
+++ b/tool/CommandDomains.cpp	Wed Apr 13 22:06:28 2011 +0200
@@ -143,6 +143,8 @@
         << setw(8) << domain.logical_base_address
         << ", Size " << dec << setfill(' ')
         << setw(3) << domain.data_size
+        << ", TxSize " << dec << setfill(' ')
+        << setw(3) << domain.tx_size
         << ", WorkingCounter "
         << domain.working_counter << "/"
         << domain.expected_working_counter << endl;
@@ -173,7 +175,7 @@
             << setw(8) << fmmu.logical_address
             << ", Size " << dec << fmmu.data_size << endl;
 
-        dataOffset = fmmu.logical_address - domain.logical_base_address;
+        dataOffset = fmmu.domain_address - domain.logical_base_address;
         if (dataOffset + fmmu.data_size > domain.data_size) {
             stringstream err;
             delete [] processData;
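
The offset fix above derives the FMMU's position inside the fetched process-data image from fmmu.domain_address rather than fmmu.logical_address; with separate receive and transmit areas (note the new TxSize column), the plain logical address no longer gives the byte offset into the image. Assuming domain_address is expressed in the same logical address space as the domain base, the arithmetic works out as in this small made-up example:

    #include <assert.h>
    #include <stddef.h>
    #include <stdint.h>

    int main(void)
    {
        /* Made-up numbers: a 16-byte domain image based at logical address
         * 0x00010000, and an FMMU whose data starts at 0x00010004 and
         * covers 6 bytes. */
        uint32_t logical_base_address = 0x00010000;
        uint32_t domain_address = 0x00010004;
        size_t fmmu_data_size = 6;
        size_t domain_data_size = 16;

        size_t dataOffset = domain_address - logical_base_address; /* = 4 */

        /* Bytes [4, 10) lie inside the 16-byte image, so the bounds check
         * performed by the tool would pass for this FMMU. */
        assert(dataOffset + fmmu_data_size <= domain_data_size);
        return 0;
    }
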
--- a/tool/CommandFoeWrite.cpp	Wed Mar 23 08:06:58 2011 +0100
+++ b/tool/CommandFoeWrite.cpp	Wed Apr 13 22:06:28 2011 +0200
@@ -140,7 +140,8 @@
 
     // write data via foe to the slave
     data.offset = 0;
-    strncpy(data.file_name, storeFileName.c_str(), sizeof(data.file_name));
+    strncpy(data.file_name, storeFileName.c_str(), sizeof(data.file_name)-1);
+    data.file_name[sizeof(data.file_name)-1] = '\0';
 
     try {
         m.writeFoe(&data);
--- a/tty/module.c	Wed Mar 23 08:06:58 2011 +0100
+++ b/tty/module.c	Wed Apr 13 22:06:28 2011 +0200
@@ -65,7 +65,7 @@
 
 static struct tty_driver *tty_driver = NULL;
 ec_tty_t *ttys[EC_TTY_MAX_DEVICES];
-struct semaphore tty_sem;
+struct ec_mutex_t tty_sem;
 
 void ec_tty_wakeup(unsigned long);
 
@@ -111,7 +111,7 @@
     struct timer_list timer;
     struct tty_struct *tty;
     unsigned int open_count;
-    struct semaphore sem;
+    struct ec_mutex_t sem;
 
     ec_tty_operations_t ops;
     void *cb_data;
@@ -131,7 +131,7 @@
 
     printk(KERN_INFO PFX "TTY driver %s\n", EC_MASTER_VERSION);
 
-    sema_init(&tty_sem, 1);
+    ec_mutex_init(&tty_sem);
 
     for (i = 0; i < EC_TTY_MAX_DEVICES; i++) {
         ttys[i] = NULL;
@@ -202,7 +202,7 @@
     init_timer(&t->timer);
     t->tty = NULL;
     t->open_count = 0;
-    sema_init(&t->sem, 1);
+    ec_mutex_init(&t->sem);
     t->ops = *ops;
     t->cb_data = cb_data;
 
@@ -391,9 +391,9 @@
         tty->driver_data = t;
     }
 
-    down(&t->sem);
+    ec_mutex_lock(&t->sem);
     t->open_count++;
-    up(&t->sem);
+    ec_mutex_unlock(&t->sem);
     return 0;
 }
 
@@ -409,11 +409,11 @@
 #endif
 
     if (t) {
-        down(&t->sem);
+        ec_mutex_lock(&t->sem);
         if (--t->open_count == 0) {
             t->tty = NULL;
         }
-        up(&t->sem);
+        ec_mutex_unlock(&t->sem);
     }
 }
 
@@ -676,7 +676,7 @@
     ec_tty_t *tty;
     int minor, ret;
 
-    if (down_interruptible(&tty_sem)) {
+    if (ec_mutex_lock_interruptible(&tty_sem)) {
         return ERR_PTR(-EINTR);
     }
 
@@ -686,25 +686,25 @@
 
             tty = kmalloc(sizeof(ec_tty_t), GFP_KERNEL);
             if (!tty) {
-                up(&tty_sem);
+                ec_mutex_unlock(&tty_sem);
                 printk(KERN_ERR PFX "Failed to allocate memory.\n");
                 return ERR_PTR(-ENOMEM);
             }
 
             ret = ec_tty_init(tty, minor, ops, cb_data);
             if (ret) {
-                up(&tty_sem);
+                ec_mutex_unlock(&tty_sem);
                 kfree(tty);
                 return ERR_PTR(ret);
             }
 
             ttys[minor] = tty;
-            up(&tty_sem);
+            ec_mutex_unlock(&tty_sem);
             return tty;
         }
     }
 
-    up(&tty_sem);
+    ec_mutex_unlock(&tty_sem);
     printk(KERN_ERR PFX "No free interfaces available.\n");
     return ERR_PTR(-EBUSY);
 }
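
The tty module now takes its locks through the master's ec_mutex_* abstraction instead of using struct semaphore directly. The wrapper itself is not part of this section; the sketch below shows one plausible shape it could take if it were simply backed by a kernel semaphore (names and layout are assumptions, not the committed locking code):

    #include <linux/semaphore.h>

    /* Illustrative sketch only: a semaphore-backed ec_mutex wrapper that
     * would satisfy the calls made in tty/module.c above. */
    struct ec_mutex_t {
        struct semaphore sem;
    };

    static inline void ec_mutex_init(struct ec_mutex_t *mtx)
    {
        sema_init(&mtx->sem, 1);
    }

    static inline void ec_mutex_lock(struct ec_mutex_t *mtx)
    {
        down(&mtx->sem);
    }

    /* 0 on success, -EINTR if the wait was interrupted by a signal. */
    static inline int ec_mutex_lock_interruptible(struct ec_mutex_t *mtx)
    {
        return down_interruptible(&mtx->sem);
    }

    static inline void ec_mutex_unlock(struct ec_mutex_t *mtx)
    {
        up(&mtx->sem);
    }
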