--- a/NEWS Mon Feb 07 21:17:09 2011 +0100
+++ b/NEWS Mon Feb 07 21:30:25 2011 +0100
@@ -29,7 +29,7 @@
* Added 8139too driver for kernels 2.6.25 (F. Pose), 2.6.26 (M. Luescher),
2.6.27, 2.6.28, 2.6.29 (M. Goetze), 2.6.31 (F. Pose) and 2.6.34 (Malcolm
Lewis).
-* Added e1000 driver for 2.6.26 (M. Luescher) and 2.6.27.
+* Added e1000 driver for 2.6.26 (M. Luescher), 2.6.27 and 2.6.28.
* Added r8169 driver for 2.6.24, 2.6.27, 2.6.28, 2.6.29, 2.6.31 and 2.6.32
(latter thanks to Robbie K).
* Debug interfaces are created with the Ethernet addresses of the attached
--- a/devices/e1000/Makefile.am Mon Feb 07 21:17:09 2011 +0100
+++ b/devices/e1000/Makefile.am Mon Feb 07 21:30:25 2011 +0100
@@ -44,6 +44,8 @@
e1000-2.6.26-orig.h \
e1000-2.6.27-ethercat.h \
e1000-2.6.27-orig.h \
+ e1000-2.6.28-ethercat.h \
+ e1000-2.6.28-orig.h \
e1000_ethtool-2.6.13-ethercat.c \
e1000_ethtool-2.6.13-orig.c \
e1000_ethtool-2.6.18-ethercat.c \
@@ -58,6 +60,8 @@
e1000_ethtool-2.6.26-orig.c \
e1000_ethtool-2.6.27-ethercat.c \
e1000_ethtool-2.6.27-orig.c \
+ e1000_ethtool-2.6.28-ethercat.c \
+ e1000_ethtool-2.6.28-orig.c \
e1000_hw-2.6.13-ethercat.c \
e1000_hw-2.6.13-ethercat.h \
e1000_hw-2.6.13-orig.c \
@@ -86,6 +90,10 @@
e1000_hw-2.6.27-ethercat.h \
e1000_hw-2.6.27-orig.c \
e1000_hw-2.6.27-orig.h \
+ e1000_hw-2.6.28-ethercat.c \
+ e1000_hw-2.6.28-ethercat.h \
+ e1000_hw-2.6.28-orig.c \
+ e1000_hw-2.6.28-orig.h \
e1000_main-2.6.13-ethercat.c \
e1000_main-2.6.13-orig.c \
e1000_main-2.6.18-ethercat.c \
@@ -100,6 +108,8 @@
e1000_main-2.6.26-orig.c \
e1000_main-2.6.27-ethercat.c \
e1000_main-2.6.27-orig.c \
+ e1000_main-2.6.28-ethercat.c \
+ e1000_main-2.6.28-orig.c \
e1000_osdep-2.6.13-ethercat.h \
e1000_osdep-2.6.13-orig.h \
e1000_osdep-2.6.18-ethercat.h \
@@ -114,6 +124,8 @@
e1000_osdep-2.6.26-orig.h \
e1000_osdep-2.6.27-ethercat.h \
e1000_osdep-2.6.27-orig.h \
+ e1000_osdep-2.6.28-ethercat.h \
+ e1000_osdep-2.6.28-orig.h \
e1000_param-2.6.13-ethercat.c \
e1000_param-2.6.13-orig.c \
e1000_param-2.6.18-ethercat.c \
@@ -127,7 +139,10 @@
e1000_param-2.6.26-ethercat.c \
e1000_param-2.6.26-orig.c \
e1000_param-2.6.27-ethercat.c \
- e1000_param-2.6.27-orig.c
+ e1000_param-2.6.27-orig.c \
+ e1000_param-2.6.28-ethercat.c \
+ e1000_param-2.6.28-orig.c
+
BUILT_SOURCES = \
Kbuild
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/devices/e1000/e1000-2.6.28-ethercat.h Mon Feb 07 21:30:25 2011 +0100
@@ -0,0 +1,360 @@
+/*******************************************************************************
+
+ Intel PRO/1000 Linux driver
+ Copyright(c) 1999 - 2006 Intel Corporation.
+
+ This program is free software; you can redistribute it and/or modify it
+ under the terms and conditions of the GNU General Public License,
+ version 2, as published by the Free Software Foundation.
+
+ This program is distributed in the hope it will be useful, but WITHOUT
+ ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ more details.
+
+ You should have received a copy of the GNU General Public License along with
+ this program; if not, write to the Free Software Foundation, Inc.,
+ 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
+
+ The full GNU General Public License is included in this distribution in
+ the file called "COPYING".
+
+ Contact Information:
+ Linux NICS <linux.nics@intel.com>
+ e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
+ Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
+
+*******************************************************************************/
+
+
+/* Linux PRO/1000 Ethernet Driver main header file */
+
+#ifndef _E1000_H_
+#define _E1000_H_
+
+#include <linux/stddef.h>
+#include <linux/module.h>
+#include <linux/types.h>
+#include <asm/byteorder.h>
+#include <linux/init.h>
+#include <linux/mm.h>
+#include <linux/errno.h>
+#include <linux/ioport.h>
+#include <linux/pci.h>
+#include <linux/kernel.h>
+#include <linux/netdevice.h>
+#include <linux/etherdevice.h>
+#include <linux/skbuff.h>
+#include <linux/delay.h>
+#include <linux/timer.h>
+#include <linux/slab.h>
+#include <linux/vmalloc.h>
+#include <linux/interrupt.h>
+#include <linux/string.h>
+#include <linux/pagemap.h>
+#include <linux/dma-mapping.h>
+#include <linux/bitops.h>
+#include <asm/io.h>
+#include <asm/irq.h>
+#include <linux/capability.h>
+#include <linux/in.h>
+#include <linux/ip.h>
+#include <linux/ipv6.h>
+#include <linux/tcp.h>
+#include <linux/udp.h>
+#include <net/pkt_sched.h>
+#include <linux/list.h>
+#include <linux/reboot.h>
+#include <net/checksum.h>
+#include <linux/mii.h>
+#include <linux/ethtool.h>
+#include <linux/if_vlan.h>
+#include "../ecdev.h"
+
+
+#define BAR_0 0
+#define BAR_1 1
+#define BAR_5 5
+
+#define INTEL_E1000_ETHERNET_DEVICE(device_id) {\
+ PCI_DEVICE(PCI_VENDOR_ID_INTEL, device_id)}
+
+struct e1000_adapter;
+
+#include "e1000_hw-2.6.28-ethercat.h"
+
+#ifdef DBG
+#define E1000_DBG(args...) printk(KERN_DEBUG "ec_e1000: " args)
+#else
+#define E1000_DBG(args...)
+#endif
+
+#define E1000_ERR(args...) printk(KERN_ERR "ec_e1000: " args)
+
+#define PFX "ec_e1000: "
+
+#define DPRINTK(nlevel, klevel, fmt, args...) \
+do { \
+ if (NETIF_MSG_##nlevel & adapter->msg_enable) \
+ printk(KERN_##klevel PFX "%s: %s: " fmt, \
+ adapter->netdev->name, __func__, ##args); \
+} while (0)
+
+#define E1000_MAX_INTR 10
+
+/* TX/RX descriptor defines */
+#define E1000_DEFAULT_TXD 256
+#define E1000_MAX_TXD 256
+#define E1000_MIN_TXD 80
+#define E1000_MAX_82544_TXD 4096
+
+#define E1000_DEFAULT_RXD 256
+#define E1000_MAX_RXD 256
+#define E1000_MIN_RXD 80
+#define E1000_MAX_82544_RXD 4096
+
+/* this is the size past which hardware will drop packets when setting LPE=0 */
+#define MAXIMUM_ETHERNET_VLAN_SIZE 1522
+
+/* Supported Rx Buffer Sizes */
+#define E1000_RXBUFFER_128 128 /* Used for packet split */
+#define E1000_RXBUFFER_256 256 /* Used for packet split */
+#define E1000_RXBUFFER_512 512
+#define E1000_RXBUFFER_1024 1024
+#define E1000_RXBUFFER_2048 2048
+#define E1000_RXBUFFER_4096 4096
+#define E1000_RXBUFFER_8192 8192
+#define E1000_RXBUFFER_16384 16384
+
+/* SmartSpeed delimiters */
+#define E1000_SMARTSPEED_DOWNSHIFT 3
+#define E1000_SMARTSPEED_MAX 15
+
+/* Packet Buffer allocations */
+#define E1000_PBA_BYTES_SHIFT 0xA
+#define E1000_TX_HEAD_ADDR_SHIFT 7
+#define E1000_PBA_TX_MASK 0xFFFF0000
+
+/* Flow Control Watermarks */
+#define E1000_FC_HIGH_DIFF 0x1638 /* High: 5688 bytes below Rx FIFO size */
+#define E1000_FC_LOW_DIFF 0x1640 /* Low: 5696 bytes below Rx FIFO size */
+
+#define E1000_FC_PAUSE_TIME 0x0680 /* 858 usec */
+
+/* How many Tx Descriptors do we need to call netif_wake_queue ? */
+#define E1000_TX_QUEUE_WAKE 16
+/* How many Rx Buffers do we bundle into one write to the hardware ? */
+#define E1000_RX_BUFFER_WRITE 16 /* Must be power of 2 */
+
+#define AUTO_ALL_MODES 0
+#define E1000_EEPROM_82544_APM 0x0004
+#define E1000_EEPROM_ICH8_APME 0x0004
+#define E1000_EEPROM_APME 0x0400
+
+#ifndef E1000_MASTER_SLAVE
+/* Switch to override PHY master/slave setting */
+#define E1000_MASTER_SLAVE e1000_ms_hw_default
+#endif
+
+#define E1000_MNG_VLAN_NONE (-1)
+
+/* wrapper around a pointer to a socket buffer,
+ * so a DMA handle can be stored along with the buffer */
+struct e1000_buffer {
+ struct sk_buff *skb;
+ dma_addr_t dma;
+ unsigned long time_stamp;
+ u16 length;
+ u16 next_to_watch;
+};
+
+struct e1000_tx_ring {
+ /* pointer to the descriptor ring memory */
+ void *desc;
+ /* physical address of the descriptor ring */
+ dma_addr_t dma;
+ /* length of descriptor ring in bytes */
+ unsigned int size;
+ /* number of descriptors in the ring */
+ unsigned int count;
+ /* next descriptor to associate a buffer with */
+ unsigned int next_to_use;
+ /* next descriptor to check for DD status bit */
+ unsigned int next_to_clean;
+ /* array of buffer information structs */
+ struct e1000_buffer *buffer_info;
+
+ spinlock_t tx_lock;
+ u16 tdh;
+ u16 tdt;
+ bool last_tx_tso;
+};
+
+struct e1000_rx_ring {
+ /* pointer to the descriptor ring memory */
+ void *desc;
+ /* physical address of the descriptor ring */
+ dma_addr_t dma;
+ /* length of descriptor ring in bytes */
+ unsigned int size;
+ /* number of descriptors in the ring */
+ unsigned int count;
+ /* next descriptor to associate a buffer with */
+ unsigned int next_to_use;
+ /* next descriptor to check for DD status bit */
+ unsigned int next_to_clean;
+ /* array of buffer information structs */
+ struct e1000_buffer *buffer_info;
+
+ /* cpu for rx queue */
+ int cpu;
+
+ u16 rdh;
+ u16 rdt;
+};
+
+#define E1000_DESC_UNUSED(R) \
+ ((((R)->next_to_clean > (R)->next_to_use) \
+ ? 0 : (R)->count) + (R)->next_to_clean - (R)->next_to_use - 1)
+
+#define E1000_RX_DESC_EXT(R, i) \
+ (&(((union e1000_rx_desc_extended *)((R).desc))[i]))
+#define E1000_GET_DESC(R, i, type) (&(((struct type *)((R).desc))[i]))
+#define E1000_RX_DESC(R, i) E1000_GET_DESC(R, i, e1000_rx_desc)
+#define E1000_TX_DESC(R, i) E1000_GET_DESC(R, i, e1000_tx_desc)
+#define E1000_CONTEXT_DESC(R, i) E1000_GET_DESC(R, i, e1000_context_desc)
+
+/* board specific private data structure */
+
+struct e1000_adapter {
+ struct timer_list tx_fifo_stall_timer;
+ struct timer_list watchdog_timer;
+ struct timer_list phy_info_timer;
+ struct vlan_group *vlgrp;
+ u16 mng_vlan_id;
+ u32 bd_number;
+ u32 rx_buffer_len;
+ u32 wol;
+ u32 smartspeed;
+ u32 en_mng_pt;
+ u16 link_speed;
+ u16 link_duplex;
+ spinlock_t stats_lock;
+ spinlock_t tx_queue_lock;
+ unsigned int total_tx_bytes;
+ unsigned int total_tx_packets;
+ unsigned int total_rx_bytes;
+ unsigned int total_rx_packets;
+ /* Interrupt Throttle Rate */
+ u32 itr;
+ u32 itr_setting;
+ u16 tx_itr;
+ u16 rx_itr;
+
+ struct work_struct reset_task;
+ u8 fc_autoneg;
+
+ struct timer_list blink_timer;
+ unsigned long led_status;
+
+ /* TX */
+ struct e1000_tx_ring *tx_ring; /* One per active queue */
+ unsigned int restart_queue;
+ unsigned long tx_queue_len;
+ u32 txd_cmd;
+ u32 tx_int_delay;
+ u32 tx_abs_int_delay;
+ u32 gotcl;
+ u64 gotcl_old;
+ u64 tpt_old;
+ u64 colc_old;
+ u32 tx_timeout_count;
+ u32 tx_fifo_head;
+ u32 tx_head_addr;
+ u32 tx_fifo_size;
+ u8 tx_timeout_factor;
+ atomic_t tx_fifo_stall;
+ bool pcix_82544;
+ bool detect_tx_hung;
+
+ /* RX */
+ bool (*clean_rx)(struct e1000_adapter *adapter,
+ struct e1000_rx_ring *rx_ring,
+ int *work_done, int work_to_do);
+ void (*alloc_rx_buf)(struct e1000_adapter *adapter,
+ struct e1000_rx_ring *rx_ring,
+ int cleaned_count);
+ struct e1000_rx_ring *rx_ring; /* One per active queue */
+ struct napi_struct napi;
+ struct net_device *polling_netdev; /* One per active queue */
+
+ int num_tx_queues;
+ int num_rx_queues;
+
+ u64 hw_csum_err;
+ u64 hw_csum_good;
+ u64 rx_hdr_split;
+ u32 alloc_rx_buff_failed;
+ u32 rx_int_delay;
+ u32 rx_abs_int_delay;
+ bool rx_csum;
+ u32 gorcl;
+ u64 gorcl_old;
+
+ /* OS defined structs */
+ struct net_device *netdev;
+ struct pci_dev *pdev;
+ struct net_device_stats net_stats;
+
+ /* structs defined in e1000_hw.h */
+ struct e1000_hw hw;
+ struct e1000_hw_stats stats;
+ struct e1000_phy_info phy_info;
+ struct e1000_phy_stats phy_stats;
+
+ u32 test_icr;
+ struct e1000_tx_ring test_tx_ring;
+ struct e1000_rx_ring test_rx_ring;
+
+ int msg_enable;
+ bool have_msi;
+
+ /* to not mess up cache alignment, always add to the bottom */
+ bool tso_force;
+ bool smart_power_down; /* phy smart power down */
+ bool quad_port_a;
+ unsigned long flags;
+ u32 eeprom_wol;
+
+ /* for ioport free */
+ int bars;
+ int need_ioport;
+
+	ec_device_t *ecdev;
+	unsigned long ec_watchdog_jiffies;
+};
+
+enum e1000_state_t {
+ __E1000_TESTING,
+ __E1000_RESETTING,
+ __E1000_DOWN
+};
+
+extern char e1000_driver_name[];
+extern const char e1000_driver_version[];
+
+extern int e1000_up(struct e1000_adapter *adapter);
+extern void e1000_down(struct e1000_adapter *adapter);
+extern void e1000_reinit_locked(struct e1000_adapter *adapter);
+extern void e1000_reset(struct e1000_adapter *adapter);
+extern int e1000_set_spd_dplx(struct e1000_adapter *adapter, u16 spddplx);
+extern int e1000_setup_all_rx_resources(struct e1000_adapter *adapter);
+extern int e1000_setup_all_tx_resources(struct e1000_adapter *adapter);
+extern void e1000_free_all_rx_resources(struct e1000_adapter *adapter);
+extern void e1000_free_all_tx_resources(struct e1000_adapter *adapter);
+extern void e1000_update_stats(struct e1000_adapter *adapter);
+extern void e1000_power_up_phy(struct e1000_adapter *);
+extern void e1000_set_ethtool_ops(struct net_device *netdev);
+extern void e1000_check_options(struct e1000_adapter *adapter);
+
+#endif /* _E1000_H_ */
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/devices/e1000/e1000-2.6.28-orig.h Mon Feb 07 21:30:25 2011 +0100
@@ -0,0 +1,355 @@
+/*******************************************************************************
+
+ Intel PRO/1000 Linux driver
+ Copyright(c) 1999 - 2006 Intel Corporation.
+
+ This program is free software; you can redistribute it and/or modify it
+ under the terms and conditions of the GNU General Public License,
+ version 2, as published by the Free Software Foundation.
+
+ This program is distributed in the hope it will be useful, but WITHOUT
+ ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ more details.
+
+ You should have received a copy of the GNU General Public License along with
+ this program; if not, write to the Free Software Foundation, Inc.,
+ 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
+
+ The full GNU General Public License is included in this distribution in
+ the file called "COPYING".
+
+ Contact Information:
+ Linux NICS <linux.nics@intel.com>
+ e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
+ Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
+
+*******************************************************************************/
+
+
+/* Linux PRO/1000 Ethernet Driver main header file */
+
+#ifndef _E1000_H_
+#define _E1000_H_
+
+#include <linux/stddef.h>
+#include <linux/module.h>
+#include <linux/types.h>
+#include <asm/byteorder.h>
+#include <linux/init.h>
+#include <linux/mm.h>
+#include <linux/errno.h>
+#include <linux/ioport.h>
+#include <linux/pci.h>
+#include <linux/kernel.h>
+#include <linux/netdevice.h>
+#include <linux/etherdevice.h>
+#include <linux/skbuff.h>
+#include <linux/delay.h>
+#include <linux/timer.h>
+#include <linux/slab.h>
+#include <linux/vmalloc.h>
+#include <linux/interrupt.h>
+#include <linux/string.h>
+#include <linux/pagemap.h>
+#include <linux/dma-mapping.h>
+#include <linux/bitops.h>
+#include <asm/io.h>
+#include <asm/irq.h>
+#include <linux/capability.h>
+#include <linux/in.h>
+#include <linux/ip.h>
+#include <linux/ipv6.h>
+#include <linux/tcp.h>
+#include <linux/udp.h>
+#include <net/pkt_sched.h>
+#include <linux/list.h>
+#include <linux/reboot.h>
+#include <net/checksum.h>
+#include <linux/mii.h>
+#include <linux/ethtool.h>
+#include <linux/if_vlan.h>
+
+#define BAR_0 0
+#define BAR_1 1
+#define BAR_5 5
+
+#define INTEL_E1000_ETHERNET_DEVICE(device_id) {\
+ PCI_DEVICE(PCI_VENDOR_ID_INTEL, device_id)}
+
+struct e1000_adapter;
+
+#include "e1000_hw.h"
+
+#ifdef DBG
+#define E1000_DBG(args...) printk(KERN_DEBUG "e1000: " args)
+#else
+#define E1000_DBG(args...)
+#endif
+
+#define E1000_ERR(args...) printk(KERN_ERR "e1000: " args)
+
+#define PFX "e1000: "
+
+#define DPRINTK(nlevel, klevel, fmt, args...) \
+do { \
+ if (NETIF_MSG_##nlevel & adapter->msg_enable) \
+ printk(KERN_##klevel PFX "%s: %s: " fmt, \
+ adapter->netdev->name, __func__, ##args); \
+} while (0)
+
+#define E1000_MAX_INTR 10
+
+/* TX/RX descriptor defines */
+#define E1000_DEFAULT_TXD 256
+#define E1000_MAX_TXD 256
+#define E1000_MIN_TXD 80
+#define E1000_MAX_82544_TXD 4096
+
+#define E1000_DEFAULT_RXD 256
+#define E1000_MAX_RXD 256
+#define E1000_MIN_RXD 80
+#define E1000_MAX_82544_RXD 4096
+
+/* this is the size past which hardware will drop packets when setting LPE=0 */
+#define MAXIMUM_ETHERNET_VLAN_SIZE 1522
+
+/* Supported Rx Buffer Sizes */
+#define E1000_RXBUFFER_128 128 /* Used for packet split */
+#define E1000_RXBUFFER_256 256 /* Used for packet split */
+#define E1000_RXBUFFER_512 512
+#define E1000_RXBUFFER_1024 1024
+#define E1000_RXBUFFER_2048 2048
+#define E1000_RXBUFFER_4096 4096
+#define E1000_RXBUFFER_8192 8192
+#define E1000_RXBUFFER_16384 16384
+
+/* SmartSpeed delimiters */
+#define E1000_SMARTSPEED_DOWNSHIFT 3
+#define E1000_SMARTSPEED_MAX 15
+
+/* Packet Buffer allocations */
+#define E1000_PBA_BYTES_SHIFT 0xA
+#define E1000_TX_HEAD_ADDR_SHIFT 7
+#define E1000_PBA_TX_MASK 0xFFFF0000
+
+/* Flow Control Watermarks */
+#define E1000_FC_HIGH_DIFF 0x1638 /* High: 5688 bytes below Rx FIFO size */
+#define E1000_FC_LOW_DIFF 0x1640 /* Low: 5696 bytes below Rx FIFO size */
+
+#define E1000_FC_PAUSE_TIME 0x0680 /* 858 usec */
+
+/* How many Tx Descriptors do we need to call netif_wake_queue ? */
+#define E1000_TX_QUEUE_WAKE 16
+/* How many Rx Buffers do we bundle into one write to the hardware ? */
+#define E1000_RX_BUFFER_WRITE 16 /* Must be power of 2 */
+
+#define AUTO_ALL_MODES 0
+#define E1000_EEPROM_82544_APM 0x0004
+#define E1000_EEPROM_ICH8_APME 0x0004
+#define E1000_EEPROM_APME 0x0400
+
+#ifndef E1000_MASTER_SLAVE
+/* Switch to override PHY master/slave setting */
+#define E1000_MASTER_SLAVE e1000_ms_hw_default
+#endif
+
+#define E1000_MNG_VLAN_NONE (-1)
+
+/* wrapper around a pointer to a socket buffer,
+ * so a DMA handle can be stored along with the buffer */
+struct e1000_buffer {
+ struct sk_buff *skb;
+ dma_addr_t dma;
+ unsigned long time_stamp;
+ u16 length;
+ u16 next_to_watch;
+};
+
+struct e1000_tx_ring {
+ /* pointer to the descriptor ring memory */
+ void *desc;
+ /* physical address of the descriptor ring */
+ dma_addr_t dma;
+ /* length of descriptor ring in bytes */
+ unsigned int size;
+ /* number of descriptors in the ring */
+ unsigned int count;
+ /* next descriptor to associate a buffer with */
+ unsigned int next_to_use;
+ /* next descriptor to check for DD status bit */
+ unsigned int next_to_clean;
+ /* array of buffer information structs */
+ struct e1000_buffer *buffer_info;
+
+ spinlock_t tx_lock;
+ u16 tdh;
+ u16 tdt;
+ bool last_tx_tso;
+};
+
+struct e1000_rx_ring {
+ /* pointer to the descriptor ring memory */
+ void *desc;
+ /* physical address of the descriptor ring */
+ dma_addr_t dma;
+ /* length of descriptor ring in bytes */
+ unsigned int size;
+ /* number of descriptors in the ring */
+ unsigned int count;
+ /* next descriptor to associate a buffer with */
+ unsigned int next_to_use;
+ /* next descriptor to check for DD status bit */
+ unsigned int next_to_clean;
+ /* array of buffer information structs */
+ struct e1000_buffer *buffer_info;
+
+ /* cpu for rx queue */
+ int cpu;
+
+ u16 rdh;
+ u16 rdt;
+};
+
+#define E1000_DESC_UNUSED(R) \
+ ((((R)->next_to_clean > (R)->next_to_use) \
+ ? 0 : (R)->count) + (R)->next_to_clean - (R)->next_to_use - 1)
+
+#define E1000_RX_DESC_EXT(R, i) \
+ (&(((union e1000_rx_desc_extended *)((R).desc))[i]))
+#define E1000_GET_DESC(R, i, type) (&(((struct type *)((R).desc))[i]))
+#define E1000_RX_DESC(R, i) E1000_GET_DESC(R, i, e1000_rx_desc)
+#define E1000_TX_DESC(R, i) E1000_GET_DESC(R, i, e1000_tx_desc)
+#define E1000_CONTEXT_DESC(R, i) E1000_GET_DESC(R, i, e1000_context_desc)
+
+/* board specific private data structure */
+
+struct e1000_adapter {
+ struct timer_list tx_fifo_stall_timer;
+ struct timer_list watchdog_timer;
+ struct timer_list phy_info_timer;
+ struct vlan_group *vlgrp;
+ u16 mng_vlan_id;
+ u32 bd_number;
+ u32 rx_buffer_len;
+ u32 wol;
+ u32 smartspeed;
+ u32 en_mng_pt;
+ u16 link_speed;
+ u16 link_duplex;
+ spinlock_t stats_lock;
+ spinlock_t tx_queue_lock;
+ unsigned int total_tx_bytes;
+ unsigned int total_tx_packets;
+ unsigned int total_rx_bytes;
+ unsigned int total_rx_packets;
+ /* Interrupt Throttle Rate */
+ u32 itr;
+ u32 itr_setting;
+ u16 tx_itr;
+ u16 rx_itr;
+
+ struct work_struct reset_task;
+ u8 fc_autoneg;
+
+ struct timer_list blink_timer;
+ unsigned long led_status;
+
+ /* TX */
+ struct e1000_tx_ring *tx_ring; /* One per active queue */
+ unsigned int restart_queue;
+ unsigned long tx_queue_len;
+ u32 txd_cmd;
+ u32 tx_int_delay;
+ u32 tx_abs_int_delay;
+ u32 gotcl;
+ u64 gotcl_old;
+ u64 tpt_old;
+ u64 colc_old;
+ u32 tx_timeout_count;
+ u32 tx_fifo_head;
+ u32 tx_head_addr;
+ u32 tx_fifo_size;
+ u8 tx_timeout_factor;
+ atomic_t tx_fifo_stall;
+ bool pcix_82544;
+ bool detect_tx_hung;
+
+ /* RX */
+ bool (*clean_rx)(struct e1000_adapter *adapter,
+ struct e1000_rx_ring *rx_ring,
+ int *work_done, int work_to_do);
+ void (*alloc_rx_buf)(struct e1000_adapter *adapter,
+ struct e1000_rx_ring *rx_ring,
+ int cleaned_count);
+ struct e1000_rx_ring *rx_ring; /* One per active queue */
+ struct napi_struct napi;
+ struct net_device *polling_netdev; /* One per active queue */
+
+ int num_tx_queues;
+ int num_rx_queues;
+
+ u64 hw_csum_err;
+ u64 hw_csum_good;
+ u64 rx_hdr_split;
+ u32 alloc_rx_buff_failed;
+ u32 rx_int_delay;
+ u32 rx_abs_int_delay;
+ bool rx_csum;
+ u32 gorcl;
+ u64 gorcl_old;
+
+ /* OS defined structs */
+ struct net_device *netdev;
+ struct pci_dev *pdev;
+ struct net_device_stats net_stats;
+
+ /* structs defined in e1000_hw.h */
+ struct e1000_hw hw;
+ struct e1000_hw_stats stats;
+ struct e1000_phy_info phy_info;
+ struct e1000_phy_stats phy_stats;
+
+ u32 test_icr;
+ struct e1000_tx_ring test_tx_ring;
+ struct e1000_rx_ring test_rx_ring;
+
+ int msg_enable;
+ bool have_msi;
+
+ /* to not mess up cache alignment, always add to the bottom */
+ bool tso_force;
+ bool smart_power_down; /* phy smart power down */
+ bool quad_port_a;
+ unsigned long flags;
+ u32 eeprom_wol;
+
+ /* for ioport free */
+ int bars;
+ int need_ioport;
+};
+
+enum e1000_state_t {
+ __E1000_TESTING,
+ __E1000_RESETTING,
+ __E1000_DOWN
+};
+
+extern char e1000_driver_name[];
+extern const char e1000_driver_version[];
+
+extern int e1000_up(struct e1000_adapter *adapter);
+extern void e1000_down(struct e1000_adapter *adapter);
+extern void e1000_reinit_locked(struct e1000_adapter *adapter);
+extern void e1000_reset(struct e1000_adapter *adapter);
+extern int e1000_set_spd_dplx(struct e1000_adapter *adapter, u16 spddplx);
+extern int e1000_setup_all_rx_resources(struct e1000_adapter *adapter);
+extern int e1000_setup_all_tx_resources(struct e1000_adapter *adapter);
+extern void e1000_free_all_rx_resources(struct e1000_adapter *adapter);
+extern void e1000_free_all_tx_resources(struct e1000_adapter *adapter);
+extern void e1000_update_stats(struct e1000_adapter *adapter);
+extern void e1000_power_up_phy(struct e1000_adapter *);
+extern void e1000_set_ethtool_ops(struct net_device *netdev);
+extern void e1000_check_options(struct e1000_adapter *adapter);
+
+#endif /* _E1000_H_ */
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/devices/e1000/e1000_ethtool-2.6.28-ethercat.c Mon Feb 07 21:30:25 2011 +0100
@@ -0,0 +1,2009 @@
+/*******************************************************************************
+
+ Intel PRO/1000 Linux driver
+ Copyright(c) 1999 - 2006 Intel Corporation.
+
+ This program is free software; you can redistribute it and/or modify it
+ under the terms and conditions of the GNU General Public License,
+ version 2, as published by the Free Software Foundation.
+
+ This program is distributed in the hope it will be useful, but WITHOUT
+ ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ more details.
+
+ You should have received a copy of the GNU General Public License along with
+ this program; if not, write to the Free Software Foundation, Inc.,
+ 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
+
+ The full GNU General Public License is included in this distribution in
+ the file called "COPYING".
+
+ Contact Information:
+ Linux NICS <linux.nics@intel.com>
+ e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
+ Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
+
+*******************************************************************************/
+
+/* ethtool support for e1000 */
+
+#include "e1000-2.6.27-ethercat.h"
+#include <asm/uaccess.h>
+
+struct e1000_stats {
+ char stat_string[ETH_GSTRING_LEN];
+ int sizeof_stat;
+ int stat_offset;
+};
+
+#define E1000_STAT(m) FIELD_SIZEOF(struct e1000_adapter, m), \
+ offsetof(struct e1000_adapter, m)
+static const struct e1000_stats e1000_gstrings_stats[] = {
+ { "rx_packets", E1000_STAT(stats.gprc) },
+ { "tx_packets", E1000_STAT(stats.gptc) },
+ { "rx_bytes", E1000_STAT(stats.gorcl) },
+ { "tx_bytes", E1000_STAT(stats.gotcl) },
+ { "rx_broadcast", E1000_STAT(stats.bprc) },
+ { "tx_broadcast", E1000_STAT(stats.bptc) },
+ { "rx_multicast", E1000_STAT(stats.mprc) },
+ { "tx_multicast", E1000_STAT(stats.mptc) },
+ { "rx_errors", E1000_STAT(stats.rxerrc) },
+ { "tx_errors", E1000_STAT(stats.txerrc) },
+ { "tx_dropped", E1000_STAT(net_stats.tx_dropped) },
+ { "multicast", E1000_STAT(stats.mprc) },
+ { "collisions", E1000_STAT(stats.colc) },
+ { "rx_length_errors", E1000_STAT(stats.rlerrc) },
+ { "rx_over_errors", E1000_STAT(net_stats.rx_over_errors) },
+ { "rx_crc_errors", E1000_STAT(stats.crcerrs) },
+ { "rx_frame_errors", E1000_STAT(net_stats.rx_frame_errors) },
+ { "rx_no_buffer_count", E1000_STAT(stats.rnbc) },
+ { "rx_missed_errors", E1000_STAT(stats.mpc) },
+ { "tx_aborted_errors", E1000_STAT(stats.ecol) },
+ { "tx_carrier_errors", E1000_STAT(stats.tncrs) },
+ { "tx_fifo_errors", E1000_STAT(net_stats.tx_fifo_errors) },
+ { "tx_heartbeat_errors", E1000_STAT(net_stats.tx_heartbeat_errors) },
+ { "tx_window_errors", E1000_STAT(stats.latecol) },
+ { "tx_abort_late_coll", E1000_STAT(stats.latecol) },
+ { "tx_deferred_ok", E1000_STAT(stats.dc) },
+ { "tx_single_coll_ok", E1000_STAT(stats.scc) },
+ { "tx_multi_coll_ok", E1000_STAT(stats.mcc) },
+ { "tx_timeout_count", E1000_STAT(tx_timeout_count) },
+ { "tx_restart_queue", E1000_STAT(restart_queue) },
+ { "rx_long_length_errors", E1000_STAT(stats.roc) },
+ { "rx_short_length_errors", E1000_STAT(stats.ruc) },
+ { "rx_align_errors", E1000_STAT(stats.algnerrc) },
+ { "tx_tcp_seg_good", E1000_STAT(stats.tsctc) },
+ { "tx_tcp_seg_failed", E1000_STAT(stats.tsctfc) },
+ { "rx_flow_control_xon", E1000_STAT(stats.xonrxc) },
+ { "rx_flow_control_xoff", E1000_STAT(stats.xoffrxc) },
+ { "tx_flow_control_xon", E1000_STAT(stats.xontxc) },
+ { "tx_flow_control_xoff", E1000_STAT(stats.xofftxc) },
+ { "rx_long_byte_count", E1000_STAT(stats.gorcl) },
+ { "rx_csum_offload_good", E1000_STAT(hw_csum_good) },
+ { "rx_csum_offload_errors", E1000_STAT(hw_csum_err) },
+ { "rx_header_split", E1000_STAT(rx_hdr_split) },
+ { "alloc_rx_buff_failed", E1000_STAT(alloc_rx_buff_failed) },
+ { "tx_smbus", E1000_STAT(stats.mgptc) },
+ { "rx_smbus", E1000_STAT(stats.mgprc) },
+ { "dropped_smbus", E1000_STAT(stats.mgpdc) },
+};
+
+#define E1000_QUEUE_STATS_LEN 0
+#define E1000_GLOBAL_STATS_LEN ARRAY_SIZE(e1000_gstrings_stats)
+#define E1000_STATS_LEN (E1000_GLOBAL_STATS_LEN + E1000_QUEUE_STATS_LEN)
+static const char e1000_gstrings_test[][ETH_GSTRING_LEN] = {
+ "Register test (offline)", "Eeprom test (offline)",
+ "Interrupt test (offline)", "Loopback test (offline)",
+ "Link test (on/offline)"
+};
+#define E1000_TEST_LEN ARRAY_SIZE(e1000_gstrings_test)
+
+static int e1000_get_settings(struct net_device *netdev,
+ struct ethtool_cmd *ecmd)
+{
+ struct e1000_adapter *adapter = netdev_priv(netdev);
+ struct e1000_hw *hw = &adapter->hw;
+
+ if (hw->media_type == e1000_media_type_copper) {
+
+ ecmd->supported = (SUPPORTED_10baseT_Half |
+ SUPPORTED_10baseT_Full |
+ SUPPORTED_100baseT_Half |
+ SUPPORTED_100baseT_Full |
+ SUPPORTED_1000baseT_Full|
+ SUPPORTED_Autoneg |
+ SUPPORTED_TP);
+ if (hw->phy_type == e1000_phy_ife)
+ ecmd->supported &= ~SUPPORTED_1000baseT_Full;
+ ecmd->advertising = ADVERTISED_TP;
+
+ if (hw->autoneg == 1) {
+ ecmd->advertising |= ADVERTISED_Autoneg;
+ /* the e1000 autoneg seems to match ethtool nicely */
+ ecmd->advertising |= hw->autoneg_advertised;
+ }
+
+ ecmd->port = PORT_TP;
+ ecmd->phy_address = hw->phy_addr;
+
+ if (hw->mac_type == e1000_82543)
+ ecmd->transceiver = XCVR_EXTERNAL;
+ else
+ ecmd->transceiver = XCVR_INTERNAL;
+
+ } else {
+ ecmd->supported = (SUPPORTED_1000baseT_Full |
+ SUPPORTED_FIBRE |
+ SUPPORTED_Autoneg);
+
+ ecmd->advertising = (ADVERTISED_1000baseT_Full |
+ ADVERTISED_FIBRE |
+ ADVERTISED_Autoneg);
+
+ ecmd->port = PORT_FIBRE;
+
+ if (hw->mac_type >= e1000_82545)
+ ecmd->transceiver = XCVR_INTERNAL;
+ else
+ ecmd->transceiver = XCVR_EXTERNAL;
+ }
+
+ if (er32(STATUS) & E1000_STATUS_LU) {
+
+ e1000_get_speed_and_duplex(hw, &adapter->link_speed,
+ &adapter->link_duplex);
+ ecmd->speed = adapter->link_speed;
+
+	/* unfortunately FULL_DUPLEX != DUPLEX_FULL
+ * and HALF_DUPLEX != DUPLEX_HALF */
+
+ if (adapter->link_duplex == FULL_DUPLEX)
+ ecmd->duplex = DUPLEX_FULL;
+ else
+ ecmd->duplex = DUPLEX_HALF;
+ } else {
+ ecmd->speed = -1;
+ ecmd->duplex = -1;
+ }
+
+ ecmd->autoneg = ((hw->media_type == e1000_media_type_fiber) ||
+ hw->autoneg) ? AUTONEG_ENABLE : AUTONEG_DISABLE;
+ return 0;
+}
+
+static int e1000_set_settings(struct net_device *netdev,
+ struct ethtool_cmd *ecmd)
+{
+ struct e1000_adapter *adapter = netdev_priv(netdev);
+ struct e1000_hw *hw = &adapter->hw;
+
+ if (adapter->ecdev)
+ return -EBUSY;
+
+ /* When SoL/IDER sessions are active, autoneg/speed/duplex
+ * cannot be changed */
+ if (e1000_check_phy_reset_block(hw)) {
+ DPRINTK(DRV, ERR, "Cannot change link characteristics "
+ "when SoL/IDER is active.\n");
+ return -EINVAL;
+ }
+
+ while (test_and_set_bit(__E1000_RESETTING, &adapter->flags))
+ msleep(1);
+
+ if (ecmd->autoneg == AUTONEG_ENABLE) {
+ hw->autoneg = 1;
+ if (hw->media_type == e1000_media_type_fiber)
+ hw->autoneg_advertised = ADVERTISED_1000baseT_Full |
+ ADVERTISED_FIBRE |
+ ADVERTISED_Autoneg;
+ else
+ hw->autoneg_advertised = ecmd->advertising |
+ ADVERTISED_TP |
+ ADVERTISED_Autoneg;
+ ecmd->advertising = hw->autoneg_advertised;
+ } else
+ if (e1000_set_spd_dplx(adapter, ecmd->speed + ecmd->duplex)) {
+ clear_bit(__E1000_RESETTING, &adapter->flags);
+ return -EINVAL;
+ }
+
+ /* reset the link */
+
+ if (netif_running(adapter->netdev)) {
+ e1000_down(adapter);
+ e1000_up(adapter);
+ } else
+ e1000_reset(adapter);
+
+ clear_bit(__E1000_RESETTING, &adapter->flags);
+ return 0;
+}
+
+static void e1000_get_pauseparam(struct net_device *netdev,
+ struct ethtool_pauseparam *pause)
+{
+ struct e1000_adapter *adapter = netdev_priv(netdev);
+ struct e1000_hw *hw = &adapter->hw;
+
+ pause->autoneg =
+ (adapter->fc_autoneg ? AUTONEG_ENABLE : AUTONEG_DISABLE);
+
+ if (hw->fc == E1000_FC_RX_PAUSE)
+ pause->rx_pause = 1;
+ else if (hw->fc == E1000_FC_TX_PAUSE)
+ pause->tx_pause = 1;
+ else if (hw->fc == E1000_FC_FULL) {
+ pause->rx_pause = 1;
+ pause->tx_pause = 1;
+ }
+}
+
+static int e1000_set_pauseparam(struct net_device *netdev,
+ struct ethtool_pauseparam *pause)
+{
+ struct e1000_adapter *adapter = netdev_priv(netdev);
+ struct e1000_hw *hw = &adapter->hw;
+ int retval = 0;
+
+ if (adapter->ecdev)
+ return -EBUSY;
+
+ adapter->fc_autoneg = pause->autoneg;
+
+ while (test_and_set_bit(__E1000_RESETTING, &adapter->flags))
+ msleep(1);
+
+ if (pause->rx_pause && pause->tx_pause)
+ hw->fc = E1000_FC_FULL;
+ else if (pause->rx_pause && !pause->tx_pause)
+ hw->fc = E1000_FC_RX_PAUSE;
+ else if (!pause->rx_pause && pause->tx_pause)
+ hw->fc = E1000_FC_TX_PAUSE;
+ else if (!pause->rx_pause && !pause->tx_pause)
+ hw->fc = E1000_FC_NONE;
+
+ hw->original_fc = hw->fc;
+
+ if (adapter->fc_autoneg == AUTONEG_ENABLE) {
+ if (netif_running(adapter->netdev)) {
+ e1000_down(adapter);
+ e1000_up(adapter);
+ } else
+ e1000_reset(adapter);
+ } else
+ retval = ((hw->media_type == e1000_media_type_fiber) ?
+ e1000_setup_link(hw) : e1000_force_mac_fc(hw));
+
+ clear_bit(__E1000_RESETTING, &adapter->flags);
+ return retval;
+}
+
+static u32 e1000_get_rx_csum(struct net_device *netdev)
+{
+ struct e1000_adapter *adapter = netdev_priv(netdev);
+ return adapter->rx_csum;
+}
+
+static int e1000_set_rx_csum(struct net_device *netdev, u32 data)
+{
+ struct e1000_adapter *adapter = netdev_priv(netdev);
+
+ if (adapter->ecdev)
+ return -EBUSY;
+
+ adapter->rx_csum = data;
+
+ if (netif_running(netdev))
+ e1000_reinit_locked(adapter);
+ else
+ e1000_reset(adapter);
+ return 0;
+}
+
+static u32 e1000_get_tx_csum(struct net_device *netdev)
+{
+ return (netdev->features & NETIF_F_HW_CSUM) != 0;
+}
+
+static int e1000_set_tx_csum(struct net_device *netdev, u32 data)
+{
+ struct e1000_adapter *adapter = netdev_priv(netdev);
+ struct e1000_hw *hw = &adapter->hw;
+
+ if (hw->mac_type < e1000_82543) {
+ if (!data)
+ return -EINVAL;
+ return 0;
+ }
+
+ if (data)
+ netdev->features |= NETIF_F_HW_CSUM;
+ else
+ netdev->features &= ~NETIF_F_HW_CSUM;
+
+ return 0;
+}
+
+static int e1000_set_tso(struct net_device *netdev, u32 data)
+{
+ struct e1000_adapter *adapter = netdev_priv(netdev);
+ struct e1000_hw *hw = &adapter->hw;
+
+ if ((hw->mac_type < e1000_82544) ||
+ (hw->mac_type == e1000_82547))
+ return data ? -EINVAL : 0;
+
+ if (data)
+ netdev->features |= NETIF_F_TSO;
+ else
+ netdev->features &= ~NETIF_F_TSO;
+
+ if (data && (adapter->hw.mac_type > e1000_82547_rev_2))
+ netdev->features |= NETIF_F_TSO6;
+ else
+ netdev->features &= ~NETIF_F_TSO6;
+
+ DPRINTK(PROBE, INFO, "TSO is %s\n", data ? "Enabled" : "Disabled");
+ adapter->tso_force = true;
+ return 0;
+}
+
+static u32 e1000_get_msglevel(struct net_device *netdev)
+{
+ struct e1000_adapter *adapter = netdev_priv(netdev);
+ return adapter->msg_enable;
+}
+
+static void e1000_set_msglevel(struct net_device *netdev, u32 data)
+{
+ struct e1000_adapter *adapter = netdev_priv(netdev);
+ adapter->msg_enable = data;
+}
+
+static int e1000_get_regs_len(struct net_device *netdev)
+{
+#define E1000_REGS_LEN 32
+ return E1000_REGS_LEN * sizeof(u32);
+}
+
+static void e1000_get_regs(struct net_device *netdev, struct ethtool_regs *regs,
+ void *p)
+{
+ struct e1000_adapter *adapter = netdev_priv(netdev);
+ struct e1000_hw *hw = &adapter->hw;
+ u32 *regs_buff = p;
+ u16 phy_data;
+
+ memset(p, 0, E1000_REGS_LEN * sizeof(u32));
+
+ regs->version = (1 << 24) | (hw->revision_id << 16) | hw->device_id;
+
+ regs_buff[0] = er32(CTRL);
+ regs_buff[1] = er32(STATUS);
+
+ regs_buff[2] = er32(RCTL);
+ regs_buff[3] = er32(RDLEN);
+ regs_buff[4] = er32(RDH);
+ regs_buff[5] = er32(RDT);
+ regs_buff[6] = er32(RDTR);
+
+ regs_buff[7] = er32(TCTL);
+ regs_buff[8] = er32(TDLEN);
+ regs_buff[9] = er32(TDH);
+ regs_buff[10] = er32(TDT);
+ regs_buff[11] = er32(TIDV);
+
+ regs_buff[12] = hw->phy_type; /* PHY type (IGP=1, M88=0) */
+ if (hw->phy_type == e1000_phy_igp) {
+ e1000_write_phy_reg(hw, IGP01E1000_PHY_PAGE_SELECT,
+ IGP01E1000_PHY_AGC_A);
+ e1000_read_phy_reg(hw, IGP01E1000_PHY_AGC_A &
+ IGP01E1000_PHY_PAGE_SELECT, &phy_data);
+ regs_buff[13] = (u32)phy_data; /* cable length */
+ e1000_write_phy_reg(hw, IGP01E1000_PHY_PAGE_SELECT,
+ IGP01E1000_PHY_AGC_B);
+ e1000_read_phy_reg(hw, IGP01E1000_PHY_AGC_B &
+ IGP01E1000_PHY_PAGE_SELECT, &phy_data);
+ regs_buff[14] = (u32)phy_data; /* cable length */
+ e1000_write_phy_reg(hw, IGP01E1000_PHY_PAGE_SELECT,
+ IGP01E1000_PHY_AGC_C);
+ e1000_read_phy_reg(hw, IGP01E1000_PHY_AGC_C &
+ IGP01E1000_PHY_PAGE_SELECT, &phy_data);
+ regs_buff[15] = (u32)phy_data; /* cable length */
+ e1000_write_phy_reg(hw, IGP01E1000_PHY_PAGE_SELECT,
+ IGP01E1000_PHY_AGC_D);
+ e1000_read_phy_reg(hw, IGP01E1000_PHY_AGC_D &
+ IGP01E1000_PHY_PAGE_SELECT, &phy_data);
+ regs_buff[16] = (u32)phy_data; /* cable length */
+ regs_buff[17] = 0; /* extended 10bt distance (not needed) */
+ e1000_write_phy_reg(hw, IGP01E1000_PHY_PAGE_SELECT, 0x0);
+ e1000_read_phy_reg(hw, IGP01E1000_PHY_PORT_STATUS &
+ IGP01E1000_PHY_PAGE_SELECT, &phy_data);
+ regs_buff[18] = (u32)phy_data; /* cable polarity */
+ e1000_write_phy_reg(hw, IGP01E1000_PHY_PAGE_SELECT,
+ IGP01E1000_PHY_PCS_INIT_REG);
+ e1000_read_phy_reg(hw, IGP01E1000_PHY_PCS_INIT_REG &
+ IGP01E1000_PHY_PAGE_SELECT, &phy_data);
+ regs_buff[19] = (u32)phy_data; /* cable polarity */
+ regs_buff[20] = 0; /* polarity correction enabled (always) */
+ regs_buff[22] = 0; /* phy receive errors (unavailable) */
+ regs_buff[23] = regs_buff[18]; /* mdix mode */
+ e1000_write_phy_reg(hw, IGP01E1000_PHY_PAGE_SELECT, 0x0);
+ } else {
+ e1000_read_phy_reg(hw, M88E1000_PHY_SPEC_STATUS, &phy_data);
+ regs_buff[13] = (u32)phy_data; /* cable length */
+ regs_buff[14] = 0; /* Dummy (to align w/ IGP phy reg dump) */
+ regs_buff[15] = 0; /* Dummy (to align w/ IGP phy reg dump) */
+ regs_buff[16] = 0; /* Dummy (to align w/ IGP phy reg dump) */
+ e1000_read_phy_reg(hw, M88E1000_PHY_SPEC_CTRL, &phy_data);
+ regs_buff[17] = (u32)phy_data; /* extended 10bt distance */
+ regs_buff[18] = regs_buff[13]; /* cable polarity */
+ regs_buff[19] = 0; /* Dummy (to align w/ IGP phy reg dump) */
+ regs_buff[20] = regs_buff[17]; /* polarity correction */
+ /* phy receive errors */
+ regs_buff[22] = adapter->phy_stats.receive_errors;
+ regs_buff[23] = regs_buff[13]; /* mdix mode */
+ }
+ regs_buff[21] = adapter->phy_stats.idle_errors; /* phy idle errors */
+ e1000_read_phy_reg(hw, PHY_1000T_STATUS, &phy_data);
+ regs_buff[24] = (u32)phy_data; /* phy local receiver status */
+ regs_buff[25] = regs_buff[24]; /* phy remote receiver status */
+ if (hw->mac_type >= e1000_82540 &&
+ hw->mac_type < e1000_82571 &&
+ hw->media_type == e1000_media_type_copper) {
+ regs_buff[26] = er32(MANC);
+ }
+}
+
+static int e1000_get_eeprom_len(struct net_device *netdev)
+{
+ struct e1000_adapter *adapter = netdev_priv(netdev);
+ struct e1000_hw *hw = &adapter->hw;
+
+ return hw->eeprom.word_size * 2;
+}
+
+static int e1000_get_eeprom(struct net_device *netdev,
+ struct ethtool_eeprom *eeprom, u8 *bytes)
+{
+ struct e1000_adapter *adapter = netdev_priv(netdev);
+ struct e1000_hw *hw = &adapter->hw;
+ u16 *eeprom_buff;
+ int first_word, last_word;
+ int ret_val = 0;
+ u16 i;
+
+ if (eeprom->len == 0)
+ return -EINVAL;
+
+ eeprom->magic = hw->vendor_id | (hw->device_id << 16);
+
+ first_word = eeprom->offset >> 1;
+ last_word = (eeprom->offset + eeprom->len - 1) >> 1;
+
+ eeprom_buff = kmalloc(sizeof(u16) *
+ (last_word - first_word + 1), GFP_KERNEL);
+ if (!eeprom_buff)
+ return -ENOMEM;
+
+ if (hw->eeprom.type == e1000_eeprom_spi)
+ ret_val = e1000_read_eeprom(hw, first_word,
+ last_word - first_word + 1,
+ eeprom_buff);
+ else {
+ for (i = 0; i < last_word - first_word + 1; i++) {
+ ret_val = e1000_read_eeprom(hw, first_word + i, 1,
+ &eeprom_buff[i]);
+ if (ret_val)
+ break;
+ }
+ }
+
+ /* Device's eeprom is always little-endian, word addressable */
+ for (i = 0; i < last_word - first_word + 1; i++)
+ le16_to_cpus(&eeprom_buff[i]);
+
+ memcpy(bytes, (u8 *)eeprom_buff + (eeprom->offset & 1),
+ eeprom->len);
+ kfree(eeprom_buff);
+
+ return ret_val;
+}
+
+static int e1000_set_eeprom(struct net_device *netdev,
+ struct ethtool_eeprom *eeprom, u8 *bytes)
+{
+ struct e1000_adapter *adapter = netdev_priv(netdev);
+ struct e1000_hw *hw = &adapter->hw;
+ u16 *eeprom_buff;
+ void *ptr;
+ int max_len, first_word, last_word, ret_val = 0;
+ u16 i;
+
+ if (eeprom->len == 0)
+ return -EOPNOTSUPP;
+
+ if (eeprom->magic != (hw->vendor_id | (hw->device_id << 16)))
+ return -EFAULT;
+
+ max_len = hw->eeprom.word_size * 2;
+
+ first_word = eeprom->offset >> 1;
+ last_word = (eeprom->offset + eeprom->len - 1) >> 1;
+ eeprom_buff = kmalloc(max_len, GFP_KERNEL);
+ if (!eeprom_buff)
+ return -ENOMEM;
+
+ ptr = (void *)eeprom_buff;
+
+ if (eeprom->offset & 1) {
+ /* need read/modify/write of first changed EEPROM word */
+ /* only the second byte of the word is being modified */
+ ret_val = e1000_read_eeprom(hw, first_word, 1,
+ &eeprom_buff[0]);
+ ptr++;
+ }
+ if (((eeprom->offset + eeprom->len) & 1) && (ret_val == 0)) {
+ /* need read/modify/write of last changed EEPROM word */
+ /* only the first byte of the word is being modified */
+ ret_val = e1000_read_eeprom(hw, last_word, 1,
+ &eeprom_buff[last_word - first_word]);
+ }
+
+ /* Device's eeprom is always little-endian, word addressable */
+ for (i = 0; i < last_word - first_word + 1; i++)
+ le16_to_cpus(&eeprom_buff[i]);
+
+ memcpy(ptr, bytes, eeprom->len);
+
+ for (i = 0; i < last_word - first_word + 1; i++)
+ eeprom_buff[i] = cpu_to_le16(eeprom_buff[i]);
+
+ ret_val = e1000_write_eeprom(hw, first_word,
+ last_word - first_word + 1, eeprom_buff);
+
+ /* Update the checksum over the first part of the EEPROM if needed
+	 * and flush shadow RAM for 82573 controllers */
+ if ((ret_val == 0) && ((first_word <= EEPROM_CHECKSUM_REG) ||
+ (hw->mac_type == e1000_82573)))
+ e1000_update_eeprom_checksum(hw);
+
+ kfree(eeprom_buff);
+ return ret_val;
+}
+
+static void e1000_get_drvinfo(struct net_device *netdev,
+ struct ethtool_drvinfo *drvinfo)
+{
+ struct e1000_adapter *adapter = netdev_priv(netdev);
+ struct e1000_hw *hw = &adapter->hw;
+ char firmware_version[32];
+ u16 eeprom_data;
+
+ strncpy(drvinfo->driver, e1000_driver_name, 32);
+ strncpy(drvinfo->version, e1000_driver_version, 32);
+
+ /* EEPROM image version # is reported as firmware version # for
+ * 8257{1|2|3} controllers */
+ e1000_read_eeprom(hw, 5, 1, &eeprom_data);
+ switch (hw->mac_type) {
+ case e1000_82571:
+ case e1000_82572:
+ case e1000_82573:
+ case e1000_80003es2lan:
+ case e1000_ich8lan:
+ sprintf(firmware_version, "%d.%d-%d",
+ (eeprom_data & 0xF000) >> 12,
+ (eeprom_data & 0x0FF0) >> 4,
+ eeprom_data & 0x000F);
+ break;
+ default:
+ sprintf(firmware_version, "N/A");
+ }
+
+ strncpy(drvinfo->fw_version, firmware_version, 32);
+ strncpy(drvinfo->bus_info, pci_name(adapter->pdev), 32);
+ drvinfo->regdump_len = e1000_get_regs_len(netdev);
+ drvinfo->eedump_len = e1000_get_eeprom_len(netdev);
+}
+
+static void e1000_get_ringparam(struct net_device *netdev,
+ struct ethtool_ringparam *ring)
+{
+ struct e1000_adapter *adapter = netdev_priv(netdev);
+ struct e1000_hw *hw = &adapter->hw;
+ e1000_mac_type mac_type = hw->mac_type;
+ struct e1000_tx_ring *txdr = adapter->tx_ring;
+ struct e1000_rx_ring *rxdr = adapter->rx_ring;
+
+ ring->rx_max_pending = (mac_type < e1000_82544) ? E1000_MAX_RXD :
+ E1000_MAX_82544_RXD;
+ ring->tx_max_pending = (mac_type < e1000_82544) ? E1000_MAX_TXD :
+ E1000_MAX_82544_TXD;
+ ring->rx_mini_max_pending = 0;
+ ring->rx_jumbo_max_pending = 0;
+ ring->rx_pending = rxdr->count;
+ ring->tx_pending = txdr->count;
+ ring->rx_mini_pending = 0;
+ ring->rx_jumbo_pending = 0;
+}
+
+static int e1000_set_ringparam(struct net_device *netdev,
+ struct ethtool_ringparam *ring)
+{
+ struct e1000_adapter *adapter = netdev_priv(netdev);
+ struct e1000_hw *hw = &adapter->hw;
+ e1000_mac_type mac_type = hw->mac_type;
+ struct e1000_tx_ring *txdr, *tx_old;
+ struct e1000_rx_ring *rxdr, *rx_old;
+ int i, err;
+
+ if (adapter->ecdev)
+ return -EBUSY;
+
+ if ((ring->rx_mini_pending) || (ring->rx_jumbo_pending))
+ return -EINVAL;
+
+ while (test_and_set_bit(__E1000_RESETTING, &adapter->flags))
+ msleep(1);
+
+ if (netif_running(adapter->netdev))
+ e1000_down(adapter);
+
+ tx_old = adapter->tx_ring;
+ rx_old = adapter->rx_ring;
+
+ err = -ENOMEM;
+ txdr = kcalloc(adapter->num_tx_queues, sizeof(struct e1000_tx_ring), GFP_KERNEL);
+ if (!txdr)
+ goto err_alloc_tx;
+
+ rxdr = kcalloc(adapter->num_rx_queues, sizeof(struct e1000_rx_ring), GFP_KERNEL);
+ if (!rxdr)
+ goto err_alloc_rx;
+
+ adapter->tx_ring = txdr;
+ adapter->rx_ring = rxdr;
+
+ rxdr->count = max(ring->rx_pending,(u32)E1000_MIN_RXD);
+ rxdr->count = min(rxdr->count,(u32)(mac_type < e1000_82544 ?
+ E1000_MAX_RXD : E1000_MAX_82544_RXD));
+ rxdr->count = ALIGN(rxdr->count, REQ_RX_DESCRIPTOR_MULTIPLE);
+
+ txdr->count = max(ring->tx_pending,(u32)E1000_MIN_TXD);
+ txdr->count = min(txdr->count,(u32)(mac_type < e1000_82544 ?
+ E1000_MAX_TXD : E1000_MAX_82544_TXD));
+ txdr->count = ALIGN(txdr->count, REQ_TX_DESCRIPTOR_MULTIPLE);
+
+ for (i = 0; i < adapter->num_tx_queues; i++)
+ txdr[i].count = txdr->count;
+ for (i = 0; i < adapter->num_rx_queues; i++)
+ rxdr[i].count = rxdr->count;
+
+ if (netif_running(adapter->netdev)) {
+ /* Try to get new resources before deleting old */
+ err = e1000_setup_all_rx_resources(adapter);
+ if (err)
+ goto err_setup_rx;
+ err = e1000_setup_all_tx_resources(adapter);
+ if (err)
+ goto err_setup_tx;
+
+ /* save the new, restore the old in order to free it,
+ * then restore the new back again */
+
+ adapter->rx_ring = rx_old;
+ adapter->tx_ring = tx_old;
+ e1000_free_all_rx_resources(adapter);
+ e1000_free_all_tx_resources(adapter);
+ kfree(tx_old);
+ kfree(rx_old);
+ adapter->rx_ring = rxdr;
+ adapter->tx_ring = txdr;
+ err = e1000_up(adapter);
+ if (err)
+ goto err_setup;
+ }
+
+ clear_bit(__E1000_RESETTING, &adapter->flags);
+ return 0;
+err_setup_tx:
+ e1000_free_all_rx_resources(adapter);
+err_setup_rx:
+ adapter->rx_ring = rx_old;
+ adapter->tx_ring = tx_old;
+ kfree(rxdr);
+err_alloc_rx:
+ kfree(txdr);
+err_alloc_tx:
+ e1000_up(adapter);
+err_setup:
+ clear_bit(__E1000_RESETTING, &adapter->flags);
+ return err;
+}
+
+static bool reg_pattern_test(struct e1000_adapter *adapter, u64 *data, int reg,
+ u32 mask, u32 write)
+{
+ struct e1000_hw *hw = &adapter->hw;
+ static const u32 test[] =
+ {0x5A5A5A5A, 0xA5A5A5A5, 0x00000000, 0xFFFFFFFF};
+ u8 __iomem *address = hw->hw_addr + reg;
+ u32 read;
+ int i;
+
+ for (i = 0; i < ARRAY_SIZE(test); i++) {
+ writel(write & test[i], address);
+ read = readl(address);
+ if (read != (write & test[i] & mask)) {
+ DPRINTK(DRV, ERR, "pattern test reg %04X failed: "
+ "got 0x%08X expected 0x%08X\n",
+ reg, read, (write & test[i] & mask));
+ *data = reg;
+ return true;
+ }
+ }
+ return false;
+}
+
+static bool reg_set_and_check(struct e1000_adapter *adapter, u64 *data, int reg,
+ u32 mask, u32 write)
+{
+ struct e1000_hw *hw = &adapter->hw;
+ u8 __iomem *address = hw->hw_addr + reg;
+ u32 read;
+
+ writel(write & mask, address);
+ read = readl(address);
+ if ((read & mask) != (write & mask)) {
+ DPRINTK(DRV, ERR, "set/check reg %04X test failed: "
+ "got 0x%08X expected 0x%08X\n",
+ reg, (read & mask), (write & mask));
+ *data = reg;
+ return true;
+ }
+ return false;
+}
+
+#define REG_PATTERN_TEST(reg, mask, write) \
+ do { \
+ if (reg_pattern_test(adapter, data, \
+ (hw->mac_type >= e1000_82543) \
+ ? E1000_##reg : E1000_82542_##reg, \
+ mask, write)) \
+ return 1; \
+ } while (0)
+
+#define REG_SET_AND_CHECK(reg, mask, write) \
+ do { \
+ if (reg_set_and_check(adapter, data, \
+ (hw->mac_type >= e1000_82543) \
+ ? E1000_##reg : E1000_82542_##reg, \
+ mask, write)) \
+ return 1; \
+ } while (0)
+
+static int e1000_reg_test(struct e1000_adapter *adapter, u64 *data)
+{
+ u32 value, before, after;
+ u32 i, toggle;
+ struct e1000_hw *hw = &adapter->hw;
+
+ /* The status register is Read Only, so a write should fail.
+ * Some bits that get toggled are ignored.
+ */
+ switch (hw->mac_type) {
+ /* there are several bits on newer hardware that are r/w */
+ case e1000_82571:
+ case e1000_82572:
+ case e1000_80003es2lan:
+ toggle = 0x7FFFF3FF;
+ break;
+ case e1000_82573:
+ case e1000_ich8lan:
+ toggle = 0x7FFFF033;
+ break;
+ default:
+ toggle = 0xFFFFF833;
+ break;
+ }
+
+ before = er32(STATUS);
+ value = (er32(STATUS) & toggle);
+ ew32(STATUS, toggle);
+ after = er32(STATUS) & toggle;
+ if (value != after) {
+ DPRINTK(DRV, ERR, "failed STATUS register test got: "
+ "0x%08X expected: 0x%08X\n", after, value);
+ *data = 1;
+ return 1;
+ }
+ /* restore previous status */
+ ew32(STATUS, before);
+
+ if (hw->mac_type != e1000_ich8lan) {
+ REG_PATTERN_TEST(FCAL, 0xFFFFFFFF, 0xFFFFFFFF);
+ REG_PATTERN_TEST(FCAH, 0x0000FFFF, 0xFFFFFFFF);
+ REG_PATTERN_TEST(FCT, 0x0000FFFF, 0xFFFFFFFF);
+ REG_PATTERN_TEST(VET, 0x0000FFFF, 0xFFFFFFFF);
+ }
+
+ REG_PATTERN_TEST(RDTR, 0x0000FFFF, 0xFFFFFFFF);
+ REG_PATTERN_TEST(RDBAH, 0xFFFFFFFF, 0xFFFFFFFF);
+ REG_PATTERN_TEST(RDLEN, 0x000FFF80, 0x000FFFFF);
+ REG_PATTERN_TEST(RDH, 0x0000FFFF, 0x0000FFFF);
+ REG_PATTERN_TEST(RDT, 0x0000FFFF, 0x0000FFFF);
+ REG_PATTERN_TEST(FCRTH, 0x0000FFF8, 0x0000FFF8);
+ REG_PATTERN_TEST(FCTTV, 0x0000FFFF, 0x0000FFFF);
+ REG_PATTERN_TEST(TIPG, 0x3FFFFFFF, 0x3FFFFFFF);
+ REG_PATTERN_TEST(TDBAH, 0xFFFFFFFF, 0xFFFFFFFF);
+ REG_PATTERN_TEST(TDLEN, 0x000FFF80, 0x000FFFFF);
+
+ REG_SET_AND_CHECK(RCTL, 0xFFFFFFFF, 0x00000000);
+
+ before = (hw->mac_type == e1000_ich8lan ?
+ 0x06C3B33E : 0x06DFB3FE);
+ REG_SET_AND_CHECK(RCTL, before, 0x003FFFFB);
+ REG_SET_AND_CHECK(TCTL, 0xFFFFFFFF, 0x00000000);
+
+ if (hw->mac_type >= e1000_82543) {
+
+ REG_SET_AND_CHECK(RCTL, before, 0xFFFFFFFF);
+ REG_PATTERN_TEST(RDBAL, 0xFFFFFFF0, 0xFFFFFFFF);
+ if (hw->mac_type != e1000_ich8lan)
+ REG_PATTERN_TEST(TXCW, 0xC000FFFF, 0x0000FFFF);
+ REG_PATTERN_TEST(TDBAL, 0xFFFFFFF0, 0xFFFFFFFF);
+ REG_PATTERN_TEST(TIDV, 0x0000FFFF, 0x0000FFFF);
+ value = (hw->mac_type == e1000_ich8lan ?
+ E1000_RAR_ENTRIES_ICH8LAN : E1000_RAR_ENTRIES);
+ for (i = 0; i < value; i++) {
+ REG_PATTERN_TEST(RA + (((i << 1) + 1) << 2), 0x8003FFFF,
+ 0xFFFFFFFF);
+ }
+
+ } else {
+
+ REG_SET_AND_CHECK(RCTL, 0xFFFFFFFF, 0x01FFFFFF);
+ REG_PATTERN_TEST(RDBAL, 0xFFFFF000, 0xFFFFFFFF);
+ REG_PATTERN_TEST(TXCW, 0x0000FFFF, 0x0000FFFF);
+ REG_PATTERN_TEST(TDBAL, 0xFFFFF000, 0xFFFFFFFF);
+
+ }
+
+ value = (hw->mac_type == e1000_ich8lan ?
+ E1000_MC_TBL_SIZE_ICH8LAN : E1000_MC_TBL_SIZE);
+ for (i = 0; i < value; i++)
+ REG_PATTERN_TEST(MTA + (i << 2), 0xFFFFFFFF, 0xFFFFFFFF);
+
+ *data = 0;
+ return 0;
+}
+
+static int e1000_eeprom_test(struct e1000_adapter *adapter, u64 *data)
+{
+ struct e1000_hw *hw = &adapter->hw;
+ u16 temp;
+ u16 checksum = 0;
+ u16 i;
+
+ *data = 0;
+ /* Read and add up the contents of the EEPROM */
+ for (i = 0; i < (EEPROM_CHECKSUM_REG + 1); i++) {
+ if ((e1000_read_eeprom(hw, i, 1, &temp)) < 0) {
+ *data = 1;
+ break;
+ }
+ checksum += temp;
+ }
+
+ /* If Checksum is not Correct return error else test passed */
+ if ((checksum != (u16)EEPROM_SUM) && !(*data))
+ *data = 2;
+
+ return *data;
+}
+
+static irqreturn_t e1000_test_intr(int irq, void *data)
+{
+ struct net_device *netdev = (struct net_device *)data;
+ struct e1000_adapter *adapter = netdev_priv(netdev);
+ struct e1000_hw *hw = &adapter->hw;
+
+ adapter->test_icr |= er32(ICR);
+
+ return IRQ_HANDLED;
+}
+
+static int e1000_intr_test(struct e1000_adapter *adapter, u64 *data)
+{
+ struct net_device *netdev = adapter->netdev;
+ u32 mask, i = 0;
+ bool shared_int = true;
+ u32 irq = adapter->pdev->irq;
+ struct e1000_hw *hw = &adapter->hw;
+
+ *data = 0;
+
+ /* NOTE: we don't test MSI interrupts here, yet */
+ /* Hook up test interrupt handler just for this test */
+ if (!request_irq(irq, &e1000_test_intr, IRQF_PROBE_SHARED, netdev->name,
+ netdev))
+ shared_int = false;
+ else if (request_irq(irq, &e1000_test_intr, IRQF_SHARED,
+ netdev->name, netdev)) {
+ *data = 1;
+ return -1;
+ }
+ DPRINTK(HW, INFO, "testing %s interrupt\n",
+ (shared_int ? "shared" : "unshared"));
+
+ /* Disable all the interrupts */
+ ew32(IMC, 0xFFFFFFFF);
+ msleep(10);
+
+ /* Test each interrupt */
+ for (; i < 10; i++) {
+
+ if (hw->mac_type == e1000_ich8lan && i == 8)
+ continue;
+
+ /* Interrupt to test */
+ mask = 1 << i;
+
+ if (!shared_int) {
+ /* Disable the interrupt to be reported in
+ * the cause register and then force the same
+ * interrupt and see if one gets posted. If
+ * an interrupt was posted to the bus, the
+ * test failed.
+ */
+ adapter->test_icr = 0;
+ ew32(IMC, mask);
+ ew32(ICS, mask);
+ msleep(10);
+
+ if (adapter->test_icr & mask) {
+ *data = 3;
+ break;
+ }
+ }
+
+ /* Enable the interrupt to be reported in
+ * the cause register and then force the same
+ * interrupt and see if one gets posted. If
+ * an interrupt was not posted to the bus, the
+ * test failed.
+ */
+ adapter->test_icr = 0;
+ ew32(IMS, mask);
+ ew32(ICS, mask);
+ msleep(10);
+
+ if (!(adapter->test_icr & mask)) {
+ *data = 4;
+ break;
+ }
+
+ if (!shared_int) {
+ /* Disable the other interrupts to be reported in
+ * the cause register and then force the other
+ * interrupts and see if any get posted. If
+ * an interrupt was posted to the bus, the
+ * test failed.
+ */
+ adapter->test_icr = 0;
+ ew32(IMC, ~mask & 0x00007FFF);
+ ew32(ICS, ~mask & 0x00007FFF);
+ msleep(10);
+
+ if (adapter->test_icr) {
+ *data = 5;
+ break;
+ }
+ }
+ }
+
+ /* Disable all the interrupts */
+ ew32(IMC, 0xFFFFFFFF);
+ msleep(10);
+
+ /* Unhook test interrupt handler */
+ free_irq(irq, netdev);
+
+ return *data;
+}
+
+static void e1000_free_desc_rings(struct e1000_adapter *adapter)
+{
+ struct e1000_tx_ring *txdr = &adapter->test_tx_ring;
+ struct e1000_rx_ring *rxdr = &adapter->test_rx_ring;
+ struct pci_dev *pdev = adapter->pdev;
+ int i;
+
+ if (txdr->desc && txdr->buffer_info) {
+ for (i = 0; i < txdr->count; i++) {
+ if (txdr->buffer_info[i].dma)
+ pci_unmap_single(pdev, txdr->buffer_info[i].dma,
+ txdr->buffer_info[i].length,
+ PCI_DMA_TODEVICE);
+ if (txdr->buffer_info[i].skb)
+ dev_kfree_skb(txdr->buffer_info[i].skb);
+ }
+ }
+
+ if (rxdr->desc && rxdr->buffer_info) {
+ for (i = 0; i < rxdr->count; i++) {
+ if (rxdr->buffer_info[i].dma)
+ pci_unmap_single(pdev, rxdr->buffer_info[i].dma,
+ rxdr->buffer_info[i].length,
+ PCI_DMA_FROMDEVICE);
+ if (rxdr->buffer_info[i].skb)
+ dev_kfree_skb(rxdr->buffer_info[i].skb);
+ }
+ }
+
+ if (txdr->desc) {
+ pci_free_consistent(pdev, txdr->size, txdr->desc, txdr->dma);
+ txdr->desc = NULL;
+ }
+ if (rxdr->desc) {
+ pci_free_consistent(pdev, rxdr->size, rxdr->desc, rxdr->dma);
+ rxdr->desc = NULL;
+ }
+
+ kfree(txdr->buffer_info);
+ txdr->buffer_info = NULL;
+ kfree(rxdr->buffer_info);
+ rxdr->buffer_info = NULL;
+
+ return;
+}
+
+static int e1000_setup_desc_rings(struct e1000_adapter *adapter)
+{
+ struct e1000_hw *hw = &adapter->hw;
+ struct e1000_tx_ring *txdr = &adapter->test_tx_ring;
+ struct e1000_rx_ring *rxdr = &adapter->test_rx_ring;
+ struct pci_dev *pdev = adapter->pdev;
+ u32 rctl;
+ int i, ret_val;
+
+ /* Setup Tx descriptor ring and Tx buffers */
+
+ if (!txdr->count)
+ txdr->count = E1000_DEFAULT_TXD;
+
+ txdr->buffer_info = kcalloc(txdr->count, sizeof(struct e1000_buffer),
+ GFP_KERNEL);
+ if (!txdr->buffer_info) {
+ ret_val = 1;
+ goto err_nomem;
+ }
+
+ txdr->size = txdr->count * sizeof(struct e1000_tx_desc);
+ txdr->size = ALIGN(txdr->size, 4096);
+ txdr->desc = pci_alloc_consistent(pdev, txdr->size, &txdr->dma);
+ if (!txdr->desc) {
+ ret_val = 2;
+ goto err_nomem;
+ }
+ memset(txdr->desc, 0, txdr->size);
+ txdr->next_to_use = txdr->next_to_clean = 0;
+
+ ew32(TDBAL, ((u64)txdr->dma & 0x00000000FFFFFFFF));
+ ew32(TDBAH, ((u64)txdr->dma >> 32));
+ ew32(TDLEN, txdr->count * sizeof(struct e1000_tx_desc));
+ ew32(TDH, 0);
+ ew32(TDT, 0);
+ ew32(TCTL, E1000_TCTL_PSP | E1000_TCTL_EN |
+ E1000_COLLISION_THRESHOLD << E1000_CT_SHIFT |
+ E1000_FDX_COLLISION_DISTANCE << E1000_COLD_SHIFT);
+
+ for (i = 0; i < txdr->count; i++) {
+ struct e1000_tx_desc *tx_desc = E1000_TX_DESC(*txdr, i);
+ struct sk_buff *skb;
+ unsigned int size = 1024;
+
+ skb = alloc_skb(size, GFP_KERNEL);
+ if (!skb) {
+ ret_val = 3;
+ goto err_nomem;
+ }
+ skb_put(skb, size);
+ txdr->buffer_info[i].skb = skb;
+ txdr->buffer_info[i].length = skb->len;
+ txdr->buffer_info[i].dma =
+ pci_map_single(pdev, skb->data, skb->len,
+ PCI_DMA_TODEVICE);
+ tx_desc->buffer_addr = cpu_to_le64(txdr->buffer_info[i].dma);
+ tx_desc->lower.data = cpu_to_le32(skb->len);
+ tx_desc->lower.data |= cpu_to_le32(E1000_TXD_CMD_EOP |
+ E1000_TXD_CMD_IFCS |
+ E1000_TXD_CMD_RPS);
+ tx_desc->upper.data = 0;
+ }
+
+ /* Setup Rx descriptor ring and Rx buffers */
+
+ if (!rxdr->count)
+ rxdr->count = E1000_DEFAULT_RXD;
+
+ rxdr->buffer_info = kcalloc(rxdr->count, sizeof(struct e1000_buffer),
+ GFP_KERNEL);
+ if (!rxdr->buffer_info) {
+ ret_val = 4;
+ goto err_nomem;
+ }
+
+ rxdr->size = rxdr->count * sizeof(struct e1000_rx_desc);
+ rxdr->desc = pci_alloc_consistent(pdev, rxdr->size, &rxdr->dma);
+ if (!rxdr->desc) {
+ ret_val = 5;
+ goto err_nomem;
+ }
+ memset(rxdr->desc, 0, rxdr->size);
+ rxdr->next_to_use = rxdr->next_to_clean = 0;
+
+ rctl = er32(RCTL);
+ ew32(RCTL, rctl & ~E1000_RCTL_EN);
+ ew32(RDBAL, ((u64)rxdr->dma & 0xFFFFFFFF));
+ ew32(RDBAH, ((u64)rxdr->dma >> 32));
+ ew32(RDLEN, rxdr->size);
+ ew32(RDH, 0);
+ ew32(RDT, 0);
+ rctl = E1000_RCTL_EN | E1000_RCTL_BAM | E1000_RCTL_SZ_2048 |
+ E1000_RCTL_LBM_NO | E1000_RCTL_RDMTS_HALF |
+ (hw->mc_filter_type << E1000_RCTL_MO_SHIFT);
+ ew32(RCTL, rctl);
+
+ for (i = 0; i < rxdr->count; i++) {
+ struct e1000_rx_desc *rx_desc = E1000_RX_DESC(*rxdr, i);
+ struct sk_buff *skb;
+
+ skb = alloc_skb(E1000_RXBUFFER_2048 + NET_IP_ALIGN, GFP_KERNEL);
+ if (!skb) {
+ ret_val = 6;
+ goto err_nomem;
+ }
+ skb_reserve(skb, NET_IP_ALIGN);
+ rxdr->buffer_info[i].skb = skb;
+ rxdr->buffer_info[i].length = E1000_RXBUFFER_2048;
+ rxdr->buffer_info[i].dma =
+ pci_map_single(pdev, skb->data, E1000_RXBUFFER_2048,
+ PCI_DMA_FROMDEVICE);
+ rx_desc->buffer_addr = cpu_to_le64(rxdr->buffer_info[i].dma);
+ memset(skb->data, 0x00, skb->len);
+ }
+
+ return 0;
+
+err_nomem:
+ e1000_free_desc_rings(adapter);
+ return ret_val;
+}
+
+static void e1000_phy_disable_receiver(struct e1000_adapter *adapter)
+{
+ struct e1000_hw *hw = &adapter->hw;
+
+ /* Write out to PHY registers 29 and 30 to disable the Receiver. */
+ e1000_write_phy_reg(hw, 29, 0x001F);
+ e1000_write_phy_reg(hw, 30, 0x8FFC);
+ e1000_write_phy_reg(hw, 29, 0x001A);
+ e1000_write_phy_reg(hw, 30, 0x8FF0);
+}
+
+static void e1000_phy_reset_clk_and_crs(struct e1000_adapter *adapter)
+{
+ struct e1000_hw *hw = &adapter->hw;
+ u16 phy_reg;
+
+ /* Because we reset the PHY above, we need to re-force TX_CLK in the
+ * Extended PHY Specific Control Register to 25MHz clock. This
+ * value defaults back to a 2.5MHz clock when the PHY is reset.
+ */
+ e1000_read_phy_reg(hw, M88E1000_EXT_PHY_SPEC_CTRL, &phy_reg);
+ phy_reg |= M88E1000_EPSCR_TX_CLK_25;
+ e1000_write_phy_reg(hw,
+ M88E1000_EXT_PHY_SPEC_CTRL, phy_reg);
+
+ /* In addition, because of the s/w reset above, we need to enable
+ * CRS on TX. This must be set for both full and half duplex
+ * operation.
+ */
+ e1000_read_phy_reg(hw, M88E1000_PHY_SPEC_CTRL, &phy_reg);
+ phy_reg |= M88E1000_PSCR_ASSERT_CRS_ON_TX;
+ e1000_write_phy_reg(hw,
+ M88E1000_PHY_SPEC_CTRL, phy_reg);
+}
+
+static int e1000_nonintegrated_phy_loopback(struct e1000_adapter *adapter)
+{
+ struct e1000_hw *hw = &adapter->hw;
+ u32 ctrl_reg;
+ u16 phy_reg;
+
+ /* Setup the Device Control Register for PHY loopback test. */
+
+ ctrl_reg = er32(CTRL);
+ ctrl_reg |= (E1000_CTRL_ILOS | /* Invert Loss-Of-Signal */
+ E1000_CTRL_FRCSPD | /* Set the Force Speed Bit */
+ E1000_CTRL_FRCDPX | /* Set the Force Duplex Bit */
+ E1000_CTRL_SPD_1000 | /* Force Speed to 1000 */
+ E1000_CTRL_FD); /* Force Duplex to FULL */
+
+ ew32(CTRL, ctrl_reg);
+
+ /* Read the PHY Specific Control Register (0x10) */
+ e1000_read_phy_reg(hw, M88E1000_PHY_SPEC_CTRL, &phy_reg);
+
+ /* Clear Auto-Crossover bits in PHY Specific Control Register
+ * (bits 6:5).
+ */
+ phy_reg &= ~M88E1000_PSCR_AUTO_X_MODE;
+ e1000_write_phy_reg(hw, M88E1000_PHY_SPEC_CTRL, phy_reg);
+
+ /* Perform software reset on the PHY */
+ e1000_phy_reset(hw);
+
+ /* Have to setup TX_CLK and TX_CRS after software reset */
+ e1000_phy_reset_clk_and_crs(adapter);
+
+ e1000_write_phy_reg(hw, PHY_CTRL, 0x8100);
+
+ /* Wait for reset to complete. */
+ udelay(500);
+
+ /* Have to setup TX_CLK and TX_CRS after software reset */
+ e1000_phy_reset_clk_and_crs(adapter);
+
+ /* Write out to PHY registers 29 and 30 to disable the Receiver. */
+ e1000_phy_disable_receiver(adapter);
+
+ /* Set the loopback bit in the PHY control register. */
+ e1000_read_phy_reg(hw, PHY_CTRL, &phy_reg);
+ phy_reg |= MII_CR_LOOPBACK;
+ e1000_write_phy_reg(hw, PHY_CTRL, phy_reg);
+
+ /* Setup TX_CLK and TX_CRS one more time. */
+ e1000_phy_reset_clk_and_crs(adapter);
+
+ /* Check Phy Configuration */
+ e1000_read_phy_reg(hw, PHY_CTRL, &phy_reg);
+ if (phy_reg != 0x4100)
+ return 9;
+
+ e1000_read_phy_reg(hw, M88E1000_EXT_PHY_SPEC_CTRL, &phy_reg);
+ if (phy_reg != 0x0070)
+ return 10;
+
+ e1000_read_phy_reg(hw, 29, &phy_reg);
+ if (phy_reg != 0x001A)
+ return 11;
+
+ return 0;
+}
+
+static int e1000_integrated_phy_loopback(struct e1000_adapter *adapter)
+{
+ struct e1000_hw *hw = &adapter->hw;
+ u32 ctrl_reg = 0;
+ u32 stat_reg = 0;
+
+ hw->autoneg = false;
+
+ if (hw->phy_type == e1000_phy_m88) {
+ /* Auto-MDI/MDIX Off */
+ e1000_write_phy_reg(hw,
+ M88E1000_PHY_SPEC_CTRL, 0x0808);
+ /* reset to update Auto-MDI/MDIX */
+ e1000_write_phy_reg(hw, PHY_CTRL, 0x9140);
+ /* autoneg off */
+ e1000_write_phy_reg(hw, PHY_CTRL, 0x8140);
+ } else if (hw->phy_type == e1000_phy_gg82563)
+ e1000_write_phy_reg(hw,
+ GG82563_PHY_KMRN_MODE_CTRL,
+ 0x1CC);
+
+ ctrl_reg = er32(CTRL);
+
+ if (hw->phy_type == e1000_phy_ife) {
+ /* force 100, set loopback */
+ e1000_write_phy_reg(hw, PHY_CTRL, 0x6100);
+
+ /* Now set up the MAC to the same speed/duplex as the PHY. */
+ ctrl_reg &= ~E1000_CTRL_SPD_SEL; /* Clear the speed sel bits */
+ ctrl_reg |= (E1000_CTRL_FRCSPD | /* Set the Force Speed Bit */
+ E1000_CTRL_FRCDPX | /* Set the Force Duplex Bit */
+ E1000_CTRL_SPD_100 |/* Force Speed to 100 */
+ E1000_CTRL_FD); /* Force Duplex to FULL */
+ } else {
+ /* force 1000, set loopback */
+ e1000_write_phy_reg(hw, PHY_CTRL, 0x4140);
+
+ /* Now set up the MAC to the same speed/duplex as the PHY. */
+ ctrl_reg = er32(CTRL);
+ ctrl_reg &= ~E1000_CTRL_SPD_SEL; /* Clear the speed sel bits */
+ ctrl_reg |= (E1000_CTRL_FRCSPD | /* Set the Force Speed Bit */
+ E1000_CTRL_FRCDPX | /* Set the Force Duplex Bit */
+ E1000_CTRL_SPD_1000 |/* Force Speed to 1000 */
+ E1000_CTRL_FD); /* Force Duplex to FULL */
+ }
+
+ if (hw->media_type == e1000_media_type_copper &&
+ hw->phy_type == e1000_phy_m88)
+ ctrl_reg |= E1000_CTRL_ILOS; /* Invert Loss of Signal */
+ else {
+		/* Set the ILOS bit on the fiber NIC if a half
+		 * duplex link is detected. */
+ stat_reg = er32(STATUS);
+ if ((stat_reg & E1000_STATUS_FD) == 0)
+ ctrl_reg |= (E1000_CTRL_ILOS | E1000_CTRL_SLU);
+ }
+
+ ew32(CTRL, ctrl_reg);
+
+ /* Disable the receiver on the PHY so when a cable is plugged in, the
+ * PHY does not begin to autoneg when a cable is reconnected to the NIC.
+ */
+ if (hw->phy_type == e1000_phy_m88)
+ e1000_phy_disable_receiver(adapter);
+
+ udelay(500);
+
+ return 0;
+}
+
+static int e1000_set_phy_loopback(struct e1000_adapter *adapter)
+{
+ struct e1000_hw *hw = &adapter->hw;
+ u16 phy_reg = 0;
+ u16 count = 0;
+
+ switch (hw->mac_type) {
+ case e1000_82543:
+ if (hw->media_type == e1000_media_type_copper) {
+ /* Attempt to setup Loopback mode on Non-integrated PHY.
+ * Some PHY registers get corrupted at random, so
+ * attempt this 10 times.
+ */
+ while (e1000_nonintegrated_phy_loopback(adapter) &&
+ count++ < 10);
+ if (count < 11)
+ return 0;
+ }
+ break;
+
+ case e1000_82544:
+ case e1000_82540:
+ case e1000_82545:
+ case e1000_82545_rev_3:
+ case e1000_82546:
+ case e1000_82546_rev_3:
+ case e1000_82541:
+ case e1000_82541_rev_2:
+ case e1000_82547:
+ case e1000_82547_rev_2:
+ case e1000_82571:
+ case e1000_82572:
+ case e1000_82573:
+ case e1000_80003es2lan:
+ case e1000_ich8lan:
+ return e1000_integrated_phy_loopback(adapter);
+ break;
+
+ default:
+ /* Default PHY loopback work is to read the MII
+ * control register and assert bit 14 (loopback mode).
+ */
+ e1000_read_phy_reg(hw, PHY_CTRL, &phy_reg);
+ phy_reg |= MII_CR_LOOPBACK;
+ e1000_write_phy_reg(hw, PHY_CTRL, phy_reg);
+ return 0;
+ break;
+ }
+
+ return 8;
+}
+
+static int e1000_setup_loopback_test(struct e1000_adapter *adapter)
+{
+ struct e1000_hw *hw = &adapter->hw;
+ u32 rctl;
+
+ if (hw->media_type == e1000_media_type_fiber ||
+ hw->media_type == e1000_media_type_internal_serdes) {
+ switch (hw->mac_type) {
+ case e1000_82545:
+ case e1000_82546:
+ case e1000_82545_rev_3:
+ case e1000_82546_rev_3:
+ return e1000_set_phy_loopback(adapter);
+ break;
+ case e1000_82571:
+ case e1000_82572:
+#define E1000_SERDES_LB_ON 0x410
+ e1000_set_phy_loopback(adapter);
+ ew32(SCTL, E1000_SERDES_LB_ON);
+ msleep(10);
+ return 0;
+ break;
+ default:
+ rctl = er32(RCTL);
+ rctl |= E1000_RCTL_LBM_TCVR;
+ ew32(RCTL, rctl);
+ return 0;
+ }
+ } else if (hw->media_type == e1000_media_type_copper)
+ return e1000_set_phy_loopback(adapter);
+
+ return 7;
+}
+
+static void e1000_loopback_cleanup(struct e1000_adapter *adapter)
+{
+ struct e1000_hw *hw = &adapter->hw;
+ u32 rctl;
+ u16 phy_reg;
+
+ rctl = er32(RCTL);
+ rctl &= ~(E1000_RCTL_LBM_TCVR | E1000_RCTL_LBM_MAC);
+ ew32(RCTL, rctl);
+
+ switch (hw->mac_type) {
+ case e1000_82571:
+ case e1000_82572:
+ if (hw->media_type == e1000_media_type_fiber ||
+ hw->media_type == e1000_media_type_internal_serdes) {
+#define E1000_SERDES_LB_OFF 0x400
+ ew32(SCTL, E1000_SERDES_LB_OFF);
+ msleep(10);
+ break;
+ }
+ /* Fall Through */
+ case e1000_82545:
+ case e1000_82546:
+ case e1000_82545_rev_3:
+ case e1000_82546_rev_3:
+ default:
+ hw->autoneg = true;
+ if (hw->phy_type == e1000_phy_gg82563)
+ e1000_write_phy_reg(hw,
+ GG82563_PHY_KMRN_MODE_CTRL,
+ 0x180);
+ e1000_read_phy_reg(hw, PHY_CTRL, &phy_reg);
+ if (phy_reg & MII_CR_LOOPBACK) {
+ phy_reg &= ~MII_CR_LOOPBACK;
+ e1000_write_phy_reg(hw, PHY_CTRL, phy_reg);
+ e1000_phy_reset(hw);
+ }
+ break;
+ }
+}
+
+static void e1000_create_lbtest_frame(struct sk_buff *skb,
+ unsigned int frame_size)
+{
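+	/* Test pattern: fill the whole frame with 0xFF, overwrite the second
+	 * half with 0xAA, then drop marker bytes 0xBE and 0xAF at offsets 10
+	 * and 12 past the midpoint; e1000_check_lbtest_frame() verifies the
+	 * 0xFF fill and these two markers. */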
+ memset(skb->data, 0xFF, frame_size);
+ frame_size &= ~1;
+ memset(&skb->data[frame_size / 2], 0xAA, frame_size / 2 - 1);
+ memset(&skb->data[frame_size / 2 + 10], 0xBE, 1);
+ memset(&skb->data[frame_size / 2 + 12], 0xAF, 1);
+}
+
+static int e1000_check_lbtest_frame(struct sk_buff *skb,
+ unsigned int frame_size)
+{
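+	/* A looped-back frame counts as intact only if the 0xFF fill is
+	 * still present near the start and both marker bytes written by
+	 * e1000_create_lbtest_frame() are in place; 13 is the caller's
+	 * mis-compare error code. */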
+ frame_size &= ~1;
+ if (*(skb->data + 3) == 0xFF) {
+ if ((*(skb->data + frame_size / 2 + 10) == 0xBE) &&
+ (*(skb->data + frame_size / 2 + 12) == 0xAF)) {
+ return 0;
+ }
+ }
+ return 13;
+}
+
+static int e1000_run_loopback_test(struct e1000_adapter *adapter)
+{
+ struct e1000_hw *hw = &adapter->hw;
+ struct e1000_tx_ring *txdr = &adapter->test_tx_ring;
+ struct e1000_rx_ring *rxdr = &adapter->test_rx_ring;
+ struct pci_dev *pdev = adapter->pdev;
+ int i, j, k, l, lc, good_cnt, ret_val=0;
+ unsigned long time;
+
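+	/* Hand every receive buffer to the hardware up front by moving the
+	 * RX tail pointer to the last descriptor of the test ring. */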
+ ew32(RDT, rxdr->count - 1);
+
+ /* Calculate the loop count based on the largest descriptor ring
+ * The idea is to wrap the largest ring a number of times using 64
+ * send/receive pairs during each loop
+ */
+
+ if (rxdr->count <= txdr->count)
+ lc = ((txdr->count / 64) * 2) + 1;
+ else
+ lc = ((rxdr->count / 64) * 2) + 1;
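+	/* For example, with 256 descriptors in the larger ring this gives
+	 * lc = (256 / 64) * 2 + 1 = 9 passes of 64 frames each. */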
+
+ k = l = 0;
+ for (j = 0; j <= lc; j++) { /* loop count loop */
+ for (i = 0; i < 64; i++) { /* send the packets */
+ e1000_create_lbtest_frame(txdr->buffer_info[i].skb,
+ 1024);
+ pci_dma_sync_single_for_device(pdev,
+ txdr->buffer_info[k].dma,
+ txdr->buffer_info[k].length,
+ PCI_DMA_TODEVICE);
+ if (unlikely(++k == txdr->count)) k = 0;
+ }
+ ew32(TDT, k);
+ msleep(200);
+ time = jiffies; /* set the start time for the receive */
+ good_cnt = 0;
+ do { /* receive the sent packets */
+ pci_dma_sync_single_for_cpu(pdev,
+ rxdr->buffer_info[l].dma,
+ rxdr->buffer_info[l].length,
+ PCI_DMA_FROMDEVICE);
+
+ ret_val = e1000_check_lbtest_frame(
+ rxdr->buffer_info[l].skb,
+ 1024);
+ if (!ret_val)
+ good_cnt++;
+ if (unlikely(++l == rxdr->count)) l = 0;
+			/* time + 20 jiffies (200 msecs on 2.4) is more than
+			 * enough time to complete the receives; if it is
+			 * exceeded, break out and flag an error
+			 */
+ } while (good_cnt < 64 && jiffies < (time + 20));
+ if (good_cnt != 64) {
+ ret_val = 13; /* ret_val is the same as mis-compare */
+ break;
+ }
+ if (jiffies >= (time + 2)) {
+ ret_val = 14; /* error code for time out error */
+ break;
+ }
+ } /* end loop count loop */
+ return ret_val;
+}
+
+static int e1000_loopback_test(struct e1000_adapter *adapter, u64 *data)
+{
+ struct e1000_hw *hw = &adapter->hw;
+
+ /* PHY loopback cannot be performed if SoL/IDER
+ * sessions are active */
+ if (e1000_check_phy_reset_block(hw)) {
+ DPRINTK(DRV, ERR, "Cannot do PHY loopback test "
+ "when SoL/IDER is active.\n");
+ *data = 0;
+ goto out;
+ }
+
+ *data = e1000_setup_desc_rings(adapter);
+ if (*data)
+ goto out;
+ *data = e1000_setup_loopback_test(adapter);
+ if (*data)
+ goto err_loopback;
+ *data = e1000_run_loopback_test(adapter);
+ e1000_loopback_cleanup(adapter);
+
+err_loopback:
+ e1000_free_desc_rings(adapter);
+out:
+ return *data;
+}
+
+static int e1000_link_test(struct e1000_adapter *adapter, u64 *data)
+{
+ struct e1000_hw *hw = &adapter->hw;
+ *data = 0;
+ if (hw->media_type == e1000_media_type_internal_serdes) {
+ int i = 0;
+ hw->serdes_link_down = true;
+
+ /* On some blade server designs, link establishment
+ * could take as long as 2-3 minutes */
+ do {
+ e1000_check_for_link(hw);
+ if (!hw->serdes_link_down)
+ return *data;
+ msleep(20);
+ } while (i++ < 3750);
+
+ *data = 1;
+ } else {
+ e1000_check_for_link(hw);
+ if (hw->autoneg) /* if auto_neg is set wait for it */
+ msleep(4000);
+
+ if (!(er32(STATUS) & E1000_STATUS_LU)) {
+ *data = 1;
+ }
+ }
+ return *data;
+}
+
+static int e1000_get_sset_count(struct net_device *netdev, int sset)
+{
+ switch (sset) {
+ case ETH_SS_TEST:
+ return E1000_TEST_LEN;
+ case ETH_SS_STATS:
+ return E1000_STATS_LEN;
+ default:
+ return -EOPNOTSUPP;
+ }
+}
+
+static void e1000_diag_test(struct net_device *netdev,
+ struct ethtool_test *eth_test, u64 *data)
+{
+ struct e1000_adapter *adapter = netdev_priv(netdev);
+ struct e1000_hw *hw = &adapter->hw;
+ bool if_running;
+
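+	/* EtherCAT: when the port is claimed by an EtherCAT master (ecdev is
+	 * set), skip the ethtool self-test entirely; the offline tests below
+	 * would reset and reopen a device the master is using. */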
+ if (adapter->ecdev)
+ return;
+
+ if_running = netif_running(netdev);
+
+ set_bit(__E1000_TESTING, &adapter->flags);
+ if (eth_test->flags == ETH_TEST_FL_OFFLINE) {
+ /* Offline tests */
+
+ /* save speed, duplex, autoneg settings */
+ u16 autoneg_advertised = hw->autoneg_advertised;
+ u8 forced_speed_duplex = hw->forced_speed_duplex;
+ u8 autoneg = hw->autoneg;
+
+ DPRINTK(HW, INFO, "offline testing starting\n");
+
+ /* Link test performed before hardware reset so autoneg doesn't
+ * interfere with test result */
+ if (e1000_link_test(adapter, &data[4]))
+ eth_test->flags |= ETH_TEST_FL_FAILED;
+
+ if (if_running)
+ /* indicate we're in test mode */
+ dev_close(netdev);
+ else
+ e1000_reset(adapter);
+
+ if (e1000_reg_test(adapter, &data[0]))
+ eth_test->flags |= ETH_TEST_FL_FAILED;
+
+ e1000_reset(adapter);
+ if (e1000_eeprom_test(adapter, &data[1]))
+ eth_test->flags |= ETH_TEST_FL_FAILED;
+
+ e1000_reset(adapter);
+ if (e1000_intr_test(adapter, &data[2]))
+ eth_test->flags |= ETH_TEST_FL_FAILED;
+
+ e1000_reset(adapter);
+ /* make sure the phy is powered up */
+ e1000_power_up_phy(adapter);
+ if (e1000_loopback_test(adapter, &data[3]))
+ eth_test->flags |= ETH_TEST_FL_FAILED;
+
+ /* restore speed, duplex, autoneg settings */
+ hw->autoneg_advertised = autoneg_advertised;
+ hw->forced_speed_duplex = forced_speed_duplex;
+ hw->autoneg = autoneg;
+
+ e1000_reset(adapter);
+ clear_bit(__E1000_TESTING, &adapter->flags);
+ if (if_running)
+ dev_open(netdev);
+ } else {
+ DPRINTK(HW, INFO, "online testing starting\n");
+ /* Online tests */
+ if (e1000_link_test(adapter, &data[4]))
+ eth_test->flags |= ETH_TEST_FL_FAILED;
+
+ /* Online tests aren't run; pass by default */
+ data[0] = 0;
+ data[1] = 0;
+ data[2] = 0;
+ data[3] = 0;
+
+ clear_bit(__E1000_TESTING, &adapter->flags);
+ }
+ msleep_interruptible(4 * 1000);
+}
+
+static int e1000_wol_exclusion(struct e1000_adapter *adapter,
+ struct ethtool_wolinfo *wol)
+{
+ struct e1000_hw *hw = &adapter->hw;
+ int retval = 1; /* fail by default */
+
+ switch (hw->device_id) {
+ case E1000_DEV_ID_82542:
+ case E1000_DEV_ID_82543GC_FIBER:
+ case E1000_DEV_ID_82543GC_COPPER:
+ case E1000_DEV_ID_82544EI_FIBER:
+ case E1000_DEV_ID_82546EB_QUAD_COPPER:
+ case E1000_DEV_ID_82545EM_FIBER:
+ case E1000_DEV_ID_82545EM_COPPER:
+ case E1000_DEV_ID_82546GB_QUAD_COPPER:
+ case E1000_DEV_ID_82546GB_PCIE:
+ case E1000_DEV_ID_82571EB_SERDES_QUAD:
+ /* these don't support WoL at all */
+ wol->supported = 0;
+ break;
+ case E1000_DEV_ID_82546EB_FIBER:
+ case E1000_DEV_ID_82546GB_FIBER:
+ case E1000_DEV_ID_82571EB_FIBER:
+ case E1000_DEV_ID_82571EB_SERDES:
+ case E1000_DEV_ID_82571EB_COPPER:
+ /* Wake events not supported on port B */
+ if (er32(STATUS) & E1000_STATUS_FUNC_1) {
+ wol->supported = 0;
+ break;
+ }
+ /* return success for non excluded adapter ports */
+ retval = 0;
+ break;
+ case E1000_DEV_ID_82571EB_QUAD_COPPER:
+ case E1000_DEV_ID_82571EB_QUAD_FIBER:
+ case E1000_DEV_ID_82571EB_QUAD_COPPER_LOWPROFILE:
+ case E1000_DEV_ID_82571PT_QUAD_COPPER:
+ case E1000_DEV_ID_82546GB_QUAD_COPPER_KSP3:
+ /* quad port adapters only support WoL on port A */
+ if (!adapter->quad_port_a) {
+ wol->supported = 0;
+ break;
+ }
+ /* return success for non excluded adapter ports */
+ retval = 0;
+ break;
+ default:
+ /* dual port cards only support WoL on port A from now on
+ * unless it was enabled in the eeprom for port B
+ * so exclude FUNC_1 ports from having WoL enabled */
+ if (er32(STATUS) & E1000_STATUS_FUNC_1 &&
+ !adapter->eeprom_wol) {
+ wol->supported = 0;
+ break;
+ }
+
+ retval = 0;
+ }
+
+ return retval;
+}
+
+static void e1000_get_wol(struct net_device *netdev,
+ struct ethtool_wolinfo *wol)
+{
+ struct e1000_adapter *adapter = netdev_priv(netdev);
+ struct e1000_hw *hw = &adapter->hw;
+
+ wol->supported = WAKE_UCAST | WAKE_MCAST |
+ WAKE_BCAST | WAKE_MAGIC;
+ wol->wolopts = 0;
+
+ /* this function will set ->supported = 0 and return 1 if wol is not
+ * supported by this hardware */
+ if (e1000_wol_exclusion(adapter, wol) ||
+ !device_can_wakeup(&adapter->pdev->dev))
+ return;
+
+ /* apply any specific unsupported masks here */
+ switch (hw->device_id) {
+ case E1000_DEV_ID_82546GB_QUAD_COPPER_KSP3:
+		/* KSP3 does not support UCAST wake-ups */
+ wol->supported &= ~WAKE_UCAST;
+
+ if (adapter->wol & E1000_WUFC_EX)
+ DPRINTK(DRV, ERR, "Interface does not support "
+ "directed (unicast) frame wake-up packets\n");
+ break;
+ default:
+ break;
+ }
+
+ if (adapter->wol & E1000_WUFC_EX)
+ wol->wolopts |= WAKE_UCAST;
+ if (adapter->wol & E1000_WUFC_MC)
+ wol->wolopts |= WAKE_MCAST;
+ if (adapter->wol & E1000_WUFC_BC)
+ wol->wolopts |= WAKE_BCAST;
+ if (adapter->wol & E1000_WUFC_MAG)
+ wol->wolopts |= WAKE_MAGIC;
+
+ return;
+}
+
+static int e1000_set_wol(struct net_device *netdev, struct ethtool_wolinfo *wol)
+{
+ struct e1000_adapter *adapter = netdev_priv(netdev);
+ struct e1000_hw *hw = &adapter->hw;
+
+ if (wol->wolopts & (WAKE_PHY | WAKE_ARP | WAKE_MAGICSECURE))
+ return -EOPNOTSUPP;
+
+ if (e1000_wol_exclusion(adapter, wol) ||
+ !device_can_wakeup(&adapter->pdev->dev))
+ return wol->wolopts ? -EOPNOTSUPP : 0;
+
+ switch (hw->device_id) {
+ case E1000_DEV_ID_82546GB_QUAD_COPPER_KSP3:
+ if (wol->wolopts & WAKE_UCAST) {
+ DPRINTK(DRV, ERR, "Interface does not support "
+ "directed (unicast) frame wake-up packets\n");
+ return -EOPNOTSUPP;
+ }
+ break;
+ default:
+ break;
+ }
+
+ /* these settings will always override what we currently have */
+ adapter->wol = 0;
+
+ if (wol->wolopts & WAKE_UCAST)
+ adapter->wol |= E1000_WUFC_EX;
+ if (wol->wolopts & WAKE_MCAST)
+ adapter->wol |= E1000_WUFC_MC;
+ if (wol->wolopts & WAKE_BCAST)
+ adapter->wol |= E1000_WUFC_BC;
+ if (wol->wolopts & WAKE_MAGIC)
+ adapter->wol |= E1000_WUFC_MAG;
+
+ device_set_wakeup_enable(&adapter->pdev->dev, adapter->wol);
+
+ return 0;
+}
+
+/* toggle LED 4 times per second = 2 "blinks" per second */
+#define E1000_ID_INTERVAL (HZ/4)
+
+/* bit defines for adapter->led_status */
+#define E1000_LED_ON 0
+
+static void e1000_led_blink_callback(unsigned long data)
+{
+ struct e1000_adapter *adapter = (struct e1000_adapter *) data;
+ struct e1000_hw *hw = &adapter->hw;
+
+ if (test_and_change_bit(E1000_LED_ON, &adapter->led_status))
+ e1000_led_off(hw);
+ else
+ e1000_led_on(hw);
+
+ mod_timer(&adapter->blink_timer, jiffies + E1000_ID_INTERVAL);
+}
+
+static int e1000_phys_id(struct net_device *netdev, u32 data)
+{
+ struct e1000_adapter *adapter = netdev_priv(netdev);
+ struct e1000_hw *hw = &adapter->hw;
+
+ if (!data)
+ data = INT_MAX;
+
+ if (hw->mac_type < e1000_82571) {
+ if (!adapter->blink_timer.function) {
+ init_timer(&adapter->blink_timer);
+ adapter->blink_timer.function = e1000_led_blink_callback;
+ adapter->blink_timer.data = (unsigned long)adapter;
+ }
+ e1000_setup_led(hw);
+ mod_timer(&adapter->blink_timer, jiffies);
+ msleep_interruptible(data * 1000);
+ del_timer_sync(&adapter->blink_timer);
+ } else if (hw->phy_type == e1000_phy_ife) {
+ if (!adapter->blink_timer.function) {
+ init_timer(&adapter->blink_timer);
+ adapter->blink_timer.function = e1000_led_blink_callback;
+ adapter->blink_timer.data = (unsigned long)adapter;
+ }
+ mod_timer(&adapter->blink_timer, jiffies);
+ msleep_interruptible(data * 1000);
+ del_timer_sync(&adapter->blink_timer);
+ e1000_write_phy_reg(&(adapter->hw), IFE_PHY_SPECIAL_CONTROL_LED, 0);
+ } else {
+ e1000_blink_led_start(hw);
+ msleep_interruptible(data * 1000);
+ }
+
+ e1000_led_off(hw);
+ clear_bit(E1000_LED_ON, &adapter->led_status);
+ e1000_cleanup_led(hw);
+
+ return 0;
+}
+
+static int e1000_nway_reset(struct net_device *netdev)
+{
+ struct e1000_adapter *adapter = netdev_priv(netdev);
+
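+	/* EtherCAT: refuse to restart autonegotiation while the device is
+	 * attached to an EtherCAT master. */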
+ if (adapter->ecdev)
+ return -EBUSY;
+
+ if (netif_running(netdev))
+ e1000_reinit_locked(adapter);
+ return 0;
+}
+
+static void e1000_get_ethtool_stats(struct net_device *netdev,
+ struct ethtool_stats *stats, u64 *data)
+{
+ struct e1000_adapter *adapter = netdev_priv(netdev);
+ int i;
+
+ e1000_update_stats(adapter);
+ for (i = 0; i < E1000_GLOBAL_STATS_LEN; i++) {
+ char *p = (char *)adapter+e1000_gstrings_stats[i].stat_offset;
+ data[i] = (e1000_gstrings_stats[i].sizeof_stat ==
+ sizeof(u64)) ? *(u64 *)p : *(u32 *)p;
+ }
+/* BUG_ON(i != E1000_STATS_LEN); */
+}
+
+static void e1000_get_strings(struct net_device *netdev, u32 stringset,
+ u8 *data)
+{
+ u8 *p = data;
+ int i;
+
+ switch (stringset) {
+ case ETH_SS_TEST:
+ memcpy(data, *e1000_gstrings_test,
+ sizeof(e1000_gstrings_test));
+ break;
+ case ETH_SS_STATS:
+ for (i = 0; i < E1000_GLOBAL_STATS_LEN; i++) {
+ memcpy(p, e1000_gstrings_stats[i].stat_string,
+ ETH_GSTRING_LEN);
+ p += ETH_GSTRING_LEN;
+ }
+/* BUG_ON(p - data != E1000_STATS_LEN * ETH_GSTRING_LEN); */
+ break;
+ }
+}
+
+static const struct ethtool_ops e1000_ethtool_ops = {
+ .get_settings = e1000_get_settings,
+ .set_settings = e1000_set_settings,
+ .get_drvinfo = e1000_get_drvinfo,
+ .get_regs_len = e1000_get_regs_len,
+ .get_regs = e1000_get_regs,
+ .get_wol = e1000_get_wol,
+ .set_wol = e1000_set_wol,
+ .get_msglevel = e1000_get_msglevel,
+ .set_msglevel = e1000_set_msglevel,
+ .nway_reset = e1000_nway_reset,
+ .get_link = ethtool_op_get_link,
+ .get_eeprom_len = e1000_get_eeprom_len,
+ .get_eeprom = e1000_get_eeprom,
+ .set_eeprom = e1000_set_eeprom,
+ .get_ringparam = e1000_get_ringparam,
+ .set_ringparam = e1000_set_ringparam,
+ .get_pauseparam = e1000_get_pauseparam,
+ .set_pauseparam = e1000_set_pauseparam,
+ .get_rx_csum = e1000_get_rx_csum,
+ .set_rx_csum = e1000_set_rx_csum,
+ .get_tx_csum = e1000_get_tx_csum,
+ .set_tx_csum = e1000_set_tx_csum,
+ .set_sg = ethtool_op_set_sg,
+ .set_tso = e1000_set_tso,
+ .self_test = e1000_diag_test,
+ .get_strings = e1000_get_strings,
+ .phys_id = e1000_phys_id,
+ .get_ethtool_stats = e1000_get_ethtool_stats,
+ .get_sset_count = e1000_get_sset_count,
+};
+
+void e1000_set_ethtool_ops(struct net_device *netdev)
+{
+ SET_ETHTOOL_OPS(netdev, &e1000_ethtool_ops);
+}
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/devices/e1000/e1000_ethtool-2.6.28-orig.c Mon Feb 07 21:30:25 2011 +0100
@@ -0,0 +1,1987 @@
+/*******************************************************************************
+
+ Intel PRO/1000 Linux driver
+ Copyright(c) 1999 - 2006 Intel Corporation.
+
+ This program is free software; you can redistribute it and/or modify it
+ under the terms and conditions of the GNU General Public License,
+ version 2, as published by the Free Software Foundation.
+
+ This program is distributed in the hope it will be useful, but WITHOUT
+ ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ more details.
+
+ You should have received a copy of the GNU General Public License along with
+ this program; if not, write to the Free Software Foundation, Inc.,
+ 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
+
+ The full GNU General Public License is included in this distribution in
+ the file called "COPYING".
+
+ Contact Information:
+ Linux NICS <linux.nics@intel.com>
+ e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
+ Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
+
+*******************************************************************************/
+
+/* ethtool support for e1000 */
+
+#include "e1000.h"
+#include <asm/uaccess.h>
+
+struct e1000_stats {
+ char stat_string[ETH_GSTRING_LEN];
+ int sizeof_stat;
+ int stat_offset;
+};
+
+#define E1000_STAT(m) FIELD_SIZEOF(struct e1000_adapter, m), \
+ offsetof(struct e1000_adapter, m)
+static const struct e1000_stats e1000_gstrings_stats[] = {
+ { "rx_packets", E1000_STAT(stats.gprc) },
+ { "tx_packets", E1000_STAT(stats.gptc) },
+ { "rx_bytes", E1000_STAT(stats.gorcl) },
+ { "tx_bytes", E1000_STAT(stats.gotcl) },
+ { "rx_broadcast", E1000_STAT(stats.bprc) },
+ { "tx_broadcast", E1000_STAT(stats.bptc) },
+ { "rx_multicast", E1000_STAT(stats.mprc) },
+ { "tx_multicast", E1000_STAT(stats.mptc) },
+ { "rx_errors", E1000_STAT(stats.rxerrc) },
+ { "tx_errors", E1000_STAT(stats.txerrc) },
+ { "tx_dropped", E1000_STAT(net_stats.tx_dropped) },
+ { "multicast", E1000_STAT(stats.mprc) },
+ { "collisions", E1000_STAT(stats.colc) },
+ { "rx_length_errors", E1000_STAT(stats.rlerrc) },
+ { "rx_over_errors", E1000_STAT(net_stats.rx_over_errors) },
+ { "rx_crc_errors", E1000_STAT(stats.crcerrs) },
+ { "rx_frame_errors", E1000_STAT(net_stats.rx_frame_errors) },
+ { "rx_no_buffer_count", E1000_STAT(stats.rnbc) },
+ { "rx_missed_errors", E1000_STAT(stats.mpc) },
+ { "tx_aborted_errors", E1000_STAT(stats.ecol) },
+ { "tx_carrier_errors", E1000_STAT(stats.tncrs) },
+ { "tx_fifo_errors", E1000_STAT(net_stats.tx_fifo_errors) },
+ { "tx_heartbeat_errors", E1000_STAT(net_stats.tx_heartbeat_errors) },
+ { "tx_window_errors", E1000_STAT(stats.latecol) },
+ { "tx_abort_late_coll", E1000_STAT(stats.latecol) },
+ { "tx_deferred_ok", E1000_STAT(stats.dc) },
+ { "tx_single_coll_ok", E1000_STAT(stats.scc) },
+ { "tx_multi_coll_ok", E1000_STAT(stats.mcc) },
+ { "tx_timeout_count", E1000_STAT(tx_timeout_count) },
+ { "tx_restart_queue", E1000_STAT(restart_queue) },
+ { "rx_long_length_errors", E1000_STAT(stats.roc) },
+ { "rx_short_length_errors", E1000_STAT(stats.ruc) },
+ { "rx_align_errors", E1000_STAT(stats.algnerrc) },
+ { "tx_tcp_seg_good", E1000_STAT(stats.tsctc) },
+ { "tx_tcp_seg_failed", E1000_STAT(stats.tsctfc) },
+ { "rx_flow_control_xon", E1000_STAT(stats.xonrxc) },
+ { "rx_flow_control_xoff", E1000_STAT(stats.xoffrxc) },
+ { "tx_flow_control_xon", E1000_STAT(stats.xontxc) },
+ { "tx_flow_control_xoff", E1000_STAT(stats.xofftxc) },
+ { "rx_long_byte_count", E1000_STAT(stats.gorcl) },
+ { "rx_csum_offload_good", E1000_STAT(hw_csum_good) },
+ { "rx_csum_offload_errors", E1000_STAT(hw_csum_err) },
+ { "rx_header_split", E1000_STAT(rx_hdr_split) },
+ { "alloc_rx_buff_failed", E1000_STAT(alloc_rx_buff_failed) },
+ { "tx_smbus", E1000_STAT(stats.mgptc) },
+ { "rx_smbus", E1000_STAT(stats.mgprc) },
+ { "dropped_smbus", E1000_STAT(stats.mgpdc) },
+};
+
+#define E1000_QUEUE_STATS_LEN 0
+#define E1000_GLOBAL_STATS_LEN ARRAY_SIZE(e1000_gstrings_stats)
+#define E1000_STATS_LEN (E1000_GLOBAL_STATS_LEN + E1000_QUEUE_STATS_LEN)
+static const char e1000_gstrings_test[][ETH_GSTRING_LEN] = {
+ "Register test (offline)", "Eeprom test (offline)",
+ "Interrupt test (offline)", "Loopback test (offline)",
+ "Link test (on/offline)"
+};
+#define E1000_TEST_LEN ARRAY_SIZE(e1000_gstrings_test)
+
+static int e1000_get_settings(struct net_device *netdev,
+ struct ethtool_cmd *ecmd)
+{
+ struct e1000_adapter *adapter = netdev_priv(netdev);
+ struct e1000_hw *hw = &adapter->hw;
+
+ if (hw->media_type == e1000_media_type_copper) {
+
+ ecmd->supported = (SUPPORTED_10baseT_Half |
+ SUPPORTED_10baseT_Full |
+ SUPPORTED_100baseT_Half |
+ SUPPORTED_100baseT_Full |
+ SUPPORTED_1000baseT_Full|
+ SUPPORTED_Autoneg |
+ SUPPORTED_TP);
+ if (hw->phy_type == e1000_phy_ife)
+ ecmd->supported &= ~SUPPORTED_1000baseT_Full;
+ ecmd->advertising = ADVERTISED_TP;
+
+ if (hw->autoneg == 1) {
+ ecmd->advertising |= ADVERTISED_Autoneg;
+ /* the e1000 autoneg seems to match ethtool nicely */
+ ecmd->advertising |= hw->autoneg_advertised;
+ }
+
+ ecmd->port = PORT_TP;
+ ecmd->phy_address = hw->phy_addr;
+
+ if (hw->mac_type == e1000_82543)
+ ecmd->transceiver = XCVR_EXTERNAL;
+ else
+ ecmd->transceiver = XCVR_INTERNAL;
+
+ } else {
+ ecmd->supported = (SUPPORTED_1000baseT_Full |
+ SUPPORTED_FIBRE |
+ SUPPORTED_Autoneg);
+
+ ecmd->advertising = (ADVERTISED_1000baseT_Full |
+ ADVERTISED_FIBRE |
+ ADVERTISED_Autoneg);
+
+ ecmd->port = PORT_FIBRE;
+
+ if (hw->mac_type >= e1000_82545)
+ ecmd->transceiver = XCVR_INTERNAL;
+ else
+ ecmd->transceiver = XCVR_EXTERNAL;
+ }
+
+ if (er32(STATUS) & E1000_STATUS_LU) {
+
+ e1000_get_speed_and_duplex(hw, &adapter->link_speed,
+ &adapter->link_duplex);
+ ecmd->speed = adapter->link_speed;
+
+		/* unfortunately FULL_DUPLEX != DUPLEX_FULL
+		 *          and HALF_DUPLEX != DUPLEX_HALF */
+
+ if (adapter->link_duplex == FULL_DUPLEX)
+ ecmd->duplex = DUPLEX_FULL;
+ else
+ ecmd->duplex = DUPLEX_HALF;
+ } else {
+ ecmd->speed = -1;
+ ecmd->duplex = -1;
+ }
+
+ ecmd->autoneg = ((hw->media_type == e1000_media_type_fiber) ||
+ hw->autoneg) ? AUTONEG_ENABLE : AUTONEG_DISABLE;
+ return 0;
+}
+
+static int e1000_set_settings(struct net_device *netdev,
+ struct ethtool_cmd *ecmd)
+{
+ struct e1000_adapter *adapter = netdev_priv(netdev);
+ struct e1000_hw *hw = &adapter->hw;
+
+ /* When SoL/IDER sessions are active, autoneg/speed/duplex
+ * cannot be changed */
+ if (e1000_check_phy_reset_block(hw)) {
+ DPRINTK(DRV, ERR, "Cannot change link characteristics "
+ "when SoL/IDER is active.\n");
+ return -EINVAL;
+ }
+
+ while (test_and_set_bit(__E1000_RESETTING, &adapter->flags))
+ msleep(1);
+
+ if (ecmd->autoneg == AUTONEG_ENABLE) {
+ hw->autoneg = 1;
+ if (hw->media_type == e1000_media_type_fiber)
+ hw->autoneg_advertised = ADVERTISED_1000baseT_Full |
+ ADVERTISED_FIBRE |
+ ADVERTISED_Autoneg;
+ else
+ hw->autoneg_advertised = ecmd->advertising |
+ ADVERTISED_TP |
+ ADVERTISED_Autoneg;
+ ecmd->advertising = hw->autoneg_advertised;
+ } else
+ if (e1000_set_spd_dplx(adapter, ecmd->speed + ecmd->duplex)) {
+ clear_bit(__E1000_RESETTING, &adapter->flags);
+ return -EINVAL;
+ }
+
+ /* reset the link */
+
+ if (netif_running(adapter->netdev)) {
+ e1000_down(adapter);
+ e1000_up(adapter);
+ } else
+ e1000_reset(adapter);
+
+ clear_bit(__E1000_RESETTING, &adapter->flags);
+ return 0;
+}
+
+static void e1000_get_pauseparam(struct net_device *netdev,
+ struct ethtool_pauseparam *pause)
+{
+ struct e1000_adapter *adapter = netdev_priv(netdev);
+ struct e1000_hw *hw = &adapter->hw;
+
+ pause->autoneg =
+ (adapter->fc_autoneg ? AUTONEG_ENABLE : AUTONEG_DISABLE);
+
+ if (hw->fc == E1000_FC_RX_PAUSE)
+ pause->rx_pause = 1;
+ else if (hw->fc == E1000_FC_TX_PAUSE)
+ pause->tx_pause = 1;
+ else if (hw->fc == E1000_FC_FULL) {
+ pause->rx_pause = 1;
+ pause->tx_pause = 1;
+ }
+}
+
+static int e1000_set_pauseparam(struct net_device *netdev,
+ struct ethtool_pauseparam *pause)
+{
+ struct e1000_adapter *adapter = netdev_priv(netdev);
+ struct e1000_hw *hw = &adapter->hw;
+ int retval = 0;
+
+ adapter->fc_autoneg = pause->autoneg;
+
+ while (test_and_set_bit(__E1000_RESETTING, &adapter->flags))
+ msleep(1);
+
+ if (pause->rx_pause && pause->tx_pause)
+ hw->fc = E1000_FC_FULL;
+ else if (pause->rx_pause && !pause->tx_pause)
+ hw->fc = E1000_FC_RX_PAUSE;
+ else if (!pause->rx_pause && pause->tx_pause)
+ hw->fc = E1000_FC_TX_PAUSE;
+ else if (!pause->rx_pause && !pause->tx_pause)
+ hw->fc = E1000_FC_NONE;
+
+ hw->original_fc = hw->fc;
+
+ if (adapter->fc_autoneg == AUTONEG_ENABLE) {
+ if (netif_running(adapter->netdev)) {
+ e1000_down(adapter);
+ e1000_up(adapter);
+ } else
+ e1000_reset(adapter);
+ } else
+ retval = ((hw->media_type == e1000_media_type_fiber) ?
+ e1000_setup_link(hw) : e1000_force_mac_fc(hw));
+
+ clear_bit(__E1000_RESETTING, &adapter->flags);
+ return retval;
+}
+
+static u32 e1000_get_rx_csum(struct net_device *netdev)
+{
+ struct e1000_adapter *adapter = netdev_priv(netdev);
+ return adapter->rx_csum;
+}
+
+static int e1000_set_rx_csum(struct net_device *netdev, u32 data)
+{
+ struct e1000_adapter *adapter = netdev_priv(netdev);
+ adapter->rx_csum = data;
+
+ if (netif_running(netdev))
+ e1000_reinit_locked(adapter);
+ else
+ e1000_reset(adapter);
+ return 0;
+}
+
+static u32 e1000_get_tx_csum(struct net_device *netdev)
+{
+ return (netdev->features & NETIF_F_HW_CSUM) != 0;
+}
+
+static int e1000_set_tx_csum(struct net_device *netdev, u32 data)
+{
+ struct e1000_adapter *adapter = netdev_priv(netdev);
+ struct e1000_hw *hw = &adapter->hw;
+
+ if (hw->mac_type < e1000_82543) {
+ if (!data)
+ return -EINVAL;
+ return 0;
+ }
+
+ if (data)
+ netdev->features |= NETIF_F_HW_CSUM;
+ else
+ netdev->features &= ~NETIF_F_HW_CSUM;
+
+ return 0;
+}
+
+static int e1000_set_tso(struct net_device *netdev, u32 data)
+{
+ struct e1000_adapter *adapter = netdev_priv(netdev);
+ struct e1000_hw *hw = &adapter->hw;
+
+ if ((hw->mac_type < e1000_82544) ||
+ (hw->mac_type == e1000_82547))
+ return data ? -EINVAL : 0;
+
+ if (data)
+ netdev->features |= NETIF_F_TSO;
+ else
+ netdev->features &= ~NETIF_F_TSO;
+
+ if (data && (adapter->hw.mac_type > e1000_82547_rev_2))
+ netdev->features |= NETIF_F_TSO6;
+ else
+ netdev->features &= ~NETIF_F_TSO6;
+
+ DPRINTK(PROBE, INFO, "TSO is %s\n", data ? "Enabled" : "Disabled");
+ adapter->tso_force = true;
+ return 0;
+}
+
+static u32 e1000_get_msglevel(struct net_device *netdev)
+{
+ struct e1000_adapter *adapter = netdev_priv(netdev);
+ return adapter->msg_enable;
+}
+
+static void e1000_set_msglevel(struct net_device *netdev, u32 data)
+{
+ struct e1000_adapter *adapter = netdev_priv(netdev);
+ adapter->msg_enable = data;
+}
+
+static int e1000_get_regs_len(struct net_device *netdev)
+{
+#define E1000_REGS_LEN 32
+ return E1000_REGS_LEN * sizeof(u32);
+}
+
+static void e1000_get_regs(struct net_device *netdev, struct ethtool_regs *regs,
+ void *p)
+{
+ struct e1000_adapter *adapter = netdev_priv(netdev);
+ struct e1000_hw *hw = &adapter->hw;
+ u32 *regs_buff = p;
+ u16 phy_data;
+
+ memset(p, 0, E1000_REGS_LEN * sizeof(u32));
+
+ regs->version = (1 << 24) | (hw->revision_id << 16) | hw->device_id;
+
+ regs_buff[0] = er32(CTRL);
+ regs_buff[1] = er32(STATUS);
+
+ regs_buff[2] = er32(RCTL);
+ regs_buff[3] = er32(RDLEN);
+ regs_buff[4] = er32(RDH);
+ regs_buff[5] = er32(RDT);
+ regs_buff[6] = er32(RDTR);
+
+ regs_buff[7] = er32(TCTL);
+ regs_buff[8] = er32(TDLEN);
+ regs_buff[9] = er32(TDH);
+ regs_buff[10] = er32(TDT);
+ regs_buff[11] = er32(TIDV);
+
+ regs_buff[12] = hw->phy_type; /* PHY type (IGP=1, M88=0) */
+ if (hw->phy_type == e1000_phy_igp) {
+ e1000_write_phy_reg(hw, IGP01E1000_PHY_PAGE_SELECT,
+ IGP01E1000_PHY_AGC_A);
+ e1000_read_phy_reg(hw, IGP01E1000_PHY_AGC_A &
+ IGP01E1000_PHY_PAGE_SELECT, &phy_data);
+ regs_buff[13] = (u32)phy_data; /* cable length */
+ e1000_write_phy_reg(hw, IGP01E1000_PHY_PAGE_SELECT,
+ IGP01E1000_PHY_AGC_B);
+ e1000_read_phy_reg(hw, IGP01E1000_PHY_AGC_B &
+ IGP01E1000_PHY_PAGE_SELECT, &phy_data);
+ regs_buff[14] = (u32)phy_data; /* cable length */
+ e1000_write_phy_reg(hw, IGP01E1000_PHY_PAGE_SELECT,
+ IGP01E1000_PHY_AGC_C);
+ e1000_read_phy_reg(hw, IGP01E1000_PHY_AGC_C &
+ IGP01E1000_PHY_PAGE_SELECT, &phy_data);
+ regs_buff[15] = (u32)phy_data; /* cable length */
+ e1000_write_phy_reg(hw, IGP01E1000_PHY_PAGE_SELECT,
+ IGP01E1000_PHY_AGC_D);
+ e1000_read_phy_reg(hw, IGP01E1000_PHY_AGC_D &
+ IGP01E1000_PHY_PAGE_SELECT, &phy_data);
+ regs_buff[16] = (u32)phy_data; /* cable length */
+ regs_buff[17] = 0; /* extended 10bt distance (not needed) */
+ e1000_write_phy_reg(hw, IGP01E1000_PHY_PAGE_SELECT, 0x0);
+ e1000_read_phy_reg(hw, IGP01E1000_PHY_PORT_STATUS &
+ IGP01E1000_PHY_PAGE_SELECT, &phy_data);
+ regs_buff[18] = (u32)phy_data; /* cable polarity */
+ e1000_write_phy_reg(hw, IGP01E1000_PHY_PAGE_SELECT,
+ IGP01E1000_PHY_PCS_INIT_REG);
+ e1000_read_phy_reg(hw, IGP01E1000_PHY_PCS_INIT_REG &
+ IGP01E1000_PHY_PAGE_SELECT, &phy_data);
+ regs_buff[19] = (u32)phy_data; /* cable polarity */
+ regs_buff[20] = 0; /* polarity correction enabled (always) */
+ regs_buff[22] = 0; /* phy receive errors (unavailable) */
+ regs_buff[23] = regs_buff[18]; /* mdix mode */
+ e1000_write_phy_reg(hw, IGP01E1000_PHY_PAGE_SELECT, 0x0);
+ } else {
+ e1000_read_phy_reg(hw, M88E1000_PHY_SPEC_STATUS, &phy_data);
+ regs_buff[13] = (u32)phy_data; /* cable length */
+ regs_buff[14] = 0; /* Dummy (to align w/ IGP phy reg dump) */
+ regs_buff[15] = 0; /* Dummy (to align w/ IGP phy reg dump) */
+ regs_buff[16] = 0; /* Dummy (to align w/ IGP phy reg dump) */
+ e1000_read_phy_reg(hw, M88E1000_PHY_SPEC_CTRL, &phy_data);
+ regs_buff[17] = (u32)phy_data; /* extended 10bt distance */
+ regs_buff[18] = regs_buff[13]; /* cable polarity */
+ regs_buff[19] = 0; /* Dummy (to align w/ IGP phy reg dump) */
+ regs_buff[20] = regs_buff[17]; /* polarity correction */
+ /* phy receive errors */
+ regs_buff[22] = adapter->phy_stats.receive_errors;
+ regs_buff[23] = regs_buff[13]; /* mdix mode */
+ }
+ regs_buff[21] = adapter->phy_stats.idle_errors; /* phy idle errors */
+ e1000_read_phy_reg(hw, PHY_1000T_STATUS, &phy_data);
+ regs_buff[24] = (u32)phy_data; /* phy local receiver status */
+ regs_buff[25] = regs_buff[24]; /* phy remote receiver status */
+ if (hw->mac_type >= e1000_82540 &&
+ hw->mac_type < e1000_82571 &&
+ hw->media_type == e1000_media_type_copper) {
+ regs_buff[26] = er32(MANC);
+ }
+}
+
+static int e1000_get_eeprom_len(struct net_device *netdev)
+{
+ struct e1000_adapter *adapter = netdev_priv(netdev);
+ struct e1000_hw *hw = &adapter->hw;
+
+ return hw->eeprom.word_size * 2;
+}
+
+static int e1000_get_eeprom(struct net_device *netdev,
+ struct ethtool_eeprom *eeprom, u8 *bytes)
+{
+ struct e1000_adapter *adapter = netdev_priv(netdev);
+ struct e1000_hw *hw = &adapter->hw;
+ u16 *eeprom_buff;
+ int first_word, last_word;
+ int ret_val = 0;
+ u16 i;
+
+ if (eeprom->len == 0)
+ return -EINVAL;
+
+ eeprom->magic = hw->vendor_id | (hw->device_id << 16);
+
+ first_word = eeprom->offset >> 1;
+ last_word = (eeprom->offset + eeprom->len - 1) >> 1;
+
+ eeprom_buff = kmalloc(sizeof(u16) *
+ (last_word - first_word + 1), GFP_KERNEL);
+ if (!eeprom_buff)
+ return -ENOMEM;
+
+ if (hw->eeprom.type == e1000_eeprom_spi)
+ ret_val = e1000_read_eeprom(hw, first_word,
+ last_word - first_word + 1,
+ eeprom_buff);
+ else {
+ for (i = 0; i < last_word - first_word + 1; i++) {
+ ret_val = e1000_read_eeprom(hw, first_word + i, 1,
+ &eeprom_buff[i]);
+ if (ret_val)
+ break;
+ }
+ }
+
+ /* Device's eeprom is always little-endian, word addressable */
+ for (i = 0; i < last_word - first_word + 1; i++)
+ le16_to_cpus(&eeprom_buff[i]);
+
+ memcpy(bytes, (u8 *)eeprom_buff + (eeprom->offset & 1),
+ eeprom->len);
+ kfree(eeprom_buff);
+
+ return ret_val;
+}
+
+static int e1000_set_eeprom(struct net_device *netdev,
+ struct ethtool_eeprom *eeprom, u8 *bytes)
+{
+ struct e1000_adapter *adapter = netdev_priv(netdev);
+ struct e1000_hw *hw = &adapter->hw;
+ u16 *eeprom_buff;
+ void *ptr;
+ int max_len, first_word, last_word, ret_val = 0;
+ u16 i;
+
+ if (eeprom->len == 0)
+ return -EOPNOTSUPP;
+
+ if (eeprom->magic != (hw->vendor_id | (hw->device_id << 16)))
+ return -EFAULT;
+
+ max_len = hw->eeprom.word_size * 2;
+
+ first_word = eeprom->offset >> 1;
+ last_word = (eeprom->offset + eeprom->len - 1) >> 1;
+ eeprom_buff = kmalloc(max_len, GFP_KERNEL);
+ if (!eeprom_buff)
+ return -ENOMEM;
+
+ ptr = (void *)eeprom_buff;
+
+ if (eeprom->offset & 1) {
+ /* need read/modify/write of first changed EEPROM word */
+ /* only the second byte of the word is being modified */
+ ret_val = e1000_read_eeprom(hw, first_word, 1,
+ &eeprom_buff[0]);
+ ptr++;
+ }
+ if (((eeprom->offset + eeprom->len) & 1) && (ret_val == 0)) {
+ /* need read/modify/write of last changed EEPROM word */
+ /* only the first byte of the word is being modified */
+ ret_val = e1000_read_eeprom(hw, last_word, 1,
+ &eeprom_buff[last_word - first_word]);
+ }
+
+ /* Device's eeprom is always little-endian, word addressable */
+ for (i = 0; i < last_word - first_word + 1; i++)
+ le16_to_cpus(&eeprom_buff[i]);
+
+ memcpy(ptr, bytes, eeprom->len);
+
+ for (i = 0; i < last_word - first_word + 1; i++)
+ eeprom_buff[i] = cpu_to_le16(eeprom_buff[i]);
+
+ ret_val = e1000_write_eeprom(hw, first_word,
+ last_word - first_word + 1, eeprom_buff);
+
+	/* Update the checksum over the first part of the EEPROM if needed
+	 * and flush shadow RAM for 82573 controllers */
+ if ((ret_val == 0) && ((first_word <= EEPROM_CHECKSUM_REG) ||
+ (hw->mac_type == e1000_82573)))
+ e1000_update_eeprom_checksum(hw);
+
+ kfree(eeprom_buff);
+ return ret_val;
+}
+
+static void e1000_get_drvinfo(struct net_device *netdev,
+ struct ethtool_drvinfo *drvinfo)
+{
+ struct e1000_adapter *adapter = netdev_priv(netdev);
+ struct e1000_hw *hw = &adapter->hw;
+ char firmware_version[32];
+ u16 eeprom_data;
+
+ strncpy(drvinfo->driver, e1000_driver_name, 32);
+ strncpy(drvinfo->version, e1000_driver_version, 32);
+
+ /* EEPROM image version # is reported as firmware version # for
+ * 8257{1|2|3} controllers */
+ e1000_read_eeprom(hw, 5, 1, &eeprom_data);
+ switch (hw->mac_type) {
+ case e1000_82571:
+ case e1000_82572:
+ case e1000_82573:
+ case e1000_80003es2lan:
+ case e1000_ich8lan:
+ sprintf(firmware_version, "%d.%d-%d",
+ (eeprom_data & 0xF000) >> 12,
+ (eeprom_data & 0x0FF0) >> 4,
+ eeprom_data & 0x000F);
+ break;
+ default:
+ sprintf(firmware_version, "N/A");
+ }
+
+ strncpy(drvinfo->fw_version, firmware_version, 32);
+ strncpy(drvinfo->bus_info, pci_name(adapter->pdev), 32);
+ drvinfo->regdump_len = e1000_get_regs_len(netdev);
+ drvinfo->eedump_len = e1000_get_eeprom_len(netdev);
+}
+
+static void e1000_get_ringparam(struct net_device *netdev,
+ struct ethtool_ringparam *ring)
+{
+ struct e1000_adapter *adapter = netdev_priv(netdev);
+ struct e1000_hw *hw = &adapter->hw;
+ e1000_mac_type mac_type = hw->mac_type;
+ struct e1000_tx_ring *txdr = adapter->tx_ring;
+ struct e1000_rx_ring *rxdr = adapter->rx_ring;
+
+ ring->rx_max_pending = (mac_type < e1000_82544) ? E1000_MAX_RXD :
+ E1000_MAX_82544_RXD;
+ ring->tx_max_pending = (mac_type < e1000_82544) ? E1000_MAX_TXD :
+ E1000_MAX_82544_TXD;
+ ring->rx_mini_max_pending = 0;
+ ring->rx_jumbo_max_pending = 0;
+ ring->rx_pending = rxdr->count;
+ ring->tx_pending = txdr->count;
+ ring->rx_mini_pending = 0;
+ ring->rx_jumbo_pending = 0;
+}
+
+static int e1000_set_ringparam(struct net_device *netdev,
+ struct ethtool_ringparam *ring)
+{
+ struct e1000_adapter *adapter = netdev_priv(netdev);
+ struct e1000_hw *hw = &adapter->hw;
+ e1000_mac_type mac_type = hw->mac_type;
+ struct e1000_tx_ring *txdr, *tx_old;
+ struct e1000_rx_ring *rxdr, *rx_old;
+ int i, err;
+
+ if ((ring->rx_mini_pending) || (ring->rx_jumbo_pending))
+ return -EINVAL;
+
+ while (test_and_set_bit(__E1000_RESETTING, &adapter->flags))
+ msleep(1);
+
+ if (netif_running(adapter->netdev))
+ e1000_down(adapter);
+
+ tx_old = adapter->tx_ring;
+ rx_old = adapter->rx_ring;
+
+ err = -ENOMEM;
+ txdr = kcalloc(adapter->num_tx_queues, sizeof(struct e1000_tx_ring), GFP_KERNEL);
+ if (!txdr)
+ goto err_alloc_tx;
+
+ rxdr = kcalloc(adapter->num_rx_queues, sizeof(struct e1000_rx_ring), GFP_KERNEL);
+ if (!rxdr)
+ goto err_alloc_rx;
+
+ adapter->tx_ring = txdr;
+ adapter->rx_ring = rxdr;
+
+ rxdr->count = max(ring->rx_pending,(u32)E1000_MIN_RXD);
+ rxdr->count = min(rxdr->count,(u32)(mac_type < e1000_82544 ?
+ E1000_MAX_RXD : E1000_MAX_82544_RXD));
+ rxdr->count = ALIGN(rxdr->count, REQ_RX_DESCRIPTOR_MULTIPLE);
+
+ txdr->count = max(ring->tx_pending,(u32)E1000_MIN_TXD);
+ txdr->count = min(txdr->count,(u32)(mac_type < e1000_82544 ?
+ E1000_MAX_TXD : E1000_MAX_82544_TXD));
+ txdr->count = ALIGN(txdr->count, REQ_TX_DESCRIPTOR_MULTIPLE);
+
+ for (i = 0; i < adapter->num_tx_queues; i++)
+ txdr[i].count = txdr->count;
+ for (i = 0; i < adapter->num_rx_queues; i++)
+ rxdr[i].count = rxdr->count;
+
+ if (netif_running(adapter->netdev)) {
+ /* Try to get new resources before deleting old */
+ err = e1000_setup_all_rx_resources(adapter);
+ if (err)
+ goto err_setup_rx;
+ err = e1000_setup_all_tx_resources(adapter);
+ if (err)
+ goto err_setup_tx;
+
+ /* save the new, restore the old in order to free it,
+ * then restore the new back again */
+
+ adapter->rx_ring = rx_old;
+ adapter->tx_ring = tx_old;
+ e1000_free_all_rx_resources(adapter);
+ e1000_free_all_tx_resources(adapter);
+ kfree(tx_old);
+ kfree(rx_old);
+ adapter->rx_ring = rxdr;
+ adapter->tx_ring = txdr;
+ err = e1000_up(adapter);
+ if (err)
+ goto err_setup;
+ }
+
+ clear_bit(__E1000_RESETTING, &adapter->flags);
+ return 0;
+err_setup_tx:
+ e1000_free_all_rx_resources(adapter);
+err_setup_rx:
+ adapter->rx_ring = rx_old;
+ adapter->tx_ring = tx_old;
+ kfree(rxdr);
+err_alloc_rx:
+ kfree(txdr);
+err_alloc_tx:
+ e1000_up(adapter);
+err_setup:
+ clear_bit(__E1000_RESETTING, &adapter->flags);
+ return err;
+}
+
+static bool reg_pattern_test(struct e1000_adapter *adapter, u64 *data, int reg,
+ u32 mask, u32 write)
+{
+ struct e1000_hw *hw = &adapter->hw;
+ static const u32 test[] =
+ {0x5A5A5A5A, 0xA5A5A5A5, 0x00000000, 0xFFFFFFFF};
+ u8 __iomem *address = hw->hw_addr + reg;
+ u32 read;
+ int i;
+
+ for (i = 0; i < ARRAY_SIZE(test); i++) {
+ writel(write & test[i], address);
+ read = readl(address);
+ if (read != (write & test[i] & mask)) {
+ DPRINTK(DRV, ERR, "pattern test reg %04X failed: "
+ "got 0x%08X expected 0x%08X\n",
+ reg, read, (write & test[i] & mask));
+ *data = reg;
+ return true;
+ }
+ }
+ return false;
+}
+
+static bool reg_set_and_check(struct e1000_adapter *adapter, u64 *data, int reg,
+ u32 mask, u32 write)
+{
+ struct e1000_hw *hw = &adapter->hw;
+ u8 __iomem *address = hw->hw_addr + reg;
+ u32 read;
+
+ writel(write & mask, address);
+ read = readl(address);
+ if ((read & mask) != (write & mask)) {
+ DPRINTK(DRV, ERR, "set/check reg %04X test failed: "
+ "got 0x%08X expected 0x%08X\n",
+ reg, (read & mask), (write & mask));
+ *data = reg;
+ return true;
+ }
+ return false;
+}
+
+#define REG_PATTERN_TEST(reg, mask, write) \
+ do { \
+ if (reg_pattern_test(adapter, data, \
+ (hw->mac_type >= e1000_82543) \
+ ? E1000_##reg : E1000_82542_##reg, \
+ mask, write)) \
+ return 1; \
+ } while (0)
+
+#define REG_SET_AND_CHECK(reg, mask, write) \
+ do { \
+ if (reg_set_and_check(adapter, data, \
+ (hw->mac_type >= e1000_82543) \
+ ? E1000_##reg : E1000_82542_##reg, \
+ mask, write)) \
+ return 1; \
+ } while (0)
+
+static int e1000_reg_test(struct e1000_adapter *adapter, u64 *data)
+{
+ u32 value, before, after;
+ u32 i, toggle;
+ struct e1000_hw *hw = &adapter->hw;
+
+ /* The status register is Read Only, so a write should fail.
+ * Some bits that get toggled are ignored.
+ */
+ switch (hw->mac_type) {
+ /* there are several bits on newer hardware that are r/w */
+ case e1000_82571:
+ case e1000_82572:
+ case e1000_80003es2lan:
+ toggle = 0x7FFFF3FF;
+ break;
+ case e1000_82573:
+ case e1000_ich8lan:
+ toggle = 0x7FFFF033;
+ break;
+ default:
+ toggle = 0xFFFFF833;
+ break;
+ }
+
+ before = er32(STATUS);
+ value = (er32(STATUS) & toggle);
+ ew32(STATUS, toggle);
+ after = er32(STATUS) & toggle;
+ if (value != after) {
+ DPRINTK(DRV, ERR, "failed STATUS register test got: "
+ "0x%08X expected: 0x%08X\n", after, value);
+ *data = 1;
+ return 1;
+ }
+ /* restore previous status */
+ ew32(STATUS, before);
+
+ if (hw->mac_type != e1000_ich8lan) {
+ REG_PATTERN_TEST(FCAL, 0xFFFFFFFF, 0xFFFFFFFF);
+ REG_PATTERN_TEST(FCAH, 0x0000FFFF, 0xFFFFFFFF);
+ REG_PATTERN_TEST(FCT, 0x0000FFFF, 0xFFFFFFFF);
+ REG_PATTERN_TEST(VET, 0x0000FFFF, 0xFFFFFFFF);
+ }
+
+ REG_PATTERN_TEST(RDTR, 0x0000FFFF, 0xFFFFFFFF);
+ REG_PATTERN_TEST(RDBAH, 0xFFFFFFFF, 0xFFFFFFFF);
+ REG_PATTERN_TEST(RDLEN, 0x000FFF80, 0x000FFFFF);
+ REG_PATTERN_TEST(RDH, 0x0000FFFF, 0x0000FFFF);
+ REG_PATTERN_TEST(RDT, 0x0000FFFF, 0x0000FFFF);
+ REG_PATTERN_TEST(FCRTH, 0x0000FFF8, 0x0000FFF8);
+ REG_PATTERN_TEST(FCTTV, 0x0000FFFF, 0x0000FFFF);
+ REG_PATTERN_TEST(TIPG, 0x3FFFFFFF, 0x3FFFFFFF);
+ REG_PATTERN_TEST(TDBAH, 0xFFFFFFFF, 0xFFFFFFFF);
+ REG_PATTERN_TEST(TDLEN, 0x000FFF80, 0x000FFFFF);
+
+ REG_SET_AND_CHECK(RCTL, 0xFFFFFFFF, 0x00000000);
+
+ before = (hw->mac_type == e1000_ich8lan ?
+ 0x06C3B33E : 0x06DFB3FE);
+ REG_SET_AND_CHECK(RCTL, before, 0x003FFFFB);
+ REG_SET_AND_CHECK(TCTL, 0xFFFFFFFF, 0x00000000);
+
+ if (hw->mac_type >= e1000_82543) {
+
+ REG_SET_AND_CHECK(RCTL, before, 0xFFFFFFFF);
+ REG_PATTERN_TEST(RDBAL, 0xFFFFFFF0, 0xFFFFFFFF);
+ if (hw->mac_type != e1000_ich8lan)
+ REG_PATTERN_TEST(TXCW, 0xC000FFFF, 0x0000FFFF);
+ REG_PATTERN_TEST(TDBAL, 0xFFFFFFF0, 0xFFFFFFFF);
+ REG_PATTERN_TEST(TIDV, 0x0000FFFF, 0x0000FFFF);
+ value = (hw->mac_type == e1000_ich8lan ?
+ E1000_RAR_ENTRIES_ICH8LAN : E1000_RAR_ENTRIES);
+ for (i = 0; i < value; i++) {
+ REG_PATTERN_TEST(RA + (((i << 1) + 1) << 2), 0x8003FFFF,
+ 0xFFFFFFFF);
+ }
+
+ } else {
+
+ REG_SET_AND_CHECK(RCTL, 0xFFFFFFFF, 0x01FFFFFF);
+ REG_PATTERN_TEST(RDBAL, 0xFFFFF000, 0xFFFFFFFF);
+ REG_PATTERN_TEST(TXCW, 0x0000FFFF, 0x0000FFFF);
+ REG_PATTERN_TEST(TDBAL, 0xFFFFF000, 0xFFFFFFFF);
+
+ }
+
+ value = (hw->mac_type == e1000_ich8lan ?
+ E1000_MC_TBL_SIZE_ICH8LAN : E1000_MC_TBL_SIZE);
+ for (i = 0; i < value; i++)
+ REG_PATTERN_TEST(MTA + (i << 2), 0xFFFFFFFF, 0xFFFFFFFF);
+
+ *data = 0;
+ return 0;
+}
+
+static int e1000_eeprom_test(struct e1000_adapter *adapter, u64 *data)
+{
+ struct e1000_hw *hw = &adapter->hw;
+ u16 temp;
+ u16 checksum = 0;
+ u16 i;
+
+ *data = 0;
+ /* Read and add up the contents of the EEPROM */
+ for (i = 0; i < (EEPROM_CHECKSUM_REG + 1); i++) {
+ if ((e1000_read_eeprom(hw, i, 1, &temp)) < 0) {
+ *data = 1;
+ break;
+ }
+ checksum += temp;
+ }
+
+ /* If Checksum is not Correct return error else test passed */
+ if ((checksum != (u16)EEPROM_SUM) && !(*data))
+ *data = 2;
+
+ return *data;
+}
+
+static irqreturn_t e1000_test_intr(int irq, void *data)
+{
+ struct net_device *netdev = (struct net_device *)data;
+ struct e1000_adapter *adapter = netdev_priv(netdev);
+ struct e1000_hw *hw = &adapter->hw;
+
+ adapter->test_icr |= er32(ICR);
+
+ return IRQ_HANDLED;
+}
+
+static int e1000_intr_test(struct e1000_adapter *adapter, u64 *data)
+{
+ struct net_device *netdev = adapter->netdev;
+ u32 mask, i = 0;
+ bool shared_int = true;
+ u32 irq = adapter->pdev->irq;
+ struct e1000_hw *hw = &adapter->hw;
+
+ *data = 0;
+
+ /* NOTE: we don't test MSI interrupts here, yet */
+ /* Hook up test interrupt handler just for this test */
+ if (!request_irq(irq, &e1000_test_intr, IRQF_PROBE_SHARED, netdev->name,
+ netdev))
+ shared_int = false;
+ else if (request_irq(irq, &e1000_test_intr, IRQF_SHARED,
+ netdev->name, netdev)) {
+ *data = 1;
+ return -1;
+ }
+ DPRINTK(HW, INFO, "testing %s interrupt\n",
+ (shared_int ? "shared" : "unshared"));
+
+ /* Disable all the interrupts */
+ ew32(IMC, 0xFFFFFFFF);
+ msleep(10);
+
+ /* Test each interrupt */
+ for (; i < 10; i++) {
+
+ if (hw->mac_type == e1000_ich8lan && i == 8)
+ continue;
+
+ /* Interrupt to test */
+ mask = 1 << i;
+
+ if (!shared_int) {
+ /* Disable the interrupt to be reported in
+ * the cause register and then force the same
+ * interrupt and see if one gets posted. If
+ * an interrupt was posted to the bus, the
+ * test failed.
+ */
+ adapter->test_icr = 0;
+ ew32(IMC, mask);
+ ew32(ICS, mask);
+ msleep(10);
+
+ if (adapter->test_icr & mask) {
+ *data = 3;
+ break;
+ }
+ }
+
+ /* Enable the interrupt to be reported in
+ * the cause register and then force the same
+ * interrupt and see if one gets posted. If
+ * an interrupt was not posted to the bus, the
+ * test failed.
+ */
+ adapter->test_icr = 0;
+ ew32(IMS, mask);
+ ew32(ICS, mask);
+ msleep(10);
+
+ if (!(adapter->test_icr & mask)) {
+ *data = 4;
+ break;
+ }
+
+ if (!shared_int) {
+ /* Disable the other interrupts to be reported in
+ * the cause register and then force the other
+ * interrupts and see if any get posted. If
+ * an interrupt was posted to the bus, the
+ * test failed.
+ */
+ adapter->test_icr = 0;
+ ew32(IMC, ~mask & 0x00007FFF);
+ ew32(ICS, ~mask & 0x00007FFF);
+ msleep(10);
+
+ if (adapter->test_icr) {
+ *data = 5;
+ break;
+ }
+ }
+ }
+
+ /* Disable all the interrupts */
+ ew32(IMC, 0xFFFFFFFF);
+ msleep(10);
+
+ /* Unhook test interrupt handler */
+ free_irq(irq, netdev);
+
+ return *data;
+}
+
+static void e1000_free_desc_rings(struct e1000_adapter *adapter)
+{
+ struct e1000_tx_ring *txdr = &adapter->test_tx_ring;
+ struct e1000_rx_ring *rxdr = &adapter->test_rx_ring;
+ struct pci_dev *pdev = adapter->pdev;
+ int i;
+
+ if (txdr->desc && txdr->buffer_info) {
+ for (i = 0; i < txdr->count; i++) {
+ if (txdr->buffer_info[i].dma)
+ pci_unmap_single(pdev, txdr->buffer_info[i].dma,
+ txdr->buffer_info[i].length,
+ PCI_DMA_TODEVICE);
+ if (txdr->buffer_info[i].skb)
+ dev_kfree_skb(txdr->buffer_info[i].skb);
+ }
+ }
+
+ if (rxdr->desc && rxdr->buffer_info) {
+ for (i = 0; i < rxdr->count; i++) {
+ if (rxdr->buffer_info[i].dma)
+ pci_unmap_single(pdev, rxdr->buffer_info[i].dma,
+ rxdr->buffer_info[i].length,
+ PCI_DMA_FROMDEVICE);
+ if (rxdr->buffer_info[i].skb)
+ dev_kfree_skb(rxdr->buffer_info[i].skb);
+ }
+ }
+
+ if (txdr->desc) {
+ pci_free_consistent(pdev, txdr->size, txdr->desc, txdr->dma);
+ txdr->desc = NULL;
+ }
+ if (rxdr->desc) {
+ pci_free_consistent(pdev, rxdr->size, rxdr->desc, rxdr->dma);
+ rxdr->desc = NULL;
+ }
+
+ kfree(txdr->buffer_info);
+ txdr->buffer_info = NULL;
+ kfree(rxdr->buffer_info);
+ rxdr->buffer_info = NULL;
+
+ return;
+}
+
+static int e1000_setup_desc_rings(struct e1000_adapter *adapter)
+{
+ struct e1000_hw *hw = &adapter->hw;
+ struct e1000_tx_ring *txdr = &adapter->test_tx_ring;
+ struct e1000_rx_ring *rxdr = &adapter->test_rx_ring;
+ struct pci_dev *pdev = adapter->pdev;
+ u32 rctl;
+ int i, ret_val;
+
+ /* Setup Tx descriptor ring and Tx buffers */
+
+ if (!txdr->count)
+ txdr->count = E1000_DEFAULT_TXD;
+
+ txdr->buffer_info = kcalloc(txdr->count, sizeof(struct e1000_buffer),
+ GFP_KERNEL);
+ if (!txdr->buffer_info) {
+ ret_val = 1;
+ goto err_nomem;
+ }
+
+ txdr->size = txdr->count * sizeof(struct e1000_tx_desc);
+ txdr->size = ALIGN(txdr->size, 4096);
+ txdr->desc = pci_alloc_consistent(pdev, txdr->size, &txdr->dma);
+ if (!txdr->desc) {
+ ret_val = 2;
+ goto err_nomem;
+ }
+ memset(txdr->desc, 0, txdr->size);
+ txdr->next_to_use = txdr->next_to_clean = 0;
+
+ ew32(TDBAL, ((u64)txdr->dma & 0x00000000FFFFFFFF));
+ ew32(TDBAH, ((u64)txdr->dma >> 32));
+ ew32(TDLEN, txdr->count * sizeof(struct e1000_tx_desc));
+ ew32(TDH, 0);
+ ew32(TDT, 0);
+ ew32(TCTL, E1000_TCTL_PSP | E1000_TCTL_EN |
+ E1000_COLLISION_THRESHOLD << E1000_CT_SHIFT |
+ E1000_FDX_COLLISION_DISTANCE << E1000_COLD_SHIFT);
+
+ for (i = 0; i < txdr->count; i++) {
+ struct e1000_tx_desc *tx_desc = E1000_TX_DESC(*txdr, i);
+ struct sk_buff *skb;
+ unsigned int size = 1024;
+
+ skb = alloc_skb(size, GFP_KERNEL);
+ if (!skb) {
+ ret_val = 3;
+ goto err_nomem;
+ }
+ skb_put(skb, size);
+ txdr->buffer_info[i].skb = skb;
+ txdr->buffer_info[i].length = skb->len;
+ txdr->buffer_info[i].dma =
+ pci_map_single(pdev, skb->data, skb->len,
+ PCI_DMA_TODEVICE);
+ tx_desc->buffer_addr = cpu_to_le64(txdr->buffer_info[i].dma);
+ tx_desc->lower.data = cpu_to_le32(skb->len);
+ tx_desc->lower.data |= cpu_to_le32(E1000_TXD_CMD_EOP |
+ E1000_TXD_CMD_IFCS |
+ E1000_TXD_CMD_RPS);
+ tx_desc->upper.data = 0;
+ }
+
+ /* Setup Rx descriptor ring and Rx buffers */
+
+ if (!rxdr->count)
+ rxdr->count = E1000_DEFAULT_RXD;
+
+ rxdr->buffer_info = kcalloc(rxdr->count, sizeof(struct e1000_buffer),
+ GFP_KERNEL);
+ if (!rxdr->buffer_info) {
+ ret_val = 4;
+ goto err_nomem;
+ }
+
+ rxdr->size = rxdr->count * sizeof(struct e1000_rx_desc);
+ rxdr->desc = pci_alloc_consistent(pdev, rxdr->size, &rxdr->dma);
+ if (!rxdr->desc) {
+ ret_val = 5;
+ goto err_nomem;
+ }
+ memset(rxdr->desc, 0, rxdr->size);
+ rxdr->next_to_use = rxdr->next_to_clean = 0;
+
+ rctl = er32(RCTL);
+ ew32(RCTL, rctl & ~E1000_RCTL_EN);
+ ew32(RDBAL, ((u64)rxdr->dma & 0xFFFFFFFF));
+ ew32(RDBAH, ((u64)rxdr->dma >> 32));
+ ew32(RDLEN, rxdr->size);
+ ew32(RDH, 0);
+ ew32(RDT, 0);
+ rctl = E1000_RCTL_EN | E1000_RCTL_BAM | E1000_RCTL_SZ_2048 |
+ E1000_RCTL_LBM_NO | E1000_RCTL_RDMTS_HALF |
+ (hw->mc_filter_type << E1000_RCTL_MO_SHIFT);
+ ew32(RCTL, rctl);
+
+ for (i = 0; i < rxdr->count; i++) {
+ struct e1000_rx_desc *rx_desc = E1000_RX_DESC(*rxdr, i);
+ struct sk_buff *skb;
+
+ skb = alloc_skb(E1000_RXBUFFER_2048 + NET_IP_ALIGN, GFP_KERNEL);
+ if (!skb) {
+ ret_val = 6;
+ goto err_nomem;
+ }
+ skb_reserve(skb, NET_IP_ALIGN);
+ rxdr->buffer_info[i].skb = skb;
+ rxdr->buffer_info[i].length = E1000_RXBUFFER_2048;
+ rxdr->buffer_info[i].dma =
+ pci_map_single(pdev, skb->data, E1000_RXBUFFER_2048,
+ PCI_DMA_FROMDEVICE);
+ rx_desc->buffer_addr = cpu_to_le64(rxdr->buffer_info[i].dma);
+ memset(skb->data, 0x00, skb->len);
+ }
+
+ return 0;
+
+err_nomem:
+ e1000_free_desc_rings(adapter);
+ return ret_val;
+}
+
+static void e1000_phy_disable_receiver(struct e1000_adapter *adapter)
+{
+ struct e1000_hw *hw = &adapter->hw;
+
+ /* Write out to PHY registers 29 and 30 to disable the Receiver. */
+ e1000_write_phy_reg(hw, 29, 0x001F);
+ e1000_write_phy_reg(hw, 30, 0x8FFC);
+ e1000_write_phy_reg(hw, 29, 0x001A);
+ e1000_write_phy_reg(hw, 30, 0x8FF0);
+}
+
+static void e1000_phy_reset_clk_and_crs(struct e1000_adapter *adapter)
+{
+ struct e1000_hw *hw = &adapter->hw;
+ u16 phy_reg;
+
+ /* Because we reset the PHY above, we need to re-force TX_CLK in the
+ * Extended PHY Specific Control Register to 25MHz clock. This
+ * value defaults back to a 2.5MHz clock when the PHY is reset.
+ */
+ e1000_read_phy_reg(hw, M88E1000_EXT_PHY_SPEC_CTRL, &phy_reg);
+ phy_reg |= M88E1000_EPSCR_TX_CLK_25;
+ e1000_write_phy_reg(hw,
+ M88E1000_EXT_PHY_SPEC_CTRL, phy_reg);
+
+ /* In addition, because of the s/w reset above, we need to enable
+ * CRS on TX. This must be set for both full and half duplex
+ * operation.
+ */
+ e1000_read_phy_reg(hw, M88E1000_PHY_SPEC_CTRL, &phy_reg);
+ phy_reg |= M88E1000_PSCR_ASSERT_CRS_ON_TX;
+ e1000_write_phy_reg(hw,
+ M88E1000_PHY_SPEC_CTRL, phy_reg);
+}
+
+static int e1000_nonintegrated_phy_loopback(struct e1000_adapter *adapter)
+{
+ struct e1000_hw *hw = &adapter->hw;
+ u32 ctrl_reg;
+ u16 phy_reg;
+
+ /* Setup the Device Control Register for PHY loopback test. */
+
+ ctrl_reg = er32(CTRL);
+ ctrl_reg |= (E1000_CTRL_ILOS | /* Invert Loss-Of-Signal */
+ E1000_CTRL_FRCSPD | /* Set the Force Speed Bit */
+ E1000_CTRL_FRCDPX | /* Set the Force Duplex Bit */
+ E1000_CTRL_SPD_1000 | /* Force Speed to 1000 */
+ E1000_CTRL_FD); /* Force Duplex to FULL */
+
+ ew32(CTRL, ctrl_reg);
+
+ /* Read the PHY Specific Control Register (0x10) */
+ e1000_read_phy_reg(hw, M88E1000_PHY_SPEC_CTRL, &phy_reg);
+
+ /* Clear Auto-Crossover bits in PHY Specific Control Register
+ * (bits 6:5).
+ */
+ phy_reg &= ~M88E1000_PSCR_AUTO_X_MODE;
+ e1000_write_phy_reg(hw, M88E1000_PHY_SPEC_CTRL, phy_reg);
+
+ /* Perform software reset on the PHY */
+ e1000_phy_reset(hw);
+
+ /* Have to setup TX_CLK and TX_CRS after software reset */
+ e1000_phy_reset_clk_and_crs(adapter);
+
+ e1000_write_phy_reg(hw, PHY_CTRL, 0x8100);
+
+ /* Wait for reset to complete. */
+ udelay(500);
+
+ /* Have to setup TX_CLK and TX_CRS after software reset */
+ e1000_phy_reset_clk_and_crs(adapter);
+
+ /* Write out to PHY registers 29 and 30 to disable the Receiver. */
+ e1000_phy_disable_receiver(adapter);
+
+ /* Set the loopback bit in the PHY control register. */
+ e1000_read_phy_reg(hw, PHY_CTRL, &phy_reg);
+ phy_reg |= MII_CR_LOOPBACK;
+ e1000_write_phy_reg(hw, PHY_CTRL, phy_reg);
+
+ /* Setup TX_CLK and TX_CRS one more time. */
+ e1000_phy_reset_clk_and_crs(adapter);
+
+ /* Check Phy Configuration */
+ e1000_read_phy_reg(hw, PHY_CTRL, &phy_reg);
+ if (phy_reg != 0x4100)
+ return 9;
+
+ e1000_read_phy_reg(hw, M88E1000_EXT_PHY_SPEC_CTRL, &phy_reg);
+ if (phy_reg != 0x0070)
+ return 10;
+
+ e1000_read_phy_reg(hw, 29, &phy_reg);
+ if (phy_reg != 0x001A)
+ return 11;
+
+ return 0;
+}
+
+static int e1000_integrated_phy_loopback(struct e1000_adapter *adapter)
+{
+ struct e1000_hw *hw = &adapter->hw;
+ u32 ctrl_reg = 0;
+ u32 stat_reg = 0;
+
+ hw->autoneg = false;
+
+ if (hw->phy_type == e1000_phy_m88) {
+ /* Auto-MDI/MDIX Off */
+ e1000_write_phy_reg(hw,
+ M88E1000_PHY_SPEC_CTRL, 0x0808);
+ /* reset to update Auto-MDI/MDIX */
+ e1000_write_phy_reg(hw, PHY_CTRL, 0x9140);
+ /* autoneg off */
+ e1000_write_phy_reg(hw, PHY_CTRL, 0x8140);
+ } else if (hw->phy_type == e1000_phy_gg82563)
+ e1000_write_phy_reg(hw,
+ GG82563_PHY_KMRN_MODE_CTRL,
+ 0x1CC);
+
+ ctrl_reg = er32(CTRL);
+
+ if (hw->phy_type == e1000_phy_ife) {
+ /* force 100, set loopback */
+ e1000_write_phy_reg(hw, PHY_CTRL, 0x6100);
+
+ /* Now set up the MAC to the same speed/duplex as the PHY. */
+ ctrl_reg &= ~E1000_CTRL_SPD_SEL; /* Clear the speed sel bits */
+ ctrl_reg |= (E1000_CTRL_FRCSPD | /* Set the Force Speed Bit */
+ E1000_CTRL_FRCDPX | /* Set the Force Duplex Bit */
+ E1000_CTRL_SPD_100 |/* Force Speed to 100 */
+ E1000_CTRL_FD); /* Force Duplex to FULL */
+ } else {
+ /* force 1000, set loopback */
+ e1000_write_phy_reg(hw, PHY_CTRL, 0x4140);
+
+ /* Now set up the MAC to the same speed/duplex as the PHY. */
+ ctrl_reg = er32(CTRL);
+ ctrl_reg &= ~E1000_CTRL_SPD_SEL; /* Clear the speed sel bits */
+ ctrl_reg |= (E1000_CTRL_FRCSPD | /* Set the Force Speed Bit */
+ E1000_CTRL_FRCDPX | /* Set the Force Duplex Bit */
+ E1000_CTRL_SPD_1000 |/* Force Speed to 1000 */
+ E1000_CTRL_FD); /* Force Duplex to FULL */
+ }
+
+ if (hw->media_type == e1000_media_type_copper &&
+ hw->phy_type == e1000_phy_m88)
+ ctrl_reg |= E1000_CTRL_ILOS; /* Invert Loss of Signal */
+ else {
+ /* Set the ILOS bit on the fiber NIC if half
+ * duplex link is detected. */
+ stat_reg = er32(STATUS);
+ if ((stat_reg & E1000_STATUS_FD) == 0)
+ ctrl_reg |= (E1000_CTRL_ILOS | E1000_CTRL_SLU);
+ }
+
+ ew32(CTRL, ctrl_reg);
+
+ /* Disable the receiver on the PHY so that the PHY does not begin to
+ * autonegotiate when a cable is plugged back into the NIC.
+ */
+ if (hw->phy_type == e1000_phy_m88)
+ e1000_phy_disable_receiver(adapter);
+
+ udelay(500);
+
+ return 0;
+}
+
+static int e1000_set_phy_loopback(struct e1000_adapter *adapter)
+{
+ struct e1000_hw *hw = &adapter->hw;
+ u16 phy_reg = 0;
+ u16 count = 0;
+
+ switch (hw->mac_type) {
+ case e1000_82543:
+ if (hw->media_type == e1000_media_type_copper) {
+ /* Attempt to setup Loopback mode on Non-integrated PHY.
+ * Some PHY registers get corrupted at random, so
+ * attempt this 10 times.
+ */
+ while (e1000_nonintegrated_phy_loopback(adapter) &&
+ count++ < 10);
+ if (count < 11)
+ return 0;
+ }
+ break;
+
+ case e1000_82544:
+ case e1000_82540:
+ case e1000_82545:
+ case e1000_82545_rev_3:
+ case e1000_82546:
+ case e1000_82546_rev_3:
+ case e1000_82541:
+ case e1000_82541_rev_2:
+ case e1000_82547:
+ case e1000_82547_rev_2:
+ case e1000_82571:
+ case e1000_82572:
+ case e1000_82573:
+ case e1000_80003es2lan:
+ case e1000_ich8lan:
+ return e1000_integrated_phy_loopback(adapter);
+ break;
+
+ default:
+ /* Default PHY loopback work is to read the MII
+ * control register and assert bit 14 (loopback mode).
+ */
+ e1000_read_phy_reg(hw, PHY_CTRL, &phy_reg);
+ phy_reg |= MII_CR_LOOPBACK;
+ e1000_write_phy_reg(hw, PHY_CTRL, phy_reg);
+ return 0;
+ break;
+ }
+
+ return 8;
+}
+
+static int e1000_setup_loopback_test(struct e1000_adapter *adapter)
+{
+ struct e1000_hw *hw = &adapter->hw;
+ u32 rctl;
+
+ if (hw->media_type == e1000_media_type_fiber ||
+ hw->media_type == e1000_media_type_internal_serdes) {
+ switch (hw->mac_type) {
+ case e1000_82545:
+ case e1000_82546:
+ case e1000_82545_rev_3:
+ case e1000_82546_rev_3:
+ return e1000_set_phy_loopback(adapter);
+ break;
+ case e1000_82571:
+ case e1000_82572:
+#define E1000_SERDES_LB_ON 0x410
+ e1000_set_phy_loopback(adapter);
+ ew32(SCTL, E1000_SERDES_LB_ON);
+ msleep(10);
+ return 0;
+ break;
+ default:
+ rctl = er32(RCTL);
+ rctl |= E1000_RCTL_LBM_TCVR;
+ ew32(RCTL, rctl);
+ return 0;
+ }
+ } else if (hw->media_type == e1000_media_type_copper)
+ return e1000_set_phy_loopback(adapter);
+
+ return 7;
+}
+
+static void e1000_loopback_cleanup(struct e1000_adapter *adapter)
+{
+ struct e1000_hw *hw = &adapter->hw;
+ u32 rctl;
+ u16 phy_reg;
+
+ rctl = er32(RCTL);
+ rctl &= ~(E1000_RCTL_LBM_TCVR | E1000_RCTL_LBM_MAC);
+ ew32(RCTL, rctl);
+
+ switch (hw->mac_type) {
+ case e1000_82571:
+ case e1000_82572:
+ if (hw->media_type == e1000_media_type_fiber ||
+ hw->media_type == e1000_media_type_internal_serdes) {
+#define E1000_SERDES_LB_OFF 0x400
+ ew32(SCTL, E1000_SERDES_LB_OFF);
+ msleep(10);
+ break;
+ }
+ /* Fall Through */
+ case e1000_82545:
+ case e1000_82546:
+ case e1000_82545_rev_3:
+ case e1000_82546_rev_3:
+ default:
+ hw->autoneg = true;
+ if (hw->phy_type == e1000_phy_gg82563)
+ e1000_write_phy_reg(hw,
+ GG82563_PHY_KMRN_MODE_CTRL,
+ 0x180);
+ e1000_read_phy_reg(hw, PHY_CTRL, &phy_reg);
+ if (phy_reg & MII_CR_LOOPBACK) {
+ phy_reg &= ~MII_CR_LOOPBACK;
+ e1000_write_phy_reg(hw, PHY_CTRL, phy_reg);
+ e1000_phy_reset(hw);
+ }
+ break;
+ }
+}
+
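+/* The loopback test frames below use a fixed pattern: the first half of the
+ * buffer is 0xFF, most of the second half is 0xAA, and two marker bytes
+ * (0xBE at frame_size/2 + 10 and 0xAF at frame_size/2 + 12) are placed so
+ * that e1000_check_lbtest_frame() can recognize the frame on receive. */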
+static void e1000_create_lbtest_frame(struct sk_buff *skb,
+ unsigned int frame_size)
+{
+ memset(skb->data, 0xFF, frame_size);
+ frame_size &= ~1;
+ memset(&skb->data[frame_size / 2], 0xAA, frame_size / 2 - 1);
+ memset(&skb->data[frame_size / 2 + 10], 0xBE, 1);
+ memset(&skb->data[frame_size / 2 + 12], 0xAF, 1);
+}
+
+static int e1000_check_lbtest_frame(struct sk_buff *skb,
+ unsigned int frame_size)
+{
+ frame_size &= ~1;
+ if (*(skb->data + 3) == 0xFF) {
+ if ((*(skb->data + frame_size / 2 + 10) == 0xBE) &&
+ (*(skb->data + frame_size / 2 + 12) == 0xAF)) {
+ return 0;
+ }
+ }
+ return 13;
+}
+
+static int e1000_run_loopback_test(struct e1000_adapter *adapter)
+{
+ struct e1000_hw *hw = &adapter->hw;
+ struct e1000_tx_ring *txdr = &adapter->test_tx_ring;
+ struct e1000_rx_ring *rxdr = &adapter->test_rx_ring;
+ struct pci_dev *pdev = adapter->pdev;
+ int i, j, k, l, lc, good_cnt, ret_val=0;
+ unsigned long time;
+
+ ew32(RDT, rxdr->count - 1);
+
+ /* Calculate the loop count based on the largest descriptor ring.
+ * The idea is to wrap the largest ring a number of times using 64
+ * send/receive pairs during each loop.
+ */
+
+ if (rxdr->count <= txdr->count)
+ lc = ((txdr->count / 64) * 2) + 1;
+ else
+ lc = ((rxdr->count / 64) * 2) + 1;
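+ /* For example, with the default of 256 descriptors per ring this gives
+ * lc = (256 / 64) * 2 + 1 = 9 passes of 64 frames each. */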
+
+ k = l = 0;
+ for (j = 0; j <= lc; j++) { /* loop count loop */
+ for (i = 0; i < 64; i++) { /* send the packets */
+ e1000_create_lbtest_frame(txdr->buffer_info[i].skb,
+ 1024);
+ pci_dma_sync_single_for_device(pdev,
+ txdr->buffer_info[k].dma,
+ txdr->buffer_info[k].length,
+ PCI_DMA_TODEVICE);
+ if (unlikely(++k == txdr->count)) k = 0;
+ }
+ ew32(TDT, k);
+ msleep(200);
+ time = jiffies; /* set the start time for the receive */
+ good_cnt = 0;
+ do { /* receive the sent packets */
+ pci_dma_sync_single_for_cpu(pdev,
+ rxdr->buffer_info[l].dma,
+ rxdr->buffer_info[l].length,
+ PCI_DMA_FROMDEVICE);
+
+ ret_val = e1000_check_lbtest_frame(
+ rxdr->buffer_info[l].skb,
+ 1024);
+ if (!ret_val)
+ good_cnt++;
+ if (unlikely(++l == rxdr->count)) l = 0;
+ /* time + 20 msecs (200 msecs on 2.4) is more than
+ * enough time to complete the receives; if it's
+ * exceeded, break out and report an error
+ */
+ } while (good_cnt < 64 && jiffies < (time + 20));
+ if (good_cnt != 64) {
+ ret_val = 13; /* ret_val is the same as mis-compare */
+ break;
+ }
+ if (jiffies >= (time + 2)) {
+ ret_val = 14; /* error code for time out error */
+ break;
+ }
+ } /* end loop count loop */
+ return ret_val;
+}
+
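+/* A non-zero loopback result identifies the failing stage: values 1-6 come
+ * from descriptor ring setup, 7-11 from putting the PHY/MAC into loopback,
+ * and 13 or 14 from the frame send/receive check in
+ * e1000_run_loopback_test(). */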
+static int e1000_loopback_test(struct e1000_adapter *adapter, u64 *data)
+{
+ struct e1000_hw *hw = &adapter->hw;
+
+ /* PHY loopback cannot be performed if SoL/IDER
+ * sessions are active */
+ if (e1000_check_phy_reset_block(hw)) {
+ DPRINTK(DRV, ERR, "Cannot do PHY loopback test "
+ "when SoL/IDER is active.\n");
+ *data = 0;
+ goto out;
+ }
+
+ *data = e1000_setup_desc_rings(adapter);
+ if (*data)
+ goto out;
+ *data = e1000_setup_loopback_test(adapter);
+ if (*data)
+ goto err_loopback;
+ *data = e1000_run_loopback_test(adapter);
+ e1000_loopback_cleanup(adapter);
+
+err_loopback:
+ e1000_free_desc_rings(adapter);
+out:
+ return *data;
+}
+
+static int e1000_link_test(struct e1000_adapter *adapter, u64 *data)
+{
+ struct e1000_hw *hw = &adapter->hw;
+ *data = 0;
+ if (hw->media_type == e1000_media_type_internal_serdes) {
+ int i = 0;
+ hw->serdes_link_down = true;
+
+ /* On some blade server designs, link establishment
+ * could take as long as 2-3 minutes */
+ do {
+ e1000_check_for_link(hw);
+ if (!hw->serdes_link_down)
+ return *data;
+ msleep(20);
+ } while (i++ < 3750);
+
+ *data = 1;
+ } else {
+ e1000_check_for_link(hw);
+ if (hw->autoneg) /* if auto_neg is set wait for it */
+ msleep(4000);
+
+ if (!(er32(STATUS) & E1000_STATUS_LU)) {
+ *data = 1;
+ }
+ }
+ return *data;
+}
+
+static int e1000_get_sset_count(struct net_device *netdev, int sset)
+{
+ switch (sset) {
+ case ETH_SS_TEST:
+ return E1000_TEST_LEN;
+ case ETH_SS_STATS:
+ return E1000_STATS_LEN;
+ default:
+ return -EOPNOTSUPP;
+ }
+}
+
+static void e1000_diag_test(struct net_device *netdev,
+ struct ethtool_test *eth_test, u64 *data)
+{
+ struct e1000_adapter *adapter = netdev_priv(netdev);
+ struct e1000_hw *hw = &adapter->hw;
+ bool if_running = netif_running(netdev);
+
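+ /* data[] holds one result word per sub-test: [0] register test,
+ * [1] EEPROM test, [2] interrupt test, [3] loopback test, [4] link test. */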
+ set_bit(__E1000_TESTING, &adapter->flags);
+ if (eth_test->flags == ETH_TEST_FL_OFFLINE) {
+ /* Offline tests */
+
+ /* save speed, duplex, autoneg settings */
+ u16 autoneg_advertised = hw->autoneg_advertised;
+ u8 forced_speed_duplex = hw->forced_speed_duplex;
+ u8 autoneg = hw->autoneg;
+
+ DPRINTK(HW, INFO, "offline testing starting\n");
+
+ /* Link test performed before hardware reset so autoneg doesn't
+ * interfere with test result */
+ if (e1000_link_test(adapter, &data[4]))
+ eth_test->flags |= ETH_TEST_FL_FAILED;
+
+ if (if_running)
+ /* indicate we're in test mode */
+ dev_close(netdev);
+ else
+ e1000_reset(adapter);
+
+ if (e1000_reg_test(adapter, &data[0]))
+ eth_test->flags |= ETH_TEST_FL_FAILED;
+
+ e1000_reset(adapter);
+ if (e1000_eeprom_test(adapter, &data[1]))
+ eth_test->flags |= ETH_TEST_FL_FAILED;
+
+ e1000_reset(adapter);
+ if (e1000_intr_test(adapter, &data[2]))
+ eth_test->flags |= ETH_TEST_FL_FAILED;
+
+ e1000_reset(adapter);
+ /* make sure the phy is powered up */
+ e1000_power_up_phy(adapter);
+ if (e1000_loopback_test(adapter, &data[3]))
+ eth_test->flags |= ETH_TEST_FL_FAILED;
+
+ /* restore speed, duplex, autoneg settings */
+ hw->autoneg_advertised = autoneg_advertised;
+ hw->forced_speed_duplex = forced_speed_duplex;
+ hw->autoneg = autoneg;
+
+ e1000_reset(adapter);
+ clear_bit(__E1000_TESTING, &adapter->flags);
+ if (if_running)
+ dev_open(netdev);
+ } else {
+ DPRINTK(HW, INFO, "online testing starting\n");
+ /* Online tests */
+ if (e1000_link_test(adapter, &data[4]))
+ eth_test->flags |= ETH_TEST_FL_FAILED;
+
+ /* Online tests aren't run; pass by default */
+ data[0] = 0;
+ data[1] = 0;
+ data[2] = 0;
+ data[3] = 0;
+
+ clear_bit(__E1000_TESTING, &adapter->flags);
+ }
+ msleep_interruptible(4 * 1000);
+}
+
+static int e1000_wol_exclusion(struct e1000_adapter *adapter,
+ struct ethtool_wolinfo *wol)
+{
+ struct e1000_hw *hw = &adapter->hw;
+ int retval = 1; /* fail by default */
+
+ switch (hw->device_id) {
+ case E1000_DEV_ID_82542:
+ case E1000_DEV_ID_82543GC_FIBER:
+ case E1000_DEV_ID_82543GC_COPPER:
+ case E1000_DEV_ID_82544EI_FIBER:
+ case E1000_DEV_ID_82546EB_QUAD_COPPER:
+ case E1000_DEV_ID_82545EM_FIBER:
+ case E1000_DEV_ID_82545EM_COPPER:
+ case E1000_DEV_ID_82546GB_QUAD_COPPER:
+ case E1000_DEV_ID_82546GB_PCIE:
+ case E1000_DEV_ID_82571EB_SERDES_QUAD:
+ /* these don't support WoL at all */
+ wol->supported = 0;
+ break;
+ case E1000_DEV_ID_82546EB_FIBER:
+ case E1000_DEV_ID_82546GB_FIBER:
+ case E1000_DEV_ID_82571EB_FIBER:
+ case E1000_DEV_ID_82571EB_SERDES:
+ case E1000_DEV_ID_82571EB_COPPER:
+ /* Wake events not supported on port B */
+ if (er32(STATUS) & E1000_STATUS_FUNC_1) {
+ wol->supported = 0;
+ break;
+ }
+ /* return success for non excluded adapter ports */
+ retval = 0;
+ break;
+ case E1000_DEV_ID_82571EB_QUAD_COPPER:
+ case E1000_DEV_ID_82571EB_QUAD_FIBER:
+ case E1000_DEV_ID_82571EB_QUAD_COPPER_LOWPROFILE:
+ case E1000_DEV_ID_82571PT_QUAD_COPPER:
+ case E1000_DEV_ID_82546GB_QUAD_COPPER_KSP3:
+ /* quad port adapters only support WoL on port A */
+ if (!adapter->quad_port_a) {
+ wol->supported = 0;
+ break;
+ }
+ /* return success for non excluded adapter ports */
+ retval = 0;
+ break;
+ default:
+ /* Dual port cards only support WoL on port A from now on,
+ * unless it was enabled in the EEPROM for port B,
+ * so exclude FUNC_1 ports from having WoL enabled. */
+ if (er32(STATUS) & E1000_STATUS_FUNC_1 &&
+ !adapter->eeprom_wol) {
+ wol->supported = 0;
+ break;
+ }
+
+ retval = 0;
+ }
+
+ return retval;
+}
+
+static void e1000_get_wol(struct net_device *netdev,
+ struct ethtool_wolinfo *wol)
+{
+ struct e1000_adapter *adapter = netdev_priv(netdev);
+ struct e1000_hw *hw = &adapter->hw;
+
+ wol->supported = WAKE_UCAST | WAKE_MCAST |
+ WAKE_BCAST | WAKE_MAGIC;
+ wol->wolopts = 0;
+
+ /* this function will set ->supported = 0 and return 1 if wol is not
+ * supported by this hardware */
+ if (e1000_wol_exclusion(adapter, wol) ||
+ !device_can_wakeup(&adapter->pdev->dev))
+ return;
+
+ /* apply any specific unsupported masks here */
+ switch (hw->device_id) {
+ case E1000_DEV_ID_82546GB_QUAD_COPPER_KSP3:
+ /* KSP3 does not support UCAST wake-ups */
+ wol->supported &= ~WAKE_UCAST;
+
+ if (adapter->wol & E1000_WUFC_EX)
+ DPRINTK(DRV, ERR, "Interface does not support "
+ "directed (unicast) frame wake-up packets\n");
+ break;
+ default:
+ break;
+ }
+
+ if (adapter->wol & E1000_WUFC_EX)
+ wol->wolopts |= WAKE_UCAST;
+ if (adapter->wol & E1000_WUFC_MC)
+ wol->wolopts |= WAKE_MCAST;
+ if (adapter->wol & E1000_WUFC_BC)
+ wol->wolopts |= WAKE_BCAST;
+ if (adapter->wol & E1000_WUFC_MAG)
+ wol->wolopts |= WAKE_MAGIC;
+
+ return;
+}
+
+static int e1000_set_wol(struct net_device *netdev, struct ethtool_wolinfo *wol)
+{
+ struct e1000_adapter *adapter = netdev_priv(netdev);
+ struct e1000_hw *hw = &adapter->hw;
+
+ if (wol->wolopts & (WAKE_PHY | WAKE_ARP | WAKE_MAGICSECURE))
+ return -EOPNOTSUPP;
+
+ if (e1000_wol_exclusion(adapter, wol) ||
+ !device_can_wakeup(&adapter->pdev->dev))
+ return wol->wolopts ? -EOPNOTSUPP : 0;
+
+ switch (hw->device_id) {
+ case E1000_DEV_ID_82546GB_QUAD_COPPER_KSP3:
+ if (wol->wolopts & WAKE_UCAST) {
+ DPRINTK(DRV, ERR, "Interface does not support "
+ "directed (unicast) frame wake-up packets\n");
+ return -EOPNOTSUPP;
+ }
+ break;
+ default:
+ break;
+ }
+
+ /* these settings will always override what we currently have */
+ adapter->wol = 0;
+
+ if (wol->wolopts & WAKE_UCAST)
+ adapter->wol |= E1000_WUFC_EX;
+ if (wol->wolopts & WAKE_MCAST)
+ adapter->wol |= E1000_WUFC_MC;
+ if (wol->wolopts & WAKE_BCAST)
+ adapter->wol |= E1000_WUFC_BC;
+ if (wol->wolopts & WAKE_MAGIC)
+ adapter->wol |= E1000_WUFC_MAG;
+
+ device_set_wakeup_enable(&adapter->pdev->dev, adapter->wol);
+
+ return 0;
+}
+
+/* toggle LED 4 times per second = 2 "blinks" per second */
+#define E1000_ID_INTERVAL (HZ/4)
+
+/* bit defines for adapter->led_status */
+#define E1000_LED_ON 0
+
+static void e1000_led_blink_callback(unsigned long data)
+{
+ struct e1000_adapter *adapter = (struct e1000_adapter *) data;
+ struct e1000_hw *hw = &adapter->hw;
+
+ if (test_and_change_bit(E1000_LED_ON, &adapter->led_status))
+ e1000_led_off(hw);
+ else
+ e1000_led_on(hw);
+
+ mod_timer(&adapter->blink_timer, jiffies + E1000_ID_INTERVAL);
+}
+
+static int e1000_phys_id(struct net_device *netdev, u32 data)
+{
+ struct e1000_adapter *adapter = netdev_priv(netdev);
+ struct e1000_hw *hw = &adapter->hw;
+
+ if (!data)
+ data = INT_MAX;
+
+ if (hw->mac_type < e1000_82571) {
+ if (!adapter->blink_timer.function) {
+ init_timer(&adapter->blink_timer);
+ adapter->blink_timer.function = e1000_led_blink_callback;
+ adapter->blink_timer.data = (unsigned long)adapter;
+ }
+ e1000_setup_led(hw);
+ mod_timer(&adapter->blink_timer, jiffies);
+ msleep_interruptible(data * 1000);
+ del_timer_sync(&adapter->blink_timer);
+ } else if (hw->phy_type == e1000_phy_ife) {
+ if (!adapter->blink_timer.function) {
+ init_timer(&adapter->blink_timer);
+ adapter->blink_timer.function = e1000_led_blink_callback;
+ adapter->blink_timer.data = (unsigned long)adapter;
+ }
+ mod_timer(&adapter->blink_timer, jiffies);
+ msleep_interruptible(data * 1000);
+ del_timer_sync(&adapter->blink_timer);
+ e1000_write_phy_reg(&(adapter->hw), IFE_PHY_SPECIAL_CONTROL_LED, 0);
+ } else {
+ e1000_blink_led_start(hw);
+ msleep_interruptible(data * 1000);
+ }
+
+ e1000_led_off(hw);
+ clear_bit(E1000_LED_ON, &adapter->led_status);
+ e1000_cleanup_led(hw);
+
+ return 0;
+}
+
+static int e1000_nway_reset(struct net_device *netdev)
+{
+ struct e1000_adapter *adapter = netdev_priv(netdev);
+ if (netif_running(netdev))
+ e1000_reinit_locked(adapter);
+ return 0;
+}
+
+static void e1000_get_ethtool_stats(struct net_device *netdev,
+ struct ethtool_stats *stats, u64 *data)
+{
+ struct e1000_adapter *adapter = netdev_priv(netdev);
+ int i;
+
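+ /* Each e1000_gstrings_stats entry records an offset into the adapter
+ * structure and the field width, so the copy below handles both u32 and
+ * u64 counters. */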
+ e1000_update_stats(adapter);
+ for (i = 0; i < E1000_GLOBAL_STATS_LEN; i++) {
+ char *p = (char *)adapter+e1000_gstrings_stats[i].stat_offset;
+ data[i] = (e1000_gstrings_stats[i].sizeof_stat ==
+ sizeof(u64)) ? *(u64 *)p : *(u32 *)p;
+ }
+/* BUG_ON(i != E1000_STATS_LEN); */
+}
+
+static void e1000_get_strings(struct net_device *netdev, u32 stringset,
+ u8 *data)
+{
+ u8 *p = data;
+ int i;
+
+ switch (stringset) {
+ case ETH_SS_TEST:
+ memcpy(data, *e1000_gstrings_test,
+ sizeof(e1000_gstrings_test));
+ break;
+ case ETH_SS_STATS:
+ for (i = 0; i < E1000_GLOBAL_STATS_LEN; i++) {
+ memcpy(p, e1000_gstrings_stats[i].stat_string,
+ ETH_GSTRING_LEN);
+ p += ETH_GSTRING_LEN;
+ }
+/* BUG_ON(p - data != E1000_STATS_LEN * ETH_GSTRING_LEN); */
+ break;
+ }
+}
+
+static const struct ethtool_ops e1000_ethtool_ops = {
+ .get_settings = e1000_get_settings,
+ .set_settings = e1000_set_settings,
+ .get_drvinfo = e1000_get_drvinfo,
+ .get_regs_len = e1000_get_regs_len,
+ .get_regs = e1000_get_regs,
+ .get_wol = e1000_get_wol,
+ .set_wol = e1000_set_wol,
+ .get_msglevel = e1000_get_msglevel,
+ .set_msglevel = e1000_set_msglevel,
+ .nway_reset = e1000_nway_reset,
+ .get_link = ethtool_op_get_link,
+ .get_eeprom_len = e1000_get_eeprom_len,
+ .get_eeprom = e1000_get_eeprom,
+ .set_eeprom = e1000_set_eeprom,
+ .get_ringparam = e1000_get_ringparam,
+ .set_ringparam = e1000_set_ringparam,
+ .get_pauseparam = e1000_get_pauseparam,
+ .set_pauseparam = e1000_set_pauseparam,
+ .get_rx_csum = e1000_get_rx_csum,
+ .set_rx_csum = e1000_set_rx_csum,
+ .get_tx_csum = e1000_get_tx_csum,
+ .set_tx_csum = e1000_set_tx_csum,
+ .set_sg = ethtool_op_set_sg,
+ .set_tso = e1000_set_tso,
+ .self_test = e1000_diag_test,
+ .get_strings = e1000_get_strings,
+ .phys_id = e1000_phys_id,
+ .get_ethtool_stats = e1000_get_ethtool_stats,
+ .get_sset_count = e1000_get_sset_count,
+};
+
+void e1000_set_ethtool_ops(struct net_device *netdev)
+{
+ SET_ETHTOOL_OPS(netdev, &e1000_ethtool_ops);
+}
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/devices/e1000/e1000_hw-2.6.28-ethercat.c Mon Feb 07 21:30:25 2011 +0100
@@ -0,0 +1,8878 @@
+/*******************************************************************************
+
+ Intel PRO/1000 Linux driver
+ Copyright(c) 1999 - 2006 Intel Corporation.
+
+ This program is free software; you can redistribute it and/or modify it
+ under the terms and conditions of the GNU General Public License,
+ version 2, as published by the Free Software Foundation.
+
+ This program is distributed in the hope it will be useful, but WITHOUT
+ ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ more details.
+
+ You should have received a copy of the GNU General Public License along with
+ this program; if not, write to the Free Software Foundation, Inc.,
+ 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
+
+ The full GNU General Public License is included in this distribution in
+ the file called "COPYING".
+
+ Contact Information:
+ Linux NICS <linux.nics@intel.com>
+ e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
+ Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
+
+*******************************************************************************/
+
+/* e1000_hw.c
+ * Shared functions for accessing and configuring the MAC
+ */
+
+
+#include "e1000_hw-2.6.27-ethercat.h"
+
+static s32 e1000_swfw_sync_acquire(struct e1000_hw *hw, u16 mask);
+static void e1000_swfw_sync_release(struct e1000_hw *hw, u16 mask);
+static s32 e1000_read_kmrn_reg(struct e1000_hw *hw, u32 reg_addr, u16 *data);
+static s32 e1000_write_kmrn_reg(struct e1000_hw *hw, u32 reg_addr, u16 data);
+static s32 e1000_get_software_semaphore(struct e1000_hw *hw);
+static void e1000_release_software_semaphore(struct e1000_hw *hw);
+
+static u8 e1000_arc_subsystem_valid(struct e1000_hw *hw);
+static s32 e1000_check_downshift(struct e1000_hw *hw);
+static s32 e1000_check_polarity(struct e1000_hw *hw,
+ e1000_rev_polarity *polarity);
+static void e1000_clear_hw_cntrs(struct e1000_hw *hw);
+static void e1000_clear_vfta(struct e1000_hw *hw);
+static s32 e1000_commit_shadow_ram(struct e1000_hw *hw);
+static s32 e1000_config_dsp_after_link_change(struct e1000_hw *hw,
+ bool link_up);
+static s32 e1000_config_fc_after_link_up(struct e1000_hw *hw);
+static s32 e1000_detect_gig_phy(struct e1000_hw *hw);
+static s32 e1000_erase_ich8_4k_segment(struct e1000_hw *hw, u32 bank);
+static s32 e1000_get_auto_rd_done(struct e1000_hw *hw);
+static s32 e1000_get_cable_length(struct e1000_hw *hw, u16 *min_length,
+ u16 *max_length);
+static s32 e1000_get_hw_eeprom_semaphore(struct e1000_hw *hw);
+static s32 e1000_get_phy_cfg_done(struct e1000_hw *hw);
+static s32 e1000_get_software_flag(struct e1000_hw *hw);
+static s32 e1000_ich8_cycle_init(struct e1000_hw *hw);
+static s32 e1000_ich8_flash_cycle(struct e1000_hw *hw, u32 timeout);
+static s32 e1000_id_led_init(struct e1000_hw *hw);
+static s32 e1000_init_lcd_from_nvm_config_region(struct e1000_hw *hw,
+ u32 cnf_base_addr,
+ u32 cnf_size);
+static s32 e1000_init_lcd_from_nvm(struct e1000_hw *hw);
+static void e1000_init_rx_addrs(struct e1000_hw *hw);
+static void e1000_initialize_hardware_bits(struct e1000_hw *hw);
+static bool e1000_is_onboard_nvm_eeprom(struct e1000_hw *hw);
+static s32 e1000_kumeran_lock_loss_workaround(struct e1000_hw *hw);
+static s32 e1000_mng_enable_host_if(struct e1000_hw *hw);
+static s32 e1000_mng_host_if_write(struct e1000_hw *hw, u8 *buffer, u16 length,
+ u16 offset, u8 *sum);
+static s32 e1000_mng_write_cmd_header(struct e1000_hw* hw,
+ struct e1000_host_mng_command_header
+ *hdr);
+static s32 e1000_mng_write_commit(struct e1000_hw *hw);
+static s32 e1000_phy_ife_get_info(struct e1000_hw *hw,
+ struct e1000_phy_info *phy_info);
+static s32 e1000_phy_igp_get_info(struct e1000_hw *hw,
+ struct e1000_phy_info *phy_info);
+static s32 e1000_read_eeprom_eerd(struct e1000_hw *hw, u16 offset, u16 words,
+ u16 *data);
+static s32 e1000_write_eeprom_eewr(struct e1000_hw *hw, u16 offset, u16 words,
+ u16 *data);
+static s32 e1000_poll_eerd_eewr_done(struct e1000_hw *hw, int eerd);
+static s32 e1000_phy_m88_get_info(struct e1000_hw *hw,
+ struct e1000_phy_info *phy_info);
+static void e1000_put_hw_eeprom_semaphore(struct e1000_hw *hw);
+static s32 e1000_read_ich8_byte(struct e1000_hw *hw, u32 index, u8 *data);
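+ /* Point the hardware at the test ring: TDBAL/TDBAH take the low/high
+ * 32 bits of the ring's bus address, TDLEN its size in bytes, and the
+ * head/tail pointers are reset so the ring starts out empty. */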
+static s32 e1000_verify_write_ich8_byte(struct e1000_hw *hw, u32 index,
+ u8 byte);
+static s32 e1000_write_ich8_byte(struct e1000_hw *hw, u32 index, u8 byte);
+static s32 e1000_read_ich8_word(struct e1000_hw *hw, u32 index, u16 *data);
+static s32 e1000_read_ich8_data(struct e1000_hw *hw, u32 index, u32 size,
+ u16 *data);
+static s32 e1000_write_ich8_data(struct e1000_hw *hw, u32 index, u32 size,
+ u16 data);
+static s32 e1000_read_eeprom_ich8(struct e1000_hw *hw, u16 offset, u16 words,
+ u16 *data);
+static s32 e1000_write_eeprom_ich8(struct e1000_hw *hw, u16 offset, u16 words,
+ u16 *data);
+static void e1000_release_software_flag(struct e1000_hw *hw);
+static s32 e1000_set_d3_lplu_state(struct e1000_hw *hw, bool active);
+static s32 e1000_set_d0_lplu_state(struct e1000_hw *hw, bool active);
+static s32 e1000_set_pci_ex_no_snoop(struct e1000_hw *hw, u32 no_snoop);
+static void e1000_set_pci_express_master_disable(struct e1000_hw *hw);
+static s32 e1000_wait_autoneg(struct e1000_hw *hw);
+static void e1000_write_reg_io(struct e1000_hw *hw, u32 offset, u32 value);
+static s32 e1000_set_phy_type(struct e1000_hw *hw);
+static void e1000_phy_init_script(struct e1000_hw *hw);
+static s32 e1000_setup_copper_link(struct e1000_hw *hw);
+static s32 e1000_setup_fiber_serdes_link(struct e1000_hw *hw);
+static s32 e1000_adjust_serdes_amplitude(struct e1000_hw *hw);
+static s32 e1000_phy_force_speed_duplex(struct e1000_hw *hw);
+static s32 e1000_config_mac_to_phy(struct e1000_hw *hw);
+static void e1000_raise_mdi_clk(struct e1000_hw *hw, u32 *ctrl);
+static void e1000_lower_mdi_clk(struct e1000_hw *hw, u32 *ctrl);
+static void e1000_shift_out_mdi_bits(struct e1000_hw *hw, u32 data,
+ u16 count);
+static u16 e1000_shift_in_mdi_bits(struct e1000_hw *hw);
+static s32 e1000_phy_reset_dsp(struct e1000_hw *hw);
+static s32 e1000_write_eeprom_spi(struct e1000_hw *hw, u16 offset,
+ u16 words, u16 *data);
+static s32 e1000_write_eeprom_microwire(struct e1000_hw *hw, u16 offset,
+ u16 words, u16 *data);
+static s32 e1000_spi_eeprom_ready(struct e1000_hw *hw);
+static void e1000_raise_ee_clk(struct e1000_hw *hw, u32 *eecd);
+static void e1000_lower_ee_clk(struct e1000_hw *hw, u32 *eecd);
+static void e1000_shift_out_ee_bits(struct e1000_hw *hw, u16 data, u16 count);
+static s32 e1000_write_phy_reg_ex(struct e1000_hw *hw, u32 reg_addr,
+ u16 phy_data);
+static s32 e1000_read_phy_reg_ex(struct e1000_hw *hw,u32 reg_addr,
+ u16 *phy_data);
+static u16 e1000_shift_in_ee_bits(struct e1000_hw *hw, u16 count);
+static s32 e1000_acquire_eeprom(struct e1000_hw *hw);
+static void e1000_release_eeprom(struct e1000_hw *hw);
+static void e1000_standby_eeprom(struct e1000_hw *hw);
+static s32 e1000_set_vco_speed(struct e1000_hw *hw);
+static s32 e1000_polarity_reversal_workaround(struct e1000_hw *hw);
+static s32 e1000_set_phy_mode(struct e1000_hw *hw);
+static s32 e1000_host_if_read_cookie(struct e1000_hw *hw, u8 *buffer);
+static u8 e1000_calculate_mng_checksum(char *buffer, u32 length);
+static s32 e1000_configure_kmrn_for_10_100(struct e1000_hw *hw, u16 duplex);
+static s32 e1000_configure_kmrn_for_1000(struct e1000_hw *hw);
+static s32 e1000_do_read_eeprom(struct e1000_hw *hw, u16 offset, u16 words, u16 *data);
+static s32 e1000_do_write_eeprom(struct e1000_hw *hw, u16 offset, u16 words, u16 *data);
+
+/* IGP cable length table: approximate cable length in meters, indexed by the
+ * PHY's AGC register reading */
+static const
+u16 e1000_igp_cable_length_table[IGP01E1000_AGC_LENGTH_TABLE_SIZE] =
+ { 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5,
+ 5, 10, 10, 10, 10, 10, 10, 10, 20, 20, 20, 20, 20, 25, 25, 25,
+ 25, 25, 25, 25, 30, 30, 30, 30, 40, 40, 40, 40, 40, 40, 40, 40,
+ 40, 50, 50, 50, 50, 50, 50, 50, 60, 60, 60, 60, 60, 60, 60, 60,
+ 60, 70, 70, 70, 70, 70, 70, 80, 80, 80, 80, 80, 80, 90, 90, 90,
+ 90, 90, 90, 90, 90, 90, 100, 100, 100, 100, 100, 100, 100, 100, 100, 100,
+ 100, 100, 100, 100, 110, 110, 110, 110, 110, 110, 110, 110, 110, 110, 110, 110,
+ 110, 110, 110, 110, 110, 110, 120, 120, 120, 120, 120, 120, 120, 120, 120, 120};
+
+static const
+u16 e1000_igp_2_cable_length_table[IGP02E1000_AGC_LENGTH_TABLE_SIZE] =
+ { 0, 0, 0, 0, 0, 0, 0, 0, 3, 5, 8, 11, 13, 16, 18, 21,
+ 0, 0, 0, 3, 6, 10, 13, 16, 19, 23, 26, 29, 32, 35, 38, 41,
+ 6, 10, 14, 18, 22, 26, 30, 33, 37, 41, 44, 48, 51, 54, 58, 61,
+ 21, 26, 31, 35, 40, 44, 49, 53, 57, 61, 65, 68, 72, 75, 79, 82,
+ 40, 45, 51, 56, 61, 66, 70, 75, 79, 83, 87, 91, 94, 98, 101, 104,
+ 60, 66, 72, 77, 82, 87, 92, 96, 100, 104, 108, 111, 114, 117, 119, 121,
+ 83, 89, 95, 100, 105, 109, 113, 116, 119, 122, 124,
+ 104, 109, 114, 118, 121, 124};
+
+static DEFINE_SPINLOCK(e1000_eeprom_lock);
+
+/******************************************************************************
+ * Set the phy type member in the hw struct.
+ *
+ * hw - Struct containing variables accessed by shared code
+ *****************************************************************************/
+static s32 e1000_set_phy_type(struct e1000_hw *hw)
+{
+ DEBUGFUNC("e1000_set_phy_type");
+
+ if (hw->mac_type == e1000_undefined)
+ return -E1000_ERR_PHY_TYPE;
+
+ switch (hw->phy_id) {
+ case M88E1000_E_PHY_ID:
+ case M88E1000_I_PHY_ID:
+ case M88E1011_I_PHY_ID:
+ case M88E1111_I_PHY_ID:
+ hw->phy_type = e1000_phy_m88;
+ break;
+ case IGP01E1000_I_PHY_ID:
+ if (hw->mac_type == e1000_82541 ||
+ hw->mac_type == e1000_82541_rev_2 ||
+ hw->mac_type == e1000_82547 ||
+ hw->mac_type == e1000_82547_rev_2) {
+ hw->phy_type = e1000_phy_igp;
+ break;
+ }
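+ /* fall through */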
+ case IGP03E1000_E_PHY_ID:
+ hw->phy_type = e1000_phy_igp_3;
+ break;
+ case IFE_E_PHY_ID:
+ case IFE_PLUS_E_PHY_ID:
+ case IFE_C_E_PHY_ID:
+ hw->phy_type = e1000_phy_ife;
+ break;
+ case GG82563_E_PHY_ID:
+ if (hw->mac_type == e1000_80003es2lan) {
+ hw->phy_type = e1000_phy_gg82563;
+ break;
+ }
+ /* Fall Through */
+ default:
+ /* Should never have loaded on this device */
+ hw->phy_type = e1000_phy_undefined;
+ return -E1000_ERR_PHY_TYPE;
+ }
+
+ return E1000_SUCCESS;
+}
+
+/******************************************************************************
+ * IGP phy init script - initializes the GbE PHY
+ *
+ * hw - Struct containing variables accessed by shared code
+ *****************************************************************************/
+static void e1000_phy_init_script(struct e1000_hw *hw)
+{
+ u32 ret_val;
+ u16 phy_saved_data;
+
+ DEBUGFUNC("e1000_phy_init_script");
+
+ if (hw->phy_init_script) {
+ msleep(20);
+
+ /* Save off the current value of register 0x2F5B to be restored at
+ * the end of this routine. */
+ ret_val = e1000_read_phy_reg(hw, 0x2F5B, &phy_saved_data);
+
+ /* Disable the PHY transmitter */
+ e1000_write_phy_reg(hw, 0x2F5B, 0x0003);
+
+ msleep(20);
+
+ e1000_write_phy_reg(hw,0x0000,0x0140);
+
+ msleep(5);
+
+ switch (hw->mac_type) {
+ case e1000_82541:
+ case e1000_82547:
+ e1000_write_phy_reg(hw, 0x1F95, 0x0001);
+
+ e1000_write_phy_reg(hw, 0x1F71, 0xBD21);
+
+ e1000_write_phy_reg(hw, 0x1F79, 0x0018);
+
+ e1000_write_phy_reg(hw, 0x1F30, 0x1600);
+
+ e1000_write_phy_reg(hw, 0x1F31, 0x0014);
+
+ e1000_write_phy_reg(hw, 0x1F32, 0x161C);
+
+ e1000_write_phy_reg(hw, 0x1F94, 0x0003);
+
+ e1000_write_phy_reg(hw, 0x1F96, 0x003F);
+
+ e1000_write_phy_reg(hw, 0x2010, 0x0008);
+ break;
+
+ case e1000_82541_rev_2:
+ case e1000_82547_rev_2:
+ e1000_write_phy_reg(hw, 0x1F73, 0x0099);
+ break;
+ default:
+ break;
+ }
+
+ e1000_write_phy_reg(hw, 0x0000, 0x3300);
+
+ msleep(20);
+
+ /* Now enable the transmitter */
+ e1000_write_phy_reg(hw, 0x2F5B, phy_saved_data);
+
+ if (hw->mac_type == e1000_82547) {
+ u16 fused, fine, coarse;
+
+ /* Move to analog registers page */
+ e1000_read_phy_reg(hw, IGP01E1000_ANALOG_SPARE_FUSE_STATUS, &fused);
+
+ if (!(fused & IGP01E1000_ANALOG_SPARE_FUSE_ENABLED)) {
+ e1000_read_phy_reg(hw, IGP01E1000_ANALOG_FUSE_STATUS, &fused);
+
+ fine = fused & IGP01E1000_ANALOG_FUSE_FINE_MASK;
+ coarse = fused & IGP01E1000_ANALOG_FUSE_COARSE_MASK;
+
+ if (coarse > IGP01E1000_ANALOG_FUSE_COARSE_THRESH) {
+ coarse -= IGP01E1000_ANALOG_FUSE_COARSE_10;
+ fine -= IGP01E1000_ANALOG_FUSE_FINE_1;
+ } else if (coarse == IGP01E1000_ANALOG_FUSE_COARSE_THRESH)
+ fine -= IGP01E1000_ANALOG_FUSE_FINE_10;
+
+ fused = (fused & IGP01E1000_ANALOG_FUSE_POLY_MASK) |
+ (fine & IGP01E1000_ANALOG_FUSE_FINE_MASK) |
+ (coarse & IGP01E1000_ANALOG_FUSE_COARSE_MASK);
+
+ e1000_write_phy_reg(hw, IGP01E1000_ANALOG_FUSE_CONTROL, fused);
+ e1000_write_phy_reg(hw, IGP01E1000_ANALOG_FUSE_BYPASS,
+ IGP01E1000_ANALOG_FUSE_ENABLE_SW_CONTROL);
+ }
+ }
+ }
+}
+
+/******************************************************************************
+ * Set the mac type member in the hw struct.
+ *
+ * hw - Struct containing variables accessed by shared code
+ *****************************************************************************/
+s32 e1000_set_mac_type(struct e1000_hw *hw)
+{
+ DEBUGFUNC("e1000_set_mac_type");
+
+ switch (hw->device_id) {
+ case E1000_DEV_ID_82542:
+ switch (hw->revision_id) {
+ case E1000_82542_2_0_REV_ID:
+ hw->mac_type = e1000_82542_rev2_0;
+ break;
+ case E1000_82542_2_1_REV_ID:
+ hw->mac_type = e1000_82542_rev2_1;
+ break;
+ default:
+ /* Invalid 82542 revision ID */
+ return -E1000_ERR_MAC_TYPE;
+ }
+ break;
+ case E1000_DEV_ID_82543GC_FIBER:
+ case E1000_DEV_ID_82543GC_COPPER:
+ hw->mac_type = e1000_82543;
+ break;
+ case E1000_DEV_ID_82544EI_COPPER:
+ case E1000_DEV_ID_82544EI_FIBER:
+ case E1000_DEV_ID_82544GC_COPPER:
+ case E1000_DEV_ID_82544GC_LOM:
+ hw->mac_type = e1000_82544;
+ break;
+ case E1000_DEV_ID_82540EM:
+ case E1000_DEV_ID_82540EM_LOM:
+ case E1000_DEV_ID_82540EP:
+ case E1000_DEV_ID_82540EP_LOM:
+ case E1000_DEV_ID_82540EP_LP:
+ hw->mac_type = e1000_82540;
+ break;
+ case E1000_DEV_ID_82545EM_COPPER:
+ case E1000_DEV_ID_82545EM_FIBER:
+ hw->mac_type = e1000_82545;
+ break;
+ case E1000_DEV_ID_82545GM_COPPER:
+ case E1000_DEV_ID_82545GM_FIBER:
+ case E1000_DEV_ID_82545GM_SERDES:
+ hw->mac_type = e1000_82545_rev_3;
+ break;
+ case E1000_DEV_ID_82546EB_COPPER:
+ case E1000_DEV_ID_82546EB_FIBER:
+ case E1000_DEV_ID_82546EB_QUAD_COPPER:
+ hw->mac_type = e1000_82546;
+ break;
+ case E1000_DEV_ID_82546GB_COPPER:
+ case E1000_DEV_ID_82546GB_FIBER:
+ case E1000_DEV_ID_82546GB_SERDES:
+ case E1000_DEV_ID_82546GB_PCIE:
+ case E1000_DEV_ID_82546GB_QUAD_COPPER:
+ case E1000_DEV_ID_82546GB_QUAD_COPPER_KSP3:
+ hw->mac_type = e1000_82546_rev_3;
+ break;
+ case E1000_DEV_ID_82541EI:
+ case E1000_DEV_ID_82541EI_MOBILE:
+ case E1000_DEV_ID_82541ER_LOM:
+ hw->mac_type = e1000_82541;
+ break;
+ case E1000_DEV_ID_82541ER:
+ case E1000_DEV_ID_82541GI:
+ case E1000_DEV_ID_82541GI_LF:
+ case E1000_DEV_ID_82541GI_MOBILE:
+ hw->mac_type = e1000_82541_rev_2;
+ break;
+ case E1000_DEV_ID_82547EI:
+ case E1000_DEV_ID_82547EI_MOBILE:
+ hw->mac_type = e1000_82547;
+ break;
+ case E1000_DEV_ID_82547GI:
+ hw->mac_type = e1000_82547_rev_2;
+ break;
+ case E1000_DEV_ID_82571EB_COPPER:
+ case E1000_DEV_ID_82571EB_FIBER:
+ case E1000_DEV_ID_82571EB_SERDES:
+ case E1000_DEV_ID_82571EB_SERDES_DUAL:
+ case E1000_DEV_ID_82571EB_SERDES_QUAD:
+ case E1000_DEV_ID_82571EB_QUAD_COPPER:
+ case E1000_DEV_ID_82571PT_QUAD_COPPER:
+ case E1000_DEV_ID_82571EB_QUAD_FIBER:
+ case E1000_DEV_ID_82571EB_QUAD_COPPER_LOWPROFILE:
+ hw->mac_type = e1000_82571;
+ break;
+ case E1000_DEV_ID_82572EI_COPPER:
+ case E1000_DEV_ID_82572EI_FIBER:
+ case E1000_DEV_ID_82572EI_SERDES:
+ case E1000_DEV_ID_82572EI:
+ hw->mac_type = e1000_82572;
+ break;
+ case E1000_DEV_ID_82573E:
+ case E1000_DEV_ID_82573E_IAMT:
+ case E1000_DEV_ID_82573L:
+ hw->mac_type = e1000_82573;
+ break;
+ case E1000_DEV_ID_80003ES2LAN_COPPER_SPT:
+ case E1000_DEV_ID_80003ES2LAN_SERDES_SPT:
+ case E1000_DEV_ID_80003ES2LAN_COPPER_DPT:
+ case E1000_DEV_ID_80003ES2LAN_SERDES_DPT:
+ hw->mac_type = e1000_80003es2lan;
+ break;
+ case E1000_DEV_ID_ICH8_IGP_M_AMT:
+ case E1000_DEV_ID_ICH8_IGP_AMT:
+ case E1000_DEV_ID_ICH8_IGP_C:
+ case E1000_DEV_ID_ICH8_IFE:
+ case E1000_DEV_ID_ICH8_IFE_GT:
+ case E1000_DEV_ID_ICH8_IFE_G:
+ case E1000_DEV_ID_ICH8_IGP_M:
+ hw->mac_type = e1000_ich8lan;
+ break;
+ default:
+ /* Should never have loaded on this device */
+ return -E1000_ERR_MAC_TYPE;
+ }
+
+ switch (hw->mac_type) {
+ case e1000_ich8lan:
+ hw->swfwhw_semaphore_present = true;
+ hw->asf_firmware_present = true;
+ break;
+ case e1000_80003es2lan:
+ hw->swfw_sync_present = true;
+ /* fall through */
+ case e1000_82571:
+ case e1000_82572:
+ case e1000_82573:
+ hw->eeprom_semaphore_present = true;
+ /* fall through */
+ case e1000_82541:
+ case e1000_82547:
+ case e1000_82541_rev_2:
+ case e1000_82547_rev_2:
+ hw->asf_firmware_present = true;
+ break;
+ default:
+ break;
+ }
+
+ /* The 82543 chip does not count tx_carrier_errors properly in
+ * FD mode
+ */
+ if (hw->mac_type == e1000_82543)
+ hw->bad_tx_carr_stats_fd = true;
+
+ /* capable of receiving management packets to the host */
+ if (hw->mac_type >= e1000_82571)
+ hw->has_manc2h = true;
+
+ /* On rare occasions, ESB2 systems would end up started without
+ * the RX unit being turned on.
+ */
+ if (hw->mac_type == e1000_80003es2lan)
+ hw->rx_needs_kicking = true;
+
+ if (hw->mac_type > e1000_82544)
+ hw->has_smbus = true;
+
+ return E1000_SUCCESS;
+}
+
+/*****************************************************************************
+ * Set media type and TBI compatibility.
+ *
+ * hw - Struct containing variables accessed by shared code
+ * **************************************************************************/
+void e1000_set_media_type(struct e1000_hw *hw)
+{
+ u32 status;
+
+ DEBUGFUNC("e1000_set_media_type");
+
+ if (hw->mac_type != e1000_82543) {
+ /* tbi_compatibility is only valid on 82543 */
+ hw->tbi_compatibility_en = false;
+ }
+
+ switch (hw->device_id) {
+ case E1000_DEV_ID_82545GM_SERDES:
+ case E1000_DEV_ID_82546GB_SERDES:
+ case E1000_DEV_ID_82571EB_SERDES:
+ case E1000_DEV_ID_82571EB_SERDES_DUAL:
+ case E1000_DEV_ID_82571EB_SERDES_QUAD:
+ case E1000_DEV_ID_82572EI_SERDES:
+ case E1000_DEV_ID_80003ES2LAN_SERDES_DPT:
+ hw->media_type = e1000_media_type_internal_serdes;
+ break;
+ default:
+ switch (hw->mac_type) {
+ case e1000_82542_rev2_0:
+ case e1000_82542_rev2_1:
+ hw->media_type = e1000_media_type_fiber;
+ break;
+ case e1000_ich8lan:
+ case e1000_82573:
+ /* The STATUS_TBIMODE bit is reserved or reused for this
+ * device.
+ */
+ hw->media_type = e1000_media_type_copper;
+ break;
+ default:
+ status = er32(STATUS);
+ if (status & E1000_STATUS_TBIMODE) {
+ hw->media_type = e1000_media_type_fiber;
+ /* tbi_compatibility not valid on fiber */
+ hw->tbi_compatibility_en = false;
+ } else {
+ hw->media_type = e1000_media_type_copper;
+ }
+ break;
+ }
+ }
+}
+
+/******************************************************************************
+ * Reset the transmit and receive units; mask and clear all interrupts.
+ *
+ * hw - Struct containing variables accessed by shared code
+ *****************************************************************************/
+s32 e1000_reset_hw(struct e1000_hw *hw)
+{
+ u32 ctrl;
+ u32 ctrl_ext;
+ u32 icr;
+ u32 manc;
+ u32 led_ctrl;
+ u32 timeout;
+ u32 extcnf_ctrl;
+ s32 ret_val;
+
+ DEBUGFUNC("e1000_reset_hw");
+
+ /* For 82542 (rev 2.0), disable MWI before issuing a device reset */
+ if (hw->mac_type == e1000_82542_rev2_0) {
+ DEBUGOUT("Disabling MWI on 82542 rev 2.0\n");
+ e1000_pci_clear_mwi(hw);
+ }
+
+ if (hw->bus_type == e1000_bus_type_pci_express) {
+ /* Prevent the PCI-E bus from sticking if there is no TLP connection
+ * on the last TLP read/write transaction when MAC is reset.
+ */
+ if (e1000_disable_pciex_master(hw) != E1000_SUCCESS) {
+ DEBUGOUT("PCI-E Master disable polling has failed.\n");
+ }
+ }
+
+ /* Clear interrupt mask to stop board from generating interrupts */
+ DEBUGOUT("Masking off all interrupts\n");
+ ew32(IMC, 0xffffffff);
+
+ /* Disable the Transmit and Receive units. Then delay to allow
+ * any pending transactions to complete before we hit the MAC with
+ * the global reset.
+ */
+ ew32(RCTL, 0);
+ ew32(TCTL, E1000_TCTL_PSP);
+ E1000_WRITE_FLUSH();
+
+ /* The tbi_compatibility_on Flag must be cleared when Rctl is cleared. */
+ hw->tbi_compatibility_on = false;
+
+ /* Delay to allow any outstanding PCI transactions to complete before
+ * resetting the device
+ */
+ msleep(10);
+
+ ctrl = er32(CTRL);
+
+ /* Must reset the PHY before resetting the MAC */
+ if ((hw->mac_type == e1000_82541) || (hw->mac_type == e1000_82547)) {
+ ew32(CTRL, (ctrl | E1000_CTRL_PHY_RST));
+ msleep(5);
+ }
+
+ /* Must acquire the MDIO ownership before MAC reset.
+ * Ownership defaults to firmware after a reset. */
+ if (hw->mac_type == e1000_82573) {
+ timeout = 10;
+
+ extcnf_ctrl = er32(EXTCNF_CTRL);
+ extcnf_ctrl |= E1000_EXTCNF_CTRL_MDIO_SW_OWNERSHIP;
+
+ do {
+ ew32(EXTCNF_CTRL, extcnf_ctrl);
+ extcnf_ctrl = er32(EXTCNF_CTRL);
+
+ if (extcnf_ctrl & E1000_EXTCNF_CTRL_MDIO_SW_OWNERSHIP)
+ break;
+ else
+ extcnf_ctrl |= E1000_EXTCNF_CTRL_MDIO_SW_OWNERSHIP;
+
+ msleep(2);
+ timeout--;
+ } while (timeout);
+ }
+
+ /* Workaround for ICH8 bit corruption issue in FIFO memory */
+ if (hw->mac_type == e1000_ich8lan) {
+ /* Set Tx and Rx buffer allocation to 8k apiece. */
+ ew32(PBA, E1000_PBA_8K);
+ /* Set Packet Buffer Size to 16k. */
+ ew32(PBS, E1000_PBS_16K);
+ }
+
+ /* Issue a global reset to the MAC. This will reset the chip's
+ * transmit, receive, DMA, and link units. It will not affect
+ * the current PCI configuration. The global reset bit is self-
+ * clearing, and should clear within a microsecond.
+ */
+ DEBUGOUT("Issuing a global reset to MAC\n");
+
+ switch (hw->mac_type) {
+ case e1000_82544:
+ case e1000_82540:
+ case e1000_82545:
+ case e1000_82546:
+ case e1000_82541:
+ case e1000_82541_rev_2:
+ /* These controllers can't ack the 64-bit write when issuing the
+ * reset, so use IO-mapping as a workaround to issue the reset */
+ E1000_WRITE_REG_IO(hw, CTRL, (ctrl | E1000_CTRL_RST));
+ break;
+ case e1000_82545_rev_3:
+ case e1000_82546_rev_3:
+ /* Reset is performed on a shadow of the control register */
+ ew32(CTRL_DUP, (ctrl | E1000_CTRL_RST));
+ break;
+ case e1000_ich8lan:
+ if (!hw->phy_reset_disable &&
+ e1000_check_phy_reset_block(hw) == E1000_SUCCESS) {
+ /* e1000_ich8lan PHY HW reset requires MAC CORE reset
+ * at the same time to make sure the interface between
+ * MAC and the external PHY is reset.
+ */
+ ctrl |= E1000_CTRL_PHY_RST;
+ }
+
+ e1000_get_software_flag(hw);
+ ew32(CTRL, (ctrl | E1000_CTRL_RST));
+ msleep(5);
+ break;
+ default:
+ ew32(CTRL, (ctrl | E1000_CTRL_RST));
+ break;
+ }
+
+ /* After MAC reset, force reload of EEPROM to restore power-on settings to
+ * device. Later controllers reload the EEPROM automatically, so just wait
+ * for reload to complete.
+ */
+ switch (hw->mac_type) {
+ case e1000_82542_rev2_0:
+ case e1000_82542_rev2_1:
+ case e1000_82543:
+ case e1000_82544:
+ /* Wait for reset to complete */
+ udelay(10);
+ ctrl_ext = er32(CTRL_EXT);
+ ctrl_ext |= E1000_CTRL_EXT_EE_RST;
+ ew32(CTRL_EXT, ctrl_ext);
+ E1000_WRITE_FLUSH();
+ /* Wait for EEPROM reload */
+ msleep(2);
+ break;
+ case e1000_82541:
+ case e1000_82541_rev_2:
+ case e1000_82547:
+ case e1000_82547_rev_2:
+ /* Wait for EEPROM reload */
+ msleep(20);
+ break;
+ case e1000_82573:
+ if (!e1000_is_onboard_nvm_eeprom(hw)) {
+ udelay(10);
+ ctrl_ext = er32(CTRL_EXT);
+ ctrl_ext |= E1000_CTRL_EXT_EE_RST;
+ ew32(CTRL_EXT, ctrl_ext);
+ E1000_WRITE_FLUSH();
+ }
+ /* fall through */
+ default:
+ /* Auto read done will delay 5ms or poll based on mac type */
+ ret_val = e1000_get_auto_rd_done(hw);
+ if (ret_val)
+ return ret_val;
+ break;
+ }
+
+ /* Disable HW ARPs on ASF enabled adapters */
+ if (hw->mac_type >= e1000_82540 && hw->mac_type <= e1000_82547_rev_2) {
+ manc = er32(MANC);
+ manc &= ~(E1000_MANC_ARP_EN);
+ ew32(MANC, manc);
+ }
+
+ if ((hw->mac_type == e1000_82541) || (hw->mac_type == e1000_82547)) {
+ e1000_phy_init_script(hw);
+
+ /* Configure activity LED after PHY reset */
+ led_ctrl = er32(LEDCTL);
+ led_ctrl &= IGP_ACTIVITY_LED_MASK;
+ led_ctrl |= (IGP_ACTIVITY_LED_ENABLE | IGP_LED3_MODE);
+ ew32(LEDCTL, led_ctrl);
+ }
+
+ /* Clear interrupt mask to stop board from generating interrupts */
+ DEBUGOUT("Masking off all interrupts\n");
+ ew32(IMC, 0xffffffff);
+
+ /* Clear any pending interrupt events. */
+ icr = er32(ICR);
+
+ /* If MWI was previously enabled, reenable it. */
+ if (hw->mac_type == e1000_82542_rev2_0) {
+ if (hw->pci_cmd_word & PCI_COMMAND_INVALIDATE)
+ e1000_pci_set_mwi(hw);
+ }
+
+ if (hw->mac_type == e1000_ich8lan) {
+ u32 kab = er32(KABGTXD);
+ kab |= E1000_KABGTXD_BGSQLBIAS;
+ ew32(KABGTXD, kab);
+ }
+
+ return E1000_SUCCESS;
+}
+
+/******************************************************************************
+ *
+ * Initialize a number of hardware-dependent bits
+ *
+ * hw: Struct containing variables accessed by shared code
+ *
+ * This function contains hardware limitation workarounds for PCI-E adapters
+ *
+ *****************************************************************************/
+static void e1000_initialize_hardware_bits(struct e1000_hw *hw)
+{
+ if ((hw->mac_type >= e1000_82571) && (!hw->initialize_hw_bits_disable)) {
+ /* Settings common to all PCI-express silicon */
+ u32 reg_ctrl, reg_ctrl_ext;
+ u32 reg_tarc0, reg_tarc1;
+ u32 reg_tctl;
+ u32 reg_txdctl, reg_txdctl1;
+
+ /* link autonegotiation/sync workarounds */
+ reg_tarc0 = er32(TARC0);
+ reg_tarc0 &= ~((1 << 30)|(1 << 29)|(1 << 28)|(1 << 27));
+
+ /* Enable not-done TX descriptor counting */
+ reg_txdctl = er32(TXDCTL);
+ reg_txdctl |= E1000_TXDCTL_COUNT_DESC;
+ ew32(TXDCTL, reg_txdctl);
+ reg_txdctl1 = er32(TXDCTL1);
+ reg_txdctl1 |= E1000_TXDCTL_COUNT_DESC;
+ ew32(TXDCTL1, reg_txdctl1);
+
+ switch (hw->mac_type) {
+ case e1000_82571:
+ case e1000_82572:
+ /* Clear PHY TX compatible mode bits */
+ reg_tarc1 = er32(TARC1);
+ reg_tarc1 &= ~((1 << 30)|(1 << 29));
+
+ /* link autonegotiation/sync workarounds */
+ reg_tarc0 |= ((1 << 26)|(1 << 25)|(1 << 24)|(1 << 23));
+
+ /* TX ring control fixes */
+ reg_tarc1 |= ((1 << 26)|(1 << 25)|(1 << 24));
+
+ /* Multiple read bit is reversed polarity */
+ reg_tctl = er32(TCTL);
+ if (reg_tctl & E1000_TCTL_MULR)
+ reg_tarc1 &= ~(1 << 28);
+ else
+ reg_tarc1 |= (1 << 28);
+
+ ew32(TARC1, reg_tarc1);
+ break;
+ case e1000_82573:
+ reg_ctrl_ext = er32(CTRL_EXT);
+ reg_ctrl_ext &= ~(1 << 23);
+ reg_ctrl_ext |= (1 << 22);
+
+ /* TX byte count fix */
+ reg_ctrl = er32(CTRL);
+ reg_ctrl &= ~(1 << 29);
+
+ ew32(CTRL_EXT, reg_ctrl_ext);
+ ew32(CTRL, reg_ctrl);
+ break;
+ case e1000_80003es2lan:
+ /* improve small packet performance for fiber/serdes */
+ if ((hw->media_type == e1000_media_type_fiber) ||
+ (hw->media_type == e1000_media_type_internal_serdes)) {
+ reg_tarc0 &= ~(1 << 20);
+ }
+
+ /* Multiple read bit is reversed polarity */
+ reg_tctl = er32(TCTL);
+ reg_tarc1 = er32(TARC1);
+ if (reg_tctl & E1000_TCTL_MULR)
+ reg_tarc1 &= ~(1 << 28);
+ else
+ reg_tarc1 |= (1 << 28);
+
+ ew32(TARC1, reg_tarc1);
+ break;
+ case e1000_ich8lan:
+ /* Reduce concurrent DMA requests to 3 from 4 */
+ if ((hw->revision_id < 3) ||
+ ((hw->device_id != E1000_DEV_ID_ICH8_IGP_M_AMT) &&
+ (hw->device_id != E1000_DEV_ID_ICH8_IGP_M)))
+ reg_tarc0 |= ((1 << 29)|(1 << 28));
+
+ reg_ctrl_ext = er32(CTRL_EXT);
+ reg_ctrl_ext |= (1 << 22);
+ ew32(CTRL_EXT, reg_ctrl_ext);
+
+ /* workaround TX hang with TSO=on */
+ reg_tarc0 |= ((1 << 27)|(1 << 26)|(1 << 24)|(1 << 23));
+
+ /* Multiple read bit is reversed polarity */
+ reg_tctl = er32(TCTL);
+ reg_tarc1 = er32(TARC1);
+ if (reg_tctl & E1000_TCTL_MULR)
+ reg_tarc1 &= ~(1 << 28);
+ else
+ reg_tarc1 |= (1 << 28);
+
+ /* workaround TX hang with TSO=on */
+ reg_tarc1 |= ((1 << 30)|(1 << 26)|(1 << 24));
+
+ ew32(TARC1, reg_tarc1);
+ break;
+ default:
+ break;
+ }
+
+ ew32(TARC0, reg_tarc0);
+ }
+}
+
+/******************************************************************************
+ * Performs basic configuration of the adapter.
+ *
+ * hw - Struct containing variables accessed by shared code
+ *
+ * Assumes that the controller has previously been reset and is in a
+ * post-reset uninitialized state. Initializes the receive address registers,
+ * multicast table, and VLAN filter table. Calls routines to setup link
+ * configuration and flow control settings. Clears all on-chip counters. Leaves
+ * the transmit and receive units disabled and uninitialized.
+ *****************************************************************************/
+s32 e1000_init_hw(struct e1000_hw *hw)
+{
+ u32 ctrl;
+ u32 i;
+ s32 ret_val;
+ u32 mta_size;
+ u32 reg_data;
+ u32 ctrl_ext;
+
+ DEBUGFUNC("e1000_init_hw");
+
+ /* force full DMA clock frequency for 10/100 on ICH8 A0-B0 */
+ if ((hw->mac_type == e1000_ich8lan) &&
+ ((hw->revision_id < 3) ||
+ ((hw->device_id != E1000_DEV_ID_ICH8_IGP_M_AMT) &&
+ (hw->device_id != E1000_DEV_ID_ICH8_IGP_M)))) {
+ reg_data = er32(STATUS);
+ reg_data &= ~0x80000000;
+ ew32(STATUS, reg_data);
+ }
+
+ /* Initialize Identification LED */
+ ret_val = e1000_id_led_init(hw);
+ if (ret_val) {
+ DEBUGOUT("Error Initializing Identification LED\n");
+ return ret_val;
+ }
+
+ /* Set the media type and TBI compatibility */
+ e1000_set_media_type(hw);
+
+ /* Must be called after e1000_set_media_type because media_type is used */
+ e1000_initialize_hardware_bits(hw);
+
+ /* Disabling VLAN filtering. */
+ DEBUGOUT("Initializing the IEEE VLAN\n");
+ /* VET hardcoded to standard value and VFTA removed in ICH8 LAN */
+ if (hw->mac_type != e1000_ich8lan) {
+ if (hw->mac_type < e1000_82545_rev_3)
+ ew32(VET, 0);
+ e1000_clear_vfta(hw);
+ }
+
+ /* For 82542 (rev 2.0), disable MWI and put the receiver into reset */
+ if (hw->mac_type == e1000_82542_rev2_0) {
+ DEBUGOUT("Disabling MWI on 82542 rev 2.0\n");
+ e1000_pci_clear_mwi(hw);
+ ew32(RCTL, E1000_RCTL_RST);
+ E1000_WRITE_FLUSH();
+ msleep(5);
+ }
+
+ /* Setup the receive address. This involves initializing all of the Receive
+ * Address Registers (RARs 0 - 15).
+ */
+ e1000_init_rx_addrs(hw);
+
+ /* For 82542 (rev 2.0), take the receiver out of reset and enable MWI */
+ if (hw->mac_type == e1000_82542_rev2_0) {
+ ew32(RCTL, 0);
+ E1000_WRITE_FLUSH();
+ msleep(1);
+ if (hw->pci_cmd_word & PCI_COMMAND_INVALIDATE)
+ e1000_pci_set_mwi(hw);
+ }
+
+ /* Zero out the Multicast HASH table */
+ DEBUGOUT("Zeroing the MTA\n");
+ mta_size = E1000_MC_TBL_SIZE;
+ if (hw->mac_type == e1000_ich8lan)
+ mta_size = E1000_MC_TBL_SIZE_ICH8LAN;
+ for (i = 0; i < mta_size; i++) {
+ E1000_WRITE_REG_ARRAY(hw, MTA, i, 0);
+ /* use write flush to prevent Memory Write Block (MWB) from
+ * occurring when accessing our register space */
+ E1000_WRITE_FLUSH();
+ }
+
+ /* Set the PCI priority bit correctly in the CTRL register. This
+ * determines if the adapter gives priority to receives, or if it
+ * gives equal priority to transmits and receives. Valid only on
+ * 82542 and 82543 silicon.
+ */
+ if (hw->dma_fairness && hw->mac_type <= e1000_82543) {
+ ctrl = er32(CTRL);
+ ew32(CTRL, ctrl | E1000_CTRL_PRIOR);
+ }
+
+ switch (hw->mac_type) {
+ case e1000_82545_rev_3:
+ case e1000_82546_rev_3:
+ break;
+ default:
+ /* Workaround for PCI-X problem when BIOS sets MMRBC incorrectly. */
+ if (hw->bus_type == e1000_bus_type_pcix && e1000_pcix_get_mmrbc(hw) > 2048)
+ e1000_pcix_set_mmrbc(hw, 2048);
+ break;
+ }
+
+ /* More time needed for PHY to initialize */
+ if (hw->mac_type == e1000_ich8lan)
+ msleep(15);
+
+ /* Call a subroutine to configure the link and setup flow control. */
+ ret_val = e1000_setup_link(hw);
+
+ /* Set the transmit descriptor write-back policy */
+ if (hw->mac_type > e1000_82544) {
+ ctrl = er32(TXDCTL);
+ ctrl = (ctrl & ~E1000_TXDCTL_WTHRESH) | E1000_TXDCTL_FULL_TX_DESC_WB;
+ ew32(TXDCTL, ctrl);
+ }
+
+ if (hw->mac_type == e1000_82573) {
+ e1000_enable_tx_pkt_filtering(hw);
+ }
+
+ switch (hw->mac_type) {
+ default:
+ break;
+ case e1000_80003es2lan:
+ /* Enable retransmit on late collisions */
+ reg_data = er32(TCTL);
+ reg_data |= E1000_TCTL_RTLC;
+ ew32(TCTL, reg_data);
+
+ /* Configure Gigabit Carry Extend Padding */
+ reg_data = er32(TCTL_EXT);
+ reg_data &= ~E1000_TCTL_EXT_GCEX_MASK;
+ reg_data |= DEFAULT_80003ES2LAN_TCTL_EXT_GCEX;
+ ew32(TCTL_EXT, reg_data);
+
+ /* Configure Transmit Inter-Packet Gap */
+ reg_data = er32(TIPG);
+ reg_data &= ~E1000_TIPG_IPGT_MASK;
+ reg_data |= DEFAULT_80003ES2LAN_TIPG_IPGT_1000;
+ ew32(TIPG, reg_data);
+
+ reg_data = E1000_READ_REG_ARRAY(hw, FFLT, 0x0001);
+ reg_data &= ~0x00100000;
+ E1000_WRITE_REG_ARRAY(hw, FFLT, 0x0001, reg_data);
+ /* Fall through */
+ case e1000_82571:
+ case e1000_82572:
+ case e1000_ich8lan:
+ ctrl = er32(TXDCTL1);
+ ctrl = (ctrl & ~E1000_TXDCTL_WTHRESH) | E1000_TXDCTL_FULL_TX_DESC_WB;
+ ew32(TXDCTL1, ctrl);
+ break;
+ }
+
+
+ if (hw->mac_type == e1000_82573) {
+ u32 gcr = er32(GCR);
+ gcr |= E1000_GCR_L1_ACT_WITHOUT_L0S_RX;
+ ew32(GCR, gcr);
+ }
+
+ /* Clear all of the statistics registers (clear on read). It is
+ * important that we do this after we have tried to establish link
+ * because the symbol error count will increment wildly if there
+ * is no link.
+ */
+ e1000_clear_hw_cntrs(hw);
+
+ /* ICH8 No-snoop bits are opposite polarity.
+ * Set to snoop by default after reset. */
+ if (hw->mac_type == e1000_ich8lan)
+ e1000_set_pci_ex_no_snoop(hw, PCI_EX_82566_SNOOP_ALL);
+
+ if (hw->device_id == E1000_DEV_ID_82546GB_QUAD_COPPER ||
+ hw->device_id == E1000_DEV_ID_82546GB_QUAD_COPPER_KSP3) {
+ ctrl_ext = er32(CTRL_EXT);
+ /* Relaxed ordering must be disabled to avoid a parity
+ * error crash in a PCI slot. */
+ ctrl_ext |= E1000_CTRL_EXT_RO_DIS;
+ ew32(CTRL_EXT, ctrl_ext);
+ }
+
+ return ret_val;
+}
+
+/******************************************************************************
+ * Adjust SERDES output amplitude based on EEPROM setting.
+ *
+ * hw - Struct containing variables accessed by shared code.
+ *****************************************************************************/
+static s32 e1000_adjust_serdes_amplitude(struct e1000_hw *hw)
+{
+ u16 eeprom_data;
+ s32 ret_val;
+
+ DEBUGFUNC("e1000_adjust_serdes_amplitude");
+
+ if (hw->media_type != e1000_media_type_internal_serdes)
+ return E1000_SUCCESS;
+
+ switch (hw->mac_type) {
+ case e1000_82545_rev_3:
+ case e1000_82546_rev_3:
+ break;
+ default:
+ return E1000_SUCCESS;
+ }
+
+ ret_val = e1000_read_eeprom(hw, EEPROM_SERDES_AMPLITUDE, 1, &eeprom_data);
+ if (ret_val) {
+ return ret_val;
+ }
+
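+ /* A value of EEPROM_RESERVED_WORD marks an unprogrammed entry, in which
+ * case no amplitude override is applied. */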
+ if (eeprom_data != EEPROM_RESERVED_WORD) {
+ /* Adjust SERDES output amplitude only. */
+ eeprom_data &= EEPROM_SERDES_AMPLITUDE_MASK;
+ ret_val = e1000_write_phy_reg(hw, M88E1000_PHY_EXT_CTRL, eeprom_data);
+ if (ret_val)
+ return ret_val;
+ }
+
+ return E1000_SUCCESS;
+}
+
+/******************************************************************************
+ * Configures flow control and link settings.
+ *
+ * hw - Struct containing variables accessed by shared code
+ *
+ * Determines which flow control settings to use. Calls the appropriate media-
+ * specific link configuration function. Configures the flow control settings.
+ * Assuming the adapter has a valid link partner, a valid link should be
+ * established. Assumes the hardware has previously been reset and the
+ * transmitter and receiver are not enabled.
+ *****************************************************************************/
+s32 e1000_setup_link(struct e1000_hw *hw)
+{
+ u32 ctrl_ext;
+ s32 ret_val;
+ u16 eeprom_data;
+
+ DEBUGFUNC("e1000_setup_link");
+
+ /* In the case of the phy reset being blocked, we already have a link.
+ * We do not have to set it up again. */
+ if (e1000_check_phy_reset_block(hw))
+ return E1000_SUCCESS;
+
+ /* Read and store word 0x0F of the EEPROM. This word contains bits
+ * that determine the hardware's default PAUSE (flow control) mode,
+ * a bit that determines whether the HW defaults to enabling or
+ * disabling auto-negotiation, and the direction of the
+ * SW defined pins. If there is no SW over-ride of the flow
+ * control setting, then the variable hw->fc will
+ * be initialized based on a value in the EEPROM.
+ */
+ if (hw->fc == E1000_FC_DEFAULT) {
+ switch (hw->mac_type) {
+ case e1000_ich8lan:
+ case e1000_82573:
+ hw->fc = E1000_FC_FULL;
+ break;
+ default:
+ ret_val = e1000_read_eeprom(hw, EEPROM_INIT_CONTROL2_REG,
+ 1, &eeprom_data);
+ if (ret_val) {
+ DEBUGOUT("EEPROM Read Error\n");
+ return -E1000_ERR_EEPROM;
+ }
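+ /* Word 0x0F carries two flow control bits, PAUSE and ASM_DIR: neither
+ * set means no flow control, ASM_DIR alone means Tx-pause only, and
+ * any other combination enables full flow control. */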
+ if ((eeprom_data & EEPROM_WORD0F_PAUSE_MASK) == 0)
+ hw->fc = E1000_FC_NONE;
+ else if ((eeprom_data & EEPROM_WORD0F_PAUSE_MASK) ==
+ EEPROM_WORD0F_ASM_DIR)
+ hw->fc = E1000_FC_TX_PAUSE;
+ else
+ hw->fc = E1000_FC_FULL;
+ break;
+ }
+ }
+
+ /* We want to save off the original Flow Control configuration just
+ * in case we get disconnected and then reconnected into a different
+ * hub or switch with different Flow Control capabilities.
+ */
+ if (hw->mac_type == e1000_82542_rev2_0)
+ hw->fc &= (~E1000_FC_TX_PAUSE);
+
+ if ((hw->mac_type < e1000_82543) && (hw->report_tx_early == 1))
+ hw->fc &= (~E1000_FC_RX_PAUSE);
+
+ hw->original_fc = hw->fc;
+
+ DEBUGOUT1("After fix-ups FlowControl is now = %x\n", hw->fc);
+
+ /* Take the 4 bits from EEPROM word 0x0F that determine the initial
+ * polarity value for the SW controlled pins, and setup the
+ * Extended Device Control reg with that info.
+ * This is needed because one of the SW controlled pins is used for
+ * signal detection. So this should be done before e1000_setup_pcs_link()
+ * or e1000_phy_setup() is called.
+ */
+ if (hw->mac_type == e1000_82543) {
+ ret_val = e1000_read_eeprom(hw, EEPROM_INIT_CONTROL2_REG,
+ 1, &eeprom_data);
+ if (ret_val) {
+ DEBUGOUT("EEPROM Read Error\n");
+ return -E1000_ERR_EEPROM;
+ }
+ ctrl_ext = ((eeprom_data & EEPROM_WORD0F_SWPDIO_EXT) <<
+ SWDPIO__EXT_SHIFT);
+ ew32(CTRL_EXT, ctrl_ext);
+ }
+
+ /* Call the necessary subroutine to configure the link. */
+ ret_val = (hw->media_type == e1000_media_type_copper) ?
+ e1000_setup_copper_link(hw) :
+ e1000_setup_fiber_serdes_link(hw);
+
+ /* Initialize the flow control address, type, and PAUSE timer
+ * registers to their default values. This is done even if flow
+ * control is disabled, because it does not hurt anything to
+ * initialize these registers.
+ */
+ DEBUGOUT("Initializing the Flow Control address, type and timer regs\n");
+
+ /* FCAL/H and FCT are hardcoded to standard values in e1000_ich8lan. */
+ if (hw->mac_type != e1000_ich8lan) {
+ ew32(FCT, FLOW_CONTROL_TYPE);
+ ew32(FCAH, FLOW_CONTROL_ADDRESS_HIGH);
+ ew32(FCAL, FLOW_CONTROL_ADDRESS_LOW);
+ }
+
+ ew32(FCTTV, hw->fc_pause_time);
+
+ /* Set the flow control receive threshold registers. Normally,
+ * these registers will be set to a default threshold that may be
+ * adjusted later by the driver's runtime code. However, if the
+ * ability to transmit pause frames is not enabled, then these
+ * registers will be set to 0.
+ */
+ if (!(hw->fc & E1000_FC_TX_PAUSE)) {
+ ew32(FCRTL, 0);
+ ew32(FCRTH, 0);
+ } else {
+ /* We need to set up the Receive Threshold high and low water marks
+ * as well as (optionally) enabling the transmission of XON frames.
+ */
+ if (hw->fc_send_xon) {
+ ew32(FCRTL, (hw->fc_low_water | E1000_FCRTL_XONE));
+ ew32(FCRTH, hw->fc_high_water);
+ } else {
+ ew32(FCRTL, hw->fc_low_water);
+ ew32(FCRTH, hw->fc_high_water);
+ }
+ }
+ return ret_val;
+}
+
+/******************************************************************************
+ * Sets up link for a fiber based or serdes based adapter
+ *
+ * hw - Struct containing variables accessed by shared code
+ *
+ * Manipulates Physical Coding Sublayer functions in order to configure
+ * link. Assumes the hardware has been previously reset and the transmitter
+ * and receiver are not enabled.
+ *****************************************************************************/
+static s32 e1000_setup_fiber_serdes_link(struct e1000_hw *hw)
+{
+ u32 ctrl;
+ u32 status;
+ u32 txcw = 0;
+ u32 i;
+ u32 signal = 0;
+ s32 ret_val;
+
+ DEBUGFUNC("e1000_setup_fiber_serdes_link");
+
+ /* On 82571 and 82572 Fiber connections, SerDes loopback mode persists
+ * until explicitly turned off or a power cycle is performed. A read to
+ * the register does not indicate its status. Therefore, we ensure
+ * loopback mode is disabled during initialization.
+ */
+ if (hw->mac_type == e1000_82571 || hw->mac_type == e1000_82572)
+ ew32(SCTL, E1000_DISABLE_SERDES_LOOPBACK);
+
+ /* On adapters with a MAC newer than 82544, SWDP 1 will be
+ * set when the optics detect a signal. On older adapters, it will be
+ * cleared when there is a signal. This applies to fiber media only.
+ * If we're on serdes media, adjust the output amplitude to value
+ * set in the EEPROM.
+ */
+ ctrl = er32(CTRL);
+ if (hw->media_type == e1000_media_type_fiber)
+ signal = (hw->mac_type > e1000_82544) ? E1000_CTRL_SWDPIN1 : 0;
+
+ ret_val = e1000_adjust_serdes_amplitude(hw);
+ if (ret_val)
+ return ret_val;
+
+ /* Take the link out of reset */
+ ctrl &= ~(E1000_CTRL_LRST);
+
+ /* Adjust VCO speed to improve BER performance */
+ ret_val = e1000_set_vco_speed(hw);
+ if (ret_val)
+ return ret_val;
+
+ e1000_config_collision_dist(hw);
+
+ /* Check for a software override of the flow control settings, and setup
+ * the device accordingly. If auto-negotiation is enabled, then software
+ * will have to set the "PAUSE" bits to the correct value in the Transmit
+ * Config Word Register (TXCW) and re-start auto-negotiation. However, if
+ * auto-negotiation is disabled, then software will have to manually
+ * configure the two flow control enable bits in the CTRL register.
+ *
+ * The possible values of the "fc" parameter are:
+ * 0: Flow control is completely disabled
+ * 1: Rx flow control is enabled (we can receive pause frames, but
+ * not send pause frames).
+ * 2: Tx flow control is enabled (we can send pause frames but we do
+ * not support receiving pause frames).
+ * 3: Both Rx and TX flow control (symmetric) are enabled.
+ */
+ switch (hw->fc) {
+ case E1000_FC_NONE:
+ /* Flow control is completely disabled by a software over-ride. */
+ txcw = (E1000_TXCW_ANE | E1000_TXCW_FD);
+ break;
+ case E1000_FC_RX_PAUSE:
+ /* RX Flow control is enabled and TX Flow control is disabled by a
+ * software over-ride. Since there really isn't a way to advertise
+ * that we are capable of RX Pause ONLY, we will advertise that we
+ * support both symmetric and asymmetric RX PAUSE. Later, we will
+ * disable the adapter's ability to send PAUSE frames.
+ */
+ txcw = (E1000_TXCW_ANE | E1000_TXCW_FD | E1000_TXCW_PAUSE_MASK);
+ break;
+ case E1000_FC_TX_PAUSE:
+ /* TX Flow control is enabled, and RX Flow control is disabled, by a
+ * software over-ride.
+ */
+ txcw = (E1000_TXCW_ANE | E1000_TXCW_FD | E1000_TXCW_ASM_DIR);
+ break;
+ case E1000_FC_FULL:
+ /* Flow control (both RX and TX) is enabled by a software over-ride. */
+ txcw = (E1000_TXCW_ANE | E1000_TXCW_FD | E1000_TXCW_PAUSE_MASK);
+ break;
+ default:
+ DEBUGOUT("Flow control param set incorrectly\n");
+ return -E1000_ERR_CONFIG;
+ break;
+ }
+
+ /* Since auto-negotiation is enabled, take the link out of reset (the link
+ * will be in reset, because we previously reset the chip). This will
+ * restart auto-negotiation. If auto-negotiation is successful then the
+ * link-up status bit will be set and the flow control enable bits (RFCE
+ * and TFCE) will be set according to their negotiated value.
+ */
+ DEBUGOUT("Auto-negotiation enabled\n");
+
+ ew32(TXCW, txcw);
+ ew32(CTRL, ctrl);
+ E1000_WRITE_FLUSH();
+
+ hw->txcw = txcw;
+ msleep(1);
+
+ /* If we have a signal (the cable is plugged in) then poll for a "Link-Up"
+ * indication in the Device Status Register. Time-out if a link isn't
+ * seen in 500 milliseconds (Auto-negotiation should complete in
+ * less than 500 milliseconds even if the other end is doing it in SW).
+ * For internal serdes, we just assume a signal is present, then poll.
+ */
+ if (hw->media_type == e1000_media_type_internal_serdes ||
+ (er32(CTRL) & E1000_CTRL_SWDPIN1) == signal) {
+ DEBUGOUT("Looking for Link\n");
+ for (i = 0; i < (LINK_UP_TIMEOUT / 10); i++) {
+ msleep(10);
+ status = er32(STATUS);
+ if (status & E1000_STATUS_LU) break;
+ }
+ if (i == (LINK_UP_TIMEOUT / 10)) {
+ DEBUGOUT("Never got a valid link from auto-neg!!!\n");
+ hw->autoneg_failed = 1;
+ /* AutoNeg failed to achieve a link, so we'll call
+ * e1000_check_for_link. This routine will force the link up if
+ * we detect a signal. This will allow us to communicate with
+ * non-autonegotiating link partners.
+ */
+ ret_val = e1000_check_for_link(hw);
+ if (ret_val) {
+ DEBUGOUT("Error while checking for link\n");
+ return ret_val;
+ }
+ hw->autoneg_failed = 0;
+ } else {
+ hw->autoneg_failed = 0;
+ DEBUGOUT("Valid Link Found\n");
+ }
+ } else {
+ DEBUGOUT("No Signal Detected\n");
+ }
+ return E1000_SUCCESS;
+}
+
+/******************************************************************************
+* Make sure we have a valid PHY and change PHY mode before link setup.
+*
+* hw - Struct containing variables accessed by shared code
+******************************************************************************/
+static s32 e1000_copper_link_preconfig(struct e1000_hw *hw)
+{
+ u32 ctrl;
+ s32 ret_val;
+ u16 phy_data;
+
+ DEBUGFUNC("e1000_copper_link_preconfig");
+
+ ctrl = er32(CTRL);
+ /* With 82543, we need to force speed and duplex on the MAC equal to what
+ * the PHY speed and duplex configuration is. In addition, we need to
+ * perform a hardware reset on the PHY to take it out of reset.
+ */
+ if (hw->mac_type > e1000_82543) {
+ ctrl |= E1000_CTRL_SLU;
+ ctrl &= ~(E1000_CTRL_FRCSPD | E1000_CTRL_FRCDPX);
+ ew32(CTRL, ctrl);
+ } else {
+ ctrl |= (E1000_CTRL_FRCSPD | E1000_CTRL_FRCDPX | E1000_CTRL_SLU);
+ ew32(CTRL, ctrl);
+ ret_val = e1000_phy_hw_reset(hw);
+ if (ret_val)
+ return ret_val;
+ }
+
+ /* Make sure we have a valid PHY */
+ ret_val = e1000_detect_gig_phy(hw);
+ if (ret_val) {
+ DEBUGOUT("Error, did not detect valid phy.\n");
+ return ret_val;
+ }
+ DEBUGOUT1("Phy ID = %x \n", hw->phy_id);
+
+ /* Set PHY to class A mode (if necessary) */
+ ret_val = e1000_set_phy_mode(hw);
+ if (ret_val)
+ return ret_val;
+
+ if ((hw->mac_type == e1000_82545_rev_3) ||
+ (hw->mac_type == e1000_82546_rev_3)) {
+ ret_val = e1000_read_phy_reg(hw, M88E1000_PHY_SPEC_CTRL, &phy_data);
+ phy_data |= 0x00000008;
+ ret_val = e1000_write_phy_reg(hw, M88E1000_PHY_SPEC_CTRL, phy_data);
+ }
+
+ if (hw->mac_type <= e1000_82543 ||
+ hw->mac_type == e1000_82541 || hw->mac_type == e1000_82547 ||
+ hw->mac_type == e1000_82541_rev_2 || hw->mac_type == e1000_82547_rev_2)
+ hw->phy_reset_disable = false;
+
+ return E1000_SUCCESS;
+}
+
+
+/********************************************************************
+* Copper link setup for e1000_phy_igp series.
+*
+* hw - Struct containing variables accessed by shared code
+*********************************************************************/
+static s32 e1000_copper_link_igp_setup(struct e1000_hw *hw)
+{
+ u32 led_ctrl;
+ s32 ret_val;
+ u16 phy_data;
+
+ DEBUGFUNC("e1000_copper_link_igp_setup");
+
+ if (hw->phy_reset_disable)
+ return E1000_SUCCESS;
+
+ ret_val = e1000_phy_reset(hw);
+ if (ret_val) {
+ DEBUGOUT("Error Resetting the PHY\n");
+ return ret_val;
+ }
+
+ /* Wait 15ms for MAC to configure PHY from eeprom settings */
+ msleep(15);
+ if (hw->mac_type != e1000_ich8lan) {
+ /* Configure activity LED after PHY reset */
+ led_ctrl = er32(LEDCTL);
+ led_ctrl &= IGP_ACTIVITY_LED_MASK;
+ led_ctrl |= (IGP_ACTIVITY_LED_ENABLE | IGP_LED3_MODE);
+ ew32(LEDCTL, led_ctrl);
+ }
+
+ /* The NVM settings will configure LPLU in D3 for IGP2 and IGP3 PHYs */
+ if (hw->phy_type == e1000_phy_igp) {
+ /* disable lplu d3 during driver init */
+ ret_val = e1000_set_d3_lplu_state(hw, false);
+ if (ret_val) {
+ DEBUGOUT("Error Disabling LPLU D3\n");
+ return ret_val;
+ }
+ }
+
+ /* disable lplu d0 during driver init */
+ ret_val = e1000_set_d0_lplu_state(hw, false);
+ if (ret_val) {
+ DEBUGOUT("Error Disabling LPLU D0\n");
+ return ret_val;
+ }
+ /* Configure mdi-mdix settings */
+ ret_val = e1000_read_phy_reg(hw, IGP01E1000_PHY_PORT_CTRL, &phy_data);
+ if (ret_val)
+ return ret_val;
+
+ if ((hw->mac_type == e1000_82541) || (hw->mac_type == e1000_82547)) {
+ hw->dsp_config_state = e1000_dsp_config_disabled;
+ /* Force MDI for earlier revs of the IGP PHY */
+ phy_data &= ~(IGP01E1000_PSCR_AUTO_MDIX | IGP01E1000_PSCR_FORCE_MDI_MDIX);
+ hw->mdix = 1;
+
+ } else {
+ hw->dsp_config_state = e1000_dsp_config_enabled;
+ phy_data &= ~IGP01E1000_PSCR_AUTO_MDIX;
+
+ switch (hw->mdix) {
+ case 1:
+ phy_data &= ~IGP01E1000_PSCR_FORCE_MDI_MDIX;
+ break;
+ case 2:
+ phy_data |= IGP01E1000_PSCR_FORCE_MDI_MDIX;
+ break;
+ case 0:
+ default:
+ phy_data |= IGP01E1000_PSCR_AUTO_MDIX;
+ break;
+ }
+ }
+ ret_val = e1000_write_phy_reg(hw, IGP01E1000_PHY_PORT_CTRL, phy_data);
+ if (ret_val)
+ return ret_val;
+
+ /* set auto-master slave resolution settings */
+ if (hw->autoneg) {
+ e1000_ms_type phy_ms_setting = hw->master_slave;
+
+ if (hw->ffe_config_state == e1000_ffe_config_active)
+ hw->ffe_config_state = e1000_ffe_config_enabled;
+
+ if (hw->dsp_config_state == e1000_dsp_config_activated)
+ hw->dsp_config_state = e1000_dsp_config_enabled;
+
+ /* when autonegotiation advertisement is only 1000Mbps then we
+ * should disable SmartSpeed and enable Auto MasterSlave
+ * resolution as hardware default. */
+ if (hw->autoneg_advertised == ADVERTISE_1000_FULL) {
+ /* Disable SmartSpeed */
+ ret_val = e1000_read_phy_reg(hw, IGP01E1000_PHY_PORT_CONFIG,
+ &phy_data);
+ if (ret_val)
+ return ret_val;
+ phy_data &= ~IGP01E1000_PSCFR_SMART_SPEED;
+ ret_val = e1000_write_phy_reg(hw, IGP01E1000_PHY_PORT_CONFIG,
+ phy_data);
+ if (ret_val)
+ return ret_val;
+ /* Set auto Master/Slave resolution process */
+ ret_val = e1000_read_phy_reg(hw, PHY_1000T_CTRL, &phy_data);
+ if (ret_val)
+ return ret_val;
+ phy_data &= ~CR_1000T_MS_ENABLE;
+ ret_val = e1000_write_phy_reg(hw, PHY_1000T_CTRL, phy_data);
+ if (ret_val)
+ return ret_val;
+ }
+
+ ret_val = e1000_read_phy_reg(hw, PHY_1000T_CTRL, &phy_data);
+ if (ret_val)
+ return ret_val;
+
+ /* load defaults for future use */
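+ /* CR_1000T_MS_ENABLE selects manual master/slave configuration;
+ * CR_1000T_MS_VALUE then forces master when set and slave when clear. */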
+ hw->original_master_slave = (phy_data & CR_1000T_MS_ENABLE) ?
+ ((phy_data & CR_1000T_MS_VALUE) ?
+ e1000_ms_force_master :
+ e1000_ms_force_slave) :
+ e1000_ms_auto;
+
+ switch (phy_ms_setting) {
+ case e1000_ms_force_master:
+ phy_data |= (CR_1000T_MS_ENABLE | CR_1000T_MS_VALUE);
+ break;
+ case e1000_ms_force_slave:
+ phy_data |= CR_1000T_MS_ENABLE;
+ phy_data &= ~(CR_1000T_MS_VALUE);
+ break;
+ case e1000_ms_auto:
+ phy_data &= ~CR_1000T_MS_ENABLE;
+ default:
+ break;
+ }
+ ret_val = e1000_write_phy_reg(hw, PHY_1000T_CTRL, phy_data);
+ if (ret_val)
+ return ret_val;
+ }
+
+ return E1000_SUCCESS;
+}
+
+/********************************************************************
+* Copper link setup for e1000_phy_gg82563 series.
+*
+* hw - Struct containing variables accessed by shared code
+*********************************************************************/
+static s32 e1000_copper_link_ggp_setup(struct e1000_hw *hw)
+{
+ s32 ret_val;
+ u16 phy_data;
+ u32 reg_data;
+
+ DEBUGFUNC("e1000_copper_link_ggp_setup");
+
+ if (!hw->phy_reset_disable) {
+
+ /* Enable CRS on TX for half-duplex operation. */
+ ret_val = e1000_read_phy_reg(hw, GG82563_PHY_MAC_SPEC_CTRL,
+ &phy_data);
+ if (ret_val)
+ return ret_val;
+
+ phy_data |= GG82563_MSCR_ASSERT_CRS_ON_TX;
+ /* Use 25MHz for both link down and 1000BASE-T for Tx clock */
+ phy_data |= GG82563_MSCR_TX_CLK_1000MBPS_25MHZ;
+
+ ret_val = e1000_write_phy_reg(hw, GG82563_PHY_MAC_SPEC_CTRL,
+ phy_data);
+ if (ret_val)
+ return ret_val;
+
+ /* Options:
+ * MDI/MDI-X = 0 (default)
+ * 0 - Auto for all speeds
+ * 1 - MDI mode
+ * 2 - MDI-X mode
+ * 3 - Auto for 1000Base-T only (MDI-X for 10/100Base-T modes)
+ */
+ ret_val = e1000_read_phy_reg(hw, GG82563_PHY_SPEC_CTRL, &phy_data);
+ if (ret_val)
+ return ret_val;
+
+ phy_data &= ~GG82563_PSCR_CROSSOVER_MODE_MASK;
+
+ switch (hw->mdix) {
+ case 1:
+ phy_data |= GG82563_PSCR_CROSSOVER_MODE_MDI;
+ break;
+ case 2:
+ phy_data |= GG82563_PSCR_CROSSOVER_MODE_MDIX;
+ break;
+ case 0:
+ default:
+ phy_data |= GG82563_PSCR_CROSSOVER_MODE_AUTO;
+ break;
+ }
+
+ /* Options:
+ * disable_polarity_correction = 0 (default)
+ * Automatic Correction for Reversed Cable Polarity
+ * 0 - Disabled
+ * 1 - Enabled
+ */
+ phy_data &= ~GG82563_PSCR_POLARITY_REVERSAL_DISABLE;
+ if (hw->disable_polarity_correction == 1)
+ phy_data |= GG82563_PSCR_POLARITY_REVERSAL_DISABLE;
+ ret_val = e1000_write_phy_reg(hw, GG82563_PHY_SPEC_CTRL, phy_data);
+
+ if (ret_val)
+ return ret_val;
+
+ /* SW Reset the PHY so all changes take effect */
+ ret_val = e1000_phy_reset(hw);
+ if (ret_val) {
+ DEBUGOUT("Error Resetting the PHY\n");
+ return ret_val;
+ }
+ } /* phy_reset_disable */
+
+ if (hw->mac_type == e1000_80003es2lan) {
+ /* Bypass RX and TX FIFOs */
+ ret_val = e1000_write_kmrn_reg(hw, E1000_KUMCTRLSTA_OFFSET_FIFO_CTRL,
+ E1000_KUMCTRLSTA_FIFO_CTRL_RX_BYPASS |
+ E1000_KUMCTRLSTA_FIFO_CTRL_TX_BYPASS);
+ if (ret_val)
+ return ret_val;
+
+ ret_val = e1000_read_phy_reg(hw, GG82563_PHY_SPEC_CTRL_2, &phy_data);
+ if (ret_val)
+ return ret_val;
+
+ phy_data &= ~GG82563_PSCR2_REVERSE_AUTO_NEG;
+ ret_val = e1000_write_phy_reg(hw, GG82563_PHY_SPEC_CTRL_2, phy_data);
+
+ if (ret_val)
+ return ret_val;
+
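+ /* Select the default link mode by clearing the link mode field in
+ * CTRL_EXT. */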
+ reg_data = er32(CTRL_EXT);
+ reg_data &= ~(E1000_CTRL_EXT_LINK_MODE_MASK);
+ ew32(CTRL_EXT, reg_data);
+
+ ret_val = e1000_read_phy_reg(hw, GG82563_PHY_PWR_MGMT_CTRL,
+ &phy_data);
+ if (ret_val)
+ return ret_val;
+
+ /* Do not init these registers when the HW is in IAMT mode, since the
+ * firmware will have already initialized them. We only initialize
+ * them if the HW is not in IAMT mode.
+ */
+ if (!e1000_check_mng_mode(hw)) {
+ /* Enable Electrical Idle on the PHY */
+ phy_data |= GG82563_PMCR_ENABLE_ELECTRICAL_IDLE;
+ ret_val = e1000_write_phy_reg(hw, GG82563_PHY_PWR_MGMT_CTRL,
+ phy_data);
+ if (ret_val)
+ return ret_val;
+
+ ret_val = e1000_read_phy_reg(hw, GG82563_PHY_KMRN_MODE_CTRL,
+ &phy_data);
+ if (ret_val)
+ return ret_val;
+
+ phy_data &= ~GG82563_KMCR_PASS_FALSE_CARRIER;
+ ret_val = e1000_write_phy_reg(hw, GG82563_PHY_KMRN_MODE_CTRL,
+ phy_data);
+
+ if (ret_val)
+ return ret_val;
+ }
+
+ /* Workaround: Disable padding in Kumeran interface in the MAC
+ * and in the PHY to avoid CRC errors.
+ */
+ ret_val = e1000_read_phy_reg(hw, GG82563_PHY_INBAND_CTRL,
+ &phy_data);
+ if (ret_val)
+ return ret_val;
+ phy_data |= GG82563_ICR_DIS_PADDING;
+ ret_val = e1000_write_phy_reg(hw, GG82563_PHY_INBAND_CTRL,
+ phy_data);
+ if (ret_val)
+ return ret_val;
+ }
+
+ return E1000_SUCCESS;
+}
+
+/********************************************************************
+* Copper link setup for e1000_phy_m88 series.
+*
+* hw - Struct containing variables accessed by shared code
+*********************************************************************/
+static s32 e1000_copper_link_mgp_setup(struct e1000_hw *hw)
+{
+ s32 ret_val;
+ u16 phy_data;
+
+ DEBUGFUNC("e1000_copper_link_mgp_setup");
+
+ if (hw->phy_reset_disable)
+ return E1000_SUCCESS;
+
+ /* Enable CRS on TX. This must be set for half-duplex operation. */
+ ret_val = e1000_read_phy_reg(hw, M88E1000_PHY_SPEC_CTRL, &phy_data);
+ if (ret_val)
+ return ret_val;
+
+ phy_data |= M88E1000_PSCR_ASSERT_CRS_ON_TX;
+
+ /* Options:
+ * MDI/MDI-X = 0 (default)
+ * 0 - Auto for all speeds
+ * 1 - MDI mode
+ * 2 - MDI-X mode
+ * 3 - Auto for 1000Base-T only (MDI-X for 10/100Base-T modes)
+ */
+ phy_data &= ~M88E1000_PSCR_AUTO_X_MODE;
+
+ switch (hw->mdix) {
+ case 1:
+ phy_data |= M88E1000_PSCR_MDI_MANUAL_MODE;
+ break;
+ case 2:
+ phy_data |= M88E1000_PSCR_MDIX_MANUAL_MODE;
+ break;
+ case 3:
+ phy_data |= M88E1000_PSCR_AUTO_X_1000T;
+ break;
+ case 0:
+ default:
+ phy_data |= M88E1000_PSCR_AUTO_X_MODE;
+ break;
+ }
+
+ /* Options:
+ * disable_polarity_correction = 0 (default)
+ * Automatic Correction for Reversed Cable Polarity
+ * 0 - Disabled
+ * 1 - Enabled
+ */
+ phy_data &= ~M88E1000_PSCR_POLARITY_REVERSAL;
+ if (hw->disable_polarity_correction == 1)
+ phy_data |= M88E1000_PSCR_POLARITY_REVERSAL;
+ ret_val = e1000_write_phy_reg(hw, M88E1000_PHY_SPEC_CTRL, phy_data);
+ if (ret_val)
+ return ret_val;
+
+ if (hw->phy_revision < M88E1011_I_REV_4) {
+ /* Force TX_CLK in the Extended PHY Specific Control Register
+ * to 25MHz clock.
+ */
+ ret_val = e1000_read_phy_reg(hw, M88E1000_EXT_PHY_SPEC_CTRL, &phy_data);
+ if (ret_val)
+ return ret_val;
+
+ phy_data |= M88E1000_EPSCR_TX_CLK_25;
+
+ if ((hw->phy_revision == E1000_REVISION_2) &&
+ (hw->phy_id == M88E1111_I_PHY_ID)) {
+ /* Vidalia Phy, set the downshift counter to 5x */
+ phy_data &= ~(M88EC018_EPSCR_DOWNSHIFT_COUNTER_MASK);
+ phy_data |= M88EC018_EPSCR_DOWNSHIFT_COUNTER_5X;
+ ret_val = e1000_write_phy_reg(hw,
+ M88E1000_EXT_PHY_SPEC_CTRL, phy_data);
+ if (ret_val)
+ return ret_val;
+ } else {
+ /* Configure Master and Slave downshift values */
+ phy_data &= ~(M88E1000_EPSCR_MASTER_DOWNSHIFT_MASK |
+ M88E1000_EPSCR_SLAVE_DOWNSHIFT_MASK);
+ phy_data |= (M88E1000_EPSCR_MASTER_DOWNSHIFT_1X |
+ M88E1000_EPSCR_SLAVE_DOWNSHIFT_1X);
+ ret_val = e1000_write_phy_reg(hw,
+ M88E1000_EXT_PHY_SPEC_CTRL, phy_data);
+ if (ret_val)
+ return ret_val;
+ }
+ }
+
+ /* SW Reset the PHY so all changes take effect */
+ ret_val = e1000_phy_reset(hw);
+ if (ret_val) {
+ DEBUGOUT("Error Resetting the PHY\n");
+ return ret_val;
+ }
+
+ return E1000_SUCCESS;
+}
+
+/********************************************************************
+* Setup auto-negotiation and flow control advertisements,
+* and then perform auto-negotiation.
+*
+* hw - Struct containing variables accessed by shared code
+*********************************************************************/
+static s32 e1000_copper_link_autoneg(struct e1000_hw *hw)
+{
+ s32 ret_val;
+ u16 phy_data;
+
+ DEBUGFUNC("e1000_copper_link_autoneg");
+
+ /* Perform some bounds checking on the hw->autoneg_advertised
+ * parameter. If this variable is zero, then set it to the default.
+ */
+ hw->autoneg_advertised &= AUTONEG_ADVERTISE_SPEED_DEFAULT;
+
+ /* If autoneg_advertised is zero, we assume it was not defaulted
+ * by the calling code so we set to advertise full capability.
+ */
+ if (hw->autoneg_advertised == 0)
+ hw->autoneg_advertised = AUTONEG_ADVERTISE_SPEED_DEFAULT;
+
+ /* IFE phy only supports 10/100 */
+ if (hw->phy_type == e1000_phy_ife)
+ hw->autoneg_advertised &= AUTONEG_ADVERTISE_10_100_ALL;
+
+ DEBUGOUT("Reconfiguring auto-neg advertisement params\n");
+ ret_val = e1000_phy_setup_autoneg(hw);
+ if (ret_val) {
+ DEBUGOUT("Error Setting up Auto-Negotiation\n");
+ return ret_val;
+ }
+ DEBUGOUT("Restarting Auto-Neg\n");
+
+ /* Restart auto-negotiation by setting the Auto Neg Enable bit and
+ * the Auto Neg Restart bit in the PHY control register.
+ */
+ ret_val = e1000_read_phy_reg(hw, PHY_CTRL, &phy_data);
+ if (ret_val)
+ return ret_val;
+
+ phy_data |= (MII_CR_AUTO_NEG_EN | MII_CR_RESTART_AUTO_NEG);
+ ret_val = e1000_write_phy_reg(hw, PHY_CTRL, phy_data);
+ if (ret_val)
+ return ret_val;
+
+ /* Does the user want to wait for Auto-Neg to complete here, or
+ * check at a later time (for example, callback routine).
+ */
+ if (hw->wait_autoneg_complete) {
+ ret_val = e1000_wait_autoneg(hw);
+ if (ret_val) {
+ DEBUGOUT("Error while waiting for autoneg to complete\n");
+ return ret_val;
+ }
+ }
+
+ hw->get_link_status = true;
+
+ return E1000_SUCCESS;
+}
+
+/******************************************************************************
+* Config the MAC and the PHY after link is up.
+* 1) Set up the MAC to the current PHY speed/duplex if we are on 82543.
+* If we are on newer silicon, we only need to configure collision
+* distance in the Transmit Control Register.
+* 2) Set up flow control on the MAC to that established with
+* the link partner.
+* 3) Config DSP to improve Gigabit link quality for some PHY revisions.
+*
+* hw - Struct containing variables accessed by shared code
+******************************************************************************/
+static s32 e1000_copper_link_postconfig(struct e1000_hw *hw)
+{
+ s32 ret_val;
+ DEBUGFUNC("e1000_copper_link_postconfig");
+
+ if (hw->mac_type >= e1000_82544) {
+ e1000_config_collision_dist(hw);
+ } else {
+ ret_val = e1000_config_mac_to_phy(hw);
+ if (ret_val) {
+ DEBUGOUT("Error configuring MAC to PHY settings\n");
+ return ret_val;
+ }
+ }
+ ret_val = e1000_config_fc_after_link_up(hw);
+ if (ret_val) {
+ DEBUGOUT("Error Configuring Flow Control\n");
+ return ret_val;
+ }
+
+ /* Config DSP to improve Giga link quality */
+ if (hw->phy_type == e1000_phy_igp) {
+ ret_val = e1000_config_dsp_after_link_change(hw, true);
+ if (ret_val) {
+ DEBUGOUT("Error Configuring DSP after link up\n");
+ return ret_val;
+ }
+ }
+
+ return E1000_SUCCESS;
+}
+
+/******************************************************************************
+* Detects which PHY is present and setup the speed and duplex
+*
+* hw - Struct containing variables accessed by shared code
+******************************************************************************/
+static s32 e1000_setup_copper_link(struct e1000_hw *hw)
+{
+ s32 ret_val;
+ u16 i;
+ u16 phy_data;
+ u16 reg_data;
+
+ DEBUGFUNC("e1000_setup_copper_link");
+
+ switch (hw->mac_type) {
+ case e1000_80003es2lan:
+ case e1000_ich8lan:
+ /* Set the mac to wait the maximum time between each
+ * iteration and increase the max iterations when
+ * polling the phy; this fixes erroneous timeouts at 10Mbps. */
+ ret_val = e1000_write_kmrn_reg(hw, GG82563_REG(0x34, 4), 0xFFFF);
+ if (ret_val)
+ return ret_val;
+ ret_val = e1000_read_kmrn_reg(hw, GG82563_REG(0x34, 9), &reg_data);
+ if (ret_val)
+ return ret_val;
+ reg_data |= 0x3F;
+ ret_val = e1000_write_kmrn_reg(hw, GG82563_REG(0x34, 9), reg_data);
+ if (ret_val)
+ return ret_val;
+ default:
+ break;
+ }
+
+ /* Check if it is a valid PHY and set PHY mode if necessary. */
+ ret_val = e1000_copper_link_preconfig(hw);
+ if (ret_val)
+ return ret_val;
+
+ switch (hw->mac_type) {
+ case e1000_80003es2lan:
+ /* Kumeran registers are write-only */
+ reg_data = E1000_KUMCTRLSTA_INB_CTRL_LINK_STATUS_TX_TIMEOUT_DEFAULT;
+ reg_data |= E1000_KUMCTRLSTA_INB_CTRL_DIS_PADDING;
+ ret_val = e1000_write_kmrn_reg(hw, E1000_KUMCTRLSTA_OFFSET_INB_CTRL,
+ reg_data);
+ if (ret_val)
+ return ret_val;
+ break;
+ default:
+ break;
+ }
+
+ if (hw->phy_type == e1000_phy_igp ||
+ hw->phy_type == e1000_phy_igp_3 ||
+ hw->phy_type == e1000_phy_igp_2) {
+ ret_val = e1000_copper_link_igp_setup(hw);
+ if (ret_val)
+ return ret_val;
+ } else if (hw->phy_type == e1000_phy_m88) {
+ ret_val = e1000_copper_link_mgp_setup(hw);
+ if (ret_val)
+ return ret_val;
+ } else if (hw->phy_type == e1000_phy_gg82563) {
+ ret_val = e1000_copper_link_ggp_setup(hw);
+ if (ret_val)
+ return ret_val;
+ }
+
+ if (hw->autoneg) {
+ /* Setup autoneg and flow control advertisement
+ * and perform autonegotiation */
+ ret_val = e1000_copper_link_autoneg(hw);
+ if (ret_val)
+ return ret_val;
+ } else {
+ /* PHY will be set to 10H, 10F, 100H,or 100F
+ * depending on value from forced_speed_duplex. */
+ DEBUGOUT("Forcing speed and duplex\n");
+ ret_val = e1000_phy_force_speed_duplex(hw);
+ if (ret_val) {
+ DEBUGOUT("Error Forcing Speed and Duplex\n");
+ return ret_val;
+ }
+ }
+
+ /* Check link status. Wait up to 100 microseconds for link to become
+ * valid.
+ */
+ for (i = 0; i < 10; i++) {
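+ /* The link status bit is latched low; read the register twice so the
+ * second read returns the current state. */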
+ ret_val = e1000_read_phy_reg(hw, PHY_STATUS, &phy_data);
+ if (ret_val)
+ return ret_val;
+ ret_val = e1000_read_phy_reg(hw, PHY_STATUS, &phy_data);
+ if (ret_val)
+ return ret_val;
+
+ if (phy_data & MII_SR_LINK_STATUS) {
+ /* Config the MAC and PHY after link is up */
+ ret_val = e1000_copper_link_postconfig(hw);
+ if (ret_val)
+ return ret_val;
+
+ DEBUGOUT("Valid link established!!!\n");
+ return E1000_SUCCESS;
+ }
+ udelay(10);
+ }
+
+ DEBUGOUT("Unable to establish link!!!\n");
+ return E1000_SUCCESS;
+}
+
+/******************************************************************************
+* Configure the MAC-to-PHY interface for 10/100Mbps
+*
+* hw - Struct containing variables accessed by shared code
+******************************************************************************/
+static s32 e1000_configure_kmrn_for_10_100(struct e1000_hw *hw, u16 duplex)
+{
+ s32 ret_val = E1000_SUCCESS;
+ u32 tipg;
+ u16 reg_data;
+
+ DEBUGFUNC("e1000_configure_kmrn_for_10_100");
+
+ reg_data = E1000_KUMCTRLSTA_HD_CTRL_10_100_DEFAULT;
+ ret_val = e1000_write_kmrn_reg(hw, E1000_KUMCTRLSTA_OFFSET_HD_CTRL,
+ reg_data);
+ if (ret_val)
+ return ret_val;
+
+ /* Configure Transmit Inter-Packet Gap */
+ tipg = er32(TIPG);
+ tipg &= ~E1000_TIPG_IPGT_MASK;
+ tipg |= DEFAULT_80003ES2LAN_TIPG_IPGT_10_100;
+ ew32(TIPG, tipg);
+
+ ret_val = e1000_read_phy_reg(hw, GG82563_PHY_KMRN_MODE_CTRL, &reg_data);
+
+ if (ret_val)
+ return ret_val;
+
+ if (duplex == HALF_DUPLEX)
+ reg_data |= GG82563_KMCR_PASS_FALSE_CARRIER;
+ else
+ reg_data &= ~GG82563_KMCR_PASS_FALSE_CARRIER;
+
+ ret_val = e1000_write_phy_reg(hw, GG82563_PHY_KMRN_MODE_CTRL, reg_data);
+
+ return ret_val;
+}
+
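+/******************************************************************************
+* Configure the MAC-to-PHY interface for 1000Mbps
+*
+* hw - Struct containing variables accessed by shared code
+******************************************************************************/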
+static s32 e1000_configure_kmrn_for_1000(struct e1000_hw *hw)
+{
+ s32 ret_val = E1000_SUCCESS;
+ u16 reg_data;
+ u32 tipg;
+
+ DEBUGFUNC("e1000_configure_kmrn_for_1000");
+
+ reg_data = E1000_KUMCTRLSTA_HD_CTRL_1000_DEFAULT;
+ ret_val = e1000_write_kmrn_reg(hw, E1000_KUMCTRLSTA_OFFSET_HD_CTRL,
+ reg_data);
+ if (ret_val)
+ return ret_val;
+
+ /* Configure Transmit Inter-Packet Gap */
+ tipg = er32(TIPG);
+ tipg &= ~E1000_TIPG_IPGT_MASK;
+ tipg |= DEFAULT_80003ES2LAN_TIPG_IPGT_1000;
+ ew32(TIPG, tipg);
+
+ ret_val = e1000_read_phy_reg(hw, GG82563_PHY_KMRN_MODE_CTRL, &reg_data);
+
+ if (ret_val)
+ return ret_val;
+
+ reg_data &= ~GG82563_KMCR_PASS_FALSE_CARRIER;
+ ret_val = e1000_write_phy_reg(hw, GG82563_PHY_KMRN_MODE_CTRL, reg_data);
+
+ return ret_val;
+}
+
+/******************************************************************************
+* Configures PHY autoneg and flow control advertisement settings
+*
+* hw - Struct containing variables accessed by shared code
+******************************************************************************/
+s32 e1000_phy_setup_autoneg(struct e1000_hw *hw)
+{
+ s32 ret_val;
+ u16 mii_autoneg_adv_reg;
+ u16 mii_1000t_ctrl_reg;
+
+ DEBUGFUNC("e1000_phy_setup_autoneg");
+
+ /* Read the MII Auto-Neg Advertisement Register (Address 4). */
+ ret_val = e1000_read_phy_reg(hw, PHY_AUTONEG_ADV, &mii_autoneg_adv_reg);
+ if (ret_val)
+ return ret_val;
+
+ if (hw->phy_type != e1000_phy_ife) {
+ /* Read the MII 1000Base-T Control Register (Address 9). */
+ ret_val = e1000_read_phy_reg(hw, PHY_1000T_CTRL, &mii_1000t_ctrl_reg);
+ if (ret_val)
+ return ret_val;
+ } else {
+ mii_1000t_ctrl_reg = 0;
+ }
+
+ /* Need to parse both autoneg_advertised and fc and set up
+ * the appropriate PHY registers. First we will parse for
+ * autoneg_advertised software override. Since we can advertise
+ * a plethora of combinations, we need to check each bit
+ * individually.
+ */
+
+ /* First we clear all the 10/100 mb speed bits in the Auto-Neg
+ * Advertisement Register (Address 4) and the 1000 mb speed bits in
+ * the 1000Base-T Control Register (Address 9).
+ */
+ mii_autoneg_adv_reg &= ~REG4_SPEED_MASK;
+ mii_1000t_ctrl_reg &= ~REG9_SPEED_MASK;
+
+ DEBUGOUT1("autoneg_advertised %x\n", hw->autoneg_advertised);
+
+ /* Do we want to advertise 10 Mb Half Duplex? */
+ if (hw->autoneg_advertised & ADVERTISE_10_HALF) {
+ DEBUGOUT("Advertise 10mb Half duplex\n");
+ mii_autoneg_adv_reg |= NWAY_AR_10T_HD_CAPS;
+ }
+
+ /* Do we want to advertise 10 Mb Full Duplex? */
+ if (hw->autoneg_advertised & ADVERTISE_10_FULL) {
+ DEBUGOUT("Advertise 10mb Full duplex\n");
+ mii_autoneg_adv_reg |= NWAY_AR_10T_FD_CAPS;
+ }
+
+ /* Do we want to advertise 100 Mb Half Duplex? */
+ if (hw->autoneg_advertised & ADVERTISE_100_HALF) {
+ DEBUGOUT("Advertise 100mb Half duplex\n");
+ mii_autoneg_adv_reg |= NWAY_AR_100TX_HD_CAPS;
+ }
+
+ /* Do we want to advertise 100 Mb Full Duplex? */
+ if (hw->autoneg_advertised & ADVERTISE_100_FULL) {
+ DEBUGOUT("Advertise 100mb Full duplex\n");
+ mii_autoneg_adv_reg |= NWAY_AR_100TX_FD_CAPS;
+ }
+
+ /* We do not allow the Phy to advertise 1000 Mb Half Duplex */
+ if (hw->autoneg_advertised & ADVERTISE_1000_HALF) {
+ DEBUGOUT("Advertise 1000mb Half duplex requested, request denied!\n");
+ }
+
+ /* Do we want to advertise 1000 Mb Full Duplex? */
+ if (hw->autoneg_advertised & ADVERTISE_1000_FULL) {
+ DEBUGOUT("Advertise 1000mb Full duplex\n");
+ mii_1000t_ctrl_reg |= CR_1000T_FD_CAPS;
+ if (hw->phy_type == e1000_phy_ife) {
+ DEBUGOUT("e1000_phy_ife is a 10/100 PHY. Gigabit speed is not supported.\n");
+ }
+ }
+
+ /* Check for a software override of the flow control settings, and
+ * setup the PHY advertisement registers accordingly. If
+ * auto-negotiation is enabled, then software will have to set the
+ * "PAUSE" bits to the correct value in the Auto-Negotiation
+ * Advertisement Register (PHY_AUTONEG_ADV) and re-start auto-negotiation.
+ *
+ * The possible values of the "fc" parameter are:
+ * 0: Flow control is completely disabled
+ * 1: Rx flow control is enabled (we can receive pause frames
+ * but not send pause frames).
+ * 2: Tx flow control is enabled (we can send pause frames
+ * but we do not support receiving pause frames).
+ * 3: Both Rx and TX flow control (symmetric) are enabled.
+ * other: No software override. The flow control configuration
+ * in the EEPROM is used.
+ */
+ switch (hw->fc) {
+ case E1000_FC_NONE: /* 0 */
+ /* Flow control (RX & TX) is completely disabled by a
+ * software over-ride.
+ */
+ mii_autoneg_adv_reg &= ~(NWAY_AR_ASM_DIR | NWAY_AR_PAUSE);
+ break;
+ case E1000_FC_RX_PAUSE: /* 1 */
+ /* RX Flow control is enabled, and TX Flow control is
+ * disabled, by a software over-ride.
+ */
+ /* Since there really isn't a way to advertise that we are
+ * capable of RX Pause ONLY, we will advertise that we
+ * support both symmetric and asymmetric RX PAUSE. Later
+ * (in e1000_config_fc_after_link_up) we will disable the
+ * hw's ability to send PAUSE frames.
+ */
+ mii_autoneg_adv_reg |= (NWAY_AR_ASM_DIR | NWAY_AR_PAUSE);
+ break;
+ case E1000_FC_TX_PAUSE: /* 2 */
+ /* TX Flow control is enabled, and RX Flow control is
+ * disabled, by a software over-ride.
+ */
+ mii_autoneg_adv_reg |= NWAY_AR_ASM_DIR;
+ mii_autoneg_adv_reg &= ~NWAY_AR_PAUSE;
+ break;
+ case E1000_FC_FULL: /* 3 */
+ /* Flow control (both RX and TX) is enabled by a software
+ * over-ride.
+ */
+ mii_autoneg_adv_reg |= (NWAY_AR_ASM_DIR | NWAY_AR_PAUSE);
+ break;
+ default:
+ DEBUGOUT("Flow control param set incorrectly\n");
+ return -E1000_ERR_CONFIG;
+ }
+
+ ret_val = e1000_write_phy_reg(hw, PHY_AUTONEG_ADV, mii_autoneg_adv_reg);
+ if (ret_val)
+ return ret_val;
+
+ DEBUGOUT1("Auto-Neg Advertising %x\n", mii_autoneg_adv_reg);
+
+ if (hw->phy_type != e1000_phy_ife) {
+ ret_val = e1000_write_phy_reg(hw, PHY_1000T_CTRL, mii_1000t_ctrl_reg);
+ if (ret_val)
+ return ret_val;
+ }
+
+ return E1000_SUCCESS;
+}
+
+/******************************************************************************
+* Force PHY speed and duplex settings to hw->forced_speed_duplex
+*
+* hw - Struct containing variables accessed by shared code
+******************************************************************************/
+static s32 e1000_phy_force_speed_duplex(struct e1000_hw *hw)
+{
+ u32 ctrl;
+ s32 ret_val;
+ u16 mii_ctrl_reg;
+ u16 mii_status_reg;
+ u16 phy_data;
+ u16 i;
+
+ DEBUGFUNC("e1000_phy_force_speed_duplex");
+
+ /* Turn off Flow control if we are forcing speed and duplex. */
+ hw->fc = E1000_FC_NONE;
+
+ DEBUGOUT1("hw->fc = %d\n", hw->fc);
+
+ /* Read the Device Control Register. */
+ ctrl = er32(CTRL);
+
+ /* Set the bits to Force Speed and Duplex in the Device Ctrl Reg. */
+ ctrl |= (E1000_CTRL_FRCSPD | E1000_CTRL_FRCDPX);
+ ctrl &= ~(DEVICE_SPEED_MASK);
+
+ /* Clear the Auto Speed Detect Enable bit. */
+ ctrl &= ~E1000_CTRL_ASDE;
+
+ /* Read the MII Control Register. */
+ ret_val = e1000_read_phy_reg(hw, PHY_CTRL, &mii_ctrl_reg);
+ if (ret_val)
+ return ret_val;
+
+ /* We need to disable autoneg in order to force link and duplex. */
+
+ mii_ctrl_reg &= ~MII_CR_AUTO_NEG_EN;
+
+ /* Are we forcing Full or Half Duplex? */
+ if (hw->forced_speed_duplex == e1000_100_full ||
+ hw->forced_speed_duplex == e1000_10_full) {
+ /* We want to force full duplex so we SET the full duplex bits in the
+ * Device and MII Control Registers.
+ */
+ ctrl |= E1000_CTRL_FD;
+ mii_ctrl_reg |= MII_CR_FULL_DUPLEX;
+ DEBUGOUT("Full Duplex\n");
+ } else {
+ /* We want to force half duplex so we CLEAR the full duplex bits in
+ * the Device and MII Control Registers.
+ */
+ ctrl &= ~E1000_CTRL_FD;
+ mii_ctrl_reg &= ~MII_CR_FULL_DUPLEX;
+ DEBUGOUT("Half Duplex\n");
+ }
+
+ /* Are we forcing 100Mbps??? */
+ if (hw->forced_speed_duplex == e1000_100_full ||
+ hw->forced_speed_duplex == e1000_100_half) {
+ /* Set the 100Mb bit and turn off the 1000Mb and 10Mb bits. */
+ ctrl |= E1000_CTRL_SPD_100;
+ mii_ctrl_reg |= MII_CR_SPEED_100;
+ mii_ctrl_reg &= ~(MII_CR_SPEED_1000 | MII_CR_SPEED_10);
+ DEBUGOUT("Forcing 100mb ");
+ } else {
+ /* Set the 10Mb bit and turn off the 1000Mb and 100Mb bits. */
+ ctrl &= ~(E1000_CTRL_SPD_1000 | E1000_CTRL_SPD_100);
+ mii_ctrl_reg |= MII_CR_SPEED_10;
+ mii_ctrl_reg &= ~(MII_CR_SPEED_1000 | MII_CR_SPEED_100);
+ DEBUGOUT("Forcing 10mb ");
+ }
+
+ e1000_config_collision_dist(hw);
+
+ /* Write the configured values back to the Device Control Reg. */
+ ew32(CTRL, ctrl);
+
+ if ((hw->phy_type == e1000_phy_m88) ||
+ (hw->phy_type == e1000_phy_gg82563)) {
+ ret_val = e1000_read_phy_reg(hw, M88E1000_PHY_SPEC_CTRL, &phy_data);
+ if (ret_val)
+ return ret_val;
+
+ /* Clear Auto-Crossover to force MDI manually. M88E1000 requires MDI
+ * forced whenever speed and duplex are forced.
+ */
+ phy_data &= ~M88E1000_PSCR_AUTO_X_MODE;
+ ret_val = e1000_write_phy_reg(hw, M88E1000_PHY_SPEC_CTRL, phy_data);
+ if (ret_val)
+ return ret_val;
+
+ DEBUGOUT1("M88E1000 PSCR: %x \n", phy_data);
+
+ /* Need to reset the PHY or these changes will be ignored */
+ mii_ctrl_reg |= MII_CR_RESET;
+
+ /* Disable MDI-X support for 10/100 */
+ } else if (hw->phy_type == e1000_phy_ife) {
+ ret_val = e1000_read_phy_reg(hw, IFE_PHY_MDIX_CONTROL, &phy_data);
+ if (ret_val)
+ return ret_val;
+
+ phy_data &= ~IFE_PMC_AUTO_MDIX;
+ phy_data &= ~IFE_PMC_FORCE_MDIX;
+
+ ret_val = e1000_write_phy_reg(hw, IFE_PHY_MDIX_CONTROL, phy_data);
+ if (ret_val)
+ return ret_val;
+
+ } else {
+ /* Clear Auto-Crossover to force MDI manually. IGP requires MDI
+ * forced whenever speed or duplex are forced.
+ */
+ ret_val = e1000_read_phy_reg(hw, IGP01E1000_PHY_PORT_CTRL, &phy_data);
+ if (ret_val)
+ return ret_val;
+
+ phy_data &= ~IGP01E1000_PSCR_AUTO_MDIX;
+ phy_data &= ~IGP01E1000_PSCR_FORCE_MDI_MDIX;
+
+ ret_val = e1000_write_phy_reg(hw, IGP01E1000_PHY_PORT_CTRL, phy_data);
+ if (ret_val)
+ return ret_val;
+ }
+
+ /* Write back the modified PHY MII control register. */
+ ret_val = e1000_write_phy_reg(hw, PHY_CTRL, mii_ctrl_reg);
+ if (ret_val)
+ return ret_val;
+
+ udelay(1);
+
+ /* The wait_autoneg_complete flag may be a little misleading here.
+ * Since we are forcing speed and duplex, Auto-Neg is not enabled.
+ * But we do want to delay for a period while forcing only so we
+ * don't generate false No Link messages. So we will wait here
+ * only if the user has set wait_autoneg_complete to 1, which is
+ * the default.
+ */
+ if (hw->wait_autoneg_complete) {
+ /* We will wait for autoneg to complete. */
+ DEBUGOUT("Waiting for forced speed/duplex link.\n");
+ mii_status_reg = 0;
+
+ /* We will wait for autoneg to complete or 4.5 seconds to expire. */
+ for (i = PHY_FORCE_TIME; i > 0; i--) {
+ /* Read the MII Status Register and wait for the link status bit
+ * to be set.
+ */
+ ret_val = e1000_read_phy_reg(hw, PHY_STATUS, &mii_status_reg);
+ if (ret_val)
+ return ret_val;
+
+ ret_val = e1000_read_phy_reg(hw, PHY_STATUS, &mii_status_reg);
+ if (ret_val)
+ return ret_val;
+
+ if (mii_status_reg & MII_SR_LINK_STATUS) break;
+ msleep(100);
+ }
+ if ((i == 0) &&
+ ((hw->phy_type == e1000_phy_m88) ||
+ (hw->phy_type == e1000_phy_gg82563))) {
+ /* We didn't get link. Reset the DSP and wait again for link. */
+ ret_val = e1000_phy_reset_dsp(hw);
+ if (ret_val) {
+ DEBUGOUT("Error Resetting PHY DSP\n");
+ return ret_val;
+ }
+ }
+ /* This loop will early-out if the link condition has been met. */
+ for (i = PHY_FORCE_TIME; i > 0; i--) {
+ if (mii_status_reg & MII_SR_LINK_STATUS) break;
+ msleep(100);
+ /* Read the MII Status Register and wait for the link status bit
+ * to be set.
+ */
+ ret_val = e1000_read_phy_reg(hw, PHY_STATUS, &mii_status_reg);
+ if (ret_val)
+ return ret_val;
+
+ ret_val = e1000_read_phy_reg(hw, PHY_STATUS, &mii_status_reg);
+ if (ret_val)
+ return ret_val;
+ }
+ }
+
+ if (hw->phy_type == e1000_phy_m88) {
+ /* Because we reset the PHY above, we need to re-force TX_CLK in the
+ * Extended PHY Specific Control Register to 25MHz clock. This value
+ * defaults back to a 2.5MHz clock when the PHY is reset.
+ */
+ ret_val = e1000_read_phy_reg(hw, M88E1000_EXT_PHY_SPEC_CTRL, &phy_data);
+ if (ret_val)
+ return ret_val;
+
+ phy_data |= M88E1000_EPSCR_TX_CLK_25;
+ ret_val = e1000_write_phy_reg(hw, M88E1000_EXT_PHY_SPEC_CTRL, phy_data);
+ if (ret_val)
+ return ret_val;
+
+ /* In addition, because of the s/w reset above, we need to enable CRS on
+ * TX. This must be set for both full and half duplex operation.
+ */
+ ret_val = e1000_read_phy_reg(hw, M88E1000_PHY_SPEC_CTRL, &phy_data);
+ if (ret_val)
+ return ret_val;
+
+ phy_data |= M88E1000_PSCR_ASSERT_CRS_ON_TX;
+ ret_val = e1000_write_phy_reg(hw, M88E1000_PHY_SPEC_CTRL, phy_data);
+ if (ret_val)
+ return ret_val;
+
+ if ((hw->mac_type == e1000_82544 || hw->mac_type == e1000_82543) &&
+ (!hw->autoneg) && (hw->forced_speed_duplex == e1000_10_full ||
+ hw->forced_speed_duplex == e1000_10_half)) {
+ ret_val = e1000_polarity_reversal_workaround(hw);
+ if (ret_val)
+ return ret_val;
+ }
+ } else if (hw->phy_type == e1000_phy_gg82563) {
+ /* The TX_CLK of the Extended PHY Specific Control Register defaults
+ * to 2.5MHz on a reset. We need to re-force it back to 25MHz, if
+ * we're not in a forced 10Mbps configuration. */
+ ret_val = e1000_read_phy_reg(hw, GG82563_PHY_MAC_SPEC_CTRL, &phy_data);
+ if (ret_val)
+ return ret_val;
+
+ phy_data &= ~GG82563_MSCR_TX_CLK_MASK;
+ if ((hw->forced_speed_duplex == e1000_10_full) ||
+ (hw->forced_speed_duplex == e1000_10_half))
+ phy_data |= GG82563_MSCR_TX_CLK_10MBPS_2_5MHZ;
+ else
+ phy_data |= GG82563_MSCR_TX_CLK_100MBPS_25MHZ;
+
+ /* Also due to the reset, we need to enable CRS on Tx. */
+ phy_data |= GG82563_MSCR_ASSERT_CRS_ON_TX;
+
+ ret_val = e1000_write_phy_reg(hw, GG82563_PHY_MAC_SPEC_CTRL, phy_data);
+ if (ret_val)
+ return ret_val;
+ }
+ return E1000_SUCCESS;
+}
+
+/******************************************************************************
+* Sets the collision distance in the Transmit Control register
+*
+* hw - Struct containing variables accessed by shared code
+*
+* Link should have been established previously. The collision distance
+* depends only on the MAC type.
+******************************************************************************/
+void e1000_config_collision_dist(struct e1000_hw *hw)
+{
+ u32 tctl, coll_dist;
+
+ DEBUGFUNC("e1000_config_collision_dist");
+
+ if (hw->mac_type < e1000_82543)
+ coll_dist = E1000_COLLISION_DISTANCE_82542;
+ else
+ coll_dist = E1000_COLLISION_DISTANCE;
+
+ tctl = er32(TCTL);
+
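+ /* Replace the collision distance (COLD) field of TCTL with the value
+ * selected above. */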
+ tctl &= ~E1000_TCTL_COLD;
+ tctl |= coll_dist << E1000_COLD_SHIFT;
+
+ ew32(TCTL, tctl);
+ E1000_WRITE_FLUSH();
+}
+
+/******************************************************************************
+* Sets MAC speed and duplex settings to reflect those in the PHY
+*
+* hw - Struct containing variables accessed by shared code
+*
+* The negotiated speed and duplex are read from the M88E1000 PHY Specific
+* Status register.
+******************************************************************************/
+static s32 e1000_config_mac_to_phy(struct e1000_hw *hw)
+{
+ u32 ctrl;
+ s32 ret_val;
+ u16 phy_data;
+
+ DEBUGFUNC("e1000_config_mac_to_phy");
+
+ /* 82544 or newer MAC, Auto Speed Detection takes care of
+ * MAC speed/duplex configuration.*/
+ if (hw->mac_type >= e1000_82544)
+ return E1000_SUCCESS;
+
+ /* Read the Device Control Register and set the bits to Force Speed
+ * and Duplex.
+ */
+ ctrl = er32(CTRL);
+ ctrl |= (E1000_CTRL_FRCSPD | E1000_CTRL_FRCDPX);
+ ctrl &= ~(E1000_CTRL_SPD_SEL | E1000_CTRL_ILOS);
+
+ /* Set up duplex in the Device Control and Transmit Control
+ * registers depending on negotiated values.
+ */
+ ret_val = e1000_read_phy_reg(hw, M88E1000_PHY_SPEC_STATUS, &phy_data);
+ if (ret_val)
+ return ret_val;
+
+ if (phy_data & M88E1000_PSSR_DPLX)
+ ctrl |= E1000_CTRL_FD;
+ else
+ ctrl &= ~E1000_CTRL_FD;
+
+ e1000_config_collision_dist(hw);
+
+ /* Set up speed in the Device Control register depending on
+ * negotiated values.
+ */
+ if ((phy_data & M88E1000_PSSR_SPEED) == M88E1000_PSSR_1000MBS)
+ ctrl |= E1000_CTRL_SPD_1000;
+ else if ((phy_data & M88E1000_PSSR_SPEED) == M88E1000_PSSR_100MBS)
+ ctrl |= E1000_CTRL_SPD_100;
+
+ /* Write the configured values back to the Device Control Reg. */
+ ew32(CTRL, ctrl);
+ return E1000_SUCCESS;
+}
+
+/******************************************************************************
+ * Forces the MAC's flow control settings.
+ *
+ * hw - Struct containing variables accessed by shared code
+ *
+ * Sets the TFCE and RFCE bits in the device control register to reflect
+ * the adapter settings. TFCE and RFCE need to be explicitly set by
+ * software when a Copper PHY is used because autonegotiation is managed
+ * by the PHY rather than the MAC. Software must also configure these
+ * bits when link is forced on a fiber connection.
+ *****************************************************************************/
+s32 e1000_force_mac_fc(struct e1000_hw *hw)
+{
+ u32 ctrl;
+
+ DEBUGFUNC("e1000_force_mac_fc");
+
+ /* Get the current configuration of the Device Control Register */
+ ctrl = er32(CTRL);
+
+ /* Because we didn't get link via the internal auto-negotiation
+ * mechanism (we either forced link or we got link via PHY
+ * auto-neg), we have to manually enable/disable transmit and
+ * receive flow control.
+ *
+ * The "Case" statement below enables/disable flow control
+ * according to the "hw->fc" parameter.
+ *
+ * The possible values of the "fc" parameter are:
+ * 0: Flow control is completely disabled
+ * 1: Rx flow control is enabled (we can receive pause
+ * frames but not send pause frames).
+ * 2: Tx flow control is enabled (we can send pause frames
+ * but we do not receive pause frames).
+ * 3: Both Rx and TX flow control (symmetric) are enabled.
+ * other: No other values should be possible at this point.
+ */
+
+ switch (hw->fc) {
+ case E1000_FC_NONE:
+ ctrl &= (~(E1000_CTRL_TFCE | E1000_CTRL_RFCE));
+ break;
+ case E1000_FC_RX_PAUSE:
+ ctrl &= (~E1000_CTRL_TFCE);
+ ctrl |= E1000_CTRL_RFCE;
+ break;
+ case E1000_FC_TX_PAUSE:
+ ctrl &= (~E1000_CTRL_RFCE);
+ ctrl |= E1000_CTRL_TFCE;
+ break;
+ case E1000_FC_FULL:
+ ctrl |= (E1000_CTRL_TFCE | E1000_CTRL_RFCE);
+ break;
+ default:
+ DEBUGOUT("Flow control param set incorrectly\n");
+ return -E1000_ERR_CONFIG;
+ }
+
+ /* Disable TX Flow Control for 82542 (rev 2.0) */
+ if (hw->mac_type == e1000_82542_rev2_0)
+ ctrl &= (~E1000_CTRL_TFCE);
+
+ ew32(CTRL, ctrl);
+ return E1000_SUCCESS;
+}
+
+/******************************************************************************
+ * Configures flow control settings after link is established
+ *
+ * hw - Struct containing variables accessed by shared code
+ *
+ * Should be called immediately after a valid link has been established.
+ * Forces MAC flow control settings if link was forced. When in MII/GMII mode
+ * and autonegotiation is enabled, the MAC flow control settings will be set
+ * based on the flow control negotiated by the PHY. In TBI mode, the TFCE
+ * and RFCE bits will be automaticaly set to the negotiated flow control mode.
+ *****************************************************************************/
+static s32 e1000_config_fc_after_link_up(struct e1000_hw *hw)
+{
+ s32 ret_val;
+ u16 mii_status_reg;
+ u16 mii_nway_adv_reg;
+ u16 mii_nway_lp_ability_reg;
+ u16 speed;
+ u16 duplex;
+
+ DEBUGFUNC("e1000_config_fc_after_link_up");
+
+ /* Check for the case where we have fiber media and auto-neg failed
+ * so we had to force link. In this case, we need to force the
+ * configuration of the MAC to match the "fc" parameter.
+ */
+ if (((hw->media_type == e1000_media_type_fiber) && (hw->autoneg_failed)) ||
+ ((hw->media_type == e1000_media_type_internal_serdes) &&
+ (hw->autoneg_failed)) ||
+ ((hw->media_type == e1000_media_type_copper) && (!hw->autoneg))) {
+ ret_val = e1000_force_mac_fc(hw);
+ if (ret_val) {
+ DEBUGOUT("Error forcing flow control settings\n");
+ return ret_val;
+ }
+ }
+
+ /* Check for the case where we have copper media and auto-neg is
+ * enabled. In this case, we need to check and see if Auto-Neg
+ * has completed, and if so, how the PHY and link partner has
+ * flow control configured.
+ */
+ if ((hw->media_type == e1000_media_type_copper) && hw->autoneg) {
+ /* Read the MII Status Register and check to see if AutoNeg
+ * has completed. We read this twice because this reg has
+ * some "sticky" (latched) bits.
+ */
+ ret_val = e1000_read_phy_reg(hw, PHY_STATUS, &mii_status_reg);
+ if (ret_val)
+ return ret_val;
+ ret_val = e1000_read_phy_reg(hw, PHY_STATUS, &mii_status_reg);
+ if (ret_val)
+ return ret_val;
+
+ if (mii_status_reg & MII_SR_AUTONEG_COMPLETE) {
+ /* The AutoNeg process has completed, so we now need to
+ * read both the Auto Negotiation Advertisement Register
+ * (Address 4) and the Auto_Negotiation Base Page Ability
+ * Register (Address 5) to determine how flow control was
+ * negotiated.
+ */
+ ret_val = e1000_read_phy_reg(hw, PHY_AUTONEG_ADV,
+ &mii_nway_adv_reg);
+ if (ret_val)
+ return ret_val;
+ ret_val = e1000_read_phy_reg(hw, PHY_LP_ABILITY,
+ &mii_nway_lp_ability_reg);
+ if (ret_val)
+ return ret_val;
+
+ /* Two bits in the Auto Negotiation Advertisement Register
+ * (Address 4) and two bits in the Auto Negotiation Base
+ * Page Ability Register (Address 5) determine flow control
+ * for both the PHY and the link partner. The following
+ * table, taken out of the IEEE 802.3ab/D6.0 dated March 25,
+ * 1999, describes these PAUSE resolution bits and how flow
+ * control is determined based upon these settings.
+ * NOTE: DC = Don't Care
+ *
+ * LOCAL DEVICE | LINK PARTNER
+ * PAUSE | ASM_DIR | PAUSE | ASM_DIR | NIC Resolution
+ *-------|---------|-------|---------|--------------------
+ * 0 | 0 | DC | DC | E1000_FC_NONE
+ * 0 | 1 | 0 | DC | E1000_FC_NONE
+ * 0 | 1 | 1 | 0 | E1000_FC_NONE
+ * 0 | 1 | 1 | 1 | E1000_FC_TX_PAUSE
+ * 1 | 0 | 0 | DC | E1000_FC_NONE
+ * 1 | DC | 1 | DC | E1000_FC_FULL
+ * 1 | 1 | 0 | 0 | E1000_FC_NONE
+ * 1 | 1 | 0 | 1 | E1000_FC_RX_PAUSE
+ *
+ */
+ /* Are both PAUSE bits set to 1? If so, this implies
+ * Symmetric Flow Control is enabled at both ends. The
+ * ASM_DIR bits are irrelevant per the spec.
+ *
+ * For Symmetric Flow Control:
+ *
+ * LOCAL DEVICE | LINK PARTNER
+ * PAUSE | ASM_DIR | PAUSE | ASM_DIR | Result
+ *-------|---------|-------|---------|--------------------
+ * 1 | DC | 1 | DC | E1000_FC_FULL
+ *
+ */
+ if ((mii_nway_adv_reg & NWAY_AR_PAUSE) &&
+ (mii_nway_lp_ability_reg & NWAY_LPAR_PAUSE)) {
+ /* Now we need to check if the user selected RX ONLY
+ * of pause frames. In this case, we had to advertise
+ * FULL flow control because we could not advertise RX
+ * ONLY. Hence, we must now check to see if we need to
+ * turn OFF the TRANSMISSION of PAUSE frames.
+ */
+ if (hw->original_fc == E1000_FC_FULL) {
+ hw->fc = E1000_FC_FULL;
+ DEBUGOUT("Flow Control = FULL.\n");
+ } else {
+ hw->fc = E1000_FC_RX_PAUSE;
+ DEBUGOUT("Flow Control = RX PAUSE frames only.\n");
+ }
+ }
+ /* For receiving PAUSE frames ONLY.
+ *
+ * LOCAL DEVICE | LINK PARTNER
+ * PAUSE | ASM_DIR | PAUSE | ASM_DIR | Result
+ *-------|---------|-------|---------|--------------------
+ * 0 | 1 | 1 | 1 | E1000_FC_TX_PAUSE
+ *
+ */
+ else if (!(mii_nway_adv_reg & NWAY_AR_PAUSE) &&
+ (mii_nway_adv_reg & NWAY_AR_ASM_DIR) &&
+ (mii_nway_lp_ability_reg & NWAY_LPAR_PAUSE) &&
+ (mii_nway_lp_ability_reg & NWAY_LPAR_ASM_DIR)) {
+ hw->fc = E1000_FC_TX_PAUSE;
+ DEBUGOUT("Flow Control = TX PAUSE frames only.\n");
+ }
+ /* For transmitting PAUSE frames ONLY.
+ *
+ * LOCAL DEVICE | LINK PARTNER
+ * PAUSE | ASM_DIR | PAUSE | ASM_DIR | Result
+ *-------|---------|-------|---------|--------------------
+ * 1 | 1 | 0 | 1 | E1000_FC_RX_PAUSE
+ *
+ */
+ else if ((mii_nway_adv_reg & NWAY_AR_PAUSE) &&
+ (mii_nway_adv_reg & NWAY_AR_ASM_DIR) &&
+ !(mii_nway_lp_ability_reg & NWAY_LPAR_PAUSE) &&
+ (mii_nway_lp_ability_reg & NWAY_LPAR_ASM_DIR)) {
+ hw->fc = E1000_FC_RX_PAUSE;
+ DEBUGOUT("Flow Control = RX PAUSE frames only.\n");
+ }
+ /* Per the IEEE spec, at this point flow control should be
+ * disabled. However, we want to consider that we could
+ * be connected to a legacy switch that doesn't advertise
+ * desired flow control, but can be forced on the link
+ * partner. So if we advertised no flow control, that is
+ * what we will resolve to. If we advertised some kind of
+ * receive capability (Rx Pause Only or Full Flow Control)
+ * and the link partner advertised none, we will configure
+ * ourselves to enable Rx Flow Control only. We can do
+ * this safely for two reasons: If the link partner really
+ * didn't want flow control enabled, and we enable Rx, no
+ * harm done since we won't be receiving any PAUSE frames
+ * anyway. If the intent on the link partner was to have
+ * flow control enabled, then by us enabling RX only, we
+ * can at least receive pause frames and process them.
+ * This is a good idea because in most cases, since we are
+ * predominantly a server NIC, more times than not we will
+ * be asked to delay transmission of packets than asking
+ * our link partner to pause transmission of frames.
+ */
+ else if ((hw->original_fc == E1000_FC_NONE ||
+ hw->original_fc == E1000_FC_TX_PAUSE) ||
+ hw->fc_strict_ieee) {
+ hw->fc = E1000_FC_NONE;
+ DEBUGOUT("Flow Control = NONE.\n");
+ } else {
+ hw->fc = E1000_FC_RX_PAUSE;
+ DEBUGOUT("Flow Control = RX PAUSE frames only.\n");
+ }
+
+ /* Now we need to do one last check... If we auto-
+ * negotiated to HALF DUPLEX, flow control should not be
+ * enabled per IEEE 802.3 spec.
+ */
+ ret_val = e1000_get_speed_and_duplex(hw, &speed, &duplex);
+ if (ret_val) {
+ DEBUGOUT("Error getting link speed and duplex\n");
+ return ret_val;
+ }
+
+ if (duplex == HALF_DUPLEX)
+ hw->fc = E1000_FC_NONE;
+
+ /* Now we call a subroutine to actually force the MAC
+ * controller to use the correct flow control settings.
+ */
+ ret_val = e1000_force_mac_fc(hw);
+ if (ret_val) {
+ DEBUGOUT("Error forcing flow control settings\n");
+ return ret_val;
+ }
+ } else {
+ DEBUGOUT("Copper PHY and Auto Neg has not completed.\n");
+ }
+ }
+ return E1000_SUCCESS;
+}
+
+/******************************************************************************
+ * Checks to see if the link status of the hardware has changed.
+ *
+ * hw - Struct containing variables accessed by shared code
+ *
+ * Called by any function that needs to check the link status of the adapter.
+ *****************************************************************************/
+s32 e1000_check_for_link(struct e1000_hw *hw)
+{
+ u32 rxcw = 0;
+ u32 ctrl;
+ u32 status;
+ u32 rctl;
+ u32 icr;
+ u32 signal = 0;
+ s32 ret_val;
+ u16 phy_data;
+
+ DEBUGFUNC("e1000_check_for_link");
+
+ ctrl = er32(CTRL);
+ status = er32(STATUS);
+
+	/* On adapters with a MAC newer than 82544, SW Definable pin 1 will be
+ * set when the optics detect a signal. On older adapters, it will be
+ * cleared when there is a signal. This applies to fiber media only.
+ */
+ if ((hw->media_type == e1000_media_type_fiber) ||
+ (hw->media_type == e1000_media_type_internal_serdes)) {
+ rxcw = er32(RXCW);
+
+ if (hw->media_type == e1000_media_type_fiber) {
+ signal = (hw->mac_type > e1000_82544) ? E1000_CTRL_SWDPIN1 : 0;
+ if (status & E1000_STATUS_LU)
+ hw->get_link_status = false;
+ }
+ }
+
+ /* If we have a copper PHY then we only want to go out to the PHY
+ * registers to see if Auto-Neg has completed and/or if our link
+ * status has changed. The get_link_status flag will be set if we
+ * receive a Link Status Change interrupt or we have Rx Sequence
+ * Errors.
+ */
+ if ((hw->media_type == e1000_media_type_copper) && hw->get_link_status) {
+ /* First we want to see if the MII Status Register reports
+ * link. If so, then we want to get the current speed/duplex
+ * of the PHY.
+ * Read the register twice since the link bit is sticky.
+ */
+ ret_val = e1000_read_phy_reg(hw, PHY_STATUS, &phy_data);
+ if (ret_val)
+ return ret_val;
+ ret_val = e1000_read_phy_reg(hw, PHY_STATUS, &phy_data);
+ if (ret_val)
+ return ret_val;
+
+ if (phy_data & MII_SR_LINK_STATUS) {
+ hw->get_link_status = false;
+			/* Check if there was a downshift; this must be checked
+			 * immediately after link-up */
+ e1000_check_downshift(hw);
+
+ /* If we are on 82544 or 82543 silicon and speed/duplex
+ * are forced to 10H or 10F, then we will implement the polarity
+			 * reversal workaround. We disable interrupts first, and upon
+			 * returning, restore the device's interrupt state to its previous
+			 * value, except for the link status change interrupt, which will
+			 * happen due to the execution of this workaround.
+ */
+
+ if ((hw->mac_type == e1000_82544 || hw->mac_type == e1000_82543) &&
+ (!hw->autoneg) &&
+ (hw->forced_speed_duplex == e1000_10_full ||
+ hw->forced_speed_duplex == e1000_10_half)) {
+ ew32(IMC, 0xffffffff);
+ ret_val = e1000_polarity_reversal_workaround(hw);
+ icr = er32(ICR);
+ ew32(ICS, (icr & ~E1000_ICS_LSC));
+ ew32(IMS, IMS_ENABLE_MASK);
+ }
+
+ } else {
+ /* No link detected */
+ e1000_config_dsp_after_link_change(hw, false);
+ return 0;
+ }
+
+ /* If we are forcing speed/duplex, then we simply return since
+ * we have already determined whether we have link or not.
+ */
+ if (!hw->autoneg) return -E1000_ERR_CONFIG;
+
+ /* optimize the dsp settings for the igp phy */
+ e1000_config_dsp_after_link_change(hw, true);
+
+ /* We have a M88E1000 PHY and Auto-Neg is enabled. If we
+ * have Si on board that is 82544 or newer, Auto
+ * Speed Detection takes care of MAC speed/duplex
+ * configuration. So we only need to configure Collision
+ * Distance in the MAC. Otherwise, we need to force
+ * speed/duplex on the MAC to the current PHY speed/duplex
+ * settings.
+ */
+ if (hw->mac_type >= e1000_82544)
+ e1000_config_collision_dist(hw);
+ else {
+ ret_val = e1000_config_mac_to_phy(hw);
+ if (ret_val) {
+ DEBUGOUT("Error configuring MAC to PHY settings\n");
+ return ret_val;
+ }
+ }
+
+ /* Configure Flow Control now that Auto-Neg has completed. First, we
+ * need to restore the desired flow control settings because we may
+ * have had to re-autoneg with a different link partner.
+ */
+ ret_val = e1000_config_fc_after_link_up(hw);
+ if (ret_val) {
+ DEBUGOUT("Error configuring flow control\n");
+ return ret_val;
+ }
+
+ /* At this point we know that we are on copper and we have
+ * auto-negotiated link. These are conditions for checking the link
+ * partner capability register. We use the link speed to determine if
+ * TBI compatibility needs to be turned on or off. If the link is not
+ * at gigabit speed, then TBI compatibility is not needed. If we are
+ * at gigabit speed, we turn on TBI compatibility.
+ */
+ if (hw->tbi_compatibility_en) {
+ u16 speed, duplex;
+ ret_val = e1000_get_speed_and_duplex(hw, &speed, &duplex);
+ if (ret_val) {
+ DEBUGOUT("Error getting link speed and duplex\n");
+ return ret_val;
+ }
+ if (speed != SPEED_1000) {
+ /* If link speed is not set to gigabit speed, we do not need
+ * to enable TBI compatibility.
+ */
+ if (hw->tbi_compatibility_on) {
+ /* If we previously were in the mode, turn it off. */
+ rctl = er32(RCTL);
+ rctl &= ~E1000_RCTL_SBP;
+ ew32(RCTL, rctl);
+ hw->tbi_compatibility_on = false;
+ }
+ } else {
+				/* If TBI compatibility was previously off, turn it on. For
+				 * compatibility with a TBI link partner, we will store bad
+				 * packets. Some frames have an additional byte on the end and
+				 * will look like CRC errors to the hardware.
+ */
+ if (!hw->tbi_compatibility_on) {
+ hw->tbi_compatibility_on = true;
+ rctl = er32(RCTL);
+ rctl |= E1000_RCTL_SBP;
+ ew32(RCTL, rctl);
+ }
+ }
+ }
+ }
+ /* If we don't have link (auto-negotiation failed or link partner cannot
+ * auto-negotiate), the cable is plugged in (we have signal), and our
+ * link partner is not trying to auto-negotiate with us (we are receiving
+ * idles or data), we need to force link up. We also need to give
+ * auto-negotiation time to complete, in case the cable was just plugged
+ * in. The autoneg_failed flag does this.
+ */
+ else if ((((hw->media_type == e1000_media_type_fiber) &&
+ ((ctrl & E1000_CTRL_SWDPIN1) == signal)) ||
+ (hw->media_type == e1000_media_type_internal_serdes)) &&
+ (!(status & E1000_STATUS_LU)) &&
+ (!(rxcw & E1000_RXCW_C))) {
+ if (hw->autoneg_failed == 0) {
+ hw->autoneg_failed = 1;
+ return 0;
+ }
+ DEBUGOUT("NOT RXing /C/, disable AutoNeg and force link.\n");
+
+ /* Disable auto-negotiation in the TXCW register */
+ ew32(TXCW, (hw->txcw & ~E1000_TXCW_ANE));
+
+ /* Force link-up and also force full-duplex. */
+ ctrl = er32(CTRL);
+ ctrl |= (E1000_CTRL_SLU | E1000_CTRL_FD);
+ ew32(CTRL, ctrl);
+
+ /* Configure Flow Control after forcing link up. */
+ ret_val = e1000_config_fc_after_link_up(hw);
+ if (ret_val) {
+ DEBUGOUT("Error configuring flow control\n");
+ return ret_val;
+ }
+ }
+ /* If we are forcing link and we are receiving /C/ ordered sets, re-enable
+ * auto-negotiation in the TXCW register and disable forced link in the
+ * Device Control register in an attempt to auto-negotiate with our link
+ * partner.
+ */
+ else if (((hw->media_type == e1000_media_type_fiber) ||
+ (hw->media_type == e1000_media_type_internal_serdes)) &&
+ (ctrl & E1000_CTRL_SLU) && (rxcw & E1000_RXCW_C)) {
+ DEBUGOUT("RXing /C/, enable AutoNeg and stop forcing link.\n");
+ ew32(TXCW, hw->txcw);
+ ew32(CTRL, (ctrl & ~E1000_CTRL_SLU));
+
+ hw->serdes_link_down = false;
+ }
+ /* If we force link for non-auto-negotiation switch, check link status
+ * based on MAC synchronization for internal serdes media type.
+ */
+ else if ((hw->media_type == e1000_media_type_internal_serdes) &&
+ !(E1000_TXCW_ANE & er32(TXCW))) {
+ /* SYNCH bit and IV bit are sticky. */
+ udelay(10);
+ if (E1000_RXCW_SYNCH & er32(RXCW)) {
+ if (!(rxcw & E1000_RXCW_IV)) {
+ hw->serdes_link_down = false;
+ DEBUGOUT("SERDES: Link is up.\n");
+ }
+ } else {
+ hw->serdes_link_down = true;
+ DEBUGOUT("SERDES: Link is down.\n");
+ }
+ }
+ if ((hw->media_type == e1000_media_type_internal_serdes) &&
+ (E1000_TXCW_ANE & er32(TXCW))) {
+ hw->serdes_link_down = !(E1000_STATUS_LU & er32(STATUS));
+ }
+ return E1000_SUCCESS;
+}
+
+/******************************************************************************
+ * Detects the current speed and duplex settings of the hardware.
+ *
+ * hw - Struct containing variables accessed by shared code
+ * speed - Speed of the connection
+ * duplex - Duplex setting of the connection
+ *****************************************************************************/
+s32 e1000_get_speed_and_duplex(struct e1000_hw *hw, u16 *speed, u16 *duplex)
+{
+ u32 status;
+ s32 ret_val;
+ u16 phy_data;
+
+ DEBUGFUNC("e1000_get_speed_and_duplex");
+
+ if (hw->mac_type >= e1000_82543) {
+ status = er32(STATUS);
+ if (status & E1000_STATUS_SPEED_1000) {
+ *speed = SPEED_1000;
+ DEBUGOUT("1000 Mbs, ");
+ } else if (status & E1000_STATUS_SPEED_100) {
+ *speed = SPEED_100;
+ DEBUGOUT("100 Mbs, ");
+ } else {
+ *speed = SPEED_10;
+ DEBUGOUT("10 Mbs, ");
+ }
+
+ if (status & E1000_STATUS_FD) {
+ *duplex = FULL_DUPLEX;
+ DEBUGOUT("Full Duplex\n");
+ } else {
+ *duplex = HALF_DUPLEX;
+ DEBUGOUT(" Half Duplex\n");
+ }
+ } else {
+ DEBUGOUT("1000 Mbs, Full Duplex\n");
+ *speed = SPEED_1000;
+ *duplex = FULL_DUPLEX;
+ }
+
+ /* IGP01 PHY may advertise full duplex operation after speed downgrade even
+ * if it is operating at half duplex. Here we set the duplex settings to
+ * match the duplex in the link partner's capabilities.
+ */
+ if (hw->phy_type == e1000_phy_igp && hw->speed_downgraded) {
+ ret_val = e1000_read_phy_reg(hw, PHY_AUTONEG_EXP, &phy_data);
+ if (ret_val)
+ return ret_val;
+
+ if (!(phy_data & NWAY_ER_LP_NWAY_CAPS))
+ *duplex = HALF_DUPLEX;
+ else {
+ ret_val = e1000_read_phy_reg(hw, PHY_LP_ABILITY, &phy_data);
+ if (ret_val)
+ return ret_val;
+ if ((*speed == SPEED_100 && !(phy_data & NWAY_LPAR_100TX_FD_CAPS)) ||
+ (*speed == SPEED_10 && !(phy_data & NWAY_LPAR_10T_FD_CAPS)))
+ *duplex = HALF_DUPLEX;
+ }
+ }
+
+ if ((hw->mac_type == e1000_80003es2lan) &&
+ (hw->media_type == e1000_media_type_copper)) {
+ if (*speed == SPEED_1000)
+ ret_val = e1000_configure_kmrn_for_1000(hw);
+ else
+ ret_val = e1000_configure_kmrn_for_10_100(hw, *duplex);
+ if (ret_val)
+ return ret_val;
+ }
+
+ if ((hw->phy_type == e1000_phy_igp_3) && (*speed == SPEED_1000)) {
+ ret_val = e1000_kumeran_lock_loss_workaround(hw);
+ if (ret_val)
+ return ret_val;
+ }
+
+ return E1000_SUCCESS;
+}
+
+/******************************************************************************
+* Blocks until autoneg completes or times out (~4.5 seconds)
+*
+* hw - Struct containing variables accessed by shared code
+******************************************************************************/
+static s32 e1000_wait_autoneg(struct e1000_hw *hw)
+{
+ s32 ret_val;
+ u16 i;
+ u16 phy_data;
+
+ DEBUGFUNC("e1000_wait_autoneg");
+ DEBUGOUT("Waiting for Auto-Neg to complete.\n");
+
+ /* We will wait for autoneg to complete or 4.5 seconds to expire. */
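+	/* (PHY_AUTO_NEG_TIME iterations of the 100 ms sleep below make up the
+	 * ~4.5 second budget noted above, assuming PHY_AUTO_NEG_TIME is 45.) */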
+ for (i = PHY_AUTO_NEG_TIME; i > 0; i--) {
+ /* Read the MII Status Register and wait for Auto-Neg
+ * Complete bit to be set.
+ */
+ ret_val = e1000_read_phy_reg(hw, PHY_STATUS, &phy_data);
+ if (ret_val)
+ return ret_val;
+ ret_val = e1000_read_phy_reg(hw, PHY_STATUS, &phy_data);
+ if (ret_val)
+ return ret_val;
+ if (phy_data & MII_SR_AUTONEG_COMPLETE) {
+ return E1000_SUCCESS;
+ }
+ msleep(100);
+ }
+ return E1000_SUCCESS;
+}
+
+/******************************************************************************
+* Raises the Management Data Clock
+*
+* hw - Struct containing variables accessed by shared code
+* ctrl - Device control register's current value
+******************************************************************************/
+static void e1000_raise_mdi_clk(struct e1000_hw *hw, u32 *ctrl)
+{
+ /* Raise the clock input to the Management Data Clock (by setting the MDC
+ * bit), and then delay 10 microseconds.
+ */
+ ew32(CTRL, (*ctrl | E1000_CTRL_MDC));
+ E1000_WRITE_FLUSH();
+ udelay(10);
+}
+
+/******************************************************************************
+* Lowers the Management Data Clock
+*
+* hw - Struct containing variables accessed by shared code
+* ctrl - Device control register's current value
+******************************************************************************/
+static void e1000_lower_mdi_clk(struct e1000_hw *hw, u32 *ctrl)
+{
+ /* Lower the clock input to the Management Data Clock (by clearing the MDC
+ * bit), and then delay 10 microseconds.
+ */
+ ew32(CTRL, (*ctrl & ~E1000_CTRL_MDC));
+ E1000_WRITE_FLUSH();
+ udelay(10);
+}
+
+/******************************************************************************
+* Shifts data bits out to the PHY
+*
+* hw - Struct containing variables accessed by shared code
+* data - Data to send out to the PHY
+* count - Number of bits to shift out
+*
+* Bits are shifted out in MSB to LSB order.
+******************************************************************************/
+static void e1000_shift_out_mdi_bits(struct e1000_hw *hw, u32 data, u16 count)
+{
+ u32 ctrl;
+ u32 mask;
+
+ /* We need to shift "count" number of bits out to the PHY. So, the value
+ * in the "data" parameter will be shifted out to the PHY one bit at a
+ * time. In order to do this, "data" must be broken down into bits.
+ */
+ mask = 0x01;
+ mask <<= (count - 1);
+
+ ctrl = er32(CTRL);
+
+ /* Set MDIO_DIR and MDC_DIR direction bits to be used as output pins. */
+ ctrl |= (E1000_CTRL_MDIO_DIR | E1000_CTRL_MDC_DIR);
+
+ while (mask) {
+ /* A "1" is shifted out to the PHY by setting the MDIO bit to "1" and
+ * then raising and lowering the Management Data Clock. A "0" is
+ * shifted out to the PHY by setting the MDIO bit to "0" and then
+ * raising and lowering the clock.
+ */
+ if (data & mask)
+ ctrl |= E1000_CTRL_MDIO;
+ else
+ ctrl &= ~E1000_CTRL_MDIO;
+
+ ew32(CTRL, ctrl);
+ E1000_WRITE_FLUSH();
+
+ udelay(10);
+
+ e1000_raise_mdi_clk(hw, &ctrl);
+ e1000_lower_mdi_clk(hw, &ctrl);
+
+ mask = mask >> 1;
+ }
+}
+
+/******************************************************************************
+* Shifts data bits in from the PHY
+*
+* hw - Struct containing variables accessed by shared code
+*
+* Bits are shifted in, MSB to LSB.
+******************************************************************************/
+static u16 e1000_shift_in_mdi_bits(struct e1000_hw *hw)
+{
+ u32 ctrl;
+ u16 data = 0;
+ u8 i;
+
+ /* In order to read a register from the PHY, we need to shift in a total
+ * of 18 bits from the PHY. The first two bit (turnaround) times are used
+ * to avoid contention on the MDIO pin when a read operation is performed.
+ * These two bits are ignored by us and thrown away. Bits are "shifted in"
+ * by raising the input to the Management Data Clock (setting the MDC bit),
+ * and then reading the value of the MDIO bit.
+ */
+ ctrl = er32(CTRL);
+
+ /* Clear MDIO_DIR (SWDPIO1) to indicate this bit is to be used as input. */
+ ctrl &= ~E1000_CTRL_MDIO_DIR;
+ ctrl &= ~E1000_CTRL_MDIO;
+
+ ew32(CTRL, ctrl);
+ E1000_WRITE_FLUSH();
+
+ /* Raise and Lower the clock before reading in the data. This accounts for
+ * the turnaround bits. The first clock occurred when we clocked out the
+ * last bit of the Register Address.
+ */
+ e1000_raise_mdi_clk(hw, &ctrl);
+ e1000_lower_mdi_clk(hw, &ctrl);
+
+ for (data = 0, i = 0; i < 16; i++) {
+ data = data << 1;
+ e1000_raise_mdi_clk(hw, &ctrl);
+ ctrl = er32(CTRL);
+ /* Check to see if we shifted in a "1". */
+ if (ctrl & E1000_CTRL_MDIO)
+ data |= 1;
+ e1000_lower_mdi_clk(hw, &ctrl);
+ }
+
+ e1000_raise_mdi_clk(hw, &ctrl);
+ e1000_lower_mdi_clk(hw, &ctrl);
+
+ return data;
+}
+
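+/******************************************************************************
+* Acquires the SW/FW semaphore for the resource identified by mask
+*
+* hw - Struct containing variables accessed by shared code
+* mask - SW_FW_SYNC software bit(s) for the resource; the firmware's
+*        counterpart bits are the same mask shifted left by 16
+*
+* On parts without the SW_FW_SYNC register this falls back to the software
+* flag or to the EEPROM hardware semaphore.
+******************************************************************************/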
+static s32 e1000_swfw_sync_acquire(struct e1000_hw *hw, u16 mask)
+{
+ u32 swfw_sync = 0;
+ u32 swmask = mask;
+ u32 fwmask = mask << 16;
+ s32 timeout = 200;
+
+ DEBUGFUNC("e1000_swfw_sync_acquire");
+
+ if (hw->swfwhw_semaphore_present)
+ return e1000_get_software_flag(hw);
+
+ if (!hw->swfw_sync_present)
+ return e1000_get_hw_eeprom_semaphore(hw);
+
+ while (timeout) {
+ if (e1000_get_hw_eeprom_semaphore(hw))
+ return -E1000_ERR_SWFW_SYNC;
+
+ swfw_sync = er32(SW_FW_SYNC);
+ if (!(swfw_sync & (fwmask | swmask))) {
+ break;
+ }
+
+ /* firmware currently using resource (fwmask) */
+ /* or other software thread currently using resource (swmask) */
+ e1000_put_hw_eeprom_semaphore(hw);
+ mdelay(5);
+ timeout--;
+ }
+
+ if (!timeout) {
+ DEBUGOUT("Driver can't access resource, SW_FW_SYNC timeout.\n");
+ return -E1000_ERR_SWFW_SYNC;
+ }
+
+ swfw_sync |= swmask;
+ ew32(SW_FW_SYNC, swfw_sync);
+
+ e1000_put_hw_eeprom_semaphore(hw);
+ return E1000_SUCCESS;
+}
+
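+/******************************************************************************
+* Releases the SW/FW semaphore bit(s) acquired by e1000_swfw_sync_acquire
+*
+* hw - Struct containing variables accessed by shared code
+* mask - SW_FW_SYNC software bit(s) to clear
+*
+* On parts without the SW_FW_SYNC register this releases the software flag
+* or the EEPROM hardware semaphore instead.
+******************************************************************************/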
+static void e1000_swfw_sync_release(struct e1000_hw *hw, u16 mask)
+{
+ u32 swfw_sync;
+ u32 swmask = mask;
+
+ DEBUGFUNC("e1000_swfw_sync_release");
+
+ if (hw->swfwhw_semaphore_present) {
+ e1000_release_software_flag(hw);
+ return;
+ }
+
+ if (!hw->swfw_sync_present) {
+ e1000_put_hw_eeprom_semaphore(hw);
+ return;
+ }
+
+	/* if (e1000_get_hw_eeprom_semaphore(hw))
+	 *     return -E1000_ERR_SWFW_SYNC; */
+	while (e1000_get_hw_eeprom_semaphore(hw) != E1000_SUCCESS)
+		; /* empty */
+
+ swfw_sync = er32(SW_FW_SYNC);
+ swfw_sync &= ~swmask;
+ ew32(SW_FW_SYNC, swfw_sync);
+
+ e1000_put_hw_eeprom_semaphore(hw);
+}
+
+/*****************************************************************************
+* Reads the value from a PHY register; if the register is on a specific
+* non-zero page, sets the page first.
+* hw - Struct containing variables accessed by shared code
+* reg_addr - address of the PHY register to read
+******************************************************************************/
+s32 e1000_read_phy_reg(struct e1000_hw *hw, u32 reg_addr, u16 *phy_data)
+{
+ u32 ret_val;
+ u16 swfw;
+
+ DEBUGFUNC("e1000_read_phy_reg");
+
+ if ((hw->mac_type == e1000_80003es2lan) &&
+ (er32(STATUS) & E1000_STATUS_FUNC_1)) {
+ swfw = E1000_SWFW_PHY1_SM;
+ } else {
+ swfw = E1000_SWFW_PHY0_SM;
+ }
+ if (e1000_swfw_sync_acquire(hw, swfw))
+ return -E1000_ERR_SWFW_SYNC;
+
+ if ((hw->phy_type == e1000_phy_igp ||
+ hw->phy_type == e1000_phy_igp_3 ||
+ hw->phy_type == e1000_phy_igp_2) &&
+ (reg_addr > MAX_PHY_MULTI_PAGE_REG)) {
+ ret_val = e1000_write_phy_reg_ex(hw, IGP01E1000_PHY_PAGE_SELECT,
+ (u16)reg_addr);
+ if (ret_val) {
+ e1000_swfw_sync_release(hw, swfw);
+ return ret_val;
+ }
+ } else if (hw->phy_type == e1000_phy_gg82563) {
+ if (((reg_addr & MAX_PHY_REG_ADDRESS) > MAX_PHY_MULTI_PAGE_REG) ||
+ (hw->mac_type == e1000_80003es2lan)) {
+ /* Select Configuration Page */
+ if ((reg_addr & MAX_PHY_REG_ADDRESS) < GG82563_MIN_ALT_REG) {
+ ret_val = e1000_write_phy_reg_ex(hw, GG82563_PHY_PAGE_SELECT,
+ (u16)((u16)reg_addr >> GG82563_PAGE_SHIFT));
+ } else {
+ /* Use Alternative Page Select register to access
+ * registers 30 and 31
+ */
+ ret_val = e1000_write_phy_reg_ex(hw,
+ GG82563_PHY_PAGE_SELECT_ALT,
+ (u16)((u16)reg_addr >> GG82563_PAGE_SHIFT));
+ }
+
+ if (ret_val) {
+ e1000_swfw_sync_release(hw, swfw);
+ return ret_val;
+ }
+ }
+ }
+
+ ret_val = e1000_read_phy_reg_ex(hw, MAX_PHY_REG_ADDRESS & reg_addr,
+ phy_data);
+
+ e1000_swfw_sync_release(hw, swfw);
+ return ret_val;
+}
+
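+/******************************************************************************
+* Reads a PHY register over the MDI interface (no page selection and no
+* SW/FW semaphore handling; callers such as e1000_read_phy_reg take care of
+* both)
+*
+* hw - Struct containing variables accessed by shared code
+* reg_addr - address of the PHY register to read
+* phy_data - read value of the PHY register
+******************************************************************************/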
+static s32 e1000_read_phy_reg_ex(struct e1000_hw *hw, u32 reg_addr,
+ u16 *phy_data)
+{
+ u32 i;
+ u32 mdic = 0;
+ const u32 phy_addr = 1;
+
+ DEBUGFUNC("e1000_read_phy_reg_ex");
+
+ if (reg_addr > MAX_PHY_REG_ADDRESS) {
+		DEBUGOUT1("PHY Register Address %d is out of range\n", reg_addr);
+ return -E1000_ERR_PARAM;
+ }
+
+ if (hw->mac_type > e1000_82543) {
+ /* Set up Op-code, Phy Address, and register address in the MDI
+ * Control register. The MAC will take care of interfacing with the
+ * PHY to retrieve the desired data.
+ */
+ mdic = ((reg_addr << E1000_MDIC_REG_SHIFT) |
+ (phy_addr << E1000_MDIC_PHY_SHIFT) |
+ (E1000_MDIC_OP_READ));
+
+ ew32(MDIC, mdic);
+
+ /* Poll the ready bit to see if the MDI read completed */
+ for (i = 0; i < 64; i++) {
+ udelay(50);
+ mdic = er32(MDIC);
+ if (mdic & E1000_MDIC_READY) break;
+ }
+ if (!(mdic & E1000_MDIC_READY)) {
+ DEBUGOUT("MDI Read did not complete\n");
+ return -E1000_ERR_PHY;
+ }
+ if (mdic & E1000_MDIC_ERROR) {
+ DEBUGOUT("MDI Error\n");
+ return -E1000_ERR_PHY;
+ }
+ *phy_data = (u16)mdic;
+ } else {
+ /* We must first send a preamble through the MDIO pin to signal the
+ * beginning of an MII instruction. This is done by sending 32
+ * consecutive "1" bits.
+ */
+ e1000_shift_out_mdi_bits(hw, PHY_PREAMBLE, PHY_PREAMBLE_SIZE);
+
+ /* Now combine the next few fields that are required for a read
+ * operation. We use this method instead of calling the
+ * e1000_shift_out_mdi_bits routine five different times. The format of
+ * a MII read instruction consists of a shift out of 14 bits and is
+ * defined as follows:
+ * <Preamble><SOF><Op Code><Phy Addr><Reg Addr>
+		 * followed by a shift in of 18 bits. The first two bits shifted in
+		 * are TurnAround bits used to avoid contention on the MDIO pin when a
+		 * READ operation is performed. These two bits are thrown away and are
+		 * followed by a shift in of 16 bits which contain the desired data.
+ */
+ mdic = ((reg_addr) | (phy_addr << 5) |
+ (PHY_OP_READ << 10) | (PHY_SOF << 12));
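+		/* For example, assuming the driver's usual clause-22 encodings
+		 * (PHY_SOF = 01b, PHY_OP_READ = 10b) and the fixed PHY address of 1
+		 * used here, reading register 1 yields mdic = 0x1821, which is
+		 * shifted out MSB first as <01><10><00001><00001>. */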
+
+ e1000_shift_out_mdi_bits(hw, mdic, 14);
+
+		/* Now that we've shifted out the read command to the MII, we need to
+		 * "shift in" the 16-bit value (18 bits in total, counting the two
+		 * turnaround bits) of the requested PHY register.
+ */
+ *phy_data = e1000_shift_in_mdi_bits(hw);
+ }
+ return E1000_SUCCESS;
+}
+
+/******************************************************************************
+* Writes a value to a PHY register
+*
+* hw - Struct containing variables accessed by shared code
+* reg_addr - address of the PHY register to write
+* data - data to write to the PHY
+******************************************************************************/
+s32 e1000_write_phy_reg(struct e1000_hw *hw, u32 reg_addr, u16 phy_data)
+{
+ u32 ret_val;
+ u16 swfw;
+
+ DEBUGFUNC("e1000_write_phy_reg");
+
+ if ((hw->mac_type == e1000_80003es2lan) &&
+ (er32(STATUS) & E1000_STATUS_FUNC_1)) {
+ swfw = E1000_SWFW_PHY1_SM;
+ } else {
+ swfw = E1000_SWFW_PHY0_SM;
+ }
+ if (e1000_swfw_sync_acquire(hw, swfw))
+ return -E1000_ERR_SWFW_SYNC;
+
+ if ((hw->phy_type == e1000_phy_igp ||
+ hw->phy_type == e1000_phy_igp_3 ||
+ hw->phy_type == e1000_phy_igp_2) &&
+ (reg_addr > MAX_PHY_MULTI_PAGE_REG)) {
+ ret_val = e1000_write_phy_reg_ex(hw, IGP01E1000_PHY_PAGE_SELECT,
+ (u16)reg_addr);
+ if (ret_val) {
+ e1000_swfw_sync_release(hw, swfw);
+ return ret_val;
+ }
+ } else if (hw->phy_type == e1000_phy_gg82563) {
+ if (((reg_addr & MAX_PHY_REG_ADDRESS) > MAX_PHY_MULTI_PAGE_REG) ||
+ (hw->mac_type == e1000_80003es2lan)) {
+ /* Select Configuration Page */
+ if ((reg_addr & MAX_PHY_REG_ADDRESS) < GG82563_MIN_ALT_REG) {
+ ret_val = e1000_write_phy_reg_ex(hw, GG82563_PHY_PAGE_SELECT,
+ (u16)((u16)reg_addr >> GG82563_PAGE_SHIFT));
+ } else {
+ /* Use Alternative Page Select register to access
+ * registers 30 and 31
+ */
+ ret_val = e1000_write_phy_reg_ex(hw,
+ GG82563_PHY_PAGE_SELECT_ALT,
+ (u16)((u16)reg_addr >> GG82563_PAGE_SHIFT));
+ }
+
+ if (ret_val) {
+ e1000_swfw_sync_release(hw, swfw);
+ return ret_val;
+ }
+ }
+ }
+
+ ret_val = e1000_write_phy_reg_ex(hw, MAX_PHY_REG_ADDRESS & reg_addr,
+ phy_data);
+
+ e1000_swfw_sync_release(hw, swfw);
+ return ret_val;
+}
+
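+/******************************************************************************
+* Writes a PHY register over the MDI interface (no page selection and no
+* SW/FW semaphore handling; callers such as e1000_write_phy_reg take care of
+* both)
+*
+* hw - Struct containing variables accessed by shared code
+* reg_addr - address of the PHY register to write
+* phy_data - data to write to the PHY register
+******************************************************************************/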
+static s32 e1000_write_phy_reg_ex(struct e1000_hw *hw, u32 reg_addr,
+ u16 phy_data)
+{
+ u32 i;
+ u32 mdic = 0;
+ const u32 phy_addr = 1;
+
+ DEBUGFUNC("e1000_write_phy_reg_ex");
+
+ if (reg_addr > MAX_PHY_REG_ADDRESS) {
+		DEBUGOUT1("PHY Register Address %d is out of range\n", reg_addr);
+ return -E1000_ERR_PARAM;
+ }
+
+ if (hw->mac_type > e1000_82543) {
+ /* Set up Op-code, Phy Address, register address, and data intended
+ * for the PHY register in the MDI Control register. The MAC will take
+ * care of interfacing with the PHY to send the desired data.
+ */
+ mdic = (((u32)phy_data) |
+ (reg_addr << E1000_MDIC_REG_SHIFT) |
+ (phy_addr << E1000_MDIC_PHY_SHIFT) |
+ (E1000_MDIC_OP_WRITE));
+
+ ew32(MDIC, mdic);
+
+		/* Poll the ready bit to see if the MDI write completed */
+ for (i = 0; i < 641; i++) {
+ udelay(5);
+ mdic = er32(MDIC);
+ if (mdic & E1000_MDIC_READY) break;
+ }
+ if (!(mdic & E1000_MDIC_READY)) {
+ DEBUGOUT("MDI Write did not complete\n");
+ return -E1000_ERR_PHY;
+ }
+ } else {
+ /* We'll need to use the SW defined pins to shift the write command
+ * out to the PHY. We first send a preamble to the PHY to signal the
+ * beginning of the MII instruction. This is done by sending 32
+ * consecutive "1" bits.
+ */
+ e1000_shift_out_mdi_bits(hw, PHY_PREAMBLE, PHY_PREAMBLE_SIZE);
+
+ /* Now combine the remaining required fields that will indicate a
+ * write operation. We use this method instead of calling the
+ * e1000_shift_out_mdi_bits routine for each field in the command. The
+ * format of a MII write instruction is as follows:
+ * <Preamble><SOF><Op Code><Phy Addr><Reg Addr><Turnaround><Data>.
+ */
+ mdic = ((PHY_TURNAROUND) | (reg_addr << 2) | (phy_addr << 7) |
+ (PHY_OP_WRITE << 12) | (PHY_SOF << 14));
+ mdic <<= 16;
+ mdic |= (u32)phy_data;
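+		/* The resulting 32-bit frame, MSB first, is
+		 * <SOF><Op Code><Phy Addr><Reg Addr><Turnaround><16 data bits>. */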
+
+ e1000_shift_out_mdi_bits(hw, mdic, 32);
+ }
+
+ return E1000_SUCCESS;
+}
+
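+/******************************************************************************
+* Reads a register on the Kumeran interface through the KUMCTRLSTA register
+*
+* hw - Struct containing variables accessed by shared code
+* reg_addr - Kumeran register offset
+* data - read value of the Kumeran register
+******************************************************************************/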
+static s32 e1000_read_kmrn_reg(struct e1000_hw *hw, u32 reg_addr, u16 *data)
+{
+ u32 reg_val;
+ u16 swfw;
+ DEBUGFUNC("e1000_read_kmrn_reg");
+
+ if ((hw->mac_type == e1000_80003es2lan) &&
+ (er32(STATUS) & E1000_STATUS_FUNC_1)) {
+ swfw = E1000_SWFW_PHY1_SM;
+ } else {
+ swfw = E1000_SWFW_PHY0_SM;
+ }
+ if (e1000_swfw_sync_acquire(hw, swfw))
+ return -E1000_ERR_SWFW_SYNC;
+
+ /* Write register address */
+ reg_val = ((reg_addr << E1000_KUMCTRLSTA_OFFSET_SHIFT) &
+ E1000_KUMCTRLSTA_OFFSET) |
+ E1000_KUMCTRLSTA_REN;
+ ew32(KUMCTRLSTA, reg_val);
+ udelay(2);
+
+ /* Read the data returned */
+ reg_val = er32(KUMCTRLSTA);
+ *data = (u16)reg_val;
+
+ e1000_swfw_sync_release(hw, swfw);
+ return E1000_SUCCESS;
+}
+
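+/******************************************************************************
+* Writes a register on the Kumeran interface through the KUMCTRLSTA register
+*
+* hw - Struct containing variables accessed by shared code
+* reg_addr - Kumeran register offset
+* data - value to write to the Kumeran register
+******************************************************************************/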
+static s32 e1000_write_kmrn_reg(struct e1000_hw *hw, u32 reg_addr, u16 data)
+{
+ u32 reg_val;
+ u16 swfw;
+ DEBUGFUNC("e1000_write_kmrn_reg");
+
+ if ((hw->mac_type == e1000_80003es2lan) &&
+ (er32(STATUS) & E1000_STATUS_FUNC_1)) {
+ swfw = E1000_SWFW_PHY1_SM;
+ } else {
+ swfw = E1000_SWFW_PHY0_SM;
+ }
+ if (e1000_swfw_sync_acquire(hw, swfw))
+ return -E1000_ERR_SWFW_SYNC;
+
+ reg_val = ((reg_addr << E1000_KUMCTRLSTA_OFFSET_SHIFT) &
+ E1000_KUMCTRLSTA_OFFSET) | data;
+ ew32(KUMCTRLSTA, reg_val);
+ udelay(2);
+
+ e1000_swfw_sync_release(hw, swfw);
+ return E1000_SUCCESS;
+}
+
+/******************************************************************************
+* Returns the PHY to the power-on reset state
+*
+* hw - Struct containing variables accessed by shared code
+******************************************************************************/
+s32 e1000_phy_hw_reset(struct e1000_hw *hw)
+{
+ u32 ctrl, ctrl_ext;
+ u32 led_ctrl;
+ s32 ret_val;
+ u16 swfw;
+
+ DEBUGFUNC("e1000_phy_hw_reset");
+
+	/* In the case of the phy reset being blocked, it's not an error; we
+	 * simply return success without performing the reset. */
+ ret_val = e1000_check_phy_reset_block(hw);
+ if (ret_val)
+ return E1000_SUCCESS;
+
+ DEBUGOUT("Resetting Phy...\n");
+
+ if (hw->mac_type > e1000_82543) {
+ if ((hw->mac_type == e1000_80003es2lan) &&
+ (er32(STATUS) & E1000_STATUS_FUNC_1)) {
+ swfw = E1000_SWFW_PHY1_SM;
+ } else {
+ swfw = E1000_SWFW_PHY0_SM;
+ }
+ if (e1000_swfw_sync_acquire(hw, swfw)) {
+ DEBUGOUT("Unable to acquire swfw sync\n");
+ return -E1000_ERR_SWFW_SYNC;
+ }
+ /* Read the device control register and assert the E1000_CTRL_PHY_RST
+ * bit. Then, take it out of reset.
+ * For pre-e1000_82571 hardware, we delay for 10ms between the assert
+ * and deassert. For e1000_82571 hardware and later, we instead delay
+		 * for at least 50us (100us here) between and 10ms after the deassertion.
+ */
+ ctrl = er32(CTRL);
+ ew32(CTRL, ctrl | E1000_CTRL_PHY_RST);
+ E1000_WRITE_FLUSH();
+
+ if (hw->mac_type < e1000_82571)
+ msleep(10);
+ else
+ udelay(100);
+
+ ew32(CTRL, ctrl);
+ E1000_WRITE_FLUSH();
+
+ if (hw->mac_type >= e1000_82571)
+ mdelay(10);
+
+ e1000_swfw_sync_release(hw, swfw);
+ } else {
+		/* Read the Extended Device Control Register and assert the PHY_RESET_DIR
+		 * bit to put the PHY into reset; then take it out of reset.
+ */
+ ctrl_ext = er32(CTRL_EXT);
+ ctrl_ext |= E1000_CTRL_EXT_SDP4_DIR;
+ ctrl_ext &= ~E1000_CTRL_EXT_SDP4_DATA;
+ ew32(CTRL_EXT, ctrl_ext);
+ E1000_WRITE_FLUSH();
+ msleep(10);
+ ctrl_ext |= E1000_CTRL_EXT_SDP4_DATA;
+ ew32(CTRL_EXT, ctrl_ext);
+ E1000_WRITE_FLUSH();
+ }
+ udelay(150);
+
+ if ((hw->mac_type == e1000_82541) || (hw->mac_type == e1000_82547)) {
+ /* Configure activity LED after PHY reset */
+ led_ctrl = er32(LEDCTL);
+ led_ctrl &= IGP_ACTIVITY_LED_MASK;
+ led_ctrl |= (IGP_ACTIVITY_LED_ENABLE | IGP_LED3_MODE);
+ ew32(LEDCTL, led_ctrl);
+ }
+
+ /* Wait for FW to finish PHY configuration. */
+ ret_val = e1000_get_phy_cfg_done(hw);
+ if (ret_val != E1000_SUCCESS)
+ return ret_val;
+ e1000_release_software_semaphore(hw);
+
+ if ((hw->mac_type == e1000_ich8lan) && (hw->phy_type == e1000_phy_igp_3))
+ ret_val = e1000_init_lcd_from_nvm(hw);
+
+ return ret_val;
+}
+
+/******************************************************************************
+* Resets the PHY
+*
+* hw - Struct containing variables accessed by shared code
+*
+* Sets bit 15 of the MII Control register
+******************************************************************************/
+s32 e1000_phy_reset(struct e1000_hw *hw)
+{
+ s32 ret_val;
+ u16 phy_data;
+
+ DEBUGFUNC("e1000_phy_reset");
+
+	/* In the case of the phy reset being blocked, it's not an error; we
+	 * simply return success without performing the reset. */
+ ret_val = e1000_check_phy_reset_block(hw);
+ if (ret_val)
+ return E1000_SUCCESS;
+
+ switch (hw->phy_type) {
+ case e1000_phy_igp:
+ case e1000_phy_igp_2:
+ case e1000_phy_igp_3:
+ case e1000_phy_ife:
+ ret_val = e1000_phy_hw_reset(hw);
+ if (ret_val)
+ return ret_val;
+ break;
+ default:
+ ret_val = e1000_read_phy_reg(hw, PHY_CTRL, &phy_data);
+ if (ret_val)
+ return ret_val;
+
+ phy_data |= MII_CR_RESET;
+ ret_val = e1000_write_phy_reg(hw, PHY_CTRL, phy_data);
+ if (ret_val)
+ return ret_val;
+
+ udelay(1);
+ break;
+ }
+
+ if (hw->phy_type == e1000_phy_igp || hw->phy_type == e1000_phy_igp_2)
+ e1000_phy_init_script(hw);
+
+ return E1000_SUCCESS;
+}
+
+/******************************************************************************
+* Work-around for 82566 power-down: on D3 entry-
+* 1) disable gigabit link
+* 2) write VR power-down enable
+* 3) read it back
+* if successful continue, else issue LCD reset and repeat
+*
+* hw - struct containing variables accessed by shared code
+******************************************************************************/
+void e1000_phy_powerdown_workaround(struct e1000_hw *hw)
+{
+ s32 reg;
+ u16 phy_data;
+ s32 retry = 0;
+
+ DEBUGFUNC("e1000_phy_powerdown_workaround");
+
+ if (hw->phy_type != e1000_phy_igp_3)
+ return;
+
+ do {
+ /* Disable link */
+ reg = er32(PHY_CTRL);
+ ew32(PHY_CTRL, reg | E1000_PHY_CTRL_GBE_DISABLE |
+ E1000_PHY_CTRL_NOND0A_GBE_DISABLE);
+
+ /* Write VR power-down enable - bits 9:8 should be 10b */
+ e1000_read_phy_reg(hw, IGP3_VR_CTRL, &phy_data);
+ phy_data |= (1 << 9);
+ phy_data &= ~(1 << 8);
+ e1000_write_phy_reg(hw, IGP3_VR_CTRL, phy_data);
+
+ /* Read it back and test */
+ e1000_read_phy_reg(hw, IGP3_VR_CTRL, &phy_data);
+ if (((phy_data & IGP3_VR_CTRL_MODE_MASK) == IGP3_VR_CTRL_MODE_SHUT) || retry)
+ break;
+
+ /* Issue PHY reset and repeat at most one more time */
+ reg = er32(CTRL);
+ ew32(CTRL, reg | E1000_CTRL_PHY_RST);
+ retry++;
+ } while (retry);
+
+}
+
+/******************************************************************************
+* Work-around for 82566 Kumeran PCS lock loss:
+* On link status change (i.e. PCI reset, speed change) and link is up and
+* speed is gigabit-
+* 0) if workaround is optionally disabled do nothing
+* 1) wait 1ms for Kumeran link to come up
+* 2) check Kumeran Diagnostic register PCS lock loss bit
+* 3) if not set the link is locked (all is good), otherwise...
+* 4) reset the PHY
+* 5) repeat up to 10 times
+* Note: this is only called for IGP3 copper when speed is 1gb.
+*
+* hw - struct containing variables accessed by shared code
+******************************************************************************/
+static s32 e1000_kumeran_lock_loss_workaround(struct e1000_hw *hw)
+{
+ s32 ret_val;
+ s32 reg;
+ s32 cnt;
+ u16 phy_data;
+
+ if (hw->kmrn_lock_loss_workaround_disabled)
+ return E1000_SUCCESS;
+
+	/* Make sure link is up before proceeding; if not, just return.
+	 * Attempting this while the link is negotiating fouled up link
+	 * stability. */
+ ret_val = e1000_read_phy_reg(hw, PHY_STATUS, &phy_data);
+ ret_val = e1000_read_phy_reg(hw, PHY_STATUS, &phy_data);
+
+ if (phy_data & MII_SR_LINK_STATUS) {
+ for (cnt = 0; cnt < 10; cnt++) {
+ /* read once to clear */
+ ret_val = e1000_read_phy_reg(hw, IGP3_KMRN_DIAG, &phy_data);
+ if (ret_val)
+ return ret_val;
+ /* and again to get new status */
+ ret_val = e1000_read_phy_reg(hw, IGP3_KMRN_DIAG, &phy_data);
+ if (ret_val)
+ return ret_val;
+
+ /* check for PCS lock */
+ if (!(phy_data & IGP3_KMRN_DIAG_PCS_LOCK_LOSS))
+ return E1000_SUCCESS;
+
+ /* Issue PHY reset */
+ e1000_phy_hw_reset(hw);
+ mdelay(5);
+ }
+ /* Disable GigE link negotiation */
+ reg = er32(PHY_CTRL);
+ ew32(PHY_CTRL, reg | E1000_PHY_CTRL_GBE_DISABLE |
+ E1000_PHY_CTRL_NOND0A_GBE_DISABLE);
+
+ /* unable to acquire PCS lock */
+ return E1000_ERR_PHY;
+ }
+
+ return E1000_SUCCESS;
+}
+
+/******************************************************************************
+* Probes the expected PHY address for known PHY IDs
+*
+* hw - Struct containing variables accessed by shared code
+******************************************************************************/
+static s32 e1000_detect_gig_phy(struct e1000_hw *hw)
+{
+ s32 phy_init_status, ret_val;
+ u16 phy_id_high, phy_id_low;
+ bool match = false;
+
+ DEBUGFUNC("e1000_detect_gig_phy");
+
+ if (hw->phy_id != 0)
+ return E1000_SUCCESS;
+
+ /* The 82571 firmware may still be configuring the PHY. In this
+ * case, we cannot access the PHY until the configuration is done. So
+ * we explicitly set the PHY values. */
+ if (hw->mac_type == e1000_82571 ||
+ hw->mac_type == e1000_82572) {
+ hw->phy_id = IGP01E1000_I_PHY_ID;
+ hw->phy_type = e1000_phy_igp_2;
+ return E1000_SUCCESS;
+ }
+
+ /* ESB-2 PHY reads require e1000_phy_gg82563 to be set because of a work-
+ * around that forces PHY page 0 to be set or the reads fail. The rest of
+ * the code in this routine uses e1000_read_phy_reg to read the PHY ID.
+ * So for ESB-2 we need to have this set so our reads won't fail. If the
+	 * attached PHY is not an e1000_phy_gg82563, the routines below will figure
+ * this out as well. */
+ if (hw->mac_type == e1000_80003es2lan)
+ hw->phy_type = e1000_phy_gg82563;
+
+ /* Read the PHY ID Registers to identify which PHY is onboard. */
+ ret_val = e1000_read_phy_reg(hw, PHY_ID1, &phy_id_high);
+ if (ret_val)
+ return ret_val;
+
+ hw->phy_id = (u32)(phy_id_high << 16);
+ udelay(20);
+ ret_val = e1000_read_phy_reg(hw, PHY_ID2, &phy_id_low);
+ if (ret_val)
+ return ret_val;
+
+ hw->phy_id |= (u32)(phy_id_low & PHY_REVISION_MASK);
+ hw->phy_revision = (u32)phy_id_low & ~PHY_REVISION_MASK;
+
+ switch (hw->mac_type) {
+ case e1000_82543:
+ if (hw->phy_id == M88E1000_E_PHY_ID) match = true;
+ break;
+ case e1000_82544:
+ if (hw->phy_id == M88E1000_I_PHY_ID) match = true;
+ break;
+ case e1000_82540:
+ case e1000_82545:
+ case e1000_82545_rev_3:
+ case e1000_82546:
+ case e1000_82546_rev_3:
+ if (hw->phy_id == M88E1011_I_PHY_ID) match = true;
+ break;
+ case e1000_82541:
+ case e1000_82541_rev_2:
+ case e1000_82547:
+ case e1000_82547_rev_2:
+ if (hw->phy_id == IGP01E1000_I_PHY_ID) match = true;
+ break;
+ case e1000_82573:
+ if (hw->phy_id == M88E1111_I_PHY_ID) match = true;
+ break;
+ case e1000_80003es2lan:
+ if (hw->phy_id == GG82563_E_PHY_ID) match = true;
+ break;
+ case e1000_ich8lan:
+ if (hw->phy_id == IGP03E1000_E_PHY_ID) match = true;
+ if (hw->phy_id == IFE_E_PHY_ID) match = true;
+ if (hw->phy_id == IFE_PLUS_E_PHY_ID) match = true;
+ if (hw->phy_id == IFE_C_E_PHY_ID) match = true;
+ break;
+ default:
+ DEBUGOUT1("Invalid MAC type %d\n", hw->mac_type);
+ return -E1000_ERR_CONFIG;
+ }
+ phy_init_status = e1000_set_phy_type(hw);
+
+ if ((match) && (phy_init_status == E1000_SUCCESS)) {
+ DEBUGOUT1("PHY ID 0x%X detected\n", hw->phy_id);
+ return E1000_SUCCESS;
+ }
+ DEBUGOUT1("Invalid PHY ID 0x%X\n", hw->phy_id);
+ return -E1000_ERR_PHY;
+}
+
+/******************************************************************************
+* Resets the PHY's DSP
+*
+* hw - Struct containing variables accessed by shared code
+******************************************************************************/
+static s32 e1000_phy_reset_dsp(struct e1000_hw *hw)
+{
+ s32 ret_val;
+ DEBUGFUNC("e1000_phy_reset_dsp");
+
+ do {
+ if (hw->phy_type != e1000_phy_gg82563) {
+ ret_val = e1000_write_phy_reg(hw, 29, 0x001d);
+ if (ret_val) break;
+ }
+ ret_val = e1000_write_phy_reg(hw, 30, 0x00c1);
+ if (ret_val) break;
+ ret_val = e1000_write_phy_reg(hw, 30, 0x0000);
+ if (ret_val) break;
+ ret_val = E1000_SUCCESS;
+ } while (0);
+
+ return ret_val;
+}
+
+/******************************************************************************
+* Get PHY information from various PHY registers for igp PHY only.
+*
+* hw - Struct containing variables accessed by shared code
+* phy_info - PHY information structure
+******************************************************************************/
+static s32 e1000_phy_igp_get_info(struct e1000_hw *hw,
+ struct e1000_phy_info *phy_info)
+{
+ s32 ret_val;
+ u16 phy_data, min_length, max_length, average;
+ e1000_rev_polarity polarity;
+
+ DEBUGFUNC("e1000_phy_igp_get_info");
+
+ /* The downshift status is checked only once, after link is established,
+	 * and is stored in the hw->speed_downgraded parameter. */
+ phy_info->downshift = (e1000_downshift)hw->speed_downgraded;
+
+	/* IGP01E1000 does not need to support extended 10BASE-T distance. */
+ phy_info->extended_10bt_distance = e1000_10bt_ext_dist_enable_normal;
+
+	/* IGP01E1000 always corrects polarity reversal */
+ phy_info->polarity_correction = e1000_polarity_reversal_enabled;
+
+ /* Check polarity status */
+ ret_val = e1000_check_polarity(hw, &polarity);
+ if (ret_val)
+ return ret_val;
+
+ phy_info->cable_polarity = polarity;
+
+ ret_val = e1000_read_phy_reg(hw, IGP01E1000_PHY_PORT_STATUS, &phy_data);
+ if (ret_val)
+ return ret_val;
+
+ phy_info->mdix_mode = (e1000_auto_x_mode)((phy_data & IGP01E1000_PSSR_MDIX) >>
+ IGP01E1000_PSSR_MDIX_SHIFT);
+
+ if ((phy_data & IGP01E1000_PSSR_SPEED_MASK) ==
+ IGP01E1000_PSSR_SPEED_1000MBPS) {
+ /* Local/Remote Receiver Information are only valid at 1000 Mbps */
+ ret_val = e1000_read_phy_reg(hw, PHY_1000T_STATUS, &phy_data);
+ if (ret_val)
+ return ret_val;
+
+ phy_info->local_rx = ((phy_data & SR_1000T_LOCAL_RX_STATUS) >>
+ SR_1000T_LOCAL_RX_STATUS_SHIFT) ?
+ e1000_1000t_rx_status_ok : e1000_1000t_rx_status_not_ok;
+ phy_info->remote_rx = ((phy_data & SR_1000T_REMOTE_RX_STATUS) >>
+ SR_1000T_REMOTE_RX_STATUS_SHIFT) ?
+ e1000_1000t_rx_status_ok : e1000_1000t_rx_status_not_ok;
+
+ /* Get cable length */
+ ret_val = e1000_get_cable_length(hw, &min_length, &max_length);
+ if (ret_val)
+ return ret_val;
+
+ /* Translate to old method */
+ average = (max_length + min_length) / 2;
+
+ if (average <= e1000_igp_cable_length_50)
+ phy_info->cable_length = e1000_cable_length_50;
+ else if (average <= e1000_igp_cable_length_80)
+ phy_info->cable_length = e1000_cable_length_50_80;
+ else if (average <= e1000_igp_cable_length_110)
+ phy_info->cable_length = e1000_cable_length_80_110;
+ else if (average <= e1000_igp_cable_length_140)
+ phy_info->cable_length = e1000_cable_length_110_140;
+ else
+ phy_info->cable_length = e1000_cable_length_140;
+ }
+
+ return E1000_SUCCESS;
+}
+
+/******************************************************************************
+* Get PHY information from various PHY registers for ife PHY only.
+*
+* hw - Struct containing variables accessed by shared code
+* phy_info - PHY information structure
+******************************************************************************/
+static s32 e1000_phy_ife_get_info(struct e1000_hw *hw,
+ struct e1000_phy_info *phy_info)
+{
+ s32 ret_val;
+ u16 phy_data;
+ e1000_rev_polarity polarity;
+
+ DEBUGFUNC("e1000_phy_ife_get_info");
+
+ phy_info->downshift = (e1000_downshift)hw->speed_downgraded;
+ phy_info->extended_10bt_distance = e1000_10bt_ext_dist_enable_normal;
+
+ ret_val = e1000_read_phy_reg(hw, IFE_PHY_SPECIAL_CONTROL, &phy_data);
+ if (ret_val)
+ return ret_val;
+ phy_info->polarity_correction =
+ ((phy_data & IFE_PSC_AUTO_POLARITY_DISABLE) >>
+ IFE_PSC_AUTO_POLARITY_DISABLE_SHIFT) ?
+ e1000_polarity_reversal_disabled : e1000_polarity_reversal_enabled;
+
+ if (phy_info->polarity_correction == e1000_polarity_reversal_enabled) {
+ ret_val = e1000_check_polarity(hw, &polarity);
+ if (ret_val)
+ return ret_val;
+ } else {
+ /* Polarity is forced. */
+ polarity = ((phy_data & IFE_PSC_FORCE_POLARITY) >>
+ IFE_PSC_FORCE_POLARITY_SHIFT) ?
+ e1000_rev_polarity_reversed : e1000_rev_polarity_normal;
+ }
+ phy_info->cable_polarity = polarity;
+
+ ret_val = e1000_read_phy_reg(hw, IFE_PHY_MDIX_CONTROL, &phy_data);
+ if (ret_val)
+ return ret_val;
+
+ phy_info->mdix_mode = (e1000_auto_x_mode)
+ ((phy_data & (IFE_PMC_AUTO_MDIX | IFE_PMC_FORCE_MDIX)) >>
+ IFE_PMC_MDIX_MODE_SHIFT);
+
+ return E1000_SUCCESS;
+}
+
+/******************************************************************************
+* Get PHY information from various PHY registers for m88 PHY only.
+*
+* hw - Struct containing variables accessed by shared code
+* phy_info - PHY information structure
+******************************************************************************/
+static s32 e1000_phy_m88_get_info(struct e1000_hw *hw,
+ struct e1000_phy_info *phy_info)
+{
+ s32 ret_val;
+ u16 phy_data;
+ e1000_rev_polarity polarity;
+
+ DEBUGFUNC("e1000_phy_m88_get_info");
+
+ /* The downshift status is checked only once, after link is established,
+	 * and is stored in the hw->speed_downgraded parameter. */
+ phy_info->downshift = (e1000_downshift)hw->speed_downgraded;
+
+ ret_val = e1000_read_phy_reg(hw, M88E1000_PHY_SPEC_CTRL, &phy_data);
+ if (ret_val)
+ return ret_val;
+
+ phy_info->extended_10bt_distance =
+ ((phy_data & M88E1000_PSCR_10BT_EXT_DIST_ENABLE) >>
+ M88E1000_PSCR_10BT_EXT_DIST_ENABLE_SHIFT) ?
+ e1000_10bt_ext_dist_enable_lower : e1000_10bt_ext_dist_enable_normal;
+
+ phy_info->polarity_correction =
+ ((phy_data & M88E1000_PSCR_POLARITY_REVERSAL) >>
+ M88E1000_PSCR_POLARITY_REVERSAL_SHIFT) ?
+ e1000_polarity_reversal_disabled : e1000_polarity_reversal_enabled;
+
+ /* Check polarity status */
+ ret_val = e1000_check_polarity(hw, &polarity);
+ if (ret_val)
+ return ret_val;
+ phy_info->cable_polarity = polarity;
+
+ ret_val = e1000_read_phy_reg(hw, M88E1000_PHY_SPEC_STATUS, &phy_data);
+ if (ret_val)
+ return ret_val;
+
+ phy_info->mdix_mode = (e1000_auto_x_mode)((phy_data & M88E1000_PSSR_MDIX) >>
+ M88E1000_PSSR_MDIX_SHIFT);
+
+ if ((phy_data & M88E1000_PSSR_SPEED) == M88E1000_PSSR_1000MBS) {
+ /* Cable Length Estimation and Local/Remote Receiver Information
+ * are only valid at 1000 Mbps.
+ */
+ if (hw->phy_type != e1000_phy_gg82563) {
+ phy_info->cable_length = (e1000_cable_length)((phy_data & M88E1000_PSSR_CABLE_LENGTH) >>
+ M88E1000_PSSR_CABLE_LENGTH_SHIFT);
+ } else {
+ ret_val = e1000_read_phy_reg(hw, GG82563_PHY_DSP_DISTANCE,
+ &phy_data);
+ if (ret_val)
+ return ret_val;
+
+ phy_info->cable_length = (e1000_cable_length)(phy_data & GG82563_DSPD_CABLE_LENGTH);
+ }
+
+ ret_val = e1000_read_phy_reg(hw, PHY_1000T_STATUS, &phy_data);
+ if (ret_val)
+ return ret_val;
+
+ phy_info->local_rx = ((phy_data & SR_1000T_LOCAL_RX_STATUS) >>
+ SR_1000T_LOCAL_RX_STATUS_SHIFT) ?
+ e1000_1000t_rx_status_ok : e1000_1000t_rx_status_not_ok;
+ phy_info->remote_rx = ((phy_data & SR_1000T_REMOTE_RX_STATUS) >>
+ SR_1000T_REMOTE_RX_STATUS_SHIFT) ?
+ e1000_1000t_rx_status_ok : e1000_1000t_rx_status_not_ok;
+
+ }
+
+ return E1000_SUCCESS;
+}
+
+/******************************************************************************
+* Get PHY information from various PHY registers
+*
+* hw - Struct containing variables accessed by shared code
+* phy_info - PHY information structure
+******************************************************************************/
+s32 e1000_phy_get_info(struct e1000_hw *hw, struct e1000_phy_info *phy_info)
+{
+ s32 ret_val;
+ u16 phy_data;
+
+ DEBUGFUNC("e1000_phy_get_info");
+
+ phy_info->cable_length = e1000_cable_length_undefined;
+ phy_info->extended_10bt_distance = e1000_10bt_ext_dist_enable_undefined;
+ phy_info->cable_polarity = e1000_rev_polarity_undefined;
+ phy_info->downshift = e1000_downshift_undefined;
+ phy_info->polarity_correction = e1000_polarity_reversal_undefined;
+ phy_info->mdix_mode = e1000_auto_x_mode_undefined;
+ phy_info->local_rx = e1000_1000t_rx_status_undefined;
+ phy_info->remote_rx = e1000_1000t_rx_status_undefined;
+
+ if (hw->media_type != e1000_media_type_copper) {
+ DEBUGOUT("PHY info is only valid for copper media\n");
+ return -E1000_ERR_CONFIG;
+ }
+
+ ret_val = e1000_read_phy_reg(hw, PHY_STATUS, &phy_data);
+ if (ret_val)
+ return ret_val;
+
+ ret_val = e1000_read_phy_reg(hw, PHY_STATUS, &phy_data);
+ if (ret_val)
+ return ret_val;
+
+ if ((phy_data & MII_SR_LINK_STATUS) != MII_SR_LINK_STATUS) {
+ DEBUGOUT("PHY info is only valid if link is up\n");
+ return -E1000_ERR_CONFIG;
+ }
+
+ if (hw->phy_type == e1000_phy_igp ||
+ hw->phy_type == e1000_phy_igp_3 ||
+ hw->phy_type == e1000_phy_igp_2)
+ return e1000_phy_igp_get_info(hw, phy_info);
+ else if (hw->phy_type == e1000_phy_ife)
+ return e1000_phy_ife_get_info(hw, phy_info);
+ else
+ return e1000_phy_m88_get_info(hw, phy_info);
+}
+
+s32 e1000_validate_mdi_setting(struct e1000_hw *hw)
+{
+ DEBUGFUNC("e1000_validate_mdi_settings");
+
+ if (!hw->autoneg && (hw->mdix == 0 || hw->mdix == 3)) {
+ DEBUGOUT("Invalid MDI setting detected\n");
+ hw->mdix = 1;
+ return -E1000_ERR_CONFIG;
+ }
+ return E1000_SUCCESS;
+}
+
+
+/******************************************************************************
+ * Sets up eeprom variables in the hw struct. Must be called after mac_type
+ * is configured. Additionally, if this is ICH8, the flash controller GbE
+ * registers must be mapped, or this will crash.
+ *
+ * hw - Struct containing variables accessed by shared code
+ *****************************************************************************/
+s32 e1000_init_eeprom_params(struct e1000_hw *hw)
+{
+ struct e1000_eeprom_info *eeprom = &hw->eeprom;
+ u32 eecd = er32(EECD);
+ s32 ret_val = E1000_SUCCESS;
+ u16 eeprom_size;
+
+ DEBUGFUNC("e1000_init_eeprom_params");
+
+ switch (hw->mac_type) {
+ case e1000_82542_rev2_0:
+ case e1000_82542_rev2_1:
+ case e1000_82543:
+ case e1000_82544:
+ eeprom->type = e1000_eeprom_microwire;
+ eeprom->word_size = 64;
+ eeprom->opcode_bits = 3;
+ eeprom->address_bits = 6;
+ eeprom->delay_usec = 50;
+ eeprom->use_eerd = false;
+ eeprom->use_eewr = false;
+ break;
+ case e1000_82540:
+ case e1000_82545:
+ case e1000_82545_rev_3:
+ case e1000_82546:
+ case e1000_82546_rev_3:
+ eeprom->type = e1000_eeprom_microwire;
+ eeprom->opcode_bits = 3;
+ eeprom->delay_usec = 50;
+ if (eecd & E1000_EECD_SIZE) {
+ eeprom->word_size = 256;
+ eeprom->address_bits = 8;
+ } else {
+ eeprom->word_size = 64;
+ eeprom->address_bits = 6;
+ }
+ eeprom->use_eerd = false;
+ eeprom->use_eewr = false;
+ break;
+ case e1000_82541:
+ case e1000_82541_rev_2:
+ case e1000_82547:
+ case e1000_82547_rev_2:
+ if (eecd & E1000_EECD_TYPE) {
+ eeprom->type = e1000_eeprom_spi;
+ eeprom->opcode_bits = 8;
+ eeprom->delay_usec = 1;
+ if (eecd & E1000_EECD_ADDR_BITS) {
+ eeprom->page_size = 32;
+ eeprom->address_bits = 16;
+ } else {
+ eeprom->page_size = 8;
+ eeprom->address_bits = 8;
+ }
+ } else {
+ eeprom->type = e1000_eeprom_microwire;
+ eeprom->opcode_bits = 3;
+ eeprom->delay_usec = 50;
+ if (eecd & E1000_EECD_ADDR_BITS) {
+ eeprom->word_size = 256;
+ eeprom->address_bits = 8;
+ } else {
+ eeprom->word_size = 64;
+ eeprom->address_bits = 6;
+ }
+ }
+ eeprom->use_eerd = false;
+ eeprom->use_eewr = false;
+ break;
+ case e1000_82571:
+ case e1000_82572:
+ eeprom->type = e1000_eeprom_spi;
+ eeprom->opcode_bits = 8;
+ eeprom->delay_usec = 1;
+ if (eecd & E1000_EECD_ADDR_BITS) {
+ eeprom->page_size = 32;
+ eeprom->address_bits = 16;
+ } else {
+ eeprom->page_size = 8;
+ eeprom->address_bits = 8;
+ }
+ eeprom->use_eerd = false;
+ eeprom->use_eewr = false;
+ break;
+ case e1000_82573:
+ eeprom->type = e1000_eeprom_spi;
+ eeprom->opcode_bits = 8;
+ eeprom->delay_usec = 1;
+ if (eecd & E1000_EECD_ADDR_BITS) {
+ eeprom->page_size = 32;
+ eeprom->address_bits = 16;
+ } else {
+ eeprom->page_size = 8;
+ eeprom->address_bits = 8;
+ }
+ eeprom->use_eerd = true;
+ eeprom->use_eewr = true;
+ if (!e1000_is_onboard_nvm_eeprom(hw)) {
+ eeprom->type = e1000_eeprom_flash;
+ eeprom->word_size = 2048;
+
+ /* Ensure that the Autonomous FLASH update bit is cleared due to
+ * Flash update issue on parts which use a FLASH for NVM. */
+ eecd &= ~E1000_EECD_AUPDEN;
+ ew32(EECD, eecd);
+ }
+ break;
+ case e1000_80003es2lan:
+ eeprom->type = e1000_eeprom_spi;
+ eeprom->opcode_bits = 8;
+ eeprom->delay_usec = 1;
+ if (eecd & E1000_EECD_ADDR_BITS) {
+ eeprom->page_size = 32;
+ eeprom->address_bits = 16;
+ } else {
+ eeprom->page_size = 8;
+ eeprom->address_bits = 8;
+ }
+ eeprom->use_eerd = true;
+ eeprom->use_eewr = false;
+ break;
+ case e1000_ich8lan:
+ {
+ s32 i = 0;
+ u32 flash_size = E1000_READ_ICH_FLASH_REG(hw, ICH_FLASH_GFPREG);
+
+ eeprom->type = e1000_eeprom_ich8;
+ eeprom->use_eerd = false;
+ eeprom->use_eewr = false;
+ eeprom->word_size = E1000_SHADOW_RAM_WORDS;
+
+		/* Initialize the shadow RAM structure, but don't load it from NVM,
+		 * to save time during driver init */
+ if (hw->eeprom_shadow_ram != NULL) {
+ for (i = 0; i < E1000_SHADOW_RAM_WORDS; i++) {
+ hw->eeprom_shadow_ram[i].modified = false;
+ hw->eeprom_shadow_ram[i].eeprom_word = 0xFFFF;
+ }
+ }
+
+ hw->flash_base_addr = (flash_size & ICH_GFPREG_BASE_MASK) *
+ ICH_FLASH_SECTOR_SIZE;
+
+ hw->flash_bank_size = ((flash_size >> 16) & ICH_GFPREG_BASE_MASK) + 1;
+ hw->flash_bank_size -= (flash_size & ICH_GFPREG_BASE_MASK);
+
+ hw->flash_bank_size *= ICH_FLASH_SECTOR_SIZE;
+
+ hw->flash_bank_size /= 2 * sizeof(u16);
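+		/* Dividing the region's byte count by 2 * sizeof(u16) leaves the
+		 * per-bank size in 16-bit words, on the assumption that the GbE
+		 * flash region holds two equally sized NVM banks. */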
+
+ break;
+ }
+ default:
+ break;
+ }
+
+ if (eeprom->type == e1000_eeprom_spi) {
+ /* eeprom_size will be an enum [0..8] that maps to eeprom sizes 128B to
+ * 32KB (incremented by powers of 2).
+ */
+ if (hw->mac_type <= e1000_82547_rev_2) {
+ /* Set to default value for initial eeprom read. */
+ eeprom->word_size = 64;
+ ret_val = e1000_read_eeprom(hw, EEPROM_CFG, 1, &eeprom_size);
+ if (ret_val)
+ return ret_val;
+ eeprom_size = (eeprom_size & EEPROM_SIZE_MASK) >> EEPROM_SIZE_SHIFT;
+ /* 256B eeprom size was not supported in earlier hardware, so we
+ * bump eeprom_size up one to ensure that "1" (which maps to 256B)
+ * is never the result used in the shifting logic below. */
+ if (eeprom_size)
+ eeprom_size++;
+ } else {
+ eeprom_size = (u16)((eecd & E1000_EECD_SIZE_EX_MASK) >>
+ E1000_EECD_SIZE_EX_SHIFT);
+ }
+
+ eeprom->word_size = 1 << (eeprom_size + EEPROM_WORD_SIZE_SHIFT);
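+		/* For example, assuming EEPROM_WORD_SIZE_SHIFT is 6: eeprom_size 0
+		 * yields 64 words (128B) and eeprom_size 8 yields 16384 words (32KB),
+		 * matching the 128B to 32KB range noted above. */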
+ }
+ return ret_val;
+}
+
+/******************************************************************************
+ * Raises the EEPROM's clock input.
+ *
+ * hw - Struct containing variables accessed by shared code
+ * eecd - EECD's current value
+ *****************************************************************************/
+static void e1000_raise_ee_clk(struct e1000_hw *hw, u32 *eecd)
+{
+ /* Raise the clock input to the EEPROM (by setting the SK bit), and then
+ * wait <delay> microseconds.
+ */
+ *eecd = *eecd | E1000_EECD_SK;
+ ew32(EECD, *eecd);
+ E1000_WRITE_FLUSH();
+ udelay(hw->eeprom.delay_usec);
+}
+
+/******************************************************************************
+ * Lowers the EEPROM's clock input.
+ *
+ * hw - Struct containing variables accessed by shared code
+ * eecd - EECD's current value
+ *****************************************************************************/
+static void e1000_lower_ee_clk(struct e1000_hw *hw, u32 *eecd)
+{
+ /* Lower the clock input to the EEPROM (by clearing the SK bit), and then
+ * wait 50 microseconds.
+ */
+ *eecd = *eecd & ~E1000_EECD_SK;
+ ew32(EECD, *eecd);
+ E1000_WRITE_FLUSH();
+ udelay(hw->eeprom.delay_usec);
+}
+
+/******************************************************************************
+ * Shift data bits out to the EEPROM.
+ *
+ * hw - Struct containing variables accessed by shared code
+ * data - data to send to the EEPROM
+ * count - number of bits to shift out
+ *****************************************************************************/
+static void e1000_shift_out_ee_bits(struct e1000_hw *hw, u16 data, u16 count)
+{
+ struct e1000_eeprom_info *eeprom = &hw->eeprom;
+ u32 eecd;
+ u32 mask;
+
+ /* We need to shift "count" bits out to the EEPROM. So, value in the
+ * "data" parameter will be shifted out to the EEPROM one bit at a time.
+ * In order to do this, "data" must be broken down into bits.
+ */
+ mask = 0x01 << (count - 1);
+ eecd = er32(EECD);
+ if (eeprom->type == e1000_eeprom_microwire) {
+ eecd &= ~E1000_EECD_DO;
+ } else if (eeprom->type == e1000_eeprom_spi) {
+ eecd |= E1000_EECD_DO;
+ }
+ do {
+ /* A "1" is shifted out to the EEPROM by setting bit "DI" to a "1",
+ * and then raising and then lowering the clock (the SK bit controls
+ * the clock input to the EEPROM). A "0" is shifted out to the EEPROM
+ * by setting "DI" to "0" and then raising and then lowering the clock.
+ */
+ eecd &= ~E1000_EECD_DI;
+
+ if (data & mask)
+ eecd |= E1000_EECD_DI;
+
+ ew32(EECD, eecd);
+ E1000_WRITE_FLUSH();
+
+ udelay(eeprom->delay_usec);
+
+ e1000_raise_ee_clk(hw, &eecd);
+ e1000_lower_ee_clk(hw, &eecd);
+
+ mask = mask >> 1;
+
+ } while (mask);
+
+ /* We leave the "DI" bit set to "0" when we leave this routine. */
+ eecd &= ~E1000_EECD_DI;
+ ew32(EECD, eecd);
+}
+
+/******************************************************************************
+ * Shift data bits in from the EEPROM
+ *
+ * hw - Struct containing variables accessed by shared code
+ *****************************************************************************/
+static u16 e1000_shift_in_ee_bits(struct e1000_hw *hw, u16 count)
+{
+ u32 eecd;
+ u32 i;
+ u16 data;
+
+ /* In order to read a register from the EEPROM, we need to shift 'count'
+ * bits in from the EEPROM. Bits are "shifted in" by raising the clock
+ * input to the EEPROM (setting the SK bit), and then reading the value of
+ * the "DO" bit. During this "shifting in" process the "DI" bit should
+ * always be clear.
+ */
+
+ eecd = er32(EECD);
+
+ eecd &= ~(E1000_EECD_DO | E1000_EECD_DI);
+ data = 0;
+
+ for (i = 0; i < count; i++) {
+ data = data << 1;
+ e1000_raise_ee_clk(hw, &eecd);
+
+ eecd = er32(EECD);
+
+ eecd &= ~(E1000_EECD_DI);
+ if (eecd & E1000_EECD_DO)
+ data |= 1;
+
+ e1000_lower_ee_clk(hw, &eecd);
+ }
+
+ return data;
+}
+
+/******************************************************************************
+ * Prepares EEPROM for access
+ *
+ * hw - Struct containing variables accessed by shared code
+ *
+ * Lowers EEPROM clock. Clears input pin. Sets the chip select pin. This
+ * function should be called before issuing a command to the EEPROM.
+ *****************************************************************************/
+static s32 e1000_acquire_eeprom(struct e1000_hw *hw)
+{
+ struct e1000_eeprom_info *eeprom = &hw->eeprom;
+ u32 eecd, i=0;
+
+ DEBUGFUNC("e1000_acquire_eeprom");
+
+ if (e1000_swfw_sync_acquire(hw, E1000_SWFW_EEP_SM))
+ return -E1000_ERR_SWFW_SYNC;
+ eecd = er32(EECD);
+
+ if (hw->mac_type != e1000_82573) {
+ /* Request EEPROM Access */
+ if (hw->mac_type > e1000_82544) {
+ eecd |= E1000_EECD_REQ;
+ ew32(EECD, eecd);
+ eecd = er32(EECD);
+ while ((!(eecd & E1000_EECD_GNT)) &&
+ (i < E1000_EEPROM_GRANT_ATTEMPTS)) {
+ i++;
+ udelay(5);
+ eecd = er32(EECD);
+ }
+ if (!(eecd & E1000_EECD_GNT)) {
+ eecd &= ~E1000_EECD_REQ;
+ ew32(EECD, eecd);
+ DEBUGOUT("Could not acquire EEPROM grant\n");
+ e1000_swfw_sync_release(hw, E1000_SWFW_EEP_SM);
+ return -E1000_ERR_EEPROM;
+ }
+ }
+ }
+
+ /* Setup EEPROM for Read/Write */
+
+ if (eeprom->type == e1000_eeprom_microwire) {
+ /* Clear SK and DI */
+ eecd &= ~(E1000_EECD_DI | E1000_EECD_SK);
+ ew32(EECD, eecd);
+
+ /* Set CS */
+ eecd |= E1000_EECD_CS;
+ ew32(EECD, eecd);
+ } else if (eeprom->type == e1000_eeprom_spi) {
+ /* Clear SK and CS */
+ eecd &= ~(E1000_EECD_CS | E1000_EECD_SK);
+ ew32(EECD, eecd);
+ udelay(1);
+ }
+
+ return E1000_SUCCESS;
+}
+
+/******************************************************************************
+ * Returns EEPROM to a "standby" state
+ *
+ * hw - Struct containing variables accessed by shared code
+ *****************************************************************************/
+static void e1000_standby_eeprom(struct e1000_hw *hw)
+{
+ struct e1000_eeprom_info *eeprom = &hw->eeprom;
+ u32 eecd;
+
+ eecd = er32(EECD);
+
+ if (eeprom->type == e1000_eeprom_microwire) {
+ eecd &= ~(E1000_EECD_CS | E1000_EECD_SK);
+ ew32(EECD, eecd);
+ E1000_WRITE_FLUSH();
+ udelay(eeprom->delay_usec);
+
+ /* Clock high */
+ eecd |= E1000_EECD_SK;
+ ew32(EECD, eecd);
+ E1000_WRITE_FLUSH();
+ udelay(eeprom->delay_usec);
+
+ /* Select EEPROM */
+ eecd |= E1000_EECD_CS;
+ ew32(EECD, eecd);
+ E1000_WRITE_FLUSH();
+ udelay(eeprom->delay_usec);
+
+ /* Clock low */
+ eecd &= ~E1000_EECD_SK;
+ ew32(EECD, eecd);
+ E1000_WRITE_FLUSH();
+ udelay(eeprom->delay_usec);
+ } else if (eeprom->type == e1000_eeprom_spi) {
+ /* Toggle CS to flush commands */
+ eecd |= E1000_EECD_CS;
+ ew32(EECD, eecd);
+ E1000_WRITE_FLUSH();
+ udelay(eeprom->delay_usec);
+ eecd &= ~E1000_EECD_CS;
+ ew32(EECD, eecd);
+ E1000_WRITE_FLUSH();
+ udelay(eeprom->delay_usec);
+ }
+}
+
+/******************************************************************************
+ * Terminates a command by inverting the EEPROM's chip select pin
+ *
+ * hw - Struct containing variables accessed by shared code
+ *****************************************************************************/
+static void e1000_release_eeprom(struct e1000_hw *hw)
+{
+ u32 eecd;
+
+ DEBUGFUNC("e1000_release_eeprom");
+
+ eecd = er32(EECD);
+
+ if (hw->eeprom.type == e1000_eeprom_spi) {
+ eecd |= E1000_EECD_CS; /* Pull CS high */
+ eecd &= ~E1000_EECD_SK; /* Lower SCK */
+
+ ew32(EECD, eecd);
+
+ udelay(hw->eeprom.delay_usec);
+ } else if (hw->eeprom.type == e1000_eeprom_microwire) {
+ /* cleanup eeprom */
+
+ /* CS on Microwire is active-high */
+ eecd &= ~(E1000_EECD_CS | E1000_EECD_DI);
+
+ ew32(EECD, eecd);
+
+ /* Rising edge of clock */
+ eecd |= E1000_EECD_SK;
+ ew32(EECD, eecd);
+ E1000_WRITE_FLUSH();
+ udelay(hw->eeprom.delay_usec);
+
+ /* Falling edge of clock */
+ eecd &= ~E1000_EECD_SK;
+ ew32(EECD, eecd);
+ E1000_WRITE_FLUSH();
+ udelay(hw->eeprom.delay_usec);
+ }
+
+ /* Stop requesting EEPROM access */
+ if (hw->mac_type > e1000_82544) {
+ eecd &= ~E1000_EECD_REQ;
+ ew32(EECD, eecd);
+ }
+
+ e1000_swfw_sync_release(hw, E1000_SWFW_EEP_SM);
+}
+
+/******************************************************************************
+ * Waits for the SPI EEPROM to signal that it is ready for the next command.
+ *
+ * hw - Struct containing variables accessed by shared code
+ *****************************************************************************/
+static s32 e1000_spi_eeprom_ready(struct e1000_hw *hw)
+{
+ u16 retry_count = 0;
+ u8 spi_stat_reg;
+
+ DEBUGFUNC("e1000_spi_eeprom_ready");
+
+ /* Read "Status Register" repeatedly until the LSB is cleared. The
+ * EEPROM will signal that the command has been completed by clearing
+ * bit 0 of the internal status register. If it's not cleared within
+ * 5 milliseconds, then error out.
+ */
+ retry_count = 0;
+ do {
+ e1000_shift_out_ee_bits(hw, EEPROM_RDSR_OPCODE_SPI,
+ hw->eeprom.opcode_bits);
+ spi_stat_reg = (u8)e1000_shift_in_ee_bits(hw, 8);
+ if (!(spi_stat_reg & EEPROM_STATUS_RDY_SPI))
+ break;
+
+ udelay(5);
+ retry_count += 5;
+
+ e1000_standby_eeprom(hw);
+ } while (retry_count < EEPROM_MAX_RETRY_SPI);
+
+ /* ATMEL SPI write time could vary from 0-20mSec on 3.3V devices (and
+ * only 0-5mSec on 5V devices)
+ */
+ if (retry_count >= EEPROM_MAX_RETRY_SPI) {
+ DEBUGOUT("SPI EEPROM Status error\n");
+ return -E1000_ERR_EEPROM;
+ }
+
+ return E1000_SUCCESS;
+}
+
+/******************************************************************************
+ * Reads a 16 bit word from the EEPROM.
+ *
+ * hw - Struct containing variables accessed by shared code
+ * offset - offset of word in the EEPROM to read
+ * data - word read from the EEPROM
+ * words - number of words to read
+ *****************************************************************************/
+s32 e1000_read_eeprom(struct e1000_hw *hw, u16 offset, u16 words, u16 *data)
+{
+ s32 ret;
+ spin_lock(&e1000_eeprom_lock);
+ ret = e1000_do_read_eeprom(hw, offset, words, data);
+ spin_unlock(&e1000_eeprom_lock);
+ return ret;
+}
+
+static s32 e1000_do_read_eeprom(struct e1000_hw *hw, u16 offset, u16 words, u16 *data)
+{
+ struct e1000_eeprom_info *eeprom = &hw->eeprom;
+ u32 i = 0;
+
+ DEBUGFUNC("e1000_read_eeprom");
+
+ /* If eeprom is not yet detected, do so now */
+ if (eeprom->word_size == 0)
+ e1000_init_eeprom_params(hw);
+
+ /* A check for invalid values: offset too large, too many words, and not
+ * enough words.
+ */
+ if ((offset >= eeprom->word_size) || (words > eeprom->word_size - offset) ||
+ (words == 0)) {
+ DEBUGOUT2("\"words\" parameter out of bounds. offset = %d, size = %d\n", offset, eeprom->word_size);
+ return -E1000_ERR_EEPROM;
+ }
+
+ /* EEPROMs that don't use EERD to read require us to bit-bang the SPI
+ * directly. In this case, we need to acquire the EEPROM so that
+ * FW or other port software does not interrupt.
+ */
+ if (e1000_is_onboard_nvm_eeprom(hw) && !hw->eeprom.use_eerd) {
+ /* Prepare the EEPROM for bit-bang reading */
+ if (e1000_acquire_eeprom(hw) != E1000_SUCCESS)
+ return -E1000_ERR_EEPROM;
+ }
+
+ /* EERD register EEPROM access requires no EEPROM acquire/release */
+ if (eeprom->use_eerd)
+ return e1000_read_eeprom_eerd(hw, offset, words, data);
+
+ /* ICH EEPROM access is done via the ICH flash controller */
+ if (eeprom->type == e1000_eeprom_ich8)
+ return e1000_read_eeprom_ich8(hw, offset, words, data);
+
+ /* Set up the SPI or Microwire EEPROM for bit-bang reading. We have
+ * acquired the EEPROM at this point, so any returns should release it */
+ if (eeprom->type == e1000_eeprom_spi) {
+ u16 word_in;
+ u8 read_opcode = EEPROM_READ_OPCODE_SPI;
+
+ if (e1000_spi_eeprom_ready(hw)) {
+ e1000_release_eeprom(hw);
+ return -E1000_ERR_EEPROM;
+ }
+
+ e1000_standby_eeprom(hw);
+
+ /* Some SPI eeproms use the 8th address bit embedded in the opcode */
+ if ((eeprom->address_bits == 8) && (offset >= 128))
+ read_opcode |= EEPROM_A8_OPCODE_SPI;
+
+ /* Send the READ command (opcode + addr) */
+ e1000_shift_out_ee_bits(hw, read_opcode, eeprom->opcode_bits);
+ e1000_shift_out_ee_bits(hw, (u16)(offset*2), eeprom->address_bits);
+
+ /* Read the data. The address of the eeprom internally increments with
+ * each byte (spi) being read, saving on the overhead of eeprom setup
+ * and tear-down. The address counter will roll over if reading beyond
+ * the size of the eeprom, thus allowing the entire memory to be read
+ * starting from any offset. */
+ for (i = 0; i < words; i++) {
+ word_in = e1000_shift_in_ee_bits(hw, 16);
+ data[i] = (word_in >> 8) | (word_in << 8);
+ }
+ } else if (eeprom->type == e1000_eeprom_microwire) {
+ for (i = 0; i < words; i++) {
+ /* Send the READ command (opcode + addr) */
+ e1000_shift_out_ee_bits(hw, EEPROM_READ_OPCODE_MICROWIRE,
+ eeprom->opcode_bits);
+ e1000_shift_out_ee_bits(hw, (u16)(offset + i),
+ eeprom->address_bits);
+
+ /* Read the data. For microwire, each word requires the overhead
+ * of eeprom setup and tear-down. */
+ data[i] = e1000_shift_in_ee_bits(hw, 16);
+ e1000_standby_eeprom(hw);
+ }
+ }
+
+ /* End this read operation */
+ e1000_release_eeprom(hw);
+
+ return E1000_SUCCESS;
+}
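+
+/* Illustrative sketch (guarded by #if 0, not compiled): the byte swap applied
+ * to each SPI word above. The 16-bit value assembled bit-by-bit by
+ * e1000_shift_in_ee_bits() has its two bytes in the opposite order from the
+ * one the driver stores in data[], so they are exchanged.
+ */
+#if 0
+static u16 model_swap_spi_word(u16 word_in)
+{
+ /* e.g. 0x12AB becomes 0xAB12 */
+ return (u16)((word_in >> 8) | (word_in << 8));
+}
+#endif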
+
+/******************************************************************************
+ * Reads a 16 bit word from the EEPROM using the EERD register.
+ *
+ * hw - Struct containing variables accessed by shared code
+ * offset - offset of word in the EEPROM to read
+ * data - word read from the EEPROM
+ * words - number of words to read
+ *****************************************************************************/
+static s32 e1000_read_eeprom_eerd(struct e1000_hw *hw, u16 offset, u16 words,
+ u16 *data)
+{
+ u32 i, eerd = 0;
+ s32 error = 0;
+
+ for (i = 0; i < words; i++) {
+ eerd = ((offset+i) << E1000_EEPROM_RW_ADDR_SHIFT) +
+ E1000_EEPROM_RW_REG_START;
+
+ ew32(EERD, eerd);
+ error = e1000_poll_eerd_eewr_done(hw, E1000_EEPROM_POLL_READ);
+
+ if (error) {
+ break;
+ }
+ data[i] = (er32(EERD) >> E1000_EEPROM_RW_REG_DATA);
+
+ }
+
+ return error;
+}
+
+/******************************************************************************
+ * Writes 16 bit words to the EEPROM using the EEWR register.
+ *
+ * hw - Struct containing variables accessed by shared code
+ * offset - offset of word in the EEPROM to write to
+ * data - pointer to the words to be written to the EEPROM
+ * words - number of words to write
+ *****************************************************************************/
+static s32 e1000_write_eeprom_eewr(struct e1000_hw *hw, u16 offset, u16 words,
+ u16 *data)
+{
+ u32 register_value = 0;
+ u32 i = 0;
+ s32 error = 0;
+
+ if (e1000_swfw_sync_acquire(hw, E1000_SWFW_EEP_SM))
+ return -E1000_ERR_SWFW_SYNC;
+
+ for (i = 0; i < words; i++) {
+ register_value = (data[i] << E1000_EEPROM_RW_REG_DATA) |
+ ((offset+i) << E1000_EEPROM_RW_ADDR_SHIFT) |
+ E1000_EEPROM_RW_REG_START;
+
+ error = e1000_poll_eerd_eewr_done(hw, E1000_EEPROM_POLL_WRITE);
+ if (error) {
+ break;
+ }
+
+ ew32(EEWR, register_value);
+
+ error = e1000_poll_eerd_eewr_done(hw, E1000_EEPROM_POLL_WRITE);
+
+ if (error) {
+ break;
+ }
+ }
+
+ e1000_swfw_sync_release(hw, E1000_SWFW_EEP_SM);
+ return error;
+}
+
+/******************************************************************************
+ * Polls the status bit (bit 1) of the EERD or EEWR register to determine when
+ * the read or write is done.
+ *
+ * hw - Struct containing variables accessed by shared code
+ *****************************************************************************/
+static s32 e1000_poll_eerd_eewr_done(struct e1000_hw *hw, int eerd)
+{
+ u32 attempts = 100000;
+ u32 i, reg = 0;
+ s32 done = E1000_ERR_EEPROM;
+
+ for (i = 0; i < attempts; i++) {
+ if (eerd == E1000_EEPROM_POLL_READ)
+ reg = er32(EERD);
+ else
+ reg = er32(EEWR);
+
+ if (reg & E1000_EEPROM_RW_REG_DONE) {
+ done = E1000_SUCCESS;
+ break;
+ }
+ udelay(5);
+ }
+
+ return done;
+}
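+
+/* Illustrative sketch (guarded by #if 0, not compiled): the bounded-polling
+ * pattern used above. With 100000 attempts and a 5 us delay per attempt the
+ * worst case wait is roughly 500 ms before giving up. The read_done()
+ * callback is a hypothetical stand-in for testing E1000_EEPROM_RW_REG_DONE.
+ */
+#if 0
+static s32 model_poll_done(int (*read_done)(void *ctx), void *ctx)
+{
+ u32 attempts = 100000;
+ u32 i;
+
+ for (i = 0; i < attempts; i++) {
+ if (read_done(ctx))
+ return E1000_SUCCESS; /* hardware finished in time */
+ udelay(5); /* back off before re-reading */
+ }
+ return -E1000_ERR_EEPROM; /* timed out */
+}
+#endif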
+
+/***************************************************************************
+* Description: Determines if the onboard NVM is FLASH or EEPROM.
+*
+* hw - Struct containing variables accessed by shared code
+****************************************************************************/
+static bool e1000_is_onboard_nvm_eeprom(struct e1000_hw *hw)
+{
+ u32 eecd = 0;
+
+ DEBUGFUNC("e1000_is_onboard_nvm_eeprom");
+
+ if (hw->mac_type == e1000_ich8lan)
+ return false;
+
+ if (hw->mac_type == e1000_82573) {
+ eecd = er32(EECD);
+
+ /* Isolate bits 15 & 16 */
+ eecd = ((eecd >> 15) & 0x03);
+
+ /* If both bits are set, device is Flash type */
+ if (eecd == 0x03) {
+ return false;
+ }
+ }
+ return true;
+}
+
+/******************************************************************************
+ * Verifies that the EEPROM has a valid checksum
+ *
+ * hw - Struct containing variables accessed by shared code
+ *
+ * Reads the first 64 16 bit words of the EEPROM and sums the values read.
+ * If the sum of the 64 16 bit words is 0xBABA, the EEPROM's checksum is
+ * valid.
+ *****************************************************************************/
+s32 e1000_validate_eeprom_checksum(struct e1000_hw *hw)
+{
+ u16 checksum = 0;
+ u16 i, eeprom_data;
+
+ DEBUGFUNC("e1000_validate_eeprom_checksum");
+
+ if ((hw->mac_type == e1000_82573) && !e1000_is_onboard_nvm_eeprom(hw)) {
+ /* Check bit 4 of word 10h. If it is 0, firmware is done updating
+ * 10h-12h. Checksum may need to be fixed. */
+ e1000_read_eeprom(hw, 0x10, 1, &eeprom_data);
+ if ((eeprom_data & 0x10) == 0) {
+ /* Read 0x23 and check bit 15. This bit is a 1 when the checksum
+ * has already been fixed. If the checksum is still wrong and this
+ * bit is a 1, we need to return bad checksum. Otherwise, we need
+ * to set this bit to a 1 and update the checksum. */
+ e1000_read_eeprom(hw, 0x23, 1, &eeprom_data);
+ if ((eeprom_data & 0x8000) == 0) {
+ eeprom_data |= 0x8000;
+ e1000_write_eeprom(hw, 0x23, 1, &eeprom_data);
+ e1000_update_eeprom_checksum(hw);
+ }
+ }
+ }
+
+ if (hw->mac_type == e1000_ich8lan) {
+ /* Drivers must allocate the shadow ram structure for the
+ * EEPROM checksum to be updated. Otherwise, this bit as well
+ * as the checksum must both be set correctly for this
+ * validation to pass.
+ */
+ e1000_read_eeprom(hw, 0x19, 1, &eeprom_data);
+ if ((eeprom_data & 0x40) == 0) {
+ eeprom_data |= 0x40;
+ e1000_write_eeprom(hw, 0x19, 1, &eeprom_data);
+ e1000_update_eeprom_checksum(hw);
+ }
+ }
+
+ for (i = 0; i < (EEPROM_CHECKSUM_REG + 1); i++) {
+ if (e1000_read_eeprom(hw, i, 1, &eeprom_data) < 0) {
+ DEBUGOUT("EEPROM Read Error\n");
+ return -E1000_ERR_EEPROM;
+ }
+ checksum += eeprom_data;
+ }
+
+ if (checksum == (u16)EEPROM_SUM)
+ return E1000_SUCCESS;
+ else {
+ DEBUGOUT("EEPROM Checksum Invalid\n");
+ return -E1000_ERR_EEPROM;
+ }
+}
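+
+/* Illustrative sketch (guarded by #if 0, not compiled): the checksum rule
+ * applied above, run over a plain 64-word array instead of the EEPROM. The
+ * checksum word at EEPROM_CHECKSUM_REG is chosen so that the 16-bit sum of
+ * words 0 through EEPROM_CHECKSUM_REG equals EEPROM_SUM (0xBABA); validating
+ * simply re-adds the words and compares.
+ */
+#if 0
+static int model_checksum_valid(const u16 *words /* >= 64 entries */)
+{
+ u16 checksum = 0;
+ u16 i;
+
+ for (i = 0; i < EEPROM_CHECKSUM_REG + 1; i++)
+ checksum += words[i]; /* 16-bit wrap-around sum */
+
+ return checksum == (u16)EEPROM_SUM; /* 0xBABA when intact */
+}
+#endif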
+
+/******************************************************************************
+ * Calculates the EEPROM checksum and writes it to the EEPROM
+ *
+ * hw - Struct containing variables accessed by shared code
+ *
+ * Sums the first 63 16 bit words of the EEPROM. Subtracts the sum from 0xBABA.
+ * Writes the difference to word offset 63 of the EEPROM.
+ *****************************************************************************/
+s32 e1000_update_eeprom_checksum(struct e1000_hw *hw)
+{
+ u32 ctrl_ext;
+ u16 checksum = 0;
+ u16 i, eeprom_data;
+
+ DEBUGFUNC("e1000_update_eeprom_checksum");
+
+ for (i = 0; i < EEPROM_CHECKSUM_REG; i++) {
+ if (e1000_read_eeprom(hw, i, 1, &eeprom_data) < 0) {
+ DEBUGOUT("EEPROM Read Error\n");
+ return -E1000_ERR_EEPROM;
+ }
+ checksum += eeprom_data;
+ }
+ checksum = (u16)EEPROM_SUM - checksum;
+ if (e1000_write_eeprom(hw, EEPROM_CHECKSUM_REG, 1, &checksum) < 0) {
+ DEBUGOUT("EEPROM Write Error\n");
+ return -E1000_ERR_EEPROM;
+ } else if (hw->eeprom.type == e1000_eeprom_flash) {
+ e1000_commit_shadow_ram(hw);
+ } else if (hw->eeprom.type == e1000_eeprom_ich8) {
+ e1000_commit_shadow_ram(hw);
+ /* Reload the EEPROM, or else modifications will not appear
+ * until after next adapter reset. */
+ ctrl_ext = er32(CTRL_EXT);
+ ctrl_ext |= E1000_CTRL_EXT_EE_RST;
+ ew32(CTRL_EXT, ctrl_ext);
+ msleep(10);
+ }
+ return E1000_SUCCESS;
+}
+
+/******************************************************************************
+ * Parent function for writing words to the different EEPROM types.
+ *
+ * hw - Struct containing variables accessed by shared code
+ * offset - offset within the EEPROM to be written to
+ * words - number of words to write
+ * data - pointer to the 16 bit words to be written to the EEPROM
+ *
+ * If e1000_update_eeprom_checksum is not called after this function, the
+ * EEPROM will most likely contain an invalid checksum.
+ *****************************************************************************/
+s32 e1000_write_eeprom(struct e1000_hw *hw, u16 offset, u16 words, u16 *data)
+{
+ s32 ret;
+ spin_lock(&e1000_eeprom_lock);
+ ret = e1000_do_write_eeprom(hw, offset, words, data);
+ spin_unlock(&e1000_eeprom_lock);
+ return ret;
+}
+
+
+static s32 e1000_do_write_eeprom(struct e1000_hw *hw, u16 offset, u16 words, u16 *data)
+{
+ struct e1000_eeprom_info *eeprom = &hw->eeprom;
+ s32 status = 0;
+
+ DEBUGFUNC("e1000_write_eeprom");
+
+ /* If eeprom is not yet detected, do so now */
+ if (eeprom->word_size == 0)
+ e1000_init_eeprom_params(hw);
+
+ /* A check for invalid values: offset too large, too many words, and not
+ * enough words.
+ */
+ if ((offset >= eeprom->word_size) || (words > eeprom->word_size - offset) ||
+ (words == 0)) {
+ DEBUGOUT("\"words\" parameter out of bounds\n");
+ return -E1000_ERR_EEPROM;
+ }
+
+ /* 82573 writes only through eewr */
+ if (eeprom->use_eewr)
+ return e1000_write_eeprom_eewr(hw, offset, words, data);
+
+ if (eeprom->type == e1000_eeprom_ich8)
+ return e1000_write_eeprom_ich8(hw, offset, words, data);
+
+ /* Prepare the EEPROM for writing */
+ if (e1000_acquire_eeprom(hw) != E1000_SUCCESS)
+ return -E1000_ERR_EEPROM;
+
+ if (eeprom->type == e1000_eeprom_microwire) {
+ status = e1000_write_eeprom_microwire(hw, offset, words, data);
+ } else {
+ status = e1000_write_eeprom_spi(hw, offset, words, data);
+ msleep(10);
+ }
+
+ /* Done with writing */
+ e1000_release_eeprom(hw);
+
+ return status;
+}
+
+/******************************************************************************
+ * Writes 16 bit words starting at a given offset in an SPI EEPROM.
+ *
+ * hw - Struct containing variables accessed by shared code
+ * offset - offset within the EEPROM to be written to
+ * words - number of words to write
+ * data - pointer to array of 16 bit words to be written to the EEPROM
+ *
+ *****************************************************************************/
+static s32 e1000_write_eeprom_spi(struct e1000_hw *hw, u16 offset, u16 words,
+ u16 *data)
+{
+ struct e1000_eeprom_info *eeprom = &hw->eeprom;
+ u16 widx = 0;
+
+ DEBUGFUNC("e1000_write_eeprom_spi");
+
+ while (widx < words) {
+ u8 write_opcode = EEPROM_WRITE_OPCODE_SPI;
+
+ if (e1000_spi_eeprom_ready(hw)) return -E1000_ERR_EEPROM;
+
+ e1000_standby_eeprom(hw);
+
+ /* Send the WRITE ENABLE command (8 bit opcode ) */
+ e1000_shift_out_ee_bits(hw, EEPROM_WREN_OPCODE_SPI,
+ eeprom->opcode_bits);
+
+ e1000_standby_eeprom(hw);
+
+ /* Some SPI eeproms use the 8th address bit embedded in the opcode */
+ if ((eeprom->address_bits == 8) && (offset >= 128))
+ write_opcode |= EEPROM_A8_OPCODE_SPI;
+
+ /* Send the Write command (8-bit opcode + addr) */
+ e1000_shift_out_ee_bits(hw, write_opcode, eeprom->opcode_bits);
+
+ e1000_shift_out_ee_bits(hw, (u16)((offset + widx)*2),
+ eeprom->address_bits);
+
+ /* Send the data */
+
+ /* Loop to allow for up to a whole page write (32 bytes) of the EEPROM */
+ while (widx < words) {
+ u16 word_out = data[widx];
+ word_out = (word_out >> 8) | (word_out << 8);
+ e1000_shift_out_ee_bits(hw, word_out, 16);
+ widx++;
+
+ /* Some larger eeprom sizes are capable of a 32-byte PAGE WRITE
+ * operation, while the smaller eeproms are capable of an 8-byte
+ * PAGE WRITE operation. Break the inner loop to pass new address
+ */
+ if ((((offset + widx)*2) % eeprom->page_size) == 0) {
+ e1000_standby_eeprom(hw);
+ break;
+ }
+ }
+ }
+
+ return E1000_SUCCESS;
+}
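+
+/* Illustrative sketch (guarded by #if 0, not compiled): the page-boundary
+ * arithmetic behind the inner loop above. Word offsets map to byte addresses
+ * by multiplying by two, so a page of page_size bytes holds page_size / 2
+ * words and a burst has to stop once the next byte address is a multiple of
+ * page_size.
+ */
+#if 0
+static u16 model_words_until_page_end(u16 word_offset, u16 page_size)
+{
+ u16 byte_addr = word_offset * 2;
+ u16 bytes_left = page_size - (byte_addr % page_size);
+
+ return bytes_left / 2; /* words that fit before re-addressing */
+}
+#endif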
+
+/******************************************************************************
+ * Writes 16 bit words starting at a given offset in a Microwire EEPROM.
+ *
+ * hw - Struct containing variables accessed by shared code
+ * offset - offset within the EEPROM to be written to
+ * words - number of words to write
+ * data - pointer to array of 16 bit words to be written to the EEPROM
+ *
+ *****************************************************************************/
+static s32 e1000_write_eeprom_microwire(struct e1000_hw *hw, u16 offset,
+ u16 words, u16 *data)
+{
+ struct e1000_eeprom_info *eeprom = &hw->eeprom;
+ u32 eecd;
+ u16 words_written = 0;
+ u16 i = 0;
+
+ DEBUGFUNC("e1000_write_eeprom_microwire");
+
+ /* Send the write enable command to the EEPROM (3-bit opcode plus
+ * 6/8-bit dummy address beginning with 11). It's less work to include
+ * the 11 of the dummy address as part of the opcode than it is to shift
+ * it over the correct number of bits for the address. This puts the
+ * EEPROM into write/erase mode.
+ */
+ e1000_shift_out_ee_bits(hw, EEPROM_EWEN_OPCODE_MICROWIRE,
+ (u16)(eeprom->opcode_bits + 2));
+
+ e1000_shift_out_ee_bits(hw, 0, (u16)(eeprom->address_bits - 2));
+
+ /* Prepare the EEPROM */
+ e1000_standby_eeprom(hw);
+
+ while (words_written < words) {
+ /* Send the Write command (3-bit opcode + addr) */
+ e1000_shift_out_ee_bits(hw, EEPROM_WRITE_OPCODE_MICROWIRE,
+ eeprom->opcode_bits);
+
+ e1000_shift_out_ee_bits(hw, (u16)(offset + words_written),
+ eeprom->address_bits);
+
+ /* Send the data */
+ e1000_shift_out_ee_bits(hw, data[words_written], 16);
+
+ /* Toggle the CS line. This in effect tells the EEPROM to execute
+ * the previous command.
+ */
+ e1000_standby_eeprom(hw);
+
+ /* Read DO repeatedly until it is high (equal to '1'). The EEPROM will
+ * signal that the command has been completed by raising the DO signal.
+ * If DO does not go high in 10 milliseconds, then error out.
+ */
+ for (i = 0; i < 200; i++) {
+ eecd = er32(EECD);
+ if (eecd & E1000_EECD_DO) break;
+ udelay(50);
+ }
+ if (i == 200) {
+ DEBUGOUT("EEPROM Write did not complete\n");
+ return -E1000_ERR_EEPROM;
+ }
+
+ /* Recover from write */
+ e1000_standby_eeprom(hw);
+
+ words_written++;
+ }
+
+ /* Send the write disable command to the EEPROM (3-bit opcode plus
+ * 6/8-bit dummy address beginning with 10). It's less work to include
+ * the 10 of the dummy address as part of the opcode than it is to shift
+ * it over the correct number of bits for the address. This takes the
+ * EEPROM out of write/erase mode.
+ */
+ e1000_shift_out_ee_bits(hw, EEPROM_EWDS_OPCODE_MICROWIRE,
+ (u16)(eeprom->opcode_bits + 2));
+
+ e1000_shift_out_ee_bits(hw, 0, (u16)(eeprom->address_bits - 2));
+
+ return E1000_SUCCESS;
+}
+
+/******************************************************************************
+ * Flushes the cached EEPROM contents to NVM. This is done by writing the
+ * modified values from the EEPROM cache, and the unmodified values from the
+ * currently active bank, to the new bank.
+ *
+ * hw - Struct containing variables accessed by shared code
+ *****************************************************************************/
+static s32 e1000_commit_shadow_ram(struct e1000_hw *hw)
+{
+ u32 attempts = 100000;
+ u32 eecd = 0;
+ u32 flop = 0;
+ u32 i = 0;
+ s32 error = E1000_SUCCESS;
+ u32 old_bank_offset = 0;
+ u32 new_bank_offset = 0;
+ u8 low_byte = 0;
+ u8 high_byte = 0;
+ bool sector_write_failed = false;
+
+ if (hw->mac_type == e1000_82573) {
+ /* The flop register will be used to determine if flash type is STM */
+ flop = er32(FLOP);
+ for (i=0; i < attempts; i++) {
+ eecd = er32(EECD);
+ if ((eecd & E1000_EECD_FLUPD) == 0) {
+ break;
+ }
+ udelay(5);
+ }
+
+ if (i == attempts) {
+ return -E1000_ERR_EEPROM;
+ }
+
+ /* If STM opcode located in bits 15:8 of flop, reset firmware */
+ if ((flop & 0xFF00) == E1000_STM_OPCODE) {
+ ew32(HICR, E1000_HICR_FW_RESET);
+ }
+
+ /* Perform the flash update */
+ ew32(EECD, eecd | E1000_EECD_FLUPD);
+
+ for (i=0; i < attempts; i++) {
+ eecd = er32(EECD);
+ if ((eecd & E1000_EECD_FLUPD) == 0) {
+ break;
+ }
+ udelay(5);
+ }
+
+ if (i == attempts) {
+ return -E1000_ERR_EEPROM;
+ }
+ }
+
+ if (hw->mac_type == e1000_ich8lan && hw->eeprom_shadow_ram != NULL) {
+ /* We're writing to the opposite bank so if we're on bank 1,
+ * write to bank 0 etc. We also need to erase the segment that
+ * is going to be written */
+ if (!(er32(EECD) & E1000_EECD_SEC1VAL)) {
+ new_bank_offset = hw->flash_bank_size * 2;
+ old_bank_offset = 0;
+ e1000_erase_ich8_4k_segment(hw, 1);
+ } else {
+ old_bank_offset = hw->flash_bank_size * 2;
+ new_bank_offset = 0;
+ e1000_erase_ich8_4k_segment(hw, 0);
+ }
+
+ sector_write_failed = false;
+ /* Loop over every word in the shadow RAM; each word is written
+ * to flash as two bytes (low byte first). */
+ for (i = 0; i < E1000_SHADOW_RAM_WORDS; i++) {
+ /* Determine whether to write the value stored
+ * in the other NVM bank or a modified value stored
+ * in the shadow RAM */
+ if (hw->eeprom_shadow_ram[i].modified) {
+ low_byte = (u8)hw->eeprom_shadow_ram[i].eeprom_word;
+ udelay(100);
+ error = e1000_verify_write_ich8_byte(hw,
+ (i << 1) + new_bank_offset, low_byte);
+
+ if (error != E1000_SUCCESS)
+ sector_write_failed = true;
+ else {
+ high_byte =
+ (u8)(hw->eeprom_shadow_ram[i].eeprom_word >> 8);
+ udelay(100);
+ }
+ } else {
+ e1000_read_ich8_byte(hw, (i << 1) + old_bank_offset,
+ &low_byte);
+ udelay(100);
+ error = e1000_verify_write_ich8_byte(hw,
+ (i << 1) + new_bank_offset, low_byte);
+
+ if (error != E1000_SUCCESS)
+ sector_write_failed = true;
+ else {
+ e1000_read_ich8_byte(hw, (i << 1) + old_bank_offset + 1,
+ &high_byte);
+ udelay(100);
+ }
+ }
+
+ /* If the write of the low byte was successful, go ahead and
+ * write the high byte while checking to make sure that if it
+ * is the signature byte, then it is handled properly */
+ if (!sector_write_failed) {
+ /* If the word is 0x13, then make sure the signature bits
+ * (15:14) are 11b until the commit has completed.
+ * This will allow us to write 10b which indicates the
+ * signature is valid. We want to do this after the write
+ * has completed so that we don't mark the segment valid
+ * while the write is still in progress */
+ if (i == E1000_ICH_NVM_SIG_WORD)
+ high_byte = E1000_ICH_NVM_SIG_MASK | high_byte;
+
+ error = e1000_verify_write_ich8_byte(hw,
+ (i << 1) + new_bank_offset + 1, high_byte);
+ if (error != E1000_SUCCESS)
+ sector_write_failed = true;
+
+ } else {
+ /* If the write failed then break from the loop and
+ * return an error */
+ break;
+ }
+ }
+
+ /* Don't bother writing the segment valid bits if sector
+ * programming failed. */
+ if (!sector_write_failed) {
+ /* Finally validate the new segment by setting bits 15:14
+ * to 10b in word 0x13. This can be done without an
+ * erase as well, since these bits are 11 to start with
+ * and we only need to change bit 14 to 0. */
+ e1000_read_ich8_byte(hw,
+ E1000_ICH_NVM_SIG_WORD * 2 + 1 + new_bank_offset,
+ &high_byte);
+ high_byte &= 0xBF;
+ error = e1000_verify_write_ich8_byte(hw,
+ E1000_ICH_NVM_SIG_WORD * 2 + 1 + new_bank_offset, high_byte);
+ /* And invalidate the previously valid segment by setting
+ * its signature word (0x13) high_byte to 0b. This can be
+ * done without an erase because flash erase sets all bits
+ * to 1's. We can write 1's to 0's without an erase */
+ if (error == E1000_SUCCESS) {
+ error = e1000_verify_write_ich8_byte(hw,
+ E1000_ICH_NVM_SIG_WORD * 2 + 1 + old_bank_offset, 0);
+ }
+
+ /* Clear the now-unused entries in the cache */
+ for (i = 0; i < E1000_SHADOW_RAM_WORDS; i++) {
+ hw->eeprom_shadow_ram[i].modified = false;
+ hw->eeprom_shadow_ram[i].eeprom_word = 0xFFFF;
+ }
+ }
+ }
+
+ return error;
+}
+
+/******************************************************************************
+ * Reads the adapter's MAC address from the EEPROM and inverts the LSB for the
+ * second function of dual function devices
+ *
+ * hw - Struct containing variables accessed by shared code
+ *****************************************************************************/
+s32 e1000_read_mac_addr(struct e1000_hw *hw)
+{
+ u16 offset;
+ u16 eeprom_data, i;
+
+ DEBUGFUNC("e1000_read_mac_addr");
+
+ for (i = 0; i < NODE_ADDRESS_SIZE; i += 2) {
+ offset = i >> 1;
+ if (e1000_read_eeprom(hw, offset, 1, &eeprom_data) < 0) {
+ DEBUGOUT("EEPROM Read Error\n");
+ return -E1000_ERR_EEPROM;
+ }
+ hw->perm_mac_addr[i] = (u8)(eeprom_data & 0x00FF);
+ hw->perm_mac_addr[i+1] = (u8)(eeprom_data >> 8);
+ }
+
+ switch (hw->mac_type) {
+ default:
+ break;
+ case e1000_82546:
+ case e1000_82546_rev_3:
+ case e1000_82571:
+ case e1000_80003es2lan:
+ if (er32(STATUS) & E1000_STATUS_FUNC_1)
+ hw->perm_mac_addr[5] ^= 0x01;
+ break;
+ }
+
+ for (i = 0; i < NODE_ADDRESS_SIZE; i++)
+ hw->mac_addr[i] = hw->perm_mac_addr[i];
+ return E1000_SUCCESS;
+}
+
+/******************************************************************************
+ * Initializes receive address filters.
+ *
+ * hw - Struct containing variables accessed by shared code
+ *
+ * Places the MAC address in receive address register 0 and clears the rest
+ * of the receive address registers. Clears the multicast table. Assumes
+ * the receiver is in reset when the routine is called.
+ *****************************************************************************/
+static void e1000_init_rx_addrs(struct e1000_hw *hw)
+{
+ u32 i;
+ u32 rar_num;
+
+ DEBUGFUNC("e1000_init_rx_addrs");
+
+ /* Setup the receive address. */
+ DEBUGOUT("Programming MAC Address into RAR[0]\n");
+
+ e1000_rar_set(hw, hw->mac_addr, 0);
+
+ rar_num = E1000_RAR_ENTRIES;
+
+ /* Reserve a spot for the Locally Administered Address to work around
+ * an 82571 issue in which a reset on one port will reload the MAC on
+ * the other port. */
+ if ((hw->mac_type == e1000_82571) && (hw->laa_is_present))
+ rar_num -= 1;
+ if (hw->mac_type == e1000_ich8lan)
+ rar_num = E1000_RAR_ENTRIES_ICH8LAN;
+
+ /* Zero out the other 15 receive addresses. */
+ DEBUGOUT("Clearing RAR[1-15]\n");
+ for (i = 1; i < rar_num; i++) {
+ E1000_WRITE_REG_ARRAY(hw, RA, (i << 1), 0);
+ E1000_WRITE_FLUSH();
+ E1000_WRITE_REG_ARRAY(hw, RA, ((i << 1) + 1), 0);
+ E1000_WRITE_FLUSH();
+ }
+}
+
+/******************************************************************************
+ * Hashes an address to determine its location in the multicast table
+ *
+ * hw - Struct containing variables accessed by shared code
+ * mc_addr - the multicast address to hash
+ *****************************************************************************/
+u32 e1000_hash_mc_addr(struct e1000_hw *hw, u8 *mc_addr)
+{
+ u32 hash_value = 0;
+
+ /* The portion of the address that is used for the hash table is
+ * determined by the mc_filter_type setting.
+ */
+ switch (hw->mc_filter_type) {
+ /* [0] [1] [2] [3] [4] [5]
+ * 01 AA 00 12 34 56
+ * LSB MSB
+ */
+ case 0:
+ if (hw->mac_type == e1000_ich8lan) {
+ /* [47:38] i.e. 0x158 for above example address */
+ hash_value = ((mc_addr[4] >> 6) | (((u16)mc_addr[5]) << 2));
+ } else {
+ /* [47:36] i.e. 0x563 for above example address */
+ hash_value = ((mc_addr[4] >> 4) | (((u16)mc_addr[5]) << 4));
+ }
+ break;
+ case 1:
+ if (hw->mac_type == e1000_ich8lan) {
+ /* [46:37] i.e. 0x2B1 for above example address */
+ hash_value = ((mc_addr[4] >> 5) | (((u16)mc_addr[5]) << 3));
+ } else {
+ /* [46:35] i.e. 0xAC6 for above example address */
+ hash_value = ((mc_addr[4] >> 3) | (((u16)mc_addr[5]) << 5));
+ }
+ break;
+ case 2:
+ if (hw->mac_type == e1000_ich8lan) {
+ /*[45:36] i.e. 0x163 for above example address */
+ hash_value = ((mc_addr[4] >> 4) | (((u16)mc_addr[5]) << 4));
+ } else {
+ /* [45:34] i.e. 0x5D8 for above example address */
+ hash_value = ((mc_addr[4] >> 2) | (((u16)mc_addr[5]) << 6));
+ }
+ break;
+ case 3:
+ if (hw->mac_type == e1000_ich8lan) {
+ /* [43:34] i.e. 0x18D for above example address */
+ hash_value = ((mc_addr[4] >> 2) | (((u16)mc_addr[5]) << 6));
+ } else {
+ /* [43:32] i.e. 0x634 for above example address */
+ hash_value = ((mc_addr[4]) | (((u16)mc_addr[5]) << 8));
+ }
+ break;
+ }
+
+ hash_value &= 0xFFF;
+ if (hw->mac_type == e1000_ich8lan)
+ hash_value &= 0x3FF;
+
+ return hash_value;
+}
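+
+/* Illustrative sketch (guarded by #if 0, not compiled): the filter-type-0
+ * hash above, evaluated for the example address 01:AA:00:12:34:56 on a
+ * non-ICH8 part. With mc_addr[4] = 0x34 and mc_addr[5] = 0x56 the hash is
+ * (0x34 >> 4) | (0x56 << 4) = 0x563, the value quoted in the comment; the
+ * result is then split into an MTA register index (upper 7 bits) and a bit
+ * position (lower 5 bits), as done in e1000_mta_set() below.
+ */
+#if 0
+static void model_hash_example(void)
+{
+ u8 mc_addr[6] = { 0x01, 0xAA, 0x00, 0x12, 0x34, 0x56 };
+ u32 hash_value = ((mc_addr[4] >> 4) | (((u16)mc_addr[5]) << 4)) & 0xFFF;
+ u32 hash_reg = (hash_value >> 5) & 0x7F; /* 0x2B */
+ u32 hash_bit = hash_value & 0x1F; /* 0x03 */
+
+ /* hash_value == 0x563, so MTA[0x2B] bit 3 selects this address */
+ (void)hash_reg;
+ (void)hash_bit;
+}
+#endif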
+
+/******************************************************************************
+ * Sets the bit in the multicast table corresponding to the hash value.
+ *
+ * hw - Struct containing variables accessed by shared code
+ * hash_value - Multicast address hash value
+ *****************************************************************************/
+void e1000_mta_set(struct e1000_hw *hw, u32 hash_value)
+{
+ u32 hash_bit, hash_reg;
+ u32 mta;
+ u32 temp;
+
+ /* The MTA is a register array of 128 32-bit registers.
+ * It is treated like an array of 4096 bits. We want to set
+ * bit BitArray[hash_value]. So we figure out what register
+ * the bit is in, read it, OR in the new bit, then write
+ * back the new value. The register is determined by the
+ * upper 7 bits of the hash value and the bit within that
+ * register is determined by the lower 5 bits of the value.
+ */
+ hash_reg = (hash_value >> 5) & 0x7F;
+ if (hw->mac_type == e1000_ich8lan)
+ hash_reg &= 0x1F;
+
+ hash_bit = hash_value & 0x1F;
+
+ mta = E1000_READ_REG_ARRAY(hw, MTA, hash_reg);
+
+ mta |= (1 << hash_bit);
+
+ /* If we are on an 82544 and we are trying to write an odd offset
+ * in the MTA, save off the previous entry before writing and
+ * restore the old value after writing.
+ */
+ if ((hw->mac_type == e1000_82544) && ((hash_reg & 0x1) == 1)) {
+ temp = E1000_READ_REG_ARRAY(hw, MTA, (hash_reg - 1));
+ E1000_WRITE_REG_ARRAY(hw, MTA, hash_reg, mta);
+ E1000_WRITE_FLUSH();
+ E1000_WRITE_REG_ARRAY(hw, MTA, (hash_reg - 1), temp);
+ E1000_WRITE_FLUSH();
+ } else {
+ E1000_WRITE_REG_ARRAY(hw, MTA, hash_reg, mta);
+ E1000_WRITE_FLUSH();
+ }
+}
+
+/******************************************************************************
+ * Puts an ethernet address into a receive address register.
+ *
+ * hw - Struct containing variables accessed by shared code
+ * addr - Address to put into receive address register
+ * index - Receive address register to write
+ *****************************************************************************/
+void e1000_rar_set(struct e1000_hw *hw, u8 *addr, u32 index)
+{
+ u32 rar_low, rar_high;
+
+ /* HW expects these in little endian so we reverse the byte order
+ * from network order (big endian) to little endian
+ */
+ rar_low = ((u32)addr[0] | ((u32)addr[1] << 8) |
+ ((u32)addr[2] << 16) | ((u32)addr[3] << 24));
+ rar_high = ((u32)addr[4] | ((u32)addr[5] << 8));
+
+ /* Disable Rx and flush all Rx frames before enabling RSS to avoid Rx
+ * unit hang.
+ *
+ * Description:
+ * If there are any Rx frames queued up or otherwise present in the HW
+ * before RSS is enabled, and then we enable RSS, the HW Rx unit will
+ * hang. To work around this issue, we have to disable receives and
+ * flush out all Rx frames before we enable RSS. To do so, we modify we
+ * redirect all Rx traffic to manageability and then reset the HW.
+ * This flushes away Rx frames, and (since the redirections to
+ * manageability persists across resets) keeps new ones from coming in
+ * while we work. Then, we clear the Address Valid AV bit for all MAC
+ * addresses and undo the re-direction to manageability.
+ * Now, frames are coming in again, but the MAC won't accept them, so
+ * far so good. We now proceed to initialize RSS (if necessary) and
+ * configure the Rx unit. Last, we re-enable the AV bits and continue
+ * on our merry way.
+ */
+ switch (hw->mac_type) {
+ case e1000_82571:
+ case e1000_82572:
+ case e1000_80003es2lan:
+ if (hw->leave_av_bit_off)
+ break;
+ default:
+ /* Indicate to hardware the Address is Valid. */
+ rar_high |= E1000_RAH_AV;
+ break;
+ }
+
+ E1000_WRITE_REG_ARRAY(hw, RA, (index << 1), rar_low);
+ E1000_WRITE_FLUSH();
+ E1000_WRITE_REG_ARRAY(hw, RA, ((index << 1) + 1), rar_high);
+ E1000_WRITE_FLUSH();
+}
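+
+/* Illustrative sketch (guarded by #if 0, not compiled): the RAL/RAH packing
+ * performed above, evaluated for the example address 00:11:22:33:44:55. The
+ * first four bytes land in the low register with addr[0] in the least
+ * significant byte, the last two bytes go into the high register, and
+ * E1000_RAH_AV marks the entry as valid.
+ */
+#if 0
+static void model_rar_packing(void)
+{
+ u8 addr[6] = { 0x00, 0x11, 0x22, 0x33, 0x44, 0x55 };
+ u32 rar_low = (u32)addr[0] | ((u32)addr[1] << 8) |
+ ((u32)addr[2] << 16) | ((u32)addr[3] << 24);
+ u32 rar_high = (u32)addr[4] | ((u32)addr[5] << 8) | E1000_RAH_AV;
+
+ /* rar_low == 0x33221100, rar_high == 0x5544 | E1000_RAH_AV */
+ (void)rar_low;
+ (void)rar_high;
+}
+#endif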
+
+/******************************************************************************
+ * Writes a value to the specified offset in the VLAN filter table.
+ *
+ * hw - Struct containing variables accessed by shared code
+ * offset - Offset in VLAN filter table to write
+ * value - Value to write into VLAN filter table
+ *****************************************************************************/
+void e1000_write_vfta(struct e1000_hw *hw, u32 offset, u32 value)
+{
+ u32 temp;
+
+ if (hw->mac_type == e1000_ich8lan)
+ return;
+
+ if ((hw->mac_type == e1000_82544) && ((offset & 0x1) == 1)) {
+ temp = E1000_READ_REG_ARRAY(hw, VFTA, (offset - 1));
+ E1000_WRITE_REG_ARRAY(hw, VFTA, offset, value);
+ E1000_WRITE_FLUSH();
+ E1000_WRITE_REG_ARRAY(hw, VFTA, (offset - 1), temp);
+ E1000_WRITE_FLUSH();
+ } else {
+ E1000_WRITE_REG_ARRAY(hw, VFTA, offset, value);
+ E1000_WRITE_FLUSH();
+ }
+}
+
+/******************************************************************************
+ * Clears the VLAN filter table
+ *
+ * hw - Struct containing variables accessed by shared code
+ *****************************************************************************/
+static void e1000_clear_vfta(struct e1000_hw *hw)
+{
+ u32 offset;
+ u32 vfta_value = 0;
+ u32 vfta_offset = 0;
+ u32 vfta_bit_in_reg = 0;
+
+ if (hw->mac_type == e1000_ich8lan)
+ return;
+
+ if (hw->mac_type == e1000_82573) {
+ if (hw->mng_cookie.vlan_id != 0) {
+ /* The VFTA is a 4096-bit field, each bit identifying a
+ * single VLAN ID. The following operations determine which
+ * 32-bit entry (i.e. offset) of the array, and which bit
+ * within it, correspond to the manageability unit's VLAN ID. */
+ vfta_offset = (hw->mng_cookie.vlan_id >>
+ E1000_VFTA_ENTRY_SHIFT) &
+ E1000_VFTA_ENTRY_MASK;
+ vfta_bit_in_reg = 1 << (hw->mng_cookie.vlan_id &
+ E1000_VFTA_ENTRY_BIT_SHIFT_MASK);
+ }
+ }
+ for (offset = 0; offset < E1000_VLAN_FILTER_TBL_SIZE; offset++) {
+ /* If the offset we want to clear is the same offset of the
+ * manageability VLAN ID, then clear all bits except that of the
+ * manageability unit */
+ vfta_value = (offset == vfta_offset) ? vfta_bit_in_reg : 0;
+ E1000_WRITE_REG_ARRAY(hw, VFTA, offset, vfta_value);
+ E1000_WRITE_FLUSH();
+ }
+}
+
+static s32 e1000_id_led_init(struct e1000_hw *hw)
+{
+ u32 ledctl;
+ const u32 ledctl_mask = 0x000000FF;
+ const u32 ledctl_on = E1000_LEDCTL_MODE_LED_ON;
+ const u32 ledctl_off = E1000_LEDCTL_MODE_LED_OFF;
+ u16 eeprom_data, i, temp;
+ const u16 led_mask = 0x0F;
+
+ DEBUGFUNC("e1000_id_led_init");
+
+ if (hw->mac_type < e1000_82540) {
+ /* Nothing to do */
+ return E1000_SUCCESS;
+ }
+
+ ledctl = er32(LEDCTL);
+ hw->ledctl_default = ledctl;
+ hw->ledctl_mode1 = hw->ledctl_default;
+ hw->ledctl_mode2 = hw->ledctl_default;
+
+ if (e1000_read_eeprom(hw, EEPROM_ID_LED_SETTINGS, 1, &eeprom_data) < 0) {
+ DEBUGOUT("EEPROM Read Error\n");
+ return -E1000_ERR_EEPROM;
+ }
+
+ if ((hw->mac_type == e1000_82573) &&
+ (eeprom_data == ID_LED_RESERVED_82573))
+ eeprom_data = ID_LED_DEFAULT_82573;
+ else if ((eeprom_data == ID_LED_RESERVED_0000) ||
+ (eeprom_data == ID_LED_RESERVED_FFFF)) {
+ if (hw->mac_type == e1000_ich8lan)
+ eeprom_data = ID_LED_DEFAULT_ICH8LAN;
+ else
+ eeprom_data = ID_LED_DEFAULT;
+ }
+
+ for (i = 0; i < 4; i++) {
+ temp = (eeprom_data >> (i << 2)) & led_mask;
+ switch (temp) {
+ case ID_LED_ON1_DEF2:
+ case ID_LED_ON1_ON2:
+ case ID_LED_ON1_OFF2:
+ hw->ledctl_mode1 &= ~(ledctl_mask << (i << 3));
+ hw->ledctl_mode1 |= ledctl_on << (i << 3);
+ break;
+ case ID_LED_OFF1_DEF2:
+ case ID_LED_OFF1_ON2:
+ case ID_LED_OFF1_OFF2:
+ hw->ledctl_mode1 &= ~(ledctl_mask << (i << 3));
+ hw->ledctl_mode1 |= ledctl_off << (i << 3);
+ break;
+ default:
+ /* Do nothing */
+ break;
+ }
+ switch (temp) {
+ case ID_LED_DEF1_ON2:
+ case ID_LED_ON1_ON2:
+ case ID_LED_OFF1_ON2:
+ hw->ledctl_mode2 &= ~(ledctl_mask << (i << 3));
+ hw->ledctl_mode2 |= ledctl_on << (i << 3);
+ break;
+ case ID_LED_DEF1_OFF2:
+ case ID_LED_ON1_OFF2:
+ case ID_LED_OFF1_OFF2:
+ hw->ledctl_mode2 &= ~(ledctl_mask << (i << 3));
+ hw->ledctl_mode2 |= ledctl_off << (i << 3);
+ break;
+ default:
+ /* Do nothing */
+ break;
+ }
+ }
+ return E1000_SUCCESS;
+}
+
+/******************************************************************************
+ * Prepares SW controllable LED for use and saves the current state of the LED.
+ *
+ * hw - Struct containing variables accessed by shared code
+ *****************************************************************************/
+s32 e1000_setup_led(struct e1000_hw *hw)
+{
+ u32 ledctl;
+ s32 ret_val = E1000_SUCCESS;
+
+ DEBUGFUNC("e1000_setup_led");
+
+ switch (hw->mac_type) {
+ case e1000_82542_rev2_0:
+ case e1000_82542_rev2_1:
+ case e1000_82543:
+ case e1000_82544:
+ /* No setup necessary */
+ break;
+ case e1000_82541:
+ case e1000_82547:
+ case e1000_82541_rev_2:
+ case e1000_82547_rev_2:
+ /* Turn off PHY Smart Power Down (if enabled) */
+ ret_val = e1000_read_phy_reg(hw, IGP01E1000_GMII_FIFO,
+ &hw->phy_spd_default);
+ if (ret_val)
+ return ret_val;
+ ret_val = e1000_write_phy_reg(hw, IGP01E1000_GMII_FIFO,
+ (u16)(hw->phy_spd_default &
+ ~IGP01E1000_GMII_SPD));
+ if (ret_val)
+ return ret_val;
+ /* Fall Through */
+ default:
+ if (hw->media_type == e1000_media_type_fiber) {
+ ledctl = er32(LEDCTL);
+ /* Save current LEDCTL settings */
+ hw->ledctl_default = ledctl;
+ /* Turn off LED0 */
+ ledctl &= ~(E1000_LEDCTL_LED0_IVRT |
+ E1000_LEDCTL_LED0_BLINK |
+ E1000_LEDCTL_LED0_MODE_MASK);
+ ledctl |= (E1000_LEDCTL_MODE_LED_OFF <<
+ E1000_LEDCTL_LED0_MODE_SHIFT);
+ ew32(LEDCTL, ledctl);
+ } else if (hw->media_type == e1000_media_type_copper)
+ ew32(LEDCTL, hw->ledctl_mode1);
+ break;
+ }
+
+ return E1000_SUCCESS;
+}
+
+
+/******************************************************************************
+ * Used on 82571 and later Si that has LED blink bits.
+ * Callers must use their own timer and should have already called
+ * e1000_id_led_init().
+ * Call e1000_cleanup_led() to stop blinking.
+ *
+ * hw - Struct containing variables accessed by shared code
+ *****************************************************************************/
+s32 e1000_blink_led_start(struct e1000_hw *hw)
+{
+ s16 i;
+ u32 ledctl_blink = 0;
+
+ DEBUGFUNC("e1000_id_led_blink_on");
+
+ if (hw->mac_type < e1000_82571) {
+ /* Nothing to do */
+ return E1000_SUCCESS;
+ }
+ if (hw->media_type == e1000_media_type_fiber) {
+ /* always blink LED0 for PCI-E fiber */
+ ledctl_blink = E1000_LEDCTL_LED0_BLINK |
+ (E1000_LEDCTL_MODE_LED_ON << E1000_LEDCTL_LED0_MODE_SHIFT);
+ } else {
+ /* set the blink bit for each LED that's "on" (0x0E) in ledctl_mode2 */
+ ledctl_blink = hw->ledctl_mode2;
+ for (i=0; i < 4; i++)
+ if (((hw->ledctl_mode2 >> (i * 8)) & 0xFF) ==
+ E1000_LEDCTL_MODE_LED_ON)
+ ledctl_blink |= (E1000_LEDCTL_LED0_BLINK << (i * 8));
+ }
+
+ ew32(LEDCTL, ledctl_blink);
+
+ return E1000_SUCCESS;
+}
+
+/******************************************************************************
+ * Restores the saved state of the SW controllable LED.
+ *
+ * hw - Struct containing variables accessed by shared code
+ *****************************************************************************/
+s32 e1000_cleanup_led(struct e1000_hw *hw)
+{
+ s32 ret_val = E1000_SUCCESS;
+
+ DEBUGFUNC("e1000_cleanup_led");
+
+ switch (hw->mac_type) {
+ case e1000_82542_rev2_0:
+ case e1000_82542_rev2_1:
+ case e1000_82543:
+ case e1000_82544:
+ /* No cleanup necessary */
+ break;
+ case e1000_82541:
+ case e1000_82547:
+ case e1000_82541_rev_2:
+ case e1000_82547_rev_2:
+ /* Turn on PHY Smart Power Down (if previously enabled) */
+ ret_val = e1000_write_phy_reg(hw, IGP01E1000_GMII_FIFO,
+ hw->phy_spd_default);
+ if (ret_val)
+ return ret_val;
+ /* Fall Through */
+ default:
+ if (hw->phy_type == e1000_phy_ife) {
+ e1000_write_phy_reg(hw, IFE_PHY_SPECIAL_CONTROL_LED, 0);
+ break;
+ }
+ /* Restore LEDCTL settings */
+ ew32(LEDCTL, hw->ledctl_default);
+ break;
+ }
+
+ return E1000_SUCCESS;
+}
+
+/******************************************************************************
+ * Turns on the software controllable LED
+ *
+ * hw - Struct containing variables accessed by shared code
+ *****************************************************************************/
+s32 e1000_led_on(struct e1000_hw *hw)
+{
+ u32 ctrl = er32(CTRL);
+
+ DEBUGFUNC("e1000_led_on");
+
+ switch (hw->mac_type) {
+ case e1000_82542_rev2_0:
+ case e1000_82542_rev2_1:
+ case e1000_82543:
+ /* Set SW Definable Pin 0 to turn on the LED */
+ ctrl |= E1000_CTRL_SWDPIN0;
+ ctrl |= E1000_CTRL_SWDPIO0;
+ break;
+ case e1000_82544:
+ if (hw->media_type == e1000_media_type_fiber) {
+ /* Set SW Definable Pin 0 to turn on the LED */
+ ctrl |= E1000_CTRL_SWDPIN0;
+ ctrl |= E1000_CTRL_SWDPIO0;
+ } else {
+ /* Clear SW Definable Pin 0 to turn on the LED */
+ ctrl &= ~E1000_CTRL_SWDPIN0;
+ ctrl |= E1000_CTRL_SWDPIO0;
+ }
+ break;
+ default:
+ if (hw->media_type == e1000_media_type_fiber) {
+ /* Clear SW Definable Pin 0 to turn on the LED */
+ ctrl &= ~E1000_CTRL_SWDPIN0;
+ ctrl |= E1000_CTRL_SWDPIO0;
+ } else if (hw->phy_type == e1000_phy_ife) {
+ e1000_write_phy_reg(hw, IFE_PHY_SPECIAL_CONTROL_LED,
+ (IFE_PSCL_PROBE_MODE | IFE_PSCL_PROBE_LEDS_ON));
+ } else if (hw->media_type == e1000_media_type_copper) {
+ ew32(LEDCTL, hw->ledctl_mode2);
+ return E1000_SUCCESS;
+ }
+ break;
+ }
+
+ ew32(CTRL, ctrl);
+
+ return E1000_SUCCESS;
+}
+
+/******************************************************************************
+ * Turns off the software controllable LED
+ *
+ * hw - Struct containing variables accessed by shared code
+ *****************************************************************************/
+s32 e1000_led_off(struct e1000_hw *hw)
+{
+ u32 ctrl = er32(CTRL);
+
+ DEBUGFUNC("e1000_led_off");
+
+ switch (hw->mac_type) {
+ case e1000_82542_rev2_0:
+ case e1000_82542_rev2_1:
+ case e1000_82543:
+ /* Clear SW Definable Pin 0 to turn off the LED */
+ ctrl &= ~E1000_CTRL_SWDPIN0;
+ ctrl |= E1000_CTRL_SWDPIO0;
+ break;
+ case e1000_82544:
+ if (hw->media_type == e1000_media_type_fiber) {
+ /* Clear SW Definable Pin 0 to turn off the LED */
+ ctrl &= ~E1000_CTRL_SWDPIN0;
+ ctrl |= E1000_CTRL_SWDPIO0;
+ } else {
+ /* Set SW Definable Pin 0 to turn off the LED */
+ ctrl |= E1000_CTRL_SWDPIN0;
+ ctrl |= E1000_CTRL_SWDPIO0;
+ }
+ break;
+ default:
+ if (hw->media_type == e1000_media_type_fiber) {
+ /* Set SW Definable Pin 0 to turn off the LED */
+ ctrl |= E1000_CTRL_SWDPIN0;
+ ctrl |= E1000_CTRL_SWDPIO0;
+ } else if (hw->phy_type == e1000_phy_ife) {
+ e1000_write_phy_reg(hw, IFE_PHY_SPECIAL_CONTROL_LED,
+ (IFE_PSCL_PROBE_MODE | IFE_PSCL_PROBE_LEDS_OFF));
+ } else if (hw->media_type == e1000_media_type_copper) {
+ ew32(LEDCTL, hw->ledctl_mode1);
+ return E1000_SUCCESS;
+ }
+ break;
+ }
+
+ ew32(CTRL, ctrl);
+
+ return E1000_SUCCESS;
+}
+
+/******************************************************************************
+ * Clears all hardware statistics counters.
+ *
+ * hw - Struct containing variables accessed by shared code
+ *****************************************************************************/
+static void e1000_clear_hw_cntrs(struct e1000_hw *hw)
+{
+ volatile u32 temp;
+
+ temp = er32(CRCERRS);
+ temp = er32(SYMERRS);
+ temp = er32(MPC);
+ temp = er32(SCC);
+ temp = er32(ECOL);
+ temp = er32(MCC);
+ temp = er32(LATECOL);
+ temp = er32(COLC);
+ temp = er32(DC);
+ temp = er32(SEC);
+ temp = er32(RLEC);
+ temp = er32(XONRXC);
+ temp = er32(XONTXC);
+ temp = er32(XOFFRXC);
+ temp = er32(XOFFTXC);
+ temp = er32(FCRUC);
+
+ if (hw->mac_type != e1000_ich8lan) {
+ temp = er32(PRC64);
+ temp = er32(PRC127);
+ temp = er32(PRC255);
+ temp = er32(PRC511);
+ temp = er32(PRC1023);
+ temp = er32(PRC1522);
+ }
+
+ temp = er32(GPRC);
+ temp = er32(BPRC);
+ temp = er32(MPRC);
+ temp = er32(GPTC);
+ temp = er32(GORCL);
+ temp = er32(GORCH);
+ temp = er32(GOTCL);
+ temp = er32(GOTCH);
+ temp = er32(RNBC);
+ temp = er32(RUC);
+ temp = er32(RFC);
+ temp = er32(ROC);
+ temp = er32(RJC);
+ temp = er32(TORL);
+ temp = er32(TORH);
+ temp = er32(TOTL);
+ temp = er32(TOTH);
+ temp = er32(TPR);
+ temp = er32(TPT);
+
+ if (hw->mac_type != e1000_ich8lan) {
+ temp = er32(PTC64);
+ temp = er32(PTC127);
+ temp = er32(PTC255);
+ temp = er32(PTC511);
+ temp = er32(PTC1023);
+ temp = er32(PTC1522);
+ }
+
+ temp = er32(MPTC);
+ temp = er32(BPTC);
+
+ if (hw->mac_type < e1000_82543) return;
+
+ temp = er32(ALGNERRC);
+ temp = er32(RXERRC);
+ temp = er32(TNCRS);
+ temp = er32(CEXTERR);
+ temp = er32(TSCTC);
+ temp = er32(TSCTFC);
+
+ if (hw->mac_type <= e1000_82544) return;
+
+ temp = er32(MGTPRC);
+ temp = er32(MGTPDC);
+ temp = er32(MGTPTC);
+
+ if (hw->mac_type <= e1000_82547_rev_2) return;
+
+ temp = er32(IAC);
+ temp = er32(ICRXOC);
+
+ if (hw->mac_type == e1000_ich8lan) return;
+
+ temp = er32(ICRXPTC);
+ temp = er32(ICRXATC);
+ temp = er32(ICTXPTC);
+ temp = er32(ICTXATC);
+ temp = er32(ICTXQEC);
+ temp = er32(ICTXQMTC);
+ temp = er32(ICRXDMTC);
+}
+
+/******************************************************************************
+ * Resets Adaptive IFS to its default state.
+ *
+ * hw - Struct containing variables accessed by shared code
+ *
+ * Call this after e1000_init_hw. You may override the IFS defaults by setting
+ * hw->ifs_params_forced to true. However, you must initialize hw->
+ * current_ifs_val, ifs_min_val, ifs_max_val, ifs_step_size, and ifs_ratio
+ * before calling this function.
+ *****************************************************************************/
+void e1000_reset_adaptive(struct e1000_hw *hw)
+{
+ DEBUGFUNC("e1000_reset_adaptive");
+
+ if (hw->adaptive_ifs) {
+ if (!hw->ifs_params_forced) {
+ hw->current_ifs_val = 0;
+ hw->ifs_min_val = IFS_MIN;
+ hw->ifs_max_val = IFS_MAX;
+ hw->ifs_step_size = IFS_STEP;
+ hw->ifs_ratio = IFS_RATIO;
+ }
+ hw->in_ifs_mode = false;
+ ew32(AIT, 0);
+ } else {
+ DEBUGOUT("Not in Adaptive IFS mode!\n");
+ }
+}
+
+/******************************************************************************
+ * Called during the callback/watchdog routine to update IFS value based on
+ * the ratio of transmits to collisions.
+ *
+ * hw - Struct containing variables accessed by shared code
+ * (hw->tx_packet_delta and hw->collision_delta carry the number of
+ * transmits and collisions seen since the last call)
+ *****************************************************************************/
+void e1000_update_adaptive(struct e1000_hw *hw)
+{
+ DEBUGFUNC("e1000_update_adaptive");
+
+ if (hw->adaptive_ifs) {
+ if ((hw->collision_delta * hw->ifs_ratio) > hw->tx_packet_delta) {
+ if (hw->tx_packet_delta > MIN_NUM_XMITS) {
+ hw->in_ifs_mode = true;
+ if (hw->current_ifs_val < hw->ifs_max_val) {
+ if (hw->current_ifs_val == 0)
+ hw->current_ifs_val = hw->ifs_min_val;
+ else
+ hw->current_ifs_val += hw->ifs_step_size;
+ ew32(AIT, hw->current_ifs_val);
+ }
+ }
+ } else {
+ if (hw->in_ifs_mode && (hw->tx_packet_delta <= MIN_NUM_XMITS)) {
+ hw->current_ifs_val = 0;
+ hw->in_ifs_mode = false;
+ ew32(AIT, 0);
+ }
+ }
+ } else {
+ DEBUGOUT("Not in Adaptive IFS mode!\n");
+ }
+}
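+
+/* Illustrative sketch (guarded by #if 0, not compiled): the adaptive IFS
+ * decision above reduced to a pure function on the deltas. When collisions,
+ * scaled by ifs_ratio, outpace transmits (and traffic is above
+ * MIN_NUM_XMITS), the inter-frame spacing written to AIT steps up towards
+ * max_val; once traffic falls back to MIN_NUM_XMITS or fewer packets, the
+ * spacing resets to zero.
+ */
+#if 0
+static u16 model_next_ifs(u16 cur, u16 min_val, u16 max_val, u16 step,
+ u16 ratio, u32 collisions, u32 tx_packets)
+{
+ if ((collisions * ratio) > tx_packets && tx_packets > MIN_NUM_XMITS) {
+ if (cur == 0)
+ return min_val; /* first step into IFS mode */
+ if (cur < max_val)
+ return cur + step; /* back off a little more */
+ return cur; /* already at the ceiling */
+ }
+ if (tx_packets <= MIN_NUM_XMITS)
+ return 0; /* congestion gone, reset AIT */
+ return cur;
+}
+#endif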
+
+/******************************************************************************
+ * Adjusts the statistic counters when a frame is accepted by TBI_ACCEPT
+ *
+ * hw - Struct containing variables accessed by shared code
+ * frame_len - The length of the frame in question
+ * mac_addr - The Ethernet destination address of the frame in question
+ *****************************************************************************/
+void e1000_tbi_adjust_stats(struct e1000_hw *hw, struct e1000_hw_stats *stats,
+ u32 frame_len, u8 *mac_addr)
+{
+ u64 carry_bit;
+
+ /* First adjust the frame length. */
+ frame_len--;
+ /* We need to adjust the statistics counters, since the hardware
+ * counters overcount this packet as a CRC error and undercount
+ * the packet as a good packet
+ */
+ /* This packet should not be counted as a CRC error. */
+ stats->crcerrs--;
+ /* This packet does count as a Good Packet Received. */
+ stats->gprc++;
+
+ /* Adjust the Good Octets received counters */
+ carry_bit = 0x80000000 & stats->gorcl;
+ stats->gorcl += frame_len;
+ /* If the high bit of Gorcl (the low 32 bits of the Good Octets
+ * Received Count) was one before the addition,
+ * AND it is zero after, then we lost the carry out,
+ * need to add one to Gorch (Good Octets Received Count High).
+ * This could be simplified if all environments supported
+ * 64-bit integers.
+ */
+ if (carry_bit && ((stats->gorcl & 0x80000000) == 0))
+ stats->gorch++;
+ /* Is this a broadcast or multicast? Check broadcast first,
+ * since the test for a multicast frame will test positive on
+ * a broadcast frame.
+ */
+ if ((mac_addr[0] == (u8)0xff) && (mac_addr[1] == (u8)0xff))
+ /* Broadcast packet */
+ stats->bprc++;
+ else if (*mac_addr & 0x01)
+ /* Multicast packet */
+ stats->mprc++;
+
+ if (frame_len == hw->max_frame_size) {
+ /* In this case, the hardware has overcounted the number of
+ * oversize frames.
+ */
+ if (stats->roc > 0)
+ stats->roc--;
+ }
+
+ /* Adjust the bin counters when the extra byte put the frame in the
+ * wrong bin. Remember that the frame_len was adjusted above.
+ */
+ if (frame_len == 64) {
+ stats->prc64++;
+ stats->prc127--;
+ } else if (frame_len == 127) {
+ stats->prc127++;
+ stats->prc255--;
+ } else if (frame_len == 255) {
+ stats->prc255++;
+ stats->prc511--;
+ } else if (frame_len == 511) {
+ stats->prc511++;
+ stats->prc1023--;
+ } else if (frame_len == 1023) {
+ stats->prc1023++;
+ stats->prc1522--;
+ } else if (frame_len == 1522) {
+ stats->prc1522++;
+ }
+}
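+
+/* Illustrative sketch (guarded by #if 0, not compiled): the carry handling
+ * used above for the split GORCL/GORCH pair, written as a helper on two
+ * plain 32-bit halves. Assuming the increment is below 2^31 (always true for
+ * a frame length), a carry out of bit 31 is detected when bit 31 was set
+ * before the add and clear afterwards, and the high half is bumped.
+ */
+#if 0
+static void model_add_to_split_counter(u32 *low, u32 *high, u32 increment)
+{
+ u32 carry_bit = *low & 0x80000000;
+
+ *low += increment;
+ if (carry_bit && ((*low & 0x80000000) == 0))
+ *high += 1; /* the carry out of bit 31 was lost */
+}
+#endif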
+
+/******************************************************************************
+ * Gets the current PCI bus type, speed, and width of the hardware
+ *
+ * hw - Struct containing variables accessed by shared code
+ *****************************************************************************/
+void e1000_get_bus_info(struct e1000_hw *hw)
+{
+ s32 ret_val;
+ u16 pci_ex_link_status;
+ u32 status;
+
+ switch (hw->mac_type) {
+ case e1000_82542_rev2_0:
+ case e1000_82542_rev2_1:
+ hw->bus_type = e1000_bus_type_pci;
+ hw->bus_speed = e1000_bus_speed_unknown;
+ hw->bus_width = e1000_bus_width_unknown;
+ break;
+ case e1000_82571:
+ case e1000_82572:
+ case e1000_82573:
+ case e1000_80003es2lan:
+ hw->bus_type = e1000_bus_type_pci_express;
+ hw->bus_speed = e1000_bus_speed_2500;
+ ret_val = e1000_read_pcie_cap_reg(hw,
+ PCI_EX_LINK_STATUS,
+ &pci_ex_link_status);
+ if (ret_val)
+ hw->bus_width = e1000_bus_width_unknown;
+ else
+ hw->bus_width = (pci_ex_link_status & PCI_EX_LINK_WIDTH_MASK) >>
+ PCI_EX_LINK_WIDTH_SHIFT;
+ break;
+ case e1000_ich8lan:
+ hw->bus_type = e1000_bus_type_pci_express;
+ hw->bus_speed = e1000_bus_speed_2500;
+ hw->bus_width = e1000_bus_width_pciex_1;
+ break;
+ default:
+ status = er32(STATUS);
+ hw->bus_type = (status & E1000_STATUS_PCIX_MODE) ?
+ e1000_bus_type_pcix : e1000_bus_type_pci;
+
+ if (hw->device_id == E1000_DEV_ID_82546EB_QUAD_COPPER) {
+ hw->bus_speed = (hw->bus_type == e1000_bus_type_pci) ?
+ e1000_bus_speed_66 : e1000_bus_speed_120;
+ } else if (hw->bus_type == e1000_bus_type_pci) {
+ hw->bus_speed = (status & E1000_STATUS_PCI66) ?
+ e1000_bus_speed_66 : e1000_bus_speed_33;
+ } else {
+ switch (status & E1000_STATUS_PCIX_SPEED) {
+ case E1000_STATUS_PCIX_SPEED_66:
+ hw->bus_speed = e1000_bus_speed_66;
+ break;
+ case E1000_STATUS_PCIX_SPEED_100:
+ hw->bus_speed = e1000_bus_speed_100;
+ break;
+ case E1000_STATUS_PCIX_SPEED_133:
+ hw->bus_speed = e1000_bus_speed_133;
+ break;
+ default:
+ hw->bus_speed = e1000_bus_speed_reserved;
+ break;
+ }
+ }
+ hw->bus_width = (status & E1000_STATUS_BUS64) ?
+ e1000_bus_width_64 : e1000_bus_width_32;
+ break;
+ }
+}
+
+/******************************************************************************
+ * Writes a value to one of the device's registers using port I/O (as opposed to
+ * memory mapped I/O). Only 82544 and newer devices support port I/O.
+ *
+ * hw - Struct containing variables accessed by shared code
+ * offset - offset to write to
+ * value - value to write
+ *****************************************************************************/
+static void e1000_write_reg_io(struct e1000_hw *hw, u32 offset, u32 value)
+{
+ unsigned long io_addr = hw->io_base;
+ unsigned long io_data = hw->io_base + 4;
+
+ e1000_io_write(hw, io_addr, offset);
+ e1000_io_write(hw, io_data, value);
+}
+
+/******************************************************************************
+ * Estimates the cable length.
+ *
+ * hw - Struct containing variables accessed by shared code
+ * min_length - The estimated minimum length
+ * max_length - The estimated maximum length
+ *
+ * returns: - E1000_ERR_XXX
+ * E1000_SUCCESS
+ *
+ * This function always returns a ranged length (minimum & maximum).
+ * For M88 PHYs, this function maps the single value returned by the
+ * register to a minimum and maximum range.
+ * For IGP PHYs, the function calculates the range from the AGC registers.
+ *****************************************************************************/
+static s32 e1000_get_cable_length(struct e1000_hw *hw, u16 *min_length,
+ u16 *max_length)
+{
+ s32 ret_val;
+ u16 agc_value = 0;
+ u16 i, phy_data;
+ u16 cable_length;
+
+ DEBUGFUNC("e1000_get_cable_length");
+
+ *min_length = *max_length = 0;
+
+ /* Use old method for Phy older than IGP */
+ if (hw->phy_type == e1000_phy_m88) {
+
+ ret_val = e1000_read_phy_reg(hw, M88E1000_PHY_SPEC_STATUS,
+ &phy_data);
+ if (ret_val)
+ return ret_val;
+ cable_length = (phy_data & M88E1000_PSSR_CABLE_LENGTH) >>
+ M88E1000_PSSR_CABLE_LENGTH_SHIFT;
+
+ /* Convert the enum value to ranged values */
+ switch (cable_length) {
+ case e1000_cable_length_50:
+ *min_length = 0;
+ *max_length = e1000_igp_cable_length_50;
+ break;
+ case e1000_cable_length_50_80:
+ *min_length = e1000_igp_cable_length_50;
+ *max_length = e1000_igp_cable_length_80;
+ break;
+ case e1000_cable_length_80_110:
+ *min_length = e1000_igp_cable_length_80;
+ *max_length = e1000_igp_cable_length_110;
+ break;
+ case e1000_cable_length_110_140:
+ *min_length = e1000_igp_cable_length_110;
+ *max_length = e1000_igp_cable_length_140;
+ break;
+ case e1000_cable_length_140:
+ *min_length = e1000_igp_cable_length_140;
+ *max_length = e1000_igp_cable_length_170;
+ break;
+ default:
+ return -E1000_ERR_PHY;
+ break;
+ }
+ } else if (hw->phy_type == e1000_phy_gg82563) {
+ ret_val = e1000_read_phy_reg(hw, GG82563_PHY_DSP_DISTANCE,
+ &phy_data);
+ if (ret_val)
+ return ret_val;
+ cable_length = phy_data & GG82563_DSPD_CABLE_LENGTH;
+
+ switch (cable_length) {
+ case e1000_gg_cable_length_60:
+ *min_length = 0;
+ *max_length = e1000_igp_cable_length_60;
+ break;
+ case e1000_gg_cable_length_60_115:
+ *min_length = e1000_igp_cable_length_60;
+ *max_length = e1000_igp_cable_length_115;
+ break;
+ case e1000_gg_cable_length_115_150:
+ *min_length = e1000_igp_cable_length_115;
+ *max_length = e1000_igp_cable_length_150;
+ break;
+ case e1000_gg_cable_length_150:
+ *min_length = e1000_igp_cable_length_150;
+ *max_length = e1000_igp_cable_length_180;
+ break;
+ default:
+ return -E1000_ERR_PHY;
+ break;
+ }
+ } else if (hw->phy_type == e1000_phy_igp) { /* For IGP PHY */
+ u16 cur_agc_value;
+ u16 min_agc_value = IGP01E1000_AGC_LENGTH_TABLE_SIZE;
+ u16 agc_reg_array[IGP01E1000_PHY_CHANNEL_NUM] =
+ {IGP01E1000_PHY_AGC_A,
+ IGP01E1000_PHY_AGC_B,
+ IGP01E1000_PHY_AGC_C,
+ IGP01E1000_PHY_AGC_D};
+ /* Read the AGC registers for all channels */
+ for (i = 0; i < IGP01E1000_PHY_CHANNEL_NUM; i++) {
+
+ ret_val = e1000_read_phy_reg(hw, agc_reg_array[i], &phy_data);
+ if (ret_val)
+ return ret_val;
+
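+ /* The upper bits of each AGC register index
+ * e1000_igp_cable_length_table, giving an approximate length per
+ * channel. */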
+ cur_agc_value = phy_data >> IGP01E1000_AGC_LENGTH_SHIFT;
+
+ /* Value bound check. */
+ if ((cur_agc_value >= IGP01E1000_AGC_LENGTH_TABLE_SIZE - 1) ||
+ (cur_agc_value == 0))
+ return -E1000_ERR_PHY;
+
+ agc_value += cur_agc_value;
+
+ /* Update minimal AGC value. */
+ if (min_agc_value > cur_agc_value)
+ min_agc_value = cur_agc_value;
+ }
+
+ /* Remove the minimal AGC result for length < 50m */
+ if (agc_value < IGP01E1000_PHY_CHANNEL_NUM * e1000_igp_cable_length_50) {
+ agc_value -= min_agc_value;
+
+ /* Get the average length of the remaining 3 channels */
+ agc_value /= (IGP01E1000_PHY_CHANNEL_NUM - 1);
+ } else {
+ /* Get the average length of all the 4 channels. */
+ agc_value /= IGP01E1000_PHY_CHANNEL_NUM;
+ }
+
+ /* Set the range of the calculated length. */
+ *min_length = ((e1000_igp_cable_length_table[agc_value] -
+ IGP01E1000_AGC_RANGE) > 0) ?
+ (e1000_igp_cable_length_table[agc_value] -
+ IGP01E1000_AGC_RANGE) : 0;
+ *max_length = e1000_igp_cable_length_table[agc_value] +
+ IGP01E1000_AGC_RANGE;
+ } else if (hw->phy_type == e1000_phy_igp_2 ||
+ hw->phy_type == e1000_phy_igp_3) {
+ u16 cur_agc_index, max_agc_index = 0;
+ u16 min_agc_index = IGP02E1000_AGC_LENGTH_TABLE_SIZE - 1;
+ u16 agc_reg_array[IGP02E1000_PHY_CHANNEL_NUM] =
+ {IGP02E1000_PHY_AGC_A,
+ IGP02E1000_PHY_AGC_B,
+ IGP02E1000_PHY_AGC_C,
+ IGP02E1000_PHY_AGC_D};
+ /* Read the AGC registers for all channels */
+ for (i = 0; i < IGP02E1000_PHY_CHANNEL_NUM; i++) {
+ ret_val = e1000_read_phy_reg(hw, agc_reg_array[i], &phy_data);
+ if (ret_val)
+ return ret_val;
+
+ /* Getting bits 15:9, which represent the combination of coarse and
+ * fine gain values. The result is a number that can be put into
+ * the lookup table to obtain the approximate cable length. */
+ cur_agc_index = (phy_data >> IGP02E1000_AGC_LENGTH_SHIFT) &
+ IGP02E1000_AGC_LENGTH_MASK;
+
+ /* Array index bound check. */
+ if ((cur_agc_index >= IGP02E1000_AGC_LENGTH_TABLE_SIZE) ||
+ (cur_agc_index == 0))
+ return -E1000_ERR_PHY;
+
+ /* Remove min & max AGC values from calculation. */
+ if (e1000_igp_2_cable_length_table[min_agc_index] >
+ e1000_igp_2_cable_length_table[cur_agc_index])
+ min_agc_index = cur_agc_index;
+ if (e1000_igp_2_cable_length_table[max_agc_index] <
+ e1000_igp_2_cable_length_table[cur_agc_index])
+ max_agc_index = cur_agc_index;
+
+ agc_value += e1000_igp_2_cable_length_table[cur_agc_index];
+ }
+
+ agc_value -= (e1000_igp_2_cable_length_table[min_agc_index] +
+ e1000_igp_2_cable_length_table[max_agc_index]);
+ agc_value /= (IGP02E1000_PHY_CHANNEL_NUM - 2);
+
+ /* Calculate cable length with the error range of +/- 10 meters. */
+ *min_length = ((agc_value - IGP02E1000_AGC_RANGE) > 0) ?
+ (agc_value - IGP02E1000_AGC_RANGE) : 0;
+ *max_length = agc_value + IGP02E1000_AGC_RANGE;
+ }
+
+ return E1000_SUCCESS;
+}
+
+/******************************************************************************
+ * Check the cable polarity
+ *
+ * hw - Struct containing variables accessed by shared code
+ * polarity - output parameter : 0 - Polarity is not reversed
+ * 1 - Polarity is reversed.
+ *
+ * returns: - E1000_ERR_XXX
+ * E1000_SUCCESS
+ *
+ * For PHYs older than IGP, this function simply reads the polarity bit in the
+ * PHY Status register. For IGP PHYs, this bit is valid only if the link speed
+ * is 10 Mbps. At 100 Mbps there is no polarity reversal, so this bit always
+ * reads 0. At 1000 Mbps the polarity status is in the
+ * IGP01E1000_PHY_PCS_INIT_REG.
+ *****************************************************************************/
+static s32 e1000_check_polarity(struct e1000_hw *hw,
+ e1000_rev_polarity *polarity)
+{
+ s32 ret_val;
+ u16 phy_data;
+
+ DEBUGFUNC("e1000_check_polarity");
+
+ if ((hw->phy_type == e1000_phy_m88) ||
+ (hw->phy_type == e1000_phy_gg82563)) {
+ /* return the Polarity bit in the Status register. */
+ ret_val = e1000_read_phy_reg(hw, M88E1000_PHY_SPEC_STATUS,
+ &phy_data);
+ if (ret_val)
+ return ret_val;
+ *polarity = ((phy_data & M88E1000_PSSR_REV_POLARITY) >>
+ M88E1000_PSSR_REV_POLARITY_SHIFT) ?
+ e1000_rev_polarity_reversed : e1000_rev_polarity_normal;
+
+ } else if (hw->phy_type == e1000_phy_igp ||
+ hw->phy_type == e1000_phy_igp_3 ||
+ hw->phy_type == e1000_phy_igp_2) {
+ /* Read the Status register to check the speed */
+ ret_val = e1000_read_phy_reg(hw, IGP01E1000_PHY_PORT_STATUS,
+ &phy_data);
+ if (ret_val)
+ return ret_val;
+
+ /* If speed is 1000 Mbps, must read the IGP01E1000_PHY_PCS_INIT_REG to
+ * find the polarity status */
+ if ((phy_data & IGP01E1000_PSSR_SPEED_MASK) ==
+ IGP01E1000_PSSR_SPEED_1000MBPS) {
+
+ /* Read the GIG initialization PCS register (0x00B4) */
+ ret_val = e1000_read_phy_reg(hw, IGP01E1000_PHY_PCS_INIT_REG,
+ &phy_data);
+ if (ret_val)
+ return ret_val;
+
+ /* Check the polarity bits */
+ *polarity = (phy_data & IGP01E1000_PHY_POLARITY_MASK) ?
+ e1000_rev_polarity_reversed : e1000_rev_polarity_normal;
+ } else {
+ /* For 10 Mbps, read the polarity bit in the status register. (for
+ * 100 Mbps this bit is always 0) */
+ *polarity = (phy_data & IGP01E1000_PSSR_POLARITY_REVERSED) ?
+ e1000_rev_polarity_reversed : e1000_rev_polarity_normal;
+ }
+ } else if (hw->phy_type == e1000_phy_ife) {
+ ret_val = e1000_read_phy_reg(hw, IFE_PHY_EXTENDED_STATUS_CONTROL,
+ &phy_data);
+ if (ret_val)
+ return ret_val;
+ *polarity = ((phy_data & IFE_PESC_POLARITY_REVERSED) >>
+ IFE_PESC_POLARITY_REVERSED_SHIFT) ?
+ e1000_rev_polarity_reversed : e1000_rev_polarity_normal;
+ }
+ return E1000_SUCCESS;
+}
+
+/******************************************************************************
+ * Check if a downshift occurred
+ *
+ * hw - Struct containing variables accessed by shared code
+ * The result is stored in hw->speed_downgraded:
+ * 0 - No downshift occurred.
+ * 1 - Downshift occurred.
+ *
+ * returns: - E1000_ERR_XXX
+ * E1000_SUCCESS
+ *
+ * For PHYs older than IGP, this function reads the Downshift bit in the PHY
+ * Specific Status register. For IGP PHYs, it reads the Downgrade bit in the
+ * Link Health register. In IGP this bit is latched high, so the driver must
+ * read it immediately after link is established.
+ *****************************************************************************/
+static s32 e1000_check_downshift(struct e1000_hw *hw)
+{
+ s32 ret_val;
+ u16 phy_data;
+
+ DEBUGFUNC("e1000_check_downshift");
+
+ if (hw->phy_type == e1000_phy_igp ||
+ hw->phy_type == e1000_phy_igp_3 ||
+ hw->phy_type == e1000_phy_igp_2) {
+ ret_val = e1000_read_phy_reg(hw, IGP01E1000_PHY_LINK_HEALTH,
+ &phy_data);
+ if (ret_val)
+ return ret_val;
+
+ hw->speed_downgraded = (phy_data & IGP01E1000_PLHR_SS_DOWNGRADE) ? 1 : 0;
+ } else if ((hw->phy_type == e1000_phy_m88) ||
+ (hw->phy_type == e1000_phy_gg82563)) {
+ ret_val = e1000_read_phy_reg(hw, M88E1000_PHY_SPEC_STATUS,
+ &phy_data);
+ if (ret_val)
+ return ret_val;
+
+ hw->speed_downgraded = (phy_data & M88E1000_PSSR_DOWNSHIFT) >>
+ M88E1000_PSSR_DOWNSHIFT_SHIFT;
+ } else if (hw->phy_type == e1000_phy_ife) {
+ /* e1000_phy_ife supports 10/100 speed only */
+ hw->speed_downgraded = false;
+ }
+
+ return E1000_SUCCESS;
+}
+
+/*****************************************************************************
+ *
+ * 82541_rev_2 & 82547_rev_2 have the capability to configure the DSP when a
+ * gigabit link is achieved to improve link quality.
+ *
+ * hw: Struct containing variables accessed by shared code
+ *
+ * returns: - E1000_ERR_PHY if reading or writing the PHY fails
+ * E1000_SUCCESS in any other case.
+ *
+ ****************************************************************************/
+
+static s32 e1000_config_dsp_after_link_change(struct e1000_hw *hw, bool link_up)
+{
+ s32 ret_val;
+ u16 phy_data, phy_saved_data, speed, duplex, i;
+ u16 dsp_reg_array[IGP01E1000_PHY_CHANNEL_NUM] =
+ {IGP01E1000_PHY_AGC_PARAM_A,
+ IGP01E1000_PHY_AGC_PARAM_B,
+ IGP01E1000_PHY_AGC_PARAM_C,
+ IGP01E1000_PHY_AGC_PARAM_D};
+ u16 min_length, max_length;
+
+ DEBUGFUNC("e1000_config_dsp_after_link_change");
+
+ if (hw->phy_type != e1000_phy_igp)
+ return E1000_SUCCESS;
+
+ if (link_up) {
+ ret_val = e1000_get_speed_and_duplex(hw, &speed, &duplex);
+ if (ret_val) {
+ DEBUGOUT("Error getting link speed and duplex\n");
+ return ret_val;
+ }
+
+ if (speed == SPEED_1000) {
+
+ ret_val = e1000_get_cable_length(hw, &min_length, &max_length);
+ if (ret_val)
+ return ret_val;
+
+ if ((hw->dsp_config_state == e1000_dsp_config_enabled) &&
+ min_length >= e1000_igp_cable_length_50) {
+
+ for (i = 0; i < IGP01E1000_PHY_CHANNEL_NUM; i++) {
+ ret_val = e1000_read_phy_reg(hw, dsp_reg_array[i],
+ &phy_data);
+ if (ret_val)
+ return ret_val;
+
+ phy_data &= ~IGP01E1000_PHY_EDAC_MU_INDEX;
+
+ ret_val = e1000_write_phy_reg(hw, dsp_reg_array[i],
+ phy_data);
+ if (ret_val)
+ return ret_val;
+ }
+ hw->dsp_config_state = e1000_dsp_config_activated;
+ }
+
+ if ((hw->ffe_config_state == e1000_ffe_config_enabled) &&
+ (min_length < e1000_igp_cable_length_50)) {
+
+ u16 ffe_idle_err_timeout = FFE_IDLE_ERR_COUNT_TIMEOUT_20;
+ u32 idle_errs = 0;
+
+ /* clear previous idle error counts */
+ ret_val = e1000_read_phy_reg(hw, PHY_1000T_STATUS,
+ &phy_data);
+ if (ret_val)
+ return ret_val;
+
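+ /* Poll the idle error counter roughly once per millisecond; if the
+ * accumulated count exceeds the threshold, switch to the alternate
+ * FFE configuration for short cables and stop polling. */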
+ for (i = 0; i < ffe_idle_err_timeout; i++) {
+ udelay(1000);
+ ret_val = e1000_read_phy_reg(hw, PHY_1000T_STATUS,
+ &phy_data);
+ if (ret_val)
+ return ret_val;
+
+ idle_errs += (phy_data & SR_1000T_IDLE_ERROR_CNT);
+ if (idle_errs > SR_1000T_PHY_EXCESSIVE_IDLE_ERR_COUNT) {
+ hw->ffe_config_state = e1000_ffe_config_active;
+
+ ret_val = e1000_write_phy_reg(hw,
+ IGP01E1000_PHY_DSP_FFE,
+ IGP01E1000_PHY_DSP_FFE_CM_CP);
+ if (ret_val)
+ return ret_val;
+ break;
+ }
+
+ if (idle_errs)
+ ffe_idle_err_timeout = FFE_IDLE_ERR_COUNT_TIMEOUT_100;
+ }
+ }
+ }
+ } else {
+ if (hw->dsp_config_state == e1000_dsp_config_activated) {
+ /* Save off the current value of register 0x2F5B to be restored at
+ * the end of the routines. */
+ ret_val = e1000_read_phy_reg(hw, 0x2F5B, &phy_saved_data);
+
+ if (ret_val)
+ return ret_val;
+
+ /* Disable the PHY transmitter */
+ ret_val = e1000_write_phy_reg(hw, 0x2F5B, 0x0003);
+
+ if (ret_val)
+ return ret_val;
+
+ mdelay(20);
+
+ ret_val = e1000_write_phy_reg(hw, 0x0000,
+ IGP01E1000_IEEE_FORCE_GIGA);
+ if (ret_val)
+ return ret_val;
+ for (i = 0; i < IGP01E1000_PHY_CHANNEL_NUM; i++) {
+ ret_val = e1000_read_phy_reg(hw, dsp_reg_array[i], &phy_data);
+ if (ret_val)
+ return ret_val;
+
+ phy_data &= ~IGP01E1000_PHY_EDAC_MU_INDEX;
+ phy_data |= IGP01E1000_PHY_EDAC_SIGN_EXT_9_BITS;
+
+ ret_val = e1000_write_phy_reg(hw, dsp_reg_array[i], phy_data);
+ if (ret_val)
+ return ret_val;
+ }
+
+ ret_val = e1000_write_phy_reg(hw, 0x0000,
+ IGP01E1000_IEEE_RESTART_AUTONEG);
+ if (ret_val)
+ return ret_val;
+
+ mdelay(20);
+
+ /* Now enable the transmitter */
+ ret_val = e1000_write_phy_reg(hw, 0x2F5B, phy_saved_data);
+
+ if (ret_val)
+ return ret_val;
+
+ hw->dsp_config_state = e1000_dsp_config_enabled;
+ }
+
+ if (hw->ffe_config_state == e1000_ffe_config_active) {
+ /* Save off the current value of register 0x2F5B to be restored at
+ * the end of the routines. */
+ ret_val = e1000_read_phy_reg(hw, 0x2F5B, &phy_saved_data);
+
+ if (ret_val)
+ return ret_val;
+
+ /* Disable the PHY transmitter */
+ ret_val = e1000_write_phy_reg(hw, 0x2F5B, 0x0003);
+
+ if (ret_val)
+ return ret_val;
+
+ mdelay(20);
+
+ ret_val = e1000_write_phy_reg(hw, 0x0000,
+ IGP01E1000_IEEE_FORCE_GIGA);
+ if (ret_val)
+ return ret_val;
+ ret_val = e1000_write_phy_reg(hw, IGP01E1000_PHY_DSP_FFE,
+ IGP01E1000_PHY_DSP_FFE_DEFAULT);
+ if (ret_val)
+ return ret_val;
+
+ ret_val = e1000_write_phy_reg(hw, 0x0000,
+ IGP01E1000_IEEE_RESTART_AUTONEG);
+ if (ret_val)
+ return ret_val;
+
+ mdelay(20);
+
+ /* Now enable the transmitter */
+ ret_val = e1000_write_phy_reg(hw, 0x2F5B, phy_saved_data);
+
+ if (ret_val)
+ return ret_val;
+
+ hw->ffe_config_state = e1000_ffe_config_enabled;
+ }
+ }
+ return E1000_SUCCESS;
+}
+
+/*****************************************************************************
+ * Set PHY to class A mode
+ * Assumes the following operations will follow to enable the new class mode.
+ * 1. Do a PHY soft reset
+ * 2. Restart auto-negotiation or force link.
+ *
+ * hw - Struct containing variables accessed by shared code
+ ****************************************************************************/
+static s32 e1000_set_phy_mode(struct e1000_hw *hw)
+{
+ s32 ret_val;
+ u16 eeprom_data;
+
+ DEBUGFUNC("e1000_set_phy_mode");
+
+ if ((hw->mac_type == e1000_82545_rev_3) &&
+ (hw->media_type == e1000_media_type_copper)) {
+ ret_val = e1000_read_eeprom(hw, EEPROM_PHY_CLASS_WORD, 1, &eeprom_data);
+ if (ret_val) {
+ return ret_val;
+ }
+
+ if ((eeprom_data != EEPROM_RESERVED_WORD) &&
+ (eeprom_data & EEPROM_PHY_CLASS_A)) {
+ ret_val = e1000_write_phy_reg(hw, M88E1000_PHY_PAGE_SELECT, 0x000B);
+ if (ret_val)
+ return ret_val;
+ ret_val = e1000_write_phy_reg(hw, M88E1000_PHY_GEN_CONTROL, 0x8104);
+ if (ret_val)
+ return ret_val;
+
+ hw->phy_reset_disable = false;
+ }
+ }
+
+ return E1000_SUCCESS;
+}
+
+/*****************************************************************************
+ *
+ * This function sets the LPLU state according to the active flag. When
+ * activating LPLU this function also disables SmartSpeed and vice versa.
+ * LPLU will not be activated unless the device's autonegotiation advertisement
+ * is one of the standard sets: 10, 10/100 or 10/100/1000 at all duplexes.
+ * hw: Struct containing variables accessed by shared code
+ * active - true to enable lplu false to disable lplu.
+ *
+ * returns: - E1000_ERR_PHY if reading or writing the PHY fails
+ * E1000_SUCCESS in any other case.
+ *
+ ****************************************************************************/
+
+static s32 e1000_set_d3_lplu_state(struct e1000_hw *hw, bool active)
+{
+ u32 phy_ctrl = 0;
+ s32 ret_val;
+ u16 phy_data;
+ DEBUGFUNC("e1000_set_d3_lplu_state");
+
+ if (hw->phy_type != e1000_phy_igp && hw->phy_type != e1000_phy_igp_2
+ && hw->phy_type != e1000_phy_igp_3)
+ return E1000_SUCCESS;
+
+ /* During normal driver activity LPLU should not be used, or the link will
+ * be negotiated starting from the lowest speed (10 Mbps). The capability
+ * is used for Dx transitions and states. */
+ if (hw->mac_type == e1000_82541_rev_2 || hw->mac_type == e1000_82547_rev_2) {
+ ret_val = e1000_read_phy_reg(hw, IGP01E1000_GMII_FIFO, &phy_data);
+ if (ret_val)
+ return ret_val;
+ } else if (hw->mac_type == e1000_ich8lan) {
+ /* MAC writes into PHY register based on the state transition
+ * and start auto-negotiation. SW driver can overwrite the settings
+ * in CSR PHY power control E1000_PHY_CTRL register. */
+ phy_ctrl = er32(PHY_CTRL);
+ } else {
+ ret_val = e1000_read_phy_reg(hw, IGP02E1000_PHY_POWER_MGMT, &phy_data);
+ if (ret_val)
+ return ret_val;
+ }
+
+ if (!active) {
+ if (hw->mac_type == e1000_82541_rev_2 ||
+ hw->mac_type == e1000_82547_rev_2) {
+ phy_data &= ~IGP01E1000_GMII_FLEX_SPD;
+ ret_val = e1000_write_phy_reg(hw, IGP01E1000_GMII_FIFO, phy_data);
+ if (ret_val)
+ return ret_val;
+ } else {
+ if (hw->mac_type == e1000_ich8lan) {
+ phy_ctrl &= ~E1000_PHY_CTRL_NOND0A_LPLU;
+ ew32(PHY_CTRL, phy_ctrl);
+ } else {
+ phy_data &= ~IGP02E1000_PM_D3_LPLU;
+ ret_val = e1000_write_phy_reg(hw, IGP02E1000_PHY_POWER_MGMT,
+ phy_data);
+ if (ret_val)
+ return ret_val;
+ }
+ }
+
+ /* LPLU and SmartSpeed are mutually exclusive. LPLU is used during
+ * Dx states where the power conservation is most important. During
+ * driver activity we should enable SmartSpeed, so performance is
+ * maintained. */
+ if (hw->smart_speed == e1000_smart_speed_on) {
+ ret_val = e1000_read_phy_reg(hw, IGP01E1000_PHY_PORT_CONFIG,
+ &phy_data);
+ if (ret_val)
+ return ret_val;
+
+ phy_data |= IGP01E1000_PSCFR_SMART_SPEED;
+ ret_val = e1000_write_phy_reg(hw, IGP01E1000_PHY_PORT_CONFIG,
+ phy_data);
+ if (ret_val)
+ return ret_val;
+ } else if (hw->smart_speed == e1000_smart_speed_off) {
+ ret_val = e1000_read_phy_reg(hw, IGP01E1000_PHY_PORT_CONFIG,
+ &phy_data);
+ if (ret_val)
+ return ret_val;
+
+ phy_data &= ~IGP01E1000_PSCFR_SMART_SPEED;
+ ret_val = e1000_write_phy_reg(hw, IGP01E1000_PHY_PORT_CONFIG,
+ phy_data);
+ if (ret_val)
+ return ret_val;
+ }
+
+ } else if ((hw->autoneg_advertised == AUTONEG_ADVERTISE_SPEED_DEFAULT) ||
+ (hw->autoneg_advertised == AUTONEG_ADVERTISE_10_ALL ) ||
+ (hw->autoneg_advertised == AUTONEG_ADVERTISE_10_100_ALL)) {
+
+ if (hw->mac_type == e1000_82541_rev_2 ||
+ hw->mac_type == e1000_82547_rev_2) {
+ phy_data |= IGP01E1000_GMII_FLEX_SPD;
+ ret_val = e1000_write_phy_reg(hw, IGP01E1000_GMII_FIFO, phy_data);
+ if (ret_val)
+ return ret_val;
+ } else {
+ if (hw->mac_type == e1000_ich8lan) {
+ phy_ctrl |= E1000_PHY_CTRL_NOND0A_LPLU;
+ ew32(PHY_CTRL, phy_ctrl);
+ } else {
+ phy_data |= IGP02E1000_PM_D3_LPLU;
+ ret_val = e1000_write_phy_reg(hw, IGP02E1000_PHY_POWER_MGMT,
+ phy_data);
+ if (ret_val)
+ return ret_val;
+ }
+ }
+
+ /* When LPLU is enabled we should disable SmartSpeed */
+ ret_val = e1000_read_phy_reg(hw, IGP01E1000_PHY_PORT_CONFIG, &phy_data);
+ if (ret_val)
+ return ret_val;
+
+ phy_data &= ~IGP01E1000_PSCFR_SMART_SPEED;
+ ret_val = e1000_write_phy_reg(hw, IGP01E1000_PHY_PORT_CONFIG, phy_data);
+ if (ret_val)
+ return ret_val;
+
+ }
+ return E1000_SUCCESS;
+}
+
+/*****************************************************************************
+ *
+ * This function sets the LPLU D0 state according to the active flag. When
+ * activating LPLU this function also disables SmartSpeed and vice versa.
+ * LPLU will not be activated unless the device's autonegotiation advertisement
+ * is one of the standard sets: 10, 10/100 or 10/100/1000 at all duplexes.
+ * hw: Struct containing variables accessed by shared code
+ * active - true to enable lplu false to disable lplu.
+ *
+ * returns: - E1000_ERR_PHY if reading or writing the PHY fails
+ * E1000_SUCCESS in any other case.
+ *
+ ****************************************************************************/
+
+static s32 e1000_set_d0_lplu_state(struct e1000_hw *hw, bool active)
+{
+ u32 phy_ctrl = 0;
+ s32 ret_val;
+ u16 phy_data;
+ DEBUGFUNC("e1000_set_d0_lplu_state");
+
+ if (hw->mac_type <= e1000_82547_rev_2)
+ return E1000_SUCCESS;
+
+ if (hw->mac_type == e1000_ich8lan) {
+ phy_ctrl = er32(PHY_CTRL);
+ } else {
+ ret_val = e1000_read_phy_reg(hw, IGP02E1000_PHY_POWER_MGMT, &phy_data);
+ if (ret_val)
+ return ret_val;
+ }
+
+ if (!active) {
+ if (hw->mac_type == e1000_ich8lan) {
+ phy_ctrl &= ~E1000_PHY_CTRL_D0A_LPLU;
+ ew32(PHY_CTRL, phy_ctrl);
+ } else {
+ phy_data &= ~IGP02E1000_PM_D0_LPLU;
+ ret_val = e1000_write_phy_reg(hw, IGP02E1000_PHY_POWER_MGMT, phy_data);
+ if (ret_val)
+ return ret_val;
+ }
+
+ /* LPLU and SmartSpeed are mutually exclusive. LPLU is used during
+ * Dx states where the power conservation is most important. During
+ * driver activity we should enable SmartSpeed, so performance is
+ * maintained. */
+ if (hw->smart_speed == e1000_smart_speed_on) {
+ ret_val = e1000_read_phy_reg(hw, IGP01E1000_PHY_PORT_CONFIG,
+ &phy_data);
+ if (ret_val)
+ return ret_val;
+
+ phy_data |= IGP01E1000_PSCFR_SMART_SPEED;
+ ret_val = e1000_write_phy_reg(hw, IGP01E1000_PHY_PORT_CONFIG,
+ phy_data);
+ if (ret_val)
+ return ret_val;
+ } else if (hw->smart_speed == e1000_smart_speed_off) {
+ ret_val = e1000_read_phy_reg(hw, IGP01E1000_PHY_PORT_CONFIG,
+ &phy_data);
+ if (ret_val)
+ return ret_val;
+
+ phy_data &= ~IGP01E1000_PSCFR_SMART_SPEED;
+ ret_val = e1000_write_phy_reg(hw, IGP01E1000_PHY_PORT_CONFIG,
+ phy_data);
+ if (ret_val)
+ return ret_val;
+ }
+
+
+ } else {
+
+ if (hw->mac_type == e1000_ich8lan) {
+ phy_ctrl |= E1000_PHY_CTRL_D0A_LPLU;
+ ew32(PHY_CTRL, phy_ctrl);
+ } else {
+ phy_data |= IGP02E1000_PM_D0_LPLU;
+ ret_val = e1000_write_phy_reg(hw, IGP02E1000_PHY_POWER_MGMT, phy_data);
+ if (ret_val)
+ return ret_val;
+ }
+
+ /* When LPLU is enabled we should disable SmartSpeed */
+ ret_val = e1000_read_phy_reg(hw, IGP01E1000_PHY_PORT_CONFIG, &phy_data);
+ if (ret_val)
+ return ret_val;
+
+ phy_data &= ~IGP01E1000_PSCFR_SMART_SPEED;
+ ret_val = e1000_write_phy_reg(hw, IGP01E1000_PHY_PORT_CONFIG, phy_data);
+ if (ret_val)
+ return ret_val;
+
+ }
+ return E1000_SUCCESS;
+}
+
+/******************************************************************************
+ * Change VCO speed register to improve Bit Error Rate performance of SERDES.
+ *
+ * hw - Struct containing variables accessed by shared code
+ *****************************************************************************/
+static s32 e1000_set_vco_speed(struct e1000_hw *hw)
+{
+ s32 ret_val;
+ u16 default_page = 0;
+ u16 phy_data;
+
+ DEBUGFUNC("e1000_set_vco_speed");
+
+ switch (hw->mac_type) {
+ case e1000_82545_rev_3:
+ case e1000_82546_rev_3:
+ break;
+ default:
+ return E1000_SUCCESS;
+ }
+
+ /* Set PHY register 30, page 5, bit 8 to 0 */
+
+ ret_val = e1000_read_phy_reg(hw, M88E1000_PHY_PAGE_SELECT, &default_page);
+ if (ret_val)
+ return ret_val;
+
+ ret_val = e1000_write_phy_reg(hw, M88E1000_PHY_PAGE_SELECT, 0x0005);
+ if (ret_val)
+ return ret_val;
+
+ ret_val = e1000_read_phy_reg(hw, M88E1000_PHY_GEN_CONTROL, &phy_data);
+ if (ret_val)
+ return ret_val;
+
+ phy_data &= ~M88E1000_PHY_VCO_REG_BIT8;
+ ret_val = e1000_write_phy_reg(hw, M88E1000_PHY_GEN_CONTROL, phy_data);
+ if (ret_val)
+ return ret_val;
+
+ /* Set PHY register 30, page 4, bit 11 to 1 */
+
+ ret_val = e1000_write_phy_reg(hw, M88E1000_PHY_PAGE_SELECT, 0x0004);
+ if (ret_val)
+ return ret_val;
+
+ ret_val = e1000_read_phy_reg(hw, M88E1000_PHY_GEN_CONTROL, &phy_data);
+ if (ret_val)
+ return ret_val;
+
+ phy_data |= M88E1000_PHY_VCO_REG_BIT11;
+ ret_val = e1000_write_phy_reg(hw, M88E1000_PHY_GEN_CONTROL, phy_data);
+ if (ret_val)
+ return ret_val;
+
+ ret_val = e1000_write_phy_reg(hw, M88E1000_PHY_PAGE_SELECT, default_page);
+ if (ret_val)
+ return ret_val;
+
+ return E1000_SUCCESS;
+}
+
+
+/*****************************************************************************
+ * This function reads the cookie from ARC RAM.
+ *
+ * returns: - E1000_SUCCESS.
+ ****************************************************************************/
+static s32 e1000_host_if_read_cookie(struct e1000_hw *hw, u8 *buffer)
+{
+ u8 i;
+ u32 offset = E1000_MNG_DHCP_COOKIE_OFFSET;
+ u8 length = E1000_MNG_DHCP_COOKIE_LENGTH;
+
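+ /* The host interface RAM is accessed one DWORD at a time, so convert
+ * the byte-based cookie offset and length into DWORD units. */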
+ length = (length >> 2);
+ offset = (offset >> 2);
+
+ for (i = 0; i < length; i++) {
+ *((u32 *)buffer + i) =
+ E1000_READ_REG_ARRAY_DWORD(hw, HOST_IF, offset + i);
+ }
+ return E1000_SUCCESS;
+}
+
+
+/*****************************************************************************
+ * This function checks whether the host interface is enabled for command
+ * operation and also checks whether the previous command has completed.
+ * It busy-waits if the previous command has not yet completed.
+ *
+ * returns: - E1000_ERR_HOST_INTERFACE_COMMAND if the interface is not ready
+ * or the wait times out
+ * - E1000_SUCCESS for success.
+ ****************************************************************************/
+static s32 e1000_mng_enable_host_if(struct e1000_hw *hw)
+{
+ u32 hicr;
+ u8 i;
+
+ /* Check that the host interface is enabled. */
+ hicr = er32(HICR);
+ if ((hicr & E1000_HICR_EN) == 0) {
+ DEBUGOUT("E1000_HOST_EN bit disabled.\n");
+ return -E1000_ERR_HOST_INTERFACE_COMMAND;
+ }
+ /* check the previous command is completed */
+ for (i = 0; i < E1000_MNG_DHCP_COMMAND_TIMEOUT; i++) {
+ hicr = er32(HICR);
+ if (!(hicr & E1000_HICR_C))
+ break;
+ mdelay(1);
+ }
+
+ if (i == E1000_MNG_DHCP_COMMAND_TIMEOUT) {
+ DEBUGOUT("Previous command timeout failed .\n");
+ return -E1000_ERR_HOST_INTERFACE_COMMAND;
+ }
+ return E1000_SUCCESS;
+}
+
+/*****************************************************************************
+ * This function writes the buffer content at the given offset on the host
+ * interface. It handles alignment so that the writes are done in the most
+ * efficient way, and it accumulates the byte sum of the buffer into *sum.
+ *
+ * returns - E1000_SUCCESS for success.
+ ****************************************************************************/
+static s32 e1000_mng_host_if_write(struct e1000_hw *hw, u8 *buffer, u16 length,
+ u16 offset, u8 *sum)
+{
+ u8 *tmp;
+ u8 *bufptr = buffer;
+ u32 data = 0;
+ u16 remaining, i, j, prev_bytes;
+
+ /* *sum is only the running byte sum of the data, not a checksum */
+
+ if (length == 0 || offset + length > E1000_HI_MAX_MNG_DATA_LENGTH) {
+ return -E1000_ERR_PARAM;
+ }
+
+ tmp = (u8 *)&data;
+ prev_bytes = offset & 0x3;
+ offset &= 0xFFFC;
+ offset >>= 2;
+
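+ /* If the offset is not DWORD aligned, read-modify-write the first
+ * DWORD so that the bytes in front of the buffer are preserved. */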
+ if (prev_bytes) {
+ data = E1000_READ_REG_ARRAY_DWORD(hw, HOST_IF, offset);
+ for (j = prev_bytes; j < sizeof(u32); j++) {
+ *(tmp + j) = *bufptr++;
+ *sum += *(tmp + j);
+ }
+ E1000_WRITE_REG_ARRAY_DWORD(hw, HOST_IF, offset, data);
+ length -= j - prev_bytes;
+ offset++;
+ }
+
+ remaining = length & 0x3;
+ length -= remaining;
+
+ /* Calculate length in DWORDs */
+ length >>= 2;
+
+ /* The device driver writes the relevant command block into the
+ * ram area. */
+ for (i = 0; i < length; i++) {
+ for (j = 0; j < sizeof(u32); j++) {
+ *(tmp + j) = *bufptr++;
+ *sum += *(tmp + j);
+ }
+
+ E1000_WRITE_REG_ARRAY_DWORD(hw, HOST_IF, offset + i, data);
+ }
+ if (remaining) {
+ for (j = 0; j < sizeof(u32); j++) {
+ if (j < remaining)
+ *(tmp + j) = *bufptr++;
+ else
+ *(tmp + j) = 0;
+
+ *sum += *(tmp + j);
+ }
+ E1000_WRITE_REG_ARRAY_DWORD(hw, HOST_IF, offset + i, data);
+ }
+
+ return E1000_SUCCESS;
+}
+
+
+/*****************************************************************************
+ * This function writes the command header after performing the checksum
+ * calculation.
+ *
+ * returns - E1000_SUCCESS for success.
+ ****************************************************************************/
+static s32 e1000_mng_write_cmd_header(struct e1000_hw *hw,
+ struct e1000_host_mng_command_header *hdr)
+{
+ u16 i;
+ u8 sum;
+ u8 *buffer;
+
+ /* Write the whole command header structure, which includes the checksum
+ * of the buffer */
+
+ u16 length = sizeof(struct e1000_host_mng_command_header);
+
+ sum = hdr->checksum;
+ hdr->checksum = 0;
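+ /* On entry the checksum field holds the byte sum of the payload
+ * already written to the host interface; add every header byte with
+ * the checksum field cleared and store the two's complement so that
+ * header plus payload sums to zero. */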
+
+ buffer = (u8 *)hdr;
+ i = length;
+ while (i--)
+ sum += buffer[i];
+
+ hdr->checksum = 0 - sum;
+
+ length >>= 2;
+ /* The device driver writes the relevant command block into the ram area. */
+ for (i = 0; i < length; i++) {
+ E1000_WRITE_REG_ARRAY_DWORD(hw, HOST_IF, i, *((u32 *)hdr + i));
+ E1000_WRITE_FLUSH();
+ }
+
+ return E1000_SUCCESS;
+}
+
+
+/*****************************************************************************
+ * This function indicates to ARC that a new command is pending which completes
+ * one write operation by the driver.
+ *
+ * returns - E1000_SUCCESS for success.
+ ****************************************************************************/
+static s32 e1000_mng_write_commit(struct e1000_hw *hw)
+{
+ u32 hicr;
+
+ hicr = er32(HICR);
+ /* Setting this bit tells the ARC that a new command is pending. */
+ ew32(HICR, hicr | E1000_HICR_C);
+
+ return E1000_SUCCESS;
+}
+
+
+/*****************************************************************************
+ * This function checks the mode of the firmware.
+ *
+ * returns - true when the firmware is in IAMT mode, false otherwise.
+ ****************************************************************************/
+bool e1000_check_mng_mode(struct e1000_hw *hw)
+{
+ u32 fwsm;
+
+ fwsm = er32(FWSM);
+
+ if (hw->mac_type == e1000_ich8lan) {
+ if ((fwsm & E1000_FWSM_MODE_MASK) ==
+ (E1000_MNG_ICH_IAMT_MODE << E1000_FWSM_MODE_SHIFT))
+ return true;
+ } else if ((fwsm & E1000_FWSM_MODE_MASK) ==
+ (E1000_MNG_IAMT_MODE << E1000_FWSM_MODE_SHIFT))
+ return true;
+
+ return false;
+}
+
+
+/*****************************************************************************
+ * This function writes the DHCP information to the host interface.
+ ****************************************************************************/
+s32 e1000_mng_write_dhcp_info(struct e1000_hw *hw, u8 *buffer, u16 length)
+{
+ s32 ret_val;
+ struct e1000_host_mng_command_header hdr;
+
+ hdr.command_id = E1000_MNG_DHCP_TX_PAYLOAD_CMD;
+ hdr.command_length = length;
+ hdr.reserved1 = 0;
+ hdr.reserved2 = 0;
+ hdr.checksum = 0;
+
+ ret_val = e1000_mng_enable_host_if(hw);
+ if (ret_val == E1000_SUCCESS) {
+ ret_val = e1000_mng_host_if_write(hw, buffer, length, sizeof(hdr),
+ &(hdr.checksum));
+ if (ret_val == E1000_SUCCESS) {
+ ret_val = e1000_mng_write_cmd_header(hw, &hdr);
+ if (ret_val == E1000_SUCCESS)
+ ret_val = e1000_mng_write_commit(hw);
+ }
+ }
+ return ret_val;
+}
+
+
+/*****************************************************************************
+ * This function calculates the checksum.
+ *
+ * returns - checksum of buffer contents.
+ ****************************************************************************/
+static u8 e1000_calculate_mng_checksum(char *buffer, u32 length)
+{
+ u8 sum = 0;
+ u32 i;
+
+ if (!buffer)
+ return 0;
+
+ for (i = 0; i < length; i++)
+ sum += buffer[i];
+
+ return (u8)(0 - sum);
+}
+
+/*****************************************************************************
+ * This function checks whether TX packet filtering needs to be enabled.
+ *
+ * returns - true if packet filtering is required, false otherwise.
+ ****************************************************************************/
+bool e1000_enable_tx_pkt_filtering(struct e1000_hw *hw)
+{
+ /* called in init as well as watchdog timer functions */
+
+ s32 ret_val, checksum;
+ bool tx_filter = false;
+ struct e1000_host_mng_dhcp_cookie *hdr = &(hw->mng_cookie);
+ u8 *buffer = (u8 *) &(hw->mng_cookie);
+
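+ /* If the firmware is in management mode, read the DHCP cookie from the
+ * host interface. Filtering is enabled when the cookie requests parsing
+ * support, and also when the cookie cannot be read or validated. */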
+ if (e1000_check_mng_mode(hw)) {
+ ret_val = e1000_mng_enable_host_if(hw);
+ if (ret_val == E1000_SUCCESS) {
+ ret_val = e1000_host_if_read_cookie(hw, buffer);
+ if (ret_val == E1000_SUCCESS) {
+ checksum = hdr->checksum;
+ hdr->checksum = 0;
+ if ((hdr->signature == E1000_IAMT_SIGNATURE) &&
+ checksum == e1000_calculate_mng_checksum((char *)buffer,
+ E1000_MNG_DHCP_COOKIE_LENGTH)) {
+ if (hdr->status &
+ E1000_MNG_DHCP_COOKIE_STATUS_PARSING_SUPPORT)
+ tx_filter = true;
+ } else
+ tx_filter = true;
+ } else
+ tx_filter = true;
+ }
+ }
+
+ hw->tx_pkt_filtering = tx_filter;
+ return tx_filter;
+}
+
+/******************************************************************************
+ * Verifies whether the hardware needs to allow ARPs to be processed by the
+ * host
+ *
+ * hw - Struct containing variables accessed by shared code
+ *
+ * returns: - true/false
+ *
+ *****************************************************************************/
+u32 e1000_enable_mng_pass_thru(struct e1000_hw *hw)
+{
+ u32 manc;
+ u32 fwsm, factps;
+
+ if (hw->asf_firmware_present) {
+ manc = er32(MANC);
+
+ if (!(manc & E1000_MANC_RCV_TCO_EN) ||
+ !(manc & E1000_MANC_EN_MAC_ADDR_FILTER))
+ return false;
+ if (e1000_arc_subsystem_valid(hw)) {
+ fwsm = er32(FWSM);
+ factps = er32(FACTPS);
+
+ if ((((fwsm & E1000_FWSM_MODE_MASK) >> E1000_FWSM_MODE_SHIFT) ==
+ e1000_mng_mode_pt) && !(factps & E1000_FACTPS_MNGCG))
+ return true;
+ } else
+ if ((manc & E1000_MANC_SMBUS_EN) && !(manc & E1000_MANC_ASF_EN))
+ return true;
+ }
+ return false;
+}
+
+static s32 e1000_polarity_reversal_workaround(struct e1000_hw *hw)
+{
+ s32 ret_val;
+ u16 mii_status_reg;
+ u16 i;
+
+ /* Polarity reversal workaround for forced 10F/10H links. */
+
+ /* Disable the transmitter on the PHY */
+
+ ret_val = e1000_write_phy_reg(hw, M88E1000_PHY_PAGE_SELECT, 0x0019);
+ if (ret_val)
+ return ret_val;
+ ret_val = e1000_write_phy_reg(hw, M88E1000_PHY_GEN_CONTROL, 0xFFFF);
+ if (ret_val)
+ return ret_val;
+
+ ret_val = e1000_write_phy_reg(hw, M88E1000_PHY_PAGE_SELECT, 0x0000);
+ if (ret_val)
+ return ret_val;
+
+ /* This loop will early-out once the no-link condition has been met. */
+ for (i = PHY_FORCE_TIME; i > 0; i--) {
+ /* Read the MII Status Register and wait for Link Status bit
+ * to be clear.
+ */
+
+ ret_val = e1000_read_phy_reg(hw, PHY_STATUS, &mii_status_reg);
+ if (ret_val)
+ return ret_val;
+
+ ret_val = e1000_read_phy_reg(hw, PHY_STATUS, &mii_status_reg);
+ if (ret_val)
+ return ret_val;
+
+ if ((mii_status_reg & ~MII_SR_LINK_STATUS) == 0) break;
+ mdelay(100);
+ }
+
+ /* Recommended delay time after link has been lost */
+ mdelay(1000);
+
+ /* Now we will re-enable the transmitter on the PHY */
+
+ ret_val = e1000_write_phy_reg(hw, M88E1000_PHY_PAGE_SELECT, 0x0019);
+ if (ret_val)
+ return ret_val;
+ mdelay(50);
+ ret_val = e1000_write_phy_reg(hw, M88E1000_PHY_GEN_CONTROL, 0xFFF0);
+ if (ret_val)
+ return ret_val;
+ mdelay(50);
+ ret_val = e1000_write_phy_reg(hw, M88E1000_PHY_GEN_CONTROL, 0xFF00);
+ if (ret_val)
+ return ret_val;
+ mdelay(50);
+ ret_val = e1000_write_phy_reg(hw, M88E1000_PHY_GEN_CONTROL, 0x0000);
+ if (ret_val)
+ return ret_val;
+
+ ret_val = e1000_write_phy_reg(hw, M88E1000_PHY_PAGE_SELECT, 0x0000);
+ if (ret_val)
+ return ret_val;
+
+ /* This loop will early-out if the link condition has been met. */
+ for (i = PHY_FORCE_TIME; i > 0; i--) {
+ /* Read the MII Status Register and wait for Link Status bit
+ * to be set.
+ */
+
+ ret_val = e1000_read_phy_reg(hw, PHY_STATUS, &mii_status_reg);
+ if (ret_val)
+ return ret_val;
+
+ ret_val = e1000_read_phy_reg(hw, PHY_STATUS, &mii_status_reg);
+ if (ret_val)
+ return ret_val;
+
+ if (mii_status_reg & MII_SR_LINK_STATUS) break;
+ mdelay(100);
+ }
+ return E1000_SUCCESS;
+}
+
+/***************************************************************************
+ *
+ * Disables PCI-Express master access.
+ *
+ * hw: Struct containing variables accessed by shared code
+ *
+ * returns: - none.
+ *
+ ***************************************************************************/
+static void e1000_set_pci_express_master_disable(struct e1000_hw *hw)
+{
+ u32 ctrl;
+
+ DEBUGFUNC("e1000_set_pci_express_master_disable");
+
+ if (hw->bus_type != e1000_bus_type_pci_express)
+ return;
+
+ ctrl = er32(CTRL);
+ ctrl |= E1000_CTRL_GIO_MASTER_DISABLE;
+ ew32(CTRL, ctrl);
+}
+
+/*******************************************************************************
+ *
+ * Disables PCI-Express master access and verifies there are no pending requests
+ *
+ * hw: Struct containing variables accessed by shared code
+ *
+ * returns: - E1000_ERR_MASTER_REQUESTS_PENDING if the master disable bit
+ * hasn't caused the master requests to be disabled.
+ * E1000_SUCCESS when master requests are disabled.
+ *
+ ******************************************************************************/
+s32 e1000_disable_pciex_master(struct e1000_hw *hw)
+{
+ s32 timeout = MASTER_DISABLE_TIMEOUT; /* 80ms */
+
+ DEBUGFUNC("e1000_disable_pciex_master");
+
+ if (hw->bus_type != e1000_bus_type_pci_express)
+ return E1000_SUCCESS;
+
+ e1000_set_pci_express_master_disable(hw);
+
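+ /* Poll the status register until the GIO master enable bit clears,
+ * waiting 100 us between reads, and give up after the timeout. */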
+ while (timeout) {
+ if (!(er32(STATUS) & E1000_STATUS_GIO_MASTER_ENABLE))
+ break;
+ else
+ udelay(100);
+ timeout--;
+ }
+
+ if (!timeout) {
+ DEBUGOUT("Master requests are pending.\n");
+ return -E1000_ERR_MASTER_REQUESTS_PENDING;
+ }
+
+ return E1000_SUCCESS;
+}
+
+/*******************************************************************************
+ *
+ * Check for EEPROM Auto Read bit done.
+ *
+ * hw: Struct containing variables accessed by shared code
+ *
+ * returns: - E1000_ERR_RESET if the MAC fails to reset
+ * E1000_SUCCESS in any other case.
+ *
+ ******************************************************************************/
+static s32 e1000_get_auto_rd_done(struct e1000_hw *hw)
+{
+ s32 timeout = AUTO_READ_DONE_TIMEOUT;
+
+ DEBUGFUNC("e1000_get_auto_rd_done");
+
+ switch (hw->mac_type) {
+ default:
+ msleep(5);
+ break;
+ case e1000_82571:
+ case e1000_82572:
+ case e1000_82573:
+ case e1000_80003es2lan:
+ case e1000_ich8lan:
+ while (timeout) {
+ if (er32(EECD) & E1000_EECD_AUTO_RD)
+ break;
+ else msleep(1);
+ timeout--;
+ }
+
+ if (!timeout) {
+ DEBUGOUT("Auto read by HW from EEPROM has not completed.\n");
+ return -E1000_ERR_RESET;
+ }
+ break;
+ }
+
+ /* PHY configuration from NVM starts just after EECD_AUTO_RD goes high.
+ * Need to wait for PHY configuration completion before accessing NVM
+ * and PHY. */
+ if (hw->mac_type == e1000_82573)
+ msleep(25);
+
+ return E1000_SUCCESS;
+}
+
+/***************************************************************************
+ * Checks if the PHY configuration is done
+ *
+ * hw: Struct containing variables accessed by shared code
+ *
+ * returns: - E1000_ERR_RESET if the MAC fails to reset
+ * E1000_SUCCESS in any other case.
+ *
+ ***************************************************************************/
+static s32 e1000_get_phy_cfg_done(struct e1000_hw *hw)
+{
+ s32 timeout = PHY_CFG_TIMEOUT;
+ u32 cfg_mask = E1000_EEPROM_CFG_DONE;
+
+ DEBUGFUNC("e1000_get_phy_cfg_done");
+
+ switch (hw->mac_type) {
+ default:
+ mdelay(10);
+ break;
+ case e1000_80003es2lan:
+ /* Separate *_CFG_DONE_* bit for each port */
+ if (er32(STATUS) & E1000_STATUS_FUNC_1)
+ cfg_mask = E1000_EEPROM_CFG_DONE_PORT_1;
+ /* Fall Through */
+ case e1000_82571:
+ case e1000_82572:
+ while (timeout) {
+ if (er32(EEMNGCTL) & cfg_mask)
+ break;
+ else
+ msleep(1);
+ timeout--;
+ }
+ if (!timeout) {
+ DEBUGOUT("MNG configuration cycle has not completed.\n");
+ return -E1000_ERR_RESET;
+ }
+ break;
+ }
+
+ return E1000_SUCCESS;
+}
+
+/***************************************************************************
+ *
+ * Uses the combination of the SMBI and SWESMBI semaphore bits to protect
+ * adapter resets and EEPROM accesses.
+ *
+ * hw: Struct containing variables accessed by shared code
+ *
+ * returns: - E1000_ERR_EEPROM if accessing the EEPROM fails.
+ * E1000_SUCCESS in any other case.
+ *
+ ***************************************************************************/
+static s32 e1000_get_hw_eeprom_semaphore(struct e1000_hw *hw)
+{
+ s32 timeout;
+ u32 swsm;
+
+ DEBUGFUNC("e1000_get_hw_eeprom_semaphore");
+
+ if (!hw->eeprom_semaphore_present)
+ return E1000_SUCCESS;
+
+ if (hw->mac_type == e1000_80003es2lan) {
+ /* Get the SW semaphore. */
+ if (e1000_get_software_semaphore(hw) != E1000_SUCCESS)
+ return -E1000_ERR_EEPROM;
+ }
+
+ /* Get the FW semaphore. */
+ timeout = hw->eeprom.word_size + 1;
+ while (timeout) {
+ swsm = er32(SWSM);
+ swsm |= E1000_SWSM_SWESMBI;
+ ew32(SWSM, swsm);
+ /* if we managed to set the bit we got the semaphore. */
+ swsm = er32(SWSM);
+ if (swsm & E1000_SWSM_SWESMBI)
+ break;
+
+ udelay(50);
+ timeout--;
+ }
+
+ if (!timeout) {
+ /* Release semaphores */
+ e1000_put_hw_eeprom_semaphore(hw);
+ DEBUGOUT("Driver can't access the Eeprom - SWESMBI bit is set.\n");
+ return -E1000_ERR_EEPROM;
+ }
+
+ return E1000_SUCCESS;
+}
+
+/***************************************************************************
+ * This function clears HW semaphore bits.
+ *
+ * hw: Struct containing variables accessed by shared code
+ *
+ * returns: - None.
+ *
+ ***************************************************************************/
+static void e1000_put_hw_eeprom_semaphore(struct e1000_hw *hw)
+{
+ u32 swsm;
+
+ DEBUGFUNC("e1000_put_hw_eeprom_semaphore");
+
+ if (!hw->eeprom_semaphore_present)
+ return;
+
+ swsm = er32(SWSM);
+ if (hw->mac_type == e1000_80003es2lan) {
+ /* Release both semaphores. */
+ swsm &= ~(E1000_SWSM_SMBI | E1000_SWSM_SWESMBI);
+ } else
+ swsm &= ~(E1000_SWSM_SWESMBI);
+ ew32(SWSM, swsm);
+}
+
+/***************************************************************************
+ *
+ * Obtains the software semaphore bit (SMBI) before resetting the PHY.
+ *
+ * hw: Struct containing variables accessed by shared code
+ *
+ * returns: - E1000_ERR_RESET if obtaining the semaphore fails.
+ * E1000_SUCCESS in any other case.
+ *
+ ***************************************************************************/
+static s32 e1000_get_software_semaphore(struct e1000_hw *hw)
+{
+ s32 timeout = hw->eeprom.word_size + 1;
+ u32 swsm;
+
+ DEBUGFUNC("e1000_get_software_semaphore");
+
+ if (hw->mac_type != e1000_80003es2lan) {
+ return E1000_SUCCESS;
+ }
+
+ while (timeout) {
+ swsm = er32(SWSM);
+ /* If SMBI bit cleared, it is now set and we hold the semaphore */
+ if (!(swsm & E1000_SWSM_SMBI))
+ break;
+ mdelay(1);
+ timeout--;
+ }
+
+ if (!timeout) {
+ DEBUGOUT("Driver can't access device - SMBI bit is set.\n");
+ return -E1000_ERR_RESET;
+ }
+
+ return E1000_SUCCESS;
+}
+
+/***************************************************************************
+ *
+ * Release semaphore bit (SMBI).
+ *
+ * hw: Struct containing variables accessed by shared code
+ *
+ ***************************************************************************/
+static void e1000_release_software_semaphore(struct e1000_hw *hw)
+{
+ u32 swsm;
+
+ DEBUGFUNC("e1000_release_software_semaphore");
+
+ if (hw->mac_type != e1000_80003es2lan) {
+ return;
+ }
+
+ swsm = er32(SWSM);
+ /* Release the SW semaphores.*/
+ swsm &= ~E1000_SWSM_SMBI;
+ ew32(SWSM, swsm);
+}
+
+/******************************************************************************
+ * Checks if PHY reset is blocked due to SOL/IDER session, for example.
+ * Returning E1000_BLK_PHY_RESET isn't necessarily an error. But it's up to
+ * the caller to figure out how to deal with it.
+ *
+ * hw - Struct containing variables accessed by shared code
+ *
+ * returns: - E1000_BLK_PHY_RESET
+ * E1000_SUCCESS
+ *
+ *****************************************************************************/
+s32 e1000_check_phy_reset_block(struct e1000_hw *hw)
+{
+ u32 manc = 0;
+ u32 fwsm = 0;
+
+ if (hw->mac_type == e1000_ich8lan) {
+ fwsm = er32(FWSM);
+ return (fwsm & E1000_FWSM_RSPCIPHY) ? E1000_SUCCESS
+ : E1000_BLK_PHY_RESET;
+ }
+
+ if (hw->mac_type > e1000_82547_rev_2)
+ manc = er32(MANC);
+ return (manc & E1000_MANC_BLK_PHY_RST_ON_IDE) ?
+ E1000_BLK_PHY_RESET : E1000_SUCCESS;
+}
+
+static u8 e1000_arc_subsystem_valid(struct e1000_hw *hw)
+{
+ u32 fwsm;
+
+ /* On 8257x silicon, registers in the range of 0x8800 - 0x8FFC
+ * may not be provided a DMA clock when no manageability features are
+ * enabled. We do not want to perform any reads/writes to these registers
+ * if this is the case. We read FWSM to determine the manageability mode.
+ */
+ switch (hw->mac_type) {
+ case e1000_82571:
+ case e1000_82572:
+ case e1000_82573:
+ case e1000_80003es2lan:
+ fwsm = er32(FWSM);
+ if ((fwsm & E1000_FWSM_MODE_MASK) != 0)
+ return true;
+ break;
+ case e1000_ich8lan:
+ return true;
+ default:
+ break;
+ }
+ return false;
+}
+
+
+/******************************************************************************
+ * Configure PCI-Ex no-snoop
+ *
+ * hw - Struct containing variables accessed by shared code.
+ * no_snoop - Bitmap of no-snoop events.
+ *
+ * returns: E1000_SUCCESS
+ *
+ *****************************************************************************/
+static s32 e1000_set_pci_ex_no_snoop(struct e1000_hw *hw, u32 no_snoop)
+{
+ u32 gcr_reg = 0;
+
+ DEBUGFUNC("e1000_set_pci_ex_no_snoop");
+
+ if (hw->bus_type == e1000_bus_type_unknown)
+ e1000_get_bus_info(hw);
+
+ if (hw->bus_type != e1000_bus_type_pci_express)
+ return E1000_SUCCESS;
+
+ if (no_snoop) {
+ gcr_reg = er32(GCR);
+ gcr_reg &= ~(PCI_EX_NO_SNOOP_ALL);
+ gcr_reg |= no_snoop;
+ ew32(GCR, gcr_reg);
+ }
+ if (hw->mac_type == e1000_ich8lan) {
+ u32 ctrl_ext;
+
+ ew32(GCR, PCI_EX_82566_SNOOP_ALL);
+
+ ctrl_ext = er32(CTRL_EXT);
+ ctrl_ext |= E1000_CTRL_EXT_RO_DIS;
+ ew32(CTRL_EXT, ctrl_ext);
+ }
+
+ return E1000_SUCCESS;
+}
+
+/***************************************************************************
+ *
+ * Get software semaphore FLAG bit (SWFLAG).
+ * SWFLAG is used to synchronize access to all shared resources between
+ * SW, FW and HW.
+ *
+ * hw: Struct containing variables accessed by shared code
+ *
+ ***************************************************************************/
+static s32 e1000_get_software_flag(struct e1000_hw *hw)
+{
+ s32 timeout = PHY_CFG_TIMEOUT;
+ u32 extcnf_ctrl;
+
+ DEBUGFUNC("e1000_get_software_flag");
+
+ if (hw->mac_type == e1000_ich8lan) {
+ while (timeout) {
+ extcnf_ctrl = er32(EXTCNF_CTRL);
+ extcnf_ctrl |= E1000_EXTCNF_CTRL_SWFLAG;
+ ew32(EXTCNF_CTRL, extcnf_ctrl);
+
+ extcnf_ctrl = er32(EXTCNF_CTRL);
+ if (extcnf_ctrl & E1000_EXTCNF_CTRL_SWFLAG)
+ break;
+ mdelay(1);
+ timeout--;
+ }
+
+ if (!timeout) {
+ DEBUGOUT("FW or HW locks the resource too long.\n");
+ return -E1000_ERR_CONFIG;
+ }
+ }
+
+ return E1000_SUCCESS;
+}
+
+/***************************************************************************
+ *
+ * Release software semaphore FLAG bit (SWFLAG).
+ * SWFLAG is used to synchronize access to all shared resources between
+ * SW, FW and HW.
+ *
+ * hw: Struct containing variables accessed by shared code
+ *
+ ***************************************************************************/
+static void e1000_release_software_flag(struct e1000_hw *hw)
+{
+ u32 extcnf_ctrl;
+
+ DEBUGFUNC("e1000_release_software_flag");
+
+ if (hw->mac_type == e1000_ich8lan) {
+ extcnf_ctrl = er32(EXTCNF_CTRL);
+ extcnf_ctrl &= ~E1000_EXTCNF_CTRL_SWFLAG;
+ ew32(EXTCNF_CTRL, extcnf_ctrl);
+ }
+
+ return;
+}
+
+/******************************************************************************
+ * Reads a 16 bit word or words from the EEPROM using the ICH8's flash access
+ * register.
+ *
+ * hw - Struct containing variables accessed by shared code
+ * offset - offset of word in the EEPROM to read
+ * data - word read from the EEPROM
+ * words - number of words to read
+ *****************************************************************************/
+static s32 e1000_read_eeprom_ich8(struct e1000_hw *hw, u16 offset, u16 words,
+ u16 *data)
+{
+ s32 error = E1000_SUCCESS;
+ u32 flash_bank = 0;
+ u32 act_offset = 0;
+ u32 bank_offset = 0;
+ u16 word = 0;
+ u16 i = 0;
+
+ /* We need to know which is the valid flash bank. In the event
+ * that we didn't allocate eeprom_shadow_ram, we may not be
+ * managing flash_bank. So it cannot be trusted and needs
+ * to be updated with each read.
+ */
+ /* Value of bit 22 corresponds to the flash bank we're on. */
+ flash_bank = (er32(EECD) & E1000_EECD_SEC1VAL) ? 1 : 0;
+
+ /* Adjust offset appropriately if we're on bank 1 - adjust for word size */
+ bank_offset = flash_bank * (hw->flash_bank_size * 2);
+
+ error = e1000_get_software_flag(hw);
+ if (error != E1000_SUCCESS)
+ return error;
+
+ for (i = 0; i < words; i++) {
+ if (hw->eeprom_shadow_ram != NULL &&
+ hw->eeprom_shadow_ram[offset+i].modified) {
+ data[i] = hw->eeprom_shadow_ram[offset+i].eeprom_word;
+ } else {
+ /* The NVM part needs a byte offset, hence * 2 */
+ act_offset = bank_offset + ((offset + i) * 2);
+ error = e1000_read_ich8_word(hw, act_offset, &word);
+ if (error != E1000_SUCCESS)
+ break;
+ data[i] = word;
+ }
+ }
+
+ e1000_release_software_flag(hw);
+
+ return error;
+}
+
+/******************************************************************************
+ * Writes a 16 bit word or words to the EEPROM using the ICH8's flash access
+ * register. Actually, writes go to the shadow RAM cache in the hw
+ * structure (hw->eeprom_shadow_ram). e1000_commit_shadow_ram flushes this
+ * to the NVM, which occurs when the NVM checksum is updated.
+ *
+ * hw - Struct containing variables accessed by shared code
+ * offset - offset of word in the EEPROM to write
+ * words - number of words to write
+ * data - words to write to the EEPROM
+ *****************************************************************************/
+static s32 e1000_write_eeprom_ich8(struct e1000_hw *hw, u16 offset, u16 words,
+ u16 *data)
+{
+ u32 i = 0;
+ s32 error = E1000_SUCCESS;
+
+ error = e1000_get_software_flag(hw);
+ if (error != E1000_SUCCESS)
+ return error;
+
+ /* A driver can write to the NVM only if it has eeprom_shadow_ram
+ * allocated. Subsequent reads to the modified words are read from
+ * this cached structure as well. Writes will only go into this
+ * cached structure unless it's followed by a call to
+ * e1000_update_eeprom_checksum() where it will commit the changes
+ * and clear the "modified" field.
+ */
+ if (hw->eeprom_shadow_ram != NULL) {
+ for (i = 0; i < words; i++) {
+ if ((offset + i) < E1000_SHADOW_RAM_WORDS) {
+ hw->eeprom_shadow_ram[offset+i].modified = true;
+ hw->eeprom_shadow_ram[offset+i].eeprom_word = data[i];
+ } else {
+ error = -E1000_ERR_EEPROM;
+ break;
+ }
+ }
+ } else {
+ /* Drivers have the option not to allocate eeprom_shadow_ram as long
+ * as they don't perform any NVM writes. An attempt to write without
+ * it will result in this error.
+ */
+ error = -E1000_ERR_EEPROM;
+ }
+
+ e1000_release_software_flag(hw);
+
+ return error;
+}
+
+/******************************************************************************
+ * This function does initial flash setup so that a new read/write/erase cycle
+ * can be started.
+ *
+ * hw - The pointer to the hw structure
+ ****************************************************************************/
+static s32 e1000_ich8_cycle_init(struct e1000_hw *hw)
+{
+ union ich8_hws_flash_status hsfsts;
+ s32 error = E1000_ERR_EEPROM;
+ s32 i = 0;
+
+ DEBUGFUNC("e1000_ich8_cycle_init");
+
+ hsfsts.regval = E1000_READ_ICH_FLASH_REG16(hw, ICH_FLASH_HSFSTS);
+
+ /* Check the Flash Descriptor Valid bit in the HW status register */
+ if (hsfsts.hsf_status.fldesvalid == 0) {
+ DEBUGOUT("Flash descriptor invalid. SW Sequencing must be used.");
+ return error;
+ }
+
+ /* Clear FCERR in Hw status by writing 1 */
+ /* Clear DAEL in Hw status by writing a 1 */
+ hsfsts.hsf_status.flcerr = 1;
+ hsfsts.hsf_status.dael = 1;
+
+ E1000_WRITE_ICH_FLASH_REG16(hw, ICH_FLASH_HSFSTS, hsfsts.regval);
+
+ /* Either we should have a hardware SPI cycle-in-progress bit to check
+ * against in order to start a new cycle, or the FDONE bit should be
+ * changed in the hardware so that it is 1 after hardware reset, which can
+ * then be used as an indication whether a cycle is in progress or has
+ * been completed. We should also have some software semaphore mechanism
+ * to guard FDONE or the cycle-in-progress bit, so that access to those
+ * bits by two threads is serialized, or some other way to keep two
+ * threads from starting a cycle at the same time. */
+
+ if (hsfsts.hsf_status.flcinprog == 0) {
+ /* There is no cycle running at present, so we can start a cycle */
+ /* Begin by setting Flash Cycle Done. */
+ hsfsts.hsf_status.flcdone = 1;
+ E1000_WRITE_ICH_FLASH_REG16(hw, ICH_FLASH_HSFSTS, hsfsts.regval);
+ error = E1000_SUCCESS;
+ } else {
+ /* otherwise poll for some time so the current cycle has a chance
+ * to end before giving up. */
+ for (i = 0; i < ICH_FLASH_COMMAND_TIMEOUT; i++) {
+ hsfsts.regval = E1000_READ_ICH_FLASH_REG16(hw, ICH_FLASH_HSFSTS);
+ if (hsfsts.hsf_status.flcinprog == 0) {
+ error = E1000_SUCCESS;
+ break;
+ }
+ udelay(1);
+ }
+ if (error == E1000_SUCCESS) {
+ /* The previous cycle finished within the timeout;
+ * now set the Flash Cycle Done bit. */
+ hsfsts.hsf_status.flcdone = 1;
+ E1000_WRITE_ICH_FLASH_REG16(hw, ICH_FLASH_HSFSTS, hsfsts.regval);
+ } else {
+ DEBUGOUT("Flash controller busy, cannot get access");
+ }
+ }
+ return error;
+}
+
+/******************************************************************************
+ * This function starts a flash cycle and waits for its completion
+ *
+ * hw - The pointer to the hw structure
+ ****************************************************************************/
+static s32 e1000_ich8_flash_cycle(struct e1000_hw *hw, u32 timeout)
+{
+ union ich8_hws_flash_ctrl hsflctl;
+ union ich8_hws_flash_status hsfsts;
+ s32 error = E1000_ERR_EEPROM;
+ u32 i = 0;
+
+ /* Start a cycle by writing 1 in Flash Cycle Go in Hw Flash Control */
+ hsflctl.regval = E1000_READ_ICH_FLASH_REG16(hw, ICH_FLASH_HSFCTL);
+ hsflctl.hsf_ctrl.flcgo = 1;
+ E1000_WRITE_ICH_FLASH_REG16(hw, ICH_FLASH_HSFCTL, hsflctl.regval);
+
+ /* wait till FDONE bit is set to 1 */
+ do {
+ hsfsts.regval = E1000_READ_ICH_FLASH_REG16(hw, ICH_FLASH_HSFSTS);
+ if (hsfsts.hsf_status.flcdone == 1)
+ break;
+ udelay(1);
+ i++;
+ } while (i < timeout);
+ if (hsfsts.hsf_status.flcdone == 1 && hsfsts.hsf_status.flcerr == 0) {
+ error = E1000_SUCCESS;
+ }
+ return error;
+}
+
+/******************************************************************************
+ * Reads a byte or word from the NVM using the ICH8 flash access registers.
+ *
+ * hw - The pointer to the hw structure
+ * index - The index of the byte or word to read.
+ * size - Size of data to read, 1=byte 2=word
+ * data - Pointer to the word to store the value read.
+ *****************************************************************************/
+static s32 e1000_read_ich8_data(struct e1000_hw *hw, u32 index, u32 size,
+ u16 *data)
+{
+ union ich8_hws_flash_status hsfsts;
+ union ich8_hws_flash_ctrl hsflctl;
+ u32 flash_linear_address;
+ u32 flash_data = 0;
+ s32 error = -E1000_ERR_EEPROM;
+ s32 count = 0;
+
+ DEBUGFUNC("e1000_read_ich8_data");
+
+ if (size < 1 || size > 2 || data == NULL ||
+ index > ICH_FLASH_LINEAR_ADDR_MASK)
+ return error;
+
+ flash_linear_address = (ICH_FLASH_LINEAR_ADDR_MASK & index) +
+ hw->flash_base_addr;
+
+ do {
+ udelay(1);
+ /* Steps */
+ error = e1000_ich8_cycle_init(hw);
+ if (error != E1000_SUCCESS)
+ break;
+
+ hsflctl.regval = E1000_READ_ICH_FLASH_REG16(hw, ICH_FLASH_HSFCTL);
+ /* 0b/1b corresponds to 1 or 2 byte size, respectively. */
+ hsflctl.hsf_ctrl.fldbcount = size - 1;
+ hsflctl.hsf_ctrl.flcycle = ICH_CYCLE_READ;
+ E1000_WRITE_ICH_FLASH_REG16(hw, ICH_FLASH_HSFCTL, hsflctl.regval);
+
+ /* Write the last 24 bits of index into Flash Linear address field in
+ * Flash Address */
+ /* TODO: TBD maybe check the index against the size of flash */
+
+ E1000_WRITE_ICH_FLASH_REG(hw, ICH_FLASH_FADDR, flash_linear_address);
+
+ error = e1000_ich8_flash_cycle(hw, ICH_FLASH_COMMAND_TIMEOUT);
+
+ /* If FCERR is set, clear it and retry the whole sequence a few more
+ * times; otherwise read the value from Flash Data0 (least significant
+ * byte first). */
+ if (error == E1000_SUCCESS) {
+ flash_data = E1000_READ_ICH_FLASH_REG(hw, ICH_FLASH_FDATA0);
+ if (size == 1) {
+ *data = (u8)(flash_data & 0x000000FF);
+ } else if (size == 2) {
+ *data = (u16)(flash_data & 0x0000FFFF);
+ }
+ break;
+ } else {
+ /* If we've gotten here, then things are probably completely hosed,
+ * but if the error condition is detected, it won't hurt to give
+ * it another try...ICH_FLASH_CYCLE_REPEAT_COUNT times.
+ */
+ hsfsts.regval = E1000_READ_ICH_FLASH_REG16(hw, ICH_FLASH_HSFSTS);
+ if (hsfsts.hsf_status.flcerr == 1) {
+ /* Repeat for some time before giving up. */
+ continue;
+ } else if (hsfsts.hsf_status.flcdone == 0) {
+ DEBUGOUT("Timeout error - flash cycle did not complete.");
+ break;
+ }
+ }
+ } while (count++ < ICH_FLASH_CYCLE_REPEAT_COUNT);
+
+ return error;
+}
+
+/******************************************************************************
+ * Writes one or two bytes to the NVM using the ICH8 flash access registers.
+ *
+ * hw - The pointer to the hw structure
+ * index - The index of the byte/word to write.
+ * size - Size of data to write, 1=byte 2=word
+ * data - The byte(s) to write to the NVM.
+ *****************************************************************************/
+static s32 e1000_write_ich8_data(struct e1000_hw *hw, u32 index, u32 size,
+ u16 data)
+{
+ union ich8_hws_flash_status hsfsts;
+ union ich8_hws_flash_ctrl hsflctl;
+ u32 flash_linear_address;
+ u32 flash_data = 0;
+ s32 error = -E1000_ERR_EEPROM;
+ s32 count = 0;
+
+ DEBUGFUNC("e1000_write_ich8_data");
+
+ if (size < 1 || size > 2 || data > size * 0xff ||
+ index > ICH_FLASH_LINEAR_ADDR_MASK)
+ return error;
+
+ flash_linear_address = (ICH_FLASH_LINEAR_ADDR_MASK & index) +
+ hw->flash_base_addr;
+
+ do {
+ udelay(1);
+ /* Steps */
+ error = e1000_ich8_cycle_init(hw);
+ if (error != E1000_SUCCESS)
+ break;
+
+ hsflctl.regval = E1000_READ_ICH_FLASH_REG16(hw, ICH_FLASH_HSFCTL);
+ /* 0b/1b corresponds to 1 or 2 byte size, respectively. */
+ hsflctl.hsf_ctrl.fldbcount = size - 1;
+ hsflctl.hsf_ctrl.flcycle = ICH_CYCLE_WRITE;
+ E1000_WRITE_ICH_FLASH_REG16(hw, ICH_FLASH_HSFCTL, hsflctl.regval);
+
+ /* Write the last 24 bits of index into Flash Linear address field in
+ * Flash Address */
+ E1000_WRITE_ICH_FLASH_REG(hw, ICH_FLASH_FADDR, flash_linear_address);
+
+ if (size == 1)
+ flash_data = (u32)data & 0x00FF;
+ else
+ flash_data = (u32)data;
+
+ E1000_WRITE_ICH_FLASH_REG(hw, ICH_FLASH_FDATA0, flash_data);
+
+ /* If FCERR is set, clear it and try the whole sequence a few more
+ * times, otherwise we are done */
+ error = e1000_ich8_flash_cycle(hw, ICH_FLASH_COMMAND_TIMEOUT);
+ if (error == E1000_SUCCESS) {
+ break;
+ } else {
+ /* If we're here, then things are most likely completely hosed,
+ * but if the error condition is detected, it won't hurt to give
+ * it another try...ICH_FLASH_CYCLE_REPEAT_COUNT times.
+ */
+ hsfsts.regval = E1000_READ_ICH_FLASH_REG16(hw, ICH_FLASH_HSFSTS);
+ if (hsfsts.hsf_status.flcerr == 1) {
+ /* Repeat for some time before giving up. */
+ continue;
+ } else if (hsfsts.hsf_status.flcdone == 0) {
+ DEBUGOUT("Timeout error - flash cycle did not complete.");
+ break;
+ }
+ }
+ } while (count++ < ICH_FLASH_CYCLE_REPEAT_COUNT);
+
+ return error;
+}
+
+/******************************************************************************
+ * Reads a single byte from the NVM using the ICH8 flash access registers.
+ *
+ * hw - pointer to e1000_hw structure
+ * index - The index of the byte to read.
+ * data - Pointer to a byte to store the value read.
+ *****************************************************************************/
+static s32 e1000_read_ich8_byte(struct e1000_hw *hw, u32 index, u8 *data)
+{
+ s32 status = E1000_SUCCESS;
+ u16 word = 0;
+
+ status = e1000_read_ich8_data(hw, index, 1, &word);
+ if (status == E1000_SUCCESS) {
+ *data = (u8)word;
+ }
+
+ return status;
+}
+
+/******************************************************************************
+ * Writes a single byte to the NVM using the ICH8 flash access registers.
+ * Performs verification by reading back the value and then going through
+ * a retry algorithm before giving up.
+ *
+ * hw - pointer to e1000_hw structure
+ * index - The index of the byte to write.
+ * byte - The byte to write to the NVM.
+ *****************************************************************************/
+static s32 e1000_verify_write_ich8_byte(struct e1000_hw *hw, u32 index, u8 byte)
+{
+ s32 error = E1000_SUCCESS;
+ s32 program_retries = 0;
+
+ DEBUGOUT2("Byte := %2.2X Offset := %d\n", byte, index);
+
+ error = e1000_write_ich8_byte(hw, index, byte);
+
+ if (error != E1000_SUCCESS) {
+ for (program_retries = 0; program_retries < 100; program_retries++) {
+ DEBUGOUT2("Retrying \t Byte := %2.2X Offset := %d\n", byte, index);
+ error = e1000_write_ich8_byte(hw, index, byte);
+ udelay(100);
+ if (error == E1000_SUCCESS)
+ break;
+ }
+ }
+
+ if (program_retries == 100)
+ error = E1000_ERR_EEPROM;
+
+ return error;
+}
+
+/******************************************************************************
+ * Writes a single byte to the NVM using the ICH8 flash access registers.
+ *
+ * hw - pointer to e1000_hw structure
+ * index - The index of the byte to write.
+ * data - The byte to write to the NVM.
+ *****************************************************************************/
+static s32 e1000_write_ich8_byte(struct e1000_hw *hw, u32 index, u8 data)
+{
+ s32 status = E1000_SUCCESS;
+ u16 word = (u16)data;
+
+ status = e1000_write_ich8_data(hw, index, 1, word);
+
+ return status;
+}
+
+/******************************************************************************
+ * Reads a word from the NVM using the ICH8 flash access registers.
+ *
+ * hw - pointer to e1000_hw structure
+ * index - The starting byte index of the word to read.
+ * data - Pointer to a word to store the value read.
+ *****************************************************************************/
+static s32 e1000_read_ich8_word(struct e1000_hw *hw, u32 index, u16 *data)
+{
+ s32 status = E1000_SUCCESS;
+ status = e1000_read_ich8_data(hw, index, 2, data);
+ return status;
+}
+
+/******************************************************************************
+ * Erases the bank specified. Each bank may be a 4, 8 or 64k block. Banks are 0
+ * based.
+ *
+ * hw - pointer to e1000_hw structure
+ * bank - 0 for first bank, 1 for second bank
+ *
+ * Note that this function may actually erase as much as 8 or 64 KBytes. The
+ * amount of NVM used in each bank is a *minimum* of 4 KBytes, but in fact the
+ * bank size may be 4, 8 or 64 KBytes
+ *****************************************************************************/
+static s32 e1000_erase_ich8_4k_segment(struct e1000_hw *hw, u32 bank)
+{
+ union ich8_hws_flash_status hsfsts;
+ union ich8_hws_flash_ctrl hsflctl;
+ u32 flash_linear_address;
+ s32 count = 0;
+ s32 error = E1000_ERR_EEPROM;
+ s32 iteration;
+ s32 sub_sector_size = 0;
+ s32 bank_size;
+ s32 j = 0;
+ s32 error_flag = 0;
+
+ hsfsts.regval = E1000_READ_ICH_FLASH_REG16(hw, ICH_FLASH_HSFSTS);
+
+ /* Determine HW Sector size: Read BERASE bits of Hw flash Status register */
+ /* 00: The Hw sector is 256 bytes, hence we need to erase 16
+ * consecutive sectors. The start index for the nth Hw sector can be
+ * calculated as bank * 4096 + n * 256
+ * 01: The Hw sector is 4K bytes, hence we need to erase 1 sector.
+ * The start index for the nth Hw sector can be calculated
+ * as bank * 4096
+ * 10: The HW sector is 8K bytes
+ * 11: The Hw sector size is 64K bytes */
+ if (hsfsts.hsf_status.berasesz == 0x0) {
+ /* Hw sector size 256 */
+ sub_sector_size = ICH_FLASH_SEG_SIZE_256;
+ bank_size = ICH_FLASH_SECTOR_SIZE;
+ iteration = ICH_FLASH_SECTOR_SIZE / ICH_FLASH_SEG_SIZE_256;
+ } else if (hsfsts.hsf_status.berasesz == 0x1) {
+ bank_size = ICH_FLASH_SEG_SIZE_4K;
+ iteration = 1;
+ } else if (hsfsts.hsf_status.berasesz == 0x3) {
+ bank_size = ICH_FLASH_SEG_SIZE_64K;
+ iteration = 1;
+ } else {
+ return error;
+ }
+
+ for (j = 0; j < iteration ; j++) {
+ do {
+ count++;
+ /* Steps */
+ error = e1000_ich8_cycle_init(hw);
+ if (error != E1000_SUCCESS) {
+ error_flag = 1;
+ break;
+ }
+
+ /* Write a value 11 (block Erase) in Flash Cycle field in Hw flash
+ * Control */
+ hsflctl.regval = E1000_READ_ICH_FLASH_REG16(hw, ICH_FLASH_HSFCTL);
+ hsflctl.hsf_ctrl.flcycle = ICH_CYCLE_ERASE;
+ E1000_WRITE_ICH_FLASH_REG16(hw, ICH_FLASH_HSFCTL, hsflctl.regval);
+
+ /* Write the last 24 bits of an index within the block into Flash
+ * Linear address field in Flash Address. This probably needs to
+ * be calculated here based off the on-chip erase sector size and
+ * the software bank size (4, 8 or 64 KBytes) */
+ flash_linear_address = bank * bank_size + j * sub_sector_size;
+ flash_linear_address += hw->flash_base_addr;
+ flash_linear_address &= ICH_FLASH_LINEAR_ADDR_MASK;
+
+ E1000_WRITE_ICH_FLASH_REG(hw, ICH_FLASH_FADDR, flash_linear_address);
+
+ error = e1000_ich8_flash_cycle(hw, ICH_FLASH_ERASE_TIMEOUT);
+ /* Check if FCERR is set to 1; if so, clear it and try the whole
+ * sequence a few more times, else done. */
+ if (error == E1000_SUCCESS) {
+ break;
+ } else {
+ hsfsts.regval = E1000_READ_ICH_FLASH_REG16(hw, ICH_FLASH_HSFSTS);
+ if (hsfsts.hsf_status.flcerr == 1) {
+ /* repeat for some time before giving up */
+ continue;
+ } else if (hsfsts.hsf_status.flcdone == 0) {
+ error_flag = 1;
+ break;
+ }
+ }
+ } while ((count < ICH_FLASH_CYCLE_REPEAT_COUNT) && !error_flag);
+ if (error_flag == 1)
+ break;
+ }
+ if (error_flag != 1)
+ error = E1000_SUCCESS;
+ return error;
+}
+
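+/******************************************************************************
+ * Loads PHY registers from the extended configuration region of the NVM.
+ * Each entry in the region is a data word followed by a PHY register
+ * address word; the data word is written to the PHY register it is paired
+ * with.
+ *
+ * hw - pointer to e1000_hw structure
+ * cnf_base_addr - base address of the configuration region, in dwords
+ * cnf_size - size of the configuration region, in dwords
+ *****************************************************************************/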
+static s32 e1000_init_lcd_from_nvm_config_region(struct e1000_hw *hw,
+ u32 cnf_base_addr,
+ u32 cnf_size)
+{
+ u32 ret_val = E1000_SUCCESS;
+ u16 word_addr, reg_data, reg_addr;
+ u16 i;
+
+ /* cnf_base_addr is in DWORD */
+ word_addr = (u16)(cnf_base_addr << 1);
+
+ /* cnf_size is given in dwords */
+ for (i = 0; i < cnf_size; i++) {
+ ret_val = e1000_read_eeprom(hw, (word_addr + i*2), 1, &reg_data);
+ if (ret_val)
+ return ret_val;
+
+ ret_val = e1000_read_eeprom(hw, (word_addr + i*2 + 1), 1, &reg_addr);
+ if (ret_val)
+ return ret_val;
+
+ ret_val = e1000_get_software_flag(hw);
+ if (ret_val != E1000_SUCCESS)
+ return ret_val;
+
+ ret_val = e1000_write_phy_reg_ex(hw, (u32)reg_addr, reg_data);
+
+ e1000_release_software_flag(hw);
+ }
+
+ return ret_val;
+}
+
+
+/******************************************************************************
+ * This function initializes the PHY from the NVM on ICH8 platforms. This
+ * is needed due to an issue where the NVM configuration is not properly
+ * autoloaded after power transitions. Therefore, after each PHY reset, we
+ * will load the configuration data out of the NVM manually.
+ *
+ * hw: Struct containing variables accessed by shared code
+ *****************************************************************************/
+static s32 e1000_init_lcd_from_nvm(struct e1000_hw *hw)
+{
+ u32 reg_data, cnf_base_addr, cnf_size, ret_val, loop;
+
+ if (hw->phy_type != e1000_phy_igp_3)
+ return E1000_SUCCESS;
+
+ /* Check if SW needs to configure the PHY */
+ reg_data = er32(FEXTNVM);
+ if (!(reg_data & FEXTNVM_SW_CONFIG))
+ return E1000_SUCCESS;
+
+ /* Wait for basic configuration to complete before proceeding */
+ loop = 0;
+ do {
+ reg_data = er32(STATUS) & E1000_STATUS_LAN_INIT_DONE;
+ udelay(100);
+ loop++;
+ } while ((!reg_data) && (loop < 50));
+
+ /* Clear the Init Done bit for the next init event */
+ reg_data = er32(STATUS);
+ reg_data &= ~E1000_STATUS_LAN_INIT_DONE;
+ ew32(STATUS, reg_data);
+
+ /* Make sure HW does not configure LCD from PHY extended configuration
+ before SW configuration */
+ reg_data = er32(EXTCNF_CTRL);
+ if ((reg_data & E1000_EXTCNF_CTRL_LCD_WRITE_ENABLE) == 0x0000) {
+ reg_data = er32(EXTCNF_SIZE);
+ cnf_size = reg_data & E1000_EXTCNF_SIZE_EXT_PCIE_LENGTH;
+ cnf_size >>= 16;
+ if (cnf_size) {
+ reg_data = er32(EXTCNF_CTRL);
+ cnf_base_addr = reg_data & E1000_EXTCNF_CTRL_EXT_CNF_POINTER;
+ /* cnf_base_addr is in DWORD */
+ cnf_base_addr >>= 16;
+
+ /* Configure LCD from extended configuration region. */
+ ret_val = e1000_init_lcd_from_nvm_config_region(hw, cnf_base_addr,
+ cnf_size);
+ if (ret_val)
+ return ret_val;
+ }
+ }
+
+ return E1000_SUCCESS;
+}
+
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/devices/e1000/e1000_hw-2.6.28-ethercat.h Mon Feb 07 21:30:25 2011 +0100
@@ -0,0 +1,3406 @@
+/*******************************************************************************
+
+ Intel PRO/1000 Linux driver
+ Copyright(c) 1999 - 2006 Intel Corporation.
+
+ This program is free software; you can redistribute it and/or modify it
+ under the terms and conditions of the GNU General Public License,
+ version 2, as published by the Free Software Foundation.
+
+ This program is distributed in the hope it will be useful, but WITHOUT
+ ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ more details.
+
+ You should have received a copy of the GNU General Public License along with
+ this program; if not, write to the Free Software Foundation, Inc.,
+ 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
+
+ The full GNU General Public License is included in this distribution in
+ the file called "COPYING".
+
+ Contact Information:
+ Linux NICS <linux.nics@intel.com>
+ e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
+ Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
+
+*******************************************************************************/
+
+/* e1000_hw.h
+ * Structures, enums, and macros for the MAC
+ */
+
+#ifndef _E1000_HW_H_
+#define _E1000_HW_H_
+
+#include "e1000_osdep-2.6.28-ethercat.h"
+
+
+/* Forward declarations of structures used by the shared code */
+struct e1000_hw;
+struct e1000_hw_stats;
+
+/* Enumerated types specific to the e1000 hardware */
+/* Media Access Controllers */
+typedef enum {
+ e1000_undefined = 0,
+ e1000_82542_rev2_0,
+ e1000_82542_rev2_1,
+ e1000_82543,
+ e1000_82544,
+ e1000_82540,
+ e1000_82545,
+ e1000_82545_rev_3,
+ e1000_82546,
+ e1000_82546_rev_3,
+ e1000_82541,
+ e1000_82541_rev_2,
+ e1000_82547,
+ e1000_82547_rev_2,
+ e1000_82571,
+ e1000_82572,
+ e1000_82573,
+ e1000_80003es2lan,
+ e1000_ich8lan,
+ e1000_num_macs
+} e1000_mac_type;
+
+typedef enum {
+ e1000_eeprom_uninitialized = 0,
+ e1000_eeprom_spi,
+ e1000_eeprom_microwire,
+ e1000_eeprom_flash,
+ e1000_eeprom_ich8,
+ e1000_eeprom_none, /* No NVM support */
+ e1000_num_eeprom_types
+} e1000_eeprom_type;
+
+/* Media Types */
+typedef enum {
+ e1000_media_type_copper = 0,
+ e1000_media_type_fiber = 1,
+ e1000_media_type_internal_serdes = 2,
+ e1000_num_media_types
+} e1000_media_type;
+
+typedef enum {
+ e1000_10_half = 0,
+ e1000_10_full = 1,
+ e1000_100_half = 2,
+ e1000_100_full = 3
+} e1000_speed_duplex_type;
+
+/* Flow Control Settings */
+typedef enum {
+ E1000_FC_NONE = 0,
+ E1000_FC_RX_PAUSE = 1,
+ E1000_FC_TX_PAUSE = 2,
+ E1000_FC_FULL = 3,
+ E1000_FC_DEFAULT = 0xFF
+} e1000_fc_type;
+
+struct e1000_shadow_ram {
+ u16 eeprom_word;
+ bool modified;
+};
+
+/* PCI bus types */
+typedef enum {
+ e1000_bus_type_unknown = 0,
+ e1000_bus_type_pci,
+ e1000_bus_type_pcix,
+ e1000_bus_type_pci_express,
+ e1000_bus_type_reserved
+} e1000_bus_type;
+
+/* PCI bus speeds */
+typedef enum {
+ e1000_bus_speed_unknown = 0,
+ e1000_bus_speed_33,
+ e1000_bus_speed_66,
+ e1000_bus_speed_100,
+ e1000_bus_speed_120,
+ e1000_bus_speed_133,
+ e1000_bus_speed_2500,
+ e1000_bus_speed_reserved
+} e1000_bus_speed;
+
+/* PCI bus widths */
+typedef enum {
+ e1000_bus_width_unknown = 0,
+ /* These PCIe values should literally match the possible return values
+ * from config space */
+ e1000_bus_width_pciex_1 = 1,
+ e1000_bus_width_pciex_2 = 2,
+ e1000_bus_width_pciex_4 = 4,
+ e1000_bus_width_32,
+ e1000_bus_width_64,
+ e1000_bus_width_reserved
+} e1000_bus_width;
+
+/* PHY status info structure and supporting enums */
+typedef enum {
+ e1000_cable_length_50 = 0,
+ e1000_cable_length_50_80,
+ e1000_cable_length_80_110,
+ e1000_cable_length_110_140,
+ e1000_cable_length_140,
+ e1000_cable_length_undefined = 0xFF
+} e1000_cable_length;
+
+typedef enum {
+ e1000_gg_cable_length_60 = 0,
+ e1000_gg_cable_length_60_115 = 1,
+ e1000_gg_cable_length_115_150 = 2,
+ e1000_gg_cable_length_150 = 4
+} e1000_gg_cable_length;
+
+typedef enum {
+ e1000_igp_cable_length_10 = 10,
+ e1000_igp_cable_length_20 = 20,
+ e1000_igp_cable_length_30 = 30,
+ e1000_igp_cable_length_40 = 40,
+ e1000_igp_cable_length_50 = 50,
+ e1000_igp_cable_length_60 = 60,
+ e1000_igp_cable_length_70 = 70,
+ e1000_igp_cable_length_80 = 80,
+ e1000_igp_cable_length_90 = 90,
+ e1000_igp_cable_length_100 = 100,
+ e1000_igp_cable_length_110 = 110,
+ e1000_igp_cable_length_115 = 115,
+ e1000_igp_cable_length_120 = 120,
+ e1000_igp_cable_length_130 = 130,
+ e1000_igp_cable_length_140 = 140,
+ e1000_igp_cable_length_150 = 150,
+ e1000_igp_cable_length_160 = 160,
+ e1000_igp_cable_length_170 = 170,
+ e1000_igp_cable_length_180 = 180
+} e1000_igp_cable_length;
+
+typedef enum {
+ e1000_10bt_ext_dist_enable_normal = 0,
+ e1000_10bt_ext_dist_enable_lower,
+ e1000_10bt_ext_dist_enable_undefined = 0xFF
+} e1000_10bt_ext_dist_enable;
+
+typedef enum {
+ e1000_rev_polarity_normal = 0,
+ e1000_rev_polarity_reversed,
+ e1000_rev_polarity_undefined = 0xFF
+} e1000_rev_polarity;
+
+typedef enum {
+ e1000_downshift_normal = 0,
+ e1000_downshift_activated,
+ e1000_downshift_undefined = 0xFF
+} e1000_downshift;
+
+typedef enum {
+ e1000_smart_speed_default = 0,
+ e1000_smart_speed_on,
+ e1000_smart_speed_off
+} e1000_smart_speed;
+
+typedef enum {
+ e1000_polarity_reversal_enabled = 0,
+ e1000_polarity_reversal_disabled,
+ e1000_polarity_reversal_undefined = 0xFF
+} e1000_polarity_reversal;
+
+typedef enum {
+ e1000_auto_x_mode_manual_mdi = 0,
+ e1000_auto_x_mode_manual_mdix,
+ e1000_auto_x_mode_auto1,
+ e1000_auto_x_mode_auto2,
+ e1000_auto_x_mode_undefined = 0xFF
+} e1000_auto_x_mode;
+
+typedef enum {
+ e1000_1000t_rx_status_not_ok = 0,
+ e1000_1000t_rx_status_ok,
+ e1000_1000t_rx_status_undefined = 0xFF
+} e1000_1000t_rx_status;
+
+typedef enum {
+ e1000_phy_m88 = 0,
+ e1000_phy_igp,
+ e1000_phy_igp_2,
+ e1000_phy_gg82563,
+ e1000_phy_igp_3,
+ e1000_phy_ife,
+ e1000_phy_undefined = 0xFF
+} e1000_phy_type;
+
+typedef enum {
+ e1000_ms_hw_default = 0,
+ e1000_ms_force_master,
+ e1000_ms_force_slave,
+ e1000_ms_auto
+} e1000_ms_type;
+
+typedef enum {
+ e1000_ffe_config_enabled = 0,
+ e1000_ffe_config_active,
+ e1000_ffe_config_blocked
+} e1000_ffe_config;
+
+typedef enum {
+ e1000_dsp_config_disabled = 0,
+ e1000_dsp_config_enabled,
+ e1000_dsp_config_activated,
+ e1000_dsp_config_undefined = 0xFF
+} e1000_dsp_config;
+
+struct e1000_phy_info {
+ e1000_cable_length cable_length;
+ e1000_10bt_ext_dist_enable extended_10bt_distance;
+ e1000_rev_polarity cable_polarity;
+ e1000_downshift downshift;
+ e1000_polarity_reversal polarity_correction;
+ e1000_auto_x_mode mdix_mode;
+ e1000_1000t_rx_status local_rx;
+ e1000_1000t_rx_status remote_rx;
+};
+
+struct e1000_phy_stats {
+ u32 idle_errors;
+ u32 receive_errors;
+};
+
+struct e1000_eeprom_info {
+ e1000_eeprom_type type;
+ u16 word_size;
+ u16 opcode_bits;
+ u16 address_bits;
+ u16 delay_usec;
+ u16 page_size;
+ bool use_eerd;
+ bool use_eewr;
+};
+
+/* Flex ASF Information */
+#define E1000_HOST_IF_MAX_SIZE 2048
+
+typedef enum {
+ e1000_byte_align = 0,
+ e1000_word_align = 1,
+ e1000_dword_align = 2
+} e1000_align_type;
+
+
+
+/* Error Codes */
+#define E1000_SUCCESS 0
+#define E1000_ERR_EEPROM 1
+#define E1000_ERR_PHY 2
+#define E1000_ERR_CONFIG 3
+#define E1000_ERR_PARAM 4
+#define E1000_ERR_MAC_TYPE 5
+#define E1000_ERR_PHY_TYPE 6
+#define E1000_ERR_RESET 9
+#define E1000_ERR_MASTER_REQUESTS_PENDING 10
+#define E1000_ERR_HOST_INTERFACE_COMMAND 11
+#define E1000_BLK_PHY_RESET 12
+#define E1000_ERR_SWFW_SYNC 13
+
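+/* Swaps the two bytes of a 16-bit word, e.g. 0x1234 becomes 0x3412. */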
+#define E1000_BYTE_SWAP_WORD(_value) ((((_value) & 0x00ff) << 8) | \
+ (((_value) & 0xff00) >> 8))
+
+/* Function prototypes */
+/* Initialization */
+s32 e1000_reset_hw(struct e1000_hw *hw);
+s32 e1000_init_hw(struct e1000_hw *hw);
+s32 e1000_set_mac_type(struct e1000_hw *hw);
+void e1000_set_media_type(struct e1000_hw *hw);
+
+/* Link Configuration */
+s32 e1000_setup_link(struct e1000_hw *hw);
+s32 e1000_phy_setup_autoneg(struct e1000_hw *hw);
+void e1000_config_collision_dist(struct e1000_hw *hw);
+s32 e1000_check_for_link(struct e1000_hw *hw);
+s32 e1000_get_speed_and_duplex(struct e1000_hw *hw, u16 *speed, u16 *duplex);
+s32 e1000_force_mac_fc(struct e1000_hw *hw);
+
+/* PHY */
+s32 e1000_read_phy_reg(struct e1000_hw *hw, u32 reg_addr, u16 *phy_data);
+s32 e1000_write_phy_reg(struct e1000_hw *hw, u32 reg_addr, u16 data);
+s32 e1000_phy_hw_reset(struct e1000_hw *hw);
+s32 e1000_phy_reset(struct e1000_hw *hw);
+s32 e1000_phy_get_info(struct e1000_hw *hw, struct e1000_phy_info *phy_info);
+s32 e1000_validate_mdi_setting(struct e1000_hw *hw);
+
+void e1000_phy_powerdown_workaround(struct e1000_hw *hw);
+
+/* EEPROM Functions */
+s32 e1000_init_eeprom_params(struct e1000_hw *hw);
+
+/* MNG HOST IF functions */
+u32 e1000_enable_mng_pass_thru(struct e1000_hw *hw);
+
+#define E1000_MNG_DHCP_TX_PAYLOAD_CMD 64
+#define E1000_HI_MAX_MNG_DATA_LENGTH 0x6F8 /* Host Interface data length */
+
+#define E1000_MNG_DHCP_COMMAND_TIMEOUT 10 /* Time in ms to process MNG command */
+#define E1000_MNG_DHCP_COOKIE_OFFSET 0x6F0 /* Cookie offset */
+#define E1000_MNG_DHCP_COOKIE_LENGTH 0x10 /* Cookie length */
+#define E1000_MNG_IAMT_MODE 0x3
+#define E1000_MNG_ICH_IAMT_MODE 0x2
+#define E1000_IAMT_SIGNATURE 0x544D4149 /* Intel(R) Active Management Technology signature */
+
+#define E1000_MNG_DHCP_COOKIE_STATUS_PARSING_SUPPORT 0x1 /* DHCP parsing enabled */
+#define E1000_MNG_DHCP_COOKIE_STATUS_VLAN_SUPPORT 0x2 /* VLAN tagging enabled */
+#define E1000_VFTA_ENTRY_SHIFT 0x5
+#define E1000_VFTA_ENTRY_MASK 0x7F
+#define E1000_VFTA_ENTRY_BIT_SHIFT_MASK 0x1F
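+/* The VLAN Filter Table Array has 128 32-bit entries; a VLAN ID selects entry
+ * (vid >> E1000_VFTA_ENTRY_SHIFT) & E1000_VFTA_ENTRY_MASK and bit
+ * (vid & E1000_VFTA_ENTRY_BIT_SHIFT_MASK) within that entry. */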
+
+struct e1000_host_mng_command_header {
+ u8 command_id;
+ u8 checksum;
+ u16 reserved1;
+ u16 reserved2;
+ u16 command_length;
+};
+
+struct e1000_host_mng_command_info {
+ struct e1000_host_mng_command_header command_header; /* Command Head/Command Result Head has 4 bytes */
+ u8 command_data[E1000_HI_MAX_MNG_DATA_LENGTH]; /* Command data can be of length 0..0x658 */
+};
+#ifdef __BIG_ENDIAN
+struct e1000_host_mng_dhcp_cookie{
+ u32 signature;
+ u16 vlan_id;
+ u8 reserved0;
+ u8 status;
+ u32 reserved1;
+ u8 checksum;
+ u8 reserved3;
+ u16 reserved2;
+};
+#else
+struct e1000_host_mng_dhcp_cookie{
+ u32 signature;
+ u8 status;
+ u8 reserved0;
+ u16 vlan_id;
+ u32 reserved1;
+ u16 reserved2;
+ u8 reserved3;
+ u8 checksum;
+};
+#endif
+
+s32 e1000_mng_write_dhcp_info(struct e1000_hw *hw, u8 *buffer,
+ u16 length);
+bool e1000_check_mng_mode(struct e1000_hw *hw);
+bool e1000_enable_tx_pkt_filtering(struct e1000_hw *hw);
+s32 e1000_read_eeprom(struct e1000_hw *hw, u16 reg, u16 words, u16 *data);
+s32 e1000_validate_eeprom_checksum(struct e1000_hw *hw);
+s32 e1000_update_eeprom_checksum(struct e1000_hw *hw);
+s32 e1000_write_eeprom(struct e1000_hw *hw, u16 reg, u16 words, u16 *data);
+s32 e1000_read_mac_addr(struct e1000_hw * hw);
+
+/* Filters (multicast, vlan, receive) */
+u32 e1000_hash_mc_addr(struct e1000_hw *hw, u8 * mc_addr);
+void e1000_mta_set(struct e1000_hw *hw, u32 hash_value);
+void e1000_rar_set(struct e1000_hw *hw, u8 * mc_addr, u32 rar_index);
+void e1000_write_vfta(struct e1000_hw *hw, u32 offset, u32 value);
+
+/* LED functions */
+s32 e1000_setup_led(struct e1000_hw *hw);
+s32 e1000_cleanup_led(struct e1000_hw *hw);
+s32 e1000_led_on(struct e1000_hw *hw);
+s32 e1000_led_off(struct e1000_hw *hw);
+s32 e1000_blink_led_start(struct e1000_hw *hw);
+
+/* Adaptive IFS Functions */
+
+/* Everything else */
+void e1000_reset_adaptive(struct e1000_hw *hw);
+void e1000_update_adaptive(struct e1000_hw *hw);
+void e1000_tbi_adjust_stats(struct e1000_hw *hw, struct e1000_hw_stats *stats, u32 frame_len, u8 * mac_addr);
+void e1000_get_bus_info(struct e1000_hw *hw);
+void e1000_pci_set_mwi(struct e1000_hw *hw);
+void e1000_pci_clear_mwi(struct e1000_hw *hw);
+s32 e1000_read_pcie_cap_reg(struct e1000_hw *hw, u32 reg, u16 *value);
+void e1000_pcix_set_mmrbc(struct e1000_hw *hw, int mmrbc);
+int e1000_pcix_get_mmrbc(struct e1000_hw *hw);
+/* Port I/O is only supported on 82544 and newer */
+void e1000_io_write(struct e1000_hw *hw, unsigned long port, u32 value);
+s32 e1000_disable_pciex_master(struct e1000_hw *hw);
+s32 e1000_check_phy_reset_block(struct e1000_hw *hw);
+
+
+#define E1000_READ_REG_IO(a, reg) \
+ e1000_read_reg_io((a), E1000_##reg)
+#define E1000_WRITE_REG_IO(a, reg, val) \
+ e1000_write_reg_io((a), E1000_##reg, val)
+
+/* PCI Device IDs */
+#define E1000_DEV_ID_82542 0x1000
+#define E1000_DEV_ID_82543GC_FIBER 0x1001
+#define E1000_DEV_ID_82543GC_COPPER 0x1004
+#define E1000_DEV_ID_82544EI_COPPER 0x1008
+#define E1000_DEV_ID_82544EI_FIBER 0x1009
+#define E1000_DEV_ID_82544GC_COPPER 0x100C
+#define E1000_DEV_ID_82544GC_LOM 0x100D
+#define E1000_DEV_ID_82540EM 0x100E
+#define E1000_DEV_ID_82540EM_LOM 0x1015
+#define E1000_DEV_ID_82540EP_LOM 0x1016
+#define E1000_DEV_ID_82540EP 0x1017
+#define E1000_DEV_ID_82540EP_LP 0x101E
+#define E1000_DEV_ID_82545EM_COPPER 0x100F
+#define E1000_DEV_ID_82545EM_FIBER 0x1011
+#define E1000_DEV_ID_82545GM_COPPER 0x1026
+#define E1000_DEV_ID_82545GM_FIBER 0x1027
+#define E1000_DEV_ID_82545GM_SERDES 0x1028
+#define E1000_DEV_ID_82546EB_COPPER 0x1010
+#define E1000_DEV_ID_82546EB_FIBER 0x1012
+#define E1000_DEV_ID_82546EB_QUAD_COPPER 0x101D
+#define E1000_DEV_ID_82541EI 0x1013
+#define E1000_DEV_ID_82541EI_MOBILE 0x1018
+#define E1000_DEV_ID_82541ER_LOM 0x1014
+#define E1000_DEV_ID_82541ER 0x1078
+#define E1000_DEV_ID_82547GI 0x1075
+#define E1000_DEV_ID_82541GI 0x1076
+#define E1000_DEV_ID_82541GI_MOBILE 0x1077
+#define E1000_DEV_ID_82541GI_LF 0x107C
+#define E1000_DEV_ID_82546GB_COPPER 0x1079
+#define E1000_DEV_ID_82546GB_FIBER 0x107A
+#define E1000_DEV_ID_82546GB_SERDES 0x107B
+#define E1000_DEV_ID_82546GB_PCIE 0x108A
+#define E1000_DEV_ID_82546GB_QUAD_COPPER 0x1099
+#define E1000_DEV_ID_82547EI 0x1019
+#define E1000_DEV_ID_82547EI_MOBILE 0x101A
+#define E1000_DEV_ID_82571EB_COPPER 0x105E
+#define E1000_DEV_ID_82571EB_FIBER 0x105F
+#define E1000_DEV_ID_82571EB_SERDES 0x1060
+#define E1000_DEV_ID_82571EB_QUAD_COPPER 0x10A4
+#define E1000_DEV_ID_82571PT_QUAD_COPPER 0x10D5
+#define E1000_DEV_ID_82571EB_QUAD_FIBER 0x10A5
+#define E1000_DEV_ID_82571EB_QUAD_COPPER_LOWPROFILE 0x10BC
+#define E1000_DEV_ID_82571EB_SERDES_DUAL 0x10D9
+#define E1000_DEV_ID_82571EB_SERDES_QUAD 0x10DA
+#define E1000_DEV_ID_82572EI_COPPER 0x107D
+#define E1000_DEV_ID_82572EI_FIBER 0x107E
+#define E1000_DEV_ID_82572EI_SERDES 0x107F
+#define E1000_DEV_ID_82572EI 0x10B9
+#define E1000_DEV_ID_82573E 0x108B
+#define E1000_DEV_ID_82573E_IAMT 0x108C
+#define E1000_DEV_ID_82573L 0x109A
+#define E1000_DEV_ID_82546GB_QUAD_COPPER_KSP3 0x10B5
+#define E1000_DEV_ID_80003ES2LAN_COPPER_DPT 0x1096
+#define E1000_DEV_ID_80003ES2LAN_SERDES_DPT 0x1098
+#define E1000_DEV_ID_80003ES2LAN_COPPER_SPT 0x10BA
+#define E1000_DEV_ID_80003ES2LAN_SERDES_SPT 0x10BB
+
+#define E1000_DEV_ID_ICH8_IGP_M_AMT 0x1049
+#define E1000_DEV_ID_ICH8_IGP_AMT 0x104A
+#define E1000_DEV_ID_ICH8_IGP_C 0x104B
+#define E1000_DEV_ID_ICH8_IFE 0x104C
+#define E1000_DEV_ID_ICH8_IFE_GT 0x10C4
+#define E1000_DEV_ID_ICH8_IFE_G 0x10C5
+#define E1000_DEV_ID_ICH8_IGP_M 0x104D
+
+
+#define NODE_ADDRESS_SIZE 6
+#define ETH_LENGTH_OF_ADDRESS 6
+
+/* MAC decode size is 128K - This is the size of BAR0 */
+#define MAC_DECODE_SIZE (128 * 1024)
+
+#define E1000_82542_2_0_REV_ID 2
+#define E1000_82542_2_1_REV_ID 3
+#define E1000_REVISION_0 0
+#define E1000_REVISION_1 1
+#define E1000_REVISION_2 2
+#define E1000_REVISION_3 3
+
+#define SPEED_10 10
+#define SPEED_100 100
+#define SPEED_1000 1000
+#define HALF_DUPLEX 1
+#define FULL_DUPLEX 2
+
+/* The sizes (in bytes) of an Ethernet packet */
+#define ENET_HEADER_SIZE 14
+#define MAXIMUM_ETHERNET_FRAME_SIZE 1518 /* With FCS */
+#define MINIMUM_ETHERNET_FRAME_SIZE 64 /* With FCS */
+#define ETHERNET_FCS_SIZE 4
+#define MAXIMUM_ETHERNET_PACKET_SIZE \
+ (MAXIMUM_ETHERNET_FRAME_SIZE - ETHERNET_FCS_SIZE)
+#define MINIMUM_ETHERNET_PACKET_SIZE \
+ (MINIMUM_ETHERNET_FRAME_SIZE - ETHERNET_FCS_SIZE)
+#define CRC_LENGTH ETHERNET_FCS_SIZE
+#define MAX_JUMBO_FRAME_SIZE 0x3F00
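+/* i.e. MAXIMUM_ETHERNET_PACKET_SIZE is 1514 bytes and
+ * MINIMUM_ETHERNET_PACKET_SIZE is 60 bytes (the frame sizes above less the
+ * 4-byte FCS); MAX_JUMBO_FRAME_SIZE is 16128 bytes. */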
+
+
+/* 802.1q VLAN Packet Sizes */
+#define VLAN_TAG_SIZE 4 /* 802.3ac tag (not DMAed) */
+
+/* Ethertype field values */
+#define ETHERNET_IEEE_VLAN_TYPE 0x8100 /* 802.3ac packet */
+#define ETHERNET_IP_TYPE 0x0800 /* IP packets */
+#define ETHERNET_ARP_TYPE 0x0806 /* Address Resolution Protocol (ARP) */
+
+/* Packet Header defines */
+#define IP_PROTOCOL_TCP 6
+#define IP_PROTOCOL_UDP 0x11
+
+/* This defines the bits that are set in the Interrupt Mask
+ * Set/Read Register. Each bit is documented below:
+ * o RXDMT0 = Receive Descriptor Minimum Threshold hit (ring 0)
+ * o RXSEQ = Receive Sequence Error
+ */
+#define POLL_IMS_ENABLE_MASK ( \
+ E1000_IMS_RXDMT0 | \
+ E1000_IMS_RXSEQ)
+
+/* This defines the bits that are set in the Interrupt Mask
+ * Set/Read Register. Each bit is documented below:
+ * o RXT0 = Receiver Timer Interrupt (ring 0)
+ * o TXDW = Transmit Descriptor Written Back
+ * o RXDMT0 = Receive Descriptor Minimum Threshold hit (ring 0)
+ * o RXSEQ = Receive Sequence Error
+ * o LSC = Link Status Change
+ */
+#define IMS_ENABLE_MASK ( \
+ E1000_IMS_RXT0 | \
+ E1000_IMS_TXDW | \
+ E1000_IMS_RXDMT0 | \
+ E1000_IMS_RXSEQ | \
+ E1000_IMS_LSC)
+
+/* Additional interrupts need to be handled for e1000_ich8lan:
+ DSW = The FW changed the status of the DISSW bit in FWSM
+ PHYINT = The LAN connected device generates an interrupt
+ EPRST = Manageability reset event */
+#define IMS_ICH8LAN_ENABLE_MASK (\
+ E1000_IMS_DSW | \
+ E1000_IMS_PHYINT | \
+ E1000_IMS_EPRST)
+
+/* Number of high/low register pairs in the RAR. The RAR (Receive Address
+ * Registers) holds the directed and multicast addresses that we monitor. We
+ * reserve one of these spots for our directed address, allowing us room for
+ * E1000_RAR_ENTRIES - 1 multicast addresses.
+ */
+#define E1000_RAR_ENTRIES 15
+
+#define E1000_RAR_ENTRIES_ICH8LAN 6
+
+#define MIN_NUMBER_OF_DESCRIPTORS 8
+#define MAX_NUMBER_OF_DESCRIPTORS 0xFFF8
+
+/* Receive Descriptor */
+struct e1000_rx_desc {
+ __le64 buffer_addr; /* Address of the descriptor's data buffer */
+ __le16 length; /* Length of data DMAed into data buffer */
+ __le16 csum; /* Packet checksum */
+ u8 status; /* Descriptor status */
+ u8 errors; /* Descriptor Errors */
+ __le16 special;
+};
+
+/* Receive Descriptor - Extended */
+union e1000_rx_desc_extended {
+ struct {
+ __le64 buffer_addr;
+ __le64 reserved;
+ } read;
+ struct {
+ struct {
+ __le32 mrq; /* Multiple Rx Queues */
+ union {
+ __le32 rss; /* RSS Hash */
+ struct {
+ __le16 ip_id; /* IP id */
+ __le16 csum; /* Packet Checksum */
+ } csum_ip;
+ } hi_dword;
+ } lower;
+ struct {
+ __le32 status_error; /* ext status/error */
+ __le16 length;
+ __le16 vlan; /* VLAN tag */
+ } upper;
+ } wb; /* writeback */
+};
+
+#define MAX_PS_BUFFERS 4
+/* Receive Descriptor - Packet Split */
+union e1000_rx_desc_packet_split {
+ struct {
+ /* one buffer for protocol header(s), three data buffers */
+ __le64 buffer_addr[MAX_PS_BUFFERS];
+ } read;
+ struct {
+ struct {
+ __le32 mrq; /* Multiple Rx Queues */
+ union {
+ __le32 rss; /* RSS Hash */
+ struct {
+ __le16 ip_id; /* IP id */
+ __le16 csum; /* Packet Checksum */
+ } csum_ip;
+ } hi_dword;
+ } lower;
+ struct {
+ __le32 status_error; /* ext status/error */
+ __le16 length0; /* length of buffer 0 */
+ __le16 vlan; /* VLAN tag */
+ } middle;
+ struct {
+ __le16 header_status;
+ __le16 length[3]; /* length of buffers 1-3 */
+ } upper;
+ __le64 reserved;
+ } wb; /* writeback */
+};
+
+/* Receive Descriptor bit definitions */
+#define E1000_RXD_STAT_DD 0x01 /* Descriptor Done */
+#define E1000_RXD_STAT_EOP 0x02 /* End of Packet */
+#define E1000_RXD_STAT_IXSM 0x04 /* Ignore checksum */
+#define E1000_RXD_STAT_VP 0x08 /* IEEE VLAN Packet */
+#define E1000_RXD_STAT_UDPCS 0x10 /* UDP xsum calculated */
+#define E1000_RXD_STAT_TCPCS 0x20 /* TCP xsum calculated */
+#define E1000_RXD_STAT_IPCS 0x40 /* IP xsum calculated */
+#define E1000_RXD_STAT_PIF 0x80 /* passed in-exact filter */
+#define E1000_RXD_STAT_IPIDV 0x200 /* IP identification valid */
+#define E1000_RXD_STAT_UDPV 0x400 /* Valid UDP checksum */
+#define E1000_RXD_STAT_ACK 0x8000 /* ACK Packet indication */
+#define E1000_RXD_ERR_CE 0x01 /* CRC Error */
+#define E1000_RXD_ERR_SE 0x02 /* Symbol Error */
+#define E1000_RXD_ERR_SEQ 0x04 /* Sequence Error */
+#define E1000_RXD_ERR_CXE 0x10 /* Carrier Extension Error */
+#define E1000_RXD_ERR_TCPE 0x20 /* TCP/UDP Checksum Error */
+#define E1000_RXD_ERR_IPE 0x40 /* IP Checksum Error */
+#define E1000_RXD_ERR_RXE 0x80 /* Rx Data Error */
+#define E1000_RXD_SPC_VLAN_MASK 0x0FFF /* VLAN ID is in lower 12 bits */
+#define E1000_RXD_SPC_PRI_MASK 0xE000 /* Priority is in upper 3 bits */
+#define E1000_RXD_SPC_PRI_SHIFT 13
+#define E1000_RXD_SPC_CFI_MASK 0x1000 /* CFI is bit 12 */
+#define E1000_RXD_SPC_CFI_SHIFT 12
+
+#define E1000_RXDEXT_STATERR_CE 0x01000000
+#define E1000_RXDEXT_STATERR_SE 0x02000000
+#define E1000_RXDEXT_STATERR_SEQ 0x04000000
+#define E1000_RXDEXT_STATERR_CXE 0x10000000
+#define E1000_RXDEXT_STATERR_TCPE 0x20000000
+#define E1000_RXDEXT_STATERR_IPE 0x40000000
+#define E1000_RXDEXT_STATERR_RXE 0x80000000
+
+#define E1000_RXDPS_HDRSTAT_HDRSP 0x00008000
+#define E1000_RXDPS_HDRSTAT_HDRLEN_MASK 0x000003FF
+
+/* mask to determine if packets should be dropped due to frame errors */
+#define E1000_RXD_ERR_FRAME_ERR_MASK ( \
+ E1000_RXD_ERR_CE | \
+ E1000_RXD_ERR_SE | \
+ E1000_RXD_ERR_SEQ | \
+ E1000_RXD_ERR_CXE | \
+ E1000_RXD_ERR_RXE)
+
+
+/* Same mask, but for extended and packet split descriptors */
+#define E1000_RXDEXT_ERR_FRAME_ERR_MASK ( \
+ E1000_RXDEXT_STATERR_CE | \
+ E1000_RXDEXT_STATERR_SE | \
+ E1000_RXDEXT_STATERR_SEQ | \
+ E1000_RXDEXT_STATERR_CXE | \
+ E1000_RXDEXT_STATERR_RXE)
+
+
+/* Transmit Descriptor */
+struct e1000_tx_desc {
+ __le64 buffer_addr; /* Address of the descriptor's data buffer */
+ union {
+ __le32 data;
+ struct {
+ __le16 length; /* Data buffer length */
+ u8 cso; /* Checksum offset */
+ u8 cmd; /* Descriptor control */
+ } flags;
+ } lower;
+ union {
+ __le32 data;
+ struct {
+ u8 status; /* Descriptor status */
+ u8 css; /* Checksum start */
+ __le16 special;
+ } fields;
+ } upper;
+};
+
+/* Transmit Descriptor bit definitions */
+#define E1000_TXD_DTYP_D 0x00100000 /* Data Descriptor */
+#define E1000_TXD_DTYP_C 0x00000000 /* Context Descriptor */
+#define E1000_TXD_POPTS_IXSM 0x01 /* Insert IP checksum */
+#define E1000_TXD_POPTS_TXSM 0x02 /* Insert TCP/UDP checksum */
+#define E1000_TXD_CMD_EOP 0x01000000 /* End of Packet */
+#define E1000_TXD_CMD_IFCS 0x02000000 /* Insert FCS (Ethernet CRC) */
+#define E1000_TXD_CMD_IC 0x04000000 /* Insert Checksum */
+#define E1000_TXD_CMD_RS 0x08000000 /* Report Status */
+#define E1000_TXD_CMD_RPS 0x10000000 /* Report Packet Sent */
+#define E1000_TXD_CMD_DEXT 0x20000000 /* Descriptor extension (0 = legacy) */
+#define E1000_TXD_CMD_VLE 0x40000000 /* Add VLAN tag */
+#define E1000_TXD_CMD_IDE 0x80000000 /* Enable Tidv register */
+#define E1000_TXD_STAT_DD 0x00000001 /* Descriptor Done */
+#define E1000_TXD_STAT_EC 0x00000002 /* Excess Collisions */
+#define E1000_TXD_STAT_LC 0x00000004 /* Late Collisions */
+#define E1000_TXD_STAT_TU 0x00000008 /* Transmit underrun */
+#define E1000_TXD_CMD_TCP 0x01000000 /* TCP packet */
+#define E1000_TXD_CMD_IP 0x02000000 /* IP packet */
+#define E1000_TXD_CMD_TSE 0x04000000 /* TCP Seg enable */
+#define E1000_TXD_STAT_TC 0x00000004 /* Tx Underrun */
+
+/* Offload Context Descriptor */
+struct e1000_context_desc {
+ union {
+ __le32 ip_config;
+ struct {
+ u8 ipcss; /* IP checksum start */
+ u8 ipcso; /* IP checksum offset */
+ __le16 ipcse; /* IP checksum end */
+ } ip_fields;
+ } lower_setup;
+ union {
+ __le32 tcp_config;
+ struct {
+ u8 tucss; /* TCP checksum start */
+ u8 tucso; /* TCP checksum offset */
+ __le16 tucse; /* TCP checksum end */
+ } tcp_fields;
+ } upper_setup;
+ __le32 cmd_and_length; /* */
+ union {
+ __le32 data;
+ struct {
+ u8 status; /* Descriptor status */
+ u8 hdr_len; /* Header length */
+ __le16 mss; /* Maximum segment size */
+ } fields;
+ } tcp_seg_setup;
+};
+
+/* Offload data descriptor */
+struct e1000_data_desc {
+ __le64 buffer_addr; /* Address of the descriptor's buffer address */
+ union {
+ __le32 data;
+ struct {
+ __le16 length; /* Data buffer length */
+ u8 typ_len_ext; /* */
+ u8 cmd; /* */
+ } flags;
+ } lower;
+ union {
+ __le32 data;
+ struct {
+ u8 status; /* Descriptor status */
+ u8 popts; /* Packet Options */
+ __le16 special; /* */
+ } fields;
+ } upper;
+};
+
+/* Filters */
+#define E1000_NUM_UNICAST 16 /* Unicast filter entries */
+#define E1000_MC_TBL_SIZE 128 /* Multicast Filter Table (4096 bits) */
+#define E1000_VLAN_FILTER_TBL_SIZE 128 /* VLAN Filter Table (4096 bits) */
+
+#define E1000_NUM_UNICAST_ICH8LAN 7
+#define E1000_MC_TBL_SIZE_ICH8LAN 32
+
+
+/* Receive Address Register */
+struct e1000_rar {
+ volatile __le32 low; /* receive address low */
+ volatile __le32 high; /* receive address high */
+};
+
+/* Number of entries in the Multicast Table Array (MTA). */
+#define E1000_NUM_MTA_REGISTERS 128
+#define E1000_NUM_MTA_REGISTERS_ICH8LAN 32
+
+/* IPv4 Address Table Entry */
+struct e1000_ipv4_at_entry {
+ volatile u32 ipv4_addr; /* IP Address (RW) */
+ volatile u32 reserved;
+};
+
+/* Four wakeup IP addresses are supported */
+#define E1000_WAKEUP_IP_ADDRESS_COUNT_MAX 4
+#define E1000_IP4AT_SIZE E1000_WAKEUP_IP_ADDRESS_COUNT_MAX
+#define E1000_IP4AT_SIZE_ICH8LAN 3
+#define E1000_IP6AT_SIZE 1
+
+/* IPv6 Address Table Entry */
+struct e1000_ipv6_at_entry {
+ volatile u8 ipv6_addr[16];
+};
+
+/* Flexible Filter Length Table Entry */
+struct e1000_fflt_entry {
+ volatile u32 length; /* Flexible Filter Length (RW) */
+ volatile u32 reserved;
+};
+
+/* Flexible Filter Mask Table Entry */
+struct e1000_ffmt_entry {
+ volatile u32 mask; /* Flexible Filter Mask (RW) */
+ volatile u32 reserved;
+};
+
+/* Flexible Filter Value Table Entry */
+struct e1000_ffvt_entry {
+ volatile u32 value; /* Flexible Filter Value (RW) */
+ volatile u32 reserved;
+};
+
+/* Four Flexible Filters are supported */
+#define E1000_FLEXIBLE_FILTER_COUNT_MAX 4
+
+/* Each Flexible Filter is at most 128 (0x80) bytes in length */
+#define E1000_FLEXIBLE_FILTER_SIZE_MAX 128
+
+#define E1000_FFLT_SIZE E1000_FLEXIBLE_FILTER_COUNT_MAX
+#define E1000_FFMT_SIZE E1000_FLEXIBLE_FILTER_SIZE_MAX
+#define E1000_FFVT_SIZE E1000_FLEXIBLE_FILTER_SIZE_MAX
+
+#define E1000_DISABLE_SERDES_LOOPBACK 0x0400
+
+/* Register Set. (82543, 82544)
+ *
+ * Registers are defined to be 32 bits and should be accessed as 32 bit values.
+ * These registers are physically located on the NIC, but are mapped into the
+ * host memory address space.
+ *
+ * RW - register is both readable and writable
+ * RO - register is read only
+ * WO - register is write only
+ * R/clr - register is read only and is cleared when read
+ * A - register array
+ */
+#define E1000_CTRL 0x00000 /* Device Control - RW */
+#define E1000_CTRL_DUP 0x00004 /* Device Control Duplicate (Shadow) - RW */
+#define E1000_STATUS 0x00008 /* Device Status - RO */
+#define E1000_EECD 0x00010 /* EEPROM/Flash Control - RW */
+#define E1000_EERD 0x00014 /* EEPROM Read - RW */
+#define E1000_CTRL_EXT 0x00018 /* Extended Device Control - RW */
+#define E1000_FLA 0x0001C /* Flash Access - RW */
+#define E1000_MDIC 0x00020 /* MDI Control - RW */
+#define E1000_SCTL 0x00024 /* SerDes Control - RW */
+#define E1000_FEXTNVM 0x00028 /* Future Extended NVM register */
+#define E1000_FCAL 0x00028 /* Flow Control Address Low - RW */
+#define E1000_FCAH 0x0002C /* Flow Control Address High -RW */
+#define E1000_FCT 0x00030 /* Flow Control Type - RW */
+#define E1000_VET 0x00038 /* VLAN Ether Type - RW */
+#define E1000_ICR 0x000C0 /* Interrupt Cause Read - R/clr */
+#define E1000_ITR 0x000C4 /* Interrupt Throttling Rate - RW */
+#define E1000_ICS 0x000C8 /* Interrupt Cause Set - WO */
+#define E1000_IMS 0x000D0 /* Interrupt Mask Set - RW */
+#define E1000_IMC 0x000D8 /* Interrupt Mask Clear - WO */
+#define E1000_IAM 0x000E0 /* Interrupt Acknowledge Auto Mask */
+#define E1000_RCTL 0x00100 /* RX Control - RW */
+#define E1000_RDTR1 0x02820 /* RX Delay Timer (1) - RW */
+#define E1000_RDBAL1 0x02900 /* RX Descriptor Base Address Low (1) - RW */
+#define E1000_RDBAH1 0x02904 /* RX Descriptor Base Address High (1) - RW */
+#define E1000_RDLEN1 0x02908 /* RX Descriptor Length (1) - RW */
+#define E1000_RDH1 0x02910 /* RX Descriptor Head (1) - RW */
+#define E1000_RDT1 0x02918 /* RX Descriptor Tail (1) - RW */
+#define E1000_FCTTV 0x00170 /* Flow Control Transmit Timer Value - RW */
+#define E1000_TXCW 0x00178 /* TX Configuration Word - RW */
+#define E1000_RXCW 0x00180 /* RX Configuration Word - RO */
+#define E1000_TCTL 0x00400 /* TX Control - RW */
+#define E1000_TCTL_EXT 0x00404 /* Extended TX Control - RW */
+#define E1000_TIPG 0x00410 /* TX Inter-packet gap -RW */
+#define E1000_TBT 0x00448 /* TX Burst Timer - RW */
+#define E1000_AIT 0x00458 /* Adaptive Interframe Spacing Throttle - RW */
+#define E1000_LEDCTL 0x00E00 /* LED Control - RW */
+#define E1000_EXTCNF_CTRL 0x00F00 /* Extended Configuration Control */
+#define E1000_EXTCNF_SIZE 0x00F08 /* Extended Configuration Size */
+#define E1000_PHY_CTRL 0x00F10 /* PHY Control Register in CSR */
+#define FEXTNVM_SW_CONFIG 0x0001
+#define E1000_PBA 0x01000 /* Packet Buffer Allocation - RW */
+#define E1000_PBS 0x01008 /* Packet Buffer Size */
+#define E1000_EEMNGCTL 0x01010 /* MNG EEprom Control */
+#define E1000_FLASH_UPDATES 1000
+#define E1000_EEARBC 0x01024 /* EEPROM Auto Read Bus Control */
+#define E1000_FLASHT 0x01028 /* FLASH Timer Register */
+#define E1000_EEWR 0x0102C /* EEPROM Write Register - RW */
+#define E1000_FLSWCTL 0x01030 /* FLASH control register */
+#define E1000_FLSWDATA 0x01034 /* FLASH data register */
+#define E1000_FLSWCNT 0x01038 /* FLASH Access Counter */
+#define E1000_FLOP 0x0103C /* FLASH Opcode Register */
+#define E1000_ERT 0x02008 /* Early Rx Threshold - RW */
+#define E1000_FCRTL 0x02160 /* Flow Control Receive Threshold Low - RW */
+#define E1000_FCRTH 0x02168 /* Flow Control Receive Threshold High - RW */
+#define E1000_PSRCTL 0x02170 /* Packet Split Receive Control - RW */
+#define E1000_RDBAL 0x02800 /* RX Descriptor Base Address Low - RW */
+#define E1000_RDBAH 0x02804 /* RX Descriptor Base Address High - RW */
+#define E1000_RDLEN 0x02808 /* RX Descriptor Length - RW */
+#define E1000_RDH 0x02810 /* RX Descriptor Head - RW */
+#define E1000_RDT 0x02818 /* RX Descriptor Tail - RW */
+#define E1000_RDTR 0x02820 /* RX Delay Timer - RW */
+#define E1000_RDBAL0 E1000_RDBAL /* RX Desc Base Address Low (0) - RW */
+#define E1000_RDBAH0 E1000_RDBAH /* RX Desc Base Address High (0) - RW */
+#define E1000_RDLEN0 E1000_RDLEN /* RX Desc Length (0) - RW */
+#define E1000_RDH0 E1000_RDH /* RX Desc Head (0) - RW */
+#define E1000_RDT0 E1000_RDT /* RX Desc Tail (0) - RW */
+#define E1000_RDTR0 E1000_RDTR /* RX Delay Timer (0) - RW */
+#define E1000_RXDCTL 0x02828 /* RX Descriptor Control queue 0 - RW */
+#define E1000_RXDCTL1 0x02928 /* RX Descriptor Control queue 1 - RW */
+#define E1000_RADV 0x0282C /* RX Interrupt Absolute Delay Timer - RW */
+#define E1000_RSRPD 0x02C00 /* RX Small Packet Detect - RW */
+#define E1000_RAID 0x02C08 /* Receive Ack Interrupt Delay - RW */
+#define E1000_TXDMAC 0x03000 /* TX DMA Control - RW */
+#define E1000_KABGTXD 0x03004 /* AFE Band Gap Transmit Ref Data */
+#define E1000_TDFH 0x03410 /* TX Data FIFO Head - RW */
+#define E1000_TDFT 0x03418 /* TX Data FIFO Tail - RW */
+#define E1000_TDFHS 0x03420 /* TX Data FIFO Head Saved - RW */
+#define E1000_TDFTS 0x03428 /* TX Data FIFO Tail Saved - RW */
+#define E1000_TDFPC 0x03430 /* TX Data FIFO Packet Count - RW */
+#define E1000_TDBAL 0x03800 /* TX Descriptor Base Address Low - RW */
+#define E1000_TDBAH 0x03804 /* TX Descriptor Base Address High - RW */
+#define E1000_TDLEN 0x03808 /* TX Descriptor Length - RW */
+#define E1000_TDH 0x03810 /* TX Descriptor Head - RW */
+#define E1000_TDT 0x03818 /* TX Descriptor Tail - RW */
+#define E1000_TIDV 0x03820 /* TX Interrupt Delay Value - RW */
+#define E1000_TXDCTL 0x03828 /* TX Descriptor Control - RW */
+#define E1000_TADV 0x0382C /* TX Interrupt Absolute Delay Val - RW */
+#define E1000_TSPMT 0x03830 /* TCP Segmentation PAD & Min Threshold - RW */
+#define E1000_TARC0 0x03840 /* TX Arbitration Count (0) */
+#define E1000_TDBAL1 0x03900 /* TX Desc Base Address Low (1) - RW */
+#define E1000_TDBAH1 0x03904 /* TX Desc Base Address High (1) - RW */
+#define E1000_TDLEN1 0x03908 /* TX Desc Length (1) - RW */
+#define E1000_TDH1 0x03910 /* TX Desc Head (1) - RW */
+#define E1000_TDT1 0x03918 /* TX Desc Tail (1) - RW */
+#define E1000_TXDCTL1 0x03928 /* TX Descriptor Control (1) - RW */
+#define E1000_TARC1 0x03940 /* TX Arbitration Count (1) */
+#define E1000_CRCERRS 0x04000 /* CRC Error Count - R/clr */
+#define E1000_ALGNERRC 0x04004 /* Alignment Error Count - R/clr */
+#define E1000_SYMERRS 0x04008 /* Symbol Error Count - R/clr */
+#define E1000_RXERRC 0x0400C /* Receive Error Count - R/clr */
+#define E1000_MPC 0x04010 /* Missed Packet Count - R/clr */
+#define E1000_SCC 0x04014 /* Single Collision Count - R/clr */
+#define E1000_ECOL 0x04018 /* Excessive Collision Count - R/clr */
+#define E1000_MCC 0x0401C /* Multiple Collision Count - R/clr */
+#define E1000_LATECOL 0x04020 /* Late Collision Count - R/clr */
+#define E1000_COLC 0x04028 /* Collision Count - R/clr */
+#define E1000_DC 0x04030 /* Defer Count - R/clr */
+#define E1000_TNCRS 0x04034 /* TX-No CRS - R/clr */
+#define E1000_SEC 0x04038 /* Sequence Error Count - R/clr */
+#define E1000_CEXTERR 0x0403C /* Carrier Extension Error Count - R/clr */
+#define E1000_RLEC 0x04040 /* Receive Length Error Count - R/clr */
+#define E1000_XONRXC 0x04048 /* XON RX Count - R/clr */
+#define E1000_XONTXC 0x0404C /* XON TX Count - R/clr */
+#define E1000_XOFFRXC 0x04050 /* XOFF RX Count - R/clr */
+#define E1000_XOFFTXC 0x04054 /* XOFF TX Count - R/clr */
+#define E1000_FCRUC 0x04058 /* Flow Control RX Unsupported Count- R/clr */
+#define E1000_PRC64 0x0405C /* Packets RX (64 bytes) - R/clr */
+#define E1000_PRC127 0x04060 /* Packets RX (65-127 bytes) - R/clr */
+#define E1000_PRC255 0x04064 /* Packets RX (128-255 bytes) - R/clr */
+#define E1000_PRC511 0x04068 /* Packets RX (255-511 bytes) - R/clr */
+#define E1000_PRC1023 0x0406C /* Packets RX (512-1023 bytes) - R/clr */
+#define E1000_PRC1522 0x04070 /* Packets RX (1024-1522 bytes) - R/clr */
+#define E1000_GPRC 0x04074 /* Good Packets RX Count - R/clr */
+#define E1000_BPRC 0x04078 /* Broadcast Packets RX Count - R/clr */
+#define E1000_MPRC 0x0407C /* Multicast Packets RX Count - R/clr */
+#define E1000_GPTC 0x04080 /* Good Packets TX Count - R/clr */
+#define E1000_GORCL 0x04088 /* Good Octets RX Count Low - R/clr */
+#define E1000_GORCH 0x0408C /* Good Octets RX Count High - R/clr */
+#define E1000_GOTCL 0x04090 /* Good Octets TX Count Low - R/clr */
+#define E1000_GOTCH 0x04094 /* Good Octets TX Count High - R/clr */
+#define E1000_RNBC 0x040A0 /* RX No Buffers Count - R/clr */
+#define E1000_RUC 0x040A4 /* RX Undersize Count - R/clr */
+#define E1000_RFC 0x040A8 /* RX Fragment Count - R/clr */
+#define E1000_ROC 0x040AC /* RX Oversize Count - R/clr */
+#define E1000_RJC 0x040B0 /* RX Jabber Count - R/clr */
+#define E1000_MGTPRC 0x040B4 /* Management Packets RX Count - R/clr */
+#define E1000_MGTPDC 0x040B8 /* Management Packets Dropped Count - R/clr */
+#define E1000_MGTPTC 0x040BC /* Management Packets TX Count - R/clr */
+#define E1000_TORL 0x040C0 /* Total Octets RX Low - R/clr */
+#define E1000_TORH 0x040C4 /* Total Octets RX High - R/clr */
+#define E1000_TOTL 0x040C8 /* Total Octets TX Low - R/clr */
+#define E1000_TOTH 0x040CC /* Total Octets TX High - R/clr */
+#define E1000_TPR 0x040D0 /* Total Packets RX - R/clr */
+#define E1000_TPT 0x040D4 /* Total Packets TX - R/clr */
+#define E1000_PTC64 0x040D8 /* Packets TX (64 bytes) - R/clr */
+#define E1000_PTC127 0x040DC /* Packets TX (65-127 bytes) - R/clr */
+#define E1000_PTC255 0x040E0 /* Packets TX (128-255 bytes) - R/clr */
+#define E1000_PTC511 0x040E4 /* Packets TX (256-511 bytes) - R/clr */
+#define E1000_PTC1023 0x040E8 /* Packets TX (512-1023 bytes) - R/clr */
+#define E1000_PTC1522 0x040EC /* Packets TX (1024-1522 Bytes) - R/clr */
+#define E1000_MPTC 0x040F0 /* Multicast Packets TX Count - R/clr */
+#define E1000_BPTC 0x040F4 /* Broadcast Packets TX Count - R/clr */
+#define E1000_TSCTC 0x040F8 /* TCP Segmentation Context TX - R/clr */
+#define E1000_TSCTFC 0x040FC /* TCP Segmentation Context TX Fail - R/clr */
+#define E1000_IAC 0x04100 /* Interrupt Assertion Count */
+#define E1000_ICRXPTC 0x04104 /* Interrupt Cause Rx Packet Timer Expire Count */
+#define E1000_ICRXATC 0x04108 /* Interrupt Cause Rx Absolute Timer Expire Count */
+#define E1000_ICTXPTC 0x0410C /* Interrupt Cause Tx Packet Timer Expire Count */
+#define E1000_ICTXATC 0x04110 /* Interrupt Cause Tx Absolute Timer Expire Count */
+#define E1000_ICTXQEC 0x04118 /* Interrupt Cause Tx Queue Empty Count */
+#define E1000_ICTXQMTC 0x0411C /* Interrupt Cause Tx Queue Minimum Threshold Count */
+#define E1000_ICRXDMTC 0x04120 /* Interrupt Cause Rx Descriptor Minimum Threshold Count */
+#define E1000_ICRXOC 0x04124 /* Interrupt Cause Receiver Overrun Count */
+#define E1000_RXCSUM 0x05000 /* RX Checksum Control - RW */
+#define E1000_RFCTL 0x05008 /* Receive Filter Control*/
+#define E1000_MTA 0x05200 /* Multicast Table Array - RW Array */
+#define E1000_RA 0x05400 /* Receive Address - RW Array */
+#define E1000_VFTA 0x05600 /* VLAN Filter Table Array - RW Array */
+#define E1000_WUC 0x05800 /* Wakeup Control - RW */
+#define E1000_WUFC 0x05808 /* Wakeup Filter Control - RW */
+#define E1000_WUS 0x05810 /* Wakeup Status - RO */
+#define E1000_MANC 0x05820 /* Management Control - RW */
+#define E1000_IPAV 0x05838 /* IP Address Valid - RW */
+#define E1000_IP4AT 0x05840 /* IPv4 Address Table - RW Array */
+#define E1000_IP6AT 0x05880 /* IPv6 Address Table - RW Array */
+#define E1000_WUPL 0x05900 /* Wakeup Packet Length - RW */
+#define E1000_WUPM 0x05A00 /* Wakeup Packet Memory - RO A */
+#define E1000_FFLT 0x05F00 /* Flexible Filter Length Table - RW Array */
+#define E1000_HOST_IF 0x08800 /* Host Interface */
+#define E1000_FFMT 0x09000 /* Flexible Filter Mask Table - RW Array */
+#define E1000_FFVT 0x09800 /* Flexible Filter Value Table - RW Array */
+
+#define E1000_KUMCTRLSTA 0x00034 /* MAC-PHY interface - RW */
+#define E1000_MDPHYA 0x0003C /* PHY address - RW */
+#define E1000_MANC2H 0x05860 /* Management Control To Host - RW */
+#define E1000_SW_FW_SYNC 0x05B5C /* Software-Firmware Synchronization - RW */
+
+#define E1000_GCR 0x05B00 /* PCI-Ex Control */
+#define E1000_GSCL_1 0x05B10 /* PCI-Ex Statistic Control #1 */
+#define E1000_GSCL_2 0x05B14 /* PCI-Ex Statistic Control #2 */
+#define E1000_GSCL_3 0x05B18 /* PCI-Ex Statistic Control #3 */
+#define E1000_GSCL_4 0x05B1C /* PCI-Ex Statistic Control #4 */
+#define E1000_FACTPS 0x05B30 /* Function Active and Power State to MNG */
+#define E1000_SWSM 0x05B50 /* SW Semaphore */
+#define E1000_FWSM 0x05B54 /* FW Semaphore */
+#define E1000_FFLT_DBG 0x05F04 /* Debug Register */
+#define E1000_HICR 0x08F00 /* Host Interface Control */
+
+/* RSS registers */
+#define E1000_CPUVEC 0x02C10 /* CPU Vector Register - RW */
+#define E1000_MRQC 0x05818 /* Multiple Receive Control - RW */
+#define E1000_RETA 0x05C00 /* Redirection Table - RW Array */
+#define E1000_RSSRK 0x05C80 /* RSS Random Key - RW Array */
+#define E1000_RSSIM 0x05864 /* RSS Interrupt Mask */
+#define E1000_RSSIR 0x05868 /* RSS Interrupt Request */
+/* Register Set (82542)
+ *
+ * Some of the 82542 registers are located at different offsets than they are
+ * in more current versions of the 8254x. Despite the difference in location,
+ * the registers function in the same manner.
+ */
+#define E1000_82542_CTRL E1000_CTRL
+#define E1000_82542_CTRL_DUP E1000_CTRL_DUP
+#define E1000_82542_STATUS E1000_STATUS
+#define E1000_82542_EECD E1000_EECD
+#define E1000_82542_EERD E1000_EERD
+#define E1000_82542_CTRL_EXT E1000_CTRL_EXT
+#define E1000_82542_FLA E1000_FLA
+#define E1000_82542_MDIC E1000_MDIC
+#define E1000_82542_SCTL E1000_SCTL
+#define E1000_82542_FEXTNVM E1000_FEXTNVM
+#define E1000_82542_FCAL E1000_FCAL
+#define E1000_82542_FCAH E1000_FCAH
+#define E1000_82542_FCT E1000_FCT
+#define E1000_82542_VET E1000_VET
+#define E1000_82542_RA 0x00040
+#define E1000_82542_ICR E1000_ICR
+#define E1000_82542_ITR E1000_ITR
+#define E1000_82542_ICS E1000_ICS
+#define E1000_82542_IMS E1000_IMS
+#define E1000_82542_IMC E1000_IMC
+#define E1000_82542_RCTL E1000_RCTL
+#define E1000_82542_RDTR 0x00108
+#define E1000_82542_RDBAL 0x00110
+#define E1000_82542_RDBAH 0x00114
+#define E1000_82542_RDLEN 0x00118
+#define E1000_82542_RDH 0x00120
+#define E1000_82542_RDT 0x00128
+#define E1000_82542_RDTR0 E1000_82542_RDTR
+#define E1000_82542_RDBAL0 E1000_82542_RDBAL
+#define E1000_82542_RDBAH0 E1000_82542_RDBAH
+#define E1000_82542_RDLEN0 E1000_82542_RDLEN
+#define E1000_82542_RDH0 E1000_82542_RDH
+#define E1000_82542_RDT0 E1000_82542_RDT
+#define E1000_82542_SRRCTL(_n) (0x280C + ((_n) << 8)) /* Split and Replication
+ * RX Control - RW */
+#define E1000_82542_DCA_RXCTRL(_n) (0x02814 + ((_n) << 8))
+#define E1000_82542_RDBAH3 0x02B04 /* RX Desc Base High Queue 3 - RW */
+#define E1000_82542_RDBAL3 0x02B00 /* RX Desc Low Queue 3 - RW */
+#define E1000_82542_RDLEN3 0x02B08 /* RX Desc Length Queue 3 - RW */
+#define E1000_82542_RDH3 0x02B10 /* RX Desc Head Queue 3 - RW */
+#define E1000_82542_RDT3 0x02B18 /* RX Desc Tail Queue 3 - RW */
+#define E1000_82542_RDBAL2 0x02A00 /* RX Desc Base Low Queue 2 - RW */
+#define E1000_82542_RDBAH2 0x02A04 /* RX Desc Base High Queue 2 - RW */
+#define E1000_82542_RDLEN2 0x02A08 /* RX Desc Length Queue 2 - RW */
+#define E1000_82542_RDH2 0x02A10 /* RX Desc Head Queue 2 - RW */
+#define E1000_82542_RDT2 0x02A18 /* RX Desc Tail Queue 2 - RW */
+#define E1000_82542_RDTR1 0x00130
+#define E1000_82542_RDBAL1 0x00138
+#define E1000_82542_RDBAH1 0x0013C
+#define E1000_82542_RDLEN1 0x00140
+#define E1000_82542_RDH1 0x00148
+#define E1000_82542_RDT1 0x00150
+#define E1000_82542_FCRTH 0x00160
+#define E1000_82542_FCRTL 0x00168
+#define E1000_82542_FCTTV E1000_FCTTV
+#define E1000_82542_TXCW E1000_TXCW
+#define E1000_82542_RXCW E1000_RXCW
+#define E1000_82542_MTA 0x00200
+#define E1000_82542_TCTL E1000_TCTL
+#define E1000_82542_TCTL_EXT E1000_TCTL_EXT
+#define E1000_82542_TIPG E1000_TIPG
+#define E1000_82542_TDBAL 0x00420
+#define E1000_82542_TDBAH 0x00424
+#define E1000_82542_TDLEN 0x00428
+#define E1000_82542_TDH 0x00430
+#define E1000_82542_TDT 0x00438
+#define E1000_82542_TIDV 0x00440
+#define E1000_82542_TBT E1000_TBT
+#define E1000_82542_AIT E1000_AIT
+#define E1000_82542_VFTA 0x00600
+#define E1000_82542_LEDCTL E1000_LEDCTL
+#define E1000_82542_PBA E1000_PBA
+#define E1000_82542_PBS E1000_PBS
+#define E1000_82542_EEMNGCTL E1000_EEMNGCTL
+#define E1000_82542_EEARBC E1000_EEARBC
+#define E1000_82542_FLASHT E1000_FLASHT
+#define E1000_82542_EEWR E1000_EEWR
+#define E1000_82542_FLSWCTL E1000_FLSWCTL
+#define E1000_82542_FLSWDATA E1000_FLSWDATA
+#define E1000_82542_FLSWCNT E1000_FLSWCNT
+#define E1000_82542_FLOP E1000_FLOP
+#define E1000_82542_EXTCNF_CTRL E1000_EXTCNF_CTRL
+#define E1000_82542_EXTCNF_SIZE E1000_EXTCNF_SIZE
+#define E1000_82542_PHY_CTRL E1000_PHY_CTRL
+#define E1000_82542_ERT E1000_ERT
+#define E1000_82542_RXDCTL E1000_RXDCTL
+#define E1000_82542_RXDCTL1 E1000_RXDCTL1
+#define E1000_82542_RADV E1000_RADV
+#define E1000_82542_RSRPD E1000_RSRPD
+#define E1000_82542_TXDMAC E1000_TXDMAC
+#define E1000_82542_KABGTXD E1000_KABGTXD
+#define E1000_82542_TDFHS E1000_TDFHS
+#define E1000_82542_TDFTS E1000_TDFTS
+#define E1000_82542_TDFPC E1000_TDFPC
+#define E1000_82542_TXDCTL E1000_TXDCTL
+#define E1000_82542_TADV E1000_TADV
+#define E1000_82542_TSPMT E1000_TSPMT
+#define E1000_82542_CRCERRS E1000_CRCERRS
+#define E1000_82542_ALGNERRC E1000_ALGNERRC
+#define E1000_82542_SYMERRS E1000_SYMERRS
+#define E1000_82542_RXERRC E1000_RXERRC
+#define E1000_82542_MPC E1000_MPC
+#define E1000_82542_SCC E1000_SCC
+#define E1000_82542_ECOL E1000_ECOL
+#define E1000_82542_MCC E1000_MCC
+#define E1000_82542_LATECOL E1000_LATECOL
+#define E1000_82542_COLC E1000_COLC
+#define E1000_82542_DC E1000_DC
+#define E1000_82542_TNCRS E1000_TNCRS
+#define E1000_82542_SEC E1000_SEC
+#define E1000_82542_CEXTERR E1000_CEXTERR
+#define E1000_82542_RLEC E1000_RLEC
+#define E1000_82542_XONRXC E1000_XONRXC
+#define E1000_82542_XONTXC E1000_XONTXC
+#define E1000_82542_XOFFRXC E1000_XOFFRXC
+#define E1000_82542_XOFFTXC E1000_XOFFTXC
+#define E1000_82542_FCRUC E1000_FCRUC
+#define E1000_82542_PRC64 E1000_PRC64
+#define E1000_82542_PRC127 E1000_PRC127
+#define E1000_82542_PRC255 E1000_PRC255
+#define E1000_82542_PRC511 E1000_PRC511
+#define E1000_82542_PRC1023 E1000_PRC1023
+#define E1000_82542_PRC1522 E1000_PRC1522
+#define E1000_82542_GPRC E1000_GPRC
+#define E1000_82542_BPRC E1000_BPRC
+#define E1000_82542_MPRC E1000_MPRC
+#define E1000_82542_GPTC E1000_GPTC
+#define E1000_82542_GORCL E1000_GORCL
+#define E1000_82542_GORCH E1000_GORCH
+#define E1000_82542_GOTCL E1000_GOTCL
+#define E1000_82542_GOTCH E1000_GOTCH
+#define E1000_82542_RNBC E1000_RNBC
+#define E1000_82542_RUC E1000_RUC
+#define E1000_82542_RFC E1000_RFC
+#define E1000_82542_ROC E1000_ROC
+#define E1000_82542_RJC E1000_RJC
+#define E1000_82542_MGTPRC E1000_MGTPRC
+#define E1000_82542_MGTPDC E1000_MGTPDC
+#define E1000_82542_MGTPTC E1000_MGTPTC
+#define E1000_82542_TORL E1000_TORL
+#define E1000_82542_TORH E1000_TORH
+#define E1000_82542_TOTL E1000_TOTL
+#define E1000_82542_TOTH E1000_TOTH
+#define E1000_82542_TPR E1000_TPR
+#define E1000_82542_TPT E1000_TPT
+#define E1000_82542_PTC64 E1000_PTC64
+#define E1000_82542_PTC127 E1000_PTC127
+#define E1000_82542_PTC255 E1000_PTC255
+#define E1000_82542_PTC511 E1000_PTC511
+#define E1000_82542_PTC1023 E1000_PTC1023
+#define E1000_82542_PTC1522 E1000_PTC1522
+#define E1000_82542_MPTC E1000_MPTC
+#define E1000_82542_BPTC E1000_BPTC
+#define E1000_82542_TSCTC E1000_TSCTC
+#define E1000_82542_TSCTFC E1000_TSCTFC
+#define E1000_82542_RXCSUM E1000_RXCSUM
+#define E1000_82542_WUC E1000_WUC
+#define E1000_82542_WUFC E1000_WUFC
+#define E1000_82542_WUS E1000_WUS
+#define E1000_82542_MANC E1000_MANC
+#define E1000_82542_IPAV E1000_IPAV
+#define E1000_82542_IP4AT E1000_IP4AT
+#define E1000_82542_IP6AT E1000_IP6AT
+#define E1000_82542_WUPL E1000_WUPL
+#define E1000_82542_WUPM E1000_WUPM
+#define E1000_82542_FFLT E1000_FFLT
+#define E1000_82542_TDFH 0x08010
+#define E1000_82542_TDFT 0x08018
+#define E1000_82542_FFMT E1000_FFMT
+#define E1000_82542_FFVT E1000_FFVT
+#define E1000_82542_HOST_IF E1000_HOST_IF
+#define E1000_82542_IAM E1000_IAM
+#define E1000_82542_EEMNGCTL E1000_EEMNGCTL
+#define E1000_82542_PSRCTL E1000_PSRCTL
+#define E1000_82542_RAID E1000_RAID
+#define E1000_82542_TARC0 E1000_TARC0
+#define E1000_82542_TDBAL1 E1000_TDBAL1
+#define E1000_82542_TDBAH1 E1000_TDBAH1
+#define E1000_82542_TDLEN1 E1000_TDLEN1
+#define E1000_82542_TDH1 E1000_TDH1
+#define E1000_82542_TDT1 E1000_TDT1
+#define E1000_82542_TXDCTL1 E1000_TXDCTL1
+#define E1000_82542_TARC1 E1000_TARC1
+#define E1000_82542_RFCTL E1000_RFCTL
+#define E1000_82542_GCR E1000_GCR
+#define E1000_82542_GSCL_1 E1000_GSCL_1
+#define E1000_82542_GSCL_2 E1000_GSCL_2
+#define E1000_82542_GSCL_3 E1000_GSCL_3
+#define E1000_82542_GSCL_4 E1000_GSCL_4
+#define E1000_82542_FACTPS E1000_FACTPS
+#define E1000_82542_SWSM E1000_SWSM
+#define E1000_82542_FWSM E1000_FWSM
+#define E1000_82542_FFLT_DBG E1000_FFLT_DBG
+#define E1000_82542_IAC E1000_IAC
+#define E1000_82542_ICRXPTC E1000_ICRXPTC
+#define E1000_82542_ICRXATC E1000_ICRXATC
+#define E1000_82542_ICTXPTC E1000_ICTXPTC
+#define E1000_82542_ICTXATC E1000_ICTXATC
+#define E1000_82542_ICTXQEC E1000_ICTXQEC
+#define E1000_82542_ICTXQMTC E1000_ICTXQMTC
+#define E1000_82542_ICRXDMTC E1000_ICRXDMTC
+#define E1000_82542_ICRXOC E1000_ICRXOC
+#define E1000_82542_HICR E1000_HICR
+
+#define E1000_82542_CPUVEC E1000_CPUVEC
+#define E1000_82542_MRQC E1000_MRQC
+#define E1000_82542_RETA E1000_RETA
+#define E1000_82542_RSSRK E1000_RSSRK
+#define E1000_82542_RSSIM E1000_RSSIM
+#define E1000_82542_RSSIR E1000_RSSIR
+#define E1000_82542_KUMCTRLSTA E1000_KUMCTRLSTA
+#define E1000_82542_SW_FW_SYNC E1000_SW_FW_SYNC
+#define E1000_82542_MANC2H E1000_MANC2H
+
+/* Statistics counters collected by the MAC */
+struct e1000_hw_stats {
+ u64 crcerrs;
+ u64 algnerrc;
+ u64 symerrs;
+ u64 rxerrc;
+ u64 txerrc;
+ u64 mpc;
+ u64 scc;
+ u64 ecol;
+ u64 mcc;
+ u64 latecol;
+ u64 colc;
+ u64 dc;
+ u64 tncrs;
+ u64 sec;
+ u64 cexterr;
+ u64 rlec;
+ u64 xonrxc;
+ u64 xontxc;
+ u64 xoffrxc;
+ u64 xofftxc;
+ u64 fcruc;
+ u64 prc64;
+ u64 prc127;
+ u64 prc255;
+ u64 prc511;
+ u64 prc1023;
+ u64 prc1522;
+ u64 gprc;
+ u64 bprc;
+ u64 mprc;
+ u64 gptc;
+ u64 gorcl;
+ u64 gorch;
+ u64 gotcl;
+ u64 gotch;
+ u64 rnbc;
+ u64 ruc;
+ u64 rfc;
+ u64 roc;
+ u64 rlerrc;
+ u64 rjc;
+ u64 mgprc;
+ u64 mgpdc;
+ u64 mgptc;
+ u64 torl;
+ u64 torh;
+ u64 totl;
+ u64 toth;
+ u64 tpr;
+ u64 tpt;
+ u64 ptc64;
+ u64 ptc127;
+ u64 ptc255;
+ u64 ptc511;
+ u64 ptc1023;
+ u64 ptc1522;
+ u64 mptc;
+ u64 bptc;
+ u64 tsctc;
+ u64 tsctfc;
+ u64 iac;
+ u64 icrxptc;
+ u64 icrxatc;
+ u64 ictxptc;
+ u64 ictxatc;
+ u64 ictxqec;
+ u64 ictxqmtc;
+ u64 icrxdmtc;
+ u64 icrxoc;
+};
+
+/* Structure containing variables used by the shared code (e1000_hw.c) */
+struct e1000_hw {
+ u8 __iomem *hw_addr;
+ u8 __iomem *flash_address;
+ e1000_mac_type mac_type;
+ e1000_phy_type phy_type;
+ u32 phy_init_script;
+ e1000_media_type media_type;
+ void *back;
+ struct e1000_shadow_ram *eeprom_shadow_ram;
+ u32 flash_bank_size;
+ u32 flash_base_addr;
+ e1000_fc_type fc;
+ e1000_bus_speed bus_speed;
+ e1000_bus_width bus_width;
+ e1000_bus_type bus_type;
+ struct e1000_eeprom_info eeprom;
+ e1000_ms_type master_slave;
+ e1000_ms_type original_master_slave;
+ e1000_ffe_config ffe_config_state;
+ u32 asf_firmware_present;
+ u32 eeprom_semaphore_present;
+ u32 swfw_sync_present;
+ u32 swfwhw_semaphore_present;
+ unsigned long io_base;
+ u32 phy_id;
+ u32 phy_revision;
+ u32 phy_addr;
+ u32 original_fc;
+ u32 txcw;
+ u32 autoneg_failed;
+ u32 max_frame_size;
+ u32 min_frame_size;
+ u32 mc_filter_type;
+ u32 num_mc_addrs;
+ u32 collision_delta;
+ u32 tx_packet_delta;
+ u32 ledctl_default;
+ u32 ledctl_mode1;
+ u32 ledctl_mode2;
+ bool tx_pkt_filtering;
+ struct e1000_host_mng_dhcp_cookie mng_cookie;
+ u16 phy_spd_default;
+ u16 autoneg_advertised;
+ u16 pci_cmd_word;
+ u16 fc_high_water;
+ u16 fc_low_water;
+ u16 fc_pause_time;
+ u16 current_ifs_val;
+ u16 ifs_min_val;
+ u16 ifs_max_val;
+ u16 ifs_step_size;
+ u16 ifs_ratio;
+ u16 device_id;
+ u16 vendor_id;
+ u16 subsystem_id;
+ u16 subsystem_vendor_id;
+ u8 revision_id;
+ u8 autoneg;
+ u8 mdix;
+ u8 forced_speed_duplex;
+ u8 wait_autoneg_complete;
+ u8 dma_fairness;
+ u8 mac_addr[NODE_ADDRESS_SIZE];
+ u8 perm_mac_addr[NODE_ADDRESS_SIZE];
+ bool disable_polarity_correction;
+ bool speed_downgraded;
+ e1000_smart_speed smart_speed;
+ e1000_dsp_config dsp_config_state;
+ bool get_link_status;
+ bool serdes_link_down;
+ bool tbi_compatibility_en;
+ bool tbi_compatibility_on;
+ bool laa_is_present;
+ bool phy_reset_disable;
+ bool initialize_hw_bits_disable;
+ bool fc_send_xon;
+ bool fc_strict_ieee;
+ bool report_tx_early;
+ bool adaptive_ifs;
+ bool ifs_params_forced;
+ bool in_ifs_mode;
+ bool mng_reg_access_disabled;
+ bool leave_av_bit_off;
+ bool kmrn_lock_loss_workaround_disabled;
+ bool bad_tx_carr_stats_fd;
+ bool has_manc2h;
+ bool rx_needs_kicking;
+ bool has_smbus;
+};
+
+
+#define E1000_EEPROM_SWDPIN0 0x0001 /* SWDPIN 0 EEPROM Value */
+#define E1000_EEPROM_LED_LOGIC 0x0020 /* Led Logic Word */
+#define E1000_EEPROM_RW_REG_DATA 16 /* Offset to data in EEPROM read/write registers */
+#define E1000_EEPROM_RW_REG_DONE 2 /* Offset to READ/WRITE done bit */
+#define E1000_EEPROM_RW_REG_START 1 /* First bit for telling part to start operation */
+#define E1000_EEPROM_RW_ADDR_SHIFT 2 /* Shift to the address bits */
+#define E1000_EEPROM_POLL_WRITE 1 /* Flag for polling for write complete */
+#define E1000_EEPROM_POLL_READ 0 /* Flag for polling for read complete */
+/* Register Bit Masks */
+/* Device Control */
+#define E1000_CTRL_FD 0x00000001 /* Full duplex.0=half; 1=full */
+#define E1000_CTRL_BEM 0x00000002 /* Endian Mode.0=little,1=big */
+#define E1000_CTRL_PRIOR 0x00000004 /* Priority on PCI. 0=rx,1=fair */
+#define E1000_CTRL_GIO_MASTER_DISABLE 0x00000004 /* Blocks new Master requests */
+#define E1000_CTRL_LRST 0x00000008 /* Link reset. 0=normal,1=reset */
+#define E1000_CTRL_TME 0x00000010 /* Test mode. 0=normal,1=test */
+#define E1000_CTRL_SLE 0x00000020 /* Serial Link on 0=dis,1=en */
+#define E1000_CTRL_ASDE 0x00000020 /* Auto-speed detect enable */
+#define E1000_CTRL_SLU 0x00000040 /* Set link up (Force Link) */
+#define E1000_CTRL_ILOS 0x00000080 /* Invert Loss-Of Signal */
+#define E1000_CTRL_SPD_SEL 0x00000300 /* Speed Select Mask */
+#define E1000_CTRL_SPD_10 0x00000000 /* Force 10Mb */
+#define E1000_CTRL_SPD_100 0x00000100 /* Force 100Mb */
+#define E1000_CTRL_SPD_1000 0x00000200 /* Force 1Gb */
+#define E1000_CTRL_BEM32 0x00000400 /* Big Endian 32 mode */
+#define E1000_CTRL_FRCSPD 0x00000800 /* Force Speed */
+#define E1000_CTRL_FRCDPX 0x00001000 /* Force Duplex */
+#define E1000_CTRL_D_UD_EN 0x00002000 /* Dock/Undock enable */
+#define E1000_CTRL_D_UD_POLARITY 0x00004000 /* Defined polarity of Dock/Undock indication in SDP[0] */
+#define E1000_CTRL_FORCE_PHY_RESET 0x00008000 /* Reset both PHY ports, through PHYRST_N pin */
+#define E1000_CTRL_EXT_LINK_EN 0x00010000 /* enable link status from external LINK_0 and LINK_1 pins */
+#define E1000_CTRL_SWDPIN0 0x00040000 /* SWDPIN 0 value */
+#define E1000_CTRL_SWDPIN1 0x00080000 /* SWDPIN 1 value */
+#define E1000_CTRL_SWDPIN2 0x00100000 /* SWDPIN 2 value */
+#define E1000_CTRL_SWDPIN3 0x00200000 /* SWDPIN 3 value */
+#define E1000_CTRL_SWDPIO0 0x00400000 /* SWDPIN 0 Input or output */
+#define E1000_CTRL_SWDPIO1 0x00800000 /* SWDPIN 1 input or output */
+#define E1000_CTRL_SWDPIO2 0x01000000 /* SWDPIN 2 input or output */
+#define E1000_CTRL_SWDPIO3 0x02000000 /* SWDPIN 3 input or output */
+#define E1000_CTRL_RST 0x04000000 /* Global reset */
+#define E1000_CTRL_RFCE 0x08000000 /* Receive Flow Control enable */
+#define E1000_CTRL_TFCE 0x10000000 /* Transmit flow control enable */
+#define E1000_CTRL_RTE 0x20000000 /* Routing tag enable */
+#define E1000_CTRL_VME 0x40000000 /* IEEE VLAN mode enable */
+#define E1000_CTRL_PHY_RST 0x80000000 /* PHY Reset */
+#define E1000_CTRL_SW2FW_INT 0x02000000 /* Initiate an interrupt to manageability engine */
+
+/* Device Status */
+#define E1000_STATUS_FD 0x00000001 /* Full duplex.0=half,1=full */
+#define E1000_STATUS_LU 0x00000002 /* Link up.0=no,1=link */
+#define E1000_STATUS_FUNC_MASK 0x0000000C /* PCI Function Mask */
+#define E1000_STATUS_FUNC_SHIFT 2
+#define E1000_STATUS_FUNC_0 0x00000000 /* Function 0 */
+#define E1000_STATUS_FUNC_1 0x00000004 /* Function 1 */
+#define E1000_STATUS_TXOFF 0x00000010 /* transmission paused */
+#define E1000_STATUS_TBIMODE 0x00000020 /* TBI mode */
+#define E1000_STATUS_SPEED_MASK 0x000000C0
+#define E1000_STATUS_SPEED_10 0x00000000 /* Speed 10Mb/s */
+#define E1000_STATUS_SPEED_100 0x00000040 /* Speed 100Mb/s */
+#define E1000_STATUS_SPEED_1000 0x00000080 /* Speed 1000Mb/s */
+#define E1000_STATUS_LAN_INIT_DONE 0x00000200 /* Lan Init Completion
+ by EEPROM/Flash */
+#define E1000_STATUS_ASDV 0x00000300 /* Auto speed detect value */
+#define E1000_STATUS_DOCK_CI 0x00000800 /* Change in Dock/Undock state. Clear on write '0'. */
+#define E1000_STATUS_GIO_MASTER_ENABLE 0x00080000 /* Status of Master requests. */
+#define E1000_STATUS_MTXCKOK 0x00000400 /* MTX clock running OK */
+#define E1000_STATUS_PCI66 0x00000800 /* In 66Mhz slot */
+#define E1000_STATUS_BUS64 0x00001000 /* In 64 bit slot */
+#define E1000_STATUS_PCIX_MODE 0x00002000 /* PCI-X mode */
+#define E1000_STATUS_PCIX_SPEED 0x0000C000 /* PCI-X bus speed */
+#define E1000_STATUS_BMC_SKU_0 0x00100000 /* BMC USB redirect disabled */
+#define E1000_STATUS_BMC_SKU_1 0x00200000 /* BMC SRAM disabled */
+#define E1000_STATUS_BMC_SKU_2 0x00400000 /* BMC SDRAM disabled */
+#define E1000_STATUS_BMC_CRYPTO 0x00800000 /* BMC crypto disabled */
+#define E1000_STATUS_BMC_LITE 0x01000000 /* BMC external code execution disabled */
+#define E1000_STATUS_RGMII_ENABLE 0x02000000 /* RGMII disabled */
+#define E1000_STATUS_FUSE_8 0x04000000
+#define E1000_STATUS_FUSE_9 0x08000000
+#define E1000_STATUS_SERDES0_DIS 0x10000000 /* SERDES disabled on port 0 */
+#define E1000_STATUS_SERDES1_DIS 0x20000000 /* SERDES disabled on port 1 */
+
+/* Constants used to interpret the masked PCI-X bus speed. */
+#define E1000_STATUS_PCIX_SPEED_66 0x00000000 /* PCI-X bus speed 50-66 MHz */
+#define E1000_STATUS_PCIX_SPEED_100 0x00004000 /* PCI-X bus speed 66-100 MHz */
+#define E1000_STATUS_PCIX_SPEED_133 0x00008000 /* PCI-X bus speed 100-133 MHz */
+
+/* EEPROM/Flash Control */
+#define E1000_EECD_SK 0x00000001 /* EEPROM Clock */
+#define E1000_EECD_CS 0x00000002 /* EEPROM Chip Select */
+#define E1000_EECD_DI 0x00000004 /* EEPROM Data In */
+#define E1000_EECD_DO 0x00000008 /* EEPROM Data Out */
+#define E1000_EECD_FWE_MASK 0x00000030
+#define E1000_EECD_FWE_DIS 0x00000010 /* Disable FLASH writes */
+#define E1000_EECD_FWE_EN 0x00000020 /* Enable FLASH writes */
+#define E1000_EECD_FWE_SHIFT 4
+#define E1000_EECD_REQ 0x00000040 /* EEPROM Access Request */
+#define E1000_EECD_GNT 0x00000080 /* EEPROM Access Grant */
+#define E1000_EECD_PRES 0x00000100 /* EEPROM Present */
+#define E1000_EECD_SIZE 0x00000200 /* EEPROM Size (0=64 word 1=256 word) */
+#define E1000_EECD_ADDR_BITS 0x00000400 /* EEPROM Addressing bits based on type
+ * (0-small, 1-large) */
+#define E1000_EECD_TYPE 0x00002000 /* EEPROM Type (1-SPI, 0-Microwire) */
+#ifndef E1000_EEPROM_GRANT_ATTEMPTS
+#define E1000_EEPROM_GRANT_ATTEMPTS 1000 /* EEPROM # attempts to gain grant */
+#endif
+#define E1000_EECD_AUTO_RD 0x00000200 /* EEPROM Auto Read done */
+#define E1000_EECD_SIZE_EX_MASK 0x00007800 /* EEPROM Size */
+#define E1000_EECD_SIZE_EX_SHIFT 11
+#define E1000_EECD_NVADDS 0x00018000 /* NVM Address Size */
+#define E1000_EECD_SELSHAD 0x00020000 /* Select Shadow RAM */
+#define E1000_EECD_INITSRAM 0x00040000 /* Initialize Shadow RAM */
+#define E1000_EECD_FLUPD 0x00080000 /* Update FLASH */
+#define E1000_EECD_AUPDEN 0x00100000 /* Enable Autonomous FLASH update */
+#define E1000_EECD_SHADV 0x00200000 /* Shadow RAM Data Valid */
+#define E1000_EECD_SEC1VAL 0x00400000 /* Sector One Valid */
+#define E1000_EECD_SECVAL_SHIFT 22
+#define E1000_STM_OPCODE 0xDB00
+#define E1000_HICR_FW_RESET 0xC0
+
+#define E1000_SHADOW_RAM_WORDS 2048
+#define E1000_ICH_NVM_SIG_WORD 0x13
+#define E1000_ICH_NVM_SIG_MASK 0xC0
+
+/* EEPROM Read */
+#define E1000_EERD_START 0x00000001 /* Start Read */
+#define E1000_EERD_DONE 0x00000010 /* Read Done */
+#define E1000_EERD_ADDR_SHIFT 8
+#define E1000_EERD_ADDR_MASK 0x0000FF00 /* Read Address */
+#define E1000_EERD_DATA_SHIFT 16
+#define E1000_EERD_DATA_MASK 0xFFFF0000 /* Read Data */
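+
+/* Illustrative sketch (not part of the upstream header): reading one EEPROM
+ * word through the EERD register using the bits above. The er32()/ew32()
+ * register accessors and the variable names are assumptions for this note
+ * only; a real implementation would also bound the polling loop with a
+ * timeout.
+ *
+ *     ew32(EERD, ((u32)offset << E1000_EERD_ADDR_SHIFT) | E1000_EERD_START);
+ *     do {
+ *         eerd = er32(EERD);
+ *     } while (!(eerd & E1000_EERD_DONE));
+ *     word = (u16)((eerd & E1000_EERD_DATA_MASK) >> E1000_EERD_DATA_SHIFT);
+ */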
+
+/* SPI EEPROM Status Register */
+#define EEPROM_STATUS_RDY_SPI 0x01
+#define EEPROM_STATUS_WEN_SPI 0x02
+#define EEPROM_STATUS_BP0_SPI 0x04
+#define EEPROM_STATUS_BP1_SPI 0x08
+#define EEPROM_STATUS_WPEN_SPI 0x80
+
+/* Extended Device Control */
+#define E1000_CTRL_EXT_GPI0_EN 0x00000001 /* Maps SDP4 to GPI0 */
+#define E1000_CTRL_EXT_GPI1_EN 0x00000002 /* Maps SDP5 to GPI1 */
+#define E1000_CTRL_EXT_PHYINT_EN E1000_CTRL_EXT_GPI1_EN
+#define E1000_CTRL_EXT_GPI2_EN 0x00000004 /* Maps SDP6 to GPI2 */
+#define E1000_CTRL_EXT_GPI3_EN 0x00000008 /* Maps SDP7 to GPI3 */
+#define E1000_CTRL_EXT_SDP4_DATA 0x00000010 /* Value of SW Definable Pin 4 */
+#define E1000_CTRL_EXT_SDP5_DATA 0x00000020 /* Value of SW Definable Pin 5 */
+#define E1000_CTRL_EXT_PHY_INT E1000_CTRL_EXT_SDP5_DATA
+#define E1000_CTRL_EXT_SDP6_DATA 0x00000040 /* Value of SW Definable Pin 6 */
+#define E1000_CTRL_EXT_SDP7_DATA 0x00000080 /* Value of SW Definable Pin 7 */
+#define E1000_CTRL_EXT_SDP4_DIR 0x00000100 /* Direction of SDP4 0=in 1=out */
+#define E1000_CTRL_EXT_SDP5_DIR 0x00000200 /* Direction of SDP5 0=in 1=out */
+#define E1000_CTRL_EXT_SDP6_DIR 0x00000400 /* Direction of SDP6 0=in 1=out */
+#define E1000_CTRL_EXT_SDP7_DIR 0x00000800 /* Direction of SDP7 0=in 1=out */
+#define E1000_CTRL_EXT_ASDCHK 0x00001000 /* Initiate an ASD sequence */
+#define E1000_CTRL_EXT_EE_RST 0x00002000 /* Reinitialize from EEPROM */
+#define E1000_CTRL_EXT_IPS 0x00004000 /* Invert Power State */
+#define E1000_CTRL_EXT_SPD_BYPS 0x00008000 /* Speed Select Bypass */
+#define E1000_CTRL_EXT_RO_DIS 0x00020000 /* Relaxed Ordering disable */
+#define E1000_CTRL_EXT_LINK_MODE_MASK 0x00C00000
+#define E1000_CTRL_EXT_LINK_MODE_GMII 0x00000000
+#define E1000_CTRL_EXT_LINK_MODE_TBI 0x00C00000
+#define E1000_CTRL_EXT_LINK_MODE_KMRN 0x00000000
+#define E1000_CTRL_EXT_LINK_MODE_SERDES 0x00C00000
+#define E1000_CTRL_EXT_LINK_MODE_SGMII 0x00800000
+#define E1000_CTRL_EXT_WR_WMARK_MASK 0x03000000
+#define E1000_CTRL_EXT_WR_WMARK_256 0x00000000
+#define E1000_CTRL_EXT_WR_WMARK_320 0x01000000
+#define E1000_CTRL_EXT_WR_WMARK_384 0x02000000
+#define E1000_CTRL_EXT_WR_WMARK_448 0x03000000
+#define E1000_CTRL_EXT_DRV_LOAD 0x10000000 /* Driver loaded bit for FW */
+#define E1000_CTRL_EXT_IAME 0x08000000 /* Interrupt acknowledge Auto-mask */
+#define E1000_CTRL_EXT_INT_TIMER_CLR 0x20000000 /* Clear Interrupt timers after IMS clear */
+#define E1000_CRTL_EXT_PB_PAREN 0x01000000 /* packet buffer parity error detection enabled */
+#define E1000_CTRL_EXT_DF_PAREN 0x02000000 /* descriptor FIFO parity error detection enable */
+#define E1000_CTRL_EXT_GHOST_PAREN 0x40000000
+
+/* MDI Control */
+#define E1000_MDIC_DATA_MASK 0x0000FFFF
+#define E1000_MDIC_REG_MASK 0x001F0000
+#define E1000_MDIC_REG_SHIFT 16
+#define E1000_MDIC_PHY_MASK 0x03E00000
+#define E1000_MDIC_PHY_SHIFT 21
+#define E1000_MDIC_OP_WRITE 0x04000000
+#define E1000_MDIC_OP_READ 0x08000000
+#define E1000_MDIC_READY 0x10000000
+#define E1000_MDIC_INT_EN 0x20000000
+#define E1000_MDIC_ERROR 0x40000000
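+
+/* Illustrative sketch (not part of the upstream header): composing an MDI
+ * read command from the masks above. phy_addr, reg_addr and the er32()/ew32()
+ * accessors are assumptions for this note only.
+ *
+ *     ew32(MDIC, (reg_addr << E1000_MDIC_REG_SHIFT) |
+ *                (phy_addr << E1000_MDIC_PHY_SHIFT) |
+ *                E1000_MDIC_OP_READ);
+ *     do {
+ *         mdic = er32(MDIC);
+ *     } while (!(mdic & E1000_MDIC_READY));
+ *     if (!(mdic & E1000_MDIC_ERROR))
+ *         phy_data = (u16)(mdic & E1000_MDIC_DATA_MASK);
+ */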
+
+#define E1000_KUMCTRLSTA_MASK 0x0000FFFF
+#define E1000_KUMCTRLSTA_OFFSET 0x001F0000
+#define E1000_KUMCTRLSTA_OFFSET_SHIFT 16
+#define E1000_KUMCTRLSTA_REN 0x00200000
+
+#define E1000_KUMCTRLSTA_OFFSET_FIFO_CTRL 0x00000000
+#define E1000_KUMCTRLSTA_OFFSET_CTRL 0x00000001
+#define E1000_KUMCTRLSTA_OFFSET_INB_CTRL 0x00000002
+#define E1000_KUMCTRLSTA_OFFSET_DIAG 0x00000003
+#define E1000_KUMCTRLSTA_OFFSET_TIMEOUTS 0x00000004
+#define E1000_KUMCTRLSTA_OFFSET_INB_PARAM 0x00000009
+#define E1000_KUMCTRLSTA_OFFSET_HD_CTRL 0x00000010
+#define E1000_KUMCTRLSTA_OFFSET_M2P_SERDES 0x0000001E
+#define E1000_KUMCTRLSTA_OFFSET_M2P_MODES 0x0000001F
+
+/* FIFO Control */
+#define E1000_KUMCTRLSTA_FIFO_CTRL_RX_BYPASS 0x00000008
+#define E1000_KUMCTRLSTA_FIFO_CTRL_TX_BYPASS 0x00000800
+
+/* In-Band Control */
+#define E1000_KUMCTRLSTA_INB_CTRL_LINK_STATUS_TX_TIMEOUT_DEFAULT 0x00000500
+#define E1000_KUMCTRLSTA_INB_CTRL_DIS_PADDING 0x00000010
+
+/* Half-Duplex Control */
+#define E1000_KUMCTRLSTA_HD_CTRL_10_100_DEFAULT 0x00000004
+#define E1000_KUMCTRLSTA_HD_CTRL_1000_DEFAULT 0x00000000
+
+#define E1000_KUMCTRLSTA_OFFSET_K0S_CTRL 0x0000001E
+
+#define E1000_KUMCTRLSTA_DIAG_FELPBK 0x2000
+#define E1000_KUMCTRLSTA_DIAG_NELPBK 0x1000
+
+#define E1000_KUMCTRLSTA_K0S_100_EN 0x2000
+#define E1000_KUMCTRLSTA_K0S_GBE_EN 0x1000
+#define E1000_KUMCTRLSTA_K0S_ENTRY_LATENCY_MASK 0x0003
+
+#define E1000_KABGTXD_BGSQLBIAS 0x00050000
+
+#define E1000_PHY_CTRL_SPD_EN 0x00000001
+#define E1000_PHY_CTRL_D0A_LPLU 0x00000002
+#define E1000_PHY_CTRL_NOND0A_LPLU 0x00000004
+#define E1000_PHY_CTRL_NOND0A_GBE_DISABLE 0x00000008
+#define E1000_PHY_CTRL_GBE_DISABLE 0x00000040
+#define E1000_PHY_CTRL_B2B_EN 0x00000080
+
+/* LED Control */
+#define E1000_LEDCTL_LED0_MODE_MASK 0x0000000F
+#define E1000_LEDCTL_LED0_MODE_SHIFT 0
+#define E1000_LEDCTL_LED0_BLINK_RATE 0x0000020
+#define E1000_LEDCTL_LED0_IVRT 0x00000040
+#define E1000_LEDCTL_LED0_BLINK 0x00000080
+#define E1000_LEDCTL_LED1_MODE_MASK 0x00000F00
+#define E1000_LEDCTL_LED1_MODE_SHIFT 8
+#define E1000_LEDCTL_LED1_BLINK_RATE 0x0002000
+#define E1000_LEDCTL_LED1_IVRT 0x00004000
+#define E1000_LEDCTL_LED1_BLINK 0x00008000
+#define E1000_LEDCTL_LED2_MODE_MASK 0x000F0000
+#define E1000_LEDCTL_LED2_MODE_SHIFT 16
+#define E1000_LEDCTL_LED2_BLINK_RATE 0x00200000
+#define E1000_LEDCTL_LED2_IVRT 0x00400000
+#define E1000_LEDCTL_LED2_BLINK 0x00800000
+#define E1000_LEDCTL_LED3_MODE_MASK 0x0F000000
+#define E1000_LEDCTL_LED3_MODE_SHIFT 24
+#define E1000_LEDCTL_LED3_BLINK_RATE 0x20000000
+#define E1000_LEDCTL_LED3_IVRT 0x40000000
+#define E1000_LEDCTL_LED3_BLINK 0x80000000
+
+#define E1000_LEDCTL_MODE_LINK_10_1000 0x0
+#define E1000_LEDCTL_MODE_LINK_100_1000 0x1
+#define E1000_LEDCTL_MODE_LINK_UP 0x2
+#define E1000_LEDCTL_MODE_ACTIVITY 0x3
+#define E1000_LEDCTL_MODE_LINK_ACTIVITY 0x4
+#define E1000_LEDCTL_MODE_LINK_10 0x5
+#define E1000_LEDCTL_MODE_LINK_100 0x6
+#define E1000_LEDCTL_MODE_LINK_1000 0x7
+#define E1000_LEDCTL_MODE_PCIX_MODE 0x8
+#define E1000_LEDCTL_MODE_FULL_DUPLEX 0x9
+#define E1000_LEDCTL_MODE_COLLISION 0xA
+#define E1000_LEDCTL_MODE_BUS_SPEED 0xB
+#define E1000_LEDCTL_MODE_BUS_SIZE 0xC
+#define E1000_LEDCTL_MODE_PAUSED 0xD
+#define E1000_LEDCTL_MODE_LED_ON 0xE
+#define E1000_LEDCTL_MODE_LED_OFF 0xF
+
+/* Receive Address */
+#define E1000_RAH_AV 0x80000000 /* Receive address valid */
+
+/* Interrupt Cause Read */
+#define E1000_ICR_TXDW 0x00000001 /* Transmit desc written back */
+#define E1000_ICR_TXQE 0x00000002 /* Transmit Queue empty */
+#define E1000_ICR_LSC 0x00000004 /* Link Status Change */
+#define E1000_ICR_RXSEQ 0x00000008 /* rx sequence error */
+#define E1000_ICR_RXDMT0 0x00000010 /* rx desc min. threshold (0) */
+#define E1000_ICR_RXO 0x00000040 /* rx overrun */
+#define E1000_ICR_RXT0 0x00000080 /* rx timer intr (ring 0) */
+#define E1000_ICR_MDAC 0x00000200 /* MDIO access complete */
+#define E1000_ICR_RXCFG 0x00000400 /* RX /c/ ordered set */
+#define E1000_ICR_GPI_EN0 0x00000800 /* GP Int 0 */
+#define E1000_ICR_GPI_EN1 0x00001000 /* GP Int 1 */
+#define E1000_ICR_GPI_EN2 0x00002000 /* GP Int 2 */
+#define E1000_ICR_GPI_EN3 0x00004000 /* GP Int 3 */
+#define E1000_ICR_TXD_LOW 0x00008000
+#define E1000_ICR_SRPD 0x00010000
+#define E1000_ICR_ACK 0x00020000 /* Receive Ack frame */
+#define E1000_ICR_MNG 0x00040000 /* Manageability event */
+#define E1000_ICR_DOCK 0x00080000 /* Dock/Undock */
+#define E1000_ICR_INT_ASSERTED 0x80000000 /* If this bit asserted, the driver should claim the interrupt */
+#define E1000_ICR_RXD_FIFO_PAR0 0x00100000 /* queue 0 Rx descriptor FIFO parity error */
+#define E1000_ICR_TXD_FIFO_PAR0 0x00200000 /* queue 0 Tx descriptor FIFO parity error */
+#define E1000_ICR_HOST_ARB_PAR 0x00400000 /* host arb read buffer parity error */
+#define E1000_ICR_PB_PAR 0x00800000 /* packet buffer parity error */
+#define E1000_ICR_RXD_FIFO_PAR1 0x01000000 /* queue 1 Rx descriptor FIFO parity error */
+#define E1000_ICR_TXD_FIFO_PAR1 0x02000000 /* queue 1 Tx descriptor FIFO parity error */
+#define E1000_ICR_ALL_PARITY 0x03F00000 /* all parity error bits */
+#define E1000_ICR_DSW 0x00000020 /* FW changed the status of DISSW bit in the FWSM */
+#define E1000_ICR_PHYINT 0x00001000 /* LAN connected device generates an interrupt */
+#define E1000_ICR_EPRST 0x00100000 /* ME hardware reset occurs */
+
+/* Interrupt Cause Set */
+#define E1000_ICS_TXDW E1000_ICR_TXDW /* Transmit desc written back */
+#define E1000_ICS_TXQE E1000_ICR_TXQE /* Transmit Queue empty */
+#define E1000_ICS_LSC E1000_ICR_LSC /* Link Status Change */
+#define E1000_ICS_RXSEQ E1000_ICR_RXSEQ /* rx sequence error */
+#define E1000_ICS_RXDMT0 E1000_ICR_RXDMT0 /* rx desc min. threshold */
+#define E1000_ICS_RXO E1000_ICR_RXO /* rx overrun */
+#define E1000_ICS_RXT0 E1000_ICR_RXT0 /* rx timer intr */
+#define E1000_ICS_MDAC E1000_ICR_MDAC /* MDIO access complete */
+#define E1000_ICS_RXCFG E1000_ICR_RXCFG /* RX /c/ ordered set */
+#define E1000_ICS_GPI_EN0 E1000_ICR_GPI_EN0 /* GP Int 0 */
+#define E1000_ICS_GPI_EN1 E1000_ICR_GPI_EN1 /* GP Int 1 */
+#define E1000_ICS_GPI_EN2 E1000_ICR_GPI_EN2 /* GP Int 2 */
+#define E1000_ICS_GPI_EN3 E1000_ICR_GPI_EN3 /* GP Int 3 */
+#define E1000_ICS_TXD_LOW E1000_ICR_TXD_LOW
+#define E1000_ICS_SRPD E1000_ICR_SRPD
+#define E1000_ICS_ACK E1000_ICR_ACK /* Receive Ack frame */
+#define E1000_ICS_MNG E1000_ICR_MNG /* Manageability event */
+#define E1000_ICS_DOCK E1000_ICR_DOCK /* Dock/Undock */
+#define E1000_ICS_RXD_FIFO_PAR0 E1000_ICR_RXD_FIFO_PAR0 /* queue 0 Rx descriptor FIFO parity error */
+#define E1000_ICS_TXD_FIFO_PAR0 E1000_ICR_TXD_FIFO_PAR0 /* queue 0 Tx descriptor FIFO parity error */
+#define E1000_ICS_HOST_ARB_PAR E1000_ICR_HOST_ARB_PAR /* host arb read buffer parity error */
+#define E1000_ICS_PB_PAR E1000_ICR_PB_PAR /* packet buffer parity error */
+#define E1000_ICS_RXD_FIFO_PAR1 E1000_ICR_RXD_FIFO_PAR1 /* queue 1 Rx descriptor FIFO parity error */
+#define E1000_ICS_TXD_FIFO_PAR1 E1000_ICR_TXD_FIFO_PAR1 /* queue 1 Tx descriptor FIFO parity error */
+#define E1000_ICS_DSW E1000_ICR_DSW
+#define E1000_ICS_PHYINT E1000_ICR_PHYINT
+#define E1000_ICS_EPRST E1000_ICR_EPRST
+
+/* Interrupt Mask Set */
+#define E1000_IMS_TXDW E1000_ICR_TXDW /* Transmit desc written back */
+#define E1000_IMS_TXQE E1000_ICR_TXQE /* Transmit Queue empty */
+#define E1000_IMS_LSC E1000_ICR_LSC /* Link Status Change */
+#define E1000_IMS_RXSEQ E1000_ICR_RXSEQ /* rx sequence error */
+#define E1000_IMS_RXDMT0 E1000_ICR_RXDMT0 /* rx desc min. threshold */
+#define E1000_IMS_RXO E1000_ICR_RXO /* rx overrun */
+#define E1000_IMS_RXT0 E1000_ICR_RXT0 /* rx timer intr */
+#define E1000_IMS_MDAC E1000_ICR_MDAC /* MDIO access complete */
+#define E1000_IMS_RXCFG E1000_ICR_RXCFG /* RX /c/ ordered set */
+#define E1000_IMS_GPI_EN0 E1000_ICR_GPI_EN0 /* GP Int 0 */
+#define E1000_IMS_GPI_EN1 E1000_ICR_GPI_EN1 /* GP Int 1 */
+#define E1000_IMS_GPI_EN2 E1000_ICR_GPI_EN2 /* GP Int 2 */
+#define E1000_IMS_GPI_EN3 E1000_ICR_GPI_EN3 /* GP Int 3 */
+#define E1000_IMS_TXD_LOW E1000_ICR_TXD_LOW
+#define E1000_IMS_SRPD E1000_ICR_SRPD
+#define E1000_IMS_ACK E1000_ICR_ACK /* Receive Ack frame */
+#define E1000_IMS_MNG E1000_ICR_MNG /* Manageability event */
+#define E1000_IMS_DOCK E1000_ICR_DOCK /* Dock/Undock */
+#define E1000_IMS_RXD_FIFO_PAR0 E1000_ICR_RXD_FIFO_PAR0 /* queue 0 Rx descriptor FIFO parity error */
+#define E1000_IMS_TXD_FIFO_PAR0 E1000_ICR_TXD_FIFO_PAR0 /* queue 0 Tx descriptor FIFO parity error */
+#define E1000_IMS_HOST_ARB_PAR E1000_ICR_HOST_ARB_PAR /* host arb read buffer parity error */
+#define E1000_IMS_PB_PAR E1000_ICR_PB_PAR /* packet buffer parity error */
+#define E1000_IMS_RXD_FIFO_PAR1 E1000_ICR_RXD_FIFO_PAR1 /* queue 1 Rx descriptor FIFO parity error */
+#define E1000_IMS_TXD_FIFO_PAR1 E1000_ICR_TXD_FIFO_PAR1 /* queue 1 Tx descriptor FIFO parity error */
+#define E1000_IMS_DSW E1000_ICR_DSW
+#define E1000_IMS_PHYINT E1000_ICR_PHYINT
+#define E1000_IMS_EPRST E1000_ICR_EPRST
+
+/* Interrupt Mask Clear */
+#define E1000_IMC_TXDW E1000_ICR_TXDW /* Transmit desc written back */
+#define E1000_IMC_TXQE E1000_ICR_TXQE /* Transmit Queue empty */
+#define E1000_IMC_LSC E1000_ICR_LSC /* Link Status Change */
+#define E1000_IMC_RXSEQ E1000_ICR_RXSEQ /* rx sequence error */
+#define E1000_IMC_RXDMT0 E1000_ICR_RXDMT0 /* rx desc min. threshold */
+#define E1000_IMC_RXO E1000_ICR_RXO /* rx overrun */
+#define E1000_IMC_RXT0 E1000_ICR_RXT0 /* rx timer intr */
+#define E1000_IMC_MDAC E1000_ICR_MDAC /* MDIO access complete */
+#define E1000_IMC_RXCFG E1000_ICR_RXCFG /* RX /c/ ordered set */
+#define E1000_IMC_GPI_EN0 E1000_ICR_GPI_EN0 /* GP Int 0 */
+#define E1000_IMC_GPI_EN1 E1000_ICR_GPI_EN1 /* GP Int 1 */
+#define E1000_IMC_GPI_EN2 E1000_ICR_GPI_EN2 /* GP Int 2 */
+#define E1000_IMC_GPI_EN3 E1000_ICR_GPI_EN3 /* GP Int 3 */
+#define E1000_IMC_TXD_LOW E1000_ICR_TXD_LOW
+#define E1000_IMC_SRPD E1000_ICR_SRPD
+#define E1000_IMC_ACK E1000_ICR_ACK /* Receive Ack frame */
+#define E1000_IMC_MNG E1000_ICR_MNG /* Manageability event */
+#define E1000_IMC_DOCK E1000_ICR_DOCK /* Dock/Undock */
+#define E1000_IMC_RXD_FIFO_PAR0 E1000_ICR_RXD_FIFO_PAR0 /* queue 0 Rx descriptor FIFO parity error */
+#define E1000_IMC_TXD_FIFO_PAR0 E1000_ICR_TXD_FIFO_PAR0 /* queue 0 Tx descriptor FIFO parity error */
+#define E1000_IMC_HOST_ARB_PAR E1000_ICR_HOST_ARB_PAR /* host arb read buffer parity error */
+#define E1000_IMC_PB_PAR E1000_ICR_PB_PAR /* packet buffer parity error */
+#define E1000_IMC_RXD_FIFO_PAR1 E1000_ICR_RXD_FIFO_PAR1 /* queue 1 Rx descriptor FIFO parity error */
+#define E1000_IMC_TXD_FIFO_PAR1 E1000_ICR_TXD_FIFO_PAR1 /* queue 1 Tx descriptor FIFO parity error */
+#define E1000_IMC_DSW E1000_ICR_DSW
+#define E1000_IMC_PHYINT E1000_ICR_PHYINT
+#define E1000_IMC_EPRST E1000_ICR_EPRST
+
+/* Receive Control */
+#define E1000_RCTL_RST 0x00000001 /* Software reset */
+#define E1000_RCTL_EN 0x00000002 /* enable */
+#define E1000_RCTL_SBP 0x00000004 /* store bad packet */
+#define E1000_RCTL_UPE 0x00000008 /* unicast promiscuous enable */
+#define E1000_RCTL_MPE 0x00000010 /* multicast promiscuous enable */
+#define E1000_RCTL_LPE 0x00000020 /* long packet enable */
+#define E1000_RCTL_LBM_NO 0x00000000 /* no loopback mode */
+#define E1000_RCTL_LBM_MAC 0x00000040 /* MAC loopback mode */
+#define E1000_RCTL_LBM_SLP 0x00000080 /* serial link loopback mode */
+#define E1000_RCTL_LBM_TCVR 0x000000C0 /* tcvr loopback mode */
+#define E1000_RCTL_DTYP_MASK 0x00000C00 /* Descriptor type mask */
+#define E1000_RCTL_DTYP_PS 0x00000400 /* Packet Split descriptor */
+#define E1000_RCTL_RDMTS_HALF 0x00000000 /* rx desc min threshold size */
+#define E1000_RCTL_RDMTS_QUAT 0x00000100 /* rx desc min threshold size */
+#define E1000_RCTL_RDMTS_EIGTH 0x00000200 /* rx desc min threshold size */
+#define E1000_RCTL_MO_SHIFT 12 /* multicast offset shift */
+#define E1000_RCTL_MO_0 0x00000000 /* multicast offset 11:0 */
+#define E1000_RCTL_MO_1 0x00001000 /* multicast offset 12:1 */
+#define E1000_RCTL_MO_2 0x00002000 /* multicast offset 13:2 */
+#define E1000_RCTL_MO_3 0x00003000 /* multicast offset 15:4 */
+#define E1000_RCTL_MDR 0x00004000 /* multicast desc ring 0 */
+#define E1000_RCTL_BAM 0x00008000 /* broadcast enable */
+/* these buffer sizes are valid if E1000_RCTL_BSEX is 0 */
+#define E1000_RCTL_SZ_2048 0x00000000 /* rx buffer size 2048 */
+#define E1000_RCTL_SZ_1024 0x00010000 /* rx buffer size 1024 */
+#define E1000_RCTL_SZ_512 0x00020000 /* rx buffer size 512 */
+#define E1000_RCTL_SZ_256 0x00030000 /* rx buffer size 256 */
+/* these buffer sizes are valid if E1000_RCTL_BSEX is 1 */
+#define E1000_RCTL_SZ_16384 0x00010000 /* rx buffer size 16384 */
+#define E1000_RCTL_SZ_8192 0x00020000 /* rx buffer size 8192 */
+#define E1000_RCTL_SZ_4096 0x00030000 /* rx buffer size 4096 */
+#define E1000_RCTL_VFE 0x00040000 /* vlan filter enable */
+#define E1000_RCTL_CFIEN 0x00080000 /* canonical form enable */
+#define E1000_RCTL_CFI 0x00100000 /* canonical form indicator */
+#define E1000_RCTL_DPF 0x00400000 /* discard pause frames */
+#define E1000_RCTL_PMCF 0x00800000 /* pass MAC control frames */
+#define E1000_RCTL_BSEX 0x02000000 /* Buffer size extension */
+#define E1000_RCTL_SECRC 0x04000000 /* Strip Ethernet CRC */
+#define E1000_RCTL_FLXBUF_MASK 0x78000000 /* Flexible buffer size */
+#define E1000_RCTL_FLXBUF_SHIFT 27 /* Flexible buffer shift */
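+
+/* Illustrative sketch (not part of the upstream header): the SZ_* values
+ * share the same two bits, so selecting a buffer size means clearing that
+ * field first; sizes above 2048 bytes additionally need E1000_RCTL_BSEX.
+ * For example, to select 4096-byte receive buffers:
+ *
+ *     rctl &= ~(E1000_RCTL_SZ_4096 | E1000_RCTL_BSEX);
+ *     rctl |= E1000_RCTL_SZ_4096 | E1000_RCTL_BSEX;
+ */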
+
+/* Use byte values for the following shift parameters
+ * Usage:
+ * psrctl |= (((ROUNDUP(value0, 128) >> E1000_PSRCTL_BSIZE0_SHIFT) &
+ * E1000_PSRCTL_BSIZE0_MASK) |
+ * ((ROUNDUP(value1, 1024) >> E1000_PSRCTL_BSIZE1_SHIFT) &
+ * E1000_PSRCTL_BSIZE1_MASK) |
+ * ((ROUNDUP(value2, 1024) << E1000_PSRCTL_BSIZE2_SHIFT) &
+ * E1000_PSRCTL_BSIZE2_MASK) |
+ * ((ROUNDUP(value3, 1024) << E1000_PSRCTL_BSIZE3_SHIFT) &
+ * E1000_PSRCTL_BSIZE3_MASK))
+ * where value0 = [128..16256], default=256
+ * value1 = [1024..64512], default=4096
+ * value2 = [0..64512], default=4096
+ * value3 = [0..64512], default=0
+ */
+
+#define E1000_PSRCTL_BSIZE0_MASK 0x0000007F
+#define E1000_PSRCTL_BSIZE1_MASK 0x00003F00
+#define E1000_PSRCTL_BSIZE2_MASK 0x003F0000
+#define E1000_PSRCTL_BSIZE3_MASK 0x3F000000
+
+#define E1000_PSRCTL_BSIZE0_SHIFT 7 /* Shift _right_ 7 */
+#define E1000_PSRCTL_BSIZE1_SHIFT 2 /* Shift _right_ 2 */
+#define E1000_PSRCTL_BSIZE2_SHIFT 6 /* Shift _left_ 6 */
+#define E1000_PSRCTL_BSIZE3_SHIFT 14 /* Shift _left_ 14 */
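+
+/* Illustrative sketch (not part of the upstream header): a PSRCTL value for
+ * packet-split buffer sizes of 256/4096/4096/0 bytes (the documented
+ * defaults), following the usage note above. The values are already
+ * multiples of 128/1024 here, so ROUNDUP() is omitted.
+ *
+ *     psrctl = ((( 256 >> E1000_PSRCTL_BSIZE0_SHIFT) & E1000_PSRCTL_BSIZE0_MASK) |
+ *               ((4096 >> E1000_PSRCTL_BSIZE1_SHIFT) & E1000_PSRCTL_BSIZE1_MASK) |
+ *               ((4096 << E1000_PSRCTL_BSIZE2_SHIFT) & E1000_PSRCTL_BSIZE2_MASK));
+ */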
+
+/* SW_W_SYNC definitions */
+#define E1000_SWFW_EEP_SM 0x0001
+#define E1000_SWFW_PHY0_SM 0x0002
+#define E1000_SWFW_PHY1_SM 0x0004
+#define E1000_SWFW_MAC_CSR_SM 0x0008
+
+/* Receive Descriptor */
+#define E1000_RDT_DELAY 0x0000ffff /* Delay timer (1=1024us) */
+#define E1000_RDT_FPDB 0x80000000 /* Flush descriptor block */
+#define E1000_RDLEN_LEN 0x0007ff80 /* descriptor length */
+#define E1000_RDH_RDH 0x0000ffff /* receive descriptor head */
+#define E1000_RDT_RDT 0x0000ffff /* receive descriptor tail */
+
+/* Flow Control */
+#define E1000_FCRTH_RTH 0x0000FFF8 /* Mask Bits[15:3] for RTH */
+#define E1000_FCRTH_XFCE 0x80000000 /* External Flow Control Enable */
+#define E1000_FCRTL_RTL 0x0000FFF8 /* Mask Bits[15:3] for RTL */
+#define E1000_FCRTL_XONE 0x80000000 /* Enable XON frame transmission */
+
+/* Header split receive */
+#define E1000_RFCTL_ISCSI_DIS 0x00000001
+#define E1000_RFCTL_ISCSI_DWC_MASK 0x0000003E
+#define E1000_RFCTL_ISCSI_DWC_SHIFT 1
+#define E1000_RFCTL_NFSW_DIS 0x00000040
+#define E1000_RFCTL_NFSR_DIS 0x00000080
+#define E1000_RFCTL_NFS_VER_MASK 0x00000300
+#define E1000_RFCTL_NFS_VER_SHIFT 8
+#define E1000_RFCTL_IPV6_DIS 0x00000400
+#define E1000_RFCTL_IPV6_XSUM_DIS 0x00000800
+#define E1000_RFCTL_ACK_DIS 0x00001000
+#define E1000_RFCTL_ACKD_DIS 0x00002000
+#define E1000_RFCTL_IPFRSP_DIS 0x00004000
+#define E1000_RFCTL_EXTEN 0x00008000
+#define E1000_RFCTL_IPV6_EX_DIS 0x00010000
+#define E1000_RFCTL_NEW_IPV6_EXT_DIS 0x00020000
+
+/* Receive Descriptor Control */
+#define E1000_RXDCTL_PTHRESH 0x0000003F /* RXDCTL Prefetch Threshold */
+#define E1000_RXDCTL_HTHRESH 0x00003F00 /* RXDCTL Host Threshold */
+#define E1000_RXDCTL_WTHRESH 0x003F0000 /* RXDCTL Writeback Threshold */
+#define E1000_RXDCTL_GRAN 0x01000000 /* RXDCTL Granularity */
+
+/* Transmit Descriptor Control */
+#define E1000_TXDCTL_PTHRESH 0x0000003F /* TXDCTL Prefetch Threshold */
+#define E1000_TXDCTL_HTHRESH 0x00003F00 /* TXDCTL Host Threshold */
+#define E1000_TXDCTL_WTHRESH 0x003F0000 /* TXDCTL Writeback Threshold */
+#define E1000_TXDCTL_GRAN 0x01000000 /* TXDCTL Granularity */
+#define E1000_TXDCTL_LWTHRESH 0xFE000000 /* TXDCTL Low Threshold */
+#define E1000_TXDCTL_FULL_TX_DESC_WB 0x01010000 /* GRAN=1, WTHRESH=1 */
+#define E1000_TXDCTL_COUNT_DESC 0x00400000 /* Enable the counting of desc.
+ still to be processed. */
+/* Transmit Configuration Word */
+#define E1000_TXCW_FD 0x00000020 /* TXCW full duplex */
+#define E1000_TXCW_HD 0x00000040 /* TXCW half duplex */
+#define E1000_TXCW_PAUSE 0x00000080 /* TXCW sym pause request */
+#define E1000_TXCW_ASM_DIR 0x00000100 /* TXCW asym pause direction */
+#define E1000_TXCW_PAUSE_MASK 0x00000180 /* TXCW pause request mask */
+#define E1000_TXCW_RF 0x00003000 /* TXCW remote fault */
+#define E1000_TXCW_NP 0x00008000 /* TXCW next page */
+#define E1000_TXCW_CW 0x0000ffff /* TxConfigWord mask */
+#define E1000_TXCW_TXC 0x40000000 /* Transmit Config control */
+#define E1000_TXCW_ANE 0x80000000 /* Auto-neg enable */
+
+/* Receive Configuration Word */
+#define E1000_RXCW_CW 0x0000ffff /* RxConfigWord mask */
+#define E1000_RXCW_NC 0x04000000 /* Receive config no carrier */
+#define E1000_RXCW_IV 0x08000000 /* Receive config invalid */
+#define E1000_RXCW_CC 0x10000000 /* Receive config change */
+#define E1000_RXCW_C 0x20000000 /* Receive config */
+#define E1000_RXCW_SYNCH 0x40000000 /* Receive config synch */
+#define E1000_RXCW_ANC 0x80000000 /* Auto-neg complete */
+
+/* Transmit Control */
+#define E1000_TCTL_RST 0x00000001 /* software reset */
+#define E1000_TCTL_EN 0x00000002 /* enable tx */
+#define E1000_TCTL_BCE 0x00000004 /* busy check enable */
+#define E1000_TCTL_PSP 0x00000008 /* pad short packets */
+#define E1000_TCTL_CT 0x00000ff0 /* collision threshold */
+#define E1000_TCTL_COLD 0x003ff000 /* collision distance */
+#define E1000_TCTL_SWXOFF 0x00400000 /* SW Xoff transmission */
+#define E1000_TCTL_PBE 0x00800000 /* Packet Burst Enable */
+#define E1000_TCTL_RTLC 0x01000000 /* Re-transmit on late collision */
+#define E1000_TCTL_NRTU 0x02000000 /* No Re-transmit on underrun */
+#define E1000_TCTL_MULR 0x10000000 /* Multiple request support */
+/* Extended Transmit Control */
+#define E1000_TCTL_EXT_BST_MASK 0x000003FF /* Backoff Slot Time */
+#define E1000_TCTL_EXT_GCEX_MASK 0x000FFC00 /* Gigabit Carry Extend Padding */
+
+#define DEFAULT_80003ES2LAN_TCTL_EXT_GCEX 0x00010000
+
+/* Receive Checksum Control */
+#define E1000_RXCSUM_PCSS_MASK 0x000000FF /* Packet Checksum Start */
+#define E1000_RXCSUM_IPOFL 0x00000100 /* IPv4 checksum offload */
+#define E1000_RXCSUM_TUOFL 0x00000200 /* TCP / UDP checksum offload */
+#define E1000_RXCSUM_IPV6OFL 0x00000400 /* IPv6 checksum offload */
+#define E1000_RXCSUM_IPPCSE 0x00001000 /* IP payload checksum enable */
+#define E1000_RXCSUM_PCSD 0x00002000 /* packet checksum disabled */
+
+/* Multiple Receive Queue Control */
+#define E1000_MRQC_ENABLE_MASK 0x00000003
+#define E1000_MRQC_ENABLE_RSS_2Q 0x00000001
+#define E1000_MRQC_ENABLE_RSS_INT 0x00000004
+#define E1000_MRQC_RSS_FIELD_MASK 0xFFFF0000
+#define E1000_MRQC_RSS_FIELD_IPV4_TCP 0x00010000
+#define E1000_MRQC_RSS_FIELD_IPV4 0x00020000
+#define E1000_MRQC_RSS_FIELD_IPV6_TCP_EX 0x00040000
+#define E1000_MRQC_RSS_FIELD_IPV6_EX 0x00080000
+#define E1000_MRQC_RSS_FIELD_IPV6 0x00100000
+#define E1000_MRQC_RSS_FIELD_IPV6_TCP 0x00200000
+
+/* Definitions for power management and wakeup registers */
+/* Wake Up Control */
+#define E1000_WUC_APME 0x00000001 /* APM Enable */
+#define E1000_WUC_PME_EN 0x00000002 /* PME Enable */
+#define E1000_WUC_PME_STATUS 0x00000004 /* PME Status */
+#define E1000_WUC_APMPME 0x00000008 /* Assert PME on APM Wakeup */
+#define E1000_WUC_SPM 0x80000000 /* Enable SPM */
+
+/* Wake Up Filter Control */
+#define E1000_WUFC_LNKC 0x00000001 /* Link Status Change Wakeup Enable */
+#define E1000_WUFC_MAG 0x00000002 /* Magic Packet Wakeup Enable */
+#define E1000_WUFC_EX 0x00000004 /* Directed Exact Wakeup Enable */
+#define E1000_WUFC_MC 0x00000008 /* Directed Multicast Wakeup Enable */
+#define E1000_WUFC_BC 0x00000010 /* Broadcast Wakeup Enable */
+#define E1000_WUFC_ARP 0x00000020 /* ARP Request Packet Wakeup Enable */
+#define E1000_WUFC_IPV4 0x00000040 /* Directed IPv4 Packet Wakeup Enable */
+#define E1000_WUFC_IPV6 0x00000080 /* Directed IPv6 Packet Wakeup Enable */
+#define E1000_WUFC_IGNORE_TCO 0x00008000 /* Ignore WakeOn TCO packets */
+#define E1000_WUFC_FLX0 0x00010000 /* Flexible Filter 0 Enable */
+#define E1000_WUFC_FLX1 0x00020000 /* Flexible Filter 1 Enable */
+#define E1000_WUFC_FLX2 0x00040000 /* Flexible Filter 2 Enable */
+#define E1000_WUFC_FLX3 0x00080000 /* Flexible Filter 3 Enable */
+#define E1000_WUFC_ALL_FILTERS 0x000F00FF /* Mask for all wakeup filters */
+#define E1000_WUFC_FLX_OFFSET 16 /* Offset to the Flexible Filters bits */
+#define E1000_WUFC_FLX_FILTERS 0x000F0000 /* Mask for the 4 flexible filters */
+
+/* Wake Up Status */
+#define E1000_WUS_LNKC 0x00000001 /* Link Status Changed */
+#define E1000_WUS_MAG 0x00000002 /* Magic Packet Received */
+#define E1000_WUS_EX 0x00000004 /* Directed Exact Received */
+#define E1000_WUS_MC 0x00000008 /* Directed Multicast Received */
+#define E1000_WUS_BC 0x00000010 /* Broadcast Received */
+#define E1000_WUS_ARP 0x00000020 /* ARP Request Packet Received */
+#define E1000_WUS_IPV4 0x00000040 /* Directed IPv4 Packet Wakeup Received */
+#define E1000_WUS_IPV6 0x00000080 /* Directed IPv6 Packet Wakeup Received */
+#define E1000_WUS_FLX0 0x00010000 /* Flexible Filter 0 Match */
+#define E1000_WUS_FLX1 0x00020000 /* Flexible Filter 1 Match */
+#define E1000_WUS_FLX2 0x00040000 /* Flexible Filter 2 Match */
+#define E1000_WUS_FLX3 0x00080000 /* Flexible Filter 3 Match */
+#define E1000_WUS_FLX_FILTERS 0x000F0000 /* Mask for the 4 flexible filters */
+
+/* Management Control */
+#define E1000_MANC_SMBUS_EN 0x00000001 /* SMBus Enabled - RO */
+#define E1000_MANC_ASF_EN 0x00000002 /* ASF Enabled - RO */
+#define E1000_MANC_R_ON_FORCE 0x00000004 /* Reset on Force TCO - RO */
+#define E1000_MANC_RMCP_EN 0x00000100 /* Enable RMCP 026Fh Filtering */
+#define E1000_MANC_0298_EN 0x00000200 /* Enable RMCP 0298h Filtering */
+#define E1000_MANC_IPV4_EN 0x00000400 /* Enable IPv4 */
+#define E1000_MANC_IPV6_EN 0x00000800 /* Enable IPv6 */
+#define E1000_MANC_SNAP_EN 0x00001000 /* Accept LLC/SNAP */
+#define E1000_MANC_ARP_EN 0x00002000 /* Enable ARP Request Filtering */
+#define E1000_MANC_NEIGHBOR_EN 0x00004000 /* Enable Neighbor Discovery
+ * Filtering */
+#define E1000_MANC_ARP_RES_EN 0x00008000 /* Enable ARP response Filtering */
+#define E1000_MANC_TCO_RESET 0x00010000 /* TCO Reset Occurred */
+#define E1000_MANC_RCV_TCO_EN 0x00020000 /* Receive TCO Packets Enabled */
+#define E1000_MANC_REPORT_STATUS 0x00040000 /* Status Reporting Enabled */
+#define E1000_MANC_RCV_ALL 0x00080000 /* Receive All Enabled */
+#define E1000_MANC_BLK_PHY_RST_ON_IDE 0x00040000 /* Block phy resets */
+#define E1000_MANC_EN_MAC_ADDR_FILTER 0x00100000 /* Enable MAC address
+ * filtering */
+#define E1000_MANC_EN_MNG2HOST 0x00200000 /* Enable MNG packets to host
+ * memory */
+#define E1000_MANC_EN_IP_ADDR_FILTER 0x00400000 /* Enable IP address
+ * filtering */
+#define E1000_MANC_EN_XSUM_FILTER 0x00800000 /* Enable checksum filtering */
+#define E1000_MANC_BR_EN 0x01000000 /* Enable broadcast filtering */
+#define E1000_MANC_SMB_REQ 0x01000000 /* SMBus Request */
+#define E1000_MANC_SMB_GNT 0x02000000 /* SMBus Grant */
+#define E1000_MANC_SMB_CLK_IN 0x04000000 /* SMBus Clock In */
+#define E1000_MANC_SMB_DATA_IN 0x08000000 /* SMBus Data In */
+#define E1000_MANC_SMB_DATA_OUT 0x10000000 /* SMBus Data Out */
+#define E1000_MANC_SMB_CLK_OUT 0x20000000 /* SMBus Clock Out */
+
+#define E1000_MANC_SMB_DATA_OUT_SHIFT 28 /* SMBus Data Out Shift */
+#define E1000_MANC_SMB_CLK_OUT_SHIFT 29 /* SMBus Clock Out Shift */
+
+/* SW Semaphore Register */
+#define E1000_SWSM_SMBI 0x00000001 /* Driver Semaphore bit */
+#define E1000_SWSM_SWESMBI 0x00000002 /* FW Semaphore bit */
+#define E1000_SWSM_WMNG 0x00000004 /* Wake MNG Clock */
+#define E1000_SWSM_DRV_LOAD 0x00000008 /* Driver Loaded Bit */
+
+/* FW Semaphore Register */
+#define E1000_FWSM_MODE_MASK 0x0000000E /* FW mode */
+#define E1000_FWSM_MODE_SHIFT 1
+#define E1000_FWSM_FW_VALID 0x00008000 /* FW established a valid mode */
+
+#define E1000_FWSM_RSPCIPHY 0x00000040 /* Reset PHY on PCI reset */
+#define E1000_FWSM_DISSW 0x10000000 /* FW disable SW Write Access */
+#define E1000_FWSM_SKUSEL_MASK 0x60000000 /* LAN SKU select */
+#define E1000_FWSM_SKUEL_SHIFT 29
+#define E1000_FWSM_SKUSEL_EMB 0x0 /* Embedded SKU */
+#define E1000_FWSM_SKUSEL_CONS 0x1 /* Consumer SKU */
+#define E1000_FWSM_SKUSEL_PERF_100 0x2 /* Perf & Corp 10/100 SKU */
+#define E1000_FWSM_SKUSEL_PERF_GBE 0x3 /* Perf & Corp GbE SKU */
+
+/* FFLT Debug Register */
+#define E1000_FFLT_DBG_INVC 0x00100000 /* Invalid /C/ code handling */
+
+typedef enum {
+ e1000_mng_mode_none = 0,
+ e1000_mng_mode_asf,
+ e1000_mng_mode_pt,
+ e1000_mng_mode_ipmi,
+ e1000_mng_mode_host_interface_only
+} e1000_mng_mode;
+
+/* Host Interface Control Register */
+#define E1000_HICR_EN 0x00000001 /* Enable Bit - RO */
+#define E1000_HICR_C 0x00000002 /* Driver sets this bit when done
+ * to put command in RAM */
+#define E1000_HICR_SV 0x00000004 /* Status Validity */
+#define E1000_HICR_FWR 0x00000080 /* FW reset. Set by the Host */
+
+/* Host Interface Command Interface - Address range 0x8800-0x8EFF */
+#define E1000_HI_MAX_DATA_LENGTH 252 /* Host Interface data length */
+#define E1000_HI_MAX_BLOCK_BYTE_LENGTH 1792 /* Number of bytes in range */
+#define E1000_HI_MAX_BLOCK_DWORD_LENGTH 448 /* Number of dwords in range */
+#define E1000_HI_COMMAND_TIMEOUT 500 /* Time in ms to process HI command */
+
+struct e1000_host_command_header {
+ u8 command_id;
+ u8 command_length;
+ u8 command_options; /* I/F bits for command, status for return */
+ u8 checksum;
+};
+struct e1000_host_command_info {
+ struct e1000_host_command_header command_header; /* Command/result header (4 bytes) */
+ u8 command_data[E1000_HI_MAX_DATA_LENGTH]; /* Command data, 0..252 bytes */
+};
+
+/* Host SMB register #0 */
+#define E1000_HSMC0R_CLKIN 0x00000001 /* SMB Clock in */
+#define E1000_HSMC0R_DATAIN 0x00000002 /* SMB Data in */
+#define E1000_HSMC0R_DATAOUT 0x00000004 /* SMB Data out */
+#define E1000_HSMC0R_CLKOUT 0x00000008 /* SMB Clock out */
+
+/* Host SMB register #1 */
+#define E1000_HSMC1R_CLKIN E1000_HSMC0R_CLKIN
+#define E1000_HSMC1R_DATAIN E1000_HSMC0R_DATAIN
+#define E1000_HSMC1R_DATAOUT E1000_HSMC0R_DATAOUT
+#define E1000_HSMC1R_CLKOUT E1000_HSMC0R_CLKOUT
+
+/* FW Status Register */
+#define E1000_FWSTS_FWS_MASK 0x000000FF /* FW Status */
+
+/* Wake Up Packet Length */
+#define E1000_WUPL_LENGTH_MASK 0x0FFF /* Only the lower 12 bits are valid */
+
+#define E1000_MDALIGN 4096
+
+/* PCI-Ex registers*/
+
+/* PCI-Ex Control Register */
+#define E1000_GCR_RXD_NO_SNOOP 0x00000001
+#define E1000_GCR_RXDSCW_NO_SNOOP 0x00000002
+#define E1000_GCR_RXDSCR_NO_SNOOP 0x00000004
+#define E1000_GCR_TXD_NO_SNOOP 0x00000008
+#define E1000_GCR_TXDSCW_NO_SNOOP 0x00000010
+#define E1000_GCR_TXDSCR_NO_SNOOP 0x00000020
+
+#define PCI_EX_NO_SNOOP_ALL (E1000_GCR_RXD_NO_SNOOP | \
+ E1000_GCR_RXDSCW_NO_SNOOP | \
+ E1000_GCR_RXDSCR_NO_SNOOP | \
+ E1000_GCR_TXD_NO_SNOOP | \
+ E1000_GCR_TXDSCW_NO_SNOOP | \
+ E1000_GCR_TXDSCR_NO_SNOOP)
+
+#define PCI_EX_82566_SNOOP_ALL PCI_EX_NO_SNOOP_ALL
+
+#define E1000_GCR_L1_ACT_WITHOUT_L0S_RX 0x08000000
+/* Function Active and Power State to MNG */
+#define E1000_FACTPS_FUNC0_POWER_STATE_MASK 0x00000003
+#define E1000_FACTPS_LAN0_VALID 0x00000004
+#define E1000_FACTPS_FUNC0_AUX_EN 0x00000008
+#define E1000_FACTPS_FUNC1_POWER_STATE_MASK 0x000000C0
+#define E1000_FACTPS_FUNC1_POWER_STATE_SHIFT 6
+#define E1000_FACTPS_LAN1_VALID 0x00000100
+#define E1000_FACTPS_FUNC1_AUX_EN 0x00000200
+#define E1000_FACTPS_FUNC2_POWER_STATE_MASK 0x00003000
+#define E1000_FACTPS_FUNC2_POWER_STATE_SHIFT 12
+#define E1000_FACTPS_IDE_ENABLE 0x00004000
+#define E1000_FACTPS_FUNC2_AUX_EN 0x00008000
+#define E1000_FACTPS_FUNC3_POWER_STATE_MASK 0x000C0000
+#define E1000_FACTPS_FUNC3_POWER_STATE_SHIFT 18
+#define E1000_FACTPS_SP_ENABLE 0x00100000
+#define E1000_FACTPS_FUNC3_AUX_EN 0x00200000
+#define E1000_FACTPS_FUNC4_POWER_STATE_MASK 0x03000000
+#define E1000_FACTPS_FUNC4_POWER_STATE_SHIFT 24
+#define E1000_FACTPS_IPMI_ENABLE 0x04000000
+#define E1000_FACTPS_FUNC4_AUX_EN 0x08000000
+#define E1000_FACTPS_MNGCG 0x20000000
+#define E1000_FACTPS_LAN_FUNC_SEL 0x40000000
+#define E1000_FACTPS_PM_STATE_CHANGED 0x80000000
+
+/* PCI-Ex Config Space */
+#define PCI_EX_LINK_STATUS 0x12
+#define PCI_EX_LINK_WIDTH_MASK 0x3F0
+#define PCI_EX_LINK_WIDTH_SHIFT 4
+
+/* EEPROM Commands - Microwire */
+#define EEPROM_READ_OPCODE_MICROWIRE 0x6 /* EEPROM read opcode */
+#define EEPROM_WRITE_OPCODE_MICROWIRE 0x5 /* EEPROM write opcode */
+#define EEPROM_ERASE_OPCODE_MICROWIRE 0x7 /* EEPROM erase opcode */
+#define EEPROM_EWEN_OPCODE_MICROWIRE 0x13 /* EEPROM erase/write enable */
+#define EEPROM_EWDS_OPCODE_MICROWIRE 0x10 /* EEPROM erase/write disable */
+
+/* EEPROM Commands - SPI */
+#define EEPROM_MAX_RETRY_SPI 5000 /* Max wait of 5ms, for RDY signal */
+#define EEPROM_READ_OPCODE_SPI 0x03 /* EEPROM read opcode */
+#define EEPROM_WRITE_OPCODE_SPI 0x02 /* EEPROM write opcode */
+#define EEPROM_A8_OPCODE_SPI 0x08 /* opcode bit-3 = address bit-8 */
+#define EEPROM_WREN_OPCODE_SPI 0x06 /* EEPROM set Write Enable latch */
+#define EEPROM_WRDI_OPCODE_SPI 0x04 /* EEPROM reset Write Enable latch */
+#define EEPROM_RDSR_OPCODE_SPI 0x05 /* EEPROM read Status register */
+#define EEPROM_WRSR_OPCODE_SPI 0x01 /* EEPROM write Status register */
+#define EEPROM_ERASE4K_OPCODE_SPI 0x20 /* EEPROM ERASE 4KB */
+#define EEPROM_ERASE64K_OPCODE_SPI 0xD8 /* EEPROM ERASE 64KB */
+#define EEPROM_ERASE256_OPCODE_SPI 0xDB /* EEPROM ERASE 256B */
+
+/* EEPROM Size definitions */
+#define EEPROM_WORD_SIZE_SHIFT 6
+#define EEPROM_SIZE_SHIFT 10
+#define EEPROM_SIZE_MASK 0x1C00
+
+/* EEPROM Word Offsets */
+#define EEPROM_COMPAT 0x0003
+#define EEPROM_ID_LED_SETTINGS 0x0004
+#define EEPROM_VERSION 0x0005
+#define EEPROM_SERDES_AMPLITUDE 0x0006 /* For SERDES output amplitude adjustment. */
+#define EEPROM_PHY_CLASS_WORD 0x0007
+#define EEPROM_INIT_CONTROL1_REG 0x000A
+#define EEPROM_INIT_CONTROL2_REG 0x000F
+#define EEPROM_SWDEF_PINS_CTRL_PORT_1 0x0010
+#define EEPROM_INIT_CONTROL3_PORT_B 0x0014
+#define EEPROM_INIT_3GIO_3 0x001A
+#define EEPROM_SWDEF_PINS_CTRL_PORT_0 0x0020
+#define EEPROM_INIT_CONTROL3_PORT_A 0x0024
+#define EEPROM_CFG 0x0012
+#define EEPROM_FLASH_VERSION 0x0032
+#define EEPROM_CHECKSUM_REG 0x003F
+
+#define E1000_EEPROM_CFG_DONE 0x00040000 /* MNG config cycle done */
+#define E1000_EEPROM_CFG_DONE_PORT_1 0x00080000 /* ...for second port */
+
+/* Word definitions for ID LED Settings */
+#define ID_LED_RESERVED_0000 0x0000
+#define ID_LED_RESERVED_FFFF 0xFFFF
+#define ID_LED_RESERVED_82573 0xF746
+#define ID_LED_DEFAULT_82573 0x1811
+#define ID_LED_DEFAULT ((ID_LED_OFF1_ON2 << 12) | \
+ (ID_LED_OFF1_OFF2 << 8) | \
+ (ID_LED_DEF1_DEF2 << 4) | \
+ (ID_LED_DEF1_DEF2))
+#define ID_LED_DEFAULT_ICH8LAN ((ID_LED_DEF1_DEF2 << 12) | \
+ (ID_LED_DEF1_OFF2 << 8) | \
+ (ID_LED_DEF1_ON2 << 4) | \
+ (ID_LED_DEF1_DEF2))
+#define ID_LED_DEF1_DEF2 0x1
+#define ID_LED_DEF1_ON2 0x2
+#define ID_LED_DEF1_OFF2 0x3
+#define ID_LED_ON1_DEF2 0x4
+#define ID_LED_ON1_ON2 0x5
+#define ID_LED_ON1_OFF2 0x6
+#define ID_LED_OFF1_DEF2 0x7
+#define ID_LED_OFF1_ON2 0x8
+#define ID_LED_OFF1_OFF2 0x9
+
+#define IGP_ACTIVITY_LED_MASK 0xFFFFF0FF
+#define IGP_ACTIVITY_LED_ENABLE 0x0300
+#define IGP_LED3_MODE 0x07000000
+
+
+/* Mask bits for SERDES amplitude adjustment in Word 6 of the EEPROM */
+#define EEPROM_SERDES_AMPLITUDE_MASK 0x000F
+
+/* Mask bit for PHY class in Word 7 of the EEPROM */
+#define EEPROM_PHY_CLASS_A 0x8000
+
+/* Mask bits for fields in Word 0x0a of the EEPROM */
+#define EEPROM_WORD0A_ILOS 0x0010
+#define EEPROM_WORD0A_SWDPIO 0x01E0
+#define EEPROM_WORD0A_LRST 0x0200
+#define EEPROM_WORD0A_FD 0x0400
+#define EEPROM_WORD0A_66MHZ 0x0800
+
+/* Mask bits for fields in Word 0x0f of the EEPROM */
+#define EEPROM_WORD0F_PAUSE_MASK 0x3000
+#define EEPROM_WORD0F_PAUSE 0x1000
+#define EEPROM_WORD0F_ASM_DIR 0x2000
+#define EEPROM_WORD0F_ANE 0x0800
+#define EEPROM_WORD0F_SWPDIO_EXT 0x00F0
+#define EEPROM_WORD0F_LPLU 0x0001
+
+/* Mask bits for fields in Word 0x10/0x20 of the EEPROM */
+#define EEPROM_WORD1020_GIGA_DISABLE 0x0010
+#define EEPROM_WORD1020_GIGA_DISABLE_NON_D0A 0x0008
+
+/* Mask bits for fields in Word 0x1a of the EEPROM */
+#define EEPROM_WORD1A_ASPM_MASK 0x000C
+
+/* For checksumming, the sum of all words in the EEPROM should equal 0xBABA. */
+#define EEPROM_SUM 0xBABA
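+
+/* Illustrative sketch (not part of the upstream header): validating the
+ * checksum means summing words 0 through EEPROM_CHECKSUM_REG inclusive and
+ * comparing the 16-bit result with EEPROM_SUM. read_word() stands in for
+ * the driver's EEPROM read routine and is a name assumed for this note.
+ *
+ *     u16 checksum = 0, word;
+ *     for (i = 0; i <= EEPROM_CHECKSUM_REG; i++) {
+ *         read_word(hw, i, &word);
+ *         checksum += word;
+ *     }
+ *     valid = (checksum == (u16)EEPROM_SUM);
+ */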
+
+/* EEPROM Map defines (WORD OFFSETS)*/
+#define EEPROM_NODE_ADDRESS_BYTE_0 0
+#define EEPROM_PBA_BYTE_1 8
+
+#define EEPROM_RESERVED_WORD 0xFFFF
+
+/* EEPROM Map Sizes (Byte Counts) */
+#define PBA_SIZE 4
+
+/* Collision related configuration parameters */
+#define E1000_COLLISION_THRESHOLD 15
+#define E1000_CT_SHIFT 4
+/* Collision distance is a 0-based value that applies to
+ * half-duplex-capable hardware only. */
+#define E1000_COLLISION_DISTANCE 63
+#define E1000_COLLISION_DISTANCE_82542 64
+#define E1000_FDX_COLLISION_DISTANCE E1000_COLLISION_DISTANCE
+#define E1000_HDX_COLLISION_DISTANCE E1000_COLLISION_DISTANCE
+#define E1000_COLD_SHIFT 12
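+
+/* Illustrative sketch (not part of the upstream header): placing the
+ * collision threshold and collision distance into the TCTL CT/COLD fields
+ * with the shifts above (tctl is a hypothetical copy of the TCTL register):
+ *
+ *     tctl &= ~(E1000_TCTL_CT | E1000_TCTL_COLD);
+ *     tctl |= (E1000_COLLISION_THRESHOLD << E1000_CT_SHIFT) |
+ *             (E1000_COLLISION_DISTANCE << E1000_COLD_SHIFT);
+ */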
+
+/* Number of Transmit and Receive Descriptors must be a multiple of 8 */
+#define REQ_TX_DESCRIPTOR_MULTIPLE 8
+#define REQ_RX_DESCRIPTOR_MULTIPLE 8
+
+/* Default values for the transmit IPG register */
+#define DEFAULT_82542_TIPG_IPGT 10
+#define DEFAULT_82543_TIPG_IPGT_FIBER 9
+#define DEFAULT_82543_TIPG_IPGT_COPPER 8
+
+#define E1000_TIPG_IPGT_MASK 0x000003FF
+#define E1000_TIPG_IPGR1_MASK 0x000FFC00
+#define E1000_TIPG_IPGR2_MASK 0x3FF00000
+
+#define DEFAULT_82542_TIPG_IPGR1 2
+#define DEFAULT_82543_TIPG_IPGR1 8
+#define E1000_TIPG_IPGR1_SHIFT 10
+
+#define DEFAULT_82542_TIPG_IPGR2 10
+#define DEFAULT_82543_TIPG_IPGR2 6
+#define DEFAULT_80003ES2LAN_TIPG_IPGR2 7
+#define E1000_TIPG_IPGR2_SHIFT 20
+
+#define DEFAULT_80003ES2LAN_TIPG_IPGT_10_100 0x00000009
+#define DEFAULT_80003ES2LAN_TIPG_IPGT_1000 0x00000008
+#define E1000_TXDMAC_DPP 0x00000001
+
+/* Adaptive IFS defines */
+#define TX_THRESHOLD_START 8
+#define TX_THRESHOLD_INCREMENT 10
+#define TX_THRESHOLD_DECREMENT 1
+#define TX_THRESHOLD_STOP 190
+#define TX_THRESHOLD_DISABLE 0
+#define TX_THRESHOLD_TIMER_MS 10000
+#define MIN_NUM_XMITS 1000
+#define IFS_MAX 80
+#define IFS_STEP 10
+#define IFS_MIN 40
+#define IFS_RATIO 4
+
+/* Extended Configuration Control and Size */
+#define E1000_EXTCNF_CTRL_PCIE_WRITE_ENABLE 0x00000001
+#define E1000_EXTCNF_CTRL_PHY_WRITE_ENABLE 0x00000002
+#define E1000_EXTCNF_CTRL_D_UD_ENABLE 0x00000004
+#define E1000_EXTCNF_CTRL_D_UD_LATENCY 0x00000008
+#define E1000_EXTCNF_CTRL_D_UD_OWNER 0x00000010
+#define E1000_EXTCNF_CTRL_MDIO_SW_OWNERSHIP 0x00000020
+#define E1000_EXTCNF_CTRL_MDIO_HW_OWNERSHIP 0x00000040
+#define E1000_EXTCNF_CTRL_EXT_CNF_POINTER 0x0FFF0000
+
+#define E1000_EXTCNF_SIZE_EXT_PHY_LENGTH 0x000000FF
+#define E1000_EXTCNF_SIZE_EXT_DOCK_LENGTH 0x0000FF00
+#define E1000_EXTCNF_SIZE_EXT_PCIE_LENGTH 0x00FF0000
+#define E1000_EXTCNF_CTRL_LCD_WRITE_ENABLE 0x00000001
+#define E1000_EXTCNF_CTRL_SWFLAG 0x00000020
+
+/* PBA constants */
+#define E1000_PBA_8K 0x0008 /* 8KB, default Rx allocation */
+#define E1000_PBA_12K 0x000C /* 12KB, default Rx allocation */
+#define E1000_PBA_16K 0x0010 /* 16KB, default TX allocation */
+#define E1000_PBA_20K 0x0014
+#define E1000_PBA_22K 0x0016
+#define E1000_PBA_24K 0x0018
+#define E1000_PBA_30K 0x001E
+#define E1000_PBA_32K 0x0020
+#define E1000_PBA_34K 0x0022
+#define E1000_PBA_38K 0x0026
+#define E1000_PBA_40K 0x0028
+#define E1000_PBA_48K 0x0030 /* 48KB, default RX allocation */
+
+#define E1000_PBS_16K E1000_PBA_16K
+
+/* Flow Control Constants */
+#define FLOW_CONTROL_ADDRESS_LOW 0x00C28001
+#define FLOW_CONTROL_ADDRESS_HIGH 0x00000100
+#define FLOW_CONTROL_TYPE 0x8808
+
+/* The historical defaults for the flow control values are given below. */
+#define FC_DEFAULT_HI_THRESH (0x8000) /* 32KB */
+#define FC_DEFAULT_LO_THRESH (0x4000) /* 16KB */
+#define FC_DEFAULT_TX_TIMER (0x100) /* ~130 us */
+
+/* PCIX Config space */
+#define PCIX_COMMAND_REGISTER 0xE6
+#define PCIX_STATUS_REGISTER_LO 0xE8
+#define PCIX_STATUS_REGISTER_HI 0xEA
+
+#define PCIX_COMMAND_MMRBC_MASK 0x000C
+#define PCIX_COMMAND_MMRBC_SHIFT 0x2
+#define PCIX_STATUS_HI_MMRBC_MASK 0x0060
+#define PCIX_STATUS_HI_MMRBC_SHIFT 0x5
+#define PCIX_STATUS_HI_MMRBC_4K 0x3
+#define PCIX_STATUS_HI_MMRBC_2K 0x2
+
+
+/* Number of bits required to shift right the "pause" bits from the
+ * EEPROM (bits 13:12) to the "pause" (bits 8:7) field in the TXCW register.
+ */
+#define PAUSE_SHIFT 5
+
+/* Number of bits required to shift left the "SWDPIO" bits from the
+ * EEPROM (bits 8:5) to the "SWDPIO" (bits 25:22) field in the CTRL register.
+ */
+#define SWDPIO_SHIFT 17
+
+/* Number of bits required to shift left the "SWDPIO_EXT" bits from the
+ * EEPROM word F (bits 7:4) to bits 11:8 of the Extended CTRL register.
+ */
+#define SWDPIO__EXT_SHIFT 4
+
+/* Number of bits required to shift left the "ILOS" bit from the EEPROM
+ * (bit 4) to the "ILOS" (bit 7) field in the CTRL register.
+ */
+#define ILOS_SHIFT 3
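+
+/* Illustrative sketch (not part of the upstream header): applying the three
+ * shift counts above when copying EEPROM settings into the MAC registers
+ * (eeprom_word0a/eeprom_word0f are hypothetical variables holding EEPROM
+ * words 0x0a and 0x0f):
+ *
+ *     ctrl |= (eeprom_word0a & EEPROM_WORD0A_SWDPIO) << SWDPIO_SHIFT;
+ *     ctrl |= (eeprom_word0a & EEPROM_WORD0A_ILOS) << ILOS_SHIFT;
+ *     txcw |= (eeprom_word0f & EEPROM_WORD0F_PAUSE_MASK) >> PAUSE_SHIFT;
+ */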
+
+
+#define RECEIVE_BUFFER_ALIGN_SIZE (256)
+
+/* Number of milliseconds we wait for auto-negotiation to complete */
+#define LINK_UP_TIMEOUT 500
+
+/* Number of 100 microseconds we wait for PCI Express master disable */
+#define MASTER_DISABLE_TIMEOUT 800
+/* Number of milliseconds we wait for EEPROM auto read bit done after MAC reset */
+#define AUTO_READ_DONE_TIMEOUT 10
+/* Number of milliseconds we wait for PHY configuration done after MAC reset */
+#define PHY_CFG_TIMEOUT 100
+
+#define E1000_TX_BUFFER_SIZE ((u32)1514)
+
+/* The carrier extension symbol, as received by the NIC. */
+#define CARRIER_EXTENSION 0x0F
+
+/* TBI_ACCEPT macro definition:
+ *
+ * This macro requires:
+ * adapter = a pointer to struct e1000_hw
+ * status = the 8 bit status field of the RX descriptor with EOP set
+ * errors = the 8 bit error field of the RX descriptor with EOP set
+ * length = the sum of all the length fields of the RX descriptors that
+ * make up the current frame
+ * last_byte = the last byte of the frame DMAed by the hardware
+ * The minimum and maximum acceptable frame lengths are taken from the
+ * adapter's min_frame_size and max_frame_size fields.
+ *
+ * This macro is a conditional that should be used in the interrupt
+ * handler's Rx processing routine when RxErrors have been detected.
+ *
+ * Typical use:
+ * ...
+ * if (TBI_ACCEPT) {
+ * accept_frame = true;
+ * e1000_tbi_adjust_stats(adapter, MacAddress);
+ * frame_length--;
+ * } else {
+ * accept_frame = false;
+ * }
+ * ...
+ */
+
+#define TBI_ACCEPT(adapter, status, errors, length, last_byte) \
+ ((adapter)->tbi_compatibility_on && \
+ (((errors) & E1000_RXD_ERR_FRAME_ERR_MASK) == E1000_RXD_ERR_CE) && \
+ ((last_byte) == CARRIER_EXTENSION) && \
+ (((status) & E1000_RXD_STAT_VP) ? \
+ (((length) > ((adapter)->min_frame_size - VLAN_TAG_SIZE)) && \
+ ((length) <= ((adapter)->max_frame_size + 1))) : \
+ (((length) > (adapter)->min_frame_size) && \
+ ((length) <= ((adapter)->max_frame_size + VLAN_TAG_SIZE + 1)))))
+
+
+/* Structures, enums, and macros for the PHY */
+
+/* Bit definitions for the Management Data IO (MDIO) and Management Data
+ * Clock (MDC) pins in the Device Control Register.
+ */
+#define E1000_CTRL_PHY_RESET_DIR E1000_CTRL_SWDPIO0
+#define E1000_CTRL_PHY_RESET E1000_CTRL_SWDPIN0
+#define E1000_CTRL_MDIO_DIR E1000_CTRL_SWDPIO2
+#define E1000_CTRL_MDIO E1000_CTRL_SWDPIN2
+#define E1000_CTRL_MDC_DIR E1000_CTRL_SWDPIO3
+#define E1000_CTRL_MDC E1000_CTRL_SWDPIN3
+#define E1000_CTRL_PHY_RESET_DIR4 E1000_CTRL_EXT_SDP4_DIR
+#define E1000_CTRL_PHY_RESET4 E1000_CTRL_EXT_SDP4_DATA
+
+/* PHY 1000 MII Register/Bit Definitions */
+/* PHY Registers defined by IEEE */
+#define PHY_CTRL 0x00 /* Control Register */
+#define PHY_STATUS 0x01 /* Status Register */
+#define PHY_ID1 0x02 /* Phy Id Reg (word 1) */
+#define PHY_ID2 0x03 /* Phy Id Reg (word 2) */
+#define PHY_AUTONEG_ADV 0x04 /* Autoneg Advertisement */
+#define PHY_LP_ABILITY 0x05 /* Link Partner Ability (Base Page) */
+#define PHY_AUTONEG_EXP 0x06 /* Autoneg Expansion Reg */
+#define PHY_NEXT_PAGE_TX 0x07 /* Next Page TX */
+#define PHY_LP_NEXT_PAGE 0x08 /* Link Partner Next Page */
+#define PHY_1000T_CTRL 0x09 /* 1000Base-T Control Reg */
+#define PHY_1000T_STATUS 0x0A /* 1000Base-T Status Reg */
+#define PHY_EXT_STATUS 0x0F /* Extended Status Reg */
+
+#define MAX_PHY_REG_ADDRESS 0x1F /* 5 bit address bus (0-0x1F) */
+#define MAX_PHY_MULTI_PAGE_REG 0xF /* Registers equal on all pages */
+
+/* M88E1000 Specific Registers */
+#define M88E1000_PHY_SPEC_CTRL 0x10 /* PHY Specific Control Register */
+#define M88E1000_PHY_SPEC_STATUS 0x11 /* PHY Specific Status Register */
+#define M88E1000_INT_ENABLE 0x12 /* Interrupt Enable Register */
+#define M88E1000_INT_STATUS 0x13 /* Interrupt Status Register */
+#define M88E1000_EXT_PHY_SPEC_CTRL 0x14 /* Extended PHY Specific Control */
+#define M88E1000_RX_ERR_CNTR 0x15 /* Receive Error Counter */
+
+#define M88E1000_PHY_EXT_CTRL 0x1A /* PHY extend control register */
+#define M88E1000_PHY_PAGE_SELECT 0x1D /* Reg 29 for page number setting */
+#define M88E1000_PHY_GEN_CONTROL 0x1E /* Its meaning depends on reg 29 */
+#define M88E1000_PHY_VCO_REG_BIT8 0x100 /* Bits 8 & 11 are adjusted for */
+#define M88E1000_PHY_VCO_REG_BIT11 0x800 /* improved BER performance */
+
+#define IGP01E1000_IEEE_REGS_PAGE 0x0000
+#define IGP01E1000_IEEE_RESTART_AUTONEG 0x3300
+#define IGP01E1000_IEEE_FORCE_GIGA 0x0140
+
+/* IGP01E1000 Specific Registers */
+#define IGP01E1000_PHY_PORT_CONFIG 0x10 /* PHY Specific Port Config Register */
+#define IGP01E1000_PHY_PORT_STATUS 0x11 /* PHY Specific Status Register */
+#define IGP01E1000_PHY_PORT_CTRL 0x12 /* PHY Specific Control Register */
+#define IGP01E1000_PHY_LINK_HEALTH 0x13 /* PHY Link Health Register */
+#define IGP01E1000_GMII_FIFO 0x14 /* GMII FIFO Register */
+#define IGP01E1000_PHY_CHANNEL_QUALITY 0x15 /* PHY Channel Quality Register */
+#define IGP02E1000_PHY_POWER_MGMT 0x19
+#define IGP01E1000_PHY_PAGE_SELECT 0x1F /* PHY Page Select Core Register */
+
+/* IGP01E1000 AGC Registers - stores the cable length values*/
+#define IGP01E1000_PHY_AGC_A 0x1172
+#define IGP01E1000_PHY_AGC_B 0x1272
+#define IGP01E1000_PHY_AGC_C 0x1472
+#define IGP01E1000_PHY_AGC_D 0x1872
+
+/* IGP02E1000 AGC Registers for cable length values */
+#define IGP02E1000_PHY_AGC_A 0x11B1
+#define IGP02E1000_PHY_AGC_B 0x12B1
+#define IGP02E1000_PHY_AGC_C 0x14B1
+#define IGP02E1000_PHY_AGC_D 0x18B1
+
+/* IGP01E1000 DSP Reset Register */
+#define IGP01E1000_PHY_DSP_RESET 0x1F33
+#define IGP01E1000_PHY_DSP_SET 0x1F71
+#define IGP01E1000_PHY_DSP_FFE 0x1F35
+
+#define IGP01E1000_PHY_CHANNEL_NUM 4
+#define IGP02E1000_PHY_CHANNEL_NUM 4
+
+#define IGP01E1000_PHY_AGC_PARAM_A 0x1171
+#define IGP01E1000_PHY_AGC_PARAM_B 0x1271
+#define IGP01E1000_PHY_AGC_PARAM_C 0x1471
+#define IGP01E1000_PHY_AGC_PARAM_D 0x1871
+
+#define IGP01E1000_PHY_EDAC_MU_INDEX 0xC000
+#define IGP01E1000_PHY_EDAC_SIGN_EXT_9_BITS 0x8000
+
+#define IGP01E1000_PHY_ANALOG_TX_STATE 0x2890
+#define IGP01E1000_PHY_ANALOG_CLASS_A 0x2000
+#define IGP01E1000_PHY_FORCE_ANALOG_ENABLE 0x0004
+#define IGP01E1000_PHY_DSP_FFE_CM_CP 0x0069
+
+#define IGP01E1000_PHY_DSP_FFE_DEFAULT 0x002A
+/* IGP01E1000 PCS Initialization register - stores the polarity status when
+ * speed = 1000 Mbps. */
+#define IGP01E1000_PHY_PCS_INIT_REG 0x00B4
+#define IGP01E1000_PHY_PCS_CTRL_REG 0x00B5
+
+#define IGP01E1000_ANALOG_REGS_PAGE 0x20C0
+
+/* Bits...
+ * 15-5: page
+ * 4-0: register offset
+ */
+#define GG82563_PAGE_SHIFT 5
+#define GG82563_REG(page, reg) \
+ (((page) << GG82563_PAGE_SHIFT) | ((reg) & MAX_PHY_REG_ADDRESS))
+#define GG82563_MIN_ALT_REG 30
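+
+/* Worked example (illustration only, not from the upstream header):
+ * GG82563_REG() packs the page number into bits 15:5 and the 5-bit register
+ * offset into bits 4:0, e.g. the Kumeran Mode Control register on page 193:
+ *
+ *   GG82563_REG(193, 16) == (193 << 5) | (16 & MAX_PHY_REG_ADDRESS) == 0x1830
+ *
+ * The page can later be recovered as (addr >> GG82563_PAGE_SHIFT) and the
+ * offset as (addr & MAX_PHY_REG_ADDRESS).
+ */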
+
+/* GG82563 Specific Registers */
+#define GG82563_PHY_SPEC_CTRL \
+ GG82563_REG(0, 16) /* PHY Specific Control */
+#define GG82563_PHY_SPEC_STATUS \
+ GG82563_REG(0, 17) /* PHY Specific Status */
+#define GG82563_PHY_INT_ENABLE \
+ GG82563_REG(0, 18) /* Interrupt Enable */
+#define GG82563_PHY_SPEC_STATUS_2 \
+ GG82563_REG(0, 19) /* PHY Specific Status 2 */
+#define GG82563_PHY_RX_ERR_CNTR \
+ GG82563_REG(0, 21) /* Receive Error Counter */
+#define GG82563_PHY_PAGE_SELECT \
+ GG82563_REG(0, 22) /* Page Select */
+#define GG82563_PHY_SPEC_CTRL_2 \
+ GG82563_REG(0, 26) /* PHY Specific Control 2 */
+#define GG82563_PHY_PAGE_SELECT_ALT \
+ GG82563_REG(0, 29) /* Alternate Page Select */
+#define GG82563_PHY_TEST_CLK_CTRL \
+ GG82563_REG(0, 30) /* Test Clock Control (use reg. 29 to select) */
+
+#define GG82563_PHY_MAC_SPEC_CTRL \
+ GG82563_REG(2, 21) /* MAC Specific Control Register */
+#define GG82563_PHY_MAC_SPEC_CTRL_2 \
+ GG82563_REG(2, 26) /* MAC Specific Control 2 */
+
+#define GG82563_PHY_DSP_DISTANCE \
+ GG82563_REG(5, 26) /* DSP Distance */
+
+/* Page 193 - Port Control Registers */
+#define GG82563_PHY_KMRN_MODE_CTRL \
+ GG82563_REG(193, 16) /* Kumeran Mode Control */
+#define GG82563_PHY_PORT_RESET \
+ GG82563_REG(193, 17) /* Port Reset */
+#define GG82563_PHY_REVISION_ID \
+ GG82563_REG(193, 18) /* Revision ID */
+#define GG82563_PHY_DEVICE_ID \
+ GG82563_REG(193, 19) /* Device ID */
+#define GG82563_PHY_PWR_MGMT_CTRL \
+ GG82563_REG(193, 20) /* Power Management Control */
+#define GG82563_PHY_RATE_ADAPT_CTRL \
+ GG82563_REG(193, 25) /* Rate Adaptation Control */
+
+/* Page 194 - KMRN Registers */
+#define GG82563_PHY_KMRN_FIFO_CTRL_STAT \
+ GG82563_REG(194, 16) /* FIFO's Control/Status */
+#define GG82563_PHY_KMRN_CTRL \
+ GG82563_REG(194, 17) /* Control */
+#define GG82563_PHY_INBAND_CTRL \
+ GG82563_REG(194, 18) /* Inband Control */
+#define GG82563_PHY_KMRN_DIAGNOSTIC \
+ GG82563_REG(194, 19) /* Diagnostic */
+#define GG82563_PHY_ACK_TIMEOUTS \
+ GG82563_REG(194, 20) /* Acknowledge Timeouts */
+#define GG82563_PHY_ADV_ABILITY \
+ GG82563_REG(194, 21) /* Advertised Ability */
+#define GG82563_PHY_LINK_PARTNER_ADV_ABILITY \
+ GG82563_REG(194, 23) /* Link Partner Advertised Ability */
+#define GG82563_PHY_ADV_NEXT_PAGE \
+ GG82563_REG(194, 24) /* Advertised Next Page */
+#define GG82563_PHY_LINK_PARTNER_ADV_NEXT_PAGE \
+ GG82563_REG(194, 25) /* Link Partner Advertised Next page */
+#define GG82563_PHY_KMRN_MISC \
+ GG82563_REG(194, 26) /* Misc. */
+
+/* PHY Control Register */
+#define MII_CR_SPEED_SELECT_MSB 0x0040 /* bits 6,13: 10=1000, 01=100, 00=10 */
+#define MII_CR_COLL_TEST_ENABLE 0x0080 /* Collision test enable */
+#define MII_CR_FULL_DUPLEX 0x0100 /* FDX =1, half duplex =0 */
+#define MII_CR_RESTART_AUTO_NEG 0x0200 /* Restart auto negotiation */
+#define MII_CR_ISOLATE 0x0400 /* Isolate PHY from MII */
+#define MII_CR_POWER_DOWN 0x0800 /* Power down */
+#define MII_CR_AUTO_NEG_EN 0x1000 /* Auto Neg Enable */
+#define MII_CR_SPEED_SELECT_LSB 0x2000 /* bits 6,13: 10=1000, 01=100, 00=10 */
+#define MII_CR_LOOPBACK 0x4000 /* 0 = normal, 1 = loopback */
+#define MII_CR_RESET 0x8000 /* 0 = normal, 1 = PHY reset */
+
+/* PHY Status Register */
+#define MII_SR_EXTENDED_CAPS 0x0001 /* Extended register capabilities */
+#define MII_SR_JABBER_DETECT 0x0002 /* Jabber Detected */
+#define MII_SR_LINK_STATUS 0x0004 /* Link Status 1 = link */
+#define MII_SR_AUTONEG_CAPS 0x0008 /* Auto Neg Capable */
+#define MII_SR_REMOTE_FAULT 0x0010 /* Remote Fault Detect */
+#define MII_SR_AUTONEG_COMPLETE 0x0020 /* Auto Neg Complete */
+#define MII_SR_PREAMBLE_SUPPRESS 0x0040 /* Preamble may be suppressed */
+#define MII_SR_EXTENDED_STATUS 0x0100 /* Ext. status info in Reg 0x0F */
+#define MII_SR_100T2_HD_CAPS 0x0200 /* 100T2 Half Duplex Capable */
+#define MII_SR_100T2_FD_CAPS 0x0400 /* 100T2 Full Duplex Capable */
+#define MII_SR_10T_HD_CAPS 0x0800 /* 10T Half Duplex Capable */
+#define MII_SR_10T_FD_CAPS 0x1000 /* 10T Full Duplex Capable */
+#define MII_SR_100X_HD_CAPS 0x2000 /* 100X Half Duplex Capable */
+#define MII_SR_100X_FD_CAPS 0x4000 /* 100X Full Duplex Capable */
+#define MII_SR_100T4_CAPS 0x8000 /* 100T4 Capable */
+
+/* Autoneg Advertisement Register */
+#define NWAY_AR_SELECTOR_FIELD 0x0001 /* indicates IEEE 802.3 CSMA/CD */
+#define NWAY_AR_10T_HD_CAPS 0x0020 /* 10T Half Duplex Capable */
+#define NWAY_AR_10T_FD_CAPS 0x0040 /* 10T Full Duplex Capable */
+#define NWAY_AR_100TX_HD_CAPS 0x0080 /* 100TX Half Duplex Capable */
+#define NWAY_AR_100TX_FD_CAPS 0x0100 /* 100TX Full Duplex Capable */
+#define NWAY_AR_100T4_CAPS 0x0200 /* 100T4 Capable */
+#define NWAY_AR_PAUSE 0x0400 /* Pause operation desired */
+#define NWAY_AR_ASM_DIR 0x0800 /* Asymmetric Pause Direction bit */
+#define NWAY_AR_REMOTE_FAULT 0x2000 /* Remote Fault detected */
+#define NWAY_AR_NEXT_PAGE 0x8000 /* Next Page ability supported */
+
+/* Link Partner Ability Register (Base Page) */
+#define NWAY_LPAR_SELECTOR_FIELD 0x0000 /* LP protocol selector field */
+#define NWAY_LPAR_10T_HD_CAPS 0x0020 /* LP is 10T Half Duplex Capable */
+#define NWAY_LPAR_10T_FD_CAPS 0x0040 /* LP is 10T Full Duplex Capable */
+#define NWAY_LPAR_100TX_HD_CAPS 0x0080 /* LP is 100TX Half Duplex Capable */
+#define NWAY_LPAR_100TX_FD_CAPS 0x0100 /* LP is 100TX Full Duplex Capable */
+#define NWAY_LPAR_100T4_CAPS 0x0200 /* LP is 100T4 Capable */
+#define NWAY_LPAR_PAUSE 0x0400 /* LP Pause operation desired */
+#define NWAY_LPAR_ASM_DIR 0x0800 /* LP Asymmetric Pause Direction bit */
+#define NWAY_LPAR_REMOTE_FAULT 0x2000 /* LP has detected Remote Fault */
+#define NWAY_LPAR_ACKNOWLEDGE 0x4000 /* LP has rx'd link code word */
+#define NWAY_LPAR_NEXT_PAGE 0x8000 /* Next Page ability supported */
+
+/* Autoneg Expansion Register */
+#define NWAY_ER_LP_NWAY_CAPS 0x0001 /* LP has Auto Neg Capability */
+#define NWAY_ER_PAGE_RXD 0x0002 /* A New Page has been received */
+#define NWAY_ER_NEXT_PAGE_CAPS 0x0004 /* Local device is Next Page able */
+#define NWAY_ER_LP_NEXT_PAGE_CAPS 0x0008 /* LP is Next Page able */
+#define NWAY_ER_PAR_DETECT_FAULT 0x0010 /* Parallel Detection Fault occurred */
+
+/* Next Page TX Register */
+#define NPTX_MSG_CODE_FIELD 0x0001 /* NP msg code or unformatted data */
+#define NPTX_TOGGLE 0x0800 /* Toggles between exchanges
+ * of different NP
+ */
+#define NPTX_ACKNOWLDGE2 0x1000 /* 1 = will comply with msg
+ * 0 = cannot comply with msg
+ */
+#define NPTX_MSG_PAGE 0x2000 /* formatted(1)/unformatted(0) pg */
+#define NPTX_NEXT_PAGE 0x8000 /* 1 = additional NP will follow
+ * 0 = sending last NP
+ */
+
+/* Link Partner Next Page Register */
+#define LP_RNPR_MSG_CODE_FIELD 0x0001 /* NP msg code or unformatted data */
+#define LP_RNPR_TOGGLE 0x0800 /* Toggles between exchanges
+ * of different NP
+ */
+#define LP_RNPR_ACKNOWLDGE2 0x1000 /* 1 = will comply with msg
+ * 0 = cannot comply with msg
+ */
+#define LP_RNPR_MSG_PAGE 0x2000 /* formatted(1)/unformatted(0) pg */
+#define LP_RNPR_ACKNOWLDGE 0x4000 /* 1 = ACK / 0 = NO ACK */
+#define LP_RNPR_NEXT_PAGE 0x8000 /* 1 = additional NP will follow
+ * 0 = sending last NP
+ */
+
+/* 1000BASE-T Control Register */
+#define CR_1000T_ASYM_PAUSE 0x0080 /* Advertise asymmetric pause bit */
+#define CR_1000T_HD_CAPS 0x0100 /* Advertise 1000T HD capability */
+#define CR_1000T_FD_CAPS 0x0200 /* Advertise 1000T FD capability */
+#define CR_1000T_REPEATER_DTE 0x0400 /* 1=Repeater/switch device port */
+ /* 0=DTE device */
+#define CR_1000T_MS_VALUE 0x0800 /* 1=Configure PHY as Master */
+ /* 0=Configure PHY as Slave */
+#define CR_1000T_MS_ENABLE 0x1000 /* 1=Master/Slave manual config value */
+ /* 0=Automatic Master/Slave config */
+#define CR_1000T_TEST_MODE_NORMAL 0x0000 /* Normal Operation */
+#define CR_1000T_TEST_MODE_1 0x2000 /* Transmit Waveform test */
+#define CR_1000T_TEST_MODE_2 0x4000 /* Master Transmit Jitter test */
+#define CR_1000T_TEST_MODE_3 0x6000 /* Slave Transmit Jitter test */
+#define CR_1000T_TEST_MODE_4 0x8000 /* Transmitter Distortion test */
+
+/* 1000BASE-T Status Register */
+#define SR_1000T_IDLE_ERROR_CNT 0x00FF /* Num idle errors since last read */
+#define SR_1000T_ASYM_PAUSE_DIR 0x0100 /* LP asymmetric pause direction bit */
+#define SR_1000T_LP_HD_CAPS 0x0400 /* LP is 1000T HD capable */
+#define SR_1000T_LP_FD_CAPS 0x0800 /* LP is 1000T FD capable */
+#define SR_1000T_REMOTE_RX_STATUS 0x1000 /* Remote receiver OK */
+#define SR_1000T_LOCAL_RX_STATUS 0x2000 /* Local receiver OK */
+#define SR_1000T_MS_CONFIG_RES 0x4000 /* 1=Local TX is Master, 0=Slave */
+#define SR_1000T_MS_CONFIG_FAULT 0x8000 /* Master/Slave config fault */
+#define SR_1000T_REMOTE_RX_STATUS_SHIFT 12
+#define SR_1000T_LOCAL_RX_STATUS_SHIFT 13
+#define SR_1000T_PHY_EXCESSIVE_IDLE_ERR_COUNT 5
+#define FFE_IDLE_ERR_COUNT_TIMEOUT_20 20
+#define FFE_IDLE_ERR_COUNT_TIMEOUT_100 100
+
+/* Extended Status Register */
+#define IEEE_ESR_1000T_HD_CAPS 0x1000 /* 1000T HD capable */
+#define IEEE_ESR_1000T_FD_CAPS 0x2000 /* 1000T FD capable */
+#define IEEE_ESR_1000X_HD_CAPS 0x4000 /* 1000X HD capable */
+#define IEEE_ESR_1000X_FD_CAPS 0x8000 /* 1000X FD capable */
+
+#define PHY_TX_POLARITY_MASK 0x0100 /* register 10h bit 8 (polarity bit) */
+#define PHY_TX_NORMAL_POLARITY 0 /* register 10h bit 8 (normal polarity) */
+
+#define AUTO_POLARITY_DISABLE 0x0010 /* register 11h bit 4 */
+ /* (0=enable, 1=disable) */
+
+/* M88E1000 PHY Specific Control Register */
+#define M88E1000_PSCR_JABBER_DISABLE 0x0001 /* 1=Jabber Function disabled */
+#define M88E1000_PSCR_POLARITY_REVERSAL 0x0002 /* 1=Polarity Reversal enabled */
+#define M88E1000_PSCR_SQE_TEST 0x0004 /* 1=SQE Test enabled */
+#define M88E1000_PSCR_CLK125_DISABLE 0x0010 /* 1=CLK125 low,
+ * 0=CLK125 toggling
+ */
+#define M88E1000_PSCR_MDI_MANUAL_MODE 0x0000 /* MDI Crossover Mode bits 6:5 */
+ /* Manual MDI configuration */
+#define M88E1000_PSCR_MDIX_MANUAL_MODE 0x0020 /* Manual MDIX configuration */
+#define M88E1000_PSCR_AUTO_X_1000T 0x0040 /* 1000BASE-T: Auto crossover,
+ * 100BASE-TX/10BASE-T:
+ * MDI Mode
+ */
+#define M88E1000_PSCR_AUTO_X_MODE 0x0060 /* Auto crossover enabled
+ * all speeds.
+ */
+#define M88E1000_PSCR_10BT_EXT_DIST_ENABLE 0x0080
+ /* 1=Enable Extended 10BASE-T distance
+ * (Lower 10BASE-T RX Threshold)
+ * 0=Normal 10BASE-T RX Threshold */
+#define M88E1000_PSCR_MII_5BIT_ENABLE 0x0100
+ /* 1=5-Bit interface in 100BASE-TX
+ * 0=MII interface in 100BASE-TX */
+#define M88E1000_PSCR_SCRAMBLER_DISABLE 0x0200 /* 1=Scrambler disable */
+#define M88E1000_PSCR_FORCE_LINK_GOOD 0x0400 /* 1=Force link good */
+#define M88E1000_PSCR_ASSERT_CRS_ON_TX 0x0800 /* 1=Assert CRS on Transmit */
+
+#define M88E1000_PSCR_POLARITY_REVERSAL_SHIFT 1
+#define M88E1000_PSCR_AUTO_X_MODE_SHIFT 5
+#define M88E1000_PSCR_10BT_EXT_DIST_ENABLE_SHIFT 7
+
+/* M88E1000 PHY Specific Status Register */
+#define M88E1000_PSSR_JABBER 0x0001 /* 1=Jabber */
+#define M88E1000_PSSR_REV_POLARITY 0x0002 /* 1=Polarity reversed */
+#define M88E1000_PSSR_DOWNSHIFT 0x0020 /* 1=Downshifted */
+#define M88E1000_PSSR_MDIX 0x0040 /* 1=MDIX; 0=MDI */
+#define M88E1000_PSSR_CABLE_LENGTH 0x0380 /* 0=<50M;1=50-80M;2=80-110M;
+ * 3=110-140M;4=>140M */
+#define M88E1000_PSSR_LINK 0x0400 /* 1=Link up, 0=Link down */
+#define M88E1000_PSSR_SPD_DPLX_RESOLVED 0x0800 /* 1=Speed & Duplex resolved */
+#define M88E1000_PSSR_PAGE_RCVD 0x1000 /* 1=Page received */
+#define M88E1000_PSSR_DPLX 0x2000 /* 1=Full Duplex, 0=Half Duplex */
+#define M88E1000_PSSR_SPEED 0xC000 /* Speed, bits 14:15 */
+#define M88E1000_PSSR_10MBS 0x0000 /* 00=10Mbs */
+#define M88E1000_PSSR_100MBS 0x4000 /* 01=100Mbs */
+#define M88E1000_PSSR_1000MBS 0x8000 /* 10=1000Mbs */
+
+#define M88E1000_PSSR_REV_POLARITY_SHIFT 1
+#define M88E1000_PSSR_DOWNSHIFT_SHIFT 5
+#define M88E1000_PSSR_MDIX_SHIFT 6
+#define M88E1000_PSSR_CABLE_LENGTH_SHIFT 7
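+
+/* Worked example (illustration only): the shift values above pair with the
+ * corresponding masks, e.g. given a raw PHY Specific Status word 'pssr'
+ * (a hypothetical variable holding the register value), the cable length
+ * code 0..4 is extracted as
+ *
+ *   (pssr & M88E1000_PSSR_CABLE_LENGTH) >> M88E1000_PSSR_CABLE_LENGTH_SHIFT
+ *
+ * and maps to the ranges listed in the M88E1000_PSSR_CABLE_LENGTH comment.
+ */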
+
+/* M88E1000 Extended PHY Specific Control Register */
+#define M88E1000_EPSCR_FIBER_LOOPBACK 0x4000 /* 1=Fiber loopback */
+#define M88E1000_EPSCR_DOWN_NO_IDLE 0x8000 /* 1=Lost lock detect enabled.
+ * Will assert lost lock and bring
+ * link down if idle not seen
+ * within 1ms in 1000BASE-T
+ */
+/* Number of times we will attempt to autonegotiate before downshifting if we
+ * are the master */
+#define M88E1000_EPSCR_MASTER_DOWNSHIFT_MASK 0x0C00
+#define M88E1000_EPSCR_MASTER_DOWNSHIFT_1X 0x0000
+#define M88E1000_EPSCR_MASTER_DOWNSHIFT_2X 0x0400
+#define M88E1000_EPSCR_MASTER_DOWNSHIFT_3X 0x0800
+#define M88E1000_EPSCR_MASTER_DOWNSHIFT_4X 0x0C00
+/* Number of times we will attempt to autonegotiate before downshifting if we
+ * are the slave */
+#define M88E1000_EPSCR_SLAVE_DOWNSHIFT_MASK 0x0300
+#define M88E1000_EPSCR_SLAVE_DOWNSHIFT_DIS 0x0000
+#define M88E1000_EPSCR_SLAVE_DOWNSHIFT_1X 0x0100
+#define M88E1000_EPSCR_SLAVE_DOWNSHIFT_2X 0x0200
+#define M88E1000_EPSCR_SLAVE_DOWNSHIFT_3X 0x0300
+#define M88E1000_EPSCR_TX_CLK_2_5 0x0060 /* 2.5 MHz TX_CLK */
+#define M88E1000_EPSCR_TX_CLK_25 0x0070 /* 25 MHz TX_CLK */
+#define M88E1000_EPSCR_TX_CLK_0 0x0000 /* NO TX_CLK */
+
+/* M88EC018 Rev 2 specific DownShift settings */
+#define M88EC018_EPSCR_DOWNSHIFT_COUNTER_MASK 0x0E00
+#define M88EC018_EPSCR_DOWNSHIFT_COUNTER_1X 0x0000
+#define M88EC018_EPSCR_DOWNSHIFT_COUNTER_2X 0x0200
+#define M88EC018_EPSCR_DOWNSHIFT_COUNTER_3X 0x0400
+#define M88EC018_EPSCR_DOWNSHIFT_COUNTER_4X 0x0600
+#define M88EC018_EPSCR_DOWNSHIFT_COUNTER_5X 0x0800
+#define M88EC018_EPSCR_DOWNSHIFT_COUNTER_6X 0x0A00
+#define M88EC018_EPSCR_DOWNSHIFT_COUNTER_7X 0x0C00
+#define M88EC018_EPSCR_DOWNSHIFT_COUNTER_8X 0x0E00
+
+/* IGP01E1000 Specific Port Config Register - R/W */
+#define IGP01E1000_PSCFR_AUTO_MDIX_PAR_DETECT 0x0010
+#define IGP01E1000_PSCFR_PRE_EN 0x0020
+#define IGP01E1000_PSCFR_SMART_SPEED 0x0080
+#define IGP01E1000_PSCFR_DISABLE_TPLOOPBACK 0x0100
+#define IGP01E1000_PSCFR_DISABLE_JABBER 0x0400
+#define IGP01E1000_PSCFR_DISABLE_TRANSMIT 0x2000
+
+/* IGP01E1000 Specific Port Status Register - R/O */
+#define IGP01E1000_PSSR_AUTONEG_FAILED 0x0001 /* RO LH SC */
+#define IGP01E1000_PSSR_POLARITY_REVERSED 0x0002
+#define IGP01E1000_PSSR_CABLE_LENGTH 0x007C
+#define IGP01E1000_PSSR_FULL_DUPLEX 0x0200
+#define IGP01E1000_PSSR_LINK_UP 0x0400
+#define IGP01E1000_PSSR_MDIX 0x0800
+#define IGP01E1000_PSSR_SPEED_MASK 0xC000 /* speed bits mask */
+#define IGP01E1000_PSSR_SPEED_10MBPS 0x4000
+#define IGP01E1000_PSSR_SPEED_100MBPS 0x8000
+#define IGP01E1000_PSSR_SPEED_1000MBPS 0xC000
+#define IGP01E1000_PSSR_CABLE_LENGTH_SHIFT 0x0002 /* shift right 2 */
+#define IGP01E1000_PSSR_MDIX_SHIFT 0x000B /* shift right 11 */
+
+/* IGP01E1000 Specific Port Control Register - R/W */
+#define IGP01E1000_PSCR_TP_LOOPBACK 0x0010
+#define IGP01E1000_PSCR_CORRECT_NC_SCMBLR 0x0200
+#define IGP01E1000_PSCR_TEN_CRS_SELECT 0x0400
+#define IGP01E1000_PSCR_FLIP_CHIP 0x0800
+#define IGP01E1000_PSCR_AUTO_MDIX 0x1000
+#define IGP01E1000_PSCR_FORCE_MDI_MDIX 0x2000 /* 0-MDI, 1-MDIX */
+
+/* IGP01E1000 Specific Port Link Health Register */
+#define IGP01E1000_PLHR_SS_DOWNGRADE 0x8000
+#define IGP01E1000_PLHR_GIG_SCRAMBLER_ERROR 0x4000
+#define IGP01E1000_PLHR_MASTER_FAULT 0x2000
+#define IGP01E1000_PLHR_MASTER_RESOLUTION 0x1000
+#define IGP01E1000_PLHR_GIG_REM_RCVR_NOK 0x0800 /* LH */
+#define IGP01E1000_PLHR_IDLE_ERROR_CNT_OFLOW 0x0400 /* LH */
+#define IGP01E1000_PLHR_DATA_ERR_1 0x0200 /* LH */
+#define IGP01E1000_PLHR_DATA_ERR_0 0x0100
+#define IGP01E1000_PLHR_AUTONEG_FAULT 0x0040
+#define IGP01E1000_PLHR_AUTONEG_ACTIVE 0x0010
+#define IGP01E1000_PLHR_VALID_CHANNEL_D 0x0008
+#define IGP01E1000_PLHR_VALID_CHANNEL_C 0x0004
+#define IGP01E1000_PLHR_VALID_CHANNEL_B 0x0002
+#define IGP01E1000_PLHR_VALID_CHANNEL_A 0x0001
+
+/* IGP01E1000 Channel Quality Register */
+#define IGP01E1000_MSE_CHANNEL_D 0x000F
+#define IGP01E1000_MSE_CHANNEL_C 0x00F0
+#define IGP01E1000_MSE_CHANNEL_B 0x0F00
+#define IGP01E1000_MSE_CHANNEL_A 0xF000
+
+#define IGP02E1000_PM_SPD 0x0001 /* Smart Power Down */
+#define IGP02E1000_PM_D3_LPLU 0x0004 /* Enable LPLU in non-D0a modes */
+#define IGP02E1000_PM_D0_LPLU 0x0002 /* Enable LPLU in D0a mode */
+
+/* IGP01E1000 DSP reset macros */
+#define DSP_RESET_ENABLE 0x0
+#define DSP_RESET_DISABLE 0x2
+#define E1000_MAX_DSP_RESETS 10
+
+/* IGP01E1000 & IGP02E1000 AGC Registers */
+
+#define IGP01E1000_AGC_LENGTH_SHIFT 7 /* Coarse - 13:11, Fine - 10:7 */
+#define IGP02E1000_AGC_LENGTH_SHIFT 9 /* Coarse - 15:13, Fine - 12:9 */
+
+/* IGP02E1000 AGC Register Length 9-bit mask */
+#define IGP02E1000_AGC_LENGTH_MASK 0x7F
+
+/* 7 bits (3 Coarse + 4 Fine) --> 128 possible values */
+#define IGP01E1000_AGC_LENGTH_TABLE_SIZE 128
+#define IGP02E1000_AGC_LENGTH_TABLE_SIZE 113
+
+/* The precision error of the cable length is +/- 10 meters */
+#define IGP01E1000_AGC_RANGE 10
+#define IGP02E1000_AGC_RANGE 15
+
+/* IGP01E1000 PCS Initialization register */
+/* bits 3:6 in the PCS registers stores the channels polarity */
+#define IGP01E1000_PHY_POLARITY_MASK 0x0078
+
+/* IGP01E1000 GMII FIFO Register */
+#define IGP01E1000_GMII_FLEX_SPD 0x10 /* Enable flexible speed
+ * on Link-Up */
+#define IGP01E1000_GMII_SPD 0x20 /* Enable SPD */
+
+/* IGP01E1000 Analog Register */
+#define IGP01E1000_ANALOG_SPARE_FUSE_STATUS 0x20D1
+#define IGP01E1000_ANALOG_FUSE_STATUS 0x20D0
+#define IGP01E1000_ANALOG_FUSE_CONTROL 0x20DC
+#define IGP01E1000_ANALOG_FUSE_BYPASS 0x20DE
+
+#define IGP01E1000_ANALOG_FUSE_POLY_MASK 0xF000
+#define IGP01E1000_ANALOG_FUSE_FINE_MASK 0x0F80
+#define IGP01E1000_ANALOG_FUSE_COARSE_MASK 0x0070
+#define IGP01E1000_ANALOG_SPARE_FUSE_ENABLED 0x0100
+#define IGP01E1000_ANALOG_FUSE_ENABLE_SW_CONTROL 0x0002
+
+#define IGP01E1000_ANALOG_FUSE_COARSE_THRESH 0x0040
+#define IGP01E1000_ANALOG_FUSE_COARSE_10 0x0010
+#define IGP01E1000_ANALOG_FUSE_FINE_1 0x0080
+#define IGP01E1000_ANALOG_FUSE_FINE_10 0x0500
+
+/* GG82563 PHY Specific Control Register (Page 0, Register 16) */
+#define GG82563_PSCR_DISABLE_JABBER 0x0001 /* 1=Disable Jabber */
+#define GG82563_PSCR_POLARITY_REVERSAL_DISABLE 0x0002 /* 1=Polarity Reversal Disabled */
+#define GG82563_PSCR_POWER_DOWN 0x0004 /* 1=Power Down */
+#define GG82563_PSCR_COPPER_TRANSMITER_DISABLE 0x0008 /* 1=Transmitter Disabled */
+#define GG82563_PSCR_CROSSOVER_MODE_MASK 0x0060
+#define GG82563_PSCR_CROSSOVER_MODE_MDI 0x0000 /* 00=Manual MDI configuration */
+#define GG82563_PSCR_CROSSOVER_MODE_MDIX 0x0020 /* 01=Manual MDIX configuration */
+#define GG82563_PSCR_CROSSOVER_MODE_AUTO 0x0060 /* 11=Automatic crossover */
+#define GG82563_PSCR_ENALBE_EXTENDED_DISTANCE 0x0080 /* 1=Enable Extended Distance */
+#define GG82563_PSCR_ENERGY_DETECT_MASK 0x0300
+#define GG82563_PSCR_ENERGY_DETECT_OFF 0x0000 /* 00,01=Off */
+#define GG82563_PSCR_ENERGY_DETECT_RX 0x0200 /* 10=Sense on Rx only (Energy Detect) */
+#define GG82563_PSCR_ENERGY_DETECT_RX_TM 0x0300 /* 11=Sense and Tx NLP */
+#define GG82563_PSCR_FORCE_LINK_GOOD 0x0400 /* 1=Force Link Good */
+#define GG82563_PSCR_DOWNSHIFT_ENABLE 0x0800 /* 1=Enable Downshift */
+#define GG82563_PSCR_DOWNSHIFT_COUNTER_MASK 0x7000
+#define GG82563_PSCR_DOWNSHIFT_COUNTER_SHIFT 12
+
+/* PHY Specific Status Register (Page 0, Register 17) */
+#define GG82563_PSSR_JABBER 0x0001 /* 1=Jabber */
+#define GG82563_PSSR_POLARITY 0x0002 /* 1=Polarity Reversed */
+#define GG82563_PSSR_LINK 0x0008 /* 1=Link is Up */
+#define GG82563_PSSR_ENERGY_DETECT 0x0010 /* 1=Sleep, 0=Active */
+#define GG82563_PSSR_DOWNSHIFT 0x0020 /* 1=Downshift */
+#define GG82563_PSSR_CROSSOVER_STATUS 0x0040 /* 1=MDIX, 0=MDI */
+#define GG82563_PSSR_RX_PAUSE_ENABLED 0x0100 /* 1=Receive Pause Enabled */
+#define GG82563_PSSR_TX_PAUSE_ENABLED 0x0200 /* 1=Transmit Pause Enabled */
+#define GG82563_PSSR_LINK_UP 0x0400 /* 1=Link Up */
+#define GG82563_PSSR_SPEED_DUPLEX_RESOLVED 0x0800 /* 1=Resolved */
+#define GG82563_PSSR_PAGE_RECEIVED 0x1000 /* 1=Page Received */
+#define GG82563_PSSR_DUPLEX 0x2000 /* 1=Full-Duplex */
+#define GG82563_PSSR_SPEED_MASK 0xC000
+#define GG82563_PSSR_SPEED_10MBPS 0x0000 /* 00=10Mbps */
+#define GG82563_PSSR_SPEED_100MBPS 0x4000 /* 01=100Mbps */
+#define GG82563_PSSR_SPEED_1000MBPS 0x8000 /* 10=1000Mbps */
+
+/* PHY Specific Status Register 2 (Page 0, Register 19) */
+#define GG82563_PSSR2_JABBER 0x0001 /* 1=Jabber */
+#define GG82563_PSSR2_POLARITY_CHANGED 0x0002 /* 1=Polarity Changed */
+#define GG82563_PSSR2_ENERGY_DETECT_CHANGED 0x0010 /* 1=Energy Detect Changed */
+#define GG82563_PSSR2_DOWNSHIFT_INTERRUPT 0x0020 /* 1=Downshift Detected */
+#define GG82563_PSSR2_MDI_CROSSOVER_CHANGE 0x0040 /* 1=Crossover Changed */
+#define GG82563_PSSR2_FALSE_CARRIER 0x0100 /* 1=False Carrier */
+#define GG82563_PSSR2_SYMBOL_ERROR 0x0200 /* 1=Symbol Error */
+#define GG82563_PSSR2_LINK_STATUS_CHANGED 0x0400 /* 1=Link Status Changed */
+#define GG82563_PSSR2_AUTO_NEG_COMPLETED 0x0800 /* 1=Auto-Neg Completed */
+#define GG82563_PSSR2_PAGE_RECEIVED 0x1000 /* 1=Page Received */
+#define GG82563_PSSR2_DUPLEX_CHANGED 0x2000 /* 1=Duplex Changed */
+#define GG82563_PSSR2_SPEED_CHANGED 0x4000 /* 1=Speed Changed */
+#define GG82563_PSSR2_AUTO_NEG_ERROR 0x8000 /* 1=Auto-Neg Error */
+
+/* PHY Specific Control Register 2 (Page 0, Register 26) */
+#define GG82563_PSCR2_10BT_POLARITY_FORCE 0x0002 /* 1=Force Negative Polarity */
+#define GG82563_PSCR2_1000MB_TEST_SELECT_MASK 0x000C
+#define GG82563_PSCR2_1000MB_TEST_SELECT_NORMAL 0x0000 /* 00,01=Normal Operation */
+#define GG82563_PSCR2_1000MB_TEST_SELECT_112NS 0x0008 /* 10=Select 112ns Sequence */
+#define GG82563_PSCR2_1000MB_TEST_SELECT_16NS 0x000C /* 11=Select 16ns Sequence */
+#define GG82563_PSCR2_REVERSE_AUTO_NEG 0x2000 /* 1=Reverse Auto-Negotiation */
+#define GG82563_PSCR2_1000BT_DISABLE 0x4000 /* 1=Disable 1000BASE-T */
+#define GG82563_PSCR2_TRANSMITER_TYPE_MASK 0x8000
+#define GG82563_PSCR2_TRANSMITTER_TYPE_CLASS_B 0x0000 /* 0=Class B */
+#define GG82563_PSCR2_TRANSMITTER_TYPE_CLASS_A 0x8000 /* 1=Class A */
+
+/* MAC Specific Control Register (Page 2, Register 21) */
+/* Tx clock speed for Link Down and 1000BASE-T for the following speeds */
+#define GG82563_MSCR_TX_CLK_MASK 0x0007
+#define GG82563_MSCR_TX_CLK_10MBPS_2_5MHZ 0x0004
+#define GG82563_MSCR_TX_CLK_100MBPS_25MHZ 0x0005
+#define GG82563_MSCR_TX_CLK_1000MBPS_2_5MHZ 0x0006
+#define GG82563_MSCR_TX_CLK_1000MBPS_25MHZ 0x0007
+
+#define GG82563_MSCR_ASSERT_CRS_ON_TX 0x0010 /* 1=Assert */
+
+/* DSP Distance Register (Page 5, Register 26) */
+#define GG82563_DSPD_CABLE_LENGTH 0x0007 /* 0 = <50M;
+ 1 = 50-80M;
+ 2 = 80-110M;
+ 3 = 110-140M;
+ 4 = >140M */
+
+/* Kumeran Mode Control Register (Page 193, Register 16) */
+#define GG82563_KMCR_PHY_LEDS_EN 0x0020 /* 1=PHY LEDs, 0=Kumeran Inband LEDs */
+#define GG82563_KMCR_FORCE_LINK_UP 0x0040 /* 1=Force Link Up */
+#define GG82563_KMCR_SUPPRESS_SGMII_EPD_EXT 0x0080
+#define GG82563_KMCR_MDIO_BUS_SPEED_SELECT_MASK 0x0400
+#define GG82563_KMCR_MDIO_BUS_SPEED_SELECT 0x0400 /* 1=6.25MHz, 0=0.8MHz */
+#define GG82563_KMCR_PASS_FALSE_CARRIER 0x0800
+
+/* Power Management Control Register (Page 193, Register 20) */
+#define GG82563_PMCR_ENABLE_ELECTRICAL_IDLE 0x0001 /* 1=Enable SERDES Electrical Idle */
+#define GG82563_PMCR_DISABLE_PORT 0x0002 /* 1=Disable Port */
+#define GG82563_PMCR_DISABLE_SERDES 0x0004 /* 1=Disable SERDES */
+#define GG82563_PMCR_REVERSE_AUTO_NEG 0x0008 /* 1=Enable Reverse Auto-Negotiation */
+#define GG82563_PMCR_DISABLE_1000_NON_D0 0x0010 /* 1=Disable 1000Mbps Auto-Neg in non D0 */
+#define GG82563_PMCR_DISABLE_1000 0x0020 /* 1=Disable 1000Mbps Auto-Neg Always */
+#define GG82563_PMCR_REVERSE_AUTO_NEG_D0A 0x0040 /* 1=Enable D0a Reverse Auto-Negotiation */
+#define GG82563_PMCR_FORCE_POWER_STATE 0x0080 /* 1=Force Power State */
+#define GG82563_PMCR_PROGRAMMED_POWER_STATE_MASK 0x0300
+#define GG82563_PMCR_PROGRAMMED_POWER_STATE_DR 0x0000 /* 00=Dr */
+#define GG82563_PMCR_PROGRAMMED_POWER_STATE_D0U 0x0100 /* 01=D0u */
+#define GG82563_PMCR_PROGRAMMED_POWER_STATE_D0A 0x0200 /* 10=D0a */
+#define GG82563_PMCR_PROGRAMMED_POWER_STATE_D3 0x0300 /* 11=D3 */
+
+/* In-Band Control Register (Page 194, Register 18) */
+#define GG82563_ICR_DIS_PADDING 0x0010 /* Disable Padding Use */
+
+
+/* Bit definitions for valid PHY IDs. */
+/* I = Integrated
+ * E = External
+ */
+#define M88_VENDOR 0x0141
+#define M88E1000_E_PHY_ID 0x01410C50
+#define M88E1000_I_PHY_ID 0x01410C30
+#define M88E1011_I_PHY_ID 0x01410C20
+#define IGP01E1000_I_PHY_ID 0x02A80380
+#define M88E1000_12_PHY_ID M88E1000_E_PHY_ID
+#define M88E1000_14_PHY_ID M88E1000_E_PHY_ID
+#define M88E1011_I_REV_4 0x04
+#define M88E1111_I_PHY_ID 0x01410CC0
+#define L1LXT971A_PHY_ID 0x001378E0
+#define GG82563_E_PHY_ID 0x01410CA0
+
+
+/* Bits...
+ * 15-5: page
+ * 4-0: register offset
+ */
+#define PHY_PAGE_SHIFT 5
+#define PHY_REG(page, reg) \
+ (((page) << PHY_PAGE_SHIFT) | ((reg) & MAX_PHY_REG_ADDRESS))
+
+#define IGP3_PHY_PORT_CTRL \
+ PHY_REG(769, 17) /* Port General Configuration */
+#define IGP3_PHY_RATE_ADAPT_CTRL \
+ PHY_REG(769, 25) /* Rate Adapter Control Register */
+
+#define IGP3_KMRN_FIFO_CTRL_STATS \
+ PHY_REG(770, 16) /* KMRN FIFO's control/status register */
+#define IGP3_KMRN_POWER_MNG_CTRL \
+ PHY_REG(770, 17) /* KMRN Power Management Control Register */
+#define IGP3_KMRN_INBAND_CTRL \
+ PHY_REG(770, 18) /* KMRN Inband Control Register */
+#define IGP3_KMRN_DIAG \
+ PHY_REG(770, 19) /* KMRN Diagnostic register */
+#define IGP3_KMRN_DIAG_PCS_LOCK_LOSS 0x0002 /* RX PCS is not synced */
+#define IGP3_KMRN_ACK_TIMEOUT \
+ PHY_REG(770, 20) /* KMRN Acknowledge Timeouts register */
+
+#define IGP3_VR_CTRL \
+ PHY_REG(776, 18) /* Voltage regulator control register */
+#define IGP3_VR_CTRL_MODE_SHUT 0x0200 /* Enter powerdown, shutdown VRs */
+#define IGP3_VR_CTRL_MODE_MASK 0x0300 /* Shutdown VR Mask */
+
+#define IGP3_CAPABILITY \
+ PHY_REG(776, 19) /* IGP3 Capability Register */
+
+/* Capabilities for SKU Control */
+#define IGP3_CAP_INITIATE_TEAM 0x0001 /* Able to initiate a team */
+#define IGP3_CAP_WFM 0x0002 /* Support WoL and PXE */
+#define IGP3_CAP_ASF 0x0004 /* Support ASF */
+#define IGP3_CAP_LPLU 0x0008 /* Support Low Power Link Up */
+#define IGP3_CAP_DC_AUTO_SPEED 0x0010 /* Support AC/DC Auto Link Speed */
+#define IGP3_CAP_SPD 0x0020 /* Support Smart Power Down */
+#define IGP3_CAP_MULT_QUEUE 0x0040 /* Support 2 tx & 2 rx queues */
+#define IGP3_CAP_RSS 0x0080 /* Support RSS */
+#define IGP3_CAP_8021PQ 0x0100 /* Support 802.1Q & 802.1p */
+#define IGP3_CAP_AMT_CB 0x0200 /* Support active manageability and circuit breaker */
+
+#define IGP3_PPC_JORDAN_EN 0x0001
+#define IGP3_PPC_JORDAN_GIGA_SPEED 0x0002
+
+#define IGP3_KMRN_PMC_EE_IDLE_LINK_DIS 0x0001
+#define IGP3_KMRN_PMC_K0S_ENTRY_LATENCY_MASK 0x001E
+#define IGP3_KMRN_PMC_K0S_MODE1_EN_GIGA 0x0020
+#define IGP3_KMRN_PMC_K0S_MODE1_EN_100 0x0040
+
+#define IGP3E1000_PHY_MISC_CTRL 0x1B /* Misc. Ctrl register */
+#define IGP3_PHY_MISC_DUPLEX_MANUAL_SET 0x1000 /* Duplex Manual Set */
+
+#define IGP3_KMRN_EXT_CTRL PHY_REG(770, 18)
+#define IGP3_KMRN_EC_DIS_INBAND 0x0080
+
+#define IGP03E1000_E_PHY_ID 0x02A80390
+#define IFE_E_PHY_ID 0x02A80330 /* 10/100 PHY */
+#define IFE_PLUS_E_PHY_ID 0x02A80320
+#define IFE_C_E_PHY_ID 0x02A80310
+
+#define IFE_PHY_EXTENDED_STATUS_CONTROL 0x10 /* 100BaseTx Extended Status, Control and Address */
+#define IFE_PHY_SPECIAL_CONTROL 0x11 /* 100BaseTx PHY special control register */
+#define IFE_PHY_RCV_FALSE_CARRIER 0x13 /* 100BaseTx Receive False Carrier Counter */
+#define IFE_PHY_RCV_DISCONNECT 0x14 /* 100BaseTx Receive Disconnect Counter */
+#define IFE_PHY_RCV_ERROT_FRAME 0x15 /* 100BaseTx Receive Error Frame Counter */
+#define IFE_PHY_RCV_SYMBOL_ERR 0x16 /* Receive Symbol Error Counter */
+#define IFE_PHY_PREM_EOF_ERR 0x17 /* 100BaseTx Receive Premature End Of Frame Error Counter */
+#define IFE_PHY_RCV_EOF_ERR 0x18 /* 10BaseT Receive End Of Frame Error Counter */
+#define IFE_PHY_TX_JABBER_DETECT 0x19 /* 10BaseT Transmit Jabber Detect Counter */
+#define IFE_PHY_EQUALIZER 0x1A /* PHY Equalizer Control and Status */
+#define IFE_PHY_SPECIAL_CONTROL_LED 0x1B /* PHY special control and LED configuration */
+#define IFE_PHY_MDIX_CONTROL 0x1C /* MDI/MDI-X Control register */
+#define IFE_PHY_HWI_CONTROL 0x1D /* Hardware Integrity Control (HWI) */
+
+#define IFE_PESC_REDUCED_POWER_DOWN_DISABLE 0x2000 /* Default 1 = Disable auto reduced power down */
+#define IFE_PESC_100BTX_POWER_DOWN 0x0400 /* Indicates the power state of 100BASE-TX */
+#define IFE_PESC_10BTX_POWER_DOWN 0x0200 /* Indicates the power state of 10BASE-T */
+#define IFE_PESC_POLARITY_REVERSED 0x0100 /* Indicates 10BASE-T polarity */
+#define IFE_PESC_PHY_ADDR_MASK 0x007C /* Bit 6:2 for sampled PHY address */
+#define IFE_PESC_SPEED 0x0002 /* Auto-negotiation speed result 1=100Mbs, 0=10Mbs */
+#define IFE_PESC_DUPLEX 0x0001 /* Auto-negotiation duplex result 1=Full, 0=Half */
+#define IFE_PESC_POLARITY_REVERSED_SHIFT 8
+
+#define IFE_PSC_DISABLE_DYNAMIC_POWER_DOWN 0x0100 /* 1 = Dynamic Power Down disabled */
+#define IFE_PSC_FORCE_POLARITY 0x0020 /* 1=Reversed Polarity, 0=Normal */
+#define IFE_PSC_AUTO_POLARITY_DISABLE 0x0010 /* 1=Auto Polarity Disabled, 0=Enabled */
+#define IFE_PSC_JABBER_FUNC_DISABLE 0x0001 /* 1=Jabber Disabled, 0=Normal Jabber Operation */
+#define IFE_PSC_FORCE_POLARITY_SHIFT 5
+#define IFE_PSC_AUTO_POLARITY_DISABLE_SHIFT 4
+
+#define IFE_PMC_AUTO_MDIX 0x0080 /* 1=enable MDI/MDI-X feature, default 0=disabled */
+#define IFE_PMC_FORCE_MDIX 0x0040 /* 1=force MDI-X, 0=force MDI */
+#define IFE_PMC_MDIX_STATUS 0x0020 /* 1=MDI-X, 0=MDI */
+#define IFE_PMC_AUTO_MDIX_COMPLETE 0x0010 /* Resolution algorithm is completed */
+#define IFE_PMC_MDIX_MODE_SHIFT 6
+#define IFE_PHC_MDIX_RESET_ALL_MASK 0x0000 /* Disable auto MDI-X */
+
+#define IFE_PHC_HWI_ENABLE 0x8000 /* Enable the HWI feature */
+#define IFE_PHC_ABILITY_CHECK 0x4000 /* 1= Test Passed, 0=failed */
+#define IFE_PHC_TEST_EXEC 0x2000 /* PHY launch test pulses on the wire */
+#define IFE_PHC_HIGHZ 0x0200 /* 1 = Open Circuit */
+#define IFE_PHC_LOWZ 0x0400 /* 1 = Short Circuit */
+#define IFE_PHC_LOW_HIGH_Z_MASK 0x0600 /* Mask indicating the type of problem on the line */
+#define IFE_PHC_DISTANCE_MASK 0x01FF /* Mask for distance to the cable problem, in 80cm granularity */
+#define IFE_PHC_RESET_ALL_MASK 0x0000 /* Disable HWI */
+#define IFE_PSCL_PROBE_MODE 0x0020 /* LED Probe mode */
+#define IFE_PSCL_PROBE_LEDS_OFF 0x0006 /* Force LEDs 0 and 2 off */
+#define IFE_PSCL_PROBE_LEDS_ON 0x0007 /* Force LEDs 0 and 2 on */
+
+#define ICH_FLASH_COMMAND_TIMEOUT 5000 /* 5000 uSecs - adjusted */
+#define ICH_FLASH_ERASE_TIMEOUT 3000000 /* Up to 3 seconds - worst case */
+#define ICH_FLASH_CYCLE_REPEAT_COUNT 10 /* 10 cycles */
+#define ICH_FLASH_SEG_SIZE_256 256
+#define ICH_FLASH_SEG_SIZE_4K 4096
+#define ICH_FLASH_SEG_SIZE_64K 65536
+
+#define ICH_CYCLE_READ 0x0
+#define ICH_CYCLE_RESERVED 0x1
+#define ICH_CYCLE_WRITE 0x2
+#define ICH_CYCLE_ERASE 0x3
+
+#define ICH_FLASH_GFPREG 0x0000
+#define ICH_FLASH_HSFSTS 0x0004
+#define ICH_FLASH_HSFCTL 0x0006
+#define ICH_FLASH_FADDR 0x0008
+#define ICH_FLASH_FDATA0 0x0010
+#define ICH_FLASH_FRACC 0x0050
+#define ICH_FLASH_FREG0 0x0054
+#define ICH_FLASH_FREG1 0x0058
+#define ICH_FLASH_FREG2 0x005C
+#define ICH_FLASH_FREG3 0x0060
+#define ICH_FLASH_FPR0 0x0074
+#define ICH_FLASH_FPR1 0x0078
+#define ICH_FLASH_SSFSTS 0x0090
+#define ICH_FLASH_SSFCTL 0x0092
+#define ICH_FLASH_PREOP 0x0094
+#define ICH_FLASH_OPTYPE 0x0096
+#define ICH_FLASH_OPMENU 0x0098
+
+#define ICH_FLASH_REG_MAPSIZE 0x00A0
+#define ICH_FLASH_SECTOR_SIZE 4096
+#define ICH_GFPREG_BASE_MASK 0x1FFF
+#define ICH_FLASH_LINEAR_ADDR_MASK 0x00FFFFFF
+
+/* ICH8 GbE Flash Hardware Sequencing Flash Status Register bit breakdown */
+/* Offset 04h HSFSTS */
+union ich8_hws_flash_status {
+ struct ich8_hsfsts {
+#ifdef __BIG_ENDIAN
+ u16 reserved2 :6;
+ u16 fldesvalid :1;
+ u16 flockdn :1;
+ u16 flcdone :1;
+ u16 flcerr :1;
+ u16 dael :1;
+ u16 berasesz :2;
+ u16 flcinprog :1;
+ u16 reserved1 :2;
+#else
+ u16 flcdone :1; /* bit 0 Flash Cycle Done */
+ u16 flcerr :1; /* bit 1 Flash Cycle Error */
+ u16 dael :1; /* bit 2 Direct Access error Log */
+ u16 berasesz :2; /* bit 4:3 Block/Sector Erase Size */
+ u16 flcinprog :1; /* bit 5 flash SPI cycle in Progress */
+ u16 reserved1 :2; /* bit 7:6 Reserved */
+ u16 reserved2 :6; /* bit 13:8 Reserved */
+ u16 fldesvalid :1; /* bit 14 Flash Descriptor Valid */
+ u16 flockdn :1; /* bit 15 Flash Configuration Lock-Down */
+#endif
+ } hsf_status;
+ u16 regval;
+};
+
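+/* Illustrative sketch (not part of the original source): the union lets the
+ * shared code read the 16-bit HSFSTS register once and then test individual
+ * bits by name, e.g. (assuming hw->flash_address is the mapped flash region,
+ * as used by the ICH8 flash helpers in this driver):
+ *
+ *   union ich8_hws_flash_status hsfsts;
+ *   hsfsts.regval = readw(hw->flash_address + ICH_FLASH_HSFSTS);
+ *   if (hsfsts.hsf_status.flcdone && !hsfsts.hsf_status.flcerr)
+ *           the previous flash cycle completed without error
+ */
+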
+/* ICH8 GbE Flash Hardware Sequencing Flash control Register bit breakdown */
+/* Offset 06h HSFCTL */
+union ich8_hws_flash_ctrl {
+ struct ich8_hsflctl {
+#ifdef __BIG_ENDIAN
+ u16 fldbcount :2;
+ u16 flockdn :6;
+ u16 flcgo :1;
+ u16 flcycle :2;
+ u16 reserved :5;
+#else
+ u16 flcgo :1; /* 0 Flash Cycle Go */
+ u16 flcycle :2; /* 2:1 Flash Cycle */
+ u16 reserved :5; /* 7:3 Reserved */
+ u16 fldbcount :2; /* 9:8 Flash Data Byte Count */
+ u16 flockdn :6; /* 15:10 Reserved */
+#endif
+ } hsf_ctrl;
+ u16 regval;
+};
+
+/* ICH8 Flash Region Access Permissions */
+union ich8_hws_flash_regacc {
+ struct ich8_flracc {
+#ifdef __BIG_ENDIAN
+ u32 gmwag :8;
+ u32 gmrag :8;
+ u32 grwa :8;
+ u32 grra :8;
+#else
+ u32 grra :8; /* 0:7 GbE region Read Access */
+ u32 grwa :8; /* 8:15 GbE region Write Access */
+ u32 gmrag :8; /* 23:16 GbE Master Read Access Grant */
+ u32 gmwag :8; /* 31:24 GbE Master Write Access Grant */
+#endif
+ } hsf_flregacc;
+ u16 regval;
+};
+
+/* Miscellaneous PHY bit definitions. */
+#define PHY_PREAMBLE 0xFFFFFFFF
+#define PHY_SOF 0x01
+#define PHY_OP_READ 0x02
+#define PHY_OP_WRITE 0x01
+#define PHY_TURNAROUND 0x02
+#define PHY_PREAMBLE_SIZE 32
+#define MII_CR_SPEED_1000 0x0040
+#define MII_CR_SPEED_100 0x2000
+#define MII_CR_SPEED_10 0x0000
+#define E1000_PHY_ADDRESS 0x01
+#define PHY_AUTO_NEG_TIME 45 /* 4.5 Seconds */
+#define PHY_FORCE_TIME 20 /* 2.0 Seconds */
+#define PHY_REVISION_MASK 0xFFFFFFF0
+#define DEVICE_SPEED_MASK 0x00000300 /* Device Ctrl Reg Speed Mask */
+#define REG4_SPEED_MASK 0x01E0
+#define REG9_SPEED_MASK 0x0300
+#define ADVERTISE_10_HALF 0x0001
+#define ADVERTISE_10_FULL 0x0002
+#define ADVERTISE_100_HALF 0x0004
+#define ADVERTISE_100_FULL 0x0008
+#define ADVERTISE_1000_HALF 0x0010
+#define ADVERTISE_1000_FULL 0x0020
+#define AUTONEG_ADVERTISE_SPEED_DEFAULT 0x002F /* Everything but 1000-Half */
+#define AUTONEG_ADVERTISE_10_100_ALL 0x000F /* All 10/100 speeds*/
+#define AUTONEG_ADVERTISE_10_ALL 0x0003 /* 10Mbps Full & Half speeds*/
+
+#endif /* _E1000_HW_H_ */
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/devices/e1000/e1000_hw-2.6.28-orig.c Mon Feb 07 21:30:25 2011 +0100
@@ -0,0 +1,8878 @@
+/*******************************************************************************
+
+ Intel PRO/1000 Linux driver
+ Copyright(c) 1999 - 2006 Intel Corporation.
+
+ This program is free software; you can redistribute it and/or modify it
+ under the terms and conditions of the GNU General Public License,
+ version 2, as published by the Free Software Foundation.
+
+ This program is distributed in the hope it will be useful, but WITHOUT
+ ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ more details.
+
+ You should have received a copy of the GNU General Public License along with
+ this program; if not, write to the Free Software Foundation, Inc.,
+ 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
+
+ The full GNU General Public License is included in this distribution in
+ the file called "COPYING".
+
+ Contact Information:
+ Linux NICS <linux.nics@intel.com>
+ e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
+ Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
+
+*******************************************************************************/
+
+/* e1000_hw.c
+ * Shared functions for accessing and configuring the MAC
+ */
+
+
+#include "e1000_hw.h"
+
+static s32 e1000_swfw_sync_acquire(struct e1000_hw *hw, u16 mask);
+static void e1000_swfw_sync_release(struct e1000_hw *hw, u16 mask);
+static s32 e1000_read_kmrn_reg(struct e1000_hw *hw, u32 reg_addr, u16 *data);
+static s32 e1000_write_kmrn_reg(struct e1000_hw *hw, u32 reg_addr, u16 data);
+static s32 e1000_get_software_semaphore(struct e1000_hw *hw);
+static void e1000_release_software_semaphore(struct e1000_hw *hw);
+
+static u8 e1000_arc_subsystem_valid(struct e1000_hw *hw);
+static s32 e1000_check_downshift(struct e1000_hw *hw);
+static s32 e1000_check_polarity(struct e1000_hw *hw,
+ e1000_rev_polarity *polarity);
+static void e1000_clear_hw_cntrs(struct e1000_hw *hw);
+static void e1000_clear_vfta(struct e1000_hw *hw);
+static s32 e1000_commit_shadow_ram(struct e1000_hw *hw);
+static s32 e1000_config_dsp_after_link_change(struct e1000_hw *hw,
+ bool link_up);
+static s32 e1000_config_fc_after_link_up(struct e1000_hw *hw);
+static s32 e1000_detect_gig_phy(struct e1000_hw *hw);
+static s32 e1000_erase_ich8_4k_segment(struct e1000_hw *hw, u32 bank);
+static s32 e1000_get_auto_rd_done(struct e1000_hw *hw);
+static s32 e1000_get_cable_length(struct e1000_hw *hw, u16 *min_length,
+ u16 *max_length);
+static s32 e1000_get_hw_eeprom_semaphore(struct e1000_hw *hw);
+static s32 e1000_get_phy_cfg_done(struct e1000_hw *hw);
+static s32 e1000_get_software_flag(struct e1000_hw *hw);
+static s32 e1000_ich8_cycle_init(struct e1000_hw *hw);
+static s32 e1000_ich8_flash_cycle(struct e1000_hw *hw, u32 timeout);
+static s32 e1000_id_led_init(struct e1000_hw *hw);
+static s32 e1000_init_lcd_from_nvm_config_region(struct e1000_hw *hw,
+ u32 cnf_base_addr,
+ u32 cnf_size);
+static s32 e1000_init_lcd_from_nvm(struct e1000_hw *hw);
+static void e1000_init_rx_addrs(struct e1000_hw *hw);
+static void e1000_initialize_hardware_bits(struct e1000_hw *hw);
+static bool e1000_is_onboard_nvm_eeprom(struct e1000_hw *hw);
+static s32 e1000_kumeran_lock_loss_workaround(struct e1000_hw *hw);
+static s32 e1000_mng_enable_host_if(struct e1000_hw *hw);
+static s32 e1000_mng_host_if_write(struct e1000_hw *hw, u8 *buffer, u16 length,
+ u16 offset, u8 *sum);
+static s32 e1000_mng_write_cmd_header(struct e1000_hw* hw,
+ struct e1000_host_mng_command_header
+ *hdr);
+static s32 e1000_mng_write_commit(struct e1000_hw *hw);
+static s32 e1000_phy_ife_get_info(struct e1000_hw *hw,
+ struct e1000_phy_info *phy_info);
+static s32 e1000_phy_igp_get_info(struct e1000_hw *hw,
+ struct e1000_phy_info *phy_info);
+static s32 e1000_read_eeprom_eerd(struct e1000_hw *hw, u16 offset, u16 words,
+ u16 *data);
+static s32 e1000_write_eeprom_eewr(struct e1000_hw *hw, u16 offset, u16 words,
+ u16 *data);
+static s32 e1000_poll_eerd_eewr_done(struct e1000_hw *hw, int eerd);
+static s32 e1000_phy_m88_get_info(struct e1000_hw *hw,
+ struct e1000_phy_info *phy_info);
+static void e1000_put_hw_eeprom_semaphore(struct e1000_hw *hw);
+static s32 e1000_read_ich8_byte(struct e1000_hw *hw, u32 index, u8 *data);
+static s32 e1000_verify_write_ich8_byte(struct e1000_hw *hw, u32 index,
+ u8 byte);
+static s32 e1000_write_ich8_byte(struct e1000_hw *hw, u32 index, u8 byte);
+static s32 e1000_read_ich8_word(struct e1000_hw *hw, u32 index, u16 *data);
+static s32 e1000_read_ich8_data(struct e1000_hw *hw, u32 index, u32 size,
+ u16 *data);
+static s32 e1000_write_ich8_data(struct e1000_hw *hw, u32 index, u32 size,
+ u16 data);
+static s32 e1000_read_eeprom_ich8(struct e1000_hw *hw, u16 offset, u16 words,
+ u16 *data);
+static s32 e1000_write_eeprom_ich8(struct e1000_hw *hw, u16 offset, u16 words,
+ u16 *data);
+static void e1000_release_software_flag(struct e1000_hw *hw);
+static s32 e1000_set_d3_lplu_state(struct e1000_hw *hw, bool active);
+static s32 e1000_set_d0_lplu_state(struct e1000_hw *hw, bool active);
+static s32 e1000_set_pci_ex_no_snoop(struct e1000_hw *hw, u32 no_snoop);
+static void e1000_set_pci_express_master_disable(struct e1000_hw *hw);
+static s32 e1000_wait_autoneg(struct e1000_hw *hw);
+static void e1000_write_reg_io(struct e1000_hw *hw, u32 offset, u32 value);
+static s32 e1000_set_phy_type(struct e1000_hw *hw);
+static void e1000_phy_init_script(struct e1000_hw *hw);
+static s32 e1000_setup_copper_link(struct e1000_hw *hw);
+static s32 e1000_setup_fiber_serdes_link(struct e1000_hw *hw);
+static s32 e1000_adjust_serdes_amplitude(struct e1000_hw *hw);
+static s32 e1000_phy_force_speed_duplex(struct e1000_hw *hw);
+static s32 e1000_config_mac_to_phy(struct e1000_hw *hw);
+static void e1000_raise_mdi_clk(struct e1000_hw *hw, u32 *ctrl);
+static void e1000_lower_mdi_clk(struct e1000_hw *hw, u32 *ctrl);
+static void e1000_shift_out_mdi_bits(struct e1000_hw *hw, u32 data,
+ u16 count);
+static u16 e1000_shift_in_mdi_bits(struct e1000_hw *hw);
+static s32 e1000_phy_reset_dsp(struct e1000_hw *hw);
+static s32 e1000_write_eeprom_spi(struct e1000_hw *hw, u16 offset,
+ u16 words, u16 *data);
+static s32 e1000_write_eeprom_microwire(struct e1000_hw *hw, u16 offset,
+ u16 words, u16 *data);
+static s32 e1000_spi_eeprom_ready(struct e1000_hw *hw);
+static void e1000_raise_ee_clk(struct e1000_hw *hw, u32 *eecd);
+static void e1000_lower_ee_clk(struct e1000_hw *hw, u32 *eecd);
+static void e1000_shift_out_ee_bits(struct e1000_hw *hw, u16 data, u16 count);
+static s32 e1000_write_phy_reg_ex(struct e1000_hw *hw, u32 reg_addr,
+ u16 phy_data);
+static s32 e1000_read_phy_reg_ex(struct e1000_hw *hw,u32 reg_addr,
+ u16 *phy_data);
+static u16 e1000_shift_in_ee_bits(struct e1000_hw *hw, u16 count);
+static s32 e1000_acquire_eeprom(struct e1000_hw *hw);
+static void e1000_release_eeprom(struct e1000_hw *hw);
+static void e1000_standby_eeprom(struct e1000_hw *hw);
+static s32 e1000_set_vco_speed(struct e1000_hw *hw);
+static s32 e1000_polarity_reversal_workaround(struct e1000_hw *hw);
+static s32 e1000_set_phy_mode(struct e1000_hw *hw);
+static s32 e1000_host_if_read_cookie(struct e1000_hw *hw, u8 *buffer);
+static u8 e1000_calculate_mng_checksum(char *buffer, u32 length);
+static s32 e1000_configure_kmrn_for_10_100(struct e1000_hw *hw, u16 duplex);
+static s32 e1000_configure_kmrn_for_1000(struct e1000_hw *hw);
+static s32 e1000_do_read_eeprom(struct e1000_hw *hw, u16 offset, u16 words, u16 *data);
+static s32 e1000_do_write_eeprom(struct e1000_hw *hw, u16 offset, u16 words, u16 *data);
+
+/* IGP cable length table */
+static const
+u16 e1000_igp_cable_length_table[IGP01E1000_AGC_LENGTH_TABLE_SIZE] =
+ { 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5,
+ 5, 10, 10, 10, 10, 10, 10, 10, 20, 20, 20, 20, 20, 25, 25, 25,
+ 25, 25, 25, 25, 30, 30, 30, 30, 40, 40, 40, 40, 40, 40, 40, 40,
+ 40, 50, 50, 50, 50, 50, 50, 50, 60, 60, 60, 60, 60, 60, 60, 60,
+ 60, 70, 70, 70, 70, 70, 70, 80, 80, 80, 80, 80, 80, 90, 90, 90,
+ 90, 90, 90, 90, 90, 90, 100, 100, 100, 100, 100, 100, 100, 100, 100, 100,
+ 100, 100, 100, 100, 110, 110, 110, 110, 110, 110, 110, 110, 110, 110, 110, 110,
+ 110, 110, 110, 110, 110, 110, 120, 120, 120, 120, 120, 120, 120, 120, 120, 120};
+
+static const
+u16 e1000_igp_2_cable_length_table[IGP02E1000_AGC_LENGTH_TABLE_SIZE] =
+ { 0, 0, 0, 0, 0, 0, 0, 0, 3, 5, 8, 11, 13, 16, 18, 21,
+ 0, 0, 0, 3, 6, 10, 13, 16, 19, 23, 26, 29, 32, 35, 38, 41,
+ 6, 10, 14, 18, 22, 26, 30, 33, 37, 41, 44, 48, 51, 54, 58, 61,
+ 21, 26, 31, 35, 40, 44, 49, 53, 57, 61, 65, 68, 72, 75, 79, 82,
+ 40, 45, 51, 56, 61, 66, 70, 75, 79, 83, 87, 91, 94, 98, 101, 104,
+ 60, 66, 72, 77, 82, 87, 92, 96, 100, 104, 108, 111, 114, 117, 119, 121,
+ 83, 89, 95, 100, 105, 109, 113, 116, 119, 122, 124,
+ 104, 109, 114, 118, 121, 124};
+
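+/* Illustrative sketch (not part of the original source): an IGP01 AGC
+ * register value ('agc_value' is a hypothetical raw PHY read) is converted
+ * to an approximate cable length by shifting out the length field and
+ * indexing the table above, roughly:
+ *
+ *   u16 cur_agc = agc_value >> IGP01E1000_AGC_LENGTH_SHIFT;
+ *   if (cur_agc < IGP01E1000_AGC_LENGTH_TABLE_SIZE)
+ *           length = e1000_igp_cable_length_table[cur_agc];
+ *
+ * The result is only accurate to about +/- IGP01E1000_AGC_RANGE meters.
+ */
+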
+static DEFINE_SPINLOCK(e1000_eeprom_lock);
+
+/******************************************************************************
+ * Set the phy type member in the hw struct.
+ *
+ * hw - Struct containing variables accessed by shared code
+ *****************************************************************************/
+static s32 e1000_set_phy_type(struct e1000_hw *hw)
+{
+ DEBUGFUNC("e1000_set_phy_type");
+
+ if (hw->mac_type == e1000_undefined)
+ return -E1000_ERR_PHY_TYPE;
+
+ switch (hw->phy_id) {
+ case M88E1000_E_PHY_ID:
+ case M88E1000_I_PHY_ID:
+ case M88E1011_I_PHY_ID:
+ case M88E1111_I_PHY_ID:
+ hw->phy_type = e1000_phy_m88;
+ break;
+ case IGP01E1000_I_PHY_ID:
+ if (hw->mac_type == e1000_82541 ||
+ hw->mac_type == e1000_82541_rev_2 ||
+ hw->mac_type == e1000_82547 ||
+ hw->mac_type == e1000_82547_rev_2) {
+ hw->phy_type = e1000_phy_igp;
+ break;
+ }
+ case IGP03E1000_E_PHY_ID:
+ hw->phy_type = e1000_phy_igp_3;
+ break;
+ case IFE_E_PHY_ID:
+ case IFE_PLUS_E_PHY_ID:
+ case IFE_C_E_PHY_ID:
+ hw->phy_type = e1000_phy_ife;
+ break;
+ case GG82563_E_PHY_ID:
+ if (hw->mac_type == e1000_80003es2lan) {
+ hw->phy_type = e1000_phy_gg82563;
+ break;
+ }
+ /* Fall Through */
+ default:
+ /* Should never have loaded on this device */
+ hw->phy_type = e1000_phy_undefined;
+ return -E1000_ERR_PHY_TYPE;
+ }
+
+ return E1000_SUCCESS;
+}
+
+/******************************************************************************
+ * IGP phy init script - initializes the GbE PHY
+ *
+ * hw - Struct containing variables accessed by shared code
+ *****************************************************************************/
+static void e1000_phy_init_script(struct e1000_hw *hw)
+{
+ u32 ret_val;
+ u16 phy_saved_data;
+
+ DEBUGFUNC("e1000_phy_init_script");
+
+ if (hw->phy_init_script) {
+ msleep(20);
+
+ /* Save off the current value of register 0x2F5B to be restored at
+ * the end of this routine. */
+ ret_val = e1000_read_phy_reg(hw, 0x2F5B, &phy_saved_data);
+
+ /* Disable the PHY transmitter */
+ e1000_write_phy_reg(hw, 0x2F5B, 0x0003);
+
+ msleep(20);
+
+ e1000_write_phy_reg(hw,0x0000,0x0140);
+
+ msleep(5);
+
+ switch (hw->mac_type) {
+ case e1000_82541:
+ case e1000_82547:
+ e1000_write_phy_reg(hw, 0x1F95, 0x0001);
+
+ e1000_write_phy_reg(hw, 0x1F71, 0xBD21);
+
+ e1000_write_phy_reg(hw, 0x1F79, 0x0018);
+
+ e1000_write_phy_reg(hw, 0x1F30, 0x1600);
+
+ e1000_write_phy_reg(hw, 0x1F31, 0x0014);
+
+ e1000_write_phy_reg(hw, 0x1F32, 0x161C);
+
+ e1000_write_phy_reg(hw, 0x1F94, 0x0003);
+
+ e1000_write_phy_reg(hw, 0x1F96, 0x003F);
+
+ e1000_write_phy_reg(hw, 0x2010, 0x0008);
+ break;
+
+ case e1000_82541_rev_2:
+ case e1000_82547_rev_2:
+ e1000_write_phy_reg(hw, 0x1F73, 0x0099);
+ break;
+ default:
+ break;
+ }
+
+ e1000_write_phy_reg(hw, 0x0000, 0x3300);
+
+ msleep(20);
+
+ /* Now enable the transmitter */
+ e1000_write_phy_reg(hw, 0x2F5B, phy_saved_data);
+
+ if (hw->mac_type == e1000_82547) {
+ u16 fused, fine, coarse;
+
+ /* Move to analog registers page */
+ e1000_read_phy_reg(hw, IGP01E1000_ANALOG_SPARE_FUSE_STATUS, &fused);
+
+ if (!(fused & IGP01E1000_ANALOG_SPARE_FUSE_ENABLED)) {
+ e1000_read_phy_reg(hw, IGP01E1000_ANALOG_FUSE_STATUS, &fused);
+
+ fine = fused & IGP01E1000_ANALOG_FUSE_FINE_MASK;
+ coarse = fused & IGP01E1000_ANALOG_FUSE_COARSE_MASK;
+
+ if (coarse > IGP01E1000_ANALOG_FUSE_COARSE_THRESH) {
+ coarse -= IGP01E1000_ANALOG_FUSE_COARSE_10;
+ fine -= IGP01E1000_ANALOG_FUSE_FINE_1;
+ } else if (coarse == IGP01E1000_ANALOG_FUSE_COARSE_THRESH)
+ fine -= IGP01E1000_ANALOG_FUSE_FINE_10;
+
+ fused = (fused & IGP01E1000_ANALOG_FUSE_POLY_MASK) |
+ (fine & IGP01E1000_ANALOG_FUSE_FINE_MASK) |
+ (coarse & IGP01E1000_ANALOG_FUSE_COARSE_MASK);
+
+ e1000_write_phy_reg(hw, IGP01E1000_ANALOG_FUSE_CONTROL, fused);
+ e1000_write_phy_reg(hw, IGP01E1000_ANALOG_FUSE_BYPASS,
+ IGP01E1000_ANALOG_FUSE_ENABLE_SW_CONTROL);
+ }
+ }
+ }
+}
+
+/******************************************************************************
+ * Set the mac type member in the hw struct.
+ *
+ * hw - Struct containing variables accessed by shared code
+ *****************************************************************************/
+s32 e1000_set_mac_type(struct e1000_hw *hw)
+{
+ DEBUGFUNC("e1000_set_mac_type");
+
+ switch (hw->device_id) {
+ case E1000_DEV_ID_82542:
+ switch (hw->revision_id) {
+ case E1000_82542_2_0_REV_ID:
+ hw->mac_type = e1000_82542_rev2_0;
+ break;
+ case E1000_82542_2_1_REV_ID:
+ hw->mac_type = e1000_82542_rev2_1;
+ break;
+ default:
+ /* Invalid 82542 revision ID */
+ return -E1000_ERR_MAC_TYPE;
+ }
+ break;
+ case E1000_DEV_ID_82543GC_FIBER:
+ case E1000_DEV_ID_82543GC_COPPER:
+ hw->mac_type = e1000_82543;
+ break;
+ case E1000_DEV_ID_82544EI_COPPER:
+ case E1000_DEV_ID_82544EI_FIBER:
+ case E1000_DEV_ID_82544GC_COPPER:
+ case E1000_DEV_ID_82544GC_LOM:
+ hw->mac_type = e1000_82544;
+ break;
+ case E1000_DEV_ID_82540EM:
+ case E1000_DEV_ID_82540EM_LOM:
+ case E1000_DEV_ID_82540EP:
+ case E1000_DEV_ID_82540EP_LOM:
+ case E1000_DEV_ID_82540EP_LP:
+ hw->mac_type = e1000_82540;
+ break;
+ case E1000_DEV_ID_82545EM_COPPER:
+ case E1000_DEV_ID_82545EM_FIBER:
+ hw->mac_type = e1000_82545;
+ break;
+ case E1000_DEV_ID_82545GM_COPPER:
+ case E1000_DEV_ID_82545GM_FIBER:
+ case E1000_DEV_ID_82545GM_SERDES:
+ hw->mac_type = e1000_82545_rev_3;
+ break;
+ case E1000_DEV_ID_82546EB_COPPER:
+ case E1000_DEV_ID_82546EB_FIBER:
+ case E1000_DEV_ID_82546EB_QUAD_COPPER:
+ hw->mac_type = e1000_82546;
+ break;
+ case E1000_DEV_ID_82546GB_COPPER:
+ case E1000_DEV_ID_82546GB_FIBER:
+ case E1000_DEV_ID_82546GB_SERDES:
+ case E1000_DEV_ID_82546GB_PCIE:
+ case E1000_DEV_ID_82546GB_QUAD_COPPER:
+ case E1000_DEV_ID_82546GB_QUAD_COPPER_KSP3:
+ hw->mac_type = e1000_82546_rev_3;
+ break;
+ case E1000_DEV_ID_82541EI:
+ case E1000_DEV_ID_82541EI_MOBILE:
+ case E1000_DEV_ID_82541ER_LOM:
+ hw->mac_type = e1000_82541;
+ break;
+ case E1000_DEV_ID_82541ER:
+ case E1000_DEV_ID_82541GI:
+ case E1000_DEV_ID_82541GI_LF:
+ case E1000_DEV_ID_82541GI_MOBILE:
+ hw->mac_type = e1000_82541_rev_2;
+ break;
+ case E1000_DEV_ID_82547EI:
+ case E1000_DEV_ID_82547EI_MOBILE:
+ hw->mac_type = e1000_82547;
+ break;
+ case E1000_DEV_ID_82547GI:
+ hw->mac_type = e1000_82547_rev_2;
+ break;
+ case E1000_DEV_ID_82571EB_COPPER:
+ case E1000_DEV_ID_82571EB_FIBER:
+ case E1000_DEV_ID_82571EB_SERDES:
+ case E1000_DEV_ID_82571EB_SERDES_DUAL:
+ case E1000_DEV_ID_82571EB_SERDES_QUAD:
+ case E1000_DEV_ID_82571EB_QUAD_COPPER:
+ case E1000_DEV_ID_82571PT_QUAD_COPPER:
+ case E1000_DEV_ID_82571EB_QUAD_FIBER:
+ case E1000_DEV_ID_82571EB_QUAD_COPPER_LOWPROFILE:
+ hw->mac_type = e1000_82571;
+ break;
+ case E1000_DEV_ID_82572EI_COPPER:
+ case E1000_DEV_ID_82572EI_FIBER:
+ case E1000_DEV_ID_82572EI_SERDES:
+ case E1000_DEV_ID_82572EI:
+ hw->mac_type = e1000_82572;
+ break;
+ case E1000_DEV_ID_82573E:
+ case E1000_DEV_ID_82573E_IAMT:
+ case E1000_DEV_ID_82573L:
+ hw->mac_type = e1000_82573;
+ break;
+ case E1000_DEV_ID_80003ES2LAN_COPPER_SPT:
+ case E1000_DEV_ID_80003ES2LAN_SERDES_SPT:
+ case E1000_DEV_ID_80003ES2LAN_COPPER_DPT:
+ case E1000_DEV_ID_80003ES2LAN_SERDES_DPT:
+ hw->mac_type = e1000_80003es2lan;
+ break;
+ case E1000_DEV_ID_ICH8_IGP_M_AMT:
+ case E1000_DEV_ID_ICH8_IGP_AMT:
+ case E1000_DEV_ID_ICH8_IGP_C:
+ case E1000_DEV_ID_ICH8_IFE:
+ case E1000_DEV_ID_ICH8_IFE_GT:
+ case E1000_DEV_ID_ICH8_IFE_G:
+ case E1000_DEV_ID_ICH8_IGP_M:
+ hw->mac_type = e1000_ich8lan;
+ break;
+ default:
+ /* Should never have loaded on this device */
+ return -E1000_ERR_MAC_TYPE;
+ }
+
+ switch (hw->mac_type) {
+ case e1000_ich8lan:
+ hw->swfwhw_semaphore_present = true;
+ hw->asf_firmware_present = true;
+ break;
+ case e1000_80003es2lan:
+ hw->swfw_sync_present = true;
+ /* fall through */
+ case e1000_82571:
+ case e1000_82572:
+ case e1000_82573:
+ hw->eeprom_semaphore_present = true;
+ /* fall through */
+ case e1000_82541:
+ case e1000_82547:
+ case e1000_82541_rev_2:
+ case e1000_82547_rev_2:
+ hw->asf_firmware_present = true;
+ break;
+ default:
+ break;
+ }
+
+ /* The 82543 chip does not count tx_carrier_errors properly in
+ * FD mode
+ */
+ if (hw->mac_type == e1000_82543)
+ hw->bad_tx_carr_stats_fd = true;
+
+ /* capable of receiving management packets to the host */
+ if (hw->mac_type >= e1000_82571)
+ hw->has_manc2h = true;
+
+ /* On rare occasions, ESB2 systems would end up started without
+ * the RX unit being turned on.
+ */
+ if (hw->mac_type == e1000_80003es2lan)
+ hw->rx_needs_kicking = true;
+
+ if (hw->mac_type > e1000_82544)
+ hw->has_smbus = true;
+
+ return E1000_SUCCESS;
+}
+
+/*****************************************************************************
+ * Set media type and TBI compatibility.
+ *
+ * hw - Struct containing variables accessed by shared code
+ * **************************************************************************/
+void e1000_set_media_type(struct e1000_hw *hw)
+{
+ u32 status;
+
+ DEBUGFUNC("e1000_set_media_type");
+
+ if (hw->mac_type != e1000_82543) {
+ /* tbi_compatibility is only valid on 82543 */
+ hw->tbi_compatibility_en = false;
+ }
+
+ switch (hw->device_id) {
+ case E1000_DEV_ID_82545GM_SERDES:
+ case E1000_DEV_ID_82546GB_SERDES:
+ case E1000_DEV_ID_82571EB_SERDES:
+ case E1000_DEV_ID_82571EB_SERDES_DUAL:
+ case E1000_DEV_ID_82571EB_SERDES_QUAD:
+ case E1000_DEV_ID_82572EI_SERDES:
+ case E1000_DEV_ID_80003ES2LAN_SERDES_DPT:
+ hw->media_type = e1000_media_type_internal_serdes;
+ break;
+ default:
+ switch (hw->mac_type) {
+ case e1000_82542_rev2_0:
+ case e1000_82542_rev2_1:
+ hw->media_type = e1000_media_type_fiber;
+ break;
+ case e1000_ich8lan:
+ case e1000_82573:
+ /* The STATUS_TBIMODE bit is reserved or reused for this
+ * device.
+ */
+ hw->media_type = e1000_media_type_copper;
+ break;
+ default:
+ status = er32(STATUS);
+ if (status & E1000_STATUS_TBIMODE) {
+ hw->media_type = e1000_media_type_fiber;
+ /* tbi_compatibility not valid on fiber */
+ hw->tbi_compatibility_en = false;
+ } else {
+ hw->media_type = e1000_media_type_copper;
+ }
+ break;
+ }
+ }
+}
+
+/******************************************************************************
+ * Reset the transmit and receive units; mask and clear all interrupts.
+ *
+ * hw - Struct containing variables accessed by shared code
+ *****************************************************************************/
+s32 e1000_reset_hw(struct e1000_hw *hw)
+{
+ u32 ctrl;
+ u32 ctrl_ext;
+ u32 icr;
+ u32 manc;
+ u32 led_ctrl;
+ u32 timeout;
+ u32 extcnf_ctrl;
+ s32 ret_val;
+
+ DEBUGFUNC("e1000_reset_hw");
+
+ /* For 82542 (rev 2.0), disable MWI before issuing a device reset */
+ if (hw->mac_type == e1000_82542_rev2_0) {
+ DEBUGOUT("Disabling MWI on 82542 rev 2.0\n");
+ e1000_pci_clear_mwi(hw);
+ }
+
+ if (hw->bus_type == e1000_bus_type_pci_express) {
+ /* Prevent the PCI-E bus from sticking if there is no TLP connection
+ * on the last TLP read/write transaction when MAC is reset.
+ */
+ if (e1000_disable_pciex_master(hw) != E1000_SUCCESS) {
+ DEBUGOUT("PCI-E Master disable polling has failed.\n");
+ }
+ }
+
+ /* Clear interrupt mask to stop board from generating interrupts */
+ DEBUGOUT("Masking off all interrupts\n");
+ ew32(IMC, 0xffffffff);
+
+ /* Disable the Transmit and Receive units. Then delay to allow
+ * any pending transactions to complete before we hit the MAC with
+ * the global reset.
+ */
+ ew32(RCTL, 0);
+ ew32(TCTL, E1000_TCTL_PSP);
+ E1000_WRITE_FLUSH();
+
+ /* The tbi_compatibility_on Flag must be cleared when Rctl is cleared. */
+ hw->tbi_compatibility_on = false;
+
+ /* Delay to allow any outstanding PCI transactions to complete before
+ * resetting the device
+ */
+ msleep(10);
+
+ ctrl = er32(CTRL);
+
+ /* Must reset the PHY before resetting the MAC */
+ if ((hw->mac_type == e1000_82541) || (hw->mac_type == e1000_82547)) {
+ ew32(CTRL, (ctrl | E1000_CTRL_PHY_RST));
+ msleep(5);
+ }
+
+ /* Must acquire the MDIO ownership before MAC reset.
+ * Ownership defaults to firmware after a reset. */
+ if (hw->mac_type == e1000_82573) {
+ timeout = 10;
+
+ extcnf_ctrl = er32(EXTCNF_CTRL);
+ extcnf_ctrl |= E1000_EXTCNF_CTRL_MDIO_SW_OWNERSHIP;
+
+ do {
+ ew32(EXTCNF_CTRL, extcnf_ctrl);
+ extcnf_ctrl = er32(EXTCNF_CTRL);
+
+ if (extcnf_ctrl & E1000_EXTCNF_CTRL_MDIO_SW_OWNERSHIP)
+ break;
+ else
+ extcnf_ctrl |= E1000_EXTCNF_CTRL_MDIO_SW_OWNERSHIP;
+
+ msleep(2);
+ timeout--;
+ } while (timeout);
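+
+ /* If software ownership was not acquired within the loop above
+ * (10 attempts, 2 ms apart), continue anyway; the MAC reset below
+ * is issued regardless and no error is returned here. */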
+ }
+
+ /* Workaround for ICH8 bit corruption issue in FIFO memory */
+ if (hw->mac_type == e1000_ich8lan) {
+ /* Set Tx and Rx buffer allocation to 8k apiece. */
+ ew32(PBA, E1000_PBA_8K);
+ /* Set Packet Buffer Size to 16k. */
+ ew32(PBS, E1000_PBS_16K);
+ }
+
+ /* Issue a global reset to the MAC. This will reset the chip's
+ * transmit, receive, DMA, and link units. It will not affect
+ * the current PCI configuration. The global reset bit is self-
+ * clearing, and should clear within a microsecond.
+ */
+ DEBUGOUT("Issuing a global reset to MAC\n");
+
+ switch (hw->mac_type) {
+ case e1000_82544:
+ case e1000_82540:
+ case e1000_82545:
+ case e1000_82546:
+ case e1000_82541:
+ case e1000_82541_rev_2:
+ /* These controllers can't ack the 64-bit write when issuing the
+ * reset, so use IO-mapping as a workaround to issue the reset */
+ E1000_WRITE_REG_IO(hw, CTRL, (ctrl | E1000_CTRL_RST));
+ break;
+ case e1000_82545_rev_3:
+ case e1000_82546_rev_3:
+ /* Reset is performed on a shadow of the control register */
+ ew32(CTRL_DUP, (ctrl | E1000_CTRL_RST));
+ break;
+ case e1000_ich8lan:
+ if (!hw->phy_reset_disable &&
+ e1000_check_phy_reset_block(hw) == E1000_SUCCESS) {
+ /* e1000_ich8lan PHY HW reset requires MAC CORE reset
+ * at the same time to make sure the interface between
+ * MAC and the external PHY is reset.
+ */
+ ctrl |= E1000_CTRL_PHY_RST;
+ }
+
+ e1000_get_software_flag(hw);
+ ew32(CTRL, (ctrl | E1000_CTRL_RST));
+ msleep(5);
+ break;
+ default:
+ ew32(CTRL, (ctrl | E1000_CTRL_RST));
+ break;
+ }
+
+ /* After MAC reset, force reload of EEPROM to restore power-on settings to
+ * device. Later controllers reload the EEPROM automatically, so just wait
+ * for reload to complete.
+ */
+ switch (hw->mac_type) {
+ case e1000_82542_rev2_0:
+ case e1000_82542_rev2_1:
+ case e1000_82543:
+ case e1000_82544:
+ /* Wait for reset to complete */
+ udelay(10);
+ ctrl_ext = er32(CTRL_EXT);
+ ctrl_ext |= E1000_CTRL_EXT_EE_RST;
+ ew32(CTRL_EXT, ctrl_ext);
+ E1000_WRITE_FLUSH();
+ /* Wait for EEPROM reload */
+ msleep(2);
+ break;
+ case e1000_82541:
+ case e1000_82541_rev_2:
+ case e1000_82547:
+ case e1000_82547_rev_2:
+ /* Wait for EEPROM reload */
+ msleep(20);
+ break;
+ case e1000_82573:
+ if (!e1000_is_onboard_nvm_eeprom(hw)) {
+ udelay(10);
+ ctrl_ext = er32(CTRL_EXT);
+ ctrl_ext |= E1000_CTRL_EXT_EE_RST;
+ ew32(CTRL_EXT, ctrl_ext);
+ E1000_WRITE_FLUSH();
+ }
+ /* fall through */
+ default:
+ /* Auto read done will delay 5ms or poll based on mac type */
+ ret_val = e1000_get_auto_rd_done(hw);
+ if (ret_val)
+ return ret_val;
+ break;
+ }
+
+ /* Disable HW ARPs on ASF enabled adapters */
+ if (hw->mac_type >= e1000_82540 && hw->mac_type <= e1000_82547_rev_2) {
+ manc = er32(MANC);
+ manc &= ~(E1000_MANC_ARP_EN);
+ ew32(MANC, manc);
+ }
+
+ if ((hw->mac_type == e1000_82541) || (hw->mac_type == e1000_82547)) {
+ e1000_phy_init_script(hw);
+
+ /* Configure activity LED after PHY reset */
+ led_ctrl = er32(LEDCTL);
+ led_ctrl &= IGP_ACTIVITY_LED_MASK;
+ led_ctrl |= (IGP_ACTIVITY_LED_ENABLE | IGP_LED3_MODE);
+ ew32(LEDCTL, led_ctrl);
+ }
+
+ /* Clear interrupt mask to stop board from generating interrupts */
+ DEBUGOUT("Masking off all interrupts\n");
+ ew32(IMC, 0xffffffff);
+
+ /* Clear any pending interrupt events. */
+ icr = er32(ICR);
+
+ /* If MWI was previously enabled, reenable it. */
+ if (hw->mac_type == e1000_82542_rev2_0) {
+ if (hw->pci_cmd_word & PCI_COMMAND_INVALIDATE)
+ e1000_pci_set_mwi(hw);
+ }
+
+ if (hw->mac_type == e1000_ich8lan) {
+ u32 kab = er32(KABGTXD);
+ kab |= E1000_KABGTXD_BGSQLBIAS;
+ ew32(KABGTXD, kab);
+ }
+
+ return E1000_SUCCESS;
+}
+
+/******************************************************************************
+ *
+ * Initialize a number of hardware-dependent bits
+ *
+ * hw: Struct containing variables accessed by shared code
+ *
+ * This function contains hardware limitation workarounds for PCI-E adapters
+ *
+ *****************************************************************************/
+static void e1000_initialize_hardware_bits(struct e1000_hw *hw)
+{
+ if ((hw->mac_type >= e1000_82571) && (!hw->initialize_hw_bits_disable)) {
+ /* Settings common to all PCI-express silicon */
+ u32 reg_ctrl, reg_ctrl_ext;
+ u32 reg_tarc0, reg_tarc1;
+ u32 reg_tctl;
+ u32 reg_txdctl, reg_txdctl1;
+
+ /* link autonegotiation/sync workarounds */
+ reg_tarc0 = er32(TARC0);
+ reg_tarc0 &= ~((1 << 30)|(1 << 29)|(1 << 28)|(1 << 27));
+
+ /* Enable not-done TX descriptor counting */
+ reg_txdctl = er32(TXDCTL);
+ reg_txdctl |= E1000_TXDCTL_COUNT_DESC;
+ ew32(TXDCTL, reg_txdctl);
+ reg_txdctl1 = er32(TXDCTL1);
+ reg_txdctl1 |= E1000_TXDCTL_COUNT_DESC;
+ ew32(TXDCTL1, reg_txdctl1);
+
+ switch (hw->mac_type) {
+ case e1000_82571:
+ case e1000_82572:
+ /* Clear PHY TX compatible mode bits */
+ reg_tarc1 = er32(TARC1);
+ reg_tarc1 &= ~((1 << 30)|(1 << 29));
+
+ /* link autonegotiation/sync workarounds */
+ reg_tarc0 |= ((1 << 26)|(1 << 25)|(1 << 24)|(1 << 23));
+
+ /* TX ring control fixes */
+ reg_tarc1 |= ((1 << 26)|(1 << 25)|(1 << 24));
+
+ /* Multiple read bit is reversed polarity */
+ reg_tctl = er32(TCTL);
+ if (reg_tctl & E1000_TCTL_MULR)
+ reg_tarc1 &= ~(1 << 28);
+ else
+ reg_tarc1 |= (1 << 28);
+
+ ew32(TARC1, reg_tarc1);
+ break;
+ case e1000_82573:
+ reg_ctrl_ext = er32(CTRL_EXT);
+ reg_ctrl_ext &= ~(1 << 23);
+ reg_ctrl_ext |= (1 << 22);
+
+ /* TX byte count fix */
+ reg_ctrl = er32(CTRL);
+ reg_ctrl &= ~(1 << 29);
+
+ ew32(CTRL_EXT, reg_ctrl_ext);
+ ew32(CTRL, reg_ctrl);
+ break;
+ case e1000_80003es2lan:
+ /* improve small packet performance for fiber/serdes */
+ if ((hw->media_type == e1000_media_type_fiber) ||
+ (hw->media_type == e1000_media_type_internal_serdes)) {
+ reg_tarc0 &= ~(1 << 20);
+ }
+
+ /* Multiple read bit is reversed polarity */
+ reg_tctl = er32(TCTL);
+ reg_tarc1 = er32(TARC1);
+ if (reg_tctl & E1000_TCTL_MULR)
+ reg_tarc1 &= ~(1 << 28);
+ else
+ reg_tarc1 |= (1 << 28);
+
+ ew32(TARC1, reg_tarc1);
+ break;
+ case e1000_ich8lan:
+ /* Reduce concurrent DMA requests to 3 from 4 */
+ if ((hw->revision_id < 3) ||
+ ((hw->device_id != E1000_DEV_ID_ICH8_IGP_M_AMT) &&
+ (hw->device_id != E1000_DEV_ID_ICH8_IGP_M)))
+ reg_tarc0 |= ((1 << 29)|(1 << 28));
+
+ reg_ctrl_ext = er32(CTRL_EXT);
+ reg_ctrl_ext |= (1 << 22);
+ ew32(CTRL_EXT, reg_ctrl_ext);
+
+ /* workaround TX hang with TSO=on */
+ reg_tarc0 |= ((1 << 27)|(1 << 26)|(1 << 24)|(1 << 23));
+
+ /* Multiple read bit is reversed polarity */
+ reg_tctl = er32(TCTL);
+ reg_tarc1 = er32(TARC1);
+ if (reg_tctl & E1000_TCTL_MULR)
+ reg_tarc1 &= ~(1 << 28);
+ else
+ reg_tarc1 |= (1 << 28);
+
+ /* workaround TX hang with TSO=on */
+ reg_tarc1 |= ((1 << 30)|(1 << 26)|(1 << 24));
+
+ ew32(TARC1, reg_tarc1);
+ break;
+ default:
+ break;
+ }
+
+ ew32(TARC0, reg_tarc0);
+ }
+}
+
+/******************************************************************************
+ * Performs basic configuration of the adapter.
+ *
+ * hw - Struct containing variables accessed by shared code
+ *
+ * Assumes that the controller has previously been reset and is in a
+ * post-reset uninitialized state. Initializes the receive address registers,
+ * multicast table, and VLAN filter table. Calls routines to setup link
+ * configuration and flow control settings. Clears all on-chip counters. Leaves
+ * the transmit and receive units disabled and uninitialized.
+ *****************************************************************************/
+s32 e1000_init_hw(struct e1000_hw *hw)
+{
+ u32 ctrl;
+ u32 i;
+ s32 ret_val;
+ u32 mta_size;
+ u32 reg_data;
+ u32 ctrl_ext;
+
+ DEBUGFUNC("e1000_init_hw");
+
+ /* force full DMA clock frequency for 10/100 on ICH8 A0-B0 */
+ if ((hw->mac_type == e1000_ich8lan) &&
+ ((hw->revision_id < 3) ||
+ ((hw->device_id != E1000_DEV_ID_ICH8_IGP_M_AMT) &&
+ (hw->device_id != E1000_DEV_ID_ICH8_IGP_M)))) {
+ reg_data = er32(STATUS);
+ reg_data &= ~0x80000000;
+ ew32(STATUS, reg_data);
+ }
+
+ /* Initialize Identification LED */
+ ret_val = e1000_id_led_init(hw);
+ if (ret_val) {
+ DEBUGOUT("Error Initializing Identification LED\n");
+ return ret_val;
+ }
+
+ /* Set the media type and TBI compatibility */
+ e1000_set_media_type(hw);
+
+ /* Must be called after e1000_set_media_type because media_type is used */
+ e1000_initialize_hardware_bits(hw);
+
+ /* Disabling VLAN filtering. */
+ DEBUGOUT("Initializing the IEEE VLAN\n");
+ /* VET hardcoded to standard value and VFTA removed in ICH8 LAN */
+ if (hw->mac_type != e1000_ich8lan) {
+ if (hw->mac_type < e1000_82545_rev_3)
+ ew32(VET, 0);
+ e1000_clear_vfta(hw);
+ }
+
+ /* For 82542 (rev 2.0), disable MWI and put the receiver into reset */
+ if (hw->mac_type == e1000_82542_rev2_0) {
+ DEBUGOUT("Disabling MWI on 82542 rev 2.0\n");
+ e1000_pci_clear_mwi(hw);
+ ew32(RCTL, E1000_RCTL_RST);
+ E1000_WRITE_FLUSH();
+ msleep(5);
+ }
+
+ /* Setup the receive address. This involves initializing all of the Receive
+ * Address Registers (RARs 0 - 15).
+ */
+ e1000_init_rx_addrs(hw);
+
+ /* For 82542 (rev 2.0), take the receiver out of reset and enable MWI */
+ if (hw->mac_type == e1000_82542_rev2_0) {
+ ew32(RCTL, 0);
+ E1000_WRITE_FLUSH();
+ msleep(1);
+ if (hw->pci_cmd_word & PCI_COMMAND_INVALIDATE)
+ e1000_pci_set_mwi(hw);
+ }
+
+ /* Zero out the Multicast HASH table */
+ DEBUGOUT("Zeroing the MTA\n");
+ mta_size = E1000_MC_TBL_SIZE;
+ if (hw->mac_type == e1000_ich8lan)
+ mta_size = E1000_MC_TBL_SIZE_ICH8LAN;
+ for (i = 0; i < mta_size; i++) {
+ E1000_WRITE_REG_ARRAY(hw, MTA, i, 0);
+ /* use write flush to prevent Memory Write Block (MWB) from
+ * occurring when accessing our register space */
+ E1000_WRITE_FLUSH();
+ }
+
+ /* Set the PCI priority bit correctly in the CTRL register. This
+ * determines if the adapter gives priority to receives, or if it
+ * gives equal priority to transmits and receives. Valid only on
+ * 82542 and 82543 silicon.
+ */
+ if (hw->dma_fairness && hw->mac_type <= e1000_82543) {
+ ctrl = er32(CTRL);
+ ew32(CTRL, ctrl | E1000_CTRL_PRIOR);
+ }
+
+ switch (hw->mac_type) {
+ case e1000_82545_rev_3:
+ case e1000_82546_rev_3:
+ break;
+ default:
+ /* Workaround for PCI-X problem when BIOS sets MMRBC incorrectly. */
+ if (hw->bus_type == e1000_bus_type_pcix && e1000_pcix_get_mmrbc(hw) > 2048)
+ e1000_pcix_set_mmrbc(hw, 2048);
+ break;
+ }
+
+ /* More time needed for PHY to initialize */
+ if (hw->mac_type == e1000_ich8lan)
+ msleep(15);
+
+ /* Call a subroutine to configure the link and setup flow control. */
+ ret_val = e1000_setup_link(hw);
+
+ /* Set the transmit descriptor write-back policy */
+ if (hw->mac_type > e1000_82544) {
+ ctrl = er32(TXDCTL);
+ ctrl = (ctrl & ~E1000_TXDCTL_WTHRESH) | E1000_TXDCTL_FULL_TX_DESC_WB;
+ ew32(TXDCTL, ctrl);
+ }
+
+ if (hw->mac_type == e1000_82573) {
+ e1000_enable_tx_pkt_filtering(hw);
+ }
+
+ switch (hw->mac_type) {
+ default:
+ break;
+ case e1000_80003es2lan:
+ /* Enable retransmit on late collisions */
+ reg_data = er32(TCTL);
+ reg_data |= E1000_TCTL_RTLC;
+ ew32(TCTL, reg_data);
+
+ /* Configure Gigabit Carry Extend Padding */
+ reg_data = er32(TCTL_EXT);
+ reg_data &= ~E1000_TCTL_EXT_GCEX_MASK;
+ reg_data |= DEFAULT_80003ES2LAN_TCTL_EXT_GCEX;
+ ew32(TCTL_EXT, reg_data);
+
+ /* Configure Transmit Inter-Packet Gap */
+ reg_data = er32(TIPG);
+ reg_data &= ~E1000_TIPG_IPGT_MASK;
+ reg_data |= DEFAULT_80003ES2LAN_TIPG_IPGT_1000;
+ ew32(TIPG, reg_data);
+
+ reg_data = E1000_READ_REG_ARRAY(hw, FFLT, 0x0001);
+ reg_data &= ~0x00100000;
+ E1000_WRITE_REG_ARRAY(hw, FFLT, 0x0001, reg_data);
+ /* Fall through */
+ case e1000_82571:
+ case e1000_82572:
+ case e1000_ich8lan:
+ ctrl = er32(TXDCTL1);
+ ctrl = (ctrl & ~E1000_TXDCTL_WTHRESH) | E1000_TXDCTL_FULL_TX_DESC_WB;
+ ew32(TXDCTL1, ctrl);
+ break;
+ }
+
+ if (hw->mac_type == e1000_82573) {
+ u32 gcr = er32(GCR);
+ gcr |= E1000_GCR_L1_ACT_WITHOUT_L0S_RX;
+ ew32(GCR, gcr);
+ }
+
+ /* Clear all of the statistics registers (clear on read). It is
+ * important that we do this after we have tried to establish link
+ * because the symbol error count will increment wildly if there
+ * is no link.
+ */
+ e1000_clear_hw_cntrs(hw);
+
+ /* ICH8 No-snoop bits are opposite polarity.
+ * Set to snoop by default after reset. */
+ if (hw->mac_type == e1000_ich8lan)
+ e1000_set_pci_ex_no_snoop(hw, PCI_EX_82566_SNOOP_ALL);
+
+ if (hw->device_id == E1000_DEV_ID_82546GB_QUAD_COPPER ||
+ hw->device_id == E1000_DEV_ID_82546GB_QUAD_COPPER_KSP3) {
+ ctrl_ext = er32(CTRL_EXT);
+ /* Relaxed ordering must be disabled to avoid a parity
+ * error crash in a PCI slot. */
+ ctrl_ext |= E1000_CTRL_EXT_RO_DIS;
+ ew32(CTRL_EXT, ctrl_ext);
+ }
+
+ return ret_val;
+}
+
+/******************************************************************************
+ * Adjust SERDES output amplitude based on EEPROM setting.
+ *
+ * hw - Struct containing variables accessed by shared code.
+ *****************************************************************************/
+static s32 e1000_adjust_serdes_amplitude(struct e1000_hw *hw)
+{
+ u16 eeprom_data;
+ s32 ret_val;
+
+ DEBUGFUNC("e1000_adjust_serdes_amplitude");
+
+ if (hw->media_type != e1000_media_type_internal_serdes)
+ return E1000_SUCCESS;
+
+ switch (hw->mac_type) {
+ case e1000_82545_rev_3:
+ case e1000_82546_rev_3:
+ break;
+ default:
+ return E1000_SUCCESS;
+ }
+
+ ret_val = e1000_read_eeprom(hw, EEPROM_SERDES_AMPLITUDE, 1, &eeprom_data);
+ if (ret_val) {
+ return ret_val;
+ }
+
+ if (eeprom_data != EEPROM_RESERVED_WORD) {
+ /* Adjust SERDES output amplitude only. */
+ eeprom_data &= EEPROM_SERDES_AMPLITUDE_MASK;
+ ret_val = e1000_write_phy_reg(hw, M88E1000_PHY_EXT_CTRL, eeprom_data);
+ if (ret_val)
+ return ret_val;
+ }
+
+ return E1000_SUCCESS;
+}
+
+/******************************************************************************
+ * Configures flow control and link settings.
+ *
+ * hw - Struct containing variables accessed by shared code
+ *
+ * Determines which flow control settings to use. Calls the appropriate media-
+ * specific link configuration function. Configures the flow control settings.
+ * Assuming the adapter has a valid link partner, a valid link should be
+ * established. Assumes the hardware has previously been reset and the
+ * transmitter and receiver are not enabled.
+ *****************************************************************************/
+s32 e1000_setup_link(struct e1000_hw *hw)
+{
+ u32 ctrl_ext;
+ s32 ret_val;
+ u16 eeprom_data;
+
+ DEBUGFUNC("e1000_setup_link");
+
+ /* In the case of the phy reset being blocked, we already have a link.
+ * We do not have to set it up again. */
+ if (e1000_check_phy_reset_block(hw))
+ return E1000_SUCCESS;
+
+ /* Read and store word 0x0F of the EEPROM. This word contains bits
+ * that determine the hardware's default PAUSE (flow control) mode,
+ * a bit that determines whether the HW defaults to enabling or
+ * disabling auto-negotiation, and the direction of the
+ * SW defined pins. If there is no SW over-ride of the flow
+ * control setting, then the variable hw->fc will
+ * be initialized based on a value in the EEPROM.
+ */
+ if (hw->fc == E1000_FC_DEFAULT) {
+ switch (hw->mac_type) {
+ case e1000_ich8lan:
+ case e1000_82573:
+ hw->fc = E1000_FC_FULL;
+ break;
+ default:
+ ret_val = e1000_read_eeprom(hw, EEPROM_INIT_CONTROL2_REG,
+ 1, &eeprom_data);
+ if (ret_val) {
+ DEBUGOUT("EEPROM Read Error\n");
+ return -E1000_ERR_EEPROM;
+ }
+ if ((eeprom_data & EEPROM_WORD0F_PAUSE_MASK) == 0)
+ hw->fc = E1000_FC_NONE;
+ else if ((eeprom_data & EEPROM_WORD0F_PAUSE_MASK) ==
+ EEPROM_WORD0F_ASM_DIR)
+ hw->fc = E1000_FC_TX_PAUSE;
+ else
+ hw->fc = E1000_FC_FULL;
+ break;
+ }
+ }
+
+ /* We want to save off the original Flow Control configuration just
+ * in case we get disconnected and then reconnected into a different
+ * hub or switch with different Flow Control capabilities.
+ */
+ if (hw->mac_type == e1000_82542_rev2_0)
+ hw->fc &= (~E1000_FC_TX_PAUSE);
+
+ if ((hw->mac_type < e1000_82543) && (hw->report_tx_early == 1))
+ hw->fc &= (~E1000_FC_RX_PAUSE);
+
+ hw->original_fc = hw->fc;
+
+ DEBUGOUT1("After fix-ups FlowControl is now = %x\n", hw->fc);
+
+ /* Take the 4 bits from EEPROM word 0x0F that determine the initial
+ * polarity value for the SW controlled pins, and setup the
+ * Extended Device Control reg with that info.
+ * This is needed because one of the SW controlled pins is used for
+ * signal detection. So this should be done before e1000_setup_pcs_link()
+ * or e1000_phy_setup() is called.
+ */
+ if (hw->mac_type == e1000_82543) {
+ ret_val = e1000_read_eeprom(hw, EEPROM_INIT_CONTROL2_REG,
+ 1, &eeprom_data);
+ if (ret_val) {
+ DEBUGOUT("EEPROM Read Error\n");
+ return -E1000_ERR_EEPROM;
+ }
+ ctrl_ext = ((eeprom_data & EEPROM_WORD0F_SWPDIO_EXT) <<
+ SWDPIO__EXT_SHIFT);
+ ew32(CTRL_EXT, ctrl_ext);
+ }
+
+ /* Call the necessary subroutine to configure the link. */
+ ret_val = (hw->media_type == e1000_media_type_copper) ?
+ e1000_setup_copper_link(hw) :
+ e1000_setup_fiber_serdes_link(hw);
+
+ /* Initialize the flow control address, type, and PAUSE timer
+ * registers to their default values. This is done even if flow
+ * control is disabled, because it does not hurt anything to
+ * initialize these registers.
+ */
+ DEBUGOUT("Initializing the Flow Control address, type and timer regs\n");
+
+ /* FCAL/H and FCT are hardcoded to standard values in e1000_ich8lan. */
+ if (hw->mac_type != e1000_ich8lan) {
+ ew32(FCT, FLOW_CONTROL_TYPE);
+ ew32(FCAH, FLOW_CONTROL_ADDRESS_HIGH);
+ ew32(FCAL, FLOW_CONTROL_ADDRESS_LOW);
+ }
+
+ ew32(FCTTV, hw->fc_pause_time);
+
+ /* Set the flow control receive threshold registers. Normally,
+ * these registers will be set to a default threshold that may be
+ * adjusted later by the driver's runtime code. However, if the
+ * ability to transmit pause frames is not enabled, then these
+ * registers will be set to 0.
+ */
+ if (!(hw->fc & E1000_FC_TX_PAUSE)) {
+ ew32(FCRTL, 0);
+ ew32(FCRTH, 0);
+ } else {
+ /* We need to set up the Receive Threshold high and low water marks
+ * as well as (optionally) enabling the transmission of XON frames.
+ */
+ if (hw->fc_send_xon) {
+ ew32(FCRTL, (hw->fc_low_water | E1000_FCRTL_XONE));
+ ew32(FCRTH, hw->fc_high_water);
+ } else {
+ ew32(FCRTL, hw->fc_low_water);
+ ew32(FCRTH, hw->fc_high_water);
+ }
+ }
+ return ret_val;
+}
+
+/******************************************************************************
+ * Sets up link for a fiber based or serdes based adapter
+ *
+ * hw - Struct containing variables accessed by shared code
+ *
+ * Manipulates Physical Coding Sublayer functions in order to configure
+ * link. Assumes the hardware has been previously reset and the transmitter
+ * and receiver are not enabled.
+ *****************************************************************************/
+static s32 e1000_setup_fiber_serdes_link(struct e1000_hw *hw)
+{
+ u32 ctrl;
+ u32 status;
+ u32 txcw = 0;
+ u32 i;
+ u32 signal = 0;
+ s32 ret_val;
+
+ DEBUGFUNC("e1000_setup_fiber_serdes_link");
+
+ /* On 82571 and 82572 Fiber connections, SerDes loopback mode persists
+ * until explicitly turned off or a power cycle is performed. A read to
+ * the register does not indicate its status. Therefore, we ensure
+ * loopback mode is disabled during initialization.
+ */
+ if (hw->mac_type == e1000_82571 || hw->mac_type == e1000_82572)
+ ew32(SCTL, E1000_DISABLE_SERDES_LOOPBACK);
+
+ /* On adapters with a MAC newer than 82544, SWDP 1 will be
+ * set when the optics detect a signal. On older adapters, it will be
+ * cleared when there is a signal. This applies to fiber media only.
+ * If we're on serdes media, adjust the output amplitude to the value
+ * set in the EEPROM.
+ */
+ ctrl = er32(CTRL);
+ if (hw->media_type == e1000_media_type_fiber)
+ signal = (hw->mac_type > e1000_82544) ? E1000_CTRL_SWDPIN1 : 0;
+
+ ret_val = e1000_adjust_serdes_amplitude(hw);
+ if (ret_val)
+ return ret_val;
+
+ /* Take the link out of reset */
+ ctrl &= ~(E1000_CTRL_LRST);
+
+ /* Adjust VCO speed to improve BER performance */
+ ret_val = e1000_set_vco_speed(hw);
+ if (ret_val)
+ return ret_val;
+
+ e1000_config_collision_dist(hw);
+
+ /* Check for a software override of the flow control settings, and setup
+ * the device accordingly. If auto-negotiation is enabled, then software
+ * will have to set the "PAUSE" bits to the correct value in the Transmit
+ * Config Word Register (TXCW) and re-start auto-negotiation. However, if
+ * auto-negotiation is disabled, then software will have to manually
+ * configure the two flow control enable bits in the CTRL register.
+ *
+ * The possible values of the "fc" parameter are:
+ * 0: Flow control is completely disabled
+ * 1: Rx flow control is enabled (we can receive pause frames, but
+ * not send pause frames).
+ * 2: Tx flow control is enabled (we can send pause frames but we do
+ * not support receiving pause frames).
+ * 3: Both Rx and TX flow control (symmetric) are enabled.
+ */
+ switch (hw->fc) {
+ case E1000_FC_NONE:
+ /* Flow control is completely disabled by a software over-ride. */
+ txcw = (E1000_TXCW_ANE | E1000_TXCW_FD);
+ break;
+ case E1000_FC_RX_PAUSE:
+ /* RX Flow control is enabled and TX Flow control is disabled by a
+ * software over-ride. Since there really isn't a way to advertise
+ * that we are capable of RX Pause ONLY, we will advertise that we
+ * support both symmetric and asymmetric RX PAUSE. Later, we will
+ * disable the adapter's ability to send PAUSE frames.
+ */
+ txcw = (E1000_TXCW_ANE | E1000_TXCW_FD | E1000_TXCW_PAUSE_MASK);
+ break;
+ case E1000_FC_TX_PAUSE:
+ /* TX Flow control is enabled, and RX Flow control is disabled, by a
+ * software over-ride.
+ */
+ txcw = (E1000_TXCW_ANE | E1000_TXCW_FD | E1000_TXCW_ASM_DIR);
+ break;
+ case E1000_FC_FULL:
+ /* Flow control (both RX and TX) is enabled by a software over-ride. */
+ txcw = (E1000_TXCW_ANE | E1000_TXCW_FD | E1000_TXCW_PAUSE_MASK);
+ break;
+ default:
+ DEBUGOUT("Flow control param set incorrectly\n");
+ return -E1000_ERR_CONFIG;
+ break;
+ }
+
+ /* Since auto-negotiation is enabled, take the link out of reset (the link
+ * will be in reset, because we previously reset the chip). This will
+ * restart auto-negotiation. If auto-negotiation is successful then the
+ * link-up status bit will be set and the flow control enable bits (RFCE
+ * and TFCE) will be set according to their negotiated value.
+ */
+ DEBUGOUT("Auto-negotiation enabled\n");
+
+ ew32(TXCW, txcw);
+ ew32(CTRL, ctrl);
+ E1000_WRITE_FLUSH();
+
+ hw->txcw = txcw;
+ msleep(1);
+
+ /* If we have a signal (the cable is plugged in) then poll for a "Link-Up"
+ * indication in the Device Status Register. Time-out if a link isn't
+ * seen in 500 milliseconds (Auto-negotiation should complete in
+ * less than 500 milliseconds even if the other end is doing it in SW).
+ * For internal serdes, we just assume a signal is present, then poll.
+ */
+ if (hw->media_type == e1000_media_type_internal_serdes ||
+ (er32(CTRL) & E1000_CTRL_SWDPIN1) == signal) {
+ DEBUGOUT("Looking for Link\n");
+ for (i = 0; i < (LINK_UP_TIMEOUT / 10); i++) {
+ msleep(10);
+ status = er32(STATUS);
+ if (status & E1000_STATUS_LU) break;
+ }
+ if (i == (LINK_UP_TIMEOUT / 10)) {
+ DEBUGOUT("Never got a valid link from auto-neg!!!\n");
+ hw->autoneg_failed = 1;
+ /* AutoNeg failed to achieve a link, so we'll call
+ * e1000_check_for_link. This routine will force the link up if
+ * we detect a signal. This will allow us to communicate with
+ * non-autonegotiating link partners.
+ */
+ ret_val = e1000_check_for_link(hw);
+ if (ret_val) {
+ DEBUGOUT("Error while checking for link\n");
+ return ret_val;
+ }
+ hw->autoneg_failed = 0;
+ } else {
+ hw->autoneg_failed = 0;
+ DEBUGOUT("Valid Link Found\n");
+ }
+ } else {
+ DEBUGOUT("No Signal Detected\n");
+ }
+ return E1000_SUCCESS;
+}
+
+/******************************************************************************
+* Make sure we have a valid PHY and change PHY mode before link setup.
+*
+* hw - Struct containing variables accessed by shared code
+******************************************************************************/
+static s32 e1000_copper_link_preconfig(struct e1000_hw *hw)
+{
+ u32 ctrl;
+ s32 ret_val;
+ u16 phy_data;
+
+ DEBUGFUNC("e1000_copper_link_preconfig");
+
+ ctrl = er32(CTRL);
+ /* With 82543, we need to force speed and duplex on the MAC equal to what
+ * the PHY speed and duplex configuration is. In addition, we need to
+ * perform a hardware reset on the PHY to take it out of reset.
+ */
+ if (hw->mac_type > e1000_82543) {
+ ctrl |= E1000_CTRL_SLU;
+ ctrl &= ~(E1000_CTRL_FRCSPD | E1000_CTRL_FRCDPX);
+ ew32(CTRL, ctrl);
+ } else {
+ ctrl |= (E1000_CTRL_FRCSPD | E1000_CTRL_FRCDPX | E1000_CTRL_SLU);
+ ew32(CTRL, ctrl);
+ ret_val = e1000_phy_hw_reset(hw);
+ if (ret_val)
+ return ret_val;
+ }
+
+ /* Make sure we have a valid PHY */
+ ret_val = e1000_detect_gig_phy(hw);
+ if (ret_val) {
+ DEBUGOUT("Error, did not detect valid phy.\n");
+ return ret_val;
+ }
+ DEBUGOUT1("Phy ID = %x \n", hw->phy_id);
+
+ /* Set PHY to class A mode (if necessary) */
+ ret_val = e1000_set_phy_mode(hw);
+ if (ret_val)
+ return ret_val;
+
+ if ((hw->mac_type == e1000_82545_rev_3) ||
+ (hw->mac_type == e1000_82546_rev_3)) {
+ ret_val = e1000_read_phy_reg(hw, M88E1000_PHY_SPEC_CTRL, &phy_data);
+ phy_data |= 0x00000008;
+ ret_val = e1000_write_phy_reg(hw, M88E1000_PHY_SPEC_CTRL, phy_data);
+ }
+
+ if (hw->mac_type <= e1000_82543 ||
+ hw->mac_type == e1000_82541 || hw->mac_type == e1000_82547 ||
+ hw->mac_type == e1000_82541_rev_2 || hw->mac_type == e1000_82547_rev_2)
+ hw->phy_reset_disable = false;
+
+ return E1000_SUCCESS;
+}
+
+
+/********************************************************************
+* Copper link setup for e1000_phy_igp series.
+*
+* hw - Struct containing variables accessed by shared code
+*********************************************************************/
+static s32 e1000_copper_link_igp_setup(struct e1000_hw *hw)
+{
+ u32 led_ctrl;
+ s32 ret_val;
+ u16 phy_data;
+
+ DEBUGFUNC("e1000_copper_link_igp_setup");
+
+ if (hw->phy_reset_disable)
+ return E1000_SUCCESS;
+
+ ret_val = e1000_phy_reset(hw);
+ if (ret_val) {
+ DEBUGOUT("Error Resetting the PHY\n");
+ return ret_val;
+ }
+
+ /* Wait 15ms for MAC to configure PHY from eeprom settings */
+ msleep(15);
+ if (hw->mac_type != e1000_ich8lan) {
+ /* Configure activity LED after PHY reset */
+ led_ctrl = er32(LEDCTL);
+ led_ctrl &= IGP_ACTIVITY_LED_MASK;
+ led_ctrl |= (IGP_ACTIVITY_LED_ENABLE | IGP_LED3_MODE);
+ ew32(LEDCTL, led_ctrl);
+ }
+
+ /* The NVM settings will configure LPLU in D3 for IGP2 and IGP3 PHYs */
+ if (hw->phy_type == e1000_phy_igp) {
+ /* disable lplu d3 during driver init */
+ ret_val = e1000_set_d3_lplu_state(hw, false);
+ if (ret_val) {
+ DEBUGOUT("Error Disabling LPLU D3\n");
+ return ret_val;
+ }
+ }
+
+ /* disable lplu d0 during driver init */
+ ret_val = e1000_set_d0_lplu_state(hw, false);
+ if (ret_val) {
+ DEBUGOUT("Error Disabling LPLU D0\n");
+ return ret_val;
+ }
+ /* Configure mdi-mdix settings */
+ ret_val = e1000_read_phy_reg(hw, IGP01E1000_PHY_PORT_CTRL, &phy_data);
+ if (ret_val)
+ return ret_val;
+
+ if ((hw->mac_type == e1000_82541) || (hw->mac_type == e1000_82547)) {
+ hw->dsp_config_state = e1000_dsp_config_disabled;
+ /* Force MDI for earlier revs of the IGP PHY */
+ phy_data &= ~(IGP01E1000_PSCR_AUTO_MDIX | IGP01E1000_PSCR_FORCE_MDI_MDIX);
+ hw->mdix = 1;
+
+ } else {
+ hw->dsp_config_state = e1000_dsp_config_enabled;
+ phy_data &= ~IGP01E1000_PSCR_AUTO_MDIX;
+
+ switch (hw->mdix) {
+ case 1:
+ phy_data &= ~IGP01E1000_PSCR_FORCE_MDI_MDIX;
+ break;
+ case 2:
+ phy_data |= IGP01E1000_PSCR_FORCE_MDI_MDIX;
+ break;
+ case 0:
+ default:
+ phy_data |= IGP01E1000_PSCR_AUTO_MDIX;
+ break;
+ }
+ }
+ ret_val = e1000_write_phy_reg(hw, IGP01E1000_PHY_PORT_CTRL, phy_data);
+ if (ret_val)
+ return ret_val;
+
+ /* set auto-master slave resolution settings */
+ if (hw->autoneg) {
+ e1000_ms_type phy_ms_setting = hw->master_slave;
+
+ if (hw->ffe_config_state == e1000_ffe_config_active)
+ hw->ffe_config_state = e1000_ffe_config_enabled;
+
+ if (hw->dsp_config_state == e1000_dsp_config_activated)
+ hw->dsp_config_state = e1000_dsp_config_enabled;
+
+ /* when the autonegotiation advertisement is only 1000Mbps, we
+ * should disable SmartSpeed and enable Auto MasterSlave
+ * resolution as hardware default. */
+ if (hw->autoneg_advertised == ADVERTISE_1000_FULL) {
+ /* Disable SmartSpeed */
+ ret_val = e1000_read_phy_reg(hw, IGP01E1000_PHY_PORT_CONFIG,
+ &phy_data);
+ if (ret_val)
+ return ret_val;
+ phy_data &= ~IGP01E1000_PSCFR_SMART_SPEED;
+ ret_val = e1000_write_phy_reg(hw, IGP01E1000_PHY_PORT_CONFIG,
+ phy_data);
+ if (ret_val)
+ return ret_val;
+ /* Set auto Master/Slave resolution process */
+ ret_val = e1000_read_phy_reg(hw, PHY_1000T_CTRL, &phy_data);
+ if (ret_val)
+ return ret_val;
+ phy_data &= ~CR_1000T_MS_ENABLE;
+ ret_val = e1000_write_phy_reg(hw, PHY_1000T_CTRL, phy_data);
+ if (ret_val)
+ return ret_val;
+ }
+
+ ret_val = e1000_read_phy_reg(hw, PHY_1000T_CTRL, &phy_data);
+ if (ret_val)
+ return ret_val;
+
+ /* load defaults for future use */
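+ /* CR_1000T_MS_ENABLE set means master/slave resolution was configured
+ * manually; CR_1000T_MS_VALUE then selects master vs. slave. If
+ * CR_1000T_MS_ENABLE is clear, the PHY resolves master/slave
+ * automatically. */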
+ hw->original_master_slave = (phy_data & CR_1000T_MS_ENABLE) ?
+ ((phy_data & CR_1000T_MS_VALUE) ?
+ e1000_ms_force_master :
+ e1000_ms_force_slave) :
+ e1000_ms_auto;
+
+ switch (phy_ms_setting) {
+ case e1000_ms_force_master:
+ phy_data |= (CR_1000T_MS_ENABLE | CR_1000T_MS_VALUE);
+ break;
+ case e1000_ms_force_slave:
+ phy_data |= CR_1000T_MS_ENABLE;
+ phy_data &= ~(CR_1000T_MS_VALUE);
+ break;
+ case e1000_ms_auto:
+ phy_data &= ~CR_1000T_MS_ENABLE;
+ default:
+ break;
+ }
+ ret_val = e1000_write_phy_reg(hw, PHY_1000T_CTRL, phy_data);
+ if (ret_val)
+ return ret_val;
+ }
+
+ return E1000_SUCCESS;
+}
+
+/********************************************************************
+* Copper link setup for e1000_phy_gg82563 series.
+*
+* hw - Struct containing variables accessed by shared code
+*********************************************************************/
+static s32 e1000_copper_link_ggp_setup(struct e1000_hw *hw)
+{
+ s32 ret_val;
+ u16 phy_data;
+ u32 reg_data;
+
+ DEBUGFUNC("e1000_copper_link_ggp_setup");
+
+ if (!hw->phy_reset_disable) {
+
+ /* Enable CRS on TX for half-duplex operation. */
+ ret_val = e1000_read_phy_reg(hw, GG82563_PHY_MAC_SPEC_CTRL,
+ &phy_data);
+ if (ret_val)
+ return ret_val;
+
+ phy_data |= GG82563_MSCR_ASSERT_CRS_ON_TX;
+ /* Use 25MHz for both link down and 1000BASE-T for Tx clock */
+ phy_data |= GG82563_MSCR_TX_CLK_1000MBPS_25MHZ;
+
+ ret_val = e1000_write_phy_reg(hw, GG82563_PHY_MAC_SPEC_CTRL,
+ phy_data);
+ if (ret_val)
+ return ret_val;
+
+ /* Options:
+ * MDI/MDI-X = 0 (default)
+ * 0 - Auto for all speeds
+ * 1 - MDI mode
+ * 2 - MDI-X mode
+ * 3 - Auto for 1000Base-T only (MDI-X for 10/100Base-T modes)
+ */
+ ret_val = e1000_read_phy_reg(hw, GG82563_PHY_SPEC_CTRL, &phy_data);
+ if (ret_val)
+ return ret_val;
+
+ phy_data &= ~GG82563_PSCR_CROSSOVER_MODE_MASK;
+
+ switch (hw->mdix) {
+ case 1:
+ phy_data |= GG82563_PSCR_CROSSOVER_MODE_MDI;
+ break;
+ case 2:
+ phy_data |= GG82563_PSCR_CROSSOVER_MODE_MDIX;
+ break;
+ case 0:
+ default:
+ phy_data |= GG82563_PSCR_CROSSOVER_MODE_AUTO;
+ break;
+ }
+
+ /* Options:
+ * disable_polarity_correction = 0 (default)
+ * Automatic Correction for Reversed Cable Polarity
+ * 0 - Disabled
+ * 1 - Enabled
+ */
+ phy_data &= ~GG82563_PSCR_POLARITY_REVERSAL_DISABLE;
+ if (hw->disable_polarity_correction == 1)
+ phy_data |= GG82563_PSCR_POLARITY_REVERSAL_DISABLE;
+ ret_val = e1000_write_phy_reg(hw, GG82563_PHY_SPEC_CTRL, phy_data);
+
+ if (ret_val)
+ return ret_val;
+
+ /* SW Reset the PHY so all changes take effect */
+ ret_val = e1000_phy_reset(hw);
+ if (ret_val) {
+ DEBUGOUT("Error Resetting the PHY\n");
+ return ret_val;
+ }
+ } /* phy_reset_disable */
+
+ if (hw->mac_type == e1000_80003es2lan) {
+ /* Bypass RX and TX FIFO's */
+ ret_val = e1000_write_kmrn_reg(hw, E1000_KUMCTRLSTA_OFFSET_FIFO_CTRL,
+ E1000_KUMCTRLSTA_FIFO_CTRL_RX_BYPASS |
+ E1000_KUMCTRLSTA_FIFO_CTRL_TX_BYPASS);
+ if (ret_val)
+ return ret_val;
+
+ ret_val = e1000_read_phy_reg(hw, GG82563_PHY_SPEC_CTRL_2, &phy_data);
+ if (ret_val)
+ return ret_val;
+
+ phy_data &= ~GG82563_PSCR2_REVERSE_AUTO_NEG;
+ ret_val = e1000_write_phy_reg(hw, GG82563_PHY_SPEC_CTRL_2, phy_data);
+
+ if (ret_val)
+ return ret_val;
+
+ reg_data = er32(CTRL_EXT);
+ reg_data &= ~(E1000_CTRL_EXT_LINK_MODE_MASK);
+ ew32(CTRL_EXT, reg_data);
+
+ ret_val = e1000_read_phy_reg(hw, GG82563_PHY_PWR_MGMT_CTRL,
+ &phy_data);
+ if (ret_val)
+ return ret_val;
+
+ /* Do not init these registers when the HW is in IAMT mode, since the
+ * firmware will have already initialized them. We only initialize
+ * them if the HW is not in IAMT mode.
+ */
+ if (!e1000_check_mng_mode(hw)) {
+ /* Enable Electrical Idle on the PHY */
+ phy_data |= GG82563_PMCR_ENABLE_ELECTRICAL_IDLE;
+ ret_val = e1000_write_phy_reg(hw, GG82563_PHY_PWR_MGMT_CTRL,
+ phy_data);
+ if (ret_val)
+ return ret_val;
+
+ ret_val = e1000_read_phy_reg(hw, GG82563_PHY_KMRN_MODE_CTRL,
+ &phy_data);
+ if (ret_val)
+ return ret_val;
+
+ phy_data &= ~GG82563_KMCR_PASS_FALSE_CARRIER;
+ ret_val = e1000_write_phy_reg(hw, GG82563_PHY_KMRN_MODE_CTRL,
+ phy_data);
+
+ if (ret_val)
+ return ret_val;
+ }
+
+ /* Workaround: Disable padding in Kumeran interface in the MAC
+ * and in the PHY to avoid CRC errors.
+ */
+ ret_val = e1000_read_phy_reg(hw, GG82563_PHY_INBAND_CTRL,
+ &phy_data);
+ if (ret_val)
+ return ret_val;
+ phy_data |= GG82563_ICR_DIS_PADDING;
+ ret_val = e1000_write_phy_reg(hw, GG82563_PHY_INBAND_CTRL,
+ phy_data);
+ if (ret_val)
+ return ret_val;
+ }
+
+ return E1000_SUCCESS;
+}
+
+/********************************************************************
+* Copper link setup for e1000_phy_m88 series.
+*
+* hw - Struct containing variables accessed by shared code
+*********************************************************************/
+static s32 e1000_copper_link_mgp_setup(struct e1000_hw *hw)
+{
+ s32 ret_val;
+ u16 phy_data;
+
+ DEBUGFUNC("e1000_copper_link_mgp_setup");
+
+ if (hw->phy_reset_disable)
+ return E1000_SUCCESS;
+
+ /* Enable CRS on TX. This must be set for half-duplex operation. */
+ ret_val = e1000_read_phy_reg(hw, M88E1000_PHY_SPEC_CTRL, &phy_data);
+ if (ret_val)
+ return ret_val;
+
+ phy_data |= M88E1000_PSCR_ASSERT_CRS_ON_TX;
+
+ /* Options:
+ * MDI/MDI-X = 0 (default)
+ * 0 - Auto for all speeds
+ * 1 - MDI mode
+ * 2 - MDI-X mode
+ * 3 - Auto for 1000Base-T only (MDI-X for 10/100Base-T modes)
+ */
+ phy_data &= ~M88E1000_PSCR_AUTO_X_MODE;
+
+ switch (hw->mdix) {
+ case 1:
+ phy_data |= M88E1000_PSCR_MDI_MANUAL_MODE;
+ break;
+ case 2:
+ phy_data |= M88E1000_PSCR_MDIX_MANUAL_MODE;
+ break;
+ case 3:
+ phy_data |= M88E1000_PSCR_AUTO_X_1000T;
+ break;
+ case 0:
+ default:
+ phy_data |= M88E1000_PSCR_AUTO_X_MODE;
+ break;
+ }
+
+ /* Options:
+ * disable_polarity_correction = 0 (default)
+ * Automatic Correction for Reversed Cable Polarity
+ * 0 - Disabled
+ * 1 - Enabled
+ */
+ phy_data &= ~M88E1000_PSCR_POLARITY_REVERSAL;
+ if (hw->disable_polarity_correction == 1)
+ phy_data |= M88E1000_PSCR_POLARITY_REVERSAL;
+ ret_val = e1000_write_phy_reg(hw, M88E1000_PHY_SPEC_CTRL, phy_data);
+ if (ret_val)
+ return ret_val;
+
+ if (hw->phy_revision < M88E1011_I_REV_4) {
+ /* Force TX_CLK in the Extended PHY Specific Control Register
+ * to 25MHz clock.
+ */
+ ret_val = e1000_read_phy_reg(hw, M88E1000_EXT_PHY_SPEC_CTRL, &phy_data);
+ if (ret_val)
+ return ret_val;
+
+ phy_data |= M88E1000_EPSCR_TX_CLK_25;
+
+ if ((hw->phy_revision == E1000_REVISION_2) &&
+ (hw->phy_id == M88E1111_I_PHY_ID)) {
+ /* Vidalia Phy, set the downshift counter to 5x */
+ phy_data &= ~(M88EC018_EPSCR_DOWNSHIFT_COUNTER_MASK);
+ phy_data |= M88EC018_EPSCR_DOWNSHIFT_COUNTER_5X;
+ ret_val = e1000_write_phy_reg(hw,
+ M88E1000_EXT_PHY_SPEC_CTRL, phy_data);
+ if (ret_val)
+ return ret_val;
+ } else {
+ /* Configure Master and Slave downshift values */
+ phy_data &= ~(M88E1000_EPSCR_MASTER_DOWNSHIFT_MASK |
+ M88E1000_EPSCR_SLAVE_DOWNSHIFT_MASK);
+ phy_data |= (M88E1000_EPSCR_MASTER_DOWNSHIFT_1X |
+ M88E1000_EPSCR_SLAVE_DOWNSHIFT_1X);
+ ret_val = e1000_write_phy_reg(hw,
+ M88E1000_EXT_PHY_SPEC_CTRL, phy_data);
+ if (ret_val)
+ return ret_val;
+ }
+ }
+
+ /* SW Reset the PHY so all changes take effect */
+ ret_val = e1000_phy_reset(hw);
+ if (ret_val) {
+ DEBUGOUT("Error Resetting the PHY\n");
+ return ret_val;
+ }
+
+ return E1000_SUCCESS;
+}
+
+/********************************************************************
+* Setup auto-negotiation and flow control advertisements,
+* and then perform auto-negotiation.
+*
+* hw - Struct containing variables accessed by shared code
+*********************************************************************/
+static s32 e1000_copper_link_autoneg(struct e1000_hw *hw)
+{
+ s32 ret_val;
+ u16 phy_data;
+
+ DEBUGFUNC("e1000_copper_link_autoneg");
+
+ /* Perform some bounds checking on the hw->autoneg_advertised
+ * parameter. If this variable is zero, then set it to the default.
+ */
+ hw->autoneg_advertised &= AUTONEG_ADVERTISE_SPEED_DEFAULT;
+
+ /* If autoneg_advertised is zero, we assume it was not defaulted
+ * by the calling code so we set to advertise full capability.
+ */
+ if (hw->autoneg_advertised == 0)
+ hw->autoneg_advertised = AUTONEG_ADVERTISE_SPEED_DEFAULT;
+
+ /* IFE phy only supports 10/100 */
+ if (hw->phy_type == e1000_phy_ife)
+ hw->autoneg_advertised &= AUTONEG_ADVERTISE_10_100_ALL;
+
+ DEBUGOUT("Reconfiguring auto-neg advertisement params\n");
+ ret_val = e1000_phy_setup_autoneg(hw);
+ if (ret_val) {
+ DEBUGOUT("Error Setting up Auto-Negotiation\n");
+ return ret_val;
+ }
+ DEBUGOUT("Restarting Auto-Neg\n");
+
+ /* Restart auto-negotiation by setting the Auto Neg Enable bit and
+ * the Auto Neg Restart bit in the PHY control register.
+ */
+ ret_val = e1000_read_phy_reg(hw, PHY_CTRL, &phy_data);
+ if (ret_val)
+ return ret_val;
+
+ phy_data |= (MII_CR_AUTO_NEG_EN | MII_CR_RESTART_AUTO_NEG);
+ ret_val = e1000_write_phy_reg(hw, PHY_CTRL, phy_data);
+ if (ret_val)
+ return ret_val;
+
+ /* Does the user want to wait for Auto-Neg to complete here, or
+ * check at a later time (for example, callback routine).
+ */
+ if (hw->wait_autoneg_complete) {
+ ret_val = e1000_wait_autoneg(hw);
+ if (ret_val) {
+ DEBUGOUT("Error while waiting for autoneg to complete\n");
+ return ret_val;
+ }
+ }
+
+ hw->get_link_status = true;
+
+ return E1000_SUCCESS;
+}
+
+/******************************************************************************
+* Config the MAC and the PHY after link is up.
+* 1) Set up the MAC to the current PHY speed/duplex
+* if we are on 82543. If we
+* are on newer silicon, we only need to configure
+* collision distance in the Transmit Control Register.
+* 2) Set up flow control on the MAC to that established with
+* the link partner.
+* 3) Config DSP to improve Gigabit link quality for some PHY revisions.
+*
+* hw - Struct containing variables accessed by shared code
+******************************************************************************/
+static s32 e1000_copper_link_postconfig(struct e1000_hw *hw)
+{
+ s32 ret_val;
+ DEBUGFUNC("e1000_copper_link_postconfig");
+
+ if (hw->mac_type >= e1000_82544) {
+ e1000_config_collision_dist(hw);
+ } else {
+ ret_val = e1000_config_mac_to_phy(hw);
+ if (ret_val) {
+ DEBUGOUT("Error configuring MAC to PHY settings\n");
+ return ret_val;
+ }
+ }
+ ret_val = e1000_config_fc_after_link_up(hw);
+ if (ret_val) {
+ DEBUGOUT("Error Configuring Flow Control\n");
+ return ret_val;
+ }
+
+ /* Config DSP to improve Giga link quality */
+ if (hw->phy_type == e1000_phy_igp) {
+ ret_val = e1000_config_dsp_after_link_change(hw, true);
+ if (ret_val) {
+ DEBUGOUT("Error Configuring DSP after link up\n");
+ return ret_val;
+ }
+ }
+
+ return E1000_SUCCESS;
+}
+
+/******************************************************************************
+* Detects which PHY is present and sets up the speed and duplex
+*
+* hw - Struct containing variables accessed by shared code
+******************************************************************************/
+static s32 e1000_setup_copper_link(struct e1000_hw *hw)
+{
+ s32 ret_val;
+ u16 i;
+ u16 phy_data;
+ u16 reg_data;
+
+ DEBUGFUNC("e1000_setup_copper_link");
+
+ switch (hw->mac_type) {
+ case e1000_80003es2lan:
+ case e1000_ich8lan:
+ /* Set the mac to wait the maximum time between each
+ * iteration and increase the max iterations when
+ * polling the phy; this fixes erroneous timeouts at 10Mbps. */
+ ret_val = e1000_write_kmrn_reg(hw, GG82563_REG(0x34, 4), 0xFFFF);
+ if (ret_val)
+ return ret_val;
+ ret_val = e1000_read_kmrn_reg(hw, GG82563_REG(0x34, 9), &reg_data);
+ if (ret_val)
+ return ret_val;
+ reg_data |= 0x3F;
+ ret_val = e1000_write_kmrn_reg(hw, GG82563_REG(0x34, 9), reg_data);
+ if (ret_val)
+ return ret_val;
+ default:
+ break;
+ }
+
+ /* Check if it is a valid PHY and set PHY mode if necessary. */
+ ret_val = e1000_copper_link_preconfig(hw);
+ if (ret_val)
+ return ret_val;
+
+ switch (hw->mac_type) {
+ case e1000_80003es2lan:
+ /* Kumeran registers are written-only */
+ reg_data = E1000_KUMCTRLSTA_INB_CTRL_LINK_STATUS_TX_TIMEOUT_DEFAULT;
+ reg_data |= E1000_KUMCTRLSTA_INB_CTRL_DIS_PADDING;
+ ret_val = e1000_write_kmrn_reg(hw, E1000_KUMCTRLSTA_OFFSET_INB_CTRL,
+ reg_data);
+ if (ret_val)
+ return ret_val;
+ break;
+ default:
+ break;
+ }
+
+ if (hw->phy_type == e1000_phy_igp ||
+ hw->phy_type == e1000_phy_igp_3 ||
+ hw->phy_type == e1000_phy_igp_2) {
+ ret_val = e1000_copper_link_igp_setup(hw);
+ if (ret_val)
+ return ret_val;
+ } else if (hw->phy_type == e1000_phy_m88) {
+ ret_val = e1000_copper_link_mgp_setup(hw);
+ if (ret_val)
+ return ret_val;
+ } else if (hw->phy_type == e1000_phy_gg82563) {
+ ret_val = e1000_copper_link_ggp_setup(hw);
+ if (ret_val)
+ return ret_val;
+ }
+
+ if (hw->autoneg) {
+ /* Setup autoneg and flow control advertisement
+ * and perform autonegotiation */
+ ret_val = e1000_copper_link_autoneg(hw);
+ if (ret_val)
+ return ret_val;
+ } else {
+ /* PHY will be set to 10H, 10F, 100H,or 100F
+ * depending on value from forced_speed_duplex. */
+ DEBUGOUT("Forcing speed and duplex\n");
+ ret_val = e1000_phy_force_speed_duplex(hw);
+ if (ret_val) {
+ DEBUGOUT("Error Forcing Speed and Duplex\n");
+ return ret_val;
+ }
+ }
+
+ /* Check link status. Wait up to 100 microseconds for link to become
+ * valid.
+ */
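+ /* The PHY status register is read twice because the link status bit
+ * is latched-low; the second read returns the current link state. */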
+ for (i = 0; i < 10; i++) {
+ ret_val = e1000_read_phy_reg(hw, PHY_STATUS, &phy_data);
+ if (ret_val)
+ return ret_val;
+ ret_val = e1000_read_phy_reg(hw, PHY_STATUS, &phy_data);
+ if (ret_val)
+ return ret_val;
+
+ if (phy_data & MII_SR_LINK_STATUS) {
+ /* Config the MAC and PHY after link is up */
+ ret_val = e1000_copper_link_postconfig(hw);
+ if (ret_val)
+ return ret_val;
+
+ DEBUGOUT("Valid link established!!!\n");
+ return E1000_SUCCESS;
+ }
+ udelay(10);
+ }
+
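+ /* The link did not come up within the polling window above; this is
+ * not treated as an error, so E1000_SUCCESS is still returned. */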
+ DEBUGOUT("Unable to establish link!!!\n");
+ return E1000_SUCCESS;
+}
+
+/******************************************************************************
+* Configure the MAC-to-PHY interface for 10/100Mbps
+*
+* hw - Struct containing variables accessed by shared code
+******************************************************************************/
+static s32 e1000_configure_kmrn_for_10_100(struct e1000_hw *hw, u16 duplex)
+{
+ s32 ret_val = E1000_SUCCESS;
+ u32 tipg;
+ u16 reg_data;
+
+ DEBUGFUNC("e1000_configure_kmrn_for_10_100");
+
+ reg_data = E1000_KUMCTRLSTA_HD_CTRL_10_100_DEFAULT;
+ ret_val = e1000_write_kmrn_reg(hw, E1000_KUMCTRLSTA_OFFSET_HD_CTRL,
+ reg_data);
+ if (ret_val)
+ return ret_val;
+
+ /* Configure Transmit Inter-Packet Gap */
+ tipg = er32(TIPG);
+ tipg &= ~E1000_TIPG_IPGT_MASK;
+ tipg |= DEFAULT_80003ES2LAN_TIPG_IPGT_10_100;
+ ew32(TIPG, tipg);
+
+ ret_val = e1000_read_phy_reg(hw, GG82563_PHY_KMRN_MODE_CTRL, &reg_data);
+
+ if (ret_val)
+ return ret_val;
+
+ if (duplex == HALF_DUPLEX)
+ reg_data |= GG82563_KMCR_PASS_FALSE_CARRIER;
+ else
+ reg_data &= ~GG82563_KMCR_PASS_FALSE_CARRIER;
+
+ ret_val = e1000_write_phy_reg(hw, GG82563_PHY_KMRN_MODE_CTRL, reg_data);
+
+ return ret_val;
+}
+
+static s32 e1000_configure_kmrn_for_1000(struct e1000_hw *hw)
+{
+ s32 ret_val = E1000_SUCCESS;
+ u16 reg_data;
+ u32 tipg;
+
+ DEBUGFUNC("e1000_configure_kmrn_for_1000");
+
+ reg_data = E1000_KUMCTRLSTA_HD_CTRL_1000_DEFAULT;
+ ret_val = e1000_write_kmrn_reg(hw, E1000_KUMCTRLSTA_OFFSET_HD_CTRL,
+ reg_data);
+ if (ret_val)
+ return ret_val;
+
+ /* Configure Transmit Inter-Packet Gap */
+ tipg = er32(TIPG);
+ tipg &= ~E1000_TIPG_IPGT_MASK;
+ tipg |= DEFAULT_80003ES2LAN_TIPG_IPGT_1000;
+ ew32(TIPG, tipg);
+
+ ret_val = e1000_read_phy_reg(hw, GG82563_PHY_KMRN_MODE_CTRL, &reg_data);
+
+ if (ret_val)
+ return ret_val;
+
+ reg_data &= ~GG82563_KMCR_PASS_FALSE_CARRIER;
+ ret_val = e1000_write_phy_reg(hw, GG82563_PHY_KMRN_MODE_CTRL, reg_data);
+
+ return ret_val;
+}
+
+/******************************************************************************
+* Configures PHY autoneg and flow control advertisement settings
+*
+* hw - Struct containing variables accessed by shared code
+******************************************************************************/
+s32 e1000_phy_setup_autoneg(struct e1000_hw *hw)
+{
+ s32 ret_val;
+ u16 mii_autoneg_adv_reg;
+ u16 mii_1000t_ctrl_reg;
+
+ DEBUGFUNC("e1000_phy_setup_autoneg");
+
+ /* Read the MII Auto-Neg Advertisement Register (Address 4). */
+ ret_val = e1000_read_phy_reg(hw, PHY_AUTONEG_ADV, &mii_autoneg_adv_reg);
+ if (ret_val)
+ return ret_val;
+
+ if (hw->phy_type != e1000_phy_ife) {
+ /* Read the MII 1000Base-T Control Register (Address 9). */
+ ret_val = e1000_read_phy_reg(hw, PHY_1000T_CTRL, &mii_1000t_ctrl_reg);
+ if (ret_val)
+ return ret_val;
+ } else
+ mii_1000t_ctrl_reg = 0;
+
+ /* Need to parse both autoneg_advertised and fc and set up
+ * the appropriate PHY registers. First we will parse for
+ * autoneg_advertised software override. Since we can advertise
+ * a plethora of combinations, we need to check each bit
+ * individually.
+ */
+
+ /* First we clear all the 10/100 mb speed bits in the Auto-Neg
+ * Advertisement Register (Address 4) and the 1000 mb speed bits in
+ * the 1000Base-T Control Register (Address 9).
+ */
+ mii_autoneg_adv_reg &= ~REG4_SPEED_MASK;
+ mii_1000t_ctrl_reg &= ~REG9_SPEED_MASK;
+
+ DEBUGOUT1("autoneg_advertised %x\n", hw->autoneg_advertised);
+
+ /* Do we want to advertise 10 Mb Half Duplex? */
+ if (hw->autoneg_advertised & ADVERTISE_10_HALF) {
+ DEBUGOUT("Advertise 10mb Half duplex\n");
+ mii_autoneg_adv_reg |= NWAY_AR_10T_HD_CAPS;
+ }
+
+ /* Do we want to advertise 10 Mb Full Duplex? */
+ if (hw->autoneg_advertised & ADVERTISE_10_FULL) {
+ DEBUGOUT("Advertise 10mb Full duplex\n");
+ mii_autoneg_adv_reg |= NWAY_AR_10T_FD_CAPS;
+ }
+
+ /* Do we want to advertise 100 Mb Half Duplex? */
+ if (hw->autoneg_advertised & ADVERTISE_100_HALF) {
+ DEBUGOUT("Advertise 100mb Half duplex\n");
+ mii_autoneg_adv_reg |= NWAY_AR_100TX_HD_CAPS;
+ }
+
+ /* Do we want to advertise 100 Mb Full Duplex? */
+ if (hw->autoneg_advertised & ADVERTISE_100_FULL) {
+ DEBUGOUT("Advertise 100mb Full duplex\n");
+ mii_autoneg_adv_reg |= NWAY_AR_100TX_FD_CAPS;
+ }
+
+ /* We do not allow the Phy to advertise 1000 Mb Half Duplex */
+ if (hw->autoneg_advertised & ADVERTISE_1000_HALF) {
+ DEBUGOUT("Advertise 1000mb Half duplex requested, request denied!\n");
+ }
+
+ /* Do we want to advertise 1000 Mb Full Duplex? */
+ if (hw->autoneg_advertised & ADVERTISE_1000_FULL) {
+ DEBUGOUT("Advertise 1000mb Full duplex\n");
+ mii_1000t_ctrl_reg |= CR_1000T_FD_CAPS;
+ if (hw->phy_type == e1000_phy_ife) {
+ DEBUGOUT("e1000_phy_ife is a 10/100 PHY. Gigabit speed is not supported.\n");
+ }
+ }
+
+ /* Check for a software override of the flow control settings, and
+ * setup the PHY advertisement registers accordingly. If
+ * auto-negotiation is enabled, then software will have to set the
+ * "PAUSE" bits to the correct value in the Auto-Negotiation
+ * Advertisement Register (PHY_AUTONEG_ADV) and re-start auto-negotiation.
+ *
+ * The possible values of the "fc" parameter are:
+ * 0: Flow control is completely disabled
+ * 1: Rx flow control is enabled (we can receive pause frames
+ * but not send pause frames).
+ * 2: Tx flow control is enabled (we can send pause frames
+ * but we do not support receiving pause frames).
+ * 3: Both Rx and TX flow control (symmetric) are enabled.
+ * other: No software override. The flow control configuration
+ * in the EEPROM is used.
+ */
+ switch (hw->fc) {
+ case E1000_FC_NONE: /* 0 */
+ /* Flow control (RX & TX) is completely disabled by a
+ * software over-ride.
+ */
+ mii_autoneg_adv_reg &= ~(NWAY_AR_ASM_DIR | NWAY_AR_PAUSE);
+ break;
+ case E1000_FC_RX_PAUSE: /* 1 */
+ /* RX Flow control is enabled, and TX Flow control is
+ * disabled, by a software over-ride.
+ */
+ /* Since there really isn't a way to advertise that we are
+ * capable of RX Pause ONLY, we will advertise that we
+ * support both symmetric and asymmetric RX PAUSE. Later
+ * (in e1000_config_fc_after_link_up) we will disable the
+ * hw's ability to send PAUSE frames.
+ */
+ mii_autoneg_adv_reg |= (NWAY_AR_ASM_DIR | NWAY_AR_PAUSE);
+ break;
+ case E1000_FC_TX_PAUSE: /* 2 */
+ /* TX Flow control is enabled, and RX Flow control is
+ * disabled, by a software over-ride.
+ */
+ mii_autoneg_adv_reg |= NWAY_AR_ASM_DIR;
+ mii_autoneg_adv_reg &= ~NWAY_AR_PAUSE;
+ break;
+ case E1000_FC_FULL: /* 3 */
+ /* Flow control (both RX and TX) is enabled by a software
+ * over-ride.
+ */
+ mii_autoneg_adv_reg |= (NWAY_AR_ASM_DIR | NWAY_AR_PAUSE);
+ break;
+ default:
+ DEBUGOUT("Flow control param set incorrectly\n");
+ return -E1000_ERR_CONFIG;
+ }
+
+ ret_val = e1000_write_phy_reg(hw, PHY_AUTONEG_ADV, mii_autoneg_adv_reg);
+ if (ret_val)
+ return ret_val;
+
+ DEBUGOUT1("Auto-Neg Advertising %x\n", mii_autoneg_adv_reg);
+
+ if (hw->phy_type != e1000_phy_ife) {
+ ret_val = e1000_write_phy_reg(hw, PHY_1000T_CTRL, mii_1000t_ctrl_reg);
+ if (ret_val)
+ return ret_val;
+ }
+
+ return E1000_SUCCESS;
+}
+
+/******************************************************************************
+* Force PHY speed and duplex settings to hw->forced_speed_duplex
+*
+* hw - Struct containing variables accessed by shared code
+******************************************************************************/
+static s32 e1000_phy_force_speed_duplex(struct e1000_hw *hw)
+{
+ u32 ctrl;
+ s32 ret_val;
+ u16 mii_ctrl_reg;
+ u16 mii_status_reg;
+ u16 phy_data;
+ u16 i;
+
+ DEBUGFUNC("e1000_phy_force_speed_duplex");
+
+ /* Turn off Flow control if we are forcing speed and duplex. */
+ hw->fc = E1000_FC_NONE;
+
+ DEBUGOUT1("hw->fc = %d\n", hw->fc);
+
+ /* Read the Device Control Register. */
+ ctrl = er32(CTRL);
+
+ /* Set the bits to Force Speed and Duplex in the Device Ctrl Reg. */
+ ctrl |= (E1000_CTRL_FRCSPD | E1000_CTRL_FRCDPX);
+ ctrl &= ~(DEVICE_SPEED_MASK);
+
+ /* Clear the Auto Speed Detect Enable bit. */
+ ctrl &= ~E1000_CTRL_ASDE;
+
+ /* Read the MII Control Register. */
+ ret_val = e1000_read_phy_reg(hw, PHY_CTRL, &mii_ctrl_reg);
+ if (ret_val)
+ return ret_val;
+
+ /* We need to disable autoneg in order to force link and duplex. */
+
+ mii_ctrl_reg &= ~MII_CR_AUTO_NEG_EN;
+
+ /* Are we forcing Full or Half Duplex? */
+ if (hw->forced_speed_duplex == e1000_100_full ||
+ hw->forced_speed_duplex == e1000_10_full) {
+ /* We want to force full duplex so we SET the full duplex bits in the
+ * Device and MII Control Registers.
+ */
+ ctrl |= E1000_CTRL_FD;
+ mii_ctrl_reg |= MII_CR_FULL_DUPLEX;
+ DEBUGOUT("Full Duplex\n");
+ } else {
+ /* We want to force half duplex so we CLEAR the full duplex bits in
+ * the Device and MII Control Registers.
+ */
+ ctrl &= ~E1000_CTRL_FD;
+ mii_ctrl_reg &= ~MII_CR_FULL_DUPLEX;
+ DEBUGOUT("Half Duplex\n");
+ }
+
+ /* Are we forcing 100Mbps??? */
+ if (hw->forced_speed_duplex == e1000_100_full ||
+ hw->forced_speed_duplex == e1000_100_half) {
+ /* Set the 100Mb bit and turn off the 1000Mb and 10Mb bits. */
+ ctrl |= E1000_CTRL_SPD_100;
+ mii_ctrl_reg |= MII_CR_SPEED_100;
+ mii_ctrl_reg &= ~(MII_CR_SPEED_1000 | MII_CR_SPEED_10);
+ DEBUGOUT("Forcing 100mb ");
+ } else {
+ /* Set the 10Mb bit and turn off the 1000Mb and 100Mb bits. */
+ ctrl &= ~(E1000_CTRL_SPD_1000 | E1000_CTRL_SPD_100);
+ mii_ctrl_reg |= MII_CR_SPEED_10;
+ mii_ctrl_reg &= ~(MII_CR_SPEED_1000 | MII_CR_SPEED_100);
+ DEBUGOUT("Forcing 10mb ");
+ }
+
+ e1000_config_collision_dist(hw);
+
+ /* Write the configured values back to the Device Control Reg. */
+ ew32(CTRL, ctrl);
+
+ if ((hw->phy_type == e1000_phy_m88) ||
+ (hw->phy_type == e1000_phy_gg82563)) {
+ ret_val = e1000_read_phy_reg(hw, M88E1000_PHY_SPEC_CTRL, &phy_data);
+ if (ret_val)
+ return ret_val;
+
+ /* Clear Auto-Crossover to force MDI manually. M88E1000 requires MDI
+ * forced whenever speed or duplex are forced.
+ */
+ phy_data &= ~M88E1000_PSCR_AUTO_X_MODE;
+ ret_val = e1000_write_phy_reg(hw, M88E1000_PHY_SPEC_CTRL, phy_data);
+ if (ret_val)
+ return ret_val;
+
+ DEBUGOUT1("M88E1000 PSCR: %x \n", phy_data);
+
+ /* Need to reset the PHY or these changes will be ignored */
+ mii_ctrl_reg |= MII_CR_RESET;
+
+ /* Disable MDI-X support for 10/100 */
+ } else if (hw->phy_type == e1000_phy_ife) {
+ ret_val = e1000_read_phy_reg(hw, IFE_PHY_MDIX_CONTROL, &phy_data);
+ if (ret_val)
+ return ret_val;
+
+ phy_data &= ~IFE_PMC_AUTO_MDIX;
+ phy_data &= ~IFE_PMC_FORCE_MDIX;
+
+ ret_val = e1000_write_phy_reg(hw, IFE_PHY_MDIX_CONTROL, phy_data);
+ if (ret_val)
+ return ret_val;
+
+ } else {
+ /* Clear Auto-Crossover to force MDI manually. IGP requires MDI
+ * forced whenever speed or duplex are forced.
+ */
+ ret_val = e1000_read_phy_reg(hw, IGP01E1000_PHY_PORT_CTRL, &phy_data);
+ if (ret_val)
+ return ret_val;
+
+ phy_data &= ~IGP01E1000_PSCR_AUTO_MDIX;
+ phy_data &= ~IGP01E1000_PSCR_FORCE_MDI_MDIX;
+
+ ret_val = e1000_write_phy_reg(hw, IGP01E1000_PHY_PORT_CTRL, phy_data);
+ if (ret_val)
+ return ret_val;
+ }
+
+ /* Write back the modified PHY MII control register. */
+ ret_val = e1000_write_phy_reg(hw, PHY_CTRL, mii_ctrl_reg);
+ if (ret_val)
+ return ret_val;
+
+ udelay(1);
+
+ /* The wait_autoneg_complete flag may be a little misleading here.
+ * Since we are forcing speed and duplex, Auto-Neg is not enabled.
+ * But we do want to delay for a period while forcing only so we
+ * don't generate false No Link messages. So we will wait here
+ * only if the user has set wait_autoneg_complete to 1, which is
+ * the default.
+ */
+ if (hw->wait_autoneg_complete) {
+ /* We will wait for autoneg to complete. */
+ DEBUGOUT("Waiting for forced speed/duplex link.\n");
+ mii_status_reg = 0;
+
+ /* We will wait for autoneg to complete or 4.5 seconds to expire. */
+ for (i = PHY_FORCE_TIME; i > 0; i--) {
+ /* Read the MII Status Register and wait for the link status bit
+ * to be set.
+ */
+ ret_val = e1000_read_phy_reg(hw, PHY_STATUS, &mii_status_reg);
+ if (ret_val)
+ return ret_val;
+
+ ret_val = e1000_read_phy_reg(hw, PHY_STATUS, &mii_status_reg);
+ if (ret_val)
+ return ret_val;
+
+ if (mii_status_reg & MII_SR_LINK_STATUS) break;
+ msleep(100);
+ }
+ if ((i == 0) &&
+ ((hw->phy_type == e1000_phy_m88) ||
+ (hw->phy_type == e1000_phy_gg82563))) {
+ /* We didn't get link. Reset the DSP and wait again for link. */
+ ret_val = e1000_phy_reset_dsp(hw);
+ if (ret_val) {
+ DEBUGOUT("Error Resetting PHY DSP\n");
+ return ret_val;
+ }
+ }
+ /* This loop will early-out if the link condition has been met. */
+ for (i = PHY_FORCE_TIME; i > 0; i--) {
+ if (mii_status_reg & MII_SR_LINK_STATUS) break;
+ msleep(100);
+ /* Read the MII Status Register and wait for the Link Status bit
+ * to be set.
+ */
+ ret_val = e1000_read_phy_reg(hw, PHY_STATUS, &mii_status_reg);
+ if (ret_val)
+ return ret_val;
+
+ ret_val = e1000_read_phy_reg(hw, PHY_STATUS, &mii_status_reg);
+ if (ret_val)
+ return ret_val;
+ }
+ }
+
+ if (hw->phy_type == e1000_phy_m88) {
+ /* Because we reset the PHY above, we need to re-force TX_CLK in the
+ * Extended PHY Specific Control Register to 25MHz clock. This value
+ * defaults back to a 2.5MHz clock when the PHY is reset.
+ */
+ ret_val = e1000_read_phy_reg(hw, M88E1000_EXT_PHY_SPEC_CTRL, &phy_data);
+ if (ret_val)
+ return ret_val;
+
+ phy_data |= M88E1000_EPSCR_TX_CLK_25;
+ ret_val = e1000_write_phy_reg(hw, M88E1000_EXT_PHY_SPEC_CTRL, phy_data);
+ if (ret_val)
+ return ret_val;
+
+ /* In addition, because of the s/w reset above, we need to enable CRS on
+ * TX. This must be set for both full and half duplex operation.
+ */
+ ret_val = e1000_read_phy_reg(hw, M88E1000_PHY_SPEC_CTRL, &phy_data);
+ if (ret_val)
+ return ret_val;
+
+ phy_data |= M88E1000_PSCR_ASSERT_CRS_ON_TX;
+ ret_val = e1000_write_phy_reg(hw, M88E1000_PHY_SPEC_CTRL, phy_data);
+ if (ret_val)
+ return ret_val;
+
+ if ((hw->mac_type == e1000_82544 || hw->mac_type == e1000_82543) &&
+ (!hw->autoneg) && (hw->forced_speed_duplex == e1000_10_full ||
+ hw->forced_speed_duplex == e1000_10_half)) {
+ ret_val = e1000_polarity_reversal_workaround(hw);
+ if (ret_val)
+ return ret_val;
+ }
+ } else if (hw->phy_type == e1000_phy_gg82563) {
+ /* The TX_CLK of the Extended PHY Specific Control Register defaults
+ * to 2.5MHz on a reset. We need to re-force it back to 25MHz, if
+ * we're not in a forced 10Mbps configuration. */
+ ret_val = e1000_read_phy_reg(hw, GG82563_PHY_MAC_SPEC_CTRL, &phy_data);
+ if (ret_val)
+ return ret_val;
+
+ phy_data &= ~GG82563_MSCR_TX_CLK_MASK;
+ if ((hw->forced_speed_duplex == e1000_10_full) ||
+ (hw->forced_speed_duplex == e1000_10_half))
+ phy_data |= GG82563_MSCR_TX_CLK_10MBPS_2_5MHZ;
+ else
+ phy_data |= GG82563_MSCR_TX_CLK_100MBPS_25MHZ;
+
+ /* Also due to the reset, we need to enable CRS on Tx. */
+ phy_data |= GG82563_MSCR_ASSERT_CRS_ON_TX;
+
+ ret_val = e1000_write_phy_reg(hw, GG82563_PHY_MAC_SPEC_CTRL, phy_data);
+ if (ret_val)
+ return ret_val;
+ }
+ return E1000_SUCCESS;
+}
+
+/******************************************************************************
+* Sets the collision distance in the Transmit Control register
+*
+* hw - Struct containing variables accessed by shared code
+*
+* Link should have been established previously. The collision distance is
+* chosen based on the MAC type and written to the COLD field of TCTL.
+******************************************************************************/
+void e1000_config_collision_dist(struct e1000_hw *hw)
+{
+ u32 tctl, coll_dist;
+
+ DEBUGFUNC("e1000_config_collision_dist");
+
+ if (hw->mac_type < e1000_82543)
+ coll_dist = E1000_COLLISION_DISTANCE_82542;
+ else
+ coll_dist = E1000_COLLISION_DISTANCE;
+
+ tctl = er32(TCTL);
+
+ tctl &= ~E1000_TCTL_COLD;
+ tctl |= coll_dist << E1000_COLD_SHIFT;
+
+ ew32(TCTL, tctl);
+ E1000_WRITE_FLUSH();
+}
+
+/******************************************************************************
+* Sets MAC speed and duplex settings to reflect those of the PHY
+*
+* hw - Struct containing variables accessed by shared code
+*
+* The speed and duplex negotiated by the PHY are read from its specific
+* status register and copied into the Device Control register.
+******************************************************************************/
+static s32 e1000_config_mac_to_phy(struct e1000_hw *hw)
+{
+ u32 ctrl;
+ s32 ret_val;
+ u16 phy_data;
+
+ DEBUGFUNC("e1000_config_mac_to_phy");
+
+ /* 82544 or newer MAC, Auto Speed Detection takes care of
+ * MAC speed/duplex configuration.*/
+ if (hw->mac_type >= e1000_82544)
+ return E1000_SUCCESS;
+
+ /* Read the Device Control Register and set the bits to Force Speed
+ * and Duplex.
+ */
+ ctrl = er32(CTRL);
+ ctrl |= (E1000_CTRL_FRCSPD | E1000_CTRL_FRCDPX);
+ ctrl &= ~(E1000_CTRL_SPD_SEL | E1000_CTRL_ILOS);
+
+ /* Set up duplex in the Device Control and Transmit Control
+ * registers depending on negotiated values.
+ */
+ ret_val = e1000_read_phy_reg(hw, M88E1000_PHY_SPEC_STATUS, &phy_data);
+ if (ret_val)
+ return ret_val;
+
+ if (phy_data & M88E1000_PSSR_DPLX)
+ ctrl |= E1000_CTRL_FD;
+ else
+ ctrl &= ~E1000_CTRL_FD;
+
+ e1000_config_collision_dist(hw);
+
+ /* Set up speed in the Device Control register depending on
+ * negotiated values.
+ */
+ if ((phy_data & M88E1000_PSSR_SPEED) == M88E1000_PSSR_1000MBS)
+ ctrl |= E1000_CTRL_SPD_1000;
+ else if ((phy_data & M88E1000_PSSR_SPEED) == M88E1000_PSSR_100MBS)
+ ctrl |= E1000_CTRL_SPD_100;
+
+ /* Write the configured values back to the Device Control Reg. */
+ ew32(CTRL, ctrl);
+ return E1000_SUCCESS;
+}
+
+/******************************************************************************
+ * Forces the MAC's flow control settings.
+ *
+ * hw - Struct containing variables accessed by shared code
+ *
+ * Sets the TFCE and RFCE bits in the device control register to reflect
+ * the adapter settings. TFCE and RFCE need to be explicitly set by
+ * software when a Copper PHY is used because autonegotiation is managed
+ * by the PHY rather than the MAC. Software must also configure these
+ * bits when link is forced on a fiber connection.
+ *****************************************************************************/
+s32 e1000_force_mac_fc(struct e1000_hw *hw)
+{
+ u32 ctrl;
+
+ DEBUGFUNC("e1000_force_mac_fc");
+
+ /* Get the current configuration of the Device Control Register */
+ ctrl = er32(CTRL);
+
+ /* Because we didn't get link via the internal auto-negotiation
+ * mechanism (we either forced link or we got link via PHY
+ * auto-neg), we have to manually enable/disable transmit and
+ * receive flow control.
+ *
+ * The "Case" statement below enables/disables flow control
+ * according to the "hw->fc" parameter.
+ *
+ * The possible values of the "fc" parameter are:
+ * 0: Flow control is completely disabled
+ * 1: Rx flow control is enabled (we can receive pause
+ * frames but not send pause frames).
+ * 2: Tx flow control is enabled (we can send pause frames
+ * but we do not receive pause frames).
+ * 3: Both Rx and Tx flow control (symmetric) are enabled.
+ * other: No other values should be possible at this point.
+ */
+
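+ /* The numeric values listed above correspond to the E1000_FC_NONE,
+ * E1000_FC_RX_PAUSE, E1000_FC_TX_PAUSE and E1000_FC_FULL cases handled
+ * in the switch below.
+ */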
+ switch (hw->fc) {
+ case E1000_FC_NONE:
+ ctrl &= (~(E1000_CTRL_TFCE | E1000_CTRL_RFCE));
+ break;
+ case E1000_FC_RX_PAUSE:
+ ctrl &= (~E1000_CTRL_TFCE);
+ ctrl |= E1000_CTRL_RFCE;
+ break;
+ case E1000_FC_TX_PAUSE:
+ ctrl &= (~E1000_CTRL_RFCE);
+ ctrl |= E1000_CTRL_TFCE;
+ break;
+ case E1000_FC_FULL:
+ ctrl |= (E1000_CTRL_TFCE | E1000_CTRL_RFCE);
+ break;
+ default:
+ DEBUGOUT("Flow control param set incorrectly\n");
+ return -E1000_ERR_CONFIG;
+ }
+
+ /* Disable TX Flow Control for 82542 (rev 2.0) */
+ if (hw->mac_type == e1000_82542_rev2_0)
+ ctrl &= (~E1000_CTRL_TFCE);
+
+ ew32(CTRL, ctrl);
+ return E1000_SUCCESS;
+}
+
+/******************************************************************************
+ * Configures flow control settings after link is established
+ *
+ * hw - Struct containing variables accessed by shared code
+ *
+ * Should be called immediately after a valid link has been established.
+ * Forces MAC flow control settings if link was forced. When in MII/GMII mode
+ * and autonegotiation is enabled, the MAC flow control settings will be set
+ * based on the flow control negotiated by the PHY. In TBI mode, the TFCE
+ * and RFCE bits will be automatically set to the negotiated flow control mode.
+ *****************************************************************************/
+static s32 e1000_config_fc_after_link_up(struct e1000_hw *hw)
+{
+ s32 ret_val;
+ u16 mii_status_reg;
+ u16 mii_nway_adv_reg;
+ u16 mii_nway_lp_ability_reg;
+ u16 speed;
+ u16 duplex;
+
+ DEBUGFUNC("e1000_config_fc_after_link_up");
+
+ /* Check for the case where we have fiber media and auto-neg failed
+ * so we had to force link. In this case, we need to force the
+ * configuration of the MAC to match the "fc" parameter.
+ */
+ if (((hw->media_type == e1000_media_type_fiber) && (hw->autoneg_failed)) ||
+ ((hw->media_type == e1000_media_type_internal_serdes) &&
+ (hw->autoneg_failed)) ||
+ ((hw->media_type == e1000_media_type_copper) && (!hw->autoneg))) {
+ ret_val = e1000_force_mac_fc(hw);
+ if (ret_val) {
+ DEBUGOUT("Error forcing flow control settings\n");
+ return ret_val;
+ }
+ }
+
+ /* Check for the case where we have copper media and auto-neg is
+ * enabled. In this case, we need to check and see if Auto-Neg
+ * has completed, and if so, how the PHY and link partner has
+ * flow control configured.
+ */
+ if ((hw->media_type == e1000_media_type_copper) && hw->autoneg) {
+ /* Read the MII Status Register and check to see if AutoNeg
+ * has completed. We read this twice because this reg has
+ * some "sticky" (latched) bits.
+ */
+ ret_val = e1000_read_phy_reg(hw, PHY_STATUS, &mii_status_reg);
+ if (ret_val)
+ return ret_val;
+ ret_val = e1000_read_phy_reg(hw, PHY_STATUS, &mii_status_reg);
+ if (ret_val)
+ return ret_val;
+
+ if (mii_status_reg & MII_SR_AUTONEG_COMPLETE) {
+ /* The AutoNeg process has completed, so we now need to
+ * read both the Auto Negotiation Advertisement Register
+ * (Address 4) and the Auto_Negotiation Base Page Ability
+ * Register (Address 5) to determine how flow control was
+ * negotiated.
+ */
+ ret_val = e1000_read_phy_reg(hw, PHY_AUTONEG_ADV,
+ &mii_nway_adv_reg);
+ if (ret_val)
+ return ret_val;
+ ret_val = e1000_read_phy_reg(hw, PHY_LP_ABILITY,
+ &mii_nway_lp_ability_reg);
+ if (ret_val)
+ return ret_val;
+
+ /* Two bits in the Auto Negotiation Advertisement Register
+ * (Address 4) and two bits in the Auto Negotiation Base
+ * Page Ability Register (Address 5) determine flow control
+ * for both the PHY and the link partner. The following
+ * table, taken out of the IEEE 802.3ab/D6.0 dated March 25,
+ * 1999, describes these PAUSE resolution bits and how flow
+ * control is determined based upon these settings.
+ * NOTE: DC = Don't Care
+ *
+ * LOCAL DEVICE | LINK PARTNER
+ * PAUSE | ASM_DIR | PAUSE | ASM_DIR | NIC Resolution
+ *-------|---------|-------|---------|--------------------
+ * 0 | 0 | DC | DC | E1000_FC_NONE
+ * 0 | 1 | 0 | DC | E1000_FC_NONE
+ * 0 | 1 | 1 | 0 | E1000_FC_NONE
+ * 0 | 1 | 1 | 1 | E1000_FC_TX_PAUSE
+ * 1 | 0 | 0 | DC | E1000_FC_NONE
+ * 1 | DC | 1 | DC | E1000_FC_FULL
+ * 1 | 1 | 0 | 0 | E1000_FC_NONE
+ * 1 | 1 | 0 | 1 | E1000_FC_RX_PAUSE
+ *
+ */
+ /* Are both PAUSE bits set to 1? If so, this implies
+ * Symmetric Flow Control is enabled at both ends. The
+ * ASM_DIR bits are irrelevant per the spec.
+ *
+ * For Symmetric Flow Control:
+ *
+ * LOCAL DEVICE | LINK PARTNER
+ * PAUSE | ASM_DIR | PAUSE | ASM_DIR | Result
+ *-------|---------|-------|---------|--------------------
+ * 1 | DC | 1 | DC | E1000_FC_FULL
+ *
+ */
+ if ((mii_nway_adv_reg & NWAY_AR_PAUSE) &&
+ (mii_nway_lp_ability_reg & NWAY_LPAR_PAUSE)) {
+ /* Now we need to check whether the user selected reception of
+ * pause frames ONLY. In that case, we had to advertise
+ * FULL flow control because we could not advertise RX
+ * ONLY. Hence, we must now check to see if we need to
+ * turn OFF the TRANSMISSION of PAUSE frames.
+ */
+ if (hw->original_fc == E1000_FC_FULL) {
+ hw->fc = E1000_FC_FULL;
+ DEBUGOUT("Flow Control = FULL.\n");
+ } else {
+ hw->fc = E1000_FC_RX_PAUSE;
+ DEBUGOUT("Flow Control = RX PAUSE frames only.\n");
+ }
+ }
+ /* For receiving PAUSE frames ONLY.
+ *
+ * LOCAL DEVICE | LINK PARTNER
+ * PAUSE | ASM_DIR | PAUSE | ASM_DIR | Result
+ *-------|---------|-------|---------|--------------------
+ * 0 | 1 | 1 | 1 | E1000_FC_TX_PAUSE
+ *
+ */
+ else if (!(mii_nway_adv_reg & NWAY_AR_PAUSE) &&
+ (mii_nway_adv_reg & NWAY_AR_ASM_DIR) &&
+ (mii_nway_lp_ability_reg & NWAY_LPAR_PAUSE) &&
+ (mii_nway_lp_ability_reg & NWAY_LPAR_ASM_DIR)) {
+ hw->fc = E1000_FC_TX_PAUSE;
+ DEBUGOUT("Flow Control = TX PAUSE frames only.\n");
+ }
+ /* For transmitting PAUSE frames ONLY.
+ *
+ * LOCAL DEVICE | LINK PARTNER
+ * PAUSE | ASM_DIR | PAUSE | ASM_DIR | Result
+ *-------|---------|-------|---------|--------------------
+ * 1 | 1 | 0 | 1 | E1000_FC_RX_PAUSE
+ *
+ */
+ else if ((mii_nway_adv_reg & NWAY_AR_PAUSE) &&
+ (mii_nway_adv_reg & NWAY_AR_ASM_DIR) &&
+ !(mii_nway_lp_ability_reg & NWAY_LPAR_PAUSE) &&
+ (mii_nway_lp_ability_reg & NWAY_LPAR_ASM_DIR)) {
+ hw->fc = E1000_FC_RX_PAUSE;
+ DEBUGOUT("Flow Control = RX PAUSE frames only.\n");
+ }
+ /* Per the IEEE spec, at this point flow control should be
+ * disabled. However, we want to consider that we could
+ * be connected to a legacy switch that doesn't advertise
+ * desired flow control, but can be forced on the link
+ * partner. So if we advertised no flow control, that is
+ * what we will resolve to. If we advertised some kind of
+ * receive capability (Rx Pause Only or Full Flow Control)
+ * and the link partner advertised none, we will configure
+ * ourselves to enable Rx Flow Control only. We can do
+ * this safely for two reasons: If the link partner really
+ * didn't want flow control enabled, and we enable Rx, no
+ * harm done since we won't be receiving any PAUSE frames
+ * anyway. If the intent on the link partner was to have
+ * flow control enabled, then by us enabling RX only, we
+ * can at least receive pause frames and process them.
+ * This is a good idea because in most cases, since we are
+ * predominantly a server NIC, more times than not we will
+ * be asked to delay transmission of packets than asking
+ * our link partner to pause transmission of frames.
+ */
+ else if ((hw->original_fc == E1000_FC_NONE ||
+ hw->original_fc == E1000_FC_TX_PAUSE) ||
+ hw->fc_strict_ieee) {
+ hw->fc = E1000_FC_NONE;
+ DEBUGOUT("Flow Control = NONE.\n");
+ } else {
+ hw->fc = E1000_FC_RX_PAUSE;
+ DEBUGOUT("Flow Control = RX PAUSE frames only.\n");
+ }
+
+ /* Now we need to do one last check... If we auto-
+ * negotiated to HALF DUPLEX, flow control should not be
+ * enabled per IEEE 802.3 spec.
+ */
+ ret_val = e1000_get_speed_and_duplex(hw, &speed, &duplex);
+ if (ret_val) {
+ DEBUGOUT("Error getting link speed and duplex\n");
+ return ret_val;
+ }
+
+ if (duplex == HALF_DUPLEX)
+ hw->fc = E1000_FC_NONE;
+
+ /* Now we call a subroutine to actually force the MAC
+ * controller to use the correct flow control settings.
+ */
+ ret_val = e1000_force_mac_fc(hw);
+ if (ret_val) {
+ DEBUGOUT("Error forcing flow control settings\n");
+ return ret_val;
+ }
+ } else {
+ DEBUGOUT("Copper PHY and Auto Neg has not completed.\n");
+ }
+ }
+ return E1000_SUCCESS;
+}
+
+/******************************************************************************
+ * Checks to see if the link status of the hardware has changed.
+ *
+ * hw - Struct containing variables accessed by shared code
+ *
+ * Called by any function that needs to check the link status of the adapter.
+ *****************************************************************************/
+s32 e1000_check_for_link(struct e1000_hw *hw)
+{
+ u32 rxcw = 0;
+ u32 ctrl;
+ u32 status;
+ u32 rctl;
+ u32 icr;
+ u32 signal = 0;
+ s32 ret_val;
+ u16 phy_data;
+
+ DEBUGFUNC("e1000_check_for_link");
+
+ ctrl = er32(CTRL);
+ status = er32(STATUS);
+
+ /* On adapters with a MAC newer than 82544, SW Defineable pin 1 will be
+ * set when the optics detect a signal. On older adapters, it will be
+ * cleared when there is a signal. This applies to fiber media only.
+ */
+ if ((hw->media_type == e1000_media_type_fiber) ||
+ (hw->media_type == e1000_media_type_internal_serdes)) {
+ rxcw = er32(RXCW);
+
+ if (hw->media_type == e1000_media_type_fiber) {
+ signal = (hw->mac_type > e1000_82544) ? E1000_CTRL_SWDPIN1 : 0;
+ if (status & E1000_STATUS_LU)
+ hw->get_link_status = false;
+ }
+ }
+
+ /* If we have a copper PHY then we only want to go out to the PHY
+ * registers to see if Auto-Neg has completed and/or if our link
+ * status has changed. The get_link_status flag will be set if we
+ * receive a Link Status Change interrupt or we have Rx Sequence
+ * Errors.
+ */
+ if ((hw->media_type == e1000_media_type_copper) && hw->get_link_status) {
+ /* First we want to see if the MII Status Register reports
+ * link. If so, then we want to get the current speed/duplex
+ * of the PHY.
+ * Read the register twice since the link bit is sticky.
+ */
+ ret_val = e1000_read_phy_reg(hw, PHY_STATUS, &phy_data);
+ if (ret_val)
+ return ret_val;
+ ret_val = e1000_read_phy_reg(hw, PHY_STATUS, &phy_data);
+ if (ret_val)
+ return ret_val;
+
+ if (phy_data & MII_SR_LINK_STATUS) {
+ hw->get_link_status = false;
+ /* Check if there was DownShift, must be checked immediately after
+ * link-up */
+ e1000_check_downshift(hw);
+
+ /* If we are on 82544 or 82543 silicon and speed/duplex
+ * are forced to 10H or 10F, then we will implement the polarity
+ * reversal workaround. We disable interrupts first, and upon
+ * returning, restore the device's interrupt state to its previous
+ * value except for the link status change interrupt which will
+ * happen due to the execution of this workaround.
+ */
+
+ if ((hw->mac_type == e1000_82544 || hw->mac_type == e1000_82543) &&
+ (!hw->autoneg) &&
+ (hw->forced_speed_duplex == e1000_10_full ||
+ hw->forced_speed_duplex == e1000_10_half)) {
+ ew32(IMC, 0xffffffff);
+ ret_val = e1000_polarity_reversal_workaround(hw);
+ icr = er32(ICR);
+ ew32(ICS, (icr & ~E1000_ICS_LSC));
+ ew32(IMS, IMS_ENABLE_MASK);
+ }
+
+ } else {
+ /* No link detected */
+ e1000_config_dsp_after_link_change(hw, false);
+ return 0;
+ }
+
+ /* If we are forcing speed/duplex, then we simply return since
+ * we have already determined whether we have link or not.
+ */
+ if (!hw->autoneg) return -E1000_ERR_CONFIG;
+
+ /* optimize the dsp settings for the igp phy */
+ e1000_config_dsp_after_link_change(hw, true);
+
+ /* We have a M88E1000 PHY and Auto-Neg is enabled. If we
+ * have Si on board that is 82544 or newer, Auto
+ * Speed Detection takes care of MAC speed/duplex
+ * configuration. So we only need to configure Collision
+ * Distance in the MAC. Otherwise, we need to force
+ * speed/duplex on the MAC to the current PHY speed/duplex
+ * settings.
+ */
+ if (hw->mac_type >= e1000_82544)
+ e1000_config_collision_dist(hw);
+ else {
+ ret_val = e1000_config_mac_to_phy(hw);
+ if (ret_val) {
+ DEBUGOUT("Error configuring MAC to PHY settings\n");
+ return ret_val;
+ }
+ }
+
+ /* Configure Flow Control now that Auto-Neg has completed. First, we
+ * need to restore the desired flow control settings because we may
+ * have had to re-autoneg with a different link partner.
+ */
+ ret_val = e1000_config_fc_after_link_up(hw);
+ if (ret_val) {
+ DEBUGOUT("Error configuring flow control\n");
+ return ret_val;
+ }
+
+ /* At this point we know that we are on copper and we have
+ * auto-negotiated link. These are conditions for checking the link
+ * partner capability register. We use the link speed to determine if
+ * TBI compatibility needs to be turned on or off. If the link is not
+ * at gigabit speed, then TBI compatibility is not needed. If we are
+ * at gigabit speed, we turn on TBI compatibility.
+ */
+ if (hw->tbi_compatibility_en) {
+ u16 speed, duplex;
+ ret_val = e1000_get_speed_and_duplex(hw, &speed, &duplex);
+ if (ret_val) {
+ DEBUGOUT("Error getting link speed and duplex\n");
+ return ret_val;
+ }
+ if (speed != SPEED_1000) {
+ /* If link speed is not set to gigabit speed, we do not need
+ * to enable TBI compatibility.
+ */
+ if (hw->tbi_compatibility_on) {
+ /* If we were previously in TBI compatibility mode, turn it off. */
+ rctl = er32(RCTL);
+ rctl &= ~E1000_RCTL_SBP;
+ ew32(RCTL, rctl);
+ hw->tbi_compatibility_on = false;
+ }
+ } else {
+ /* If TBI compatibility was previously off, turn it on. For
+ * compatibility with a TBI link partner, we will store bad
+ * packets. Some frames have an additional byte on the end and
+ * will look like CRC errors to the hardware.
+ */
+ if (!hw->tbi_compatibility_on) {
+ hw->tbi_compatibility_on = true;
+ rctl = er32(RCTL);
+ rctl |= E1000_RCTL_SBP;
+ ew32(RCTL, rctl);
+ }
+ }
+ }
+ }
+ /* If we don't have link (auto-negotiation failed or link partner cannot
+ * auto-negotiate), the cable is plugged in (we have signal), and our
+ * link partner is not trying to auto-negotiate with us (we are receiving
+ * idles or data), we need to force link up. We also need to give
+ * auto-negotiation time to complete, in case the cable was just plugged
+ * in. The autoneg_failed flag does this.
+ */
+ else if ((((hw->media_type == e1000_media_type_fiber) &&
+ ((ctrl & E1000_CTRL_SWDPIN1) == signal)) ||
+ (hw->media_type == e1000_media_type_internal_serdes)) &&
+ (!(status & E1000_STATUS_LU)) &&
+ (!(rxcw & E1000_RXCW_C))) {
+ if (hw->autoneg_failed == 0) {
+ hw->autoneg_failed = 1;
+ return 0;
+ }
+ DEBUGOUT("NOT RXing /C/, disable AutoNeg and force link.\n");
+
+ /* Disable auto-negotiation in the TXCW register */
+ ew32(TXCW, (hw->txcw & ~E1000_TXCW_ANE));
+
+ /* Force link-up and also force full-duplex. */
+ ctrl = er32(CTRL);
+ ctrl |= (E1000_CTRL_SLU | E1000_CTRL_FD);
+ ew32(CTRL, ctrl);
+
+ /* Configure Flow Control after forcing link up. */
+ ret_val = e1000_config_fc_after_link_up(hw);
+ if (ret_val) {
+ DEBUGOUT("Error configuring flow control\n");
+ return ret_val;
+ }
+ }
+ /* If we are forcing link and we are receiving /C/ ordered sets, re-enable
+ * auto-negotiation in the TXCW register and disable forced link in the
+ * Device Control register in an attempt to auto-negotiate with our link
+ * partner.
+ */
+ else if (((hw->media_type == e1000_media_type_fiber) ||
+ (hw->media_type == e1000_media_type_internal_serdes)) &&
+ (ctrl & E1000_CTRL_SLU) && (rxcw & E1000_RXCW_C)) {
+ DEBUGOUT("RXing /C/, enable AutoNeg and stop forcing link.\n");
+ ew32(TXCW, hw->txcw);
+ ew32(CTRL, (ctrl & ~E1000_CTRL_SLU));
+
+ hw->serdes_link_down = false;
+ }
+ /* If we force link for non-auto-negotiation switch, check link status
+ * based on MAC synchronization for internal serdes media type.
+ */
+ else if ((hw->media_type == e1000_media_type_internal_serdes) &&
+ !(E1000_TXCW_ANE & er32(TXCW))) {
+ /* SYNCH bit and IV bit are sticky. */
+ udelay(10);
+ if (E1000_RXCW_SYNCH & er32(RXCW)) {
+ if (!(rxcw & E1000_RXCW_IV)) {
+ hw->serdes_link_down = false;
+ DEBUGOUT("SERDES: Link is up.\n");
+ }
+ } else {
+ hw->serdes_link_down = true;
+ DEBUGOUT("SERDES: Link is down.\n");
+ }
+ }
+ if ((hw->media_type == e1000_media_type_internal_serdes) &&
+ (E1000_TXCW_ANE & er32(TXCW))) {
+ hw->serdes_link_down = !(E1000_STATUS_LU & er32(STATUS));
+ }
+ return E1000_SUCCESS;
+}
+
+/******************************************************************************
+ * Detects the current speed and duplex settings of the hardware.
+ *
+ * hw - Struct containing variables accessed by shared code
+ * speed - Speed of the connection
+ * duplex - Duplex setting of the connection
+ *****************************************************************************/
+s32 e1000_get_speed_and_duplex(struct e1000_hw *hw, u16 *speed, u16 *duplex)
+{
+ u32 status;
+ s32 ret_val;
+ u16 phy_data;
+
+ DEBUGFUNC("e1000_get_speed_and_duplex");
+
+ if (hw->mac_type >= e1000_82543) {
+ status = er32(STATUS);
+ if (status & E1000_STATUS_SPEED_1000) {
+ *speed = SPEED_1000;
+ DEBUGOUT("1000 Mbs, ");
+ } else if (status & E1000_STATUS_SPEED_100) {
+ *speed = SPEED_100;
+ DEBUGOUT("100 Mbs, ");
+ } else {
+ *speed = SPEED_10;
+ DEBUGOUT("10 Mbs, ");
+ }
+
+ if (status & E1000_STATUS_FD) {
+ *duplex = FULL_DUPLEX;
+ DEBUGOUT("Full Duplex\n");
+ } else {
+ *duplex = HALF_DUPLEX;
+ DEBUGOUT(" Half Duplex\n");
+ }
+ } else {
+ DEBUGOUT("1000 Mbs, Full Duplex\n");
+ *speed = SPEED_1000;
+ *duplex = FULL_DUPLEX;
+ }
+
+ /* IGP01 PHY may advertise full duplex operation after speed downgrade even
+ * if it is operating at half duplex. Here we set the duplex settings to
+ * match the duplex in the link partner's capabilities.
+ */
+ if (hw->phy_type == e1000_phy_igp && hw->speed_downgraded) {
+ ret_val = e1000_read_phy_reg(hw, PHY_AUTONEG_EXP, &phy_data);
+ if (ret_val)
+ return ret_val;
+
+ if (!(phy_data & NWAY_ER_LP_NWAY_CAPS))
+ *duplex = HALF_DUPLEX;
+ else {
+ ret_val = e1000_read_phy_reg(hw, PHY_LP_ABILITY, &phy_data);
+ if (ret_val)
+ return ret_val;
+ if ((*speed == SPEED_100 && !(phy_data & NWAY_LPAR_100TX_FD_CAPS)) ||
+ (*speed == SPEED_10 && !(phy_data & NWAY_LPAR_10T_FD_CAPS)))
+ *duplex = HALF_DUPLEX;
+ }
+ }
+
+ if ((hw->mac_type == e1000_80003es2lan) &&
+ (hw->media_type == e1000_media_type_copper)) {
+ if (*speed == SPEED_1000)
+ ret_val = e1000_configure_kmrn_for_1000(hw);
+ else
+ ret_val = e1000_configure_kmrn_for_10_100(hw, *duplex);
+ if (ret_val)
+ return ret_val;
+ }
+
+ if ((hw->phy_type == e1000_phy_igp_3) && (*speed == SPEED_1000)) {
+ ret_val = e1000_kumeran_lock_loss_workaround(hw);
+ if (ret_val)
+ return ret_val;
+ }
+
+ return E1000_SUCCESS;
+}
+
+/******************************************************************************
+* Blocks until autoneg completes or times out (~4.5 seconds)
+*
+* hw - Struct containing variables accessed by shared code
+******************************************************************************/
+static s32 e1000_wait_autoneg(struct e1000_hw *hw)
+{
+ s32 ret_val;
+ u16 i;
+ u16 phy_data;
+
+ DEBUGFUNC("e1000_wait_autoneg");
+ DEBUGOUT("Waiting for Auto-Neg to complete.\n");
+
+ /* We will wait for autoneg to complete or 4.5 seconds to expire. */
+ for (i = PHY_AUTO_NEG_TIME; i > 0; i--) {
+ /* Read the MII Status Register and wait for Auto-Neg
+ * Complete bit to be set.
+ */
+ ret_val = e1000_read_phy_reg(hw, PHY_STATUS, &phy_data);
+ if (ret_val)
+ return ret_val;
+ ret_val = e1000_read_phy_reg(hw, PHY_STATUS, &phy_data);
+ if (ret_val)
+ return ret_val;
+ if (phy_data & MII_SR_AUTONEG_COMPLETE) {
+ return E1000_SUCCESS;
+ }
+ msleep(100);
+ }
+ return E1000_SUCCESS;
+}
+
+/******************************************************************************
+* Raises the Management Data Clock
+*
+* hw - Struct containing variables accessed by shared code
+* ctrl - Device control register's current value
+******************************************************************************/
+static void e1000_raise_mdi_clk(struct e1000_hw *hw, u32 *ctrl)
+{
+ /* Raise the clock input to the Management Data Clock (by setting the MDC
+ * bit), and then delay 10 microseconds.
+ */
+ ew32(CTRL, (*ctrl | E1000_CTRL_MDC));
+ E1000_WRITE_FLUSH();
+ udelay(10);
+}
+
+/******************************************************************************
+* Lowers the Management Data Clock
+*
+* hw - Struct containing variables accessed by shared code
+* ctrl - Device control register's current value
+******************************************************************************/
+static void e1000_lower_mdi_clk(struct e1000_hw *hw, u32 *ctrl)
+{
+ /* Lower the clock input to the Management Data Clock (by clearing the MDC
+ * bit), and then delay 10 microseconds.
+ */
+ ew32(CTRL, (*ctrl & ~E1000_CTRL_MDC));
+ E1000_WRITE_FLUSH();
+ udelay(10);
+}
+
+/******************************************************************************
+* Shifts data bits out to the PHY
+*
+* hw - Struct containing variables accessed by shared code
+* data - Data to send out to the PHY
+* count - Number of bits to shift out
+*
+* Bits are shifted out in MSB to LSB order.
+******************************************************************************/
+static void e1000_shift_out_mdi_bits(struct e1000_hw *hw, u32 data, u16 count)
+{
+ u32 ctrl;
+ u32 mask;
+
+ /* We need to shift "count" number of bits out to the PHY. So, the value
+ * in the "data" parameter will be shifted out to the PHY one bit at a
+ * time. In order to do this, "data" must be broken down into bits.
+ */
+ mask = 0x01;
+ mask <<= (count - 1);
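+ /* For example, a 14-bit command starts with mask = 0x2000 (bit 13), so
+ * the most significant bit of "data" is driven onto MDIO first and the
+ * mask then walks down to bit 0.
+ */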
+
+ ctrl = er32(CTRL);
+
+ /* Set MDIO_DIR and MDC_DIR direction bits to be used as output pins. */
+ ctrl |= (E1000_CTRL_MDIO_DIR | E1000_CTRL_MDC_DIR);
+
+ while (mask) {
+ /* A "1" is shifted out to the PHY by setting the MDIO bit to "1" and
+ * then raising and lowering the Management Data Clock. A "0" is
+ * shifted out to the PHY by setting the MDIO bit to "0" and then
+ * raising and lowering the clock.
+ */
+ if (data & mask)
+ ctrl |= E1000_CTRL_MDIO;
+ else
+ ctrl &= ~E1000_CTRL_MDIO;
+
+ ew32(CTRL, ctrl);
+ E1000_WRITE_FLUSH();
+
+ udelay(10);
+
+ e1000_raise_mdi_clk(hw, &ctrl);
+ e1000_lower_mdi_clk(hw, &ctrl);
+
+ mask = mask >> 1;
+ }
+}
+
+/******************************************************************************
+* Shifts data bits in from the PHY
+*
+* hw - Struct containing variables accessed by shared code
+*
+* Bits are shifted in in MSB to LSB order.
+******************************************************************************/
+static u16 e1000_shift_in_mdi_bits(struct e1000_hw *hw)
+{
+ u32 ctrl;
+ u16 data = 0;
+ u8 i;
+
+ /* In order to read a register from the PHY, we need to shift in a total
+ * of 18 bits from the PHY. The first two bit (turnaround) times are used
+ * to avoid contention on the MDIO pin when a read operation is performed.
+ * These two bits are ignored by us and thrown away. Bits are "shifted in"
+ * by raising the input to the Management Data Clock (setting the MDC bit),
+ * and then reading the value of the MDIO bit.
+ */
+ ctrl = er32(CTRL);
+
+ /* Clear MDIO_DIR (SWDPIO1) to indicate this bit is to be used as input. */
+ ctrl &= ~E1000_CTRL_MDIO_DIR;
+ ctrl &= ~E1000_CTRL_MDIO;
+
+ ew32(CTRL, ctrl);
+ E1000_WRITE_FLUSH();
+
+ /* Raise and Lower the clock before reading in the data. This accounts for
+ * the turnaround bits. The first clock occurred when we clocked out the
+ * last bit of the Register Address.
+ */
+ e1000_raise_mdi_clk(hw, &ctrl);
+ e1000_lower_mdi_clk(hw, &ctrl);
+
+ for (data = 0, i = 0; i < 16; i++) {
+ data = data << 1;
+ e1000_raise_mdi_clk(hw, &ctrl);
+ ctrl = er32(CTRL);
+ /* Check to see if we shifted in a "1". */
+ if (ctrl & E1000_CTRL_MDIO)
+ data |= 1;
+ e1000_lower_mdi_clk(hw, &ctrl);
+ }
+
+ e1000_raise_mdi_clk(hw, &ctrl);
+ e1000_lower_mdi_clk(hw, &ctrl);
+
+ return data;
+}
+
+static s32 e1000_swfw_sync_acquire(struct e1000_hw *hw, u16 mask)
+{
+ u32 swfw_sync = 0;
+ u32 swmask = mask;
+ u32 fwmask = mask << 16;
+ s32 timeout = 200;
+
+ DEBUGFUNC("e1000_swfw_sync_acquire");
+
+ if (hw->swfwhw_semaphore_present)
+ return e1000_get_software_flag(hw);
+
+ if (!hw->swfw_sync_present)
+ return e1000_get_hw_eeprom_semaphore(hw);
+
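+ /* SW_FW_SYNC keeps the software ownership bits in its lower 16 bits and
+ * the matching firmware bits shifted up by 16, so the loop below checks
+ * both "swmask" and "fwmask" (mask << 16) before claiming the resource.
+ */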
+ while (timeout) {
+ if (e1000_get_hw_eeprom_semaphore(hw))
+ return -E1000_ERR_SWFW_SYNC;
+
+ swfw_sync = er32(SW_FW_SYNC);
+ if (!(swfw_sync & (fwmask | swmask))) {
+ break;
+ }
+
+ /* firmware currently using resource (fwmask) */
+ /* or other software thread currently using resource (swmask) */
+ e1000_put_hw_eeprom_semaphore(hw);
+ mdelay(5);
+ timeout--;
+ }
+
+ if (!timeout) {
+ DEBUGOUT("Driver can't access resource, SW_FW_SYNC timeout.\n");
+ return -E1000_ERR_SWFW_SYNC;
+ }
+
+ swfw_sync |= swmask;
+ ew32(SW_FW_SYNC, swfw_sync);
+
+ e1000_put_hw_eeprom_semaphore(hw);
+ return E1000_SUCCESS;
+}
+
+static void e1000_swfw_sync_release(struct e1000_hw *hw, u16 mask)
+{
+ u32 swfw_sync;
+ u32 swmask = mask;
+
+ DEBUGFUNC("e1000_swfw_sync_release");
+
+ if (hw->swfwhw_semaphore_present) {
+ e1000_release_software_flag(hw);
+ return;
+ }
+
+ if (!hw->swfw_sync_present) {
+ e1000_put_hw_eeprom_semaphore(hw);
+ return;
+ }
+
+ /* if (e1000_get_hw_eeprom_semaphore(hw))
+ * return -E1000_ERR_SWFW_SYNC; */
+ while (e1000_get_hw_eeprom_semaphore(hw) != E1000_SUCCESS);
+ /* empty */
+
+ swfw_sync = er32(SW_FW_SYNC);
+ swfw_sync &= ~swmask;
+ ew32(SW_FW_SYNC, swfw_sync);
+
+ e1000_put_hw_eeprom_semaphore(hw);
+}
+
+/*****************************************************************************
+* Reads the value from a PHY register; if the register is on a specific
+* non-zero page, sets the page first.
+*
+* hw - Struct containing variables accessed by shared code
+* reg_addr - address of the PHY register to read
+* phy_data - pointer that receives the value read
+******************************************************************************/
+s32 e1000_read_phy_reg(struct e1000_hw *hw, u32 reg_addr, u16 *phy_data)
+{
+ u32 ret_val;
+ u16 swfw;
+
+ DEBUGFUNC("e1000_read_phy_reg");
+
+ if ((hw->mac_type == e1000_80003es2lan) &&
+ (er32(STATUS) & E1000_STATUS_FUNC_1)) {
+ swfw = E1000_SWFW_PHY1_SM;
+ } else {
+ swfw = E1000_SWFW_PHY0_SM;
+ }
+ if (e1000_swfw_sync_acquire(hw, swfw))
+ return -E1000_ERR_SWFW_SYNC;
+
+ if ((hw->phy_type == e1000_phy_igp ||
+ hw->phy_type == e1000_phy_igp_3 ||
+ hw->phy_type == e1000_phy_igp_2) &&
+ (reg_addr > MAX_PHY_MULTI_PAGE_REG)) {
+ ret_val = e1000_write_phy_reg_ex(hw, IGP01E1000_PHY_PAGE_SELECT,
+ (u16)reg_addr);
+ if (ret_val) {
+ e1000_swfw_sync_release(hw, swfw);
+ return ret_val;
+ }
+ } else if (hw->phy_type == e1000_phy_gg82563) {
+ if (((reg_addr & MAX_PHY_REG_ADDRESS) > MAX_PHY_MULTI_PAGE_REG) ||
+ (hw->mac_type == e1000_80003es2lan)) {
+ /* Select Configuration Page */
+ if ((reg_addr & MAX_PHY_REG_ADDRESS) < GG82563_MIN_ALT_REG) {
+ ret_val = e1000_write_phy_reg_ex(hw, GG82563_PHY_PAGE_SELECT,
+ (u16)((u16)reg_addr >> GG82563_PAGE_SHIFT));
+ } else {
+ /* Use Alternative Page Select register to access
+ * registers 30 and 31
+ */
+ ret_val = e1000_write_phy_reg_ex(hw,
+ GG82563_PHY_PAGE_SELECT_ALT,
+ (u16)((u16)reg_addr >> GG82563_PAGE_SHIFT));
+ }
+
+ if (ret_val) {
+ e1000_swfw_sync_release(hw, swfw);
+ return ret_val;
+ }
+ }
+ }
+
+ ret_val = e1000_read_phy_reg_ex(hw, MAX_PHY_REG_ADDRESS & reg_addr,
+ phy_data);
+
+ e1000_swfw_sync_release(hw, swfw);
+ return ret_val;
+}
+
+static s32 e1000_read_phy_reg_ex(struct e1000_hw *hw, u32 reg_addr,
+ u16 *phy_data)
+{
+ u32 i;
+ u32 mdic = 0;
+ const u32 phy_addr = 1;
+
+ DEBUGFUNC("e1000_read_phy_reg_ex");
+
+ if (reg_addr > MAX_PHY_REG_ADDRESS) {
+ DEBUGOUT1("PHY Address %d is out of range\n", reg_addr);
+ return -E1000_ERR_PARAM;
+ }
+
+ if (hw->mac_type > e1000_82543) {
+ /* Set up Op-code, Phy Address, and register address in the MDI
+ * Control register. The MAC will take care of interfacing with the
+ * PHY to retrieve the desired data.
+ */
+ mdic = ((reg_addr << E1000_MDIC_REG_SHIFT) |
+ (phy_addr << E1000_MDIC_PHY_SHIFT) |
+ (E1000_MDIC_OP_READ));
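+ /* The MAC signals completion by setting E1000_MDIC_READY (or
+ * E1000_MDIC_ERROR on failure); the register contents then sit in
+ * the low 16 bits of MDIC, as the polling loop below relies on.
+ */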
+
+ ew32(MDIC, mdic);
+
+ /* Poll the ready bit to see if the MDI read completed */
+ for (i = 0; i < 64; i++) {
+ udelay(50);
+ mdic = er32(MDIC);
+ if (mdic & E1000_MDIC_READY) break;
+ }
+ if (!(mdic & E1000_MDIC_READY)) {
+ DEBUGOUT("MDI Read did not complete\n");
+ return -E1000_ERR_PHY;
+ }
+ if (mdic & E1000_MDIC_ERROR) {
+ DEBUGOUT("MDI Error\n");
+ return -E1000_ERR_PHY;
+ }
+ *phy_data = (u16)mdic;
+ } else {
+ /* We must first send a preamble through the MDIO pin to signal the
+ * beginning of an MII instruction. This is done by sending 32
+ * consecutive "1" bits.
+ */
+ e1000_shift_out_mdi_bits(hw, PHY_PREAMBLE, PHY_PREAMBLE_SIZE);
+
+ /* Now combine the next few fields that are required for a read
+ * operation. We use this method instead of calling the
+ * e1000_shift_out_mdi_bits routine five different times. The format of
+ * a MII read instruction consists of a shift out of 14 bits and is
+ * defined as follows:
+ * <Preamble><SOF><Op Code><Phy Addr><Reg Addr>
+ * followed by a shift in of 18 bits. The first two bits shifted in
+ * are TurnAround bits used to avoid contention on the MDIO pin when a
+ * READ operation is performed. These two bits are thrown away
+ * followed by a shift in of 16 bits which contains the desired data.
+ */
+ mdic = ((reg_addr) | (phy_addr << 5) |
+ (PHY_OP_READ << 10) | (PHY_SOF << 12));
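+ /* Resulting 14-bit frame, MSB first: bits [13:12] SOF, [11:10] read
+ * op-code, [9:5] PHY address, [4:0] register address.
+ */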
+
+ e1000_shift_out_mdi_bits(hw, mdic, 14);
+
+ /* Now that we've shifted out the read command to the MII, we need to
+ * "shift in" the 16-bit contents (18 bits total, including the two
+ * turnaround bits) of the requested PHY register.
+ */
+ *phy_data = e1000_shift_in_mdi_bits(hw);
+ }
+ return E1000_SUCCESS;
+}
+
+/******************************************************************************
+* Writes a value to a PHY register
+*
+* hw - Struct containing variables accessed by shared code
+* reg_addr - address of the PHY register to write
+* data - data to write to the PHY
+******************************************************************************/
+s32 e1000_write_phy_reg(struct e1000_hw *hw, u32 reg_addr, u16 phy_data)
+{
+ u32 ret_val;
+ u16 swfw;
+
+ DEBUGFUNC("e1000_write_phy_reg");
+
+ if ((hw->mac_type == e1000_80003es2lan) &&
+ (er32(STATUS) & E1000_STATUS_FUNC_1)) {
+ swfw = E1000_SWFW_PHY1_SM;
+ } else {
+ swfw = E1000_SWFW_PHY0_SM;
+ }
+ if (e1000_swfw_sync_acquire(hw, swfw))
+ return -E1000_ERR_SWFW_SYNC;
+
+ if ((hw->phy_type == e1000_phy_igp ||
+ hw->phy_type == e1000_phy_igp_3 ||
+ hw->phy_type == e1000_phy_igp_2) &&
+ (reg_addr > MAX_PHY_MULTI_PAGE_REG)) {
+ ret_val = e1000_write_phy_reg_ex(hw, IGP01E1000_PHY_PAGE_SELECT,
+ (u16)reg_addr);
+ if (ret_val) {
+ e1000_swfw_sync_release(hw, swfw);
+ return ret_val;
+ }
+ } else if (hw->phy_type == e1000_phy_gg82563) {
+ if (((reg_addr & MAX_PHY_REG_ADDRESS) > MAX_PHY_MULTI_PAGE_REG) ||
+ (hw->mac_type == e1000_80003es2lan)) {
+ /* Select Configuration Page */
+ if ((reg_addr & MAX_PHY_REG_ADDRESS) < GG82563_MIN_ALT_REG) {
+ ret_val = e1000_write_phy_reg_ex(hw, GG82563_PHY_PAGE_SELECT,
+ (u16)((u16)reg_addr >> GG82563_PAGE_SHIFT));
+ } else {
+ /* Use Alternative Page Select register to access
+ * registers 30 and 31
+ */
+ ret_val = e1000_write_phy_reg_ex(hw,
+ GG82563_PHY_PAGE_SELECT_ALT,
+ (u16)((u16)reg_addr >> GG82563_PAGE_SHIFT));
+ }
+
+ if (ret_val) {
+ e1000_swfw_sync_release(hw, swfw);
+ return ret_val;
+ }
+ }
+ }
+
+ ret_val = e1000_write_phy_reg_ex(hw, MAX_PHY_REG_ADDRESS & reg_addr,
+ phy_data);
+
+ e1000_swfw_sync_release(hw, swfw);
+ return ret_val;
+}
+
+static s32 e1000_write_phy_reg_ex(struct e1000_hw *hw, u32 reg_addr,
+ u16 phy_data)
+{
+ u32 i;
+ u32 mdic = 0;
+ const u32 phy_addr = 1;
+
+ DEBUGFUNC("e1000_write_phy_reg_ex");
+
+ if (reg_addr > MAX_PHY_REG_ADDRESS) {
+ DEBUGOUT1("PHY Address %d is out of range\n", reg_addr);
+ return -E1000_ERR_PARAM;
+ }
+
+ if (hw->mac_type > e1000_82543) {
+ /* Set up Op-code, Phy Address, register address, and data intended
+ * for the PHY register in the MDI Control register. The MAC will take
+ * care of interfacing with the PHY to send the desired data.
+ */
+ mdic = (((u32)phy_data) |
+ (reg_addr << E1000_MDIC_REG_SHIFT) |
+ (phy_addr << E1000_MDIC_PHY_SHIFT) |
+ (E1000_MDIC_OP_WRITE));
+
+ ew32(MDIC, mdic);
+
+ /* Poll the ready bit to see if the MDI write completed */
+ for (i = 0; i < 641; i++) {
+ udelay(5);
+ mdic = er32(MDIC);
+ if (mdic & E1000_MDIC_READY) break;
+ }
+ if (!(mdic & E1000_MDIC_READY)) {
+ DEBUGOUT("MDI Write did not complete\n");
+ return -E1000_ERR_PHY;
+ }
+ } else {
+ /* We'll need to use the SW defined pins to shift the write command
+ * out to the PHY. We first send a preamble to the PHY to signal the
+ * beginning of the MII instruction. This is done by sending 32
+ * consecutive "1" bits.
+ */
+ e1000_shift_out_mdi_bits(hw, PHY_PREAMBLE, PHY_PREAMBLE_SIZE);
+
+ /* Now combine the remaining required fields that will indicate a
+ * write operation. We use this method instead of calling the
+ * e1000_shift_out_mdi_bits routine for each field in the command. The
+ * format of a MII write instruction is as follows:
+ * <Preamble><SOF><Op Code><Phy Addr><Reg Addr><Turnaround><Data>.
+ */
+ mdic = ((PHY_TURNAROUND) | (reg_addr << 2) | (phy_addr << 7) |
+ (PHY_OP_WRITE << 12) | (PHY_SOF << 14));
+ mdic <<= 16;
+ mdic |= (u32)phy_data;
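+ /* Resulting 32-bit frame, MSB first: bits [31:30] SOF, [29:28] write
+ * op-code, [27:23] PHY address, [22:18] register address, [17:16]
+ * turnaround, [15:0] data.
+ */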
+
+ e1000_shift_out_mdi_bits(hw, mdic, 32);
+ }
+
+ return E1000_SUCCESS;
+}
+
+static s32 e1000_read_kmrn_reg(struct e1000_hw *hw, u32 reg_addr, u16 *data)
+{
+ u32 reg_val;
+ u16 swfw;
+ DEBUGFUNC("e1000_read_kmrn_reg");
+
+ if ((hw->mac_type == e1000_80003es2lan) &&
+ (er32(STATUS) & E1000_STATUS_FUNC_1)) {
+ swfw = E1000_SWFW_PHY1_SM;
+ } else {
+ swfw = E1000_SWFW_PHY0_SM;
+ }
+ if (e1000_swfw_sync_acquire(hw, swfw))
+ return -E1000_ERR_SWFW_SYNC;
+
+ /* Write register address */
+ reg_val = ((reg_addr << E1000_KUMCTRLSTA_OFFSET_SHIFT) &
+ E1000_KUMCTRLSTA_OFFSET) |
+ E1000_KUMCTRLSTA_REN;
+ ew32(KUMCTRLSTA, reg_val);
+ udelay(2);
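+ /* With the REN bit set, the hardware fetches the Kumeran register
+ * addressed by the OFFSET field; the result is then available in the
+ * low 16 bits of KUMCTRLSTA and is read back below.
+ */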
+
+ /* Read the data returned */
+ reg_val = er32(KUMCTRLSTA);
+ *data = (u16)reg_val;
+
+ e1000_swfw_sync_release(hw, swfw);
+ return E1000_SUCCESS;
+}
+
+static s32 e1000_write_kmrn_reg(struct e1000_hw *hw, u32 reg_addr, u16 data)
+{
+ u32 reg_val;
+ u16 swfw;
+ DEBUGFUNC("e1000_write_kmrn_reg");
+
+ if ((hw->mac_type == e1000_80003es2lan) &&
+ (er32(STATUS) & E1000_STATUS_FUNC_1)) {
+ swfw = E1000_SWFW_PHY1_SM;
+ } else {
+ swfw = E1000_SWFW_PHY0_SM;
+ }
+ if (e1000_swfw_sync_acquire(hw, swfw))
+ return -E1000_ERR_SWFW_SYNC;
+
+ reg_val = ((reg_addr << E1000_KUMCTRLSTA_OFFSET_SHIFT) &
+ E1000_KUMCTRLSTA_OFFSET) | data;
+ ew32(KUMCTRLSTA, reg_val);
+ udelay(2);
+
+ e1000_swfw_sync_release(hw, swfw);
+ return E1000_SUCCESS;
+}
+
+/******************************************************************************
+* Returns the PHY to the power-on reset state
+*
+* hw - Struct containing variables accessed by shared code
+******************************************************************************/
+s32 e1000_phy_hw_reset(struct e1000_hw *hw)
+{
+ u32 ctrl, ctrl_ext;
+ u32 led_ctrl;
+ s32 ret_val;
+ u16 swfw;
+
+ DEBUGFUNC("e1000_phy_hw_reset");
+
+ /* In the case of the phy reset being blocked, it's not an error, we
+ * simply return success without performing the reset. */
+ ret_val = e1000_check_phy_reset_block(hw);
+ if (ret_val)
+ return E1000_SUCCESS;
+
+ DEBUGOUT("Resetting Phy...\n");
+
+ if (hw->mac_type > e1000_82543) {
+ if ((hw->mac_type == e1000_80003es2lan) &&
+ (er32(STATUS) & E1000_STATUS_FUNC_1)) {
+ swfw = E1000_SWFW_PHY1_SM;
+ } else {
+ swfw = E1000_SWFW_PHY0_SM;
+ }
+ if (e1000_swfw_sync_acquire(hw, swfw)) {
+ DEBUGOUT("Unable to acquire swfw sync\n");
+ return -E1000_ERR_SWFW_SYNC;
+ }
+ /* Read the device control register and assert the E1000_CTRL_PHY_RST
+ * bit. Then, take it out of reset.
+ * For pre-e1000_82571 hardware, we delay for 10ms between the assert
+ * and deassert. For e1000_82571 hardware and later, we instead use a
+ * short delay (100us below) between and 10ms after the deassertion.
+ */
+ ctrl = er32(CTRL);
+ ew32(CTRL, ctrl | E1000_CTRL_PHY_RST);
+ E1000_WRITE_FLUSH();
+
+ if (hw->mac_type < e1000_82571)
+ msleep(10);
+ else
+ udelay(100);
+
+ ew32(CTRL, ctrl);
+ E1000_WRITE_FLUSH();
+
+ if (hw->mac_type >= e1000_82571)
+ mdelay(10);
+
+ e1000_swfw_sync_release(hw, swfw);
+ } else {
+ /* Read the Extended Device Control Register, assert the PHY_RESET_DIR
+ * bit to put the PHY into reset. Then, take it out of reset.
+ */
+ ctrl_ext = er32(CTRL_EXT);
+ ctrl_ext |= E1000_CTRL_EXT_SDP4_DIR;
+ ctrl_ext &= ~E1000_CTRL_EXT_SDP4_DATA;
+ ew32(CTRL_EXT, ctrl_ext);
+ E1000_WRITE_FLUSH();
+ msleep(10);
+ ctrl_ext |= E1000_CTRL_EXT_SDP4_DATA;
+ ew32(CTRL_EXT, ctrl_ext);
+ E1000_WRITE_FLUSH();
+ }
+ udelay(150);
+
+ if ((hw->mac_type == e1000_82541) || (hw->mac_type == e1000_82547)) {
+ /* Configure activity LED after PHY reset */
+ led_ctrl = er32(LEDCTL);
+ led_ctrl &= IGP_ACTIVITY_LED_MASK;
+ led_ctrl |= (IGP_ACTIVITY_LED_ENABLE | IGP_LED3_MODE);
+ ew32(LEDCTL, led_ctrl);
+ }
+
+ /* Wait for FW to finish PHY configuration. */
+ ret_val = e1000_get_phy_cfg_done(hw);
+ if (ret_val != E1000_SUCCESS)
+ return ret_val;
+ e1000_release_software_semaphore(hw);
+
+ if ((hw->mac_type == e1000_ich8lan) && (hw->phy_type == e1000_phy_igp_3))
+ ret_val = e1000_init_lcd_from_nvm(hw);
+
+ return ret_val;
+}
+
+/******************************************************************************
+* Resets the PHY
+*
+* hw - Struct containing variables accessed by shared code
+*
+* Sets bit 15 of the MII Control register
+******************************************************************************/
+s32 e1000_phy_reset(struct e1000_hw *hw)
+{
+ s32 ret_val;
+ u16 phy_data;
+
+ DEBUGFUNC("e1000_phy_reset");
+
+ /* In the case of the phy reset being blocked, it's not an error, we
+ * simply return success without performing the reset. */
+ ret_val = e1000_check_phy_reset_block(hw);
+ if (ret_val)
+ return E1000_SUCCESS;
+
+ switch (hw->phy_type) {
+ case e1000_phy_igp:
+ case e1000_phy_igp_2:
+ case e1000_phy_igp_3:
+ case e1000_phy_ife:
+ ret_val = e1000_phy_hw_reset(hw);
+ if (ret_val)
+ return ret_val;
+ break;
+ default:
+ ret_val = e1000_read_phy_reg(hw, PHY_CTRL, &phy_data);
+ if (ret_val)
+ return ret_val;
+
+ phy_data |= MII_CR_RESET;
+ ret_val = e1000_write_phy_reg(hw, PHY_CTRL, phy_data);
+ if (ret_val)
+ return ret_val;
+
+ udelay(1);
+ break;
+ }
+
+ if (hw->phy_type == e1000_phy_igp || hw->phy_type == e1000_phy_igp_2)
+ e1000_phy_init_script(hw);
+
+ return E1000_SUCCESS;
+}
+
+/******************************************************************************
+* Work-around for 82566 power-down: on D3 entry-
+* 1) disable gigabit link
+* 2) write VR power-down enable
+* 3) read it back
+* if successful continue, else issue LCD reset and repeat
+*
+* hw - struct containing variables accessed by shared code
+******************************************************************************/
+void e1000_phy_powerdown_workaround(struct e1000_hw *hw)
+{
+ s32 reg;
+ u16 phy_data;
+ s32 retry = 0;
+
+ DEBUGFUNC("e1000_phy_powerdown_workaround");
+
+ if (hw->phy_type != e1000_phy_igp_3)
+ return;
+
+ do {
+ /* Disable link */
+ reg = er32(PHY_CTRL);
+ ew32(PHY_CTRL, reg | E1000_PHY_CTRL_GBE_DISABLE |
+ E1000_PHY_CTRL_NOND0A_GBE_DISABLE);
+
+ /* Write VR power-down enable - bits 9:8 should be 10b */
+ e1000_read_phy_reg(hw, IGP3_VR_CTRL, &phy_data);
+ phy_data |= (1 << 9);
+ phy_data &= ~(1 << 8);
+ e1000_write_phy_reg(hw, IGP3_VR_CTRL, phy_data);
+
+ /* Read it back and test */
+ e1000_read_phy_reg(hw, IGP3_VR_CTRL, &phy_data);
+ if (((phy_data & IGP3_VR_CTRL_MODE_MASK) == IGP3_VR_CTRL_MODE_SHUT) || retry)
+ break;
+
+ /* Issue PHY reset and repeat at most one more time */
+ reg = er32(CTRL);
+ ew32(CTRL, reg | E1000_CTRL_PHY_RST);
+ retry++;
+ } while (retry);
+
+ return;
+
+}
+
+/******************************************************************************
+* Work-around for 82566 Kumeran PCS lock loss:
+* On link status change (e.g. PCI reset, speed change), when link is up and
+* speed is gigabit-
+* 0) if workaround is optionally disabled do nothing
+* 1) wait 1ms for Kumeran link to come up
+* 2) check Kumeran Diagnostic register PCS lock loss bit
+* 3) if not set the link is locked (all is good), otherwise...
+* 4) reset the PHY
+* 5) repeat up to 10 times
+* Note: this is only called for IGP3 copper when speed is 1gb.
+*
+* hw - struct containing variables accessed by shared code
+******************************************************************************/
+static s32 e1000_kumeran_lock_loss_workaround(struct e1000_hw *hw)
+{
+ s32 ret_val;
+ s32 reg;
+ s32 cnt;
+ u16 phy_data;
+
+ if (hw->kmrn_lock_loss_workaround_disabled)
+ return E1000_SUCCESS;
+
+ /* Make sure link is up before proceeding. If not, just return.
+ * Attempting this while the link is negotiating has been seen to
+ * foul up link stability. */
+ ret_val = e1000_read_phy_reg(hw, PHY_STATUS, &phy_data);
+ ret_val = e1000_read_phy_reg(hw, PHY_STATUS, &phy_data);
+
+ if (phy_data & MII_SR_LINK_STATUS) {
+ for (cnt = 0; cnt < 10; cnt++) {
+ /* read once to clear */
+ ret_val = e1000_read_phy_reg(hw, IGP3_KMRN_DIAG, &phy_data);
+ if (ret_val)
+ return ret_val;
+ /* and again to get new status */
+ ret_val = e1000_read_phy_reg(hw, IGP3_KMRN_DIAG, &phy_data);
+ if (ret_val)
+ return ret_val;
+
+ /* check for PCS lock */
+ if (!(phy_data & IGP3_KMRN_DIAG_PCS_LOCK_LOSS))
+ return E1000_SUCCESS;
+
+ /* Issue PHY reset */
+ e1000_phy_hw_reset(hw);
+ mdelay(5);
+ }
+ /* Disable GigE link negotiation */
+ reg = er32(PHY_CTRL);
+ ew32(PHY_CTRL, reg | E1000_PHY_CTRL_GBE_DISABLE |
+ E1000_PHY_CTRL_NOND0A_GBE_DISABLE);
+
+ /* unable to acquire PCS lock */
+ return E1000_ERR_PHY;
+ }
+
+ return E1000_SUCCESS;
+}
+
+/******************************************************************************
+* Probes the expected PHY address for known PHY IDs
+*
+* hw - Struct containing variables accessed by shared code
+******************************************************************************/
+static s32 e1000_detect_gig_phy(struct e1000_hw *hw)
+{
+ s32 phy_init_status, ret_val;
+ u16 phy_id_high, phy_id_low;
+ bool match = false;
+
+ DEBUGFUNC("e1000_detect_gig_phy");
+
+ if (hw->phy_id != 0)
+ return E1000_SUCCESS;
+
+ /* The 82571 firmware may still be configuring the PHY. In this
+ * case, we cannot access the PHY until the configuration is done. So
+ * we explicitly set the PHY values. */
+ if (hw->mac_type == e1000_82571 ||
+ hw->mac_type == e1000_82572) {
+ hw->phy_id = IGP01E1000_I_PHY_ID;
+ hw->phy_type = e1000_phy_igp_2;
+ return E1000_SUCCESS;
+ }
+
+ /* ESB-2 PHY reads require e1000_phy_gg82563 to be set because of a work-
+ * around that forces PHY page 0 to be set or the reads fail. The rest of
+ * the code in this routine uses e1000_read_phy_reg to read the PHY ID.
+ * So for ESB-2 we need to have this set so our reads won't fail. If the
+ * attached PHY is not an e1000_phy_gg82563, the routines below will figure
+ * this out as well. */
+ if (hw->mac_type == e1000_80003es2lan)
+ hw->phy_type = e1000_phy_gg82563;
+
+ /* Read the PHY ID Registers to identify which PHY is onboard. */
+ ret_val = e1000_read_phy_reg(hw, PHY_ID1, &phy_id_high);
+ if (ret_val)
+ return ret_val;
+
+ hw->phy_id = (u32)(phy_id_high << 16);
+ udelay(20);
+ ret_val = e1000_read_phy_reg(hw, PHY_ID2, &phy_id_low);
+ if (ret_val)
+ return ret_val;
+
+ hw->phy_id |= (u32)(phy_id_low & PHY_REVISION_MASK);
+ hw->phy_revision = (u32)phy_id_low & ~PHY_REVISION_MASK;
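+ /* The 32-bit PHY ID is therefore PHY_ID1 in the upper word and the
+ * non-revision bits of PHY_ID2 in the lower word; the masked-off
+ * revision bits are kept separately in hw->phy_revision.
+ */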
+
+ switch (hw->mac_type) {
+ case e1000_82543:
+ if (hw->phy_id == M88E1000_E_PHY_ID) match = true;
+ break;
+ case e1000_82544:
+ if (hw->phy_id == M88E1000_I_PHY_ID) match = true;
+ break;
+ case e1000_82540:
+ case e1000_82545:
+ case e1000_82545_rev_3:
+ case e1000_82546:
+ case e1000_82546_rev_3:
+ if (hw->phy_id == M88E1011_I_PHY_ID) match = true;
+ break;
+ case e1000_82541:
+ case e1000_82541_rev_2:
+ case e1000_82547:
+ case e1000_82547_rev_2:
+ if (hw->phy_id == IGP01E1000_I_PHY_ID) match = true;
+ break;
+ case e1000_82573:
+ if (hw->phy_id == M88E1111_I_PHY_ID) match = true;
+ break;
+ case e1000_80003es2lan:
+ if (hw->phy_id == GG82563_E_PHY_ID) match = true;
+ break;
+ case e1000_ich8lan:
+ if (hw->phy_id == IGP03E1000_E_PHY_ID) match = true;
+ if (hw->phy_id == IFE_E_PHY_ID) match = true;
+ if (hw->phy_id == IFE_PLUS_E_PHY_ID) match = true;
+ if (hw->phy_id == IFE_C_E_PHY_ID) match = true;
+ break;
+ default:
+ DEBUGOUT1("Invalid MAC type %d\n", hw->mac_type);
+ return -E1000_ERR_CONFIG;
+ }
+ phy_init_status = e1000_set_phy_type(hw);
+
+ if ((match) && (phy_init_status == E1000_SUCCESS)) {
+ DEBUGOUT1("PHY ID 0x%X detected\n", hw->phy_id);
+ return E1000_SUCCESS;
+ }
+ DEBUGOUT1("Invalid PHY ID 0x%X\n", hw->phy_id);
+ return -E1000_ERR_PHY;
+}
+
+/******************************************************************************
+* Resets the PHY's DSP
+*
+* hw - Struct containing variables accessed by shared code
+******************************************************************************/
+static s32 e1000_phy_reset_dsp(struct e1000_hw *hw)
+{
+ s32 ret_val;
+ DEBUGFUNC("e1000_phy_reset_dsp");
+
+ do {
+ if (hw->phy_type != e1000_phy_gg82563) {
+ ret_val = e1000_write_phy_reg(hw, 29, 0x001d);
+ if (ret_val) break;
+ }
+ ret_val = e1000_write_phy_reg(hw, 30, 0x00c1);
+ if (ret_val) break;
+ ret_val = e1000_write_phy_reg(hw, 30, 0x0000);
+ if (ret_val) break;
+ ret_val = E1000_SUCCESS;
+ } while (0);
+
+ return ret_val;
+}
+
+/******************************************************************************
+* Get PHY information from various PHY registers for igp PHY only.
+*
+* hw - Struct containing variables accessed by shared code
+* phy_info - PHY information structure
+******************************************************************************/
+static s32 e1000_phy_igp_get_info(struct e1000_hw *hw,
+ struct e1000_phy_info *phy_info)
+{
+ s32 ret_val;
+ u16 phy_data, min_length, max_length, average;
+ e1000_rev_polarity polarity;
+
+ DEBUGFUNC("e1000_phy_igp_get_info");
+
+ /* The downshift status is checked only once, after link is established,
+ * and it is stored in the hw->speed_downgraded parameter. */
+ phy_info->downshift = (e1000_downshift)hw->speed_downgraded;
+
+ /* Extended 10BASE-T distance: IGP01E1000 does not need to support it. */
+ phy_info->extended_10bt_distance = e1000_10bt_ext_dist_enable_normal;
+
+ /* IGP01E1000 always corrects polarity reversal */
+ phy_info->polarity_correction = e1000_polarity_reversal_enabled;
+
+ /* Check polarity status */
+ ret_val = e1000_check_polarity(hw, &polarity);
+ if (ret_val)
+ return ret_val;
+
+ phy_info->cable_polarity = polarity;
+
+ ret_val = e1000_read_phy_reg(hw, IGP01E1000_PHY_PORT_STATUS, &phy_data);
+ if (ret_val)
+ return ret_val;
+
+ phy_info->mdix_mode = (e1000_auto_x_mode)((phy_data & IGP01E1000_PSSR_MDIX) >>
+ IGP01E1000_PSSR_MDIX_SHIFT);
+
+ if ((phy_data & IGP01E1000_PSSR_SPEED_MASK) ==
+ IGP01E1000_PSSR_SPEED_1000MBPS) {
+ /* Local/Remote Receiver Information are only valid at 1000 Mbps */
+ ret_val = e1000_read_phy_reg(hw, PHY_1000T_STATUS, &phy_data);
+ if (ret_val)
+ return ret_val;
+
+ phy_info->local_rx = ((phy_data & SR_1000T_LOCAL_RX_STATUS) >>
+ SR_1000T_LOCAL_RX_STATUS_SHIFT) ?
+ e1000_1000t_rx_status_ok : e1000_1000t_rx_status_not_ok;
+ phy_info->remote_rx = ((phy_data & SR_1000T_REMOTE_RX_STATUS) >>
+ SR_1000T_REMOTE_RX_STATUS_SHIFT) ?
+ e1000_1000t_rx_status_ok : e1000_1000t_rx_status_not_ok;
+
+ /* Get cable length */
+ ret_val = e1000_get_cable_length(hw, &min_length, &max_length);
+ if (ret_val)
+ return ret_val;
+
+ /* Translate to old method */
+ average = (max_length + min_length) / 2;
+
+ if (average <= e1000_igp_cable_length_50)
+ phy_info->cable_length = e1000_cable_length_50;
+ else if (average <= e1000_igp_cable_length_80)
+ phy_info->cable_length = e1000_cable_length_50_80;
+ else if (average <= e1000_igp_cable_length_110)
+ phy_info->cable_length = e1000_cable_length_80_110;
+ else if (average <= e1000_igp_cable_length_140)
+ phy_info->cable_length = e1000_cable_length_110_140;
+ else
+ phy_info->cable_length = e1000_cable_length_140;
+ }
+
+ return E1000_SUCCESS;
+}
+
+/******************************************************************************
+* Get PHY information from various PHY registers for ife PHY only.
+*
+* hw - Struct containing variables accessed by shared code
+* phy_info - PHY information structure
+******************************************************************************/
+static s32 e1000_phy_ife_get_info(struct e1000_hw *hw,
+ struct e1000_phy_info *phy_info)
+{
+ s32 ret_val;
+ u16 phy_data;
+ e1000_rev_polarity polarity;
+
+ DEBUGFUNC("e1000_phy_ife_get_info");
+
+ phy_info->downshift = (e1000_downshift)hw->speed_downgraded;
+ phy_info->extended_10bt_distance = e1000_10bt_ext_dist_enable_normal;
+
+ ret_val = e1000_read_phy_reg(hw, IFE_PHY_SPECIAL_CONTROL, &phy_data);
+ if (ret_val)
+ return ret_val;
+ phy_info->polarity_correction =
+ ((phy_data & IFE_PSC_AUTO_POLARITY_DISABLE) >>
+ IFE_PSC_AUTO_POLARITY_DISABLE_SHIFT) ?
+ e1000_polarity_reversal_disabled : e1000_polarity_reversal_enabled;
+
+ if (phy_info->polarity_correction == e1000_polarity_reversal_enabled) {
+ ret_val = e1000_check_polarity(hw, &polarity);
+ if (ret_val)
+ return ret_val;
+ } else {
+ /* Polarity is forced. */
+ polarity = ((phy_data & IFE_PSC_FORCE_POLARITY) >>
+ IFE_PSC_FORCE_POLARITY_SHIFT) ?
+ e1000_rev_polarity_reversed : e1000_rev_polarity_normal;
+ }
+ phy_info->cable_polarity = polarity;
+
+ ret_val = e1000_read_phy_reg(hw, IFE_PHY_MDIX_CONTROL, &phy_data);
+ if (ret_val)
+ return ret_val;
+
+ phy_info->mdix_mode = (e1000_auto_x_mode)
+ ((phy_data & (IFE_PMC_AUTO_MDIX | IFE_PMC_FORCE_MDIX)) >>
+ IFE_PMC_MDIX_MODE_SHIFT);
+
+ return E1000_SUCCESS;
+}
+
+/******************************************************************************
+* Get PHY information from various PHY registers for m88 PHY only.
+*
+* hw - Struct containing variables accessed by shared code
+* phy_info - PHY information structure
+******************************************************************************/
+static s32 e1000_phy_m88_get_info(struct e1000_hw *hw,
+ struct e1000_phy_info *phy_info)
+{
+ s32 ret_val;
+ u16 phy_data;
+ e1000_rev_polarity polarity;
+
+ DEBUGFUNC("e1000_phy_m88_get_info");
+
+ /* The downshift status is checked only once, after link is established,
+ * and it is stored in the hw->speed_downgraded parameter. */
+ phy_info->downshift = (e1000_downshift)hw->speed_downgraded;
+
+ ret_val = e1000_read_phy_reg(hw, M88E1000_PHY_SPEC_CTRL, &phy_data);
+ if (ret_val)
+ return ret_val;
+
+ phy_info->extended_10bt_distance =
+ ((phy_data & M88E1000_PSCR_10BT_EXT_DIST_ENABLE) >>
+ M88E1000_PSCR_10BT_EXT_DIST_ENABLE_SHIFT) ?
+ e1000_10bt_ext_dist_enable_lower : e1000_10bt_ext_dist_enable_normal;
+
+ phy_info->polarity_correction =
+ ((phy_data & M88E1000_PSCR_POLARITY_REVERSAL) >>
+ M88E1000_PSCR_POLARITY_REVERSAL_SHIFT) ?
+ e1000_polarity_reversal_disabled : e1000_polarity_reversal_enabled;
+
+ /* Check polarity status */
+ ret_val = e1000_check_polarity(hw, &polarity);
+ if (ret_val)
+ return ret_val;
+ phy_info->cable_polarity = polarity;
+
+ ret_val = e1000_read_phy_reg(hw, M88E1000_PHY_SPEC_STATUS, &phy_data);
+ if (ret_val)
+ return ret_val;
+
+ phy_info->mdix_mode = (e1000_auto_x_mode)((phy_data & M88E1000_PSSR_MDIX) >>
+ M88E1000_PSSR_MDIX_SHIFT);
+
+ if ((phy_data & M88E1000_PSSR_SPEED) == M88E1000_PSSR_1000MBS) {
+ /* Cable Length Estimation and Local/Remote Receiver Information
+ * are only valid at 1000 Mbps.
+ */
+ if (hw->phy_type != e1000_phy_gg82563) {
+ phy_info->cable_length = (e1000_cable_length)((phy_data & M88E1000_PSSR_CABLE_LENGTH) >>
+ M88E1000_PSSR_CABLE_LENGTH_SHIFT);
+ } else {
+ ret_val = e1000_read_phy_reg(hw, GG82563_PHY_DSP_DISTANCE,
+ &phy_data);
+ if (ret_val)
+ return ret_val;
+
+ phy_info->cable_length = (e1000_cable_length)(phy_data & GG82563_DSPD_CABLE_LENGTH);
+ }
+
+ ret_val = e1000_read_phy_reg(hw, PHY_1000T_STATUS, &phy_data);
+ if (ret_val)
+ return ret_val;
+
+ phy_info->local_rx = ((phy_data & SR_1000T_LOCAL_RX_STATUS) >>
+ SR_1000T_LOCAL_RX_STATUS_SHIFT) ?
+ e1000_1000t_rx_status_ok : e1000_1000t_rx_status_not_ok;
+ phy_info->remote_rx = ((phy_data & SR_1000T_REMOTE_RX_STATUS) >>
+ SR_1000T_REMOTE_RX_STATUS_SHIFT) ?
+ e1000_1000t_rx_status_ok : e1000_1000t_rx_status_not_ok;
+
+ }
+
+ return E1000_SUCCESS;
+}
+
+/******************************************************************************
+* Get PHY information from various PHY registers
+*
+* hw - Struct containing variables accessed by shared code
+* phy_info - PHY information structure
+******************************************************************************/
+s32 e1000_phy_get_info(struct e1000_hw *hw, struct e1000_phy_info *phy_info)
+{
+ s32 ret_val;
+ u16 phy_data;
+
+ DEBUGFUNC("e1000_phy_get_info");
+
+ phy_info->cable_length = e1000_cable_length_undefined;
+ phy_info->extended_10bt_distance = e1000_10bt_ext_dist_enable_undefined;
+ phy_info->cable_polarity = e1000_rev_polarity_undefined;
+ phy_info->downshift = e1000_downshift_undefined;
+ phy_info->polarity_correction = e1000_polarity_reversal_undefined;
+ phy_info->mdix_mode = e1000_auto_x_mode_undefined;
+ phy_info->local_rx = e1000_1000t_rx_status_undefined;
+ phy_info->remote_rx = e1000_1000t_rx_status_undefined;
+
+ if (hw->media_type != e1000_media_type_copper) {
+ DEBUGOUT("PHY info is only valid for copper media\n");
+ return -E1000_ERR_CONFIG;
+ }
+
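+ /* The link status bit in the MII status register is latched low, so the
+ * register is read twice so that the current link state is reported. */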
+ ret_val = e1000_read_phy_reg(hw, PHY_STATUS, &phy_data);
+ if (ret_val)
+ return ret_val;
+
+ ret_val = e1000_read_phy_reg(hw, PHY_STATUS, &phy_data);
+ if (ret_val)
+ return ret_val;
+
+ if ((phy_data & MII_SR_LINK_STATUS) != MII_SR_LINK_STATUS) {
+ DEBUGOUT("PHY info is only valid if link is up\n");
+ return -E1000_ERR_CONFIG;
+ }
+
+ if (hw->phy_type == e1000_phy_igp ||
+ hw->phy_type == e1000_phy_igp_3 ||
+ hw->phy_type == e1000_phy_igp_2)
+ return e1000_phy_igp_get_info(hw, phy_info);
+ else if (hw->phy_type == e1000_phy_ife)
+ return e1000_phy_ife_get_info(hw, phy_info);
+ else
+ return e1000_phy_m88_get_info(hw, phy_info);
+}
+
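+/******************************************************************************
+ * Checks that the configured MDI/MDI-X setting is valid when auto-negotiation
+ * is disabled; otherwise the setting is reset to a forced default.
+ *
+ * hw - Struct containing variables accessed by shared code
+ *****************************************************************************/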
+s32 e1000_validate_mdi_setting(struct e1000_hw *hw)
+{
+ DEBUGFUNC("e1000_validate_mdi_settings");
+
+ if (!hw->autoneg && (hw->mdix == 0 || hw->mdix == 3)) {
+ DEBUGOUT("Invalid MDI setting detected\n");
+ hw->mdix = 1;
+ return -E1000_ERR_CONFIG;
+ }
+ return E1000_SUCCESS;
+}
+
+
+/******************************************************************************
+ * Sets up eeprom variables in the hw struct. Must be called after mac_type
+ * is configured. Additionally, if this is ICH8, the flash controller GbE
+ * registers must be mapped, or this will crash.
+ *
+ * hw - Struct containing variables accessed by shared code
+ *****************************************************************************/
+s32 e1000_init_eeprom_params(struct e1000_hw *hw)
+{
+ struct e1000_eeprom_info *eeprom = &hw->eeprom;
+ u32 eecd = er32(EECD);
+ s32 ret_val = E1000_SUCCESS;
+ u16 eeprom_size;
+
+ DEBUGFUNC("e1000_init_eeprom_params");
+
+ switch (hw->mac_type) {
+ case e1000_82542_rev2_0:
+ case e1000_82542_rev2_1:
+ case e1000_82543:
+ case e1000_82544:
+ eeprom->type = e1000_eeprom_microwire;
+ eeprom->word_size = 64;
+ eeprom->opcode_bits = 3;
+ eeprom->address_bits = 6;
+ eeprom->delay_usec = 50;
+ eeprom->use_eerd = false;
+ eeprom->use_eewr = false;
+ break;
+ case e1000_82540:
+ case e1000_82545:
+ case e1000_82545_rev_3:
+ case e1000_82546:
+ case e1000_82546_rev_3:
+ eeprom->type = e1000_eeprom_microwire;
+ eeprom->opcode_bits = 3;
+ eeprom->delay_usec = 50;
+ if (eecd & E1000_EECD_SIZE) {
+ eeprom->word_size = 256;
+ eeprom->address_bits = 8;
+ } else {
+ eeprom->word_size = 64;
+ eeprom->address_bits = 6;
+ }
+ eeprom->use_eerd = false;
+ eeprom->use_eewr = false;
+ break;
+ case e1000_82541:
+ case e1000_82541_rev_2:
+ case e1000_82547:
+ case e1000_82547_rev_2:
+ if (eecd & E1000_EECD_TYPE) {
+ eeprom->type = e1000_eeprom_spi;
+ eeprom->opcode_bits = 8;
+ eeprom->delay_usec = 1;
+ if (eecd & E1000_EECD_ADDR_BITS) {
+ eeprom->page_size = 32;
+ eeprom->address_bits = 16;
+ } else {
+ eeprom->page_size = 8;
+ eeprom->address_bits = 8;
+ }
+ } else {
+ eeprom->type = e1000_eeprom_microwire;
+ eeprom->opcode_bits = 3;
+ eeprom->delay_usec = 50;
+ if (eecd & E1000_EECD_ADDR_BITS) {
+ eeprom->word_size = 256;
+ eeprom->address_bits = 8;
+ } else {
+ eeprom->word_size = 64;
+ eeprom->address_bits = 6;
+ }
+ }
+ eeprom->use_eerd = false;
+ eeprom->use_eewr = false;
+ break;
+ case e1000_82571:
+ case e1000_82572:
+ eeprom->type = e1000_eeprom_spi;
+ eeprom->opcode_bits = 8;
+ eeprom->delay_usec = 1;
+ if (eecd & E1000_EECD_ADDR_BITS) {
+ eeprom->page_size = 32;
+ eeprom->address_bits = 16;
+ } else {
+ eeprom->page_size = 8;
+ eeprom->address_bits = 8;
+ }
+ eeprom->use_eerd = false;
+ eeprom->use_eewr = false;
+ break;
+ case e1000_82573:
+ eeprom->type = e1000_eeprom_spi;
+ eeprom->opcode_bits = 8;
+ eeprom->delay_usec = 1;
+ if (eecd & E1000_EECD_ADDR_BITS) {
+ eeprom->page_size = 32;
+ eeprom->address_bits = 16;
+ } else {
+ eeprom->page_size = 8;
+ eeprom->address_bits = 8;
+ }
+ eeprom->use_eerd = true;
+ eeprom->use_eewr = true;
+ if (!e1000_is_onboard_nvm_eeprom(hw)) {
+ eeprom->type = e1000_eeprom_flash;
+ eeprom->word_size = 2048;
+
+ /* Ensure that the Autonomous FLASH update bit is cleared due to
+ * Flash update issue on parts which use a FLASH for NVM. */
+ eecd &= ~E1000_EECD_AUPDEN;
+ ew32(EECD, eecd);
+ }
+ break;
+ case e1000_80003es2lan:
+ eeprom->type = e1000_eeprom_spi;
+ eeprom->opcode_bits = 8;
+ eeprom->delay_usec = 1;
+ if (eecd & E1000_EECD_ADDR_BITS) {
+ eeprom->page_size = 32;
+ eeprom->address_bits = 16;
+ } else {
+ eeprom->page_size = 8;
+ eeprom->address_bits = 8;
+ }
+ eeprom->use_eerd = true;
+ eeprom->use_eewr = false;
+ break;
+ case e1000_ich8lan:
+ {
+ s32 i = 0;
+ u32 flash_size = E1000_READ_ICH_FLASH_REG(hw, ICH_FLASH_GFPREG);
+
+ eeprom->type = e1000_eeprom_ich8;
+ eeprom->use_eerd = false;
+ eeprom->use_eewr = false;
+ eeprom->word_size = E1000_SHADOW_RAM_WORDS;
+
+ /* Initialize the shadow RAM structure (all entries unmodified), but
+ * don't load it from NVM, to save time during driver init */
+ if (hw->eeprom_shadow_ram != NULL) {
+ for (i = 0; i < E1000_SHADOW_RAM_WORDS; i++) {
+ hw->eeprom_shadow_ram[i].modified = false;
+ hw->eeprom_shadow_ram[i].eeprom_word = 0xFFFF;
+ }
+ }
+
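+ /* GFPREG gives the base and limit of the GbE flash region in units of
+ * 4 KB sectors; the region holds two NVM banks, so the final division
+ * below yields the per-bank size in 16-bit words. */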
+ hw->flash_base_addr = (flash_size & ICH_GFPREG_BASE_MASK) *
+ ICH_FLASH_SECTOR_SIZE;
+
+ hw->flash_bank_size = ((flash_size >> 16) & ICH_GFPREG_BASE_MASK) + 1;
+ hw->flash_bank_size -= (flash_size & ICH_GFPREG_BASE_MASK);
+
+ hw->flash_bank_size *= ICH_FLASH_SECTOR_SIZE;
+
+ hw->flash_bank_size /= 2 * sizeof(u16);
+
+ break;
+ }
+ default:
+ break;
+ }
+
+ if (eeprom->type == e1000_eeprom_spi) {
+ /* eeprom_size will be an enum [0..8] that maps to eeprom sizes 128B to
+ * 32KB (incremented by powers of 2).
+ */
+ if (hw->mac_type <= e1000_82547_rev_2) {
+ /* Set to default value for initial eeprom read. */
+ eeprom->word_size = 64;
+ ret_val = e1000_read_eeprom(hw, EEPROM_CFG, 1, &eeprom_size);
+ if (ret_val)
+ return ret_val;
+ eeprom_size = (eeprom_size & EEPROM_SIZE_MASK) >> EEPROM_SIZE_SHIFT;
+ /* 256B eeprom size was not supported in earlier hardware, so we
+ * bump eeprom_size up one to ensure that "1" (which maps to 256B)
+ * is never the result used in the shifting logic below. */
+ if (eeprom_size)
+ eeprom_size++;
+ } else {
+ eeprom_size = (u16)((eecd & E1000_EECD_SIZE_EX_MASK) >>
+ E1000_EECD_SIZE_EX_SHIFT);
+ }
+
+ eeprom->word_size = 1 << (eeprom_size + EEPROM_WORD_SIZE_SHIFT);
+ }
+ return ret_val;
+}
+
+/******************************************************************************
+ * Raises the EEPROM's clock input.
+ *
+ * hw - Struct containing variables accessed by shared code
+ * eecd - EECD's current value
+ *****************************************************************************/
+static void e1000_raise_ee_clk(struct e1000_hw *hw, u32 *eecd)
+{
+ /* Raise the clock input to the EEPROM (by setting the SK bit), and then
+ * wait <delay> microseconds.
+ */
+ *eecd = *eecd | E1000_EECD_SK;
+ ew32(EECD, *eecd);
+ E1000_WRITE_FLUSH();
+ udelay(hw->eeprom.delay_usec);
+}
+
+/******************************************************************************
+ * Lowers the EEPROM's clock input.
+ *
+ * hw - Struct containing variables accessed by shared code
+ * eecd - EECD's current value
+ *****************************************************************************/
+static void e1000_lower_ee_clk(struct e1000_hw *hw, u32 *eecd)
+{
+ /* Lower the clock input to the EEPROM (by clearing the SK bit), and then
+ * wait 50 microseconds.
+ */
+ *eecd = *eecd & ~E1000_EECD_SK;
+ ew32(EECD, *eecd);
+ E1000_WRITE_FLUSH();
+ udelay(hw->eeprom.delay_usec);
+}
+
+/******************************************************************************
+ * Shift data bits out to the EEPROM.
+ *
+ * hw - Struct containing variables accessed by shared code
+ * data - data to send to the EEPROM
+ * count - number of bits to shift out
+ *****************************************************************************/
+static void e1000_shift_out_ee_bits(struct e1000_hw *hw, u16 data, u16 count)
+{
+ struct e1000_eeprom_info *eeprom = &hw->eeprom;
+ u32 eecd;
+ u32 mask;
+
+ /* We need to shift "count" bits out to the EEPROM. So, value in the
+ * "data" parameter will be shifted out to the EEPROM one bit at a time.
+ * In order to do this, "data" must be broken down into bits.
+ */
+ mask = 0x01 << (count - 1);
+ eecd = er32(EECD);
+ if (eeprom->type == e1000_eeprom_microwire) {
+ eecd &= ~E1000_EECD_DO;
+ } else if (eeprom->type == e1000_eeprom_spi) {
+ eecd |= E1000_EECD_DO;
+ }
+ do {
+ /* A "1" is shifted out to the EEPROM by setting bit "DI" to a "1",
+ * and then raising and then lowering the clock (the SK bit controls
+ * the clock input to the EEPROM). A "0" is shifted out to the EEPROM
+ * by setting "DI" to "0" and then raising and then lowering the clock.
+ */
+ eecd &= ~E1000_EECD_DI;
+
+ if (data & mask)
+ eecd |= E1000_EECD_DI;
+
+ ew32(EECD, eecd);
+ E1000_WRITE_FLUSH();
+
+ udelay(eeprom->delay_usec);
+
+ e1000_raise_ee_clk(hw, &eecd);
+ e1000_lower_ee_clk(hw, &eecd);
+
+ mask = mask >> 1;
+
+ } while (mask);
+
+ /* We leave the "DI" bit set to "0" when we leave this routine. */
+ eecd &= ~E1000_EECD_DI;
+ ew32(EECD, eecd);
+}
+
+/******************************************************************************
+ * Shift data bits in from the EEPROM
+ *
+ * hw - Struct containing variables accessed by shared code
+ * count - number of bits to shift in
+ *****************************************************************************/
+static u16 e1000_shift_in_ee_bits(struct e1000_hw *hw, u16 count)
+{
+ u32 eecd;
+ u32 i;
+ u16 data;
+
+ /* In order to read a register from the EEPROM, we need to shift 'count'
+ * bits in from the EEPROM. Bits are "shifted in" by raising the clock
+ * input to the EEPROM (setting the SK bit), and then reading the value of
+ * the "DO" bit. During this "shifting in" process the "DI" bit should
+ * always be clear.
+ */
+
+ eecd = er32(EECD);
+
+ eecd &= ~(E1000_EECD_DO | E1000_EECD_DI);
+ data = 0;
+
+ for (i = 0; i < count; i++) {
+ data = data << 1;
+ e1000_raise_ee_clk(hw, &eecd);
+
+ eecd = er32(EECD);
+
+ eecd &= ~(E1000_EECD_DI);
+ if (eecd & E1000_EECD_DO)
+ data |= 1;
+
+ e1000_lower_ee_clk(hw, &eecd);
+ }
+
+ return data;
+}
+
+/******************************************************************************
+ * Prepares EEPROM for access
+ *
+ * hw - Struct containing variables accessed by shared code
+ *
+ * Lowers EEPROM clock. Clears input pin. Sets the chip select pin. This
+ * function should be called before issuing a command to the EEPROM.
+ *****************************************************************************/
+static s32 e1000_acquire_eeprom(struct e1000_hw *hw)
+{
+ struct e1000_eeprom_info *eeprom = &hw->eeprom;
+ u32 eecd, i=0;
+
+ DEBUGFUNC("e1000_acquire_eeprom");
+
+ if (e1000_swfw_sync_acquire(hw, E1000_SWFW_EEP_SM))
+ return -E1000_ERR_SWFW_SYNC;
+ eecd = er32(EECD);
+
+ if (hw->mac_type != e1000_82573) {
+ /* Request EEPROM Access */
+ if (hw->mac_type > e1000_82544) {
+ eecd |= E1000_EECD_REQ;
+ ew32(EECD, eecd);
+ eecd = er32(EECD);
+ while ((!(eecd & E1000_EECD_GNT)) &&
+ (i < E1000_EEPROM_GRANT_ATTEMPTS)) {
+ i++;
+ udelay(5);
+ eecd = er32(EECD);
+ }
+ if (!(eecd & E1000_EECD_GNT)) {
+ eecd &= ~E1000_EECD_REQ;
+ ew32(EECD, eecd);
+ DEBUGOUT("Could not acquire EEPROM grant\n");
+ e1000_swfw_sync_release(hw, E1000_SWFW_EEP_SM);
+ return -E1000_ERR_EEPROM;
+ }
+ }
+ }
+
+ /* Setup EEPROM for Read/Write */
+
+ if (eeprom->type == e1000_eeprom_microwire) {
+ /* Clear SK and DI */
+ eecd &= ~(E1000_EECD_DI | E1000_EECD_SK);
+ ew32(EECD, eecd);
+
+ /* Set CS */
+ eecd |= E1000_EECD_CS;
+ ew32(EECD, eecd);
+ } else if (eeprom->type == e1000_eeprom_spi) {
+ /* Clear SK and CS */
+ eecd &= ~(E1000_EECD_CS | E1000_EECD_SK);
+ ew32(EECD, eecd);
+ udelay(1);
+ }
+
+ return E1000_SUCCESS;
+}
+
+/******************************************************************************
+ * Returns EEPROM to a "standby" state
+ *
+ * hw - Struct containing variables accessed by shared code
+ *****************************************************************************/
+static void e1000_standby_eeprom(struct e1000_hw *hw)
+{
+ struct e1000_eeprom_info *eeprom = &hw->eeprom;
+ u32 eecd;
+
+ eecd = er32(EECD);
+
+ if (eeprom->type == e1000_eeprom_microwire) {
+ eecd &= ~(E1000_EECD_CS | E1000_EECD_SK);
+ ew32(EECD, eecd);
+ E1000_WRITE_FLUSH();
+ udelay(eeprom->delay_usec);
+
+ /* Clock high */
+ eecd |= E1000_EECD_SK;
+ ew32(EECD, eecd);
+ E1000_WRITE_FLUSH();
+ udelay(eeprom->delay_usec);
+
+ /* Select EEPROM */
+ eecd |= E1000_EECD_CS;
+ ew32(EECD, eecd);
+ E1000_WRITE_FLUSH();
+ udelay(eeprom->delay_usec);
+
+ /* Clock low */
+ eecd &= ~E1000_EECD_SK;
+ ew32(EECD, eecd);
+ E1000_WRITE_FLUSH();
+ udelay(eeprom->delay_usec);
+ } else if (eeprom->type == e1000_eeprom_spi) {
+ /* Toggle CS to flush commands */
+ eecd |= E1000_EECD_CS;
+ ew32(EECD, eecd);
+ E1000_WRITE_FLUSH();
+ udelay(eeprom->delay_usec);
+ eecd &= ~E1000_EECD_CS;
+ ew32(EECD, eecd);
+ E1000_WRITE_FLUSH();
+ udelay(eeprom->delay_usec);
+ }
+}
+
+/******************************************************************************
+ * Terminates a command by inverting the EEPROM's chip select pin
+ *
+ * hw - Struct containing variables accessed by shared code
+ *****************************************************************************/
+static void e1000_release_eeprom(struct e1000_hw *hw)
+{
+ u32 eecd;
+
+ DEBUGFUNC("e1000_release_eeprom");
+
+ eecd = er32(EECD);
+
+ if (hw->eeprom.type == e1000_eeprom_spi) {
+ eecd |= E1000_EECD_CS; /* Pull CS high */
+ eecd &= ~E1000_EECD_SK; /* Lower SCK */
+
+ ew32(EECD, eecd);
+
+ udelay(hw->eeprom.delay_usec);
+ } else if (hw->eeprom.type == e1000_eeprom_microwire) {
+ /* cleanup eeprom */
+
+ /* CS on Microwire is active-high */
+ eecd &= ~(E1000_EECD_CS | E1000_EECD_DI);
+
+ ew32(EECD, eecd);
+
+ /* Rising edge of clock */
+ eecd |= E1000_EECD_SK;
+ ew32(EECD, eecd);
+ E1000_WRITE_FLUSH();
+ udelay(hw->eeprom.delay_usec);
+
+ /* Falling edge of clock */
+ eecd &= ~E1000_EECD_SK;
+ ew32(EECD, eecd);
+ E1000_WRITE_FLUSH();
+ udelay(hw->eeprom.delay_usec);
+ }
+
+ /* Stop requesting EEPROM access */
+ if (hw->mac_type > e1000_82544) {
+ eecd &= ~E1000_EECD_REQ;
+ ew32(EECD, eecd);
+ }
+
+ e1000_swfw_sync_release(hw, E1000_SWFW_EEP_SM);
+}
+
+/******************************************************************************
+ * Polls the SPI EEPROM status register until the device reports ready.
+ *
+ * hw - Struct containing variables accessed by shared code
+ *****************************************************************************/
+static s32 e1000_spi_eeprom_ready(struct e1000_hw *hw)
+{
+ u16 retry_count = 0;
+ u8 spi_stat_reg;
+
+ DEBUGFUNC("e1000_spi_eeprom_ready");
+
+ /* Read "Status Register" repeatedly until the LSB is cleared. The
+ * EEPROM will signal that the command has been completed by clearing
+ * bit 0 of the internal status register. If it's not cleared within
+ * 5 milliseconds, then error out.
+ */
+ retry_count = 0;
+ do {
+ e1000_shift_out_ee_bits(hw, EEPROM_RDSR_OPCODE_SPI,
+ hw->eeprom.opcode_bits);
+ spi_stat_reg = (u8)e1000_shift_in_ee_bits(hw, 8);
+ if (!(spi_stat_reg & EEPROM_STATUS_RDY_SPI))
+ break;
+
+ udelay(5);
+ retry_count += 5;
+
+ e1000_standby_eeprom(hw);
+ } while (retry_count < EEPROM_MAX_RETRY_SPI);
+
+ /* ATMEL SPI write time could vary from 0-20mSec on 3.3V devices (and
+ * only 0-5mSec on 5V devices)
+ */
+ if (retry_count >= EEPROM_MAX_RETRY_SPI) {
+ DEBUGOUT("SPI EEPROM Status error\n");
+ return -E1000_ERR_EEPROM;
+ }
+
+ return E1000_SUCCESS;
+}
+
+/******************************************************************************
+ * Reads a 16 bit word from the EEPROM.
+ *
+ * hw - Struct containing variables accessed by shared code
+ * offset - offset of word in the EEPROM to read
+ * data - word read from the EEPROM
+ * words - number of words to read
+ *****************************************************************************/
+s32 e1000_read_eeprom(struct e1000_hw *hw, u16 offset, u16 words, u16 *data)
+{
+ s32 ret;
+ spin_lock(&e1000_eeprom_lock);
+ ret = e1000_do_read_eeprom(hw, offset, words, data);
+ spin_unlock(&e1000_eeprom_lock);
+ return ret;
+}
+
+static s32 e1000_do_read_eeprom(struct e1000_hw *hw, u16 offset, u16 words, u16 *data)
+{
+ struct e1000_eeprom_info *eeprom = &hw->eeprom;
+ u32 i = 0;
+
+ DEBUGFUNC("e1000_read_eeprom");
+
+ /* If eeprom is not yet detected, do so now */
+ if (eeprom->word_size == 0)
+ e1000_init_eeprom_params(hw);
+
+ /* A check for invalid values: offset too large, too many words, and not
+ * enough words.
+ */
+ if ((offset >= eeprom->word_size) || (words > eeprom->word_size - offset) ||
+ (words == 0)) {
+ DEBUGOUT2("\"words\" parameter out of bounds. offset = %d, size = %d\n", offset, eeprom->word_size);
+ return -E1000_ERR_EEPROM;
+ }
+
+ /* EEPROMs that don't use EERD to read require us to bit-bang the SPI
+ * directly. In this case, we need to acquire the EEPROM so that
+ * FW or other port software does not interrupt.
+ */
+ if (e1000_is_onboard_nvm_eeprom(hw) && !hw->eeprom.use_eerd) {
+ /* Prepare the EEPROM for bit-bang reading */
+ if (e1000_acquire_eeprom(hw) != E1000_SUCCESS)
+ return -E1000_ERR_EEPROM;
+ }
+
+ /* EERD register EEPROM access requires no eeprom acquire/release */
+ if (eeprom->use_eerd)
+ return e1000_read_eeprom_eerd(hw, offset, words, data);
+
+ /* ICH EEPROM access is done via the ICH flash controller */
+ if (eeprom->type == e1000_eeprom_ich8)
+ return e1000_read_eeprom_ich8(hw, offset, words, data);
+
+ /* Set up the SPI or Microwire EEPROM for bit-bang reading. We have
+ * acquired the EEPROM at this point, so any returns should release it */
+ if (eeprom->type == e1000_eeprom_spi) {
+ u16 word_in;
+ u8 read_opcode = EEPROM_READ_OPCODE_SPI;
+
+ if (e1000_spi_eeprom_ready(hw)) {
+ e1000_release_eeprom(hw);
+ return -E1000_ERR_EEPROM;
+ }
+
+ e1000_standby_eeprom(hw);
+
+ /* Some SPI eeproms use the 8th address bit embedded in the opcode */
+ if ((eeprom->address_bits == 8) && (offset >= 128))
+ read_opcode |= EEPROM_A8_OPCODE_SPI;
+
+ /* Send the READ command (opcode + addr) */
+ e1000_shift_out_ee_bits(hw, read_opcode, eeprom->opcode_bits);
+ e1000_shift_out_ee_bits(hw, (u16)(offset*2), eeprom->address_bits);
+
+ /* Read the data. The address of the eeprom internally increments with
+ * each byte (spi) being read, saving on the overhead of eeprom setup
+ * and tear-down. The address counter will roll over if reading beyond
+ * the size of the eeprom, thus allowing the entire memory to be read
+ * starting from any offset. */
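+ /* The low byte of each word is stored first, but bits are shifted in
+ * MSB first, so the two bytes of every word read are swapped below. */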
+ for (i = 0; i < words; i++) {
+ word_in = e1000_shift_in_ee_bits(hw, 16);
+ data[i] = (word_in >> 8) | (word_in << 8);
+ }
+ } else if (eeprom->type == e1000_eeprom_microwire) {
+ for (i = 0; i < words; i++) {
+ /* Send the READ command (opcode + addr) */
+ e1000_shift_out_ee_bits(hw, EEPROM_READ_OPCODE_MICROWIRE,
+ eeprom->opcode_bits);
+ e1000_shift_out_ee_bits(hw, (u16)(offset + i),
+ eeprom->address_bits);
+
+ /* Read the data. For microwire, each word requires the overhead
+ * of eeprom setup and tear-down. */
+ data[i] = e1000_shift_in_ee_bits(hw, 16);
+ e1000_standby_eeprom(hw);
+ }
+ }
+
+ /* End this read operation */
+ e1000_release_eeprom(hw);
+
+ return E1000_SUCCESS;
+}
+
+/******************************************************************************
+ * Reads a 16 bit word from the EEPROM using the EERD register.
+ *
+ * hw - Struct containing variables accessed by shared code
+ * offset - offset of word in the EEPROM to read
+ * data - word read from the EEPROM
+ * words - number of words to read
+ *****************************************************************************/
+static s32 e1000_read_eeprom_eerd(struct e1000_hw *hw, u16 offset, u16 words,
+ u16 *data)
+{
+ u32 i, eerd = 0;
+ s32 error = 0;
+
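+ /* For each word: program the address and START bit into EERD, poll for
+ * DONE, then read the data field back out of EERD. */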
+ for (i = 0; i < words; i++) {
+ eerd = ((offset+i) << E1000_EEPROM_RW_ADDR_SHIFT) +
+ E1000_EEPROM_RW_REG_START;
+
+ ew32(EERD, eerd);
+ error = e1000_poll_eerd_eewr_done(hw, E1000_EEPROM_POLL_READ);
+
+ if (error) {
+ break;
+ }
+ data[i] = (er32(EERD) >> E1000_EEPROM_RW_REG_DATA);
+
+ }
+
+ return error;
+}
+
+/******************************************************************************
+ * Writes 16 bit words to the EEPROM using the EEWR register.
+ *
+ * hw - Struct containing variables accessed by shared code
+ * offset - offset of the first word in the EEPROM to write
+ * data - words to write to the EEPROM
+ * words - number of words to write
+ *****************************************************************************/
+static s32 e1000_write_eeprom_eewr(struct e1000_hw *hw, u16 offset, u16 words,
+ u16 *data)
+{
+ u32 register_value = 0;
+ u32 i = 0;
+ s32 error = 0;
+
+ if (e1000_swfw_sync_acquire(hw, E1000_SWFW_EEP_SM))
+ return -E1000_ERR_SWFW_SYNC;
+
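+ /* For each word: wait for any previous EEWR access to finish, program
+ * the address, data and START bit, then poll again for completion. */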
+ for (i = 0; i < words; i++) {
+ register_value = (data[i] << E1000_EEPROM_RW_REG_DATA) |
+ ((offset+i) << E1000_EEPROM_RW_ADDR_SHIFT) |
+ E1000_EEPROM_RW_REG_START;
+
+ error = e1000_poll_eerd_eewr_done(hw, E1000_EEPROM_POLL_WRITE);
+ if (error) {
+ break;
+ }
+
+ ew32(EEWR, register_value);
+
+ error = e1000_poll_eerd_eewr_done(hw, E1000_EEPROM_POLL_WRITE);
+
+ if (error) {
+ break;
+ }
+ }
+
+ e1000_swfw_sync_release(hw, E1000_SWFW_EEP_SM);
+ return error;
+}
+
+/******************************************************************************
+ * Polls the status bit (bit 1) of the EERD to determine when the read is done.
+ *
+ * hw - Struct containing variables accessed by shared code
+ *****************************************************************************/
+static s32 e1000_poll_eerd_eewr_done(struct e1000_hw *hw, int eerd)
+{
+ u32 attempts = 100000;
+ u32 i, reg = 0;
+ s32 done = E1000_ERR_EEPROM;
+
+ for (i = 0; i < attempts; i++) {
+ if (eerd == E1000_EEPROM_POLL_READ)
+ reg = er32(EERD);
+ else
+ reg = er32(EEWR);
+
+ if (reg & E1000_EEPROM_RW_REG_DONE) {
+ done = E1000_SUCCESS;
+ break;
+ }
+ udelay(5);
+ }
+
+ return done;
+}
+
+/***************************************************************************
+* Description: Determines if the onboard NVM is FLASH or EEPROM.
+*
+* hw - Struct containing variables accessed by shared code
+****************************************************************************/
+static bool e1000_is_onboard_nvm_eeprom(struct e1000_hw *hw)
+{
+ u32 eecd = 0;
+
+ DEBUGFUNC("e1000_is_onboard_nvm_eeprom");
+
+ if (hw->mac_type == e1000_ich8lan)
+ return false;
+
+ if (hw->mac_type == e1000_82573) {
+ eecd = er32(EECD);
+
+ /* Isolate bits 15 & 16 */
+ eecd = ((eecd >> 15) & 0x03);
+
+ /* If both bits are set, device is Flash type */
+ if (eecd == 0x03) {
+ return false;
+ }
+ }
+ return true;
+}
+
+/******************************************************************************
+ * Verifies that the EEPROM has a valid checksum
+ *
+ * hw - Struct containing variables accessed by shared code
+ *
+ * Reads the first 64 16 bit words of the EEPROM and sums the values read.
+ * If the sum of the 64 16 bit words is 0xBABA, the EEPROM's checksum is
+ * valid.
+ *****************************************************************************/
+s32 e1000_validate_eeprom_checksum(struct e1000_hw *hw)
+{
+ u16 checksum = 0;
+ u16 i, eeprom_data;
+
+ DEBUGFUNC("e1000_validate_eeprom_checksum");
+
+ if ((hw->mac_type == e1000_82573) && !e1000_is_onboard_nvm_eeprom(hw)) {
+ /* Check bit 4 of word 10h. If it is 0, firmware is done updating
+ * 10h-12h. Checksum may need to be fixed. */
+ e1000_read_eeprom(hw, 0x10, 1, &eeprom_data);
+ if ((eeprom_data & 0x10) == 0) {
+ /* Read 0x23 and check bit 15. This bit is a 1 when the checksum
+ * has already been fixed. If the checksum is still wrong and this
+ * bit is a 1, we need to return bad checksum. Otherwise, we need
+ * to set this bit to a 1 and update the checksum. */
+ e1000_read_eeprom(hw, 0x23, 1, &eeprom_data);
+ if ((eeprom_data & 0x8000) == 0) {
+ eeprom_data |= 0x8000;
+ e1000_write_eeprom(hw, 0x23, 1, &eeprom_data);
+ e1000_update_eeprom_checksum(hw);
+ }
+ }
+ }
+
+ if (hw->mac_type == e1000_ich8lan) {
+ /* Drivers must allocate the shadow ram structure for the
+ * EEPROM checksum to be updated. Otherwise, this bit as well
+ * as the checksum must both be set correctly for this
+ * validation to pass.
+ */
+ e1000_read_eeprom(hw, 0x19, 1, &eeprom_data);
+ if ((eeprom_data & 0x40) == 0) {
+ eeprom_data |= 0x40;
+ e1000_write_eeprom(hw, 0x19, 1, &eeprom_data);
+ e1000_update_eeprom_checksum(hw);
+ }
+ }
+
+ for (i = 0; i < (EEPROM_CHECKSUM_REG + 1); i++) {
+ if (e1000_read_eeprom(hw, i, 1, &eeprom_data) < 0) {
+ DEBUGOUT("EEPROM Read Error\n");
+ return -E1000_ERR_EEPROM;
+ }
+ checksum += eeprom_data;
+ }
+
+ if (checksum == (u16)EEPROM_SUM)
+ return E1000_SUCCESS;
+ else {
+ DEBUGOUT("EEPROM Checksum Invalid\n");
+ return -E1000_ERR_EEPROM;
+ }
+}
+
+/******************************************************************************
+ * Calculates the EEPROM checksum and writes it to the EEPROM
+ *
+ * hw - Struct containing variables accessed by shared code
+ *
+ * Sums the first 63 16 bit words of the EEPROM. Subtracts the sum from 0xBABA.
+ * Writes the difference to word offset 63 of the EEPROM.
+ *****************************************************************************/
+s32 e1000_update_eeprom_checksum(struct e1000_hw *hw)
+{
+ u32 ctrl_ext;
+ u16 checksum = 0;
+ u16 i, eeprom_data;
+
+ DEBUGFUNC("e1000_update_eeprom_checksum");
+
+ for (i = 0; i < EEPROM_CHECKSUM_REG; i++) {
+ if (e1000_read_eeprom(hw, i, 1, &eeprom_data) < 0) {
+ DEBUGOUT("EEPROM Read Error\n");
+ return -E1000_ERR_EEPROM;
+ }
+ checksum += eeprom_data;
+ }
+ checksum = (u16)EEPROM_SUM - checksum;
+ if (e1000_write_eeprom(hw, EEPROM_CHECKSUM_REG, 1, &checksum) < 0) {
+ DEBUGOUT("EEPROM Write Error\n");
+ return -E1000_ERR_EEPROM;
+ } else if (hw->eeprom.type == e1000_eeprom_flash) {
+ e1000_commit_shadow_ram(hw);
+ } else if (hw->eeprom.type == e1000_eeprom_ich8) {
+ e1000_commit_shadow_ram(hw);
+ /* Reload the EEPROM, or else modifications will not appear
+ * until after the next adapter reset. */
+ ctrl_ext = er32(CTRL_EXT);
+ ctrl_ext |= E1000_CTRL_EXT_EE_RST;
+ ew32(CTRL_EXT, ctrl_ext);
+ msleep(10);
+ }
+ return E1000_SUCCESS;
+}
+
+/******************************************************************************
+ * Parent function for writing words to the different EEPROM types.
+ *
+ * hw - Struct containing variables accessed by shared code
+ * offset - offset within the EEPROM to be written to
+ * words - number of words to write
+ * data - 16 bit word to be written to the EEPROM
+ *
+ * If e1000_update_eeprom_checksum is not called after this function, the
+ * EEPROM will most likely contain an invalid checksum.
+ *****************************************************************************/
+s32 e1000_write_eeprom(struct e1000_hw *hw, u16 offset, u16 words, u16 *data)
+{
+ s32 ret;
+ spin_lock(&e1000_eeprom_lock);
+ ret = e1000_do_write_eeprom(hw, offset, words, data);
+ spin_unlock(&e1000_eeprom_lock);
+ return ret;
+}
+
+
+static s32 e1000_do_write_eeprom(struct e1000_hw *hw, u16 offset, u16 words, u16 *data)
+{
+ struct e1000_eeprom_info *eeprom = &hw->eeprom;
+ s32 status = 0;
+
+ DEBUGFUNC("e1000_write_eeprom");
+
+ /* If eeprom is not yet detected, do so now */
+ if (eeprom->word_size == 0)
+ e1000_init_eeprom_params(hw);
+
+ /* A check for invalid values: offset too large, too many words, and not
+ * enough words.
+ */
+ if ((offset >= eeprom->word_size) || (words > eeprom->word_size - offset) ||
+ (words == 0)) {
+ DEBUGOUT("\"words\" parameter out of bounds\n");
+ return -E1000_ERR_EEPROM;
+ }
+
+ /* 82573 writes only through eewr */
+ if (eeprom->use_eewr)
+ return e1000_write_eeprom_eewr(hw, offset, words, data);
+
+ if (eeprom->type == e1000_eeprom_ich8)
+ return e1000_write_eeprom_ich8(hw, offset, words, data);
+
+ /* Prepare the EEPROM for writing */
+ if (e1000_acquire_eeprom(hw) != E1000_SUCCESS)
+ return -E1000_ERR_EEPROM;
+
+ if (eeprom->type == e1000_eeprom_microwire) {
+ status = e1000_write_eeprom_microwire(hw, offset, words, data);
+ } else {
+ status = e1000_write_eeprom_spi(hw, offset, words, data);
+ msleep(10);
+ }
+
+ /* Done with writing */
+ e1000_release_eeprom(hw);
+
+ return status;
+}
+
+/******************************************************************************
+ * Writes a 16 bit word to a given offset in an SPI EEPROM.
+ *
+ * hw - Struct containing variables accessed by shared code
+ * offset - offset within the EEPROM to be written to
+ * words - number of words to write
+ * data - pointer to array of 16 bit words to be written to the EEPROM
+ *
+ *****************************************************************************/
+static s32 e1000_write_eeprom_spi(struct e1000_hw *hw, u16 offset, u16 words,
+ u16 *data)
+{
+ struct e1000_eeprom_info *eeprom = &hw->eeprom;
+ u16 widx = 0;
+
+ DEBUGFUNC("e1000_write_eeprom_spi");
+
+ while (widx < words) {
+ u8 write_opcode = EEPROM_WRITE_OPCODE_SPI;
+
+ if (e1000_spi_eeprom_ready(hw)) return -E1000_ERR_EEPROM;
+
+ e1000_standby_eeprom(hw);
+
+ /* Send the WRITE ENABLE command (8 bit opcode ) */
+ e1000_shift_out_ee_bits(hw, EEPROM_WREN_OPCODE_SPI,
+ eeprom->opcode_bits);
+
+ e1000_standby_eeprom(hw);
+
+ /* Some SPI eeproms use the 8th address bit embedded in the opcode */
+ if ((eeprom->address_bits == 8) && (offset >= 128))
+ write_opcode |= EEPROM_A8_OPCODE_SPI;
+
+ /* Send the Write command (8-bit opcode + addr) */
+ e1000_shift_out_ee_bits(hw, write_opcode, eeprom->opcode_bits);
+
+ e1000_shift_out_ee_bits(hw, (u16)((offset + widx)*2),
+ eeprom->address_bits);
+
+ /* Send the data */
+
+ /* Loop to allow for up to whole page write (32 bytes) of eeprom */
+ while (widx < words) {
+ u16 word_out = data[widx];
+ word_out = (word_out >> 8) | (word_out << 8);
+ e1000_shift_out_ee_bits(hw, word_out, 16);
+ widx++;
+
+ /* Some larger eeprom sizes are capable of a 32-byte PAGE WRITE
+ * operation, while the smaller eeproms are capable of an 8-byte
+ * PAGE WRITE operation. Break the inner loop to pass new address
+ */
+ if ((((offset + widx)*2) % eeprom->page_size) == 0) {
+ e1000_standby_eeprom(hw);
+ break;
+ }
+ }
+ }
+
+ return E1000_SUCCESS;
+}
+
+/******************************************************************************
+ * Writes a 16 bit word to a given offset in a Microwire EEPROM.
+ *
+ * hw - Struct containing variables accessed by shared code
+ * offset - offset within the EEPROM to be written to
+ * words - number of words to write
+ * data - pointer to array of 16 bit words to be written to the EEPROM
+ *
+ *****************************************************************************/
+static s32 e1000_write_eeprom_microwire(struct e1000_hw *hw, u16 offset,
+ u16 words, u16 *data)
+{
+ struct e1000_eeprom_info *eeprom = &hw->eeprom;
+ u32 eecd;
+ u16 words_written = 0;
+ u16 i = 0;
+
+ DEBUGFUNC("e1000_write_eeprom_microwire");
+
+ /* Send the write enable command to the EEPROM (3-bit opcode plus
+ * 6/8-bit dummy address beginning with 11). It's less work to include
+ * the 11 of the dummy address as part of the opcode than it is to shift
+ * it over the correct number of bits for the address. This puts the
+ * EEPROM into write/erase mode.
+ */
+ e1000_shift_out_ee_bits(hw, EEPROM_EWEN_OPCODE_MICROWIRE,
+ (u16)(eeprom->opcode_bits + 2));
+
+ e1000_shift_out_ee_bits(hw, 0, (u16)(eeprom->address_bits - 2));
+
+ /* Prepare the EEPROM */
+ e1000_standby_eeprom(hw);
+
+ while (words_written < words) {
+ /* Send the Write command (3-bit opcode + addr) */
+ e1000_shift_out_ee_bits(hw, EEPROM_WRITE_OPCODE_MICROWIRE,
+ eeprom->opcode_bits);
+
+ e1000_shift_out_ee_bits(hw, (u16)(offset + words_written),
+ eeprom->address_bits);
+
+ /* Send the data */
+ e1000_shift_out_ee_bits(hw, data[words_written], 16);
+
+ /* Toggle the CS line. This in effect tells the EEPROM to execute
+ * the previous command.
+ */
+ e1000_standby_eeprom(hw);
+
+ /* Read DO repeatedly until it is high (equal to '1'). The EEPROM will
+ * signal that the command has been completed by raising the DO signal.
+ * If DO does not go high in 10 milliseconds, then error out.
+ */
+ for (i = 0; i < 200; i++) {
+ eecd = er32(EECD);
+ if (eecd & E1000_EECD_DO) break;
+ udelay(50);
+ }
+ if (i == 200) {
+ DEBUGOUT("EEPROM Write did not complete\n");
+ return -E1000_ERR_EEPROM;
+ }
+
+ /* Recover from write */
+ e1000_standby_eeprom(hw);
+
+ words_written++;
+ }
+
+ /* Send the write disable command to the EEPROM (3-bit opcode plus
+ * 6/8-bit dummy address beginning with 10). It's less work to include
+ * the 10 of the dummy address as part of the opcode than it is to shift
+ * it over the correct number of bits for the address. This takes the
+ * EEPROM out of write/erase mode.
+ */
+ e1000_shift_out_ee_bits(hw, EEPROM_EWDS_OPCODE_MICROWIRE,
+ (u16)(eeprom->opcode_bits + 2));
+
+ e1000_shift_out_ee_bits(hw, 0, (u16)(eeprom->address_bits - 2));
+
+ return E1000_SUCCESS;
+}
+
+/******************************************************************************
+ * Flushes the cached eeprom to NVM. This is done by writing the modified
+ * values from the eeprom cache and the unmodified values from the currently
+ * active bank to the new bank.
+ *
+ * hw - Struct containing variables accessed by shared code
+ *****************************************************************************/
+static s32 e1000_commit_shadow_ram(struct e1000_hw *hw)
+{
+ u32 attempts = 100000;
+ u32 eecd = 0;
+ u32 flop = 0;
+ u32 i = 0;
+ s32 error = E1000_SUCCESS;
+ u32 old_bank_offset = 0;
+ u32 new_bank_offset = 0;
+ u8 low_byte = 0;
+ u8 high_byte = 0;
+ bool sector_write_failed = false;
+
+ if (hw->mac_type == e1000_82573) {
+ /* The flop register will be used to determine if flash type is STM */
+ flop = er32(FLOP);
+ for (i=0; i < attempts; i++) {
+ eecd = er32(EECD);
+ if ((eecd & E1000_EECD_FLUPD) == 0) {
+ break;
+ }
+ udelay(5);
+ }
+
+ if (i == attempts) {
+ return -E1000_ERR_EEPROM;
+ }
+
+ /* If STM opcode located in bits 15:8 of flop, reset firmware */
+ if ((flop & 0xFF00) == E1000_STM_OPCODE) {
+ ew32(HICR, E1000_HICR_FW_RESET);
+ }
+
+ /* Perform the flash update */
+ ew32(EECD, eecd | E1000_EECD_FLUPD);
+
+ for (i=0; i < attempts; i++) {
+ eecd = er32(EECD);
+ if ((eecd & E1000_EECD_FLUPD) == 0) {
+ break;
+ }
+ udelay(5);
+ }
+
+ if (i == attempts) {
+ return -E1000_ERR_EEPROM;
+ }
+ }
+
+ if (hw->mac_type == e1000_ich8lan && hw->eeprom_shadow_ram != NULL) {
+ /* We're writing to the opposite bank so if we're on bank 1,
+ * write to bank 0 etc. We also need to erase the segment that
+ * is going to be written */
+ if (!(er32(EECD) & E1000_EECD_SEC1VAL)) {
+ new_bank_offset = hw->flash_bank_size * 2;
+ old_bank_offset = 0;
+ e1000_erase_ich8_4k_segment(hw, 1);
+ } else {
+ old_bank_offset = hw->flash_bank_size * 2;
+ new_bank_offset = 0;
+ e1000_erase_ich8_4k_segment(hw, 0);
+ }
+
+ sector_write_failed = false;
+ /* Loop over every word in the shadow RAM; each word is
+ * written to the flash as two bytes. */
+ for (i = 0; i < E1000_SHADOW_RAM_WORDS; i++) {
+ /* Determine whether to write the value stored
+ * in the other NVM bank or a modified value stored
+ * in the shadow RAM */
+ if (hw->eeprom_shadow_ram[i].modified) {
+ low_byte = (u8)hw->eeprom_shadow_ram[i].eeprom_word;
+ udelay(100);
+ error = e1000_verify_write_ich8_byte(hw,
+ (i << 1) + new_bank_offset, low_byte);
+
+ if (error != E1000_SUCCESS)
+ sector_write_failed = true;
+ else {
+ high_byte =
+ (u8)(hw->eeprom_shadow_ram[i].eeprom_word >> 8);
+ udelay(100);
+ }
+ } else {
+ e1000_read_ich8_byte(hw, (i << 1) + old_bank_offset,
+ &low_byte);
+ udelay(100);
+ error = e1000_verify_write_ich8_byte(hw,
+ (i << 1) + new_bank_offset, low_byte);
+
+ if (error != E1000_SUCCESS)
+ sector_write_failed = true;
+ else {
+ e1000_read_ich8_byte(hw, (i << 1) + old_bank_offset + 1,
+ &high_byte);
+ udelay(100);
+ }
+ }
+
+ /* If the write of the low byte was successful, go ahead and
+ * write the high byte while checking to make sure that if it
+ * is the signature byte, then it is handled properly */
+ if (!sector_write_failed) {
+ /* If the word is 0x13, then make sure the signature bits
+ * (15:14) are 11b until the commit has completed.
+ * This will allow us to write 10b which indicates the
+ * signature is valid. We want to do this after the write
+ * has completed so that we don't mark the segment valid
+ * while the write is still in progress */
+ if (i == E1000_ICH_NVM_SIG_WORD)
+ high_byte = E1000_ICH_NVM_SIG_MASK | high_byte;
+
+ error = e1000_verify_write_ich8_byte(hw,
+ (i << 1) + new_bank_offset + 1, high_byte);
+ if (error != E1000_SUCCESS)
+ sector_write_failed = true;
+
+ } else {
+ /* If the write failed then break from the loop and
+ * return an error */
+ break;
+ }
+ }
+
+ /* Don't bother writing the segment valid bits if sector
+ * programming failed. */
+ if (!sector_write_failed) {
+ /* Finally, validate the new segment by setting bits 15:14
+ * to 10b in word 0x13; this can be done without an
+ * erase as well since these bits are 11 to start with
+ * and we need to change bit 14 to 0b */
+ e1000_read_ich8_byte(hw,
+ E1000_ICH_NVM_SIG_WORD * 2 + 1 + new_bank_offset,
+ &high_byte);
+ high_byte &= 0xBF;
+ error = e1000_verify_write_ich8_byte(hw,
+ E1000_ICH_NVM_SIG_WORD * 2 + 1 + new_bank_offset, high_byte);
+ /* And invalidate the previously valid segment by setting
+ * its signature word (0x13) high_byte to 0b. This can be
+ * done without an erase because flash erase sets all bits
+ * to 1's. We can write 1's to 0's without an erase */
+ if (error == E1000_SUCCESS) {
+ error = e1000_verify_write_ich8_byte(hw,
+ E1000_ICH_NVM_SIG_WORD * 2 + 1 + old_bank_offset, 0);
+ }
+
+ /* Clear the now not used entry in the cache */
+ for (i = 0; i < E1000_SHADOW_RAM_WORDS; i++) {
+ hw->eeprom_shadow_ram[i].modified = false;
+ hw->eeprom_shadow_ram[i].eeprom_word = 0xFFFF;
+ }
+ }
+ }
+
+ return error;
+}
+
+/******************************************************************************
+ * Reads the adapter's MAC address from the EEPROM and inverts the LSB for the
+ * second function of dual function devices
+ *
+ * hw - Struct containing variables accessed by shared code
+ *****************************************************************************/
+s32 e1000_read_mac_addr(struct e1000_hw *hw)
+{
+ u16 offset;
+ u16 eeprom_data, i;
+
+ DEBUGFUNC("e1000_read_mac_addr");
+
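+ /* Each EEPROM word holds two bytes of the MAC address, low byte first. */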
+ for (i = 0; i < NODE_ADDRESS_SIZE; i += 2) {
+ offset = i >> 1;
+ if (e1000_read_eeprom(hw, offset, 1, &eeprom_data) < 0) {
+ DEBUGOUT("EEPROM Read Error\n");
+ return -E1000_ERR_EEPROM;
+ }
+ hw->perm_mac_addr[i] = (u8)(eeprom_data & 0x00FF);
+ hw->perm_mac_addr[i+1] = (u8)(eeprom_data >> 8);
+ }
+
+ switch (hw->mac_type) {
+ default:
+ break;
+ case e1000_82546:
+ case e1000_82546_rev_3:
+ case e1000_82571:
+ case e1000_80003es2lan:
+ if (er32(STATUS) & E1000_STATUS_FUNC_1)
+ hw->perm_mac_addr[5] ^= 0x01;
+ break;
+ }
+
+ for (i = 0; i < NODE_ADDRESS_SIZE; i++)
+ hw->mac_addr[i] = hw->perm_mac_addr[i];
+ return E1000_SUCCESS;
+}
+
+/******************************************************************************
+ * Initializes receive address filters.
+ *
+ * hw - Struct containing variables accessed by shared code
+ *
+ * Places the MAC address in receive address register 0 and clears the rest
+ * of the receive address registers. Clears the multicast table. Assumes
+ * the receiver is in reset when the routine is called.
+ *****************************************************************************/
+static void e1000_init_rx_addrs(struct e1000_hw *hw)
+{
+ u32 i;
+ u32 rar_num;
+
+ DEBUGFUNC("e1000_init_rx_addrs");
+
+ /* Setup the receive address. */
+ DEBUGOUT("Programming MAC Address into RAR[0]\n");
+
+ e1000_rar_set(hw, hw->mac_addr, 0);
+
+ rar_num = E1000_RAR_ENTRIES;
+
+ /* Reserve a spot for the Locally Administered Address to work around
+ * an 82571 issue in which a reset on one port will reload the MAC on
+ * the other port. */
+ if ((hw->mac_type == e1000_82571) && (hw->laa_is_present))
+ rar_num -= 1;
+ if (hw->mac_type == e1000_ich8lan)
+ rar_num = E1000_RAR_ENTRIES_ICH8LAN;
+
+ /* Zero out the other 15 receive addresses. */
+ DEBUGOUT("Clearing RAR[1-15]\n");
+ for (i = 1; i < rar_num; i++) {
+ E1000_WRITE_REG_ARRAY(hw, RA, (i << 1), 0);
+ E1000_WRITE_FLUSH();
+ E1000_WRITE_REG_ARRAY(hw, RA, ((i << 1) + 1), 0);
+ E1000_WRITE_FLUSH();
+ }
+}
+
+/******************************************************************************
+ * Hashes an address to determine its location in the multicast table
+ *
+ * hw - Struct containing variables accessed by shared code
+ * mc_addr - the multicast address to hash
+ *****************************************************************************/
+u32 e1000_hash_mc_addr(struct e1000_hw *hw, u8 *mc_addr)
+{
+ u32 hash_value = 0;
+
+ /* The portion of the address that is used for the hash table is
+ * determined by the mc_filter_type setting.
+ */
+ switch (hw->mc_filter_type) {
+ /* [0] [1] [2] [3] [4] [5]
+ * 01 AA 00 12 34 56
+ * LSB MSB
+ */
+ case 0:
+ if (hw->mac_type == e1000_ich8lan) {
+ /* [47:38] i.e. 0x158 for above example address */
+ hash_value = ((mc_addr[4] >> 6) | (((u16)mc_addr[5]) << 2));
+ } else {
+ /* [47:36] i.e. 0x563 for above example address */
+ hash_value = ((mc_addr[4] >> 4) | (((u16)mc_addr[5]) << 4));
+ }
+ break;
+ case 1:
+ if (hw->mac_type == e1000_ich8lan) {
+ /* [46:37] i.e. 0x2B1 for above example address */
+ hash_value = ((mc_addr[4] >> 5) | (((u16)mc_addr[5]) << 3));
+ } else {
+ /* [46:35] i.e. 0xAC6 for above example address */
+ hash_value = ((mc_addr[4] >> 3) | (((u16)mc_addr[5]) << 5));
+ }
+ break;
+ case 2:
+ if (hw->mac_type == e1000_ich8lan) {
+ /*[45:36] i.e. 0x163 for above example address */
+ hash_value = ((mc_addr[4] >> 4) | (((u16)mc_addr[5]) << 4));
+ } else {
+ /* [45:34] i.e. 0x5D8 for above example address */
+ hash_value = ((mc_addr[4] >> 2) | (((u16)mc_addr[5]) << 6));
+ }
+ break;
+ case 3:
+ if (hw->mac_type == e1000_ich8lan) {
+ /* [43:34] i.e. 0x18D for above example address */
+ hash_value = ((mc_addr[4] >> 2) | (((u16)mc_addr[5]) << 6));
+ } else {
+ /* [43:32] i.e. 0x634 for above example address */
+ hash_value = ((mc_addr[4]) | (((u16)mc_addr[5]) << 8));
+ }
+ break;
+ }
+
+ hash_value &= 0xFFF;
+ if (hw->mac_type == e1000_ich8lan)
+ hash_value &= 0x3FF;
+
+ return hash_value;
+}
+
+/******************************************************************************
+ * Sets the bit in the multicast table corresponding to the hash value.
+ *
+ * hw - Struct containing variables accessed by shared code
+ * hash_value - Multicast address hash value
+ *****************************************************************************/
+void e1000_mta_set(struct e1000_hw *hw, u32 hash_value)
+{
+ u32 hash_bit, hash_reg;
+ u32 mta;
+ u32 temp;
+
+ /* The MTA is a register array of 128 32-bit registers.
+ * It is treated like an array of 4096 bits. We want to set
+ * bit BitArray[hash_value]. So we figure out what register
+ * the bit is in, read it, OR in the new bit, then write
+ * back the new value. The register is determined by the
+ * upper 7 bits of the hash value and the bit within that
+ * register is determined by the lower 5 bits of the value.
+ */
+ hash_reg = (hash_value >> 5) & 0x7F;
+ if (hw->mac_type == e1000_ich8lan)
+ hash_reg &= 0x1F;
+
+ hash_bit = hash_value & 0x1F;
+
+ mta = E1000_READ_REG_ARRAY(hw, MTA, hash_reg);
+
+ mta |= (1 << hash_bit);
+
+ /* If we are on an 82544 and we are trying to write an odd offset
+ * in the MTA, save off the previous entry before writing and
+ * restore the old value after writing.
+ */
+ if ((hw->mac_type == e1000_82544) && ((hash_reg & 0x1) == 1)) {
+ temp = E1000_READ_REG_ARRAY(hw, MTA, (hash_reg - 1));
+ E1000_WRITE_REG_ARRAY(hw, MTA, hash_reg, mta);
+ E1000_WRITE_FLUSH();
+ E1000_WRITE_REG_ARRAY(hw, MTA, (hash_reg - 1), temp);
+ E1000_WRITE_FLUSH();
+ } else {
+ E1000_WRITE_REG_ARRAY(hw, MTA, hash_reg, mta);
+ E1000_WRITE_FLUSH();
+ }
+}
+
+/******************************************************************************
+ * Puts an ethernet address into a receive address register.
+ *
+ * hw - Struct containing variables accessed by shared code
+ * addr - Address to put into receive address register
+ * index - Receive address register to write
+ *****************************************************************************/
+void e1000_rar_set(struct e1000_hw *hw, u8 *addr, u32 index)
+{
+ u32 rar_low, rar_high;
+
+ /* HW expects these in little endian so we reverse the byte order
+ * from network order (big endian) to little endian
+ */
+ rar_low = ((u32)addr[0] | ((u32)addr[1] << 8) |
+ ((u32)addr[2] << 16) | ((u32)addr[3] << 24));
+ rar_high = ((u32)addr[4] | ((u32)addr[5] << 8));
+
+ /* Disable Rx and flush all Rx frames before enabling RSS to avoid Rx
+ * unit hang.
+ *
+ * Description:
+ * If there are any Rx frames queued up or otherwise present in the HW
+ * before RSS is enabled, and then we enable RSS, the HW Rx unit will
+ * hang. To work around this issue, we have to disable receives and
+ * flush out all Rx frames before we enable RSS. To do so, we redirect
+ * all Rx traffic to manageability and then reset the HW.
+ * This flushes away Rx frames, and (since the redirection to
+ * manageability persists across resets) keeps new ones from coming in
+ * while we work. Then, we clear the Address Valid AV bit for all MAC
+ * addresses and undo the re-direction to manageability.
+ * Now, frames are coming in again, but the MAC won't accept them, so
+ * far so good. We now proceed to initialize RSS (if necessary) and
+ * configure the Rx unit. Last, we re-enable the AV bits and continue
+ * on our merry way.
+ */
+ switch (hw->mac_type) {
+ case e1000_82571:
+ case e1000_82572:
+ case e1000_80003es2lan:
+ if (hw->leave_av_bit_off)
+ break;
+ default:
+ /* Indicate to hardware the Address is Valid. */
+ rar_high |= E1000_RAH_AV;
+ break;
+ }
+
+ E1000_WRITE_REG_ARRAY(hw, RA, (index << 1), rar_low);
+ E1000_WRITE_FLUSH();
+ E1000_WRITE_REG_ARRAY(hw, RA, ((index << 1) + 1), rar_high);
+ E1000_WRITE_FLUSH();
+}
+
+/******************************************************************************
+ * Writes a value to the specified offset in the VLAN filter table.
+ *
+ * hw - Struct containing variables accessed by shared code
+ * offset - Offset in VLAN filter table to write
+ * value - Value to write into VLAN filter table
+ *****************************************************************************/
+void e1000_write_vfta(struct e1000_hw *hw, u32 offset, u32 value)
+{
+ u32 temp;
+
+ if (hw->mac_type == e1000_ich8lan)
+ return;
+
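+ /* As with the MTA, writes to an odd VFTA offset on the 82544 save the
+ * preceding even entry and restore it after the write. */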
+ if ((hw->mac_type == e1000_82544) && ((offset & 0x1) == 1)) {
+ temp = E1000_READ_REG_ARRAY(hw, VFTA, (offset - 1));
+ E1000_WRITE_REG_ARRAY(hw, VFTA, offset, value);
+ E1000_WRITE_FLUSH();
+ E1000_WRITE_REG_ARRAY(hw, VFTA, (offset - 1), temp);
+ E1000_WRITE_FLUSH();
+ } else {
+ E1000_WRITE_REG_ARRAY(hw, VFTA, offset, value);
+ E1000_WRITE_FLUSH();
+ }
+}
+
+/******************************************************************************
+ * Clears the VLAN filter table
+ *
+ * hw - Struct containing variables accessed by shared code
+ *****************************************************************************/
+static void e1000_clear_vfta(struct e1000_hw *hw)
+{
+ u32 offset;
+ u32 vfta_value = 0;
+ u32 vfta_offset = 0;
+ u32 vfta_bit_in_reg = 0;
+
+ if (hw->mac_type == e1000_ich8lan)
+ return;
+
+ if (hw->mac_type == e1000_82573) {
+ if (hw->mng_cookie.vlan_id != 0) {
+ /* The VFTA is a 4096b bit-field, each identifying a single VLAN
+ * ID. The following operations determine which 32b entry
+ * (i.e. offset) into the array we want to set the VLAN ID
+ * (i.e. bit) of the manageability unit. */
+ vfta_offset = (hw->mng_cookie.vlan_id >>
+ E1000_VFTA_ENTRY_SHIFT) &
+ E1000_VFTA_ENTRY_MASK;
+ vfta_bit_in_reg = 1 << (hw->mng_cookie.vlan_id &
+ E1000_VFTA_ENTRY_BIT_SHIFT_MASK);
+ }
+ }
+ for (offset = 0; offset < E1000_VLAN_FILTER_TBL_SIZE; offset++) {
+ /* If the offset we want to clear is the same offset of the
+ * manageability VLAN ID, then clear all bits except that of the
+ * manageability unit */
+ vfta_value = (offset == vfta_offset) ? vfta_bit_in_reg : 0;
+ E1000_WRITE_REG_ARRAY(hw, VFTA, offset, vfta_value);
+ E1000_WRITE_FLUSH();
+ }
+}
+
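+/******************************************************************************
+ * Reads the ID LED settings from the EEPROM and prepares the LEDCTL values
+ * used for the LED on/off modes.
+ *
+ * hw - Struct containing variables accessed by shared code
+ *****************************************************************************/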
+static s32 e1000_id_led_init(struct e1000_hw *hw)
+{
+ u32 ledctl;
+ const u32 ledctl_mask = 0x000000FF;
+ const u32 ledctl_on = E1000_LEDCTL_MODE_LED_ON;
+ const u32 ledctl_off = E1000_LEDCTL_MODE_LED_OFF;
+ u16 eeprom_data, i, temp;
+ const u16 led_mask = 0x0F;
+
+ DEBUGFUNC("e1000_id_led_init");
+
+ if (hw->mac_type < e1000_82540) {
+ /* Nothing to do */
+ return E1000_SUCCESS;
+ }
+
+ ledctl = er32(LEDCTL);
+ hw->ledctl_default = ledctl;
+ hw->ledctl_mode1 = hw->ledctl_default;
+ hw->ledctl_mode2 = hw->ledctl_default;
+
+ if (e1000_read_eeprom(hw, EEPROM_ID_LED_SETTINGS, 1, &eeprom_data) < 0) {
+ DEBUGOUT("EEPROM Read Error\n");
+ return -E1000_ERR_EEPROM;
+ }
+
+ if ((hw->mac_type == e1000_82573) &&
+ (eeprom_data == ID_LED_RESERVED_82573))
+ eeprom_data = ID_LED_DEFAULT_82573;
+ else if ((eeprom_data == ID_LED_RESERVED_0000) ||
+ (eeprom_data == ID_LED_RESERVED_FFFF)) {
+ if (hw->mac_type == e1000_ich8lan)
+ eeprom_data = ID_LED_DEFAULT_ICH8LAN;
+ else
+ eeprom_data = ID_LED_DEFAULT;
+ }
+
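+ /* The ID LED settings word packs one 4-bit field per LED; translate each
+ * field into the mode bits of the corresponding LEDCTL byte. */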
+ for (i = 0; i < 4; i++) {
+ temp = (eeprom_data >> (i << 2)) & led_mask;
+ switch (temp) {
+ case ID_LED_ON1_DEF2:
+ case ID_LED_ON1_ON2:
+ case ID_LED_ON1_OFF2:
+ hw->ledctl_mode1 &= ~(ledctl_mask << (i << 3));
+ hw->ledctl_mode1 |= ledctl_on << (i << 3);
+ break;
+ case ID_LED_OFF1_DEF2:
+ case ID_LED_OFF1_ON2:
+ case ID_LED_OFF1_OFF2:
+ hw->ledctl_mode1 &= ~(ledctl_mask << (i << 3));
+ hw->ledctl_mode1 |= ledctl_off << (i << 3);
+ break;
+ default:
+ /* Do nothing */
+ break;
+ }
+ switch (temp) {
+ case ID_LED_DEF1_ON2:
+ case ID_LED_ON1_ON2:
+ case ID_LED_OFF1_ON2:
+ hw->ledctl_mode2 &= ~(ledctl_mask << (i << 3));
+ hw->ledctl_mode2 |= ledctl_on << (i << 3);
+ break;
+ case ID_LED_DEF1_OFF2:
+ case ID_LED_ON1_OFF2:
+ case ID_LED_OFF1_OFF2:
+ hw->ledctl_mode2 &= ~(ledctl_mask << (i << 3));
+ hw->ledctl_mode2 |= ledctl_off << (i << 3);
+ break;
+ default:
+ /* Do nothing */
+ break;
+ }
+ }
+ return E1000_SUCCESS;
+}
+
+/******************************************************************************
+ * Prepares SW controllable LED for use and saves the current state of the LED.
+ *
+ * hw - Struct containing variables accessed by shared code
+ *****************************************************************************/
+s32 e1000_setup_led(struct e1000_hw *hw)
+{
+ u32 ledctl;
+ s32 ret_val = E1000_SUCCESS;
+
+ DEBUGFUNC("e1000_setup_led");
+
+ switch (hw->mac_type) {
+ case e1000_82542_rev2_0:
+ case e1000_82542_rev2_1:
+ case e1000_82543:
+ case e1000_82544:
+ /* No setup necessary */
+ break;
+ case e1000_82541:
+ case e1000_82547:
+ case e1000_82541_rev_2:
+ case e1000_82547_rev_2:
+ /* Turn off PHY Smart Power Down (if enabled) */
+ ret_val = e1000_read_phy_reg(hw, IGP01E1000_GMII_FIFO,
+ &hw->phy_spd_default);
+ if (ret_val)
+ return ret_val;
+ ret_val = e1000_write_phy_reg(hw, IGP01E1000_GMII_FIFO,
+ (u16)(hw->phy_spd_default &
+ ~IGP01E1000_GMII_SPD));
+ if (ret_val)
+ return ret_val;
+ /* Fall Through */
+ default:
+ if (hw->media_type == e1000_media_type_fiber) {
+ ledctl = er32(LEDCTL);
+ /* Save current LEDCTL settings */
+ hw->ledctl_default = ledctl;
+ /* Turn off LED0 */
+ ledctl &= ~(E1000_LEDCTL_LED0_IVRT |
+ E1000_LEDCTL_LED0_BLINK |
+ E1000_LEDCTL_LED0_MODE_MASK);
+ ledctl |= (E1000_LEDCTL_MODE_LED_OFF <<
+ E1000_LEDCTL_LED0_MODE_SHIFT);
+ ew32(LEDCTL, ledctl);
+ } else if (hw->media_type == e1000_media_type_copper)
+ ew32(LEDCTL, hw->ledctl_mode1);
+ break;
+ }
+
+ return E1000_SUCCESS;
+}
+
+
+/******************************************************************************
+ * Used on 82571 and later Si that has LED blink bits.
+ * Callers must use their own timer and should have already called
+ * e1000_id_led_init()
+ * Call e1000_cleanup_led() to stop blinking
+ *
+ * hw - Struct containing variables accessed by shared code
+ *****************************************************************************/
+s32 e1000_blink_led_start(struct e1000_hw *hw)
+{
+ s16 i;
+ u32 ledctl_blink = 0;
+
+ DEBUGFUNC("e1000_id_led_blink_on");
+
+ if (hw->mac_type < e1000_82571) {
+ /* Nothing to do */
+ return E1000_SUCCESS;
+ }
+ if (hw->media_type == e1000_media_type_fiber) {
+ /* always blink LED0 for PCI-E fiber */
+ ledctl_blink = E1000_LEDCTL_LED0_BLINK |
+ (E1000_LEDCTL_MODE_LED_ON << E1000_LEDCTL_LED0_MODE_SHIFT);
+ } else {
+ /* set the blink bit for each LED that's "on" (0x0E) in ledctl_mode2 */
+ ledctl_blink = hw->ledctl_mode2;
+ for (i=0; i < 4; i++)
+ if (((hw->ledctl_mode2 >> (i * 8)) & 0xFF) ==
+ E1000_LEDCTL_MODE_LED_ON)
+ ledctl_blink |= (E1000_LEDCTL_LED0_BLINK << (i * 8));
+ }
+
+ ew32(LEDCTL, ledctl_blink);
+
+ return E1000_SUCCESS;
+}
+
+/******************************************************************************
+ * Restores the saved state of the SW controllable LED.
+ *
+ * hw - Struct containing variables accessed by shared code
+ *****************************************************************************/
+s32 e1000_cleanup_led(struct e1000_hw *hw)
+{
+ s32 ret_val = E1000_SUCCESS;
+
+ DEBUGFUNC("e1000_cleanup_led");
+
+ switch (hw->mac_type) {
+ case e1000_82542_rev2_0:
+ case e1000_82542_rev2_1:
+ case e1000_82543:
+ case e1000_82544:
+ /* No cleanup necessary */
+ break;
+ case e1000_82541:
+ case e1000_82547:
+ case e1000_82541_rev_2:
+ case e1000_82547_rev_2:
+ /* Turn on PHY Smart Power Down (if previously enabled) */
+ ret_val = e1000_write_phy_reg(hw, IGP01E1000_GMII_FIFO,
+ hw->phy_spd_default);
+ if (ret_val)
+ return ret_val;
+ /* Fall Through */
+ default:
+ if (hw->phy_type == e1000_phy_ife) {
+ e1000_write_phy_reg(hw, IFE_PHY_SPECIAL_CONTROL_LED, 0);
+ break;
+ }
+ /* Restore LEDCTL settings */
+ ew32(LEDCTL, hw->ledctl_default);
+ break;
+ }
+
+ return E1000_SUCCESS;
+}
+
+/******************************************************************************
+ * Turns on the software controllable LED
+ *
+ * hw - Struct containing variables accessed by shared code
+ *****************************************************************************/
+s32 e1000_led_on(struct e1000_hw *hw)
+{
+ u32 ctrl = er32(CTRL);
+
+ DEBUGFUNC("e1000_led_on");
+
+ switch (hw->mac_type) {
+ case e1000_82542_rev2_0:
+ case e1000_82542_rev2_1:
+ case e1000_82543:
+ /* Set SW Definable Pin 0 to turn on the LED */
+ ctrl |= E1000_CTRL_SWDPIN0;
+ ctrl |= E1000_CTRL_SWDPIO0;
+ break;
+ case e1000_82544:
+ if (hw->media_type == e1000_media_type_fiber) {
+ /* Set SW Definable Pin 0 to turn on the LED */
+ ctrl |= E1000_CTRL_SWDPIN0;
+ ctrl |= E1000_CTRL_SWDPIO0;
+ } else {
+ /* Clear SW Definable Pin 0 to turn on the LED */
+ ctrl &= ~E1000_CTRL_SWDPIN0;
+ ctrl |= E1000_CTRL_SWDPIO0;
+ }
+ break;
+ default:
+ if (hw->media_type == e1000_media_type_fiber) {
+ /* Clear SW Definable Pin 0 to turn on the LED */
+ ctrl &= ~E1000_CTRL_SWDPIN0;
+ ctrl |= E1000_CTRL_SWDPIO0;
+ } else if (hw->phy_type == e1000_phy_ife) {
+ e1000_write_phy_reg(hw, IFE_PHY_SPECIAL_CONTROL_LED,
+ (IFE_PSCL_PROBE_MODE | IFE_PSCL_PROBE_LEDS_ON));
+ } else if (hw->media_type == e1000_media_type_copper) {
+ ew32(LEDCTL, hw->ledctl_mode2);
+ return E1000_SUCCESS;
+ }
+ break;
+ }
+
+ ew32(CTRL, ctrl);
+
+ return E1000_SUCCESS;
+}
+
+/******************************************************************************
+ * Turns off the software controllable LED
+ *
+ * hw - Struct containing variables accessed by shared code
+ *****************************************************************************/
+s32 e1000_led_off(struct e1000_hw *hw)
+{
+ u32 ctrl = er32(CTRL);
+
+ DEBUGFUNC("e1000_led_off");
+
+ switch (hw->mac_type) {
+ case e1000_82542_rev2_0:
+ case e1000_82542_rev2_1:
+ case e1000_82543:
+ /* Clear SW Definable Pin 0 to turn off the LED */
+ ctrl &= ~E1000_CTRL_SWDPIN0;
+ ctrl |= E1000_CTRL_SWDPIO0;
+ break;
+ case e1000_82544:
+ if (hw->media_type == e1000_media_type_fiber) {
+ /* Clear SW Definable Pin 0 to turn off the LED */
+ ctrl &= ~E1000_CTRL_SWDPIN0;
+ ctrl |= E1000_CTRL_SWDPIO0;
+ } else {
+ /* Set SW Definable Pin 0 to turn off the LED */
+ ctrl |= E1000_CTRL_SWDPIN0;
+ ctrl |= E1000_CTRL_SWDPIO0;
+ }
+ break;
+ default:
+ if (hw->media_type == e1000_media_type_fiber) {
+ /* Set SW Definable Pin 0 to turn off the LED */
+ ctrl |= E1000_CTRL_SWDPIN0;
+ ctrl |= E1000_CTRL_SWDPIO0;
+ } else if (hw->phy_type == e1000_phy_ife) {
+ e1000_write_phy_reg(hw, IFE_PHY_SPECIAL_CONTROL_LED,
+ (IFE_PSCL_PROBE_MODE | IFE_PSCL_PROBE_LEDS_OFF));
+ } else if (hw->media_type == e1000_media_type_copper) {
+ ew32(LEDCTL, hw->ledctl_mode1);
+ return E1000_SUCCESS;
+ }
+ break;
+ }
+
+ ew32(CTRL, ctrl);
+
+ return E1000_SUCCESS;
+}
+
+/******************************************************************************
+ * Clears all hardware statistics counters.
+ *
+ * hw - Struct containing variables accessed by shared code
+ *****************************************************************************/
+static void e1000_clear_hw_cntrs(struct e1000_hw *hw)
+{
+ volatile u32 temp;
+
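+ /* These statistics registers are clear-on-read: reading each register
+ * resets the corresponding hardware counter, so the values read into the
+ * dummy variable are intentionally discarded.
+ */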
+ temp = er32(CRCERRS);
+ temp = er32(SYMERRS);
+ temp = er32(MPC);
+ temp = er32(SCC);
+ temp = er32(ECOL);
+ temp = er32(MCC);
+ temp = er32(LATECOL);
+ temp = er32(COLC);
+ temp = er32(DC);
+ temp = er32(SEC);
+ temp = er32(RLEC);
+ temp = er32(XONRXC);
+ temp = er32(XONTXC);
+ temp = er32(XOFFRXC);
+ temp = er32(XOFFTXC);
+ temp = er32(FCRUC);
+
+ if (hw->mac_type != e1000_ich8lan) {
+ temp = er32(PRC64);
+ temp = er32(PRC127);
+ temp = er32(PRC255);
+ temp = er32(PRC511);
+ temp = er32(PRC1023);
+ temp = er32(PRC1522);
+ }
+
+ temp = er32(GPRC);
+ temp = er32(BPRC);
+ temp = er32(MPRC);
+ temp = er32(GPTC);
+ temp = er32(GORCL);
+ temp = er32(GORCH);
+ temp = er32(GOTCL);
+ temp = er32(GOTCH);
+ temp = er32(RNBC);
+ temp = er32(RUC);
+ temp = er32(RFC);
+ temp = er32(ROC);
+ temp = er32(RJC);
+ temp = er32(TORL);
+ temp = er32(TORH);
+ temp = er32(TOTL);
+ temp = er32(TOTH);
+ temp = er32(TPR);
+ temp = er32(TPT);
+
+ if (hw->mac_type != e1000_ich8lan) {
+ temp = er32(PTC64);
+ temp = er32(PTC127);
+ temp = er32(PTC255);
+ temp = er32(PTC511);
+ temp = er32(PTC1023);
+ temp = er32(PTC1522);
+ }
+
+ temp = er32(MPTC);
+ temp = er32(BPTC);
+
+ if (hw->mac_type < e1000_82543) return;
+
+ temp = er32(ALGNERRC);
+ temp = er32(RXERRC);
+ temp = er32(TNCRS);
+ temp = er32(CEXTERR);
+ temp = er32(TSCTC);
+ temp = er32(TSCTFC);
+
+ if (hw->mac_type <= e1000_82544) return;
+
+ temp = er32(MGTPRC);
+ temp = er32(MGTPDC);
+ temp = er32(MGTPTC);
+
+ if (hw->mac_type <= e1000_82547_rev_2) return;
+
+ temp = er32(IAC);
+ temp = er32(ICRXOC);
+
+ if (hw->mac_type == e1000_ich8lan) return;
+
+ temp = er32(ICRXPTC);
+ temp = er32(ICRXATC);
+ temp = er32(ICTXPTC);
+ temp = er32(ICTXATC);
+ temp = er32(ICTXQEC);
+ temp = er32(ICTXQMTC);
+ temp = er32(ICRXDMTC);
+}
+
+/******************************************************************************
+ * Resets Adaptive IFS to its default state.
+ *
+ * hw - Struct containing variables accessed by shared code
+ *
+ * Call this after e1000_init_hw. You may override the IFS defaults by setting
+ * hw->ifs_params_forced to true. However, you must initialize hw->
+ * current_ifs_val, ifs_min_val, ifs_max_val, ifs_step_size, and ifs_ratio
+ * before calling this function.
+ *****************************************************************************/
+void e1000_reset_adaptive(struct e1000_hw *hw)
+{
+ DEBUGFUNC("e1000_reset_adaptive");
+
+ if (hw->adaptive_ifs) {
+ if (!hw->ifs_params_forced) {
+ hw->current_ifs_val = 0;
+ hw->ifs_min_val = IFS_MIN;
+ hw->ifs_max_val = IFS_MAX;
+ hw->ifs_step_size = IFS_STEP;
+ hw->ifs_ratio = IFS_RATIO;
+ }
+ hw->in_ifs_mode = false;
+ ew32(AIT, 0);
+ } else {
+ DEBUGOUT("Not in Adaptive IFS mode!\n");
+ }
+}
+
+/******************************************************************************
+ * Called during the callback/watchdog routine to update IFS value based on
+ * the ratio of transmits to collisions.
+ *
+ * hw - Struct containing variables accessed by shared code
+ * The number of transmits and collisions since the last call is taken
+ * from hw->tx_packet_delta and hw->collision_delta.
+ *****************************************************************************/
+void e1000_update_adaptive(struct e1000_hw *hw)
+{
+ DEBUGFUNC("e1000_update_adaptive");
+
+ if (hw->adaptive_ifs) {
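+ /* When collisions are high relative to transmitted packets
+ * (collision_delta * ifs_ratio exceeds tx_packet_delta) and enough
+ * packets were sent, step the inter-frame spacing (AIT register) up
+ * towards ifs_max_val; the else branch below drops it back to 0 once
+ * traffic has calmed down.
+ */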
+ if ((hw->collision_delta * hw->ifs_ratio) > hw->tx_packet_delta) {
+ if (hw->tx_packet_delta > MIN_NUM_XMITS) {
+ hw->in_ifs_mode = true;
+ if (hw->current_ifs_val < hw->ifs_max_val) {
+ if (hw->current_ifs_val == 0)
+ hw->current_ifs_val = hw->ifs_min_val;
+ else
+ hw->current_ifs_val += hw->ifs_step_size;
+ ew32(AIT, hw->current_ifs_val);
+ }
+ }
+ } else {
+ if (hw->in_ifs_mode && (hw->tx_packet_delta <= MIN_NUM_XMITS)) {
+ hw->current_ifs_val = 0;
+ hw->in_ifs_mode = false;
+ ew32(AIT, 0);
+ }
+ }
+ } else {
+ DEBUGOUT("Not in Adaptive IFS mode!\n");
+ }
+}
+
+/******************************************************************************
+ * Adjusts the statistic counters when a frame is accepted by TBI_ACCEPT
+ *
+ * hw - Struct containing variables accessed by shared code
+ * frame_len - The length of the frame in question
+ * mac_addr - The Ethernet destination address of the frame in question
+ *****************************************************************************/
+void e1000_tbi_adjust_stats(struct e1000_hw *hw, struct e1000_hw_stats *stats,
+ u32 frame_len, u8 *mac_addr)
+{
+ u64 carry_bit;
+
+ /* First adjust the frame length. */
+ frame_len--;
+ /* We need to adjust the statistics counters, since the hardware
+ * counters overcount this packet as a CRC error and undercount
+ * the packet as a good packet
+ */
+ /* This packet should not be counted as a CRC error. */
+ stats->crcerrs--;
+ /* This packet does count as a Good Packet Received. */
+ stats->gprc++;
+
+ /* Adjust the Good Octets received counters */
+ carry_bit = 0x80000000 & stats->gorcl;
+ stats->gorcl += frame_len;
+ /* If the high bit of Gorcl (the low 32 bits of the Good Octets
+ * Received Count) was one before the addition,
+ * AND it is zero after, then we lost the carry out,
+ * need to add one to Gorch (Good Octets Received Count High).
+ * This could be simplified if all environments supported
+ * 64-bit integers.
+ */
+ if (carry_bit && ((stats->gorcl & 0x80000000) == 0))
+ stats->gorch++;
+ /* Is this a broadcast or multicast? Check broadcast first,
+ * since the test for a multicast frame will test positive on
+ * a broadcast frame.
+ */
+ if ((mac_addr[0] == (u8)0xff) && (mac_addr[1] == (u8)0xff))
+ /* Broadcast packet */
+ stats->bprc++;
+ else if (*mac_addr & 0x01)
+ /* Multicast packet */
+ stats->mprc++;
+
+ if (frame_len == hw->max_frame_size) {
+ /* In this case, the hardware has overcounted the number of
+ * oversize frames.
+ */
+ if (stats->roc > 0)
+ stats->roc--;
+ }
+
+ /* Adjust the bin counters when the extra byte put the frame in the
+ * wrong bin. Remember that the frame_len was adjusted above.
+ */
+ if (frame_len == 64) {
+ stats->prc64++;
+ stats->prc127--;
+ } else if (frame_len == 127) {
+ stats->prc127++;
+ stats->prc255--;
+ } else if (frame_len == 255) {
+ stats->prc255++;
+ stats->prc511--;
+ } else if (frame_len == 511) {
+ stats->prc511++;
+ stats->prc1023--;
+ } else if (frame_len == 1023) {
+ stats->prc1023++;
+ stats->prc1522--;
+ } else if (frame_len == 1522) {
+ stats->prc1522++;
+ }
+}
+
+/******************************************************************************
+ * Gets the current PCI bus type, speed, and width of the hardware
+ *
+ * hw - Struct containing variables accessed by shared code
+ *****************************************************************************/
+void e1000_get_bus_info(struct e1000_hw *hw)
+{
+ s32 ret_val;
+ u16 pci_ex_link_status;
+ u32 status;
+
+ switch (hw->mac_type) {
+ case e1000_82542_rev2_0:
+ case e1000_82542_rev2_1:
+ hw->bus_type = e1000_bus_type_pci;
+ hw->bus_speed = e1000_bus_speed_unknown;
+ hw->bus_width = e1000_bus_width_unknown;
+ break;
+ case e1000_82571:
+ case e1000_82572:
+ case e1000_82573:
+ case e1000_80003es2lan:
+ hw->bus_type = e1000_bus_type_pci_express;
+ hw->bus_speed = e1000_bus_speed_2500;
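+ /* Read the PCI Express link status capability register and extract
+ * the negotiated link width.
+ */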
+ ret_val = e1000_read_pcie_cap_reg(hw,
+ PCI_EX_LINK_STATUS,
+ &pci_ex_link_status);
+ if (ret_val)
+ hw->bus_width = e1000_bus_width_unknown;
+ else
+ hw->bus_width = (pci_ex_link_status & PCI_EX_LINK_WIDTH_MASK) >>
+ PCI_EX_LINK_WIDTH_SHIFT;
+ break;
+ case e1000_ich8lan:
+ hw->bus_type = e1000_bus_type_pci_express;
+ hw->bus_speed = e1000_bus_speed_2500;
+ hw->bus_width = e1000_bus_width_pciex_1;
+ break;
+ default:
+ status = er32(STATUS);
+ hw->bus_type = (status & E1000_STATUS_PCIX_MODE) ?
+ e1000_bus_type_pcix : e1000_bus_type_pci;
+
+ if (hw->device_id == E1000_DEV_ID_82546EB_QUAD_COPPER) {
+ hw->bus_speed = (hw->bus_type == e1000_bus_type_pci) ?
+ e1000_bus_speed_66 : e1000_bus_speed_120;
+ } else if (hw->bus_type == e1000_bus_type_pci) {
+ hw->bus_speed = (status & E1000_STATUS_PCI66) ?
+ e1000_bus_speed_66 : e1000_bus_speed_33;
+ } else {
+ switch (status & E1000_STATUS_PCIX_SPEED) {
+ case E1000_STATUS_PCIX_SPEED_66:
+ hw->bus_speed = e1000_bus_speed_66;
+ break;
+ case E1000_STATUS_PCIX_SPEED_100:
+ hw->bus_speed = e1000_bus_speed_100;
+ break;
+ case E1000_STATUS_PCIX_SPEED_133:
+ hw->bus_speed = e1000_bus_speed_133;
+ break;
+ default:
+ hw->bus_speed = e1000_bus_speed_reserved;
+ break;
+ }
+ }
+ hw->bus_width = (status & E1000_STATUS_BUS64) ?
+ e1000_bus_width_64 : e1000_bus_width_32;
+ break;
+ }
+}
+
+/******************************************************************************
+ * Writes a value to one of the device's registers using port I/O (as opposed to
+ * memory mapped I/O). Only 82544 and newer devices support port I/O.
+ *
+ * hw - Struct containing variables accessed by shared code
+ * offset - offset to write to
+ * value - value to write
+ *****************************************************************************/
+static void e1000_write_reg_io(struct e1000_hw *hw, u32 offset, u32 value)
+{
+ unsigned long io_addr = hw->io_base;
+ unsigned long io_data = hw->io_base + 4;
+
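+ /* Port I/O uses an address/data register pair: write the target
+ * register offset to IOADDR (io_base), then the value to IODATA
+ * (io_base + 4).
+ */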
+ e1000_io_write(hw, io_addr, offset);
+ e1000_io_write(hw, io_data, value);
+}
+
+/******************************************************************************
+ * Estimates the cable length.
+ *
+ * hw - Struct containing variables accessed by shared code
+ * min_length - The estimated minimum length
+ * max_length - The estimated maximum length
+ *
+ * returns: - E1000_ERR_XXX
+ * E1000_SUCCESS
+ *
+ * This function always returns a length range (minimum & maximum).
+ * For M88 PHYs, it maps the single value returned by the PHY register onto
+ * a minimum and maximum range.
+ * For IGP PHYs, it calculates the range from the AGC registers.
+ *****************************************************************************/
+static s32 e1000_get_cable_length(struct e1000_hw *hw, u16 *min_length,
+ u16 *max_length)
+{
+ s32 ret_val;
+ u16 agc_value = 0;
+ u16 i, phy_data;
+ u16 cable_length;
+
+ DEBUGFUNC("e1000_get_cable_length");
+
+ *min_length = *max_length = 0;
+
+ /* Use old method for Phy older than IGP */
+ if (hw->phy_type == e1000_phy_m88) {
+
+ ret_val = e1000_read_phy_reg(hw, M88E1000_PHY_SPEC_STATUS,
+ &phy_data);
+ if (ret_val)
+ return ret_val;
+ cable_length = (phy_data & M88E1000_PSSR_CABLE_LENGTH) >>
+ M88E1000_PSSR_CABLE_LENGTH_SHIFT;
+
+ /* Convert the enum value to ranged values */
+ switch (cable_length) {
+ case e1000_cable_length_50:
+ *min_length = 0;
+ *max_length = e1000_igp_cable_length_50;
+ break;
+ case e1000_cable_length_50_80:
+ *min_length = e1000_igp_cable_length_50;
+ *max_length = e1000_igp_cable_length_80;
+ break;
+ case e1000_cable_length_80_110:
+ *min_length = e1000_igp_cable_length_80;
+ *max_length = e1000_igp_cable_length_110;
+ break;
+ case e1000_cable_length_110_140:
+ *min_length = e1000_igp_cable_length_110;
+ *max_length = e1000_igp_cable_length_140;
+ break;
+ case e1000_cable_length_140:
+ *min_length = e1000_igp_cable_length_140;
+ *max_length = e1000_igp_cable_length_170;
+ break;
+ default:
+ return -E1000_ERR_PHY;
+ break;
+ }
+ } else if (hw->phy_type == e1000_phy_gg82563) {
+ ret_val = e1000_read_phy_reg(hw, GG82563_PHY_DSP_DISTANCE,
+ &phy_data);
+ if (ret_val)
+ return ret_val;
+ cable_length = phy_data & GG82563_DSPD_CABLE_LENGTH;
+
+ switch (cable_length) {
+ case e1000_gg_cable_length_60:
+ *min_length = 0;
+ *max_length = e1000_igp_cable_length_60;
+ break;
+ case e1000_gg_cable_length_60_115:
+ *min_length = e1000_igp_cable_length_60;
+ *max_length = e1000_igp_cable_length_115;
+ break;
+ case e1000_gg_cable_length_115_150:
+ *min_length = e1000_igp_cable_length_115;
+ *max_length = e1000_igp_cable_length_150;
+ break;
+ case e1000_gg_cable_length_150:
+ *min_length = e1000_igp_cable_length_150;
+ *max_length = e1000_igp_cable_length_180;
+ break;
+ default:
+ return -E1000_ERR_PHY;
+ break;
+ }
+ } else if (hw->phy_type == e1000_phy_igp) { /* For IGP PHY */
+ u16 cur_agc_value;
+ u16 min_agc_value = IGP01E1000_AGC_LENGTH_TABLE_SIZE;
+ u16 agc_reg_array[IGP01E1000_PHY_CHANNEL_NUM] =
+ {IGP01E1000_PHY_AGC_A,
+ IGP01E1000_PHY_AGC_B,
+ IGP01E1000_PHY_AGC_C,
+ IGP01E1000_PHY_AGC_D};
+ /* Read the AGC registers for all channels */
+ for (i = 0; i < IGP01E1000_PHY_CHANNEL_NUM; i++) {
+
+ ret_val = e1000_read_phy_reg(hw, agc_reg_array[i], &phy_data);
+ if (ret_val)
+ return ret_val;
+
+ cur_agc_value = phy_data >> IGP01E1000_AGC_LENGTH_SHIFT;
+
+ /* Value bound check. */
+ if ((cur_agc_value >= IGP01E1000_AGC_LENGTH_TABLE_SIZE - 1) ||
+ (cur_agc_value == 0))
+ return -E1000_ERR_PHY;
+
+ agc_value += cur_agc_value;
+
+ /* Update minimal AGC value. */
+ if (min_agc_value > cur_agc_value)
+ min_agc_value = cur_agc_value;
+ }
+
+ /* Remove the minimal AGC result for length < 50m */
+ if (agc_value < IGP01E1000_PHY_CHANNEL_NUM * e1000_igp_cable_length_50) {
+ agc_value -= min_agc_value;
+
+ /* Get the average length of the remaining 3 channels */
+ agc_value /= (IGP01E1000_PHY_CHANNEL_NUM - 1);
+ } else {
+ /* Get the average length of all the 4 channels. */
+ agc_value /= IGP01E1000_PHY_CHANNEL_NUM;
+ }
+
+ /* Set the range of the calculated length. */
+ *min_length = ((e1000_igp_cable_length_table[agc_value] -
+ IGP01E1000_AGC_RANGE) > 0) ?
+ (e1000_igp_cable_length_table[agc_value] -
+ IGP01E1000_AGC_RANGE) : 0;
+ *max_length = e1000_igp_cable_length_table[agc_value] +
+ IGP01E1000_AGC_RANGE;
+ } else if (hw->phy_type == e1000_phy_igp_2 ||
+ hw->phy_type == e1000_phy_igp_3) {
+ u16 cur_agc_index, max_agc_index = 0;
+ u16 min_agc_index = IGP02E1000_AGC_LENGTH_TABLE_SIZE - 1;
+ u16 agc_reg_array[IGP02E1000_PHY_CHANNEL_NUM] =
+ {IGP02E1000_PHY_AGC_A,
+ IGP02E1000_PHY_AGC_B,
+ IGP02E1000_PHY_AGC_C,
+ IGP02E1000_PHY_AGC_D};
+ /* Read the AGC registers for all channels */
+ for (i = 0; i < IGP02E1000_PHY_CHANNEL_NUM; i++) {
+ ret_val = e1000_read_phy_reg(hw, agc_reg_array[i], &phy_data);
+ if (ret_val)
+ return ret_val;
+
+ /* Getting bits 15:9, which represent the combination of coarse and
+ * fine gain values. The result is a number that can be put into
+ * the lookup table to obtain the approximate cable length. */
+ cur_agc_index = (phy_data >> IGP02E1000_AGC_LENGTH_SHIFT) &
+ IGP02E1000_AGC_LENGTH_MASK;
+
+ /* Array index bound check. */
+ if ((cur_agc_index >= IGP02E1000_AGC_LENGTH_TABLE_SIZE) ||
+ (cur_agc_index == 0))
+ return -E1000_ERR_PHY;
+
+ /* Remove min & max AGC values from calculation. */
+ if (e1000_igp_2_cable_length_table[min_agc_index] >
+ e1000_igp_2_cable_length_table[cur_agc_index])
+ min_agc_index = cur_agc_index;
+ if (e1000_igp_2_cable_length_table[max_agc_index] <
+ e1000_igp_2_cable_length_table[cur_agc_index])
+ max_agc_index = cur_agc_index;
+
+ agc_value += e1000_igp_2_cable_length_table[cur_agc_index];
+ }
+
+ agc_value -= (e1000_igp_2_cable_length_table[min_agc_index] +
+ e1000_igp_2_cable_length_table[max_agc_index]);
+ agc_value /= (IGP02E1000_PHY_CHANNEL_NUM - 2);
+
+ /* Calculate cable length with the error range of +/- 10 meters. */
+ *min_length = ((agc_value - IGP02E1000_AGC_RANGE) > 0) ?
+ (agc_value - IGP02E1000_AGC_RANGE) : 0;
+ *max_length = agc_value + IGP02E1000_AGC_RANGE;
+ }
+
+ return E1000_SUCCESS;
+}
+
+/******************************************************************************
+ * Check the cable polarity
+ *
+ * hw - Struct containing variables accessed by shared code
+ * polarity - output parameter : 0 - Polarity is not reversed
+ * 1 - Polarity is reversed.
+ *
+ * returns: - E1000_ERR_XXX
+ * E1000_SUCCESS
+ *
+ * For PHYs older than IGP, this function simply reads the polarity bit in the
+ * PHY Status register. For IGP PHYs, this bit is valid only if link speed is
+ * 10 Mbps. If the link speed is 100 Mbps there is no polarity so this bit will
+ * return 0. If the link speed is 1000 Mbps the polarity status is in the
+ * IGP01E1000_PHY_PCS_INIT_REG.
+ *****************************************************************************/
+static s32 e1000_check_polarity(struct e1000_hw *hw,
+ e1000_rev_polarity *polarity)
+{
+ s32 ret_val;
+ u16 phy_data;
+
+ DEBUGFUNC("e1000_check_polarity");
+
+ if ((hw->phy_type == e1000_phy_m88) ||
+ (hw->phy_type == e1000_phy_gg82563)) {
+ /* return the Polarity bit in the Status register. */
+ ret_val = e1000_read_phy_reg(hw, M88E1000_PHY_SPEC_STATUS,
+ &phy_data);
+ if (ret_val)
+ return ret_val;
+ *polarity = ((phy_data & M88E1000_PSSR_REV_POLARITY) >>
+ M88E1000_PSSR_REV_POLARITY_SHIFT) ?
+ e1000_rev_polarity_reversed : e1000_rev_polarity_normal;
+
+ } else if (hw->phy_type == e1000_phy_igp ||
+ hw->phy_type == e1000_phy_igp_3 ||
+ hw->phy_type == e1000_phy_igp_2) {
+ /* Read the Status register to check the speed */
+ ret_val = e1000_read_phy_reg(hw, IGP01E1000_PHY_PORT_STATUS,
+ &phy_data);
+ if (ret_val)
+ return ret_val;
+
+ /* If speed is 1000 Mbps, must read the IGP01E1000_PHY_PCS_INIT_REG to
+ * find the polarity status */
+ if ((phy_data & IGP01E1000_PSSR_SPEED_MASK) ==
+ IGP01E1000_PSSR_SPEED_1000MBPS) {
+
+ /* Read the GIG initialization PCS register (0x00B4) */
+ ret_val = e1000_read_phy_reg(hw, IGP01E1000_PHY_PCS_INIT_REG,
+ &phy_data);
+ if (ret_val)
+ return ret_val;
+
+ /* Check the polarity bits */
+ *polarity = (phy_data & IGP01E1000_PHY_POLARITY_MASK) ?
+ e1000_rev_polarity_reversed : e1000_rev_polarity_normal;
+ } else {
+ /* For 10 Mbps, read the polarity bit in the status register. (for
+ * 100 Mbps this bit is always 0) */
+ *polarity = (phy_data & IGP01E1000_PSSR_POLARITY_REVERSED) ?
+ e1000_rev_polarity_reversed : e1000_rev_polarity_normal;
+ }
+ } else if (hw->phy_type == e1000_phy_ife) {
+ ret_val = e1000_read_phy_reg(hw, IFE_PHY_EXTENDED_STATUS_CONTROL,
+ &phy_data);
+ if (ret_val)
+ return ret_val;
+ *polarity = ((phy_data & IFE_PESC_POLARITY_REVERSED) >>
+ IFE_PESC_POLARITY_REVERSED_SHIFT) ?
+ e1000_rev_polarity_reversed : e1000_rev_polarity_normal;
+ }
+ return E1000_SUCCESS;
+}
+
+/******************************************************************************
+ * Check if a downshift occurred
+ *
+ * hw - Struct containing variables accessed by shared code
+ *
+ * The result is stored in hw->speed_downgraded:
+ *   0 - No downshift occurred.
+ *   1 - Downshift occurred.
+ *
+ * returns: - E1000_ERR_XXX
+ * E1000_SUCCESS
+ *
+ * For PHYs older than IGP, this function reads the Downshift bit in the PHY
+ * Specific Status register. For IGP PHYs, it reads the Downgrade bit in the
+ * Link Health register. In IGP this bit is latched high, so the driver must
+ * read it immediately after link is established.
+ *****************************************************************************/
+static s32 e1000_check_downshift(struct e1000_hw *hw)
+{
+ s32 ret_val;
+ u16 phy_data;
+
+ DEBUGFUNC("e1000_check_downshift");
+
+ if (hw->phy_type == e1000_phy_igp ||
+ hw->phy_type == e1000_phy_igp_3 ||
+ hw->phy_type == e1000_phy_igp_2) {
+ ret_val = e1000_read_phy_reg(hw, IGP01E1000_PHY_LINK_HEALTH,
+ &phy_data);
+ if (ret_val)
+ return ret_val;
+
+ hw->speed_downgraded = (phy_data & IGP01E1000_PLHR_SS_DOWNGRADE) ? 1 : 0;
+ } else if ((hw->phy_type == e1000_phy_m88) ||
+ (hw->phy_type == e1000_phy_gg82563)) {
+ ret_val = e1000_read_phy_reg(hw, M88E1000_PHY_SPEC_STATUS,
+ &phy_data);
+ if (ret_val)
+ return ret_val;
+
+ hw->speed_downgraded = (phy_data & M88E1000_PSSR_DOWNSHIFT) >>
+ M88E1000_PSSR_DOWNSHIFT_SHIFT;
+ } else if (hw->phy_type == e1000_phy_ife) {
+ /* e1000_phy_ife supports 10/100 speed only */
+ hw->speed_downgraded = false;
+ }
+
+ return E1000_SUCCESS;
+}
+
+/*****************************************************************************
+ *
+ * 82541_rev_2 & 82547_rev_2 have the capability to configure the DSP when a
+ * gigabit link is achieved to improve link quality.
+ *
+ * hw: Struct containing variables accessed by shared code
+ *
+ * returns: - E1000_ERR_PHY if reading/writing the PHY fails
+ * E1000_SUCCESS in any other case.
+ *
+ ****************************************************************************/
+
+static s32 e1000_config_dsp_after_link_change(struct e1000_hw *hw, bool link_up)
+{
+ s32 ret_val;
+ u16 phy_data, phy_saved_data, speed, duplex, i;
+ u16 dsp_reg_array[IGP01E1000_PHY_CHANNEL_NUM] =
+ {IGP01E1000_PHY_AGC_PARAM_A,
+ IGP01E1000_PHY_AGC_PARAM_B,
+ IGP01E1000_PHY_AGC_PARAM_C,
+ IGP01E1000_PHY_AGC_PARAM_D};
+ u16 min_length, max_length;
+
+ DEBUGFUNC("e1000_config_dsp_after_link_change");
+
+ if (hw->phy_type != e1000_phy_igp)
+ return E1000_SUCCESS;
+
+ if (link_up) {
+ ret_val = e1000_get_speed_and_duplex(hw, &speed, &duplex);
+ if (ret_val) {
+ DEBUGOUT("Error getting link speed and duplex\n");
+ return ret_val;
+ }
+
+ if (speed == SPEED_1000) {
+
+ ret_val = e1000_get_cable_length(hw, &min_length, &max_length);
+ if (ret_val)
+ return ret_val;
+
+ if ((hw->dsp_config_state == e1000_dsp_config_enabled) &&
+ min_length >= e1000_igp_cable_length_50) {
+
+ for (i = 0; i < IGP01E1000_PHY_CHANNEL_NUM; i++) {
+ ret_val = e1000_read_phy_reg(hw, dsp_reg_array[i],
+ &phy_data);
+ if (ret_val)
+ return ret_val;
+
+ phy_data &= ~IGP01E1000_PHY_EDAC_MU_INDEX;
+
+ ret_val = e1000_write_phy_reg(hw, dsp_reg_array[i],
+ phy_data);
+ if (ret_val)
+ return ret_val;
+ }
+ hw->dsp_config_state = e1000_dsp_config_activated;
+ }
+
+ if ((hw->ffe_config_state == e1000_ffe_config_enabled) &&
+ (min_length < e1000_igp_cable_length_50)) {
+
+ u16 ffe_idle_err_timeout = FFE_IDLE_ERR_COUNT_TIMEOUT_20;
+ u32 idle_errs = 0;
+
+ /* clear previous idle error counts */
+ ret_val = e1000_read_phy_reg(hw, PHY_1000T_STATUS,
+ &phy_data);
+ if (ret_val)
+ return ret_val;
+
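+ /* Poll the 1000T status register, accumulating idle errors; if they
+ * exceed the threshold, program the DSP FFE register with the CM/CP
+ * value for this short-cable case and mark the FFE config as active.
+ */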
+ for (i = 0; i < ffe_idle_err_timeout; i++) {
+ udelay(1000);
+ ret_val = e1000_read_phy_reg(hw, PHY_1000T_STATUS,
+ &phy_data);
+ if (ret_val)
+ return ret_val;
+
+ idle_errs += (phy_data & SR_1000T_IDLE_ERROR_CNT);
+ if (idle_errs > SR_1000T_PHY_EXCESSIVE_IDLE_ERR_COUNT) {
+ hw->ffe_config_state = e1000_ffe_config_active;
+
+ ret_val = e1000_write_phy_reg(hw,
+ IGP01E1000_PHY_DSP_FFE,
+ IGP01E1000_PHY_DSP_FFE_CM_CP);
+ if (ret_val)
+ return ret_val;
+ break;
+ }
+
+ if (idle_errs)
+ ffe_idle_err_timeout = FFE_IDLE_ERR_COUNT_TIMEOUT_100;
+ }
+ }
+ }
+ } else {
+ if (hw->dsp_config_state == e1000_dsp_config_activated) {
+ /* Save off the current value of register 0x2F5B to be restored at
+ * the end of the routines. */
+ ret_val = e1000_read_phy_reg(hw, 0x2F5B, &phy_saved_data);
+
+ if (ret_val)
+ return ret_val;
+
+ /* Disable the PHY transmitter */
+ ret_val = e1000_write_phy_reg(hw, 0x2F5B, 0x0003);
+
+ if (ret_val)
+ return ret_val;
+
+ mdelay(20);
+
+ ret_val = e1000_write_phy_reg(hw, 0x0000,
+ IGP01E1000_IEEE_FORCE_GIGA);
+ if (ret_val)
+ return ret_val;
+ for (i = 0; i < IGP01E1000_PHY_CHANNEL_NUM; i++) {
+ ret_val = e1000_read_phy_reg(hw, dsp_reg_array[i], &phy_data);
+ if (ret_val)
+ return ret_val;
+
+ phy_data &= ~IGP01E1000_PHY_EDAC_MU_INDEX;
+ phy_data |= IGP01E1000_PHY_EDAC_SIGN_EXT_9_BITS;
+
+ ret_val = e1000_write_phy_reg(hw,dsp_reg_array[i], phy_data);
+ if (ret_val)
+ return ret_val;
+ }
+
+ ret_val = e1000_write_phy_reg(hw, 0x0000,
+ IGP01E1000_IEEE_RESTART_AUTONEG);
+ if (ret_val)
+ return ret_val;
+
+ mdelay(20);
+
+ /* Now enable the transmitter */
+ ret_val = e1000_write_phy_reg(hw, 0x2F5B, phy_saved_data);
+
+ if (ret_val)
+ return ret_val;
+
+ hw->dsp_config_state = e1000_dsp_config_enabled;
+ }
+
+ if (hw->ffe_config_state == e1000_ffe_config_active) {
+ /* Save off the current value of register 0x2F5B to be restored at
+ * the end of the routines. */
+ ret_val = e1000_read_phy_reg(hw, 0x2F5B, &phy_saved_data);
+
+ if (ret_val)
+ return ret_val;
+
+ /* Disable the PHY transmitter */
+ ret_val = e1000_write_phy_reg(hw, 0x2F5B, 0x0003);
+
+ if (ret_val)
+ return ret_val;
+
+ mdelay(20);
+
+ ret_val = e1000_write_phy_reg(hw, 0x0000,
+ IGP01E1000_IEEE_FORCE_GIGA);
+ if (ret_val)
+ return ret_val;
+ ret_val = e1000_write_phy_reg(hw, IGP01E1000_PHY_DSP_FFE,
+ IGP01E1000_PHY_DSP_FFE_DEFAULT);
+ if (ret_val)
+ return ret_val;
+
+ ret_val = e1000_write_phy_reg(hw, 0x0000,
+ IGP01E1000_IEEE_RESTART_AUTONEG);
+ if (ret_val)
+ return ret_val;
+
+ mdelay(20);
+
+ /* Now enable the transmitter */
+ ret_val = e1000_write_phy_reg(hw, 0x2F5B, phy_saved_data);
+
+ if (ret_val)
+ return ret_val;
+
+ hw->ffe_config_state = e1000_ffe_config_enabled;
+ }
+ }
+ return E1000_SUCCESS;
+}
+
+/*****************************************************************************
+ * Set PHY to class A mode
+ * Assumes the following operations will follow to enable the new class mode.
+ * 1. Do a PHY soft reset
+ * 2. Restart auto-negotiation or force link.
+ *
+ * hw - Struct containing variables accessed by shared code
+ ****************************************************************************/
+static s32 e1000_set_phy_mode(struct e1000_hw *hw)
+{
+ s32 ret_val;
+ u16 eeprom_data;
+
+ DEBUGFUNC("e1000_set_phy_mode");
+
+ if ((hw->mac_type == e1000_82545_rev_3) &&
+ (hw->media_type == e1000_media_type_copper)) {
+ ret_val = e1000_read_eeprom(hw, EEPROM_PHY_CLASS_WORD, 1, &eeprom_data);
+ if (ret_val) {
+ return ret_val;
+ }
+
+ if ((eeprom_data != EEPROM_RESERVED_WORD) &&
+ (eeprom_data & EEPROM_PHY_CLASS_A)) {
+ ret_val = e1000_write_phy_reg(hw, M88E1000_PHY_PAGE_SELECT, 0x000B);
+ if (ret_val)
+ return ret_val;
+ ret_val = e1000_write_phy_reg(hw, M88E1000_PHY_GEN_CONTROL, 0x8104);
+ if (ret_val)
+ return ret_val;
+
+ hw->phy_reset_disable = false;
+ }
+ }
+
+ return E1000_SUCCESS;
+}
+
+/*****************************************************************************
+ *
+ * This function sets the LPLU state according to the active flag. When
+ * activating LPLU this function also disables SmartSpeed and vice versa.
+ * LPLU will not be activated unless the device's autonegotiation advertisement
+ * covers either 10, 10/100 or 10/100/1000 Mbps at all duplexes.
+ * hw: Struct containing variables accessed by shared code
+ * active - true to enable lplu false to disable lplu.
+ *
+ * returns: - E1000_ERR_PHY if reading/writing the PHY fails
+ * E1000_SUCCESS in any other case.
+ *
+ ****************************************************************************/
+
+static s32 e1000_set_d3_lplu_state(struct e1000_hw *hw, bool active)
+{
+ u32 phy_ctrl = 0;
+ s32 ret_val;
+ u16 phy_data;
+ DEBUGFUNC("e1000_set_d3_lplu_state");
+
+ if (hw->phy_type != e1000_phy_igp && hw->phy_type != e1000_phy_igp_2
+ && hw->phy_type != e1000_phy_igp_3)
+ return E1000_SUCCESS;
+
+ /* During normal driver activity LPLU should not be used, or link will be
+ * negotiated starting from the lowest speed (10 Mbps). The capability is
+ * intended for Dx power-state transitions. */
+ if (hw->mac_type == e1000_82541_rev_2 || hw->mac_type == e1000_82547_rev_2) {
+ ret_val = e1000_read_phy_reg(hw, IGP01E1000_GMII_FIFO, &phy_data);
+ if (ret_val)
+ return ret_val;
+ } else if (hw->mac_type == e1000_ich8lan) {
+ /* MAC writes into PHY register based on the state transition
+ * and start auto-negotiation. SW driver can overwrite the settings
+ * in CSR PHY power control E1000_PHY_CTRL register. */
+ phy_ctrl = er32(PHY_CTRL);
+ } else {
+ ret_val = e1000_read_phy_reg(hw, IGP02E1000_PHY_POWER_MGMT, &phy_data);
+ if (ret_val)
+ return ret_val;
+ }
+
+ if (!active) {
+ if (hw->mac_type == e1000_82541_rev_2 ||
+ hw->mac_type == e1000_82547_rev_2) {
+ phy_data &= ~IGP01E1000_GMII_FLEX_SPD;
+ ret_val = e1000_write_phy_reg(hw, IGP01E1000_GMII_FIFO, phy_data);
+ if (ret_val)
+ return ret_val;
+ } else {
+ if (hw->mac_type == e1000_ich8lan) {
+ phy_ctrl &= ~E1000_PHY_CTRL_NOND0A_LPLU;
+ ew32(PHY_CTRL, phy_ctrl);
+ } else {
+ phy_data &= ~IGP02E1000_PM_D3_LPLU;
+ ret_val = e1000_write_phy_reg(hw, IGP02E1000_PHY_POWER_MGMT,
+ phy_data);
+ if (ret_val)
+ return ret_val;
+ }
+ }
+
+ /* LPLU and SmartSpeed are mutually exclusive. LPLU is used during
+ * Dx states where the power conservation is most important. During
+ * driver activity we should enable SmartSpeed, so performance is
+ * maintained. */
+ if (hw->smart_speed == e1000_smart_speed_on) {
+ ret_val = e1000_read_phy_reg(hw, IGP01E1000_PHY_PORT_CONFIG,
+ &phy_data);
+ if (ret_val)
+ return ret_val;
+
+ phy_data |= IGP01E1000_PSCFR_SMART_SPEED;
+ ret_val = e1000_write_phy_reg(hw, IGP01E1000_PHY_PORT_CONFIG,
+ phy_data);
+ if (ret_val)
+ return ret_val;
+ } else if (hw->smart_speed == e1000_smart_speed_off) {
+ ret_val = e1000_read_phy_reg(hw, IGP01E1000_PHY_PORT_CONFIG,
+ &phy_data);
+ if (ret_val)
+ return ret_val;
+
+ phy_data &= ~IGP01E1000_PSCFR_SMART_SPEED;
+ ret_val = e1000_write_phy_reg(hw, IGP01E1000_PHY_PORT_CONFIG,
+ phy_data);
+ if (ret_val)
+ return ret_val;
+ }
+
+ } else if ((hw->autoneg_advertised == AUTONEG_ADVERTISE_SPEED_DEFAULT) ||
+ (hw->autoneg_advertised == AUTONEG_ADVERTISE_10_ALL ) ||
+ (hw->autoneg_advertised == AUTONEG_ADVERTISE_10_100_ALL)) {
+
+ if (hw->mac_type == e1000_82541_rev_2 ||
+ hw->mac_type == e1000_82547_rev_2) {
+ phy_data |= IGP01E1000_GMII_FLEX_SPD;
+ ret_val = e1000_write_phy_reg(hw, IGP01E1000_GMII_FIFO, phy_data);
+ if (ret_val)
+ return ret_val;
+ } else {
+ if (hw->mac_type == e1000_ich8lan) {
+ phy_ctrl |= E1000_PHY_CTRL_NOND0A_LPLU;
+ ew32(PHY_CTRL, phy_ctrl);
+ } else {
+ phy_data |= IGP02E1000_PM_D3_LPLU;
+ ret_val = e1000_write_phy_reg(hw, IGP02E1000_PHY_POWER_MGMT,
+ phy_data);
+ if (ret_val)
+ return ret_val;
+ }
+ }
+
+ /* When LPLU is enabled we should disable SmartSpeed */
+ ret_val = e1000_read_phy_reg(hw, IGP01E1000_PHY_PORT_CONFIG, &phy_data);
+ if (ret_val)
+ return ret_val;
+
+ phy_data &= ~IGP01E1000_PSCFR_SMART_SPEED;
+ ret_val = e1000_write_phy_reg(hw, IGP01E1000_PHY_PORT_CONFIG, phy_data);
+ if (ret_val)
+ return ret_val;
+
+ }
+ return E1000_SUCCESS;
+}
+
+/*****************************************************************************
+ *
+ * This function sets the LPLU D0 state according to the active flag. When
+ * activating LPLU this function also disables SmartSpeed and vice versa.
+ * LPLU will not be activated unless the device's autonegotiation advertisement
+ * covers either 10, 10/100 or 10/100/1000 Mbps at all duplexes.
+ * hw: Struct containing variables accessed by shared code
+ * active - true to enable lplu false to disable lplu.
+ *
+ * returns: - E1000_ERR_PHY if reading/writing the PHY fails
+ * E1000_SUCCESS in any other case.
+ *
+ ****************************************************************************/
+
+static s32 e1000_set_d0_lplu_state(struct e1000_hw *hw, bool active)
+{
+ u32 phy_ctrl = 0;
+ s32 ret_val;
+ u16 phy_data;
+ DEBUGFUNC("e1000_set_d0_lplu_state");
+
+ if (hw->mac_type <= e1000_82547_rev_2)
+ return E1000_SUCCESS;
+
+ if (hw->mac_type == e1000_ich8lan) {
+ phy_ctrl = er32(PHY_CTRL);
+ } else {
+ ret_val = e1000_read_phy_reg(hw, IGP02E1000_PHY_POWER_MGMT, &phy_data);
+ if (ret_val)
+ return ret_val;
+ }
+
+ if (!active) {
+ if (hw->mac_type == e1000_ich8lan) {
+ phy_ctrl &= ~E1000_PHY_CTRL_D0A_LPLU;
+ ew32(PHY_CTRL, phy_ctrl);
+ } else {
+ phy_data &= ~IGP02E1000_PM_D0_LPLU;
+ ret_val = e1000_write_phy_reg(hw, IGP02E1000_PHY_POWER_MGMT, phy_data);
+ if (ret_val)
+ return ret_val;
+ }
+
+ /* LPLU and SmartSpeed are mutually exclusive. LPLU is used during
+ * Dx states where the power conservation is most important. During
+ * driver activity we should enable SmartSpeed, so performance is
+ * maintained. */
+ if (hw->smart_speed == e1000_smart_speed_on) {
+ ret_val = e1000_read_phy_reg(hw, IGP01E1000_PHY_PORT_CONFIG,
+ &phy_data);
+ if (ret_val)
+ return ret_val;
+
+ phy_data |= IGP01E1000_PSCFR_SMART_SPEED;
+ ret_val = e1000_write_phy_reg(hw, IGP01E1000_PHY_PORT_CONFIG,
+ phy_data);
+ if (ret_val)
+ return ret_val;
+ } else if (hw->smart_speed == e1000_smart_speed_off) {
+ ret_val = e1000_read_phy_reg(hw, IGP01E1000_PHY_PORT_CONFIG,
+ &phy_data);
+ if (ret_val)
+ return ret_val;
+
+ phy_data &= ~IGP01E1000_PSCFR_SMART_SPEED;
+ ret_val = e1000_write_phy_reg(hw, IGP01E1000_PHY_PORT_CONFIG,
+ phy_data);
+ if (ret_val)
+ return ret_val;
+ }
+
+
+ } else {
+
+ if (hw->mac_type == e1000_ich8lan) {
+ phy_ctrl |= E1000_PHY_CTRL_D0A_LPLU;
+ ew32(PHY_CTRL, phy_ctrl);
+ } else {
+ phy_data |= IGP02E1000_PM_D0_LPLU;
+ ret_val = e1000_write_phy_reg(hw, IGP02E1000_PHY_POWER_MGMT, phy_data);
+ if (ret_val)
+ return ret_val;
+ }
+
+ /* When LPLU is enabled we should disable SmartSpeed */
+ ret_val = e1000_read_phy_reg(hw, IGP01E1000_PHY_PORT_CONFIG, &phy_data);
+ if (ret_val)
+ return ret_val;
+
+ phy_data &= ~IGP01E1000_PSCFR_SMART_SPEED;
+ ret_val = e1000_write_phy_reg(hw, IGP01E1000_PHY_PORT_CONFIG, phy_data);
+ if (ret_val)
+ return ret_val;
+
+ }
+ return E1000_SUCCESS;
+}
+
+/******************************************************************************
+ * Change VCO speed register to improve Bit Error Rate performance of SERDES.
+ *
+ * hw - Struct containing variables accessed by shared code
+ *****************************************************************************/
+static s32 e1000_set_vco_speed(struct e1000_hw *hw)
+{
+ s32 ret_val;
+ u16 default_page = 0;
+ u16 phy_data;
+
+ DEBUGFUNC("e1000_set_vco_speed");
+
+ switch (hw->mac_type) {
+ case e1000_82545_rev_3:
+ case e1000_82546_rev_3:
+ break;
+ default:
+ return E1000_SUCCESS;
+ }
+
+ /* Set PHY register 30, page 5, bit 8 to 0 */
+
+ ret_val = e1000_read_phy_reg(hw, M88E1000_PHY_PAGE_SELECT, &default_page);
+ if (ret_val)
+ return ret_val;
+
+ ret_val = e1000_write_phy_reg(hw, M88E1000_PHY_PAGE_SELECT, 0x0005);
+ if (ret_val)
+ return ret_val;
+
+ ret_val = e1000_read_phy_reg(hw, M88E1000_PHY_GEN_CONTROL, &phy_data);
+ if (ret_val)
+ return ret_val;
+
+ phy_data &= ~M88E1000_PHY_VCO_REG_BIT8;
+ ret_val = e1000_write_phy_reg(hw, M88E1000_PHY_GEN_CONTROL, phy_data);
+ if (ret_val)
+ return ret_val;
+
+ /* Set PHY register 30, page 4, bit 11 to 1 */
+
+ ret_val = e1000_write_phy_reg(hw, M88E1000_PHY_PAGE_SELECT, 0x0004);
+ if (ret_val)
+ return ret_val;
+
+ ret_val = e1000_read_phy_reg(hw, M88E1000_PHY_GEN_CONTROL, &phy_data);
+ if (ret_val)
+ return ret_val;
+
+ phy_data |= M88E1000_PHY_VCO_REG_BIT11;
+ ret_val = e1000_write_phy_reg(hw, M88E1000_PHY_GEN_CONTROL, phy_data);
+ if (ret_val)
+ return ret_val;
+
+ ret_val = e1000_write_phy_reg(hw, M88E1000_PHY_PAGE_SELECT, default_page);
+ if (ret_val)
+ return ret_val;
+
+ return E1000_SUCCESS;
+}
+
+
+/*****************************************************************************
+ * This function reads the cookie from ARC RAM.
+ *
+ * returns: - E1000_SUCCESS
+ ****************************************************************************/
+static s32 e1000_host_if_read_cookie(struct e1000_hw *hw, u8 *buffer)
+{
+ u8 i;
+ u32 offset = E1000_MNG_DHCP_COOKIE_OFFSET;
+ u8 length = E1000_MNG_DHCP_COOKIE_LENGTH;
+
+ length = (length >> 2);
+ offset = (offset >> 2);
+
+ for (i = 0; i < length; i++) {
+ *((u32 *)buffer + i) =
+ E1000_READ_REG_ARRAY_DWORD(hw, HOST_IF, offset + i);
+ }
+ return E1000_SUCCESS;
+}
+
+
+/*****************************************************************************
+ * This function checks whether the host interface is enabled for command
+ * operation and whether the previous command has completed. It busy-waits
+ * while the previous command is still in progress.
+ *
+ * returns: - E1000_ERR_HOST_INTERFACE_COMMAND if the interface is not ready
+ * or the previous command times out
+ * - E1000_SUCCESS for success.
+ ****************************************************************************/
+static s32 e1000_mng_enable_host_if(struct e1000_hw *hw)
+{
+ u32 hicr;
+ u8 i;
+
+ /* Check that the host interface is enabled. */
+ hicr = er32(HICR);
+ if ((hicr & E1000_HICR_EN) == 0) {
+ DEBUGOUT("E1000_HOST_EN bit disabled.\n");
+ return -E1000_ERR_HOST_INTERFACE_COMMAND;
+ }
+ /* check the previous command is completed */
+ for (i = 0; i < E1000_MNG_DHCP_COMMAND_TIMEOUT; i++) {
+ hicr = er32(HICR);
+ if (!(hicr & E1000_HICR_C))
+ break;
+ mdelay(1);
+ }
+
+ if (i == E1000_MNG_DHCP_COMMAND_TIMEOUT) {
+ DEBUGOUT("Previous command timeout failed .\n");
+ return -E1000_ERR_HOST_INTERFACE_COMMAND;
+ }
+ return E1000_SUCCESS;
+}
+
+/*****************************************************************************
+ * This function writes the buffer content at the given offset on the host
+ * interface. It handles alignment so the writes are done in the most
+ * efficient way, and accumulates the byte sum of the buffer in *sum.
+ *
+ * returns - E1000_SUCCESS for success.
+ ****************************************************************************/
+static s32 e1000_mng_host_if_write(struct e1000_hw *hw, u8 *buffer, u16 length,
+ u16 offset, u8 *sum)
+{
+ u8 *tmp;
+ u8 *bufptr = buffer;
+ u32 data = 0;
+ u16 remaining, i, j, prev_bytes;
+
+ /* sum is only the plain sum of the data bytes, not a checksum */
+
+ if (length == 0 || offset + length > E1000_HI_MAX_MNG_DATA_LENGTH) {
+ return -E1000_ERR_PARAM;
+ }
+
+ tmp = (u8 *)&data;
+ prev_bytes = offset & 0x3;
+ offset &= 0xFFFC;
+ offset >>= 2;
+
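+ /* If the starting offset is not dword aligned, read-modify-write the
+ * first dword so the bytes in front of the offset are preserved.
+ */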
+ if (prev_bytes) {
+ data = E1000_READ_REG_ARRAY_DWORD(hw, HOST_IF, offset);
+ for (j = prev_bytes; j < sizeof(u32); j++) {
+ *(tmp + j) = *bufptr++;
+ *sum += *(tmp + j);
+ }
+ E1000_WRITE_REG_ARRAY_DWORD(hw, HOST_IF, offset, data);
+ length -= j - prev_bytes;
+ offset++;
+ }
+
+ remaining = length & 0x3;
+ length -= remaining;
+
+ /* Calculate length in DWORDs */
+ length >>= 2;
+
+ /* The device driver writes the relevant command block into the
+ * ram area. */
+ for (i = 0; i < length; i++) {
+ for (j = 0; j < sizeof(u32); j++) {
+ *(tmp + j) = *bufptr++;
+ *sum += *(tmp + j);
+ }
+
+ E1000_WRITE_REG_ARRAY_DWORD(hw, HOST_IF, offset + i, data);
+ }
+ if (remaining) {
+ for (j = 0; j < sizeof(u32); j++) {
+ if (j < remaining)
+ *(tmp + j) = *bufptr++;
+ else
+ *(tmp + j) = 0;
+
+ *sum += *(tmp + j);
+ }
+ E1000_WRITE_REG_ARRAY_DWORD(hw, HOST_IF, offset + i, data);
+ }
+
+ return E1000_SUCCESS;
+}
+
+
+/*****************************************************************************
+ * This function writes the command header after performing the checksum calculation.
+ *
+ * returns - E1000_SUCCESS for success.
+ ****************************************************************************/
+static s32 e1000_mng_write_cmd_header(struct e1000_hw *hw,
+ struct e1000_host_mng_command_header *hdr)
+{
+ u16 i;
+ u8 sum;
+ u8 *buffer;
+
+ /* Write the whole command header structure which includes sum of
+ * the buffer */
+
+ u16 length = sizeof(struct e1000_host_mng_command_header);
+
+ sum = hdr->checksum;
+ hdr->checksum = 0;
+
+ buffer = (u8 *)hdr;
+ i = length;
+ while (i--)
+ sum += buffer[i];
+
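+ /* Store the two's complement of the byte sum so that the bytes of the
+ * complete header, including this checksum field, sum to zero.
+ */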
+ hdr->checksum = 0 - sum;
+
+ length >>= 2;
+ /* The device driver writes the relevant command block into the ram area. */
+ for (i = 0; i < length; i++) {
+ E1000_WRITE_REG_ARRAY_DWORD(hw, HOST_IF, i, *((u32 *)hdr + i));
+ E1000_WRITE_FLUSH();
+ }
+
+ return E1000_SUCCESS;
+}
+
+
+/*****************************************************************************
+ * This function indicates to ARC that a new command is pending which completes
+ * one write operation by the driver.
+ *
+ * returns - E1000_SUCCESS for success.
+ ****************************************************************************/
+static s32 e1000_mng_write_commit(struct e1000_hw *hw)
+{
+ u32 hicr;
+
+ hicr = er32(HICR);
+ /* Setting this bit tells the ARC that a new command is pending. */
+ ew32(HICR, hicr | E1000_HICR_C);
+
+ return E1000_SUCCESS;
+}
+
+
+/*****************************************************************************
+ * This function checks the mode of the firmware.
+ *
+ * returns - true when the firmware mode is IAMT, false otherwise.
+ ****************************************************************************/
+bool e1000_check_mng_mode(struct e1000_hw *hw)
+{
+ u32 fwsm;
+
+ fwsm = er32(FWSM);
+
+ if (hw->mac_type == e1000_ich8lan) {
+ if ((fwsm & E1000_FWSM_MODE_MASK) ==
+ (E1000_MNG_ICH_IAMT_MODE << E1000_FWSM_MODE_SHIFT))
+ return true;
+ } else if ((fwsm & E1000_FWSM_MODE_MASK) ==
+ (E1000_MNG_IAMT_MODE << E1000_FWSM_MODE_SHIFT))
+ return true;
+
+ return false;
+}
+
+
+/*****************************************************************************
+ * This function writes the DHCP information to the host interface.
+ ****************************************************************************/
+s32 e1000_mng_write_dhcp_info(struct e1000_hw *hw, u8 *buffer, u16 length)
+{
+ s32 ret_val;
+ struct e1000_host_mng_command_header hdr;
+
+ hdr.command_id = E1000_MNG_DHCP_TX_PAYLOAD_CMD;
+ hdr.command_length = length;
+ hdr.reserved1 = 0;
+ hdr.reserved2 = 0;
+ hdr.checksum = 0;
+
+ ret_val = e1000_mng_enable_host_if(hw);
+ if (ret_val == E1000_SUCCESS) {
+ ret_val = e1000_mng_host_if_write(hw, buffer, length, sizeof(hdr),
+ &(hdr.checksum));
+ if (ret_val == E1000_SUCCESS) {
+ ret_val = e1000_mng_write_cmd_header(hw, &hdr);
+ if (ret_val == E1000_SUCCESS)
+ ret_val = e1000_mng_write_commit(hw);
+ }
+ }
+ return ret_val;
+}
+
+
+/*****************************************************************************
+ * This function calculates the checksum.
+ *
+ * returns - checksum of buffer contents.
+ ****************************************************************************/
+static u8 e1000_calculate_mng_checksum(char *buffer, u32 length)
+{
+ u8 sum = 0;
+ u32 i;
+
+ if (!buffer)
+ return 0;
+
+ for (i=0; i < length; i++)
+ sum += buffer[i];
+
+ return (u8)(0 - sum);
+}
+
+/*****************************************************************************
+ * This function checks whether tx pkt filtering needs to be enabled or not.
+ *
+ * returns - true if packet filtering is enabled, false otherwise.
+ ****************************************************************************/
+bool e1000_enable_tx_pkt_filtering(struct e1000_hw *hw)
+{
+ /* called in init as well as watchdog timer functions */
+
+ s32 ret_val, checksum;
+ bool tx_filter = false;
+ struct e1000_host_mng_dhcp_cookie *hdr = &(hw->mng_cookie);
+ u8 *buffer = (u8 *) &(hw->mng_cookie);
+
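+ /* When the adapter is in management mode and the host interface is
+ * available, filtering is enabled if the cookie cannot be read or fails
+ * validation, or if a valid cookie advertises parsing support;
+ * otherwise it stays disabled.
+ */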
+ if (e1000_check_mng_mode(hw)) {
+ ret_val = e1000_mng_enable_host_if(hw);
+ if (ret_val == E1000_SUCCESS) {
+ ret_val = e1000_host_if_read_cookie(hw, buffer);
+ if (ret_val == E1000_SUCCESS) {
+ checksum = hdr->checksum;
+ hdr->checksum = 0;
+ if ((hdr->signature == E1000_IAMT_SIGNATURE) &&
+ checksum == e1000_calculate_mng_checksum((char *)buffer,
+ E1000_MNG_DHCP_COOKIE_LENGTH)) {
+ if (hdr->status &
+ E1000_MNG_DHCP_COOKIE_STATUS_PARSING_SUPPORT)
+ tx_filter = true;
+ } else
+ tx_filter = true;
+ } else
+ tx_filter = true;
+ }
+ }
+
+ hw->tx_pkt_filtering = tx_filter;
+ return tx_filter;
+}
+
+/******************************************************************************
+ * Verifies whether the hardware needs to allow ARPs to be processed by the host
+ *
+ * hw - Struct containing variables accessed by shared code
+ *
+ * returns: - true/false
+ *
+ *****************************************************************************/
+u32 e1000_enable_mng_pass_thru(struct e1000_hw *hw)
+{
+ u32 manc;
+ u32 fwsm, factps;
+
+ if (hw->asf_firmware_present) {
+ manc = er32(MANC);
+
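+ /* Pass-through requires TCO receive and MAC address filtering to be
+ * enabled; beyond that, either the ARC firmware must be in pass-through
+ * mode with the manageability clock not gated, or, on parts without a
+ * valid ARC subsystem, SMBus must be enabled with ASF disabled.
+ */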
+ if (!(manc & E1000_MANC_RCV_TCO_EN) ||
+ !(manc & E1000_MANC_EN_MAC_ADDR_FILTER))
+ return false;
+ if (e1000_arc_subsystem_valid(hw)) {
+ fwsm = er32(FWSM);
+ factps = er32(FACTPS);
+
+ if ((((fwsm & E1000_FWSM_MODE_MASK) >> E1000_FWSM_MODE_SHIFT) ==
+ e1000_mng_mode_pt) && !(factps & E1000_FACTPS_MNGCG))
+ return true;
+ } else
+ if ((manc & E1000_MANC_SMBUS_EN) && !(manc & E1000_MANC_ASF_EN))
+ return true;
+ }
+ return false;
+}
+
+static s32 e1000_polarity_reversal_workaround(struct e1000_hw *hw)
+{
+ s32 ret_val;
+ u16 mii_status_reg;
+ u16 i;
+
+ /* Polarity reversal workaround for forced 10F/10H links. */
+
+ /* Disable the transmitter on the PHY */
+
+ ret_val = e1000_write_phy_reg(hw, M88E1000_PHY_PAGE_SELECT, 0x0019);
+ if (ret_val)
+ return ret_val;
+ ret_val = e1000_write_phy_reg(hw, M88E1000_PHY_GEN_CONTROL, 0xFFFF);
+ if (ret_val)
+ return ret_val;
+
+ ret_val = e1000_write_phy_reg(hw, M88E1000_PHY_PAGE_SELECT, 0x0000);
+ if (ret_val)
+ return ret_val;
+
+ /* This loop will early-out if the NO link condition has been met. */
+ for (i = PHY_FORCE_TIME; i > 0; i--) {
+ /* Read the MII Status Register and wait for Link Status bit
+ * to be clear.
+ */
+
+ ret_val = e1000_read_phy_reg(hw, PHY_STATUS, &mii_status_reg);
+ if (ret_val)
+ return ret_val;
+
+ ret_val = e1000_read_phy_reg(hw, PHY_STATUS, &mii_status_reg);
+ if (ret_val)
+ return ret_val;
+
+ if ((mii_status_reg & ~MII_SR_LINK_STATUS) == 0) break;
+ mdelay(100);
+ }
+
+ /* Recommended delay time after link has been lost */
+ mdelay(1000);
+
+ /* Now we will re-enable the transmitter on the PHY */
+
+ ret_val = e1000_write_phy_reg(hw, M88E1000_PHY_PAGE_SELECT, 0x0019);
+ if (ret_val)
+ return ret_val;
+ mdelay(50);
+ ret_val = e1000_write_phy_reg(hw, M88E1000_PHY_GEN_CONTROL, 0xFFF0);
+ if (ret_val)
+ return ret_val;
+ mdelay(50);
+ ret_val = e1000_write_phy_reg(hw, M88E1000_PHY_GEN_CONTROL, 0xFF00);
+ if (ret_val)
+ return ret_val;
+ mdelay(50);
+ ret_val = e1000_write_phy_reg(hw, M88E1000_PHY_GEN_CONTROL, 0x0000);
+ if (ret_val)
+ return ret_val;
+
+ ret_val = e1000_write_phy_reg(hw, M88E1000_PHY_PAGE_SELECT, 0x0000);
+ if (ret_val)
+ return ret_val;
+
+ /* This loop will early-out if the link condition has been met. */
+ for (i = PHY_FORCE_TIME; i > 0; i--) {
+ /* Read the MII Status Register and wait for Link Status bit
+ * to be set.
+ */
+
+ ret_val = e1000_read_phy_reg(hw, PHY_STATUS, &mii_status_reg);
+ if (ret_val)
+ return ret_val;
+
+ ret_val = e1000_read_phy_reg(hw, PHY_STATUS, &mii_status_reg);
+ if (ret_val)
+ return ret_val;
+
+ if (mii_status_reg & MII_SR_LINK_STATUS) break;
+ mdelay(100);
+ }
+ return E1000_SUCCESS;
+}
+
+/***************************************************************************
+ *
+ * Disables PCI-Express master access.
+ *
+ * hw: Struct containing variables accessed by shared code
+ *
+ * returns: - none.
+ *
+ ***************************************************************************/
+static void e1000_set_pci_express_master_disable(struct e1000_hw *hw)
+{
+ u32 ctrl;
+
+ DEBUGFUNC("e1000_set_pci_express_master_disable");
+
+ if (hw->bus_type != e1000_bus_type_pci_express)
+ return;
+
+ ctrl = er32(CTRL);
+ ctrl |= E1000_CTRL_GIO_MASTER_DISABLE;
+ ew32(CTRL, ctrl);
+}
+
+/*******************************************************************************
+ *
+ * Disables PCI-Express master access and verifies there are no pending requests
+ *
+ * hw: Struct containing variables accessed by shared code
+ *
+ * returns: - E1000_ERR_MASTER_REQUESTS_PENDING if master disable bit hasn't
+ * caused the master requests to be disabled.
+ * E1000_SUCCESS master requests disabled.
+ *
+ ******************************************************************************/
+s32 e1000_disable_pciex_master(struct e1000_hw *hw)
+{
+ s32 timeout = MASTER_DISABLE_TIMEOUT; /* 80ms */
+
+ DEBUGFUNC("e1000_disable_pciex_master");
+
+ if (hw->bus_type != e1000_bus_type_pci_express)
+ return E1000_SUCCESS;
+
+ e1000_set_pci_express_master_disable(hw);
+
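+ /* Wait for the GIO master enable status bit to clear, indicating that
+ * all outstanding PCI-Express master requests have completed.
+ */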
+ while (timeout) {
+ if (!(er32(STATUS) & E1000_STATUS_GIO_MASTER_ENABLE))
+ break;
+ else
+ udelay(100);
+ timeout--;
+ }
+
+ if (!timeout) {
+ DEBUGOUT("Master requests are pending.\n");
+ return -E1000_ERR_MASTER_REQUESTS_PENDING;
+ }
+
+ return E1000_SUCCESS;
+}
+
+/*******************************************************************************
+ *
+ * Check for EEPROM Auto Read bit done.
+ *
+ * hw: Struct containing variables accessed by shared code
+ *
+ * returns: - E1000_ERR_RESET if the MAC fails to reset
+ * E1000_SUCCESS in any other case.
+ *
+ ******************************************************************************/
+static s32 e1000_get_auto_rd_done(struct e1000_hw *hw)
+{
+ s32 timeout = AUTO_READ_DONE_TIMEOUT;
+
+ DEBUGFUNC("e1000_get_auto_rd_done");
+
+ switch (hw->mac_type) {
+ default:
+ msleep(5);
+ break;
+ case e1000_82571:
+ case e1000_82572:
+ case e1000_82573:
+ case e1000_80003es2lan:
+ case e1000_ich8lan:
+ while (timeout) {
+ if (er32(EECD) & E1000_EECD_AUTO_RD)
+ break;
+ else msleep(1);
+ timeout--;
+ }
+
+ if (!timeout) {
+ DEBUGOUT("Auto read by HW from EEPROM has not completed.\n");
+ return -E1000_ERR_RESET;
+ }
+ break;
+ }
+
+ /* PHY configuration from NVM just starts after EECD_AUTO_RD sets to high.
+ * Need to wait for PHY configuration completion before accessing NVM
+ * and PHY. */
+ if (hw->mac_type == e1000_82573)
+ msleep(25);
+
+ return E1000_SUCCESS;
+}
+
+/***************************************************************************
+ * Checks if the PHY configuration is done
+ *
+ * hw: Struct containing variables accessed by shared code
+ *
+ * returns: - E1000_ERR_RESET if the MAC fails to reset
+ * E1000_SUCCESS in any other case.
+ *
+ ***************************************************************************/
+static s32 e1000_get_phy_cfg_done(struct e1000_hw *hw)
+{
+ s32 timeout = PHY_CFG_TIMEOUT;
+ u32 cfg_mask = E1000_EEPROM_CFG_DONE;
+
+ DEBUGFUNC("e1000_get_phy_cfg_done");
+
+ switch (hw->mac_type) {
+ default:
+ mdelay(10);
+ break;
+ case e1000_80003es2lan:
+ /* Separate *_CFG_DONE_* bit for each port */
+ if (er32(STATUS) & E1000_STATUS_FUNC_1)
+ cfg_mask = E1000_EEPROM_CFG_DONE_PORT_1;
+ /* Fall Through */
+ case e1000_82571:
+ case e1000_82572:
+ while (timeout) {
+ if (er32(EEMNGCTL) & cfg_mask)
+ break;
+ else
+ msleep(1);
+ timeout--;
+ }
+ if (!timeout) {
+ DEBUGOUT("MNG configuration cycle has not completed.\n");
+ return -E1000_ERR_RESET;
+ }
+ break;
+ }
+
+ return E1000_SUCCESS;
+}
+
+/***************************************************************************
+ *
+ * Uses the combination of the SMBI and SWESMBI semaphore bits to protect
+ * adapter resets and EEPROM access.
+ *
+ * hw: Struct containing variables accessed by shared code
+ *
+ * returns: - E1000_ERR_EEPROM if accessing the EEPROM fails.
+ * E1000_SUCCESS in any other case.
+ *
+ ***************************************************************************/
+static s32 e1000_get_hw_eeprom_semaphore(struct e1000_hw *hw)
+{
+ s32 timeout;
+ u32 swsm;
+
+ DEBUGFUNC("e1000_get_hw_eeprom_semaphore");
+
+ if (!hw->eeprom_semaphore_present)
+ return E1000_SUCCESS;
+
+ if (hw->mac_type == e1000_80003es2lan) {
+ /* Get the SW semaphore. */
+ if (e1000_get_software_semaphore(hw) != E1000_SUCCESS)
+ return -E1000_ERR_EEPROM;
+ }
+
+ /* Get the FW semaphore. */
+ timeout = hw->eeprom.word_size + 1;
+ while (timeout) {
+ swsm = er32(SWSM);
+ swsm |= E1000_SWSM_SWESMBI;
+ ew32(SWSM, swsm);
+ /* if we managed to set the bit we got the semaphore. */
+ swsm = er32(SWSM);
+ if (swsm & E1000_SWSM_SWESMBI)
+ break;
+
+ udelay(50);
+ timeout--;
+ }
+
+ if (!timeout) {
+ /* Release semaphores */
+ e1000_put_hw_eeprom_semaphore(hw);
+ DEBUGOUT("Driver can't access the Eeprom - SWESMBI bit is set.\n");
+ return -E1000_ERR_EEPROM;
+ }
+
+ return E1000_SUCCESS;
+}
+
+/***************************************************************************
+ * This function clears HW semaphore bits.
+ *
+ * hw: Struct containing variables accessed by shared code
+ *
+ * returns: - None.
+ *
+ ***************************************************************************/
+static void e1000_put_hw_eeprom_semaphore(struct e1000_hw *hw)
+{
+ u32 swsm;
+
+ DEBUGFUNC("e1000_put_hw_eeprom_semaphore");
+
+ if (!hw->eeprom_semaphore_present)
+ return;
+
+ swsm = er32(SWSM);
+ if (hw->mac_type == e1000_80003es2lan) {
+ /* Release both semaphores. */
+ swsm &= ~(E1000_SWSM_SMBI | E1000_SWSM_SWESMBI);
+ } else
+ swsm &= ~(E1000_SWSM_SWESMBI);
+ ew32(SWSM, swsm);
+}
+
+/***************************************************************************
+ *
+ * Obtaining software semaphore bit (SMBI) before resetting PHY.
+ *
+ * hw: Struct containing variables accessed by shared code
+ *
+ * returns: - E1000_ERR_RESET if the semaphore cannot be obtained.
+ * E1000_SUCCESS in any other case.
+ *
+ ***************************************************************************/
+static s32 e1000_get_software_semaphore(struct e1000_hw *hw)
+{
+ s32 timeout = hw->eeprom.word_size + 1;
+ u32 swsm;
+
+ DEBUGFUNC("e1000_get_software_semaphore");
+
+ if (hw->mac_type != e1000_80003es2lan) {
+ return E1000_SUCCESS;
+ }
+
+ while (timeout) {
+ swsm = er32(SWSM);
+ /* If SMBI bit cleared, it is now set and we hold the semaphore */
+ if (!(swsm & E1000_SWSM_SMBI))
+ break;
+ mdelay(1);
+ timeout--;
+ }
+
+ if (!timeout) {
+ DEBUGOUT("Driver can't access device - SMBI bit is set.\n");
+ return -E1000_ERR_RESET;
+ }
+
+ return E1000_SUCCESS;
+}
+
+/***************************************************************************
+ *
+ * Release semaphore bit (SMBI).
+ *
+ * hw: Struct containing variables accessed by shared code
+ *
+ ***************************************************************************/
+static void e1000_release_software_semaphore(struct e1000_hw *hw)
+{
+ u32 swsm;
+
+ DEBUGFUNC("e1000_release_software_semaphore");
+
+ if (hw->mac_type != e1000_80003es2lan) {
+ return;
+ }
+
+ swsm = er32(SWSM);
+ /* Release the SW semaphores.*/
+ swsm &= ~E1000_SWSM_SMBI;
+ ew32(SWSM, swsm);
+}
+
+/******************************************************************************
+ * Checks if PHY reset is blocked due to SOL/IDER session, for example.
+ * Returning E1000_BLK_PHY_RESET isn't necessarily an error. But it's up to
+ * the caller to figure out how to deal with it.
+ *
+ * hw - Struct containing variables accessed by shared code
+ *
+ * returns: - E1000_BLK_PHY_RESET
+ * E1000_SUCCESS
+ *
+ *****************************************************************************/
+s32 e1000_check_phy_reset_block(struct e1000_hw *hw)
+{
+ u32 manc = 0;
+ u32 fwsm = 0;
+
+ if (hw->mac_type == e1000_ich8lan) {
+ fwsm = er32(FWSM);
+ return (fwsm & E1000_FWSM_RSPCIPHY) ? E1000_SUCCESS
+ : E1000_BLK_PHY_RESET;
+ }
+
+ if (hw->mac_type > e1000_82547_rev_2)
+ manc = er32(MANC);
+ return (manc & E1000_MANC_BLK_PHY_RST_ON_IDE) ?
+ E1000_BLK_PHY_RESET : E1000_SUCCESS;
+}
+
+static u8 e1000_arc_subsystem_valid(struct e1000_hw *hw)
+{
+ u32 fwsm;
+
+ /* On 8257x silicon, registers in the range of 0x8800 - 0x8FFC
+ * may not be provided a DMA clock when no manageability features are
+ * enabled. We do not want to perform any reads/writes to these registers
+ * if this is the case. We read FWSM to determine the manageability mode.
+ */
+ switch (hw->mac_type) {
+ case e1000_82571:
+ case e1000_82572:
+ case e1000_82573:
+ case e1000_80003es2lan:
+ fwsm = er32(FWSM);
+ if ((fwsm & E1000_FWSM_MODE_MASK) != 0)
+ return true;
+ break;
+ case e1000_ich8lan:
+ return true;
+ default:
+ break;
+ }
+ return false;
+}
+
+
+/******************************************************************************
+ * Configure PCI-Ex no-snoop
+ *
+ * hw - Struct containing variables accessed by shared code.
+ * no_snoop - Bitmap of no-snoop events.
+ *
+ * returns: E1000_SUCCESS
+ *
+ *****************************************************************************/
+static s32 e1000_set_pci_ex_no_snoop(struct e1000_hw *hw, u32 no_snoop)
+{
+ u32 gcr_reg = 0;
+
+ DEBUGFUNC("e1000_set_pci_ex_no_snoop");
+
+ if (hw->bus_type == e1000_bus_type_unknown)
+ e1000_get_bus_info(hw);
+
+ if (hw->bus_type != e1000_bus_type_pci_express)
+ return E1000_SUCCESS;
+
+ if (no_snoop) {
+ gcr_reg = er32(GCR);
+ gcr_reg &= ~(PCI_EX_NO_SNOOP_ALL);
+ gcr_reg |= no_snoop;
+ ew32(GCR, gcr_reg);
+ }
+ if (hw->mac_type == e1000_ich8lan) {
+ u32 ctrl_ext;
+
+ ew32(GCR, PCI_EX_82566_SNOOP_ALL);
+
+ ctrl_ext = er32(CTRL_EXT);
+ ctrl_ext |= E1000_CTRL_EXT_RO_DIS;
+ ew32(CTRL_EXT, ctrl_ext);
+ }
+
+ return E1000_SUCCESS;
+}
+
+/***************************************************************************
+ *
+ * Get software semaphore FLAG bit (SWFLAG).
+ * SWFLAG is used to synchronize access to all shared resources between
+ * SW, FW and HW.
+ *
+ * hw: Struct containing variables accessed by shared code
+ *
+ ***************************************************************************/
+static s32 e1000_get_software_flag(struct e1000_hw *hw)
+{
+ s32 timeout = PHY_CFG_TIMEOUT;
+ u32 extcnf_ctrl;
+
+ DEBUGFUNC("e1000_get_software_flag");
+
+ if (hw->mac_type == e1000_ich8lan) {
+ while (timeout) {
+ extcnf_ctrl = er32(EXTCNF_CTRL);
+ extcnf_ctrl |= E1000_EXTCNF_CTRL_SWFLAG;
+ ew32(EXTCNF_CTRL, extcnf_ctrl);
+
+ extcnf_ctrl = er32(EXTCNF_CTRL);
+ if (extcnf_ctrl & E1000_EXTCNF_CTRL_SWFLAG)
+ break;
+ mdelay(1);
+ timeout--;
+ }
+
+ if (!timeout) {
+ DEBUGOUT("FW or HW locks the resource too long.\n");
+ return -E1000_ERR_CONFIG;
+ }
+ }
+
+ return E1000_SUCCESS;
+}
+
+/***************************************************************************
+ *
+ * Release software semaphore FLAG bit (SWFLAG).
+ * SWFLAG is used to synchronize access to all shared resources between
+ * SW, FW and HW.
+ *
+ * hw: Struct containing variables accessed by shared code
+ *
+ ***************************************************************************/
+static void e1000_release_software_flag(struct e1000_hw *hw)
+{
+ u32 extcnf_ctrl;
+
+ DEBUGFUNC("e1000_release_software_flag");
+
+ if (hw->mac_type == e1000_ich8lan) {
+ extcnf_ctrl = er32(EXTCNF_CTRL);
+ extcnf_ctrl &= ~E1000_EXTCNF_CTRL_SWFLAG;
+ ew32(EXTCNF_CTRL, extcnf_ctrl);
+ }
+
+ return;
+}
+
+/******************************************************************************
+ * Reads a 16 bit word or words from the EEPROM using the ICH8's flash access
+ * register.
+ *
+ * hw - Struct containing variables accessed by shared code
+ * offset - offset of word in the EEPROM to read
+ * data - word read from the EEPROM
+ * words - number of words to read
+ *****************************************************************************/
+static s32 e1000_read_eeprom_ich8(struct e1000_hw *hw, u16 offset, u16 words,
+ u16 *data)
+{
+ s32 error = E1000_SUCCESS;
+ u32 flash_bank = 0;
+ u32 act_offset = 0;
+ u32 bank_offset = 0;
+ u16 word = 0;
+ u16 i = 0;
+
+ /* We need to know which is the valid flash bank. In the event
+ * that we didn't allocate eeprom_shadow_ram, we may not be
+ * managing flash_bank. So it cannot be trusted and needs
+ * to be updated with each read.
+ */
+ /* Value of bit 22 corresponds to the flash bank we're on. */
+ flash_bank = (er32(EECD) & E1000_EECD_SEC1VAL) ? 1 : 0;
+
+ /* Adjust offset appropriately if we're on bank 1 - adjust for word size */
+ bank_offset = flash_bank * (hw->flash_bank_size * 2);
+
+ error = e1000_get_software_flag(hw);
+ if (error != E1000_SUCCESS)
+ return error;
+
+ for (i = 0; i < words; i++) {
+ if (hw->eeprom_shadow_ram != NULL &&
+ hw->eeprom_shadow_ram[offset+i].modified) {
+ data[i] = hw->eeprom_shadow_ram[offset+i].eeprom_word;
+ } else {
+ /* The NVM part needs a byte offset, hence * 2 */
+ act_offset = bank_offset + ((offset + i) * 2);
+ error = e1000_read_ich8_word(hw, act_offset, &word);
+ if (error != E1000_SUCCESS)
+ break;
+ data[i] = word;
+ }
+ }
+
+ e1000_release_software_flag(hw);
+
+ return error;
+}
+
+/******************************************************************************
+ * Writes a 16 bit word or words to the EEPROM using the ICH8's flash access
+ * register. Actually, writes are written to the shadow ram cache in the hw
+ * structure hw->e1000_shadow_ram. e1000_commit_shadow_ram flushes this to
+ * the NVM, which occurs when the NVM checksum is updated.
+ *
+ * hw - Struct containing variables accessed by shared code
+ * offset - offset of word in the EEPROM to write
+ * words - number of words to write
+ * data - words to write to the EEPROM
+ *****************************************************************************/
+static s32 e1000_write_eeprom_ich8(struct e1000_hw *hw, u16 offset, u16 words,
+ u16 *data)
+{
+ u32 i = 0;
+ s32 error = E1000_SUCCESS;
+
+ error = e1000_get_software_flag(hw);
+ if (error != E1000_SUCCESS)
+ return error;
+
+ /* A driver can write to the NVM only if it has eeprom_shadow_ram
+ * allocated. Subsequent reads to the modified words are read from
+ * this cached structure as well. Writes will only go into this
+ * cached structure unless it's followed by a call to
+ * e1000_update_eeprom_checksum() where it will commit the changes
+ * and clear the "modified" field.
+ */
+ if (hw->eeprom_shadow_ram != NULL) {
+ for (i = 0; i < words; i++) {
+ if ((offset + i) < E1000_SHADOW_RAM_WORDS) {
+ hw->eeprom_shadow_ram[offset+i].modified = true;
+ hw->eeprom_shadow_ram[offset+i].eeprom_word = data[i];
+ } else {
+ error = -E1000_ERR_EEPROM;
+ break;
+ }
+ }
+ } else {
+ /* Drivers have the option to not allocate eeprom_shadow_ram as long
+ * as they don't perform any NVM writes. An attempt to do so
+ * will result in this error.
+ */
+ error = -E1000_ERR_EEPROM;
+ }
+
+ e1000_release_software_flag(hw);
+
+ return error;
+}
+
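+/* Illustrative sketch of the write path described above (not code taken from
+ * the driver): callers use the public e1000_write_eeprom() entry point and
+ * commit the cached words by updating the NVM checksum. The offset and value
+ * below are hypothetical.
+ *
+ *   u16 word = 0x1234;
+ *   if (e1000_write_eeprom(hw, offset, 1, &word) == E1000_SUCCESS)
+ *       e1000_update_eeprom_checksum(hw);  // flushes shadow RAM to flash
+ */
+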
+/******************************************************************************
+ * This function does initial flash setup so that a new read/write/erase cycle
+ * can be started.
+ *
+ * hw - The pointer to the hw structure
+ ****************************************************************************/
+static s32 e1000_ich8_cycle_init(struct e1000_hw *hw)
+{
+ union ich8_hws_flash_status hsfsts;
+ s32 error = E1000_ERR_EEPROM;
+ s32 i = 0;
+
+ DEBUGFUNC("e1000_ich8_cycle_init");
+
+ hsfsts.regval = E1000_READ_ICH_FLASH_REG16(hw, ICH_FLASH_HSFSTS);
+
+ /* Maybe check the Flash Descriptor Valid bit in HW status */
+ if (hsfsts.hsf_status.fldesvalid == 0) {
+ DEBUGOUT("Flash descriptor invalid. SW Sequencing must be used.");
+ return error;
+ }
+
+ /* Clear FCERR in Hw status by writing 1 */
+ /* Clear DAEL in Hw status by writing a 1 */
+ hsfsts.hsf_status.flcerr = 1;
+ hsfsts.hsf_status.dael = 1;
+
+ E1000_WRITE_ICH_FLASH_REG16(hw, ICH_FLASH_HSFSTS, hsfsts.regval);
+
+ /* Either we should have a hardware SPI cycle-in-progress bit to check
+ * against in order to start a new cycle, or the FDONE bit should be
+ * changed in the hardware so that it is 1 after a hardware reset, which
+ * can then be used as an indication of whether a cycle is in progress or
+ * has been completed. We should also have some software semaphore
+ * mechanism to guard FDONE or the cycle-in-progress bit so that access to
+ * those bits by two threads is serialized, or some way to prevent two
+ * threads from starting the cycle at the same time. */
+
+ if (hsfsts.hsf_status.flcinprog == 0) {
+ /* There is no cycle running at present, so we can start a cycle */
+ /* Begin by setting Flash Cycle Done. */
+ hsfsts.hsf_status.flcdone = 1;
+ E1000_WRITE_ICH_FLASH_REG16(hw, ICH_FLASH_HSFSTS, hsfsts.regval);
+ error = E1000_SUCCESS;
+ } else {
+ /* otherwise poll for some time so the current cycle has a chance
+ * to end before giving up. */
+ for (i = 0; i < ICH_FLASH_COMMAND_TIMEOUT; i++) {
+ hsfsts.regval = E1000_READ_ICH_FLASH_REG16(hw, ICH_FLASH_HSFSTS);
+ if (hsfsts.hsf_status.flcinprog == 0) {
+ error = E1000_SUCCESS;
+ break;
+ }
+ udelay(1);
+ }
+ if (error == E1000_SUCCESS) {
+ /* Successful in waiting for previous cycle to timeout,
+ * now set the Flash Cycle Done. */
+ hsfsts.hsf_status.flcdone = 1;
+ E1000_WRITE_ICH_FLASH_REG16(hw, ICH_FLASH_HSFSTS, hsfsts.regval);
+ } else {
+ DEBUGOUT("Flash controller busy, cannot get access");
+ }
+ }
+ return error;
+}
+
+/******************************************************************************
+ * This function starts a flash cycle and waits for its completion
+ *
+ * hw - The pointer to the hw structure
+ ****************************************************************************/
+static s32 e1000_ich8_flash_cycle(struct e1000_hw *hw, u32 timeout)
+{
+ union ich8_hws_flash_ctrl hsflctl;
+ union ich8_hws_flash_status hsfsts;
+ s32 error = E1000_ERR_EEPROM;
+ u32 i = 0;
+
+ /* Start a cycle by writing 1 in Flash Cycle Go in Hw Flash Control */
+ hsflctl.regval = E1000_READ_ICH_FLASH_REG16(hw, ICH_FLASH_HSFCTL);
+ hsflctl.hsf_ctrl.flcgo = 1;
+ E1000_WRITE_ICH_FLASH_REG16(hw, ICH_FLASH_HSFCTL, hsflctl.regval);
+
+ /* wait till FDONE bit is set to 1 */
+ do {
+ hsfsts.regval = E1000_READ_ICH_FLASH_REG16(hw, ICH_FLASH_HSFSTS);
+ if (hsfsts.hsf_status.flcdone == 1)
+ break;
+ udelay(1);
+ i++;
+ } while (i < timeout);
+ if (hsfsts.hsf_status.flcdone == 1 && hsfsts.hsf_status.flcerr == 0) {
+ error = E1000_SUCCESS;
+ }
+ return error;
+}
+
+/******************************************************************************
+ * Reads a byte or word from the NVM using the ICH8 flash access registers.
+ *
+ * hw - The pointer to the hw structure
+ * index - The index of the byte or word to read.
+ * size - Size of data to read, 1=byte 2=word
+ * data - Pointer to the word to store the value read.
+ *****************************************************************************/
+static s32 e1000_read_ich8_data(struct e1000_hw *hw, u32 index, u32 size,
+ u16 *data)
+{
+ union ich8_hws_flash_status hsfsts;
+ union ich8_hws_flash_ctrl hsflctl;
+ u32 flash_linear_address;
+ u32 flash_data = 0;
+ s32 error = -E1000_ERR_EEPROM;
+ s32 count = 0;
+
+ DEBUGFUNC("e1000_read_ich8_data");
+
+ if (size < 1 || size > 2 || data == NULL ||
+ index > ICH_FLASH_LINEAR_ADDR_MASK)
+ return error;
+
+ flash_linear_address = (ICH_FLASH_LINEAR_ADDR_MASK & index) +
+ hw->flash_base_addr;
+
+ do {
+ udelay(1);
+ /* Steps */
+ error = e1000_ich8_cycle_init(hw);
+ if (error != E1000_SUCCESS)
+ break;
+
+ hsflctl.regval = E1000_READ_ICH_FLASH_REG16(hw, ICH_FLASH_HSFCTL);
+ /* 0b/1b corresponds to 1 or 2 byte size, respectively. */
+ hsflctl.hsf_ctrl.fldbcount = size - 1;
+ hsflctl.hsf_ctrl.flcycle = ICH_CYCLE_READ;
+ E1000_WRITE_ICH_FLASH_REG16(hw, ICH_FLASH_HSFCTL, hsflctl.regval);
+
+ /* Write the last 24 bits of index into Flash Linear address field in
+ * Flash Address */
+ /* TODO: TBD maybe check the index against the size of flash */
+
+ E1000_WRITE_ICH_FLASH_REG(hw, ICH_FLASH_FADDR, flash_linear_address);
+
+ error = e1000_ich8_flash_cycle(hw, ICH_FLASH_COMMAND_TIMEOUT);
+
+ /* Check if FCERR is set to 1. If it is, clear it and try the whole
+ * sequence a few more times, else read in (shift in) the Flash Data0,
+ * least significant byte first. */
+ if (error == E1000_SUCCESS) {
+ flash_data = E1000_READ_ICH_FLASH_REG(hw, ICH_FLASH_FDATA0);
+ if (size == 1) {
+ *data = (u8)(flash_data & 0x000000FF);
+ } else if (size == 2) {
+ *data = (u16)(flash_data & 0x0000FFFF);
+ }
+ break;
+ } else {
+ /* If we've gotten here, then things are probably completely hosed,
+ * but if the error condition is detected, it won't hurt to give
+ * it another try...ICH_FLASH_CYCLE_REPEAT_COUNT times.
+ */
+ hsfsts.regval = E1000_READ_ICH_FLASH_REG16(hw, ICH_FLASH_HSFSTS);
+ if (hsfsts.hsf_status.flcerr == 1) {
+ /* Repeat for some time before giving up. */
+ continue;
+ } else if (hsfsts.hsf_status.flcdone == 0) {
+ DEBUGOUT("Timeout error - flash cycle did not complete.");
+ break;
+ }
+ }
+ } while (count++ < ICH_FLASH_CYCLE_REPEAT_COUNT);
+
+ return error;
+}
+
+/******************************************************************************
+ * Writes one or two bytes to the NVM using the ICH8 flash access registers.
+ *
+ * hw - The pointer to the hw structure
+ * index - The index of the byte/word to write.
+ * size - Size of data to write, 1=byte 2=word
+ * data - The byte(s) to write to the NVM.
+ *****************************************************************************/
+static s32 e1000_write_ich8_data(struct e1000_hw *hw, u32 index, u32 size,
+ u16 data)
+{
+ union ich8_hws_flash_status hsfsts;
+ union ich8_hws_flash_ctrl hsflctl;
+ u32 flash_linear_address;
+ u32 flash_data = 0;
+ s32 error = -E1000_ERR_EEPROM;
+ s32 count = 0;
+
+ DEBUGFUNC("e1000_write_ich8_data");
+
+ if (size < 1 || size > 2 || data > size * 0xff ||
+ index > ICH_FLASH_LINEAR_ADDR_MASK)
+ return error;
+
+ flash_linear_address = (ICH_FLASH_LINEAR_ADDR_MASK & index) +
+ hw->flash_base_addr;
+
+ do {
+ udelay(1);
+ /* Steps */
+ error = e1000_ich8_cycle_init(hw);
+ if (error != E1000_SUCCESS)
+ break;
+
+ hsflctl.regval = E1000_READ_ICH_FLASH_REG16(hw, ICH_FLASH_HSFCTL);
+ /* 0b/1b corresponds to 1 or 2 byte size, respectively. */
+ hsflctl.hsf_ctrl.fldbcount = size - 1;
+ hsflctl.hsf_ctrl.flcycle = ICH_CYCLE_WRITE;
+ E1000_WRITE_ICH_FLASH_REG16(hw, ICH_FLASH_HSFCTL, hsflctl.regval);
+
+ /* Write the last 24 bits of index into Flash Linear address field in
+ * Flash Address */
+ E1000_WRITE_ICH_FLASH_REG(hw, ICH_FLASH_FADDR, flash_linear_address);
+
+ if (size == 1)
+ flash_data = (u32)data & 0x00FF;
+ else
+ flash_data = (u32)data;
+
+ E1000_WRITE_ICH_FLASH_REG(hw, ICH_FLASH_FDATA0, flash_data);
+
+ /* Check if FCERR is set to 1. If it is, clear it and try the whole
+ * sequence a few more times, else we are done. */
+ error = e1000_ich8_flash_cycle(hw, ICH_FLASH_COMMAND_TIMEOUT);
+ if (error == E1000_SUCCESS) {
+ break;
+ } else {
+ /* If we're here, then things are most likely completely hosed,
+ * but if the error condition is detected, it won't hurt to give
+ * it another try...ICH_FLASH_CYCLE_REPEAT_COUNT times.
+ */
+ hsfsts.regval = E1000_READ_ICH_FLASH_REG16(hw, ICH_FLASH_HSFSTS);
+ if (hsfsts.hsf_status.flcerr == 1) {
+ /* Repeat for some time before giving up. */
+ continue;
+ } else if (hsfsts.hsf_status.flcdone == 0) {
+ DEBUGOUT("Timeout error - flash cycle did not complete.");
+ break;
+ }
+ }
+ } while (count++ < ICH_FLASH_CYCLE_REPEAT_COUNT);
+
+ return error;
+}
+
+/******************************************************************************
+ * Reads a single byte from the NVM using the ICH8 flash access registers.
+ *
+ * hw - pointer to e1000_hw structure
+ * index - The index of the byte to read.
+ * data - Pointer to a byte to store the value read.
+ *****************************************************************************/
+static s32 e1000_read_ich8_byte(struct e1000_hw *hw, u32 index, u8 *data)
+{
+ s32 status = E1000_SUCCESS;
+ u16 word = 0;
+
+ status = e1000_read_ich8_data(hw, index, 1, &word);
+ if (status == E1000_SUCCESS) {
+ *data = (u8)word;
+ }
+
+ return status;
+}
+
+/******************************************************************************
+ * Writes a single byte to the NVM using the ICH8 flash access registers.
+ * Performs verification by reading back the value and then going through
+ * a retry algorithm before giving up.
+ *
+ * hw - pointer to e1000_hw structure
+ * index - The index of the byte to write.
+ * byte - The byte to write to the NVM.
+ *****************************************************************************/
+static s32 e1000_verify_write_ich8_byte(struct e1000_hw *hw, u32 index, u8 byte)
+{
+ s32 error = E1000_SUCCESS;
+ s32 program_retries = 0;
+
+ DEBUGOUT2("Byte := %2.2X Offset := %d\n", byte, index);
+
+ error = e1000_write_ich8_byte(hw, index, byte);
+
+ if (error != E1000_SUCCESS) {
+ for (program_retries = 0; program_retries < 100; program_retries++) {
+ DEBUGOUT2("Retrying \t Byte := %2.2X Offset := %d\n", byte, index);
+ error = e1000_write_ich8_byte(hw, index, byte);
+ udelay(100);
+ if (error == E1000_SUCCESS)
+ break;
+ }
+ }
+
+ if (program_retries == 100)
+ error = E1000_ERR_EEPROM;
+
+ return error;
+}
+
+/******************************************************************************
+ * Writes a single byte to the NVM using the ICH8 flash access registers.
+ *
+ * hw - pointer to e1000_hw structure
+ * index - The index of the byte to write.
+ * data - The byte to write to the NVM.
+ *****************************************************************************/
+static s32 e1000_write_ich8_byte(struct e1000_hw *hw, u32 index, u8 data)
+{
+ s32 status = E1000_SUCCESS;
+ u16 word = (u16)data;
+
+ status = e1000_write_ich8_data(hw, index, 1, word);
+
+ return status;
+}
+
+/******************************************************************************
+ * Reads a word from the NVM using the ICH8 flash access registers.
+ *
+ * hw - pointer to e1000_hw structure
+ * index - The starting byte index of the word to read.
+ * data - Pointer to a word to store the value read.
+ *****************************************************************************/
+static s32 e1000_read_ich8_word(struct e1000_hw *hw, u32 index, u16 *data)
+{
+ s32 status = E1000_SUCCESS;
+ status = e1000_read_ich8_data(hw, index, 2, data);
+ return status;
+}
+
+/******************************************************************************
+ * Erases the bank specified. Each bank may be a 4, 8 or 64k block. Banks are 0
+ * based.
+ *
+ * hw - pointer to e1000_hw structure
+ * bank - 0 for first bank, 1 for second bank
+ *
+ * Note that this function may actually erase as much as 8 or 64 KBytes. The
+ * amount of NVM used in each bank is a *minimum* of 4 KBytes, but in fact the
+ * bank size may be 4, 8 or 64 KBytes
+ *****************************************************************************/
+static s32 e1000_erase_ich8_4k_segment(struct e1000_hw *hw, u32 bank)
+{
+ union ich8_hws_flash_status hsfsts;
+ union ich8_hws_flash_ctrl hsflctl;
+ u32 flash_linear_address;
+ s32 count = 0;
+ s32 error = E1000_ERR_EEPROM;
+ s32 iteration;
+ s32 sub_sector_size = 0;
+ s32 bank_size;
+ s32 j = 0;
+ s32 error_flag = 0;
+
+ hsfsts.regval = E1000_READ_ICH_FLASH_REG16(hw, ICH_FLASH_HSFSTS);
+
+ /* Determine HW Sector size: Read BERASE bits of Hw flash Status register */
+ /* 00: The Hw sector is 256 bytes, hence we need to erase 16
+ * consecutive sectors. The start index for the nth Hw sector can be
+ * calculated as bank * 4096 + n * 256
+ * 01: The Hw sector is 4K bytes, hence we need to erase 1 sector.
+ * The start index for the nth Hw sector can be calculated
+ * as bank * 4096
+ * 10: The HW sector is 8K bytes
+ * 11: The Hw sector size is 64K bytes */
+ if (hsfsts.hsf_status.berasesz == 0x0) {
+ /* Hw sector size 256 */
+ sub_sector_size = ICH_FLASH_SEG_SIZE_256;
+ bank_size = ICH_FLASH_SECTOR_SIZE;
+ iteration = ICH_FLASH_SECTOR_SIZE / ICH_FLASH_SEG_SIZE_256;
+ } else if (hsfsts.hsf_status.berasesz == 0x1) {
+ bank_size = ICH_FLASH_SEG_SIZE_4K;
+ iteration = 1;
+ } else if (hsfsts.hsf_status.berasesz == 0x3) {
+ bank_size = ICH_FLASH_SEG_SIZE_64K;
+ iteration = 1;
+ } else {
+ return error;
+ }
+
+ for (j = 0; j < iteration ; j++) {
+ do {
+ count++;
+ /* Steps */
+ error = e1000_ich8_cycle_init(hw);
+ if (error != E1000_SUCCESS) {
+ error_flag = 1;
+ break;
+ }
+
+ /* Write a value 11 (block Erase) in Flash Cycle field in Hw flash
+ * Control */
+ hsflctl.regval = E1000_READ_ICH_FLASH_REG16(hw, ICH_FLASH_HSFCTL);
+ hsflctl.hsf_ctrl.flcycle = ICH_CYCLE_ERASE;
+ E1000_WRITE_ICH_FLASH_REG16(hw, ICH_FLASH_HSFCTL, hsflctl.regval);
+
+ /* Write the last 24 bits of an index within the block into Flash
+ * Linear address field in Flash Address. This probably needs to
+ * be calculated here based off the on-chip erase sector size and
+ * the software bank size (4, 8 or 64 KBytes) */
+ flash_linear_address = bank * bank_size + j * sub_sector_size;
+ flash_linear_address += hw->flash_base_addr;
+ flash_linear_address &= ICH_FLASH_LINEAR_ADDR_MASK;
+
+ E1000_WRITE_ICH_FLASH_REG(hw, ICH_FLASH_FADDR, flash_linear_address);
+
+ error = e1000_ich8_flash_cycle(hw, ICH_FLASH_ERASE_TIMEOUT);
+ /* Check if FCERR is set to 1. If 1, clear it and try the whole
+ * sequence a few more times else Done */
+ if (error == E1000_SUCCESS) {
+ break;
+ } else {
+ hsfsts.regval = E1000_READ_ICH_FLASH_REG16(hw, ICH_FLASH_HSFSTS);
+ if (hsfsts.hsf_status.flcerr == 1) {
+ /* repeat for some time before giving up */
+ continue;
+ } else if (hsfsts.hsf_status.flcdone == 0) {
+ error_flag = 1;
+ break;
+ }
+ }
+ } while ((count < ICH_FLASH_CYCLE_REPEAT_COUNT) && !error_flag);
+ if (error_flag == 1)
+ break;
+ }
+ if (error_flag != 1)
+ error = E1000_SUCCESS;
+ return error;
+}
+
+static s32 e1000_init_lcd_from_nvm_config_region(struct e1000_hw *hw,
+ u32 cnf_base_addr,
+ u32 cnf_size)
+{
+ u32 ret_val = E1000_SUCCESS;
+ u16 word_addr, reg_data, reg_addr;
+ u16 i;
+
+ /* cnf_base_addr is in DWORD */
+ word_addr = (u16)(cnf_base_addr << 1);
+
+ /* cnf_size is returned in size of dwords */
+ for (i = 0; i < cnf_size; i++) {
+ ret_val = e1000_read_eeprom(hw, (word_addr + i*2), 1, &reg_data);
+ if (ret_val)
+ return ret_val;
+
+ ret_val = e1000_read_eeprom(hw, (word_addr + i*2 + 1), 1, &reg_addr);
+ if (ret_val)
+ return ret_val;
+
+ ret_val = e1000_get_software_flag(hw);
+ if (ret_val != E1000_SUCCESS)
+ return ret_val;
+
+ ret_val = e1000_write_phy_reg_ex(hw, (u32)reg_addr, reg_data);
+
+ e1000_release_software_flag(hw);
+ }
+
+ return ret_val;
+}
+
+
+/******************************************************************************
+ * This function initializes the PHY from the NVM on ICH8 platforms. This
+ * is needed due to an issue where the NVM configuration is not properly
+ * autoloaded after power transitions. Therefore, after each PHY reset, we
+ * will load the configuration data out of the NVM manually.
+ *
+ * hw: Struct containing variables accessed by shared code
+ *****************************************************************************/
+static s32 e1000_init_lcd_from_nvm(struct e1000_hw *hw)
+{
+ u32 reg_data, cnf_base_addr, cnf_size, ret_val, loop;
+
+ if (hw->phy_type != e1000_phy_igp_3)
+ return E1000_SUCCESS;
+
+ /* Check if SW needs to configure the PHY */
+ reg_data = er32(FEXTNVM);
+ if (!(reg_data & FEXTNVM_SW_CONFIG))
+ return E1000_SUCCESS;
+
+ /* Wait for basic configuration to complete before proceeding */
+ loop = 0;
+ do {
+ reg_data = er32(STATUS) & E1000_STATUS_LAN_INIT_DONE;
+ udelay(100);
+ loop++;
+ } while ((!reg_data) && (loop < 50));
+
+ /* Clear the Init Done bit for the next init event */
+ reg_data = er32(STATUS);
+ reg_data &= ~E1000_STATUS_LAN_INIT_DONE;
+ ew32(STATUS, reg_data);
+
+ /* Make sure HW does not configure LCD from PHY extended configuration
+ before SW configuration */
+ reg_data = er32(EXTCNF_CTRL);
+ if ((reg_data & E1000_EXTCNF_CTRL_LCD_WRITE_ENABLE) == 0x0000) {
+ reg_data = er32(EXTCNF_SIZE);
+ cnf_size = reg_data & E1000_EXTCNF_SIZE_EXT_PCIE_LENGTH;
+ cnf_size >>= 16;
+ if (cnf_size) {
+ reg_data = er32(EXTCNF_CTRL);
+ cnf_base_addr = reg_data & E1000_EXTCNF_CTRL_EXT_CNF_POINTER;
+ /* cnf_base_addr is in DWORD */
+ cnf_base_addr >>= 16;
+
+ /* Configure LCD from extended configuration region. */
+ ret_val = e1000_init_lcd_from_nvm_config_region(hw, cnf_base_addr,
+ cnf_size);
+ if (ret_val)
+ return ret_val;
+ }
+ }
+
+ return E1000_SUCCESS;
+}
+
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/devices/e1000/e1000_hw-2.6.28-orig.h Mon Feb 07 21:30:25 2011 +0100
@@ -0,0 +1,3406 @@
+/*******************************************************************************
+
+ Intel PRO/1000 Linux driver
+ Copyright(c) 1999 - 2006 Intel Corporation.
+
+ This program is free software; you can redistribute it and/or modify it
+ under the terms and conditions of the GNU General Public License,
+ version 2, as published by the Free Software Foundation.
+
+ This program is distributed in the hope it will be useful, but WITHOUT
+ ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ more details.
+
+ You should have received a copy of the GNU General Public License along with
+ this program; if not, write to the Free Software Foundation, Inc.,
+ 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
+
+ The full GNU General Public License is included in this distribution in
+ the file called "COPYING".
+
+ Contact Information:
+ Linux NICS <linux.nics@intel.com>
+ e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
+ Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
+
+*******************************************************************************/
+
+/* e1000_hw.h
+ * Structures, enums, and macros for the MAC
+ */
+
+#ifndef _E1000_HW_H_
+#define _E1000_HW_H_
+
+#include "e1000_osdep.h"
+
+
+/* Forward declarations of structures used by the shared code */
+struct e1000_hw;
+struct e1000_hw_stats;
+
+/* Enumerated types specific to the e1000 hardware */
+/* Media Access Controllers */
+typedef enum {
+ e1000_undefined = 0,
+ e1000_82542_rev2_0,
+ e1000_82542_rev2_1,
+ e1000_82543,
+ e1000_82544,
+ e1000_82540,
+ e1000_82545,
+ e1000_82545_rev_3,
+ e1000_82546,
+ e1000_82546_rev_3,
+ e1000_82541,
+ e1000_82541_rev_2,
+ e1000_82547,
+ e1000_82547_rev_2,
+ e1000_82571,
+ e1000_82572,
+ e1000_82573,
+ e1000_80003es2lan,
+ e1000_ich8lan,
+ e1000_num_macs
+} e1000_mac_type;
+
+typedef enum {
+ e1000_eeprom_uninitialized = 0,
+ e1000_eeprom_spi,
+ e1000_eeprom_microwire,
+ e1000_eeprom_flash,
+ e1000_eeprom_ich8,
+ e1000_eeprom_none, /* No NVM support */
+ e1000_num_eeprom_types
+} e1000_eeprom_type;
+
+/* Media Types */
+typedef enum {
+ e1000_media_type_copper = 0,
+ e1000_media_type_fiber = 1,
+ e1000_media_type_internal_serdes = 2,
+ e1000_num_media_types
+} e1000_media_type;
+
+typedef enum {
+ e1000_10_half = 0,
+ e1000_10_full = 1,
+ e1000_100_half = 2,
+ e1000_100_full = 3
+} e1000_speed_duplex_type;
+
+/* Flow Control Settings */
+typedef enum {
+ E1000_FC_NONE = 0,
+ E1000_FC_RX_PAUSE = 1,
+ E1000_FC_TX_PAUSE = 2,
+ E1000_FC_FULL = 3,
+ E1000_FC_DEFAULT = 0xFF
+} e1000_fc_type;
+
+struct e1000_shadow_ram {
+ u16 eeprom_word;
+ bool modified;
+};
+
+/* PCI bus types */
+typedef enum {
+ e1000_bus_type_unknown = 0,
+ e1000_bus_type_pci,
+ e1000_bus_type_pcix,
+ e1000_bus_type_pci_express,
+ e1000_bus_type_reserved
+} e1000_bus_type;
+
+/* PCI bus speeds */
+typedef enum {
+ e1000_bus_speed_unknown = 0,
+ e1000_bus_speed_33,
+ e1000_bus_speed_66,
+ e1000_bus_speed_100,
+ e1000_bus_speed_120,
+ e1000_bus_speed_133,
+ e1000_bus_speed_2500,
+ e1000_bus_speed_reserved
+} e1000_bus_speed;
+
+/* PCI bus widths */
+typedef enum {
+ e1000_bus_width_unknown = 0,
+ /* These PCIe values should literally match the possible return values
+ * from config space */
+ e1000_bus_width_pciex_1 = 1,
+ e1000_bus_width_pciex_2 = 2,
+ e1000_bus_width_pciex_4 = 4,
+ e1000_bus_width_32,
+ e1000_bus_width_64,
+ e1000_bus_width_reserved
+} e1000_bus_width;
+
+/* PHY status info structure and supporting enums */
+typedef enum {
+ e1000_cable_length_50 = 0,
+ e1000_cable_length_50_80,
+ e1000_cable_length_80_110,
+ e1000_cable_length_110_140,
+ e1000_cable_length_140,
+ e1000_cable_length_undefined = 0xFF
+} e1000_cable_length;
+
+typedef enum {
+ e1000_gg_cable_length_60 = 0,
+ e1000_gg_cable_length_60_115 = 1,
+ e1000_gg_cable_length_115_150 = 2,
+ e1000_gg_cable_length_150 = 4
+} e1000_gg_cable_length;
+
+typedef enum {
+ e1000_igp_cable_length_10 = 10,
+ e1000_igp_cable_length_20 = 20,
+ e1000_igp_cable_length_30 = 30,
+ e1000_igp_cable_length_40 = 40,
+ e1000_igp_cable_length_50 = 50,
+ e1000_igp_cable_length_60 = 60,
+ e1000_igp_cable_length_70 = 70,
+ e1000_igp_cable_length_80 = 80,
+ e1000_igp_cable_length_90 = 90,
+ e1000_igp_cable_length_100 = 100,
+ e1000_igp_cable_length_110 = 110,
+ e1000_igp_cable_length_115 = 115,
+ e1000_igp_cable_length_120 = 120,
+ e1000_igp_cable_length_130 = 130,
+ e1000_igp_cable_length_140 = 140,
+ e1000_igp_cable_length_150 = 150,
+ e1000_igp_cable_length_160 = 160,
+ e1000_igp_cable_length_170 = 170,
+ e1000_igp_cable_length_180 = 180
+} e1000_igp_cable_length;
+
+typedef enum {
+ e1000_10bt_ext_dist_enable_normal = 0,
+ e1000_10bt_ext_dist_enable_lower,
+ e1000_10bt_ext_dist_enable_undefined = 0xFF
+} e1000_10bt_ext_dist_enable;
+
+typedef enum {
+ e1000_rev_polarity_normal = 0,
+ e1000_rev_polarity_reversed,
+ e1000_rev_polarity_undefined = 0xFF
+} e1000_rev_polarity;
+
+typedef enum {
+ e1000_downshift_normal = 0,
+ e1000_downshift_activated,
+ e1000_downshift_undefined = 0xFF
+} e1000_downshift;
+
+typedef enum {
+ e1000_smart_speed_default = 0,
+ e1000_smart_speed_on,
+ e1000_smart_speed_off
+} e1000_smart_speed;
+
+typedef enum {
+ e1000_polarity_reversal_enabled = 0,
+ e1000_polarity_reversal_disabled,
+ e1000_polarity_reversal_undefined = 0xFF
+} e1000_polarity_reversal;
+
+typedef enum {
+ e1000_auto_x_mode_manual_mdi = 0,
+ e1000_auto_x_mode_manual_mdix,
+ e1000_auto_x_mode_auto1,
+ e1000_auto_x_mode_auto2,
+ e1000_auto_x_mode_undefined = 0xFF
+} e1000_auto_x_mode;
+
+typedef enum {
+ e1000_1000t_rx_status_not_ok = 0,
+ e1000_1000t_rx_status_ok,
+ e1000_1000t_rx_status_undefined = 0xFF
+} e1000_1000t_rx_status;
+
+typedef enum {
+ e1000_phy_m88 = 0,
+ e1000_phy_igp,
+ e1000_phy_igp_2,
+ e1000_phy_gg82563,
+ e1000_phy_igp_3,
+ e1000_phy_ife,
+ e1000_phy_undefined = 0xFF
+} e1000_phy_type;
+
+typedef enum {
+ e1000_ms_hw_default = 0,
+ e1000_ms_force_master,
+ e1000_ms_force_slave,
+ e1000_ms_auto
+} e1000_ms_type;
+
+typedef enum {
+ e1000_ffe_config_enabled = 0,
+ e1000_ffe_config_active,
+ e1000_ffe_config_blocked
+} e1000_ffe_config;
+
+typedef enum {
+ e1000_dsp_config_disabled = 0,
+ e1000_dsp_config_enabled,
+ e1000_dsp_config_activated,
+ e1000_dsp_config_undefined = 0xFF
+} e1000_dsp_config;
+
+struct e1000_phy_info {
+ e1000_cable_length cable_length;
+ e1000_10bt_ext_dist_enable extended_10bt_distance;
+ e1000_rev_polarity cable_polarity;
+ e1000_downshift downshift;
+ e1000_polarity_reversal polarity_correction;
+ e1000_auto_x_mode mdix_mode;
+ e1000_1000t_rx_status local_rx;
+ e1000_1000t_rx_status remote_rx;
+};
+
+struct e1000_phy_stats {
+ u32 idle_errors;
+ u32 receive_errors;
+};
+
+struct e1000_eeprom_info {
+ e1000_eeprom_type type;
+ u16 word_size;
+ u16 opcode_bits;
+ u16 address_bits;
+ u16 delay_usec;
+ u16 page_size;
+ bool use_eerd;
+ bool use_eewr;
+};
+
+/* Flex ASF Information */
+#define E1000_HOST_IF_MAX_SIZE 2048
+
+typedef enum {
+ e1000_byte_align = 0,
+ e1000_word_align = 1,
+ e1000_dword_align = 2
+} e1000_align_type;
+
+
+
+/* Error Codes */
+#define E1000_SUCCESS 0
+#define E1000_ERR_EEPROM 1
+#define E1000_ERR_PHY 2
+#define E1000_ERR_CONFIG 3
+#define E1000_ERR_PARAM 4
+#define E1000_ERR_MAC_TYPE 5
+#define E1000_ERR_PHY_TYPE 6
+#define E1000_ERR_RESET 9
+#define E1000_ERR_MASTER_REQUESTS_PENDING 10
+#define E1000_ERR_HOST_INTERFACE_COMMAND 11
+#define E1000_BLK_PHY_RESET 12
+#define E1000_ERR_SWFW_SYNC 13
+
+#define E1000_BYTE_SWAP_WORD(_value) ((((_value) & 0x00ff) << 8) | \
+ (((_value) & 0xff00) >> 8))
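+/* For example, E1000_BYTE_SWAP_WORD(0x1234) evaluates to 0x3412. */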
+
+/* Function prototypes */
+/* Initialization */
+s32 e1000_reset_hw(struct e1000_hw *hw);
+s32 e1000_init_hw(struct e1000_hw *hw);
+s32 e1000_set_mac_type(struct e1000_hw *hw);
+void e1000_set_media_type(struct e1000_hw *hw);
+
+/* Link Configuration */
+s32 e1000_setup_link(struct e1000_hw *hw);
+s32 e1000_phy_setup_autoneg(struct e1000_hw *hw);
+void e1000_config_collision_dist(struct e1000_hw *hw);
+s32 e1000_check_for_link(struct e1000_hw *hw);
+s32 e1000_get_speed_and_duplex(struct e1000_hw *hw, u16 *speed, u16 *duplex);
+s32 e1000_force_mac_fc(struct e1000_hw *hw);
+
+/* PHY */
+s32 e1000_read_phy_reg(struct e1000_hw *hw, u32 reg_addr, u16 *phy_data);
+s32 e1000_write_phy_reg(struct e1000_hw *hw, u32 reg_addr, u16 data);
+s32 e1000_phy_hw_reset(struct e1000_hw *hw);
+s32 e1000_phy_reset(struct e1000_hw *hw);
+s32 e1000_phy_get_info(struct e1000_hw *hw, struct e1000_phy_info *phy_info);
+s32 e1000_validate_mdi_setting(struct e1000_hw *hw);
+
+void e1000_phy_powerdown_workaround(struct e1000_hw *hw);
+
+/* EEPROM Functions */
+s32 e1000_init_eeprom_params(struct e1000_hw *hw);
+
+/* MNG HOST IF functions */
+u32 e1000_enable_mng_pass_thru(struct e1000_hw *hw);
+
+#define E1000_MNG_DHCP_TX_PAYLOAD_CMD 64
+#define E1000_HI_MAX_MNG_DATA_LENGTH 0x6F8 /* Host Interface data length */
+
+#define E1000_MNG_DHCP_COMMAND_TIMEOUT 10 /* Time in ms to process MNG command */
+#define E1000_MNG_DHCP_COOKIE_OFFSET 0x6F0 /* Cookie offset */
+#define E1000_MNG_DHCP_COOKIE_LENGTH 0x10 /* Cookie length */
+#define E1000_MNG_IAMT_MODE 0x3
+#define E1000_MNG_ICH_IAMT_MODE 0x2
+#define E1000_IAMT_SIGNATURE 0x544D4149 /* Intel(R) Active Management Technology signature */
+
+#define E1000_MNG_DHCP_COOKIE_STATUS_PARSING_SUPPORT 0x1 /* DHCP parsing enabled */
+#define E1000_MNG_DHCP_COOKIE_STATUS_VLAN_SUPPORT 0x2 /* DHCP parsing enabled */
+#define E1000_VFTA_ENTRY_SHIFT 0x5
+#define E1000_VFTA_ENTRY_MASK 0x7F
+#define E1000_VFTA_ENTRY_BIT_SHIFT_MASK 0x1F
+
+struct e1000_host_mng_command_header {
+ u8 command_id;
+ u8 checksum;
+ u16 reserved1;
+ u16 reserved2;
+ u16 command_length;
+};
+
+struct e1000_host_mng_command_info {
+ struct e1000_host_mng_command_header command_header; /* Command Head/Command Result Head has 4 bytes */
+ u8 command_data[E1000_HI_MAX_MNG_DATA_LENGTH]; /* Command data can be of length 0..0x658 */
+};
+#ifdef __BIG_ENDIAN
+struct e1000_host_mng_dhcp_cookie{
+ u32 signature;
+ u16 vlan_id;
+ u8 reserved0;
+ u8 status;
+ u32 reserved1;
+ u8 checksum;
+ u8 reserved3;
+ u16 reserved2;
+};
+#else
+struct e1000_host_mng_dhcp_cookie{
+ u32 signature;
+ u8 status;
+ u8 reserved0;
+ u16 vlan_id;
+ u32 reserved1;
+ u16 reserved2;
+ u8 reserved3;
+ u8 checksum;
+};
+#endif
+
+s32 e1000_mng_write_dhcp_info(struct e1000_hw *hw, u8 *buffer,
+ u16 length);
+bool e1000_check_mng_mode(struct e1000_hw *hw);
+bool e1000_enable_tx_pkt_filtering(struct e1000_hw *hw);
+s32 e1000_read_eeprom(struct e1000_hw *hw, u16 reg, u16 words, u16 *data);
+s32 e1000_validate_eeprom_checksum(struct e1000_hw *hw);
+s32 e1000_update_eeprom_checksum(struct e1000_hw *hw);
+s32 e1000_write_eeprom(struct e1000_hw *hw, u16 reg, u16 words, u16 *data);
+s32 e1000_read_mac_addr(struct e1000_hw * hw);
+
+/* Filters (multicast, vlan, receive) */
+u32 e1000_hash_mc_addr(struct e1000_hw *hw, u8 * mc_addr);
+void e1000_mta_set(struct e1000_hw *hw, u32 hash_value);
+void e1000_rar_set(struct e1000_hw *hw, u8 * mc_addr, u32 rar_index);
+void e1000_write_vfta(struct e1000_hw *hw, u32 offset, u32 value);
+
+/* LED functions */
+s32 e1000_setup_led(struct e1000_hw *hw);
+s32 e1000_cleanup_led(struct e1000_hw *hw);
+s32 e1000_led_on(struct e1000_hw *hw);
+s32 e1000_led_off(struct e1000_hw *hw);
+s32 e1000_blink_led_start(struct e1000_hw *hw);
+
+/* Adaptive IFS Functions */
+
+/* Everything else */
+void e1000_reset_adaptive(struct e1000_hw *hw);
+void e1000_update_adaptive(struct e1000_hw *hw);
+void e1000_tbi_adjust_stats(struct e1000_hw *hw, struct e1000_hw_stats *stats, u32 frame_len, u8 * mac_addr);
+void e1000_get_bus_info(struct e1000_hw *hw);
+void e1000_pci_set_mwi(struct e1000_hw *hw);
+void e1000_pci_clear_mwi(struct e1000_hw *hw);
+s32 e1000_read_pcie_cap_reg(struct e1000_hw *hw, u32 reg, u16 *value);
+void e1000_pcix_set_mmrbc(struct e1000_hw *hw, int mmrbc);
+int e1000_pcix_get_mmrbc(struct e1000_hw *hw);
+/* Port I/O is only supported on 82544 and newer */
+void e1000_io_write(struct e1000_hw *hw, unsigned long port, u32 value);
+s32 e1000_disable_pciex_master(struct e1000_hw *hw);
+s32 e1000_check_phy_reset_block(struct e1000_hw *hw);
+
+
+#define E1000_READ_REG_IO(a, reg) \
+ e1000_read_reg_io((a), E1000_##reg)
+#define E1000_WRITE_REG_IO(a, reg, val) \
+ e1000_write_reg_io((a), E1000_##reg, val)
+
+/* PCI Device IDs */
+#define E1000_DEV_ID_82542 0x1000
+#define E1000_DEV_ID_82543GC_FIBER 0x1001
+#define E1000_DEV_ID_82543GC_COPPER 0x1004
+#define E1000_DEV_ID_82544EI_COPPER 0x1008
+#define E1000_DEV_ID_82544EI_FIBER 0x1009
+#define E1000_DEV_ID_82544GC_COPPER 0x100C
+#define E1000_DEV_ID_82544GC_LOM 0x100D
+#define E1000_DEV_ID_82540EM 0x100E
+#define E1000_DEV_ID_82540EM_LOM 0x1015
+#define E1000_DEV_ID_82540EP_LOM 0x1016
+#define E1000_DEV_ID_82540EP 0x1017
+#define E1000_DEV_ID_82540EP_LP 0x101E
+#define E1000_DEV_ID_82545EM_COPPER 0x100F
+#define E1000_DEV_ID_82545EM_FIBER 0x1011
+#define E1000_DEV_ID_82545GM_COPPER 0x1026
+#define E1000_DEV_ID_82545GM_FIBER 0x1027
+#define E1000_DEV_ID_82545GM_SERDES 0x1028
+#define E1000_DEV_ID_82546EB_COPPER 0x1010
+#define E1000_DEV_ID_82546EB_FIBER 0x1012
+#define E1000_DEV_ID_82546EB_QUAD_COPPER 0x101D
+#define E1000_DEV_ID_82541EI 0x1013
+#define E1000_DEV_ID_82541EI_MOBILE 0x1018
+#define E1000_DEV_ID_82541ER_LOM 0x1014
+#define E1000_DEV_ID_82541ER 0x1078
+#define E1000_DEV_ID_82547GI 0x1075
+#define E1000_DEV_ID_82541GI 0x1076
+#define E1000_DEV_ID_82541GI_MOBILE 0x1077
+#define E1000_DEV_ID_82541GI_LF 0x107C
+#define E1000_DEV_ID_82546GB_COPPER 0x1079
+#define E1000_DEV_ID_82546GB_FIBER 0x107A
+#define E1000_DEV_ID_82546GB_SERDES 0x107B
+#define E1000_DEV_ID_82546GB_PCIE 0x108A
+#define E1000_DEV_ID_82546GB_QUAD_COPPER 0x1099
+#define E1000_DEV_ID_82547EI 0x1019
+#define E1000_DEV_ID_82547EI_MOBILE 0x101A
+#define E1000_DEV_ID_82571EB_COPPER 0x105E
+#define E1000_DEV_ID_82571EB_FIBER 0x105F
+#define E1000_DEV_ID_82571EB_SERDES 0x1060
+#define E1000_DEV_ID_82571EB_QUAD_COPPER 0x10A4
+#define E1000_DEV_ID_82571PT_QUAD_COPPER 0x10D5
+#define E1000_DEV_ID_82571EB_QUAD_FIBER 0x10A5
+#define E1000_DEV_ID_82571EB_QUAD_COPPER_LOWPROFILE 0x10BC
+#define E1000_DEV_ID_82571EB_SERDES_DUAL 0x10D9
+#define E1000_DEV_ID_82571EB_SERDES_QUAD 0x10DA
+#define E1000_DEV_ID_82572EI_COPPER 0x107D
+#define E1000_DEV_ID_82572EI_FIBER 0x107E
+#define E1000_DEV_ID_82572EI_SERDES 0x107F
+#define E1000_DEV_ID_82572EI 0x10B9
+#define E1000_DEV_ID_82573E 0x108B
+#define E1000_DEV_ID_82573E_IAMT 0x108C
+#define E1000_DEV_ID_82573L 0x109A
+#define E1000_DEV_ID_82546GB_QUAD_COPPER_KSP3 0x10B5
+#define E1000_DEV_ID_80003ES2LAN_COPPER_DPT 0x1096
+#define E1000_DEV_ID_80003ES2LAN_SERDES_DPT 0x1098
+#define E1000_DEV_ID_80003ES2LAN_COPPER_SPT 0x10BA
+#define E1000_DEV_ID_80003ES2LAN_SERDES_SPT 0x10BB
+
+#define E1000_DEV_ID_ICH8_IGP_M_AMT 0x1049
+#define E1000_DEV_ID_ICH8_IGP_AMT 0x104A
+#define E1000_DEV_ID_ICH8_IGP_C 0x104B
+#define E1000_DEV_ID_ICH8_IFE 0x104C
+#define E1000_DEV_ID_ICH8_IFE_GT 0x10C4
+#define E1000_DEV_ID_ICH8_IFE_G 0x10C5
+#define E1000_DEV_ID_ICH8_IGP_M 0x104D
+
+
+#define NODE_ADDRESS_SIZE 6
+#define ETH_LENGTH_OF_ADDRESS 6
+
+/* MAC decode size is 128K - This is the size of BAR0 */
+#define MAC_DECODE_SIZE (128 * 1024)
+
+#define E1000_82542_2_0_REV_ID 2
+#define E1000_82542_2_1_REV_ID 3
+#define E1000_REVISION_0 0
+#define E1000_REVISION_1 1
+#define E1000_REVISION_2 2
+#define E1000_REVISION_3 3
+
+#define SPEED_10 10
+#define SPEED_100 100
+#define SPEED_1000 1000
+#define HALF_DUPLEX 1
+#define FULL_DUPLEX 2
+
+/* The sizes (in bytes) of an Ethernet packet */
+#define ENET_HEADER_SIZE 14
+#define MAXIMUM_ETHERNET_FRAME_SIZE 1518 /* With FCS */
+#define MINIMUM_ETHERNET_FRAME_SIZE 64 /* With FCS */
+#define ETHERNET_FCS_SIZE 4
+#define MAXIMUM_ETHERNET_PACKET_SIZE \
+ (MAXIMUM_ETHERNET_FRAME_SIZE - ETHERNET_FCS_SIZE)
+#define MINIMUM_ETHERNET_PACKET_SIZE \
+ (MINIMUM_ETHERNET_FRAME_SIZE - ETHERNET_FCS_SIZE)
+#define CRC_LENGTH ETHERNET_FCS_SIZE
+#define MAX_JUMBO_FRAME_SIZE 0x3F00
+
+
+/* 802.1q VLAN Packet Sizes */
+#define VLAN_TAG_SIZE 4 /* 802.3ac tag (not DMAed) */
+
+/* Ethertype field values */
+#define ETHERNET_IEEE_VLAN_TYPE 0x8100 /* 802.3ac packet */
+#define ETHERNET_IP_TYPE 0x0800 /* IP packets */
+#define ETHERNET_ARP_TYPE 0x0806 /* Address Resolution Protocol (ARP) */
+
+/* Packet Header defines */
+#define IP_PROTOCOL_TCP 6
+#define IP_PROTOCOL_UDP 0x11
+
+/* This defines the bits that are set in the Interrupt Mask
+ * Set/Read Register. Each bit is documented below:
+ * o RXDMT0 = Receive Descriptor Minimum Threshold hit (ring 0)
+ * o RXSEQ = Receive Sequence Error
+ */
+#define POLL_IMS_ENABLE_MASK ( \
+ E1000_IMS_RXDMT0 | \
+ E1000_IMS_RXSEQ)
+
+/* This defines the bits that are set in the Interrupt Mask
+ * Set/Read Register. Each bit is documented below:
+ * o RXT0 = Receiver Timer Interrupt (ring 0)
+ * o TXDW = Transmit Descriptor Written Back
+ * o RXDMT0 = Receive Descriptor Minimum Threshold hit (ring 0)
+ * o RXSEQ = Receive Sequence Error
+ * o LSC = Link Status Change
+ */
+#define IMS_ENABLE_MASK ( \
+ E1000_IMS_RXT0 | \
+ E1000_IMS_TXDW | \
+ E1000_IMS_RXDMT0 | \
+ E1000_IMS_RXSEQ | \
+ E1000_IMS_LSC)
+
+/* Additional interrupts need to be handled for e1000_ich8lan:
+ DSW = The FW changed the status of the DISSW bit in FWSM
+ PHYINT = The LAN connected device generates an interrupt
+ EPRST = Manageability reset event */
+#define IMS_ICH8LAN_ENABLE_MASK (\
+ E1000_IMS_DSW | \
+ E1000_IMS_PHYINT | \
+ E1000_IMS_EPRST)
+
+/* Number of high/low register pairs in the RAR. The RAR (Receive Address
+ * Registers) holds the directed and multicast addresses that we monitor. We
+ * reserve one of these spots for our directed address, allowing us room for
+ * E1000_RAR_ENTRIES - 1 multicast addresses.
+ */
+#define E1000_RAR_ENTRIES 15
+
+#define E1000_RAR_ENTRIES_ICH8LAN 6
+
+#define MIN_NUMBER_OF_DESCRIPTORS 8
+#define MAX_NUMBER_OF_DESCRIPTORS 0xFFF8
+
+/* Receive Descriptor */
+struct e1000_rx_desc {
+ __le64 buffer_addr; /* Address of the descriptor's data buffer */
+ __le16 length; /* Length of data DMAed into data buffer */
+ __le16 csum; /* Packet checksum */
+ u8 status; /* Descriptor status */
+ u8 errors; /* Descriptor Errors */
+ __le16 special;
+};
+
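+/* Illustrative note (a sketch, not part of the original header): software
+ * typically detects a completed legacy descriptor by testing the status
+ * field against E1000_RXD_STAT_DD, defined further below, e.g.
+ *
+ *   if (rx_desc->status & E1000_RXD_STAT_DD)
+ *       ... // length, csum and errors fields are now valid
+ */
+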
+/* Receive Descriptor - Extended */
+union e1000_rx_desc_extended {
+ struct {
+ __le64 buffer_addr;
+ __le64 reserved;
+ } read;
+ struct {
+ struct {
+ __le32 mrq; /* Multiple Rx Queues */
+ union {
+ __le32 rss; /* RSS Hash */
+ struct {
+ __le16 ip_id; /* IP id */
+ __le16 csum; /* Packet Checksum */
+ } csum_ip;
+ } hi_dword;
+ } lower;
+ struct {
+ __le32 status_error; /* ext status/error */
+ __le16 length;
+ __le16 vlan; /* VLAN tag */
+ } upper;
+ } wb; /* writeback */
+};
+
+#define MAX_PS_BUFFERS 4
+/* Receive Descriptor - Packet Split */
+union e1000_rx_desc_packet_split {
+ struct {
+ /* one buffer for protocol header(s), three data buffers */
+ __le64 buffer_addr[MAX_PS_BUFFERS];
+ } read;
+ struct {
+ struct {
+ __le32 mrq; /* Multiple Rx Queues */
+ union {
+ __le32 rss; /* RSS Hash */
+ struct {
+ __le16 ip_id; /* IP id */
+ __le16 csum; /* Packet Checksum */
+ } csum_ip;
+ } hi_dword;
+ } lower;
+ struct {
+ __le32 status_error; /* ext status/error */
+ __le16 length0; /* length of buffer 0 */
+ __le16 vlan; /* VLAN tag */
+ } middle;
+ struct {
+ __le16 header_status;
+ __le16 length[3]; /* length of buffers 1-3 */
+ } upper;
+ __le64 reserved;
+ } wb; /* writeback */
+};
+
+/* Receive Descriptor bit definitions */
+#define E1000_RXD_STAT_DD 0x01 /* Descriptor Done */
+#define E1000_RXD_STAT_EOP 0x02 /* End of Packet */
+#define E1000_RXD_STAT_IXSM 0x04 /* Ignore checksum */
+#define E1000_RXD_STAT_VP 0x08 /* IEEE VLAN Packet */
+#define E1000_RXD_STAT_UDPCS 0x10 /* UDP xsum calculated */
+#define E1000_RXD_STAT_TCPCS 0x20 /* TCP xsum calculated */
+#define E1000_RXD_STAT_IPCS 0x40 /* IP xsum calculated */
+#define E1000_RXD_STAT_PIF 0x80 /* passed in-exact filter */
+#define E1000_RXD_STAT_IPIDV 0x200 /* IP identification valid */
+#define E1000_RXD_STAT_UDPV 0x400 /* Valid UDP checksum */
+#define E1000_RXD_STAT_ACK 0x8000 /* ACK Packet indication */
+#define E1000_RXD_ERR_CE 0x01 /* CRC Error */
+#define E1000_RXD_ERR_SE 0x02 /* Symbol Error */
+#define E1000_RXD_ERR_SEQ 0x04 /* Sequence Error */
+#define E1000_RXD_ERR_CXE 0x10 /* Carrier Extension Error */
+#define E1000_RXD_ERR_TCPE 0x20 /* TCP/UDP Checksum Error */
+#define E1000_RXD_ERR_IPE 0x40 /* IP Checksum Error */
+#define E1000_RXD_ERR_RXE 0x80 /* Rx Data Error */
+#define E1000_RXD_SPC_VLAN_MASK 0x0FFF /* VLAN ID is in lower 12 bits */
+#define E1000_RXD_SPC_PRI_MASK 0xE000 /* Priority is in upper 3 bits */
+#define E1000_RXD_SPC_PRI_SHIFT 13
+#define E1000_RXD_SPC_CFI_MASK 0x1000 /* CFI is bit 12 */
+#define E1000_RXD_SPC_CFI_SHIFT 12
+
+#define E1000_RXDEXT_STATERR_CE 0x01000000
+#define E1000_RXDEXT_STATERR_SE 0x02000000
+#define E1000_RXDEXT_STATERR_SEQ 0x04000000
+#define E1000_RXDEXT_STATERR_CXE 0x10000000
+#define E1000_RXDEXT_STATERR_TCPE 0x20000000
+#define E1000_RXDEXT_STATERR_IPE 0x40000000
+#define E1000_RXDEXT_STATERR_RXE 0x80000000
+
+#define E1000_RXDPS_HDRSTAT_HDRSP 0x00008000
+#define E1000_RXDPS_HDRSTAT_HDRLEN_MASK 0x000003FF
+
+/* mask to determine if packets should be dropped due to frame errors */
+#define E1000_RXD_ERR_FRAME_ERR_MASK ( \
+ E1000_RXD_ERR_CE | \
+ E1000_RXD_ERR_SE | \
+ E1000_RXD_ERR_SEQ | \
+ E1000_RXD_ERR_CXE | \
+ E1000_RXD_ERR_RXE)
+
+
+/* Same mask, but for extended and packet split descriptors */
+#define E1000_RXDEXT_ERR_FRAME_ERR_MASK ( \
+ E1000_RXDEXT_STATERR_CE | \
+ E1000_RXDEXT_STATERR_SE | \
+ E1000_RXDEXT_STATERR_SEQ | \
+ E1000_RXDEXT_STATERR_CXE | \
+ E1000_RXDEXT_STATERR_RXE)
+
+
+/* Transmit Descriptor */
+struct e1000_tx_desc {
+ __le64 buffer_addr; /* Address of the descriptor's data buffer */
+ union {
+ __le32 data;
+ struct {
+ __le16 length; /* Data buffer length */
+ u8 cso; /* Checksum offset */
+ u8 cmd; /* Descriptor control */
+ } flags;
+ } lower;
+ union {
+ __le32 data;
+ struct {
+ u8 status; /* Descriptor status */
+ u8 css; /* Checksum start */
+ __le16 special;
+ } fields;
+ } upper;
+};
+
+/* Transmit Descriptor bit definitions */
+#define E1000_TXD_DTYP_D 0x00100000 /* Data Descriptor */
+#define E1000_TXD_DTYP_C 0x00000000 /* Context Descriptor */
+#define E1000_TXD_POPTS_IXSM 0x01 /* Insert IP checksum */
+#define E1000_TXD_POPTS_TXSM 0x02 /* Insert TCP/UDP checksum */
+#define E1000_TXD_CMD_EOP 0x01000000 /* End of Packet */
+#define E1000_TXD_CMD_IFCS 0x02000000 /* Insert FCS (Ethernet CRC) */
+#define E1000_TXD_CMD_IC 0x04000000 /* Insert Checksum */
+#define E1000_TXD_CMD_RS 0x08000000 /* Report Status */
+#define E1000_TXD_CMD_RPS 0x10000000 /* Report Packet Sent */
+#define E1000_TXD_CMD_DEXT 0x20000000 /* Descriptor extension (0 = legacy) */
+#define E1000_TXD_CMD_VLE 0x40000000 /* Add VLAN tag */
+#define E1000_TXD_CMD_IDE 0x80000000 /* Enable Tidv register */
+#define E1000_TXD_STAT_DD 0x00000001 /* Descriptor Done */
+#define E1000_TXD_STAT_EC 0x00000002 /* Excess Collisions */
+#define E1000_TXD_STAT_LC 0x00000004 /* Late Collisions */
+#define E1000_TXD_STAT_TU 0x00000008 /* Transmit underrun */
+#define E1000_TXD_CMD_TCP 0x01000000 /* TCP packet */
+#define E1000_TXD_CMD_IP 0x02000000 /* IP packet */
+#define E1000_TXD_CMD_TSE 0x04000000 /* TCP Seg enable */
+#define E1000_TXD_STAT_TC 0x00000004 /* Tx Underrun */
+
+/* Offload Context Descriptor */
+struct e1000_context_desc {
+ union {
+ __le32 ip_config;
+ struct {
+ u8 ipcss; /* IP checksum start */
+ u8 ipcso; /* IP checksum offset */
+ __le16 ipcse; /* IP checksum end */
+ } ip_fields;
+ } lower_setup;
+ union {
+ __le32 tcp_config;
+ struct {
+ u8 tucss; /* TCP checksum start */
+ u8 tucso; /* TCP checksum offset */
+ __le16 tucse; /* TCP checksum end */
+ } tcp_fields;
+ } upper_setup;
+ __le32 cmd_and_length; /* */
+ union {
+ __le32 data;
+ struct {
+ u8 status; /* Descriptor status */
+ u8 hdr_len; /* Header length */
+ __le16 mss; /* Maximum segment size */
+ } fields;
+ } tcp_seg_setup;
+};
+
+/* Offload data descriptor */
+struct e1000_data_desc {
+ __le64 buffer_addr; /* Address of the descriptor's buffer address */
+ union {
+ __le32 data;
+ struct {
+ __le16 length; /* Data buffer length */
+ u8 typ_len_ext; /* */
+ u8 cmd; /* */
+ } flags;
+ } lower;
+ union {
+ __le32 data;
+ struct {
+ u8 status; /* Descriptor status */
+ u8 popts; /* Packet Options */
+ __le16 special; /* */
+ } fields;
+ } upper;
+};
+
+/* Filters */
+#define E1000_NUM_UNICAST 16 /* Unicast filter entries */
+#define E1000_MC_TBL_SIZE 128 /* Multicast Filter Table (4096 bits) */
+#define E1000_VLAN_FILTER_TBL_SIZE 128 /* VLAN Filter Table (4096 bits) */
+
+#define E1000_NUM_UNICAST_ICH8LAN 7
+#define E1000_MC_TBL_SIZE_ICH8LAN 32
+
+
+/* Receive Address Register */
+struct e1000_rar {
+ volatile __le32 low; /* receive address low */
+ volatile __le32 high; /* receive address high */
+};
+
+/* Number of entries in the Multicast Table Array (MTA). */
+#define E1000_NUM_MTA_REGISTERS 128
+#define E1000_NUM_MTA_REGISTERS_ICH8LAN 32
+
+/* IPv4 Address Table Entry */
+struct e1000_ipv4_at_entry {
+ volatile u32 ipv4_addr; /* IP Address (RW) */
+ volatile u32 reserved;
+};
+
+/* Four wakeup IP addresses are supported */
+#define E1000_WAKEUP_IP_ADDRESS_COUNT_MAX 4
+#define E1000_IP4AT_SIZE E1000_WAKEUP_IP_ADDRESS_COUNT_MAX
+#define E1000_IP4AT_SIZE_ICH8LAN 3
+#define E1000_IP6AT_SIZE 1
+
+/* IPv6 Address Table Entry */
+struct e1000_ipv6_at_entry {
+ volatile u8 ipv6_addr[16];
+};
+
+/* Flexible Filter Length Table Entry */
+struct e1000_fflt_entry {
+ volatile u32 length; /* Flexible Filter Length (RW) */
+ volatile u32 reserved;
+};
+
+/* Flexible Filter Mask Table Entry */
+struct e1000_ffmt_entry {
+ volatile u32 mask; /* Flexible Filter Mask (RW) */
+ volatile u32 reserved;
+};
+
+/* Flexible Filter Value Table Entry */
+struct e1000_ffvt_entry {
+ volatile u32 value; /* Flexible Filter Value (RW) */
+ volatile u32 reserved;
+};
+
+/* Four Flexible Filters are supported */
+#define E1000_FLEXIBLE_FILTER_COUNT_MAX 4
+
+/* Each Flexible Filter is at most 128 (0x80) bytes in length */
+#define E1000_FLEXIBLE_FILTER_SIZE_MAX 128
+
+#define E1000_FFLT_SIZE E1000_FLEXIBLE_FILTER_COUNT_MAX
+#define E1000_FFMT_SIZE E1000_FLEXIBLE_FILTER_SIZE_MAX
+#define E1000_FFVT_SIZE E1000_FLEXIBLE_FILTER_SIZE_MAX
+
+#define E1000_DISABLE_SERDES_LOOPBACK 0x0400
+
+/* Register Set. (82543, 82544)
+ *
+ * Registers are defined to be 32 bits and should be accessed as 32 bit values.
+ * These registers are physically located on the NIC, but are mapped into the
+ * host memory address space.
+ *
+ * RW - register is both readable and writable
+ * RO - register is read only
+ * WO - register is write only
+ * R/clr - register is read only and is cleared when read
+ * A - register array
+ */
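+/* Illustrative note (a sketch, not part of the original header): the shared
+ * code accesses these offsets through the er32()/ew32() wrappers, which
+ * ultimately readl()/writel() the memory-mapped BAR, e.g.
+ *
+ *   u32 ctrl_ext = er32(CTRL_EXT);
+ *   ew32(CTRL_EXT, ctrl_ext | E1000_CTRL_EXT_RO_DIS);
+ */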
+#define E1000_CTRL 0x00000 /* Device Control - RW */
+#define E1000_CTRL_DUP 0x00004 /* Device Control Duplicate (Shadow) - RW */
+#define E1000_STATUS 0x00008 /* Device Status - RO */
+#define E1000_EECD 0x00010 /* EEPROM/Flash Control - RW */
+#define E1000_EERD 0x00014 /* EEPROM Read - RW */
+#define E1000_CTRL_EXT 0x00018 /* Extended Device Control - RW */
+#define E1000_FLA 0x0001C /* Flash Access - RW */
+#define E1000_MDIC 0x00020 /* MDI Control - RW */
+#define E1000_SCTL 0x00024 /* SerDes Control - RW */
+#define E1000_FEXTNVM 0x00028 /* Future Extended NVM register */
+#define E1000_FCAL 0x00028 /* Flow Control Address Low - RW */
+#define E1000_FCAH 0x0002C /* Flow Control Address High -RW */
+#define E1000_FCT 0x00030 /* Flow Control Type - RW */
+#define E1000_VET 0x00038 /* VLAN Ether Type - RW */
+#define E1000_ICR 0x000C0 /* Interrupt Cause Read - R/clr */
+#define E1000_ITR 0x000C4 /* Interrupt Throttling Rate - RW */
+#define E1000_ICS 0x000C8 /* Interrupt Cause Set - WO */
+#define E1000_IMS 0x000D0 /* Interrupt Mask Set - RW */
+#define E1000_IMC 0x000D8 /* Interrupt Mask Clear - WO */
+#define E1000_IAM 0x000E0 /* Interrupt Acknowledge Auto Mask */
+#define E1000_RCTL 0x00100 /* RX Control - RW */
+#define E1000_RDTR1 0x02820 /* RX Delay Timer (1) - RW */
+#define E1000_RDBAL1 0x02900 /* RX Descriptor Base Address Low (1) - RW */
+#define E1000_RDBAH1 0x02904 /* RX Descriptor Base Address High (1) - RW */
+#define E1000_RDLEN1 0x02908 /* RX Descriptor Length (1) - RW */
+#define E1000_RDH1 0x02910 /* RX Descriptor Head (1) - RW */
+#define E1000_RDT1 0x02918 /* RX Descriptor Tail (1) - RW */
+#define E1000_FCTTV 0x00170 /* Flow Control Transmit Timer Value - RW */
+#define E1000_TXCW 0x00178 /* TX Configuration Word - RW */
+#define E1000_RXCW 0x00180 /* RX Configuration Word - RO */
+#define E1000_TCTL 0x00400 /* TX Control - RW */
+#define E1000_TCTL_EXT 0x00404 /* Extended TX Control - RW */
+#define E1000_TIPG 0x00410 /* TX Inter-packet gap -RW */
+#define E1000_TBT 0x00448 /* TX Burst Timer - RW */
+#define E1000_AIT 0x00458 /* Adaptive Interframe Spacing Throttle - RW */
+#define E1000_LEDCTL 0x00E00 /* LED Control - RW */
+#define E1000_EXTCNF_CTRL 0x00F00 /* Extended Configuration Control */
+#define E1000_EXTCNF_SIZE 0x00F08 /* Extended Configuration Size */
+#define E1000_PHY_CTRL 0x00F10 /* PHY Control Register in CSR */
+#define FEXTNVM_SW_CONFIG 0x0001
+#define E1000_PBA 0x01000 /* Packet Buffer Allocation - RW */
+#define E1000_PBS 0x01008 /* Packet Buffer Size */
+#define E1000_EEMNGCTL 0x01010 /* MNG EEprom Control */
+#define E1000_FLASH_UPDATES 1000
+#define E1000_EEARBC 0x01024 /* EEPROM Auto Read Bus Control */
+#define E1000_FLASHT 0x01028 /* FLASH Timer Register */
+#define E1000_EEWR 0x0102C /* EEPROM Write Register - RW */
+#define E1000_FLSWCTL 0x01030 /* FLASH control register */
+#define E1000_FLSWDATA 0x01034 /* FLASH data register */
+#define E1000_FLSWCNT 0x01038 /* FLASH Access Counter */
+#define E1000_FLOP 0x0103C /* FLASH Opcode Register */
+#define E1000_ERT 0x02008 /* Early Rx Threshold - RW */
+#define E1000_FCRTL 0x02160 /* Flow Control Receive Threshold Low - RW */
+#define E1000_FCRTH 0x02168 /* Flow Control Receive Threshold High - RW */
+#define E1000_PSRCTL 0x02170 /* Packet Split Receive Control - RW */
+#define E1000_RDBAL 0x02800 /* RX Descriptor Base Address Low - RW */
+#define E1000_RDBAH 0x02804 /* RX Descriptor Base Address High - RW */
+#define E1000_RDLEN 0x02808 /* RX Descriptor Length - RW */
+#define E1000_RDH 0x02810 /* RX Descriptor Head - RW */
+#define E1000_RDT 0x02818 /* RX Descriptor Tail - RW */
+#define E1000_RDTR 0x02820 /* RX Delay Timer - RW */
+#define E1000_RDBAL0 E1000_RDBAL /* RX Desc Base Address Low (0) - RW */
+#define E1000_RDBAH0 E1000_RDBAH /* RX Desc Base Address High (0) - RW */
+#define E1000_RDLEN0 E1000_RDLEN /* RX Desc Length (0) - RW */
+#define E1000_RDH0 E1000_RDH /* RX Desc Head (0) - RW */
+#define E1000_RDT0 E1000_RDT /* RX Desc Tail (0) - RW */
+#define E1000_RDTR0 E1000_RDTR /* RX Delay Timer (0) - RW */
+#define E1000_RXDCTL 0x02828 /* RX Descriptor Control queue 0 - RW */
+#define E1000_RXDCTL1 0x02928 /* RX Descriptor Control queue 1 - RW */
+#define E1000_RADV 0x0282C /* RX Interrupt Absolute Delay Timer - RW */
+#define E1000_RSRPD 0x02C00 /* RX Small Packet Detect - RW */
+#define E1000_RAID 0x02C08 /* Receive Ack Interrupt Delay - RW */
+#define E1000_TXDMAC 0x03000 /* TX DMA Control - RW */
+#define E1000_KABGTXD 0x03004 /* AFE Band Gap Transmit Ref Data */
+#define E1000_TDFH 0x03410 /* TX Data FIFO Head - RW */
+#define E1000_TDFT 0x03418 /* TX Data FIFO Tail - RW */
+#define E1000_TDFHS 0x03420 /* TX Data FIFO Head Saved - RW */
+#define E1000_TDFTS 0x03428 /* TX Data FIFO Tail Saved - RW */
+#define E1000_TDFPC 0x03430 /* TX Data FIFO Packet Count - RW */
+#define E1000_TDBAL 0x03800 /* TX Descriptor Base Address Low - RW */
+#define E1000_TDBAH 0x03804 /* TX Descriptor Base Address High - RW */
+#define E1000_TDLEN 0x03808 /* TX Descriptor Length - RW */
+#define E1000_TDH 0x03810 /* TX Descriptor Head - RW */
+#define E1000_TDT 0x03818 /* TX Descriptor Tail - RW */
+#define E1000_TIDV 0x03820 /* TX Interrupt Delay Value - RW */
+#define E1000_TXDCTL 0x03828 /* TX Descriptor Control - RW */
+#define E1000_TADV 0x0382C /* TX Interrupt Absolute Delay Val - RW */
+#define E1000_TSPMT 0x03830 /* TCP Segmentation PAD & Min Threshold - RW */
+#define E1000_TARC0 0x03840 /* TX Arbitration Count (0) */
+#define E1000_TDBAL1 0x03900 /* TX Desc Base Address Low (1) - RW */
+#define E1000_TDBAH1 0x03904 /* TX Desc Base Address High (1) - RW */
+#define E1000_TDLEN1 0x03908 /* TX Desc Length (1) - RW */
+#define E1000_TDH1 0x03910 /* TX Desc Head (1) - RW */
+#define E1000_TDT1 0x03918 /* TX Desc Tail (1) - RW */
+#define E1000_TXDCTL1 0x03928 /* TX Descriptor Control (1) - RW */
+#define E1000_TARC1 0x03940 /* TX Arbitration Count (1) */
+#define E1000_CRCERRS 0x04000 /* CRC Error Count - R/clr */
+#define E1000_ALGNERRC 0x04004 /* Alignment Error Count - R/clr */
+#define E1000_SYMERRS 0x04008 /* Symbol Error Count - R/clr */
+#define E1000_RXERRC 0x0400C /* Receive Error Count - R/clr */
+#define E1000_MPC 0x04010 /* Missed Packet Count - R/clr */
+#define E1000_SCC 0x04014 /* Single Collision Count - R/clr */
+#define E1000_ECOL 0x04018 /* Excessive Collision Count - R/clr */
+#define E1000_MCC 0x0401C /* Multiple Collision Count - R/clr */
+#define E1000_LATECOL 0x04020 /* Late Collision Count - R/clr */
+#define E1000_COLC 0x04028 /* Collision Count - R/clr */
+#define E1000_DC 0x04030 /* Defer Count - R/clr */
+#define E1000_TNCRS 0x04034 /* TX-No CRS - R/clr */
+#define E1000_SEC 0x04038 /* Sequence Error Count - R/clr */
+#define E1000_CEXTERR 0x0403C /* Carrier Extension Error Count - R/clr */
+#define E1000_RLEC 0x04040 /* Receive Length Error Count - R/clr */
+#define E1000_XONRXC 0x04048 /* XON RX Count - R/clr */
+#define E1000_XONTXC 0x0404C /* XON TX Count - R/clr */
+#define E1000_XOFFRXC 0x04050 /* XOFF RX Count - R/clr */
+#define E1000_XOFFTXC 0x04054 /* XOFF TX Count - R/clr */
+#define E1000_FCRUC 0x04058 /* Flow Control RX Unsupported Count - R/clr */
+#define E1000_PRC64 0x0405C /* Packets RX (64 bytes) - R/clr */
+#define E1000_PRC127 0x04060 /* Packets RX (65-127 bytes) - R/clr */
+#define E1000_PRC255 0x04064 /* Packets RX (128-255 bytes) - R/clr */
+#define E1000_PRC511 0x04068 /* Packets RX (256-511 bytes) - R/clr */
+#define E1000_PRC1023 0x0406C /* Packets RX (512-1023 bytes) - R/clr */
+#define E1000_PRC1522 0x04070 /* Packets RX (1024-1522 bytes) - R/clr */
+#define E1000_GPRC 0x04074 /* Good Packets RX Count - R/clr */
+#define E1000_BPRC 0x04078 /* Broadcast Packets RX Count - R/clr */
+#define E1000_MPRC 0x0407C /* Multicast Packets RX Count - R/clr */
+#define E1000_GPTC 0x04080 /* Good Packets TX Count - R/clr */
+#define E1000_GORCL 0x04088 /* Good Octets RX Count Low - R/clr */
+#define E1000_GORCH 0x0408C /* Good Octets RX Count High - R/clr */
+#define E1000_GOTCL 0x04090 /* Good Octets TX Count Low - R/clr */
+#define E1000_GOTCH 0x04094 /* Good Octets TX Count High - R/clr */
+#define E1000_RNBC 0x040A0 /* RX No Buffers Count - R/clr */
+#define E1000_RUC 0x040A4 /* RX Undersize Count - R/clr */
+#define E1000_RFC 0x040A8 /* RX Fragment Count - R/clr */
+#define E1000_ROC 0x040AC /* RX Oversize Count - R/clr */
+#define E1000_RJC 0x040B0 /* RX Jabber Count - R/clr */
+#define E1000_MGTPRC 0x040B4 /* Management Packets RX Count - R/clr */
+#define E1000_MGTPDC 0x040B8 /* Management Packets Dropped Count - R/clr */
+#define E1000_MGTPTC 0x040BC /* Management Packets TX Count - R/clr */
+#define E1000_TORL 0x040C0 /* Total Octets RX Low - R/clr */
+#define E1000_TORH 0x040C4 /* Total Octets RX High - R/clr */
+#define E1000_TOTL 0x040C8 /* Total Octets TX Low - R/clr */
+#define E1000_TOTH 0x040CC /* Total Octets TX High - R/clr */
+#define E1000_TPR 0x040D0 /* Total Packets RX - R/clr */
+#define E1000_TPT 0x040D4 /* Total Packets TX - R/clr */
+#define E1000_PTC64 0x040D8 /* Packets TX (64 bytes) - R/clr */
+#define E1000_PTC127 0x040DC /* Packets TX (65-127 bytes) - R/clr */
+#define E1000_PTC255 0x040E0 /* Packets TX (128-255 bytes) - R/clr */
+#define E1000_PTC511 0x040E4 /* Packets TX (256-511 bytes) - R/clr */
+#define E1000_PTC1023 0x040E8 /* Packets TX (512-1023 bytes) - R/clr */
+#define E1000_PTC1522 0x040EC /* Packets TX (1024-1522 Bytes) - R/clr */
+#define E1000_MPTC 0x040F0 /* Multicast Packets TX Count - R/clr */
+#define E1000_BPTC 0x040F4 /* Broadcast Packets TX Count - R/clr */
+#define E1000_TSCTC 0x040F8 /* TCP Segmentation Context TX - R/clr */
+#define E1000_TSCTFC 0x040FC /* TCP Segmentation Context TX Fail - R/clr */
+#define E1000_IAC 0x04100 /* Interrupt Assertion Count */
+#define E1000_ICRXPTC 0x04104 /* Interrupt Cause Rx Packet Timer Expire Count */
+#define E1000_ICRXATC 0x04108 /* Interrupt Cause Rx Absolute Timer Expire Count */
+#define E1000_ICTXPTC 0x0410C /* Interrupt Cause Tx Packet Timer Expire Count */
+#define E1000_ICTXATC 0x04110 /* Interrupt Cause Tx Absolute Timer Expire Count */
+#define E1000_ICTXQEC 0x04118 /* Interrupt Cause Tx Queue Empty Count */
+#define E1000_ICTXQMTC 0x0411C /* Interrupt Cause Tx Queue Minimum Threshold Count */
+#define E1000_ICRXDMTC 0x04120 /* Interrupt Cause Rx Descriptor Minimum Threshold Count */
+#define E1000_ICRXOC 0x04124 /* Interrupt Cause Receiver Overrun Count */
+#define E1000_RXCSUM 0x05000 /* RX Checksum Control - RW */
+#define E1000_RFCTL 0x05008 /* Receive Filter Control */
+#define E1000_MTA 0x05200 /* Multicast Table Array - RW Array */
+#define E1000_RA 0x05400 /* Receive Address - RW Array */
+#define E1000_VFTA 0x05600 /* VLAN Filter Table Array - RW Array */
+#define E1000_WUC 0x05800 /* Wakeup Control - RW */
+#define E1000_WUFC 0x05808 /* Wakeup Filter Control - RW */
+#define E1000_WUS 0x05810 /* Wakeup Status - RO */
+#define E1000_MANC 0x05820 /* Management Control - RW */
+#define E1000_IPAV 0x05838 /* IP Address Valid - RW */
+#define E1000_IP4AT 0x05840 /* IPv4 Address Table - RW Array */
+#define E1000_IP6AT 0x05880 /* IPv6 Address Table - RW Array */
+#define E1000_WUPL 0x05900 /* Wakeup Packet Length - RW */
+#define E1000_WUPM 0x05A00 /* Wakeup Packet Memory - RO A */
+#define E1000_FFLT 0x05F00 /* Flexible Filter Length Table - RW Array */
+#define E1000_HOST_IF 0x08800 /* Host Interface */
+#define E1000_FFMT 0x09000 /* Flexible Filter Mask Table - RW Array */
+#define E1000_FFVT 0x09800 /* Flexible Filter Value Table - RW Array */
+
+#define E1000_KUMCTRLSTA 0x00034 /* MAC-PHY interface - RW */
+#define E1000_MDPHYA 0x0003C /* PHY address - RW */
+#define E1000_MANC2H 0x05860 /* Management Control To Host - RW */
+#define E1000_SW_FW_SYNC 0x05B5C /* Software-Firmware Synchronization - RW */
+
+#define E1000_GCR 0x05B00 /* PCI-Ex Control */
+#define E1000_GSCL_1 0x05B10 /* PCI-Ex Statistic Control #1 */
+#define E1000_GSCL_2 0x05B14 /* PCI-Ex Statistic Control #2 */
+#define E1000_GSCL_3 0x05B18 /* PCI-Ex Statistic Control #3 */
+#define E1000_GSCL_4 0x05B1C /* PCI-Ex Statistic Control #4 */
+#define E1000_FACTPS 0x05B30 /* Function Active and Power State to MNG */
+#define E1000_SWSM 0x05B50 /* SW Semaphore */
+#define E1000_FWSM 0x05B54 /* FW Semaphore */
+#define E1000_FFLT_DBG 0x05F04 /* Debug Register */
+#define E1000_HICR 0x08F00 /* Host Interface Control */
+
+/* RSS registers */
+#define E1000_CPUVEC 0x02C10 /* CPU Vector Register - RW */
+#define E1000_MRQC 0x05818 /* Multiple Receive Control - RW */
+#define E1000_RETA 0x05C00 /* Redirection Table - RW Array */
+#define E1000_RSSRK 0x05C80 /* RSS Random Key - RW Array */
+#define E1000_RSSIM 0x05864 /* RSS Interrupt Mask */
+#define E1000_RSSIR 0x05868 /* RSS Interrupt Request */
+/* Register Set (82542)
+ *
+ * Some of the 82542 registers are located at different offsets than they are
+ * in more current versions of the 8254x. Despite the difference in location,
+ * the registers function in the same manner.
+ */
+#define E1000_82542_CTRL E1000_CTRL
+#define E1000_82542_CTRL_DUP E1000_CTRL_DUP
+#define E1000_82542_STATUS E1000_STATUS
+#define E1000_82542_EECD E1000_EECD
+#define E1000_82542_EERD E1000_EERD
+#define E1000_82542_CTRL_EXT E1000_CTRL_EXT
+#define E1000_82542_FLA E1000_FLA
+#define E1000_82542_MDIC E1000_MDIC
+#define E1000_82542_SCTL E1000_SCTL
+#define E1000_82542_FEXTNVM E1000_FEXTNVM
+#define E1000_82542_FCAL E1000_FCAL
+#define E1000_82542_FCAH E1000_FCAH
+#define E1000_82542_FCT E1000_FCT
+#define E1000_82542_VET E1000_VET
+#define E1000_82542_RA 0x00040
+#define E1000_82542_ICR E1000_ICR
+#define E1000_82542_ITR E1000_ITR
+#define E1000_82542_ICS E1000_ICS
+#define E1000_82542_IMS E1000_IMS
+#define E1000_82542_IMC E1000_IMC
+#define E1000_82542_RCTL E1000_RCTL
+#define E1000_82542_RDTR 0x00108
+#define E1000_82542_RDBAL 0x00110
+#define E1000_82542_RDBAH 0x00114
+#define E1000_82542_RDLEN 0x00118
+#define E1000_82542_RDH 0x00120
+#define E1000_82542_RDT 0x00128
+#define E1000_82542_RDTR0 E1000_82542_RDTR
+#define E1000_82542_RDBAL0 E1000_82542_RDBAL
+#define E1000_82542_RDBAH0 E1000_82542_RDBAH
+#define E1000_82542_RDLEN0 E1000_82542_RDLEN
+#define E1000_82542_RDH0 E1000_82542_RDH
+#define E1000_82542_RDT0 E1000_82542_RDT
+#define E1000_82542_SRRCTL(_n) (0x280C + ((_n) << 8)) /* Split and Replication
+ * RX Control - RW */
+#define E1000_82542_DCA_RXCTRL(_n) (0x02814 + ((_n) << 8))
+#define E1000_82542_RDBAH3 0x02B04 /* RX Desc Base High Queue 3 - RW */
+#define E1000_82542_RDBAL3 0x02B00 /* RX Desc Base Low Queue 3 - RW */
+#define E1000_82542_RDLEN3 0x02B08 /* RX Desc Length Queue 3 - RW */
+#define E1000_82542_RDH3 0x02B10 /* RX Desc Head Queue 3 - RW */
+#define E1000_82542_RDT3 0x02B18 /* RX Desc Tail Queue 3 - RW */
+#define E1000_82542_RDBAL2 0x02A00 /* RX Desc Base Low Queue 2 - RW */
+#define E1000_82542_RDBAH2 0x02A04 /* RX Desc Base High Queue 2 - RW */
+#define E1000_82542_RDLEN2 0x02A08 /* RX Desc Length Queue 2 - RW */
+#define E1000_82542_RDH2 0x02A10 /* RX Desc Head Queue 2 - RW */
+#define E1000_82542_RDT2 0x02A18 /* RX Desc Tail Queue 2 - RW */
+#define E1000_82542_RDTR1 0x00130
+#define E1000_82542_RDBAL1 0x00138
+#define E1000_82542_RDBAH1 0x0013C
+#define E1000_82542_RDLEN1 0x00140
+#define E1000_82542_RDH1 0x00148
+#define E1000_82542_RDT1 0x00150
+#define E1000_82542_FCRTH 0x00160
+#define E1000_82542_FCRTL 0x00168
+#define E1000_82542_FCTTV E1000_FCTTV
+#define E1000_82542_TXCW E1000_TXCW
+#define E1000_82542_RXCW E1000_RXCW
+#define E1000_82542_MTA 0x00200
+#define E1000_82542_TCTL E1000_TCTL
+#define E1000_82542_TCTL_EXT E1000_TCTL_EXT
+#define E1000_82542_TIPG E1000_TIPG
+#define E1000_82542_TDBAL 0x00420
+#define E1000_82542_TDBAH 0x00424
+#define E1000_82542_TDLEN 0x00428
+#define E1000_82542_TDH 0x00430
+#define E1000_82542_TDT 0x00438
+#define E1000_82542_TIDV 0x00440
+#define E1000_82542_TBT E1000_TBT
+#define E1000_82542_AIT E1000_AIT
+#define E1000_82542_VFTA 0x00600
+#define E1000_82542_LEDCTL E1000_LEDCTL
+#define E1000_82542_PBA E1000_PBA
+#define E1000_82542_PBS E1000_PBS
+#define E1000_82542_EEMNGCTL E1000_EEMNGCTL
+#define E1000_82542_EEARBC E1000_EEARBC
+#define E1000_82542_FLASHT E1000_FLASHT
+#define E1000_82542_EEWR E1000_EEWR
+#define E1000_82542_FLSWCTL E1000_FLSWCTL
+#define E1000_82542_FLSWDATA E1000_FLSWDATA
+#define E1000_82542_FLSWCNT E1000_FLSWCNT
+#define E1000_82542_FLOP E1000_FLOP
+#define E1000_82542_EXTCNF_CTRL E1000_EXTCNF_CTRL
+#define E1000_82542_EXTCNF_SIZE E1000_EXTCNF_SIZE
+#define E1000_82542_PHY_CTRL E1000_PHY_CTRL
+#define E1000_82542_ERT E1000_ERT
+#define E1000_82542_RXDCTL E1000_RXDCTL
+#define E1000_82542_RXDCTL1 E1000_RXDCTL1
+#define E1000_82542_RADV E1000_RADV
+#define E1000_82542_RSRPD E1000_RSRPD
+#define E1000_82542_TXDMAC E1000_TXDMAC
+#define E1000_82542_KABGTXD E1000_KABGTXD
+#define E1000_82542_TDFHS E1000_TDFHS
+#define E1000_82542_TDFTS E1000_TDFTS
+#define E1000_82542_TDFPC E1000_TDFPC
+#define E1000_82542_TXDCTL E1000_TXDCTL
+#define E1000_82542_TADV E1000_TADV
+#define E1000_82542_TSPMT E1000_TSPMT
+#define E1000_82542_CRCERRS E1000_CRCERRS
+#define E1000_82542_ALGNERRC E1000_ALGNERRC
+#define E1000_82542_SYMERRS E1000_SYMERRS
+#define E1000_82542_RXERRC E1000_RXERRC
+#define E1000_82542_MPC E1000_MPC
+#define E1000_82542_SCC E1000_SCC
+#define E1000_82542_ECOL E1000_ECOL
+#define E1000_82542_MCC E1000_MCC
+#define E1000_82542_LATECOL E1000_LATECOL
+#define E1000_82542_COLC E1000_COLC
+#define E1000_82542_DC E1000_DC
+#define E1000_82542_TNCRS E1000_TNCRS
+#define E1000_82542_SEC E1000_SEC
+#define E1000_82542_CEXTERR E1000_CEXTERR
+#define E1000_82542_RLEC E1000_RLEC
+#define E1000_82542_XONRXC E1000_XONRXC
+#define E1000_82542_XONTXC E1000_XONTXC
+#define E1000_82542_XOFFRXC E1000_XOFFRXC
+#define E1000_82542_XOFFTXC E1000_XOFFTXC
+#define E1000_82542_FCRUC E1000_FCRUC
+#define E1000_82542_PRC64 E1000_PRC64
+#define E1000_82542_PRC127 E1000_PRC127
+#define E1000_82542_PRC255 E1000_PRC255
+#define E1000_82542_PRC511 E1000_PRC511
+#define E1000_82542_PRC1023 E1000_PRC1023
+#define E1000_82542_PRC1522 E1000_PRC1522
+#define E1000_82542_GPRC E1000_GPRC
+#define E1000_82542_BPRC E1000_BPRC
+#define E1000_82542_MPRC E1000_MPRC
+#define E1000_82542_GPTC E1000_GPTC
+#define E1000_82542_GORCL E1000_GORCL
+#define E1000_82542_GORCH E1000_GORCH
+#define E1000_82542_GOTCL E1000_GOTCL
+#define E1000_82542_GOTCH E1000_GOTCH
+#define E1000_82542_RNBC E1000_RNBC
+#define E1000_82542_RUC E1000_RUC
+#define E1000_82542_RFC E1000_RFC
+#define E1000_82542_ROC E1000_ROC
+#define E1000_82542_RJC E1000_RJC
+#define E1000_82542_MGTPRC E1000_MGTPRC
+#define E1000_82542_MGTPDC E1000_MGTPDC
+#define E1000_82542_MGTPTC E1000_MGTPTC
+#define E1000_82542_TORL E1000_TORL
+#define E1000_82542_TORH E1000_TORH
+#define E1000_82542_TOTL E1000_TOTL
+#define E1000_82542_TOTH E1000_TOTH
+#define E1000_82542_TPR E1000_TPR
+#define E1000_82542_TPT E1000_TPT
+#define E1000_82542_PTC64 E1000_PTC64
+#define E1000_82542_PTC127 E1000_PTC127
+#define E1000_82542_PTC255 E1000_PTC255
+#define E1000_82542_PTC511 E1000_PTC511
+#define E1000_82542_PTC1023 E1000_PTC1023
+#define E1000_82542_PTC1522 E1000_PTC1522
+#define E1000_82542_MPTC E1000_MPTC
+#define E1000_82542_BPTC E1000_BPTC
+#define E1000_82542_TSCTC E1000_TSCTC
+#define E1000_82542_TSCTFC E1000_TSCTFC
+#define E1000_82542_RXCSUM E1000_RXCSUM
+#define E1000_82542_WUC E1000_WUC
+#define E1000_82542_WUFC E1000_WUFC
+#define E1000_82542_WUS E1000_WUS
+#define E1000_82542_MANC E1000_MANC
+#define E1000_82542_IPAV E1000_IPAV
+#define E1000_82542_IP4AT E1000_IP4AT
+#define E1000_82542_IP6AT E1000_IP6AT
+#define E1000_82542_WUPL E1000_WUPL
+#define E1000_82542_WUPM E1000_WUPM
+#define E1000_82542_FFLT E1000_FFLT
+#define E1000_82542_TDFH 0x08010
+#define E1000_82542_TDFT 0x08018
+#define E1000_82542_FFMT E1000_FFMT
+#define E1000_82542_FFVT E1000_FFVT
+#define E1000_82542_HOST_IF E1000_HOST_IF
+#define E1000_82542_IAM E1000_IAM
+#define E1000_82542_EEMNGCTL E1000_EEMNGCTL
+#define E1000_82542_PSRCTL E1000_PSRCTL
+#define E1000_82542_RAID E1000_RAID
+#define E1000_82542_TARC0 E1000_TARC0
+#define E1000_82542_TDBAL1 E1000_TDBAL1
+#define E1000_82542_TDBAH1 E1000_TDBAH1
+#define E1000_82542_TDLEN1 E1000_TDLEN1
+#define E1000_82542_TDH1 E1000_TDH1
+#define E1000_82542_TDT1 E1000_TDT1
+#define E1000_82542_TXDCTL1 E1000_TXDCTL1
+#define E1000_82542_TARC1 E1000_TARC1
+#define E1000_82542_RFCTL E1000_RFCTL
+#define E1000_82542_GCR E1000_GCR
+#define E1000_82542_GSCL_1 E1000_GSCL_1
+#define E1000_82542_GSCL_2 E1000_GSCL_2
+#define E1000_82542_GSCL_3 E1000_GSCL_3
+#define E1000_82542_GSCL_4 E1000_GSCL_4
+#define E1000_82542_FACTPS E1000_FACTPS
+#define E1000_82542_SWSM E1000_SWSM
+#define E1000_82542_FWSM E1000_FWSM
+#define E1000_82542_FFLT_DBG E1000_FFLT_DBG
+#define E1000_82542_IAC E1000_IAC
+#define E1000_82542_ICRXPTC E1000_ICRXPTC
+#define E1000_82542_ICRXATC E1000_ICRXATC
+#define E1000_82542_ICTXPTC E1000_ICTXPTC
+#define E1000_82542_ICTXATC E1000_ICTXATC
+#define E1000_82542_ICTXQEC E1000_ICTXQEC
+#define E1000_82542_ICTXQMTC E1000_ICTXQMTC
+#define E1000_82542_ICRXDMTC E1000_ICRXDMTC
+#define E1000_82542_ICRXOC E1000_ICRXOC
+#define E1000_82542_HICR E1000_HICR
+
+#define E1000_82542_CPUVEC E1000_CPUVEC
+#define E1000_82542_MRQC E1000_MRQC
+#define E1000_82542_RETA E1000_RETA
+#define E1000_82542_RSSRK E1000_RSSRK
+#define E1000_82542_RSSIM E1000_RSSIM
+#define E1000_82542_RSSIR E1000_RSSIR
+#define E1000_82542_KUMCTRLSTA E1000_KUMCTRLSTA
+#define E1000_82542_SW_FW_SYNC E1000_SW_FW_SYNC
+#define E1000_82542_MANC2H E1000_MANC2H
+
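+/*
+ * Editor's note: the following is an illustrative sketch only (not part of
+ * the original Intel sources). It shows how shared code can pick the 82542
+ * alias of a register at runtime; the is_82542 flag is a hypothetical
+ * placeholder for a check of hw->mac_type.
+ */
+#if 0
+static inline u32 e1000_rdtr_offset_sketch(int is_82542)
+{
+	/* Same register, different offset on the old 82542 parts. */
+	return is_82542 ? E1000_82542_RDTR : E1000_RDTR;
+}
+#endif
+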
+/* Statistics counters collected by the MAC */
+struct e1000_hw_stats {
+ u64 crcerrs;
+ u64 algnerrc;
+ u64 symerrs;
+ u64 rxerrc;
+ u64 txerrc;
+ u64 mpc;
+ u64 scc;
+ u64 ecol;
+ u64 mcc;
+ u64 latecol;
+ u64 colc;
+ u64 dc;
+ u64 tncrs;
+ u64 sec;
+ u64 cexterr;
+ u64 rlec;
+ u64 xonrxc;
+ u64 xontxc;
+ u64 xoffrxc;
+ u64 xofftxc;
+ u64 fcruc;
+ u64 prc64;
+ u64 prc127;
+ u64 prc255;
+ u64 prc511;
+ u64 prc1023;
+ u64 prc1522;
+ u64 gprc;
+ u64 bprc;
+ u64 mprc;
+ u64 gptc;
+ u64 gorcl;
+ u64 gorch;
+ u64 gotcl;
+ u64 gotch;
+ u64 rnbc;
+ u64 ruc;
+ u64 rfc;
+ u64 roc;
+ u64 rlerrc;
+ u64 rjc;
+ u64 mgprc;
+ u64 mgpdc;
+ u64 mgptc;
+ u64 torl;
+ u64 torh;
+ u64 totl;
+ u64 toth;
+ u64 tpr;
+ u64 tpt;
+ u64 ptc64;
+ u64 ptc127;
+ u64 ptc255;
+ u64 ptc511;
+ u64 ptc1023;
+ u64 ptc1522;
+ u64 mptc;
+ u64 bptc;
+ u64 tsctc;
+ u64 tsctfc;
+ u64 iac;
+ u64 icrxptc;
+ u64 icrxatc;
+ u64 ictxptc;
+ u64 ictxatc;
+ u64 ictxqec;
+ u64 ictxqmtc;
+ u64 icrxdmtc;
+ u64 icrxoc;
+};
+
+/* Structure containing variables used by the shared code (e1000_hw.c) */
+struct e1000_hw {
+ u8 __iomem *hw_addr;
+ u8 __iomem *flash_address;
+ e1000_mac_type mac_type;
+ e1000_phy_type phy_type;
+ u32 phy_init_script;
+ e1000_media_type media_type;
+ void *back;
+ struct e1000_shadow_ram *eeprom_shadow_ram;
+ u32 flash_bank_size;
+ u32 flash_base_addr;
+ e1000_fc_type fc;
+ e1000_bus_speed bus_speed;
+ e1000_bus_width bus_width;
+ e1000_bus_type bus_type;
+ struct e1000_eeprom_info eeprom;
+ e1000_ms_type master_slave;
+ e1000_ms_type original_master_slave;
+ e1000_ffe_config ffe_config_state;
+ u32 asf_firmware_present;
+ u32 eeprom_semaphore_present;
+ u32 swfw_sync_present;
+ u32 swfwhw_semaphore_present;
+ unsigned long io_base;
+ u32 phy_id;
+ u32 phy_revision;
+ u32 phy_addr;
+ u32 original_fc;
+ u32 txcw;
+ u32 autoneg_failed;
+ u32 max_frame_size;
+ u32 min_frame_size;
+ u32 mc_filter_type;
+ u32 num_mc_addrs;
+ u32 collision_delta;
+ u32 tx_packet_delta;
+ u32 ledctl_default;
+ u32 ledctl_mode1;
+ u32 ledctl_mode2;
+ bool tx_pkt_filtering;
+ struct e1000_host_mng_dhcp_cookie mng_cookie;
+ u16 phy_spd_default;
+ u16 autoneg_advertised;
+ u16 pci_cmd_word;
+ u16 fc_high_water;
+ u16 fc_low_water;
+ u16 fc_pause_time;
+ u16 current_ifs_val;
+ u16 ifs_min_val;
+ u16 ifs_max_val;
+ u16 ifs_step_size;
+ u16 ifs_ratio;
+ u16 device_id;
+ u16 vendor_id;
+ u16 subsystem_id;
+ u16 subsystem_vendor_id;
+ u8 revision_id;
+ u8 autoneg;
+ u8 mdix;
+ u8 forced_speed_duplex;
+ u8 wait_autoneg_complete;
+ u8 dma_fairness;
+ u8 mac_addr[NODE_ADDRESS_SIZE];
+ u8 perm_mac_addr[NODE_ADDRESS_SIZE];
+ bool disable_polarity_correction;
+ bool speed_downgraded;
+ e1000_smart_speed smart_speed;
+ e1000_dsp_config dsp_config_state;
+ bool get_link_status;
+ bool serdes_link_down;
+ bool tbi_compatibility_en;
+ bool tbi_compatibility_on;
+ bool laa_is_present;
+ bool phy_reset_disable;
+ bool initialize_hw_bits_disable;
+ bool fc_send_xon;
+ bool fc_strict_ieee;
+ bool report_tx_early;
+ bool adaptive_ifs;
+ bool ifs_params_forced;
+ bool in_ifs_mode;
+ bool mng_reg_access_disabled;
+ bool leave_av_bit_off;
+ bool kmrn_lock_loss_workaround_disabled;
+ bool bad_tx_carr_stats_fd;
+ bool has_manc2h;
+ bool rx_needs_kicking;
+ bool has_smbus;
+};
+
+
+#define E1000_EEPROM_SWDPIN0 0x0001 /* SWDPIN 0 EEPROM Value */
+#define E1000_EEPROM_LED_LOGIC 0x0020 /* Led Logic Word */
+#define E1000_EEPROM_RW_REG_DATA 16 /* Offset to data in EEPROM read/write registers */
+#define E1000_EEPROM_RW_REG_DONE 2 /* Offset to READ/WRITE done bit */
+#define E1000_EEPROM_RW_REG_START 1 /* First bit for telling part to start operation */
+#define E1000_EEPROM_RW_ADDR_SHIFT 2 /* Shift to the address bits */
+#define E1000_EEPROM_POLL_WRITE 1 /* Flag for polling for write complete */
+#define E1000_EEPROM_POLL_READ 0 /* Flag for polling for read complete */
+/* Register Bit Masks */
+/* Device Control */
+#define E1000_CTRL_FD 0x00000001 /* Full duplex.0=half; 1=full */
+#define E1000_CTRL_BEM 0x00000002 /* Endian Mode.0=little,1=big */
+#define E1000_CTRL_PRIOR 0x00000004 /* Priority on PCI. 0=rx,1=fair */
+#define E1000_CTRL_GIO_MASTER_DISABLE 0x00000004 /* Blocks new Master requests */
+#define E1000_CTRL_LRST 0x00000008 /* Link reset. 0=normal,1=reset */
+#define E1000_CTRL_TME 0x00000010 /* Test mode. 0=normal,1=test */
+#define E1000_CTRL_SLE 0x00000020 /* Serial Link on 0=dis,1=en */
+#define E1000_CTRL_ASDE 0x00000020 /* Auto-speed detect enable */
+#define E1000_CTRL_SLU 0x00000040 /* Set link up (Force Link) */
+#define E1000_CTRL_ILOS 0x00000080 /* Invert Loss-Of Signal */
+#define E1000_CTRL_SPD_SEL 0x00000300 /* Speed Select Mask */
+#define E1000_CTRL_SPD_10 0x00000000 /* Force 10Mb */
+#define E1000_CTRL_SPD_100 0x00000100 /* Force 100Mb */
+#define E1000_CTRL_SPD_1000 0x00000200 /* Force 1Gb */
+#define E1000_CTRL_BEM32 0x00000400 /* Big Endian 32 mode */
+#define E1000_CTRL_FRCSPD 0x00000800 /* Force Speed */
+#define E1000_CTRL_FRCDPX 0x00001000 /* Force Duplex */
+#define E1000_CTRL_D_UD_EN 0x00002000 /* Dock/Undock enable */
+#define E1000_CTRL_D_UD_POLARITY 0x00004000 /* Defined polarity of Dock/Undock indication in SDP[0] */
+#define E1000_CTRL_FORCE_PHY_RESET 0x00008000 /* Reset both PHY ports, through PHYRST_N pin */
+#define E1000_CTRL_EXT_LINK_EN 0x00010000 /* enable link status from external LINK_0 and LINK_1 pins */
+#define E1000_CTRL_SWDPIN0 0x00040000 /* SWDPIN 0 value */
+#define E1000_CTRL_SWDPIN1 0x00080000 /* SWDPIN 1 value */
+#define E1000_CTRL_SWDPIN2 0x00100000 /* SWDPIN 2 value */
+#define E1000_CTRL_SWDPIN3 0x00200000 /* SWDPIN 3 value */
+#define E1000_CTRL_SWDPIO0 0x00400000 /* SWDPIN 0 Input or output */
+#define E1000_CTRL_SWDPIO1 0x00800000 /* SWDPIN 1 input or output */
+#define E1000_CTRL_SWDPIO2 0x01000000 /* SWDPIN 2 input or output */
+#define E1000_CTRL_SWDPIO3 0x02000000 /* SWDPIN 3 input or output */
+#define E1000_CTRL_RST 0x04000000 /* Global reset */
+#define E1000_CTRL_RFCE 0x08000000 /* Receive Flow Control enable */
+#define E1000_CTRL_TFCE 0x10000000 /* Transmit flow control enable */
+#define E1000_CTRL_RTE 0x20000000 /* Routing tag enable */
+#define E1000_CTRL_VME 0x40000000 /* IEEE VLAN mode enable */
+#define E1000_CTRL_PHY_RST 0x80000000 /* PHY Reset */
+#define E1000_CTRL_SW2FW_INT 0x02000000 /* Initiate an interrupt to manageability engine */
+
+/* Device Status */
+#define E1000_STATUS_FD 0x00000001 /* Full duplex.0=half,1=full */
+#define E1000_STATUS_LU 0x00000002 /* Link up.0=no,1=link */
+#define E1000_STATUS_FUNC_MASK 0x0000000C /* PCI Function Mask */
+#define E1000_STATUS_FUNC_SHIFT 2
+#define E1000_STATUS_FUNC_0 0x00000000 /* Function 0 */
+#define E1000_STATUS_FUNC_1 0x00000004 /* Function 1 */
+#define E1000_STATUS_TXOFF 0x00000010 /* transmission paused */
+#define E1000_STATUS_TBIMODE 0x00000020 /* TBI mode */
+#define E1000_STATUS_SPEED_MASK 0x000000C0
+#define E1000_STATUS_SPEED_10 0x00000000 /* Speed 10Mb/s */
+#define E1000_STATUS_SPEED_100 0x00000040 /* Speed 100Mb/s */
+#define E1000_STATUS_SPEED_1000 0x00000080 /* Speed 1000Mb/s */
+#define E1000_STATUS_LAN_INIT_DONE 0x00000200 /* Lan Init Completion
+ by EEPROM/Flash */
+#define E1000_STATUS_ASDV 0x00000300 /* Auto speed detect value */
+#define E1000_STATUS_DOCK_CI 0x00000800 /* Change in Dock/Undock state. Clear on write '0'. */
+#define E1000_STATUS_GIO_MASTER_ENABLE 0x00080000 /* Status of Master requests. */
+#define E1000_STATUS_MTXCKOK 0x00000400 /* MTX clock running OK */
+#define E1000_STATUS_PCI66 0x00000800 /* In 66 MHz slot */
+#define E1000_STATUS_BUS64 0x00001000 /* In 64 bit slot */
+#define E1000_STATUS_PCIX_MODE 0x00002000 /* PCI-X mode */
+#define E1000_STATUS_PCIX_SPEED 0x0000C000 /* PCI-X bus speed */
+#define E1000_STATUS_BMC_SKU_0 0x00100000 /* BMC USB redirect disabled */
+#define E1000_STATUS_BMC_SKU_1 0x00200000 /* BMC SRAM disabled */
+#define E1000_STATUS_BMC_SKU_2 0x00400000 /* BMC SDRAM disabled */
+#define E1000_STATUS_BMC_CRYPTO 0x00800000 /* BMC crypto disabled */
+#define E1000_STATUS_BMC_LITE 0x01000000 /* BMC external code execution disabled */
+#define E1000_STATUS_RGMII_ENABLE 0x02000000 /* RGMII disabled */
+#define E1000_STATUS_FUSE_8 0x04000000
+#define E1000_STATUS_FUSE_9 0x08000000
+#define E1000_STATUS_SERDES0_DIS 0x10000000 /* SERDES disabled on port 0 */
+#define E1000_STATUS_SERDES1_DIS 0x20000000 /* SERDES disabled on port 1 */
+
+/* Constants used to interpret the masked PCI-X bus speed. */
+#define E1000_STATUS_PCIX_SPEED_66 0x00000000 /* PCI-X bus speed 50-66 MHz */
+#define E1000_STATUS_PCIX_SPEED_100 0x00004000 /* PCI-X bus speed 66-100 MHz */
+#define E1000_STATUS_PCIX_SPEED_133 0x00008000 /* PCI-X bus speed 100-133 MHz */
+
+/* EEPROM/Flash Control */
+#define E1000_EECD_SK 0x00000001 /* EEPROM Clock */
+#define E1000_EECD_CS 0x00000002 /* EEPROM Chip Select */
+#define E1000_EECD_DI 0x00000004 /* EEPROM Data In */
+#define E1000_EECD_DO 0x00000008 /* EEPROM Data Out */
+#define E1000_EECD_FWE_MASK 0x00000030
+#define E1000_EECD_FWE_DIS 0x00000010 /* Disable FLASH writes */
+#define E1000_EECD_FWE_EN 0x00000020 /* Enable FLASH writes */
+#define E1000_EECD_FWE_SHIFT 4
+#define E1000_EECD_REQ 0x00000040 /* EEPROM Access Request */
+#define E1000_EECD_GNT 0x00000080 /* EEPROM Access Grant */
+#define E1000_EECD_PRES 0x00000100 /* EEPROM Present */
+#define E1000_EECD_SIZE 0x00000200 /* EEPROM Size (0=64 word 1=256 word) */
+#define E1000_EECD_ADDR_BITS 0x00000400 /* EEPROM Addressing bits based on type
+ * (0-small, 1-large) */
+#define E1000_EECD_TYPE 0x00002000 /* EEPROM Type (1-SPI, 0-Microwire) */
+#ifndef E1000_EEPROM_GRANT_ATTEMPTS
+#define E1000_EEPROM_GRANT_ATTEMPTS 1000 /* EEPROM # attempts to gain grant */
+#endif
+#define E1000_EECD_AUTO_RD 0x00000200 /* EEPROM Auto Read done */
+#define E1000_EECD_SIZE_EX_MASK 0x00007800 /* EEPROM Size */
+#define E1000_EECD_SIZE_EX_SHIFT 11
+#define E1000_EECD_NVADDS 0x00018000 /* NVM Address Size */
+#define E1000_EECD_SELSHAD 0x00020000 /* Select Shadow RAM */
+#define E1000_EECD_INITSRAM 0x00040000 /* Initialize Shadow RAM */
+#define E1000_EECD_FLUPD 0x00080000 /* Update FLASH */
+#define E1000_EECD_AUPDEN 0x00100000 /* Enable Autonomous FLASH update */
+#define E1000_EECD_SHADV 0x00200000 /* Shadow RAM Data Valid */
+#define E1000_EECD_SEC1VAL 0x00400000 /* Sector One Valid */
+#define E1000_EECD_SECVAL_SHIFT 22
+#define E1000_STM_OPCODE 0xDB00
+#define E1000_HICR_FW_RESET 0xC0
+
+#define E1000_SHADOW_RAM_WORDS 2048
+#define E1000_ICH_NVM_SIG_WORD 0x13
+#define E1000_ICH_NVM_SIG_MASK 0xC0
+
+/* EEPROM Read */
+#define E1000_EERD_START 0x00000001 /* Start Read */
+#define E1000_EERD_DONE 0x00000010 /* Read Done */
+#define E1000_EERD_ADDR_SHIFT 8
+#define E1000_EERD_ADDR_MASK 0x0000FF00 /* Read Address */
+#define E1000_EERD_DATA_SHIFT 16
+#define E1000_EERD_DATA_MASK 0xFFFF0000 /* Read Data */
+
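+/*
+ * Editor's note: illustrative sketch only (not from the Intel sources).
+ * Reading one EEPROM word through EERD: write the word address plus the
+ * START bit, poll DONE, then extract the data field. E1000_WRITE_REG()/
+ * E1000_READ_REG() from e1000_osdep.h and udelay() are assumed to be
+ * available.
+ */
+#if 0
+static u16 e1000_eerd_read_word_sketch(struct e1000_hw *hw, u16 word)
+{
+	u32 eerd = ((u32)word << E1000_EERD_ADDR_SHIFT) | E1000_EERD_START;
+	int i;
+
+	E1000_WRITE_REG(hw, EERD, eerd);
+	for (i = 0; i < 1000; i++) {	/* arbitrary poll limit */
+		eerd = E1000_READ_REG(hw, EERD);
+		if (eerd & E1000_EERD_DONE)
+			break;
+		udelay(5);
+	}
+	return (u16)((eerd & E1000_EERD_DATA_MASK) >> E1000_EERD_DATA_SHIFT);
+}
+#endif
+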
+/* SPI EEPROM Status Register */
+#define EEPROM_STATUS_RDY_SPI 0x01
+#define EEPROM_STATUS_WEN_SPI 0x02
+#define EEPROM_STATUS_BP0_SPI 0x04
+#define EEPROM_STATUS_BP1_SPI 0x08
+#define EEPROM_STATUS_WPEN_SPI 0x80
+
+/* Extended Device Control */
+#define E1000_CTRL_EXT_GPI0_EN 0x00000001 /* Maps SDP4 to GPI0 */
+#define E1000_CTRL_EXT_GPI1_EN 0x00000002 /* Maps SDP5 to GPI1 */
+#define E1000_CTRL_EXT_PHYINT_EN E1000_CTRL_EXT_GPI1_EN
+#define E1000_CTRL_EXT_GPI2_EN 0x00000004 /* Maps SDP6 to GPI2 */
+#define E1000_CTRL_EXT_GPI3_EN 0x00000008 /* Maps SDP7 to GPI3 */
+#define E1000_CTRL_EXT_SDP4_DATA 0x00000010 /* Value of SW Definable Pin 4 */
+#define E1000_CTRL_EXT_SDP5_DATA 0x00000020 /* Value of SW Definable Pin 5 */
+#define E1000_CTRL_EXT_PHY_INT E1000_CTRL_EXT_SDP5_DATA
+#define E1000_CTRL_EXT_SDP6_DATA 0x00000040 /* Value of SW Definable Pin 6 */
+#define E1000_CTRL_EXT_SDP7_DATA 0x00000080 /* Value of SW Definable Pin 7 */
+#define E1000_CTRL_EXT_SDP4_DIR 0x00000100 /* Direction of SDP4 0=in 1=out */
+#define E1000_CTRL_EXT_SDP5_DIR 0x00000200 /* Direction of SDP5 0=in 1=out */
+#define E1000_CTRL_EXT_SDP6_DIR 0x00000400 /* Direction of SDP6 0=in 1=out */
+#define E1000_CTRL_EXT_SDP7_DIR 0x00000800 /* Direction of SDP7 0=in 1=out */
+#define E1000_CTRL_EXT_ASDCHK 0x00001000 /* Initiate an ASD sequence */
+#define E1000_CTRL_EXT_EE_RST 0x00002000 /* Reinitialize from EEPROM */
+#define E1000_CTRL_EXT_IPS 0x00004000 /* Invert Power State */
+#define E1000_CTRL_EXT_SPD_BYPS 0x00008000 /* Speed Select Bypass */
+#define E1000_CTRL_EXT_RO_DIS 0x00020000 /* Relaxed Ordering disable */
+#define E1000_CTRL_EXT_LINK_MODE_MASK 0x00C00000
+#define E1000_CTRL_EXT_LINK_MODE_GMII 0x00000000
+#define E1000_CTRL_EXT_LINK_MODE_TBI 0x00C00000
+#define E1000_CTRL_EXT_LINK_MODE_KMRN 0x00000000
+#define E1000_CTRL_EXT_LINK_MODE_SERDES 0x00C00000
+#define E1000_CTRL_EXT_LINK_MODE_SGMII 0x00800000
+#define E1000_CTRL_EXT_WR_WMARK_MASK 0x03000000
+#define E1000_CTRL_EXT_WR_WMARK_256 0x00000000
+#define E1000_CTRL_EXT_WR_WMARK_320 0x01000000
+#define E1000_CTRL_EXT_WR_WMARK_384 0x02000000
+#define E1000_CTRL_EXT_WR_WMARK_448 0x03000000
+#define E1000_CTRL_EXT_DRV_LOAD 0x10000000 /* Driver loaded bit for FW */
+#define E1000_CTRL_EXT_IAME 0x08000000 /* Interrupt acknowledge Auto-mask */
+#define E1000_CTRL_EXT_INT_TIMER_CLR 0x20000000 /* Clear Interrupt timers after IMS clear */
+#define E1000_CRTL_EXT_PB_PAREN 0x01000000 /* packet buffer parity error detection enabled */
+#define E1000_CTRL_EXT_DF_PAREN 0x02000000 /* descriptor FIFO parity error detection enable */
+#define E1000_CTRL_EXT_GHOST_PAREN 0x40000000
+
+/* MDI Control */
+#define E1000_MDIC_DATA_MASK 0x0000FFFF
+#define E1000_MDIC_REG_MASK 0x001F0000
+#define E1000_MDIC_REG_SHIFT 16
+#define E1000_MDIC_PHY_MASK 0x03E00000
+#define E1000_MDIC_PHY_SHIFT 21
+#define E1000_MDIC_OP_WRITE 0x04000000
+#define E1000_MDIC_OP_READ 0x08000000
+#define E1000_MDIC_READY 0x10000000
+#define E1000_MDIC_INT_EN 0x20000000
+#define E1000_MDIC_ERROR 0x40000000
+
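+/*
+ * Editor's note: illustrative sketch only. A PHY register read is started
+ * by composing the register and PHY addresses with the READ opcode; the
+ * hardware then sets READY (or ERROR) and the result lands in the low
+ * 16 bits. phy_addr/reg_addr are hypothetical locals and register access
+ * helpers are assumed.
+ */
+#if 0
+u32 mdic = ((u32)reg_addr << E1000_MDIC_REG_SHIFT) |
+           ((u32)phy_addr << E1000_MDIC_PHY_SHIFT) |
+           E1000_MDIC_OP_READ;
+/* write mdic to E1000_MDIC, poll E1000_MDIC_READY, check E1000_MDIC_ERROR,
+ * then take the result from the E1000_MDIC_DATA_MASK bits */
+#endif
+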
+#define E1000_KUMCTRLSTA_MASK 0x0000FFFF
+#define E1000_KUMCTRLSTA_OFFSET 0x001F0000
+#define E1000_KUMCTRLSTA_OFFSET_SHIFT 16
+#define E1000_KUMCTRLSTA_REN 0x00200000
+
+#define E1000_KUMCTRLSTA_OFFSET_FIFO_CTRL 0x00000000
+#define E1000_KUMCTRLSTA_OFFSET_CTRL 0x00000001
+#define E1000_KUMCTRLSTA_OFFSET_INB_CTRL 0x00000002
+#define E1000_KUMCTRLSTA_OFFSET_DIAG 0x00000003
+#define E1000_KUMCTRLSTA_OFFSET_TIMEOUTS 0x00000004
+#define E1000_KUMCTRLSTA_OFFSET_INB_PARAM 0x00000009
+#define E1000_KUMCTRLSTA_OFFSET_HD_CTRL 0x00000010
+#define E1000_KUMCTRLSTA_OFFSET_M2P_SERDES 0x0000001E
+#define E1000_KUMCTRLSTA_OFFSET_M2P_MODES 0x0000001F
+
+/* FIFO Control */
+#define E1000_KUMCTRLSTA_FIFO_CTRL_RX_BYPASS 0x00000008
+#define E1000_KUMCTRLSTA_FIFO_CTRL_TX_BYPASS 0x00000800
+
+/* In-Band Control */
+#define E1000_KUMCTRLSTA_INB_CTRL_LINK_STATUS_TX_TIMEOUT_DEFAULT 0x00000500
+#define E1000_KUMCTRLSTA_INB_CTRL_DIS_PADDING 0x00000010
+
+/* Half-Duplex Control */
+#define E1000_KUMCTRLSTA_HD_CTRL_10_100_DEFAULT 0x00000004
+#define E1000_KUMCTRLSTA_HD_CTRL_1000_DEFAULT 0x00000000
+
+#define E1000_KUMCTRLSTA_OFFSET_K0S_CTRL 0x0000001E
+
+#define E1000_KUMCTRLSTA_DIAG_FELPBK 0x2000
+#define E1000_KUMCTRLSTA_DIAG_NELPBK 0x1000
+
+#define E1000_KUMCTRLSTA_K0S_100_EN 0x2000
+#define E1000_KUMCTRLSTA_K0S_GBE_EN 0x1000
+#define E1000_KUMCTRLSTA_K0S_ENTRY_LATENCY_MASK 0x0003
+
+#define E1000_KABGTXD_BGSQLBIAS 0x00050000
+
+#define E1000_PHY_CTRL_SPD_EN 0x00000001
+#define E1000_PHY_CTRL_D0A_LPLU 0x00000002
+#define E1000_PHY_CTRL_NOND0A_LPLU 0x00000004
+#define E1000_PHY_CTRL_NOND0A_GBE_DISABLE 0x00000008
+#define E1000_PHY_CTRL_GBE_DISABLE 0x00000040
+#define E1000_PHY_CTRL_B2B_EN 0x00000080
+
+/* LED Control */
+#define E1000_LEDCTL_LED0_MODE_MASK 0x0000000F
+#define E1000_LEDCTL_LED0_MODE_SHIFT 0
+#define E1000_LEDCTL_LED0_BLINK_RATE 0x00000020
+#define E1000_LEDCTL_LED0_IVRT 0x00000040
+#define E1000_LEDCTL_LED0_BLINK 0x00000080
+#define E1000_LEDCTL_LED1_MODE_MASK 0x00000F00
+#define E1000_LEDCTL_LED1_MODE_SHIFT 8
+#define E1000_LEDCTL_LED1_BLINK_RATE 0x00002000
+#define E1000_LEDCTL_LED1_IVRT 0x00004000
+#define E1000_LEDCTL_LED1_BLINK 0x00008000
+#define E1000_LEDCTL_LED2_MODE_MASK 0x000F0000
+#define E1000_LEDCTL_LED2_MODE_SHIFT 16
+#define E1000_LEDCTL_LED2_BLINK_RATE 0x00200000
+#define E1000_LEDCTL_LED2_IVRT 0x00400000
+#define E1000_LEDCTL_LED2_BLINK 0x00800000
+#define E1000_LEDCTL_LED3_MODE_MASK 0x0F000000
+#define E1000_LEDCTL_LED3_MODE_SHIFT 24
+#define E1000_LEDCTL_LED3_BLINK_RATE 0x20000000
+#define E1000_LEDCTL_LED3_IVRT 0x40000000
+#define E1000_LEDCTL_LED3_BLINK 0x80000000
+
+#define E1000_LEDCTL_MODE_LINK_10_1000 0x0
+#define E1000_LEDCTL_MODE_LINK_100_1000 0x1
+#define E1000_LEDCTL_MODE_LINK_UP 0x2
+#define E1000_LEDCTL_MODE_ACTIVITY 0x3
+#define E1000_LEDCTL_MODE_LINK_ACTIVITY 0x4
+#define E1000_LEDCTL_MODE_LINK_10 0x5
+#define E1000_LEDCTL_MODE_LINK_100 0x6
+#define E1000_LEDCTL_MODE_LINK_1000 0x7
+#define E1000_LEDCTL_MODE_PCIX_MODE 0x8
+#define E1000_LEDCTL_MODE_FULL_DUPLEX 0x9
+#define E1000_LEDCTL_MODE_COLLISION 0xA
+#define E1000_LEDCTL_MODE_BUS_SPEED 0xB
+#define E1000_LEDCTL_MODE_BUS_SIZE 0xC
+#define E1000_LEDCTL_MODE_PAUSED 0xD
+#define E1000_LEDCTL_MODE_LED_ON 0xE
+#define E1000_LEDCTL_MODE_LED_OFF 0xF
+
+/* Receive Address */
+#define E1000_RAH_AV 0x80000000 /* Receive address valid */
+
+/* Interrupt Cause Read */
+#define E1000_ICR_TXDW 0x00000001 /* Transmit desc written back */
+#define E1000_ICR_TXQE 0x00000002 /* Transmit Queue empty */
+#define E1000_ICR_LSC 0x00000004 /* Link Status Change */
+#define E1000_ICR_RXSEQ 0x00000008 /* rx sequence error */
+#define E1000_ICR_RXDMT0 0x00000010 /* rx desc min. threshold (0) */
+#define E1000_ICR_RXO 0x00000040 /* rx overrun */
+#define E1000_ICR_RXT0 0x00000080 /* rx timer intr (ring 0) */
+#define E1000_ICR_MDAC 0x00000200 /* MDIO access complete */
+#define E1000_ICR_RXCFG 0x00000400 /* RX /c/ ordered set */
+#define E1000_ICR_GPI_EN0 0x00000800 /* GP Int 0 */
+#define E1000_ICR_GPI_EN1 0x00001000 /* GP Int 1 */
+#define E1000_ICR_GPI_EN2 0x00002000 /* GP Int 2 */
+#define E1000_ICR_GPI_EN3 0x00004000 /* GP Int 3 */
+#define E1000_ICR_TXD_LOW 0x00008000
+#define E1000_ICR_SRPD 0x00010000
+#define E1000_ICR_ACK 0x00020000 /* Receive Ack frame */
+#define E1000_ICR_MNG 0x00040000 /* Manageability event */
+#define E1000_ICR_DOCK 0x00080000 /* Dock/Undock */
+#define E1000_ICR_INT_ASSERTED 0x80000000 /* If this bit asserted, the driver should claim the interrupt */
+#define E1000_ICR_RXD_FIFO_PAR0 0x00100000 /* queue 0 Rx descriptor FIFO parity error */
+#define E1000_ICR_TXD_FIFO_PAR0 0x00200000 /* queue 0 Tx descriptor FIFO parity error */
+#define E1000_ICR_HOST_ARB_PAR 0x00400000 /* host arb read buffer parity error */
+#define E1000_ICR_PB_PAR 0x00800000 /* packet buffer parity error */
+#define E1000_ICR_RXD_FIFO_PAR1 0x01000000 /* queue 1 Rx descriptor FIFO parity error */
+#define E1000_ICR_TXD_FIFO_PAR1 0x02000000 /* queue 1 Tx descriptor FIFO parity error */
+#define E1000_ICR_ALL_PARITY 0x03F00000 /* all parity error bits */
+#define E1000_ICR_DSW 0x00000020 /* FW changed the status of DISSW bit in the FWSM */
+#define E1000_ICR_PHYINT 0x00001000 /* LAN connected device generates an interrupt */
+#define E1000_ICR_EPRST 0x00100000 /* ME hardware reset occurs */
+
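+/*
+ * Editor's note: illustrative sketch only. Reading ICR clears it; on chips
+ * that support it, INT_ASSERTED tells a shared-interrupt handler whether
+ * the event belongs to this device. icr is assumed to hold the value just
+ * read from E1000_ICR.
+ */
+#if 0
+if (!(icr & E1000_ICR_INT_ASSERTED))
+	return IRQ_NONE;	/* interrupt was raised by another device */
+if (icr & (E1000_ICR_RXSEQ | E1000_ICR_LSC))
+	/* link changed - typically schedule the watchdog here */;
+#endif
+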
+/* Interrupt Cause Set */
+#define E1000_ICS_TXDW E1000_ICR_TXDW /* Transmit desc written back */
+#define E1000_ICS_TXQE E1000_ICR_TXQE /* Transmit Queue empty */
+#define E1000_ICS_LSC E1000_ICR_LSC /* Link Status Change */
+#define E1000_ICS_RXSEQ E1000_ICR_RXSEQ /* rx sequence error */
+#define E1000_ICS_RXDMT0 E1000_ICR_RXDMT0 /* rx desc min. threshold */
+#define E1000_ICS_RXO E1000_ICR_RXO /* rx overrun */
+#define E1000_ICS_RXT0 E1000_ICR_RXT0 /* rx timer intr */
+#define E1000_ICS_MDAC E1000_ICR_MDAC /* MDIO access complete */
+#define E1000_ICS_RXCFG E1000_ICR_RXCFG /* RX /c/ ordered set */
+#define E1000_ICS_GPI_EN0 E1000_ICR_GPI_EN0 /* GP Int 0 */
+#define E1000_ICS_GPI_EN1 E1000_ICR_GPI_EN1 /* GP Int 1 */
+#define E1000_ICS_GPI_EN2 E1000_ICR_GPI_EN2 /* GP Int 2 */
+#define E1000_ICS_GPI_EN3 E1000_ICR_GPI_EN3 /* GP Int 3 */
+#define E1000_ICS_TXD_LOW E1000_ICR_TXD_LOW
+#define E1000_ICS_SRPD E1000_ICR_SRPD
+#define E1000_ICS_ACK E1000_ICR_ACK /* Receive Ack frame */
+#define E1000_ICS_MNG E1000_ICR_MNG /* Manageability event */
+#define E1000_ICS_DOCK E1000_ICR_DOCK /* Dock/Undock */
+#define E1000_ICS_RXD_FIFO_PAR0 E1000_ICR_RXD_FIFO_PAR0 /* queue 0 Rx descriptor FIFO parity error */
+#define E1000_ICS_TXD_FIFO_PAR0 E1000_ICR_TXD_FIFO_PAR0 /* queue 0 Tx descriptor FIFO parity error */
+#define E1000_ICS_HOST_ARB_PAR E1000_ICR_HOST_ARB_PAR /* host arb read buffer parity error */
+#define E1000_ICS_PB_PAR E1000_ICR_PB_PAR /* packet buffer parity error */
+#define E1000_ICS_RXD_FIFO_PAR1 E1000_ICR_RXD_FIFO_PAR1 /* queue 1 Rx descriptor FIFO parity error */
+#define E1000_ICS_TXD_FIFO_PAR1 E1000_ICR_TXD_FIFO_PAR1 /* queue 1 Tx descriptor FIFO parity error */
+#define E1000_ICS_DSW E1000_ICR_DSW
+#define E1000_ICS_PHYINT E1000_ICR_PHYINT
+#define E1000_ICS_EPRST E1000_ICR_EPRST
+
+/* Interrupt Mask Set */
+#define E1000_IMS_TXDW E1000_ICR_TXDW /* Transmit desc written back */
+#define E1000_IMS_TXQE E1000_ICR_TXQE /* Transmit Queue empty */
+#define E1000_IMS_LSC E1000_ICR_LSC /* Link Status Change */
+#define E1000_IMS_RXSEQ E1000_ICR_RXSEQ /* rx sequence error */
+#define E1000_IMS_RXDMT0 E1000_ICR_RXDMT0 /* rx desc min. threshold */
+#define E1000_IMS_RXO E1000_ICR_RXO /* rx overrun */
+#define E1000_IMS_RXT0 E1000_ICR_RXT0 /* rx timer intr */
+#define E1000_IMS_MDAC E1000_ICR_MDAC /* MDIO access complete */
+#define E1000_IMS_RXCFG E1000_ICR_RXCFG /* RX /c/ ordered set */
+#define E1000_IMS_GPI_EN0 E1000_ICR_GPI_EN0 /* GP Int 0 */
+#define E1000_IMS_GPI_EN1 E1000_ICR_GPI_EN1 /* GP Int 1 */
+#define E1000_IMS_GPI_EN2 E1000_ICR_GPI_EN2 /* GP Int 2 */
+#define E1000_IMS_GPI_EN3 E1000_ICR_GPI_EN3 /* GP Int 3 */
+#define E1000_IMS_TXD_LOW E1000_ICR_TXD_LOW
+#define E1000_IMS_SRPD E1000_ICR_SRPD
+#define E1000_IMS_ACK E1000_ICR_ACK /* Receive Ack frame */
+#define E1000_IMS_MNG E1000_ICR_MNG /* Manageability event */
+#define E1000_IMS_DOCK E1000_ICR_DOCK /* Dock/Undock */
+#define E1000_IMS_RXD_FIFO_PAR0 E1000_ICR_RXD_FIFO_PAR0 /* queue 0 Rx descriptor FIFO parity error */
+#define E1000_IMS_TXD_FIFO_PAR0 E1000_ICR_TXD_FIFO_PAR0 /* queue 0 Tx descriptor FIFO parity error */
+#define E1000_IMS_HOST_ARB_PAR E1000_ICR_HOST_ARB_PAR /* host arb read buffer parity error */
+#define E1000_IMS_PB_PAR E1000_ICR_PB_PAR /* packet buffer parity error */
+#define E1000_IMS_RXD_FIFO_PAR1 E1000_ICR_RXD_FIFO_PAR1 /* queue 1 Rx descriptor FIFO parity error */
+#define E1000_IMS_TXD_FIFO_PAR1 E1000_ICR_TXD_FIFO_PAR1 /* queue 1 Tx descriptor FIFO parity error */
+#define E1000_IMS_DSW E1000_ICR_DSW
+#define E1000_IMS_PHYINT E1000_ICR_PHYINT
+#define E1000_IMS_EPRST E1000_ICR_EPRST
+
+/* Interrupt Mask Clear */
+#define E1000_IMC_TXDW E1000_ICR_TXDW /* Transmit desc written back */
+#define E1000_IMC_TXQE E1000_ICR_TXQE /* Transmit Queue empty */
+#define E1000_IMC_LSC E1000_ICR_LSC /* Link Status Change */
+#define E1000_IMC_RXSEQ E1000_ICR_RXSEQ /* rx sequence error */
+#define E1000_IMC_RXDMT0 E1000_ICR_RXDMT0 /* rx desc min. threshold */
+#define E1000_IMC_RXO E1000_ICR_RXO /* rx overrun */
+#define E1000_IMC_RXT0 E1000_ICR_RXT0 /* rx timer intr */
+#define E1000_IMC_MDAC E1000_ICR_MDAC /* MDIO access complete */
+#define E1000_IMC_RXCFG E1000_ICR_RXCFG /* RX /c/ ordered set */
+#define E1000_IMC_GPI_EN0 E1000_ICR_GPI_EN0 /* GP Int 0 */
+#define E1000_IMC_GPI_EN1 E1000_ICR_GPI_EN1 /* GP Int 1 */
+#define E1000_IMC_GPI_EN2 E1000_ICR_GPI_EN2 /* GP Int 2 */
+#define E1000_IMC_GPI_EN3 E1000_ICR_GPI_EN3 /* GP Int 3 */
+#define E1000_IMC_TXD_LOW E1000_ICR_TXD_LOW
+#define E1000_IMC_SRPD E1000_ICR_SRPD
+#define E1000_IMC_ACK E1000_ICR_ACK /* Receive Ack frame */
+#define E1000_IMC_MNG E1000_ICR_MNG /* Manageability event */
+#define E1000_IMC_DOCK E1000_ICR_DOCK /* Dock/Undock */
+#define E1000_IMC_RXD_FIFO_PAR0 E1000_ICR_RXD_FIFO_PAR0 /* queue 0 Rx descriptor FIFO parity error */
+#define E1000_IMC_TXD_FIFO_PAR0 E1000_ICR_TXD_FIFO_PAR0 /* queue 0 Tx descriptor FIFO parity error */
+#define E1000_IMC_HOST_ARB_PAR E1000_ICR_HOST_ARB_PAR /* host arb read buffer parity error */
+#define E1000_IMC_PB_PAR E1000_ICR_PB_PAR /* packet buffer parity error */
+#define E1000_IMC_RXD_FIFO_PAR1 E1000_ICR_RXD_FIFO_PAR1 /* queue 1 Rx descriptor FIFO parity error */
+#define E1000_IMC_TXD_FIFO_PAR1 E1000_ICR_TXD_FIFO_PAR1 /* queue 1 Tx descriptor FIFO parity error */
+#define E1000_IMC_DSW E1000_ICR_DSW
+#define E1000_IMC_PHYINT E1000_ICR_PHYINT
+#define E1000_IMC_EPRST E1000_ICR_EPRST
+
+/* Receive Control */
+#define E1000_RCTL_RST 0x00000001 /* Software reset */
+#define E1000_RCTL_EN 0x00000002 /* enable */
+#define E1000_RCTL_SBP 0x00000004 /* store bad packet */
+#define E1000_RCTL_UPE 0x00000008 /* unicast promiscuous enable */
+#define E1000_RCTL_MPE 0x00000010 /* multicast promiscuous enable */
+#define E1000_RCTL_LPE 0x00000020 /* long packet enable */
+#define E1000_RCTL_LBM_NO 0x00000000 /* no loopback mode */
+#define E1000_RCTL_LBM_MAC 0x00000040 /* MAC loopback mode */
+#define E1000_RCTL_LBM_SLP 0x00000080 /* serial link loopback mode */
+#define E1000_RCTL_LBM_TCVR 0x000000C0 /* tcvr loopback mode */
+#define E1000_RCTL_DTYP_MASK 0x00000C00 /* Descriptor type mask */
+#define E1000_RCTL_DTYP_PS 0x00000400 /* Packet Split descriptor */
+#define E1000_RCTL_RDMTS_HALF 0x00000000 /* rx desc min threshold size */
+#define E1000_RCTL_RDMTS_QUAT 0x00000100 /* rx desc min threshold size */
+#define E1000_RCTL_RDMTS_EIGTH 0x00000200 /* rx desc min threshold size */
+#define E1000_RCTL_MO_SHIFT 12 /* multicast offset shift */
+#define E1000_RCTL_MO_0 0x00000000 /* multicast offset 11:0 */
+#define E1000_RCTL_MO_1 0x00001000 /* multicast offset 12:1 */
+#define E1000_RCTL_MO_2 0x00002000 /* multicast offset 13:2 */
+#define E1000_RCTL_MO_3 0x00003000 /* multicast offset 15:4 */
+#define E1000_RCTL_MDR 0x00004000 /* multicast desc ring 0 */
+#define E1000_RCTL_BAM 0x00008000 /* broadcast enable */
+/* these buffer sizes are valid if E1000_RCTL_BSEX is 0 */
+#define E1000_RCTL_SZ_2048 0x00000000 /* rx buffer size 2048 */
+#define E1000_RCTL_SZ_1024 0x00010000 /* rx buffer size 1024 */
+#define E1000_RCTL_SZ_512 0x00020000 /* rx buffer size 512 */
+#define E1000_RCTL_SZ_256 0x00030000 /* rx buffer size 256 */
+/* these buffer sizes are valid if E1000_RCTL_BSEX is 1 */
+#define E1000_RCTL_SZ_16384 0x00010000 /* rx buffer size 16384 */
+#define E1000_RCTL_SZ_8192 0x00020000 /* rx buffer size 8192 */
+#define E1000_RCTL_SZ_4096 0x00030000 /* rx buffer size 4096 */
+#define E1000_RCTL_VFE 0x00040000 /* vlan filter enable */
+#define E1000_RCTL_CFIEN 0x00080000 /* canonical form enable */
+#define E1000_RCTL_CFI 0x00100000 /* canonical form indicator */
+#define E1000_RCTL_DPF 0x00400000 /* discard pause frames */
+#define E1000_RCTL_PMCF 0x00800000 /* pass MAC control frames */
+#define E1000_RCTL_BSEX 0x02000000 /* Buffer size extension */
+#define E1000_RCTL_SECRC 0x04000000 /* Strip Ethernet CRC */
+#define E1000_RCTL_FLXBUF_MASK 0x78000000 /* Flexible buffer size */
+#define E1000_RCTL_FLXBUF_SHIFT 27 /* Flexible buffer shift */
+
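+/*
+ * Editor's note: illustrative sketch only. The SZ_* patterns are reused:
+ * the same bit value selects 256-byte buffers with BSEX clear and 4096-byte
+ * buffers with BSEX set. rctl is a hypothetical local variable.
+ */
+#if 0
+u32 rctl = E1000_RCTL_EN | E1000_RCTL_BAM | E1000_RCTL_LBM_NO;
+rctl |= E1000_RCTL_SZ_4096 | E1000_RCTL_BSEX;	/* 4096-byte rx buffers */
+/* the same 0x00030000 pattern without BSEX (E1000_RCTL_SZ_256) would
+ * select 256-byte buffers instead */
+#endif
+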
+/* Use byte values for the following shift parameters
+ * Usage:
+ * psrctl |= (((ROUNDUP(value0, 128) >> E1000_PSRCTL_BSIZE0_SHIFT) &
+ * E1000_PSRCTL_BSIZE0_MASK) |
+ * ((ROUNDUP(value1, 1024) >> E1000_PSRCTL_BSIZE1_SHIFT) &
+ * E1000_PSRCTL_BSIZE1_MASK) |
+ * ((ROUNDUP(value2, 1024) << E1000_PSRCTL_BSIZE2_SHIFT) &
+ * E1000_PSRCTL_BSIZE2_MASK) |
+ * ((ROUNDUP(value3, 1024) << E1000_PSRCTL_BSIZE3_SHIFT) &
+ * E1000_PSRCTL_BSIZE3_MASK))
+ * where value0 = [128..16256], default=256
+ * value1 = [1024..64512], default=4096
+ * value2 = [0..64512], default=4096
+ * value3 = [0..64512], default=0
+ */
+
+#define E1000_PSRCTL_BSIZE0_MASK 0x0000007F
+#define E1000_PSRCTL_BSIZE1_MASK 0x00003F00
+#define E1000_PSRCTL_BSIZE2_MASK 0x003F0000
+#define E1000_PSRCTL_BSIZE3_MASK 0x3F000000
+
+#define E1000_PSRCTL_BSIZE0_SHIFT 7 /* Shift _right_ 7 */
+#define E1000_PSRCTL_BSIZE1_SHIFT 2 /* Shift _right_ 2 */
+#define E1000_PSRCTL_BSIZE2_SHIFT 6 /* Shift _left_ 6 */
+#define E1000_PSRCTL_BSIZE3_SHIFT 14 /* Shift _left_ 14 */
+
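+/*
+ * Editor's note: illustrative sketch only, spelling out the usage comment
+ * above for the documented default sizes (256/4096/4096/0 bytes). ROUNDUP()
+ * is assumed to round its first argument up to a multiple of the second.
+ */
+#if 0
+u32 psrctl = ((ROUNDUP(256, 128) >> E1000_PSRCTL_BSIZE0_SHIFT) &
+              E1000_PSRCTL_BSIZE0_MASK) |
+             ((ROUNDUP(4096, 1024) >> E1000_PSRCTL_BSIZE1_SHIFT) &
+              E1000_PSRCTL_BSIZE1_MASK) |
+             ((ROUNDUP(4096, 1024) << E1000_PSRCTL_BSIZE2_SHIFT) &
+              E1000_PSRCTL_BSIZE2_MASK) |
+             ((ROUNDUP(0, 1024) << E1000_PSRCTL_BSIZE3_SHIFT) &
+              E1000_PSRCTL_BSIZE3_MASK);
+#endif
+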
+/* SW_FW_SYNC definitions */
+#define E1000_SWFW_EEP_SM 0x0001
+#define E1000_SWFW_PHY0_SM 0x0002
+#define E1000_SWFW_PHY1_SM 0x0004
+#define E1000_SWFW_MAC_CSR_SM 0x0008
+
+/* Receive Descriptor */
+#define E1000_RDT_DELAY 0x0000ffff /* Delay timer (1=1024us) */
+#define E1000_RDT_FPDB 0x80000000 /* Flush descriptor block */
+#define E1000_RDLEN_LEN 0x0007ff80 /* descriptor length */
+#define E1000_RDH_RDH 0x0000ffff /* receive descriptor head */
+#define E1000_RDT_RDT 0x0000ffff /* receive descriptor tail */
+
+/* Flow Control */
+#define E1000_FCRTH_RTH 0x0000FFF8 /* Mask Bits[15:3] for RTH */
+#define E1000_FCRTH_XFCE 0x80000000 /* External Flow Control Enable */
+#define E1000_FCRTL_RTL 0x0000FFF8 /* Mask Bits[15:3] for RTL */
+#define E1000_FCRTL_XONE 0x80000000 /* Enable XON frame transmission */
+
+/* Header split receive */
+#define E1000_RFCTL_ISCSI_DIS 0x00000001
+#define E1000_RFCTL_ISCSI_DWC_MASK 0x0000003E
+#define E1000_RFCTL_ISCSI_DWC_SHIFT 1
+#define E1000_RFCTL_NFSW_DIS 0x00000040
+#define E1000_RFCTL_NFSR_DIS 0x00000080
+#define E1000_RFCTL_NFS_VER_MASK 0x00000300
+#define E1000_RFCTL_NFS_VER_SHIFT 8
+#define E1000_RFCTL_IPV6_DIS 0x00000400
+#define E1000_RFCTL_IPV6_XSUM_DIS 0x00000800
+#define E1000_RFCTL_ACK_DIS 0x00001000
+#define E1000_RFCTL_ACKD_DIS 0x00002000
+#define E1000_RFCTL_IPFRSP_DIS 0x00004000
+#define E1000_RFCTL_EXTEN 0x00008000
+#define E1000_RFCTL_IPV6_EX_DIS 0x00010000
+#define E1000_RFCTL_NEW_IPV6_EXT_DIS 0x00020000
+
+/* Receive Descriptor Control */
+#define E1000_RXDCTL_PTHRESH 0x0000003F /* RXDCTL Prefetch Threshold */
+#define E1000_RXDCTL_HTHRESH 0x00003F00 /* RXDCTL Host Threshold */
+#define E1000_RXDCTL_WTHRESH 0x003F0000 /* RXDCTL Writeback Threshold */
+#define E1000_RXDCTL_GRAN 0x01000000 /* RXDCTL Granularity */
+
+/* Transmit Descriptor Control */
+#define E1000_TXDCTL_PTHRESH 0x0000003F /* TXDCTL Prefetch Threshold */
+#define E1000_TXDCTL_HTHRESH 0x00003F00 /* TXDCTL Host Threshold */
+#define E1000_TXDCTL_WTHRESH 0x003F0000 /* TXDCTL Writeback Threshold */
+#define E1000_TXDCTL_GRAN 0x01000000 /* TXDCTL Granularity */
+#define E1000_TXDCTL_LWTHRESH 0xFE000000 /* TXDCTL Low Threshold */
+#define E1000_TXDCTL_FULL_TX_DESC_WB 0x01010000 /* GRAN=1, WTHRESH=1 */
+#define E1000_TXDCTL_COUNT_DESC 0x00400000 /* Enable the counting of desc.
+ still to be processed. */
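+
+/*
+ * Editor's note: illustrative sketch only. FULL_TX_DESC_WB above is simply
+ * descriptor granularity with a write-back threshold of one descriptor.
+ */
+#if 0
+u32 txdctl = E1000_TXDCTL_GRAN		/* granularity in descriptors */
+	   | (1 << 16);			/* WTHRESH (bits 21:16) = 1 */
+/* txdctl == E1000_TXDCTL_FULL_TX_DESC_WB == 0x01010000 */
+#endif
+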
+/* Transmit Configuration Word */
+#define E1000_TXCW_FD 0x00000020 /* TXCW full duplex */
+#define E1000_TXCW_HD 0x00000040 /* TXCW half duplex */
+#define E1000_TXCW_PAUSE 0x00000080 /* TXCW sym pause request */
+#define E1000_TXCW_ASM_DIR 0x00000100 /* TXCW asym pause direction */
+#define E1000_TXCW_PAUSE_MASK 0x00000180 /* TXCW pause request mask */
+#define E1000_TXCW_RF 0x00003000 /* TXCW remote fault */
+#define E1000_TXCW_NP 0x00008000 /* TXCW next page */
+#define E1000_TXCW_CW 0x0000ffff /* TxConfigWord mask */
+#define E1000_TXCW_TXC 0x40000000 /* Transmit Config control */
+#define E1000_TXCW_ANE 0x80000000 /* Auto-neg enable */
+
+/* Receive Configuration Word */
+#define E1000_RXCW_CW 0x0000ffff /* RxConfigWord mask */
+#define E1000_RXCW_NC 0x04000000 /* Receive config no carrier */
+#define E1000_RXCW_IV 0x08000000 /* Receive config invalid */
+#define E1000_RXCW_CC 0x10000000 /* Receive config change */
+#define E1000_RXCW_C 0x20000000 /* Receive config */
+#define E1000_RXCW_SYNCH 0x40000000 /* Receive config synch */
+#define E1000_RXCW_ANC 0x80000000 /* Auto-neg complete */
+
+/* Transmit Control */
+#define E1000_TCTL_RST 0x00000001 /* software reset */
+#define E1000_TCTL_EN 0x00000002 /* enable tx */
+#define E1000_TCTL_BCE 0x00000004 /* busy check enable */
+#define E1000_TCTL_PSP 0x00000008 /* pad short packets */
+#define E1000_TCTL_CT 0x00000ff0 /* collision threshold */
+#define E1000_TCTL_COLD 0x003ff000 /* collision distance */
+#define E1000_TCTL_SWXOFF 0x00400000 /* SW Xoff transmission */
+#define E1000_TCTL_PBE 0x00800000 /* Packet Burst Enable */
+#define E1000_TCTL_RTLC 0x01000000 /* Re-transmit on late collision */
+#define E1000_TCTL_NRTU 0x02000000 /* No Re-transmit on underrun */
+#define E1000_TCTL_MULR 0x10000000 /* Multiple request support */
+/* Extended Transmit Control */
+#define E1000_TCTL_EXT_BST_MASK 0x000003FF /* Backoff Slot Time */
+#define E1000_TCTL_EXT_GCEX_MASK 0x000FFC00 /* Gigabit Carry Extend Padding */
+
+#define DEFAULT_80003ES2LAN_TCTL_EXT_GCEX 0x00010000
+
+/* Receive Checksum Control */
+#define E1000_RXCSUM_PCSS_MASK 0x000000FF /* Packet Checksum Start */
+#define E1000_RXCSUM_IPOFL 0x00000100 /* IPv4 checksum offload */
+#define E1000_RXCSUM_TUOFL 0x00000200 /* TCP / UDP checksum offload */
+#define E1000_RXCSUM_IPV6OFL 0x00000400 /* IPv6 checksum offload */
+#define E1000_RXCSUM_IPPCSE 0x00001000 /* IP payload checksum enable */
+#define E1000_RXCSUM_PCSD 0x00002000 /* packet checksum disabled */
+
+/* Multiple Receive Queue Control */
+#define E1000_MRQC_ENABLE_MASK 0x00000003
+#define E1000_MRQC_ENABLE_RSS_2Q 0x00000001
+#define E1000_MRQC_ENABLE_RSS_INT 0x00000004
+#define E1000_MRQC_RSS_FIELD_MASK 0xFFFF0000
+#define E1000_MRQC_RSS_FIELD_IPV4_TCP 0x00010000
+#define E1000_MRQC_RSS_FIELD_IPV4 0x00020000
+#define E1000_MRQC_RSS_FIELD_IPV6_TCP_EX 0x00040000
+#define E1000_MRQC_RSS_FIELD_IPV6_EX 0x00080000
+#define E1000_MRQC_RSS_FIELD_IPV6 0x00100000
+#define E1000_MRQC_RSS_FIELD_IPV6_TCP 0x00200000
+
+/* Definitions for power management and wakeup registers */
+/* Wake Up Control */
+#define E1000_WUC_APME 0x00000001 /* APM Enable */
+#define E1000_WUC_PME_EN 0x00000002 /* PME Enable */
+#define E1000_WUC_PME_STATUS 0x00000004 /* PME Status */
+#define E1000_WUC_APMPME 0x00000008 /* Assert PME on APM Wakeup */
+#define E1000_WUC_SPM 0x80000000 /* Enable SPM */
+
+/* Wake Up Filter Control */
+#define E1000_WUFC_LNKC 0x00000001 /* Link Status Change Wakeup Enable */
+#define E1000_WUFC_MAG 0x00000002 /* Magic Packet Wakeup Enable */
+#define E1000_WUFC_EX 0x00000004 /* Directed Exact Wakeup Enable */
+#define E1000_WUFC_MC 0x00000008 /* Directed Multicast Wakeup Enable */
+#define E1000_WUFC_BC 0x00000010 /* Broadcast Wakeup Enable */
+#define E1000_WUFC_ARP 0x00000020 /* ARP Request Packet Wakeup Enable */
+#define E1000_WUFC_IPV4 0x00000040 /* Directed IPv4 Packet Wakeup Enable */
+#define E1000_WUFC_IPV6 0x00000080 /* Directed IPv6 Packet Wakeup Enable */
+#define E1000_WUFC_IGNORE_TCO 0x00008000 /* Ignore WakeOn TCO packets */
+#define E1000_WUFC_FLX0 0x00010000 /* Flexible Filter 0 Enable */
+#define E1000_WUFC_FLX1 0x00020000 /* Flexible Filter 1 Enable */
+#define E1000_WUFC_FLX2 0x00040000 /* Flexible Filter 2 Enable */
+#define E1000_WUFC_FLX3 0x00080000 /* Flexible Filter 3 Enable */
+#define E1000_WUFC_ALL_FILTERS 0x000F00FF /* Mask for all wakeup filters */
+#define E1000_WUFC_FLX_OFFSET 16 /* Offset to the Flexible Filters bits */
+#define E1000_WUFC_FLX_FILTERS 0x000F0000 /* Mask for the 4 flexible filters */
+
+/* Wake Up Status */
+#define E1000_WUS_LNKC 0x00000001 /* Link Status Changed */
+#define E1000_WUS_MAG 0x00000002 /* Magic Packet Received */
+#define E1000_WUS_EX 0x00000004 /* Directed Exact Received */
+#define E1000_WUS_MC 0x00000008 /* Directed Multicast Received */
+#define E1000_WUS_BC 0x00000010 /* Broadcast Received */
+#define E1000_WUS_ARP 0x00000020 /* ARP Request Packet Received */
+#define E1000_WUS_IPV4 0x00000040 /* Directed IPv4 Packet Wakeup Received */
+#define E1000_WUS_IPV6 0x00000080 /* Directed IPv6 Packet Wakeup Received */
+#define E1000_WUS_FLX0 0x00010000 /* Flexible Filter 0 Match */
+#define E1000_WUS_FLX1 0x00020000 /* Flexible Filter 1 Match */
+#define E1000_WUS_FLX2 0x00040000 /* Flexible Filter 2 Match */
+#define E1000_WUS_FLX3 0x00080000 /* Flexible Filter 3 Match */
+#define E1000_WUS_FLX_FILTERS 0x000F0000 /* Mask for the 4 flexible filters */
+
+/* Management Control */
+#define E1000_MANC_SMBUS_EN 0x00000001 /* SMBus Enabled - RO */
+#define E1000_MANC_ASF_EN 0x00000002 /* ASF Enabled - RO */
+#define E1000_MANC_R_ON_FORCE 0x00000004 /* Reset on Force TCO - RO */
+#define E1000_MANC_RMCP_EN 0x00000100 /* Enable RMCP 026Fh Filtering */
+#define E1000_MANC_0298_EN 0x00000200 /* Enable RMCP 0298h Filtering */
+#define E1000_MANC_IPV4_EN 0x00000400 /* Enable IPv4 */
+#define E1000_MANC_IPV6_EN 0x00000800 /* Enable IPv6 */
+#define E1000_MANC_SNAP_EN 0x00001000 /* Accept LLC/SNAP */
+#define E1000_MANC_ARP_EN 0x00002000 /* Enable ARP Request Filtering */
+#define E1000_MANC_NEIGHBOR_EN 0x00004000 /* Enable Neighbor Discovery
+ * Filtering */
+#define E1000_MANC_ARP_RES_EN 0x00008000 /* Enable ARP response Filtering */
+#define E1000_MANC_TCO_RESET 0x00010000 /* TCO Reset Occurred */
+#define E1000_MANC_RCV_TCO_EN 0x00020000 /* Receive TCO Packets Enabled */
+#define E1000_MANC_REPORT_STATUS 0x00040000 /* Status Reporting Enabled */
+#define E1000_MANC_RCV_ALL 0x00080000 /* Receive All Enabled */
+#define E1000_MANC_BLK_PHY_RST_ON_IDE 0x00040000 /* Block phy resets */
+#define E1000_MANC_EN_MAC_ADDR_FILTER 0x00100000 /* Enable MAC address
+ * filtering */
+#define E1000_MANC_EN_MNG2HOST 0x00200000 /* Enable MNG packets to host
+ * memory */
+#define E1000_MANC_EN_IP_ADDR_FILTER 0x00400000 /* Enable IP address
+ * filtering */
+#define E1000_MANC_EN_XSUM_FILTER 0x00800000 /* Enable checksum filtering */
+#define E1000_MANC_BR_EN 0x01000000 /* Enable broadcast filtering */
+#define E1000_MANC_SMB_REQ 0x01000000 /* SMBus Request */
+#define E1000_MANC_SMB_GNT 0x02000000 /* SMBus Grant */
+#define E1000_MANC_SMB_CLK_IN 0x04000000 /* SMBus Clock In */
+#define E1000_MANC_SMB_DATA_IN 0x08000000 /* SMBus Data In */
+#define E1000_MANC_SMB_DATA_OUT 0x10000000 /* SMBus Data Out */
+#define E1000_MANC_SMB_CLK_OUT 0x20000000 /* SMBus Clock Out */
+
+#define E1000_MANC_SMB_DATA_OUT_SHIFT 28 /* SMBus Data Out Shift */
+#define E1000_MANC_SMB_CLK_OUT_SHIFT 29 /* SMBus Clock Out Shift */
+
+/* SW Semaphore Register */
+#define E1000_SWSM_SMBI 0x00000001 /* Driver Semaphore bit */
+#define E1000_SWSM_SWESMBI 0x00000002 /* FW Semaphore bit */
+#define E1000_SWSM_WMNG 0x00000004 /* Wake MNG Clock */
+#define E1000_SWSM_DRV_LOAD 0x00000008 /* Driver Loaded Bit */
+
+/* FW Semaphore Register */
+#define E1000_FWSM_MODE_MASK 0x0000000E /* FW mode */
+#define E1000_FWSM_MODE_SHIFT 1
+#define E1000_FWSM_FW_VALID 0x00008000 /* FW established a valid mode */
+
+#define E1000_FWSM_RSPCIPHY 0x00000040 /* Reset PHY on PCI reset */
+#define E1000_FWSM_DISSW 0x10000000 /* FW disable SW Write Access */
+#define E1000_FWSM_SKUSEL_MASK 0x60000000 /* LAN SKU select */
+#define E1000_FWSM_SKUEL_SHIFT 29
+#define E1000_FWSM_SKUSEL_EMB 0x0 /* Embedded SKU */
+#define E1000_FWSM_SKUSEL_CONS 0x1 /* Consumer SKU */
+#define E1000_FWSM_SKUSEL_PERF_100 0x2 /* Perf & Corp 10/100 SKU */
+#define E1000_FWSM_SKUSEL_PERF_GBE 0x3 /* Perf & Corp GbE SKU */
+
+/* FFLT Debug Register */
+#define E1000_FFLT_DBG_INVC 0x00100000 /* Invalid /C/ code handling */
+
+typedef enum {
+ e1000_mng_mode_none = 0,
+ e1000_mng_mode_asf,
+ e1000_mng_mode_pt,
+ e1000_mng_mode_ipmi,
+ e1000_mng_mode_host_interface_only
+} e1000_mng_mode;
+
+/* Host Interface Control Register */
+#define E1000_HICR_EN 0x00000001 /* Enable Bit - RO */
+#define E1000_HICR_C 0x00000002 /* Driver sets this bit when the
+ * command has been put in RAM */
+#define E1000_HICR_SV 0x00000004 /* Status Validity */
+#define E1000_HICR_FWR 0x00000080 /* FW reset. Set by the Host */
+
+/* Host Interface Command Interface - Address range 0x8800-0x8EFF */
+#define E1000_HI_MAX_DATA_LENGTH 252 /* Host Interface data length */
+#define E1000_HI_MAX_BLOCK_BYTE_LENGTH 1792 /* Number of bytes in range */
+#define E1000_HI_MAX_BLOCK_DWORD_LENGTH 448 /* Number of dwords in range */
+#define E1000_HI_COMMAND_TIMEOUT 500 /* Time in ms to process HI command */
+
+struct e1000_host_command_header {
+ u8 command_id;
+ u8 command_length;
+ u8 command_options; /* I/F bits for command, status for return */
+ u8 checksum;
+};
+struct e1000_host_command_info {
+ struct e1000_host_command_header command_header; /* Command header / result header (4 bytes) */
+ u8 command_data[E1000_HI_MAX_DATA_LENGTH]; /* Command data, 0..252 bytes */
+};
+
+/* Host SMB register #0 */
+#define E1000_HSMC0R_CLKIN 0x00000001 /* SMB Clock in */
+#define E1000_HSMC0R_DATAIN 0x00000002 /* SMB Data in */
+#define E1000_HSMC0R_DATAOUT 0x00000004 /* SMB Data out */
+#define E1000_HSMC0R_CLKOUT 0x00000008 /* SMB Clock out */
+
+/* Host SMB register #1 */
+#define E1000_HSMC1R_CLKIN E1000_HSMC0R_CLKIN
+#define E1000_HSMC1R_DATAIN E1000_HSMC0R_DATAIN
+#define E1000_HSMC1R_DATAOUT E1000_HSMC0R_DATAOUT
+#define E1000_HSMC1R_CLKOUT E1000_HSMC0R_CLKOUT
+
+/* FW Status Register */
+#define E1000_FWSTS_FWS_MASK 0x000000FF /* FW Status */
+
+/* Wake Up Packet Length */
+#define E1000_WUPL_LENGTH_MASK 0x0FFF /* Only the lower 12 bits are valid */
+
+#define E1000_MDALIGN 4096
+
+/* PCI-Ex registers*/
+
+/* PCI-Ex Control Register */
+#define E1000_GCR_RXD_NO_SNOOP 0x00000001
+#define E1000_GCR_RXDSCW_NO_SNOOP 0x00000002
+#define E1000_GCR_RXDSCR_NO_SNOOP 0x00000004
+#define E1000_GCR_TXD_NO_SNOOP 0x00000008
+#define E1000_GCR_TXDSCW_NO_SNOOP 0x00000010
+#define E1000_GCR_TXDSCR_NO_SNOOP 0x00000020
+
+#define PCI_EX_NO_SNOOP_ALL (E1000_GCR_RXD_NO_SNOOP | \
+ E1000_GCR_RXDSCW_NO_SNOOP | \
+ E1000_GCR_RXDSCR_NO_SNOOP | \
+ E1000_GCR_TXD_NO_SNOOP | \
+ E1000_GCR_TXDSCW_NO_SNOOP | \
+ E1000_GCR_TXDSCR_NO_SNOOP)
+
+#define PCI_EX_82566_SNOOP_ALL PCI_EX_NO_SNOOP_ALL
+
+#define E1000_GCR_L1_ACT_WITHOUT_L0S_RX 0x08000000
+/* Function Active and Power State to MNG */
+#define E1000_FACTPS_FUNC0_POWER_STATE_MASK 0x00000003
+#define E1000_FACTPS_LAN0_VALID 0x00000004
+#define E1000_FACTPS_FUNC0_AUX_EN 0x00000008
+#define E1000_FACTPS_FUNC1_POWER_STATE_MASK 0x000000C0
+#define E1000_FACTPS_FUNC1_POWER_STATE_SHIFT 6
+#define E1000_FACTPS_LAN1_VALID 0x00000100
+#define E1000_FACTPS_FUNC1_AUX_EN 0x00000200
+#define E1000_FACTPS_FUNC2_POWER_STATE_MASK 0x00003000
+#define E1000_FACTPS_FUNC2_POWER_STATE_SHIFT 12
+#define E1000_FACTPS_IDE_ENABLE 0x00004000
+#define E1000_FACTPS_FUNC2_AUX_EN 0x00008000
+#define E1000_FACTPS_FUNC3_POWER_STATE_MASK 0x000C0000
+#define E1000_FACTPS_FUNC3_POWER_STATE_SHIFT 18
+#define E1000_FACTPS_SP_ENABLE 0x00100000
+#define E1000_FACTPS_FUNC3_AUX_EN 0x00200000
+#define E1000_FACTPS_FUNC4_POWER_STATE_MASK 0x03000000
+#define E1000_FACTPS_FUNC4_POWER_STATE_SHIFT 24
+#define E1000_FACTPS_IPMI_ENABLE 0x04000000
+#define E1000_FACTPS_FUNC4_AUX_EN 0x08000000
+#define E1000_FACTPS_MNGCG 0x20000000
+#define E1000_FACTPS_LAN_FUNC_SEL 0x40000000
+#define E1000_FACTPS_PM_STATE_CHANGED 0x80000000
+
+/* PCI-Ex Config Space */
+#define PCI_EX_LINK_STATUS 0x12
+#define PCI_EX_LINK_WIDTH_MASK 0x3F0
+#define PCI_EX_LINK_WIDTH_SHIFT 4
+
+/* EEPROM Commands - Microwire */
+#define EEPROM_READ_OPCODE_MICROWIRE 0x6 /* EEPROM read opcode */
+#define EEPROM_WRITE_OPCODE_MICROWIRE 0x5 /* EEPROM write opcode */
+#define EEPROM_ERASE_OPCODE_MICROWIRE 0x7 /* EEPROM erase opcode */
+#define EEPROM_EWEN_OPCODE_MICROWIRE 0x13 /* EEPROM erase/write enable */
+#define EEPROM_EWDS_OPCODE_MICROWIRE 0x10 /* EEPROM erase/write disable */
+
+/* EEPROM Commands - SPI */
+#define EEPROM_MAX_RETRY_SPI 5000 /* Max wait of 5ms, for RDY signal */
+#define EEPROM_READ_OPCODE_SPI 0x03 /* EEPROM read opcode */
+#define EEPROM_WRITE_OPCODE_SPI 0x02 /* EEPROM write opcode */
+#define EEPROM_A8_OPCODE_SPI 0x08 /* opcode bit-3 = address bit-8 */
+#define EEPROM_WREN_OPCODE_SPI 0x06 /* EEPROM set Write Enable latch */
+#define EEPROM_WRDI_OPCODE_SPI 0x04 /* EEPROM reset Write Enable latch */
+#define EEPROM_RDSR_OPCODE_SPI 0x05 /* EEPROM read Status register */
+#define EEPROM_WRSR_OPCODE_SPI 0x01 /* EEPROM write Status register */
+#define EEPROM_ERASE4K_OPCODE_SPI 0x20 /* EEPROM ERASE 4KB */
+#define EEPROM_ERASE64K_OPCODE_SPI 0xD8 /* EEPROM ERASE 64KB */
+#define EEPROM_ERASE256_OPCODE_SPI 0xDB /* EEPROM ERASE 256B */
+
+/* EEPROM Size definitions */
+#define EEPROM_WORD_SIZE_SHIFT 6
+#define EEPROM_SIZE_SHIFT 10
+#define EEPROM_SIZE_MASK 0x1C00
+
+/* EEPROM Word Offsets */
+#define EEPROM_COMPAT 0x0003
+#define EEPROM_ID_LED_SETTINGS 0x0004
+#define EEPROM_VERSION 0x0005
+#define EEPROM_SERDES_AMPLITUDE 0x0006 /* For SERDES output amplitude adjustment. */
+#define EEPROM_PHY_CLASS_WORD 0x0007
+#define EEPROM_INIT_CONTROL1_REG 0x000A
+#define EEPROM_INIT_CONTROL2_REG 0x000F
+#define EEPROM_SWDEF_PINS_CTRL_PORT_1 0x0010
+#define EEPROM_INIT_CONTROL3_PORT_B 0x0014
+#define EEPROM_INIT_3GIO_3 0x001A
+#define EEPROM_SWDEF_PINS_CTRL_PORT_0 0x0020
+#define EEPROM_INIT_CONTROL3_PORT_A 0x0024
+#define EEPROM_CFG 0x0012
+#define EEPROM_FLASH_VERSION 0x0032
+#define EEPROM_CHECKSUM_REG 0x003F
+
+#define E1000_EEPROM_CFG_DONE 0x00040000 /* MNG config cycle done */
+#define E1000_EEPROM_CFG_DONE_PORT_1 0x00080000 /* ...for second port */
+
+/* Word definitions for ID LED Settings */
+#define ID_LED_RESERVED_0000 0x0000
+#define ID_LED_RESERVED_FFFF 0xFFFF
+#define ID_LED_RESERVED_82573 0xF746
+#define ID_LED_DEFAULT_82573 0x1811
+#define ID_LED_DEFAULT ((ID_LED_OFF1_ON2 << 12) | \
+ (ID_LED_OFF1_OFF2 << 8) | \
+ (ID_LED_DEF1_DEF2 << 4) | \
+ (ID_LED_DEF1_DEF2))
+#define ID_LED_DEFAULT_ICH8LAN ((ID_LED_DEF1_DEF2 << 12) | \
+ (ID_LED_DEF1_OFF2 << 8) | \
+ (ID_LED_DEF1_ON2 << 4) | \
+ (ID_LED_DEF1_DEF2))
+#define ID_LED_DEF1_DEF2 0x1
+#define ID_LED_DEF1_ON2 0x2
+#define ID_LED_DEF1_OFF2 0x3
+#define ID_LED_ON1_DEF2 0x4
+#define ID_LED_ON1_ON2 0x5
+#define ID_LED_ON1_OFF2 0x6
+#define ID_LED_OFF1_DEF2 0x7
+#define ID_LED_OFF1_ON2 0x8
+#define ID_LED_OFF1_OFF2 0x9
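+/* Worked example (illustrative only): substituting the nibble values above
+ * into ID_LED_DEFAULT gives
+ *   (0x8 << 12) | (0x9 << 8) | (0x1 << 4) | 0x1 = 0x8911,
+ * the layout presumably being one 4-bit mode nibble per LED, with LED0 in
+ * the lowest nibble.
+ */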
+
+#define IGP_ACTIVITY_LED_MASK 0xFFFFF0FF
+#define IGP_ACTIVITY_LED_ENABLE 0x0300
+#define IGP_LED3_MODE 0x07000000
+
+
+/* Mask bits for SERDES amplitude adjustment in Word 6 of the EEPROM */
+#define EEPROM_SERDES_AMPLITUDE_MASK 0x000F
+
+/* Mask bit for PHY class in Word 7 of the EEPROM */
+#define EEPROM_PHY_CLASS_A 0x8000
+
+/* Mask bits for fields in Word 0x0a of the EEPROM */
+#define EEPROM_WORD0A_ILOS 0x0010
+#define EEPROM_WORD0A_SWDPIO 0x01E0
+#define EEPROM_WORD0A_LRST 0x0200
+#define EEPROM_WORD0A_FD 0x0400
+#define EEPROM_WORD0A_66MHZ 0x0800
+
+/* Mask bits for fields in Word 0x0f of the EEPROM */
+#define EEPROM_WORD0F_PAUSE_MASK 0x3000
+#define EEPROM_WORD0F_PAUSE 0x1000
+#define EEPROM_WORD0F_ASM_DIR 0x2000
+#define EEPROM_WORD0F_ANE 0x0800
+#define EEPROM_WORD0F_SWPDIO_EXT 0x00F0
+#define EEPROM_WORD0F_LPLU 0x0001
+
+/* Mask bits for fields in Word 0x10/0x20 of the EEPROM */
+#define EEPROM_WORD1020_GIGA_DISABLE 0x0010
+#define EEPROM_WORD1020_GIGA_DISABLE_NON_D0A 0x0008
+
+/* Mask bits for fields in Word 0x1a of the EEPROM */
+#define EEPROM_WORD1A_ASPM_MASK 0x000C
+
+/* For checksumming, the sum of all words in the EEPROM should equal 0xBABA. */
+#define EEPROM_SUM 0xBABA
+
+/* EEPROM Map defines (WORD OFFSETS)*/
+#define EEPROM_NODE_ADDRESS_BYTE_0 0
+#define EEPROM_PBA_BYTE_1 8
+
+#define EEPROM_RESERVED_WORD 0xFFFF
+
+/* EEPROM Map Sizes (Byte Counts) */
+#define PBA_SIZE 4
+
+/* Collision related configuration parameters */
+#define E1000_COLLISION_THRESHOLD 15
+#define E1000_CT_SHIFT 4
+/* Collision distance is a 0-based value that applies to
+ * half-duplex-capable hardware only. */
+#define E1000_COLLISION_DISTANCE 63
+#define E1000_COLLISION_DISTANCE_82542 64
+#define E1000_FDX_COLLISION_DISTANCE E1000_COLLISION_DISTANCE
+#define E1000_HDX_COLLISION_DISTANCE E1000_COLLISION_DISTANCE
+#define E1000_COLD_SHIFT 12
+
+/* Number of Transmit and Receive Descriptors must be a multiple of 8 */
+#define REQ_TX_DESCRIPTOR_MULTIPLE 8
+#define REQ_RX_DESCRIPTOR_MULTIPLE 8
+
+/* Default values for the transmit IPG register */
+#define DEFAULT_82542_TIPG_IPGT 10
+#define DEFAULT_82543_TIPG_IPGT_FIBER 9
+#define DEFAULT_82543_TIPG_IPGT_COPPER 8
+
+#define E1000_TIPG_IPGT_MASK 0x000003FF
+#define E1000_TIPG_IPGR1_MASK 0x000FFC00
+#define E1000_TIPG_IPGR2_MASK 0x3FF00000
+
+#define DEFAULT_82542_TIPG_IPGR1 2
+#define DEFAULT_82543_TIPG_IPGR1 8
+#define E1000_TIPG_IPGR1_SHIFT 10
+
+#define DEFAULT_82542_TIPG_IPGR2 10
+#define DEFAULT_82543_TIPG_IPGR2 6
+#define DEFAULT_80003ES2LAN_TIPG_IPGR2 7
+#define E1000_TIPG_IPGR2_SHIFT 20
+
+#define DEFAULT_80003ES2LAN_TIPG_IPGT_10_100 0x00000009
+#define DEFAULT_80003ES2LAN_TIPG_IPGT_1000 0x00000008
+#define E1000_TXDMAC_DPP 0x00000001
+
+/* Adaptive IFS defines */
+#define TX_THRESHOLD_START 8
+#define TX_THRESHOLD_INCREMENT 10
+#define TX_THRESHOLD_DECREMENT 1
+#define TX_THRESHOLD_STOP 190
+#define TX_THRESHOLD_DISABLE 0
+#define TX_THRESHOLD_TIMER_MS 10000
+#define MIN_NUM_XMITS 1000
+#define IFS_MAX 80
+#define IFS_STEP 10
+#define IFS_MIN 40
+#define IFS_RATIO 4
+
+/* Extended Configuration Control and Size */
+#define E1000_EXTCNF_CTRL_PCIE_WRITE_ENABLE 0x00000001
+#define E1000_EXTCNF_CTRL_PHY_WRITE_ENABLE 0x00000002
+#define E1000_EXTCNF_CTRL_D_UD_ENABLE 0x00000004
+#define E1000_EXTCNF_CTRL_D_UD_LATENCY 0x00000008
+#define E1000_EXTCNF_CTRL_D_UD_OWNER 0x00000010
+#define E1000_EXTCNF_CTRL_MDIO_SW_OWNERSHIP 0x00000020
+#define E1000_EXTCNF_CTRL_MDIO_HW_OWNERSHIP 0x00000040
+#define E1000_EXTCNF_CTRL_EXT_CNF_POINTER 0x0FFF0000
+
+#define E1000_EXTCNF_SIZE_EXT_PHY_LENGTH 0x000000FF
+#define E1000_EXTCNF_SIZE_EXT_DOCK_LENGTH 0x0000FF00
+#define E1000_EXTCNF_SIZE_EXT_PCIE_LENGTH 0x00FF0000
+#define E1000_EXTCNF_CTRL_LCD_WRITE_ENABLE 0x00000001
+#define E1000_EXTCNF_CTRL_SWFLAG 0x00000020
+
+/* PBA constants */
+#define E1000_PBA_8K 0x0008 /* 8KB, default Rx allocation */
+#define E1000_PBA_12K 0x000C /* 12KB, default Rx allocation */
+#define E1000_PBA_16K 0x0010 /* 16KB, default TX allocation */
+#define E1000_PBA_20K 0x0014
+#define E1000_PBA_22K 0x0016
+#define E1000_PBA_24K 0x0018
+#define E1000_PBA_30K 0x001E
+#define E1000_PBA_32K 0x0020
+#define E1000_PBA_34K 0x0022
+#define E1000_PBA_38K 0x0026
+#define E1000_PBA_40K 0x0028
+#define E1000_PBA_48K 0x0030 /* 48KB, default RX allocation */
+
+#define E1000_PBS_16K E1000_PBA_16K
+
+/* Flow Control Constants */
+#define FLOW_CONTROL_ADDRESS_LOW 0x00C28001
+#define FLOW_CONTROL_ADDRESS_HIGH 0x00000100
+#define FLOW_CONTROL_TYPE 0x8808
+
+/* The historical defaults for the flow control values are given below. */
+#define FC_DEFAULT_HI_THRESH (0x8000) /* 32KB */
+#define FC_DEFAULT_LO_THRESH (0x4000) /* 16KB */
+#define FC_DEFAULT_TX_TIMER (0x100) /* ~130 us */
+
+/* PCIX Config space */
+#define PCIX_COMMAND_REGISTER 0xE6
+#define PCIX_STATUS_REGISTER_LO 0xE8
+#define PCIX_STATUS_REGISTER_HI 0xEA
+
+#define PCIX_COMMAND_MMRBC_MASK 0x000C
+#define PCIX_COMMAND_MMRBC_SHIFT 0x2
+#define PCIX_STATUS_HI_MMRBC_MASK 0x0060
+#define PCIX_STATUS_HI_MMRBC_SHIFT 0x5
+#define PCIX_STATUS_HI_MMRBC_4K 0x3
+#define PCIX_STATUS_HI_MMRBC_2K 0x2
+
+
+/* Number of bits required to shift right the "pause" bits from the
+ * EEPROM (bits 13:12) to the "pause" (bits 8:7) field in the TXCW register.
+ */
+#define PAUSE_SHIFT 5
+
+/* Number of bits required to shift left the "SWDPIO" bits from the
+ * EEPROM (bits 8:5) to the "SWDPIO" (bits 25:22) field in the CTRL register.
+ */
+#define SWDPIO_SHIFT 17
+
+/* Number of bits required to shift left the "SWDPIO_EXT" bits from the
+ * EEPROM word F (bits 7:4) to the bits 11:8 of The Extended CTRL register.
+ */
+#define SWDPIO__EXT_SHIFT 4
+
+/* Number of bits required to shift left the "ILOS" bit from the EEPROM
+ * (bit 4) to the "ILOS" (bit 7) field in the CTRL register.
+ */
+#define ILOS_SHIFT 3
+
+
+#define RECEIVE_BUFFER_ALIGN_SIZE (256)
+
+/* Number of milliseconds we wait for auto-negotiation to complete */
+#define LINK_UP_TIMEOUT 500
+
+/* Number of 100-microsecond intervals we wait for PCI Express master disable */
+#define MASTER_DISABLE_TIMEOUT 800
+/* Number of milliseconds we wait for the EEPROM auto-read done bit after MAC reset */
+#define AUTO_READ_DONE_TIMEOUT 10
+/* Number of milliseconds we wait for PHY configuration done after MAC reset */
+#define PHY_CFG_TIMEOUT 100
+
+#define E1000_TX_BUFFER_SIZE ((u32)1514)
+
+/* The carrier extension symbol, as received by the NIC. */
+#define CARRIER_EXTENSION 0x0F
+
+/* TBI_ACCEPT macro definition:
+ *
+ * This macro requires:
+ * adapter = a pointer to struct e1000_hw
+ * status = the 8 bit status field of the RX descriptor with EOP set
+ * errors = the 8 bit error field of the RX descriptor with EOP set
+ * length = the sum of all the length fields of the RX descriptors that
+ * make up the current frame
+ * last_byte = the last byte of the frame DMAed by the hardware
+ * max_frame_length = the maximum frame length we want to accept.
+ * min_frame_length = the minimum frame length we want to accept.
+ *
+ * This macro is a conditional that should be used in the interrupt
+ * handler's Rx processing routine when RxErrors have been detected.
+ *
+ * Typical use:
+ * ...
+ * if (TBI_ACCEPT) {
+ * accept_frame = true;
+ * e1000_tbi_adjust_stats(adapter, MacAddress);
+ * frame_length--;
+ * } else {
+ * accept_frame = false;
+ * }
+ * ...
+ */
+
+#define TBI_ACCEPT(adapter, status, errors, length, last_byte) \
+ ((adapter)->tbi_compatibility_on && \
+ (((errors) & E1000_RXD_ERR_FRAME_ERR_MASK) == E1000_RXD_ERR_CE) && \
+ ((last_byte) == CARRIER_EXTENSION) && \
+ (((status) & E1000_RXD_STAT_VP) ? \
+ (((length) > ((adapter)->min_frame_size - VLAN_TAG_SIZE)) && \
+ ((length) <= ((adapter)->max_frame_size + 1))) : \
+ (((length) > (adapter)->min_frame_size) && \
+ ((length) <= ((adapter)->max_frame_size + VLAN_TAG_SIZE + 1)))))
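+/* Worked example (illustrative only, assuming the usual min_frame_size of 64
+ * and max_frame_size of 1518 bytes): a frame whose only error is a CRC error
+ * and whose last byte is the carrier extension symbol is accepted when
+ *   64 < length <= 1523   (untagged, VP clear), or
+ *   60 < length <= 1519   (VLAN tagged, VP set, tag already stripped);
+ * the +1 in each upper bound leaves room for the trailing carrier extension
+ * byte, which the caller strips afterwards (frame_length--).
+ */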
+
+
+/* Structures, enums, and macros for the PHY */
+
+/* Bit definitions for the Management Data IO (MDIO) and Management Data
+ * Clock (MDC) pins in the Device Control Register.
+ */
+#define E1000_CTRL_PHY_RESET_DIR E1000_CTRL_SWDPIO0
+#define E1000_CTRL_PHY_RESET E1000_CTRL_SWDPIN0
+#define E1000_CTRL_MDIO_DIR E1000_CTRL_SWDPIO2
+#define E1000_CTRL_MDIO E1000_CTRL_SWDPIN2
+#define E1000_CTRL_MDC_DIR E1000_CTRL_SWDPIO3
+#define E1000_CTRL_MDC E1000_CTRL_SWDPIN3
+#define E1000_CTRL_PHY_RESET_DIR4 E1000_CTRL_EXT_SDP4_DIR
+#define E1000_CTRL_PHY_RESET4 E1000_CTRL_EXT_SDP4_DATA
+
+/* PHY 1000 MII Register/Bit Definitions */
+/* PHY Registers defined by IEEE */
+#define PHY_CTRL 0x00 /* Control Register */
+#define PHY_STATUS 0x01 /* Status Register */
+#define PHY_ID1 0x02 /* Phy Id Reg (word 1) */
+#define PHY_ID2 0x03 /* Phy Id Reg (word 2) */
+#define PHY_AUTONEG_ADV 0x04 /* Autoneg Advertisement */
+#define PHY_LP_ABILITY 0x05 /* Link Partner Ability (Base Page) */
+#define PHY_AUTONEG_EXP 0x06 /* Autoneg Expansion Reg */
+#define PHY_NEXT_PAGE_TX 0x07 /* Next Page TX */
+#define PHY_LP_NEXT_PAGE 0x08 /* Link Partner Next Page */
+#define PHY_1000T_CTRL 0x09 /* 1000Base-T Control Reg */
+#define PHY_1000T_STATUS 0x0A /* 1000Base-T Status Reg */
+#define PHY_EXT_STATUS 0x0F /* Extended Status Reg */
+
+#define MAX_PHY_REG_ADDRESS 0x1F /* 5 bit address bus (0-0x1F) */
+#define MAX_PHY_MULTI_PAGE_REG 0xF /* Registers equal on all pages */
+
+/* M88E1000 Specific Registers */
+#define M88E1000_PHY_SPEC_CTRL 0x10 /* PHY Specific Control Register */
+#define M88E1000_PHY_SPEC_STATUS 0x11 /* PHY Specific Status Register */
+#define M88E1000_INT_ENABLE 0x12 /* Interrupt Enable Register */
+#define M88E1000_INT_STATUS 0x13 /* Interrupt Status Register */
+#define M88E1000_EXT_PHY_SPEC_CTRL 0x14 /* Extended PHY Specific Control */
+#define M88E1000_RX_ERR_CNTR 0x15 /* Receive Error Counter */
+
+#define M88E1000_PHY_EXT_CTRL 0x1A /* PHY extend control register */
+#define M88E1000_PHY_PAGE_SELECT 0x1D /* Reg 29 for page number setting */
+#define M88E1000_PHY_GEN_CONTROL 0x1E /* Its meaning depends on reg 29 */
+#define M88E1000_PHY_VCO_REG_BIT8 0x100 /* Bits 8 & 11 are adjusted for */
+#define M88E1000_PHY_VCO_REG_BIT11 0x800 /* improved BER performance */
+
+#define IGP01E1000_IEEE_REGS_PAGE 0x0000
+#define IGP01E1000_IEEE_RESTART_AUTONEG 0x3300
+#define IGP01E1000_IEEE_FORCE_GIGA 0x0140
+
+/* IGP01E1000 Specific Registers */
+#define IGP01E1000_PHY_PORT_CONFIG 0x10 /* PHY Specific Port Config Register */
+#define IGP01E1000_PHY_PORT_STATUS 0x11 /* PHY Specific Status Register */
+#define IGP01E1000_PHY_PORT_CTRL 0x12 /* PHY Specific Control Register */
+#define IGP01E1000_PHY_LINK_HEALTH 0x13 /* PHY Link Health Register */
+#define IGP01E1000_GMII_FIFO 0x14 /* GMII FIFO Register */
+#define IGP01E1000_PHY_CHANNEL_QUALITY 0x15 /* PHY Channel Quality Register */
+#define IGP02E1000_PHY_POWER_MGMT 0x19
+#define IGP01E1000_PHY_PAGE_SELECT 0x1F /* PHY Page Select Core Register */
+
+/* IGP01E1000 AGC Registers - stores the cable length values*/
+#define IGP01E1000_PHY_AGC_A 0x1172
+#define IGP01E1000_PHY_AGC_B 0x1272
+#define IGP01E1000_PHY_AGC_C 0x1472
+#define IGP01E1000_PHY_AGC_D 0x1872
+
+/* IGP02E1000 AGC Registers for cable length values */
+#define IGP02E1000_PHY_AGC_A 0x11B1
+#define IGP02E1000_PHY_AGC_B 0x12B1
+#define IGP02E1000_PHY_AGC_C 0x14B1
+#define IGP02E1000_PHY_AGC_D 0x18B1
+
+/* IGP01E1000 DSP Reset Register */
+#define IGP01E1000_PHY_DSP_RESET 0x1F33
+#define IGP01E1000_PHY_DSP_SET 0x1F71
+#define IGP01E1000_PHY_DSP_FFE 0x1F35
+
+#define IGP01E1000_PHY_CHANNEL_NUM 4
+#define IGP02E1000_PHY_CHANNEL_NUM 4
+
+#define IGP01E1000_PHY_AGC_PARAM_A 0x1171
+#define IGP01E1000_PHY_AGC_PARAM_B 0x1271
+#define IGP01E1000_PHY_AGC_PARAM_C 0x1471
+#define IGP01E1000_PHY_AGC_PARAM_D 0x1871
+
+#define IGP01E1000_PHY_EDAC_MU_INDEX 0xC000
+#define IGP01E1000_PHY_EDAC_SIGN_EXT_9_BITS 0x8000
+
+#define IGP01E1000_PHY_ANALOG_TX_STATE 0x2890
+#define IGP01E1000_PHY_ANALOG_CLASS_A 0x2000
+#define IGP01E1000_PHY_FORCE_ANALOG_ENABLE 0x0004
+#define IGP01E1000_PHY_DSP_FFE_CM_CP 0x0069
+
+#define IGP01E1000_PHY_DSP_FFE_DEFAULT 0x002A
+/* IGP01E1000 PCS Initialization register - stores the polarity status when
+ * speed = 1000 Mbps. */
+#define IGP01E1000_PHY_PCS_INIT_REG 0x00B4
+#define IGP01E1000_PHY_PCS_CTRL_REG 0x00B5
+
+#define IGP01E1000_ANALOG_REGS_PAGE 0x20C0
+
+/* Bits...
+ * 15-5: page
+ * 4-0: register offset
+ */
+#define GG82563_PAGE_SHIFT 5
+#define GG82563_REG(page, reg) \
+ (((page) << GG82563_PAGE_SHIFT) | ((reg) & MAX_PHY_REG_ADDRESS))
+#define GG82563_MIN_ALT_REG 30
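+/* Example (illustrative only): GG82563_REG(0, 16) is simply 16, while a
+ * page-193 register such as GG82563_REG(193, 16) encodes to
+ *   (193 << 5) | 16 = 0x1830,
+ * the page living in bits 15:5 and the register offset in bits 4:0.
+ */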
+
+/* GG82563 Specific Registers */
+#define GG82563_PHY_SPEC_CTRL \
+ GG82563_REG(0, 16) /* PHY Specific Control */
+#define GG82563_PHY_SPEC_STATUS \
+ GG82563_REG(0, 17) /* PHY Specific Status */
+#define GG82563_PHY_INT_ENABLE \
+ GG82563_REG(0, 18) /* Interrupt Enable */
+#define GG82563_PHY_SPEC_STATUS_2 \
+ GG82563_REG(0, 19) /* PHY Specific Status 2 */
+#define GG82563_PHY_RX_ERR_CNTR \
+ GG82563_REG(0, 21) /* Receive Error Counter */
+#define GG82563_PHY_PAGE_SELECT \
+ GG82563_REG(0, 22) /* Page Select */
+#define GG82563_PHY_SPEC_CTRL_2 \
+ GG82563_REG(0, 26) /* PHY Specific Control 2 */
+#define GG82563_PHY_PAGE_SELECT_ALT \
+ GG82563_REG(0, 29) /* Alternate Page Select */
+#define GG82563_PHY_TEST_CLK_CTRL \
+ GG82563_REG(0, 30) /* Test Clock Control (use reg. 29 to select) */
+
+#define GG82563_PHY_MAC_SPEC_CTRL \
+ GG82563_REG(2, 21) /* MAC Specific Control Register */
+#define GG82563_PHY_MAC_SPEC_CTRL_2 \
+ GG82563_REG(2, 26) /* MAC Specific Control 2 */
+
+#define GG82563_PHY_DSP_DISTANCE \
+ GG82563_REG(5, 26) /* DSP Distance */
+
+/* Page 193 - Port Control Registers */
+#define GG82563_PHY_KMRN_MODE_CTRL \
+ GG82563_REG(193, 16) /* Kumeran Mode Control */
+#define GG82563_PHY_PORT_RESET \
+ GG82563_REG(193, 17) /* Port Reset */
+#define GG82563_PHY_REVISION_ID \
+ GG82563_REG(193, 18) /* Revision ID */
+#define GG82563_PHY_DEVICE_ID \
+ GG82563_REG(193, 19) /* Device ID */
+#define GG82563_PHY_PWR_MGMT_CTRL \
+ GG82563_REG(193, 20) /* Power Management Control */
+#define GG82563_PHY_RATE_ADAPT_CTRL \
+ GG82563_REG(193, 25) /* Rate Adaptation Control */
+
+/* Page 194 - KMRN Registers */
+#define GG82563_PHY_KMRN_FIFO_CTRL_STAT \
+ GG82563_REG(194, 16) /* FIFO's Control/Status */
+#define GG82563_PHY_KMRN_CTRL \
+ GG82563_REG(194, 17) /* Control */
+#define GG82563_PHY_INBAND_CTRL \
+ GG82563_REG(194, 18) /* Inband Control */
+#define GG82563_PHY_KMRN_DIAGNOSTIC \
+ GG82563_REG(194, 19) /* Diagnostic */
+#define GG82563_PHY_ACK_TIMEOUTS \
+ GG82563_REG(194, 20) /* Acknowledge Timeouts */
+#define GG82563_PHY_ADV_ABILITY \
+ GG82563_REG(194, 21) /* Advertised Ability */
+#define GG82563_PHY_LINK_PARTNER_ADV_ABILITY \
+ GG82563_REG(194, 23) /* Link Partner Advertised Ability */
+#define GG82563_PHY_ADV_NEXT_PAGE \
+ GG82563_REG(194, 24) /* Advertised Next Page */
+#define GG82563_PHY_LINK_PARTNER_ADV_NEXT_PAGE \
+ GG82563_REG(194, 25) /* Link Partner Advertised Next page */
+#define GG82563_PHY_KMRN_MISC \
+ GG82563_REG(194, 26) /* Misc. */
+
+/* PHY Control Register */
+#define MII_CR_SPEED_SELECT_MSB 0x0040 /* bits 6,13: 10=1000, 01=100, 00=10 */
+#define MII_CR_COLL_TEST_ENABLE 0x0080 /* Collision test enable */
+#define MII_CR_FULL_DUPLEX 0x0100 /* FDX =1, half duplex =0 */
+#define MII_CR_RESTART_AUTO_NEG 0x0200 /* Restart auto negotiation */
+#define MII_CR_ISOLATE 0x0400 /* Isolate PHY from MII */
+#define MII_CR_POWER_DOWN 0x0800 /* Power down */
+#define MII_CR_AUTO_NEG_EN 0x1000 /* Auto Neg Enable */
+#define MII_CR_SPEED_SELECT_LSB 0x2000 /* bits 6,13: 10=1000, 01=100, 00=10 */
+#define MII_CR_LOOPBACK 0x4000 /* 0 = normal, 1 = loopback */
+#define MII_CR_RESET 0x8000 /* 0 = normal, 1 = PHY reset */
+
+/* PHY Status Register */
+#define MII_SR_EXTENDED_CAPS 0x0001 /* Extended register capabilities */
+#define MII_SR_JABBER_DETECT 0x0002 /* Jabber Detected */
+#define MII_SR_LINK_STATUS 0x0004 /* Link Status 1 = link */
+#define MII_SR_AUTONEG_CAPS 0x0008 /* Auto Neg Capable */
+#define MII_SR_REMOTE_FAULT 0x0010 /* Remote Fault Detect */
+#define MII_SR_AUTONEG_COMPLETE 0x0020 /* Auto Neg Complete */
+#define MII_SR_PREAMBLE_SUPPRESS 0x0040 /* Preamble may be suppressed */
+#define MII_SR_EXTENDED_STATUS 0x0100 /* Ext. status info in Reg 0x0F */
+#define MII_SR_100T2_HD_CAPS 0x0200 /* 100T2 Half Duplex Capable */
+#define MII_SR_100T2_FD_CAPS 0x0400 /* 100T2 Full Duplex Capable */
+#define MII_SR_10T_HD_CAPS 0x0800 /* 10T Half Duplex Capable */
+#define MII_SR_10T_FD_CAPS 0x1000 /* 10T Full Duplex Capable */
+#define MII_SR_100X_HD_CAPS 0x2000 /* 100X Half Duplex Capable */
+#define MII_SR_100X_FD_CAPS 0x4000 /* 100X Full Duplex Capable */
+#define MII_SR_100T4_CAPS 0x8000 /* 100T4 Capable */
+
+/* Autoneg Advertisement Register */
+#define NWAY_AR_SELECTOR_FIELD 0x0001 /* indicates IEEE 802.3 CSMA/CD */
+#define NWAY_AR_10T_HD_CAPS 0x0020 /* 10T Half Duplex Capable */
+#define NWAY_AR_10T_FD_CAPS 0x0040 /* 10T Full Duplex Capable */
+#define NWAY_AR_100TX_HD_CAPS 0x0080 /* 100TX Half Duplex Capable */
+#define NWAY_AR_100TX_FD_CAPS 0x0100 /* 100TX Full Duplex Capable */
+#define NWAY_AR_100T4_CAPS 0x0200 /* 100T4 Capable */
+#define NWAY_AR_PAUSE 0x0400 /* Pause operation desired */
+#define NWAY_AR_ASM_DIR 0x0800 /* Asymmetric Pause Direction bit */
+#define NWAY_AR_REMOTE_FAULT 0x2000 /* Remote Fault detected */
+#define NWAY_AR_NEXT_PAGE 0x8000 /* Next Page ability supported */
+
+/* Link Partner Ability Register (Base Page) */
+#define NWAY_LPAR_SELECTOR_FIELD 0x0000 /* LP protocol selector field */
+#define NWAY_LPAR_10T_HD_CAPS 0x0020 /* LP is 10T Half Duplex Capable */
+#define NWAY_LPAR_10T_FD_CAPS 0x0040 /* LP is 10T Full Duplex Capable */
+#define NWAY_LPAR_100TX_HD_CAPS 0x0080 /* LP is 100TX Half Duplex Capable */
+#define NWAY_LPAR_100TX_FD_CAPS 0x0100 /* LP is 100TX Full Duplex Capable */
+#define NWAY_LPAR_100T4_CAPS 0x0200 /* LP is 100T4 Capable */
+#define NWAY_LPAR_PAUSE 0x0400 /* LP Pause operation desired */
+#define NWAY_LPAR_ASM_DIR 0x0800 /* LP Asymmetric Pause Direction bit */
+#define NWAY_LPAR_REMOTE_FAULT 0x2000 /* LP has detected Remote Fault */
+#define NWAY_LPAR_ACKNOWLEDGE 0x4000 /* LP has rx'd link code word */
+#define NWAY_LPAR_NEXT_PAGE 0x8000 /* Next Page ability supported */
+
+/* Autoneg Expansion Register */
+#define NWAY_ER_LP_NWAY_CAPS 0x0001 /* LP has Auto Neg Capability */
+#define NWAY_ER_PAGE_RXD 0x0002 /* A new page has been received */
+#define NWAY_ER_NEXT_PAGE_CAPS 0x0004 /* Local device is Next Page able */
+#define NWAY_ER_LP_NEXT_PAGE_CAPS 0x0008 /* LP is Next Page able */
+#define NWAY_ER_PAR_DETECT_FAULT 0x0010 /* Parallel Detection Fault detected */
+
+/* Next Page TX Register */
+#define NPTX_MSG_CODE_FIELD 0x0001 /* NP msg code or unformatted data */
+#define NPTX_TOGGLE 0x0800 /* Toggles between exchanges
+ * of different NP
+ */
+#define NPTX_ACKNOWLDGE2 0x1000 /* 1 = will comply with msg
+ * 0 = cannot comply with msg
+ */
+#define NPTX_MSG_PAGE 0x2000 /* formatted(1)/unformatted(0) pg */
+#define NPTX_NEXT_PAGE 0x8000 /* 1 = addition NP will follow
+ * 0 = sending last NP
+ */
+
+/* Link Partner Next Page Register */
+#define LP_RNPR_MSG_CODE_FIELD 0x0001 /* NP msg code or unformatted data */
+#define LP_RNPR_TOGGLE 0x0800 /* Toggles between exchanges
+ * of different NP
+ */
+#define LP_RNPR_ACKNOWLDGE2 0x1000 /* 1 = will comply with msg
+ * 0 = cannot comply with msg
+ */
+#define LP_RNPR_MSG_PAGE 0x2000 /* formatted(1)/unformatted(0) pg */
+#define LP_RNPR_ACKNOWLDGE 0x4000 /* 1 = ACK / 0 = NO ACK */
+#define LP_RNPR_NEXT_PAGE 0x8000 /* 1 = addition NP will follow
+ * 0 = sending last NP
+ */
+
+/* 1000BASE-T Control Register */
+#define CR_1000T_ASYM_PAUSE 0x0080 /* Advertise asymmetric pause bit */
+#define CR_1000T_HD_CAPS 0x0100 /* Advertise 1000T HD capability */
+#define CR_1000T_FD_CAPS 0x0200 /* Advertise 1000T FD capability */
+#define CR_1000T_REPEATER_DTE 0x0400 /* 1=Repeater/switch device port */
+ /* 0=DTE device */
+#define CR_1000T_MS_VALUE 0x0800 /* 1=Configure PHY as Master */
+ /* 0=Configure PHY as Slave */
+#define CR_1000T_MS_ENABLE 0x1000 /* 1=Master/Slave manual config value */
+ /* 0=Automatic Master/Slave config */
+#define CR_1000T_TEST_MODE_NORMAL 0x0000 /* Normal Operation */
+#define CR_1000T_TEST_MODE_1 0x2000 /* Transmit Waveform test */
+#define CR_1000T_TEST_MODE_2 0x4000 /* Master Transmit Jitter test */
+#define CR_1000T_TEST_MODE_3 0x6000 /* Slave Transmit Jitter test */
+#define CR_1000T_TEST_MODE_4 0x8000 /* Transmitter Distortion test */
+
+/* 1000BASE-T Status Register */
+#define SR_1000T_IDLE_ERROR_CNT 0x00FF /* Num idle errors since last read */
+#define SR_1000T_ASYM_PAUSE_DIR 0x0100 /* LP asymmetric pause direction bit */
+#define SR_1000T_LP_HD_CAPS 0x0400 /* LP is 1000T HD capable */
+#define SR_1000T_LP_FD_CAPS 0x0800 /* LP is 1000T FD capable */
+#define SR_1000T_REMOTE_RX_STATUS 0x1000 /* Remote receiver OK */
+#define SR_1000T_LOCAL_RX_STATUS 0x2000 /* Local receiver OK */
+#define SR_1000T_MS_CONFIG_RES 0x4000 /* 1=Local TX is Master, 0=Slave */
+#define SR_1000T_MS_CONFIG_FAULT 0x8000 /* Master/Slave config fault */
+#define SR_1000T_REMOTE_RX_STATUS_SHIFT 12
+#define SR_1000T_LOCAL_RX_STATUS_SHIFT 13
+#define SR_1000T_PHY_EXCESSIVE_IDLE_ERR_COUNT 5
+#define FFE_IDLE_ERR_COUNT_TIMEOUT_20 20
+#define FFE_IDLE_ERR_COUNT_TIMEOUT_100 100
+
+/* Extended Status Register */
+#define IEEE_ESR_1000T_HD_CAPS 0x1000 /* 1000T HD capable */
+#define IEEE_ESR_1000T_FD_CAPS 0x2000 /* 1000T FD capable */
+#define IEEE_ESR_1000X_HD_CAPS 0x4000 /* 1000X HD capable */
+#define IEEE_ESR_1000X_FD_CAPS 0x8000 /* 1000X FD capable */
+
+#define PHY_TX_POLARITY_MASK 0x0100 /* register 10h bit 8 (polarity bit) */
+#define PHY_TX_NORMAL_POLARITY 0 /* register 10h bit 8 (normal polarity) */
+
+#define AUTO_POLARITY_DISABLE 0x0010 /* register 11h bit 4 */
+ /* (0=enable, 1=disable) */
+
+/* M88E1000 PHY Specific Control Register */
+#define M88E1000_PSCR_JABBER_DISABLE 0x0001 /* 1=Jabber Function disabled */
+#define M88E1000_PSCR_POLARITY_REVERSAL 0x0002 /* 1=Polarity Reversal enabled */
+#define M88E1000_PSCR_SQE_TEST 0x0004 /* 1=SQE Test enabled */
+#define M88E1000_PSCR_CLK125_DISABLE 0x0010 /* 1=CLK125 low,
+ * 0=CLK125 toggling
+ */
+#define M88E1000_PSCR_MDI_MANUAL_MODE 0x0000 /* MDI Crossover Mode bits 6:5 */
+ /* Manual MDI configuration */
+#define M88E1000_PSCR_MDIX_MANUAL_MODE 0x0020 /* Manual MDIX configuration */
+#define M88E1000_PSCR_AUTO_X_1000T 0x0040 /* 1000BASE-T: Auto crossover,
+ * 100BASE-TX/10BASE-T:
+ * MDI Mode
+ */
+#define M88E1000_PSCR_AUTO_X_MODE 0x0060 /* Auto crossover enabled
+ * all speeds.
+ */
+#define M88E1000_PSCR_10BT_EXT_DIST_ENABLE 0x0080
+ /* 1=Enable Extended 10BASE-T distance
+ * (Lower 10BASE-T RX Threshold)
+ * 0=Normal 10BASE-T RX Threshold */
+#define M88E1000_PSCR_MII_5BIT_ENABLE 0x0100
+ /* 1=5-Bit interface in 100BASE-TX
+ * 0=MII interface in 100BASE-TX */
+#define M88E1000_PSCR_SCRAMBLER_DISABLE 0x0200 /* 1=Scrambler disable */
+#define M88E1000_PSCR_FORCE_LINK_GOOD 0x0400 /* 1=Force link good */
+#define M88E1000_PSCR_ASSERT_CRS_ON_TX 0x0800 /* 1=Assert CRS on Transmit */
+
+#define M88E1000_PSCR_POLARITY_REVERSAL_SHIFT 1
+#define M88E1000_PSCR_AUTO_X_MODE_SHIFT 5
+#define M88E1000_PSCR_10BT_EXT_DIST_ENABLE_SHIFT 7
+
+/* M88E1000 PHY Specific Status Register */
+#define M88E1000_PSSR_JABBER 0x0001 /* 1=Jabber */
+#define M88E1000_PSSR_REV_POLARITY 0x0002 /* 1=Polarity reversed */
+#define M88E1000_PSSR_DOWNSHIFT 0x0020 /* 1=Downshifted */
+#define M88E1000_PSSR_MDIX 0x0040 /* 1=MDIX; 0=MDI */
+#define M88E1000_PSSR_CABLE_LENGTH 0x0380 /* 0=<50M;1=50-80M;2=80-110M;
+ * 3=110-140M;4=>140M */
+#define M88E1000_PSSR_LINK 0x0400 /* 1=Link up, 0=Link down */
+#define M88E1000_PSSR_SPD_DPLX_RESOLVED 0x0800 /* 1=Speed & Duplex resolved */
+#define M88E1000_PSSR_PAGE_RCVD 0x1000 /* 1=Page received */
+#define M88E1000_PSSR_DPLX 0x2000 /* 1=Duplex 0=Half Duplex */
+#define M88E1000_PSSR_SPEED 0xC000 /* Speed, bits 14:15 */
+#define M88E1000_PSSR_10MBS 0x0000 /* 00=10Mbs */
+#define M88E1000_PSSR_100MBS 0x4000 /* 01=100Mbs */
+#define M88E1000_PSSR_1000MBS 0x8000 /* 10=1000Mbs */
+
+#define M88E1000_PSSR_REV_POLARITY_SHIFT 1
+#define M88E1000_PSSR_DOWNSHIFT_SHIFT 5
+#define M88E1000_PSSR_MDIX_SHIFT 6
+#define M88E1000_PSSR_CABLE_LENGTH_SHIFT 7
+
+/* M88E1000 Extended PHY Specific Control Register */
+#define M88E1000_EPSCR_FIBER_LOOPBACK 0x4000 /* 1=Fiber loopback */
+#define M88E1000_EPSCR_DOWN_NO_IDLE 0x8000 /* 1=Lost lock detect enabled.
+ * Will assert lost lock and bring
+ * link down if idle not seen
+ * within 1ms in 1000BASE-T
+ */
+/* Number of times we will attempt to autonegotiate before downshifting if we
+ * are the master */
+#define M88E1000_EPSCR_MASTER_DOWNSHIFT_MASK 0x0C00
+#define M88E1000_EPSCR_MASTER_DOWNSHIFT_1X 0x0000
+#define M88E1000_EPSCR_MASTER_DOWNSHIFT_2X 0x0400
+#define M88E1000_EPSCR_MASTER_DOWNSHIFT_3X 0x0800
+#define M88E1000_EPSCR_MASTER_DOWNSHIFT_4X 0x0C00
+/* Number of times we will attempt to autonegotiate before downshifting if we
+ * are the slave */
+#define M88E1000_EPSCR_SLAVE_DOWNSHIFT_MASK 0x0300
+#define M88E1000_EPSCR_SLAVE_DOWNSHIFT_DIS 0x0000
+#define M88E1000_EPSCR_SLAVE_DOWNSHIFT_1X 0x0100
+#define M88E1000_EPSCR_SLAVE_DOWNSHIFT_2X 0x0200
+#define M88E1000_EPSCR_SLAVE_DOWNSHIFT_3X 0x0300
+#define M88E1000_EPSCR_TX_CLK_2_5 0x0060 /* 2.5 MHz TX_CLK */
+#define M88E1000_EPSCR_TX_CLK_25 0x0070 /* 25 MHz TX_CLK */
+#define M88E1000_EPSCR_TX_CLK_0 0x0000 /* NO TX_CLK */
+
+/* M88EC018 Rev 2 specific DownShift settings */
+#define M88EC018_EPSCR_DOWNSHIFT_COUNTER_MASK 0x0E00
+#define M88EC018_EPSCR_DOWNSHIFT_COUNTER_1X 0x0000
+#define M88EC018_EPSCR_DOWNSHIFT_COUNTER_2X 0x0200
+#define M88EC018_EPSCR_DOWNSHIFT_COUNTER_3X 0x0400
+#define M88EC018_EPSCR_DOWNSHIFT_COUNTER_4X 0x0600
+#define M88EC018_EPSCR_DOWNSHIFT_COUNTER_5X 0x0800
+#define M88EC018_EPSCR_DOWNSHIFT_COUNTER_6X 0x0A00
+#define M88EC018_EPSCR_DOWNSHIFT_COUNTER_7X 0x0C00
+#define M88EC018_EPSCR_DOWNSHIFT_COUNTER_8X 0x0E00
+
+/* IGP01E1000 Specific Port Config Register - R/W */
+#define IGP01E1000_PSCFR_AUTO_MDIX_PAR_DETECT 0x0010
+#define IGP01E1000_PSCFR_PRE_EN 0x0020
+#define IGP01E1000_PSCFR_SMART_SPEED 0x0080
+#define IGP01E1000_PSCFR_DISABLE_TPLOOPBACK 0x0100
+#define IGP01E1000_PSCFR_DISABLE_JABBER 0x0400
+#define IGP01E1000_PSCFR_DISABLE_TRANSMIT 0x2000
+
+/* IGP01E1000 Specific Port Status Register - R/O */
+#define IGP01E1000_PSSR_AUTONEG_FAILED 0x0001 /* RO LH SC */
+#define IGP01E1000_PSSR_POLARITY_REVERSED 0x0002
+#define IGP01E1000_PSSR_CABLE_LENGTH 0x007C
+#define IGP01E1000_PSSR_FULL_DUPLEX 0x0200
+#define IGP01E1000_PSSR_LINK_UP 0x0400
+#define IGP01E1000_PSSR_MDIX 0x0800
+#define IGP01E1000_PSSR_SPEED_MASK 0xC000 /* speed bits mask */
+#define IGP01E1000_PSSR_SPEED_10MBPS 0x4000
+#define IGP01E1000_PSSR_SPEED_100MBPS 0x8000
+#define IGP01E1000_PSSR_SPEED_1000MBPS 0xC000
+#define IGP01E1000_PSSR_CABLE_LENGTH_SHIFT 0x0002 /* shift right 2 */
+#define IGP01E1000_PSSR_MDIX_SHIFT 0x000B /* shift right 11 */
+
+/* IGP01E1000 Specific Port Control Register - R/W */
+#define IGP01E1000_PSCR_TP_LOOPBACK 0x0010
+#define IGP01E1000_PSCR_CORRECT_NC_SCMBLR 0x0200
+#define IGP01E1000_PSCR_TEN_CRS_SELECT 0x0400
+#define IGP01E1000_PSCR_FLIP_CHIP 0x0800
+#define IGP01E1000_PSCR_AUTO_MDIX 0x1000
+#define IGP01E1000_PSCR_FORCE_MDI_MDIX 0x2000 /* 0-MDI, 1-MDIX */
+
+/* IGP01E1000 Specific Port Link Health Register */
+#define IGP01E1000_PLHR_SS_DOWNGRADE 0x8000
+#define IGP01E1000_PLHR_GIG_SCRAMBLER_ERROR 0x4000
+#define IGP01E1000_PLHR_MASTER_FAULT 0x2000
+#define IGP01E1000_PLHR_MASTER_RESOLUTION 0x1000
+#define IGP01E1000_PLHR_GIG_REM_RCVR_NOK 0x0800 /* LH */
+#define IGP01E1000_PLHR_IDLE_ERROR_CNT_OFLOW 0x0400 /* LH */
+#define IGP01E1000_PLHR_DATA_ERR_1 0x0200 /* LH */
+#define IGP01E1000_PLHR_DATA_ERR_0 0x0100
+#define IGP01E1000_PLHR_AUTONEG_FAULT 0x0040
+#define IGP01E1000_PLHR_AUTONEG_ACTIVE 0x0010
+#define IGP01E1000_PLHR_VALID_CHANNEL_D 0x0008
+#define IGP01E1000_PLHR_VALID_CHANNEL_C 0x0004
+#define IGP01E1000_PLHR_VALID_CHANNEL_B 0x0002
+#define IGP01E1000_PLHR_VALID_CHANNEL_A 0x0001
+
+/* IGP01E1000 Channel Quality Register */
+#define IGP01E1000_MSE_CHANNEL_D 0x000F
+#define IGP01E1000_MSE_CHANNEL_C 0x00F0
+#define IGP01E1000_MSE_CHANNEL_B 0x0F00
+#define IGP01E1000_MSE_CHANNEL_A 0xF000
+
+#define IGP02E1000_PM_SPD 0x0001 /* Smart Power Down */
+#define IGP02E1000_PM_D3_LPLU 0x0004 /* Enable LPLU in non-D0a modes */
+#define IGP02E1000_PM_D0_LPLU 0x0002 /* Enable LPLU in D0a mode */
+
+/* IGP01E1000 DSP reset macros */
+#define DSP_RESET_ENABLE 0x0
+#define DSP_RESET_DISABLE 0x2
+#define E1000_MAX_DSP_RESETS 10
+
+/* IGP01E1000 & IGP02E1000 AGC Registers */
+
+#define IGP01E1000_AGC_LENGTH_SHIFT 7 /* Coarse - 13:11, Fine - 10:7 */
+#define IGP02E1000_AGC_LENGTH_SHIFT 9 /* Coarse - 15:13, Fine - 12:9 */
+
+/* IGP02E1000 AGC Register Length 9-bit mask */
+#define IGP02E1000_AGC_LENGTH_MASK 0x7F
+
+/* 7 bits (3 Coarse + 4 Fine) --> 128 possible values */
+#define IGP01E1000_AGC_LENGTH_TABLE_SIZE 128
+#define IGP02E1000_AGC_LENGTH_TABLE_SIZE 113
+
+/* The precision error of the cable length is +/- 10 meters */
+#define IGP01E1000_AGC_RANGE 10
+#define IGP02E1000_AGC_RANGE 15
+
+/* IGP01E1000 PCS Initialization register */
+/* bits 6:3 in the PCS registers store the channel polarity */
+#define IGP01E1000_PHY_POLARITY_MASK 0x0078
+
+/* IGP01E1000 GMII FIFO Register */
+#define IGP01E1000_GMII_FLEX_SPD 0x10 /* Enable flexible speed
+ * on Link-Up */
+#define IGP01E1000_GMII_SPD 0x20 /* Enable SPD */
+
+/* IGP01E1000 Analog Register */
+#define IGP01E1000_ANALOG_SPARE_FUSE_STATUS 0x20D1
+#define IGP01E1000_ANALOG_FUSE_STATUS 0x20D0
+#define IGP01E1000_ANALOG_FUSE_CONTROL 0x20DC
+#define IGP01E1000_ANALOG_FUSE_BYPASS 0x20DE
+
+#define IGP01E1000_ANALOG_FUSE_POLY_MASK 0xF000
+#define IGP01E1000_ANALOG_FUSE_FINE_MASK 0x0F80
+#define IGP01E1000_ANALOG_FUSE_COARSE_MASK 0x0070
+#define IGP01E1000_ANALOG_SPARE_FUSE_ENABLED 0x0100
+#define IGP01E1000_ANALOG_FUSE_ENABLE_SW_CONTROL 0x0002
+
+#define IGP01E1000_ANALOG_FUSE_COARSE_THRESH 0x0040
+#define IGP01E1000_ANALOG_FUSE_COARSE_10 0x0010
+#define IGP01E1000_ANALOG_FUSE_FINE_1 0x0080
+#define IGP01E1000_ANALOG_FUSE_FINE_10 0x0500
+
+/* GG82563 PHY Specific Control Register (Page 0, Register 16) */
+#define GG82563_PSCR_DISABLE_JABBER 0x0001 /* 1=Disable Jabber */
+#define GG82563_PSCR_POLARITY_REVERSAL_DISABLE 0x0002 /* 1=Polarity Reversal Disabled */
+#define GG82563_PSCR_POWER_DOWN 0x0004 /* 1=Power Down */
+#define GG82563_PSCR_COPPER_TRANSMITER_DISABLE 0x0008 /* 1=Transmitter Disabled */
+#define GG82563_PSCR_CROSSOVER_MODE_MASK 0x0060
+#define GG82563_PSCR_CROSSOVER_MODE_MDI 0x0000 /* 00=Manual MDI configuration */
+#define GG82563_PSCR_CROSSOVER_MODE_MDIX 0x0020 /* 01=Manual MDIX configuration */
+#define GG82563_PSCR_CROSSOVER_MODE_AUTO 0x0060 /* 11=Automatic crossover */
+#define GG82563_PSCR_ENALBE_EXTENDED_DISTANCE 0x0080 /* 1=Enable Extended Distance */
+#define GG82563_PSCR_ENERGY_DETECT_MASK 0x0300
+#define GG82563_PSCR_ENERGY_DETECT_OFF 0x0000 /* 00,01=Off */
+#define GG82563_PSCR_ENERGY_DETECT_RX 0x0200 /* 10=Sense on Rx only (Energy Detect) */
+#define GG82563_PSCR_ENERGY_DETECT_RX_TM 0x0300 /* 11=Sense and Tx NLP */
+#define GG82563_PSCR_FORCE_LINK_GOOD 0x0400 /* 1=Force Link Good */
+#define GG82563_PSCR_DOWNSHIFT_ENABLE 0x0800 /* 1=Enable Downshift */
+#define GG82563_PSCR_DOWNSHIFT_COUNTER_MASK 0x7000
+#define GG82563_PSCR_DOWNSHIFT_COUNTER_SHIFT 12
+
+/* PHY Specific Status Register (Page 0, Register 17) */
+#define GG82563_PSSR_JABBER 0x0001 /* 1=Jabber */
+#define GG82563_PSSR_POLARITY 0x0002 /* 1=Polarity Reversed */
+#define GG82563_PSSR_LINK 0x0008 /* 1=Link is Up */
+#define GG82563_PSSR_ENERGY_DETECT 0x0010 /* 1=Sleep, 0=Active */
+#define GG82563_PSSR_DOWNSHIFT 0x0020 /* 1=Downshift */
+#define GG82563_PSSR_CROSSOVER_STATUS 0x0040 /* 1=MDIX, 0=MDI */
+#define GG82563_PSSR_RX_PAUSE_ENABLED 0x0100 /* 1=Receive Pause Enabled */
+#define GG82563_PSSR_TX_PAUSE_ENABLED 0x0200 /* 1=Transmit Pause Enabled */
+#define GG82563_PSSR_LINK_UP 0x0400 /* 1=Link Up */
+#define GG82563_PSSR_SPEED_DUPLEX_RESOLVED 0x0800 /* 1=Resolved */
+#define GG82563_PSSR_PAGE_RECEIVED 0x1000 /* 1=Page Received */
+#define GG82563_PSSR_DUPLEX 0x2000 /* 1-Full-Duplex */
+#define GG82563_PSSR_SPEED_MASK 0xC000
+#define GG82563_PSSR_SPEED_10MBPS 0x0000 /* 00=10Mbps */
+#define GG82563_PSSR_SPEED_100MBPS 0x4000 /* 01=100Mbps */
+#define GG82563_PSSR_SPEED_1000MBPS 0x8000 /* 10=1000Mbps */
+
+/* PHY Specific Status Register 2 (Page 0, Register 19) */
+#define GG82563_PSSR2_JABBER 0x0001 /* 1=Jabber */
+#define GG82563_PSSR2_POLARITY_CHANGED 0x0002 /* 1=Polarity Changed */
+#define GG82563_PSSR2_ENERGY_DETECT_CHANGED 0x0010 /* 1=Energy Detect Changed */
+#define GG82563_PSSR2_DOWNSHIFT_INTERRUPT 0x0020 /* 1=Downshift Detected */
+#define GG82563_PSSR2_MDI_CROSSOVER_CHANGE 0x0040 /* 1=Crossover Changed */
+#define GG82563_PSSR2_FALSE_CARRIER 0x0100 /* 1=False Carrier */
+#define GG82563_PSSR2_SYMBOL_ERROR 0x0200 /* 1=Symbol Error */
+#define GG82563_PSSR2_LINK_STATUS_CHANGED 0x0400 /* 1=Link Status Changed */
+#define GG82563_PSSR2_AUTO_NEG_COMPLETED 0x0800 /* 1=Auto-Neg Completed */
+#define GG82563_PSSR2_PAGE_RECEIVED 0x1000 /* 1=Page Received */
+#define GG82563_PSSR2_DUPLEX_CHANGED 0x2000 /* 1=Duplex Changed */
+#define GG82563_PSSR2_SPEED_CHANGED 0x4000 /* 1=Speed Changed */
+#define GG82563_PSSR2_AUTO_NEG_ERROR 0x8000 /* 1=Auto-Neg Error */
+
+/* PHY Specific Control Register 2 (Page 0, Register 26) */
+#define GG82563_PSCR2_10BT_POLARITY_FORCE 0x0002 /* 1=Force Negative Polarity */
+#define GG82563_PSCR2_1000MB_TEST_SELECT_MASK 0x000C
+#define GG82563_PSCR2_1000MB_TEST_SELECT_NORMAL 0x0000 /* 00,01=Normal Operation */
+#define GG82563_PSCR2_1000MB_TEST_SELECT_112NS 0x0008 /* 10=Select 112ns Sequence */
+#define GG82563_PSCR2_1000MB_TEST_SELECT_16NS 0x000C /* 11=Select 16ns Sequence */
+#define GG82563_PSCR2_REVERSE_AUTO_NEG 0x2000 /* 1=Reverse Auto-Negotiation */
+#define GG82563_PSCR2_1000BT_DISABLE 0x4000 /* 1=Disable 1000BASE-T */
+#define GG82563_PSCR2_TRANSMITER_TYPE_MASK 0x8000
+#define GG82563_PSCR2_TRANSMITTER_TYPE_CLASS_B 0x0000 /* 0=Class B */
+#define GG82563_PSCR2_TRANSMITTER_TYPE_CLASS_A 0x8000 /* 1=Class A */
+
+/* MAC Specific Control Register (Page 2, Register 21) */
+/* Tx clock speed for Link Down and 1000BASE-T for the following speeds */
+#define GG82563_MSCR_TX_CLK_MASK 0x0007
+#define GG82563_MSCR_TX_CLK_10MBPS_2_5MHZ 0x0004
+#define GG82563_MSCR_TX_CLK_100MBPS_25MHZ 0x0005
+#define GG82563_MSCR_TX_CLK_1000MBPS_2_5MHZ 0x0006
+#define GG82563_MSCR_TX_CLK_1000MBPS_25MHZ 0x0007
+
+#define GG82563_MSCR_ASSERT_CRS_ON_TX 0x0010 /* 1=Assert */
+
+/* DSP Distance Register (Page 5, Register 26) */
+#define GG82563_DSPD_CABLE_LENGTH 0x0007 /* 0 = <50M;
+ 1 = 50-80M;
+ 2 = 80-110M;
+ 3 = 110-140M;
+ 4 = >140M */
+
+/* Kumeran Mode Control Register (Page 193, Register 16) */
+#define GG82563_KMCR_PHY_LEDS_EN 0x0020 /* 1=PHY LEDs, 0=Kumeran Inband LEDs */
+#define GG82563_KMCR_FORCE_LINK_UP 0x0040 /* 1=Force Link Up */
+#define GG82563_KMCR_SUPPRESS_SGMII_EPD_EXT 0x0080
+#define GG82563_KMCR_MDIO_BUS_SPEED_SELECT_MASK 0x0400
+#define GG82563_KMCR_MDIO_BUS_SPEED_SELECT 0x0400 /* 1=6.25MHz, 0=0.8MHz */
+#define GG82563_KMCR_PASS_FALSE_CARRIER 0x0800
+
+/* Power Management Control Register (Page 193, Register 20) */
+#define GG82563_PMCR_ENABLE_ELECTRICAL_IDLE 0x0001 /* 1=Enable SERDES Electrical Idle */
+#define GG82563_PMCR_DISABLE_PORT 0x0002 /* 1=Disable Port */
+#define GG82563_PMCR_DISABLE_SERDES 0x0004 /* 1=Disable SERDES */
+#define GG82563_PMCR_REVERSE_AUTO_NEG 0x0008 /* 1=Enable Reverse Auto-Negotiation */
+#define GG82563_PMCR_DISABLE_1000_NON_D0 0x0010 /* 1=Disable 1000Mbps Auto-Neg in non D0 */
+#define GG82563_PMCR_DISABLE_1000 0x0020 /* 1=Disable 1000Mbps Auto-Neg Always */
+#define GG82563_PMCR_REVERSE_AUTO_NEG_D0A 0x0040 /* 1=Enable D0a Reverse Auto-Negotiation */
+#define GG82563_PMCR_FORCE_POWER_STATE 0x0080 /* 1=Force Power State */
+#define GG82563_PMCR_PROGRAMMED_POWER_STATE_MASK 0x0300
+#define GG82563_PMCR_PROGRAMMED_POWER_STATE_DR 0x0000 /* 00=Dr */
+#define GG82563_PMCR_PROGRAMMED_POWER_STATE_D0U 0x0100 /* 01=D0u */
+#define GG82563_PMCR_PROGRAMMED_POWER_STATE_D0A 0x0200 /* 10=D0a */
+#define GG82563_PMCR_PROGRAMMED_POWER_STATE_D3 0x0300 /* 11=D3 */
+
+/* In-Band Control Register (Page 194, Register 18) */
+#define GG82563_ICR_DIS_PADDING 0x0010 /* Disable Padding Use */
+
+
+/* Bit definitions for valid PHY IDs. */
+/* I = Integrated
+ * E = External
+ */
+#define M88_VENDOR 0x0141
+#define M88E1000_E_PHY_ID 0x01410C50
+#define M88E1000_I_PHY_ID 0x01410C30
+#define M88E1011_I_PHY_ID 0x01410C20
+#define IGP01E1000_I_PHY_ID 0x02A80380
+#define M88E1000_12_PHY_ID M88E1000_E_PHY_ID
+#define M88E1000_14_PHY_ID M88E1000_E_PHY_ID
+#define M88E1011_I_REV_4 0x04
+#define M88E1111_I_PHY_ID 0x01410CC0
+#define L1LXT971A_PHY_ID 0x001378E0
+#define GG82563_E_PHY_ID 0x01410CA0
+
+
+/* Bits...
+ * 15-5: page
+ * 4-0: register offset
+ */
+#define PHY_PAGE_SHIFT 5
+#define PHY_REG(page, reg) \
+ (((page) << PHY_PAGE_SHIFT) | ((reg) & MAX_PHY_REG_ADDRESS))
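+/* Example (illustrative only): IGP3_PHY_PORT_CTRL below is PHY_REG(769, 17),
+ * which encodes to (769 << 5) | 17 = 0x6031.
+ */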
+
+#define IGP3_PHY_PORT_CTRL \
+ PHY_REG(769, 17) /* Port General Configuration */
+#define IGP3_PHY_RATE_ADAPT_CTRL \
+ PHY_REG(769, 25) /* Rate Adapter Control Register */
+
+#define IGP3_KMRN_FIFO_CTRL_STATS \
+ PHY_REG(770, 16) /* KMRN FIFO's control/status register */
+#define IGP3_KMRN_POWER_MNG_CTRL \
+ PHY_REG(770, 17) /* KMRN Power Management Control Register */
+#define IGP3_KMRN_INBAND_CTRL \
+ PHY_REG(770, 18) /* KMRN Inband Control Register */
+#define IGP3_KMRN_DIAG \
+ PHY_REG(770, 19) /* KMRN Diagnostic register */
+#define IGP3_KMRN_DIAG_PCS_LOCK_LOSS 0x0002 /* RX PCS is not synced */
+#define IGP3_KMRN_ACK_TIMEOUT \
+ PHY_REG(770, 20) /* KMRN Acknowledge Timeouts register */
+
+#define IGP3_VR_CTRL \
+ PHY_REG(776, 18) /* Voltage regulator control register */
+#define IGP3_VR_CTRL_MODE_SHUT 0x0200 /* Enter powerdown, shutdown VRs */
+#define IGP3_VR_CTRL_MODE_MASK 0x0300 /* Shutdown VR Mask */
+
+#define IGP3_CAPABILITY \
+ PHY_REG(776, 19) /* IGP3 Capability Register */
+
+/* Capabilities for SKU Control */
+#define IGP3_CAP_INITIATE_TEAM 0x0001 /* Able to initiate a team */
+#define IGP3_CAP_WFM 0x0002 /* Support WoL and PXE */
+#define IGP3_CAP_ASF 0x0004 /* Support ASF */
+#define IGP3_CAP_LPLU 0x0008 /* Support Low Power Link Up */
+#define IGP3_CAP_DC_AUTO_SPEED 0x0010 /* Support AC/DC Auto Link Speed */
+#define IGP3_CAP_SPD 0x0020 /* Support Smart Power Down */
+#define IGP3_CAP_MULT_QUEUE 0x0040 /* Support 2 tx & 2 rx queues */
+#define IGP3_CAP_RSS 0x0080 /* Support RSS */
+#define IGP3_CAP_8021PQ 0x0100 /* Support 802.1Q & 802.1p */
+#define IGP3_CAP_AMT_CB 0x0200 /* Support active manageability and circuit breaker */
+
+#define IGP3_PPC_JORDAN_EN 0x0001
+#define IGP3_PPC_JORDAN_GIGA_SPEED 0x0002
+
+#define IGP3_KMRN_PMC_EE_IDLE_LINK_DIS 0x0001
+#define IGP3_KMRN_PMC_K0S_ENTRY_LATENCY_MASK 0x001E
+#define IGP3_KMRN_PMC_K0S_MODE1_EN_GIGA 0x0020
+#define IGP3_KMRN_PMC_K0S_MODE1_EN_100 0x0040
+
+#define IGP3E1000_PHY_MISC_CTRL 0x1B /* Misc. Ctrl register */
+#define IGP3_PHY_MISC_DUPLEX_MANUAL_SET 0x1000 /* Duplex Manual Set */
+
+#define IGP3_KMRN_EXT_CTRL PHY_REG(770, 18)
+#define IGP3_KMRN_EC_DIS_INBAND 0x0080
+
+#define IGP03E1000_E_PHY_ID 0x02A80390
+#define IFE_E_PHY_ID 0x02A80330 /* 10/100 PHY */
+#define IFE_PLUS_E_PHY_ID 0x02A80320
+#define IFE_C_E_PHY_ID 0x02A80310
+
+#define IFE_PHY_EXTENDED_STATUS_CONTROL 0x10 /* 100BaseTx Extended Status, Control and Address */
+#define IFE_PHY_SPECIAL_CONTROL 0x11 /* 100BaseTx PHY special control register */
+#define IFE_PHY_RCV_FALSE_CARRIER 0x13 /* 100BaseTx Receive False Carrier Counter */
+#define IFE_PHY_RCV_DISCONNECT 0x14 /* 100BaseTx Receive Disconnect Counter */
+#define IFE_PHY_RCV_ERROT_FRAME 0x15 /* 100BaseTx Receive Error Frame Counter */
+#define IFE_PHY_RCV_SYMBOL_ERR 0x16 /* Receive Symbol Error Counter */
+#define IFE_PHY_PREM_EOF_ERR 0x17 /* 100BaseTx Receive Premature End Of Frame Error Counter */
+#define IFE_PHY_RCV_EOF_ERR 0x18 /* 10BaseT Receive End Of Frame Error Counter */
+#define IFE_PHY_TX_JABBER_DETECT 0x19 /* 10BaseT Transmit Jabber Detect Counter */
+#define IFE_PHY_EQUALIZER 0x1A /* PHY Equalizer Control and Status */
+#define IFE_PHY_SPECIAL_CONTROL_LED 0x1B /* PHY special control and LED configuration */
+#define IFE_PHY_MDIX_CONTROL 0x1C /* MDI/MDI-X Control register */
+#define IFE_PHY_HWI_CONTROL 0x1D /* Hardware Integrity Control (HWI) */
+
+#define IFE_PESC_REDUCED_POWER_DOWN_DISABLE 0x2000 /* Default 1 = Disable auto reduced power down */
+#define IFE_PESC_100BTX_POWER_DOWN 0x0400 /* Indicates the power state of 100BASE-TX */
+#define IFE_PESC_10BTX_POWER_DOWN 0x0200 /* Indicates the power state of 10BASE-T */
+#define IFE_PESC_POLARITY_REVERSED 0x0100 /* Indicates 10BASE-T polarity */
+#define IFE_PESC_PHY_ADDR_MASK 0x007C /* Bit 6:2 for sampled PHY address */
+#define IFE_PESC_SPEED 0x0002 /* Auto-negotiation speed result 1=100Mbs, 0=10Mbs */
+#define IFE_PESC_DUPLEX 0x0001 /* Auto-negotiation duplex result 1=Full, 0=Half */
+#define IFE_PESC_POLARITY_REVERSED_SHIFT 8
+
+#define IFE_PSC_DISABLE_DYNAMIC_POWER_DOWN 0x0100 /* 1 = Dynamic Power Down disabled */
+#define IFE_PSC_FORCE_POLARITY 0x0020 /* 1=Reversed Polarity, 0=Normal */
+#define IFE_PSC_AUTO_POLARITY_DISABLE 0x0010 /* 1=Auto Polarity Disabled, 0=Enabled */
+#define IFE_PSC_JABBER_FUNC_DISABLE 0x0001 /* 1=Jabber Disabled, 0=Normal Jabber Operation */
+#define IFE_PSC_FORCE_POLARITY_SHIFT 5
+#define IFE_PSC_AUTO_POLARITY_DISABLE_SHIFT 4
+
+#define IFE_PMC_AUTO_MDIX 0x0080 /* 1=enable MDI/MDI-X feature, default 0=disabled */
+#define IFE_PMC_FORCE_MDIX 0x0040 /* 1=force MDIX-X, 0=force MDI */
+#define IFE_PMC_MDIX_STATUS 0x0020 /* 1=MDI-X, 0=MDI */
+#define IFE_PMC_AUTO_MDIX_COMPLETE 0x0010 /* Resolution algorithm is completed */
+#define IFE_PMC_MDIX_MODE_SHIFT 6
+#define IFE_PHC_MDIX_RESET_ALL_MASK 0x0000 /* Disable auto MDI-X */
+
+#define IFE_PHC_HWI_ENABLE 0x8000 /* Enable the HWI feature */
+#define IFE_PHC_ABILITY_CHECK 0x4000 /* 1= Test Passed, 0=failed */
+#define IFE_PHC_TEST_EXEC 0x2000 /* PHY launch test pulses on the wire */
+#define IFE_PHC_HIGHZ 0x0200 /* 1 = Open Circuit */
+#define IFE_PHC_LOWZ 0x0400 /* 1 = Short Circuit */
+#define IFE_PHC_LOW_HIGH_Z_MASK 0x0600 /* Mask for indication type of problem on the line */
+#define IFE_PHC_DISTANCE_MASK 0x01FF /* Mask for distance to the cable problem, in 80cm granularity */
+#define IFE_PHC_RESET_ALL_MASK 0x0000 /* Disable HWI */
+#define IFE_PSCL_PROBE_MODE 0x0020 /* LED Probe mode */
+#define IFE_PSCL_PROBE_LEDS_OFF 0x0006 /* Force LEDs 0 and 2 off */
+#define IFE_PSCL_PROBE_LEDS_ON 0x0007 /* Force LEDs 0 and 2 on */
+
+#define ICH_FLASH_COMMAND_TIMEOUT 5000 /* 5000 uSecs - adjusted */
+#define ICH_FLASH_ERASE_TIMEOUT 3000000 /* Up to 3 seconds - worst case */
+#define ICH_FLASH_CYCLE_REPEAT_COUNT 10 /* 10 cycles */
+#define ICH_FLASH_SEG_SIZE_256 256
+#define ICH_FLASH_SEG_SIZE_4K 4096
+#define ICH_FLASH_SEG_SIZE_64K 65536
+
+#define ICH_CYCLE_READ 0x0
+#define ICH_CYCLE_RESERVED 0x1
+#define ICH_CYCLE_WRITE 0x2
+#define ICH_CYCLE_ERASE 0x3
+
+#define ICH_FLASH_GFPREG 0x0000
+#define ICH_FLASH_HSFSTS 0x0004
+#define ICH_FLASH_HSFCTL 0x0006
+#define ICH_FLASH_FADDR 0x0008
+#define ICH_FLASH_FDATA0 0x0010
+#define ICH_FLASH_FRACC 0x0050
+#define ICH_FLASH_FREG0 0x0054
+#define ICH_FLASH_FREG1 0x0058
+#define ICH_FLASH_FREG2 0x005C
+#define ICH_FLASH_FREG3 0x0060
+#define ICH_FLASH_FPR0 0x0074
+#define ICH_FLASH_FPR1 0x0078
+#define ICH_FLASH_SSFSTS 0x0090
+#define ICH_FLASH_SSFCTL 0x0092
+#define ICH_FLASH_PREOP 0x0094
+#define ICH_FLASH_OPTYPE 0x0096
+#define ICH_FLASH_OPMENU 0x0098
+
+#define ICH_FLASH_REG_MAPSIZE 0x00A0
+#define ICH_FLASH_SECTOR_SIZE 4096
+#define ICH_GFPREG_BASE_MASK 0x1FFF
+#define ICH_FLASH_LINEAR_ADDR_MASK 0x00FFFFFF
+
+/* ICH8 GbE Flash Hardware Sequencing Flash Status Register bit breakdown */
+/* Offset 04h HSFSTS */
+union ich8_hws_flash_status {
+ struct ich8_hsfsts {
+#ifdef __BIG_ENDIAN
+ u16 reserved2 :6;
+ u16 fldesvalid :1;
+ u16 flockdn :1;
+ u16 flcdone :1;
+ u16 flcerr :1;
+ u16 dael :1;
+ u16 berasesz :2;
+ u16 flcinprog :1;
+ u16 reserved1 :2;
+#else
+ u16 flcdone :1; /* bit 0 Flash Cycle Done */
+ u16 flcerr :1; /* bit 1 Flash Cycle Error */
+ u16 dael :1; /* bit 2 Direct Access error Log */
+ u16 berasesz :2; /* bit 4:3 Block/Sector Erase Size */
+ u16 flcinprog :1; /* bit 5 flash SPI cycle in Progress */
+ u16 reserved1 :2; /* bit 7:6 Reserved */
+ u16 reserved2 :6; /* bit 13:8 Reserved */
+ u16 fldesvalid :1; /* bit 14 Flash Descriptor Valid */
+ u16 flockdn :1; /* bit 15 Flash Configuration Lock-Down */
+#endif
+ } hsf_status;
+ u16 regval;
+};
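+/* Usage sketch (illustrative only; the raw value is made up): the union lets
+ * the driver read HSFSTS once and then test individual fields, e.g.
+ *   union ich8_hws_flash_status hsfsts;
+ *   hsfsts.regval = 0x0009;          // assumed raw register contents
+ *   if (hsfsts.hsf_status.flcdone && // bit 0: flash cycle done
+ *       !hsfsts.hsf_status.flcerr)   // bit 1: no flash cycle error
+ *           ;                        // cycle completed successfully
+ */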
+
+/* ICH8 GbE Flash Hardware Sequencing Flash control Register bit breakdown */
+/* Offset 06h FLCTL */
+union ich8_hws_flash_ctrl {
+ struct ich8_hsflctl {
+#ifdef __BIG_ENDIAN
+ u16 fldbcount :2;
+ u16 flockdn :6;
+ u16 flcgo :1;
+ u16 flcycle :2;
+ u16 reserved :5;
+#else
+ u16 flcgo :1; /* 0 Flash Cycle Go */
+ u16 flcycle :2; /* 2:1 Flash Cycle */
+ u16 reserved :5; /* 7:3 Reserved */
+ u16 fldbcount :2; /* 9:8 Flash Data Byte Count */
+ u16 flockdn :6; /* 15:10 Reserved */
+#endif
+ } hsf_ctrl;
+ u16 regval;
+};
+
+/* ICH8 Flash Region Access Permissions */
+union ich8_hws_flash_regacc {
+ struct ich8_flracc {
+#ifdef __BIG_ENDIAN
+ u32 gmwag :8;
+ u32 gmrag :8;
+ u32 grwa :8;
+ u32 grra :8;
+#else
+ u32 grra :8; /* 0:7 GbE region Read Access */
+ u32 grwa :8; /* 8:15 GbE region Write Access */
+ u32 gmrag :8; /* 23:16 GbE Master Read Access Grant */
+ u32 gmwag :8; /* 31:24 GbE Master Write Access Grant */
+#endif
+ } hsf_flregacc;
+ u16 regval;
+};
+
+/* Miscellaneous PHY bit definitions. */
+#define PHY_PREAMBLE 0xFFFFFFFF
+#define PHY_SOF 0x01
+#define PHY_OP_READ 0x02
+#define PHY_OP_WRITE 0x01
+#define PHY_TURNAROUND 0x02
+#define PHY_PREAMBLE_SIZE 32
+#define MII_CR_SPEED_1000 0x0040
+#define MII_CR_SPEED_100 0x2000
+#define MII_CR_SPEED_10 0x0000
+#define E1000_PHY_ADDRESS 0x01
+#define PHY_AUTO_NEG_TIME 45 /* 4.5 Seconds */
+#define PHY_FORCE_TIME 20 /* 2.0 Seconds */
+#define PHY_REVISION_MASK 0xFFFFFFF0
+#define DEVICE_SPEED_MASK 0x00000300 /* Device Ctrl Reg Speed Mask */
+#define REG4_SPEED_MASK 0x01E0
+#define REG9_SPEED_MASK 0x0300
+#define ADVERTISE_10_HALF 0x0001
+#define ADVERTISE_10_FULL 0x0002
+#define ADVERTISE_100_HALF 0x0004
+#define ADVERTISE_100_FULL 0x0008
+#define ADVERTISE_1000_HALF 0x0010
+#define ADVERTISE_1000_FULL 0x0020
+#define AUTONEG_ADVERTISE_SPEED_DEFAULT 0x002F /* Everything but 1000-Half */
+#define AUTONEG_ADVERTISE_10_100_ALL 0x000F /* All 10/100 speeds*/
+#define AUTONEG_ADVERTISE_10_ALL 0x0003 /* 10Mbps Full & Half speeds*/
+
+#endif /* _E1000_HW_H_ */
--- a/devices/e1000/e1000_main-2.6.13-ethercat.c Mon Feb 07 21:17:09 2011 +0100
+++ b/devices/e1000/e1000_main-2.6.13-ethercat.c Mon Feb 07 21:30:25 2011 +0100
@@ -2682,7 +2682,7 @@
#ifdef CONFIG_E1000_NAPI
if (adapter->ecdev) {
for(i = 0; i < E1000_MAX_INTR; i++)
- if(unlikely(!adapter->clean_rx(adapter, &work_done, 100) &
+ if(unlikely(!adapter->clean_rx(adapter, &work_done, 100) &&
!e1000_clean_tx_irq(adapter)))
break;
} else {
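The hunk above, repeated below for the other kernel branches, turns a bitwise AND
into a logical AND in the EtherCAT polling loop. The truth value of the break
condition is unchanged, but the evaluation differs: with & both cleanup routines run
on every pass, whereas with && the Tx cleanup is short-circuited as long as the Rx
path still reports work, and the loop only exits once both report no remaining work.
A minimal sketch of the difference (rx_work()/tx_work() stand in for the two cleanup
calls):

    if (!rx_work(adapter) & !tx_work(adapter))   /* both sides always evaluated */
            break;
    if (!rx_work(adapter) && !tx_work(adapter))  /* tx_work() skipped while rx_work() is nonzero */
            break;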
--- a/devices/e1000/e1000_main-2.6.20-ethercat.c Mon Feb 07 21:17:09 2011 +0100
+++ b/devices/e1000/e1000_main-2.6.20-ethercat.c Mon Feb 07 21:30:25 2011 +0100
@@ -3879,10 +3879,10 @@
for (i = 0; i < E1000_MAX_INTR; i++)
#ifdef CONFIG_E1000_NAPI
if (unlikely(!adapter->clean_rx(adapter, adapter->rx_ring,
- &ec_work_done, 100) &
+ &ec_work_done, 100) &&
!e1000_clean_tx_irq(adapter, adapter->tx_ring)))
#else
- if (unlikely(!adapter->clean_rx(adapter, adapter->rx_ring) &
+ if (unlikely(!adapter->clean_rx(adapter, adapter->rx_ring) &&
!e1000_clean_tx_irq(adapter, adapter->tx_ring)))
#endif
break;
@@ -4017,10 +4017,10 @@
for (i = 0; i < E1000_MAX_INTR; i++)
#ifdef CONFIG_E1000_NAPI
if (unlikely(!adapter->clean_rx(adapter, adapter->rx_ring,
- &ec_work_done, 100) &
+ &ec_work_done, 100) &&
!e1000_clean_tx_irq(adapter, adapter->tx_ring)))
#else
- if (unlikely(!adapter->clean_rx(adapter, adapter->rx_ring) &
+ if (unlikely(!adapter->clean_rx(adapter, adapter->rx_ring) &&
!e1000_clean_tx_irq(adapter, adapter->tx_ring)))
#endif
break;
--- a/devices/e1000/e1000_main-2.6.22-ethercat.c Mon Feb 07 21:17:09 2011 +0100
+++ b/devices/e1000/e1000_main-2.6.22-ethercat.c Mon Feb 07 21:30:25 2011 +0100
@@ -3860,10 +3860,10 @@
for (i = 0; i < E1000_MAX_INTR; i++)
#ifdef CONFIG_E1000_NAPI
if (unlikely(!adapter->clean_rx(adapter, adapter->rx_ring,
- &ec_work_done, 100) &
+ &ec_work_done, 100) &&
!e1000_clean_tx_irq(adapter, adapter->tx_ring)))
#else
- if (unlikely(!adapter->clean_rx(adapter, adapter->rx_ring) &
+ if (unlikely(!adapter->clean_rx(adapter, adapter->rx_ring) &&
!e1000_clean_tx_irq(adapter, adapter->tx_ring)))
#endif
break;
@@ -3974,10 +3974,10 @@
for (i = 0; i < E1000_MAX_INTR; i++)
#ifdef CONFIG_E1000_NAPI
if (unlikely(!adapter->clean_rx(adapter, adapter->rx_ring,
- &ec_work_done, 100) &
+ &ec_work_done, 100) &&
!e1000_clean_tx_irq(adapter, adapter->tx_ring)))
#else
- if (unlikely(!adapter->clean_rx(adapter, adapter->rx_ring) &
+ if (unlikely(!adapter->clean_rx(adapter, adapter->rx_ring) &&
!e1000_clean_tx_irq(adapter, adapter->tx_ring)))
#endif
break;
--- a/devices/e1000/e1000_main-2.6.24-ethercat.c Mon Feb 07 21:17:09 2011 +0100
+++ b/devices/e1000/e1000_main-2.6.24-ethercat.c Mon Feb 07 21:30:25 2011 +0100
@@ -3857,10 +3857,10 @@
for (i = 0; i < E1000_MAX_INTR; i++)
#ifdef CONFIG_E1000_NAPI
if (unlikely(!adapter->clean_rx(adapter, adapter->rx_ring,
- &ec_work_done, 100) &
+ &ec_work_done, 100) &&
!e1000_clean_tx_irq(adapter, adapter->tx_ring)))
#else
- if (unlikely(!adapter->clean_rx(adapter, adapter->rx_ring) &
+ if (unlikely(!adapter->clean_rx(adapter, adapter->rx_ring) &&
!e1000_clean_tx_irq(adapter, adapter->tx_ring)))
#endif
break;
@@ -3973,10 +3973,10 @@
for (i = 0; i < E1000_MAX_INTR; i++)
#ifdef CONFIG_E1000_NAPI
if (unlikely(!adapter->clean_rx(adapter, adapter->rx_ring,
- &ec_work_done, 100) &
+ &ec_work_done, 100) &&
!e1000_clean_tx_irq(adapter, adapter->tx_ring)))
#else
- if (unlikely(!adapter->clean_rx(adapter, adapter->rx_ring) &
+ if (unlikely(!adapter->clean_rx(adapter, adapter->rx_ring) &&
!e1000_clean_tx_irq(adapter, adapter->tx_ring)))
#endif
break;
--- a/devices/e1000/e1000_main-2.6.26-ethercat.c Mon Feb 07 21:17:09 2011 +0100
+++ b/devices/e1000/e1000_main-2.6.26-ethercat.c Mon Feb 07 21:30:25 2011 +0100
@@ -3934,10 +3934,10 @@
for (i = 0; i < E1000_MAX_INTR; i++) {
#ifdef CONFIG_E1000_NAPI
if (unlikely(!adapter->clean_rx(adapter, adapter->rx_ring,
- &ec_work_done, 100) &
+ &ec_work_done, 100) &&
!e1000_clean_tx_irq(adapter, adapter->tx_ring))) {
#else
- if (unlikely(!adapter->clean_rx(adapter, adapter->rx_ring) &
+ if (unlikely(!adapter->clean_rx(adapter, adapter->rx_ring) &&
!e1000_clean_tx_irq(adapter, adapter->tx_ring))) {
#endif
break;
@@ -4045,10 +4045,10 @@
for (i = 0; i < E1000_MAX_INTR; i++) {
#ifdef CONFIG_E1000_NAPI
if (unlikely(!adapter->clean_rx(adapter, adapter->rx_ring,
- &ec_work_done, 100) &
+ &ec_work_done, 100) &&
!e1000_clean_tx_irq(adapter, adapter->tx_ring))) {
#else
- if (unlikely(!adapter->clean_rx(adapter, adapter->rx_ring) &
+ if (unlikely(!adapter->clean_rx(adapter, adapter->rx_ring) &&
!e1000_clean_tx_irq(adapter, adapter->tx_ring))) {
#endif
break;
--- a/devices/e1000/e1000_main-2.6.27-ethercat.c Mon Feb 07 21:17:09 2011 +0100
+++ b/devices/e1000/e1000_main-2.6.27-ethercat.c Mon Feb 07 21:30:25 2011 +0100
@@ -3891,7 +3891,7 @@
int i, ec_work_done = 0;
for (i = 0; i < E1000_MAX_INTR; i++) {
if (unlikely(!adapter->clean_rx(adapter, adapter->rx_ring,
- &ec_work_done, 100) &
+ &ec_work_done, 100) &&
!e1000_clean_tx_irq(adapter, adapter->tx_ring))) {
break;
}
@@ -3975,7 +3975,7 @@
int i, ec_work_done = 0;
for (i = 0; i < E1000_MAX_INTR; i++) {
if (unlikely(!adapter->clean_rx(adapter, adapter->rx_ring,
- &ec_work_done, 100) &
+ &ec_work_done, 100) &&
!e1000_clean_tx_irq(adapter, adapter->tx_ring))) {
break;
}
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/devices/e1000/e1000_main-2.6.28-ethercat.c Mon Feb 07 21:30:25 2011 +0100
@@ -0,0 +1,5007 @@
+/*******************************************************************************
+
+ Intel PRO/1000 Linux driver
+ Copyright(c) 1999 - 2006 Intel Corporation.
+
+ This program is free software; you can redistribute it and/or modify it
+ under the terms and conditions of the GNU General Public License,
+ version 2, as published by the Free Software Foundation.
+
+ This program is distributed in the hope it will be useful, but WITHOUT
+ ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ more details.
+
+ You should have received a copy of the GNU General Public License along with
+ this program; if not, write to the Free Software Foundation, Inc.,
+ 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
+
+ The full GNU General Public License is included in this distribution in
+ the file called "COPYING".
+
+ Contact Information:
+ Linux NICS <linux.nics@intel.com>
+ e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
+ Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
+
+ vim: noexpandtab
+
+*******************************************************************************/
+
+#include "e1000-2.6.27-ethercat.h"
+#include <net/ip6_checksum.h>
+
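+/* The driver registers itself as "ec_e1000" to distinguish it from the
+ * stock in-tree e1000 driver. */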
+char e1000_driver_name[] = "ec_e1000";
+static char e1000_driver_string[] = "Intel(R) PRO/1000 Network Driver";
+#define DRV_VERSION "7.3.21-k3-NAPI"
+const char e1000_driver_version[] = DRV_VERSION;
+static const char e1000_copyright[] = "Copyright (c) 1999-2006 Intel Corporation.";
+
+/* e1000_pci_tbl - PCI Device ID Table
+ *
+ * Last entry must be all 0s
+ *
+ * Macro expands to...
+ * {PCI_DEVICE(PCI_VENDOR_ID_INTEL, device_id)}
+ */
+static struct pci_device_id e1000_pci_tbl[] = {
+ INTEL_E1000_ETHERNET_DEVICE(0x1000),
+ INTEL_E1000_ETHERNET_DEVICE(0x1001),
+ INTEL_E1000_ETHERNET_DEVICE(0x1004),
+ INTEL_E1000_ETHERNET_DEVICE(0x1008),
+ INTEL_E1000_ETHERNET_DEVICE(0x1009),
+ INTEL_E1000_ETHERNET_DEVICE(0x100C),
+ INTEL_E1000_ETHERNET_DEVICE(0x100D),
+ INTEL_E1000_ETHERNET_DEVICE(0x100E),
+ INTEL_E1000_ETHERNET_DEVICE(0x100F),
+ INTEL_E1000_ETHERNET_DEVICE(0x1010),
+ INTEL_E1000_ETHERNET_DEVICE(0x1011),
+ INTEL_E1000_ETHERNET_DEVICE(0x1012),
+ INTEL_E1000_ETHERNET_DEVICE(0x1013),
+ INTEL_E1000_ETHERNET_DEVICE(0x1014),
+ INTEL_E1000_ETHERNET_DEVICE(0x1015),
+ INTEL_E1000_ETHERNET_DEVICE(0x1016),
+ INTEL_E1000_ETHERNET_DEVICE(0x1017),
+ INTEL_E1000_ETHERNET_DEVICE(0x1018),
+ INTEL_E1000_ETHERNET_DEVICE(0x1019),
+ INTEL_E1000_ETHERNET_DEVICE(0x101A),
+ INTEL_E1000_ETHERNET_DEVICE(0x101D),
+ INTEL_E1000_ETHERNET_DEVICE(0x101E),
+ INTEL_E1000_ETHERNET_DEVICE(0x1026),
+ INTEL_E1000_ETHERNET_DEVICE(0x1027),
+ INTEL_E1000_ETHERNET_DEVICE(0x1028),
+ INTEL_E1000_ETHERNET_DEVICE(0x1075),
+ INTEL_E1000_ETHERNET_DEVICE(0x1076),
+ INTEL_E1000_ETHERNET_DEVICE(0x1077),
+ INTEL_E1000_ETHERNET_DEVICE(0x1078),
+ INTEL_E1000_ETHERNET_DEVICE(0x1079),
+ INTEL_E1000_ETHERNET_DEVICE(0x107A),
+ INTEL_E1000_ETHERNET_DEVICE(0x107B),
+ INTEL_E1000_ETHERNET_DEVICE(0x107C),
+ INTEL_E1000_ETHERNET_DEVICE(0x108A),
+ INTEL_E1000_ETHERNET_DEVICE(0x1099),
+ INTEL_E1000_ETHERNET_DEVICE(0x10B5),
+ /* required last entry */
+ {0,}
+};
+
+// do not auto-load driver
+// MODULE_DEVICE_TABLE(pci, e1000_pci_tbl);
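+// Leaving MODULE_DEVICE_TABLE() commented out keeps the PCI aliases
+// unexported, so modprobe/udev will not auto-load this module; it has to
+// be loaded explicitly.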
+
+int e1000_up(struct e1000_adapter *adapter);
+void e1000_down(struct e1000_adapter *adapter);
+void e1000_reinit_locked(struct e1000_adapter *adapter);
+void e1000_reset(struct e1000_adapter *adapter);
+int e1000_set_spd_dplx(struct e1000_adapter *adapter, u16 spddplx);
+int e1000_setup_all_tx_resources(struct e1000_adapter *adapter);
+int e1000_setup_all_rx_resources(struct e1000_adapter *adapter);
+void e1000_free_all_tx_resources(struct e1000_adapter *adapter);
+void e1000_free_all_rx_resources(struct e1000_adapter *adapter);
+static int e1000_setup_tx_resources(struct e1000_adapter *adapter,
+ struct e1000_tx_ring *txdr);
+static int e1000_setup_rx_resources(struct e1000_adapter *adapter,
+ struct e1000_rx_ring *rxdr);
+static void e1000_free_tx_resources(struct e1000_adapter *adapter,
+ struct e1000_tx_ring *tx_ring);
+static void e1000_free_rx_resources(struct e1000_adapter *adapter,
+ struct e1000_rx_ring *rx_ring);
+void e1000_update_stats(struct e1000_adapter *adapter);
+
+static int e1000_init_module(void);
+static void e1000_exit_module(void);
+static int e1000_probe(struct pci_dev *pdev, const struct pci_device_id *ent);
+static void __devexit e1000_remove(struct pci_dev *pdev);
+static int e1000_alloc_queues(struct e1000_adapter *adapter);
+static int e1000_sw_init(struct e1000_adapter *adapter);
+static int e1000_open(struct net_device *netdev);
+static int e1000_close(struct net_device *netdev);
+static void e1000_configure_tx(struct e1000_adapter *adapter);
+static void e1000_configure_rx(struct e1000_adapter *adapter);
+static void e1000_setup_rctl(struct e1000_adapter *adapter);
+static void e1000_clean_all_tx_rings(struct e1000_adapter *adapter);
+static void e1000_clean_all_rx_rings(struct e1000_adapter *adapter);
+static void e1000_clean_tx_ring(struct e1000_adapter *adapter,
+ struct e1000_tx_ring *tx_ring);
+static void e1000_clean_rx_ring(struct e1000_adapter *adapter,
+ struct e1000_rx_ring *rx_ring);
+static void e1000_set_rx_mode(struct net_device *netdev);
+static void e1000_update_phy_info(unsigned long data);
+static void e1000_watchdog(unsigned long data);
+static void e1000_82547_tx_fifo_stall(unsigned long data);
+static int e1000_xmit_frame(struct sk_buff *skb, struct net_device *netdev);
+static struct net_device_stats * e1000_get_stats(struct net_device *netdev);
+static int e1000_change_mtu(struct net_device *netdev, int new_mtu);
+static int e1000_set_mac(struct net_device *netdev, void *p);
+void ec_poll(struct net_device *);
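+// ec_poll() replaces the interrupt/NAPI receive path when the device is
+// attached to the EtherCAT master; it is passed to ecdev_offer() in
+// e1000_probe() below.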
+static irqreturn_t e1000_intr(int irq, void *data);
+static irqreturn_t e1000_intr_msi(int irq, void *data);
+static bool e1000_clean_tx_irq(struct e1000_adapter *adapter,
+ struct e1000_tx_ring *tx_ring);
+static int e1000_clean(struct napi_struct *napi, int budget);
+static bool e1000_clean_rx_irq(struct e1000_adapter *adapter,
+ struct e1000_rx_ring *rx_ring,
+ int *work_done, int work_to_do);
+static void e1000_alloc_rx_buffers(struct e1000_adapter *adapter,
+ struct e1000_rx_ring *rx_ring,
+ int cleaned_count);
+static int e1000_ioctl(struct net_device *netdev, struct ifreq *ifr, int cmd);
+static int e1000_mii_ioctl(struct net_device *netdev, struct ifreq *ifr,
+ int cmd);
+static void e1000_enter_82542_rst(struct e1000_adapter *adapter);
+static void e1000_leave_82542_rst(struct e1000_adapter *adapter);
+static void e1000_tx_timeout(struct net_device *dev);
+static void e1000_reset_task(struct work_struct *work);
+static void e1000_smartspeed(struct e1000_adapter *adapter);
+static int e1000_82547_fifo_workaround(struct e1000_adapter *adapter,
+ struct sk_buff *skb);
+
+static void e1000_vlan_rx_register(struct net_device *netdev, struct vlan_group *grp);
+static void e1000_vlan_rx_add_vid(struct net_device *netdev, u16 vid);
+static void e1000_vlan_rx_kill_vid(struct net_device *netdev, u16 vid);
+static void e1000_restore_vlan(struct e1000_adapter *adapter);
+
+static int e1000_suspend(struct pci_dev *pdev, pm_message_t state);
+#ifdef CONFIG_PM
+static int e1000_resume(struct pci_dev *pdev);
+#endif
+static void e1000_shutdown(struct pci_dev *pdev);
+
+#ifdef CONFIG_NET_POLL_CONTROLLER
+/* for netdump / net console */
+static void e1000_netpoll (struct net_device *netdev);
+#endif
+
+#define COPYBREAK_DEFAULT 256
+static unsigned int copybreak __read_mostly = COPYBREAK_DEFAULT;
+module_param(copybreak, uint, 0644);
+MODULE_PARM_DESC(copybreak,
+ "Maximum size of packet that is copied to a new buffer on receive");
+
+static pci_ers_result_t e1000_io_error_detected(struct pci_dev *pdev,
+ pci_channel_state_t state);
+static pci_ers_result_t e1000_io_slot_reset(struct pci_dev *pdev);
+static void e1000_io_resume(struct pci_dev *pdev);
+
+static struct pci_error_handlers e1000_err_handler = {
+ .error_detected = e1000_io_error_detected,
+ .slot_reset = e1000_io_slot_reset,
+ .resume = e1000_io_resume,
+};
+
+static struct pci_driver e1000_driver = {
+ .name = e1000_driver_name,
+ .id_table = e1000_pci_tbl,
+ .probe = e1000_probe,
+ .remove = __devexit_p(e1000_remove),
+#ifdef CONFIG_PM
+ /* Power Management Hooks */
+ .suspend = e1000_suspend,
+ .resume = e1000_resume,
+#endif
+ .shutdown = e1000_shutdown,
+ .err_handler = &e1000_err_handler
+};
+
+MODULE_AUTHOR("Florian Pose <fp@igh-essen.com>");
+MODULE_DESCRIPTION("EtherCAT-capable Intel(R) PRO/1000 Network Driver");
+MODULE_LICENSE("GPL");
+MODULE_VERSION(DRV_VERSION);
+
+static int debug = NETIF_MSG_DRV | NETIF_MSG_PROBE;
+module_param(debug, int, 0);
+MODULE_PARM_DESC(debug, "Debug level (0=none,...,16=all)");
+
+/**
+ * e1000_init_module - Driver Registration Routine
+ *
+ * e1000_init_module is the first routine called when the driver is
+ * loaded. All it does is register with the PCI subsystem.
+ **/
+
+static int __init e1000_init_module(void)
+{
+ int ret;
+ printk(KERN_INFO "%s - version %s\n",
+ e1000_driver_string, e1000_driver_version);
+
+ printk(KERN_INFO "%s\n", e1000_copyright);
+
+ ret = pci_register_driver(&e1000_driver);
+ if (copybreak != COPYBREAK_DEFAULT) {
+ if (copybreak == 0)
+ printk(KERN_INFO "e1000: copybreak disabled\n");
+ else
+ printk(KERN_INFO "e1000: copybreak enabled for "
+ "packets <= %u bytes\n", copybreak);
+ }
+ return ret;
+}
+
+module_init(e1000_init_module);
+
+/**
+ * e1000_exit_module - Driver Exit Cleanup Routine
+ *
+ * e1000_exit_module is called just before the driver is removed
+ * from memory.
+ **/
+
+static void __exit e1000_exit_module(void)
+{
+ pci_unregister_driver(&e1000_driver);
+}
+
+module_exit(e1000_exit_module);
+
+static int e1000_request_irq(struct e1000_adapter *adapter)
+{
+ struct e1000_hw *hw = &adapter->hw;
+ struct net_device *netdev = adapter->netdev;
+ irq_handler_t handler = e1000_intr;
+ int irq_flags = IRQF_SHARED;
+ int err;
+
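+ /* EtherCAT-attached devices are driven by ec_poll() from the master;
+ * no interrupt is requested for them. */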
+ if (adapter->ecdev)
+ return 0;
+
+ if (hw->mac_type >= e1000_82571) {
+ adapter->have_msi = !pci_enable_msi(adapter->pdev);
+ if (adapter->have_msi) {
+ handler = e1000_intr_msi;
+ irq_flags = 0;
+ }
+ }
+
+ err = request_irq(adapter->pdev->irq, handler, irq_flags, netdev->name,
+ netdev);
+ if (err) {
+ if (adapter->have_msi)
+ pci_disable_msi(adapter->pdev);
+ DPRINTK(PROBE, ERR,
+ "Unable to allocate interrupt Error: %d\n", err);
+ }
+
+ return err;
+}
+
+static void e1000_free_irq(struct e1000_adapter *adapter)
+{
+ struct net_device *netdev = adapter->netdev;
+
+ if (adapter->ecdev)
+ return;
+
+ free_irq(adapter->pdev->irq, netdev);
+
+ if (adapter->have_msi)
+ pci_disable_msi(adapter->pdev);
+}
+
+/**
+ * e1000_irq_disable - Mask off interrupt generation on the NIC
+ * @adapter: board private structure
+ **/
+
+static void e1000_irq_disable(struct e1000_adapter *adapter)
+{
+ struct e1000_hw *hw = &adapter->hw;
+
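+ /* No IRQ was requested for EtherCAT-attached devices, so there is
+ * nothing to mask here. */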
+ if (adapter->ecdev)
+ return;
+
+ ew32(IMC, ~0);
+ E1000_WRITE_FLUSH();
+ synchronize_irq(adapter->pdev->irq);
+}
+
+/**
+ * e1000_irq_enable - Enable default interrupt generation settings
+ * @adapter: board private structure
+ **/
+
+static void e1000_irq_enable(struct e1000_adapter *adapter)
+{
+ struct e1000_hw *hw = &adapter->hw;
+
+ if (adapter->ecdev)
+ return;
+
+ ew32(IMS, IMS_ENABLE_MASK);
+ E1000_WRITE_FLUSH();
+}
+
+static void e1000_update_mng_vlan(struct e1000_adapter *adapter)
+{
+ struct e1000_hw *hw = &adapter->hw;
+ struct net_device *netdev = adapter->netdev;
+ u16 vid = hw->mng_cookie.vlan_id;
+ u16 old_vid = adapter->mng_vlan_id;
+ if (adapter->vlgrp) {
+ if (!vlan_group_get_device(adapter->vlgrp, vid)) {
+ if (hw->mng_cookie.status &
+ E1000_MNG_DHCP_COOKIE_STATUS_VLAN_SUPPORT) {
+ e1000_vlan_rx_add_vid(netdev, vid);
+ adapter->mng_vlan_id = vid;
+ } else
+ adapter->mng_vlan_id = E1000_MNG_VLAN_NONE;
+
+ if ((old_vid != (u16)E1000_MNG_VLAN_NONE) &&
+ (vid != old_vid) &&
+ !vlan_group_get_device(adapter->vlgrp, old_vid))
+ e1000_vlan_rx_kill_vid(netdev, old_vid);
+ } else
+ adapter->mng_vlan_id = vid;
+ }
+}
+
+/**
+ * e1000_release_hw_control - release control of the h/w to f/w
+ * @adapter: address of board private structure
+ *
+ * e1000_release_hw_control resets {CTRL_EXT|FWSM}:DRV_LOAD bit.
+ * For ASF and Pass Through versions of f/w this means that the
+ * driver is no longer loaded. For AMT version (only with 82573)
+ * of the f/w this means that the network i/f is closed.
+ *
+ **/
+
+static void e1000_release_hw_control(struct e1000_adapter *adapter)
+{
+ u32 ctrl_ext;
+ u32 swsm;
+ struct e1000_hw *hw = &adapter->hw;
+
+ /* Let firmware take over control of h/w */
+ switch (hw->mac_type) {
+ case e1000_82573:
+ swsm = er32(SWSM);
+ ew32(SWSM, swsm & ~E1000_SWSM_DRV_LOAD);
+ break;
+ case e1000_82571:
+ case e1000_82572:
+ case e1000_80003es2lan:
+ case e1000_ich8lan:
+ ctrl_ext = er32(CTRL_EXT);
+ ew32(CTRL_EXT, ctrl_ext & ~E1000_CTRL_EXT_DRV_LOAD);
+ break;
+ default:
+ break;
+ }
+}
+
+/**
+ * e1000_get_hw_control - get control of the h/w from f/w
+ * @adapter: address of board private structure
+ *
+ * e1000_get_hw_control sets {CTRL_EXT|FWSM}:DRV_LOAD bit.
+ * For ASF and Pass Through versions of f/w this means that
+ * the driver is loaded. For AMT version (only with 82573)
+ * of the f/w this means that the network i/f is open.
+ *
+ **/
+
+static void e1000_get_hw_control(struct e1000_adapter *adapter)
+{
+ u32 ctrl_ext;
+ u32 swsm;
+ struct e1000_hw *hw = &adapter->hw;
+
+ /* Let firmware know the driver has taken over */
+ switch (hw->mac_type) {
+ case e1000_82573:
+ swsm = er32(SWSM);
+ ew32(SWSM, swsm | E1000_SWSM_DRV_LOAD);
+ break;
+ case e1000_82571:
+ case e1000_82572:
+ case e1000_80003es2lan:
+ case e1000_ich8lan:
+ ctrl_ext = er32(CTRL_EXT);
+ ew32(CTRL_EXT, ctrl_ext | E1000_CTRL_EXT_DRV_LOAD);
+ break;
+ default:
+ break;
+ }
+}
+
+static void e1000_init_manageability(struct e1000_adapter *adapter)
+{
+ struct e1000_hw *hw = &adapter->hw;
+
+ if (adapter->en_mng_pt) {
+ u32 manc = er32(MANC);
+
+ /* disable hardware interception of ARP */
+ manc &= ~(E1000_MANC_ARP_EN);
+
+ /* enable receiving management packets to the host */
+ /* this will probably generate destination unreachable messages
+ * from the host OS, but the packets will be handled on SMBUS */
+ if (hw->has_manc2h) {
+ u32 manc2h = er32(MANC2H);
+
+ manc |= E1000_MANC_EN_MNG2HOST;
+#define E1000_MNG2HOST_PORT_623 (1 << 5)
+#define E1000_MNG2HOST_PORT_664 (1 << 6)
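+ /* UDP ports 623 and 664 are the standard ASF/RMCP remote-management
+ * ports. */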
+ manc2h |= E1000_MNG2HOST_PORT_623;
+ manc2h |= E1000_MNG2HOST_PORT_664;
+ ew32(MANC2H, manc2h);
+ }
+
+ ew32(MANC, manc);
+ }
+}
+
+static void e1000_release_manageability(struct e1000_adapter *adapter)
+{
+ struct e1000_hw *hw = &adapter->hw;
+
+ if (adapter->en_mng_pt) {
+ u32 manc = er32(MANC);
+
+ /* re-enable hardware interception of ARP */
+ manc |= E1000_MANC_ARP_EN;
+
+ if (hw->has_manc2h)
+ manc &= ~E1000_MANC_EN_MNG2HOST;
+
+ /* don't explicitly have to mess with MANC2H since
+ * MANC has an enable/disable bit that gates MANC2H */
+
+ ew32(MANC, manc);
+ }
+}
+
+/**
+ * e1000_configure - configure the hardware for RX and TX
+ * @adapter: board private structure
+ **/
+static void e1000_configure(struct e1000_adapter *adapter)
+{
+ struct net_device *netdev = adapter->netdev;
+ int i;
+
+ e1000_set_rx_mode(netdev);
+
+ e1000_restore_vlan(adapter);
+ e1000_init_manageability(adapter);
+
+ e1000_configure_tx(adapter);
+ e1000_setup_rctl(adapter);
+ e1000_configure_rx(adapter);
+ /* call E1000_DESC_UNUSED which always leaves
+ * at least 1 descriptor unused to make sure
+ * next_to_use != next_to_clean */
+ for (i = 0; i < adapter->num_rx_queues; i++) {
+ struct e1000_rx_ring *ring = &adapter->rx_ring[i];
+ if (adapter->ecdev) {
+ /* fill rx ring completely! */
+ adapter->alloc_rx_buf(adapter, ring, ring->count);
+ } else {
+ /* this one leaves the last ring element unallocated! */
+ adapter->alloc_rx_buf(adapter, ring,
+ E1000_DESC_UNUSED(ring));
+ }
+ }
+
+ adapter->tx_queue_len = netdev->tx_queue_len;
+}
+
+int e1000_up(struct e1000_adapter *adapter)
+{
+ struct e1000_hw *hw = &adapter->hw;
+
+ /* hardware has been reset, we need to reload some things */
+ e1000_configure(adapter);
+
+ clear_bit(__E1000_DOWN, &adapter->flags);
+
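+ /* NAPI, interrupt enabling and the watchdog kick-off only apply when
+ * the network stack, not the EtherCAT master, owns the device. */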
+ if (!adapter->ecdev) {
+ napi_enable(&adapter->napi);
+
+ e1000_irq_enable(adapter);
+
+ /* fire a link change interrupt to start the watchdog */
+ ew32(ICS, E1000_ICS_LSC);
+ }
+ return 0;
+}
+
+/**
+ * e1000_power_up_phy - restore link in case the phy was powered down
+ * @adapter: address of board private structure
+ *
+ * The phy may be powered down to save power and turn off link when the
+ * driver is unloaded and wake on lan is not enabled (among others)
+ * *** this routine MUST be followed by a call to e1000_reset ***
+ *
+ **/
+
+void e1000_power_up_phy(struct e1000_adapter *adapter)
+{
+ struct e1000_hw *hw = &adapter->hw;
+ u16 mii_reg = 0;
+
+ /* Just clear the power down bit to wake the phy back up */
+ if (hw->media_type == e1000_media_type_copper) {
+ /* according to the manual, the phy will retain its
+ * settings across a power-down/up cycle */
+ e1000_read_phy_reg(hw, PHY_CTRL, &mii_reg);
+ mii_reg &= ~MII_CR_POWER_DOWN;
+ e1000_write_phy_reg(hw, PHY_CTRL, mii_reg);
+ }
+}
+
+static void e1000_power_down_phy(struct e1000_adapter *adapter)
+{
+ struct e1000_hw *hw = &adapter->hw;
+
+ /* Power down the PHY so no link is implied when interface is down *
+ * The PHY cannot be powered down if any of the following is true *
+ * (a) WoL is enabled
+ * (b) AMT is active
+ * (c) SoL/IDER session is active */
+ if (!adapter->wol && hw->mac_type >= e1000_82540 &&
+ hw->media_type == e1000_media_type_copper) {
+ u16 mii_reg = 0;
+
+ switch (hw->mac_type) {
+ case e1000_82540:
+ case e1000_82545:
+ case e1000_82545_rev_3:
+ case e1000_82546:
+ case e1000_82546_rev_3:
+ case e1000_82541:
+ case e1000_82541_rev_2:
+ case e1000_82547:
+ case e1000_82547_rev_2:
+ if (er32(MANC) & E1000_MANC_SMBUS_EN)
+ goto out;
+ break;
+ case e1000_82571:
+ case e1000_82572:
+ case e1000_82573:
+ case e1000_80003es2lan:
+ case e1000_ich8lan:
+ if (e1000_check_mng_mode(hw) ||
+ e1000_check_phy_reset_block(hw))
+ goto out;
+ break;
+ default:
+ goto out;
+ }
+ e1000_read_phy_reg(hw, PHY_CTRL, &mii_reg);
+ mii_reg |= MII_CR_POWER_DOWN;
+ e1000_write_phy_reg(hw, PHY_CTRL, mii_reg);
+ mdelay(1);
+ }
+out:
+ return;
+}
+
+void e1000_down(struct e1000_adapter *adapter)
+{
+ struct net_device *netdev = adapter->netdev;
+
+ /* signal that we're down so the interrupt handler does not
+ * reschedule our watchdog timer */
+ set_bit(__E1000_DOWN, &adapter->flags);
+
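+ /* NAPI, IRQ masking and the kernel timers only need to be torn down
+ * for devices owned by the network stack. */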
+ if (!adapter->ecdev) {
+ napi_disable(&adapter->napi);
+
+ e1000_irq_disable(adapter);
+
+ del_timer_sync(&adapter->tx_fifo_stall_timer);
+ del_timer_sync(&adapter->watchdog_timer);
+ del_timer_sync(&adapter->phy_info_timer);
+ }
+
+ netdev->tx_queue_len = adapter->tx_queue_len;
+ adapter->link_speed = 0;
+ adapter->link_duplex = 0;
+ if (!adapter->ecdev) {
+ netif_carrier_off(netdev);
+ netif_stop_queue(netdev);
+ }
+
+ e1000_reset(adapter);
+ e1000_clean_all_tx_rings(adapter);
+ e1000_clean_all_rx_rings(adapter);
+}
+
+void e1000_reinit_locked(struct e1000_adapter *adapter)
+{
+ WARN_ON(in_interrupt());
+ while (test_and_set_bit(__E1000_RESETTING, &adapter->flags))
+ msleep(1);
+ e1000_down(adapter);
+ e1000_up(adapter);
+ clear_bit(__E1000_RESETTING, &adapter->flags);
+}
+
+void e1000_reset(struct e1000_adapter *adapter)
+{
+ struct e1000_hw *hw = &adapter->hw;
+ u32 pba = 0, tx_space, min_tx_space, min_rx_space;
+ u16 fc_high_water_mark = E1000_FC_HIGH_DIFF;
+ bool legacy_pba_adjust = false;
+
+ /* Repartition Pba for greater than 9k mtu
+ * To take effect CTRL.RST is required.
+ */
+
+ switch (hw->mac_type) {
+ case e1000_82542_rev2_0:
+ case e1000_82542_rev2_1:
+ case e1000_82543:
+ case e1000_82544:
+ case e1000_82540:
+ case e1000_82541:
+ case e1000_82541_rev_2:
+ legacy_pba_adjust = true;
+ pba = E1000_PBA_48K;
+ break;
+ case e1000_82545:
+ case e1000_82545_rev_3:
+ case e1000_82546:
+ case e1000_82546_rev_3:
+ pba = E1000_PBA_48K;
+ break;
+ case e1000_82547:
+ case e1000_82547_rev_2:
+ legacy_pba_adjust = true;
+ pba = E1000_PBA_30K;
+ break;
+ case e1000_82571:
+ case e1000_82572:
+ case e1000_80003es2lan:
+ pba = E1000_PBA_38K;
+ break;
+ case e1000_82573:
+ pba = E1000_PBA_20K;
+ break;
+ case e1000_ich8lan:
+ pba = E1000_PBA_8K;
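+ /* fall through */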
+ case e1000_undefined:
+ case e1000_num_macs:
+ break;
+ }
+
+ if (legacy_pba_adjust) {
+ if (adapter->netdev->mtu > E1000_RXBUFFER_8192)
+ pba -= 8; /* allocate more FIFO for Tx */
+
+ if (hw->mac_type == e1000_82547) {
+ adapter->tx_fifo_head = 0;
+ adapter->tx_head_addr = pba << E1000_TX_HEAD_ADDR_SHIFT;
+ adapter->tx_fifo_size =
+ (E1000_PBA_40K - pba) << E1000_PBA_BYTES_SHIFT;
+ atomic_set(&adapter->tx_fifo_stall, 0);
+ }
+ } else if (hw->max_frame_size > MAXIMUM_ETHERNET_FRAME_SIZE) {
+ /* adjust PBA for jumbo frames */
+ ew32(PBA, pba);
+
+ /* To maintain wire speed transmits, the Tx FIFO should be
+ * large enough to accommodate two full transmit packets,
+ * rounded up to the next 1KB and expressed in KB. Likewise,
+ * the Rx FIFO should be large enough to accommodate at least
+ * one full receive packet and is similarly rounded up and
+ * expressed in KB. */
+ pba = er32(PBA);
+ /* upper 16 bits has Tx packet buffer allocation size in KB */
+ tx_space = pba >> 16;
+ /* lower 16 bits has Rx packet buffer allocation size in KB */
+ pba &= 0xffff;
+ /* don't include ethernet FCS because hardware appends/strips */
+ min_rx_space = adapter->netdev->mtu + ENET_HEADER_SIZE +
+ VLAN_TAG_SIZE;
+ min_tx_space = min_rx_space;
+ min_tx_space *= 2;
+ min_tx_space = ALIGN(min_tx_space, 1024);
+ min_tx_space >>= 10;
+ min_rx_space = ALIGN(min_rx_space, 1024);
+ min_rx_space >>= 10;
+
+ /* If current Tx allocation is less than the min Tx FIFO size,
+ * and the min Tx FIFO size is less than the current Rx FIFO
+ * allocation, take space away from current Rx allocation */
+ if (tx_space < min_tx_space &&
+ ((min_tx_space - tx_space) < pba)) {
+ pba = pba - (min_tx_space - tx_space);
+
+ /* PCI/PCIx hardware has PBA alignment constraints */
+ switch (hw->mac_type) {
+ case e1000_82545 ... e1000_82546_rev_3:
+ pba &= ~(E1000_PBA_8K - 1);
+ break;
+ default:
+ break;
+ }
+
+ /* if short on rx space, rx wins and must trump tx
+ * adjustment or use Early Receive if available */
+ if (pba < min_rx_space) {
+ switch (hw->mac_type) {
+ case e1000_82573:
+ /* ERT enabled in e1000_configure_rx */
+ break;
+ default:
+ pba = min_rx_space;
+ break;
+ }
+ }
+ }
+ }
+
+ ew32(PBA, pba);
+
+ /* flow control settings */
+ /* Set the FC high water mark to 90% of the FIFO size.
+ * Required to clear last 3 LSB */
+ fc_high_water_mark = ((pba * 9216)/10) & 0xFFF8;
+ /* We can't use 90% on small FIFOs because the remainder
+ * would be less than 1 full frame. In this case, we size
+ * it to allow at least a full frame above the high water
+ * mark. */
+ if (pba < E1000_PBA_16K)
+ fc_high_water_mark = (pba * 1024) - 1600;
+
+ hw->fc_high_water = fc_high_water_mark;
+ hw->fc_low_water = fc_high_water_mark - 8;
+ if (hw->mac_type == e1000_80003es2lan)
+ hw->fc_pause_time = 0xFFFF;
+ else
+ hw->fc_pause_time = E1000_FC_PAUSE_TIME;
+ hw->fc_send_xon = 1;
+ hw->fc = hw->original_fc;
+
+ /* Allow time for pending master requests to run */
+ e1000_reset_hw(hw);
+ if (hw->mac_type >= e1000_82544)
+ ew32(WUC, 0);
+
+ if (e1000_init_hw(hw))
+ DPRINTK(PROBE, ERR, "Hardware Error\n");
+ e1000_update_mng_vlan(adapter);
+
+ /* if (adapter->hwflags & HWFLAGS_PHY_PWR_BIT) { */
+ if (hw->mac_type >= e1000_82544 &&
+ hw->mac_type <= e1000_82547_rev_2 &&
+ hw->autoneg == 1 &&
+ hw->autoneg_advertised == ADVERTISE_1000_FULL) {
+ u32 ctrl = er32(CTRL);
+ /* clear phy power management bit if we are in gig only mode,
+ * which if enabled will attempt negotiation to 100Mb, which
+ * can cause a loss of link at power off or driver unload */
+ ctrl &= ~E1000_CTRL_SWDPIN3;
+ ew32(CTRL, ctrl);
+ }
+
+ /* Enable h/w to recognize an 802.1Q VLAN Ethernet packet */
+ ew32(VET, ETHERNET_IEEE_VLAN_TYPE);
+
+ e1000_reset_adaptive(hw);
+ e1000_phy_get_info(hw, &adapter->phy_info);
+
+ if (!adapter->smart_power_down &&
+ (hw->mac_type == e1000_82571 ||
+ hw->mac_type == e1000_82572)) {
+ u16 phy_data = 0;
+ /* speed up time to link by disabling smart power down, ignore
+ * the return value of this function because there is nothing
+ * different we would do if it failed */
+ e1000_read_phy_reg(hw, IGP02E1000_PHY_POWER_MGMT,
+ &phy_data);
+ phy_data &= ~IGP02E1000_PM_SPD;
+ e1000_write_phy_reg(hw, IGP02E1000_PHY_POWER_MGMT,
+ phy_data);
+ }
+
+ e1000_release_manageability(adapter);
+}
+
+/**
+ * Dump the eeprom for users having checksum issues
+ **/
+static void e1000_dump_eeprom(struct e1000_adapter *adapter)
+{
+ struct net_device *netdev = adapter->netdev;
+ struct ethtool_eeprom eeprom;
+ const struct ethtool_ops *ops = netdev->ethtool_ops;
+ u8 *data;
+ int i;
+ u16 csum_old, csum_new = 0;
+
+ eeprom.len = ops->get_eeprom_len(netdev);
+ eeprom.offset = 0;
+
+ data = kmalloc(eeprom.len, GFP_KERNEL);
+ if (!data) {
+ printk(KERN_ERR "Unable to allocate memory to dump EEPROM"
+ " data\n");
+ return;
+ }
+
+ ops->get_eeprom(netdev, &eeprom, data);
+
+ csum_old = (data[EEPROM_CHECKSUM_REG * 2]) +
+ (data[EEPROM_CHECKSUM_REG * 2 + 1] << 8);
+ for (i = 0; i < EEPROM_CHECKSUM_REG * 2; i += 2)
+ csum_new += data[i] + (data[i + 1] << 8);
+ csum_new = EEPROM_SUM - csum_new;
+
+ printk(KERN_ERR "/*********************/\n");
+ printk(KERN_ERR "Current EEPROM Checksum : 0x%04x\n", csum_old);
+ printk(KERN_ERR "Calculated : 0x%04x\n", csum_new);
+
+ printk(KERN_ERR "Offset Values\n");
+ printk(KERN_ERR "======== ======\n");
+ print_hex_dump(KERN_ERR, "", DUMP_PREFIX_OFFSET, 16, 1, data, 128, 0);
+
+ printk(KERN_ERR "Include this output when contacting your support "
+ "provider.\n");
+ printk(KERN_ERR "This is not a software error! Something bad "
+ "happened to your hardware or\n");
+ printk(KERN_ERR "EEPROM image. Ignoring this "
+ "problem could result in further problems,\n");
+ printk(KERN_ERR "possibly loss of data, corruption or system hangs!\n");
+ printk(KERN_ERR "The MAC Address will be reset to 00:00:00:00:00:00, "
+ "which is invalid\n");
+ printk(KERN_ERR "and requires you to set the proper MAC "
+ "address manually before continuing\n");
+ printk(KERN_ERR "to enable this network device.\n");
+ printk(KERN_ERR "Please inspect the EEPROM dump and report the issue "
+ "to your hardware vendor\n");
+ printk(KERN_ERR "or Intel Customer Support.\n");
+ printk(KERN_ERR "/*********************/\n");
+
+ kfree(data);
+}
+
+/**
+ * e1000_is_need_ioport - determine if an adapter needs ioport resources or not
+ * @pdev: PCI device information struct
+ *
+ * Return true if an adapter needs ioport resources
+ **/
+static int e1000_is_need_ioport(struct pci_dev *pdev)
+{
+ switch (pdev->device) {
+ case E1000_DEV_ID_82540EM:
+ case E1000_DEV_ID_82540EM_LOM:
+ case E1000_DEV_ID_82540EP:
+ case E1000_DEV_ID_82540EP_LOM:
+ case E1000_DEV_ID_82540EP_LP:
+ case E1000_DEV_ID_82541EI:
+ case E1000_DEV_ID_82541EI_MOBILE:
+ case E1000_DEV_ID_82541ER:
+ case E1000_DEV_ID_82541ER_LOM:
+ case E1000_DEV_ID_82541GI:
+ case E1000_DEV_ID_82541GI_LF:
+ case E1000_DEV_ID_82541GI_MOBILE:
+ case E1000_DEV_ID_82544EI_COPPER:
+ case E1000_DEV_ID_82544EI_FIBER:
+ case E1000_DEV_ID_82544GC_COPPER:
+ case E1000_DEV_ID_82544GC_LOM:
+ case E1000_DEV_ID_82545EM_COPPER:
+ case E1000_DEV_ID_82545EM_FIBER:
+ case E1000_DEV_ID_82546EB_COPPER:
+ case E1000_DEV_ID_82546EB_FIBER:
+ case E1000_DEV_ID_82546EB_QUAD_COPPER:
+ return true;
+ default:
+ return false;
+ }
+}
+
+/**
+ * e1000_probe - Device Initialization Routine
+ * @pdev: PCI device information struct
+ * @ent: entry in e1000_pci_tbl
+ *
+ * Returns 0 on success, negative on failure
+ *
+ * e1000_probe initializes an adapter identified by a pci_dev structure.
+ * The OS initialization, configuring of the adapter private structure,
+ * and a hardware reset occur.
+ **/
+static int __devinit e1000_probe(struct pci_dev *pdev,
+ const struct pci_device_id *ent)
+{
+ struct net_device *netdev;
+ struct e1000_adapter *adapter;
+ struct e1000_hw *hw;
+
+ static int cards_found = 0;
+ static int global_quad_port_a = 0; /* global ksp3 port a indication */
+ int i, err, pci_using_dac;
+ u16 eeprom_data = 0;
+ u16 eeprom_apme_mask = E1000_EEPROM_APME;
+ int bars, need_ioport;
+ DECLARE_MAC_BUF(mac);
+
+ /* do not allocate ioport bars when not needed */
+ need_ioport = e1000_is_need_ioport(pdev);
+ if (need_ioport) {
+ bars = pci_select_bars(pdev, IORESOURCE_MEM | IORESOURCE_IO);
+ err = pci_enable_device(pdev);
+ } else {
+ bars = pci_select_bars(pdev, IORESOURCE_MEM);
+ err = pci_enable_device_mem(pdev);
+ }
+ if (err)
+ return err;
+
+ if (!pci_set_dma_mask(pdev, DMA_64BIT_MASK) &&
+ !pci_set_consistent_dma_mask(pdev, DMA_64BIT_MASK)) {
+ pci_using_dac = 1;
+ } else {
+ err = pci_set_dma_mask(pdev, DMA_32BIT_MASK);
+ if (err) {
+ err = pci_set_consistent_dma_mask(pdev, DMA_32BIT_MASK);
+ if (err) {
+ E1000_ERR("No usable DMA configuration, "
+ "aborting\n");
+ goto err_dma;
+ }
+ }
+ pci_using_dac = 0;
+ }
+
+ err = pci_request_selected_regions(pdev, bars, e1000_driver_name);
+ if (err)
+ goto err_pci_reg;
+
+ pci_set_master(pdev);
+
+ err = -ENOMEM;
+ netdev = alloc_etherdev(sizeof(struct e1000_adapter));
+ if (!netdev)
+ goto err_alloc_etherdev;
+
+ SET_NETDEV_DEV(netdev, &pdev->dev);
+
+ pci_set_drvdata(pdev, netdev);
+ adapter = netdev_priv(netdev);
+ adapter->netdev = netdev;
+ adapter->pdev = pdev;
+ adapter->msg_enable = (1 << debug) - 1;
+ adapter->bars = bars;
+ adapter->need_ioport = need_ioport;
+
+ hw = &adapter->hw;
+ hw->back = adapter;
+
+ err = -EIO;
+ hw->hw_addr = ioremap(pci_resource_start(pdev, BAR_0),
+ pci_resource_len(pdev, BAR_0));
+ if (!hw->hw_addr)
+ goto err_ioremap;
+
+ if (adapter->need_ioport) {
+ for (i = BAR_1; i <= BAR_5; i++) {
+ if (pci_resource_len(pdev, i) == 0)
+ continue;
+ if (pci_resource_flags(pdev, i) & IORESOURCE_IO) {
+ hw->io_base = pci_resource_start(pdev, i);
+ break;
+ }
+ }
+ }
+
+ netdev->open = &e1000_open;
+ netdev->stop = &e1000_close;
+ netdev->hard_start_xmit = &e1000_xmit_frame;
+ netdev->get_stats = &e1000_get_stats;
+ netdev->set_rx_mode = &e1000_set_rx_mode;
+ netdev->set_mac_address = &e1000_set_mac;
+ netdev->change_mtu = &e1000_change_mtu;
+ netdev->do_ioctl = &e1000_ioctl;
+ e1000_set_ethtool_ops(netdev);
+ netdev->tx_timeout = &e1000_tx_timeout;
+ netdev->watchdog_timeo = 5 * HZ;
+ netif_napi_add(netdev, &adapter->napi, e1000_clean, 64);
+ netdev->vlan_rx_register = e1000_vlan_rx_register;
+ netdev->vlan_rx_add_vid = e1000_vlan_rx_add_vid;
+ netdev->vlan_rx_kill_vid = e1000_vlan_rx_kill_vid;
+#ifdef CONFIG_NET_POLL_CONTROLLER
+ netdev->poll_controller = e1000_netpoll;
+#endif
+ strncpy(netdev->name, pci_name(pdev), sizeof(netdev->name) - 1);
+
+ adapter->bd_number = cards_found;
+
+ /* setup the private structure */
+
+ err = e1000_sw_init(adapter);
+ if (err)
+ goto err_sw_init;
+
+ err = -EIO;
+ /* Flash BAR mapping must happen after e1000_sw_init
+ * because it depends on mac_type */
+ if ((hw->mac_type == e1000_ich8lan) &&
+ (pci_resource_flags(pdev, 1) & IORESOURCE_MEM)) {
+ hw->flash_address =
+ ioremap(pci_resource_start(pdev, 1),
+ pci_resource_len(pdev, 1));
+ if (!hw->flash_address)
+ goto err_flashmap;
+ }
+
+ if (e1000_check_phy_reset_block(hw))
+ DPRINTK(PROBE, INFO, "PHY reset is blocked due to SOL/IDER session.\n");
+
+ if (hw->mac_type >= e1000_82543) {
+ netdev->features = NETIF_F_SG |
+ NETIF_F_HW_CSUM |
+ NETIF_F_HW_VLAN_TX |
+ NETIF_F_HW_VLAN_RX |
+ NETIF_F_HW_VLAN_FILTER;
+ if (hw->mac_type == e1000_ich8lan)
+ netdev->features &= ~NETIF_F_HW_VLAN_FILTER;
+ }
+
+ if ((hw->mac_type >= e1000_82544) &&
+ (hw->mac_type != e1000_82547))
+ netdev->features |= NETIF_F_TSO;
+
+ if (hw->mac_type > e1000_82547_rev_2)
+ netdev->features |= NETIF_F_TSO6;
+ if (pci_using_dac)
+ netdev->features |= NETIF_F_HIGHDMA;
+
+ netdev->features |= NETIF_F_LLTX;
+
+ netdev->vlan_features |= NETIF_F_TSO;
+ netdev->vlan_features |= NETIF_F_TSO6;
+ netdev->vlan_features |= NETIF_F_HW_CSUM;
+ netdev->vlan_features |= NETIF_F_SG;
+
+ adapter->en_mng_pt = e1000_enable_mng_pass_thru(hw);
+
+ /* initialize eeprom parameters */
+ if (e1000_init_eeprom_params(hw)) {
+ E1000_ERR("EEPROM initialization failed\n");
+ goto err_eeprom;
+ }
+
+ /* before reading the EEPROM, reset the controller to
+ * put the device in a known good starting state */
+
+ e1000_reset_hw(hw);
+
+ /* make sure the EEPROM is good */
+ if (e1000_validate_eeprom_checksum(hw) < 0) {
+ DPRINTK(PROBE, ERR, "The EEPROM Checksum Is Not Valid\n");
+ e1000_dump_eeprom(adapter);
+ /*
+ * set MAC address to all zeroes to invalidate and temporarily
+ * disable this device for the user. This blocks regular
+ * traffic while still permitting ethtool ioctls from reaching
+ * the hardware as well as allowing the user to run the
+ * interface after manually setting a hw addr using
+ * `ip set address`
+ */
+ memset(hw->mac_addr, 0, netdev->addr_len);
+ } else {
+ /* copy the MAC address out of the EEPROM */
+ if (e1000_read_mac_addr(hw))
+ DPRINTK(PROBE, ERR, "EEPROM Read Error\n");
+ }
+ /* don't block initialization here due to bad MAC address */
+ memcpy(netdev->dev_addr, hw->mac_addr, netdev->addr_len);
+ memcpy(netdev->perm_addr, hw->mac_addr, netdev->addr_len);
+
+ if (!is_valid_ether_addr(netdev->perm_addr))
+ DPRINTK(PROBE, ERR, "Invalid MAC Address\n");
+
+ e1000_get_bus_info(hw);
+
+ init_timer(&adapter->tx_fifo_stall_timer);
+ adapter->tx_fifo_stall_timer.function = &e1000_82547_tx_fifo_stall;
+ adapter->tx_fifo_stall_timer.data = (unsigned long)adapter;
+
+ init_timer(&adapter->watchdog_timer);
+ adapter->watchdog_timer.function = &e1000_watchdog;
+ adapter->watchdog_timer.data = (unsigned long) adapter;
+
+ init_timer(&adapter->phy_info_timer);
+ adapter->phy_info_timer.function = &e1000_update_phy_info;
+ adapter->phy_info_timer.data = (unsigned long)adapter;
+
+ INIT_WORK(&adapter->reset_task, e1000_reset_task);
+
+ e1000_check_options(adapter);
+
+ /* Initial Wake on LAN setting
+ * If APM wake is enabled in the EEPROM,
+ * enable the ACPI Magic Packet filter
+ */
+
+ switch (hw->mac_type) {
+ case e1000_82542_rev2_0:
+ case e1000_82542_rev2_1:
+ case e1000_82543:
+ break;
+ case e1000_82544:
+ e1000_read_eeprom(hw,
+ EEPROM_INIT_CONTROL2_REG, 1, &eeprom_data);
+ eeprom_apme_mask = E1000_EEPROM_82544_APM;
+ break;
+ case e1000_ich8lan:
+ e1000_read_eeprom(hw,
+ EEPROM_INIT_CONTROL1_REG, 1, &eeprom_data);
+ eeprom_apme_mask = E1000_EEPROM_ICH8_APME;
+ break;
+ case e1000_82546:
+ case e1000_82546_rev_3:
+ case e1000_82571:
+ case e1000_80003es2lan:
+ if (er32(STATUS) & E1000_STATUS_FUNC_1){
+ e1000_read_eeprom(hw,
+ EEPROM_INIT_CONTROL3_PORT_B, 1, &eeprom_data);
+ break;
+ }
+ /* Fall Through */
+ default:
+ e1000_read_eeprom(hw,
+ EEPROM_INIT_CONTROL3_PORT_A, 1, &eeprom_data);
+ break;
+ }
+ if (eeprom_data & eeprom_apme_mask)
+ adapter->eeprom_wol |= E1000_WUFC_MAG;
+
+ /* now that we have the eeprom settings, apply the special cases
+ * where the eeprom may be wrong or the board simply won't support
+ * wake on lan on a particular port */
+ switch (pdev->device) {
+ case E1000_DEV_ID_82546GB_PCIE:
+ adapter->eeprom_wol = 0;
+ break;
+ case E1000_DEV_ID_82546EB_FIBER:
+ case E1000_DEV_ID_82546GB_FIBER:
+ case E1000_DEV_ID_82571EB_FIBER:
+ /* Wake events only supported on port A for dual fiber
+ * regardless of eeprom setting */
+ if (er32(STATUS) & E1000_STATUS_FUNC_1)
+ adapter->eeprom_wol = 0;
+ break;
+ case E1000_DEV_ID_82546GB_QUAD_COPPER_KSP3:
+ case E1000_DEV_ID_82571EB_QUAD_COPPER:
+ case E1000_DEV_ID_82571EB_QUAD_FIBER:
+ case E1000_DEV_ID_82571EB_QUAD_COPPER_LOWPROFILE:
+ case E1000_DEV_ID_82571PT_QUAD_COPPER:
+ /* if quad port adapter, disable WoL on all but port A */
+ if (global_quad_port_a != 0)
+ adapter->eeprom_wol = 0;
+ else
+ adapter->quad_port_a = 1;
+ /* Reset for multiple quad port adapters */
+ if (++global_quad_port_a == 4)
+ global_quad_port_a = 0;
+ break;
+ }
+
+ /* initialize the wol settings based on the eeprom settings */
+ adapter->wol = adapter->eeprom_wol;
+ device_set_wakeup_enable(&adapter->pdev->dev, adapter->wol);
+
+ /* print bus type/speed/width info */
+ DPRINTK(PROBE, INFO, "(PCI%s:%s:%s) ",
+ ((hw->bus_type == e1000_bus_type_pcix) ? "-X" :
+ (hw->bus_type == e1000_bus_type_pci_express ? " Express":"")),
+ ((hw->bus_speed == e1000_bus_speed_2500) ? "2.5Gb/s" :
+ (hw->bus_speed == e1000_bus_speed_133) ? "133MHz" :
+ (hw->bus_speed == e1000_bus_speed_120) ? "120MHz" :
+ (hw->bus_speed == e1000_bus_speed_100) ? "100MHz" :
+ (hw->bus_speed == e1000_bus_speed_66) ? "66MHz" : "33MHz"),
+ ((hw->bus_width == e1000_bus_width_64) ? "64-bit" :
+ (hw->bus_width == e1000_bus_width_pciex_4) ? "Width x4" :
+ (hw->bus_width == e1000_bus_width_pciex_1) ? "Width x1" :
+ "32-bit"));
+
+ printk("%s\n", print_mac(mac, netdev->dev_addr));
+
+ if (hw->bus_type == e1000_bus_type_pci_express) {
+ DPRINTK(PROBE, WARNING, "This device (id %04x:%04x) will no "
+ "longer be supported by this driver in the future.\n",
+ pdev->vendor, pdev->device);
+ DPRINTK(PROBE, WARNING, "please use the \"e1000e\" "
+ "driver instead.\n");
+ }
+
+ /* reset the hardware with the new settings */
+ e1000_reset(adapter);
+
+ /* If the controller is 82573 and f/w is AMT, do not set
+ * DRV_LOAD until the interface is up. For all other cases,
+ * let the f/w know that the h/w is now under the control
+ * of the driver. */
+ if (hw->mac_type != e1000_82573 ||
+ !e1000_check_mng_mode(hw))
+ e1000_get_hw_control(adapter);
+
+ // offer device to EtherCAT master module
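+ // If the master accepts the device, it is opened for EtherCAT use and
+ // never registered with the network stack (no eth%d interface appears).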
+ adapter->ecdev = ecdev_offer(netdev, ec_poll, THIS_MODULE);
+ if (adapter->ecdev) {
+ if (ecdev_open(adapter->ecdev)) {
+ ecdev_withdraw(adapter->ecdev);
+ goto err_register;
+ }
+ } else {
+ /* tell the stack to leave us alone until e1000_open() is called */
+ netif_carrier_off(netdev);
+ netif_stop_queue(netdev);
+
+ strcpy(netdev->name, "eth%d");
+ err = register_netdev(netdev);
+ if (err)
+ goto err_register;
+ }
+
+ DPRINTK(PROBE, INFO, "Intel(R) PRO/1000 Network Connection\n");
+
+ cards_found++;
+ return 0;
+
+err_register:
+ e1000_release_hw_control(adapter);
+err_eeprom:
+ if (!e1000_check_phy_reset_block(hw))
+ e1000_phy_hw_reset(hw);
+
+ if (hw->flash_address)
+ iounmap(hw->flash_address);
+err_flashmap:
+ for (i = 0; i < adapter->num_rx_queues; i++)
+ dev_put(&adapter->polling_netdev[i]);
+
+ kfree(adapter->tx_ring);
+ kfree(adapter->rx_ring);
+ kfree(adapter->polling_netdev);
+err_sw_init:
+ iounmap(hw->hw_addr);
+err_ioremap:
+ free_netdev(netdev);
+err_alloc_etherdev:
+ pci_release_selected_regions(pdev, bars);
+err_pci_reg:
+err_dma:
+ pci_disable_device(pdev);
+ return err;
+}
+
+/**
+ * e1000_remove - Device Removal Routine
+ * @pdev: PCI device information struct
+ *
+ * e1000_remove is called by the PCI subsystem to alert the driver
+ * that it should release a PCI device. The could be caused by a
+ * Hot-Plug event, or because the driver is going to be removed from
+ * memory.
+ **/
+
+static void __devexit e1000_remove(struct pci_dev *pdev)
+{
+ struct net_device *netdev = pci_get_drvdata(pdev);
+ struct e1000_adapter *adapter = netdev_priv(netdev);
+ struct e1000_hw *hw = &adapter->hw;
+ int i;
+
+ cancel_work_sync(&adapter->reset_task);
+
+ e1000_release_manageability(adapter);
+
+ /* Release control of h/w to f/w. If f/w is AMT enabled, this
+ * would have already happened in close and is redundant. */
+ e1000_release_hw_control(adapter);
+
+ for (i = 0; i < adapter->num_rx_queues; i++)
+ dev_put(&adapter->polling_netdev[i]);
+
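+ /* EtherCAT devices were never registered with the stack (see
+ * e1000_probe()), so they are closed and handed back to the master
+ * instead of calling unregister_netdev(). */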
+ if (adapter->ecdev) {
+ ecdev_close(adapter->ecdev);
+ ecdev_withdraw(adapter->ecdev);
+ } else {
+ unregister_netdev(netdev);
+ }
+
+ if (!e1000_check_phy_reset_block(hw))
+ e1000_phy_hw_reset(hw);
+
+ kfree(adapter->tx_ring);
+ kfree(adapter->rx_ring);
+ kfree(adapter->polling_netdev);
+
+ iounmap(hw->hw_addr);
+ if (hw->flash_address)
+ iounmap(hw->flash_address);
+ pci_release_selected_regions(pdev, adapter->bars);
+
+ free_netdev(netdev);
+
+ pci_disable_device(pdev);
+}
+
+/**
+ * e1000_sw_init - Initialize general software structures (struct e1000_adapter)
+ * @adapter: board private structure to initialize
+ *
+ * e1000_sw_init initializes the Adapter private data structure.
+ * Fields are initialized based on PCI device information and
+ * OS network device settings (MTU size).
+ **/
+
+static int __devinit e1000_sw_init(struct e1000_adapter *adapter)
+{
+ struct e1000_hw *hw = &adapter->hw;
+ struct net_device *netdev = adapter->netdev;
+ struct pci_dev *pdev = adapter->pdev;
+ int i;
+
+ /* PCI config space info */
+
+ hw->vendor_id = pdev->vendor;
+ hw->device_id = pdev->device;
+ hw->subsystem_vendor_id = pdev->subsystem_vendor;
+ hw->subsystem_id = pdev->subsystem_device;
+ hw->revision_id = pdev->revision;
+
+ pci_read_config_word(pdev, PCI_COMMAND, &hw->pci_cmd_word);
+
+ adapter->rx_buffer_len = MAXIMUM_ETHERNET_VLAN_SIZE;
+ hw->max_frame_size = netdev->mtu +
+ ENET_HEADER_SIZE + ETHERNET_FCS_SIZE;
+ hw->min_frame_size = MINIMUM_ETHERNET_FRAME_SIZE;
+
+ /* identify the MAC */
+
+ if (e1000_set_mac_type(hw)) {
+ DPRINTK(PROBE, ERR, "Unknown MAC Type\n");
+ return -EIO;
+ }
+
+ switch (hw->mac_type) {
+ default:
+ break;
+ case e1000_82541:
+ case e1000_82547:
+ case e1000_82541_rev_2:
+ case e1000_82547_rev_2:
+ hw->phy_init_script = 1;
+ break;
+ }
+
+ e1000_set_media_type(hw);
+
+ hw->wait_autoneg_complete = false;
+ hw->tbi_compatibility_en = true;
+ hw->adaptive_ifs = true;
+
+ /* Copper options */
+
+ if (hw->media_type == e1000_media_type_copper) {
+ hw->mdix = AUTO_ALL_MODES;
+ hw->disable_polarity_correction = false;
+ hw->master_slave = E1000_MASTER_SLAVE;
+ }
+
+ adapter->num_tx_queues = 1;
+ adapter->num_rx_queues = 1;
+
+ if (e1000_alloc_queues(adapter)) {
+ DPRINTK(PROBE, ERR, "Unable to allocate memory for queues\n");
+ return -ENOMEM;
+ }
+
+ for (i = 0; i < adapter->num_rx_queues; i++) {
+ adapter->polling_netdev[i].priv = adapter;
+ dev_hold(&adapter->polling_netdev[i]);
+ set_bit(__LINK_STATE_START, &adapter->polling_netdev[i].state);
+ }
+ spin_lock_init(&adapter->tx_queue_lock);
+
+ /* Explicitly disable IRQ since the NIC can be in any state. */
+ e1000_irq_disable(adapter);
+
+ spin_lock_init(&adapter->stats_lock);
+
+ set_bit(__E1000_DOWN, &adapter->flags);
+
+ return 0;
+}
+
+/**
+ * e1000_alloc_queues - Allocate memory for all rings
+ * @adapter: board private structure to initialize
+ *
+ * We allocate one ring per queue at run-time since we don't know the
+ * number of queues at compile-time. The polling_netdev array is
+ * intended for Multiqueue, but should work fine with a single queue.
+ **/
+
+static int __devinit e1000_alloc_queues(struct e1000_adapter *adapter)
+{
+ adapter->tx_ring = kcalloc(adapter->num_tx_queues,
+ sizeof(struct e1000_tx_ring), GFP_KERNEL);
+ if (!adapter->tx_ring)
+ return -ENOMEM;
+
+ adapter->rx_ring = kcalloc(adapter->num_rx_queues,
+ sizeof(struct e1000_rx_ring), GFP_KERNEL);
+ if (!adapter->rx_ring) {
+ kfree(adapter->tx_ring);
+ return -ENOMEM;
+ }
+
+ adapter->polling_netdev = kcalloc(adapter->num_rx_queues,
+ sizeof(struct net_device),
+ GFP_KERNEL);
+ if (!adapter->polling_netdev) {
+ kfree(adapter->tx_ring);
+ kfree(adapter->rx_ring);
+ return -ENOMEM;
+ }
+
+ return E1000_SUCCESS;
+}
+
+/**
+ * e1000_open - Called when a network interface is made active
+ * @netdev: network interface device structure
+ *
+ * Returns 0 on success, negative value on failure
+ *
+ * The open entry point is called when a network interface is made
+ * active by the system (IFF_UP). At this point all resources needed
+ * for transmit and receive operations are allocated, the interrupt
+ * handler is registered with the OS, the watchdog timer is started,
+ * and the stack is notified that the interface is ready.
+ **/
+
+static int e1000_open(struct net_device *netdev)
+{
+ struct e1000_adapter *adapter = netdev_priv(netdev);
+ struct e1000_hw *hw = &adapter->hw;
+ int err;
+
+ /* disallow open during test */
+ if (test_bit(__E1000_TESTING, &adapter->flags))
+ return -EBUSY;
+
+ /* allocate transmit descriptors */
+ err = e1000_setup_all_tx_resources(adapter);
+ if (err)
+ goto err_setup_tx;
+
+ /* allocate receive descriptors */
+ err = e1000_setup_all_rx_resources(adapter);
+ if (err)
+ goto err_setup_rx;
+
+ e1000_power_up_phy(adapter);
+
+ adapter->mng_vlan_id = E1000_MNG_VLAN_NONE;
+ if ((hw->mng_cookie.status &
+ E1000_MNG_DHCP_COOKIE_STATUS_VLAN_SUPPORT)) {
+ e1000_update_mng_vlan(adapter);
+ }
+
+ /* If AMT is enabled, let the firmware know that the network
+ * interface is now open */
+ if (hw->mac_type == e1000_82573 &&
+ e1000_check_mng_mode(hw))
+ e1000_get_hw_control(adapter);
+
+ /* before we allocate an interrupt, we must be ready to handle it.
+ * Setting DEBUG_SHIRQ in the kernel makes it fire an interrupt
+ * as soon as we call request_irq, so we have to set up our
+ * clean_rx handler before we do so. */
+ e1000_configure(adapter);
+
+ err = e1000_request_irq(adapter);
+ if (err)
+ goto err_req_irq;
+
+ /* From here on the code is the same as e1000_up() */
+ clear_bit(__E1000_DOWN, &adapter->flags);
+
+ napi_enable(&adapter->napi);
+
+ e1000_irq_enable(adapter);
+
+ netif_start_queue(netdev);
+
+ /* fire a link status change interrupt to start the watchdog */
+ ew32(ICS, E1000_ICS_LSC);
+
+ return E1000_SUCCESS;
+
+err_req_irq:
+ e1000_release_hw_control(adapter);
+ e1000_power_down_phy(adapter);
+ e1000_free_all_rx_resources(adapter);
+err_setup_rx:
+ e1000_free_all_tx_resources(adapter);
+err_setup_tx:
+ e1000_reset(adapter);
+
+ return err;
+}
+
+/**
+ * e1000_close - Disables a network interface
+ * @netdev: network interface device structure
+ *
+ * Returns 0, this is not allowed to fail
+ *
+ * The close entry point is called when an interface is de-activated
+ * by the OS. The hardware is still under the drivers control, but
+ * needs to be disabled. A global MAC reset is issued to stop the
+ * hardware, and all transmit and receive resources are freed.
+ **/
+
+static int e1000_close(struct net_device *netdev)
+{
+ struct e1000_adapter *adapter = netdev_priv(netdev);
+ struct e1000_hw *hw = &adapter->hw;
+
+ WARN_ON(test_bit(__E1000_RESETTING, &adapter->flags));
+ e1000_down(adapter);
+ e1000_power_down_phy(adapter);
+ e1000_free_irq(adapter);
+
+ e1000_free_all_tx_resources(adapter);
+ e1000_free_all_rx_resources(adapter);
+
+ /* kill manageability vlan ID if supported, but not if a vlan with
+ * the same ID is registered on the host OS (let 8021q kill it) */
+ if ((hw->mng_cookie.status &
+ E1000_MNG_DHCP_COOKIE_STATUS_VLAN_SUPPORT) &&
+ !(adapter->vlgrp &&
+ vlan_group_get_device(adapter->vlgrp, adapter->mng_vlan_id))) {
+ e1000_vlan_rx_kill_vid(netdev, adapter->mng_vlan_id);
+ }
+
+ /* If AMT is enabled, let the firmware know that the network
+ * interface is now closed */
+ if (hw->mac_type == e1000_82573 &&
+ e1000_check_mng_mode(hw))
+ e1000_release_hw_control(adapter);
+
+ return 0;
+}
+
+/**
+ * e1000_check_64k_bound - check that memory doesn't cross 64kB boundary
+ * @adapter: address of board private structure
+ * @start: address of beginning of memory
+ * @len: length of memory
+ **/
+static bool e1000_check_64k_bound(struct e1000_adapter *adapter, void *start,
+ unsigned long len)
+{
+ struct e1000_hw *hw = &adapter->hw;
+ unsigned long begin = (unsigned long)start;
+ unsigned long end = begin + len;
+
+ /* First-rev 82545 and 82546 must not allow any memory
+ * write location to cross a 64k boundary due to errata 23 */
+ if (hw->mac_type == e1000_82545 ||
+ hw->mac_type == e1000_82546) {
+ return ((begin ^ (end - 1)) >> 16) != 0 ? false : true;
+ }
+
+ return true;
+}
+
+/**
+ * e1000_setup_tx_resources - allocate Tx resources (Descriptors)
+ * @adapter: board private structure
+ * @txdr: tx descriptor ring (for a specific queue) to setup
+ *
+ * Return 0 on success, negative on failure
+ **/
+
+static int e1000_setup_tx_resources(struct e1000_adapter *adapter,
+ struct e1000_tx_ring *txdr)
+{
+ struct pci_dev *pdev = adapter->pdev;
+ int size;
+
+ size = sizeof(struct e1000_buffer) * txdr->count;
+ txdr->buffer_info = vmalloc(size);
+ if (!txdr->buffer_info) {
+ DPRINTK(PROBE, ERR,
+ "Unable to allocate memory for the transmit descriptor ring\n");
+ return -ENOMEM;
+ }
+ memset(txdr->buffer_info, 0, size);
+
+ /* round up to nearest 4K */
+
+ txdr->size = txdr->count * sizeof(struct e1000_tx_desc);
+ txdr->size = ALIGN(txdr->size, 4096);
+
+ txdr->desc = pci_alloc_consistent(pdev, txdr->size, &txdr->dma);
+ if (!txdr->desc) {
+setup_tx_desc_die:
+ vfree(txdr->buffer_info);
+ DPRINTK(PROBE, ERR,
+ "Unable to allocate memory for the transmit descriptor ring\n");
+ return -ENOMEM;
+ }
+
+ /* Fix for errata 23, can't cross 64kB boundary */
+ if (!e1000_check_64k_bound(adapter, txdr->desc, txdr->size)) {
+ void *olddesc = txdr->desc;
+ dma_addr_t olddma = txdr->dma;
+ DPRINTK(TX_ERR, ERR, "txdr align check failed: %u bytes "
+ "at %p\n", txdr->size, txdr->desc);
+ /* Try again, without freeing the previous */
+ txdr->desc = pci_alloc_consistent(pdev, txdr->size, &txdr->dma);
+ /* Failed allocation, critical failure */
+ if (!txdr->desc) {
+ pci_free_consistent(pdev, txdr->size, olddesc, olddma);
+ goto setup_tx_desc_die;
+ }
+
+ if (!e1000_check_64k_bound(adapter, txdr->desc, txdr->size)) {
+ /* give up */
+ pci_free_consistent(pdev, txdr->size, txdr->desc,
+ txdr->dma);
+ pci_free_consistent(pdev, txdr->size, olddesc, olddma);
+ DPRINTK(PROBE, ERR,
+ "Unable to allocate aligned memory "
+ "for the transmit descriptor ring\n");
+ vfree(txdr->buffer_info);
+ return -ENOMEM;
+ } else {
+ /* Free old allocation, new allocation was successful */
+ pci_free_consistent(pdev, txdr->size, olddesc, olddma);
+ }
+ }
+ memset(txdr->desc, 0, txdr->size);
+
+ txdr->next_to_use = 0;
+ txdr->next_to_clean = 0;
+ spin_lock_init(&txdr->tx_lock);
+
+ return 0;
+}
+
+/**
+ * e1000_setup_all_tx_resources - wrapper to allocate Tx resources
+ * (Descriptors) for all queues
+ * @adapter: board private structure
+ *
+ * Return 0 on success, negative on failure
+ **/
+
+int e1000_setup_all_tx_resources(struct e1000_adapter *adapter)
+{
+ int i, err = 0;
+
+ for (i = 0; i < adapter->num_tx_queues; i++) {
+ err = e1000_setup_tx_resources(adapter, &adapter->tx_ring[i]);
+ if (err) {
+ DPRINTK(PROBE, ERR,
+ "Allocation for Tx Queue %u failed\n", i);
+ for (i-- ; i >= 0; i--)
+ e1000_free_tx_resources(adapter,
+ &adapter->tx_ring[i]);
+ break;
+ }
+ }
+
+ return err;
+}
+
+/**
+ * e1000_configure_tx - Configure 8254x Transmit Unit after Reset
+ * @adapter: board private structure
+ *
+ * Configure the Tx unit of the MAC after a reset.
+ **/
+
+static void e1000_configure_tx(struct e1000_adapter *adapter)
+{
+ u64 tdba;
+ struct e1000_hw *hw = &adapter->hw;
+ u32 tdlen, tctl, tipg, tarc;
+ u32 ipgr1, ipgr2;
+
+ /* Setup the HW Tx Head and Tail descriptor pointers */
+
+ switch (adapter->num_tx_queues) {
+ case 1:
+ default:
+ tdba = adapter->tx_ring[0].dma;
+ tdlen = adapter->tx_ring[0].count *
+ sizeof(struct e1000_tx_desc);
+ ew32(TDLEN, tdlen);
+ ew32(TDBAH, (tdba >> 32));
+ ew32(TDBAL, (tdba & 0x00000000ffffffffULL));
+ ew32(TDT, 0);
+ ew32(TDH, 0);
+ adapter->tx_ring[0].tdh = ((hw->mac_type >= e1000_82543) ? E1000_TDH : E1000_82542_TDH);
+ adapter->tx_ring[0].tdt = ((hw->mac_type >= e1000_82543) ? E1000_TDT : E1000_82542_TDT);
+ break;
+ }
+
+ /* Set the default values for the Tx Inter Packet Gap timer */
+ if (hw->mac_type <= e1000_82547_rev_2 &&
+ (hw->media_type == e1000_media_type_fiber ||
+ hw->media_type == e1000_media_type_internal_serdes))
+ tipg = DEFAULT_82543_TIPG_IPGT_FIBER;
+ else
+ tipg = DEFAULT_82543_TIPG_IPGT_COPPER;
+
+ switch (hw->mac_type) {
+ case e1000_82542_rev2_0:
+ case e1000_82542_rev2_1:
+ tipg = DEFAULT_82542_TIPG_IPGT;
+ ipgr1 = DEFAULT_82542_TIPG_IPGR1;
+ ipgr2 = DEFAULT_82542_TIPG_IPGR2;
+ break;
+ case e1000_80003es2lan:
+ ipgr1 = DEFAULT_82543_TIPG_IPGR1;
+ ipgr2 = DEFAULT_80003ES2LAN_TIPG_IPGR2;
+ break;
+ default:
+ ipgr1 = DEFAULT_82543_TIPG_IPGR1;
+ ipgr2 = DEFAULT_82543_TIPG_IPGR2;
+ break;
+ }
+ tipg |= ipgr1 << E1000_TIPG_IPGR1_SHIFT;
+ tipg |= ipgr2 << E1000_TIPG_IPGR2_SHIFT;
+ ew32(TIPG, tipg);
+
+ /* Set the Tx Interrupt Delay register */
+
+ ew32(TIDV, adapter->tx_int_delay);
+ if (hw->mac_type >= e1000_82540)
+ ew32(TADV, adapter->tx_abs_int_delay);
+
+ /* Program the Transmit Control Register */
+
+ tctl = er32(TCTL);
+ tctl &= ~E1000_TCTL_CT;
+ tctl |= E1000_TCTL_PSP | E1000_TCTL_RTLC |
+ (E1000_COLLISION_THRESHOLD << E1000_CT_SHIFT);
+
+ if (hw->mac_type == e1000_82571 || hw->mac_type == e1000_82572) {
+ tarc = er32(TARC0);
+ /* set the speed mode bit, we'll clear it if we're not at
+ * gigabit link later */
+ tarc |= (1 << 21);
+ ew32(TARC0, tarc);
+ } else if (hw->mac_type == e1000_80003es2lan) {
+ tarc = er32(TARC0);
+ tarc |= 1;
+ ew32(TARC0, tarc);
+ tarc = er32(TARC1);
+ tarc |= 1;
+ ew32(TARC1, tarc);
+ }
+
+ e1000_config_collision_dist(hw);
+
+ /* Setup Transmit Descriptor Settings for eop descriptor */
+ adapter->txd_cmd = E1000_TXD_CMD_EOP | E1000_TXD_CMD_IFCS;
+
+ /* only set IDE if we are delaying interrupts using the timers */
+ if (adapter->tx_int_delay)
+ adapter->txd_cmd |= E1000_TXD_CMD_IDE;
+
+ if (hw->mac_type < e1000_82543)
+ adapter->txd_cmd |= E1000_TXD_CMD_RPS;
+ else
+ adapter->txd_cmd |= E1000_TXD_CMD_RS;
+
+ /* Cache if we're 82544 running in PCI-X because we'll
+ * need this to apply a workaround later in the send path. */
+ if (hw->mac_type == e1000_82544 &&
+ hw->bus_type == e1000_bus_type_pcix)
+ adapter->pcix_82544 = 1;
+
+ ew32(TCTL, tctl);
+
+}
+
+/**
+ * e1000_setup_rx_resources - allocate Rx resources (Descriptors)
+ * @adapter: board private structure
+ * @rxdr: rx descriptor ring (for a specific queue) to setup
+ *
+ * Returns 0 on success, negative on failure
+ **/
+
+static int e1000_setup_rx_resources(struct e1000_adapter *adapter,
+ struct e1000_rx_ring *rxdr)
+{
+ struct e1000_hw *hw = &adapter->hw;
+ struct pci_dev *pdev = adapter->pdev;
+ int size, desc_len;
+
+ size = sizeof(struct e1000_buffer) * rxdr->count;
+ rxdr->buffer_info = vmalloc(size);
+ if (!rxdr->buffer_info) {
+ DPRINTK(PROBE, ERR,
+ "Unable to allocate memory for the receive descriptor ring\n");
+ return -ENOMEM;
+ }
+ memset(rxdr->buffer_info, 0, size);
+
+ if (hw->mac_type <= e1000_82547_rev_2)
+ desc_len = sizeof(struct e1000_rx_desc);
+ else
+ desc_len = sizeof(union e1000_rx_desc_packet_split);
+
+ /* Round up to nearest 4K */
+
+ rxdr->size = rxdr->count * desc_len;
+ rxdr->size = ALIGN(rxdr->size, 4096);
+
+ rxdr->desc = pci_alloc_consistent(pdev, rxdr->size, &rxdr->dma);
+
+ if (!rxdr->desc) {
+ DPRINTK(PROBE, ERR,
+ "Unable to allocate memory for the receive descriptor ring\n");
+setup_rx_desc_die:
+ vfree(rxdr->buffer_info);
+ return -ENOMEM;
+ }
+
+ /* Fix for errata 23, can't cross 64kB boundary */
+ if (!e1000_check_64k_bound(adapter, rxdr->desc, rxdr->size)) {
+ void *olddesc = rxdr->desc;
+ dma_addr_t olddma = rxdr->dma;
+ DPRINTK(RX_ERR, ERR, "rxdr align check failed: %u bytes "
+ "at %p\n", rxdr->size, rxdr->desc);
+ /* Try again, without freeing the previous */
+ rxdr->desc = pci_alloc_consistent(pdev, rxdr->size, &rxdr->dma);
+ /* Failed allocation, critical failure */
+ if (!rxdr->desc) {
+ pci_free_consistent(pdev, rxdr->size, olddesc, olddma);
+ DPRINTK(PROBE, ERR,
+ "Unable to allocate memory "
+ "for the receive descriptor ring\n");
+ goto setup_rx_desc_die;
+ }
+
+ if (!e1000_check_64k_bound(adapter, rxdr->desc, rxdr->size)) {
+ /* give up */
+ pci_free_consistent(pdev, rxdr->size, rxdr->desc,
+ rxdr->dma);
+ pci_free_consistent(pdev, rxdr->size, olddesc, olddma);
+ DPRINTK(PROBE, ERR,
+ "Unable to allocate aligned memory "
+ "for the receive descriptor ring\n");
+ goto setup_rx_desc_die;
+ } else {
+ /* Free old allocation, new allocation was successful */
+ pci_free_consistent(pdev, rxdr->size, olddesc, olddma);
+ }
+ }
+ memset(rxdr->desc, 0, rxdr->size);
+
+ rxdr->next_to_clean = 0;
+ rxdr->next_to_use = 0;
+
+ return 0;
+}
+
+/**
+ * e1000_setup_all_rx_resources - wrapper to allocate Rx resources
+ * (Descriptors) for all queues
+ * @adapter: board private structure
+ *
+ * Return 0 on success, negative on failure
+ **/
+
+int e1000_setup_all_rx_resources(struct e1000_adapter *adapter)
+{
+ int i, err = 0;
+
+ for (i = 0; i < adapter->num_rx_queues; i++) {
+ err = e1000_setup_rx_resources(adapter, &adapter->rx_ring[i]);
+ if (err) {
+ DPRINTK(PROBE, ERR,
+ "Allocation for Rx Queue %u failed\n", i);
+ for (i-- ; i >= 0; i--)
+ e1000_free_rx_resources(adapter,
+ &adapter->rx_ring[i]);
+ break;
+ }
+ }
+
+ return err;
+}
+
+/**
+ * e1000_setup_rctl - configure the receive control registers
+ * @adapter: Board private structure
+ **/
+#define PAGE_USE_COUNT(S) (((S) >> PAGE_SHIFT) + \
+ (((S) & (PAGE_SIZE - 1)) ? 1 : 0))
+static void e1000_setup_rctl(struct e1000_adapter *adapter)
+{
+ struct e1000_hw *hw = &adapter->hw;
+ u32 rctl;
+
+ rctl = er32(RCTL);
+
+ rctl &= ~(3 << E1000_RCTL_MO_SHIFT);
+
+ rctl |= E1000_RCTL_EN | E1000_RCTL_BAM |
+ E1000_RCTL_LBM_NO | E1000_RCTL_RDMTS_HALF |
+ (hw->mc_filter_type << E1000_RCTL_MO_SHIFT);
+
+ if (hw->tbi_compatibility_on == 1)
+ rctl |= E1000_RCTL_SBP;
+ else
+ rctl &= ~E1000_RCTL_SBP;
+
+ if (adapter->netdev->mtu <= ETH_DATA_LEN)
+ rctl &= ~E1000_RCTL_LPE;
+ else
+ rctl |= E1000_RCTL_LPE;
+
+ /* Setup buffer sizes */
+ rctl &= ~E1000_RCTL_SZ_4096;
+ rctl |= E1000_RCTL_BSEX;
+ switch (adapter->rx_buffer_len) {
+ case E1000_RXBUFFER_256:
+ rctl |= E1000_RCTL_SZ_256;
+ rctl &= ~E1000_RCTL_BSEX;
+ break;
+ case E1000_RXBUFFER_512:
+ rctl |= E1000_RCTL_SZ_512;
+ rctl &= ~E1000_RCTL_BSEX;
+ break;
+ case E1000_RXBUFFER_1024:
+ rctl |= E1000_RCTL_SZ_1024;
+ rctl &= ~E1000_RCTL_BSEX;
+ break;
+ case E1000_RXBUFFER_2048:
+ default:
+ rctl |= E1000_RCTL_SZ_2048;
+ rctl &= ~E1000_RCTL_BSEX;
+ break;
+ case E1000_RXBUFFER_4096:
+ rctl |= E1000_RCTL_SZ_4096;
+ break;
+ case E1000_RXBUFFER_8192:
+ rctl |= E1000_RCTL_SZ_8192;
+ break;
+ case E1000_RXBUFFER_16384:
+ rctl |= E1000_RCTL_SZ_16384;
+ break;
+ }
+
+ ew32(RCTL, rctl);
+}
+
+/**
+ * e1000_configure_rx - Configure 8254x Receive Unit after Reset
+ * @adapter: board private structure
+ *
+ * Configure the Rx unit of the MAC after a reset.
+ **/
+
+static void e1000_configure_rx(struct e1000_adapter *adapter)
+{
+ u64 rdba;
+ struct e1000_hw *hw = &adapter->hw;
+ u32 rdlen, rctl, rxcsum, ctrl_ext;
+
+ rdlen = adapter->rx_ring[0].count *
+ sizeof(struct e1000_rx_desc);
+ adapter->clean_rx = e1000_clean_rx_irq;
+ adapter->alloc_rx_buf = e1000_alloc_rx_buffers;
+
+ /* disable receives while setting up the descriptors */
+ rctl = er32(RCTL);
+ ew32(RCTL, rctl & ~E1000_RCTL_EN);
+
+ /* set the Receive Delay Timer Register */
+ ew32(RDTR, adapter->rx_int_delay);
+
+ if (hw->mac_type >= e1000_82540) {
+ ew32(RADV, adapter->rx_abs_int_delay);
+ if (adapter->itr_setting != 0)
+ ew32(ITR, 1000000000 / (adapter->itr * 256));
+ }
+
+ if (hw->mac_type >= e1000_82571) {
+ ctrl_ext = er32(CTRL_EXT);
+ /* Reset delay timers after every interrupt */
+ ctrl_ext |= E1000_CTRL_EXT_INT_TIMER_CLR;
+ /* Auto-Mask interrupts upon ICR access */
+ ctrl_ext |= E1000_CTRL_EXT_IAME;
+ ew32(IAM, 0xffffffff);
+ ew32(CTRL_EXT, ctrl_ext);
+ E1000_WRITE_FLUSH();
+ }
+
+ /* Setup the HW Rx Head and Tail Descriptor Pointers and
+ * the Base and Length of the Rx Descriptor Ring */
+ switch (adapter->num_rx_queues) {
+ case 1:
+ default:
+ rdba = adapter->rx_ring[0].dma;
+ ew32(RDLEN, rdlen);
+ ew32(RDBAH, (rdba >> 32));
+ ew32(RDBAL, (rdba & 0x00000000ffffffffULL));
+ ew32(RDT, 0);
+ ew32(RDH, 0);
+ adapter->rx_ring[0].rdh = ((hw->mac_type >= e1000_82543) ? E1000_RDH : E1000_82542_RDH);
+ adapter->rx_ring[0].rdt = ((hw->mac_type >= e1000_82543) ? E1000_RDT : E1000_82542_RDT);
+ break;
+ }
+
+ /* Enable 82543 Receive Checksum Offload for TCP and UDP */
+ if (hw->mac_type >= e1000_82543) {
+ rxcsum = er32(RXCSUM);
+ if (adapter->rx_csum)
+ rxcsum |= E1000_RXCSUM_TUOFL;
+ else
+ /* don't need to clear IPPCSE as it defaults to 0 */
+ rxcsum &= ~E1000_RXCSUM_TUOFL;
+ ew32(RXCSUM, rxcsum);
+ }
+
+ /* Enable Receives */
+ ew32(RCTL, rctl);
+}
+
+/**
+ * e1000_free_tx_resources - Free Tx Resources per Queue
+ * @adapter: board private structure
+ * @tx_ring: Tx descriptor ring for a specific queue
+ *
+ * Free all transmit software resources
+ **/
+
+static void e1000_free_tx_resources(struct e1000_adapter *adapter,
+ struct e1000_tx_ring *tx_ring)
+{
+ struct pci_dev *pdev = adapter->pdev;
+
+ e1000_clean_tx_ring(adapter, tx_ring);
+
+ vfree(tx_ring->buffer_info);
+ tx_ring->buffer_info = NULL;
+
+ pci_free_consistent(pdev, tx_ring->size, tx_ring->desc, tx_ring->dma);
+
+ tx_ring->desc = NULL;
+}
+
+/**
+ * e1000_free_all_tx_resources - Free Tx Resources for All Queues
+ * @adapter: board private structure
+ *
+ * Free all transmit software resources
+ **/
+
+void e1000_free_all_tx_resources(struct e1000_adapter *adapter)
+{
+ int i;
+
+ for (i = 0; i < adapter->num_tx_queues; i++)
+ e1000_free_tx_resources(adapter, &adapter->tx_ring[i]);
+}
+
+static void e1000_unmap_and_free_tx_resource(struct e1000_adapter *adapter,
+ struct e1000_buffer *buffer_info)
+{
+ if (buffer_info->dma) {
+ pci_unmap_page(adapter->pdev,
+ buffer_info->dma,
+ buffer_info->length,
+ PCI_DMA_TODEVICE);
+ buffer_info->dma = 0;
+ }
+ if (buffer_info->skb) {
+ dev_kfree_skb_any(buffer_info->skb);
+ buffer_info->skb = NULL;
+ }
+ /* buffer_info must be completely set up in the transmit path */
+}
+
+/**
+ * e1000_clean_tx_ring - Free Tx Buffers
+ * @adapter: board private structure
+ * @tx_ring: ring to be cleaned
+ **/
+
+static void e1000_clean_tx_ring(struct e1000_adapter *adapter,
+ struct e1000_tx_ring *tx_ring)
+{
+ struct e1000_hw *hw = &adapter->hw;
+ struct e1000_buffer *buffer_info;
+ unsigned long size;
+ unsigned int i;
+
+ /* Free all the Tx ring sk_buffs */
+
+ for (i = 0; i < tx_ring->count; i++) {
+ buffer_info = &tx_ring->buffer_info[i];
+ e1000_unmap_and_free_tx_resource(adapter, buffer_info);
+ }
+
+ size = sizeof(struct e1000_buffer) * tx_ring->count;
+ memset(tx_ring->buffer_info, 0, size);
+
+ /* Zero out the descriptor ring */
+
+ memset(tx_ring->desc, 0, tx_ring->size);
+
+ tx_ring->next_to_use = 0;
+ tx_ring->next_to_clean = 0;
+ tx_ring->last_tx_tso = 0;
+
+ writel(0, hw->hw_addr + tx_ring->tdh);
+ writel(0, hw->hw_addr + tx_ring->tdt);
+}
+
+/**
+ * e1000_clean_all_tx_rings - Free Tx Buffers for all queues
+ * @adapter: board private structure
+ **/
+
+static void e1000_clean_all_tx_rings(struct e1000_adapter *adapter)
+{
+ int i;
+
+ for (i = 0; i < adapter->num_tx_queues; i++)
+ e1000_clean_tx_ring(adapter, &adapter->tx_ring[i]);
+}
+
+/**
+ * e1000_free_rx_resources - Free Rx Resources
+ * @adapter: board private structure
+ * @rx_ring: ring to clean the resources from
+ *
+ * Free all receive software resources
+ **/
+
+static void e1000_free_rx_resources(struct e1000_adapter *adapter,
+ struct e1000_rx_ring *rx_ring)
+{
+ struct pci_dev *pdev = adapter->pdev;
+
+ e1000_clean_rx_ring(adapter, rx_ring);
+
+ vfree(rx_ring->buffer_info);
+ rx_ring->buffer_info = NULL;
+
+ pci_free_consistent(pdev, rx_ring->size, rx_ring->desc, rx_ring->dma);
+
+ rx_ring->desc = NULL;
+}
+
+/**
+ * e1000_free_all_rx_resources - Free Rx Resources for All Queues
+ * @adapter: board private structure
+ *
+ * Free all receive software resources
+ **/
+
+void e1000_free_all_rx_resources(struct e1000_adapter *adapter)
+{
+ int i;
+
+ for (i = 0; i < adapter->num_rx_queues; i++)
+ e1000_free_rx_resources(adapter, &adapter->rx_ring[i]);
+}
+
+/**
+ * e1000_clean_rx_ring - Free Rx Buffers per Queue
+ * @adapter: board private structure
+ * @rx_ring: ring to free buffers from
+ **/
+
+static void e1000_clean_rx_ring(struct e1000_adapter *adapter,
+ struct e1000_rx_ring *rx_ring)
+{
+ struct e1000_hw *hw = &adapter->hw;
+ struct e1000_buffer *buffer_info;
+ struct pci_dev *pdev = adapter->pdev;
+ unsigned long size;
+ unsigned int i;
+
+ /* Free all the Rx ring sk_buffs */
+ for (i = 0; i < rx_ring->count; i++) {
+ buffer_info = &rx_ring->buffer_info[i];
+ if (buffer_info->skb) {
+ pci_unmap_single(pdev,
+ buffer_info->dma,
+ buffer_info->length,
+ PCI_DMA_FROMDEVICE);
+
+ dev_kfree_skb(buffer_info->skb);
+ buffer_info->skb = NULL;
+ }
+ }
+
+ size = sizeof(struct e1000_buffer) * rx_ring->count;
+ memset(rx_ring->buffer_info, 0, size);
+
+ /* Zero out the descriptor ring */
+
+ memset(rx_ring->desc, 0, rx_ring->size);
+
+ rx_ring->next_to_clean = 0;
+ rx_ring->next_to_use = 0;
+
+ writel(0, hw->hw_addr + rx_ring->rdh);
+ writel(0, hw->hw_addr + rx_ring->rdt);
+}
+
+/**
+ * e1000_clean_all_rx_rings - Free Rx Buffers for all queues
+ * @adapter: board private structure
+ **/
+
+static void e1000_clean_all_rx_rings(struct e1000_adapter *adapter)
+{
+ int i;
+
+ for (i = 0; i < adapter->num_rx_queues; i++)
+ e1000_clean_rx_ring(adapter, &adapter->rx_ring[i]);
+}
+
+/* The 82542 2.0 (revision 2) needs to have the receive unit in reset
+ * and memory write and invalidate disabled for certain operations
+ */
+static void e1000_enter_82542_rst(struct e1000_adapter *adapter)
+{
+ struct e1000_hw *hw = &adapter->hw;
+ struct net_device *netdev = adapter->netdev;
+ u32 rctl;
+
+ e1000_pci_clear_mwi(hw);
+
+ rctl = er32(RCTL);
+ rctl |= E1000_RCTL_RST;
+ ew32(RCTL, rctl);
+ E1000_WRITE_FLUSH();
+ mdelay(5);
+
+ if (!adapter->ecdev && netif_running(netdev))
+ e1000_clean_all_rx_rings(adapter);
+}
+
+static void e1000_leave_82542_rst(struct e1000_adapter *adapter)
+{
+ struct e1000_hw *hw = &adapter->hw;
+ struct net_device *netdev = adapter->netdev;
+ u32 rctl;
+
+ rctl = er32(RCTL);
+ rctl &= ~E1000_RCTL_RST;
+ ew32(RCTL, rctl);
+ E1000_WRITE_FLUSH();
+ mdelay(5);
+
+ if (hw->pci_cmd_word & PCI_COMMAND_INVALIDATE)
+ e1000_pci_set_mwi(hw);
+
+ if (adapter->ecdev || netif_running(netdev)) {
+ /* No need to loop, because 82542 supports only 1 queue */
+ struct e1000_rx_ring *ring = &adapter->rx_ring[0];
+ e1000_configure_rx(adapter);
+ if (adapter->ecdev) {
+ /* fill rx ring completely! */
+ adapter->alloc_rx_buf(adapter, ring, ring->count);
+ } else {
+ /* this one leaves the last ring element unallocated! */
+ adapter->alloc_rx_buf(adapter, ring, E1000_DESC_UNUSED(ring));
+ }
+
+ }
+}
+
+/**
+ * e1000_set_mac - Change the Ethernet Address of the NIC
+ * @netdev: network interface device structure
+ * @p: pointer to an address structure
+ *
+ * Returns 0 on success, negative on failure
+ **/
+
+static int e1000_set_mac(struct net_device *netdev, void *p)
+{
+ struct e1000_adapter *adapter = netdev_priv(netdev);
+ struct e1000_hw *hw = &adapter->hw;
+ struct sockaddr *addr = p;
+
+ if (!is_valid_ether_addr(addr->sa_data))
+ return -EADDRNOTAVAIL;
+
+ /* 82542 2.0 needs to be in reset to write receive address registers */
+
+ if (hw->mac_type == e1000_82542_rev2_0)
+ e1000_enter_82542_rst(adapter);
+
+ memcpy(netdev->dev_addr, addr->sa_data, netdev->addr_len);
+ memcpy(hw->mac_addr, addr->sa_data, netdev->addr_len);
+
+ e1000_rar_set(hw, hw->mac_addr, 0);
+
+ /* With 82571 controllers, LAA may be overwritten (with the default)
+ * due to controller reset from the other port. */
+ if (hw->mac_type == e1000_82571) {
+ /* activate the work around */
+ hw->laa_is_present = 1;
+
+ /* Hold a copy of the LAA in RAR[14] This is done so that
+ * between the time RAR[0] gets clobbered and the time it
+ * gets fixed (in e1000_watchdog), the actual LAA is in one
+ * of the RARs and no incoming packets directed to this port
+ * are dropped. Eventually the LAA will be in RAR[0] and
+ * RAR[14] */
+ e1000_rar_set(hw, hw->mac_addr,
+ E1000_RAR_ENTRIES - 1);
+ }
+
+ if (hw->mac_type == e1000_82542_rev2_0)
+ e1000_leave_82542_rst(adapter);
+
+ return 0;
+}
+
+/**
+ * e1000_set_rx_mode - Secondary Unicast, Multicast and Promiscuous mode set
+ * @netdev: network interface device structure
+ *
+ * The set_rx_mode entry point is called whenever the unicast or multicast
+ * address lists or the network interface flags are updated. This routine is
+ * responsible for configuring the hardware for proper unicast, multicast,
+ * promiscuous mode, and all-multi behavior.
+ **/
+
+static void e1000_set_rx_mode(struct net_device *netdev)
+{
+ struct e1000_adapter *adapter = netdev_priv(netdev);
+ struct e1000_hw *hw = &adapter->hw;
+ struct dev_addr_list *uc_ptr;
+ struct dev_addr_list *mc_ptr;
+ u32 rctl;
+ u32 hash_value;
+ int i, rar_entries = E1000_RAR_ENTRIES;
+ int mta_reg_count = (hw->mac_type == e1000_ich8lan) ?
+ E1000_NUM_MTA_REGISTERS_ICH8LAN :
+ E1000_NUM_MTA_REGISTERS;
+
+ if (hw->mac_type == e1000_ich8lan)
+ rar_entries = E1000_RAR_ENTRIES_ICH8LAN;
+
+ /* reserve RAR[14] for LAA over-write work-around */
+ if (hw->mac_type == e1000_82571)
+ rar_entries--;
+
+ /* Check for Promiscuous and All Multicast modes */
+
+ rctl = er32(RCTL);
+
+ if (netdev->flags & IFF_PROMISC) {
+ rctl |= (E1000_RCTL_UPE | E1000_RCTL_MPE);
+ rctl &= ~E1000_RCTL_VFE;
+ } else {
+ if (netdev->flags & IFF_ALLMULTI) {
+ rctl |= E1000_RCTL_MPE;
+ } else {
+ rctl &= ~E1000_RCTL_MPE;
+ }
+ if (adapter->hw.mac_type != e1000_ich8lan)
+ rctl |= E1000_RCTL_VFE;
+ }
+
+ uc_ptr = NULL;
+ if (netdev->uc_count > rar_entries - 1) {
+ rctl |= E1000_RCTL_UPE;
+ } else if (!(netdev->flags & IFF_PROMISC)) {
+ rctl &= ~E1000_RCTL_UPE;
+ uc_ptr = netdev->uc_list;
+ }
+
+ ew32(RCTL, rctl);
+
+ /* 82542 2.0 needs to be in reset to write receive address registers */
+
+ if (hw->mac_type == e1000_82542_rev2_0)
+ e1000_enter_82542_rst(adapter);
+
+ /* load the first 14 addresses into the exact filters 1-14. Unicast
+ * addresses take precedence to avoid disabling unicast filtering
+ * when possible.
+ *
+ * RAR 0 is used for the station MAC address
+ * if there are not 14 addresses, go ahead and clear the filters
+ * -- with 82571 controllers only 0-13 entries are filled here
+ */
+ mc_ptr = netdev->mc_list;
+
+ for (i = 1; i < rar_entries; i++) {
+ if (uc_ptr) {
+ e1000_rar_set(hw, uc_ptr->da_addr, i);
+ uc_ptr = uc_ptr->next;
+ } else if (mc_ptr) {
+ e1000_rar_set(hw, mc_ptr->da_addr, i);
+ mc_ptr = mc_ptr->next;
+ } else {
+ E1000_WRITE_REG_ARRAY(hw, RA, i << 1, 0);
+ E1000_WRITE_FLUSH();
+ E1000_WRITE_REG_ARRAY(hw, RA, (i << 1) + 1, 0);
+ E1000_WRITE_FLUSH();
+ }
+ }
+ WARN_ON(uc_ptr != NULL);
+
+ /* clear the old settings from the multicast hash table */
+
+ for (i = 0; i < mta_reg_count; i++) {
+ E1000_WRITE_REG_ARRAY(hw, MTA, i, 0);
+ E1000_WRITE_FLUSH();
+ }
+
+ /* load any remaining addresses into the hash table */
+
+ for (; mc_ptr; mc_ptr = mc_ptr->next) {
+ hash_value = e1000_hash_mc_addr(hw, mc_ptr->da_addr);
+ e1000_mta_set(hw, hash_value);
+ }
+
+ if (hw->mac_type == e1000_82542_rev2_0)
+ e1000_leave_82542_rst(adapter);
+}
+
+/* Need to wait a few seconds after link up to get diagnostic information from
+ * the phy */
+
+static void e1000_update_phy_info(unsigned long data)
+{
+ struct e1000_adapter *adapter = (struct e1000_adapter *)data;
+ struct e1000_hw *hw = &adapter->hw;
+ e1000_phy_get_info(hw, &adapter->phy_info);
+}
+
+/**
+ * e1000_82547_tx_fifo_stall - Timer Call-back
+ * @data: pointer to adapter cast into an unsigned long
+ **/
+
+static void e1000_82547_tx_fifo_stall(unsigned long data)
+{
+ struct e1000_adapter *adapter = (struct e1000_adapter *)data;
+ struct e1000_hw *hw = &adapter->hw;
+ struct net_device *netdev = adapter->netdev;
+ u32 tctl;
+
+ if (atomic_read(&adapter->tx_fifo_stall)) {
+ if ((er32(TDT) == er32(TDH)) &&
+ (er32(TDFT) == er32(TDFH)) &&
+ (er32(TDFTS) == er32(TDFHS))) {
+ tctl = er32(TCTL);
+ ew32(TCTL, tctl & ~E1000_TCTL_EN);
+ ew32(TDFT, adapter->tx_head_addr);
+ ew32(TDFH, adapter->tx_head_addr);
+ ew32(TDFTS, adapter->tx_head_addr);
+ ew32(TDFHS, adapter->tx_head_addr);
+ ew32(TCTL, tctl);
+ E1000_WRITE_FLUSH();
+
+ adapter->tx_fifo_head = 0;
+ atomic_set(&adapter->tx_fifo_stall, 0);
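+ /* the netif Tx queue is only used when the stack drives the device */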
+ if (!adapter->ecdev) netif_wake_queue(netdev);
+ } else {
+ if (!adapter->ecdev)
+ mod_timer(&adapter->tx_fifo_stall_timer, jiffies + 1);
+ }
+ }
+}
+
+/**
+ * e1000_watchdog - Timer Call-back
+ * @data: pointer to adapter cast into an unsigned long
+ **/
+static void e1000_watchdog(unsigned long data)
+{
+ struct e1000_adapter *adapter = (struct e1000_adapter *)data;
+ struct e1000_hw *hw = &adapter->hw;
+ struct net_device *netdev = adapter->netdev;
+ struct e1000_tx_ring *txdr = adapter->tx_ring;
+ u32 link, tctl;
+ s32 ret_val;
+
+ ret_val = e1000_check_for_link(hw);
+ if ((ret_val == E1000_ERR_PHY) &&
+ (hw->phy_type == e1000_phy_igp_3) &&
+ (er32(CTRL) & E1000_PHY_CTRL_GBE_DISABLE)) {
+ /* See e1000_kumeran_lock_loss_workaround() */
+ DPRINTK(LINK, INFO,
+ "Gigabit has been disabled, downgrading speed\n");
+ }
+
+ if (hw->mac_type == e1000_82573) {
+ e1000_enable_tx_pkt_filtering(hw);
+ if (adapter->mng_vlan_id != hw->mng_cookie.vlan_id)
+ e1000_update_mng_vlan(adapter);
+ }
+
+ if ((hw->media_type == e1000_media_type_internal_serdes) &&
+ !(er32(TXCW) & E1000_TXCW_ANE))
+ link = !hw->serdes_link_down;
+ else
+ link = er32(STATUS) & E1000_STATUS_LU;
+
+ if (link) {
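+ /* link-up transition: EtherCAT devices track link state via
+ * ecdev_get_link(), others via the netif carrier flag */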
+ if ((adapter->ecdev && !ecdev_get_link(adapter->ecdev))
+ || (!adapter->ecdev && !netif_carrier_ok(netdev))) {
+ u32 ctrl;
+ bool txb2b = true;
+ e1000_get_speed_and_duplex(hw,
+ &adapter->link_speed,
+ &adapter->link_duplex);
+
+ ctrl = er32(CTRL);
+ DPRINTK(LINK, INFO, "NIC Link is Up %d Mbps %s, "
+ "Flow Control: %s\n",
+ adapter->link_speed,
+ adapter->link_duplex == FULL_DUPLEX ?
+ "Full Duplex" : "Half Duplex",
+ ((ctrl & E1000_CTRL_TFCE) && (ctrl &
+ E1000_CTRL_RFCE)) ? "RX/TX" : ((ctrl &
+ E1000_CTRL_RFCE) ? "RX" : ((ctrl &
+ E1000_CTRL_TFCE) ? "TX" : "None" )));
+
+ /* tweak tx_queue_len according to speed/duplex
+ * and adjust the timeout factor */
+ netdev->tx_queue_len = adapter->tx_queue_len;
+ adapter->tx_timeout_factor = 1;
+ switch (adapter->link_speed) {
+ case SPEED_10:
+ txb2b = false;
+ netdev->tx_queue_len = 10;
+ adapter->tx_timeout_factor = 8;
+ break;
+ case SPEED_100:
+ txb2b = false;
+ netdev->tx_queue_len = 100;
+ /* maybe add some timeout factor ? */
+ break;
+ }
+
+ if ((hw->mac_type == e1000_82571 ||
+ hw->mac_type == e1000_82572) &&
+ !txb2b) {
+ u32 tarc0;
+ tarc0 = er32(TARC0);
+ tarc0 &= ~(1 << 21);
+ ew32(TARC0, tarc0);
+ }
+
+ /* disable TSO for pcie and 10/100 speeds, to avoid
+ * some hardware issues */
+ if (!adapter->tso_force &&
+ hw->bus_type == e1000_bus_type_pci_express){
+ switch (adapter->link_speed) {
+ case SPEED_10:
+ case SPEED_100:
+ DPRINTK(PROBE,INFO,
+ "10/100 speed: disabling TSO\n");
+ netdev->features &= ~NETIF_F_TSO;
+ netdev->features &= ~NETIF_F_TSO6;
+ break;
+ case SPEED_1000:
+ netdev->features |= NETIF_F_TSO;
+ netdev->features |= NETIF_F_TSO6;
+ break;
+ default:
+ /* oops */
+ break;
+ }
+ }
+
+ /* enable transmits in the hardware, need to do this
+ * after setting TARC0 */
+ tctl = er32(TCTL);
+ tctl |= E1000_TCTL_EN;
+ ew32(TCTL, tctl);
+
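+ /* report the new link state either to the EtherCAT master or to the stack */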
+ if (adapter->ecdev) {
+ ecdev_set_link(adapter->ecdev, 1);
+ } else {
+ netif_carrier_on(netdev);
+ netif_wake_queue(netdev);
+ mod_timer(&adapter->phy_info_timer, round_jiffies(jiffies + 2 * HZ));
+ }
+ adapter->smartspeed = 0;
+ } else {
+ /* make sure the receive unit is started */
+ if (hw->rx_needs_kicking) {
+ u32 rctl = er32(RCTL);
+ ew32(RCTL, rctl | E1000_RCTL_EN);
+ }
+ }
+ } else {
+ if ((adapter->ecdev && ecdev_get_link(adapter->ecdev))
+ || (!adapter->ecdev && netif_carrier_ok(netdev))) {
+ adapter->link_speed = 0;
+ adapter->link_duplex = 0;
+ DPRINTK(LINK, INFO, "NIC Link is Down\n");
+ if (adapter->ecdev) {
+ ecdev_set_link(adapter->ecdev, 0);
+ } else {
+ netif_carrier_off(netdev);
+ netif_stop_queue(netdev);
+ mod_timer(&adapter->phy_info_timer, round_jiffies(jiffies + 2 * HZ));
+ }
+
+ /* 80003ES2LAN workaround--
+ * For packet buffer work-around on link down event;
+ * disable receives in the ISR and
+ * reset device here in the watchdog
+ */
+ if (hw->mac_type == e1000_80003es2lan)
+ /* reset device */
+ schedule_work(&adapter->reset_task);
+ }
+
+ e1000_smartspeed(adapter);
+ }
+
+ e1000_update_stats(adapter);
+
+ hw->tx_packet_delta = adapter->stats.tpt - adapter->tpt_old;
+ adapter->tpt_old = adapter->stats.tpt;
+ hw->collision_delta = adapter->stats.colc - adapter->colc_old;
+ adapter->colc_old = adapter->stats.colc;
+
+ adapter->gorcl = adapter->stats.gorcl - adapter->gorcl_old;
+ adapter->gorcl_old = adapter->stats.gorcl;
+ adapter->gotcl = adapter->stats.gotcl - adapter->gotcl_old;
+ adapter->gotcl_old = adapter->stats.gotcl;
+
+ e1000_update_adaptive(hw);
+
+ if (!adapter->ecdev && !netif_carrier_ok(netdev)) {
+ if (E1000_DESC_UNUSED(txdr) + 1 < txdr->count) {
+ /* We've lost link, so the controller stops DMA,
+ * but we've got queued Tx work that's never going
+ * to get done, so reset controller to flush Tx.
+ * (Do the reset outside of interrupt context). */
+ adapter->tx_timeout_count++;
+ schedule_work(&adapter->reset_task);
+ }
+ }
+
+ /* Cause software interrupt to ensure rx ring is cleaned */
+ ew32(ICS, E1000_ICS_RXDMT0);
+
+ /* Force detection of hung controller every watchdog period */
+ if (!adapter->ecdev) adapter->detect_tx_hung = true;
+
+ /* With 82571 controllers, LAA may be overwritten due to controller
+ * reset from the other port. Set the appropriate LAA in RAR[0] */
+ if (hw->mac_type == e1000_82571 && hw->laa_is_present)
+ e1000_rar_set(hw, hw->mac_addr, 0);
+
+ /* Reset the timer */
+ if (!adapter->ecdev)
+ mod_timer(&adapter->watchdog_timer, round_jiffies(jiffies + 2 * HZ));
+}
+
+enum latency_range {
+ lowest_latency = 0,
+ low_latency = 1,
+ bulk_latency = 2,
+ latency_invalid = 255
+};
+
+/**
+ * e1000_update_itr - update the dynamic ITR value based on statistics
+ * Stores a new ITR value based on packets and byte
+ * counts during the last interrupt. The advantage of per interrupt
+ * computation is faster updates and more accurate ITR for the current
+ * traffic pattern. Constants in this function were computed
+ * based on theoretical maximum wire speed and thresholds were set based
+ * on testing data as well as attempting to minimize response time
+ * while increasing bulk throughput.
+ * this functionality is controlled by the InterruptThrottleRate module
+ * parameter (see e1000_param.c)
+ * @adapter: pointer to adapter
+ * @itr_setting: current adapter->itr
+ * @packets: the number of packets during this measurement interval
+ * @bytes: the number of bytes during this measurement interval
+ **/
+static unsigned int e1000_update_itr(struct e1000_adapter *adapter,
+ u16 itr_setting, int packets, int bytes)
+{
+ unsigned int retval = itr_setting;
+ struct e1000_hw *hw = &adapter->hw;
+
+ if (unlikely(hw->mac_type < e1000_82540))
+ goto update_itr_done;
+
+ if (packets == 0)
+ goto update_itr_done;
+
+ switch (itr_setting) {
+ case lowest_latency:
+ /* jumbo frames get bulk treatment*/
+ if (bytes/packets > 8000)
+ retval = bulk_latency;
+ else if ((packets < 5) && (bytes > 512))
+ retval = low_latency;
+ break;
+ case low_latency: /* 50 usec aka 20000 ints/s */
+ if (bytes > 10000) {
+ /* jumbo frames need bulk latency setting */
+ if (bytes/packets > 8000)
+ retval = bulk_latency;
+ else if ((packets < 10) || ((bytes/packets) > 1200))
+ retval = bulk_latency;
+ else if ((packets > 35))
+ retval = lowest_latency;
+ } else if (bytes/packets > 2000)
+ retval = bulk_latency;
+ else if (packets <= 2 && bytes < 512)
+ retval = lowest_latency;
+ break;
+ case bulk_latency: /* 250 usec aka 4000 ints/s */
+ if (bytes > 25000) {
+ if (packets > 35)
+ retval = low_latency;
+ } else if (bytes < 6000) {
+ retval = low_latency;
+ }
+ break;
+ }
+
+update_itr_done:
+ return retval;
+}
+
+static void e1000_set_itr(struct e1000_adapter *adapter)
+{
+ struct e1000_hw *hw = &adapter->hw;
+ u16 current_itr;
+ u32 new_itr = adapter->itr;
+
+ if (unlikely(hw->mac_type < e1000_82540))
+ return;
+
+ /* for non-gigabit speeds, just fix the interrupt rate at 4000 */
+ if (unlikely(adapter->link_speed != SPEED_1000)) {
+ current_itr = 0;
+ new_itr = 4000;
+ goto set_itr_now;
+ }
+
+ adapter->tx_itr = e1000_update_itr(adapter,
+ adapter->tx_itr,
+ adapter->total_tx_packets,
+ adapter->total_tx_bytes);
+ /* conservative mode (itr 3) eliminates the lowest_latency setting */
+ if (adapter->itr_setting == 3 && adapter->tx_itr == lowest_latency)
+ adapter->tx_itr = low_latency;
+
+ adapter->rx_itr = e1000_update_itr(adapter,
+ adapter->rx_itr,
+ adapter->total_rx_packets,
+ adapter->total_rx_bytes);
+ /* conservative mode (itr 3) eliminates the lowest_latency setting */
+ if (adapter->itr_setting == 3 && adapter->rx_itr == lowest_latency)
+ adapter->rx_itr = low_latency;
+
+ current_itr = max(adapter->rx_itr, adapter->tx_itr);
+
+ switch (current_itr) {
+ /* counts and packets in update_itr are dependent on these numbers */
+ case lowest_latency:
+ new_itr = 70000;
+ break;
+ case low_latency:
+ new_itr = 20000; /* aka hwitr = ~200 */
+ break;
+ case bulk_latency:
+ new_itr = 4000;
+ break;
+ default:
+ break;
+ }
+
+set_itr_now:
+ if (new_itr != adapter->itr) {
+ /* this attempts to bias the interrupt rate towards Bulk
+ * by adding intermediate steps when interrupt rate is
+ * increasing */
+ new_itr = new_itr > adapter->itr ?
+ min(adapter->itr + (new_itr >> 2), new_itr) :
+ new_itr;
+ adapter->itr = new_itr;
+ ew32(ITR, 1000000000 / (new_itr * 256));
+ }
+
+ return;
+}
+
+#define E1000_TX_FLAGS_CSUM 0x00000001
+#define E1000_TX_FLAGS_VLAN 0x00000002
+#define E1000_TX_FLAGS_TSO 0x00000004
+#define E1000_TX_FLAGS_IPV4 0x00000008
+#define E1000_TX_FLAGS_VLAN_MASK 0xffff0000
+#define E1000_TX_FLAGS_VLAN_SHIFT 16
+
+static int e1000_tso(struct e1000_adapter *adapter,
+ struct e1000_tx_ring *tx_ring, struct sk_buff *skb)
+{
+ struct e1000_context_desc *context_desc;
+ struct e1000_buffer *buffer_info;
+ unsigned int i;
+ u32 cmd_length = 0;
+ u16 ipcse = 0, tucse, mss;
+ u8 ipcss, ipcso, tucss, tucso, hdr_len;
+ int err;
+
+ if (skb_is_gso(skb)) {
+ if (skb_header_cloned(skb)) {
+ err = pskb_expand_head(skb, 0, 0, GFP_ATOMIC);
+ if (err)
+ return err;
+ }
+
+ hdr_len = skb_transport_offset(skb) + tcp_hdrlen(skb);
+ mss = skb_shinfo(skb)->gso_size;
+ if (skb->protocol == htons(ETH_P_IP)) {
+ struct iphdr *iph = ip_hdr(skb);
+ iph->tot_len = 0;
+ iph->check = 0;
+ tcp_hdr(skb)->check = ~csum_tcpudp_magic(iph->saddr,
+ iph->daddr, 0,
+ IPPROTO_TCP,
+ 0);
+ cmd_length = E1000_TXD_CMD_IP;
+ ipcse = skb_transport_offset(skb) - 1;
+ } else if (skb->protocol == htons(ETH_P_IPV6)) {
+ ipv6_hdr(skb)->payload_len = 0;
+ tcp_hdr(skb)->check =
+ ~csum_ipv6_magic(&ipv6_hdr(skb)->saddr,
+ &ipv6_hdr(skb)->daddr,
+ 0, IPPROTO_TCP, 0);
+ ipcse = 0;
+ }
+ ipcss = skb_network_offset(skb);
+ ipcso = (void *)&(ip_hdr(skb)->check) - (void *)skb->data;
+ tucss = skb_transport_offset(skb);
+ tucso = (void *)&(tcp_hdr(skb)->check) - (void *)skb->data;
+ tucse = 0;
+
+ cmd_length |= (E1000_TXD_CMD_DEXT | E1000_TXD_CMD_TSE |
+ E1000_TXD_CMD_TCP | (skb->len - (hdr_len)));
+
+ i = tx_ring->next_to_use;
+ context_desc = E1000_CONTEXT_DESC(*tx_ring, i);
+ buffer_info = &tx_ring->buffer_info[i];
+
+ context_desc->lower_setup.ip_fields.ipcss = ipcss;
+ context_desc->lower_setup.ip_fields.ipcso = ipcso;
+ context_desc->lower_setup.ip_fields.ipcse = cpu_to_le16(ipcse);
+ context_desc->upper_setup.tcp_fields.tucss = tucss;
+ context_desc->upper_setup.tcp_fields.tucso = tucso;
+ context_desc->upper_setup.tcp_fields.tucse = cpu_to_le16(tucse);
+ context_desc->tcp_seg_setup.fields.mss = cpu_to_le16(mss);
+ context_desc->tcp_seg_setup.fields.hdr_len = hdr_len;
+ context_desc->cmd_and_length = cpu_to_le32(cmd_length);
+
+ buffer_info->time_stamp = jiffies;
+ buffer_info->next_to_watch = i;
+
+ if (++i == tx_ring->count) i = 0;
+ tx_ring->next_to_use = i;
+
+ return true;
+ }
+ return false;
+}
+
+static bool e1000_tx_csum(struct e1000_adapter *adapter,
+ struct e1000_tx_ring *tx_ring, struct sk_buff *skb)
+{
+ struct e1000_context_desc *context_desc;
+ struct e1000_buffer *buffer_info;
+ unsigned int i;
+ u8 css;
+ u32 cmd_len = E1000_TXD_CMD_DEXT;
+
+ if (skb->ip_summed != CHECKSUM_PARTIAL)
+ return false;
+
+ switch (skb->protocol) {
+ case __constant_htons(ETH_P_IP):
+ if (ip_hdr(skb)->protocol == IPPROTO_TCP)
+ cmd_len |= E1000_TXD_CMD_TCP;
+ break;
+ case __constant_htons(ETH_P_IPV6):
+ /* XXX not handling all IPV6 headers */
+ if (ipv6_hdr(skb)->nexthdr == IPPROTO_TCP)
+ cmd_len |= E1000_TXD_CMD_TCP;
+ break;
+ default:
+ if (unlikely(net_ratelimit()))
+ DPRINTK(DRV, WARNING,
+ "checksum_partial proto=%x!\n", skb->protocol);
+ break;
+ }
+
+ css = skb_transport_offset(skb);
+
+ i = tx_ring->next_to_use;
+ buffer_info = &tx_ring->buffer_info[i];
+ context_desc = E1000_CONTEXT_DESC(*tx_ring, i);
+
+ context_desc->lower_setup.ip_config = 0;
+ context_desc->upper_setup.tcp_fields.tucss = css;
+ context_desc->upper_setup.tcp_fields.tucso =
+ css + skb->csum_offset;
+ context_desc->upper_setup.tcp_fields.tucse = 0;
+ context_desc->tcp_seg_setup.data = 0;
+ context_desc->cmd_and_length = cpu_to_le32(cmd_len);
+
+ buffer_info->time_stamp = jiffies;
+ buffer_info->next_to_watch = i;
+
+ if (unlikely(++i == tx_ring->count)) i = 0;
+ tx_ring->next_to_use = i;
+
+ return true;
+}
+
+#define E1000_MAX_TXD_PWR 12
+#define E1000_MAX_DATA_PER_TXD (1<<E1000_MAX_TXD_PWR)
+
+static int e1000_tx_map(struct e1000_adapter *adapter,
+ struct e1000_tx_ring *tx_ring,
+ struct sk_buff *skb, unsigned int first,
+ unsigned int max_per_txd, unsigned int nr_frags,
+ unsigned int mss)
+{
+ struct e1000_hw *hw = &adapter->hw;
+ struct e1000_buffer *buffer_info;
+ unsigned int len = skb->len;
+ unsigned int offset = 0, size, count = 0, i;
+ unsigned int f;
+ len -= skb->data_len;
+
+ i = tx_ring->next_to_use;
+
+ while (len) {
+ buffer_info = &tx_ring->buffer_info[i];
+ size = min(len, max_per_txd);
+ /* Workaround for Controller erratum --
+ * descriptor for non-tso packet in a linear SKB that follows a
+ * tso gets written back prematurely before the data is fully
+ * DMA'd to the controller */
+ if (!skb->data_len && tx_ring->last_tx_tso &&
+ !skb_is_gso(skb)) {
+ tx_ring->last_tx_tso = 0;
+ size -= 4;
+ }
+
+ /* Workaround for premature desc write-backs
+ * in TSO mode. Append 4-byte sentinel desc */
+ if (unlikely(mss && !nr_frags && size == len && size > 8))
+ size -= 4;
+ /* work-around for errata 10 and it applies
+ * to all controllers in PCI-X mode
+ * The fix is to make sure that the first descriptor of a
+ * packet is smaller than 2048 - 16 - 16 (or 2016) bytes
+ */
+ if (unlikely((hw->bus_type == e1000_bus_type_pcix) &&
+ (size > 2015) && count == 0))
+ size = 2015;
+
+ /* Workaround for potential 82544 hang in PCI-X. Avoid
+ * terminating buffers within evenly-aligned dwords. */
+ if (unlikely(adapter->pcix_82544 &&
+ !((unsigned long)(skb->data + offset + size - 1) & 4) &&
+ size > 4))
+ size -= 4;
+
+ buffer_info->length = size;
+ buffer_info->dma =
+ pci_map_single(adapter->pdev,
+ skb->data + offset,
+ size,
+ PCI_DMA_TODEVICE);
+ buffer_info->time_stamp = jiffies;
+ buffer_info->next_to_watch = i;
+
+ len -= size;
+ offset += size;
+ count++;
+ if (unlikely(++i == tx_ring->count)) i = 0;
+ }
+
+ for (f = 0; f < nr_frags; f++) {
+ struct skb_frag_struct *frag;
+
+ frag = &skb_shinfo(skb)->frags[f];
+ len = frag->size;
+ offset = frag->page_offset;
+
+ while (len) {
+ buffer_info = &tx_ring->buffer_info[i];
+ size = min(len, max_per_txd);
+ /* Workaround for premature desc write-backs
+ * in TSO mode. Append 4-byte sentinel desc */
+ if (unlikely(mss && f == (nr_frags-1) && size == len && size > 8))
+ size -= 4;
+ /* Workaround for potential 82544 hang in PCI-X.
+ * Avoid terminating buffers within evenly-aligned
+ * dwords. */
+ if (unlikely(adapter->pcix_82544 &&
+ !((unsigned long)(frag->page+offset+size-1) & 4) &&
+ size > 4))
+ size -= 4;
+
+ buffer_info->length = size;
+ buffer_info->dma =
+ pci_map_page(adapter->pdev,
+ frag->page,
+ offset,
+ size,
+ PCI_DMA_TODEVICE);
+ buffer_info->time_stamp = jiffies;
+ buffer_info->next_to_watch = i;
+
+ len -= size;
+ offset += size;
+ count++;
+ if (unlikely(++i == tx_ring->count)) i = 0;
+ }
+ }
+
+ i = (i == 0) ? tx_ring->count - 1 : i - 1;
+ tx_ring->buffer_info[i].skb = skb;
+ tx_ring->buffer_info[first].next_to_watch = i;
+
+ return count;
+}
+
+static void e1000_tx_queue(struct e1000_adapter *adapter,
+ struct e1000_tx_ring *tx_ring, int tx_flags,
+ int count)
+{
+ struct e1000_hw *hw = &adapter->hw;
+ struct e1000_tx_desc *tx_desc = NULL;
+ struct e1000_buffer *buffer_info;
+ u32 txd_upper = 0, txd_lower = E1000_TXD_CMD_IFCS;
+ unsigned int i;
+
+ if (likely(tx_flags & E1000_TX_FLAGS_TSO)) {
+ txd_lower |= E1000_TXD_CMD_DEXT | E1000_TXD_DTYP_D |
+ E1000_TXD_CMD_TSE;
+ txd_upper |= E1000_TXD_POPTS_TXSM << 8;
+
+ if (likely(tx_flags & E1000_TX_FLAGS_IPV4))
+ txd_upper |= E1000_TXD_POPTS_IXSM << 8;
+ }
+
+ if (likely(tx_flags & E1000_TX_FLAGS_CSUM)) {
+ txd_lower |= E1000_TXD_CMD_DEXT | E1000_TXD_DTYP_D;
+ txd_upper |= E1000_TXD_POPTS_TXSM << 8;
+ }
+
+ if (unlikely(tx_flags & E1000_TX_FLAGS_VLAN)) {
+ txd_lower |= E1000_TXD_CMD_VLE;
+ txd_upper |= (tx_flags & E1000_TX_FLAGS_VLAN_MASK);
+ }
+
+ i = tx_ring->next_to_use;
+
+ while (count--) {
+ buffer_info = &tx_ring->buffer_info[i];
+ tx_desc = E1000_TX_DESC(*tx_ring, i);
+ tx_desc->buffer_addr = cpu_to_le64(buffer_info->dma);
+ tx_desc->lower.data =
+ cpu_to_le32(txd_lower | buffer_info->length);
+ tx_desc->upper.data = cpu_to_le32(txd_upper);
+ if (unlikely(++i == tx_ring->count)) i = 0;
+ }
+
+ tx_desc->lower.data |= cpu_to_le32(adapter->txd_cmd);
+
+ /* Force memory writes to complete before letting h/w
+ * know there are new descriptors to fetch. (Only
+ * applicable for weak-ordered memory model archs,
+ * such as IA-64). */
+ wmb();
+
+ tx_ring->next_to_use = i;
+ writel(i, hw->hw_addr + tx_ring->tdt);
+ /* we need this if more than one processor can write to our tail
+ * at a time, it synchronizes IO on IA64/Altix systems */
+ mmiowb();
+}
+
+/**
+ * 82547 workaround to avoid controller hang in half-duplex environment.
+ * The workaround is to avoid queuing a large packet that would span
+ * the internal Tx FIFO ring boundary by notifying the stack to resend
+ * the packet at a later time. This gives the Tx FIFO an opportunity to
+ * flush all packets. When that occurs, we reset the Tx FIFO pointers
+ * to the beginning of the Tx FIFO.
+ **/
+
+#define E1000_FIFO_HDR 0x10
+#define E1000_82547_PAD_LEN 0x3E0
+
+static int e1000_82547_fifo_workaround(struct e1000_adapter *adapter,
+ struct sk_buff *skb)
+{
+ u32 fifo_space = adapter->tx_fifo_size - adapter->tx_fifo_head;
+ u32 skb_fifo_len = skb->len + E1000_FIFO_HDR;
+
+ skb_fifo_len = ALIGN(skb_fifo_len, E1000_FIFO_HDR);
+
+ if (adapter->link_duplex != HALF_DUPLEX)
+ goto no_fifo_stall_required;
+
+ if (atomic_read(&adapter->tx_fifo_stall))
+ return 1;
+
+ if (skb_fifo_len >= (E1000_82547_PAD_LEN + fifo_space)) {
+ atomic_set(&adapter->tx_fifo_stall, 1);
+ return 1;
+ }
+
+no_fifo_stall_required:
+ adapter->tx_fifo_head += skb_fifo_len;
+ if (adapter->tx_fifo_head >= adapter->tx_fifo_size)
+ adapter->tx_fifo_head -= adapter->tx_fifo_size;
+ return 0;
+}
+
+#define MINIMUM_DHCP_PACKET_SIZE 282
+static int e1000_transfer_dhcp_info(struct e1000_adapter *adapter,
+ struct sk_buff *skb)
+{
+ struct e1000_hw *hw = &adapter->hw;
+ u16 length, offset;
+ if (vlan_tx_tag_present(skb)) {
+ if (!((vlan_tx_tag_get(skb) == hw->mng_cookie.vlan_id) &&
+ ( hw->mng_cookie.status &
+ E1000_MNG_DHCP_COOKIE_STATUS_VLAN_SUPPORT)) )
+ return 0;
+ }
+ if (skb->len > MINIMUM_DHCP_PACKET_SIZE) {
+ struct ethhdr *eth = (struct ethhdr *)skb->data;
+ if ((htons(ETH_P_IP) == eth->h_proto)) {
+ const struct iphdr *ip =
+ (struct iphdr *)((u8 *)skb->data+14);
+ if (IPPROTO_UDP == ip->protocol) {
+ struct udphdr *udp =
+ (struct udphdr *)((u8 *)ip +
+ (ip->ihl << 2));
+ if (ntohs(udp->dest) == 67) {
+ offset = (u8 *)udp + 8 - skb->data;
+ length = skb->len - offset;
+
+ return e1000_mng_write_dhcp_info(hw,
+ (u8 *)udp + 8,
+ length);
+ }
+ }
+ }
+ }
+ return 0;
+}
+
+static int __e1000_maybe_stop_tx(struct net_device *netdev, int size)
+{
+ struct e1000_adapter *adapter = netdev_priv(netdev);
+ struct e1000_tx_ring *tx_ring = adapter->tx_ring;
+
+ netif_stop_queue(netdev);
+ /* Herbert's original patch had:
+ * smp_mb__after_netif_stop_queue();
+ * but since that doesn't exist yet, just open code it. */
+ smp_mb();
+
+ /* We need to check again in a case another CPU has just
+ * made room available. */
+ if (likely(E1000_DESC_UNUSED(tx_ring) < size))
+ return -EBUSY;
+
+ /* A reprieve! */
+ netif_start_queue(netdev);
+ ++adapter->restart_queue;
+ return 0;
+}
+
+static int e1000_maybe_stop_tx(struct net_device *netdev,
+ struct e1000_tx_ring *tx_ring, int size)
+{
+ if (likely(E1000_DESC_UNUSED(tx_ring) >= size))
+ return 0;
+ return __e1000_maybe_stop_tx(netdev, size);
+}
+
+#define TXD_USE_COUNT(S, X) (((S) >> (X)) + 1 )
+static int e1000_xmit_frame(struct sk_buff *skb, struct net_device *netdev)
+{
+ struct e1000_adapter *adapter = netdev_priv(netdev);
+ struct e1000_hw *hw = &adapter->hw;
+ struct e1000_tx_ring *tx_ring;
+ unsigned int first, max_per_txd = E1000_MAX_DATA_PER_TXD;
+ unsigned int max_txd_pwr = E1000_MAX_TXD_PWR;
+ unsigned int tx_flags = 0;
+ unsigned int len = skb->len - skb->data_len;
+ unsigned long flags = 0;
+ unsigned int nr_frags = 0;
+ unsigned int mss = 0;
+ int count = 0;
+ int tso;
+ unsigned int f;
+
+ /* This goes back to the question of how to logically map a tx queue
+ * to a flow. Right now, performance is impacted slightly negatively
+ * if using multiple tx queues. If the stack breaks away from a
+ * single qdisc implementation, we can look at this again. */
+ tx_ring = adapter->tx_ring;
+
+ if (unlikely(skb->len <= 0)) {
+ if (!adapter->ecdev)
+ dev_kfree_skb_any(skb);
+ return NETDEV_TX_OK;
+ }
+
+ /* 82571 and newer doesn't need the workaround that limited descriptor
+ * length to 4kB */
+ if (hw->mac_type >= e1000_82571)
+ max_per_txd = 8192;
+
+ mss = skb_shinfo(skb)->gso_size;
+ /* The controller does a simple calculation to
+ * make sure there is enough room in the FIFO before
+ * initiating the DMA for each buffer. The calc is:
+ * 4 = ceil(buffer len/mss). To make sure we don't
+ * overrun the FIFO, adjust the max buffer len if mss
+ * drops. */
+ if (mss) {
+ u8 hdr_len;
+ max_per_txd = min(mss << 2, max_per_txd);
+ max_txd_pwr = fls(max_per_txd) - 1;
+
+ /* TSO Workaround for 82571/2/3 Controllers -- if skb->data
+ * points to just header, pull a few bytes of payload from
+ * frags into skb->data */
+ hdr_len = skb_transport_offset(skb) + tcp_hdrlen(skb);
+ if (skb->data_len && hdr_len == len) {
+ switch (hw->mac_type) {
+ unsigned int pull_size;
+ case e1000_82544:
+ /* Make sure we have room to chop off 4 bytes,
+ * and that the end alignment will work out to
+ * this hardware's requirements
+ * NOTE: this is a TSO only workaround
+ * if end byte alignment not correct move us
+ * into the next dword */
+ if ((unsigned long)(skb_tail_pointer(skb) - 1) & 4)
+ break;
+ /* fall through */
+ case e1000_82571:
+ case e1000_82572:
+ case e1000_82573:
+ case e1000_ich8lan:
+ pull_size = min((unsigned int)4, skb->data_len);
+ if (!__pskb_pull_tail(skb, pull_size)) {
+ DPRINTK(DRV, ERR,
+ "__pskb_pull_tail failed.\n");
+ dev_kfree_skb_any(skb);
+ return NETDEV_TX_OK;
+ }
+ len = skb->len - skb->data_len;
+ break;
+ default:
+ /* do nothing */
+ break;
+ }
+ }
+ }
+
+ /* reserve a descriptor for the offload context */
+ if ((mss) || (skb->ip_summed == CHECKSUM_PARTIAL))
+ count++;
+ count++;
+
+ /* Controller Erratum workaround */
+ if (!skb->data_len && tx_ring->last_tx_tso && !skb_is_gso(skb))
+ count++;
+
+ count += TXD_USE_COUNT(len, max_txd_pwr);
+
+ if (adapter->pcix_82544)
+ count++;
+
+ /* work-around for errata 10 and it applies to all controllers
+ * in PCI-X mode, so add one more descriptor to the count
+ */
+ if (unlikely((hw->bus_type == e1000_bus_type_pcix) &&
+ (len > 2015)))
+ count++;
+
+ nr_frags = skb_shinfo(skb)->nr_frags;
+ for (f = 0; f < nr_frags; f++)
+ count += TXD_USE_COUNT(skb_shinfo(skb)->frags[f].size,
+ max_txd_pwr);
+ if (adapter->pcix_82544)
+ count += nr_frags;
+
+
+ if (hw->tx_pkt_filtering &&
+ (hw->mac_type == e1000_82573))
+ e1000_transfer_dhcp_info(adapter, skb);
+
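+ /* only lock the Tx ring when the stack may submit frames concurrently */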
+ if (!adapter->ecdev &&
+ !spin_trylock_irqsave(&tx_ring->tx_lock, flags))
+ /* Collision - tell upper layer to requeue */
+ return NETDEV_TX_LOCKED;
+
+ /* need: count + 2 desc gap to keep tail from touching
+ * head, otherwise try next time */
+ if (unlikely(e1000_maybe_stop_tx(netdev, tx_ring, count + 2))) {
+ if (!adapter->ecdev) {
+ spin_unlock_irqrestore(&tx_ring->tx_lock, flags);
+ }
+ return NETDEV_TX_BUSY;
+ }
+
+ if (unlikely(hw->mac_type == e1000_82547)) {
+ if (unlikely(e1000_82547_fifo_workaround(adapter, skb))) {
+ if (!adapter->ecdev) {
+ netif_stop_queue(netdev);
+ mod_timer(&adapter->tx_fifo_stall_timer, jiffies + 1);
+ spin_unlock_irqrestore(&tx_ring->tx_lock, flags);
+ }
+ return NETDEV_TX_BUSY;
+ }
+ }
+
+ if (unlikely(adapter->vlgrp && vlan_tx_tag_present(skb))) {
+ tx_flags |= E1000_TX_FLAGS_VLAN;
+ tx_flags |= (vlan_tx_tag_get(skb) << E1000_TX_FLAGS_VLAN_SHIFT);
+ }
+
+ first = tx_ring->next_to_use;
+
+ tso = e1000_tso(adapter, tx_ring, skb);
+ if (tso < 0) {
+ if (!adapter->ecdev) {
+ dev_kfree_skb_any(skb);
+ spin_unlock_irqrestore(&tx_ring->tx_lock, flags);
+ }
+ return NETDEV_TX_OK;
+ }
+
+ if (likely(tso)) {
+ tx_ring->last_tx_tso = 1;
+ tx_flags |= E1000_TX_FLAGS_TSO;
+ } else if (likely(e1000_tx_csum(adapter, tx_ring, skb)))
+ tx_flags |= E1000_TX_FLAGS_CSUM;
+
+ /* Old method was to assume IPv4 packet by default if TSO was enabled.
+ * 82571 hardware supports TSO capabilities for IPv6 as well...
+ * no longer assume, we must. */
+ if (likely(skb->protocol == htons(ETH_P_IP)))
+ tx_flags |= E1000_TX_FLAGS_IPV4;
+
+ e1000_tx_queue(adapter, tx_ring, tx_flags,
+ e1000_tx_map(adapter, tx_ring, skb, first,
+ max_per_txd, nr_frags, mss));
+
+ netdev->trans_start = jiffies;
+
+ if (!adapter->ecdev) {
+ /* Make sure there is space in the ring for the next send. */
+ e1000_maybe_stop_tx(netdev, tx_ring, MAX_SKB_FRAGS + 2);
+
+ spin_unlock_irqrestore(&tx_ring->tx_lock, flags);
+ }
+ return NETDEV_TX_OK;
+}
+
+/**
+ * e1000_tx_timeout - Respond to a Tx Hang
+ * @netdev: network interface device structure
+ **/
+
+static void e1000_tx_timeout(struct net_device *netdev)
+{
+ struct e1000_adapter *adapter = netdev_priv(netdev);
+
+ /* Do the reset outside of interrupt context */
+ adapter->tx_timeout_count++;
+ schedule_work(&adapter->reset_task);
+}
+
+static void e1000_reset_task(struct work_struct *work)
+{
+ struct e1000_adapter *adapter =
+ container_of(work, struct e1000_adapter, reset_task);
+
+ e1000_reinit_locked(adapter);
+}
+
+/**
+ * e1000_get_stats - Get System Network Statistics
+ * @netdev: network interface device structure
+ *
+ * Returns the address of the device statistics structure.
+ * The statistics are actually updated from the timer callback.
+ **/
+
+static struct net_device_stats *e1000_get_stats(struct net_device *netdev)
+{
+ struct e1000_adapter *adapter = netdev_priv(netdev);
+
+ /* only return the current stats */
+ return &adapter->net_stats;
+}
+
+/**
+ * e1000_change_mtu - Change the Maximum Transfer Unit
+ * @netdev: network interface device structure
+ * @new_mtu: new value for maximum frame size
+ *
+ * Returns 0 on success, negative on failure
+ **/
+
+static int e1000_change_mtu(struct net_device *netdev, int new_mtu)
+{
+ struct e1000_adapter *adapter = netdev_priv(netdev);
+ struct e1000_hw *hw = &adapter->hw;
+ int max_frame = new_mtu + ENET_HEADER_SIZE + ETHERNET_FCS_SIZE;
+ u16 eeprom_data = 0;
+
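+ /* refuse MTU changes while the device is attached to the EtherCAT master */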
+ if (adapter->ecdev)
+ return -EBUSY;
+
+ if ((max_frame < MINIMUM_ETHERNET_FRAME_SIZE) ||
+ (max_frame > MAX_JUMBO_FRAME_SIZE)) {
+ DPRINTK(PROBE, ERR, "Invalid MTU setting\n");
+ return -EINVAL;
+ }
+
+ /* Adapter-specific max frame size limits. */
+ switch (hw->mac_type) {
+ case e1000_undefined ... e1000_82542_rev2_1:
+ case e1000_ich8lan:
+ if (max_frame > MAXIMUM_ETHERNET_FRAME_SIZE) {
+ DPRINTK(PROBE, ERR, "Jumbo Frames not supported.\n");
+ return -EINVAL;
+ }
+ break;
+ case e1000_82573:
+ /* Jumbo Frames not supported if:
+ * - this is not an 82573L device
+ * - ASPM is enabled in any way (0x1A bits 3:2) */
+ e1000_read_eeprom(hw, EEPROM_INIT_3GIO_3, 1,
+ &eeprom_data);
+ if ((hw->device_id != E1000_DEV_ID_82573L) ||
+ (eeprom_data & EEPROM_WORD1A_ASPM_MASK)) {
+ if (max_frame > MAXIMUM_ETHERNET_FRAME_SIZE) {
+ DPRINTK(PROBE, ERR,
+ "Jumbo Frames not supported.\n");
+ return -EINVAL;
+ }
+ break;
+ }
+ /* ERT will be enabled later to enable wire speed receives */
+
+ /* fall through to get support */
+ case e1000_82571:
+ case e1000_82572:
+ case e1000_80003es2lan:
+#define MAX_STD_JUMBO_FRAME_SIZE 9234
+ if (max_frame > MAX_STD_JUMBO_FRAME_SIZE) {
+ DPRINTK(PROBE, ERR, "MTU > 9216 not supported.\n");
+ return -EINVAL;
+ }
+ break;
+ default:
+ /* Capable of supporting up to MAX_JUMBO_FRAME_SIZE limit. */
+ break;
+ }
+
+ /* NOTE: netdev_alloc_skb reserves 16 bytes, and typically NET_IP_ALIGN
+ * means we reserve 2 more, this pushes us to allocate from the next
+ * larger slab size
+ * i.e. RXBUFFER_2048 --> size-4096 slab */
+
+ if (max_frame <= E1000_RXBUFFER_256)
+ adapter->rx_buffer_len = E1000_RXBUFFER_256;
+ else if (max_frame <= E1000_RXBUFFER_512)
+ adapter->rx_buffer_len = E1000_RXBUFFER_512;
+ else if (max_frame <= E1000_RXBUFFER_1024)
+ adapter->rx_buffer_len = E1000_RXBUFFER_1024;
+ else if (max_frame <= E1000_RXBUFFER_2048)
+ adapter->rx_buffer_len = E1000_RXBUFFER_2048;
+ else if (max_frame <= E1000_RXBUFFER_4096)
+ adapter->rx_buffer_len = E1000_RXBUFFER_4096;
+ else if (max_frame <= E1000_RXBUFFER_8192)
+ adapter->rx_buffer_len = E1000_RXBUFFER_8192;
+ else if (max_frame <= E1000_RXBUFFER_16384)
+ adapter->rx_buffer_len = E1000_RXBUFFER_16384;
+
+ /* adjust allocation if LPE protects us, and we aren't using SBP */
+ if (!hw->tbi_compatibility_on &&
+ ((max_frame == MAXIMUM_ETHERNET_FRAME_SIZE) ||
+ (max_frame == MAXIMUM_ETHERNET_VLAN_SIZE)))
+ adapter->rx_buffer_len = MAXIMUM_ETHERNET_VLAN_SIZE;
+
+ netdev->mtu = new_mtu;
+ hw->max_frame_size = max_frame;
+
+ if (netif_running(netdev))
+ e1000_reinit_locked(adapter);
+
+ return 0;
+}
+
+/**
+ * e1000_update_stats - Update the board statistics counters
+ * @adapter: board private structure
+ **/
+
+void e1000_update_stats(struct e1000_adapter *adapter)
+{
+ struct e1000_hw *hw = &adapter->hw;
+ struct pci_dev *pdev = adapter->pdev;
+ unsigned long flags = 0;
+ u16 phy_tmp;
+
+#define PHY_IDLE_ERROR_COUNT_MASK 0x00FF
+
+ /*
+ * Prevent stats update while adapter is being reset, or if the pci
+ * connection is down.
+ */
+ if (adapter->link_speed == 0)
+ return;
+ if (pci_channel_offline(pdev))
+ return;
+
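+ /* the stats lock is only taken when the stack drives the device */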
+ if (!adapter->ecdev)
+ spin_lock_irqsave(&adapter->stats_lock, flags);
+
+ /* these counters are modified from e1000_tbi_adjust_stats,
+ * called from the interrupt context, so they must only
+ * be written while holding adapter->stats_lock
+ */
+
+ adapter->stats.crcerrs += er32(CRCERRS);
+ adapter->stats.gprc += er32(GPRC);
+ adapter->stats.gorcl += er32(GORCL);
+ adapter->stats.gorch += er32(GORCH);
+ adapter->stats.bprc += er32(BPRC);
+ adapter->stats.mprc += er32(MPRC);
+ adapter->stats.roc += er32(ROC);
+
+ if (hw->mac_type != e1000_ich8lan) {
+ adapter->stats.prc64 += er32(PRC64);
+ adapter->stats.prc127 += er32(PRC127);
+ adapter->stats.prc255 += er32(PRC255);
+ adapter->stats.prc511 += er32(PRC511);
+ adapter->stats.prc1023 += er32(PRC1023);
+ adapter->stats.prc1522 += er32(PRC1522);
+ }
+
+ adapter->stats.symerrs += er32(SYMERRS);
+ adapter->stats.mpc += er32(MPC);
+ adapter->stats.scc += er32(SCC);
+ adapter->stats.ecol += er32(ECOL);
+ adapter->stats.mcc += er32(MCC);
+ adapter->stats.latecol += er32(LATECOL);
+ adapter->stats.dc += er32(DC);
+ adapter->stats.sec += er32(SEC);
+ adapter->stats.rlec += er32(RLEC);
+ adapter->stats.xonrxc += er32(XONRXC);
+ adapter->stats.xontxc += er32(XONTXC);
+ adapter->stats.xoffrxc += er32(XOFFRXC);
+ adapter->stats.xofftxc += er32(XOFFTXC);
+ adapter->stats.fcruc += er32(FCRUC);
+ adapter->stats.gptc += er32(GPTC);
+ adapter->stats.gotcl += er32(GOTCL);
+ adapter->stats.gotch += er32(GOTCH);
+ adapter->stats.rnbc += er32(RNBC);
+ adapter->stats.ruc += er32(RUC);
+ adapter->stats.rfc += er32(RFC);
+ adapter->stats.rjc += er32(RJC);
+ adapter->stats.torl += er32(TORL);
+ adapter->stats.torh += er32(TORH);
+ adapter->stats.totl += er32(TOTL);
+ adapter->stats.toth += er32(TOTH);
+ adapter->stats.tpr += er32(TPR);
+
+ if (hw->mac_type != e1000_ich8lan) {
+ adapter->stats.ptc64 += er32(PTC64);
+ adapter->stats.ptc127 += er32(PTC127);
+ adapter->stats.ptc255 += er32(PTC255);
+ adapter->stats.ptc511 += er32(PTC511);
+ adapter->stats.ptc1023 += er32(PTC1023);
+ adapter->stats.ptc1522 += er32(PTC1522);
+ }
+
+ adapter->stats.mptc += er32(MPTC);
+ adapter->stats.bptc += er32(BPTC);
+
+ /* used for adaptive IFS */
+
+ hw->tx_packet_delta = er32(TPT);
+ adapter->stats.tpt += hw->tx_packet_delta;
+ hw->collision_delta = er32(COLC);
+ adapter->stats.colc += hw->collision_delta;
+
+ if (hw->mac_type >= e1000_82543) {
+ adapter->stats.algnerrc += er32(ALGNERRC);
+ adapter->stats.rxerrc += er32(RXERRC);
+ adapter->stats.tncrs += er32(TNCRS);
+ adapter->stats.cexterr += er32(CEXTERR);
+ adapter->stats.tsctc += er32(TSCTC);
+ adapter->stats.tsctfc += er32(TSCTFC);
+ }
+ if (hw->mac_type > e1000_82547_rev_2) {
+ adapter->stats.iac += er32(IAC);
+ adapter->stats.icrxoc += er32(ICRXOC);
+
+ if (hw->mac_type != e1000_ich8lan) {
+ adapter->stats.icrxptc += er32(ICRXPTC);
+ adapter->stats.icrxatc += er32(ICRXATC);
+ adapter->stats.ictxptc += er32(ICTXPTC);
+ adapter->stats.ictxatc += er32(ICTXATC);
+ adapter->stats.ictxqec += er32(ICTXQEC);
+ adapter->stats.ictxqmtc += er32(ICTXQMTC);
+ adapter->stats.icrxdmtc += er32(ICRXDMTC);
+ }
+ }
+
+ /* Fill out the OS statistics structure */
+ adapter->net_stats.multicast = adapter->stats.mprc;
+ adapter->net_stats.collisions = adapter->stats.colc;
+
+ /* Rx Errors */
+
+ /* RLEC on some newer hardware can be incorrect so build
+ * our own version based on RUC and ROC */
+ adapter->net_stats.rx_errors = adapter->stats.rxerrc +
+ adapter->stats.crcerrs + adapter->stats.algnerrc +
+ adapter->stats.ruc + adapter->stats.roc +
+ adapter->stats.cexterr;
+ adapter->stats.rlerrc = adapter->stats.ruc + adapter->stats.roc;
+ adapter->net_stats.rx_length_errors = adapter->stats.rlerrc;
+ adapter->net_stats.rx_crc_errors = adapter->stats.crcerrs;
+ adapter->net_stats.rx_frame_errors = adapter->stats.algnerrc;
+ adapter->net_stats.rx_missed_errors = adapter->stats.mpc;
+
+ /* Tx Errors */
+ adapter->stats.txerrc = adapter->stats.ecol + adapter->stats.latecol;
+ adapter->net_stats.tx_errors = adapter->stats.txerrc;
+ adapter->net_stats.tx_aborted_errors = adapter->stats.ecol;
+ adapter->net_stats.tx_window_errors = adapter->stats.latecol;
+ adapter->net_stats.tx_carrier_errors = adapter->stats.tncrs;
+ if (hw->bad_tx_carr_stats_fd &&
+ adapter->link_duplex == FULL_DUPLEX) {
+ adapter->net_stats.tx_carrier_errors = 0;
+ adapter->stats.tncrs = 0;
+ }
+
+ /* Tx Dropped needs to be maintained elsewhere */
+
+ /* Phy Stats */
+ if (hw->media_type == e1000_media_type_copper) {
+ if ((adapter->link_speed == SPEED_1000) &&
+ (!e1000_read_phy_reg(hw, PHY_1000T_STATUS, &phy_tmp))) {
+ phy_tmp &= PHY_IDLE_ERROR_COUNT_MASK;
+ adapter->phy_stats.idle_errors += phy_tmp;
+ }
+
+ if ((hw->mac_type <= e1000_82546) &&
+ (hw->phy_type == e1000_phy_m88) &&
+ !e1000_read_phy_reg(hw, M88E1000_RX_ERR_CNTR, &phy_tmp))
+ adapter->phy_stats.receive_errors += phy_tmp;
+ }
+
+ /* Management Stats */
+ if (hw->has_smbus) {
+ adapter->stats.mgptc += er32(MGTPTC);
+ adapter->stats.mgprc += er32(MGTPRC);
+ adapter->stats.mgpdc += er32(MGTPDC);
+ }
+
+ if (!adapter->ecdev)
+ spin_unlock_irqrestore(&adapter->stats_lock, flags);
+}
+
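+/**
+ * ec_poll - EtherCAT poll callback
+ * @netdev: network interface device structure
+ *
+ * Called cyclically by the EtherCAT master: runs the watchdog roughly
+ * every two seconds and then invokes the interrupt handler directly to
+ * process Rx and Tx, since no interrupt is used in EtherCAT mode.
+ **/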
+void ec_poll(struct net_device *netdev)
+{
+ struct e1000_adapter *adapter = netdev_priv(netdev);
+
+ if (jiffies - adapter->ec_watchdog_jiffies >= 2 * HZ) {
+ e1000_watchdog((unsigned long) adapter);
+ adapter->ec_watchdog_jiffies = jiffies;
+ }
+
+#ifdef CONFIG_PCI_MSI
+ e1000_intr_msi(0, netdev);
+#else
+ e1000_intr(0, netdev);
+#endif
+}
+
+/**
+ * e1000_intr_msi - Interrupt Handler
+ * @irq: interrupt number
+ * @data: pointer to a network interface device structure
+ **/
+
+static irqreturn_t e1000_intr_msi(int irq, void *data)
+{
+ struct net_device *netdev = data;
+ struct e1000_adapter *adapter = netdev_priv(netdev);
+ struct e1000_hw *hw = &adapter->hw;
+ u32 icr = er32(ICR);
+
+ if (adapter->ecdev) {
+ int i, ec_work_done = 0;
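+ /* EtherCAT mode: no NAPI; clean the Rx and Tx rings directly,
+ * for at most E1000_MAX_INTR passes */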
+ for (i = 0; i < E1000_MAX_INTR; i++) {
+ if (unlikely(!adapter->clean_rx(adapter, adapter->rx_ring,
+ &ec_work_done, 100) &&
+ !e1000_clean_tx_irq(adapter, adapter->tx_ring))) {
+ break;
+ }
+ }
+ } else {
+ /* in NAPI mode read ICR disables interrupts using IAM */
+
+ if (icr & (E1000_ICR_RXSEQ | E1000_ICR_LSC)) {
+ hw->get_link_status = 1;
+ /* 80003ES2LAN workaround-- For packet buffer work-around on
+ * link down event; disable receives here in the ISR and reset
+ * adapter in watchdog */
+ if (netif_carrier_ok(netdev) &&
+ (hw->mac_type == e1000_80003es2lan)) {
+ /* disable receives */
+ u32 rctl = er32(RCTL);
+ ew32(RCTL, rctl & ~E1000_RCTL_EN);
+ }
+ /* guard against interrupt when we're going down */
+ if (!test_bit(__E1000_DOWN, &adapter->flags))
+ mod_timer(&adapter->watchdog_timer, jiffies + 1);
+ }
+
+ if (likely(netif_rx_schedule_prep(netdev, &adapter->napi))) {
+ adapter->total_tx_bytes = 0;
+ adapter->total_tx_packets = 0;
+ adapter->total_rx_bytes = 0;
+ adapter->total_rx_packets = 0;
+ __netif_rx_schedule(netdev, &adapter->napi);
+ } else
+ e1000_irq_enable(adapter);
+ }
+
+ return IRQ_HANDLED;
+}
+
+/**
+ * e1000_intr - Interrupt Handler
+ * @irq: interrupt number
+ * @data: pointer to a network interface device structure
+ **/
+
+static irqreturn_t e1000_intr(int irq, void *data)
+{
+ struct net_device *netdev = data;
+ struct e1000_adapter *adapter = netdev_priv(netdev);
+ struct e1000_hw *hw = &adapter->hw;
+ u32 rctl, icr = er32(ICR);
+
+ if (unlikely((!icr) || test_bit(__E1000_RESETTING, &adapter->flags)))
+ return IRQ_NONE; /* Not our interrupt */
+
+ /* IMS will not auto-mask if INT_ASSERTED is not set, and if it is
+ * not set, then the adapter didn't send an interrupt */
+ if (unlikely(hw->mac_type >= e1000_82571 &&
+ !(icr & E1000_ICR_INT_ASSERTED)))
+ return IRQ_NONE;
+
+ /* Interrupt Auto-Mask...upon reading ICR, interrupts are masked. No
+ * need for the IMC write */
+
+ if (!adapter->ecdev && unlikely(icr & (E1000_ICR_RXSEQ | E1000_ICR_LSC))) {
+ hw->get_link_status = 1;
+ /* 80003ES2LAN workaround--
+ * For packet buffer work-around on link down event;
+ * disable receives here in the ISR and
+ * reset adapter in watchdog
+ */
+ if (netif_carrier_ok(netdev) &&
+ (hw->mac_type == e1000_80003es2lan)) {
+ /* disable receives */
+ rctl = er32(RCTL);
+ ew32(RCTL, rctl & ~E1000_RCTL_EN);
+ }
+ /* guard against interrupt when we're going down */
+ if (!test_bit(__E1000_DOWN, &adapter->flags))
+ mod_timer(&adapter->watchdog_timer, jiffies + 1);
+ }
+
+ if (adapter->ecdev) {
+ int i, ec_work_done = 0;
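+ /* as in the MSI handler: clean Rx/Tx directly without NAPI scheduling */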
+ for (i = 0; i < E1000_MAX_INTR; i++) {
+ if (unlikely(!adapter->clean_rx(adapter, adapter->rx_ring,
+ &ec_work_done, 100) &&
+ !e1000_clean_tx_irq(adapter, adapter->tx_ring))) {
+ break;
+ }
+ }
+ } else {
+ if (unlikely(hw->mac_type < e1000_82571)) {
+ /* disable interrupts, without the synchronize_irq bit */
+ ew32(IMC, ~0);
+ E1000_WRITE_FLUSH();
+ }
+ if (likely(netif_rx_schedule_prep(netdev, &adapter->napi))) {
+ adapter->total_tx_bytes = 0;
+ adapter->total_tx_packets = 0;
+ adapter->total_rx_bytes = 0;
+ adapter->total_rx_packets = 0;
+ __netif_rx_schedule(netdev, &adapter->napi);
+ } else
+ /* this really should not happen! if it does it is basically a
+ * bug, but not a hard error, so enable ints and continue */
+ e1000_irq_enable(adapter);
+ }
+
+ return IRQ_HANDLED;
+}
+
+/**
+ * e1000_clean - NAPI Rx polling callback
+ * @adapter: board private structure
+ * EtherCAT: never called
+ **/
+static int e1000_clean(struct napi_struct *napi, int budget)
+{
+ struct e1000_adapter *adapter = container_of(napi, struct e1000_adapter, napi);
+ struct net_device *poll_dev = adapter->netdev;
+ int tx_cleaned = 0, work_done = 0;
+
+ /* Must NOT use netdev_priv macro here. */
+ adapter = poll_dev->priv;
+
+ /* e1000_clean is called per-cpu. This lock protects
+ * tx_ring[0] from being cleaned by multiple cpus
+ * simultaneously. A failure obtaining the lock means
+ * tx_ring[0] is currently being cleaned anyway. */
+ if (spin_trylock(&adapter->tx_queue_lock)) {
+ tx_cleaned = e1000_clean_tx_irq(adapter,
+ &adapter->tx_ring[0]);
+ spin_unlock(&adapter->tx_queue_lock);
+ }
+
+ adapter->clean_rx(adapter, &adapter->rx_ring[0],
+ &work_done, budget);
+
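+	/* if transmit work was done, report the full budget so NAPI keeps polling */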
+ if (tx_cleaned)
+ work_done = budget;
+
+ /* If budget not fully consumed, exit the polling mode */
+ if (work_done < budget) {
+ if (likely(adapter->itr_setting & 3))
+ e1000_set_itr(adapter);
+ netif_rx_complete(poll_dev, napi);
+ e1000_irq_enable(adapter);
+ }
+
+ return work_done;
+}
+
+/**
+ * e1000_clean_tx_irq - Reclaim resources after transmit completes
+ * @adapter: board private structure
+ **/
+static bool e1000_clean_tx_irq(struct e1000_adapter *adapter,
+ struct e1000_tx_ring *tx_ring)
+{
+ struct e1000_hw *hw = &adapter->hw;
+ struct net_device *netdev = adapter->netdev;
+ struct e1000_tx_desc *tx_desc, *eop_desc;
+ struct e1000_buffer *buffer_info;
+ unsigned int i, eop;
+ unsigned int count = 0;
+ bool cleaned = false;
+ unsigned int total_tx_bytes=0, total_tx_packets=0;
+
+ i = tx_ring->next_to_clean;
+ eop = tx_ring->buffer_info[i].next_to_watch;
+ eop_desc = E1000_TX_DESC(*tx_ring, eop);
+
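+	/* reclaim completed descriptors, one packet (up to its EOP descriptor) at a time */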
+ while (eop_desc->upper.data & cpu_to_le32(E1000_TXD_STAT_DD)) {
+ for (cleaned = false; !cleaned; ) {
+ tx_desc = E1000_TX_DESC(*tx_ring, i);
+ buffer_info = &tx_ring->buffer_info[i];
+ cleaned = (i == eop);
+
+ if (cleaned) {
+ struct sk_buff *skb = buffer_info->skb;
+ unsigned int segs, bytecount;
+ segs = skb_shinfo(skb)->gso_segs ?: 1;
+ /* multiply data chunks by size of headers */
+ bytecount = ((segs - 1) * skb_headlen(skb)) +
+ skb->len;
+ total_tx_packets += segs;
+ total_tx_bytes += bytecount;
+ }
+ e1000_unmap_and_free_tx_resource(adapter, buffer_info);
+ tx_desc->upper.data = 0;
+
+ if (unlikely(++i == tx_ring->count)) i = 0;
+ }
+
+ eop = tx_ring->buffer_info[i].next_to_watch;
+ eop_desc = E1000_TX_DESC(*tx_ring, eop);
+#define E1000_TX_WEIGHT 64
+ /* weight of a sort for tx, to avoid endless transmit cleanup */
+ if (count++ == E1000_TX_WEIGHT)
+ break;
+ }
+
+ tx_ring->next_to_clean = i;
+
+#define TX_WAKE_THRESHOLD 32
+ if (!adapter->ecdev && unlikely(cleaned && netif_carrier_ok(netdev) &&
+ E1000_DESC_UNUSED(tx_ring) >= TX_WAKE_THRESHOLD)) {
+ /* Make sure that anybody stopping the queue after this
+ * sees the new next_to_clean.
+ */
+ smp_mb();
+ if (netif_queue_stopped(netdev)) {
+ netif_wake_queue(netdev);
+ ++adapter->restart_queue;
+ }
+ }
+
+ if (!adapter->ecdev && adapter->detect_tx_hung) {
+ /* Detect a transmit hang in hardware, this serializes the
+ * check with the clearing of time_stamp and movement of i */
+ adapter->detect_tx_hung = false;
+ if (tx_ring->buffer_info[eop].dma &&
+ time_after(jiffies, tx_ring->buffer_info[eop].time_stamp +
+ (adapter->tx_timeout_factor * HZ))
+ && !(er32(STATUS) & E1000_STATUS_TXOFF)) {
+
+ /* detected Tx unit hang */
+ DPRINTK(DRV, ERR, "Detected Tx Unit Hang\n"
+ " Tx Queue <%lu>\n"
+ " TDH <%x>\n"
+ " TDT <%x>\n"
+ " next_to_use <%x>\n"
+ " next_to_clean <%x>\n"
+ "buffer_info[next_to_clean]\n"
+ " time_stamp <%lx>\n"
+ " next_to_watch <%x>\n"
+ " jiffies <%lx>\n"
+ " next_to_watch.status <%x>\n",
+ (unsigned long)((tx_ring - adapter->tx_ring) /
+ sizeof(struct e1000_tx_ring)),
+ readl(hw->hw_addr + tx_ring->tdh),
+ readl(hw->hw_addr + tx_ring->tdt),
+ tx_ring->next_to_use,
+ tx_ring->next_to_clean,
+ tx_ring->buffer_info[eop].time_stamp,
+ eop,
+ jiffies,
+ eop_desc->upper.fields.status);
+ netif_stop_queue(netdev);
+ }
+ }
+ adapter->total_tx_bytes += total_tx_bytes;
+ adapter->total_tx_packets += total_tx_packets;
+ adapter->net_stats.tx_bytes += total_tx_bytes;
+ adapter->net_stats.tx_packets += total_tx_packets;
+ return cleaned;
+}
+
+/**
+ * e1000_rx_checksum - Receive Checksum Offload for 82543
+ * @adapter: board private structure
+ * @status_err: receive descriptor status and error fields
+ * @csum: receive descriptor csum field
+ * @sk_buff: socket buffer with received data
+ **/
+
+static void e1000_rx_checksum(struct e1000_adapter *adapter, u32 status_err,
+ u32 csum, struct sk_buff *skb)
+{
+ struct e1000_hw *hw = &adapter->hw;
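+	/* the caller packs the descriptor status into the low bits and the error flags into bits 31:24 */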
+ u16 status = (u16)status_err;
+ u8 errors = (u8)(status_err >> 24);
+ skb->ip_summed = CHECKSUM_NONE;
+
+ /* 82543 or newer only */
+ if (unlikely(hw->mac_type < e1000_82543)) return;
+ /* Ignore Checksum bit is set */
+ if (unlikely(status & E1000_RXD_STAT_IXSM)) return;
+ /* TCP/UDP checksum error bit is set */
+ if (unlikely(errors & E1000_RXD_ERR_TCPE)) {
+ /* let the stack verify checksum errors */
+ adapter->hw_csum_err++;
+ return;
+ }
+ /* TCP/UDP Checksum has not been calculated */
+ if (hw->mac_type <= e1000_82547_rev_2) {
+ if (!(status & E1000_RXD_STAT_TCPCS))
+ return;
+ } else {
+ if (!(status & (E1000_RXD_STAT_TCPCS | E1000_RXD_STAT_UDPCS)))
+ return;
+ }
+ /* It must be a TCP or UDP packet with a valid checksum */
+ if (likely(status & E1000_RXD_STAT_TCPCS)) {
+ /* TCP checksum is good */
+ skb->ip_summed = CHECKSUM_UNNECESSARY;
+ } else if (hw->mac_type > e1000_82547_rev_2) {
+ /* IP fragment with UDP payload */
+ /* Hardware complements the payload checksum, so we undo it
+ * and then put the value in host order for further stack use.
+ */
+ __sum16 sum = (__force __sum16)htons(csum);
+ skb->csum = csum_unfold(~sum);
+ skb->ip_summed = CHECKSUM_COMPLETE;
+ }
+ adapter->hw_csum_good++;
+}
+
+/**
+ * e1000_clean_rx_irq - Send received data up the network stack; legacy
+ * @adapter: board private structure
+ **/
+static bool e1000_clean_rx_irq(struct e1000_adapter *adapter,
+ struct e1000_rx_ring *rx_ring,
+ int *work_done, int work_to_do)
+{
+ struct e1000_hw *hw = &adapter->hw;
+ struct net_device *netdev = adapter->netdev;
+ struct pci_dev *pdev = adapter->pdev;
+ struct e1000_rx_desc *rx_desc, *next_rxd;
+ struct e1000_buffer *buffer_info, *next_buffer;
+ unsigned long flags;
+ u32 length;
+ u8 last_byte;
+ unsigned int i;
+ int cleaned_count = 0;
+ bool cleaned = false;
+ unsigned int total_rx_bytes=0, total_rx_packets=0;
+
+ i = rx_ring->next_to_clean;
+ rx_desc = E1000_RX_DESC(*rx_ring, i);
+ buffer_info = &rx_ring->buffer_info[i];
+
+ while (rx_desc->status & E1000_RXD_STAT_DD) {
+ struct sk_buff *skb;
+ u8 status;
+
+ if (*work_done >= work_to_do)
+ break;
+ (*work_done)++;
+
+ status = rx_desc->status;
+ skb = buffer_info->skb;
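+		/* in EtherCAT mode the skb stays attached to the ring entry for reuse;
+		 * otherwise it is detached and handed to the stack below */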
+ if (!adapter->ecdev) buffer_info->skb = NULL;
+
+ prefetch(skb->data - NET_IP_ALIGN);
+
+ if (++i == rx_ring->count) i = 0;
+ next_rxd = E1000_RX_DESC(*rx_ring, i);
+ prefetch(next_rxd);
+
+ next_buffer = &rx_ring->buffer_info[i];
+
+ cleaned = true;
+ cleaned_count++;
+ pci_unmap_single(pdev,
+ buffer_info->dma,
+ buffer_info->length,
+ PCI_DMA_FROMDEVICE);
+
+ length = le16_to_cpu(rx_desc->length);
+
+ if (unlikely(!(status & E1000_RXD_STAT_EOP))) {
+ /* All receives must fit into a single buffer */
+ E1000_DBG("%s: Receive packet consumed multiple"
+ " buffers\n", netdev->name);
+ /* recycle */
+ buffer_info->skb = skb;
+ goto next_desc;
+ }
+
+ if (!adapter->ecdev &&
+ unlikely(rx_desc->errors & E1000_RXD_ERR_FRAME_ERR_MASK)) {
+ last_byte = *(skb->data + length - 1);
+ if (TBI_ACCEPT(hw, status, rx_desc->errors, length,
+ last_byte)) {
+ spin_lock_irqsave(&adapter->stats_lock, flags);
+ e1000_tbi_adjust_stats(hw, &adapter->stats,
+ length, skb->data);
+ spin_unlock_irqrestore(&adapter->stats_lock,
+ flags);
+ length--;
+ } else {
+ /* recycle */
+ buffer_info->skb = skb;
+ goto next_desc;
+ }
+ }
+
+ /* adjust length to remove Ethernet CRC, this must be
+ * done after the TBI_ACCEPT workaround above */
+ length -= 4;
+
+ /* probably a little skewed due to removing CRC */
+ total_rx_bytes += length;
+ total_rx_packets++;
+
+ /* code added for copybreak, this should improve
+ * performance for small packets with large amounts
+ * of reassembly being done in the stack */
+ if (!adapter->ecdev && length < copybreak) {
+ struct sk_buff *new_skb =
+ netdev_alloc_skb(netdev, length + NET_IP_ALIGN);
+ if (new_skb) {
+ skb_reserve(new_skb, NET_IP_ALIGN);
+ skb_copy_to_linear_data_offset(new_skb,
+ -NET_IP_ALIGN,
+ (skb->data -
+ NET_IP_ALIGN),
+ (length +
+ NET_IP_ALIGN));
+ /* save the skb in buffer_info as good */
+ buffer_info->skb = skb;
+ skb = new_skb;
+ }
+ /* else just continue with the old one */
+ }
+ /* end copybreak code */
+ skb_put(skb, length);
+
+ /* Receive Checksum Offload */
+ e1000_rx_checksum(adapter,
+ (u32)(status) |
+ ((u32)(rx_desc->errors) << 24),
+ le16_to_cpu(rx_desc->csum), skb);
+
+ if (adapter->ecdev) {
+ ecdev_receive(adapter->ecdev, skb->data, length);
+
+ // No need to detect link status as
+ // long as frames are received: Reset watchdog.
+ adapter->ec_watchdog_jiffies = jiffies;
+ } else {
+ skb->protocol = eth_type_trans(skb, netdev);
+
+ if (unlikely(adapter->vlgrp &&
+ (status & E1000_RXD_STAT_VP))) {
+ vlan_hwaccel_receive_skb(skb, adapter->vlgrp,
+ le16_to_cpu(rx_desc->special));
+ } else {
+ netif_receive_skb(skb);
+ }
+ }
+
+ netdev->last_rx = jiffies;
+
+next_desc:
+ rx_desc->status = 0;
+
+ /* return some buffers to hardware, one at a time is too slow */
+ if (unlikely(cleaned_count >= E1000_RX_BUFFER_WRITE)) {
+ adapter->alloc_rx_buf(adapter, rx_ring, cleaned_count);
+ cleaned_count = 0;
+ }
+
+ /* use prefetched values */
+ rx_desc = next_rxd;
+ buffer_info = next_buffer;
+ }
+ rx_ring->next_to_clean = i;
+
+ cleaned_count = E1000_DESC_UNUSED(rx_ring);
+ if (cleaned_count)
+ adapter->alloc_rx_buf(adapter, rx_ring, cleaned_count);
+
+ adapter->total_rx_packets += total_rx_packets;
+ adapter->total_rx_bytes += total_rx_bytes;
+ adapter->net_stats.rx_bytes += total_rx_bytes;
+ adapter->net_stats.rx_packets += total_rx_packets;
+ return cleaned;
+}
+
+/**
+ * e1000_alloc_rx_buffers - Replace used receive buffers; legacy & extended
+ * @adapter: address of board private structure
+ **/
+
+static void e1000_alloc_rx_buffers(struct e1000_adapter *adapter,
+ struct e1000_rx_ring *rx_ring,
+ int cleaned_count)
+{
+ struct e1000_hw *hw = &adapter->hw;
+ struct net_device *netdev = adapter->netdev;
+ struct pci_dev *pdev = adapter->pdev;
+ struct e1000_rx_desc *rx_desc;
+ struct e1000_buffer *buffer_info;
+ struct sk_buff *skb;
+ unsigned int i;
+ unsigned int bufsz = adapter->rx_buffer_len + NET_IP_ALIGN;
+
+ i = rx_ring->next_to_use;
+ buffer_info = &rx_ring->buffer_info[i];
+
+ while (cleaned_count--) {
+ skb = buffer_info->skb;
+ if (skb) {
+ skb_trim(skb, 0);
+ goto map_skb;
+ }
+
+ skb = netdev_alloc_skb(netdev, bufsz);
+ if (unlikely(!skb)) {
+ /* Better luck next round */
+ adapter->alloc_rx_buff_failed++;
+ break;
+ }
+
+ /* Fix for errata 23, can't cross 64kB boundary */
+ if (!e1000_check_64k_bound(adapter, skb->data, bufsz)) {
+ struct sk_buff *oldskb = skb;
+ DPRINTK(RX_ERR, ERR, "skb align check failed: %u bytes "
+ "at %p\n", bufsz, skb->data);
+ /* Try again, without freeing the previous */
+ skb = netdev_alloc_skb(netdev, bufsz);
+ /* Failed allocation, critical failure */
+ if (!skb) {
+ dev_kfree_skb(oldskb);
+ break;
+ }
+
+ if (!e1000_check_64k_bound(adapter, skb->data, bufsz)) {
+ /* give up */
+ dev_kfree_skb(skb);
+ dev_kfree_skb(oldskb);
+ break; /* while !buffer_info->skb */
+ }
+
+ /* Use new allocation */
+ dev_kfree_skb(oldskb);
+ }
+ /* Make buffer alignment 2 beyond a 16 byte boundary
+ * this will result in a 16 byte aligned IP header after
+ * the 14 byte MAC header is removed
+ */
+ skb_reserve(skb, NET_IP_ALIGN);
+
+ buffer_info->skb = skb;
+ buffer_info->length = adapter->rx_buffer_len;
+map_skb:
+ buffer_info->dma = pci_map_single(pdev,
+ skb->data,
+ adapter->rx_buffer_len,
+ PCI_DMA_FROMDEVICE);
+
+ /* Fix for errata 23, can't cross 64kB boundary */
+ if (!e1000_check_64k_bound(adapter,
+ (void *)(unsigned long)buffer_info->dma,
+ adapter->rx_buffer_len)) {
+ DPRINTK(RX_ERR, ERR,
+ "dma align check failed: %u bytes at %p\n",
+ adapter->rx_buffer_len,
+ (void *)(unsigned long)buffer_info->dma);
+ if (!adapter->ecdev) {
+ dev_kfree_skb(skb);
+ buffer_info->skb = NULL;
+ }
+
+ pci_unmap_single(pdev, buffer_info->dma,
+ adapter->rx_buffer_len,
+ PCI_DMA_FROMDEVICE);
+
+ break; /* while !buffer_info->skb */
+ }
+ rx_desc = E1000_RX_DESC(*rx_ring, i);
+ rx_desc->buffer_addr = cpu_to_le64(buffer_info->dma);
+
+ if (unlikely(++i == rx_ring->count))
+ i = 0;
+ buffer_info = &rx_ring->buffer_info[i];
+ }
+
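+	/* write the tail (RDT) one behind next_to_use, i.e. the last descriptor made available to hardware */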
+ if (likely(rx_ring->next_to_use != i)) {
+ rx_ring->next_to_use = i;
+ if (unlikely(i-- == 0))
+ i = (rx_ring->count - 1);
+
+ /* Force memory writes to complete before letting h/w
+ * know there are new descriptors to fetch. (Only
+ * applicable for weak-ordered memory model archs,
+ * such as IA-64). */
+ wmb();
+ writel(i, hw->hw_addr + rx_ring->rdt);
+ }
+}
+
+/**
+ * e1000_smartspeed - Workaround for SmartSpeed on 82541 and 82547 controllers.
+ * @adapter: board private structure
+ **/
+
+static void e1000_smartspeed(struct e1000_adapter *adapter)
+{
+ struct e1000_hw *hw = &adapter->hw;
+ u16 phy_status;
+ u16 phy_ctrl;
+
+ if ((hw->phy_type != e1000_phy_igp) || !hw->autoneg ||
+ !(hw->autoneg_advertised & ADVERTISE_1000_FULL))
+ return;
+
+ if (adapter->smartspeed == 0) {
+ /* If Master/Slave config fault is asserted twice,
+ * we assume back-to-back */
+ e1000_read_phy_reg(hw, PHY_1000T_STATUS, &phy_status);
+ if (!(phy_status & SR_1000T_MS_CONFIG_FAULT)) return;
+ e1000_read_phy_reg(hw, PHY_1000T_STATUS, &phy_status);
+ if (!(phy_status & SR_1000T_MS_CONFIG_FAULT)) return;
+ e1000_read_phy_reg(hw, PHY_1000T_CTRL, &phy_ctrl);
+ if (phy_ctrl & CR_1000T_MS_ENABLE) {
+ phy_ctrl &= ~CR_1000T_MS_ENABLE;
+ e1000_write_phy_reg(hw, PHY_1000T_CTRL,
+ phy_ctrl);
+ adapter->smartspeed++;
+ if (!e1000_phy_setup_autoneg(hw) &&
+ !e1000_read_phy_reg(hw, PHY_CTRL,
+ &phy_ctrl)) {
+ phy_ctrl |= (MII_CR_AUTO_NEG_EN |
+ MII_CR_RESTART_AUTO_NEG);
+ e1000_write_phy_reg(hw, PHY_CTRL,
+ phy_ctrl);
+ }
+ }
+ return;
+ } else if (adapter->smartspeed == E1000_SMARTSPEED_DOWNSHIFT) {
+ /* If still no link, perhaps using 2/3 pair cable */
+ e1000_read_phy_reg(hw, PHY_1000T_CTRL, &phy_ctrl);
+ phy_ctrl |= CR_1000T_MS_ENABLE;
+ e1000_write_phy_reg(hw, PHY_1000T_CTRL, phy_ctrl);
+ if (!e1000_phy_setup_autoneg(hw) &&
+ !e1000_read_phy_reg(hw, PHY_CTRL, &phy_ctrl)) {
+ phy_ctrl |= (MII_CR_AUTO_NEG_EN |
+ MII_CR_RESTART_AUTO_NEG);
+ e1000_write_phy_reg(hw, PHY_CTRL, phy_ctrl);
+ }
+ }
+ /* Restart process after E1000_SMARTSPEED_MAX iterations */
+ if (adapter->smartspeed++ == E1000_SMARTSPEED_MAX)
+ adapter->smartspeed = 0;
+}
+
+/**
+ * e1000_ioctl - handle device-specific ioctl requests
+ * @netdev: network interface device structure
+ * @ifr: pointer to the ioctl request data
+ * @cmd: ioctl command to execute
+ **/
+
+static int e1000_ioctl(struct net_device *netdev, struct ifreq *ifr, int cmd)
+{
+ switch (cmd) {
+ case SIOCGMIIPHY:
+ case SIOCGMIIREG:
+ case SIOCSMIIREG:
+ return e1000_mii_ioctl(netdev, ifr, cmd);
+ default:
+ return -EOPNOTSUPP;
+ }
+}
+
+/**
+ * e1000_mii_ioctl - handle MII ioctl requests (SIOCGMIIPHY/SIOCGMIIREG/SIOCSMIIREG)
+ * @netdev: network interface device structure
+ * @ifr: pointer to the ioctl request data
+ * @cmd: ioctl command to execute
+ **/
+
+static int e1000_mii_ioctl(struct net_device *netdev, struct ifreq *ifr,
+ int cmd)
+{
+ struct e1000_adapter *adapter = netdev_priv(netdev);
+ struct e1000_hw *hw = &adapter->hw;
+ struct mii_ioctl_data *data = if_mii(ifr);
+ int retval;
+ u16 mii_reg;
+ u16 spddplx;
+ unsigned long flags;
+
+ if (hw->media_type != e1000_media_type_copper)
+ return -EOPNOTSUPP;
+
+ switch (cmd) {
+ case SIOCGMIIPHY:
+ data->phy_id = hw->phy_addr;
+ break;
+ case SIOCGMIIREG:
+ if (!capable(CAP_NET_ADMIN))
+ return -EPERM;
+ spin_lock_irqsave(&adapter->stats_lock, flags);
+ if (e1000_read_phy_reg(hw, data->reg_num & 0x1F,
+ &data->val_out)) {
+ spin_unlock_irqrestore(&adapter->stats_lock, flags);
+ return -EIO;
+ }
+ spin_unlock_irqrestore(&adapter->stats_lock, flags);
+ break;
+ case SIOCSMIIREG:
+ if (adapter->ecdev || !capable(CAP_NET_ADMIN))
+ return -EPERM;
+ if (data->reg_num & ~(0x1F))
+ return -EFAULT;
+ mii_reg = data->val_in;
+ spin_lock_irqsave(&adapter->stats_lock, flags);
+ if (e1000_write_phy_reg(hw, data->reg_num,
+ mii_reg)) {
+ spin_unlock_irqrestore(&adapter->stats_lock, flags);
+ return -EIO;
+ }
+ spin_unlock_irqrestore(&adapter->stats_lock, flags);
+ if (hw->media_type == e1000_media_type_copper) {
+ switch (data->reg_num) {
+ case PHY_CTRL:
+ if (mii_reg & MII_CR_POWER_DOWN)
+ break;
+ if (mii_reg & MII_CR_AUTO_NEG_EN) {
+ hw->autoneg = 1;
+ hw->autoneg_advertised = 0x2F;
+ } else {
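+				/* decode the forced speed/duplex from the BMCR bits:
+				 * 0x40 = 1000, 0x2000 = 100, 0x100 = full duplex */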
+ if (mii_reg & 0x40)
+ spddplx = SPEED_1000;
+ else if (mii_reg & 0x2000)
+ spddplx = SPEED_100;
+ else
+ spddplx = SPEED_10;
+ spddplx += (mii_reg & 0x100)
+ ? DUPLEX_FULL :
+ DUPLEX_HALF;
+ retval = e1000_set_spd_dplx(adapter,
+ spddplx);
+ if (retval)
+ return retval;
+ }
+ if (netif_running(adapter->netdev))
+ e1000_reinit_locked(adapter);
+ else
+ e1000_reset(adapter);
+ break;
+ case M88E1000_PHY_SPEC_CTRL:
+ case M88E1000_EXT_PHY_SPEC_CTRL:
+ if (e1000_phy_reset(hw))
+ return -EIO;
+ break;
+ }
+ } else {
+ switch (data->reg_num) {
+ case PHY_CTRL:
+ if (mii_reg & MII_CR_POWER_DOWN)
+ break;
+ if (netif_running(adapter->netdev))
+ e1000_reinit_locked(adapter);
+ else
+ e1000_reset(adapter);
+ break;
+ }
+ }
+ break;
+ default:
+ return -EOPNOTSUPP;
+ }
+ return E1000_SUCCESS;
+}
+
+void e1000_pci_set_mwi(struct e1000_hw *hw)
+{
+ struct e1000_adapter *adapter = hw->back;
+ int ret_val = pci_set_mwi(adapter->pdev);
+
+ if (ret_val)
+ DPRINTK(PROBE, ERR, "Error in setting MWI\n");
+}
+
+void e1000_pci_clear_mwi(struct e1000_hw *hw)
+{
+ struct e1000_adapter *adapter = hw->back;
+
+ pci_clear_mwi(adapter->pdev);
+}
+
+int e1000_pcix_get_mmrbc(struct e1000_hw *hw)
+{
+ struct e1000_adapter *adapter = hw->back;
+ return pcix_get_mmrbc(adapter->pdev);
+}
+
+void e1000_pcix_set_mmrbc(struct e1000_hw *hw, int mmrbc)
+{
+ struct e1000_adapter *adapter = hw->back;
+ pcix_set_mmrbc(adapter->pdev, mmrbc);
+}
+
+s32 e1000_read_pcie_cap_reg(struct e1000_hw *hw, u32 reg, u16 *value)
+{
+ struct e1000_adapter *adapter = hw->back;
+ u16 cap_offset;
+
+ cap_offset = pci_find_capability(adapter->pdev, PCI_CAP_ID_EXP);
+ if (!cap_offset)
+ return -E1000_ERR_CONFIG;
+
+ pci_read_config_word(adapter->pdev, cap_offset + reg, value);
+
+ return E1000_SUCCESS;
+}
+
+void e1000_io_write(struct e1000_hw *hw, unsigned long port, u32 value)
+{
+ outl(value, port);
+}
+
+static void e1000_vlan_rx_register(struct net_device *netdev,
+ struct vlan_group *grp)
+{
+ struct e1000_adapter *adapter = netdev_priv(netdev);
+ struct e1000_hw *hw = &adapter->hw;
+ u32 ctrl, rctl;
+
+ if (!test_bit(__E1000_DOWN, &adapter->flags))
+ e1000_irq_disable(adapter);
+ adapter->vlgrp = grp;
+
+ if (grp) {
+ /* enable VLAN tag insert/strip */
+ ctrl = er32(CTRL);
+ ctrl |= E1000_CTRL_VME;
+ ew32(CTRL, ctrl);
+
+ if (adapter->hw.mac_type != e1000_ich8lan) {
+ /* enable VLAN receive filtering */
+ rctl = er32(RCTL);
+ rctl &= ~E1000_RCTL_CFIEN;
+ ew32(RCTL, rctl);
+ e1000_update_mng_vlan(adapter);
+ }
+ } else {
+ /* disable VLAN tag insert/strip */
+ ctrl = er32(CTRL);
+ ctrl &= ~E1000_CTRL_VME;
+ ew32(CTRL, ctrl);
+
+ if (adapter->hw.mac_type != e1000_ich8lan) {
+ if (adapter->mng_vlan_id !=
+ (u16)E1000_MNG_VLAN_NONE) {
+ e1000_vlan_rx_kill_vid(netdev,
+ adapter->mng_vlan_id);
+ adapter->mng_vlan_id = E1000_MNG_VLAN_NONE;
+ }
+ }
+ }
+
+ if (!test_bit(__E1000_DOWN, &adapter->flags))
+ e1000_irq_enable(adapter);
+}
+
+static void e1000_vlan_rx_add_vid(struct net_device *netdev, u16 vid)
+{
+ struct e1000_adapter *adapter = netdev_priv(netdev);
+ struct e1000_hw *hw = &adapter->hw;
+ u32 vfta, index;
+
+ if ((hw->mng_cookie.status &
+ E1000_MNG_DHCP_COOKIE_STATUS_VLAN_SUPPORT) &&
+ (vid == adapter->mng_vlan_id))
+ return;
+ /* add VID to filter table */
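+	/* the VFTA is 128 32-bit registers: VID bits 11:5 select the register, bits 4:0 the bit within it */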
+ index = (vid >> 5) & 0x7F;
+ vfta = E1000_READ_REG_ARRAY(hw, VFTA, index);
+ vfta |= (1 << (vid & 0x1F));
+ e1000_write_vfta(hw, index, vfta);
+}
+
+static void e1000_vlan_rx_kill_vid(struct net_device *netdev, u16 vid)
+{
+ struct e1000_adapter *adapter = netdev_priv(netdev);
+ struct e1000_hw *hw = &adapter->hw;
+ u32 vfta, index;
+
+ if (!test_bit(__E1000_DOWN, &adapter->flags))
+ e1000_irq_disable(adapter);
+ vlan_group_set_device(adapter->vlgrp, vid, NULL);
+ if (!test_bit(__E1000_DOWN, &adapter->flags))
+ e1000_irq_enable(adapter);
+
+ if ((hw->mng_cookie.status &
+ E1000_MNG_DHCP_COOKIE_STATUS_VLAN_SUPPORT) &&
+ (vid == adapter->mng_vlan_id)) {
+ /* release control to f/w */
+ e1000_release_hw_control(adapter);
+ return;
+ }
+
+ /* remove VID from filter table */
+ index = (vid >> 5) & 0x7F;
+ vfta = E1000_READ_REG_ARRAY(hw, VFTA, index);
+ vfta &= ~(1 << (vid & 0x1F));
+ e1000_write_vfta(hw, index, vfta);
+}
+
+static void e1000_restore_vlan(struct e1000_adapter *adapter)
+{
+ e1000_vlan_rx_register(adapter->netdev, adapter->vlgrp);
+
+ if (adapter->vlgrp) {
+ u16 vid;
+ for (vid = 0; vid < VLAN_GROUP_ARRAY_LEN; vid++) {
+ if (!vlan_group_get_device(adapter->vlgrp, vid))
+ continue;
+ e1000_vlan_rx_add_vid(adapter->netdev, vid);
+ }
+ }
+}
+
+int e1000_set_spd_dplx(struct e1000_adapter *adapter, u16 spddplx)
+{
+ struct e1000_hw *hw = &adapter->hw;
+
+ hw->autoneg = 0;
+
+ /* Fiber NICs only allow 1000 Mbps full duplex */
+ if ((hw->media_type == e1000_media_type_fiber) &&
+ spddplx != (SPEED_1000 + DUPLEX_FULL)) {
+ DPRINTK(PROBE, ERR, "Unsupported Speed/Duplex configuration\n");
+ return -EINVAL;
+ }
+
+ switch (spddplx) {
+ case SPEED_10 + DUPLEX_HALF:
+ hw->forced_speed_duplex = e1000_10_half;
+ break;
+ case SPEED_10 + DUPLEX_FULL:
+ hw->forced_speed_duplex = e1000_10_full;
+ break;
+ case SPEED_100 + DUPLEX_HALF:
+ hw->forced_speed_duplex = e1000_100_half;
+ break;
+ case SPEED_100 + DUPLEX_FULL:
+ hw->forced_speed_duplex = e1000_100_full;
+ break;
+ case SPEED_1000 + DUPLEX_FULL:
+ hw->autoneg = 1;
+ hw->autoneg_advertised = ADVERTISE_1000_FULL;
+ break;
+ case SPEED_1000 + DUPLEX_HALF: /* not supported */
+ default:
+ DPRINTK(PROBE, ERR, "Unsupported Speed/Duplex configuration\n");
+ return -EINVAL;
+ }
+ return 0;
+}
+
+static int e1000_suspend(struct pci_dev *pdev, pm_message_t state)
+{
+ struct net_device *netdev = pci_get_drvdata(pdev);
+ struct e1000_adapter *adapter = netdev_priv(netdev);
+ struct e1000_hw *hw = &adapter->hw;
+ u32 ctrl, ctrl_ext, rctl, status;
+ u32 wufc = adapter->wol;
+#ifdef CONFIG_PM
+ int retval = 0;
+#endif
+
+ netif_device_detach(netdev);
+
+ if (netif_running(netdev)) {
+ WARN_ON(test_bit(__E1000_RESETTING, &adapter->flags));
+ e1000_down(adapter);
+ }
+
+#ifdef CONFIG_PM
+ retval = pci_save_state(pdev);
+ if (retval)
+ return retval;
+ if (adapter->ecdev)
+ return -EBUSY;
+
+#endif
+
+ status = er32(STATUS);
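+	/* if the link is already up, do not arm wake on link status change */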
+ if (status & E1000_STATUS_LU)
+ wufc &= ~E1000_WUFC_LNKC;
+
+ if (wufc) {
+ e1000_setup_rctl(adapter);
+ e1000_set_rx_mode(netdev);
+
+ /* turn on all-multi mode if wake on multicast is enabled */
+ if (wufc & E1000_WUFC_MC) {
+ rctl = er32(RCTL);
+ rctl |= E1000_RCTL_MPE;
+ ew32(RCTL, rctl);
+ }
+
+ if (hw->mac_type >= e1000_82540) {
+ ctrl = er32(CTRL);
+ /* advertise wake from D3Cold */
+ #define E1000_CTRL_ADVD3WUC 0x00100000
+ /* phy power management enable */
+ #define E1000_CTRL_EN_PHY_PWR_MGMT 0x00200000
+ ctrl |= E1000_CTRL_ADVD3WUC |
+ E1000_CTRL_EN_PHY_PWR_MGMT;
+ ew32(CTRL, ctrl);
+ }
+
+ if (hw->media_type == e1000_media_type_fiber ||
+ hw->media_type == e1000_media_type_internal_serdes) {
+ /* keep the laser running in D3 */
+ ctrl_ext = er32(CTRL_EXT);
+ ctrl_ext |= E1000_CTRL_EXT_SDP7_DATA;
+ ew32(CTRL_EXT, ctrl_ext);
+ }
+
+ /* Allow time for pending master requests to run */
+ e1000_disable_pciex_master(hw);
+
+ ew32(WUC, E1000_WUC_PME_EN);
+ ew32(WUFC, wufc);
+ pci_enable_wake(pdev, PCI_D3hot, 1);
+ pci_enable_wake(pdev, PCI_D3cold, 1);
+ } else {
+ ew32(WUC, 0);
+ ew32(WUFC, 0);
+ pci_enable_wake(pdev, PCI_D3hot, 0);
+ pci_enable_wake(pdev, PCI_D3cold, 0);
+ }
+
+ e1000_release_manageability(adapter);
+
+ /* make sure adapter isn't asleep if manageability is enabled */
+ if (adapter->en_mng_pt) {
+ pci_enable_wake(pdev, PCI_D3hot, 1);
+ pci_enable_wake(pdev, PCI_D3cold, 1);
+ }
+
+ if (hw->phy_type == e1000_phy_igp_3)
+ e1000_phy_powerdown_workaround(hw);
+
+ if (netif_running(netdev))
+ e1000_free_irq(adapter);
+
+ /* Release control of h/w to f/w. If f/w is AMT enabled, this
+ * would have already happened in close and is redundant. */
+ e1000_release_hw_control(adapter);
+
+ pci_disable_device(pdev);
+
+ pci_set_power_state(pdev, pci_choose_state(pdev, state));
+
+ return 0;
+}
+
+#ifdef CONFIG_PM
+static int e1000_resume(struct pci_dev *pdev)
+{
+ struct net_device *netdev = pci_get_drvdata(pdev);
+ struct e1000_adapter *adapter = netdev_priv(netdev);
+ struct e1000_hw *hw = &adapter->hw;
+ u32 err;
+
+ pci_set_power_state(pdev, PCI_D0);
+ pci_restore_state(pdev);
+
+ if (adapter->need_ioport)
+ err = pci_enable_device(pdev);
+ else
+ err = pci_enable_device_mem(pdev);
+ if (err) {
+ printk(KERN_ERR "e1000: Cannot enable PCI device from suspend\n");
+ return err;
+ }
+ if (adapter->ecdev)
+ return -EBUSY;
+
+ pci_set_master(pdev);
+
+ pci_enable_wake(pdev, PCI_D3hot, 0);
+ pci_enable_wake(pdev, PCI_D3cold, 0);
+
+ if (netif_running(netdev)) {
+ err = e1000_request_irq(adapter);
+ if (err)
+ return err;
+ }
+
+ e1000_power_up_phy(adapter);
+ e1000_reset(adapter);
+ ew32(WUS, ~0);
+
+ e1000_init_manageability(adapter);
+
+ if (netif_running(netdev))
+ e1000_up(adapter);
+
+ if (!adapter->ecdev) netif_device_attach(netdev);
+
+ /* If the controller is 82573 and f/w is AMT, do not set
+ * DRV_LOAD until the interface is up. For all other cases,
+ * let the f/w know that the h/w is now under the control
+ * of the driver. */
+ if (hw->mac_type != e1000_82573 ||
+ !e1000_check_mng_mode(hw))
+ e1000_get_hw_control(adapter);
+
+ return 0;
+}
+#endif
+
+static void e1000_shutdown(struct pci_dev *pdev)
+{
+ e1000_suspend(pdev, PMSG_SUSPEND);
+}
+
+#ifdef CONFIG_NET_POLL_CONTROLLER
+/*
+ * Polling 'interrupt' - used by things like netconsole to send skbs
+ * without having to re-enable interrupts. It's not called while
+ * the interrupt routine is executing.
+ */
+static void e1000_netpoll(struct net_device *netdev)
+{
+ struct e1000_adapter *adapter = netdev_priv(netdev);
+
+ disable_irq(adapter->pdev->irq);
+ e1000_intr(adapter->pdev->irq, netdev);
+ enable_irq(adapter->pdev->irq);
+}
+#endif
+
+/**
+ * e1000_io_error_detected - called when PCI error is detected
+ * @pdev: Pointer to PCI device
+ * @state: The current PCI connection state
+ *
+ * This function is called after a PCI bus error affecting
+ * this device has been detected.
+ */
+static pci_ers_result_t e1000_io_error_detected(struct pci_dev *pdev,
+ pci_channel_state_t state)
+{
+ struct net_device *netdev = pci_get_drvdata(pdev);
+ struct e1000_adapter *adapter = netdev->priv;
+
+ netif_device_detach(netdev);
+
+ if (netif_running(netdev))
+ e1000_down(adapter);
+ pci_disable_device(pdev);
+
+ /* Request a slot reset. */
+ return PCI_ERS_RESULT_NEED_RESET;
+}
+
+/**
+ * e1000_io_slot_reset - called after the pci bus has been reset.
+ * @pdev: Pointer to PCI device
+ *
+ * Restart the card from scratch, as if from a cold-boot. Implementation
+ * resembles the first-half of the e1000_resume routine.
+ */
+static pci_ers_result_t e1000_io_slot_reset(struct pci_dev *pdev)
+{
+ struct net_device *netdev = pci_get_drvdata(pdev);
+ struct e1000_adapter *adapter = netdev->priv;
+ struct e1000_hw *hw = &adapter->hw;
+ int err;
+
+ if (adapter->need_ioport)
+ err = pci_enable_device(pdev);
+ else
+ err = pci_enable_device_mem(pdev);
+ if (err) {
+ printk(KERN_ERR "e1000: Cannot re-enable PCI device after reset.\n");
+ return PCI_ERS_RESULT_DISCONNECT;
+ }
+ pci_set_master(pdev);
+
+ pci_enable_wake(pdev, PCI_D3hot, 0);
+ pci_enable_wake(pdev, PCI_D3cold, 0);
+
+ e1000_reset(adapter);
+ ew32(WUS, ~0);
+
+ return PCI_ERS_RESULT_RECOVERED;
+}
+
+/**
+ * e1000_io_resume - called when traffic can start flowing again.
+ * @pdev: Pointer to PCI device
+ *
+ * This callback is called when the error recovery driver tells us that
+ * it's OK to resume normal operation. Implementation resembles the
+ * second-half of the e1000_resume routine.
+ */
+static void e1000_io_resume(struct pci_dev *pdev)
+{
+ struct net_device *netdev = pci_get_drvdata(pdev);
+ struct e1000_adapter *adapter = netdev->priv;
+ struct e1000_hw *hw = &adapter->hw;
+
+ e1000_init_manageability(adapter);
+
+ if (netif_running(netdev)) {
+ if (e1000_up(adapter)) {
+ printk("e1000: can't bring device back up after reset\n");
+ return;
+ }
+ }
+
+ netif_device_attach(netdev);
+
+ /* If the controller is 82573 and f/w is AMT, do not set
+ * DRV_LOAD until the interface is up. For all other cases,
+ * let the f/w know that the h/w is now under the control
+ * of the driver. */
+ if (hw->mac_type != e1000_82573 ||
+ !e1000_check_mng_mode(hw))
+ e1000_get_hw_control(adapter);
+
+}
+
+/* e1000_main.c */
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/devices/e1000/e1000_main-2.6.28-orig.c Mon Feb 07 21:30:25 2011 +0100
@@ -0,0 +1,4872 @@
+/*******************************************************************************
+
+ Intel PRO/1000 Linux driver
+ Copyright(c) 1999 - 2006 Intel Corporation.
+
+ This program is free software; you can redistribute it and/or modify it
+ under the terms and conditions of the GNU General Public License,
+ version 2, as published by the Free Software Foundation.
+
+ This program is distributed in the hope it will be useful, but WITHOUT
+ ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ more details.
+
+ You should have received a copy of the GNU General Public License along with
+ this program; if not, write to the Free Software Foundation, Inc.,
+ 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
+
+ The full GNU General Public License is included in this distribution in
+ the file called "COPYING".
+
+ Contact Information:
+ Linux NICS <linux.nics@intel.com>
+ e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
+ Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
+
+*******************************************************************************/
+
+#include "e1000.h"
+#include <net/ip6_checksum.h>
+
+char e1000_driver_name[] = "e1000";
+static char e1000_driver_string[] = "Intel(R) PRO/1000 Network Driver";
+#define DRV_VERSION "7.3.21-k3-NAPI"
+const char e1000_driver_version[] = DRV_VERSION;
+static const char e1000_copyright[] = "Copyright (c) 1999-2006 Intel Corporation.";
+
+/* e1000_pci_tbl - PCI Device ID Table
+ *
+ * Last entry must be all 0s
+ *
+ * Macro expands to...
+ * {PCI_DEVICE(PCI_VENDOR_ID_INTEL, device_id)}
+ */
+static struct pci_device_id e1000_pci_tbl[] = {
+ INTEL_E1000_ETHERNET_DEVICE(0x1000),
+ INTEL_E1000_ETHERNET_DEVICE(0x1001),
+ INTEL_E1000_ETHERNET_DEVICE(0x1004),
+ INTEL_E1000_ETHERNET_DEVICE(0x1008),
+ INTEL_E1000_ETHERNET_DEVICE(0x1009),
+ INTEL_E1000_ETHERNET_DEVICE(0x100C),
+ INTEL_E1000_ETHERNET_DEVICE(0x100D),
+ INTEL_E1000_ETHERNET_DEVICE(0x100E),
+ INTEL_E1000_ETHERNET_DEVICE(0x100F),
+ INTEL_E1000_ETHERNET_DEVICE(0x1010),
+ INTEL_E1000_ETHERNET_DEVICE(0x1011),
+ INTEL_E1000_ETHERNET_DEVICE(0x1012),
+ INTEL_E1000_ETHERNET_DEVICE(0x1013),
+ INTEL_E1000_ETHERNET_DEVICE(0x1014),
+ INTEL_E1000_ETHERNET_DEVICE(0x1015),
+ INTEL_E1000_ETHERNET_DEVICE(0x1016),
+ INTEL_E1000_ETHERNET_DEVICE(0x1017),
+ INTEL_E1000_ETHERNET_DEVICE(0x1018),
+ INTEL_E1000_ETHERNET_DEVICE(0x1019),
+ INTEL_E1000_ETHERNET_DEVICE(0x101A),
+ INTEL_E1000_ETHERNET_DEVICE(0x101D),
+ INTEL_E1000_ETHERNET_DEVICE(0x101E),
+ INTEL_E1000_ETHERNET_DEVICE(0x1026),
+ INTEL_E1000_ETHERNET_DEVICE(0x1027),
+ INTEL_E1000_ETHERNET_DEVICE(0x1028),
+ INTEL_E1000_ETHERNET_DEVICE(0x1075),
+ INTEL_E1000_ETHERNET_DEVICE(0x1076),
+ INTEL_E1000_ETHERNET_DEVICE(0x1077),
+ INTEL_E1000_ETHERNET_DEVICE(0x1078),
+ INTEL_E1000_ETHERNET_DEVICE(0x1079),
+ INTEL_E1000_ETHERNET_DEVICE(0x107A),
+ INTEL_E1000_ETHERNET_DEVICE(0x107B),
+ INTEL_E1000_ETHERNET_DEVICE(0x107C),
+ INTEL_E1000_ETHERNET_DEVICE(0x108A),
+ INTEL_E1000_ETHERNET_DEVICE(0x1099),
+ INTEL_E1000_ETHERNET_DEVICE(0x10B5),
+ /* required last entry */
+ {0,}
+};
+
+MODULE_DEVICE_TABLE(pci, e1000_pci_tbl);
+
+int e1000_up(struct e1000_adapter *adapter);
+void e1000_down(struct e1000_adapter *adapter);
+void e1000_reinit_locked(struct e1000_adapter *adapter);
+void e1000_reset(struct e1000_adapter *adapter);
+int e1000_set_spd_dplx(struct e1000_adapter *adapter, u16 spddplx);
+int e1000_setup_all_tx_resources(struct e1000_adapter *adapter);
+int e1000_setup_all_rx_resources(struct e1000_adapter *adapter);
+void e1000_free_all_tx_resources(struct e1000_adapter *adapter);
+void e1000_free_all_rx_resources(struct e1000_adapter *adapter);
+static int e1000_setup_tx_resources(struct e1000_adapter *adapter,
+ struct e1000_tx_ring *txdr);
+static int e1000_setup_rx_resources(struct e1000_adapter *adapter,
+ struct e1000_rx_ring *rxdr);
+static void e1000_free_tx_resources(struct e1000_adapter *adapter,
+ struct e1000_tx_ring *tx_ring);
+static void e1000_free_rx_resources(struct e1000_adapter *adapter,
+ struct e1000_rx_ring *rx_ring);
+void e1000_update_stats(struct e1000_adapter *adapter);
+
+static int e1000_init_module(void);
+static void e1000_exit_module(void);
+static int e1000_probe(struct pci_dev *pdev, const struct pci_device_id *ent);
+static void __devexit e1000_remove(struct pci_dev *pdev);
+static int e1000_alloc_queues(struct e1000_adapter *adapter);
+static int e1000_sw_init(struct e1000_adapter *adapter);
+static int e1000_open(struct net_device *netdev);
+static int e1000_close(struct net_device *netdev);
+static void e1000_configure_tx(struct e1000_adapter *adapter);
+static void e1000_configure_rx(struct e1000_adapter *adapter);
+static void e1000_setup_rctl(struct e1000_adapter *adapter);
+static void e1000_clean_all_tx_rings(struct e1000_adapter *adapter);
+static void e1000_clean_all_rx_rings(struct e1000_adapter *adapter);
+static void e1000_clean_tx_ring(struct e1000_adapter *adapter,
+ struct e1000_tx_ring *tx_ring);
+static void e1000_clean_rx_ring(struct e1000_adapter *adapter,
+ struct e1000_rx_ring *rx_ring);
+static void e1000_set_rx_mode(struct net_device *netdev);
+static void e1000_update_phy_info(unsigned long data);
+static void e1000_watchdog(unsigned long data);
+static void e1000_82547_tx_fifo_stall(unsigned long data);
+static int e1000_xmit_frame(struct sk_buff *skb, struct net_device *netdev);
+static struct net_device_stats * e1000_get_stats(struct net_device *netdev);
+static int e1000_change_mtu(struct net_device *netdev, int new_mtu);
+static int e1000_set_mac(struct net_device *netdev, void *p);
+static irqreturn_t e1000_intr(int irq, void *data);
+static irqreturn_t e1000_intr_msi(int irq, void *data);
+static bool e1000_clean_tx_irq(struct e1000_adapter *adapter,
+ struct e1000_tx_ring *tx_ring);
+static int e1000_clean(struct napi_struct *napi, int budget);
+static bool e1000_clean_rx_irq(struct e1000_adapter *adapter,
+ struct e1000_rx_ring *rx_ring,
+ int *work_done, int work_to_do);
+static void e1000_alloc_rx_buffers(struct e1000_adapter *adapter,
+ struct e1000_rx_ring *rx_ring,
+ int cleaned_count);
+static int e1000_ioctl(struct net_device *netdev, struct ifreq *ifr, int cmd);
+static int e1000_mii_ioctl(struct net_device *netdev, struct ifreq *ifr,
+ int cmd);
+static void e1000_enter_82542_rst(struct e1000_adapter *adapter);
+static void e1000_leave_82542_rst(struct e1000_adapter *adapter);
+static void e1000_tx_timeout(struct net_device *dev);
+static void e1000_reset_task(struct work_struct *work);
+static void e1000_smartspeed(struct e1000_adapter *adapter);
+static int e1000_82547_fifo_workaround(struct e1000_adapter *adapter,
+ struct sk_buff *skb);
+
+static void e1000_vlan_rx_register(struct net_device *netdev, struct vlan_group *grp);
+static void e1000_vlan_rx_add_vid(struct net_device *netdev, u16 vid);
+static void e1000_vlan_rx_kill_vid(struct net_device *netdev, u16 vid);
+static void e1000_restore_vlan(struct e1000_adapter *adapter);
+
+static int e1000_suspend(struct pci_dev *pdev, pm_message_t state);
+#ifdef CONFIG_PM
+static int e1000_resume(struct pci_dev *pdev);
+#endif
+static void e1000_shutdown(struct pci_dev *pdev);
+
+#ifdef CONFIG_NET_POLL_CONTROLLER
+/* for netdump / net console */
+static void e1000_netpoll (struct net_device *netdev);
+#endif
+
+#define COPYBREAK_DEFAULT 256
+static unsigned int copybreak __read_mostly = COPYBREAK_DEFAULT;
+module_param(copybreak, uint, 0644);
+MODULE_PARM_DESC(copybreak,
+ "Maximum size of packet that is copied to a new buffer on receive");
+
+static pci_ers_result_t e1000_io_error_detected(struct pci_dev *pdev,
+ pci_channel_state_t state);
+static pci_ers_result_t e1000_io_slot_reset(struct pci_dev *pdev);
+static void e1000_io_resume(struct pci_dev *pdev);
+
+static struct pci_error_handlers e1000_err_handler = {
+ .error_detected = e1000_io_error_detected,
+ .slot_reset = e1000_io_slot_reset,
+ .resume = e1000_io_resume,
+};
+
+static struct pci_driver e1000_driver = {
+ .name = e1000_driver_name,
+ .id_table = e1000_pci_tbl,
+ .probe = e1000_probe,
+ .remove = __devexit_p(e1000_remove),
+#ifdef CONFIG_PM
+ /* Power Management Hooks */
+ .suspend = e1000_suspend,
+ .resume = e1000_resume,
+#endif
+ .shutdown = e1000_shutdown,
+ .err_handler = &e1000_err_handler
+};
+
+MODULE_AUTHOR("Intel Corporation, <linux.nics@intel.com>");
+MODULE_DESCRIPTION("Intel(R) PRO/1000 Network Driver");
+MODULE_LICENSE("GPL");
+MODULE_VERSION(DRV_VERSION);
+
+static int debug = NETIF_MSG_DRV | NETIF_MSG_PROBE;
+module_param(debug, int, 0);
+MODULE_PARM_DESC(debug, "Debug level (0=none,...,16=all)");
+
+/**
+ * e1000_init_module - Driver Registration Routine
+ *
+ * e1000_init_module is the first routine called when the driver is
+ * loaded. All it does is register with the PCI subsystem.
+ **/
+
+static int __init e1000_init_module(void)
+{
+ int ret;
+ printk(KERN_INFO "%s - version %s\n",
+ e1000_driver_string, e1000_driver_version);
+
+ printk(KERN_INFO "%s\n", e1000_copyright);
+
+ ret = pci_register_driver(&e1000_driver);
+ if (copybreak != COPYBREAK_DEFAULT) {
+ if (copybreak == 0)
+ printk(KERN_INFO "e1000: copybreak disabled\n");
+ else
+ printk(KERN_INFO "e1000: copybreak enabled for "
+ "packets <= %u bytes\n", copybreak);
+ }
+ return ret;
+}
+
+module_init(e1000_init_module);
+
+/**
+ * e1000_exit_module - Driver Exit Cleanup Routine
+ *
+ * e1000_exit_module is called just before the driver is removed
+ * from memory.
+ **/
+
+static void __exit e1000_exit_module(void)
+{
+ pci_unregister_driver(&e1000_driver);
+}
+
+module_exit(e1000_exit_module);
+
+static int e1000_request_irq(struct e1000_adapter *adapter)
+{
+ struct e1000_hw *hw = &adapter->hw;
+ struct net_device *netdev = adapter->netdev;
+ irq_handler_t handler = e1000_intr;
+ int irq_flags = IRQF_SHARED;
+ int err;
+
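+	/* try MSI on 82571 and newer controllers; otherwise fall back to a shared legacy interrupt */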
+ if (hw->mac_type >= e1000_82571) {
+ adapter->have_msi = !pci_enable_msi(adapter->pdev);
+ if (adapter->have_msi) {
+ handler = e1000_intr_msi;
+ irq_flags = 0;
+ }
+ }
+
+ err = request_irq(adapter->pdev->irq, handler, irq_flags, netdev->name,
+ netdev);
+ if (err) {
+ if (adapter->have_msi)
+ pci_disable_msi(adapter->pdev);
+ DPRINTK(PROBE, ERR,
+ "Unable to allocate interrupt Error: %d\n", err);
+ }
+
+ return err;
+}
+
+static void e1000_free_irq(struct e1000_adapter *adapter)
+{
+ struct net_device *netdev = adapter->netdev;
+
+ free_irq(adapter->pdev->irq, netdev);
+
+ if (adapter->have_msi)
+ pci_disable_msi(adapter->pdev);
+}
+
+/**
+ * e1000_irq_disable - Mask off interrupt generation on the NIC
+ * @adapter: board private structure
+ **/
+
+static void e1000_irq_disable(struct e1000_adapter *adapter)
+{
+ struct e1000_hw *hw = &adapter->hw;
+
+ ew32(IMC, ~0);
+ E1000_WRITE_FLUSH();
+ synchronize_irq(adapter->pdev->irq);
+}
+
+/**
+ * e1000_irq_enable - Enable default interrupt generation settings
+ * @adapter: board private structure
+ **/
+
+static void e1000_irq_enable(struct e1000_adapter *adapter)
+{
+ struct e1000_hw *hw = &adapter->hw;
+
+ ew32(IMS, IMS_ENABLE_MASK);
+ E1000_WRITE_FLUSH();
+}
+
+static void e1000_update_mng_vlan(struct e1000_adapter *adapter)
+{
+ struct e1000_hw *hw = &adapter->hw;
+ struct net_device *netdev = adapter->netdev;
+ u16 vid = hw->mng_cookie.vlan_id;
+ u16 old_vid = adapter->mng_vlan_id;
+ if (adapter->vlgrp) {
+ if (!vlan_group_get_device(adapter->vlgrp, vid)) {
+ if (hw->mng_cookie.status &
+ E1000_MNG_DHCP_COOKIE_STATUS_VLAN_SUPPORT) {
+ e1000_vlan_rx_add_vid(netdev, vid);
+ adapter->mng_vlan_id = vid;
+ } else
+ adapter->mng_vlan_id = E1000_MNG_VLAN_NONE;
+
+ if ((old_vid != (u16)E1000_MNG_VLAN_NONE) &&
+ (vid != old_vid) &&
+ !vlan_group_get_device(adapter->vlgrp, old_vid))
+ e1000_vlan_rx_kill_vid(netdev, old_vid);
+ } else
+ adapter->mng_vlan_id = vid;
+ }
+}
+
+/**
+ * e1000_release_hw_control - release control of the h/w to f/w
+ * @adapter: address of board private structure
+ *
+ * e1000_release_hw_control resets {CTRL_EXT|FWSM}:DRV_LOAD bit.
+ * For ASF and Pass Through versions of f/w this means that the
+ * driver is no longer loaded. For AMT version (only with 82573)
+ * of the f/w this means that the network i/f is closed.
+ *
+ **/
+
+static void e1000_release_hw_control(struct e1000_adapter *adapter)
+{
+ u32 ctrl_ext;
+ u32 swsm;
+ struct e1000_hw *hw = &adapter->hw;
+
+ /* Let firmware take over control of h/w */
+ switch (hw->mac_type) {
+ case e1000_82573:
+ swsm = er32(SWSM);
+ ew32(SWSM, swsm & ~E1000_SWSM_DRV_LOAD);
+ break;
+ case e1000_82571:
+ case e1000_82572:
+ case e1000_80003es2lan:
+ case e1000_ich8lan:
+ ctrl_ext = er32(CTRL_EXT);
+ ew32(CTRL_EXT, ctrl_ext & ~E1000_CTRL_EXT_DRV_LOAD);
+ break;
+ default:
+ break;
+ }
+}
+
+/**
+ * e1000_get_hw_control - get control of the h/w from f/w
+ * @adapter: address of board private structure
+ *
+ * e1000_get_hw_control sets {CTRL_EXT|FWSM}:DRV_LOAD bit.
+ * For ASF and Pass Through versions of f/w this means that
+ * the driver is loaded. For AMT version (only with 82573)
+ * of the f/w this means that the network i/f is open.
+ *
+ **/
+
+static void e1000_get_hw_control(struct e1000_adapter *adapter)
+{
+ u32 ctrl_ext;
+ u32 swsm;
+ struct e1000_hw *hw = &adapter->hw;
+
+ /* Let firmware know the driver has taken over */
+ switch (hw->mac_type) {
+ case e1000_82573:
+ swsm = er32(SWSM);
+ ew32(SWSM, swsm | E1000_SWSM_DRV_LOAD);
+ break;
+ case e1000_82571:
+ case e1000_82572:
+ case e1000_80003es2lan:
+ case e1000_ich8lan:
+ ctrl_ext = er32(CTRL_EXT);
+ ew32(CTRL_EXT, ctrl_ext | E1000_CTRL_EXT_DRV_LOAD);
+ break;
+ default:
+ break;
+ }
+}
+
+static void e1000_init_manageability(struct e1000_adapter *adapter)
+{
+ struct e1000_hw *hw = &adapter->hw;
+
+ if (adapter->en_mng_pt) {
+ u32 manc = er32(MANC);
+
+ /* disable hardware interception of ARP */
+ manc &= ~(E1000_MANC_ARP_EN);
+
+ /* enable receiving management packets to the host */
+ /* this will probably generate destination unreachable messages
+ * from the host OS, but the packets will be handled on SMBUS */
+ if (hw->has_manc2h) {
+ u32 manc2h = er32(MANC2H);
+
+ manc |= E1000_MANC_EN_MNG2HOST;
+#define E1000_MNG2HOST_PORT_623 (1 << 5)
+#define E1000_MNG2HOST_PORT_664 (1 << 6)
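+			/* UDP ports 623 and 664 are the ASF/RMCP remote management ports */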
+ manc2h |= E1000_MNG2HOST_PORT_623;
+ manc2h |= E1000_MNG2HOST_PORT_664;
+ ew32(MANC2H, manc2h);
+ }
+
+ ew32(MANC, manc);
+ }
+}
+
+static void e1000_release_manageability(struct e1000_adapter *adapter)
+{
+ struct e1000_hw *hw = &adapter->hw;
+
+ if (adapter->en_mng_pt) {
+ u32 manc = er32(MANC);
+
+ /* re-enable hardware interception of ARP */
+ manc |= E1000_MANC_ARP_EN;
+
+ if (hw->has_manc2h)
+ manc &= ~E1000_MANC_EN_MNG2HOST;
+
+ /* don't explicitly have to mess with MANC2H since
+ * MANC has an enable/disable bit that gates MANC2H */
+
+ ew32(MANC, manc);
+ }
+}
+
+/**
+ * e1000_configure - configure the hardware for RX and TX
+ * @adapter: board private structure
+ **/
+static void e1000_configure(struct e1000_adapter *adapter)
+{
+ struct net_device *netdev = adapter->netdev;
+ int i;
+
+ e1000_set_rx_mode(netdev);
+
+ e1000_restore_vlan(adapter);
+ e1000_init_manageability(adapter);
+
+ e1000_configure_tx(adapter);
+ e1000_setup_rctl(adapter);
+ e1000_configure_rx(adapter);
+ /* call E1000_DESC_UNUSED which always leaves
+ * at least 1 descriptor unused to make sure
+ * next_to_use != next_to_clean */
+ for (i = 0; i < adapter->num_rx_queues; i++) {
+ struct e1000_rx_ring *ring = &adapter->rx_ring[i];
+ adapter->alloc_rx_buf(adapter, ring,
+ E1000_DESC_UNUSED(ring));
+ }
+
+ adapter->tx_queue_len = netdev->tx_queue_len;
+}
+
+int e1000_up(struct e1000_adapter *adapter)
+{
+ struct e1000_hw *hw = &adapter->hw;
+
+ /* hardware has been reset, we need to reload some things */
+ e1000_configure(adapter);
+
+ clear_bit(__E1000_DOWN, &adapter->flags);
+
+ napi_enable(&adapter->napi);
+
+ e1000_irq_enable(adapter);
+
+ /* fire a link change interrupt to start the watchdog */
+ ew32(ICS, E1000_ICS_LSC);
+ return 0;
+}
+
+/**
+ * e1000_power_up_phy - restore link in case the phy was powered down
+ * @adapter: address of board private structure
+ *
+ * The phy may be powered down to save power and turn off link when the
+ * driver is unloaded and wake on lan is not enabled (among others)
+ * *** this routine MUST be followed by a call to e1000_reset ***
+ *
+ **/
+
+void e1000_power_up_phy(struct e1000_adapter *adapter)
+{
+ struct e1000_hw *hw = &adapter->hw;
+ u16 mii_reg = 0;
+
+ /* Just clear the power down bit to wake the phy back up */
+ if (hw->media_type == e1000_media_type_copper) {
+ /* according to the manual, the phy will retain its
+ * settings across a power-down/up cycle */
+ e1000_read_phy_reg(hw, PHY_CTRL, &mii_reg);
+ mii_reg &= ~MII_CR_POWER_DOWN;
+ e1000_write_phy_reg(hw, PHY_CTRL, mii_reg);
+ }
+}
+
+static void e1000_power_down_phy(struct e1000_adapter *adapter)
+{
+ struct e1000_hw *hw = &adapter->hw;
+
+ /* Power down the PHY so no link is implied when interface is down *
+ * The PHY cannot be powered down if any of the following is true *
+ * (a) WoL is enabled
+ * (b) AMT is active
+ * (c) SoL/IDER session is active */
+ if (!adapter->wol && hw->mac_type >= e1000_82540 &&
+ hw->media_type == e1000_media_type_copper) {
+ u16 mii_reg = 0;
+
+ switch (hw->mac_type) {
+ case e1000_82540:
+ case e1000_82545:
+ case e1000_82545_rev_3:
+ case e1000_82546:
+ case e1000_82546_rev_3:
+ case e1000_82541:
+ case e1000_82541_rev_2:
+ case e1000_82547:
+ case e1000_82547_rev_2:
+ if (er32(MANC) & E1000_MANC_SMBUS_EN)
+ goto out;
+ break;
+ case e1000_82571:
+ case e1000_82572:
+ case e1000_82573:
+ case e1000_80003es2lan:
+ case e1000_ich8lan:
+ if (e1000_check_mng_mode(hw) ||
+ e1000_check_phy_reset_block(hw))
+ goto out;
+ break;
+ default:
+ goto out;
+ }
+ e1000_read_phy_reg(hw, PHY_CTRL, &mii_reg);
+ mii_reg |= MII_CR_POWER_DOWN;
+ e1000_write_phy_reg(hw, PHY_CTRL, mii_reg);
+ mdelay(1);
+ }
+out:
+ return;
+}
+
+void e1000_down(struct e1000_adapter *adapter)
+{
+ struct net_device *netdev = adapter->netdev;
+
+ /* signal that we're down so the interrupt handler does not
+ * reschedule our watchdog timer */
+ set_bit(__E1000_DOWN, &adapter->flags);
+
+ napi_disable(&adapter->napi);
+
+ e1000_irq_disable(adapter);
+
+ del_timer_sync(&adapter->tx_fifo_stall_timer);
+ del_timer_sync(&adapter->watchdog_timer);
+ del_timer_sync(&adapter->phy_info_timer);
+
+ netdev->tx_queue_len = adapter->tx_queue_len;
+ adapter->link_speed = 0;
+ adapter->link_duplex = 0;
+ netif_carrier_off(netdev);
+ netif_stop_queue(netdev);
+
+ e1000_reset(adapter);
+ e1000_clean_all_tx_rings(adapter);
+ e1000_clean_all_rx_rings(adapter);
+}
+
+void e1000_reinit_locked(struct e1000_adapter *adapter)
+{
+ WARN_ON(in_interrupt());
+ while (test_and_set_bit(__E1000_RESETTING, &adapter->flags))
+ msleep(1);
+ e1000_down(adapter);
+ e1000_up(adapter);
+ clear_bit(__E1000_RESETTING, &adapter->flags);
+}
+
+void e1000_reset(struct e1000_adapter *adapter)
+{
+ struct e1000_hw *hw = &adapter->hw;
+ u32 pba = 0, tx_space, min_tx_space, min_rx_space;
+ u16 fc_high_water_mark = E1000_FC_HIGH_DIFF;
+ bool legacy_pba_adjust = false;
+
+ /* Repartition Pba for greater than 9k mtu
+ * To take effect CTRL.RST is required.
+ */
+
+ switch (hw->mac_type) {
+ case e1000_82542_rev2_0:
+ case e1000_82542_rev2_1:
+ case e1000_82543:
+ case e1000_82544:
+ case e1000_82540:
+ case e1000_82541:
+ case e1000_82541_rev_2:
+ legacy_pba_adjust = true;
+ pba = E1000_PBA_48K;
+ break;
+ case e1000_82545:
+ case e1000_82545_rev_3:
+ case e1000_82546:
+ case e1000_82546_rev_3:
+ pba = E1000_PBA_48K;
+ break;
+ case e1000_82547:
+ case e1000_82547_rev_2:
+ legacy_pba_adjust = true;
+ pba = E1000_PBA_30K;
+ break;
+ case e1000_82571:
+ case e1000_82572:
+ case e1000_80003es2lan:
+ pba = E1000_PBA_38K;
+ break;
+ case e1000_82573:
+ pba = E1000_PBA_20K;
+ break;
+ case e1000_ich8lan:
+ pba = E1000_PBA_8K;
+ case e1000_undefined:
+ case e1000_num_macs:
+ break;
+ }
+
+ if (legacy_pba_adjust) {
+ if (adapter->netdev->mtu > E1000_RXBUFFER_8192)
+ pba -= 8; /* allocate more FIFO for Tx */
+
+ if (hw->mac_type == e1000_82547) {
+ adapter->tx_fifo_head = 0;
+ adapter->tx_head_addr = pba << E1000_TX_HEAD_ADDR_SHIFT;
+ adapter->tx_fifo_size =
+ (E1000_PBA_40K - pba) << E1000_PBA_BYTES_SHIFT;
+ atomic_set(&adapter->tx_fifo_stall, 0);
+ }
+ } else if (hw->max_frame_size > MAXIMUM_ETHERNET_FRAME_SIZE) {
+ /* adjust PBA for jumbo frames */
+ ew32(PBA, pba);
+
+ /* To maintain wire speed transmits, the Tx FIFO should be
+ * large enough to accommodate two full transmit packets,
+ * rounded up to the next 1KB and expressed in KB. Likewise,
+ * the Rx FIFO should be large enough to accommodate at least
+ * one full receive packet and is similarly rounded up and
+ * expressed in KB. */
+ pba = er32(PBA);
+ /* upper 16 bits has Tx packet buffer allocation size in KB */
+ tx_space = pba >> 16;
+ /* lower 16 bits has Rx packet buffer allocation size in KB */
+ pba &= 0xffff;
+ /* don't include ethernet FCS because hardware appends/strips */
+ min_rx_space = adapter->netdev->mtu + ENET_HEADER_SIZE +
+ VLAN_TAG_SIZE;
+ min_tx_space = min_rx_space;
+ min_tx_space *= 2;
+ min_tx_space = ALIGN(min_tx_space, 1024);
+ min_tx_space >>= 10;
+ min_rx_space = ALIGN(min_rx_space, 1024);
+ min_rx_space >>= 10;
+
+ /* If current Tx allocation is less than the min Tx FIFO size,
+ * and the min Tx FIFO size is less than the current Rx FIFO
+ * allocation, take space away from current Rx allocation */
+ if (tx_space < min_tx_space &&
+ ((min_tx_space - tx_space) < pba)) {
+ pba = pba - (min_tx_space - tx_space);
+
+ /* PCI/PCIx hardware has PBA alignment constraints */
+ switch (hw->mac_type) {
+ case e1000_82545 ... e1000_82546_rev_3:
+ pba &= ~(E1000_PBA_8K - 1);
+ break;
+ default:
+ break;
+ }
+
+ /* if short on rx space, rx wins and must trump tx
+ * adjustment or use Early Receive if available */
+ if (pba < min_rx_space) {
+ switch (hw->mac_type) {
+ case e1000_82573:
+ /* ERT enabled in e1000_configure_rx */
+ break;
+ default:
+ pba = min_rx_space;
+ break;
+ }
+ }
+ }
+ }
+
+ ew32(PBA, pba);
+
+ /* flow control settings */
+ /* Set the FC high water mark to 90% of the FIFO size.
+ * Required to clear last 3 LSB */
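+	/* pba is in KB: 9216/10 is 90% of 1024 bytes per KB; the mask keeps an 8-byte boundary */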
+ fc_high_water_mark = ((pba * 9216)/10) & 0xFFF8;
+ /* We can't use 90% on small FIFOs because the remainder
+ * would be less than 1 full frame. In this case, we size
+ * it to allow at least a full frame above the high water
+ * mark. */
+ if (pba < E1000_PBA_16K)
+ fc_high_water_mark = (pba * 1024) - 1600;
+
+ hw->fc_high_water = fc_high_water_mark;
+ hw->fc_low_water = fc_high_water_mark - 8;
+ if (hw->mac_type == e1000_80003es2lan)
+ hw->fc_pause_time = 0xFFFF;
+ else
+ hw->fc_pause_time = E1000_FC_PAUSE_TIME;
+ hw->fc_send_xon = 1;
+ hw->fc = hw->original_fc;
+
+ /* Allow time for pending master requests to run */
+ e1000_reset_hw(hw);
+ if (hw->mac_type >= e1000_82544)
+ ew32(WUC, 0);
+
+ if (e1000_init_hw(hw))
+ DPRINTK(PROBE, ERR, "Hardware Error\n");
+ e1000_update_mng_vlan(adapter);
+
+ /* if (adapter->hwflags & HWFLAGS_PHY_PWR_BIT) { */
+ if (hw->mac_type >= e1000_82544 &&
+ hw->mac_type <= e1000_82547_rev_2 &&
+ hw->autoneg == 1 &&
+ hw->autoneg_advertised == ADVERTISE_1000_FULL) {
+ u32 ctrl = er32(CTRL);
+ /* clear phy power management bit if we are in gig only mode,
+ * which if enabled will attempt negotiation to 100Mb, which
+ * can cause a loss of link at power off or driver unload */
+ ctrl &= ~E1000_CTRL_SWDPIN3;
+ ew32(CTRL, ctrl);
+ }
+
+ /* Enable h/w to recognize an 802.1Q VLAN Ethernet packet */
+ ew32(VET, ETHERNET_IEEE_VLAN_TYPE);
+
+ e1000_reset_adaptive(hw);
+ e1000_phy_get_info(hw, &adapter->phy_info);
+
+ if (!adapter->smart_power_down &&
+ (hw->mac_type == e1000_82571 ||
+ hw->mac_type == e1000_82572)) {
+ u16 phy_data = 0;
+ /* speed up time to link by disabling smart power down, ignore
+ * the return value of this function because there is nothing
+ * different we would do if it failed */
+ e1000_read_phy_reg(hw, IGP02E1000_PHY_POWER_MGMT,
+ &phy_data);
+ phy_data &= ~IGP02E1000_PM_SPD;
+ e1000_write_phy_reg(hw, IGP02E1000_PHY_POWER_MGMT,
+ phy_data);
+ }
+
+ e1000_release_manageability(adapter);
+}
+
+/**
+ * Dump the eeprom for users having checksum issues
+ **/
+static void e1000_dump_eeprom(struct e1000_adapter *adapter)
+{
+ struct net_device *netdev = adapter->netdev;
+ struct ethtool_eeprom eeprom;
+ const struct ethtool_ops *ops = netdev->ethtool_ops;
+ u8 *data;
+ int i;
+ u16 csum_old, csum_new = 0;
+
+ eeprom.len = ops->get_eeprom_len(netdev);
+ eeprom.offset = 0;
+
+ data = kmalloc(eeprom.len, GFP_KERNEL);
+ if (!data) {
+ printk(KERN_ERR "Unable to allocate memory to dump EEPROM"
+ " data\n");
+ return;
+ }
+
+ ops->get_eeprom(netdev, &eeprom, data);
+
+ csum_old = (data[EEPROM_CHECKSUM_REG * 2]) +
+ (data[EEPROM_CHECKSUM_REG * 2 + 1] << 8);
+ for (i = 0; i < EEPROM_CHECKSUM_REG * 2; i += 2)
+ csum_new += data[i] + (data[i + 1] << 8);
+ csum_new = EEPROM_SUM - csum_new;
+
+ printk(KERN_ERR "/*********************/\n");
+ printk(KERN_ERR "Current EEPROM Checksum : 0x%04x\n", csum_old);
+ printk(KERN_ERR "Calculated : 0x%04x\n", csum_new);
+
+ printk(KERN_ERR "Offset Values\n");
+ printk(KERN_ERR "======== ======\n");
+ print_hex_dump(KERN_ERR, "", DUMP_PREFIX_OFFSET, 16, 1, data, 128, 0);
+
+ printk(KERN_ERR "Include this output when contacting your support "
+ "provider.\n");
+ printk(KERN_ERR "This is not a software error! Something bad "
+ "happened to your hardware or\n");
+ printk(KERN_ERR "EEPROM image. Ignoring this "
+ "problem could result in further problems,\n");
+ printk(KERN_ERR "possibly loss of data, corruption or system hangs!\n");
+ printk(KERN_ERR "The MAC Address will be reset to 00:00:00:00:00:00, "
+ "which is invalid\n");
+ printk(KERN_ERR "and requires you to set the proper MAC "
+ "address manually before continuing\n");
+ printk(KERN_ERR "to enable this network device.\n");
+ printk(KERN_ERR "Please inspect the EEPROM dump and report the issue "
+ "to your hardware vendor\n");
+ printk(KERN_ERR "or Intel Customer Support.\n");
+ printk(KERN_ERR "/*********************/\n");
+
+ kfree(data);
+}
+
+/**
+ * e1000_is_need_ioport - determine if an adapter needs ioport resources or not
+ * @pdev: PCI device information struct
+ *
+ * Return true if an adapter needs ioport resources
+ **/
+static int e1000_is_need_ioport(struct pci_dev *pdev)
+{
+ switch (pdev->device) {
+ case E1000_DEV_ID_82540EM:
+ case E1000_DEV_ID_82540EM_LOM:
+ case E1000_DEV_ID_82540EP:
+ case E1000_DEV_ID_82540EP_LOM:
+ case E1000_DEV_ID_82540EP_LP:
+ case E1000_DEV_ID_82541EI:
+ case E1000_DEV_ID_82541EI_MOBILE:
+ case E1000_DEV_ID_82541ER:
+ case E1000_DEV_ID_82541ER_LOM:
+ case E1000_DEV_ID_82541GI:
+ case E1000_DEV_ID_82541GI_LF:
+ case E1000_DEV_ID_82541GI_MOBILE:
+ case E1000_DEV_ID_82544EI_COPPER:
+ case E1000_DEV_ID_82544EI_FIBER:
+ case E1000_DEV_ID_82544GC_COPPER:
+ case E1000_DEV_ID_82544GC_LOM:
+ case E1000_DEV_ID_82545EM_COPPER:
+ case E1000_DEV_ID_82545EM_FIBER:
+ case E1000_DEV_ID_82546EB_COPPER:
+ case E1000_DEV_ID_82546EB_FIBER:
+ case E1000_DEV_ID_82546EB_QUAD_COPPER:
+ return true;
+ default:
+ return false;
+ }
+}
+
+/**
+ * e1000_probe - Device Initialization Routine
+ * @pdev: PCI device information struct
+ * @ent: entry in e1000_pci_tbl
+ *
+ * Returns 0 on success, negative on failure
+ *
+ * e1000_probe initializes an adapter identified by a pci_dev structure.
+ * The OS initialization, configuring of the adapter private structure,
+ * and a hardware reset occur.
+ **/
+static int __devinit e1000_probe(struct pci_dev *pdev,
+ const struct pci_device_id *ent)
+{
+ struct net_device *netdev;
+ struct e1000_adapter *adapter;
+ struct e1000_hw *hw;
+
+ static int cards_found = 0;
+ static int global_quad_port_a = 0; /* global ksp3 port a indication */
+ int i, err, pci_using_dac;
+ u16 eeprom_data = 0;
+ u16 eeprom_apme_mask = E1000_EEPROM_APME;
+ int bars, need_ioport;
+ DECLARE_MAC_BUF(mac);
+
+ /* do not allocate ioport bars when not needed */
+ need_ioport = e1000_is_need_ioport(pdev);
+ if (need_ioport) {
+ bars = pci_select_bars(pdev, IORESOURCE_MEM | IORESOURCE_IO);
+ err = pci_enable_device(pdev);
+ } else {
+ bars = pci_select_bars(pdev, IORESOURCE_MEM);
+ err = pci_enable_device_mem(pdev);
+ }
+ if (err)
+ return err;
+
+ if (!pci_set_dma_mask(pdev, DMA_64BIT_MASK) &&
+ !pci_set_consistent_dma_mask(pdev, DMA_64BIT_MASK)) {
+ pci_using_dac = 1;
+ } else {
+ err = pci_set_dma_mask(pdev, DMA_32BIT_MASK);
+ if (err) {
+ err = pci_set_consistent_dma_mask(pdev, DMA_32BIT_MASK);
+ if (err) {
+ E1000_ERR("No usable DMA configuration, "
+ "aborting\n");
+ goto err_dma;
+ }
+ }
+ pci_using_dac = 0;
+ }
+
+ err = pci_request_selected_regions(pdev, bars, e1000_driver_name);
+ if (err)
+ goto err_pci_reg;
+
+ pci_set_master(pdev);
+
+ err = -ENOMEM;
+ netdev = alloc_etherdev(sizeof(struct e1000_adapter));
+ if (!netdev)
+ goto err_alloc_etherdev;
+
+ SET_NETDEV_DEV(netdev, &pdev->dev);
+
+ pci_set_drvdata(pdev, netdev);
+ adapter = netdev_priv(netdev);
+ adapter->netdev = netdev;
+ adapter->pdev = pdev;
+ adapter->msg_enable = (1 << debug) - 1;
+ adapter->bars = bars;
+ adapter->need_ioport = need_ioport;
+
+ hw = &adapter->hw;
+ hw->back = adapter;
+
+ err = -EIO;
+ hw->hw_addr = ioremap(pci_resource_start(pdev, BAR_0),
+ pci_resource_len(pdev, BAR_0));
+ if (!hw->hw_addr)
+ goto err_ioremap;
+
+ if (adapter->need_ioport) {
+ for (i = BAR_1; i <= BAR_5; i++) {
+ if (pci_resource_len(pdev, i) == 0)
+ continue;
+ if (pci_resource_flags(pdev, i) & IORESOURCE_IO) {
+ hw->io_base = pci_resource_start(pdev, i);
+ break;
+ }
+ }
+ }
+
+ netdev->open = &e1000_open;
+ netdev->stop = &e1000_close;
+ netdev->hard_start_xmit = &e1000_xmit_frame;
+ netdev->get_stats = &e1000_get_stats;
+ netdev->set_rx_mode = &e1000_set_rx_mode;
+ netdev->set_mac_address = &e1000_set_mac;
+ netdev->change_mtu = &e1000_change_mtu;
+ netdev->do_ioctl = &e1000_ioctl;
+ e1000_set_ethtool_ops(netdev);
+ netdev->tx_timeout = &e1000_tx_timeout;
+ netdev->watchdog_timeo = 5 * HZ;
+ netif_napi_add(netdev, &adapter->napi, e1000_clean, 64);
+ netdev->vlan_rx_register = e1000_vlan_rx_register;
+ netdev->vlan_rx_add_vid = e1000_vlan_rx_add_vid;
+ netdev->vlan_rx_kill_vid = e1000_vlan_rx_kill_vid;
+#ifdef CONFIG_NET_POLL_CONTROLLER
+ netdev->poll_controller = e1000_netpoll;
+#endif
+ strncpy(netdev->name, pci_name(pdev), sizeof(netdev->name) - 1);
+
+ adapter->bd_number = cards_found;
+
+ /* setup the private structure */
+
+ err = e1000_sw_init(adapter);
+ if (err)
+ goto err_sw_init;
+
+ err = -EIO;
+ /* Flash BAR mapping must happen after e1000_sw_init
+ * because it depends on mac_type */
+ if ((hw->mac_type == e1000_ich8lan) &&
+ (pci_resource_flags(pdev, 1) & IORESOURCE_MEM)) {
+ hw->flash_address =
+ ioremap(pci_resource_start(pdev, 1),
+ pci_resource_len(pdev, 1));
+ if (!hw->flash_address)
+ goto err_flashmap;
+ }
+
+ if (e1000_check_phy_reset_block(hw))
+ DPRINTK(PROBE, INFO, "PHY reset is blocked due to SOL/IDER session.\n");
+
+ if (hw->mac_type >= e1000_82543) {
+ netdev->features = NETIF_F_SG |
+ NETIF_F_HW_CSUM |
+ NETIF_F_HW_VLAN_TX |
+ NETIF_F_HW_VLAN_RX |
+ NETIF_F_HW_VLAN_FILTER;
+ if (hw->mac_type == e1000_ich8lan)
+ netdev->features &= ~NETIF_F_HW_VLAN_FILTER;
+ }
+
+ if ((hw->mac_type >= e1000_82544) &&
+ (hw->mac_type != e1000_82547))
+ netdev->features |= NETIF_F_TSO;
+
+ if (hw->mac_type > e1000_82547_rev_2)
+ netdev->features |= NETIF_F_TSO6;
+ if (pci_using_dac)
+ netdev->features |= NETIF_F_HIGHDMA;
+
+ netdev->features |= NETIF_F_LLTX;
+
+ netdev->vlan_features |= NETIF_F_TSO;
+ netdev->vlan_features |= NETIF_F_TSO6;
+ netdev->vlan_features |= NETIF_F_HW_CSUM;
+ netdev->vlan_features |= NETIF_F_SG;
+
+ adapter->en_mng_pt = e1000_enable_mng_pass_thru(hw);
+
+ /* initialize eeprom parameters */
+ if (e1000_init_eeprom_params(hw)) {
+ E1000_ERR("EEPROM initialization failed\n");
+ goto err_eeprom;
+ }
+
+ /* before reading the EEPROM, reset the controller to
+ * put the device in a known good starting state */
+
+ e1000_reset_hw(hw);
+
+ /* make sure the EEPROM is good */
+ if (e1000_validate_eeprom_checksum(hw) < 0) {
+ DPRINTK(PROBE, ERR, "The EEPROM Checksum Is Not Valid\n");
+ e1000_dump_eeprom(adapter);
+ /*
+ * Set the MAC address to all zeroes to invalidate it and temporarily
+ * disable this device for the user. This blocks regular
+ * traffic while still permitting ethtool ioctls from reaching
+ * the hardware as well as allowing the user to run the
+ * interface after manually setting a hw addr using
+ * `ip set address`.
+ */
+ memset(hw->mac_addr, 0, netdev->addr_len);
+ } else {
+ /* copy the MAC address out of the EEPROM */
+ if (e1000_read_mac_addr(hw))
+ DPRINTK(PROBE, ERR, "EEPROM Read Error\n");
+ }
+ /* don't block initialization here due to a bad MAC address */
+ memcpy(netdev->dev_addr, hw->mac_addr, netdev->addr_len);
+ memcpy(netdev->perm_addr, hw->mac_addr, netdev->addr_len);
+
+ if (!is_valid_ether_addr(netdev->perm_addr))
+ DPRINTK(PROBE, ERR, "Invalid MAC Address\n");
+
+ e1000_get_bus_info(hw);
+
+ init_timer(&adapter->tx_fifo_stall_timer);
+ adapter->tx_fifo_stall_timer.function = &e1000_82547_tx_fifo_stall;
+ adapter->tx_fifo_stall_timer.data = (unsigned long)adapter;
+
+ init_timer(&adapter->watchdog_timer);
+ adapter->watchdog_timer.function = &e1000_watchdog;
+ adapter->watchdog_timer.data = (unsigned long) adapter;
+
+ init_timer(&adapter->phy_info_timer);
+ adapter->phy_info_timer.function = &e1000_update_phy_info;
+ adapter->phy_info_timer.data = (unsigned long)adapter;
+
+ INIT_WORK(&adapter->reset_task, e1000_reset_task);
+
+ e1000_check_options(adapter);
+
+ /* Initial Wake on LAN setting
+ * If APM wake is enabled in the EEPROM,
+ * enable the ACPI Magic Packet filter
+ */
+
+ switch (hw->mac_type) {
+ case e1000_82542_rev2_0:
+ case e1000_82542_rev2_1:
+ case e1000_82543:
+ break;
+ case e1000_82544:
+ e1000_read_eeprom(hw,
+ EEPROM_INIT_CONTROL2_REG, 1, &eeprom_data);
+ eeprom_apme_mask = E1000_EEPROM_82544_APM;
+ break;
+ case e1000_ich8lan:
+ e1000_read_eeprom(hw,
+ EEPROM_INIT_CONTROL1_REG, 1, &eeprom_data);
+ eeprom_apme_mask = E1000_EEPROM_ICH8_APME;
+ break;
+ case e1000_82546:
+ case e1000_82546_rev_3:
+ case e1000_82571:
+ case e1000_80003es2lan:
+ if (er32(STATUS) & E1000_STATUS_FUNC_1){
+ e1000_read_eeprom(hw,
+ EEPROM_INIT_CONTROL3_PORT_B, 1, &eeprom_data);
+ break;
+ }
+ /* Fall Through */
+ default:
+ e1000_read_eeprom(hw,
+ EEPROM_INIT_CONTROL3_PORT_A, 1, &eeprom_data);
+ break;
+ }
+ if (eeprom_data & eeprom_apme_mask)
+ adapter->eeprom_wol |= E1000_WUFC_MAG;
+
+ /* now that we have the eeprom settings, apply the special cases
+ * where the eeprom may be wrong or the board simply won't support
+ * wake on lan on a particular port */
+ switch (pdev->device) {
+ case E1000_DEV_ID_82546GB_PCIE:
+ adapter->eeprom_wol = 0;
+ break;
+ case E1000_DEV_ID_82546EB_FIBER:
+ case E1000_DEV_ID_82546GB_FIBER:
+ case E1000_DEV_ID_82571EB_FIBER:
+ /* Wake events only supported on port A for dual fiber
+ * regardless of eeprom setting */
+ if (er32(STATUS) & E1000_STATUS_FUNC_1)
+ adapter->eeprom_wol = 0;
+ break;
+ case E1000_DEV_ID_82546GB_QUAD_COPPER_KSP3:
+ case E1000_DEV_ID_82571EB_QUAD_COPPER:
+ case E1000_DEV_ID_82571EB_QUAD_FIBER:
+ case E1000_DEV_ID_82571EB_QUAD_COPPER_LOWPROFILE:
+ case E1000_DEV_ID_82571PT_QUAD_COPPER:
+ /* if quad port adapter, disable WoL on all but port A */
+ if (global_quad_port_a != 0)
+ adapter->eeprom_wol = 0;
+ else
+ adapter->quad_port_a = 1;
+ /* Reset for multiple quad port adapters */
+ if (++global_quad_port_a == 4)
+ global_quad_port_a = 0;
+ break;
+ }
+
+ /* initialize the wol settings based on the eeprom settings */
+ adapter->wol = adapter->eeprom_wol;
+ device_set_wakeup_enable(&adapter->pdev->dev, adapter->wol);
+
+ /* print bus type/speed/width info */
+ DPRINTK(PROBE, INFO, "(PCI%s:%s:%s) ",
+ ((hw->bus_type == e1000_bus_type_pcix) ? "-X" :
+ (hw->bus_type == e1000_bus_type_pci_express ? " Express":"")),
+ ((hw->bus_speed == e1000_bus_speed_2500) ? "2.5Gb/s" :
+ (hw->bus_speed == e1000_bus_speed_133) ? "133MHz" :
+ (hw->bus_speed == e1000_bus_speed_120) ? "120MHz" :
+ (hw->bus_speed == e1000_bus_speed_100) ? "100MHz" :
+ (hw->bus_speed == e1000_bus_speed_66) ? "66MHz" : "33MHz"),
+ ((hw->bus_width == e1000_bus_width_64) ? "64-bit" :
+ (hw->bus_width == e1000_bus_width_pciex_4) ? "Width x4" :
+ (hw->bus_width == e1000_bus_width_pciex_1) ? "Width x1" :
+ "32-bit"));
+
+ printk("%s\n", print_mac(mac, netdev->dev_addr));
+
+ if (hw->bus_type == e1000_bus_type_pci_express) {
+ DPRINTK(PROBE, WARNING, "This device (id %04x:%04x) will no "
+ "longer be supported by this driver in the future.\n",
+ pdev->vendor, pdev->device);
+ DPRINTK(PROBE, WARNING, "please use the \"e1000e\" "
+ "driver instead.\n");
+ }
+
+ /* reset the hardware with the new settings */
+ e1000_reset(adapter);
+
+ /* If the controller is 82573 and f/w is AMT, do not set
+ * DRV_LOAD until the interface is up. For all other cases,
+ * let the f/w know that the h/w is now under the control
+ * of the driver. */
+ if (hw->mac_type != e1000_82573 ||
+ !e1000_check_mng_mode(hw))
+ e1000_get_hw_control(adapter);
+
+ /* tell the stack to leave us alone until e1000_open() is called */
+ netif_carrier_off(netdev);
+ netif_stop_queue(netdev);
+
+ strcpy(netdev->name, "eth%d");
+ err = register_netdev(netdev);
+ if (err)
+ goto err_register;
+
+ DPRINTK(PROBE, INFO, "Intel(R) PRO/1000 Network Connection\n");
+
+ cards_found++;
+ return 0;
+
+err_register:
+ e1000_release_hw_control(adapter);
+err_eeprom:
+ if (!e1000_check_phy_reset_block(hw))
+ e1000_phy_hw_reset(hw);
+
+ if (hw->flash_address)
+ iounmap(hw->flash_address);
+err_flashmap:
+ for (i = 0; i < adapter->num_rx_queues; i++)
+ dev_put(&adapter->polling_netdev[i]);
+
+ kfree(adapter->tx_ring);
+ kfree(adapter->rx_ring);
+ kfree(adapter->polling_netdev);
+err_sw_init:
+ iounmap(hw->hw_addr);
+err_ioremap:
+ free_netdev(netdev);
+err_alloc_etherdev:
+ pci_release_selected_regions(pdev, bars);
+err_pci_reg:
+err_dma:
+ pci_disable_device(pdev);
+ return err;
+}
+
+/**
+ * e1000_remove - Device Removal Routine
+ * @pdev: PCI device information struct
+ *
+ * e1000_remove is called by the PCI subsystem to alert the driver
+ * that it should release a PCI device. This could be caused by a
+ * Hot-Plug event, or because the driver is going to be removed from
+ * memory.
+ **/
+
+static void __devexit e1000_remove(struct pci_dev *pdev)
+{
+ struct net_device *netdev = pci_get_drvdata(pdev);
+ struct e1000_adapter *adapter = netdev_priv(netdev);
+ struct e1000_hw *hw = &adapter->hw;
+ int i;
+
+ cancel_work_sync(&adapter->reset_task);
+
+ e1000_release_manageability(adapter);
+
+ /* Release control of h/w to f/w. If f/w is AMT enabled, this
+ * would have already happened in close and is redundant. */
+ e1000_release_hw_control(adapter);
+
+ for (i = 0; i < adapter->num_rx_queues; i++)
+ dev_put(&adapter->polling_netdev[i]);
+
+ unregister_netdev(netdev);
+
+ if (!e1000_check_phy_reset_block(hw))
+ e1000_phy_hw_reset(hw);
+
+ kfree(adapter->tx_ring);
+ kfree(adapter->rx_ring);
+ kfree(adapter->polling_netdev);
+
+ iounmap(hw->hw_addr);
+ if (hw->flash_address)
+ iounmap(hw->flash_address);
+ pci_release_selected_regions(pdev, adapter->bars);
+
+ free_netdev(netdev);
+
+ pci_disable_device(pdev);
+}
+
+/**
+ * e1000_sw_init - Initialize general software structures (struct e1000_adapter)
+ * @adapter: board private structure to initialize
+ *
+ * e1000_sw_init initializes the Adapter private data structure.
+ * Fields are initialized based on PCI device information and
+ * OS network device settings (MTU size).
+ **/
+
+static int __devinit e1000_sw_init(struct e1000_adapter *adapter)
+{
+ struct e1000_hw *hw = &adapter->hw;
+ struct net_device *netdev = adapter->netdev;
+ struct pci_dev *pdev = adapter->pdev;
+ int i;
+
+ /* PCI config space info */
+
+ hw->vendor_id = pdev->vendor;
+ hw->device_id = pdev->device;
+ hw->subsystem_vendor_id = pdev->subsystem_vendor;
+ hw->subsystem_id = pdev->subsystem_device;
+ hw->revision_id = pdev->revision;
+
+ pci_read_config_word(pdev, PCI_COMMAND, &hw->pci_cmd_word);
+
+ adapter->rx_buffer_len = MAXIMUM_ETHERNET_VLAN_SIZE;
+ hw->max_frame_size = netdev->mtu +
+ ENET_HEADER_SIZE + ETHERNET_FCS_SIZE;
+ hw->min_frame_size = MINIMUM_ETHERNET_FRAME_SIZE;
+
+ /* identify the MAC */
+
+ if (e1000_set_mac_type(hw)) {
+ DPRINTK(PROBE, ERR, "Unknown MAC Type\n");
+ return -EIO;
+ }
+
+ switch (hw->mac_type) {
+ default:
+ break;
+ case e1000_82541:
+ case e1000_82547:
+ case e1000_82541_rev_2:
+ case e1000_82547_rev_2:
+ hw->phy_init_script = 1;
+ break;
+ }
+
+ e1000_set_media_type(hw);
+
+ hw->wait_autoneg_complete = false;
+ hw->tbi_compatibility_en = true;
+ hw->adaptive_ifs = true;
+
+ /* Copper options */
+
+ if (hw->media_type == e1000_media_type_copper) {
+ hw->mdix = AUTO_ALL_MODES;
+ hw->disable_polarity_correction = false;
+ hw->master_slave = E1000_MASTER_SLAVE;
+ }
+
+ adapter->num_tx_queues = 1;
+ adapter->num_rx_queues = 1;
+
+ if (e1000_alloc_queues(adapter)) {
+ DPRINTK(PROBE, ERR, "Unable to allocate memory for queues\n");
+ return -ENOMEM;
+ }
+
+ for (i = 0; i < adapter->num_rx_queues; i++) {
+ adapter->polling_netdev[i].priv = adapter;
+ dev_hold(&adapter->polling_netdev[i]);
+ set_bit(__LINK_STATE_START, &adapter->polling_netdev[i].state);
+ }
+ spin_lock_init(&adapter->tx_queue_lock);
+
+ /* Explicitly disable IRQ since the NIC can be in any state. */
+ e1000_irq_disable(adapter);
+
+ spin_lock_init(&adapter->stats_lock);
+
+ set_bit(__E1000_DOWN, &adapter->flags);
+
+ return 0;
+}
+
+/**
+ * e1000_alloc_queues - Allocate memory for all rings
+ * @adapter: board private structure to initialize
+ *
+ * We allocate one ring per queue at run-time since we don't know the
+ * number of queues at compile-time. The polling_netdev array is
+ * intended for Multiqueue, but should work fine with a single queue.
+ **/
+
+static int __devinit e1000_alloc_queues(struct e1000_adapter *adapter)
+{
+ adapter->tx_ring = kcalloc(adapter->num_tx_queues,
+ sizeof(struct e1000_tx_ring), GFP_KERNEL);
+ if (!adapter->tx_ring)
+ return -ENOMEM;
+
+ adapter->rx_ring = kcalloc(adapter->num_rx_queues,
+ sizeof(struct e1000_rx_ring), GFP_KERNEL);
+ if (!adapter->rx_ring) {
+ kfree(adapter->tx_ring);
+ return -ENOMEM;
+ }
+
+ adapter->polling_netdev = kcalloc(adapter->num_rx_queues,
+ sizeof(struct net_device),
+ GFP_KERNEL);
+ if (!adapter->polling_netdev) {
+ kfree(adapter->tx_ring);
+ kfree(adapter->rx_ring);
+ return -ENOMEM;
+ }
+
+ return E1000_SUCCESS;
+}
+
+/**
+ * e1000_open - Called when a network interface is made active
+ * @netdev: network interface device structure
+ *
+ * Returns 0 on success, negative value on failure
+ *
+ * The open entry point is called when a network interface is made
+ * active by the system (IFF_UP). At this point all resources needed
+ * for transmit and receive operations are allocated, the interrupt
+ * handler is registered with the OS, the watchdog timer is started,
+ * and the stack is notified that the interface is ready.
+ **/
+
+static int e1000_open(struct net_device *netdev)
+{
+ struct e1000_adapter *adapter = netdev_priv(netdev);
+ struct e1000_hw *hw = &adapter->hw;
+ int err;
+
+ /* disallow open during test */
+ if (test_bit(__E1000_TESTING, &adapter->flags))
+ return -EBUSY;
+
+ /* allocate transmit descriptors */
+ err = e1000_setup_all_tx_resources(adapter);
+ if (err)
+ goto err_setup_tx;
+
+ /* allocate receive descriptors */
+ err = e1000_setup_all_rx_resources(adapter);
+ if (err)
+ goto err_setup_rx;
+
+ e1000_power_up_phy(adapter);
+
+ adapter->mng_vlan_id = E1000_MNG_VLAN_NONE;
+ if ((hw->mng_cookie.status &
+ E1000_MNG_DHCP_COOKIE_STATUS_VLAN_SUPPORT)) {
+ e1000_update_mng_vlan(adapter);
+ }
+
+ /* If AMT is enabled, let the firmware know that the network
+ * interface is now open */
+ if (hw->mac_type == e1000_82573 &&
+ e1000_check_mng_mode(hw))
+ e1000_get_hw_control(adapter);
+
+ /* before we allocate an interrupt, we must be ready to handle it.
+ * Setting DEBUG_SHIRQ in the kernel makes it fire an interrupt
+ * as soon as the IRQ is requested, so we have to set up our
+ * clean_rx handler before we do so. */
+ e1000_configure(adapter);
+
+ err = e1000_request_irq(adapter);
+ if (err)
+ goto err_req_irq;
+
+ /* From here on the code is the same as e1000_up() */
+ clear_bit(__E1000_DOWN, &adapter->flags);
+
+ napi_enable(&adapter->napi);
+
+ e1000_irq_enable(adapter);
+
+ netif_start_queue(netdev);
+
+ /* fire a link status change interrupt to start the watchdog */
+ ew32(ICS, E1000_ICS_LSC);
+
+ return E1000_SUCCESS;
+
+err_req_irq:
+ e1000_release_hw_control(adapter);
+ e1000_power_down_phy(adapter);
+ e1000_free_all_rx_resources(adapter);
+err_setup_rx:
+ e1000_free_all_tx_resources(adapter);
+err_setup_tx:
+ e1000_reset(adapter);
+
+ return err;
+}
+
+/**
+ * e1000_close - Disables a network interface
+ * @netdev: network interface device structure
+ *
+ * Returns 0, this is not allowed to fail
+ *
+ * The close entry point is called when an interface is de-activated
+ * by the OS. The hardware is still under the driver's control, but
+ * needs to be disabled. A global MAC reset is issued to stop the
+ * hardware, and all transmit and receive resources are freed.
+ **/
+
+static int e1000_close(struct net_device *netdev)
+{
+ struct e1000_adapter *adapter = netdev_priv(netdev);
+ struct e1000_hw *hw = &adapter->hw;
+
+ WARN_ON(test_bit(__E1000_RESETTING, &adapter->flags));
+ e1000_down(adapter);
+ e1000_power_down_phy(adapter);
+ e1000_free_irq(adapter);
+
+ e1000_free_all_tx_resources(adapter);
+ e1000_free_all_rx_resources(adapter);
+
+ /* kill manageability vlan ID if supported, but not if a vlan with
+ * the same ID is registered on the host OS (let 8021q kill it) */
+ if ((hw->mng_cookie.status &
+ E1000_MNG_DHCP_COOKIE_STATUS_VLAN_SUPPORT) &&
+ !(adapter->vlgrp &&
+ vlan_group_get_device(adapter->vlgrp, adapter->mng_vlan_id))) {
+ e1000_vlan_rx_kill_vid(netdev, adapter->mng_vlan_id);
+ }
+
+ /* If AMT is enabled, let the firmware know that the network
+ * interface is now closed */
+ if (hw->mac_type == e1000_82573 &&
+ e1000_check_mng_mode(hw))
+ e1000_release_hw_control(adapter);
+
+ return 0;
+}
+
+/**
+ * e1000_check_64k_bound - check that memory doesn't cross 64kB boundary
+ * @adapter: address of board private structure
+ * @start: address of beginning of memory
+ * @len: length of memory
+ **/
+static bool e1000_check_64k_bound(struct e1000_adapter *adapter, void *start,
+ unsigned long len)
+{
+ struct e1000_hw *hw = &adapter->hw;
+ unsigned long begin = (unsigned long)start;
+ unsigned long end = begin + len;
+
+ /* First-rev 82545 and 82546 must not let any memory
+ * write location cross a 64kB boundary (erratum 23) */
+ if (hw->mac_type == e1000_82545 ||
+ hw->mac_type == e1000_82546) {
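+ /* begin and end - 1 lie in the same 64kB page exactly when
+ * their addresses agree in bit 16 and above */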
+ return ((begin ^ (end - 1)) >> 16) == 0;
+ }
+
+ return true;
+}
+
+/**
+ * e1000_setup_tx_resources - allocate Tx resources (Descriptors)
+ * @adapter: board private structure
+ * @txdr: tx descriptor ring (for a specific queue) to setup
+ *
+ * Return 0 on success, negative on failure
+ **/
+
+static int e1000_setup_tx_resources(struct e1000_adapter *adapter,
+ struct e1000_tx_ring *txdr)
+{
+ struct pci_dev *pdev = adapter->pdev;
+ int size;
+
+ size = sizeof(struct e1000_buffer) * txdr->count;
+ txdr->buffer_info = vmalloc(size);
+ if (!txdr->buffer_info) {
+ DPRINTK(PROBE, ERR,
+ "Unable to allocate memory for the transmit descriptor ring\n");
+ return -ENOMEM;
+ }
+ memset(txdr->buffer_info, 0, size);
+
+ /* round up to nearest 4K */
+
+ txdr->size = txdr->count * sizeof(struct e1000_tx_desc);
+ txdr->size = ALIGN(txdr->size, 4096);
+
+ txdr->desc = pci_alloc_consistent(pdev, txdr->size, &txdr->dma);
+ if (!txdr->desc) {
+setup_tx_desc_die:
+ vfree(txdr->buffer_info);
+ DPRINTK(PROBE, ERR,
+ "Unable to allocate memory for the transmit descriptor ring\n");
+ return -ENOMEM;
+ }
+
+ /* Fix for errata 23, can't cross 64kB boundary */
+ if (!e1000_check_64k_bound(adapter, txdr->desc, txdr->size)) {
+ void *olddesc = txdr->desc;
+ dma_addr_t olddma = txdr->dma;
+ DPRINTK(TX_ERR, ERR, "txdr align check failed: %u bytes "
+ "at %p\n", txdr->size, txdr->desc);
+ /* Try again, without freeing the previous */
+ txdr->desc = pci_alloc_consistent(pdev, txdr->size, &txdr->dma);
+ /* Failed allocation, critical failure */
+ if (!txdr->desc) {
+ pci_free_consistent(pdev, txdr->size, olddesc, olddma);
+ goto setup_tx_desc_die;
+ }
+
+ if (!e1000_check_64k_bound(adapter, txdr->desc, txdr->size)) {
+ /* give up */
+ pci_free_consistent(pdev, txdr->size, txdr->desc,
+ txdr->dma);
+ pci_free_consistent(pdev, txdr->size, olddesc, olddma);
+ DPRINTK(PROBE, ERR,
+ "Unable to allocate aligned memory "
+ "for the transmit descriptor ring\n");
+ vfree(txdr->buffer_info);
+ return -ENOMEM;
+ } else {
+ /* Free old allocation, new allocation was successful */
+ pci_free_consistent(pdev, txdr->size, olddesc, olddma);
+ }
+ }
+ memset(txdr->desc, 0, txdr->size);
+
+ txdr->next_to_use = 0;
+ txdr->next_to_clean = 0;
+ spin_lock_init(&txdr->tx_lock);
+
+ return 0;
+}
+
+/**
+ * e1000_setup_all_tx_resources - wrapper to allocate Tx resources
+ * (Descriptors) for all queues
+ * @adapter: board private structure
+ *
+ * Return 0 on success, negative on failure
+ **/
+
+int e1000_setup_all_tx_resources(struct e1000_adapter *adapter)
+{
+ int i, err = 0;
+
+ for (i = 0; i < adapter->num_tx_queues; i++) {
+ err = e1000_setup_tx_resources(adapter, &adapter->tx_ring[i]);
+ if (err) {
+ DPRINTK(PROBE, ERR,
+ "Allocation for Tx Queue %u failed\n", i);
+ for (i-- ; i >= 0; i--)
+ e1000_free_tx_resources(adapter,
+ &adapter->tx_ring[i]);
+ break;
+ }
+ }
+
+ return err;
+}
+
+/**
+ * e1000_configure_tx - Configure 8254x Transmit Unit after Reset
+ * @adapter: board private structure
+ *
+ * Configure the Tx unit of the MAC after a reset.
+ **/
+
+static void e1000_configure_tx(struct e1000_adapter *adapter)
+{
+ u64 tdba;
+ struct e1000_hw *hw = &adapter->hw;
+ u32 tdlen, tctl, tipg, tarc;
+ u32 ipgr1, ipgr2;
+
+ /* Setup the HW Tx Head and Tail descriptor pointers */
+
+ switch (adapter->num_tx_queues) {
+ case 1:
+ default:
+ tdba = adapter->tx_ring[0].dma;
+ tdlen = adapter->tx_ring[0].count *
+ sizeof(struct e1000_tx_desc);
+ ew32(TDLEN, tdlen);
+ ew32(TDBAH, (tdba >> 32));
+ ew32(TDBAL, (tdba & 0x00000000ffffffffULL));
+ ew32(TDT, 0);
+ ew32(TDH, 0);
+ adapter->tx_ring[0].tdh = ((hw->mac_type >= e1000_82543) ? E1000_TDH : E1000_82542_TDH);
+ adapter->tx_ring[0].tdt = ((hw->mac_type >= e1000_82543) ? E1000_TDT : E1000_82542_TDT);
+ break;
+ }
+
+ /* Set the default values for the Tx Inter Packet Gap timer */
+ if (hw->mac_type <= e1000_82547_rev_2 &&
+ (hw->media_type == e1000_media_type_fiber ||
+ hw->media_type == e1000_media_type_internal_serdes))
+ tipg = DEFAULT_82543_TIPG_IPGT_FIBER;
+ else
+ tipg = DEFAULT_82543_TIPG_IPGT_COPPER;
+
+ switch (hw->mac_type) {
+ case e1000_82542_rev2_0:
+ case e1000_82542_rev2_1:
+ tipg = DEFAULT_82542_TIPG_IPGT;
+ ipgr1 = DEFAULT_82542_TIPG_IPGR1;
+ ipgr2 = DEFAULT_82542_TIPG_IPGR2;
+ break;
+ case e1000_80003es2lan:
+ ipgr1 = DEFAULT_82543_TIPG_IPGR1;
+ ipgr2 = DEFAULT_80003ES2LAN_TIPG_IPGR2;
+ break;
+ default:
+ ipgr1 = DEFAULT_82543_TIPG_IPGR1;
+ ipgr2 = DEFAULT_82543_TIPG_IPGR2;
+ break;
+ }
+ tipg |= ipgr1 << E1000_TIPG_IPGR1_SHIFT;
+ tipg |= ipgr2 << E1000_TIPG_IPGR2_SHIFT;
+ ew32(TIPG, tipg);
+
+ /* Set the Tx Interrupt Delay register */
+
+ ew32(TIDV, adapter->tx_int_delay);
+ if (hw->mac_type >= e1000_82540)
+ ew32(TADV, adapter->tx_abs_int_delay);
+
+ /* Program the Transmit Control Register */
+
+ tctl = er32(TCTL);
+ tctl &= ~E1000_TCTL_CT;
+ tctl |= E1000_TCTL_PSP | E1000_TCTL_RTLC |
+ (E1000_COLLISION_THRESHOLD << E1000_CT_SHIFT);
+
+ if (hw->mac_type == e1000_82571 || hw->mac_type == e1000_82572) {
+ tarc = er32(TARC0);
+ /* set the speed mode bit, we'll clear it if we're not at
+ * gigabit link later */
+ tarc |= (1 << 21);
+ ew32(TARC0, tarc);
+ } else if (hw->mac_type == e1000_80003es2lan) {
+ tarc = er32(TARC0);
+ tarc |= 1;
+ ew32(TARC0, tarc);
+ tarc = er32(TARC1);
+ tarc |= 1;
+ ew32(TARC1, tarc);
+ }
+
+ e1000_config_collision_dist(hw);
+
+ /* Setup Transmit Descriptor Settings for eop descriptor */
+ adapter->txd_cmd = E1000_TXD_CMD_EOP | E1000_TXD_CMD_IFCS;
+
+ /* only set IDE if we are delaying interrupts using the timers */
+ if (adapter->tx_int_delay)
+ adapter->txd_cmd |= E1000_TXD_CMD_IDE;
+
+ if (hw->mac_type < e1000_82543)
+ adapter->txd_cmd |= E1000_TXD_CMD_RPS;
+ else
+ adapter->txd_cmd |= E1000_TXD_CMD_RS;
+
+ /* Cache if we're 82544 running in PCI-X because we'll
+ * need this to apply a workaround later in the send path. */
+ if (hw->mac_type == e1000_82544 &&
+ hw->bus_type == e1000_bus_type_pcix)
+ adapter->pcix_82544 = 1;
+
+ ew32(TCTL, tctl);
+
+}
+
+/**
+ * e1000_setup_rx_resources - allocate Rx resources (Descriptors)
+ * @adapter: board private structure
+ * @rxdr: rx descriptor ring (for a specific queue) to setup
+ *
+ * Returns 0 on success, negative on failure
+ **/
+
+static int e1000_setup_rx_resources(struct e1000_adapter *adapter,
+ struct e1000_rx_ring *rxdr)
+{
+ struct e1000_hw *hw = &adapter->hw;
+ struct pci_dev *pdev = adapter->pdev;
+ int size, desc_len;
+
+ size = sizeof(struct e1000_buffer) * rxdr->count;
+ rxdr->buffer_info = vmalloc(size);
+ if (!rxdr->buffer_info) {
+ DPRINTK(PROBE, ERR,
+ "Unable to allocate memory for the receive descriptor ring\n");
+ return -ENOMEM;
+ }
+ memset(rxdr->buffer_info, 0, size);
+
+ if (hw->mac_type <= e1000_82547_rev_2)
+ desc_len = sizeof(struct e1000_rx_desc);
+ else
+ desc_len = sizeof(union e1000_rx_desc_packet_split);
+
+ /* Round up to nearest 4K */
+
+ rxdr->size = rxdr->count * desc_len;
+ rxdr->size = ALIGN(rxdr->size, 4096);
+
+ rxdr->desc = pci_alloc_consistent(pdev, rxdr->size, &rxdr->dma);
+
+ if (!rxdr->desc) {
+ DPRINTK(PROBE, ERR,
+ "Unable to allocate memory for the receive descriptor ring\n");
+setup_rx_desc_die:
+ vfree(rxdr->buffer_info);
+ return -ENOMEM;
+ }
+
+ /* Fix for errata 23, can't cross 64kB boundary */
+ if (!e1000_check_64k_bound(adapter, rxdr->desc, rxdr->size)) {
+ void *olddesc = rxdr->desc;
+ dma_addr_t olddma = rxdr->dma;
+ DPRINTK(RX_ERR, ERR, "rxdr align check failed: %u bytes "
+ "at %p\n", rxdr->size, rxdr->desc);
+ /* Try again, without freeing the previous */
+ rxdr->desc = pci_alloc_consistent(pdev, rxdr->size, &rxdr->dma);
+ /* Failed allocation, critical failure */
+ if (!rxdr->desc) {
+ pci_free_consistent(pdev, rxdr->size, olddesc, olddma);
+ DPRINTK(PROBE, ERR,
+ "Unable to allocate memory "
+ "for the receive descriptor ring\n");
+ goto setup_rx_desc_die;
+ }
+
+ if (!e1000_check_64k_bound(adapter, rxdr->desc, rxdr->size)) {
+ /* give up */
+ pci_free_consistent(pdev, rxdr->size, rxdr->desc,
+ rxdr->dma);
+ pci_free_consistent(pdev, rxdr->size, olddesc, olddma);
+ DPRINTK(PROBE, ERR,
+ "Unable to allocate aligned memory "
+ "for the receive descriptor ring\n");
+ goto setup_rx_desc_die;
+ } else {
+ /* Free old allocation, new allocation was successful */
+ pci_free_consistent(pdev, rxdr->size, olddesc, olddma);
+ }
+ }
+ memset(rxdr->desc, 0, rxdr->size);
+
+ rxdr->next_to_clean = 0;
+ rxdr->next_to_use = 0;
+
+ return 0;
+}
+
+/**
+ * e1000_setup_all_rx_resources - wrapper to allocate Rx resources
+ * (Descriptors) for all queues
+ * @adapter: board private structure
+ *
+ * Return 0 on success, negative on failure
+ **/
+
+int e1000_setup_all_rx_resources(struct e1000_adapter *adapter)
+{
+ int i, err = 0;
+
+ for (i = 0; i < adapter->num_rx_queues; i++) {
+ err = e1000_setup_rx_resources(adapter, &adapter->rx_ring[i]);
+ if (err) {
+ DPRINTK(PROBE, ERR,
+ "Allocation for Rx Queue %u failed\n", i);
+ for (i-- ; i >= 0; i--)
+ e1000_free_rx_resources(adapter,
+ &adapter->rx_ring[i]);
+ break;
+ }
+ }
+
+ return err;
+}
+
+/**
+ * e1000_setup_rctl - configure the receive control registers
+ * @adapter: Board private structure
+ **/
+#define PAGE_USE_COUNT(S) (((S) >> PAGE_SHIFT) + \
+ (((S) & (PAGE_SIZE - 1)) ? 1 : 0))
+static void e1000_setup_rctl(struct e1000_adapter *adapter)
+{
+ struct e1000_hw *hw = &adapter->hw;
+ u32 rctl;
+
+ rctl = er32(RCTL);
+
+ rctl &= ~(3 << E1000_RCTL_MO_SHIFT);
+
+ rctl |= E1000_RCTL_EN | E1000_RCTL_BAM |
+ E1000_RCTL_LBM_NO | E1000_RCTL_RDMTS_HALF |
+ (hw->mc_filter_type << E1000_RCTL_MO_SHIFT);
+
+ if (hw->tbi_compatibility_on == 1)
+ rctl |= E1000_RCTL_SBP;
+ else
+ rctl &= ~E1000_RCTL_SBP;
+
+ if (adapter->netdev->mtu <= ETH_DATA_LEN)
+ rctl &= ~E1000_RCTL_LPE;
+ else
+ rctl |= E1000_RCTL_LPE;
+
+ /* Setup buffer sizes */
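+ /* With RCTL.BSEX set, the buffer-size encoding is scaled up so the
+ * same SZ bits select 4096/8192/16384-byte buffers; BSEX is cleared
+ * again below for the standard 256-2048 byte sizes. */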
+ rctl &= ~E1000_RCTL_SZ_4096;
+ rctl |= E1000_RCTL_BSEX;
+ switch (adapter->rx_buffer_len) {
+ case E1000_RXBUFFER_256:
+ rctl |= E1000_RCTL_SZ_256;
+ rctl &= ~E1000_RCTL_BSEX;
+ break;
+ case E1000_RXBUFFER_512:
+ rctl |= E1000_RCTL_SZ_512;
+ rctl &= ~E1000_RCTL_BSEX;
+ break;
+ case E1000_RXBUFFER_1024:
+ rctl |= E1000_RCTL_SZ_1024;
+ rctl &= ~E1000_RCTL_BSEX;
+ break;
+ case E1000_RXBUFFER_2048:
+ default:
+ rctl |= E1000_RCTL_SZ_2048;
+ rctl &= ~E1000_RCTL_BSEX;
+ break;
+ case E1000_RXBUFFER_4096:
+ rctl |= E1000_RCTL_SZ_4096;
+ break;
+ case E1000_RXBUFFER_8192:
+ rctl |= E1000_RCTL_SZ_8192;
+ break;
+ case E1000_RXBUFFER_16384:
+ rctl |= E1000_RCTL_SZ_16384;
+ break;
+ }
+
+ ew32(RCTL, rctl);
+}
+
+/**
+ * e1000_configure_rx - Configure 8254x Receive Unit after Reset
+ * @adapter: board private structure
+ *
+ * Configure the Rx unit of the MAC after a reset.
+ **/
+
+static void e1000_configure_rx(struct e1000_adapter *adapter)
+{
+ u64 rdba;
+ struct e1000_hw *hw = &adapter->hw;
+ u32 rdlen, rctl, rxcsum, ctrl_ext;
+
+ rdlen = adapter->rx_ring[0].count *
+ sizeof(struct e1000_rx_desc);
+ adapter->clean_rx = e1000_clean_rx_irq;
+ adapter->alloc_rx_buf = e1000_alloc_rx_buffers;
+
+ /* disable receives while setting up the descriptors */
+ rctl = er32(RCTL);
+ ew32(RCTL, rctl & ~E1000_RCTL_EN);
+
+ /* set the Receive Delay Timer Register */
+ ew32(RDTR, adapter->rx_int_delay);
+
+ if (hw->mac_type >= e1000_82540) {
+ ew32(RADV, adapter->rx_abs_int_delay);
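+ /* the ITR register counts in 256 ns units, so writing
+ * 10^9 / (itr * 256) gives roughly 'itr' interrupts per second */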
+ if (adapter->itr_setting != 0)
+ ew32(ITR, 1000000000 / (adapter->itr * 256));
+ }
+
+ if (hw->mac_type >= e1000_82571) {
+ ctrl_ext = er32(CTRL_EXT);
+ /* Reset delay timers after every interrupt */
+ ctrl_ext |= E1000_CTRL_EXT_INT_TIMER_CLR;
+ /* Auto-Mask interrupts upon ICR access */
+ ctrl_ext |= E1000_CTRL_EXT_IAME;
+ ew32(IAM, 0xffffffff);
+ ew32(CTRL_EXT, ctrl_ext);
+ E1000_WRITE_FLUSH();
+ }
+
+ /* Setup the HW Rx Head and Tail Descriptor Pointers and
+ * the Base and Length of the Rx Descriptor Ring */
+ switch (adapter->num_rx_queues) {
+ case 1:
+ default:
+ rdba = adapter->rx_ring[0].dma;
+ ew32(RDLEN, rdlen);
+ ew32(RDBAH, (rdba >> 32));
+ ew32(RDBAL, (rdba & 0x00000000ffffffffULL));
+ ew32(RDT, 0);
+ ew32(RDH, 0);
+ adapter->rx_ring[0].rdh = ((hw->mac_type >= e1000_82543) ? E1000_RDH : E1000_82542_RDH);
+ adapter->rx_ring[0].rdt = ((hw->mac_type >= e1000_82543) ? E1000_RDT : E1000_82542_RDT);
+ break;
+ }
+
+ /* Enable 82543 Receive Checksum Offload for TCP and UDP */
+ if (hw->mac_type >= e1000_82543) {
+ rxcsum = er32(RXCSUM);
+ if (adapter->rx_csum)
+ rxcsum |= E1000_RXCSUM_TUOFL;
+ else
+ /* don't need to clear IPPCSE as it defaults to 0 */
+ rxcsum &= ~E1000_RXCSUM_TUOFL;
+ ew32(RXCSUM, rxcsum);
+ }
+
+ /* Enable Receives */
+ ew32(RCTL, rctl);
+}
+
+/**
+ * e1000_free_tx_resources - Free Tx Resources per Queue
+ * @adapter: board private structure
+ * @tx_ring: Tx descriptor ring for a specific queue
+ *
+ * Free all transmit software resources
+ **/
+
+static void e1000_free_tx_resources(struct e1000_adapter *adapter,
+ struct e1000_tx_ring *tx_ring)
+{
+ struct pci_dev *pdev = adapter->pdev;
+
+ e1000_clean_tx_ring(adapter, tx_ring);
+
+ vfree(tx_ring->buffer_info);
+ tx_ring->buffer_info = NULL;
+
+ pci_free_consistent(pdev, tx_ring->size, tx_ring->desc, tx_ring->dma);
+
+ tx_ring->desc = NULL;
+}
+
+/**
+ * e1000_free_all_tx_resources - Free Tx Resources for All Queues
+ * @adapter: board private structure
+ *
+ * Free all transmit software resources
+ **/
+
+void e1000_free_all_tx_resources(struct e1000_adapter *adapter)
+{
+ int i;
+
+ for (i = 0; i < adapter->num_tx_queues; i++)
+ e1000_free_tx_resources(adapter, &adapter->tx_ring[i]);
+}
+
+static void e1000_unmap_and_free_tx_resource(struct e1000_adapter *adapter,
+ struct e1000_buffer *buffer_info)
+{
+ if (buffer_info->dma) {
+ pci_unmap_page(adapter->pdev,
+ buffer_info->dma,
+ buffer_info->length,
+ PCI_DMA_TODEVICE);
+ buffer_info->dma = 0;
+ }
+ if (buffer_info->skb) {
+ dev_kfree_skb_any(buffer_info->skb);
+ buffer_info->skb = NULL;
+ }
+ /* buffer_info must be completely set up in the transmit path */
+}
+
+/**
+ * e1000_clean_tx_ring - Free Tx Buffers
+ * @adapter: board private structure
+ * @tx_ring: ring to be cleaned
+ **/
+
+static void e1000_clean_tx_ring(struct e1000_adapter *adapter,
+ struct e1000_tx_ring *tx_ring)
+{
+ struct e1000_hw *hw = &adapter->hw;
+ struct e1000_buffer *buffer_info;
+ unsigned long size;
+ unsigned int i;
+
+ /* Free all the Tx ring sk_buffs */
+
+ for (i = 0; i < tx_ring->count; i++) {
+ buffer_info = &tx_ring->buffer_info[i];
+ e1000_unmap_and_free_tx_resource(adapter, buffer_info);
+ }
+
+ size = sizeof(struct e1000_buffer) * tx_ring->count;
+ memset(tx_ring->buffer_info, 0, size);
+
+ /* Zero out the descriptor ring */
+
+ memset(tx_ring->desc, 0, tx_ring->size);
+
+ tx_ring->next_to_use = 0;
+ tx_ring->next_to_clean = 0;
+ tx_ring->last_tx_tso = 0;
+
+ writel(0, hw->hw_addr + tx_ring->tdh);
+ writel(0, hw->hw_addr + tx_ring->tdt);
+}
+
+/**
+ * e1000_clean_all_tx_rings - Free Tx Buffers for all queues
+ * @adapter: board private structure
+ **/
+
+static void e1000_clean_all_tx_rings(struct e1000_adapter *adapter)
+{
+ int i;
+
+ for (i = 0; i < adapter->num_tx_queues; i++)
+ e1000_clean_tx_ring(adapter, &adapter->tx_ring[i]);
+}
+
+/**
+ * e1000_free_rx_resources - Free Rx Resources
+ * @adapter: board private structure
+ * @rx_ring: ring to clean the resources from
+ *
+ * Free all receive software resources
+ **/
+
+static void e1000_free_rx_resources(struct e1000_adapter *adapter,
+ struct e1000_rx_ring *rx_ring)
+{
+ struct pci_dev *pdev = adapter->pdev;
+
+ e1000_clean_rx_ring(adapter, rx_ring);
+
+ vfree(rx_ring->buffer_info);
+ rx_ring->buffer_info = NULL;
+
+ pci_free_consistent(pdev, rx_ring->size, rx_ring->desc, rx_ring->dma);
+
+ rx_ring->desc = NULL;
+}
+
+/**
+ * e1000_free_all_rx_resources - Free Rx Resources for All Queues
+ * @adapter: board private structure
+ *
+ * Free all receive software resources
+ **/
+
+void e1000_free_all_rx_resources(struct e1000_adapter *adapter)
+{
+ int i;
+
+ for (i = 0; i < adapter->num_rx_queues; i++)
+ e1000_free_rx_resources(adapter, &adapter->rx_ring[i]);
+}
+
+/**
+ * e1000_clean_rx_ring - Free Rx Buffers per Queue
+ * @adapter: board private structure
+ * @rx_ring: ring to free buffers from
+ **/
+
+static void e1000_clean_rx_ring(struct e1000_adapter *adapter,
+ struct e1000_rx_ring *rx_ring)
+{
+ struct e1000_hw *hw = &adapter->hw;
+ struct e1000_buffer *buffer_info;
+ struct pci_dev *pdev = adapter->pdev;
+ unsigned long size;
+ unsigned int i;
+
+ /* Free all the Rx ring sk_buffs */
+ for (i = 0; i < rx_ring->count; i++) {
+ buffer_info = &rx_ring->buffer_info[i];
+ if (buffer_info->skb) {
+ pci_unmap_single(pdev,
+ buffer_info->dma,
+ buffer_info->length,
+ PCI_DMA_FROMDEVICE);
+
+ dev_kfree_skb(buffer_info->skb);
+ buffer_info->skb = NULL;
+ }
+ }
+
+ size = sizeof(struct e1000_buffer) * rx_ring->count;
+ memset(rx_ring->buffer_info, 0, size);
+
+ /* Zero out the descriptor ring */
+
+ memset(rx_ring->desc, 0, rx_ring->size);
+
+ rx_ring->next_to_clean = 0;
+ rx_ring->next_to_use = 0;
+
+ writel(0, hw->hw_addr + rx_ring->rdh);
+ writel(0, hw->hw_addr + rx_ring->rdt);
+}
+
+/**
+ * e1000_clean_all_rx_rings - Free Rx Buffers for all queues
+ * @adapter: board private structure
+ **/
+
+static void e1000_clean_all_rx_rings(struct e1000_adapter *adapter)
+{
+ int i;
+
+ for (i = 0; i < adapter->num_rx_queues; i++)
+ e1000_clean_rx_ring(adapter, &adapter->rx_ring[i]);
+}
+
+/* The 82542 2.0 (revision 2) needs to have the receive unit in reset
+ * and memory write and invalidate disabled for certain operations
+ */
+static void e1000_enter_82542_rst(struct e1000_adapter *adapter)
+{
+ struct e1000_hw *hw = &adapter->hw;
+ struct net_device *netdev = adapter->netdev;
+ u32 rctl;
+
+ e1000_pci_clear_mwi(hw);
+
+ rctl = er32(RCTL);
+ rctl |= E1000_RCTL_RST;
+ ew32(RCTL, rctl);
+ E1000_WRITE_FLUSH();
+ mdelay(5);
+
+ if (netif_running(netdev))
+ e1000_clean_all_rx_rings(adapter);
+}
+
+static void e1000_leave_82542_rst(struct e1000_adapter *adapter)
+{
+ struct e1000_hw *hw = &adapter->hw;
+ struct net_device *netdev = adapter->netdev;
+ u32 rctl;
+
+ rctl = er32(RCTL);
+ rctl &= ~E1000_RCTL_RST;
+ ew32(RCTL, rctl);
+ E1000_WRITE_FLUSH();
+ mdelay(5);
+
+ if (hw->pci_cmd_word & PCI_COMMAND_INVALIDATE)
+ e1000_pci_set_mwi(hw);
+
+ if (netif_running(netdev)) {
+ /* No need to loop, because 82542 supports only 1 queue */
+ struct e1000_rx_ring *ring = &adapter->rx_ring[0];
+ e1000_configure_rx(adapter);
+ adapter->alloc_rx_buf(adapter, ring, E1000_DESC_UNUSED(ring));
+ }
+}
+
+/**
+ * e1000_set_mac - Change the Ethernet Address of the NIC
+ * @netdev: network interface device structure
+ * @p: pointer to an address structure
+ *
+ * Returns 0 on success, negative on failure
+ **/
+
+static int e1000_set_mac(struct net_device *netdev, void *p)
+{
+ struct e1000_adapter *adapter = netdev_priv(netdev);
+ struct e1000_hw *hw = &adapter->hw;
+ struct sockaddr *addr = p;
+
+ if (!is_valid_ether_addr(addr->sa_data))
+ return -EADDRNOTAVAIL;
+
+ /* 82542 2.0 needs to be in reset to write receive address registers */
+
+ if (hw->mac_type == e1000_82542_rev2_0)
+ e1000_enter_82542_rst(adapter);
+
+ memcpy(netdev->dev_addr, addr->sa_data, netdev->addr_len);
+ memcpy(hw->mac_addr, addr->sa_data, netdev->addr_len);
+
+ e1000_rar_set(hw, hw->mac_addr, 0);
+
+ /* With 82571 controllers, LAA may be overwritten (with the default)
+ * due to controller reset from the other port. */
+ if (hw->mac_type == e1000_82571) {
+ /* activate the work around */
+ hw->laa_is_present = 1;
+
+ /* Hold a copy of the LAA in RAR[14]. This is done so that
+ * between the time RAR[0] gets clobbered and the time it
+ * gets fixed (in e1000_watchdog), the actual LAA is in one
+ * of the RARs and no incoming packets directed to this port
+ * are dropped. Eventually the LAA will be in RAR[0] and
+ * RAR[14]. */
+ e1000_rar_set(hw, hw->mac_addr,
+ E1000_RAR_ENTRIES - 1);
+ }
+
+ if (hw->mac_type == e1000_82542_rev2_0)
+ e1000_leave_82542_rst(adapter);
+
+ return 0;
+}
+
+/**
+ * e1000_set_rx_mode - Secondary Unicast, Multicast and Promiscuous mode set
+ * @netdev: network interface device structure
+ *
+ * The set_rx_mode entry point is called whenever the unicast or multicast
+ * address lists or the network interface flags are updated. This routine is
+ * responsible for configuring the hardware for proper unicast, multicast,
+ * promiscuous mode, and all-multi behavior.
+ **/
+
+static void e1000_set_rx_mode(struct net_device *netdev)
+{
+ struct e1000_adapter *adapter = netdev_priv(netdev);
+ struct e1000_hw *hw = &adapter->hw;
+ struct dev_addr_list *uc_ptr;
+ struct dev_addr_list *mc_ptr;
+ u32 rctl;
+ u32 hash_value;
+ int i, rar_entries = E1000_RAR_ENTRIES;
+ int mta_reg_count = (hw->mac_type == e1000_ich8lan) ?
+ E1000_NUM_MTA_REGISTERS_ICH8LAN :
+ E1000_NUM_MTA_REGISTERS;
+
+ if (hw->mac_type == e1000_ich8lan)
+ rar_entries = E1000_RAR_ENTRIES_ICH8LAN;
+
+ /* reserve RAR[14] for LAA over-write work-around */
+ if (hw->mac_type == e1000_82571)
+ rar_entries--;
+
+ /* Check for Promiscuous and All Multicast modes */
+
+ rctl = er32(RCTL);
+
+ if (netdev->flags & IFF_PROMISC) {
+ rctl |= (E1000_RCTL_UPE | E1000_RCTL_MPE);
+ rctl &= ~E1000_RCTL_VFE;
+ } else {
+ if (netdev->flags & IFF_ALLMULTI) {
+ rctl |= E1000_RCTL_MPE;
+ } else {
+ rctl &= ~E1000_RCTL_MPE;
+ }
+ if (adapter->hw.mac_type != e1000_ich8lan)
+ rctl |= E1000_RCTL_VFE;
+ }
+
+ uc_ptr = NULL;
+ if (netdev->uc_count > rar_entries - 1) {
+ rctl |= E1000_RCTL_UPE;
+ } else if (!(netdev->flags & IFF_PROMISC)) {
+ rctl &= ~E1000_RCTL_UPE;
+ uc_ptr = netdev->uc_list;
+ }
+
+ ew32(RCTL, rctl);
+
+ /* 82542 2.0 needs to be in reset to write receive address registers */
+
+ if (hw->mac_type == e1000_82542_rev2_0)
+ e1000_enter_82542_rst(adapter);
+
+ /* load the first 14 addresses into the exact filters 1-14. Unicast
+ * addresses take precedence to avoid disabling unicast filtering
+ * when possible.
+ *
+ * RAR 0 is used for the station MAC address.
+ * If there are fewer than 14 addresses, go ahead and clear the filters
+ * -- with 82571 controllers only entries 0-13 are filled here.
+ */
+ mc_ptr = netdev->mc_list;
+
+ for (i = 1; i < rar_entries; i++) {
+ if (uc_ptr) {
+ e1000_rar_set(hw, uc_ptr->da_addr, i);
+ uc_ptr = uc_ptr->next;
+ } else if (mc_ptr) {
+ e1000_rar_set(hw, mc_ptr->da_addr, i);
+ mc_ptr = mc_ptr->next;
+ } else {
+ E1000_WRITE_REG_ARRAY(hw, RA, i << 1, 0);
+ E1000_WRITE_FLUSH();
+ E1000_WRITE_REG_ARRAY(hw, RA, (i << 1) + 1, 0);
+ E1000_WRITE_FLUSH();
+ }
+ }
+ WARN_ON(uc_ptr != NULL);
+
+ /* clear the old settings from the multicast hash table */
+
+ for (i = 0; i < mta_reg_count; i++) {
+ E1000_WRITE_REG_ARRAY(hw, MTA, i, 0);
+ E1000_WRITE_FLUSH();
+ }
+
+ /* load any remaining addresses into the hash table */
+
+ for (; mc_ptr; mc_ptr = mc_ptr->next) {
+ hash_value = e1000_hash_mc_addr(hw, mc_ptr->da_addr);
+ e1000_mta_set(hw, hash_value);
+ }
+
+ if (hw->mac_type == e1000_82542_rev2_0)
+ e1000_leave_82542_rst(adapter);
+}
+
+/* Need to wait a few seconds after link up to get diagnostic information from
+ * the phy */
+
+static void e1000_update_phy_info(unsigned long data)
+{
+ struct e1000_adapter *adapter = (struct e1000_adapter *)data;
+ struct e1000_hw *hw = &adapter->hw;
+ e1000_phy_get_info(hw, &adapter->phy_info);
+}
+
+/**
+ * e1000_82547_tx_fifo_stall - Timer Call-back
+ * @data: pointer to adapter cast into an unsigned long
+ **/
+
+static void e1000_82547_tx_fifo_stall(unsigned long data)
+{
+ struct e1000_adapter *adapter = (struct e1000_adapter *)data;
+ struct e1000_hw *hw = &adapter->hw;
+ struct net_device *netdev = adapter->netdev;
+ u32 tctl;
+
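+ /* 82547 workaround: once both the descriptor ring and the on-chip
+ * Tx FIFO have drained, disable transmits, rewind the FIFO head/tail
+ * pointers to tx_head_addr and re-enable; otherwise check again on
+ * the next timer tick. */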
+ if (atomic_read(&adapter->tx_fifo_stall)) {
+ if ((er32(TDT) == er32(TDH)) &&
+ (er32(TDFT) == er32(TDFH)) &&
+ (er32(TDFTS) == er32(TDFHS))) {
+ tctl = er32(TCTL);
+ ew32(TCTL, tctl & ~E1000_TCTL_EN);
+ ew32(TDFT, adapter->tx_head_addr);
+ ew32(TDFH, adapter->tx_head_addr);
+ ew32(TDFTS, adapter->tx_head_addr);
+ ew32(TDFHS, adapter->tx_head_addr);
+ ew32(TCTL, tctl);
+ E1000_WRITE_FLUSH();
+
+ adapter->tx_fifo_head = 0;
+ atomic_set(&adapter->tx_fifo_stall, 0);
+ netif_wake_queue(netdev);
+ } else {
+ mod_timer(&adapter->tx_fifo_stall_timer, jiffies + 1);
+ }
+ }
+}
+
+/**
+ * e1000_watchdog - Timer Call-back
+ * @data: pointer to adapter cast into an unsigned long
+ **/
+static void e1000_watchdog(unsigned long data)
+{
+ struct e1000_adapter *adapter = (struct e1000_adapter *)data;
+ struct e1000_hw *hw = &adapter->hw;
+ struct net_device *netdev = adapter->netdev;
+ struct e1000_tx_ring *txdr = adapter->tx_ring;
+ u32 link, tctl;
+ s32 ret_val;
+
+ ret_val = e1000_check_for_link(hw);
+ if ((ret_val == E1000_ERR_PHY) &&
+ (hw->phy_type == e1000_phy_igp_3) &&
+ (er32(CTRL) & E1000_PHY_CTRL_GBE_DISABLE)) {
+ /* See e1000_kumeran_lock_loss_workaround() */
+ DPRINTK(LINK, INFO,
+ "Gigabit has been disabled, downgrading speed\n");
+ }
+
+ if (hw->mac_type == e1000_82573) {
+ e1000_enable_tx_pkt_filtering(hw);
+ if (adapter->mng_vlan_id != hw->mng_cookie.vlan_id)
+ e1000_update_mng_vlan(adapter);
+ }
+
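+ /* With internal serdes and hardware autonegotiation disabled, link
+ * state is tracked in software (serdes_link_down); otherwise read
+ * the Link Up bit from the STATUS register. */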
+ if ((hw->media_type == e1000_media_type_internal_serdes) &&
+ !(er32(TXCW) & E1000_TXCW_ANE))
+ link = !hw->serdes_link_down;
+ else
+ link = er32(STATUS) & E1000_STATUS_LU;
+
+ if (link) {
+ if (!netif_carrier_ok(netdev)) {
+ u32 ctrl;
+ bool txb2b = true;
+ e1000_get_speed_and_duplex(hw,
+ &adapter->link_speed,
+ &adapter->link_duplex);
+
+ ctrl = er32(CTRL);
+ DPRINTK(LINK, INFO, "NIC Link is Up %d Mbps %s, "
+ "Flow Control: %s\n",
+ adapter->link_speed,
+ adapter->link_duplex == FULL_DUPLEX ?
+ "Full Duplex" : "Half Duplex",
+ ((ctrl & E1000_CTRL_TFCE) && (ctrl &
+ E1000_CTRL_RFCE)) ? "RX/TX" : ((ctrl &
+ E1000_CTRL_RFCE) ? "RX" : ((ctrl &
+ E1000_CTRL_TFCE) ? "TX" : "None" )));
+
+ /* tweak tx_queue_len according to speed/duplex
+ * and adjust the timeout factor */
+ netdev->tx_queue_len = adapter->tx_queue_len;
+ adapter->tx_timeout_factor = 1;
+ switch (adapter->link_speed) {
+ case SPEED_10:
+ txb2b = false;
+ netdev->tx_queue_len = 10;
+ adapter->tx_timeout_factor = 8;
+ break;
+ case SPEED_100:
+ txb2b = false;
+ netdev->tx_queue_len = 100;
+ /* maybe add some timeout factor ? */
+ break;
+ }
+
+ if ((hw->mac_type == e1000_82571 ||
+ hw->mac_type == e1000_82572) &&
+ !txb2b) {
+ u32 tarc0;
+ tarc0 = er32(TARC0);
+ tarc0 &= ~(1 << 21);
+ ew32(TARC0, tarc0);
+ }
+
+ /* disable TSO for pcie and 10/100 speeds, to avoid
+ * some hardware issues */
+ if (!adapter->tso_force &&
+ hw->bus_type == e1000_bus_type_pci_express){
+ switch (adapter->link_speed) {
+ case SPEED_10:
+ case SPEED_100:
+ DPRINTK(PROBE,INFO,
+ "10/100 speed: disabling TSO\n");
+ netdev->features &= ~NETIF_F_TSO;
+ netdev->features &= ~NETIF_F_TSO6;
+ break;
+ case SPEED_1000:
+ netdev->features |= NETIF_F_TSO;
+ netdev->features |= NETIF_F_TSO6;
+ break;
+ default:
+ /* oops */
+ break;
+ }
+ }
+
+ /* enable transmits in the hardware, need to do this
+ * after setting TARC0 */
+ tctl = er32(TCTL);
+ tctl |= E1000_TCTL_EN;
+ ew32(TCTL, tctl);
+
+ netif_carrier_on(netdev);
+ netif_wake_queue(netdev);
+ mod_timer(&adapter->phy_info_timer, round_jiffies(jiffies + 2 * HZ));
+ adapter->smartspeed = 0;
+ } else {
+ /* make sure the receive unit is started */
+ if (hw->rx_needs_kicking) {
+ u32 rctl = er32(RCTL);
+ ew32(RCTL, rctl | E1000_RCTL_EN);
+ }
+ }
+ } else {
+ if (netif_carrier_ok(netdev)) {
+ adapter->link_speed = 0;
+ adapter->link_duplex = 0;
+ DPRINTK(LINK, INFO, "NIC Link is Down\n");
+ netif_carrier_off(netdev);
+ netif_stop_queue(netdev);
+ mod_timer(&adapter->phy_info_timer, round_jiffies(jiffies + 2 * HZ));
+
+ /* 80003ES2LAN workaround--
+ * For packet buffer work-around on link down event;
+ * disable receives in the ISR and
+ * reset device here in the watchdog
+ */
+ if (hw->mac_type == e1000_80003es2lan)
+ /* reset device */
+ schedule_work(&adapter->reset_task);
+ }
+
+ e1000_smartspeed(adapter);
+ }
+
+ e1000_update_stats(adapter);
+
+ hw->tx_packet_delta = adapter->stats.tpt - adapter->tpt_old;
+ adapter->tpt_old = adapter->stats.tpt;
+ hw->collision_delta = adapter->stats.colc - adapter->colc_old;
+ adapter->colc_old = adapter->stats.colc;
+
+ adapter->gorcl = adapter->stats.gorcl - adapter->gorcl_old;
+ adapter->gorcl_old = adapter->stats.gorcl;
+ adapter->gotcl = adapter->stats.gotcl - adapter->gotcl_old;
+ adapter->gotcl_old = adapter->stats.gotcl;
+
+ e1000_update_adaptive(hw);
+
+ if (!netif_carrier_ok(netdev)) {
+ if (E1000_DESC_UNUSED(txdr) + 1 < txdr->count) {
+ /* We've lost link, so the controller stops DMA,
+ * but we've got queued Tx work that's never going
+ * to get done, so reset controller to flush Tx.
+ * (Do the reset outside of interrupt context). */
+ adapter->tx_timeout_count++;
+ schedule_work(&adapter->reset_task);
+ }
+ }
+
+ /* Cause software interrupt to ensure rx ring is cleaned */
+ ew32(ICS, E1000_ICS_RXDMT0);
+
+ /* Force detection of hung controller every watchdog period */
+ adapter->detect_tx_hung = true;
+
+ /* With 82571 controllers, LAA may be overwritten due to controller
+ * reset from the other port. Set the appropriate LAA in RAR[0] */
+ if (hw->mac_type == e1000_82571 && hw->laa_is_present)
+ e1000_rar_set(hw, hw->mac_addr, 0);
+
+ /* Reset the timer */
+ mod_timer(&adapter->watchdog_timer, round_jiffies(jiffies + 2 * HZ));
+}
+
+enum latency_range {
+ lowest_latency = 0,
+ low_latency = 1,
+ bulk_latency = 2,
+ latency_invalid = 255
+};
+
+/**
+ * e1000_update_itr - update the dynamic ITR value based on statistics
+ * @adapter: pointer to adapter
+ * @itr_setting: current adapter->itr
+ * @packets: the number of packets during this measurement interval
+ * @bytes: the number of bytes during this measurement interval
+ *
+ * Stores a new ITR value based on packets and byte counts during the
+ * last interrupt. The advantage of per-interrupt computation is faster
+ * updates and a more accurate ITR for the current traffic pattern.
+ * Constants in this function were computed based on theoretical maximum
+ * wire speed, and thresholds were set based on testing data as well as
+ * attempting to minimize response time while increasing bulk throughput.
+ * This functionality is controlled by the InterruptThrottleRate module
+ * parameter (see e1000_param.c).
+ **/
+static unsigned int e1000_update_itr(struct e1000_adapter *adapter,
+ u16 itr_setting, int packets, int bytes)
+{
+ unsigned int retval = itr_setting;
+ struct e1000_hw *hw = &adapter->hw;
+
+ if (unlikely(hw->mac_type < e1000_82540))
+ goto update_itr_done;
+
+ if (packets == 0)
+ goto update_itr_done;
+
+ switch (itr_setting) {
+ case lowest_latency:
+ /* jumbo frames get bulk treatment */
+ if (bytes/packets > 8000)
+ retval = bulk_latency;
+ else if ((packets < 5) && (bytes > 512))
+ retval = low_latency;
+ break;
+ case low_latency: /* 50 usec aka 20000 ints/s */
+ if (bytes > 10000) {
+ /* jumbo frames need bulk latency setting */
+ if (bytes/packets > 8000)
+ retval = bulk_latency;
+ else if ((packets < 10) || ((bytes/packets) > 1200))
+ retval = bulk_latency;
+ else if ((packets > 35))
+ retval = lowest_latency;
+ } else if (bytes/packets > 2000)
+ retval = bulk_latency;
+ else if (packets <= 2 && bytes < 512)
+ retval = lowest_latency;
+ break;
+ case bulk_latency: /* 250 usec aka 4000 ints/s */
+ if (bytes > 25000) {
+ if (packets > 35)
+ retval = low_latency;
+ } else if (bytes < 6000) {
+ retval = low_latency;
+ }
+ break;
+ }
+
+update_itr_done:
+ return retval;
+}
+
+static void e1000_set_itr(struct e1000_adapter *adapter)
+{
+ struct e1000_hw *hw = &adapter->hw;
+ u16 current_itr;
+ u32 new_itr = adapter->itr;
+
+ if (unlikely(hw->mac_type < e1000_82540))
+ return;
+
+ /* for non-gigabit speeds, just fix the interrupt rate at 4000 */
+ if (unlikely(adapter->link_speed != SPEED_1000)) {
+ current_itr = 0;
+ new_itr = 4000;
+ goto set_itr_now;
+ }
+
+ adapter->tx_itr = e1000_update_itr(adapter,
+ adapter->tx_itr,
+ adapter->total_tx_packets,
+ adapter->total_tx_bytes);
+ /* conservative mode (itr 3) eliminates the lowest_latency setting */
+ if (adapter->itr_setting == 3 && adapter->tx_itr == lowest_latency)
+ adapter->tx_itr = low_latency;
+
+ adapter->rx_itr = e1000_update_itr(adapter,
+ adapter->rx_itr,
+ adapter->total_rx_packets,
+ adapter->total_rx_bytes);
+ /* conservative mode (itr 3) eliminates the lowest_latency setting */
+ if (adapter->itr_setting == 3 && adapter->rx_itr == lowest_latency)
+ adapter->rx_itr = low_latency;
+
+ current_itr = max(adapter->rx_itr, adapter->tx_itr);
+
+ switch (current_itr) {
+ /* counts and packets in update_itr are dependent on these numbers */
+ case lowest_latency:
+ new_itr = 70000;
+ break;
+ case low_latency:
+ new_itr = 20000; /* aka hwitr = ~200 */
+ break;
+ case bulk_latency:
+ new_itr = 4000;
+ break;
+ default:
+ break;
+ }
+
+set_itr_now:
+ if (new_itr != adapter->itr) {
+ /* this attempts to bias the interrupt rate towards Bulk
+ * by adding intermediate steps when interrupt rate is
+ * increasing */
+ new_itr = new_itr > adapter->itr ?
+ min(adapter->itr + (new_itr >> 2), new_itr) :
+ new_itr;
+ adapter->itr = new_itr;
+ ew32(ITR, 1000000000 / (new_itr * 256));
+ }
+
+ return;
+}
+
+#define E1000_TX_FLAGS_CSUM 0x00000001
+#define E1000_TX_FLAGS_VLAN 0x00000002
+#define E1000_TX_FLAGS_TSO 0x00000004
+#define E1000_TX_FLAGS_IPV4 0x00000008
+#define E1000_TX_FLAGS_VLAN_MASK 0xffff0000
+#define E1000_TX_FLAGS_VLAN_SHIFT 16
+
+static int e1000_tso(struct e1000_adapter *adapter,
+ struct e1000_tx_ring *tx_ring, struct sk_buff *skb)
+{
+ struct e1000_context_desc *context_desc;
+ struct e1000_buffer *buffer_info;
+ unsigned int i;
+ u32 cmd_length = 0;
+ u16 ipcse = 0, tucse, mss;
+ u8 ipcss, ipcso, tucss, tucso, hdr_len;
+ int err;
+
+ if (skb_is_gso(skb)) {
+ if (skb_header_cloned(skb)) {
+ err = pskb_expand_head(skb, 0, 0, GFP_ATOMIC);
+ if (err)
+ return err;
+ }
+
+ hdr_len = skb_transport_offset(skb) + tcp_hdrlen(skb);
+ mss = skb_shinfo(skb)->gso_size;
+ if (skb->protocol == htons(ETH_P_IP)) {
+ struct iphdr *iph = ip_hdr(skb);
+ iph->tot_len = 0;
+ iph->check = 0;
+ tcp_hdr(skb)->check = ~csum_tcpudp_magic(iph->saddr,
+ iph->daddr, 0,
+ IPPROTO_TCP,
+ 0);
+ cmd_length = E1000_TXD_CMD_IP;
+ ipcse = skb_transport_offset(skb) - 1;
+ } else if (skb->protocol == htons(ETH_P_IPV6)) {
+ ipv6_hdr(skb)->payload_len = 0;
+ tcp_hdr(skb)->check =
+ ~csum_ipv6_magic(&ipv6_hdr(skb)->saddr,
+ &ipv6_hdr(skb)->daddr,
+ 0, IPPROTO_TCP, 0);
+ ipcse = 0;
+ }
+ ipcss = skb_network_offset(skb);
+ ipcso = (void *)&(ip_hdr(skb)->check) - (void *)skb->data;
+ tucss = skb_transport_offset(skb);
+ tucso = (void *)&(tcp_hdr(skb)->check) - (void *)skb->data;
+ tucse = 0;
+
+ cmd_length |= (E1000_TXD_CMD_DEXT | E1000_TXD_CMD_TSE |
+ E1000_TXD_CMD_TCP | (skb->len - (hdr_len)));
+
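+ /* Fill a TSO context descriptor so the hardware knows the IP and
+ * TCP header offsets, where to insert the recomputed checksums and
+ * the MSS to use when segmenting the payload. */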
+ i = tx_ring->next_to_use;
+ context_desc = E1000_CONTEXT_DESC(*tx_ring, i);
+ buffer_info = &tx_ring->buffer_info[i];
+
+ context_desc->lower_setup.ip_fields.ipcss = ipcss;
+ context_desc->lower_setup.ip_fields.ipcso = ipcso;
+ context_desc->lower_setup.ip_fields.ipcse = cpu_to_le16(ipcse);
+ context_desc->upper_setup.tcp_fields.tucss = tucss;
+ context_desc->upper_setup.tcp_fields.tucso = tucso;
+ context_desc->upper_setup.tcp_fields.tucse = cpu_to_le16(tucse);
+ context_desc->tcp_seg_setup.fields.mss = cpu_to_le16(mss);
+ context_desc->tcp_seg_setup.fields.hdr_len = hdr_len;
+ context_desc->cmd_and_length = cpu_to_le32(cmd_length);
+
+ buffer_info->time_stamp = jiffies;
+ buffer_info->next_to_watch = i;
+
+ if (++i == tx_ring->count) i = 0;
+ tx_ring->next_to_use = i;
+
+ return true;
+ }
+ return false;
+}
+
+static bool e1000_tx_csum(struct e1000_adapter *adapter,
+ struct e1000_tx_ring *tx_ring, struct sk_buff *skb)
+{
+ struct e1000_context_desc *context_desc;
+ struct e1000_buffer *buffer_info;
+ unsigned int i;
+ u8 css;
+ u32 cmd_len = E1000_TXD_CMD_DEXT;
+
+ if (skb->ip_summed != CHECKSUM_PARTIAL)
+ return false;
+
+ switch (skb->protocol) {
+ case __constant_htons(ETH_P_IP):
+ if (ip_hdr(skb)->protocol == IPPROTO_TCP)
+ cmd_len |= E1000_TXD_CMD_TCP;
+ break;
+ case __constant_htons(ETH_P_IPV6):
+ /* XXX not handling all IPV6 headers */
+ if (ipv6_hdr(skb)->nexthdr == IPPROTO_TCP)
+ cmd_len |= E1000_TXD_CMD_TCP;
+ break;
+ default:
+ if (unlikely(net_ratelimit()))
+ DPRINTK(DRV, WARNING,
+ "checksum_partial proto=%x!\n", skb->protocol);
+ break;
+ }
+
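+ /* tucss/tucso tell the hardware where the transport header starts
+ * and at which offset to insert the computed checksum. */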
+ css = skb_transport_offset(skb);
+
+ i = tx_ring->next_to_use;
+ buffer_info = &tx_ring->buffer_info[i];
+ context_desc = E1000_CONTEXT_DESC(*tx_ring, i);
+
+ context_desc->lower_setup.ip_config = 0;
+ context_desc->upper_setup.tcp_fields.tucss = css;
+ context_desc->upper_setup.tcp_fields.tucso =
+ css + skb->csum_offset;
+ context_desc->upper_setup.tcp_fields.tucse = 0;
+ context_desc->tcp_seg_setup.data = 0;
+ context_desc->cmd_and_length = cpu_to_le32(cmd_len);
+
+ buffer_info->time_stamp = jiffies;
+ buffer_info->next_to_watch = i;
+
+ if (unlikely(++i == tx_ring->count)) i = 0;
+ tx_ring->next_to_use = i;
+
+ return true;
+}
+
+#define E1000_MAX_TXD_PWR 12
+#define E1000_MAX_DATA_PER_TXD (1<<E1000_MAX_TXD_PWR)
+
+static int e1000_tx_map(struct e1000_adapter *adapter,
+ struct e1000_tx_ring *tx_ring,
+ struct sk_buff *skb, unsigned int first,
+ unsigned int max_per_txd, unsigned int nr_frags,
+ unsigned int mss)
+{
+ struct e1000_hw *hw = &adapter->hw;
+ struct e1000_buffer *buffer_info;
+ unsigned int len = skb->len;
+ unsigned int offset = 0, size, count = 0, i;
+ unsigned int f;
+ len -= skb->data_len;
+
+ i = tx_ring->next_to_use;
+
+ while (len) {
+ buffer_info = &tx_ring->buffer_info[i];
+ size = min(len, max_per_txd);
+ /* Workaround for controller erratum --
+ * a descriptor for a non-TSO packet in a linear SKB that follows
+ * a TSO packet gets written back prematurely, before the data is
+ * fully DMA'd to the controller */
+ if (!skb->data_len && tx_ring->last_tx_tso &&
+ !skb_is_gso(skb)) {
+ tx_ring->last_tx_tso = 0;
+ size -= 4;
+ }
+
+ /* Workaround for premature desc write-backs
+ * in TSO mode. Append 4-byte sentinel desc */
+ if (unlikely(mss && !nr_frags && size == len && size > 8))
+ size -= 4;
+		/* Workaround for errata 10, which applies to all
+		 * controllers in PCI-X mode: make sure that the first
+		 * descriptor of a packet is smaller than
+		 * 2048 - 16 - 16 (or 2016) bytes.
+		 */
+ if (unlikely((hw->bus_type == e1000_bus_type_pcix) &&
+ (size > 2015) && count == 0))
+ size = 2015;
+
+ /* Workaround for potential 82544 hang in PCI-X. Avoid
+ * terminating buffers within evenly-aligned dwords. */
+ if (unlikely(adapter->pcix_82544 &&
+ !((unsigned long)(skb->data + offset + size - 1) & 4) &&
+ size > 4))
+ size -= 4;
+
+ buffer_info->length = size;
+ buffer_info->dma =
+ pci_map_single(adapter->pdev,
+ skb->data + offset,
+ size,
+ PCI_DMA_TODEVICE);
+ buffer_info->time_stamp = jiffies;
+ buffer_info->next_to_watch = i;
+
+ len -= size;
+ offset += size;
+ count++;
+ if (unlikely(++i == tx_ring->count)) i = 0;
+ }
+
+ for (f = 0; f < nr_frags; f++) {
+ struct skb_frag_struct *frag;
+
+ frag = &skb_shinfo(skb)->frags[f];
+ len = frag->size;
+ offset = frag->page_offset;
+
+ while (len) {
+ buffer_info = &tx_ring->buffer_info[i];
+ size = min(len, max_per_txd);
+ /* Workaround for premature desc write-backs
+ * in TSO mode. Append 4-byte sentinel desc */
+ if (unlikely(mss && f == (nr_frags-1) && size == len && size > 8))
+ size -= 4;
+ /* Workaround for potential 82544 hang in PCI-X.
+ * Avoid terminating buffers within evenly-aligned
+ * dwords. */
+ if (unlikely(adapter->pcix_82544 &&
+ !((unsigned long)(frag->page+offset+size-1) & 4) &&
+ size > 4))
+ size -= 4;
+
+ buffer_info->length = size;
+ buffer_info->dma =
+ pci_map_page(adapter->pdev,
+ frag->page,
+ offset,
+ size,
+ PCI_DMA_TODEVICE);
+ buffer_info->time_stamp = jiffies;
+ buffer_info->next_to_watch = i;
+
+ len -= size;
+ offset += size;
+ count++;
+ if (unlikely(++i == tx_ring->count)) i = 0;
+ }
+ }
+
+ i = (i == 0) ? tx_ring->count - 1 : i - 1;
+ tx_ring->buffer_info[i].skb = skb;
+ tx_ring->buffer_info[first].next_to_watch = i;
+
+ return count;
+}
+
+static void e1000_tx_queue(struct e1000_adapter *adapter,
+ struct e1000_tx_ring *tx_ring, int tx_flags,
+ int count)
+{
+ struct e1000_hw *hw = &adapter->hw;
+ struct e1000_tx_desc *tx_desc = NULL;
+ struct e1000_buffer *buffer_info;
+ u32 txd_upper = 0, txd_lower = E1000_TXD_CMD_IFCS;
+ unsigned int i;
+
+ if (likely(tx_flags & E1000_TX_FLAGS_TSO)) {
+ txd_lower |= E1000_TXD_CMD_DEXT | E1000_TXD_DTYP_D |
+ E1000_TXD_CMD_TSE;
+ txd_upper |= E1000_TXD_POPTS_TXSM << 8;
+
+ if (likely(tx_flags & E1000_TX_FLAGS_IPV4))
+ txd_upper |= E1000_TXD_POPTS_IXSM << 8;
+ }
+
+ if (likely(tx_flags & E1000_TX_FLAGS_CSUM)) {
+ txd_lower |= E1000_TXD_CMD_DEXT | E1000_TXD_DTYP_D;
+ txd_upper |= E1000_TXD_POPTS_TXSM << 8;
+ }
+
+ if (unlikely(tx_flags & E1000_TX_FLAGS_VLAN)) {
+ txd_lower |= E1000_TXD_CMD_VLE;
+ txd_upper |= (tx_flags & E1000_TX_FLAGS_VLAN_MASK);
+ }
+
+ i = tx_ring->next_to_use;
+
+ while (count--) {
+ buffer_info = &tx_ring->buffer_info[i];
+ tx_desc = E1000_TX_DESC(*tx_ring, i);
+ tx_desc->buffer_addr = cpu_to_le64(buffer_info->dma);
+ tx_desc->lower.data =
+ cpu_to_le32(txd_lower | buffer_info->length);
+ tx_desc->upper.data = cpu_to_le32(txd_upper);
+ if (unlikely(++i == tx_ring->count)) i = 0;
+ }
+
+ tx_desc->lower.data |= cpu_to_le32(adapter->txd_cmd);
+
+ /* Force memory writes to complete before letting h/w
+ * know there are new descriptors to fetch. (Only
+ * applicable for weak-ordered memory model archs,
+ * such as IA-64). */
+ wmb();
+
+ tx_ring->next_to_use = i;
+ writel(i, hw->hw_addr + tx_ring->tdt);
+	/* We need this if more than one processor can write to our tail
+	 * at a time; it synchronizes IO on IA64/Altix systems. */
+ mmiowb();
+}
+
+/**
+ * 82547 workaround to avoid controller hang in half-duplex environment.
+ * The workaround is to avoid queuing a large packet that would span
+ * the internal Tx FIFO ring boundary by notifying the stack to resend
+ * the packet at a later time. This gives the Tx FIFO an opportunity to
+ * flush all packets. When that occurs, we reset the Tx FIFO pointers
+ * to the beginning of the Tx FIFO.
+ **/
+
+#define E1000_FIFO_HDR 0x10
+#define E1000_82547_PAD_LEN 0x3E0
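+/* Illustrative numbers for the check below: a 1514-byte frame gives
+ * skb_fifo_len = ALIGN(1514 + 0x10, 0x10) = 1536, so in half duplex it
+ * is held back once 1536 - 0x3E0 = 544 bytes or fewer remain free in
+ * the on-chip Tx FIFO. */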
+
+static int e1000_82547_fifo_workaround(struct e1000_adapter *adapter,
+ struct sk_buff *skb)
+{
+ u32 fifo_space = adapter->tx_fifo_size - adapter->tx_fifo_head;
+ u32 skb_fifo_len = skb->len + E1000_FIFO_HDR;
+
+ skb_fifo_len = ALIGN(skb_fifo_len, E1000_FIFO_HDR);
+
+ if (adapter->link_duplex != HALF_DUPLEX)
+ goto no_fifo_stall_required;
+
+ if (atomic_read(&adapter->tx_fifo_stall))
+ return 1;
+
+ if (skb_fifo_len >= (E1000_82547_PAD_LEN + fifo_space)) {
+ atomic_set(&adapter->tx_fifo_stall, 1);
+ return 1;
+ }
+
+no_fifo_stall_required:
+ adapter->tx_fifo_head += skb_fifo_len;
+ if (adapter->tx_fifo_head >= adapter->tx_fifo_size)
+ adapter->tx_fifo_head -= adapter->tx_fifo_size;
+ return 0;
+}
+
+#define MINIMUM_DHCP_PACKET_SIZE 282
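+/* 282 presumably covers 14 (Ethernet) + 20 (IP) + 8 (UDP) + 240 bytes
+ * (BOOTP fixed fields plus the DHCP magic cookie). */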
+static int e1000_transfer_dhcp_info(struct e1000_adapter *adapter,
+ struct sk_buff *skb)
+{
+ struct e1000_hw *hw = &adapter->hw;
+ u16 length, offset;
+ if (vlan_tx_tag_present(skb)) {
+ if (!((vlan_tx_tag_get(skb) == hw->mng_cookie.vlan_id) &&
+ ( hw->mng_cookie.status &
+ E1000_MNG_DHCP_COOKIE_STATUS_VLAN_SUPPORT)) )
+ return 0;
+ }
+ if (skb->len > MINIMUM_DHCP_PACKET_SIZE) {
+ struct ethhdr *eth = (struct ethhdr *)skb->data;
+ if ((htons(ETH_P_IP) == eth->h_proto)) {
+ const struct iphdr *ip =
+ (struct iphdr *)((u8 *)skb->data+14);
+ if (IPPROTO_UDP == ip->protocol) {
+ struct udphdr *udp =
+ (struct udphdr *)((u8 *)ip +
+ (ip->ihl << 2));
+ if (ntohs(udp->dest) == 67) {
+ offset = (u8 *)udp + 8 - skb->data;
+ length = skb->len - offset;
+
+ return e1000_mng_write_dhcp_info(hw,
+ (u8 *)udp + 8,
+ length);
+ }
+ }
+ }
+ }
+ return 0;
+}
+
+static int __e1000_maybe_stop_tx(struct net_device *netdev, int size)
+{
+ struct e1000_adapter *adapter = netdev_priv(netdev);
+ struct e1000_tx_ring *tx_ring = adapter->tx_ring;
+
+ netif_stop_queue(netdev);
+ /* Herbert's original patch had:
+ * smp_mb__after_netif_stop_queue();
+ * but since that doesn't exist yet, just open code it. */
+ smp_mb();
+
+ /* We need to check again in a case another CPU has just
+ * made room available. */
+ if (likely(E1000_DESC_UNUSED(tx_ring) < size))
+ return -EBUSY;
+
+ /* A reprieve! */
+ netif_start_queue(netdev);
+ ++adapter->restart_queue;
+ return 0;
+}
+
+static int e1000_maybe_stop_tx(struct net_device *netdev,
+ struct e1000_tx_ring *tx_ring, int size)
+{
+ if (likely(E1000_DESC_UNUSED(tx_ring) >= size))
+ return 0;
+ return __e1000_maybe_stop_tx(netdev, size);
+}
+
+#define TXD_USE_COUNT(S, X) (((S) >> (X)) + 1 )
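+/* Worked example: with max_txd_pwr = 12 (4096-byte descriptors), an
+ * 8192-byte linear area yields (8192 >> 12) + 1 = 3 descriptors; the
+ * macro over-reserves by one when the length is an exact multiple of
+ * the descriptor size. */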
+static int e1000_xmit_frame(struct sk_buff *skb, struct net_device *netdev)
+{
+ struct e1000_adapter *adapter = netdev_priv(netdev);
+ struct e1000_hw *hw = &adapter->hw;
+ struct e1000_tx_ring *tx_ring;
+ unsigned int first, max_per_txd = E1000_MAX_DATA_PER_TXD;
+ unsigned int max_txd_pwr = E1000_MAX_TXD_PWR;
+ unsigned int tx_flags = 0;
+ unsigned int len = skb->len - skb->data_len;
+ unsigned long flags;
+ unsigned int nr_frags;
+ unsigned int mss;
+ int count = 0;
+ int tso;
+ unsigned int f;
+
+ /* This goes back to the question of how to logically map a tx queue
+ * to a flow. Right now, performance is impacted slightly negatively
+ * if using multiple tx queues. If the stack breaks away from a
+ * single qdisc implementation, we can look at this again. */
+ tx_ring = adapter->tx_ring;
+
+ if (unlikely(skb->len <= 0)) {
+ dev_kfree_skb_any(skb);
+ return NETDEV_TX_OK;
+ }
+
+ /* 82571 and newer doesn't need the workaround that limited descriptor
+ * length to 4kB */
+ if (hw->mac_type >= e1000_82571)
+ max_per_txd = 8192;
+
+ mss = skb_shinfo(skb)->gso_size;
+	/* The controller does a simple calculation to make sure there is
+	 * enough room in the FIFO before initiating the DMA for each
+	 * buffer; it assumes at most 4 = ceil(buffer len/mss) segments per
+	 * buffer. To make sure we don't overrun the FIFO, adjust the max
+	 * buffer length (to 4 * mss) if the MSS drops. */
+ if (mss) {
+ u8 hdr_len;
+ max_per_txd = min(mss << 2, max_per_txd);
+ max_txd_pwr = fls(max_per_txd) - 1;
+
+ /* TSO Workaround for 82571/2/3 Controllers -- if skb->data
+ * points to just header, pull a few bytes of payload from
+ * frags into skb->data */
+ hdr_len = skb_transport_offset(skb) + tcp_hdrlen(skb);
+ if (skb->data_len && hdr_len == len) {
+ switch (hw->mac_type) {
+ unsigned int pull_size;
+ case e1000_82544:
+				/* Make sure we have room to chop off 4 bytes,
+				 * and that the end alignment will work out to
+				 * this hardware's requirements.
+				 * NOTE: this is a TSO-only workaround; if the
+				 * end byte alignment is not correct, move us
+				 * into the next dword. */
+ if ((unsigned long)(skb_tail_pointer(skb) - 1) & 4)
+ break;
+ /* fall through */
+ case e1000_82571:
+ case e1000_82572:
+ case e1000_82573:
+ case e1000_ich8lan:
+ pull_size = min((unsigned int)4, skb->data_len);
+ if (!__pskb_pull_tail(skb, pull_size)) {
+ DPRINTK(DRV, ERR,
+ "__pskb_pull_tail failed.\n");
+ dev_kfree_skb_any(skb);
+ return NETDEV_TX_OK;
+ }
+ len = skb->len - skb->data_len;
+ break;
+ default:
+ /* do nothing */
+ break;
+ }
+ }
+ }
+
+ /* reserve a descriptor for the offload context */
+ if ((mss) || (skb->ip_summed == CHECKSUM_PARTIAL))
+ count++;
+ count++;
+
+ /* Controller Erratum workaround */
+ if (!skb->data_len && tx_ring->last_tx_tso && !skb_is_gso(skb))
+ count++;
+
+ count += TXD_USE_COUNT(len, max_txd_pwr);
+
+ if (adapter->pcix_82544)
+ count++;
+
+	/* Workaround for errata 10, which applies to all controllers
+	 * in PCI-X mode: add one more descriptor to the count.
+	 */
+ if (unlikely((hw->bus_type == e1000_bus_type_pcix) &&
+ (len > 2015)))
+ count++;
+
+ nr_frags = skb_shinfo(skb)->nr_frags;
+ for (f = 0; f < nr_frags; f++)
+ count += TXD_USE_COUNT(skb_shinfo(skb)->frags[f].size,
+ max_txd_pwr);
+ if (adapter->pcix_82544)
+ count += nr_frags;
+
+
+ if (hw->tx_pkt_filtering &&
+ (hw->mac_type == e1000_82573))
+ e1000_transfer_dhcp_info(adapter, skb);
+
+ if (!spin_trylock_irqsave(&tx_ring->tx_lock, flags))
+ /* Collision - tell upper layer to requeue */
+ return NETDEV_TX_LOCKED;
+
+ /* need: count + 2 desc gap to keep tail from touching
+ * head, otherwise try next time */
+ if (unlikely(e1000_maybe_stop_tx(netdev, tx_ring, count + 2))) {
+ spin_unlock_irqrestore(&tx_ring->tx_lock, flags);
+ return NETDEV_TX_BUSY;
+ }
+
+ if (unlikely(hw->mac_type == e1000_82547)) {
+ if (unlikely(e1000_82547_fifo_workaround(adapter, skb))) {
+ netif_stop_queue(netdev);
+ mod_timer(&adapter->tx_fifo_stall_timer, jiffies + 1);
+ spin_unlock_irqrestore(&tx_ring->tx_lock, flags);
+ return NETDEV_TX_BUSY;
+ }
+ }
+
+ if (unlikely(adapter->vlgrp && vlan_tx_tag_present(skb))) {
+ tx_flags |= E1000_TX_FLAGS_VLAN;
+ tx_flags |= (vlan_tx_tag_get(skb) << E1000_TX_FLAGS_VLAN_SHIFT);
+ }
+
+ first = tx_ring->next_to_use;
+
+ tso = e1000_tso(adapter, tx_ring, skb);
+ if (tso < 0) {
+ dev_kfree_skb_any(skb);
+ spin_unlock_irqrestore(&tx_ring->tx_lock, flags);
+ return NETDEV_TX_OK;
+ }
+
+ if (likely(tso)) {
+ tx_ring->last_tx_tso = 1;
+ tx_flags |= E1000_TX_FLAGS_TSO;
+ } else if (likely(e1000_tx_csum(adapter, tx_ring, skb)))
+ tx_flags |= E1000_TX_FLAGS_CSUM;
+
+	/* The old method assumed an IPv4 packet by default if TSO was
+	 * enabled; since 82571 hardware supports TSO for IPv6 as well, we
+	 * must no longer assume and check the protocol explicitly. */
+ if (likely(skb->protocol == htons(ETH_P_IP)))
+ tx_flags |= E1000_TX_FLAGS_IPV4;
+
+ e1000_tx_queue(adapter, tx_ring, tx_flags,
+ e1000_tx_map(adapter, tx_ring, skb, first,
+ max_per_txd, nr_frags, mss));
+
+ netdev->trans_start = jiffies;
+
+ /* Make sure there is space in the ring for the next send. */
+ e1000_maybe_stop_tx(netdev, tx_ring, MAX_SKB_FRAGS + 2);
+
+ spin_unlock_irqrestore(&tx_ring->tx_lock, flags);
+ return NETDEV_TX_OK;
+}
+
+/**
+ * e1000_tx_timeout - Respond to a Tx Hang
+ * @netdev: network interface device structure
+ **/
+
+static void e1000_tx_timeout(struct net_device *netdev)
+{
+ struct e1000_adapter *adapter = netdev_priv(netdev);
+
+ /* Do the reset outside of interrupt context */
+ adapter->tx_timeout_count++;
+ schedule_work(&adapter->reset_task);
+}
+
+static void e1000_reset_task(struct work_struct *work)
+{
+ struct e1000_adapter *adapter =
+ container_of(work, struct e1000_adapter, reset_task);
+
+ e1000_reinit_locked(adapter);
+}
+
+/**
+ * e1000_get_stats - Get System Network Statistics
+ * @netdev: network interface device structure
+ *
+ * Returns the address of the device statistics structure.
+ * The statistics are actually updated from the timer callback.
+ **/
+
+static struct net_device_stats *e1000_get_stats(struct net_device *netdev)
+{
+ struct e1000_adapter *adapter = netdev_priv(netdev);
+
+ /* only return the current stats */
+ return &adapter->net_stats;
+}
+
+/**
+ * e1000_change_mtu - Change the Maximum Transfer Unit
+ * @netdev: network interface device structure
+ * @new_mtu: new value for maximum frame size
+ *
+ * Returns 0 on success, negative on failure
+ **/
+
+static int e1000_change_mtu(struct net_device *netdev, int new_mtu)
+{
+ struct e1000_adapter *adapter = netdev_priv(netdev);
+ struct e1000_hw *hw = &adapter->hw;
+ int max_frame = new_mtu + ENET_HEADER_SIZE + ETHERNET_FCS_SIZE;
+ u16 eeprom_data = 0;
+
+ if ((max_frame < MINIMUM_ETHERNET_FRAME_SIZE) ||
+ (max_frame > MAX_JUMBO_FRAME_SIZE)) {
+ DPRINTK(PROBE, ERR, "Invalid MTU setting\n");
+ return -EINVAL;
+ }
+
+ /* Adapter-specific max frame size limits. */
+ switch (hw->mac_type) {
+ case e1000_undefined ... e1000_82542_rev2_1:
+ case e1000_ich8lan:
+ if (max_frame > MAXIMUM_ETHERNET_FRAME_SIZE) {
+ DPRINTK(PROBE, ERR, "Jumbo Frames not supported.\n");
+ return -EINVAL;
+ }
+ break;
+ case e1000_82573:
+ /* Jumbo Frames not supported if:
+ * - this is not an 82573L device
+ * - ASPM is enabled in any way (0x1A bits 3:2) */
+ e1000_read_eeprom(hw, EEPROM_INIT_3GIO_3, 1,
+ &eeprom_data);
+ if ((hw->device_id != E1000_DEV_ID_82573L) ||
+ (eeprom_data & EEPROM_WORD1A_ASPM_MASK)) {
+ if (max_frame > MAXIMUM_ETHERNET_FRAME_SIZE) {
+ DPRINTK(PROBE, ERR,
+ "Jumbo Frames not supported.\n");
+ return -EINVAL;
+ }
+ break;
+ }
+ /* ERT will be enabled later to enable wire speed receives */
+
+ /* fall through to get support */
+ case e1000_82571:
+ case e1000_82572:
+ case e1000_80003es2lan:
+#define MAX_STD_JUMBO_FRAME_SIZE 9234
+ if (max_frame > MAX_STD_JUMBO_FRAME_SIZE) {
+ DPRINTK(PROBE, ERR, "MTU > 9216 not supported.\n");
+ return -EINVAL;
+ }
+ break;
+ default:
+ /* Capable of supporting up to MAX_JUMBO_FRAME_SIZE limit. */
+ break;
+ }
+
+ /* NOTE: netdev_alloc_skb reserves 16 bytes, and typically NET_IP_ALIGN
+ * means we reserve 2 more, this pushes us to allocate from the next
+ * larger slab size
+ * i.e. RXBUFFER_2048 --> size-4096 slab */
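+	/* For example, the default MTU of 1500 gives max_frame = 1518 and
+	 * falls into the E1000_RXBUFFER_2048 bucket below, while a
+	 * 9000-byte jumbo MTU gives 9018 and selects E1000_RXBUFFER_16384. */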
+
+ if (max_frame <= E1000_RXBUFFER_256)
+ adapter->rx_buffer_len = E1000_RXBUFFER_256;
+ else if (max_frame <= E1000_RXBUFFER_512)
+ adapter->rx_buffer_len = E1000_RXBUFFER_512;
+ else if (max_frame <= E1000_RXBUFFER_1024)
+ adapter->rx_buffer_len = E1000_RXBUFFER_1024;
+ else if (max_frame <= E1000_RXBUFFER_2048)
+ adapter->rx_buffer_len = E1000_RXBUFFER_2048;
+ else if (max_frame <= E1000_RXBUFFER_4096)
+ adapter->rx_buffer_len = E1000_RXBUFFER_4096;
+ else if (max_frame <= E1000_RXBUFFER_8192)
+ adapter->rx_buffer_len = E1000_RXBUFFER_8192;
+ else if (max_frame <= E1000_RXBUFFER_16384)
+ adapter->rx_buffer_len = E1000_RXBUFFER_16384;
+
+ /* adjust allocation if LPE protects us, and we aren't using SBP */
+ if (!hw->tbi_compatibility_on &&
+ ((max_frame == MAXIMUM_ETHERNET_FRAME_SIZE) ||
+ (max_frame == MAXIMUM_ETHERNET_VLAN_SIZE)))
+ adapter->rx_buffer_len = MAXIMUM_ETHERNET_VLAN_SIZE;
+
+ netdev->mtu = new_mtu;
+ hw->max_frame_size = max_frame;
+
+ if (netif_running(netdev))
+ e1000_reinit_locked(adapter);
+
+ return 0;
+}
+
+/**
+ * e1000_update_stats - Update the board statistics counters
+ * @adapter: board private structure
+ **/
+
+void e1000_update_stats(struct e1000_adapter *adapter)
+{
+ struct e1000_hw *hw = &adapter->hw;
+ struct pci_dev *pdev = adapter->pdev;
+ unsigned long flags;
+ u16 phy_tmp;
+
+#define PHY_IDLE_ERROR_COUNT_MASK 0x00FF
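+/* The low byte of PHY_1000T_STATUS holds the 1000BASE-T idle error count. */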
+
+ /*
+ * Prevent stats update while adapter is being reset, or if the pci
+ * connection is down.
+ */
+ if (adapter->link_speed == 0)
+ return;
+ if (pci_channel_offline(pdev))
+ return;
+
+ spin_lock_irqsave(&adapter->stats_lock, flags);
+
+ /* these counters are modified from e1000_tbi_adjust_stats,
+ * called from the interrupt context, so they must only
+ * be written while holding adapter->stats_lock
+ */
+
+ adapter->stats.crcerrs += er32(CRCERRS);
+ adapter->stats.gprc += er32(GPRC);
+ adapter->stats.gorcl += er32(GORCL);
+ adapter->stats.gorch += er32(GORCH);
+ adapter->stats.bprc += er32(BPRC);
+ adapter->stats.mprc += er32(MPRC);
+ adapter->stats.roc += er32(ROC);
+
+ if (hw->mac_type != e1000_ich8lan) {
+ adapter->stats.prc64 += er32(PRC64);
+ adapter->stats.prc127 += er32(PRC127);
+ adapter->stats.prc255 += er32(PRC255);
+ adapter->stats.prc511 += er32(PRC511);
+ adapter->stats.prc1023 += er32(PRC1023);
+ adapter->stats.prc1522 += er32(PRC1522);
+ }
+
+ adapter->stats.symerrs += er32(SYMERRS);
+ adapter->stats.mpc += er32(MPC);
+ adapter->stats.scc += er32(SCC);
+ adapter->stats.ecol += er32(ECOL);
+ adapter->stats.mcc += er32(MCC);
+ adapter->stats.latecol += er32(LATECOL);
+ adapter->stats.dc += er32(DC);
+ adapter->stats.sec += er32(SEC);
+ adapter->stats.rlec += er32(RLEC);
+ adapter->stats.xonrxc += er32(XONRXC);
+ adapter->stats.xontxc += er32(XONTXC);
+ adapter->stats.xoffrxc += er32(XOFFRXC);
+ adapter->stats.xofftxc += er32(XOFFTXC);
+ adapter->stats.fcruc += er32(FCRUC);
+ adapter->stats.gptc += er32(GPTC);
+ adapter->stats.gotcl += er32(GOTCL);
+ adapter->stats.gotch += er32(GOTCH);
+ adapter->stats.rnbc += er32(RNBC);
+ adapter->stats.ruc += er32(RUC);
+ adapter->stats.rfc += er32(RFC);
+ adapter->stats.rjc += er32(RJC);
+ adapter->stats.torl += er32(TORL);
+ adapter->stats.torh += er32(TORH);
+ adapter->stats.totl += er32(TOTL);
+ adapter->stats.toth += er32(TOTH);
+ adapter->stats.tpr += er32(TPR);
+
+ if (hw->mac_type != e1000_ich8lan) {
+ adapter->stats.ptc64 += er32(PTC64);
+ adapter->stats.ptc127 += er32(PTC127);
+ adapter->stats.ptc255 += er32(PTC255);
+ adapter->stats.ptc511 += er32(PTC511);
+ adapter->stats.ptc1023 += er32(PTC1023);
+ adapter->stats.ptc1522 += er32(PTC1522);
+ }
+
+ adapter->stats.mptc += er32(MPTC);
+ adapter->stats.bptc += er32(BPTC);
+
+ /* used for adaptive IFS */
+
+ hw->tx_packet_delta = er32(TPT);
+ adapter->stats.tpt += hw->tx_packet_delta;
+ hw->collision_delta = er32(COLC);
+ adapter->stats.colc += hw->collision_delta;
+
+ if (hw->mac_type >= e1000_82543) {
+ adapter->stats.algnerrc += er32(ALGNERRC);
+ adapter->stats.rxerrc += er32(RXERRC);
+ adapter->stats.tncrs += er32(TNCRS);
+ adapter->stats.cexterr += er32(CEXTERR);
+ adapter->stats.tsctc += er32(TSCTC);
+ adapter->stats.tsctfc += er32(TSCTFC);
+ }
+ if (hw->mac_type > e1000_82547_rev_2) {
+ adapter->stats.iac += er32(IAC);
+ adapter->stats.icrxoc += er32(ICRXOC);
+
+ if (hw->mac_type != e1000_ich8lan) {
+ adapter->stats.icrxptc += er32(ICRXPTC);
+ adapter->stats.icrxatc += er32(ICRXATC);
+ adapter->stats.ictxptc += er32(ICTXPTC);
+ adapter->stats.ictxatc += er32(ICTXATC);
+ adapter->stats.ictxqec += er32(ICTXQEC);
+ adapter->stats.ictxqmtc += er32(ICTXQMTC);
+ adapter->stats.icrxdmtc += er32(ICRXDMTC);
+ }
+ }
+
+ /* Fill out the OS statistics structure */
+ adapter->net_stats.multicast = adapter->stats.mprc;
+ adapter->net_stats.collisions = adapter->stats.colc;
+
+ /* Rx Errors */
+
+ /* RLEC on some newer hardware can be incorrect so build
+ * our own version based on RUC and ROC */
+ adapter->net_stats.rx_errors = adapter->stats.rxerrc +
+ adapter->stats.crcerrs + adapter->stats.algnerrc +
+ adapter->stats.ruc + adapter->stats.roc +
+ adapter->stats.cexterr;
+ adapter->stats.rlerrc = adapter->stats.ruc + adapter->stats.roc;
+ adapter->net_stats.rx_length_errors = adapter->stats.rlerrc;
+ adapter->net_stats.rx_crc_errors = adapter->stats.crcerrs;
+ adapter->net_stats.rx_frame_errors = adapter->stats.algnerrc;
+ adapter->net_stats.rx_missed_errors = adapter->stats.mpc;
+
+ /* Tx Errors */
+ adapter->stats.txerrc = adapter->stats.ecol + adapter->stats.latecol;
+ adapter->net_stats.tx_errors = adapter->stats.txerrc;
+ adapter->net_stats.tx_aborted_errors = adapter->stats.ecol;
+ adapter->net_stats.tx_window_errors = adapter->stats.latecol;
+ adapter->net_stats.tx_carrier_errors = adapter->stats.tncrs;
+ if (hw->bad_tx_carr_stats_fd &&
+ adapter->link_duplex == FULL_DUPLEX) {
+ adapter->net_stats.tx_carrier_errors = 0;
+ adapter->stats.tncrs = 0;
+ }
+
+ /* Tx Dropped needs to be maintained elsewhere */
+
+ /* Phy Stats */
+ if (hw->media_type == e1000_media_type_copper) {
+ if ((adapter->link_speed == SPEED_1000) &&
+ (!e1000_read_phy_reg(hw, PHY_1000T_STATUS, &phy_tmp))) {
+ phy_tmp &= PHY_IDLE_ERROR_COUNT_MASK;
+ adapter->phy_stats.idle_errors += phy_tmp;
+ }
+
+ if ((hw->mac_type <= e1000_82546) &&
+ (hw->phy_type == e1000_phy_m88) &&
+ !e1000_read_phy_reg(hw, M88E1000_RX_ERR_CNTR, &phy_tmp))
+ adapter->phy_stats.receive_errors += phy_tmp;
+ }
+
+ /* Management Stats */
+ if (hw->has_smbus) {
+ adapter->stats.mgptc += er32(MGTPTC);
+ adapter->stats.mgprc += er32(MGTPRC);
+ adapter->stats.mgpdc += er32(MGTPDC);
+ }
+
+ spin_unlock_irqrestore(&adapter->stats_lock, flags);
+}
+
+/**
+ * e1000_intr_msi - Interrupt Handler
+ * @irq: interrupt number
+ * @data: pointer to a network interface device structure
+ **/
+
+static irqreturn_t e1000_intr_msi(int irq, void *data)
+{
+ struct net_device *netdev = data;
+ struct e1000_adapter *adapter = netdev_priv(netdev);
+ struct e1000_hw *hw = &adapter->hw;
+ u32 icr = er32(ICR);
+
+ /* in NAPI mode read ICR disables interrupts using IAM */
+
+ if (icr & (E1000_ICR_RXSEQ | E1000_ICR_LSC)) {
+ hw->get_link_status = 1;
+		/* 80003ES2LAN workaround for the packet buffer issue on a
+		 * link-down event: disable receives here in the ISR and
+		 * reset the adapter in the watchdog. */
+ if (netif_carrier_ok(netdev) &&
+ (hw->mac_type == e1000_80003es2lan)) {
+ /* disable receives */
+ u32 rctl = er32(RCTL);
+ ew32(RCTL, rctl & ~E1000_RCTL_EN);
+ }
+ /* guard against interrupt when we're going down */
+ if (!test_bit(__E1000_DOWN, &adapter->flags))
+ mod_timer(&adapter->watchdog_timer, jiffies + 1);
+ }
+
+ if (likely(netif_rx_schedule_prep(netdev, &adapter->napi))) {
+ adapter->total_tx_bytes = 0;
+ adapter->total_tx_packets = 0;
+ adapter->total_rx_bytes = 0;
+ adapter->total_rx_packets = 0;
+ __netif_rx_schedule(netdev, &adapter->napi);
+ } else
+ e1000_irq_enable(adapter);
+
+ return IRQ_HANDLED;
+}
+
+/**
+ * e1000_intr - Interrupt Handler
+ * @irq: interrupt number
+ * @data: pointer to a network interface device structure
+ **/
+
+static irqreturn_t e1000_intr(int irq, void *data)
+{
+ struct net_device *netdev = data;
+ struct e1000_adapter *adapter = netdev_priv(netdev);
+ struct e1000_hw *hw = &adapter->hw;
+ u32 rctl, icr = er32(ICR);
+
+ if (unlikely((!icr) || test_bit(__E1000_RESETTING, &adapter->flags)))
+ return IRQ_NONE; /* Not our interrupt */
+
+ /* IMS will not auto-mask if INT_ASSERTED is not set, and if it is
+ * not set, then the adapter didn't send an interrupt */
+ if (unlikely(hw->mac_type >= e1000_82571 &&
+ !(icr & E1000_ICR_INT_ASSERTED)))
+ return IRQ_NONE;
+
+ /* Interrupt Auto-Mask...upon reading ICR, interrupts are masked. No
+ * need for the IMC write */
+
+ if (unlikely(icr & (E1000_ICR_RXSEQ | E1000_ICR_LSC))) {
+ hw->get_link_status = 1;
+		/* 80003ES2LAN workaround for the packet buffer issue on a
+		 * link-down event: disable receives here in the ISR and
+		 * reset the adapter in the watchdog.
+		 */
+ if (netif_carrier_ok(netdev) &&
+ (hw->mac_type == e1000_80003es2lan)) {
+ /* disable receives */
+ rctl = er32(RCTL);
+ ew32(RCTL, rctl & ~E1000_RCTL_EN);
+ }
+ /* guard against interrupt when we're going down */
+ if (!test_bit(__E1000_DOWN, &adapter->flags))
+ mod_timer(&adapter->watchdog_timer, jiffies + 1);
+ }
+
+ if (unlikely(hw->mac_type < e1000_82571)) {
+ /* disable interrupts, without the synchronize_irq bit */
+ ew32(IMC, ~0);
+ E1000_WRITE_FLUSH();
+ }
+ if (likely(netif_rx_schedule_prep(netdev, &adapter->napi))) {
+ adapter->total_tx_bytes = 0;
+ adapter->total_tx_packets = 0;
+ adapter->total_rx_bytes = 0;
+ adapter->total_rx_packets = 0;
+ __netif_rx_schedule(netdev, &adapter->napi);
+ } else
+ /* this really should not happen! if it does it is basically a
+ * bug, but not a hard error, so enable ints and continue */
+ e1000_irq_enable(adapter);
+
+ return IRQ_HANDLED;
+}
+
+/**
+ * e1000_clean - NAPI Rx polling callback
+ * @adapter: board private structure
+ **/
+static int e1000_clean(struct napi_struct *napi, int budget)
+{
+ struct e1000_adapter *adapter = container_of(napi, struct e1000_adapter, napi);
+ struct net_device *poll_dev = adapter->netdev;
+ int tx_cleaned = 0, work_done = 0;
+
+ /* Must NOT use netdev_priv macro here. */
+ adapter = poll_dev->priv;
+
+ /* e1000_clean is called per-cpu. This lock protects
+ * tx_ring[0] from being cleaned by multiple cpus
+ * simultaneously. A failure obtaining the lock means
+ * tx_ring[0] is currently being cleaned anyway. */
+ if (spin_trylock(&adapter->tx_queue_lock)) {
+ tx_cleaned = e1000_clean_tx_irq(adapter,
+ &adapter->tx_ring[0]);
+ spin_unlock(&adapter->tx_queue_lock);
+ }
+
+ adapter->clean_rx(adapter, &adapter->rx_ring[0],
+ &work_done, budget);
+
+ if (tx_cleaned)
+ work_done = budget;
+
+ /* If budget not fully consumed, exit the polling mode */
+ if (work_done < budget) {
+ if (likely(adapter->itr_setting & 3))
+ e1000_set_itr(adapter);
+ netif_rx_complete(poll_dev, napi);
+ e1000_irq_enable(adapter);
+ }
+
+ return work_done;
+}
+
+/**
+ * e1000_clean_tx_irq - Reclaim resources after transmit completes
+ * @adapter: board private structure
+ **/
+static bool e1000_clean_tx_irq(struct e1000_adapter *adapter,
+ struct e1000_tx_ring *tx_ring)
+{
+ struct e1000_hw *hw = &adapter->hw;
+ struct net_device *netdev = adapter->netdev;
+ struct e1000_tx_desc *tx_desc, *eop_desc;
+ struct e1000_buffer *buffer_info;
+ unsigned int i, eop;
+ unsigned int count = 0;
+ bool cleaned = false;
+ unsigned int total_tx_bytes=0, total_tx_packets=0;
+
+ i = tx_ring->next_to_clean;
+ eop = tx_ring->buffer_info[i].next_to_watch;
+ eop_desc = E1000_TX_DESC(*tx_ring, eop);
+
+ while (eop_desc->upper.data & cpu_to_le32(E1000_TXD_STAT_DD)) {
+ for (cleaned = false; !cleaned; ) {
+ tx_desc = E1000_TX_DESC(*tx_ring, i);
+ buffer_info = &tx_ring->buffer_info[i];
+ cleaned = (i == eop);
+
+ if (cleaned) {
+ struct sk_buff *skb = buffer_info->skb;
+ unsigned int segs, bytecount;
+ segs = skb_shinfo(skb)->gso_segs ?: 1;
+ /* multiply data chunks by size of headers */
+ bytecount = ((segs - 1) * skb_headlen(skb)) +
+ skb->len;
+ total_tx_packets += segs;
+ total_tx_bytes += bytecount;
+ }
+ e1000_unmap_and_free_tx_resource(adapter, buffer_info);
+ tx_desc->upper.data = 0;
+
+ if (unlikely(++i == tx_ring->count)) i = 0;
+ }
+
+ eop = tx_ring->buffer_info[i].next_to_watch;
+ eop_desc = E1000_TX_DESC(*tx_ring, eop);
+#define E1000_TX_WEIGHT 64
+ /* weight of a sort for tx, to avoid endless transmit cleanup */
+ if (count++ == E1000_TX_WEIGHT)
+ break;
+ }
+
+ tx_ring->next_to_clean = i;
+
+#define TX_WAKE_THRESHOLD 32
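+	/* Only restart the queue once at least 32 descriptors are free
+	 * again, presumably to avoid bouncing it on every reclaimed
+	 * descriptor. */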
+ if (unlikely(cleaned && netif_carrier_ok(netdev) &&
+ E1000_DESC_UNUSED(tx_ring) >= TX_WAKE_THRESHOLD)) {
+ /* Make sure that anybody stopping the queue after this
+ * sees the new next_to_clean.
+ */
+ smp_mb();
+ if (netif_queue_stopped(netdev)) {
+ netif_wake_queue(netdev);
+ ++adapter->restart_queue;
+ }
+ }
+
+ if (adapter->detect_tx_hung) {
+		/* Detect a transmit hang in hardware; this serializes the
+		 * check with the clearing of time_stamp and movement of i. */
+ adapter->detect_tx_hung = false;
+ if (tx_ring->buffer_info[eop].dma &&
+ time_after(jiffies, tx_ring->buffer_info[eop].time_stamp +
+ (adapter->tx_timeout_factor * HZ))
+ && !(er32(STATUS) & E1000_STATUS_TXOFF)) {
+
+ /* detected Tx unit hang */
+ DPRINTK(DRV, ERR, "Detected Tx Unit Hang\n"
+ " Tx Queue <%lu>\n"
+ " TDH <%x>\n"
+ " TDT <%x>\n"
+ " next_to_use <%x>\n"
+ " next_to_clean <%x>\n"
+ "buffer_info[next_to_clean]\n"
+ " time_stamp <%lx>\n"
+ " next_to_watch <%x>\n"
+ " jiffies <%lx>\n"
+ " next_to_watch.status <%x>\n",
+ (unsigned long)((tx_ring - adapter->tx_ring) /
+ sizeof(struct e1000_tx_ring)),
+ readl(hw->hw_addr + tx_ring->tdh),
+ readl(hw->hw_addr + tx_ring->tdt),
+ tx_ring->next_to_use,
+ tx_ring->next_to_clean,
+ tx_ring->buffer_info[eop].time_stamp,
+ eop,
+ jiffies,
+ eop_desc->upper.fields.status);
+ netif_stop_queue(netdev);
+ }
+ }
+ adapter->total_tx_bytes += total_tx_bytes;
+ adapter->total_tx_packets += total_tx_packets;
+ adapter->net_stats.tx_bytes += total_tx_bytes;
+ adapter->net_stats.tx_packets += total_tx_packets;
+ return cleaned;
+}
+
+/**
+ * e1000_rx_checksum - Receive Checksum Offload for 82543
+ * @adapter: board private structure
+ * @status_err: receive descriptor status and error fields
+ * @csum: receive descriptor csum field
+ * @sk_buff: socket buffer with received data
+ **/
+
+static void e1000_rx_checksum(struct e1000_adapter *adapter, u32 status_err,
+ u32 csum, struct sk_buff *skb)
+{
+ struct e1000_hw *hw = &adapter->hw;
+ u16 status = (u16)status_err;
+ u8 errors = (u8)(status_err >> 24);
+ skb->ip_summed = CHECKSUM_NONE;
+
+ /* 82543 or newer only */
+ if (unlikely(hw->mac_type < e1000_82543)) return;
+ /* Ignore Checksum bit is set */
+ if (unlikely(status & E1000_RXD_STAT_IXSM)) return;
+ /* TCP/UDP checksum error bit is set */
+ if (unlikely(errors & E1000_RXD_ERR_TCPE)) {
+ /* let the stack verify checksum errors */
+ adapter->hw_csum_err++;
+ return;
+ }
+ /* TCP/UDP Checksum has not been calculated */
+ if (hw->mac_type <= e1000_82547_rev_2) {
+ if (!(status & E1000_RXD_STAT_TCPCS))
+ return;
+ } else {
+ if (!(status & (E1000_RXD_STAT_TCPCS | E1000_RXD_STAT_UDPCS)))
+ return;
+ }
+ /* It must be a TCP or UDP packet with a valid checksum */
+ if (likely(status & E1000_RXD_STAT_TCPCS)) {
+ /* TCP checksum is good */
+ skb->ip_summed = CHECKSUM_UNNECESSARY;
+ } else if (hw->mac_type > e1000_82547_rev_2) {
+ /* IP fragment with UDP payload */
+ /* Hardware complements the payload checksum, so we undo it
+ * and then put the value in host order for further stack use.
+ */
+ __sum16 sum = (__force __sum16)htons(csum);
+ skb->csum = csum_unfold(~sum);
+ skb->ip_summed = CHECKSUM_COMPLETE;
+ }
+ adapter->hw_csum_good++;
+}
+
+/**
+ * e1000_clean_rx_irq - Send received data up the network stack; legacy
+ * @adapter: board private structure
+ **/
+static bool e1000_clean_rx_irq(struct e1000_adapter *adapter,
+ struct e1000_rx_ring *rx_ring,
+ int *work_done, int work_to_do)
+{
+ struct e1000_hw *hw = &adapter->hw;
+ struct net_device *netdev = adapter->netdev;
+ struct pci_dev *pdev = adapter->pdev;
+ struct e1000_rx_desc *rx_desc, *next_rxd;
+ struct e1000_buffer *buffer_info, *next_buffer;
+ unsigned long flags;
+ u32 length;
+ u8 last_byte;
+ unsigned int i;
+ int cleaned_count = 0;
+ bool cleaned = false;
+ unsigned int total_rx_bytes=0, total_rx_packets=0;
+
+ i = rx_ring->next_to_clean;
+ rx_desc = E1000_RX_DESC(*rx_ring, i);
+ buffer_info = &rx_ring->buffer_info[i];
+
+ while (rx_desc->status & E1000_RXD_STAT_DD) {
+ struct sk_buff *skb;
+ u8 status;
+
+ if (*work_done >= work_to_do)
+ break;
+ (*work_done)++;
+
+ status = rx_desc->status;
+ skb = buffer_info->skb;
+ buffer_info->skb = NULL;
+
+ prefetch(skb->data - NET_IP_ALIGN);
+
+ if (++i == rx_ring->count) i = 0;
+ next_rxd = E1000_RX_DESC(*rx_ring, i);
+ prefetch(next_rxd);
+
+ next_buffer = &rx_ring->buffer_info[i];
+
+ cleaned = true;
+ cleaned_count++;
+ pci_unmap_single(pdev,
+ buffer_info->dma,
+ buffer_info->length,
+ PCI_DMA_FROMDEVICE);
+
+ length = le16_to_cpu(rx_desc->length);
+
+ if (unlikely(!(status & E1000_RXD_STAT_EOP))) {
+ /* All receives must fit into a single buffer */
+ E1000_DBG("%s: Receive packet consumed multiple"
+ " buffers\n", netdev->name);
+ /* recycle */
+ buffer_info->skb = skb;
+ goto next_desc;
+ }
+
+ if (unlikely(rx_desc->errors & E1000_RXD_ERR_FRAME_ERR_MASK)) {
+ last_byte = *(skb->data + length - 1);
+ if (TBI_ACCEPT(hw, status, rx_desc->errors, length,
+ last_byte)) {
+ spin_lock_irqsave(&adapter->stats_lock, flags);
+ e1000_tbi_adjust_stats(hw, &adapter->stats,
+ length, skb->data);
+ spin_unlock_irqrestore(&adapter->stats_lock,
+ flags);
+ length--;
+ } else {
+ /* recycle */
+ buffer_info->skb = skb;
+ goto next_desc;
+ }
+ }
+
+ /* adjust length to remove Ethernet CRC, this must be
+ * done after the TBI_ACCEPT workaround above */
+ length -= 4;
+
+ /* probably a little skewed due to removing CRC */
+ total_rx_bytes += length;
+ total_rx_packets++;
+
+		/* Code added for copybreak; this should improve
+		 * performance for small packets with large amounts
+		 * of reassembly being done in the stack. */
+ if (length < copybreak) {
+ struct sk_buff *new_skb =
+ netdev_alloc_skb(netdev, length + NET_IP_ALIGN);
+ if (new_skb) {
+ skb_reserve(new_skb, NET_IP_ALIGN);
+ skb_copy_to_linear_data_offset(new_skb,
+ -NET_IP_ALIGN,
+ (skb->data -
+ NET_IP_ALIGN),
+ (length +
+ NET_IP_ALIGN));
+ /* save the skb in buffer_info as good */
+ buffer_info->skb = skb;
+ skb = new_skb;
+ }
+ /* else just continue with the old one */
+ }
+ /* end copybreak code */
+ skb_put(skb, length);
+
+ /* Receive Checksum Offload */
+ e1000_rx_checksum(adapter,
+ (u32)(status) |
+ ((u32)(rx_desc->errors) << 24),
+ le16_to_cpu(rx_desc->csum), skb);
+
+ skb->protocol = eth_type_trans(skb, netdev);
+
+ if (unlikely(adapter->vlgrp &&
+ (status & E1000_RXD_STAT_VP))) {
+ vlan_hwaccel_receive_skb(skb, adapter->vlgrp,
+ le16_to_cpu(rx_desc->special));
+ } else {
+ netif_receive_skb(skb);
+ }
+
+ netdev->last_rx = jiffies;
+
+next_desc:
+ rx_desc->status = 0;
+
+ /* return some buffers to hardware, one at a time is too slow */
+ if (unlikely(cleaned_count >= E1000_RX_BUFFER_WRITE)) {
+ adapter->alloc_rx_buf(adapter, rx_ring, cleaned_count);
+ cleaned_count = 0;
+ }
+
+ /* use prefetched values */
+ rx_desc = next_rxd;
+ buffer_info = next_buffer;
+ }
+ rx_ring->next_to_clean = i;
+
+ cleaned_count = E1000_DESC_UNUSED(rx_ring);
+ if (cleaned_count)
+ adapter->alloc_rx_buf(adapter, rx_ring, cleaned_count);
+
+ adapter->total_rx_packets += total_rx_packets;
+ adapter->total_rx_bytes += total_rx_bytes;
+ adapter->net_stats.rx_bytes += total_rx_bytes;
+ adapter->net_stats.rx_packets += total_rx_packets;
+ return cleaned;
+}
+
+/**
+ * e1000_alloc_rx_buffers - Replace used receive buffers; legacy & extended
+ * @adapter: address of board private structure
+ **/
+
+static void e1000_alloc_rx_buffers(struct e1000_adapter *adapter,
+ struct e1000_rx_ring *rx_ring,
+ int cleaned_count)
+{
+ struct e1000_hw *hw = &adapter->hw;
+ struct net_device *netdev = adapter->netdev;
+ struct pci_dev *pdev = adapter->pdev;
+ struct e1000_rx_desc *rx_desc;
+ struct e1000_buffer *buffer_info;
+ struct sk_buff *skb;
+ unsigned int i;
+ unsigned int bufsz = adapter->rx_buffer_len + NET_IP_ALIGN;
+
+ i = rx_ring->next_to_use;
+ buffer_info = &rx_ring->buffer_info[i];
+
+ while (cleaned_count--) {
+ skb = buffer_info->skb;
+ if (skb) {
+ skb_trim(skb, 0);
+ goto map_skb;
+ }
+
+ skb = netdev_alloc_skb(netdev, bufsz);
+ if (unlikely(!skb)) {
+ /* Better luck next round */
+ adapter->alloc_rx_buff_failed++;
+ break;
+ }
+
+ /* Fix for errata 23, can't cross 64kB boundary */
+ if (!e1000_check_64k_bound(adapter, skb->data, bufsz)) {
+ struct sk_buff *oldskb = skb;
+ DPRINTK(RX_ERR, ERR, "skb align check failed: %u bytes "
+ "at %p\n", bufsz, skb->data);
+ /* Try again, without freeing the previous */
+ skb = netdev_alloc_skb(netdev, bufsz);
+ /* Failed allocation, critical failure */
+ if (!skb) {
+ dev_kfree_skb(oldskb);
+ break;
+ }
+
+ if (!e1000_check_64k_bound(adapter, skb->data, bufsz)) {
+ /* give up */
+ dev_kfree_skb(skb);
+ dev_kfree_skb(oldskb);
+ break; /* while !buffer_info->skb */
+ }
+
+ /* Use new allocation */
+ dev_kfree_skb(oldskb);
+ }
+ /* Make buffer alignment 2 beyond a 16 byte boundary
+ * this will result in a 16 byte aligned IP header after
+ * the 14 byte MAC header is removed
+ */
+ skb_reserve(skb, NET_IP_ALIGN);
+
+ buffer_info->skb = skb;
+ buffer_info->length = adapter->rx_buffer_len;
+map_skb:
+ buffer_info->dma = pci_map_single(pdev,
+ skb->data,
+ adapter->rx_buffer_len,
+ PCI_DMA_FROMDEVICE);
+
+ /* Fix for errata 23, can't cross 64kB boundary */
+ if (!e1000_check_64k_bound(adapter,
+ (void *)(unsigned long)buffer_info->dma,
+ adapter->rx_buffer_len)) {
+ DPRINTK(RX_ERR, ERR,
+ "dma align check failed: %u bytes at %p\n",
+ adapter->rx_buffer_len,
+ (void *)(unsigned long)buffer_info->dma);
+ dev_kfree_skb(skb);
+ buffer_info->skb = NULL;
+
+ pci_unmap_single(pdev, buffer_info->dma,
+ adapter->rx_buffer_len,
+ PCI_DMA_FROMDEVICE);
+
+ break; /* while !buffer_info->skb */
+ }
+ rx_desc = E1000_RX_DESC(*rx_ring, i);
+ rx_desc->buffer_addr = cpu_to_le64(buffer_info->dma);
+
+ if (unlikely(++i == rx_ring->count))
+ i = 0;
+ buffer_info = &rx_ring->buffer_info[i];
+ }
+
+ if (likely(rx_ring->next_to_use != i)) {
+ rx_ring->next_to_use = i;
+ if (unlikely(i-- == 0))
+ i = (rx_ring->count - 1);
+
+ /* Force memory writes to complete before letting h/w
+ * know there are new descriptors to fetch. (Only
+ * applicable for weak-ordered memory model archs,
+ * such as IA-64). */
+ wmb();
+ writel(i, hw->hw_addr + rx_ring->rdt);
+ }
+}
+
+/**
+ * e1000_smartspeed - Workaround for SmartSpeed on 82541 and 82547 controllers.
+ * @adapter: board private structure
+ **/
+
+static void e1000_smartspeed(struct e1000_adapter *adapter)
+{
+ struct e1000_hw *hw = &adapter->hw;
+ u16 phy_status;
+ u16 phy_ctrl;
+
+ if ((hw->phy_type != e1000_phy_igp) || !hw->autoneg ||
+ !(hw->autoneg_advertised & ADVERTISE_1000_FULL))
+ return;
+
+ if (adapter->smartspeed == 0) {
+ /* If Master/Slave config fault is asserted twice,
+ * we assume back-to-back */
+ e1000_read_phy_reg(hw, PHY_1000T_STATUS, &phy_status);
+ if (!(phy_status & SR_1000T_MS_CONFIG_FAULT)) return;
+ e1000_read_phy_reg(hw, PHY_1000T_STATUS, &phy_status);
+ if (!(phy_status & SR_1000T_MS_CONFIG_FAULT)) return;
+ e1000_read_phy_reg(hw, PHY_1000T_CTRL, &phy_ctrl);
+ if (phy_ctrl & CR_1000T_MS_ENABLE) {
+ phy_ctrl &= ~CR_1000T_MS_ENABLE;
+ e1000_write_phy_reg(hw, PHY_1000T_CTRL,
+ phy_ctrl);
+ adapter->smartspeed++;
+ if (!e1000_phy_setup_autoneg(hw) &&
+ !e1000_read_phy_reg(hw, PHY_CTRL,
+ &phy_ctrl)) {
+ phy_ctrl |= (MII_CR_AUTO_NEG_EN |
+ MII_CR_RESTART_AUTO_NEG);
+ e1000_write_phy_reg(hw, PHY_CTRL,
+ phy_ctrl);
+ }
+ }
+ return;
+ } else if (adapter->smartspeed == E1000_SMARTSPEED_DOWNSHIFT) {
+ /* If still no link, perhaps using 2/3 pair cable */
+ e1000_read_phy_reg(hw, PHY_1000T_CTRL, &phy_ctrl);
+ phy_ctrl |= CR_1000T_MS_ENABLE;
+ e1000_write_phy_reg(hw, PHY_1000T_CTRL, phy_ctrl);
+ if (!e1000_phy_setup_autoneg(hw) &&
+ !e1000_read_phy_reg(hw, PHY_CTRL, &phy_ctrl)) {
+ phy_ctrl |= (MII_CR_AUTO_NEG_EN |
+ MII_CR_RESTART_AUTO_NEG);
+ e1000_write_phy_reg(hw, PHY_CTRL, phy_ctrl);
+ }
+ }
+ /* Restart process after E1000_SMARTSPEED_MAX iterations */
+ if (adapter->smartspeed++ == E1000_SMARTSPEED_MAX)
+ adapter->smartspeed = 0;
+}
+
+/**
+ * e1000_ioctl - handle ioctl calls
+ * @netdev: network interface device structure
+ * @ifreq: interface request structure
+ * @cmd: ioctl command
+ **/
+
+static int e1000_ioctl(struct net_device *netdev, struct ifreq *ifr, int cmd)
+{
+ switch (cmd) {
+ case SIOCGMIIPHY:
+ case SIOCGMIIREG:
+ case SIOCSMIIREG:
+ return e1000_mii_ioctl(netdev, ifr, cmd);
+ default:
+ return -EOPNOTSUPP;
+ }
+}
+
+/**
+ * e1000_mii_ioctl - handle MII ioctl calls
+ * @netdev: network interface device structure
+ * @ifreq: interface request structure
+ * @cmd: ioctl command
+ **/
+
+static int e1000_mii_ioctl(struct net_device *netdev, struct ifreq *ifr,
+ int cmd)
+{
+ struct e1000_adapter *adapter = netdev_priv(netdev);
+ struct e1000_hw *hw = &adapter->hw;
+ struct mii_ioctl_data *data = if_mii(ifr);
+ int retval;
+ u16 mii_reg;
+ u16 spddplx;
+ unsigned long flags;
+
+ if (hw->media_type != e1000_media_type_copper)
+ return -EOPNOTSUPP;
+
+ switch (cmd) {
+ case SIOCGMIIPHY:
+ data->phy_id = hw->phy_addr;
+ break;
+ case SIOCGMIIREG:
+ if (!capable(CAP_NET_ADMIN))
+ return -EPERM;
+ spin_lock_irqsave(&adapter->stats_lock, flags);
+ if (e1000_read_phy_reg(hw, data->reg_num & 0x1F,
+ &data->val_out)) {
+ spin_unlock_irqrestore(&adapter->stats_lock, flags);
+ return -EIO;
+ }
+ spin_unlock_irqrestore(&adapter->stats_lock, flags);
+ break;
+ case SIOCSMIIREG:
+ if (!capable(CAP_NET_ADMIN))
+ return -EPERM;
+ if (data->reg_num & ~(0x1F))
+ return -EFAULT;
+ mii_reg = data->val_in;
+ spin_lock_irqsave(&adapter->stats_lock, flags);
+ if (e1000_write_phy_reg(hw, data->reg_num,
+ mii_reg)) {
+ spin_unlock_irqrestore(&adapter->stats_lock, flags);
+ return -EIO;
+ }
+ spin_unlock_irqrestore(&adapter->stats_lock, flags);
+ if (hw->media_type == e1000_media_type_copper) {
+ switch (data->reg_num) {
+ case PHY_CTRL:
+ if (mii_reg & MII_CR_POWER_DOWN)
+ break;
+ if (mii_reg & MII_CR_AUTO_NEG_EN) {
+ hw->autoneg = 1;
+ hw->autoneg_advertised = 0x2F;
+ } else {
+ if (mii_reg & 0x40)
+ spddplx = SPEED_1000;
+ else if (mii_reg & 0x2000)
+ spddplx = SPEED_100;
+ else
+ spddplx = SPEED_10;
+ spddplx += (mii_reg & 0x100)
+ ? DUPLEX_FULL :
+ DUPLEX_HALF;
+ retval = e1000_set_spd_dplx(adapter,
+ spddplx);
+ if (retval)
+ return retval;
+ }
+ if (netif_running(adapter->netdev))
+ e1000_reinit_locked(adapter);
+ else
+ e1000_reset(adapter);
+ break;
+ case M88E1000_PHY_SPEC_CTRL:
+ case M88E1000_EXT_PHY_SPEC_CTRL:
+ if (e1000_phy_reset(hw))
+ return -EIO;
+ break;
+ }
+ } else {
+ switch (data->reg_num) {
+ case PHY_CTRL:
+ if (mii_reg & MII_CR_POWER_DOWN)
+ break;
+ if (netif_running(adapter->netdev))
+ e1000_reinit_locked(adapter);
+ else
+ e1000_reset(adapter);
+ break;
+ }
+ }
+ break;
+ default:
+ return -EOPNOTSUPP;
+ }
+ return E1000_SUCCESS;
+}
+
+void e1000_pci_set_mwi(struct e1000_hw *hw)
+{
+ struct e1000_adapter *adapter = hw->back;
+ int ret_val = pci_set_mwi(adapter->pdev);
+
+ if (ret_val)
+ DPRINTK(PROBE, ERR, "Error in setting MWI\n");
+}
+
+void e1000_pci_clear_mwi(struct e1000_hw *hw)
+{
+ struct e1000_adapter *adapter = hw->back;
+
+ pci_clear_mwi(adapter->pdev);
+}
+
+int e1000_pcix_get_mmrbc(struct e1000_hw *hw)
+{
+ struct e1000_adapter *adapter = hw->back;
+ return pcix_get_mmrbc(adapter->pdev);
+}
+
+void e1000_pcix_set_mmrbc(struct e1000_hw *hw, int mmrbc)
+{
+ struct e1000_adapter *adapter = hw->back;
+ pcix_set_mmrbc(adapter->pdev, mmrbc);
+}
+
+s32 e1000_read_pcie_cap_reg(struct e1000_hw *hw, u32 reg, u16 *value)
+{
+ struct e1000_adapter *adapter = hw->back;
+ u16 cap_offset;
+
+ cap_offset = pci_find_capability(adapter->pdev, PCI_CAP_ID_EXP);
+ if (!cap_offset)
+ return -E1000_ERR_CONFIG;
+
+ pci_read_config_word(adapter->pdev, cap_offset + reg, value);
+
+ return E1000_SUCCESS;
+}
+
+void e1000_io_write(struct e1000_hw *hw, unsigned long port, u32 value)
+{
+ outl(value, port);
+}
+
+static void e1000_vlan_rx_register(struct net_device *netdev,
+ struct vlan_group *grp)
+{
+ struct e1000_adapter *adapter = netdev_priv(netdev);
+ struct e1000_hw *hw = &adapter->hw;
+ u32 ctrl, rctl;
+
+ if (!test_bit(__E1000_DOWN, &adapter->flags))
+ e1000_irq_disable(adapter);
+ adapter->vlgrp = grp;
+
+ if (grp) {
+ /* enable VLAN tag insert/strip */
+ ctrl = er32(CTRL);
+ ctrl |= E1000_CTRL_VME;
+ ew32(CTRL, ctrl);
+
+ if (adapter->hw.mac_type != e1000_ich8lan) {
+ /* enable VLAN receive filtering */
+ rctl = er32(RCTL);
+ rctl &= ~E1000_RCTL_CFIEN;
+ ew32(RCTL, rctl);
+ e1000_update_mng_vlan(adapter);
+ }
+ } else {
+ /* disable VLAN tag insert/strip */
+ ctrl = er32(CTRL);
+ ctrl &= ~E1000_CTRL_VME;
+ ew32(CTRL, ctrl);
+
+ if (adapter->hw.mac_type != e1000_ich8lan) {
+ if (adapter->mng_vlan_id !=
+ (u16)E1000_MNG_VLAN_NONE) {
+ e1000_vlan_rx_kill_vid(netdev,
+ adapter->mng_vlan_id);
+ adapter->mng_vlan_id = E1000_MNG_VLAN_NONE;
+ }
+ }
+ }
+
+ if (!test_bit(__E1000_DOWN, &adapter->flags))
+ e1000_irq_enable(adapter);
+}
+
+static void e1000_vlan_rx_add_vid(struct net_device *netdev, u16 vid)
+{
+ struct e1000_adapter *adapter = netdev_priv(netdev);
+ struct e1000_hw *hw = &adapter->hw;
+ u32 vfta, index;
+
+ if ((hw->mng_cookie.status &
+ E1000_MNG_DHCP_COOKIE_STATUS_VLAN_SUPPORT) &&
+ (vid == adapter->mng_vlan_id))
+ return;
+ /* add VID to filter table */
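+	/* e.g. VID 100 selects dword 100 >> 5 = 3 of the 128-entry VFTA
+	 * array and bit 100 & 0x1F = 4; 128 * 32 bits cover all 4096
+	 * VLAN IDs. */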
+ index = (vid >> 5) & 0x7F;
+ vfta = E1000_READ_REG_ARRAY(hw, VFTA, index);
+ vfta |= (1 << (vid & 0x1F));
+ e1000_write_vfta(hw, index, vfta);
+}
+
+static void e1000_vlan_rx_kill_vid(struct net_device *netdev, u16 vid)
+{
+ struct e1000_adapter *adapter = netdev_priv(netdev);
+ struct e1000_hw *hw = &adapter->hw;
+ u32 vfta, index;
+
+ if (!test_bit(__E1000_DOWN, &adapter->flags))
+ e1000_irq_disable(adapter);
+ vlan_group_set_device(adapter->vlgrp, vid, NULL);
+ if (!test_bit(__E1000_DOWN, &adapter->flags))
+ e1000_irq_enable(adapter);
+
+ if ((hw->mng_cookie.status &
+ E1000_MNG_DHCP_COOKIE_STATUS_VLAN_SUPPORT) &&
+ (vid == adapter->mng_vlan_id)) {
+ /* release control to f/w */
+ e1000_release_hw_control(adapter);
+ return;
+ }
+
+ /* remove VID from filter table */
+ index = (vid >> 5) & 0x7F;
+ vfta = E1000_READ_REG_ARRAY(hw, VFTA, index);
+ vfta &= ~(1 << (vid & 0x1F));
+ e1000_write_vfta(hw, index, vfta);
+}
+
+static void e1000_restore_vlan(struct e1000_adapter *adapter)
+{
+ e1000_vlan_rx_register(adapter->netdev, adapter->vlgrp);
+
+ if (adapter->vlgrp) {
+ u16 vid;
+ for (vid = 0; vid < VLAN_GROUP_ARRAY_LEN; vid++) {
+ if (!vlan_group_get_device(adapter->vlgrp, vid))
+ continue;
+ e1000_vlan_rx_add_vid(adapter->netdev, vid);
+ }
+ }
+}
+
+int e1000_set_spd_dplx(struct e1000_adapter *adapter, u16 spddplx)
+{
+ struct e1000_hw *hw = &adapter->hw;
+
+ hw->autoneg = 0;
+
+	/* Fiber NICs only allow 1000 Mbps full duplex */
+ if ((hw->media_type == e1000_media_type_fiber) &&
+ spddplx != (SPEED_1000 + DUPLEX_FULL)) {
+ DPRINTK(PROBE, ERR, "Unsupported Speed/Duplex configuration\n");
+ return -EINVAL;
+ }
+
+ switch (spddplx) {
+ case SPEED_10 + DUPLEX_HALF:
+ hw->forced_speed_duplex = e1000_10_half;
+ break;
+ case SPEED_10 + DUPLEX_FULL:
+ hw->forced_speed_duplex = e1000_10_full;
+ break;
+ case SPEED_100 + DUPLEX_HALF:
+ hw->forced_speed_duplex = e1000_100_half;
+ break;
+ case SPEED_100 + DUPLEX_FULL:
+ hw->forced_speed_duplex = e1000_100_full;
+ break;
+ case SPEED_1000 + DUPLEX_FULL:
+ hw->autoneg = 1;
+ hw->autoneg_advertised = ADVERTISE_1000_FULL;
+ break;
+ case SPEED_1000 + DUPLEX_HALF: /* not supported */
+ default:
+ DPRINTK(PROBE, ERR, "Unsupported Speed/Duplex configuration\n");
+ return -EINVAL;
+ }
+ return 0;
+}
+
+static int e1000_suspend(struct pci_dev *pdev, pm_message_t state)
+{
+ struct net_device *netdev = pci_get_drvdata(pdev);
+ struct e1000_adapter *adapter = netdev_priv(netdev);
+ struct e1000_hw *hw = &adapter->hw;
+ u32 ctrl, ctrl_ext, rctl, status;
+ u32 wufc = adapter->wol;
+#ifdef CONFIG_PM
+ int retval = 0;
+#endif
+
+ netif_device_detach(netdev);
+
+ if (netif_running(netdev)) {
+ WARN_ON(test_bit(__E1000_RESETTING, &adapter->flags));
+ e1000_down(adapter);
+ }
+
+#ifdef CONFIG_PM
+ retval = pci_save_state(pdev);
+ if (retval)
+ return retval;
+#endif
+
+ status = er32(STATUS);
+ if (status & E1000_STATUS_LU)
+ wufc &= ~E1000_WUFC_LNKC;
+
+ if (wufc) {
+ e1000_setup_rctl(adapter);
+ e1000_set_rx_mode(netdev);
+
+ /* turn on all-multi mode if wake on multicast is enabled */
+ if (wufc & E1000_WUFC_MC) {
+ rctl = er32(RCTL);
+ rctl |= E1000_RCTL_MPE;
+ ew32(RCTL, rctl);
+ }
+
+ if (hw->mac_type >= e1000_82540) {
+ ctrl = er32(CTRL);
+ /* advertise wake from D3Cold */
+ #define E1000_CTRL_ADVD3WUC 0x00100000
+ /* phy power management enable */
+ #define E1000_CTRL_EN_PHY_PWR_MGMT 0x00200000
+ ctrl |= E1000_CTRL_ADVD3WUC |
+ E1000_CTRL_EN_PHY_PWR_MGMT;
+ ew32(CTRL, ctrl);
+ }
+
+ if (hw->media_type == e1000_media_type_fiber ||
+ hw->media_type == e1000_media_type_internal_serdes) {
+ /* keep the laser running in D3 */
+ ctrl_ext = er32(CTRL_EXT);
+ ctrl_ext |= E1000_CTRL_EXT_SDP7_DATA;
+ ew32(CTRL_EXT, ctrl_ext);
+ }
+
+ /* Allow time for pending master requests to run */
+ e1000_disable_pciex_master(hw);
+
+ ew32(WUC, E1000_WUC_PME_EN);
+ ew32(WUFC, wufc);
+ pci_enable_wake(pdev, PCI_D3hot, 1);
+ pci_enable_wake(pdev, PCI_D3cold, 1);
+ } else {
+ ew32(WUC, 0);
+ ew32(WUFC, 0);
+ pci_enable_wake(pdev, PCI_D3hot, 0);
+ pci_enable_wake(pdev, PCI_D3cold, 0);
+ }
+
+ e1000_release_manageability(adapter);
+
+ /* make sure adapter isn't asleep if manageability is enabled */
+ if (adapter->en_mng_pt) {
+ pci_enable_wake(pdev, PCI_D3hot, 1);
+ pci_enable_wake(pdev, PCI_D3cold, 1);
+ }
+
+ if (hw->phy_type == e1000_phy_igp_3)
+ e1000_phy_powerdown_workaround(hw);
+
+ if (netif_running(netdev))
+ e1000_free_irq(adapter);
+
+ /* Release control of h/w to f/w. If f/w is AMT enabled, this
+ * would have already happened in close and is redundant. */
+ e1000_release_hw_control(adapter);
+
+ pci_disable_device(pdev);
+
+ pci_set_power_state(pdev, pci_choose_state(pdev, state));
+
+ return 0;
+}
+
+#ifdef CONFIG_PM
+static int e1000_resume(struct pci_dev *pdev)
+{
+ struct net_device *netdev = pci_get_drvdata(pdev);
+ struct e1000_adapter *adapter = netdev_priv(netdev);
+ struct e1000_hw *hw = &adapter->hw;
+ u32 err;
+
+ pci_set_power_state(pdev, PCI_D0);
+ pci_restore_state(pdev);
+
+ if (adapter->need_ioport)
+ err = pci_enable_device(pdev);
+ else
+ err = pci_enable_device_mem(pdev);
+ if (err) {
+ printk(KERN_ERR "e1000: Cannot enable PCI device from suspend\n");
+ return err;
+ }
+ pci_set_master(pdev);
+
+ pci_enable_wake(pdev, PCI_D3hot, 0);
+ pci_enable_wake(pdev, PCI_D3cold, 0);
+
+ if (netif_running(netdev)) {
+ err = e1000_request_irq(adapter);
+ if (err)
+ return err;
+ }
+
+ e1000_power_up_phy(adapter);
+ e1000_reset(adapter);
+ ew32(WUS, ~0);
+
+ e1000_init_manageability(adapter);
+
+ if (netif_running(netdev))
+ e1000_up(adapter);
+
+ netif_device_attach(netdev);
+
+ /* If the controller is 82573 and f/w is AMT, do not set
+ * DRV_LOAD until the interface is up. For all other cases,
+ * let the f/w know that the h/w is now under the control
+ * of the driver. */
+ if (hw->mac_type != e1000_82573 ||
+ !e1000_check_mng_mode(hw))
+ e1000_get_hw_control(adapter);
+
+ return 0;
+}
+#endif
+
+static void e1000_shutdown(struct pci_dev *pdev)
+{
+ e1000_suspend(pdev, PMSG_SUSPEND);
+}
+
+#ifdef CONFIG_NET_POLL_CONTROLLER
+/*
+ * Polling 'interrupt' - used by things like netconsole to send skbs
+ * without having to re-enable interrupts. It's not called while
+ * the interrupt routine is executing.
+ */
+static void e1000_netpoll(struct net_device *netdev)
+{
+ struct e1000_adapter *adapter = netdev_priv(netdev);
+
+ disable_irq(adapter->pdev->irq);
+ e1000_intr(adapter->pdev->irq, netdev);
+ enable_irq(adapter->pdev->irq);
+}
+#endif
+
+/**
+ * e1000_io_error_detected - called when PCI error is detected
+ * @pdev: Pointer to PCI device
+ * @state: The current pci connection state
+ *
+ * This function is called after a PCI bus error affecting
+ * this device has been detected.
+ */
+static pci_ers_result_t e1000_io_error_detected(struct pci_dev *pdev,
+ pci_channel_state_t state)
+{
+ struct net_device *netdev = pci_get_drvdata(pdev);
+ struct e1000_adapter *adapter = netdev->priv;
+
+ netif_device_detach(netdev);
+
+ if (netif_running(netdev))
+ e1000_down(adapter);
+ pci_disable_device(pdev);
+
+	/* Request a slot reset. */
+ return PCI_ERS_RESULT_NEED_RESET;
+}
+
+/**
+ * e1000_io_slot_reset - called after the pci bus has been reset.
+ * @pdev: Pointer to PCI device
+ *
+ * Restart the card from scratch, as if from a cold boot. Implementation
+ * resembles the first half of the e1000_resume routine.
+ */
+static pci_ers_result_t e1000_io_slot_reset(struct pci_dev *pdev)
+{
+ struct net_device *netdev = pci_get_drvdata(pdev);
+ struct e1000_adapter *adapter = netdev->priv;
+ struct e1000_hw *hw = &adapter->hw;
+ int err;
+
+ if (adapter->need_ioport)
+ err = pci_enable_device(pdev);
+ else
+ err = pci_enable_device_mem(pdev);
+ if (err) {
+ printk(KERN_ERR "e1000: Cannot re-enable PCI device after reset.\n");
+ return PCI_ERS_RESULT_DISCONNECT;
+ }
+ pci_set_master(pdev);
+
+ pci_enable_wake(pdev, PCI_D3hot, 0);
+ pci_enable_wake(pdev, PCI_D3cold, 0);
+
+ e1000_reset(adapter);
+ ew32(WUS, ~0);
+
+ return PCI_ERS_RESULT_RECOVERED;
+}
+
+/**
+ * e1000_io_resume - called when traffic can start flowing again.
+ * @pdev: Pointer to PCI device
+ *
+ * This callback is called when the error recovery driver tells us that
+ * it's OK to resume normal operation. Implementation resembles the
+ * second half of the e1000_resume routine.
+ */
+static void e1000_io_resume(struct pci_dev *pdev)
+{
+ struct net_device *netdev = pci_get_drvdata(pdev);
+ struct e1000_adapter *adapter = netdev->priv;
+ struct e1000_hw *hw = &adapter->hw;
+
+ e1000_init_manageability(adapter);
+
+ if (netif_running(netdev)) {
+ if (e1000_up(adapter)) {
+ printk("e1000: can't bring device back up after reset\n");
+ return;
+ }
+ }
+
+ netif_device_attach(netdev);
+
+ /* If the controller is 82573 and f/w is AMT, do not set
+ * DRV_LOAD until the interface is up. For all other cases,
+ * let the f/w know that the h/w is now under the control
+ * of the driver. */
+ if (hw->mac_type != e1000_82573 ||
+ !e1000_check_mng_mode(hw))
+ e1000_get_hw_control(adapter);
+
+}
+
+/* e1000_main.c */
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/devices/e1000/e1000_osdep-2.6.28-ethercat.h Mon Feb 07 21:30:25 2011 +0100
@@ -0,0 +1,113 @@
+/*******************************************************************************
+
+ Intel PRO/1000 Linux driver
+ Copyright(c) 1999 - 2006 Intel Corporation.
+
+ This program is free software; you can redistribute it and/or modify it
+ under the terms and conditions of the GNU General Public License,
+ version 2, as published by the Free Software Foundation.
+
+ This program is distributed in the hope it will be useful, but WITHOUT
+ ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ more details.
+
+ You should have received a copy of the GNU General Public License along with
+ this program; if not, write to the Free Software Foundation, Inc.,
+ 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
+
+ The full GNU General Public License is included in this distribution in
+ the file called "COPYING".
+
+ Contact Information:
+ Linux NICS <linux.nics@intel.com>
+ e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
+ Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
+
+*******************************************************************************/
+
+
+/* glue for the OS independent part of e1000
+ * includes register access macros
+ */
+
+#ifndef _E1000_OSDEP_H_
+#define _E1000_OSDEP_H_
+
+#include <linux/types.h>
+#include <linux/pci.h>
+#include <linux/delay.h>
+#include <asm/io.h>
+#include <linux/interrupt.h>
+#include <linux/sched.h>
+
+#ifdef DBG
+#define DEBUGOUT(S) printk(KERN_DEBUG S "\n")
+#define DEBUGOUT1(S, A...) printk(KERN_DEBUG S "\n", A)
+#else
+#define DEBUGOUT(S)
+#define DEBUGOUT1(S, A...)
+#endif
+
+#define DEBUGFUNC(F) DEBUGOUT(F "\n")
+#define DEBUGOUT2 DEBUGOUT1
+#define DEBUGOUT3 DEBUGOUT2
+#define DEBUGOUT7 DEBUGOUT3
+
+
+#define er32(reg) \
+ (readl(hw->hw_addr + ((hw->mac_type >= e1000_82543) \
+ ? E1000_##reg : E1000_82542_##reg)))
+
+#define ew32(reg, value) \
+ (writel((value), (hw->hw_addr + ((hw->mac_type >= e1000_82543) \
+ ? E1000_##reg : E1000_82542_##reg))))
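+/* Usage sketch: er32(STATUS) reads the register at offset E1000_STATUS on
+ * 82543 and newer MACs (E1000_82542_STATUS on older parts), and
+ * ew32(RCTL, rctl) writes rctl the same way. */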
+
+#define E1000_WRITE_REG_ARRAY(a, reg, offset, value) ( \
+ writel((value), ((a)->hw_addr + \
+ (((a)->mac_type >= e1000_82543) ? E1000_##reg : E1000_82542_##reg) + \
+ ((offset) << 2))))
+
+#define E1000_READ_REG_ARRAY(a, reg, offset) ( \
+ readl((a)->hw_addr + \
+ (((a)->mac_type >= e1000_82543) ? E1000_##reg : E1000_82542_##reg) + \
+ ((offset) << 2)))
+
+#define E1000_READ_REG_ARRAY_DWORD E1000_READ_REG_ARRAY
+#define E1000_WRITE_REG_ARRAY_DWORD E1000_WRITE_REG_ARRAY
+
+#define E1000_WRITE_REG_ARRAY_WORD(a, reg, offset, value) ( \
+ writew((value), ((a)->hw_addr + \
+ (((a)->mac_type >= e1000_82543) ? E1000_##reg : E1000_82542_##reg) + \
+ ((offset) << 1))))
+
+#define E1000_READ_REG_ARRAY_WORD(a, reg, offset) ( \
+ readw((a)->hw_addr + \
+ (((a)->mac_type >= e1000_82543) ? E1000_##reg : E1000_82542_##reg) + \
+ ((offset) << 1)))
+
+#define E1000_WRITE_REG_ARRAY_BYTE(a, reg, offset, value) ( \
+ writeb((value), ((a)->hw_addr + \
+ (((a)->mac_type >= e1000_82543) ? E1000_##reg : E1000_82542_##reg) + \
+ (offset))))
+
+#define E1000_READ_REG_ARRAY_BYTE(a, reg, offset) ( \
+ readb((a)->hw_addr + \
+ (((a)->mac_type >= e1000_82543) ? E1000_##reg : E1000_82542_##reg) + \
+ (offset)))
+
+#define E1000_WRITE_FLUSH() er32(STATUS)
+
+#define E1000_WRITE_ICH_FLASH_REG(a, reg, value) ( \
+ writel((value), ((a)->flash_address + reg)))
+
+#define E1000_READ_ICH_FLASH_REG(a, reg) ( \
+ readl((a)->flash_address + reg))
+
+#define E1000_WRITE_ICH_FLASH_REG16(a, reg, value) ( \
+ writew((value), ((a)->flash_address + reg)))
+
+#define E1000_READ_ICH_FLASH_REG16(a, reg) ( \
+ readw((a)->flash_address + reg))
+
+#endif /* _E1000_OSDEP_H_ */
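
A short usage sketch may help when reading the accessors above (illustrative
only, not part of the patched sources): er32(), ew32() and E1000_WRITE_FLUSH()
expand to plain readl()/writel() calls and assume a local variable named hw
(a struct e1000_hw *) in scope, while the *_REG_ARRAY macros take that pointer
explicitly and scale the offset by the element width. The hypothetical helper
below only touches registers the driver itself references (CTRL, STATUS via
the flush, and the multicast table array MTA):

    static void example_clear_mta(struct e1000_hw *hw)
    {
        u32 ctrl;
        u32 i;

        ctrl = er32(CTRL);    /* readl() of E1000_CTRL (or the 82542 offset) */
        ew32(CTRL, ctrl);     /* writel() it back unchanged */
        E1000_WRITE_FLUSH();  /* post the write by reading STATUS */

        /* the array accessors take the hw pointer explicitly and shift the
         * offset left by 2 because the 128 table entries are 32 bits wide */
        for (i = 0; i < 128; i++)
            E1000_WRITE_REG_ARRAY(hw, MTA, i, 0);
    }
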
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/devices/e1000/e1000_osdep-2.6.28-orig.h Mon Feb 07 21:30:25 2011 +0100
@@ -0,0 +1,113 @@
+/*******************************************************************************
+
+ Intel PRO/1000 Linux driver
+ Copyright(c) 1999 - 2006 Intel Corporation.
+
+ This program is free software; you can redistribute it and/or modify it
+ under the terms and conditions of the GNU General Public License,
+ version 2, as published by the Free Software Foundation.
+
+ This program is distributed in the hope it will be useful, but WITHOUT
+ ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ more details.
+
+ You should have received a copy of the GNU General Public License along with
+ this program; if not, write to the Free Software Foundation, Inc.,
+ 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
+
+ The full GNU General Public License is included in this distribution in
+ the file called "COPYING".
+
+ Contact Information:
+ Linux NICS <linux.nics@intel.com>
+ e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
+ Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
+
+*******************************************************************************/
+
+
+/* glue for the OS independent part of e1000
+ * includes register access macros
+ */
+
+#ifndef _E1000_OSDEP_H_
+#define _E1000_OSDEP_H_
+
+#include <linux/types.h>
+#include <linux/pci.h>
+#include <linux/delay.h>
+#include <asm/io.h>
+#include <linux/interrupt.h>
+#include <linux/sched.h>
+
+#ifdef DBG
+#define DEBUGOUT(S) printk(KERN_DEBUG S "\n")
+#define DEBUGOUT1(S, A...) printk(KERN_DEBUG S "\n", A)
+#else
+#define DEBUGOUT(S)
+#define DEBUGOUT1(S, A...)
+#endif
+
+#define DEBUGFUNC(F) DEBUGOUT(F "\n")
+#define DEBUGOUT2 DEBUGOUT1
+#define DEBUGOUT3 DEBUGOUT2
+#define DEBUGOUT7 DEBUGOUT3
+
+
+#define er32(reg) \
+ (readl(hw->hw_addr + ((hw->mac_type >= e1000_82543) \
+ ? E1000_##reg : E1000_82542_##reg)))
+
+#define ew32(reg, value) \
+ (writel((value), (hw->hw_addr + ((hw->mac_type >= e1000_82543) \
+ ? E1000_##reg : E1000_82542_##reg))))
+
+#define E1000_WRITE_REG_ARRAY(a, reg, offset, value) ( \
+ writel((value), ((a)->hw_addr + \
+ (((a)->mac_type >= e1000_82543) ? E1000_##reg : E1000_82542_##reg) + \
+ ((offset) << 2))))
+
+#define E1000_READ_REG_ARRAY(a, reg, offset) ( \
+ readl((a)->hw_addr + \
+ (((a)->mac_type >= e1000_82543) ? E1000_##reg : E1000_82542_##reg) + \
+ ((offset) << 2)))
+
+#define E1000_READ_REG_ARRAY_DWORD E1000_READ_REG_ARRAY
+#define E1000_WRITE_REG_ARRAY_DWORD E1000_WRITE_REG_ARRAY
+
+#define E1000_WRITE_REG_ARRAY_WORD(a, reg, offset, value) ( \
+ writew((value), ((a)->hw_addr + \
+ (((a)->mac_type >= e1000_82543) ? E1000_##reg : E1000_82542_##reg) + \
+ ((offset) << 1))))
+
+#define E1000_READ_REG_ARRAY_WORD(a, reg, offset) ( \
+ readw((a)->hw_addr + \
+ (((a)->mac_type >= e1000_82543) ? E1000_##reg : E1000_82542_##reg) + \
+ ((offset) << 1)))
+
+#define E1000_WRITE_REG_ARRAY_BYTE(a, reg, offset, value) ( \
+ writeb((value), ((a)->hw_addr + \
+ (((a)->mac_type >= e1000_82543) ? E1000_##reg : E1000_82542_##reg) + \
+ (offset))))
+
+#define E1000_READ_REG_ARRAY_BYTE(a, reg, offset) ( \
+ readb((a)->hw_addr + \
+ (((a)->mac_type >= e1000_82543) ? E1000_##reg : E1000_82542_##reg) + \
+ (offset)))
+
+#define E1000_WRITE_FLUSH() er32(STATUS)
+
+#define E1000_WRITE_ICH_FLASH_REG(a, reg, value) ( \
+ writel((value), ((a)->flash_address + reg)))
+
+#define E1000_READ_ICH_FLASH_REG(a, reg) ( \
+ readl((a)->flash_address + reg))
+
+#define E1000_WRITE_ICH_FLASH_REG16(a, reg, value) ( \
+ writew((value), ((a)->flash_address + reg)))
+
+#define E1000_READ_ICH_FLASH_REG16(a, reg) ( \
+ readw((a)->flash_address + reg))
+
+#endif /* _E1000_OSDEP_H_ */
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/devices/e1000/e1000_param-2.6.28-ethercat.c Mon Feb 07 21:30:25 2011 +0100
@@ -0,0 +1,792 @@
+/*******************************************************************************
+
+ Intel PRO/1000 Linux driver
+ Copyright(c) 1999 - 2006 Intel Corporation.
+
+ This program is free software; you can redistribute it and/or modify it
+ under the terms and conditions of the GNU General Public License,
+ version 2, as published by the Free Software Foundation.
+
+ This program is distributed in the hope it will be useful, but WITHOUT
+ ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ more details.
+
+ You should have received a copy of the GNU General Public License along with
+ this program; if not, write to the Free Software Foundation, Inc.,
+ 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
+
+ The full GNU General Public License is included in this distribution in
+ the file called "COPYING".
+
+ Contact Information:
+ Linux NICS <linux.nics@intel.com>
+ e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
+ Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
+
+*******************************************************************************/
+
+#include "e1000-2.6.28-ethercat.h"
+
+/* This is the only thing that needs to be changed to adjust the
+ * maximum number of ports that the driver can manage.
+ */
+
+#define E1000_MAX_NIC 32
+
+#define OPTION_UNSET -1
+#define OPTION_DISABLED 0
+#define OPTION_ENABLED 1
+
+/* All parameters are treated the same, as an integer array of values.
+ * This macro just reduces the need to repeat the same declaration code
+ * over and over (plus this helps to avoid typo bugs).
+ */
+
+#define E1000_PARAM_INIT { [0 ... E1000_MAX_NIC] = OPTION_UNSET }
+#define E1000_PARAM(X, desc) \
+ static int __devinitdata X[E1000_MAX_NIC+1] = E1000_PARAM_INIT; \
+ static unsigned int num_##X; \
+ module_param_array_named(X, X, int, &num_##X, 0); \
+ MODULE_PARM_DESC(X, desc);
+
+/* Transmit Descriptor Count
+ *
+ * Valid Range: 80-256 for 82542 and 82543 gigabit ethernet controllers
+ * Valid Range: 80-4096 for 82544 and newer
+ *
+ * Default Value: 256
+ */
+E1000_PARAM(TxDescriptors, "Number of transmit descriptors");
+
+/* Receive Descriptor Count
+ *
+ * Valid Range: 80-256 for 82542 and 82543 gigabit ethernet controllers
+ * Valid Range: 80-4096 for 82544 and newer
+ *
+ * Default Value: 256
+ */
+E1000_PARAM(RxDescriptors, "Number of receive descriptors");
+
+/* User Specified Speed Override
+ *
+ * Valid Range: 0, 10, 100, 1000
+ * - 0 - auto-negotiate at all supported speeds
+ * - 10 - only link at 10 Mbps
+ * - 100 - only link at 100 Mbps
+ * - 1000 - only link at 1000 Mbps
+ *
+ * Default Value: 0
+ */
+E1000_PARAM(Speed, "Speed setting");
+
+/* User Specified Duplex Override
+ *
+ * Valid Range: 0-2
+ * - 0 - auto-negotiate for duplex
+ * - 1 - only link at half duplex
+ * - 2 - only link at full duplex
+ *
+ * Default Value: 0
+ */
+E1000_PARAM(Duplex, "Duplex setting");
+
+/* Auto-negotiation Advertisement Override
+ *
+ * Valid Range: 0x01-0x0F, 0x20-0x2F (copper); 0x20 (fiber)
+ *
+ * The AutoNeg value is a bit mask describing which speed and duplex
+ * combinations should be advertised during auto-negotiation.
+ * The supported speed and duplex modes are listed below
+ *
+ * Bit 7 6 5 4 3 2 1 0
+ * Speed (Mbps) N/A N/A 1000 N/A 100 100 10 10
+ * Duplex Full Full Half Full Half
+ *
+ * Default Value: 0x2F (copper); 0x20 (fiber)
+ */
+E1000_PARAM(AutoNeg, "Advertised auto-negotiation setting");
+#define AUTONEG_ADV_DEFAULT 0x2F
+#define AUTONEG_ADV_MASK 0x2F
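+
+/* Worked example (illustrative note, not part of the original source):
+ * using the bit table above, the default advertisement mask decodes as
+ *
+ *   0x2F = 0x20 | 0x08 | 0x04 | 0x02 | 0x01
+ *        = 1000/FD + 100/FD + 100/HD + 10/FD + 10/HD
+ *
+ * which matches the 0x2f entry of the an_list table further down. Bits 6
+ * and 7 are unused, which is why AUTONEG_ADV_MASK equals the default.
+ */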
+
+/* User Specified Flow Control Override
+ *
+ * Valid Range: 0-3
+ * - 0 - No Flow Control
+ * - 1 - Rx only, respond to PAUSE frames but do not generate them
+ * - 2 - Tx only, generate PAUSE frames but ignore them on receive
+ * - 3 - Full Flow Control Support
+ *
+ * Default Value: Read flow control settings from the EEPROM
+ */
+E1000_PARAM(FlowControl, "Flow Control setting");
+#define FLOW_CONTROL_DEFAULT FLOW_CONTROL_FULL
+
+/* XsumRX - Receive Checksum Offload Enable/Disable
+ *
+ * Valid Range: 0, 1
+ * - 0 - disables all checksum offload
+ * - 1 - enables receive IP/TCP/UDP checksum offload
+ * on 82543 and newer -based NICs
+ *
+ * Default Value: 1
+ */
+E1000_PARAM(XsumRX, "Disable or enable Receive Checksum offload");
+
+/* Transmit Interrupt Delay in units of 1.024 microseconds
+ * Tx interrupt delay needs to typically be set to something non zero
+ *
+ * Valid Range: 0-65535
+ */
+E1000_PARAM(TxIntDelay, "Transmit Interrupt Delay");
+#define DEFAULT_TIDV 8
+#define MAX_TXDELAY 0xFFFF
+#define MIN_TXDELAY 0
+
+/* Transmit Absolute Interrupt Delay in units of 1.024 microseconds
+ *
+ * Valid Range: 0-65535
+ */
+E1000_PARAM(TxAbsIntDelay, "Transmit Absolute Interrupt Delay");
+#define DEFAULT_TADV 32
+#define MAX_TXABSDELAY 0xFFFF
+#define MIN_TXABSDELAY 0
+
+/* Receive Interrupt Delay in units of 1.024 microseconds
+ * hardware will likely hang if you set this to anything but zero.
+ *
+ * Valid Range: 0-65535
+ */
+E1000_PARAM(RxIntDelay, "Receive Interrupt Delay");
+#define DEFAULT_RDTR 0
+#define MAX_RXDELAY 0xFFFF
+#define MIN_RXDELAY 0
+
+/* Receive Absolute Interrupt Delay in units of 1.024 microseconds
+ *
+ * Valid Range: 0-65535
+ */
+E1000_PARAM(RxAbsIntDelay, "Receive Absolute Interrupt Delay");
+#define DEFAULT_RADV 8
+#define MAX_RXABSDELAY 0xFFFF
+#define MIN_RXABSDELAY 0
+
+/* Interrupt Throttle Rate (interrupts/sec)
+ *
+ * Valid Range: 100-100000 (0=off, 1=dynamic, 3=dynamic conservative)
+ */
+E1000_PARAM(InterruptThrottleRate, "Interrupt Throttling Rate");
+#define DEFAULT_ITR 3
+#define MAX_ITR 100000
+#define MIN_ITR 100
+
+/* Enable Smart Power Down of the PHY
+ *
+ * Valid Range: 0, 1
+ *
+ * Default Value: 0 (disabled)
+ */
+E1000_PARAM(SmartPowerDownEnable, "Enable PHY smart power down");
+
+/* Enable Kumeran Lock Loss workaround
+ *
+ * Valid Range: 0, 1
+ *
+ * Default Value: 1 (enabled)
+ */
+E1000_PARAM(KumeranLockLoss, "Enable Kumeran lock loss workaround");
+
+struct e1000_option {
+ enum { enable_option, range_option, list_option } type;
+ const char *name;
+ const char *err;
+ int def;
+ union {
+ struct { /* range_option info */
+ int min;
+ int max;
+ } r;
+ struct { /* list_option info */
+ int nr;
+ const struct e1000_opt_list { int i; char *str; } *p;
+ } l;
+ } arg;
+};
+
+static int __devinit e1000_validate_option(unsigned int *value,
+ const struct e1000_option *opt,
+ struct e1000_adapter *adapter)
+{
+ if (*value == OPTION_UNSET) {
+ *value = opt->def;
+ return 0;
+ }
+
+ switch (opt->type) {
+ case enable_option:
+ switch (*value) {
+ case OPTION_ENABLED:
+ DPRINTK(PROBE, INFO, "%s Enabled\n", opt->name);
+ return 0;
+ case OPTION_DISABLED:
+ DPRINTK(PROBE, INFO, "%s Disabled\n", opt->name);
+ return 0;
+ }
+ break;
+ case range_option:
+ if (*value >= opt->arg.r.min && *value <= opt->arg.r.max) {
+ DPRINTK(PROBE, INFO,
+ "%s set to %i\n", opt->name, *value);
+ return 0;
+ }
+ break;
+ case list_option: {
+ int i;
+ const struct e1000_opt_list *ent;
+
+ for (i = 0; i < opt->arg.l.nr; i++) {
+ ent = &opt->arg.l.p[i];
+ if (*value == ent->i) {
+ if (ent->str[0] != '\0')
+ DPRINTK(PROBE, INFO, "%s\n", ent->str);
+ return 0;
+ }
+ }
+ }
+ break;
+ default:
+ BUG();
+ }
+
+ DPRINTK(PROBE, INFO, "Invalid %s value specified (%i) %s\n",
+ opt->name, *value, opt->err);
+ *value = opt->def;
+ return -1;
+}
+
+static void e1000_check_fiber_options(struct e1000_adapter *adapter);
+static void e1000_check_copper_options(struct e1000_adapter *adapter);
+
+/**
+ * e1000_check_options - Range Checking for Command Line Parameters
+ * @adapter: board private structure
+ *
+ * This routine checks all command line parameters for valid user
+ * input. If an invalid value is given, or if no user specified
+ * value exists, a default value is used. The final value is stored
+ * in a variable in the adapter structure.
+ **/
+
+void __devinit e1000_check_options(struct e1000_adapter *adapter)
+{
+ struct e1000_option opt;
+ int bd = adapter->bd_number;
+
+ if (bd >= E1000_MAX_NIC) {
+ DPRINTK(PROBE, NOTICE,
+ "Warning: no configuration for board #%i\n", bd);
+ DPRINTK(PROBE, NOTICE, "Using defaults for all values\n");
+ }
+
+ { /* Transmit Descriptor Count */
+ struct e1000_tx_ring *tx_ring = adapter->tx_ring;
+ int i;
+ e1000_mac_type mac_type = adapter->hw.mac_type;
+
+ opt = (struct e1000_option) {
+ .type = range_option,
+ .name = "Transmit Descriptors",
+ .err = "using default of "
+ __MODULE_STRING(E1000_DEFAULT_TXD),
+ .def = E1000_DEFAULT_TXD,
+ .arg = { .r = {
+ .min = E1000_MIN_TXD,
+ .max = mac_type < e1000_82544 ? E1000_MAX_TXD : E1000_MAX_82544_TXD
+ }}
+ };
+
+ if (num_TxDescriptors > bd) {
+ tx_ring->count = TxDescriptors[bd];
+ e1000_validate_option(&tx_ring->count, &opt, adapter);
+ tx_ring->count = ALIGN(tx_ring->count,
+ REQ_TX_DESCRIPTOR_MULTIPLE);
+ } else {
+ tx_ring->count = opt.def;
+ }
+ for (i = 0; i < adapter->num_tx_queues; i++)
+ tx_ring[i].count = tx_ring->count;
+ }
+ { /* Receive Descriptor Count */
+ struct e1000_rx_ring *rx_ring = adapter->rx_ring;
+ int i;
+ e1000_mac_type mac_type = adapter->hw.mac_type;
+
+ opt = (struct e1000_option) {
+ .type = range_option,
+ .name = "Receive Descriptors",
+ .err = "using default of "
+ __MODULE_STRING(E1000_DEFAULT_RXD),
+ .def = E1000_DEFAULT_RXD,
+ .arg = { .r = {
+ .min = E1000_MIN_RXD,
+ .max = mac_type < e1000_82544 ? E1000_MAX_RXD : E1000_MAX_82544_RXD
+ }}
+ };
+
+ if (num_RxDescriptors > bd) {
+ rx_ring->count = RxDescriptors[bd];
+ e1000_validate_option(&rx_ring->count, &opt, adapter);
+ rx_ring->count = ALIGN(rx_ring->count,
+ REQ_RX_DESCRIPTOR_MULTIPLE);
+ } else {
+ rx_ring->count = opt.def;
+ }
+ for (i = 0; i < adapter->num_rx_queues; i++)
+ rx_ring[i].count = rx_ring->count;
+ }
+ { /* Checksum Offload Enable/Disable */
+ opt = (struct e1000_option) {
+ .type = enable_option,
+ .name = "Checksum Offload",
+ .err = "defaulting to Enabled",
+ .def = OPTION_ENABLED
+ };
+
+ if (num_XsumRX > bd) {
+ unsigned int rx_csum = XsumRX[bd];
+ e1000_validate_option(&rx_csum, &opt, adapter);
+ adapter->rx_csum = rx_csum;
+ } else {
+ adapter->rx_csum = opt.def;
+ }
+ }
+ { /* Flow Control */
+
+ struct e1000_opt_list fc_list[] =
+ {{ E1000_FC_NONE, "Flow Control Disabled" },
+ { E1000_FC_RX_PAUSE,"Flow Control Receive Only" },
+ { E1000_FC_TX_PAUSE,"Flow Control Transmit Only" },
+ { E1000_FC_FULL, "Flow Control Enabled" },
+ { E1000_FC_DEFAULT, "Flow Control Hardware Default" }};
+
+ opt = (struct e1000_option) {
+ .type = list_option,
+ .name = "Flow Control",
+ .err = "reading default settings from EEPROM",
+ .def = E1000_FC_DEFAULT,
+ .arg = { .l = { .nr = ARRAY_SIZE(fc_list),
+ .p = fc_list }}
+ };
+
+ if (num_FlowControl > bd) {
+ unsigned int fc = FlowControl[bd];
+ e1000_validate_option(&fc, &opt, adapter);
+ adapter->hw.fc = adapter->hw.original_fc = fc;
+ } else {
+ adapter->hw.fc = adapter->hw.original_fc = opt.def;
+ }
+ }
+ { /* Transmit Interrupt Delay */
+ opt = (struct e1000_option) {
+ .type = range_option,
+ .name = "Transmit Interrupt Delay",
+ .err = "using default of " __MODULE_STRING(DEFAULT_TIDV),
+ .def = DEFAULT_TIDV,
+ .arg = { .r = { .min = MIN_TXDELAY,
+ .max = MAX_TXDELAY }}
+ };
+
+ if (num_TxIntDelay > bd) {
+ adapter->tx_int_delay = TxIntDelay[bd];
+ e1000_validate_option(&adapter->tx_int_delay, &opt,
+ adapter);
+ } else {
+ adapter->tx_int_delay = opt.def;
+ }
+ }
+ { /* Transmit Absolute Interrupt Delay */
+ opt = (struct e1000_option) {
+ .type = range_option,
+ .name = "Transmit Absolute Interrupt Delay",
+ .err = "using default of " __MODULE_STRING(DEFAULT_TADV),
+ .def = DEFAULT_TADV,
+ .arg = { .r = { .min = MIN_TXABSDELAY,
+ .max = MAX_TXABSDELAY }}
+ };
+
+ if (num_TxAbsIntDelay > bd) {
+ adapter->tx_abs_int_delay = TxAbsIntDelay[bd];
+ e1000_validate_option(&adapter->tx_abs_int_delay, &opt,
+ adapter);
+ } else {
+ adapter->tx_abs_int_delay = opt.def;
+ }
+ }
+ { /* Receive Interrupt Delay */
+ opt = (struct e1000_option) {
+ .type = range_option,
+ .name = "Receive Interrupt Delay",
+ .err = "using default of " __MODULE_STRING(DEFAULT_RDTR),
+ .def = DEFAULT_RDTR,
+ .arg = { .r = { .min = MIN_RXDELAY,
+ .max = MAX_RXDELAY }}
+ };
+
+ if (num_RxIntDelay > bd) {
+ adapter->rx_int_delay = RxIntDelay[bd];
+ e1000_validate_option(&adapter->rx_int_delay, &opt,
+ adapter);
+ } else {
+ adapter->rx_int_delay = opt.def;
+ }
+ }
+ { /* Receive Absolute Interrupt Delay */
+ opt = (struct e1000_option) {
+ .type = range_option,
+ .name = "Receive Absolute Interrupt Delay",
+ .err = "using default of " __MODULE_STRING(DEFAULT_RADV),
+ .def = DEFAULT_RADV,
+ .arg = { .r = { .min = MIN_RXABSDELAY,
+ .max = MAX_RXABSDELAY }}
+ };
+
+ if (num_RxAbsIntDelay > bd) {
+ adapter->rx_abs_int_delay = RxAbsIntDelay[bd];
+ e1000_validate_option(&adapter->rx_abs_int_delay, &opt,
+ adapter);
+ } else {
+ adapter->rx_abs_int_delay = opt.def;
+ }
+ }
+ { /* Interrupt Throttling Rate */
+ opt = (struct e1000_option) {
+ .type = range_option,
+ .name = "Interrupt Throttling Rate (ints/sec)",
+ .err = "using default of " __MODULE_STRING(DEFAULT_ITR),
+ .def = DEFAULT_ITR,
+ .arg = { .r = { .min = MIN_ITR,
+ .max = MAX_ITR }}
+ };
+
+ if (num_InterruptThrottleRate > bd) {
+ adapter->itr = InterruptThrottleRate[bd];
+ switch (adapter->itr) {
+ case 0:
+ DPRINTK(PROBE, INFO, "%s turned off\n",
+ opt.name);
+ break;
+ case 1:
+ DPRINTK(PROBE, INFO, "%s set to dynamic mode\n",
+ opt.name);
+ adapter->itr_setting = adapter->itr;
+ adapter->itr = 20000;
+ break;
+ case 3:
+ DPRINTK(PROBE, INFO,
+ "%s set to dynamic conservative mode\n",
+ opt.name);
+ adapter->itr_setting = adapter->itr;
+ adapter->itr = 20000;
+ break;
+ default:
+ e1000_validate_option(&adapter->itr, &opt,
+ adapter);
+ /* save the setting, because the dynamic bits change itr */
+ /* clear the lower two bits because they are
+ * used as control */
+ adapter->itr_setting = adapter->itr & ~3;
+ break;
+ }
+ } else {
+ adapter->itr_setting = opt.def;
+ adapter->itr = 20000;
+ }
+ }
+ { /* Smart Power Down */
+ opt = (struct e1000_option) {
+ .type = enable_option,
+ .name = "PHY Smart Power Down",
+ .err = "defaulting to Disabled",
+ .def = OPTION_DISABLED
+ };
+
+ if (num_SmartPowerDownEnable > bd) {
+ unsigned int spd = SmartPowerDownEnable[bd];
+ e1000_validate_option(&spd, &opt, adapter);
+ adapter->smart_power_down = spd;
+ } else {
+ adapter->smart_power_down = opt.def;
+ }
+ }
+ { /* Kumeran Lock Loss Workaround */
+ opt = (struct e1000_option) {
+ .type = enable_option,
+ .name = "Kumeran Lock Loss Workaround",
+ .err = "defaulting to Enabled",
+ .def = OPTION_ENABLED
+ };
+
+ if (num_KumeranLockLoss > bd) {
+ unsigned int kmrn_lock_loss = KumeranLockLoss[bd];
+ e1000_validate_option(&kmrn_lock_loss, &opt, adapter);
+ adapter->hw.kmrn_lock_loss_workaround_disabled = !kmrn_lock_loss;
+ } else {
+ adapter->hw.kmrn_lock_loss_workaround_disabled = !opt.def;
+ }
+ }
+
+ switch (adapter->hw.media_type) {
+ case e1000_media_type_fiber:
+ case e1000_media_type_internal_serdes:
+ e1000_check_fiber_options(adapter);
+ break;
+ case e1000_media_type_copper:
+ e1000_check_copper_options(adapter);
+ break;
+ default:
+ BUG();
+ }
+}
+
+/**
+ * e1000_check_fiber_options - Range Checking for Link Options, Fiber Version
+ * @adapter: board private structure
+ *
+ * Handles speed and duplex options on fiber adapters
+ **/
+
+static void __devinit e1000_check_fiber_options(struct e1000_adapter *adapter)
+{
+ int bd = adapter->bd_number;
+ if (num_Speed > bd) {
+ DPRINTK(PROBE, INFO, "Speed not valid for fiber adapters, "
+ "parameter ignored\n");
+ }
+
+ if (num_Duplex > bd) {
+ DPRINTK(PROBE, INFO, "Duplex not valid for fiber adapters, "
+ "parameter ignored\n");
+ }
+
+ if ((num_AutoNeg > bd) && (AutoNeg[bd] != 0x20)) {
+ DPRINTK(PROBE, INFO, "AutoNeg other than 1000/Full is "
+ "not valid for fiber adapters, "
+ "parameter ignored\n");
+ }
+}
+
+/**
+ * e1000_check_copper_options - Range Checking for Link Options, Copper Version
+ * @adapter: board private structure
+ *
+ * Handles speed and duplex options on copper adapters
+ **/
+
+static void __devinit e1000_check_copper_options(struct e1000_adapter *adapter)
+{
+ struct e1000_option opt;
+ unsigned int speed, dplx, an;
+ int bd = adapter->bd_number;
+
+ { /* Speed */
+ static const struct e1000_opt_list speed_list[] = {
+ { 0, "" },
+ { SPEED_10, "" },
+ { SPEED_100, "" },
+ { SPEED_1000, "" }};
+
+ opt = (struct e1000_option) {
+ .type = list_option,
+ .name = "Speed",
+ .err = "parameter ignored",
+ .def = 0,
+ .arg = { .l = { .nr = ARRAY_SIZE(speed_list),
+ .p = speed_list }}
+ };
+
+ if (num_Speed > bd) {
+ speed = Speed[bd];
+ e1000_validate_option(&speed, &opt, adapter);
+ } else {
+ speed = opt.def;
+ }
+ }
+ { /* Duplex */
+ static const struct e1000_opt_list dplx_list[] = {
+ { 0, "" },
+ { HALF_DUPLEX, "" },
+ { FULL_DUPLEX, "" }};
+
+ opt = (struct e1000_option) {
+ .type = list_option,
+ .name = "Duplex",
+ .err = "parameter ignored",
+ .def = 0,
+ .arg = { .l = { .nr = ARRAY_SIZE(dplx_list),
+ .p = dplx_list }}
+ };
+
+ if (e1000_check_phy_reset_block(&adapter->hw)) {
+ DPRINTK(PROBE, INFO,
+ "Link active due to SoL/IDER Session. "
+ "Speed/Duplex/AutoNeg parameter ignored.\n");
+ return;
+ }
+ if (num_Duplex > bd) {
+ dplx = Duplex[bd];
+ e1000_validate_option(&dplx, &opt, adapter);
+ } else {
+ dplx = opt.def;
+ }
+ }
+
+ if ((num_AutoNeg > bd) && (speed != 0 || dplx != 0)) {
+ DPRINTK(PROBE, INFO,
+ "AutoNeg specified along with Speed or Duplex, "
+ "parameter ignored\n");
+ adapter->hw.autoneg_advertised = AUTONEG_ADV_DEFAULT;
+ } else { /* Autoneg */
+ static const struct e1000_opt_list an_list[] =
+ #define AA "AutoNeg advertising "
+ {{ 0x01, AA "10/HD" },
+ { 0x02, AA "10/FD" },
+ { 0x03, AA "10/FD, 10/HD" },
+ { 0x04, AA "100/HD" },
+ { 0x05, AA "100/HD, 10/HD" },
+ { 0x06, AA "100/HD, 10/FD" },
+ { 0x07, AA "100/HD, 10/FD, 10/HD" },
+ { 0x08, AA "100/FD" },
+ { 0x09, AA "100/FD, 10/HD" },
+ { 0x0a, AA "100/FD, 10/FD" },
+ { 0x0b, AA "100/FD, 10/FD, 10/HD" },
+ { 0x0c, AA "100/FD, 100/HD" },
+ { 0x0d, AA "100/FD, 100/HD, 10/HD" },
+ { 0x0e, AA "100/FD, 100/HD, 10/FD" },
+ { 0x0f, AA "100/FD, 100/HD, 10/FD, 10/HD" },
+ { 0x20, AA "1000/FD" },
+ { 0x21, AA "1000/FD, 10/HD" },
+ { 0x22, AA "1000/FD, 10/FD" },
+ { 0x23, AA "1000/FD, 10/FD, 10/HD" },
+ { 0x24, AA "1000/FD, 100/HD" },
+ { 0x25, AA "1000/FD, 100/HD, 10/HD" },
+ { 0x26, AA "1000/FD, 100/HD, 10/FD" },
+ { 0x27, AA "1000/FD, 100/HD, 10/FD, 10/HD" },
+ { 0x28, AA "1000/FD, 100/FD" },
+ { 0x29, AA "1000/FD, 100/FD, 10/HD" },
+ { 0x2a, AA "1000/FD, 100/FD, 10/FD" },
+ { 0x2b, AA "1000/FD, 100/FD, 10/FD, 10/HD" },
+ { 0x2c, AA "1000/FD, 100/FD, 100/HD" },
+ { 0x2d, AA "1000/FD, 100/FD, 100/HD, 10/HD" },
+ { 0x2e, AA "1000/FD, 100/FD, 100/HD, 10/FD" },
+ { 0x2f, AA "1000/FD, 100/FD, 100/HD, 10/FD, 10/HD" }};
+
+ opt = (struct e1000_option) {
+ .type = list_option,
+ .name = "AutoNeg",
+ .err = "parameter ignored",
+ .def = AUTONEG_ADV_DEFAULT,
+ .arg = { .l = { .nr = ARRAY_SIZE(an_list),
+ .p = an_list }}
+ };
+
+ if (num_AutoNeg > bd) {
+ an = AutoNeg[bd];
+ e1000_validate_option(&an, &opt, adapter);
+ } else {
+ an = opt.def;
+ }
+ adapter->hw.autoneg_advertised = an;
+ }
+
+ switch (speed + dplx) {
+ case 0:
+ adapter->hw.autoneg = adapter->fc_autoneg = 1;
+ if ((num_Speed > bd) && (speed != 0 || dplx != 0))
+ DPRINTK(PROBE, INFO,
+ "Speed and duplex autonegotiation enabled\n");
+ break;
+ case HALF_DUPLEX:
+ DPRINTK(PROBE, INFO, "Half Duplex specified without Speed\n");
+ DPRINTK(PROBE, INFO, "Using Autonegotiation at "
+ "Half Duplex only\n");
+ adapter->hw.autoneg = adapter->fc_autoneg = 1;
+ adapter->hw.autoneg_advertised = ADVERTISE_10_HALF |
+ ADVERTISE_100_HALF;
+ break;
+ case FULL_DUPLEX:
+ DPRINTK(PROBE, INFO, "Full Duplex specified without Speed\n");
+ DPRINTK(PROBE, INFO, "Using Autonegotiation at "
+ "Full Duplex only\n");
+ adapter->hw.autoneg = adapter->fc_autoneg = 1;
+ adapter->hw.autoneg_advertised = ADVERTISE_10_FULL |
+ ADVERTISE_100_FULL |
+ ADVERTISE_1000_FULL;
+ break;
+ case SPEED_10:
+ DPRINTK(PROBE, INFO, "10 Mbps Speed specified "
+ "without Duplex\n");
+ DPRINTK(PROBE, INFO, "Using Autonegotiation at 10 Mbps only\n");
+ adapter->hw.autoneg = adapter->fc_autoneg = 1;
+ adapter->hw.autoneg_advertised = ADVERTISE_10_HALF |
+ ADVERTISE_10_FULL;
+ break;
+ case SPEED_10 + HALF_DUPLEX:
+ DPRINTK(PROBE, INFO, "Forcing to 10 Mbps Half Duplex\n");
+ adapter->hw.autoneg = adapter->fc_autoneg = 0;
+ adapter->hw.forced_speed_duplex = e1000_10_half;
+ adapter->hw.autoneg_advertised = 0;
+ break;
+ case SPEED_10 + FULL_DUPLEX:
+ DPRINTK(PROBE, INFO, "Forcing to 10 Mbps Full Duplex\n");
+ adapter->hw.autoneg = adapter->fc_autoneg = 0;
+ adapter->hw.forced_speed_duplex = e1000_10_full;
+ adapter->hw.autoneg_advertised = 0;
+ break;
+ case SPEED_100:
+ DPRINTK(PROBE, INFO, "100 Mbps Speed specified "
+ "without Duplex\n");
+ DPRINTK(PROBE, INFO, "Using Autonegotiation at "
+ "100 Mbps only\n");
+ adapter->hw.autoneg = adapter->fc_autoneg = 1;
+ adapter->hw.autoneg_advertised = ADVERTISE_100_HALF |
+ ADVERTISE_100_FULL;
+ break;
+ case SPEED_100 + HALF_DUPLEX:
+ DPRINTK(PROBE, INFO, "Forcing to 100 Mbps Half Duplex\n");
+ adapter->hw.autoneg = adapter->fc_autoneg = 0;
+ adapter->hw.forced_speed_duplex = e1000_100_half;
+ adapter->hw.autoneg_advertised = 0;
+ break;
+ case SPEED_100 + FULL_DUPLEX:
+ DPRINTK(PROBE, INFO, "Forcing to 100 Mbps Full Duplex\n");
+ adapter->hw.autoneg = adapter->fc_autoneg = 0;
+ adapter->hw.forced_speed_duplex = e1000_100_full;
+ adapter->hw.autoneg_advertised = 0;
+ break;
+ case SPEED_1000:
+ DPRINTK(PROBE, INFO, "1000 Mbps Speed specified without "
+ "Duplex\n");
+ goto full_duplex_only;
+ case SPEED_1000 + HALF_DUPLEX:
+ DPRINTK(PROBE, INFO,
+ "Half Duplex is not supported at 1000 Mbps\n");
+ /* fall through */
+ case SPEED_1000 + FULL_DUPLEX:
+full_duplex_only:
+ DPRINTK(PROBE, INFO,
+ "Using Autonegotiation at 1000 Mbps Full Duplex only\n");
+ adapter->hw.autoneg = adapter->fc_autoneg = 1;
+ adapter->hw.autoneg_advertised = ADVERTISE_1000_FULL;
+ break;
+ default:
+ BUG();
+ }
+
+ /* Speed, AutoNeg and MDI/MDI-X must all play nice */
+ if (e1000_validate_mdi_setting(&(adapter->hw)) < 0) {
+ DPRINTK(PROBE, INFO,
+ "Speed, AutoNeg and MDI-X specifications are "
+ "incompatible. Setting MDI-X to a compatible value.\n");
+ }
+}
+
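For reference, the E1000_PARAM() macro near the top of this file expands
mechanically; for TxDescriptors, for instance, the single macro line is
equivalent to roughly the following (a sketch reconstructed from the macro
definition itself):

    static int __devinitdata TxDescriptors[E1000_MAX_NIC + 1] =
        { [0 ... E1000_MAX_NIC] = OPTION_UNSET };
    static unsigned int num_TxDescriptors;
    module_param_array_named(TxDescriptors, TxDescriptors, int,
                             &num_TxDescriptors, 0);
    MODULE_PARM_DESC(TxDescriptors, "Number of transmit descriptors");

With the usual module_param_array semantics, a comma-separated list supplies
one value per board in probe order (e.g. TxDescriptors=4096,256), and
num_TxDescriptors records how many values were given; that is what the
"num_X > bd" tests in e1000_check_options() rely on, with any board index
not covered falling back to the option default.
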
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/devices/e1000/e1000_param-2.6.28-orig.c Mon Feb 07 21:30:25 2011 +0100
@@ -0,0 +1,792 @@
+/*******************************************************************************
+
+ Intel PRO/1000 Linux driver
+ Copyright(c) 1999 - 2006 Intel Corporation.
+
+ This program is free software; you can redistribute it and/or modify it
+ under the terms and conditions of the GNU General Public License,
+ version 2, as published by the Free Software Foundation.
+
+ This program is distributed in the hope it will be useful, but WITHOUT
+ ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ more details.
+
+ You should have received a copy of the GNU General Public License along with
+ this program; if not, write to the Free Software Foundation, Inc.,
+ 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
+
+ The full GNU General Public License is included in this distribution in
+ the file called "COPYING".
+
+ Contact Information:
+ Linux NICS <linux.nics@intel.com>
+ e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
+ Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
+
+*******************************************************************************/
+
+#include "e1000.h"
+
+/* This is the only thing that needs to be changed to adjust the
+ * maximum number of ports that the driver can manage.
+ */
+
+#define E1000_MAX_NIC 32
+
+#define OPTION_UNSET -1
+#define OPTION_DISABLED 0
+#define OPTION_ENABLED 1
+
+/* All parameters are treated the same, as an integer array of values.
+ * This macro just reduces the need to repeat the same declaration code
+ * over and over (plus this helps to avoid typo bugs).
+ */
+
+#define E1000_PARAM_INIT { [0 ... E1000_MAX_NIC] = OPTION_UNSET }
+#define E1000_PARAM(X, desc) \
+ static int __devinitdata X[E1000_MAX_NIC+1] = E1000_PARAM_INIT; \
+ static unsigned int num_##X; \
+ module_param_array_named(X, X, int, &num_##X, 0); \
+ MODULE_PARM_DESC(X, desc);
+
+/* Transmit Descriptor Count
+ *
+ * Valid Range: 80-256 for 82542 and 82543 gigabit ethernet controllers
+ * Valid Range: 80-4096 for 82544 and newer
+ *
+ * Default Value: 256
+ */
+E1000_PARAM(TxDescriptors, "Number of transmit descriptors");
+
+/* Receive Descriptor Count
+ *
+ * Valid Range: 80-256 for 82542 and 82543 gigabit ethernet controllers
+ * Valid Range: 80-4096 for 82544 and newer
+ *
+ * Default Value: 256
+ */
+E1000_PARAM(RxDescriptors, "Number of receive descriptors");
+
+/* User Specified Speed Override
+ *
+ * Valid Range: 0, 10, 100, 1000
+ * - 0 - auto-negotiate at all supported speeds
+ * - 10 - only link at 10 Mbps
+ * - 100 - only link at 100 Mbps
+ * - 1000 - only link at 1000 Mbps
+ *
+ * Default Value: 0
+ */
+E1000_PARAM(Speed, "Speed setting");
+
+/* User Specified Duplex Override
+ *
+ * Valid Range: 0-2
+ * - 0 - auto-negotiate for duplex
+ * - 1 - only link at half duplex
+ * - 2 - only link at full duplex
+ *
+ * Default Value: 0
+ */
+E1000_PARAM(Duplex, "Duplex setting");
+
+/* Auto-negotiation Advertisement Override
+ *
+ * Valid Range: 0x01-0x0F, 0x20-0x2F (copper); 0x20 (fiber)
+ *
+ * The AutoNeg value is a bit mask describing which speed and duplex
+ * combinations should be advertised during auto-negotiation.
+ * The supported speed and duplex modes are listed below
+ *
+ * Bit 7 6 5 4 3 2 1 0
+ * Speed (Mbps) N/A N/A 1000 N/A 100 100 10 10
+ * Duplex Full Full Half Full Half
+ *
+ * Default Value: 0x2F (copper); 0x20 (fiber)
+ */
+E1000_PARAM(AutoNeg, "Advertised auto-negotiation setting");
+#define AUTONEG_ADV_DEFAULT 0x2F
+#define AUTONEG_ADV_MASK 0x2F
+
+/* User Specified Flow Control Override
+ *
+ * Valid Range: 0-3
+ * - 0 - No Flow Control
+ * - 1 - Rx only, respond to PAUSE frames but do not generate them
+ * - 2 - Tx only, generate PAUSE frames but ignore them on receive
+ * - 3 - Full Flow Control Support
+ *
+ * Default Value: Read flow control settings from the EEPROM
+ */
+E1000_PARAM(FlowControl, "Flow Control setting");
+#define FLOW_CONTROL_DEFAULT FLOW_CONTROL_FULL
+
+/* XsumRX - Receive Checksum Offload Enable/Disable
+ *
+ * Valid Range: 0, 1
+ * - 0 - disables all checksum offload
+ * - 1 - enables receive IP/TCP/UDP checksum offload
+ * on 82543 and newer -based NICs
+ *
+ * Default Value: 1
+ */
+E1000_PARAM(XsumRX, "Disable or enable Receive Checksum offload");
+
+/* Transmit Interrupt Delay in units of 1.024 microseconds
+ * Tx interrupt delay needs to typically be set to something non zero
+ *
+ * Valid Range: 0-65535
+ */
+E1000_PARAM(TxIntDelay, "Transmit Interrupt Delay");
+#define DEFAULT_TIDV 8
+#define MAX_TXDELAY 0xFFFF
+#define MIN_TXDELAY 0
+
+/* Transmit Absolute Interrupt Delay in units of 1.024 microseconds
+ *
+ * Valid Range: 0-65535
+ */
+E1000_PARAM(TxAbsIntDelay, "Transmit Absolute Interrupt Delay");
+#define DEFAULT_TADV 32
+#define MAX_TXABSDELAY 0xFFFF
+#define MIN_TXABSDELAY 0
+
+/* Receive Interrupt Delay in units of 1.024 microseconds
+ * hardware will likely hang if you set this to anything but zero.
+ *
+ * Valid Range: 0-65535
+ */
+E1000_PARAM(RxIntDelay, "Receive Interrupt Delay");
+#define DEFAULT_RDTR 0
+#define MAX_RXDELAY 0xFFFF
+#define MIN_RXDELAY 0
+
+/* Receive Absolute Interrupt Delay in units of 1.024 microseconds
+ *
+ * Valid Range: 0-65535
+ */
+E1000_PARAM(RxAbsIntDelay, "Receive Absolute Interrupt Delay");
+#define DEFAULT_RADV 8
+#define MAX_RXABSDELAY 0xFFFF
+#define MIN_RXABSDELAY 0
+
+/* Interrupt Throttle Rate (interrupts/sec)
+ *
+ * Valid Range: 100-100000 (0=off, 1=dynamic, 3=dynamic conservative)
+ */
+E1000_PARAM(InterruptThrottleRate, "Interrupt Throttling Rate");
+#define DEFAULT_ITR 3
+#define MAX_ITR 100000
+#define MIN_ITR 100
+
+/* Enable Smart Power Down of the PHY
+ *
+ * Valid Range: 0, 1
+ *
+ * Default Value: 0 (disabled)
+ */
+E1000_PARAM(SmartPowerDownEnable, "Enable PHY smart power down");
+
+/* Enable Kumeran Lock Loss workaround
+ *
+ * Valid Range: 0, 1
+ *
+ * Default Value: 1 (enabled)
+ */
+E1000_PARAM(KumeranLockLoss, "Enable Kumeran lock loss workaround");
+
+struct e1000_option {
+ enum { enable_option, range_option, list_option } type;
+ const char *name;
+ const char *err;
+ int def;
+ union {
+ struct { /* range_option info */
+ int min;
+ int max;
+ } r;
+ struct { /* list_option info */
+ int nr;
+ const struct e1000_opt_list { int i; char *str; } *p;
+ } l;
+ } arg;
+};
+
+static int __devinit e1000_validate_option(unsigned int *value,
+ const struct e1000_option *opt,
+ struct e1000_adapter *adapter)
+{
+ if (*value == OPTION_UNSET) {
+ *value = opt->def;
+ return 0;
+ }
+
+ switch (opt->type) {
+ case enable_option:
+ switch (*value) {
+ case OPTION_ENABLED:
+ DPRINTK(PROBE, INFO, "%s Enabled\n", opt->name);
+ return 0;
+ case OPTION_DISABLED:
+ DPRINTK(PROBE, INFO, "%s Disabled\n", opt->name);
+ return 0;
+ }
+ break;
+ case range_option:
+ if (*value >= opt->arg.r.min && *value <= opt->arg.r.max) {
+ DPRINTK(PROBE, INFO,
+ "%s set to %i\n", opt->name, *value);
+ return 0;
+ }
+ break;
+ case list_option: {
+ int i;
+ const struct e1000_opt_list *ent;
+
+ for (i = 0; i < opt->arg.l.nr; i++) {
+ ent = &opt->arg.l.p[i];
+ if (*value == ent->i) {
+ if (ent->str[0] != '\0')
+ DPRINTK(PROBE, INFO, "%s\n", ent->str);
+ return 0;
+ }
+ }
+ }
+ break;
+ default:
+ BUG();
+ }
+
+ DPRINTK(PROBE, INFO, "Invalid %s value specified (%i) %s\n",
+ opt->name, *value, opt->err);
+ *value = opt->def;
+ return -1;
+}
+
+static void e1000_check_fiber_options(struct e1000_adapter *adapter);
+static void e1000_check_copper_options(struct e1000_adapter *adapter);
+
+/**
+ * e1000_check_options - Range Checking for Command Line Parameters
+ * @adapter: board private structure
+ *
+ * This routine checks all command line parameters for valid user
+ * input. If an invalid value is given, or if no user specified
+ * value exists, a default value is used. The final value is stored
+ * in a variable in the adapter structure.
+ **/
+
+void __devinit e1000_check_options(struct e1000_adapter *adapter)
+{
+ struct e1000_option opt;
+ int bd = adapter->bd_number;
+
+ if (bd >= E1000_MAX_NIC) {
+ DPRINTK(PROBE, NOTICE,
+ "Warning: no configuration for board #%i\n", bd);
+ DPRINTK(PROBE, NOTICE, "Using defaults for all values\n");
+ }
+
+ { /* Transmit Descriptor Count */
+ struct e1000_tx_ring *tx_ring = adapter->tx_ring;
+ int i;
+ e1000_mac_type mac_type = adapter->hw.mac_type;
+
+ opt = (struct e1000_option) {
+ .type = range_option,
+ .name = "Transmit Descriptors",
+ .err = "using default of "
+ __MODULE_STRING(E1000_DEFAULT_TXD),
+ .def = E1000_DEFAULT_TXD,
+ .arg = { .r = {
+ .min = E1000_MIN_TXD,
+ .max = mac_type < e1000_82544 ? E1000_MAX_TXD : E1000_MAX_82544_TXD
+ }}
+ };
+
+ if (num_TxDescriptors > bd) {
+ tx_ring->count = TxDescriptors[bd];
+ e1000_validate_option(&tx_ring->count, &opt, adapter);
+ tx_ring->count = ALIGN(tx_ring->count,
+ REQ_TX_DESCRIPTOR_MULTIPLE);
+ } else {
+ tx_ring->count = opt.def;
+ }
+ for (i = 0; i < adapter->num_tx_queues; i++)
+ tx_ring[i].count = tx_ring->count;
+ }
+ { /* Receive Descriptor Count */
+ struct e1000_rx_ring *rx_ring = adapter->rx_ring;
+ int i;
+ e1000_mac_type mac_type = adapter->hw.mac_type;
+
+ opt = (struct e1000_option) {
+ .type = range_option,
+ .name = "Receive Descriptors",
+ .err = "using default of "
+ __MODULE_STRING(E1000_DEFAULT_RXD),
+ .def = E1000_DEFAULT_RXD,
+ .arg = { .r = {
+ .min = E1000_MIN_RXD,
+ .max = mac_type < e1000_82544 ? E1000_MAX_RXD : E1000_MAX_82544_RXD
+ }}
+ };
+
+ if (num_RxDescriptors > bd) {
+ rx_ring->count = RxDescriptors[bd];
+ e1000_validate_option(&rx_ring->count, &opt, adapter);
+ rx_ring->count = ALIGN(rx_ring->count,
+ REQ_RX_DESCRIPTOR_MULTIPLE);
+ } else {
+ rx_ring->count = opt.def;
+ }
+ for (i = 0; i < adapter->num_rx_queues; i++)
+ rx_ring[i].count = rx_ring->count;
+ }
+ { /* Checksum Offload Enable/Disable */
+ opt = (struct e1000_option) {
+ .type = enable_option,
+ .name = "Checksum Offload",
+ .err = "defaulting to Enabled",
+ .def = OPTION_ENABLED
+ };
+
+ if (num_XsumRX > bd) {
+ unsigned int rx_csum = XsumRX[bd];
+ e1000_validate_option(&rx_csum, &opt, adapter);
+ adapter->rx_csum = rx_csum;
+ } else {
+ adapter->rx_csum = opt.def;
+ }
+ }
+ { /* Flow Control */
+
+ struct e1000_opt_list fc_list[] =
+ {{ E1000_FC_NONE, "Flow Control Disabled" },
+ { E1000_FC_RX_PAUSE,"Flow Control Receive Only" },
+ { E1000_FC_TX_PAUSE,"Flow Control Transmit Only" },
+ { E1000_FC_FULL, "Flow Control Enabled" },
+ { E1000_FC_DEFAULT, "Flow Control Hardware Default" }};
+
+ opt = (struct e1000_option) {
+ .type = list_option,
+ .name = "Flow Control",
+ .err = "reading default settings from EEPROM",
+ .def = E1000_FC_DEFAULT,
+ .arg = { .l = { .nr = ARRAY_SIZE(fc_list),
+ .p = fc_list }}
+ };
+
+ if (num_FlowControl > bd) {
+ unsigned int fc = FlowControl[bd];
+ e1000_validate_option(&fc, &opt, adapter);
+ adapter->hw.fc = adapter->hw.original_fc = fc;
+ } else {
+ adapter->hw.fc = adapter->hw.original_fc = opt.def;
+ }
+ }
+ { /* Transmit Interrupt Delay */
+ opt = (struct e1000_option) {
+ .type = range_option,
+ .name = "Transmit Interrupt Delay",
+ .err = "using default of " __MODULE_STRING(DEFAULT_TIDV),
+ .def = DEFAULT_TIDV,
+ .arg = { .r = { .min = MIN_TXDELAY,
+ .max = MAX_TXDELAY }}
+ };
+
+ if (num_TxIntDelay > bd) {
+ adapter->tx_int_delay = TxIntDelay[bd];
+ e1000_validate_option(&adapter->tx_int_delay, &opt,
+ adapter);
+ } else {
+ adapter->tx_int_delay = opt.def;
+ }
+ }
+ { /* Transmit Absolute Interrupt Delay */
+ opt = (struct e1000_option) {
+ .type = range_option,
+ .name = "Transmit Absolute Interrupt Delay",
+ .err = "using default of " __MODULE_STRING(DEFAULT_TADV),
+ .def = DEFAULT_TADV,
+ .arg = { .r = { .min = MIN_TXABSDELAY,
+ .max = MAX_TXABSDELAY }}
+ };
+
+ if (num_TxAbsIntDelay > bd) {
+ adapter->tx_abs_int_delay = TxAbsIntDelay[bd];
+ e1000_validate_option(&adapter->tx_abs_int_delay, &opt,
+ adapter);
+ } else {
+ adapter->tx_abs_int_delay = opt.def;
+ }
+ }
+ { /* Receive Interrupt Delay */
+ opt = (struct e1000_option) {
+ .type = range_option,
+ .name = "Receive Interrupt Delay",
+ .err = "using default of " __MODULE_STRING(DEFAULT_RDTR),
+ .def = DEFAULT_RDTR,
+ .arg = { .r = { .min = MIN_RXDELAY,
+ .max = MAX_RXDELAY }}
+ };
+
+ if (num_RxIntDelay > bd) {
+ adapter->rx_int_delay = RxIntDelay[bd];
+ e1000_validate_option(&adapter->rx_int_delay, &opt,
+ adapter);
+ } else {
+ adapter->rx_int_delay = opt.def;
+ }
+ }
+ { /* Receive Absolute Interrupt Delay */
+ opt = (struct e1000_option) {
+ .type = range_option,
+ .name = "Receive Absolute Interrupt Delay",
+ .err = "using default of " __MODULE_STRING(DEFAULT_RADV),
+ .def = DEFAULT_RADV,
+ .arg = { .r = { .min = MIN_RXABSDELAY,
+ .max = MAX_RXABSDELAY }}
+ };
+
+ if (num_RxAbsIntDelay > bd) {
+ adapter->rx_abs_int_delay = RxAbsIntDelay[bd];
+ e1000_validate_option(&adapter->rx_abs_int_delay, &opt,
+ adapter);
+ } else {
+ adapter->rx_abs_int_delay = opt.def;
+ }
+ }
+ { /* Interrupt Throttling Rate */
+ opt = (struct e1000_option) {
+ .type = range_option,
+ .name = "Interrupt Throttling Rate (ints/sec)",
+ .err = "using default of " __MODULE_STRING(DEFAULT_ITR),
+ .def = DEFAULT_ITR,
+ .arg = { .r = { .min = MIN_ITR,
+ .max = MAX_ITR }}
+ };
+
+ if (num_InterruptThrottleRate > bd) {
+ adapter->itr = InterruptThrottleRate[bd];
+ switch (adapter->itr) {
+ case 0:
+ DPRINTK(PROBE, INFO, "%s turned off\n",
+ opt.name);
+ break;
+ case 1:
+ DPRINTK(PROBE, INFO, "%s set to dynamic mode\n",
+ opt.name);
+ adapter->itr_setting = adapter->itr;
+ adapter->itr = 20000;
+ break;
+ case 3:
+ DPRINTK(PROBE, INFO,
+ "%s set to dynamic conservative mode\n",
+ opt.name);
+ adapter->itr_setting = adapter->itr;
+ adapter->itr = 20000;
+ break;
+ default:
+ e1000_validate_option(&adapter->itr, &opt,
+ adapter);
+ /* save the setting, because the dynamic bits change itr */
+ /* clear the lower two bits because they are
+ * used as control */
+ adapter->itr_setting = adapter->itr & ~3;
+ break;
+ }
+ } else {
+ adapter->itr_setting = opt.def;
+ adapter->itr = 20000;
+ }
+ }
+ { /* Smart Power Down */
+ opt = (struct e1000_option) {
+ .type = enable_option,
+ .name = "PHY Smart Power Down",
+ .err = "defaulting to Disabled",
+ .def = OPTION_DISABLED
+ };
+
+ if (num_SmartPowerDownEnable > bd) {
+ unsigned int spd = SmartPowerDownEnable[bd];
+ e1000_validate_option(&spd, &opt, adapter);
+ adapter->smart_power_down = spd;
+ } else {
+ adapter->smart_power_down = opt.def;
+ }
+ }
+ { /* Kumeran Lock Loss Workaround */
+ opt = (struct e1000_option) {
+ .type = enable_option,
+ .name = "Kumeran Lock Loss Workaround",
+ .err = "defaulting to Enabled",
+ .def = OPTION_ENABLED
+ };
+
+ if (num_KumeranLockLoss > bd) {
+ unsigned int kmrn_lock_loss = KumeranLockLoss[bd];
+ e1000_validate_option(&kmrn_lock_loss, &opt, adapter);
+ adapter->hw.kmrn_lock_loss_workaround_disabled = !kmrn_lock_loss;
+ } else {
+ adapter->hw.kmrn_lock_loss_workaround_disabled = !opt.def;
+ }
+ }
+
+ switch (adapter->hw.media_type) {
+ case e1000_media_type_fiber:
+ case e1000_media_type_internal_serdes:
+ e1000_check_fiber_options(adapter);
+ break;
+ case e1000_media_type_copper:
+ e1000_check_copper_options(adapter);
+ break;
+ default:
+ BUG();
+ }
+}
+
+/**
+ * e1000_check_fiber_options - Range Checking for Link Options, Fiber Version
+ * @adapter: board private structure
+ *
+ * Handles speed and duplex options on fiber adapters
+ **/
+
+static void __devinit e1000_check_fiber_options(struct e1000_adapter *adapter)
+{
+ int bd = adapter->bd_number;
+ if (num_Speed > bd) {
+ DPRINTK(PROBE, INFO, "Speed not valid for fiber adapters, "
+ "parameter ignored\n");
+ }
+
+ if (num_Duplex > bd) {
+ DPRINTK(PROBE, INFO, "Duplex not valid for fiber adapters, "
+ "parameter ignored\n");
+ }
+
+ if ((num_AutoNeg > bd) && (AutoNeg[bd] != 0x20)) {
+ DPRINTK(PROBE, INFO, "AutoNeg other than 1000/Full is "
+ "not valid for fiber adapters, "
+ "parameter ignored\n");
+ }
+}
+
+/**
+ * e1000_check_copper_options - Range Checking for Link Options, Copper Version
+ * @adapter: board private structure
+ *
+ * Handles speed and duplex options on copper adapters
+ **/
+
+static void __devinit e1000_check_copper_options(struct e1000_adapter *adapter)
+{
+ struct e1000_option opt;
+ unsigned int speed, dplx, an;
+ int bd = adapter->bd_number;
+
+ { /* Speed */
+ static const struct e1000_opt_list speed_list[] = {
+ { 0, "" },
+ { SPEED_10, "" },
+ { SPEED_100, "" },
+ { SPEED_1000, "" }};
+
+ opt = (struct e1000_option) {
+ .type = list_option,
+ .name = "Speed",
+ .err = "parameter ignored",
+ .def = 0,
+ .arg = { .l = { .nr = ARRAY_SIZE(speed_list),
+ .p = speed_list }}
+ };
+
+ if (num_Speed > bd) {
+ speed = Speed[bd];
+ e1000_validate_option(&speed, &opt, adapter);
+ } else {
+ speed = opt.def;
+ }
+ }
+ { /* Duplex */
+ static const struct e1000_opt_list dplx_list[] = {
+ { 0, "" },
+ { HALF_DUPLEX, "" },
+ { FULL_DUPLEX, "" }};
+
+ opt = (struct e1000_option) {
+ .type = list_option,
+ .name = "Duplex",
+ .err = "parameter ignored",
+ .def = 0,
+ .arg = { .l = { .nr = ARRAY_SIZE(dplx_list),
+ .p = dplx_list }}
+ };
+
+ if (e1000_check_phy_reset_block(&adapter->hw)) {
+ DPRINTK(PROBE, INFO,
+ "Link active due to SoL/IDER Session. "
+ "Speed/Duplex/AutoNeg parameter ignored.\n");
+ return;
+ }
+ if (num_Duplex > bd) {
+ dplx = Duplex[bd];
+ e1000_validate_option(&dplx, &opt, adapter);
+ } else {
+ dplx = opt.def;
+ }
+ }
+
+ if ((num_AutoNeg > bd) && (speed != 0 || dplx != 0)) {
+ DPRINTK(PROBE, INFO,
+ "AutoNeg specified along with Speed or Duplex, "
+ "parameter ignored\n");
+ adapter->hw.autoneg_advertised = AUTONEG_ADV_DEFAULT;
+ } else { /* Autoneg */
+ static const struct e1000_opt_list an_list[] =
+ #define AA "AutoNeg advertising "
+ {{ 0x01, AA "10/HD" },
+ { 0x02, AA "10/FD" },
+ { 0x03, AA "10/FD, 10/HD" },
+ { 0x04, AA "100/HD" },
+ { 0x05, AA "100/HD, 10/HD" },
+ { 0x06, AA "100/HD, 10/FD" },
+ { 0x07, AA "100/HD, 10/FD, 10/HD" },
+ { 0x08, AA "100/FD" },
+ { 0x09, AA "100/FD, 10/HD" },
+ { 0x0a, AA "100/FD, 10/FD" },
+ { 0x0b, AA "100/FD, 10/FD, 10/HD" },
+ { 0x0c, AA "100/FD, 100/HD" },
+ { 0x0d, AA "100/FD, 100/HD, 10/HD" },
+ { 0x0e, AA "100/FD, 100/HD, 10/FD" },
+ { 0x0f, AA "100/FD, 100/HD, 10/FD, 10/HD" },
+ { 0x20, AA "1000/FD" },
+ { 0x21, AA "1000/FD, 10/HD" },
+ { 0x22, AA "1000/FD, 10/FD" },
+ { 0x23, AA "1000/FD, 10/FD, 10/HD" },
+ { 0x24, AA "1000/FD, 100/HD" },
+ { 0x25, AA "1000/FD, 100/HD, 10/HD" },
+ { 0x26, AA "1000/FD, 100/HD, 10/FD" },
+ { 0x27, AA "1000/FD, 100/HD, 10/FD, 10/HD" },
+ { 0x28, AA "1000/FD, 100/FD" },
+ { 0x29, AA "1000/FD, 100/FD, 10/HD" },
+ { 0x2a, AA "1000/FD, 100/FD, 10/FD" },
+ { 0x2b, AA "1000/FD, 100/FD, 10/FD, 10/HD" },
+ { 0x2c, AA "1000/FD, 100/FD, 100/HD" },
+ { 0x2d, AA "1000/FD, 100/FD, 100/HD, 10/HD" },
+ { 0x2e, AA "1000/FD, 100/FD, 100/HD, 10/FD" },
+ { 0x2f, AA "1000/FD, 100/FD, 100/HD, 10/FD, 10/HD" }};
+
+ opt = (struct e1000_option) {
+ .type = list_option,
+ .name = "AutoNeg",
+ .err = "parameter ignored",
+ .def = AUTONEG_ADV_DEFAULT,
+ .arg = { .l = { .nr = ARRAY_SIZE(an_list),
+ .p = an_list }}
+ };
+
+ if (num_AutoNeg > bd) {
+ an = AutoNeg[bd];
+ e1000_validate_option(&an, &opt, adapter);
+ } else {
+ an = opt.def;
+ }
+ adapter->hw.autoneg_advertised = an;
+ }
+
+ switch (speed + dplx) {
+ case 0:
+ adapter->hw.autoneg = adapter->fc_autoneg = 1;
+ if ((num_Speed > bd) && (speed != 0 || dplx != 0))
+ DPRINTK(PROBE, INFO,
+ "Speed and duplex autonegotiation enabled\n");
+ break;
+ case HALF_DUPLEX:
+ DPRINTK(PROBE, INFO, "Half Duplex specified without Speed\n");
+ DPRINTK(PROBE, INFO, "Using Autonegotiation at "
+ "Half Duplex only\n");
+ adapter->hw.autoneg = adapter->fc_autoneg = 1;
+ adapter->hw.autoneg_advertised = ADVERTISE_10_HALF |
+ ADVERTISE_100_HALF;
+ break;
+ case FULL_DUPLEX:
+ DPRINTK(PROBE, INFO, "Full Duplex specified without Speed\n");
+ DPRINTK(PROBE, INFO, "Using Autonegotiation at "
+ "Full Duplex only\n");
+ adapter->hw.autoneg = adapter->fc_autoneg = 1;
+ adapter->hw.autoneg_advertised = ADVERTISE_10_FULL |
+ ADVERTISE_100_FULL |
+ ADVERTISE_1000_FULL;
+ break;
+ case SPEED_10:
+ DPRINTK(PROBE, INFO, "10 Mbps Speed specified "
+ "without Duplex\n");
+ DPRINTK(PROBE, INFO, "Using Autonegotiation at 10 Mbps only\n");
+ adapter->hw.autoneg = adapter->fc_autoneg = 1;
+ adapter->hw.autoneg_advertised = ADVERTISE_10_HALF |
+ ADVERTISE_10_FULL;
+ break;
+ case SPEED_10 + HALF_DUPLEX:
+ DPRINTK(PROBE, INFO, "Forcing to 10 Mbps Half Duplex\n");
+ adapter->hw.autoneg = adapter->fc_autoneg = 0;
+ adapter->hw.forced_speed_duplex = e1000_10_half;
+ adapter->hw.autoneg_advertised = 0;
+ break;
+ case SPEED_10 + FULL_DUPLEX:
+ DPRINTK(PROBE, INFO, "Forcing to 10 Mbps Full Duplex\n");
+ adapter->hw.autoneg = adapter->fc_autoneg = 0;
+ adapter->hw.forced_speed_duplex = e1000_10_full;
+ adapter->hw.autoneg_advertised = 0;
+ break;
+ case SPEED_100:
+ DPRINTK(PROBE, INFO, "100 Mbps Speed specified "
+ "without Duplex\n");
+ DPRINTK(PROBE, INFO, "Using Autonegotiation at "
+ "100 Mbps only\n");
+ adapter->hw.autoneg = adapter->fc_autoneg = 1;
+ adapter->hw.autoneg_advertised = ADVERTISE_100_HALF |
+ ADVERTISE_100_FULL;
+ break;
+ case SPEED_100 + HALF_DUPLEX:
+ DPRINTK(PROBE, INFO, "Forcing to 100 Mbps Half Duplex\n");
+ adapter->hw.autoneg = adapter->fc_autoneg = 0;
+ adapter->hw.forced_speed_duplex = e1000_100_half;
+ adapter->hw.autoneg_advertised = 0;
+ break;
+ case SPEED_100 + FULL_DUPLEX:
+ DPRINTK(PROBE, INFO, "Forcing to 100 Mbps Full Duplex\n");
+ adapter->hw.autoneg = adapter->fc_autoneg = 0;
+ adapter->hw.forced_speed_duplex = e1000_100_full;
+ adapter->hw.autoneg_advertised = 0;
+ break;
+ case SPEED_1000:
+ DPRINTK(PROBE, INFO, "1000 Mbps Speed specified without "
+ "Duplex\n");
+ goto full_duplex_only;
+ case SPEED_1000 + HALF_DUPLEX:
+ DPRINTK(PROBE, INFO,
+ "Half Duplex is not supported at 1000 Mbps\n");
+ /* fall through */
+ case SPEED_1000 + FULL_DUPLEX:
+full_duplex_only:
+ DPRINTK(PROBE, INFO,
+ "Using Autonegotiation at 1000 Mbps Full Duplex only\n");
+ adapter->hw.autoneg = adapter->fc_autoneg = 1;
+ adapter->hw.autoneg_advertised = ADVERTISE_1000_FULL;
+ break;
+ default:
+ BUG();
+ }
+
+ /* Speed, AutoNeg and MDI/MDI-X must all play nice */
+ if (e1000_validate_mdi_setting(&(adapter->hw)) < 0) {
+ DPRINTK(PROBE, INFO,
+ "Speed, AutoNeg and MDI-X specifications are "
+ "incompatible. Setting MDI-X to a compatible value.\n");
+ }
+}
+