devices/e100-2.6.29-ethercat.c
changeset 1505 da637f7e4e3a
child 1506 41ca84fb6bb2
       
     1 /******************************************************************************
       
     2  *
       
     3  *  $Id$
       
     4  *
       
     5  *  Copyright (C) 2007-2008  Florian Pose, Ingenieurgemeinschaft IgH
       
     6  *
       
     7  *  This file is part of the IgH EtherCAT Master.
       
     8  *
       
     9  *  The IgH EtherCAT Master is free software; you can redistribute it and/or
       
    10  *  modify it under the terms of the GNU General Public License version 2, as
       
    11  *  published by the Free Software Foundation.
       
    12  *
       
    13  *  The IgH EtherCAT Master is distributed in the hope that it will be useful,
       
    14  *  but WITHOUT ANY WARRANTY; without even the implied warranty of
       
    15  *  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General
       
    16  *  Public License for more details.
       
    17  *
       
    18  *  You should have received a copy of the GNU General Public License along
       
    19  *  with the IgH EtherCAT Master; if not, write to the Free Software
       
    20  *  Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA  02110-1301  USA
       
    21  *
       
    22  *  ---
       
    23  *
       
    24  *  The license mentioned above concerns the source code only. Using the
       
    25  *  EtherCAT technology and brand is only permitted in compliance with the
       
    26  *  industrial property and similar rights of Beckhoff Automation GmbH.
       
    27  *
       
    28  *  ---
       
    29  *
       
    30  *  vim: noexpandtab
       
    31  *
       
    32  *****************************************************************************/
       
    33 
       
    34 /**
       
    35    \file
       
    36    EtherCAT driver for e100-compatible NICs.
       
    37 */
       
    38 
       
    39 /* Former documentation: */
       
    40 
       
    41 /*******************************************************************************
       
    42 
       
    43   Intel PRO/100 Linux driver
       
    44   Copyright(c) 1999 - 2006 Intel Corporation.
       
    45 
       
    46   This program is free software; you can redistribute it and/or modify it
       
    47   under the terms and conditions of the GNU General Public License,
       
    48   version 2, as published by the Free Software Foundation.
       
    49 
       
    50   This program is distributed in the hope it will be useful, but WITHOUT
       
    51   ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
       
    52   FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
       
    53   more details.
       
    54 
       
    55   You should have received a copy of the GNU General Public License along with
       
    56   this program; if not, write to the Free Software Foundation, Inc.,
       
    57   51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
       
    58 
       
    59   The full GNU General Public License is included in this distribution in
       
    60   the file called "COPYING".
       
    61 
       
    62   Contact Information:
       
    63   Linux NICS <linux.nics@intel.com>
       
    64   e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
       
    65   Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
       
    66 
       
    67 *******************************************************************************/
       
    68 
       
    69 /*
       
    70  *	e100.c: Intel(R) PRO/100 ethernet driver
       
    71  *
       
    72  *	(Re)written 2003 by scott.feldman@intel.com.  Based loosely on
       
    73  *	original e100 driver, but better described as a munging of
       
    74  *	e100, e1000, eepro100, tg3, 8139cp, and other drivers.
       
    75  *
       
    76  *	References:
       
    77  *		Intel 8255x 10/100 Mbps Ethernet Controller Family,
       
    78  *		Open Source Software Developers Manual,
       
    79  *		http://sourceforge.net/projects/e1000
       
    80  *
       
    81  *
       
    82  *	                      Theory of Operation
       
    83  *
       
    84  *	I.   General
       
    85  *
       
     86  *	The driver supports the Intel(R) 10/100 Mbps PCI Fast Ethernet
       
    87  *	controller family, which includes the 82557, 82558, 82559, 82550,
       
    88  *	82551, and 82562 devices.  82558 and greater controllers
       
    89  *	integrate the Intel 82555 PHY.  The controllers are used in
       
    90  *	server and client network interface cards, as well as in
       
    91  *	LAN-On-Motherboard (LOM), CardBus, MiniPCI, and ICHx
       
    92  *	configurations.  8255x supports a 32-bit linear addressing
       
     93  *	mode and operates at a 33 MHz PCI clock rate.
       
    94  *
       
    95  *	II.  Driver Operation
       
    96  *
       
    97  *	Memory-mapped mode is used exclusively to access the device's
       
    98  *	shared-memory structure, the Control/Status Registers (CSR). All
       
    99  *	setup, configuration, and control of the device, including queuing
       
   100  *	of Tx, Rx, and configuration commands is through the CSR.
       
   101  *	cmd_lock serializes accesses to the CSR command register.  cb_lock
       
   102  *	protects the shared Command Block List (CBL).
       
   103  *
       
    104  *	8255x is highly MII-compliant and all accesses to the PHY go
       
   105  *	through the Management Data Interface (MDI).  Consequently, the
       
   106  *	driver leverages the mii.c library shared with other MII-compliant
       
   107  *	devices.
       
   108  *
       
   109  *	Big- and Little-Endian byte order as well as 32- and 64-bit
       
   110  *	archs are supported.  Weak-ordered memory and non-cache-coherent
       
   111  *	archs are supported.
       
   112  *
       
   113  *	III. Transmit
       
   114  *
       
   115  *	A Tx skb is mapped and hangs off of a TCB.  TCBs are linked
       
   116  *	together in a fixed-size ring (CBL) thus forming the flexible mode
       
   117  *	memory structure.  A TCB marked with the suspend-bit indicates
       
   118  *	the end of the ring.  The last TCB processed suspends the
       
    119  *	controller, and the controller can be restarted by issuing a CU
       
   120  *	resume command to continue from the suspend point, or a CU start
       
   121  *	command to start at a given position in the ring.
       
   122  *
       
   123  *	Non-Tx commands (config, multicast setup, etc) are linked
       
   124  *	into the CBL ring along with Tx commands.  The common structure
       
   125  *	used for both Tx and non-Tx commands is the Command Block (CB).
       
   126  *
       
   127  *	cb_to_use is the next CB to use for queuing a command; cb_to_clean
       
   128  *	is the next CB to check for completion; cb_to_send is the first
       
   129  *	CB to start on in case of a previous failure to resume.  CB clean
       
   130  *	up happens in interrupt context in response to a CU interrupt.
       
   131  *	cbs_avail keeps track of number of free CB resources available.
       
   132  *
       
   133  * 	Hardware padding of short packets to minimum packet size is
       
   134  * 	enabled.  82557 pads with 7Eh, while the later controllers pad
       
   135  * 	with 00h.
       
   136  *
       
   137  *	IV.  Receive
       
   138  *
       
   139  *	The Receive Frame Area (RFA) comprises a ring of Receive Frame
       
   140  *	Descriptors (RFD) + data buffer, thus forming the simplified mode
       
   141  *	memory structure.  Rx skbs are allocated to contain both the RFD
       
   142  *	and the data buffer, but the RFD is pulled off before the skb is
       
   143  *	indicated.  The data buffer is aligned such that encapsulated
       
   144  *	protocol headers are u32-aligned.  Since the RFD is part of the
       
   145  *	mapped shared memory, and completion status is contained within
       
   146  *	the RFD, the RFD must be dma_sync'ed to maintain a consistent
       
   147  *	view from software and hardware.
       
   148  *
       
   149  *	In order to keep updates to the RFD link field from colliding with
       
   150  *	hardware writes to mark packets complete, we use the feature that
       
   151  *	hardware will not write to a size 0 descriptor and mark the previous
       
   152  *	packet as end-of-list (EL).   After updating the link, we remove EL
       
   153  *	and only then restore the size such that hardware may use the
       
   154  *	previous-to-end RFD.
       
   155  *
       
    156  *	Under typical operation, the receive unit (RU) is started once,
       
   157  *	and the controller happily fills RFDs as frames arrive.  If
       
   158  *	replacement RFDs cannot be allocated, or the RU goes non-active,
       
   159  *	the RU must be restarted.  Frame arrival generates an interrupt,
       
   160  *	and Rx indication and re-allocation happen in the same context,
       
   161  *	therefore no locking is required.  A software-generated interrupt
       
   162  *	is generated from the watchdog to recover from a failed allocation
       
   163  *	scenario where all Rx resources have been indicated and none re-
       
   164  *	placed.
       
   165  *
       
   166  *	V.   Miscellaneous
       
   167  *
       
   168  * 	VLAN offloading of tagging, stripping and filtering is not
       
    169  * 	supported, but the driver will accommodate the extra 4-byte VLAN tag
       
   170  * 	for processing by upper layers.  Tx/Rx Checksum offloading is not
       
    171  * 	supported.  Tx Scatter/Gather is not supported.  Jumbo Frames are
       
   172  * 	not supported (hardware limitation).
       
   173  *
       
   174  * 	MagicPacket(tm) WoL support is enabled/disabled via ethtool.
       
   175  *
       
   176  * 	Thanks to JC (jchapman@katalix.com) for helping with
       
   177  * 	testing/troubleshooting the development driver.
       
   178  *
       
   179  * 	TODO:
       
   180  * 	o several entry points race with dev->close
       
   181  * 	o check for tx-no-resources/stop Q races with tx clean/wake Q
       
   182  *
       
   183  *	FIXES:
       
   184  * 2005/12/02 - Michael O'Donnell <Michael.ODonnell at stratus dot com>
       
   185  *	- Stratus87247: protect MDI control register manipulations
       
   186  */
       
   187 
       
   188 #include <linux/module.h>
       
   189 #include <linux/moduleparam.h>
       
   190 #include <linux/kernel.h>
       
   191 #include <linux/types.h>
       
   192 #include <linux/slab.h>
       
   193 #include <linux/delay.h>
       
   194 #include <linux/init.h>
       
   195 #include <linux/pci.h>
       
   196 #include <linux/dma-mapping.h>
       
   197 #include <linux/netdevice.h>
       
   198 #include <linux/etherdevice.h>
       
   199 #include <linux/mii.h>
       
   200 #include <linux/if_vlan.h>
       
   201 #include <linux/skbuff.h>
       
   202 #include <linux/ethtool.h>
       
   203 #include <linux/string.h>
       
   204 #include <linux/firmware.h>
       
   205 #include <asm/unaligned.h>
       
   206 
       
   207 // EtherCAT includes
       
   208 #include "../globals.h"
       
   209 #include "ecdev.h"
       
   210 
       
   211 #define DRV_NAME		"ec_e100"
       
   212 
       
   213 #define DRV_EXT			"-NAPI"
       
   214 #define DRV_VERSION		"3.5.23-k6"DRV_EXT
       
   215 #define DRV_DESCRIPTION		"Intel(R) PRO/100 Network Driver"
       
   216 #define DRV_COPYRIGHT		"Copyright(c) 1999-2006 Intel Corporation"
       
   217 #define PFX			DRV_NAME ": "
       
   218 
       
   219 #define E100_WATCHDOG_PERIOD	(2 * HZ)
       
   220 #define E100_NAPI_WEIGHT	16
       
   221 
       
   222 #define FIRMWARE_D101M		"e100/d101m_ucode.bin"
       
   223 #define FIRMWARE_D101S		"e100/d101s_ucode.bin"
       
   224 #define FIRMWARE_D102E		"e100/d102e_ucode.bin"
       
   225 
       
   226 MODULE_DESCRIPTION(DRV_DESCRIPTION);
       
   227 MODULE_AUTHOR(DRV_COPYRIGHT);
       
   228 MODULE_LICENSE("GPL");
       
   229 MODULE_VERSION(DRV_VERSION);
       
   230 MODULE_FIRMWARE(FIRMWARE_D101M);
       
   231 MODULE_FIRMWARE(FIRMWARE_D101S);
       
   232 MODULE_FIRMWARE(FIRMWARE_D102E);
       
   233 
       
   234 MODULE_DESCRIPTION(DRV_DESCRIPTION);
       
   235 MODULE_AUTHOR("Mario Witkowski <mario.witkowski@w4systems.de>");
       
   236 MODULE_LICENSE("GPL");
       
   237 MODULE_VERSION(DRV_VERSION ", master " EC_MASTER_VERSION);
       
   238 
       
   239 void e100_ec_poll(struct net_device *);
       
   240 
       
   241 static int debug = 3;
       
   242 static int eeprom_bad_csum_allow = 0;
       
   243 static int use_io = 0;
       
   244 module_param(debug, int, 0);
       
   245 module_param(eeprom_bad_csum_allow, int, 0);
       
   246 module_param(use_io, int, 0);
       
   247 MODULE_PARM_DESC(debug, "Debug level (0=none,...,16=all)");
       
   248 MODULE_PARM_DESC(eeprom_bad_csum_allow, "Allow bad eeprom checksums");
       
   249 MODULE_PARM_DESC(use_io, "Force use of i/o access mode");
       
   250 #define DPRINTK(nlevel, klevel, fmt, args...) \
       
   251 	(void)((NETIF_MSG_##nlevel & nic->msg_enable) && \
       
   252 	printk(KERN_##klevel PFX "%s: %s: " fmt, nic->netdev->name, \
       
   253 		__func__ , ## args))
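        /* Usage sketch (illustrative, not from the original source): with a
         * local 'struct nic *nic' in scope, as every caller below has,
         * DPRINTK(HW, DEBUG, "reg=0x%04X\n", val) prints only when
         * NETIF_MSG_HW is set in nic->msg_enable, prefixed with the netdev
         * name and the calling function. */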
       
   254 
       
   255 #define INTEL_8255X_ETHERNET_DEVICE(device_id, ich) {\
       
   256 	PCI_VENDOR_ID_INTEL, device_id, PCI_ANY_ID, PCI_ANY_ID, \
       
   257 	PCI_CLASS_NETWORK_ETHERNET << 8, 0xFFFF00, ich }
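        /* The trailing 'ich' argument becomes pci_device_id.driver_data;
         * presumably the probe code later in this file reads it to set the
         * 'ich' flag (and ICH workarounds) for the matching chipsets. */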
       
   258 static struct pci_device_id e100_id_table[] = {
       
   259 	INTEL_8255X_ETHERNET_DEVICE(0x1029, 0),
       
   260 	INTEL_8255X_ETHERNET_DEVICE(0x1030, 0),
       
   261 	INTEL_8255X_ETHERNET_DEVICE(0x1031, 3),
       
   262 	INTEL_8255X_ETHERNET_DEVICE(0x1032, 3),
       
   263 	INTEL_8255X_ETHERNET_DEVICE(0x1033, 3),
       
   264 	INTEL_8255X_ETHERNET_DEVICE(0x1034, 3),
       
   265 	INTEL_8255X_ETHERNET_DEVICE(0x1038, 3),
       
   266 	INTEL_8255X_ETHERNET_DEVICE(0x1039, 4),
       
   267 	INTEL_8255X_ETHERNET_DEVICE(0x103A, 4),
       
   268 	INTEL_8255X_ETHERNET_DEVICE(0x103B, 4),
       
   269 	INTEL_8255X_ETHERNET_DEVICE(0x103C, 4),
       
   270 	INTEL_8255X_ETHERNET_DEVICE(0x103D, 4),
       
   271 	INTEL_8255X_ETHERNET_DEVICE(0x103E, 4),
       
   272 	INTEL_8255X_ETHERNET_DEVICE(0x1050, 5),
       
   273 	INTEL_8255X_ETHERNET_DEVICE(0x1051, 5),
       
   274 	INTEL_8255X_ETHERNET_DEVICE(0x1052, 5),
       
   275 	INTEL_8255X_ETHERNET_DEVICE(0x1053, 5),
       
   276 	INTEL_8255X_ETHERNET_DEVICE(0x1054, 5),
       
   277 	INTEL_8255X_ETHERNET_DEVICE(0x1055, 5),
       
   278 	INTEL_8255X_ETHERNET_DEVICE(0x1056, 5),
       
   279 	INTEL_8255X_ETHERNET_DEVICE(0x1057, 5),
       
   280 	INTEL_8255X_ETHERNET_DEVICE(0x1059, 0),
       
   281 	INTEL_8255X_ETHERNET_DEVICE(0x1064, 6),
       
   282 	INTEL_8255X_ETHERNET_DEVICE(0x1065, 6),
       
   283 	INTEL_8255X_ETHERNET_DEVICE(0x1066, 6),
       
   284 	INTEL_8255X_ETHERNET_DEVICE(0x1067, 6),
       
   285 	INTEL_8255X_ETHERNET_DEVICE(0x1068, 6),
       
   286 	INTEL_8255X_ETHERNET_DEVICE(0x1069, 6),
       
   287 	INTEL_8255X_ETHERNET_DEVICE(0x106A, 6),
       
   288 	INTEL_8255X_ETHERNET_DEVICE(0x106B, 6),
       
   289 	INTEL_8255X_ETHERNET_DEVICE(0x1091, 7),
       
   290 	INTEL_8255X_ETHERNET_DEVICE(0x1092, 7),
       
   291 	INTEL_8255X_ETHERNET_DEVICE(0x1093, 7),
       
   292 	INTEL_8255X_ETHERNET_DEVICE(0x1094, 7),
       
   293 	INTEL_8255X_ETHERNET_DEVICE(0x1095, 7),
       
   294 	INTEL_8255X_ETHERNET_DEVICE(0x1209, 0),
       
   295 	INTEL_8255X_ETHERNET_DEVICE(0x1229, 0),
       
   296 	INTEL_8255X_ETHERNET_DEVICE(0x2449, 2),
       
   297 	INTEL_8255X_ETHERNET_DEVICE(0x2459, 2),
       
   298 	INTEL_8255X_ETHERNET_DEVICE(0x245D, 2),
       
   299 	INTEL_8255X_ETHERNET_DEVICE(0x27DC, 7),
       
   300 	{ 0, }
       
   301 };
       
   302 
       
   303 // prevent from being loaded automatically
       
   304 //MODULE_DEVICE_TABLE(pci, e100_id_table);
       
   305 
       
   306 enum mac {
       
   307 	mac_82557_D100_A  = 0,
       
   308 	mac_82557_D100_B  = 1,
       
   309 	mac_82557_D100_C  = 2,
       
   310 	mac_82558_D101_A4 = 4,
       
   311 	mac_82558_D101_B0 = 5,
       
   312 	mac_82559_D101M   = 8,
       
   313 	mac_82559_D101S   = 9,
       
   314 	mac_82550_D102    = 12,
       
   315 	mac_82550_D102_C  = 13,
       
   316 	mac_82551_E       = 14,
       
   317 	mac_82551_F       = 15,
       
   318 	mac_82551_10      = 16,
       
   319 	mac_unknown       = 0xFF,
       
   320 };
       
   321 
       
   322 enum phy {
       
   323 	phy_100a     = 0x000003E0,
       
   324 	phy_100c     = 0x035002A8,
       
   325 	phy_82555_tx = 0x015002A8,
       
   326 	phy_nsc_tx   = 0x5C002000,
       
   327 	phy_82562_et = 0x033002A8,
       
   328 	phy_82562_em = 0x032002A8,
       
   329 	phy_82562_ek = 0x031002A8,
       
   330 	phy_82562_eh = 0x017002A8,
       
   331 	phy_unknown  = 0xFFFFFFFF,
       
   332 };
       
   333 
       
   334 /* CSR (Control/Status Registers) */
       
   335 struct csr {
       
   336 	struct {
       
   337 		u8 status;
       
   338 		u8 stat_ack;
       
   339 		u8 cmd_lo;
       
   340 		u8 cmd_hi;
       
   341 		u32 gen_ptr;
       
   342 	} scb;
       
   343 	u32 port;
       
   344 	u16 flash_ctrl;
       
   345 	u8 eeprom_ctrl_lo;
       
   346 	u8 eeprom_ctrl_hi;
       
   347 	u32 mdi_ctrl;
       
   348 	u32 rx_dma_count;
       
   349 };
       
   350 
       
   351 enum scb_status {
       
   352 	rus_no_res       = 0x08,
       
   353 	rus_ready        = 0x10,
       
   354 	rus_mask         = 0x3C,
       
   355 };
       
   356 
       
   357 enum ru_state  {
       
   358 	RU_SUSPENDED = 0,
       
   359 	RU_RUNNING	 = 1,
       
   360 	RU_UNINITIALIZED = -1,
       
   361 };
       
   362 
       
   363 enum scb_stat_ack {
       
   364 	stat_ack_not_ours    = 0x00,
       
   365 	stat_ack_sw_gen      = 0x04,
       
   366 	stat_ack_rnr         = 0x10,
       
   367 	stat_ack_cu_idle     = 0x20,
       
   368 	stat_ack_frame_rx    = 0x40,
       
   369 	stat_ack_cu_cmd_done = 0x80,
       
   370 	stat_ack_not_present = 0xFF,
       
   371 	stat_ack_rx = (stat_ack_sw_gen | stat_ack_rnr | stat_ack_frame_rx),
       
   372 	stat_ack_tx = (stat_ack_cu_idle | stat_ack_cu_cmd_done),
       
   373 };
       
   374 
       
   375 enum scb_cmd_hi {
       
   376 	irq_mask_none = 0x00,
       
   377 	irq_mask_all  = 0x01,
       
   378 	irq_sw_gen    = 0x02,
       
   379 };
       
   380 
       
   381 enum scb_cmd_lo {
       
   382 	cuc_nop        = 0x00,
       
   383 	ruc_start      = 0x01,
       
   384 	ruc_load_base  = 0x06,
       
   385 	cuc_start      = 0x10,
       
   386 	cuc_resume     = 0x20,
       
   387 	cuc_dump_addr  = 0x40,
       
   388 	cuc_dump_stats = 0x50,
       
   389 	cuc_load_base  = 0x60,
       
   390 	cuc_dump_reset = 0x70,
       
   391 };
       
   392 
       
   393 enum cuc_dump {
       
   394 	cuc_dump_complete       = 0x0000A005,
       
   395 	cuc_dump_reset_complete = 0x0000A007,
       
   396 };
       
   397 
       
   398 enum port {
       
   399 	software_reset  = 0x0000,
       
   400 	selftest        = 0x0001,
       
   401 	selective_reset = 0x0002,
       
   402 };
       
   403 
       
   404 enum eeprom_ctrl_lo {
       
   405 	eesk = 0x01,
       
   406 	eecs = 0x02,
       
   407 	eedi = 0x04,
       
   408 	eedo = 0x08,
       
   409 };
       
   410 
       
   411 enum mdi_ctrl {
       
   412 	mdi_write = 0x04000000,
       
   413 	mdi_read  = 0x08000000,
       
   414 	mdi_ready = 0x10000000,
       
   415 };
       
   416 
       
   417 enum eeprom_op {
       
   418 	op_write = 0x05,
       
   419 	op_read  = 0x06,
       
   420 	op_ewds  = 0x10,
       
   421 	op_ewen  = 0x13,
       
   422 };
       
   423 
       
   424 enum eeprom_offsets {
       
   425 	eeprom_cnfg_mdix  = 0x03,
       
   426 	eeprom_id         = 0x0A,
       
   427 	eeprom_config_asf = 0x0D,
       
   428 	eeprom_smbus_addr = 0x90,
       
   429 };
       
   430 
       
   431 enum eeprom_cnfg_mdix {
       
   432 	eeprom_mdix_enabled = 0x0080,
       
   433 };
       
   434 
       
   435 enum eeprom_id {
       
   436 	eeprom_id_wol = 0x0020,
       
   437 };
       
   438 
       
   439 enum eeprom_config_asf {
       
   440 	eeprom_asf = 0x8000,
       
   441 	eeprom_gcl = 0x4000,
       
   442 };
       
   443 
       
   444 enum cb_status {
       
   445 	cb_complete = 0x8000,
       
   446 	cb_ok       = 0x2000,
       
   447 };
       
   448 
       
   449 enum cb_command {
       
   450 	cb_nop    = 0x0000,
       
   451 	cb_iaaddr = 0x0001,
       
   452 	cb_config = 0x0002,
       
   453 	cb_multi  = 0x0003,
       
   454 	cb_tx     = 0x0004,
       
   455 	cb_ucode  = 0x0005,
       
   456 	cb_dump   = 0x0006,
       
   457 	cb_tx_sf  = 0x0008,
       
   458 	cb_cid    = 0x1f00,
       
   459 	cb_i      = 0x2000,
       
   460 	cb_s      = 0x4000,
       
   461 	cb_el     = 0x8000,
       
   462 };
       
   463 
       
   464 struct rfd {
       
   465 	__le16 status;
       
   466 	__le16 command;
       
   467 	__le32 link;
       
   468 	__le32 rbd;
       
   469 	__le16 actual_size;
       
   470 	__le16 size;
       
   471 };
       
   472 
       
   473 struct rx {
       
   474 	struct rx *next, *prev;
       
   475 	struct sk_buff *skb;
       
   476 	dma_addr_t dma_addr;
       
   477 };
       
   478 
       
   479 #if defined(__BIG_ENDIAN_BITFIELD)
       
   480 #define X(a,b)	b,a
       
   481 #else
       
   482 #define X(a,b)	a,b
       
   483 #endif
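        /* X(a,b) emits bitfields in the device's (little-endian) order: on
         * big-endian hosts, where bitfields fill from the MSB, the pair is
         * swapped so that struct config below still maps byte-for-byte onto
         * the layout the 8255x expects. */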
       
   484 struct config {
       
   485 /*0*/	u8 X(byte_count:6, pad0:2);
       
   486 /*1*/	u8 X(X(rx_fifo_limit:4, tx_fifo_limit:3), pad1:1);
       
   487 /*2*/	u8 adaptive_ifs;
       
   488 /*3*/	u8 X(X(X(X(mwi_enable:1, type_enable:1), read_align_enable:1),
       
   489 	   term_write_cache_line:1), pad3:4);
       
   490 /*4*/	u8 X(rx_dma_max_count:7, pad4:1);
       
   491 /*5*/	u8 X(tx_dma_max_count:7, dma_max_count_enable:1);
       
   492 /*6*/	u8 X(X(X(X(X(X(X(late_scb_update:1, direct_rx_dma:1),
       
   493 	   tno_intr:1), cna_intr:1), standard_tcb:1), standard_stat_counter:1),
       
   494 	   rx_discard_overruns:1), rx_save_bad_frames:1);
       
   495 /*7*/	u8 X(X(X(X(X(rx_discard_short_frames:1, tx_underrun_retry:2),
       
   496 	   pad7:2), rx_extended_rfd:1), tx_two_frames_in_fifo:1),
       
   497 	   tx_dynamic_tbd:1);
       
   498 /*8*/	u8 X(X(mii_mode:1, pad8:6), csma_disabled:1);
       
   499 /*9*/	u8 X(X(X(X(X(rx_tcpudp_checksum:1, pad9:3), vlan_arp_tco:1),
       
   500 	   link_status_wake:1), arp_wake:1), mcmatch_wake:1);
       
   501 /*10*/	u8 X(X(X(pad10:3, no_source_addr_insertion:1), preamble_length:2),
       
   502 	   loopback:2);
       
   503 /*11*/	u8 X(linear_priority:3, pad11:5);
       
   504 /*12*/	u8 X(X(linear_priority_mode:1, pad12:3), ifs:4);
       
   505 /*13*/	u8 ip_addr_lo;
       
   506 /*14*/	u8 ip_addr_hi;
       
   507 /*15*/	u8 X(X(X(X(X(X(X(promiscuous_mode:1, broadcast_disabled:1),
       
   508 	   wait_after_win:1), pad15_1:1), ignore_ul_bit:1), crc_16_bit:1),
       
   509 	   pad15_2:1), crs_or_cdt:1);
       
   510 /*16*/	u8 fc_delay_lo;
       
   511 /*17*/	u8 fc_delay_hi;
       
   512 /*18*/	u8 X(X(X(X(X(rx_stripping:1, tx_padding:1), rx_crc_transfer:1),
       
   513 	   rx_long_ok:1), fc_priority_threshold:3), pad18:1);
       
   514 /*19*/	u8 X(X(X(X(X(X(X(addr_wake:1, magic_packet_disable:1),
       
   515 	   fc_disable:1), fc_restop:1), fc_restart:1), fc_reject:1),
       
   516 	   full_duplex_force:1), full_duplex_pin:1);
       
   517 /*20*/	u8 X(X(X(pad20_1:5, fc_priority_location:1), multi_ia:1), pad20_2:1);
       
   518 /*21*/	u8 X(X(pad21_1:3, multicast_all:1), pad21_2:4);
       
   519 /*22*/	u8 X(X(rx_d102_mode:1, rx_vlan_drop:1), pad22:6);
       
   520 	u8 pad_d102[9];
       
   521 };
       
   522 
       
   523 #define E100_MAX_MULTICAST_ADDRS	64
       
   524 struct multi {
       
   525 	__le16 count;
       
   526 	u8 addr[E100_MAX_MULTICAST_ADDRS * ETH_ALEN + 2/*pad*/];
       
   527 };
       
   528 
       
   529 /* Important: keep total struct u32-aligned */
       
   530 #define UCODE_SIZE			134
       
   531 struct cb {
       
   532 	__le16 status;
       
   533 	__le16 command;
       
   534 	__le32 link;
       
   535 	union {
       
   536 		u8 iaaddr[ETH_ALEN];
       
   537 		__le32 ucode[UCODE_SIZE];
       
   538 		struct config config;
       
   539 		struct multi multi;
       
   540 		struct {
       
   541 			u32 tbd_array;
       
   542 			u16 tcb_byte_count;
       
   543 			u8 threshold;
       
   544 			u8 tbd_count;
       
   545 			struct {
       
   546 				__le32 buf_addr;
       
   547 				__le16 size;
       
   548 				u16 eol;
       
   549 			} tbd;
       
   550 		} tcb;
       
   551 		__le32 dump_buffer_addr;
       
   552 	} u;
       
   553 	struct cb *next, *prev;
       
   554 	dma_addr_t dma_addr;
       
   555 	struct sk_buff *skb;
       
   556 };
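        /* Only status/command/link and the command-specific union form the
         * Command Block the controller DMAs; the trailing next/prev,
         * dma_addr and skb fields are driver-side bookkeeping that the
         * hardware never interprets. */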
       
   557 
       
   558 enum loopback {
       
   559 	lb_none = 0, lb_mac = 1, lb_phy = 3,
       
   560 };
       
   561 
       
   562 struct stats {
       
   563 	__le32 tx_good_frames, tx_max_collisions, tx_late_collisions,
       
   564 		tx_underruns, tx_lost_crs, tx_deferred, tx_single_collisions,
       
   565 		tx_multiple_collisions, tx_total_collisions;
       
   566 	__le32 rx_good_frames, rx_crc_errors, rx_alignment_errors,
       
   567 		rx_resource_errors, rx_overrun_errors, rx_cdt_errors,
       
   568 		rx_short_frame_errors;
       
   569 	__le32 fc_xmt_pause, fc_rcv_pause, fc_rcv_unsupported;
       
   570 	__le16 xmt_tco_frames, rcv_tco_frames;
       
   571 	__le32 complete;
       
   572 };
       
   573 
       
   574 struct mem {
       
   575 	struct {
       
   576 		u32 signature;
       
   577 		u32 result;
       
   578 	} selftest;
       
   579 	struct stats stats;
       
   580 	u8 dump_buf[596];
       
   581 };
       
   582 
       
   583 struct param_range {
       
   584 	u32 min;
       
   585 	u32 max;
       
   586 	u32 count;
       
   587 };
       
   588 
       
   589 struct params {
       
   590 	struct param_range rfds;
       
   591 	struct param_range cbs;
       
   592 };
       
   593 
       
   594 struct nic {
       
   595 	/* Begin: frequently used values: keep adjacent for cache effect */
       
   596 	u32 msg_enable				____cacheline_aligned;
       
   597 	struct net_device *netdev;
       
   598 	struct pci_dev *pdev;
       
   599 
       
   600 	struct rx *rxs				____cacheline_aligned;
       
   601 	struct rx *rx_to_use;
       
   602 	struct rx *rx_to_clean;
       
   603 	struct rfd blank_rfd;
       
   604 	enum ru_state ru_running;
       
   605 
       
   606 	spinlock_t cb_lock			____cacheline_aligned;
       
   607 	spinlock_t cmd_lock;
       
   608 	struct csr __iomem *csr;
       
   609 	enum scb_cmd_lo cuc_cmd;
       
   610 	unsigned int cbs_avail;
       
   611 	struct napi_struct napi;
       
   612 	struct cb *cbs;
       
   613 	struct cb *cb_to_use;
       
   614 	struct cb *cb_to_send;
       
   615 	struct cb *cb_to_clean;
       
   616 	__le16 tx_command;
       
   617 	/* End: frequently used values: keep adjacent for cache effect */
       
   618 
       
   619 	enum {
       
   620 		ich                = (1 << 0),
       
   621 		promiscuous        = (1 << 1),
       
   622 		multicast_all      = (1 << 2),
       
   623 		wol_magic          = (1 << 3),
       
   624 		ich_10h_workaround = (1 << 4),
       
   625 	} flags					____cacheline_aligned;
       
   626 
       
   627 	enum mac mac;
       
   628 	enum phy phy;
       
   629 	struct params params;
       
   630 	struct timer_list watchdog;
       
   631 	struct timer_list blink_timer;
       
   632 	struct mii_if_info mii;
       
   633 	struct work_struct tx_timeout_task;
       
   634 	enum loopback loopback;
       
   635 
       
   636 	struct mem *mem;
       
   637 	dma_addr_t dma_addr;
       
   638 
       
   639 	dma_addr_t cbs_dma_addr;
       
   640 	u8 adaptive_ifs;
       
   641 	u8 tx_threshold;
       
   642 	u32 tx_frames;
       
   643 	u32 tx_collisions;
       
   644 	u32 tx_deferred;
       
   645 	u32 tx_single_collisions;
       
   646 	u32 tx_multiple_collisions;
       
   647 	u32 tx_fc_pause;
       
   648 	u32 tx_tco_frames;
       
   649 
       
   650 	u32 rx_fc_pause;
       
   651 	u32 rx_fc_unsupported;
       
   652 	u32 rx_tco_frames;
       
   653 	u32 rx_over_length_errors;
       
   654 
       
   655 	u16 leds;
       
   656 	u16 eeprom_wc;
       
   657 	__le16 eeprom[256];
       
   658 	spinlock_t mdio_lock;
       
   659 
       
   660 	ec_device_t *ecdev;
       
   661 	unsigned long ec_watchdog_jiffies;
       
   662 };
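        /* EtherCAT additions to the stock e100 state: 'ecdev' is non-NULL
         * once the master has claimed this NIC (tested throughout as
         * nic->ecdev), and ec_watchdog_jiffies apparently paces periodic
         * housekeeping when the device is driven by e100_ec_poll(). */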
       
   663 
       
   664 static inline void e100_write_flush(struct nic *nic)
       
   665 {
       
   666 	/* Flush previous PCI writes through intermediate bridges
       
   667 	 * by doing a benign read */
       
   668 	(void)ioread8(&nic->csr->scb.status);
       
   669 }
       
   670 
       
   671 static void e100_enable_irq(struct nic *nic)
       
   672 {
       
   673 	unsigned long flags;
       
   674 
       
   675 	if (nic->ecdev)
       
   676 		return;
       
   677 
       
   678 	spin_lock_irqsave(&nic->cmd_lock, flags);
       
   679 	iowrite8(irq_mask_none, &nic->csr->scb.cmd_hi);
       
   680 	e100_write_flush(nic);
       
   681 	spin_unlock_irqrestore(&nic->cmd_lock, flags);
       
   682 }
       
   683 
       
   684 static void e100_disable_irq(struct nic *nic)
       
   685 {
       
   686 	unsigned long flags = 0;
       
   687 
       
   688 	if (!nic->ecdev)
       
   689 		spin_lock_irqsave(&nic->cmd_lock, flags);
       
   690 	iowrite8(irq_mask_all, &nic->csr->scb.cmd_hi);
       
   691 	e100_write_flush(nic);
       
   692 	if (!nic->ecdev)
       
   693 		spin_unlock_irqrestore(&nic->cmd_lock, flags);
       
   694 }
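        /* Note the pattern in both IRQ helpers: when EtherCAT has claimed
         * the device (nic->ecdev set), interrupts stay masked and access
         * presumably happens from the master's polling context only
         * (e100_ec_poll above), so taking cmd_lock is skipped. */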
       
   695 
       
   696 static void e100_hw_reset(struct nic *nic)
       
   697 {
       
   698 	/* Put CU and RU into idle with a selective reset to get
       
   699 	 * device off of PCI bus */
       
   700 	iowrite32(selective_reset, &nic->csr->port);
       
   701 	e100_write_flush(nic); udelay(20);
       
   702 
       
   703 	/* Now fully reset device */
       
   704 	iowrite32(software_reset, &nic->csr->port);
       
   705 	e100_write_flush(nic); udelay(20);
       
   706 
       
   707 	/* Mask off our interrupt line - it's unmasked after reset */
       
   708 	e100_disable_irq(nic);
       
   709 }
       
   710 
       
   711 static int e100_self_test(struct nic *nic)
       
   712 {
       
   713 	u32 dma_addr = nic->dma_addr + offsetof(struct mem, selftest);
       
   714 
       
   715 	/* Passing the self-test is a pretty good indication
       
   716 	 * that the device can DMA to/from host memory */
       
   717 
       
   718 	nic->mem->selftest.signature = 0;
       
   719 	nic->mem->selftest.result = 0xFFFFFFFF;
       
   720 
       
   721 	iowrite32(selftest | dma_addr, &nic->csr->port);
       
   722 	e100_write_flush(nic);
       
   723 	/* Wait 10 msec for self-test to complete */
       
   724 	msleep(10);
       
   725 
       
   726 	/* Interrupts are enabled after self-test */
       
   727 	e100_disable_irq(nic);
       
   728 
       
   729 	/* Check results of self-test */
       
   730 	if (nic->mem->selftest.result != 0) {
       
   731 		DPRINTK(HW, ERR, "Self-test failed: result=0x%08X\n",
       
   732 			nic->mem->selftest.result);
       
   733 		return -ETIMEDOUT;
       
   734 	}
       
   735 	if (nic->mem->selftest.signature == 0) {
       
   736 		DPRINTK(HW, ERR, "Self-test failed: timed out\n");
       
   737 		return -ETIMEDOUT;
       
   738 	}
       
   739 
       
   740 	return 0;
       
   741 }
       
   742 
       
   743 static void e100_eeprom_write(struct nic *nic, u16 addr_len, u16 addr, __le16 data)
       
   744 {
       
   745 	u32 cmd_addr_data[3];
       
   746 	u8 ctrl;
       
   747 	int i, j;
       
   748 
       
   749 	/* Three cmds: write/erase enable, write data, write/erase disable */
       
   750 	cmd_addr_data[0] = op_ewen << (addr_len - 2);
       
   751 	cmd_addr_data[1] = (((op_write << addr_len) | addr) << 16) |
       
   752 		le16_to_cpu(data);
       
   753 	cmd_addr_data[2] = op_ewds << (addr_len - 2);
       
   754 
       
   755 	/* Bit-bang cmds to write word to eeprom */
       
   756 	for (j = 0; j < 3; j++) {
       
   757 
       
   758 		/* Chip select */
       
   759 		iowrite8(eecs | eesk, &nic->csr->eeprom_ctrl_lo);
       
   760 		e100_write_flush(nic); udelay(4);
       
   761 
       
   762 		for (i = 31; i >= 0; i--) {
       
   763 			ctrl = (cmd_addr_data[j] & (1 << i)) ?
       
   764 				eecs | eedi : eecs;
       
   765 			iowrite8(ctrl, &nic->csr->eeprom_ctrl_lo);
       
   766 			e100_write_flush(nic); udelay(4);
       
   767 
       
   768 			iowrite8(ctrl | eesk, &nic->csr->eeprom_ctrl_lo);
       
   769 			e100_write_flush(nic); udelay(4);
       
   770 		}
       
   771 		/* Wait 10 msec for cmd to complete */
       
   772 		msleep(10);
       
   773 
       
   774 		/* Chip deselect */
       
   775 		iowrite8(0, &nic->csr->eeprom_ctrl_lo);
       
   776 		e100_write_flush(nic); udelay(4);
       
   777 	}
       
    778 }
       
   779 
       
   780 /* General technique stolen from the eepro100 driver - very clever */
       
   781 static __le16 e100_eeprom_read(struct nic *nic, u16 *addr_len, u16 addr)
       
   782 {
       
   783 	u32 cmd_addr_data;
       
   784 	u16 data = 0;
       
   785 	u8 ctrl;
       
   786 	int i;
       
   787 
       
   788 	cmd_addr_data = ((op_read << *addr_len) | addr) << 16;
       
   789 
       
   790 	/* Chip select */
       
   791 	iowrite8(eecs | eesk, &nic->csr->eeprom_ctrl_lo);
       
   792 	e100_write_flush(nic); udelay(4);
       
   793 
       
   794 	/* Bit-bang to read word from eeprom */
       
   795 	for (i = 31; i >= 0; i--) {
       
   796 		ctrl = (cmd_addr_data & (1 << i)) ? eecs | eedi : eecs;
       
   797 		iowrite8(ctrl, &nic->csr->eeprom_ctrl_lo);
       
   798 		e100_write_flush(nic); udelay(4);
       
   799 
       
   800 		iowrite8(ctrl | eesk, &nic->csr->eeprom_ctrl_lo);
       
   801 		e100_write_flush(nic); udelay(4);
       
   802 
       
   803 		/* Eeprom drives a dummy zero to EEDO after receiving
       
   804 		 * complete address.  Use this to adjust addr_len. */
       
   805 		ctrl = ioread8(&nic->csr->eeprom_ctrl_lo);
       
   806 		if (!(ctrl & eedo) && i > 16) {
       
   807 			*addr_len -= (i - 16);
       
   808 			i = 17;
       
   809 		}
       
   810 
       
   811 		data = (data << 1) | (ctrl & eedo ? 1 : 0);
       
   812 	}
       
   813 
       
   814 	/* Chip deselect */
       
   815 	iowrite8(0, &nic->csr->eeprom_ctrl_lo);
       
   816 	e100_write_flush(nic); udelay(4);
       
   817 
       
   818 	return cpu_to_le16(data);
       
    819 }
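        /* Address-length handshake, as used by the load/save routines
         * below: callers start with *addr_len = 8, the dummy-zero probe
         * above shrinks it to the part's real address width, and
         * 1 << addr_len then gives the EEPROM's word count. */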
       
   820 
       
   821 /* Load entire EEPROM image into driver cache and validate checksum */
       
   822 static int e100_eeprom_load(struct nic *nic)
       
   823 {
       
   824 	u16 addr, addr_len = 8, checksum = 0;
       
   825 
       
   826 	/* Try reading with an 8-bit addr len to discover actual addr len */
       
   827 	e100_eeprom_read(nic, &addr_len, 0);
       
   828 	nic->eeprom_wc = 1 << addr_len;
       
   829 
       
   830 	for (addr = 0; addr < nic->eeprom_wc; addr++) {
       
   831 		nic->eeprom[addr] = e100_eeprom_read(nic, &addr_len, addr);
       
   832 		if (addr < nic->eeprom_wc - 1)
       
   833 			checksum += le16_to_cpu(nic->eeprom[addr]);
       
   834 	}
       
   835 
       
   836 	/* The checksum, stored in the last word, is calculated such that
       
   837 	 * the sum of words should be 0xBABA */
       
   838 	if (cpu_to_le16(0xBABA - checksum) != nic->eeprom[nic->eeprom_wc - 1]) {
       
   839 		DPRINTK(PROBE, ERR, "EEPROM corrupted\n");
       
   840 		if (!eeprom_bad_csum_allow)
       
   841 			return -EAGAIN;
       
   842 	}
       
   843 
       
   844 	return 0;
       
   845 }
       
   846 
       
   847 /* Save (portion of) driver EEPROM cache to device and update checksum */
       
   848 static int e100_eeprom_save(struct nic *nic, u16 start, u16 count)
       
   849 {
       
   850 	u16 addr, addr_len = 8, checksum = 0;
       
   851 
       
   852 	/* Try reading with an 8-bit addr len to discover actual addr len */
       
   853 	e100_eeprom_read(nic, &addr_len, 0);
       
   854 	nic->eeprom_wc = 1 << addr_len;
       
   855 
       
   856 	if (start + count >= nic->eeprom_wc)
       
   857 		return -EINVAL;
       
   858 
       
   859 	for (addr = start; addr < start + count; addr++)
       
   860 		e100_eeprom_write(nic, addr_len, addr, nic->eeprom[addr]);
       
   861 
       
   862 	/* The checksum, stored in the last word, is calculated such that
       
   863 	 * the sum of words should be 0xBABA */
       
   864 	for (addr = 0; addr < nic->eeprom_wc - 1; addr++)
       
   865 		checksum += le16_to_cpu(nic->eeprom[addr]);
       
   866 	nic->eeprom[nic->eeprom_wc - 1] = cpu_to_le16(0xBABA - checksum);
       
   867 	e100_eeprom_write(nic, addr_len, nic->eeprom_wc - 1,
       
   868 		nic->eeprom[nic->eeprom_wc - 1]);
       
   869 
       
   870 	return 0;
       
   871 }
       
   872 
       
   873 #define E100_WAIT_SCB_TIMEOUT 20000 /* we might have to wait 100ms!!! */
       
   874 #define E100_WAIT_SCB_FAST 20       /* delay like the old code */
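        /* Worst case per these constants: 20 fast spins, then roughly
         * (20000 - 20) * 5us of udelay() back-off in the loop below, on
         * the order of the 100ms the comment above warns about. */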
       
   875 static int e100_exec_cmd(struct nic *nic, u8 cmd, dma_addr_t dma_addr)
       
   876 {
       
   877 	unsigned long flags = 0;
       
   878 	unsigned int i;
       
   879 	int err = 0;
       
   880 
       
   881 	if (!nic->ecdev)
       
   882 		spin_lock_irqsave(&nic->cmd_lock, flags);
       
   883 
       
   884 	/* Previous command is accepted when SCB clears */
       
   885 	for (i = 0; i < E100_WAIT_SCB_TIMEOUT; i++) {
       
   886 		if (likely(!ioread8(&nic->csr->scb.cmd_lo)))
       
   887 			break;
       
   888 		cpu_relax();
       
   889 		if (unlikely(i > E100_WAIT_SCB_FAST))
       
   890 			udelay(5);
       
   891 	}
       
   892 	if (unlikely(i == E100_WAIT_SCB_TIMEOUT)) {
       
   893 		err = -EAGAIN;
       
   894 		goto err_unlock;
       
   895 	}
       
   896 
       
   897 	if (unlikely(cmd != cuc_resume))
       
   898 		iowrite32(dma_addr, &nic->csr->scb.gen_ptr);
       
   899 	iowrite8(cmd, &nic->csr->scb.cmd_lo);
       
   900 
       
   901 err_unlock:
       
   902 	if (!nic->ecdev)
       
   903 		spin_unlock_irqrestore(&nic->cmd_lock, flags);
       
   904 
       
   905 	return err;
       
   906 }
       
   907 
       
   908 static int e100_exec_cb(struct nic *nic, struct sk_buff *skb,
       
   909 	void (*cb_prepare)(struct nic *, struct cb *, struct sk_buff *))
       
   910 {
       
   911 	struct cb *cb;
       
   912 	unsigned long flags = 0;
       
   913 	int err = 0;
       
   914 
       
   915 	if (!nic->ecdev)
       
   916 		spin_lock_irqsave(&nic->cb_lock, flags);
       
   917 
       
   918 	if (unlikely(!nic->cbs_avail)) {
       
   919 		err = -ENOMEM;
       
   920 		goto err_unlock;
       
   921 	}
       
   922 
       
   923 	cb = nic->cb_to_use;
       
   924 	nic->cb_to_use = cb->next;
       
   925 	nic->cbs_avail--;
       
   926 	cb->skb = skb;
       
   927 
       
   928 	if (unlikely(!nic->cbs_avail))
       
   929 		err = -ENOSPC;
       
   930 
       
   931 	cb_prepare(nic, cb, skb);
       
   932 
       
   933 	/* Order is important otherwise we'll be in a race with h/w:
       
   934 	 * set S-bit in current first, then clear S-bit in previous. */
       
   935 	cb->command |= cpu_to_le16(cb_s);
       
   936 	wmb();
       
   937 	cb->prev->command &= cpu_to_le16(~cb_s);
       
   938 
       
   939 	while (nic->cb_to_send != nic->cb_to_use) {
       
   940 		if (unlikely(e100_exec_cmd(nic, nic->cuc_cmd,
       
   941 			nic->cb_to_send->dma_addr))) {
       
   942 			/* Ok, here's where things get sticky.  It's
       
   943 			 * possible that we can't schedule the command
       
   944 			 * because the controller is too busy, so
       
   945 			 * let's just queue the command and try again
       
   946 			 * when another command is scheduled. */
       
   947 			if (err == -ENOSPC) {
       
    948 				/* request a reset */
       
   949 				schedule_work(&nic->tx_timeout_task);
       
   950 			}
       
   951 			break;
       
   952 		} else {
       
   953 			nic->cuc_cmd = cuc_resume;
       
   954 			nic->cb_to_send = nic->cb_to_send->next;
       
   955 		}
       
   956 	}
       
   957 
       
   958 err_unlock:
       
   959 	if (!nic->ecdev)
       
   960 		spin_unlock_irqrestore(&nic->cb_lock, flags);
       
   961 
       
   962 	return err;
       
   963 }
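        /* Suspend/resume handoff (Theory of Operation III): every queued
         * CB carries the S-bit, so the CU suspends after the tail; the
         * wmb() orders setting S in the new CB before clearing it in the
         * previous one, and cuc_resume then restarts from the suspend
         * point. */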
       
   964 
       
   965 static u16 mdio_ctrl(struct nic *nic, u32 addr, u32 dir, u32 reg, u16 data)
       
   966 {
       
   967 	u32 data_out = 0;
       
   968 	unsigned int i;
       
   969 	unsigned long flags = 0;
       
   970 
       
   971 
       
   972 	/*
       
   973 	 * Stratus87247: we shouldn't be writing the MDI control
       
   974 	 * register until the Ready bit shows True.  Also, since
       
   975 	 * manipulation of the MDI control registers is a multi-step
       
   976 	 * procedure it should be done under lock.
       
   977 	 */
       
   978 	if (!nic->ecdev)
       
   979 		spin_lock_irqsave(&nic->mdio_lock, flags);
       
   980 	for (i = 100; i; --i) {
       
   981 		if (ioread32(&nic->csr->mdi_ctrl) & mdi_ready)
       
   982 			break;
       
   983 		udelay(20);
       
   984 	}
       
   985 	if (unlikely(!i)) {
       
    986 		printk(KERN_ERR "e100.mdio_ctrl(%s) won't go Ready\n",
        
    987 			nic->netdev->name);
       
   988 		if (!nic->ecdev)
       
   989 			spin_unlock_irqrestore(&nic->mdio_lock, flags);
       
   990 		return 0;		/* No way to indicate timeout error */
       
   991 	}
       
   992 	iowrite32((reg << 16) | (addr << 21) | dir | data, &nic->csr->mdi_ctrl);
       
   993 
       
   994 	for (i = 0; i < 100; i++) {
       
   995 		udelay(20);
       
   996 		if ((data_out = ioread32(&nic->csr->mdi_ctrl)) & mdi_ready)
       
   997 			break;
       
   998 	}
       
   999 	if (!nic->ecdev)
       
  1000 		spin_unlock_irqrestore(&nic->mdio_lock, flags);
       
  1001 	DPRINTK(HW, DEBUG,
       
  1002 		"%s:addr=%d, reg=%d, data_in=0x%04X, data_out=0x%04X\n",
       
  1003 		dir == mdi_read ? "READ" : "WRITE", addr, reg, data, data_out);
       
  1004 	return (u16)data_out;
       
  1005 }
       
  1006 
       
  1007 static int mdio_read(struct net_device *netdev, int addr, int reg)
       
  1008 {
       
  1009 	return mdio_ctrl(netdev_priv(netdev), addr, mdi_read, reg, 0);
       
  1010 }
       
  1011 
       
  1012 static void mdio_write(struct net_device *netdev, int addr, int reg, int data)
       
  1013 {
       
  1014 	mdio_ctrl(netdev_priv(netdev), addr, mdi_write, reg, data);
       
  1015 }
       
  1016 
       
  1017 static void e100_get_defaults(struct nic *nic)
       
  1018 {
       
  1019 	struct param_range rfds = { .min = 16, .max = 256, .count = 256 };
       
  1020 	struct param_range cbs  = { .min = 64, .max = 256, .count = 128 };
       
  1021 
       
  1022 	/* MAC type is encoded as rev ID; exception: ICH is treated as 82559 */
       
  1023 	nic->mac = (nic->flags & ich) ? mac_82559_D101M : nic->pdev->revision;
       
  1024 	if (nic->mac == mac_unknown)
       
  1025 		nic->mac = mac_82557_D100_A;
       
  1026 
       
  1027 	nic->params.rfds = rfds;
       
  1028 	nic->params.cbs = cbs;
       
  1029 
       
  1030 	/* Quadwords to DMA into FIFO before starting frame transmit */
       
  1031 	nic->tx_threshold = 0xE0;
       
  1032 
       
  1033 	/* no interrupt for every tx completion, delay = 256us if not 557 */
       
  1034 	nic->tx_command = cpu_to_le16(cb_tx | cb_tx_sf |
       
  1035 		((nic->mac >= mac_82558_D101_A4) ? cb_cid : cb_i));
       
  1036 
       
  1037 	/* Template for a freshly allocated RFD */
       
  1038 	nic->blank_rfd.command = 0;
       
  1039 	nic->blank_rfd.rbd = cpu_to_le32(0xFFFFFFFF);
       
  1040 	nic->blank_rfd.size = cpu_to_le16(VLAN_ETH_FRAME_LEN);
       
  1041 
       
  1042 	/* MII setup */
       
  1043 	nic->mii.phy_id_mask = 0x1F;
       
  1044 	nic->mii.reg_num_mask = 0x1F;
       
  1045 	nic->mii.dev = nic->netdev;
       
  1046 	nic->mii.mdio_read = mdio_read;
       
  1047 	nic->mii.mdio_write = mdio_write;
       
  1048 }
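        /* blank_rfd is the template stamped onto each fresh Rx buffer:
         * rbd = 0xFFFFFFFF marks "no RBD" (the simplified memory model of
         * Theory of Operation IV), and size covers a full VLAN-tagged
         * Ethernet frame. */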
       
  1049 
       
  1050 static void e100_configure(struct nic *nic, struct cb *cb, struct sk_buff *skb)
       
  1051 {
       
  1052 	struct config *config = &cb->u.config;
       
  1053 	u8 *c = (u8 *)config;
       
  1054 
       
  1055 	cb->command = cpu_to_le16(cb_config);
       
  1056 
       
  1057 	memset(config, 0, sizeof(struct config));
       
  1058 
       
  1059 	config->byte_count = 0x16;		/* bytes in this struct */
       
  1060 	config->rx_fifo_limit = 0x8;		/* bytes in FIFO before DMA */
       
  1061 	config->direct_rx_dma = 0x1;		/* reserved */
       
  1062 	config->standard_tcb = 0x1;		/* 1=standard, 0=extended */
       
  1063 	config->standard_stat_counter = 0x1;	/* 1=standard, 0=extended */
       
  1064 	config->rx_discard_short_frames = 0x1;	/* 1=discard, 0=pass */
       
  1065 	config->tx_underrun_retry = 0x3;	/* # of underrun retries */
       
  1066 	config->mii_mode = 0x1;			/* 1=MII mode, 0=503 mode */
       
  1067 	config->pad10 = 0x6;
       
  1068 	config->no_source_addr_insertion = 0x1;	/* 1=no, 0=yes */
       
  1069 	config->preamble_length = 0x2;		/* 0=1, 1=3, 2=7, 3=15 bytes */
       
  1070 	config->ifs = 0x6;			/* x16 = inter frame spacing */
       
  1071 	config->ip_addr_hi = 0xF2;		/* ARP IP filter - not used */
       
  1072 	config->pad15_1 = 0x1;
       
  1073 	config->pad15_2 = 0x1;
       
  1074 	config->crs_or_cdt = 0x0;		/* 0=CRS only, 1=CRS or CDT */
       
  1075 	config->fc_delay_hi = 0x40;		/* time delay for fc frame */
       
  1076 	config->tx_padding = 0x1;		/* 1=pad short frames */
       
  1077 	config->fc_priority_threshold = 0x7;	/* 7=priority fc disabled */
       
  1078 	config->pad18 = 0x1;
       
  1079 	config->full_duplex_pin = 0x1;		/* 1=examine FDX# pin */
       
  1080 	config->pad20_1 = 0x1F;
       
  1081 	config->fc_priority_location = 0x1;	/* 1=byte#31, 0=byte#19 */
       
  1082 	config->pad21_1 = 0x5;
       
  1083 
       
  1084 	config->adaptive_ifs = nic->adaptive_ifs;
       
  1085 	config->loopback = nic->loopback;
       
  1086 
       
  1087 	if (nic->mii.force_media && nic->mii.full_duplex)
       
  1088 		config->full_duplex_force = 0x1;	/* 1=force, 0=auto */
       
  1089 
       
  1090 	if (nic->flags & promiscuous || nic->loopback) {
       
  1091 		config->rx_save_bad_frames = 0x1;	/* 1=save, 0=discard */
       
  1092 		config->rx_discard_short_frames = 0x0;	/* 1=discard, 0=save */
       
  1093 		config->promiscuous_mode = 0x1;		/* 1=on, 0=off */
       
  1094 	}
       
  1095 
       
  1096 	if (nic->flags & multicast_all)
       
  1097 		config->multicast_all = 0x1;		/* 1=accept, 0=no */
       
  1098 
       
  1099 	/* disable WoL when up */
       
  1100 	if (nic->ecdev || 
       
  1101 			(netif_running(nic->netdev) || !(nic->flags & wol_magic)))
       
  1102 		config->magic_packet_disable = 0x1;	/* 1=off, 0=on */
       
  1103 
       
  1104 	if (nic->mac >= mac_82558_D101_A4) {
       
  1105 		config->fc_disable = 0x1;	/* 1=Tx fc off, 0=Tx fc on */
       
  1106 		config->mwi_enable = 0x1;	/* 1=enable, 0=disable */
       
  1107 		config->standard_tcb = 0x0;	/* 1=standard, 0=extended */
       
  1108 		config->rx_long_ok = 0x1;	/* 1=VLANs ok, 0=standard */
       
  1109 		if (nic->mac >= mac_82559_D101M) {
       
  1110 			config->tno_intr = 0x1;		/* TCO stats enable */
       
  1111 			/* Enable TCO in extended config */
       
  1112 			if (nic->mac >= mac_82551_10) {
       
  1113 				config->byte_count = 0x20; /* extended bytes */
       
  1114 				config->rx_d102_mode = 0x1; /* GMRC for TCO */
       
  1115 			}
       
  1116 		} else {
       
  1117 			config->standard_stat_counter = 0x0;
       
  1118 		}
       
  1119 	}
       
  1120 
       
  1121 	DPRINTK(HW, DEBUG, "[00-07]=%02X:%02X:%02X:%02X:%02X:%02X:%02X:%02X\n",
       
  1122 		c[0], c[1], c[2], c[3], c[4], c[5], c[6], c[7]);
       
  1123 	DPRINTK(HW, DEBUG, "[08-15]=%02X:%02X:%02X:%02X:%02X:%02X:%02X:%02X\n",
       
  1124 		c[8], c[9], c[10], c[11], c[12], c[13], c[14], c[15]);
       
  1125 	DPRINTK(HW, DEBUG, "[16-23]=%02X:%02X:%02X:%02X:%02X:%02X:%02X:%02X\n",
       
  1126 		c[16], c[17], c[18], c[19], c[20], c[21], c[22], c[23]);
       
  1127 }
       
  1128 
       
  1129 /*************************************************************************
       
  1130 *  CPUSaver parameters
       
  1131 *
       
  1132 *  All CPUSaver parameters are 16-bit literals that are part of a
       
  1133 *  "move immediate value" instruction.  By changing the value of
       
  1134 *  the literal in the instruction before the code is loaded, the
       
  1135 *  driver can change the algorithm.
       
  1136 *
       
  1137 *  INTDELAY - This loads the dead-man timer with its initial value.
       
  1138 *    When this timer expires the interrupt is asserted, and the
       
  1139 *    timer is reset each time a new packet is received.  (see
       
  1140 *    BUNDLEMAX below to set the limit on number of chained packets)
       
  1141 *    The current default is 0x600 or 1536.  Experiments show that
       
  1142 *    the value should probably stay within the 0x200 - 0x1000.
       
  1143 *
       
  1144 *  BUNDLEMAX -
       
  1145 *    This sets the maximum number of frames that will be bundled.  In
       
  1146 *    some situations, such as the TCP windowing algorithm, it may be
       
  1147 *    better to limit the growth of the bundle size than let it go as
       
  1148 *    high as it can, because that could cause too much added latency.
       
  1149 *    The default is six, because this is the number of packets in the
       
  1150 *    default TCP window size.  A value of 1 would make CPUSaver indicate
       
  1151 *    an interrupt for every frame received.  If you do not want to put
       
   1152 *    a limit on the bundle size, set this value to 0xFFFF.
       
  1153 *
       
  1154 *  BUNDLESMALL -
       
  1155 *    This contains a bit-mask describing the minimum size frame that
       
  1156 *    will be bundled.  The default masks the lower 7 bits, which means
       
  1157 *    that any frame less than 128 bytes in length will not be bundled,
       
  1158 *    but will instead immediately generate an interrupt.  This does
       
  1159 *    not affect the current bundle in any way.  Any frame that is 128
       
   1160 *    bytes or larger will be bundled normally.  This feature is meant
       
  1161 *    to provide immediate indication of ACK frames in a TCP environment.
       
  1162 *    Customers were seeing poor performance when a machine with CPUSaver
       
  1163 *    enabled was sending but not receiving.  The delay introduced when
       
  1164 *    the ACKs were received was enough to reduce total throughput, because
       
  1165 *    the sender would sit idle until the ACK was finally seen.
       
  1166 *
       
  1167 *    The current default is 0xFF80, which masks out the lower 7 bits.
       
   1168 *    This means that any frame which is 0x7F (127) bytes or smaller
       
  1169 *    will cause an immediate interrupt.  Because this value must be a
       
  1170 *    bit mask, there are only a few valid values that can be used.  To
       
   1171 *    turn this feature off, the driver can write the value 0xFFFF to the
       
  1172 *    lower word of this instruction (in the same way that the other
       
  1173 *    parameters are used).  Likewise, a value of 0xF800 (2047) would
       
  1174 *    cause an interrupt to be generated for every frame, because all
       
  1175 *    standard Ethernet frames are <= 2047 bytes in length.
       
  1176 *************************************************************************/
       
  1177 
       
  1178 /* if you wish to disable the ucode functionality, while maintaining the
       
  1179  * workarounds it provides, set the following defines to:
       
  1180  * BUNDLESMALL 0
       
  1181  * BUNDLEMAX 1
       
  1182  * INTDELAY 1
       
  1183  */
       
  1184 #define BUNDLESMALL 1
       
  1185 #define BUNDLEMAX (u16)6
       
  1186 #define INTDELAY (u16)1536 /* 0x600 */
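        /* These literals are patched into the microcode at load time:
         * e100_setup_ucode() below keeps the high half of the ucode word
         * at each offset taken from the firmware blob's tail and ORs the
         * parameter into the low 16 bits. */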
       
  1187 
       
  1188 /* Initialize firmware */
       
  1189 static const struct firmware *e100_request_firmware(struct nic *nic)
       
  1190 {
       
  1191 	const char *fw_name;
       
  1192 	const struct firmware *fw;
       
  1193 	u8 timer, bundle, min_size;
       
  1194 	int err;
       
  1195 
       
  1196 	/* do not load u-code for ICH devices */
       
  1197 	if (nic->flags & ich)
       
  1198 		return NULL;
       
  1199 
       
  1200 	/* Search for ucode match against h/w revision */
       
  1201 	if (nic->mac == mac_82559_D101M)
       
  1202 		fw_name = FIRMWARE_D101M;
       
  1203 	else if (nic->mac == mac_82559_D101S)
       
  1204 		fw_name = FIRMWARE_D101S;
       
  1205 	else if (nic->mac == mac_82551_F || nic->mac == mac_82551_10)
       
  1206 		fw_name = FIRMWARE_D102E;
       
  1207 	else /* No ucode on other devices */
       
  1208 		return NULL;
       
  1209 
       
  1210 	err = request_firmware(&fw, fw_name, &nic->pdev->dev);
       
  1211 	if (err) {
       
  1212 		DPRINTK(PROBE, ERR, "Failed to load firmware \"%s\": %d\n",
       
  1213 			fw_name, err);
       
  1214 		return ERR_PTR(err);
       
  1215 	}
       
  1216 	/* Firmware should be precisely UCODE_SIZE (words) plus three bytes
       
  1217 	   indicating the offsets for BUNDLESMALL, BUNDLEMAX, INTDELAY */
       
  1218 	if (fw->size != UCODE_SIZE * 4 + 3) {
       
  1219 		DPRINTK(PROBE, ERR, "Firmware \"%s\" has wrong size %zu\n",
       
  1220 			fw_name, fw->size);
       
  1221 		release_firmware(fw);
       
  1222 		return ERR_PTR(-EINVAL);
       
  1223 	}
       
  1224 
       
  1225 	/* Read timer, bundle and min_size from end of firmware blob */
       
  1226 	timer = fw->data[UCODE_SIZE * 4];
       
  1227 	bundle = fw->data[UCODE_SIZE * 4 + 1];
       
  1228 	min_size = fw->data[UCODE_SIZE * 4 + 2];
       
  1229 
       
  1230 	if (timer >= UCODE_SIZE || bundle >= UCODE_SIZE ||
       
  1231 	    min_size >= UCODE_SIZE) {
       
  1232 		DPRINTK(PROBE, ERR,
       
  1233 			"\"%s\" has bogus offset values (0x%x,0x%x,0x%x)\n",
       
  1234 			fw_name, timer, bundle, min_size);
       
  1235 		release_firmware(fw);
       
  1236 		return ERR_PTR(-EINVAL);
       
  1237 	}
       
  1238 	/* OK, firmware is validated and ready to use... */
       
  1239 	return fw;
       
  1240 }
       
  1241 
       
  1242 static void e100_setup_ucode(struct nic *nic, struct cb *cb,
       
  1243 			     struct sk_buff *skb)
       
  1244 {
       
  1245 	const struct firmware *fw = (void *)skb;
       
  1246 	u8 timer, bundle, min_size;
       
  1247 
       
  1248 	/* It's not a real skb; we just abused the fact that e100_exec_cb
       
  1249 	   will pass it through to here... */
       
  1250 	cb->skb = NULL;
       
  1251 
       
  1252 	/* firmware is stored as little endian already */
       
  1253 	memcpy(cb->u.ucode, fw->data, UCODE_SIZE * 4);
       
  1254 
       
  1255 	/* Read timer, bundle and min_size from end of firmware blob */
       
  1256 	timer = fw->data[UCODE_SIZE * 4];
       
  1257 	bundle = fw->data[UCODE_SIZE * 4 + 1];
       
  1258 	min_size = fw->data[UCODE_SIZE * 4 + 2];
       
  1259 
       
  1260 	/* Insert user-tunable settings in cb->u.ucode */
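       	/* Each offset selects the instruction whose low 16 bits hold the
        
       	 * parameter literal; the masks below keep the opcode in the upper
        
       	 * half and patch only the literal. */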
       
  1261 	cb->u.ucode[timer] &= cpu_to_le32(0xFFFF0000);
       
  1262 	cb->u.ucode[timer] |= cpu_to_le32(INTDELAY);
       
  1263 	cb->u.ucode[bundle] &= cpu_to_le32(0xFFFF0000);
       
  1264 	cb->u.ucode[bundle] |= cpu_to_le32(BUNDLEMAX);
       
  1265 	cb->u.ucode[min_size] &= cpu_to_le32(0xFFFF0000);
       
  1266 	cb->u.ucode[min_size] |= cpu_to_le32((BUNDLESMALL) ? 0xFFFF : 0xFF80);
       
  1267 
       
  1268 	cb->command = cpu_to_le16(cb_ucode | cb_el);
       
  1269 }
       
  1270 
       
  1271 static inline int e100_load_ucode_wait(struct nic *nic)
       
  1272 {
       
  1273 	const struct firmware *fw;
       
  1274 	int err = 0, counter = 50;
       
  1275 	struct cb *cb = nic->cb_to_clean;
       
  1276 
       
  1277 	fw = e100_request_firmware(nic);
       
  1278 	/* If it's NULL, then no ucode is required */
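       	/* (PTR_ERR(NULL) evaluates to 0, so this also reports success.) */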
       
  1279 	if (!fw || IS_ERR(fw))
       
  1280 		return PTR_ERR(fw);
       
  1281 
       
  1282 	if ((err = e100_exec_cb(nic, (void *)fw, e100_setup_ucode)))
       
  1283 		DPRINTK(PROBE, ERR, "ucode cmd failed with error %d\n", err);
       
  1284 
       
  1285 	/* must restart cuc */
       
  1286 	nic->cuc_cmd = cuc_start;
       
  1287 
       
  1288 	/* wait for completion */
       
  1289 	e100_write_flush(nic);
       
  1290 	udelay(10);
       
  1291 
       
  1292 	/* wait for possibly (ouch) 500ms */
       
  1293 	while (!(cb->status & cpu_to_le16(cb_complete))) {
       
  1294 		msleep(10);
       
  1295 		if (!--counter) break;
       
  1296 	}
       
  1297 
       
  1298 	/* ack any interrupts, something could have been set */
       
  1299 	iowrite8(~0, &nic->csr->scb.stat_ack);
       
  1300 
       
  1301 	/* if the command failed, or is not OK, notify and return */
       
  1302 	if (!counter || !(cb->status & cpu_to_le16(cb_ok))) {
       
  1303 		DPRINTK(PROBE, ERR, "ucode load failed\n");
       
  1304 		err = -EPERM;
       
  1305 	}
       
  1306 
       
  1307 	return err;
       
  1308 }
       
  1309 
       
  1310 static void e100_setup_iaaddr(struct nic *nic, struct cb *cb,
       
  1311 	struct sk_buff *skb)
       
  1312 {
       
  1313 	cb->command = cpu_to_le16(cb_iaaddr);
       
  1314 	memcpy(cb->u.iaaddr, nic->netdev->dev_addr, ETH_ALEN);
       
  1315 }
       
  1316 
       
  1317 static void e100_dump(struct nic *nic, struct cb *cb, struct sk_buff *skb)
       
  1318 {
       
  1319 	cb->command = cpu_to_le16(cb_dump);
       
  1320 	cb->u.dump_buffer_addr = cpu_to_le32(nic->dma_addr +
       
  1321 		offsetof(struct mem, dump_buf));
       
  1322 }
       
  1323 
       
  1324 #define NCONFIG_AUTO_SWITCH	0x0080
       
  1325 #define MII_NSC_CONG		MII_RESV1
       
  1326 #define NSC_CONG_ENABLE		0x0100
       
  1327 #define NSC_CONG_TXREADY	0x0400
       
  1328 #define ADVERTISE_FC_SUPPORTED	0x0400
       
  1329 static int e100_phy_init(struct nic *nic)
       
  1330 {
       
  1331 	struct net_device *netdev = nic->netdev;
       
  1332 	u32 addr;
       
  1333 	u16 bmcr, stat, id_lo, id_hi, cong;
       
  1334 
       
  1335 	/* Discover phy addr by searching addrs in order {1,0,2,..., 31} */
       
  1336 	for (addr = 0; addr < 32; addr++) {
       
  1337 		nic->mii.phy_id = (addr == 0) ? 1 : (addr == 1) ? 0 : addr;
       
  1338 		bmcr = mdio_read(netdev, nic->mii.phy_id, MII_BMCR);
       
  1339 		stat = mdio_read(netdev, nic->mii.phy_id, MII_BMSR);
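       		/* BMSR latches link-down events; the second read returns
        
       		 * the current link state. */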
       
  1340 		stat = mdio_read(netdev, nic->mii.phy_id, MII_BMSR);
       
  1341 		if (!((bmcr == 0xFFFF) || ((stat == 0) && (bmcr == 0))))
       
  1342 			break;
       
  1343 	}
       
  1344 	DPRINTK(HW, DEBUG, "phy_addr = %d\n", nic->mii.phy_id);
       
  1345 	if (addr == 32)
       
  1346 		return -EAGAIN;
       
  1347 
       
  1348 	/* Select the phy and isolate the rest */
       
  1349 	for (addr = 0; addr < 32; addr++) {
       
  1350 		if (addr != nic->mii.phy_id) {
       
  1351 			mdio_write(netdev, addr, MII_BMCR, BMCR_ISOLATE);
       
  1352 		} else {
       
  1353 			bmcr = mdio_read(netdev, addr, MII_BMCR);
       
  1354 			mdio_write(netdev, addr, MII_BMCR,
       
  1355 				bmcr & ~BMCR_ISOLATE);
       
  1356 		}
       
  1357 	}
       
  1358 
       
  1359 	/* Get phy ID */
       
  1360 	id_lo = mdio_read(netdev, nic->mii.phy_id, MII_PHYSID1);
       
  1361 	id_hi = mdio_read(netdev, nic->mii.phy_id, MII_PHYSID2);
       
  1362 	nic->phy = (u32)id_hi << 16 | (u32)id_lo;
       
  1363 	DPRINTK(HW, DEBUG, "phy ID = 0x%08X\n", nic->phy);
       
  1364 
       
  1365 	/* Handle National tx phys */
       
  1366 #define NCS_PHY_MODEL_MASK	0xFFF0FFFF
       
  1367 	if ((nic->phy & NCS_PHY_MODEL_MASK) == phy_nsc_tx) {
       
  1368 		/* Disable congestion control */
       
  1369 		cong = mdio_read(netdev, nic->mii.phy_id, MII_NSC_CONG);
       
  1370 		cong |= NSC_CONG_TXREADY;
       
  1371 		cong &= ~NSC_CONG_ENABLE;
       
  1372 		mdio_write(netdev, nic->mii.phy_id, MII_NSC_CONG, cong);
       
  1373 	}
       
  1374 
       
  1375 	if ((nic->mac >= mac_82550_D102) || ((nic->flags & ich) &&
       
  1376 	   (mdio_read(netdev, nic->mii.phy_id, MII_TPISTATUS) & 0x8000) &&
       
  1377 		!(nic->eeprom[eeprom_cnfg_mdix] & eeprom_mdix_enabled))) {
       
  1378 		/* enable/disable MDI/MDI-X auto-switching. */
       
  1379 		mdio_write(netdev, nic->mii.phy_id, MII_NCONFIG,
       
  1380 				nic->mii.force_media ? 0 : NCONFIG_AUTO_SWITCH);
       
  1381 	}
       
  1382 
       
  1383 	return 0;
       
  1384 }
       
  1385 
       
  1386 static int e100_hw_init(struct nic *nic)
       
  1387 {
       
  1388 	int err;
       
  1389 
       
  1390 	e100_hw_reset(nic);
       
  1391 
       
  1392 	DPRINTK(HW, ERR, "e100_hw_init\n");
       
  1393 	if (!in_interrupt() && (err = e100_self_test(nic)))
       
  1394 		return err;
       
  1395 
       
  1396 	if ((err = e100_phy_init(nic)))
       
  1397 		return err;
       
  1398 	if ((err = e100_exec_cmd(nic, cuc_load_base, 0)))
       
  1399 		return err;
       
  1400 	if ((err = e100_exec_cmd(nic, ruc_load_base, 0)))
       
  1401 		return err;
       
  1402 	if ((err = e100_load_ucode_wait(nic)))
       
  1403 		return err;
       
  1404 	if ((err = e100_exec_cb(nic, NULL, e100_configure)))
       
  1405 		return err;
       
  1406 	if ((err = e100_exec_cb(nic, NULL, e100_setup_iaaddr)))
       
  1407 		return err;
       
  1408 	if ((err = e100_exec_cmd(nic, cuc_dump_addr,
       
  1409 		nic->dma_addr + offsetof(struct mem, stats))))
       
  1410 		return err;
       
  1411 	if ((err = e100_exec_cmd(nic, cuc_dump_reset, 0)))
       
  1412 		return err;
       
  1413 
       
  1414 	e100_disable_irq(nic);
       
  1415 
       
  1416 	return 0;
       
  1417 }
       
  1418 
       
  1419 static void e100_multi(struct nic *nic, struct cb *cb, struct sk_buff *skb)
       
  1420 {
       
  1421 	struct net_device *netdev = nic->netdev;
       
  1422 	struct dev_mc_list *list = netdev->mc_list;
       
  1423 	u16 i, count = min(netdev->mc_count, E100_MAX_MULTICAST_ADDRS);
       
  1424 
       
  1425 	cb->command = cpu_to_le16(cb_multi);
       
  1426 	cb->u.multi.count = cpu_to_le16(count * ETH_ALEN);
       
  1427 	for (i = 0; list && i < count; i++, list = list->next)
       
  1428 		memcpy(&cb->u.multi.addr[i*ETH_ALEN], &list->dmi_addr,
       
  1429 			ETH_ALEN);
       
  1430 }
       
  1431 
       
  1432 static void e100_set_multicast_list(struct net_device *netdev)
       
  1433 {
       
  1434 	struct nic *nic = netdev_priv(netdev);
       
  1435 
       
  1436 	DPRINTK(HW, DEBUG, "mc_count=%d, flags=0x%04X\n",
       
  1437 		netdev->mc_count, netdev->flags);
       
  1438 
       
  1439 	if (netdev->flags & IFF_PROMISC)
       
  1440 		nic->flags |= promiscuous;
       
  1441 	else
       
  1442 		nic->flags &= ~promiscuous;
       
  1443 
       
  1444 	if (netdev->flags & IFF_ALLMULTI ||
       
  1445 		netdev->mc_count > E100_MAX_MULTICAST_ADDRS)
       
  1446 		nic->flags |= multicast_all;
       
  1447 	else
       
  1448 		nic->flags &= ~multicast_all;
       
  1449 
       
  1450 	e100_exec_cb(nic, NULL, e100_configure);
       
  1451 	e100_exec_cb(nic, NULL, e100_multi);
       
  1452 }
       
  1453 
       
  1454 static void e100_update_stats(struct nic *nic)
       
  1455 {
       
  1456 	struct net_device *dev = nic->netdev;
       
  1457 	struct net_device_stats *ns = &dev->stats;
       
  1458 	struct stats *s = &nic->mem->stats;
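       	/* The hardware writes a completion code just past the last counter
        
       	 * it supports, so the marker's offset depends on the MAC revision. */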
       
  1459 	__le32 *complete = (nic->mac < mac_82558_D101_A4) ? &s->fc_xmt_pause :
       
  1460 		(nic->mac < mac_82559_D101M) ? (__le32 *)&s->xmt_tco_frames :
       
  1461 		&s->complete;
       
  1462 
       
  1463 	/* Device's stats reporting may take several microseconds to
       
  1464 	 * complete, so we're always waiting for results of the
       
  1465 	 * previous command. */
       
  1466 
       
  1467 	if (*complete == cpu_to_le32(cuc_dump_reset_complete)) {
       
  1468 		*complete = 0;
       
  1469 		nic->tx_frames = le32_to_cpu(s->tx_good_frames);
       
  1470 		nic->tx_collisions = le32_to_cpu(s->tx_total_collisions);
       
  1471 		ns->tx_aborted_errors += le32_to_cpu(s->tx_max_collisions);
       
  1472 		ns->tx_window_errors += le32_to_cpu(s->tx_late_collisions);
       
  1473 		ns->tx_carrier_errors += le32_to_cpu(s->tx_lost_crs);
       
  1474 		ns->tx_fifo_errors += le32_to_cpu(s->tx_underruns);
       
  1475 		ns->collisions += nic->tx_collisions;
       
  1476 		ns->tx_errors += le32_to_cpu(s->tx_max_collisions) +
       
  1477 			le32_to_cpu(s->tx_lost_crs);
       
  1478 		ns->rx_length_errors += le32_to_cpu(s->rx_short_frame_errors) +
       
  1479 			nic->rx_over_length_errors;
       
  1480 		ns->rx_crc_errors += le32_to_cpu(s->rx_crc_errors);
       
  1481 		ns->rx_frame_errors += le32_to_cpu(s->rx_alignment_errors);
       
  1482 		ns->rx_over_errors += le32_to_cpu(s->rx_overrun_errors);
       
  1483 		ns->rx_fifo_errors += le32_to_cpu(s->rx_overrun_errors);
       
  1484 		ns->rx_missed_errors += le32_to_cpu(s->rx_resource_errors);
       
  1485 		ns->rx_errors += le32_to_cpu(s->rx_crc_errors) +
       
  1486 			le32_to_cpu(s->rx_alignment_errors) +
       
  1487 			le32_to_cpu(s->rx_short_frame_errors) +
       
  1488 			le32_to_cpu(s->rx_cdt_errors);
       
  1489 		nic->tx_deferred += le32_to_cpu(s->tx_deferred);
       
  1490 		nic->tx_single_collisions +=
       
  1491 			le32_to_cpu(s->tx_single_collisions);
       
  1492 		nic->tx_multiple_collisions +=
       
  1493 			le32_to_cpu(s->tx_multiple_collisions);
       
  1494 		if (nic->mac >= mac_82558_D101_A4) {
       
  1495 			nic->tx_fc_pause += le32_to_cpu(s->fc_xmt_pause);
       
  1496 			nic->rx_fc_pause += le32_to_cpu(s->fc_rcv_pause);
       
  1497 			nic->rx_fc_unsupported +=
       
  1498 				le32_to_cpu(s->fc_rcv_unsupported);
       
  1499 			if (nic->mac >= mac_82559_D101M) {
       
  1500 				nic->tx_tco_frames +=
       
  1501 					le16_to_cpu(s->xmt_tco_frames);
       
  1502 				nic->rx_tco_frames +=
       
  1503 					le16_to_cpu(s->rcv_tco_frames);
       
  1504 			}
       
  1505 		}
       
  1506 	}
       
  1507 
       
  1508 
       
  1509 	if (e100_exec_cmd(nic, cuc_dump_reset, 0))
       
  1510 		DPRINTK(TX_ERR, DEBUG, "exec cuc_dump_reset failed\n");
       
  1511 }
       
  1512 
       
  1513 static void e100_adjust_adaptive_ifs(struct nic *nic, int speed, int duplex)
       
  1514 {
       
  1515 	/* Adjust inter-frame-spacing (IFS) between two transmits if
       
  1516 	 * we're getting collisions on a half-duplex connection. */
       
  1517 
       
  1518 	if (duplex == DUPLEX_HALF) {
       
  1519 		u32 prev = nic->adaptive_ifs;
       
  1520 		u32 min_frames = (speed == SPEED_100) ? 1000 : 100;
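       		/* If more than 1/32 of the frames collided, widen the IFS
        
       		 * by 5; once traffic drops below min_frames, narrow it again. */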
       
  1521 
       
  1522 		if ((nic->tx_frames / 32 < nic->tx_collisions) &&
       
  1523 		   (nic->tx_frames > min_frames)) {
       
  1524 			if (nic->adaptive_ifs < 60)
       
  1525 				nic->adaptive_ifs += 5;
       
  1526 		} else if (nic->tx_frames < min_frames) {
       
  1527 			if (nic->adaptive_ifs >= 5)
       
  1528 				nic->adaptive_ifs -= 5;
       
  1529 		}
       
  1530 		if (nic->adaptive_ifs != prev)
       
  1531 			e100_exec_cb(nic, NULL, e100_configure);
       
  1532 	}
       
  1533 }
       
  1534 
       
  1535 static void e100_watchdog(unsigned long data)
       
  1536 {
       
  1537 	struct nic *nic = (struct nic *)data;
       
  1538 	struct ethtool_cmd cmd;
       
  1539 
       
  1540 	DPRINTK(TIMER, DEBUG, "right now = %ld\n", jiffies);
       
  1541 
       
  1542 	/* mii library handles link maintenance tasks */
       
  1543 
       
  1544 	if (nic->ecdev) {

  1545 		ecdev_set_link(nic->ecdev, mii_link_ok(&nic->mii) ? 1 : 0);
       
  1546 	} else {
       
  1547 		mii_ethtool_gset(&nic->mii, &cmd);
       
  1548 
       
  1549 		if (mii_link_ok(&nic->mii) && !netif_carrier_ok(nic->netdev)) {
       
  1550 			printk(KERN_INFO "e100: %s NIC Link is Up %s Mbps %s Duplex\n",
       
  1551 					nic->netdev->name,
       
  1552 					cmd.speed == SPEED_100 ? "100" : "10",
       
  1553 					cmd.duplex == DUPLEX_FULL ? "Full" : "Half");
       
  1554 		} else if (!mii_link_ok(&nic->mii) && netif_carrier_ok(nic->netdev)) {
       
  1555 			printk(KERN_INFO "e100: %s NIC Link is Down\n",
       
  1556 					nic->netdev->name);
       
  1557 		}
       
  1558 
       
  1559 		mii_check_link(&nic->mii);
       
  1560 
       
  1561 		/* Software generated interrupt to recover from (rare) Rx
       
  1562 		 * allocation failure.
       
  1563 		 * Unfortunately we have to take a spinlock to avoid re-enabling

  1564 		 * interrupts accidentally, because the hardware shares one register

  1565 		 * between the interrupt mask bit and the SW interrupt generation bit */
       
  1566 		spin_lock_irq(&nic->cmd_lock);
       
  1567 		iowrite8(ioread8(&nic->csr->scb.cmd_hi) | irq_sw_gen, &nic->csr->scb.cmd_hi);
       
  1568 		e100_write_flush(nic);
       
  1569 		spin_unlock_irq(&nic->cmd_lock);
       
  1570 
       
  1571 		e100_update_stats(nic);
       
  1572 		e100_adjust_adaptive_ifs(nic, cmd.speed, cmd.duplex);
       
  1573 
       
  1574 		if (nic->mac <= mac_82557_D100_C)
       
  1575 			/* Issue a multicast command to workaround a 557 lock up */
       
  1576 			e100_set_multicast_list(nic->netdev);
       
  1577 
       
  1578 		if (nic->flags & ich && cmd.speed==SPEED_10 && cmd.duplex==DUPLEX_HALF)
       
  1579 			/* Need SW workaround for ICH[x] 10Mbps/half duplex Tx hang. */
       
  1580 			nic->flags |= ich_10h_workaround;
       
  1581 		else
       
  1582 			nic->flags &= ~ich_10h_workaround;
       
  1583 
       
  1584 		mod_timer(&nic->watchdog,
       
  1585 				round_jiffies(jiffies + E100_WATCHDOG_PERIOD));
       
  1586 	}
       
  1587 }
       
  1588 
       
  1589 static void e100_xmit_prepare(struct nic *nic, struct cb *cb,
       
  1590 	struct sk_buff *skb)
       
  1591 {
       
  1592 	cb->command = nic->tx_command;
       
  1593 	/* interrupt every 16 packets regardless of delay */
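       	/* ((x & ~15) == x) holds exactly when x is a multiple of 16 */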
       
  1594 	if ((nic->cbs_avail & ~15) == nic->cbs_avail)
       
  1595 		cb->command |= cpu_to_le16(cb_i);
       
  1596 	cb->u.tcb.tbd_array = cb->dma_addr + offsetof(struct cb, u.tcb.tbd);
       
  1597 	cb->u.tcb.tcb_byte_count = 0;
       
  1598 	cb->u.tcb.threshold = nic->tx_threshold;
       
  1599 	cb->u.tcb.tbd_count = 1;
       
  1600 	cb->u.tcb.tbd.buf_addr = cpu_to_le32(pci_map_single(nic->pdev,
       
  1601 		skb->data, skb->len, PCI_DMA_TODEVICE));
       
  1602 	/* TODO: check the mapping with pci_dma_mapping_error() here */
       
  1603 	cb->u.tcb.tbd.size = cpu_to_le16(skb->len);
       
  1604 }
       
  1605 
       
  1606 static int e100_xmit_frame(struct sk_buff *skb, struct net_device *netdev)
       
  1607 {
       
  1608 	struct nic *nic = netdev_priv(netdev);
       
  1609 	int err;
       
  1610 
       
  1611 	if (nic->flags & ich_10h_workaround) {
       
  1612 		/* SW workaround for ICH[x] 10Mbps/half duplex Tx hang.
       
  1613 		   Issue a NOP command followed by a 1us delay before
       
  1614 		   issuing the Tx command. */
       
  1615 		if (e100_exec_cmd(nic, cuc_nop, 0))
       
  1616 			DPRINTK(TX_ERR, DEBUG, "exec cuc_nop failed\n");
       
  1617 		udelay(1);
       
  1618 	}
       
  1619 
       
  1620 	err = e100_exec_cb(nic, skb, e100_xmit_prepare);
       
  1621 
       
  1622 	switch (err) {
       
  1623 	case -ENOSPC:
       
  1624 		/* We queued the skb, but now we're out of space. */
       
  1625 		DPRINTK(TX_ERR, DEBUG, "No space for CB\n");
       
  1626 		if (!nic->ecdev)
       
  1627 			netif_stop_queue(netdev);
       
  1628 		break;
       
  1629 	case -ENOMEM:
       
  1630 		/* This is a hard error - log it. */
       
  1631 		DPRINTK(TX_ERR, DEBUG, "Out of Tx resources, returning skb\n");
       
  1632 		if (!nic->ecdev)
       
  1633 			netif_stop_queue(netdev);
       
  1634 		return 1;
       
  1635 	}
       
  1636 
       
  1637 	netdev->trans_start = jiffies;
       
  1638 	return 0;
       
  1639 }
       
  1640 
       
  1641 static int e100_tx_clean(struct nic *nic)
       
  1642 {
       
  1643 	struct net_device *dev = nic->netdev;
       
  1644 	struct cb *cb;
       
  1645 	int tx_cleaned = 0;
       
  1646 
       
  1647 	if (!nic->ecdev)
       
  1648 		spin_lock(&nic->cb_lock);
       
  1649 
       
  1650 	/* Clean CBs marked complete */
       
  1651 	for (cb = nic->cb_to_clean;
       
  1652 	    cb->status & cpu_to_le16(cb_complete);
       
  1653 	    cb = nic->cb_to_clean = cb->next) {
       
  1654 		DPRINTK(TX_DONE, DEBUG, "cb[%d]->status = 0x%04X\n",
       
  1655 		        (int)(((void*)cb - (void*)nic->cbs)/sizeof(struct cb)),
       
  1656 		        cb->status);
       
  1657 
       
  1658 		if (likely(cb->skb != NULL)) {
       
  1659 			dev->stats.tx_packets++;
       
  1660 			dev->stats.tx_bytes += cb->skb->len;
       
  1661 
       
  1662 			pci_unmap_single(nic->pdev,
       
  1663 				le32_to_cpu(cb->u.tcb.tbd.buf_addr),
       
  1664 				le16_to_cpu(cb->u.tcb.tbd.size),
       
  1665 				PCI_DMA_TODEVICE);
       
  1666 			if (!nic->ecdev)
       
  1667 				dev_kfree_skb_any(cb->skb);
       
  1668 			cb->skb = NULL;
       
  1669 			tx_cleaned = 1;
       
  1670 		}
       
  1671 		cb->status = 0;
       
  1672 		nic->cbs_avail++;
       
  1673 	}
       
  1674 
       
  1675 	if (!nic->ecdev) {
       
  1676 		spin_unlock(&nic->cb_lock);
       
  1677 
       
  1678 		/* Recover from running out of Tx resources in xmit_frame */
       
  1679 		if (unlikely(tx_cleaned && netif_queue_stopped(nic->netdev)))
       
  1680 			netif_wake_queue(nic->netdev);
       
  1681 	}
       
  1682 
       
  1683 	return tx_cleaned;
       
  1684 }
       
  1685 
       
  1686 static void e100_clean_cbs(struct nic *nic)
       
  1687 {
       
  1688 	if (nic->cbs) {
       
  1689 		while (nic->cbs_avail != nic->params.cbs.count) {
       
  1690 			struct cb *cb = nic->cb_to_clean;
       
  1691 			if (cb->skb) {
       
  1692 				pci_unmap_single(nic->pdev,
       
  1693 					le32_to_cpu(cb->u.tcb.tbd.buf_addr),
       
  1694 					le16_to_cpu(cb->u.tcb.tbd.size),
       
  1695 					PCI_DMA_TODEVICE);
       
  1696 				if (!nic->ecdev)
       
  1697 					dev_kfree_skb(cb->skb);
       
  1698 			}
       
  1699 			nic->cb_to_clean = nic->cb_to_clean->next;
       
  1700 			nic->cbs_avail++;
       
  1701 		}
       
  1702 		pci_free_consistent(nic->pdev,
       
  1703 			sizeof(struct cb) * nic->params.cbs.count,
       
  1704 			nic->cbs, nic->cbs_dma_addr);
       
  1705 		nic->cbs = NULL;
       
  1706 		nic->cbs_avail = 0;
       
  1707 	}
       
  1708 	nic->cuc_cmd = cuc_start;
       
  1709 	nic->cb_to_use = nic->cb_to_send = nic->cb_to_clean =
       
  1710 		nic->cbs;
       
  1711 }
       
  1712 
       
  1713 static int e100_alloc_cbs(struct nic *nic)
       
  1714 {
       
  1715 	struct cb *cb;
       
  1716 	unsigned int i, count = nic->params.cbs.count;
       
  1717 
       
  1718 	nic->cuc_cmd = cuc_start;
       
  1719 	nic->cb_to_use = nic->cb_to_send = nic->cb_to_clean = NULL;
       
  1720 	nic->cbs_avail = 0;
       
  1721 
       
  1722 	nic->cbs = pci_alloc_consistent(nic->pdev,
       
  1723 		sizeof(struct cb) * count, &nic->cbs_dma_addr);
       
  1724 	if (!nic->cbs)
       
  1725 		return -ENOMEM;
       
  1726 
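       	/* Build a circular list: software next/prev pointers for the
        
       	 * driver, and the hardware link field holding bus addresses. */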
       
  1727 	for (cb = nic->cbs, i = 0; i < count; cb++, i++) {
       
  1728 		cb->next = (i + 1 < count) ? cb + 1 : nic->cbs;
       
  1729 		cb->prev = (i == 0) ? nic->cbs + count - 1 : cb - 1;
       
  1730 
       
  1731 		cb->dma_addr = nic->cbs_dma_addr + i * sizeof(struct cb);
       
  1732 		cb->link = cpu_to_le32(nic->cbs_dma_addr +
       
  1733 			((i+1) % count) * sizeof(struct cb));
       
  1734 		cb->skb = NULL;
       
  1735 	}
       
  1736 
       
  1737 	nic->cb_to_use = nic->cb_to_send = nic->cb_to_clean = nic->cbs;
       
  1738 	nic->cbs_avail = count;
       
  1739 
       
  1740 	return 0;
       
  1741 }
       
  1742 
       
  1743 static inline void e100_start_receiver(struct nic *nic, struct rx *rx)
       
  1744 {
       
  1745 	if (!nic->rxs) return;
       
  1746 	if (RU_SUSPENDED != nic->ru_running) return;
       
  1747 
       
  1748 	/* handle init time starts */
       
  1749 	if (!rx) rx = nic->rxs;
       
  1750 
       
  1751 	/* (Re)start RU if suspended or idle and RFA is non-NULL */
       
  1752 	if (rx->skb) {
       
  1753 		e100_exec_cmd(nic, ruc_start, rx->dma_addr);
       
  1754 		nic->ru_running = RU_RUNNING;
       
  1755 	}
       
  1756 }
       
  1757 
       
  1758 #define RFD_BUF_LEN (sizeof(struct rfd) + VLAN_ETH_FRAME_LEN)
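       /* Each receive buffer holds an RFD header immediately followed by
        
       * room for a maximum-sized VLAN Ethernet frame. */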
       
  1759 static int e100_rx_alloc_skb(struct nic *nic, struct rx *rx)
       
  1760 {
       
  1761 	if (!(rx->skb = netdev_alloc_skb(nic->netdev, RFD_BUF_LEN + NET_IP_ALIGN)))
       
  1762 		return -ENOMEM;
       
  1763 
       
  1764 	/* Align, init, and map the RFD. */
       
  1765 	skb_reserve(rx->skb, NET_IP_ALIGN);
       
  1766 	skb_copy_to_linear_data(rx->skb, &nic->blank_rfd, sizeof(struct rfd));
       
  1767 	rx->dma_addr = pci_map_single(nic->pdev, rx->skb->data,
       
  1768 		RFD_BUF_LEN, PCI_DMA_BIDIRECTIONAL);
       
  1769 
       
  1770 	if (pci_dma_mapping_error(nic->pdev, rx->dma_addr)) {
       
  1771 		dev_kfree_skb_any(rx->skb);
       
  1772 		rx->skb = NULL;
       
  1773 		rx->dma_addr = 0;
       
  1774 		return -ENOMEM;
       
  1775 	}
       
  1776 
       
  1777 	/* Link the RFD to end of RFA by linking previous RFD to
       
  1778 	 * this one.  We are safe to touch the previous RFD because
       
  1779 	 * it is protected by the before last buffer's el bit being set */
       
  1780 	if (rx->prev->skb) {
       
  1781 		struct rfd *prev_rfd = (struct rfd *)rx->prev->skb->data;
       
  1782 		put_unaligned_le32(rx->dma_addr, &prev_rfd->link);
       
  1783 		pci_dma_sync_single_for_device(nic->pdev, rx->prev->dma_addr,
       
  1784 			sizeof(struct rfd), PCI_DMA_BIDIRECTIONAL);
       
  1785 	}
       
  1786 
       
  1787 	return 0;
       
  1788 }
       
  1789 
       
  1790 static int e100_rx_indicate(struct nic *nic, struct rx *rx,
       
  1791 	unsigned int *work_done, unsigned int work_to_do)
       
  1792 {
       
  1793 	struct net_device *dev = nic->netdev;
       
  1794 	struct sk_buff *skb = rx->skb;
       
  1795 	struct rfd *rfd = (struct rfd *)skb->data;
       
  1796 	u16 rfd_status, actual_size;
       
  1797 
       
  1798 	if (unlikely(work_done && *work_done >= work_to_do))
       
  1799 		return -EAGAIN;
       
  1800 
       
  1801 	/* Need to sync before taking a peek at cb_complete bit */
       
  1802 	pci_dma_sync_single_for_cpu(nic->pdev, rx->dma_addr,
       
  1803 		sizeof(struct rfd), PCI_DMA_BIDIRECTIONAL);
       
  1804 	rfd_status = le16_to_cpu(rfd->status);
       
  1805 
       
  1806 	DPRINTK(RX_STATUS, DEBUG, "status=0x%04X\n", rfd_status);
       
  1807 
       
  1808 	/* If data isn't ready, nothing to indicate */
       
  1809 	if (unlikely(!(rfd_status & cb_complete))) {
       
  1810 		/* If the next buffer has the el bit, but we think the receiver
       
  1811 		 * is still running, check to see if it really stopped while
       
  1812 		 * we had interrupts off.
       
  1813 		 * This allows for a fast restart without re-enabling
       
  1814 		 * interrupts */
       
  1815 		if ((le16_to_cpu(rfd->command) & cb_el) &&
       
  1816 		    (RU_RUNNING == nic->ru_running))
       
  1817 
       
  1818 			if (ioread8(&nic->csr->scb.status) & rus_no_res)
       
  1819 				nic->ru_running = RU_SUSPENDED;
       
  1820 		return -ENODATA;
       
  1821 	}
       
  1822 
       
  1823 	/* Get actual data size */
       
  1824 	actual_size = le16_to_cpu(rfd->actual_size) & 0x3FFF;
       
  1825 	if (unlikely(actual_size > RFD_BUF_LEN - sizeof(struct rfd)))
       
  1826 		actual_size = RFD_BUF_LEN - sizeof(struct rfd);
       
  1827 
       
  1828 	/* Get data */
       
  1829 	pci_unmap_single(nic->pdev, rx->dma_addr,
       
  1830 		RFD_BUF_LEN, PCI_DMA_BIDIRECTIONAL);
       
  1831 
       
  1832 	/* If this buffer has the el bit, but we think the receiver
       
  1833 	 * is still running, check to see if it really stopped while
       
  1834 	 * we had interrupts off.
       
  1835 	 * This allows for a fast restart without re-enabling interrupts.
       
  1836 	 * This can happen when the RU sees the size change but also sees
       
  1837 	 * the el bit set. */
       
  1838 	if ((le16_to_cpu(rfd->command) & cb_el) &&
       
  1839 	    (RU_RUNNING == nic->ru_running)) {
       
  1840 
       
  1841 		if (ioread8(&nic->csr->scb.status) & rus_no_res)

  1842 			nic->ru_running = RU_SUSPENDED;
       
  1843 	}
       
  1844 
       
  1845 	if (!nic->ecdev) {
       
  1846 		/* Pull off the RFD and put the actual data (minus eth hdr) */
       
  1847 		skb_reserve(skb, sizeof(struct rfd));
       
  1848 		skb_put(skb, actual_size);
       
  1849 		skb->protocol = eth_type_trans(skb, nic->netdev);
       
  1850 	}
       
  1851 
       
  1852 	if (unlikely(!(rfd_status & cb_ok))) {
       
  1853 		if (!nic->ecdev) {
       
  1854 			/* Don't indicate if hardware indicates errors */
       
  1855 			dev_kfree_skb_any(skb);
       
  1856 		}
       
  1857 	} else if (actual_size > ETH_DATA_LEN + VLAN_ETH_HLEN) {
       
  1858 		/* Don't indicate oversized frames */
       
  1859 		nic->rx_over_length_errors++;
       
  1860 		if (!nic->ecdev)
       
  1861 			dev_kfree_skb_any(skb);
       
  1862 	} else {
       
  1863 		dev->stats.rx_packets++;
       
  1864 		dev->stats.rx_bytes += actual_size;
       
  1865 		if (nic->ecdev) {
       
  1866 			ecdev_receive(nic->ecdev,
       
  1867 					skb->data + sizeof(struct rfd), actual_size);
       
  1868 
       
  1869 			// No need to detect link status as
       
  1870 			// long as frames are received: Reset watchdog.
       
  1871 			nic->ec_watchdog_jiffies = jiffies;
       
  1872 		} else {
       
  1873 			netif_receive_skb(skb);
       
  1874 		}
       
  1875 		if (work_done)
       
  1876 			(*work_done)++;
       
  1877 	}
       
  1878 
       
  1879 	if (nic->ecdev) {
       
  1880 		// make the receive frame descriptor usable again
       
  1881 		memcpy(skb->data, &nic->blank_rfd, sizeof(struct rfd));
       
  1882 		rx->dma_addr = pci_map_single(nic->pdev, skb->data,
       
  1883 				RFD_BUF_LEN, PCI_DMA_BIDIRECTIONAL);
       
  1884 		if (pci_dma_mapping_error(nic->pdev, rx->dma_addr)) {
       
  1885 			rx->dma_addr = 0;
       
  1886 		}
       
  1887 
       
  1888 		/* Link the RFD to end of RFA by linking previous RFD to
       
  1889 		 * this one.  We are safe to touch the previous RFD because
       
  1890 		 * it is protected by the before last buffer's el bit being set */
       
  1891 		if (rx->prev->skb) {
       
  1892 			struct rfd *prev_rfd = (struct rfd *)rx->prev->skb->data;
       
  1893 			put_unaligned_le32(rx->dma_addr, &prev_rfd->link);
       
  1894 			pci_dma_sync_single_for_device(nic->pdev, rx->prev->dma_addr,
       
  1895 					sizeof(struct rfd), PCI_DMA_TODEVICE);
       
  1896 		}
       
  1897 	} else {
       
  1898 		rx->skb = NULL;
       
  1899 	}
       
  1900 
       
  1901 	return 0;
       
  1902 }
       
  1903 
       
  1904 static void e100_rx_clean(struct nic *nic, unsigned int *work_done,
       
  1905 	unsigned int work_to_do)
       
  1906 {
       
  1907 	struct rx *rx;
       
  1908 	int restart_required = 0, err = 0;
       
  1909 	struct rx *old_before_last_rx, *new_before_last_rx;
       
  1910 	struct rfd *old_before_last_rfd, *new_before_last_rfd;
       
  1911 
       
  1912 	/* Indicate newly arrived packets */
       
  1913 	for (rx = nic->rx_to_clean; rx->skb; rx = nic->rx_to_clean = rx->next) {
       
  1914 		err = e100_rx_indicate(nic, rx, work_done, work_to_do);
       
  1915 		/* Hit quota or no more to clean */
       
  1916 		if (-EAGAIN == err || -ENODATA == err)
       
  1917 			break;
       
  1918 	}
       
  1919 
       
  1920 
       
  1921 	/* On EAGAIN we hit the quota, so there is more work to do; restart

  1922 	 * once cleanup is complete.

  1923 	 * Otherwise, check whether we are already in the RNR state: this

  1924 	 * ensures that the state machine never restarts the receiver with a

  1925 	 * partially cleaned list, avoiding a race between hardware and

  1926 	 * rx_to_clean when in NAPI mode */
       
  1927 	if (-EAGAIN != err && RU_SUSPENDED == nic->ru_running)
       
  1928 		restart_required = 1;
       
  1929 
       
  1930 	old_before_last_rx = nic->rx_to_use->prev->prev;
       
  1931 	old_before_last_rfd = (struct rfd *)old_before_last_rx->skb->data;
       
  1932 
       
  1933 	if (!nic->ecdev) {
       
  1934 		/* Alloc new skbs to refill list */
       
  1935 		for (rx = nic->rx_to_use; !rx->skb; rx = nic->rx_to_use = rx->next) {

  1936 			if (unlikely(e100_rx_alloc_skb(nic, rx)))
       
  1937 				break; /* Better luck next time (see watchdog) */
       
  1938 		}
       
  1939 	}
       
  1940 
       
  1941 	new_before_last_rx = nic->rx_to_use->prev->prev;
       
  1942 	if (new_before_last_rx != old_before_last_rx) {
       
  1943 		/* Set the el-bit on the buffer that is before the last buffer.
       
  1944 		 * This lets us update the next pointer on the last buffer
       
  1945 		 * without worrying about hardware touching it.
       
  1946 		 * We set the size to 0 to prevent hardware from touching this
       
  1947 		 * buffer.
       
  1948 		 * When the hardware hits the before-last buffer with the el-bit

  1949 		 * and a size of 0, it raises an RNR interrupt and the RU enters

  1950 		 * the No Resources state.  It will not complete nor write to
       
  1951 		 * this buffer. */
       
  1952 		new_before_last_rfd =
       
  1953 			(struct rfd *)new_before_last_rx->skb->data;
       
  1954 		new_before_last_rfd->size = 0;
       
  1955 		new_before_last_rfd->command |= cpu_to_le16(cb_el);
       
  1956 		pci_dma_sync_single_for_device(nic->pdev,
       
  1957 			new_before_last_rx->dma_addr, sizeof(struct rfd),
       
  1958 			PCI_DMA_BIDIRECTIONAL);
       
  1959 
       
  1960 		/* Now that we have a new stopping point, we can clear the old
       
  1961 		 * stopping point.  We must sync twice to get the proper
       
  1962 		 * ordering on the hardware side of things. */
       
  1963 		old_before_last_rfd->command &= ~cpu_to_le16(cb_el);
       
  1964 		pci_dma_sync_single_for_device(nic->pdev,
       
  1965 			old_before_last_rx->dma_addr, sizeof(struct rfd),
       
  1966 			PCI_DMA_BIDIRECTIONAL);
       
  1967 		old_before_last_rfd->size = cpu_to_le16(VLAN_ETH_FRAME_LEN);
       
  1968 		pci_dma_sync_single_for_device(nic->pdev,
       
  1969 			old_before_last_rx->dma_addr, sizeof(struct rfd),
       
  1970 			PCI_DMA_BIDIRECTIONAL);
       
  1971 	}
       
  1972 
       
  1973 	if (restart_required) {
       
  1974 		// acknowledge the pending RNR before restarting the receiver
       
  1975 		iowrite8(stat_ack_rnr, &nic->csr->scb.stat_ack);
       
  1976 		e100_start_receiver(nic, nic->rx_to_clean);
       
  1977 		if (work_done)
       
  1978 			(*work_done)++;
       
  1979 	}
       
  1980 }
       
  1981 
       
  1982 static void e100_rx_clean_list(struct nic *nic)
       
  1983 {
       
  1984 	struct rx *rx;
       
  1985 	unsigned int i, count = nic->params.rfds.count;
       
  1986 
       
  1987 	nic->ru_running = RU_UNINITIALIZED;
       
  1988 
       
  1989 	if (nic->rxs) {
       
  1990 		for (rx = nic->rxs, i = 0; i < count; rx++, i++) {
       
  1991 			if (rx->skb) {
       
  1992 				pci_unmap_single(nic->pdev, rx->dma_addr,
       
  1993 					RFD_BUF_LEN, PCI_DMA_BIDIRECTIONAL);
       
  1994 				dev_kfree_skb(rx->skb);
       
  1995 			}
       
  1996 		}
       
  1997 		kfree(nic->rxs);
       
  1998 		nic->rxs = NULL;
       
  1999 	}
       
  2000 
       
  2001 	nic->rx_to_use = nic->rx_to_clean = NULL;
       
  2002 }
       
  2003 
       
  2004 static int e100_rx_alloc_list(struct nic *nic)
       
  2005 {
       
  2006 	struct rx *rx;
       
  2007 	unsigned int i, count = nic->params.rfds.count;
       
  2008 	struct rfd *before_last;
       
  2009 
       
  2010 	nic->rx_to_use = nic->rx_to_clean = NULL;
       
  2011 	nic->ru_running = RU_UNINITIALIZED;
       
  2012 
       
  2013 	if (!(nic->rxs = kcalloc(count, sizeof(struct rx), GFP_ATOMIC)))
       
  2014 		return -ENOMEM;
       
  2015 
       
  2016 	for (rx = nic->rxs, i = 0; i < count; rx++, i++) {
       
  2017 		rx->next = (i + 1 < count) ? rx + 1 : nic->rxs;
       
  2018 		rx->prev = (i == 0) ? nic->rxs + count - 1 : rx - 1;
       
  2019 		if (e100_rx_alloc_skb(nic, rx)) {
       
  2020 			e100_rx_clean_list(nic);
       
  2021 			return -ENOMEM;
       
  2022 		}
       
  2023 	}
       
  2024 
       
  2025 	if (!nic->ecdev) {
       
  2026 		/* Set the el-bit on the buffer that is before the last buffer.
       
  2027 		 * This lets us update the next pointer on the last buffer without
       
  2028 		 * worrying about hardware touching it.
       
  2029 		 * We set the size to 0 to prevent hardware from touching this buffer.
       
  2030 		 * When the hardware hits the before-last buffer with the el-bit and

  2031 		 * a size of 0, it raises an RNR interrupt and the RU goes into the

  2032 		 * No Resources state.  It will not complete nor write to this buffer. */
       
  2033 		rx = nic->rxs->prev->prev;
       
  2034 		before_last = (struct rfd *)rx->skb->data;
       
  2035 		before_last->command |= cpu_to_le16(cb_el);
       
  2036 		before_last->size = 0;
       
  2037 		pci_dma_sync_single_for_device(nic->pdev, rx->dma_addr,
       
  2038 				sizeof(struct rfd), PCI_DMA_BIDIRECTIONAL);
       
  2039 	}
       
  2040 
       
  2041 	nic->rx_to_use = nic->rx_to_clean = nic->rxs;
       
  2042 	nic->ru_running = RU_SUSPENDED;
       
  2043 
       
  2044 	return 0;
       
  2045 }
       
  2046 
       
  2047 static irqreturn_t e100_intr(int irq, void *dev_id)
       
  2048 {
       
  2049 	struct net_device *netdev = dev_id;
       
  2050 	struct nic *nic = netdev_priv(netdev);
       
  2051 	u8 stat_ack = ioread8(&nic->csr->scb.stat_ack);
       
  2052 
       
  2053 	DPRINTK(INTR, DEBUG, "stat_ack = 0x%02X\n", stat_ack);
       
  2054 
       
  2055 	if (stat_ack == stat_ack_not_ours ||	/* Not our interrupt */
       
  2056 	   stat_ack == stat_ack_not_present)	/* Hardware is ejected */
       
  2057 		return IRQ_NONE;
       
  2058 
       
  2059 	/* Ack interrupt(s) */
       
  2060 	iowrite8(stat_ack, &nic->csr->scb.stat_ack);
       
  2061 
       
  2062 	/* We hit Receive No Resource (RNR); restart RU after cleaning */
       
  2063 	if (stat_ack & stat_ack_rnr)
       
  2064 		nic->ru_running = RU_SUSPENDED;
       
  2065 
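       	/* In EtherCAT mode there is no NAPI context; receive and transmit
        
       	 * cleanup is driven by e100_ec_poll() instead. */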
       
  2066 	if (!nic->ecdev && likely(netif_rx_schedule_prep(&nic->napi))) {
       
  2067 		e100_disable_irq(nic);
       
  2068 		__netif_rx_schedule(&nic->napi);
       
  2069 	}
       
  2070 
       
  2071 	return IRQ_HANDLED;
       
  2072 }
       
  2073 
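       /* Polling entry point for EtherCAT operation: called cyclically in
        
       * place of the interrupt/NAPI path; the watchdog is run by hand
        
       * roughly every two seconds. */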
       
  2074 void e100_ec_poll(struct net_device *netdev)
       
  2075 {
       
  2076 	struct nic *nic = netdev_priv(netdev);
       
  2077 
       
  2078 	e100_rx_clean(nic, NULL, 100);
       
  2079 	e100_tx_clean(nic);
       
  2080 
       
  2081 	if (jiffies - nic->ec_watchdog_jiffies >= 2 * HZ) {

  2082 		e100_watchdog((unsigned long) nic);

  2083 		nic->ec_watchdog_jiffies = jiffies;

  2084 	}
       
  2085 }
       
  2086 
       
  2087 
       
  2088 static int e100_poll(struct napi_struct *napi, int budget)
       
  2089 {
       
  2090 	struct nic *nic = container_of(napi, struct nic, napi);
       
  2091 	unsigned int work_done = 0;
       
  2092 
       
  2093 	e100_rx_clean(nic, &work_done, budget);
       
  2094 	e100_tx_clean(nic);
       
  2095 
       
  2096 	/* If budget not fully consumed, exit the polling mode */
       
  2097 	if (work_done < budget) {
       
  2098 		netif_rx_complete(napi);
       
  2099 		e100_enable_irq(nic);
       
  2100 	}
       
  2101 
       
  2102 	return work_done;
       
  2103 }
       
  2104 
       
  2105 #ifdef CONFIG_NET_POLL_CONTROLLER
       
  2106 static void e100_netpoll(struct net_device *netdev)
       
  2107 {
       
  2108 	struct nic *nic = netdev_priv(netdev);
       
  2109 
       
  2110 	e100_disable_irq(nic);
       
  2111 	e100_intr(nic->pdev->irq, netdev);
       
  2112 	e100_tx_clean(nic);
       
  2113 	e100_enable_irq(nic);
       
  2114 }
       
  2115 #endif
       
  2116 
       
  2117 static int e100_set_mac_address(struct net_device *netdev, void *p)
       
  2118 {
       
  2119 	struct nic *nic = netdev_priv(netdev);
       
  2120 	struct sockaddr *addr = p;
       
  2121 
       
  2122 	if (!is_valid_ether_addr(addr->sa_data))
       
  2123 		return -EADDRNOTAVAIL;
       
  2124 
       
  2125 	memcpy(netdev->dev_addr, addr->sa_data, netdev->addr_len);
       
  2126 	e100_exec_cb(nic, NULL, e100_setup_iaaddr);
       
  2127 
       
  2128 	return 0;
       
  2129 }
       
  2130 
       
  2131 static int e100_change_mtu(struct net_device *netdev, int new_mtu)
       
  2132 {
       
  2133 	if (new_mtu < ETH_ZLEN || new_mtu > ETH_DATA_LEN)
       
  2134 		return -EINVAL;
       
  2135 	netdev->mtu = new_mtu;
       
  2136 	return 0;
       
  2137 }
       
  2138 
       
  2139 static int e100_asf(struct nic *nic)
       
  2140 {
       
  2141 	/* ASF can be enabled from eeprom */
       
  2142 	return((nic->pdev->device >= 0x1050) && (nic->pdev->device <= 0x1057) &&
       
  2143 	   (nic->eeprom[eeprom_config_asf] & eeprom_asf) &&
       
  2144 	   !(nic->eeprom[eeprom_config_asf] & eeprom_gcl) &&
       
  2145 	   ((nic->eeprom[eeprom_smbus_addr] & 0xFF) != 0xFE));
       
  2146 }
       
  2147 
       
  2148 static int e100_up(struct nic *nic)
       
  2149 {
       
  2150 	int err;
       
  2151 
       
  2152 	if ((err = e100_rx_alloc_list(nic)))
       
  2153 		return err;
       
  2154 	if ((err = e100_alloc_cbs(nic)))
       
  2155 		goto err_rx_clean_list;
       
  2156 	if ((err = e100_hw_init(nic)))
       
  2157 		goto err_clean_cbs;
       
  2158 	e100_set_multicast_list(nic->netdev);
       
  2159 	e100_start_receiver(nic, NULL);
       
  2160 	if (!nic->ecdev) {
       
  2161 		mod_timer(&nic->watchdog, jiffies);
       
  2162 	}
       
  2163 	if ((err = request_irq(nic->pdev->irq, e100_intr, IRQF_SHARED,
       
  2164 					nic->netdev->name, nic->netdev)))
       
  2165 		goto err_no_irq;
       
  2166 	if (!nic->ecdev) {
       
  2167 		netif_wake_queue(nic->netdev);
       
  2168 		napi_enable(&nic->napi);
       
  2169 		/* enable ints _after_ enabling poll, preventing a race between
       
  2170 		 * disable ints+schedule */
       
  2171 		e100_enable_irq(nic);
       
  2172 	}
       
  2173 	return 0;
       
  2174 
       
  2175 err_no_irq:
       
  2176 	if (!nic->ecdev)
       
  2177 		del_timer_sync(&nic->watchdog);
       
  2178 err_clean_cbs:
       
  2179 	e100_clean_cbs(nic);
       
  2180 err_rx_clean_list:
       
  2181 	e100_rx_clean_list(nic);
       
  2182 	return err;
       
  2183 }
       
  2184 
       
  2185 static void e100_down(struct nic *nic)
       
  2186 {
       
  2187 	if (!nic->ecdev) {
       
  2188 		/* wait here for poll to complete */
       
  2189 		napi_disable(&nic->napi);
       
  2190 		netif_stop_queue(nic->netdev);
       
  2191 	}
       
  2192 	e100_hw_reset(nic);
       
  2193 	if (!nic->ecdev) {
       
  2194 		free_irq(nic->pdev->irq, nic->netdev);
       
  2195 		del_timer_sync(&nic->watchdog);
       
  2196 		netif_carrier_off(nic->netdev);
       
  2197 	}
       
  2198 	e100_clean_cbs(nic);
       
  2199 	e100_rx_clean_list(nic);
       
  2200 }
       
  2201 
       
  2202 static void e100_tx_timeout(struct net_device *netdev)
       
  2203 {
       
  2204 	struct nic *nic = netdev_priv(netdev);
       
  2205 
       
  2206 	/* Reset outside of interrupt context, to avoid request_irq
       
  2207 	 * in interrupt context */
       
  2208 	schedule_work(&nic->tx_timeout_task);
       
  2209 }
       
  2210 
       
  2211 static void e100_tx_timeout_task(struct work_struct *work)
       
  2212 {
       
  2213 	struct nic *nic = container_of(work, struct nic, tx_timeout_task);
       
  2214 	struct net_device *netdev = nic->netdev;
       
  2215 
       
  2216 	DPRINTK(TX_ERR, DEBUG, "scb.status=0x%02X\n",
       
  2217 		ioread8(&nic->csr->scb.status));
       
  2218 	e100_down(nic);

  2219 	e100_up(nic);
       
  2220 }
       
  2221 
       
  2222 static int e100_loopback_test(struct nic *nic, enum loopback loopback_mode)
       
  2223 {
       
  2224 	int err;
       
  2225 	struct sk_buff *skb;
       
  2226 
       
  2227 	/* Use driver resources to perform internal MAC or PHY
       
  2228 	 * loopback test.  A single packet is prepared and transmitted
       
  2229 	 * in loopback mode, and the test passes if the received
       
  2230 	 * packet compares byte-for-byte to the transmitted packet. */
       
  2231 
       
  2232 	if ((err = e100_rx_alloc_list(nic)))
       
  2233 		return err;
       
  2234 	if ((err = e100_alloc_cbs(nic)))
       
  2235 		goto err_clean_rx;
       
  2236 
       
  2237 	/* ICH PHY loopback is broken so do MAC loopback instead */
       
  2238 	if (nic->flags & ich && loopback_mode == lb_phy)
       
  2239 		loopback_mode = lb_mac;
       
  2240 
       
  2241 	nic->loopback = loopback_mode;
       
  2242 	if ((err = e100_hw_init(nic)))
       
  2243 		goto err_loopback_none;
       
  2244 
       
  2245 	if (loopback_mode == lb_phy)
       
  2246 		mdio_write(nic->netdev, nic->mii.phy_id, MII_BMCR,
       
  2247 			BMCR_LOOPBACK);
       
  2248 
       
  2249 	e100_start_receiver(nic, NULL);
       
  2250 
       
  2251 	if (!(skb = netdev_alloc_skb(nic->netdev, ETH_DATA_LEN))) {
       
  2252 		err = -ENOMEM;
       
  2253 		goto err_loopback_none;
       
  2254 	}
       
  2255 	skb_put(skb, ETH_DATA_LEN);
       
  2256 	memset(skb->data, 0xFF, ETH_DATA_LEN);
       
  2257 	e100_xmit_frame(skb, nic->netdev);
       
  2258 
       
  2259 	msleep(10);
       
  2260 
       
  2261 	pci_dma_sync_single_for_cpu(nic->pdev, nic->rx_to_clean->dma_addr,
       
  2262 			RFD_BUF_LEN, PCI_DMA_BIDIRECTIONAL);
       
  2263 
       
  2264 	if (memcmp(nic->rx_to_clean->skb->data + sizeof(struct rfd),
       
  2265 	   skb->data, ETH_DATA_LEN))
       
  2266 		err = -EAGAIN;
       
  2267 
       
  2268 err_loopback_none:
       
  2269 	mdio_write(nic->netdev, nic->mii.phy_id, MII_BMCR, 0);
       
  2270 	nic->loopback = lb_none;
       
  2271 	e100_clean_cbs(nic);
       
  2272 	e100_hw_reset(nic);
       
  2273 err_clean_rx:
       
  2274 	e100_rx_clean_list(nic);
       
  2275 	return err;
       
  2276 }
       
  2277 
       
  2278 #define MII_LED_CONTROL	0x1B
       
  2279 static void e100_blink_led(unsigned long data)
       
  2280 {
       
  2281 	struct nic *nic = (struct nic *)data;
       
  2282 	enum led_state {
       
  2283 		led_on     = 0x01,
       
  2284 		led_off    = 0x04,
       
  2285 		led_on_559 = 0x05,
       
  2286 		led_on_557 = 0x07,
       
  2287 	};
       
  2288 
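       	/* Toggle: if the LED is currently on, turn it off; otherwise use
        
       	 * the "on" value that this MAC generation understands. */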
       
  2289 	nic->leds = (nic->leds & led_on) ? led_off :
       
  2290 		(nic->mac < mac_82559_D101M) ? led_on_557 : led_on_559;
       
  2291 	mdio_write(nic->netdev, nic->mii.phy_id, MII_LED_CONTROL, nic->leds);
       
  2292 	mod_timer(&nic->blink_timer, jiffies + HZ / 4);
       
  2293 }
       
  2294 
       
  2295 static int e100_get_settings(struct net_device *netdev, struct ethtool_cmd *cmd)
       
  2296 {
       
  2297 	struct nic *nic = netdev_priv(netdev);
       
  2298 	return mii_ethtool_gset(&nic->mii, cmd);
       
  2299 }
       
  2300 
       
  2301 static int e100_set_settings(struct net_device *netdev, struct ethtool_cmd *cmd)
       
  2302 {
       
  2303 	struct nic *nic = netdev_priv(netdev);
       
  2304 	int err;
       
  2305 
       
  2306 	mdio_write(netdev, nic->mii.phy_id, MII_BMCR, BMCR_RESET);
       
  2307 	err = mii_ethtool_sset(&nic->mii, cmd);
       
  2308 	e100_exec_cb(nic, NULL, e100_configure);
       
  2309 
       
  2310 	return err;
       
  2311 }
       
  2312 
       
  2313 static void e100_get_drvinfo(struct net_device *netdev,
       
  2314 	struct ethtool_drvinfo *info)
       
  2315 {
       
  2316 	struct nic *nic = netdev_priv(netdev);
       
  2317 	strcpy(info->driver, DRV_NAME);
       
  2318 	strcpy(info->version, DRV_VERSION);
       
  2319 	strcpy(info->fw_version, "N/A");
       
  2320 	strcpy(info->bus_info, pci_name(nic->pdev));
       
  2321 }
       
  2322 
       
  2323 #define E100_PHY_REGS 0x1C
       
  2324 static int e100_get_regs_len(struct net_device *netdev)
       
  2325 {
       
  2326 	struct nic *nic = netdev_priv(netdev);
       
  2327 	return 1 + E100_PHY_REGS + sizeof(nic->mem->dump_buf);
       
  2328 }
       
  2329 
       
  2330 static void e100_get_regs(struct net_device *netdev,
       
  2331 	struct ethtool_regs *regs, void *p)
       
  2332 {
       
  2333 	struct nic *nic = netdev_priv(netdev);
       
  2334 	u32 *buff = p;
       
  2335 	int i;
       
  2336 
       
  2337 	regs->version = (1 << 24) | nic->pdev->revision;
       
  2338 	buff[0] = ioread8(&nic->csr->scb.cmd_hi) << 24 |
       
  2339 		ioread8(&nic->csr->scb.cmd_lo) << 16 |
       
  2340 		ioread16(&nic->csr->scb.status);
       
  2341 	for (i = E100_PHY_REGS; i >= 0; i--)
       
  2342 		buff[1 + E100_PHY_REGS - i] =
       
  2343 			mdio_read(netdev, nic->mii.phy_id, i);
       
  2344 	memset(nic->mem->dump_buf, 0, sizeof(nic->mem->dump_buf));
       
  2345 	e100_exec_cb(nic, NULL, e100_dump);
       
  2346 	msleep(10);
       
  2347 	memcpy(&buff[2 + E100_PHY_REGS], nic->mem->dump_buf,
       
  2348 		sizeof(nic->mem->dump_buf));
       
  2349 }
       
  2350 
       
  2351 static void e100_get_wol(struct net_device *netdev, struct ethtool_wolinfo *wol)
       
  2352 {
       
  2353 	struct nic *nic = netdev_priv(netdev);
       
  2354 	wol->supported = (nic->mac >= mac_82558_D101_A4) ?  WAKE_MAGIC : 0;
       
  2355 	wol->wolopts = (nic->flags & wol_magic) ? WAKE_MAGIC : 0;
       
  2356 }
       
  2357 
       
  2358 static int e100_set_wol(struct net_device *netdev, struct ethtool_wolinfo *wol)
       
  2359 {
       
  2360 	struct nic *nic = netdev_priv(netdev);
       
  2361 
       
  2362 	if ((wol->wolopts && wol->wolopts != WAKE_MAGIC) ||
       
  2363 	    !device_can_wakeup(&nic->pdev->dev))
       
  2364 		return -EOPNOTSUPP;
       
  2365 
       
  2366 	if (wol->wolopts)
       
  2367 		nic->flags |= wol_magic;
       
  2368 	else
       
  2369 		nic->flags &= ~wol_magic;
       
  2370 
       
  2371 	device_set_wakeup_enable(&nic->pdev->dev, wol->wolopts);
       
  2372 
       
  2373 	e100_exec_cb(nic, NULL, e100_configure);
       
  2374 
       
  2375 	return 0;
       
  2376 }
       
  2377 
       
  2378 static u32 e100_get_msglevel(struct net_device *netdev)
       
  2379 {
       
  2380 	struct nic *nic = netdev_priv(netdev);
       
  2381 	return nic->msg_enable;
       
  2382 }
       
  2383 
       
  2384 static void e100_set_msglevel(struct net_device *netdev, u32 value)
       
  2385 {
       
  2386 	struct nic *nic = netdev_priv(netdev);
       
  2387 	nic->msg_enable = value;
       
  2388 }
       
  2389 
       
  2390 static int e100_nway_reset(struct net_device *netdev)
       
  2391 {
       
  2392 	struct nic *nic = netdev_priv(netdev);
       
  2393 	return mii_nway_restart(&nic->mii);
       
  2394 }
       
  2395 
       
  2396 static u32 e100_get_link(struct net_device *netdev)
       
  2397 {
       
  2398 	struct nic *nic = netdev_priv(netdev);
       
  2399 	return mii_link_ok(&nic->mii);
       
  2400 }
       
  2401 
       
  2402 static int e100_get_eeprom_len(struct net_device *netdev)
       
  2403 {
       
  2404 	struct nic *nic = netdev_priv(netdev);
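       	/* eeprom_wc counts 16-bit words; report the size in bytes */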
       
  2405 	return nic->eeprom_wc << 1;
       
  2406 }
       
  2407 
       
  2408 #define E100_EEPROM_MAGIC	0x1234
       
  2409 static int e100_get_eeprom(struct net_device *netdev,
       
  2410 	struct ethtool_eeprom *eeprom, u8 *bytes)
       
  2411 {
       
  2412 	struct nic *nic = netdev_priv(netdev);
       
  2413 
       
  2414 	eeprom->magic = E100_EEPROM_MAGIC;
       
  2415 	memcpy(bytes, &((u8 *)nic->eeprom)[eeprom->offset], eeprom->len);
       
  2416 
       
  2417 	return 0;
       
  2418 }
       
  2419 
       
  2420 static int e100_set_eeprom(struct net_device *netdev,
       
  2421 	struct ethtool_eeprom *eeprom, u8 *bytes)
       
  2422 {
       
  2423 	struct nic *nic = netdev_priv(netdev);
       
  2424 
       
  2425 	if (eeprom->magic != E100_EEPROM_MAGIC)
       
  2426 		return -EINVAL;
       
  2427 
       
  2428 	memcpy(&((u8 *)nic->eeprom)[eeprom->offset], bytes, eeprom->len);
       
  2429 
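       	/* The EEPROM is word-addressed: halve the byte offset and length,
        
       	 * and write one extra word to cover a partial word at the end. */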
       
  2430 	return e100_eeprom_save(nic, eeprom->offset >> 1,
       
  2431 		(eeprom->len >> 1) + 1);
       
  2432 }
       
  2433 
       
  2434 static void e100_get_ringparam(struct net_device *netdev,
       
  2435 	struct ethtool_ringparam *ring)
       
  2436 {
       
  2437 	struct nic *nic = netdev_priv(netdev);
       
  2438 	struct param_range *rfds = &nic->params.rfds;
       
  2439 	struct param_range *cbs = &nic->params.cbs;
       
  2440 
       
  2441 	ring->rx_max_pending = rfds->max;
       
  2442 	ring->tx_max_pending = cbs->max;
       
  2443 	ring->rx_mini_max_pending = 0;
       
  2444 	ring->rx_jumbo_max_pending = 0;
       
  2445 	ring->rx_pending = rfds->count;
       
  2446 	ring->tx_pending = cbs->count;
       
  2447 	ring->rx_mini_pending = 0;
       
  2448 	ring->rx_jumbo_pending = 0;
       
  2449 }
       
  2450 
       
  2451 static int e100_set_ringparam(struct net_device *netdev,
       
  2452 	struct ethtool_ringparam *ring)
       
  2453 {
       
  2454 	struct nic *nic = netdev_priv(netdev);
       
  2455 	struct param_range *rfds = &nic->params.rfds;
       
  2456 	struct param_range *cbs = &nic->params.cbs;
       
  2457 
       
  2458 	if ((ring->rx_mini_pending) || (ring->rx_jumbo_pending))
       
  2459 		return -EINVAL;
       
  2460 
       
  2461 	if (netif_running(netdev))
       
  2462 		e100_down(nic);
       
  2463 	rfds->count = max(ring->rx_pending, rfds->min);
       
  2464 	rfds->count = min(rfds->count, rfds->max);
       
  2465 	cbs->count = max(ring->tx_pending, cbs->min);
       
  2466 	cbs->count = min(cbs->count, cbs->max);
       
  2467 	DPRINTK(DRV, INFO, "Ring Param settings: rx: %d, tx %d\n",
       
  2468 	        rfds->count, cbs->count);
       
  2469 	if (netif_running(netdev))
       
  2470 		e100_up(nic);
       
  2471 
       
  2472 	return 0;
       
  2473 }
       
  2474 
       
  2475 static const char e100_gstrings_test[][ETH_GSTRING_LEN] = {
       
  2476 	"Link test     (on/offline)",
       
  2477 	"Eeprom test   (on/offline)",
       
  2478 	"Self test        (offline)",
       
  2479 	"Mac loopback     (offline)",
       
  2480 	"Phy loopback     (offline)",
       
  2481 };
       
  2482 #define E100_TEST_LEN	ARRAY_SIZE(e100_gstrings_test)
       
  2483 
       
  2484 static void e100_diag_test(struct net_device *netdev,
       
  2485 	struct ethtool_test *test, u64 *data)
       
  2486 {
       
  2487 	struct ethtool_cmd cmd;
       
  2488 	struct nic *nic = netdev_priv(netdev);
       
  2489 	int i, err;
       
  2490 
       
  2491 	memset(data, 0, E100_TEST_LEN * sizeof(u64));
       
  2492 	data[0] = !mii_link_ok(&nic->mii);
       
  2493 	data[1] = e100_eeprom_load(nic);
       
  2494 	if (test->flags & ETH_TEST_FL_OFFLINE) {
       
  2495 
       
  2496 		/* save speed, duplex & autoneg settings */
       
  2497 		err = mii_ethtool_gset(&nic->mii, &cmd);
       
  2498 
       
  2499 		if (netif_running(netdev))
       
  2500 			e100_down(nic);
       
  2501 		data[2] = e100_self_test(nic);
       
  2502 		data[3] = e100_loopback_test(nic, lb_mac);
       
  2503 		data[4] = e100_loopback_test(nic, lb_phy);
       
  2504 
       
  2505 		/* restore speed, duplex & autoneg settings */
       
  2506 		err = mii_ethtool_sset(&nic->mii, &cmd);
       
  2507 
       
  2508 		if (netif_running(netdev))
       
  2509 			e100_up(nic);
       
  2510 	}
       
  2511 	for (i = 0; i < E100_TEST_LEN; i++)
       
  2512 		test->flags |= data[i] ? ETH_TEST_FL_FAILED : 0;
       
  2513 
       
  2514 	msleep_interruptible(4 * 1000);
       
  2515 }
       
  2516 
       
  2517 static int e100_phys_id(struct net_device *netdev, u32 data)
       
  2518 {
       
  2519 	struct nic *nic = netdev_priv(netdev);
       
  2520 
       
  2521 	if (!data || data > (u32)(MAX_SCHEDULE_TIMEOUT / HZ))
       
  2522 		data = (u32)(MAX_SCHEDULE_TIMEOUT / HZ);
       
  2523 	mod_timer(&nic->blink_timer, jiffies);
       
  2524 	msleep_interruptible(data * 1000);
       
  2525 	del_timer_sync(&nic->blink_timer);
       
  2526 	mdio_write(netdev, nic->mii.phy_id, MII_LED_CONTROL, 0);
       
  2527 
       
  2528 	return 0;
       
  2529 }
       
  2530 
       
static const char e100_gstrings_stats[][ETH_GSTRING_LEN] = {
	"rx_packets", "tx_packets", "rx_bytes", "tx_bytes", "rx_errors",
	"tx_errors", "rx_dropped", "tx_dropped", "multicast", "collisions",
	"rx_length_errors", "rx_over_errors", "rx_crc_errors",
	"rx_frame_errors", "rx_fifo_errors", "rx_missed_errors",
	"tx_aborted_errors", "tx_carrier_errors", "tx_fifo_errors",
	"tx_heartbeat_errors", "tx_window_errors",
	/* device-specific stats */
	"tx_deferred", "tx_single_collisions", "tx_multi_collisions",
	"tx_flow_control_pause", "rx_flow_control_pause",
	"rx_flow_control_unsupported", "tx_tco_packets", "rx_tco_packets",
};
/* number of generic net_device_stats entries at the head of the list */
#define E100_NET_STATS_LEN	21
#define E100_STATS_LEN	ARRAY_SIZE(e100_gstrings_stats)
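
/* The callbacks below implement the ethtool statistics contract:
 * get_sset_count() reports how many entries a string set contains,
 * get_strings() names them, and get_ethtool_stats() fills in the
 * matching values.  The counts must agree with the string tables above,
 * or userspace ethtool will mispair names and numbers. */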
       
static int e100_get_sset_count(struct net_device *netdev, int sset)
{
	switch (sset) {
	case ETH_SS_TEST:
		return E100_TEST_LEN;
	case ETH_SS_STATS:
		return E100_STATS_LEN;
	default:
		return -EOPNOTSUPP;
	}
}

static void e100_get_ethtool_stats(struct net_device *netdev,
	struct ethtool_stats *stats, u64 *data)
{
	struct nic *nic = netdev_priv(netdev);
	int i;

	for (i = 0; i < E100_NET_STATS_LEN; i++)
		data[i] = ((unsigned long *)&netdev->stats)[i];

	data[i++] = nic->tx_deferred;
	data[i++] = nic->tx_single_collisions;
	data[i++] = nic->tx_multiple_collisions;
	data[i++] = nic->tx_fc_pause;
	data[i++] = nic->rx_fc_pause;
	data[i++] = nic->rx_fc_unsupported;
	data[i++] = nic->tx_tco_frames;
	data[i++] = nic->rx_tco_frames;
}

static void e100_get_strings(struct net_device *netdev, u32 stringset, u8 *data)
{
	switch (stringset) {
	case ETH_SS_TEST:
		memcpy(data, *e100_gstrings_test, sizeof(e100_gstrings_test));
		break;
	case ETH_SS_STATS:
		memcpy(data, *e100_gstrings_stats, sizeof(e100_gstrings_stats));
		break;
	}
}
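
/* Dispatch table wiring the handlers above into the ethtool ioctl
 * interface; the networking core looks these up via
 * netdev->ethtool_ops. */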
       
static const struct ethtool_ops e100_ethtool_ops = {
	.get_settings		= e100_get_settings,
	.set_settings		= e100_set_settings,
	.get_drvinfo		= e100_get_drvinfo,
	.get_regs_len		= e100_get_regs_len,
	.get_regs		= e100_get_regs,
	.get_wol		= e100_get_wol,
	.set_wol		= e100_set_wol,
	.get_msglevel		= e100_get_msglevel,
	.set_msglevel		= e100_set_msglevel,
	.nway_reset		= e100_nway_reset,
	.get_link		= e100_get_link,
	.get_eeprom_len		= e100_get_eeprom_len,
	.get_eeprom		= e100_get_eeprom,
	.set_eeprom		= e100_set_eeprom,
	.get_ringparam		= e100_get_ringparam,
	.set_ringparam		= e100_set_ringparam,
	.self_test		= e100_diag_test,
	.get_strings		= e100_get_strings,
	.phys_id		= e100_phys_id,
	.get_ethtool_stats	= e100_get_ethtool_stats,
	.get_sset_count		= e100_get_sset_count,
};
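
/* MII ioctls (SIOCGMIIPHY, SIOCGMIIREG, SIOCSMIIREG) are forwarded
 * verbatim to the generic MII layer, which serves them from nic->mii.
 * A minimal userspace sketch of the register read path (interface name
 * "eth0" assumed, error handling omitted):
 *
 *	#include <string.h>
 *	#include <sys/ioctl.h>
 *	#include <sys/socket.h>
 *	#include <net/if.h>
 *	#include <linux/mii.h>
 *	#include <linux/sockios.h>
 *
 *	struct ifreq ifr;
 *	struct mii_ioctl_data *mii = (struct mii_ioctl_data *)&ifr.ifr_data;
 *	int fd = socket(AF_INET, SOCK_DGRAM, 0);
 *
 *	memset(&ifr, 0, sizeof(ifr));
 *	strncpy(ifr.ifr_name, "eth0", IFNAMSIZ - 1);
 *	ioctl(fd, SIOCGMIIPHY, &ifr);          (fills mii->phy_id)
 *	mii->reg_num = MII_BMSR;               (basic mode status register)
 *	ioctl(fd, SIOCGMIIREG, &ifr);          (result in mii->val_out)
 */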
       
static int e100_do_ioctl(struct net_device *netdev, struct ifreq *ifr, int cmd)
{
	struct nic *nic = netdev_priv(netdev);

	return generic_mii_ioctl(&nic->mii, if_mii(ifr), cmd, NULL);
}
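
/* The shared command/status area (struct mem: self-test signature and
 * result, adapter statistics, register dump buffer) lives in one
 * coherent DMA allocation, so adapter and CPU always see a consistent
 * view without explicit syncing.  It is allocated once during probe and
 * released again on the probe error path and on device removal. */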
       
static int e100_alloc(struct nic *nic)
{
	nic->mem = pci_alloc_consistent(nic->pdev, sizeof(struct mem),
		&nic->dma_addr);
	return nic->mem ? 0 : -ENOMEM;
}

static void e100_free(struct nic *nic)
{
	if (nic->mem) {
		pci_free_consistent(nic->pdev, sizeof(struct mem),
			nic->mem, nic->dma_addr);
		nic->mem = NULL;
	}
}

static int e100_open(struct net_device *netdev)
{
	struct nic *nic = netdev_priv(netdev);
	int err = 0;

	if (!nic->ecdev)
		netif_carrier_off(netdev);
	if ((err = e100_up(nic)))
		DPRINTK(IFUP, ERR, "Cannot open interface, aborting.\n");
	return err;
}

static int e100_close(struct net_device *netdev)
{
	e100_down(netdev_priv(netdev));
	return 0;
}
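
/* net_device_ops gathers the callbacks that older kernels kept as
 * individual function pointers inside struct net_device; the table was
 * introduced with 2.6.29, the kernel this driver targets. */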
       
static const struct net_device_ops e100_netdev_ops = {
	.ndo_open		= e100_open,
	.ndo_stop		= e100_close,
	.ndo_start_xmit		= e100_xmit_frame,
	.ndo_validate_addr	= eth_validate_addr,
	.ndo_set_multicast_list	= e100_set_multicast_list,
	.ndo_set_mac_address	= e100_set_mac_address,
	.ndo_change_mtu		= e100_change_mtu,
	.ndo_do_ioctl		= e100_do_ioctl,
	.ndo_tx_timeout		= e100_tx_timeout,
#ifdef CONFIG_NET_POLL_CONTROLLER
	.ndo_poll_controller	= e100_netpoll,
#endif
};
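
/* Probe sequence: allocate the netdev, enable and map the PCI device,
 * reset the hardware before enabling bus mastering, set up the timers
 * and the tx-timeout work, load the EEPROM and the MAC address, then
 * offer the device to the EtherCAT master.  Only when the master
 * declines it (ecdev_offer() returns NULL) is the device registered
 * with the normal network stack as eth%d. */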
       
static int __devinit e100_probe(struct pci_dev *pdev,
	const struct pci_device_id *ent)
{
	struct net_device *netdev;
	struct nic *nic;
	int err;

	if (!(netdev = alloc_etherdev(sizeof(struct nic)))) {
		if (((1 << debug) - 1) & NETIF_MSG_PROBE)
			printk(KERN_ERR PFX "Etherdev alloc failed, abort.\n");
		return -ENOMEM;
	}

	netdev->netdev_ops = &e100_netdev_ops;
	SET_ETHTOOL_OPS(netdev, &e100_ethtool_ops);
	netdev->watchdog_timeo = E100_WATCHDOG_PERIOD;
	strncpy(netdev->name, pci_name(pdev), sizeof(netdev->name) - 1);

	nic = netdev_priv(netdev);
	netif_napi_add(netdev, &nic->napi, e100_poll, E100_NAPI_WEIGHT);
	nic->netdev = netdev;
	nic->pdev = pdev;
	nic->msg_enable = (1 << debug) - 1;
	pci_set_drvdata(pdev, netdev);

	if ((err = pci_enable_device(pdev))) {
		DPRINTK(PROBE, ERR, "Cannot enable PCI device, aborting.\n");
		goto err_out_free_dev;
	}

	if (!(pci_resource_flags(pdev, 0) & IORESOURCE_MEM)) {
		DPRINTK(PROBE, ERR, "Cannot find proper PCI device "
			"base address, aborting.\n");
		err = -ENODEV;
		goto err_out_disable_pdev;
	}

	if ((err = pci_request_regions(pdev, DRV_NAME))) {
		DPRINTK(PROBE, ERR, "Cannot obtain PCI resources, aborting.\n");
		goto err_out_disable_pdev;
	}

	if ((err = pci_set_dma_mask(pdev, DMA_32BIT_MASK))) {
		DPRINTK(PROBE, ERR, "No usable DMA configuration, aborting.\n");
		goto err_out_free_res;
	}

	SET_NETDEV_DEV(netdev, &pdev->dev);

	if (use_io)
		DPRINTK(PROBE, INFO, "using i/o access mode\n");

	nic->csr = pci_iomap(pdev, (use_io ? 1 : 0), sizeof(struct csr));
	if (!nic->csr) {
		DPRINTK(PROBE, ERR, "Cannot map device registers, aborting.\n");
		err = -ENOMEM;
		goto err_out_free_res;
	}

	if (ent->driver_data)
		nic->flags |= ich;
	else
		nic->flags &= ~ich;

	e100_get_defaults(nic);

	/* locks must be initialized before calling hw_reset */
	spin_lock_init(&nic->cb_lock);
	spin_lock_init(&nic->cmd_lock);
	spin_lock_init(&nic->mdio_lock);

	/* Reset the device before pci_set_master() in case device is in some
	 * funky state and has an interrupt pending - hint: we don't have the
	 * interrupt handler registered yet. */
	e100_hw_reset(nic);

	pci_set_master(pdev);

	init_timer(&nic->watchdog);
	nic->watchdog.function = e100_watchdog;
	nic->watchdog.data = (unsigned long)nic;
	init_timer(&nic->blink_timer);
	nic->blink_timer.function = e100_blink_led;
	nic->blink_timer.data = (unsigned long)nic;

	INIT_WORK(&nic->tx_timeout_task, e100_tx_timeout_task);

	if ((err = e100_alloc(nic))) {
		DPRINTK(PROBE, ERR, "Cannot alloc driver memory, aborting.\n");
		goto err_out_iounmap;
	}

	if ((err = e100_eeprom_load(nic)))
		goto err_out_free;

	e100_phy_init(nic);

	memcpy(netdev->dev_addr, nic->eeprom, ETH_ALEN);
	memcpy(netdev->perm_addr, nic->eeprom, ETH_ALEN);
	if (!is_valid_ether_addr(netdev->perm_addr)) {
		if (!eeprom_bad_csum_allow) {
			DPRINTK(PROBE, ERR, "Invalid MAC address from "
			        "EEPROM, aborting.\n");
			err = -EAGAIN;
			goto err_out_free;
		} else {
			DPRINTK(PROBE, ERR, "Invalid MAC address from EEPROM, "
			        "you MUST configure one.\n");
		}
	}

	/* WoL magic packet can be enabled from EEPROM */
	if ((nic->mac >= mac_82558_D101_A4) &&
	   (nic->eeprom[eeprom_id] & eeprom_id_wol)) {
		nic->flags |= wol_magic;
		device_set_wakeup_enable(&pdev->dev, true);
	}

	/* ack any pending wake events, disable PME */
	pci_pme_active(pdev, false);

	/* offer device to EtherCAT master module */
	nic->ecdev = ecdev_offer(netdev, e100_ec_poll, THIS_MODULE);
	if (nic->ecdev) {
		if ((err = ecdev_open(nic->ecdev))) {
			ecdev_withdraw(nic->ecdev);
			goto err_out_free;
		}
	} else {
		strcpy(netdev->name, "eth%d");
		if ((err = register_netdev(netdev))) {
			DPRINTK(PROBE, ERR, "Cannot register net device, aborting.\n");
			goto err_out_free;
		}
	}

	DPRINTK(PROBE, INFO, "addr 0x%llx, irq %d, MAC addr %pM\n",
		(unsigned long long)pci_resource_start(pdev, use_io ? 1 : 0),
		pdev->irq, netdev->dev_addr);

	return 0;

err_out_free:
	e100_free(nic);
err_out_iounmap:
	pci_iounmap(pdev, nic->csr);
err_out_free_res:
	pci_release_regions(pdev);
err_out_disable_pdev:
	pci_disable_device(pdev);
err_out_free_dev:
	pci_set_drvdata(pdev, NULL);
	free_netdev(netdev);
	return err;
}
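
/* Teardown mirrors probe in reverse: detach from the EtherCAT master
 * (or unregister from the net stack), then release the DMA area, the
 * register mapping, the PCI regions and finally the device itself. */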
       
static void __devexit e100_remove(struct pci_dev *pdev)
{
	struct net_device *netdev = pci_get_drvdata(pdev);

	if (netdev) {
		struct nic *nic = netdev_priv(netdev);
		if (nic->ecdev) {
			ecdev_close(nic->ecdev);
			ecdev_withdraw(nic->ecdev);
		} else {
			unregister_netdev(netdev);
		}

		e100_free(nic);
		pci_iounmap(pdev, nic->csr);
		free_netdev(netdev);
		pci_release_regions(pdev);
		pci_disable_device(pdev);
		pci_set_drvdata(pdev, NULL);
	}
}
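
/* Power management is bypassed for devices attached to the EtherCAT
 * master: the master owns such a device and keeps polling it, so the
 * suspend and resume handlers below return immediately in that case. */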
       
static int e100_suspend(struct pci_dev *pdev, pm_message_t state)
{
	struct net_device *netdev = pci_get_drvdata(pdev);
	struct nic *nic = netdev_priv(netdev);

	if (nic->ecdev)
		return 0;

	if (netif_running(netdev))
		e100_down(nic);
	netif_device_detach(netdev);

	pci_save_state(pdev);

	if ((nic->flags & wol_magic) | e100_asf(nic)) {
		/* if the device cannot wake from D3cold, arm D3hot instead */
		if (pci_enable_wake(pdev, PCI_D3cold, true))
			pci_enable_wake(pdev, PCI_D3hot, true);
	} else {
		pci_enable_wake(pdev, PCI_D3hot, false);
	}

	pci_disable_device(pdev);
	pci_set_power_state(pdev, PCI_D3hot);

	return 0;
}
       
#ifdef CONFIG_PM
static int e100_resume(struct pci_dev *pdev)
{
	struct net_device *netdev = pci_get_drvdata(pdev);
	struct nic *nic = netdev_priv(netdev);

	if (nic->ecdev)
		return 0;

	pci_set_power_state(pdev, PCI_D0);
	pci_restore_state(pdev);
	/* ack any pending wake events, disable PME */
	pci_enable_wake(pdev, 0, 0);

	netif_device_attach(netdev);
	if (netif_running(netdev))
		e100_up(nic);

	return 0;
}
#endif /* CONFIG_PM */

static void e100_shutdown(struct pci_dev *pdev)
{
	e100_suspend(pdev, PMSG_SUSPEND);
}
       
/* ------------------ PCI Error Recovery infrastructure -------------- */
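/* Recovery runs in three stages driven by the PCI core: error_detected()
 * quiesces the device and asks for a slot reset, slot_reset() re-enables
 * and reinitializes the hardware after the bus reset, and resume()
 * restarts traffic. */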
       
/**
 * e100_io_error_detected - called when PCI error is detected.
 * @pdev: Pointer to PCI device
 * @state: The current pci connection state
 */
static pci_ers_result_t e100_io_error_detected(struct pci_dev *pdev, pci_channel_state_t state)
{
	struct net_device *netdev = pci_get_drvdata(pdev);
	struct nic *nic = netdev_priv(netdev);

	/* Similar to calling e100_down(), but avoids adapter I/O. */
	e100_close(netdev);

	if (!nic->ecdev) {
		/* Detach; put netif into a state similar to hotplug unplug. */
		napi_enable(&nic->napi);
		netif_device_detach(netdev);
	}
	pci_disable_device(pdev);

	/* Request a slot reset. */
	return PCI_ERS_RESULT_NEED_RESET;
}
       
/**
 * e100_io_slot_reset - called after the pci bus has been reset.
 * @pdev: Pointer to PCI device
 *
 * Restart the card from scratch.
 */
static pci_ers_result_t e100_io_slot_reset(struct pci_dev *pdev)
{
	struct net_device *netdev = pci_get_drvdata(pdev);
	struct nic *nic = netdev_priv(netdev);

	if (pci_enable_device(pdev)) {
		printk(KERN_ERR "e100: Cannot re-enable PCI device after reset.\n");
		return PCI_ERS_RESULT_DISCONNECT;
	}
	pci_set_master(pdev);

	/* Only one device per card can do a reset */
	if (0 != PCI_FUNC(pdev->devfn))
		return PCI_ERS_RESULT_RECOVERED;
	e100_hw_reset(nic);
	e100_phy_init(nic);

	return PCI_ERS_RESULT_RECOVERED;
}
       
/**
 * e100_io_resume - resume normal operations
 * @pdev: Pointer to PCI device
 *
 * Resume normal operations after an error recovery
 * sequence has been completed.
 */
static void e100_io_resume(struct pci_dev *pdev)
{
	struct net_device *netdev = pci_get_drvdata(pdev);
	struct nic *nic = netdev_priv(netdev);

	/* ack any pending wake events, disable PME */
	pci_enable_wake(pdev, 0, 0);

	if (!nic->ecdev)
		netif_device_attach(netdev);
	if (nic->ecdev || netif_running(netdev)) {
		e100_open(netdev);
		if (!nic->ecdev)
			mod_timer(&nic->watchdog, jiffies);
	}
}
       
static struct pci_error_handlers e100_err_handler = {
	.error_detected = e100_io_error_detected,
	.slot_reset = e100_io_slot_reset,
	.resume = e100_io_resume,
};
       
static struct pci_driver e100_driver = {
	.name =         DRV_NAME,
	.id_table =     e100_id_table,
	.probe =        e100_probe,
	.remove =       __devexit_p(e100_remove),
#ifdef CONFIG_PM
	/* Power Management hooks */
	.suspend =      e100_suspend,
	.resume =       e100_resume,
#endif
	.shutdown =     e100_shutdown,
	.err_handler = &e100_err_handler,
};
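
/* Module entry points.  The banner names both the driver version and the
 * EtherCAT master version it was built against; pci_register_driver()
 * then triggers e100_probe() for every device matching e100_id_table. */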
       
static int __init e100_init_module(void)
{
	printk(KERN_INFO DRV_NAME " " DRV_DESCRIPTION " " DRV_VERSION
			", master " EC_MASTER_VERSION "\n");

	return pci_register_driver(&e100_driver);
}

static void __exit e100_cleanup_module(void)
{
	printk(KERN_INFO DRV_NAME " cleaning up module...\n");
	pci_unregister_driver(&e100_driver);
	printk(KERN_INFO DRV_NAME " module cleaned up.\n");
}

module_init(e100_init_module);
module_exit(e100_cleanup_module);