devices/e100-3.4-ethercat.c
branchstable-1.5
changeset 2493 fcd918d2122f
child 2531 193443bcd7dc
equal deleted inserted replaced
2492:d7b1a62709af 2493:fcd918d2122f
       
     1 /******************************************************************************
       
     2  *
       
     3  *  $Id$
       
     4  *
       
     5  *  Copyright (C) 2007-2012  Florian Pose, Ingenieurgemeinschaft IgH
       
     6  *
       
     7  *  This file is part of the IgH EtherCAT Master.
       
     8  *
       
     9  *  The IgH EtherCAT Master is free software; you can redistribute it and/or
       
    10  *  modify it under the terms of the GNU General Public License version 2, as
       
    11  *  published by the Free Software Foundation.
       
    12  *
       
    13  *  The IgH EtherCAT Master is distributed in the hope that it will be useful,
       
    14  *  but WITHOUT ANY WARRANTY; without even the implied warranty of
       
    15  *  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General
       
    16  *  Public License for more details.
       
    17  *
       
    18  *  You should have received a copy of the GNU General Public License along
       
    19  *  with the IgH EtherCAT Master; if not, write to the Free Software
       
    20  *  Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA  02110-1301  USA
       
    21  *
       
    22  *  ---
       
    23  *
       
    24  *  The license mentioned above concerns the source code only. Using the
       
    25  *  EtherCAT technology and brand is only permitted in compliance with the
       
    26  *  industrial property and similar rights of Beckhoff Automation GmbH.
       
    27  *
       
    28  *  ---
       
    29  *
       
    30  *  vim: noexpandtab
       
    31  *
       
    32  *****************************************************************************/
       
    33 
       
    34 /**
       
    35    \file
       
    36    EtherCAT driver for e100-compatible NICs.
       
    37 */
       
    38 
       
    39 /* Former documentation: */
       
    40 
       
    41 /*******************************************************************************
       
    42 
       
    43   Intel PRO/100 Linux driver
       
    44   Copyright(c) 1999 - 2006 Intel Corporation.
       
    45 
       
    46   This program is free software; you can redistribute it and/or modify it
       
    47   under the terms and conditions of the GNU General Public License,
       
    48   version 2, as published by the Free Software Foundation.
       
    49 
       
    50   This program is distributed in the hope it will be useful, but WITHOUT
       
    51   ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
       
    52   FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
       
    53   more details.
       
    54 
       
    55   You should have received a copy of the GNU General Public License along with
       
    56   this program; if not, write to the Free Software Foundation, Inc.,
       
    57   51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
       
    58 
       
    59   The full GNU General Public License is included in this distribution in
       
    60   the file called "COPYING".
       
    61 
       
    62   Contact Information:
       
    63   Linux NICS <linux.nics@intel.com>
       
    64   e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
       
    65   Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
       
    66 
       
    67 *******************************************************************************/
       
    68 
       
    69 /*
       
    70  *	e100.c: Intel(R) PRO/100 ethernet driver
       
    71  *
       
    72  *	(Re)written 2003 by scott.feldman@intel.com.  Based loosely on
       
    73  *	original e100 driver, but better described as a munging of
       
    74  *	e100, e1000, eepro100, tg3, 8139cp, and other drivers.
       
    75  *
       
    76  *	References:
       
    77  *		Intel 8255x 10/100 Mbps Ethernet Controller Family,
       
    78  *		Open Source Software Developers Manual,
       
    79  *		http://sourceforge.net/projects/e1000
       
    80  *
       
    81  *
       
    82  *	                      Theory of Operation
       
    83  *
       
    84  *	I.   General
       
    85  *
       
    86  *	The driver supports Intel(R) 10/100 Mbps PCI Fast Ethernet
       
    87  *	controller family, which includes the 82557, 82558, 82559, 82550,
       
    88  *	82551, and 82562 devices.  82558 and greater controllers
       
    89  *	integrate the Intel 82555 PHY.  The controllers are used in
       
    90  *	server and client network interface cards, as well as in
       
    91  *	LAN-On-Motherboard (LOM), CardBus, MiniPCI, and ICHx
       
    92  *	configurations.  8255x supports a 32-bit linear addressing
       
    93  *	mode and operates at 33Mhz PCI clock rate.
       
    94  *
       
    95  *	II.  Driver Operation
       
    96  *
       
    97  *	Memory-mapped mode is used exclusively to access the device's
       
    98  *	shared-memory structure, the Control/Status Registers (CSR). All
       
    99  *	setup, configuration, and control of the device, including queuing
       
   100  *	of Tx, Rx, and configuration commands is through the CSR.
       
   101  *	cmd_lock serializes accesses to the CSR command register.  cb_lock
       
   102  *	protects the shared Command Block List (CBL).
       
   103  *
       
   104  *	8255x is highly MII-compliant and all access to the PHY go
       
   105  *	through the Management Data Interface (MDI).  Consequently, the
       
   106  *	driver leverages the mii.c library shared with other MII-compliant
       
   107  *	devices.
       
   108  *
       
   109  *	Big- and Little-Endian byte order as well as 32- and 64-bit
       
   110  *	archs are supported.  Weak-ordered memory and non-cache-coherent
       
   111  *	archs are supported.
       
   112  *
       
   113  *	III. Transmit
       
   114  *
       
   115  *	A Tx skb is mapped and hangs off of a TCB.  TCBs are linked
       
   116  *	together in a fixed-size ring (CBL) thus forming the flexible mode
       
   117  *	memory structure.  A TCB marked with the suspend-bit indicates
       
   118  *	the end of the ring.  The last TCB processed suspends the
       
   119  *	controller, and the controller can be restarted by issue a CU
       
   120  *	resume command to continue from the suspend point, or a CU start
       
   121  *	command to start at a given position in the ring.
       
   122  *
       
   123  *	Non-Tx commands (config, multicast setup, etc) are linked
       
   124  *	into the CBL ring along with Tx commands.  The common structure
       
   125  *	used for both Tx and non-Tx commands is the Command Block (CB).
       
   126  *
       
   127  *	cb_to_use is the next CB to use for queuing a command; cb_to_clean
       
   128  *	is the next CB to check for completion; cb_to_send is the first
       
   129  *	CB to start on in case of a previous failure to resume.  CB clean
       
   130  *	up happens in interrupt context in response to a CU interrupt.
       
   131  *	cbs_avail keeps track of number of free CB resources available.
       
   132  *
       
   133  * 	Hardware padding of short packets to minimum packet size is
       
   134  * 	enabled.  82557 pads with 7Eh, while the later controllers pad
       
   135  * 	with 00h.
       
   136  *
       
   137  *	IV.  Receive
       
   138  *
       
   139  *	The Receive Frame Area (RFA) comprises a ring of Receive Frame
       
   140  *	Descriptors (RFD) + data buffer, thus forming the simplified mode
       
   141  *	memory structure.  Rx skbs are allocated to contain both the RFD
       
   142  *	and the data buffer, but the RFD is pulled off before the skb is
       
   143  *	indicated.  The data buffer is aligned such that encapsulated
       
   144  *	protocol headers are u32-aligned.  Since the RFD is part of the
       
   145  *	mapped shared memory, and completion status is contained within
       
   146  *	the RFD, the RFD must be dma_sync'ed to maintain a consistent
       
   147  *	view from software and hardware.
       
   148  *
       
   149  *	In order to keep updates to the RFD link field from colliding with
       
   150  *	hardware writes to mark packets complete, we use the feature that
       
   151  *	hardware will not write to a size 0 descriptor and mark the previous
       
   152  *	packet as end-of-list (EL).   After updating the link, we remove EL
       
   153  *	and only then restore the size such that hardware may use the
       
   154  *	previous-to-end RFD.
       
   155  *
       
   156  *	Under typical operation, the  receive unit (RU) is start once,
       
   157  *	and the controller happily fills RFDs as frames arrive.  If
       
   158  *	replacement RFDs cannot be allocated, or the RU goes non-active,
       
   159  *	the RU must be restarted.  Frame arrival generates an interrupt,
       
   160  *	and Rx indication and re-allocation happen in the same context,
       
   161  *	therefore no locking is required.  A software-generated interrupt
       
   162  *	is generated from the watchdog to recover from a failed allocation
       
   163  *	scenario where all Rx resources have been indicated and none re-
       
   164  *	placed.
       
   165  *
       
   166  *	V.   Miscellaneous
       
   167  *
       
   168  * 	VLAN offloading of tagging, stripping and filtering is not
       
   169  * 	supported, but driver will accommodate the extra 4-byte VLAN tag
       
   170  * 	for processing by upper layers.  Tx/Rx Checksum offloading is not
       
   171  * 	supported.  Tx Scatter/Gather is not supported.  Jumbo Frames is
       
   172  * 	not supported (hardware limitation).
       
   173  *
       
   174  * 	MagicPacket(tm) WoL support is enabled/disabled via ethtool.
       
   175  *
       
   176  * 	Thanks to JC (jchapman@katalix.com) for helping with
       
   177  * 	testing/troubleshooting the development driver.
       
   178  *
       
   179  * 	TODO:
       
   180  * 	o several entry points race with dev->close
       
   181  * 	o check for tx-no-resources/stop Q races with tx clean/wake Q
       
   182  *
       
   183  *	FIXES:
       
   184  * 2005/12/02 - Michael O'Donnell <Michael.ODonnell at stratus dot com>
       
   185  *	- Stratus87247: protect MDI control register manipulations
       
   186  * 2009/06/01 - Andreas Mohr <andi at lisas dot de>
       
   187  *      - add clean lowlevel I/O emulation for cards with MII-lacking PHYs
       
   188  */
       
   189 
       
   190 #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
       
   191 
       
   192 #include <linux/hardirq.h>
       
   193 #include <linux/interrupt.h>
       
   194 #include <linux/module.h>
       
   195 #include <linux/moduleparam.h>
       
   196 #include <linux/kernel.h>
       
   197 #include <linux/types.h>
       
   198 #include <linux/sched.h>
       
   199 #include <linux/slab.h>
       
   200 #include <linux/delay.h>
       
   201 #include <linux/init.h>
       
   202 #include <linux/pci.h>
       
   203 #include <linux/dma-mapping.h>
       
   204 #include <linux/dmapool.h>
       
   205 #include <linux/netdevice.h>
       
   206 #include <linux/etherdevice.h>
       
   207 #include <linux/mii.h>
       
   208 #include <linux/if_vlan.h>
       
   209 #include <linux/skbuff.h>
       
   210 #include <linux/ethtool.h>
       
   211 #include <linux/string.h>
       
   212 #include <linux/firmware.h>
       
   213 #include <linux/rtnetlink.h>
       
   214 #include <asm/unaligned.h>
       
   215 
       
   216 // EtherCAT includes
       
   217 #include "../globals.h"
       
   218 #include "ecdev.h"
       
   219 
       
/* Driver identity: "ec_e100" distinguishes this EtherCAT-capable build
 * from the stock in-tree e100 driver. */
#define DRV_NAME		"ec_e100"
#define DRV_EXT			"-NAPI"
#define DRV_VERSION		"3.5.24-k2"DRV_EXT
#define DRV_DESCRIPTION		"Intel(R) PRO/100 Network Driver"
#define DRV_COPYRIGHT		"Copyright(c) 1999-2006 Intel Corporation"

/* Watchdog period (in jiffies) and NAPI poll budget */
#define E100_WATCHDOG_PERIOD	(2 * HZ)
#define E100_NAPI_WEIGHT	16

/* Microcode image file names, one per controller family */
#define FIRMWARE_D101M		"e100/d101m_ucode.bin"
#define FIRMWARE_D101S		"e100/d101s_ucode.bin"
#define FIRMWARE_D102E		"e100/d102e_ucode.bin"
       
   232 
       
   233 MODULE_DESCRIPTION(DRV_DESCRIPTION);
       
   234 MODULE_AUTHOR(DRV_COPYRIGHT);
       
   235 MODULE_LICENSE("GPL");
       
   236 MODULE_VERSION(DRV_VERSION);
       
   237 MODULE_FIRMWARE(FIRMWARE_D101M);
       
   238 MODULE_FIRMWARE(FIRMWARE_D101S);
       
   239 MODULE_FIRMWARE(FIRMWARE_D102E);
       
   240 
       
   241 MODULE_DESCRIPTION(DRV_DESCRIPTION);
       
   242 MODULE_AUTHOR("Florian Pose <fp@igh-essen.com>");
       
   243 MODULE_LICENSE("GPL");
       
   244 MODULE_VERSION(DRV_VERSION ", master " EC_MASTER_VERSION);
       
   245 
       
/* EtherCAT master poll hook; forward declaration (implementation is
 * further down in this file, outside this excerpt). */
void e100_ec_poll(struct net_device *);

/* Module parameters (all read-only after load: perm bits 0) */
static int debug = 3;			/* message verbosity, see PARM_DESC */
static int eeprom_bad_csum_allow = 0;	/* tolerate bad EEPROM checksum */
static int use_io = 0;			/* force port I/O instead of MMIO */
module_param(debug, int, 0);
module_param(eeprom_bad_csum_allow, int, 0);
module_param(use_io, int, 0);
MODULE_PARM_DESC(debug, "Debug level (0=none,...,16=all)");
MODULE_PARM_DESC(eeprom_bad_csum_allow, "Allow bad eeprom checksums");
MODULE_PARM_DESC(use_io, "Force use of i/o access mode");
       
   257 
       
/* Match an Intel 8255x device: fixed vendor, given device id, any
 * subsystem, Ethernet class; 'ich' is carried in driver_data. */
#define INTEL_8255X_ETHERNET_DEVICE(device_id, ich) {\
	PCI_VENDOR_ID_INTEL, device_id, PCI_ANY_ID, PCI_ANY_ID, \
	PCI_CLASS_NETWORK_ETHERNET << 8, 0xFFFF00, ich }
/* NOTE(review): DEFINE_PCI_DEVICE_TABLE is deprecated in later kernels;
 * fine for the 3.4 target this driver is written against. */
static DEFINE_PCI_DEVICE_TABLE(e100_id_table) = {
	INTEL_8255X_ETHERNET_DEVICE(0x1029, 0),
	INTEL_8255X_ETHERNET_DEVICE(0x1030, 0),
	INTEL_8255X_ETHERNET_DEVICE(0x1031, 3),
	INTEL_8255X_ETHERNET_DEVICE(0x1032, 3),
	INTEL_8255X_ETHERNET_DEVICE(0x1033, 3),
	INTEL_8255X_ETHERNET_DEVICE(0x1034, 3),
	INTEL_8255X_ETHERNET_DEVICE(0x1038, 3),
	INTEL_8255X_ETHERNET_DEVICE(0x1039, 4),
	INTEL_8255X_ETHERNET_DEVICE(0x103A, 4),
	INTEL_8255X_ETHERNET_DEVICE(0x103B, 4),
	INTEL_8255X_ETHERNET_DEVICE(0x103C, 4),
	INTEL_8255X_ETHERNET_DEVICE(0x103D, 4),
	INTEL_8255X_ETHERNET_DEVICE(0x103E, 4),
	INTEL_8255X_ETHERNET_DEVICE(0x1050, 5),
	INTEL_8255X_ETHERNET_DEVICE(0x1051, 5),
	INTEL_8255X_ETHERNET_DEVICE(0x1052, 5),
	INTEL_8255X_ETHERNET_DEVICE(0x1053, 5),
	INTEL_8255X_ETHERNET_DEVICE(0x1054, 5),
	INTEL_8255X_ETHERNET_DEVICE(0x1055, 5),
	INTEL_8255X_ETHERNET_DEVICE(0x1056, 5),
	INTEL_8255X_ETHERNET_DEVICE(0x1057, 5),
	INTEL_8255X_ETHERNET_DEVICE(0x1059, 0),
	INTEL_8255X_ETHERNET_DEVICE(0x1064, 6),
	INTEL_8255X_ETHERNET_DEVICE(0x1065, 6),
	INTEL_8255X_ETHERNET_DEVICE(0x1066, 6),
	INTEL_8255X_ETHERNET_DEVICE(0x1067, 6),
	INTEL_8255X_ETHERNET_DEVICE(0x1068, 6),
	INTEL_8255X_ETHERNET_DEVICE(0x1069, 6),
	INTEL_8255X_ETHERNET_DEVICE(0x106A, 6),
	INTEL_8255X_ETHERNET_DEVICE(0x106B, 6),
	INTEL_8255X_ETHERNET_DEVICE(0x1091, 7),
	INTEL_8255X_ETHERNET_DEVICE(0x1092, 7),
	INTEL_8255X_ETHERNET_DEVICE(0x1093, 7),
	INTEL_8255X_ETHERNET_DEVICE(0x1094, 7),
	INTEL_8255X_ETHERNET_DEVICE(0x1095, 7),
	INTEL_8255X_ETHERNET_DEVICE(0x10fe, 7),
	INTEL_8255X_ETHERNET_DEVICE(0x1209, 0),
	INTEL_8255X_ETHERNET_DEVICE(0x1229, 0),
	INTEL_8255X_ETHERNET_DEVICE(0x2449, 2),
	INTEL_8255X_ETHERNET_DEVICE(0x2459, 2),
	INTEL_8255X_ETHERNET_DEVICE(0x245D, 2),
	INTEL_8255X_ETHERNET_DEVICE(0x27DC, 7),
	{ 0, }
};

// prevent from being loaded automatically
//MODULE_DEVICE_TABLE(pci, e100_id_table);
       
   309 
       
/* Controller (MAC) variants supported by this driver; see the family
 * list in the "Theory of Operation" header above. */
enum mac {
	mac_82557_D100_A  = 0,
	mac_82557_D100_B  = 1,
	mac_82557_D100_C  = 2,
	mac_82558_D101_A4 = 4,
	mac_82558_D101_B0 = 5,
	mac_82559_D101M   = 8,
	mac_82559_D101S   = 9,
	mac_82550_D102    = 12,
	mac_82550_D102_C  = 13,
	mac_82551_E       = 14,
	mac_82551_F       = 15,
	mac_82551_10      = 16,
	mac_unknown       = 0xFF,
};
       
   325 
       
/* Known PHY identifiers (32-bit ids; presumably the concatenated MII
 * PHY ID registers — confirm against the MDI read path). */
enum phy {
	phy_100a     = 0x000003E0,
	phy_100c     = 0x035002A8,
	phy_82555_tx = 0x015002A8,
	phy_nsc_tx   = 0x5C002000,
	phy_82562_et = 0x033002A8,
	phy_82562_em = 0x032002A8,
	phy_82562_ek = 0x031002A8,
	phy_82562_eh = 0x017002A8,
	phy_82552_v  = 0xd061004d,
	phy_unknown  = 0xFFFFFFFF,
};
       
   338 
       
/* CSR (Control/Status Registers) — memory-mapped register layout;
 * accessed exclusively through ioread/iowrite (see helpers below).
 * Field order mirrors the hardware; do not reorder. */
struct csr {
	struct {
		u8 status;
		u8 stat_ack;
		u8 cmd_lo;
		u8 cmd_hi;
		u32 gen_ptr;
	} scb;			/* System Control Block */
	u32 port;		/* reset/self-test port (enum port) */
	u16 flash_ctrl;
	u8 eeprom_ctrl_lo;
	u8 eeprom_ctrl_hi;
	u32 mdi_ctrl;		/* MII management interface (enum mdi_ctrl) */
	u32 rx_dma_count;
};
       
   355 
       
/* SCB status byte: receive unit (RU) state bits */
enum scb_status {
	rus_no_res       = 0x08,	/* RU out of resources */
	rus_ready        = 0x10,
	rus_mask         = 0x3C,
};

/* Driver-side view of the receive unit's run state */
enum ru_state  {
	RU_SUSPENDED = 0,
	RU_RUNNING	 = 1,
	RU_UNINITIALIZED = -1,
};

/* SCB stat/ack byte: interrupt cause bits (write 1 to acknowledge) */
enum scb_stat_ack {
	stat_ack_not_ours    = 0x00,
	stat_ack_sw_gen      = 0x04,	/* software-generated interrupt */
	stat_ack_rnr         = 0x10,	/* RU not ready */
	stat_ack_cu_idle     = 0x20,
	stat_ack_frame_rx    = 0x40,
	stat_ack_cu_cmd_done = 0x80,
	stat_ack_not_present = 0xFF,
	stat_ack_rx = (stat_ack_sw_gen | stat_ack_rnr | stat_ack_frame_rx),
	stat_ack_tx = (stat_ack_cu_idle | stat_ack_cu_cmd_done),
};
       
   379 
       
/* SCB command byte (high): interrupt mask control */
enum scb_cmd_hi {
	irq_mask_none = 0x00,
	irq_mask_all  = 0x01,
	irq_sw_gen    = 0x02,	/* trigger a software interrupt */
};

/* SCB command byte (low): CU (command unit) / RU (receive unit) opcodes */
enum scb_cmd_lo {
	cuc_nop        = 0x00,
	ruc_start      = 0x01,
	ruc_load_base  = 0x06,
	cuc_start      = 0x10,
	cuc_resume     = 0x20,
	cuc_dump_addr  = 0x40,
	cuc_dump_stats = 0x50,
	cuc_load_base  = 0x60,
	cuc_dump_reset = 0x70,
};

/* Completion codes written at the end of a statistics dump */
enum cuc_dump {
	cuc_dump_complete       = 0x0000A005,
	cuc_dump_reset_complete = 0x0000A007,
};

/* Values written to csr->port */
enum port {
	software_reset  = 0x0000,
	selftest        = 0x0001,
	selective_reset = 0x0002,
};
       
   408 
       
/* Bit-banged EEPROM control lines (csr->eeprom_ctrl_lo) */
enum eeprom_ctrl_lo {
	eesk = 0x01,	/* serial clock */
	eecs = 0x02,	/* chip select */
	eedi = 0x04,	/* data in (to EEPROM) */
	eedo = 0x08,	/* data out (from EEPROM) */
};

/* MDI control register bits (csr->mdi_ctrl) */
enum mdi_ctrl {
	mdi_write = 0x04000000,
	mdi_read  = 0x08000000,
	mdi_ready = 0x10000000,
};

/* Serial EEPROM opcodes */
enum eeprom_op {
	op_write = 0x05,
	op_read  = 0x06,
	op_ewds  = 0x10,	/* write disable */
	op_ewen  = 0x13,	/* write enable */
};
       
   428 
       
/* Word offsets of interesting fields in the EEPROM */
enum eeprom_offsets {
	eeprom_cnfg_mdix  = 0x03,
	eeprom_phy_iface  = 0x06,
	eeprom_id         = 0x0A,
	eeprom_config_asf = 0x0D,
	eeprom_smbus_addr = 0x90,
};

enum eeprom_cnfg_mdix {
	eeprom_mdix_enabled = 0x0080,
};

/* PHY interface types as encoded at eeprom_phy_iface */
enum eeprom_phy_iface {
	NoSuchPhy = 0,
	I82553AB,
	I82553C,
	I82503,
	DP83840,
	S80C240,
	S80C24,
	I82555,
	DP83840A = 10,
};

enum eeprom_id {
	eeprom_id_wol = 0x0020,	/* Wake-on-LAN capable */
};

enum eeprom_config_asf {
	eeprom_asf = 0x8000,
	eeprom_gcl = 0x4000,
};

/* Command Block status word bits, written back by hardware */
enum cb_status {
	cb_complete = 0x8000,
	cb_ok       = 0x2000,
};
       
   466 
       
/**
 * cb_command - Command Block flags
 * @cb_tx_nc:  0: controller does CRC (normal),  1: CRC from skb memory
 */
enum cb_command {
	cb_nop    = 0x0000,
	cb_iaaddr = 0x0001,	/* set individual address */
	cb_config = 0x0002,
	cb_multi  = 0x0003,	/* multicast setup */
	cb_tx     = 0x0004,
	cb_ucode  = 0x0005,	/* load microcode */
	cb_dump   = 0x0006,
	cb_tx_sf  = 0x0008,	/* Tx flexible mode */
	cb_tx_nc  = 0x0010,
	cb_cid    = 0x1f00,
	cb_i      = 0x2000,	/* interrupt on completion */
	cb_s      = 0x4000,	/* suspend CU after this CB */
	cb_el     = 0x8000,	/* end of list */
};
       
   486 
       
/* Receive Frame Descriptor — hardware-shared header that precedes the
 * data buffer in each Rx skb (simplified memory mode; see section IV
 * in the header comment).  All fields little-endian as seen by hw. */
struct rfd {
	__le16 status;
	__le16 command;
	__le32 link;		/* physical address of next RFD */
	__le32 rbd;
	__le16 actual_size;	/* bytes received */
	__le16 size;		/* buffer size; 0 blocks hw writes */
};

/* Driver bookkeeping for one Rx descriptor: skb holding the RFD+data,
 * its DMA mapping, and ring linkage. */
struct rx {
	struct rx *next, *prev;
	struct sk_buff *skb;
	dma_addr_t dma_addr;
};
       
   501 
       
/* X(): lay bitfields out LSB-first regardless of the host's bitfield
 * ordering, so the struct matches the hardware layout on both endians. */
#if defined(__BIG_ENDIAN_BITFIELD)
#define X(a,b)	b,a
#else
#define X(a,b)	a,b
#endif
/* Configure command parameter block (cb_config).  The /n/ comments give
 * the hardware byte offsets — do not reorder or repack these fields. */
struct config {
/*0*/	u8 X(byte_count:6, pad0:2);
/*1*/	u8 X(X(rx_fifo_limit:4, tx_fifo_limit:3), pad1:1);
/*2*/	u8 adaptive_ifs;
/*3*/	u8 X(X(X(X(mwi_enable:1, type_enable:1), read_align_enable:1),
	   term_write_cache_line:1), pad3:4);
/*4*/	u8 X(rx_dma_max_count:7, pad4:1);
/*5*/	u8 X(tx_dma_max_count:7, dma_max_count_enable:1);
/*6*/	u8 X(X(X(X(X(X(X(late_scb_update:1, direct_rx_dma:1),
	   tno_intr:1), cna_intr:1), standard_tcb:1), standard_stat_counter:1),
	   rx_save_overruns : 1), rx_save_bad_frames : 1);
/*7*/	u8 X(X(X(X(X(rx_discard_short_frames:1, tx_underrun_retry:2),
	   pad7:2), rx_extended_rfd:1), tx_two_frames_in_fifo:1),
	   tx_dynamic_tbd:1);
/*8*/	u8 X(X(mii_mode:1, pad8:6), csma_disabled:1);
/*9*/	u8 X(X(X(X(X(rx_tcpudp_checksum:1, pad9:3), vlan_arp_tco:1),
	   link_status_wake:1), arp_wake:1), mcmatch_wake:1);
/*10*/	u8 X(X(X(pad10:3, no_source_addr_insertion:1), preamble_length:2),
	   loopback:2);
/*11*/	u8 X(linear_priority:3, pad11:5);
/*12*/	u8 X(X(linear_priority_mode:1, pad12:3), ifs:4);
/*13*/	u8 ip_addr_lo;
/*14*/	u8 ip_addr_hi;
/*15*/	u8 X(X(X(X(X(X(X(promiscuous_mode:1, broadcast_disabled:1),
	   wait_after_win:1), pad15_1:1), ignore_ul_bit:1), crc_16_bit:1),
	   pad15_2:1), crs_or_cdt:1);
/*16*/	u8 fc_delay_lo;
/*17*/	u8 fc_delay_hi;
/*18*/	u8 X(X(X(X(X(rx_stripping:1, tx_padding:1), rx_crc_transfer:1),
	   rx_long_ok:1), fc_priority_threshold:3), pad18:1);
/*19*/	u8 X(X(X(X(X(X(X(addr_wake:1, magic_packet_disable:1),
	   fc_disable:1), fc_restop:1), fc_restart:1), fc_reject:1),
	   full_duplex_force:1), full_duplex_pin:1);
/*20*/	u8 X(X(X(pad20_1:5, fc_priority_location:1), multi_ia:1), pad20_2:1);
/*21*/	u8 X(X(pad21_1:3, multicast_all:1), pad21_2:4);
/*22*/	u8 X(X(rx_d102_mode:1, rx_vlan_drop:1), pad22:6);
	u8 pad_d102[9];
};
       
   545 
       
#define E100_MAX_MULTICAST_ADDRS	64
/* Multicast setup command block (cb_multi): count + packed MAC list */
struct multi {
	__le16 count;
	u8 addr[E100_MAX_MULTICAST_ADDRS * ETH_ALEN + 2/*pad*/];
};
       
   551 
       
/* Important: keep total struct u32-aligned */
#define UCODE_SIZE			134
/* Command Block: the leading status/command/link words plus the
 * per-command union are shared with hardware; the trailing next/prev/
 * dma_addr/skb fields are driver-only bookkeeping (see section III in
 * the header comment). */
struct cb {
	__le16 status;		/* written back by hw (enum cb_status) */
	__le16 command;		/* enum cb_command */
	__le32 link;		/* physical address of next CB */
	union {
		u8 iaaddr[ETH_ALEN];		/* cb_iaaddr */
		__le32 ucode[UCODE_SIZE];	/* cb_ucode */
		struct config config;		/* cb_config */
		struct multi multi;		/* cb_multi */
		struct {			/* cb_tx: TCB */
			u32 tbd_array;
			u16 tcb_byte_count;
			u8 threshold;
			u8 tbd_count;
			struct {
				__le32 buf_addr;
				__le16 size;
				u16 eol;
			} tbd;
		} tcb;
		__le32 dump_buffer_addr;	/* cb_dump */
	} u;
	struct cb *next, *prev;
	dma_addr_t dma_addr;
	struct sk_buff *skb;
};
       
   580 
       
/* Loopback modes used for self-testing */
enum loopback {
	lb_none = 0, lb_mac = 1, lb_phy = 3,
};

/* Statistics counters dumped by the controller (cuc_dump_stats);
 * 'complete' receives one of the enum cuc_dump completion codes. */
struct stats {
	__le32 tx_good_frames, tx_max_collisions, tx_late_collisions,
		tx_underruns, tx_lost_crs, tx_deferred, tx_single_collisions,
		tx_multiple_collisions, tx_total_collisions;
	__le32 rx_good_frames, rx_crc_errors, rx_alignment_errors,
		rx_resource_errors, rx_overrun_errors, rx_cdt_errors,
		rx_short_frame_errors;
	__le32 fc_xmt_pause, fc_rcv_pause, fc_rcv_unsupported;
	__le16 xmt_tco_frames, rcv_tco_frames;
	__le32 complete;
};

/* DMA-shared scratch area: self-test result block, statistics dump
 * and register dump buffer (addressed via nic->dma_addr + offsetof). */
struct mem {
	struct {
		u32 signature;	/* non-zero once hw has run the test */
		u32 result;	/* 0 on success */
	} selftest;
	struct stats stats;
	u8 dump_buf[596];
};

/* Allowed min/max and configured count for a ring resource */
struct param_range {
	u32 min;
	u32 max;
	u32 count;
};

/* Tunable ring sizes: Rx frame descriptors and command blocks */
struct params {
	struct param_range rfds;
	struct param_range cbs;
};
       
   616 
       
/* Per-adapter private state. */
struct nic {
	/* Begin: frequently used values: keep adjacent for cache effect */
	u32 msg_enable				____cacheline_aligned;
	struct net_device *netdev;
	struct pci_dev *pdev;
	/* MII register access hook (MDI hardware or bit-banged emulation) */
	u16 (*mdio_ctrl)(struct nic *nic, u32 addr, u32 dir, u32 reg, u16 data);

	struct rx *rxs				____cacheline_aligned;
	struct rx *rx_to_use;
	struct rx *rx_to_clean;
	struct rfd blank_rfd;
	enum ru_state ru_running;

	/* cb_lock protects the CBL, cmd_lock serializes SCB command
	 * register access (see "Theory of Operation" above). */
	spinlock_t cb_lock			____cacheline_aligned;
	spinlock_t cmd_lock;
	struct csr __iomem *csr;
	enum scb_cmd_lo cuc_cmd;
	unsigned int cbs_avail;		/* free CBs in the ring */
	struct napi_struct napi;
	struct cb *cbs;
	struct cb *cb_to_use;		/* next CB for queuing a command */
	struct cb *cb_to_send;		/* first CB after a failed resume */
	struct cb *cb_to_clean;		/* next CB to check for completion */
	__le16 tx_command;
	/* End: frequently used values: keep adjacent for cache effect */

	enum {
		ich                = (1 << 0),
		promiscuous        = (1 << 1),
		multicast_all      = (1 << 2),
		wol_magic          = (1 << 3),
		ich_10h_workaround = (1 << 4),
	} flags					____cacheline_aligned;

	enum mac mac;
	enum phy phy;
	struct params params;
	struct timer_list watchdog;
	struct mii_if_info mii;
	struct work_struct tx_timeout_task;
	enum loopback loopback;

	struct mem *mem;		/* DMA-shared selftest/stats/dump area */
	dma_addr_t dma_addr;		/* bus address of *mem */

	struct pci_pool *cbs_pool;
	dma_addr_t cbs_dma_addr;
	u8 adaptive_ifs;
	u8 tx_threshold;
	u32 tx_frames;
	u32 tx_collisions;

	u32 tx_deferred;
	u32 tx_single_collisions;
	u32 tx_multiple_collisions;
	u32 tx_fc_pause;
	u32 tx_tco_frames;

	u32 rx_fc_pause;
	u32 rx_fc_unsupported;
	u32 rx_tco_frames;
	u32 rx_short_frame_errors;
	u32 rx_over_length_errors;

	u16 eeprom_wc;			/* EEPROM word count */

	__le16 eeprom[256];
	spinlock_t mdio_lock;
	const struct firmware *fw;
	/* EtherCAT: non-NULL when the device is claimed by the master;
	 * in that mode interrupts stay masked and the master polls. */
	ec_device_t *ecdev;
	unsigned long ec_watchdog_jiffies;
};
       
   689 
       
/* Force posted PCI writes to reach the device before continuing. */
static inline void e100_write_flush(struct nic *nic)
{
	/* Flush previous PCI writes through intermediate bridges
	 * by doing a benign read */
	(void)ioread8(&nic->csr->scb.status);
}
       
   696 
       
/* Unmask the adapter's interrupt line.  No-op for EtherCAT-claimed
 * devices, which are polled by the master with interrupts masked. */
static void e100_enable_irq(struct nic *nic)
{
	unsigned long flags;

	/* EtherCAT mode: never unmask */
	if (nic->ecdev)
		return;

	/* cmd_lock serializes SCB command register access */
	spin_lock_irqsave(&nic->cmd_lock, flags);
	iowrite8(irq_mask_none, &nic->csr->scb.cmd_hi);
	e100_write_flush(nic);
	spin_unlock_irqrestore(&nic->cmd_lock, flags);
}
       
   709 
       
   710 static void e100_disable_irq(struct nic *nic)
       
   711 {
       
   712 	unsigned long flags = 0;
       
   713 
       
   714 	if (!nic->ecdev)
       
   715 		spin_lock_irqsave(&nic->cmd_lock, flags);
       
   716 	iowrite8(irq_mask_all, &nic->csr->scb.cmd_hi);
       
   717 	e100_write_flush(nic);
       
   718 	if (!nic->ecdev)
       
   719 		spin_unlock_irqrestore(&nic->cmd_lock, flags);
       
   720 }
       
   721 
       
/* Full hardware reset: selective reset first (idles CU/RU and gets the
 * device off the PCI bus), then a software reset, then re-mask the
 * interrupt line since reset leaves it unmasked.  Sequence and delays
 * are order-critical. */
static void e100_hw_reset(struct nic *nic)
{
	/* Put CU and RU into idle with a selective reset to get
	 * device off of PCI bus */
	iowrite32(selective_reset, &nic->csr->port);
	e100_write_flush(nic); udelay(20);

	/* Now fully reset device */
	iowrite32(software_reset, &nic->csr->port);
	e100_write_flush(nic); udelay(20);

	/* Mask off our interrupt line - it's unmasked after reset */
	e100_disable_irq(nic);
}
       
   736 
       
/* Run the controller's built-in self-test.
 * The device DMAs its result into nic->mem->selftest, so a passing test
 * also proves DMA to/from host memory works.
 * Returns 0 on success, -ETIMEDOUT if the test failed or never ran. */
static int e100_self_test(struct nic *nic)
{
	u32 dma_addr = nic->dma_addr + offsetof(struct mem, selftest);

	/* Passing the self-test is a pretty good indication
	 * that the device can DMA to/from host memory */

	/* Pre-seed the result area so we can tell whether the device wrote
	 * it: signature stays 0 on timeout, result stays all-ones until the
	 * device reports. */
	nic->mem->selftest.signature = 0;
	nic->mem->selftest.result = 0xFFFFFFFF;

	iowrite32(selftest | dma_addr, &nic->csr->port);
	e100_write_flush(nic);
	/* Wait 10 msec for self-test to complete */
	msleep(10);

	/* Interrupts are enabled after self-test */
	e100_disable_irq(nic);

	/* Check results of self-test */
	if (nic->mem->selftest.result != 0) {
		netif_err(nic, hw, nic->netdev,
			  "Self-test failed: result=0x%08X\n",
			  nic->mem->selftest.result);
		return -ETIMEDOUT;
	}
	if (nic->mem->selftest.signature == 0) {
		netif_err(nic, hw, nic->netdev, "Self-test failed: timed out\n");
		return -ETIMEDOUT;
	}

	return 0;
}
       
   769 
       
   770 static void e100_eeprom_write(struct nic *nic, u16 addr_len, u16 addr, __le16 data)
       
   771 {
       
   772 	u32 cmd_addr_data[3];
       
   773 	u8 ctrl;
       
   774 	int i, j;
       
   775 
       
   776 	/* Three cmds: write/erase enable, write data, write/erase disable */
       
   777 	cmd_addr_data[0] = op_ewen << (addr_len - 2);
       
   778 	cmd_addr_data[1] = (((op_write << addr_len) | addr) << 16) |
       
   779 		le16_to_cpu(data);
       
   780 	cmd_addr_data[2] = op_ewds << (addr_len - 2);
       
   781 
       
   782 	/* Bit-bang cmds to write word to eeprom */
       
   783 	for (j = 0; j < 3; j++) {
       
   784 
       
   785 		/* Chip select */
       
   786 		iowrite8(eecs | eesk, &nic->csr->eeprom_ctrl_lo);
       
   787 		e100_write_flush(nic); udelay(4);
       
   788 
       
   789 		for (i = 31; i >= 0; i--) {
       
   790 			ctrl = (cmd_addr_data[j] & (1 << i)) ?
       
   791 				eecs | eedi : eecs;
       
   792 			iowrite8(ctrl, &nic->csr->eeprom_ctrl_lo);
       
   793 			e100_write_flush(nic); udelay(4);
       
   794 
       
   795 			iowrite8(ctrl | eesk, &nic->csr->eeprom_ctrl_lo);
       
   796 			e100_write_flush(nic); udelay(4);
       
   797 		}
       
   798 		/* Wait 10 msec for cmd to complete */
       
   799 		msleep(10);
       
   800 
       
   801 		/* Chip deselect */
       
   802 		iowrite8(0, &nic->csr->eeprom_ctrl_lo);
       
   803 		e100_write_flush(nic); udelay(4);
       
   804 	}
       
   805 };
       
   806 
       
   807 /* General technique stolen from the eepro100 driver - very clever */
       
   808 static __le16 e100_eeprom_read(struct nic *nic, u16 *addr_len, u16 addr)
       
   809 {
       
   810 	u32 cmd_addr_data;
       
   811 	u16 data = 0;
       
   812 	u8 ctrl;
       
   813 	int i;
       
   814 
       
   815 	cmd_addr_data = ((op_read << *addr_len) | addr) << 16;
       
   816 
       
   817 	/* Chip select */
       
   818 	iowrite8(eecs | eesk, &nic->csr->eeprom_ctrl_lo);
       
   819 	e100_write_flush(nic); udelay(4);
       
   820 
       
   821 	/* Bit-bang to read word from eeprom */
       
   822 	for (i = 31; i >= 0; i--) {
       
   823 		ctrl = (cmd_addr_data & (1 << i)) ? eecs | eedi : eecs;
       
   824 		iowrite8(ctrl, &nic->csr->eeprom_ctrl_lo);
       
   825 		e100_write_flush(nic); udelay(4);
       
   826 
       
   827 		iowrite8(ctrl | eesk, &nic->csr->eeprom_ctrl_lo);
       
   828 		e100_write_flush(nic); udelay(4);
       
   829 
       
   830 		/* Eeprom drives a dummy zero to EEDO after receiving
       
   831 		 * complete address.  Use this to adjust addr_len. */
       
   832 		ctrl = ioread8(&nic->csr->eeprom_ctrl_lo);
       
   833 		if (!(ctrl & eedo) && i > 16) {
       
   834 			*addr_len -= (i - 16);
       
   835 			i = 17;
       
   836 		}
       
   837 
       
   838 		data = (data << 1) | (ctrl & eedo ? 1 : 0);
       
   839 	}
       
   840 
       
   841 	/* Chip deselect */
       
   842 	iowrite8(0, &nic->csr->eeprom_ctrl_lo);
       
   843 	e100_write_flush(nic); udelay(4);
       
   844 
       
   845 	return cpu_to_le16(data);
       
   846 };
       
   847 
       
   848 /* Load entire EEPROM image into driver cache and validate checksum */
       
   849 static int e100_eeprom_load(struct nic *nic)
       
   850 {
       
   851 	u16 addr, addr_len = 8, checksum = 0;
       
   852 
       
   853 	/* Try reading with an 8-bit addr len to discover actual addr len */
       
   854 	e100_eeprom_read(nic, &addr_len, 0);
       
   855 	nic->eeprom_wc = 1 << addr_len;
       
   856 
       
   857 	for (addr = 0; addr < nic->eeprom_wc; addr++) {
       
   858 		nic->eeprom[addr] = e100_eeprom_read(nic, &addr_len, addr);
       
   859 		if (addr < nic->eeprom_wc - 1)
       
   860 			checksum += le16_to_cpu(nic->eeprom[addr]);
       
   861 	}
       
   862 
       
   863 	/* The checksum, stored in the last word, is calculated such that
       
   864 	 * the sum of words should be 0xBABA */
       
   865 	if (cpu_to_le16(0xBABA - checksum) != nic->eeprom[nic->eeprom_wc - 1]) {
       
   866 		netif_err(nic, probe, nic->netdev, "EEPROM corrupted\n");
       
   867 		if (!eeprom_bad_csum_allow)
       
   868 			return -EAGAIN;
       
   869 	}
       
   870 
       
   871 	return 0;
       
   872 }
       
   873 
       
   874 /* Save (portion of) driver EEPROM cache to device and update checksum */
       
   875 static int e100_eeprom_save(struct nic *nic, u16 start, u16 count)
       
   876 {
       
   877 	u16 addr, addr_len = 8, checksum = 0;
       
   878 
       
   879 	/* Try reading with an 8-bit addr len to discover actual addr len */
       
   880 	e100_eeprom_read(nic, &addr_len, 0);
       
   881 	nic->eeprom_wc = 1 << addr_len;
       
   882 
       
   883 	if (start + count >= nic->eeprom_wc)
       
   884 		return -EINVAL;
       
   885 
       
   886 	for (addr = start; addr < start + count; addr++)
       
   887 		e100_eeprom_write(nic, addr_len, addr, nic->eeprom[addr]);
       
   888 
       
   889 	/* The checksum, stored in the last word, is calculated such that
       
   890 	 * the sum of words should be 0xBABA */
       
   891 	for (addr = 0; addr < nic->eeprom_wc - 1; addr++)
       
   892 		checksum += le16_to_cpu(nic->eeprom[addr]);
       
   893 	nic->eeprom[nic->eeprom_wc - 1] = cpu_to_le16(0xBABA - checksum);
       
   894 	e100_eeprom_write(nic, addr_len, nic->eeprom_wc - 1,
       
   895 		nic->eeprom[nic->eeprom_wc - 1]);
       
   896 
       
   897 	return 0;
       
   898 }
       
   899 
       
   900 #define E100_WAIT_SCB_TIMEOUT 20000 /* we might have to wait 100ms!!! */
       
   901 #define E100_WAIT_SCB_FAST 20       /* delay like the old code */
       
/* Issue a command to the device's System Control Block (SCB).
 * Busy-waits (bounded) for the previous command to be accepted, then
 * optionally loads the general pointer and writes the command byte.
 * The cmd_lock is skipped in EtherCAT mode, where the device is driven
 * from the master's poll context only.
 * Returns 0 on success, -EAGAIN if the SCB stayed busy. */
static int e100_exec_cmd(struct nic *nic, u8 cmd, dma_addr_t dma_addr)
{
	unsigned long flags = 0;
	unsigned int i;
	int err = 0;

	if (!nic->ecdev)
		spin_lock_irqsave(&nic->cmd_lock, flags);

	/* Previous command is accepted when SCB clears */
	for (i = 0; i < E100_WAIT_SCB_TIMEOUT; i++) {
		if (likely(!ioread8(&nic->csr->scb.cmd_lo)))
			break;
		cpu_relax();
		/* after the first E100_WAIT_SCB_FAST spins, back off 5us/spin */
		if (unlikely(i > E100_WAIT_SCB_FAST))
			udelay(5);
	}
	if (unlikely(i == E100_WAIT_SCB_TIMEOUT)) {
		err = -EAGAIN;
		goto err_unlock;
	}

	/* cuc_resume continues from the current CB; no pointer needed */
	if (unlikely(cmd != cuc_resume))
		iowrite32(dma_addr, &nic->csr->scb.gen_ptr);
	iowrite8(cmd, &nic->csr->scb.cmd_lo);

err_unlock:
	if (!nic->ecdev)
		spin_unlock_irqrestore(&nic->cmd_lock, flags);

	return err;
}
       
   934 
       
/* Take the next free control block (CB) from the ring, let @cb_prepare
 * fill it in, then kick the Command Unit to execute all pending CBs.
 * The cb_lock is skipped in EtherCAT mode (single poll context).
 * Returns 0 on success, -ENOMEM if no CB was free, or -ENOSPC if this
 * command consumed the last free CB (the command IS queued; callers use
 * this to throttle). */
static int e100_exec_cb(struct nic *nic, struct sk_buff *skb,
	void (*cb_prepare)(struct nic *, struct cb *, struct sk_buff *))
{
	struct cb *cb;
	unsigned long flags = 0;
	int err = 0;

	if (!nic->ecdev)
		spin_lock_irqsave(&nic->cb_lock, flags);

	if (unlikely(!nic->cbs_avail)) {
		err = -ENOMEM;
		goto err_unlock;
	}

	cb = nic->cb_to_use;
	nic->cb_to_use = cb->next;
	nic->cbs_avail--;
	cb->skb = skb;

	if (unlikely(!nic->cbs_avail))
		err = -ENOSPC;

	cb_prepare(nic, cb, skb);

	/* Order is important otherwise we'll be in a race with h/w:
	 * set S-bit in current first, then clear S-bit in previous. */
	cb->command |= cpu_to_le16(cb_s);
	wmb();
	cb->prev->command &= cpu_to_le16(~cb_s);

	while (nic->cb_to_send != nic->cb_to_use) {
		if (unlikely(e100_exec_cmd(nic, nic->cuc_cmd,
			nic->cb_to_send->dma_addr))) {
			/* Ok, here's where things get sticky.  It's
			 * possible that we can't schedule the command
			 * because the controller is too busy, so
			 * let's just queue the command and try again
			 * when another command is scheduled. */
			if (err == -ENOSPC) {
				/* ring is full AND the SCB is stuck:
				 * request a reset via the tx timeout path */
				schedule_work(&nic->tx_timeout_task);
			}
			break;
		} else {
			nic->cuc_cmd = cuc_resume;
			nic->cb_to_send = nic->cb_to_send->next;
		}
	}

err_unlock:
	if (!nic->ecdev)
		spin_unlock_irqrestore(&nic->cb_lock, flags);

	return err;
}
       
   991 
       
   992 static int mdio_read(struct net_device *netdev, int addr, int reg)
       
   993 {
       
   994 	struct nic *nic = netdev_priv(netdev);
       
   995 	return nic->mdio_ctrl(nic, addr, mdi_read, reg, 0);
       
   996 }
       
   997 
       
   998 static void mdio_write(struct net_device *netdev, int addr, int reg, int data)
       
   999 {
       
  1000 	struct nic *nic = netdev_priv(netdev);
       
  1001 
       
  1002 	nic->mdio_ctrl(nic, addr, mdi_write, reg, data);
       
  1003 }
       
  1004 
       
/* The standard mdio_ctrl() function for usual MII-compliant hardware:
 * perform one PHY register access through the MDI control register.
 * @addr: PHY address; @dir: mdi_read or mdi_write; @reg: PHY register;
 * @data: value to write (ignored for reads).
 * Returns the data word from the completed transaction, or 0 on timeout
 * (there is no way to indicate a timeout error to callers). */
static u16 mdio_ctrl_hw(struct nic *nic, u32 addr, u32 dir, u32 reg, u16 data)
{
	u32 data_out = 0;
	unsigned int i;
	unsigned long flags = 0;


	/*
	 * Stratus87247: we shouldn't be writing the MDI control
	 * register until the Ready bit shows True.  Also, since
	 * manipulation of the MDI control registers is a multi-step
	 * procedure it should be done under lock.
	 * (Lock is skipped in EtherCAT mode: single poll context.)
	 */
	if (!nic->ecdev)
		spin_lock_irqsave(&nic->mdio_lock, flags);
	for (i = 100; i; --i) {
		if (ioread32(&nic->csr->mdi_ctrl) & mdi_ready)
			break;
		udelay(20);
	}
	if (unlikely(!i)) {
		netdev_err(nic->netdev, "e100.mdio_ctrl won't go Ready\n");
		if (!nic->ecdev)
			spin_unlock_irqrestore(&nic->mdio_lock, flags);
		return 0;		/* No way to indicate timeout error */
	}
	iowrite32((reg << 16) | (addr << 21) | dir | data, &nic->csr->mdi_ctrl);

	/* Wait for the transaction to complete (Ready set again) */
	for (i = 0; i < 100; i++) {
		udelay(20);
		if ((data_out = ioread32(&nic->csr->mdi_ctrl)) & mdi_ready)
			break;
	}
	if (!nic->ecdev)
		spin_unlock_irqrestore(&nic->mdio_lock, flags);
	netif_printk(nic, hw, KERN_DEBUG, nic->netdev,
		     "%s:addr=%d, reg=%d, data_in=0x%04X, data_out=0x%04X\n",
		     dir == mdi_read ? "READ" : "WRITE",
		     addr, reg, data, data_out);
	return (u16)data_out;
}
       
  1047 
       
  1048 /* slightly tweaked mdio_ctrl() function for phy_82552_v specifics */
       
  1049 static u16 mdio_ctrl_phy_82552_v(struct nic *nic,
       
  1050 				 u32 addr,
       
  1051 				 u32 dir,
       
  1052 				 u32 reg,
       
  1053 				 u16 data)
       
  1054 {
       
  1055 	if ((reg == MII_BMCR) && (dir == mdi_write)) {
       
  1056 		if (data & (BMCR_ANRESTART | BMCR_ANENABLE)) {
       
  1057 			u16 advert = mdio_read(nic->netdev, nic->mii.phy_id,
       
  1058 							MII_ADVERTISE);
       
  1059 
       
  1060 			/*
       
  1061 			 * Workaround Si issue where sometimes the part will not
       
  1062 			 * autoneg to 100Mbps even when advertised.
       
  1063 			 */
       
  1064 			if (advert & ADVERTISE_100FULL)
       
  1065 				data |= BMCR_SPEED100 | BMCR_FULLDPLX;
       
  1066 			else if (advert & ADVERTISE_100HALF)
       
  1067 				data |= BMCR_SPEED100;
       
  1068 		}
       
  1069 	}
       
  1070 	return mdio_ctrl_hw(nic, addr, dir, reg, data);
       
  1071 }
       
  1072 
       
  1073 /* Fully software-emulated mdio_ctrl() function for cards without
       
  1074  * MII-compliant PHYs.
       
  1075  * For now, this is mainly geared towards 80c24 support; in case of further
       
  1076  * requirements for other types (i82503, ...?) either extend this mechanism
       
  1077  * or split it, whichever is cleaner.
       
  1078  */
       
  1079 static u16 mdio_ctrl_phy_mii_emulated(struct nic *nic,
       
  1080 				      u32 addr,
       
  1081 				      u32 dir,
       
  1082 				      u32 reg,
       
  1083 				      u16 data)
       
  1084 {
       
  1085 	/* might need to allocate a netdev_priv'ed register array eventually
       
  1086 	 * to be able to record state changes, but for now
       
  1087 	 * some fully hardcoded register handling ought to be ok I guess. */
       
  1088 
       
  1089 	if (dir == mdi_read) {
       
  1090 		switch (reg) {
       
  1091 		case MII_BMCR:
       
  1092 			/* Auto-negotiation, right? */
       
  1093 			return  BMCR_ANENABLE |
       
  1094 				BMCR_FULLDPLX;
       
  1095 		case MII_BMSR:
       
  1096 			return	BMSR_LSTATUS /* for mii_link_ok() */ |
       
  1097 				BMSR_ANEGCAPABLE |
       
  1098 				BMSR_10FULL;
       
  1099 		case MII_ADVERTISE:
       
  1100 			/* 80c24 is a "combo card" PHY, right? */
       
  1101 			return	ADVERTISE_10HALF |
       
  1102 				ADVERTISE_10FULL;
       
  1103 		default:
       
  1104 			netif_printk(nic, hw, KERN_DEBUG, nic->netdev,
       
  1105 				     "%s:addr=%d, reg=%d, data=0x%04X: unimplemented emulation!\n",
       
  1106 				     dir == mdi_read ? "READ" : "WRITE",
       
  1107 				     addr, reg, data);
       
  1108 			return 0xFFFF;
       
  1109 		}
       
  1110 	} else {
       
  1111 		switch (reg) {
       
  1112 		default:
       
  1113 			netif_printk(nic, hw, KERN_DEBUG, nic->netdev,
       
  1114 				     "%s:addr=%d, reg=%d, data=0x%04X: unimplemented emulation!\n",
       
  1115 				     dir == mdi_read ? "READ" : "WRITE",
       
  1116 				     addr, reg, data);
       
  1117 			return 0xFFFF;
       
  1118 		}
       
  1119 	}
       
  1120 }
       
  1121 static inline int e100_phy_supports_mii(struct nic *nic)
       
  1122 {
       
  1123 	/* for now, just check it by comparing whether we
       
  1124 	   are using MII software emulation.
       
  1125 	*/
       
  1126 	return (nic->mdio_ctrl != mdio_ctrl_phy_mii_emulated);
       
  1127 }
       
  1128 
       
  1129 static void e100_get_defaults(struct nic *nic)
       
  1130 {
       
  1131 	struct param_range rfds = { .min = 16, .max = 256, .count = 256 };
       
  1132 	struct param_range cbs  = { .min = 64, .max = 256, .count = 128 };
       
  1133 
       
  1134 	/* MAC type is encoded as rev ID; exception: ICH is treated as 82559 */
       
  1135 	nic->mac = (nic->flags & ich) ? mac_82559_D101M : nic->pdev->revision;
       
  1136 	if (nic->mac == mac_unknown)
       
  1137 		nic->mac = mac_82557_D100_A;
       
  1138 
       
  1139 	nic->params.rfds = rfds;
       
  1140 	nic->params.cbs = cbs;
       
  1141 
       
  1142 	/* Quadwords to DMA into FIFO before starting frame transmit */
       
  1143 	nic->tx_threshold = 0xE0;
       
  1144 
       
  1145 	/* no interrupt for every tx completion, delay = 256us if not 557 */
       
  1146 	nic->tx_command = cpu_to_le16(cb_tx | cb_tx_sf |
       
  1147 		((nic->mac >= mac_82558_D101_A4) ? cb_cid : cb_i));
       
  1148 
       
  1149 	/* Template for a freshly allocated RFD */
       
  1150 	nic->blank_rfd.command = 0;
       
  1151 	nic->blank_rfd.rbd = cpu_to_le32(0xFFFFFFFF);
       
  1152 	nic->blank_rfd.size = cpu_to_le16(VLAN_ETH_FRAME_LEN + ETH_FCS_LEN);
       
  1153 
       
  1154 	/* MII setup */
       
  1155 	nic->mii.phy_id_mask = 0x1F;
       
  1156 	nic->mii.reg_num_mask = 0x1F;
       
  1157 	nic->mii.dev = nic->netdev;
       
  1158 	nic->mii.mdio_read = mdio_read;
       
  1159 	nic->mii.mdio_write = mdio_write;
       
  1160 }
       
  1161 
       
/* cb_prepare callout: build a Configure command block in @cb.
 * Fills the 22-byte (extended: 32-byte) i8255x configuration area; the
 * per-field comments below give the meaning of each non-default value,
 * then adjusts for MII capability, duplex, promiscuous/loopback mode,
 * RXFCS/RXALL features, Wake-on-LAN and the MAC revision.  @skb is
 * unused (required by the cb_prepare signature). */
static void e100_configure(struct nic *nic, struct cb *cb, struct sk_buff *skb)
{
	struct config *config = &cb->u.config;
	u8 *c = (u8 *)config;
	struct net_device *netdev = nic->netdev;

	cb->command = cpu_to_le16(cb_config);

	memset(config, 0, sizeof(struct config));

	config->byte_count = 0x16;		/* bytes in this struct */
	config->rx_fifo_limit = 0x8;		/* bytes in FIFO before DMA */
	config->direct_rx_dma = 0x1;		/* reserved */
	config->standard_tcb = 0x1;		/* 1=standard, 0=extended */
	config->standard_stat_counter = 0x1;	/* 1=standard, 0=extended */
	config->rx_discard_short_frames = 0x1;	/* 1=discard, 0=pass */
	config->tx_underrun_retry = 0x3;	/* # of underrun retries */
	if (e100_phy_supports_mii(nic))
		config->mii_mode = 1;           /* 1=MII mode, 0=i82503 mode */
	config->pad10 = 0x6;
	config->no_source_addr_insertion = 0x1;	/* 1=no, 0=yes */
	config->preamble_length = 0x2;		/* 0=1, 1=3, 2=7, 3=15 bytes */
	config->ifs = 0x6;			/* x16 = inter frame spacing */
	config->ip_addr_hi = 0xF2;		/* ARP IP filter - not used */
	config->pad15_1 = 0x1;
	config->pad15_2 = 0x1;
	config->crs_or_cdt = 0x0;		/* 0=CRS only, 1=CRS or CDT */
	config->fc_delay_hi = 0x40;		/* time delay for fc frame */
	config->tx_padding = 0x1;		/* 1=pad short frames */
	config->fc_priority_threshold = 0x7;	/* 7=priority fc disabled */
	config->pad18 = 0x1;
	config->full_duplex_pin = 0x1;		/* 1=examine FDX# pin */
	config->pad20_1 = 0x1F;
	config->fc_priority_location = 0x1;	/* 1=byte#31, 0=byte#19 */
	config->pad21_1 = 0x5;

	config->adaptive_ifs = nic->adaptive_ifs;
	config->loopback = nic->loopback;

	if (nic->mii.force_media && nic->mii.full_duplex)
		config->full_duplex_force = 0x1;	/* 1=force, 0=auto */

	if (nic->flags & promiscuous || nic->loopback) {
		config->rx_save_bad_frames = 0x1;	/* 1=save, 0=discard */
		config->rx_discard_short_frames = 0x0;	/* 1=discard, 0=save */
		config->promiscuous_mode = 0x1;		/* 1=on, 0=off */
	}

	if (unlikely(netdev->features & NETIF_F_RXFCS))
		config->rx_crc_transfer = 0x1;	/* 1=save, 0=discard */

	if (nic->flags & multicast_all)
		config->multicast_all = 0x1;		/* 1=accept, 0=no */

	/* disable WoL when up (always disabled in EtherCAT mode) */
	if (nic->ecdev || 
			(netif_running(nic->netdev) || !(nic->flags & wol_magic)))
		config->magic_packet_disable = 0x1;	/* 1=off, 0=on */

	/* Revision-dependent settings (82558 and later) */
	if (nic->mac >= mac_82558_D101_A4) {
		config->fc_disable = 0x1;	/* 1=Tx fc off, 0=Tx fc on */
		config->mwi_enable = 0x1;	/* 1=enable, 0=disable */
		config->standard_tcb = 0x0;	/* 1=standard, 0=extended */
		config->rx_long_ok = 0x1;	/* 1=VLANs ok, 0=standard */
		if (nic->mac >= mac_82559_D101M) {
			config->tno_intr = 0x1;		/* TCO stats enable */
			/* Enable TCO in extended config */
			if (nic->mac >= mac_82551_10) {
				config->byte_count = 0x20; /* extended bytes */
				config->rx_d102_mode = 0x1; /* GMRC for TCO */
			}
		} else {
			config->standard_stat_counter = 0x0;
		}
	}

	if (netdev->features & NETIF_F_RXALL) {
		config->rx_save_overruns = 0x1; /* 1=save, 0=discard */
		config->rx_save_bad_frames = 0x1;       /* 1=save, 0=discard */
		config->rx_discard_short_frames = 0x0;  /* 1=discard, 0=save */
	}

	/* Dump the first 24 config bytes for debugging */
	netif_printk(nic, hw, KERN_DEBUG, nic->netdev,
		     "[00-07]=%02X:%02X:%02X:%02X:%02X:%02X:%02X:%02X\n",
		     c[0], c[1], c[2], c[3], c[4], c[5], c[6], c[7]);
	netif_printk(nic, hw, KERN_DEBUG, nic->netdev,
		     "[08-15]=%02X:%02X:%02X:%02X:%02X:%02X:%02X:%02X\n",
		     c[8], c[9], c[10], c[11], c[12], c[13], c[14], c[15]);
	netif_printk(nic, hw, KERN_DEBUG, nic->netdev,
		     "[16-23]=%02X:%02X:%02X:%02X:%02X:%02X:%02X:%02X\n",
		     c[16], c[17], c[18], c[19], c[20], c[21], c[22], c[23]);
}
       
  1254 
       
  1255 /*************************************************************************
       
  1256 *  CPUSaver parameters
       
  1257 *
       
  1258 *  All CPUSaver parameters are 16-bit literals that are part of a
       
  1259 *  "move immediate value" instruction.  By changing the value of
       
  1260 *  the literal in the instruction before the code is loaded, the
       
  1261 *  driver can change the algorithm.
       
  1262 *
       
  1263 *  INTDELAY - This loads the dead-man timer with its initial value.
       
  1264 *    When this timer expires the interrupt is asserted, and the
       
  1265 *    timer is reset each time a new packet is received.  (see
       
  1266 *    BUNDLEMAX below to set the limit on number of chained packets)
       
  1267 *    The current default is 0x600 or 1536.  Experiments show that
       
  1268 *    the value should probably stay within the 0x200 - 0x1000.
       
  1269 *
       
  1270 *  BUNDLEMAX -
       
  1271 *    This sets the maximum number of frames that will be bundled.  In
       
  1272 *    some situations, such as the TCP windowing algorithm, it may be
       
  1273 *    better to limit the growth of the bundle size than let it go as
       
  1274 *    high as it can, because that could cause too much added latency.
       
  1275 *    The default is six, because this is the number of packets in the
       
  1276 *    default TCP window size.  A value of 1 would make CPUSaver indicate
       
  1277 *    an interrupt for every frame received.  If you do not want to put
       
  1278 *    a limit on the bundle size, set this value to xFFFF.
       
  1279 *
       
  1280 *  BUNDLESMALL -
       
  1281 *    This contains a bit-mask describing the minimum size frame that
       
  1282 *    will be bundled.  The default masks the lower 7 bits, which means
       
  1283 *    that any frame less than 128 bytes in length will not be bundled,
       
  1284 *    but will instead immediately generate an interrupt.  This does
       
  1285 *    not affect the current bundle in any way.  Any frame that is 128
       
*    bytes or larger will be bundled normally.  This feature is meant
       
  1287 *    to provide immediate indication of ACK frames in a TCP environment.
       
  1288 *    Customers were seeing poor performance when a machine with CPUSaver
       
  1289 *    enabled was sending but not receiving.  The delay introduced when
       
  1290 *    the ACKs were received was enough to reduce total throughput, because
       
  1291 *    the sender would sit idle until the ACK was finally seen.
       
  1292 *
       
  1293 *    The current default is 0xFF80, which masks out the lower 7 bits.
       
  1294 *    This means that any frame which is x7F (127) bytes or smaller
       
  1295 *    will cause an immediate interrupt.  Because this value must be a
       
  1296 *    bit mask, there are only a few valid values that can be used.  To
       
  1297 *    turn this feature off, the driver can write the value xFFFF to the
       
  1298 *    lower word of this instruction (in the same way that the other
       
  1299 *    parameters are used).  Likewise, a value of 0xF800 (2047) would
       
  1300 *    cause an interrupt to be generated for every frame, because all
       
  1301 *    standard Ethernet frames are <= 2047 bytes in length.
       
  1302 *************************************************************************/
       
  1303 
       
  1304 /* if you wish to disable the ucode functionality, while maintaining the
       
  1305  * workarounds it provides, set the following defines to:
       
  1306  * BUNDLESMALL 0
       
  1307  * BUNDLEMAX 1
       
  1308  * INTDELAY 1
       
  1309  */
       
  1310 #define BUNDLESMALL 1
       
  1311 #define BUNDLEMAX (u16)6
       
  1312 #define INTDELAY (u16)1536 /* 0x600 */
       
  1313 
       
/* Initialize firmware: select and validate the CPUSaver microcode image
 * for this MAC revision.
 * Returns NULL when no ucode is needed (ICH or unsupported revision),
 * ERR_PTR() on failure, or the validated firmware, which is also cached
 * in nic->fw for reinit paths where request_firmware() cannot be used.
 * NOTE(review): if a previously cached nic->fw ever failed re-validation,
 * release_firmware() below would leave nic->fw dangling.  In practice the
 * cached image was already validated on first load, so the error paths
 * should only run for freshly requested firmware - confirm before relying
 * on this. */
static const struct firmware *e100_request_firmware(struct nic *nic)
{
	const char *fw_name;
	const struct firmware *fw = nic->fw;
	u8 timer, bundle, min_size;
	int err = 0;

	/* do not load u-code for ICH devices */
	if (nic->flags & ich)
		return NULL;

	/* Search for ucode match against h/w revision */
	if (nic->mac == mac_82559_D101M)
		fw_name = FIRMWARE_D101M;
	else if (nic->mac == mac_82559_D101S)
		fw_name = FIRMWARE_D101S;
	else if (nic->mac == mac_82551_F || nic->mac == mac_82551_10)
		fw_name = FIRMWARE_D102E;
	else /* No ucode on other devices */
		return NULL;

	/* If the firmware has not previously been loaded, request a pointer
	 * to it. If it was previously loaded, we are reinitializing the
	 * adapter, possibly in a resume from hibernate, in which case
	 * request_firmware() cannot be used.
	 */
	if (!fw)
		err = request_firmware(&fw, fw_name, &nic->pdev->dev);

	if (err) {
		netif_err(nic, probe, nic->netdev,
			  "Failed to load firmware \"%s\": %d\n",
			  fw_name, err);
		return ERR_PTR(err);
	}

	/* Firmware should be precisely UCODE_SIZE (words) plus three bytes
	   indicating the offsets for BUNDLESMALL, BUNDLEMAX, INTDELAY */
	if (fw->size != UCODE_SIZE * 4 + 3) {
		netif_err(nic, probe, nic->netdev,
			  "Firmware \"%s\" has wrong size %zu\n",
			  fw_name, fw->size);
		release_firmware(fw);
		return ERR_PTR(-EINVAL);
	}

	/* Read timer, bundle and min_size from end of firmware blob */
	timer = fw->data[UCODE_SIZE * 4];
	bundle = fw->data[UCODE_SIZE * 4 + 1];
	min_size = fw->data[UCODE_SIZE * 4 + 2];

	/* The three trailing bytes are word offsets into the ucode image;
	 * they must lie inside it. */
	if (timer >= UCODE_SIZE || bundle >= UCODE_SIZE ||
	    min_size >= UCODE_SIZE) {
		netif_err(nic, probe, nic->netdev,
			  "\"%s\" has bogus offset values (0x%x,0x%x,0x%x)\n",
			  fw_name, timer, bundle, min_size);
		release_firmware(fw);
		return ERR_PTR(-EINVAL);
	}

	/* OK, firmware is validated and ready to use. Save a pointer
	 * to it in the nic */
	nic->fw = fw;
	return fw;
}
       
  1380 
       
/* cb_prepare callout: fill @cb with a load-microcode command.
 * @skb actually carries a (const struct firmware *) smuggled through
 * e100_exec_cb() - it is not a real socket buffer. */
static void e100_setup_ucode(struct nic *nic, struct cb *cb,
			     struct sk_buff *skb)
{
	const struct firmware *fw = (void *)skb;
	u8 timer, bundle, min_size;

	/* It's not a real skb; we just abused the fact that e100_exec_cb
	   will pass it through to here... */
	cb->skb = NULL;

	/* firmware is stored as little endian already */
	memcpy(cb->u.ucode, fw->data, UCODE_SIZE * 4);

	/* Read timer, bundle and min_size from end of firmware blob */
	timer = fw->data[UCODE_SIZE * 4];
	bundle = fw->data[UCODE_SIZE * 4 + 1];
	min_size = fw->data[UCODE_SIZE * 4 + 2];

	/* Insert user-tunable settings in cb->u.ucode: each offset names a
	 * "move immediate" instruction whose low 16 bits are patched
	 * (see the CPUSaver parameter description above). */
	cb->u.ucode[timer] &= cpu_to_le32(0xFFFF0000);
	cb->u.ucode[timer] |= cpu_to_le32(INTDELAY);
	cb->u.ucode[bundle] &= cpu_to_le32(0xFFFF0000);
	cb->u.ucode[bundle] |= cpu_to_le32(BUNDLEMAX);
	cb->u.ucode[min_size] &= cpu_to_le32(0xFFFF0000);
	cb->u.ucode[min_size] |= cpu_to_le32((BUNDLESMALL) ? 0xFFFF : 0xFF80);

	cb->command = cpu_to_le16(cb_ucode | cb_el);
}
       
  1409 
       
/* Download the microcode command block built by e100_setup_ucode() and
 * busy-wait (up to ~500 ms) for the device to complete it.
 * Returns 0 on success — including the "no ucode required" case, since
 * PTR_ERR(NULL) == 0 — or a negative errno on failure. */
static inline int e100_load_ucode_wait(struct nic *nic)
{
	const struct firmware *fw;
	int err = 0, counter = 50;
	struct cb *cb = nic->cb_to_clean;

	fw = e100_request_firmware(nic);
	/* If it's NULL, then no ucode is required */
	if (!fw || IS_ERR(fw))
		return PTR_ERR(fw);

	if ((err = e100_exec_cb(nic, (void *)fw, e100_setup_ucode)))
		netif_err(nic, probe, nic->netdev,
			  "ucode cmd failed with error %d\n", err);

	/* must restart cuc */
	nic->cuc_cmd = cuc_start;

	/* wait for completion */
	e100_write_flush(nic);
	udelay(10);

	/* wait for possibly (ouch) 500ms */
	while (!(cb->status & cpu_to_le16(cb_complete))) {
		msleep(10);
		if (!--counter) break;
	}

	/* ack any interrupts, something could have been set */
	iowrite8(~0, &nic->csr->scb.stat_ack);

	/* if the command failed, or is not OK, notify and return */
	if (!counter || !(cb->status & cpu_to_le16(cb_ok))) {
		netif_err(nic, probe, nic->netdev, "ucode load failed\n");
		err = -EPERM;
	}

	return err;
}
       
  1449 
       
  1450 static void e100_setup_iaaddr(struct nic *nic, struct cb *cb,
       
  1451 	struct sk_buff *skb)
       
  1452 {
       
  1453 	cb->command = cpu_to_le16(cb_iaaddr);
       
  1454 	memcpy(cb->u.iaaddr, nic->netdev->dev_addr, ETH_ALEN);
       
  1455 }
       
  1456 
       
  1457 static void e100_dump(struct nic *nic, struct cb *cb, struct sk_buff *skb)
       
  1458 {
       
  1459 	cb->command = cpu_to_le16(cb_dump);
       
  1460 	cb->u.dump_buffer_addr = cpu_to_le32(nic->dma_addr +
       
  1461 		offsetof(struct mem, dump_buf));
       
  1462 }
       
  1463 
       
  1464 static int e100_phy_check_without_mii(struct nic *nic)
       
  1465 {
       
  1466 	u8 phy_type;
       
  1467 	int without_mii;
       
  1468 
       
  1469 	phy_type = (nic->eeprom[eeprom_phy_iface] >> 8) & 0x0f;
       
  1470 
       
  1471 	switch (phy_type) {
       
  1472 	case NoSuchPhy: /* Non-MII PHY; UNTESTED! */
       
  1473 	case I82503: /* Non-MII PHY; UNTESTED! */
       
  1474 	case S80C24: /* Non-MII PHY; tested and working */
       
  1475 		/* paragraph from the FreeBSD driver, "FXP_PHY_80C24":
       
  1476 		 * The Seeq 80c24 AutoDUPLEX(tm) Ethernet Interface Adapter
       
  1477 		 * doesn't have a programming interface of any sort.  The
       
  1478 		 * media is sensed automatically based on how the link partner
       
  1479 		 * is configured.  This is, in essence, manual configuration.
       
  1480 		 */
       
  1481 		netif_info(nic, probe, nic->netdev,
       
  1482 			   "found MII-less i82503 or 80c24 or other PHY\n");
       
  1483 
       
  1484 		nic->mdio_ctrl = mdio_ctrl_phy_mii_emulated;
       
  1485 		nic->mii.phy_id = 0; /* is this ok for an MII-less PHY? */
       
  1486 
       
  1487 		/* these might be needed for certain MII-less cards...
       
  1488 		 * nic->flags |= ich;
       
  1489 		 * nic->flags |= ich_10h_workaround; */
       
  1490 
       
  1491 		without_mii = 1;
       
  1492 		break;
       
  1493 	default:
       
  1494 		without_mii = 0;
       
  1495 		break;
       
  1496 	}
       
  1497 	return without_mii;
       
  1498 }
       
  1499 
       
  1500 #define NCONFIG_AUTO_SWITCH	0x0080
       
  1501 #define MII_NSC_CONG		MII_RESV1
       
  1502 #define NSC_CONG_ENABLE		0x0100
       
  1503 #define NSC_CONG_TXREADY	0x0400
       
  1504 #define ADVERTISE_FC_SUPPORTED	0x0400
       
/* Locate and initialise the PHY: probe all 32 MDIO addresses, read the
 * PHY ID, isolate every other PHY, and apply per-PHY quirks (National
 * tx congestion control, 82552 flow-control advertisement, MDI/MDI-X
 * auto-switch).  Returns 0 on success, -EAGAIN when no PHY is found
 * and the board is not a known MII-less variant. */
static int e100_phy_init(struct nic *nic)
{
	struct net_device *netdev = nic->netdev;
	u32 addr;
	u16 bmcr, stat, id_lo, id_hi, cong;

	/* Discover phy addr by searching addrs in order {1,0,2,..., 31} */
	for (addr = 0; addr < 32; addr++) {
		nic->mii.phy_id = (addr == 0) ? 1 : (addr == 1) ? 0 : addr;
		bmcr = mdio_read(netdev, nic->mii.phy_id, MII_BMCR);
		stat = mdio_read(netdev, nic->mii.phy_id, MII_BMSR);
		/* BMSR bits are latched; read twice to get current state */
		stat = mdio_read(netdev, nic->mii.phy_id, MII_BMSR);
		/* all-ones or all-zeros means nothing answered here */
		if (!((bmcr == 0xFFFF) || ((stat == 0) && (bmcr == 0))))
			break;
	}
	if (addr == 32) {
		/* uhoh, no PHY detected: check whether we seem to be some
		 * weird, rare variant which is *known* to not have any MII.
		 * But do this AFTER MII checking only, since this does
		 * lookup of EEPROM values which may easily be unreliable. */
		if (e100_phy_check_without_mii(nic))
			return 0; /* simply return and hope for the best */
		else {
			/* for unknown cases log a fatal error */
			netif_err(nic, hw, nic->netdev,
				  "Failed to locate any known PHY, aborting\n");
			return -EAGAIN;
		}
	} else
		netif_printk(nic, hw, KERN_DEBUG, nic->netdev,
			     "phy_addr = %d\n", nic->mii.phy_id);

	/* Get phy ID */
	id_lo = mdio_read(netdev, nic->mii.phy_id, MII_PHYSID1);
	id_hi = mdio_read(netdev, nic->mii.phy_id, MII_PHYSID2);
	nic->phy = (u32)id_hi << 16 | (u32)id_lo;
	netif_printk(nic, hw, KERN_DEBUG, nic->netdev,
		     "phy ID = 0x%08X\n", nic->phy);

	/* Select the phy and isolate the rest.
	 * Note: bmcr keeps the selected PHY's control value for the
	 * 82552 workaround below. */
	for (addr = 0; addr < 32; addr++) {
		if (addr != nic->mii.phy_id) {
			mdio_write(netdev, addr, MII_BMCR, BMCR_ISOLATE);
		} else if (nic->phy != phy_82552_v) {
			bmcr = mdio_read(netdev, addr, MII_BMCR);
			mdio_write(netdev, addr, MII_BMCR,
				bmcr & ~BMCR_ISOLATE);
		}
	}
	/*
	 * Workaround for 82552:
	 * Clear the ISOLATE bit on selected phy_id last (mirrored on all
	 * other phy_id's) using bmcr value from addr discovery loop above.
	 */
	if (nic->phy == phy_82552_v)
		mdio_write(netdev, nic->mii.phy_id, MII_BMCR,
			bmcr & ~BMCR_ISOLATE);

	/* Handle National tx phys */
#define NCS_PHY_MODEL_MASK	0xFFF0FFFF
	if ((nic->phy & NCS_PHY_MODEL_MASK) == phy_nsc_tx) {
		/* Disable congestion control */
		cong = mdio_read(netdev, nic->mii.phy_id, MII_NSC_CONG);
		cong |= NSC_CONG_TXREADY;
		cong &= ~NSC_CONG_ENABLE;
		mdio_write(netdev, nic->mii.phy_id, MII_NSC_CONG, cong);
	}

	if (nic->phy == phy_82552_v) {
		u16 advert = mdio_read(netdev, nic->mii.phy_id, MII_ADVERTISE);

		/* assign special tweaked mdio_ctrl() function */
		nic->mdio_ctrl = mdio_ctrl_phy_82552_v;

		/* Workaround Si not advertising flow-control during autoneg */
		advert |= ADVERTISE_PAUSE_CAP | ADVERTISE_PAUSE_ASYM;
		mdio_write(netdev, nic->mii.phy_id, MII_ADVERTISE, advert);

		/* Reset for the above changes to take effect */
		bmcr = mdio_read(netdev, nic->mii.phy_id, MII_BMCR);
		bmcr |= BMCR_RESET;
		mdio_write(netdev, nic->mii.phy_id, MII_BMCR, bmcr);
	} else if ((nic->mac >= mac_82550_D102) || ((nic->flags & ich) &&
	   (mdio_read(netdev, nic->mii.phy_id, MII_TPISTATUS) & 0x8000) &&
		!(nic->eeprom[eeprom_cnfg_mdix] & eeprom_mdix_enabled))) {
		/* enable/disable MDI/MDI-X auto-switching. */
		mdio_write(netdev, nic->mii.phy_id, MII_NCONFIG,
				nic->mii.force_media ? 0 : NCONFIG_AUTO_SWITCH);
	}

	return 0;
}
       
  1597 
       
/* Full hardware (re)initialisation: reset the chip, run the self test,
 * bring up the PHY, load CU/RU base addresses and microcode, configure
 * the unit, set the MAC address and arm statistics dumping.  The
 * command order is fixed; returns 0 or the errno of the first failing
 * step. */
static int e100_hw_init(struct nic *nic)
{
	int err = 0;

	e100_hw_reset(nic);

	/* NOTE(review): trace message logged at error severity — looks
	 * like leftover debug output; confirm before changing. */
	netif_err(nic, hw, nic->netdev, "e100_hw_init\n");
	if (!in_interrupt() && (err = e100_self_test(nic)))
		return err;

	if ((err = e100_phy_init(nic)))
		return err;
	if ((err = e100_exec_cmd(nic, cuc_load_base, 0)))
		return err;
	if ((err = e100_exec_cmd(nic, ruc_load_base, 0)))
		return err;
	if ((err = e100_load_ucode_wait(nic)))
		return err;
	if ((err = e100_exec_cb(nic, NULL, e100_configure)))
		return err;
	if ((err = e100_exec_cb(nic, NULL, e100_setup_iaaddr)))
		return err;
	/* Tell the CU where to dump statistics, then start a dump+reset
	 * cycle that e100_update_stats() will harvest later. */
	if ((err = e100_exec_cmd(nic, cuc_dump_addr,
		nic->dma_addr + offsetof(struct mem, stats))))
		return err;
	if ((err = e100_exec_cmd(nic, cuc_dump_reset, 0)))
		return err;

	e100_disable_irq(nic);

	return 0;
}
       
  1630 
       
/* CB setup callback: build a cb_multi command containing the netdev's
 * multicast address list, truncated to E100_MAX_MULTICAST_ADDRS
 * entries.  The skb argument is unused; it is only part of the
 * e100_exec_cb callback signature. */
static void e100_multi(struct nic *nic, struct cb *cb, struct sk_buff *skb)
{
	struct net_device *netdev = nic->netdev;
	struct netdev_hw_addr *ha;
	u16 i, count = min(netdev_mc_count(netdev), E100_MAX_MULTICAST_ADDRS);

	cb->command = cpu_to_le16(cb_multi);
	/* the device expects the byte count, not the address count */
	cb->u.multi.count = cpu_to_le16(count * ETH_ALEN);
	i = 0;
	netdev_for_each_mc_addr(ha, netdev) {
		if (i == count)
			break;
		memcpy(&cb->u.multi.addr[i++ * ETH_ALEN], &ha->addr,
			ETH_ALEN);
	}
}
       
  1647 
       
  1648 static void e100_set_multicast_list(struct net_device *netdev)
       
  1649 {
       
  1650 	struct nic *nic = netdev_priv(netdev);
       
  1651 
       
  1652 	netif_printk(nic, hw, KERN_DEBUG, nic->netdev,
       
  1653 		     "mc_count=%d, flags=0x%04X\n",
       
  1654 		     netdev_mc_count(netdev), netdev->flags);
       
  1655 
       
  1656 	if (netdev->flags & IFF_PROMISC)
       
  1657 		nic->flags |= promiscuous;
       
  1658 	else
       
  1659 		nic->flags &= ~promiscuous;
       
  1660 
       
  1661 	if (netdev->flags & IFF_ALLMULTI ||
       
  1662 		netdev_mc_count(netdev) > E100_MAX_MULTICAST_ADDRS)
       
  1663 		nic->flags |= multicast_all;
       
  1664 	else
       
  1665 		nic->flags &= ~multicast_all;
       
  1666 
       
  1667 	e100_exec_cb(nic, NULL, e100_configure);
       
  1668 	e100_exec_cb(nic, NULL, e100_multi);
       
  1669 }
       
  1670 
       
/* Harvest the results of the previous cuc_dump_reset command (if it
 * completed) into the netdev stats and the driver's private counters,
 * then kick off a new dump+reset so fresh numbers are ready for the
 * next call. */
static void e100_update_stats(struct nic *nic)
{
	struct net_device *dev = nic->netdev;
	struct net_device_stats *ns = &dev->stats;
	struct stats *s = &nic->mem->stats;
	/* Older MAC generations implement a shorter dump area, so the
	 * completion marker lives at a MAC-dependent offset. */
	__le32 *complete = (nic->mac < mac_82558_D101_A4) ? &s->fc_xmt_pause :
		(nic->mac < mac_82559_D101M) ? (__le32 *)&s->xmt_tco_frames :
		&s->complete;

	/* Device's stats reporting may take several microseconds to
	 * complete, so we're always waiting for results of the
	 * previous command. */

	if (*complete == cpu_to_le32(cuc_dump_reset_complete)) {
		*complete = 0;
		nic->tx_frames = le32_to_cpu(s->tx_good_frames);
		nic->tx_collisions = le32_to_cpu(s->tx_total_collisions);
		ns->tx_aborted_errors += le32_to_cpu(s->tx_max_collisions);
		ns->tx_window_errors += le32_to_cpu(s->tx_late_collisions);
		ns->tx_carrier_errors += le32_to_cpu(s->tx_lost_crs);
		ns->tx_fifo_errors += le32_to_cpu(s->tx_underruns);
		ns->collisions += nic->tx_collisions;
		ns->tx_errors += le32_to_cpu(s->tx_max_collisions) +
			le32_to_cpu(s->tx_lost_crs);
		nic->rx_short_frame_errors +=
			le32_to_cpu(s->rx_short_frame_errors);
		/* length errors are recomputed, not accumulated */
		ns->rx_length_errors = nic->rx_short_frame_errors +
			nic->rx_over_length_errors;
		ns->rx_crc_errors += le32_to_cpu(s->rx_crc_errors);
		ns->rx_frame_errors += le32_to_cpu(s->rx_alignment_errors);
		ns->rx_over_errors += le32_to_cpu(s->rx_overrun_errors);
		ns->rx_fifo_errors += le32_to_cpu(s->rx_overrun_errors);
		ns->rx_missed_errors += le32_to_cpu(s->rx_resource_errors);
		ns->rx_errors += le32_to_cpu(s->rx_crc_errors) +
			le32_to_cpu(s->rx_alignment_errors) +
			le32_to_cpu(s->rx_short_frame_errors) +
			le32_to_cpu(s->rx_cdt_errors);
		nic->tx_deferred += le32_to_cpu(s->tx_deferred);
		nic->tx_single_collisions +=
			le32_to_cpu(s->tx_single_collisions);
		nic->tx_multiple_collisions +=
			le32_to_cpu(s->tx_multiple_collisions);
		/* flow-control and TCO counters only exist on newer MACs */
		if (nic->mac >= mac_82558_D101_A4) {
			nic->tx_fc_pause += le32_to_cpu(s->fc_xmt_pause);
			nic->rx_fc_pause += le32_to_cpu(s->fc_rcv_pause);
			nic->rx_fc_unsupported +=
				le32_to_cpu(s->fc_rcv_unsupported);
			if (nic->mac >= mac_82559_D101M) {
				nic->tx_tco_frames +=
					le16_to_cpu(s->xmt_tco_frames);
				nic->rx_tco_frames +=
					le16_to_cpu(s->rcv_tco_frames);
			}
		}
	}


	if (e100_exec_cmd(nic, cuc_dump_reset, 0))
		netif_printk(nic, tx_err, KERN_DEBUG, nic->netdev,
			     "exec cuc_dump_reset failed\n");
}
       
  1732 
       
  1733 static void e100_adjust_adaptive_ifs(struct nic *nic, int speed, int duplex)
       
  1734 {
       
  1735 	/* Adjust inter-frame-spacing (IFS) between two transmits if
       
  1736 	 * we're getting collisions on a half-duplex connection. */
       
  1737 
       
  1738 	if (duplex == DUPLEX_HALF) {
       
  1739 		u32 prev = nic->adaptive_ifs;
       
  1740 		u32 min_frames = (speed == SPEED_100) ? 1000 : 100;
       
  1741 
       
  1742 		if ((nic->tx_frames / 32 < nic->tx_collisions) &&
       
  1743 		   (nic->tx_frames > min_frames)) {
       
  1744 			if (nic->adaptive_ifs < 60)
       
  1745 				nic->adaptive_ifs += 5;
       
  1746 		} else if (nic->tx_frames < min_frames) {
       
  1747 			if (nic->adaptive_ifs >= 5)
       
  1748 				nic->adaptive_ifs -= 5;
       
  1749 		}
       
  1750 		if (nic->adaptive_ifs != prev)
       
  1751 			e100_exec_cb(nic, NULL, e100_configure);
       
  1752 	}
       
  1753 }
       
  1754 
       
/* Periodic link-maintenance timer (pre-timer_setup() API: the nic is
 * passed via the unsigned long argument).  Under EtherCAT operation it
 * only reports link state to the master and does NOT re-arm itself;
 * otherwise it checks the link, generates a software interrupt to
 * recover from Rx allocation failures, refreshes statistics, tunes
 * the adaptive IFS and re-arms. */
static void e100_watchdog(unsigned long data)
{
	struct nic *nic = (struct nic *)data;
	struct ethtool_cmd cmd = { .cmd = ETHTOOL_GSET };
	u32 speed;

	if (nic->ecdev) {
		ecdev_set_link(nic->ecdev, mii_link_ok(&nic->mii) ? 1 : 0);
		return;
	}

	netif_printk(nic, timer, KERN_DEBUG, nic->netdev,
		     "right now = %ld\n", jiffies);

	/* mii library handles link maintenance tasks */

	mii_ethtool_gset(&nic->mii, &cmd);
	speed = ethtool_cmd_speed(&cmd);

	/* log link transitions (up with speed/duplex, or down) */
	if (mii_link_ok(&nic->mii) && !netif_carrier_ok(nic->netdev)) {
		netdev_info(nic->netdev, "NIC Link is Up %u Mbps %s Duplex\n",
			    speed == SPEED_100 ? 100 : 10,
			    cmd.duplex == DUPLEX_FULL ? "Full" : "Half");
	} else if (!mii_link_ok(&nic->mii) && netif_carrier_ok(nic->netdev)) {
		netdev_info(nic->netdev, "NIC Link is Down\n");
	}

	mii_check_link(&nic->mii);

	/* Software generated interrupt to recover from (rare) Rx
	 * allocation failure.
	 * Unfortunately have to use a spinlock to not re-enable interrupts
	 * accidentally, due to hardware that shares a register between the
	 * interrupt mask bit and the SW Interrupt generation bit */
	spin_lock_irq(&nic->cmd_lock);
	iowrite8(ioread8(&nic->csr->scb.cmd_hi) | irq_sw_gen,&nic->csr->scb.cmd_hi);
	e100_write_flush(nic);
	spin_unlock_irq(&nic->cmd_lock);

	e100_update_stats(nic);
	e100_adjust_adaptive_ifs(nic, speed, cmd.duplex);

	if (nic->mac <= mac_82557_D100_C)
		/* Issue a multicast command to workaround a 557 lock up */
		e100_set_multicast_list(nic->netdev);

	if (nic->flags & ich && speed == SPEED_10 && cmd.duplex == DUPLEX_HALF)
		/* Need SW workaround for ICH[x] 10Mbps/half duplex Tx hang. */
		nic->flags |= ich_10h_workaround;
	else
		nic->flags &= ~ich_10h_workaround;

	mod_timer(&nic->watchdog,
		  round_jiffies(jiffies + E100_WATCHDOG_PERIOD));
}
       
  1810 
       
/* CB setup callback for transmit: fill in a TCB for the given skb,
 * DMA-map its linear data and select per-frame command bits
 * (CRC suppression, periodic Tx-complete interrupt). */
static void e100_xmit_prepare(struct nic *nic, struct cb *cb,
	struct sk_buff *skb)
{
	cb->command = nic->tx_command;

	/*
	 * Use the last 4 bytes of the SKB payload packet as the CRC, used for
	 * testing, ie sending frames with bad CRC.
	 */
	if (unlikely(skb->no_fcs))
		cb->command |= __constant_cpu_to_le16(cb_tx_nc);
	else
		cb->command &= ~__constant_cpu_to_le16(cb_tx_nc);

	/* interrupt every 16 packets regardless of delay */
	if ((nic->cbs_avail & ~15) == nic->cbs_avail)
		cb->command |= cpu_to_le16(cb_i);
	cb->u.tcb.tbd_array = cb->dma_addr + offsetof(struct cb, u.tcb.tbd);
	cb->u.tcb.tcb_byte_count = 0;
	cb->u.tcb.threshold = nic->tx_threshold;
	cb->u.tcb.tbd_count = 1;
	cb->u.tcb.tbd.buf_addr = cpu_to_le32(pci_map_single(nic->pdev,
		skb->data, skb->len, PCI_DMA_TODEVICE));
	/* check for mapping failure? */
	cb->u.tcb.tbd.size = cpu_to_le16(skb->len);
}
       
  1837 
       
  1838 static netdev_tx_t e100_xmit_frame(struct sk_buff *skb,
       
  1839 				   struct net_device *netdev)
       
  1840 {
       
  1841 	struct nic *nic = netdev_priv(netdev);
       
  1842 	int err;
       
  1843 
       
  1844 	if (nic->flags & ich_10h_workaround) {
       
  1845 		/* SW workaround for ICH[x] 10Mbps/half duplex Tx hang.
       
  1846 		   Issue a NOP command followed by a 1us delay before
       
  1847 		   issuing the Tx command. */
       
  1848 		if (e100_exec_cmd(nic, cuc_nop, 0))
       
  1849 			netif_printk(nic, tx_err, KERN_DEBUG, nic->netdev,
       
  1850 				     "exec cuc_nop failed\n");
       
  1851 		udelay(1);
       
  1852 	}
       
  1853 
       
  1854 	err = e100_exec_cb(nic, skb, e100_xmit_prepare);
       
  1855 
       
  1856 	switch (err) {
       
  1857 	case -ENOSPC:
       
  1858 		/* We queued the skb, but now we're out of space. */
       
  1859 		netif_printk(nic, tx_err, KERN_DEBUG, nic->netdev,
       
  1860 			     "No space for CB\n");
       
  1861 		if (!nic->ecdev)
       
  1862 			netif_stop_queue(netdev);
       
  1863 		break;
       
  1864 	case -ENOMEM:
       
  1865 		/* This is a hard error - log it. */
       
  1866 		netif_printk(nic, tx_err, KERN_DEBUG, nic->netdev,
       
  1867 			     "Out of Tx resources, returning skb\n");
       
  1868 		if (!nic->ecdev)
       
  1869 			netif_stop_queue(netdev);
       
  1870 		return NETDEV_TX_BUSY;
       
  1871 	}
       
  1872 
       
  1873 	return NETDEV_TX_OK;
       
  1874 }
       
  1875 
       
/* Reclaim completed Tx command blocks: unmap the DMA buffer, account
 * the packet, free the skb (not under EtherCAT, where the master owns
 * the frame memory and no locking is needed) and return the CB to the
 * free pool.  Returns non-zero when at least one CB was cleaned. */
static int e100_tx_clean(struct nic *nic)
{
	struct net_device *dev = nic->netdev;
	struct cb *cb;
	int tx_cleaned = 0;

	if (!nic->ecdev)
		spin_lock(&nic->cb_lock);

	/* Clean CBs marked complete */
	for (cb = nic->cb_to_clean;
	    cb->status & cpu_to_le16(cb_complete);
	    cb = nic->cb_to_clean = cb->next) {
		rmb(); /* read skb after status */
		netif_printk(nic, tx_done, KERN_DEBUG, nic->netdev,
			     "cb[%d]->status = 0x%04X\n",
			     (int)(((void*)cb - (void*)nic->cbs)/sizeof(struct cb)),
			     cb->status);

		/* only Tx CBs carry an skb; non-Tx commands are skipped */
		if (likely(cb->skb != NULL)) {
			dev->stats.tx_packets++;
			dev->stats.tx_bytes += cb->skb->len;

			pci_unmap_single(nic->pdev,
				le32_to_cpu(cb->u.tcb.tbd.buf_addr),
				le16_to_cpu(cb->u.tcb.tbd.size),
				PCI_DMA_TODEVICE);
			if (!nic->ecdev)
				dev_kfree_skb_any(cb->skb);
			cb->skb = NULL;
			tx_cleaned = 1;
		}
		cb->status = 0;
		nic->cbs_avail++;
	}

	if (!nic->ecdev) {
		spin_unlock(&nic->cb_lock);

		/* Recover from running out of Tx resources in xmit_frame */
		if (unlikely(tx_cleaned && netif_queue_stopped(nic->netdev)))
			netif_wake_queue(nic->netdev);
	}

	return tx_cleaned;
}
       
  1922 
       
/* Tear down the command-block ring: unmap and free any skbs still in
 * flight, return the ring memory to the PCI pool and reset the ring
 * pointers so a subsequent e100_alloc_cbs() starts clean. */
static void e100_clean_cbs(struct nic *nic)
{
	if (nic->cbs) {
		/* walk until every CB has been returned to the pool */
		while (nic->cbs_avail != nic->params.cbs.count) {
			struct cb *cb = nic->cb_to_clean;
			if (cb->skb) {
				pci_unmap_single(nic->pdev,
					le32_to_cpu(cb->u.tcb.tbd.buf_addr),
					le16_to_cpu(cb->u.tcb.tbd.size),
					PCI_DMA_TODEVICE);
				/* under EtherCAT the master owns the frame */
				if (!nic->ecdev)
					dev_kfree_skb(cb->skb);
			}
			nic->cb_to_clean = nic->cb_to_clean->next;
			nic->cbs_avail++;
		}
		pci_pool_free(nic->cbs_pool, nic->cbs, nic->cbs_dma_addr);
		nic->cbs = NULL;
		nic->cbs_avail = 0;
	}
	nic->cuc_cmd = cuc_start;
	nic->cb_to_use = nic->cb_to_send = nic->cb_to_clean =
		nic->cbs;
}
       
  1947 
       
  1948 static int e100_alloc_cbs(struct nic *nic)
       
  1949 {
       
  1950 	struct cb *cb;
       
  1951 	unsigned int i, count = nic->params.cbs.count;
       
  1952 
       
  1953 	nic->cuc_cmd = cuc_start;
       
  1954 	nic->cb_to_use = nic->cb_to_send = nic->cb_to_clean = NULL;
       
  1955 	nic->cbs_avail = 0;
       
  1956 
       
  1957 	nic->cbs = pci_pool_alloc(nic->cbs_pool, GFP_KERNEL,
       
  1958 				  &nic->cbs_dma_addr);
       
  1959 	if (!nic->cbs)
       
  1960 		return -ENOMEM;
       
  1961 	memset(nic->cbs, 0, count * sizeof(struct cb));
       
  1962 
       
  1963 	for (cb = nic->cbs, i = 0; i < count; cb++, i++) {
       
  1964 		cb->next = (i + 1 < count) ? cb + 1 : nic->cbs;
       
  1965 		cb->prev = (i == 0) ? nic->cbs + count - 1 : cb - 1;
       
  1966 
       
  1967 		cb->dma_addr = nic->cbs_dma_addr + i * sizeof(struct cb);
       
  1968 		cb->link = cpu_to_le32(nic->cbs_dma_addr +
       
  1969 			((i+1) % count) * sizeof(struct cb));
       
  1970 	}
       
  1971 
       
  1972 	nic->cb_to_use = nic->cb_to_send = nic->cb_to_clean = nic->cbs;
       
  1973 	nic->cbs_avail = count;
       
  1974 
       
  1975 	return 0;
       
  1976 }
       
  1977 
       
  1978 static inline void e100_start_receiver(struct nic *nic, struct rx *rx)
       
  1979 {
       
  1980 	if (!nic->rxs) return;
       
  1981 	if (RU_SUSPENDED != nic->ru_running) return;
       
  1982 
       
  1983 	/* handle init time starts */
       
  1984 	if (!rx) rx = nic->rxs;
       
  1985 
       
  1986 	/* (Re)start RU if suspended or idle and RFA is non-NULL */
       
  1987 	if (rx->skb) {
       
  1988 		e100_exec_cmd(nic, ruc_start, rx->dma_addr);
       
  1989 		nic->ru_running = RU_RUNNING;
       
  1990 	}
       
  1991 }
       
  1992 
       
#define RFD_BUF_LEN (sizeof(struct rfd) + VLAN_ETH_FRAME_LEN + ETH_FCS_LEN)
/* Allocate and DMA-map a fresh Rx buffer for the given ring slot,
 * prime it with a blank RFD header, and chain it onto the end of the
 * RFA by patching the previous RFD's link field.  Returns 0 or
 * -ENOMEM. */
static int e100_rx_alloc_skb(struct nic *nic, struct rx *rx)
{
	if (!(rx->skb = netdev_alloc_skb_ip_align(nic->netdev, RFD_BUF_LEN)))
		return -ENOMEM;

	/* Init, and map the RFD. */
	skb_copy_to_linear_data(rx->skb, &nic->blank_rfd, sizeof(struct rfd));
	rx->dma_addr = pci_map_single(nic->pdev, rx->skb->data,
		RFD_BUF_LEN, PCI_DMA_BIDIRECTIONAL);

	if (pci_dma_mapping_error(nic->pdev, rx->dma_addr)) {
		dev_kfree_skb_any(rx->skb);
		rx->skb = NULL;
		rx->dma_addr = 0;
		return -ENOMEM;
	}

	/* Link the RFD to end of RFA by linking previous RFD to
	 * this one.  We are safe to touch the previous RFD because
	 * it is protected by the before last buffer's el bit being set */
	if (rx->prev->skb) {
		struct rfd *prev_rfd = (struct rfd *)rx->prev->skb->data;
		/* unaligned store: the link field sits inside packet data */
		put_unaligned_le32(rx->dma_addr, &prev_rfd->link);
		pci_dma_sync_single_for_device(nic->pdev, rx->prev->dma_addr,
			sizeof(struct rfd), PCI_DMA_BIDIRECTIONAL);
	}

	return 0;
}
       
  2023 
       
/* Examine one Rx ring slot and, if the hardware completed it, hand the
 * frame up — to the EtherCAT master via ecdev_receive() or to the
 * network stack via netif_receive_skb().  Detects a stealthily
 * suspended RU so the caller can restart it without re-enabling
 * interrupts.
 * Returns 0 when a buffer was processed, -EAGAIN when the work budget
 * is exhausted, -ENODATA when the slot is not ready. */
static int e100_rx_indicate(struct nic *nic, struct rx *rx,
	unsigned int *work_done, unsigned int work_to_do)
{
	struct net_device *dev = nic->netdev;
	struct sk_buff *skb = rx->skb;
	struct rfd *rfd = (struct rfd *)skb->data;
	u16 rfd_status, actual_size;
	u16 fcs_pad = 0;

	if (unlikely(work_done && *work_done >= work_to_do))
		return -EAGAIN;

	/* Need to sync before taking a peek at cb_complete bit */
	pci_dma_sync_single_for_cpu(nic->pdev, rx->dma_addr,
		sizeof(struct rfd), PCI_DMA_BIDIRECTIONAL);
	rfd_status = le16_to_cpu(rfd->status);

	netif_printk(nic, rx_status, KERN_DEBUG, nic->netdev,
		     "status=0x%04X\n", rfd_status);
	rmb(); /* read size after status bit */

	/* If data isn't ready, nothing to indicate */
	if (unlikely(!(rfd_status & cb_complete))) {
		/* If the next buffer has the el bit, but we think the receiver
		 * is still running, check to see if it really stopped while
		 * we had interrupts off.
		 * This allows for a fast restart without re-enabling
		 * interrupts */
		if ((le16_to_cpu(rfd->command) & cb_el) &&
		    (RU_RUNNING == nic->ru_running))

			if (ioread8(&nic->csr->scb.status) & rus_no_res)
				nic->ru_running = RU_SUSPENDED;
		pci_dma_sync_single_for_device(nic->pdev, rx->dma_addr,
					       sizeof(struct rfd),
					       PCI_DMA_FROMDEVICE);
		return -ENODATA;
	}

	/* Get actual data size */
	if (unlikely(dev->features & NETIF_F_RXFCS))
		fcs_pad = 4;
	actual_size = le16_to_cpu(rfd->actual_size) & 0x3FFF;
	if (unlikely(actual_size > RFD_BUF_LEN - sizeof(struct rfd)))
		actual_size = RFD_BUF_LEN - sizeof(struct rfd);

	/* Get data */
	pci_unmap_single(nic->pdev, rx->dma_addr,
		RFD_BUF_LEN, PCI_DMA_BIDIRECTIONAL);

	/* If this buffer has the el bit, but we think the receiver
	 * is still running, check to see if it really stopped while
	 * we had interrupts off.
	 * This allows for a fast restart without re-enabling interrupts.
	 * This can happen when the RU sees the size change but also sees
	 * the el bit set. */
	if ((le16_to_cpu(rfd->command) & cb_el) &&
	    (RU_RUNNING == nic->ru_running)) {

	    if (ioread8(&nic->csr->scb.status) & rus_no_res)
		nic->ru_running = RU_SUSPENDED;
	}

	if (!nic->ecdev) {
		/* Pull off the RFD and put the actual data (minus eth hdr) */
		skb_reserve(skb, sizeof(struct rfd));
		skb_put(skb, actual_size);
		skb->protocol = eth_type_trans(skb, nic->netdev);
	}

	/* If we are receiving all frames, then don't bother
	 * checking for errors.
	 */
	if (unlikely(dev->features & NETIF_F_RXALL)) {
		if (actual_size > ETH_DATA_LEN + VLAN_ETH_HLEN + fcs_pad)
			/* Received oversized frame, but keep it. */
			nic->rx_over_length_errors++;
		goto process_skb;
	}

	if (unlikely(!(rfd_status & cb_ok))) {
		if (!nic->ecdev) {
			/* Don't indicate if hardware indicates errors */
			dev_kfree_skb_any(skb);
		}
	} else if (actual_size > ETH_DATA_LEN + VLAN_ETH_HLEN + fcs_pad) {
		/* Don't indicate oversized frames */
		nic->rx_over_length_errors++;
		if (!nic->ecdev) {
			dev_kfree_skb_any(skb);
		}
	} else {
process_skb:
		dev->stats.rx_packets++;
		dev->stats.rx_bytes += (actual_size - fcs_pad);
		if (nic->ecdev) {
			/* EtherCAT path: pass raw payload (past the RFD
			 * header) directly to the master */
			ecdev_receive(nic->ecdev,
					skb->data + sizeof(struct rfd), actual_size - fcs_pad);

			// No need to detect link status as
			// long as frames are received: Reset watchdog.
			nic->ec_watchdog_jiffies = jiffies;
		} else {
			netif_receive_skb(skb);
		}
		if (work_done)
			(*work_done)++;
	}

	if (nic->ecdev) {
		// make receive frame descriptior usable again
		/* EtherCAT reuses the same skb: re-blank the RFD and
		 * remap it instead of allocating a new buffer */
		memcpy(skb->data, &nic->blank_rfd, sizeof(struct rfd));
		rx->dma_addr = pci_map_single(nic->pdev, skb->data,
				RFD_BUF_LEN, PCI_DMA_BIDIRECTIONAL);
		if (pci_dma_mapping_error(nic->pdev, rx->dma_addr)) {
			rx->dma_addr = 0;
		}

		/* Link the RFD to end of RFA by linking previous RFD to
		 * this one.  We are safe to touch the previous RFD because
		 * it is protected by the before last buffer's el bit being set */
		if (rx->prev->skb) {
			struct rfd *prev_rfd = (struct rfd *) rx->prev->skb->data;
			put_unaligned_le32(rx->dma_addr, &prev_rfd->link);
			pci_dma_sync_single_for_device(nic->pdev, rx->prev->dma_addr,
					sizeof(struct rfd), PCI_DMA_TODEVICE);
		}
	} else {
		/* stack path: ownership moved to the stack, slot will be
		 * refilled by the rx-clean logic */
		rx->skb = NULL;
	}

	return 0;
}
       
  2157 
       
/* Pass completed receive frames up the stack (or to the EtherCAT master),
 * refill the RFD ring (non-EtherCAT mode only) and restart the receive
 * unit if it stalled with "no resources" (RNR).  Called from the NAPI
 * poll loop, or with work_done == NULL from the EtherCAT cyclic poll. */
static void e100_rx_clean(struct nic *nic, unsigned int *work_done,
	unsigned int work_to_do)
{
	struct rx *rx;
	int restart_required = 0, err = 0;
	struct rx *old_before_last_rx, *new_before_last_rx;
	struct rfd *old_before_last_rfd, *new_before_last_rfd;

	/* Indicate newly arrived packets */
	for (rx = nic->rx_to_clean; rx->skb; rx = nic->rx_to_clean = rx->next) {
		err = e100_rx_indicate(nic, rx, work_done, work_to_do);
		/* Hit quota or no more to clean */
		if (-EAGAIN == err || -ENODATA == err)
			break;
	}


	/* On EAGAIN, hit quota so have more work to do, restart once
	 * cleanup is complete.
	 * Else, are we already rnr? then pay attention!!! this ensures that
	 * the state machine progression never allows a start with a
	 * partially cleaned list, avoiding a race between hardware
	 * and rx_to_clean when in NAPI mode */
	if (-EAGAIN != err && RU_SUSPENDED == nic->ru_running)
		restart_required = 1;

	old_before_last_rx = nic->rx_to_use->prev->prev;
	old_before_last_rfd = (struct rfd *)old_before_last_rx->skb->data;

	if (!nic->ecdev) {
		/* Alloc new skbs to refill list */
		for(rx = nic->rx_to_use; !rx->skb; rx = nic->rx_to_use = rx->next) {
			if(unlikely(e100_rx_alloc_skb(nic, rx)))
				break; /* Better luck next time (see watchdog) */
		}
	}

	new_before_last_rx = nic->rx_to_use->prev->prev;
	if (new_before_last_rx != old_before_last_rx) {
		/* Set the el-bit on the buffer that is before the last buffer.
		 * This lets us update the next pointer on the last buffer
		 * without worrying about hardware touching it.
		 * We set the size to 0 to prevent hardware from touching this
		 * buffer.
		 * When the hardware hits the before last buffer with el-bit
		 * and size of 0, it will RNR interrupt, the RUS will go into
		 * the No Resources state.  It will not complete nor write to
		 * this buffer. */
		new_before_last_rfd =
			(struct rfd *)new_before_last_rx->skb->data;
		new_before_last_rfd->size = 0;
		new_before_last_rfd->command |= cpu_to_le16(cb_el);
		pci_dma_sync_single_for_device(nic->pdev,
			new_before_last_rx->dma_addr, sizeof(struct rfd),
			PCI_DMA_BIDIRECTIONAL);

		/* Now that we have a new stopping point, we can clear the old
		 * stopping point.  We must sync twice to get the proper
		 * ordering on the hardware side of things. */
		old_before_last_rfd->command &= ~cpu_to_le16(cb_el);
		pci_dma_sync_single_for_device(nic->pdev,
			old_before_last_rx->dma_addr, sizeof(struct rfd),
			PCI_DMA_BIDIRECTIONAL);
		old_before_last_rfd->size = cpu_to_le16(VLAN_ETH_FRAME_LEN
							+ ETH_FCS_LEN);
		pci_dma_sync_single_for_device(nic->pdev,
			old_before_last_rx->dma_addr, sizeof(struct rfd),
			PCI_DMA_BIDIRECTIONAL);
	}

	if (restart_required) {
		// ack the rnr?
		iowrite8(stat_ack_rnr, &nic->csr->scb.stat_ack);
		e100_start_receiver(nic, nic->rx_to_clean);
		if (work_done)
			(*work_done)++;
	}
}
       
  2236 
       
/* Tear down the RX ring: unmap the DMA buffer and free the skb of every
 * descriptor, then free the descriptor array itself.  Leaves the receive
 * unit marked uninitialized and both ring cursors NULL. */
static void e100_rx_clean_list(struct nic *nic)
{
	struct rx *rx;
	unsigned int i, count = nic->params.rfds.count;

	nic->ru_running = RU_UNINITIALIZED;

	if (nic->rxs) {
		for (rx = nic->rxs, i = 0; i < count; rx++, i++) {
			if (rx->skb) {
				pci_unmap_single(nic->pdev, rx->dma_addr,
					RFD_BUF_LEN, PCI_DMA_BIDIRECTIONAL);
				dev_kfree_skb(rx->skb);
			}
		}
		kfree(nic->rxs);
		nic->rxs = NULL;
	}

	nic->rx_to_use = nic->rx_to_clean = NULL;
}
       
  2258 
       
/* Build the circular RFD ring: allocate 'count' rx descriptors, link them
 * into a doubly linked ring and attach a DMA-mapped skb to each.  In
 * non-EtherCAT mode the before-last descriptor gets the el bit and size 0
 * so the hardware stops (RNR) there instead of running off the ring.
 * Returns 0 on success or -ENOMEM (ring fully cleaned up on failure). */
static int e100_rx_alloc_list(struct nic *nic)
{
	struct rx *rx;
	unsigned int i, count = nic->params.rfds.count;
	struct rfd *before_last;

	nic->rx_to_use = nic->rx_to_clean = NULL;
	nic->ru_running = RU_UNINITIALIZED;

	/* GFP_ATOMIC: may be called from contexts that cannot sleep */
	if (!(nic->rxs = kcalloc(count, sizeof(struct rx), GFP_ATOMIC)))
		return -ENOMEM;

	for (rx = nic->rxs, i = 0; i < count; rx++, i++) {
		rx->next = (i + 1 < count) ? rx + 1 : nic->rxs;
		rx->prev = (i == 0) ? nic->rxs + count - 1 : rx - 1;
		if (e100_rx_alloc_skb(nic, rx)) {
			e100_rx_clean_list(nic);
			return -ENOMEM;
		}
	}

	if (!nic->ecdev) {
		/* Set the el-bit on the buffer that is before the last buffer.
		 * This lets us update the next pointer on the last buffer without
		 * worrying about hardware touching it.
		 * We set the size to 0 to prevent hardware from touching this buffer.
		 * When the hardware hits the before last buffer with el-bit and size
		 * of 0, it will RNR interrupt, the RU will go into the No Resources
		 * state.  It will not complete nor write to this buffer. */
		rx = nic->rxs->prev->prev;
		before_last = (struct rfd *)rx->skb->data;
		before_last->command |= cpu_to_le16(cb_el);
		before_last->size = 0;
		pci_dma_sync_single_for_device(nic->pdev, rx->dma_addr,
				sizeof(struct rfd), PCI_DMA_BIDIRECTIONAL);
	}

	nic->rx_to_use = nic->rx_to_clean = nic->rxs;
	nic->ru_running = RU_SUSPENDED;

	return 0;
}
       
  2301 
       
/* Top-half interrupt handler: read and acknowledge the SCB status byte,
 * record a receiver no-resource (RNR) condition, then — in non-EtherCAT
 * mode only — mask device interrupts and schedule NAPI for the real work. */
static irqreturn_t e100_intr(int irq, void *dev_id)
{
	struct net_device *netdev = dev_id;
	struct nic *nic = netdev_priv(netdev);
	u8 stat_ack = ioread8(&nic->csr->scb.stat_ack);

	netif_printk(nic, intr, KERN_DEBUG, nic->netdev,
		     "stat_ack = 0x%02X\n", stat_ack);

	if (stat_ack == stat_ack_not_ours ||	/* Not our interrupt */
	   stat_ack == stat_ack_not_present)	/* Hardware is ejected */
		return IRQ_NONE;

	/* Ack interrupt(s) */
	iowrite8(stat_ack, &nic->csr->scb.stat_ack);

	/* We hit Receive No Resource (RNR); restart RU after cleaning */
	if (stat_ack & stat_ack_rnr)
		nic->ru_running = RU_SUSPENDED;

	/* In EtherCAT mode, cleanup happens in e100_ec_poll() instead */
	if (!nic->ecdev && likely(napi_schedule_prep(&nic->napi))) {
		e100_disable_irq(nic);
		__napi_schedule(&nic->napi);
	}

	return IRQ_HANDLED;
}
       
  2329 
       
  2330 void e100_ec_poll(struct net_device *netdev)
       
  2331 {
       
  2332 	struct nic *nic = netdev_priv(netdev);
       
  2333 
       
  2334 	e100_rx_clean(nic, NULL, 100);
       
  2335 	e100_tx_clean(nic);
       
  2336 
       
  2337 	if (jiffies - nic->ec_watchdog_jiffies >= 2 * HZ) {
       
  2338 		e100_watchdog((unsigned long) nic);
       
  2339 		nic->ec_watchdog_jiffies = jiffies;
       
  2340 	}
       
  2341 }
       
  2342 
       
  2343 
       
/* NAPI poll callback: clean the RX ring against 'budget' and reap TX.
 * If the budget was not exhausted, leave polling mode and re-enable
 * device interrupts (in that order, per the NAPI contract).  Returns
 * the number of RX packets processed. */
static int e100_poll(struct napi_struct *napi, int budget)
{
	struct nic *nic = container_of(napi, struct nic, napi);
	unsigned int work_done = 0;

	e100_rx_clean(nic, &work_done, budget);
	e100_tx_clean(nic);

	/* If budget not fully consumed, exit the polling mode */
	if (work_done < budget) {
		napi_complete(napi);
		e100_enable_irq(nic);
	}

	return work_done;
}
       
  2360 
       
#ifdef CONFIG_NET_POLL_CONTROLLER
/* Polling "interrupt" for netconsole/netpoll: run the ISR with the
 * device IRQ masked, then reap completed transmits. */
static void e100_netpoll(struct net_device *netdev)
{
	struct nic *nic = netdev_priv(netdev);

	e100_disable_irq(nic);
	e100_intr(nic->pdev->irq, netdev);
	e100_tx_clean(nic);
	e100_enable_irq(nic);
}
#endif
       
  2372 
       
  2373 static int e100_set_mac_address(struct net_device *netdev, void *p)
       
  2374 {
       
  2375 	struct nic *nic = netdev_priv(netdev);
       
  2376 	struct sockaddr *addr = p;
       
  2377 
       
  2378 	if (!is_valid_ether_addr(addr->sa_data))
       
  2379 		return -EADDRNOTAVAIL;
       
  2380 
       
  2381 	memcpy(netdev->dev_addr, addr->sa_data, netdev->addr_len);
       
  2382 	e100_exec_cb(nic, NULL, e100_setup_iaaddr);
       
  2383 
       
  2384 	return 0;
       
  2385 }
       
  2386 
       
  2387 static int e100_change_mtu(struct net_device *netdev, int new_mtu)
       
  2388 {
       
  2389 	if (new_mtu < ETH_ZLEN || new_mtu > ETH_DATA_LEN)
       
  2390 		return -EINVAL;
       
  2391 	netdev->mtu = new_mtu;
       
  2392 	return 0;
       
  2393 }
       
  2394 
       
  2395 static int e100_asf(struct nic *nic)
       
  2396 {
       
  2397 	/* ASF can be enabled from eeprom */
       
  2398 	return (nic->pdev->device >= 0x1050) && (nic->pdev->device <= 0x1057) &&
       
  2399 	   (nic->eeprom[eeprom_config_asf] & eeprom_asf) &&
       
  2400 	   !(nic->eeprom[eeprom_config_asf] & eeprom_gcl) &&
       
  2401 	   ((nic->eeprom[eeprom_smbus_addr] & 0xFF) != 0xFE);
       
  2402 }
       
  2403 
       
/* Bring the interface up: allocate RX ring and command blocks, initialize
 * the hardware, program the multicast list, start the receiver and hook
 * the shared IRQ.  The watchdog timer, queue wakeup, NAPI and interrupt
 * enabling are skipped when the device is claimed by the EtherCAT master.
 * On failure, resources are released in reverse order via the goto chain. */
static int e100_up(struct nic *nic)
{
	int err;

	if ((err = e100_rx_alloc_list(nic)))
		return err;
	if ((err = e100_alloc_cbs(nic)))
		goto err_rx_clean_list;
	if ((err = e100_hw_init(nic)))
		goto err_clean_cbs;
	e100_set_multicast_list(nic->netdev);
	e100_start_receiver(nic, NULL);
	if (!nic->ecdev) {
		mod_timer(&nic->watchdog, jiffies);
	}
	if ((err = request_irq(nic->pdev->irq, e100_intr, IRQF_SHARED,
		nic->netdev->name, nic->netdev)))
		goto err_no_irq;
	if (!nic->ecdev) {
		netif_wake_queue(nic->netdev);
		napi_enable(&nic->napi);
		/* enable ints _after_ enabling poll, preventing a race between
		 * disable ints+schedule */
		e100_enable_irq(nic);
	}
	return 0;

err_no_irq:
	if (!nic->ecdev)
		del_timer_sync(&nic->watchdog);
err_clean_cbs:
	e100_clean_cbs(nic);
err_rx_clean_list:
	e100_rx_clean_list(nic);
	return err;
}
       
  2440 
       
/* Take the interface down: quiesce NAPI and the TX queue (non-EtherCAT
 * only), reset the hardware, release the IRQ, stop the watchdog, and
 * free the command-block and RX rings. */
static void e100_down(struct nic *nic)
{
	if (!nic->ecdev) {
		/* wait here for poll to complete */
		napi_disable(&nic->napi);
		netif_stop_queue(nic->netdev);
	}
	e100_hw_reset(nic);
	free_irq(nic->pdev->irq, nic->netdev);
	if (!nic->ecdev) {
		del_timer_sync(&nic->watchdog);
		netif_carrier_off(nic->netdev);
	}
	e100_clean_cbs(nic);
	e100_rx_clean_list(nic);
}
       
  2457 
       
/* netdev transmit-watchdog callback: defer the recovery to process
 * context, because the reset path re-requests the IRQ. */
static void e100_tx_timeout(struct net_device *netdev)
{
	struct nic *nic = netdev_priv(netdev);

	/* Reset outside of interrupt context, to avoid request_irq
	 * in interrupt context */
	schedule_work(&nic->tx_timeout_task);
}
       
  2466 
       
/* Workqueue body for the TX-timeout recovery: log the SCB status and
 * bounce the interface (down/up) under the RTNL lock so the restart
 * cannot race with other netdev configuration. */
static void e100_tx_timeout_task(struct work_struct *work)
{
	struct nic *nic = container_of(work, struct nic, tx_timeout_task);
	struct net_device *netdev = nic->netdev;

	netif_printk(nic, tx_err, KERN_DEBUG, nic->netdev,
		     "scb.status=0x%02X\n", ioread8(&nic->csr->scb.status));

	rtnl_lock();
	if (netif_running(netdev)) {
		e100_down(netdev_priv(netdev));
		e100_up(netdev_priv(netdev));
	}
	rtnl_unlock();
}
       
  2482 
       
/* Run an internal MAC or PHY loopback self-test.  A single 0xFF-filled
 * frame is transmitted in loopback mode; the test passes when the
 * received buffer compares byte-for-byte with what was sent.  Returns 0
 * on success, -EAGAIN on data mismatch, or a setup error code.  All
 * rings/hardware state are torn down before returning, so the caller
 * must bring the interface up again afterwards. */
static int e100_loopback_test(struct nic *nic, enum loopback loopback_mode)
{
	int err;
	struct sk_buff *skb;

	/* Use driver resources to perform internal MAC or PHY
	 * loopback test.  A single packet is prepared and transmitted
	 * in loopback mode, and the test passes if the received
	 * packet compares byte-for-byte to the transmitted packet. */

	if ((err = e100_rx_alloc_list(nic)))
		return err;
	if ((err = e100_alloc_cbs(nic)))
		goto err_clean_rx;

	/* ICH PHY loopback is broken so do MAC loopback instead */
	if (nic->flags & ich && loopback_mode == lb_phy)
		loopback_mode = lb_mac;

	nic->loopback = loopback_mode;
	if ((err = e100_hw_init(nic)))
		goto err_loopback_none;

	if (loopback_mode == lb_phy)
		mdio_write(nic->netdev, nic->mii.phy_id, MII_BMCR,
			BMCR_LOOPBACK);

	e100_start_receiver(nic, NULL);

	if (!(skb = netdev_alloc_skb(nic->netdev, ETH_DATA_LEN))) {
		err = -ENOMEM;
		goto err_loopback_none;
	}
	skb_put(skb, ETH_DATA_LEN);
	memset(skb->data, 0xFF, ETH_DATA_LEN);
	e100_xmit_frame(skb, nic->netdev);

	/* give the frame time to loop back into the RX ring */
	msleep(10);

	pci_dma_sync_single_for_cpu(nic->pdev, nic->rx_to_clean->dma_addr,
			RFD_BUF_LEN, PCI_DMA_BIDIRECTIONAL);

	/* skip the RFD header that precedes the frame data in the buffer */
	if (memcmp(nic->rx_to_clean->skb->data + sizeof(struct rfd),
	   skb->data, ETH_DATA_LEN))
		err = -EAGAIN;

err_loopback_none:
	mdio_write(nic->netdev, nic->mii.phy_id, MII_BMCR, 0);
	nic->loopback = lb_none;
	e100_clean_cbs(nic);
	e100_hw_reset(nic);
err_clean_rx:
	e100_rx_clean_list(nic);
	return err;
}
       
  2538 
       
  2539 #define MII_LED_CONTROL	0x1B
       
  2540 #define E100_82552_LED_OVERRIDE 0x19
       
  2541 #define E100_82552_LED_ON       0x000F /* LEDTX and LED_RX both on */
       
  2542 #define E100_82552_LED_OFF      0x000A /* LEDTX and LED_RX both off */
       
  2543 
       
/* ethtool: report link settings via the generic MII helper. */
static int e100_get_settings(struct net_device *netdev, struct ethtool_cmd *cmd)
{
	struct nic *nic = netdev_priv(netdev);
	return mii_ethtool_gset(&nic->mii, cmd);
}
       
  2549 
       
/* ethtool: apply link settings.  The PHY is reset first, then the MII
 * helper programs speed/duplex/autoneg, and a configure command block
 * re-synchronizes the MAC with the new PHY state. */
static int e100_set_settings(struct net_device *netdev, struct ethtool_cmd *cmd)
{
	struct nic *nic = netdev_priv(netdev);
	int err;

	mdio_write(netdev, nic->mii.phy_id, MII_BMCR, BMCR_RESET);
	err = mii_ethtool_sset(&nic->mii, cmd);
	e100_exec_cb(nic, NULL, e100_configure);

	return err;
}
       
  2561 
       
/* ethtool: fill in driver name, version and PCI bus location. */
static void e100_get_drvinfo(struct net_device *netdev,
	struct ethtool_drvinfo *info)
{
	struct nic *nic = netdev_priv(netdev);
	strlcpy(info->driver, DRV_NAME, sizeof(info->driver));
	strlcpy(info->version, DRV_VERSION, sizeof(info->version));
	strlcpy(info->bus_info, pci_name(nic->pdev),
		sizeof(info->bus_info));
}
       
  2571 
       
  2572 #define E100_PHY_REGS 0x1C
       
/* ethtool: size of the register dump produced by e100_get_regs():
 * 1 word of SCB state + E100_PHY_REGS MII registers + the firmware
 * statistics dump buffer. */
static int e100_get_regs_len(struct net_device *netdev)
{
	struct nic *nic = netdev_priv(netdev);
	return 1 + E100_PHY_REGS + sizeof(nic->mem->dump_buf);
}
       
  2578 
       
/* ethtool: dump device registers.  Word 0 packs the SCB command and
 * status bytes; the next E100_PHY_REGS+1 words are the MII registers
 * read in descending order; the remainder is the controller's dump
 * buffer filled by an e100_dump command block (10 ms grace for the
 * command to complete). */
static void e100_get_regs(struct net_device *netdev,
	struct ethtool_regs *regs, void *p)
{
	struct nic *nic = netdev_priv(netdev);
	u32 *buff = p;
	int i;

	regs->version = (1 << 24) | nic->pdev->revision;
	buff[0] = ioread8(&nic->csr->scb.cmd_hi) << 24 |
		ioread8(&nic->csr->scb.cmd_lo) << 16 |
		ioread16(&nic->csr->scb.status);
	for (i = E100_PHY_REGS; i >= 0; i--)
		buff[1 + E100_PHY_REGS - i] =
			mdio_read(netdev, nic->mii.phy_id, i);
	memset(nic->mem->dump_buf, 0, sizeof(nic->mem->dump_buf));
	e100_exec_cb(nic, NULL, e100_dump);
	msleep(10);
	memcpy(&buff[2 + E100_PHY_REGS], nic->mem->dump_buf,
		sizeof(nic->mem->dump_buf));
}
       
  2599 
       
/* ethtool: report Wake-on-LAN capability (magic packet only, and only
 * on 82558 D101-A4 or newer MACs) and current setting. */
static void e100_get_wol(struct net_device *netdev, struct ethtool_wolinfo *wol)
{
	struct nic *nic = netdev_priv(netdev);
	wol->supported = (nic->mac >= mac_82558_D101_A4) ?  WAKE_MAGIC : 0;
	wol->wolopts = (nic->flags & wol_magic) ? WAKE_MAGIC : 0;
}
       
  2606 
       
/* ethtool: enable/disable magic-packet Wake-on-LAN.  Anything other
 * than WAKE_MAGIC (or WoL on a device that cannot wake the system) is
 * rejected.  The device is reconfigured to pick up the new flag. */
static int e100_set_wol(struct net_device *netdev, struct ethtool_wolinfo *wol)
{
	struct nic *nic = netdev_priv(netdev);

	if ((wol->wolopts && wol->wolopts != WAKE_MAGIC) ||
	    !device_can_wakeup(&nic->pdev->dev))
		return -EOPNOTSUPP;

	if (wol->wolopts)
		nic->flags |= wol_magic;
	else
		nic->flags &= ~wol_magic;

	device_set_wakeup_enable(&nic->pdev->dev, wol->wolopts);

	e100_exec_cb(nic, NULL, e100_configure);

	return 0;
}
       
  2626 
       
/* ethtool: return the driver's message-level bitmap. */
static u32 e100_get_msglevel(struct net_device *netdev)
{
	struct nic *nic = netdev_priv(netdev);
	return nic->msg_enable;
}
       
  2632 
       
/* ethtool: set the driver's message-level bitmap. */
static void e100_set_msglevel(struct net_device *netdev, u32 value)
{
	struct nic *nic = netdev_priv(netdev);
	nic->msg_enable = value;
}
       
  2638 
       
/* ethtool: restart PHY autonegotiation. */
static int e100_nway_reset(struct net_device *netdev)
{
	struct nic *nic = netdev_priv(netdev);
	return mii_nway_restart(&nic->mii);
}
       
  2644 
       
/* ethtool: report link-up state from the MII status register. */
static u32 e100_get_link(struct net_device *netdev)
{
	struct nic *nic = netdev_priv(netdev);
	return mii_link_ok(&nic->mii);
}
       
  2650 
       
/* ethtool: EEPROM size in bytes (eeprom_wc is a 16-bit word count). */
static int e100_get_eeprom_len(struct net_device *netdev)
{
	struct nic *nic = netdev_priv(netdev);
	return nic->eeprom_wc << 1;
}
       
  2656 
       
  2657 #define E100_EEPROM_MAGIC	0x1234
       
/* ethtool: copy a slice of the cached EEPROM image to userspace.
 * NOTE(review): no bounds check on offset/len here — presumably the
 * ethtool core clamps the request against get_eeprom_len(); confirm. */
static int e100_get_eeprom(struct net_device *netdev,
	struct ethtool_eeprom *eeprom, u8 *bytes)
{
	struct nic *nic = netdev_priv(netdev);

	eeprom->magic = E100_EEPROM_MAGIC;
	memcpy(bytes, &((u8 *)nic->eeprom)[eeprom->offset], eeprom->len);

	return 0;
}
       
  2668 
       
/* ethtool: write a slice of the EEPROM.  The bytes are patched into the
 * cached image, then the affected word range is flushed to the device.
 * The "+ 1" widens the word count so a byte-granular request that spans
 * an extra word (odd offset/length) is still fully written. */
static int e100_set_eeprom(struct net_device *netdev,
	struct ethtool_eeprom *eeprom, u8 *bytes)
{
	struct nic *nic = netdev_priv(netdev);

	/* refuse writes not explicitly aimed at this driver's EEPROM */
	if (eeprom->magic != E100_EEPROM_MAGIC)
		return -EINVAL;

	memcpy(&((u8 *)nic->eeprom)[eeprom->offset], bytes, eeprom->len);

	return e100_eeprom_save(nic, eeprom->offset >> 1,
		(eeprom->len >> 1) + 1);
}
       
  2682 
       
  2683 static void e100_get_ringparam(struct net_device *netdev,
       
  2684 	struct ethtool_ringparam *ring)
       
  2685 {
       
  2686 	struct nic *nic = netdev_priv(netdev);
       
  2687 	struct param_range *rfds = &nic->params.rfds;
       
  2688 	struct param_range *cbs = &nic->params.cbs;
       
  2689 
       
  2690 	ring->rx_max_pending = rfds->max;
       
  2691 	ring->tx_max_pending = cbs->max;
       
  2692 	ring->rx_pending = rfds->count;
       
  2693 	ring->tx_pending = cbs->count;
       
  2694 }
       
  2695 
       
/* ethtool: resize the RX and TX rings.  Mini/jumbo rings are not
 * supported.  The requested counts are clamped to the per-MAC min/max;
 * a running interface is bounced (down/up) so the rings are rebuilt
 * with the new sizes. */
static int e100_set_ringparam(struct net_device *netdev,
	struct ethtool_ringparam *ring)
{
	struct nic *nic = netdev_priv(netdev);
	struct param_range *rfds = &nic->params.rfds;
	struct param_range *cbs = &nic->params.cbs;

	if ((ring->rx_mini_pending) || (ring->rx_jumbo_pending))
		return -EINVAL;

	if (netif_running(netdev))
		e100_down(nic);
	rfds->count = max(ring->rx_pending, rfds->min);
	rfds->count = min(rfds->count, rfds->max);
	cbs->count = max(ring->tx_pending, cbs->min);
	cbs->count = min(cbs->count, cbs->max);
	netif_info(nic, drv, nic->netdev, "Ring Param settings: rx: %d, tx %d\n",
		   rfds->count, cbs->count);
	if (netif_running(netdev))
		e100_up(nic);

	return 0;
}
       
  2719 
       
/* Names for the self-test results reported by e100_diag_test(); order
 * must match the data[] indices filled in there. */
static const char e100_gstrings_test[][ETH_GSTRING_LEN] = {
	"Link test     (on/offline)",
	"Eeprom test   (on/offline)",
	"Self test        (offline)",
	"Mac loopback     (offline)",
	"Phy loopback     (offline)",
};
#define E100_TEST_LEN	ARRAY_SIZE(e100_gstrings_test)
       
  2728 
       
/* ethtool self-test: always runs the link and EEPROM checks; with
 * ETH_TEST_FL_OFFLINE also runs the controller self-test and MAC/PHY
 * loopback tests, bouncing a running interface around them and
 * preserving the link settings.  A zero entry in data[] means pass.
 * NOTE(review): the trailing 4 s sleep has no visible justification in
 * this code — presumably it lets the link re-establish after the
 * offline tests; confirm before removing. */
static void e100_diag_test(struct net_device *netdev,
	struct ethtool_test *test, u64 *data)
{
	struct ethtool_cmd cmd;
	struct nic *nic = netdev_priv(netdev);
	int i, err;

	memset(data, 0, E100_TEST_LEN * sizeof(u64));
	data[0] = !mii_link_ok(&nic->mii);
	data[1] = e100_eeprom_load(nic);
	if (test->flags & ETH_TEST_FL_OFFLINE) {

		/* save speed, duplex & autoneg settings */
		err = mii_ethtool_gset(&nic->mii, &cmd);

		if (netif_running(netdev))
			e100_down(nic);
		data[2] = e100_self_test(nic);
		data[3] = e100_loopback_test(nic, lb_mac);
		data[4] = e100_loopback_test(nic, lb_phy);

		/* restore speed, duplex & autoneg settings */
		err = mii_ethtool_sset(&nic->mii, &cmd);

		if (netif_running(netdev))
			e100_up(nic);
	}
	for (i = 0; i < E100_TEST_LEN; i++)
		test->flags |= data[i] ? ETH_TEST_FL_FAILED : 0;

	msleep_interruptible(4 * 1000);
}
       
  2761 
       
/* ethtool port-identify (LED blink).  Returning 2 for ETHTOOL_ID_ACTIVE
 * asks the ethtool core to call back with ID_ON/ID_OFF twice per second;
 * ID_INACTIVE writes 0 to restore normal LED behavior.  The 82552 PHY
 * uses its own override register and bit values. */
static int e100_set_phys_id(struct net_device *netdev,
			    enum ethtool_phys_id_state state)
{
	struct nic *nic = netdev_priv(netdev);
	enum led_state {
		led_on     = 0x01,
		led_off    = 0x04,
		led_on_559 = 0x05,
		led_on_557 = 0x07,
	};
	u16 led_reg = (nic->phy == phy_82552_v) ? E100_82552_LED_OVERRIDE :
		MII_LED_CONTROL;
	u16 leds = 0;

	switch (state) {
	case ETHTOOL_ID_ACTIVE:
		return 2;

	case ETHTOOL_ID_ON:
		leds = (nic->phy == phy_82552_v) ? E100_82552_LED_ON :
		       (nic->mac < mac_82559_D101M) ? led_on_557 : led_on_559;
		break;

	case ETHTOOL_ID_OFF:
		leds = (nic->phy == phy_82552_v) ? E100_82552_LED_OFF : led_off;
		break;

	case ETHTOOL_ID_INACTIVE:
		break;
	}

	mdio_write(netdev, nic->mii.phy_id, led_reg, leds);
	return 0;
}
       
  2796 
       
/* Statistic names for ethtool -S.  The first E100_NET_STATS_LEN entries
 * mirror the generic net_device_stats fields (in declaration order);
 * the rest are driver-specific counters appended by
 * e100_get_ethtool_stats() in the same order as listed here. */
static const char e100_gstrings_stats[][ETH_GSTRING_LEN] = {
	"rx_packets", "tx_packets", "rx_bytes", "tx_bytes", "rx_errors",
	"tx_errors", "rx_dropped", "tx_dropped", "multicast", "collisions",
	"rx_length_errors", "rx_over_errors", "rx_crc_errors",
	"rx_frame_errors", "rx_fifo_errors", "rx_missed_errors",
	"tx_aborted_errors", "tx_carrier_errors", "tx_fifo_errors",
	"tx_heartbeat_errors", "tx_window_errors",
	/* device-specific stats */
	"tx_deferred", "tx_single_collisions", "tx_multi_collisions",
	"tx_flow_control_pause", "rx_flow_control_pause",
	"rx_flow_control_unsupported", "tx_tco_packets", "rx_tco_packets",
	"rx_short_frame_errors", "rx_over_length_errors",
};
#define E100_NET_STATS_LEN	21
#define E100_STATS_LEN	ARRAY_SIZE(e100_gstrings_stats)
       
  2812 
       
  2813 static int e100_get_sset_count(struct net_device *netdev, int sset)
       
  2814 {
       
  2815 	switch (sset) {
       
  2816 	case ETH_SS_TEST:
       
  2817 		return E100_TEST_LEN;
       
  2818 	case ETH_SS_STATS:
       
  2819 		return E100_STATS_LEN;
       
  2820 	default:
       
  2821 		return -EOPNOTSUPP;
       
  2822 	}
       
  2823 }
       
  2824 
       
/* ethtool -S: copy the first E100_NET_STATS_LEN generic counters out of
 * netdev->stats (treated as an array of unsigned long — relies on the
 * struct being laid out as consecutive unsigned longs), then append the
 * driver-specific counters in e100_gstrings_stats order. */
static void e100_get_ethtool_stats(struct net_device *netdev,
	struct ethtool_stats *stats, u64 *data)
{
	struct nic *nic = netdev_priv(netdev);
	int i;

	for (i = 0; i < E100_NET_STATS_LEN; i++)
		data[i] = ((unsigned long *)&netdev->stats)[i];

	data[i++] = nic->tx_deferred;
	data[i++] = nic->tx_single_collisions;
	data[i++] = nic->tx_multiple_collisions;
	data[i++] = nic->tx_fc_pause;
	data[i++] = nic->rx_fc_pause;
	data[i++] = nic->rx_fc_unsupported;
	data[i++] = nic->tx_tco_frames;
	data[i++] = nic->rx_tco_frames;
	data[i++] = nic->rx_short_frame_errors;
	data[i++] = nic->rx_over_length_errors;
}
       
  2845 
       
/* ethtool: copy the requested string table (test names or stat names)
 * into the caller's buffer. */
static void e100_get_strings(struct net_device *netdev, u32 stringset, u8 *data)
{
	switch (stringset) {
	case ETH_SS_TEST:
		memcpy(data, *e100_gstrings_test, sizeof(e100_gstrings_test));
		break;
	case ETH_SS_STATS:
		memcpy(data, *e100_gstrings_stats, sizeof(e100_gstrings_stats));
		break;
	}
}
       
  2857 
       
/* ethtool entry points exported by this driver */
static const struct ethtool_ops e100_ethtool_ops = {
	.get_settings		= e100_get_settings,
	.set_settings		= e100_set_settings,
	.get_drvinfo		= e100_get_drvinfo,
	.get_regs_len		= e100_get_regs_len,
	.get_regs		= e100_get_regs,
	.get_wol		= e100_get_wol,
	.set_wol		= e100_set_wol,
	.get_msglevel		= e100_get_msglevel,
	.set_msglevel		= e100_set_msglevel,
	.nway_reset		= e100_nway_reset,
	.get_link		= e100_get_link,
	.get_eeprom_len		= e100_get_eeprom_len,
	.get_eeprom		= e100_get_eeprom,
	.set_eeprom		= e100_set_eeprom,
	.get_ringparam		= e100_get_ringparam,
	.set_ringparam		= e100_set_ringparam,
	.self_test		= e100_diag_test,
	.get_strings		= e100_get_strings,
	.set_phys_id		= e100_set_phys_id,
	.get_ethtool_stats	= e100_get_ethtool_stats,
	.get_sset_count		= e100_get_sset_count,
};
       
  2881 
       
  2882 static int e100_do_ioctl(struct net_device *netdev, struct ifreq *ifr, int cmd)
       
  2883 {
       
  2884 	struct nic *nic = netdev_priv(netdev);
       
  2885 
       
  2886 	return generic_mii_ioctl(&nic->mii, if_mii(ifr), cmd, NULL);
       
  2887 }
       
  2888 
       
  2889 static int e100_alloc(struct nic *nic)
       
  2890 {
       
  2891 	nic->mem = pci_alloc_consistent(nic->pdev, sizeof(struct mem),
       
  2892 		&nic->dma_addr);
       
  2893 	return nic->mem ? 0 : -ENOMEM;
       
  2894 }
       
  2895 
       
  2896 static void e100_free(struct nic *nic)
       
  2897 {
       
  2898 	if (nic->mem) {
       
  2899 		pci_free_consistent(nic->pdev, sizeof(struct mem),
       
  2900 			nic->mem, nic->dma_addr);
       
  2901 		nic->mem = NULL;
       
  2902 	}
       
  2903 }
       
  2904 
       
  2905 static int e100_open(struct net_device *netdev)
       
  2906 {
       
  2907 	struct nic *nic = netdev_priv(netdev);
       
  2908 	int err = 0;
       
  2909 
       
  2910 	if (!nic->ecdev)
       
  2911 		netif_carrier_off(netdev);
       
  2912 	if ((err = e100_up(nic)))
       
  2913 		netif_err(nic, ifup, nic->netdev, "Cannot open interface, aborting\n");
       
  2914 	return err;
       
  2915 }
       
  2916 
       
  2917 static int e100_close(struct net_device *netdev)
       
  2918 {
       
  2919 	e100_down(netdev_priv(netdev));
       
  2920 	return 0;
       
  2921 }
       
  2922 
       
  2923 static int e100_set_features(struct net_device *netdev,
       
  2924 			     netdev_features_t features)
       
  2925 {
       
  2926 	struct nic *nic = netdev_priv(netdev);
       
  2927 	netdev_features_t changed = features ^ netdev->features;
       
  2928 
       
  2929 	if (!(changed & (NETIF_F_RXFCS | NETIF_F_RXALL)))
       
  2930 		return 0;
       
  2931 
       
  2932 	netdev->features = features;
       
  2933 	e100_exec_cb(nic, NULL, e100_configure);
       
  2934 	return 0;
       
  2935 }
       
  2936 
       
/* Network stack entry points (used only when the device is registered as
 * a normal Linux net device, i.e. not claimed by the EtherCAT master). */
static const struct net_device_ops e100_netdev_ops = {
	.ndo_open		= e100_open,
	.ndo_stop		= e100_close,
	.ndo_start_xmit		= e100_xmit_frame,
	.ndo_validate_addr	= eth_validate_addr,
	.ndo_set_rx_mode	= e100_set_multicast_list,
	.ndo_set_mac_address	= e100_set_mac_address,
	.ndo_change_mtu		= e100_change_mtu,
	.ndo_do_ioctl		= e100_do_ioctl,
	.ndo_tx_timeout		= e100_tx_timeout,
#ifdef CONFIG_NET_POLL_CONTROLLER
	.ndo_poll_controller	= e100_netpoll,
#endif
	.ndo_set_features	= e100_set_features,
};
       
  2952 
       
  2953 static int __devinit e100_probe(struct pci_dev *pdev,
       
  2954 	const struct pci_device_id *ent)
       
  2955 {
       
  2956 	struct net_device *netdev;
       
  2957 	struct nic *nic;
       
  2958 	int err;
       
  2959 
       
  2960 	if (!(netdev = alloc_etherdev(sizeof(struct nic))))
       
  2961 		return -ENOMEM;
       
  2962 
       
  2963 	netdev->hw_features |= NETIF_F_RXFCS;
       
  2964 	netdev->priv_flags |= IFF_SUPP_NOFCS;
       
  2965 	netdev->hw_features |= NETIF_F_RXALL;
       
  2966 
       
  2967 	netdev->netdev_ops = &e100_netdev_ops;
       
  2968 	SET_ETHTOOL_OPS(netdev, &e100_ethtool_ops);
       
  2969 	netdev->watchdog_timeo = E100_WATCHDOG_PERIOD;
       
  2970 	strncpy(netdev->name, pci_name(pdev), sizeof(netdev->name) - 1);
       
  2971 
       
  2972 	nic = netdev_priv(netdev);
       
  2973 	netif_napi_add(netdev, &nic->napi, e100_poll, E100_NAPI_WEIGHT);
       
  2974 	nic->netdev = netdev;
       
  2975 	nic->pdev = pdev;
       
  2976 	nic->msg_enable = (1 << debug) - 1;
       
  2977 	nic->mdio_ctrl = mdio_ctrl_hw;
       
  2978 	pci_set_drvdata(pdev, netdev);
       
  2979 
       
  2980 	if ((err = pci_enable_device(pdev))) {
       
  2981 		netif_err(nic, probe, nic->netdev, "Cannot enable PCI device, aborting\n");
       
  2982 		goto err_out_free_dev;
       
  2983 	}
       
  2984 
       
  2985 	if (!(pci_resource_flags(pdev, 0) & IORESOURCE_MEM)) {
       
  2986 		netif_err(nic, probe, nic->netdev, "Cannot find proper PCI device base address, aborting\n");
       
  2987 		err = -ENODEV;
       
  2988 		goto err_out_disable_pdev;
       
  2989 	}
       
  2990 
       
  2991 	if ((err = pci_request_regions(pdev, DRV_NAME))) {
       
  2992 		netif_err(nic, probe, nic->netdev, "Cannot obtain PCI resources, aborting\n");
       
  2993 		goto err_out_disable_pdev;
       
  2994 	}
       
  2995 
       
  2996 	if ((err = pci_set_dma_mask(pdev, DMA_BIT_MASK(32)))) {
       
  2997 		netif_err(nic, probe, nic->netdev, "No usable DMA configuration, aborting\n");
       
  2998 		goto err_out_free_res;
       
  2999 	}
       
  3000 
       
  3001 	SET_NETDEV_DEV(netdev, &pdev->dev);
       
  3002 
       
  3003 	if (use_io)
       
  3004 		netif_info(nic, probe, nic->netdev, "using i/o access mode\n");
       
  3005 
       
  3006 	nic->csr = pci_iomap(pdev, (use_io ? 1 : 0), sizeof(struct csr));
       
  3007 	if (!nic->csr) {
       
  3008 		netif_err(nic, probe, nic->netdev, "Cannot map device registers, aborting\n");
       
  3009 		err = -ENOMEM;
       
  3010 		goto err_out_free_res;
       
  3011 	}
       
  3012 
       
  3013 	if (ent->driver_data)
       
  3014 		nic->flags |= ich;
       
  3015 	else
       
  3016 		nic->flags &= ~ich;
       
  3017 
       
  3018 	e100_get_defaults(nic);
       
  3019 
       
  3020 	/* D100 MAC doesn't allow rx of vlan packets with normal MTU */
       
  3021 	if (nic->mac < mac_82558_D101_A4)
       
  3022 		netdev->features |= NETIF_F_VLAN_CHALLENGED;
       
  3023 
       
  3024 	/* locks must be initialized before calling hw_reset */
       
  3025 	spin_lock_init(&nic->cb_lock);
       
  3026 	spin_lock_init(&nic->cmd_lock);
       
  3027 	spin_lock_init(&nic->mdio_lock);
       
  3028 
       
  3029 	/* Reset the device before pci_set_master() in case device is in some
       
  3030 	 * funky state and has an interrupt pending - hint: we don't have the
       
  3031 	 * interrupt handler registered yet. */
       
  3032 	e100_hw_reset(nic);
       
  3033 
       
  3034 	pci_set_master(pdev);
       
  3035 
       
  3036 	init_timer(&nic->watchdog);
       
  3037 	nic->watchdog.function = e100_watchdog;
       
  3038 	nic->watchdog.data = (unsigned long)nic;
       
  3039 
       
  3040 	INIT_WORK(&nic->tx_timeout_task, e100_tx_timeout_task);
       
  3041 
       
  3042 	if ((err = e100_alloc(nic))) {
       
  3043 		netif_err(nic, probe, nic->netdev, "Cannot alloc driver memory, aborting\n");
       
  3044 		goto err_out_iounmap;
       
  3045 	}
       
  3046 
       
  3047 	if ((err = e100_eeprom_load(nic)))
       
  3048 		goto err_out_free;
       
  3049 
       
  3050 	e100_phy_init(nic);
       
  3051 
       
  3052 	memcpy(netdev->dev_addr, nic->eeprom, ETH_ALEN);
       
  3053 	memcpy(netdev->perm_addr, nic->eeprom, ETH_ALEN);
       
  3054 	if (!is_valid_ether_addr(netdev->perm_addr)) {
       
  3055 		if (!eeprom_bad_csum_allow) {
       
  3056 			netif_err(nic, probe, nic->netdev, "Invalid MAC address from EEPROM, aborting\n");
       
  3057 			err = -EAGAIN;
       
  3058 			goto err_out_free;
       
  3059 		} else {
       
  3060 			netif_err(nic, probe, nic->netdev, "Invalid MAC address from EEPROM, you MUST configure one.\n");
       
  3061 		}
       
  3062 	}
       
  3063 
       
  3064 	/* Wol magic packet can be enabled from eeprom */
       
  3065 	if ((nic->mac >= mac_82558_D101_A4) &&
       
  3066 	   (nic->eeprom[eeprom_id] & eeprom_id_wol)) {
       
  3067 		nic->flags |= wol_magic;
       
  3068 		device_set_wakeup_enable(&pdev->dev, true);
       
  3069 	}
       
  3070 
       
  3071 	/* ack any pending wake events, disable PME */
       
  3072 	pci_pme_active(pdev, false);
       
  3073 
       
  3074 	// offer device to EtherCAT master module
       
  3075 	nic->ecdev = ecdev_offer(netdev, e100_ec_poll, THIS_MODULE);
       
  3076 
       
  3077 	if (!nic->ecdev) {
       
  3078 		strcpy(netdev->name, "eth%d");
       
  3079 		if ((err = register_netdev(netdev))) {
       
  3080 			netif_err(nic, probe, nic->netdev,
       
  3081 					"Cannot register net device, aborting\n");
       
  3082 			goto err_out_free;
       
  3083 		}
       
  3084 	}
       
  3085 
       
  3086 	nic->cbs_pool = pci_pool_create(netdev->name,
       
  3087 			   nic->pdev,
       
  3088 			   nic->params.cbs.max * sizeof(struct cb),
       
  3089 			   sizeof(u32),
       
  3090 			   0);
       
  3091 	netif_info(nic, probe, nic->netdev,
       
  3092 		   "addr 0x%llx, irq %d, MAC addr %pM\n",
       
  3093 		   (unsigned long long)pci_resource_start(pdev, use_io ? 1 : 0),
       
  3094 		   pdev->irq, netdev->dev_addr);
       
  3095 
       
  3096 	if (nic->ecdev) {
       
  3097 		if (ecdev_open(nic->ecdev)) {
       
  3098 			ecdev_withdraw(nic->ecdev);
       
  3099 			goto err_out_free;
       
  3100 		}
       
  3101 	}
       
  3102 
       
  3103 	return 0;
       
  3104 
       
  3105 err_out_free:
       
  3106 	e100_free(nic);
       
  3107 err_out_iounmap:
       
  3108 	pci_iounmap(pdev, nic->csr);
       
  3109 err_out_free_res:
       
  3110 	pci_release_regions(pdev);
       
  3111 err_out_disable_pdev:
       
  3112 	pci_disable_device(pdev);
       
  3113 err_out_free_dev:
       
  3114 	pci_set_drvdata(pdev, NULL);
       
  3115 	free_netdev(netdev);
       
  3116 	return err;
       
  3117 }
       
  3118 
       
  3119 static void __devexit e100_remove(struct pci_dev *pdev)
       
  3120 {
       
  3121 	struct net_device *netdev = pci_get_drvdata(pdev);
       
  3122 
       
  3123 	if (netdev) {
       
  3124 		struct nic *nic = netdev_priv(netdev);
       
  3125 		if (nic->ecdev) {
       
  3126 			ecdev_close(nic->ecdev);
       
  3127 			ecdev_withdraw(nic->ecdev);
       
  3128 		} else {
       
  3129 			unregister_netdev(netdev);
       
  3130 		}
       
  3131 
       
  3132 		e100_free(nic);
       
  3133 		pci_iounmap(pdev, nic->csr);
       
  3134 		pci_pool_destroy(nic->cbs_pool);
       
  3135 		free_netdev(netdev);
       
  3136 		pci_release_regions(pdev);
       
  3137 		pci_disable_device(pdev);
       
  3138 		pci_set_drvdata(pdev, NULL);
       
  3139 	}
       
  3140 }
       
  3141 
       
#define E100_82552_SMARTSPEED   0x14   /* SmartSpeed Ctrl register */
#define E100_82552_REV_ANEG     0x0200 /* Reverse auto-negotiation */
#define E100_82552_ANEG_NOW     0x0400 /* Auto-negotiate now */

/* Common suspend/shutdown path: stop the interface, save PCI config
 * space and report via *enable_wake whether wake-up should stay armed.
 * NOTE(review): does not special-case nic->ecdev -- presumably the
 * system is not suspended while the EtherCAT master owns the device;
 * confirm against the master module's expectations. */
static void __e100_shutdown(struct pci_dev *pdev, bool *enable_wake)
{
	struct net_device *netdev = pci_get_drvdata(pdev);
	struct nic *nic = netdev_priv(netdev);

	if (netif_running(netdev))
		e100_down(nic);
	netif_device_detach(netdev);

	pci_save_state(pdev);

	/* Arm wake-up if WoL magic packet is enabled or ASF is active.
	 * (bitwise '|' behaves like logical OR here: both operands are
	 * non-negative, so the truth value is the same) */
	if ((nic->flags & wol_magic) | e100_asf(nic)) {
		/* enable reverse auto-negotiation */
		if (nic->phy == phy_82552_v) {
			u16 smartspeed = mdio_read(netdev, nic->mii.phy_id,
			                           E100_82552_SMARTSPEED);

			mdio_write(netdev, nic->mii.phy_id,
			           E100_82552_SMARTSPEED, smartspeed |
			           E100_82552_REV_ANEG | E100_82552_ANEG_NOW);
		}
		*enable_wake = true;
	} else {
		*enable_wake = false;
	}

	pci_disable_device(pdev);
}
       
  3173 
       
  3174 static int __e100_power_off(struct pci_dev *pdev, bool wake)
       
  3175 {
       
  3176 	if (wake)
       
  3177 		return pci_prepare_to_sleep(pdev);
       
  3178 
       
  3179 	pci_wake_from_d3(pdev, false);
       
  3180 	pci_set_power_state(pdev, PCI_D3hot);
       
  3181 
       
  3182 	return 0;
       
  3183 }
       
  3184 
       
  3185 #ifdef CONFIG_PM
       
  3186 static int e100_suspend(struct pci_dev *pdev, pm_message_t state)
       
  3187 {
       
  3188 	bool wake;
       
  3189 	__e100_shutdown(pdev, &wake);
       
  3190 	return __e100_power_off(pdev, wake);
       
  3191 }
       
  3192 
       
/* PM resume hook: restore power state and config space, undo the
 * reverse-autoneg setting applied at suspend, and bring the interface
 * back up if it was running. */
static int e100_resume(struct pci_dev *pdev)
{
	struct net_device *netdev = pci_get_drvdata(pdev);
	struct nic *nic = netdev_priv(netdev);

	/* Back to full power, then restore the state saved in
	 * __e100_shutdown(). */
	pci_set_power_state(pdev, PCI_D0);
	pci_restore_state(pdev);
	/* ack any pending wake events, disable PME */
	pci_enable_wake(pdev, 0, 0);

	/* disable reverse auto-negotiation */
	if (nic->phy == phy_82552_v) {
		u16 smartspeed = mdio_read(netdev, nic->mii.phy_id,
		                           E100_82552_SMARTSPEED);

		mdio_write(netdev, nic->mii.phy_id,
		           E100_82552_SMARTSPEED,
		           smartspeed & ~(E100_82552_REV_ANEG));
	}

	netif_device_attach(netdev);
	if (netif_running(netdev))
		e100_up(nic);

	return 0;
}
       
  3219 #endif /* CONFIG_PM */
       
  3220 
       
  3221 static void e100_shutdown(struct pci_dev *pdev)
       
  3222 {
       
  3223 	bool wake;
       
  3224 	__e100_shutdown(pdev, &wake);
       
  3225 	if (system_state == SYSTEM_POWER_OFF)
       
  3226 		__e100_power_off(pdev, wake);
       
  3227 }
       
  3228 
       
  3229 /* ------------------ PCI Error Recovery infrastructure  -------------- */
       
  3230 /**
       
  3231  * e100_io_error_detected - called when PCI error is detected.
       
  3232  * @pdev: Pointer to PCI device
       
  3233  * @state: The current pci connection state
       
  3234  */
       
  3235 static pci_ers_result_t e100_io_error_detected(struct pci_dev *pdev, pci_channel_state_t state)
       
  3236 {
       
  3237 	struct net_device *netdev = pci_get_drvdata(pdev);
       
  3238 	struct nic *nic = netdev_priv(netdev);
       
  3239 
       
  3240 	if (nic->ecdev)
       
  3241 		return -EBUSY;
       
  3242 
       
  3243 	netif_device_detach(netdev);
       
  3244 
       
  3245 	if (state == pci_channel_io_perm_failure)
       
  3246 		return PCI_ERS_RESULT_DISCONNECT;
       
  3247 
       
  3248 	if (netif_running(netdev))
       
  3249 		e100_down(nic);
       
  3250 	pci_disable_device(pdev);
       
  3251 
       
  3252 	/* Request a slot reset. */
       
  3253 	return PCI_ERS_RESULT_NEED_RESET;
       
  3254 }
       
  3255 
       
  3256 /**
       
  3257  * e100_io_slot_reset - called after the pci bus has been reset.
       
  3258  * @pdev: Pointer to PCI device
       
  3259  *
       
  3260  * Restart the card from scratch.
       
  3261  */
       
  3262 static pci_ers_result_t e100_io_slot_reset(struct pci_dev *pdev)
       
  3263 {
       
  3264 	struct net_device *netdev = pci_get_drvdata(pdev);
       
  3265 	struct nic *nic = netdev_priv(netdev);
       
  3266 
       
  3267 	if (nic->ecdev)
       
  3268 		return -EBUSY;
       
  3269 
       
  3270 	if (pci_enable_device(pdev)) {
       
  3271 		pr_err("Cannot re-enable PCI device after reset\n");
       
  3272 		return PCI_ERS_RESULT_DISCONNECT;
       
  3273 	}
       
  3274 	pci_set_master(pdev);
       
  3275 
       
  3276 	/* Only one device per card can do a reset */
       
  3277 	if (0 != PCI_FUNC(pdev->devfn))
       
  3278 		return PCI_ERS_RESULT_RECOVERED;
       
  3279 	e100_hw_reset(nic);
       
  3280 	e100_phy_init(nic);
       
  3281 
       
  3282 	return PCI_ERS_RESULT_RECOVERED;
       
  3283 }
       
  3284 
       
/**
 * e100_io_resume - resume normal operations
 * @pdev: Pointer to PCI device
 *
 * Resume normal operations after an error recovery
 * sequence has been completed.
 */
static void e100_io_resume(struct pci_dev *pdev)
{
	struct net_device *netdev = pci_get_drvdata(pdev);
	struct nic *nic = netdev_priv(netdev);

	/* ack any pending wake events, disable PME */
	pci_enable_wake(pdev, 0, 0);

	/* An EtherCAT-claimed device is not attached to the net stack;
	 * only re-attach the regular net device case. */
	if (!nic->ecdev)
		netif_device_attach(netdev);
	/* Restart the interface for EtherCAT devices unconditionally,
	 * otherwise only if it was running before the error. */
	if (nic->ecdev || netif_running(netdev)) {
		e100_open(netdev);
		if (!nic->ecdev)
			mod_timer(&nic->watchdog, jiffies);
	}
}
       
  3308 
       
/* PCI error recovery callbacks (AER); see the handlers above. */
static struct pci_error_handlers e100_err_handler = {
	.error_detected = e100_io_error_detected,
	.slot_reset = e100_io_slot_reset,
	.resume = e100_io_resume,
};
       
  3314 
       
/* PCI driver descriptor; power-management hooks are only compiled in
 * when CONFIG_PM is enabled. */
static struct pci_driver e100_driver = {
	.name =         DRV_NAME,
	.id_table =     e100_id_table,
	.probe =        e100_probe,
	.remove =       __devexit_p(e100_remove),
#ifdef CONFIG_PM
	/* Power Management hooks */
	.suspend =      e100_suspend,
	.resume =       e100_resume,
#endif
	.shutdown =     e100_shutdown,
	.err_handler = &e100_err_handler,
};
       
  3328 
       
/* Module init: print the banner when driver messages are enabled by the
 * "debug" module parameter ((1 << debug) - 1 turns the level into a
 * netif message bitmask), then register with the PCI core. */
static int __init e100_init_module(void)
{
	if (((1 << debug) - 1) & NETIF_MSG_DRV) {
		pr_info("%s %s, %s\n", DRV_NAME, DRV_DESCRIPTION, DRV_VERSION);
		pr_info("%s\n", DRV_COPYRIGHT);
	}
	return pci_register_driver(&e100_driver);
}
       
  3337 
       
/* Module exit: unregister the PCI driver, which triggers e100_remove()
 * for every bound device. */
static void __exit e100_cleanup_module(void)
{
	printk(KERN_INFO DRV_NAME " cleaning up module...\n");
	pci_unregister_driver(&e100_driver);
	printk(KERN_INFO DRV_NAME " module cleaned up.\n");
}
       
  3344 
       
  3345 module_init(e100_init_module);
       
  3346 module_exit(e100_cleanup_module);