devices/e100-3.10-ethercat.c
branch stable-1.5
changeset 2585:26480934a057
       
     1 /******************************************************************************
       
     2  *
       
     3  *  $Id$
       
     4  *
       
     5  *  Copyright (C) 2007-2012  Florian Pose, Ingenieurgemeinschaft IgH
       
     6  *
       
     7  *  This file is part of the IgH EtherCAT Master.
       
     8  *
       
     9  *  The IgH EtherCAT Master is free software; you can redistribute it and/or
       
    10  *  modify it under the terms of the GNU General Public License version 2, as
       
    11  *  published by the Free Software Foundation.
       
    12  *
       
    13  *  The IgH EtherCAT Master is distributed in the hope that it will be useful,
       
    14  *  but WITHOUT ANY WARRANTY; without even the implied warranty of
       
    15  *  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General
       
    16  *  Public License for more details.
       
    17  *
       
    18  *  You should have received a copy of the GNU General Public License along
       
    19  *  with the IgH EtherCAT Master; if not, write to the Free Software
       
    20  *  Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA  02110-1301  USA
       
    21  *
       
    22  *  ---
       
    23  *
       
    24  *  The license mentioned above concerns the source code only. Using the
       
    25  *  EtherCAT technology and brand is only permitted in compliance with the
       
    26  *  industrial property and similar rights of Beckhoff Automation GmbH.
       
    27  *
       
    28  *  ---
       
    29  *
       
    30  *  vim: noexpandtab
       
    31  *
       
    32  *****************************************************************************/
       
    33 
       
    34 /**
       
    35    \file
       
    36    EtherCAT driver for e100-compatible NICs.
       
    37 */
       
    38 
       
    39 /* Former documentation: */
       
    40 
       
    41 /*******************************************************************************
       
    42 
       
    43   Intel PRO/100 Linux driver
       
    44   Copyright(c) 1999 - 2006 Intel Corporation.
       
    45 
       
    46   This program is free software; you can redistribute it and/or modify it
       
    47   under the terms and conditions of the GNU General Public License,
       
    48   version 2, as published by the Free Software Foundation.
       
    49 
       
    50   This program is distributed in the hope it will be useful, but WITHOUT
       
    51   ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
       
    52   FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
       
    53   more details.
       
    54 
       
    55   You should have received a copy of the GNU General Public License along with
       
    56   this program; if not, write to the Free Software Foundation, Inc.,
       
    57   51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
       
    58 
       
    59   The full GNU General Public License is included in this distribution in
       
    60   the file called "COPYING".
       
    61 
       
    62   Contact Information:
       
    63   Linux NICS <linux.nics@intel.com>
       
    64   e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
       
    65   Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
       
    66 
       
    67 *******************************************************************************/
       
    68 
       
    69 /*
       
    70  *	e100.c: Intel(R) PRO/100 ethernet driver
       
    71  *
       
    72  *	(Re)written 2003 by scott.feldman@intel.com.  Based loosely on
       
    73  *	original e100 driver, but better described as a munging of
       
    74  *	e100, e1000, eepro100, tg3, 8139cp, and other drivers.
       
    75  *
       
    76  *	References:
       
    77  *		Intel 8255x 10/100 Mbps Ethernet Controller Family,
       
    78  *		Open Source Software Developers Manual,
       
    79  *		http://sourceforge.net/projects/e1000
       
    80  *
       
    81  *
       
    82  *	                      Theory of Operation
       
    83  *
       
    84  *	I.   General
       
    85  *
       
    86  *	The driver supports Intel(R) 10/100 Mbps PCI Fast Ethernet
       
    87  *	controller family, which includes the 82557, 82558, 82559, 82550,
       
    88  *	82551, and 82562 devices.  82558 and greater controllers
       
    89  *	integrate the Intel 82555 PHY.  The controllers are used in
       
    90  *	server and client network interface cards, as well as in
       
    91  *	LAN-On-Motherboard (LOM), CardBus, MiniPCI, and ICHx
       
    92  *	configurations.  8255x supports a 32-bit linear addressing
       
    93  *	mode and operates at a 33 MHz PCI clock rate.
       
    94  *
       
    95  *	II.  Driver Operation
       
    96  *
       
    97  *	Memory-mapped mode is used exclusively to access the device's
       
    98  *	shared-memory structure, the Control/Status Registers (CSR). All
       
    99  *	setup, configuration, and control of the device, including queuing
       
   100  *	of Tx, Rx, and configuration commands is through the CSR.
       
   101  *	cmd_lock serializes accesses to the CSR command register.  cb_lock
       
   102  *	protects the shared Command Block List (CBL).
       
   103  *
       
   104  *	8255x is highly MII-compliant and all accesses to the PHY go
       
   105  *	through the Management Data Interface (MDI).  Consequently, the
       
   106  *	driver leverages the mii.c library shared with other MII-compliant
       
   107  *	devices.
       
   108  *
       
   109  *	Big- and Little-Endian byte order as well as 32- and 64-bit
       
   110  *	archs are supported.  Weak-ordered memory and non-cache-coherent
       
   111  *	archs are supported.
       
   112  *
       
   113  *	III. Transmit
       
   114  *
       
   115  *	A Tx skb is mapped and hangs off of a TCB.  TCBs are linked
       
   116  *	together in a fixed-size ring (CBL) thus forming the flexible mode
       
   117  *	memory structure.  A TCB marked with the suspend-bit indicates
       
   118  *	the end of the ring.  The last TCB processed suspends the
       
   119  *	controller, and the controller can be restarted by issuing a CU
       
   120  *	resume command to continue from the suspend point, or a CU start
       
   121  *	command to start at a given position in the ring.
       
   122  *
       
   123  *	Non-Tx commands (config, multicast setup, etc) are linked
       
   124  *	into the CBL ring along with Tx commands.  The common structure
       
   125  *	used for both Tx and non-Tx commands is the Command Block (CB).
       
   126  *
       
   127  *	cb_to_use is the next CB to use for queuing a command; cb_to_clean
       
   128  *	is the next CB to check for completion; cb_to_send is the first
       
   129  *	CB to start on in case of a previous failure to resume.  CB clean
       
   130  *	up happens in interrupt context in response to a CU interrupt.
       
   131  *	cbs_avail keeps track of number of free CB resources available.
       
   132  *
       
   133  * 	Hardware padding of short packets to minimum packet size is
       
   134  * 	enabled.  82557 pads with 7Eh, while the later controllers pad
       
   135  * 	with 00h.
       
   136  *
       
   137  *	IV.  Receive
       
   138  *
       
   139  *	The Receive Frame Area (RFA) comprises a ring of Receive Frame
       
   140  *	Descriptors (RFD) + data buffer, thus forming the simplified mode
       
   141  *	memory structure.  Rx skbs are allocated to contain both the RFD
       
   142  *	and the data buffer, but the RFD is pulled off before the skb is
       
   143  *	indicated.  The data buffer is aligned such that encapsulated
       
   144  *	protocol headers are u32-aligned.  Since the RFD is part of the
       
   145  *	mapped shared memory, and completion status is contained within
       
   146  *	the RFD, the RFD must be dma_sync'ed to maintain a consistent
       
   147  *	view from software and hardware.
       
   148  *
       
   149  *	In order to keep updates to the RFD link field from colliding with
       
   150  *	hardware writes to mark packets complete, we use the feature that
       
   151  *	hardware will not write to a size 0 descriptor and mark the previous
       
   152  *	packet as end-of-list (EL).   After updating the link, we remove EL
       
   153  *	and only then restore the size such that hardware may use the
       
   154  *	previous-to-end RFD.
       
   155  *
       
   156  *	Under typical operation, the receive unit (RU) is started once,
       
   157  *	and the controller happily fills RFDs as frames arrive.  If
       
   158  *	replacement RFDs cannot be allocated, or the RU goes non-active,
       
   159  *	the RU must be restarted.  Frame arrival generates an interrupt,
       
   160  *	and Rx indication and re-allocation happen in the same context,
       
   161  *	therefore no locking is required.  A software-generated interrupt
       
   162  *	is generated from the watchdog to recover from a failed allocation
       
   163  *	scenario where all Rx resources have been indicated and none re-
       
   164  *	placed.
       
   165  *
       
   166  *	V.   Miscellaneous
       
   167  *
       
   168  * 	VLAN offloading of tagging, stripping and filtering is not
       
   169  * 	supported, but the driver will accommodate the extra 4-byte VLAN tag
       
   170  * 	for processing by upper layers.  Tx/Rx Checksum offloading is not
       
   171  * 	supported.  Tx Scatter/Gather is not supported.  Jumbo Frames are
       
   172  * 	not supported (hardware limitation).
       
   173  *
       
   174  * 	MagicPacket(tm) WoL support is enabled/disabled via ethtool.
       
   175  *
       
   176  * 	Thanks to JC (jchapman@katalix.com) for helping with
       
   177  * 	testing/troubleshooting the development driver.
       
   178  *
       
   179  * 	TODO:
       
   180  * 	o several entry points race with dev->close
       
   181  * 	o check for tx-no-resources/stop Q races with tx clean/wake Q
       
   182  *
       
   183  *	FIXES:
       
   184  * 2005/12/02 - Michael O'Donnell <Michael.ODonnell at stratus dot com>
       
   185  *	- Stratus87247: protect MDI control register manipulations
       
   186  * 2009/06/01 - Andreas Mohr <andi at lisas dot de>
       
   187  *      - add clean low-level I/O emulation for cards with MII-lacking PHYs
       
   188  */
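        /* Illustrative sketch of the CBL ring from section III above (a
         * reading aid only; the names match the struct nic fields defined
         * further down in this file):
         *
         *        cb_to_clean            cb_to_send            cb_to_use
         *             |                      |                     |
         *             v                      v                     v
         *   ...[ completed CBs ]---[ queued, not started ]---[ free CBs ]...
         *             ^                                            |
         *             +--------------- fixed-size ring ------------+
         *
         * Commands are queued at cb_to_use, handed to the CU starting at
         * cb_to_send, and reclaimed at cb_to_clean once their complete bit
         * is set; the S (suspend) bit on the newest CB marks the end of
         * the ring.
         */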
       
   189 
       
   190 #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
       
   191 
       
   192 #include <linux/hardirq.h>
       
   193 #include <linux/interrupt.h>
       
   194 #include <linux/module.h>
       
   195 #include <linux/moduleparam.h>
       
   196 #include <linux/kernel.h>
       
   197 #include <linux/types.h>
       
   198 #include <linux/sched.h>
       
   199 #include <linux/slab.h>
       
   200 #include <linux/delay.h>
       
   201 #include <linux/init.h>
       
   202 #include <linux/pci.h>
       
   203 #include <linux/dma-mapping.h>
       
   204 #include <linux/dmapool.h>
       
   205 #include <linux/netdevice.h>
       
   206 #include <linux/etherdevice.h>
       
   207 #include <linux/mii.h>
       
   208 #include <linux/if_vlan.h>
       
   209 #include <linux/skbuff.h>
       
   210 #include <linux/ethtool.h>
       
   211 #include <linux/string.h>
       
   212 #include <linux/firmware.h>
       
   213 #include <linux/rtnetlink.h>
       
   214 #include <asm/unaligned.h>
       
   215 
       
   216 // EtherCAT includes
       
   217 #include "../globals.h"
       
   218 #include "ecdev.h"
       
   219 
       
   220 #define DRV_NAME		"ec_e100"
       
   221 #define DRV_EXT			"-NAPI"
       
   222 #define DRV_VERSION		"3.5.24-k2"DRV_EXT
       
   223 #define DRV_DESCRIPTION		"Intel(R) PRO/100 Network Driver"
       
   224 #define DRV_COPYRIGHT		"Copyright(c) 1999-2006 Intel Corporation"
       
   225 
       
   226 #define E100_WATCHDOG_PERIOD	(2 * HZ)
       
   227 #define E100_NAPI_WEIGHT	16
       
   228 
       
   229 #define FIRMWARE_D101M		"e100/d101m_ucode.bin"
       
   230 #define FIRMWARE_D101S		"e100/d101s_ucode.bin"
       
   231 #define FIRMWARE_D102E		"e100/d102e_ucode.bin"
       
   232 
       
   233 MODULE_DESCRIPTION(DRV_DESCRIPTION);
       
   234 MODULE_AUTHOR(DRV_COPYRIGHT);
       
   235 MODULE_LICENSE("GPL");
       
   236 MODULE_VERSION(DRV_VERSION);
       
   237 MODULE_FIRMWARE(FIRMWARE_D101M);
       
   238 MODULE_FIRMWARE(FIRMWARE_D101S);
       
   239 MODULE_FIRMWARE(FIRMWARE_D102E);
       
   240 
       
   241 MODULE_DESCRIPTION(DRV_DESCRIPTION);
       
   242 MODULE_AUTHOR("Florian Pose <fp@igh-essen.com>");
       
   243 MODULE_LICENSE("GPL");
       
   244 MODULE_VERSION(DRV_VERSION ", master " EC_MASTER_VERSION);
       
   245 
       
   246 void e100_ec_poll(struct net_device *);
       
   247 
       
   248 static int debug = 3;
       
   249 static int eeprom_bad_csum_allow = 0;
       
   250 static int use_io = 0;
       
   251 module_param(debug, int, 0);
       
   252 module_param(eeprom_bad_csum_allow, int, 0);
       
   253 module_param(use_io, int, 0);
       
   254 MODULE_PARM_DESC(debug, "Debug level (0=none,...,16=all)");
       
   255 MODULE_PARM_DESC(eeprom_bad_csum_allow, "Allow bad eeprom checksums");
       
   256 MODULE_PARM_DESC(use_io, "Force use of i/o access mode");
       
   257 
       
   258 #define INTEL_8255X_ETHERNET_DEVICE(device_id, ich) {\
       
   259 	PCI_VENDOR_ID_INTEL, device_id, PCI_ANY_ID, PCI_ANY_ID, \
       
   260 	PCI_CLASS_NETWORK_ETHERNET << 8, 0xFFFF00, ich }
       
   261 static DEFINE_PCI_DEVICE_TABLE(e100_id_table) = {
       
   262 	INTEL_8255X_ETHERNET_DEVICE(0x1029, 0),
       
   263 	INTEL_8255X_ETHERNET_DEVICE(0x1030, 0),
       
   264 	INTEL_8255X_ETHERNET_DEVICE(0x1031, 3),
       
   265 	INTEL_8255X_ETHERNET_DEVICE(0x1032, 3),
       
   266 	INTEL_8255X_ETHERNET_DEVICE(0x1033, 3),
       
   267 	INTEL_8255X_ETHERNET_DEVICE(0x1034, 3),
       
   268 	INTEL_8255X_ETHERNET_DEVICE(0x1038, 3),
       
   269 	INTEL_8255X_ETHERNET_DEVICE(0x1039, 4),
       
   270 	INTEL_8255X_ETHERNET_DEVICE(0x103A, 4),
       
   271 	INTEL_8255X_ETHERNET_DEVICE(0x103B, 4),
       
   272 	INTEL_8255X_ETHERNET_DEVICE(0x103C, 4),
       
   273 	INTEL_8255X_ETHERNET_DEVICE(0x103D, 4),
       
   274 	INTEL_8255X_ETHERNET_DEVICE(0x103E, 4),
       
   275 	INTEL_8255X_ETHERNET_DEVICE(0x1050, 5),
       
   276 	INTEL_8255X_ETHERNET_DEVICE(0x1051, 5),
       
   277 	INTEL_8255X_ETHERNET_DEVICE(0x1052, 5),
       
   278 	INTEL_8255X_ETHERNET_DEVICE(0x1053, 5),
       
   279 	INTEL_8255X_ETHERNET_DEVICE(0x1054, 5),
       
   280 	INTEL_8255X_ETHERNET_DEVICE(0x1055, 5),
       
   281 	INTEL_8255X_ETHERNET_DEVICE(0x1056, 5),
       
   282 	INTEL_8255X_ETHERNET_DEVICE(0x1057, 5),
       
   283 	INTEL_8255X_ETHERNET_DEVICE(0x1059, 0),
       
   284 	INTEL_8255X_ETHERNET_DEVICE(0x1064, 6),
       
   285 	INTEL_8255X_ETHERNET_DEVICE(0x1065, 6),
       
   286 	INTEL_8255X_ETHERNET_DEVICE(0x1066, 6),
       
   287 	INTEL_8255X_ETHERNET_DEVICE(0x1067, 6),
       
   288 	INTEL_8255X_ETHERNET_DEVICE(0x1068, 6),
       
   289 	INTEL_8255X_ETHERNET_DEVICE(0x1069, 6),
       
   290 	INTEL_8255X_ETHERNET_DEVICE(0x106A, 6),
       
   291 	INTEL_8255X_ETHERNET_DEVICE(0x106B, 6),
       
   292 	INTEL_8255X_ETHERNET_DEVICE(0x1091, 7),
       
   293 	INTEL_8255X_ETHERNET_DEVICE(0x1092, 7),
       
   294 	INTEL_8255X_ETHERNET_DEVICE(0x1093, 7),
       
   295 	INTEL_8255X_ETHERNET_DEVICE(0x1094, 7),
       
   296 	INTEL_8255X_ETHERNET_DEVICE(0x1095, 7),
       
   297 	INTEL_8255X_ETHERNET_DEVICE(0x10fe, 7),
       
   298 	INTEL_8255X_ETHERNET_DEVICE(0x1209, 0),
       
   299 	INTEL_8255X_ETHERNET_DEVICE(0x1229, 0),
       
   300 	INTEL_8255X_ETHERNET_DEVICE(0x2449, 2),
       
   301 	INTEL_8255X_ETHERNET_DEVICE(0x2459, 2),
       
   302 	INTEL_8255X_ETHERNET_DEVICE(0x245D, 2),
       
   303 	INTEL_8255X_ETHERNET_DEVICE(0x27DC, 7),
       
   304 	{ 0, }
       
   305 };
       
   306 
       
   307 // prevent the module from being loaded automatically
       
   308 //MODULE_DEVICE_TABLE(pci, e100_id_table);
       
   309 
       
   310 enum mac {
       
   311 	mac_82557_D100_A  = 0,
       
   312 	mac_82557_D100_B  = 1,
       
   313 	mac_82557_D100_C  = 2,
       
   314 	mac_82558_D101_A4 = 4,
       
   315 	mac_82558_D101_B0 = 5,
       
   316 	mac_82559_D101M   = 8,
       
   317 	mac_82559_D101S   = 9,
       
   318 	mac_82550_D102    = 12,
       
   319 	mac_82550_D102_C  = 13,
       
   320 	mac_82551_E       = 14,
       
   321 	mac_82551_F       = 15,
       
   322 	mac_82551_10      = 16,
       
   323 	mac_unknown       = 0xFF,
       
   324 };
       
   325 
       
   326 enum phy {
       
   327 	phy_100a     = 0x000003E0,
       
   328 	phy_100c     = 0x035002A8,
       
   329 	phy_82555_tx = 0x015002A8,
       
   330 	phy_nsc_tx   = 0x5C002000,
       
   331 	phy_82562_et = 0x033002A8,
       
   332 	phy_82562_em = 0x032002A8,
       
   333 	phy_82562_ek = 0x031002A8,
       
   334 	phy_82562_eh = 0x017002A8,
       
   335 	phy_82552_v  = 0xd061004d,
       
   336 	phy_unknown  = 0xFFFFFFFF,
       
   337 };
       
   338 
       
   339 /* CSR (Control/Status Registers) */
       
   340 struct csr {
       
   341 	struct {
       
   342 		u8 status;
       
   343 		u8 stat_ack;
       
   344 		u8 cmd_lo;
       
   345 		u8 cmd_hi;
       
   346 		u32 gen_ptr;
       
   347 	} scb;
       
   348 	u32 port;
       
   349 	u16 flash_ctrl;
       
   350 	u8 eeprom_ctrl_lo;
       
   351 	u8 eeprom_ctrl_hi;
       
   352 	u32 mdi_ctrl;
       
   353 	u32 rx_dma_count;
       
   354 };
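        /* This struct mirrors the 8255x Control/Status Register block
         * described in section II of the header comment.  All register
         * accesses below go through ioread8/ioread32 and
         * iowrite8/iowrite32 on nic->csr, so the same code also works
         * when the use_io module parameter forces an I/O-port mapping
         * instead of MMIO. */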
       
   355 
       
   356 enum scb_status {
       
   357 	rus_no_res       = 0x08,
       
   358 	rus_ready        = 0x10,
       
   359 	rus_mask         = 0x3C,
       
   360 };
       
   361 
       
   362 enum ru_state  {
       
   363 	RU_SUSPENDED = 0,
       
   364 	RU_RUNNING	 = 1,
       
   365 	RU_UNINITIALIZED = -1,
       
   366 };
       
   367 
       
   368 enum scb_stat_ack {
       
   369 	stat_ack_not_ours    = 0x00,
       
   370 	stat_ack_sw_gen      = 0x04,
       
   371 	stat_ack_rnr         = 0x10,
       
   372 	stat_ack_cu_idle     = 0x20,
       
   373 	stat_ack_frame_rx    = 0x40,
       
   374 	stat_ack_cu_cmd_done = 0x80,
       
   375 	stat_ack_not_present = 0xFF,
       
   376 	stat_ack_rx = (stat_ack_sw_gen | stat_ack_rnr | stat_ack_frame_rx),
       
   377 	stat_ack_tx = (stat_ack_cu_idle | stat_ack_cu_cmd_done),
       
   378 };
       
   379 
       
   380 enum scb_cmd_hi {
       
   381 	irq_mask_none = 0x00,
       
   382 	irq_mask_all  = 0x01,
       
   383 	irq_sw_gen    = 0x02,
       
   384 };
       
   385 
       
   386 enum scb_cmd_lo {
       
   387 	cuc_nop        = 0x00,
       
   388 	ruc_start      = 0x01,
       
   389 	ruc_load_base  = 0x06,
       
   390 	cuc_start      = 0x10,
       
   391 	cuc_resume     = 0x20,
       
   392 	cuc_dump_addr  = 0x40,
       
   393 	cuc_dump_stats = 0x50,
       
   394 	cuc_load_base  = 0x60,
       
   395 	cuc_dump_reset = 0x70,
       
   396 };
       
   397 
       
   398 enum cuc_dump {
       
   399 	cuc_dump_complete       = 0x0000A005,
       
   400 	cuc_dump_reset_complete = 0x0000A007,
       
   401 };
       
   402 
       
   403 enum port {
       
   404 	software_reset  = 0x0000,
       
   405 	selftest        = 0x0001,
       
   406 	selective_reset = 0x0002,
       
   407 };
       
   408 
       
   409 enum eeprom_ctrl_lo {
       
   410 	eesk = 0x01,
       
   411 	eecs = 0x02,
       
   412 	eedi = 0x04,
       
   413 	eedo = 0x08,
       
   414 };
       
   415 
       
   416 enum mdi_ctrl {
       
   417 	mdi_write = 0x04000000,
       
   418 	mdi_read  = 0x08000000,
       
   419 	mdi_ready = 0x10000000,
       
   420 };
       
   421 
       
   422 enum eeprom_op {
       
   423 	op_write = 0x05,
       
   424 	op_read  = 0x06,
       
   425 	op_ewds  = 0x10,
       
   426 	op_ewen  = 0x13,
       
   427 };
       
   428 
       
   429 enum eeprom_offsets {
       
   430 	eeprom_cnfg_mdix  = 0x03,
       
   431 	eeprom_phy_iface  = 0x06,
       
   432 	eeprom_id         = 0x0A,
       
   433 	eeprom_config_asf = 0x0D,
       
   434 	eeprom_smbus_addr = 0x90,
       
   435 };
       
   436 
       
   437 enum eeprom_cnfg_mdix {
       
   438 	eeprom_mdix_enabled = 0x0080,
       
   439 };
       
   440 
       
   441 enum eeprom_phy_iface {
       
   442 	NoSuchPhy = 0,
       
   443 	I82553AB,
       
   444 	I82553C,
       
   445 	I82503,
       
   446 	DP83840,
       
   447 	S80C240,
       
   448 	S80C24,
       
   449 	I82555,
       
   450 	DP83840A = 10,
       
   451 };
       
   452 
       
   453 enum eeprom_id {
       
   454 	eeprom_id_wol = 0x0020,
       
   455 };
       
   456 
       
   457 enum eeprom_config_asf {
       
   458 	eeprom_asf = 0x8000,
       
   459 	eeprom_gcl = 0x4000,
       
   460 };
       
   461 
       
   462 enum cb_status {
       
   463 	cb_complete = 0x8000,
       
   464 	cb_ok       = 0x2000,
       
   465 };
       
   466 
       
   467 /**
       
   468  * cb_command - Command Block flags
       
   469  * @cb_tx_nc:  0: controller does CRC (normal),  1: CRC from skb memory
       
   470  */
       
   471 enum cb_command {
       
   472 	cb_nop    = 0x0000,
       
   473 	cb_iaaddr = 0x0001,
       
   474 	cb_config = 0x0002,
       
   475 	cb_multi  = 0x0003,
       
   476 	cb_tx     = 0x0004,
       
   477 	cb_ucode  = 0x0005,
       
   478 	cb_dump   = 0x0006,
       
   479 	cb_tx_sf  = 0x0008,
       
   480 	cb_tx_nc  = 0x0010,
       
   481 	cb_cid    = 0x1f00,
       
   482 	cb_i      = 0x2000,
       
   483 	cb_s      = 0x4000,
       
   484 	cb_el     = 0x8000,
       
   485 };
       
   486 
       
   487 struct rfd {
       
   488 	__le16 status;
       
   489 	__le16 command;
       
   490 	__le32 link;
       
   491 	__le32 rbd;
       
   492 	__le16 actual_size;
       
   493 	__le16 size;
       
   494 };
       
   495 
       
   496 struct rx {
       
   497 	struct rx *next, *prev;
       
   498 	struct sk_buff *skb;
       
   499 	dma_addr_t dma_addr;
       
   500 };
       
   501 
       
   502 #if defined(__BIG_ENDIAN_BITFIELD)
       
   503 #define X(a,b)	b,a
       
   504 #else
       
   505 #define X(a,b)	a,b
       
   506 #endif
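        /* The X() macro above only swaps the declaration order, so struct
         * config has the same bit layout on little- and big-endian
         * machines.  For example, byte 0 below,
         *
         *     u8 X(byte_count:6, pad0:2);
         *
         * expands to "u8 byte_count:6, pad0:2;" on little-endian and to
         * "u8 pad0:2, byte_count:6;" on big-endian, leaving byte_count in
         * the six least significant bits either way. */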
       
   507 struct config {
       
   508 /*0*/	u8 X(byte_count:6, pad0:2);
       
   509 /*1*/	u8 X(X(rx_fifo_limit:4, tx_fifo_limit:3), pad1:1);
       
   510 /*2*/	u8 adaptive_ifs;
       
   511 /*3*/	u8 X(X(X(X(mwi_enable:1, type_enable:1), read_align_enable:1),
       
   512 	   term_write_cache_line:1), pad3:4);
       
   513 /*4*/	u8 X(rx_dma_max_count:7, pad4:1);
       
   514 /*5*/	u8 X(tx_dma_max_count:7, dma_max_count_enable:1);
       
   515 /*6*/	u8 X(X(X(X(X(X(X(late_scb_update:1, direct_rx_dma:1),
       
   516 	   tno_intr:1), cna_intr:1), standard_tcb:1), standard_stat_counter:1),
       
   517 	   rx_save_overruns : 1), rx_save_bad_frames : 1);
       
   518 /*7*/	u8 X(X(X(X(X(rx_discard_short_frames:1, tx_underrun_retry:2),
       
   519 	   pad7:2), rx_extended_rfd:1), tx_two_frames_in_fifo:1),
       
   520 	   tx_dynamic_tbd:1);
       
   521 /*8*/	u8 X(X(mii_mode:1, pad8:6), csma_disabled:1);
       
   522 /*9*/	u8 X(X(X(X(X(rx_tcpudp_checksum:1, pad9:3), vlan_arp_tco:1),
       
   523 	   link_status_wake:1), arp_wake:1), mcmatch_wake:1);
       
   524 /*10*/	u8 X(X(X(pad10:3, no_source_addr_insertion:1), preamble_length:2),
       
   525 	   loopback:2);
       
   526 /*11*/	u8 X(linear_priority:3, pad11:5);
       
   527 /*12*/	u8 X(X(linear_priority_mode:1, pad12:3), ifs:4);
       
   528 /*13*/	u8 ip_addr_lo;
       
   529 /*14*/	u8 ip_addr_hi;
       
   530 /*15*/	u8 X(X(X(X(X(X(X(promiscuous_mode:1, broadcast_disabled:1),
       
   531 	   wait_after_win:1), pad15_1:1), ignore_ul_bit:1), crc_16_bit:1),
       
   532 	   pad15_2:1), crs_or_cdt:1);
       
   533 /*16*/	u8 fc_delay_lo;
       
   534 /*17*/	u8 fc_delay_hi;
       
   535 /*18*/	u8 X(X(X(X(X(rx_stripping:1, tx_padding:1), rx_crc_transfer:1),
       
   536 	   rx_long_ok:1), fc_priority_threshold:3), pad18:1);
       
   537 /*19*/	u8 X(X(X(X(X(X(X(addr_wake:1, magic_packet_disable:1),
       
   538 	   fc_disable:1), fc_restop:1), fc_restart:1), fc_reject:1),
       
   539 	   full_duplex_force:1), full_duplex_pin:1);
       
   540 /*20*/	u8 X(X(X(pad20_1:5, fc_priority_location:1), multi_ia:1), pad20_2:1);
       
   541 /*21*/	u8 X(X(pad21_1:3, multicast_all:1), pad21_2:4);
       
   542 /*22*/	u8 X(X(rx_d102_mode:1, rx_vlan_drop:1), pad22:6);
       
   543 	u8 pad_d102[9];
       
   544 };
       
   545 
       
   546 #define E100_MAX_MULTICAST_ADDRS	64
       
   547 struct multi {
       
   548 	__le16 count;
       
   549 	u8 addr[E100_MAX_MULTICAST_ADDRS * ETH_ALEN + 2/*pad*/];
       
   550 };
       
   551 
       
   552 /* Important: keep total struct u32-aligned */
       
   553 #define UCODE_SIZE			134
       
   554 struct cb {
       
   555 	__le16 status;
       
   556 	__le16 command;
       
   557 	__le32 link;
       
   558 	union {
       
   559 		u8 iaaddr[ETH_ALEN];
       
   560 		__le32 ucode[UCODE_SIZE];
       
   561 		struct config config;
       
   562 		struct multi multi;
       
   563 		struct {
       
   564 			u32 tbd_array;
       
   565 			u16 tcb_byte_count;
       
   566 			u8 threshold;
       
   567 			u8 tbd_count;
       
   568 			struct {
       
   569 				__le32 buf_addr;
       
   570 				__le16 size;
       
   571 				u16 eol;
       
   572 			} tbd;
       
   573 		} tcb;
       
   574 		__le32 dump_buffer_addr;
       
   575 	} u;
       
   576 	struct cb *next, *prev;
       
   577 	dma_addr_t dma_addr;
       
   578 	struct sk_buff *skb;
       
   579 };
       
   580 
       
   581 enum loopback {
       
   582 	lb_none = 0, lb_mac = 1, lb_phy = 3,
       
   583 };
       
   584 
       
   585 struct stats {
       
   586 	__le32 tx_good_frames, tx_max_collisions, tx_late_collisions,
       
   587 		tx_underruns, tx_lost_crs, tx_deferred, tx_single_collisions,
       
   588 		tx_multiple_collisions, tx_total_collisions;
       
   589 	__le32 rx_good_frames, rx_crc_errors, rx_alignment_errors,
       
   590 		rx_resource_errors, rx_overrun_errors, rx_cdt_errors,
       
   591 		rx_short_frame_errors;
       
   592 	__le32 fc_xmt_pause, fc_rcv_pause, fc_rcv_unsupported;
       
   593 	__le16 xmt_tco_frames, rcv_tco_frames;
       
   594 	__le32 complete;
       
   595 };
       
   596 
       
   597 struct mem {
       
   598 	struct {
       
   599 		u32 signature;
       
   600 		u32 result;
       
   601 	} selftest;
       
   602 	struct stats stats;
       
   603 	u8 dump_buf[596];
       
   604 };
       
   605 
       
   606 struct param_range {
       
   607 	u32 min;
       
   608 	u32 max;
       
   609 	u32 count;
       
   610 };
       
   611 
       
   612 struct params {
       
   613 	struct param_range rfds;
       
   614 	struct param_range cbs;
       
   615 };
       
   616 
       
   617 struct nic {
       
   618 	/* Begin: frequently used values: keep adjacent for cache effect */
       
   619 	u32 msg_enable				____cacheline_aligned;
       
   620 	struct net_device *netdev;
       
   621 	struct pci_dev *pdev;
       
   622 	u16 (*mdio_ctrl)(struct nic *nic, u32 addr, u32 dir, u32 reg, u16 data);
       
   623 
       
   624 	struct rx *rxs				____cacheline_aligned;
       
   625 	struct rx *rx_to_use;
       
   626 	struct rx *rx_to_clean;
       
   627 	struct rfd blank_rfd;
       
   628 	enum ru_state ru_running;
       
   629 
       
   630 	spinlock_t cb_lock			____cacheline_aligned;
       
   631 	spinlock_t cmd_lock;
       
   632 	struct csr __iomem *csr;
       
   633 	enum scb_cmd_lo cuc_cmd;
       
   634 	unsigned int cbs_avail;
       
   635 	struct napi_struct napi;
       
   636 	struct cb *cbs;
       
   637 	struct cb *cb_to_use;
       
   638 	struct cb *cb_to_send;
       
   639 	struct cb *cb_to_clean;
       
   640 	__le16 tx_command;
       
   641 	/* End: frequently used values: keep adjacent for cache effect */
       
   642 
       
   643 	enum {
       
   644 		ich                = (1 << 0),
       
   645 		promiscuous        = (1 << 1),
       
   646 		multicast_all      = (1 << 2),
       
   647 		wol_magic          = (1 << 3),
       
   648 		ich_10h_workaround = (1 << 4),
       
   649 	} flags					____cacheline_aligned;
       
   650 
       
   651 	enum mac mac;
       
   652 	enum phy phy;
       
   653 	struct params params;
       
   654 	struct timer_list watchdog;
       
   655 	struct mii_if_info mii;
       
   656 	struct work_struct tx_timeout_task;
       
   657 	enum loopback loopback;
       
   658 
       
   659 	struct mem *mem;
       
   660 	dma_addr_t dma_addr;
       
   661 
       
   662 	struct pci_pool *cbs_pool;
       
   663 	dma_addr_t cbs_dma_addr;
       
   664 	u8 adaptive_ifs;
       
   665 	u8 tx_threshold;
       
   666 	u32 tx_frames;
       
   667 	u32 tx_collisions;
       
   668 
       
   669 	u32 tx_deferred;
       
   670 	u32 tx_single_collisions;
       
   671 	u32 tx_multiple_collisions;
       
   672 	u32 tx_fc_pause;
       
   673 	u32 tx_tco_frames;
       
   674 
       
   675 	u32 rx_fc_pause;
       
   676 	u32 rx_fc_unsupported;
       
   677 	u32 rx_tco_frames;
       
   678 	u32 rx_short_frame_errors;
       
   679 	u32 rx_over_length_errors;
       
   680 
       
   681 	u16 eeprom_wc;
       
   682 
       
   683 	__le16 eeprom[256];
       
   684 	spinlock_t mdio_lock;
       
   685 	const struct firmware *fw;
       
   686 	ec_device_t *ecdev;
       
   687 	unsigned long ec_watchdog_jiffies;
       
   688 };
       
   689 
       
   690 static inline void e100_write_flush(struct nic *nic)
       
   691 {
       
   692 	/* Flush previous PCI writes through intermediate bridges
       
   693 	 * by doing a benign read */
       
   694 	(void)ioread8(&nic->csr->scb.status);
       
   695 }
       
   696 
       
   697 static void e100_enable_irq(struct nic *nic)
       
   698 {
       
   699 	unsigned long flags;
       
   700 
       
   701 	if (nic->ecdev)
       
   702 		return;
       
   703 
       
   704 	spin_lock_irqsave(&nic->cmd_lock, flags);
       
   705 	iowrite8(irq_mask_none, &nic->csr->scb.cmd_hi);
       
   706 	e100_write_flush(nic);
       
   707 	spin_unlock_irqrestore(&nic->cmd_lock, flags);
       
   708 }
       
   709 
       
   710 static void e100_disable_irq(struct nic *nic)
       
   711 {
       
   712 	unsigned long flags = 0;
       
   713 
       
   714 	if (!nic->ecdev)
       
   715 		spin_lock_irqsave(&nic->cmd_lock, flags);
       
   716 	iowrite8(irq_mask_all, &nic->csr->scb.cmd_hi);
       
   717 	e100_write_flush(nic);
       
   718 	if (!nic->ecdev)
       
   719 		spin_unlock_irqrestore(&nic->cmd_lock, flags);
       
   720 }
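        /* Note on the nic->ecdev tests above (and throughout this driver):
         * when the port is claimed by the EtherCAT master, the device is
         * driven by polling (see e100_ec_poll) from the master's context
         * instead of by interrupts, so the IRQ is left masked and the
         * cmd_lock/cb_lock spinlocks are deliberately skipped on those
         * paths. */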
       
   721 
       
   722 static void e100_hw_reset(struct nic *nic)
       
   723 {
       
   724 	/* Put CU and RU into idle with a selective reset to get
       
   725 	 * device off of PCI bus */
       
   726 	iowrite32(selective_reset, &nic->csr->port);
       
   727 	e100_write_flush(nic); udelay(20);
       
   728 
       
   729 	/* Now fully reset device */
       
   730 	iowrite32(software_reset, &nic->csr->port);
       
   731 	e100_write_flush(nic); udelay(20);
       
   732 
       
   733 	/* Mask off our interrupt line - it's unmasked after reset */
       
   734 	e100_disable_irq(nic);
       
   735 }
       
   736 
       
   737 static int e100_self_test(struct nic *nic)
       
   738 {
       
   739 	u32 dma_addr = nic->dma_addr + offsetof(struct mem, selftest);
       
   740 
       
   741 	/* Passing the self-test is a pretty good indication
       
   742 	 * that the device can DMA to/from host memory */
       
   743 
       
   744 	nic->mem->selftest.signature = 0;
       
   745 	nic->mem->selftest.result = 0xFFFFFFFF;
       
   746 
       
   747 	iowrite32(selftest | dma_addr, &nic->csr->port);
       
   748 	e100_write_flush(nic);
       
   749 	/* Wait 10 msec for self-test to complete */
       
   750 	msleep(10);
       
   751 
       
   752 	/* Interrupts are enabled after self-test */
       
   753 	e100_disable_irq(nic);
       
   754 
       
   755 	/* Check results of self-test */
       
   756 	if (nic->mem->selftest.result != 0) {
       
   757 		netif_err(nic, hw, nic->netdev,
       
   758 			  "Self-test failed: result=0x%08X\n",
       
   759 			  nic->mem->selftest.result);
       
   760 		return -ETIMEDOUT;
       
   761 	}
       
   762 	if (nic->mem->selftest.signature == 0) {
       
   763 		netif_err(nic, hw, nic->netdev, "Self-test failed: timed out\n");
       
   764 		return -ETIMEDOUT;
       
   765 	}
       
   766 
       
   767 	return 0;
       
   768 }
       
   769 
       
   770 static void e100_eeprom_write(struct nic *nic, u16 addr_len, u16 addr, __le16 data)
       
   771 {
       
   772 	u32 cmd_addr_data[3];
       
   773 	u8 ctrl;
       
   774 	int i, j;
       
   775 
       
   776 	/* Three cmds: write/erase enable, write data, write/erase disable */
       
   777 	cmd_addr_data[0] = op_ewen << (addr_len - 2);
       
   778 	cmd_addr_data[1] = (((op_write << addr_len) | addr) << 16) |
       
   779 		le16_to_cpu(data);
       
   780 	cmd_addr_data[2] = op_ewds << (addr_len - 2);
       
   781 
       
   782 	/* Bit-bang cmds to write word to eeprom */
       
   783 	for (j = 0; j < 3; j++) {
       
   784 
       
   785 		/* Chip select */
       
   786 		iowrite8(eecs | eesk, &nic->csr->eeprom_ctrl_lo);
       
   787 		e100_write_flush(nic); udelay(4);
       
   788 
       
   789 		for (i = 31; i >= 0; i--) {
       
   790 			ctrl = (cmd_addr_data[j] & (1 << i)) ?
       
   791 				eecs | eedi : eecs;
       
   792 			iowrite8(ctrl, &nic->csr->eeprom_ctrl_lo);
       
   793 			e100_write_flush(nic); udelay(4);
       
   794 
       
   795 			iowrite8(ctrl | eesk, &nic->csr->eeprom_ctrl_lo);
       
   796 			e100_write_flush(nic); udelay(4);
       
   797 		}
       
   798 		/* Wait 10 msec for cmd to complete */
       
   799 		msleep(10);
       
   800 
       
   801 		/* Chip deselect */
       
   802 		iowrite8(0, &nic->csr->eeprom_ctrl_lo);
       
   803 		e100_write_flush(nic); udelay(4);
       
   804 	}
       
   805 };
       
   806 
       
   807 /* General technique stolen from the eepro100 driver - very clever */
       
   808 static __le16 e100_eeprom_read(struct nic *nic, u16 *addr_len, u16 addr)
       
   809 {
       
   810 	u32 cmd_addr_data;
       
   811 	u16 data = 0;
       
   812 	u8 ctrl;
       
   813 	int i;
       
   814 
       
   815 	cmd_addr_data = ((op_read << *addr_len) | addr) << 16;
       
   816 
       
   817 	/* Chip select */
       
   818 	iowrite8(eecs | eesk, &nic->csr->eeprom_ctrl_lo);
       
   819 	e100_write_flush(nic); udelay(4);
       
   820 
       
   821 	/* Bit-bang to read word from eeprom */
       
   822 	for (i = 31; i >= 0; i--) {
       
   823 		ctrl = (cmd_addr_data & (1 << i)) ? eecs | eedi : eecs;
       
   824 		iowrite8(ctrl, &nic->csr->eeprom_ctrl_lo);
       
   825 		e100_write_flush(nic); udelay(4);
       
   826 
       
   827 		iowrite8(ctrl | eesk, &nic->csr->eeprom_ctrl_lo);
       
   828 		e100_write_flush(nic); udelay(4);
       
   829 
       
   830 		/* Eeprom drives a dummy zero to EEDO after receiving
       
   831 		 * complete address.  Use this to adjust addr_len. */
       
   832 		ctrl = ioread8(&nic->csr->eeprom_ctrl_lo);
       
   833 		if (!(ctrl & eedo) && i > 16) {
       
   834 			*addr_len -= (i - 16);
       
   835 			i = 17;
       
   836 		}
       
   837 
       
   838 		data = (data << 1) | (ctrl & eedo ? 1 : 0);
       
   839 	}
       
   840 
       
   841 	/* Chip deselect */
       
   842 	iowrite8(0, &nic->csr->eeprom_ctrl_lo);
       
   843 	e100_write_flush(nic); udelay(4);
       
   844 
       
   845 	return cpu_to_le16(data);
       
   846 };
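        /* Worked example of the address-length discovery above: the probe
         * read in e100_eeprom_load() starts with *addr_len == 8, so for
         * addr == 0
         *
         *     cmd_addr_data = ((op_read << 8) | 0) << 16 == 0x06000000,
         *
         * i.e. the 0x6 start/read opcode followed by eight address bits.
         * A 64-word part has already seen its full 6-bit address two
         * clocks early, so the dummy zero on EEDO is caught at i == 18
         * and *addr_len is trimmed by (18 - 16) to 6; the remaining
         * clocks shift in the data word. */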
       
   847 
       
   848 /* Load entire EEPROM image into driver cache and validate checksum */
       
   849 static int e100_eeprom_load(struct nic *nic)
       
   850 {
       
   851 	u16 addr, addr_len = 8, checksum = 0;
       
   852 
       
   853 	/* Try reading with an 8-bit addr len to discover actual addr len */
       
   854 	e100_eeprom_read(nic, &addr_len, 0);
       
   855 	nic->eeprom_wc = 1 << addr_len;
       
   856 
       
   857 	for (addr = 0; addr < nic->eeprom_wc; addr++) {
       
   858 		nic->eeprom[addr] = e100_eeprom_read(nic, &addr_len, addr);
       
   859 		if (addr < nic->eeprom_wc - 1)
       
   860 			checksum += le16_to_cpu(nic->eeprom[addr]);
       
   861 	}
       
   862 
       
   863 	/* The checksum, stored in the last word, is calculated such that
       
   864 	 * the sum of words should be 0xBABA */
       
   865 	if (cpu_to_le16(0xBABA - checksum) != nic->eeprom[nic->eeprom_wc - 1]) {
       
   866 		netif_err(nic, probe, nic->netdev, "EEPROM corrupted\n");
       
   867 		if (!eeprom_bad_csum_allow)
       
   868 			return -EAGAIN;
       
   869 	}
       
   870 
       
   871 	return 0;
       
   872 }
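        /* An equivalent way to state the checksum rule above: summing all
         * nic->eeprom_wc words, including the stored checksum word, must
         * yield 0xBABA (mod 2^16) on an intact part.  A minimal sketch of
         * that check:
         *
         *     u16 sum = 0, a;
         *     for (a = 0; a < nic->eeprom_wc; a++)
         *             sum += le16_to_cpu(nic->eeprom[a]);
         *     // intact iff sum == 0xBABA
         */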
       
   873 
       
   874 /* Save (portion of) driver EEPROM cache to device and update checksum */
       
   875 static int e100_eeprom_save(struct nic *nic, u16 start, u16 count)
       
   876 {
       
   877 	u16 addr, addr_len = 8, checksum = 0;
       
   878 
       
   879 	/* Try reading with an 8-bit addr len to discover actual addr len */
       
   880 	e100_eeprom_read(nic, &addr_len, 0);
       
   881 	nic->eeprom_wc = 1 << addr_len;
       
   882 
       
   883 	if (start + count >= nic->eeprom_wc)
       
   884 		return -EINVAL;
       
   885 
       
   886 	for (addr = start; addr < start + count; addr++)
       
   887 		e100_eeprom_write(nic, addr_len, addr, nic->eeprom[addr]);
       
   888 
       
   889 	/* The checksum, stored in the last word, is calculated such that
       
   890 	 * the sum of words should be 0xBABA */
       
   891 	for (addr = 0; addr < nic->eeprom_wc - 1; addr++)
       
   892 		checksum += le16_to_cpu(nic->eeprom[addr]);
       
   893 	nic->eeprom[nic->eeprom_wc - 1] = cpu_to_le16(0xBABA - checksum);
       
   894 	e100_eeprom_write(nic, addr_len, nic->eeprom_wc - 1,
       
   895 		nic->eeprom[nic->eeprom_wc - 1]);
       
   896 
       
   897 	return 0;
       
   898 }
       
   899 
       
   900 #define E100_WAIT_SCB_TIMEOUT 20000 /* we might have to wait 100ms!!! */
       
   901 #define E100_WAIT_SCB_FAST 20       /* delay like the old code */
       
   902 static int e100_exec_cmd(struct nic *nic, u8 cmd, dma_addr_t dma_addr)
       
   903 {
       
   904 	unsigned long flags = 0;
       
   905 	unsigned int i;
       
   906 	int err = 0;
       
   907 
       
   908 	if (!nic->ecdev)
       
   909 		spin_lock_irqsave(&nic->cmd_lock, flags);
       
   910 
       
   911 	/* Previous command is accepted when SCB clears */
       
   912 	for (i = 0; i < E100_WAIT_SCB_TIMEOUT; i++) {
       
   913 		if (likely(!ioread8(&nic->csr->scb.cmd_lo)))
       
   914 			break;
       
   915 		cpu_relax();
       
   916 		if (unlikely(i > E100_WAIT_SCB_FAST))
       
   917 			udelay(5);
       
   918 	}
       
   919 	if (unlikely(i == E100_WAIT_SCB_TIMEOUT)) {
       
   920 		err = -EAGAIN;
       
   921 		goto err_unlock;
       
   922 	}
       
   923 
       
   924 	if (unlikely(cmd != cuc_resume))
       
   925 		iowrite32(dma_addr, &nic->csr->scb.gen_ptr);
       
   926 	iowrite8(cmd, &nic->csr->scb.cmd_lo);
       
   927 
       
   928 err_unlock:
       
   929 	if (!nic->ecdev)
       
   930 		spin_unlock_irqrestore(&nic->cmd_lock, flags);
       
   931 
       
   932 	return err;
       
   933 }
       
   934 
       
   935 static int e100_exec_cb(struct nic *nic, struct sk_buff *skb,
       
   936 	int (*cb_prepare)(struct nic *, struct cb *, struct sk_buff *))
       
   937 {
       
   938 	struct cb *cb;
       
   939 	unsigned long flags = 0;
       
   940 	int err = 0;
       
   941 
       
   942 	if (!nic->ecdev)
       
   943 		spin_lock_irqsave(&nic->cb_lock, flags);
       
   944 
       
   945 	if (unlikely(!nic->cbs_avail)) {
       
   946 		err = -ENOMEM;
       
   947 		goto err_unlock;
       
   948 	}
       
   949 
       
   950 	cb = nic->cb_to_use;
       
   951 	nic->cb_to_use = cb->next;
       
   952 	nic->cbs_avail--;
       
   953 	cb->skb = skb;
       
   954 
       
   955 	err = cb_prepare(nic, cb, skb);
       
   956 	if (err)
       
   957 		goto err_unlock;
       
   958 
       
   959 	if (unlikely(!nic->cbs_avail))
       
   960 		err = -ENOSPC;
       
   961 
       
   962 
       
   963 	/* Order is important, otherwise we'll be in a race with h/w:
       
   964 	 * set S-bit in current first, then clear S-bit in previous. */
       
   965 	cb->command |= cpu_to_le16(cb_s);
       
   966 	wmb();
       
   967 	cb->prev->command &= cpu_to_le16(~cb_s);
       
   968 
       
   969 	while (nic->cb_to_send != nic->cb_to_use) {
       
   970 		if (unlikely(e100_exec_cmd(nic, nic->cuc_cmd,
       
   971 			nic->cb_to_send->dma_addr))) {
       
   972 			/* Ok, here's where things get sticky.  It's
       
   973 			 * possible that we can't schedule the command
       
   974 			 * because the controller is too busy, so
       
   975 			 * let's just queue the command and try again
       
   976 			 * when another command is scheduled. */
       
   977 			if (err == -ENOSPC) {
       
   978 				//request a reset
       
   979 				schedule_work(&nic->tx_timeout_task);
       
   980 			}
       
   981 			break;
       
   982 		} else {
       
   983 			nic->cuc_cmd = cuc_resume;
       
   984 			nic->cb_to_send = nic->cb_to_send->next;
       
   985 		}
       
   986 	}
       
   987 
       
   988 err_unlock:
       
   989 	if (!nic->ecdev)
       
   990 		spin_unlock_irqrestore(&nic->cb_lock, flags);
       
   991 
       
   992 	return err;
       
   993 }
       
   994 
       
   995 static int mdio_read(struct net_device *netdev, int addr, int reg)
       
   996 {
       
   997 	struct nic *nic = netdev_priv(netdev);
       
   998 	return nic->mdio_ctrl(nic, addr, mdi_read, reg, 0);
       
   999 }
       
  1000 
       
  1001 static void mdio_write(struct net_device *netdev, int addr, int reg, int data)
       
  1002 {
       
  1003 	struct nic *nic = netdev_priv(netdev);
       
  1004 
       
  1005 	nic->mdio_ctrl(nic, addr, mdi_write, reg, data);
       
  1006 }
       
  1007 
       
  1008 /* the standard mdio_ctrl() function for usual MII-compliant hardware */
       
  1009 static u16 mdio_ctrl_hw(struct nic *nic, u32 addr, u32 dir, u32 reg, u16 data)
       
  1010 {
       
  1011 	u32 data_out = 0;
       
  1012 	unsigned int i;
       
  1013 	unsigned long flags = 0;
       
  1014 
       
  1015 
       
  1016 	/*
       
  1017 	 * Stratus87247: we shouldn't be writing the MDI control
       
  1018 	 * register until the Ready bit shows True.  Also, since
       
  1019 	 * manipulation of the MDI control registers is a multi-step
       
  1020 	 * procedure it should be done under lock.
       
  1021 	 */
       
  1022 	if (!nic->ecdev)
       
  1023 		spin_lock_irqsave(&nic->mdio_lock, flags);
       
  1024 	for (i = 100; i; --i) {
       
  1025 		if (ioread32(&nic->csr->mdi_ctrl) & mdi_ready)
       
  1026 			break;
       
  1027 		udelay(20);
       
  1028 	}
       
  1029 	if (unlikely(!i)) {
       
  1030 		netdev_err(nic->netdev, "e100.mdio_ctrl won't go Ready\n");
       
  1031 		if (!nic->ecdev)
       
  1032 			spin_unlock_irqrestore(&nic->mdio_lock, flags);
       
  1033 		return 0;		/* No way to indicate timeout error */
       
  1034 	}
       
  1035 	iowrite32((reg << 16) | (addr << 21) | dir | data, &nic->csr->mdi_ctrl);
       
  1036 
       
  1037 	for (i = 0; i < 100; i++) {
       
  1038 		udelay(20);
       
  1039 		if ((data_out = ioread32(&nic->csr->mdi_ctrl)) & mdi_ready)
       
  1040 			break;
       
  1041 	}
       
  1042 	if (!nic->ecdev)
       
  1043 		spin_unlock_irqrestore(&nic->mdio_lock, flags);
       
  1044 	netif_printk(nic, hw, KERN_DEBUG, nic->netdev,
       
  1045 		     "%s:addr=%d, reg=%d, data_in=0x%04X, data_out=0x%04X\n",
       
  1046 		     dir == mdi_read ? "READ" : "WRITE",
       
  1047 		     addr, reg, data, data_out);
       
  1048 	return (u16)data_out;
       
  1049 }
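        /* Layout of the 32-bit MDI control word written above, matching
         * the enum mdi_ctrl flags and the
         * (reg << 16) | (addr << 21) | dir | data composition:
         *
         *     bits 15:0    data (read results are returned here)
         *     bits 20:16   PHY register number
         *     bits 25:21   PHY address
         *     bits 27:26   opcode (mdi_write = 0x04000000,
         *                          mdi_read  = 0x08000000)
         *     bit  28      mdi_ready, set by hardware when the cycle
         *                  completes
         */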
       
  1050 
       
  1051 /* slightly tweaked mdio_ctrl() function for phy_82552_v specifics */
       
  1052 static u16 mdio_ctrl_phy_82552_v(struct nic *nic,
       
  1053 				 u32 addr,
       
  1054 				 u32 dir,
       
  1055 				 u32 reg,
       
  1056 				 u16 data)
       
  1057 {
       
  1058 	if ((reg == MII_BMCR) && (dir == mdi_write)) {
       
  1059 		if (data & (BMCR_ANRESTART | BMCR_ANENABLE)) {
       
  1060 			u16 advert = mdio_read(nic->netdev, nic->mii.phy_id,
       
  1061 							MII_ADVERTISE);
       
  1062 
       
  1063 			/*
       
  1064 			 * Workaround Si issue where sometimes the part will not
       
  1065 			 * autoneg to 100Mbps even when advertised.
       
  1066 			 */
       
  1067 			if (advert & ADVERTISE_100FULL)
       
  1068 				data |= BMCR_SPEED100 | BMCR_FULLDPLX;
       
  1069 			else if (advert & ADVERTISE_100HALF)
       
  1070 				data |= BMCR_SPEED100;
       
  1071 		}
       
  1072 	}
       
  1073 	return mdio_ctrl_hw(nic, addr, dir, reg, data);
       
  1074 }
       
  1075 
       
  1076 /* Fully software-emulated mdio_ctrl() function for cards without
       
  1077  * MII-compliant PHYs.
       
  1078  * For now, this is mainly geared towards 80c24 support; in case of further
       
  1079  * requirements for other types (i82503, ...?) either extend this mechanism
       
  1080  * or split it, whichever is cleaner.
       
  1081  */
       
  1082 static u16 mdio_ctrl_phy_mii_emulated(struct nic *nic,
       
  1083 				      u32 addr,
       
  1084 				      u32 dir,
       
  1085 				      u32 reg,
       
  1086 				      u16 data)
       
  1087 {
       
  1088 	/* might need to allocate a netdev_priv'ed register array eventually
       
  1089 	 * to be able to record state changes, but for now
       
  1090 	 * some fully hardcoded register handling ought to be ok I guess. */
       
  1091 
       
  1092 	if (dir == mdi_read) {
       
  1093 		switch (reg) {
       
  1094 		case MII_BMCR:
       
  1095 			/* Auto-negotiation, right? */
       
  1096 			return  BMCR_ANENABLE |
       
  1097 				BMCR_FULLDPLX;
       
  1098 		case MII_BMSR:
       
  1099 			return	BMSR_LSTATUS /* for mii_link_ok() */ |
       
  1100 				BMSR_ANEGCAPABLE |
       
  1101 				BMSR_10FULL;
       
  1102 		case MII_ADVERTISE:
       
  1103 			/* 80c24 is a "combo card" PHY, right? */
       
  1104 			return	ADVERTISE_10HALF |
       
  1105 				ADVERTISE_10FULL;
       
  1106 		default:
       
  1107 			netif_printk(nic, hw, KERN_DEBUG, nic->netdev,
       
  1108 				     "%s:addr=%d, reg=%d, data=0x%04X: unimplemented emulation!\n",
       
  1109 				     dir == mdi_read ? "READ" : "WRITE",
       
  1110 				     addr, reg, data);
       
  1111 			return 0xFFFF;
       
  1112 		}
       
  1113 	} else {
       
  1114 		switch (reg) {
       
  1115 		default:
       
  1116 			netif_printk(nic, hw, KERN_DEBUG, nic->netdev,
       
  1117 				     "%s:addr=%d, reg=%d, data=0x%04X: unimplemented emulation!\n",
       
  1118 				     dir == mdi_read ? "READ" : "WRITE",
       
  1119 				     addr, reg, data);
       
  1120 			return 0xFFFF;
       
  1121 		}
       
  1122 	}
       
  1123 }
       
  1124 static inline int e100_phy_supports_mii(struct nic *nic)
       
  1125 {
       
  1126 	/* for now, just check it by comparing whether we
       
  1127 	   are using MII software emulation.
       
  1128 	*/
       
  1129 	return (nic->mdio_ctrl != mdio_ctrl_phy_mii_emulated);
       
  1130 }
       
  1131 
       
  1132 static void e100_get_defaults(struct nic *nic)
       
  1133 {
       
  1134 	struct param_range rfds = { .min = 16, .max = 256, .count = 256 };
       
  1135 	struct param_range cbs  = { .min = 64, .max = 256, .count = 128 };
       
  1136 
       
  1137 	/* MAC type is encoded as rev ID; exception: ICH is treated as 82559 */
       
  1138 	nic->mac = (nic->flags & ich) ? mac_82559_D101M : nic->pdev->revision;
       
  1139 	if (nic->mac == mac_unknown)
       
  1140 		nic->mac = mac_82557_D100_A;
       
  1141 
       
  1142 	nic->params.rfds = rfds;
       
  1143 	nic->params.cbs = cbs;
       
  1144 
       
  1145 	/* Quadwords to DMA into FIFO before starting frame transmit */
       
  1146 	nic->tx_threshold = 0xE0;
       
  1147 
       
  1148 	/* no interrupt for every tx completion, delay = 256us if not 557 */
       
  1149 	nic->tx_command = cpu_to_le16(cb_tx | cb_tx_sf |
       
  1150 		((nic->mac >= mac_82558_D101_A4) ? cb_cid : cb_i));
       
  1151 
       
  1152 	/* Template for a freshly allocated RFD */
       
  1153 	nic->blank_rfd.command = 0;
       
  1154 	nic->blank_rfd.rbd = cpu_to_le32(0xFFFFFFFF);
       
  1155 	nic->blank_rfd.size = cpu_to_le16(VLAN_ETH_FRAME_LEN + ETH_FCS_LEN);
       
  1156 
       
  1157 	/* MII setup */
       
  1158 	nic->mii.phy_id_mask = 0x1F;
       
  1159 	nic->mii.reg_num_mask = 0x1F;
       
  1160 	nic->mii.dev = nic->netdev;
       
  1161 	nic->mii.mdio_read = mdio_read;
       
  1162 	nic->mii.mdio_write = mdio_write;
       
  1163 }
       
  1164 
       
  1165 static int e100_configure(struct nic *nic, struct cb *cb, struct sk_buff *skb)
       
  1166 {
       
  1167 	struct config *config = &cb->u.config;
       
  1168 	u8 *c = (u8 *)config;
       
  1169 	struct net_device *netdev = nic->netdev;
       
  1170 
       
  1171 	cb->command = cpu_to_le16(cb_config);
       
  1172 
       
  1173 	memset(config, 0, sizeof(struct config));
       
  1174 
       
  1175 	config->byte_count = 0x16;		/* bytes in this struct */
       
  1176 	config->rx_fifo_limit = 0x8;		/* bytes in FIFO before DMA */
       
  1177 	config->direct_rx_dma = 0x1;		/* reserved */
       
  1178 	config->standard_tcb = 0x1;		/* 1=standard, 0=extended */
       
  1179 	config->standard_stat_counter = 0x1;	/* 1=standard, 0=extended */
       
  1180 	config->rx_discard_short_frames = 0x1;	/* 1=discard, 0=pass */
       
  1181 	config->tx_underrun_retry = 0x3;	/* # of underrun retries */
       
  1182 	if (e100_phy_supports_mii(nic))
       
  1183 		config->mii_mode = 1;           /* 1=MII mode, 0=i82503 mode */
       
  1184 	config->pad10 = 0x6;
       
  1185 	config->no_source_addr_insertion = 0x1;	/* 1=no, 0=yes */
       
  1186 	config->preamble_length = 0x2;		/* 0=1, 1=3, 2=7, 3=15 bytes */
       
  1187 	config->ifs = 0x6;			/* x16 = inter frame spacing */
       
  1188 	config->ip_addr_hi = 0xF2;		/* ARP IP filter - not used */
       
  1189 	config->pad15_1 = 0x1;
       
  1190 	config->pad15_2 = 0x1;
       
  1191 	config->crs_or_cdt = 0x0;		/* 0=CRS only, 1=CRS or CDT */
       
  1192 	config->fc_delay_hi = 0x40;		/* time delay for fc frame */
       
  1193 	config->tx_padding = 0x1;		/* 1=pad short frames */
       
  1194 	config->fc_priority_threshold = 0x7;	/* 7=priority fc disabled */
       
  1195 	config->pad18 = 0x1;
       
  1196 	config->full_duplex_pin = 0x1;		/* 1=examine FDX# pin */
       
  1197 	config->pad20_1 = 0x1F;
       
  1198 	config->fc_priority_location = 0x1;	/* 1=byte#31, 0=byte#19 */
       
  1199 	config->pad21_1 = 0x5;
       
  1200 
       
  1201 	config->adaptive_ifs = nic->adaptive_ifs;
       
  1202 	config->loopback = nic->loopback;
       
  1203 
       
  1204 	if (nic->mii.force_media && nic->mii.full_duplex)
       
  1205 		config->full_duplex_force = 0x1;	/* 1=force, 0=auto */
       
  1206 
       
  1207 	if (nic->flags & promiscuous || nic->loopback) {
       
  1208 		config->rx_save_bad_frames = 0x1;	/* 1=save, 0=discard */
       
  1209 		config->rx_discard_short_frames = 0x0;	/* 1=discard, 0=save */
       
  1210 		config->promiscuous_mode = 0x1;		/* 1=on, 0=off */
       
  1211 	}
       
  1212 
       
  1213 	if (unlikely(netdev->features & NETIF_F_RXFCS))
       
  1214 		config->rx_crc_transfer = 0x1;	/* 1=save, 0=discard */
       
  1215 
       
  1216 	if (nic->flags & multicast_all)
       
  1217 		config->multicast_all = 0x1;		/* 1=accept, 0=no */
       
  1218 
       
  1219 	/* disable WoL when up */
       
  1220 	if (nic->ecdev ||
       
  1221 			(netif_running(nic->netdev) || !(nic->flags & wol_magic)))
       
  1222 		config->magic_packet_disable = 0x1;	/* 1=off, 0=on */
       
  1223 
       
  1224 	if (nic->mac >= mac_82558_D101_A4) {
       
  1225 		config->fc_disable = 0x1;	/* 1=Tx fc off, 0=Tx fc on */
       
  1226 		config->mwi_enable = 0x1;	/* 1=enable, 0=disable */
       
  1227 		config->standard_tcb = 0x0;	/* 1=standard, 0=extended */
       
  1228 		config->rx_long_ok = 0x1;	/* 1=VLANs ok, 0=standard */
       
  1229 		if (nic->mac >= mac_82559_D101M) {
       
  1230 			config->tno_intr = 0x1;		/* TCO stats enable */
       
  1231 			/* Enable TCO in extended config */
       
  1232 			if (nic->mac >= mac_82551_10) {
       
  1233 				config->byte_count = 0x20; /* extended bytes */
       
  1234 				config->rx_d102_mode = 0x1; /* GMRC for TCO */
       
  1235 			}
       
  1236 		} else {
       
  1237 			config->standard_stat_counter = 0x0;
       
  1238 		}
       
  1239 	}
       
  1240 
       
  1241 	if (netdev->features & NETIF_F_RXALL) {
       
  1242 		config->rx_save_overruns = 0x1; /* 1=save, 0=discard */
       
  1243 		config->rx_save_bad_frames = 0x1;       /* 1=save, 0=discard */
       
  1244 		config->rx_discard_short_frames = 0x0;  /* 1=discard, 0=save */
       
  1245 	}
       
  1246 
       
  1247 	netif_printk(nic, hw, KERN_DEBUG, nic->netdev,
       
  1248 		     "[00-07]=%02X:%02X:%02X:%02X:%02X:%02X:%02X:%02X\n",
       
  1249 		     c[0], c[1], c[2], c[3], c[4], c[5], c[6], c[7]);
       
  1250 	netif_printk(nic, hw, KERN_DEBUG, nic->netdev,
       
  1251 		     "[08-15]=%02X:%02X:%02X:%02X:%02X:%02X:%02X:%02X\n",
       
  1252 		     c[8], c[9], c[10], c[11], c[12], c[13], c[14], c[15]);
       
  1253 	netif_printk(nic, hw, KERN_DEBUG, nic->netdev,
       
  1254 		     "[16-23]=%02X:%02X:%02X:%02X:%02X:%02X:%02X:%02X\n",
       
  1255 		     c[16], c[17], c[18], c[19], c[20], c[21], c[22], c[23]);
       
  1256 	return 0;
       
  1257 }
       
  1258 
       
  1259 /*************************************************************************
       
  1260 *  CPUSaver parameters
       
  1261 *
       
  1262 *  All CPUSaver parameters are 16-bit literals that are part of a
       
  1263 *  "move immediate value" instruction.  By changing the value of
       
  1264 *  the literal in the instruction before the code is loaded, the
       
  1265 *  driver can change the algorithm.
       
  1266 *
       
  1267 *  INTDELAY - This loads the dead-man timer with its initial value.
       
  1268 *    When this timer expires the interrupt is asserted, and the
       
  1269 *    timer is reset each time a new packet is received.  (see
       
  1270 *    BUNDLEMAX below to set the limit on number of chained packets)
       
  1271 *    The current default is 0x600 or 1536.  Experiments show that
       
  1272 *    the value should probably stay within the 0x200 - 0x1000 range.
       
  1273 *
       
  1274 *  BUNDLEMAX -
       
  1275 *    This sets the maximum number of frames that will be bundled.  In
       
  1276 *    some situations, such as the TCP windowing algorithm, it may be
       
  1277 *    better to limit the growth of the bundle size than let it go as
       
  1278 *    high as it can, because that could cause too much added latency.
       
  1279 *    The default is six, because this is the number of packets in the
       
  1280 *    default TCP window size.  A value of 1 would make CPUSaver indicate
       
  1281 *    an interrupt for every frame received.  If you do not want to put
       
   1282 *    a limit on the bundle size, set this value to 0xFFFF.
       
  1283 *
       
  1284 *  BUNDLESMALL -
       
  1285 *    This contains a bit-mask describing the minimum size frame that
       
  1286 *    will be bundled.  The default masks the lower 7 bits, which means
       
  1287 *    that any frame less than 128 bytes in length will not be bundled,
       
  1288 *    but will instead immediately generate an interrupt.  This does
       
  1289 *    not affect the current bundle in any way.  Any frame that is 128
       
   1290 *    bytes or larger will be bundled normally.  This feature is meant
       
  1291 *    to provide immediate indication of ACK frames in a TCP environment.
       
  1292 *    Customers were seeing poor performance when a machine with CPUSaver
       
  1293 *    enabled was sending but not receiving.  The delay introduced when
       
  1294 *    the ACKs were received was enough to reduce total throughput, because
       
  1295 *    the sender would sit idle until the ACK was finally seen.
       
  1296 *
       
  1297 *    The current default is 0xFF80, which masks out the lower 7 bits.
       
   1298 *    This means that any frame which is 0x7F (127) bytes or smaller
       
  1299 *    will cause an immediate interrupt.  Because this value must be a
       
  1300 *    bit mask, there are only a few valid values that can be used.  To
       
   1301 *    turn this feature off, the driver can write the value 0xFFFF to the
       
  1302 *    lower word of this instruction (in the same way that the other
       
   1303 *    parameters are used).  Likewise, a value of 0xF800, which masks the

   1304 *    lower 11 bits, would cause an interrupt to be generated for every

   1305 *    frame, because all standard Ethernet frames are <= 2047 bytes long.
       
  1306 *************************************************************************/
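
/* Editor's sketch: how a bit-mask such as BUNDLESMALL plausibly behaves.
 * This is an inference from the comment above, not from the microcode
 * itself: a frame is signalled immediately when its byte count ANDed
 * with the mask is zero.  Standalone illustration, not driver code. */
#if 0
#include <stdint.h>

static int immediate_interrupt(uint16_t frame_len, uint16_t mask)
{
	/* all length bits masked out -> frame too small to bundle */
	return (frame_len & mask) == 0;
}

/* With mask 0xFF80: a 127-byte ACK -> immediate, a 128-byte frame -> bundled.
 * With mask 0xFFFF: no nonzero length is immediate (feature off).
 * With mask 0xF800: every frame <= 2047 bytes is immediate. */
#endif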
       
  1307 
       
  1308 /* if you wish to disable the ucode functionality, while maintaining the
       
  1309  * workarounds it provides, set the following defines to:
       
  1310  * BUNDLESMALL 0
       
  1311  * BUNDLEMAX 1
       
  1312  * INTDELAY 1
       
  1313  */
       
  1314 #define BUNDLESMALL 1
       
  1315 #define BUNDLEMAX (u16)6
       
  1316 #define INTDELAY (u16)1536 /* 0x600 */
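
/* Editor's sketch of the mask-and-OR used by e100_setup_ucode() below to
 * patch a 16-bit literal inside a 32-bit "move immediate value" microcode
 * word (endianness omitted for clarity; the driver operates on __le32). */
#if 0
#include <stdint.h>

static uint32_t patch_ucode_literal(uint32_t word, uint16_t literal)
{
	/* keep the upper 16 bits (opcode), replace the lower 16 (literal) */
	return (word & 0xFFFF0000u) | literal;
}
/* e.g. patch_ucode_literal(ucode[timer], INTDELAY) */
#endif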
       
  1317 
       
  1318 /* Initialize firmware */
       
  1319 static const struct firmware *e100_request_firmware(struct nic *nic)
       
  1320 {
       
  1321 	const char *fw_name;
       
  1322 	const struct firmware *fw = nic->fw;
       
  1323 	u8 timer, bundle, min_size;
       
  1324 	int err = 0;
       
  1325 	bool required = false;
       
  1326 
       
  1327 	/* do not load u-code for ICH devices */
       
  1328 	if (nic->flags & ich)
       
  1329 		return NULL;
       
  1330 
       
  1331 	/* Search for ucode match against h/w revision
       
  1332 	 *
       
  1333 	 * Based on comments in the source code for the FreeBSD fxp
       
  1334 	 * driver, the FIRMWARE_D102E ucode includes both CPUSaver and
       
  1335 	 *
       
  1336 	 *    "fixes for bugs in the B-step hardware (specifically, bugs
       
  1337 	 *     with Inline Receive)."
       
  1338 	 *
       
  1339 	 * So we must fail if it cannot be loaded.
       
  1340 	 *
       
  1341 	 * The other microcode files are only required for the optional
       
  1342 	 * CPUSaver feature.  Nice to have, but no reason to fail.
       
  1343 	 */
       
  1344 	if (nic->mac == mac_82559_D101M) {
       
  1345 		fw_name = FIRMWARE_D101M;
       
  1346 	} else if (nic->mac == mac_82559_D101S) {
       
  1347 		fw_name = FIRMWARE_D101S;
       
  1348 	} else if (nic->mac == mac_82551_F || nic->mac == mac_82551_10) {
       
  1349 		fw_name = FIRMWARE_D102E;
       
  1350 		required = true;
       
  1351 	} else { /* No ucode on other devices */
       
  1352 		return NULL;
       
  1353 	}
       
  1354 
       
  1355 	/* If the firmware has not previously been loaded, request a pointer
       
  1356 	 * to it. If it was previously loaded, we are reinitializing the
       
  1357 	 * adapter, possibly in a resume from hibernate, in which case
       
  1358 	 * request_firmware() cannot be used.
       
  1359 	 */
       
  1360 	if (!fw)
       
  1361 		err = request_firmware(&fw, fw_name, &nic->pdev->dev);
       
  1362 
       
  1363 	if (err) {
       
  1364 		if (required) {
       
  1365 			netif_err(nic, probe, nic->netdev,
       
  1366 				  "Failed to load firmware \"%s\": %d\n",
       
  1367 				  fw_name, err);
       
  1368 			return ERR_PTR(err);
       
  1369 		} else {
       
  1370 			netif_info(nic, probe, nic->netdev,
       
  1371 				   "CPUSaver disabled. Needs \"%s\": %d\n",
       
  1372 				   fw_name, err);
       
  1373 			return NULL;
       
  1374 		}
       
  1375 	}
       
  1376 
       
  1377 	/* Firmware should be precisely UCODE_SIZE (words) plus three bytes
       
  1378 	   indicating the offsets for BUNDLESMALL, BUNDLEMAX, INTDELAY */
       
  1379 	if (fw->size != UCODE_SIZE * 4 + 3) {
       
  1380 		netif_err(nic, probe, nic->netdev,
       
  1381 			  "Firmware \"%s\" has wrong size %zu\n",
       
  1382 			  fw_name, fw->size);
       
  1383 		release_firmware(fw);
       
  1384 		return ERR_PTR(-EINVAL);
       
  1385 	}
       
  1386 
       
  1387 	/* Read timer, bundle and min_size from end of firmware blob */
       
  1388 	timer = fw->data[UCODE_SIZE * 4];
       
  1389 	bundle = fw->data[UCODE_SIZE * 4 + 1];
       
  1390 	min_size = fw->data[UCODE_SIZE * 4 + 2];
       
  1391 
       
  1392 	if (timer >= UCODE_SIZE || bundle >= UCODE_SIZE ||
       
  1393 	    min_size >= UCODE_SIZE) {
       
  1394 		netif_err(nic, probe, nic->netdev,
       
  1395 			  "\"%s\" has bogus offset values (0x%x,0x%x,0x%x)\n",
       
  1396 			  fw_name, timer, bundle, min_size);
       
  1397 		release_firmware(fw);
       
  1398 		return ERR_PTR(-EINVAL);
       
  1399 	}
       
  1400 
       
  1401 	/* OK, firmware is validated and ready to use. Save a pointer
       
  1402 	 * to it in the nic */
       
  1403 	nic->fw = fw;
       
  1404 	return fw;
       
  1405 }
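
/* Editor's note on the blob layout validated above (offsets in bytes):
 *
 *   [0 .. UCODE_SIZE*4 - 1]   UCODE_SIZE little-endian 32-bit ucode words
 *   [UCODE_SIZE*4 + 0]        word offset of the INTDELAY literal
 *   [UCODE_SIZE*4 + 1]        word offset of the BUNDLEMAX literal
 *   [UCODE_SIZE*4 + 2]        word offset of the BUNDLESMALL literal
 *
 * Each offset must be < UCODE_SIZE, otherwise the blob is rejected. */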
       
  1406 
       
  1407 static int e100_setup_ucode(struct nic *nic, struct cb *cb,
       
  1408 			     struct sk_buff *skb)
       
  1409 {
       
  1410 	const struct firmware *fw = (void *)skb;
       
  1411 	u8 timer, bundle, min_size;
       
  1412 
       
  1413 	/* It's not a real skb; we just abused the fact that e100_exec_cb
       
  1414 	   will pass it through to here... */
       
  1415 	cb->skb = NULL;
       
  1416 
       
  1417 	/* firmware is stored as little endian already */
       
  1418 	memcpy(cb->u.ucode, fw->data, UCODE_SIZE * 4);
       
  1419 
       
  1420 	/* Read timer, bundle and min_size from end of firmware blob */
       
  1421 	timer = fw->data[UCODE_SIZE * 4];
       
  1422 	bundle = fw->data[UCODE_SIZE * 4 + 1];
       
  1423 	min_size = fw->data[UCODE_SIZE * 4 + 2];
       
  1424 
       
  1425 	/* Insert user-tunable settings in cb->u.ucode */
       
  1426 	cb->u.ucode[timer] &= cpu_to_le32(0xFFFF0000);
       
  1427 	cb->u.ucode[timer] |= cpu_to_le32(INTDELAY);
       
  1428 	cb->u.ucode[bundle] &= cpu_to_le32(0xFFFF0000);
       
  1429 	cb->u.ucode[bundle] |= cpu_to_le32(BUNDLEMAX);
       
  1430 	cb->u.ucode[min_size] &= cpu_to_le32(0xFFFF0000);
       
  1431 	cb->u.ucode[min_size] |= cpu_to_le32((BUNDLESMALL) ? 0xFFFF : 0xFF80);
       
  1432 
       
  1433 	cb->command = cpu_to_le16(cb_ucode | cb_el);
       
  1434 	return 0;
       
  1435 }
       
  1436 
       
  1437 static inline int e100_load_ucode_wait(struct nic *nic)
       
  1438 {
       
  1439 	const struct firmware *fw;
       
  1440 	int err = 0, counter = 50;
       
  1441 	struct cb *cb = nic->cb_to_clean;
       
  1442 
       
  1443 	fw = e100_request_firmware(nic);
       
   1444 	/* If it's NULL, no ucode is required (PTR_ERR(NULL) is 0, success) */
       
  1445 	if (!fw || IS_ERR(fw))
       
  1446 		return PTR_ERR(fw);
       
  1447 
       
  1448 	if ((err = e100_exec_cb(nic, (void *)fw, e100_setup_ucode)))
       
  1449 		netif_err(nic, probe, nic->netdev,
       
  1450 			  "ucode cmd failed with error %d\n", err);
       
  1451 
       
  1452 	/* must restart cuc */
       
  1453 	nic->cuc_cmd = cuc_start;
       
  1454 
       
  1455 	/* wait for completion */
       
  1456 	e100_write_flush(nic);
       
  1457 	udelay(10);
       
  1458 
       
  1459 	/* wait for possibly (ouch) 500ms */
       
  1460 	while (!(cb->status & cpu_to_le16(cb_complete))) {
       
  1461 		msleep(10);
       
  1462 		if (!--counter) break;
       
  1463 	}
       
  1464 
       
  1465 	/* ack any interrupts, something could have been set */
       
  1466 	iowrite8(~0, &nic->csr->scb.stat_ack);
       
  1467 
       
  1468 	/* if the command failed, or is not OK, notify and return */
       
  1469 	if (!counter || !(cb->status & cpu_to_le16(cb_ok))) {
       
  1470 		netif_err(nic, probe, nic->netdev, "ucode load failed\n");
       
  1471 		err = -EPERM;
       
  1472 	}
       
  1473 
       
  1474 	return err;
       
  1475 }
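
/* Editor's note: the completion loop above polls cb->status every 10 ms
 * for at most 50 iterations, so a ucode load may block for roughly half
 * a second before being reported as failed with -EPERM. */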
       
  1476 
       
  1477 static int e100_setup_iaaddr(struct nic *nic, struct cb *cb,
       
  1478 	struct sk_buff *skb)
       
  1479 {
       
  1480 	cb->command = cpu_to_le16(cb_iaaddr);
       
  1481 	memcpy(cb->u.iaaddr, nic->netdev->dev_addr, ETH_ALEN);
       
  1482 	return 0;
       
  1483 }
       
  1484 
       
  1485 static int e100_dump(struct nic *nic, struct cb *cb, struct sk_buff *skb)
       
  1486 {
       
  1487 	cb->command = cpu_to_le16(cb_dump);
       
  1488 	cb->u.dump_buffer_addr = cpu_to_le32(nic->dma_addr +
       
  1489 		offsetof(struct mem, dump_buf));
       
  1490 	return 0;
       
  1491 }
       
  1492 
       
  1493 static int e100_phy_check_without_mii(struct nic *nic)
       
  1494 {
       
  1495 	u8 phy_type;
       
  1496 	int without_mii;
       
  1497 
       
  1498 	phy_type = (nic->eeprom[eeprom_phy_iface] >> 8) & 0x0f;
       
  1499 
       
  1500 	switch (phy_type) {
       
  1501 	case NoSuchPhy: /* Non-MII PHY; UNTESTED! */
       
  1502 	case I82503: /* Non-MII PHY; UNTESTED! */
       
  1503 	case S80C24: /* Non-MII PHY; tested and working */
       
  1504 		/* paragraph from the FreeBSD driver, "FXP_PHY_80C24":
       
  1505 		 * The Seeq 80c24 AutoDUPLEX(tm) Ethernet Interface Adapter
       
  1506 		 * doesn't have a programming interface of any sort.  The
       
  1507 		 * media is sensed automatically based on how the link partner
       
  1508 		 * is configured.  This is, in essence, manual configuration.
       
  1509 		 */
       
  1510 		netif_info(nic, probe, nic->netdev,
       
  1511 			   "found MII-less i82503 or 80c24 or other PHY\n");
       
  1512 
       
  1513 		nic->mdio_ctrl = mdio_ctrl_phy_mii_emulated;
       
  1514 		nic->mii.phy_id = 0; /* is this ok for an MII-less PHY? */
       
  1515 
       
  1516 		/* these might be needed for certain MII-less cards...
       
  1517 		 * nic->flags |= ich;
       
  1518 		 * nic->flags |= ich_10h_workaround; */
       
  1519 
       
  1520 		without_mii = 1;
       
  1521 		break;
       
  1522 	default:
       
  1523 		without_mii = 0;
       
  1524 		break;
       
  1525 	}
       
  1526 	return without_mii;
       
  1527 }
       
  1528 
       
  1529 #define NCONFIG_AUTO_SWITCH	0x0080
       
  1530 #define MII_NSC_CONG		MII_RESV1
       
  1531 #define NSC_CONG_ENABLE		0x0100
       
  1532 #define NSC_CONG_TXREADY	0x0400
       
  1533 #define ADVERTISE_FC_SUPPORTED	0x0400
       
  1534 static int e100_phy_init(struct nic *nic)
       
  1535 {
       
  1536 	struct net_device *netdev = nic->netdev;
       
  1537 	u32 addr;
       
  1538 	u16 bmcr, stat, id_lo, id_hi, cong;
       
  1539 
       
  1540 	/* Discover phy addr by searching addrs in order {1,0,2,..., 31} */
       
  1541 	for (addr = 0; addr < 32; addr++) {
       
  1542 		nic->mii.phy_id = (addr == 0) ? 1 : (addr == 1) ? 0 : addr;
       
  1543 		bmcr = mdio_read(netdev, nic->mii.phy_id, MII_BMCR);
       
  1544 		stat = mdio_read(netdev, nic->mii.phy_id, MII_BMSR);
       
  1545 		stat = mdio_read(netdev, nic->mii.phy_id, MII_BMSR);
       
  1546 		if (!((bmcr == 0xFFFF) || ((stat == 0) && (bmcr == 0))))
       
  1547 			break;
       
  1548 	}
       
  1549 	if (addr == 32) {
       
  1550 		/* uhoh, no PHY detected: check whether we seem to be some
       
  1551 		 * weird, rare variant which is *known* to not have any MII.
       
  1552 		 * But do this AFTER MII checking only, since this does
       
  1553 		 * lookup of EEPROM values which may easily be unreliable. */
       
  1554 		if (e100_phy_check_without_mii(nic))
       
  1555 			return 0; /* simply return and hope for the best */
       
  1556 		else {
       
  1557 			/* for unknown cases log a fatal error */
       
  1558 			netif_err(nic, hw, nic->netdev,
       
  1559 				  "Failed to locate any known PHY, aborting\n");
       
  1560 			return -EAGAIN;
       
  1561 		}
       
  1562 	} else
       
  1563 		netif_printk(nic, hw, KERN_DEBUG, nic->netdev,
       
  1564 			     "phy_addr = %d\n", nic->mii.phy_id);
       
  1565 
       
  1566 	/* Get phy ID */
       
  1567 	id_lo = mdio_read(netdev, nic->mii.phy_id, MII_PHYSID1);
       
  1568 	id_hi = mdio_read(netdev, nic->mii.phy_id, MII_PHYSID2);
       
  1569 	nic->phy = (u32)id_hi << 16 | (u32)id_lo;
       
  1570 	netif_printk(nic, hw, KERN_DEBUG, nic->netdev,
       
  1571 		     "phy ID = 0x%08X\n", nic->phy);
       
  1572 
       
  1573 	/* Select the phy and isolate the rest */
       
  1574 	for (addr = 0; addr < 32; addr++) {
       
  1575 		if (addr != nic->mii.phy_id) {
       
  1576 			mdio_write(netdev, addr, MII_BMCR, BMCR_ISOLATE);
       
  1577 		} else if (nic->phy != phy_82552_v) {
       
  1578 			bmcr = mdio_read(netdev, addr, MII_BMCR);
       
  1579 			mdio_write(netdev, addr, MII_BMCR,
       
  1580 				bmcr & ~BMCR_ISOLATE);
       
  1581 		}
       
  1582 	}
       
  1583 	/*
       
  1584 	 * Workaround for 82552:
       
  1585 	 * Clear the ISOLATE bit on selected phy_id last (mirrored on all
       
  1586 	 * other phy_id's) using bmcr value from addr discovery loop above.
       
  1587 	 */
       
  1588 	if (nic->phy == phy_82552_v)
       
  1589 		mdio_write(netdev, nic->mii.phy_id, MII_BMCR,
       
  1590 			bmcr & ~BMCR_ISOLATE);
       
  1591 
       
  1592 	/* Handle National tx phys */
       
  1593 #define NCS_PHY_MODEL_MASK	0xFFF0FFFF
       
  1594 	if ((nic->phy & NCS_PHY_MODEL_MASK) == phy_nsc_tx) {
       
  1595 		/* Disable congestion control */
       
  1596 		cong = mdio_read(netdev, nic->mii.phy_id, MII_NSC_CONG);
       
  1597 		cong |= NSC_CONG_TXREADY;
       
  1598 		cong &= ~NSC_CONG_ENABLE;
       
  1599 		mdio_write(netdev, nic->mii.phy_id, MII_NSC_CONG, cong);
       
  1600 	}
       
  1601 
       
  1602 	if (nic->phy == phy_82552_v) {
       
  1603 		u16 advert = mdio_read(netdev, nic->mii.phy_id, MII_ADVERTISE);
       
  1604 
       
  1605 		/* assign special tweaked mdio_ctrl() function */
       
  1606 		nic->mdio_ctrl = mdio_ctrl_phy_82552_v;
       
  1607 
       
  1608 		/* Workaround Si not advertising flow-control during autoneg */
       
  1609 		advert |= ADVERTISE_PAUSE_CAP | ADVERTISE_PAUSE_ASYM;
       
  1610 		mdio_write(netdev, nic->mii.phy_id, MII_ADVERTISE, advert);
       
  1611 
       
  1612 		/* Reset for the above changes to take effect */
       
  1613 		bmcr = mdio_read(netdev, nic->mii.phy_id, MII_BMCR);
       
  1614 		bmcr |= BMCR_RESET;
       
  1615 		mdio_write(netdev, nic->mii.phy_id, MII_BMCR, bmcr);
       
  1616 	} else if ((nic->mac >= mac_82550_D102) || ((nic->flags & ich) &&
       
  1617 	   (mdio_read(netdev, nic->mii.phy_id, MII_TPISTATUS) & 0x8000) &&
       
  1618 		!(nic->eeprom[eeprom_cnfg_mdix] & eeprom_mdix_enabled))) {
       
  1619 		/* enable/disable MDI/MDI-X auto-switching. */
       
  1620 		mdio_write(netdev, nic->mii.phy_id, MII_NCONFIG,
       
  1621 				nic->mii.force_media ? 0 : NCONFIG_AUTO_SWITCH);
       
  1622 	}
       
  1623 
       
  1624 	return 0;
       
  1625 }
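
/* Editor's note: the discovery loop above swaps the first two probes so
 * that PHY address 1 (the common default) is tried before address 0; a
 * PHY is considered present unless BMCR reads all-ones or both BMCR and
 * BMSR read zero. */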
       
  1626 
       
  1627 static int e100_hw_init(struct nic *nic)
       
  1628 {
       
  1629 	int err = 0;
       
  1630 
       
  1631 	e100_hw_reset(nic);
       
  1632 
       
  1633 	netif_err(nic, hw, nic->netdev, "e100_hw_init\n");
       
  1634 	if (!in_interrupt() && (err = e100_self_test(nic)))
       
  1635 		return err;
       
  1636 
       
  1637 	if ((err = e100_phy_init(nic)))
       
  1638 		return err;
       
  1639 	if ((err = e100_exec_cmd(nic, cuc_load_base, 0)))
       
  1640 		return err;
       
  1641 	if ((err = e100_exec_cmd(nic, ruc_load_base, 0)))
       
  1642 		return err;
       
  1643 	if ((err = e100_load_ucode_wait(nic)))
       
  1644 		return err;
       
  1645 	if ((err = e100_exec_cb(nic, NULL, e100_configure)))
       
  1646 		return err;
       
  1647 	if ((err = e100_exec_cb(nic, NULL, e100_setup_iaaddr)))
       
  1648 		return err;
       
  1649 	if ((err = e100_exec_cmd(nic, cuc_dump_addr,
       
  1650 		nic->dma_addr + offsetof(struct mem, stats))))
       
  1651 		return err;
       
  1652 	if ((err = e100_exec_cmd(nic, cuc_dump_reset, 0)))
       
  1653 		return err;
       
  1654 
       
  1655 	e100_disable_irq(nic);
       
  1656 
       
  1657 	return 0;
       
  1658 }
       
  1659 
       
  1660 static int e100_multi(struct nic *nic, struct cb *cb, struct sk_buff *skb)
       
  1661 {
       
  1662 	struct net_device *netdev = nic->netdev;
       
  1663 	struct netdev_hw_addr *ha;
       
  1664 	u16 i, count = min(netdev_mc_count(netdev), E100_MAX_MULTICAST_ADDRS);
       
  1665 
       
  1666 	cb->command = cpu_to_le16(cb_multi);
       
  1667 	cb->u.multi.count = cpu_to_le16(count * ETH_ALEN);
       
  1668 	i = 0;
       
  1669 	netdev_for_each_mc_addr(ha, netdev) {
       
  1670 		if (i == count)
       
  1671 			break;
       
  1672 		memcpy(&cb->u.multi.addr[i++ * ETH_ALEN], &ha->addr,
       
  1673 			ETH_ALEN);
       
  1674 	}
       
  1675 	return 0;
       
  1676 }
       
  1677 
       
  1678 static void e100_set_multicast_list(struct net_device *netdev)
       
  1679 {
       
  1680 	struct nic *nic = netdev_priv(netdev);
       
  1681 
       
  1682 	netif_printk(nic, hw, KERN_DEBUG, nic->netdev,
       
  1683 		     "mc_count=%d, flags=0x%04X\n",
       
  1684 		     netdev_mc_count(netdev), netdev->flags);
       
  1685 
       
  1686 	if (netdev->flags & IFF_PROMISC)
       
  1687 		nic->flags |= promiscuous;
       
  1688 	else
       
  1689 		nic->flags &= ~promiscuous;
       
  1690 
       
  1691 	if (netdev->flags & IFF_ALLMULTI ||
       
  1692 		netdev_mc_count(netdev) > E100_MAX_MULTICAST_ADDRS)
       
  1693 		nic->flags |= multicast_all;
       
  1694 	else
       
  1695 		nic->flags &= ~multicast_all;
       
  1696 
       
  1697 	e100_exec_cb(nic, NULL, e100_configure);
       
  1698 	e100_exec_cb(nic, NULL, e100_multi);
       
  1699 }
       
  1700 
       
  1701 static void e100_update_stats(struct nic *nic)
       
  1702 {
       
  1703 	struct net_device *dev = nic->netdev;
       
  1704 	struct net_device_stats *ns = &dev->stats;
       
  1705 	struct stats *s = &nic->mem->stats;
       
  1706 	__le32 *complete = (nic->mac < mac_82558_D101_A4) ? &s->fc_xmt_pause :
       
  1707 		(nic->mac < mac_82559_D101M) ? (__le32 *)&s->xmt_tco_frames :
       
  1708 		&s->complete;
       
  1709 
       
  1710 	/* Device's stats reporting may take several microseconds to
       
  1711 	 * complete, so we're always waiting for results of the
       
  1712 	 * previous command. */
       
  1713 
       
  1714 	if (*complete == cpu_to_le32(cuc_dump_reset_complete)) {
       
  1715 		*complete = 0;
       
  1716 		nic->tx_frames = le32_to_cpu(s->tx_good_frames);
       
  1717 		nic->tx_collisions = le32_to_cpu(s->tx_total_collisions);
       
  1718 		ns->tx_aborted_errors += le32_to_cpu(s->tx_max_collisions);
       
  1719 		ns->tx_window_errors += le32_to_cpu(s->tx_late_collisions);
       
  1720 		ns->tx_carrier_errors += le32_to_cpu(s->tx_lost_crs);
       
  1721 		ns->tx_fifo_errors += le32_to_cpu(s->tx_underruns);
       
  1722 		ns->collisions += nic->tx_collisions;
       
  1723 		ns->tx_errors += le32_to_cpu(s->tx_max_collisions) +
       
  1724 			le32_to_cpu(s->tx_lost_crs);
       
  1725 		nic->rx_short_frame_errors +=
       
  1726 			le32_to_cpu(s->rx_short_frame_errors);
       
  1727 		ns->rx_length_errors = nic->rx_short_frame_errors +
       
  1728 			nic->rx_over_length_errors;
       
  1729 		ns->rx_crc_errors += le32_to_cpu(s->rx_crc_errors);
       
  1730 		ns->rx_frame_errors += le32_to_cpu(s->rx_alignment_errors);
       
  1731 		ns->rx_over_errors += le32_to_cpu(s->rx_overrun_errors);
       
  1732 		ns->rx_fifo_errors += le32_to_cpu(s->rx_overrun_errors);
       
  1733 		ns->rx_missed_errors += le32_to_cpu(s->rx_resource_errors);
       
  1734 		ns->rx_errors += le32_to_cpu(s->rx_crc_errors) +
       
  1735 			le32_to_cpu(s->rx_alignment_errors) +
       
  1736 			le32_to_cpu(s->rx_short_frame_errors) +
       
  1737 			le32_to_cpu(s->rx_cdt_errors);
       
  1738 		nic->tx_deferred += le32_to_cpu(s->tx_deferred);
       
  1739 		nic->tx_single_collisions +=
       
  1740 			le32_to_cpu(s->tx_single_collisions);
       
  1741 		nic->tx_multiple_collisions +=
       
  1742 			le32_to_cpu(s->tx_multiple_collisions);
       
  1743 		if (nic->mac >= mac_82558_D101_A4) {
       
  1744 			nic->tx_fc_pause += le32_to_cpu(s->fc_xmt_pause);
       
  1745 			nic->rx_fc_pause += le32_to_cpu(s->fc_rcv_pause);
       
  1746 			nic->rx_fc_unsupported +=
       
  1747 				le32_to_cpu(s->fc_rcv_unsupported);
       
  1748 			if (nic->mac >= mac_82559_D101M) {
       
  1749 				nic->tx_tco_frames +=
       
  1750 					le16_to_cpu(s->xmt_tco_frames);
       
  1751 				nic->rx_tco_frames +=
       
  1752 					le16_to_cpu(s->rcv_tco_frames);
       
  1753 			}
       
  1754 		}
       
  1755 	}
       
  1756 
       
  1757 
       
  1758 	if (e100_exec_cmd(nic, cuc_dump_reset, 0))
       
  1759 		netif_printk(nic, tx_err, KERN_DEBUG, nic->netdev,
       
  1760 			     "exec cuc_dump_reset failed\n");
       
  1761 }
       
  1762 
       
  1763 static void e100_adjust_adaptive_ifs(struct nic *nic, int speed, int duplex)
       
  1764 {
       
  1765 	/* Adjust inter-frame-spacing (IFS) between two transmits if
       
  1766 	 * we're getting collisions on a half-duplex connection. */
       
  1767 
       
  1768 	if (duplex == DUPLEX_HALF) {
       
  1769 		u32 prev = nic->adaptive_ifs;
       
  1770 		u32 min_frames = (speed == SPEED_100) ? 1000 : 100;
       
  1771 
       
  1772 		if ((nic->tx_frames / 32 < nic->tx_collisions) &&
       
  1773 		   (nic->tx_frames > min_frames)) {
       
  1774 			if (nic->adaptive_ifs < 60)
       
  1775 				nic->adaptive_ifs += 5;
       
  1776 		} else if (nic->tx_frames < min_frames) {
       
  1777 			if (nic->adaptive_ifs >= 5)
       
  1778 				nic->adaptive_ifs -= 5;
       
  1779 		}
       
  1780 		if (nic->adaptive_ifs != prev)
       
  1781 			e100_exec_cb(nic, NULL, e100_configure);
       
  1782 	}
       
  1783 }
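
/* Editor's worked example of the heuristic above, at 100 Mbps half
 * duplex (min_frames = 1000): 2000 tx frames with 100 collisions gives
 * 2000/32 = 62 < 100, so adaptive_ifs grows by 5 (while below 60) and a
 * reconfigure is issued; once traffic falls below 1000 frames per
 * interval, adaptive_ifs decays by 5 per watchdog tick. */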
       
  1784 
       
  1785 static void e100_watchdog(unsigned long data)
       
  1786 {
       
  1787 	struct nic *nic = (struct nic *)data;
       
  1788 	struct ethtool_cmd cmd = { .cmd = ETHTOOL_GSET };
       
  1789 	u32 speed;
       
  1790 
       
  1791 	if (nic->ecdev) {
       
  1792 		ecdev_set_link(nic->ecdev, mii_link_ok(&nic->mii) ? 1 : 0);
       
  1793 		return;
       
  1794 	}
       
  1795 
       
  1796 	netif_printk(nic, timer, KERN_DEBUG, nic->netdev,
       
  1797 		     "right now = %ld\n", jiffies);
       
  1798 
       
  1799 	/* mii library handles link maintenance tasks */
       
  1800 
       
  1801 	mii_ethtool_gset(&nic->mii, &cmd);
       
  1802 	speed = ethtool_cmd_speed(&cmd);
       
  1803 
       
  1804 	if (mii_link_ok(&nic->mii) && !netif_carrier_ok(nic->netdev)) {
       
  1805 		netdev_info(nic->netdev, "NIC Link is Up %u Mbps %s Duplex\n",
       
  1806 			    speed == SPEED_100 ? 100 : 10,
       
  1807 			    cmd.duplex == DUPLEX_FULL ? "Full" : "Half");
       
  1808 	} else if (!mii_link_ok(&nic->mii) && netif_carrier_ok(nic->netdev)) {
       
  1809 		netdev_info(nic->netdev, "NIC Link is Down\n");
       
  1810 	}
       
  1811 
       
  1812 	mii_check_link(&nic->mii);
       
  1813 
       
  1814 	/* Software generated interrupt to recover from (rare) Rx
       
  1815 	 * allocation failure.
       
  1816 	 * Unfortunately have to use a spinlock to not re-enable interrupts
       
  1817 	 * accidentally, due to hardware that shares a register between the
       
  1818 	 * interrupt mask bit and the SW Interrupt generation bit */
       
  1819 	spin_lock_irq(&nic->cmd_lock);
       
   1820 	iowrite8(ioread8(&nic->csr->scb.cmd_hi) | irq_sw_gen, &nic->csr->scb.cmd_hi);
       
  1821 	e100_write_flush(nic);
       
  1822 	spin_unlock_irq(&nic->cmd_lock);
       
  1823 
       
  1824 	e100_update_stats(nic);
       
  1825 	e100_adjust_adaptive_ifs(nic, speed, cmd.duplex);
       
  1826 
       
  1827 	if (nic->mac <= mac_82557_D100_C)
       
  1828 		/* Issue a multicast command to workaround a 557 lock up */
       
  1829 		e100_set_multicast_list(nic->netdev);
       
  1830 
       
  1831 	if (nic->flags & ich && speed == SPEED_10 && cmd.duplex == DUPLEX_HALF)
       
  1832 		/* Need SW workaround for ICH[x] 10Mbps/half duplex Tx hang. */
       
  1833 		nic->flags |= ich_10h_workaround;
       
  1834 	else
       
  1835 		nic->flags &= ~ich_10h_workaround;
       
  1836 
       
  1837 	mod_timer(&nic->watchdog,
       
  1838 		  round_jiffies(jiffies + E100_WATCHDOG_PERIOD));
       
  1839 }
       
  1840 
       
  1841 static int e100_xmit_prepare(struct nic *nic, struct cb *cb,
       
  1842 	struct sk_buff *skb)
       
  1843 {
       
  1844 	dma_addr_t dma_addr;
       
  1845 	cb->command = nic->tx_command;
       
  1846 
       
  1847 	dma_addr = pci_map_single(nic->pdev,
       
  1848 				  skb->data, skb->len, PCI_DMA_TODEVICE);
       
  1849 	/* If we can't map the skb, have the upper layer try later */
       
  1850 	if (pci_dma_mapping_error(nic->pdev, dma_addr))
       
  1851 		return -ENOMEM;
       
  1852 
       
  1853 	/*
       
  1854 	 * Use the last 4 bytes of the SKB payload packet as the CRC, used for
       
  1855 	 * testing, ie sending frames with bad CRC.
       
  1856 	 */
       
  1857 	if (unlikely(skb->no_fcs))
       
  1858 		cb->command |= __constant_cpu_to_le16(cb_tx_nc);
       
  1859 	else
       
  1860 		cb->command &= ~__constant_cpu_to_le16(cb_tx_nc);
       
  1861 
       
  1862 	/* interrupt every 16 packets regardless of delay */
       
  1863 	if ((nic->cbs_avail & ~15) == nic->cbs_avail)
       
  1864 		cb->command |= cpu_to_le16(cb_i);
       
  1865 	cb->u.tcb.tbd_array = cb->dma_addr + offsetof(struct cb, u.tcb.tbd);
       
  1866 	cb->u.tcb.tcb_byte_count = 0;
       
  1867 	cb->u.tcb.threshold = nic->tx_threshold;
       
  1868 	cb->u.tcb.tbd_count = 1;
       
  1869 	cb->u.tcb.tbd.buf_addr = cpu_to_le32(dma_addr);
       
  1870 	cb->u.tcb.tbd.size = cpu_to_le16(skb->len);
       
  1871 	skb_tx_timestamp(skb);
       
  1872 	return 0;
       
  1873 }
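
/* Editor's note: "(cbs_avail & ~15) == cbs_avail" above is a
 * multiple-of-16 test (low four bits clear), so a completion interrupt
 * is requested on every 16th CB regardless of the CPUSaver delay. */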
       
  1874 
       
  1875 static netdev_tx_t e100_xmit_frame(struct sk_buff *skb,
       
  1876 				   struct net_device *netdev)
       
  1877 {
       
  1878 	struct nic *nic = netdev_priv(netdev);
       
  1879 	int err;
       
  1880 
       
  1881 	if (nic->flags & ich_10h_workaround) {
       
  1882 		/* SW workaround for ICH[x] 10Mbps/half duplex Tx hang.
       
  1883 		   Issue a NOP command followed by a 1us delay before
       
  1884 		   issuing the Tx command. */
       
  1885 		if (e100_exec_cmd(nic, cuc_nop, 0))
       
  1886 			netif_printk(nic, tx_err, KERN_DEBUG, nic->netdev,
       
  1887 				     "exec cuc_nop failed\n");
       
  1888 		udelay(1);
       
  1889 	}
       
  1890 
       
  1891 	err = e100_exec_cb(nic, skb, e100_xmit_prepare);
       
  1892 
       
  1893 	switch (err) {
       
  1894 	case -ENOSPC:
       
  1895 		/* We queued the skb, but now we're out of space. */
       
  1896 		netif_printk(nic, tx_err, KERN_DEBUG, nic->netdev,
       
  1897 			     "No space for CB\n");
       
  1898 		if (!nic->ecdev)
       
  1899 			netif_stop_queue(netdev);
       
  1900 		break;
       
  1901 	case -ENOMEM:
       
  1902 		/* This is a hard error - log it. */
       
  1903 		netif_printk(nic, tx_err, KERN_DEBUG, nic->netdev,
       
  1904 			     "Out of Tx resources, returning skb\n");
       
  1905 		if (!nic->ecdev)
       
  1906 			netif_stop_queue(netdev);
       
  1907 		return NETDEV_TX_BUSY;
       
  1908 	}
       
  1909 
       
  1910 	return NETDEV_TX_OK;
       
  1911 }
       
  1912 
       
  1913 static int e100_tx_clean(struct nic *nic)
       
  1914 {
       
  1915 	struct net_device *dev = nic->netdev;
       
  1916 	struct cb *cb;
       
  1917 	int tx_cleaned = 0;
       
  1918 
       
  1919 	if (!nic->ecdev)
       
  1920 		spin_lock(&nic->cb_lock);
       
  1921 
       
  1922 	/* Clean CBs marked complete */
       
  1923 	for (cb = nic->cb_to_clean;
       
  1924 	    cb->status & cpu_to_le16(cb_complete);
       
  1925 	    cb = nic->cb_to_clean = cb->next) {
       
  1926 		rmb(); /* read skb after status */
       
  1927 		netif_printk(nic, tx_done, KERN_DEBUG, nic->netdev,
       
  1928 			     "cb[%d]->status = 0x%04X\n",
       
  1929 			     (int)(((void*)cb - (void*)nic->cbs)/sizeof(struct cb)),
       
  1930 			     cb->status);
       
  1931 
       
  1932 		if (likely(cb->skb != NULL)) {
       
  1933 			dev->stats.tx_packets++;
       
  1934 			dev->stats.tx_bytes += cb->skb->len;
       
  1935 
       
  1936 			pci_unmap_single(nic->pdev,
       
  1937 				le32_to_cpu(cb->u.tcb.tbd.buf_addr),
       
  1938 				le16_to_cpu(cb->u.tcb.tbd.size),
       
  1939 				PCI_DMA_TODEVICE);
       
  1940 			if (!nic->ecdev)
       
  1941 				dev_kfree_skb_any(cb->skb);
       
  1942 			cb->skb = NULL;
       
  1943 			tx_cleaned = 1;
       
  1944 		}
       
  1945 		cb->status = 0;
       
  1946 		nic->cbs_avail++;
       
  1947 	}
       
  1948 
       
  1949 	if (!nic->ecdev) {
       
  1950 		spin_unlock(&nic->cb_lock);
       
  1951 
       
  1952 		/* Recover from running out of Tx resources in xmit_frame */
       
  1953 		if (unlikely(tx_cleaned && netif_queue_stopped(nic->netdev)))
       
  1954 			netif_wake_queue(nic->netdev);
       
  1955 	}
       
  1956 
       
  1957 	return tx_cleaned;
       
  1958 }
       
  1959 
       
  1960 static void e100_clean_cbs(struct nic *nic)
       
  1961 {
       
  1962 	if (nic->cbs) {
       
  1963 		while (nic->cbs_avail != nic->params.cbs.count) {
       
  1964 			struct cb *cb = nic->cb_to_clean;
       
  1965 			if (cb->skb) {
       
  1966 				pci_unmap_single(nic->pdev,
       
  1967 					le32_to_cpu(cb->u.tcb.tbd.buf_addr),
       
  1968 					le16_to_cpu(cb->u.tcb.tbd.size),
       
  1969 					PCI_DMA_TODEVICE);
       
  1970 				if (!nic->ecdev)
       
  1971 					dev_kfree_skb(cb->skb);
       
  1972 			}
       
  1973 			nic->cb_to_clean = nic->cb_to_clean->next;
       
  1974 			nic->cbs_avail++;
       
  1975 		}
       
  1976 		pci_pool_free(nic->cbs_pool, nic->cbs, nic->cbs_dma_addr);
       
  1977 		nic->cbs = NULL;
       
  1978 		nic->cbs_avail = 0;
       
  1979 	}
       
  1980 	nic->cuc_cmd = cuc_start;
       
  1981 	nic->cb_to_use = nic->cb_to_send = nic->cb_to_clean =
       
  1982 		nic->cbs;
       
  1983 }
       
  1984 
       
  1985 static int e100_alloc_cbs(struct nic *nic)
       
  1986 {
       
  1987 	struct cb *cb;
       
  1988 	unsigned int i, count = nic->params.cbs.count;
       
  1989 
       
  1990 	nic->cuc_cmd = cuc_start;
       
  1991 	nic->cb_to_use = nic->cb_to_send = nic->cb_to_clean = NULL;
       
  1992 	nic->cbs_avail = 0;
       
  1993 
       
  1994 	nic->cbs = pci_pool_alloc(nic->cbs_pool, GFP_KERNEL,
       
  1995 				  &nic->cbs_dma_addr);
       
  1996 	if (!nic->cbs)
       
  1997 		return -ENOMEM;
       
  1998 	memset(nic->cbs, 0, count * sizeof(struct cb));
       
  1999 
       
  2000 	for (cb = nic->cbs, i = 0; i < count; cb++, i++) {
       
  2001 		cb->next = (i + 1 < count) ? cb + 1 : nic->cbs;
       
  2002 		cb->prev = (i == 0) ? nic->cbs + count - 1 : cb - 1;
       
  2003 
       
  2004 		cb->dma_addr = nic->cbs_dma_addr + i * sizeof(struct cb);
       
  2005 		cb->link = cpu_to_le32(nic->cbs_dma_addr +
       
  2006 			((i+1) % count) * sizeof(struct cb));
       
  2007 	}
       
  2008 
       
  2009 	nic->cb_to_use = nic->cb_to_send = nic->cb_to_clean = nic->cbs;
       
  2010 	nic->cbs_avail = count;
       
  2011 
       
  2012 	return 0;
       
  2013 }
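
/* Editor's note: e100_alloc_cbs() builds two views of one ring: CPU-side
 * next/prev pointers for the driver, and hardware-side 'link' fields
 * holding the bus address of the following CB, with the last entry
 * wrapping back to entry 0 so the CU can chain commands indefinitely. */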
       
  2014 
       
  2015 static inline void e100_start_receiver(struct nic *nic, struct rx *rx)
       
  2016 {
       
  2017 	if (!nic->rxs) return;
       
  2018 	if (RU_SUSPENDED != nic->ru_running) return;
       
  2019 
       
  2020 	/* handle init time starts */
       
  2021 	if (!rx) rx = nic->rxs;
       
  2022 
       
  2023 	/* (Re)start RU if suspended or idle and RFA is non-NULL */
       
  2024 	if (rx->skb) {
       
  2025 		e100_exec_cmd(nic, ruc_start, rx->dma_addr);
       
  2026 		nic->ru_running = RU_RUNNING;
       
  2027 	}
       
  2028 }
       
  2029 
       
  2030 #define RFD_BUF_LEN (sizeof(struct rfd) + VLAN_ETH_FRAME_LEN + ETH_FCS_LEN)
       
  2031 static int e100_rx_alloc_skb(struct nic *nic, struct rx *rx)
       
  2032 {
       
  2033 	if (!(rx->skb = netdev_alloc_skb_ip_align(nic->netdev, RFD_BUF_LEN)))
       
  2034 		return -ENOMEM;
       
  2035 
       
  2036 	/* Init, and map the RFD. */
       
  2037 	skb_copy_to_linear_data(rx->skb, &nic->blank_rfd, sizeof(struct rfd));
       
  2038 	rx->dma_addr = pci_map_single(nic->pdev, rx->skb->data,
       
  2039 		RFD_BUF_LEN, PCI_DMA_BIDIRECTIONAL);
       
  2040 
       
  2041 	if (pci_dma_mapping_error(nic->pdev, rx->dma_addr)) {
       
  2042 		dev_kfree_skb_any(rx->skb);
       
  2043 		rx->skb = NULL;
       
  2044 		rx->dma_addr = 0;
       
  2045 		return -ENOMEM;
       
  2046 	}
       
  2047 
       
  2048 	/* Link the RFD to end of RFA by linking previous RFD to
       
  2049 	 * this one.  We are safe to touch the previous RFD because
       
  2050 	 * it is protected by the before last buffer's el bit being set */
       
  2051 	if (rx->prev->skb) {
       
  2052 		struct rfd *prev_rfd = (struct rfd *)rx->prev->skb->data;
       
  2053 		put_unaligned_le32(rx->dma_addr, &prev_rfd->link);
       
  2054 		pci_dma_sync_single_for_device(nic->pdev, rx->prev->dma_addr,
       
  2055 			sizeof(struct rfd), PCI_DMA_BIDIRECTIONAL);
       
  2056 	}
       
  2057 
       
  2058 	return 0;
       
  2059 }
       
  2060 
       
  2061 static int e100_rx_indicate(struct nic *nic, struct rx *rx,
       
  2062 	unsigned int *work_done, unsigned int work_to_do)
       
  2063 {
       
  2064 	struct net_device *dev = nic->netdev;
       
  2065 	struct sk_buff *skb = rx->skb;
       
  2066 	struct rfd *rfd = (struct rfd *)skb->data;
       
  2067 	u16 rfd_status, actual_size;
       
  2068 	u16 fcs_pad = 0;
       
  2069 
       
  2070 	if (unlikely(work_done && *work_done >= work_to_do))
       
  2071 		return -EAGAIN;
       
  2072 
       
  2073 	/* Need to sync before taking a peek at cb_complete bit */
       
  2074 	pci_dma_sync_single_for_cpu(nic->pdev, rx->dma_addr,
       
  2075 		sizeof(struct rfd), PCI_DMA_BIDIRECTIONAL);
       
  2076 	rfd_status = le16_to_cpu(rfd->status);
       
  2077 
       
  2078 	netif_printk(nic, rx_status, KERN_DEBUG, nic->netdev,
       
  2079 		     "status=0x%04X\n", rfd_status);
       
  2080 	rmb(); /* read size after status bit */
       
  2081 
       
  2082 	/* If data isn't ready, nothing to indicate */
       
  2083 	if (unlikely(!(rfd_status & cb_complete))) {
       
  2084 		/* If the next buffer has the el bit, but we think the receiver
       
  2085 		 * is still running, check to see if it really stopped while
       
  2086 		 * we had interrupts off.
       
  2087 		 * This allows for a fast restart without re-enabling
       
  2088 		 * interrupts */
       
  2089 		if ((le16_to_cpu(rfd->command) & cb_el) &&
       
  2090 		    (RU_RUNNING == nic->ru_running))
       
  2091 
       
  2092 			if (ioread8(&nic->csr->scb.status) & rus_no_res)
       
  2093 				nic->ru_running = RU_SUSPENDED;
       
  2094 		pci_dma_sync_single_for_device(nic->pdev, rx->dma_addr,
       
  2095 					       sizeof(struct rfd),
       
  2096 					       PCI_DMA_FROMDEVICE);
       
  2097 		return -ENODATA;
       
  2098 	}
       
  2099 
       
  2100 	/* Get actual data size */
       
  2101 	if (unlikely(dev->features & NETIF_F_RXFCS))
       
  2102 		fcs_pad = 4;
       
  2103 	actual_size = le16_to_cpu(rfd->actual_size) & 0x3FFF;
       
  2104 	if (unlikely(actual_size > RFD_BUF_LEN - sizeof(struct rfd)))
       
  2105 		actual_size = RFD_BUF_LEN - sizeof(struct rfd);
       
  2106 
       
  2107 	/* Get data */
       
  2108 	pci_unmap_single(nic->pdev, rx->dma_addr,
       
  2109 		RFD_BUF_LEN, PCI_DMA_BIDIRECTIONAL);
       
  2110 
       
  2111 	/* If this buffer has the el bit, but we think the receiver
       
  2112 	 * is still running, check to see if it really stopped while
       
  2113 	 * we had interrupts off.
       
  2114 	 * This allows for a fast restart without re-enabling interrupts.
       
  2115 	 * This can happen when the RU sees the size change but also sees
       
  2116 	 * the el bit set. */
       
  2117 	if ((le16_to_cpu(rfd->command) & cb_el) &&
       
  2118 	    (RU_RUNNING == nic->ru_running)) {
       
  2119 
       
   2120 		if (ioread8(&nic->csr->scb.status) & rus_no_res)

   2121 			nic->ru_running = RU_SUSPENDED;
       
  2122 	}
       
  2123 
       
  2124 	if (!nic->ecdev) {
       
  2125 		/* Pull off the RFD and put the actual data (minus eth hdr) */
       
  2126 		skb_reserve(skb, sizeof(struct rfd));
       
  2127 		skb_put(skb, actual_size);
       
  2128 		skb->protocol = eth_type_trans(skb, nic->netdev);
       
  2129 	}
       
  2130 
       
  2131 	/* If we are receiving all frames, then don't bother
       
  2132 	 * checking for errors.
       
  2133 	 */
       
  2134 	if (unlikely(dev->features & NETIF_F_RXALL)) {
       
  2135 		if (actual_size > ETH_DATA_LEN + VLAN_ETH_HLEN + fcs_pad)
       
  2136 			/* Received oversized frame, but keep it. */
       
  2137 			nic->rx_over_length_errors++;
       
  2138 		goto process_skb;
       
  2139 	}
       
  2140 
       
  2141 	if (unlikely(!(rfd_status & cb_ok))) {
       
  2142 		if (!nic->ecdev) {
       
  2143 			/* Don't indicate if hardware indicates errors */
       
  2144 			dev_kfree_skb_any(skb);
       
  2145 		}
       
  2146 	} else if (actual_size > ETH_DATA_LEN + VLAN_ETH_HLEN + fcs_pad) {
       
  2147 		/* Don't indicate oversized frames */
       
  2148 		nic->rx_over_length_errors++;
       
  2149 		if (!nic->ecdev) {
       
  2150 			dev_kfree_skb_any(skb);
       
  2151 		}
       
  2152 	} else {
       
  2153 process_skb:
       
  2154 		dev->stats.rx_packets++;
       
  2155 		dev->stats.rx_bytes += (actual_size - fcs_pad);
       
  2156 		if (nic->ecdev) {
       
  2157 			ecdev_receive(nic->ecdev,
       
  2158 					skb->data + sizeof(struct rfd), actual_size - fcs_pad);
       
  2159 
       
  2160 			// No need to detect link status as
       
  2161 			// long as frames are received: Reset watchdog.
       
  2162 			if (ecdev_get_link(nic->ecdev)) {
       
  2163 				nic->ec_watchdog_jiffies = jiffies;
       
  2164 			}
       
  2165 		} else {
       
  2166 			netif_receive_skb(skb);
       
  2167 		}
       
  2168 		if (work_done)
       
  2169 			(*work_done)++;
       
  2170 	}
       
  2171 
       
  2172 	if (nic->ecdev) {
       
   2173 		// make receive frame descriptor usable again
       
  2174 		memcpy(skb->data, &nic->blank_rfd, sizeof(struct rfd));
       
  2175 		rx->dma_addr = pci_map_single(nic->pdev, skb->data,
       
  2176 				RFD_BUF_LEN, PCI_DMA_BIDIRECTIONAL);
       
  2177 		if (pci_dma_mapping_error(nic->pdev, rx->dma_addr)) {
       
  2178 			rx->dma_addr = 0;
       
  2179 		}
       
  2180 
       
  2181 		/* Link the RFD to end of RFA by linking previous RFD to
       
  2182 		 * this one.  We are safe to touch the previous RFD because
       
  2183 		 * it is protected by the before last buffer's el bit being set */
       
  2184 		if (rx->prev->skb) {
       
  2185 			struct rfd *prev_rfd = (struct rfd *) rx->prev->skb->data;
       
  2186 			put_unaligned_le32(rx->dma_addr, &prev_rfd->link);
       
  2187 			pci_dma_sync_single_for_device(nic->pdev, rx->prev->dma_addr,
       
  2188 					sizeof(struct rfd), PCI_DMA_TODEVICE);
       
  2189 		}
       
  2190 	} else {
       
  2191 		rx->skb = NULL;
       
  2192 	}
       
  2193 
       
  2194 	return 0;
       
  2195 }
       
  2196 
       
  2197 static void e100_rx_clean(struct nic *nic, unsigned int *work_done,
       
  2198 	unsigned int work_to_do)
       
  2199 {
       
  2200 	struct rx *rx;
       
  2201 	int restart_required = 0, err = 0;
       
  2202 	struct rx *old_before_last_rx, *new_before_last_rx;
       
  2203 	struct rfd *old_before_last_rfd, *new_before_last_rfd;
       
  2204 
       
  2205 	/* Indicate newly arrived packets */
       
  2206 	for (rx = nic->rx_to_clean; rx->skb; rx = nic->rx_to_clean = rx->next) {
       
  2207 		err = e100_rx_indicate(nic, rx, work_done, work_to_do);
       
  2208 		/* Hit quota or no more to clean */
       
  2209 		if (-EAGAIN == err || -ENODATA == err)
       
  2210 			break;
       
  2211 	}
       
  2212 
       
  2213 
       
  2214 	/* On EAGAIN, hit quota so have more work to do, restart once
       
  2215 	 * cleanup is complete.
       
   2216 	 * Else, if we are already in RNR state, take care: this ensures that
       
  2217 	 * the state machine progression never allows a start with a
       
  2218 	 * partially cleaned list, avoiding a race between hardware
       
  2219 	 * and rx_to_clean when in NAPI mode */
       
  2220 	if (-EAGAIN != err && RU_SUSPENDED == nic->ru_running)
       
  2221 		restart_required = 1;
       
  2222 
       
  2223 	old_before_last_rx = nic->rx_to_use->prev->prev;
       
  2224 	old_before_last_rfd = (struct rfd *)old_before_last_rx->skb->data;
       
  2225 
       
  2226 	if (!nic->ecdev) {
       
  2227 		/* Alloc new skbs to refill list */
       
  2228 		for(rx = nic->rx_to_use; !rx->skb; rx = nic->rx_to_use = rx->next) {
       
  2229 			if(unlikely(e100_rx_alloc_skb(nic, rx)))
       
  2230 				break; /* Better luck next time (see watchdog) */
       
  2231 		}
       
  2232 	}
       
  2233 
       
  2234 	new_before_last_rx = nic->rx_to_use->prev->prev;
       
  2235 	if (new_before_last_rx != old_before_last_rx) {
       
  2236 		/* Set the el-bit on the buffer that is before the last buffer.
       
  2237 		 * This lets us update the next pointer on the last buffer
       
  2238 		 * without worrying about hardware touching it.
       
  2239 		 * We set the size to 0 to prevent hardware from touching this
       
  2240 		 * buffer.
       
  2241 		 * When the hardware hits the before last buffer with el-bit
       
  2242 		 * and size of 0, it will RNR interrupt, the RUS will go into
       
  2243 		 * the No Resources state.  It will not complete nor write to
       
  2244 		 * this buffer. */
       
  2245 		new_before_last_rfd =
       
  2246 			(struct rfd *)new_before_last_rx->skb->data;
       
  2247 		new_before_last_rfd->size = 0;
       
  2248 		new_before_last_rfd->command |= cpu_to_le16(cb_el);
       
  2249 		pci_dma_sync_single_for_device(nic->pdev,
       
  2250 			new_before_last_rx->dma_addr, sizeof(struct rfd),
       
  2251 			PCI_DMA_BIDIRECTIONAL);
       
  2252 
       
  2253 		/* Now that we have a new stopping point, we can clear the old
       
  2254 		 * stopping point.  We must sync twice to get the proper
       
  2255 		 * ordering on the hardware side of things. */
       
  2256 		old_before_last_rfd->command &= ~cpu_to_le16(cb_el);
       
  2257 		pci_dma_sync_single_for_device(nic->pdev,
       
  2258 			old_before_last_rx->dma_addr, sizeof(struct rfd),
       
  2259 			PCI_DMA_BIDIRECTIONAL);
       
  2260 		old_before_last_rfd->size = cpu_to_le16(VLAN_ETH_FRAME_LEN
       
  2261 							+ ETH_FCS_LEN);
       
  2262 		pci_dma_sync_single_for_device(nic->pdev,
       
  2263 			old_before_last_rx->dma_addr, sizeof(struct rfd),
       
  2264 			PCI_DMA_BIDIRECTIONAL);
       
  2265 	}
       
  2266 
       
  2267 	if (restart_required) {
       
   2268 		// ack the RNR interrupt before restarting the receiver
       
  2269 		iowrite8(stat_ack_rnr, &nic->csr->scb.stat_ack);
       
  2270 		e100_start_receiver(nic, nic->rx_to_clean);
       
  2271 		if (work_done)
       
  2272 			(*work_done)++;
       
  2273 	}
       
  2274 }
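
/* Editor's note: the refill path above moves the el-bit "stopping point"
 * forward in two steps: first mark the new before-last RFD with el and
 * size 0, then clear el and restore the size on the old one, syncing to
 * the device between writes so the hardware never observes a receive
 * list without a stop. */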
       
  2275 
       
  2276 static void e100_rx_clean_list(struct nic *nic)
       
  2277 {
       
  2278 	struct rx *rx;
       
  2279 	unsigned int i, count = nic->params.rfds.count;
       
  2280 
       
  2281 	nic->ru_running = RU_UNINITIALIZED;
       
  2282 
       
  2283 	if (nic->rxs) {
       
  2284 		for (rx = nic->rxs, i = 0; i < count; rx++, i++) {
       
  2285 			if (rx->skb) {
       
  2286 				pci_unmap_single(nic->pdev, rx->dma_addr,
       
  2287 					RFD_BUF_LEN, PCI_DMA_BIDIRECTIONAL);
       
  2288 				dev_kfree_skb(rx->skb);
       
  2289 			}
       
  2290 		}
       
  2291 		kfree(nic->rxs);
       
  2292 		nic->rxs = NULL;
       
  2293 	}
       
  2294 
       
  2295 	nic->rx_to_use = nic->rx_to_clean = NULL;
       
  2296 }
       
  2297 
       
  2298 static int e100_rx_alloc_list(struct nic *nic)
       
  2299 {
       
  2300 	struct rx *rx;
       
  2301 	unsigned int i, count = nic->params.rfds.count;
       
  2302 	struct rfd *before_last;
       
  2303 
       
  2304 	nic->rx_to_use = nic->rx_to_clean = NULL;
       
  2305 	nic->ru_running = RU_UNINITIALIZED;
       
  2306 
       
  2307 	if (!(nic->rxs = kcalloc(count, sizeof(struct rx), GFP_ATOMIC)))
       
  2308 		return -ENOMEM;
       
  2309 
       
  2310 	for (rx = nic->rxs, i = 0; i < count; rx++, i++) {
       
  2311 		rx->next = (i + 1 < count) ? rx + 1 : nic->rxs;
       
  2312 		rx->prev = (i == 0) ? nic->rxs + count - 1 : rx - 1;
       
  2313 		if (e100_rx_alloc_skb(nic, rx)) {
       
  2314 			e100_rx_clean_list(nic);
       
  2315 			return -ENOMEM;
       
  2316 		}
       
  2317 	}
       
  2318 
       
  2319 	if (!nic->ecdev) {
       
  2320 		/* Set the el-bit on the buffer that is before the last buffer.
       
  2321 		 * This lets us update the next pointer on the last buffer without
       
  2322 		 * worrying about hardware touching it.
       
  2323 		 * We set the size to 0 to prevent hardware from touching this buffer.
       
  2324 		 * When the hardware hits the before last buffer with el-bit and size
       
  2325 		 * of 0, it will RNR interrupt, the RU will go into the No Resources
       
  2326 		 * state.  It will not complete nor write to this buffer. */
       
  2327 		rx = nic->rxs->prev->prev;
       
  2328 		before_last = (struct rfd *)rx->skb->data;
       
  2329 		before_last->command |= cpu_to_le16(cb_el);
       
  2330 		before_last->size = 0;
       
  2331 		pci_dma_sync_single_for_device(nic->pdev, rx->dma_addr,
       
  2332 				sizeof(struct rfd), PCI_DMA_BIDIRECTIONAL);
       
  2333 	}
       
  2334 
       
  2335 	nic->rx_to_use = nic->rx_to_clean = nic->rxs;
       
  2336 	nic->ru_running = RU_SUSPENDED;
       
  2337 
       
  2338 	return 0;
       
  2339 }
       
  2340 
       
  2341 static irqreturn_t e100_intr(int irq, void *dev_id)
       
  2342 {
       
  2343 	struct net_device *netdev = dev_id;
       
  2344 	struct nic *nic = netdev_priv(netdev);
       
  2345 	u8 stat_ack = ioread8(&nic->csr->scb.stat_ack);
       
  2346 
       
  2347 	netif_printk(nic, intr, KERN_DEBUG, nic->netdev,
       
  2348 		     "stat_ack = 0x%02X\n", stat_ack);
       
  2349 
       
  2350 	if (stat_ack == stat_ack_not_ours ||	/* Not our interrupt */
       
  2351 	   stat_ack == stat_ack_not_present)	/* Hardware is ejected */
       
  2352 		return IRQ_NONE;
       
  2353 
       
  2354 	/* Ack interrupt(s) */
       
  2355 	iowrite8(stat_ack, &nic->csr->scb.stat_ack);
       
  2356 
       
  2357 	/* We hit Receive No Resource (RNR); restart RU after cleaning */
       
  2358 	if (stat_ack & stat_ack_rnr)
       
  2359 		nic->ru_running = RU_SUSPENDED;
       
  2360 
       
  2361 	if (!nic->ecdev && likely(napi_schedule_prep(&nic->napi))) {
       
  2362 		e100_disable_irq(nic);
       
  2363 		__napi_schedule(&nic->napi);
       
  2364 	}
       
  2365 
       
  2366 	return IRQ_HANDLED;
       
  2367 }
       
  2368 
       
  2369 void e100_ec_poll(struct net_device *netdev)
       
  2370 {
       
  2371 	struct nic *nic = netdev_priv(netdev);
       
  2372 
       
  2373 	e100_rx_clean(nic, NULL, 100);
       
  2374 	e100_tx_clean(nic);
       
  2375 
       
  2376 	if (jiffies - nic->ec_watchdog_jiffies >= 2 * HZ) {
       
  2377 		e100_watchdog((unsigned long) nic);
       
  2378 		nic->ec_watchdog_jiffies = jiffies;
       
  2379 	}
       
  2380 }
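
/* Editor's note: in EtherCAT mode there is no interrupt path; the master
 * cyclically calls e100_ec_poll(), which cleans the Rx and Tx rings
 * directly and runs the watchdog roughly every two seconds (2 * HZ). */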
       
  2381 
       
  2382 
       
  2383 static int e100_poll(struct napi_struct *napi, int budget)
       
  2384 {
       
  2385 	struct nic *nic = container_of(napi, struct nic, napi);
       
  2386 	unsigned int work_done = 0;
       
  2387 
       
  2388 	e100_rx_clean(nic, &work_done, budget);
       
  2389 	e100_tx_clean(nic);
       
  2390 
       
  2391 	/* If budget not fully consumed, exit the polling mode */
       
  2392 	if (work_done < budget) {
       
  2393 		napi_complete(napi);
       
  2394 		e100_enable_irq(nic);
       
  2395 	}
       
  2396 
       
  2397 	return work_done;
       
  2398 }
       
  2399 
       
  2400 #ifdef CONFIG_NET_POLL_CONTROLLER
       
  2401 static void e100_netpoll(struct net_device *netdev)
       
  2402 {
       
  2403 	struct nic *nic = netdev_priv(netdev);
       
  2404 
       
  2405 	e100_disable_irq(nic);
       
  2406 	e100_intr(nic->pdev->irq, netdev);
       
  2407 	e100_tx_clean(nic);
       
  2408 	e100_enable_irq(nic);
       
  2409 }
       
  2410 #endif
       
  2411 
       
  2412 static int e100_set_mac_address(struct net_device *netdev, void *p)
       
  2413 {
       
  2414 	struct nic *nic = netdev_priv(netdev);
       
  2415 	struct sockaddr *addr = p;
       
  2416 
       
  2417 	if (!is_valid_ether_addr(addr->sa_data))
       
  2418 		return -EADDRNOTAVAIL;
       
  2419 
       
  2420 	memcpy(netdev->dev_addr, addr->sa_data, netdev->addr_len);
       
  2421 	e100_exec_cb(nic, NULL, e100_setup_iaaddr);
       
  2422 
       
  2423 	return 0;
       
  2424 }
       
  2425 
       
  2426 static int e100_change_mtu(struct net_device *netdev, int new_mtu)
       
  2427 {
       
  2428 	if (new_mtu < ETH_ZLEN || new_mtu > ETH_DATA_LEN)
       
  2429 		return -EINVAL;
       
  2430 	netdev->mtu = new_mtu;
       
  2431 	return 0;
       
  2432 }
       
  2433 
       
  2434 static int e100_asf(struct nic *nic)
       
  2435 {
       
  2436 	/* ASF can be enabled from eeprom */
       
  2437 	return (nic->pdev->device >= 0x1050) && (nic->pdev->device <= 0x1057) &&
       
  2438 	   (nic->eeprom[eeprom_config_asf] & eeprom_asf) &&
       
  2439 	   !(nic->eeprom[eeprom_config_asf] & eeprom_gcl) &&
       
  2440 	   ((nic->eeprom[eeprom_smbus_addr] & 0xFF) != 0xFE);
       
  2441 }
       
  2442 
       
  2443 static int e100_up(struct nic *nic)
       
  2444 {
       
  2445 	int err;
       
  2446 
       
  2447 	if ((err = e100_rx_alloc_list(nic)))
       
  2448 		return err;
       
  2449 	if ((err = e100_alloc_cbs(nic)))
       
  2450 		goto err_rx_clean_list;
       
  2451 	if ((err = e100_hw_init(nic)))
       
  2452 		goto err_clean_cbs;
       
  2453 	e100_set_multicast_list(nic->netdev);
       
  2454 	e100_start_receiver(nic, NULL);
       
  2455 	if (!nic->ecdev) {
       
  2456 		mod_timer(&nic->watchdog, jiffies);
       
  2457 	}
       
  2458 	if ((err = request_irq(nic->pdev->irq, e100_intr, IRQF_SHARED,
       
  2459 		nic->netdev->name, nic->netdev)))
       
  2460 		goto err_no_irq;
       
  2461 	if (!nic->ecdev) {
       
  2462 		netif_wake_queue(nic->netdev);
       
  2463 		napi_enable(&nic->napi);
       
  2464 		/* enable ints _after_ enabling poll, preventing a race between
       
  2465 		 * disable ints+schedule */
       
  2466 		e100_enable_irq(nic);
       
  2467 	}
       
  2468 	return 0;
       
  2469 
       
  2470 err_no_irq:
       
  2471 	if (!nic->ecdev)
       
  2472 		del_timer_sync(&nic->watchdog);
       
  2473 err_clean_cbs:
       
  2474 	e100_clean_cbs(nic);
       
  2475 err_rx_clean_list:
       
  2476 	e100_rx_clean_list(nic);
       
  2477 	return err;
       
  2478 }
       
  2479 
       
  2480 static void e100_down(struct nic *nic)
       
  2481 {
       
  2482 	if (!nic->ecdev) {
       
  2483 		/* wait here for poll to complete */
       
  2484 		napi_disable(&nic->napi);
       
  2485 		netif_stop_queue(nic->netdev);
       
  2486 	}
       
  2487 	e100_hw_reset(nic);
       
  2488 	free_irq(nic->pdev->irq, nic->netdev);
       
  2489 	if (!nic->ecdev) {
       
  2490 		del_timer_sync(&nic->watchdog);
       
  2491 		netif_carrier_off(nic->netdev);
       
  2492 	}
       
  2493 	e100_clean_cbs(nic);
       
  2494 	e100_rx_clean_list(nic);
       
  2495 }
       
  2496 
       
  2497 static void e100_tx_timeout(struct net_device *netdev)
       
  2498 {
       
  2499 	struct nic *nic = netdev_priv(netdev);
       
  2500 
       
  2501 	/* Reset outside of interrupt context, to avoid request_irq
       
  2502 	 * in interrupt context */
       
  2503 	schedule_work(&nic->tx_timeout_task);
       
  2504 }
       
  2505 
       
static void e100_tx_timeout_task(struct work_struct *work)
{
	struct nic *nic = container_of(work, struct nic, tx_timeout_task);
	struct net_device *netdev = nic->netdev;

	netif_printk(nic, tx_err, KERN_DEBUG, nic->netdev,
		     "scb.status=0x%02X\n", ioread8(&nic->csr->scb.status));

	rtnl_lock();
	if (netif_running(netdev)) {
		e100_down(netdev_priv(netdev));
		e100_up(netdev_priv(netdev));
	}
	rtnl_unlock();
}

static int e100_loopback_test(struct nic *nic, enum loopback loopback_mode)
{
	int err;
	struct sk_buff *skb;

	/* Use driver resources to perform internal MAC or PHY
	 * loopback test.  A single packet is prepared and transmitted
	 * in loopback mode, and the test passes if the received
	 * packet compares byte-for-byte to the transmitted packet. */

	if ((err = e100_rx_alloc_list(nic)))
		return err;
	if ((err = e100_alloc_cbs(nic)))
		goto err_clean_rx;

	/* ICH PHY loopback is broken so do MAC loopback instead */
	if (nic->flags & ich && loopback_mode == lb_phy)
		loopback_mode = lb_mac;

	nic->loopback = loopback_mode;
	if ((err = e100_hw_init(nic)))
		goto err_loopback_none;

	if (loopback_mode == lb_phy)
		mdio_write(nic->netdev, nic->mii.phy_id, MII_BMCR,
			BMCR_LOOPBACK);

	e100_start_receiver(nic, NULL);

	if (!(skb = netdev_alloc_skb(nic->netdev, ETH_DATA_LEN))) {
		err = -ENOMEM;
		goto err_loopback_none;
	}
	skb_put(skb, ETH_DATA_LEN);
	memset(skb->data, 0xFF, ETH_DATA_LEN);
	e100_xmit_frame(skb, nic->netdev);

	msleep(10);

	pci_dma_sync_single_for_cpu(nic->pdev, nic->rx_to_clean->dma_addr,
			RFD_BUF_LEN, PCI_DMA_BIDIRECTIONAL);

	if (memcmp(nic->rx_to_clean->skb->data + sizeof(struct rfd),
	   skb->data, ETH_DATA_LEN))
		err = -EAGAIN;

err_loopback_none:
	mdio_write(nic->netdev, nic->mii.phy_id, MII_BMCR, 0);
	nic->loopback = lb_none;
	e100_clean_cbs(nic);
	e100_hw_reset(nic);
err_clean_rx:
	e100_rx_clean_list(nic);
	return err;
}

#define MII_LED_CONTROL	0x1B
#define E100_82552_LED_OVERRIDE 0x19
#define E100_82552_LED_ON       0x000F /* LED_TX and LED_RX both on */
#define E100_82552_LED_OFF      0x000A /* LED_TX and LED_RX both off */

static int e100_get_settings(struct net_device *netdev, struct ethtool_cmd *cmd)
{
	struct nic *nic = netdev_priv(netdev);
	return mii_ethtool_gset(&nic->mii, cmd);
}

static int e100_set_settings(struct net_device *netdev, struct ethtool_cmd *cmd)
{
	struct nic *nic = netdev_priv(netdev);
	int err;

	mdio_write(netdev, nic->mii.phy_id, MII_BMCR, BMCR_RESET);
	err = mii_ethtool_sset(&nic->mii, cmd);
	e100_exec_cb(nic, NULL, e100_configure);

	return err;
}

static void e100_get_drvinfo(struct net_device *netdev,
	struct ethtool_drvinfo *info)
{
	struct nic *nic = netdev_priv(netdev);
	strlcpy(info->driver, DRV_NAME, sizeof(info->driver));
	strlcpy(info->version, DRV_VERSION, sizeof(info->version));
	strlcpy(info->bus_info, pci_name(nic->pdev),
		sizeof(info->bus_info));
}

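/* Register dump layout, inferred from e100_get_regs() below: one u32
 * holding the SCB command/status bytes, then the E100_PHY_REGS + 1 MII
 * registers in descending order (register 0x1C first, register 0 last),
 * then the raw statistics dump buffer filled in by the e100_dump
 * command. */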
       
#define E100_PHY_REGS 0x1C
static int e100_get_regs_len(struct net_device *netdev)
{
	struct nic *nic = netdev_priv(netdev);
	return 1 + E100_PHY_REGS + sizeof(nic->mem->dump_buf);
}

static void e100_get_regs(struct net_device *netdev,
	struct ethtool_regs *regs, void *p)
{
	struct nic *nic = netdev_priv(netdev);
	u32 *buff = p;
	int i;

	regs->version = (1 << 24) | nic->pdev->revision;
	buff[0] = ioread8(&nic->csr->scb.cmd_hi) << 24 |
		ioread8(&nic->csr->scb.cmd_lo) << 16 |
		ioread16(&nic->csr->scb.status);
	for (i = E100_PHY_REGS; i >= 0; i--)
		buff[1 + E100_PHY_REGS - i] =
			mdio_read(netdev, nic->mii.phy_id, i);
	memset(nic->mem->dump_buf, 0, sizeof(nic->mem->dump_buf));
	e100_exec_cb(nic, NULL, e100_dump);
	msleep(10);
	memcpy(&buff[2 + E100_PHY_REGS], nic->mem->dump_buf,
		sizeof(nic->mem->dump_buf));
}

static void e100_get_wol(struct net_device *netdev, struct ethtool_wolinfo *wol)
{
	struct nic *nic = netdev_priv(netdev);
	wol->supported = (nic->mac >= mac_82558_D101_A4) ? WAKE_MAGIC : 0;
	wol->wolopts = (nic->flags & wol_magic) ? WAKE_MAGIC : 0;
}

static int e100_set_wol(struct net_device *netdev, struct ethtool_wolinfo *wol)
{
	struct nic *nic = netdev_priv(netdev);

	if ((wol->wolopts && wol->wolopts != WAKE_MAGIC) ||
	    !device_can_wakeup(&nic->pdev->dev))
		return -EOPNOTSUPP;

	if (wol->wolopts)
		nic->flags |= wol_magic;
	else
		nic->flags &= ~wol_magic;

	device_set_wakeup_enable(&nic->pdev->dev, wol->wolopts);

	e100_exec_cb(nic, NULL, e100_configure);

	return 0;
}

static u32 e100_get_msglevel(struct net_device *netdev)
{
	struct nic *nic = netdev_priv(netdev);
	return nic->msg_enable;
}

static void e100_set_msglevel(struct net_device *netdev, u32 value)
{
	struct nic *nic = netdev_priv(netdev);
	nic->msg_enable = value;
}

static int e100_nway_reset(struct net_device *netdev)
{
	struct nic *nic = netdev_priv(netdev);
	return mii_nway_restart(&nic->mii);
}

static u32 e100_get_link(struct net_device *netdev)
{
	struct nic *nic = netdev_priv(netdev);
	return mii_link_ok(&nic->mii);
}

static int e100_get_eeprom_len(struct net_device *netdev)
{
	struct nic *nic = netdev_priv(netdev);
	return nic->eeprom_wc << 1;
}

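/* The EEPROM is word-addressed internally (eeprom_wc is a word count,
 * hence the << 1 above to report a byte length), while ethtool passes
 * byte offsets.  e100_set_eeprom() therefore converts back with
 * offset >> 1 and writes (len >> 1) + 1 words, so a byte range that
 * straddles a word boundary is still fully covered.  From user space,
 * `ethtool -e ethX` reads these bytes back. */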
       
#define E100_EEPROM_MAGIC	0x1234
static int e100_get_eeprom(struct net_device *netdev,
	struct ethtool_eeprom *eeprom, u8 *bytes)
{
	struct nic *nic = netdev_priv(netdev);

	eeprom->magic = E100_EEPROM_MAGIC;
	memcpy(bytes, &((u8 *)nic->eeprom)[eeprom->offset], eeprom->len);

	return 0;
}

static int e100_set_eeprom(struct net_device *netdev,
	struct ethtool_eeprom *eeprom, u8 *bytes)
{
	struct nic *nic = netdev_priv(netdev);

	if (eeprom->magic != E100_EEPROM_MAGIC)
		return -EINVAL;

	memcpy(&((u8 *)nic->eeprom)[eeprom->offset], bytes, eeprom->len);

	return e100_eeprom_save(nic, eeprom->offset >> 1,
		(eeprom->len >> 1) + 1);
}

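/* Requested ring sizes are clamped to the min/max bounds kept in
 * nic->params rather than rejected, so an oversized request such as
 * `ethtool -G ethX rx 4096` quietly lands on rfds->max.  Resizing a
 * running interface costs a full down/up cycle. */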
       
static void e100_get_ringparam(struct net_device *netdev,
	struct ethtool_ringparam *ring)
{
	struct nic *nic = netdev_priv(netdev);
	struct param_range *rfds = &nic->params.rfds;
	struct param_range *cbs = &nic->params.cbs;

	ring->rx_max_pending = rfds->max;
	ring->tx_max_pending = cbs->max;
	ring->rx_pending = rfds->count;
	ring->tx_pending = cbs->count;
}

static int e100_set_ringparam(struct net_device *netdev,
	struct ethtool_ringparam *ring)
{
	struct nic *nic = netdev_priv(netdev);
	struct param_range *rfds = &nic->params.rfds;
	struct param_range *cbs = &nic->params.cbs;

	if ((ring->rx_mini_pending) || (ring->rx_jumbo_pending))
		return -EINVAL;

	if (netif_running(netdev))
		e100_down(nic);
	rfds->count = max(ring->rx_pending, rfds->min);
	rfds->count = min(rfds->count, rfds->max);
	cbs->count = max(ring->tx_pending, cbs->min);
	cbs->count = min(cbs->count, cbs->max);
	netif_info(nic, drv, nic->netdev, "Ring Param settings: rx: %d, tx %d\n",
		   rfds->count, cbs->count);
	if (netif_running(netdev))
		e100_up(nic);

	return 0;
}

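/* Self-test plumbing for `ethtool -t ethX [offline]`.  The string table
 * below and the data[] slots filled in e100_diag_test() are
 * index-matched: data[0] is the link test, data[1] the EEPROM load and
 * checksum, data[2] the hardware self test, and data[3]/data[4] the MAC
 * and PHY loopback tests.  Any non-zero entry marks that test failed. */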
       
static const char e100_gstrings_test[][ETH_GSTRING_LEN] = {
	"Link test     (on/offline)",
	"Eeprom test   (on/offline)",
	"Self test        (offline)",
	"Mac loopback     (offline)",
	"Phy loopback     (offline)",
};
#define E100_TEST_LEN	ARRAY_SIZE(e100_gstrings_test)

static void e100_diag_test(struct net_device *netdev,
	struct ethtool_test *test, u64 *data)
{
	struct ethtool_cmd cmd;
	struct nic *nic = netdev_priv(netdev);
	int i, err;

	memset(data, 0, E100_TEST_LEN * sizeof(u64));
	data[0] = !mii_link_ok(&nic->mii);
	data[1] = e100_eeprom_load(nic);
	if (test->flags & ETH_TEST_FL_OFFLINE) {

		/* save speed, duplex & autoneg settings */
		err = mii_ethtool_gset(&nic->mii, &cmd);

		if (netif_running(netdev))
			e100_down(nic);
		data[2] = e100_self_test(nic);
		data[3] = e100_loopback_test(nic, lb_mac);
		data[4] = e100_loopback_test(nic, lb_phy);

		/* restore speed, duplex & autoneg settings */
		err = mii_ethtool_sset(&nic->mii, &cmd);

		if (netif_running(netdev))
			e100_up(nic);
	}
	for (i = 0; i < E100_TEST_LEN; i++)
		test->flags |= data[i] ? ETH_TEST_FL_FAILED : 0;

	msleep_interruptible(4 * 1000);
}

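/* `ethtool -p ethX` handler.  Returning 2 for ETHTOOL_ID_ACTIVE asks the
 * ethtool core to drive the blinking itself, calling back with
 * ETHTOOL_ID_ON/ETHTOOL_ID_OFF at that rate (per my reading of the
 * ethtool_ops contract, the return value is the toggle frequency).  The
 * LED is switched through a PHY register: E100_82552_LED_OVERRIDE on the
 * 82552, MII_LED_CONTROL elsewhere. */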
       
static int e100_set_phys_id(struct net_device *netdev,
			    enum ethtool_phys_id_state state)
{
	struct nic *nic = netdev_priv(netdev);
	enum led_state {
		led_on     = 0x01,
		led_off    = 0x04,
		led_on_559 = 0x05,
		led_on_557 = 0x07,
	};
	u16 led_reg = (nic->phy == phy_82552_v) ? E100_82552_LED_OVERRIDE :
		MII_LED_CONTROL;
	u16 leds = 0;

	switch (state) {
	case ETHTOOL_ID_ACTIVE:
		return 2;

	case ETHTOOL_ID_ON:
		leds = (nic->phy == phy_82552_v) ? E100_82552_LED_ON :
		       (nic->mac < mac_82559_D101M) ? led_on_557 : led_on_559;
		break;

	case ETHTOOL_ID_OFF:
		leds = (nic->phy == phy_82552_v) ? E100_82552_LED_OFF : led_off;
		break;

	case ETHTOOL_ID_INACTIVE:
		break;
	}

	mdio_write(netdev, nic->mii.phy_id, led_reg, leds);
	return 0;
}

static const char e100_gstrings_stats[][ETH_GSTRING_LEN] = {
	"rx_packets", "tx_packets", "rx_bytes", "tx_bytes", "rx_errors",
	"tx_errors", "rx_dropped", "tx_dropped", "multicast", "collisions",
	"rx_length_errors", "rx_over_errors", "rx_crc_errors",
	"rx_frame_errors", "rx_fifo_errors", "rx_missed_errors",
	"tx_aborted_errors", "tx_carrier_errors", "tx_fifo_errors",
	"tx_heartbeat_errors", "tx_window_errors",
	/* device-specific stats */
	"tx_deferred", "tx_single_collisions", "tx_multi_collisions",
	"tx_flow_control_pause", "rx_flow_control_pause",
	"rx_flow_control_unsupported", "tx_tco_packets", "rx_tco_packets",
	"rx_short_frame_errors", "rx_over_length_errors",
};
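
/* The first 21 strings above name generic net_device_stats fields; the
 * count below must stay in sync with that prefix, because
 * e100_get_ethtool_stats() copies them out of netdev->stats by index. */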
       
#define E100_NET_STATS_LEN	21
#define E100_STATS_LEN	ARRAY_SIZE(e100_gstrings_stats)

static int e100_get_sset_count(struct net_device *netdev, int sset)
{
	switch (sset) {
	case ETH_SS_TEST:
		return E100_TEST_LEN;
	case ETH_SS_STATS:
		return E100_STATS_LEN;
	default:
		return -EOPNOTSUPP;
	}
}

static void e100_get_ethtool_stats(struct net_device *netdev,
	struct ethtool_stats *stats, u64 *data)
{
	struct nic *nic = netdev_priv(netdev);
	int i;

	for (i = 0; i < E100_NET_STATS_LEN; i++)
		data[i] = ((unsigned long *)&netdev->stats)[i];

	data[i++] = nic->tx_deferred;
	data[i++] = nic->tx_single_collisions;
	data[i++] = nic->tx_multiple_collisions;
	data[i++] = nic->tx_fc_pause;
	data[i++] = nic->rx_fc_pause;
	data[i++] = nic->rx_fc_unsupported;
	data[i++] = nic->tx_tco_frames;
	data[i++] = nic->rx_tco_frames;
	data[i++] = nic->rx_short_frame_errors;
	data[i++] = nic->rx_over_length_errors;
}

static void e100_get_strings(struct net_device *netdev, u32 stringset, u8 *data)
{
	switch (stringset) {
	case ETH_SS_TEST:
		memcpy(data, *e100_gstrings_test, sizeof(e100_gstrings_test));
		break;
	case ETH_SS_STATS:
		memcpy(data, *e100_gstrings_stats, sizeof(e100_gstrings_stats));
		break;
	}
}

static const struct ethtool_ops e100_ethtool_ops = {
	.get_settings		= e100_get_settings,
	.set_settings		= e100_set_settings,
	.get_drvinfo		= e100_get_drvinfo,
	.get_regs_len		= e100_get_regs_len,
	.get_regs		= e100_get_regs,
	.get_wol		= e100_get_wol,
	.set_wol		= e100_set_wol,
	.get_msglevel		= e100_get_msglevel,
	.set_msglevel		= e100_set_msglevel,
	.nway_reset		= e100_nway_reset,
	.get_link		= e100_get_link,
	.get_eeprom_len		= e100_get_eeprom_len,
	.get_eeprom		= e100_get_eeprom,
	.set_eeprom		= e100_set_eeprom,
	.get_ringparam		= e100_get_ringparam,
	.set_ringparam		= e100_set_ringparam,
	.self_test		= e100_diag_test,
	.get_strings		= e100_get_strings,
	.set_phys_id		= e100_set_phys_id,
	.get_ethtool_stats	= e100_get_ethtool_stats,
	.get_sset_count		= e100_get_sset_count,
	.get_ts_info		= ethtool_op_get_ts_info,
};

static int e100_do_ioctl(struct net_device *netdev, struct ifreq *ifr, int cmd)
{
	struct nic *nic = netdev_priv(netdev);

	return generic_mii_ioctl(&nic->mii, if_mii(ifr), cmd, NULL);
}

static int e100_alloc(struct nic *nic)
{
	nic->mem = pci_alloc_consistent(nic->pdev, sizeof(struct mem),
		&nic->dma_addr);
	return nic->mem ? 0 : -ENOMEM;
}

static void e100_free(struct nic *nic)
{
	if (nic->mem) {
		pci_free_consistent(nic->pdev, sizeof(struct mem),
			nic->mem, nic->dma_addr);
		nic->mem = NULL;
	}
}

static int e100_open(struct net_device *netdev)
{
	struct nic *nic = netdev_priv(netdev);
	int err = 0;

	if (!nic->ecdev)
		netif_carrier_off(netdev);
	if ((err = e100_up(nic)))
		netif_err(nic, ifup, nic->netdev, "Cannot open interface, aborting\n");
	return err;
}

static int e100_close(struct net_device *netdev)
{
	e100_down(netdev_priv(netdev));
	return 0;
}

static int e100_set_features(struct net_device *netdev,
			     netdev_features_t features)
{
	struct nic *nic = netdev_priv(netdev);
	netdev_features_t changed = features ^ netdev->features;

	if (!(changed & (NETIF_F_RXFCS | NETIF_F_RXALL)))
		return 0;

	netdev->features = features;
	e100_exec_cb(nic, NULL, e100_configure);
	return 0;
}

static const struct net_device_ops e100_netdev_ops = {
	.ndo_open		= e100_open,
	.ndo_stop		= e100_close,
	.ndo_start_xmit		= e100_xmit_frame,
	.ndo_validate_addr	= eth_validate_addr,
	.ndo_set_rx_mode	= e100_set_multicast_list,
	.ndo_set_mac_address	= e100_set_mac_address,
	.ndo_change_mtu		= e100_change_mtu,
	.ndo_do_ioctl		= e100_do_ioctl,
	.ndo_tx_timeout		= e100_tx_timeout,
#ifdef CONFIG_NET_POLL_CONTROLLER
	.ndo_poll_controller	= e100_netpoll,
#endif
	.ndo_set_features	= e100_set_features,
};

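/* Probe flow, in summary: allocate the netdev, map the CSR BAR, load
 * defaults and the EEPROM, then offer the device to the EtherCAT master
 * via ecdev_offer().  If the master claims it (nic->ecdev != NULL), the
 * device is started through ecdev_open() and never registered with the
 * normal network stack; otherwise it becomes an ordinary eth%d
 * interface. */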
       
static int e100_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
{
	struct net_device *netdev;
	struct nic *nic;
	int err;

	if (!(netdev = alloc_etherdev(sizeof(struct nic))))
		return -ENOMEM;

	netdev->hw_features |= NETIF_F_RXFCS;
	netdev->priv_flags |= IFF_SUPP_NOFCS;
	netdev->hw_features |= NETIF_F_RXALL;

	netdev->netdev_ops = &e100_netdev_ops;
	SET_ETHTOOL_OPS(netdev, &e100_ethtool_ops);
	netdev->watchdog_timeo = E100_WATCHDOG_PERIOD;
	strncpy(netdev->name, pci_name(pdev), sizeof(netdev->name) - 1);

	nic = netdev_priv(netdev);
	netif_napi_add(netdev, &nic->napi, e100_poll, E100_NAPI_WEIGHT);
	nic->netdev = netdev;
	nic->pdev = pdev;
	nic->msg_enable = (1 << debug) - 1;
	nic->mdio_ctrl = mdio_ctrl_hw;
	pci_set_drvdata(pdev, netdev);

	if ((err = pci_enable_device(pdev))) {
		netif_err(nic, probe, nic->netdev, "Cannot enable PCI device, aborting\n");
		goto err_out_free_dev;
	}

	if (!(pci_resource_flags(pdev, 0) & IORESOURCE_MEM)) {
		netif_err(nic, probe, nic->netdev, "Cannot find proper PCI device base address, aborting\n");
		err = -ENODEV;
		goto err_out_disable_pdev;
	}

	if ((err = pci_request_regions(pdev, DRV_NAME))) {
		netif_err(nic, probe, nic->netdev, "Cannot obtain PCI resources, aborting\n");
		goto err_out_disable_pdev;
	}

	if ((err = pci_set_dma_mask(pdev, DMA_BIT_MASK(32)))) {
		netif_err(nic, probe, nic->netdev, "No usable DMA configuration, aborting\n");
		goto err_out_free_res;
	}

	SET_NETDEV_DEV(netdev, &pdev->dev);

	if (use_io)
		netif_info(nic, probe, nic->netdev, "using i/o access mode\n");

	nic->csr = pci_iomap(pdev, (use_io ? 1 : 0), sizeof(struct csr));
	if (!nic->csr) {
		netif_err(nic, probe, nic->netdev, "Cannot map device registers, aborting\n");
		err = -ENOMEM;
		goto err_out_free_res;
	}

	if (ent->driver_data)
		nic->flags |= ich;
	else
		nic->flags &= ~ich;

	e100_get_defaults(nic);

	/* D100 MAC doesn't allow rx of vlan packets with normal MTU */
	if (nic->mac < mac_82558_D101_A4)
		netdev->features |= NETIF_F_VLAN_CHALLENGED;

	/* locks must be initialized before calling hw_reset */
	spin_lock_init(&nic->cb_lock);
	spin_lock_init(&nic->cmd_lock);
	spin_lock_init(&nic->mdio_lock);

	/* Reset the device before pci_set_master() in case device is in some
	 * funky state and has an interrupt pending - hint: we don't have the
	 * interrupt handler registered yet. */
	e100_hw_reset(nic);

	pci_set_master(pdev);

	init_timer(&nic->watchdog);
	nic->watchdog.function = e100_watchdog;
	nic->watchdog.data = (unsigned long)nic;

	INIT_WORK(&nic->tx_timeout_task, e100_tx_timeout_task);

	if ((err = e100_alloc(nic))) {
		netif_err(nic, probe, nic->netdev, "Cannot alloc driver memory, aborting\n");
		goto err_out_iounmap;
	}

	if ((err = e100_eeprom_load(nic)))
		goto err_out_free;

	e100_phy_init(nic);

	memcpy(netdev->dev_addr, nic->eeprom, ETH_ALEN);
	if (!is_valid_ether_addr(netdev->dev_addr)) {
		if (!eeprom_bad_csum_allow) {
			netif_err(nic, probe, nic->netdev, "Invalid MAC address from EEPROM, aborting\n");
			err = -EAGAIN;
			goto err_out_free;
		} else {
			netif_err(nic, probe, nic->netdev, "Invalid MAC address from EEPROM, you MUST configure one.\n");
		}
	}

	/* WoL magic packet can be enabled from eeprom */
	if ((nic->mac >= mac_82558_D101_A4) &&
	   (nic->eeprom[eeprom_id] & eeprom_id_wol)) {
		nic->flags |= wol_magic;
		device_set_wakeup_enable(&pdev->dev, true);
	}

	/* ack any pending wake events, disable PME */
	pci_pme_active(pdev, false);

	/* offer device to EtherCAT master module */
	nic->ecdev = ecdev_offer(netdev, e100_ec_poll, THIS_MODULE);

	if (!nic->ecdev) {
		strcpy(netdev->name, "eth%d");
		if ((err = register_netdev(netdev))) {
			netif_err(nic, probe, nic->netdev,
					"Cannot register net device, aborting\n");
			goto err_out_free;
		}
	}

	nic->cbs_pool = pci_pool_create(netdev->name,
			   nic->pdev,
			   nic->params.cbs.max * sizeof(struct cb),
			   sizeof(u32),
			   0);
	netif_info(nic, probe, nic->netdev,
		   "addr 0x%llx, irq %d, MAC addr %pM\n",
		   (unsigned long long)pci_resource_start(pdev, use_io ? 1 : 0),
		   pdev->irq, netdev->dev_addr);

	if (nic->ecdev) {
		err = ecdev_open(nic->ecdev);
		if (err) {
			ecdev_withdraw(nic->ecdev);
			goto err_out_free;
		}
	}

	return 0;

err_out_free:
	e100_free(nic);
err_out_iounmap:
	pci_iounmap(pdev, nic->csr);
err_out_free_res:
	pci_release_regions(pdev);
err_out_disable_pdev:
	pci_disable_device(pdev);
err_out_free_dev:
	pci_set_drvdata(pdev, NULL);
	free_netdev(netdev);
	return err;
}

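/* Unplug path: an EtherCAT-claimed device is closed and withdrawn from
 * the master instead of being unregistered from the network stack;
 * everything else is released in the reverse order of e100_probe(). */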
       
static void e100_remove(struct pci_dev *pdev)
{
	struct net_device *netdev = pci_get_drvdata(pdev);

	if (netdev) {
		struct nic *nic = netdev_priv(netdev);
		if (nic->ecdev) {
			ecdev_close(nic->ecdev);
			ecdev_withdraw(nic->ecdev);
		} else {
			unregister_netdev(netdev);
		}

		e100_free(nic);
		pci_iounmap(pdev, nic->csr);
		pci_pool_destroy(nic->cbs_pool);
		free_netdev(netdev);
		pci_release_regions(pdev);
		pci_disable_device(pdev);
		pci_set_drvdata(pdev, NULL);
	}
}

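/* Power-down helpers.  If Wake-on-LAN magic packets or ASF management
 * are active, the 82552 PHY is additionally put into reverse
 * auto-negotiation (renegotiating a slower, lower-power link while armed
 * for wake-up, as the register names below suggest) and the caller is
 * told to leave wake capability enabled. */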
       
#define E100_82552_SMARTSPEED   0x14   /* SmartSpeed Ctrl register */
#define E100_82552_REV_ANEG     0x0200 /* Reverse auto-negotiation */
#define E100_82552_ANEG_NOW     0x0400 /* Auto-negotiate now */
static void __e100_shutdown(struct pci_dev *pdev, bool *enable_wake)
{
	struct net_device *netdev = pci_get_drvdata(pdev);
	struct nic *nic = netdev_priv(netdev);

	if (netif_running(netdev))
		e100_down(nic);
	netif_device_detach(netdev);

	pci_save_state(pdev);

	if ((nic->flags & wol_magic) | e100_asf(nic)) {
		/* enable reverse auto-negotiation */
		if (nic->phy == phy_82552_v) {
			u16 smartspeed = mdio_read(netdev, nic->mii.phy_id,
			                           E100_82552_SMARTSPEED);

			mdio_write(netdev, nic->mii.phy_id,
			           E100_82552_SMARTSPEED, smartspeed |
			           E100_82552_REV_ANEG | E100_82552_ANEG_NOW);
		}
		*enable_wake = true;
	} else {
		*enable_wake = false;
	}

	pci_clear_master(pdev);
}

static int __e100_power_off(struct pci_dev *pdev, bool wake)
{
	if (wake)
		return pci_prepare_to_sleep(pdev);

	pci_wake_from_d3(pdev, false);
	pci_set_power_state(pdev, PCI_D3hot);

	return 0;
}

#ifdef CONFIG_PM
static int e100_suspend(struct pci_dev *pdev, pm_message_t state)
{
	bool wake;
	__e100_shutdown(pdev, &wake);
	return __e100_power_off(pdev, wake);
}

static int e100_resume(struct pci_dev *pdev)
{
	struct net_device *netdev = pci_get_drvdata(pdev);
	struct nic *nic = netdev_priv(netdev);

	pci_set_power_state(pdev, PCI_D0);
	pci_restore_state(pdev);
	/* ack any pending wake events, disable PME */
	pci_enable_wake(pdev, 0, 0);

	/* disable reverse auto-negotiation */
	if (nic->phy == phy_82552_v) {
		u16 smartspeed = mdio_read(netdev, nic->mii.phy_id,
		                           E100_82552_SMARTSPEED);

		mdio_write(netdev, nic->mii.phy_id,
		           E100_82552_SMARTSPEED,
		           smartspeed & ~(E100_82552_REV_ANEG));
	}

	netif_device_attach(netdev);
	if (netif_running(netdev))
		e100_up(nic);

	return 0;
}
#endif /* CONFIG_PM */

static void e100_shutdown(struct pci_dev *pdev)
{
	bool wake;
	__e100_shutdown(pdev, &wake);
	if (system_state == SYSTEM_POWER_OFF)
		__e100_power_off(pdev, wake);
}

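/* Note on the EtherCAT hooks below: when nic->ecdev is set, the AER
 * callbacks bail out early with -EBUSY.  Strictly, a negative errno is
 * not a pci_ers_result_t value; it is kept as-is here, but the intent is
 * clearly "leave a device owned by the master alone". */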
       
/* ------------------ PCI Error Recovery infrastructure  -------------- */
/**
 * e100_io_error_detected - called when PCI error is detected.
 * @pdev: Pointer to PCI device
 * @state: The current pci connection state
 */
static pci_ers_result_t e100_io_error_detected(struct pci_dev *pdev, pci_channel_state_t state)
{
	struct net_device *netdev = pci_get_drvdata(pdev);
	struct nic *nic = netdev_priv(netdev);

	if (nic->ecdev)
		return -EBUSY;

	netif_device_detach(netdev);

	if (state == pci_channel_io_perm_failure)
		return PCI_ERS_RESULT_DISCONNECT;

	if (netif_running(netdev))
		e100_down(nic);
	pci_disable_device(pdev);

	/* Request a slot reset. */
	return PCI_ERS_RESULT_NEED_RESET;
}

/**
 * e100_io_slot_reset - called after the pci bus has been reset.
 * @pdev: Pointer to PCI device
 *
 * Restart the card from scratch.
 */
static pci_ers_result_t e100_io_slot_reset(struct pci_dev *pdev)
{
	struct net_device *netdev = pci_get_drvdata(pdev);
	struct nic *nic = netdev_priv(netdev);

	if (nic->ecdev)
		return -EBUSY;

	if (pci_enable_device(pdev)) {
		pr_err("Cannot re-enable PCI device after reset\n");
		return PCI_ERS_RESULT_DISCONNECT;
	}
	pci_set_master(pdev);

	/* Only one device per card can do a reset */
	if (0 != PCI_FUNC(pdev->devfn))
		return PCI_ERS_RESULT_RECOVERED;
	e100_hw_reset(nic);
	e100_phy_init(nic);

	return PCI_ERS_RESULT_RECOVERED;
}

/**
 * e100_io_resume - resume normal operations
 * @pdev: Pointer to PCI device
 *
 * Resume normal operations after an error recovery
 * sequence has been completed.
 */
static void e100_io_resume(struct pci_dev *pdev)
{
	struct net_device *netdev = pci_get_drvdata(pdev);
	struct nic *nic = netdev_priv(netdev);

	/* ack any pending wake events, disable PME */
	pci_enable_wake(pdev, 0, 0);

	if (!nic->ecdev)
		netif_device_attach(netdev);
	if (nic->ecdev || netif_running(netdev)) {
		e100_open(netdev);
		if (!nic->ecdev)
			mod_timer(&nic->watchdog, jiffies);
	}
}

static const struct pci_error_handlers e100_err_handler = {
	.error_detected = e100_io_error_detected,
	.slot_reset = e100_io_slot_reset,
	.resume = e100_io_resume,
};

static struct pci_driver e100_driver = {
	.name =         DRV_NAME,
	.id_table =     e100_id_table,
	.probe =        e100_probe,
	.remove =       e100_remove,
#ifdef CONFIG_PM
	/* Power Management hooks */
	.suspend =      e100_suspend,
	.resume =       e100_resume,
#endif
	.shutdown =     e100_shutdown,
	.err_handler = &e100_err_handler,
};

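/* Module entry points.  The `debug` module parameter feeds the netif
 * message level as (1 << debug) - 1, so e.g. debug=3 enables the DRV,
 * PROBE and LINK message classes; something like `modprobe ec_e100
 * debug=3` would do, assuming the EtherCAT build installs this driver
 * under the ec_e100 module name (the name is set by the build system,
 * not in this file). */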
       
static int __init e100_init_module(void)
{
	if (((1 << debug) - 1) & NETIF_MSG_DRV) {
		pr_info("%s %s, %s\n", DRV_NAME, DRV_DESCRIPTION, DRV_VERSION);
		pr_info("%s\n", DRV_COPYRIGHT);
	}
	return pci_register_driver(&e100_driver);
}

static void __exit e100_cleanup_module(void)
{
	printk(KERN_INFO DRV_NAME " cleaning up module...\n");
	pci_unregister_driver(&e100_driver);
	printk(KERN_INFO DRV_NAME " module cleaned up.\n");
}

module_init(e100_init_module);
module_exit(e100_cleanup_module);