devices/e100-3.8-ethercat.c
branchstable-1.5
changeset 2584 0e3d989ff233
equal deleted inserted replaced
2583:fe5687a0a322 2584:0e3d989ff233
       
     1 /******************************************************************************
       
     2  *
       
     3  *  $Id$
       
     4  *
       
     5  *  Copyright (C) 2007-2012  Florian Pose, Ingenieurgemeinschaft IgH
       
     6  *
       
     7  *  This file is part of the IgH EtherCAT Master.
       
     8  *
       
     9  *  The IgH EtherCAT Master is free software; you can redistribute it and/or
       
    10  *  modify it under the terms of the GNU General Public License version 2, as
       
    11  *  published by the Free Software Foundation.
       
    12  *
       
    13  *  The IgH EtherCAT Master is distributed in the hope that it will be useful,
       
    14  *  but WITHOUT ANY WARRANTY; without even the implied warranty of
       
    15  *  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General
       
    16  *  Public License for more details.
       
    17  *
       
    18  *  You should have received a copy of the GNU General Public License along
       
    19  *  with the IgH EtherCAT Master; if not, write to the Free Software
       
    20  *  Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA  02110-1301  USA
       
    21  *
       
    22  *  ---
       
    23  *
       
    24  *  The license mentioned above concerns the source code only. Using the
       
    25  *  EtherCAT technology and brand is only permitted in compliance with the
       
    26  *  industrial property and similar rights of Beckhoff Automation GmbH.
       
    27  *
       
    28  *  ---
       
    29  *
       
    30  *  vim: noexpandtab
       
    31  *
       
    32  *****************************************************************************/
       
    33 
       
    34 /**
       
    35    \file
       
    36    EtherCAT driver for e100-compatible NICs.
       
    37 */
       
    38 
       
    39 /* Former documentation: */
       
    40 
       
    41 /*******************************************************************************
       
    42 
       
    43   Intel PRO/100 Linux driver
       
    44   Copyright(c) 1999 - 2006 Intel Corporation.
       
    45 
       
    46   This program is free software; you can redistribute it and/or modify it
       
    47   under the terms and conditions of the GNU General Public License,
       
    48   version 2, as published by the Free Software Foundation.
       
    49 
       
    50   This program is distributed in the hope it will be useful, but WITHOUT
       
    51   ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
       
    52   FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
       
    53   more details.
       
    54 
       
    55   You should have received a copy of the GNU General Public License along with
       
    56   this program; if not, write to the Free Software Foundation, Inc.,
       
    57   51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
       
    58 
       
    59   The full GNU General Public License is included in this distribution in
       
    60   the file called "COPYING".
       
    61 
       
    62   Contact Information:
       
    63   Linux NICS <linux.nics@intel.com>
       
    64   e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
       
    65   Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
       
    66 
       
    67 *******************************************************************************/
       
    68 
       
    69 /*
       
    70  *	e100.c: Intel(R) PRO/100 ethernet driver
       
    71  *
       
    72  *	(Re)written 2003 by scott.feldman@intel.com.  Based loosely on
       
    73  *	original e100 driver, but better described as a munging of
       
    74  *	e100, e1000, eepro100, tg3, 8139cp, and other drivers.
       
    75  *
       
    76  *	References:
       
    77  *		Intel 8255x 10/100 Mbps Ethernet Controller Family,
       
    78  *		Open Source Software Developers Manual,
       
    79  *		http://sourceforge.net/projects/e1000
       
    80  *
       
    81  *
       
    82  *	                      Theory of Operation
       
    83  *
       
    84  *	I.   General
       
    85  *
       
    86  *	The driver supports Intel(R) 10/100 Mbps PCI Fast Ethernet
       
    87  *	controller family, which includes the 82557, 82558, 82559, 82550,
       
    88  *	82551, and 82562 devices.  82558 and greater controllers
       
    89  *	integrate the Intel 82555 PHY.  The controllers are used in
       
    90  *	server and client network interface cards, as well as in
       
    91  *	LAN-On-Motherboard (LOM), CardBus, MiniPCI, and ICHx
       
    92  *	configurations.  8255x supports a 32-bit linear addressing
       
    93  *	mode and operates at 33Mhz PCI clock rate.
       
    94  *
       
    95  *	II.  Driver Operation
       
    96  *
       
    97  *	Memory-mapped mode is used exclusively to access the device's
       
    98  *	shared-memory structure, the Control/Status Registers (CSR). All
       
    99  *	setup, configuration, and control of the device, including queuing
       
   100  *	of Tx, Rx, and configuration commands is through the CSR.
       
   101  *	cmd_lock serializes accesses to the CSR command register.  cb_lock
       
   102  *	protects the shared Command Block List (CBL).
       
   103  *
       
   104  *	8255x is highly MII-compliant and all access to the PHY go
       
   105  *	through the Management Data Interface (MDI).  Consequently, the
       
   106  *	driver leverages the mii.c library shared with other MII-compliant
       
   107  *	devices.
       
   108  *
       
   109  *	Big- and Little-Endian byte order as well as 32- and 64-bit
       
   110  *	archs are supported.  Weak-ordered memory and non-cache-coherent
       
   111  *	archs are supported.
       
   112  *
       
   113  *	III. Transmit
       
   114  *
       
   115  *	A Tx skb is mapped and hangs off of a TCB.  TCBs are linked
       
   116  *	together in a fixed-size ring (CBL) thus forming the flexible mode
       
   117  *	memory structure.  A TCB marked with the suspend-bit indicates
       
   118  *	the end of the ring.  The last TCB processed suspends the
       
    119  *	controller, and the controller can be restarted by issuing a CU
       
   120  *	resume command to continue from the suspend point, or a CU start
       
   121  *	command to start at a given position in the ring.
       
   122  *
       
   123  *	Non-Tx commands (config, multicast setup, etc) are linked
       
   124  *	into the CBL ring along with Tx commands.  The common structure
       
   125  *	used for both Tx and non-Tx commands is the Command Block (CB).
       
   126  *
       
   127  *	cb_to_use is the next CB to use for queuing a command; cb_to_clean
       
   128  *	is the next CB to check for completion; cb_to_send is the first
       
   129  *	CB to start on in case of a previous failure to resume.  CB clean
       
   130  *	up happens in interrupt context in response to a CU interrupt.
       
   131  *	cbs_avail keeps track of number of free CB resources available.
       
   132  *
       
   133  * 	Hardware padding of short packets to minimum packet size is
       
   134  * 	enabled.  82557 pads with 7Eh, while the later controllers pad
       
   135  * 	with 00h.
       
   136  *
       
   137  *	IV.  Receive
       
   138  *
       
   139  *	The Receive Frame Area (RFA) comprises a ring of Receive Frame
       
   140  *	Descriptors (RFD) + data buffer, thus forming the simplified mode
       
   141  *	memory structure.  Rx skbs are allocated to contain both the RFD
       
   142  *	and the data buffer, but the RFD is pulled off before the skb is
       
   143  *	indicated.  The data buffer is aligned such that encapsulated
       
   144  *	protocol headers are u32-aligned.  Since the RFD is part of the
       
   145  *	mapped shared memory, and completion status is contained within
       
   146  *	the RFD, the RFD must be dma_sync'ed to maintain a consistent
       
   147  *	view from software and hardware.
       
   148  *
       
   149  *	In order to keep updates to the RFD link field from colliding with
       
   150  *	hardware writes to mark packets complete, we use the feature that
       
   151  *	hardware will not write to a size 0 descriptor and mark the previous
       
   152  *	packet as end-of-list (EL).   After updating the link, we remove EL
       
   153  *	and only then restore the size such that hardware may use the
       
   154  *	previous-to-end RFD.
       
   155  *
       
    156  *	Under typical operation, the receive unit (RU) is started once,
       
   157  *	and the controller happily fills RFDs as frames arrive.  If
       
   158  *	replacement RFDs cannot be allocated, or the RU goes non-active,
       
   159  *	the RU must be restarted.  Frame arrival generates an interrupt,
       
   160  *	and Rx indication and re-allocation happen in the same context,
       
   161  *	therefore no locking is required.  A software-generated interrupt
       
   162  *	is generated from the watchdog to recover from a failed allocation
       
   163  *	scenario where all Rx resources have been indicated and none re-
       
   164  *	placed.
       
   165  *
       
   166  *	V.   Miscellaneous
       
   167  *
       
   168  * 	VLAN offloading of tagging, stripping and filtering is not
       
   169  * 	supported, but driver will accommodate the extra 4-byte VLAN tag
       
   170  * 	for processing by upper layers.  Tx/Rx Checksum offloading is not
       
   171  * 	supported.  Tx Scatter/Gather is not supported.  Jumbo Frames is
       
   172  * 	not supported (hardware limitation).
       
   173  *
       
   174  * 	MagicPacket(tm) WoL support is enabled/disabled via ethtool.
       
   175  *
       
   176  * 	Thanks to JC (jchapman@katalix.com) for helping with
       
   177  * 	testing/troubleshooting the development driver.
       
   178  *
       
   179  * 	TODO:
       
   180  * 	o several entry points race with dev->close
       
   181  * 	o check for tx-no-resources/stop Q races with tx clean/wake Q
       
   182  *
       
   183  *	FIXES:
       
   184  * 2005/12/02 - Michael O'Donnell <Michael.ODonnell at stratus dot com>
       
   185  *	- Stratus87247: protect MDI control register manipulations
       
   186  * 2009/06/01 - Andreas Mohr <andi at lisas dot de>
       
   187  *      - add clean lowlevel I/O emulation for cards with MII-lacking PHYs
       
   188  */
       
   189 
       
   190 #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
       
   191 
       
   192 #include <linux/hardirq.h>
       
   193 #include <linux/interrupt.h>
       
   194 #include <linux/module.h>
       
   195 #include <linux/moduleparam.h>
       
   196 #include <linux/kernel.h>
       
   197 #include <linux/types.h>
       
   198 #include <linux/sched.h>
       
   199 #include <linux/slab.h>
       
   200 #include <linux/delay.h>
       
   201 #include <linux/init.h>
       
   202 #include <linux/pci.h>
       
   203 #include <linux/dma-mapping.h>
       
   204 #include <linux/dmapool.h>
       
   205 #include <linux/netdevice.h>
       
   206 #include <linux/etherdevice.h>
       
   207 #include <linux/mii.h>
       
   208 #include <linux/if_vlan.h>
       
   209 #include <linux/skbuff.h>
       
   210 #include <linux/ethtool.h>
       
   211 #include <linux/string.h>
       
   212 #include <linux/firmware.h>
       
   213 #include <linux/rtnetlink.h>
       
   214 #include <asm/unaligned.h>
       
   215 
       
   216 // EtherCAT includes
       
   217 #include "../globals.h"
       
   218 #include "ecdev.h"
       
   219 
       
   220 #define DRV_NAME		"ec_e100"
       
   221 #define DRV_EXT			"-NAPI"
       
   222 #define DRV_VERSION		"3.5.24-k2"DRV_EXT
       
   223 #define DRV_DESCRIPTION		"Intel(R) PRO/100 Network Driver"
       
   224 #define DRV_COPYRIGHT		"Copyright(c) 1999-2006 Intel Corporation"
       
   225 
       
   226 #define E100_WATCHDOG_PERIOD	(2 * HZ)
       
   227 #define E100_NAPI_WEIGHT	16
       
   228 
       
   229 #define FIRMWARE_D101M		"e100/d101m_ucode.bin"
       
   230 #define FIRMWARE_D101S		"e100/d101s_ucode.bin"
       
   231 #define FIRMWARE_D102E		"e100/d102e_ucode.bin"
       
   232 
       
   233 MODULE_DESCRIPTION(DRV_DESCRIPTION);
       
   234 MODULE_AUTHOR(DRV_COPYRIGHT);
       
   235 MODULE_LICENSE("GPL");
       
   236 MODULE_VERSION(DRV_VERSION);
       
   237 MODULE_FIRMWARE(FIRMWARE_D101M);
       
   238 MODULE_FIRMWARE(FIRMWARE_D101S);
       
   239 MODULE_FIRMWARE(FIRMWARE_D102E);
       
   240 
       
   241 MODULE_DESCRIPTION(DRV_DESCRIPTION);
       
   242 MODULE_AUTHOR("Florian Pose <fp@igh-essen.com>");
       
   243 MODULE_LICENSE("GPL");
       
   244 MODULE_VERSION(DRV_VERSION ", master " EC_MASTER_VERSION);
       
   245 
       
   246 void e100_ec_poll(struct net_device *);
       
   247 
       
   248 static int debug = 3;
       
   249 static int eeprom_bad_csum_allow = 0;
       
   250 static int use_io = 0;
       
   251 module_param(debug, int, 0);
       
   252 module_param(eeprom_bad_csum_allow, int, 0);
       
   253 module_param(use_io, int, 0);
       
   254 MODULE_PARM_DESC(debug, "Debug level (0=none,...,16=all)");
       
   255 MODULE_PARM_DESC(eeprom_bad_csum_allow, "Allow bad eeprom checksums");
       
   256 MODULE_PARM_DESC(use_io, "Force use of i/o access mode");
       
   257 
       
   258 #define INTEL_8255X_ETHERNET_DEVICE(device_id, ich) {\
       
   259 	PCI_VENDOR_ID_INTEL, device_id, PCI_ANY_ID, PCI_ANY_ID, \
       
   260 	PCI_CLASS_NETWORK_ETHERNET << 8, 0xFFFF00, ich }
       
   261 static DEFINE_PCI_DEVICE_TABLE(e100_id_table) = {
       
   262 	INTEL_8255X_ETHERNET_DEVICE(0x1029, 0),
       
   263 	INTEL_8255X_ETHERNET_DEVICE(0x1030, 0),
       
   264 	INTEL_8255X_ETHERNET_DEVICE(0x1031, 3),
       
   265 	INTEL_8255X_ETHERNET_DEVICE(0x1032, 3),
       
   266 	INTEL_8255X_ETHERNET_DEVICE(0x1033, 3),
       
   267 	INTEL_8255X_ETHERNET_DEVICE(0x1034, 3),
       
   268 	INTEL_8255X_ETHERNET_DEVICE(0x1038, 3),
       
   269 	INTEL_8255X_ETHERNET_DEVICE(0x1039, 4),
       
   270 	INTEL_8255X_ETHERNET_DEVICE(0x103A, 4),
       
   271 	INTEL_8255X_ETHERNET_DEVICE(0x103B, 4),
       
   272 	INTEL_8255X_ETHERNET_DEVICE(0x103C, 4),
       
   273 	INTEL_8255X_ETHERNET_DEVICE(0x103D, 4),
       
   274 	INTEL_8255X_ETHERNET_DEVICE(0x103E, 4),
       
   275 	INTEL_8255X_ETHERNET_DEVICE(0x1050, 5),
       
   276 	INTEL_8255X_ETHERNET_DEVICE(0x1051, 5),
       
   277 	INTEL_8255X_ETHERNET_DEVICE(0x1052, 5),
       
   278 	INTEL_8255X_ETHERNET_DEVICE(0x1053, 5),
       
   279 	INTEL_8255X_ETHERNET_DEVICE(0x1054, 5),
       
   280 	INTEL_8255X_ETHERNET_DEVICE(0x1055, 5),
       
   281 	INTEL_8255X_ETHERNET_DEVICE(0x1056, 5),
       
   282 	INTEL_8255X_ETHERNET_DEVICE(0x1057, 5),
       
   283 	INTEL_8255X_ETHERNET_DEVICE(0x1059, 0),
       
   284 	INTEL_8255X_ETHERNET_DEVICE(0x1064, 6),
       
   285 	INTEL_8255X_ETHERNET_DEVICE(0x1065, 6),
       
   286 	INTEL_8255X_ETHERNET_DEVICE(0x1066, 6),
       
   287 	INTEL_8255X_ETHERNET_DEVICE(0x1067, 6),
       
   288 	INTEL_8255X_ETHERNET_DEVICE(0x1068, 6),
       
   289 	INTEL_8255X_ETHERNET_DEVICE(0x1069, 6),
       
   290 	INTEL_8255X_ETHERNET_DEVICE(0x106A, 6),
       
   291 	INTEL_8255X_ETHERNET_DEVICE(0x106B, 6),
       
   292 	INTEL_8255X_ETHERNET_DEVICE(0x1091, 7),
       
   293 	INTEL_8255X_ETHERNET_DEVICE(0x1092, 7),
       
   294 	INTEL_8255X_ETHERNET_DEVICE(0x1093, 7),
       
   295 	INTEL_8255X_ETHERNET_DEVICE(0x1094, 7),
       
   296 	INTEL_8255X_ETHERNET_DEVICE(0x1095, 7),
       
   297 	INTEL_8255X_ETHERNET_DEVICE(0x10fe, 7),
       
   298 	INTEL_8255X_ETHERNET_DEVICE(0x1209, 0),
       
   299 	INTEL_8255X_ETHERNET_DEVICE(0x1229, 0),
       
   300 	INTEL_8255X_ETHERNET_DEVICE(0x2449, 2),
       
   301 	INTEL_8255X_ETHERNET_DEVICE(0x2459, 2),
       
   302 	INTEL_8255X_ETHERNET_DEVICE(0x245D, 2),
       
   303 	INTEL_8255X_ETHERNET_DEVICE(0x27DC, 7),
       
   304 	{ 0, }
       
   305 };
       
   306 
       
   307 // prevent from being loaded automatically
       
   308 //MODULE_DEVICE_TABLE(pci, e100_id_table);
       
   309 
       
   310 enum mac {
       
   311 	mac_82557_D100_A  = 0,
       
   312 	mac_82557_D100_B  = 1,
       
   313 	mac_82557_D100_C  = 2,
       
   314 	mac_82558_D101_A4 = 4,
       
   315 	mac_82558_D101_B0 = 5,
       
   316 	mac_82559_D101M   = 8,
       
   317 	mac_82559_D101S   = 9,
       
   318 	mac_82550_D102    = 12,
       
   319 	mac_82550_D102_C  = 13,
       
   320 	mac_82551_E       = 14,
       
   321 	mac_82551_F       = 15,
       
   322 	mac_82551_10      = 16,
       
   323 	mac_unknown       = 0xFF,
       
   324 };
       
   325 
       
   326 enum phy {
       
   327 	phy_100a     = 0x000003E0,
       
   328 	phy_100c     = 0x035002A8,
       
   329 	phy_82555_tx = 0x015002A8,
       
   330 	phy_nsc_tx   = 0x5C002000,
       
   331 	phy_82562_et = 0x033002A8,
       
   332 	phy_82562_em = 0x032002A8,
       
   333 	phy_82562_ek = 0x031002A8,
       
   334 	phy_82562_eh = 0x017002A8,
       
   335 	phy_82552_v  = 0xd061004d,
       
   336 	phy_unknown  = 0xFFFFFFFF,
       
   337 };
       
   338 
       
   339 /* CSR (Control/Status Registers) */
       
   340 struct csr {
       
   341 	struct {
       
   342 		u8 status;
       
   343 		u8 stat_ack;
       
   344 		u8 cmd_lo;
       
   345 		u8 cmd_hi;
       
   346 		u32 gen_ptr;
       
   347 	} scb;
       
   348 	u32 port;
       
   349 	u16 flash_ctrl;
       
   350 	u8 eeprom_ctrl_lo;
       
   351 	u8 eeprom_ctrl_hi;
       
   352 	u32 mdi_ctrl;
       
   353 	u32 rx_dma_count;
       
   354 };
       
   355 
       
   356 enum scb_status {
       
   357 	rus_no_res       = 0x08,
       
   358 	rus_ready        = 0x10,
       
   359 	rus_mask         = 0x3C,
       
   360 };
       
   361 
       
   362 enum ru_state  {
       
   363 	RU_SUSPENDED = 0,
       
   364 	RU_RUNNING	 = 1,
       
   365 	RU_UNINITIALIZED = -1,
       
   366 };
       
   367 
       
   368 enum scb_stat_ack {
       
   369 	stat_ack_not_ours    = 0x00,
       
   370 	stat_ack_sw_gen      = 0x04,
       
   371 	stat_ack_rnr         = 0x10,
       
   372 	stat_ack_cu_idle     = 0x20,
       
   373 	stat_ack_frame_rx    = 0x40,
       
   374 	stat_ack_cu_cmd_done = 0x80,
       
   375 	stat_ack_not_present = 0xFF,
       
   376 	stat_ack_rx = (stat_ack_sw_gen | stat_ack_rnr | stat_ack_frame_rx),
       
   377 	stat_ack_tx = (stat_ack_cu_idle | stat_ack_cu_cmd_done),
       
   378 };
       
   379 
       
   380 enum scb_cmd_hi {
       
   381 	irq_mask_none = 0x00,
       
   382 	irq_mask_all  = 0x01,
       
   383 	irq_sw_gen    = 0x02,
       
   384 };
       
   385 
       
   386 enum scb_cmd_lo {
       
   387 	cuc_nop        = 0x00,
       
   388 	ruc_start      = 0x01,
       
   389 	ruc_load_base  = 0x06,
       
   390 	cuc_start      = 0x10,
       
   391 	cuc_resume     = 0x20,
       
   392 	cuc_dump_addr  = 0x40,
       
   393 	cuc_dump_stats = 0x50,
       
   394 	cuc_load_base  = 0x60,
       
   395 	cuc_dump_reset = 0x70,
       
   396 };
       
   397 
       
   398 enum cuc_dump {
       
   399 	cuc_dump_complete       = 0x0000A005,
       
   400 	cuc_dump_reset_complete = 0x0000A007,
       
   401 };
       
   402 
       
   403 enum port {
       
   404 	software_reset  = 0x0000,
       
   405 	selftest        = 0x0001,
       
   406 	selective_reset = 0x0002,
       
   407 };
       
   408 
       
   409 enum eeprom_ctrl_lo {
       
   410 	eesk = 0x01,
       
   411 	eecs = 0x02,
       
   412 	eedi = 0x04,
       
   413 	eedo = 0x08,
       
   414 };
       
   415 
       
   416 enum mdi_ctrl {
       
   417 	mdi_write = 0x04000000,
       
   418 	mdi_read  = 0x08000000,
       
   419 	mdi_ready = 0x10000000,
       
   420 };
       
   421 
       
   422 enum eeprom_op {
       
   423 	op_write = 0x05,
       
   424 	op_read  = 0x06,
       
   425 	op_ewds  = 0x10,
       
   426 	op_ewen  = 0x13,
       
   427 };
       
   428 
       
   429 enum eeprom_offsets {
       
   430 	eeprom_cnfg_mdix  = 0x03,
       
   431 	eeprom_phy_iface  = 0x06,
       
   432 	eeprom_id         = 0x0A,
       
   433 	eeprom_config_asf = 0x0D,
       
   434 	eeprom_smbus_addr = 0x90,
       
   435 };
       
   436 
       
   437 enum eeprom_cnfg_mdix {
       
   438 	eeprom_mdix_enabled = 0x0080,
       
   439 };
       
   440 
       
   441 enum eeprom_phy_iface {
       
   442 	NoSuchPhy = 0,
       
   443 	I82553AB,
       
   444 	I82553C,
       
   445 	I82503,
       
   446 	DP83840,
       
   447 	S80C240,
       
   448 	S80C24,
       
   449 	I82555,
       
   450 	DP83840A = 10,
       
   451 };
       
   452 
       
   453 enum eeprom_id {
       
   454 	eeprom_id_wol = 0x0020,
       
   455 };
       
   456 
       
   457 enum eeprom_config_asf {
       
   458 	eeprom_asf = 0x8000,
       
   459 	eeprom_gcl = 0x4000,
       
   460 };
       
   461 
       
   462 enum cb_status {
       
   463 	cb_complete = 0x8000,
       
   464 	cb_ok       = 0x2000,
       
   465 };
       
   466 
       
   467 /**
       
   468  * cb_command - Command Block flags
       
   469  * @cb_tx_nc:  0: controler does CRC (normal),  1: CRC from skb memory
       
   470  */
       
   471 enum cb_command {
       
   472 	cb_nop    = 0x0000,
       
   473 	cb_iaaddr = 0x0001,
       
   474 	cb_config = 0x0002,
       
   475 	cb_multi  = 0x0003,
       
   476 	cb_tx     = 0x0004,
       
   477 	cb_ucode  = 0x0005,
       
   478 	cb_dump   = 0x0006,
       
   479 	cb_tx_sf  = 0x0008,
       
   480 	cb_tx_nc  = 0x0010,
       
   481 	cb_cid    = 0x1f00,
       
   482 	cb_i      = 0x2000,
       
   483 	cb_s      = 0x4000,
       
   484 	cb_el     = 0x8000,
       
   485 };
       
   486 
       
   487 struct rfd {
       
   488 	__le16 status;
       
   489 	__le16 command;
       
   490 	__le32 link;
       
   491 	__le32 rbd;
       
   492 	__le16 actual_size;
       
   493 	__le16 size;
       
   494 };
       
   495 
       
   496 struct rx {
       
   497 	struct rx *next, *prev;
       
   498 	struct sk_buff *skb;
       
   499 	dma_addr_t dma_addr;
       
   500 };
       
   501 
       
   502 #if defined(__BIG_ENDIAN_BITFIELD)
       
   503 #define X(a,b)	b,a
       
   504 #else
       
   505 #define X(a,b)	a,b
       
   506 #endif
       
   507 struct config {
       
   508 /*0*/	u8 X(byte_count:6, pad0:2);
       
   509 /*1*/	u8 X(X(rx_fifo_limit:4, tx_fifo_limit:3), pad1:1);
       
   510 /*2*/	u8 adaptive_ifs;
       
   511 /*3*/	u8 X(X(X(X(mwi_enable:1, type_enable:1), read_align_enable:1),
       
   512 	   term_write_cache_line:1), pad3:4);
       
   513 /*4*/	u8 X(rx_dma_max_count:7, pad4:1);
       
   514 /*5*/	u8 X(tx_dma_max_count:7, dma_max_count_enable:1);
       
   515 /*6*/	u8 X(X(X(X(X(X(X(late_scb_update:1, direct_rx_dma:1),
       
   516 	   tno_intr:1), cna_intr:1), standard_tcb:1), standard_stat_counter:1),
       
   517 	   rx_save_overruns : 1), rx_save_bad_frames : 1);
       
   518 /*7*/	u8 X(X(X(X(X(rx_discard_short_frames:1, tx_underrun_retry:2),
       
   519 	   pad7:2), rx_extended_rfd:1), tx_two_frames_in_fifo:1),
       
   520 	   tx_dynamic_tbd:1);
       
   521 /*8*/	u8 X(X(mii_mode:1, pad8:6), csma_disabled:1);
       
   522 /*9*/	u8 X(X(X(X(X(rx_tcpudp_checksum:1, pad9:3), vlan_arp_tco:1),
       
   523 	   link_status_wake:1), arp_wake:1), mcmatch_wake:1);
       
   524 /*10*/	u8 X(X(X(pad10:3, no_source_addr_insertion:1), preamble_length:2),
       
   525 	   loopback:2);
       
   526 /*11*/	u8 X(linear_priority:3, pad11:5);
       
   527 /*12*/	u8 X(X(linear_priority_mode:1, pad12:3), ifs:4);
       
   528 /*13*/	u8 ip_addr_lo;
       
   529 /*14*/	u8 ip_addr_hi;
       
   530 /*15*/	u8 X(X(X(X(X(X(X(promiscuous_mode:1, broadcast_disabled:1),
       
   531 	   wait_after_win:1), pad15_1:1), ignore_ul_bit:1), crc_16_bit:1),
       
   532 	   pad15_2:1), crs_or_cdt:1);
       
   533 /*16*/	u8 fc_delay_lo;
       
   534 /*17*/	u8 fc_delay_hi;
       
   535 /*18*/	u8 X(X(X(X(X(rx_stripping:1, tx_padding:1), rx_crc_transfer:1),
       
   536 	   rx_long_ok:1), fc_priority_threshold:3), pad18:1);
       
   537 /*19*/	u8 X(X(X(X(X(X(X(addr_wake:1, magic_packet_disable:1),
       
   538 	   fc_disable:1), fc_restop:1), fc_restart:1), fc_reject:1),
       
   539 	   full_duplex_force:1), full_duplex_pin:1);
       
   540 /*20*/	u8 X(X(X(pad20_1:5, fc_priority_location:1), multi_ia:1), pad20_2:1);
       
   541 /*21*/	u8 X(X(pad21_1:3, multicast_all:1), pad21_2:4);
       
   542 /*22*/	u8 X(X(rx_d102_mode:1, rx_vlan_drop:1), pad22:6);
       
   543 	u8 pad_d102[9];
       
   544 };
       
   545 
       
   546 #define E100_MAX_MULTICAST_ADDRS	64
       
   547 struct multi {
       
   548 	__le16 count;
       
   549 	u8 addr[E100_MAX_MULTICAST_ADDRS * ETH_ALEN + 2/*pad*/];
       
   550 };
       
   551 
       
   552 /* Important: keep total struct u32-aligned */
       
   553 #define UCODE_SIZE			134
       
   554 struct cb {
       
   555 	__le16 status;
       
   556 	__le16 command;
       
   557 	__le32 link;
       
   558 	union {
       
   559 		u8 iaaddr[ETH_ALEN];
       
   560 		__le32 ucode[UCODE_SIZE];
       
   561 		struct config config;
       
   562 		struct multi multi;
       
   563 		struct {
       
   564 			u32 tbd_array;
       
   565 			u16 tcb_byte_count;
       
   566 			u8 threshold;
       
   567 			u8 tbd_count;
       
   568 			struct {
       
   569 				__le32 buf_addr;
       
   570 				__le16 size;
       
   571 				u16 eol;
       
   572 			} tbd;
       
   573 		} tcb;
       
   574 		__le32 dump_buffer_addr;
       
   575 	} u;
       
   576 	struct cb *next, *prev;
       
   577 	dma_addr_t dma_addr;
       
   578 	struct sk_buff *skb;
       
   579 };
       
   580 
       
   581 enum loopback {
       
   582 	lb_none = 0, lb_mac = 1, lb_phy = 3,
       
   583 };
       
   584 
       
   585 struct stats {
       
   586 	__le32 tx_good_frames, tx_max_collisions, tx_late_collisions,
       
   587 		tx_underruns, tx_lost_crs, tx_deferred, tx_single_collisions,
       
   588 		tx_multiple_collisions, tx_total_collisions;
       
   589 	__le32 rx_good_frames, rx_crc_errors, rx_alignment_errors,
       
   590 		rx_resource_errors, rx_overrun_errors, rx_cdt_errors,
       
   591 		rx_short_frame_errors;
       
   592 	__le32 fc_xmt_pause, fc_rcv_pause, fc_rcv_unsupported;
       
   593 	__le16 xmt_tco_frames, rcv_tco_frames;
       
   594 	__le32 complete;
       
   595 };
       
   596 
       
   597 struct mem {
       
   598 	struct {
       
   599 		u32 signature;
       
   600 		u32 result;
       
   601 	} selftest;
       
   602 	struct stats stats;
       
   603 	u8 dump_buf[596];
       
   604 };
       
   605 
       
   606 struct param_range {
       
   607 	u32 min;
       
   608 	u32 max;
       
   609 	u32 count;
       
   610 };
       
   611 
       
   612 struct params {
       
   613 	struct param_range rfds;
       
   614 	struct param_range cbs;
       
   615 };
       
   616 
       
   617 struct nic {
       
   618 	/* Begin: frequently used values: keep adjacent for cache effect */
       
   619 	u32 msg_enable				____cacheline_aligned;
       
   620 	struct net_device *netdev;
       
   621 	struct pci_dev *pdev;
       
   622 	u16 (*mdio_ctrl)(struct nic *nic, u32 addr, u32 dir, u32 reg, u16 data);
       
   623 
       
   624 	struct rx *rxs				____cacheline_aligned;
       
   625 	struct rx *rx_to_use;
       
   626 	struct rx *rx_to_clean;
       
   627 	struct rfd blank_rfd;
       
   628 	enum ru_state ru_running;
       
   629 
       
   630 	spinlock_t cb_lock			____cacheline_aligned;
       
   631 	spinlock_t cmd_lock;
       
   632 	struct csr __iomem *csr;
       
   633 	enum scb_cmd_lo cuc_cmd;
       
   634 	unsigned int cbs_avail;
       
   635 	struct napi_struct napi;
       
   636 	struct cb *cbs;
       
   637 	struct cb *cb_to_use;
       
   638 	struct cb *cb_to_send;
       
   639 	struct cb *cb_to_clean;
       
   640 	__le16 tx_command;
       
   641 	/* End: frequently used values: keep adjacent for cache effect */
       
   642 
       
   643 	enum {
       
   644 		ich                = (1 << 0),
       
   645 		promiscuous        = (1 << 1),
       
   646 		multicast_all      = (1 << 2),
       
   647 		wol_magic          = (1 << 3),
       
   648 		ich_10h_workaround = (1 << 4),
       
   649 	} flags					____cacheline_aligned;
       
   650 
       
   651 	enum mac mac;
       
   652 	enum phy phy;
       
   653 	struct params params;
       
   654 	struct timer_list watchdog;
       
   655 	struct mii_if_info mii;
       
   656 	struct work_struct tx_timeout_task;
       
   657 	enum loopback loopback;
       
   658 
       
   659 	struct mem *mem;
       
   660 	dma_addr_t dma_addr;
       
   661 
       
   662 	struct pci_pool *cbs_pool;
       
   663 	dma_addr_t cbs_dma_addr;
       
   664 	u8 adaptive_ifs;
       
   665 	u8 tx_threshold;
       
   666 	u32 tx_frames;
       
   667 	u32 tx_collisions;
       
   668 
       
   669 	u32 tx_deferred;
       
   670 	u32 tx_single_collisions;
       
   671 	u32 tx_multiple_collisions;
       
   672 	u32 tx_fc_pause;
       
   673 	u32 tx_tco_frames;
       
   674 
       
   675 	u32 rx_fc_pause;
       
   676 	u32 rx_fc_unsupported;
       
   677 	u32 rx_tco_frames;
       
   678 	u32 rx_short_frame_errors;
       
   679 	u32 rx_over_length_errors;
       
   680 
       
   681 	u16 eeprom_wc;
       
   682 
       
   683 	__le16 eeprom[256];
       
   684 	spinlock_t mdio_lock;
       
   685 	const struct firmware *fw;
       
   686 	ec_device_t *ecdev;
       
   687 	unsigned long ec_watchdog_jiffies;
       
   688 };
       
   689 
       
   690 static inline void e100_write_flush(struct nic *nic)
       
   691 {
       
   692 	/* Flush previous PCI writes through intermediate bridges
       
   693 	 * by doing a benign read */
       
   694 	(void)ioread8(&nic->csr->scb.status);
       
   695 }
       
   696 
       
   697 static void e100_enable_irq(struct nic *nic)
       
   698 {
       
   699 	unsigned long flags;
       
   700 
       
   701 	if (nic->ecdev)
       
   702 		return;
       
   703 
       
   704 	spin_lock_irqsave(&nic->cmd_lock, flags);
       
   705 	iowrite8(irq_mask_none, &nic->csr->scb.cmd_hi);
       
   706 	e100_write_flush(nic);
       
   707 	spin_unlock_irqrestore(&nic->cmd_lock, flags);
       
   708 }
       
   709 
       
   710 static void e100_disable_irq(struct nic *nic)
       
   711 {
       
   712 	unsigned long flags = 0;
       
   713 
       
   714 	if (!nic->ecdev)
       
   715 		spin_lock_irqsave(&nic->cmd_lock, flags);
       
   716 	iowrite8(irq_mask_all, &nic->csr->scb.cmd_hi);
       
   717 	e100_write_flush(nic);
       
   718 	if (!nic->ecdev)
       
   719 		spin_unlock_irqrestore(&nic->cmd_lock, flags);
       
   720 }
       
   721 
       
/* Reset the controller: selective reset first to quiesce DMA, then a
 * full software reset, then re-mask the IRQ (reset unmasks it). */
static void e100_hw_reset(struct nic *nic)
{
	/* Put CU and RU into idle with a selective reset to get
	 * device off of PCI bus */
	iowrite32(selective_reset, &nic->csr->port);
	e100_write_flush(nic); udelay(20);

	/* Now fully reset device */
	iowrite32(software_reset, &nic->csr->port);
	e100_write_flush(nic); udelay(20);

	/* Mask off our interrupt line - it's unmasked after reset */
	e100_disable_irq(nic);
}
       
   736 
       
/* Run the controller's built-in self-test via the PORT register.
 * Returns 0 on success, -ETIMEDOUT if the test failed or never ran. */
static int e100_self_test(struct nic *nic)
{
	u32 dma_addr = nic->dma_addr + offsetof(struct mem, selftest);

	/* Passing the self-test is a pretty good indication
	 * that the device can DMA to/from host memory */

	/* Seed the result area so both outcomes are detectable:
	 * "never ran" (signature stays 0) and "ran and failed"
	 * (result stays non-zero). */
	nic->mem->selftest.signature = 0;
	nic->mem->selftest.result = 0xFFFFFFFF;

	iowrite32(selftest | dma_addr, &nic->csr->port);
	e100_write_flush(nic);
	/* Wait 10 msec for self-test to complete */
	msleep(10);

	/* Interrupts are enabled after self-test */
	e100_disable_irq(nic);

	/* Check results of self-test */
	if (nic->mem->selftest.result != 0) {
		netif_err(nic, hw, nic->netdev,
			  "Self-test failed: result=0x%08X\n",
			  nic->mem->selftest.result);
		return -ETIMEDOUT;
	}
	if (nic->mem->selftest.signature == 0) {
		netif_err(nic, hw, nic->netdev, "Self-test failed: timed out\n");
		return -ETIMEDOUT;
	}

	return 0;
}
       
   769 
       
   770 static void e100_eeprom_write(struct nic *nic, u16 addr_len, u16 addr, __le16 data)
       
   771 {
       
   772 	u32 cmd_addr_data[3];
       
   773 	u8 ctrl;
       
   774 	int i, j;
       
   775 
       
   776 	/* Three cmds: write/erase enable, write data, write/erase disable */
       
   777 	cmd_addr_data[0] = op_ewen << (addr_len - 2);
       
   778 	cmd_addr_data[1] = (((op_write << addr_len) | addr) << 16) |
       
   779 		le16_to_cpu(data);
       
   780 	cmd_addr_data[2] = op_ewds << (addr_len - 2);
       
   781 
       
   782 	/* Bit-bang cmds to write word to eeprom */
       
   783 	for (j = 0; j < 3; j++) {
       
   784 
       
   785 		/* Chip select */
       
   786 		iowrite8(eecs | eesk, &nic->csr->eeprom_ctrl_lo);
       
   787 		e100_write_flush(nic); udelay(4);
       
   788 
       
   789 		for (i = 31; i >= 0; i--) {
       
   790 			ctrl = (cmd_addr_data[j] & (1 << i)) ?
       
   791 				eecs | eedi : eecs;
       
   792 			iowrite8(ctrl, &nic->csr->eeprom_ctrl_lo);
       
   793 			e100_write_flush(nic); udelay(4);
       
   794 
       
   795 			iowrite8(ctrl | eesk, &nic->csr->eeprom_ctrl_lo);
       
   796 			e100_write_flush(nic); udelay(4);
       
   797 		}
       
   798 		/* Wait 10 msec for cmd to complete */
       
   799 		msleep(10);
       
   800 
       
   801 		/* Chip deselect */
       
   802 		iowrite8(0, &nic->csr->eeprom_ctrl_lo);
       
   803 		e100_write_flush(nic); udelay(4);
       
   804 	}
       
   805 };
       
   806 
       
   807 /* General technique stolen from the eepro100 driver - very clever */
       
   808 static __le16 e100_eeprom_read(struct nic *nic, u16 *addr_len, u16 addr)
       
   809 {
       
   810 	u32 cmd_addr_data;
       
   811 	u16 data = 0;
       
   812 	u8 ctrl;
       
   813 	int i;
       
   814 
       
   815 	cmd_addr_data = ((op_read << *addr_len) | addr) << 16;
       
   816 
       
   817 	/* Chip select */
       
   818 	iowrite8(eecs | eesk, &nic->csr->eeprom_ctrl_lo);
       
   819 	e100_write_flush(nic); udelay(4);
       
   820 
       
   821 	/* Bit-bang to read word from eeprom */
       
   822 	for (i = 31; i >= 0; i--) {
       
   823 		ctrl = (cmd_addr_data & (1 << i)) ? eecs | eedi : eecs;
       
   824 		iowrite8(ctrl, &nic->csr->eeprom_ctrl_lo);
       
   825 		e100_write_flush(nic); udelay(4);
       
   826 
       
   827 		iowrite8(ctrl | eesk, &nic->csr->eeprom_ctrl_lo);
       
   828 		e100_write_flush(nic); udelay(4);
       
   829 
       
   830 		/* Eeprom drives a dummy zero to EEDO after receiving
       
   831 		 * complete address.  Use this to adjust addr_len. */
       
   832 		ctrl = ioread8(&nic->csr->eeprom_ctrl_lo);
       
   833 		if (!(ctrl & eedo) && i > 16) {
       
   834 			*addr_len -= (i - 16);
       
   835 			i = 17;
       
   836 		}
       
   837 
       
   838 		data = (data << 1) | (ctrl & eedo ? 1 : 0);
       
   839 	}
       
   840 
       
   841 	/* Chip deselect */
       
   842 	iowrite8(0, &nic->csr->eeprom_ctrl_lo);
       
   843 	e100_write_flush(nic); udelay(4);
       
   844 
       
   845 	return cpu_to_le16(data);
       
   846 };
       
   847 
       
   848 /* Load entire EEPROM image into driver cache and validate checksum */
       
   849 static int e100_eeprom_load(struct nic *nic)
       
   850 {
       
   851 	u16 addr, addr_len = 8, checksum = 0;
       
   852 
       
   853 	/* Try reading with an 8-bit addr len to discover actual addr len */
       
   854 	e100_eeprom_read(nic, &addr_len, 0);
       
   855 	nic->eeprom_wc = 1 << addr_len;
       
   856 
       
   857 	for (addr = 0; addr < nic->eeprom_wc; addr++) {
       
   858 		nic->eeprom[addr] = e100_eeprom_read(nic, &addr_len, addr);
       
   859 		if (addr < nic->eeprom_wc - 1)
       
   860 			checksum += le16_to_cpu(nic->eeprom[addr]);
       
   861 	}
       
   862 
       
   863 	/* The checksum, stored in the last word, is calculated such that
       
   864 	 * the sum of words should be 0xBABA */
       
   865 	if (cpu_to_le16(0xBABA - checksum) != nic->eeprom[nic->eeprom_wc - 1]) {
       
   866 		netif_err(nic, probe, nic->netdev, "EEPROM corrupted\n");
       
   867 		if (!eeprom_bad_csum_allow)
       
   868 			return -EAGAIN;
       
   869 	}
       
   870 
       
   871 	return 0;
       
   872 }
       
   873 
       
   874 /* Save (portion of) driver EEPROM cache to device and update checksum */
       
   875 static int e100_eeprom_save(struct nic *nic, u16 start, u16 count)
       
   876 {
       
   877 	u16 addr, addr_len = 8, checksum = 0;
       
   878 
       
   879 	/* Try reading with an 8-bit addr len to discover actual addr len */
       
   880 	e100_eeprom_read(nic, &addr_len, 0);
       
   881 	nic->eeprom_wc = 1 << addr_len;
       
   882 
       
   883 	if (start + count >= nic->eeprom_wc)
       
   884 		return -EINVAL;
       
   885 
       
   886 	for (addr = start; addr < start + count; addr++)
       
   887 		e100_eeprom_write(nic, addr_len, addr, nic->eeprom[addr]);
       
   888 
       
   889 	/* The checksum, stored in the last word, is calculated such that
       
   890 	 * the sum of words should be 0xBABA */
       
   891 	for (addr = 0; addr < nic->eeprom_wc - 1; addr++)
       
   892 		checksum += le16_to_cpu(nic->eeprom[addr]);
       
   893 	nic->eeprom[nic->eeprom_wc - 1] = cpu_to_le16(0xBABA - checksum);
       
   894 	e100_eeprom_write(nic, addr_len, nic->eeprom_wc - 1,
       
   895 		nic->eeprom[nic->eeprom_wc - 1]);
       
   896 
       
   897 	return 0;
       
   898 }
       
   899 
       
   900 #define E100_WAIT_SCB_TIMEOUT 20000 /* we might have to wait 100ms!!! */
       
   901 #define E100_WAIT_SCB_FAST 20       /* delay like the old code */
       
/* Issue @cmd to the SCB, first waiting for the previous command to be
 * accepted (SCB command byte reads back zero).  Returns 0 on success,
 * -EAGAIN if the device never accepted the previous command.
 * NOTE(review): in EtherCAT mode (nic->ecdev) cmd_lock is skipped;
 * presumably the master guarantees single-context access - confirm. */
static int e100_exec_cmd(struct nic *nic, u8 cmd, dma_addr_t dma_addr)
{
	unsigned long flags = 0;
	unsigned int i;
	int err = 0;

	if (!nic->ecdev)
		spin_lock_irqsave(&nic->cmd_lock, flags);

	/* Previous command is accepted when SCB clears */
	for (i = 0; i < E100_WAIT_SCB_TIMEOUT; i++) {
		if (likely(!ioread8(&nic->csr->scb.cmd_lo)))
			break;
		cpu_relax();
		/* Busy-poll the first E100_WAIT_SCB_FAST iterations,
		 * then back off with small delays. */
		if (unlikely(i > E100_WAIT_SCB_FAST))
			udelay(5);
	}
	if (unlikely(i == E100_WAIT_SCB_TIMEOUT)) {
		err = -EAGAIN;
		goto err_unlock;
	}

	/* cuc_resume continues from the current CB chain position, so
	 * no general pointer needs to be loaded for it. */
	if (unlikely(cmd != cuc_resume))
		iowrite32(dma_addr, &nic->csr->scb.gen_ptr);
	iowrite8(cmd, &nic->csr->scb.cmd_lo);

err_unlock:
	if (!nic->ecdev)
		spin_unlock_irqrestore(&nic->cmd_lock, flags);

	return err;
}
       
   934 
       
/* Take the next free command block from the ring, let @cb_prepare fill
 * it in, link it to the hardware with correct suspend-bit ordering, and
 * kick the command unit.  Returns 0 on success, -ENOMEM if no CB is
 * free, or -ENOSPC if this call consumed the last free CB (the command
 * is still queued in that case). */
static int e100_exec_cb(struct nic *nic, struct sk_buff *skb,
	void (*cb_prepare)(struct nic *, struct cb *, struct sk_buff *))
{
	struct cb *cb;
	unsigned long flags = 0;
	int err = 0;

	if (!nic->ecdev)
		spin_lock_irqsave(&nic->cb_lock, flags);

	if (unlikely(!nic->cbs_avail)) {
		err = -ENOMEM;
		goto err_unlock;
	}

	/* Claim the next CB in the ring for this command. */
	cb = nic->cb_to_use;
	nic->cb_to_use = cb->next;
	nic->cbs_avail--;
	cb->skb = skb;

	if (unlikely(!nic->cbs_avail))
		err = -ENOSPC;

	cb_prepare(nic, cb, skb);

	/* Order is important otherwise we'll be in a race with h/w:
	 * set S-bit in current first, then clear S-bit in previous. */
	cb->command |= cpu_to_le16(cb_s);
	wmb();
	cb->prev->command &= cpu_to_le16(~cb_s);

	/* Hand all not-yet-submitted CBs to the command unit. */
	while (nic->cb_to_send != nic->cb_to_use) {
		if (unlikely(e100_exec_cmd(nic, nic->cuc_cmd,
			nic->cb_to_send->dma_addr))) {
			/* Ok, here's where things get sticky.  It's
			 * possible that we can't schedule the command
			 * because the controller is too busy, so
			 * let's just queue the command and try again
			 * when another command is scheduled. */
			if (err == -ENOSPC) {
				//request a reset
				schedule_work(&nic->tx_timeout_task);
			}
			break;
		} else {
			/* After the first start, subsequent CBs are
			 * picked up with a cheaper resume. */
			nic->cuc_cmd = cuc_resume;
			nic->cb_to_send = nic->cb_to_send->next;
		}
	}

err_unlock:
	if (!nic->ecdev)
		spin_unlock_irqrestore(&nic->cb_lock, flags);

	return err;
}
       
   991 
       
   992 static int mdio_read(struct net_device *netdev, int addr, int reg)
       
   993 {
       
   994 	struct nic *nic = netdev_priv(netdev);
       
   995 	return nic->mdio_ctrl(nic, addr, mdi_read, reg, 0);
       
   996 }
       
   997 
       
   998 static void mdio_write(struct net_device *netdev, int addr, int reg, int data)
       
   999 {
       
  1000 	struct nic *nic = netdev_priv(netdev);
       
  1001 
       
  1002 	nic->mdio_ctrl(nic, addr, mdi_write, reg, data);
       
  1003 }
       
  1004 
       
  1005 /* the standard mdio_ctrl() function for usual MII-compliant hardware */
       
/* Perform one MDI transaction on real MII-compliant hardware: wait for
 * the Ready bit, write the control word, then poll until the operation
 * completes.  Returns the data read (or echoed) - a timeout returns 0
 * since the interface has no way to signal an error. */
static u16 mdio_ctrl_hw(struct nic *nic, u32 addr, u32 dir, u32 reg, u16 data)
{
	u32 data_out = 0;
	unsigned int i;
	unsigned long flags = 0;


	/*
	 * Stratus87247: we shouldn't be writing the MDI control
	 * register until the Ready bit shows True.  Also, since
	 * manipulation of the MDI control registers is a multi-step
	 * procedure it should be done under lock.
	 */
	if (!nic->ecdev)
		spin_lock_irqsave(&nic->mdio_lock, flags);
	for (i = 100; i; --i) {
		if (ioread32(&nic->csr->mdi_ctrl) & mdi_ready)
			break;
		udelay(20);
	}
	if (unlikely(!i)) {
		netdev_err(nic->netdev, "e100.mdio_ctrl won't go Ready\n");
		if (!nic->ecdev)
			spin_unlock_irqrestore(&nic->mdio_lock, flags);
		return 0;		/* No way to indicate timeout error */
	}
	iowrite32((reg << 16) | (addr << 21) | dir | data, &nic->csr->mdi_ctrl);

	/* Poll up to ~2ms for the transaction to complete. */
	for (i = 0; i < 100; i++) {
		udelay(20);
		if ((data_out = ioread32(&nic->csr->mdi_ctrl)) & mdi_ready)
			break;
	}
	if (!nic->ecdev)
		spin_unlock_irqrestore(&nic->mdio_lock, flags);
	netif_printk(nic, hw, KERN_DEBUG, nic->netdev,
		     "%s:addr=%d, reg=%d, data_in=0x%04X, data_out=0x%04X\n",
		     dir == mdi_read ? "READ" : "WRITE",
		     addr, reg, data, data_out);
	return (u16)data_out;
}
       
  1047 
       
  1048 /* slightly tweaked mdio_ctrl() function for phy_82552_v specifics */
       
  1049 static u16 mdio_ctrl_phy_82552_v(struct nic *nic,
       
  1050 				 u32 addr,
       
  1051 				 u32 dir,
       
  1052 				 u32 reg,
       
  1053 				 u16 data)
       
  1054 {
       
  1055 	if ((reg == MII_BMCR) && (dir == mdi_write)) {
       
  1056 		if (data & (BMCR_ANRESTART | BMCR_ANENABLE)) {
       
  1057 			u16 advert = mdio_read(nic->netdev, nic->mii.phy_id,
       
  1058 							MII_ADVERTISE);
       
  1059 
       
  1060 			/*
       
  1061 			 * Workaround Si issue where sometimes the part will not
       
  1062 			 * autoneg to 100Mbps even when advertised.
       
  1063 			 */
       
  1064 			if (advert & ADVERTISE_100FULL)
       
  1065 				data |= BMCR_SPEED100 | BMCR_FULLDPLX;
       
  1066 			else if (advert & ADVERTISE_100HALF)
       
  1067 				data |= BMCR_SPEED100;
       
  1068 		}
       
  1069 	}
       
  1070 	return mdio_ctrl_hw(nic, addr, dir, reg, data);
       
  1071 }
       
  1072 
       
  1073 /* Fully software-emulated mdio_ctrl() function for cards without
       
  1074  * MII-compliant PHYs.
       
  1075  * For now, this is mainly geared towards 80c24 support; in case of further
       
  1076  * requirements for other types (i82503, ...?) either extend this mechanism
       
  1077  * or split it, whichever is cleaner.
       
  1078  */
       
static u16 mdio_ctrl_phy_mii_emulated(struct nic *nic,
				      u32 addr,
				      u32 dir,
				      u32 reg,
				      u16 data)
{
	/* might need to allocate a netdev_priv'ed register array eventually
	 * to be able to record state changes, but for now
	 * some fully hardcoded register handling ought to be ok I guess. */

	if (dir == mdi_read) {
		switch (reg) {
		case MII_BMCR:
			/* Auto-negotiation, right? */
			return  BMCR_ANENABLE |
				BMCR_FULLDPLX;
		case MII_BMSR:
			return	BMSR_LSTATUS /* for mii_link_ok() */ |
				BMSR_ANEGCAPABLE |
				BMSR_10FULL;
		case MII_ADVERTISE:
			/* 80c24 is a "combo card" PHY, right? */
			return	ADVERTISE_10HALF |
				ADVERTISE_10FULL;
		default:
			/* Anything else is not modelled; log and return
			 * all-ones like a missing PHY would. */
			netif_printk(nic, hw, KERN_DEBUG, nic->netdev,
				     "%s:addr=%d, reg=%d, data=0x%04X: unimplemented emulation!\n",
				     dir == mdi_read ? "READ" : "WRITE",
				     addr, reg, data);
			return 0xFFFF;
		}
	} else {
		/* Writes are not modelled at all yet. */
		switch (reg) {
		default:
			netif_printk(nic, hw, KERN_DEBUG, nic->netdev,
				     "%s:addr=%d, reg=%d, data=0x%04X: unimplemented emulation!\n",
				     dir == mdi_read ? "READ" : "WRITE",
				     addr, reg, data);
			return 0xFFFF;
		}
	}
}
       
  1121 static inline int e100_phy_supports_mii(struct nic *nic)
       
  1122 {
       
  1123 	/* for now, just check it by comparing whether we
       
  1124 	   are using MII software emulation.
       
  1125 	*/
       
  1126 	return (nic->mdio_ctrl != mdio_ctrl_phy_mii_emulated);
       
  1127 }
       
  1128 
       
/* Populate nic with default parameters, MAC-revision detection, the
 * TX command/RFD templates and the mii library callbacks. */
static void e100_get_defaults(struct nic *nic)
{
	struct param_range rfds = { .min = 16, .max = 256, .count = 256 };
	struct param_range cbs  = { .min = 64, .max = 256, .count = 128 };

	/* MAC type is encoded as rev ID; exception: ICH is treated as 82559 */
	nic->mac = (nic->flags & ich) ? mac_82559_D101M : nic->pdev->revision;
	if (nic->mac == mac_unknown)
		nic->mac = mac_82557_D100_A;

	nic->params.rfds = rfds;
	nic->params.cbs = cbs;

	/* Quadwords to DMA into FIFO before starting frame transmit */
	nic->tx_threshold = 0xE0;

	/* no interrupt for every tx completion, delay = 256us if not 557 */
	nic->tx_command = cpu_to_le16(cb_tx | cb_tx_sf |
		((nic->mac >= mac_82558_D101_A4) ? cb_cid : cb_i));

	/* Template for a freshly allocated RFD */
	nic->blank_rfd.command = 0;
	nic->blank_rfd.rbd = cpu_to_le32(0xFFFFFFFF);
	nic->blank_rfd.size = cpu_to_le16(VLAN_ETH_FRAME_LEN + ETH_FCS_LEN);

	/* MII setup */
	nic->mii.phy_id_mask = 0x1F;
	nic->mii.reg_num_mask = 0x1F;
	nic->mii.dev = nic->netdev;
	nic->mii.mdio_read = mdio_read;
	nic->mii.mdio_write = mdio_write;
}
       
  1161 
       
/* Fill in a cb_config command block that programs the controller's
 * configure block (byte-indexed hardware layout) from the current
 * driver state: duplex, promiscuity, flow control, WoL, and the
 * MAC-revision-dependent extended features. */
static void e100_configure(struct nic *nic, struct cb *cb, struct sk_buff *skb)
{
	struct config *config = &cb->u.config;
	u8 *c = (u8 *)config;
	struct net_device *netdev = nic->netdev;

	cb->command = cpu_to_le16(cb_config);

	memset(config, 0, sizeof(struct config));

	config->byte_count = 0x16;		/* bytes in this struct */
	config->rx_fifo_limit = 0x8;		/* bytes in FIFO before DMA */
	config->direct_rx_dma = 0x1;		/* reserved */
	config->standard_tcb = 0x1;		/* 1=standard, 0=extended */
	config->standard_stat_counter = 0x1;	/* 1=standard, 0=extended */
	config->rx_discard_short_frames = 0x1;	/* 1=discard, 0=pass */
	config->tx_underrun_retry = 0x3;	/* # of underrun retries */
	if (e100_phy_supports_mii(nic))
		config->mii_mode = 1;           /* 1=MII mode, 0=i82503 mode */
	config->pad10 = 0x6;
	config->no_source_addr_insertion = 0x1;	/* 1=no, 0=yes */
	config->preamble_length = 0x2;		/* 0=1, 1=3, 2=7, 3=15 bytes */
	config->ifs = 0x6;			/* x16 = inter frame spacing */
	config->ip_addr_hi = 0xF2;		/* ARP IP filter - not used */
	config->pad15_1 = 0x1;
	config->pad15_2 = 0x1;
	config->crs_or_cdt = 0x0;		/* 0=CRS only, 1=CRS or CDT */
	config->fc_delay_hi = 0x40;		/* time delay for fc frame */
	config->tx_padding = 0x1;		/* 1=pad short frames */
	config->fc_priority_threshold = 0x7;	/* 7=priority fc disabled */
	config->pad18 = 0x1;
	config->full_duplex_pin = 0x1;		/* 1=examine FDX# pin */
	config->pad20_1 = 0x1F;
	config->fc_priority_location = 0x1;	/* 1=byte#31, 0=byte#19 */
	config->pad21_1 = 0x5;

	config->adaptive_ifs = nic->adaptive_ifs;
	config->loopback = nic->loopback;

	if (nic->mii.force_media && nic->mii.full_duplex)
		config->full_duplex_force = 0x1;	/* 1=force, 0=auto */

	if (nic->flags & promiscuous || nic->loopback) {
		config->rx_save_bad_frames = 0x1;	/* 1=save, 0=discard */
		config->rx_discard_short_frames = 0x0;	/* 1=discard, 0=save */
		config->promiscuous_mode = 0x1;		/* 1=on, 0=off */
	}

	if (unlikely(netdev->features & NETIF_F_RXFCS))
		config->rx_crc_transfer = 0x1;	/* 1=save, 0=discard */

	if (nic->flags & multicast_all)
		config->multicast_all = 0x1;		/* 1=accept, 0=no */

	/* disable WoL when up; in EtherCAT mode WoL is always disabled */
	if (nic->ecdev || 
			(netif_running(nic->netdev) || !(nic->flags & wol_magic)))
		config->magic_packet_disable = 0x1;	/* 1=off, 0=on */

	if (nic->mac >= mac_82558_D101_A4) {
		config->fc_disable = 0x1;	/* 1=Tx fc off, 0=Tx fc on */
		config->mwi_enable = 0x1;	/* 1=enable, 0=disable */
		config->standard_tcb = 0x0;	/* 1=standard, 0=extended */
		config->rx_long_ok = 0x1;	/* 1=VLANs ok, 0=standard */
		if (nic->mac >= mac_82559_D101M) {
			config->tno_intr = 0x1;		/* TCO stats enable */
			/* Enable TCO in extended config */
			if (nic->mac >= mac_82551_10) {
				config->byte_count = 0x20; /* extended bytes */
				config->rx_d102_mode = 0x1; /* GMRC for TCO */
			}
		} else {
			config->standard_stat_counter = 0x0;
		}
	}

	if (netdev->features & NETIF_F_RXALL) {
		config->rx_save_overruns = 0x1; /* 1=save, 0=discard */
		config->rx_save_bad_frames = 0x1;       /* 1=save, 0=discard */
		config->rx_discard_short_frames = 0x0;  /* 1=discard, 0=save */
	}

	netif_printk(nic, hw, KERN_DEBUG, nic->netdev,
		     "[00-07]=%02X:%02X:%02X:%02X:%02X:%02X:%02X:%02X\n",
		     c[0], c[1], c[2], c[3], c[4], c[5], c[6], c[7]);
	netif_printk(nic, hw, KERN_DEBUG, nic->netdev,
		     "[08-15]=%02X:%02X:%02X:%02X:%02X:%02X:%02X:%02X\n",
		     c[8], c[9], c[10], c[11], c[12], c[13], c[14], c[15]);
	netif_printk(nic, hw, KERN_DEBUG, nic->netdev,
		     "[16-23]=%02X:%02X:%02X:%02X:%02X:%02X:%02X:%02X\n",
		     c[16], c[17], c[18], c[19], c[20], c[21], c[22], c[23]);
}
       
  1254 
       
  1255 /*************************************************************************
       
  1256 *  CPUSaver parameters
       
  1257 *
       
  1258 *  All CPUSaver parameters are 16-bit literals that are part of a
       
  1259 *  "move immediate value" instruction.  By changing the value of
       
  1260 *  the literal in the instruction before the code is loaded, the
       
  1261 *  driver can change the algorithm.
       
  1262 *
       
  1263 *  INTDELAY - This loads the dead-man timer with its initial value.
       
  1264 *    When this timer expires the interrupt is asserted, and the
       
  1265 *    timer is reset each time a new packet is received.  (see
       
  1266 *    BUNDLEMAX below to set the limit on number of chained packets)
       
  1267 *    The current default is 0x600 or 1536.  Experiments show that
       
*    the value should probably stay within the 0x200 - 0x1000 range.
       
  1269 *
       
  1270 *  BUNDLEMAX -
       
  1271 *    This sets the maximum number of frames that will be bundled.  In
       
  1272 *    some situations, such as the TCP windowing algorithm, it may be
       
  1273 *    better to limit the growth of the bundle size than let it go as
       
  1274 *    high as it can, because that could cause too much added latency.
       
  1275 *    The default is six, because this is the number of packets in the
       
  1276 *    default TCP window size.  A value of 1 would make CPUSaver indicate
       
  1277 *    an interrupt for every frame received.  If you do not want to put
       
  1278 *    a limit on the bundle size, set this value to xFFFF.
       
  1279 *
       
  1280 *  BUNDLESMALL -
       
  1281 *    This contains a bit-mask describing the minimum size frame that
       
  1282 *    will be bundled.  The default masks the lower 7 bits, which means
       
  1283 *    that any frame less than 128 bytes in length will not be bundled,
       
  1284 *    but will instead immediately generate an interrupt.  This does
       
  1285 *    not affect the current bundle in any way.  Any frame that is 128
       
  1286 *    bytes or large will be bundled normally.  This feature is meant
       
  1287 *    to provide immediate indication of ACK frames in a TCP environment.
       
  1288 *    Customers were seeing poor performance when a machine with CPUSaver
       
  1289 *    enabled was sending but not receiving.  The delay introduced when
       
  1290 *    the ACKs were received was enough to reduce total throughput, because
       
  1291 *    the sender would sit idle until the ACK was finally seen.
       
  1292 *
       
  1293 *    The current default is 0xFF80, which masks out the lower 7 bits.
       
  1294 *    This means that any frame which is x7F (127) bytes or smaller
       
  1295 *    will cause an immediate interrupt.  Because this value must be a
       
  1296 *    bit mask, there are only a few valid values that can be used.  To
       
  1297 *    turn this feature off, the driver can write the value xFFFF to the
       
  1298 *    lower word of this instruction (in the same way that the other
       
  1299 *    parameters are used).  Likewise, a value of 0xF800 (2047) would
       
  1300 *    cause an interrupt to be generated for every frame, because all
       
  1301 *    standard Ethernet frames are <= 2047 bytes in length.
       
  1302 *************************************************************************/
       
  1303 
       
  1304 /* if you wish to disable the ucode functionality, while maintaining the
       
  1305  * workarounds it provides, set the following defines to:
       
  1306  * BUNDLESMALL 0
       
  1307  * BUNDLEMAX 1
       
  1308  * INTDELAY 1
       
  1309  */
       
  1310 #define BUNDLESMALL 1
       
  1311 #define BUNDLEMAX (u16)6
       
  1312 #define INTDELAY (u16)1536 /* 0x600 */
       
  1313 
       
  1314 /* Initialize firmware */
       
/* Select, load and validate the microcode image for this MAC revision.
 * Returns the firmware on success, NULL if no ucode is needed (or the
 * optional CPUSaver ucode could not be loaded), or an ERR_PTR() when a
 * required image fails to load or validate. */
static const struct firmware *e100_request_firmware(struct nic *nic)
{
	const char *fw_name;
	const struct firmware *fw = nic->fw;
	u8 timer, bundle, min_size;
	int err = 0;
	bool required = false;

	/* do not load u-code for ICH devices */
	if (nic->flags & ich)
		return NULL;

	/* Search for ucode match against h/w revision
	 *
	 * Based on comments in the source code for the FreeBSD fxp
	 * driver, the FIRMWARE_D102E ucode includes both CPUSaver and
	 *
	 *    "fixes for bugs in the B-step hardware (specifically, bugs
	 *     with Inline Receive)."
	 *
	 * So we must fail if it cannot be loaded.
	 *
	 * The other microcode files are only required for the optional
	 * CPUSaver feature.  Nice to have, but no reason to fail.
	 */
	if (nic->mac == mac_82559_D101M) {
		fw_name = FIRMWARE_D101M;
	} else if (nic->mac == mac_82559_D101S) {
		fw_name = FIRMWARE_D101S;
	} else if (nic->mac == mac_82551_F || nic->mac == mac_82551_10) {
		fw_name = FIRMWARE_D102E;
		required = true;
	} else { /* No ucode on other devices */
		return NULL;
	}

	/* If the firmware has not previously been loaded, request a pointer
	 * to it. If it was previously loaded, we are reinitializing the
	 * adapter, possibly in a resume from hibernate, in which case
	 * request_firmware() cannot be used.
	 */
	if (!fw)
		err = request_firmware(&fw, fw_name, &nic->pdev->dev);

	if (err) {
		if (required) {
			netif_err(nic, probe, nic->netdev,
				  "Failed to load firmware \"%s\": %d\n",
				  fw_name, err);
			return ERR_PTR(err);
		} else {
			netif_info(nic, probe, nic->netdev,
				   "CPUSaver disabled. Needs \"%s\": %d\n",
				   fw_name, err);
			return NULL;
		}
	}

	/* Firmware should be precisely UCODE_SIZE (words) plus three bytes
	   indicating the offsets for BUNDLESMALL, BUNDLEMAX, INTDELAY */
	if (fw->size != UCODE_SIZE * 4 + 3) {
		netif_err(nic, probe, nic->netdev,
			  "Firmware \"%s\" has wrong size %zu\n",
			  fw_name, fw->size);
		release_firmware(fw);
		return ERR_PTR(-EINVAL);
	}

	/* Read timer, bundle and min_size from end of firmware blob */
	timer = fw->data[UCODE_SIZE * 4];
	bundle = fw->data[UCODE_SIZE * 4 + 1];
	min_size = fw->data[UCODE_SIZE * 4 + 2];

	/* The three trailing bytes are ucode-word indices; reject any
	 * that would patch outside the image. */
	if (timer >= UCODE_SIZE || bundle >= UCODE_SIZE ||
	    min_size >= UCODE_SIZE) {
		netif_err(nic, probe, nic->netdev,
			  "\"%s\" has bogus offset values (0x%x,0x%x,0x%x)\n",
			  fw_name, timer, bundle, min_size);
		release_firmware(fw);
		return ERR_PTR(-EINVAL);
	}

	/* OK, firmware is validated and ready to use. Save a pointer
	 * to it in the nic */
	nic->fw = fw;
	return fw;
}
       
  1402 
       
/* cb_prepare callback that copies the validated ucode image into a
 * command block and patches in the CPUSaver tunables (INTDELAY,
 * BUNDLEMAX, BUNDLESMALL) at the offsets the firmware blob names. */
static void e100_setup_ucode(struct nic *nic, struct cb *cb,
			     struct sk_buff *skb)
{
	const struct firmware *fw = (void *)skb;
	u8 timer, bundle, min_size;

	/* It's not a real skb; we just abused the fact that e100_exec_cb
	   will pass it through to here... */
	cb->skb = NULL;

	/* firmware is stored as little endian already */
	memcpy(cb->u.ucode, fw->data, UCODE_SIZE * 4);

	/* Read timer, bundle and min_size from end of firmware blob */
	timer = fw->data[UCODE_SIZE * 4];
	bundle = fw->data[UCODE_SIZE * 4 + 1];
	min_size = fw->data[UCODE_SIZE * 4 + 2];

	/* Insert user-tunable settings in cb->u.ucode: each parameter is
	 * the low 16 bits of a "move immediate" instruction word. */
	cb->u.ucode[timer] &= cpu_to_le32(0xFFFF0000);
	cb->u.ucode[timer] |= cpu_to_le32(INTDELAY);
	cb->u.ucode[bundle] &= cpu_to_le32(0xFFFF0000);
	cb->u.ucode[bundle] |= cpu_to_le32(BUNDLEMAX);
	cb->u.ucode[min_size] &= cpu_to_le32(0xFFFF0000);
	cb->u.ucode[min_size] |= cpu_to_le32((BUNDLESMALL) ? 0xFFFF : 0xFF80);

	cb->command = cpu_to_le16(cb_ucode | cb_el);
}
       
  1431 
       
/* Download the CPUSaver microcode to the controller and busy-wait until
 * the command block completes.  Returns 0 on success (including the case
 * where no microcode is required), negative error code otherwise. */
static inline int e100_load_ucode_wait(struct nic *nic)
{
	const struct firmware *fw;
	int err = 0, counter = 50;
	struct cb *cb = nic->cb_to_clean;

	fw = e100_request_firmware(nic);
	/* If it's NULL, then no ucode is required; note PTR_ERR(NULL) is 0,
	 * so this deliberately returns success in that case */
	if (!fw || IS_ERR(fw))
		return PTR_ERR(fw);

	/* e100_setup_ucode() receives the firmware pointer via the skb
	 * argument of the CB callback. */
	if ((err = e100_exec_cb(nic, (void *)fw, e100_setup_ucode)))
		netif_err(nic, probe, nic->netdev,
			  "ucode cmd failed with error %d\n", err);

	/* must restart cuc */
	nic->cuc_cmd = cuc_start;

	/* wait for completion */
	e100_write_flush(nic);
	udelay(10);

	/* wait for possibly (ouch) 500ms (50 polls x 10ms) */
	while (!(cb->status & cpu_to_le16(cb_complete))) {
		msleep(10);
		if (!--counter) break;
	}

	/* ack any interrupts, something could have been set */
	iowrite8(~0, &nic->csr->scb.stat_ack);

	/* if the command failed, or is not OK, notify and return */
	if (!counter || !(cb->status & cpu_to_le16(cb_ok))) {
		netif_err(nic, probe, nic->netdev, "ucode load failed\n");
		err = -EPERM;
	}

	return err;
}
       
  1471 
       
  1472 static void e100_setup_iaaddr(struct nic *nic, struct cb *cb,
       
  1473 	struct sk_buff *skb)
       
  1474 {
       
  1475 	cb->command = cpu_to_le16(cb_iaaddr);
       
  1476 	memcpy(cb->u.iaaddr, nic->netdev->dev_addr, ETH_ALEN);
       
  1477 }
       
  1478 
       
  1479 static void e100_dump(struct nic *nic, struct cb *cb, struct sk_buff *skb)
       
  1480 {
       
  1481 	cb->command = cpu_to_le16(cb_dump);
       
  1482 	cb->u.dump_buffer_addr = cpu_to_le32(nic->dma_addr +
       
  1483 		offsetof(struct mem, dump_buf));
       
  1484 }
       
  1485 
       
  1486 static int e100_phy_check_without_mii(struct nic *nic)
       
  1487 {
       
  1488 	u8 phy_type;
       
  1489 	int without_mii;
       
  1490 
       
  1491 	phy_type = (nic->eeprom[eeprom_phy_iface] >> 8) & 0x0f;
       
  1492 
       
  1493 	switch (phy_type) {
       
  1494 	case NoSuchPhy: /* Non-MII PHY; UNTESTED! */
       
  1495 	case I82503: /* Non-MII PHY; UNTESTED! */
       
  1496 	case S80C24: /* Non-MII PHY; tested and working */
       
  1497 		/* paragraph from the FreeBSD driver, "FXP_PHY_80C24":
       
  1498 		 * The Seeq 80c24 AutoDUPLEX(tm) Ethernet Interface Adapter
       
  1499 		 * doesn't have a programming interface of any sort.  The
       
  1500 		 * media is sensed automatically based on how the link partner
       
  1501 		 * is configured.  This is, in essence, manual configuration.
       
  1502 		 */
       
  1503 		netif_info(nic, probe, nic->netdev,
       
  1504 			   "found MII-less i82503 or 80c24 or other PHY\n");
       
  1505 
       
  1506 		nic->mdio_ctrl = mdio_ctrl_phy_mii_emulated;
       
  1507 		nic->mii.phy_id = 0; /* is this ok for an MII-less PHY? */
       
  1508 
       
  1509 		/* these might be needed for certain MII-less cards...
       
  1510 		 * nic->flags |= ich;
       
  1511 		 * nic->flags |= ich_10h_workaround; */
       
  1512 
       
  1513 		without_mii = 1;
       
  1514 		break;
       
  1515 	default:
       
  1516 		without_mii = 0;
       
  1517 		break;
       
  1518 	}
       
  1519 	return without_mii;
       
  1520 }
       
  1521 
       
  1522 #define NCONFIG_AUTO_SWITCH	0x0080
       
  1523 #define MII_NSC_CONG		MII_RESV1
       
  1524 #define NSC_CONG_ENABLE		0x0100
       
  1525 #define NSC_CONG_TXREADY	0x0400
       
  1526 #define ADVERTISE_FC_SUPPORTED	0x0400
       
/* Locate the PHY, isolate all others, apply per-PHY workarounds and
 * configure MDI/MDI-X auto-switching.  Returns 0 on success, -EAGAIN if
 * no usable PHY could be found. */
static int e100_phy_init(struct nic *nic)
{
	struct net_device *netdev = nic->netdev;
	u32 addr;
	u16 bmcr, stat, id_lo, id_hi, cong;

	/* Discover phy addr by searching addrs in order {1,0,2,..., 31} */
	for (addr = 0; addr < 32; addr++) {
		nic->mii.phy_id = (addr == 0) ? 1 : (addr == 1) ? 0 : addr;
		bmcr = mdio_read(netdev, nic->mii.phy_id, MII_BMCR);
		/* BMSR is read twice: the link bit is latched-low per the
		 * MII spec, so the second read reflects current state. */
		stat = mdio_read(netdev, nic->mii.phy_id, MII_BMSR);
		stat = mdio_read(netdev, nic->mii.phy_id, MII_BMSR);
		/* All-ones or all-zeros means nothing responded here. */
		if (!((bmcr == 0xFFFF) || ((stat == 0) && (bmcr == 0))))
			break;
	}
	if (addr == 32) {
		/* uhoh, no PHY detected: check whether we seem to be some
		 * weird, rare variant which is *known* to not have any MII.
		 * But do this AFTER MII checking only, since this does
		 * lookup of EEPROM values which may easily be unreliable. */
		if (e100_phy_check_without_mii(nic))
			return 0; /* simply return and hope for the best */
		else {
			/* for unknown cases log a fatal error */
			netif_err(nic, hw, nic->netdev,
				  "Failed to locate any known PHY, aborting\n");
			return -EAGAIN;
		}
	} else
		netif_printk(nic, hw, KERN_DEBUG, nic->netdev,
			     "phy_addr = %d\n", nic->mii.phy_id);

	/* Get phy ID */
	id_lo = mdio_read(netdev, nic->mii.phy_id, MII_PHYSID1);
	id_hi = mdio_read(netdev, nic->mii.phy_id, MII_PHYSID2);
	nic->phy = (u32)id_hi << 16 | (u32)id_lo;
	netif_printk(nic, hw, KERN_DEBUG, nic->netdev,
		     "phy ID = 0x%08X\n", nic->phy);

	/* Select the phy and isolate the rest */
	for (addr = 0; addr < 32; addr++) {
		if (addr != nic->mii.phy_id) {
			mdio_write(netdev, addr, MII_BMCR, BMCR_ISOLATE);
		} else if (nic->phy != phy_82552_v) {
			/* bmcr is reused below for the 82552 workaround,
			 * so it must hold the selected PHY's value here. */
			bmcr = mdio_read(netdev, addr, MII_BMCR);
			mdio_write(netdev, addr, MII_BMCR,
				bmcr & ~BMCR_ISOLATE);
		}
	}
	/*
	 * Workaround for 82552:
	 * Clear the ISOLATE bit on selected phy_id last (mirrored on all
	 * other phy_id's) using bmcr value from addr discovery loop above.
	 */
	if (nic->phy == phy_82552_v)
		mdio_write(netdev, nic->mii.phy_id, MII_BMCR,
			bmcr & ~BMCR_ISOLATE);

	/* Handle National tx phys */
#define NCS_PHY_MODEL_MASK	0xFFF0FFFF
	if ((nic->phy & NCS_PHY_MODEL_MASK) == phy_nsc_tx) {
		/* Disable congestion control */
		cong = mdio_read(netdev, nic->mii.phy_id, MII_NSC_CONG);
		cong |= NSC_CONG_TXREADY;
		cong &= ~NSC_CONG_ENABLE;
		mdio_write(netdev, nic->mii.phy_id, MII_NSC_CONG, cong);
	}

	if (nic->phy == phy_82552_v) {
		u16 advert = mdio_read(netdev, nic->mii.phy_id, MII_ADVERTISE);

		/* assign special tweaked mdio_ctrl() function */
		nic->mdio_ctrl = mdio_ctrl_phy_82552_v;

		/* Workaround Si not advertising flow-control during autoneg */
		advert |= ADVERTISE_PAUSE_CAP | ADVERTISE_PAUSE_ASYM;
		mdio_write(netdev, nic->mii.phy_id, MII_ADVERTISE, advert);

		/* Reset for the above changes to take effect */
		bmcr = mdio_read(netdev, nic->mii.phy_id, MII_BMCR);
		bmcr |= BMCR_RESET;
		mdio_write(netdev, nic->mii.phy_id, MII_BMCR, bmcr);
	} else if ((nic->mac >= mac_82550_D102) || ((nic->flags & ich) &&
	   (mdio_read(netdev, nic->mii.phy_id, MII_TPISTATUS) & 0x8000) &&
		!(nic->eeprom[eeprom_cnfg_mdix] & eeprom_mdix_enabled))) {
		/* enable/disable MDI/MDI-X auto-switching. */
		mdio_write(netdev, nic->mii.phy_id, MII_NCONFIG,
				nic->mii.force_media ? 0 : NCONFIG_AUTO_SWITCH);
	}

	return 0;
}
       
  1619 
       
/* Full hardware bring-up: reset, self-test, PHY setup, microcode download
 * and initial command-unit/receive-unit configuration.  Steps run in a
 * fixed order; the first failure's error code is returned. */
static int e100_hw_init(struct nic *nic)
{
	int err = 0;

	e100_hw_reset(nic);

	/* NOTE(review): this is a trace message logged at error level;
	 * netif_info/netif_dbg would seem more appropriate - confirm. */
	netif_err(nic, hw, nic->netdev, "e100_hw_init\n");
	if (!in_interrupt() && (err = e100_self_test(nic)))
		return err;

	if ((err = e100_phy_init(nic)))
		return err;
	/* Give the command and receive units their base addresses (0)
	 * before issuing any other commands. */
	if ((err = e100_exec_cmd(nic, cuc_load_base, 0)))
		return err;
	if ((err = e100_exec_cmd(nic, ruc_load_base, 0)))
		return err;
	if ((err = e100_load_ucode_wait(nic)))
		return err;
	if ((err = e100_exec_cb(nic, NULL, e100_configure)))
		return err;
	if ((err = e100_exec_cb(nic, NULL, e100_setup_iaaddr)))
		return err;
	/* Point the statistics dump area at mem->stats, then reset the
	 * hardware counters. */
	if ((err = e100_exec_cmd(nic, cuc_dump_addr,
		nic->dma_addr + offsetof(struct mem, stats))))
		return err;
	if ((err = e100_exec_cmd(nic, cuc_dump_reset, 0)))
		return err;

	e100_disable_irq(nic);

	return 0;
}
       
  1652 
       
  1653 static void e100_multi(struct nic *nic, struct cb *cb, struct sk_buff *skb)
       
  1654 {
       
  1655 	struct net_device *netdev = nic->netdev;
       
  1656 	struct netdev_hw_addr *ha;
       
  1657 	u16 i, count = min(netdev_mc_count(netdev), E100_MAX_MULTICAST_ADDRS);
       
  1658 
       
  1659 	cb->command = cpu_to_le16(cb_multi);
       
  1660 	cb->u.multi.count = cpu_to_le16(count * ETH_ALEN);
       
  1661 	i = 0;
       
  1662 	netdev_for_each_mc_addr(ha, netdev) {
       
  1663 		if (i == count)
       
  1664 			break;
       
  1665 		memcpy(&cb->u.multi.addr[i++ * ETH_ALEN], &ha->addr,
       
  1666 			ETH_ALEN);
       
  1667 	}
       
  1668 }
       
  1669 
       
  1670 static void e100_set_multicast_list(struct net_device *netdev)
       
  1671 {
       
  1672 	struct nic *nic = netdev_priv(netdev);
       
  1673 
       
  1674 	netif_printk(nic, hw, KERN_DEBUG, nic->netdev,
       
  1675 		     "mc_count=%d, flags=0x%04X\n",
       
  1676 		     netdev_mc_count(netdev), netdev->flags);
       
  1677 
       
  1678 	if (netdev->flags & IFF_PROMISC)
       
  1679 		nic->flags |= promiscuous;
       
  1680 	else
       
  1681 		nic->flags &= ~promiscuous;
       
  1682 
       
  1683 	if (netdev->flags & IFF_ALLMULTI ||
       
  1684 		netdev_mc_count(netdev) > E100_MAX_MULTICAST_ADDRS)
       
  1685 		nic->flags |= multicast_all;
       
  1686 	else
       
  1687 		nic->flags &= ~multicast_all;
       
  1688 
       
  1689 	e100_exec_cb(nic, NULL, e100_configure);
       
  1690 	e100_exec_cb(nic, NULL, e100_multi);
       
  1691 }
       
  1692 
       
/* Harvest the hardware statistics dump (written by a previous
 * cuc_dump_reset command) into net_device_stats and the driver's own
 * counters, then kick off the next dump. */
static void e100_update_stats(struct nic *nic)
{
	struct net_device *dev = nic->netdev;
	struct net_device_stats *ns = &dev->stats;
	struct stats *s = &nic->mem->stats;
	/* The location of the completion marker depends on the MAC
	 * generation, because newer MACs append extra counters. */
	__le32 *complete = (nic->mac < mac_82558_D101_A4) ? &s->fc_xmt_pause :
		(nic->mac < mac_82559_D101M) ? (__le32 *)&s->xmt_tco_frames :
		&s->complete;

	/* Device's stats reporting may take several microseconds to
	 * complete, so we're always waiting for results of the
	 * previous command. */

	if (*complete == cpu_to_le32(cuc_dump_reset_complete)) {
		/* Clear the marker so the next dump can be detected. */
		*complete = 0;
		nic->tx_frames = le32_to_cpu(s->tx_good_frames);
		nic->tx_collisions = le32_to_cpu(s->tx_total_collisions);
		ns->tx_aborted_errors += le32_to_cpu(s->tx_max_collisions);
		ns->tx_window_errors += le32_to_cpu(s->tx_late_collisions);
		ns->tx_carrier_errors += le32_to_cpu(s->tx_lost_crs);
		ns->tx_fifo_errors += le32_to_cpu(s->tx_underruns);
		ns->collisions += nic->tx_collisions;
		ns->tx_errors += le32_to_cpu(s->tx_max_collisions) +
			le32_to_cpu(s->tx_lost_crs);
		nic->rx_short_frame_errors +=
			le32_to_cpu(s->rx_short_frame_errors);
		ns->rx_length_errors = nic->rx_short_frame_errors +
			nic->rx_over_length_errors;
		ns->rx_crc_errors += le32_to_cpu(s->rx_crc_errors);
		ns->rx_frame_errors += le32_to_cpu(s->rx_alignment_errors);
		ns->rx_over_errors += le32_to_cpu(s->rx_overrun_errors);
		ns->rx_fifo_errors += le32_to_cpu(s->rx_overrun_errors);
		ns->rx_missed_errors += le32_to_cpu(s->rx_resource_errors);
		ns->rx_errors += le32_to_cpu(s->rx_crc_errors) +
			le32_to_cpu(s->rx_alignment_errors) +
			le32_to_cpu(s->rx_short_frame_errors) +
			le32_to_cpu(s->rx_cdt_errors);
		nic->tx_deferred += le32_to_cpu(s->tx_deferred);
		nic->tx_single_collisions +=
			le32_to_cpu(s->tx_single_collisions);
		nic->tx_multiple_collisions +=
			le32_to_cpu(s->tx_multiple_collisions);
		/* Flow-control counters exist from 82558 D101 A4 on;
		 * TCO counters from 82559 D101M on. */
		if (nic->mac >= mac_82558_D101_A4) {
			nic->tx_fc_pause += le32_to_cpu(s->fc_xmt_pause);
			nic->rx_fc_pause += le32_to_cpu(s->fc_rcv_pause);
			nic->rx_fc_unsupported +=
				le32_to_cpu(s->fc_rcv_unsupported);
			if (nic->mac >= mac_82559_D101M) {
				nic->tx_tco_frames +=
					le16_to_cpu(s->xmt_tco_frames);
				nic->rx_tco_frames +=
					le16_to_cpu(s->rcv_tco_frames);
			}
		}
	}


	/* Start the next dump; results are picked up on a later call. */
	if (e100_exec_cmd(nic, cuc_dump_reset, 0))
		netif_printk(nic, tx_err, KERN_DEBUG, nic->netdev,
			     "exec cuc_dump_reset failed\n");
}
       
  1754 
       
  1755 static void e100_adjust_adaptive_ifs(struct nic *nic, int speed, int duplex)
       
  1756 {
       
  1757 	/* Adjust inter-frame-spacing (IFS) between two transmits if
       
  1758 	 * we're getting collisions on a half-duplex connection. */
       
  1759 
       
  1760 	if (duplex == DUPLEX_HALF) {
       
  1761 		u32 prev = nic->adaptive_ifs;
       
  1762 		u32 min_frames = (speed == SPEED_100) ? 1000 : 100;
       
  1763 
       
  1764 		if ((nic->tx_frames / 32 < nic->tx_collisions) &&
       
  1765 		   (nic->tx_frames > min_frames)) {
       
  1766 			if (nic->adaptive_ifs < 60)
       
  1767 				nic->adaptive_ifs += 5;
       
  1768 		} else if (nic->tx_frames < min_frames) {
       
  1769 			if (nic->adaptive_ifs >= 5)
       
  1770 				nic->adaptive_ifs -= 5;
       
  1771 		}
       
  1772 		if (nic->adaptive_ifs != prev)
       
  1773 			e100_exec_cb(nic, NULL, e100_configure);
       
  1774 	}
       
  1775 }
       
  1776 
       
/* Periodic watchdog timer: link maintenance, statistics collection and
 * adaptive-IFS tuning.  In EtherCAT mode only the MII link state is
 * forwarded to the master and everything else (including re-arming the
 * timer) is skipped. */
static void e100_watchdog(unsigned long data)
{
	struct nic *nic = (struct nic *)data;
	struct ethtool_cmd cmd = { .cmd = ETHTOOL_GSET };
	u32 speed;

	if (nic->ecdev) {
		/* EtherCAT path: report link and return without
		 * re-arming the timer. */
		ecdev_set_link(nic->ecdev, mii_link_ok(&nic->mii) ? 1 : 0);
		return;
	}

	netif_printk(nic, timer, KERN_DEBUG, nic->netdev,
		     "right now = %ld\n", jiffies);

	/* mii library handles link maintenance tasks */

	mii_ethtool_gset(&nic->mii, &cmd);
	speed = ethtool_cmd_speed(&cmd);

	/* Log link transitions (up with speed/duplex, or down). */
	if (mii_link_ok(&nic->mii) && !netif_carrier_ok(nic->netdev)) {
		netdev_info(nic->netdev, "NIC Link is Up %u Mbps %s Duplex\n",
			    speed == SPEED_100 ? 100 : 10,
			    cmd.duplex == DUPLEX_FULL ? "Full" : "Half");
	} else if (!mii_link_ok(&nic->mii) && netif_carrier_ok(nic->netdev)) {
		netdev_info(nic->netdev, "NIC Link is Down\n");
	}

	mii_check_link(&nic->mii);

	/* Software generated interrupt to recover from (rare) Rx
	 * allocation failure.
	 * Unfortunately have to use a spinlock to not re-enable interrupts
	 * accidentally, due to hardware that shares a register between the
	 * interrupt mask bit and the SW Interrupt generation bit */
	spin_lock_irq(&nic->cmd_lock);
	iowrite8(ioread8(&nic->csr->scb.cmd_hi) | irq_sw_gen,&nic->csr->scb.cmd_hi);
	e100_write_flush(nic);
	spin_unlock_irq(&nic->cmd_lock);

	e100_update_stats(nic);
	e100_adjust_adaptive_ifs(nic, speed, cmd.duplex);

	if (nic->mac <= mac_82557_D100_C)
		/* Issue a multicast command to workaround a 557 lock up */
		e100_set_multicast_list(nic->netdev);

	if (nic->flags & ich && speed == SPEED_10 && cmd.duplex == DUPLEX_HALF)
		/* Need SW workaround for ICH[x] 10Mbps/half duplex Tx hang. */
		nic->flags |= ich_10h_workaround;
	else
		nic->flags &= ~ich_10h_workaround;

	/* Re-arm for the next period. */
	mod_timer(&nic->watchdog,
		  round_jiffies(jiffies + E100_WATCHDOG_PERIOD));
}
       
  1832 
       
/* Command-block setup callback: build a transmit command with a single
 * TBD entry pointing at the DMA-mapped skb payload. */
static void e100_xmit_prepare(struct nic *nic, struct cb *cb,
	struct sk_buff *skb)
{
	cb->command = nic->tx_command;

	/*
	 * Use the last 4 bytes of the SKB payload packet as the CRC, used for
	 * testing, ie sending frames with bad CRC.
	 */
	if (unlikely(skb->no_fcs))
		cb->command |= __constant_cpu_to_le16(cb_tx_nc);
	else
		cb->command &= ~__constant_cpu_to_le16(cb_tx_nc);

	/* interrupt every 16 packets regardless of delay */
	if ((nic->cbs_avail & ~15) == nic->cbs_avail)
		cb->command |= cpu_to_le16(cb_i);
	cb->u.tcb.tbd_array = cb->dma_addr + offsetof(struct cb, u.tcb.tbd);
	cb->u.tcb.tcb_byte_count = 0;
	cb->u.tcb.threshold = nic->tx_threshold;
	cb->u.tcb.tbd_count = 1;
	cb->u.tcb.tbd.buf_addr = cpu_to_le32(pci_map_single(nic->pdev,
		skb->data, skb->len, PCI_DMA_TODEVICE));
	/* check for mapping failure?
	 * NOTE(review): pci_map_single() can fail and the result is not
	 * checked here - confirm against newer upstream e100.c. */
	cb->u.tcb.tbd.size = cpu_to_le16(skb->len);
	skb_tx_timestamp(skb);
}
       
  1860 
       
  1861 static netdev_tx_t e100_xmit_frame(struct sk_buff *skb,
       
  1862 				   struct net_device *netdev)
       
  1863 {
       
  1864 	struct nic *nic = netdev_priv(netdev);
       
  1865 	int err;
       
  1866 
       
  1867 	if (nic->flags & ich_10h_workaround) {
       
  1868 		/* SW workaround for ICH[x] 10Mbps/half duplex Tx hang.
       
  1869 		   Issue a NOP command followed by a 1us delay before
       
  1870 		   issuing the Tx command. */
       
  1871 		if (e100_exec_cmd(nic, cuc_nop, 0))
       
  1872 			netif_printk(nic, tx_err, KERN_DEBUG, nic->netdev,
       
  1873 				     "exec cuc_nop failed\n");
       
  1874 		udelay(1);
       
  1875 	}
       
  1876 
       
  1877 	err = e100_exec_cb(nic, skb, e100_xmit_prepare);
       
  1878 
       
  1879 	switch (err) {
       
  1880 	case -ENOSPC:
       
  1881 		/* We queued the skb, but now we're out of space. */
       
  1882 		netif_printk(nic, tx_err, KERN_DEBUG, nic->netdev,
       
  1883 			     "No space for CB\n");
       
  1884 		if (!nic->ecdev)
       
  1885 			netif_stop_queue(netdev);
       
  1886 		break;
       
  1887 	case -ENOMEM:
       
  1888 		/* This is a hard error - log it. */
       
  1889 		netif_printk(nic, tx_err, KERN_DEBUG, nic->netdev,
       
  1890 			     "Out of Tx resources, returning skb\n");
       
  1891 		if (!nic->ecdev)
       
  1892 			netif_stop_queue(netdev);
       
  1893 		return NETDEV_TX_BUSY;
       
  1894 	}
       
  1895 
       
  1896 	return NETDEV_TX_OK;
       
  1897 }
       
  1898 
       
/* Reclaim completed transmit command blocks: unmap the payload, account
 * the stats, free the skb (unless owned by the EtherCAT master) and make
 * the CB available again.  Returns non-zero if anything was cleaned. */
static int e100_tx_clean(struct nic *nic)
{
	struct net_device *dev = nic->netdev;
	struct cb *cb;
	int tx_cleaned = 0;

	/* EtherCAT mode runs single-threaded, so no lock is taken there. */
	if (!nic->ecdev)
		spin_lock(&nic->cb_lock);

	/* Clean CBs marked complete */
	for (cb = nic->cb_to_clean;
	    cb->status & cpu_to_le16(cb_complete);
	    cb = nic->cb_to_clean = cb->next) {
		rmb(); /* read skb after status */
		netif_printk(nic, tx_done, KERN_DEBUG, nic->netdev,
			     "cb[%d]->status = 0x%04X\n",
			     (int)(((void*)cb - (void*)nic->cbs)/sizeof(struct cb)),
			     cb->status);

		/* Only Tx CBs carry an skb; others (config etc.) don't. */
		if (likely(cb->skb != NULL)) {
			dev->stats.tx_packets++;
			dev->stats.tx_bytes += cb->skb->len;

			pci_unmap_single(nic->pdev,
				le32_to_cpu(cb->u.tcb.tbd.buf_addr),
				le16_to_cpu(cb->u.tcb.tbd.size),
				PCI_DMA_TODEVICE);
			/* In EtherCAT mode the skb belongs to the master. */
			if (!nic->ecdev)
				dev_kfree_skb_any(cb->skb);
			cb->skb = NULL;
			tx_cleaned = 1;
		}
		cb->status = 0;
		nic->cbs_avail++;
	}

	if (!nic->ecdev) {
		spin_unlock(&nic->cb_lock);

		/* Recover from running out of Tx resources in xmit_frame */
		if (unlikely(tx_cleaned && netif_queue_stopped(nic->netdev)))
			netif_wake_queue(nic->netdev);
	}

	return tx_cleaned;
}
       
  1945 
       
  1946 static void e100_clean_cbs(struct nic *nic)
       
  1947 {
       
  1948 	if (nic->cbs) {
       
  1949 		while (nic->cbs_avail != nic->params.cbs.count) {
       
  1950 			struct cb *cb = nic->cb_to_clean;
       
  1951 			if (cb->skb) {
       
  1952 				pci_unmap_single(nic->pdev,
       
  1953 					le32_to_cpu(cb->u.tcb.tbd.buf_addr),
       
  1954 					le16_to_cpu(cb->u.tcb.tbd.size),
       
  1955 					PCI_DMA_TODEVICE);
       
  1956 				if (!nic->ecdev)
       
  1957 					dev_kfree_skb(cb->skb);
       
  1958 			}
       
  1959 			nic->cb_to_clean = nic->cb_to_clean->next;
       
  1960 			nic->cbs_avail++;
       
  1961 		}
       
  1962 		pci_pool_free(nic->cbs_pool, nic->cbs, nic->cbs_dma_addr);
       
  1963 		nic->cbs = NULL;
       
  1964 		nic->cbs_avail = 0;
       
  1965 	}
       
  1966 	nic->cuc_cmd = cuc_start;
       
  1967 	nic->cb_to_use = nic->cb_to_send = nic->cb_to_clean =
       
  1968 		nic->cbs;
       
  1969 }
       
  1970 
       
  1971 static int e100_alloc_cbs(struct nic *nic)
       
  1972 {
       
  1973 	struct cb *cb;
       
  1974 	unsigned int i, count = nic->params.cbs.count;
       
  1975 
       
  1976 	nic->cuc_cmd = cuc_start;
       
  1977 	nic->cb_to_use = nic->cb_to_send = nic->cb_to_clean = NULL;
       
  1978 	nic->cbs_avail = 0;
       
  1979 
       
  1980 	nic->cbs = pci_pool_alloc(nic->cbs_pool, GFP_KERNEL,
       
  1981 				  &nic->cbs_dma_addr);
       
  1982 	if (!nic->cbs)
       
  1983 		return -ENOMEM;
       
  1984 	memset(nic->cbs, 0, count * sizeof(struct cb));
       
  1985 
       
  1986 	for (cb = nic->cbs, i = 0; i < count; cb++, i++) {
       
  1987 		cb->next = (i + 1 < count) ? cb + 1 : nic->cbs;
       
  1988 		cb->prev = (i == 0) ? nic->cbs + count - 1 : cb - 1;
       
  1989 
       
  1990 		cb->dma_addr = nic->cbs_dma_addr + i * sizeof(struct cb);
       
  1991 		cb->link = cpu_to_le32(nic->cbs_dma_addr +
       
  1992 			((i+1) % count) * sizeof(struct cb));
       
  1993 	}
       
  1994 
       
  1995 	nic->cb_to_use = nic->cb_to_send = nic->cb_to_clean = nic->cbs;
       
  1996 	nic->cbs_avail = count;
       
  1997 
       
  1998 	return 0;
       
  1999 }
       
  2000 
       
  2001 static inline void e100_start_receiver(struct nic *nic, struct rx *rx)
       
  2002 {
       
  2003 	if (!nic->rxs) return;
       
  2004 	if (RU_SUSPENDED != nic->ru_running) return;
       
  2005 
       
  2006 	/* handle init time starts */
       
  2007 	if (!rx) rx = nic->rxs;
       
  2008 
       
  2009 	/* (Re)start RU if suspended or idle and RFA is non-NULL */
       
  2010 	if (rx->skb) {
       
  2011 		e100_exec_cmd(nic, ruc_start, rx->dma_addr);
       
  2012 		nic->ru_running = RU_RUNNING;
       
  2013 	}
       
  2014 }
       
  2015 
       
  2016 #define RFD_BUF_LEN (sizeof(struct rfd) + VLAN_ETH_FRAME_LEN + ETH_FCS_LEN)
       
/* Allocate a receive skb, seed it with a blank RFD, DMA-map it and link
 * it to the end of the receive frame area.  Returns 0 or -ENOMEM. */
static int e100_rx_alloc_skb(struct nic *nic, struct rx *rx)
{
	if (!(rx->skb = netdev_alloc_skb_ip_align(nic->netdev, RFD_BUF_LEN)))
		return -ENOMEM;

	/* Init, and map the RFD. */
	skb_copy_to_linear_data(rx->skb, &nic->blank_rfd, sizeof(struct rfd));
	rx->dma_addr = pci_map_single(nic->pdev, rx->skb->data,
		RFD_BUF_LEN, PCI_DMA_BIDIRECTIONAL);

	/* Mapping failed: undo the allocation and report -ENOMEM. */
	if (pci_dma_mapping_error(nic->pdev, rx->dma_addr)) {
		dev_kfree_skb_any(rx->skb);
		rx->skb = NULL;
		rx->dma_addr = 0;
		return -ENOMEM;
	}

	/* Link the RFD to end of RFA by linking previous RFD to
	 * this one.  We are safe to touch the previous RFD because
	 * it is protected by the before last buffer's el bit being set */
	if (rx->prev->skb) {
		struct rfd *prev_rfd = (struct rfd *)rx->prev->skb->data;
		/* The link field may be unaligned inside the skb data. */
		put_unaligned_le32(rx->dma_addr, &prev_rfd->link);
		/* Flush the updated link out to the device. */
		pci_dma_sync_single_for_device(nic->pdev, rx->prev->dma_addr,
			sizeof(struct rfd), PCI_DMA_BIDIRECTIONAL);
	}

	return 0;
}
       
  2046 
       
  2047 static int e100_rx_indicate(struct nic *nic, struct rx *rx,
       
  2048 	unsigned int *work_done, unsigned int work_to_do)
       
  2049 {
       
  2050 	struct net_device *dev = nic->netdev;
       
  2051 	struct sk_buff *skb = rx->skb;
       
  2052 	struct rfd *rfd = (struct rfd *)skb->data;
       
  2053 	u16 rfd_status, actual_size;
       
  2054 	u16 fcs_pad = 0;
       
  2055 
       
  2056 	if (unlikely(work_done && *work_done >= work_to_do))
       
  2057 		return -EAGAIN;
       
  2058 
       
  2059 	/* Need to sync before taking a peek at cb_complete bit */
       
  2060 	pci_dma_sync_single_for_cpu(nic->pdev, rx->dma_addr,
       
  2061 		sizeof(struct rfd), PCI_DMA_BIDIRECTIONAL);
       
  2062 	rfd_status = le16_to_cpu(rfd->status);
       
  2063 
       
  2064 	netif_printk(nic, rx_status, KERN_DEBUG, nic->netdev,
       
  2065 		     "status=0x%04X\n", rfd_status);
       
  2066 	rmb(); /* read size after status bit */
       
  2067 
       
  2068 	/* If data isn't ready, nothing to indicate */
       
  2069 	if (unlikely(!(rfd_status & cb_complete))) {
       
  2070 		/* If the next buffer has the el bit, but we think the receiver
       
  2071 		 * is still running, check to see if it really stopped while
       
  2072 		 * we had interrupts off.
       
  2073 		 * This allows for a fast restart without re-enabling
       
  2074 		 * interrupts */
       
  2075 		if ((le16_to_cpu(rfd->command) & cb_el) &&
       
  2076 		    (RU_RUNNING == nic->ru_running))
       
  2077 
       
  2078 			if (ioread8(&nic->csr->scb.status) & rus_no_res)
       
  2079 				nic->ru_running = RU_SUSPENDED;
       
  2080 		pci_dma_sync_single_for_device(nic->pdev, rx->dma_addr,
       
  2081 					       sizeof(struct rfd),
       
  2082 					       PCI_DMA_FROMDEVICE);
       
  2083 		return -ENODATA;
       
  2084 	}
       
  2085 
       
  2086 	/* Get actual data size */
       
  2087 	if (unlikely(dev->features & NETIF_F_RXFCS))
       
  2088 		fcs_pad = 4;
       
  2089 	actual_size = le16_to_cpu(rfd->actual_size) & 0x3FFF;
       
  2090 	if (unlikely(actual_size > RFD_BUF_LEN - sizeof(struct rfd)))
       
  2091 		actual_size = RFD_BUF_LEN - sizeof(struct rfd);
       
  2092 
       
  2093 	/* Get data */
       
  2094 	pci_unmap_single(nic->pdev, rx->dma_addr,
       
  2095 		RFD_BUF_LEN, PCI_DMA_BIDIRECTIONAL);
       
  2096 
       
  2097 	/* If this buffer has the el bit, but we think the receiver
       
  2098 	 * is still running, check to see if it really stopped while
       
  2099 	 * we had interrupts off.
       
  2100 	 * This allows for a fast restart without re-enabling interrupts.
       
  2101 	 * This can happen when the RU sees the size change but also sees
       
  2102 	 * the el bit set. */
       
  2103 	if ((le16_to_cpu(rfd->command) & cb_el) &&
       
  2104 	    (RU_RUNNING == nic->ru_running)) {
       
  2105 
       
  2106 	    if (ioread8(&nic->csr->scb.status) & rus_no_res)
       
  2107 		nic->ru_running = RU_SUSPENDED;
       
  2108 	}
       
  2109 
       
  2110 	if (!nic->ecdev) {
       
  2111 		/* Pull off the RFD and put the actual data (minus eth hdr) */
       
  2112 		skb_reserve(skb, sizeof(struct rfd));
       
  2113 		skb_put(skb, actual_size);
       
  2114 		skb->protocol = eth_type_trans(skb, nic->netdev);
       
  2115 	}
       
  2116 
       
  2117 	/* If we are receiving all frames, then don't bother
       
  2118 	 * checking for errors.
       
  2119 	 */
       
  2120 	if (unlikely(dev->features & NETIF_F_RXALL)) {
       
  2121 		if (actual_size > ETH_DATA_LEN + VLAN_ETH_HLEN + fcs_pad)
       
  2122 			/* Received oversized frame, but keep it. */
       
  2123 			nic->rx_over_length_errors++;
       
  2124 		goto process_skb;
       
  2125 	}
       
  2126 
       
  2127 	if (unlikely(!(rfd_status & cb_ok))) {
       
  2128 		if (!nic->ecdev) {
       
  2129 			/* Don't indicate if hardware indicates errors */
       
  2130 			dev_kfree_skb_any(skb);
       
  2131 		}
       
  2132 	} else if (actual_size > ETH_DATA_LEN + VLAN_ETH_HLEN + fcs_pad) {
       
  2133 		/* Don't indicate oversized frames */
       
  2134 		nic->rx_over_length_errors++;
       
  2135 		if (!nic->ecdev) {
       
  2136 			dev_kfree_skb_any(skb);
       
  2137 		}
       
  2138 	} else {
       
  2139 process_skb:
       
  2140 		dev->stats.rx_packets++;
       
  2141 		dev->stats.rx_bytes += (actual_size - fcs_pad);
       
  2142 		if (nic->ecdev) {
       
  2143 			ecdev_receive(nic->ecdev,
       
  2144 					skb->data + sizeof(struct rfd), actual_size - fcs_pad);
       
  2145 
       
  2146 			// No need to detect link status as
       
  2147 			// long as frames are received: Reset watchdog.
       
  2148 			if (ecdev_get_link(nic->ecdev)) {
       
  2149 				nic->ec_watchdog_jiffies = jiffies;
       
  2150 			}
       
  2151 		} else {
       
  2152 			netif_receive_skb(skb);
       
  2153 		}
       
  2154 		if (work_done)
       
  2155 			(*work_done)++;
       
  2156 	}
       
  2157 
       
  2158 	if (nic->ecdev) {
       
  2159 		// make receive frame descriptior usable again
       
  2160 		memcpy(skb->data, &nic->blank_rfd, sizeof(struct rfd));
       
  2161 		rx->dma_addr = pci_map_single(nic->pdev, skb->data,
       
  2162 				RFD_BUF_LEN, PCI_DMA_BIDIRECTIONAL);
       
  2163 		if (pci_dma_mapping_error(nic->pdev, rx->dma_addr)) {
       
  2164 			rx->dma_addr = 0;
       
  2165 		}
       
  2166 
       
  2167 		/* Link the RFD to end of RFA by linking previous RFD to
       
  2168 		 * this one.  We are safe to touch the previous RFD because
       
  2169 		 * it is protected by the before last buffer's el bit being set */
       
  2170 		if (rx->prev->skb) {
       
  2171 			struct rfd *prev_rfd = (struct rfd *) rx->prev->skb->data;
       
  2172 			put_unaligned_le32(rx->dma_addr, &prev_rfd->link);
       
  2173 			pci_dma_sync_single_for_device(nic->pdev, rx->prev->dma_addr,
       
  2174 					sizeof(struct rfd), PCI_DMA_TODEVICE);
       
  2175 		}
       
  2176 	} else {
       
  2177 		rx->skb = NULL;
       
  2178 	}
       
  2179 
       
  2180 	return 0;
       
  2181 }
       
  2182 
       
/* Process completed receive frames and refill/re-arm the RX ring.
 *
 * Walks the RFD list from rx_to_clean, indicating each completed frame
 * via e100_rx_indicate() until the quota is hit (-EAGAIN) or no more
 * completed RFDs remain (-ENODATA).  Then, in non-EtherCAT mode, new
 * skbs are allocated for the consumed slots and the el-bit "stopping
 * point" is advanced; finally the RU is restarted if it had stopped.
 *
 * @nic:        adapter state
 * @work_done:  incremented per indicated frame; NULL in EtherCAT poll mode
 * @work_to_do: NAPI budget passed through to e100_rx_indicate()
 */
static void e100_rx_clean(struct nic *nic, unsigned int *work_done,
	unsigned int work_to_do)
{
	struct rx *rx;
	int restart_required = 0, err = 0;
	struct rx *old_before_last_rx, *new_before_last_rx;
	struct rfd *old_before_last_rfd, *new_before_last_rfd;

	/* Indicate newly arrived packets */
	for (rx = nic->rx_to_clean; rx->skb; rx = nic->rx_to_clean = rx->next) {
		err = e100_rx_indicate(nic, rx, work_done, work_to_do);
		/* Hit quota or no more to clean */
		if (-EAGAIN == err || -ENODATA == err)
			break;
	}


	/* On EAGAIN, hit quota so have more work to do, restart once
	 * cleanup is complete.
	 * Else, are we already rnr? then pay attention!!! this ensures that
	 * the state machine progression never allows a start with a
	 * partially cleaned list, avoiding a race between hardware
	 * and rx_to_clean when in NAPI mode */
	if (-EAGAIN != err && RU_SUSPENDED == nic->ru_running)
		restart_required = 1;

	/* Remember the current el-bit holder before refilling moves it. */
	old_before_last_rx = nic->rx_to_use->prev->prev;
	old_before_last_rfd = (struct rfd *)old_before_last_rx->skb->data;

	if (!nic->ecdev) {
		/* Alloc new skbs to refill list */
		for(rx = nic->rx_to_use; !rx->skb; rx = nic->rx_to_use = rx->next) {
			if(unlikely(e100_rx_alloc_skb(nic, rx)))
				break; /* Better luck next time (see watchdog) */
		}
	}

	new_before_last_rx = nic->rx_to_use->prev->prev;
	if (new_before_last_rx != old_before_last_rx) {
		/* Set the el-bit on the buffer that is before the last buffer.
		 * This lets us update the next pointer on the last buffer
		 * without worrying about hardware touching it.
		 * We set the size to 0 to prevent hardware from touching this
		 * buffer.
		 * When the hardware hits the before last buffer with el-bit
		 * and size of 0, it will RNR interrupt, the RUS will go into
		 * the No Resources state.  It will not complete nor write to
		 * this buffer. */
		new_before_last_rfd =
			(struct rfd *)new_before_last_rx->skb->data;
		new_before_last_rfd->size = 0;
		new_before_last_rfd->command |= cpu_to_le16(cb_el);
		pci_dma_sync_single_for_device(nic->pdev,
			new_before_last_rx->dma_addr, sizeof(struct rfd),
			PCI_DMA_BIDIRECTIONAL);

		/* Now that we have a new stopping point, we can clear the old
		 * stopping point.  We must sync twice to get the proper
		 * ordering on the hardware side of things. */
		old_before_last_rfd->command &= ~cpu_to_le16(cb_el);
		pci_dma_sync_single_for_device(nic->pdev,
			old_before_last_rx->dma_addr, sizeof(struct rfd),
			PCI_DMA_BIDIRECTIONAL);
		old_before_last_rfd->size = cpu_to_le16(VLAN_ETH_FRAME_LEN
							+ ETH_FCS_LEN);
		pci_dma_sync_single_for_device(nic->pdev,
			old_before_last_rx->dma_addr, sizeof(struct rfd),
			PCI_DMA_BIDIRECTIONAL);
	}

	if (restart_required) {
		// ack the rnr?
		iowrite8(stat_ack_rnr, &nic->csr->scb.stat_ack);
		e100_start_receiver(nic, nic->rx_to_clean);
		if (work_done)
			(*work_done)++;
	}
}
       
  2261 
       
  2262 static void e100_rx_clean_list(struct nic *nic)
       
  2263 {
       
  2264 	struct rx *rx;
       
  2265 	unsigned int i, count = nic->params.rfds.count;
       
  2266 
       
  2267 	nic->ru_running = RU_UNINITIALIZED;
       
  2268 
       
  2269 	if (nic->rxs) {
       
  2270 		for (rx = nic->rxs, i = 0; i < count; rx++, i++) {
       
  2271 			if (rx->skb) {
       
  2272 				pci_unmap_single(nic->pdev, rx->dma_addr,
       
  2273 					RFD_BUF_LEN, PCI_DMA_BIDIRECTIONAL);
       
  2274 				dev_kfree_skb(rx->skb);
       
  2275 			}
       
  2276 		}
       
  2277 		kfree(nic->rxs);
       
  2278 		nic->rxs = NULL;
       
  2279 	}
       
  2280 
       
  2281 	nic->rx_to_use = nic->rx_to_clean = NULL;
       
  2282 }
       
  2283 
       
/* Allocate and link the circular RX ring.
 *
 * Builds a doubly-linked ring of params.rfds.count rx descriptors, each
 * with an skb/RFD attached by e100_rx_alloc_skb().  In non-EtherCAT mode
 * the buffer before the last is marked as the el-bit stopping point so
 * the RU halts there instead of running off the ring.
 *
 * Returns 0 on success or -ENOMEM (any partial allocation is cleaned up).
 */
static int e100_rx_alloc_list(struct nic *nic)
{
	struct rx *rx;
	unsigned int i, count = nic->params.rfds.count;
	struct rfd *before_last;

	nic->rx_to_use = nic->rx_to_clean = NULL;
	nic->ru_running = RU_UNINITIALIZED;

	/* GFP_ATOMIC: may be called from non-sleepable context */
	if (!(nic->rxs = kcalloc(count, sizeof(struct rx), GFP_ATOMIC)))
		return -ENOMEM;

	/* Wire next/prev into a circle, then attach an skb to each slot. */
	for (rx = nic->rxs, i = 0; i < count; rx++, i++) {
		rx->next = (i + 1 < count) ? rx + 1 : nic->rxs;
		rx->prev = (i == 0) ? nic->rxs + count - 1 : rx - 1;
		if (e100_rx_alloc_skb(nic, rx)) {
			e100_rx_clean_list(nic);
			return -ENOMEM;
		}
	}

	if (!nic->ecdev) {
		/* Set the el-bit on the buffer that is before the last buffer.
		 * This lets us update the next pointer on the last buffer without
		 * worrying about hardware touching it.
		 * We set the size to 0 to prevent hardware from touching this buffer.
		 * When the hardware hits the before last buffer with el-bit and size
		 * of 0, it will RNR interrupt, the RU will go into the No Resources
		 * state.  It will not complete nor write to this buffer. */
		rx = nic->rxs->prev->prev;
		before_last = (struct rfd *)rx->skb->data;
		before_last->command |= cpu_to_le16(cb_el);
		before_last->size = 0;
		pci_dma_sync_single_for_device(nic->pdev, rx->dma_addr,
				sizeof(struct rfd), PCI_DMA_BIDIRECTIONAL);
	}

	nic->rx_to_use = nic->rx_to_clean = nic->rxs;
	/* Ring is ready but the RU has not been (re)started yet. */
	nic->ru_running = RU_SUSPENDED;

	return 0;
}
       
  2326 
       
/* Interrupt handler.
 *
 * Reads and acknowledges the SCB stat/ack byte, records an RNR condition
 * for e100_rx_clean() to recover from, and (in non-EtherCAT mode)
 * disables further interrupts and schedules the NAPI poll.  In EtherCAT
 * mode all actual work is done from e100_ec_poll() instead.
 */
static irqreturn_t e100_intr(int irq, void *dev_id)
{
	struct net_device *netdev = dev_id;
	struct nic *nic = netdev_priv(netdev);
	u8 stat_ack = ioread8(&nic->csr->scb.stat_ack);

	netif_printk(nic, intr, KERN_DEBUG, nic->netdev,
		     "stat_ack = 0x%02X\n", stat_ack);

	if (stat_ack == stat_ack_not_ours ||	/* Not our interrupt */
	   stat_ack == stat_ack_not_present)	/* Hardware is ejected */
		return IRQ_NONE;

	/* Ack interrupt(s) */
	iowrite8(stat_ack, &nic->csr->scb.stat_ack);

	/* We hit Receive No Resource (RNR); restart RU after cleaning */
	if (stat_ack & stat_ack_rnr)
		nic->ru_running = RU_SUSPENDED;

	/* Disable ints before scheduling poll; e100_poll() re-enables. */
	if (!nic->ecdev && likely(napi_schedule_prep(&nic->napi))) {
		e100_disable_irq(nic);
		__napi_schedule(&nic->napi);
	}

	return IRQ_HANDLED;
}
       
  2354 
       
  2355 void e100_ec_poll(struct net_device *netdev)
       
  2356 {
       
  2357 	struct nic *nic = netdev_priv(netdev);
       
  2358 
       
  2359 	e100_rx_clean(nic, NULL, 100);
       
  2360 	e100_tx_clean(nic);
       
  2361 
       
  2362 	if (jiffies - nic->ec_watchdog_jiffies >= 2 * HZ) {
       
  2363 		e100_watchdog((unsigned long) nic);
       
  2364 		nic->ec_watchdog_jiffies = jiffies;
       
  2365 	}
       
  2366 }
       
  2367 
       
  2368 
       
  2369 static int e100_poll(struct napi_struct *napi, int budget)
       
  2370 {
       
  2371 	struct nic *nic = container_of(napi, struct nic, napi);
       
  2372 	unsigned int work_done = 0;
       
  2373 
       
  2374 	e100_rx_clean(nic, &work_done, budget);
       
  2375 	e100_tx_clean(nic);
       
  2376 
       
  2377 	/* If budget not fully consumed, exit the polling mode */
       
  2378 	if (work_done < budget) {
       
  2379 		napi_complete(napi);
       
  2380 		e100_enable_irq(nic);
       
  2381 	}
       
  2382 
       
  2383 	return work_done;
       
  2384 }
       
  2385 
       
  2386 #ifdef CONFIG_NET_POLL_CONTROLLER
       
/* Netpoll hook: service the device with interrupts masked by manually
 * invoking the interrupt handler, then reap TX completions. */
static void e100_netpoll(struct net_device *netdev)
{
	struct nic *nic = netdev_priv(netdev);

	e100_disable_irq(nic);
	e100_intr(nic->pdev->irq, netdev);
	e100_tx_clean(nic);
	e100_enable_irq(nic);
}
       
  2396 #endif
       
  2397 
       
  2398 static int e100_set_mac_address(struct net_device *netdev, void *p)
       
  2399 {
       
  2400 	struct nic *nic = netdev_priv(netdev);
       
  2401 	struct sockaddr *addr = p;
       
  2402 
       
  2403 	if (!is_valid_ether_addr(addr->sa_data))
       
  2404 		return -EADDRNOTAVAIL;
       
  2405 
       
  2406 	memcpy(netdev->dev_addr, addr->sa_data, netdev->addr_len);
       
  2407 	e100_exec_cb(nic, NULL, e100_setup_iaaddr);
       
  2408 
       
  2409 	return 0;
       
  2410 }
       
  2411 
       
  2412 static int e100_change_mtu(struct net_device *netdev, int new_mtu)
       
  2413 {
       
  2414 	if (new_mtu < ETH_ZLEN || new_mtu > ETH_DATA_LEN)
       
  2415 		return -EINVAL;
       
  2416 	netdev->mtu = new_mtu;
       
  2417 	return 0;
       
  2418 }
       
  2419 
       
  2420 static int e100_asf(struct nic *nic)
       
  2421 {
       
  2422 	/* ASF can be enabled from eeprom */
       
  2423 	return (nic->pdev->device >= 0x1050) && (nic->pdev->device <= 0x1057) &&
       
  2424 	   (nic->eeprom[eeprom_config_asf] & eeprom_asf) &&
       
  2425 	   !(nic->eeprom[eeprom_config_asf] & eeprom_gcl) &&
       
  2426 	   ((nic->eeprom[eeprom_smbus_addr] & 0xFF) != 0xFE);
       
  2427 }
       
  2428 
       
/* Bring the interface up: allocate RX/TX rings, initialize the hardware,
 * start the receiver, request the IRQ and (in non-EtherCAT mode) start
 * the watchdog timer, wake the TX queue and enable NAPI + interrupts.
 *
 * Returns 0 on success; on failure unwinds in reverse order via the
 * goto-cleanup chain and returns the negative error code.
 */
static int e100_up(struct nic *nic)
{
	int err;

	if ((err = e100_rx_alloc_list(nic)))
		return err;
	if ((err = e100_alloc_cbs(nic)))
		goto err_rx_clean_list;
	if ((err = e100_hw_init(nic)))
		goto err_clean_cbs;
	e100_set_multicast_list(nic->netdev);
	e100_start_receiver(nic, NULL);
	if (!nic->ecdev) {
		/* EtherCAT mode drives the watchdog from e100_ec_poll(). */
		mod_timer(&nic->watchdog, jiffies);
	}
	if ((err = request_irq(nic->pdev->irq, e100_intr, IRQF_SHARED,
		nic->netdev->name, nic->netdev)))
		goto err_no_irq;
	if (!nic->ecdev) {
		netif_wake_queue(nic->netdev);
		napi_enable(&nic->napi);
		/* enable ints _after_ enabling poll, preventing a race between
		 * disable ints+schedule */
		e100_enable_irq(nic);
	}
	return 0;

err_no_irq:
	if (!nic->ecdev)
		del_timer_sync(&nic->watchdog);
err_clean_cbs:
	e100_clean_cbs(nic);
err_rx_clean_list:
	e100_rx_clean_list(nic);
	return err;
}
       
  2465 
       
/* Bring the interface down: quiesce NAPI and the TX queue (non-EtherCAT
 * mode), reset the hardware, free the IRQ, stop the watchdog, and
 * release the TX and RX rings.  Mirrors e100_up() in reverse. */
static void e100_down(struct nic *nic)
{
	if (!nic->ecdev) {
		/* wait here for poll to complete */
		napi_disable(&nic->napi);
		netif_stop_queue(nic->netdev);
	}
	e100_hw_reset(nic);
	free_irq(nic->pdev->irq, nic->netdev);
	if (!nic->ecdev) {
		del_timer_sync(&nic->watchdog);
		netif_carrier_off(nic->netdev);
	}
	e100_clean_cbs(nic);
	e100_rx_clean_list(nic);
}
       
  2482 
       
  2483 static void e100_tx_timeout(struct net_device *netdev)
       
  2484 {
       
  2485 	struct nic *nic = netdev_priv(netdev);
       
  2486 
       
  2487 	/* Reset outside of interrupt context, to avoid request_irq
       
  2488 	 * in interrupt context */
       
  2489 	schedule_work(&nic->tx_timeout_task);
       
  2490 }
       
  2491 
       
  2492 static void e100_tx_timeout_task(struct work_struct *work)
       
  2493 {
       
  2494 	struct nic *nic = container_of(work, struct nic, tx_timeout_task);
       
  2495 	struct net_device *netdev = nic->netdev;
       
  2496 
       
  2497 	netif_printk(nic, tx_err, KERN_DEBUG, nic->netdev,
       
  2498 		     "scb.status=0x%02X\n", ioread8(&nic->csr->scb.status));
       
  2499 
       
  2500 	rtnl_lock();
       
  2501 	if (netif_running(netdev)) {
       
  2502 		e100_down(netdev_priv(netdev));
       
  2503 		e100_up(netdev_priv(netdev));
       
  2504 	}
       
  2505 	rtnl_unlock();
       
  2506 }
       
  2507 
       
/* Run a MAC or PHY loopback self-test.
 *
 * Allocates private RX/TX rings, puts the hardware into the requested
 * loopback mode, transmits one all-0xFF frame and verifies it arrives
 * back byte-for-byte.  Returns 0 on success, -EAGAIN on data mismatch,
 * or a negative setup error; all resources are released on every path.
 */
static int e100_loopback_test(struct nic *nic, enum loopback loopback_mode)
{
	int err;
	struct sk_buff *skb;

	/* Use driver resources to perform internal MAC or PHY
	 * loopback test.  A single packet is prepared and transmitted
	 * in loopback mode, and the test passes if the received
	 * packet compares byte-for-byte to the transmitted packet. */

	if ((err = e100_rx_alloc_list(nic)))
		return err;
	if ((err = e100_alloc_cbs(nic)))
		goto err_clean_rx;

	/* ICH PHY loopback is broken so do MAC loopback instead */
	if (nic->flags & ich && loopback_mode == lb_phy)
		loopback_mode = lb_mac;

	nic->loopback = loopback_mode;
	if ((err = e100_hw_init(nic)))
		goto err_loopback_none;

	if (loopback_mode == lb_phy)
		mdio_write(nic->netdev, nic->mii.phy_id, MII_BMCR,
			BMCR_LOOPBACK);

	e100_start_receiver(nic, NULL);

	if (!(skb = netdev_alloc_skb(nic->netdev, ETH_DATA_LEN))) {
		err = -ENOMEM;
		goto err_loopback_none;
	}
	/* Transmit a full-sized frame of 0xFF bytes. */
	skb_put(skb, ETH_DATA_LEN);
	memset(skb->data, 0xFF, ETH_DATA_LEN);
	e100_xmit_frame(skb, nic->netdev);

	/* Give the hardware time to loop the frame back. */
	msleep(10);

	pci_dma_sync_single_for_cpu(nic->pdev, nic->rx_to_clean->dma_addr,
			RFD_BUF_LEN, PCI_DMA_BIDIRECTIONAL);

	/* Received payload starts after the RFD header in the buffer. */
	if (memcmp(nic->rx_to_clean->skb->data + sizeof(struct rfd),
	   skb->data, ETH_DATA_LEN))
		err = -EAGAIN;

err_loopback_none:
	mdio_write(nic->netdev, nic->mii.phy_id, MII_BMCR, 0);
	nic->loopback = lb_none;
	e100_clean_cbs(nic);
	e100_hw_reset(nic);
err_clean_rx:
	e100_rx_clean_list(nic);
	return err;
}
       
  2563 
       
  2564 #define MII_LED_CONTROL	0x1B
       
  2565 #define E100_82552_LED_OVERRIDE 0x19
       
  2566 #define E100_82552_LED_ON       0x000F /* LEDTX and LED_RX both on */
       
  2567 #define E100_82552_LED_OFF      0x000A /* LEDTX and LED_RX both off */
       
  2568 
       
  2569 static int e100_get_settings(struct net_device *netdev, struct ethtool_cmd *cmd)
       
  2570 {
       
  2571 	struct nic *nic = netdev_priv(netdev);
       
  2572 	return mii_ethtool_gset(&nic->mii, cmd);
       
  2573 }
       
  2574 
       
  2575 static int e100_set_settings(struct net_device *netdev, struct ethtool_cmd *cmd)
       
  2576 {
       
  2577 	struct nic *nic = netdev_priv(netdev);
       
  2578 	int err;
       
  2579 
       
  2580 	mdio_write(netdev, nic->mii.phy_id, MII_BMCR, BMCR_RESET);
       
  2581 	err = mii_ethtool_sset(&nic->mii, cmd);
       
  2582 	e100_exec_cb(nic, NULL, e100_configure);
       
  2583 
       
  2584 	return err;
       
  2585 }
       
  2586 
       
  2587 static void e100_get_drvinfo(struct net_device *netdev,
       
  2588 	struct ethtool_drvinfo *info)
       
  2589 {
       
  2590 	struct nic *nic = netdev_priv(netdev);
       
  2591 	strlcpy(info->driver, DRV_NAME, sizeof(info->driver));
       
  2592 	strlcpy(info->version, DRV_VERSION, sizeof(info->version));
       
  2593 	strlcpy(info->bus_info, pci_name(nic->pdev),
       
  2594 		sizeof(info->bus_info));
       
  2595 }
       
  2596 
       
  2597 #define E100_PHY_REGS 0x1C
       
  2598 static int e100_get_regs_len(struct net_device *netdev)
       
  2599 {
       
  2600 	struct nic *nic = netdev_priv(netdev);
       
  2601 	return 1 + E100_PHY_REGS + sizeof(nic->mem->dump_buf);
       
  2602 }
       
  2603 
       
/* ethtool get_regs: dump device state into @p as u32 entries.
 *
 * Layout: buff[0] holds an SCB snapshot (cmd_hi:cmd_lo:status packed
 * into one u32); buff[1..1+E100_PHY_REGS] hold MDIO registers read from
 * register E100_PHY_REGS down to 0; the hardware statistics dump buffer
 * follows at &buff[2 + E100_PHY_REGS].
 */
static void e100_get_regs(struct net_device *netdev,
	struct ethtool_regs *regs, void *p)
{
	struct nic *nic = netdev_priv(netdev);
	u32 *buff = p;
	int i;

	regs->version = (1 << 24) | nic->pdev->revision;
	buff[0] = ioread8(&nic->csr->scb.cmd_hi) << 24 |
		ioread8(&nic->csr->scb.cmd_lo) << 16 |
		ioread16(&nic->csr->scb.status);
	/* Highest register first: buff[1] = reg E100_PHY_REGS ... buff[1+E100_PHY_REGS] = reg 0 */
	for (i = E100_PHY_REGS; i >= 0; i--)
		buff[1 + E100_PHY_REGS - i] =
			mdio_read(netdev, nic->mii.phy_id, i);
	memset(nic->mem->dump_buf, 0, sizeof(nic->mem->dump_buf));
	/* Ask the device to dump statistics, then give it time to finish. */
	e100_exec_cb(nic, NULL, e100_dump);
	msleep(10);
	memcpy(&buff[2 + E100_PHY_REGS], nic->mem->dump_buf,
		sizeof(nic->mem->dump_buf));
}
       
  2624 
       
  2625 static void e100_get_wol(struct net_device *netdev, struct ethtool_wolinfo *wol)
       
  2626 {
       
  2627 	struct nic *nic = netdev_priv(netdev);
       
  2628 	wol->supported = (nic->mac >= mac_82558_D101_A4) ?  WAKE_MAGIC : 0;
       
  2629 	wol->wolopts = (nic->flags & wol_magic) ? WAKE_MAGIC : 0;
       
  2630 }
       
  2631 
       
  2632 static int e100_set_wol(struct net_device *netdev, struct ethtool_wolinfo *wol)
       
  2633 {
       
  2634 	struct nic *nic = netdev_priv(netdev);
       
  2635 
       
  2636 	if ((wol->wolopts && wol->wolopts != WAKE_MAGIC) ||
       
  2637 	    !device_can_wakeup(&nic->pdev->dev))
       
  2638 		return -EOPNOTSUPP;
       
  2639 
       
  2640 	if (wol->wolopts)
       
  2641 		nic->flags |= wol_magic;
       
  2642 	else
       
  2643 		nic->flags &= ~wol_magic;
       
  2644 
       
  2645 	device_set_wakeup_enable(&nic->pdev->dev, wol->wolopts);
       
  2646 
       
  2647 	e100_exec_cb(nic, NULL, e100_configure);
       
  2648 
       
  2649 	return 0;
       
  2650 }
       
  2651 
       
  2652 static u32 e100_get_msglevel(struct net_device *netdev)
       
  2653 {
       
  2654 	struct nic *nic = netdev_priv(netdev);
       
  2655 	return nic->msg_enable;
       
  2656 }
       
  2657 
       
  2658 static void e100_set_msglevel(struct net_device *netdev, u32 value)
       
  2659 {
       
  2660 	struct nic *nic = netdev_priv(netdev);
       
  2661 	nic->msg_enable = value;
       
  2662 }
       
  2663 
       
  2664 static int e100_nway_reset(struct net_device *netdev)
       
  2665 {
       
  2666 	struct nic *nic = netdev_priv(netdev);
       
  2667 	return mii_nway_restart(&nic->mii);
       
  2668 }
       
  2669 
       
  2670 static u32 e100_get_link(struct net_device *netdev)
       
  2671 {
       
  2672 	struct nic *nic = netdev_priv(netdev);
       
  2673 	return mii_link_ok(&nic->mii);
       
  2674 }
       
  2675 
       
  2676 static int e100_get_eeprom_len(struct net_device *netdev)
       
  2677 {
       
  2678 	struct nic *nic = netdev_priv(netdev);
       
  2679 	return nic->eeprom_wc << 1;
       
  2680 }
       
  2681 
       
  2682 #define E100_EEPROM_MAGIC	0x1234
       
  2683 static int e100_get_eeprom(struct net_device *netdev,
       
  2684 	struct ethtool_eeprom *eeprom, u8 *bytes)
       
  2685 {
       
  2686 	struct nic *nic = netdev_priv(netdev);
       
  2687 
       
  2688 	eeprom->magic = E100_EEPROM_MAGIC;
       
  2689 	memcpy(bytes, &((u8 *)nic->eeprom)[eeprom->offset], eeprom->len);
       
  2690 
       
  2691 	return 0;
       
  2692 }
       
  2693 
       
/* ethtool set_eeprom: update the cached EEPROM image and flush the
 * affected region to the device.  Requires the E100 magic cookie. */
static int e100_set_eeprom(struct net_device *netdev,
	struct ethtool_eeprom *eeprom, u8 *bytes)
{
	struct nic *nic = netdev_priv(netdev);

	if (eeprom->magic != E100_EEPROM_MAGIC)
		return -EINVAL;

	memcpy(&((u8 *)nic->eeprom)[eeprom->offset], bytes, eeprom->len);

	/* eeprom_save works in 16-bit words: offset>>1 is the first word;
	 * len>>1 + 1 rounds the count up so a byte range that starts or
	 * ends mid-word is still fully written. */
	return e100_eeprom_save(nic, eeprom->offset >> 1,
		(eeprom->len >> 1) + 1);
}
       
  2707 
       
  2708 static void e100_get_ringparam(struct net_device *netdev,
       
  2709 	struct ethtool_ringparam *ring)
       
  2710 {
       
  2711 	struct nic *nic = netdev_priv(netdev);
       
  2712 	struct param_range *rfds = &nic->params.rfds;
       
  2713 	struct param_range *cbs = &nic->params.cbs;
       
  2714 
       
  2715 	ring->rx_max_pending = rfds->max;
       
  2716 	ring->tx_max_pending = cbs->max;
       
  2717 	ring->rx_pending = rfds->count;
       
  2718 	ring->tx_pending = cbs->count;
       
  2719 }
       
  2720 
       
  2721 static int e100_set_ringparam(struct net_device *netdev,
       
  2722 	struct ethtool_ringparam *ring)
       
  2723 {
       
  2724 	struct nic *nic = netdev_priv(netdev);
       
  2725 	struct param_range *rfds = &nic->params.rfds;
       
  2726 	struct param_range *cbs = &nic->params.cbs;
       
  2727 
       
  2728 	if ((ring->rx_mini_pending) || (ring->rx_jumbo_pending))
       
  2729 		return -EINVAL;
       
  2730 
       
  2731 	if (netif_running(netdev))
       
  2732 		e100_down(nic);
       
  2733 	rfds->count = max(ring->rx_pending, rfds->min);
       
  2734 	rfds->count = min(rfds->count, rfds->max);
       
  2735 	cbs->count = max(ring->tx_pending, cbs->min);
       
  2736 	cbs->count = min(cbs->count, cbs->max);
       
  2737 	netif_info(nic, drv, nic->netdev, "Ring Param settings: rx: %d, tx %d\n",
       
  2738 		   rfds->count, cbs->count);
       
  2739 	if (netif_running(netdev))
       
  2740 		e100_up(nic);
       
  2741 
       
  2742 	return 0;
       
  2743 }
       
  2744 
       
/* Names for the ethtool self-tests; order must match the data[] indices
 * filled in by e100_diag_test(). */
static const char e100_gstrings_test[][ETH_GSTRING_LEN] = {
	"Link test     (on/offline)",
	"Eeprom test   (on/offline)",
	"Self test        (offline)",
	"Mac loopback     (offline)",
	"Phy loopback     (offline)",
};
#define E100_TEST_LEN	ARRAY_SIZE(e100_gstrings_test)
       
  2753 
       
/* ethtool self_test: run the diagnostics named in e100_gstrings_test.
 *
 * Link and EEPROM checks always run; the self-test and MAC/PHY loopback
 * tests run only offline (interface is cycled down/up around them, and
 * link settings are saved and restored).  A non-zero data[i] marks test
 * i as failed.
 */
static void e100_diag_test(struct net_device *netdev,
	struct ethtool_test *test, u64 *data)
{
	struct ethtool_cmd cmd;
	struct nic *nic = netdev_priv(netdev);
	int i, err;

	memset(data, 0, E100_TEST_LEN * sizeof(u64));
	data[0] = !mii_link_ok(&nic->mii);
	data[1] = e100_eeprom_load(nic);
	if (test->flags & ETH_TEST_FL_OFFLINE) {

		/* save speed, duplex & autoneg settings */
		/* NOTE(review): err is assigned but never checked here or
		 * below - failures of gset/sset are silently ignored. */
		err = mii_ethtool_gset(&nic->mii, &cmd);

		if (netif_running(netdev))
			e100_down(nic);
		data[2] = e100_self_test(nic);
		data[3] = e100_loopback_test(nic, lb_mac);
		data[4] = e100_loopback_test(nic, lb_phy);

		/* restore speed, duplex & autoneg settings */
		err = mii_ethtool_sset(&nic->mii, &cmd);

		if (netif_running(netdev))
			e100_up(nic);
	}
	for (i = 0; i < E100_TEST_LEN; i++)
		test->flags |= data[i] ? ETH_TEST_FL_FAILED : 0;

	/* presumably gives the link time to settle after the restart;
	 * TODO confirm the purpose of this 4 s delay */
	msleep_interruptible(4 * 1000);
}
       
  2786 
       
/* ethtool set_phys_id: blink the port LEDs for physical identification.
 *
 * Returns 2 on ETHTOOL_ID_ACTIVE so the core toggles ON/OFF at 2 Hz.
 * The LED register and on/off values depend on the PHY (82552 uses a
 * dedicated override register) and MAC generation.
 */
static int e100_set_phys_id(struct net_device *netdev,
			    enum ethtool_phys_id_state state)
{
	struct nic *nic = netdev_priv(netdev);
	enum led_state {
		led_on     = 0x01,
		led_off    = 0x04,
		led_on_559 = 0x05,
		led_on_557 = 0x07,
	};
	u16 led_reg = (nic->phy == phy_82552_v) ? E100_82552_LED_OVERRIDE :
		MII_LED_CONTROL;
	u16 leds = 0;

	switch (state) {
	case ETHTOOL_ID_ACTIVE:
		return 2;

	case ETHTOOL_ID_ON:
		leds = (nic->phy == phy_82552_v) ? E100_82552_LED_ON :
		       (nic->mac < mac_82559_D101M) ? led_on_557 : led_on_559;
		break;

	case ETHTOOL_ID_OFF:
		leds = (nic->phy == phy_82552_v) ? E100_82552_LED_OFF : led_off;
		break;

	case ETHTOOL_ID_INACTIVE:
		/* leds = 0 restores normal hardware LED behavior */
		break;
	}

	mdio_write(netdev, nic->mii.phy_id, led_reg, leds);
	return 0;
}
       
  2821 
       
/* Names for ethtool statistics.  The first E100_NET_STATS_LEN entries
 * mirror the leading fields of struct net_device_stats; the rest are
 * device-specific counters.  Order must match e100_get_ethtool_stats(). */
static const char e100_gstrings_stats[][ETH_GSTRING_LEN] = {
	"rx_packets", "tx_packets", "rx_bytes", "tx_bytes", "rx_errors",
	"tx_errors", "rx_dropped", "tx_dropped", "multicast", "collisions",
	"rx_length_errors", "rx_over_errors", "rx_crc_errors",
	"rx_frame_errors", "rx_fifo_errors", "rx_missed_errors",
	"tx_aborted_errors", "tx_carrier_errors", "tx_fifo_errors",
	"tx_heartbeat_errors", "tx_window_errors",
	/* device-specific stats */
	"tx_deferred", "tx_single_collisions", "tx_multi_collisions",
	"tx_flow_control_pause", "rx_flow_control_pause",
	"rx_flow_control_unsupported", "tx_tco_packets", "rx_tco_packets",
	"rx_short_frame_errors", "rx_over_length_errors",
};
/* Count of generic net_device_stats entries above (rx_packets through
 * tx_window_errors). */
#define E100_NET_STATS_LEN	21
#define E100_STATS_LEN	ARRAY_SIZE(e100_gstrings_stats)
       
  2837 
       
  2838 static int e100_get_sset_count(struct net_device *netdev, int sset)
       
  2839 {
       
  2840 	switch (sset) {
       
  2841 	case ETH_SS_TEST:
       
  2842 		return E100_TEST_LEN;
       
  2843 	case ETH_SS_STATS:
       
  2844 		return E100_STATS_LEN;
       
  2845 	default:
       
  2846 		return -EOPNOTSUPP;
       
  2847 	}
       
  2848 }
       
  2849 
       
/* ethtool get_ethtool_stats: fill @data in the exact order of
 * e100_gstrings_stats - first the generic netdev counters, then the
 * device-specific ones. */
static void e100_get_ethtool_stats(struct net_device *netdev,
	struct ethtool_stats *stats, u64 *data)
{
	struct nic *nic = netdev_priv(netdev);
	int i;

	/* Copy the first E100_NET_STATS_LEN fields of netdev->stats;
	 * relies on struct net_device_stats being unsigned-long members. */
	for (i = 0; i < E100_NET_STATS_LEN; i++)
		data[i] = ((unsigned long *)&netdev->stats)[i];

	/* Device-specific counters - keep in sync with e100_gstrings_stats. */
	data[i++] = nic->tx_deferred;
	data[i++] = nic->tx_single_collisions;
	data[i++] = nic->tx_multiple_collisions;
	data[i++] = nic->tx_fc_pause;
	data[i++] = nic->rx_fc_pause;
	data[i++] = nic->rx_fc_unsupported;
	data[i++] = nic->tx_tco_frames;
	data[i++] = nic->rx_tco_frames;
	data[i++] = nic->rx_short_frame_errors;
	data[i++] = nic->rx_over_length_errors;
}
       
  2870 
       
  2871 static void e100_get_strings(struct net_device *netdev, u32 stringset, u8 *data)
       
  2872 {
       
  2873 	switch (stringset) {
       
  2874 	case ETH_SS_TEST:
       
  2875 		memcpy(data, *e100_gstrings_test, sizeof(e100_gstrings_test));
       
  2876 		break;
       
  2877 	case ETH_SS_STATS:
       
  2878 		memcpy(data, *e100_gstrings_stats, sizeof(e100_gstrings_stats));
       
  2879 		break;
       
  2880 	}
       
  2881 }
       
  2882 
       
/* ethtool entry points exported by this driver. */
static const struct ethtool_ops e100_ethtool_ops = {
	.get_settings		= e100_get_settings,
	.set_settings		= e100_set_settings,
	.get_drvinfo		= e100_get_drvinfo,
	.get_regs_len		= e100_get_regs_len,
	.get_regs		= e100_get_regs,
	.get_wol		= e100_get_wol,
	.set_wol		= e100_set_wol,
	.get_msglevel		= e100_get_msglevel,
	.set_msglevel		= e100_set_msglevel,
	.nway_reset		= e100_nway_reset,
	.get_link		= e100_get_link,
	.get_eeprom_len		= e100_get_eeprom_len,
	.get_eeprom		= e100_get_eeprom,
	.set_eeprom		= e100_set_eeprom,
	.get_ringparam		= e100_get_ringparam,
	.set_ringparam		= e100_set_ringparam,
	.self_test		= e100_diag_test,
	.get_strings		= e100_get_strings,
	.set_phys_id		= e100_set_phys_id,
	.get_ethtool_stats	= e100_get_ethtool_stats,
	.get_sset_count		= e100_get_sset_count,
	.get_ts_info		= ethtool_op_get_ts_info,
};
       
  2907 
       
  2908 static int e100_do_ioctl(struct net_device *netdev, struct ifreq *ifr, int cmd)
       
  2909 {
       
  2910 	struct nic *nic = netdev_priv(netdev);
       
  2911 
       
  2912 	return generic_mii_ioctl(&nic->mii, if_mii(ifr), cmd, NULL);
       
  2913 }
       
  2914 
       
  2915 static int e100_alloc(struct nic *nic)
       
  2916 {
       
  2917 	nic->mem = pci_alloc_consistent(nic->pdev, sizeof(struct mem),
       
  2918 		&nic->dma_addr);
       
  2919 	return nic->mem ? 0 : -ENOMEM;
       
  2920 }
       
  2921 
       
  2922 static void e100_free(struct nic *nic)
       
  2923 {
       
  2924 	if (nic->mem) {
       
  2925 		pci_free_consistent(nic->pdev, sizeof(struct mem),
       
  2926 			nic->mem, nic->dma_addr);
       
  2927 		nic->mem = NULL;
       
  2928 	}
       
  2929 }
       
  2930 
       
  2931 static int e100_open(struct net_device *netdev)
       
  2932 {
       
  2933 	struct nic *nic = netdev_priv(netdev);
       
  2934 	int err = 0;
       
  2935 
       
  2936 	if (!nic->ecdev)
       
  2937 		netif_carrier_off(netdev);
       
  2938 	if ((err = e100_up(nic)))
       
  2939 		netif_err(nic, ifup, nic->netdev, "Cannot open interface, aborting\n");
       
  2940 	return err;
       
  2941 }
       
  2942 
       
  2943 static int e100_close(struct net_device *netdev)
       
  2944 {
       
  2945 	e100_down(netdev_priv(netdev));
       
  2946 	return 0;
       
  2947 }
       
  2948 
       
  2949 static int e100_set_features(struct net_device *netdev,
       
  2950 			     netdev_features_t features)
       
  2951 {
       
  2952 	struct nic *nic = netdev_priv(netdev);
       
  2953 	netdev_features_t changed = features ^ netdev->features;
       
  2954 
       
  2955 	if (!(changed & (NETIF_F_RXFCS | NETIF_F_RXALL)))
       
  2956 		return 0;
       
  2957 
       
  2958 	netdev->features = features;
       
  2959 	e100_exec_cb(nic, NULL, e100_configure);
       
  2960 	return 0;
       
  2961 }
       
  2962 
       
/* Network stack entry points for this driver. */
static const struct net_device_ops e100_netdev_ops = {
	.ndo_open		= e100_open,
	.ndo_stop		= e100_close,
	.ndo_start_xmit		= e100_xmit_frame,
	.ndo_validate_addr	= eth_validate_addr,
	.ndo_set_rx_mode	= e100_set_multicast_list,
	.ndo_set_mac_address	= e100_set_mac_address,
	.ndo_change_mtu		= e100_change_mtu,
	.ndo_do_ioctl		= e100_do_ioctl,
	.ndo_tx_timeout		= e100_tx_timeout,
#ifdef CONFIG_NET_POLL_CONTROLLER
	.ndo_poll_controller	= e100_netpoll,
#endif
	.ndo_set_features	= e100_set_features,
};
       
  2978 
       
/* PCI probe: allocate the net_device, map and reset the hardware, load
 * the EEPROM, then either hand the device to the EtherCAT master
 * (ecdev_offer succeeds) or register it as a normal netdev.  Errors
 * unwind through the goto-cleanup ladder at the bottom. */
static int e100_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
{
	struct net_device *netdev;
	struct nic *nic;
	int err;

	if (!(netdev = alloc_etherdev(sizeof(struct nic))))
		return -ENOMEM;

	netdev->hw_features |= NETIF_F_RXFCS;
	netdev->priv_flags |= IFF_SUPP_NOFCS;
	netdev->hw_features |= NETIF_F_RXALL;

	netdev->netdev_ops = &e100_netdev_ops;
	SET_ETHTOOL_OPS(netdev, &e100_ethtool_ops);
	netdev->watchdog_timeo = E100_WATCHDOG_PERIOD;
	/* Temporary name from the PCI slot; replaced by "eth%d" below when
	 * the device is registered with the net core. */
	strncpy(netdev->name, pci_name(pdev), sizeof(netdev->name) - 1);

	nic = netdev_priv(netdev);
	netif_napi_add(netdev, &nic->napi, e100_poll, E100_NAPI_WEIGHT);
	nic->netdev = netdev;
	nic->pdev = pdev;
	nic->msg_enable = (1 << debug) - 1;
	nic->mdio_ctrl = mdio_ctrl_hw;
	pci_set_drvdata(pdev, netdev);

	if ((err = pci_enable_device(pdev))) {
		netif_err(nic, probe, nic->netdev, "Cannot enable PCI device, aborting\n");
		goto err_out_free_dev;
	}

	if (!(pci_resource_flags(pdev, 0) & IORESOURCE_MEM)) {
		netif_err(nic, probe, nic->netdev, "Cannot find proper PCI device base address, aborting\n");
		err = -ENODEV;
		goto err_out_disable_pdev;
	}

	if ((err = pci_request_regions(pdev, DRV_NAME))) {
		netif_err(nic, probe, nic->netdev, "Cannot obtain PCI resources, aborting\n");
		goto err_out_disable_pdev;
	}

	/* e100 hardware is a 32-bit DMA device. */
	if ((err = pci_set_dma_mask(pdev, DMA_BIT_MASK(32)))) {
		netif_err(nic, probe, nic->netdev, "No usable DMA configuration, aborting\n");
		goto err_out_free_res;
	}

	SET_NETDEV_DEV(netdev, &pdev->dev);

	if (use_io)
		netif_info(nic, probe, nic->netdev, "using i/o access mode\n");

	/* BAR 1 is the I/O BAR, BAR 0 the memory BAR. */
	nic->csr = pci_iomap(pdev, (use_io ? 1 : 0), sizeof(struct csr));
	if (!nic->csr) {
		netif_err(nic, probe, nic->netdev, "Cannot map device registers, aborting\n");
		err = -ENOMEM;
		goto err_out_free_res;
	}

	/* driver_data in the id table flags ICH-family parts. */
	if (ent->driver_data)
		nic->flags |= ich;
	else
		nic->flags &= ~ich;

	e100_get_defaults(nic);

	/* D100 MAC doesn't allow rx of vlan packets with normal MTU */
	if (nic->mac < mac_82558_D101_A4)
		netdev->features |= NETIF_F_VLAN_CHALLENGED;

	/* locks must be initialized before calling hw_reset */
	spin_lock_init(&nic->cb_lock);
	spin_lock_init(&nic->cmd_lock);
	spin_lock_init(&nic->mdio_lock);

	/* Reset the device before pci_set_master() in case device is in some
	 * funky state and has an interrupt pending - hint: we don't have the
	 * interrupt handler registered yet. */
	e100_hw_reset(nic);

	pci_set_master(pdev);

	init_timer(&nic->watchdog);
	nic->watchdog.function = e100_watchdog;
	nic->watchdog.data = (unsigned long)nic;

	INIT_WORK(&nic->tx_timeout_task, e100_tx_timeout_task);

	if ((err = e100_alloc(nic))) {
		netif_err(nic, probe, nic->netdev, "Cannot alloc driver memory, aborting\n");
		goto err_out_iounmap;
	}

	if ((err = e100_eeprom_load(nic)))
		goto err_out_free;

	e100_phy_init(nic);

	/* MAC address lives at the start of the EEPROM image. */
	memcpy(netdev->dev_addr, nic->eeprom, ETH_ALEN);
	memcpy(netdev->perm_addr, nic->eeprom, ETH_ALEN);
	if (!is_valid_ether_addr(netdev->perm_addr)) {
		if (!eeprom_bad_csum_allow) {
			netif_err(nic, probe, nic->netdev, "Invalid MAC address from EEPROM, aborting\n");
			err = -EAGAIN;
			goto err_out_free;
		} else {
			netif_err(nic, probe, nic->netdev, "Invalid MAC address from EEPROM, you MUST configure one.\n");
		}
	}

	/* Wol magic packet can be enabled from eeprom */
	if ((nic->mac >= mac_82558_D101_A4) &&
	   (nic->eeprom[eeprom_id] & eeprom_id_wol)) {
		nic->flags |= wol_magic;
		device_set_wakeup_enable(&pdev->dev, true);
	}

	/* ack any pending wake events, disable PME */
	pci_pme_active(pdev, false);

	// offer device to EtherCAT master module
	nic->ecdev = ecdev_offer(netdev, e100_ec_poll, THIS_MODULE);

	/* Only register with the net core if EtherCAT did not claim us. */
	if (!nic->ecdev) {
		strcpy(netdev->name, "eth%d");
		if ((err = register_netdev(netdev))) {
			netif_err(nic, probe, nic->netdev,
					"Cannot register net device, aborting\n");
			goto err_out_free;
		}
	}

	/* NOTE(review): pci_pool_create() can return NULL; the result is
	 * not checked here — confirm downstream users tolerate a NULL
	 * cbs_pool or add an error path. */
	nic->cbs_pool = pci_pool_create(netdev->name,
			   nic->pdev,
			   nic->params.cbs.max * sizeof(struct cb),
			   sizeof(u32),
			   0);
	netif_info(nic, probe, nic->netdev,
		   "addr 0x%llx, irq %d, MAC addr %pM\n",
		   (unsigned long long)pci_resource_start(pdev, use_io ? 1 : 0),
		   pdev->irq, netdev->dev_addr);

	if (nic->ecdev) {
		err = ecdev_open(nic->ecdev);
		if (err) {
			ecdev_withdraw(nic->ecdev);
			goto err_out_free;
		}
	}

	return 0;

err_out_free:
	e100_free(nic);
err_out_iounmap:
	pci_iounmap(pdev, nic->csr);
err_out_free_res:
	pci_release_regions(pdev);
err_out_disable_pdev:
	pci_disable_device(pdev);
err_out_free_dev:
	pci_set_drvdata(pdev, NULL);
	free_netdev(netdev);
	return err;
}
       
  3144 
       
/* PCI remove: detach from either the EtherCAT master or the net core,
 * then release resources in reverse order of e100_probe(). */
static void e100_remove(struct pci_dev *pdev)
{
	struct net_device *netdev = pci_get_drvdata(pdev);

	if (netdev) {
		struct nic *nic = netdev_priv(netdev);
		/* EtherCAT-claimed devices were never register_netdev()'d. */
		if (nic->ecdev) {
			ecdev_close(nic->ecdev);
			ecdev_withdraw(nic->ecdev);
		} else {
			unregister_netdev(netdev);
		}

		e100_free(nic);
		pci_iounmap(pdev, nic->csr);
		pci_pool_destroy(nic->cbs_pool);
		free_netdev(netdev);
		pci_release_regions(pdev);
		pci_disable_device(pdev);
		pci_set_drvdata(pdev, NULL);
	}
}
       
  3167 
       
#define E100_82552_SMARTSPEED   0x14   /* SmartSpeed Ctrl register */
#define E100_82552_REV_ANEG     0x0200 /* Reverse auto-negotiation */
#define E100_82552_ANEG_NOW     0x0400 /* Auto-negotiate now */
/* Common suspend/shutdown path: stop the interface, save PCI state and
 * report via *enable_wake whether wake-up (WoL magic packet or ASF)
 * should stay armed. */
static void __e100_shutdown(struct pci_dev *pdev, bool *enable_wake)
{
	struct net_device *netdev = pci_get_drvdata(pdev);
	struct nic *nic = netdev_priv(netdev);

	if (netif_running(netdev))
		e100_down(nic);
	netif_device_detach(netdev);

	pci_save_state(pdev);

	/* NOTE(review): bitwise '|' (not '||') — both operands are always
	 * evaluated; presumably intentional, confirm e100_asf() has no
	 * side effects that must run unconditionally. */
	if ((nic->flags & wol_magic) | e100_asf(nic)) {
		/* enable reverse auto-negotiation */
		if (nic->phy == phy_82552_v) {
			u16 smartspeed = mdio_read(netdev, nic->mii.phy_id,
			                           E100_82552_SMARTSPEED);

			mdio_write(netdev, nic->mii.phy_id,
			           E100_82552_SMARTSPEED, smartspeed |
			           E100_82552_REV_ANEG | E100_82552_ANEG_NOW);
		}
		*enable_wake = true;
	} else {
		*enable_wake = false;
	}

	pci_disable_device(pdev);
}
       
  3199 
       
  3200 static int __e100_power_off(struct pci_dev *pdev, bool wake)
       
  3201 {
       
  3202 	if (wake)
       
  3203 		return pci_prepare_to_sleep(pdev);
       
  3204 
       
  3205 	pci_wake_from_d3(pdev, false);
       
  3206 	pci_set_power_state(pdev, PCI_D3hot);
       
  3207 
       
  3208 	return 0;
       
  3209 }
       
  3210 
       
#ifdef CONFIG_PM
/* Legacy PM suspend hook: quiesce the device and power it off,
 * honouring the wake decision made by __e100_shutdown(). */
static int e100_suspend(struct pci_dev *pdev, pm_message_t state)
{
	bool wake;
	__e100_shutdown(pdev, &wake);
	return __e100_power_off(pdev, wake);
}

/* Legacy PM resume hook: restore PCI state, undo the reverse
 * auto-negotiation enabled at suspend, and bring the interface back up
 * if it was running. */
static int e100_resume(struct pci_dev *pdev)
{
	struct net_device *netdev = pci_get_drvdata(pdev);
	struct nic *nic = netdev_priv(netdev);

	pci_set_power_state(pdev, PCI_D0);
	pci_restore_state(pdev);
	/* ack any pending wake events, disable PME */
	pci_enable_wake(pdev, 0, 0);

	/* disable reverse auto-negotiation */
	if (nic->phy == phy_82552_v) {
		u16 smartspeed = mdio_read(netdev, nic->mii.phy_id,
		                           E100_82552_SMARTSPEED);

		mdio_write(netdev, nic->mii.phy_id,
		           E100_82552_SMARTSPEED,
		           smartspeed & ~(E100_82552_REV_ANEG));
	}

	netif_device_attach(netdev);
	if (netif_running(netdev))
		e100_up(nic);

	return 0;
}
#endif /* CONFIG_PM */
       
  3246 
       
/* PCI shutdown hook: quiesce the device; only power it off when the
 * system is actually powering down (not on reboot). */
static void e100_shutdown(struct pci_dev *pdev)
{
	bool wake;
	__e100_shutdown(pdev, &wake);
	if (system_state == SYSTEM_POWER_OFF)
		__e100_power_off(pdev, wake);
}
       
  3254 
       
/* ------------------ PCI Error Recovery infrastructure  -------------- */
/**
 * e100_io_error_detected - called when PCI error is detected.
 * @pdev: Pointer to PCI device
 * @state: The current pci connection state
 */
static pci_ers_result_t e100_io_error_detected(struct pci_dev *pdev, pci_channel_state_t state)
{
	struct net_device *netdev = pci_get_drvdata(pdev);
	struct nic *nic = netdev_priv(netdev);

	/* NOTE(review): -EBUSY is not a pci_ers_result_t value; the PCI
	 * core expects a PCI_ERS_RESULT_* constant here.  Presumably this
	 * is meant to veto recovery while EtherCAT owns the device —
	 * confirm what the PCI core does with this return. */
	if (nic->ecdev)
		return -EBUSY;

	netif_device_detach(netdev);

	if (state == pci_channel_io_perm_failure)
		return PCI_ERS_RESULT_DISCONNECT;

	if (netif_running(netdev))
		e100_down(nic);
	pci_disable_device(pdev);

	/* Request a slot reset. */
	return PCI_ERS_RESULT_NEED_RESET;
}
       
  3281 
       
/**
 * e100_io_slot_reset - called after the pci bus has been reset.
 * @pdev: Pointer to PCI device
 *
 * Restart the card from scratch.
 */
static pci_ers_result_t e100_io_slot_reset(struct pci_dev *pdev)
{
	struct net_device *netdev = pci_get_drvdata(pdev);
	struct nic *nic = netdev_priv(netdev);

	/* NOTE(review): same type mismatch as e100_io_error_detected() —
	 * -EBUSY is not a pci_ers_result_t value. */
	if (nic->ecdev)
		return -EBUSY;

	if (pci_enable_device(pdev)) {
		pr_err("Cannot re-enable PCI device after reset\n");
		return PCI_ERS_RESULT_DISCONNECT;
	}
	pci_set_master(pdev);

	/* Only one device per card can do a reset */
	if (0 != PCI_FUNC(pdev->devfn))
		return PCI_ERS_RESULT_RECOVERED;
	e100_hw_reset(nic);
	e100_phy_init(nic);

	return PCI_ERS_RESULT_RECOVERED;
}
       
  3310 
       
/**
 * e100_io_resume - resume normal operations
 * @pdev: Pointer to PCI device
 *
 * Resume normal operations after an error recovery
 * sequence has been completed.
 */
static void e100_io_resume(struct pci_dev *pdev)
{
	struct net_device *netdev = pci_get_drvdata(pdev);
	struct nic *nic = netdev_priv(netdev);

	/* ack any pending wake events, disable PME */
	pci_enable_wake(pdev, 0, 0);

	/* netif attach/watchdog handling belongs to the net core only;
	 * EtherCAT-claimed devices reopen unconditionally. */
	if (!nic->ecdev)
		netif_device_attach(netdev);
	if (nic->ecdev || netif_running(netdev)) {
		e100_open(netdev);
		if (!nic->ecdev)
			mod_timer(&nic->watchdog, jiffies);
	}
}
       
  3334 
       
/* PCI AER recovery callbacks. */
static const struct pci_error_handlers e100_err_handler = {
	.error_detected = e100_io_error_detected,
	.slot_reset = e100_io_slot_reset,
	.resume = e100_io_resume,
};
       
  3340 
       
/* PCI driver registration record. */
static struct pci_driver e100_driver = {
	.name =         DRV_NAME,
	.id_table =     e100_id_table,
	.probe =        e100_probe,
	.remove =       e100_remove,
#ifdef CONFIG_PM
	/* Power Management hooks */
	.suspend =      e100_suspend,
	.resume =       e100_resume,
#endif
	.shutdown =     e100_shutdown,
	.err_handler = &e100_err_handler,
};
       
  3354 
       
  3355 static int __init e100_init_module(void)
       
  3356 {
       
  3357 	if (((1 << debug) - 1) & NETIF_MSG_DRV) {
       
  3358 		pr_info("%s %s, %s\n", DRV_NAME, DRV_DESCRIPTION, DRV_VERSION);
       
  3359 		pr_info("%s\n", DRV_COPYRIGHT);
       
  3360 	}
       
  3361 	return pci_register_driver(&e100_driver);
       
  3362 }
       
  3363 
       
/* Module exit point: unregister the PCI driver. */
static void __exit e100_cleanup_module(void)
{
	printk(KERN_INFO DRV_NAME " cleaning up module...\n");
	pci_unregister_driver(&e100_driver);
	printk(KERN_INFO DRV_NAME " module cleaned up.\n");
}

module_init(e100_init_module);
module_exit(e100_cleanup_module);