devices/e100-3.16-ethercat.c
branch: stable-1.5
changeset: 2588:792892ab4806
parent: 2587:afd76ee3aa87 (hgweb "equal/deleted/inserted/replaced" diff view)
       
     1 /******************************************************************************
       
     2  *
       
     3  *  $Id$
       
     4  *
       
     5  *  Copyright (C) 2007-2012  Florian Pose, Ingenieurgemeinschaft IgH
       
     6  *
       
     7  *  This file is part of the IgH EtherCAT Master.
       
     8  *
       
     9  *  The IgH EtherCAT Master is free software; you can redistribute it and/or
       
    10  *  modify it under the terms of the GNU General Public License version 2, as
       
    11  *  published by the Free Software Foundation.
       
    12  *
       
    13  *  The IgH EtherCAT Master is distributed in the hope that it will be useful,
       
    14  *  but WITHOUT ANY WARRANTY; without even the implied warranty of
       
    15  *  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General
       
    16  *  Public License for more details.
       
    17  *
       
    18  *  You should have received a copy of the GNU General Public License along
       
    19  *  with the IgH EtherCAT Master; if not, write to the Free Software
       
    20  *  Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA  02110-1301  USA
       
    21  *
       
    22  *  ---
       
    23  *
       
    24  *  The license mentioned above concerns the source code only. Using the
       
    25  *  EtherCAT technology and brand is only permitted in compliance with the
       
    26  *  industrial property and similar rights of Beckhoff Automation GmbH.
       
    27  *
       
    28  *  ---
       
    29  *
       
    30  *  vim: noexpandtab
       
    31  *
       
    32  *****************************************************************************/
       
    33 
       
    34 /**
       
    35    \file
       
    36    EtherCAT driver for e100-compatible NICs.
       
    37 */
       
    38 
       
    39 /* Former documentation: */
       
    40 
       
    41 /*******************************************************************************
       
    42 
       
    43   Intel PRO/100 Linux driver
       
    44   Copyright(c) 1999 - 2006 Intel Corporation.
       
    45 
       
    46   This program is free software; you can redistribute it and/or modify it
       
    47   under the terms and conditions of the GNU General Public License,
       
    48   version 2, as published by the Free Software Foundation.
       
    49 
       
    50   This program is distributed in the hope it will be useful, but WITHOUT
       
    51   ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
       
    52   FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
       
    53   more details.
       
    54 
       
    55   You should have received a copy of the GNU General Public License along with
       
    56   this program; if not, write to the Free Software Foundation, Inc.,
       
    57   51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
       
    58 
       
    59   The full GNU General Public License is included in this distribution in
       
    60   the file called "COPYING".
       
    61 
       
    62   Contact Information:
       
    63   Linux NICS <linux.nics@intel.com>
       
    64   e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
       
    65   Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
       
    66 
       
    67 *******************************************************************************/
       
    68 
       
    69 /*
       
    70  *	e100.c: Intel(R) PRO/100 ethernet driver
       
    71  *
       
    72  *	(Re)written 2003 by scott.feldman@intel.com.  Based loosely on
       
    73  *	original e100 driver, but better described as a munging of
       
    74  *	e100, e1000, eepro100, tg3, 8139cp, and other drivers.
       
    75  *
       
    76  *	References:
       
    77  *		Intel 8255x 10/100 Mbps Ethernet Controller Family,
       
    78  *		Open Source Software Developers Manual,
       
    79  *		http://sourceforge.net/projects/e1000
       
    80  *
       
    81  *
       
    82  *	                      Theory of Operation
       
    83  *
       
    84  *	I.   General
       
    85  *
       
    86  *	The driver supports Intel(R) 10/100 Mbps PCI Fast Ethernet
       
    87  *	controller family, which includes the 82557, 82558, 82559, 82550,
       
    88  *	82551, and 82562 devices.  82558 and greater controllers
       
    89  *	integrate the Intel 82555 PHY.  The controllers are used in
       
    90  *	server and client network interface cards, as well as in
       
    91  *	LAN-On-Motherboard (LOM), CardBus, MiniPCI, and ICHx
       
    92  *	configurations.  8255x supports a 32-bit linear addressing
       
    93  *	mode and operates at 33Mhz PCI clock rate.
       
    94  *
       
    95  *	II.  Driver Operation
       
    96  *
       
    97  *	Memory-mapped mode is used exclusively to access the device's
       
    98  *	shared-memory structure, the Control/Status Registers (CSR). All
       
    99  *	setup, configuration, and control of the device, including queuing
       
   100  *	of Tx, Rx, and configuration commands is through the CSR.
       
   101  *	cmd_lock serializes accesses to the CSR command register.  cb_lock
       
   102  *	protects the shared Command Block List (CBL).
       
   103  *
       
   104  *	8255x is highly MII-compliant and all access to the PHY go
       
   105  *	through the Management Data Interface (MDI).  Consequently, the
       
   106  *	driver leverages the mii.c library shared with other MII-compliant
       
   107  *	devices.
       
   108  *
       
   109  *	Big- and Little-Endian byte order as well as 32- and 64-bit
       
   110  *	archs are supported.  Weak-ordered memory and non-cache-coherent
       
   111  *	archs are supported.
       
   112  *
       
   113  *	III. Transmit
       
   114  *
       
   115  *	A Tx skb is mapped and hangs off of a TCB.  TCBs are linked
       
   116  *	together in a fixed-size ring (CBL) thus forming the flexible mode
       
   117  *	memory structure.  A TCB marked with the suspend-bit indicates
       
   118  *	the end of the ring.  The last TCB processed suspends the
       
   119  *	controller, and the controller can be restarted by issue a CU
       
   120  *	resume command to continue from the suspend point, or a CU start
       
   121  *	command to start at a given position in the ring.
       
   122  *
       
   123  *	Non-Tx commands (config, multicast setup, etc) are linked
       
   124  *	into the CBL ring along with Tx commands.  The common structure
       
   125  *	used for both Tx and non-Tx commands is the Command Block (CB).
       
   126  *
       
   127  *	cb_to_use is the next CB to use for queuing a command; cb_to_clean
       
   128  *	is the next CB to check for completion; cb_to_send is the first
       
   129  *	CB to start on in case of a previous failure to resume.  CB clean
       
   130  *	up happens in interrupt context in response to a CU interrupt.
       
   131  *	cbs_avail keeps track of number of free CB resources available.
       
   132  *
       
   133  * 	Hardware padding of short packets to minimum packet size is
       
   134  * 	enabled.  82557 pads with 7Eh, while the later controllers pad
       
   135  * 	with 00h.
       
   136  *
       
   137  *	IV.  Receive
       
   138  *
       
   139  *	The Receive Frame Area (RFA) comprises a ring of Receive Frame
       
   140  *	Descriptors (RFD) + data buffer, thus forming the simplified mode
       
   141  *	memory structure.  Rx skbs are allocated to contain both the RFD
       
   142  *	and the data buffer, but the RFD is pulled off before the skb is
       
   143  *	indicated.  The data buffer is aligned such that encapsulated
       
   144  *	protocol headers are u32-aligned.  Since the RFD is part of the
       
   145  *	mapped shared memory, and completion status is contained within
       
   146  *	the RFD, the RFD must be dma_sync'ed to maintain a consistent
       
   147  *	view from software and hardware.
       
   148  *
       
   149  *	In order to keep updates to the RFD link field from colliding with
       
   150  *	hardware writes to mark packets complete, we use the feature that
       
   151  *	hardware will not write to a size 0 descriptor and mark the previous
       
   152  *	packet as end-of-list (EL).   After updating the link, we remove EL
       
   153  *	and only then restore the size such that hardware may use the
       
   154  *	previous-to-end RFD.
       
   155  *
       
   156  *	Under typical operation, the  receive unit (RU) is start once,
       
   157  *	and the controller happily fills RFDs as frames arrive.  If
       
   158  *	replacement RFDs cannot be allocated, or the RU goes non-active,
       
   159  *	the RU must be restarted.  Frame arrival generates an interrupt,
       
   160  *	and Rx indication and re-allocation happen in the same context,
       
   161  *	therefore no locking is required.  A software-generated interrupt
       
   162  *	is generated from the watchdog to recover from a failed allocation
       
   163  *	scenario where all Rx resources have been indicated and none re-
       
   164  *	placed.
       
   165  *
       
   166  *	V.   Miscellaneous
       
   167  *
       
   168  * 	VLAN offloading of tagging, stripping and filtering is not
       
   169  * 	supported, but driver will accommodate the extra 4-byte VLAN tag
       
   170  * 	for processing by upper layers.  Tx/Rx Checksum offloading is not
       
   171  * 	supported.  Tx Scatter/Gather is not supported.  Jumbo Frames is
       
   172  * 	not supported (hardware limitation).
       
   173  *
       
   174  * 	MagicPacket(tm) WoL support is enabled/disabled via ethtool.
       
   175  *
       
   176  * 	Thanks to JC (jchapman@katalix.com) for helping with
       
   177  * 	testing/troubleshooting the development driver.
       
   178  *
       
   179  * 	TODO:
       
   180  * 	o several entry points race with dev->close
       
   181  * 	o check for tx-no-resources/stop Q races with tx clean/wake Q
       
   182  *
       
   183  *	FIXES:
       
   184  * 2005/12/02 - Michael O'Donnell <Michael.ODonnell at stratus dot com>
       
   185  *	- Stratus87247: protect MDI control register manipulations
       
   186  * 2009/06/01 - Andreas Mohr <andi at lisas dot de>
       
   187  *      - add clean lowlevel I/O emulation for cards with MII-lacking PHYs
       
   188  */
       
   189 
       
   190 #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
       
   191 
       
   192 #include <linux/hardirq.h>
       
   193 #include <linux/interrupt.h>
       
   194 #include <linux/module.h>
       
   195 #include <linux/moduleparam.h>
       
   196 #include <linux/kernel.h>
       
   197 #include <linux/types.h>
       
   198 #include <linux/sched.h>
       
   199 #include <linux/slab.h>
       
   200 #include <linux/delay.h>
       
   201 #include <linux/init.h>
       
   202 #include <linux/pci.h>
       
   203 #include <linux/dma-mapping.h>
       
   204 #include <linux/dmapool.h>
       
   205 #include <linux/netdevice.h>
       
   206 #include <linux/etherdevice.h>
       
   207 #include <linux/mii.h>
       
   208 #include <linux/if_vlan.h>
       
   209 #include <linux/skbuff.h>
       
   210 #include <linux/ethtool.h>
       
   211 #include <linux/string.h>
       
   212 #include <linux/firmware.h>
       
   213 #include <linux/rtnetlink.h>
       
   214 #include <asm/unaligned.h>
       
   215 
       
   216 // EtherCAT includes
       
   217 #include "../globals.h"
       
   218 #include "ecdev.h"
       
   219 
       
/* Driver identity strings.  DRV_NAME is the module/interface prefix used for
 * the EtherCAT-capable variant ("ec_e100" instead of plain "e100"). */
#define DRV_NAME		"ec_e100"
#define DRV_EXT			"-NAPI"
#define DRV_VERSION		"3.5.24-k2"DRV_EXT
#define DRV_DESCRIPTION		"Intel(R) PRO/100 Network Driver"
#define DRV_COPYRIGHT		"Copyright(c) 1999-2006 Intel Corporation"

/* Watchdog timer period (jiffies) and NAPI poll budget. */
#define E100_WATCHDOG_PERIOD	(2 * HZ)
#define E100_NAPI_WEIGHT	16

/* Microcode images requested at runtime for the respective MAC revisions. */
#define FIRMWARE_D101M		"e100/d101m_ucode.bin"
#define FIRMWARE_D101S		"e100/d101s_ucode.bin"
#define FIRMWARE_D102E		"e100/d102e_ucode.bin"

/* Original upstream module information. */
MODULE_DESCRIPTION(DRV_DESCRIPTION);
MODULE_AUTHOR(DRV_COPYRIGHT);
MODULE_LICENSE("GPL");
MODULE_VERSION(DRV_VERSION);
MODULE_FIRMWARE(FIRMWARE_D101M);
MODULE_FIRMWARE(FIRMWARE_D101S);
MODULE_FIRMWARE(FIRMWARE_D102E);

/* EtherCAT-specific module information.  NOTE(review): MODULE_DESCRIPTION,
 * MODULE_LICENSE and MODULE_VERSION are emitted a second time here; modinfo
 * will carry duplicate tags — presumably intentional in the EtherCAT fork,
 * but worth confirming. */
MODULE_DESCRIPTION(DRV_DESCRIPTION);
MODULE_AUTHOR("Florian Pose <fp@igh-essen.com>");
MODULE_LICENSE("GPL");
MODULE_VERSION(DRV_VERSION ", master " EC_MASTER_VERSION);

/* Cyclic polling entry point used by the EtherCAT master instead of IRQs. */
void e100_ec_poll(struct net_device *);

/* Module parameters (all read-only after load, perm 0). */
static int debug = 3;
static int eeprom_bad_csum_allow = 0;
static int use_io = 0;
module_param(debug, int, 0);
module_param(eeprom_bad_csum_allow, int, 0);
module_param(use_io, int, 0);
MODULE_PARM_DESC(debug, "Debug level (0=none,...,16=all)");
MODULE_PARM_DESC(eeprom_bad_csum_allow, "Allow bad eeprom checksums");
MODULE_PARM_DESC(use_io, "Force use of i/o access mode");
       
   257 
       
/* Build one pci_device_id entry for an Intel 8255x device.  @ich carries a
 * per-device flag value that is stored in driver_data (used elsewhere to
 * select ICH-specific workarounds). */
#define INTEL_8255X_ETHERNET_DEVICE(device_id, ich) {\
	PCI_VENDOR_ID_INTEL, device_id, PCI_ANY_ID, PCI_ANY_ID, \
	PCI_CLASS_NETWORK_ETHERNET << 8, 0xFFFF00, ich }
/* PCI IDs of all supported 8255x variants; terminated by the zero entry. */
static DEFINE_PCI_DEVICE_TABLE(e100_id_table) = {
	INTEL_8255X_ETHERNET_DEVICE(0x1029, 0),
	INTEL_8255X_ETHERNET_DEVICE(0x1030, 0),
	INTEL_8255X_ETHERNET_DEVICE(0x1031, 3),
	INTEL_8255X_ETHERNET_DEVICE(0x1032, 3),
	INTEL_8255X_ETHERNET_DEVICE(0x1033, 3),
	INTEL_8255X_ETHERNET_DEVICE(0x1034, 3),
	INTEL_8255X_ETHERNET_DEVICE(0x1038, 3),
	INTEL_8255X_ETHERNET_DEVICE(0x1039, 4),
	INTEL_8255X_ETHERNET_DEVICE(0x103A, 4),
	INTEL_8255X_ETHERNET_DEVICE(0x103B, 4),
	INTEL_8255X_ETHERNET_DEVICE(0x103C, 4),
	INTEL_8255X_ETHERNET_DEVICE(0x103D, 4),
	INTEL_8255X_ETHERNET_DEVICE(0x103E, 4),
	INTEL_8255X_ETHERNET_DEVICE(0x1050, 5),
	INTEL_8255X_ETHERNET_DEVICE(0x1051, 5),
	INTEL_8255X_ETHERNET_DEVICE(0x1052, 5),
	INTEL_8255X_ETHERNET_DEVICE(0x1053, 5),
	INTEL_8255X_ETHERNET_DEVICE(0x1054, 5),
	INTEL_8255X_ETHERNET_DEVICE(0x1055, 5),
	INTEL_8255X_ETHERNET_DEVICE(0x1056, 5),
	INTEL_8255X_ETHERNET_DEVICE(0x1057, 5),
	INTEL_8255X_ETHERNET_DEVICE(0x1059, 0),
	INTEL_8255X_ETHERNET_DEVICE(0x1064, 6),
	INTEL_8255X_ETHERNET_DEVICE(0x1065, 6),
	INTEL_8255X_ETHERNET_DEVICE(0x1066, 6),
	INTEL_8255X_ETHERNET_DEVICE(0x1067, 6),
	INTEL_8255X_ETHERNET_DEVICE(0x1068, 6),
	INTEL_8255X_ETHERNET_DEVICE(0x1069, 6),
	INTEL_8255X_ETHERNET_DEVICE(0x106A, 6),
	INTEL_8255X_ETHERNET_DEVICE(0x106B, 6),
	INTEL_8255X_ETHERNET_DEVICE(0x1091, 7),
	INTEL_8255X_ETHERNET_DEVICE(0x1092, 7),
	INTEL_8255X_ETHERNET_DEVICE(0x1093, 7),
	INTEL_8255X_ETHERNET_DEVICE(0x1094, 7),
	INTEL_8255X_ETHERNET_DEVICE(0x1095, 7),
	INTEL_8255X_ETHERNET_DEVICE(0x10fe, 7),
	INTEL_8255X_ETHERNET_DEVICE(0x1209, 0),
	INTEL_8255X_ETHERNET_DEVICE(0x1229, 0),
	INTEL_8255X_ETHERNET_DEVICE(0x2449, 2),
	INTEL_8255X_ETHERNET_DEVICE(0x2459, 2),
	INTEL_8255X_ETHERNET_DEVICE(0x245D, 2),
	INTEL_8255X_ETHERNET_DEVICE(0x27DC, 7),
	{ 0, }
};

// prevent from being loaded automatically
//MODULE_DEVICE_TABLE(pci, e100_id_table);
       
   309 
       
/* MAC (controller silicon) revision codes; gaps are unused steppings.
 * mac_unknown flags a revision the driver does not recognize. */
enum mac {
	mac_82557_D100_A  = 0,
	mac_82557_D100_B  = 1,
	mac_82557_D100_C  = 2,
	mac_82558_D101_A4 = 4,
	mac_82558_D101_B0 = 5,
	mac_82559_D101M   = 8,
	mac_82559_D101S   = 9,
	mac_82550_D102    = 12,
	mac_82550_D102_C  = 13,
	mac_82551_E       = 14,
	mac_82551_F       = 15,
	mac_82551_10      = 16,
	mac_unknown       = 0xFF,
};

/* Combined PHY identifier values (ID1/ID2 registers merged into one u32);
 * phy_unknown flags an unrecognized PHY. */
enum phy {
	phy_100a     = 0x000003E0,
	phy_100c     = 0x035002A8,
	phy_82555_tx = 0x015002A8,
	phy_nsc_tx   = 0x5C002000,
	phy_82562_et = 0x033002A8,
	phy_82562_em = 0x032002A8,
	phy_82562_ek = 0x031002A8,
	phy_82562_eh = 0x017002A8,
	phy_82552_v  = 0xd061004d,
	phy_unknown  = 0xFFFFFFFF,
};
       
   338 
       
/* CSR (Control/Status Registers) — memory-mapped register layout of the
 * 8255x.  Field order and sizes mirror the hardware; do not reorder. */
struct csr {
	struct {
		u8 status;	/* SCB status byte */
		u8 stat_ack;	/* interrupt status/ack byte */
		u8 cmd_lo;	/* CU/RU command byte */
		u8 cmd_hi;	/* interrupt mask / SW interrupt byte */
		u32 gen_ptr;	/* general pointer (DMA address operand) */
	} scb;
	u32 port;		/* PORT register: reset/self-test commands */
	u16 flash_ctrl;
	u8 eeprom_ctrl_lo;
	u8 eeprom_ctrl_hi;
	u32 mdi_ctrl;		/* MDI (PHY management) control register */
	u32 rx_dma_count;
};

/* Receive Unit status bits within scb.status. */
enum scb_status {
	rus_no_res       = 0x08,	/* RU out of resources */
	rus_ready        = 0x10,	/* RU ready/receiving */
	rus_mask         = 0x3C,	/* mask of the RU status field */
};

/* Software-tracked state of the Receive Unit. */
enum ru_state  {
	RU_SUSPENDED = 0,
	RU_RUNNING	 = 1,
	RU_UNINITIALIZED = -1,
};

/* Interrupt cause bits in scb.stat_ack; writing them back acks them. */
enum scb_stat_ack {
	stat_ack_not_ours    = 0x00,	/* no cause bits set: not our IRQ */
	stat_ack_sw_gen      = 0x04,	/* software-generated interrupt */
	stat_ack_rnr         = 0x10,	/* RU not ready */
	stat_ack_cu_idle     = 0x20,	/* CU left active state */
	stat_ack_frame_rx    = 0x40,	/* frame received */
	stat_ack_cu_cmd_done = 0x80,	/* CU command completed */
	stat_ack_not_present = 0xFF,	/* all-ones: device gone/unreadable */
	stat_ack_rx = (stat_ack_sw_gen | stat_ack_rnr | stat_ack_frame_rx),
	stat_ack_tx = (stat_ack_cu_idle | stat_ack_cu_cmd_done),
};

/* Values for scb.cmd_hi: interrupt masking and SW interrupt generation. */
enum scb_cmd_hi {
	irq_mask_none = 0x00,
	irq_mask_all  = 0x01,
	irq_sw_gen    = 0x02,
};

/* CU (cuc_*) and RU (ruc_*) opcodes written to scb.cmd_lo. */
enum scb_cmd_lo {
	cuc_nop        = 0x00,
	ruc_start      = 0x01,
	ruc_load_base  = 0x06,
	cuc_start      = 0x10,
	cuc_resume     = 0x20,
	cuc_dump_addr  = 0x40,
	cuc_dump_stats = 0x50,
	cuc_load_base  = 0x60,
	cuc_dump_reset = 0x70,
};

/* Completion signatures the hardware writes after stats-dump commands. */
enum cuc_dump {
	cuc_dump_complete       = 0x0000A005,
	cuc_dump_reset_complete = 0x0000A007,
};

/* Commands for the PORT register (csr->port). */
enum port {
	software_reset  = 0x0000,
	selftest        = 0x0001,
	selective_reset = 0x0002,
};
       
   408 
       
/* Bit-banged EEPROM interface lines in eeprom_ctrl_lo. */
enum eeprom_ctrl_lo {
	eesk = 0x01,	/* serial clock */
	eecs = 0x02,	/* chip select */
	eedi = 0x04,	/* data in (to EEPROM) */
	eedo = 0x08,	/* data out (from EEPROM) */
};

/* MDI control register bits: opcode and ready flag. */
enum mdi_ctrl {
	mdi_write = 0x04000000,
	mdi_read  = 0x08000000,
	mdi_ready = 0x10000000,
};

/* EEPROM serial opcodes. */
enum eeprom_op {
	op_write = 0x05,
	op_read  = 0x06,
	op_ewds  = 0x10,	/* erase/write disable */
	op_ewen  = 0x13,	/* erase/write enable */
};

/* Word offsets of interesting fields within the EEPROM image. */
enum eeprom_offsets {
	eeprom_cnfg_mdix  = 0x03,
	eeprom_phy_iface  = 0x06,
	eeprom_id         = 0x0A,
	eeprom_config_asf = 0x0D,
	eeprom_smbus_addr = 0x90,
};

/* Flag within the eeprom_cnfg_mdix word. */
enum eeprom_cnfg_mdix {
	eeprom_mdix_enabled = 0x0080,
};

/* PHY interface types encoded in the eeprom_phy_iface word. */
enum eeprom_phy_iface {
	NoSuchPhy = 0,
	I82553AB,
	I82553C,
	I82503,
	DP83840,
	S80C240,
	S80C24,
	I82555,
	DP83840A = 10,
};

/* Flag within the eeprom_id word. */
enum eeprom_id {
	eeprom_id_wol = 0x0020,	/* Wake-on-LAN capable */
};

/* Flags within the eeprom_config_asf word. */
enum eeprom_config_asf {
	eeprom_asf = 0x8000,
	eeprom_gcl = 0x4000,
};
       
   461 
       
/* Status bits the hardware sets in a Command Block when it finishes. */
enum cb_status {
	cb_complete = 0x8000,
	cb_ok       = 0x2000,
};

/**
 * cb_command - Command Block flags
 * @cb_tx_nc:  0: controller does CRC (normal),  1: CRC from skb memory
 */
enum cb_command {
	cb_nop    = 0x0000,
	cb_iaaddr = 0x0001,
	cb_config = 0x0002,
	cb_multi  = 0x0003,
	cb_tx     = 0x0004,
	cb_ucode  = 0x0005,
	cb_dump   = 0x0006,
	cb_tx_sf  = 0x0008,
	cb_tx_nc  = 0x0010,
	cb_cid    = 0x1f00,
	cb_i      = 0x2000,	/* request interrupt on completion */
	cb_s      = 0x4000,	/* suspend CU after this block */
	cb_el     = 0x8000,	/* end-of-list */
};

/* Receive Frame Descriptor — DMA-shared layout; hardware writes status
 * and actual_size on completion.  Do not reorder fields. */
struct rfd {
	__le16 status;
	__le16 command;
	__le32 link;		/* bus address of the next RFD */
	__le32 rbd;
	__le16 actual_size;	/* received byte count */
	__le16 size;		/* buffer size; 0 blocks hardware writes */
};

/* Software bookkeeping for one receive slot: doubly linked list entry,
 * its skb, and the skb's DMA mapping. */
struct rx {
	struct rx *next, *prev;
	struct sk_buff *skb;
	dma_addr_t dma_addr;
};
       
   501 
       
/* X(a,b) reverses bitfield declaration order on big-endian-bitfield
 * builds so that struct config matches the device's little-endian byte
 * layout on every architecture. */
#if defined(__BIG_ENDIAN_BITFIELD)
#define X(a,b)	b,a
#else
#define X(a,b)	a,b
#endif
/* Device configuration block (cb_config command payload).  The /*N*​/
 * markers give each field's byte offset in the block; layout is fixed by
 * the hardware — never reorder or resize fields. */
struct config {
/*0*/	u8 X(byte_count:6, pad0:2);
/*1*/	u8 X(X(rx_fifo_limit:4, tx_fifo_limit:3), pad1:1);
/*2*/	u8 adaptive_ifs;
/*3*/	u8 X(X(X(X(mwi_enable:1, type_enable:1), read_align_enable:1),
	   term_write_cache_line:1), pad3:4);
/*4*/	u8 X(rx_dma_max_count:7, pad4:1);
/*5*/	u8 X(tx_dma_max_count:7, dma_max_count_enable:1);
/*6*/	u8 X(X(X(X(X(X(X(late_scb_update:1, direct_rx_dma:1),
	   tno_intr:1), cna_intr:1), standard_tcb:1), standard_stat_counter:1),
	   rx_save_overruns : 1), rx_save_bad_frames : 1);
/*7*/	u8 X(X(X(X(X(rx_discard_short_frames:1, tx_underrun_retry:2),
	   pad7:2), rx_extended_rfd:1), tx_two_frames_in_fifo:1),
	   tx_dynamic_tbd:1);
/*8*/	u8 X(X(mii_mode:1, pad8:6), csma_disabled:1);
/*9*/	u8 X(X(X(X(X(rx_tcpudp_checksum:1, pad9:3), vlan_arp_tco:1),
	   link_status_wake:1), arp_wake:1), mcmatch_wake:1);
/*10*/	u8 X(X(X(pad10:3, no_source_addr_insertion:1), preamble_length:2),
	   loopback:2);
/*11*/	u8 X(linear_priority:3, pad11:5);
/*12*/	u8 X(X(linear_priority_mode:1, pad12:3), ifs:4);
/*13*/	u8 ip_addr_lo;
/*14*/	u8 ip_addr_hi;
/*15*/	u8 X(X(X(X(X(X(X(promiscuous_mode:1, broadcast_disabled:1),
	   wait_after_win:1), pad15_1:1), ignore_ul_bit:1), crc_16_bit:1),
	   pad15_2:1), crs_or_cdt:1);
/*16*/	u8 fc_delay_lo;
/*17*/	u8 fc_delay_hi;
/*18*/	u8 X(X(X(X(X(rx_stripping:1, tx_padding:1), rx_crc_transfer:1),
	   rx_long_ok:1), fc_priority_threshold:3), pad18:1);
/*19*/	u8 X(X(X(X(X(X(X(addr_wake:1, magic_packet_disable:1),
	   fc_disable:1), fc_restop:1), fc_restart:1), fc_reject:1),
	   full_duplex_force:1), full_duplex_pin:1);
/*20*/	u8 X(X(X(pad20_1:5, fc_priority_location:1), multi_ia:1), pad20_2:1);
/*21*/	u8 X(X(pad21_1:3, multicast_all:1), pad21_2:4);
/*22*/	u8 X(X(rx_d102_mode:1, rx_vlan_drop:1), pad22:6);
	u8 pad_d102[9];	/* extra config bytes used by D102-class parts */
};
       
   545 
       
/* Multicast setup command payload: count is the total number of address
 * BYTES (count/ETH_ALEN addresses) followed by the packed address list. */
#define E100_MAX_MULTICAST_ADDRS	64
struct multi {
	__le16 count;
	u8 addr[E100_MAX_MULTICAST_ADDRS * ETH_ALEN + 2/*pad*/];
};

/* Important: keep total struct u32-aligned */
#define UCODE_SIZE			134
/* Command Block: the DMA-shared descriptor (status..u) the hardware walks,
 * followed by driver-private list links, its own bus address, and the skb
 * mapped for a transmit command. */
struct cb {
	__le16 status;
	__le16 command;
	__le32 link;		/* bus address of the next CB */
	union {			/* command-specific payload */
		u8 iaaddr[ETH_ALEN];		/* cb_iaaddr */
		__le32 ucode[UCODE_SIZE];	/* cb_ucode */
		struct config config;		/* cb_config */
		struct multi multi;		/* cb_multi */
		struct {			/* cb_tx: TCB */
			u32 tbd_array;
			u16 tcb_byte_count;
			u8 threshold;
			u8 tbd_count;
			struct {
				__le32 buf_addr;
				__le16 size;
				u16 eol;
			} tbd;
		} tcb;
		__le32 dump_buffer_addr;	/* cb_dump */
	} u;
	struct cb *next, *prev;	/* driver-private; not seen by hardware */
	dma_addr_t dma_addr;
	struct sk_buff *skb;
};
       
   580 
       
/* Loopback mode selector (values match the config-block loopback field). */
enum loopback {
	lb_none = 0, lb_mac = 1, lb_phy = 3,
};

/* Statistics dump area filled by the cuc_dump_* commands.  Field order
 * and widths are fixed by the hardware dump format; `complete` receives
 * a cuc_dump signature when the dump finishes. */
struct stats {
	__le32 tx_good_frames, tx_max_collisions, tx_late_collisions,
		tx_underruns, tx_lost_crs, tx_deferred, tx_single_collisions,
		tx_multiple_collisions, tx_total_collisions;
	__le32 rx_good_frames, rx_crc_errors, rx_alignment_errors,
		rx_resource_errors, rx_overrun_errors, rx_cdt_errors,
		rx_short_frame_errors;
	__le32 fc_xmt_pause, fc_rcv_pause, fc_rcv_unsupported;
	__le16 xmt_tco_frames, rcv_tco_frames;
	__le32 complete;
};

/* Coherent DMA scratch area shared with the device: self-test results,
 * the statistics dump, and a register-dump buffer. */
struct mem {
	struct {
		u32 signature;	/* written non-zero by a completed self-test */
		u32 result;	/* 0 on success */
	} selftest;
	struct stats stats;
	u8 dump_buf[596];
};

/* Allowed range and configured count for a tunable ring size. */
struct param_range {
	u32 min;
	u32 max;
	u32 count;
};

/* Ring-size parameters: receive frame descriptors and command blocks. */
struct params {
	struct param_range rfds;
	struct param_range cbs;
};
       
   616 
       
/* Per-adapter driver state.  The ____cacheline_aligned groups keep the
 * hot-path members adjacent; preserve member order. */
struct nic {
	/* Begin: frequently used values: keep adjacent for cache effect */
	u32 msg_enable				____cacheline_aligned;
	struct net_device *netdev;
	struct pci_dev *pdev;
	/* MDI access method; swapped per PHY/board variant. */
	u16 (*mdio_ctrl)(struct nic *nic, u32 addr, u32 dir, u32 reg, u16 data);

	struct rx *rxs				____cacheline_aligned;
	struct rx *rx_to_use;		/* next slot to hand to hardware */
	struct rx *rx_to_clean;		/* next slot to check for completion */
	struct rfd blank_rfd;		/* template for fresh RFDs */
	enum ru_state ru_running;

	spinlock_t cb_lock			____cacheline_aligned;
	spinlock_t cmd_lock;		/* serializes SCB command register */
	struct csr __iomem *csr;	/* mapped device registers */
	enum scb_cmd_lo cuc_cmd;	/* cuc_start or cuc_resume */
	unsigned int cbs_avail;		/* free command blocks in the ring */
	struct napi_struct napi;
	struct cb *cbs;
	struct cb *cb_to_use;		/* next CB for queuing a command */
	struct cb *cb_to_send;		/* first CB after a failed resume */
	struct cb *cb_to_clean;		/* next CB to check for completion */
	__le16 tx_command;
	/* End: frequently used values: keep adjacent for cache effect */

	enum {
		ich                = (1 << 0),
		promiscuous        = (1 << 1),
		multicast_all      = (1 << 2),
		wol_magic          = (1 << 3),
		ich_10h_workaround = (1 << 4),
	} flags					____cacheline_aligned;

	enum mac mac;
	enum phy phy;
	struct params params;
	struct timer_list watchdog;
	struct mii_if_info mii;
	struct work_struct tx_timeout_task;
	enum loopback loopback;

	struct mem *mem;		/* coherent DMA scratch area */
	dma_addr_t dma_addr;		/* bus address of *mem */

	struct pci_pool *cbs_pool;
	dma_addr_t cbs_dma_addr;
	u8 adaptive_ifs;
	u8 tx_threshold;
	u32 tx_frames;
	u32 tx_collisions;

	u32 tx_deferred;
	u32 tx_single_collisions;
	u32 tx_multiple_collisions;
	u32 tx_fc_pause;
	u32 tx_tco_frames;

	u32 rx_fc_pause;
	u32 rx_fc_unsupported;
	u32 rx_tco_frames;
	u32 rx_short_frame_errors;
	u32 rx_over_length_errors;

	u16 eeprom_wc;			/* EEPROM word count */

	__le16 eeprom[256];		/* cached EEPROM contents */
	spinlock_t mdio_lock;
	const struct firmware *fw;
	/* EtherCAT device handle; non-NULL when the NIC is claimed by the
	 * EtherCAT master (IRQ-less, polled operation). */
	ec_device_t *ecdev;
	unsigned long ec_watchdog_jiffies;
};
       
   689 
       
   690 static inline void e100_write_flush(struct nic *nic)
       
   691 {
       
   692 	/* Flush previous PCI writes through intermediate bridges
       
   693 	 * by doing a benign read */
       
   694 	(void)ioread8(&nic->csr->scb.status);
       
   695 }
       
   696 
       
   697 static void e100_enable_irq(struct nic *nic)
       
   698 {
       
   699 	unsigned long flags;
       
   700 
       
   701 	if (nic->ecdev)
       
   702 		return;
       
   703 
       
   704 	spin_lock_irqsave(&nic->cmd_lock, flags);
       
   705 	iowrite8(irq_mask_none, &nic->csr->scb.cmd_hi);
       
   706 	e100_write_flush(nic);
       
   707 	spin_unlock_irqrestore(&nic->cmd_lock, flags);
       
   708 }
       
   709 
       
   710 static void e100_disable_irq(struct nic *nic)
       
   711 {
       
   712 	unsigned long flags = 0;
       
   713 
       
   714 	if (!nic->ecdev)
       
   715 		spin_lock_irqsave(&nic->cmd_lock, flags);
       
   716 	iowrite8(irq_mask_all, &nic->csr->scb.cmd_hi);
       
   717 	e100_write_flush(nic);
       
   718 	if (!nic->ecdev)
       
   719 		spin_unlock_irqrestore(&nic->cmd_lock, flags);
       
   720 }
       
   721 
       
/* Reset the adapter: first a selective reset to quiesce the command
 * and receive units and get the device off the PCI bus, then a full
 * software reset.  Each phase is flushed and given 20us to settle.
 * Interrupts are re-masked afterwards because reset unmasks them.
 */
static void e100_hw_reset(struct nic *nic)
{
	/* Put CU and RU into idle with a selective reset to get
	 * device off of PCI bus */
	iowrite32(selective_reset, &nic->csr->port);
	e100_write_flush(nic); udelay(20);

	/* Now fully reset device */
	iowrite32(software_reset, &nic->csr->port);
	e100_write_flush(nic); udelay(20);

	/* Mask off our interrupt line - it's unmasked after reset */
	e100_disable_irq(nic);
}
       
   736 
       
/* Run the chip's built-in self-test via the PORT register.
 *
 * The result block lives in host memory (struct mem.selftest) and is
 * pre-filled with values the hardware must overwrite, so a stale
 * signature/result cannot read as success.
 *
 * Return: 0 on success, -ETIMEDOUT if the test reported an error or
 * never wrote back its signature.
 */
static int e100_self_test(struct nic *nic)
{
	u32 dma_addr = nic->dma_addr + offsetof(struct mem, selftest);

	/* Passing the self-test is a pretty good indication
	 * that the device can DMA to/from host memory */

	/* Sentinel values: hardware must overwrite both */
	nic->mem->selftest.signature = 0;
	nic->mem->selftest.result = 0xFFFFFFFF;

	iowrite32(selftest | dma_addr, &nic->csr->port);
	e100_write_flush(nic);
	/* Wait 10 msec for self-test to complete */
	msleep(10);

	/* Interrupts are enabled after self-test */
	e100_disable_irq(nic);

	/* Check results of self-test */
	if (nic->mem->selftest.result != 0) {
		netif_err(nic, hw, nic->netdev,
			  "Self-test failed: result=0x%08X\n",
			  nic->mem->selftest.result);
		return -ETIMEDOUT;
	}
	if (nic->mem->selftest.signature == 0) {
		netif_err(nic, hw, nic->netdev, "Self-test failed: timed out\n");
		return -ETIMEDOUT;
	}

	return 0;
}
       
   769 
       
   770 static void e100_eeprom_write(struct nic *nic, u16 addr_len, u16 addr, __le16 data)
       
   771 {
       
   772 	u32 cmd_addr_data[3];
       
   773 	u8 ctrl;
       
   774 	int i, j;
       
   775 
       
   776 	/* Three cmds: write/erase enable, write data, write/erase disable */
       
   777 	cmd_addr_data[0] = op_ewen << (addr_len - 2);
       
   778 	cmd_addr_data[1] = (((op_write << addr_len) | addr) << 16) |
       
   779 		le16_to_cpu(data);
       
   780 	cmd_addr_data[2] = op_ewds << (addr_len - 2);
       
   781 
       
   782 	/* Bit-bang cmds to write word to eeprom */
       
   783 	for (j = 0; j < 3; j++) {
       
   784 
       
   785 		/* Chip select */
       
   786 		iowrite8(eecs | eesk, &nic->csr->eeprom_ctrl_lo);
       
   787 		e100_write_flush(nic); udelay(4);
       
   788 
       
   789 		for (i = 31; i >= 0; i--) {
       
   790 			ctrl = (cmd_addr_data[j] & (1 << i)) ?
       
   791 				eecs | eedi : eecs;
       
   792 			iowrite8(ctrl, &nic->csr->eeprom_ctrl_lo);
       
   793 			e100_write_flush(nic); udelay(4);
       
   794 
       
   795 			iowrite8(ctrl | eesk, &nic->csr->eeprom_ctrl_lo);
       
   796 			e100_write_flush(nic); udelay(4);
       
   797 		}
       
   798 		/* Wait 10 msec for cmd to complete */
       
   799 		msleep(10);
       
   800 
       
   801 		/* Chip deselect */
       
   802 		iowrite8(0, &nic->csr->eeprom_ctrl_lo);
       
   803 		e100_write_flush(nic); udelay(4);
       
   804 	}
       
   805 };
       
   806 
       
   807 /* General technique stolen from the eepro100 driver - very clever */
       
   808 static __le16 e100_eeprom_read(struct nic *nic, u16 *addr_len, u16 addr)
       
   809 {
       
   810 	u32 cmd_addr_data;
       
   811 	u16 data = 0;
       
   812 	u8 ctrl;
       
   813 	int i;
       
   814 
       
   815 	cmd_addr_data = ((op_read << *addr_len) | addr) << 16;
       
   816 
       
   817 	/* Chip select */
       
   818 	iowrite8(eecs | eesk, &nic->csr->eeprom_ctrl_lo);
       
   819 	e100_write_flush(nic); udelay(4);
       
   820 
       
   821 	/* Bit-bang to read word from eeprom */
       
   822 	for (i = 31; i >= 0; i--) {
       
   823 		ctrl = (cmd_addr_data & (1 << i)) ? eecs | eedi : eecs;
       
   824 		iowrite8(ctrl, &nic->csr->eeprom_ctrl_lo);
       
   825 		e100_write_flush(nic); udelay(4);
       
   826 
       
   827 		iowrite8(ctrl | eesk, &nic->csr->eeprom_ctrl_lo);
       
   828 		e100_write_flush(nic); udelay(4);
       
   829 
       
   830 		/* Eeprom drives a dummy zero to EEDO after receiving
       
   831 		 * complete address.  Use this to adjust addr_len. */
       
   832 		ctrl = ioread8(&nic->csr->eeprom_ctrl_lo);
       
   833 		if (!(ctrl & eedo) && i > 16) {
       
   834 			*addr_len -= (i - 16);
       
   835 			i = 17;
       
   836 		}
       
   837 
       
   838 		data = (data << 1) | (ctrl & eedo ? 1 : 0);
       
   839 	}
       
   840 
       
   841 	/* Chip deselect */
       
   842 	iowrite8(0, &nic->csr->eeprom_ctrl_lo);
       
   843 	e100_write_flush(nic); udelay(4);
       
   844 
       
   845 	return cpu_to_le16(data);
       
   846 };
       
   847 
       
   848 /* Load entire EEPROM image into driver cache and validate checksum */
       
   849 static int e100_eeprom_load(struct nic *nic)
       
   850 {
       
   851 	u16 addr, addr_len = 8, checksum = 0;
       
   852 
       
   853 	/* Try reading with an 8-bit addr len to discover actual addr len */
       
   854 	e100_eeprom_read(nic, &addr_len, 0);
       
   855 	nic->eeprom_wc = 1 << addr_len;
       
   856 
       
   857 	for (addr = 0; addr < nic->eeprom_wc; addr++) {
       
   858 		nic->eeprom[addr] = e100_eeprom_read(nic, &addr_len, addr);
       
   859 		if (addr < nic->eeprom_wc - 1)
       
   860 			checksum += le16_to_cpu(nic->eeprom[addr]);
       
   861 	}
       
   862 
       
   863 	/* The checksum, stored in the last word, is calculated such that
       
   864 	 * the sum of words should be 0xBABA */
       
   865 	if (cpu_to_le16(0xBABA - checksum) != nic->eeprom[nic->eeprom_wc - 1]) {
       
   866 		netif_err(nic, probe, nic->netdev, "EEPROM corrupted\n");
       
   867 		if (!eeprom_bad_csum_allow)
       
   868 			return -EAGAIN;
       
   869 	}
       
   870 
       
   871 	return 0;
       
   872 }
       
   873 
       
   874 /* Save (portion of) driver EEPROM cache to device and update checksum */
       
   875 static int e100_eeprom_save(struct nic *nic, u16 start, u16 count)
       
   876 {
       
   877 	u16 addr, addr_len = 8, checksum = 0;
       
   878 
       
   879 	/* Try reading with an 8-bit addr len to discover actual addr len */
       
   880 	e100_eeprom_read(nic, &addr_len, 0);
       
   881 	nic->eeprom_wc = 1 << addr_len;
       
   882 
       
   883 	if (start + count >= nic->eeprom_wc)
       
   884 		return -EINVAL;
       
   885 
       
   886 	for (addr = start; addr < start + count; addr++)
       
   887 		e100_eeprom_write(nic, addr_len, addr, nic->eeprom[addr]);
       
   888 
       
   889 	/* The checksum, stored in the last word, is calculated such that
       
   890 	 * the sum of words should be 0xBABA */
       
   891 	for (addr = 0; addr < nic->eeprom_wc - 1; addr++)
       
   892 		checksum += le16_to_cpu(nic->eeprom[addr]);
       
   893 	nic->eeprom[nic->eeprom_wc - 1] = cpu_to_le16(0xBABA - checksum);
       
   894 	e100_eeprom_write(nic, addr_len, nic->eeprom_wc - 1,
       
   895 		nic->eeprom[nic->eeprom_wc - 1]);
       
   896 
       
   897 	return 0;
       
   898 }
       
   899 
       
#define E100_WAIT_SCB_TIMEOUT 20000 /* we might have to wait 100ms!!! */
#define E100_WAIT_SCB_FAST 20       /* delay like the old code */

/* Issue a command to the System Control Block.
 *
 * Busy-waits until the previous command has been accepted (cmd_lo
 * reads back zero), backing off with 5us delays after the first
 * E100_WAIT_SCB_FAST iterations, then writes the general pointer
 * (skipped for cuc_resume, which takes none) and the command byte.
 *
 * Return: 0 on success, -EAGAIN if the SCB never cleared.
 * cmd_lock is skipped in EtherCAT mode.
 */
static int e100_exec_cmd(struct nic *nic, u8 cmd, dma_addr_t dma_addr)
{
	unsigned long flags = 0;
	unsigned int i;
	int err = 0;

	if (!nic->ecdev)
		spin_lock_irqsave(&nic->cmd_lock, flags);

	/* Previous command is accepted when SCB clears */
	for (i = 0; i < E100_WAIT_SCB_TIMEOUT; i++) {
		if (likely(!ioread8(&nic->csr->scb.cmd_lo)))
			break;
		cpu_relax();
		if (unlikely(i > E100_WAIT_SCB_FAST))
			udelay(5);
	}
	if (unlikely(i == E100_WAIT_SCB_TIMEOUT)) {
		err = -EAGAIN;
		goto err_unlock;
	}

	/* cuc_resume continues from the chip's current CB; no pointer */
	if (unlikely(cmd != cuc_resume))
		iowrite32(dma_addr, &nic->csr->scb.gen_ptr);
	iowrite8(cmd, &nic->csr->scb.cmd_lo);

err_unlock:
	if (!nic->ecdev)
		spin_unlock_irqrestore(&nic->cmd_lock, flags);

	return err;
}
       
   934 
       
/* Claim the next control block from the ring, let @cb_prepare fill it
 * in, and (re)start the command unit so the chip processes it.
 *
 * Return: 0 on success, -ENOMEM when no CB is free, -ENOSPC when this
 * command consumed the last free CB (the command is still issued; a
 * reset is scheduled if the kick then fails), or @cb_prepare's error.
 * cb_lock is skipped in EtherCAT mode.
 */
static int e100_exec_cb(struct nic *nic, struct sk_buff *skb,
	int (*cb_prepare)(struct nic *, struct cb *, struct sk_buff *))
{
	struct cb *cb;
	unsigned long flags = 0;
	int err = 0;

	if (!nic->ecdev)
		spin_lock_irqsave(&nic->cb_lock, flags);

	if (unlikely(!nic->cbs_avail)) {
		err = -ENOMEM;
		goto err_unlock;
	}

	cb = nic->cb_to_use;
	nic->cb_to_use = cb->next;
	nic->cbs_avail--;
	cb->skb = skb;

	err = cb_prepare(nic, cb, skb);
	if (err)
		goto err_unlock;

	/* last free CB used: tell the caller to throttle */
	if (unlikely(!nic->cbs_avail))
		err = -ENOSPC;


	/* Order is important otherwise we'll be in a race with h/w:
	 * set S-bit in current first, then clear S-bit in previous. */
	cb->command |= cpu_to_le16(cb_s);
	wmb();
	cb->prev->command &= cpu_to_le16(~cb_s);

	while (nic->cb_to_send != nic->cb_to_use) {
		if (unlikely(e100_exec_cmd(nic, nic->cuc_cmd,
			nic->cb_to_send->dma_addr))) {
			/* Ok, here's where things get sticky.  It's
			 * possible that we can't schedule the command
			 * because the controller is too busy, so
			 * let's just queue the command and try again
			 * when another command is scheduled. */
			if (err == -ENOSPC) {
				//request a reset
				schedule_work(&nic->tx_timeout_task);
			}
			break;
		} else {
			/* subsequent CBs ride on a resume, not a start */
			nic->cuc_cmd = cuc_resume;
			nic->cb_to_send = nic->cb_to_send->next;
		}
	}

err_unlock:
	if (!nic->ecdev)
		spin_unlock_irqrestore(&nic->cb_lock, flags);

	return err;
}
       
   994 
       
   995 static int mdio_read(struct net_device *netdev, int addr, int reg)
       
   996 {
       
   997 	struct nic *nic = netdev_priv(netdev);
       
   998 	return nic->mdio_ctrl(nic, addr, mdi_read, reg, 0);
       
   999 }
       
  1000 
       
  1001 static void mdio_write(struct net_device *netdev, int addr, int reg, int data)
       
  1002 {
       
  1003 	struct nic *nic = netdev_priv(netdev);
       
  1004 
       
  1005 	nic->mdio_ctrl(nic, addr, mdi_write, reg, data);
       
  1006 }
       
  1007 
       
/* the standard mdio_ctrl() function for usual MII-compliant hardware */
/*
 * Polls the MDI control register until Ready (up to ~2ms), issues the
 * read/write, then polls again for completion.  Returns the data word
 * read back, or 0 on timeout — there is no way to report the error to
 * callers.  mdio_lock is skipped in EtherCAT mode.
 */
static u16 mdio_ctrl_hw(struct nic *nic, u32 addr, u32 dir, u32 reg, u16 data)
{
	u32 data_out = 0;
	unsigned int i;
	unsigned long flags = 0;


	/*
	 * Stratus87247: we shouldn't be writing the MDI control
	 * register until the Ready bit shows True.  Also, since
	 * manipulation of the MDI control registers is a multi-step
	 * procedure it should be done under lock.
	 */
	if (!nic->ecdev)
		spin_lock_irqsave(&nic->mdio_lock, flags);
	for (i = 100; i; --i) {
		if (ioread32(&nic->csr->mdi_ctrl) & mdi_ready)
			break;
		udelay(20);
	}
	if (unlikely(!i)) {
		netdev_err(nic->netdev, "e100.mdio_ctrl won't go Ready\n");
		if (!nic->ecdev)
			spin_unlock_irqrestore(&nic->mdio_lock, flags);
		return 0;		/* No way to indicate timeout error */
	}
	iowrite32((reg << 16) | (addr << 21) | dir | data, &nic->csr->mdi_ctrl);

	/* wait for the issued transaction to complete */
	for (i = 0; i < 100; i++) {
		udelay(20);
		if ((data_out = ioread32(&nic->csr->mdi_ctrl)) & mdi_ready)
			break;
	}
	if (!nic->ecdev)
		spin_unlock_irqrestore(&nic->mdio_lock, flags);
	netif_printk(nic, hw, KERN_DEBUG, nic->netdev,
		     "%s:addr=%d, reg=%d, data_in=0x%04X, data_out=0x%04X\n",
		     dir == mdi_read ? "READ" : "WRITE",
		     addr, reg, data, data_out);
	return (u16)data_out;
}
       
  1050 
       
  1051 /* slightly tweaked mdio_ctrl() function for phy_82552_v specifics */
       
  1052 static u16 mdio_ctrl_phy_82552_v(struct nic *nic,
       
  1053 				 u32 addr,
       
  1054 				 u32 dir,
       
  1055 				 u32 reg,
       
  1056 				 u16 data)
       
  1057 {
       
  1058 	if ((reg == MII_BMCR) && (dir == mdi_write)) {
       
  1059 		if (data & (BMCR_ANRESTART | BMCR_ANENABLE)) {
       
  1060 			u16 advert = mdio_read(nic->netdev, nic->mii.phy_id,
       
  1061 							MII_ADVERTISE);
       
  1062 
       
  1063 			/*
       
  1064 			 * Workaround Si issue where sometimes the part will not
       
  1065 			 * autoneg to 100Mbps even when advertised.
       
  1066 			 */
       
  1067 			if (advert & ADVERTISE_100FULL)
       
  1068 				data |= BMCR_SPEED100 | BMCR_FULLDPLX;
       
  1069 			else if (advert & ADVERTISE_100HALF)
       
  1070 				data |= BMCR_SPEED100;
       
  1071 		}
       
  1072 	}
       
  1073 	return mdio_ctrl_hw(nic, addr, dir, reg, data);
       
  1074 }
       
  1075 
       
  1076 /* Fully software-emulated mdio_ctrl() function for cards without
       
  1077  * MII-compliant PHYs.
       
  1078  * For now, this is mainly geared towards 80c24 support; in case of further
       
  1079  * requirements for other types (i82503, ...?) either extend this mechanism
       
  1080  * or split it, whichever is cleaner.
       
  1081  */
       
  1082 static u16 mdio_ctrl_phy_mii_emulated(struct nic *nic,
       
  1083 				      u32 addr,
       
  1084 				      u32 dir,
       
  1085 				      u32 reg,
       
  1086 				      u16 data)
       
  1087 {
       
  1088 	/* might need to allocate a netdev_priv'ed register array eventually
       
  1089 	 * to be able to record state changes, but for now
       
  1090 	 * some fully hardcoded register handling ought to be ok I guess. */
       
  1091 
       
  1092 	if (dir == mdi_read) {
       
  1093 		switch (reg) {
       
  1094 		case MII_BMCR:
       
  1095 			/* Auto-negotiation, right? */
       
  1096 			return  BMCR_ANENABLE |
       
  1097 				BMCR_FULLDPLX;
       
  1098 		case MII_BMSR:
       
  1099 			return	BMSR_LSTATUS /* for mii_link_ok() */ |
       
  1100 				BMSR_ANEGCAPABLE |
       
  1101 				BMSR_10FULL;
       
  1102 		case MII_ADVERTISE:
       
  1103 			/* 80c24 is a "combo card" PHY, right? */
       
  1104 			return	ADVERTISE_10HALF |
       
  1105 				ADVERTISE_10FULL;
       
  1106 		default:
       
  1107 			netif_printk(nic, hw, KERN_DEBUG, nic->netdev,
       
  1108 				     "%s:addr=%d, reg=%d, data=0x%04X: unimplemented emulation!\n",
       
  1109 				     dir == mdi_read ? "READ" : "WRITE",
       
  1110 				     addr, reg, data);
       
  1111 			return 0xFFFF;
       
  1112 		}
       
  1113 	} else {
       
  1114 		switch (reg) {
       
  1115 		default:
       
  1116 			netif_printk(nic, hw, KERN_DEBUG, nic->netdev,
       
  1117 				     "%s:addr=%d, reg=%d, data=0x%04X: unimplemented emulation!\n",
       
  1118 				     dir == mdi_read ? "READ" : "WRITE",
       
  1119 				     addr, reg, data);
       
  1120 			return 0xFFFF;
       
  1121 		}
       
  1122 	}
       
  1123 }
       
  1124 static inline int e100_phy_supports_mii(struct nic *nic)
       
  1125 {
       
  1126 	/* for now, just check it by comparing whether we
       
  1127 	   are using MII software emulation.
       
  1128 	*/
       
  1129 	return (nic->mdio_ctrl != mdio_ctrl_phy_mii_emulated);
       
  1130 }
       
  1131 
       
  1132 static void e100_get_defaults(struct nic *nic)
       
  1133 {
       
  1134 	struct param_range rfds = { .min = 16, .max = 256, .count = 256 };
       
  1135 	struct param_range cbs  = { .min = 64, .max = 256, .count = 128 };
       
  1136 
       
  1137 	/* MAC type is encoded as rev ID; exception: ICH is treated as 82559 */
       
  1138 	nic->mac = (nic->flags & ich) ? mac_82559_D101M : nic->pdev->revision;
       
  1139 	if (nic->mac == mac_unknown)
       
  1140 		nic->mac = mac_82557_D100_A;
       
  1141 
       
  1142 	nic->params.rfds = rfds;
       
  1143 	nic->params.cbs = cbs;
       
  1144 
       
  1145 	/* Quadwords to DMA into FIFO before starting frame transmit */
       
  1146 	nic->tx_threshold = 0xE0;
       
  1147 
       
  1148 	/* no interrupt for every tx completion, delay = 256us if not 557 */
       
  1149 	nic->tx_command = cpu_to_le16(cb_tx | cb_tx_sf |
       
  1150 		((nic->mac >= mac_82558_D101_A4) ? cb_cid : cb_i));
       
  1151 
       
  1152 	/* Template for a freshly allocated RFD */
       
  1153 	nic->blank_rfd.command = 0;
       
  1154 	nic->blank_rfd.rbd = cpu_to_le32(0xFFFFFFFF);
       
  1155 	nic->blank_rfd.size = cpu_to_le16(VLAN_ETH_FRAME_LEN + ETH_FCS_LEN);
       
  1156 
       
  1157 	/* MII setup */
       
  1158 	nic->mii.phy_id_mask = 0x1F;
       
  1159 	nic->mii.reg_num_mask = 0x1F;
       
  1160 	nic->mii.dev = nic->netdev;
       
  1161 	nic->mii.mdio_read = mdio_read;
       
  1162 	nic->mii.mdio_write = mdio_write;
       
  1163 }
       
  1164 
       
/* cb_prepare callback: build a CB_CONFIGURE command that programs the
 * chip's 22-byte (32-byte in extended mode) configuration area — byte
 * and bit meanings follow Intel's 8255x developer documentation.
 * Driver policies applied on top of the defaults:
 *  - promiscuous/loopback keep bad frames and short frames,
 *  - WoL magic packets are disabled while up or in EtherCAT mode,
 *  - newer MACs (>= 82558 A4) get flow control and extended TCBs,
 *  - NETIF_F_RXFCS / NETIF_F_RXALL feature flags are honored.
 * Always returns 0.
 */
static int e100_configure(struct nic *nic, struct cb *cb, struct sk_buff *skb)
{
	struct config *config = &cb->u.config;
	u8 *c = (u8 *)config;
	struct net_device *netdev = nic->netdev;

	cb->command = cpu_to_le16(cb_config);

	memset(config, 0, sizeof(struct config));

	config->byte_count = 0x16;		/* bytes in this struct */
	config->rx_fifo_limit = 0x8;		/* bytes in FIFO before DMA */
	config->direct_rx_dma = 0x1;		/* reserved */
	config->standard_tcb = 0x1;		/* 1=standard, 0=extended */
	config->standard_stat_counter = 0x1;	/* 1=standard, 0=extended */
	config->rx_discard_short_frames = 0x1;	/* 1=discard, 0=pass */
	config->tx_underrun_retry = 0x3;	/* # of underrun retries */
	if (e100_phy_supports_mii(nic))
		config->mii_mode = 1;           /* 1=MII mode, 0=i82503 mode */
	config->pad10 = 0x6;
	config->no_source_addr_insertion = 0x1;	/* 1=no, 0=yes */
	config->preamble_length = 0x2;		/* 0=1, 1=3, 2=7, 3=15 bytes */
	config->ifs = 0x6;			/* x16 = inter frame spacing */
	config->ip_addr_hi = 0xF2;		/* ARP IP filter - not used */
	config->pad15_1 = 0x1;
	config->pad15_2 = 0x1;
	config->crs_or_cdt = 0x0;		/* 0=CRS only, 1=CRS or CDT */
	config->fc_delay_hi = 0x40;		/* time delay for fc frame */
	config->tx_padding = 0x1;		/* 1=pad short frames */
	config->fc_priority_threshold = 0x7;	/* 7=priority fc disabled */
	config->pad18 = 0x1;
	config->full_duplex_pin = 0x1;		/* 1=examine FDX# pin */
	config->pad20_1 = 0x1F;
	config->fc_priority_location = 0x1;	/* 1=byte#31, 0=byte#19 */
	config->pad21_1 = 0x5;

	config->adaptive_ifs = nic->adaptive_ifs;
	config->loopback = nic->loopback;

	if (nic->mii.force_media && nic->mii.full_duplex)
		config->full_duplex_force = 0x1;	/* 1=force, 0=auto */

	if (nic->flags & promiscuous || nic->loopback) {
		config->rx_save_bad_frames = 0x1;	/* 1=save, 0=discard */
		config->rx_discard_short_frames = 0x0;	/* 1=discard, 0=save */
		config->promiscuous_mode = 0x1;		/* 1=on, 0=off */
	}

	if (unlikely(netdev->features & NETIF_F_RXFCS))
		config->rx_crc_transfer = 0x1;	/* 1=save, 0=discard */

	if (nic->flags & multicast_all)
		config->multicast_all = 0x1;		/* 1=accept, 0=no */

	/* disable WoL when up */
	if (nic->ecdev ||
			(netif_running(nic->netdev) || !(nic->flags & wol_magic)))
		config->magic_packet_disable = 0x1;	/* 1=off, 0=on */

	if (nic->mac >= mac_82558_D101_A4) {
		config->fc_disable = 0x1;	/* 1=Tx fc off, 0=Tx fc on */
		config->mwi_enable = 0x1;	/* 1=enable, 0=disable */
		config->standard_tcb = 0x0;	/* 1=standard, 0=extended */
		config->rx_long_ok = 0x1;	/* 1=VLANs ok, 0=standard */
		if (nic->mac >= mac_82559_D101M) {
			config->tno_intr = 0x1;		/* TCO stats enable */
			/* Enable TCO in extended config */
			if (nic->mac >= mac_82551_10) {
				config->byte_count = 0x20; /* extended bytes */
				config->rx_d102_mode = 0x1; /* GMRC for TCO */
			}
		} else {
			config->standard_stat_counter = 0x0;
		}
	}

	if (netdev->features & NETIF_F_RXALL) {
		config->rx_save_overruns = 0x1; /* 1=save, 0=discard */
		config->rx_save_bad_frames = 0x1;       /* 1=save, 0=discard */
		config->rx_discard_short_frames = 0x0;  /* 1=discard, 0=save */
	}

	/* dump the final 24-byte image for debugging */
	netif_printk(nic, hw, KERN_DEBUG, nic->netdev, "[00-07]=%8ph\n",
		     c + 0);
	netif_printk(nic, hw, KERN_DEBUG, nic->netdev, "[08-15]=%8ph\n",
		     c + 8);
	netif_printk(nic, hw, KERN_DEBUG, nic->netdev, "[16-23]=%8ph\n",
		     c + 16);
	return 0;
}
       
  1255 
       
  1256 /*************************************************************************
       
  1257 *  CPUSaver parameters
       
  1258 *
       
  1259 *  All CPUSaver parameters are 16-bit literals that are part of a
       
  1260 *  "move immediate value" instruction.  By changing the value of
       
  1261 *  the literal in the instruction before the code is loaded, the
       
  1262 *  driver can change the algorithm.
       
  1263 *
       
  1264 *  INTDELAY - This loads the dead-man timer with its initial value.
       
  1265 *    When this timer expires the interrupt is asserted, and the
       
  1266 *    timer is reset each time a new packet is received.  (see
       
  1267 *    BUNDLEMAX below to set the limit on number of chained packets)
       
  1268 *    The current default is 0x600 or 1536.  Experiments show that
       
*    the value should probably stay within the 0x200 - 0x1000 range.
       
  1270 *
       
  1271 *  BUNDLEMAX -
       
  1272 *    This sets the maximum number of frames that will be bundled.  In
       
  1273 *    some situations, such as the TCP windowing algorithm, it may be
       
  1274 *    better to limit the growth of the bundle size than let it go as
       
  1275 *    high as it can, because that could cause too much added latency.
       
  1276 *    The default is six, because this is the number of packets in the
       
  1277 *    default TCP window size.  A value of 1 would make CPUSaver indicate
       
  1278 *    an interrupt for every frame received.  If you do not want to put
       
  1279 *    a limit on the bundle size, set this value to xFFFF.
       
  1280 *
       
  1281 *  BUNDLESMALL -
       
  1282 *    This contains a bit-mask describing the minimum size frame that
       
  1283 *    will be bundled.  The default masks the lower 7 bits, which means
       
  1284 *    that any frame less than 128 bytes in length will not be bundled,
       
  1285 *    but will instead immediately generate an interrupt.  This does
       
  1286 *    not affect the current bundle in any way.  Any frame that is 128
       
*    bytes or larger will be bundled normally.  This feature is meant
       
  1288 *    to provide immediate indication of ACK frames in a TCP environment.
       
  1289 *    Customers were seeing poor performance when a machine with CPUSaver
       
  1290 *    enabled was sending but not receiving.  The delay introduced when
       
  1291 *    the ACKs were received was enough to reduce total throughput, because
       
  1292 *    the sender would sit idle until the ACK was finally seen.
       
  1293 *
       
  1294 *    The current default is 0xFF80, which masks out the lower 7 bits.
       
  1295 *    This means that any frame which is x7F (127) bytes or smaller
       
  1296 *    will cause an immediate interrupt.  Because this value must be a
       
  1297 *    bit mask, there are only a few valid values that can be used.  To
       
  1298 *    turn this feature off, the driver can write the value xFFFF to the
       
  1299 *    lower word of this instruction (in the same way that the other
       
  1300 *    parameters are used).  Likewise, a value of 0xF800 (2047) would
       
  1301 *    cause an interrupt to be generated for every frame, because all
       
  1302 *    standard Ethernet frames are <= 2047 bytes in length.
       
  1303 *************************************************************************/
       
  1304 
       
/* if you wish to disable the ucode functionality, while maintaining the
 * workarounds it provides, set the following defines to:
 * BUNDLESMALL 0
 * BUNDLEMAX 1
 * INTDELAY 1
 */
#define BUNDLESMALL 1		/* non-zero: bundle small (<128 byte) frames too */
#define BUNDLEMAX (u16)6	/* max frames bundled per interrupt */
#define INTDELAY (u16)1536 /* 0x600 */	/* dead-man timer initial value */
       
  1314 
       
/* Initialize firmware */
/* Select, fetch and validate the microcode image for this MAC type.
 *
 * Return: NULL when no ucode applies (ICH parts, unlisted MACs, or an
 * optional CPUSaver image that could not be fetched), an ERR_PTR on
 * fatal failure (a required image missing or a malformed blob), or the
 * validated firmware.  The pointer is cached in nic->fw so that a
 * reinitialization (e.g. resume) does not call request_firmware()
 * again.
 */
static const struct firmware *e100_request_firmware(struct nic *nic)
{
	const char *fw_name;
	const struct firmware *fw = nic->fw;
	u8 timer, bundle, min_size;
	int err = 0;
	bool required = false;

	/* do not load u-code for ICH devices */
	if (nic->flags & ich)
		return NULL;

	/* Search for ucode match against h/w revision
	 *
	 * Based on comments in the source code for the FreeBSD fxp
	 * driver, the FIRMWARE_D102E ucode includes both CPUSaver and
	 *
	 *    "fixes for bugs in the B-step hardware (specifically, bugs
	 *     with Inline Receive)."
	 *
	 * So we must fail if it cannot be loaded.
	 *
	 * The other microcode files are only required for the optional
	 * CPUSaver feature.  Nice to have, but no reason to fail.
	 */
	if (nic->mac == mac_82559_D101M) {
		fw_name = FIRMWARE_D101M;
	} else if (nic->mac == mac_82559_D101S) {
		fw_name = FIRMWARE_D101S;
	} else if (nic->mac == mac_82551_F || nic->mac == mac_82551_10) {
		fw_name = FIRMWARE_D102E;
		required = true;
	} else { /* No ucode on other devices */
		return NULL;
	}

	/* If the firmware has not previously been loaded, request a pointer
	 * to it. If it was previously loaded, we are reinitializing the
	 * adapter, possibly in a resume from hibernate, in which case
	 * request_firmware() cannot be used.
	 */
	if (!fw)
		err = request_firmware(&fw, fw_name, &nic->pdev->dev);

	if (err) {
		if (required) {
			netif_err(nic, probe, nic->netdev,
				  "Failed to load firmware \"%s\": %d\n",
				  fw_name, err);
			return ERR_PTR(err);
		} else {
			netif_info(nic, probe, nic->netdev,
				   "CPUSaver disabled. Needs \"%s\": %d\n",
				   fw_name, err);
			return NULL;
		}
	}

	/* Firmware should be precisely UCODE_SIZE (words) plus three bytes
	   indicating the offsets for BUNDLESMALL, BUNDLEMAX, INTDELAY */
	if (fw->size != UCODE_SIZE * 4 + 3) {
		netif_err(nic, probe, nic->netdev,
			  "Firmware \"%s\" has wrong size %zu\n",
			  fw_name, fw->size);
		release_firmware(fw);
		return ERR_PTR(-EINVAL);
	}

	/* Read timer, bundle and min_size from end of firmware blob */
	timer = fw->data[UCODE_SIZE * 4];
	bundle = fw->data[UCODE_SIZE * 4 + 1];
	min_size = fw->data[UCODE_SIZE * 4 + 2];

	/* the three patch offsets must point inside the ucode image */
	if (timer >= UCODE_SIZE || bundle >= UCODE_SIZE ||
	    min_size >= UCODE_SIZE) {
		netif_err(nic, probe, nic->netdev,
			  "\"%s\" has bogus offset values (0x%x,0x%x,0x%x)\n",
			  fw_name, timer, bundle, min_size);
		release_firmware(fw);
		return ERR_PTR(-EINVAL);
	}

	/* OK, firmware is validated and ready to use. Save a pointer
	 * to it in the nic */
	nic->fw = fw;
	return fw;
}
       
  1403 
       
/* cb_prepare callback: copy the validated ucode image into the CB and
 * patch in the CPUSaver tunables (INTDELAY, BUNDLEMAX, BUNDLESMALL) at
 * the word offsets stored in the last three bytes of the blob.  The
 * @skb argument actually carries the firmware pointer (see
 * e100_load_ucode_wait()).  Always returns 0.
 */
static int e100_setup_ucode(struct nic *nic, struct cb *cb,
			     struct sk_buff *skb)
{
	const struct firmware *fw = (void *)skb;
	u8 timer, bundle, min_size;

	/* It's not a real skb; we just abused the fact that e100_exec_cb
	   will pass it through to here... */
	cb->skb = NULL;

	/* firmware is stored as little endian already */
	memcpy(cb->u.ucode, fw->data, UCODE_SIZE * 4);

	/* Read timer, bundle and min_size from end of firmware blob */
	timer = fw->data[UCODE_SIZE * 4];
	bundle = fw->data[UCODE_SIZE * 4 + 1];
	min_size = fw->data[UCODE_SIZE * 4 + 2];

	/* Insert user-tunable settings in cb->u.ucode: each tunable
	 * occupies the low 16 bits of its patched instruction word */
	cb->u.ucode[timer] &= cpu_to_le32(0xFFFF0000);
	cb->u.ucode[timer] |= cpu_to_le32(INTDELAY);
	cb->u.ucode[bundle] &= cpu_to_le32(0xFFFF0000);
	cb->u.ucode[bundle] |= cpu_to_le32(BUNDLEMAX);
	cb->u.ucode[min_size] &= cpu_to_le32(0xFFFF0000);
	cb->u.ucode[min_size] |= cpu_to_le32((BUNDLESMALL) ? 0xFFFF : 0xFF80);

	cb->command = cpu_to_le16(cb_ucode | cb_el);
	return 0;
}
       
  1433 
       
        /* e100_load_ucode_wait - fetch, submit and synchronously wait for the
         * CPUSaver microcode load to complete.
         * Returns 0 on success or when no ucode is required (PTR_ERR(NULL) == 0),
         * a negative errno from firmware lookup, or -EPERM if the ucode command
         * did not complete OK within ~500ms.
         */
   1434 static inline int e100_load_ucode_wait(struct nic *nic)
        
   1435 {
        
   1436 	const struct firmware *fw;
        
   1437 	int err = 0, counter = 50;
        
   1438 	struct cb *cb = nic->cb_to_clean;
        
   1439 
        
   1440 	fw = e100_request_firmware(nic);
        
        /* NULL means "this MAC needs no ucode": PTR_ERR(NULL) yields 0 (success). */
   1441 	/* If it's NULL, then no ucode is required */
        
   1442 	if (!fw || IS_ERR(fw))
        
   1443 		return PTR_ERR(fw);
        
   1444 
        
   1445 	if ((err = e100_exec_cb(nic, (void *)fw, e100_setup_ucode)))
        
   1446 		netif_err(nic, probe, nic->netdev,
        
   1447 			  "ucode cmd failed with error %d\n", err);
        
   1448 
        
   1449 	/* must restart cuc */
        
   1450 	nic->cuc_cmd = cuc_start;
        
   1451 
        
   1452 	/* wait for completion */
        
   1453 	e100_write_flush(nic);
        
   1454 	udelay(10);
        
   1455 
        
        /* Poll the CB status word; 50 iterations x 10ms = 500ms worst case. */
   1456 	/* wait for possibly (ouch) 500ms */
        
   1457 	while (!(cb->status & cpu_to_le16(cb_complete))) {
        
   1458 		msleep(10);
        
   1459 		if (!--counter) break;
        
   1460 	}
        
   1461 
        
   1462 	/* ack any interrupts, something could have been set */
        
   1463 	iowrite8(~0, &nic->csr->scb.stat_ack);
        
   1464 
        
   1465 	/* if the command failed, or is not OK, notify and return */
        
   1466 	if (!counter || !(cb->status & cpu_to_le16(cb_ok))) {
        
   1467 		netif_err(nic, probe, nic->netdev, "ucode load failed\n");
        
   1468 		err = -EPERM;
        
   1469 	}
        
   1470 
        
   1471 	return err;
        
   1472 }
       
  1473 
       
        /* e100_setup_iaaddr - cb-setup callback that loads the individual
         * (station MAC) address from netdev->dev_addr into the CB.
         * @skb is unused. Always returns 0.
         */
   1474 static int e100_setup_iaaddr(struct nic *nic, struct cb *cb,
        
   1475 	struct sk_buff *skb)
        
   1476 {
        
   1477 	cb->command = cpu_to_le16(cb_iaaddr);
        
   1478 	memcpy(cb->u.iaaddr, nic->netdev->dev_addr, ETH_ALEN);
        
   1479 	return 0;
        
   1480 }
       
  1481 
       
        /* e100_dump - cb-setup callback for the diagnostic dump command: points
         * the device at the DMA address of the driver's dump buffer inside
         * struct mem. @skb is unused. Always returns 0.
         */
   1482 static int e100_dump(struct nic *nic, struct cb *cb, struct sk_buff *skb)
        
   1483 {
        
   1484 	cb->command = cpu_to_le16(cb_dump);
        
   1485 	cb->u.dump_buffer_addr = cpu_to_le32(nic->dma_addr +
        
   1486 		offsetof(struct mem, dump_buf));
        
   1487 	return 0;
        
   1488 }
       
  1489 
       
        /* e100_phy_check_without_mii - consult the EEPROM PHY-interface field to
         * decide whether this board uses one of the known MII-less PHYs.
         * Returns 1 when an MII-less PHY was recognized (and sets up emulated
         * mdio handling), 0 otherwise. Only called after the MDIO scan failed,
         * since EEPROM contents can be unreliable.
         */
   1490 static int e100_phy_check_without_mii(struct nic *nic)
        
   1491 {
        
   1492 	u8 phy_type;
        
   1493 	int without_mii;
        
   1494 
        
        /* PHY interface type is encoded in bits 11:8 of the EEPROM word. */
   1495 	phy_type = (nic->eeprom[eeprom_phy_iface] >> 8) & 0x0f;
        
   1496 
        
   1497 	switch (phy_type) {
        
   1498 	case NoSuchPhy: /* Non-MII PHY; UNTESTED! */
        
   1499 	case I82503: /* Non-MII PHY; UNTESTED! */
        
   1500 	case S80C24: /* Non-MII PHY; tested and working */
        
   1501 		/* paragraph from the FreeBSD driver, "FXP_PHY_80C24":
        
   1502 		 * The Seeq 80c24 AutoDUPLEX(tm) Ethernet Interface Adapter
        
   1503 		 * doesn't have a programming interface of any sort.  The
        
   1504 		 * media is sensed automatically based on how the link partner
        
   1505 		 * is configured.  This is, in essence, manual configuration.
        
   1506 		 */
        
   1507 		netif_info(nic, probe, nic->netdev,
        
   1508 			   "found MII-less i82503 or 80c24 or other PHY\n");
        
   1509 
        
   1510 		nic->mdio_ctrl = mdio_ctrl_phy_mii_emulated;
        
   1511 		nic->mii.phy_id = 0; /* is this ok for an MII-less PHY? */
        
   1512 
        
   1513 		/* these might be needed for certain MII-less cards...
        
   1514 		 * nic->flags |= ich;
        
   1515 		 * nic->flags |= ich_10h_workaround; */
        
   1516 
        
   1517 		without_mii = 1;
        
   1518 		break;
        
   1519 	default:
        
   1520 		without_mii = 0;
        
   1521 		break;
        
   1522 	}
        
   1523 	return without_mii;
        
   1524 }
       
  1525 
       
        /* PHY register bits used by e100_phy_init(): MDI/MDI-X auto-switch
         * control and National Semiconductor congestion-control bits (the NSC
         * congestion register lives in the MII_RESV1 slot). */
   1526 #define NCONFIG_AUTO_SWITCH	0x0080
        
   1527 #define MII_NSC_CONG		MII_RESV1
        
   1528 #define NSC_CONG_ENABLE		0x0100
        
   1529 #define NSC_CONG_TXREADY	0x0400
        
   1530 #define ADVERTISE_FC_SUPPORTED	0x0400
       
        /* e100_phy_init - locate and configure the PHY.
         * Scans all 32 MDIO addresses (trying 1 first, then 0, then 2..31),
         * falls back to the MII-less-PHY EEPROM check when none responds,
         * isolates all non-selected PHYs, and applies per-PHY quirks (82552
         * flow-control advertisement, NSC congestion control, MDI/MDI-X
         * auto-switching). Returns 0 on success, -EAGAIN if no PHY was found.
         */
   1531 static int e100_phy_init(struct nic *nic)
        
   1532 {
        
   1533 	struct net_device *netdev = nic->netdev;
        
   1534 	u32 addr;
        
   1535 	u16 bmcr, stat, id_lo, id_hi, cong;
        
   1536 
        
   1537 	/* Discover phy addr by searching addrs in order {1,0,2,..., 31} */
        
   1538 	for (addr = 0; addr < 32; addr++) {
        
   1539 		nic->mii.phy_id = (addr == 0) ? 1 : (addr == 1) ? 0 : addr;
        
   1540 		bmcr = mdio_read(netdev, nic->mii.phy_id, MII_BMCR);
        
        /* BMSR is read twice on purpose: latched link/status bits only
         * reflect current state on the second read. */
   1541 		stat = mdio_read(netdev, nic->mii.phy_id, MII_BMSR);
        
   1542 		stat = mdio_read(netdev, nic->mii.phy_id, MII_BMSR);
        
        /* 0xFFFF / all-zero reads mean "no device at this address". */
   1543 		if (!((bmcr == 0xFFFF) || ((stat == 0) && (bmcr == 0))))
        
   1544 			break;
        
   1545 	}
        
   1546 	if (addr == 32) {
        
   1547 		/* uhoh, no PHY detected: check whether we seem to be some
        
   1548 		 * weird, rare variant which is *known* to not have any MII.
        
   1549 		 * But do this AFTER MII checking only, since this does
        
   1550 		 * lookup of EEPROM values which may easily be unreliable. */
        
   1551 		if (e100_phy_check_without_mii(nic))
        
   1552 			return 0; /* simply return and hope for the best */
        
   1553 		else {
        
   1554 			/* for unknown cases log a fatal error */
        
   1555 			netif_err(nic, hw, nic->netdev,
        
   1556 				  "Failed to locate any known PHY, aborting\n");
        
   1557 			return -EAGAIN;
        
   1558 		}
        
   1559 	} else
        
   1560 		netif_printk(nic, hw, KERN_DEBUG, nic->netdev,
        
   1561 			     "phy_addr = %d\n", nic->mii.phy_id);
        
   1562 
        
   1563 	/* Get phy ID */
        
   1564 	id_lo = mdio_read(netdev, nic->mii.phy_id, MII_PHYSID1);
        
   1565 	id_hi = mdio_read(netdev, nic->mii.phy_id, MII_PHYSID2);
        
   1566 	nic->phy = (u32)id_hi << 16 | (u32)id_lo;
        
   1567 	netif_printk(nic, hw, KERN_DEBUG, nic->netdev,
        
   1568 		     "phy ID = 0x%08X\n", nic->phy);
        
   1569 
        
   1570 	/* Select the phy and isolate the rest */
        
   1571 	for (addr = 0; addr < 32; addr++) {
        
   1572 		if (addr != nic->mii.phy_id) {
        
   1573 			mdio_write(netdev, addr, MII_BMCR, BMCR_ISOLATE);
        
   1574 		} else if (nic->phy != phy_82552_v) {
        
   1575 			bmcr = mdio_read(netdev, addr, MII_BMCR);
        
   1576 			mdio_write(netdev, addr, MII_BMCR,
        
   1577 				bmcr & ~BMCR_ISOLATE);
        
   1578 		}
        
   1579 	}
        
   1580 	/*
        
   1581 	 * Workaround for 82552:
        
   1582 	 * Clear the ISOLATE bit on selected phy_id last (mirrored on all
        
   1583 	 * other phy_id's) using bmcr value from addr discovery loop above.
        
   1584 	 */
        
   1585 	if (nic->phy == phy_82552_v)
        
   1586 		mdio_write(netdev, nic->mii.phy_id, MII_BMCR,
        
   1587 			bmcr & ~BMCR_ISOLATE);
        
   1588 
        
   1589 	/* Handle National tx phys */
        
   1590 #define NCS_PHY_MODEL_MASK	0xFFF0FFFF
        
   1591 	if ((nic->phy & NCS_PHY_MODEL_MASK) == phy_nsc_tx) {
        
   1592 		/* Disable congestion control */
        
   1593 		cong = mdio_read(netdev, nic->mii.phy_id, MII_NSC_CONG);
        
   1594 		cong |= NSC_CONG_TXREADY;
        
   1595 		cong &= ~NSC_CONG_ENABLE;
        
   1596 		mdio_write(netdev, nic->mii.phy_id, MII_NSC_CONG, cong);
        
   1597 	}
        
   1598 
        
   1599 	if (nic->phy == phy_82552_v) {
        
   1600 		u16 advert = mdio_read(netdev, nic->mii.phy_id, MII_ADVERTISE);
        
   1601 
        
   1602 		/* assign special tweaked mdio_ctrl() function */
        
   1603 		nic->mdio_ctrl = mdio_ctrl_phy_82552_v;
        
   1604 
        
   1605 		/* Workaround Si not advertising flow-control during autoneg */
        
   1606 		advert |= ADVERTISE_PAUSE_CAP | ADVERTISE_PAUSE_ASYM;
        
   1607 		mdio_write(netdev, nic->mii.phy_id, MII_ADVERTISE, advert);
        
   1608 
        
   1609 		/* Reset for the above changes to take effect */
        
   1610 		bmcr = mdio_read(netdev, nic->mii.phy_id, MII_BMCR);
        
   1611 		bmcr |= BMCR_RESET;
        
   1612 		mdio_write(netdev, nic->mii.phy_id, MII_BMCR, bmcr);
        
   1613 	} else if ((nic->mac >= mac_82550_D102) || ((nic->flags & ich) &&
        
   1614 	   (mdio_read(netdev, nic->mii.phy_id, MII_TPISTATUS) & 0x8000) &&
        
   1615 		!(nic->eeprom[eeprom_cnfg_mdix] & eeprom_mdix_enabled))) {
        
   1616 		/* enable/disable MDI/MDI-X auto-switching. */
        
   1617 		mdio_write(netdev, nic->mii.phy_id, MII_NCONFIG,
        
   1618 				nic->mii.force_media ? 0 : NCONFIG_AUTO_SWITCH)-
        
   1619 	}
        
   1620 
        
   1621 	return 0;
        
   1622 }
       
  1623 
       
        /* e100_hw_init - full hardware bring-up sequence: reset, self-test
         * (skipped in interrupt context), PHY init, CU/RU base loading,
         * optional microcode, configure + station address CBs, and stats
         * dump setup. Leaves the device with interrupts disabled.
         * Returns 0 on success or the first failing step's negative errno.
         */
   1624 static int e100_hw_init(struct nic *nic)
        
   1625 {
        
   1626 	int err = 0;
        
   1627 
        
   1628 	e100_hw_reset(nic);
        
   1629 
        
   1630 	netif_err(nic, hw, nic->netdev, "e100_hw_init\n");
        
   1631 	if (!in_interrupt() && (err = e100_self_test(nic)))
        
   1632 		return err;
        
   1633 
        
   1634 	if ((err = e100_phy_init(nic)))
        
   1635 		return err;
        
        /* Load zero base addresses so CB/RFD link fields are absolute. */
   1636 	if ((err = e100_exec_cmd(nic, cuc_load_base, 0)))
        
   1637 		return err;
        
   1638 	if ((err = e100_exec_cmd(nic, ruc_load_base, 0)))
        
   1639 		return err;
        
   1640 	if ((err = e100_load_ucode_wait(nic)))
        
   1641 		return err;
        
   1642 	if ((err = e100_exec_cb(nic, NULL, e100_configure)))
        
   1643 		return err;
        
   1644 	if ((err = e100_exec_cb(nic, NULL, e100_setup_iaaddr)))
        
   1645 		return err;
        
        /* Point the device at the stats block and reset the counters. */
   1646 	if ((err = e100_exec_cmd(nic, cuc_dump_addr,
        
   1647 		nic->dma_addr + offsetof(struct mem, stats))))
        
   1648 		return err;
        
   1649 	if ((err = e100_exec_cmd(nic, cuc_dump_reset, 0)))
        
   1650 		return err;
        
   1651 
        
   1652 	e100_disable_irq(nic);
        
   1653 
        
   1654 	return 0;
        
   1655 }
       
  1656 
       
        /* e100_multi - cb-setup callback that loads the multicast address list
         * into the CB, capped at E100_MAX_MULTICAST_ADDRS entries (overflow is
         * handled by multicast_all in e100_set_multicast_list()).
         * @skb is unused. Always returns 0.
         */
   1657 static int e100_multi(struct nic *nic, struct cb *cb, struct sk_buff *skb)
        
   1658 {
        
   1659 	struct net_device *netdev = nic->netdev;
        
   1660 	struct netdev_hw_addr *ha;
        
   1661 	u16 i, count = min(netdev_mc_count(netdev), E100_MAX_MULTICAST_ADDRS);
        
   1662 
        
   1663 	cb->command = cpu_to_le16(cb_multi);
        
        /* Hardware wants the list length in bytes, not entries. */
   1664 	cb->u.multi.count = cpu_to_le16(count * ETH_ALEN);
        
   1665 	i = 0;
        
   1666 	netdev_for_each_mc_addr(ha, netdev) {
        
   1667 		if (i == count)
        
   1668 			break;
        
   1669 		memcpy(&cb->u.multi.addr[i++ * ETH_ALEN], &ha->addr,
        
   1670 			ETH_ALEN);
        
   1671 	}
        
   1672 	return 0;
        
   1673 }
       
  1674 
       
        /* e100_set_multicast_list - ndo callback for RX filter changes: folds
         * IFF_PROMISC / IFF_ALLMULTI (or an oversized mc list) into nic->flags,
         * then queues a configure CB followed by a multicast-list CB.
         */
   1675 static void e100_set_multicast_list(struct net_device *netdev)
        
   1676 {
        
   1677 	struct nic *nic = netdev_priv(netdev);
        
   1678 
        
   1679 	netif_printk(nic, hw, KERN_DEBUG, nic->netdev,
        
   1680 		     "mc_count=%d, flags=0x%04X\n",
        
   1681 		     netdev_mc_count(netdev), netdev->flags);
        
   1682 
        
   1683 	if (netdev->flags & IFF_PROMISC)
        
   1684 		nic->flags |= promiscuous;
        
   1685 	else
        
   1686 		nic->flags &= ~promiscuous;
        
   1687 
        
        /* If the list exceeds what one multicast CB can carry, fall back to
         * accepting all multicast traffic. */
   1688 	if (netdev->flags & IFF_ALLMULTI ||
        
   1689 		netdev_mc_count(netdev) > E100_MAX_MULTICAST_ADDRS)
        
   1690 		nic->flags |= multicast_all;
        
   1691 	else
        
   1692 		nic->flags &= ~multicast_all;
        
   1693 
        
   1694 	e100_exec_cb(nic, NULL, e100_configure);
        
   1695 	e100_exec_cb(nic, NULL, e100_multi);
        
   1696 }
       
  1697 
       
        /* e100_update_stats - harvest the device's DMA'd statistics block into
         * netdev stats and driver counters, then kick off the next dump+reset.
         * The stats block layout grew across MAC generations, so the
         * "dump complete" marker word sits at a different offset per family.
         */
   1698 static void e100_update_stats(struct nic *nic)
        
   1699 {
        
   1700 	struct net_device *dev = nic->netdev;
        
   1701 	struct net_device_stats *ns = &dev->stats;
        
   1702 	struct stats *s = &nic->mem->stats;
        
        /* Completion marker location depends on how many counters this MAC
         * generation actually dumps. */
   1703 	__le32 *complete = (nic->mac < mac_82558_D101_A4) ? &s->fc_xmt_pause :
        
   1704 		(nic->mac < mac_82559_D101M) ? (__le32 *)&s->xmt_tco_frames :
        
   1705 		&s->complete;
        
   1706 
        
   1707 	/* Device's stats reporting may take several microseconds to
        
   1708 	 * complete, so we're always waiting for results of the
        
   1709 	 * previous command. */
        
   1710 
        
   1711 	if (*complete == cpu_to_le32(cuc_dump_reset_complete)) {
        
   1712 		*complete = 0;
        
   1713 		nic->tx_frames = le32_to_cpu(s->tx_good_frames);
        
   1714 		nic->tx_collisions = le32_to_cpu(s->tx_total_collisions);
        
   1715 		ns->tx_aborted_errors += le32_to_cpu(s->tx_max_collisions);
        
   1716 		ns->tx_window_errors += le32_to_cpu(s->tx_late_collisions);
        
   1717 		ns->tx_carrier_errors += le32_to_cpu(s->tx_lost_crs);
        
   1718 		ns->tx_fifo_errors += le32_to_cpu(s->tx_underruns);
        
   1719 		ns->collisions += nic->tx_collisions;
        
   1720 		ns->tx_errors += le32_to_cpu(s->tx_max_collisions) +
        
   1721 			le32_to_cpu(s->tx_lost_crs);
        
   1722 		nic->rx_short_frame_errors +=
        
   1723 			le32_to_cpu(s->rx_short_frame_errors);
        
   1724 		ns->rx_length_errors = nic->rx_short_frame_errors +
        
   1725 			nic->rx_over_length_errors;
        
   1726 		ns->rx_crc_errors += le32_to_cpu(s->rx_crc_errors);
        
   1727 		ns->rx_frame_errors += le32_to_cpu(s->rx_alignment_errors);
        
   1728 		ns->rx_over_errors += le32_to_cpu(s->rx_overrun_errors);
        
   1729 		ns->rx_fifo_errors += le32_to_cpu(s->rx_overrun_errors);
        
   1730 		ns->rx_missed_errors += le32_to_cpu(s->rx_resource_errors);
        
   1731 		ns->rx_errors += le32_to_cpu(s->rx_crc_errors) +
        
   1732 			le32_to_cpu(s->rx_alignment_errors) +
        
   1733 			le32_to_cpu(s->rx_short_frame_errors) +
        
   1734 			le32_to_cpu(s->rx_cdt_errors);
        
   1735 		nic->tx_deferred += le32_to_cpu(s->tx_deferred);
        
   1736 		nic->tx_single_collisions +=
        
   1737 			le32_to_cpu(s->tx_single_collisions);
        
   1738 		nic->tx_multiple_collisions +=
        
   1739 			le32_to_cpu(s->tx_multiple_collisions);
        
        /* Flow-control and TCO counters only exist on newer MACs. */
   1740 		if (nic->mac >= mac_82558_D101_A4) {
        
   1741 			nic->tx_fc_pause += le32_to_cpu(s->fc_xmt_pause);
        
   1742 			nic->rx_fc_pause += le32_to_cpu(s->fc_rcv_pause);
        
   1743 			nic->rx_fc_unsupported +=
        
   1744 				le32_to_cpu(s->fc_rcv_unsupported);
        
   1745 			if (nic->mac >= mac_82559_D101M) {
        
   1746 				nic->tx_tco_frames +=
        
   1747 					le16_to_cpu(s->xmt_tco_frames);
        
   1748 				nic->rx_tco_frames +=
        
   1749 					le16_to_cpu(s->rcv_tco_frames);
        
   1750 			}
        
   1751 		}
        
   1752 	}
        
   1753 
        
   1754 
        
        /* Start the next asynchronous dump+reset; results are consumed on the
         * following call. */
   1755 	if (e100_exec_cmd(nic, cuc_dump_reset, 0))
        
   1756 		netif_printk(nic, tx_err, KERN_DEBUG, nic->netdev,
        
   1757 			     "exec cuc_dump_reset failed\n");
        
   1758 }
       
  1759 
       
        /* e100_adjust_adaptive_ifs - tune the inter-frame spacing on half-duplex
         * links: widen IFS (up to 60, in steps of 5) when more than ~3% of
         * frames collide under load, shrink it again when traffic is light.
         * A change is pushed to the hardware via a configure CB.
         */
   1760 static void e100_adjust_adaptive_ifs(struct nic *nic, int speed, int duplex)
        
   1761 {
        
   1762 	/* Adjust inter-frame-spacing (IFS) between two transmits if
        
   1763 	 * we're getting collisions on a half-duplex connection. */
        
   1764 
        
   1765 	if (duplex == DUPLEX_HALF) {
        
   1766 		u32 prev = nic->adaptive_ifs;
        
   1767 		u32 min_frames = (speed == SPEED_100) ? 1000 : 100;
        
   1768 
        
        /* tx_frames/32 < tx_collisions  <=>  collision rate above ~3%. */
   1769 		if ((nic->tx_frames / 32 < nic->tx_collisions) &&
        
   1770 		   (nic->tx_frames > min_frames)) {
        
   1771 			if (nic->adaptive_ifs < 60)
        
   1772 				nic->adaptive_ifs += 5;
        
   1773 		} else if (nic->tx_frames < min_frames) {
        
   1774 			if (nic->adaptive_ifs >= 5)
        
   1775 				nic->adaptive_ifs -= 5;
        
   1776 		}
        
   1777 		if (nic->adaptive_ifs != prev)
        
   1778 			e100_exec_cb(nic, NULL, e100_configure);
        
   1779 	}
        
   1780 }
       
  1781 
       
        /* e100_watchdog - periodic timer: link maintenance, stats harvesting,
         * adaptive IFS and per-chip workarounds.
         * EtherCAT mode (nic->ecdev): only mirrors the MII link state into the
         * EtherCAT master and returns without rescheduling — the master's cyclic
         * processing drives the device instead of this timer.
         */
   1782 static void e100_watchdog(unsigned long data)
        
   1783 {
        
   1784 	struct nic *nic = (struct nic *)data;
        
   1785 	struct ethtool_cmd cmd = { .cmd = ETHTOOL_GSET };
        
   1786 	u32 speed;
        
   1787 
        
   1788 	if (nic->ecdev) {
        
   1789 		ecdev_set_link(nic->ecdev, mii_link_ok(&nic->mii) ? 1 : 0);
        
   1790 		return;
        
   1791 	}
        
   1792 
        
   1793 	netif_printk(nic, timer, KERN_DEBUG, nic->netdev,
        
   1794 		     "right now = %ld\n", jiffies);
        
   1795 
        
   1796 	/* mii library handles link maintenance tasks */
        
   1797 
        
   1798 	mii_ethtool_gset(&nic->mii, &cmd);
        
   1799 	speed = ethtool_cmd_speed(&cmd);
        
   1800 
        
   1801 	if (mii_link_ok(&nic->mii) && !netif_carrier_ok(nic->netdev)) {
        
   1802 		netdev_info(nic->netdev, "NIC Link is Up %u Mbps %s Duplex\n",
        
   1803 			    speed == SPEED_100 ? 100 : 10,
        
   1804 			    cmd.duplex == DUPLEX_FULL ? "Full" : "Half");
        
   1805 	} else if (!mii_link_ok(&nic->mii) && netif_carrier_ok(nic->netdev)) {
        
   1806 		netdev_info(nic->netdev, "NIC Link is Down\n");
        
   1807 	}
        
   1808 
        
   1809 	mii_check_link(&nic->mii);
        
   1810 
        
   1811 	/* Software generated interrupt to recover from (rare) Rx
        
   1812 	 * allocation failure.
        
   1813 	 * Unfortunately have to use a spinlock to not re-enable interrupts
        
   1814 	 * accidentally, due to hardware that shares a register between the
        
   1815 	 * interrupt mask bit and the SW Interrupt generation bit */
        
   1816 	spin_lock_irq(&nic->cmd_lock);
        
   1817 	iowrite8(ioread8(&nic->csr->scb.cmd_hi) | irq_sw_gen,&nic->csr->scb.cmd_hi);
        
   1818 	e100_write_flush(nic);
        
   1819 	spin_unlock_irq(&nic->cmd_lock);
        
   1820 
        
   1821 	e100_update_stats(nic);
        
   1822 	e100_adjust_adaptive_ifs(nic, speed, cmd.duplex);
        
   1823 
        
   1824 	if (nic->mac <= mac_82557_D100_C)
        
   1825 		/* Issue a multicast command to workaround a 557 lock up */
        
   1826 		e100_set_multicast_list(nic->netdev);
        
   1827 
        
   1828 	if (nic->flags & ich && speed == SPEED_10 && cmd.duplex == DUPLEX_HALF)
        
   1829 		/* Need SW workaround for ICH[x] 10Mbps/half duplex Tx hang. */
        
   1830 		nic->flags |= ich_10h_workaround;
        
   1831 	else
        
   1832 		nic->flags &= ~ich_10h_workaround;
        
   1833 
        
        /* Re-arm ourselves for the next period (non-EtherCAT path only). */
   1834 	mod_timer(&nic->watchdog,
        
   1835 		  round_jiffies(jiffies + E100_WATCHDOG_PERIOD));
        
   1836 }
       
  1837 
       
        /* e100_xmit_prepare - cb-setup callback that maps @skb for DMA and
         * fills in the transmit CB (TBD array, threshold, length).
         * Returns 0 on success or -ENOMEM if the DMA mapping failed, so the
         * upper layer can retry the frame later.
         */
   1838 static int e100_xmit_prepare(struct nic *nic, struct cb *cb,
        
   1839 	struct sk_buff *skb)
        
   1840 {
        
   1841 	dma_addr_t dma_addr;
        
   1842 	cb->command = nic->tx_command;
        
   1843 
        
   1844 	dma_addr = pci_map_single(nic->pdev,
        
   1845 				  skb->data, skb->len, PCI_DMA_TODEVICE);
        
   1846 	/* If we can't map the skb, have the upper layer try later */
        
   1847 	if (pci_dma_mapping_error(nic->pdev, dma_addr))
        
   1848 		return -ENOMEM;
        
   1849 
        
   1850 	/*
        
   1851 	 * Use the last 4 bytes of the SKB payload packet as the CRC, used for
        
   1852 	 * testing, ie sending frames with bad CRC.
        
   1853 	 */
        
   1854 	if (unlikely(skb->no_fcs))
        
   1855 		cb->command |= cpu_to_le16(cb_tx_nc);
        
   1856 	else
        
   1857 		cb->command &= ~cpu_to_le16(cb_tx_nc);
        
   1858 
        
        /* cbs_avail is a multiple of 16 exactly once every 16 frames. */
   1859 	/* interrupt every 16 packets regardless of delay */
        
   1860 	if ((nic->cbs_avail & ~15) == nic->cbs_avail)
        
   1861 		cb->command |= cpu_to_le16(cb_i);
        
   1862 	cb->u.tcb.tbd_array = cb->dma_addr + offsetof(struct cb, u.tcb.tbd);
        
   1863 	cb->u.tcb.tcb_byte_count = 0;
        
   1864 	cb->u.tcb.threshold = nic->tx_threshold;
        
   1865 	cb->u.tcb.tbd_count = 1;
        
   1866 	cb->u.tcb.tbd.buf_addr = cpu_to_le32(dma_addr);
        
   1867 	cb->u.tcb.tbd.size = cpu_to_le16(skb->len);
        
   1868 	skb_tx_timestamp(skb);
        
   1869 	return 0;
        
   1870 }
       
  1871 
       
        /* e100_xmit_frame - ndo_start_xmit: queue @skb via a transmit CB.
         * Applies the ICH 10Mbps/half-duplex NOP workaround when flagged.
         * -ENOSPC means the frame WAS queued but the ring is now full (stop the
         * queue, still NETDEV_TX_OK); -ENOMEM means it was not queued (return
         * NETDEV_TX_BUSY so the stack retries). In EtherCAT mode the queue is
         * never stopped — the master owns flow control.
         */
   1872 static netdev_tx_t e100_xmit_frame(struct sk_buff *skb,
        
   1873 				   struct net_device *netdev)
        
   1874 {
        
   1875 	struct nic *nic = netdev_priv(netdev);
        
   1876 	int err;
        
   1877 
        
   1878 	if (nic->flags & ich_10h_workaround) {
        
   1879 		/* SW workaround for ICH[x] 10Mbps/half duplex Tx hang.
        
   1880 		   Issue a NOP command followed by a 1us delay before
        
   1881 		   issuing the Tx command. */
        
   1882 		if (e100_exec_cmd(nic, cuc_nop, 0))
        
   1883 			netif_printk(nic, tx_err, KERN_DEBUG, nic->netdev,
        
   1884 				     "exec cuc_nop failed\n");
        
   1885 		udelay(1);
        
   1886 	}
        
   1887 
        
   1888 	err = e100_exec_cb(nic, skb, e100_xmit_prepare);
        
   1889 
        
   1890 	switch (err) {
        
   1891 	case -ENOSPC:
        
   1892 		/* We queued the skb, but now we're out of space. */
        
   1893 		netif_printk(nic, tx_err, KERN_DEBUG, nic->netdev,
        
   1894 			     "No space for CB\n");
        
   1895 		if (!nic->ecdev)
        
   1896 			netif_stop_queue(netdev);
        
   1897 		break;
        
   1898 	case -ENOMEM:
        
   1899 		/* This is a hard error - log it. */
        
   1900 		netif_printk(nic, tx_err, KERN_DEBUG, nic->netdev,
        
   1901 			     "Out of Tx resources, returning skb\n");
        
   1902 		if (!nic->ecdev)
        
   1903 			netif_stop_queue(netdev);
        
   1904 		return NETDEV_TX_BUSY;
        
   1905 	}
        
   1906 
        
   1907 	return NETDEV_TX_OK;
        
   1908 }
       
  1909 
       
        /* e100_tx_clean - reclaim completed transmit CBs: unmap DMA, update
         * stats, free skbs (skipped in EtherCAT mode, where frames are not real
         * socket buffers owned by the stack) and restart a stopped queue.
         * Returns non-zero if at least one CB was cleaned.
         */
   1910 static int e100_tx_clean(struct nic *nic)
        
   1911 {
        
   1912 	struct net_device *dev = nic->netdev;
        
   1913 	struct cb *cb;
        
   1914 	int tx_cleaned = 0;
        
   1915 
        
        /* No locking needed in EtherCAT mode: the master serializes access. */
   1916 	if (!nic->ecdev)
        
   1917 		spin_lock(&nic->cb_lock);
        
   1918 
        
   1919 	/* Clean CBs marked complete */
        
   1920 	for (cb = nic->cb_to_clean;
        
   1921 	    cb->status & cpu_to_le16(cb_complete);
        
   1922 	    cb = nic->cb_to_clean = cb->next) {
        
   1923 		rmb(); /* read skb after status */
        
   1924 		netif_printk(nic, tx_done, KERN_DEBUG, nic->netdev,
        
   1925 			     "cb[%d]->status = 0x%04X\n",
        
   1926 			     (int)(((void*)cb - (void*)nic->cbs)/sizeof(struct cb)),
        
   1927 			     cb->status);
        
   1928 
        
   1929 		if (likely(cb->skb != NULL)) {
        
   1930 			dev->stats.tx_packets++;
        
   1931 			dev->stats.tx_bytes += cb->skb->len;
        
   1932 
        
   1933 			pci_unmap_single(nic->pdev,
        
   1934 				le32_to_cpu(cb->u.tcb.tbd.buf_addr),
        
   1935 				le16_to_cpu(cb->u.tcb.tbd.size),
        
   1936 				PCI_DMA_TODEVICE);
        
   1937 			if (!nic->ecdev)
        
   1938 				dev_kfree_skb_any(cb->skb);
        
   1939 			cb->skb = NULL;
        
   1940 			tx_cleaned = 1;
        
   1941 		}
        
   1942 		cb->status = 0;
        
   1943 		nic->cbs_avail++;
        
   1944 	}
        
   1945 
        
   1946 	if (!nic->ecdev) {
        
   1947 		spin_unlock(&nic->cb_lock);
        
   1948 
        
   1949 		/* Recover from running out of Tx resources in xmit_frame */
        
   1950 		if (unlikely(tx_cleaned && netif_queue_stopped(nic->netdev)))
        
   1951 			netif_wake_queue(nic->netdev);
        
   1952 	}
        
   1953 
        
   1954 	return tx_cleaned;
        
   1955 }
       
  1956 
       
        /* e100_clean_cbs - tear down the CB ring: unmap and free any skbs still
         * attached to in-flight CBs, return the ring memory to the DMA pool and
         * reset the ring bookkeeping pointers.
         */
   1957 static void e100_clean_cbs(struct nic *nic)
        
   1958 {
        
   1959 	if (nic->cbs) {
        
        /* Walk until every CB has been accounted for as available again. */
   1960 		while (nic->cbs_avail != nic->params.cbs.count) {
        
   1961 			struct cb *cb = nic->cb_to_clean;
        
   1962 			if (cb->skb) {
        
   1963 				pci_unmap_single(nic->pdev,
        
   1964 					le32_to_cpu(cb->u.tcb.tbd.buf_addr),
        
   1965 					le16_to_cpu(cb->u.tcb.tbd.size),
        
   1966 					PCI_DMA_TODEVICE);
        
   1967 				if (!nic->ecdev)
        
   1968 					dev_kfree_skb(cb->skb);
        
   1969 			}
        
   1970 			nic->cb_to_clean = nic->cb_to_clean->next;
        
   1971 			nic->cbs_avail++;
        
   1972 		}
        
   1973 		pci_pool_free(nic->cbs_pool, nic->cbs, nic->cbs_dma_addr);
        
   1974 		nic->cbs = NULL;
        
   1975 		nic->cbs_avail = 0;
        
   1976 	}
        
   1977 	nic->cuc_cmd = cuc_start;
        
   1978 	nic->cb_to_use = nic->cb_to_send = nic->cb_to_clean =
        
   1979 		nic->cbs;
        
   1980 }
       
  1981 
       
        /* e100_alloc_cbs - allocate and initialize the circular command-block
         * ring from the DMA pool: zero it, link each CB to its neighbours (both
         * CPU pointers and little-endian hardware link addresses) and reset the
         * ring cursors. Returns 0 on success, -ENOMEM on pool exhaustion.
         */
   1982 static int e100_alloc_cbs(struct nic *nic)
        
   1983 {
        
   1984 	struct cb *cb;
        
   1985 	unsigned int i, count = nic->params.cbs.count;
        
   1986 
        
   1987 	nic->cuc_cmd = cuc_start;
        
   1988 	nic->cb_to_use = nic->cb_to_send = nic->cb_to_clean = NULL;
        
   1989 	nic->cbs_avail = 0;
        
   1990 
        
   1991 	nic->cbs = pci_pool_alloc(nic->cbs_pool, GFP_KERNEL,
        
   1992 				  &nic->cbs_dma_addr);
        
   1993 	if (!nic->cbs)
        
   1994 		return -ENOMEM;
        
   1995 	memset(nic->cbs, 0, count * sizeof(struct cb));
        
   1996 
        
   1997 	for (cb = nic->cbs, i = 0; i < count; cb++, i++) {
        
        /* next/prev are CPU-side ring links; link is the device-visible
         * DMA address of the following CB (wraps at the end). */
   1998 		cb->next = (i + 1 < count) ? cb + 1 : nic->cbs;
        
   1999 		cb->prev = (i == 0) ? nic->cbs + count - 1 : cb - 1;
        
   2000 
        
   2001 		cb->dma_addr = nic->cbs_dma_addr + i * sizeof(struct cb);
        
   2002 		cb->link = cpu_to_le32(nic->cbs_dma_addr +
        
   2003 			((i+1) % count) * sizeof(struct cb));
        
   2004 	}
        
   2005 
        
   2006 	nic->cb_to_use = nic->cb_to_send = nic->cb_to_clean = nic->cbs;
        
   2007 	nic->cbs_avail = count;
        
   2008 
        
   2009 	return 0;
        
   2010 }
       
  2011 
       
        /* e100_start_receiver - (re)start the receive unit at @rx, or at the
         * head of the RX ring when @rx is NULL (init-time start). Only acts
         * when the RU is currently suspended and the target RFD has a buffer.
         */
   2012 static inline void e100_start_receiver(struct nic *nic, struct rx *rx)
        
   2013 {
        
   2014 	if (!nic->rxs) return;
        
   2015 	if (RU_SUSPENDED != nic->ru_running) return;
        
   2016 
        
   2017 	/* handle init time starts */
        
   2018 	if (!rx) rx = nic->rxs;
        
   2019 
        
   2020 	/* (Re)start RU if suspended or idle and RFA is non-NULL */
        
   2021 	if (rx->skb) {
        
   2022 		e100_exec_cmd(nic, ruc_start, rx->dma_addr);
        
   2023 		nic->ru_running = RU_RUNNING;
        
   2024 	}
        
   2025 }
       
  2026 
       
        /* RX buffer size: in-line RFD header + max VLAN Ethernet frame + FCS. */
   2027 #define RFD_BUF_LEN (sizeof(struct rfd) + VLAN_ETH_FRAME_LEN + ETH_FCS_LEN)
        
        /* e100_rx_alloc_skb - allocate and DMA-map one receive buffer for @rx:
         * seed it with the blank RFD template, then append it to the RFA by
         * pointing the previous RFD's link field at it (safe to touch because
         * the el bit on the buffer before last fences the hardware).
         * Returns 0 on success, -ENOMEM on allocation or mapping failure.
         */
   2028 static int e100_rx_alloc_skb(struct nic *nic, struct rx *rx)
        
   2029 {
        
   2030 	if (!(rx->skb = netdev_alloc_skb_ip_align(nic->netdev, RFD_BUF_LEN)))
        
   2031 		return -ENOMEM;
        
   2032 
        
   2033 	/* Init, and map the RFD. */
        
   2034 	skb_copy_to_linear_data(rx->skb, &nic->blank_rfd, sizeof(struct rfd));
        
   2035 	rx->dma_addr = pci_map_single(nic->pdev, rx->skb->data,
        
   2036 		RFD_BUF_LEN, PCI_DMA_BIDIRECTIONAL);
        
   2037 
        
   2038 	if (pci_dma_mapping_error(nic->pdev, rx->dma_addr)) {
        
   2039 		dev_kfree_skb_any(rx->skb);
        
   2040 		rx->skb = NULL;
        
   2041 		rx->dma_addr = 0;
        
   2042 		return -ENOMEM;
        
   2043 	}
        
   2044 
        
   2045 	/* Link the RFD to end of RFA by linking previous RFD to
        
   2046 	 * this one.  We are safe to touch the previous RFD because
        
   2047 	 * it is protected by the before last buffer's el bit being set */
        
   2048 	if (rx->prev->skb) {
        
   2049 		struct rfd *prev_rfd = (struct rfd *)rx->prev->skb->data;
        
        /* put_unaligned: the link field inside the skb data area is not
         * guaranteed to be naturally aligned. */
   2050 		put_unaligned_le32(rx->dma_addr, &prev_rfd->link);
        
   2051 		pci_dma_sync_single_for_device(nic->pdev, rx->prev->dma_addr,
        
   2052 			sizeof(struct rfd), PCI_DMA_BIDIRECTIONAL);
        
   2053 	}
        
   2054 
        
   2055 	return 0;
        
   2056 }
       
  2057 
       
  2058 static int e100_rx_indicate(struct nic *nic, struct rx *rx,
       
  2059 	unsigned int *work_done, unsigned int work_to_do)
       
  2060 {
       
  2061 	struct net_device *dev = nic->netdev;
       
  2062 	struct sk_buff *skb = rx->skb;
       
  2063 	struct rfd *rfd = (struct rfd *)skb->data;
       
  2064 	u16 rfd_status, actual_size;
       
  2065 	u16 fcs_pad = 0;
       
  2066 
       
  2067 	if (unlikely(work_done && *work_done >= work_to_do))
       
  2068 		return -EAGAIN;
       
  2069 
       
  2070 	/* Need to sync before taking a peek at cb_complete bit */
       
  2071 	pci_dma_sync_single_for_cpu(nic->pdev, rx->dma_addr,
       
  2072 		sizeof(struct rfd), PCI_DMA_BIDIRECTIONAL);
       
  2073 	rfd_status = le16_to_cpu(rfd->status);
       
  2074 
       
  2075 	netif_printk(nic, rx_status, KERN_DEBUG, nic->netdev,
       
  2076 		     "status=0x%04X\n", rfd_status);
       
  2077 	rmb(); /* read size after status bit */
       
  2078 
       
  2079 	/* If data isn't ready, nothing to indicate */
       
  2080 	if (unlikely(!(rfd_status & cb_complete))) {
       
  2081 		/* If the next buffer has the el bit, but we think the receiver
       
  2082 		 * is still running, check to see if it really stopped while
       
  2083 		 * we had interrupts off.
       
  2084 		 * This allows for a fast restart without re-enabling
       
  2085 		 * interrupts */
       
  2086 		if ((le16_to_cpu(rfd->command) & cb_el) &&
       
  2087 		    (RU_RUNNING == nic->ru_running))
       
  2088 
       
  2089 			if (ioread8(&nic->csr->scb.status) & rus_no_res)
       
  2090 				nic->ru_running = RU_SUSPENDED;
       
  2091 		pci_dma_sync_single_for_device(nic->pdev, rx->dma_addr,
       
  2092 					       sizeof(struct rfd),
       
  2093 					       PCI_DMA_FROMDEVICE);
       
  2094 		return -ENODATA;
       
  2095 	}
       
  2096 
       
  2097 	/* Get actual data size */
       
  2098 	if (unlikely(dev->features & NETIF_F_RXFCS))
       
  2099 		fcs_pad = 4;
       
  2100 	actual_size = le16_to_cpu(rfd->actual_size) & 0x3FFF;
       
  2101 	if (unlikely(actual_size > RFD_BUF_LEN - sizeof(struct rfd)))
       
  2102 		actual_size = RFD_BUF_LEN - sizeof(struct rfd);
       
  2103 
       
  2104 	/* Get data */
       
  2105 	pci_unmap_single(nic->pdev, rx->dma_addr,
       
  2106 		RFD_BUF_LEN, PCI_DMA_BIDIRECTIONAL);
       
  2107 
       
  2108 	/* If this buffer has the el bit, but we think the receiver
       
  2109 	 * is still running, check to see if it really stopped while
       
  2110 	 * we had interrupts off.
       
  2111 	 * This allows for a fast restart without re-enabling interrupts.
       
  2112 	 * This can happen when the RU sees the size change but also sees
       
  2113 	 * the el bit set. */
       
  2114 	if ((le16_to_cpu(rfd->command) & cb_el) &&
       
  2115 	    (RU_RUNNING == nic->ru_running)) {
       
  2116 
       
  2117 	    if (ioread8(&nic->csr->scb.status) & rus_no_res)
       
  2118 		nic->ru_running = RU_SUSPENDED;
       
  2119 	}
       
  2120 
       
  2121 	if (!nic->ecdev) {
       
  2122 		/* Pull off the RFD and put the actual data (minus eth hdr) */
       
  2123 		skb_reserve(skb, sizeof(struct rfd));
       
  2124 		skb_put(skb, actual_size);
       
  2125 		skb->protocol = eth_type_trans(skb, nic->netdev);
       
  2126 	}
       
  2127 
       
  2128 	/* If we are receiving all frames, then don't bother
       
  2129 	 * checking for errors.
       
  2130 	 */
       
  2131 	if (unlikely(dev->features & NETIF_F_RXALL)) {
       
  2132 		if (actual_size > ETH_DATA_LEN + VLAN_ETH_HLEN + fcs_pad)
       
  2133 			/* Received oversized frame, but keep it. */
       
  2134 			nic->rx_over_length_errors++;
       
  2135 		goto process_skb;
       
  2136 	}
       
  2137 
       
  2138 	if (unlikely(!(rfd_status & cb_ok))) {
       
  2139 		if (!nic->ecdev) {
       
  2140 			/* Don't indicate if hardware indicates errors */
       
  2141 			dev_kfree_skb_any(skb);
       
  2142 		}
       
  2143 	} else if (actual_size > ETH_DATA_LEN + VLAN_ETH_HLEN + fcs_pad) {
       
  2144 		/* Don't indicate oversized frames */
       
  2145 		nic->rx_over_length_errors++;
       
  2146 		if (!nic->ecdev) {
       
  2147 			dev_kfree_skb_any(skb);
       
  2148 		}
       
  2149 	} else {
       
  2150 process_skb:
       
  2151 		dev->stats.rx_packets++;
       
  2152 		dev->stats.rx_bytes += (actual_size - fcs_pad);
       
  2153 		if (nic->ecdev) {
       
  2154 			ecdev_receive(nic->ecdev,
       
  2155 					skb->data + sizeof(struct rfd), actual_size - fcs_pad);
       
  2156 
       
  2157 			// No need to detect link status as
       
  2158 			// long as frames are received: Reset watchdog.
       
  2159 			if (ecdev_get_link(nic->ecdev)) {
       
  2160 				nic->ec_watchdog_jiffies = jiffies;
       
  2161 			}
       
  2162 		} else {
       
  2163 			netif_receive_skb(skb);
       
  2164 		}
       
  2165 		if (work_done)
       
  2166 			(*work_done)++;
       
  2167 	}
       
  2168 
       
  2169 	if (nic->ecdev) {
       
  2170 		// make receive frame descriptior usable again
       
  2171 		memcpy(skb->data, &nic->blank_rfd, sizeof(struct rfd));
       
  2172 		rx->dma_addr = pci_map_single(nic->pdev, skb->data,
       
  2173 				RFD_BUF_LEN, PCI_DMA_BIDIRECTIONAL);
       
  2174 		if (pci_dma_mapping_error(nic->pdev, rx->dma_addr)) {
       
  2175 			rx->dma_addr = 0;
       
  2176 		}
       
  2177 
       
  2178 		/* Link the RFD to end of RFA by linking previous RFD to
       
  2179 		 * this one.  We are safe to touch the previous RFD because
       
  2180 		 * it is protected by the before last buffer's el bit being set */
       
  2181 		if (rx->prev->skb) {
       
  2182 			struct rfd *prev_rfd = (struct rfd *) rx->prev->skb->data;
       
  2183 			put_unaligned_le32(rx->dma_addr, &prev_rfd->link);
       
  2184 			pci_dma_sync_single_for_device(nic->pdev, rx->prev->dma_addr,
       
  2185 					sizeof(struct rfd), PCI_DMA_TODEVICE);
       
  2186 		}
       
  2187 	} else {
       
  2188 		rx->skb = NULL;
       
  2189 	}
       
  2190 
       
  2191 	return 0;
       
  2192 }
       
  2193 
       
/* Reap completed receive descriptors and maintain the RFD ring.
 *
 * Walks the ring from rx_to_clean, indicating each completed frame via
 * e100_rx_indicate() until the quota (@work_to_do) is exhausted or no
 * further frames are ready.  It then advances the el-bit "stop mark"
 * (the before-last RFD with size 0) and, if the receive unit entered
 * the No Resources state, restarts it.
 *
 * Called from NAPI poll with work_done != NULL, or from e100_ec_poll()
 * in EtherCAT mode with work_done == NULL. */
static void e100_rx_clean(struct nic *nic, unsigned int *work_done,
	unsigned int work_to_do)
{
	struct rx *rx;
	int restart_required = 0, err = 0;
	struct rx *old_before_last_rx, *new_before_last_rx;
	struct rfd *old_before_last_rfd, *new_before_last_rfd;

	/* Indicate newly arrived packets */
	for (rx = nic->rx_to_clean; rx->skb; rx = nic->rx_to_clean = rx->next) {
		err = e100_rx_indicate(nic, rx, work_done, work_to_do);
		/* Hit quota or no more to clean */
		if (-EAGAIN == err || -ENODATA == err)
			break;
	}


	/* On EAGAIN, hit quota so have more work to do, restart once
	 * cleanup is complete.
	 * Else, are we already rnr? then pay attention!!! this ensures that
	 * the state machine progression never allows a start with a
	 * partially cleaned list, avoiding a race between hardware
	 * and rx_to_clean when in NAPI mode */
	if (-EAGAIN != err && RU_SUSPENDED == nic->ru_running)
		restart_required = 1;

	/* Remember which RFD currently carries the el-bit stop mark. */
	old_before_last_rx = nic->rx_to_use->prev->prev;
	old_before_last_rfd = (struct rfd *)old_before_last_rx->skb->data;

	if (!nic->ecdev) {
		/* Alloc new skbs to refill list */
		for(rx = nic->rx_to_use; !rx->skb; rx = nic->rx_to_use = rx->next) {
			if(unlikely(e100_rx_alloc_skb(nic, rx)))
				break; /* Better luck next time (see watchdog) */
		}
	}

	new_before_last_rx = nic->rx_to_use->prev->prev;
	if (new_before_last_rx != old_before_last_rx) {
		/* Set the el-bit on the buffer that is before the last buffer.
		 * This lets us update the next pointer on the last buffer
		 * without worrying about hardware touching it.
		 * We set the size to 0 to prevent hardware from touching this
		 * buffer.
		 * When the hardware hits the before last buffer with el-bit
		 * and size of 0, it will RNR interrupt, the RUS will go into
		 * the No Resources state.  It will not complete nor write to
		 * this buffer. */
		new_before_last_rfd =
			(struct rfd *)new_before_last_rx->skb->data;
		new_before_last_rfd->size = 0;
		new_before_last_rfd->command |= cpu_to_le16(cb_el);
		pci_dma_sync_single_for_device(nic->pdev,
			new_before_last_rx->dma_addr, sizeof(struct rfd),
			PCI_DMA_BIDIRECTIONAL);

		/* Now that we have a new stopping point, we can clear the old
		 * stopping point.  We must sync twice to get the proper
		 * ordering on the hardware side of things. */
		old_before_last_rfd->command &= ~cpu_to_le16(cb_el);
		pci_dma_sync_single_for_device(nic->pdev,
			old_before_last_rx->dma_addr, sizeof(struct rfd),
			PCI_DMA_BIDIRECTIONAL);
		old_before_last_rfd->size = cpu_to_le16(VLAN_ETH_FRAME_LEN
							+ ETH_FCS_LEN);
		pci_dma_sync_single_for_device(nic->pdev,
			old_before_last_rx->dma_addr, sizeof(struct rfd),
			PCI_DMA_BIDIRECTIONAL);
	}

	if (restart_required) {
		// ack the rnr?
		iowrite8(stat_ack_rnr, &nic->csr->scb.stat_ack);
		e100_start_receiver(nic, nic->rx_to_clean);
		if (work_done)
			(*work_done)++;
	}
}
       
  2272 
       
/* Release the entire RX descriptor ring: unmap and free every skb
 * still attached, free the rx bookkeeping array, and reset the ring
 * pointers.  Marks the receive unit uninitialized first so no code
 * path tries to (re)start it while the ring is being torn down. */
static void e100_rx_clean_list(struct nic *nic)
{
	struct rx *rx;
	unsigned int i, count = nic->params.rfds.count;

	nic->ru_running = RU_UNINITIALIZED;

	if (nic->rxs) {
		for (rx = nic->rxs, i = 0; i < count; rx++, i++) {
			if (rx->skb) {
				/* Unmap before freeing: the buffer was DMA-mapped
				 * for the receive unit. */
				pci_unmap_single(nic->pdev, rx->dma_addr,
					RFD_BUF_LEN, PCI_DMA_BIDIRECTIONAL);
				dev_kfree_skb(rx->skb);
			}
		}
		kfree(nic->rxs);
		nic->rxs = NULL;
	}

	nic->rx_to_use = nic->rx_to_clean = NULL;
}
       
  2294 
       
/* Allocate and wire up the RX descriptor ring.
 *
 * Builds a circular doubly-linked list of params.rfds.count rx entries,
 * allocating a DMA-mapped skb for each.  In non-EtherCAT mode the
 * before-last RFD gets the el-bit and size 0 as the hardware stop mark
 * (see the comment below).  On skb allocation failure everything
 * allocated so far is released and -ENOMEM is returned. */
static int e100_rx_alloc_list(struct nic *nic)
{
	struct rx *rx;
	unsigned int i, count = nic->params.rfds.count;
	struct rfd *before_last;

	nic->rx_to_use = nic->rx_to_clean = NULL;
	nic->ru_running = RU_UNINITIALIZED;

	/* GFP_ATOMIC: this can be reached from non-sleeping contexts. */
	if (!(nic->rxs = kcalloc(count, sizeof(struct rx), GFP_ATOMIC)))
		return -ENOMEM;

	for (rx = nic->rxs, i = 0; i < count; rx++, i++) {
		/* Close the ring: last entry points back to the first. */
		rx->next = (i + 1 < count) ? rx + 1 : nic->rxs;
		rx->prev = (i == 0) ? nic->rxs + count - 1 : rx - 1;
		if (e100_rx_alloc_skb(nic, rx)) {
			e100_rx_clean_list(nic);
			return -ENOMEM;
		}
	}

	if (!nic->ecdev) {
		/* Set the el-bit on the buffer that is before the last buffer.
		 * This lets us update the next pointer on the last buffer without
		 * worrying about hardware touching it.
		 * We set the size to 0 to prevent hardware from touching this buffer.
		 * When the hardware hits the before last buffer with el-bit and size
		 * of 0, it will RNR interrupt, the RU will go into the No Resources
		 * state.  It will not complete nor write to this buffer. */
		rx = nic->rxs->prev->prev;
		before_last = (struct rfd *)rx->skb->data;
		before_last->command |= cpu_to_le16(cb_el);
		before_last->size = 0;
		pci_dma_sync_single_for_device(nic->pdev, rx->dma_addr,
				sizeof(struct rfd), PCI_DMA_BIDIRECTIONAL);
	}

	nic->rx_to_use = nic->rx_to_clean = nic->rxs;
	/* Suspended until e100_start_receiver() kicks the RU. */
	nic->ru_running = RU_SUSPENDED;

	return 0;
}
       
  2337 
       
/* Top-half interrupt handler: acknowledge all pending SCB events and,
 * in normal (non-EtherCAT) operation, hand the real work to NAPI.
 * In EtherCAT mode descriptor cleanup is done by the cyclic
 * e100_ec_poll() instead, so only the status bookkeeping runs here. */
static irqreturn_t e100_intr(int irq, void *dev_id)
{
	struct net_device *netdev = dev_id;
	struct nic *nic = netdev_priv(netdev);
	u8 stat_ack = ioread8(&nic->csr->scb.stat_ack);

	netif_printk(nic, intr, KERN_DEBUG, nic->netdev,
		     "stat_ack = 0x%02X\n", stat_ack);

	if (stat_ack == stat_ack_not_ours ||	/* Not our interrupt */
	   stat_ack == stat_ack_not_present)	/* Hardware is ejected */
		return IRQ_NONE;

	/* Ack interrupt(s) */
	iowrite8(stat_ack, &nic->csr->scb.stat_ack);

	/* We hit Receive No Resource (RNR); restart RU after cleaning */
	if (stat_ack & stat_ack_rnr)
		nic->ru_running = RU_SUSPENDED;

	/* Mask the IRQ before scheduling NAPI; e100_poll() re-enables it
	 * once the budget is no longer exhausted. */
	if (!nic->ecdev && likely(napi_schedule_prep(&nic->napi))) {
		e100_disable_irq(nic);
		__napi_schedule(&nic->napi);
	}

	return IRQ_HANDLED;
}
       
  2365 
       
/* Cyclic poll entry for the EtherCAT master (interrupt-less operation):
 * reap completed RX frames (up to 100 per call) and TX command blocks,
 * and run the regular watchdog roughly every two seconds. */
void e100_ec_poll(struct net_device *netdev)
{
	struct nic *nic = netdev_priv(netdev);

	e100_rx_clean(nic, NULL, 100);
	e100_tx_clean(nic);

	/* Unsigned jiffies subtraction is wraparound-safe. */
	if (jiffies - nic->ec_watchdog_jiffies >= 2 * HZ) {
		e100_watchdog((unsigned long) nic);
		nic->ec_watchdog_jiffies = jiffies;
	}
}
       
  2378 
       
  2379 
       
/* NAPI poll: process up to @budget received frames and reap completed
 * transmits.  If the budget was not fully consumed there is no more
 * pending work, so leave polling mode and only then re-enable the
 * device interrupt.  Returns the number of RX frames processed. */
static int e100_poll(struct napi_struct *napi, int budget)
{
	struct nic *nic = container_of(napi, struct nic, napi);
	unsigned int work_done = 0;

	e100_rx_clean(nic, &work_done, budget);
	e100_tx_clean(nic);

	/* If budget not fully consumed, exit the polling mode */
	if (work_done < budget) {
		napi_complete(napi);
		e100_enable_irq(nic);
	}

	return work_done;
}
       
  2396 
       
#ifdef CONFIG_NET_POLL_CONTROLLER
/* netpoll/netconsole hook: service the device with its IRQ masked so
 * frames can be moved even when normal interrupt delivery is blocked. */
static void e100_netpoll(struct net_device *netdev)
{
	struct nic *nic = netdev_priv(netdev);

	e100_disable_irq(nic);
	e100_intr(nic->pdev->irq, netdev);
	e100_tx_clean(nic);
	e100_enable_irq(nic);
}
#endif
       
  2408 
       
  2409 static int e100_set_mac_address(struct net_device *netdev, void *p)
       
  2410 {
       
  2411 	struct nic *nic = netdev_priv(netdev);
       
  2412 	struct sockaddr *addr = p;
       
  2413 
       
  2414 	if (!is_valid_ether_addr(addr->sa_data))
       
  2415 		return -EADDRNOTAVAIL;
       
  2416 
       
  2417 	memcpy(netdev->dev_addr, addr->sa_data, netdev->addr_len);
       
  2418 	e100_exec_cb(nic, NULL, e100_setup_iaaddr);
       
  2419 
       
  2420 	return 0;
       
  2421 }
       
  2422 
       
  2423 static int e100_change_mtu(struct net_device *netdev, int new_mtu)
       
  2424 {
       
  2425 	if (new_mtu < ETH_ZLEN || new_mtu > ETH_DATA_LEN)
       
  2426 		return -EINVAL;
       
  2427 	netdev->mtu = new_mtu;
       
  2428 	return 0;
       
  2429 }
       
  2430 
       
  2431 static int e100_asf(struct nic *nic)
       
  2432 {
       
  2433 	/* ASF can be enabled from eeprom */
       
  2434 	return (nic->pdev->device >= 0x1050) && (nic->pdev->device <= 0x1057) &&
       
  2435 	   (nic->eeprom[eeprom_config_asf] & eeprom_asf) &&
       
  2436 	   !(nic->eeprom[eeprom_config_asf] & eeprom_gcl) &&
       
  2437 	   ((nic->eeprom[eeprom_smbus_addr] & 0xFF) != 0xFE);
       
  2438 }
       
  2439 
       
/* Bring the adapter up: allocate the RX ring and command blocks,
 * initialize the hardware, start the receiver, and attach the IRQ.
 * In EtherCAT mode (nic->ecdev set) the watchdog timer, TX queue,
 * NAPI and interrupt enabling are skipped, since the EtherCAT master
 * drives the device by polling.  Returns 0 or a negative errno; on
 * failure, resources are unwound via the goto labels in reverse
 * acquisition order. */
static int e100_up(struct nic *nic)
{
	int err;

	if ((err = e100_rx_alloc_list(nic)))
		return err;
	if ((err = e100_alloc_cbs(nic)))
		goto err_rx_clean_list;
	if ((err = e100_hw_init(nic)))
		goto err_clean_cbs;
	e100_set_multicast_list(nic->netdev);
	e100_start_receiver(nic, NULL);
	if (!nic->ecdev) {
		mod_timer(&nic->watchdog, jiffies);
	}
	if ((err = request_irq(nic->pdev->irq, e100_intr, IRQF_SHARED,
		nic->netdev->name, nic->netdev)))
		goto err_no_irq;
	if (!nic->ecdev) {
		netif_wake_queue(nic->netdev);
		napi_enable(&nic->napi);
		/* enable ints _after_ enabling poll, preventing a race between
		 * disable ints+schedule */
		e100_enable_irq(nic);
	}
	return 0;

err_no_irq:
	if (!nic->ecdev)
		del_timer_sync(&nic->watchdog);
err_clean_cbs:
	e100_clean_cbs(nic);
err_rx_clean_list:
	e100_rx_clean_list(nic);
	return err;
}
       
  2476 
       
/* Take the adapter down, mirroring e100_up() in reverse: quiesce
 * NAPI and the TX queue (non-EtherCAT only), reset the hardware,
 * release the IRQ, stop the watchdog, and free both descriptor
 * rings. */
static void e100_down(struct nic *nic)
{
	if (!nic->ecdev) {
		/* wait here for poll to complete */
		napi_disable(&nic->napi);
		netif_stop_queue(nic->netdev);
	}
	e100_hw_reset(nic);
	free_irq(nic->pdev->irq, nic->netdev);
	if (!nic->ecdev) {
		del_timer_sync(&nic->watchdog);
		netif_carrier_off(nic->netdev);
	}
	e100_clean_cbs(nic);
	e100_rx_clean_list(nic);
}
       
  2493 
       
  2494 static void e100_tx_timeout(struct net_device *netdev)
       
  2495 {
       
  2496 	struct nic *nic = netdev_priv(netdev);
       
  2497 
       
  2498 	/* Reset outside of interrupt context, to avoid request_irq
       
  2499 	 * in interrupt context */
       
  2500 	schedule_work(&nic->tx_timeout_task);
       
  2501 }
       
  2502 
       
  2503 static void e100_tx_timeout_task(struct work_struct *work)
       
  2504 {
       
  2505 	struct nic *nic = container_of(work, struct nic, tx_timeout_task);
       
  2506 	struct net_device *netdev = nic->netdev;
       
  2507 
       
  2508 	netif_printk(nic, tx_err, KERN_DEBUG, nic->netdev,
       
  2509 		     "scb.status=0x%02X\n", ioread8(&nic->csr->scb.status));
       
  2510 
       
  2511 	rtnl_lock();
       
  2512 	if (netif_running(netdev)) {
       
  2513 		e100_down(netdev_priv(netdev));
       
  2514 		e100_up(netdev_priv(netdev));
       
  2515 	}
       
  2516 	rtnl_unlock();
       
  2517 }
       
  2518 
       
/* Run an internal MAC or PHY loopback test: bring up minimal RX/TX
 * resources, transmit one 0xFF-filled frame in loopback mode, and
 * verify the received copy matches byte-for-byte.  Returns 0 on
 * success, -EAGAIN on data mismatch, or another negative errno on
 * setup failure.  All hardware state is reset before returning. */
static int e100_loopback_test(struct nic *nic, enum loopback loopback_mode)
{
	int err;
	struct sk_buff *skb;

	/* Use driver resources to perform internal MAC or PHY
	 * loopback test.  A single packet is prepared and transmitted
	 * in loopback mode, and the test passes if the received
	 * packet compares byte-for-byte to the transmitted packet. */

	if ((err = e100_rx_alloc_list(nic)))
		return err;
	if ((err = e100_alloc_cbs(nic)))
		goto err_clean_rx;

	/* ICH PHY loopback is broken so do MAC loopback instead */
	if (nic->flags & ich && loopback_mode == lb_phy)
		loopback_mode = lb_mac;

	nic->loopback = loopback_mode;
	if ((err = e100_hw_init(nic)))
		goto err_loopback_none;

	if (loopback_mode == lb_phy)
		mdio_write(nic->netdev, nic->mii.phy_id, MII_BMCR,
			BMCR_LOOPBACK);

	e100_start_receiver(nic, NULL);

	if (!(skb = netdev_alloc_skb(nic->netdev, ETH_DATA_LEN))) {
		err = -ENOMEM;
		goto err_loopback_none;
	}
	skb_put(skb, ETH_DATA_LEN);
	memset(skb->data, 0xFF, ETH_DATA_LEN);
	e100_xmit_frame(skb, nic->netdev);

	/* Give the frame time to loop back through the hardware. */
	msleep(10);

	/* Sync the first RFD buffer for CPU access before comparing. */
	pci_dma_sync_single_for_cpu(nic->pdev, nic->rx_to_clean->dma_addr,
			RFD_BUF_LEN, PCI_DMA_BIDIRECTIONAL);

	if (memcmp(nic->rx_to_clean->skb->data + sizeof(struct rfd),
	   skb->data, ETH_DATA_LEN))
		err = -EAGAIN;

err_loopback_none:
	mdio_write(nic->netdev, nic->mii.phy_id, MII_BMCR, 0);
	nic->loopback = lb_none;
	e100_clean_cbs(nic);
	e100_hw_reset(nic);
err_clean_rx:
	e100_rx_clean_list(nic);
	return err;
}
       
  2574 
       
  2575 #define MII_LED_CONTROL	0x1B
       
  2576 #define E100_82552_LED_OVERRIDE 0x19
       
  2577 #define E100_82552_LED_ON       0x000F /* LEDTX and LED_RX both on */
       
  2578 #define E100_82552_LED_OFF      0x000A /* LEDTX and LED_RX both off */
       
  2579 
       
  2580 static int e100_get_settings(struct net_device *netdev, struct ethtool_cmd *cmd)
       
  2581 {
       
  2582 	struct nic *nic = netdev_priv(netdev);
       
  2583 	return mii_ethtool_gset(&nic->mii, cmd);
       
  2584 }
       
  2585 
       
  2586 static int e100_set_settings(struct net_device *netdev, struct ethtool_cmd *cmd)
       
  2587 {
       
  2588 	struct nic *nic = netdev_priv(netdev);
       
  2589 	int err;
       
  2590 
       
  2591 	mdio_write(netdev, nic->mii.phy_id, MII_BMCR, BMCR_RESET);
       
  2592 	err = mii_ethtool_sset(&nic->mii, cmd);
       
  2593 	e100_exec_cb(nic, NULL, e100_configure);
       
  2594 
       
  2595 	return err;
       
  2596 }
       
  2597 
       
  2598 static void e100_get_drvinfo(struct net_device *netdev,
       
  2599 	struct ethtool_drvinfo *info)
       
  2600 {
       
  2601 	struct nic *nic = netdev_priv(netdev);
       
  2602 	strlcpy(info->driver, DRV_NAME, sizeof(info->driver));
       
  2603 	strlcpy(info->version, DRV_VERSION, sizeof(info->version));
       
  2604 	strlcpy(info->bus_info, pci_name(nic->pdev),
       
  2605 		sizeof(info->bus_info));
       
  2606 }
       
  2607 
       
  2608 #define E100_PHY_REGS 0x1C
       
  2609 static int e100_get_regs_len(struct net_device *netdev)
       
  2610 {
       
  2611 	struct nic *nic = netdev_priv(netdev);
       
  2612 	return 1 + E100_PHY_REGS + sizeof(nic->mem->dump_buf);
       
  2613 }
       
  2614 
       
/* ethtool get_regs: dump device state into @p.
 * Layout written (as u32 words): buff[0] = packed SCB cmd_hi/cmd_lo/
 * status; buff[1 .. 1 + E100_PHY_REGS] = MII PHY registers
 * E100_PHY_REGS down to 0; then sizeof(dump_buf) bytes of the
 * microcode statistics dump starting at buff[2 + E100_PHY_REGS].
 * NOTE(review): e100_get_regs_len() must report at least
 * (2 + E100_PHY_REGS) * sizeof(u32) + sizeof(dump_buf) bytes or this
 * function overruns the buffer ethtool allocates. */
static void e100_get_regs(struct net_device *netdev,
	struct ethtool_regs *regs, void *p)
{
	struct nic *nic = netdev_priv(netdev);
	u32 *buff = p;
	int i;

	regs->version = (1 << 24) | nic->pdev->revision;
	buff[0] = ioread8(&nic->csr->scb.cmd_hi) << 24 |
		ioread8(&nic->csr->scb.cmd_lo) << 16 |
		ioread16(&nic->csr->scb.status);
	for (i = E100_PHY_REGS; i >= 0; i--)
		buff[1 + E100_PHY_REGS - i] =
			mdio_read(netdev, nic->mii.phy_id, i);
	memset(nic->mem->dump_buf, 0, sizeof(nic->mem->dump_buf));
	e100_exec_cb(nic, NULL, e100_dump);
	/* give the dump command time to complete -- presumably 10 ms is
	 * the empirical upper bound; TODO confirm */
	msleep(10);
	memcpy(&buff[2 + E100_PHY_REGS], nic->mem->dump_buf,
		sizeof(nic->mem->dump_buf));
}
       
  2635 
       
  2636 static void e100_get_wol(struct net_device *netdev, struct ethtool_wolinfo *wol)
       
  2637 {
       
  2638 	struct nic *nic = netdev_priv(netdev);
       
  2639 	wol->supported = (nic->mac >= mac_82558_D101_A4) ?  WAKE_MAGIC : 0;
       
  2640 	wol->wolopts = (nic->flags & wol_magic) ? WAKE_MAGIC : 0;
       
  2641 }
       
  2642 
       
  2643 static int e100_set_wol(struct net_device *netdev, struct ethtool_wolinfo *wol)
       
  2644 {
       
  2645 	struct nic *nic = netdev_priv(netdev);
       
  2646 
       
  2647 	if ((wol->wolopts && wol->wolopts != WAKE_MAGIC) ||
       
  2648 	    !device_can_wakeup(&nic->pdev->dev))
       
  2649 		return -EOPNOTSUPP;
       
  2650 
       
  2651 	if (wol->wolopts)
       
  2652 		nic->flags |= wol_magic;
       
  2653 	else
       
  2654 		nic->flags &= ~wol_magic;
       
  2655 
       
  2656 	device_set_wakeup_enable(&nic->pdev->dev, wol->wolopts);
       
  2657 
       
  2658 	e100_exec_cb(nic, NULL, e100_configure);
       
  2659 
       
  2660 	return 0;
       
  2661 }
       
  2662 
       
  2663 static u32 e100_get_msglevel(struct net_device *netdev)
       
  2664 {
       
  2665 	struct nic *nic = netdev_priv(netdev);
       
  2666 	return nic->msg_enable;
       
  2667 }
       
  2668 
       
  2669 static void e100_set_msglevel(struct net_device *netdev, u32 value)
       
  2670 {
       
  2671 	struct nic *nic = netdev_priv(netdev);
       
  2672 	nic->msg_enable = value;
       
  2673 }
       
  2674 
       
  2675 static int e100_nway_reset(struct net_device *netdev)
       
  2676 {
       
  2677 	struct nic *nic = netdev_priv(netdev);
       
  2678 	return mii_nway_restart(&nic->mii);
       
  2679 }
       
  2680 
       
  2681 static u32 e100_get_link(struct net_device *netdev)
       
  2682 {
       
  2683 	struct nic *nic = netdev_priv(netdev);
       
  2684 	return mii_link_ok(&nic->mii);
       
  2685 }
       
  2686 
       
  2687 static int e100_get_eeprom_len(struct net_device *netdev)
       
  2688 {
       
  2689 	struct nic *nic = netdev_priv(netdev);
       
  2690 	return nic->eeprom_wc << 1;
       
  2691 }
       
  2692 
       
  2693 #define E100_EEPROM_MAGIC	0x1234
       
  2694 static int e100_get_eeprom(struct net_device *netdev,
       
  2695 	struct ethtool_eeprom *eeprom, u8 *bytes)
       
  2696 {
       
  2697 	struct nic *nic = netdev_priv(netdev);
       
  2698 
       
  2699 	eeprom->magic = E100_EEPROM_MAGIC;
       
  2700 	memcpy(bytes, &((u8 *)nic->eeprom)[eeprom->offset], eeprom->len);
       
  2701 
       
  2702 	return 0;
       
  2703 }
       
  2704 
       
  2705 static int e100_set_eeprom(struct net_device *netdev,
       
  2706 	struct ethtool_eeprom *eeprom, u8 *bytes)
       
  2707 {
       
  2708 	struct nic *nic = netdev_priv(netdev);
       
  2709 
       
  2710 	if (eeprom->magic != E100_EEPROM_MAGIC)
       
  2711 		return -EINVAL;
       
  2712 
       
  2713 	memcpy(&((u8 *)nic->eeprom)[eeprom->offset], bytes, eeprom->len);
       
  2714 
       
  2715 	return e100_eeprom_save(nic, eeprom->offset >> 1,
       
  2716 		(eeprom->len >> 1) + 1);
       
  2717 }
       
  2718 
       
  2719 static void e100_get_ringparam(struct net_device *netdev,
       
  2720 	struct ethtool_ringparam *ring)
       
  2721 {
       
  2722 	struct nic *nic = netdev_priv(netdev);
       
  2723 	struct param_range *rfds = &nic->params.rfds;
       
  2724 	struct param_range *cbs = &nic->params.cbs;
       
  2725 
       
  2726 	ring->rx_max_pending = rfds->max;
       
  2727 	ring->tx_max_pending = cbs->max;
       
  2728 	ring->rx_pending = rfds->count;
       
  2729 	ring->tx_pending = cbs->count;
       
  2730 }
       
  2731 
       
  2732 static int e100_set_ringparam(struct net_device *netdev,
       
  2733 	struct ethtool_ringparam *ring)
       
  2734 {
       
  2735 	struct nic *nic = netdev_priv(netdev);
       
  2736 	struct param_range *rfds = &nic->params.rfds;
       
  2737 	struct param_range *cbs = &nic->params.cbs;
       
  2738 
       
  2739 	if ((ring->rx_mini_pending) || (ring->rx_jumbo_pending))
       
  2740 		return -EINVAL;
       
  2741 
       
  2742 	if (netif_running(netdev))
       
  2743 		e100_down(nic);
       
  2744 	rfds->count = max(ring->rx_pending, rfds->min);
       
  2745 	rfds->count = min(rfds->count, rfds->max);
       
  2746 	cbs->count = max(ring->tx_pending, cbs->min);
       
  2747 	cbs->count = min(cbs->count, cbs->max);
       
  2748 	netif_info(nic, drv, nic->netdev, "Ring Param settings: rx: %d, tx %d\n",
       
  2749 		   rfds->count, cbs->count);
       
  2750 	if (netif_running(netdev))
       
  2751 		e100_up(nic);
       
  2752 
       
  2753 	return 0;
       
  2754 }
       
  2755 
       
/* Labels for the ethtool self-test results; the order must match the
 * data[] slots filled in by e100_diag_test(). */
static const char e100_gstrings_test[][ETH_GSTRING_LEN] = {
	"Link test     (on/offline)",
	"Eeprom test   (on/offline)",
	"Self test        (offline)",
	"Mac loopback     (offline)",
	"Phy loopback     (offline)",
};
#define E100_TEST_LEN	ARRAY_SIZE(e100_gstrings_test)
       
  2764 
       
/* ethtool self-test entry point.  The link and EEPROM checks always
 * run; the offline tests (self test, MAC loopback, PHY loopback)
 * additionally bounce the interface and restore the saved link
 * settings afterwards.  A non-zero data[i] marks test i (see
 * e100_gstrings_test) as failed. */
static void e100_diag_test(struct net_device *netdev,
	struct ethtool_test *test, u64 *data)
{
	struct ethtool_cmd cmd;
	struct nic *nic = netdev_priv(netdev);
	int i, err;

	memset(data, 0, E100_TEST_LEN * sizeof(u64));
	data[0] = !mii_link_ok(&nic->mii);
	data[1] = e100_eeprom_load(nic);
	if (test->flags & ETH_TEST_FL_OFFLINE) {

		/* save speed, duplex & autoneg settings */
		err = mii_ethtool_gset(&nic->mii, &cmd);

		if (netif_running(netdev))
			e100_down(nic);
		data[2] = e100_self_test(nic);
		data[3] = e100_loopback_test(nic, lb_mac);
		data[4] = e100_loopback_test(nic, lb_phy);

		/* restore speed, duplex & autoneg settings */
		err = mii_ethtool_sset(&nic->mii, &cmd);

		if (netif_running(netdev))
			e100_up(nic);
	}
	for (i = 0; i < E100_TEST_LEN; i++)
		test->flags |= data[i] ? ETH_TEST_FL_FAILED : 0;

	/* presumably gives the link time to renegotiate after the
	 * offline tests before returning to userspace -- TODO confirm */
	msleep_interruptible(4 * 1000);
}
       
  2797 
       
  2798 static int e100_set_phys_id(struct net_device *netdev,
       
  2799 			    enum ethtool_phys_id_state state)
       
  2800 {
       
  2801 	struct nic *nic = netdev_priv(netdev);
       
  2802 	enum led_state {
       
  2803 		led_on     = 0x01,
       
  2804 		led_off    = 0x04,
       
  2805 		led_on_559 = 0x05,
       
  2806 		led_on_557 = 0x07,
       
  2807 	};
       
  2808 	u16 led_reg = (nic->phy == phy_82552_v) ? E100_82552_LED_OVERRIDE :
       
  2809 		MII_LED_CONTROL;
       
  2810 	u16 leds = 0;
       
  2811 
       
  2812 	switch (state) {
       
  2813 	case ETHTOOL_ID_ACTIVE:
       
  2814 		return 2;
       
  2815 
       
  2816 	case ETHTOOL_ID_ON:
       
  2817 		leds = (nic->phy == phy_82552_v) ? E100_82552_LED_ON :
       
  2818 		       (nic->mac < mac_82559_D101M) ? led_on_557 : led_on_559;
       
  2819 		break;
       
  2820 
       
  2821 	case ETHTOOL_ID_OFF:
       
  2822 		leds = (nic->phy == phy_82552_v) ? E100_82552_LED_OFF : led_off;
       
  2823 		break;
       
  2824 
       
  2825 	case ETHTOOL_ID_INACTIVE:
       
  2826 		break;
       
  2827 	}
       
  2828 
       
  2829 	mdio_write(netdev, nic->mii.phy_id, led_reg, leds);
       
  2830 	return 0;
       
  2831 }
       
  2832 
       
/* Labels for ethtool -S counters.  The first E100_NET_STATS_LEN
 * entries mirror the leading fields of struct net_device_stats (read
 * as an unsigned long array in e100_get_ethtool_stats()); the rest
 * are driver-private counters appended in the same order there. */
static const char e100_gstrings_stats[][ETH_GSTRING_LEN] = {
	"rx_packets", "tx_packets", "rx_bytes", "tx_bytes", "rx_errors",
	"tx_errors", "rx_dropped", "tx_dropped", "multicast", "collisions",
	"rx_length_errors", "rx_over_errors", "rx_crc_errors",
	"rx_frame_errors", "rx_fifo_errors", "rx_missed_errors",
	"tx_aborted_errors", "tx_carrier_errors", "tx_fifo_errors",
	"tx_heartbeat_errors", "tx_window_errors",
	/* device-specific stats */
	"tx_deferred", "tx_single_collisions", "tx_multi_collisions",
	"tx_flow_control_pause", "rx_flow_control_pause",
	"rx_flow_control_unsupported", "tx_tco_packets", "rx_tco_packets",
	"rx_short_frame_errors", "rx_over_length_errors",
};
/* number of generic netdev counters at the head of the table above */
#define E100_NET_STATS_LEN	21
#define E100_STATS_LEN	ARRAY_SIZE(e100_gstrings_stats)
       
  2848 
       
  2849 static int e100_get_sset_count(struct net_device *netdev, int sset)
       
  2850 {
       
  2851 	switch (sset) {
       
  2852 	case ETH_SS_TEST:
       
  2853 		return E100_TEST_LEN;
       
  2854 	case ETH_SS_STATS:
       
  2855 		return E100_STATS_LEN;
       
  2856 	default:
       
  2857 		return -EOPNOTSUPP;
       
  2858 	}
       
  2859 }
       
  2860 
       
/* ethtool: fill @data with the counters named in e100_gstrings_stats.
 *
 * The first E100_NET_STATS_LEN slots are lifted straight out of
 * netdev->stats by treating the struct as an array of unsigned long
 * (assumes struct net_device_stats is laid out as consecutive
 * unsigned-long fields -- NOTE(review): layout-dependent, confirm
 * against the target kernel).  The remaining slots are the
 * driver-private counters, in the same order as the string table.
 */
static void e100_get_ethtool_stats(struct net_device *netdev,
	struct ethtool_stats *stats, u64 *data)
{
	struct nic *nic = netdev_priv(netdev);
	int i;

	for (i = 0; i < E100_NET_STATS_LEN; i++)
		data[i] = ((unsigned long *)&netdev->stats)[i];

	/* device-specific stats; order must match e100_gstrings_stats */
	data[i++] = nic->tx_deferred;
	data[i++] = nic->tx_single_collisions;
	data[i++] = nic->tx_multiple_collisions;
	data[i++] = nic->tx_fc_pause;
	data[i++] = nic->rx_fc_pause;
	data[i++] = nic->rx_fc_unsupported;
	data[i++] = nic->tx_tco_frames;
	data[i++] = nic->rx_tco_frames;
	data[i++] = nic->rx_short_frame_errors;
	data[i++] = nic->rx_over_length_errors;
}
       
  2881 
       
  2882 static void e100_get_strings(struct net_device *netdev, u32 stringset, u8 *data)
       
  2883 {
       
  2884 	switch (stringset) {
       
  2885 	case ETH_SS_TEST:
       
  2886 		memcpy(data, *e100_gstrings_test, sizeof(e100_gstrings_test));
       
  2887 		break;
       
  2888 	case ETH_SS_STATS:
       
  2889 		memcpy(data, *e100_gstrings_stats, sizeof(e100_gstrings_stats));
       
  2890 		break;
       
  2891 	}
       
  2892 }
       
  2893 
       
/* ethtool entry points; all handlers are driver-local except
 * get_ts_info, which falls back to the generic stub. */
static const struct ethtool_ops e100_ethtool_ops = {
	.get_settings		= e100_get_settings,
	.set_settings		= e100_set_settings,
	.get_drvinfo		= e100_get_drvinfo,
	.get_regs_len		= e100_get_regs_len,
	.get_regs		= e100_get_regs,
	.get_wol		= e100_get_wol,
	.set_wol		= e100_set_wol,
	.get_msglevel		= e100_get_msglevel,
	.set_msglevel		= e100_set_msglevel,
	.nway_reset		= e100_nway_reset,
	.get_link		= e100_get_link,
	.get_eeprom_len		= e100_get_eeprom_len,
	.get_eeprom		= e100_get_eeprom,
	.set_eeprom		= e100_set_eeprom,
	.get_ringparam		= e100_get_ringparam,
	.set_ringparam		= e100_set_ringparam,
	.self_test		= e100_diag_test,
	.get_strings		= e100_get_strings,
	.set_phys_id		= e100_set_phys_id,
	.get_ethtool_stats	= e100_get_ethtool_stats,
	.get_sset_count		= e100_get_sset_count,
	.get_ts_info		= ethtool_op_get_ts_info,
};
       
  2918 
       
  2919 static int e100_do_ioctl(struct net_device *netdev, struct ifreq *ifr, int cmd)
       
  2920 {
       
  2921 	struct nic *nic = netdev_priv(netdev);
       
  2922 
       
  2923 	return generic_mii_ioctl(&nic->mii, if_mii(ifr), cmd, NULL);
       
  2924 }
       
  2925 
       
  2926 static int e100_alloc(struct nic *nic)
       
  2927 {
       
  2928 	nic->mem = pci_alloc_consistent(nic->pdev, sizeof(struct mem),
       
  2929 		&nic->dma_addr);
       
  2930 	return nic->mem ? 0 : -ENOMEM;
       
  2931 }
       
  2932 
       
  2933 static void e100_free(struct nic *nic)
       
  2934 {
       
  2935 	if (nic->mem) {
       
  2936 		pci_free_consistent(nic->pdev, sizeof(struct mem),
       
  2937 			nic->mem, nic->dma_addr);
       
  2938 		nic->mem = NULL;
       
  2939 	}
       
  2940 }
       
  2941 
       
  2942 static int e100_open(struct net_device *netdev)
       
  2943 {
       
  2944 	struct nic *nic = netdev_priv(netdev);
       
  2945 	int err = 0;
       
  2946 
       
  2947 	if (!nic->ecdev)
       
  2948 		netif_carrier_off(netdev);
       
  2949 	if ((err = e100_up(nic)))
       
  2950 		netif_err(nic, ifup, nic->netdev, "Cannot open interface, aborting\n");
       
  2951 	return err;
       
  2952 }
       
  2953 
       
  2954 static int e100_close(struct net_device *netdev)
       
  2955 {
       
  2956 	e100_down(netdev_priv(netdev));
       
  2957 	return 0;
       
  2958 }
       
  2959 
       
  2960 static int e100_set_features(struct net_device *netdev,
       
  2961 			     netdev_features_t features)
       
  2962 {
       
  2963 	struct nic *nic = netdev_priv(netdev);
       
  2964 	netdev_features_t changed = features ^ netdev->features;
       
  2965 
       
  2966 	if (!(changed & (NETIF_F_RXFCS | NETIF_F_RXALL)))
       
  2967 		return 0;
       
  2968 
       
  2969 	netdev->features = features;
       
  2970 	e100_exec_cb(nic, NULL, e100_configure);
       
  2971 	return 0;
       
  2972 }
       
  2973 
       
/* net_device callbacks for the e100 driver. */
static const struct net_device_ops e100_netdev_ops = {
	.ndo_open		= e100_open,
	.ndo_stop		= e100_close,
	.ndo_start_xmit		= e100_xmit_frame,
	.ndo_validate_addr	= eth_validate_addr,
	.ndo_set_rx_mode	= e100_set_multicast_list,
	.ndo_set_mac_address	= e100_set_mac_address,
	.ndo_change_mtu		= e100_change_mtu,
	.ndo_do_ioctl		= e100_do_ioctl,
	.ndo_tx_timeout		= e100_tx_timeout,
#ifdef CONFIG_NET_POLL_CONTROLLER
	.ndo_poll_controller	= e100_netpoll,
#endif
	.ndo_set_features	= e100_set_features,
};
       
  2989 
       
  2990 static int e100_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
       
  2991 {
       
  2992 	struct net_device *netdev;
       
  2993 	struct nic *nic;
       
  2994 	int err;
       
  2995 
       
  2996 	if (!(netdev = alloc_etherdev(sizeof(struct nic))))
       
  2997 		return -ENOMEM;
       
  2998 
       
  2999 	netdev->hw_features |= NETIF_F_RXFCS;
       
  3000 	netdev->priv_flags |= IFF_SUPP_NOFCS;
       
  3001 	netdev->hw_features |= NETIF_F_RXALL;
       
  3002 
       
  3003 	netdev->netdev_ops = &e100_netdev_ops;
       
  3004 	netdev->ethtool_ops = &e100_ethtool_ops;
       
  3005 	netdev->watchdog_timeo = E100_WATCHDOG_PERIOD;
       
  3006 	strncpy(netdev->name, pci_name(pdev), sizeof(netdev->name) - 1);
       
  3007 
       
  3008 	nic = netdev_priv(netdev);
       
  3009 	netif_napi_add(netdev, &nic->napi, e100_poll, E100_NAPI_WEIGHT);
       
  3010 	nic->netdev = netdev;
       
  3011 	nic->pdev = pdev;
       
  3012 	nic->msg_enable = (1 << debug) - 1;
       
  3013 	nic->mdio_ctrl = mdio_ctrl_hw;
       
  3014 	pci_set_drvdata(pdev, netdev);
       
  3015 
       
  3016 	if ((err = pci_enable_device(pdev))) {
       
  3017 		netif_err(nic, probe, nic->netdev, "Cannot enable PCI device, aborting\n");
       
  3018 		goto err_out_free_dev;
       
  3019 	}
       
  3020 
       
  3021 	if (!(pci_resource_flags(pdev, 0) & IORESOURCE_MEM)) {
       
  3022 		netif_err(nic, probe, nic->netdev, "Cannot find proper PCI device base address, aborting\n");
       
  3023 		err = -ENODEV;
       
  3024 		goto err_out_disable_pdev;
       
  3025 	}
       
  3026 
       
  3027 	if ((err = pci_request_regions(pdev, DRV_NAME))) {
       
  3028 		netif_err(nic, probe, nic->netdev, "Cannot obtain PCI resources, aborting\n");
       
  3029 		goto err_out_disable_pdev;
       
  3030 	}
       
  3031 
       
  3032 	if ((err = pci_set_dma_mask(pdev, DMA_BIT_MASK(32)))) {
       
  3033 		netif_err(nic, probe, nic->netdev, "No usable DMA configuration, aborting\n");
       
  3034 		goto err_out_free_res;
       
  3035 	}
       
  3036 
       
  3037 	SET_NETDEV_DEV(netdev, &pdev->dev);
       
  3038 
       
  3039 	if (use_io)
       
  3040 		netif_info(nic, probe, nic->netdev, "using i/o access mode\n");
       
  3041 
       
  3042 	nic->csr = pci_iomap(pdev, (use_io ? 1 : 0), sizeof(struct csr));
       
  3043 	if (!nic->csr) {
       
  3044 		netif_err(nic, probe, nic->netdev, "Cannot map device registers, aborting\n");
       
  3045 		err = -ENOMEM;
       
  3046 		goto err_out_free_res;
       
  3047 	}
       
  3048 
       
  3049 	if (ent->driver_data)
       
  3050 		nic->flags |= ich;
       
  3051 	else
       
  3052 		nic->flags &= ~ich;
       
  3053 
       
  3054 	e100_get_defaults(nic);
       
  3055 
       
  3056 	/* D100 MAC doesn't allow rx of vlan packets with normal MTU */
       
  3057 	if (nic->mac < mac_82558_D101_A4)
       
  3058 		netdev->features |= NETIF_F_VLAN_CHALLENGED;
       
  3059 
       
  3060 	/* locks must be initialized before calling hw_reset */
       
  3061 	spin_lock_init(&nic->cb_lock);
       
  3062 	spin_lock_init(&nic->cmd_lock);
       
  3063 	spin_lock_init(&nic->mdio_lock);
       
  3064 
       
  3065 	/* Reset the device before pci_set_master() in case device is in some
       
  3066 	 * funky state and has an interrupt pending - hint: we don't have the
       
  3067 	 * interrupt handler registered yet. */
       
  3068 	e100_hw_reset(nic);
       
  3069 
       
  3070 	pci_set_master(pdev);
       
  3071 
       
  3072 	init_timer(&nic->watchdog);
       
  3073 	nic->watchdog.function = e100_watchdog;
       
  3074 	nic->watchdog.data = (unsigned long)nic;
       
  3075 
       
  3076 	INIT_WORK(&nic->tx_timeout_task, e100_tx_timeout_task);
       
  3077 
       
  3078 	if ((err = e100_alloc(nic))) {
       
  3079 		netif_err(nic, probe, nic->netdev, "Cannot alloc driver memory, aborting\n");
       
  3080 		goto err_out_iounmap;
       
  3081 	}
       
  3082 
       
  3083 	if ((err = e100_eeprom_load(nic)))
       
  3084 		goto err_out_free;
       
  3085 
       
  3086 	e100_phy_init(nic);
       
  3087 
       
  3088 	memcpy(netdev->dev_addr, nic->eeprom, ETH_ALEN);
       
  3089 	if (!is_valid_ether_addr(netdev->dev_addr)) {
       
  3090 		if (!eeprom_bad_csum_allow) {
       
  3091 			netif_err(nic, probe, nic->netdev, "Invalid MAC address from EEPROM, aborting\n");
       
  3092 			err = -EAGAIN;
       
  3093 			goto err_out_free;
       
  3094 		} else {
       
  3095 			netif_err(nic, probe, nic->netdev, "Invalid MAC address from EEPROM, you MUST configure one.\n");
       
  3096 		}
       
  3097 	}
       
  3098 
       
  3099 	/* Wol magic packet can be enabled from eeprom */
       
  3100 	if ((nic->mac >= mac_82558_D101_A4) &&
       
  3101 	   (nic->eeprom[eeprom_id] & eeprom_id_wol)) {
       
  3102 		nic->flags |= wol_magic;
       
  3103 		device_set_wakeup_enable(&pdev->dev, true);
       
  3104 	}
       
  3105 
       
  3106 	/* ack any pending wake events, disable PME */
       
  3107 	pci_pme_active(pdev, false);
       
  3108 
       
  3109 	// offer device to EtherCAT master module
       
  3110 	nic->ecdev = ecdev_offer(netdev, e100_ec_poll, THIS_MODULE);
       
  3111 
       
  3112 	if (!nic->ecdev) {
       
  3113 		strcpy(netdev->name, "eth%d");
       
  3114 		if ((err = register_netdev(netdev))) {
       
  3115 			netif_err(nic, probe, nic->netdev,
       
  3116 					"Cannot register net device, aborting\n");
       
  3117 			goto err_out_free;
       
  3118 		}
       
  3119 	}
       
  3120 
       
  3121 	nic->cbs_pool = pci_pool_create(netdev->name,
       
  3122 			   nic->pdev,
       
  3123 			   nic->params.cbs.max * sizeof(struct cb),
       
  3124 			   sizeof(u32),
       
  3125 			   0);
       
  3126 	netif_info(nic, probe, nic->netdev,
       
  3127 		   "addr 0x%llx, irq %d, MAC addr %pM\n",
       
  3128 		   (unsigned long long)pci_resource_start(pdev, use_io ? 1 : 0),
       
  3129 		   pdev->irq, netdev->dev_addr);
       
  3130 
       
  3131 	if (nic->ecdev) {
       
  3132 		err = ecdev_open(nic->ecdev);
       
  3133 		if (err) {
       
  3134 			ecdev_withdraw(nic->ecdev);
       
  3135 			goto err_out_free;
       
  3136 		}
       
  3137 	}
       
  3138 
       
  3139 	return 0;
       
  3140 
       
  3141 err_out_free:
       
  3142 	e100_free(nic);
       
  3143 err_out_iounmap:
       
  3144 	pci_iounmap(pdev, nic->csr);
       
  3145 err_out_free_res:
       
  3146 	pci_release_regions(pdev);
       
  3147 err_out_disable_pdev:
       
  3148 	pci_disable_device(pdev);
       
  3149 err_out_free_dev:
       
  3150 	free_netdev(netdev);
       
  3151 	return err;
       
  3152 }
       
  3153 
       
  3154 static void e100_remove(struct pci_dev *pdev)
       
  3155 {
       
  3156 	struct net_device *netdev = pci_get_drvdata(pdev);
       
  3157 
       
  3158 	if (netdev) {
       
  3159 		struct nic *nic = netdev_priv(netdev);
       
  3160 		if (nic->ecdev) {
       
  3161 			ecdev_close(nic->ecdev);
       
  3162 			ecdev_withdraw(nic->ecdev);
       
  3163 		} else {
       
  3164 			unregister_netdev(netdev);
       
  3165 		}
       
  3166 
       
  3167 		e100_free(nic);
       
  3168 		pci_iounmap(pdev, nic->csr);
       
  3169 		pci_pool_destroy(nic->cbs_pool);
       
  3170 		free_netdev(netdev);
       
  3171 		pci_release_regions(pdev);
       
  3172 		pci_disable_device(pdev);
       
  3173 	}
       
  3174 }
       
  3175 
       
#define E100_82552_SMARTSPEED   0x14   /* SmartSpeed Ctrl register */
#define E100_82552_REV_ANEG     0x0200 /* Reverse auto-negotiation */
#define E100_82552_ANEG_NOW     0x0400 /* Auto-negotiate now */
/* Common suspend/shutdown path: stop the interface, detach it, save
 * PCI state and decide whether the device should act as a wake source.
 *
 * @enable_wake: set to true when WoL magic-packet mode or ASF is
 * active, false otherwise.
 */
static void __e100_shutdown(struct pci_dev *pdev, bool *enable_wake)
{
	struct net_device *netdev = pci_get_drvdata(pdev);
	struct nic *nic = netdev_priv(netdev);

	if (netif_running(netdev))
		e100_down(nic);
	netif_device_detach(netdev);

	pci_save_state(pdev);

	/* NOTE(review): bitwise '|' (not '||') -- result is equivalent
	 * for truth testing, but e100_asf() is always evaluated; this
	 * matches the in-tree e100 driver, presumably intentional. */
	if ((nic->flags & wol_magic) | e100_asf(nic)) {
		/* enable reverse auto-negotiation */
		if (nic->phy == phy_82552_v) {
			u16 smartspeed = mdio_read(netdev, nic->mii.phy_id,
			                           E100_82552_SMARTSPEED);

			mdio_write(netdev, nic->mii.phy_id,
			           E100_82552_SMARTSPEED, smartspeed |
			           E100_82552_REV_ANEG | E100_82552_ANEG_NOW);
		}
		*enable_wake = true;
	} else {
		*enable_wake = false;
	}

	pci_clear_master(pdev);
}
       
  3207 
       
  3208 static int __e100_power_off(struct pci_dev *pdev, bool wake)
       
  3209 {
       
  3210 	if (wake)
       
  3211 		return pci_prepare_to_sleep(pdev);
       
  3212 
       
  3213 	pci_wake_from_d3(pdev, false);
       
  3214 	pci_set_power_state(pdev, PCI_D3hot);
       
  3215 
       
  3216 	return 0;
       
  3217 }
       
  3218 
       
  3219 #ifdef CONFIG_PM
       
  3220 static int e100_suspend(struct pci_dev *pdev, pm_message_t state)
       
  3221 {
       
  3222 	bool wake;
       
  3223 	__e100_shutdown(pdev, &wake);
       
  3224 	return __e100_power_off(pdev, wake);
       
  3225 }
       
  3226 
       
/* PM resume hook: restore PCI state, disable PME, undo the reverse
 * auto-negotiation configured in __e100_shutdown() and restart the
 * interface if it was running when we suspended. */
static int e100_resume(struct pci_dev *pdev)
{
	struct net_device *netdev = pci_get_drvdata(pdev);
	struct nic *nic = netdev_priv(netdev);

	pci_set_power_state(pdev, PCI_D0);
	pci_restore_state(pdev);
	/* ack any pending wake events, disable PME */
	pci_enable_wake(pdev, PCI_D0, 0);

	/* disable reverse auto-negotiation */
	if (nic->phy == phy_82552_v) {
		u16 smartspeed = mdio_read(netdev, nic->mii.phy_id,
		                           E100_82552_SMARTSPEED);

		mdio_write(netdev, nic->mii.phy_id,
		           E100_82552_SMARTSPEED,
		           smartspeed & ~(E100_82552_REV_ANEG));
	}

	netif_device_attach(netdev);
	if (netif_running(netdev))
		e100_up(nic);

	return 0;
}
       
  3253 #endif /* CONFIG_PM */
       
  3254 
       
  3255 static void e100_shutdown(struct pci_dev *pdev)
       
  3256 {
       
  3257 	bool wake;
       
  3258 	__e100_shutdown(pdev, &wake);
       
  3259 	if (system_state == SYSTEM_POWER_OFF)
       
  3260 		__e100_power_off(pdev, wake);
       
  3261 }
       
  3262 
       
  3263 /* ------------------ PCI Error Recovery infrastructure  -------------- */
       
  3264 /**
       
  3265  * e100_io_error_detected - called when PCI error is detected.
       
  3266  * @pdev: Pointer to PCI device
       
  3267  * @state: The current pci connection state
       
  3268  */
       
  3269 static pci_ers_result_t e100_io_error_detected(struct pci_dev *pdev, pci_channel_state_t state)
       
  3270 {
       
  3271 	struct net_device *netdev = pci_get_drvdata(pdev);
       
  3272 	struct nic *nic = netdev_priv(netdev);
       
  3273 
       
  3274 	if (nic->ecdev)
       
  3275 		return -EBUSY;
       
  3276 
       
  3277 	netif_device_detach(netdev);
       
  3278 
       
  3279 	if (state == pci_channel_io_perm_failure)
       
  3280 		return PCI_ERS_RESULT_DISCONNECT;
       
  3281 
       
  3282 	if (netif_running(netdev))
       
  3283 		e100_down(nic);
       
  3284 	pci_disable_device(pdev);
       
  3285 
       
  3286 	/* Request a slot reset. */
       
  3287 	return PCI_ERS_RESULT_NEED_RESET;
       
  3288 }
       
  3289 
       
  3290 /**
       
  3291  * e100_io_slot_reset - called after the pci bus has been reset.
       
  3292  * @pdev: Pointer to PCI device
       
  3293  *
       
  3294  * Restart the card from scratch.
       
  3295  */
       
  3296 static pci_ers_result_t e100_io_slot_reset(struct pci_dev *pdev)
       
  3297 {
       
  3298 	struct net_device *netdev = pci_get_drvdata(pdev);
       
  3299 	struct nic *nic = netdev_priv(netdev);
       
  3300 
       
  3301 	if (nic->ecdev)
       
  3302 		return -EBUSY;
       
  3303 
       
  3304 	if (pci_enable_device(pdev)) {
       
  3305 		pr_err("Cannot re-enable PCI device after reset\n");
       
  3306 		return PCI_ERS_RESULT_DISCONNECT;
       
  3307 	}
       
  3308 	pci_set_master(pdev);
       
  3309 
       
  3310 	/* Only one device per card can do a reset */
       
  3311 	if (0 != PCI_FUNC(pdev->devfn))
       
  3312 		return PCI_ERS_RESULT_RECOVERED;
       
  3313 	e100_hw_reset(nic);
       
  3314 	e100_phy_init(nic);
       
  3315 
       
  3316 	return PCI_ERS_RESULT_RECOVERED;
       
  3317 }
       
  3318 
       
  3319 /**
       
  3320  * e100_io_resume - resume normal operations
       
  3321  * @pdev: Pointer to PCI device
       
  3322  *
       
  3323  * Resume normal operations after an error recovery
       
  3324  * sequence has been completed.
       
  3325  */
       
  3326 static void e100_io_resume(struct pci_dev *pdev)
       
  3327 {
       
  3328 	struct net_device *netdev = pci_get_drvdata(pdev);
       
  3329 	struct nic *nic = netdev_priv(netdev);
       
  3330 
       
  3331 	/* ack any pending wake events, disable PME */
       
  3332 	pci_enable_wake(pdev, PCI_D0, 0);
       
  3333 
       
  3334 	if (!nic->ecdev)
       
  3335 		netif_device_attach(netdev);
       
  3336 	if (nic->ecdev || netif_running(netdev)) {
       
  3337 		e100_open(netdev);
       
  3338 		if (!nic->ecdev)
       
  3339 			mod_timer(&nic->watchdog, jiffies);
       
  3340 	}
       
  3341 }
       
  3342 
       
/* PCI AER recovery callbacks (implemented by the e100_io_* functions
 * above). */
static const struct pci_error_handlers e100_err_handler = {
	.error_detected = e100_io_error_detected,
	.slot_reset = e100_io_slot_reset,
	.resume = e100_io_resume,
};
       
  3348 
       
/* PCI driver glue: probe/remove, optional power-management hooks,
 * shutdown and error recovery. */
static struct pci_driver e100_driver = {
	.name =         DRV_NAME,
	.id_table =     e100_id_table,
	.probe =        e100_probe,
	.remove =       e100_remove,
#ifdef CONFIG_PM
	/* Power Management hooks */
	.suspend =      e100_suspend,
	.resume =       e100_resume,
#endif
	.shutdown =     e100_shutdown,
	.err_handler = &e100_err_handler,
};
       
  3362 
       
  3363 static int __init e100_init_module(void)
       
  3364 {
       
  3365 	if (((1 << debug) - 1) & NETIF_MSG_DRV) {
       
  3366 		pr_info("%s %s, %s\n", DRV_NAME, DRV_DESCRIPTION, DRV_VERSION);
       
  3367 		pr_info("%s\n", DRV_COPYRIGHT);
       
  3368 	}
       
  3369 	return pci_register_driver(&e100_driver);
       
  3370 }
       
  3371 
       
/* Module exit: unregister the PCI driver (which removes all bound
 * devices), logging progress before and after. */
static void __exit e100_cleanup_module(void)
{
	printk(KERN_INFO DRV_NAME " cleaning up module...\n");
	pci_unregister_driver(&e100_driver);
	printk(KERN_INFO DRV_NAME " module cleaned up.\n");
}
       
  3378 
       
  3379 module_init(e100_init_module);
       
  3380 module_exit(e100_cleanup_module);