devices/e100-3.0-ethercat.c
changeset 2389 02f32b78f79d
child 2582 87e502828b3f
equal deleted inserted replaced
2388:7fc286c8e72b 2389:02f32b78f79d
       
     1 /******************************************************************************
       
     2  *
       
     3  *  $Id$
       
     4  *
       
     5  *  Copyright (C) 2007-2012  Florian Pose, Ingenieurgemeinschaft IgH
       
     6  *
       
     7  *  This file is part of the IgH EtherCAT Master.
       
     8  *
       
     9  *  The IgH EtherCAT Master is free software; you can redistribute it and/or
       
    10  *  modify it under the terms of the GNU General Public License version 2, as
       
    11  *  published by the Free Software Foundation.
       
    12  *
       
    13  *  The IgH EtherCAT Master is distributed in the hope that it will be useful,
       
    14  *  but WITHOUT ANY WARRANTY; without even the implied warranty of
       
    15  *  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General
       
    16  *  Public License for more details.
       
    17  *
       
    18  *  You should have received a copy of the GNU General Public License along
       
    19  *  with the IgH EtherCAT Master; if not, write to the Free Software
       
    20  *  Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA  02110-1301  USA
       
    21  *
       
    22  *  ---
       
    23  *
       
    24  *  The license mentioned above concerns the source code only. Using the
       
    25  *  EtherCAT technology and brand is only permitted in compliance with the
       
    26  *  industrial property and similar rights of Beckhoff Automation GmbH.
       
    27  *
       
    28  *  ---
       
    29  *
       
    30  *  vim: noexpandtab
       
    31  *
       
    32  *****************************************************************************/
       
    33 
       
    34 /**
       
    35    \file
       
    36    EtherCAT driver for e100-compatible NICs.
       
    37 */
       
    38 
       
    39 /* Former documentation: */
       
    40 
       
    41 /*******************************************************************************
       
    42 
       
    43   Intel PRO/100 Linux driver
       
    44   Copyright(c) 1999 - 2006 Intel Corporation.
       
    45 
       
    46   This program is free software; you can redistribute it and/or modify it
       
    47   under the terms and conditions of the GNU General Public License,
       
    48   version 2, as published by the Free Software Foundation.
       
    49 
       
    50   This program is distributed in the hope it will be useful, but WITHOUT
       
    51   ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
       
    52   FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
       
    53   more details.
       
    54 
       
    55   You should have received a copy of the GNU General Public License along with
       
    56   this program; if not, write to the Free Software Foundation, Inc.,
       
    57   51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
       
    58 
       
    59   The full GNU General Public License is included in this distribution in
       
    60   the file called "COPYING".
       
    61 
       
    62   Contact Information:
       
    63   Linux NICS <linux.nics@intel.com>
       
    64   e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
       
    65   Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
       
    66 
       
    67 *******************************************************************************/
       
    68 
       
    69 /*
       
    70  *	e100.c: Intel(R) PRO/100 ethernet driver
       
    71  *
       
    72  *	(Re)written 2003 by scott.feldman@intel.com.  Based loosely on
       
    73  *	original e100 driver, but better described as a munging of
       
    74  *	e100, e1000, eepro100, tg3, 8139cp, and other drivers.
       
    75  *
       
    76  *	References:
       
    77  *		Intel 8255x 10/100 Mbps Ethernet Controller Family,
       
    78  *		Open Source Software Developers Manual,
       
    79  *		http://sourceforge.net/projects/e1000
       
    80  *
       
    81  *
       
    82  *	                      Theory of Operation
       
    83  *
       
    84  *	I.   General
       
    85  *
       
    86  *	The driver supports Intel(R) 10/100 Mbps PCI Fast Ethernet
       
    87  *	controller family, which includes the 82557, 82558, 82559, 82550,
       
    88  *	82551, and 82562 devices.  82558 and greater controllers
       
    89  *	integrate the Intel 82555 PHY.  The controllers are used in
       
    90  *	server and client network interface cards, as well as in
       
    91  *	LAN-On-Motherboard (LOM), CardBus, MiniPCI, and ICHx
       
    92  *	configurations.  8255x supports a 32-bit linear addressing
       
    93  *	mode and operates at 33Mhz PCI clock rate.
       
    94  *
       
    95  *	II.  Driver Operation
       
    96  *
       
    97  *	Memory-mapped mode is used exclusively to access the device's
       
    98  *	shared-memory structure, the Control/Status Registers (CSR). All
       
    99  *	setup, configuration, and control of the device, including queuing
       
   100  *	of Tx, Rx, and configuration commands is through the CSR.
       
   101  *	cmd_lock serializes accesses to the CSR command register.  cb_lock
       
   102  *	protects the shared Command Block List (CBL).
       
   103  *
       
   104  *	8255x is highly MII-compliant and all access to the PHY go
       
   105  *	through the Management Data Interface (MDI).  Consequently, the
       
   106  *	driver leverages the mii.c library shared with other MII-compliant
       
   107  *	devices.
       
   108  *
       
   109  *	Big- and Little-Endian byte order as well as 32- and 64-bit
       
   110  *	archs are supported.  Weak-ordered memory and non-cache-coherent
       
   111  *	archs are supported.
       
   112  *
       
   113  *	III. Transmit
       
   114  *
       
   115  *	A Tx skb is mapped and hangs off of a TCB.  TCBs are linked
       
   116  *	together in a fixed-size ring (CBL) thus forming the flexible mode
       
   117  *	memory structure.  A TCB marked with the suspend-bit indicates
       
   118  *	the end of the ring.  The last TCB processed suspends the
       
    119  *	controller, and the controller can be restarted by issuing a CU
       
   120  *	resume command to continue from the suspend point, or a CU start
       
   121  *	command to start at a given position in the ring.
       
   122  *
       
   123  *	Non-Tx commands (config, multicast setup, etc) are linked
       
   124  *	into the CBL ring along with Tx commands.  The common structure
       
   125  *	used for both Tx and non-Tx commands is the Command Block (CB).
       
   126  *
       
   127  *	cb_to_use is the next CB to use for queuing a command; cb_to_clean
       
   128  *	is the next CB to check for completion; cb_to_send is the first
       
   129  *	CB to start on in case of a previous failure to resume.  CB clean
       
   130  *	up happens in interrupt context in response to a CU interrupt.
       
   131  *	cbs_avail keeps track of number of free CB resources available.
       
   132  *
       
   133  * 	Hardware padding of short packets to minimum packet size is
       
   134  * 	enabled.  82557 pads with 7Eh, while the later controllers pad
       
   135  * 	with 00h.
       
   136  *
       
   137  *	IV.  Receive
       
   138  *
       
   139  *	The Receive Frame Area (RFA) comprises a ring of Receive Frame
       
   140  *	Descriptors (RFD) + data buffer, thus forming the simplified mode
       
   141  *	memory structure.  Rx skbs are allocated to contain both the RFD
       
   142  *	and the data buffer, but the RFD is pulled off before the skb is
       
   143  *	indicated.  The data buffer is aligned such that encapsulated
       
   144  *	protocol headers are u32-aligned.  Since the RFD is part of the
       
   145  *	mapped shared memory, and completion status is contained within
       
   146  *	the RFD, the RFD must be dma_sync'ed to maintain a consistent
       
   147  *	view from software and hardware.
       
   148  *
       
   149  *	In order to keep updates to the RFD link field from colliding with
       
   150  *	hardware writes to mark packets complete, we use the feature that
       
   151  *	hardware will not write to a size 0 descriptor and mark the previous
       
   152  *	packet as end-of-list (EL).   After updating the link, we remove EL
       
   153  *	and only then restore the size such that hardware may use the
       
   154  *	previous-to-end RFD.
       
   155  *
       
    156  *	Under typical operation, the receive unit (RU) is started once,
       
   157  *	and the controller happily fills RFDs as frames arrive.  If
       
   158  *	replacement RFDs cannot be allocated, or the RU goes non-active,
       
   159  *	the RU must be restarted.  Frame arrival generates an interrupt,
       
   160  *	and Rx indication and re-allocation happen in the same context,
       
   161  *	therefore no locking is required.  A software-generated interrupt
       
   162  *	is generated from the watchdog to recover from a failed allocation
       
   163  *	scenario where all Rx resources have been indicated and none re-
       
   164  *	placed.
       
   165  *
       
   166  *	V.   Miscellaneous
       
   167  *
       
   168  * 	VLAN offloading of tagging, stripping and filtering is not
       
   169  * 	supported, but driver will accommodate the extra 4-byte VLAN tag
       
   170  * 	for processing by upper layers.  Tx/Rx Checksum offloading is not
       
   171  * 	supported.  Tx Scatter/Gather is not supported.  Jumbo Frames is
       
   172  * 	not supported (hardware limitation).
       
   173  *
       
   174  * 	MagicPacket(tm) WoL support is enabled/disabled via ethtool.
       
   175  *
       
   176  * 	Thanks to JC (jchapman@katalix.com) for helping with
       
   177  * 	testing/troubleshooting the development driver.
       
   178  *
       
   179  * 	TODO:
       
   180  * 	o several entry points race with dev->close
       
   181  * 	o check for tx-no-resources/stop Q races with tx clean/wake Q
       
   182  *
       
   183  *	FIXES:
       
   184  * 2005/12/02 - Michael O'Donnell <Michael.ODonnell at stratus dot com>
       
   185  *	- Stratus87247: protect MDI control register manipulations
       
   186  * 2009/06/01 - Andreas Mohr <andi at lisas dot de>
       
   187  *      - add clean lowlevel I/O emulation for cards with MII-lacking PHYs
       
   188  */
       
   189 
       
   190 #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
       
   191 
       
   192 #include <linux/module.h>
       
   193 #include <linux/moduleparam.h>
       
   194 #include <linux/kernel.h>
       
   195 #include <linux/types.h>
       
   196 #include <linux/sched.h>
       
   197 #include <linux/slab.h>
       
   198 #include <linux/delay.h>
       
   199 #include <linux/init.h>
       
   200 #include <linux/pci.h>
       
   201 #include <linux/dma-mapping.h>
       
   202 #include <linux/dmapool.h>
       
   203 #include <linux/netdevice.h>
       
   204 #include <linux/etherdevice.h>
       
   205 #include <linux/mii.h>
       
   206 #include <linux/if_vlan.h>
       
   207 #include <linux/skbuff.h>
       
   208 #include <linux/ethtool.h>
       
   209 #include <linux/string.h>
       
   210 #include <linux/firmware.h>
       
   211 #include <linux/rtnetlink.h>
       
   212 #include <asm/unaligned.h>
       
   213 
       
   214 // EtherCAT includes
       
   215 #include "../globals.h"
       
   216 #include "ecdev.h"
       
   217 
       
/* Driver identity.  DRV_NAME is "ec_e100" (not "e100") so the EtherCAT
 * variant can coexist with / be distinguished from the stock driver. */
#define DRV_NAME		"ec_e100"
#define DRV_EXT			"-NAPI"
#define DRV_VERSION		"3.5.24-k2"DRV_EXT
#define DRV_DESCRIPTION		"Intel(R) PRO/100 Network Driver"
#define DRV_COPYRIGHT		"Copyright(c) 1999-2006 Intel Corporation"

/* Watchdog timer period (2 s) and NAPI poll budget. */
#define E100_WATCHDOG_PERIOD	(2 * HZ)
#define E100_NAPI_WEIGHT	16

/* Microcode images requested at runtime for the respective MAC revisions. */
#define FIRMWARE_D101M		"e100/d101m_ucode.bin"
#define FIRMWARE_D101S		"e100/d101s_ucode.bin"
#define FIRMWARE_D102E		"e100/d102e_ucode.bin"
       
   230 
       
   231 MODULE_DESCRIPTION(DRV_DESCRIPTION);
       
   232 MODULE_AUTHOR(DRV_COPYRIGHT);
       
   233 MODULE_LICENSE("GPL");
       
   234 MODULE_VERSION(DRV_VERSION);
       
   235 MODULE_FIRMWARE(FIRMWARE_D101M);
       
   236 MODULE_FIRMWARE(FIRMWARE_D101S);
       
   237 MODULE_FIRMWARE(FIRMWARE_D102E);
       
   238 
       
   239 MODULE_DESCRIPTION(DRV_DESCRIPTION);
       
   240 MODULE_AUTHOR("Florian Pose <fp@igh-essen.com>");
       
   241 MODULE_LICENSE("GPL");
       
   242 MODULE_VERSION(DRV_VERSION ", master " EC_MASTER_VERSION);
       
   243 
       
/* Cyclic poll entry point used when the device is run by the EtherCAT
 * master (interrupt-less operation). */
void e100_ec_poll(struct net_device *);

/* Module parameters (read-only after load, perm 0). */
static int debug = 3;			/* message verbosity, 0 = none ... 16 = all */
static int eeprom_bad_csum_allow = 0;	/* tolerate a bad EEPROM checksum */
static int use_io = 0;			/* force port I/O instead of MMIO */
module_param(debug, int, 0);
module_param(eeprom_bad_csum_allow, int, 0);
module_param(use_io, int, 0);
MODULE_PARM_DESC(debug, "Debug level (0=none,...,16=all)");
MODULE_PARM_DESC(eeprom_bad_csum_allow, "Allow bad eeprom checksums");
MODULE_PARM_DESC(use_io, "Force use of i/o access mode");
       
   255 
       
/* Build one PCI match entry for an Intel 8255x device; "ich" carries the
 * ICH generation in driver_data (0 = not an ICH part). */
#define INTEL_8255X_ETHERNET_DEVICE(device_id, ich) {\
	PCI_VENDOR_ID_INTEL, device_id, PCI_ANY_ID, PCI_ANY_ID, \
	PCI_CLASS_NETWORK_ETHERNET << 8, 0xFFFF00, ich }
/* All supported 82557/8/9, 82550/1/2 and ICHx device IDs. */
static DEFINE_PCI_DEVICE_TABLE(e100_id_table) = {
	INTEL_8255X_ETHERNET_DEVICE(0x1029, 0),
	INTEL_8255X_ETHERNET_DEVICE(0x1030, 0),
	INTEL_8255X_ETHERNET_DEVICE(0x1031, 3),
	INTEL_8255X_ETHERNET_DEVICE(0x1032, 3),
	INTEL_8255X_ETHERNET_DEVICE(0x1033, 3),
	INTEL_8255X_ETHERNET_DEVICE(0x1034, 3),
	INTEL_8255X_ETHERNET_DEVICE(0x1038, 3),
	INTEL_8255X_ETHERNET_DEVICE(0x1039, 4),
	INTEL_8255X_ETHERNET_DEVICE(0x103A, 4),
	INTEL_8255X_ETHERNET_DEVICE(0x103B, 4),
	INTEL_8255X_ETHERNET_DEVICE(0x103C, 4),
	INTEL_8255X_ETHERNET_DEVICE(0x103D, 4),
	INTEL_8255X_ETHERNET_DEVICE(0x103E, 4),
	INTEL_8255X_ETHERNET_DEVICE(0x1050, 5),
	INTEL_8255X_ETHERNET_DEVICE(0x1051, 5),
	INTEL_8255X_ETHERNET_DEVICE(0x1052, 5),
	INTEL_8255X_ETHERNET_DEVICE(0x1053, 5),
	INTEL_8255X_ETHERNET_DEVICE(0x1054, 5),
	INTEL_8255X_ETHERNET_DEVICE(0x1055, 5),
	INTEL_8255X_ETHERNET_DEVICE(0x1056, 5),
	INTEL_8255X_ETHERNET_DEVICE(0x1057, 5),
	INTEL_8255X_ETHERNET_DEVICE(0x1059, 0),
	INTEL_8255X_ETHERNET_DEVICE(0x1064, 6),
	INTEL_8255X_ETHERNET_DEVICE(0x1065, 6),
	INTEL_8255X_ETHERNET_DEVICE(0x1066, 6),
	INTEL_8255X_ETHERNET_DEVICE(0x1067, 6),
	INTEL_8255X_ETHERNET_DEVICE(0x1068, 6),
	INTEL_8255X_ETHERNET_DEVICE(0x1069, 6),
	INTEL_8255X_ETHERNET_DEVICE(0x106A, 6),
	INTEL_8255X_ETHERNET_DEVICE(0x106B, 6),
	INTEL_8255X_ETHERNET_DEVICE(0x1091, 7),
	INTEL_8255X_ETHERNET_DEVICE(0x1092, 7),
	INTEL_8255X_ETHERNET_DEVICE(0x1093, 7),
	INTEL_8255X_ETHERNET_DEVICE(0x1094, 7),
	INTEL_8255X_ETHERNET_DEVICE(0x1095, 7),
	INTEL_8255X_ETHERNET_DEVICE(0x10fe, 7),
	INTEL_8255X_ETHERNET_DEVICE(0x1209, 0),
	INTEL_8255X_ETHERNET_DEVICE(0x1229, 0),
	INTEL_8255X_ETHERNET_DEVICE(0x2449, 2),
	INTEL_8255X_ETHERNET_DEVICE(0x2459, 2),
	INTEL_8255X_ETHERNET_DEVICE(0x245D, 2),
	INTEL_8255X_ETHERNET_DEVICE(0x27DC, 7),
	{ 0, }	/* terminator */
};

// prevent from being loaded automatically
//MODULE_DEVICE_TABLE(pci, e100_id_table);
       
   307 
       
/* MAC (controller) revision identifiers; gaps in the numbering are
 * unlisted steppings.  Used to select per-chip behavior. */
enum mac {
	mac_82557_D100_A  = 0,
	mac_82557_D100_B  = 1,
	mac_82557_D100_C  = 2,
	mac_82558_D101_A4 = 4,
	mac_82558_D101_B0 = 5,
	mac_82559_D101M   = 8,
	mac_82559_D101S   = 9,
	mac_82550_D102    = 12,
	mac_82550_D102_C  = 13,
	mac_82551_E       = 14,
	mac_82551_F       = 15,
	mac_82551_10      = 16,
	mac_unknown       = 0xFF,
};
       
   323 
       
/* Known PHY identifiers.  NOTE(review): values appear to be the
 * concatenated MII PHY ID registers -- confirm against the PHY init code. */
enum phy {
	phy_100a     = 0x000003E0,
	phy_100c     = 0x035002A8,
	phy_82555_tx = 0x015002A8,
	phy_nsc_tx   = 0x5C002000,
	phy_82562_et = 0x033002A8,
	phy_82562_em = 0x032002A8,
	phy_82562_ek = 0x031002A8,
	phy_82562_eh = 0x017002A8,
	phy_82552_v  = 0xd061004d,
	phy_unknown  = 0xFFFFFFFF,
};
       
   336 
       
/* CSR (Control/Status Registers) -- memory-mapped register layout of the
 * 8255x.  Field order and sizes must match the hardware exactly; the SCB
 * status/command word is split into individually addressable bytes
 * (cmd_hi carries the interrupt mask, see e100_disable_irq()). */
struct csr {
	struct {
		u8 status;	/* SCB status byte (read in e100_write_flush()) */
		u8 stat_ack;	/* interrupt status/acknowledge byte */
		u8 cmd_lo;	/* CU/RU command opcodes (enum scb_cmd_lo) */
		u8 cmd_hi;	/* interrupt mask bits (enum scb_cmd_hi) */
		u32 gen_ptr;	/* general pointer argument for SCB commands */
	} scb;
	u32 port;		/* PORT register: reset/self-test (enum port) */
	u16 flash_ctrl;
	u8 eeprom_ctrl_lo;	/* bit-banged EEPROM interface (enum eeprom_ctrl_lo) */
	u8 eeprom_ctrl_hi;
	u32 mdi_ctrl;		/* MDI (PHY management) control (enum mdi_ctrl) */
	u32 rx_dma_count;
};
       
   353 
       
/* Receive-unit status bits within the SCB status byte. */
enum scb_status {
	rus_no_res       = 0x08,	/* RU out of resources */
	rus_ready        = 0x10,
	rus_mask         = 0x3C,	/* mask isolating the RUS field */
};

/* Software-tracked state of the receive unit. */
enum ru_state  {
	RU_SUSPENDED = 0,
	RU_RUNNING	 = 1,
	RU_UNINITIALIZED = -1,
};
       
   365 
       
/* Interrupt cause bits as presented in the SCB stat/ack byte; the rx/tx
 * composites group the causes serviced by the respective paths. */
enum scb_stat_ack {
	stat_ack_not_ours    = 0x00,
	stat_ack_sw_gen      = 0x04,	/* software-generated interrupt */
	stat_ack_rnr         = 0x10,	/* RU not ready */
	stat_ack_cu_idle     = 0x20,
	stat_ack_frame_rx    = 0x40,
	stat_ack_cu_cmd_done = 0x80,
	stat_ack_not_present = 0xFF,	/* all-ones read: device gone */
	stat_ack_rx = (stat_ack_sw_gen | stat_ack_rnr | stat_ack_frame_rx),
	stat_ack_tx = (stat_ack_cu_idle | stat_ack_cu_cmd_done),
};

/* SCB command byte, high half: interrupt mask control. */
enum scb_cmd_hi {
	irq_mask_none = 0x00,	/* unmask interrupts */
	irq_mask_all  = 0x01,	/* mask interrupts */
	irq_sw_gen    = 0x02,	/* trigger a software interrupt */
};
       
   383 
       
/* SCB command byte, low half: command-unit (cuc_*) and receive-unit
 * (ruc_*) opcodes. */
enum scb_cmd_lo {
	cuc_nop        = 0x00,
	ruc_start      = 0x01,
	ruc_load_base  = 0x06,
	cuc_start      = 0x10,
	cuc_resume     = 0x20,
	cuc_dump_addr  = 0x40,
	cuc_dump_stats = 0x50,
	cuc_load_base  = 0x60,
	cuc_dump_reset = 0x70,	/* dump statistics, then reset counters */
};

/* Completion markers written for the cuc_dump* commands. */
enum cuc_dump {
	cuc_dump_complete       = 0x0000A005,
	cuc_dump_reset_complete = 0x0000A007,
};

/* PORT register commands (see e100_hw_reset()/e100_self_test()). */
enum port {
	software_reset  = 0x0000,
	selftest        = 0x0001,
	selective_reset = 0x0002,
};
       
   406 
       
/* EEPROM control register bits (bit-banged serial interface):
 * clock, chip-select, data-in, data-out. */
enum eeprom_ctrl_lo {
	eesk = 0x01,
	eecs = 0x02,
	eedi = 0x04,
	eedo = 0x08,
};

/* MDI (PHY management) control register bits. */
enum mdi_ctrl {
	mdi_write = 0x04000000,
	mdi_read  = 0x08000000,
	mdi_ready = 0x10000000,	/* set when the current cycle completed */
};

/* Serial EEPROM opcodes; ewen/ewds enable/disable write-erase
 * (see e100_eeprom_write()). */
enum eeprom_op {
	op_write = 0x05,
	op_read  = 0x06,
	op_ewds  = 0x10,
	op_ewen  = 0x13,
};
       
   426 
       
/* Word offsets of interesting fields within the EEPROM image. */
enum eeprom_offsets {
	eeprom_cnfg_mdix  = 0x03,
	eeprom_phy_iface  = 0x06,
	eeprom_id         = 0x0A,
	eeprom_config_asf = 0x0D,
	eeprom_smbus_addr = 0x90,
};

/* Bit in the cnfg_mdix word: auto-MDI/MDI-X enabled. */
enum eeprom_cnfg_mdix {
	eeprom_mdix_enabled = 0x0080,
};

/* PHY interface types as encoded in the EEPROM. */
enum eeprom_phy_iface {
	NoSuchPhy = 0,
	I82553AB,
	I82553C,
	I82503,
	DP83840,
	S80C240,
	S80C24,
	I82555,
	DP83840A = 10,
};

/* Bit in the id word: Wake-on-LAN capable. */
enum eeprom_id {
	eeprom_id_wol = 0x0020,
};

/* Bits in the config_asf word. */
enum eeprom_config_asf {
	eeprom_asf = 0x8000,
	eeprom_gcl = 0x4000,
};
       
   459 
       
/* Command block completion status bits. */
enum cb_status {
	cb_complete = 0x8000,	/* command processed by the CU */
	cb_ok       = 0x2000,	/* command completed without error */
};

/* Command block opcodes (low bits) and control bits (high bits). */
enum cb_command {
	cb_nop    = 0x0000,
	cb_iaaddr = 0x0001,	/* set individual (MAC) address */
	cb_config = 0x0002,
	cb_multi  = 0x0003,	/* multicast setup */
	cb_tx     = 0x0004,
	cb_ucode  = 0x0005,	/* load microcode */
	cb_dump   = 0x0006,
	cb_tx_sf  = 0x0008,	/* transmit, flexible mode */
	cb_cid    = 0x1f00,
	cb_i      = 0x2000,	/* interrupt on completion */
	cb_s      = 0x4000,	/* suspend CU after this block */
	cb_el     = 0x8000,	/* end of list (EL) */
};
       
   479 
       
/* Receive Frame Descriptor, hardware layout (little-endian fields).
 * Lives at the head of each Rx skb; see "IV. Receive" above. */
struct rfd {
	__le16 status;
	__le16 command;
	__le32 link;		/* bus address of the next RFD */
	__le32 rbd;
	__le16 actual_size;	/* bytes received */
	__le16 size;		/* buffer size; 0 keeps hardware from writing it */
};

/* Software bookkeeping for one receive buffer: ring linkage, the skb
 * holding RFD + data, and its DMA mapping. */
struct rx {
	struct rx *next, *prev;
	struct sk_buff *skb;
	dma_addr_t dma_addr;
};
       
   494 
       
/* Bitfield allocation order is endian-dependent; X(a,b) emits the two
 * declarators in the order required for the target so the struct below
 * matches the hardware byte layout on both endiannesses. */
#if defined(__BIG_ENDIAN_BITFIELD)
#define X(a,b)	b,a
#else
#define X(a,b)	a,b
#endif
/* Payload of the configure command (cb_config).  The /*n*/ markers give
 * the byte offset within the hardware config block; layout must match
 * the 8255x configure-command format exactly. */
struct config {
/*0*/	u8 X(byte_count:6, pad0:2);
/*1*/	u8 X(X(rx_fifo_limit:4, tx_fifo_limit:3), pad1:1);
/*2*/	u8 adaptive_ifs;
/*3*/	u8 X(X(X(X(mwi_enable:1, type_enable:1), read_align_enable:1),
	   term_write_cache_line:1), pad3:4);
/*4*/	u8 X(rx_dma_max_count:7, pad4:1);
/*5*/	u8 X(tx_dma_max_count:7, dma_max_count_enable:1);
/*6*/	u8 X(X(X(X(X(X(X(late_scb_update:1, direct_rx_dma:1),
	   tno_intr:1), cna_intr:1), standard_tcb:1), standard_stat_counter:1),
	   rx_discard_overruns:1), rx_save_bad_frames:1);
/*7*/	u8 X(X(X(X(X(rx_discard_short_frames:1, tx_underrun_retry:2),
	   pad7:2), rx_extended_rfd:1), tx_two_frames_in_fifo:1),
	   tx_dynamic_tbd:1);
/*8*/	u8 X(X(mii_mode:1, pad8:6), csma_disabled:1);
/*9*/	u8 X(X(X(X(X(rx_tcpudp_checksum:1, pad9:3), vlan_arp_tco:1),
	   link_status_wake:1), arp_wake:1), mcmatch_wake:1);
/*10*/	u8 X(X(X(pad10:3, no_source_addr_insertion:1), preamble_length:2),
	   loopback:2);
/*11*/	u8 X(linear_priority:3, pad11:5);
/*12*/	u8 X(X(linear_priority_mode:1, pad12:3), ifs:4);
/*13*/	u8 ip_addr_lo;
/*14*/	u8 ip_addr_hi;
/*15*/	u8 X(X(X(X(X(X(X(promiscuous_mode:1, broadcast_disabled:1),
	   wait_after_win:1), pad15_1:1), ignore_ul_bit:1), crc_16_bit:1),
	   pad15_2:1), crs_or_cdt:1);
/*16*/	u8 fc_delay_lo;
/*17*/	u8 fc_delay_hi;
/*18*/	u8 X(X(X(X(X(rx_stripping:1, tx_padding:1), rx_crc_transfer:1),
	   rx_long_ok:1), fc_priority_threshold:3), pad18:1);
/*19*/	u8 X(X(X(X(X(X(X(addr_wake:1, magic_packet_disable:1),
	   fc_disable:1), fc_restop:1), fc_restart:1), fc_reject:1),
	   full_duplex_force:1), full_duplex_pin:1);
/*20*/	u8 X(X(X(pad20_1:5, fc_priority_location:1), multi_ia:1), pad20_2:1);
/*21*/	u8 X(X(pad21_1:3, multicast_all:1), pad21_2:4);
/*22*/	u8 X(X(rx_d102_mode:1, rx_vlan_drop:1), pad22:6);
	u8 pad_d102[9];		/* extra config bytes for D102 parts */
};
       
   538 
       
/* Multicast setup command payload: address count followed by the packed
 * 6-byte addresses (up to 64), plus 2 pad bytes. */
#define E100_MAX_MULTICAST_ADDRS	64
struct multi {
	__le16 count;
	u8 addr[E100_MAX_MULTICAST_ADDRS * ETH_ALEN + 2/*pad*/];
};
       
   544 
       
/* Important: keep total struct u32-aligned */
#define UCODE_SIZE			134
/* Command Block: the hardware-visible part is status/command/link plus
 * the per-opcode union (see enum cb_command).  The trailing next/prev/
 * dma_addr/skb fields are driver-side ring bookkeeping. */
struct cb {
	__le16 status;		/* cb_complete/cb_ok (enum cb_status) */
	__le16 command;		/* opcode + cb_i/cb_s/cb_el control bits */
	__le32 link;		/* bus address of the next CB */
	union {
		u8 iaaddr[ETH_ALEN];		/* cb_iaaddr */
		__le32 ucode[UCODE_SIZE];	/* cb_ucode */
		struct config config;		/* cb_config */
		struct multi multi;		/* cb_multi */
		struct {			/* cb_tx / cb_tx_sf */
			u32 tbd_array;
			u16 tcb_byte_count;
			u8 threshold;
			u8 tbd_count;
			struct {
				__le32 buf_addr;
				__le16 size;
				u16 eol;
			} tbd;
		} tcb;
		__le32 dump_buffer_addr;	/* cb_dump */
	} u;
	struct cb *next, *prev;
	dma_addr_t dma_addr;
	struct sk_buff *skb;		/* Tx skb mapped by this block, if any */
};
       
   573 
       
/* Loopback modes selectable via the config block's loopback field. */
enum loopback {
	lb_none = 0, lb_mac = 1, lb_phy = 3,
};

/* Statistics counters as dumped by the controller (cuc_dump_stats);
 * `complete` receives a cuc_dump completion marker. */
struct stats {
	__le32 tx_good_frames, tx_max_collisions, tx_late_collisions,
		tx_underruns, tx_lost_crs, tx_deferred, tx_single_collisions,
		tx_multiple_collisions, tx_total_collisions;
	__le32 rx_good_frames, rx_crc_errors, rx_alignment_errors,
		rx_resource_errors, rx_overrun_errors, rx_cdt_errors,
		rx_short_frame_errors;
	__le32 fc_xmt_pause, fc_rcv_pause, fc_rcv_unsupported;
	__le16 xmt_tco_frames, rcv_tco_frames;
	__le32 complete;
};
       
   589 
       
/* Shared DMA scratch memory: self-test result area (written by the chip,
 * see e100_self_test()), statistics dump area, and a debug dump buffer. */
struct mem {
	struct {
		u32 signature;	/* non-zero once the self-test ran */
		u32 result;	/* 0 on success */
	} selftest;
	struct stats stats;
	u8 dump_buf[596];
};

/* Allowed min/max and configured count for a ring resource. */
struct param_range {
	u32 min;
	u32 max;
	u32 count;
};

/* Tunable ring sizes: receive frame descriptors and command blocks. */
struct params {
	struct param_range rfds;
	struct param_range cbs;
};
       
   609 
       
/* Per-device driver state. */
struct nic {
	/* Begin: frequently used values: keep adjacent for cache effect */
	u32 msg_enable				____cacheline_aligned;
	struct net_device *netdev;
	struct pci_dev *pdev;
	/* Chip-specific MDIO access routine (PHY register read/write). */
	u16 (*mdio_ctrl)(struct nic *nic, u32 addr, u32 dir, u32 reg, u16 data);

	/* Rx ring bookkeeping; blank_rfd is the size-0 template used for
	 * safe link updates (see "IV. Receive" above). */
	struct rx *rxs				____cacheline_aligned;
	struct rx *rx_to_use;
	struct rx *rx_to_clean;
	struct rfd blank_rfd;
	enum ru_state ru_running;

	spinlock_t cb_lock			____cacheline_aligned;	/* protects the CBL */
	spinlock_t cmd_lock;		/* serializes SCB command register access */
	struct csr __iomem *csr;	/* mapped Control/Status Registers */
	enum scb_cmd_lo cuc_cmd;	/* next CU command: start or resume */
	unsigned int cbs_avail;		/* free command blocks in the ring */
	struct napi_struct napi;
	struct cb *cbs;
	struct cb *cb_to_use;		/* next CB to queue a command into */
	struct cb *cb_to_send;		/* first CB to (re)start the CU on */
	struct cb *cb_to_clean;		/* next CB to check for completion */
	__le16 tx_command;
	/* End: frequently used values: keep adjacent for cache effect */

	enum {
		ich                = (1 << 0),
		promiscuous        = (1 << 1),
		multicast_all      = (1 << 2),
		wol_magic          = (1 << 3),
		ich_10h_workaround = (1 << 4),
	} flags					____cacheline_aligned;

	enum mac mac;
	enum phy phy;
	struct params params;
	struct timer_list watchdog;
	struct mii_if_info mii;
	struct work_struct tx_timeout_task;
	enum loopback loopback;

	struct mem *mem;		/* shared DMA area (selftest/stats/dump) */
	dma_addr_t dma_addr;		/* bus address of *mem */

	struct pci_pool *cbs_pool;
	dma_addr_t cbs_dma_addr;
	u8 adaptive_ifs;
	u8 tx_threshold;
	u32 tx_frames;
	u32 tx_collisions;

	u32 tx_deferred;
	u32 tx_single_collisions;
	u32 tx_multiple_collisions;
	u32 tx_fc_pause;
	u32 tx_tco_frames;

	u32 rx_fc_pause;
	u32 rx_fc_unsupported;
	u32 rx_tco_frames;
	u32 rx_over_length_errors;

	u16 eeprom_wc;		/* NOTE(review): presumably EEPROM word count -- confirm */

	__le16 eeprom[256];
	spinlock_t mdio_lock;
	const struct firmware *fw;

	/* EtherCAT integration: non-NULL when the NIC is claimed by the
	 * master; IRQ (un)masking is then skipped (see e100_enable_irq()). */
	ec_device_t *ecdev;
	unsigned long ec_watchdog_jiffies;
};
       
   681 
       
/* Force completion of previously posted MMIO writes before proceeding. */
static inline void e100_write_flush(struct nic *nic)
{
	/* Flush previous PCI writes through intermediate bridges
	 * by doing a benign read */
	(void)ioread8(&nic->csr->scb.status);
}
       
   688 
       
/* Unmask the controller's interrupt line.
 *
 * Does nothing when the device is attached to the EtherCAT master
 * (nic->ecdev set): that mode is poll-driven (see e100_ec_poll()) and
 * interrupts stay masked.  cmd_lock serializes SCB command register
 * access (see the theory-of-operation comment above). */
static void e100_enable_irq(struct nic *nic)
{
	unsigned long flags;

	if (nic->ecdev)
		return;

	spin_lock_irqsave(&nic->cmd_lock, flags);
	iowrite8(irq_mask_none, &nic->csr->scb.cmd_hi);
	e100_write_flush(nic);
	spin_unlock_irqrestore(&nic->cmd_lock, flags);
}
       
   701 
       
/* Mask all controller interrupts.
 *
 * Unlike e100_enable_irq(), the write itself is still performed in
 * EtherCAT mode, but cmd_lock is skipped then -- NOTE(review): presumably
 * safe because no interrupt handler is registered in that mode; confirm
 * at the probe/IRQ-request site. */
static void e100_disable_irq(struct nic *nic)
{
	unsigned long flags = 0;

	if (!nic->ecdev)
		spin_lock_irqsave(&nic->cmd_lock, flags);
	iowrite8(irq_mask_all, &nic->csr->scb.cmd_hi);
	e100_write_flush(nic);
	if (!nic->ecdev)
		spin_unlock_irqrestore(&nic->cmd_lock, flags);
}
       
   713 
       
/* Fully reset the controller: a selective reset first to idle CU/RU and
 * get the device off the PCI bus, then a full software reset.  Each
 * write is flushed and followed by a 20 us settle delay.  The reset
 * leaves interrupts unmasked, so mask them again before returning. */
static void e100_hw_reset(struct nic *nic)
{
	/* Put CU and RU into idle with a selective reset to get
	 * device off of PCI bus */
	iowrite32(selective_reset, &nic->csr->port);
	e100_write_flush(nic); udelay(20);

	/* Now fully reset device */
	iowrite32(software_reset, &nic->csr->port);
	e100_write_flush(nic); udelay(20);

	/* Mask off our interrupt line - it's unmasked after reset */
	e100_disable_irq(nic);
}
       
   728 
       
/* Run the controller's built-in self-test, which exercises DMA to/from
 * host memory.
 *
 * Returns 0 on success, or -ETIMEDOUT when the device reported a failure
 * or never wrote its results back to the shared memory block. */
static int e100_self_test(struct nic *nic)
{
	u32 dma_addr = nic->dma_addr + offsetof(struct mem, selftest);

	/* Passing the self-test is a pretty good indication
	 * that the device can DMA to/from host memory */

	/* Clear signature/result so completion can be detected below */
	nic->mem->selftest.signature = 0;
	nic->mem->selftest.result = 0xFFFFFFFF;

	iowrite32(selftest | dma_addr, &nic->csr->port);
	e100_write_flush(nic);
	/* Wait 10 msec for self-test to complete */
	msleep(10);

	/* Interrupts are enabled after self-test */
	e100_disable_irq(nic);

	/* Check results of self-test */
	if (nic->mem->selftest.result != 0) {
		netif_err(nic, hw, nic->netdev,
			  "Self-test failed: result=0x%08X\n",
			  nic->mem->selftest.result);
		return -ETIMEDOUT;
	}
	/* Signature still zero means the device never wrote back at all */
	if (nic->mem->selftest.signature == 0) {
		netif_err(nic, hw, nic->netdev, "Self-test failed: timed out\n");
		return -ETIMEDOUT;
	}

	return 0;
}
       
   761 
       
   762 static void e100_eeprom_write(struct nic *nic, u16 addr_len, u16 addr, __le16 data)
       
   763 {
       
   764 	u32 cmd_addr_data[3];
       
   765 	u8 ctrl;
       
   766 	int i, j;
       
   767 
       
   768 	/* Three cmds: write/erase enable, write data, write/erase disable */
       
   769 	cmd_addr_data[0] = op_ewen << (addr_len - 2);
       
   770 	cmd_addr_data[1] = (((op_write << addr_len) | addr) << 16) |
       
   771 		le16_to_cpu(data);
       
   772 	cmd_addr_data[2] = op_ewds << (addr_len - 2);
       
   773 
       
   774 	/* Bit-bang cmds to write word to eeprom */
       
   775 	for (j = 0; j < 3; j++) {
       
   776 
       
   777 		/* Chip select */
       
   778 		iowrite8(eecs | eesk, &nic->csr->eeprom_ctrl_lo);
       
   779 		e100_write_flush(nic); udelay(4);
       
   780 
       
   781 		for (i = 31; i >= 0; i--) {
       
   782 			ctrl = (cmd_addr_data[j] & (1 << i)) ?
       
   783 				eecs | eedi : eecs;
       
   784 			iowrite8(ctrl, &nic->csr->eeprom_ctrl_lo);
       
   785 			e100_write_flush(nic); udelay(4);
       
   786 
       
   787 			iowrite8(ctrl | eesk, &nic->csr->eeprom_ctrl_lo);
       
   788 			e100_write_flush(nic); udelay(4);
       
   789 		}
       
   790 		/* Wait 10 msec for cmd to complete */
       
   791 		msleep(10);
       
   792 
       
   793 		/* Chip deselect */
       
   794 		iowrite8(0, &nic->csr->eeprom_ctrl_lo);
       
   795 		e100_write_flush(nic); udelay(4);
       
   796 	}
       
   797 };
       
   798 
       
   799 /* General technique stolen from the eepro100 driver - very clever */
       
   800 static __le16 e100_eeprom_read(struct nic *nic, u16 *addr_len, u16 addr)
       
   801 {
       
   802 	u32 cmd_addr_data;
       
   803 	u16 data = 0;
       
   804 	u8 ctrl;
       
   805 	int i;
       
   806 
       
   807 	cmd_addr_data = ((op_read << *addr_len) | addr) << 16;
       
   808 
       
   809 	/* Chip select */
       
   810 	iowrite8(eecs | eesk, &nic->csr->eeprom_ctrl_lo);
       
   811 	e100_write_flush(nic); udelay(4);
       
   812 
       
   813 	/* Bit-bang to read word from eeprom */
       
   814 	for (i = 31; i >= 0; i--) {
       
   815 		ctrl = (cmd_addr_data & (1 << i)) ? eecs | eedi : eecs;
       
   816 		iowrite8(ctrl, &nic->csr->eeprom_ctrl_lo);
       
   817 		e100_write_flush(nic); udelay(4);
       
   818 
       
   819 		iowrite8(ctrl | eesk, &nic->csr->eeprom_ctrl_lo);
       
   820 		e100_write_flush(nic); udelay(4);
       
   821 
       
   822 		/* Eeprom drives a dummy zero to EEDO after receiving
       
   823 		 * complete address.  Use this to adjust addr_len. */
       
   824 		ctrl = ioread8(&nic->csr->eeprom_ctrl_lo);
       
   825 		if (!(ctrl & eedo) && i > 16) {
       
   826 			*addr_len -= (i - 16);
       
   827 			i = 17;
       
   828 		}
       
   829 
       
   830 		data = (data << 1) | (ctrl & eedo ? 1 : 0);
       
   831 	}
       
   832 
       
   833 	/* Chip deselect */
       
   834 	iowrite8(0, &nic->csr->eeprom_ctrl_lo);
       
   835 	e100_write_flush(nic); udelay(4);
       
   836 
       
   837 	return cpu_to_le16(data);
       
   838 };
       
   839 
       
/* Load entire EEPROM image into driver cache and validate checksum */
/* Returns 0 on success (or on checksum mismatch when the
 * eeprom_bad_csum_allow module parameter is set), -EAGAIN otherwise. */
static int e100_eeprom_load(struct nic *nic)
{
	u16 addr, addr_len = 8, checksum = 0;

	/* Try reading with an 8-bit addr len to discover actual addr len */
	e100_eeprom_read(nic, &addr_len, 0);
	/* Word count follows directly from the discovered address width */
	nic->eeprom_wc = 1 << addr_len;

	for (addr = 0; addr < nic->eeprom_wc; addr++) {
		nic->eeprom[addr] = e100_eeprom_read(nic, &addr_len, addr);
		/* Last word holds the checksum itself; don't sum it */
		if (addr < nic->eeprom_wc - 1)
			checksum += le16_to_cpu(nic->eeprom[addr]);
	}

	/* The checksum, stored in the last word, is calculated such that
	 * the sum of words should be 0xBABA */
	if (cpu_to_le16(0xBABA - checksum) != nic->eeprom[nic->eeprom_wc - 1]) {
		netif_err(nic, probe, nic->netdev, "EEPROM corrupted\n");
		if (!eeprom_bad_csum_allow)
			return -EAGAIN;
	}

	return 0;
}
       
   865 
       
/* Save (portion of) driver EEPROM cache to device and update checksum */
/* Returns 0 on success, -EINVAL when the requested range would overlap
 * the checksum word at eeprom_wc - 1 (which is rewritten here anyway). */
static int e100_eeprom_save(struct nic *nic, u16 start, u16 count)
{
	u16 addr, addr_len = 8, checksum = 0;

	/* Try reading with an 8-bit addr len to discover actual addr len */
	e100_eeprom_read(nic, &addr_len, 0);
	nic->eeprom_wc = 1 << addr_len;

	/* Range must stay below the checksum word */
	if (start + count >= nic->eeprom_wc)
		return -EINVAL;

	for (addr = start; addr < start + count; addr++)
		e100_eeprom_write(nic, addr_len, addr, nic->eeprom[addr]);

	/* The checksum, stored in the last word, is calculated such that
	 * the sum of words should be 0xBABA */
	for (addr = 0; addr < nic->eeprom_wc - 1; addr++)
		checksum += le16_to_cpu(nic->eeprom[addr]);
	nic->eeprom[nic->eeprom_wc - 1] = cpu_to_le16(0xBABA - checksum);
	e100_eeprom_write(nic, addr_len, nic->eeprom_wc - 1,
		nic->eeprom[nic->eeprom_wc - 1]);

	return 0;
}
       
   891 
       
   892 #define E100_WAIT_SCB_TIMEOUT 20000 /* we might have to wait 100ms!!! */
       
   893 #define E100_WAIT_SCB_FAST 20       /* delay like the old code */
       
/* Issue a single command (plus optional general pointer) to the SCB,
 * after waiting for the previous command byte to clear.
 *
 * Returns 0 on success, -EAGAIN when the SCB stayed busy for the whole
 * E100_WAIT_SCB_TIMEOUT poll loop.
 * NOTE(review): cmd_lock is skipped when nic->ecdev is set - presumably
 * EtherCAT drives the device from a single context; confirm. */
static int e100_exec_cmd(struct nic *nic, u8 cmd, dma_addr_t dma_addr)
{
	unsigned long flags = 0;
	unsigned int i;
	int err = 0;

	if (!nic->ecdev)
		spin_lock_irqsave(&nic->cmd_lock, flags);

	/* Previous command is accepted when SCB clears */
	for (i = 0; i < E100_WAIT_SCB_TIMEOUT; i++) {
		if (likely(!ioread8(&nic->csr->scb.cmd_lo)))
			break;
		cpu_relax();
		/* after the first few fast polls, back off with udelay */
		if (unlikely(i > E100_WAIT_SCB_FAST))
			udelay(5);
	}
	if (unlikely(i == E100_WAIT_SCB_TIMEOUT)) {
		err = -EAGAIN;
		goto err_unlock;
	}

	/* cuc_resume must not overwrite the general pointer */
	if (unlikely(cmd != cuc_resume))
		iowrite32(dma_addr, &nic->csr->scb.gen_ptr);
	iowrite8(cmd, &nic->csr->scb.cmd_lo);

err_unlock:
	if (!nic->ecdev)
		spin_unlock_irqrestore(&nic->cmd_lock, flags);

	return err;
}
       
   926 
       
/* Take the next free control block (CB) from the ring, let cb_prepare()
 * fill it in, and kick the CU to execute all pending CBs.
 *
 * @skb may be a real sk_buff or an opaque pointer the prepare callback
 * understands (see e100_setup_ucode).
 *
 * Returns 0 on success, -ENOMEM when no CB was free, or -ENOSPC when the
 * last free CB was just consumed (the command is still issued; the value
 * tells callers the ring is now full). */
static int e100_exec_cb(struct nic *nic, struct sk_buff *skb,
	void (*cb_prepare)(struct nic *, struct cb *, struct sk_buff *))
{
	struct cb *cb;
	unsigned long flags = 0;
	int err = 0;

	if (!nic->ecdev)
		spin_lock_irqsave(&nic->cb_lock, flags);

	if (unlikely(!nic->cbs_avail)) {
		err = -ENOMEM;
		goto err_unlock;
	}

	/* Advance the ring and attach the (possibly fake) skb */
	cb = nic->cb_to_use;
	nic->cb_to_use = cb->next;
	nic->cbs_avail--;
	cb->skb = skb;

	if (unlikely(!nic->cbs_avail))
		err = -ENOSPC;

	cb_prepare(nic, cb, skb);

	/* Order is important otherwise we'll be in a race with h/w:
	 * set S-bit in current first, then clear S-bit in previous. */
	cb->command |= cpu_to_le16(cb_s);
	wmb();
	cb->prev->command &= cpu_to_le16(~cb_s);

	while (nic->cb_to_send != nic->cb_to_use) {
		if (unlikely(e100_exec_cmd(nic, nic->cuc_cmd,
			nic->cb_to_send->dma_addr))) {
			/* Ok, here's where things get sticky.  It's
			 * possible that we can't schedule the command
			 * because the controller is too busy, so
			 * let's just queue the command and try again
			 * when another command is scheduled. */
			if (err == -ENOSPC) {
				/* request a reset */
				schedule_work(&nic->tx_timeout_task);
			}
			break;
		} else {
			nic->cuc_cmd = cuc_resume;
			nic->cb_to_send = nic->cb_to_send->next;
		}
	}

err_unlock:
	if (!nic->ecdev)
		spin_unlock_irqrestore(&nic->cb_lock, flags);

	return err;
}
       
   983 
       
   984 static int mdio_read(struct net_device *netdev, int addr, int reg)
       
   985 {
       
   986 	struct nic *nic = netdev_priv(netdev);
       
   987 	return nic->mdio_ctrl(nic, addr, mdi_read, reg, 0);
       
   988 }
       
   989 
       
   990 static void mdio_write(struct net_device *netdev, int addr, int reg, int data)
       
   991 {
       
   992 	struct nic *nic = netdev_priv(netdev);
       
   993 
       
   994 	nic->mdio_ctrl(nic, addr, mdi_write, reg, data);
       
   995 }
       
   996 
       
/* the standard mdio_ctrl() function for usual MII-compliant hardware */
/* Performs one MDI read or write through the MDI control register and
 * returns the resulting data word; returns 0 on Ready-bit timeout since
 * the interface has no way to report an error to its callers. */
static u16 mdio_ctrl_hw(struct nic *nic, u32 addr, u32 dir, u32 reg, u16 data)
{
	u32 data_out = 0;
	unsigned int i;
	unsigned long flags = 0;


	/*
	 * Stratus87247: we shouldn't be writing the MDI control
	 * register until the Ready bit shows True.  Also, since
	 * manipulation of the MDI control registers is a multi-step
	 * procedure it should be done under lock.
	 */
	if (!nic->ecdev)
		spin_lock_irqsave(&nic->mdio_lock, flags);
	for (i = 100; i; --i) {
		if (ioread32(&nic->csr->mdi_ctrl) & mdi_ready)
			break;
		udelay(20);
	}
	if (unlikely(!i)) {
		netdev_err(nic->netdev, "e100.mdio_ctrl won't go Ready\n");
		if (!nic->ecdev)
			spin_unlock_irqrestore(&nic->mdio_lock, flags);
		return 0;		/* No way to indicate timeout error */
	}
	iowrite32((reg << 16) | (addr << 21) | dir | data, &nic->csr->mdi_ctrl);

	/* Wait for the transaction to complete (Ready set again) */
	for (i = 0; i < 100; i++) {
		udelay(20);
		if ((data_out = ioread32(&nic->csr->mdi_ctrl)) & mdi_ready)
			break;
	}
	if (!nic->ecdev)
		spin_unlock_irqrestore(&nic->mdio_lock, flags);
	netif_printk(nic, hw, KERN_DEBUG, nic->netdev,
		     "%s:addr=%d, reg=%d, data_in=0x%04X, data_out=0x%04X\n",
		     dir == mdi_read ? "READ" : "WRITE",
		     addr, reg, data, data_out);
	return (u16)data_out;
}
       
  1039 
       
  1040 /* slightly tweaked mdio_ctrl() function for phy_82552_v specifics */
       
  1041 static u16 mdio_ctrl_phy_82552_v(struct nic *nic,
       
  1042 				 u32 addr,
       
  1043 				 u32 dir,
       
  1044 				 u32 reg,
       
  1045 				 u16 data)
       
  1046 {
       
  1047 	if ((reg == MII_BMCR) && (dir == mdi_write)) {
       
  1048 		if (data & (BMCR_ANRESTART | BMCR_ANENABLE)) {
       
  1049 			u16 advert = mdio_read(nic->netdev, nic->mii.phy_id,
       
  1050 							MII_ADVERTISE);
       
  1051 
       
  1052 			/*
       
  1053 			 * Workaround Si issue where sometimes the part will not
       
  1054 			 * autoneg to 100Mbps even when advertised.
       
  1055 			 */
       
  1056 			if (advert & ADVERTISE_100FULL)
       
  1057 				data |= BMCR_SPEED100 | BMCR_FULLDPLX;
       
  1058 			else if (advert & ADVERTISE_100HALF)
       
  1059 				data |= BMCR_SPEED100;
       
  1060 		}
       
  1061 	}
       
  1062 	return mdio_ctrl_hw(nic, addr, dir, reg, data);
       
  1063 }
       
  1064 
       
  1065 /* Fully software-emulated mdio_ctrl() function for cards without
       
  1066  * MII-compliant PHYs.
       
  1067  * For now, this is mainly geared towards 80c24 support; in case of further
       
  1068  * requirements for other types (i82503, ...?) either extend this mechanism
       
  1069  * or split it, whichever is cleaner.
       
  1070  */
       
  1071 static u16 mdio_ctrl_phy_mii_emulated(struct nic *nic,
       
  1072 				      u32 addr,
       
  1073 				      u32 dir,
       
  1074 				      u32 reg,
       
  1075 				      u16 data)
       
  1076 {
       
  1077 	/* might need to allocate a netdev_priv'ed register array eventually
       
  1078 	 * to be able to record state changes, but for now
       
  1079 	 * some fully hardcoded register handling ought to be ok I guess. */
       
  1080 
       
  1081 	if (dir == mdi_read) {
       
  1082 		switch (reg) {
       
  1083 		case MII_BMCR:
       
  1084 			/* Auto-negotiation, right? */
       
  1085 			return  BMCR_ANENABLE |
       
  1086 				BMCR_FULLDPLX;
       
  1087 		case MII_BMSR:
       
  1088 			return	BMSR_LSTATUS /* for mii_link_ok() */ |
       
  1089 				BMSR_ANEGCAPABLE |
       
  1090 				BMSR_10FULL;
       
  1091 		case MII_ADVERTISE:
       
  1092 			/* 80c24 is a "combo card" PHY, right? */
       
  1093 			return	ADVERTISE_10HALF |
       
  1094 				ADVERTISE_10FULL;
       
  1095 		default:
       
  1096 			netif_printk(nic, hw, KERN_DEBUG, nic->netdev,
       
  1097 				     "%s:addr=%d, reg=%d, data=0x%04X: unimplemented emulation!\n",
       
  1098 				     dir == mdi_read ? "READ" : "WRITE",
       
  1099 				     addr, reg, data);
       
  1100 			return 0xFFFF;
       
  1101 		}
       
  1102 	} else {
       
  1103 		switch (reg) {
       
  1104 		default:
       
  1105 			netif_printk(nic, hw, KERN_DEBUG, nic->netdev,
       
  1106 				     "%s:addr=%d, reg=%d, data=0x%04X: unimplemented emulation!\n",
       
  1107 				     dir == mdi_read ? "READ" : "WRITE",
       
  1108 				     addr, reg, data);
       
  1109 			return 0xFFFF;
       
  1110 		}
       
  1111 	}
       
  1112 }
       
/* Whether the attached PHY speaks real MII, i.e. whether mdio_ctrl is a
 * hardware path rather than the software emulation above. */
static inline int e100_phy_supports_mii(struct nic *nic)
{
	/* for now, just check it by comparing whether we
	   are using MII software emulation.
	*/
	return (nic->mdio_ctrl != mdio_ctrl_phy_mii_emulated);
}
       
  1120 
       
/* Initialize nic fields that depend only on the MAC generation and
 * compile-time defaults: ring parameter ranges, TX threshold and command
 * template, the blank RFD template and the mii-lib callbacks. */
static void e100_get_defaults(struct nic *nic)
{
	struct param_range rfds = { .min = 16, .max = 256, .count = 256 };
	struct param_range cbs  = { .min = 64, .max = 256, .count = 128 };

	/* MAC type is encoded as rev ID; exception: ICH is treated as 82559 */
	nic->mac = (nic->flags & ich) ? mac_82559_D101M : nic->pdev->revision;
	if (nic->mac == mac_unknown)
		nic->mac = mac_82557_D100_A;

	nic->params.rfds = rfds;
	nic->params.cbs = cbs;

	/* Quadwords to DMA into FIFO before starting frame transmit */
	nic->tx_threshold = 0xE0;

	/* no interrupt for every tx completion, delay = 256us if not 557 */
	nic->tx_command = cpu_to_le16(cb_tx | cb_tx_sf |
		((nic->mac >= mac_82558_D101_A4) ? cb_cid : cb_i));

	/* Template for a freshly allocated RFD */
	nic->blank_rfd.command = 0;
	nic->blank_rfd.rbd = cpu_to_le32(0xFFFFFFFF);
	nic->blank_rfd.size = cpu_to_le16(VLAN_ETH_FRAME_LEN);

	/* MII setup */
	nic->mii.phy_id_mask = 0x1F;
	nic->mii.reg_num_mask = 0x1F;
	nic->mii.dev = nic->netdev;
	nic->mii.mdio_read = mdio_read;
	nic->mii.mdio_write = mdio_write;
}
       
  1153 
       
/* cb_prepare callback: fill a configure command block with the chip's
 * configuration bytes, adjusted for promiscuous/loopback/multicast
 * flags, Wake-on-LAN state and MAC generation.  In EtherCAT mode
 * (nic->ecdev set) WoL magic packets are always disabled. */
static void e100_configure(struct nic *nic, struct cb *cb, struct sk_buff *skb)
{
	struct config *config = &cb->u.config;
	u8 *c = (u8 *)config;

	cb->command = cpu_to_le16(cb_config);

	memset(config, 0, sizeof(struct config));

	config->byte_count = 0x16;		/* bytes in this struct */
	config->rx_fifo_limit = 0x8;		/* bytes in FIFO before DMA */
	config->direct_rx_dma = 0x1;		/* reserved */
	config->standard_tcb = 0x1;		/* 1=standard, 0=extended */
	config->standard_stat_counter = 0x1;	/* 1=standard, 0=extended */
	config->rx_discard_short_frames = 0x1;	/* 1=discard, 0=pass */
	config->tx_underrun_retry = 0x3;	/* # of underrun retries */
	if (e100_phy_supports_mii(nic))
		config->mii_mode = 1;           /* 1=MII mode, 0=i82503 mode */
	config->pad10 = 0x6;
	config->no_source_addr_insertion = 0x1;	/* 1=no, 0=yes */
	config->preamble_length = 0x2;		/* 0=1, 1=3, 2=7, 3=15 bytes */
	config->ifs = 0x6;			/* x16 = inter frame spacing */
	config->ip_addr_hi = 0xF2;		/* ARP IP filter - not used */
	config->pad15_1 = 0x1;
	config->pad15_2 = 0x1;
	config->crs_or_cdt = 0x0;		/* 0=CRS only, 1=CRS or CDT */
	config->fc_delay_hi = 0x40;		/* time delay for fc frame */
	config->tx_padding = 0x1;		/* 1=pad short frames */
	config->fc_priority_threshold = 0x7;	/* 7=priority fc disabled */
	config->pad18 = 0x1;
	config->full_duplex_pin = 0x1;		/* 1=examine FDX# pin */
	config->pad20_1 = 0x1F;
	config->fc_priority_location = 0x1;	/* 1=byte#31, 0=byte#19 */
	config->pad21_1 = 0x5;

	config->adaptive_ifs = nic->adaptive_ifs;
	config->loopback = nic->loopback;

	if (nic->mii.force_media && nic->mii.full_duplex)
		config->full_duplex_force = 0x1;	/* 1=force, 0=auto */

	if (nic->flags & promiscuous || nic->loopback) {
		config->rx_save_bad_frames = 0x1;	/* 1=save, 0=discard */
		config->rx_discard_short_frames = 0x0;	/* 1=discard, 0=save */
		config->promiscuous_mode = 0x1;		/* 1=on, 0=off */
	}

	if (nic->flags & multicast_all)
		config->multicast_all = 0x1;		/* 1=accept, 0=no */

	/* disable WoL when up (always disabled in EtherCAT mode) */
	if (nic->ecdev || 
			(netif_running(nic->netdev) || !(nic->flags & wol_magic)))
		config->magic_packet_disable = 0x1;	/* 1=off, 0=on */

	if (nic->mac >= mac_82558_D101_A4) {
		config->fc_disable = 0x1;	/* 1=Tx fc off, 0=Tx fc on */
		config->mwi_enable = 0x1;	/* 1=enable, 0=disable */
		config->standard_tcb = 0x0;	/* 1=standard, 0=extended */
		config->rx_long_ok = 0x1;	/* 1=VLANs ok, 0=standard */
		if (nic->mac >= mac_82559_D101M) {
			config->tno_intr = 0x1;		/* TCO stats enable */
			/* Enable TCO in extended config */
			if (nic->mac >= mac_82551_10) {
				config->byte_count = 0x20; /* extended bytes */
				config->rx_d102_mode = 0x1; /* GMRC for TCO */
			}
		} else {
			config->standard_stat_counter = 0x0;
		}
	}

	netif_printk(nic, hw, KERN_DEBUG, nic->netdev,
		     "[00-07]=%02X:%02X:%02X:%02X:%02X:%02X:%02X:%02X\n",
		     c[0], c[1], c[2], c[3], c[4], c[5], c[6], c[7]);
	netif_printk(nic, hw, KERN_DEBUG, nic->netdev,
		     "[08-15]=%02X:%02X:%02X:%02X:%02X:%02X:%02X:%02X\n",
		     c[8], c[9], c[10], c[11], c[12], c[13], c[14], c[15]);
	netif_printk(nic, hw, KERN_DEBUG, nic->netdev,
		     "[16-23]=%02X:%02X:%02X:%02X:%02X:%02X:%02X:%02X\n",
		     c[16], c[17], c[18], c[19], c[20], c[21], c[22], c[23]);
}
       
  1236 
       
  1237 /*************************************************************************
       
  1238 *  CPUSaver parameters
       
  1239 *
       
  1240 *  All CPUSaver parameters are 16-bit literals that are part of a
       
  1241 *  "move immediate value" instruction.  By changing the value of
       
  1242 *  the literal in the instruction before the code is loaded, the
       
  1243 *  driver can change the algorithm.
       
  1244 *
       
  1245 *  INTDELAY - This loads the dead-man timer with its initial value.
       
  1246 *    When this timer expires the interrupt is asserted, and the
       
  1247 *    timer is reset each time a new packet is received.  (see
       
  1248 *    BUNDLEMAX below to set the limit on number of chained packets)
       
  1249 *    The current default is 0x600 or 1536.  Experiments show that
       
   1250 *    the value should probably stay within the 0x200 - 0x1000 range.
       
  1251 *
       
  1252 *  BUNDLEMAX -
       
  1253 *    This sets the maximum number of frames that will be bundled.  In
       
  1254 *    some situations, such as the TCP windowing algorithm, it may be
       
  1255 *    better to limit the growth of the bundle size than let it go as
       
  1256 *    high as it can, because that could cause too much added latency.
       
  1257 *    The default is six, because this is the number of packets in the
       
  1258 *    default TCP window size.  A value of 1 would make CPUSaver indicate
       
  1259 *    an interrupt for every frame received.  If you do not want to put
       
  1260 *    a limit on the bundle size, set this value to xFFFF.
       
  1261 *
       
  1262 *  BUNDLESMALL -
       
  1263 *    This contains a bit-mask describing the minimum size frame that
       
  1264 *    will be bundled.  The default masks the lower 7 bits, which means
       
  1265 *    that any frame less than 128 bytes in length will not be bundled,
       
  1266 *    but will instead immediately generate an interrupt.  This does
       
  1267 *    not affect the current bundle in any way.  Any frame that is 128
       
   1268 *    bytes or larger will be bundled normally.  This feature is meant
       
  1269 *    to provide immediate indication of ACK frames in a TCP environment.
       
  1270 *    Customers were seeing poor performance when a machine with CPUSaver
       
  1271 *    enabled was sending but not receiving.  The delay introduced when
       
  1272 *    the ACKs were received was enough to reduce total throughput, because
       
  1273 *    the sender would sit idle until the ACK was finally seen.
       
  1274 *
       
  1275 *    The current default is 0xFF80, which masks out the lower 7 bits.
       
  1276 *    This means that any frame which is x7F (127) bytes or smaller
       
  1277 *    will cause an immediate interrupt.  Because this value must be a
       
  1278 *    bit mask, there are only a few valid values that can be used.  To
       
  1279 *    turn this feature off, the driver can write the value xFFFF to the
       
  1280 *    lower word of this instruction (in the same way that the other
       
  1281 *    parameters are used).  Likewise, a value of 0xF800 (2047) would
       
  1282 *    cause an interrupt to be generated for every frame, because all
       
  1283 *    standard Ethernet frames are <= 2047 bytes in length.
       
  1284 *************************************************************************/
       
  1285 
       
  1286 /* if you wish to disable the ucode functionality, while maintaining the
       
  1287  * workarounds it provides, set the following defines to:
       
  1288  * BUNDLESMALL 0
       
  1289  * BUNDLEMAX 1
       
  1290  * INTDELAY 1
       
  1291  */
       
  1292 #define BUNDLESMALL 1
       
  1293 #define BUNDLEMAX (u16)6
       
  1294 #define INTDELAY (u16)1536 /* 0x600 */
       
  1295 
       
/* Initialize firmware */
/* Select and validate the CPUSaver microcode blob for this MAC revision.
 *
 * Returns NULL when no ucode is needed (ICH parts and unlisted MACs),
 * an ERR_PTR() on load or validation failure, or the validated firmware.
 * The validated pointer is cached in nic->fw because request_firmware()
 * cannot be called again on reinit (e.g. resume from hibernate). */
static const struct firmware *e100_request_firmware(struct nic *nic)
{
	const char *fw_name;
	const struct firmware *fw = nic->fw;
	u8 timer, bundle, min_size;
	int err = 0;

	/* do not load u-code for ICH devices */
	if (nic->flags & ich)
		return NULL;

	/* Search for ucode match against h/w revision */
	if (nic->mac == mac_82559_D101M)
		fw_name = FIRMWARE_D101M;
	else if (nic->mac == mac_82559_D101S)
		fw_name = FIRMWARE_D101S;
	else if (nic->mac == mac_82551_F || nic->mac == mac_82551_10)
		fw_name = FIRMWARE_D102E;
	else /* No ucode on other devices */
		return NULL;

	/* If the firmware has not previously been loaded, request a pointer
	 * to it. If it was previously loaded, we are reinitializing the
	 * adapter, possibly in a resume from hibernate, in which case
	 * request_firmware() cannot be used.
	 */
	if (!fw)
		err = request_firmware(&fw, fw_name, &nic->pdev->dev);

	if (err) {
		netif_err(nic, probe, nic->netdev,
			  "Failed to load firmware \"%s\": %d\n",
			  fw_name, err);
		return ERR_PTR(err);
	}

	/* Firmware should be precisely UCODE_SIZE (words) plus three bytes
	   indicating the offsets for BUNDLESMALL, BUNDLEMAX, INTDELAY */
	if (fw->size != UCODE_SIZE * 4 + 3) {
		netif_err(nic, probe, nic->netdev,
			  "Firmware \"%s\" has wrong size %zu\n",
			  fw_name, fw->size);
		release_firmware(fw);
		return ERR_PTR(-EINVAL);
	}

	/* Read timer, bundle and min_size from end of firmware blob */
	timer = fw->data[UCODE_SIZE * 4];
	bundle = fw->data[UCODE_SIZE * 4 + 1];
	min_size = fw->data[UCODE_SIZE * 4 + 2];

	/* The three offsets index into the ucode word array; reject any
	 * that fall outside it */
	if (timer >= UCODE_SIZE || bundle >= UCODE_SIZE ||
	    min_size >= UCODE_SIZE) {
		netif_err(nic, probe, nic->netdev,
			  "\"%s\" has bogus offset values (0x%x,0x%x,0x%x)\n",
			  fw_name, timer, bundle, min_size);
		release_firmware(fw);
		return ERR_PTR(-EINVAL);
	}

	/* OK, firmware is validated and ready to use. Save a pointer
	 * to it in the nic */
	nic->fw = fw;
	return fw;
}
       
  1362 
       
/* cb_prepare callback: copy the validated ucode into the command block
 * and patch the user-tunable INTDELAY/BUNDLEMAX/BUNDLESMALL literals
 * into the 16-bit immediates at the offsets stored at the blob's end. */
static void e100_setup_ucode(struct nic *nic, struct cb *cb,
			     struct sk_buff *skb)
{
	const struct firmware *fw = (void *)skb;
	u8 timer, bundle, min_size;

	/* It's not a real skb; we just abused the fact that e100_exec_cb
	   will pass it through to here... */
	cb->skb = NULL;

	/* firmware is stored as little endian already */
	memcpy(cb->u.ucode, fw->data, UCODE_SIZE * 4);

	/* Read timer, bundle and min_size from end of firmware blob */
	timer = fw->data[UCODE_SIZE * 4];
	bundle = fw->data[UCODE_SIZE * 4 + 1];
	min_size = fw->data[UCODE_SIZE * 4 + 2];

	/* Insert user-tunable settings in cb->u.ucode (lower 16 bits of
	 * the "move immediate" instruction at each patch offset) */
	cb->u.ucode[timer] &= cpu_to_le32(0xFFFF0000);
	cb->u.ucode[timer] |= cpu_to_le32(INTDELAY);
	cb->u.ucode[bundle] &= cpu_to_le32(0xFFFF0000);
	cb->u.ucode[bundle] |= cpu_to_le32(BUNDLEMAX);
	cb->u.ucode[min_size] &= cpu_to_le32(0xFFFF0000);
	cb->u.ucode[min_size] |= cpu_to_le32((BUNDLESMALL) ? 0xFFFF : 0xFF80);

	cb->command = cpu_to_le16(cb_ucode | cb_el);
}
       
  1391 
       
/* Load the CPUSaver microcode (if any) and poll for completion.
 *
 * Returns 0 on success - including the no-ucode-required case, since
 * PTR_ERR(NULL) is 0 - a negative errno from e100_request_firmware(),
 * or -EPERM when the ucode command failed or timed out. */
static inline int e100_load_ucode_wait(struct nic *nic)
{
	const struct firmware *fw;
	int err = 0, counter = 50;
	struct cb *cb = nic->cb_to_clean;

	fw = e100_request_firmware(nic);
	/* If it's NULL, then no ucode is required */
	if (!fw || IS_ERR(fw))
		return PTR_ERR(fw);

	if ((err = e100_exec_cb(nic, (void *)fw, e100_setup_ucode)))
		netif_err(nic, probe, nic->netdev,
			  "ucode cmd failed with error %d\n", err);

	/* must restart cuc */
	nic->cuc_cmd = cuc_start;

	/* wait for completion */
	e100_write_flush(nic);
	udelay(10);

	/* wait for possibly (ouch) 500ms */
	while (!(cb->status & cpu_to_le16(cb_complete))) {
		msleep(10);
		if (!--counter) break;
	}

	/* ack any interrupts, something could have been set */
	iowrite8(~0, &nic->csr->scb.stat_ack);

	/* if the command failed, or is not OK, notify and return */
	if (!counter || !(cb->status & cpu_to_le16(cb_ok))) {
		netif_err(nic, probe, nic->netdev, "ucode load failed\n");
		err = -EPERM;
	}

	return err;
}
       
  1431 
       
  1432 static void e100_setup_iaaddr(struct nic *nic, struct cb *cb,
       
  1433 	struct sk_buff *skb)
       
  1434 {
       
  1435 	cb->command = cpu_to_le16(cb_iaaddr);
       
  1436 	memcpy(cb->u.iaaddr, nic->netdev->dev_addr, ETH_ALEN);
       
  1437 }
       
  1438 
       
  1439 static void e100_dump(struct nic *nic, struct cb *cb, struct sk_buff *skb)
       
  1440 {
       
  1441 	cb->command = cpu_to_le16(cb_dump);
       
  1442 	cb->u.dump_buffer_addr = cpu_to_le32(nic->dma_addr +
       
  1443 		offsetof(struct mem, dump_buf));
       
  1444 }
       
  1445 
       
  1446 static int e100_phy_check_without_mii(struct nic *nic)
       
  1447 {
       
  1448 	u8 phy_type;
       
  1449 	int without_mii;
       
  1450 
       
  1451 	phy_type = (nic->eeprom[eeprom_phy_iface] >> 8) & 0x0f;
       
  1452 
       
  1453 	switch (phy_type) {
       
  1454 	case NoSuchPhy: /* Non-MII PHY; UNTESTED! */
       
  1455 	case I82503: /* Non-MII PHY; UNTESTED! */
       
  1456 	case S80C24: /* Non-MII PHY; tested and working */
       
  1457 		/* paragraph from the FreeBSD driver, "FXP_PHY_80C24":
       
  1458 		 * The Seeq 80c24 AutoDUPLEX(tm) Ethernet Interface Adapter
       
  1459 		 * doesn't have a programming interface of any sort.  The
       
  1460 		 * media is sensed automatically based on how the link partner
       
  1461 		 * is configured.  This is, in essence, manual configuration.
       
  1462 		 */
       
  1463 		netif_info(nic, probe, nic->netdev,
       
  1464 			   "found MII-less i82503 or 80c24 or other PHY\n");
       
  1465 
       
  1466 		nic->mdio_ctrl = mdio_ctrl_phy_mii_emulated;
       
  1467 		nic->mii.phy_id = 0; /* is this ok for an MII-less PHY? */
       
  1468 
       
  1469 		/* these might be needed for certain MII-less cards...
       
  1470 		 * nic->flags |= ich;
       
  1471 		 * nic->flags |= ich_10h_workaround; */
       
  1472 
       
  1473 		without_mii = 1;
       
  1474 		break;
       
  1475 	default:
       
  1476 		without_mii = 0;
       
  1477 		break;
       
  1478 	}
       
  1479 	return without_mii;
       
  1480 }
       
  1481 
       
#define NCONFIG_AUTO_SWITCH	0x0080
#define MII_NSC_CONG		MII_RESV1
#define NSC_CONG_ENABLE		0x0100
#define NSC_CONG_TXREADY	0x0400
#define ADVERTISE_FC_SUPPORTED	0x0400
/* Locate and initialise the PHY.
 *
 * Scans MII addresses in the order {1, 0, 2, ..., 31}; if nothing
 * responds, falls back to EEPROM-based detection of known MII-less
 * variants.  Once found, all other PHY addresses are isolated and
 * per-model quirks are applied (National tx congestion control, 82552
 * flow-control advertisement + reset, MDI/MDI-X auto-switching).
 * Returns 0 on success or -EAGAIN when no usable PHY exists. */
static int e100_phy_init(struct nic *nic)
{
	struct net_device *netdev = nic->netdev;
	u32 addr;
	u16 bmcr, stat, id_lo, id_hi, cong;

	/* Discover phy addr by searching addrs in order {1,0,2,..., 31} */
	for (addr = 0; addr < 32; addr++) {
		nic->mii.phy_id = (addr == 0) ? 1 : (addr == 1) ? 0 : addr;
		bmcr = mdio_read(netdev, nic->mii.phy_id, MII_BMCR);
		/* BMSR read twice — MII status bits are latched, so the
		 * second read reflects the current state */
		stat = mdio_read(netdev, nic->mii.phy_id, MII_BMSR);
		stat = mdio_read(netdev, nic->mii.phy_id, MII_BMSR);
		/* all-ones / all-zeros means nothing answered here */
		if (!((bmcr == 0xFFFF) || ((stat == 0) && (bmcr == 0))))
			break;
	}
	if (addr == 32) {
		/* uhoh, no PHY detected: check whether we seem to be some
		 * weird, rare variant which is *known* to not have any MII.
		 * But do this AFTER MII checking only, since this does
		 * lookup of EEPROM values which may easily be unreliable. */
		if (e100_phy_check_without_mii(nic))
			return 0; /* simply return and hope for the best */
		else {
			/* for unknown cases log a fatal error */
			netif_err(nic, hw, nic->netdev,
				  "Failed to locate any known PHY, aborting\n");
			return -EAGAIN;
		}
	} else
		netif_printk(nic, hw, KERN_DEBUG, nic->netdev,
			     "phy_addr = %d\n", nic->mii.phy_id);

	/* Get phy ID */
	id_lo = mdio_read(netdev, nic->mii.phy_id, MII_PHYSID1);
	id_hi = mdio_read(netdev, nic->mii.phy_id, MII_PHYSID2);
	nic->phy = (u32)id_hi << 16 | (u32)id_lo;
	netif_printk(nic, hw, KERN_DEBUG, nic->netdev,
		     "phy ID = 0x%08X\n", nic->phy);

	/* Select the phy and isolate the rest */
	for (addr = 0; addr < 32; addr++) {
		if (addr != nic->mii.phy_id) {
			mdio_write(netdev, addr, MII_BMCR, BMCR_ISOLATE);
		} else if (nic->phy != phy_82552_v) {
			/* keep bmcr for the non-82552 un-isolate below;
			 * for 82552 the un-isolate is deferred (see next
			 * comment) */
			bmcr = mdio_read(netdev, addr, MII_BMCR);
			mdio_write(netdev, addr, MII_BMCR,
				bmcr & ~BMCR_ISOLATE);
		}
	}
	/*
	 * Workaround for 82552:
	 * Clear the ISOLATE bit on selected phy_id last (mirrored on all
	 * other phy_id's) using bmcr value from addr discovery loop above.
	 */
	if (nic->phy == phy_82552_v)
		mdio_write(netdev, nic->mii.phy_id, MII_BMCR,
			bmcr & ~BMCR_ISOLATE);

	/* Handle National tx phys */
#define NCS_PHY_MODEL_MASK	0xFFF0FFFF
	if ((nic->phy & NCS_PHY_MODEL_MASK) == phy_nsc_tx) {
		/* Disable congestion control */
		cong = mdio_read(netdev, nic->mii.phy_id, MII_NSC_CONG);
		cong |= NSC_CONG_TXREADY;
		cong &= ~NSC_CONG_ENABLE;
		mdio_write(netdev, nic->mii.phy_id, MII_NSC_CONG, cong);
	}

	if (nic->phy == phy_82552_v) {
		u16 advert = mdio_read(netdev, nic->mii.phy_id, MII_ADVERTISE);

		/* assign special tweaked mdio_ctrl() function */
		nic->mdio_ctrl = mdio_ctrl_phy_82552_v;

		/* Workaround Si not advertising flow-control during autoneg */
		advert |= ADVERTISE_PAUSE_CAP | ADVERTISE_PAUSE_ASYM;
		mdio_write(netdev, nic->mii.phy_id, MII_ADVERTISE, advert);

		/* Reset for the above changes to take effect */
		bmcr = mdio_read(netdev, nic->mii.phy_id, MII_BMCR);
		bmcr |= BMCR_RESET;
		mdio_write(netdev, nic->mii.phy_id, MII_BMCR, bmcr);
	} else if ((nic->mac >= mac_82550_D102) || ((nic->flags & ich) &&
	   (mdio_read(netdev, nic->mii.phy_id, MII_TPISTATUS) & 0x8000) &&
		!(nic->eeprom[eeprom_cnfg_mdix] & eeprom_mdix_enabled))) {
		/* enable/disable MDI/MDI-X auto-switching. */
		mdio_write(netdev, nic->mii.phy_id, MII_NCONFIG,
				nic->mii.force_media ? 0 : NCONFIG_AUTO_SWITCH);
	}

	return 0;
}
       
  1579 
       
/* Full hardware bring-up: reset, optional self-test, PHY init, CU/RU
 * base-address load, microcode download, then the configure and
 * individual-address-setup commands, and finally arm statistics
 * dumping.  Each step aborts the sequence on failure and its negative
 * errno is returned; on success IRQs are left disabled (the caller
 * enables them when ready). */
static int e100_hw_init(struct nic *nic)
{
	int err = 0;

	e100_hw_reset(nic);

	/* NOTE(review): informational trace logged at error level
	 * (netif_err) — looks unintentional; confirm intended severity. */
	netif_err(nic, hw, nic->netdev, "e100_hw_init\n");
	/* self test only makes sense in process context */
	if (!in_interrupt() && (err = e100_self_test(nic)))
		return err;

	if ((err = e100_phy_init(nic)))
		return err;
	if ((err = e100_exec_cmd(nic, cuc_load_base, 0)))
		return err;
	if ((err = e100_exec_cmd(nic, ruc_load_base, 0)))
		return err;
	if ((err = e100_load_ucode_wait(nic)))
		return err;
	if ((err = e100_exec_cb(nic, NULL, e100_configure)))
		return err;
	if ((err = e100_exec_cb(nic, NULL, e100_setup_iaaddr)))
		return err;
	/* point the hardware's statistics dump at our stats buffer */
	if ((err = e100_exec_cmd(nic, cuc_dump_addr,
		nic->dma_addr + offsetof(struct mem, stats))))
		return err;
	if ((err = e100_exec_cmd(nic, cuc_dump_reset, 0)))
		return err;

	e100_disable_irq(nic);

	return 0;
}
       
  1612 
       
  1613 static void e100_multi(struct nic *nic, struct cb *cb, struct sk_buff *skb)
       
  1614 {
       
  1615 	struct net_device *netdev = nic->netdev;
       
  1616 	struct netdev_hw_addr *ha;
       
  1617 	u16 i, count = min(netdev_mc_count(netdev), E100_MAX_MULTICAST_ADDRS);
       
  1618 
       
  1619 	cb->command = cpu_to_le16(cb_multi);
       
  1620 	cb->u.multi.count = cpu_to_le16(count * ETH_ALEN);
       
  1621 	i = 0;
       
  1622 	netdev_for_each_mc_addr(ha, netdev) {
       
  1623 		if (i == count)
       
  1624 			break;
       
  1625 		memcpy(&cb->u.multi.addr[i++ * ETH_ALEN], &ha->addr,
       
  1626 			ETH_ALEN);
       
  1627 	}
       
  1628 }
       
  1629 
       
  1630 static void e100_set_multicast_list(struct net_device *netdev)
       
  1631 {
       
  1632 	struct nic *nic = netdev_priv(netdev);
       
  1633 
       
  1634 	netif_printk(nic, hw, KERN_DEBUG, nic->netdev,
       
  1635 		     "mc_count=%d, flags=0x%04X\n",
       
  1636 		     netdev_mc_count(netdev), netdev->flags);
       
  1637 
       
  1638 	if (netdev->flags & IFF_PROMISC)
       
  1639 		nic->flags |= promiscuous;
       
  1640 	else
       
  1641 		nic->flags &= ~promiscuous;
       
  1642 
       
  1643 	if (netdev->flags & IFF_ALLMULTI ||
       
  1644 		netdev_mc_count(netdev) > E100_MAX_MULTICAST_ADDRS)
       
  1645 		nic->flags |= multicast_all;
       
  1646 	else
       
  1647 		nic->flags &= ~multicast_all;
       
  1648 
       
  1649 	e100_exec_cb(nic, NULL, e100_configure);
       
  1650 	e100_exec_cb(nic, NULL, e100_multi);
       
  1651 }
       
  1652 
       
/* Fold the controller's statistics dump buffer into the netdev stats.
 *
 * The dump runs asynchronously: we consume the results of the previous
 * cuc_dump_reset command here (only if its completion marker is set)
 * and then kick off a new dump+reset.  The completion marker's location
 * within struct stats depends on the MAC generation, because newer MACs
 * append extra counters (flow-control since 82558 D101 A4, TCO frames
 * since 82559 D101M). */
static void e100_update_stats(struct nic *nic)
{
	struct net_device *dev = nic->netdev;
	struct net_device_stats *ns = &dev->stats;
	struct stats *s = &nic->mem->stats;
	/* pick the generation-dependent completion marker word */
	__le32 *complete = (nic->mac < mac_82558_D101_A4) ? &s->fc_xmt_pause :
		(nic->mac < mac_82559_D101M) ? (__le32 *)&s->xmt_tco_frames :
		&s->complete;

	/* Device's stats reporting may take several microseconds to
	 * complete, so we're always waiting for results of the
	 * previous command. */

	if (*complete == cpu_to_le32(cuc_dump_reset_complete)) {
		*complete = 0;
		nic->tx_frames = le32_to_cpu(s->tx_good_frames);
		nic->tx_collisions = le32_to_cpu(s->tx_total_collisions);
		ns->tx_aborted_errors += le32_to_cpu(s->tx_max_collisions);
		ns->tx_window_errors += le32_to_cpu(s->tx_late_collisions);
		ns->tx_carrier_errors += le32_to_cpu(s->tx_lost_crs);
		ns->tx_fifo_errors += le32_to_cpu(s->tx_underruns);
		ns->collisions += nic->tx_collisions;
		ns->tx_errors += le32_to_cpu(s->tx_max_collisions) +
			le32_to_cpu(s->tx_lost_crs);
		ns->rx_length_errors += le32_to_cpu(s->rx_short_frame_errors) +
			nic->rx_over_length_errors;
		ns->rx_crc_errors += le32_to_cpu(s->rx_crc_errors);
		ns->rx_frame_errors += le32_to_cpu(s->rx_alignment_errors);
		ns->rx_over_errors += le32_to_cpu(s->rx_overrun_errors);
		ns->rx_fifo_errors += le32_to_cpu(s->rx_overrun_errors);
		ns->rx_missed_errors += le32_to_cpu(s->rx_resource_errors);
		ns->rx_errors += le32_to_cpu(s->rx_crc_errors) +
			le32_to_cpu(s->rx_alignment_errors) +
			le32_to_cpu(s->rx_short_frame_errors) +
			le32_to_cpu(s->rx_cdt_errors);
		nic->tx_deferred += le32_to_cpu(s->tx_deferred);
		nic->tx_single_collisions +=
			le32_to_cpu(s->tx_single_collisions);
		nic->tx_multiple_collisions +=
			le32_to_cpu(s->tx_multiple_collisions);
		/* extra counters exist only on newer MAC generations */
		if (nic->mac >= mac_82558_D101_A4) {
			nic->tx_fc_pause += le32_to_cpu(s->fc_xmt_pause);
			nic->rx_fc_pause += le32_to_cpu(s->fc_rcv_pause);
			nic->rx_fc_unsupported +=
				le32_to_cpu(s->fc_rcv_unsupported);
			if (nic->mac >= mac_82559_D101M) {
				nic->tx_tco_frames +=
					le16_to_cpu(s->xmt_tco_frames);
				nic->rx_tco_frames +=
					le16_to_cpu(s->rcv_tco_frames);
			}
		}
	}


	/* start the next dump cycle */
	if (e100_exec_cmd(nic, cuc_dump_reset, 0))
		netif_printk(nic, tx_err, KERN_DEBUG, nic->netdev,
			     "exec cuc_dump_reset failed\n");
}
       
  1712 
       
  1713 static void e100_adjust_adaptive_ifs(struct nic *nic, int speed, int duplex)
       
  1714 {
       
  1715 	/* Adjust inter-frame-spacing (IFS) between two transmits if
       
  1716 	 * we're getting collisions on a half-duplex connection. */
       
  1717 
       
  1718 	if (duplex == DUPLEX_HALF) {
       
  1719 		u32 prev = nic->adaptive_ifs;
       
  1720 		u32 min_frames = (speed == SPEED_100) ? 1000 : 100;
       
  1721 
       
  1722 		if ((nic->tx_frames / 32 < nic->tx_collisions) &&
       
  1723 		   (nic->tx_frames > min_frames)) {
       
  1724 			if (nic->adaptive_ifs < 60)
       
  1725 				nic->adaptive_ifs += 5;
       
  1726 		} else if (nic->tx_frames < min_frames) {
       
  1727 			if (nic->adaptive_ifs >= 5)
       
  1728 				nic->adaptive_ifs -= 5;
       
  1729 		}
       
  1730 		if (nic->adaptive_ifs != prev)
       
  1731 			e100_exec_cb(nic, NULL, e100_configure);
       
  1732 	}
       
  1733 }
       
  1734 
       
/* Periodic housekeeping timer.
 *
 * For EtherCAT devices only the MII link state is mirrored to the
 * master and everything else is skipped (no netdev queue, stats or
 * timer re-arm).  In native mode: report link transitions, fire a
 * software interrupt to recover from rare rx-allocation failures,
 * refresh stats, adjust adaptive IFS, apply the 557 lockup and ICH
 * 10/half workarounds, and re-arm the timer. */
static void e100_watchdog(unsigned long data)
{
	struct nic *nic = (struct nic *)data;
	struct ethtool_cmd cmd = { .cmd = ETHTOOL_GSET };
	u32 speed;

	/* EtherCAT mode: just keep the master's link state current */
	if (nic->ecdev) {
		ecdev_set_link(nic->ecdev, mii_link_ok(&nic->mii) ? 1 : 0);
		return;
	}

	netif_printk(nic, timer, KERN_DEBUG, nic->netdev,
		     "right now = %ld\n", jiffies);

	/* mii library handles link maintenance tasks */

	mii_ethtool_gset(&nic->mii, &cmd);
	speed = ethtool_cmd_speed(&cmd);

	if (mii_link_ok(&nic->mii) && !netif_carrier_ok(nic->netdev)) {
		netdev_info(nic->netdev, "NIC Link is Up %u Mbps %s Duplex\n",
			    speed == SPEED_100 ? 100 : 10,
			    cmd.duplex == DUPLEX_FULL ? "Full" : "Half");
	} else if (!mii_link_ok(&nic->mii) && netif_carrier_ok(nic->netdev)) {
		netdev_info(nic->netdev, "NIC Link is Down\n");
	}

	mii_check_link(&nic->mii);

	/* Software generated interrupt to recover from (rare) Rx
	 * allocation failure.
	 * Unfortunately have to use a spinlock to not re-enable interrupts
	 * accidentally, due to hardware that shares a register between the
	 * interrupt mask bit and the SW Interrupt generation bit */
	spin_lock_irq(&nic->cmd_lock);
	iowrite8(ioread8(&nic->csr->scb.cmd_hi) | irq_sw_gen,&nic->csr->scb.cmd_hi);
	e100_write_flush(nic);
	spin_unlock_irq(&nic->cmd_lock);

	e100_update_stats(nic);
	e100_adjust_adaptive_ifs(nic, speed, cmd.duplex);

	if (nic->mac <= mac_82557_D100_C)
		/* Issue a multicast command to workaround a 557 lock up */
		e100_set_multicast_list(nic->netdev);

	if (nic->flags & ich && speed == SPEED_10 && cmd.duplex == DUPLEX_HALF)
		/* Need SW workaround for ICH[x] 10Mbps/half duplex Tx hang. */
		nic->flags |= ich_10h_workaround;
	else
		nic->flags &= ~ich_10h_workaround;

	mod_timer(&nic->watchdog,
		  round_jiffies(jiffies + E100_WATCHDOG_PERIOD));
}
       
  1790 
       
/* cb_prepare handler for transmit: fill in a transmit command block
 * (TCB) with a single TBD describing the DMA-mapped skb data. */
static void e100_xmit_prepare(struct nic *nic, struct cb *cb,
	struct sk_buff *skb)
{
	cb->command = nic->tx_command;
	/* interrupt every 16 packets regardless of delay */
	/* (cbs_avail & ~15) == cbs_avail iff cbs_avail is a multiple
	 * of 16 */
	if ((nic->cbs_avail & ~15) == nic->cbs_avail)
		cb->command |= cpu_to_le16(cb_i);
	cb->u.tcb.tbd_array = cb->dma_addr + offsetof(struct cb, u.tcb.tbd);
	cb->u.tcb.tcb_byte_count = 0;
	cb->u.tcb.threshold = nic->tx_threshold;
	cb->u.tcb.tbd_count = 1;
	cb->u.tcb.tbd.buf_addr = cpu_to_le32(pci_map_single(nic->pdev,
		skb->data, skb->len, PCI_DMA_TODEVICE));
	/* check for mapping failure? */
	/* NOTE(review): pci_map_single() result is not checked with
	 * pci_dma_mapping_error(); a failed mapping would hand the
	 * hardware a bad address — known TODO, cannot be fixed here
	 * because this prepare callback returns void. */
	cb->u.tcb.tbd.size = cpu_to_le16(skb->len);
}
       
  1807 
       
/* ndo_start_xmit handler: queue one skb as a transmit CB.
 *
 * Error protocol from e100_exec_cb():
 *   -ENOSPC — the skb WAS queued but the ring is now full, so stop the
 *             queue (native mode only) and report success;
 *   -ENOMEM — the skb was NOT queued; stop the queue and return
 *             NETDEV_TX_BUSY so the stack retries. */
static netdev_tx_t e100_xmit_frame(struct sk_buff *skb,
				   struct net_device *netdev)
{
	struct nic *nic = netdev_priv(netdev);
	int err;

	if (nic->flags & ich_10h_workaround) {
		/* SW workaround for ICH[x] 10Mbps/half duplex Tx hang.
		   Issue a NOP command followed by a 1us delay before
		   issuing the Tx command. */
		if (e100_exec_cmd(nic, cuc_nop, 0))
			netif_printk(nic, tx_err, KERN_DEBUG, nic->netdev,
				     "exec cuc_nop failed\n");
		udelay(1);
	}

	err = e100_exec_cb(nic, skb, e100_xmit_prepare);

	switch (err) {
	case -ENOSPC:
		/* We queued the skb, but now we're out of space. */
		netif_printk(nic, tx_err, KERN_DEBUG, nic->netdev,
			     "No space for CB\n");
		/* EtherCAT devices have no netdev queue to stop */
		if (!nic->ecdev)
			netif_stop_queue(netdev);
		break;
	case -ENOMEM:
		/* This is a hard error - log it. */
		netif_printk(nic, tx_err, KERN_DEBUG, nic->netdev,
			     "Out of Tx resources, returning skb\n");
		if (!nic->ecdev)
			netif_stop_queue(netdev);
		return NETDEV_TX_BUSY;
	}

	return NETDEV_TX_OK;
}
       
  1845 
       
/* Reclaim transmit CBs the hardware has marked complete: unmap each
 * skb's DMA buffer, account tx stats, free the skb (native mode only —
 * in EtherCAT mode the frame memory is owned by the master), and wake
 * the queue if xmit had stopped it.  Returns nonzero when at least one
 * tx skb was cleaned.  Lock-free in EtherCAT mode since the master
 * serialises access. */
static int e100_tx_clean(struct nic *nic)
{
	struct net_device *dev = nic->netdev;
	struct cb *cb;
	int tx_cleaned = 0;

	if (!nic->ecdev)
		spin_lock(&nic->cb_lock);

	/* Clean CBs marked complete */
	for (cb = nic->cb_to_clean;
	    cb->status & cpu_to_le16(cb_complete);
	    cb = nic->cb_to_clean = cb->next) {
		rmb(); /* read skb after status */
		netif_printk(nic, tx_done, KERN_DEBUG, nic->netdev,
			     "cb[%d]->status = 0x%04X\n",
			     (int)(((void*)cb - (void*)nic->cbs)/sizeof(struct cb)),
			     cb->status);

		/* non-transmit CBs (configure, iaaddr, ...) have no skb */
		if (likely(cb->skb != NULL)) {
			dev->stats.tx_packets++;
			dev->stats.tx_bytes += cb->skb->len;

			pci_unmap_single(nic->pdev,
				le32_to_cpu(cb->u.tcb.tbd.buf_addr),
				le16_to_cpu(cb->u.tcb.tbd.size),
				PCI_DMA_TODEVICE);
			if (!nic->ecdev)
				dev_kfree_skb_any(cb->skb);
			cb->skb = NULL;
			tx_cleaned = 1;
		}
		cb->status = 0;
		nic->cbs_avail++;
	}

	if (!nic->ecdev) {
		spin_unlock(&nic->cb_lock);

		/* Recover from running out of Tx resources in xmit_frame */
		if (unlikely(tx_cleaned && netif_queue_stopped(nic->netdev)))
			netif_wake_queue(nic->netdev);
	}

	return tx_cleaned;
}
       
  1892 
       
/* Tear down the CB ring: walk any entries the hardware never completed,
 * unmapping and freeing their tx skbs, then return the ring memory to
 * the PCI pool and reset all ring pointers/counters to a pristine
 * (empty) state. */
static void e100_clean_cbs(struct nic *nic)
{
	if (nic->cbs) {
		/* cbs_avail == count means every entry is reclaimed */
		while (nic->cbs_avail != nic->params.cbs.count) {
			struct cb *cb = nic->cb_to_clean;
			if (cb->skb) {
				pci_unmap_single(nic->pdev,
					le32_to_cpu(cb->u.tcb.tbd.buf_addr),
					le16_to_cpu(cb->u.tcb.tbd.size),
					PCI_DMA_TODEVICE);
				/* EtherCAT frames are owned by the master */
				if (!nic->ecdev)
					dev_kfree_skb(cb->skb);
			}
			nic->cb_to_clean = nic->cb_to_clean->next;
			nic->cbs_avail++;
		}
		pci_pool_free(nic->cbs_pool, nic->cbs, nic->cbs_dma_addr);
		nic->cbs = NULL;
		nic->cbs_avail = 0;
	}
	nic->cuc_cmd = cuc_start;
	/* nic->cbs is NULL here, so all ring pointers become NULL */
	nic->cb_to_use = nic->cb_to_send = nic->cb_to_clean =
		nic->cbs;
}
       
  1917 
       
/* Allocate the circular command-block ring from the PCI pool and link
 * it up: software prev/next pointers plus the hardware 'link' field
 * holding each successor's DMA address.  Returns 0 on success or
 * -ENOMEM.  On success all ring cursors point at the first entry and
 * cbs_avail equals the full ring size. */
static int e100_alloc_cbs(struct nic *nic)
{
	struct cb *cb;
	unsigned int i, count = nic->params.cbs.count;

	/* start from a known-empty state in case allocation fails */
	nic->cuc_cmd = cuc_start;
	nic->cb_to_use = nic->cb_to_send = nic->cb_to_clean = NULL;
	nic->cbs_avail = 0;

	nic->cbs = pci_pool_alloc(nic->cbs_pool, GFP_KERNEL,
				  &nic->cbs_dma_addr);
	if (!nic->cbs)
		return -ENOMEM;
	memset(nic->cbs, 0, count * sizeof(struct cb));

	for (cb = nic->cbs, i = 0; i < count; cb++, i++) {
		/* wrap both software links and the hardware DMA link */
		cb->next = (i + 1 < count) ? cb + 1 : nic->cbs;
		cb->prev = (i == 0) ? nic->cbs + count - 1 : cb - 1;

		cb->dma_addr = nic->cbs_dma_addr + i * sizeof(struct cb);
		cb->link = cpu_to_le32(nic->cbs_dma_addr +
			((i+1) % count) * sizeof(struct cb));
	}

	nic->cb_to_use = nic->cb_to_send = nic->cb_to_clean = nic->cbs;
	nic->cbs_avail = count;

	return 0;
}
       
  1947 
       
  1948 static inline void e100_start_receiver(struct nic *nic, struct rx *rx)
       
  1949 {
       
  1950 	if (!nic->rxs) return;
       
  1951 	if (RU_SUSPENDED != nic->ru_running) return;
       
  1952 
       
  1953 	/* handle init time starts */
       
  1954 	if (!rx) rx = nic->rxs;
       
  1955 
       
  1956 	/* (Re)start RU if suspended or idle and RFA is non-NULL */
       
  1957 	if (rx->skb) {
       
  1958 		e100_exec_cmd(nic, ruc_start, rx->dma_addr);
       
  1959 		nic->ru_running = RU_RUNNING;
       
  1960 	}
       
  1961 }
       
  1962 
       
/* Each rx buffer holds the RFD header followed by a max-size frame */
#define RFD_BUF_LEN (sizeof(struct rfd) + VLAN_ETH_FRAME_LEN)
/* Allocate and DMA-map one receive buffer, seed it with the blank RFD
 * template, and splice it onto the end of the RFA by patching the
 * previous RFD's link field.  Returns 0 or -ENOMEM. */
static int e100_rx_alloc_skb(struct nic *nic, struct rx *rx)
{
	if (!(rx->skb = netdev_alloc_skb_ip_align(nic->netdev, RFD_BUF_LEN)))
		return -ENOMEM;

	/* Init, and map the RFD. */
	skb_copy_to_linear_data(rx->skb, &nic->blank_rfd, sizeof(struct rfd));
	rx->dma_addr = pci_map_single(nic->pdev, rx->skb->data,
		RFD_BUF_LEN, PCI_DMA_BIDIRECTIONAL);

	if (pci_dma_mapping_error(nic->pdev, rx->dma_addr)) {
		dev_kfree_skb_any(rx->skb);
		rx->skb = NULL;
		rx->dma_addr = 0;
		return -ENOMEM;
	}

	/* Link the RFD to end of RFA by linking previous RFD to
	 * this one.  We are safe to touch the previous RFD because
	 * it is protected by the before last buffer's el bit being set */
	if (rx->prev->skb) {
		struct rfd *prev_rfd = (struct rfd *)rx->prev->skb->data;
		/* unaligned store: link lives inside packet data */
		put_unaligned_le32(rx->dma_addr, &prev_rfd->link);
		pci_dma_sync_single_for_device(nic->pdev, rx->prev->dma_addr,
			sizeof(struct rfd), PCI_DMA_BIDIRECTIONAL);
	}

	return 0;
}
       
  1993 
       
/* Examine one RFD and, if the hardware finished it, hand the frame up.
 *
 * Returns -EAGAIN when the work quota is exhausted, -ENODATA when this
 * RFD is not yet complete, otherwise 0.  In native mode the skb is
 * consumed (passed to the stack or freed) and rx->skb is cleared so
 * e100_rx_clean() refills the slot; in EtherCAT mode the payload is
 * copied out via ecdev_receive() and the very same skb/RFD is re-armed
 * in place. */
static int e100_rx_indicate(struct nic *nic, struct rx *rx,
	unsigned int *work_done, unsigned int work_to_do)
{
	struct net_device *dev = nic->netdev;
	struct sk_buff *skb = rx->skb;
	struct rfd *rfd = (struct rfd *)skb->data;
	u16 rfd_status, actual_size;

	if (unlikely(work_done && *work_done >= work_to_do))
		return -EAGAIN;

	/* Need to sync before taking a peek at cb_complete bit */
	pci_dma_sync_single_for_cpu(nic->pdev, rx->dma_addr,
		sizeof(struct rfd), PCI_DMA_BIDIRECTIONAL);
	rfd_status = le16_to_cpu(rfd->status);

	netif_printk(nic, rx_status, KERN_DEBUG, nic->netdev,
		     "status=0x%04X\n", rfd_status);
	rmb(); /* read size after status bit */

	/* If data isn't ready, nothing to indicate */
	if (unlikely(!(rfd_status & cb_complete))) {
		/* If the next buffer has the el bit, but we think the receiver
		 * is still running, check to see if it really stopped while
		 * we had interrupts off.
		 * This allows for a fast restart without re-enabling
		 * interrupts */
		if ((le16_to_cpu(rfd->command) & cb_el) &&
		    (RU_RUNNING == nic->ru_running))

			if (ioread8(&nic->csr->scb.status) & rus_no_res)
				nic->ru_running = RU_SUSPENDED;
		/* give the RFD back to the device untouched */
		pci_dma_sync_single_for_device(nic->pdev, rx->dma_addr,
					       sizeof(struct rfd),
					       PCI_DMA_FROMDEVICE);
		return -ENODATA;
	}

	/* Get actual data size */
	/* low 14 bits are the byte count; clamp defensively */
	actual_size = le16_to_cpu(rfd->actual_size) & 0x3FFF;
	if (unlikely(actual_size > RFD_BUF_LEN - sizeof(struct rfd)))
		actual_size = RFD_BUF_LEN - sizeof(struct rfd);

	/* Get data */
	pci_unmap_single(nic->pdev, rx->dma_addr,
		RFD_BUF_LEN, PCI_DMA_BIDIRECTIONAL);

	/* If this buffer has the el bit, but we think the receiver
	 * is still running, check to see if it really stopped while
	 * we had interrupts off.
	 * This allows for a fast restart without re-enabling interrupts.
	 * This can happen when the RU sees the size change but also sees
	 * the el bit set. */
	if ((le16_to_cpu(rfd->command) & cb_el) &&
	    (RU_RUNNING == nic->ru_running)) {

	    if (ioread8(&nic->csr->scb.status) & rus_no_res)
		nic->ru_running = RU_SUSPENDED;
	}

	if (!nic->ecdev) {
		/* Pull off the RFD and put the actual data (minus eth hdr) */
		skb_reserve(skb, sizeof(struct rfd));
		skb_put(skb, actual_size);
		skb->protocol = eth_type_trans(skb, nic->netdev);
	}

	if (unlikely(!(rfd_status & cb_ok))) {
		if (!nic->ecdev) {
			/* Don't indicate if hardware indicates errors */
			dev_kfree_skb_any(skb);
		}
	} else if (actual_size > ETH_DATA_LEN + VLAN_ETH_HLEN) {
		/* Don't indicate oversized frames */
		nic->rx_over_length_errors++;
		if (!nic->ecdev)
			dev_kfree_skb_any(skb);
	} else {
		dev->stats.rx_packets++;
		dev->stats.rx_bytes += actual_size;
		if (nic->ecdev) {
			/* hand the raw payload (past the RFD header) to
			 * the EtherCAT master */
			ecdev_receive(nic->ecdev,
					skb->data + sizeof(struct rfd), actual_size);

			// No need to detect link status as
			// long as frames are received: Reset watchdog.
			nic->ec_watchdog_jiffies = jiffies;
		} else {
			netif_receive_skb(skb);
		}
		if (work_done)
			(*work_done)++;
	}

	if (nic->ecdev) {
		// make receive frame descriptior usable again
		memcpy(skb->data, &nic->blank_rfd, sizeof(struct rfd));
		rx->dma_addr = pci_map_single(nic->pdev, skb->data,
				RFD_BUF_LEN, PCI_DMA_BIDIRECTIONAL);
		if (pci_dma_mapping_error(nic->pdev, rx->dma_addr)) {
			rx->dma_addr = 0;
		}

		/* Link the RFD to end of RFA by linking previous RFD to
		 * this one.  We are safe to touch the previous RFD because
		 * it is protected by the before last buffer's el bit being set */
		if (rx->prev->skb) {
			struct rfd *prev_rfd = (struct rfd *)rx->prev->skb->data;
			put_unaligned_le32(rx->dma_addr, &prev_rfd->link);
			/* NOTE(review): syncs with PCI_DMA_TODEVICE here,
			 * while e100_rx_alloc_skb uses BIDIRECTIONAL for
			 * the same operation — confirm the asymmetry is
			 * intentional. */
			pci_dma_sync_single_for_device(nic->pdev, rx->prev->dma_addr,
					sizeof(struct rfd), PCI_DMA_TODEVICE);
		}
	} else {
		/* slot now empty; e100_rx_clean() will refill it */
		rx->skb = NULL;
	}

	return 0;
}
       
  2112 
       
/* Indicate received frames up the stack (or hand them to the EtherCAT
 * master), then refill and re-arm the RX ring.  In EtherCAT mode
 * (nic->ecdev) the skbs stay bound to the ring, so no reallocation is
 * done here.  work_done may be NULL (EtherCAT polling path);
 * work_to_do is the NAPI budget. */
static void e100_rx_clean(struct nic *nic, unsigned int *work_done,
	unsigned int work_to_do)
{
	struct rx *rx;
	int restart_required = 0, err = 0;
	struct rx *old_before_last_rx, *new_before_last_rx;
	struct rfd *old_before_last_rfd, *new_before_last_rfd;

	/* Indicate newly arrived packets */
	for (rx = nic->rx_to_clean; rx->skb; rx = nic->rx_to_clean = rx->next) {
		err = e100_rx_indicate(nic, rx, work_done, work_to_do);
		/* Hit quota or no more to clean */
		if (-EAGAIN == err || -ENODATA == err)
			break;
	}


	/* On EAGAIN, hit quota so have more work to do, restart once
	 * cleanup is complete.
	 * Else, are we already rnr? then pay attention!!! this ensures that
	 * the state machine progression never allows a start with a
	 * partially cleaned list, avoiding a race between hardware
	 * and rx_to_clean when in NAPI mode */
	if (-EAGAIN != err && RU_SUSPENDED == nic->ru_running)
		restart_required = 1;

	/* NOTE(review): dereferencing ->skb->data here assumes the two
	 * buffers before rx_to_use always have skbs attached — holds as
	 * long as the refill loop below (or the fixed EtherCAT ring)
	 * keeps the ring populated. */
	old_before_last_rx = nic->rx_to_use->prev->prev;
	old_before_last_rfd = (struct rfd *)old_before_last_rx->skb->data;

	if (!nic->ecdev) {
		/* Alloc new skbs to refill list */
		for(rx = nic->rx_to_use; !rx->skb; rx = nic->rx_to_use = rx->next) {
			if(unlikely(e100_rx_alloc_skb(nic, rx)))
				break; /* Better luck next time (see watchdog) */
		}
	}

	new_before_last_rx = nic->rx_to_use->prev->prev;
	if (new_before_last_rx != old_before_last_rx) {
		/* Set the el-bit on the buffer that is before the last buffer.
		 * This lets us update the next pointer on the last buffer
		 * without worrying about hardware touching it.
		 * We set the size to 0 to prevent hardware from touching this
		 * buffer.
		 * When the hardware hits the before last buffer with el-bit
		 * and size of 0, it will RNR interrupt, the RUS will go into
		 * the No Resources state.  It will not complete nor write to
		 * this buffer. */
		new_before_last_rfd =
			(struct rfd *)new_before_last_rx->skb->data;
		new_before_last_rfd->size = 0;
		new_before_last_rfd->command |= cpu_to_le16(cb_el);
		pci_dma_sync_single_for_device(nic->pdev,
			new_before_last_rx->dma_addr, sizeof(struct rfd),
			PCI_DMA_BIDIRECTIONAL);

		/* Now that we have a new stopping point, we can clear the old
		 * stopping point.  We must sync twice to get the proper
		 * ordering on the hardware side of things. */
		old_before_last_rfd->command &= ~cpu_to_le16(cb_el);
		pci_dma_sync_single_for_device(nic->pdev,
			old_before_last_rx->dma_addr, sizeof(struct rfd),
			PCI_DMA_BIDIRECTIONAL);
		old_before_last_rfd->size = cpu_to_le16(VLAN_ETH_FRAME_LEN);
		pci_dma_sync_single_for_device(nic->pdev,
			old_before_last_rx->dma_addr, sizeof(struct rfd),
			PCI_DMA_BIDIRECTIONAL);
	}

	if (restart_required) {
		// ack the rnr?
		iowrite8(stat_ack_rnr, &nic->csr->scb.stat_ack);
		e100_start_receiver(nic, nic->rx_to_clean);
		if (work_done)
			(*work_done)++;
	}
}
       
  2190 
       
  2191 static void e100_rx_clean_list(struct nic *nic)
       
  2192 {
       
  2193 	struct rx *rx;
       
  2194 	unsigned int i, count = nic->params.rfds.count;
       
  2195 
       
  2196 	nic->ru_running = RU_UNINITIALIZED;
       
  2197 
       
  2198 	if (nic->rxs) {
       
  2199 		for (rx = nic->rxs, i = 0; i < count; rx++, i++) {
       
  2200 			if (rx->skb) {
       
  2201 				pci_unmap_single(nic->pdev, rx->dma_addr,
       
  2202 					RFD_BUF_LEN, PCI_DMA_BIDIRECTIONAL);
       
  2203 				dev_kfree_skb(rx->skb);
       
  2204 			}
       
  2205 		}
       
  2206 		kfree(nic->rxs);
       
  2207 		nic->rxs = NULL;
       
  2208 	}
       
  2209 
       
  2210 	nic->rx_to_use = nic->rx_to_clean = NULL;
       
  2211 }
       
  2212 
       
/* Allocate the circular RX descriptor ring: one struct rx per RFD,
 * doubly linked into a ring, each with a freshly mapped skb.  In
 * non-EtherCAT mode the before-last buffer is marked end-of-list so
 * the hardware stops cleanly before the software catch-up point.
 * Returns 0 on success, -ENOMEM on any allocation failure (the ring
 * is fully cleaned up before returning an error). */
static int e100_rx_alloc_list(struct nic *nic)
{
	struct rx *rx;
	unsigned int i, count = nic->params.rfds.count;
	struct rfd *before_last;

	nic->rx_to_use = nic->rx_to_clean = NULL;
	nic->ru_running = RU_UNINITIALIZED;

	if (!(nic->rxs = kcalloc(count, sizeof(struct rx), GFP_ATOMIC)))
		return -ENOMEM;

	/* Wire each entry into a circular doubly linked list. */
	for (rx = nic->rxs, i = 0; i < count; rx++, i++) {
		rx->next = (i + 1 < count) ? rx + 1 : nic->rxs;
		rx->prev = (i == 0) ? nic->rxs + count - 1 : rx - 1;
		if (e100_rx_alloc_skb(nic, rx)) {
			e100_rx_clean_list(nic);
			return -ENOMEM;
		}
	}

	if (!nic->ecdev) {
		/* Set the el-bit on the buffer that is before the last buffer.
		 * This lets us update the next pointer on the last buffer without
		 * worrying about hardware touching it.
		 * We set the size to 0 to prevent hardware from touching this buffer.
		 * When the hardware hits the before last buffer with el-bit and size
		 * of 0, it will RNR interrupt, the RU will go into the No Resources
		 * state.  It will not complete nor write to this buffer. */
		rx = nic->rxs->prev->prev;
		before_last = (struct rfd *)rx->skb->data;
		before_last->command |= cpu_to_le16(cb_el);
		before_last->size = 0;
		pci_dma_sync_single_for_device(nic->pdev, rx->dma_addr,
				sizeof(struct rfd), PCI_DMA_BIDIRECTIONAL);
	}

	nic->rx_to_use = nic->rx_to_clean = nic->rxs;
	nic->ru_running = RU_SUSPENDED;

	return 0;
}
       
  2255 
       
/* Interrupt handler.  Acks all pending causes in one write, records a
 * pending RNR condition for e100_rx_clean(), and schedules NAPI.  In
 * EtherCAT mode no NAPI is scheduled — RX/TX processing happens in
 * e100_ec_poll() driven by the master's cycle. */
static irqreturn_t e100_intr(int irq, void *dev_id)
{
	struct net_device *netdev = dev_id;
	struct nic *nic = netdev_priv(netdev);
	u8 stat_ack = ioread8(&nic->csr->scb.stat_ack);

	netif_printk(nic, intr, KERN_DEBUG, nic->netdev,
		     "stat_ack = 0x%02X\n", stat_ack);

	if (stat_ack == stat_ack_not_ours ||	/* Not our interrupt */
	   stat_ack == stat_ack_not_present)	/* Hardware is ejected */
		return IRQ_NONE;

	/* Ack interrupt(s) */
	iowrite8(stat_ack, &nic->csr->scb.stat_ack);

	/* We hit Receive No Resource (RNR); restart RU after cleaning */
	if (stat_ack & stat_ack_rnr)
		nic->ru_running = RU_SUSPENDED;

	if (!nic->ecdev && likely(napi_schedule_prep(&nic->napi))) {
		e100_disable_irq(nic);
		__napi_schedule(&nic->napi);
	}

	return IRQ_HANDLED;
}
       
  2283 
       
  2284 void e100_ec_poll(struct net_device *netdev)
       
  2285 {
       
  2286 	struct nic *nic = netdev_priv(netdev);
       
  2287 
       
  2288 	e100_rx_clean(nic, NULL, 100);
       
  2289 	e100_tx_clean(nic);
       
  2290 
       
  2291 	if (jiffies - nic->ec_watchdog_jiffies >= 2 * HZ) {
       
  2292 		e100_watchdog((unsigned long) nic);
       
  2293 		nic->ec_watchdog_jiffies = jiffies;
       
  2294 	}
       
  2295 }
       
  2296 
       
  2297 
       
  2298 static int e100_poll(struct napi_struct *napi, int budget)
       
  2299 {
       
  2300 	struct nic *nic = container_of(napi, struct nic, napi);
       
  2301 	unsigned int work_done = 0;
       
  2302 
       
  2303 	e100_rx_clean(nic, &work_done, budget);
       
  2304 	e100_tx_clean(nic);
       
  2305 
       
  2306 	/* If budget not fully consumed, exit the polling mode */
       
  2307 	if (work_done < budget) {
       
  2308 		napi_complete(napi);
       
  2309 		e100_enable_irq(nic);
       
  2310 	}
       
  2311 
       
  2312 	return work_done;
       
  2313 }
       
  2314 
       
  2315 #ifdef CONFIG_NET_POLL_CONTROLLER
       
/* netpoll controller: run the interrupt handler with interrupts
 * bracketed off so netconsole & co. can make progress without a real
 * IRQ being delivered. */
static void e100_netpoll(struct net_device *netdev)
{
	struct nic *nic = netdev_priv(netdev);

	e100_disable_irq(nic);
	e100_intr(nic->pdev->irq, netdev);
	e100_tx_clean(nic);
	e100_enable_irq(nic);
}
       
  2325 #endif
       
  2326 
       
  2327 static int e100_set_mac_address(struct net_device *netdev, void *p)
       
  2328 {
       
  2329 	struct nic *nic = netdev_priv(netdev);
       
  2330 	struct sockaddr *addr = p;
       
  2331 
       
  2332 	if (!is_valid_ether_addr(addr->sa_data))
       
  2333 		return -EADDRNOTAVAIL;
       
  2334 
       
  2335 	memcpy(netdev->dev_addr, addr->sa_data, netdev->addr_len);
       
  2336 	e100_exec_cb(nic, NULL, e100_setup_iaaddr);
       
  2337 
       
  2338 	return 0;
       
  2339 }
       
  2340 
       
  2341 static int e100_change_mtu(struct net_device *netdev, int new_mtu)
       
  2342 {
       
  2343 	if (new_mtu < ETH_ZLEN || new_mtu > ETH_DATA_LEN)
       
  2344 		return -EINVAL;
       
  2345 	netdev->mtu = new_mtu;
       
  2346 	return 0;
       
  2347 }
       
  2348 
       
/* Report whether ASF (Alert Standard Format) management firmware is
 * active: only device IDs 0x1050..0x1057 support it, and the EEPROM
 * config word must have ASF enabled and GCL disabled.
 * NOTE(review): the 0xFE SMBus-address test presumably means "no
 * SMBus device programmed" — confirm against the 8255x EEPROM map. */
static int e100_asf(struct nic *nic)
{
	/* ASF can be enabled from eeprom */
	return (nic->pdev->device >= 0x1050) && (nic->pdev->device <= 0x1057) &&
	   (nic->eeprom[eeprom_config_asf] & eeprom_asf) &&
	   !(nic->eeprom[eeprom_config_asf] & eeprom_gcl) &&
	   ((nic->eeprom[eeprom_smbus_addr] & 0xFF) != 0xFE);
}
       
  2357 
       
/* Bring the interface up: allocate RX ring and command blocks,
 * initialize the hardware, start the receiver and claim the IRQ.
 * EtherCAT-attached devices skip the watchdog timer, queue wakeup,
 * NAPI and IRQ enable — the master polls instead.  Uses goto-based
 * unwind so each failure releases exactly what was acquired. */
static int e100_up(struct nic *nic)
{
	int err;

	if ((err = e100_rx_alloc_list(nic)))
		return err;
	if ((err = e100_alloc_cbs(nic)))
		goto err_rx_clean_list;
	if ((err = e100_hw_init(nic)))
		goto err_clean_cbs;
	e100_set_multicast_list(nic->netdev);
	e100_start_receiver(nic, NULL);
	if (!nic->ecdev) {
		mod_timer(&nic->watchdog, jiffies);
	}
	if ((err = request_irq(nic->pdev->irq, e100_intr, IRQF_SHARED,
		nic->netdev->name, nic->netdev)))
		goto err_no_irq;
	if (!nic->ecdev) {
		netif_wake_queue(nic->netdev);
		napi_enable(&nic->napi);
		/* enable ints _after_ enabling poll, preventing a race between
		 * disable ints+schedule */
		e100_enable_irq(nic);
	}
	return 0;

err_no_irq:
	if (!nic->ecdev)
		del_timer_sync(&nic->watchdog);
err_clean_cbs:
	e100_clean_cbs(nic);
err_rx_clean_list:
	e100_rx_clean_list(nic);
	return err;
}
       
  2394 
       
/* Bring the interface down, reversing e100_up(): stop NAPI and the
 * queue first (non-EtherCAT only) so no new work arrives, reset the
 * hardware, release the IRQ, then free command blocks and RX ring.
 * The ordering matters: the hardware must be quiesced before the
 * DMA-mapped rings are torn down. */
static void e100_down(struct nic *nic)
{
	if (!nic->ecdev) {
		/* wait here for poll to complete */
		napi_disable(&nic->napi);
		netif_stop_queue(nic->netdev);
	}
	e100_hw_reset(nic);
	free_irq(nic->pdev->irq, nic->netdev);
	if (!nic->ecdev) {
		del_timer_sync(&nic->watchdog);
		netif_carrier_off(nic->netdev);
	}
	e100_clean_cbs(nic);
	e100_rx_clean_list(nic);
}
       
  2411 
       
  2412 static void e100_tx_timeout(struct net_device *netdev)
       
  2413 {
       
  2414 	struct nic *nic = netdev_priv(netdev);
       
  2415 
       
  2416 	/* Reset outside of interrupt context, to avoid request_irq
       
  2417 	 * in interrupt context */
       
  2418 	schedule_work(&nic->tx_timeout_task);
       
  2419 }
       
  2420 
       
/* Deferred TX-timeout recovery (scheduled by e100_tx_timeout): log
 * the SCB status and bounce the interface under the RTNL lock.  The
 * netif_running() check guards against the device having been closed
 * between scheduling and execution. */
static void e100_tx_timeout_task(struct work_struct *work)
{
	struct nic *nic = container_of(work, struct nic, tx_timeout_task);
	struct net_device *netdev = nic->netdev;

	netif_printk(nic, tx_err, KERN_DEBUG, nic->netdev,
		     "scb.status=0x%02X\n", ioread8(&nic->csr->scb.status));

	rtnl_lock();
	if (netif_running(netdev)) {
		e100_down(netdev_priv(netdev));
		e100_up(netdev_priv(netdev));
	}
	rtnl_unlock();
}
       
  2436 
       
/* Run an internal MAC or PHY loopback self-test.
 *
 * Returns 0 when the single transmitted test frame is received back
 * byte-for-byte identical, -EAGAIN on a data mismatch, or a negative
 * errno on setup failure.  The hardware is reset and all rings are
 * freed on every exit path. */
static int e100_loopback_test(struct nic *nic, enum loopback loopback_mode)
{
	int err;
	struct sk_buff *skb;

	/* Use driver resources to perform internal MAC or PHY
	 * loopback test.  A single packet is prepared and transmitted
	 * in loopback mode, and the test passes if the received
	 * packet compares byte-for-byte to the transmitted packet. */

	if ((err = e100_rx_alloc_list(nic)))
		return err;
	if ((err = e100_alloc_cbs(nic)))
		goto err_clean_rx;

	/* ICH PHY loopback is broken so do MAC loopback instead */
	if (nic->flags & ich && loopback_mode == lb_phy)
		loopback_mode = lb_mac;

	nic->loopback = loopback_mode;
	if ((err = e100_hw_init(nic)))
		goto err_loopback_none;

	if (loopback_mode == lb_phy)
		mdio_write(nic->netdev, nic->mii.phy_id, MII_BMCR,
			BMCR_LOOPBACK);

	e100_start_receiver(nic, NULL);

	if (!(skb = netdev_alloc_skb(nic->netdev, ETH_DATA_LEN))) {
		err = -ENOMEM;
		goto err_loopback_none;
	}
	/* All-0xFF payload: easy to spot in a hex dump and exercises
	 * every bit line. */
	skb_put(skb, ETH_DATA_LEN);
	memset(skb->data, 0xFF, ETH_DATA_LEN);
	e100_xmit_frame(skb, nic->netdev);

	/* Give the frame time to loop back before inspecting the ring. */
	msleep(10);

	pci_dma_sync_single_for_cpu(nic->pdev, nic->rx_to_clean->dma_addr,
			RFD_BUF_LEN, PCI_DMA_BIDIRECTIONAL);

	if (memcmp(nic->rx_to_clean->skb->data + sizeof(struct rfd),
	   skb->data, ETH_DATA_LEN))
		err = -EAGAIN;

err_loopback_none:
	mdio_write(nic->netdev, nic->mii.phy_id, MII_BMCR, 0);
	nic->loopback = lb_none;
	e100_clean_cbs(nic);
	e100_hw_reset(nic);
err_clean_rx:
	e100_rx_clean_list(nic);
	return err;
}
       
  2492 
       
/* PHY LED control: MII_LED_CONTROL is the generic LED register used
 * by most supported PHYs; the E100_82552_* values are specific to the
 * 82552 PHY (see e100_set_phys_id()). */
#define MII_LED_CONTROL	0x1B
#define E100_82552_LED_OVERRIDE 0x19
#define E100_82552_LED_ON       0x000F /* LEDTX and LED_RX both on */
#define E100_82552_LED_OFF      0x000A /* LEDTX and LED_RX both off */
       
  2497 
       
  2498 static int e100_get_settings(struct net_device *netdev, struct ethtool_cmd *cmd)
       
  2499 {
       
  2500 	struct nic *nic = netdev_priv(netdev);
       
  2501 	return mii_ethtool_gset(&nic->mii, cmd);
       
  2502 }
       
  2503 
       
/* ethtool set_settings: reset the PHY first so the new
 * speed/duplex/autoneg values take effect from a clean state, apply
 * them via the generic MII helper, then push a matching configure
 * command block to the MAC. */
static int e100_set_settings(struct net_device *netdev, struct ethtool_cmd *cmd)
{
	struct nic *nic = netdev_priv(netdev);
	int err;

	mdio_write(netdev, nic->mii.phy_id, MII_BMCR, BMCR_RESET);
	err = mii_ethtool_sset(&nic->mii, cmd);
	e100_exec_cb(nic, NULL, e100_configure);

	return err;
}
       
  2515 
       
  2516 static void e100_get_drvinfo(struct net_device *netdev,
       
  2517 	struct ethtool_drvinfo *info)
       
  2518 {
       
  2519 	struct nic *nic = netdev_priv(netdev);
       
  2520 	strcpy(info->driver, DRV_NAME);
       
  2521 	strcpy(info->version, DRV_VERSION);
       
  2522 	strcpy(info->fw_version, "N/A");
       
  2523 	strcpy(info->bus_info, pci_name(nic->pdev));
       
  2524 }
       
  2525 
       
  2526 #define E100_PHY_REGS 0x1C
       
  2527 static int e100_get_regs_len(struct net_device *netdev)
       
  2528 {
       
  2529 	struct nic *nic = netdev_priv(netdev);
       
  2530 	return 1 + E100_PHY_REGS + sizeof(nic->mem->dump_buf);
       
  2531 }
       
  2532 
       
/* ethtool get_regs: dump device state into p.
 * Layout (u32 units): [0] SCB cmd_hi/cmd_lo/status,
 * [1 .. 1+E100_PHY_REGS] MII PHY registers E100_PHY_REGS down to 0,
 * then nic->mem->dump_buf starting at index 2+E100_PHY_REGS (filled
 * by a hardware dump command; the msleep gives it time to complete). */
static void e100_get_regs(struct net_device *netdev,
	struct ethtool_regs *regs, void *p)
{
	struct nic *nic = netdev_priv(netdev);
	u32 *buff = p;
	int i;

	regs->version = (1 << 24) | nic->pdev->revision;
	buff[0] = ioread8(&nic->csr->scb.cmd_hi) << 24 |
		ioread8(&nic->csr->scb.cmd_lo) << 16 |
		ioread16(&nic->csr->scb.status);
	for (i = E100_PHY_REGS; i >= 0; i--)
		buff[1 + E100_PHY_REGS - i] =
			mdio_read(netdev, nic->mii.phy_id, i);
	memset(nic->mem->dump_buf, 0, sizeof(nic->mem->dump_buf));
	e100_exec_cb(nic, NULL, e100_dump);
	msleep(10);
	memcpy(&buff[2 + E100_PHY_REGS], nic->mem->dump_buf,
		sizeof(nic->mem->dump_buf));
}
       
  2553 
       
  2554 static void e100_get_wol(struct net_device *netdev, struct ethtool_wolinfo *wol)
       
  2555 {
       
  2556 	struct nic *nic = netdev_priv(netdev);
       
  2557 	wol->supported = (nic->mac >= mac_82558_D101_A4) ?  WAKE_MAGIC : 0;
       
  2558 	wol->wolopts = (nic->flags & wol_magic) ? WAKE_MAGIC : 0;
       
  2559 }
       
  2560 
       
  2561 static int e100_set_wol(struct net_device *netdev, struct ethtool_wolinfo *wol)
       
  2562 {
       
  2563 	struct nic *nic = netdev_priv(netdev);
       
  2564 
       
  2565 	if ((wol->wolopts && wol->wolopts != WAKE_MAGIC) ||
       
  2566 	    !device_can_wakeup(&nic->pdev->dev))
       
  2567 		return -EOPNOTSUPP;
       
  2568 
       
  2569 	if (wol->wolopts)
       
  2570 		nic->flags |= wol_magic;
       
  2571 	else
       
  2572 		nic->flags &= ~wol_magic;
       
  2573 
       
  2574 	device_set_wakeup_enable(&nic->pdev->dev, wol->wolopts);
       
  2575 
       
  2576 	e100_exec_cb(nic, NULL, e100_configure);
       
  2577 
       
  2578 	return 0;
       
  2579 }
       
  2580 
       
  2581 static u32 e100_get_msglevel(struct net_device *netdev)
       
  2582 {
       
  2583 	struct nic *nic = netdev_priv(netdev);
       
  2584 	return nic->msg_enable;
       
  2585 }
       
  2586 
       
  2587 static void e100_set_msglevel(struct net_device *netdev, u32 value)
       
  2588 {
       
  2589 	struct nic *nic = netdev_priv(netdev);
       
  2590 	nic->msg_enable = value;
       
  2591 }
       
  2592 
       
  2593 static int e100_nway_reset(struct net_device *netdev)
       
  2594 {
       
  2595 	struct nic *nic = netdev_priv(netdev);
       
  2596 	return mii_nway_restart(&nic->mii);
       
  2597 }
       
  2598 
       
  2599 static u32 e100_get_link(struct net_device *netdev)
       
  2600 {
       
  2601 	struct nic *nic = netdev_priv(netdev);
       
  2602 	return mii_link_ok(&nic->mii);
       
  2603 }
       
  2604 
       
  2605 static int e100_get_eeprom_len(struct net_device *netdev)
       
  2606 {
       
  2607 	struct nic *nic = netdev_priv(netdev);
       
  2608 	return nic->eeprom_wc << 1;
       
  2609 }
       
  2610 
       
#define E100_EEPROM_MAGIC	0x1234
/* ethtool get_eeprom: copy from the driver's cached EEPROM image.
 * NOTE(review): offset/len are presumably validated against
 * get_eeprom_len() by the ethtool core before this is called —
 * confirm for this kernel version. */
static int e100_get_eeprom(struct net_device *netdev,
	struct ethtool_eeprom *eeprom, u8 *bytes)
{
	struct nic *nic = netdev_priv(netdev);

	eeprom->magic = E100_EEPROM_MAGIC;
	memcpy(bytes, &((u8 *)nic->eeprom)[eeprom->offset], eeprom->len);

	return 0;
}
       
  2622 
       
/* ethtool set_eeprom: patch the cached EEPROM image, then write it
 * back to the device.  e100_eeprom_save() takes word offsets/counts;
 * the "+ 1" rounds the word count up so a byte range with an odd
 * offset or length still covers its trailing partial word. */
static int e100_set_eeprom(struct net_device *netdev,
	struct ethtool_eeprom *eeprom, u8 *bytes)
{
	struct nic *nic = netdev_priv(netdev);

	if (eeprom->magic != E100_EEPROM_MAGIC)
		return -EINVAL;

	memcpy(&((u8 *)nic->eeprom)[eeprom->offset], bytes, eeprom->len);

	return e100_eeprom_save(nic, eeprom->offset >> 1,
		(eeprom->len >> 1) + 1);
}
       
  2636 
       
  2637 static void e100_get_ringparam(struct net_device *netdev,
       
  2638 	struct ethtool_ringparam *ring)
       
  2639 {
       
  2640 	struct nic *nic = netdev_priv(netdev);
       
  2641 	struct param_range *rfds = &nic->params.rfds;
       
  2642 	struct param_range *cbs = &nic->params.cbs;
       
  2643 
       
  2644 	ring->rx_max_pending = rfds->max;
       
  2645 	ring->tx_max_pending = cbs->max;
       
  2646 	ring->rx_mini_max_pending = 0;
       
  2647 	ring->rx_jumbo_max_pending = 0;
       
  2648 	ring->rx_pending = rfds->count;
       
  2649 	ring->tx_pending = cbs->count;
       
  2650 	ring->rx_mini_pending = 0;
       
  2651 	ring->rx_jumbo_pending = 0;
       
  2652 }
       
  2653 
       
  2654 static int e100_set_ringparam(struct net_device *netdev,
       
  2655 	struct ethtool_ringparam *ring)
       
  2656 {
       
  2657 	struct nic *nic = netdev_priv(netdev);
       
  2658 	struct param_range *rfds = &nic->params.rfds;
       
  2659 	struct param_range *cbs = &nic->params.cbs;
       
  2660 
       
  2661 	if ((ring->rx_mini_pending) || (ring->rx_jumbo_pending))
       
  2662 		return -EINVAL;
       
  2663 
       
  2664 	if (netif_running(netdev))
       
  2665 		e100_down(nic);
       
  2666 	rfds->count = max(ring->rx_pending, rfds->min);
       
  2667 	rfds->count = min(rfds->count, rfds->max);
       
  2668 	cbs->count = max(ring->tx_pending, cbs->min);
       
  2669 	cbs->count = min(cbs->count, cbs->max);
       
  2670 	netif_info(nic, drv, nic->netdev, "Ring Param settings: rx: %d, tx %d\n",
       
  2671 		   rfds->count, cbs->count);
       
  2672 	if (netif_running(netdev))
       
  2673 		e100_up(nic);
       
  2674 
       
  2675 	return 0;
       
  2676 }
       
  2677 
       
/* Self-test names reported via ETH_SS_TEST; the order must match the
 * data[] slots filled in e100_diag_test(). */
static const char e100_gstrings_test[][ETH_GSTRING_LEN] = {
	"Link test     (on/offline)",
	"Eeprom test   (on/offline)",
	"Self test        (offline)",
	"Mac loopback     (offline)",
	"Phy loopback     (offline)",
};
#define E100_TEST_LEN	ARRAY_SIZE(e100_gstrings_test)
       
  2686 
       
/* ethtool self_test: run link and EEPROM checks always; with
 * ETH_TEST_FL_OFFLINE also run the hardware self-test and MAC/PHY
 * loopback tests, bouncing the interface around them.  Each data[]
 * slot is non-zero on failure (order matches e100_gstrings_test). */
static void e100_diag_test(struct net_device *netdev,
	struct ethtool_test *test, u64 *data)
{
	struct ethtool_cmd cmd;
	struct nic *nic = netdev_priv(netdev);
	int i, err;

	memset(data, 0, E100_TEST_LEN * sizeof(u64));
	data[0] = !mii_link_ok(&nic->mii);
	data[1] = e100_eeprom_load(nic);
	if (test->flags & ETH_TEST_FL_OFFLINE) {

		/* save speed, duplex & autoneg settings */
		/* NOTE(review): err is deliberately not propagated — the
		 * gset/sset pair only saves and restores link settings. */
		err = mii_ethtool_gset(&nic->mii, &cmd);

		if (netif_running(netdev))
			e100_down(nic);
		data[2] = e100_self_test(nic);
		data[3] = e100_loopback_test(nic, lb_mac);
		data[4] = e100_loopback_test(nic, lb_phy);

		/* restore speed, duplex & autoneg settings */
		err = mii_ethtool_sset(&nic->mii, &cmd);

		if (netif_running(netdev))
			e100_up(nic);
	}
	for (i = 0; i < E100_TEST_LEN; i++)
		test->flags |= data[i] ? ETH_TEST_FL_FAILED : 0;

	/* NOTE(review): presumably gives the link time to renegotiate
	 * after the offline tests before returning — confirm. */
	msleep_interruptible(4 * 1000);
}
       
  2719 
       
/* ethtool set_phys_id: blink the port LEDs for identification by
 * writing the PHY's LED control register.  The 82552 PHY uses its own
 * override register and values; other PHYs use MII_LED_CONTROL, with
 * the "on" value depending on MAC generation.  Returning 2 for
 * ETHTOOL_ID_ACTIVE asks the ethtool core to cycle ON/OFF at 2 Hz. */
static int e100_set_phys_id(struct net_device *netdev,
			    enum ethtool_phys_id_state state)
{
	struct nic *nic = netdev_priv(netdev);
	enum led_state {
		led_on     = 0x01,
		led_off    = 0x04,
		led_on_559 = 0x05,
		led_on_557 = 0x07,
	};
	u16 led_reg = (nic->phy == phy_82552_v) ? E100_82552_LED_OVERRIDE :
		MII_LED_CONTROL;
	u16 leds = 0;

	switch (state) {
	case ETHTOOL_ID_ACTIVE:
		return 2;

	case ETHTOOL_ID_ON:
		leds = (nic->phy == phy_82552_v) ? E100_82552_LED_ON :
		       (nic->mac < mac_82559_D101M) ? led_on_557 : led_on_559;
		break;

	case ETHTOOL_ID_OFF:
		leds = (nic->phy == phy_82552_v) ? E100_82552_LED_OFF : led_off;
		break;

	case ETHTOOL_ID_INACTIVE:
		/* leds == 0 restores normal (hardware-driven) LED behavior. */
		break;
	}

	mdio_write(netdev, nic->mii.phy_id, led_reg, leds);
	return 0;
}
       
  2754 
       
/* Statistics names reported via ETH_SS_STATS.  The first
 * E100_NET_STATS_LEN (21) entries mirror the generic net_device stats
 * copied field-by-field in e100_get_ethtool_stats(); the rest are
 * driver-private counters appended in the same order there. */
static const char e100_gstrings_stats[][ETH_GSTRING_LEN] = {
	"rx_packets", "tx_packets", "rx_bytes", "tx_bytes", "rx_errors",
	"tx_errors", "rx_dropped", "tx_dropped", "multicast", "collisions",
	"rx_length_errors", "rx_over_errors", "rx_crc_errors",
	"rx_frame_errors", "rx_fifo_errors", "rx_missed_errors",
	"tx_aborted_errors", "tx_carrier_errors", "tx_fifo_errors",
	"tx_heartbeat_errors", "tx_window_errors",
	/* device-specific stats */
	"tx_deferred", "tx_single_collisions", "tx_multi_collisions",
	"tx_flow_control_pause", "rx_flow_control_pause",
	"rx_flow_control_unsupported", "tx_tco_packets", "rx_tco_packets",
};
#define E100_NET_STATS_LEN	21
#define E100_STATS_LEN	ARRAY_SIZE(e100_gstrings_stats)
       
  2769 
       
  2770 static int e100_get_sset_count(struct net_device *netdev, int sset)
       
  2771 {
       
  2772 	switch (sset) {
       
  2773 	case ETH_SS_TEST:
       
  2774 		return E100_TEST_LEN;
       
  2775 	case ETH_SS_STATS:
       
  2776 		return E100_STATS_LEN;
       
  2777 	default:
       
  2778 		return -EOPNOTSUPP;
       
  2779 	}
       
  2780 }
       
  2781 
       
/* ethtool get_ethtool_stats: fill data[] in e100_gstrings_stats
 * order — first the generic netdev counters, then the driver-private
 * ones.  NOTE(review): the indexed cast treats netdev->stats as an
 * array of unsigned long; this relies on struct net_device_stats
 * being exactly E100_NET_STATS_LEN consecutive unsigned long fields
 * in the string-table order — confirm against the kernel headers. */
static void e100_get_ethtool_stats(struct net_device *netdev,
	struct ethtool_stats *stats, u64 *data)
{
	struct nic *nic = netdev_priv(netdev);
	int i;

	for (i = 0; i < E100_NET_STATS_LEN; i++)
		data[i] = ((unsigned long *)&netdev->stats)[i];

	data[i++] = nic->tx_deferred;
	data[i++] = nic->tx_single_collisions;
	data[i++] = nic->tx_multiple_collisions;
	data[i++] = nic->tx_fc_pause;
	data[i++] = nic->rx_fc_pause;
	data[i++] = nic->rx_fc_unsupported;
	data[i++] = nic->tx_tco_frames;
	data[i++] = nic->rx_tco_frames;
}
       
  2800 
       
  2801 static void e100_get_strings(struct net_device *netdev, u32 stringset, u8 *data)
       
  2802 {
       
  2803 	switch (stringset) {
       
  2804 	case ETH_SS_TEST:
       
  2805 		memcpy(data, *e100_gstrings_test, sizeof(e100_gstrings_test));
       
  2806 		break;
       
  2807 	case ETH_SS_STATS:
       
  2808 		memcpy(data, *e100_gstrings_stats, sizeof(e100_gstrings_stats));
       
  2809 		break;
       
  2810 	}
       
  2811 }
       
  2812 
       
/* ethtool operations table; hooked up via SET_ETHTOOL_OPS() in
 * e100_probe(). */
static const struct ethtool_ops e100_ethtool_ops = {
	.get_settings		= e100_get_settings,
	.set_settings		= e100_set_settings,
	.get_drvinfo		= e100_get_drvinfo,
	.get_regs_len		= e100_get_regs_len,
	.get_regs		= e100_get_regs,
	.get_wol		= e100_get_wol,
	.set_wol		= e100_set_wol,
	.get_msglevel		= e100_get_msglevel,
	.set_msglevel		= e100_set_msglevel,
	.nway_reset		= e100_nway_reset,
	.get_link		= e100_get_link,
	.get_eeprom_len		= e100_get_eeprom_len,
	.get_eeprom		= e100_get_eeprom,
	.set_eeprom		= e100_set_eeprom,
	.get_ringparam		= e100_get_ringparam,
	.set_ringparam		= e100_set_ringparam,
	.self_test		= e100_diag_test,
	.get_strings		= e100_get_strings,
	.set_phys_id		= e100_set_phys_id,
	.get_ethtool_stats	= e100_get_ethtool_stats,
	.get_sset_count		= e100_get_sset_count,
};
       
  2836 
       
  2837 static int e100_do_ioctl(struct net_device *netdev, struct ifreq *ifr, int cmd)
       
  2838 {
       
  2839 	struct nic *nic = netdev_priv(netdev);
       
  2840 
       
  2841 	return generic_mii_ioctl(&nic->mii, if_mii(ifr), cmd, NULL);
       
  2842 }
       
  2843 
       
  2844 static int e100_alloc(struct nic *nic)
       
  2845 {
       
  2846 	nic->mem = pci_alloc_consistent(nic->pdev, sizeof(struct mem),
       
  2847 		&nic->dma_addr);
       
  2848 	return nic->mem ? 0 : -ENOMEM;
       
  2849 }
       
  2850 
       
  2851 static void e100_free(struct nic *nic)
       
  2852 {
       
  2853 	if (nic->mem) {
       
  2854 		pci_free_consistent(nic->pdev, sizeof(struct mem),
       
  2855 			nic->mem, nic->dma_addr);
       
  2856 		nic->mem = NULL;
       
  2857 	}
       
  2858 }
       
  2859 
       
  2860 static int e100_open(struct net_device *netdev)
       
  2861 {
       
  2862 	struct nic *nic = netdev_priv(netdev);
       
  2863 	int err = 0;
       
  2864 
       
  2865 	if (!nic->ecdev)
       
  2866 		netif_carrier_off(netdev);
       
  2867 	if ((err = e100_up(nic)))
       
  2868 		netif_err(nic, ifup, nic->netdev, "Cannot open interface, aborting\n");
       
  2869 	return err;
       
  2870 }
       
  2871 
       
  2872 static int e100_close(struct net_device *netdev)
       
  2873 {
       
  2874 	e100_down(netdev_priv(netdev));
       
  2875 	return 0;
       
  2876 }
       
  2877 
       
/* Network-stack entry points, installed on netdev->netdev_ops in
 * e100_probe(). NOTE(review): when the EtherCAT master claims the device,
 * register_netdev() is skipped in e100_probe(), so the stack presumably
 * never invokes these for EtherCAT-owned NICs — confirm against ecdev. */
static const struct net_device_ops e100_netdev_ops = {
	.ndo_open		= e100_open,
	.ndo_stop		= e100_close,
	.ndo_start_xmit		= e100_xmit_frame,
	.ndo_validate_addr	= eth_validate_addr,
	.ndo_set_multicast_list	= e100_set_multicast_list,
	.ndo_set_mac_address	= e100_set_mac_address,
	.ndo_change_mtu		= e100_change_mtu,
	.ndo_do_ioctl		= e100_do_ioctl,
	.ndo_tx_timeout		= e100_tx_timeout,
#ifdef CONFIG_NET_POLL_CONTROLLER
	.ndo_poll_controller	= e100_netpoll,
#endif
};
       
  2892 
       
  2893 static int __devinit e100_probe(struct pci_dev *pdev,
       
  2894 	const struct pci_device_id *ent)
       
  2895 {
       
  2896 	struct net_device *netdev;
       
  2897 	struct nic *nic;
       
  2898 	int err;
       
  2899 
       
  2900 	if (!(netdev = alloc_etherdev(sizeof(struct nic)))) {
       
  2901 		if (((1 << debug) - 1) & NETIF_MSG_PROBE)
       
  2902 			pr_err("Etherdev alloc failed, aborting\n");
       
  2903 		return -ENOMEM;
       
  2904 	}
       
  2905 
       
  2906 	netdev->netdev_ops = &e100_netdev_ops;
       
  2907 	SET_ETHTOOL_OPS(netdev, &e100_ethtool_ops);
       
  2908 	netdev->watchdog_timeo = E100_WATCHDOG_PERIOD;
       
  2909 	strncpy(netdev->name, pci_name(pdev), sizeof(netdev->name) - 1);
       
  2910 
       
  2911 	nic = netdev_priv(netdev);
       
  2912 	netif_napi_add(netdev, &nic->napi, e100_poll, E100_NAPI_WEIGHT);
       
  2913 	nic->netdev = netdev;
       
  2914 	nic->pdev = pdev;
       
  2915 	nic->msg_enable = (1 << debug) - 1;
       
  2916 	nic->mdio_ctrl = mdio_ctrl_hw;
       
  2917 	pci_set_drvdata(pdev, netdev);
       
  2918 
       
  2919 	if ((err = pci_enable_device(pdev))) {
       
  2920 		netif_err(nic, probe, nic->netdev, "Cannot enable PCI device, aborting\n");
       
  2921 		goto err_out_free_dev;
       
  2922 	}
       
  2923 
       
  2924 	if (!(pci_resource_flags(pdev, 0) & IORESOURCE_MEM)) {
       
  2925 		netif_err(nic, probe, nic->netdev, "Cannot find proper PCI device base address, aborting\n");
       
  2926 		err = -ENODEV;
       
  2927 		goto err_out_disable_pdev;
       
  2928 	}
       
  2929 
       
  2930 	if ((err = pci_request_regions(pdev, DRV_NAME))) {
       
  2931 		netif_err(nic, probe, nic->netdev, "Cannot obtain PCI resources, aborting\n");
       
  2932 		goto err_out_disable_pdev;
       
  2933 	}
       
  2934 
       
  2935 	if ((err = pci_set_dma_mask(pdev, DMA_BIT_MASK(32)))) {
       
  2936 		netif_err(nic, probe, nic->netdev, "No usable DMA configuration, aborting\n");
       
  2937 		goto err_out_free_res;
       
  2938 	}
       
  2939 
       
  2940 	SET_NETDEV_DEV(netdev, &pdev->dev);
       
  2941 
       
  2942 	if (use_io)
       
  2943 		netif_info(nic, probe, nic->netdev, "using i/o access mode\n");
       
  2944 
       
  2945 	nic->csr = pci_iomap(pdev, (use_io ? 1 : 0), sizeof(struct csr));
       
  2946 	if (!nic->csr) {
       
  2947 		netif_err(nic, probe, nic->netdev, "Cannot map device registers, aborting\n");
       
  2948 		err = -ENOMEM;
       
  2949 		goto err_out_free_res;
       
  2950 	}
       
  2951 
       
  2952 	if (ent->driver_data)
       
  2953 		nic->flags |= ich;
       
  2954 	else
       
  2955 		nic->flags &= ~ich;
       
  2956 
       
  2957 	e100_get_defaults(nic);
       
  2958 
       
  2959 	/* locks must be initialized before calling hw_reset */
       
  2960 	spin_lock_init(&nic->cb_lock);
       
  2961 	spin_lock_init(&nic->cmd_lock);
       
  2962 	spin_lock_init(&nic->mdio_lock);
       
  2963 
       
  2964 	/* Reset the device before pci_set_master() in case device is in some
       
  2965 	 * funky state and has an interrupt pending - hint: we don't have the
       
  2966 	 * interrupt handler registered yet. */
       
  2967 	e100_hw_reset(nic);
       
  2968 
       
  2969 	pci_set_master(pdev);
       
  2970 
       
  2971 	init_timer(&nic->watchdog);
       
  2972 	nic->watchdog.function = e100_watchdog;
       
  2973 	nic->watchdog.data = (unsigned long)nic;
       
  2974 
       
  2975 	INIT_WORK(&nic->tx_timeout_task, e100_tx_timeout_task);
       
  2976 
       
  2977 	if ((err = e100_alloc(nic))) {
       
  2978 		netif_err(nic, probe, nic->netdev, "Cannot alloc driver memory, aborting\n");
       
  2979 		goto err_out_iounmap;
       
  2980 	}
       
  2981 
       
  2982 	if ((err = e100_eeprom_load(nic)))
       
  2983 		goto err_out_free;
       
  2984 
       
  2985 	e100_phy_init(nic);
       
  2986 
       
  2987 	memcpy(netdev->dev_addr, nic->eeprom, ETH_ALEN);
       
  2988 	memcpy(netdev->perm_addr, nic->eeprom, ETH_ALEN);
       
  2989 	if (!is_valid_ether_addr(netdev->perm_addr)) {
       
  2990 		if (!eeprom_bad_csum_allow) {
       
  2991 			netif_err(nic, probe, nic->netdev, "Invalid MAC address from EEPROM, aborting\n");
       
  2992 			err = -EAGAIN;
       
  2993 			goto err_out_free;
       
  2994 		} else {
       
  2995 			netif_err(nic, probe, nic->netdev, "Invalid MAC address from EEPROM, you MUST configure one.\n");
       
  2996 		}
       
  2997 	}
       
  2998 
       
  2999 	/* Wol magic packet can be enabled from eeprom */
       
  3000 	if ((nic->mac >= mac_82558_D101_A4) &&
       
  3001 	   (nic->eeprom[eeprom_id] & eeprom_id_wol)) {
       
  3002 		nic->flags |= wol_magic;
       
  3003 		device_set_wakeup_enable(&pdev->dev, true);
       
  3004 	}
       
  3005 
       
  3006 	/* ack any pending wake events, disable PME */
       
  3007 	pci_pme_active(pdev, false);
       
  3008 
       
  3009 	// offer device to EtherCAT master module
       
  3010 	nic->ecdev = ecdev_offer(netdev, e100_ec_poll, THIS_MODULE);
       
  3011 
       
  3012 	if (!nic->ecdev) {
       
  3013 		strcpy(netdev->name, "eth%d");
       
  3014 		if ((err = register_netdev(netdev))) {
       
  3015 			netif_err(nic, probe, nic->netdev,
       
  3016 					"Cannot register net device, aborting\n");
       
  3017 			goto err_out_free;
       
  3018 		}
       
  3019 	}
       
  3020 
       
  3021 	nic->cbs_pool = pci_pool_create(netdev->name,
       
  3022 			   nic->pdev,
       
  3023 			   nic->params.cbs.max * sizeof(struct cb),
       
  3024 			   sizeof(u32),
       
  3025 			   0);
       
  3026 	netif_info(nic, probe, nic->netdev,
       
  3027 		   "addr 0x%llx, irq %d, MAC addr %pM\n",
       
  3028 		   (unsigned long long)pci_resource_start(pdev, use_io ? 1 : 0),
       
  3029 		   pdev->irq, netdev->dev_addr);
       
  3030 
       
  3031 	if (nic->ecdev) {
       
  3032 		if (ecdev_open(nic->ecdev)) {
       
  3033 			ecdev_withdraw(nic->ecdev);
       
  3034 			goto err_out_free;
       
  3035 		}
       
  3036 	}
       
  3037 
       
  3038 	return 0;
       
  3039 
       
  3040 err_out_free:
       
  3041 	e100_free(nic);
       
  3042 err_out_iounmap:
       
  3043 	pci_iounmap(pdev, nic->csr);
       
  3044 err_out_free_res:
       
  3045 	pci_release_regions(pdev);
       
  3046 err_out_disable_pdev:
       
  3047 	pci_disable_device(pdev);
       
  3048 err_out_free_dev:
       
  3049 	pci_set_drvdata(pdev, NULL);
       
  3050 	free_netdev(netdev);
       
  3051 	return err;
       
  3052 }
       
  3053 
       
  3054 static void __devexit e100_remove(struct pci_dev *pdev)
       
  3055 {
       
  3056 	struct net_device *netdev = pci_get_drvdata(pdev);
       
  3057 
       
  3058 	if (netdev) {
       
  3059 		struct nic *nic = netdev_priv(netdev);
       
  3060 		if (nic->ecdev) {
       
  3061 			ecdev_close(nic->ecdev);
       
  3062 			ecdev_withdraw(nic->ecdev);
       
  3063 		} else {
       
  3064 			unregister_netdev(netdev);
       
  3065 		}
       
  3066 
       
  3067 		e100_free(nic);
       
  3068 		pci_iounmap(pdev, nic->csr);
       
  3069 		pci_pool_destroy(nic->cbs_pool);
       
  3070 		free_netdev(netdev);
       
  3071 		pci_release_regions(pdev);
       
  3072 		pci_disable_device(pdev);
       
  3073 		pci_set_drvdata(pdev, NULL);
       
  3074 	}
       
  3075 }
       
  3076 
       
#define E100_82552_SMARTSPEED   0x14   /* SmartSpeed Ctrl register */
#define E100_82552_REV_ANEG     0x0200 /* Reverse auto-negotiation */
#define E100_82552_ANEG_NOW     0x0400 /* Auto-negotiate now */

/* Common suspend/shutdown path: stop the interface, save PCI config state
 * and decide whether wake-up must stay armed. *enable_wake is set to true
 * when WoL magic-packet is enabled or ASF is active, false otherwise;
 * the caller uses it to choose the power-off strategy. */
static void __e100_shutdown(struct pci_dev *pdev, bool *enable_wake)
{
	struct net_device *netdev = pci_get_drvdata(pdev);
	struct nic *nic = netdev_priv(netdev);

	if (netif_running(netdev))
		e100_down(nic);
	netif_device_detach(netdev);

	pci_save_state(pdev);

	/* NOTE(review): bitwise '|' (not '||') evaluates e100_asf()
	 * unconditionally; this looks deliberate — confirm against the
	 * mainline e100 driver before "fixing" it. */
	if ((nic->flags & wol_magic) | e100_asf(nic)) {
		/* enable reverse auto-negotiation */
		if (nic->phy == phy_82552_v) {
			u16 smartspeed = mdio_read(netdev, nic->mii.phy_id,
			                           E100_82552_SMARTSPEED);

			mdio_write(netdev, nic->mii.phy_id,
			           E100_82552_SMARTSPEED, smartspeed |
			           E100_82552_REV_ANEG | E100_82552_ANEG_NOW);
		}
		*enable_wake = true;
	} else {
		*enable_wake = false;
	}

	pci_disable_device(pdev);
}
       
  3108 
       
  3109 static int __e100_power_off(struct pci_dev *pdev, bool wake)
       
  3110 {
       
  3111 	if (wake)
       
  3112 		return pci_prepare_to_sleep(pdev);
       
  3113 
       
  3114 	pci_wake_from_d3(pdev, false);
       
  3115 	pci_set_power_state(pdev, PCI_D3hot);
       
  3116 
       
  3117 	return 0;
       
  3118 }
       
  3119 
       
  3120 #ifdef CONFIG_PM
       
  3121 static int e100_suspend(struct pci_dev *pdev, pm_message_t state)
       
  3122 {
       
  3123 	bool wake;
       
  3124 	__e100_shutdown(pdev, &wake);
       
  3125 	return __e100_power_off(pdev, wake);
       
  3126 }
       
  3127 
       
/* PM resume hook: restore power state and PCI config, disarm PME, undo the
 * reverse auto-negotiation enabled in __e100_shutdown() on 82552 PHYs, then
 * re-attach and restart the interface if it was running. Always returns 0. */
static int e100_resume(struct pci_dev *pdev)
{
	struct net_device *netdev = pci_get_drvdata(pdev);
	struct nic *nic = netdev_priv(netdev);

	pci_set_power_state(pdev, PCI_D0);
	pci_restore_state(pdev);
	/* ack any pending wake events, disable PME */
	pci_enable_wake(pdev, 0, 0);

	/* disable reverse auto-negotiation */
	if (nic->phy == phy_82552_v) {
		u16 smartspeed = mdio_read(netdev, nic->mii.phy_id,
		                           E100_82552_SMARTSPEED);

		mdio_write(netdev, nic->mii.phy_id,
		           E100_82552_SMARTSPEED,
		           smartspeed & ~(E100_82552_REV_ANEG));
	}

	netif_device_attach(netdev);
	if (netif_running(netdev))
		e100_up(nic);

	return 0;
}
       
  3154 #endif /* CONFIG_PM */
       
  3155 
       
  3156 static void e100_shutdown(struct pci_dev *pdev)
       
  3157 {
       
  3158 	bool wake;
       
  3159 	__e100_shutdown(pdev, &wake);
       
  3160 	if (system_state == SYSTEM_POWER_OFF)
       
  3161 		__e100_power_off(pdev, wake);
       
  3162 }
       
  3163 
       
  3164 /* ------------------ PCI Error Recovery infrastructure  -------------- */
       
/**
 * e100_io_error_detected - called when PCI error is detected.
 * @pdev: Pointer to PCI device
 * @state: The current pci connection state
 */
static pci_ers_result_t e100_io_error_detected(struct pci_dev *pdev, pci_channel_state_t state)
{
	struct net_device *netdev = pci_get_drvdata(pdev);
	struct nic *nic = netdev_priv(netdev);

	/* EtherCAT-claimed devices opt out of AER recovery entirely.
	 * NOTE(review): -EBUSY is not a pci_ers_result_t enumerator —
	 * confirm the AER core tolerates this out-of-range value. */
	if (nic->ecdev)
		return -EBUSY;

	netif_device_detach(netdev);

	if (state == pci_channel_io_perm_failure)
		return PCI_ERS_RESULT_DISCONNECT;

	if (netif_running(netdev))
		e100_down(nic);
	pci_disable_device(pdev);

	/* Request a slot reset. */
	return PCI_ERS_RESULT_NEED_RESET;
}
       
  3190 
       
/**
 * e100_io_slot_reset - called after the pci bus has been reset.
 * @pdev: Pointer to PCI device
 *
 * Restart the card from scratch.
 */
static pci_ers_result_t e100_io_slot_reset(struct pci_dev *pdev)
{
	struct net_device *netdev = pci_get_drvdata(pdev);
	struct nic *nic = netdev_priv(netdev);

	/* NOTE(review): -EBUSY is not a pci_ers_result_t enumerator; same
	 * EtherCAT opt-out as in e100_io_error_detected() — verify. */
	if (nic->ecdev)
		return -EBUSY;

	if (pci_enable_device(pdev)) {
		pr_err("Cannot re-enable PCI device after reset\n");
		return PCI_ERS_RESULT_DISCONNECT;
	}
	pci_set_master(pdev);

	/* Only one device per card can do a reset */
	if (0 != PCI_FUNC(pdev->devfn))
		return PCI_ERS_RESULT_RECOVERED;
	e100_hw_reset(nic);
	e100_phy_init(nic);

	return PCI_ERS_RESULT_RECOVERED;
}
       
  3219 
       
/**
 * e100_io_resume - resume normal operations
 * @pdev: Pointer to PCI device
 *
 * Resume normal operations after an error recovery
 * sequence has been completed.
 */
static void e100_io_resume(struct pci_dev *pdev)
{
	struct net_device *netdev = pci_get_drvdata(pdev);
	struct nic *nic = netdev_priv(netdev);

	/* ack any pending wake events, disable PME */
	pci_enable_wake(pdev, 0, 0);

	/* EtherCAT-claimed devices are never attached to the net stack. */
	if (!nic->ecdev)
		netif_device_attach(netdev);
	/* Reopen unconditionally for EtherCAT devices; the watchdog timer
	 * is only used on the normal (non-EtherCAT) path. */
	if (nic->ecdev || netif_running(netdev)) {
		e100_open(netdev);
		if (!nic->ecdev)
			mod_timer(&nic->watchdog, jiffies);
	}
}
       
  3243 
       
/* PCI AER (error recovery) callbacks, referenced by e100_driver below. */
static struct pci_error_handlers e100_err_handler = {
	.error_detected = e100_io_error_detected,
	.slot_reset = e100_io_slot_reset,
	.resume = e100_io_resume,
};
       
  3249 
       
/* PCI driver descriptor: probe/remove, power management (when CONFIG_PM),
 * shutdown and error-recovery hooks for all devices in e100_id_table. */
static struct pci_driver e100_driver = {
	.name =         DRV_NAME,
	.id_table =     e100_id_table,
	.probe =        e100_probe,
	.remove =       __devexit_p(e100_remove),
#ifdef CONFIG_PM
	/* Power Management hooks */
	.suspend =      e100_suspend,
	.resume =       e100_resume,
#endif
	.shutdown =     e100_shutdown,
	.err_handler = &e100_err_handler,
};
       
  3263 
       
  3264 static int __init e100_init_module(void)
       
  3265 {
       
  3266 	if (((1 << debug) - 1) & NETIF_MSG_DRV) {
       
  3267 		pr_info("%s %s, %s\n", DRV_NAME, DRV_DESCRIPTION, DRV_VERSION);
       
  3268 		pr_info("%s\n", DRV_COPYRIGHT);
       
  3269 	}
       
  3270 	return pci_register_driver(&e100_driver);
       
  3271 }
       
  3272 
       
/* Module exit: unregister the PCI driver, which detaches every bound
 * device via e100_remove(). */
static void __exit e100_cleanup_module(void)
{
	printk(KERN_INFO DRV_NAME " cleaning up module...\n");
	pci_unregister_driver(&e100_driver);
	printk(KERN_INFO DRV_NAME " module cleaned up.\n");
}
       
  3279 
       
/* Module entry/exit registration. */
module_init(e100_init_module);
module_exit(e100_cleanup_module);