devices/e100-2.6.35-ethercat.c
branchstable-1.5
changeset 2378 ca345abf0565
parent 2286 03be7b751d08
child 2421 bc2d4bf9cbe5
child 2589 2b9c78543663
equal deleted inserted replaced
2377:aa0f6f939cb3 2378:ca345abf0565
       
     1 /******************************************************************************
       
     2  *
       
     3  *  $Id$
       
     4  *
       
     5  *  Copyright (C) 2007-2012  Florian Pose, Ingenieurgemeinschaft IgH
       
     6  *
       
     7  *  This file is part of the IgH EtherCAT Master.
       
     8  *
       
     9  *  The IgH EtherCAT Master is free software; you can redistribute it and/or
       
    10  *  modify it under the terms of the GNU General Public License version 2, as
       
    11  *  published by the Free Software Foundation.
       
    12  *
       
    13  *  The IgH EtherCAT Master is distributed in the hope that it will be useful,
       
    14  *  but WITHOUT ANY WARRANTY; without even the implied warranty of
       
    15  *  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General
       
    16  *  Public License for more details.
       
    17  *
       
    18  *  You should have received a copy of the GNU General Public License along
       
    19  *  with the IgH EtherCAT Master; if not, write to the Free Software
       
    20  *  Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA  02110-1301  USA
       
    21  *
       
    22  *  ---
       
    23  *
       
    24  *  The license mentioned above concerns the source code only. Using the
       
    25  *  EtherCAT technology and brand is only permitted in compliance with the
       
    26  *  industrial property and similar rights of Beckhoff Automation GmbH.
       
    27  *
       
    28  *  ---
       
    29  *
       
    30  *  vim: noexpandtab
       
    31  *
       
    32  *****************************************************************************/
       
    33 
       
    34 /**
       
    35    \file
       
    36    EtherCAT driver for e100-compatible NICs.
       
    37 */
       
    38 
       
    39 /* Former documentation: */
       
    40 
       
    41 /*******************************************************************************
       
    42 
       
    43   Intel PRO/100 Linux driver
       
    44   Copyright(c) 1999 - 2006 Intel Corporation.
       
    45 
       
    46   This program is free software; you can redistribute it and/or modify it
       
    47   under the terms and conditions of the GNU General Public License,
       
    48   version 2, as published by the Free Software Foundation.
       
    49 
       
    50   This program is distributed in the hope it will be useful, but WITHOUT
       
    51   ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
       
    52   FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
       
    53   more details.
       
    54 
       
    55   You should have received a copy of the GNU General Public License along with
       
    56   this program; if not, write to the Free Software Foundation, Inc.,
       
    57   51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
       
    58 
       
    59   The full GNU General Public License is included in this distribution in
       
    60   the file called "COPYING".
       
    61 
       
    62   Contact Information:
       
    63   Linux NICS <linux.nics@intel.com>
       
    64   e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
       
    65   Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
       
    66 
       
    67 *******************************************************************************/
       
    68 
       
    69 /*
       
    70  *	e100.c: Intel(R) PRO/100 ethernet driver
       
    71  *
       
    72  *	(Re)written 2003 by scott.feldman@intel.com.  Based loosely on
       
    73  *	original e100 driver, but better described as a munging of
       
    74  *	e100, e1000, eepro100, tg3, 8139cp, and other drivers.
       
    75  *
       
    76  *	References:
       
    77  *		Intel 8255x 10/100 Mbps Ethernet Controller Family,
       
    78  *		Open Source Software Developers Manual,
       
    79  *		http://sourceforge.net/projects/e1000
       
    80  *
       
    81  *
       
    82  *	                      Theory of Operation
       
    83  *
       
    84  *	I.   General
       
    85  *
       
    86  *	The driver supports Intel(R) 10/100 Mbps PCI Fast Ethernet
       
    87  *	controller family, which includes the 82557, 82558, 82559, 82550,
       
    88  *	82551, and 82562 devices.  82558 and greater controllers
       
    89  *	integrate the Intel 82555 PHY.  The controllers are used in
       
    90  *	server and client network interface cards, as well as in
       
    91  *	LAN-On-Motherboard (LOM), CardBus, MiniPCI, and ICHx
       
    92  *	configurations.  8255x supports a 32-bit linear addressing
       
    93  *	mode and operates at 33Mhz PCI clock rate.
       
    94  *
       
    95  *	II.  Driver Operation
       
    96  *
       
    97  *	Memory-mapped mode is used exclusively to access the device's
       
    98  *	shared-memory structure, the Control/Status Registers (CSR). All
       
    99  *	setup, configuration, and control of the device, including queuing
       
   100  *	of Tx, Rx, and configuration commands is through the CSR.
       
   101  *	cmd_lock serializes accesses to the CSR command register.  cb_lock
       
   102  *	protects the shared Command Block List (CBL).
       
   103  *
       
   104  *	8255x is highly MII-compliant and all access to the PHY go
       
   105  *	through the Management Data Interface (MDI).  Consequently, the
       
   106  *	driver leverages the mii.c library shared with other MII-compliant
       
   107  *	devices.
       
   108  *
       
   109  *	Big- and Little-Endian byte order as well as 32- and 64-bit
       
   110  *	archs are supported.  Weak-ordered memory and non-cache-coherent
       
   111  *	archs are supported.
       
   112  *
       
   113  *	III. Transmit
       
   114  *
       
   115  *	A Tx skb is mapped and hangs off of a TCB.  TCBs are linked
       
   116  *	together in a fixed-size ring (CBL) thus forming the flexible mode
       
   117  *	memory structure.  A TCB marked with the suspend-bit indicates
       
   118  *	the end of the ring.  The last TCB processed suspends the
       
   119  *	controller, and the controller can be restarted by issue a CU
       
   120  *	resume command to continue from the suspend point, or a CU start
       
   121  *	command to start at a given position in the ring.
       
   122  *
       
   123  *	Non-Tx commands (config, multicast setup, etc) are linked
       
   124  *	into the CBL ring along with Tx commands.  The common structure
       
   125  *	used for both Tx and non-Tx commands is the Command Block (CB).
       
   126  *
       
   127  *	cb_to_use is the next CB to use for queuing a command; cb_to_clean
       
   128  *	is the next CB to check for completion; cb_to_send is the first
       
   129  *	CB to start on in case of a previous failure to resume.  CB clean
       
   130  *	up happens in interrupt context in response to a CU interrupt.
       
   131  *	cbs_avail keeps track of number of free CB resources available.
       
   132  *
       
   133  * 	Hardware padding of short packets to minimum packet size is
       
   134  * 	enabled.  82557 pads with 7Eh, while the later controllers pad
       
   135  * 	with 00h.
       
   136  *
       
   137  *	IV.  Receive
       
   138  *
       
   139  *	The Receive Frame Area (RFA) comprises a ring of Receive Frame
       
   140  *	Descriptors (RFD) + data buffer, thus forming the simplified mode
       
   141  *	memory structure.  Rx skbs are allocated to contain both the RFD
       
   142  *	and the data buffer, but the RFD is pulled off before the skb is
       
   143  *	indicated.  The data buffer is aligned such that encapsulated
       
   144  *	protocol headers are u32-aligned.  Since the RFD is part of the
       
   145  *	mapped shared memory, and completion status is contained within
       
   146  *	the RFD, the RFD must be dma_sync'ed to maintain a consistent
       
   147  *	view from software and hardware.
       
   148  *
       
   149  *	In order to keep updates to the RFD link field from colliding with
       
   150  *	hardware writes to mark packets complete, we use the feature that
       
   151  *	hardware will not write to a size 0 descriptor and mark the previous
       
   152  *	packet as end-of-list (EL).   After updating the link, we remove EL
       
   153  *	and only then restore the size such that hardware may use the
       
   154  *	previous-to-end RFD.
       
   155  *
       
   156  *	Under typical operation, the  receive unit (RU) is start once,
       
   157  *	and the controller happily fills RFDs as frames arrive.  If
       
   158  *	replacement RFDs cannot be allocated, or the RU goes non-active,
       
   159  *	the RU must be restarted.  Frame arrival generates an interrupt,
       
   160  *	and Rx indication and re-allocation happen in the same context,
       
   161  *	therefore no locking is required.  A software-generated interrupt
       
   162  *	is generated from the watchdog to recover from a failed allocation
       
   163  *	scenario where all Rx resources have been indicated and none re-
       
   164  *	placed.
       
   165  *
       
   166  *	V.   Miscellaneous
       
   167  *
       
   168  * 	VLAN offloading of tagging, stripping and filtering is not
       
   169  * 	supported, but driver will accommodate the extra 4-byte VLAN tag
       
   170  * 	for processing by upper layers.  Tx/Rx Checksum offloading is not
       
   171  * 	supported.  Tx Scatter/Gather is not supported.  Jumbo Frames is
       
   172  * 	not supported (hardware limitation).
       
   173  *
       
   174  * 	MagicPacket(tm) WoL support is enabled/disabled via ethtool.
       
   175  *
       
   176  * 	Thanks to JC (jchapman@katalix.com) for helping with
       
   177  * 	testing/troubleshooting the development driver.
       
   178  *
       
   179  * 	TODO:
       
   180  * 	o several entry points race with dev->close
       
   181  * 	o check for tx-no-resources/stop Q races with tx clean/wake Q
       
   182  *
       
   183  *	FIXES:
       
   184  * 2005/12/02 - Michael O'Donnell <Michael.ODonnell at stratus dot com>
       
   185  *	- Stratus87247: protect MDI control register manipulations
       
   186  * 2009/06/01 - Andreas Mohr <andi at lisas dot de>
       
   187  *      - add clean lowlevel I/O emulation for cards with MII-lacking PHYs
       
   188  */
       
   189 
       
   190 #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
       
   191 
       
   192 #include <linux/module.h>
       
   193 #include <linux/moduleparam.h>
       
   194 #include <linux/kernel.h>
       
   195 #include <linux/types.h>
       
   196 #include <linux/sched.h>
       
   197 #include <linux/slab.h>
       
   198 #include <linux/delay.h>
       
   199 #include <linux/init.h>
       
   200 #include <linux/pci.h>
       
   201 #include <linux/dma-mapping.h>
       
   202 #include <linux/dmapool.h>
       
   203 #include <linux/netdevice.h>
       
   204 #include <linux/etherdevice.h>
       
   205 #include <linux/mii.h>
       
   206 #include <linux/if_vlan.h>
       
   207 #include <linux/skbuff.h>
       
   208 #include <linux/ethtool.h>
       
   209 #include <linux/string.h>
       
   210 #include <linux/firmware.h>
       
   211 #include <linux/rtnetlink.h>
       
   212 #include <asm/unaligned.h>
       
   213 
       
   214 // EtherCAT includes
       
   215 #include "../globals.h"
       
   216 #include "ecdev.h"
       
   217 
       
   218 #define DRV_NAME		"ec_e100"
       
   219 #define DRV_EXT			"-NAPI"
       
   220 #define DRV_VERSION		"3.5.24-k2"DRV_EXT
       
   221 #define DRV_DESCRIPTION		"Intel(R) PRO/100 Network Driver"
       
   222 #define DRV_COPYRIGHT		"Copyright(c) 1999-2006 Intel Corporation"
       
   223 
       
   224 #define E100_WATCHDOG_PERIOD	(2 * HZ)
       
   225 #define E100_NAPI_WEIGHT	16
       
   226 
       
   227 #define FIRMWARE_D101M		"e100/d101m_ucode.bin"
       
   228 #define FIRMWARE_D101S		"e100/d101s_ucode.bin"
       
   229 #define FIRMWARE_D102E		"e100/d102e_ucode.bin"
       
   230 
       
   231 MODULE_DESCRIPTION(DRV_DESCRIPTION);
       
   232 MODULE_AUTHOR(DRV_COPYRIGHT);
       
   233 MODULE_LICENSE("GPL");
       
   234 MODULE_VERSION(DRV_VERSION);
       
   235 MODULE_FIRMWARE(FIRMWARE_D101M);
       
   236 MODULE_FIRMWARE(FIRMWARE_D101S);
       
   237 MODULE_FIRMWARE(FIRMWARE_D102E);
       
   238 
       
   239 MODULE_DESCRIPTION(DRV_DESCRIPTION);
       
   240 MODULE_AUTHOR("Florian Pose <fp@igh-essen.com>");
       
   241 MODULE_LICENSE("GPL");
       
   242 MODULE_VERSION(DRV_VERSION ", master " EC_MASTER_VERSION);
       
   243 
       
   244 void e100_ec_poll(struct net_device *);
       
   245 
       
   246 static int debug = 3;
       
   247 static int eeprom_bad_csum_allow = 0;
       
   248 static int use_io = 0;
       
   249 module_param(debug, int, 0);
       
   250 module_param(eeprom_bad_csum_allow, int, 0);
       
   251 module_param(use_io, int, 0);
       
   252 MODULE_PARM_DESC(debug, "Debug level (0=none,...,16=all)");
       
   253 MODULE_PARM_DESC(eeprom_bad_csum_allow, "Allow bad eeprom checksums");
       
   254 MODULE_PARM_DESC(use_io, "Force use of i/o access mode");
       
   255 
       
   256 #define INTEL_8255X_ETHERNET_DEVICE(device_id, ich) {\
       
   257 	PCI_VENDOR_ID_INTEL, device_id, PCI_ANY_ID, PCI_ANY_ID, \
       
   258 	PCI_CLASS_NETWORK_ETHERNET << 8, 0xFFFF00, ich }
       
   259 static DEFINE_PCI_DEVICE_TABLE(e100_id_table) = {
       
   260 	INTEL_8255X_ETHERNET_DEVICE(0x1029, 0),
       
   261 	INTEL_8255X_ETHERNET_DEVICE(0x1030, 0),
       
   262 	INTEL_8255X_ETHERNET_DEVICE(0x1031, 3),
       
   263 	INTEL_8255X_ETHERNET_DEVICE(0x1032, 3),
       
   264 	INTEL_8255X_ETHERNET_DEVICE(0x1033, 3),
       
   265 	INTEL_8255X_ETHERNET_DEVICE(0x1034, 3),
       
   266 	INTEL_8255X_ETHERNET_DEVICE(0x1038, 3),
       
   267 	INTEL_8255X_ETHERNET_DEVICE(0x1039, 4),
       
   268 	INTEL_8255X_ETHERNET_DEVICE(0x103A, 4),
       
   269 	INTEL_8255X_ETHERNET_DEVICE(0x103B, 4),
       
   270 	INTEL_8255X_ETHERNET_DEVICE(0x103C, 4),
       
   271 	INTEL_8255X_ETHERNET_DEVICE(0x103D, 4),
       
   272 	INTEL_8255X_ETHERNET_DEVICE(0x103E, 4),
       
   273 	INTEL_8255X_ETHERNET_DEVICE(0x1050, 5),
       
   274 	INTEL_8255X_ETHERNET_DEVICE(0x1051, 5),
       
   275 	INTEL_8255X_ETHERNET_DEVICE(0x1052, 5),
       
   276 	INTEL_8255X_ETHERNET_DEVICE(0x1053, 5),
       
   277 	INTEL_8255X_ETHERNET_DEVICE(0x1054, 5),
       
   278 	INTEL_8255X_ETHERNET_DEVICE(0x1055, 5),
       
   279 	INTEL_8255X_ETHERNET_DEVICE(0x1056, 5),
       
   280 	INTEL_8255X_ETHERNET_DEVICE(0x1057, 5),
       
   281 	INTEL_8255X_ETHERNET_DEVICE(0x1059, 0),
       
   282 	INTEL_8255X_ETHERNET_DEVICE(0x1064, 6),
       
   283 	INTEL_8255X_ETHERNET_DEVICE(0x1065, 6),
       
   284 	INTEL_8255X_ETHERNET_DEVICE(0x1066, 6),
       
   285 	INTEL_8255X_ETHERNET_DEVICE(0x1067, 6),
       
   286 	INTEL_8255X_ETHERNET_DEVICE(0x1068, 6),
       
   287 	INTEL_8255X_ETHERNET_DEVICE(0x1069, 6),
       
   288 	INTEL_8255X_ETHERNET_DEVICE(0x106A, 6),
       
   289 	INTEL_8255X_ETHERNET_DEVICE(0x106B, 6),
       
   290 	INTEL_8255X_ETHERNET_DEVICE(0x1091, 7),
       
   291 	INTEL_8255X_ETHERNET_DEVICE(0x1092, 7),
       
   292 	INTEL_8255X_ETHERNET_DEVICE(0x1093, 7),
       
   293 	INTEL_8255X_ETHERNET_DEVICE(0x1094, 7),
       
   294 	INTEL_8255X_ETHERNET_DEVICE(0x1095, 7),
       
   295 	INTEL_8255X_ETHERNET_DEVICE(0x10fe, 7),
       
   296 	INTEL_8255X_ETHERNET_DEVICE(0x1209, 0),
       
   297 	INTEL_8255X_ETHERNET_DEVICE(0x1229, 0),
       
   298 	INTEL_8255X_ETHERNET_DEVICE(0x2449, 2),
       
   299 	INTEL_8255X_ETHERNET_DEVICE(0x2459, 2),
       
   300 	INTEL_8255X_ETHERNET_DEVICE(0x245D, 2),
       
   301 	INTEL_8255X_ETHERNET_DEVICE(0x27DC, 7),
       
   302 	{ 0, }
       
   303 };
       
   304 
       
   305 // prevent from being loaded automatically
       
   306 //MODULE_DEVICE_TABLE(pci, e100_id_table);
       
   307 
       
   308 enum mac {
       
   309 	mac_82557_D100_A  = 0,
       
   310 	mac_82557_D100_B  = 1,
       
   311 	mac_82557_D100_C  = 2,
       
   312 	mac_82558_D101_A4 = 4,
       
   313 	mac_82558_D101_B0 = 5,
       
   314 	mac_82559_D101M   = 8,
       
   315 	mac_82559_D101S   = 9,
       
   316 	mac_82550_D102    = 12,
       
   317 	mac_82550_D102_C  = 13,
       
   318 	mac_82551_E       = 14,
       
   319 	mac_82551_F       = 15,
       
   320 	mac_82551_10      = 16,
       
   321 	mac_unknown       = 0xFF,
       
   322 };
       
   323 
       
   324 enum phy {
       
   325 	phy_100a     = 0x000003E0,
       
   326 	phy_100c     = 0x035002A8,
       
   327 	phy_82555_tx = 0x015002A8,
       
   328 	phy_nsc_tx   = 0x5C002000,
       
   329 	phy_82562_et = 0x033002A8,
       
   330 	phy_82562_em = 0x032002A8,
       
   331 	phy_82562_ek = 0x031002A8,
       
   332 	phy_82562_eh = 0x017002A8,
       
   333 	phy_82552_v  = 0xd061004d,
       
   334 	phy_unknown  = 0xFFFFFFFF,
       
   335 };
       
   336 
       
   337 /* CSR (Control/Status Registers) */
       
   338 struct csr {
       
   339 	struct {
       
   340 		u8 status;
       
   341 		u8 stat_ack;
       
   342 		u8 cmd_lo;
       
   343 		u8 cmd_hi;
       
   344 		u32 gen_ptr;
       
   345 	} scb;
       
   346 	u32 port;
       
   347 	u16 flash_ctrl;
       
   348 	u8 eeprom_ctrl_lo;
       
   349 	u8 eeprom_ctrl_hi;
       
   350 	u32 mdi_ctrl;
       
   351 	u32 rx_dma_count;
       
   352 };
       
   353 
       
   354 enum scb_status {
       
   355 	rus_no_res       = 0x08,
       
   356 	rus_ready        = 0x10,
       
   357 	rus_mask         = 0x3C,
       
   358 };
       
   359 
       
   360 enum ru_state  {
       
   361 	RU_SUSPENDED = 0,
       
   362 	RU_RUNNING	 = 1,
       
   363 	RU_UNINITIALIZED = -1,
       
   364 };
       
   365 
       
   366 enum scb_stat_ack {
       
   367 	stat_ack_not_ours    = 0x00,
       
   368 	stat_ack_sw_gen      = 0x04,
       
   369 	stat_ack_rnr         = 0x10,
       
   370 	stat_ack_cu_idle     = 0x20,
       
   371 	stat_ack_frame_rx    = 0x40,
       
   372 	stat_ack_cu_cmd_done = 0x80,
       
   373 	stat_ack_not_present = 0xFF,
       
   374 	stat_ack_rx = (stat_ack_sw_gen | stat_ack_rnr | stat_ack_frame_rx),
       
   375 	stat_ack_tx = (stat_ack_cu_idle | stat_ack_cu_cmd_done),
       
   376 };
       
   377 
       
   378 enum scb_cmd_hi {
       
   379 	irq_mask_none = 0x00,
       
   380 	irq_mask_all  = 0x01,
       
   381 	irq_sw_gen    = 0x02,
       
   382 };
       
   383 
       
   384 enum scb_cmd_lo {
       
   385 	cuc_nop        = 0x00,
       
   386 	ruc_start      = 0x01,
       
   387 	ruc_load_base  = 0x06,
       
   388 	cuc_start      = 0x10,
       
   389 	cuc_resume     = 0x20,
       
   390 	cuc_dump_addr  = 0x40,
       
   391 	cuc_dump_stats = 0x50,
       
   392 	cuc_load_base  = 0x60,
       
   393 	cuc_dump_reset = 0x70,
       
   394 };
       
   395 
       
   396 enum cuc_dump {
       
   397 	cuc_dump_complete       = 0x0000A005,
       
   398 	cuc_dump_reset_complete = 0x0000A007,
       
   399 };
       
   400 
       
   401 enum port {
       
   402 	software_reset  = 0x0000,
       
   403 	selftest        = 0x0001,
       
   404 	selective_reset = 0x0002,
       
   405 };
       
   406 
       
   407 enum eeprom_ctrl_lo {
       
   408 	eesk = 0x01,
       
   409 	eecs = 0x02,
       
   410 	eedi = 0x04,
       
   411 	eedo = 0x08,
       
   412 };
       
   413 
       
   414 enum mdi_ctrl {
       
   415 	mdi_write = 0x04000000,
       
   416 	mdi_read  = 0x08000000,
       
   417 	mdi_ready = 0x10000000,
       
   418 };
       
   419 
       
   420 enum eeprom_op {
       
   421 	op_write = 0x05,
       
   422 	op_read  = 0x06,
       
   423 	op_ewds  = 0x10,
       
   424 	op_ewen  = 0x13,
       
   425 };
       
   426 
       
   427 enum eeprom_offsets {
       
   428 	eeprom_cnfg_mdix  = 0x03,
       
   429 	eeprom_phy_iface  = 0x06,
       
   430 	eeprom_id         = 0x0A,
       
   431 	eeprom_config_asf = 0x0D,
       
   432 	eeprom_smbus_addr = 0x90,
       
   433 };
       
   434 
       
   435 enum eeprom_cnfg_mdix {
       
   436 	eeprom_mdix_enabled = 0x0080,
       
   437 };
       
   438 
       
   439 enum eeprom_phy_iface {
       
   440 	NoSuchPhy = 0,
       
   441 	I82553AB,
       
   442 	I82553C,
       
   443 	I82503,
       
   444 	DP83840,
       
   445 	S80C240,
       
   446 	S80C24,
       
   447 	I82555,
       
   448 	DP83840A = 10,
       
   449 };
       
   450 
       
   451 enum eeprom_id {
       
   452 	eeprom_id_wol = 0x0020,
       
   453 };
       
   454 
       
   455 enum eeprom_config_asf {
       
   456 	eeprom_asf = 0x8000,
       
   457 	eeprom_gcl = 0x4000,
       
   458 };
       
   459 
       
   460 enum cb_status {
       
   461 	cb_complete = 0x8000,
       
   462 	cb_ok       = 0x2000,
       
   463 };
       
   464 
       
   465 enum cb_command {
       
   466 	cb_nop    = 0x0000,
       
   467 	cb_iaaddr = 0x0001,
       
   468 	cb_config = 0x0002,
       
   469 	cb_multi  = 0x0003,
       
   470 	cb_tx     = 0x0004,
       
   471 	cb_ucode  = 0x0005,
       
   472 	cb_dump   = 0x0006,
       
   473 	cb_tx_sf  = 0x0008,
       
   474 	cb_cid    = 0x1f00,
       
   475 	cb_i      = 0x2000,
       
   476 	cb_s      = 0x4000,
       
   477 	cb_el     = 0x8000,
       
   478 };
       
   479 
       
/* Receive Frame Descriptor: the hardware-shared header that precedes
 * each Rx data buffer in the RFA ring (see "IV. Receive" above).
 * Completion status is written here by the controller, so the RFD must
 * be dma_sync'ed for a consistent software/hardware view. */
struct rfd {
	__le16 status;		/* completion status, written by hardware */
	__le16 command;		/* control bits, e.g. cb_el end-of-list */
	__le32 link;		/* bus address of the next RFD in the ring */
	__le32 rbd;		/* RBD pointer; presumably unused in simplified mode -- TODO confirm */
	__le16 actual_size;	/* bytes actually received */
	__le16 size;		/* buffer size; 0 keeps hardware off this RFD */
};
       
   488 
       
/* Per-buffer Rx bookkeeping: node of the doubly-linked list of
 * DMA-mapped skbs backing the RFD ring. */
struct rx {
	struct rx *next, *prev;
	struct sk_buff *skb;	/* skb holding both the RFD and the data buffer */
	dma_addr_t dma_addr;	/* DMA mapping of the skb data */
};
       
   494 
       
   495 #if defined(__BIG_ENDIAN_BITFIELD)
       
   496 #define X(a,b)	b,a
       
   497 #else
       
   498 #define X(a,b)	a,b
       
   499 #endif
       
   500 struct config {
       
   501 /*0*/	u8 X(byte_count:6, pad0:2);
       
   502 /*1*/	u8 X(X(rx_fifo_limit:4, tx_fifo_limit:3), pad1:1);
       
   503 /*2*/	u8 adaptive_ifs;
       
   504 /*3*/	u8 X(X(X(X(mwi_enable:1, type_enable:1), read_align_enable:1),
       
   505 	   term_write_cache_line:1), pad3:4);
       
   506 /*4*/	u8 X(rx_dma_max_count:7, pad4:1);
       
   507 /*5*/	u8 X(tx_dma_max_count:7, dma_max_count_enable:1);
       
   508 /*6*/	u8 X(X(X(X(X(X(X(late_scb_update:1, direct_rx_dma:1),
       
   509 	   tno_intr:1), cna_intr:1), standard_tcb:1), standard_stat_counter:1),
       
   510 	   rx_discard_overruns:1), rx_save_bad_frames:1);
       
   511 /*7*/	u8 X(X(X(X(X(rx_discard_short_frames:1, tx_underrun_retry:2),
       
   512 	   pad7:2), rx_extended_rfd:1), tx_two_frames_in_fifo:1),
       
   513 	   tx_dynamic_tbd:1);
       
   514 /*8*/	u8 X(X(mii_mode:1, pad8:6), csma_disabled:1);
       
   515 /*9*/	u8 X(X(X(X(X(rx_tcpudp_checksum:1, pad9:3), vlan_arp_tco:1),
       
   516 	   link_status_wake:1), arp_wake:1), mcmatch_wake:1);
       
   517 /*10*/	u8 X(X(X(pad10:3, no_source_addr_insertion:1), preamble_length:2),
       
   518 	   loopback:2);
       
   519 /*11*/	u8 X(linear_priority:3, pad11:5);
       
   520 /*12*/	u8 X(X(linear_priority_mode:1, pad12:3), ifs:4);
       
   521 /*13*/	u8 ip_addr_lo;
       
   522 /*14*/	u8 ip_addr_hi;
       
   523 /*15*/	u8 X(X(X(X(X(X(X(promiscuous_mode:1, broadcast_disabled:1),
       
   524 	   wait_after_win:1), pad15_1:1), ignore_ul_bit:1), crc_16_bit:1),
       
   525 	   pad15_2:1), crs_or_cdt:1);
       
   526 /*16*/	u8 fc_delay_lo;
       
   527 /*17*/	u8 fc_delay_hi;
       
   528 /*18*/	u8 X(X(X(X(X(rx_stripping:1, tx_padding:1), rx_crc_transfer:1),
       
   529 	   rx_long_ok:1), fc_priority_threshold:3), pad18:1);
       
   530 /*19*/	u8 X(X(X(X(X(X(X(addr_wake:1, magic_packet_disable:1),
       
   531 	   fc_disable:1), fc_restop:1), fc_restart:1), fc_reject:1),
       
   532 	   full_duplex_force:1), full_duplex_pin:1);
       
   533 /*20*/	u8 X(X(X(pad20_1:5, fc_priority_location:1), multi_ia:1), pad20_2:1);
       
   534 /*21*/	u8 X(X(pad21_1:3, multicast_all:1), pad21_2:4);
       
   535 /*22*/	u8 X(X(rx_d102_mode:1, rx_vlan_drop:1), pad22:6);
       
   536 	u8 pad_d102[9];
       
   537 };
       
   538 
       
   539 #define E100_MAX_MULTICAST_ADDRS	64
       
   540 struct multi {
       
   541 	__le16 count;
       
   542 	u8 addr[E100_MAX_MULTICAST_ADDRS * ETH_ALEN + 2/*pad*/];
       
   543 };
       
   544 
       
   545 /* Important: keep total struct u32-aligned */
       
   546 #define UCODE_SIZE			134
       
   547 struct cb {
       
   548 	__le16 status;
       
   549 	__le16 command;
       
   550 	__le32 link;
       
   551 	union {
       
   552 		u8 iaaddr[ETH_ALEN];
       
   553 		__le32 ucode[UCODE_SIZE];
       
   554 		struct config config;
       
   555 		struct multi multi;
       
   556 		struct {
       
   557 			u32 tbd_array;
       
   558 			u16 tcb_byte_count;
       
   559 			u8 threshold;
       
   560 			u8 tbd_count;
       
   561 			struct {
       
   562 				__le32 buf_addr;
       
   563 				__le16 size;
       
   564 				u16 eol;
       
   565 			} tbd;
       
   566 		} tcb;
       
   567 		__le32 dump_buffer_addr;
       
   568 	} u;
       
   569 	struct cb *next, *prev;
       
   570 	dma_addr_t dma_addr;
       
   571 	struct sk_buff *skb;
       
   572 };
       
   573 
       
   574 enum loopback {
       
   575 	lb_none = 0, lb_mac = 1, lb_phy = 3,
       
   576 };
       
   577 
       
   578 struct stats {
       
   579 	__le32 tx_good_frames, tx_max_collisions, tx_late_collisions,
       
   580 		tx_underruns, tx_lost_crs, tx_deferred, tx_single_collisions,
       
   581 		tx_multiple_collisions, tx_total_collisions;
       
   582 	__le32 rx_good_frames, rx_crc_errors, rx_alignment_errors,
       
   583 		rx_resource_errors, rx_overrun_errors, rx_cdt_errors,
       
   584 		rx_short_frame_errors;
       
   585 	__le32 fc_xmt_pause, fc_rcv_pause, fc_rcv_unsupported;
       
   586 	__le16 xmt_tco_frames, rcv_tco_frames;
       
   587 	__le32 complete;
       
   588 };
       
   589 
       
   590 struct mem {
       
   591 	struct {
       
   592 		u32 signature;
       
   593 		u32 result;
       
   594 	} selftest;
       
   595 	struct stats stats;
       
   596 	u8 dump_buf[596];
       
   597 };
       
   598 
       
   599 struct param_range {
       
   600 	u32 min;
       
   601 	u32 max;
       
   602 	u32 count;
       
   603 };
       
   604 
       
   605 struct params {
       
   606 	struct param_range rfds;
       
   607 	struct param_range cbs;
       
   608 };
       
   609 
       
/* Per-adapter driver state. */
struct nic {
	/* Begin: frequently used values: keep adjacent for cache effect */
	u32 msg_enable				____cacheline_aligned;
	struct net_device *netdev;
	struct pci_dev *pdev;
	/* Indirect MDI access hook; 'dir' is presumably one of enum
	 * mdi_ctrl (mdi_read/mdi_write) -- TODO confirm at call sites. */
	u16 (*mdio_ctrl)(struct nic *nic, u32 addr, u32 dir, u32 reg, u16 data);

	/* Rx ring state (see "IV. Receive" above). */
	struct rx *rxs				____cacheline_aligned;
	struct rx *rx_to_use;
	struct rx *rx_to_clean;
	struct rfd blank_rfd;
	enum ru_state ru_running;

	spinlock_t cb_lock			____cacheline_aligned;	/* protects the shared CBL */
	spinlock_t cmd_lock;		/* serializes SCB command register access */
	struct csr __iomem *csr;	/* memory-mapped Control/Status Registers */
	enum scb_cmd_lo cuc_cmd;
	unsigned int cbs_avail;		/* number of free CB resources */
	struct napi_struct napi;
	struct cb *cbs;
	struct cb *cb_to_use;		/* next CB to use for queuing a command */
	struct cb *cb_to_send;		/* first CB to start on after a failed resume */
	struct cb *cb_to_clean;		/* next CB to check for completion */
	__le16 tx_command;
	/* End: frequently used values: keep adjacent for cache effect */

	enum {
		ich                = (1 << 0),
		promiscuous        = (1 << 1),
		multicast_all      = (1 << 2),
		wol_magic          = (1 << 3),
		ich_10h_workaround = (1 << 4),
	} flags					____cacheline_aligned;

	enum mac mac;
	enum phy phy;
	struct params params;
	struct timer_list watchdog;
	struct timer_list blink_timer;
	struct mii_if_info mii;
	struct work_struct tx_timeout_task;
	enum loopback loopback;

	struct mem *mem;		/* DMA-able selftest/stats/dump area */
	dma_addr_t dma_addr;		/* bus address of 'mem' */

	struct pci_pool *cbs_pool;
	dma_addr_t cbs_dma_addr;
	u8 adaptive_ifs;
	u8 tx_threshold;
	u32 tx_frames;
	u32 tx_collisions;

	u32 tx_deferred;
	u32 tx_single_collisions;
	u32 tx_multiple_collisions;
	u32 tx_fc_pause;
	u32 tx_tco_frames;

	u32 rx_fc_pause;
	u32 rx_fc_unsupported;
	u32 rx_tco_frames;
	u32 rx_over_length_errors;

	u16 leds;
	u16 eeprom_wc;			/* presumably EEPROM word count -- TODO confirm */

	__le16 eeprom[256];
	spinlock_t mdio_lock;
	const struct firmware *fw;

	/* EtherCAT integration: non-NULL when the NIC is claimed as an
	 * EtherCAT device; interrupts then stay masked (see
	 * e100_enable_irq) and e100_ec_poll drives the device. */
	ec_device_t *ecdev;
	unsigned long ec_watchdog_jiffies;
};
       
   683 
       
/* Force previously posted PCI writes to reach the device by issuing a
 * benign read of the SCB status register; intermediate bridges must
 * complete all queued writes before the read can return. */
static inline void e100_write_flush(struct nic *nic)
{
	/* Flush previous PCI writes through intermediate bridges
	 * by doing a benign read */
	(void)ioread8(&nic->csr->scb.status);
}
       
   690 
       
   691 static void e100_enable_irq(struct nic *nic)
       
   692 {
       
   693 	unsigned long flags;
       
   694 
       
   695 	if (nic->ecdev)
       
   696 		return;
       
   697 
       
   698 	spin_lock_irqsave(&nic->cmd_lock, flags);
       
   699 	iowrite8(irq_mask_none, &nic->csr->scb.cmd_hi);
       
   700 	e100_write_flush(nic);
       
   701 	spin_unlock_irqrestore(&nic->cmd_lock, flags);
       
   702 }
       
   703 
       
   704 static void e100_disable_irq(struct nic *nic)
       
   705 {
       
   706 	unsigned long flags = 0;
       
   707 
       
   708 	if (!nic->ecdev)
       
   709 		spin_lock_irqsave(&nic->cmd_lock, flags);
       
   710 	iowrite8(irq_mask_all, &nic->csr->scb.cmd_hi);
       
   711 	e100_write_flush(nic);
       
   712 	if (!nic->ecdev)
       
   713 		spin_unlock_irqrestore(&nic->cmd_lock, flags);
       
   714 }
       
   715 
       
/* Reset the controller: a selective reset first to quiesce the CU/RU
 * and get the device off the PCI bus, then a full software reset.
 * Reset leaves interrupts unmasked, so they are re-masked at the end. */
static void e100_hw_reset(struct nic *nic)
{
	/* Put CU and RU into idle with a selective reset to get
	 * device off of PCI bus */
	iowrite32(selective_reset, &nic->csr->port);
	e100_write_flush(nic); udelay(20);

	/* Now fully reset device */
	iowrite32(software_reset, &nic->csr->port);
	e100_write_flush(nic); udelay(20);

	/* Mask off our interrupt line - it's unmasked after reset */
	e100_disable_irq(nic);
}
       
   730 
       
/* Run the chip's built-in self-test by writing the DMA address of the
 * selftest result area to the PORT register.
 * Returns 0 on pass, -ETIMEDOUT if the test failed or never wrote back. */
static int e100_self_test(struct nic *nic)
{
	u32 dma_addr = nic->dma_addr + offsetof(struct mem, selftest);

	/* Passing the self-test is a pretty good indication
	 * that the device can DMA to/from host memory */

	/* Pre-set result fields so we can detect whether the chip
	 * actually wrote anything back */
	nic->mem->selftest.signature = 0;
	nic->mem->selftest.result = 0xFFFFFFFF;

	iowrite32(selftest | dma_addr, &nic->csr->port);
	e100_write_flush(nic);
	/* Wait 10 msec for self-test to complete */
	msleep(10);

	/* Interrupts are enabled after self-test */
	e100_disable_irq(nic);

	/* Check results of self-test */
	if (nic->mem->selftest.result != 0) {
		netif_err(nic, hw, nic->netdev,
			  "Self-test failed: result=0x%08X\n",
			  nic->mem->selftest.result);
		return -ETIMEDOUT;
	}
	/* signature still zero means the chip never completed the test */
	if (nic->mem->selftest.signature == 0) {
		netif_err(nic, hw, nic->netdev, "Self-test failed: timed out\n");
		return -ETIMEDOUT;
	}

	return 0;
}
       
   763 
       
   764 static void e100_eeprom_write(struct nic *nic, u16 addr_len, u16 addr, __le16 data)
       
   765 {
       
   766 	u32 cmd_addr_data[3];
       
   767 	u8 ctrl;
       
   768 	int i, j;
       
   769 
       
   770 	/* Three cmds: write/erase enable, write data, write/erase disable */
       
   771 	cmd_addr_data[0] = op_ewen << (addr_len - 2);
       
   772 	cmd_addr_data[1] = (((op_write << addr_len) | addr) << 16) |
       
   773 		le16_to_cpu(data);
       
   774 	cmd_addr_data[2] = op_ewds << (addr_len - 2);
       
   775 
       
   776 	/* Bit-bang cmds to write word to eeprom */
       
   777 	for (j = 0; j < 3; j++) {
       
   778 
       
   779 		/* Chip select */
       
   780 		iowrite8(eecs | eesk, &nic->csr->eeprom_ctrl_lo);
       
   781 		e100_write_flush(nic); udelay(4);
       
   782 
       
   783 		for (i = 31; i >= 0; i--) {
       
   784 			ctrl = (cmd_addr_data[j] & (1 << i)) ?
       
   785 				eecs | eedi : eecs;
       
   786 			iowrite8(ctrl, &nic->csr->eeprom_ctrl_lo);
       
   787 			e100_write_flush(nic); udelay(4);
       
   788 
       
   789 			iowrite8(ctrl | eesk, &nic->csr->eeprom_ctrl_lo);
       
   790 			e100_write_flush(nic); udelay(4);
       
   791 		}
       
   792 		/* Wait 10 msec for cmd to complete */
       
   793 		msleep(10);
       
   794 
       
   795 		/* Chip deselect */
       
   796 		iowrite8(0, &nic->csr->eeprom_ctrl_lo);
       
   797 		e100_write_flush(nic); udelay(4);
       
   798 	}
       
   799 };
       
   800 
       
   801 /* General technique stolen from the eepro100 driver - very clever */
       
   802 static __le16 e100_eeprom_read(struct nic *nic, u16 *addr_len, u16 addr)
       
   803 {
       
   804 	u32 cmd_addr_data;
       
   805 	u16 data = 0;
       
   806 	u8 ctrl;
       
   807 	int i;
       
   808 
       
   809 	cmd_addr_data = ((op_read << *addr_len) | addr) << 16;
       
   810 
       
   811 	/* Chip select */
       
   812 	iowrite8(eecs | eesk, &nic->csr->eeprom_ctrl_lo);
       
   813 	e100_write_flush(nic); udelay(4);
       
   814 
       
   815 	/* Bit-bang to read word from eeprom */
       
   816 	for (i = 31; i >= 0; i--) {
       
   817 		ctrl = (cmd_addr_data & (1 << i)) ? eecs | eedi : eecs;
       
   818 		iowrite8(ctrl, &nic->csr->eeprom_ctrl_lo);
       
   819 		e100_write_flush(nic); udelay(4);
       
   820 
       
   821 		iowrite8(ctrl | eesk, &nic->csr->eeprom_ctrl_lo);
       
   822 		e100_write_flush(nic); udelay(4);
       
   823 
       
   824 		/* Eeprom drives a dummy zero to EEDO after receiving
       
   825 		 * complete address.  Use this to adjust addr_len. */
       
   826 		ctrl = ioread8(&nic->csr->eeprom_ctrl_lo);
       
   827 		if (!(ctrl & eedo) && i > 16) {
       
   828 			*addr_len -= (i - 16);
       
   829 			i = 17;
       
   830 		}
       
   831 
       
   832 		data = (data << 1) | (ctrl & eedo ? 1 : 0);
       
   833 	}
       
   834 
       
   835 	/* Chip deselect */
       
   836 	iowrite8(0, &nic->csr->eeprom_ctrl_lo);
       
   837 	e100_write_flush(nic); udelay(4);
       
   838 
       
   839 	return cpu_to_le16(data);
       
   840 };
       
   841 
       
   842 /* Load entire EEPROM image into driver cache and validate checksum */
       
   843 static int e100_eeprom_load(struct nic *nic)
       
   844 {
       
   845 	u16 addr, addr_len = 8, checksum = 0;
       
   846 
       
   847 	/* Try reading with an 8-bit addr len to discover actual addr len */
       
   848 	e100_eeprom_read(nic, &addr_len, 0);
       
   849 	nic->eeprom_wc = 1 << addr_len;
       
   850 
       
   851 	for (addr = 0; addr < nic->eeprom_wc; addr++) {
       
   852 		nic->eeprom[addr] = e100_eeprom_read(nic, &addr_len, addr);
       
   853 		if (addr < nic->eeprom_wc - 1)
       
   854 			checksum += le16_to_cpu(nic->eeprom[addr]);
       
   855 	}
       
   856 
       
   857 	/* The checksum, stored in the last word, is calculated such that
       
   858 	 * the sum of words should be 0xBABA */
       
   859 	if (cpu_to_le16(0xBABA - checksum) != nic->eeprom[nic->eeprom_wc - 1]) {
       
   860 		netif_err(nic, probe, nic->netdev, "EEPROM corrupted\n");
       
   861 		if (!eeprom_bad_csum_allow)
       
   862 			return -EAGAIN;
       
   863 	}
       
   864 
       
   865 	return 0;
       
   866 }
       
   867 
       
   868 /* Save (portion of) driver EEPROM cache to device and update checksum */
       
   869 static int e100_eeprom_save(struct nic *nic, u16 start, u16 count)
       
   870 {
       
   871 	u16 addr, addr_len = 8, checksum = 0;
       
   872 
       
   873 	/* Try reading with an 8-bit addr len to discover actual addr len */
       
   874 	e100_eeprom_read(nic, &addr_len, 0);
       
   875 	nic->eeprom_wc = 1 << addr_len;
       
   876 
       
   877 	if (start + count >= nic->eeprom_wc)
       
   878 		return -EINVAL;
       
   879 
       
   880 	for (addr = start; addr < start + count; addr++)
       
   881 		e100_eeprom_write(nic, addr_len, addr, nic->eeprom[addr]);
       
   882 
       
   883 	/* The checksum, stored in the last word, is calculated such that
       
   884 	 * the sum of words should be 0xBABA */
       
   885 	for (addr = 0; addr < nic->eeprom_wc - 1; addr++)
       
   886 		checksum += le16_to_cpu(nic->eeprom[addr]);
       
   887 	nic->eeprom[nic->eeprom_wc - 1] = cpu_to_le16(0xBABA - checksum);
       
   888 	e100_eeprom_write(nic, addr_len, nic->eeprom_wc - 1,
       
   889 		nic->eeprom[nic->eeprom_wc - 1]);
       
   890 
       
   891 	return 0;
       
   892 }
       
   893 
       
   894 #define E100_WAIT_SCB_TIMEOUT 20000 /* we might have to wait 100ms!!! */
       
   895 #define E100_WAIT_SCB_FAST 20       /* delay like the old code */
       
/* Issue a command through the SCB.  The previous command must be
 * accepted first - the chip clears scb.cmd_lo once it has latched a
 * command - so spin (fast at first, then with udelay back-off) until
 * it reads back zero.  Returns -EAGAIN if the SCB never frees up.
 * Locking is skipped when running as an EtherCAT device. */
static int e100_exec_cmd(struct nic *nic, u8 cmd, dma_addr_t dma_addr)
{
	unsigned long flags = 0;
	unsigned int i;
	int err = 0;

	if (!nic->ecdev)
		spin_lock_irqsave(&nic->cmd_lock, flags);

	/* Previous command is accepted when SCB clears */
	for (i = 0; i < E100_WAIT_SCB_TIMEOUT; i++) {
		if (likely(!ioread8(&nic->csr->scb.cmd_lo)))
			break;
		cpu_relax();
		/* after the first few spins, back off by 5us per loop */
		if (unlikely(i > E100_WAIT_SCB_FAST))
			udelay(5);
	}
	if (unlikely(i == E100_WAIT_SCB_TIMEOUT)) {
		err = -EAGAIN;
		goto err_unlock;
	}

	/* gen_ptr is only (re)loaded for commands other than cuc_resume */
	if (unlikely(cmd != cuc_resume))
		iowrite32(dma_addr, &nic->csr->scb.gen_ptr);
	iowrite8(cmd, &nic->csr->scb.cmd_lo);

err_unlock:
	if (!nic->ecdev)
		spin_unlock_irqrestore(&nic->cmd_lock, flags);

	return err;
}
       
   928 
       
/* Claim the next free control block, let cb_prepare fill it in, then
 * kick the CU for any not-yet-sent cbs.  Returns -ENOMEM if no cb is
 * free, -ENOSPC if this claimed the LAST free cb (the command is still
 * queued; callers use this to throttle).  Lockless in EtherCAT mode. */
static int e100_exec_cb(struct nic *nic, struct sk_buff *skb,
	void (*cb_prepare)(struct nic *, struct cb *, struct sk_buff *))
{
	struct cb *cb;
	unsigned long flags = 0;
	int err = 0;

	if (!nic->ecdev)
		spin_lock_irqsave(&nic->cb_lock, flags);

	if (unlikely(!nic->cbs_avail)) {
		err = -ENOMEM;
		goto err_unlock;
	}

	/* take the cb out of the free pool */
	cb = nic->cb_to_use;
	nic->cb_to_use = cb->next;
	nic->cbs_avail--;
	cb->skb = skb;

	/* ring is now full - signal the caller via the return value */
	if (unlikely(!nic->cbs_avail))
		err = -ENOSPC;

	cb_prepare(nic, cb, skb);

	/* Order is important otherwise we'll be in a race with h/w:
	 * set S-bit in current first, then clear S-bit in previous. */
	cb->command |= cpu_to_le16(cb_s);
	wmb();
	cb->prev->command &= cpu_to_le16(~cb_s);

	/* hand every prepared-but-unsent cb to the CU */
	while (nic->cb_to_send != nic->cb_to_use) {
		if (unlikely(e100_exec_cmd(nic, nic->cuc_cmd,
			nic->cb_to_send->dma_addr))) {
			/* Ok, here's where things get sticky.  It's
			 * possible that we can't schedule the command
			 * because the controller is too busy, so
			 * let's just queue the command and try again
			 * when another command is scheduled. */
			if (err == -ENOSPC) {
				//request a reset
				schedule_work(&nic->tx_timeout_task);
			}
			break;
		} else {
			/* after the first start, subsequent cbs resume */
			nic->cuc_cmd = cuc_resume;
			nic->cb_to_send = nic->cb_to_send->next;
		}
	}

err_unlock:
	if (!nic->ecdev)
		spin_unlock_irqrestore(&nic->cb_lock, flags);

	return err;
}
       
   985 
       
   986 static int mdio_read(struct net_device *netdev, int addr, int reg)
       
   987 {
       
   988 	struct nic *nic = netdev_priv(netdev);
       
   989 	return nic->mdio_ctrl(nic, addr, mdi_read, reg, 0);
       
   990 }
       
   991 
       
   992 static void mdio_write(struct net_device *netdev, int addr, int reg, int data)
       
   993 {
       
   994 	struct nic *nic = netdev_priv(netdev);
       
   995 
       
   996 	nic->mdio_ctrl(nic, addr, mdi_write, reg, data);
       
   997 }
       
   998 
       
/* the standard mdio_ctrl() function for usual MII-compliant hardware */
/* Performs one MDI read or write transaction through the mdi_ctrl
 * register.  Returns the data read (or echoed) on success, 0 if the
 * interface never becomes Ready - there is no way to report a timeout.
 * The mdio_lock is skipped in EtherCAT (polled, lockless) mode. */
static u16 mdio_ctrl_hw(struct nic *nic, u32 addr, u32 dir, u32 reg, u16 data)
{
	u32 data_out = 0;
	unsigned int i;
	unsigned long flags = 0;


	/*
	 * Stratus87247: we shouldn't be writing the MDI control
	 * register until the Ready bit shows True.  Also, since
	 * manipulation of the MDI control registers is a multi-step
	 * procedure it should be done under lock.
	 */
	if (!nic->ecdev)
		spin_lock_irqsave(&nic->mdio_lock, flags);
	/* wait up to 100 * 20us for the interface to go Ready */
	for (i = 100; i; --i) {
		if (ioread32(&nic->csr->mdi_ctrl) & mdi_ready)
			break;
		udelay(20);
	}
	if (unlikely(!i)) {
		netdev_err(nic->netdev, "e100.mdio_ctrl won't go Ready\n");
		if (!nic->ecdev)
			spin_unlock_irqrestore(&nic->mdio_lock, flags);
		return 0;		/* No way to indicate timeout error */
	}
	iowrite32((reg << 16) | (addr << 21) | dir | data, &nic->csr->mdi_ctrl);

	/* wait for the transaction to complete (mdi_ready set again) */
	for (i = 0; i < 100; i++) {
		udelay(20);
		if ((data_out = ioread32(&nic->csr->mdi_ctrl)) & mdi_ready)
			break;
	}
	if (!nic->ecdev)
		spin_unlock_irqrestore(&nic->mdio_lock, flags);
	netif_printk(nic, hw, KERN_DEBUG, nic->netdev,
		     "%s:addr=%d, reg=%d, data_in=0x%04X, data_out=0x%04X\n",
		     dir == mdi_read ? "READ" : "WRITE",
		     addr, reg, data, data_out);
	return (u16)data_out;
}
       
  1041 
       
  1042 /* slightly tweaked mdio_ctrl() function for phy_82552_v specifics */
       
  1043 static u16 mdio_ctrl_phy_82552_v(struct nic *nic,
       
  1044 				 u32 addr,
       
  1045 				 u32 dir,
       
  1046 				 u32 reg,
       
  1047 				 u16 data)
       
  1048 {
       
  1049 	if ((reg == MII_BMCR) && (dir == mdi_write)) {
       
  1050 		if (data & (BMCR_ANRESTART | BMCR_ANENABLE)) {
       
  1051 			u16 advert = mdio_read(nic->netdev, nic->mii.phy_id,
       
  1052 							MII_ADVERTISE);
       
  1053 
       
  1054 			/*
       
  1055 			 * Workaround Si issue where sometimes the part will not
       
  1056 			 * autoneg to 100Mbps even when advertised.
       
  1057 			 */
       
  1058 			if (advert & ADVERTISE_100FULL)
       
  1059 				data |= BMCR_SPEED100 | BMCR_FULLDPLX;
       
  1060 			else if (advert & ADVERTISE_100HALF)
       
  1061 				data |= BMCR_SPEED100;
       
  1062 		}
       
  1063 	}
       
  1064 	return mdio_ctrl_hw(nic, addr, dir, reg, data);
       
  1065 }
       
  1066 
       
  1067 /* Fully software-emulated mdio_ctrl() function for cards without
       
  1068  * MII-compliant PHYs.
       
  1069  * For now, this is mainly geared towards 80c24 support; in case of further
       
  1070  * requirements for other types (i82503, ...?) either extend this mechanism
       
  1071  * or split it, whichever is cleaner.
       
  1072  */
       
  1073 static u16 mdio_ctrl_phy_mii_emulated(struct nic *nic,
       
  1074 				      u32 addr,
       
  1075 				      u32 dir,
       
  1076 				      u32 reg,
       
  1077 				      u16 data)
       
  1078 {
       
  1079 	/* might need to allocate a netdev_priv'ed register array eventually
       
  1080 	 * to be able to record state changes, but for now
       
  1081 	 * some fully hardcoded register handling ought to be ok I guess. */
       
  1082 
       
  1083 	if (dir == mdi_read) {
       
  1084 		switch (reg) {
       
  1085 		case MII_BMCR:
       
  1086 			/* Auto-negotiation, right? */
       
  1087 			return  BMCR_ANENABLE |
       
  1088 				BMCR_FULLDPLX;
       
  1089 		case MII_BMSR:
       
  1090 			return	BMSR_LSTATUS /* for mii_link_ok() */ |
       
  1091 				BMSR_ANEGCAPABLE |
       
  1092 				BMSR_10FULL;
       
  1093 		case MII_ADVERTISE:
       
  1094 			/* 80c24 is a "combo card" PHY, right? */
       
  1095 			return	ADVERTISE_10HALF |
       
  1096 				ADVERTISE_10FULL;
       
  1097 		default:
       
  1098 			netif_printk(nic, hw, KERN_DEBUG, nic->netdev,
       
  1099 				     "%s:addr=%d, reg=%d, data=0x%04X: unimplemented emulation!\n",
       
  1100 				     dir == mdi_read ? "READ" : "WRITE",
       
  1101 				     addr, reg, data);
       
  1102 			return 0xFFFF;
       
  1103 		}
       
  1104 	} else {
       
  1105 		switch (reg) {
       
  1106 		default:
       
  1107 			netif_printk(nic, hw, KERN_DEBUG, nic->netdev,
       
  1108 				     "%s:addr=%d, reg=%d, data=0x%04X: unimplemented emulation!\n",
       
  1109 				     dir == mdi_read ? "READ" : "WRITE",
       
  1110 				     addr, reg, data);
       
  1111 			return 0xFFFF;
       
  1112 		}
       
  1113 	}
       
  1114 }
       
  1115 static inline int e100_phy_supports_mii(struct nic *nic)
       
  1116 {
       
  1117 	/* for now, just check it by comparing whether we
       
  1118 	   are using MII software emulation.
       
  1119 	*/
       
  1120 	return (nic->mdio_ctrl != mdio_ctrl_phy_mii_emulated);
       
  1121 }
       
  1122 
       
  1123 static void e100_get_defaults(struct nic *nic)
       
  1124 {
       
  1125 	struct param_range rfds = { .min = 16, .max = 256, .count = 256 };
       
  1126 	struct param_range cbs  = { .min = 64, .max = 256, .count = 128 };
       
  1127 
       
  1128 	/* MAC type is encoded as rev ID; exception: ICH is treated as 82559 */
       
  1129 	nic->mac = (nic->flags & ich) ? mac_82559_D101M : nic->pdev->revision;
       
  1130 	if (nic->mac == mac_unknown)
       
  1131 		nic->mac = mac_82557_D100_A;
       
  1132 
       
  1133 	nic->params.rfds = rfds;
       
  1134 	nic->params.cbs = cbs;
       
  1135 
       
  1136 	/* Quadwords to DMA into FIFO before starting frame transmit */
       
  1137 	nic->tx_threshold = 0xE0;
       
  1138 
       
  1139 	/* no interrupt for every tx completion, delay = 256us if not 557 */
       
  1140 	nic->tx_command = cpu_to_le16(cb_tx | cb_tx_sf |
       
  1141 		((nic->mac >= mac_82558_D101_A4) ? cb_cid : cb_i));
       
  1142 
       
  1143 	/* Template for a freshly allocated RFD */
       
  1144 	nic->blank_rfd.command = 0;
       
  1145 	nic->blank_rfd.rbd = cpu_to_le32(0xFFFFFFFF);
       
  1146 	nic->blank_rfd.size = cpu_to_le16(VLAN_ETH_FRAME_LEN);
       
  1147 
       
  1148 	/* MII setup */
       
  1149 	nic->mii.phy_id_mask = 0x1F;
       
  1150 	nic->mii.reg_num_mask = 0x1F;
       
  1151 	nic->mii.dev = nic->netdev;
       
  1152 	nic->mii.mdio_read = mdio_read;
       
  1153 	nic->mii.mdio_write = mdio_write;
       
  1154 }
       
  1155 
       
/* cb setup routine: build the configure command that programs the
 * controller's operating parameters.  Byte meanings follow the 8255x
 * Software Developer's Manual; later assignments deliberately override
 * earlier defaults based on MAC revision and driver flags, so order
 * matters here. */
static void e100_configure(struct nic *nic, struct cb *cb, struct sk_buff *skb)
{
	struct config *config = &cb->u.config;
	u8 *c = (u8 *)config;

	cb->command = cpu_to_le16(cb_config);

	memset(config, 0, sizeof(struct config));

	config->byte_count = 0x16;		/* bytes in this struct */
	config->rx_fifo_limit = 0x8;		/* bytes in FIFO before DMA */
	config->direct_rx_dma = 0x1;		/* reserved */
	config->standard_tcb = 0x1;		/* 1=standard, 0=extended */
	config->standard_stat_counter = 0x1;	/* 1=standard, 0=extended */
	config->rx_discard_short_frames = 0x1;	/* 1=discard, 0=pass */
	config->tx_underrun_retry = 0x3;	/* # of underrun retries */
	if (e100_phy_supports_mii(nic))
		config->mii_mode = 1;           /* 1=MII mode, 0=i82503 mode */
	config->pad10 = 0x6;
	config->no_source_addr_insertion = 0x1;	/* 1=no, 0=yes */
	config->preamble_length = 0x2;		/* 0=1, 1=3, 2=7, 3=15 bytes */
	config->ifs = 0x6;			/* x16 = inter frame spacing */
	config->ip_addr_hi = 0xF2;		/* ARP IP filter - not used */
	config->pad15_1 = 0x1;
	config->pad15_2 = 0x1;
	config->crs_or_cdt = 0x0;		/* 0=CRS only, 1=CRS or CDT */
	config->fc_delay_hi = 0x40;		/* time delay for fc frame */
	config->tx_padding = 0x1;		/* 1=pad short frames */
	config->fc_priority_threshold = 0x7;	/* 7=priority fc disabled */
	config->pad18 = 0x1;
	config->full_duplex_pin = 0x1;		/* 1=examine FDX# pin */
	config->pad20_1 = 0x1F;
	config->fc_priority_location = 0x1;	/* 1=byte#31, 0=byte#19 */
	config->pad21_1 = 0x5;

	config->adaptive_ifs = nic->adaptive_ifs;
	config->loopback = nic->loopback;

	if (nic->mii.force_media && nic->mii.full_duplex)
		config->full_duplex_force = 0x1;	/* 1=force, 0=auto */

	if (nic->flags & promiscuous || nic->loopback) {
		config->rx_save_bad_frames = 0x1;	/* 1=save, 0=discard */
		config->rx_discard_short_frames = 0x0;	/* 1=discard, 0=save */
		config->promiscuous_mode = 0x1;		/* 1=on, 0=off */
	}

	if (nic->flags & multicast_all)
		config->multicast_all = 0x1;		/* 1=accept, 0=no */

	/* disable WoL when up */
	/* (EtherCAT devices always disable WoL) */
	if (nic->ecdev || 
			(netif_running(nic->netdev) || !(nic->flags & wol_magic)))
		config->magic_packet_disable = 0x1;	/* 1=off, 0=on */

	if (nic->mac >= mac_82558_D101_A4) {
		config->fc_disable = 0x1;	/* 1=Tx fc off, 0=Tx fc on */
		config->mwi_enable = 0x1;	/* 1=enable, 0=disable */
		config->standard_tcb = 0x0;	/* 1=standard, 0=extended */
		config->rx_long_ok = 0x1;	/* 1=VLANs ok, 0=standard */
		if (nic->mac >= mac_82559_D101M) {
			config->tno_intr = 0x1;		/* TCO stats enable */
			/* Enable TCO in extended config */
			if (nic->mac >= mac_82551_10) {
				config->byte_count = 0x20; /* extended bytes */
				config->rx_d102_mode = 0x1; /* GMRC for TCO */
			}
		} else {
			config->standard_stat_counter = 0x0;
		}
	}

	/* dump the raw config bytes for debugging */
	netif_printk(nic, hw, KERN_DEBUG, nic->netdev,
		     "[00-07]=%02X:%02X:%02X:%02X:%02X:%02X:%02X:%02X\n",
		     c[0], c[1], c[2], c[3], c[4], c[5], c[6], c[7]);
	netif_printk(nic, hw, KERN_DEBUG, nic->netdev,
		     "[08-15]=%02X:%02X:%02X:%02X:%02X:%02X:%02X:%02X\n",
		     c[8], c[9], c[10], c[11], c[12], c[13], c[14], c[15]);
	netif_printk(nic, hw, KERN_DEBUG, nic->netdev,
		     "[16-23]=%02X:%02X:%02X:%02X:%02X:%02X:%02X:%02X\n",
		     c[16], c[17], c[18], c[19], c[20], c[21], c[22], c[23]);
}
       
  1238 
       
  1239 /*************************************************************************
       
  1240 *  CPUSaver parameters
       
  1241 *
       
  1242 *  All CPUSaver parameters are 16-bit literals that are part of a
       
  1243 *  "move immediate value" instruction.  By changing the value of
       
  1244 *  the literal in the instruction before the code is loaded, the
       
  1245 *  driver can change the algorithm.
       
  1246 *
       
  1247 *  INTDELAY - This loads the dead-man timer with its initial value.
       
  1248 *    When this timer expires the interrupt is asserted, and the
       
  1249 *    timer is reset each time a new packet is received.  (see
       
  1250 *    BUNDLEMAX below to set the limit on number of chained packets)
       
  1251 *    The current default is 0x600 or 1536.  Experiments show that
       
  1252 *    the value should probably stay within the 0x200 - 0x1000.
       
  1253 *
       
  1254 *  BUNDLEMAX -
       
  1255 *    This sets the maximum number of frames that will be bundled.  In
       
  1256 *    some situations, such as the TCP windowing algorithm, it may be
       
  1257 *    better to limit the growth of the bundle size than let it go as
       
  1258 *    high as it can, because that could cause too much added latency.
       
  1259 *    The default is six, because this is the number of packets in the
       
  1260 *    default TCP window size.  A value of 1 would make CPUSaver indicate
       
  1261 *    an interrupt for every frame received.  If you do not want to put
       
  1262 *    a limit on the bundle size, set this value to xFFFF.
       
  1263 *
       
  1264 *  BUNDLESMALL -
       
  1265 *    This contains a bit-mask describing the minimum size frame that
       
  1266 *    will be bundled.  The default masks the lower 7 bits, which means
       
  1267 *    that any frame less than 128 bytes in length will not be bundled,
       
  1268 *    but will instead immediately generate an interrupt.  This does
       
  1269 *    not affect the current bundle in any way.  Any frame that is 128
       
*    bytes or larger will be bundled normally.  This feature is meant
       
  1271 *    to provide immediate indication of ACK frames in a TCP environment.
       
  1272 *    Customers were seeing poor performance when a machine with CPUSaver
       
  1273 *    enabled was sending but not receiving.  The delay introduced when
       
  1274 *    the ACKs were received was enough to reduce total throughput, because
       
  1275 *    the sender would sit idle until the ACK was finally seen.
       
  1276 *
       
  1277 *    The current default is 0xFF80, which masks out the lower 7 bits.
       
  1278 *    This means that any frame which is x7F (127) bytes or smaller
       
  1279 *    will cause an immediate interrupt.  Because this value must be a
       
  1280 *    bit mask, there are only a few valid values that can be used.  To
       
  1281 *    turn this feature off, the driver can write the value xFFFF to the
       
  1282 *    lower word of this instruction (in the same way that the other
       
  1283 *    parameters are used).  Likewise, a value of 0xF800 (2047) would
       
  1284 *    cause an interrupt to be generated for every frame, because all
       
  1285 *    standard Ethernet frames are <= 2047 bytes in length.
       
  1286 *************************************************************************/
       
  1287 
       
  1288 /* if you wish to disable the ucode functionality, while maintaining the
       
  1289  * workarounds it provides, set the following defines to:
       
  1290  * BUNDLESMALL 0
       
  1291  * BUNDLEMAX 1
       
  1292  * INTDELAY 1
       
  1293  */
       
  1294 #define BUNDLESMALL 1
       
  1295 #define BUNDLEMAX (u16)6
       
  1296 #define INTDELAY (u16)1536 /* 0x600 */
       
  1297 
       
  1298 /* Initialize firmware */
       
  1299 static const struct firmware *e100_request_firmware(struct nic *nic)
       
  1300 {
       
  1301 	const char *fw_name;
       
  1302 	const struct firmware *fw = nic->fw;
       
  1303 	u8 timer, bundle, min_size;
       
  1304 	int err = 0;
       
  1305 
       
  1306 	/* do not load u-code for ICH devices */
       
  1307 	if (nic->flags & ich)
       
  1308 		return NULL;
       
  1309 
       
  1310 	/* Search for ucode match against h/w revision */
       
  1311 	if (nic->mac == mac_82559_D101M)
       
  1312 		fw_name = FIRMWARE_D101M;
       
  1313 	else if (nic->mac == mac_82559_D101S)
       
  1314 		fw_name = FIRMWARE_D101S;
       
  1315 	else if (nic->mac == mac_82551_F || nic->mac == mac_82551_10)
       
  1316 		fw_name = FIRMWARE_D102E;
       
  1317 	else /* No ucode on other devices */
       
  1318 		return NULL;
       
  1319 
       
  1320 	/* If the firmware has not previously been loaded, request a pointer
       
  1321 	 * to it. If it was previously loaded, we are reinitializing the
       
  1322 	 * adapter, possibly in a resume from hibernate, in which case
       
  1323 	 * request_firmware() cannot be used.
       
  1324 	 */
       
  1325 	if (!fw)
       
  1326 		err = request_firmware(&fw, fw_name, &nic->pdev->dev);
       
  1327 
       
  1328 	if (err) {
       
  1329 		netif_err(nic, probe, nic->netdev,
       
  1330 			  "Failed to load firmware \"%s\": %d\n",
       
  1331 			  fw_name, err);
       
  1332 		return ERR_PTR(err);
       
  1333 	}
       
  1334 
       
  1335 	/* Firmware should be precisely UCODE_SIZE (words) plus three bytes
       
  1336 	   indicating the offsets for BUNDLESMALL, BUNDLEMAX, INTDELAY */
       
  1337 	if (fw->size != UCODE_SIZE * 4 + 3) {
       
  1338 		netif_err(nic, probe, nic->netdev,
       
  1339 			  "Firmware \"%s\" has wrong size %zu\n",
       
  1340 			  fw_name, fw->size);
       
  1341 		release_firmware(fw);
       
  1342 		return ERR_PTR(-EINVAL);
       
  1343 	}
       
  1344 
       
  1345 	/* Read timer, bundle and min_size from end of firmware blob */
       
  1346 	timer = fw->data[UCODE_SIZE * 4];
       
  1347 	bundle = fw->data[UCODE_SIZE * 4 + 1];
       
  1348 	min_size = fw->data[UCODE_SIZE * 4 + 2];
       
  1349 
       
  1350 	if (timer >= UCODE_SIZE || bundle >= UCODE_SIZE ||
       
  1351 	    min_size >= UCODE_SIZE) {
       
  1352 		netif_err(nic, probe, nic->netdev,
       
  1353 			  "\"%s\" has bogus offset values (0x%x,0x%x,0x%x)\n",
       
  1354 			  fw_name, timer, bundle, min_size);
       
  1355 		release_firmware(fw);
       
  1356 		return ERR_PTR(-EINVAL);
       
  1357 	}
       
  1358 
       
  1359 	/* OK, firmware is validated and ready to use. Save a pointer
       
  1360 	 * to it in the nic */
       
  1361 	nic->fw = fw;
       
  1362 	return fw;
       
  1363 }
       
  1364 
       
/* cb setup routine: copy the (already validated) microcode image into
 * the cb and patch in the CPUSaver tunables (INTDELAY, BUNDLEMAX,
 * BUNDLESMALL) at the word offsets carried in the firmware trailer. */
static void e100_setup_ucode(struct nic *nic, struct cb *cb,
			     struct sk_buff *skb)
{
	const struct firmware *fw = (void *)skb;
	u8 timer, bundle, min_size;

	/* It's not a real skb; we just abused the fact that e100_exec_cb
	   will pass it through to here... */
	cb->skb = NULL;

	/* firmware is stored as little endian already */
	memcpy(cb->u.ucode, fw->data, UCODE_SIZE * 4);

	/* Read timer, bundle and min_size from end of firmware blob */
	timer = fw->data[UCODE_SIZE * 4];
	bundle = fw->data[UCODE_SIZE * 4 + 1];
	min_size = fw->data[UCODE_SIZE * 4 + 2];

	/* Insert user-tunable settings in cb->u.ucode: each patch keeps
	 * the instruction's upper 16 bits and replaces its low 16-bit
	 * immediate literal */
	cb->u.ucode[timer] &= cpu_to_le32(0xFFFF0000);
	cb->u.ucode[timer] |= cpu_to_le32(INTDELAY);
	cb->u.ucode[bundle] &= cpu_to_le32(0xFFFF0000);
	cb->u.ucode[bundle] |= cpu_to_le32(BUNDLEMAX);
	cb->u.ucode[min_size] &= cpu_to_le32(0xFFFF0000);
	cb->u.ucode[min_size] |= cpu_to_le32((BUNDLESMALL) ? 0xFFFF : 0xFF80);

	/* cb_el: this is the last cb in the chain for this command */
	cb->command = cpu_to_le16(cb_ucode | cb_el);
}
       
  1393 
       
/* Load microcode (if any) into the controller and poll for completion.
 * Returns 0 on success or when no ucode is required (PTR_ERR(NULL) is
 * 0, so the !fw path returns success), negative error otherwise. */
static inline int e100_load_ucode_wait(struct nic *nic)
{
	const struct firmware *fw;
	int err = 0, counter = 50;
	struct cb *cb = nic->cb_to_clean;

	fw = e100_request_firmware(nic);
	/* If it's NULL, then no ucode is required */
	if (!fw || IS_ERR(fw))
		return PTR_ERR(fw);

	if ((err = e100_exec_cb(nic, (void *)fw, e100_setup_ucode)))
		netif_err(nic, probe, nic->netdev,
			  "ucode cmd failed with error %d\n", err);

	/* must restart cuc */
	nic->cuc_cmd = cuc_start;

	/* wait for completion */
	e100_write_flush(nic);
	udelay(10);

	/* wait for possibly (ouch) 500ms */
	while (!(cb->status & cpu_to_le16(cb_complete))) {
		msleep(10);
		if (!--counter) break;
	}

	/* ack any interrupts, something could have been set */
	iowrite8(~0, &nic->csr->scb.stat_ack);

	/* if the command failed, or is not OK, notify and return */
	if (!counter || !(cb->status & cpu_to_le16(cb_ok))) {
		netif_err(nic, probe, nic->netdev, "ucode load failed\n");
		err = -EPERM;
	}

	return err;
}
       
  1433 
       
  1434 static void e100_setup_iaaddr(struct nic *nic, struct cb *cb,
       
  1435 	struct sk_buff *skb)
       
  1436 {
       
  1437 	cb->command = cpu_to_le16(cb_iaaddr);
       
  1438 	memcpy(cb->u.iaaddr, nic->netdev->dev_addr, ETH_ALEN);
       
  1439 }
       
  1440 
       
  1441 static void e100_dump(struct nic *nic, struct cb *cb, struct sk_buff *skb)
       
  1442 {
       
  1443 	cb->command = cpu_to_le16(cb_dump);
       
  1444 	cb->u.dump_buffer_addr = cpu_to_le32(nic->dma_addr +
       
  1445 		offsetof(struct mem, dump_buf));
       
  1446 }
       
  1447 
       
  1448 static int e100_phy_check_without_mii(struct nic *nic)
       
  1449 {
       
  1450 	u8 phy_type;
       
  1451 	int without_mii;
       
  1452 
       
  1453 	phy_type = (nic->eeprom[eeprom_phy_iface] >> 8) & 0x0f;
       
  1454 
       
  1455 	switch (phy_type) {
       
  1456 	case NoSuchPhy: /* Non-MII PHY; UNTESTED! */
       
  1457 	case I82503: /* Non-MII PHY; UNTESTED! */
       
  1458 	case S80C24: /* Non-MII PHY; tested and working */
       
  1459 		/* paragraph from the FreeBSD driver, "FXP_PHY_80C24":
       
  1460 		 * The Seeq 80c24 AutoDUPLEX(tm) Ethernet Interface Adapter
       
  1461 		 * doesn't have a programming interface of any sort.  The
       
  1462 		 * media is sensed automatically based on how the link partner
       
  1463 		 * is configured.  This is, in essence, manual configuration.
       
  1464 		 */
       
  1465 		netif_info(nic, probe, nic->netdev,
       
  1466 			   "found MII-less i82503 or 80c24 or other PHY\n");
       
  1467 
       
  1468 		nic->mdio_ctrl = mdio_ctrl_phy_mii_emulated;
       
  1469 		nic->mii.phy_id = 0; /* is this ok for an MII-less PHY? */
       
  1470 
       
  1471 		/* these might be needed for certain MII-less cards...
       
  1472 		 * nic->flags |= ich;
       
  1473 		 * nic->flags |= ich_10h_workaround; */
       
  1474 
       
  1475 		without_mii = 1;
       
  1476 		break;
       
  1477 	default:
       
  1478 		without_mii = 0;
       
  1479 		break;
       
  1480 	}
       
  1481 	return without_mii;
       
  1482 }
       
  1483 
       
/* NCONFIG bit enabling automatic MDI/MDI-X switching (written to
 * MII_NCONFIG in e100_phy_init). */
#define NCONFIG_AUTO_SWITCH	0x0080
/* National Semiconductor tx-PHY congestion-control register, mapped onto
 * the vendor-reserved MII register MII_RESV1. */
#define MII_NSC_CONG		MII_RESV1
#define NSC_CONG_ENABLE		0x0100
#define NSC_CONG_TXREADY	0x0400
/* Flow-control-supported bit in the MII advertisement word. */
#define ADVERTISE_FC_SUPPORTED	0x0400
       
/* Discover and initialize the PHY: probe all 32 MDIO addresses, isolate
 * unused PHYs, and apply model-specific workarounds (82552 flow-control
 * advertisement, National tx-PHY congestion control, MDI/MDI-X
 * auto-switching).
 *
 * Returns 0 on success (also for recognized MII-less boards) or -EAGAIN
 * when no known PHY can be located.
 */
static int e100_phy_init(struct nic *nic)
{
	struct net_device *netdev = nic->netdev;
	u32 addr;
	u16 bmcr, stat, id_lo, id_hi, cong;

	/* Discover phy addr by searching addrs in order {1,0,2,..., 31} */
	for (addr = 0; addr < 32; addr++) {
		nic->mii.phy_id = (addr == 0) ? 1 : (addr == 1) ? 0 : addr;
		bmcr = mdio_read(netdev, nic->mii.phy_id, MII_BMCR);
		/* BMSR is read twice — presumably because its link bits are
		 * latched and the first read can return stale status;
		 * TODO(review): confirm against the PHY datasheet. */
		stat = mdio_read(netdev, nic->mii.phy_id, MII_BMSR);
		stat = mdio_read(netdev, nic->mii.phy_id, MII_BMSR);
		/* All-ones or all-zeros reads mean "nothing responding". */
		if (!((bmcr == 0xFFFF) || ((stat == 0) && (bmcr == 0))))
			break;
	}
	if (addr == 32) {
		/* uhoh, no PHY detected: check whether we seem to be some
		 * weird, rare variant which is *known* to not have any MII.
		 * But do this AFTER MII checking only, since this does
		 * lookup of EEPROM values which may easily be unreliable. */
		if (e100_phy_check_without_mii(nic))
			return 0; /* simply return and hope for the best */
		else {
			/* for unknown cases log a fatal error */
			netif_err(nic, hw, nic->netdev,
				  "Failed to locate any known PHY, aborting\n");
			return -EAGAIN;
		}
	} else
		netif_printk(nic, hw, KERN_DEBUG, nic->netdev,
			     "phy_addr = %d\n", nic->mii.phy_id);

	/* Get phy ID */
	id_lo = mdio_read(netdev, nic->mii.phy_id, MII_PHYSID1);
	id_hi = mdio_read(netdev, nic->mii.phy_id, MII_PHYSID2);
	nic->phy = (u32)id_hi << 16 | (u32)id_lo;
	netif_printk(nic, hw, KERN_DEBUG, nic->netdev,
		     "phy ID = 0x%08X\n", nic->phy);

	/* Select the phy and isolate the rest */
	for (addr = 0; addr < 32; addr++) {
		if (addr != nic->mii.phy_id) {
			mdio_write(netdev, addr, MII_BMCR, BMCR_ISOLATE);
		} else if (nic->phy != phy_82552_v) {
			bmcr = mdio_read(netdev, addr, MII_BMCR);
			mdio_write(netdev, addr, MII_BMCR,
				bmcr & ~BMCR_ISOLATE);
		}
	}
	/*
	 * Workaround for 82552:
	 * Clear the ISOLATE bit on selected phy_id last (mirrored on all
	 * other phy_id's) using bmcr value from addr discovery loop above.
	 */
	if (nic->phy == phy_82552_v)
		mdio_write(netdev, nic->mii.phy_id, MII_BMCR,
			bmcr & ~BMCR_ISOLATE);

	/* Handle National tx phys */
#define NCS_PHY_MODEL_MASK	0xFFF0FFFF
	if ((nic->phy & NCS_PHY_MODEL_MASK) == phy_nsc_tx) {
		/* Disable congestion control */
		cong = mdio_read(netdev, nic->mii.phy_id, MII_NSC_CONG);
		cong |= NSC_CONG_TXREADY;
		cong &= ~NSC_CONG_ENABLE;
		mdio_write(netdev, nic->mii.phy_id, MII_NSC_CONG, cong);
	}

	if (nic->phy == phy_82552_v) {
		u16 advert = mdio_read(netdev, nic->mii.phy_id, MII_ADVERTISE);

		/* assign special tweaked mdio_ctrl() function */
		nic->mdio_ctrl = mdio_ctrl_phy_82552_v;

		/* Workaround Si not advertising flow-control during autoneg */
		advert |= ADVERTISE_PAUSE_CAP | ADVERTISE_PAUSE_ASYM;
		mdio_write(netdev, nic->mii.phy_id, MII_ADVERTISE, advert);

		/* Reset for the above changes to take effect */
		bmcr = mdio_read(netdev, nic->mii.phy_id, MII_BMCR);
		bmcr |= BMCR_RESET;
		mdio_write(netdev, nic->mii.phy_id, MII_BMCR, bmcr);
	} else if ((nic->mac >= mac_82550_D102) || ((nic->flags & ich) &&
	   (mdio_read(netdev, nic->mii.phy_id, MII_TPISTATUS) & 0x8000) &&
		!(nic->eeprom[eeprom_cnfg_mdix] & eeprom_mdix_enabled))) {
		/* enable/disable MDI/MDI-X auto-switching. */
		mdio_write(netdev, nic->mii.phy_id, MII_NCONFIG,
				nic->mii.force_media ? 0 : NCONFIG_AUTO_SWITCH);
	}

	return 0;
}
       
  1581 
       
  1582 static int e100_hw_init(struct nic *nic)
       
  1583 {
       
  1584 	int err;
       
  1585 
       
  1586 	e100_hw_reset(nic);
       
  1587 
       
  1588 	netif_err(nic, hw, nic->netdev, "e100_hw_init\n");
       
  1589 	if (!in_interrupt() && (err = e100_self_test(nic)))
       
  1590 		return err;
       
  1591 
       
  1592 	if ((err = e100_phy_init(nic)))
       
  1593 		return err;
       
  1594 	if ((err = e100_exec_cmd(nic, cuc_load_base, 0)))
       
  1595 		return err;
       
  1596 	if ((err = e100_exec_cmd(nic, ruc_load_base, 0)))
       
  1597 		return err;
       
  1598 	if ((err = e100_load_ucode_wait(nic)))
       
  1599 		return err;
       
  1600 	if ((err = e100_exec_cb(nic, NULL, e100_configure)))
       
  1601 		return err;
       
  1602 	if ((err = e100_exec_cb(nic, NULL, e100_setup_iaaddr)))
       
  1603 		return err;
       
  1604 	if ((err = e100_exec_cmd(nic, cuc_dump_addr,
       
  1605 		nic->dma_addr + offsetof(struct mem, stats))))
       
  1606 		return err;
       
  1607 	if ((err = e100_exec_cmd(nic, cuc_dump_reset, 0)))
       
  1608 		return err;
       
  1609 
       
  1610 	e100_disable_irq(nic);
       
  1611 
       
  1612 	return 0;
       
  1613 }
       
  1614 
       
  1615 static void e100_multi(struct nic *nic, struct cb *cb, struct sk_buff *skb)
       
  1616 {
       
  1617 	struct net_device *netdev = nic->netdev;
       
  1618 	struct netdev_hw_addr *ha;
       
  1619 	u16 i, count = min(netdev_mc_count(netdev), E100_MAX_MULTICAST_ADDRS);
       
  1620 
       
  1621 	cb->command = cpu_to_le16(cb_multi);
       
  1622 	cb->u.multi.count = cpu_to_le16(count * ETH_ALEN);
       
  1623 	i = 0;
       
  1624 	netdev_for_each_mc_addr(ha, netdev) {
       
  1625 		if (i == count)
       
  1626 			break;
       
  1627 		memcpy(&cb->u.multi.addr[i++ * ETH_ALEN], &ha->addr,
       
  1628 			ETH_ALEN);
       
  1629 	}
       
  1630 }
       
  1631 
       
  1632 static void e100_set_multicast_list(struct net_device *netdev)
       
  1633 {
       
  1634 	struct nic *nic = netdev_priv(netdev);
       
  1635 
       
  1636 	netif_printk(nic, hw, KERN_DEBUG, nic->netdev,
       
  1637 		     "mc_count=%d, flags=0x%04X\n",
       
  1638 		     netdev_mc_count(netdev), netdev->flags);
       
  1639 
       
  1640 	if (netdev->flags & IFF_PROMISC)
       
  1641 		nic->flags |= promiscuous;
       
  1642 	else
       
  1643 		nic->flags &= ~promiscuous;
       
  1644 
       
  1645 	if (netdev->flags & IFF_ALLMULTI ||
       
  1646 		netdev_mc_count(netdev) > E100_MAX_MULTICAST_ADDRS)
       
  1647 		nic->flags |= multicast_all;
       
  1648 	else
       
  1649 		nic->flags &= ~multicast_all;
       
  1650 
       
  1651 	e100_exec_cb(nic, NULL, e100_configure);
       
  1652 	e100_exec_cb(nic, NULL, e100_multi);
       
  1653 }
       
  1654 
       
/* Harvest hardware statistics from the shared-memory dump buffer into
 * net_device_stats and driver-private counters, then kick off the next
 * dump+reset command.
 *
 * The dump buffer layout grew across MAC generations, so the location
 * of the "dump complete" marker word differs per nic->mac.
 */
static void e100_update_stats(struct nic *nic)
{
	struct net_device *dev = nic->netdev;
	struct net_device_stats *ns = &dev->stats;
	struct stats *s = &nic->mem->stats;
	/* Pick the completion-marker word for this MAC generation: the
	 * stats area ends earlier on older parts. */
	__le32 *complete = (nic->mac < mac_82558_D101_A4) ? &s->fc_xmt_pause :
		(nic->mac < mac_82559_D101M) ? (__le32 *)&s->xmt_tco_frames :
		&s->complete;

	/* Device's stats reporting may take several microseconds to
	 * complete, so we're always waiting for results of the
	 * previous command. */

	if (*complete == cpu_to_le32(cuc_dump_reset_complete)) {
		/* Clear the marker so we can detect the next completion. */
		*complete = 0;
		nic->tx_frames = le32_to_cpu(s->tx_good_frames);
		nic->tx_collisions = le32_to_cpu(s->tx_total_collisions);
		ns->tx_aborted_errors += le32_to_cpu(s->tx_max_collisions);
		ns->tx_window_errors += le32_to_cpu(s->tx_late_collisions);
		ns->tx_carrier_errors += le32_to_cpu(s->tx_lost_crs);
		ns->tx_fifo_errors += le32_to_cpu(s->tx_underruns);
		ns->collisions += nic->tx_collisions;
		ns->tx_errors += le32_to_cpu(s->tx_max_collisions) +
			le32_to_cpu(s->tx_lost_crs);
		ns->rx_length_errors += le32_to_cpu(s->rx_short_frame_errors) +
			nic->rx_over_length_errors;
		ns->rx_crc_errors += le32_to_cpu(s->rx_crc_errors);
		ns->rx_frame_errors += le32_to_cpu(s->rx_alignment_errors);
		ns->rx_over_errors += le32_to_cpu(s->rx_overrun_errors);
		ns->rx_fifo_errors += le32_to_cpu(s->rx_overrun_errors);
		ns->rx_missed_errors += le32_to_cpu(s->rx_resource_errors);
		ns->rx_errors += le32_to_cpu(s->rx_crc_errors) +
			le32_to_cpu(s->rx_alignment_errors) +
			le32_to_cpu(s->rx_short_frame_errors) +
			le32_to_cpu(s->rx_cdt_errors);
		nic->tx_deferred += le32_to_cpu(s->tx_deferred);
		nic->tx_single_collisions +=
			le32_to_cpu(s->tx_single_collisions);
		nic->tx_multiple_collisions +=
			le32_to_cpu(s->tx_multiple_collisions);
		/* Counters below only exist on newer MAC generations. */
		if (nic->mac >= mac_82558_D101_A4) {
			nic->tx_fc_pause += le32_to_cpu(s->fc_xmt_pause);
			nic->rx_fc_pause += le32_to_cpu(s->fc_rcv_pause);
			nic->rx_fc_unsupported +=
				le32_to_cpu(s->fc_rcv_unsupported);
			if (nic->mac >= mac_82559_D101M) {
				nic->tx_tco_frames +=
					le16_to_cpu(s->xmt_tco_frames);
				nic->rx_tco_frames +=
					le16_to_cpu(s->rcv_tco_frames);
			}
		}
	}


	/* Start the next asynchronous dump+reset; results are picked up
	 * on the following call. */
	if (e100_exec_cmd(nic, cuc_dump_reset, 0))
		netif_printk(nic, tx_err, KERN_DEBUG, nic->netdev,
			     "exec cuc_dump_reset failed\n");
}
       
  1714 
       
  1715 static void e100_adjust_adaptive_ifs(struct nic *nic, int speed, int duplex)
       
  1716 {
       
  1717 	/* Adjust inter-frame-spacing (IFS) between two transmits if
       
  1718 	 * we're getting collisions on a half-duplex connection. */
       
  1719 
       
  1720 	if (duplex == DUPLEX_HALF) {
       
  1721 		u32 prev = nic->adaptive_ifs;
       
  1722 		u32 min_frames = (speed == SPEED_100) ? 1000 : 100;
       
  1723 
       
  1724 		if ((nic->tx_frames / 32 < nic->tx_collisions) &&
       
  1725 		   (nic->tx_frames > min_frames)) {
       
  1726 			if (nic->adaptive_ifs < 60)
       
  1727 				nic->adaptive_ifs += 5;
       
  1728 		} else if (nic->tx_frames < min_frames) {
       
  1729 			if (nic->adaptive_ifs >= 5)
       
  1730 				nic->adaptive_ifs -= 5;
       
  1731 		}
       
  1732 		if (nic->adaptive_ifs != prev)
       
  1733 			e100_exec_cb(nic, NULL, e100_configure);
       
  1734 	}
       
  1735 }
       
  1736 
       
/* Periodic watchdog timer: maintain link state, harvest statistics,
 * adjust adaptive IFS, and apply MAC-generation workarounds.
 *
 * @data: the struct nic *, cast through the timer's unsigned long.
 *
 * In EtherCAT mode (nic->ecdev) only the link state is reported to the
 * EtherCAT master; everything else (and the timer re-arm) is skipped.
 */
static void e100_watchdog(unsigned long data)
{
	struct nic *nic = (struct nic *)data;
	struct ethtool_cmd cmd;

	if (nic->ecdev) {
		ecdev_set_link(nic->ecdev, mii_link_ok(&nic->mii) ? 1 : 0);
		return;
	}

	netif_printk(nic, timer, KERN_DEBUG, nic->netdev,
		     "right now = %ld\n", jiffies);

	/* mii library handles link maintenance tasks */

	mii_ethtool_gset(&nic->mii, &cmd);

	/* Log link transitions (up with speed/duplex, or down). */
	if (mii_link_ok(&nic->mii) && !netif_carrier_ok(nic->netdev)) {
		netdev_info(nic->netdev, "NIC Link is Up %u Mbps %s Duplex\n",
			    cmd.speed == SPEED_100 ? 100 : 10,
			    cmd.duplex == DUPLEX_FULL ? "Full" : "Half");
	} else if (!mii_link_ok(&nic->mii) && netif_carrier_ok(nic->netdev)) {
		netdev_info(nic->netdev, "NIC Link is Down\n");
	}

	mii_check_link(&nic->mii);

	/* Software generated interrupt to recover from (rare) Rx
	 * allocation failure.
	 * Unfortunately have to use a spinlock to not re-enable interrupts
	 * accidentally, due to hardware that shares a register between the
	 * interrupt mask bit and the SW Interrupt generation bit */
	spin_lock_irq(&nic->cmd_lock);
	iowrite8(ioread8(&nic->csr->scb.cmd_hi) | irq_sw_gen,&nic->csr->scb.cmd_hi);
	e100_write_flush(nic);
	spin_unlock_irq(&nic->cmd_lock);

	e100_update_stats(nic);
	e100_adjust_adaptive_ifs(nic, cmd.speed, cmd.duplex);

	if (nic->mac <= mac_82557_D100_C)
		/* Issue a multicast command to workaround a 557 lock up */
		e100_set_multicast_list(nic->netdev);

	if (nic->flags & ich && cmd.speed==SPEED_10 && cmd.duplex==DUPLEX_HALF)
		/* Need SW workaround for ICH[x] 10Mbps/half duplex Tx hang. */
		nic->flags |= ich_10h_workaround;
	else
		nic->flags &= ~ich_10h_workaround;

	/* Re-arm ourselves for the next period. */
	mod_timer(&nic->watchdog,
		  round_jiffies(jiffies + E100_WATCHDOG_PERIOD));
}
       
  1790 
       
/* CB setup callback: build a transmit command block for @skb, including
 * DMA-mapping the frame data into the single TBD entry.
 *
 * NOTE(review): the pci_map_single() result is not checked for mapping
 * failure here (the original author left the same question as a
 * comment); the void return type gives no way to report it to
 * e100_exec_cb()'s caller without an interface change.
 */
static void e100_xmit_prepare(struct nic *nic, struct cb *cb,
	struct sk_buff *skb)
{
	cb->command = nic->tx_command;
	/* interrupt every 16 packets regardless of delay: request cb_i
	 * whenever cbs_avail is a multiple of 16 */
	if ((nic->cbs_avail & ~15) == nic->cbs_avail)
		cb->command |= cpu_to_le16(cb_i);
	cb->u.tcb.tbd_array = cb->dma_addr + offsetof(struct cb, u.tcb.tbd);
	cb->u.tcb.tcb_byte_count = 0;
	cb->u.tcb.threshold = nic->tx_threshold;
	cb->u.tcb.tbd_count = 1;
	cb->u.tcb.tbd.buf_addr = cpu_to_le32(pci_map_single(nic->pdev,
		skb->data, skb->len, PCI_DMA_TODEVICE));
	/* check for mapping failure? */
	cb->u.tcb.tbd.size = cpu_to_le16(skb->len);
}
       
  1807 
       
  1808 static netdev_tx_t e100_xmit_frame(struct sk_buff *skb,
       
  1809 				   struct net_device *netdev)
       
  1810 {
       
  1811 	struct nic *nic = netdev_priv(netdev);
       
  1812 	int err;
       
  1813 
       
  1814 	if (nic->flags & ich_10h_workaround) {
       
  1815 		/* SW workaround for ICH[x] 10Mbps/half duplex Tx hang.
       
  1816 		   Issue a NOP command followed by a 1us delay before
       
  1817 		   issuing the Tx command. */
       
  1818 		if (e100_exec_cmd(nic, cuc_nop, 0))
       
  1819 			netif_printk(nic, tx_err, KERN_DEBUG, nic->netdev,
       
  1820 				     "exec cuc_nop failed\n");
       
  1821 		udelay(1);
       
  1822 	}
       
  1823 
       
  1824 	err = e100_exec_cb(nic, skb, e100_xmit_prepare);
       
  1825 
       
  1826 	switch (err) {
       
  1827 	case -ENOSPC:
       
  1828 		/* We queued the skb, but now we're out of space. */
       
  1829 		netif_printk(nic, tx_err, KERN_DEBUG, nic->netdev,
       
  1830 			     "No space for CB\n");
       
  1831 		if (!nic->ecdev)
       
  1832 			netif_stop_queue(netdev);
       
  1833 		break;
       
  1834 	case -ENOMEM:
       
  1835 		/* This is a hard error - log it. */
       
  1836 		netif_printk(nic, tx_err, KERN_DEBUG, nic->netdev,
       
  1837 			     "Out of Tx resources, returning skb\n");
       
  1838 		if (!nic->ecdev)
       
  1839 			netif_stop_queue(netdev);
       
  1840 		return NETDEV_TX_BUSY;
       
  1841 	}
       
  1842 
       
  1843 	return NETDEV_TX_OK;
       
  1844 }
       
  1845 
       
/* Reclaim completed transmit command blocks: walk the CB ring from
 * cb_to_clean, account stats, unmap DMA, and free skbs.
 *
 * Returns non-zero if at least one CB was cleaned.
 *
 * In EtherCAT mode (nic->ecdev) no lock is taken and skbs are not
 * freed here (the EtherCAT master owns the frame memory).
 */
static int e100_tx_clean(struct nic *nic)
{
	struct net_device *dev = nic->netdev;
	struct cb *cb;
	int tx_cleaned = 0;

	if (!nic->ecdev)
		spin_lock(&nic->cb_lock);

	/* Clean CBs marked complete */
	for (cb = nic->cb_to_clean;
	    cb->status & cpu_to_le16(cb_complete);
	    cb = nic->cb_to_clean = cb->next) {
		rmb(); /* read skb after status */
		netif_printk(nic, tx_done, KERN_DEBUG, nic->netdev,
			     "cb[%d]->status = 0x%04X\n",
			     (int)(((void*)cb - (void*)nic->cbs)/sizeof(struct cb)),
			     cb->status);

		/* Only Tx CBs carry an skb; control CBs (config, ucode,
		 * iaaddr, ...) have cb->skb == NULL. */
		if (likely(cb->skb != NULL)) {
			dev->stats.tx_packets++;
			dev->stats.tx_bytes += cb->skb->len;

			pci_unmap_single(nic->pdev,
				le32_to_cpu(cb->u.tcb.tbd.buf_addr),
				le16_to_cpu(cb->u.tcb.tbd.size),
				PCI_DMA_TODEVICE);
			if (!nic->ecdev)
				dev_kfree_skb_any(cb->skb);
			cb->skb = NULL;
			tx_cleaned = 1;
		}
		cb->status = 0;
		nic->cbs_avail++;
	}

	if (!nic->ecdev) {
		spin_unlock(&nic->cb_lock);

		/* Recover from running out of Tx resources in xmit_frame */
		if (unlikely(tx_cleaned && netif_queue_stopped(nic->netdev)))
			netif_wake_queue(nic->netdev);
	}

	return tx_cleaned;
}
       
  1892 
       
/* Tear down the CB ring: unmap/free any in-flight Tx buffers, return
 * the ring memory to the DMA pool, and reset all ring bookkeeping.
 * Safe to call when nic->cbs is already NULL. */
static void e100_clean_cbs(struct nic *nic)
{
	if (nic->cbs) {
		/* Walk until every CB has been accounted as available,
		 * cleaning up any still-mapped Tx frames on the way. */
		while (nic->cbs_avail != nic->params.cbs.count) {
			struct cb *cb = nic->cb_to_clean;
			if (cb->skb) {
				pci_unmap_single(nic->pdev,
					le32_to_cpu(cb->u.tcb.tbd.buf_addr),
					le16_to_cpu(cb->u.tcb.tbd.size),
					PCI_DMA_TODEVICE);
				/* EtherCAT frames are not owned by us. */
				if (!nic->ecdev)
					dev_kfree_skb(cb->skb);
			}
			nic->cb_to_clean = nic->cb_to_clean->next;
			nic->cbs_avail++;
		}
		pci_pool_free(nic->cbs_pool, nic->cbs, nic->cbs_dma_addr);
		nic->cbs = NULL;
		nic->cbs_avail = 0;
	}
	/* Next command after a fresh ring must be cuc_start. */
	nic->cuc_cmd = cuc_start;
	nic->cb_to_use = nic->cb_to_send = nic->cb_to_clean =
		nic->cbs;
}
       
  1917 
       
  1918 static int e100_alloc_cbs(struct nic *nic)
       
  1919 {
       
  1920 	struct cb *cb;
       
  1921 	unsigned int i, count = nic->params.cbs.count;
       
  1922 
       
  1923 	nic->cuc_cmd = cuc_start;
       
  1924 	nic->cb_to_use = nic->cb_to_send = nic->cb_to_clean = NULL;
       
  1925 	nic->cbs_avail = 0;
       
  1926 
       
  1927 	nic->cbs = pci_pool_alloc(nic->cbs_pool, GFP_KERNEL,
       
  1928 				  &nic->cbs_dma_addr);
       
  1929 	if (!nic->cbs)
       
  1930 		return -ENOMEM;
       
  1931 	memset(nic->cbs, 0, count * sizeof(struct cb));
       
  1932 
       
  1933 	for (cb = nic->cbs, i = 0; i < count; cb++, i++) {
       
  1934 		cb->next = (i + 1 < count) ? cb + 1 : nic->cbs;
       
  1935 		cb->prev = (i == 0) ? nic->cbs + count - 1 : cb - 1;
       
  1936 
       
  1937 		cb->dma_addr = nic->cbs_dma_addr + i * sizeof(struct cb);
       
  1938 		cb->link = cpu_to_le32(nic->cbs_dma_addr +
       
  1939 			((i+1) % count) * sizeof(struct cb));
       
  1940 	}
       
  1941 
       
  1942 	nic->cb_to_use = nic->cb_to_send = nic->cb_to_clean = nic->cbs;
       
  1943 	nic->cbs_avail = count;
       
  1944 
       
  1945 	return 0;
       
  1946 }
       
  1947 
       
  1948 static inline void e100_start_receiver(struct nic *nic, struct rx *rx)
       
  1949 {
       
  1950 	if (!nic->rxs) return;
       
  1951 	if (RU_SUSPENDED != nic->ru_running) return;
       
  1952 
       
  1953 	/* handle init time starts */
       
  1954 	if (!rx) rx = nic->rxs;
       
  1955 
       
  1956 	/* (Re)start RU if suspended or idle and RFA is non-NULL */
       
  1957 	if (rx->skb) {
       
  1958 		e100_exec_cmd(nic, ruc_start, rx->dma_addr);
       
  1959 		nic->ru_running = RU_RUNNING;
       
  1960 	}
       
  1961 }
       
  1962 
       
/* Receive buffer size: RFD header plus a max-size VLAN Ethernet frame. */
#define RFD_BUF_LEN (sizeof(struct rfd) + VLAN_ETH_FRAME_LEN)

/* Allocate an skb for @rx, seed it with a blank RFD, DMA-map it, and
 * splice it onto the end of the RFA by patching the previous RFD's link
 * field.
 *
 * Returns 0 on success, -ENOMEM on allocation or DMA-mapping failure
 * (in which case rx->skb is left NULL).
 */
static int e100_rx_alloc_skb(struct nic *nic, struct rx *rx)
{
	if (!(rx->skb = netdev_alloc_skb_ip_align(nic->netdev, RFD_BUF_LEN)))
		return -ENOMEM;

	/* Init, and map the RFD. */
	skb_copy_to_linear_data(rx->skb, &nic->blank_rfd, sizeof(struct rfd));
	rx->dma_addr = pci_map_single(nic->pdev, rx->skb->data,
		RFD_BUF_LEN, PCI_DMA_BIDIRECTIONAL);

	if (pci_dma_mapping_error(nic->pdev, rx->dma_addr)) {
		dev_kfree_skb_any(rx->skb);
		rx->skb = NULL;
		rx->dma_addr = 0;
		return -ENOMEM;
	}

	/* Link the RFD to end of RFA by linking previous RFD to
	 * this one.  We are safe to touch the previous RFD because
	 * it is protected by the before last buffer's el bit being set */
	if (rx->prev->skb) {
		struct rfd *prev_rfd = (struct rfd *)rx->prev->skb->data;
		/* put_unaligned: the link field inside skb data may not be
		 * naturally aligned. */
		put_unaligned_le32(rx->dma_addr, &prev_rfd->link);
		/* Flush the patched link out to the device. */
		pci_dma_sync_single_for_device(nic->pdev, rx->prev->dma_addr,
			sizeof(struct rfd), PCI_DMA_BIDIRECTIONAL);
	}

	return 0;
}
       
  1993 
       
/* Examine one receive ring entry and, if a frame has arrived, hand it
 * up the stack (or to the EtherCAT master).
 *
 * @work_done/@work_to_do: NAPI budget accounting; may be NULL/ignored.
 *
 * Returns 0 when a frame was consumed, -EAGAIN when the budget is
 * exhausted, -ENODATA when this RFD is not yet complete.
 *
 * In EtherCAT mode the skb is recycled in place (remapped with a fresh
 * blank RFD) instead of being replaced by e100_rx_alloc_skb().
 */
static int e100_rx_indicate(struct nic *nic, struct rx *rx,
	unsigned int *work_done, unsigned int work_to_do)
{
	struct net_device *dev = nic->netdev;
	struct sk_buff *skb = rx->skb;
	struct rfd *rfd = (struct rfd *)skb->data;
	u16 rfd_status, actual_size;

	if (unlikely(work_done && *work_done >= work_to_do))
		return -EAGAIN;

	/* Need to sync before taking a peek at cb_complete bit */
	pci_dma_sync_single_for_cpu(nic->pdev, rx->dma_addr,
		sizeof(struct rfd), PCI_DMA_BIDIRECTIONAL);
	rfd_status = le16_to_cpu(rfd->status);

	netif_printk(nic, rx_status, KERN_DEBUG, nic->netdev,
		     "status=0x%04X\n", rfd_status);
	rmb(); /* read size after status bit */

	/* If data isn't ready, nothing to indicate */
	if (unlikely(!(rfd_status & cb_complete))) {
		/* If the next buffer has the el bit, but we think the receiver
		 * is still running, check to see if it really stopped while
		 * we had interrupts off.
		 * This allows for a fast restart without re-enabling
		 * interrupts */
		if ((le16_to_cpu(rfd->command) & cb_el) &&
		    (RU_RUNNING == nic->ru_running))

			if (ioread8(&nic->csr->scb.status) & rus_no_res)
				nic->ru_running = RU_SUSPENDED;
		/* Hand the RFD back to the device untouched. */
		pci_dma_sync_single_for_device(nic->pdev, rx->dma_addr,
					       sizeof(struct rfd),
					       PCI_DMA_FROMDEVICE);
		return -ENODATA;
	}

	/* Get actual data size (low 14 bits; clamp to buffer capacity) */
	actual_size = le16_to_cpu(rfd->actual_size) & 0x3FFF;
	if (unlikely(actual_size > RFD_BUF_LEN - sizeof(struct rfd)))
		actual_size = RFD_BUF_LEN - sizeof(struct rfd);

	/* Get data */
	pci_unmap_single(nic->pdev, rx->dma_addr,
		RFD_BUF_LEN, PCI_DMA_BIDIRECTIONAL);

	/* If this buffer has the el bit, but we think the receiver
	 * is still running, check to see if it really stopped while
	 * we had interrupts off.
	 * This allows for a fast restart without re-enabling interrupts.
	 * This can happen when the RU sees the size change but also sees
	 * the el bit set. */
	if ((le16_to_cpu(rfd->command) & cb_el) &&
	    (RU_RUNNING == nic->ru_running)) {

	    if (ioread8(&nic->csr->scb.status) & rus_no_res)
		nic->ru_running = RU_SUSPENDED;
	}

	if (!nic->ecdev) {
		/* Pull off the RFD and put the actual data (minus eth hdr) */
		skb_reserve(skb, sizeof(struct rfd));
		skb_put(skb, actual_size);
		skb->protocol = eth_type_trans(skb, nic->netdev);
	}

	if (unlikely(!(rfd_status & cb_ok))) {
		if (!nic->ecdev) {
			/* Don't indicate if hardware indicates errors */
			dev_kfree_skb_any(skb);
		}
	} else if (actual_size > ETH_DATA_LEN + VLAN_ETH_HLEN) {
		/* Don't indicate oversized frames */
		nic->rx_over_length_errors++;
		if (!nic->ecdev)
			dev_kfree_skb_any(skb);
	} else {
		dev->stats.rx_packets++;
		dev->stats.rx_bytes += actual_size;
		if (nic->ecdev) {
			/* EtherCAT path: pass raw frame data (RFD header
			 * stripped by offset, not skb_reserve). */
			ecdev_receive(nic->ecdev,
					skb->data + sizeof(struct rfd), actual_size);

			// No need to detect link status as
			// long as frames are received: Reset watchdog.
			nic->ec_watchdog_jiffies = jiffies;
		} else {
			netif_receive_skb(skb);
		}
		if (work_done)
			(*work_done)++;
	}

	if (nic->ecdev) {
		// make receive frame descriptior usable again
		memcpy(skb->data, &nic->blank_rfd, sizeof(struct rfd));
		rx->dma_addr = pci_map_single(nic->pdev, skb->data,
				RFD_BUF_LEN, PCI_DMA_BIDIRECTIONAL);
		if (pci_dma_mapping_error(nic->pdev, rx->dma_addr)) {
			rx->dma_addr = 0;
		}

		/* Link the RFD to end of RFA by linking previous RFD to
		 * this one.  We are safe to touch the previous RFD because
		 * it is protected by the before last buffer's el bit being set */
		if (rx->prev->skb) {
			struct rfd *prev_rfd = (struct rfd *)rx->prev->skb->data;
			put_unaligned_le32(rx->dma_addr, &prev_rfd->link);
			pci_dma_sync_single_for_device(nic->pdev, rx->prev->dma_addr,
					sizeof(struct rfd), PCI_DMA_TODEVICE);
		}
	} else {
		/* Kernel path: the skb is now owned by the stack; a new one
		 * is allocated later in e100_rx_clean(). */
		rx->skb = NULL;
	}

	return 0;
}
       
  2112 
       
/* Indicate received frames up the stack (or to EtherCAT), refill the RFD
 * ring (non-EtherCAT mode only), advance the el-bit "stopping point" and
 * restart the receive unit if it went into the No Resources state.
 *
 * @nic:        adapter context
 * @work_done:  incremented per indicated frame; NULL in EtherCAT polling
 * @work_to_do: NAPI budget, passed through to e100_rx_indicate()
 *
 * NOTE(review): the el-bit update sequence below is order-critical; the
 * three pci_dma_sync_single_for_device() calls enforce hardware-visible
 * ordering and must not be coalesced or reordered.
 */
static void e100_rx_clean(struct nic *nic, unsigned int *work_done,
	unsigned int work_to_do)
{
	struct rx *rx;
	int restart_required = 0, err = 0;
	struct rx *old_before_last_rx, *new_before_last_rx;
	struct rfd *old_before_last_rfd, *new_before_last_rfd;

	/* Indicate newly arrived packets */
	for (rx = nic->rx_to_clean; rx->skb; rx = nic->rx_to_clean = rx->next) {
		err = e100_rx_indicate(nic, rx, work_done, work_to_do);
		/* Hit quota or no more to clean */
		if (-EAGAIN == err || -ENODATA == err)
			break;
	}


	/* On EAGAIN, hit quota so have more work to do, restart once
	 * cleanup is complete.
	 * Else, are we already rnr? then pay attention!!! this ensures that
	 * the state machine progression never allows a start with a
	 * partially cleaned list, avoiding a race between hardware
	 * and rx_to_clean when in NAPI mode */
	if (-EAGAIN != err && RU_SUSPENDED == nic->ru_running)
		restart_required = 1;

	/* Remember the current before-last RFD: it carries the el bit that
	 * currently stops the hardware. */
	old_before_last_rx = nic->rx_to_use->prev->prev;
	old_before_last_rfd = (struct rfd *)old_before_last_rx->skb->data;

	if (!nic->ecdev) {
		/* Alloc new skbs to refill list (EtherCAT mode recycles its
		 * buffers in e100_rx_indicate() instead). */
		for(rx = nic->rx_to_use; !rx->skb; rx = nic->rx_to_use = rx->next) {
			if(unlikely(e100_rx_alloc_skb(nic, rx)))
				break; /* Better luck next time (see watchdog) */
		}
	}

	new_before_last_rx = nic->rx_to_use->prev->prev;
	if (new_before_last_rx != old_before_last_rx) {
		/* Set the el-bit on the buffer that is before the last buffer.
		 * This lets us update the next pointer on the last buffer
		 * without worrying about hardware touching it.
		 * We set the size to 0 to prevent hardware from touching this
		 * buffer.
		 * When the hardware hits the before last buffer with el-bit
		 * and size of 0, it will RNR interrupt, the RUS will go into
		 * the No Resources state.  It will not complete nor write to
		 * this buffer. */
		new_before_last_rfd =
			(struct rfd *)new_before_last_rx->skb->data;
		new_before_last_rfd->size = 0;
		new_before_last_rfd->command |= cpu_to_le16(cb_el);
		pci_dma_sync_single_for_device(nic->pdev,
			new_before_last_rx->dma_addr, sizeof(struct rfd),
			PCI_DMA_BIDIRECTIONAL);

		/* Now that we have a new stopping point, we can clear the old
		 * stopping point.  We must sync twice to get the proper
		 * ordering on the hardware side of things. */
		old_before_last_rfd->command &= ~cpu_to_le16(cb_el);
		pci_dma_sync_single_for_device(nic->pdev,
			old_before_last_rx->dma_addr, sizeof(struct rfd),
			PCI_DMA_BIDIRECTIONAL);
		old_before_last_rfd->size = cpu_to_le16(VLAN_ETH_FRAME_LEN);
		pci_dma_sync_single_for_device(nic->pdev,
			old_before_last_rx->dma_addr, sizeof(struct rfd),
			PCI_DMA_BIDIRECTIONAL);
	}

	if (restart_required) {
		// ack the rnr?
		iowrite8(stat_ack_rnr, &nic->csr->scb.stat_ack);
		e100_start_receiver(nic, nic->rx_to_clean);
		if (work_done)
			(*work_done)++;
	}
}
       
  2190 
       
  2191 static void e100_rx_clean_list(struct nic *nic)
       
  2192 {
       
  2193 	struct rx *rx;
       
  2194 	unsigned int i, count = nic->params.rfds.count;
       
  2195 
       
  2196 	nic->ru_running = RU_UNINITIALIZED;
       
  2197 
       
  2198 	if (nic->rxs) {
       
  2199 		for (rx = nic->rxs, i = 0; i < count; rx++, i++) {
       
  2200 			if (rx->skb) {
       
  2201 				pci_unmap_single(nic->pdev, rx->dma_addr,
       
  2202 					RFD_BUF_LEN, PCI_DMA_BIDIRECTIONAL);
       
  2203 				dev_kfree_skb(rx->skb);
       
  2204 			}
       
  2205 		}
       
  2206 		kfree(nic->rxs);
       
  2207 		nic->rxs = NULL;
       
  2208 	}
       
  2209 
       
  2210 	nic->rx_to_use = nic->rx_to_clean = NULL;
       
  2211 }
       
  2212 
       
/* Allocate the circular RFD ring (nic->params.rfds.count entries), link
 * each entry's next/prev pointers and attach an skb to each.  In
 * non-EtherCAT mode the before-last RFD gets the el bit + zero size so
 * the hardware stops there (see comment below).
 *
 * Returns 0 on success or -ENOMEM (ring fully cleaned up on failure).
 */
static int e100_rx_alloc_list(struct nic *nic)
{
	struct rx *rx;
	unsigned int i, count = nic->params.rfds.count;
	struct rfd *before_last;

	nic->rx_to_use = nic->rx_to_clean = NULL;
	nic->ru_running = RU_UNINITIALIZED;

	/* GFP_ATOMIC: may be called from contexts that cannot sleep */
	if (!(nic->rxs = kcalloc(count, sizeof(struct rx), GFP_ATOMIC)))
		return -ENOMEM;

	for (rx = nic->rxs, i = 0; i < count; rx++, i++) {
		/* Wrap next/prev around the array ends to form a ring */
		rx->next = (i + 1 < count) ? rx + 1 : nic->rxs;
		rx->prev = (i == 0) ? nic->rxs + count - 1 : rx - 1;
		if (e100_rx_alloc_skb(nic, rx)) {
			e100_rx_clean_list(nic);
			return -ENOMEM;
		}
	}

	if (!nic->ecdev) {
		/* Set the el-bit on the buffer that is before the last buffer.
		 * This lets us update the next pointer on the last buffer without
		 * worrying about hardware touching it.
		 * We set the size to 0 to prevent hardware from touching this buffer.
		 * When the hardware hits the before last buffer with el-bit and size
		 * of 0, it will RNR interrupt, the RU will go into the No Resources
		 * state.  It will not complete nor write to this buffer. */
		rx = nic->rxs->prev->prev;
		before_last = (struct rfd *)rx->skb->data;
		before_last->command |= cpu_to_le16(cb_el);
		before_last->size = 0;
		pci_dma_sync_single_for_device(nic->pdev, rx->dma_addr,
				sizeof(struct rfd), PCI_DMA_BIDIRECTIONAL);
	}

	nic->rx_to_use = nic->rx_to_clean = nic->rxs;
	nic->ru_running = RU_SUSPENDED;

	return 0;
}
       
  2255 
       
/* Interrupt handler (shared IRQ).  Acks all pending causes, flags an RNR
 * condition for the RX path to restart later, and schedules NAPI unless
 * the device is claimed by EtherCAT (which polls instead). */
static irqreturn_t e100_intr(int irq, void *dev_id)
{
	struct net_device *netdev = dev_id;
	struct nic *nic = netdev_priv(netdev);
	u8 stat_ack = ioread8(&nic->csr->scb.stat_ack);

	netif_printk(nic, intr, KERN_DEBUG, nic->netdev,
		     "stat_ack = 0x%02X\n", stat_ack);

	if (stat_ack == stat_ack_not_ours ||	/* Not our interrupt */
	   stat_ack == stat_ack_not_present)	/* Hardware is ejected */
		return IRQ_NONE;

	/* Ack interrupt(s) */
	iowrite8(stat_ack, &nic->csr->scb.stat_ack);

	/* We hit Receive No Resource (RNR); restart RU after cleaning */
	if (stat_ack & stat_ack_rnr)
		nic->ru_running = RU_SUSPENDED;

	/* EtherCAT mode never schedules NAPI; e100_ec_poll() does the work */
	if (!nic->ecdev && likely(napi_schedule_prep(&nic->napi))) {
		/* Disable ints before scheduling poll; e100_poll() re-enables */
		e100_disable_irq(nic);
		__napi_schedule(&nic->napi);
	}

	return IRQ_HANDLED;
}
       
  2283 
       
  2284 void e100_ec_poll(struct net_device *netdev)
       
  2285 {
       
  2286 	struct nic *nic = netdev_priv(netdev);
       
  2287 
       
  2288 	e100_rx_clean(nic, NULL, 100);
       
  2289 	e100_tx_clean(nic);
       
  2290 
       
  2291 	if (jiffies - nic->ec_watchdog_jiffies >= 2 * HZ) {
       
  2292 		e100_watchdog((unsigned long) nic);
       
  2293 		nic->ec_watchdog_jiffies = jiffies;
       
  2294 	}
       
  2295 }
       
  2296 
       
  2297 
       
/* NAPI poll callback: process up to @budget RX frames, reap completed TX,
 * and leave polling mode (re-enabling the IRQ) once work is exhausted. */
static int e100_poll(struct napi_struct *napi, int budget)
{
	struct nic *nic = container_of(napi, struct nic, napi);
	unsigned int work_done = 0;

	e100_rx_clean(nic, &work_done, budget);
	e100_tx_clean(nic);

	/* If budget not fully consumed, exit the polling mode */
	if (work_done < budget) {
		napi_complete(napi);
		/* IRQ was disabled in e100_intr(); turn it back on */
		e100_enable_irq(nic);
	}

	return work_done;
}
       
  2314 
       
#ifdef CONFIG_NET_POLL_CONTROLLER
/* netpoll hook (netconsole etc.): run the interrupt handler synchronously
 * with the device IRQ masked, then reap completed transmits. */
static void e100_netpoll(struct net_device *netdev)
{
	struct nic *nic = netdev_priv(netdev);

	e100_disable_irq(nic);
	e100_intr(nic->pdev->irq, netdev);
	e100_tx_clean(nic);
	e100_enable_irq(nic);
}
#endif
       
  2326 
       
  2327 static int e100_set_mac_address(struct net_device *netdev, void *p)
       
  2328 {
       
  2329 	struct nic *nic = netdev_priv(netdev);
       
  2330 	struct sockaddr *addr = p;
       
  2331 
       
  2332 	if (!is_valid_ether_addr(addr->sa_data))
       
  2333 		return -EADDRNOTAVAIL;
       
  2334 
       
  2335 	memcpy(netdev->dev_addr, addr->sa_data, netdev->addr_len);
       
  2336 	e100_exec_cb(nic, NULL, e100_setup_iaaddr);
       
  2337 
       
  2338 	return 0;
       
  2339 }
       
  2340 
       
  2341 static int e100_change_mtu(struct net_device *netdev, int new_mtu)
       
  2342 {
       
  2343 	if (new_mtu < ETH_ZLEN || new_mtu > ETH_DATA_LEN)
       
  2344 		return -EINVAL;
       
  2345 	netdev->mtu = new_mtu;
       
  2346 	return 0;
       
  2347 }
       
  2348 
       
  2349 static int e100_asf(struct nic *nic)
       
  2350 {
       
  2351 	/* ASF can be enabled from eeprom */
       
  2352 	return((nic->pdev->device >= 0x1050) && (nic->pdev->device <= 0x1057) &&
       
  2353 	   (nic->eeprom[eeprom_config_asf] & eeprom_asf) &&
       
  2354 	   !(nic->eeprom[eeprom_config_asf] & eeprom_gcl) &&
       
  2355 	   ((nic->eeprom[eeprom_smbus_addr] & 0xFF) != 0xFE));
       
  2356 }
       
  2357 
       
/* Bring the interface up: allocate RX ring and command blocks, init the
 * hardware, start the receiver, hook the IRQ and (non-EtherCAT only)
 * start the watchdog, the TX queue, NAPI and interrupts.
 *
 * Returns 0 on success or a negative errno; all resources acquired so
 * far are released via the goto ladder on failure.
 */
static int e100_up(struct nic *nic)
{
	int err;

	if ((err = e100_rx_alloc_list(nic)))
		return err;
	if ((err = e100_alloc_cbs(nic)))
		goto err_rx_clean_list;
	if ((err = e100_hw_init(nic)))
		goto err_clean_cbs;
	e100_set_multicast_list(nic->netdev);
	e100_start_receiver(nic, NULL);
	if (!nic->ecdev) {
		/* EtherCAT mode has no watchdog timer; e100_ec_poll() drives it */
		mod_timer(&nic->watchdog, jiffies);
	}
	if ((err = request_irq(nic->pdev->irq, e100_intr, IRQF_SHARED,
		nic->netdev->name, nic->netdev)))
		goto err_no_irq;
	if (!nic->ecdev) {
		netif_wake_queue(nic->netdev);
		napi_enable(&nic->napi);
		/* enable ints _after_ enabling poll, preventing a race between
		 * disable ints+schedule */
		e100_enable_irq(nic);
	}
	return 0;

err_no_irq:
	if (!nic->ecdev)
		del_timer_sync(&nic->watchdog);
err_clean_cbs:
	e100_clean_cbs(nic);
err_rx_clean_list:
	e100_rx_clean_list(nic);
	return err;
}
       
  2394 
       
/* Bring the interface down: quiesce NAPI and the TX queue (non-EtherCAT
 * only), reset the hardware, release the IRQ and free all rings.
 * Teardown order mirrors e100_up() in reverse. */
static void e100_down(struct nic *nic)
{
	if (!nic->ecdev) {
		/* wait here for poll to complete */
		napi_disable(&nic->napi);
		netif_stop_queue(nic->netdev);
	}
	e100_hw_reset(nic);
	free_irq(nic->pdev->irq, nic->netdev);
	if (!nic->ecdev) {
		del_timer_sync(&nic->watchdog);
		netif_carrier_off(nic->netdev);
	}
	e100_clean_cbs(nic);
	e100_rx_clean_list(nic);
}
       
  2411 
       
/* ndo_tx_timeout: defer the recovery (full down/up cycle) to process
 * context via the tx_timeout_task work item. */
static void e100_tx_timeout(struct net_device *netdev)
{
	struct nic *nic = netdev_priv(netdev);

	/* Reset outside of interrupt context, to avoid request_irq
	 * in interrupt context */
	schedule_work(&nic->tx_timeout_task);
}
       
  2420 
       
/* Work-queue handler for TX timeouts: restart the interface with a full
 * down/up cycle under the RTNL lock (which also serializes against
 * concurrent ifconfig up/down). */
static void e100_tx_timeout_task(struct work_struct *work)
{
	struct nic *nic = container_of(work, struct nic, tx_timeout_task);
	struct net_device *netdev = nic->netdev;

	netif_printk(nic, tx_err, KERN_DEBUG, nic->netdev,
		     "scb.status=0x%02X\n", ioread8(&nic->csr->scb.status));

	rtnl_lock();
	if (netif_running(netdev)) {
		e100_down(netdev_priv(netdev));
		e100_up(netdev_priv(netdev));
	}
	rtnl_unlock();
}
       
  2436 
       
/* Run an internal MAC or PHY loopback self-test.
 *
 * @nic:           adapter context
 * @loopback_mode: lb_mac or lb_phy (lb_phy silently degrades to lb_mac
 *                 on ICH parts, see below)
 *
 * Returns 0 when the received frame matches the transmitted one,
 * -EAGAIN on mismatch, or a negative errno on setup failure.  The
 * hardware is reset and all rings freed before returning.
 */
static int e100_loopback_test(struct nic *nic, enum loopback loopback_mode)
{
	int err;
	struct sk_buff *skb;

	/* Use driver resources to perform internal MAC or PHY
	 * loopback test.  A single packet is prepared and transmitted
	 * in loopback mode, and the test passes if the received
	 * packet compares byte-for-byte to the transmitted packet. */

	if ((err = e100_rx_alloc_list(nic)))
		return err;
	if ((err = e100_alloc_cbs(nic)))
		goto err_clean_rx;

	/* ICH PHY loopback is broken so do MAC loopback instead */
	if (nic->flags & ich && loopback_mode == lb_phy)
		loopback_mode = lb_mac;

	nic->loopback = loopback_mode;
	if ((err = e100_hw_init(nic)))
		goto err_loopback_none;

	if (loopback_mode == lb_phy)
		mdio_write(nic->netdev, nic->mii.phy_id, MII_BMCR,
			BMCR_LOOPBACK);

	e100_start_receiver(nic, NULL);

	if (!(skb = netdev_alloc_skb(nic->netdev, ETH_DATA_LEN))) {
		err = -ENOMEM;
		goto err_loopback_none;
	}
	/* All-0xFF payload; compared byte-for-byte on reception below */
	skb_put(skb, ETH_DATA_LEN);
	memset(skb->data, 0xFF, ETH_DATA_LEN);
	e100_xmit_frame(skb, nic->netdev);

	/* Give the frame time to loop back into the first RFD */
	msleep(10);

	pci_dma_sync_single_for_cpu(nic->pdev, nic->rx_to_clean->dma_addr,
			RFD_BUF_LEN, PCI_DMA_BIDIRECTIONAL);

	if (memcmp(nic->rx_to_clean->skb->data + sizeof(struct rfd),
	   skb->data, ETH_DATA_LEN))
		err = -EAGAIN;

err_loopback_none:
	mdio_write(nic->netdev, nic->mii.phy_id, MII_BMCR, 0);
	nic->loopback = lb_none;
	e100_clean_cbs(nic);
	e100_hw_reset(nic);
err_clean_rx:
	e100_rx_clean_list(nic);
	return err;
}
       
  2492 
       
#define MII_LED_CONTROL	0x1B
#define E100_82552_LED_OVERRIDE 0x19
#define E100_82552_LED_ON       0x000F /* LEDTX and LED_RX both on */
#define E100_82552_LED_OFF      0x000A /* LEDTX and LED_RX both off */
/* Timer callback used by e100_phys_id(): toggle the LED state via the
 * PHY's LED control register and re-arm the timer for a 4 Hz blink.
 * @data is the struct nic pointer cast to unsigned long. */
static void e100_blink_led(unsigned long data)
{
	struct nic *nic = (struct nic *)data;
	enum led_state {
		led_on     = 0x01,
		led_off    = 0x04,
		led_on_559 = 0x05,
		led_on_557 = 0x07,
	};
	u16 led_reg = MII_LED_CONTROL;

	if (nic->phy == phy_82552_v) {
		/* 82552 uses a dedicated override register with its own values */
		led_reg = E100_82552_LED_OVERRIDE;

		nic->leds = (nic->leds == E100_82552_LED_ON) ?
		            E100_82552_LED_OFF : E100_82552_LED_ON;
	} else {
		/* "on" value differs between pre- and post-82559 MACs */
		nic->leds = (nic->leds & led_on) ? led_off :
		            (nic->mac < mac_82559_D101M) ? led_on_557 :
		            led_on_559;
	}
	mdio_write(nic->netdev, nic->mii.phy_id, led_reg, nic->leds);
	mod_timer(&nic->blink_timer, jiffies + HZ / 4);
}
       
  2521 
       
/* ethtool get_settings: delegate to the generic MII helper. */
static int e100_get_settings(struct net_device *netdev, struct ethtool_cmd *cmd)
{
	struct nic *nic = netdev_priv(netdev);
	return mii_ethtool_gset(&nic->mii, cmd);
}
       
  2527 
       
  2528 static int e100_set_settings(struct net_device *netdev, struct ethtool_cmd *cmd)
       
  2529 {
       
  2530 	struct nic *nic = netdev_priv(netdev);
       
  2531 	int err;
       
  2532 
       
  2533 	mdio_write(netdev, nic->mii.phy_id, MII_BMCR, BMCR_RESET);
       
  2534 	err = mii_ethtool_sset(&nic->mii, cmd);
       
  2535 	e100_exec_cb(nic, NULL, e100_configure);
       
  2536 
       
  2537 	return err;
       
  2538 }
       
  2539 
       
  2540 static void e100_get_drvinfo(struct net_device *netdev,
       
  2541 	struct ethtool_drvinfo *info)
       
  2542 {
       
  2543 	struct nic *nic = netdev_priv(netdev);
       
  2544 	strcpy(info->driver, DRV_NAME);
       
  2545 	strcpy(info->version, DRV_VERSION);
       
  2546 	strcpy(info->fw_version, "N/A");
       
  2547 	strcpy(info->bus_info, pci_name(nic->pdev));
       
  2548 }
       
  2549 
       
  2550 #define E100_PHY_REGS 0x1C
       
  2551 static int e100_get_regs_len(struct net_device *netdev)
       
  2552 {
       
  2553 	struct nic *nic = netdev_priv(netdev);
       
  2554 	return 1 + E100_PHY_REGS + sizeof(nic->mem->dump_buf);
       
  2555 }
       
  2556 
       
/* ethtool get_regs: dump adapter state into @p as u32 entries:
 *   buff[0]                      - SCB cmd_hi/cmd_lo/status snapshot
 *   buff[1 .. 1+E100_PHY_REGS]   - PHY registers E100_PHY_REGS down to 0
 *   buff[2+E100_PHY_REGS ...]    - hardware dump buffer contents
 * NOTE(review): the sizes here must stay in sync with
 * e100_get_regs_len() -- verify when changing either. */
static void e100_get_regs(struct net_device *netdev,
	struct ethtool_regs *regs, void *p)
{
	struct nic *nic = netdev_priv(netdev);
	u32 *buff = p;
	int i;

	regs->version = (1 << 24) | nic->pdev->revision;
	buff[0] = ioread8(&nic->csr->scb.cmd_hi) << 24 |
		ioread8(&nic->csr->scb.cmd_lo) << 16 |
		ioread16(&nic->csr->scb.status);
	for (i = E100_PHY_REGS; i >= 0; i--)
		buff[1 + E100_PHY_REGS - i] =
			mdio_read(netdev, nic->mii.phy_id, i);
	memset(nic->mem->dump_buf, 0, sizeof(nic->mem->dump_buf));
	/* Ask the adapter to fill dump_buf, then give it time to finish */
	e100_exec_cb(nic, NULL, e100_dump);
	msleep(10);
	memcpy(&buff[2 + E100_PHY_REGS], nic->mem->dump_buf,
		sizeof(nic->mem->dump_buf));
}
       
  2577 
       
  2578 static void e100_get_wol(struct net_device *netdev, struct ethtool_wolinfo *wol)
       
  2579 {
       
  2580 	struct nic *nic = netdev_priv(netdev);
       
  2581 	wol->supported = (nic->mac >= mac_82558_D101_A4) ?  WAKE_MAGIC : 0;
       
  2582 	wol->wolopts = (nic->flags & wol_magic) ? WAKE_MAGIC : 0;
       
  2583 }
       
  2584 
       
  2585 static int e100_set_wol(struct net_device *netdev, struct ethtool_wolinfo *wol)
       
  2586 {
       
  2587 	struct nic *nic = netdev_priv(netdev);
       
  2588 
       
  2589 	if ((wol->wolopts && wol->wolopts != WAKE_MAGIC) ||
       
  2590 	    !device_can_wakeup(&nic->pdev->dev))
       
  2591 		return -EOPNOTSUPP;
       
  2592 
       
  2593 	if (wol->wolopts)
       
  2594 		nic->flags |= wol_magic;
       
  2595 	else
       
  2596 		nic->flags &= ~wol_magic;
       
  2597 
       
  2598 	device_set_wakeup_enable(&nic->pdev->dev, wol->wolopts);
       
  2599 
       
  2600 	e100_exec_cb(nic, NULL, e100_configure);
       
  2601 
       
  2602 	return 0;
       
  2603 }
       
  2604 
       
/* ethtool get_msglevel: return the driver's message-enable bitmask. */
static u32 e100_get_msglevel(struct net_device *netdev)
{
	struct nic *nic = netdev_priv(netdev);
	return nic->msg_enable;
}
       
  2610 
       
/* ethtool set_msglevel: store the new message-enable bitmask. */
static void e100_set_msglevel(struct net_device *netdev, u32 value)
{
	struct nic *nic = netdev_priv(netdev);
	nic->msg_enable = value;
}
       
  2616 
       
/* ethtool nway_reset: restart link autonegotiation via the MII helper. */
static int e100_nway_reset(struct net_device *netdev)
{
	struct nic *nic = netdev_priv(netdev);
	return mii_nway_restart(&nic->mii);
}
       
  2622 
       
/* ethtool get_link: report link state from the MII status register. */
static u32 e100_get_link(struct net_device *netdev)
{
	struct nic *nic = netdev_priv(netdev);
	return mii_link_ok(&nic->mii);
}
       
  2628 
       
/* ethtool get_eeprom_len: EEPROM size in bytes (word count * 2). */
static int e100_get_eeprom_len(struct net_device *netdev)
{
	struct nic *nic = netdev_priv(netdev);
	return nic->eeprom_wc << 1;
}
       
  2634 
       
  2635 #define E100_EEPROM_MAGIC	0x1234
       
  2636 static int e100_get_eeprom(struct net_device *netdev,
       
  2637 	struct ethtool_eeprom *eeprom, u8 *bytes)
       
  2638 {
       
  2639 	struct nic *nic = netdev_priv(netdev);
       
  2640 
       
  2641 	eeprom->magic = E100_EEPROM_MAGIC;
       
  2642 	memcpy(bytes, &((u8 *)nic->eeprom)[eeprom->offset], eeprom->len);
       
  2643 
       
  2644 	return 0;
       
  2645 }
       
  2646 
       
/* ethtool set_eeprom: patch the cached EEPROM image and write the
 * affected words back to the device.  The magic cookie guards against
 * accidental writes. */
static int e100_set_eeprom(struct net_device *netdev,
	struct ethtool_eeprom *eeprom, u8 *bytes)
{
	struct nic *nic = netdev_priv(netdev);

	if (eeprom->magic != E100_EEPROM_MAGIC)
		return -EINVAL;

	memcpy(&((u8 *)nic->eeprom)[eeprom->offset], bytes, eeprom->len);

	/* Save whole 16-bit words; the +1 covers a range whose byte
	 * offset/length straddles a word boundary */
	return e100_eeprom_save(nic, eeprom->offset >> 1,
		(eeprom->len >> 1) + 1);
}
       
  2660 
       
  2661 static void e100_get_ringparam(struct net_device *netdev,
       
  2662 	struct ethtool_ringparam *ring)
       
  2663 {
       
  2664 	struct nic *nic = netdev_priv(netdev);
       
  2665 	struct param_range *rfds = &nic->params.rfds;
       
  2666 	struct param_range *cbs = &nic->params.cbs;
       
  2667 
       
  2668 	ring->rx_max_pending = rfds->max;
       
  2669 	ring->tx_max_pending = cbs->max;
       
  2670 	ring->rx_mini_max_pending = 0;
       
  2671 	ring->rx_jumbo_max_pending = 0;
       
  2672 	ring->rx_pending = rfds->count;
       
  2673 	ring->tx_pending = cbs->count;
       
  2674 	ring->rx_mini_pending = 0;
       
  2675 	ring->rx_jumbo_pending = 0;
       
  2676 }
       
  2677 
       
/* ethtool set_ringparam: clamp the requested RX/TX ring sizes to the
 * hardware min/max and restart the interface so the new rings take
 * effect.  Mini/jumbo rings are not supported. */
static int e100_set_ringparam(struct net_device *netdev,
	struct ethtool_ringparam *ring)
{
	struct nic *nic = netdev_priv(netdev);
	struct param_range *rfds = &nic->params.rfds;
	struct param_range *cbs = &nic->params.cbs;

	if ((ring->rx_mini_pending) || (ring->rx_jumbo_pending))
		return -EINVAL;

	/* Rings can only be resized while the interface is down */
	if (netif_running(netdev))
		e100_down(nic);
	rfds->count = max(ring->rx_pending, rfds->min);
	rfds->count = min(rfds->count, rfds->max);
	cbs->count = max(ring->tx_pending, cbs->min);
	cbs->count = min(cbs->count, cbs->max);
	netif_info(nic, drv, nic->netdev, "Ring Param settings: rx: %d, tx %d\n",
		   rfds->count, cbs->count);
	if (netif_running(netdev))
		e100_up(nic);

	return 0;
}
       
  2701 
       
/* Self-test names; order must match the data[] indices filled in by
 * e100_diag_test() (0=link, 1=eeprom, 2=self-test, 3=MAC lb, 4=PHY lb). */
static const char e100_gstrings_test[][ETH_GSTRING_LEN] = {
	"Link test     (on/offline)",
	"Eeprom test   (on/offline)",
	"Self test        (offline)",
	"Mac loopback     (offline)",
	"Phy loopback     (offline)",
};
#define E100_TEST_LEN	ARRAY_SIZE(e100_gstrings_test)
       
  2710 
       
/* ethtool self_test: run the online tests (link, EEPROM checksum) and,
 * when ETH_TEST_FL_OFFLINE is requested, the offline self-test and MAC/
 * PHY loopback tests with the interface cycled down and back up.
 * Nonzero data[] entries mark failures; indices match
 * e100_gstrings_test. */
static void e100_diag_test(struct net_device *netdev,
	struct ethtool_test *test, u64 *data)
{
	struct ethtool_cmd cmd;
	struct nic *nic = netdev_priv(netdev);
	int i, err __attribute__ ((unused));

	memset(data, 0, E100_TEST_LEN * sizeof(u64));
	data[0] = !mii_link_ok(&nic->mii);
	data[1] = e100_eeprom_load(nic);
	if (test->flags & ETH_TEST_FL_OFFLINE) {

		/* save speed, duplex & autoneg settings */
		err = mii_ethtool_gset(&nic->mii, &cmd);

		if (netif_running(netdev))
			e100_down(nic);
		data[2] = e100_self_test(nic);
		data[3] = e100_loopback_test(nic, lb_mac);
		data[4] = e100_loopback_test(nic, lb_phy);

		/* restore speed, duplex & autoneg settings */
		err = mii_ethtool_sset(&nic->mii, &cmd);

		if (netif_running(netdev))
			e100_up(nic);
	}
	for (i = 0; i < E100_TEST_LEN; i++)
		test->flags |= data[i] ? ETH_TEST_FL_FAILED : 0;

	/* NOTE(review): purpose of this fixed 4 s delay is not evident from
	 * the code -- presumably settle time after the tests; confirm */
	msleep_interruptible(4 * 1000);
}
       
  2743 
       
/* ethtool phys_id: blink the adapter LED for @data seconds (0 means
 * "as long as possible") so the physical port can be identified, then
 * restore the LED register to hardware control. */
static int e100_phys_id(struct net_device *netdev, u32 data)
{
	struct nic *nic = netdev_priv(netdev);
	/* 82552 uses its LED-override register; all others the MII LED
	 * control register (see e100_blink_led()) */
	u16 led_reg = (nic->phy == phy_82552_v) ? E100_82552_LED_OVERRIDE :
	              MII_LED_CONTROL;

	if (!data || data > (u32)(MAX_SCHEDULE_TIMEOUT / HZ))
		data = (u32)(MAX_SCHEDULE_TIMEOUT / HZ);
	/* blink_timer re-arms itself; it toggles the LED at 4 Hz */
	mod_timer(&nic->blink_timer, jiffies);
	msleep_interruptible(data * 1000);
	del_timer_sync(&nic->blink_timer);
	mdio_write(netdev, nic->mii.phy_id, led_reg, 0);

	return 0;
}
       
  2759 
       
/* Statistic names for ethtool -S.  The first E100_NET_STATS_LEN entries
 * are read straight out of netdev->stats in e100_get_ethtool_stats();
 * the rest map 1:1 onto the nic->* counters appended there.  Keep both
 * orders in sync. */
static const char e100_gstrings_stats[][ETH_GSTRING_LEN] = {
	"rx_packets", "tx_packets", "rx_bytes", "tx_bytes", "rx_errors",
	"tx_errors", "rx_dropped", "tx_dropped", "multicast", "collisions",
	"rx_length_errors", "rx_over_errors", "rx_crc_errors",
	"rx_frame_errors", "rx_fifo_errors", "rx_missed_errors",
	"tx_aborted_errors", "tx_carrier_errors", "tx_fifo_errors",
	"tx_heartbeat_errors", "tx_window_errors",
	/* device-specific stats */
	"tx_deferred", "tx_single_collisions", "tx_multi_collisions",
	"tx_flow_control_pause", "rx_flow_control_pause",
	"rx_flow_control_unsupported", "tx_tco_packets", "rx_tco_packets",
};
#define E100_NET_STATS_LEN	21
#define E100_STATS_LEN	ARRAY_SIZE(e100_gstrings_stats)
       
  2774 
       
  2775 static int e100_get_sset_count(struct net_device *netdev, int sset)
       
  2776 {
       
  2777 	switch (sset) {
       
  2778 	case ETH_SS_TEST:
       
  2779 		return E100_TEST_LEN;
       
  2780 	case ETH_SS_STATS:
       
  2781 		return E100_STATS_LEN;
       
  2782 	default:
       
  2783 		return -EOPNOTSUPP;
       
  2784 	}
       
  2785 }
       
  2786 
       
/* ethtool get_ethtool_stats: fill @data in the order declared by
 * e100_gstrings_stats -- the generic netdev counters first, then the
 * device-specific ones. */
static void e100_get_ethtool_stats(struct net_device *netdev,
	struct ethtool_stats *stats, u64 *data)
{
	struct nic *nic = netdev_priv(netdev);
	int i;

	/* NOTE(review): this treats netdev->stats as a flat array of
	 * unsigned long -- assumes the first E100_NET_STATS_LEN fields of
	 * struct net_device_stats are all unsigned long, in declaration
	 * order; verify against the kernel headers if either changes. */
	for (i = 0; i < E100_NET_STATS_LEN; i++)
		data[i] = ((unsigned long *)&netdev->stats)[i];

	data[i++] = nic->tx_deferred;
	data[i++] = nic->tx_single_collisions;
	data[i++] = nic->tx_multiple_collisions;
	data[i++] = nic->tx_fc_pause;
	data[i++] = nic->rx_fc_pause;
	data[i++] = nic->rx_fc_unsupported;
	data[i++] = nic->tx_tco_frames;
	data[i++] = nic->rx_tco_frames;
}
       
  2805 
       
  2806 static void e100_get_strings(struct net_device *netdev, u32 stringset, u8 *data)
       
  2807 {
       
  2808 	switch (stringset) {
       
  2809 	case ETH_SS_TEST:
       
  2810 		memcpy(data, *e100_gstrings_test, sizeof(e100_gstrings_test));
       
  2811 		break;
       
  2812 	case ETH_SS_STATS:
       
  2813 		memcpy(data, *e100_gstrings_stats, sizeof(e100_gstrings_stats));
       
  2814 		break;
       
  2815 	}
       
  2816 }
       
  2817 
       
/* ethtool operations table; registered via SET_ETHTOOL_OPS() in
 * e100_probe(). */
static const struct ethtool_ops e100_ethtool_ops = {
	.get_settings		= e100_get_settings,
	.set_settings		= e100_set_settings,
	.get_drvinfo		= e100_get_drvinfo,
	.get_regs_len		= e100_get_regs_len,
	.get_regs		= e100_get_regs,
	.get_wol		= e100_get_wol,
	.set_wol		= e100_set_wol,
	.get_msglevel		= e100_get_msglevel,
	.set_msglevel		= e100_set_msglevel,
	.nway_reset		= e100_nway_reset,
	.get_link		= e100_get_link,
	.get_eeprom_len		= e100_get_eeprom_len,
	.get_eeprom		= e100_get_eeprom,
	.set_eeprom		= e100_set_eeprom,
	.get_ringparam		= e100_get_ringparam,
	.set_ringparam		= e100_set_ringparam,
	.self_test		= e100_diag_test,
	.get_strings		= e100_get_strings,
	.phys_id		= e100_phys_id,
	.get_ethtool_stats	= e100_get_ethtool_stats,
	.get_sset_count		= e100_get_sset_count,
};
       
  2841 
       
/* ndo_do_ioctl: delegate MII ioctls (SIOCGMIIPHY etc.) to the generic
 * helper. */
static int e100_do_ioctl(struct net_device *netdev, struct ifreq *ifr, int cmd)
{
	struct nic *nic = netdev_priv(netdev);

	return generic_mii_ioctl(&nic->mii, if_mii(ifr), cmd, NULL);
}
       
  2848 
       
  2849 static int e100_alloc(struct nic *nic)
       
  2850 {
       
  2851 	nic->mem = pci_alloc_consistent(nic->pdev, sizeof(struct mem),
       
  2852 		&nic->dma_addr);
       
  2853 	return nic->mem ? 0 : -ENOMEM;
       
  2854 }
       
  2855 
       
  2856 static void e100_free(struct nic *nic)
       
  2857 {
       
  2858 	if (nic->mem) {
       
  2859 		pci_free_consistent(nic->pdev, sizeof(struct mem),
       
  2860 			nic->mem, nic->dma_addr);
       
  2861 		nic->mem = NULL;
       
  2862 	}
       
  2863 }
       
  2864 
       
  2865 static int e100_open(struct net_device *netdev)
       
  2866 {
       
  2867 	struct nic *nic = netdev_priv(netdev);
       
  2868 	int err = 0;
       
  2869 
       
  2870 	if (!nic->ecdev)
       
  2871 		netif_carrier_off(netdev);
       
  2872 	if ((err = e100_up(nic)))
       
  2873 		netif_err(nic, ifup, nic->netdev, "Cannot open interface, aborting\n");
       
  2874 	return err;
       
  2875 }
       
  2876 
       
  2877 static int e100_close(struct net_device *netdev)
       
  2878 {
       
  2879 	e100_down(netdev_priv(netdev));
       
  2880 	return 0;
       
  2881 }
       
  2882 
       
/* net_device callbacks; installed on the netdev in e100_probe(). */
static const struct net_device_ops e100_netdev_ops = {
	.ndo_open		= e100_open,
	.ndo_stop		= e100_close,
	.ndo_start_xmit		= e100_xmit_frame,
	.ndo_validate_addr	= eth_validate_addr,
	.ndo_set_multicast_list	= e100_set_multicast_list,
	.ndo_set_mac_address	= e100_set_mac_address,
	.ndo_change_mtu		= e100_change_mtu,
	.ndo_do_ioctl		= e100_do_ioctl,
	.ndo_tx_timeout		= e100_tx_timeout,
#ifdef CONFIG_NET_POLL_CONTROLLER
	.ndo_poll_controller	= e100_netpoll,
#endif
};
       
  2897 
       
  2898 static int __devinit e100_probe(struct pci_dev *pdev,
       
  2899 	const struct pci_device_id *ent)
       
  2900 {
       
  2901 	struct net_device *netdev;
       
  2902 	struct nic *nic;
       
  2903 	int err;
       
  2904 
       
  2905 	if (!(netdev = alloc_etherdev(sizeof(struct nic)))) {
       
  2906 		if (((1 << debug) - 1) & NETIF_MSG_PROBE)
       
  2907 			pr_err("Etherdev alloc failed, aborting\n");
       
  2908 		return -ENOMEM;
       
  2909 	}
       
  2910 
       
  2911 	netdev->netdev_ops = &e100_netdev_ops;
       
  2912 	SET_ETHTOOL_OPS(netdev, &e100_ethtool_ops);
       
  2913 	netdev->watchdog_timeo = E100_WATCHDOG_PERIOD;
       
  2914 	strncpy(netdev->name, pci_name(pdev), sizeof(netdev->name) - 1);
       
  2915 
       
  2916 	nic = netdev_priv(netdev);
       
  2917 	netif_napi_add(netdev, &nic->napi, e100_poll, E100_NAPI_WEIGHT);
       
  2918 	nic->netdev = netdev;
       
  2919 	nic->pdev = pdev;
       
  2920 	nic->msg_enable = (1 << debug) - 1;
       
  2921 	nic->mdio_ctrl = mdio_ctrl_hw;
       
  2922 	pci_set_drvdata(pdev, netdev);
       
  2923 
       
  2924 	if ((err = pci_enable_device(pdev))) {
       
  2925 		netif_err(nic, probe, nic->netdev, "Cannot enable PCI device, aborting\n");
       
  2926 		goto err_out_free_dev;
       
  2927 	}
       
  2928 
       
  2929 	if (!(pci_resource_flags(pdev, 0) & IORESOURCE_MEM)) {
       
  2930 		netif_err(nic, probe, nic->netdev, "Cannot find proper PCI device base address, aborting\n");
       
  2931 		err = -ENODEV;
       
  2932 		goto err_out_disable_pdev;
       
  2933 	}
       
  2934 
       
  2935 	if ((err = pci_request_regions(pdev, DRV_NAME))) {
       
  2936 		netif_err(nic, probe, nic->netdev, "Cannot obtain PCI resources, aborting\n");
       
  2937 		goto err_out_disable_pdev;
       
  2938 	}
       
  2939 
       
  2940 	if ((err = pci_set_dma_mask(pdev, DMA_BIT_MASK(32)))) {
       
  2941 		netif_err(nic, probe, nic->netdev, "No usable DMA configuration, aborting\n");
       
  2942 		goto err_out_free_res;
       
  2943 	}
       
  2944 
       
  2945 	SET_NETDEV_DEV(netdev, &pdev->dev);
       
  2946 
       
  2947 	if (use_io)
       
  2948 		netif_info(nic, probe, nic->netdev, "using i/o access mode\n");
       
  2949 
       
  2950 	nic->csr = pci_iomap(pdev, (use_io ? 1 : 0), sizeof(struct csr));
       
  2951 	if (!nic->csr) {
       
  2952 		netif_err(nic, probe, nic->netdev, "Cannot map device registers, aborting\n");
       
  2953 		err = -ENOMEM;
       
  2954 		goto err_out_free_res;
       
  2955 	}
       
  2956 
       
  2957 	if (ent->driver_data)
       
  2958 		nic->flags |= ich;
       
  2959 	else
       
  2960 		nic->flags &= ~ich;
       
  2961 
       
  2962 	e100_get_defaults(nic);
       
  2963 
       
  2964 	/* locks must be initialized before calling hw_reset */
       
  2965 	spin_lock_init(&nic->cb_lock);
       
  2966 	spin_lock_init(&nic->cmd_lock);
       
  2967 	spin_lock_init(&nic->mdio_lock);
       
  2968 
       
  2969 	/* Reset the device before pci_set_master() in case device is in some
       
  2970 	 * funky state and has an interrupt pending - hint: we don't have the
       
  2971 	 * interrupt handler registered yet. */
       
  2972 	e100_hw_reset(nic);
       
  2973 
       
  2974 	pci_set_master(pdev);
       
  2975 
       
  2976 	init_timer(&nic->watchdog);
       
  2977 	nic->watchdog.function = e100_watchdog;
       
  2978 	nic->watchdog.data = (unsigned long)nic;
       
  2979 	init_timer(&nic->blink_timer);
       
  2980 	nic->blink_timer.function = e100_blink_led;
       
  2981 	nic->blink_timer.data = (unsigned long)nic;
       
  2982 
       
  2983 	INIT_WORK(&nic->tx_timeout_task, e100_tx_timeout_task);
       
  2984 
       
  2985 	if ((err = e100_alloc(nic))) {
       
  2986 		netif_err(nic, probe, nic->netdev, "Cannot alloc driver memory, aborting\n");
       
  2987 		goto err_out_iounmap;
       
  2988 	}
       
  2989 
       
  2990 	if ((err = e100_eeprom_load(nic)))
       
  2991 		goto err_out_free;
       
  2992 
       
  2993 	e100_phy_init(nic);
       
  2994 
       
  2995 	memcpy(netdev->dev_addr, nic->eeprom, ETH_ALEN);
       
  2996 	memcpy(netdev->perm_addr, nic->eeprom, ETH_ALEN);
       
  2997 	if (!is_valid_ether_addr(netdev->perm_addr)) {
       
  2998 		if (!eeprom_bad_csum_allow) {
       
  2999 			netif_err(nic, probe, nic->netdev, "Invalid MAC address from EEPROM, aborting\n");
       
  3000 			err = -EAGAIN;
       
  3001 			goto err_out_free;
       
  3002 		} else {
       
  3003 			netif_err(nic, probe, nic->netdev, "Invalid MAC address from EEPROM, you MUST configure one.\n");
       
  3004 		}
       
  3005 	}
       
  3006 
       
  3007 	/* Wol magic packet can be enabled from eeprom */
       
  3008 	if ((nic->mac >= mac_82558_D101_A4) &&
       
  3009 	   (nic->eeprom[eeprom_id] & eeprom_id_wol)) {
       
  3010 		nic->flags |= wol_magic;
       
  3011 		device_set_wakeup_enable(&pdev->dev, true);
       
  3012 	}
       
  3013 
       
  3014 	/* ack any pending wake events, disable PME */
       
  3015 	pci_pme_active(pdev, false);
       
  3016 
       
  3017 	// offer device to EtherCAT master module
       
  3018 	nic->ecdev = ecdev_offer(netdev, e100_ec_poll, THIS_MODULE);
       
  3019 
       
  3020 	if (!nic->ecdev) {
       
  3021 		strcpy(netdev->name, "eth%d");
       
  3022 		if ((err = register_netdev(netdev))) {
       
  3023 			netif_err(nic, probe, nic->netdev,
       
  3024 					"Cannot register net device, aborting\n");
       
  3025 			goto err_out_free;
       
  3026 		}
       
  3027 	}
       
  3028 
       
  3029 	nic->cbs_pool = pci_pool_create(netdev->name,
       
  3030 			   nic->pdev,
       
  3031 			   nic->params.cbs.max * sizeof(struct cb),
       
  3032 			   sizeof(u32),
       
  3033 			   0);
       
  3034 	netif_info(nic, probe, nic->netdev,
       
  3035 		   "addr 0x%llx, irq %d, MAC addr %pM\n",
       
  3036 		   (unsigned long long)pci_resource_start(pdev, use_io ? 1 : 0),
       
  3037 		   pdev->irq, netdev->dev_addr);
       
  3038 
       
  3039 	if (nic->ecdev) {
       
  3040 		if (ecdev_open(nic->ecdev)) {
       
  3041 			ecdev_withdraw(nic->ecdev);
       
  3042 			goto err_out_free;
       
  3043 		}
       
  3044 	}
       
  3045 
       
  3046 	return 0;
       
  3047 
       
  3048 err_out_free:
       
  3049 	e100_free(nic);
       
  3050 err_out_iounmap:
       
  3051 	pci_iounmap(pdev, nic->csr);
       
  3052 err_out_free_res:
       
  3053 	pci_release_regions(pdev);
       
  3054 err_out_disable_pdev:
       
  3055 	pci_disable_device(pdev);
       
  3056 err_out_free_dev:
       
  3057 	pci_set_drvdata(pdev, NULL);
       
  3058 	free_netdev(netdev);
       
  3059 	return err;
       
  3060 }
       
  3061 
       
/**
 * e100_remove - PCI remove: tear down one device.
 * @pdev: PCI device being removed
 *
 * Reverses e100_probe(); a NULL drvdata means probe failed and there is
 * nothing to undo.
 */
static void __devexit e100_remove(struct pci_dev *pdev)
{
	struct net_device *netdev = pci_get_drvdata(pdev);

	if (netdev) {
		struct nic *nic = netdev_priv(netdev);
		/* EtherCAT-claimed devices were never registered with the
		 * net core; return them to the master instead. */
		if (nic->ecdev) {
			ecdev_close(nic->ecdev);
			ecdev_withdraw(nic->ecdev);
		} else {
			unregister_netdev(netdev);
		}

		e100_free(nic);
		pci_iounmap(pdev, nic->csr);
		/* destroy the pool before free_netdev(): nic lives inside netdev */
		pci_pool_destroy(nic->cbs_pool);
		free_netdev(netdev);
		pci_release_regions(pdev);
		pci_disable_device(pdev);
		pci_set_drvdata(pdev, NULL);
	}
}
       
  3084 
       
#define E100_82552_SMARTSPEED   0x14   /* SmartSpeed Ctrl register */
#define E100_82552_REV_ANEG     0x0200 /* Reverse auto-negotiation */
#define E100_82552_ANEG_NOW     0x0400 /* Auto-negotiate now */
/* Common quiesce path for suspend and shutdown: stop the NIC, save PCI
 * state, and report via *enable_wake whether wake-up (WoL magic packet
 * or ASF) must stay armed. Leaves the PCI device disabled. */
static void __e100_shutdown(struct pci_dev *pdev, bool *enable_wake)
{
	struct net_device *netdev = pci_get_drvdata(pdev);
	struct nic *nic = netdev_priv(netdev);

	if (netif_running(netdev))
		e100_down(nic);
	netif_device_detach(netdev);

	pci_save_state(pdev);

	/* NOTE(review): bitwise '|' (not '||') appears intentional so that
	 * e100_asf() is always evaluated — confirm before "fixing". */
	if ((nic->flags & wol_magic) | e100_asf(nic)) {
		/* enable reverse auto-negotiation */
		if (nic->phy == phy_82552_v) {
			u16 smartspeed = mdio_read(netdev, nic->mii.phy_id,
			                           E100_82552_SMARTSPEED);

			mdio_write(netdev, nic->mii.phy_id,
			           E100_82552_SMARTSPEED, smartspeed |
			           E100_82552_REV_ANEG | E100_82552_ANEG_NOW);
		}
		*enable_wake = true;
	} else {
		*enable_wake = false;
	}

	pci_disable_device(pdev);
}
       
  3116 
       
  3117 static int __e100_power_off(struct pci_dev *pdev, bool wake)
       
  3118 {
       
  3119 	if (wake)
       
  3120 		return pci_prepare_to_sleep(pdev);
       
  3121 
       
  3122 	pci_wake_from_d3(pdev, false);
       
  3123 	pci_set_power_state(pdev, PCI_D3hot);
       
  3124 
       
  3125 	return 0;
       
  3126 }
       
  3127 
       
  3128 #ifdef CONFIG_PM
       
  3129 static int e100_suspend(struct pci_dev *pdev, pm_message_t state)
       
  3130 {
       
  3131 	bool wake;
       
  3132 	__e100_shutdown(pdev, &wake);
       
  3133 	return __e100_power_off(pdev, wake);
       
  3134 }
       
  3135 
       
  3136 static int e100_resume(struct pci_dev *pdev)
       
  3137 {
       
  3138 	struct net_device *netdev = pci_get_drvdata(pdev);
       
  3139 	struct nic *nic = netdev_priv(netdev);
       
  3140 
       
  3141 	pci_set_power_state(pdev, PCI_D0);
       
  3142 	pci_restore_state(pdev);
       
  3143 	/* ack any pending wake events, disable PME */
       
  3144 	pci_enable_wake(pdev, 0, 0);
       
  3145 
       
  3146 	/* disable reverse auto-negotiation */
       
  3147 	if (nic->phy == phy_82552_v) {
       
  3148 		u16 smartspeed = mdio_read(netdev, nic->mii.phy_id,
       
  3149 		                           E100_82552_SMARTSPEED);
       
  3150 
       
  3151 		mdio_write(netdev, nic->mii.phy_id,
       
  3152 		           E100_82552_SMARTSPEED,
       
  3153 		           smartspeed & ~(E100_82552_REV_ANEG));
       
  3154 	}
       
  3155 
       
  3156 	netif_device_attach(netdev);
       
  3157 	if (netif_running(netdev))
       
  3158 		e100_up(nic);
       
  3159 
       
  3160 	return 0;
       
  3161 }
       
  3162 #endif /* CONFIG_PM */
       
  3163 
       
  3164 static void e100_shutdown(struct pci_dev *pdev)
       
  3165 {
       
  3166 	bool wake;
       
  3167 	__e100_shutdown(pdev, &wake);
       
  3168 	if (system_state == SYSTEM_POWER_OFF)
       
  3169 		__e100_power_off(pdev, wake);
       
  3170 }
       
  3171 
       
  3172 /* ------------------ PCI Error Recovery infrastructure  -------------- */
       
  3173 /**
       
  3174  * e100_io_error_detected - called when PCI error is detected.
       
  3175  * @pdev: Pointer to PCI device
       
  3176  * @state: The current pci connection state
       
  3177  */
       
  3178 static pci_ers_result_t e100_io_error_detected(struct pci_dev *pdev, pci_channel_state_t state)
       
  3179 {
       
  3180 	struct net_device *netdev = pci_get_drvdata(pdev);
       
  3181 	struct nic *nic = netdev_priv(netdev);
       
  3182 
       
  3183 	if (nic->ecdev)
       
  3184 		return -EBUSY;
       
  3185 
       
  3186 	netif_device_detach(netdev);
       
  3187 
       
  3188 	if (state == pci_channel_io_perm_failure)
       
  3189 		return PCI_ERS_RESULT_DISCONNECT;
       
  3190 
       
  3191 	if (netif_running(netdev))
       
  3192 		e100_down(nic);
       
  3193 	pci_disable_device(pdev);
       
  3194 
       
  3195 	/* Request a slot reset. */
       
  3196 	return PCI_ERS_RESULT_NEED_RESET;
       
  3197 }
       
  3198 
       
  3199 /**
       
  3200  * e100_io_slot_reset - called after the pci bus has been reset.
       
  3201  * @pdev: Pointer to PCI device
       
  3202  *
       
  3203  * Restart the card from scratch.
       
  3204  */
       
  3205 static pci_ers_result_t e100_io_slot_reset(struct pci_dev *pdev)
       
  3206 {
       
  3207 	struct net_device *netdev = pci_get_drvdata(pdev);
       
  3208 	struct nic *nic = netdev_priv(netdev);
       
  3209 
       
  3210 	if (nic->ecdev)
       
  3211 		return -EBUSY;
       
  3212 
       
  3213 	if (pci_enable_device(pdev)) {
       
  3214 		pr_err("Cannot re-enable PCI device after reset\n");
       
  3215 		return PCI_ERS_RESULT_DISCONNECT;
       
  3216 	}
       
  3217 	pci_set_master(pdev);
       
  3218 
       
  3219 	/* Only one device per card can do a reset */
       
  3220 	if (0 != PCI_FUNC(pdev->devfn))
       
  3221 		return PCI_ERS_RESULT_RECOVERED;
       
  3222 	e100_hw_reset(nic);
       
  3223 	e100_phy_init(nic);
       
  3224 
       
  3225 	return PCI_ERS_RESULT_RECOVERED;
       
  3226 }
       
  3227 
       
/**
 * e100_io_resume - resume normal operations
 * @pdev: Pointer to PCI device
 *
 * Resume normal operations after an error recovery
 * sequence has been completed.
 */
static void e100_io_resume(struct pci_dev *pdev)
{
	struct net_device *netdev = pci_get_drvdata(pdev);
	struct nic *nic = netdev_priv(netdev);

	/* ack any pending wake events, disable PME */
	pci_enable_wake(pdev, 0, 0);

	/* EtherCAT-claimed devices are not managed by the net core, so
	 * only reattach a regular network interface. */
	if (!nic->ecdev)
		netif_device_attach(netdev);
	/* reopen: unconditionally for EtherCAT devices, otherwise only if
	 * the interface was running; the watchdog is a net-core-only timer */
	if (nic->ecdev || netif_running(netdev)) {
		e100_open(netdev);
		if (!nic->ecdev)
			mod_timer(&nic->watchdog, jiffies);
	}
}
       
  3251 
       
/* PCI error-recovery callbacks, referenced from e100_driver below. */
static struct pci_error_handlers e100_err_handler = {
	.error_detected = e100_io_error_detected,
	.slot_reset = e100_io_slot_reset,
	.resume = e100_io_resume,
};
       
  3257 
       
/* PCI driver descriptor; registered in e100_init_module(). */
static struct pci_driver e100_driver = {
	.name =         DRV_NAME,
	.id_table =     e100_id_table,
	.probe =        e100_probe,
	.remove =       __devexit_p(e100_remove),
#ifdef CONFIG_PM
	/* Power Management hooks */
	.suspend =      e100_suspend,
	.resume =       e100_resume,
#endif
	.shutdown =     e100_shutdown,
	.err_handler = &e100_err_handler,
};
       
  3271 
       
  3272 static int __init e100_init_module(void)
       
  3273 {
       
  3274 	if (((1 << debug) - 1) & NETIF_MSG_DRV) {
       
  3275 		pr_info("%s %s, %s\n", DRV_NAME, DRV_DESCRIPTION, DRV_VERSION);
       
  3276 		pr_info("%s\n", DRV_COPYRIGHT);
       
  3277 	}
       
  3278 	return pci_register_driver(&e100_driver);
       
  3279 }
       
  3280 
       
/* Module exit point: unregister the PCI driver, logging around it. */
static void __exit e100_cleanup_module(void)
{
	printk(KERN_INFO DRV_NAME " cleaning up module...\n");
	pci_unregister_driver(&e100_driver);
	printk(KERN_INFO DRV_NAME " module cleaned up.\n");
}
       
  3287 
       
/* wire the module entry/exit points into the kernel module loader */
module_init(e100_init_module);
module_exit(e100_cleanup_module);