devices/e100-2.6.31-ethercat.c
changeset 1812 b7ce5f9f13df
child 1928 e9d5929829ba
equal deleted inserted replaced
1810:a352f3e4f95d 1812:b7ce5f9f13df
       
     1 /******************************************************************************
       
     2  *
       
     3  *  $Id$
       
     4  *
       
     5  *  Copyright (C) 2007-2008  Florian Pose, Ingenieurgemeinschaft IgH
       
     6  *
       
     7  *  This file is part of the IgH EtherCAT Master.
       
     8  *
       
     9  *  The IgH EtherCAT Master is free software; you can redistribute it and/or
       
    10  *  modify it under the terms of the GNU General Public License version 2, as
       
    11  *  published by the Free Software Foundation.
       
    12  *
       
    13  *  The IgH EtherCAT Master is distributed in the hope that it will be useful,
       
    14  *  but WITHOUT ANY WARRANTY; without even the implied warranty of
       
    15  *  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General
       
    16  *  Public License for more details.
       
    17  *
       
    18  *  You should have received a copy of the GNU General Public License along
       
    19  *  with the IgH EtherCAT Master; if not, write to the Free Software
       
    20  *  Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA  02110-1301  USA
       
    21  *
       
    22  *  ---
       
    23  *
       
    24  *  The license mentioned above concerns the source code only. Using the
       
    25  *  EtherCAT technology and brand is only permitted in compliance with the
       
    26  *  industrial property and similar rights of Beckhoff Automation GmbH.
       
    27  *
       
    28  *  ---
       
    29  *
       
    30  *  vim: noexpandtab
       
    31  *
       
    32  *****************************************************************************/
       
    33 
       
    34 /**
       
    35    \file
       
    36    EtherCAT driver for e100-compatible NICs.
       
    37 */
       
    38 
       
    39 /* Former documentation: */
       
    40 
       
    41 /*******************************************************************************
       
    42 
       
    43   Intel PRO/100 Linux driver
       
    44   Copyright(c) 1999 - 2006 Intel Corporation.
       
    45 
       
    46   This program is free software; you can redistribute it and/or modify it
       
    47   under the terms and conditions of the GNU General Public License,
       
    48   version 2, as published by the Free Software Foundation.
       
    49 
       
    50   This program is distributed in the hope it will be useful, but WITHOUT
       
    51   ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
       
    52   FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
       
    53   more details.
       
    54 
       
    55   You should have received a copy of the GNU General Public License along with
       
    56   this program; if not, write to the Free Software Foundation, Inc.,
       
    57   51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
       
    58 
       
    59   The full GNU General Public License is included in this distribution in
       
    60   the file called "COPYING".
       
    61 
       
    62   Contact Information:
       
    63   Linux NICS <linux.nics@intel.com>
       
    64   e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
       
    65   Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
       
    66 
       
    67 *******************************************************************************/
       
    68 
       
    69 /*
       
    70  *	e100.c: Intel(R) PRO/100 ethernet driver
       
    71  *
       
    72  *	(Re)written 2003 by scott.feldman@intel.com.  Based loosely on
       
    73  *	original e100 driver, but better described as a munging of
       
    74  *	e100, e1000, eepro100, tg3, 8139cp, and other drivers.
       
    75  *
       
    76  *	References:
       
    77  *		Intel 8255x 10/100 Mbps Ethernet Controller Family,
       
    78  *		Open Source Software Developers Manual,
       
    79  *		http://sourceforge.net/projects/e1000
       
    80  *
       
    81  *
       
    82  *	                      Theory of Operation
       
    83  *
       
    84  *	I.   General
       
    85  *
       
    86  *	The driver supports Intel(R) 10/100 Mbps PCI Fast Ethernet
       
    87  *	controller family, which includes the 82557, 82558, 82559, 82550,
       
    88  *	82551, and 82562 devices.  82558 and greater controllers
       
    89  *	integrate the Intel 82555 PHY.  The controllers are used in
       
    90  *	server and client network interface cards, as well as in
       
    91  *	LAN-On-Motherboard (LOM), CardBus, MiniPCI, and ICHx
       
    92  *	configurations.  8255x supports a 32-bit linear addressing
       
    93  *	mode and operates at 33Mhz PCI clock rate.
       
    94  *
       
    95  *	II.  Driver Operation
       
    96  *
       
    97  *	Memory-mapped mode is used exclusively to access the device's
       
    98  *	shared-memory structure, the Control/Status Registers (CSR). All
       
    99  *	setup, configuration, and control of the device, including queuing
       
   100  *	of Tx, Rx, and configuration commands is through the CSR.
       
   101  *	cmd_lock serializes accesses to the CSR command register.  cb_lock
       
   102  *	protects the shared Command Block List (CBL).
       
   103  *
       
   104  *	8255x is highly MII-compliant and all access to the PHY go
       
   105  *	through the Management Data Interface (MDI).  Consequently, the
       
   106  *	driver leverages the mii.c library shared with other MII-compliant
       
   107  *	devices.
       
   108  *
       
   109  *	Big- and Little-Endian byte order as well as 32- and 64-bit
       
   110  *	archs are supported.  Weak-ordered memory and non-cache-coherent
       
   111  *	archs are supported.
       
   112  *
       
   113  *	III. Transmit
       
   114  *
       
   115  *	A Tx skb is mapped and hangs off of a TCB.  TCBs are linked
       
   116  *	together in a fixed-size ring (CBL) thus forming the flexible mode
       
   117  *	memory structure.  A TCB marked with the suspend-bit indicates
       
   118  *	the end of the ring.  The last TCB processed suspends the
       
   119  *	controller, and the controller can be restarted by issue a CU
       
   120  *	resume command to continue from the suspend point, or a CU start
       
   121  *	command to start at a given position in the ring.
       
   122  *
       
   123  *	Non-Tx commands (config, multicast setup, etc) are linked
       
   124  *	into the CBL ring along with Tx commands.  The common structure
       
   125  *	used for both Tx and non-Tx commands is the Command Block (CB).
       
   126  *
       
   127  *	cb_to_use is the next CB to use for queuing a command; cb_to_clean
       
   128  *	is the next CB to check for completion; cb_to_send is the first
       
   129  *	CB to start on in case of a previous failure to resume.  CB clean
       
   130  *	up happens in interrupt context in response to a CU interrupt.
       
   131  *	cbs_avail keeps track of number of free CB resources available.
       
   132  *
       
   133  * 	Hardware padding of short packets to minimum packet size is
       
   134  * 	enabled.  82557 pads with 7Eh, while the later controllers pad
       
   135  * 	with 00h.
       
   136  *
       
   137  *	IV.  Receive
       
   138  *
       
   139  *	The Receive Frame Area (RFA) comprises a ring of Receive Frame
       
   140  *	Descriptors (RFD) + data buffer, thus forming the simplified mode
       
   141  *	memory structure.  Rx skbs are allocated to contain both the RFD
       
   142  *	and the data buffer, but the RFD is pulled off before the skb is
       
   143  *	indicated.  The data buffer is aligned such that encapsulated
       
   144  *	protocol headers are u32-aligned.  Since the RFD is part of the
       
   145  *	mapped shared memory, and completion status is contained within
       
   146  *	the RFD, the RFD must be dma_sync'ed to maintain a consistent
       
   147  *	view from software and hardware.
       
   148  *
       
   149  *	In order to keep updates to the RFD link field from colliding with
       
   150  *	hardware writes to mark packets complete, we use the feature that
       
   151  *	hardware will not write to a size 0 descriptor and mark the previous
       
   152  *	packet as end-of-list (EL).   After updating the link, we remove EL
       
   153  *	and only then restore the size such that hardware may use the
       
   154  *	previous-to-end RFD.
       
   155  *
       
   156  *	Under typical operation, the  receive unit (RU) is start once,
       
   157  *	and the controller happily fills RFDs as frames arrive.  If
       
   158  *	replacement RFDs cannot be allocated, or the RU goes non-active,
       
   159  *	the RU must be restarted.  Frame arrival generates an interrupt,
       
   160  *	and Rx indication and re-allocation happen in the same context,
       
   161  *	therefore no locking is required.  A software-generated interrupt
       
   162  *	is generated from the watchdog to recover from a failed allocation
       
   163  *	scenario where all Rx resources have been indicated and none re-
       
   164  *	placed.
       
   165  *
       
   166  *	V.   Miscellaneous
       
   167  *
       
   168  * 	VLAN offloading of tagging, stripping and filtering is not
       
   169  * 	supported, but driver will accommodate the extra 4-byte VLAN tag
       
   170  * 	for processing by upper layers.  Tx/Rx Checksum offloading is not
       
   171  * 	supported.  Tx Scatter/Gather is not supported.  Jumbo Frames is
       
   172  * 	not supported (hardware limitation).
       
   173  *
       
   174  * 	MagicPacket(tm) WoL support is enabled/disabled via ethtool.
       
   175  *
       
   176  * 	Thanks to JC (jchapman@katalix.com) for helping with
       
   177  * 	testing/troubleshooting the development driver.
       
   178  *
       
   179  * 	TODO:
       
   180  * 	o several entry points race with dev->close
       
   181  * 	o check for tx-no-resources/stop Q races with tx clean/wake Q
       
   182  *
       
   183  *	FIXES:
       
   184  * 2005/12/02 - Michael O'Donnell <Michael.ODonnell at stratus dot com>
       
   185  *	- Stratus87247: protect MDI control register manipulations
       
   186  * 2009/06/01 - Andreas Mohr <andi at lisas dot de>
       
   187  *      - add clean lowlevel I/O emulation for cards with MII-lacking PHYs
       
   188  */
       
   189 
       
   190 #include <linux/module.h>
       
   191 #include <linux/moduleparam.h>
       
   192 #include <linux/kernel.h>
       
   193 #include <linux/types.h>
       
   194 #include <linux/slab.h>
       
   195 #include <linux/delay.h>
       
   196 #include <linux/init.h>
       
   197 #include <linux/pci.h>
       
   198 #include <linux/dma-mapping.h>
       
   199 #include <linux/netdevice.h>
       
   200 #include <linux/etherdevice.h>
       
   201 #include <linux/mii.h>
       
   202 #include <linux/if_vlan.h>
       
   203 #include <linux/skbuff.h>
       
   204 #include <linux/ethtool.h>
       
   205 #include <linux/string.h>
       
   206 #include <linux/firmware.h>
       
   207 
       
   208 // EtherCAT includes
       
   209 #include "../globals.h"
       
   210 #include "ecdev.h"
       
   211 
       
   212 #define DRV_NAME		"ec_e100"
       
   213 #include <asm/unaligned.h>
       
   214 
       
   215 
       
   216 #define DRV_EXT			"-NAPI"
       
   217 #define DRV_VERSION		"3.5.24-k2"DRV_EXT
       
   218 #define DRV_DESCRIPTION		"Intel(R) PRO/100 Network Driver"
       
   219 #define DRV_COPYRIGHT		"Copyright(c) 1999-2006 Intel Corporation"
       
   220 #define PFX			DRV_NAME ": "
       
   221 
       
   222 #define E100_WATCHDOG_PERIOD	(2 * HZ)
       
   223 #define E100_NAPI_WEIGHT	16
       
   224 
       
   225 #define FIRMWARE_D101M		"e100/d101m_ucode.bin"
       
   226 #define FIRMWARE_D101S		"e100/d101s_ucode.bin"
       
   227 #define FIRMWARE_D102E		"e100/d102e_ucode.bin"
       
   228 
       
   229 MODULE_DESCRIPTION(DRV_DESCRIPTION);
       
   230 MODULE_AUTHOR(DRV_COPYRIGHT);
       
   231 MODULE_LICENSE("GPL");
       
   232 MODULE_VERSION(DRV_VERSION);
       
   233 MODULE_FIRMWARE(FIRMWARE_D101M);
       
   234 MODULE_FIRMWARE(FIRMWARE_D101S);
       
   235 MODULE_FIRMWARE(FIRMWARE_D102E);
       
   236 
       
   237 MODULE_DESCRIPTION(DRV_DESCRIPTION);
       
   238 MODULE_AUTHOR("Florian Pose <fp@igh-essen.com>");
       
   239 MODULE_LICENSE("GPL");
       
   240 MODULE_VERSION(DRV_VERSION ", master " EC_MASTER_VERSION);
       
   241 
       
   242 void e100_ec_poll(struct net_device *);
       
   243 
       
   244 static int debug = 3;
       
   245 static int eeprom_bad_csum_allow = 0;
       
   246 static int use_io = 0;
       
   247 module_param(debug, int, 0);
       
   248 module_param(eeprom_bad_csum_allow, int, 0);
       
   249 module_param(use_io, int, 0);
       
   250 MODULE_PARM_DESC(debug, "Debug level (0=none,...,16=all)");
       
   251 MODULE_PARM_DESC(eeprom_bad_csum_allow, "Allow bad eeprom checksums");
       
   252 MODULE_PARM_DESC(use_io, "Force use of i/o access mode");
       
   253 #define DPRINTK(nlevel, klevel, fmt, args...) \
       
   254 	(void)((NETIF_MSG_##nlevel & nic->msg_enable) && \
       
   255 	printk(KERN_##klevel PFX "%s: %s: " fmt, nic->netdev->name, \
       
   256 		__func__ , ## args))
       
   257 
       
   258 #define INTEL_8255X_ETHERNET_DEVICE(device_id, ich) {\
       
   259 	PCI_VENDOR_ID_INTEL, device_id, PCI_ANY_ID, PCI_ANY_ID, \
       
   260 	PCI_CLASS_NETWORK_ETHERNET << 8, 0xFFFF00, ich }
       
   261 static struct pci_device_id e100_id_table[] = {
       
   262 	INTEL_8255X_ETHERNET_DEVICE(0x1029, 0),
       
   263 	INTEL_8255X_ETHERNET_DEVICE(0x1030, 0),
       
   264 	INTEL_8255X_ETHERNET_DEVICE(0x1031, 3),
       
   265 	INTEL_8255X_ETHERNET_DEVICE(0x1032, 3),
       
   266 	INTEL_8255X_ETHERNET_DEVICE(0x1033, 3),
       
   267 	INTEL_8255X_ETHERNET_DEVICE(0x1034, 3),
       
   268 	INTEL_8255X_ETHERNET_DEVICE(0x1038, 3),
       
   269 	INTEL_8255X_ETHERNET_DEVICE(0x1039, 4),
       
   270 	INTEL_8255X_ETHERNET_DEVICE(0x103A, 4),
       
   271 	INTEL_8255X_ETHERNET_DEVICE(0x103B, 4),
       
   272 	INTEL_8255X_ETHERNET_DEVICE(0x103C, 4),
       
   273 	INTEL_8255X_ETHERNET_DEVICE(0x103D, 4),
       
   274 	INTEL_8255X_ETHERNET_DEVICE(0x103E, 4),
       
   275 	INTEL_8255X_ETHERNET_DEVICE(0x1050, 5),
       
   276 	INTEL_8255X_ETHERNET_DEVICE(0x1051, 5),
       
   277 	INTEL_8255X_ETHERNET_DEVICE(0x1052, 5),
       
   278 	INTEL_8255X_ETHERNET_DEVICE(0x1053, 5),
       
   279 	INTEL_8255X_ETHERNET_DEVICE(0x1054, 5),
       
   280 	INTEL_8255X_ETHERNET_DEVICE(0x1055, 5),
       
   281 	INTEL_8255X_ETHERNET_DEVICE(0x1056, 5),
       
   282 	INTEL_8255X_ETHERNET_DEVICE(0x1057, 5),
       
   283 	INTEL_8255X_ETHERNET_DEVICE(0x1059, 0),
       
   284 	INTEL_8255X_ETHERNET_DEVICE(0x1064, 6),
       
   285 	INTEL_8255X_ETHERNET_DEVICE(0x1065, 6),
       
   286 	INTEL_8255X_ETHERNET_DEVICE(0x1066, 6),
       
   287 	INTEL_8255X_ETHERNET_DEVICE(0x1067, 6),
       
   288 	INTEL_8255X_ETHERNET_DEVICE(0x1068, 6),
       
   289 	INTEL_8255X_ETHERNET_DEVICE(0x1069, 6),
       
   290 	INTEL_8255X_ETHERNET_DEVICE(0x106A, 6),
       
   291 	INTEL_8255X_ETHERNET_DEVICE(0x106B, 6),
       
   292 	INTEL_8255X_ETHERNET_DEVICE(0x1091, 7),
       
   293 	INTEL_8255X_ETHERNET_DEVICE(0x1092, 7),
       
   294 	INTEL_8255X_ETHERNET_DEVICE(0x1093, 7),
       
   295 	INTEL_8255X_ETHERNET_DEVICE(0x1094, 7),
       
   296 	INTEL_8255X_ETHERNET_DEVICE(0x1095, 7),
       
   297 	INTEL_8255X_ETHERNET_DEVICE(0x10fe, 7),
       
   298 	INTEL_8255X_ETHERNET_DEVICE(0x1209, 0),
       
   299 	INTEL_8255X_ETHERNET_DEVICE(0x1229, 0),
       
   300 	INTEL_8255X_ETHERNET_DEVICE(0x2449, 2),
       
   301 	INTEL_8255X_ETHERNET_DEVICE(0x2459, 2),
       
   302 	INTEL_8255X_ETHERNET_DEVICE(0x245D, 2),
       
   303 	INTEL_8255X_ETHERNET_DEVICE(0x27DC, 7),
       
   304 	{ 0, }
       
   305 };
       
   306 
       
   307 // prevent from being loaded automatically
       
   308 //MODULE_DEVICE_TABLE(pci, e100_id_table);
       
   309 
       
   310 enum mac {
       
   311 	mac_82557_D100_A  = 0,
       
   312 	mac_82557_D100_B  = 1,
       
   313 	mac_82557_D100_C  = 2,
       
   314 	mac_82558_D101_A4 = 4,
       
   315 	mac_82558_D101_B0 = 5,
       
   316 	mac_82559_D101M   = 8,
       
   317 	mac_82559_D101S   = 9,
       
   318 	mac_82550_D102    = 12,
       
   319 	mac_82550_D102_C  = 13,
       
   320 	mac_82551_E       = 14,
       
   321 	mac_82551_F       = 15,
       
   322 	mac_82551_10      = 16,
       
   323 	mac_unknown       = 0xFF,
       
   324 };
       
   325 
       
   326 enum phy {
       
   327 	phy_100a     = 0x000003E0,
       
   328 	phy_100c     = 0x035002A8,
       
   329 	phy_82555_tx = 0x015002A8,
       
   330 	phy_nsc_tx   = 0x5C002000,
       
   331 	phy_82562_et = 0x033002A8,
       
   332 	phy_82562_em = 0x032002A8,
       
   333 	phy_82562_ek = 0x031002A8,
       
   334 	phy_82562_eh = 0x017002A8,
       
   335 	phy_82552_v  = 0xd061004d,
       
   336 	phy_unknown  = 0xFFFFFFFF,
       
   337 };
       
   338 
       
   339 /* CSR (Control/Status Registers) */
       
   340 struct csr {
       
   341 	struct {
       
   342 		u8 status;
       
   343 		u8 stat_ack;
       
   344 		u8 cmd_lo;
       
   345 		u8 cmd_hi;
       
   346 		u32 gen_ptr;
       
   347 	} scb;
       
   348 	u32 port;
       
   349 	u16 flash_ctrl;
       
   350 	u8 eeprom_ctrl_lo;
       
   351 	u8 eeprom_ctrl_hi;
       
   352 	u32 mdi_ctrl;
       
   353 	u32 rx_dma_count;
       
   354 };
       
   355 
       
   356 enum scb_status {
       
   357 	rus_no_res       = 0x08,
       
   358 	rus_ready        = 0x10,
       
   359 	rus_mask         = 0x3C,
       
   360 };
       
   361 
       
   362 enum ru_state  {
       
   363 	RU_SUSPENDED = 0,
       
   364 	RU_RUNNING	 = 1,
       
   365 	RU_UNINITIALIZED = -1,
       
   366 };
       
   367 
       
   368 enum scb_stat_ack {
       
   369 	stat_ack_not_ours    = 0x00,
       
   370 	stat_ack_sw_gen      = 0x04,
       
   371 	stat_ack_rnr         = 0x10,
       
   372 	stat_ack_cu_idle     = 0x20,
       
   373 	stat_ack_frame_rx    = 0x40,
       
   374 	stat_ack_cu_cmd_done = 0x80,
       
   375 	stat_ack_not_present = 0xFF,
       
   376 	stat_ack_rx = (stat_ack_sw_gen | stat_ack_rnr | stat_ack_frame_rx),
       
   377 	stat_ack_tx = (stat_ack_cu_idle | stat_ack_cu_cmd_done),
       
   378 };
       
   379 
       
   380 enum scb_cmd_hi {
       
   381 	irq_mask_none = 0x00,
       
   382 	irq_mask_all  = 0x01,
       
   383 	irq_sw_gen    = 0x02,
       
   384 };
       
   385 
       
   386 enum scb_cmd_lo {
       
   387 	cuc_nop        = 0x00,
       
   388 	ruc_start      = 0x01,
       
   389 	ruc_load_base  = 0x06,
       
   390 	cuc_start      = 0x10,
       
   391 	cuc_resume     = 0x20,
       
   392 	cuc_dump_addr  = 0x40,
       
   393 	cuc_dump_stats = 0x50,
       
   394 	cuc_load_base  = 0x60,
       
   395 	cuc_dump_reset = 0x70,
       
   396 };
       
   397 
       
   398 enum cuc_dump {
       
   399 	cuc_dump_complete       = 0x0000A005,
       
   400 	cuc_dump_reset_complete = 0x0000A007,
       
   401 };
       
   402 
       
   403 enum port {
       
   404 	software_reset  = 0x0000,
       
   405 	selftest        = 0x0001,
       
   406 	selective_reset = 0x0002,
       
   407 };
       
   408 
       
   409 enum eeprom_ctrl_lo {
       
   410 	eesk = 0x01,
       
   411 	eecs = 0x02,
       
   412 	eedi = 0x04,
       
   413 	eedo = 0x08,
       
   414 };
       
   415 
       
   416 enum mdi_ctrl {
       
   417 	mdi_write = 0x04000000,
       
   418 	mdi_read  = 0x08000000,
       
   419 	mdi_ready = 0x10000000,
       
   420 };
       
   421 
       
   422 enum eeprom_op {
       
   423 	op_write = 0x05,
       
   424 	op_read  = 0x06,
       
   425 	op_ewds  = 0x10,
       
   426 	op_ewen  = 0x13,
       
   427 };
       
   428 
       
   429 enum eeprom_offsets {
       
   430 	eeprom_cnfg_mdix  = 0x03,
       
   431 	eeprom_phy_iface  = 0x06,
       
   432 	eeprom_id         = 0x0A,
       
   433 	eeprom_config_asf = 0x0D,
       
   434 	eeprom_smbus_addr = 0x90,
       
   435 };
       
   436 
       
   437 enum eeprom_cnfg_mdix {
       
   438 	eeprom_mdix_enabled = 0x0080,
       
   439 };
       
   440 
       
   441 enum eeprom_phy_iface {
       
   442 	NoSuchPhy = 0,
       
   443 	I82553AB,
       
   444 	I82553C,
       
   445 	I82503,
       
   446 	DP83840,
       
   447 	S80C240,
       
   448 	S80C24,
       
   449 	I82555,
       
   450 	DP83840A = 10,
       
   451 };
       
   452 
       
   453 enum eeprom_id {
       
   454 	eeprom_id_wol = 0x0020,
       
   455 };
       
   456 
       
   457 enum eeprom_config_asf {
       
   458 	eeprom_asf = 0x8000,
       
   459 	eeprom_gcl = 0x4000,
       
   460 };
       
   461 
       
   462 enum cb_status {
       
   463 	cb_complete = 0x8000,
       
   464 	cb_ok       = 0x2000,
       
   465 };
       
   466 
       
   467 enum cb_command {
       
   468 	cb_nop    = 0x0000,
       
   469 	cb_iaaddr = 0x0001,
       
   470 	cb_config = 0x0002,
       
   471 	cb_multi  = 0x0003,
       
   472 	cb_tx     = 0x0004,
       
   473 	cb_ucode  = 0x0005,
       
   474 	cb_dump   = 0x0006,
       
   475 	cb_tx_sf  = 0x0008,
       
   476 	cb_cid    = 0x1f00,
       
   477 	cb_i      = 0x2000,
       
   478 	cb_s      = 0x4000,
       
   479 	cb_el     = 0x8000,
       
   480 };
       
   481 
       
   482 struct rfd {
       
   483 	__le16 status;
       
   484 	__le16 command;
       
   485 	__le32 link;
       
   486 	__le32 rbd;
       
   487 	__le16 actual_size;
       
   488 	__le16 size;
       
   489 };
       
   490 
       
   491 struct rx {
       
   492 	struct rx *next, *prev;
       
   493 	struct sk_buff *skb;
       
   494 	dma_addr_t dma_addr;
       
   495 };
       
   496 
       
   497 #if defined(__BIG_ENDIAN_BITFIELD)
       
   498 #define X(a,b)	b,a
       
   499 #else
       
   500 #define X(a,b)	a,b
       
   501 #endif
       
   502 struct config {
       
   503 /*0*/	u8 X(byte_count:6, pad0:2);
       
   504 /*1*/	u8 X(X(rx_fifo_limit:4, tx_fifo_limit:3), pad1:1);
       
   505 /*2*/	u8 adaptive_ifs;
       
   506 /*3*/	u8 X(X(X(X(mwi_enable:1, type_enable:1), read_align_enable:1),
       
   507 	   term_write_cache_line:1), pad3:4);
       
   508 /*4*/	u8 X(rx_dma_max_count:7, pad4:1);
       
   509 /*5*/	u8 X(tx_dma_max_count:7, dma_max_count_enable:1);
       
   510 /*6*/	u8 X(X(X(X(X(X(X(late_scb_update:1, direct_rx_dma:1),
       
   511 	   tno_intr:1), cna_intr:1), standard_tcb:1), standard_stat_counter:1),
       
   512 	   rx_discard_overruns:1), rx_save_bad_frames:1);
       
   513 /*7*/	u8 X(X(X(X(X(rx_discard_short_frames:1, tx_underrun_retry:2),
       
   514 	   pad7:2), rx_extended_rfd:1), tx_two_frames_in_fifo:1),
       
   515 	   tx_dynamic_tbd:1);
       
   516 /*8*/	u8 X(X(mii_mode:1, pad8:6), csma_disabled:1);
       
   517 /*9*/	u8 X(X(X(X(X(rx_tcpudp_checksum:1, pad9:3), vlan_arp_tco:1),
       
   518 	   link_status_wake:1), arp_wake:1), mcmatch_wake:1);
       
   519 /*10*/	u8 X(X(X(pad10:3, no_source_addr_insertion:1), preamble_length:2),
       
   520 	   loopback:2);
       
   521 /*11*/	u8 X(linear_priority:3, pad11:5);
       
   522 /*12*/	u8 X(X(linear_priority_mode:1, pad12:3), ifs:4);
       
   523 /*13*/	u8 ip_addr_lo;
       
   524 /*14*/	u8 ip_addr_hi;
       
   525 /*15*/	u8 X(X(X(X(X(X(X(promiscuous_mode:1, broadcast_disabled:1),
       
   526 	   wait_after_win:1), pad15_1:1), ignore_ul_bit:1), crc_16_bit:1),
       
   527 	   pad15_2:1), crs_or_cdt:1);
       
   528 /*16*/	u8 fc_delay_lo;
       
   529 /*17*/	u8 fc_delay_hi;
       
   530 /*18*/	u8 X(X(X(X(X(rx_stripping:1, tx_padding:1), rx_crc_transfer:1),
       
   531 	   rx_long_ok:1), fc_priority_threshold:3), pad18:1);
       
   532 /*19*/	u8 X(X(X(X(X(X(X(addr_wake:1, magic_packet_disable:1),
       
   533 	   fc_disable:1), fc_restop:1), fc_restart:1), fc_reject:1),
       
   534 	   full_duplex_force:1), full_duplex_pin:1);
       
   535 /*20*/	u8 X(X(X(pad20_1:5, fc_priority_location:1), multi_ia:1), pad20_2:1);
       
   536 /*21*/	u8 X(X(pad21_1:3, multicast_all:1), pad21_2:4);
       
   537 /*22*/	u8 X(X(rx_d102_mode:1, rx_vlan_drop:1), pad22:6);
       
   538 	u8 pad_d102[9];
       
   539 };
       
   540 
       
   541 #define E100_MAX_MULTICAST_ADDRS	64
       
   542 struct multi {
       
   543 	__le16 count;
       
   544 	u8 addr[E100_MAX_MULTICAST_ADDRS * ETH_ALEN + 2/*pad*/];
       
   545 };
       
   546 
       
   547 /* Important: keep total struct u32-aligned */
       
   548 #define UCODE_SIZE			134
       
   549 struct cb {
       
   550 	__le16 status;
       
   551 	__le16 command;
       
   552 	__le32 link;
       
   553 	union {
       
   554 		u8 iaaddr[ETH_ALEN];
       
   555 		__le32 ucode[UCODE_SIZE];
       
   556 		struct config config;
       
   557 		struct multi multi;
       
   558 		struct {
       
   559 			u32 tbd_array;
       
   560 			u16 tcb_byte_count;
       
   561 			u8 threshold;
       
   562 			u8 tbd_count;
       
   563 			struct {
       
   564 				__le32 buf_addr;
       
   565 				__le16 size;
       
   566 				u16 eol;
       
   567 			} tbd;
       
   568 		} tcb;
       
   569 		__le32 dump_buffer_addr;
       
   570 	} u;
       
   571 	struct cb *next, *prev;
       
   572 	dma_addr_t dma_addr;
       
   573 	struct sk_buff *skb;
       
   574 };
       
   575 
       
   576 enum loopback {
       
   577 	lb_none = 0, lb_mac = 1, lb_phy = 3,
       
   578 };
       
   579 
       
   580 struct stats {
       
   581 	__le32 tx_good_frames, tx_max_collisions, tx_late_collisions,
       
   582 		tx_underruns, tx_lost_crs, tx_deferred, tx_single_collisions,
       
   583 		tx_multiple_collisions, tx_total_collisions;
       
   584 	__le32 rx_good_frames, rx_crc_errors, rx_alignment_errors,
       
   585 		rx_resource_errors, rx_overrun_errors, rx_cdt_errors,
       
   586 		rx_short_frame_errors;
       
   587 	__le32 fc_xmt_pause, fc_rcv_pause, fc_rcv_unsupported;
       
   588 	__le16 xmt_tco_frames, rcv_tco_frames;
       
   589 	__le32 complete;
       
   590 };
       
   591 
       
   592 struct mem {
       
   593 	struct {
       
   594 		u32 signature;
       
   595 		u32 result;
       
   596 	} selftest;
       
   597 	struct stats stats;
       
   598 	u8 dump_buf[596];
       
   599 };
       
   600 
       
   601 struct param_range {
       
   602 	u32 min;
       
   603 	u32 max;
       
   604 	u32 count;
       
   605 };
       
   606 
       
   607 struct params {
       
   608 	struct param_range rfds;
       
   609 	struct param_range cbs;
       
   610 };
       
   611 
       
   612 struct nic {
       
   613 	/* Begin: frequently used values: keep adjacent for cache effect */
       
   614 	u32 msg_enable				____cacheline_aligned;
       
   615 	struct net_device *netdev;
       
   616 	struct pci_dev *pdev;
       
   617 	u16 (*mdio_ctrl)(struct nic *nic, u32 addr, u32 dir, u32 reg, u16 data);
       
   618 
       
   619 	struct rx *rxs				____cacheline_aligned;
       
   620 	struct rx *rx_to_use;
       
   621 	struct rx *rx_to_clean;
       
   622 	struct rfd blank_rfd;
       
   623 	enum ru_state ru_running;
       
   624 
       
   625 	spinlock_t cb_lock			____cacheline_aligned;
       
   626 	spinlock_t cmd_lock;
       
   627 	struct csr __iomem *csr;
       
   628 	enum scb_cmd_lo cuc_cmd;
       
   629 	unsigned int cbs_avail;
       
   630 	struct napi_struct napi;
       
   631 	struct cb *cbs;
       
   632 	struct cb *cb_to_use;
       
   633 	struct cb *cb_to_send;
       
   634 	struct cb *cb_to_clean;
       
   635 	__le16 tx_command;
       
   636 	/* End: frequently used values: keep adjacent for cache effect */
       
   637 
       
   638 	enum {
       
   639 		ich                = (1 << 0),
       
   640 		promiscuous        = (1 << 1),
       
   641 		multicast_all      = (1 << 2),
       
   642 		wol_magic          = (1 << 3),
       
   643 		ich_10h_workaround = (1 << 4),
       
   644 	} flags					____cacheline_aligned;
       
   645 
       
   646 	enum mac mac;
       
   647 	enum phy phy;
       
   648 	struct params params;
       
   649 	struct timer_list watchdog;
       
   650 	struct timer_list blink_timer;
       
   651 	struct mii_if_info mii;
       
   652 	struct work_struct tx_timeout_task;
       
   653 	enum loopback loopback;
       
   654 
       
   655 	struct mem *mem;
       
   656 	dma_addr_t dma_addr;
       
   657 
       
   658 	dma_addr_t cbs_dma_addr;
       
   659 	u8 adaptive_ifs;
       
   660 	u8 tx_threshold;
       
   661 	u32 tx_frames;
       
   662 	u32 tx_collisions;
       
   663 
       
   664 	u32 tx_deferred;
       
   665 	u32 tx_single_collisions;
       
   666 	u32 tx_multiple_collisions;
       
   667 	u32 tx_fc_pause;
       
   668 	u32 tx_tco_frames;
       
   669 
       
   670 	u32 rx_fc_pause;
       
   671 	u32 rx_fc_unsupported;
       
   672 	u32 rx_tco_frames;
       
   673 	u32 rx_over_length_errors;
       
   674 
       
   675 	u16 leds;
       
   676 	u16 eeprom_wc;
       
   677 
       
   678 	__le16 eeprom[256];
       
   679 	spinlock_t mdio_lock;
       
   680 
       
   681 	ec_device_t *ecdev;
       
   682 	unsigned long ec_watchdog_jiffies;
       
   683 };
       
   684 
       
   685 static inline void e100_write_flush(struct nic *nic)
       
   686 {
       
   687 	/* Flush previous PCI writes through intermediate bridges
       
   688 	 * by doing a benign read */
       
   689 	(void)ioread8(&nic->csr->scb.status);
       
   690 }
       
   691 
       
   692 static void e100_enable_irq(struct nic *nic)
       
   693 {
       
   694 	unsigned long flags;
       
   695 
       
   696 	if (nic->ecdev)
       
   697 		return;
       
   698 
       
   699 	spin_lock_irqsave(&nic->cmd_lock, flags);
       
   700 	iowrite8(irq_mask_none, &nic->csr->scb.cmd_hi);
       
   701 	e100_write_flush(nic);
       
   702 	spin_unlock_irqrestore(&nic->cmd_lock, flags);
       
   703 }
       
   704 
       
   705 static void e100_disable_irq(struct nic *nic)
       
   706 {
       
   707 	unsigned long flags;
       
   708 
       
   709 	if (!nic->ecdev)
       
   710 		spin_lock_irqsave(&nic->cmd_lock, flags);
       
   711 	iowrite8(irq_mask_all, &nic->csr->scb.cmd_hi);
       
   712 	e100_write_flush(nic);
       
   713 	if (!nic->ecdev)
       
   714 		spin_unlock_irqrestore(&nic->cmd_lock, flags);
       
   715 }
       
   716 
       
   717 static void e100_hw_reset(struct nic *nic)
       
   718 {
       
   719 	/* Put CU and RU into idle with a selective reset to get
       
   720 	 * device off of PCI bus */
       
   721 	iowrite32(selective_reset, &nic->csr->port);
       
   722 	e100_write_flush(nic); udelay(20);
       
   723 
       
   724 	/* Now fully reset device */
       
   725 	iowrite32(software_reset, &nic->csr->port);
       
   726 	e100_write_flush(nic); udelay(20);
       
   727 
       
   728 	/* Mask off our interrupt line - it's unmasked after reset */
       
   729 	e100_disable_irq(nic);
       
   730 }
       
   731 
       
/* Run the controller's built-in self-test via the PORT register.
 * Returns 0 on success, -ETIMEDOUT if the test reported failure or
 * never wrote its results back.  NOTE: dma_addr is truncated to u32;
 * assumes the shared mem block was allocated in 32-bit DMA space. */
static int e100_self_test(struct nic *nic)
{
	u32 dma_addr = nic->dma_addr + offsetof(struct mem, selftest);

	/* Passing the self-test is a pretty good indication
	 * that the device can DMA to/from host memory */

	/* Pre-set sentinel values the device is expected to overwrite */
	nic->mem->selftest.signature = 0;
	nic->mem->selftest.result = 0xFFFFFFFF;

	iowrite32(selftest | dma_addr, &nic->csr->port);
	e100_write_flush(nic);
	/* Wait 10 msec for self-test to complete */
	msleep(10);

	/* Interrupts are enabled after self-test */
	e100_disable_irq(nic);

	/* Check results of self-test */
	if (nic->mem->selftest.result != 0) {
		DPRINTK(HW, ERR, "Self-test failed: result=0x%08X\n",
			nic->mem->selftest.result);
		return -ETIMEDOUT;
	}
	/* Signature still zero means the device never wrote back */
	if (nic->mem->selftest.signature == 0) {
		DPRINTK(HW, ERR, "Self-test failed: timed out\n");
		return -ETIMEDOUT;
	}

	return 0;
}
       
   763 
       
   764 static void e100_eeprom_write(struct nic *nic, u16 addr_len, u16 addr, __le16 data)
       
   765 {
       
   766 	u32 cmd_addr_data[3];
       
   767 	u8 ctrl;
       
   768 	int i, j;
       
   769 
       
   770 	/* Three cmds: write/erase enable, write data, write/erase disable */
       
   771 	cmd_addr_data[0] = op_ewen << (addr_len - 2);
       
   772 	cmd_addr_data[1] = (((op_write << addr_len) | addr) << 16) |
       
   773 		le16_to_cpu(data);
       
   774 	cmd_addr_data[2] = op_ewds << (addr_len - 2);
       
   775 
       
   776 	/* Bit-bang cmds to write word to eeprom */
       
   777 	for (j = 0; j < 3; j++) {
       
   778 
       
   779 		/* Chip select */
       
   780 		iowrite8(eecs | eesk, &nic->csr->eeprom_ctrl_lo);
       
   781 		e100_write_flush(nic); udelay(4);
       
   782 
       
   783 		for (i = 31; i >= 0; i--) {
       
   784 			ctrl = (cmd_addr_data[j] & (1 << i)) ?
       
   785 				eecs | eedi : eecs;
       
   786 			iowrite8(ctrl, &nic->csr->eeprom_ctrl_lo);
       
   787 			e100_write_flush(nic); udelay(4);
       
   788 
       
   789 			iowrite8(ctrl | eesk, &nic->csr->eeprom_ctrl_lo);
       
   790 			e100_write_flush(nic); udelay(4);
       
   791 		}
       
   792 		/* Wait 10 msec for cmd to complete */
       
   793 		msleep(10);
       
   794 
       
   795 		/* Chip deselect */
       
   796 		iowrite8(0, &nic->csr->eeprom_ctrl_lo);
       
   797 		e100_write_flush(nic); udelay(4);
       
   798 	}
       
   799 };
       
   800 
       
   801 /* General technique stolen from the eepro100 driver - very clever */
       
   802 static __le16 e100_eeprom_read(struct nic *nic, u16 *addr_len, u16 addr)
       
   803 {
       
   804 	u32 cmd_addr_data;
       
   805 	u16 data = 0;
       
   806 	u8 ctrl;
       
   807 	int i;
       
   808 
       
   809 	cmd_addr_data = ((op_read << *addr_len) | addr) << 16;
       
   810 
       
   811 	/* Chip select */
       
   812 	iowrite8(eecs | eesk, &nic->csr->eeprom_ctrl_lo);
       
   813 	e100_write_flush(nic); udelay(4);
       
   814 
       
   815 	/* Bit-bang to read word from eeprom */
       
   816 	for (i = 31; i >= 0; i--) {
       
   817 		ctrl = (cmd_addr_data & (1 << i)) ? eecs | eedi : eecs;
       
   818 		iowrite8(ctrl, &nic->csr->eeprom_ctrl_lo);
       
   819 		e100_write_flush(nic); udelay(4);
       
   820 
       
   821 		iowrite8(ctrl | eesk, &nic->csr->eeprom_ctrl_lo);
       
   822 		e100_write_flush(nic); udelay(4);
       
   823 
       
   824 		/* Eeprom drives a dummy zero to EEDO after receiving
       
   825 		 * complete address.  Use this to adjust addr_len. */
       
   826 		ctrl = ioread8(&nic->csr->eeprom_ctrl_lo);
       
   827 		if (!(ctrl & eedo) && i > 16) {
       
   828 			*addr_len -= (i - 16);
       
   829 			i = 17;
       
   830 		}
       
   831 
       
   832 		data = (data << 1) | (ctrl & eedo ? 1 : 0);
       
   833 	}
       
   834 
       
   835 	/* Chip deselect */
       
   836 	iowrite8(0, &nic->csr->eeprom_ctrl_lo);
       
   837 	e100_write_flush(nic); udelay(4);
       
   838 
       
   839 	return cpu_to_le16(data);
       
   840 };
       
   841 
       
/* Load entire EEPROM image into driver cache and validate checksum.
 * Returns 0 on success, -EAGAIN on a bad checksum (unless overridden
 * by the eeprom_bad_csum_allow module parameter). */
static int e100_eeprom_load(struct nic *nic)
{
	u16 addr, addr_len = 8, checksum = 0;

	/* Try reading with an 8-bit addr len to discover actual addr len */
	e100_eeprom_read(nic, &addr_len, 0);
	/* Word count is 2^addr_len */
	nic->eeprom_wc = 1 << addr_len;

	for (addr = 0; addr < nic->eeprom_wc; addr++) {
		nic->eeprom[addr] = e100_eeprom_read(nic, &addr_len, addr);
		/* The last word holds the checksum itself - exclude it */
		if (addr < nic->eeprom_wc - 1)
			checksum += le16_to_cpu(nic->eeprom[addr]);
	}

	/* The checksum, stored in the last word, is calculated such that
	 * the sum of words should be 0xBABA */
	if (cpu_to_le16(0xBABA - checksum) != nic->eeprom[nic->eeprom_wc - 1]) {
		DPRINTK(PROBE, ERR, "EEPROM corrupted\n");
		if (!eeprom_bad_csum_allow)
			return -EAGAIN;
	}

	return 0;
}
       
   867 
       
/* Save (portion of) driver EEPROM cache to device and update checksum.
 * Returns 0 on success, -EINVAL if the range would overlap the
 * checksum word (the last word, which is rewritten here anyway). */
static int e100_eeprom_save(struct nic *nic, u16 start, u16 count)
{
	u16 addr, addr_len = 8, checksum = 0;

	/* Try reading with an 8-bit addr len to discover actual addr len */
	e100_eeprom_read(nic, &addr_len, 0);
	nic->eeprom_wc = 1 << addr_len;

	/* Reject ranges that reach into the checksum word */
	if (start + count >= nic->eeprom_wc)
		return -EINVAL;

	for (addr = start; addr < start + count; addr++)
		e100_eeprom_write(nic, addr_len, addr, nic->eeprom[addr]);

	/* The checksum, stored in the last word, is calculated such that
	 * the sum of words should be 0xBABA */
	for (addr = 0; addr < nic->eeprom_wc - 1; addr++)
		checksum += le16_to_cpu(nic->eeprom[addr]);
	nic->eeprom[nic->eeprom_wc - 1] = cpu_to_le16(0xBABA - checksum);
	e100_eeprom_write(nic, addr_len, nic->eeprom_wc - 1,
		nic->eeprom[nic->eeprom_wc - 1]);

	return 0;
}
       
   893 
       
   894 #define E100_WAIT_SCB_TIMEOUT 20000 /* we might have to wait 100ms!!! */
       
   895 #define E100_WAIT_SCB_FAST 20       /* delay like the old code */
       
/* Issue a command to the controller's System Control Block.
 * Waits for the previous command to be accepted (cmd_lo reads zero),
 * optionally loads the general pointer, then writes the command byte.
 * Returns 0 on success, -EAGAIN if the SCB never cleared.
 * In EtherCAT mode cmd_lock is skipped: the master drives the device
 * single-threaded with interrupts masked. */
static int e100_exec_cmd(struct nic *nic, u8 cmd, dma_addr_t dma_addr)
{
	unsigned long flags = 0;
	unsigned int i;
	int err = 0;

	if (!nic->ecdev)
		spin_lock_irqsave(&nic->cmd_lock, flags);

	/* Previous command is accepted when SCB clears */
	for (i = 0; i < E100_WAIT_SCB_TIMEOUT; i++) {
		if (likely(!ioread8(&nic->csr->scb.cmd_lo)))
			break;
		cpu_relax();
		/* Spin fast for the first iterations, then back off */
		if (unlikely(i > E100_WAIT_SCB_FAST))
			udelay(5);
	}
	if (unlikely(i == E100_WAIT_SCB_TIMEOUT)) {
		err = -EAGAIN;
		goto err_unlock;
	}

	/* cuc_resume reuses the previously loaded general pointer */
	if (unlikely(cmd != cuc_resume))
		iowrite32(dma_addr, &nic->csr->scb.gen_ptr);
	iowrite8(cmd, &nic->csr->scb.cmd_lo);

err_unlock:
	if (!nic->ecdev)
		spin_unlock_irqrestore(&nic->cmd_lock, flags);

	return err;
}
       
   928 
       
/* Take the next free control block, let @cb_prepare fill it in, then
 * kick the CU to execute all pending CBs.
 * Returns 0 on success, -ENOMEM when no CB is free, -ENOSPC when this
 * command consumed the last free CB (caller should throttle).
 * In EtherCAT mode cb_lock is skipped (polled, single-threaded). */
static int e100_exec_cb(struct nic *nic, struct sk_buff *skb,
	void (*cb_prepare)(struct nic *, struct cb *, struct sk_buff *))
{
	struct cb *cb;
	unsigned long flags = 0;
	int err = 0;

	if (!nic->ecdev)
		spin_lock_irqsave(&nic->cb_lock, flags);

	if (unlikely(!nic->cbs_avail)) {
		err = -ENOMEM;
		goto err_unlock;
	}

	cb = nic->cb_to_use;
	nic->cb_to_use = cb->next;
	nic->cbs_avail--;
	cb->skb = skb;

	/* Ring is now full - tell the caller, but still run the command */
	if (unlikely(!nic->cbs_avail))
		err = -ENOSPC;

	cb_prepare(nic, cb, skb);

	/* Order is important otherwise we'll be in a race with h/w:
	 * set S-bit in current first, then clear S-bit in previous. */
	cb->command |= cpu_to_le16(cb_s);
	wmb();
	cb->prev->command &= cpu_to_le16(~cb_s);

	while (nic->cb_to_send != nic->cb_to_use) {
		if (unlikely(e100_exec_cmd(nic, nic->cuc_cmd,
			nic->cb_to_send->dma_addr))) {
			/* Ok, here's where things get sticky.  It's
			 * possible that we can't schedule the command
			 * because the controller is too busy, so
			 * let's just queue the command and try again
			 * when another command is scheduled. */
			if (err == -ENOSPC) {
				//request a reset
				schedule_work(&nic->tx_timeout_task);
			}
			break;
		} else {
			nic->cuc_cmd = cuc_resume;
			nic->cb_to_send = nic->cb_to_send->next;
		}
	}

err_unlock:
	if (!nic->ecdev)
		spin_unlock_irqrestore(&nic->cb_lock, flags);

	return err;
}
       
   985 
       
   986 static int mdio_read(struct net_device *netdev, int addr, int reg)
       
   987 {
       
   988 	struct nic *nic = netdev_priv(netdev);
       
   989 	return nic->mdio_ctrl(nic, addr, mdi_read, reg, 0);
       
   990 }
       
   991 
       
   992 static void mdio_write(struct net_device *netdev, int addr, int reg, int data)
       
   993 {
       
   994 	struct nic *nic = netdev_priv(netdev);
       
   995 
       
   996 	nic->mdio_ctrl(nic, addr, mdi_write, reg, data);
       
   997 }
       
   998 
       
   999 /* the standard mdio_ctrl() function for usual MII-compliant hardware */
       
  1000 static u16 mdio_ctrl_hw(struct nic *nic, u32 addr, u32 dir, u32 reg, u16 data)
       
  1001 {
       
  1002 	u32 data_out = 0;
       
  1003 	unsigned int i;
       
  1004 	unsigned long flags = 0;
       
  1005 
       
  1006 
       
  1007 	/*
       
  1008 	 * Stratus87247: we shouldn't be writing the MDI control
       
  1009 	 * register until the Ready bit shows True.  Also, since
       
  1010 	 * manipulation of the MDI control registers is a multi-step
       
  1011 	 * procedure it should be done under lock.
       
  1012 	 */
       
  1013 	if (!nic->ecdev)
       
  1014 		spin_lock_irqsave(&nic->mdio_lock, flags);
       
  1015 	for (i = 100; i; --i) {
       
  1016 		if (ioread32(&nic->csr->mdi_ctrl) & mdi_ready)
       
  1017 			break;
       
  1018 		udelay(20);
       
  1019 	}
       
  1020 	if (unlikely(!i)) {
       
  1021 		printk("e100.mdio_ctrl(%s) won't go Ready\n",
       
  1022 			nic->netdev->name );
       
  1023 		if (!nic->ecdev)
       
  1024 			spin_unlock_irqrestore(&nic->mdio_lock, flags);
       
  1025 		return 0;		/* No way to indicate timeout error */
       
  1026 	}
       
  1027 	iowrite32((reg << 16) | (addr << 21) | dir | data, &nic->csr->mdi_ctrl);
       
  1028 
       
  1029 	for (i = 0; i < 100; i++) {
       
  1030 		udelay(20);
       
  1031 		if ((data_out = ioread32(&nic->csr->mdi_ctrl)) & mdi_ready)
       
  1032 			break;
       
  1033 	}
       
  1034 	if (!nic->ecdev)
       
  1035 		spin_unlock_irqrestore(&nic->mdio_lock, flags);
       
  1036 	DPRINTK(HW, DEBUG,
       
  1037 		"%s:addr=%d, reg=%d, data_in=0x%04X, data_out=0x%04X\n",
       
  1038 		dir == mdi_read ? "READ" : "WRITE", addr, reg, data, data_out);
       
  1039 	return (u16)data_out;
       
  1040 }
       
  1041 
       
  1042 /* slightly tweaked mdio_ctrl() function for phy_82552_v specifics */
       
  1043 static u16 mdio_ctrl_phy_82552_v(struct nic *nic,
       
  1044 				 u32 addr,
       
  1045 				 u32 dir,
       
  1046 				 u32 reg,
       
  1047 				 u16 data)
       
  1048 {
       
  1049 	if ((reg == MII_BMCR) && (dir == mdi_write)) {
       
  1050 		if (data & (BMCR_ANRESTART | BMCR_ANENABLE)) {
       
  1051 			u16 advert = mdio_read(nic->netdev, nic->mii.phy_id,
       
  1052 							MII_ADVERTISE);
       
  1053 
       
  1054 			/*
       
  1055 			 * Workaround Si issue where sometimes the part will not
       
  1056 			 * autoneg to 100Mbps even when advertised.
       
  1057 			 */
       
  1058 			if (advert & ADVERTISE_100FULL)
       
  1059 				data |= BMCR_SPEED100 | BMCR_FULLDPLX;
       
  1060 			else if (advert & ADVERTISE_100HALF)
       
  1061 				data |= BMCR_SPEED100;
       
  1062 		}
       
  1063 	}
       
  1064 	return mdio_ctrl_hw(nic, addr, dir, reg, data);
       
  1065 }
       
  1066 
       
  1067 /* Fully software-emulated mdio_ctrl() function for cards without
       
  1068  * MII-compliant PHYs.
       
  1069  * For now, this is mainly geared towards 80c24 support; in case of further
       
  1070  * requirements for other types (i82503, ...?) either extend this mechanism
       
  1071  * or split it, whichever is cleaner.
       
  1072  */
       
  1073 static u16 mdio_ctrl_phy_mii_emulated(struct nic *nic,
       
  1074 				      u32 addr,
       
  1075 				      u32 dir,
       
  1076 				      u32 reg,
       
  1077 				      u16 data)
       
  1078 {
       
  1079 	/* might need to allocate a netdev_priv'ed register array eventually
       
  1080 	 * to be able to record state changes, but for now
       
  1081 	 * some fully hardcoded register handling ought to be ok I guess. */
       
  1082 
       
  1083 	if (dir == mdi_read) {
       
  1084 		switch (reg) {
       
  1085 		case MII_BMCR:
       
  1086 			/* Auto-negotiation, right? */
       
  1087 			return  BMCR_ANENABLE |
       
  1088 				BMCR_FULLDPLX;
       
  1089 		case MII_BMSR:
       
  1090 			return	BMSR_LSTATUS /* for mii_link_ok() */ |
       
  1091 				BMSR_ANEGCAPABLE |
       
  1092 				BMSR_10FULL;
       
  1093 		case MII_ADVERTISE:
       
  1094 			/* 80c24 is a "combo card" PHY, right? */
       
  1095 			return	ADVERTISE_10HALF |
       
  1096 				ADVERTISE_10FULL;
       
  1097 		default:
       
  1098 			DPRINTK(HW, DEBUG,
       
  1099 		"%s:addr=%d, reg=%d, data=0x%04X: unimplemented emulation!\n",
       
  1100 		dir == mdi_read ? "READ" : "WRITE", addr, reg, data);
       
  1101 			return 0xFFFF;
       
  1102 		}
       
  1103 	} else {
       
  1104 		switch (reg) {
       
  1105 		default:
       
  1106 			DPRINTK(HW, DEBUG,
       
  1107 		"%s:addr=%d, reg=%d, data=0x%04X: unimplemented emulation!\n",
       
  1108 		dir == mdi_read ? "READ" : "WRITE", addr, reg, data);
       
  1109 			return 0xFFFF;
       
  1110 		}
       
  1111 	}
       
  1112 }
       
  1113 static inline int e100_phy_supports_mii(struct nic *nic)
       
  1114 {
       
  1115 	/* for now, just check it by comparing whether we
       
  1116 	   are using MII software emulation.
       
  1117 	*/
       
  1118 	return (nic->mdio_ctrl != mdio_ctrl_phy_mii_emulated);
       
  1119 }
       
  1120 
       
/* Fill in hardware-independent defaults: MAC revision, RFD/CB ring
 * parameters, TX command template, blank RFD template, MII hooks. */
static void e100_get_defaults(struct nic *nic)
{
	/* Ring size limits for Receive Frame Descriptors and Command Blocks */
	struct param_range rfds = { .min = 16, .max = 256, .count = 256 };
	struct param_range cbs  = { .min = 64, .max = 256, .count = 128 };

	/* MAC type is encoded as rev ID; exception: ICH is treated as 82559 */
	nic->mac = (nic->flags & ich) ? mac_82559_D101M : nic->pdev->revision;
	if (nic->mac == mac_unknown)
		nic->mac = mac_82557_D100_A;

	nic->params.rfds = rfds;
	nic->params.cbs = cbs;

	/* Quadwords to DMA into FIFO before starting frame transmit */
	nic->tx_threshold = 0xE0;

	/* no interrupt for every tx completion, delay = 256us if not 557 */
	nic->tx_command = cpu_to_le16(cb_tx | cb_tx_sf |
		((nic->mac >= mac_82558_D101_A4) ? cb_cid : cb_i));

	/* Template for a freshly allocated RFD */
	nic->blank_rfd.command = 0;
	nic->blank_rfd.rbd = cpu_to_le32(0xFFFFFFFF);
	nic->blank_rfd.size = cpu_to_le16(VLAN_ETH_FRAME_LEN);

	/* MII setup */
	nic->mii.phy_id_mask = 0x1F;
	nic->mii.reg_num_mask = 0x1F;
	nic->mii.dev = nic->netdev;
	nic->mii.mdio_read = mdio_read;
	nic->mii.mdio_write = mdio_write;
}
       
  1153 
       
/* Prepare a CB carrying the 8255x "Configure" command.
 * The byte-level layout follows the 8255x developer's manual; most
 * fields below are the fixed recommended values, adjusted for MAC
 * revision, promiscuous/loopback state, and EtherCAT operation. */
static void e100_configure(struct nic *nic, struct cb *cb, struct sk_buff *skb)
{
	struct config *config = &cb->u.config;
	u8 *c = (u8 *)config;

	cb->command = cpu_to_le16(cb_config);

	memset(config, 0, sizeof(struct config));

	config->byte_count = 0x16;		/* bytes in this struct */
	config->rx_fifo_limit = 0x8;		/* bytes in FIFO before DMA */
	config->direct_rx_dma = 0x1;		/* reserved */
	config->standard_tcb = 0x1;		/* 1=standard, 0=extended */
	config->standard_stat_counter = 0x1;	/* 1=standard, 0=extended */
	config->rx_discard_short_frames = 0x1;	/* 1=discard, 0=pass */
	config->tx_underrun_retry = 0x3;	/* # of underrun retries */
	if (e100_phy_supports_mii(nic))
		config->mii_mode = 1;           /* 1=MII mode, 0=i82503 mode */
	config->pad10 = 0x6;			/* fixed/reserved bits */
	config->no_source_addr_insertion = 0x1;	/* 1=no, 0=yes */
	config->preamble_length = 0x2;		/* 0=1, 1=3, 2=7, 3=15 bytes */
	config->ifs = 0x6;			/* x16 = inter frame spacing */
	config->ip_addr_hi = 0xF2;		/* ARP IP filter - not used */
	config->pad15_1 = 0x1;
	config->pad15_2 = 0x1;
	config->crs_or_cdt = 0x0;		/* 0=CRS only, 1=CRS or CDT */
	config->fc_delay_hi = 0x40;		/* time delay for fc frame */
	config->tx_padding = 0x1;		/* 1=pad short frames */
	config->fc_priority_threshold = 0x7;	/* 7=priority fc disabled */
	config->pad18 = 0x1;
	config->full_duplex_pin = 0x1;		/* 1=examine FDX# pin */
	config->pad20_1 = 0x1F;
	config->fc_priority_location = 0x1;	/* 1=byte#31, 0=byte#19 */
	config->pad21_1 = 0x5;

	config->adaptive_ifs = nic->adaptive_ifs;
	config->loopback = nic->loopback;

	if (nic->mii.force_media && nic->mii.full_duplex)
		config->full_duplex_force = 0x1;	/* 1=force, 0=auto */

	if (nic->flags & promiscuous || nic->loopback) {
		config->rx_save_bad_frames = 0x1;	/* 1=save, 0=discard */
		config->rx_discard_short_frames = 0x0;	/* 1=discard, 0=save */
		config->promiscuous_mode = 0x1;		/* 1=on, 0=off */
	}

	if (nic->flags & multicast_all)
		config->multicast_all = 0x1;		/* 1=accept, 0=no */

	/* disable WoL when up; in EtherCAT mode always disable it */
	if (nic->ecdev || 
			(netif_running(nic->netdev) || !(nic->flags & wol_magic)))
		config->magic_packet_disable = 0x1;	/* 1=off, 0=on */

	/* Revision-dependent features (82558 and newer) */
	if (nic->mac >= mac_82558_D101_A4) {
		config->fc_disable = 0x1;	/* 1=Tx fc off, 0=Tx fc on */
		config->mwi_enable = 0x1;	/* 1=enable, 0=disable */
		config->standard_tcb = 0x0;	/* 1=standard, 0=extended */
		config->rx_long_ok = 0x1;	/* 1=VLANs ok, 0=standard */
		if (nic->mac >= mac_82559_D101M) {
			config->tno_intr = 0x1;		/* TCO stats enable */
			/* Enable TCO in extended config */
			if (nic->mac >= mac_82551_10) {
				config->byte_count = 0x20; /* extended bytes */
				config->rx_d102_mode = 0x1; /* GMRC for TCO */
			}
		} else {
			config->standard_stat_counter = 0x0;
		}
	}

	DPRINTK(HW, DEBUG, "[00-07]=%02X:%02X:%02X:%02X:%02X:%02X:%02X:%02X\n",
		c[0], c[1], c[2], c[3], c[4], c[5], c[6], c[7]);
	DPRINTK(HW, DEBUG, "[08-15]=%02X:%02X:%02X:%02X:%02X:%02X:%02X:%02X\n",
		c[8], c[9], c[10], c[11], c[12], c[13], c[14], c[15]);
	DPRINTK(HW, DEBUG, "[16-23]=%02X:%02X:%02X:%02X:%02X:%02X:%02X:%02X\n",
		c[16], c[17], c[18], c[19], c[20], c[21], c[22], c[23]);
}
       
  1233 
       
  1234 /*************************************************************************
       
  1235 *  CPUSaver parameters
       
  1236 *
       
  1237 *  All CPUSaver parameters are 16-bit literals that are part of a
       
  1238 *  "move immediate value" instruction.  By changing the value of
       
  1239 *  the literal in the instruction before the code is loaded, the
       
  1240 *  driver can change the algorithm.
       
  1241 *
       
  1242 *  INTDELAY - This loads the dead-man timer with its initial value.
       
  1243 *    When this timer expires the interrupt is asserted, and the
       
  1244 *    timer is reset each time a new packet is received.  (see
       
  1245 *    BUNDLEMAX below to set the limit on number of chained packets)
       
  1246 *    The current default is 0x600 or 1536.  Experiments show that
       
  1247 *    the value should probably stay within the 0x200 - 0x1000.
       
  1248 *
       
  1249 *  BUNDLEMAX -
       
  1250 *    This sets the maximum number of frames that will be bundled.  In
       
  1251 *    some situations, such as the TCP windowing algorithm, it may be
       
  1252 *    better to limit the growth of the bundle size than let it go as
       
  1253 *    high as it can, because that could cause too much added latency.
       
  1254 *    The default is six, because this is the number of packets in the
       
  1255 *    default TCP window size.  A value of 1 would make CPUSaver indicate
       
  1256 *    an interrupt for every frame received.  If you do not want to put
       
  1257 *    a limit on the bundle size, set this value to xFFFF.
       
  1258 *
       
  1259 *  BUNDLESMALL -
       
  1260 *    This contains a bit-mask describing the minimum size frame that
       
  1261 *    will be bundled.  The default masks the lower 7 bits, which means
       
  1262 *    that any frame less than 128 bytes in length will not be bundled,
       
  1263 *    but will instead immediately generate an interrupt.  This does
       
  1264 *    not affect the current bundle in any way.  Any frame that is 128
       
*    bytes or larger will be bundled normally.  This feature is meant
       
  1266 *    to provide immediate indication of ACK frames in a TCP environment.
       
  1267 *    Customers were seeing poor performance when a machine with CPUSaver
       
  1268 *    enabled was sending but not receiving.  The delay introduced when
       
  1269 *    the ACKs were received was enough to reduce total throughput, because
       
  1270 *    the sender would sit idle until the ACK was finally seen.
       
  1271 *
       
  1272 *    The current default is 0xFF80, which masks out the lower 7 bits.
       
  1273 *    This means that any frame which is x7F (127) bytes or smaller
       
  1274 *    will cause an immediate interrupt.  Because this value must be a
       
  1275 *    bit mask, there are only a few valid values that can be used.  To
       
  1276 *    turn this feature off, the driver can write the value xFFFF to the
       
  1277 *    lower word of this instruction (in the same way that the other
       
  1278 *    parameters are used).  Likewise, a value of 0xF800 (2047) would
       
  1279 *    cause an interrupt to be generated for every frame, because all
       
  1280 *    standard Ethernet frames are <= 2047 bytes in length.
       
  1281 *************************************************************************/
       
  1282 
       
  1283 /* if you wish to disable the ucode functionality, while maintaining the
       
  1284  * workarounds it provides, set the following defines to:
       
  1285  * BUNDLESMALL 0
       
  1286  * BUNDLEMAX 1
       
  1287  * INTDELAY 1
       
  1288  */
       
  1289 #define BUNDLESMALL 1
       
  1290 #define BUNDLEMAX (u16)6
       
  1291 #define INTDELAY (u16)1536 /* 0x600 */
       
  1292 
       
  1293 /* Initialize firmware */
       
  1294 static const struct firmware *e100_request_firmware(struct nic *nic)
       
  1295 {
       
  1296 	const char *fw_name;
       
  1297 	const struct firmware *fw;
       
  1298 	u8 timer, bundle, min_size;
       
  1299 	int err;
       
  1300 
       
  1301 	/* do not load u-code for ICH devices */
       
  1302 	if (nic->flags & ich)
       
  1303 		return NULL;
       
  1304 
       
  1305 	/* Search for ucode match against h/w revision */
       
  1306 	if (nic->mac == mac_82559_D101M)
       
  1307 		fw_name = FIRMWARE_D101M;
       
  1308 	else if (nic->mac == mac_82559_D101S)
       
  1309 		fw_name = FIRMWARE_D101S;
       
  1310 	else if (nic->mac == mac_82551_F || nic->mac == mac_82551_10)
       
  1311 		fw_name = FIRMWARE_D102E;
       
  1312 	else /* No ucode on other devices */
       
  1313 		return NULL;
       
  1314 
       
  1315 	err = request_firmware(&fw, fw_name, &nic->pdev->dev);
       
  1316 	if (err) {
       
  1317 		DPRINTK(PROBE, ERR, "Failed to load firmware \"%s\": %d\n",
       
  1318 			fw_name, err);
       
  1319 		return ERR_PTR(err);
       
  1320 	}
       
  1321 	/* Firmware should be precisely UCODE_SIZE (words) plus three bytes
       
  1322 	   indicating the offsets for BUNDLESMALL, BUNDLEMAX, INTDELAY */
       
  1323 	if (fw->size != UCODE_SIZE * 4 + 3) {
       
  1324 		DPRINTK(PROBE, ERR, "Firmware \"%s\" has wrong size %zu\n",
       
  1325 			fw_name, fw->size);
       
  1326 		release_firmware(fw);
       
  1327 		return ERR_PTR(-EINVAL);
       
  1328 	}
       
  1329 
       
  1330 	/* Read timer, bundle and min_size from end of firmware blob */
       
  1331 	timer = fw->data[UCODE_SIZE * 4];
       
  1332 	bundle = fw->data[UCODE_SIZE * 4 + 1];
       
  1333 	min_size = fw->data[UCODE_SIZE * 4 + 2];
       
  1334 
       
  1335 	if (timer >= UCODE_SIZE || bundle >= UCODE_SIZE ||
       
  1336 	    min_size >= UCODE_SIZE) {
       
  1337 		DPRINTK(PROBE, ERR,
       
  1338 			"\"%s\" has bogus offset values (0x%x,0x%x,0x%x)\n",
       
  1339 			fw_name, timer, bundle, min_size);
       
  1340 		release_firmware(fw);
       
  1341 		return ERR_PTR(-EINVAL);
       
  1342 	}
       
  1343 	/* OK, firmware is validated and ready to use... */
       
  1344 	return fw;
       
  1345 }
       
  1346 
       
/* Prepare a CB carrying the CPUSaver microcode.
 * The timer/bundle/min_size word offsets come from the trailing
 * three bytes of the blob and were range-checked by
 * e100_request_firmware(); the low 16 bits of those ucode words are
 * patched with the driver's tunables. */
static void e100_setup_ucode(struct nic *nic, struct cb *cb,
			     struct sk_buff *skb)
{
	const struct firmware *fw = (void *)skb;
	u8 timer, bundle, min_size;

	/* It's not a real skb; we just abused the fact that e100_exec_cb
	   will pass it through to here... */
	cb->skb = NULL;

	/* firmware is stored as little endian already */
	memcpy(cb->u.ucode, fw->data, UCODE_SIZE * 4);

	/* Read timer, bundle and min_size from end of firmware blob */
	timer = fw->data[UCODE_SIZE * 4];
	bundle = fw->data[UCODE_SIZE * 4 + 1];
	min_size = fw->data[UCODE_SIZE * 4 + 2];

	/* Insert user-tunable settings in cb->u.ucode */
	cb->u.ucode[timer] &= cpu_to_le32(0xFFFF0000);
	cb->u.ucode[timer] |= cpu_to_le32(INTDELAY);
	cb->u.ucode[bundle] &= cpu_to_le32(0xFFFF0000);
	cb->u.ucode[bundle] |= cpu_to_le32(BUNDLEMAX);
	cb->u.ucode[min_size] &= cpu_to_le32(0xFFFF0000);
	cb->u.ucode[min_size] |= cpu_to_le32((BUNDLESMALL) ? 0xFFFF : 0xFF80);

	/* cb_el: interrupt/stop after this (single) ucode command */
	cb->command = cpu_to_le16(cb_ucode | cb_el);
}
       
  1375 
       
/* Load the CPUSaver microcode (if any) and wait for completion.
 * Returns 0 on success or when no ucode is required (PTR_ERR(NULL)
 * is 0 - intentional), a negative errno on failure.  May block for
 * up to 500ms while polling the CB status. */
static inline int e100_load_ucode_wait(struct nic *nic)
{
	const struct firmware *fw;
	int err = 0, counter = 50;
	struct cb *cb = nic->cb_to_clean;

	fw = e100_request_firmware(nic);
	/* If it's NULL, then no ucode is required */
	if (!fw || IS_ERR(fw))
		return PTR_ERR(fw);

	if ((err = e100_exec_cb(nic, (void *)fw, e100_setup_ucode)))
		DPRINTK(PROBE,ERR, "ucode cmd failed with error %d\n", err);

	/* must restart cuc */
	nic->cuc_cmd = cuc_start;

	/* wait for completion */
	e100_write_flush(nic);
	udelay(10);

	/* wait for possibly (ouch) 500ms */
	while (!(cb->status & cpu_to_le16(cb_complete))) {
		msleep(10);
		if (!--counter) break;
	}

	/* ack any interrupts, something could have been set */
	iowrite8(~0, &nic->csr->scb.stat_ack);

	/* if the command failed, or is not OK, notify and return */
	if (!counter || !(cb->status & cpu_to_le16(cb_ok))) {
		DPRINTK(PROBE,ERR, "ucode load failed\n");
		err = -EPERM;
	}

	return err;
}
       
  1414 
       
/* Prepare a CB that loads the interface's individual (MAC) address
 * into the controller's address filter. */
static void e100_setup_iaaddr(struct nic *nic, struct cb *cb,
	struct sk_buff *skb)
{
	cb->command = cpu_to_le16(cb_iaaddr);
	memcpy(cb->u.iaaddr, nic->netdev->dev_addr, ETH_ALEN);
}
       
  1421 
       
  1422 static void e100_dump(struct nic *nic, struct cb *cb, struct sk_buff *skb)
       
  1423 {
       
  1424 	cb->command = cpu_to_le16(cb_dump);
       
  1425 	cb->u.dump_buffer_addr = cpu_to_le32(nic->dma_addr +
       
  1426 		offsetof(struct mem, dump_buf));
       
  1427 }
       
  1428 
       
  1429 static int e100_phy_check_without_mii(struct nic *nic)
       
  1430 {
       
  1431 	u8 phy_type;
       
  1432 	int without_mii;
       
  1433 
       
  1434 	phy_type = (nic->eeprom[eeprom_phy_iface] >> 8) & 0x0f;
       
  1435 
       
  1436 	switch (phy_type) {
       
  1437 	case NoSuchPhy: /* Non-MII PHY; UNTESTED! */
       
  1438 	case I82503: /* Non-MII PHY; UNTESTED! */
       
  1439 	case S80C24: /* Non-MII PHY; tested and working */
       
  1440 		/* paragraph from the FreeBSD driver, "FXP_PHY_80C24":
       
  1441 		 * The Seeq 80c24 AutoDUPLEX(tm) Ethernet Interface Adapter
       
  1442 		 * doesn't have a programming interface of any sort.  The
       
  1443 		 * media is sensed automatically based on how the link partner
       
  1444 		 * is configured.  This is, in essence, manual configuration.
       
  1445 		 */
       
  1446 		DPRINTK(PROBE, INFO,
       
  1447 			 "found MII-less i82503 or 80c24 or other PHY\n");
       
  1448 
       
  1449 		nic->mdio_ctrl = mdio_ctrl_phy_mii_emulated;
       
  1450 		nic->mii.phy_id = 0; /* is this ok for an MII-less PHY? */
       
  1451 
       
  1452 		/* these might be needed for certain MII-less cards...
       
  1453 		 * nic->flags |= ich;
       
  1454 		 * nic->flags |= ich_10h_workaround; */
       
  1455 
       
  1456 		without_mii = 1;
       
  1457 		break;
       
  1458 	default:
       
  1459 		without_mii = 0;
       
  1460 		break;
       
  1461 	}
       
  1462 	return without_mii;
       
  1463 }
       
  1464 
       
  1465 #define NCONFIG_AUTO_SWITCH	0x0080
       
  1466 #define MII_NSC_CONG		MII_RESV1
       
  1467 #define NSC_CONG_ENABLE		0x0100
       
  1468 #define NSC_CONG_TXREADY	0x0400
       
  1469 #define ADVERTISE_FC_SUPPORTED	0x0400
       
/* Locate the PHY, select it, and apply per-PHY quirks (NSC congestion
 * control disable, 82552 flow-control advertisement, MDI/MDI-X
 * auto-switch configuration).
 *
 * Returns 0 on success — including the known MII-less variants detected
 * via e100_phy_check_without_mii() — or -EAGAIN when no PHY responds
 * and the EEPROM does not indicate a known MII-less part. */
static int e100_phy_init(struct nic *nic)
{
	struct net_device *netdev = nic->netdev;
	u32 addr;
	u16 bmcr, stat, id_lo, id_hi, cong;

	/* Discover phy addr by searching addrs in order {1,0,2,..., 31} */
	for (addr = 0; addr < 32; addr++) {
		nic->mii.phy_id = (addr == 0) ? 1 : (addr == 1) ? 0 : addr;
		bmcr = mdio_read(netdev, nic->mii.phy_id, MII_BMCR);
		/* BMSR is read twice; presumably to clear its latched
		 * bits so 'stat' reflects the current state — standard
		 * MII behavior, NOTE(review): confirm for this part. */
		stat = mdio_read(netdev, nic->mii.phy_id, MII_BMSR);
		stat = mdio_read(netdev, nic->mii.phy_id, MII_BMSR);
		/* All-ones or all-zero register reads mean nothing is
		 * answering at this address. */
		if (!((bmcr == 0xFFFF) || ((stat == 0) && (bmcr == 0))))
			break;
	}
	if (addr == 32) {
		/* uhoh, no PHY detected: check whether we seem to be some
		 * weird, rare variant which is *known* to not have any MII.
		 * But do this AFTER MII checking only, since this does
		 * lookup of EEPROM values which may easily be unreliable. */
		if (e100_phy_check_without_mii(nic))
			return 0; /* simply return and hope for the best */
		else {
			/* for unknown cases log a fatal error */
			DPRINTK(HW, ERR,
				"Failed to locate any known PHY, aborting.\n");
			return -EAGAIN;
		}
	} else
		DPRINTK(HW, DEBUG, "phy_addr = %d\n", nic->mii.phy_id);

	/* Isolate all the PHY ids */
	for (addr = 0; addr < 32; addr++)
		mdio_write(netdev, addr, MII_BMCR, BMCR_ISOLATE);
	/* Select the discovered PHY */
	bmcr &= ~BMCR_ISOLATE;
	mdio_write(netdev, nic->mii.phy_id, MII_BMCR, bmcr);

	/* Get phy ID */
	id_lo = mdio_read(netdev, nic->mii.phy_id, MII_PHYSID1);
	id_hi = mdio_read(netdev, nic->mii.phy_id, MII_PHYSID2);
	nic->phy = (u32)id_hi << 16 | (u32)id_lo;
	DPRINTK(HW, DEBUG, "phy ID = 0x%08X\n", nic->phy);

	/* Handle National tx phys */
#define NCS_PHY_MODEL_MASK	0xFFF0FFFF
	if ((nic->phy & NCS_PHY_MODEL_MASK) == phy_nsc_tx) {
		/* Disable congestion control */
		cong = mdio_read(netdev, nic->mii.phy_id, MII_NSC_CONG);
		cong |= NSC_CONG_TXREADY;
		cong &= ~NSC_CONG_ENABLE;
		mdio_write(netdev, nic->mii.phy_id, MII_NSC_CONG, cong);
	}

	if (nic->phy == phy_82552_v) {
		u16 advert = mdio_read(netdev, nic->mii.phy_id, MII_ADVERTISE);

		/* assign special tweaked mdio_ctrl() function */
		nic->mdio_ctrl = mdio_ctrl_phy_82552_v;

		/* Workaround Si not advertising flow-control during autoneg */
		advert |= ADVERTISE_PAUSE_CAP | ADVERTISE_PAUSE_ASYM;
		mdio_write(netdev, nic->mii.phy_id, MII_ADVERTISE, advert);

		/* Reset for the above changes to take effect */
		bmcr = mdio_read(netdev, nic->mii.phy_id, MII_BMCR);
		bmcr |= BMCR_RESET;
		mdio_write(netdev, nic->mii.phy_id, MII_BMCR, bmcr);
	} else if ((nic->mac >= mac_82550_D102) || ((nic->flags & ich) &&
	   (mdio_read(netdev, nic->mii.phy_id, MII_TPISTATUS) & 0x8000) &&
		!(nic->eeprom[eeprom_cnfg_mdix] & eeprom_mdix_enabled))) {
		/* enable/disable MDI/MDI-X auto-switching. */
		mdio_write(netdev, nic->mii.phy_id, MII_NCONFIG,
				nic->mii.force_media ? 0 : NCONFIG_AUTO_SWITCH);
	}

	return 0;
}
       
  1548 
       
  1549 static int e100_hw_init(struct nic *nic)
       
  1550 {
       
  1551 	int err;
       
  1552 
       
  1553 	e100_hw_reset(nic);
       
  1554 
       
  1555 	DPRINTK(HW, ERR, "e100_hw_init\n");
       
  1556 	if (!in_interrupt() && (err = e100_self_test(nic)))
       
  1557 		return err;
       
  1558 
       
  1559 	if ((err = e100_phy_init(nic)))
       
  1560 		return err;
       
  1561 	if ((err = e100_exec_cmd(nic, cuc_load_base, 0)))
       
  1562 		return err;
       
  1563 	if ((err = e100_exec_cmd(nic, ruc_load_base, 0)))
       
  1564 		return err;
       
  1565 	if ((err = e100_load_ucode_wait(nic)))
       
  1566 		return err;
       
  1567 	if ((err = e100_exec_cb(nic, NULL, e100_configure)))
       
  1568 		return err;
       
  1569 	if ((err = e100_exec_cb(nic, NULL, e100_setup_iaaddr)))
       
  1570 		return err;
       
  1571 	if ((err = e100_exec_cmd(nic, cuc_dump_addr,
       
  1572 		nic->dma_addr + offsetof(struct mem, stats))))
       
  1573 		return err;
       
  1574 	if ((err = e100_exec_cmd(nic, cuc_dump_reset, 0)))
       
  1575 		return err;
       
  1576 
       
  1577 	e100_disable_irq(nic);
       
  1578 
       
  1579 	return 0;
       
  1580 }
       
  1581 
       
  1582 static void e100_multi(struct nic *nic, struct cb *cb, struct sk_buff *skb)
       
  1583 {
       
  1584 	struct net_device *netdev = nic->netdev;
       
  1585 	struct dev_mc_list *list = netdev->mc_list;
       
  1586 	u16 i, count = min(netdev->mc_count, E100_MAX_MULTICAST_ADDRS);
       
  1587 
       
  1588 	cb->command = cpu_to_le16(cb_multi);
       
  1589 	cb->u.multi.count = cpu_to_le16(count * ETH_ALEN);
       
  1590 	for (i = 0; list && i < count; i++, list = list->next)
       
  1591 		memcpy(&cb->u.multi.addr[i*ETH_ALEN], &list->dmi_addr,
       
  1592 			ETH_ALEN);
       
  1593 }
       
  1594 
       
  1595 static void e100_set_multicast_list(struct net_device *netdev)
       
  1596 {
       
  1597 	struct nic *nic = netdev_priv(netdev);
       
  1598 
       
  1599 	DPRINTK(HW, DEBUG, "mc_count=%d, flags=0x%04X\n",
       
  1600 		netdev->mc_count, netdev->flags);
       
  1601 
       
  1602 	if (netdev->flags & IFF_PROMISC)
       
  1603 		nic->flags |= promiscuous;
       
  1604 	else
       
  1605 		nic->flags &= ~promiscuous;
       
  1606 
       
  1607 	if (netdev->flags & IFF_ALLMULTI ||
       
  1608 		netdev->mc_count > E100_MAX_MULTICAST_ADDRS)
       
  1609 		nic->flags |= multicast_all;
       
  1610 	else
       
  1611 		nic->flags &= ~multicast_all;
       
  1612 
       
  1613 	e100_exec_cb(nic, NULL, e100_configure);
       
  1614 	e100_exec_cb(nic, NULL, e100_multi);
       
  1615 }
       
  1616 
       
/* Harvest the statistics the controller dumped into nic->mem->stats and
 * fold them into the netdev counters, then issue the next cuc_dump_reset
 * so fresh results are ready for the following watchdog tick.
 *
 * The "dump complete" marker is the last word of the stats area, whose
 * position depends on the MAC generation (newer MACs append counters). */
static void e100_update_stats(struct nic *nic)
{
	struct net_device *dev = nic->netdev;
	struct net_device_stats *ns = &dev->stats;
	struct stats *s = &nic->mem->stats;
	/* Select the completion marker for this MAC generation. */
	__le32 *complete = (nic->mac < mac_82558_D101_A4) ? &s->fc_xmt_pause :
		(nic->mac < mac_82559_D101M) ? (__le32 *)&s->xmt_tco_frames :
		&s->complete;

	/* Device's stats reporting may take several microseconds to
	 * complete, so we're always waiting for results of the
	 * previous command. */

	if (*complete == cpu_to_le32(cuc_dump_reset_complete)) {
		/* Clear the marker so the next dump's completion is
		 * detectable. */
		*complete = 0;
		nic->tx_frames = le32_to_cpu(s->tx_good_frames);
		nic->tx_collisions = le32_to_cpu(s->tx_total_collisions);
		ns->tx_aborted_errors += le32_to_cpu(s->tx_max_collisions);
		ns->tx_window_errors += le32_to_cpu(s->tx_late_collisions);
		ns->tx_carrier_errors += le32_to_cpu(s->tx_lost_crs);
		ns->tx_fifo_errors += le32_to_cpu(s->tx_underruns);
		ns->collisions += nic->tx_collisions;
		ns->tx_errors += le32_to_cpu(s->tx_max_collisions) +
			le32_to_cpu(s->tx_lost_crs);
		ns->rx_length_errors += le32_to_cpu(s->rx_short_frame_errors) +
			nic->rx_over_length_errors;
		ns->rx_crc_errors += le32_to_cpu(s->rx_crc_errors);
		ns->rx_frame_errors += le32_to_cpu(s->rx_alignment_errors);
		ns->rx_over_errors += le32_to_cpu(s->rx_overrun_errors);
		ns->rx_fifo_errors += le32_to_cpu(s->rx_overrun_errors);
		ns->rx_missed_errors += le32_to_cpu(s->rx_resource_errors);
		ns->rx_errors += le32_to_cpu(s->rx_crc_errors) +
			le32_to_cpu(s->rx_alignment_errors) +
			le32_to_cpu(s->rx_short_frame_errors) +
			le32_to_cpu(s->rx_cdt_errors);
		nic->tx_deferred += le32_to_cpu(s->tx_deferred);
		nic->tx_single_collisions +=
			le32_to_cpu(s->tx_single_collisions);
		nic->tx_multiple_collisions +=
			le32_to_cpu(s->tx_multiple_collisions);
		/* Extra counters only exist on 82558 D101-A4 and later. */
		if (nic->mac >= mac_82558_D101_A4) {
			nic->tx_fc_pause += le32_to_cpu(s->fc_xmt_pause);
			nic->rx_fc_pause += le32_to_cpu(s->fc_rcv_pause);
			nic->rx_fc_unsupported +=
				le32_to_cpu(s->fc_rcv_unsupported);
			/* TCO counters only exist on 82559 D101M+. */
			if (nic->mac >= mac_82559_D101M) {
				nic->tx_tco_frames +=
					le16_to_cpu(s->xmt_tco_frames);
				nic->rx_tco_frames +=
					le16_to_cpu(s->rcv_tco_frames);
			}
		}
	}


	if (e100_exec_cmd(nic, cuc_dump_reset, 0))
		DPRINTK(TX_ERR, DEBUG, "exec cuc_dump_reset failed\n");
}
       
  1675 
       
  1676 static void e100_adjust_adaptive_ifs(struct nic *nic, int speed, int duplex)
       
  1677 {
       
  1678 	/* Adjust inter-frame-spacing (IFS) between two transmits if
       
  1679 	 * we're getting collisions on a half-duplex connection. */
       
  1680 
       
  1681 	if (duplex == DUPLEX_HALF) {
       
  1682 		u32 prev = nic->adaptive_ifs;
       
  1683 		u32 min_frames = (speed == SPEED_100) ? 1000 : 100;
       
  1684 
       
  1685 		if ((nic->tx_frames / 32 < nic->tx_collisions) &&
       
  1686 		   (nic->tx_frames > min_frames)) {
       
  1687 			if (nic->adaptive_ifs < 60)
       
  1688 				nic->adaptive_ifs += 5;
       
  1689 		} else if (nic->tx_frames < min_frames) {
       
  1690 			if (nic->adaptive_ifs >= 5)
       
  1691 				nic->adaptive_ifs -= 5;
       
  1692 		}
       
  1693 		if (nic->adaptive_ifs != prev)
       
  1694 			e100_exec_cb(nic, NULL, e100_configure);
       
  1695 	}
       
  1696 }
       
  1697 
       
/* Periodic timer callback for link and statistics maintenance.
 *
 * EtherCAT mode: only the link state is reported to the master; the
 * timer is NOT re-armed here (NOTE(review): presumably the EtherCAT
 * poll path drives this function instead — confirm against ec_poll).
 *
 * Normal mode: log link transitions, fire a software interrupt to
 * recover from rare rx allocation failures, refresh stats, tune the
 * adaptive IFS, apply the 82557 multicast and ICH 10/half workarounds,
 * and re-arm the timer. */
static void e100_watchdog(unsigned long data)
{
	struct nic *nic = (struct nic *)data;
	struct ethtool_cmd cmd;

	DPRINTK(TIMER, DEBUG, "right now = %ld\n", jiffies);

	/* mii library handles link maintenance tasks */

	if (nic->ecdev) {
		ecdev_set_link(nic->ecdev, mii_link_ok(&nic->mii) ? 1 : 0);
	} else {
		mii_ethtool_gset(&nic->mii, &cmd);

		if (mii_link_ok(&nic->mii) && !netif_carrier_ok(nic->netdev)) {
			printk(KERN_INFO "e100: %s NIC Link is Up %s Mbps %s Duplex\n",
					nic->netdev->name,
					cmd.speed == SPEED_100 ? "100" : "10",
					cmd.duplex == DUPLEX_FULL ? "Full" : "Half");
		} else if (!mii_link_ok(&nic->mii) && netif_carrier_ok(nic->netdev)) {
			printk(KERN_INFO "e100: %s NIC Link is Down\n",
					nic->netdev->name);
		}

		mii_check_link(&nic->mii);

		/* Software generated interrupt to recover from (rare) Rx
		 * allocation failure.
		 * Unfortunately have to use a spinlock to not re-enable interrupts
		 * accidentally, due to hardware that shares a register between the
		 * interrupt mask bit and the SW Interrupt generation bit */
		spin_lock_irq(&nic->cmd_lock);
		iowrite8(ioread8(&nic->csr->scb.cmd_hi) | irq_sw_gen,&nic->csr->scb.cmd_hi);
		/* flush the posted write before releasing the lock */
		e100_write_flush(nic);
		spin_unlock_irq(&nic->cmd_lock);

		e100_update_stats(nic);
		e100_adjust_adaptive_ifs(nic, cmd.speed, cmd.duplex);

		if (nic->mac <= mac_82557_D100_C)
			/* Issue a multicast command to workaround a 557 lock up */
			e100_set_multicast_list(nic->netdev);

		if (nic->flags & ich && cmd.speed==SPEED_10 && cmd.duplex==DUPLEX_HALF)
			/* Need SW workaround for ICH[x] 10Mbps/half duplex Tx hang. */
			nic->flags |= ich_10h_workaround;
		else
			nic->flags &= ~ich_10h_workaround;

		/* re-arm for the next tick */
		mod_timer(&nic->watchdog,
				round_jiffies(jiffies + E100_WATCHDOG_PERIOD));
	}
}
       
  1751 
       
/* CB setup callback for transmit: fill a TCB describing a
 * single-fragment skb (one TBD, flexible mode).
 *
 * NOTE(review): the pci_map_single() result is not checked for mapping
 * failure here — the original author flagged this too; the callback's
 * void return gives no way to propagate such an error. */
static void e100_xmit_prepare(struct nic *nic, struct cb *cb,
	struct sk_buff *skb)
{
	cb->command = nic->tx_command;
	/* interrupt every 16 packets regardless of delay */
	if ((nic->cbs_avail & ~15) == nic->cbs_avail)
		cb->command |= cpu_to_le16(cb_i);
	/* The TBD lives inside the CB itself; point the array at it. */
	cb->u.tcb.tbd_array = cb->dma_addr + offsetof(struct cb, u.tcb.tbd);
	cb->u.tcb.tcb_byte_count = 0;
	cb->u.tcb.threshold = nic->tx_threshold;
	cb->u.tcb.tbd_count = 1;
	cb->u.tcb.tbd.buf_addr = cpu_to_le32(pci_map_single(nic->pdev,
		skb->data, skb->len, PCI_DMA_TODEVICE));
	/* check for mapping failure? */
	cb->u.tcb.tbd.size = cpu_to_le16(skb->len);
}
       
  1768 
       
  1769 static int e100_xmit_frame(struct sk_buff *skb, struct net_device *netdev)
       
  1770 {
       
  1771 	struct nic *nic = netdev_priv(netdev);
       
  1772 	int err;
       
  1773 
       
  1774 	if (nic->flags & ich_10h_workaround) {
       
  1775 		/* SW workaround for ICH[x] 10Mbps/half duplex Tx hang.
       
  1776 		   Issue a NOP command followed by a 1us delay before
       
  1777 		   issuing the Tx command. */
       
  1778 		if (e100_exec_cmd(nic, cuc_nop, 0))
       
  1779 			DPRINTK(TX_ERR, DEBUG, "exec cuc_nop failed\n");
       
  1780 		udelay(1);
       
  1781 	}
       
  1782 
       
  1783 	err = e100_exec_cb(nic, skb, e100_xmit_prepare);
       
  1784 
       
  1785 	switch (err) {
       
  1786 	case -ENOSPC:
       
  1787 		/* We queued the skb, but now we're out of space. */
       
  1788 		DPRINTK(TX_ERR, DEBUG, "No space for CB\n");
       
  1789 		if (!nic->ecdev)
       
  1790 			netif_stop_queue(netdev);
       
  1791 		break;
       
  1792 	case -ENOMEM:
       
  1793 		/* This is a hard error - log it. */
       
  1794 		DPRINTK(TX_ERR, DEBUG, "Out of Tx resources, returning skb\n");
       
  1795 		if (!nic->ecdev)
       
  1796 			netif_stop_queue(netdev);
       
  1797 		return NETDEV_TX_BUSY;
       
  1798 	}
       
  1799 
       
  1800 	netdev->trans_start = jiffies;
       
  1801 	return 0;
       
  1802 }
       
  1803 
       
/* Reclaim transmit CBs the hardware has marked complete: account the
 * packet, unmap its DMA buffer, free the skb (skipped in EtherCAT mode,
 * where the frame memory is not owned by the network stack), and return
 * the CB to the available pool.
 *
 * Returns non-zero when at least one CB was reclaimed; used to decide
 * whether to wake a stopped tx queue.
 *
 * Locking: cb_lock is taken only in non-EtherCAT operation.
 * NOTE(review): in EtherCAT mode access is presumably serialized by the
 * master's cyclic processing — confirm. */
static int e100_tx_clean(struct nic *nic)
{
	struct net_device *dev = nic->netdev;
	struct cb *cb;
	int tx_cleaned = 0;

	if (!nic->ecdev)
		spin_lock(&nic->cb_lock);

	/* Clean CBs marked complete */
	for (cb = nic->cb_to_clean;
	    cb->status & cpu_to_le16(cb_complete);
	    cb = nic->cb_to_clean = cb->next) {
		DPRINTK(TX_DONE, DEBUG, "cb[%d]->status = 0x%04X\n",
		        (int)(((void*)cb - (void*)nic->cbs)/sizeof(struct cb)),
		        cb->status);

		/* Only tx CBs carry an skb; command CBs (configure, etc.)
		 * are recycled without accounting. */
		if (likely(cb->skb != NULL)) {
			dev->stats.tx_packets++;
			dev->stats.tx_bytes += cb->skb->len;

			pci_unmap_single(nic->pdev,
				le32_to_cpu(cb->u.tcb.tbd.buf_addr),
				le16_to_cpu(cb->u.tcb.tbd.size),
				PCI_DMA_TODEVICE);
			if (!nic->ecdev)
				dev_kfree_skb_any(cb->skb);
			cb->skb = NULL;
			tx_cleaned = 1;
		}
		/* Clearing status returns ownership of the CB to software. */
		cb->status = 0;
		nic->cbs_avail++;
	}

	if (!nic->ecdev) {
		spin_unlock(&nic->cb_lock);

		/* Recover from running out of Tx resources in xmit_frame */
		if (unlikely(tx_cleaned && netif_queue_stopped(nic->netdev)))
			netif_wake_queue(nic->netdev);
	}

	return tx_cleaned;
}
       
  1848 
       
/* Tear down the CB ring: walk every outstanding CB, unmap and free any
 * attached skb (skbs are not freed in EtherCAT mode), release the DMA-
 * coherent ring memory, and reset the ring pointers/accounting so a
 * later e100_alloc_cbs() starts from a clean state. */
static void e100_clean_cbs(struct nic *nic)
{
	if (nic->cbs) {
		/* cbs_avail == count means every CB has been reclaimed. */
		while (nic->cbs_avail != nic->params.cbs.count) {
			struct cb *cb = nic->cb_to_clean;
			if (cb->skb) {
				pci_unmap_single(nic->pdev,
					le32_to_cpu(cb->u.tcb.tbd.buf_addr),
					le16_to_cpu(cb->u.tcb.tbd.size),
					PCI_DMA_TODEVICE);
				if (!nic->ecdev)
					dev_kfree_skb(cb->skb);
			}
			nic->cb_to_clean = nic->cb_to_clean->next;
			nic->cbs_avail++;
		}
		pci_free_consistent(nic->pdev,
			sizeof(struct cb) * nic->params.cbs.count,
			nic->cbs, nic->cbs_dma_addr);
		nic->cbs = NULL;
		nic->cbs_avail = 0;
	}
	nic->cuc_cmd = cuc_start;
	/* All ring pointers collapse to NULL (nic->cbs) after teardown. */
	nic->cb_to_use = nic->cb_to_send = nic->cb_to_clean =
		nic->cbs;
}
       
  1875 
       
  1876 static int e100_alloc_cbs(struct nic *nic)
       
  1877 {
       
  1878 	struct cb *cb;
       
  1879 	unsigned int i, count = nic->params.cbs.count;
       
  1880 
       
  1881 	nic->cuc_cmd = cuc_start;
       
  1882 	nic->cb_to_use = nic->cb_to_send = nic->cb_to_clean = NULL;
       
  1883 	nic->cbs_avail = 0;
       
  1884 
       
  1885 	nic->cbs = pci_alloc_consistent(nic->pdev,
       
  1886 		sizeof(struct cb) * count, &nic->cbs_dma_addr);
       
  1887 	if (!nic->cbs)
       
  1888 		return -ENOMEM;
       
  1889 
       
  1890 	for (cb = nic->cbs, i = 0; i < count; cb++, i++) {
       
  1891 		cb->next = (i + 1 < count) ? cb + 1 : nic->cbs;
       
  1892 		cb->prev = (i == 0) ? nic->cbs + count - 1 : cb - 1;
       
  1893 
       
  1894 		cb->dma_addr = nic->cbs_dma_addr + i * sizeof(struct cb);
       
  1895 		cb->link = cpu_to_le32(nic->cbs_dma_addr +
       
  1896 			((i+1) % count) * sizeof(struct cb));
       
  1897 		cb->skb = NULL;
       
  1898 	}
       
  1899 
       
  1900 	nic->cb_to_use = nic->cb_to_send = nic->cb_to_clean = nic->cbs;
       
  1901 	nic->cbs_avail = count;
       
  1902 
       
  1903 	return 0;
       
  1904 }
       
  1905 
       
  1906 static inline void e100_start_receiver(struct nic *nic, struct rx *rx)
       
  1907 {
       
  1908 	if (!nic->rxs) return;
       
  1909 	if (RU_SUSPENDED != nic->ru_running) return;
       
  1910 
       
  1911 	/* handle init time starts */
       
  1912 	if (!rx) rx = nic->rxs;
       
  1913 
       
  1914 	/* (Re)start RU if suspended or idle and RFA is non-NULL */
       
  1915 	if (rx->skb) {
       
  1916 		e100_exec_cmd(nic, ruc_start, rx->dma_addr);
       
  1917 		nic->ru_running = RU_RUNNING;
       
  1918 	}
       
  1919 }
       
  1920 
       
  1921 #define RFD_BUF_LEN (sizeof(struct rfd) + VLAN_ETH_FRAME_LEN)
       
/* Allocate, initialize, and DMA-map one receive buffer (an RFD header
 * followed by frame space), then chain it onto the end of the RFA by
 * patching the previous RFD's link field.
 *
 * Returns 0 on success, -ENOMEM on allocation or mapping failure (the
 * rx slot is left empty in that case). */
static int e100_rx_alloc_skb(struct nic *nic, struct rx *rx)
{
	if (!(rx->skb = netdev_alloc_skb(nic->netdev, RFD_BUF_LEN + NET_IP_ALIGN)))
		return -ENOMEM;

	/* Align, init, and map the RFD. */
	skb_reserve(rx->skb, NET_IP_ALIGN);
	/* Seed the buffer head with the blank RFD template. */
	skb_copy_to_linear_data(rx->skb, &nic->blank_rfd, sizeof(struct rfd));
	/* Bidirectional: hardware writes the frame, software re-reads
	 * and rewrites the RFD header in place. */
	rx->dma_addr = pci_map_single(nic->pdev, rx->skb->data,
		RFD_BUF_LEN, PCI_DMA_BIDIRECTIONAL);

	if (pci_dma_mapping_error(nic->pdev, rx->dma_addr)) {
		dev_kfree_skb_any(rx->skb);
		rx->skb = NULL;
		rx->dma_addr = 0;
		return -ENOMEM;
	}

	/* Link the RFD to end of RFA by linking previous RFD to
	 * this one.  We are safe to touch the previous RFD because
	 * it is protected by the before last buffer's el bit being set */
	if (rx->prev->skb) {
		struct rfd *prev_rfd = (struct rfd *)rx->prev->skb->data;
		put_unaligned_le32(rx->dma_addr, &prev_rfd->link);
		/* Push the updated link out to the device's view. */
		pci_dma_sync_single_for_device(nic->pdev, rx->prev->dma_addr,
			sizeof(struct rfd), PCI_DMA_BIDIRECTIONAL);
	}

	return 0;
}
       
  1952 
       
/* Examine one RFD and, if the hardware completed it, hand the frame up
 * (to the stack, or to the EtherCAT master in ecdev mode).
 *
 * Returns 0 when a frame was consumed, -EAGAIN when the work quota is
 * exhausted, -ENODATA when the RFD is not yet complete.
 *
 * In EtherCAT mode the skb is recycled in place (re-blanked, re-mapped,
 * re-linked) instead of being replaced by e100_rx_clean()'s refill. */
static int e100_rx_indicate(struct nic *nic, struct rx *rx,
	unsigned int *work_done, unsigned int work_to_do)
{
	struct net_device *dev = nic->netdev;
	struct sk_buff *skb = rx->skb;
	struct rfd *rfd = (struct rfd *)skb->data;
	u16 rfd_status, actual_size;

	if (unlikely(work_done && *work_done >= work_to_do))
		return -EAGAIN;

	/* Need to sync before taking a peek at cb_complete bit */
	pci_dma_sync_single_for_cpu(nic->pdev, rx->dma_addr,
		sizeof(struct rfd), PCI_DMA_BIDIRECTIONAL);
	rfd_status = le16_to_cpu(rfd->status);

	DPRINTK(RX_STATUS, DEBUG, "status=0x%04X\n", rfd_status);

	/* If data isn't ready, nothing to indicate */
	if (unlikely(!(rfd_status & cb_complete))) {
		/* If the next buffer has the el bit, but we think the receiver
		 * is still running, check to see if it really stopped while
		 * we had interrupts off.
		 * This allows for a fast restart without re-enabling
		 * interrupts */
		if ((le16_to_cpu(rfd->command) & cb_el) &&
		    (RU_RUNNING == nic->ru_running))

			if (ioread8(&nic->csr->scb.status) & rus_no_res)
				nic->ru_running = RU_SUSPENDED;
		/* Give the RFD back to the device untouched. */
		pci_dma_sync_single_for_device(nic->pdev, rx->dma_addr,
					       sizeof(struct rfd),
					       PCI_DMA_FROMDEVICE);
		return -ENODATA;
	}

	/* Get actual data size (low 14 bits; clamp against the buffer). */
	actual_size = le16_to_cpu(rfd->actual_size) & 0x3FFF;
	if (unlikely(actual_size > RFD_BUF_LEN - sizeof(struct rfd)))
		actual_size = RFD_BUF_LEN - sizeof(struct rfd);

	/* Get data */
	pci_unmap_single(nic->pdev, rx->dma_addr,
		RFD_BUF_LEN, PCI_DMA_BIDIRECTIONAL);

	/* If this buffer has the el bit, but we think the receiver
	 * is still running, check to see if it really stopped while
	 * we had interrupts off.
	 * This allows for a fast restart without re-enabling interrupts.
	 * This can happen when the RU sees the size change but also sees
	 * the el bit set. */
	if ((le16_to_cpu(rfd->command) & cb_el) &&
	    (RU_RUNNING == nic->ru_running)) {

	    if (ioread8(&nic->csr->scb.status) & rus_no_res)
		nic->ru_running = RU_SUSPENDED;
	}

	if (!nic->ecdev) {
		/* Pull off the RFD and put the actual data (minus eth hdr) */
		skb_reserve(skb, sizeof(struct rfd));
		skb_put(skb, actual_size);
		skb->protocol = eth_type_trans(skb, nic->netdev);
	}

	if (unlikely(!(rfd_status & cb_ok))) {
		if (!nic->ecdev) {
			/* Don't indicate if hardware indicates errors */
			dev_kfree_skb_any(skb);
		}
	} else if (actual_size > ETH_DATA_LEN + VLAN_ETH_HLEN) {
		/* Don't indicate oversized frames */
		nic->rx_over_length_errors++;
		if (!nic->ecdev)
			dev_kfree_skb_any(skb);
	} else {
		dev->stats.rx_packets++;
		dev->stats.rx_bytes += actual_size;
		if (nic->ecdev) {
			/* Hand the raw frame (past the RFD header) to the
			 * EtherCAT master. */
			ecdev_receive(nic->ecdev,
					skb->data + sizeof(struct rfd), actual_size);

			// No need to detect link status as
			// long as frames are received: Reset watchdog.
			nic->ec_watchdog_jiffies = jiffies;
		} else {
			netif_receive_skb(skb);
		}
		if (work_done)
			(*work_done)++;
	}

	if (nic->ecdev) {
		// make receive frame descriptior usable again
		memcpy(skb->data, &nic->blank_rfd, sizeof(struct rfd));
		rx->dma_addr = pci_map_single(nic->pdev, skb->data,
				RFD_BUF_LEN, PCI_DMA_BIDIRECTIONAL);
		/* NOTE(review): a mapping failure only zeroes dma_addr
		 * here; the slot is not otherwise recovered. */
		if (pci_dma_mapping_error(nic->pdev, rx->dma_addr)) {
			rx->dma_addr = 0;
		}

		/* Link the RFD to end of RFA by linking previous RFD to
		 * this one.  We are safe to touch the previous RFD because
		 * it is protected by the before last buffer's el bit being set */
		if (rx->prev->skb) {
			struct rfd *prev_rfd = (struct rfd *)rx->prev->skb->data;
			put_unaligned_le32(rx->dma_addr, &prev_rfd->link);
			pci_dma_sync_single_for_device(nic->pdev, rx->prev->dma_addr,
					sizeof(struct rfd), PCI_DMA_TODEVICE);
		}
	} else {
		/* Slot is empty now; e100_rx_clean() refills it. */
		rx->skb = NULL;
	}

	return 0;
}
       
  2069 
       
/* Process received frames up to the work quota, refill empty rx slots
 * (non-EtherCAT mode only), move the el-bit "stopping point" forward,
 * and restart the receive unit if it ran out of resources.
 *
 * The el-bit scheme: the buffer *before* the last one carries the el
 * (end-of-list) bit and size 0, so the hardware stops there without
 * writing it; this lets software safely edit the last buffer's link. */
static void e100_rx_clean(struct nic *nic, unsigned int *work_done,
	unsigned int work_to_do)
{
	struct rx *rx;
	int restart_required = 0, err = 0;
	struct rx *old_before_last_rx, *new_before_last_rx;
	struct rfd *old_before_last_rfd, *new_before_last_rfd;

	/* Indicate newly arrived packets */
	for (rx = nic->rx_to_clean; rx->skb; rx = nic->rx_to_clean = rx->next) {
		err = e100_rx_indicate(nic, rx, work_done, work_to_do);
		/* Hit quota or no more to clean */
		if (-EAGAIN == err || -ENODATA == err)
			break;
	}


	/* On EAGAIN, hit quota so have more work to do, restart once
	 * cleanup is complete.
	 * Else, are we already rnr? then pay attention!!! this ensures that
	 * the state machine progression never allows a start with a
	 * partially cleaned list, avoiding a race between hardware
	 * and rx_to_clean when in NAPI mode */
	if (-EAGAIN != err && RU_SUSPENDED == nic->ru_running)
		restart_required = 1;

	/* Remember the current stopping point (before-last buffer). */
	old_before_last_rx = nic->rx_to_use->prev->prev;
	old_before_last_rfd = (struct rfd *)old_before_last_rx->skb->data;

	if (!nic->ecdev) {
		/* Alloc new skbs to refill list; in EtherCAT mode the
		 * buffers were recycled in place by e100_rx_indicate(). */
		for(rx = nic->rx_to_use; !rx->skb; rx = nic->rx_to_use = rx->next) {
			if(unlikely(e100_rx_alloc_skb(nic, rx)))
				break; /* Better luck next time (see watchdog) */
		}
	}

	new_before_last_rx = nic->rx_to_use->prev->prev;
	if (new_before_last_rx != old_before_last_rx) {
		/* Set the el-bit on the buffer that is before the last buffer.
		 * This lets us update the next pointer on the last buffer
		 * without worrying about hardware touching it.
		 * We set the size to 0 to prevent hardware from touching this
		 * buffer.
		 * When the hardware hits the before last buffer with el-bit
		 * and size of 0, it will RNR interrupt, the RUS will go into
		 * the No Resources state.  It will not complete nor write to
		 * this buffer. */
		new_before_last_rfd =
			(struct rfd *)new_before_last_rx->skb->data;
		new_before_last_rfd->size = 0;
		new_before_last_rfd->command |= cpu_to_le16(cb_el);
		pci_dma_sync_single_for_device(nic->pdev,
			new_before_last_rx->dma_addr, sizeof(struct rfd),
			PCI_DMA_BIDIRECTIONAL);

		/* Now that we have a new stopping point, we can clear the old
		 * stopping point.  We must sync twice to get the proper
		 * ordering on the hardware side of things. */
		old_before_last_rfd->command &= ~cpu_to_le16(cb_el);
		pci_dma_sync_single_for_device(nic->pdev,
			old_before_last_rx->dma_addr, sizeof(struct rfd),
			PCI_DMA_BIDIRECTIONAL);
		old_before_last_rfd->size = cpu_to_le16(VLAN_ETH_FRAME_LEN);
		pci_dma_sync_single_for_device(nic->pdev,
			old_before_last_rx->dma_addr, sizeof(struct rfd),
			PCI_DMA_BIDIRECTIONAL);
	}

	if (restart_required) {
		// ack the rnr?
		iowrite8(stat_ack_rnr, &nic->csr->scb.stat_ack);
		e100_start_receiver(nic, nic->rx_to_clean);
		if (work_done)
			(*work_done)++;
	}
}
       
  2147 
       
  2148 static void e100_rx_clean_list(struct nic *nic)
       
  2149 {
       
  2150 	struct rx *rx;
       
  2151 	unsigned int i, count = nic->params.rfds.count;
       
  2152 
       
  2153 	nic->ru_running = RU_UNINITIALIZED;
       
  2154 
       
  2155 	if (nic->rxs) {
       
  2156 		for (rx = nic->rxs, i = 0; i < count; rx++, i++) {
       
  2157 			if (rx->skb) {
       
  2158 				pci_unmap_single(nic->pdev, rx->dma_addr,
       
  2159 					RFD_BUF_LEN, PCI_DMA_BIDIRECTIONAL);
       
  2160 				dev_kfree_skb(rx->skb);
       
  2161 			}
       
  2162 		}
       
  2163 		kfree(nic->rxs);
       
  2164 		nic->rxs = NULL;
       
  2165 	}
       
  2166 
       
  2167 	nic->rx_to_use = nic->rx_to_clean = NULL;
       
  2168 }
       
  2169 
       
/* Allocate the circular receive ring (nic->params.rfds.count entries),
 * give each entry an skb/RFD, and leave the RU in the SUSPENDED state
 * ready for e100_start_receiver().  Returns 0 or -ENOMEM (on failure
 * any partially built ring is freed via e100_rx_clean_list()). */
static int e100_rx_alloc_list(struct nic *nic)
{
	struct rx *rx;
	unsigned int i, count = nic->params.rfds.count;
	struct rfd *before_last;

	nic->rx_to_use = nic->rx_to_clean = NULL;
	nic->ru_running = RU_UNINITIALIZED;

	/* GFP_ATOMIC: this can be reached from non-sleeping contexts. */
	if (!(nic->rxs = kcalloc(count, sizeof(struct rx), GFP_ATOMIC)))
		return -ENOMEM;

	/* Link the entries into a doubly linked circular list. */
	for (rx = nic->rxs, i = 0; i < count; rx++, i++) {
		rx->next = (i + 1 < count) ? rx + 1 : nic->rxs;
		rx->prev = (i == 0) ? nic->rxs + count - 1 : rx - 1;
		if (e100_rx_alloc_skb(nic, rx)) {
			e100_rx_clean_list(nic);
			return -ENOMEM;
		}
	}

	/* In EtherCAT mode (nic->ecdev) the el-bit marker is not used. */
	if (!nic->ecdev) {
		/* Set the el-bit on the buffer that is before the last buffer.
		 * This lets us update the next pointer on the last buffer without
		 * worrying about hardware touching it.
		 * We set the size to 0 to prevent hardware from touching this buffer.
		 * When the hardware hits the before last buffer with el-bit and size
		 * of 0, it will RNR interrupt, the RU will go into the No Resources
		 * state.  It will not complete nor write to this buffer. */
		rx = nic->rxs->prev->prev;
		before_last = (struct rfd *)rx->skb->data;
		before_last->command |= cpu_to_le16(cb_el);
		before_last->size = 0;
		/* Push the modified RFD out to the device before it runs. */
		pci_dma_sync_single_for_device(nic->pdev, rx->dma_addr,
				sizeof(struct rfd), PCI_DMA_BIDIRECTIONAL);
	}

	nic->rx_to_use = nic->rx_to_clean = nic->rxs;
	nic->ru_running = RU_SUSPENDED;

	return 0;
}
       
  2212 
       
/* Shared interrupt handler.  Reads and acknowledges the SCB stat/ack
 * byte, records an RNR condition for the RX path to recover from, and
 * (in non-EtherCAT mode) kicks NAPI with interrupts disabled. */
static irqreturn_t e100_intr(int irq, void *dev_id)
{
	struct net_device *netdev = dev_id;
	struct nic *nic = netdev_priv(netdev);
	u8 stat_ack = ioread8(&nic->csr->scb.stat_ack);

	DPRINTK(INTR, DEBUG, "stat_ack = 0x%02X\n", stat_ack);

	if (stat_ack == stat_ack_not_ours ||	/* Not our interrupt */
	   stat_ack == stat_ack_not_present)	/* Hardware is ejected */
		return IRQ_NONE;

	/* Ack interrupt(s) */
	iowrite8(stat_ack, &nic->csr->scb.stat_ack);

	/* We hit Receive No Resource (RNR); restart RU after cleaning */
	if (stat_ack & stat_ack_rnr)
		nic->ru_running = RU_SUSPENDED;

	/* EtherCAT devices are polled (e100_ec_poll), so NAPI scheduling
	 * only happens for regular network devices. */
	if (!nic->ecdev && likely(napi_schedule_prep(&nic->napi))) {
		e100_disable_irq(nic);
		__napi_schedule(&nic->napi);
	}

	return IRQ_HANDLED;
}
       
  2239 
       
  2240 void e100_ec_poll(struct net_device *netdev)
       
  2241 {
       
  2242 	struct nic *nic = netdev_priv(netdev);
       
  2243 
       
  2244 	e100_rx_clean(nic, NULL, 100);
       
  2245 	e100_tx_clean(nic);
       
  2246 
       
  2247     if (jiffies - nic->ec_watchdog_jiffies >= 2 * HZ) {
       
  2248         e100_watchdog((unsigned long) nic);
       
  2249         nic->ec_watchdog_jiffies = jiffies;
       
  2250     }
       
  2251 }
       
  2252 
       
  2253 
       
  2254 static int e100_poll(struct napi_struct *napi, int budget)
       
  2255 {
       
  2256 	struct nic *nic = container_of(napi, struct nic, napi);
       
  2257 	unsigned int work_done = 0;
       
  2258 
       
  2259 	e100_rx_clean(nic, &work_done, budget);
       
  2260 	e100_tx_clean(nic);
       
  2261 
       
  2262 	/* If budget not fully consumed, exit the polling mode */
       
  2263 	if (work_done < budget) {
       
  2264 		napi_complete(napi);
       
  2265 		e100_enable_irq(nic);
       
  2266 	}
       
  2267 
       
  2268 	return work_done;
       
  2269 }
       
  2270 
       
  2271 #ifdef CONFIG_NET_POLL_CONTROLLER
       
/* netconsole/netpoll hook: emulate one interrupt with the IRQ line
 * masked.  The disable -> intr -> tx_clean -> enable order matters:
 * the handler must not race the real IRQ, and TX must be reaped so
 * netpoll can queue further frames. */
static void e100_netpoll(struct net_device *netdev)
{
	struct nic *nic = netdev_priv(netdev);

	e100_disable_irq(nic);
	e100_intr(nic->pdev->irq, netdev);
	e100_tx_clean(nic);
	e100_enable_irq(nic);
}
       
  2281 #endif
       
  2282 
       
  2283 static int e100_set_mac_address(struct net_device *netdev, void *p)
       
  2284 {
       
  2285 	struct nic *nic = netdev_priv(netdev);
       
  2286 	struct sockaddr *addr = p;
       
  2287 
       
  2288 	if (!is_valid_ether_addr(addr->sa_data))
       
  2289 		return -EADDRNOTAVAIL;
       
  2290 
       
  2291 	memcpy(netdev->dev_addr, addr->sa_data, netdev->addr_len);
       
  2292 	e100_exec_cb(nic, NULL, e100_setup_iaaddr);
       
  2293 
       
  2294 	return 0;
       
  2295 }
       
  2296 
       
  2297 static int e100_change_mtu(struct net_device *netdev, int new_mtu)
       
  2298 {
       
  2299 	if (new_mtu < ETH_ZLEN || new_mtu > ETH_DATA_LEN)
       
  2300 		return -EINVAL;
       
  2301 	netdev->mtu = new_mtu;
       
  2302 	return 0;
       
  2303 }
       
  2304 
       
  2305 static int e100_asf(struct nic *nic)
       
  2306 {
       
  2307 	/* ASF can be enabled from eeprom */
       
  2308 	return((nic->pdev->device >= 0x1050) && (nic->pdev->device <= 0x1057) &&
       
  2309 	   (nic->eeprom[eeprom_config_asf] & eeprom_asf) &&
       
  2310 	   !(nic->eeprom[eeprom_config_asf] & eeprom_gcl) &&
       
  2311 	   ((nic->eeprom[eeprom_smbus_addr] & 0xFF) != 0xFE));
       
  2312 }
       
  2313 
       
/* Bring the interface up: allocate RX ring and command blocks,
 * initialize the hardware, start the receiver, request the IRQ and
 * (non-EtherCAT only) start the watchdog, queue and NAPI.  Errors
 * unwind via the goto-cleanup chain below.  Returns 0 or -errno. */
static int e100_up(struct nic *nic)
{
	int err;

	if ((err = e100_rx_alloc_list(nic)))
		return err;
	if ((err = e100_alloc_cbs(nic)))
		goto err_rx_clean_list;
	if ((err = e100_hw_init(nic)))
		goto err_clean_cbs;
	e100_set_multicast_list(nic->netdev);
	e100_start_receiver(nic, NULL);
	/* The watchdog timer is only used for normal operation; in
	 * EtherCAT mode it is driven from e100_ec_poll() instead. */
	if (!nic->ecdev) {
		mod_timer(&nic->watchdog, jiffies);
	}
	if ((err = request_irq(nic->pdev->irq, e100_intr, IRQF_SHARED,
		nic->netdev->name, nic->netdev)))
		goto err_no_irq;
	if (!nic->ecdev) {
		netif_wake_queue(nic->netdev);
		napi_enable(&nic->napi);
		/* enable ints _after_ enabling poll, preventing a race between
		 * disable ints+schedule */
		e100_enable_irq(nic);
	}
	return 0;

err_no_irq:
	if (!nic->ecdev)
		del_timer_sync(&nic->watchdog);
err_clean_cbs:
	e100_clean_cbs(nic);
err_rx_clean_list:
	e100_rx_clean_list(nic);
	return err;
}
       
  2350 
       
/* Take the interface down, reversing e100_up(): quiesce NAPI and the
 * TX queue (non-EtherCAT only), reset the hardware, release the IRQ,
 * stop the watchdog, and free command blocks and the RX ring. */
static void e100_down(struct nic *nic)
{
	if (!nic->ecdev) {
		/* wait here for poll to complete */
		napi_disable(&nic->napi);
		netif_stop_queue(nic->netdev);
	}
	/* Reset before free_irq() so the chip stops raising interrupts. */
	e100_hw_reset(nic);
	free_irq(nic->pdev->irq, nic->netdev);
	if (!nic->ecdev) {
		del_timer_sync(&nic->watchdog);
		netif_carrier_off(nic->netdev);
	}
	e100_clean_cbs(nic);
	e100_rx_clean_list(nic);
}
       
  2367 
       
  2368 static void e100_tx_timeout(struct net_device *netdev)
       
  2369 {
       
  2370 	struct nic *nic = netdev_priv(netdev);
       
  2371 
       
  2372 	/* Reset outside of interrupt context, to avoid request_irq
       
  2373 	 * in interrupt context */
       
  2374 	schedule_work(&nic->tx_timeout_task);
       
  2375 }
       
  2376 
       
  2377 static void e100_tx_timeout_task(struct work_struct *work)
       
  2378 {
       
  2379 	struct nic *nic = container_of(work, struct nic, tx_timeout_task);
       
  2380 	struct net_device *netdev = nic->netdev;
       
  2381 
       
  2382 	DPRINTK(TX_ERR, DEBUG, "scb.status=0x%02X\n",
       
  2383 		ioread8(&nic->csr->scb.status));
       
  2384 	e100_down(netdev_priv(netdev));
       
  2385 	e100_up(netdev_priv(netdev));
       
  2386 }
       
  2387 
       
/* Run a MAC or PHY loopback self-test.  Returns 0 on success, -EAGAIN
 * when the looped-back frame does not match, or -errno on setup
 * failure.  The caller's rings are expected to be torn down; this
 * function builds and destroys its own RX ring and command blocks. */
static int e100_loopback_test(struct nic *nic, enum loopback loopback_mode)
{
	int err;
	struct sk_buff *skb;

	/* Use driver resources to perform internal MAC or PHY
	 * loopback test.  A single packet is prepared and transmitted
	 * in loopback mode, and the test passes if the received
	 * packet compares byte-for-byte to the transmitted packet. */

	if ((err = e100_rx_alloc_list(nic)))
		return err;
	if ((err = e100_alloc_cbs(nic)))
		goto err_clean_rx;

	/* ICH PHY loopback is broken so do MAC loopback instead */
	if (nic->flags & ich && loopback_mode == lb_phy)
		loopback_mode = lb_mac;

	nic->loopback = loopback_mode;
	if ((err = e100_hw_init(nic)))
		goto err_loopback_none;

	if (loopback_mode == lb_phy)
		mdio_write(nic->netdev, nic->mii.phy_id, MII_BMCR,
			BMCR_LOOPBACK);

	e100_start_receiver(nic, NULL);

	if (!(skb = netdev_alloc_skb(nic->netdev, ETH_DATA_LEN))) {
		err = -ENOMEM;
		goto err_loopback_none;
	}
	/* Transmit one all-0xFF test frame. */
	skb_put(skb, ETH_DATA_LEN);
	memset(skb->data, 0xFF, ETH_DATA_LEN);
	e100_xmit_frame(skb, nic->netdev);

	/* Give the chip time to loop the frame back. */
	msleep(10);

	pci_dma_sync_single_for_cpu(nic->pdev, nic->rx_to_clean->dma_addr,
			RFD_BUF_LEN, PCI_DMA_BIDIRECTIONAL);

	/* Received data starts after the RFD header in the buffer. */
	if (memcmp(nic->rx_to_clean->skb->data + sizeof(struct rfd),
	   skb->data, ETH_DATA_LEN))
		err = -EAGAIN;

err_loopback_none:
	mdio_write(nic->netdev, nic->mii.phy_id, MII_BMCR, 0);
	nic->loopback = lb_none;
	e100_clean_cbs(nic);
	e100_hw_reset(nic);
err_clean_rx:
	e100_rx_clean_list(nic);
	return err;
}
       
  2443 
       
  2444 #define MII_LED_CONTROL	0x1B
       
  2445 #define E100_82552_LED_OVERRIDE 0x19
       
  2446 #define E100_82552_LED_ON       0x000F /* LEDTX and LED_RX both on */
       
  2447 #define E100_82552_LED_OFF      0x000A /* LEDTX and LED_RX both off */
       
  2448 static void e100_blink_led(unsigned long data)
       
  2449 {
       
  2450 	struct nic *nic = (struct nic *)data;
       
  2451 	enum led_state {
       
  2452 		led_on     = 0x01,
       
  2453 		led_off    = 0x04,
       
  2454 		led_on_559 = 0x05,
       
  2455 		led_on_557 = 0x07,
       
  2456 	};
       
  2457 	u16 led_reg = MII_LED_CONTROL;
       
  2458 
       
  2459 	if (nic->phy == phy_82552_v) {
       
  2460 		led_reg = E100_82552_LED_OVERRIDE;
       
  2461 
       
  2462 		nic->leds = (nic->leds == E100_82552_LED_ON) ?
       
  2463 		            E100_82552_LED_OFF : E100_82552_LED_ON;
       
  2464 	} else {
       
  2465 		nic->leds = (nic->leds & led_on) ? led_off :
       
  2466 		            (nic->mac < mac_82559_D101M) ? led_on_557 :
       
  2467 		            led_on_559;
       
  2468 	}
       
  2469 	mdio_write(nic->netdev, nic->mii.phy_id, led_reg, nic->leds);
       
  2470 	mod_timer(&nic->blink_timer, jiffies + HZ / 4);
       
  2471 }
       
  2472 
       
  2473 static int e100_get_settings(struct net_device *netdev, struct ethtool_cmd *cmd)
       
  2474 {
       
  2475 	struct nic *nic = netdev_priv(netdev);
       
  2476 	return mii_ethtool_gset(&nic->mii, cmd);
       
  2477 }
       
  2478 
       
  2479 static int e100_set_settings(struct net_device *netdev, struct ethtool_cmd *cmd)
       
  2480 {
       
  2481 	struct nic *nic = netdev_priv(netdev);
       
  2482 	int err;
       
  2483 
       
  2484 	mdio_write(netdev, nic->mii.phy_id, MII_BMCR, BMCR_RESET);
       
  2485 	err = mii_ethtool_sset(&nic->mii, cmd);
       
  2486 	e100_exec_cb(nic, NULL, e100_configure);
       
  2487 
       
  2488 	return err;
       
  2489 }
       
  2490 
       
  2491 static void e100_get_drvinfo(struct net_device *netdev,
       
  2492 	struct ethtool_drvinfo *info)
       
  2493 {
       
  2494 	struct nic *nic = netdev_priv(netdev);
       
  2495 	strcpy(info->driver, DRV_NAME);
       
  2496 	strcpy(info->version, DRV_VERSION);
       
  2497 	strcpy(info->fw_version, "N/A");
       
  2498 	strcpy(info->bus_info, pci_name(nic->pdev));
       
  2499 }
       
  2500 
       
  2501 #define E100_PHY_REGS 0x1C
       
  2502 static int e100_get_regs_len(struct net_device *netdev)
       
  2503 {
       
  2504 	struct nic *nic = netdev_priv(netdev);
       
  2505 	return 1 + E100_PHY_REGS + sizeof(nic->mem->dump_buf);
       
  2506 }
       
  2507 
       
/* ethtool get_regs: dump an SCB snapshot, the PHY registers and the
 * chip's internal dump buffer into @p (layout matches
 * e100_get_regs_len()). */
static void e100_get_regs(struct net_device *netdev,
	struct ethtool_regs *regs, void *p)
{
	struct nic *nic = netdev_priv(netdev);
	u32 *buff = p;
	int i;

	regs->version = (1 << 24) | nic->pdev->revision;
	/* Word 0: SCB command hi/lo and status packed into one u32. */
	buff[0] = ioread8(&nic->csr->scb.cmd_hi) << 24 |
		ioread8(&nic->csr->scb.cmd_lo) << 16 |
		ioread16(&nic->csr->scb.status);
	/* PHY registers E100_PHY_REGS..0, stored in descending order. */
	for (i = E100_PHY_REGS; i >= 0; i--)
		buff[1 + E100_PHY_REGS - i] =
			mdio_read(netdev, nic->mii.phy_id, i);
	/* Ask the chip to fill dump_buf, give it 10ms, then copy it out. */
	memset(nic->mem->dump_buf, 0, sizeof(nic->mem->dump_buf));
	e100_exec_cb(nic, NULL, e100_dump);
	msleep(10);
	memcpy(&buff[2 + E100_PHY_REGS], nic->mem->dump_buf,
		sizeof(nic->mem->dump_buf));
}
       
  2528 
       
  2529 static void e100_get_wol(struct net_device *netdev, struct ethtool_wolinfo *wol)
       
  2530 {
       
  2531 	struct nic *nic = netdev_priv(netdev);
       
  2532 	wol->supported = (nic->mac >= mac_82558_D101_A4) ?  WAKE_MAGIC : 0;
       
  2533 	wol->wolopts = (nic->flags & wol_magic) ? WAKE_MAGIC : 0;
       
  2534 }
       
  2535 
       
  2536 static int e100_set_wol(struct net_device *netdev, struct ethtool_wolinfo *wol)
       
  2537 {
       
  2538 	struct nic *nic = netdev_priv(netdev);
       
  2539 
       
  2540 	if ((wol->wolopts && wol->wolopts != WAKE_MAGIC) ||
       
  2541 	    !device_can_wakeup(&nic->pdev->dev))
       
  2542 		return -EOPNOTSUPP;
       
  2543 
       
  2544 	if (wol->wolopts)
       
  2545 		nic->flags |= wol_magic;
       
  2546 	else
       
  2547 		nic->flags &= ~wol_magic;
       
  2548 
       
  2549 	device_set_wakeup_enable(&nic->pdev->dev, wol->wolopts);
       
  2550 
       
  2551 	e100_exec_cb(nic, NULL, e100_configure);
       
  2552 
       
  2553 	return 0;
       
  2554 }
       
  2555 
       
  2556 static u32 e100_get_msglevel(struct net_device *netdev)
       
  2557 {
       
  2558 	struct nic *nic = netdev_priv(netdev);
       
  2559 	return nic->msg_enable;
       
  2560 }
       
  2561 
       
  2562 static void e100_set_msglevel(struct net_device *netdev, u32 value)
       
  2563 {
       
  2564 	struct nic *nic = netdev_priv(netdev);
       
  2565 	nic->msg_enable = value;
       
  2566 }
       
  2567 
       
  2568 static int e100_nway_reset(struct net_device *netdev)
       
  2569 {
       
  2570 	struct nic *nic = netdev_priv(netdev);
       
  2571 	return mii_nway_restart(&nic->mii);
       
  2572 }
       
  2573 
       
  2574 static u32 e100_get_link(struct net_device *netdev)
       
  2575 {
       
  2576 	struct nic *nic = netdev_priv(netdev);
       
  2577 	return mii_link_ok(&nic->mii);
       
  2578 }
       
  2579 
       
  2580 static int e100_get_eeprom_len(struct net_device *netdev)
       
  2581 {
       
  2582 	struct nic *nic = netdev_priv(netdev);
       
  2583 	return nic->eeprom_wc << 1;
       
  2584 }
       
  2585 
       
  2586 #define E100_EEPROM_MAGIC	0x1234
       
/* ethtool get_eeprom: copy raw bytes from the cached EEPROM image.
 * NOTE(review): offset/len are not range-checked here — presumably the
 * ethtool core validates them against e100_get_eeprom_len(); confirm. */
static int e100_get_eeprom(struct net_device *netdev,
	struct ethtool_eeprom *eeprom, u8 *bytes)
{
	struct nic *nic = netdev_priv(netdev);

	eeprom->magic = E100_EEPROM_MAGIC;
	memcpy(bytes, &((u8 *)nic->eeprom)[eeprom->offset], eeprom->len);

	return 0;
}
       
  2597 
       
/* ethtool set_eeprom: patch the cached EEPROM image and write the
 * affected region back to the part.  Requires the E100_EEPROM_MAGIC
 * cookie so accidental writes are rejected with -EINVAL. */
static int e100_set_eeprom(struct net_device *netdev,
	struct ethtool_eeprom *eeprom, u8 *bytes)
{
	struct nic *nic = netdev_priv(netdev);

	if (eeprom->magic != E100_EEPROM_MAGIC)
		return -EINVAL;

	memcpy(&((u8 *)nic->eeprom)[eeprom->offset], bytes, eeprom->len);

	/* e100_eeprom_save() works in 16-bit words: start at the word
	 * containing offset and cover (len >> 1) + 1 words so a
	 * byte-unaligned range is still fully written. */
	return e100_eeprom_save(nic, eeprom->offset >> 1,
		(eeprom->len >> 1) + 1);
}
       
  2611 
       
  2612 static void e100_get_ringparam(struct net_device *netdev,
       
  2613 	struct ethtool_ringparam *ring)
       
  2614 {
       
  2615 	struct nic *nic = netdev_priv(netdev);
       
  2616 	struct param_range *rfds = &nic->params.rfds;
       
  2617 	struct param_range *cbs = &nic->params.cbs;
       
  2618 
       
  2619 	ring->rx_max_pending = rfds->max;
       
  2620 	ring->tx_max_pending = cbs->max;
       
  2621 	ring->rx_mini_max_pending = 0;
       
  2622 	ring->rx_jumbo_max_pending = 0;
       
  2623 	ring->rx_pending = rfds->count;
       
  2624 	ring->tx_pending = cbs->count;
       
  2625 	ring->rx_mini_pending = 0;
       
  2626 	ring->rx_jumbo_pending = 0;
       
  2627 }
       
  2628 
       
/* ethtool set_ringparam: clamp the requested RFD/CB counts to the
 * driver's [min, max] ranges and restart the interface to apply them.
 * Mini/jumbo rings are unsupported and rejected with -EINVAL.
 * NOTE(review): the e100_up() return value is ignored, so a failed
 * restart is reported as success — confirm whether that is intended. */
static int e100_set_ringparam(struct net_device *netdev,
	struct ethtool_ringparam *ring)
{
	struct nic *nic = netdev_priv(netdev);
	struct param_range *rfds = &nic->params.rfds;
	struct param_range *cbs = &nic->params.cbs;

	if ((ring->rx_mini_pending) || (ring->rx_jumbo_pending))
		return -EINVAL;

	if (netif_running(netdev))
		e100_down(nic);
	rfds->count = max(ring->rx_pending, rfds->min);
	rfds->count = min(rfds->count, rfds->max);
	cbs->count = max(ring->tx_pending, cbs->min);
	cbs->count = min(cbs->count, cbs->max);
	DPRINTK(DRV, INFO, "Ring Param settings: rx: %d, tx %d\n",
	        rfds->count, cbs->count);
	if (netif_running(netdev))
		e100_up(nic);

	return 0;
}
       
  2652 
       
/* Names reported for the ethtool self-test results; the order must
 * stay in sync with the data[] slots filled in e100_diag_test(). */
static const char e100_gstrings_test[][ETH_GSTRING_LEN] = {
	"Link test     (on/offline)",
	"Eeprom test   (on/offline)",
	"Self test        (offline)",
	"Mac loopback     (offline)",
	"Phy loopback     (offline)",
};
#define E100_TEST_LEN	ARRAY_SIZE(e100_gstrings_test)
       
  2661 
       
/* ethtool self_test: run the link and EEPROM checks always, and the
 * chip self-test plus MAC/PHY loopback tests when offline testing is
 * requested.  A nonzero data[] entry marks the corresponding test
 * (see e100_gstrings_test) as failed. */
static void e100_diag_test(struct net_device *netdev,
	struct ethtool_test *test, u64 *data)
{
	struct ethtool_cmd cmd;
	struct nic *nic = netdev_priv(netdev);
	int i, err;

	memset(data, 0, E100_TEST_LEN * sizeof(u64));
	data[0] = !mii_link_ok(&nic->mii);
	data[1] = e100_eeprom_load(nic);
	if (test->flags & ETH_TEST_FL_OFFLINE) {

		/* save speed, duplex & autoneg settings */
		/* NOTE(review): err is assigned but never checked here
		 * or below — confirm whether failures should be reported. */
		err = mii_ethtool_gset(&nic->mii, &cmd);

		if (netif_running(netdev))
			e100_down(nic);
		data[2] = e100_self_test(nic);
		data[3] = e100_loopback_test(nic, lb_mac);
		data[4] = e100_loopback_test(nic, lb_phy);

		/* restore speed, duplex & autoneg settings */
		err = mii_ethtool_sset(&nic->mii, &cmd);

		if (netif_running(netdev))
			e100_up(nic);
	}
	for (i = 0; i < E100_TEST_LEN; i++)
		test->flags |= data[i] ? ETH_TEST_FL_FAILED : 0;

	/* Give the link time to settle before returning to the user. */
	msleep_interruptible(4 * 1000);
}
       
  2694 
       
/* ethtool phys_id: blink the LEDs for @data seconds (0 means "as long
 * as possible") by running the blink_timer, then restore the LED
 * register to hardware control. */
static int e100_phys_id(struct net_device *netdev, u32 data)
{
	struct nic *nic = netdev_priv(netdev);
	/* 82552 parts use a dedicated LED override register. */
	u16 led_reg = (nic->phy == phy_82552_v) ? E100_82552_LED_OVERRIDE :
	              MII_LED_CONTROL;

	if (!data || data > (u32)(MAX_SCHEDULE_TIMEOUT / HZ))
		data = (u32)(MAX_SCHEDULE_TIMEOUT / HZ);
	mod_timer(&nic->blink_timer, jiffies);
	msleep_interruptible(data * 1000);
	del_timer_sync(&nic->blink_timer);
	/* Writing 0 returns LED control to the hardware defaults. */
	mdio_write(netdev, nic->mii.phy_id, led_reg, 0);

	return 0;
}
       
  2710 
       
/* Names for the ethtool statistics: first the E100_NET_STATS_LEN
 * generic net_device_stats counters, then the driver-specific ones
 * filled in e100_get_ethtool_stats().
 * NOTE(review): E100_NET_STATS_LEN (21) must match both the leading
 * entries of this table and the net_device_stats layout — confirm
 * when changing either. */
static const char e100_gstrings_stats[][ETH_GSTRING_LEN] = {
	"rx_packets", "tx_packets", "rx_bytes", "tx_bytes", "rx_errors",
	"tx_errors", "rx_dropped", "tx_dropped", "multicast", "collisions",
	"rx_length_errors", "rx_over_errors", "rx_crc_errors",
	"rx_frame_errors", "rx_fifo_errors", "rx_missed_errors",
	"tx_aborted_errors", "tx_carrier_errors", "tx_fifo_errors",
	"tx_heartbeat_errors", "tx_window_errors",
	/* device-specific stats */
	"tx_deferred", "tx_single_collisions", "tx_multi_collisions",
	"tx_flow_control_pause", "rx_flow_control_pause",
	"rx_flow_control_unsupported", "tx_tco_packets", "rx_tco_packets",
};
#define E100_NET_STATS_LEN	21
#define E100_STATS_LEN	ARRAY_SIZE(e100_gstrings_stats)
       
  2725 
       
  2726 static int e100_get_sset_count(struct net_device *netdev, int sset)
       
  2727 {
       
  2728 	switch (sset) {
       
  2729 	case ETH_SS_TEST:
       
  2730 		return E100_TEST_LEN;
       
  2731 	case ETH_SS_STATS:
       
  2732 		return E100_STATS_LEN;
       
  2733 	default:
       
  2734 		return -EOPNOTSUPP;
       
  2735 	}
       
  2736 }
       
  2737 
       
/* ethtool get_ethtool_stats: copy the generic net_device counters
 * followed by the driver-specific ones, in e100_gstrings_stats order.
 * NOTE(review): treating &netdev->stats as an unsigned long array
 * assumes every net_device_stats field is an unsigned long laid out
 * consecutively — true for this kernel, but verify on changes. */
static void e100_get_ethtool_stats(struct net_device *netdev,
	struct ethtool_stats *stats, u64 *data)
{
	struct nic *nic = netdev_priv(netdev);
	int i;

	for (i = 0; i < E100_NET_STATS_LEN; i++)
		data[i] = ((unsigned long *)&netdev->stats)[i];

	data[i++] = nic->tx_deferred;
	data[i++] = nic->tx_single_collisions;
	data[i++] = nic->tx_multiple_collisions;
	data[i++] = nic->tx_fc_pause;
	data[i++] = nic->rx_fc_pause;
	data[i++] = nic->rx_fc_unsupported;
	data[i++] = nic->tx_tco_frames;
	data[i++] = nic->rx_tco_frames;
}
       
  2756 
       
  2757 static void e100_get_strings(struct net_device *netdev, u32 stringset, u8 *data)
       
  2758 {
       
  2759 	switch (stringset) {
       
  2760 	case ETH_SS_TEST:
       
  2761 		memcpy(data, *e100_gstrings_test, sizeof(e100_gstrings_test));
       
  2762 		break;
       
  2763 	case ETH_SS_STATS:
       
  2764 		memcpy(data, *e100_gstrings_stats, sizeof(e100_gstrings_stats));
       
  2765 		break;
       
  2766 	}
       
  2767 }
       
  2768 
       
/* ethtool entry points; attached via SET_ETHTOOL_OPS() in e100_probe(). */
static const struct ethtool_ops e100_ethtool_ops = {
	.get_settings		= e100_get_settings,
	.set_settings		= e100_set_settings,
	.get_drvinfo		= e100_get_drvinfo,
	.get_regs_len		= e100_get_regs_len,
	.get_regs		= e100_get_regs,
	.get_wol		= e100_get_wol,
	.set_wol		= e100_set_wol,
	.get_msglevel		= e100_get_msglevel,
	.set_msglevel		= e100_set_msglevel,
	.nway_reset		= e100_nway_reset,
	.get_link		= e100_get_link,
	.get_eeprom_len		= e100_get_eeprom_len,
	.get_eeprom		= e100_get_eeprom,
	.set_eeprom		= e100_set_eeprom,
	.get_ringparam		= e100_get_ringparam,
	.set_ringparam		= e100_set_ringparam,
	.self_test		= e100_diag_test,
	.get_strings		= e100_get_strings,
	.phys_id		= e100_phys_id,
	.get_ethtool_stats	= e100_get_ethtool_stats,
	.get_sset_count		= e100_get_sset_count,
};
       
  2792 
       
  2793 static int e100_do_ioctl(struct net_device *netdev, struct ifreq *ifr, int cmd)
       
  2794 {
       
  2795 	struct nic *nic = netdev_priv(netdev);
       
  2796 
       
  2797 	return generic_mii_ioctl(&nic->mii, if_mii(ifr), cmd, NULL);
       
  2798 }
       
  2799 
       
  2800 static int e100_alloc(struct nic *nic)
       
  2801 {
       
  2802 	nic->mem = pci_alloc_consistent(nic->pdev, sizeof(struct mem),
       
  2803 		&nic->dma_addr);
       
  2804 	return nic->mem ? 0 : -ENOMEM;
       
  2805 }
       
  2806 
       
  2807 static void e100_free(struct nic *nic)
       
  2808 {
       
  2809 	if (nic->mem) {
       
  2810 		pci_free_consistent(nic->pdev, sizeof(struct mem),
       
  2811 			nic->mem, nic->dma_addr);
       
  2812 		nic->mem = NULL;
       
  2813 	}
       
  2814 }
       
  2815 
       
  2816 static int e100_open(struct net_device *netdev)
       
  2817 {
       
  2818 	struct nic *nic = netdev_priv(netdev);
       
  2819 	int err = 0;
       
  2820 
       
  2821 	if (!nic->ecdev)
       
  2822 		netif_carrier_off(netdev);
       
  2823 	if ((err = e100_up(nic)))
       
  2824 		DPRINTK(IFUP, ERR, "Cannot open interface, aborting.\n");
       
  2825 	return err;
       
  2826 }
       
  2827 
       
  2828 static int e100_close(struct net_device *netdev)
       
  2829 {
       
  2830 	e100_down(netdev_priv(netdev));
       
  2831 	return 0;
       
  2832 }
       
  2833 
       
/* Netdev callbacks; installed on the net_device in e100_probe(). */
static const struct net_device_ops e100_netdev_ops = {
	.ndo_open		= e100_open,
	.ndo_stop		= e100_close,
	.ndo_start_xmit		= e100_xmit_frame,
	.ndo_validate_addr	= eth_validate_addr,
	.ndo_set_multicast_list	= e100_set_multicast_list,
	.ndo_set_mac_address	= e100_set_mac_address,
	.ndo_change_mtu		= e100_change_mtu,
	.ndo_do_ioctl		= e100_do_ioctl,
	.ndo_tx_timeout		= e100_tx_timeout,
#ifdef CONFIG_NET_POLL_CONTROLLER
	.ndo_poll_controller	= e100_netpoll,
#endif
};
       
  2848 
       
  2849 static int __devinit e100_probe(struct pci_dev *pdev,
       
  2850 	const struct pci_device_id *ent)
       
  2851 {
       
  2852 	struct net_device *netdev;
       
  2853 	struct nic *nic;
       
  2854 	int err;
       
  2855 
       
  2856 	if (!(netdev = alloc_etherdev(sizeof(struct nic)))) {
       
  2857 		if (((1 << debug) - 1) & NETIF_MSG_PROBE)
       
  2858 			printk(KERN_ERR PFX "Etherdev alloc failed, abort.\n");
       
  2859 		return -ENOMEM;
       
  2860 	}
       
  2861 
       
  2862 	netdev->netdev_ops = &e100_netdev_ops;
       
  2863 	SET_ETHTOOL_OPS(netdev, &e100_ethtool_ops);
       
  2864 	netdev->watchdog_timeo = E100_WATCHDOG_PERIOD;
       
  2865 	strncpy(netdev->name, pci_name(pdev), sizeof(netdev->name) - 1);
       
  2866 
       
  2867 	nic = netdev_priv(netdev);
       
  2868 	netif_napi_add(netdev, &nic->napi, e100_poll, E100_NAPI_WEIGHT);
       
  2869 	nic->netdev = netdev;
       
  2870 	nic->pdev = pdev;
       
  2871 	nic->msg_enable = (1 << debug) - 1;
       
  2872 	nic->mdio_ctrl = mdio_ctrl_hw;
       
  2873 	pci_set_drvdata(pdev, netdev);
       
  2874 
       
  2875 	if ((err = pci_enable_device(pdev))) {
       
  2876 		DPRINTK(PROBE, ERR, "Cannot enable PCI device, aborting.\n");
       
  2877 		goto err_out_free_dev;
       
  2878 	}
       
  2879 
       
  2880 	if (!(pci_resource_flags(pdev, 0) & IORESOURCE_MEM)) {
       
  2881 		DPRINTK(PROBE, ERR, "Cannot find proper PCI device "
       
  2882 			"base address, aborting.\n");
       
  2883 		err = -ENODEV;
       
  2884 		goto err_out_disable_pdev;
       
  2885 	}
       
  2886 
       
  2887 	if ((err = pci_request_regions(pdev, DRV_NAME))) {
       
  2888 		DPRINTK(PROBE, ERR, "Cannot obtain PCI resources, aborting.\n");
       
  2889 		goto err_out_disable_pdev;
       
  2890 	}
       
  2891 
       
  2892 	if ((err = pci_set_dma_mask(pdev, DMA_BIT_MASK(32)))) {
       
  2893 		DPRINTK(PROBE, ERR, "No usable DMA configuration, aborting.\n");
       
  2894 		goto err_out_free_res;
       
  2895 	}
       
  2896 
       
  2897 	SET_NETDEV_DEV(netdev, &pdev->dev);
       
  2898 
       
  2899 	if (use_io)
       
  2900 		DPRINTK(PROBE, INFO, "using i/o access mode\n");
       
  2901 
       
  2902 	nic->csr = pci_iomap(pdev, (use_io ? 1 : 0), sizeof(struct csr));
       
  2903 	if (!nic->csr) {
       
  2904 		DPRINTK(PROBE, ERR, "Cannot map device registers, aborting.\n");
       
  2905 		err = -ENOMEM;
       
  2906 		goto err_out_free_res;
       
  2907 	}
       
  2908 
       
  2909 	if (ent->driver_data)
       
  2910 		nic->flags |= ich;
       
  2911 	else
       
  2912 		nic->flags &= ~ich;
       
  2913 
       
  2914 	e100_get_defaults(nic);
       
  2915 
       
  2916 	/* locks must be initialized before calling hw_reset */
       
  2917 	spin_lock_init(&nic->cb_lock);
       
  2918 	spin_lock_init(&nic->cmd_lock);
       
  2919 	spin_lock_init(&nic->mdio_lock);
       
  2920 
       
  2921 	/* Reset the device before pci_set_master() in case device is in some
       
  2922 	 * funky state and has an interrupt pending - hint: we don't have the
       
  2923 	 * interrupt handler registered yet. */
       
  2924 	e100_hw_reset(nic);
       
  2925 
       
  2926 	pci_set_master(pdev);
       
  2927 
       
  2928 	init_timer(&nic->watchdog);
       
  2929 	nic->watchdog.function = e100_watchdog;
       
  2930 	nic->watchdog.data = (unsigned long)nic;
       
  2931 	init_timer(&nic->blink_timer);
       
  2932 	nic->blink_timer.function = e100_blink_led;
       
  2933 	nic->blink_timer.data = (unsigned long)nic;
       
  2934 
       
  2935 	INIT_WORK(&nic->tx_timeout_task, e100_tx_timeout_task);
       
  2936 
       
  2937 	if ((err = e100_alloc(nic))) {
       
  2938 		DPRINTK(PROBE, ERR, "Cannot alloc driver memory, aborting.\n");
       
  2939 		goto err_out_iounmap;
       
  2940 	}
       
  2941 
       
  2942 	if ((err = e100_eeprom_load(nic)))
       
  2943 		goto err_out_free;
       
  2944 
       
  2945 	e100_phy_init(nic);
       
  2946 
       
  2947 	memcpy(netdev->dev_addr, nic->eeprom, ETH_ALEN);
       
  2948 	memcpy(netdev->perm_addr, nic->eeprom, ETH_ALEN);
       
  2949 	if (!is_valid_ether_addr(netdev->perm_addr)) {
       
  2950 		if (!eeprom_bad_csum_allow) {
       
  2951 			DPRINTK(PROBE, ERR, "Invalid MAC address from "
       
  2952 			        "EEPROM, aborting.\n");
       
  2953 			err = -EAGAIN;
       
  2954 			goto err_out_free;
       
  2955 		} else {
       
  2956 			DPRINTK(PROBE, ERR, "Invalid MAC address from EEPROM, "
       
  2957 			        "you MUST configure one.\n");
       
  2958 		}
       
  2959 	}
       
  2960 
       
  2961 	/* Wol magic packet can be enabled from eeprom */
       
  2962 	if ((nic->mac >= mac_82558_D101_A4) &&
       
  2963 	   (nic->eeprom[eeprom_id] & eeprom_id_wol)) {
       
  2964 		nic->flags |= wol_magic;
       
  2965 		device_set_wakeup_enable(&pdev->dev, true);
       
  2966 	}
       
  2967 
       
  2968 	/* ack any pending wake events, disable PME */
       
  2969 	pci_pme_active(pdev, false);
       
  2970 
       
  2971 	// offer device to EtherCAT master module
       
  2972 	nic->ecdev = ecdev_offer(netdev, e100_ec_poll, THIS_MODULE);
       
  2973 	if (nic->ecdev) {
       
  2974 		if (ecdev_open(nic->ecdev)) {
       
  2975 			ecdev_withdraw(nic->ecdev);
       
  2976 			goto err_out_free;
       
  2977 		}
       
  2978 	} else {
       
  2979 		strcpy(netdev->name, "eth%d");
       
  2980 		if((err = register_netdev(netdev))) {
       
  2981 			DPRINTK(PROBE, ERR, "Cannot register net device, aborting.\n");
       
  2982 			goto err_out_free;
       
  2983 		}
       
  2984 	}
       
  2985 
       
  2986 	DPRINTK(PROBE, INFO, "addr 0x%llx, irq %d, MAC addr %pM\n",
       
  2987 		(unsigned long long)pci_resource_start(pdev, use_io ? 1 : 0),
       
  2988 		pdev->irq, netdev->dev_addr);
       
  2989 
       
  2990 	return 0;
       
  2991 
       
  2992 err_out_free:
       
  2993 	e100_free(nic);
       
  2994 err_out_iounmap:
       
  2995 	pci_iounmap(pdev, nic->csr);
       
  2996 err_out_free_res:
       
  2997 	pci_release_regions(pdev);
       
  2998 err_out_disable_pdev:
       
  2999 	pci_disable_device(pdev);
       
  3000 err_out_free_dev:
       
  3001 	pci_set_drvdata(pdev, NULL);
       
  3002 	free_netdev(netdev);
       
  3003 	return err;
       
  3004 }
       
  3005 
       
/* PCI remove: undo everything e100_probe() set up, in reverse order.
 * For an EtherCAT-claimed device the ecdev is closed and withdrawn
 * instead of unregistering a net_device (probe never registered one
 * in that case). */
static void __devexit e100_remove(struct pci_dev *pdev)
{
	struct net_device *netdev = pci_get_drvdata(pdev);

	if (netdev) {
		struct nic *nic = netdev_priv(netdev);
		if (nic->ecdev) {
			/* Device was handed to the EtherCAT master in probe. */
			ecdev_close(nic->ecdev);
			ecdev_withdraw(nic->ecdev);
		} else {
			unregister_netdev(netdev);
		}

		e100_free(nic);
		pci_iounmap(pdev, nic->csr);
		free_netdev(netdev);
		pci_release_regions(pdev);
		pci_disable_device(pdev);
		pci_set_drvdata(pdev, NULL);
	}
}
       
  3027 
       
/* 82552 PHY SmartSpeed control register and the bits used to arm
 * reverse auto-negotiation in the wake-up path (see __e100_shutdown()). */
#define E100_82552_SMARTSPEED   0x14   /* SmartSpeed Ctrl register */
#define E100_82552_REV_ANEG     0x0200 /* Reverse auto-negotiation */
#define E100_82552_ANEG_NOW     0x0400 /* Auto-negotiate now */
       
/* Common shutdown path for suspend and power-off: stop the NIC, save PCI
 * state, and decide whether wake-up must stay armed.  *enable_wake is set
 * true when WoL magic packet is enabled or ASF is active, so the caller
 * knows whether to keep PME enabled when powering down.
 *
 * NOTE(review): unlike the error-recovery paths, this path does not check
 * nic->ecdev — confirm suspend/shutdown of an EtherCAT-claimed device is
 * safe (netif_running() is presumably false for it, but verify). */
static void __e100_shutdown(struct pci_dev *pdev, bool *enable_wake)
{
	struct net_device *netdev = pci_get_drvdata(pdev);
	struct nic *nic = netdev_priv(netdev);

	if (netif_running(netdev))
		e100_down(nic);
	netif_device_detach(netdev);

	pci_save_state(pdev);

	/* Bitwise | (not ||): e100_asf() is evaluated unconditionally. */
	if ((nic->flags & wol_magic) | e100_asf(nic)) {
		/* enable reverse auto-negotiation */
		if (nic->phy == phy_82552_v) {
			u16 smartspeed = mdio_read(netdev, nic->mii.phy_id,
			                           E100_82552_SMARTSPEED);

			mdio_write(netdev, nic->mii.phy_id,
			           E100_82552_SMARTSPEED, smartspeed |
			           E100_82552_REV_ANEG | E100_82552_ANEG_NOW);
		}
		*enable_wake = true;
	} else {
		*enable_wake = false;
	}

	pci_disable_device(pdev);
}
       
  3059 
       
  3060 static int __e100_power_off(struct pci_dev *pdev, bool wake)
       
  3061 {
       
  3062 	if (wake)
       
  3063 		return pci_prepare_to_sleep(pdev);
       
  3064 
       
  3065 	pci_wake_from_d3(pdev, false);
       
  3066 	pci_set_power_state(pdev, PCI_D3hot);
       
  3067 
       
  3068 	return 0;
       
  3069 }
       
  3070 
       
  3071 #ifdef CONFIG_PM
       
/* PM suspend hook: stop the NIC via the common shutdown path, then power
 * the device off, keeping wake-up armed if __e100_shutdown() asked for it. */
static int e100_suspend(struct pci_dev *pdev, pm_message_t state)
{
	bool wake;
	__e100_shutdown(pdev, &wake);
	return __e100_power_off(pdev, wake);
}
       
  3078 
       
/* PM resume hook: restore PCI state, undo the reverse auto-negotiation
 * set up by __e100_shutdown() on 82552 PHYs, and bring the interface
 * back up if it was running.
 *
 * NOTE(review): no nic->ecdev check here, mirroring __e100_shutdown() —
 * confirm resume of an EtherCAT-claimed device behaves correctly. */
static int e100_resume(struct pci_dev *pdev)
{
	struct net_device *netdev = pci_get_drvdata(pdev);
	struct nic *nic = netdev_priv(netdev);

	pci_set_power_state(pdev, PCI_D0);
	pci_restore_state(pdev);
	/* ack any pending wake events, disable PME */
	pci_enable_wake(pdev, 0, 0);

	/* disable reverse auto-negotiation */
	if (nic->phy == phy_82552_v) {
		u16 smartspeed = mdio_read(netdev, nic->mii.phy_id,
		                           E100_82552_SMARTSPEED);

		mdio_write(netdev, nic->mii.phy_id,
		           E100_82552_SMARTSPEED,
		           smartspeed & ~(E100_82552_REV_ANEG));
	}

	netif_device_attach(netdev);
	if (netif_running(netdev))
		e100_up(nic);

	return 0;
}
       
  3105 #endif /* CONFIG_PM */
       
  3106 
       
/* PCI shutdown hook (reboot/poweroff): stop the NIC; only transition to a
 * low-power state when the system is actually powering off, so a reboot
 * leaves the device in D0. */
static void e100_shutdown(struct pci_dev *pdev)
{
	bool wake;
	__e100_shutdown(pdev, &wake);
	if (system_state == SYSTEM_POWER_OFF)
		__e100_power_off(pdev, wake);
}
       
  3114 
       
  3115 /* ------------------ PCI Error Recovery infrastructure  -------------- */
       
  3116 /**
       
  3117  * e100_io_error_detected - called when PCI error is detected.
       
  3118  * @pdev: Pointer to PCI device
       
  3119  * @state: The current pci connection state
       
  3120  */
       
  3121 static pci_ers_result_t e100_io_error_detected(struct pci_dev *pdev, pci_channel_state_t state)
       
  3122 {
       
  3123 	struct net_device *netdev = pci_get_drvdata(pdev);
       
  3124 	struct nic *nic = netdev_priv(netdev);
       
  3125 
       
  3126 	if (nic->ecdev)
       
  3127 		return -EBUSY;
       
  3128 
       
  3129 	netif_device_detach(netdev);
       
  3130 
       
  3131 	if (state == pci_channel_io_perm_failure)
       
  3132 		return PCI_ERS_RESULT_DISCONNECT;
       
  3133 
       
  3134 	if (netif_running(netdev))
       
  3135 		e100_down(nic);
       
  3136 	pci_disable_device(pdev);
       
  3137 
       
  3138 	/* Request a slot reset. */
       
  3139 	return PCI_ERS_RESULT_NEED_RESET;
       
  3140 }
       
  3141 
       
  3142 /**
       
  3143  * e100_io_slot_reset - called after the pci bus has been reset.
       
  3144  * @pdev: Pointer to PCI device
       
  3145  *
       
  3146  * Restart the card from scratch.
       
  3147  */
       
  3148 static pci_ers_result_t e100_io_slot_reset(struct pci_dev *pdev)
       
  3149 {
       
  3150 	struct net_device *netdev = pci_get_drvdata(pdev);
       
  3151 	struct nic *nic = netdev_priv(netdev);
       
  3152 
       
  3153 	if (nic->ecdev)
       
  3154 		return -EBUSY;
       
  3155 
       
  3156 	if (pci_enable_device(pdev)) {
       
  3157 		printk(KERN_ERR "e100: Cannot re-enable PCI device after reset.\n");
       
  3158 		return PCI_ERS_RESULT_DISCONNECT;
       
  3159 	}
       
  3160 	pci_set_master(pdev);
       
  3161 
       
  3162 	/* Only one device per card can do a reset */
       
  3163 	if (0 != PCI_FUNC(pdev->devfn))
       
  3164 		return PCI_ERS_RESULT_RECOVERED;
       
  3165 	e100_hw_reset(nic);
       
  3166 	e100_phy_init(nic);
       
  3167 
       
  3168 	return PCI_ERS_RESULT_RECOVERED;
       
  3169 }
       
  3170 
       
/**
 * e100_io_resume - resume normal operations
 * @pdev: Pointer to PCI device
 *
 * Resume normal operations after an error recovery
 * sequence has been completed.
 */
static void e100_io_resume(struct pci_dev *pdev)
{
	struct net_device *netdev = pci_get_drvdata(pdev);
	struct nic *nic = netdev_priv(netdev);

	/* ack any pending wake events, disable PME */
	pci_enable_wake(pdev, 0, 0);

	/* Only re-attach to the network stack when the device is not
	 * claimed by the EtherCAT master. */
	if (!nic->ecdev)
		netif_device_attach(netdev);
	/* EtherCAT devices are brought up unconditionally; regular
	 * devices only if they were running.  The watchdog timer is the
	 * net-stack path's responsibility, so it is skipped for ecdev. */
	if (nic->ecdev || netif_running(netdev)) {
		e100_open(netdev);
		if (!nic->ecdev)
			mod_timer(&nic->watchdog, jiffies);
	}
}
       
  3194 
       
/* PCI error-recovery callbacks (AER infrastructure). */
static struct pci_error_handlers e100_err_handler = {
	.error_detected = e100_io_error_detected,
	.slot_reset = e100_io_slot_reset,
	.resume = e100_io_resume,
};
       
  3200 
       
/* PCI driver registration: probe/remove, optional power management,
 * shutdown and error-recovery hooks. */
static struct pci_driver e100_driver = {
	.name =         DRV_NAME,
	.id_table =     e100_id_table,
	.probe =        e100_probe,
	.remove =       __devexit_p(e100_remove),
#ifdef CONFIG_PM
	/* Power Management hooks */
	.suspend =      e100_suspend,
	.resume =       e100_resume,
#endif
	.shutdown =     e100_shutdown,
	.err_handler = &e100_err_handler,
};
       
  3214 
       
  3215 static int __init e100_init_module(void)
       
  3216 {
       
  3217 	printk(KERN_INFO DRV_NAME " " DRV_DESCRIPTION " " DRV_VERSION
       
  3218 			", master " EC_MASTER_VERSION "\n");
       
  3219  
       
  3220  	return pci_register_driver(&e100_driver);
       
  3221 }
       
  3222 
       
/* Module exit point: unregister the PCI driver (which triggers
 * e100_remove() for every bound device). */
static void __exit e100_cleanup_module(void)
{
	printk(KERN_INFO DRV_NAME " cleaning up module...\n");
	pci_unregister_driver(&e100_driver);
	printk(KERN_INFO DRV_NAME " module cleaned up.\n");
}
       
  3229 
       
/* Register module entry/exit handlers. */
module_init(e100_init_module);
module_exit(e100_cleanup_module);