devices/e100-2.6.32-ethercat.c
changeset 2147 af5c0ddbeb87
child 2142 26c74f035ab0
child 2143 5ba5b3792365
equal deleted inserted replaced
2146:05c6ddef33ab 2147:af5c0ddbeb87
       
     1 /******************************************************************************
       
     2  *
       
     3  *  $Id$
       
     4  *
       
     5  *  Copyright (C) 2007-2008  Florian Pose, Ingenieurgemeinschaft IgH
       
     6  *
       
     7  *  This file is part of the IgH EtherCAT Master.
       
     8  *
       
     9  *  The IgH EtherCAT Master is free software; you can redistribute it and/or
       
    10  *  modify it under the terms of the GNU General Public License version 2, as
       
    11  *  published by the Free Software Foundation.
       
    12  *
       
    13  *  The IgH EtherCAT Master is distributed in the hope that it will be useful,
       
    14  *  but WITHOUT ANY WARRANTY; without even the implied warranty of
       
    15  *  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General
       
    16  *  Public License for more details.
       
    17  *
       
    18  *  You should have received a copy of the GNU General Public License along
       
    19  *  with the IgH EtherCAT Master; if not, write to the Free Software
       
    20  *  Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA  02110-1301  USA
       
    21  *
       
    22  *  ---
       
    23  *
       
    24  *  The license mentioned above concerns the source code only. Using the
       
    25  *  EtherCAT technology and brand is only permitted in compliance with the
       
    26  *  industrial property and similar rights of Beckhoff Automation GmbH.
       
    27  *
       
    28  *  ---
       
    29  *
       
    30  *  vim: noexpandtab
       
    31  *
       
    32  *****************************************************************************/
       
    33 
       
    34 /**
       
    35    \file
       
    36    EtherCAT driver for e100-compatible NICs.
       
    37 */
       
    38 
       
    39 /* Former documentation: */
       
    40 
       
    41 /*******************************************************************************
       
    42 
       
    43   Intel PRO/100 Linux driver
       
    44   Copyright(c) 1999 - 2006 Intel Corporation.
       
    45 
       
    46   This program is free software; you can redistribute it and/or modify it
       
    47   under the terms and conditions of the GNU General Public License,
       
    48   version 2, as published by the Free Software Foundation.
       
    49 
       
    50   This program is distributed in the hope it will be useful, but WITHOUT
       
    51   ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
       
    52   FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
       
    53   more details.
       
    54 
       
    55   You should have received a copy of the GNU General Public License along with
       
    56   this program; if not, write to the Free Software Foundation, Inc.,
       
    57   51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
       
    58 
       
    59   The full GNU General Public License is included in this distribution in
       
    60   the file called "COPYING".
       
    61 
       
    62   Contact Information:
       
    63   Linux NICS <linux.nics@intel.com>
       
    64   e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
       
    65   Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
       
    66 
       
    67 *******************************************************************************/
       
    68 
       
    69 /*
       
    70  *	e100.c: Intel(R) PRO/100 ethernet driver
       
    71  *
       
    72  *	(Re)written 2003 by scott.feldman@intel.com.  Based loosely on
       
    73  *	original e100 driver, but better described as a munging of
       
    74  *	e100, e1000, eepro100, tg3, 8139cp, and other drivers.
       
    75  *
       
    76  *	References:
       
    77  *		Intel 8255x 10/100 Mbps Ethernet Controller Family,
       
    78  *		Open Source Software Developers Manual,
       
    79  *		http://sourceforge.net/projects/e1000
       
    80  *
       
    81  *
       
    82  *	                      Theory of Operation
       
    83  *
       
    84  *	I.   General
       
    85  *
       
    86  *	The driver supports Intel(R) 10/100 Mbps PCI Fast Ethernet
       
    87  *	controller family, which includes the 82557, 82558, 82559, 82550,
       
    88  *	82551, and 82562 devices.  82558 and greater controllers
       
    89  *	integrate the Intel 82555 PHY.  The controllers are used in
       
    90  *	server and client network interface cards, as well as in
       
    91  *	LAN-On-Motherboard (LOM), CardBus, MiniPCI, and ICHx
       
    92  *	configurations.  8255x supports a 32-bit linear addressing
       
    93  *	mode and operates at 33Mhz PCI clock rate.
       
    94  *
       
    95  *	II.  Driver Operation
       
    96  *
       
    97  *	Memory-mapped mode is used exclusively to access the device's
       
    98  *	shared-memory structure, the Control/Status Registers (CSR). All
       
    99  *	setup, configuration, and control of the device, including queuing
       
   100  *	of Tx, Rx, and configuration commands is through the CSR.
       
   101  *	cmd_lock serializes accesses to the CSR command register.  cb_lock
       
   102  *	protects the shared Command Block List (CBL).
       
   103  *
       
   104  *	8255x is highly MII-compliant and all access to the PHY go
       
   105  *	through the Management Data Interface (MDI).  Consequently, the
       
   106  *	driver leverages the mii.c library shared with other MII-compliant
       
   107  *	devices.
       
   108  *
       
   109  *	Big- and Little-Endian byte order as well as 32- and 64-bit
       
   110  *	archs are supported.  Weak-ordered memory and non-cache-coherent
       
   111  *	archs are supported.
       
   112  *
       
   113  *	III. Transmit
       
   114  *
       
   115  *	A Tx skb is mapped and hangs off of a TCB.  TCBs are linked
       
   116  *	together in a fixed-size ring (CBL) thus forming the flexible mode
       
   117  *	memory structure.  A TCB marked with the suspend-bit indicates
       
   118  *	the end of the ring.  The last TCB processed suspends the
       
   119  *	controller, and the controller can be restarted by issue a CU
       
   120  *	resume command to continue from the suspend point, or a CU start
       
   121  *	command to start at a given position in the ring.
       
   122  *
       
   123  *	Non-Tx commands (config, multicast setup, etc) are linked
       
   124  *	into the CBL ring along with Tx commands.  The common structure
       
   125  *	used for both Tx and non-Tx commands is the Command Block (CB).
       
   126  *
       
   127  *	cb_to_use is the next CB to use for queuing a command; cb_to_clean
       
   128  *	is the next CB to check for completion; cb_to_send is the first
       
   129  *	CB to start on in case of a previous failure to resume.  CB clean
       
   130  *	up happens in interrupt context in response to a CU interrupt.
       
   131  *	cbs_avail keeps track of number of free CB resources available.
       
   132  *
       
   133  * 	Hardware padding of short packets to minimum packet size is
       
   134  * 	enabled.  82557 pads with 7Eh, while the later controllers pad
       
   135  * 	with 00h.
       
   136  *
       
   137  *	IV.  Receive
       
   138  *
       
   139  *	The Receive Frame Area (RFA) comprises a ring of Receive Frame
       
   140  *	Descriptors (RFD) + data buffer, thus forming the simplified mode
       
   141  *	memory structure.  Rx skbs are allocated to contain both the RFD
       
   142  *	and the data buffer, but the RFD is pulled off before the skb is
       
   143  *	indicated.  The data buffer is aligned such that encapsulated
       
   144  *	protocol headers are u32-aligned.  Since the RFD is part of the
       
   145  *	mapped shared memory, and completion status is contained within
       
   146  *	the RFD, the RFD must be dma_sync'ed to maintain a consistent
       
   147  *	view from software and hardware.
       
   148  *
       
   149  *	In order to keep updates to the RFD link field from colliding with
       
   150  *	hardware writes to mark packets complete, we use the feature that
       
   151  *	hardware will not write to a size 0 descriptor and mark the previous
       
   152  *	packet as end-of-list (EL).   After updating the link, we remove EL
       
   153  *	and only then restore the size such that hardware may use the
       
   154  *	previous-to-end RFD.
       
   155  *
       
   156  *	Under typical operation, the  receive unit (RU) is start once,
       
   157  *	and the controller happily fills RFDs as frames arrive.  If
       
   158  *	replacement RFDs cannot be allocated, or the RU goes non-active,
       
   159  *	the RU must be restarted.  Frame arrival generates an interrupt,
       
   160  *	and Rx indication and re-allocation happen in the same context,
       
   161  *	therefore no locking is required.  A software-generated interrupt
       
   162  *	is generated from the watchdog to recover from a failed allocation
       
   163  *	scenario where all Rx resources have been indicated and none re-
       
   164  *	placed.
       
   165  *
       
   166  *	V.   Miscellaneous
       
   167  *
       
   168  * 	VLAN offloading of tagging, stripping and filtering is not
       
   169  * 	supported, but driver will accommodate the extra 4-byte VLAN tag
       
   170  * 	for processing by upper layers.  Tx/Rx Checksum offloading is not
       
   171  * 	supported.  Tx Scatter/Gather is not supported.  Jumbo Frames is
       
   172  * 	not supported (hardware limitation).
       
   173  *
       
   174  * 	MagicPacket(tm) WoL support is enabled/disabled via ethtool.
       
   175  *
       
   176  * 	Thanks to JC (jchapman@katalix.com) for helping with
       
   177  * 	testing/troubleshooting the development driver.
       
   178  *
       
   179  * 	TODO:
       
   180  * 	o several entry points race with dev->close
       
   181  * 	o check for tx-no-resources/stop Q races with tx clean/wake Q
       
   182  *
       
   183  *	FIXES:
       
   184  * 2005/12/02 - Michael O'Donnell <Michael.ODonnell at stratus dot com>
       
   185  *	- Stratus87247: protect MDI control register manipulations
       
   186  * 2009/06/01 - Andreas Mohr <andi at lisas dot de>
       
   187  *      - add clean lowlevel I/O emulation for cards with MII-lacking PHYs
       
   188  */
       
   189 
       
   190 #include <linux/module.h>
       
   191 #include <linux/moduleparam.h>
       
   192 #include <linux/kernel.h>
       
   193 #include <linux/types.h>
       
   194 #include <linux/sched.h>
       
   195 #include <linux/slab.h>
       
   196 #include <linux/delay.h>
       
   197 #include <linux/init.h>
       
   198 #include <linux/pci.h>
       
   199 #include <linux/dma-mapping.h>
       
   200 #include <linux/dmapool.h>
       
   201 #include <linux/netdevice.h>
       
   202 #include <linux/etherdevice.h>
       
   203 #include <linux/mii.h>
       
   204 #include <linux/if_vlan.h>
       
   205 #include <linux/skbuff.h>
       
   206 #include <linux/ethtool.h>
       
   207 
       
   208 // EtherCAT includes
       
   209 #include "../globals.h"
       
   210 #include "ecdev.h"
       
   211 
       
   212 #define DRV_NAME		"ec_e100"
       
   213 #include <linux/string.h>
       
   214 #include <linux/firmware.h>
       
   215 #include <asm/unaligned.h>
       
   216 
       
   217 
       
   218 #define DRV_EXT			"-NAPI"
       
   219 #define DRV_VERSION		"3.5.24-k2"DRV_EXT
       
   220 #define DRV_DESCRIPTION		"Intel(R) PRO/100 Network Driver"
       
   221 #define DRV_COPYRIGHT		"Copyright(c) 1999-2006 Intel Corporation"
       
   222 #define PFX			DRV_NAME ": "
       
   223 
       
   224 #define E100_WATCHDOG_PERIOD	(2 * HZ)
       
   225 #define E100_NAPI_WEIGHT	16
       
   226 
       
   227 #define FIRMWARE_D101M		"e100/d101m_ucode.bin"
       
   228 #define FIRMWARE_D101S		"e100/d101s_ucode.bin"
       
   229 #define FIRMWARE_D102E		"e100/d102e_ucode.bin"
       
   230 
       
   231 MODULE_DESCRIPTION(DRV_DESCRIPTION);
       
   232 MODULE_AUTHOR(DRV_COPYRIGHT);
       
   233 MODULE_LICENSE("GPL");
       
   234 MODULE_VERSION(DRV_VERSION);
       
   235 MODULE_FIRMWARE(FIRMWARE_D101M);
       
   236 MODULE_FIRMWARE(FIRMWARE_D101S);
       
   237 MODULE_FIRMWARE(FIRMWARE_D102E);
       
   238 
       
   239 MODULE_DESCRIPTION(DRV_DESCRIPTION);
       
   240 MODULE_AUTHOR("Florian Pose <fp@igh-essen.com>");
       
   241 MODULE_LICENSE("GPL");
       
   242 MODULE_VERSION(DRV_VERSION ", master " EC_MASTER_VERSION);
       
   243 
       
   244 void e100_ec_poll(struct net_device *);
       
   245 
       
   246 static int debug = 3;
       
   247 static int eeprom_bad_csum_allow = 0;
       
   248 static int use_io = 0;
       
   249 module_param(debug, int, 0);
       
   250 module_param(eeprom_bad_csum_allow, int, 0);
       
   251 module_param(use_io, int, 0);
       
   252 MODULE_PARM_DESC(debug, "Debug level (0=none,...,16=all)");
       
   253 MODULE_PARM_DESC(eeprom_bad_csum_allow, "Allow bad eeprom checksums");
       
   254 MODULE_PARM_DESC(use_io, "Force use of i/o access mode");
       
   255 #define DPRINTK(nlevel, klevel, fmt, args...) \
       
   256 	(void)((NETIF_MSG_##nlevel & nic->msg_enable) && \
       
   257 	printk(KERN_##klevel PFX "%s: %s: " fmt, nic->netdev->name, \
       
   258 		__func__ , ## args))
       
   259 
       
   260 #define INTEL_8255X_ETHERNET_DEVICE(device_id, ich) {\
       
   261 	PCI_VENDOR_ID_INTEL, device_id, PCI_ANY_ID, PCI_ANY_ID, \
       
   262 	PCI_CLASS_NETWORK_ETHERNET << 8, 0xFFFF00, ich }
       
   263 static struct pci_device_id e100_id_table[] = {
       
   264 	INTEL_8255X_ETHERNET_DEVICE(0x1029, 0),
       
   265 	INTEL_8255X_ETHERNET_DEVICE(0x1030, 0),
       
   266 	INTEL_8255X_ETHERNET_DEVICE(0x1031, 3),
       
   267 	INTEL_8255X_ETHERNET_DEVICE(0x1032, 3),
       
   268 	INTEL_8255X_ETHERNET_DEVICE(0x1033, 3),
       
   269 	INTEL_8255X_ETHERNET_DEVICE(0x1034, 3),
       
   270 	INTEL_8255X_ETHERNET_DEVICE(0x1038, 3),
       
   271 	INTEL_8255X_ETHERNET_DEVICE(0x1039, 4),
       
   272 	INTEL_8255X_ETHERNET_DEVICE(0x103A, 4),
       
   273 	INTEL_8255X_ETHERNET_DEVICE(0x103B, 4),
       
   274 	INTEL_8255X_ETHERNET_DEVICE(0x103C, 4),
       
   275 	INTEL_8255X_ETHERNET_DEVICE(0x103D, 4),
       
   276 	INTEL_8255X_ETHERNET_DEVICE(0x103E, 4),
       
   277 	INTEL_8255X_ETHERNET_DEVICE(0x1050, 5),
       
   278 	INTEL_8255X_ETHERNET_DEVICE(0x1051, 5),
       
   279 	INTEL_8255X_ETHERNET_DEVICE(0x1052, 5),
       
   280 	INTEL_8255X_ETHERNET_DEVICE(0x1053, 5),
       
   281 	INTEL_8255X_ETHERNET_DEVICE(0x1054, 5),
       
   282 	INTEL_8255X_ETHERNET_DEVICE(0x1055, 5),
       
   283 	INTEL_8255X_ETHERNET_DEVICE(0x1056, 5),
       
   284 	INTEL_8255X_ETHERNET_DEVICE(0x1057, 5),
       
   285 	INTEL_8255X_ETHERNET_DEVICE(0x1059, 0),
       
   286 	INTEL_8255X_ETHERNET_DEVICE(0x1064, 6),
       
   287 	INTEL_8255X_ETHERNET_DEVICE(0x1065, 6),
       
   288 	INTEL_8255X_ETHERNET_DEVICE(0x1066, 6),
       
   289 	INTEL_8255X_ETHERNET_DEVICE(0x1067, 6),
       
   290 	INTEL_8255X_ETHERNET_DEVICE(0x1068, 6),
       
   291 	INTEL_8255X_ETHERNET_DEVICE(0x1069, 6),
       
   292 	INTEL_8255X_ETHERNET_DEVICE(0x106A, 6),
       
   293 	INTEL_8255X_ETHERNET_DEVICE(0x106B, 6),
       
   294 	INTEL_8255X_ETHERNET_DEVICE(0x1091, 7),
       
   295 	INTEL_8255X_ETHERNET_DEVICE(0x1092, 7),
       
   296 	INTEL_8255X_ETHERNET_DEVICE(0x1093, 7),
       
   297 	INTEL_8255X_ETHERNET_DEVICE(0x1094, 7),
       
   298 	INTEL_8255X_ETHERNET_DEVICE(0x1095, 7),
       
   299 	INTEL_8255X_ETHERNET_DEVICE(0x10fe, 7),
       
   300 	INTEL_8255X_ETHERNET_DEVICE(0x1209, 0),
       
   301 	INTEL_8255X_ETHERNET_DEVICE(0x1229, 0),
       
   302 	INTEL_8255X_ETHERNET_DEVICE(0x2449, 2),
       
   303 	INTEL_8255X_ETHERNET_DEVICE(0x2459, 2),
       
   304 	INTEL_8255X_ETHERNET_DEVICE(0x245D, 2),
       
   305 	INTEL_8255X_ETHERNET_DEVICE(0x27DC, 7),
       
   306 	{ 0, }
       
   307 };
       
   308 
       
   309 // prevent from being loaded automatically
       
   310 //MODULE_DEVICE_TABLE(pci, e100_id_table);
       
   311 
       
   312 enum mac {
       
   313 	mac_82557_D100_A  = 0,
       
   314 	mac_82557_D100_B  = 1,
       
   315 	mac_82557_D100_C  = 2,
       
   316 	mac_82558_D101_A4 = 4,
       
   317 	mac_82558_D101_B0 = 5,
       
   318 	mac_82559_D101M   = 8,
       
   319 	mac_82559_D101S   = 9,
       
   320 	mac_82550_D102    = 12,
       
   321 	mac_82550_D102_C  = 13,
       
   322 	mac_82551_E       = 14,
       
   323 	mac_82551_F       = 15,
       
   324 	mac_82551_10      = 16,
       
   325 	mac_unknown       = 0xFF,
       
   326 };
       
   327 
       
   328 enum phy {
       
   329 	phy_100a     = 0x000003E0,
       
   330 	phy_100c     = 0x035002A8,
       
   331 	phy_82555_tx = 0x015002A8,
       
   332 	phy_nsc_tx   = 0x5C002000,
       
   333 	phy_82562_et = 0x033002A8,
       
   334 	phy_82562_em = 0x032002A8,
       
   335 	phy_82562_ek = 0x031002A8,
       
   336 	phy_82562_eh = 0x017002A8,
       
   337 	phy_82552_v  = 0xd061004d,
       
   338 	phy_unknown  = 0xFFFFFFFF,
       
   339 };
       
   340 
       
/* CSR (Control/Status Registers) — memory-mapped 8255x register file.
 * All device setup, configuration and control goes through this
 * structure (see "Theory of Operation" above). */
struct csr {
	struct {
		u8 status;	/* SCB status; also read to flush posted writes */
		u8 stat_ack;	/* interrupt status/ack bits (enum scb_stat_ack) */
		u8 cmd_lo;	/* CU/RU command byte (enum scb_cmd_lo) */
		u8 cmd_hi;	/* interrupt mask bits (enum scb_cmd_hi) */
		u32 gen_ptr;
	} scb;
	u32 port;		/* PORT: reset/self-test commands (enum port) */
	u16 flash_ctrl;
	u8 eeprom_ctrl_lo;	/* EEPROM bit-bang interface (enum eeprom_ctrl_lo) */
	u8 eeprom_ctrl_hi;
	u32 mdi_ctrl;		/* MDI/PHY access register (enum mdi_ctrl) */
	u32 rx_dma_count;
};
       
   357 
       
   358 enum scb_status {
       
   359 	rus_no_res       = 0x08,
       
   360 	rus_ready        = 0x10,
       
   361 	rus_mask         = 0x3C,
       
   362 };
       
   363 
       
   364 enum ru_state  {
       
   365 	RU_SUSPENDED = 0,
       
   366 	RU_RUNNING	 = 1,
       
   367 	RU_UNINITIALIZED = -1,
       
   368 };
       
   369 
       
   370 enum scb_stat_ack {
       
   371 	stat_ack_not_ours    = 0x00,
       
   372 	stat_ack_sw_gen      = 0x04,
       
   373 	stat_ack_rnr         = 0x10,
       
   374 	stat_ack_cu_idle     = 0x20,
       
   375 	stat_ack_frame_rx    = 0x40,
       
   376 	stat_ack_cu_cmd_done = 0x80,
       
   377 	stat_ack_not_present = 0xFF,
       
   378 	stat_ack_rx = (stat_ack_sw_gen | stat_ack_rnr | stat_ack_frame_rx),
       
   379 	stat_ack_tx = (stat_ack_cu_idle | stat_ack_cu_cmd_done),
       
   380 };
       
   381 
       
   382 enum scb_cmd_hi {
       
   383 	irq_mask_none = 0x00,
       
   384 	irq_mask_all  = 0x01,
       
   385 	irq_sw_gen    = 0x02,
       
   386 };
       
   387 
       
   388 enum scb_cmd_lo {
       
   389 	cuc_nop        = 0x00,
       
   390 	ruc_start      = 0x01,
       
   391 	ruc_load_base  = 0x06,
       
   392 	cuc_start      = 0x10,
       
   393 	cuc_resume     = 0x20,
       
   394 	cuc_dump_addr  = 0x40,
       
   395 	cuc_dump_stats = 0x50,
       
   396 	cuc_load_base  = 0x60,
       
   397 	cuc_dump_reset = 0x70,
       
   398 };
       
   399 
       
   400 enum cuc_dump {
       
   401 	cuc_dump_complete       = 0x0000A005,
       
   402 	cuc_dump_reset_complete = 0x0000A007,
       
   403 };
       
   404 
       
   405 enum port {
       
   406 	software_reset  = 0x0000,
       
   407 	selftest        = 0x0001,
       
   408 	selective_reset = 0x0002,
       
   409 };
       
   410 
       
   411 enum eeprom_ctrl_lo {
       
   412 	eesk = 0x01,
       
   413 	eecs = 0x02,
       
   414 	eedi = 0x04,
       
   415 	eedo = 0x08,
       
   416 };
       
   417 
       
   418 enum mdi_ctrl {
       
   419 	mdi_write = 0x04000000,
       
   420 	mdi_read  = 0x08000000,
       
   421 	mdi_ready = 0x10000000,
       
   422 };
       
   423 
       
   424 enum eeprom_op {
       
   425 	op_write = 0x05,
       
   426 	op_read  = 0x06,
       
   427 	op_ewds  = 0x10,
       
   428 	op_ewen  = 0x13,
       
   429 };
       
   430 
       
   431 enum eeprom_offsets {
       
   432 	eeprom_cnfg_mdix  = 0x03,
       
   433 	eeprom_phy_iface  = 0x06,
       
   434 	eeprom_id         = 0x0A,
       
   435 	eeprom_config_asf = 0x0D,
       
   436 	eeprom_smbus_addr = 0x90,
       
   437 };
       
   438 
       
   439 enum eeprom_cnfg_mdix {
       
   440 	eeprom_mdix_enabled = 0x0080,
       
   441 };
       
   442 
       
   443 enum eeprom_phy_iface {
       
   444 	NoSuchPhy = 0,
       
   445 	I82553AB,
       
   446 	I82553C,
       
   447 	I82503,
       
   448 	DP83840,
       
   449 	S80C240,
       
   450 	S80C24,
       
   451 	I82555,
       
   452 	DP83840A = 10,
       
   453 };
       
   454 
       
   455 enum eeprom_id {
       
   456 	eeprom_id_wol = 0x0020,
       
   457 };
       
   458 
       
   459 enum eeprom_config_asf {
       
   460 	eeprom_asf = 0x8000,
       
   461 	eeprom_gcl = 0x4000,
       
   462 };
       
   463 
       
   464 enum cb_status {
       
   465 	cb_complete = 0x8000,
       
   466 	cb_ok       = 0x2000,
       
   467 };
       
   468 
       
   469 enum cb_command {
       
   470 	cb_nop    = 0x0000,
       
   471 	cb_iaaddr = 0x0001,
       
   472 	cb_config = 0x0002,
       
   473 	cb_multi  = 0x0003,
       
   474 	cb_tx     = 0x0004,
       
   475 	cb_ucode  = 0x0005,
       
   476 	cb_dump   = 0x0006,
       
   477 	cb_tx_sf  = 0x0008,
       
   478 	cb_cid    = 0x1f00,
       
   479 	cb_i      = 0x2000,
       
   480 	cb_s      = 0x4000,
       
   481 	cb_el     = 0x8000,
       
   482 };
       
   483 
       
/* Receive Frame Descriptor (simplified memory mode).  One RFD sits at
 * the head of each Rx skb's data area; since completion status lives in
 * the RFD itself, it must be dma_sync'ed so software and hardware see a
 * consistent view (see section IV above). */
struct rfd {
	__le16 status;		/* completion status written by hardware */
	__le16 command;		/* control bits, e.g. EL (end-of-list) */
	__le32 link;		/* bus address of the next RFD in the ring */
	__le32 rbd;		/* RBD pointer — unused in simplified mode */
	__le16 actual_size;	/* byte count actually received */
	__le16 size;		/* buffer size; hardware skips a size-0 RFD */
};
       
   492 
       
   493 struct rx {
       
   494 	struct rx *next, *prev;
       
   495 	struct sk_buff *skb;
       
   496 	dma_addr_t dma_addr;
       
   497 };
       
   498 
       
/* Emit a pair of adjacent bitfields in the order the hardware expects,
 * independent of the host compiler's bitfield allocation direction: on
 * big-endian-bitfield hosts the pair is declared reversed. */
#if defined(__BIG_ENDIAN_BITFIELD)
#define X(a,b)	b,a
#else
#define X(a,b)	a,b
#endif
       
   504 struct config {
       
   505 /*0*/	u8 X(byte_count:6, pad0:2);
       
   506 /*1*/	u8 X(X(rx_fifo_limit:4, tx_fifo_limit:3), pad1:1);
       
   507 /*2*/	u8 adaptive_ifs;
       
   508 /*3*/	u8 X(X(X(X(mwi_enable:1, type_enable:1), read_align_enable:1),
       
   509 	   term_write_cache_line:1), pad3:4);
       
   510 /*4*/	u8 X(rx_dma_max_count:7, pad4:1);
       
   511 /*5*/	u8 X(tx_dma_max_count:7, dma_max_count_enable:1);
       
   512 /*6*/	u8 X(X(X(X(X(X(X(late_scb_update:1, direct_rx_dma:1),
       
   513 	   tno_intr:1), cna_intr:1), standard_tcb:1), standard_stat_counter:1),
       
   514 	   rx_discard_overruns:1), rx_save_bad_frames:1);
       
   515 /*7*/	u8 X(X(X(X(X(rx_discard_short_frames:1, tx_underrun_retry:2),
       
   516 	   pad7:2), rx_extended_rfd:1), tx_two_frames_in_fifo:1),
       
   517 	   tx_dynamic_tbd:1);
       
   518 /*8*/	u8 X(X(mii_mode:1, pad8:6), csma_disabled:1);
       
   519 /*9*/	u8 X(X(X(X(X(rx_tcpudp_checksum:1, pad9:3), vlan_arp_tco:1),
       
   520 	   link_status_wake:1), arp_wake:1), mcmatch_wake:1);
       
   521 /*10*/	u8 X(X(X(pad10:3, no_source_addr_insertion:1), preamble_length:2),
       
   522 	   loopback:2);
       
   523 /*11*/	u8 X(linear_priority:3, pad11:5);
       
   524 /*12*/	u8 X(X(linear_priority_mode:1, pad12:3), ifs:4);
       
   525 /*13*/	u8 ip_addr_lo;
       
   526 /*14*/	u8 ip_addr_hi;
       
   527 /*15*/	u8 X(X(X(X(X(X(X(promiscuous_mode:1, broadcast_disabled:1),
       
   528 	   wait_after_win:1), pad15_1:1), ignore_ul_bit:1), crc_16_bit:1),
       
   529 	   pad15_2:1), crs_or_cdt:1);
       
   530 /*16*/	u8 fc_delay_lo;
       
   531 /*17*/	u8 fc_delay_hi;
       
   532 /*18*/	u8 X(X(X(X(X(rx_stripping:1, tx_padding:1), rx_crc_transfer:1),
       
   533 	   rx_long_ok:1), fc_priority_threshold:3), pad18:1);
       
   534 /*19*/	u8 X(X(X(X(X(X(X(addr_wake:1, magic_packet_disable:1),
       
   535 	   fc_disable:1), fc_restop:1), fc_restart:1), fc_reject:1),
       
   536 	   full_duplex_force:1), full_duplex_pin:1);
       
   537 /*20*/	u8 X(X(X(pad20_1:5, fc_priority_location:1), multi_ia:1), pad20_2:1);
       
   538 /*21*/	u8 X(X(pad21_1:3, multicast_all:1), pad21_2:4);
       
   539 /*22*/	u8 X(X(rx_d102_mode:1, rx_vlan_drop:1), pad22:6);
       
   540 	u8 pad_d102[9];
       
   541 };
       
   542 
       
   543 #define E100_MAX_MULTICAST_ADDRS	64
       
   544 struct multi {
       
   545 	__le16 count;
       
   546 	u8 addr[E100_MAX_MULTICAST_ADDRS * ETH_ALEN + 2/*pad*/];
       
   547 };
       
   548 
       
   549 /* Important: keep total struct u32-aligned */
       
   550 #define UCODE_SIZE			134
       
   551 struct cb {
       
   552 	__le16 status;
       
   553 	__le16 command;
       
   554 	__le32 link;
       
   555 	union {
       
   556 		u8 iaaddr[ETH_ALEN];
       
   557 		__le32 ucode[UCODE_SIZE];
       
   558 		struct config config;
       
   559 		struct multi multi;
       
   560 		struct {
       
   561 			u32 tbd_array;
       
   562 			u16 tcb_byte_count;
       
   563 			u8 threshold;
       
   564 			u8 tbd_count;
       
   565 			struct {
       
   566 				__le32 buf_addr;
       
   567 				__le16 size;
       
   568 				u16 eol;
       
   569 			} tbd;
       
   570 		} tcb;
       
   571 		__le32 dump_buffer_addr;
       
   572 	} u;
       
   573 	struct cb *next, *prev;
       
   574 	dma_addr_t dma_addr;
       
   575 	struct sk_buff *skb;
       
   576 };
       
   577 
       
   578 enum loopback {
       
   579 	lb_none = 0, lb_mac = 1, lb_phy = 3,
       
   580 };
       
   581 
       
   582 struct stats {
       
   583 	__le32 tx_good_frames, tx_max_collisions, tx_late_collisions,
       
   584 		tx_underruns, tx_lost_crs, tx_deferred, tx_single_collisions,
       
   585 		tx_multiple_collisions, tx_total_collisions;
       
   586 	__le32 rx_good_frames, rx_crc_errors, rx_alignment_errors,
       
   587 		rx_resource_errors, rx_overrun_errors, rx_cdt_errors,
       
   588 		rx_short_frame_errors;
       
   589 	__le32 fc_xmt_pause, fc_rcv_pause, fc_rcv_unsupported;
       
   590 	__le16 xmt_tco_frames, rcv_tco_frames;
       
   591 	__le32 complete;
       
   592 };
       
   593 
       
   594 struct mem {
       
   595 	struct {
       
   596 		u32 signature;
       
   597 		u32 result;
       
   598 	} selftest;
       
   599 	struct stats stats;
       
   600 	u8 dump_buf[596];
       
   601 };
       
   602 
       
   603 struct param_range {
       
   604 	u32 min;
       
   605 	u32 max;
       
   606 	u32 count;
       
   607 };
       
   608 
       
   609 struct params {
       
   610 	struct param_range rfds;
       
   611 	struct param_range cbs;
       
   612 };
       
   613 
       
/* Per-device driver state.  Layout is cache-conscious: hot fields are
 * grouped at the top (see the Begin/End markers).  The trailing ecdev
 * members are the EtherCAT master additions. */
struct nic {
	/* Begin: frequently used values: keep adjacent for cache effect */
	u32 msg_enable				____cacheline_aligned;
	struct net_device *netdev;
	struct pci_dev *pdev;
	/* Indirect MDIO accessor: allows swapping in an emulated
	 * implementation for MII-lacking PHYs (see 2009/06/01 FIXES note). */
	u16 (*mdio_ctrl)(struct nic *nic, u32 addr, u32 dir, u32 reg, u16 data);

	/* Rx ring state (RFA), see section IV of the header comment. */
	struct rx *rxs				____cacheline_aligned;
	struct rx *rx_to_use;
	struct rx *rx_to_clean;
	struct rfd blank_rfd;
	enum ru_state ru_running;

	/* cb_lock protects the CBL; cmd_lock serializes SCB command
	 * register accesses (see section II of the header comment). */
	spinlock_t cb_lock			____cacheline_aligned;
	spinlock_t cmd_lock;
	struct csr __iomem *csr;	/* mapped device registers */
	enum scb_cmd_lo cuc_cmd;
	unsigned int cbs_avail;		/* free CBs remaining in the ring */
	struct napi_struct napi;
	struct cb *cbs;
	struct cb *cb_to_use;		/* next CB to queue a command on */
	struct cb *cb_to_send;		/* first CB to (re)start CU on */
	struct cb *cb_to_clean;		/* next CB to check for completion */
	__le16 tx_command;
	/* End: frequently used values: keep adjacent for cache effect */

	enum {
		ich                = (1 << 0),
		promiscuous        = (1 << 1),
		multicast_all      = (1 << 2),
		wol_magic          = (1 << 3),
		ich_10h_workaround = (1 << 4),
	} flags					____cacheline_aligned;

	enum mac mac;
	enum phy phy;
	struct params params;
	struct timer_list watchdog;
	struct timer_list blink_timer;
	struct mii_if_info mii;
	struct work_struct tx_timeout_task;
	enum loopback loopback;

	struct mem *mem;		/* DMA-coherent selftest/stats/dump area */
	dma_addr_t dma_addr;		/* bus address of *mem */

	struct pci_pool *cbs_pool;
	dma_addr_t cbs_dma_addr;
	u8 adaptive_ifs;
	u8 tx_threshold;
	u32 tx_frames;

	/* Software-maintained statistics mirrors. */
	u32 tx_collisions;
	u32 tx_deferred;
	u32 tx_single_collisions;
	u32 tx_multiple_collisions;
	u32 tx_fc_pause;
	u32 tx_tco_frames;

	u32 rx_fc_pause;
	u32 rx_fc_unsupported;
	u32 rx_tco_frames;
	u32 rx_over_length_errors;

	u16 leds;

	u16 eeprom_wc;			/* EEPROM word count */
	__le16 eeprom[256];

	/* EtherCAT master additions.  ecdev is non-NULL when the NIC is
	 * claimed by the EtherCAT master; IRQ enabling and cmd_lock use
	 * are then bypassed (see e100_enable_irq/e100_disable_irq). */
	ec_device_t *ecdev;
	/* NOTE(review): presumably a jiffies stamp for the EtherCAT
	 * cyclic/watchdog handling — usage not visible in this chunk. */
	unsigned long ec_watchdog_jiffies;
	spinlock_t mdio_lock;		/* serializes MDIO register access */
};
       
   687 
       
   688 static inline void e100_write_flush(struct nic *nic)
       
   689 {
       
   690 	/* Flush previous PCI writes through intermediate bridges
       
   691 	 * by doing a benign read */
       
   692 	(void)ioread8(&nic->csr->scb.status);
       
   693 }
       
   694 
       
   695 static void e100_enable_irq(struct nic *nic)
       
   696 {
       
   697 	unsigned long flags;
       
   698 	if (nic->ecdev)
       
   699 		return;
       
   700 
       
   701 
       
   702 	spin_lock_irqsave(&nic->cmd_lock, flags);
       
   703 	iowrite8(irq_mask_none, &nic->csr->scb.cmd_hi);
       
   704 	e100_write_flush(nic);
       
   705 	spin_unlock_irqrestore(&nic->cmd_lock, flags);
       
   706 }
       
   707 
       
   708 static void e100_disable_irq(struct nic *nic)
       
   709 {
       
   710 	unsigned long flags = 0;
       
   711 
       
   712 	if (!nic->ecdev)
       
   713 		spin_lock_irqsave(&nic->cmd_lock, flags);
       
   714 	iowrite8(irq_mask_all, &nic->csr->scb.cmd_hi);
       
   715 	e100_write_flush(nic);
       
   716 	if (!nic->ecdev)
       
   717 		spin_unlock_irqrestore(&nic->cmd_lock, flags);
       
   718 }
       
   719 
       
   720 static void e100_hw_reset(struct nic *nic)
       
   721 {
       
   722 	/* Put CU and RU into idle with a selective reset to get
       
   723 	 * device off of PCI bus */
       
   724 	iowrite32(selective_reset, &nic->csr->port);
       
   725 	e100_write_flush(nic); udelay(20);
       
   726 
       
   727 	/* Now fully reset device */
       
   728 	iowrite32(software_reset, &nic->csr->port);
       
   729 	e100_write_flush(nic); udelay(20);
       
   730 
       
   731 	/* Mask off our interrupt line - it's unmasked after reset */
       
   732 	e100_disable_irq(nic);
       
   733 }
       
   734 
       
   735 static int e100_self_test(struct nic *nic)
       
   736 {
       
   737 	u32 dma_addr = nic->dma_addr + offsetof(struct mem, selftest);
       
   738 
       
   739 	/* Passing the self-test is a pretty good indication
       
   740 	 * that the device can DMA to/from host memory */
       
   741 
       
   742 	nic->mem->selftest.signature = 0;
       
   743 	nic->mem->selftest.result = 0xFFFFFFFF;
       
   744 
       
   745 	iowrite32(selftest | dma_addr, &nic->csr->port);
       
   746 	e100_write_flush(nic);
       
   747 	/* Wait 10 msec for self-test to complete */
       
   748 	msleep(10);
       
   749 
       
   750 	/* Interrupts are enabled after self-test */
       
   751 	e100_disable_irq(nic);
       
   752 
       
   753 	/* Check results of self-test */
       
   754 	if (nic->mem->selftest.result != 0) {
       
   755 		DPRINTK(HW, ERR, "Self-test failed: result=0x%08X\n",
       
   756 			nic->mem->selftest.result);
       
   757 		return -ETIMEDOUT;
       
   758 	}
       
   759 	if (nic->mem->selftest.signature == 0) {
       
   760 		DPRINTK(HW, ERR, "Self-test failed: timed out\n");
       
   761 		return -ETIMEDOUT;
       
   762 	}
       
   763 
       
   764 	return 0;
       
   765 }
       
   766 
       
   767 static void e100_eeprom_write(struct nic *nic, u16 addr_len, u16 addr, __le16 data)
       
   768 {
       
   769 	u32 cmd_addr_data[3];
       
   770 	u8 ctrl;
       
   771 	int i, j;
       
   772 
       
   773 	/* Three cmds: write/erase enable, write data, write/erase disable */
       
   774 	cmd_addr_data[0] = op_ewen << (addr_len - 2);
       
   775 	cmd_addr_data[1] = (((op_write << addr_len) | addr) << 16) |
       
   776 		le16_to_cpu(data);
       
   777 	cmd_addr_data[2] = op_ewds << (addr_len - 2);
       
   778 
       
   779 	/* Bit-bang cmds to write word to eeprom */
       
   780 	for (j = 0; j < 3; j++) {
       
   781 
       
   782 		/* Chip select */
       
   783 		iowrite8(eecs | eesk, &nic->csr->eeprom_ctrl_lo);
       
   784 		e100_write_flush(nic); udelay(4);
       
   785 
       
   786 		for (i = 31; i >= 0; i--) {
       
   787 			ctrl = (cmd_addr_data[j] & (1 << i)) ?
       
   788 				eecs | eedi : eecs;
       
   789 			iowrite8(ctrl, &nic->csr->eeprom_ctrl_lo);
       
   790 			e100_write_flush(nic); udelay(4);
       
   791 
       
   792 			iowrite8(ctrl | eesk, &nic->csr->eeprom_ctrl_lo);
       
   793 			e100_write_flush(nic); udelay(4);
       
   794 		}
       
   795 		/* Wait 10 msec for cmd to complete */
       
   796 		msleep(10);
       
   797 
       
   798 		/* Chip deselect */
       
   799 		iowrite8(0, &nic->csr->eeprom_ctrl_lo);
       
   800 		e100_write_flush(nic); udelay(4);
       
   801 	}
       
   802 };
       
   803 
       
   804 /* General technique stolen from the eepro100 driver - very clever */
       
   805 static __le16 e100_eeprom_read(struct nic *nic, u16 *addr_len, u16 addr)
       
   806 {
       
   807 	u32 cmd_addr_data;
       
   808 	u16 data = 0;
       
   809 	u8 ctrl;
       
   810 	int i;
       
   811 
       
   812 	cmd_addr_data = ((op_read << *addr_len) | addr) << 16;
       
   813 
       
   814 	/* Chip select */
       
   815 	iowrite8(eecs | eesk, &nic->csr->eeprom_ctrl_lo);
       
   816 	e100_write_flush(nic); udelay(4);
       
   817 
       
   818 	/* Bit-bang to read word from eeprom */
       
   819 	for (i = 31; i >= 0; i--) {
       
   820 		ctrl = (cmd_addr_data & (1 << i)) ? eecs | eedi : eecs;
       
   821 		iowrite8(ctrl, &nic->csr->eeprom_ctrl_lo);
       
   822 		e100_write_flush(nic); udelay(4);
       
   823 
       
   824 		iowrite8(ctrl | eesk, &nic->csr->eeprom_ctrl_lo);
       
   825 		e100_write_flush(nic); udelay(4);
       
   826 
       
   827 		/* Eeprom drives a dummy zero to EEDO after receiving
       
   828 		 * complete address.  Use this to adjust addr_len. */
       
   829 		ctrl = ioread8(&nic->csr->eeprom_ctrl_lo);
       
   830 		if (!(ctrl & eedo) && i > 16) {
       
   831 			*addr_len -= (i - 16);
       
   832 			i = 17;
       
   833 		}
       
   834 
       
   835 		data = (data << 1) | (ctrl & eedo ? 1 : 0);
       
   836 	}
       
   837 
       
   838 	/* Chip deselect */
       
   839 	iowrite8(0, &nic->csr->eeprom_ctrl_lo);
       
   840 	e100_write_flush(nic); udelay(4);
       
   841 
       
   842 	return cpu_to_le16(data);
       
   843 };
       
   844 
       
   845 /* Load entire EEPROM image into driver cache and validate checksum */
       
   846 static int e100_eeprom_load(struct nic *nic)
       
   847 {
       
   848 	u16 addr, addr_len = 8, checksum = 0;
       
   849 
       
   850 	/* Try reading with an 8-bit addr len to discover actual addr len */
       
   851 	e100_eeprom_read(nic, &addr_len, 0);
       
   852 	nic->eeprom_wc = 1 << addr_len;
       
   853 
       
   854 	for (addr = 0; addr < nic->eeprom_wc; addr++) {
       
   855 		nic->eeprom[addr] = e100_eeprom_read(nic, &addr_len, addr);
       
   856 		if (addr < nic->eeprom_wc - 1)
       
   857 			checksum += le16_to_cpu(nic->eeprom[addr]);
       
   858 	}
       
   859 
       
   860 	/* The checksum, stored in the last word, is calculated such that
       
   861 	 * the sum of words should be 0xBABA */
       
   862 	if (cpu_to_le16(0xBABA - checksum) != nic->eeprom[nic->eeprom_wc - 1]) {
       
   863 		DPRINTK(PROBE, ERR, "EEPROM corrupted\n");
       
   864 		if (!eeprom_bad_csum_allow)
       
   865 			return -EAGAIN;
       
   866 	}
       
   867 
       
   868 	return 0;
       
   869 }
       
   870 
       
   871 /* Save (portion of) driver EEPROM cache to device and update checksum */
       
   872 static int e100_eeprom_save(struct nic *nic, u16 start, u16 count)
       
   873 {
       
   874 	u16 addr, addr_len = 8, checksum = 0;
       
   875 
       
   876 	/* Try reading with an 8-bit addr len to discover actual addr len */
       
   877 	e100_eeprom_read(nic, &addr_len, 0);
       
   878 	nic->eeprom_wc = 1 << addr_len;
       
   879 
       
   880 	if (start + count >= nic->eeprom_wc)
       
   881 		return -EINVAL;
       
   882 
       
   883 	for (addr = start; addr < start + count; addr++)
       
   884 		e100_eeprom_write(nic, addr_len, addr, nic->eeprom[addr]);
       
   885 
       
   886 	/* The checksum, stored in the last word, is calculated such that
       
   887 	 * the sum of words should be 0xBABA */
       
   888 	for (addr = 0; addr < nic->eeprom_wc - 1; addr++)
       
   889 		checksum += le16_to_cpu(nic->eeprom[addr]);
       
   890 	nic->eeprom[nic->eeprom_wc - 1] = cpu_to_le16(0xBABA - checksum);
       
   891 	e100_eeprom_write(nic, addr_len, nic->eeprom_wc - 1,
       
   892 		nic->eeprom[nic->eeprom_wc - 1]);
       
   893 
       
   894 	return 0;
       
   895 }
       
   896 
       
#define E100_WAIT_SCB_TIMEOUT 20000 /* we might have to wait 100ms!!! */
#define E100_WAIT_SCB_FAST 20       /* delay like the old code */
/* Issue a command to the SCB, waiting first for the previous command
 * to be accepted (cmd_lo reads back zero).  Returns 0 on success or
 * -EAGAIN if the SCB never clears within the timeout.  Locking is
 * skipped when the device is claimed by EtherCAT (polled operation). */
static int e100_exec_cmd(struct nic *nic, u8 cmd, dma_addr_t dma_addr)
{
	unsigned long flags = 0;
	unsigned int i;
	int err = 0;

	if (!nic->ecdev)
		spin_lock_irqsave(&nic->cmd_lock, flags);

	/* Previous command is accepted when SCB clears */
	for (i = 0; i < E100_WAIT_SCB_TIMEOUT; i++) {
		if (likely(!ioread8(&nic->csr->scb.cmd_lo)))
			break;
		cpu_relax();
		/* spin fast for the first E100_WAIT_SCB_FAST iterations,
		 * then back off with 5us delays */
		if (unlikely(i > E100_WAIT_SCB_FAST))
			udelay(5);
	}
	if (unlikely(i == E100_WAIT_SCB_TIMEOUT)) {
		err = -EAGAIN;
		goto err_unlock;
	}

	/* cuc_resume reuses the general pointer programmed previously */
	if (unlikely(cmd != cuc_resume))
		iowrite32(dma_addr, &nic->csr->scb.gen_ptr);
	iowrite8(cmd, &nic->csr->scb.cmd_lo);

err_unlock:
	if (!nic->ecdev)
		spin_unlock_irqrestore(&nic->cmd_lock, flags);

	return err;
}
       
   931 
       
/* Claim the next free command block, let @cb_prepare fill it in, link
 * it into the hardware chain and kick the CU.  The @skb argument is
 * passed through to @cb_prepare and is not always a real skb (see
 * e100_setup_ucode).  Returns 0 on success, -ENOMEM when no CB is
 * free, -ENOSPC when this CB consumed the last free slot.  Locking is
 * skipped when the device is claimed by EtherCAT. */
static int e100_exec_cb(struct nic *nic, struct sk_buff *skb,
	void (*cb_prepare)(struct nic *, struct cb *, struct sk_buff *))
{
	struct cb *cb;
	unsigned long flags = 0;
	int err = 0;

	if (!nic->ecdev)
		spin_lock_irqsave(&nic->cb_lock, flags);

	if (unlikely(!nic->cbs_avail)) {
		err = -ENOMEM;
		goto err_unlock;
	}

	cb = nic->cb_to_use;
	nic->cb_to_use = cb->next;
	nic->cbs_avail--;
	cb->skb = skb;

	/* Ring just became full: still submit, but tell the caller. */
	if (unlikely(!nic->cbs_avail))
		err = -ENOSPC;

	cb_prepare(nic, cb, skb);

	/* Order is important otherwise we'll be in a race with h/w:
	 * set S-bit in current first, then clear S-bit in previous. */
	cb->command |= cpu_to_le16(cb_s);
	wmb();
	cb->prev->command &= cpu_to_le16(~cb_s);

	/* Push all not-yet-submitted CBs to the controller. */
	while (nic->cb_to_send != nic->cb_to_use) {
		if (unlikely(e100_exec_cmd(nic, nic->cuc_cmd,
			nic->cb_to_send->dma_addr))) {
			/* Ok, here's where things get sticky.  It's
			 * possible that we can't schedule the command
			 * because the controller is too busy, so
			 * let's just queue the command and try again
			 * when another command is scheduled. */
			if (err == -ENOSPC) {
				//request a reset
				schedule_work(&nic->tx_timeout_task);
			}
			break;
		} else {
			nic->cuc_cmd = cuc_resume;
			nic->cb_to_send = nic->cb_to_send->next;
		}
	}

err_unlock:
	if (!nic->ecdev)
		spin_unlock_irqrestore(&nic->cb_lock, flags);

	return err;
}
       
   988 
       
   989 static int mdio_read(struct net_device *netdev, int addr, int reg)
       
   990 {
       
   991 	struct nic *nic = netdev_priv(netdev);
       
   992 	return nic->mdio_ctrl(nic, addr, mdi_read, reg, 0);
       
   993 }
       
   994 
       
   995 static void mdio_write(struct net_device *netdev, int addr, int reg, int data)
       
   996 {
       
   997 	struct nic *nic = netdev_priv(netdev);
       
   998 
       
   999 	nic->mdio_ctrl(nic, addr, mdi_write, reg, data);
       
  1000 }
       
  1001 
       
  1002 /* the standard mdio_ctrl() function for usual MII-compliant hardware */
       
  1003 static u16 mdio_ctrl_hw(struct nic *nic, u32 addr, u32 dir, u32 reg, u16 data)
       
  1004 {
       
  1005 	u32 data_out = 0;
       
  1006 	unsigned int i;
       
  1007 	unsigned long flags = 0;
       
  1008 
       
  1009 
       
  1010 	/*
       
  1011 	 * Stratus87247: we shouldn't be writing the MDI control
       
  1012 	 * register until the Ready bit shows True.  Also, since
       
  1013 	 * manipulation of the MDI control registers is a multi-step
       
  1014 	 * procedure it should be done under lock.
       
  1015 	 */
       
  1016 	if (!nic->ecdev)
       
  1017 		spin_lock_irqsave(&nic->mdio_lock, flags);
       
  1018 	for (i = 100; i; --i) {
       
  1019 		if (ioread32(&nic->csr->mdi_ctrl) & mdi_ready)
       
  1020 			break;
       
  1021 		udelay(20);
       
  1022 	}
       
  1023 	if (unlikely(!i)) {
       
  1024 		printk("e100.mdio_ctrl(%s) won't go Ready\n",
       
  1025 			nic->netdev->name );
       
  1026 		if (!nic->ecdev)
       
  1027 			spin_unlock_irqrestore(&nic->mdio_lock, flags);
       
  1028 		return 0;		/* No way to indicate timeout error */
       
  1029 	}
       
  1030 	iowrite32((reg << 16) | (addr << 21) | dir | data, &nic->csr->mdi_ctrl);
       
  1031 
       
  1032 	for (i = 0; i < 100; i++) {
       
  1033 		udelay(20);
       
  1034 		if ((data_out = ioread32(&nic->csr->mdi_ctrl)) & mdi_ready)
       
  1035 			break;
       
  1036 	}
       
  1037 	if (!nic->ecdev)
       
  1038 		spin_unlock_irqrestore(&nic->mdio_lock, flags);
       
  1039 	DPRINTK(HW, DEBUG,
       
  1040 		"%s:addr=%d, reg=%d, data_in=0x%04X, data_out=0x%04X\n",
       
  1041 		dir == mdi_read ? "READ" : "WRITE", addr, reg, data, data_out);
       
  1042 	return (u16)data_out;
       
  1043 }
       
  1044 
       
  1045 /* slightly tweaked mdio_ctrl() function for phy_82552_v specifics */
       
  1046 static u16 mdio_ctrl_phy_82552_v(struct nic *nic,
       
  1047 				 u32 addr,
       
  1048 				 u32 dir,
       
  1049 				 u32 reg,
       
  1050 				 u16 data)
       
  1051 {
       
  1052 	if ((reg == MII_BMCR) && (dir == mdi_write)) {
       
  1053 		if (data & (BMCR_ANRESTART | BMCR_ANENABLE)) {
       
  1054 			u16 advert = mdio_read(nic->netdev, nic->mii.phy_id,
       
  1055 							MII_ADVERTISE);
       
  1056 
       
  1057 			/*
       
  1058 			 * Workaround Si issue where sometimes the part will not
       
  1059 			 * autoneg to 100Mbps even when advertised.
       
  1060 			 */
       
  1061 			if (advert & ADVERTISE_100FULL)
       
  1062 				data |= BMCR_SPEED100 | BMCR_FULLDPLX;
       
  1063 			else if (advert & ADVERTISE_100HALF)
       
  1064 				data |= BMCR_SPEED100;
       
  1065 		}
       
  1066 	}
       
  1067 	return mdio_ctrl_hw(nic, addr, dir, reg, data);
       
  1068 }
       
  1069 
       
  1070 /* Fully software-emulated mdio_ctrl() function for cards without
       
  1071  * MII-compliant PHYs.
       
  1072  * For now, this is mainly geared towards 80c24 support; in case of further
       
  1073  * requirements for other types (i82503, ...?) either extend this mechanism
       
  1074  * or split it, whichever is cleaner.
       
  1075  */
       
  1076 static u16 mdio_ctrl_phy_mii_emulated(struct nic *nic,
       
  1077 				      u32 addr,
       
  1078 				      u32 dir,
       
  1079 				      u32 reg,
       
  1080 				      u16 data)
       
  1081 {
       
  1082 	/* might need to allocate a netdev_priv'ed register array eventually
       
  1083 	 * to be able to record state changes, but for now
       
  1084 	 * some fully hardcoded register handling ought to be ok I guess. */
       
  1085 
       
  1086 	if (dir == mdi_read) {
       
  1087 		switch (reg) {
       
  1088 		case MII_BMCR:
       
  1089 			/* Auto-negotiation, right? */
       
  1090 			return  BMCR_ANENABLE |
       
  1091 				BMCR_FULLDPLX;
       
  1092 		case MII_BMSR:
       
  1093 			return	BMSR_LSTATUS /* for mii_link_ok() */ |
       
  1094 				BMSR_ANEGCAPABLE |
       
  1095 				BMSR_10FULL;
       
  1096 		case MII_ADVERTISE:
       
  1097 			/* 80c24 is a "combo card" PHY, right? */
       
  1098 			return	ADVERTISE_10HALF |
       
  1099 				ADVERTISE_10FULL;
       
  1100 		default:
       
  1101 			DPRINTK(HW, DEBUG,
       
  1102 		"%s:addr=%d, reg=%d, data=0x%04X: unimplemented emulation!\n",
       
  1103 		dir == mdi_read ? "READ" : "WRITE", addr, reg, data);
       
  1104 			return 0xFFFF;
       
  1105 		}
       
  1106 	} else {
       
  1107 		switch (reg) {
       
  1108 		default:
       
  1109 			DPRINTK(HW, DEBUG,
       
  1110 		"%s:addr=%d, reg=%d, data=0x%04X: unimplemented emulation!\n",
       
  1111 		dir == mdi_read ? "READ" : "WRITE", addr, reg, data);
       
  1112 			return 0xFFFF;
       
  1113 		}
       
  1114 	}
       
  1115 }
       
  1116 static inline int e100_phy_supports_mii(struct nic *nic)
       
  1117 {
       
  1118 	/* for now, just check it by comparing whether we
       
  1119 	   are using MII software emulation.
       
  1120 	*/
       
  1121 	return (nic->mdio_ctrl != mdio_ctrl_phy_mii_emulated);
       
  1122 }
       
  1123 
       
  1124 static void e100_get_defaults(struct nic *nic)
       
  1125 {
       
  1126 	struct param_range rfds = { .min = 16, .max = 256, .count = 256 };
       
  1127 	struct param_range cbs  = { .min = 64, .max = 256, .count = 128 };
       
  1128 
       
  1129 	/* MAC type is encoded as rev ID; exception: ICH is treated as 82559 */
       
  1130 	nic->mac = (nic->flags & ich) ? mac_82559_D101M : nic->pdev->revision;
       
  1131 	if (nic->mac == mac_unknown)
       
  1132 		nic->mac = mac_82557_D100_A;
       
  1133 
       
  1134 	nic->params.rfds = rfds;
       
  1135 	nic->params.cbs = cbs;
       
  1136 
       
  1137 	/* Quadwords to DMA into FIFO before starting frame transmit */
       
  1138 	nic->tx_threshold = 0xE0;
       
  1139 
       
  1140 	/* no interrupt for every tx completion, delay = 256us if not 557 */
       
  1141 	nic->tx_command = cpu_to_le16(cb_tx | cb_tx_sf |
       
  1142 		((nic->mac >= mac_82558_D101_A4) ? cb_cid : cb_i));
       
  1143 
       
  1144 	/* Template for a freshly allocated RFD */
       
  1145 	nic->blank_rfd.command = 0;
       
  1146 	nic->blank_rfd.rbd = cpu_to_le32(0xFFFFFFFF);
       
  1147 	nic->blank_rfd.size = cpu_to_le16(VLAN_ETH_FRAME_LEN);
       
  1148 
       
  1149 	/* MII setup */
       
  1150 	nic->mii.phy_id_mask = 0x1F;
       
  1151 	nic->mii.reg_num_mask = 0x1F;
       
  1152 	nic->mii.dev = nic->netdev;
       
  1153 	nic->mii.mdio_read = mdio_read;
       
  1154 	nic->mii.mdio_write = mdio_write;
       
  1155 }
       
  1156 
       
/* CB handler: build a cb_config command block from the nic's state.
 * The byte values follow the i8255x "Configure" command layout; see
 * the Intel 8255x Software Developer's Manual for per-field meaning.
 * NOTE(review): byte values and their order are register-level magic;
 * do not reorder or "clean up" without the manual at hand. */
static void e100_configure(struct nic *nic, struct cb *cb, struct sk_buff *skb)
{
	struct config *config = &cb->u.config;
	u8 *c = (u8 *)config;

	cb->command = cpu_to_le16(cb_config);

	memset(config, 0, sizeof(struct config));

	config->byte_count = 0x16;		/* bytes in this struct */
	config->rx_fifo_limit = 0x8;		/* bytes in FIFO before DMA */
	config->direct_rx_dma = 0x1;		/* reserved */
	config->standard_tcb = 0x1;		/* 1=standard, 0=extended */
	config->standard_stat_counter = 0x1;	/* 1=standard, 0=extended */
	config->rx_discard_short_frames = 0x1;	/* 1=discard, 0=pass */
	config->tx_underrun_retry = 0x3;	/* # of underrun retries */
	if (e100_phy_supports_mii(nic))
		config->mii_mode = 1;           /* 1=MII mode, 0=i82503 mode */
	config->pad10 = 0x6;
	config->no_source_addr_insertion = 0x1;	/* 1=no, 0=yes */
	config->preamble_length = 0x2;		/* 0=1, 1=3, 2=7, 3=15 bytes */
	config->ifs = 0x6;			/* x16 = inter frame spacing */
	config->ip_addr_hi = 0xF2;		/* ARP IP filter - not used */
	config->pad15_1 = 0x1;
	config->pad15_2 = 0x1;
	config->crs_or_cdt = 0x0;		/* 0=CRS only, 1=CRS or CDT */
	config->fc_delay_hi = 0x40;		/* time delay for fc frame */
	config->tx_padding = 0x1;		/* 1=pad short frames */
	config->fc_priority_threshold = 0x7;	/* 7=priority fc disabled */
	config->pad18 = 0x1;
	config->full_duplex_pin = 0x1;		/* 1=examine FDX# pin */
	config->pad20_1 = 0x1F;
	config->fc_priority_location = 0x1;	/* 1=byte#31, 0=byte#19 */
	config->pad21_1 = 0x5;

	config->adaptive_ifs = nic->adaptive_ifs;
	config->loopback = nic->loopback;

	if (nic->mii.force_media && nic->mii.full_duplex)
		config->full_duplex_force = 0x1;	/* 1=force, 0=auto */

	/* promiscuous/loopback also accept bad and short frames */
	if (nic->flags & promiscuous || nic->loopback) {
		config->rx_save_bad_frames = 0x1;	/* 1=save, 0=discard */
		config->rx_discard_short_frames = 0x0;	/* 1=discard, 0=save */
		config->promiscuous_mode = 0x1;		/* 1=on, 0=off */
	}

	if (nic->flags & multicast_all)
		config->multicast_all = 0x1;		/* 1=accept, 0=no */

	/* disable WoL when up; EtherCAT-claimed devices never use WoL */
	if (nic->ecdev ||
			(netif_running(nic->netdev) || !(nic->flags & wol_magic)))
		config->magic_packet_disable = 0x1;	/* 1=off, 0=on */

	/* later MACs get flow-control/extended-TCB/VLAN tweaks */
	if (nic->mac >= mac_82558_D101_A4) {
		config->fc_disable = 0x1;	/* 1=Tx fc off, 0=Tx fc on */
		config->mwi_enable = 0x1;	/* 1=enable, 0=disable */
		config->standard_tcb = 0x0;	/* 1=standard, 0=extended */
		config->rx_long_ok = 0x1;	/* 1=VLANs ok, 0=standard */
		if (nic->mac >= mac_82559_D101M) {
			config->tno_intr = 0x1;		/* TCO stats enable */
			/* Enable TCO in extended config */
			if (nic->mac >= mac_82551_10) {
				config->byte_count = 0x20; /* extended bytes */
				config->rx_d102_mode = 0x1; /* GMRC for TCO */
			}
		} else {
			config->standard_stat_counter = 0x0;
		}
	}

	DPRINTK(HW, DEBUG, "[00-07]=%02X:%02X:%02X:%02X:%02X:%02X:%02X:%02X\n",
		c[0], c[1], c[2], c[3], c[4], c[5], c[6], c[7]);
	DPRINTK(HW, DEBUG, "[08-15]=%02X:%02X:%02X:%02X:%02X:%02X:%02X:%02X\n",
		c[8], c[9], c[10], c[11], c[12], c[13], c[14], c[15]);
	DPRINTK(HW, DEBUG, "[16-23]=%02X:%02X:%02X:%02X:%02X:%02X:%02X:%02X\n",
		c[16], c[17], c[18], c[19], c[20], c[21], c[22], c[23]);
}
       
  1236 
       
  1237 /*************************************************************************
       
  1238 *  CPUSaver parameters
       
  1239 *
       
  1240 *  All CPUSaver parameters are 16-bit literals that are part of a
       
  1241 *  "move immediate value" instruction.  By changing the value of
       
  1242 *  the literal in the instruction before the code is loaded, the
       
  1243 *  driver can change the algorithm.
       
  1244 *
       
  1245 *  INTDELAY - This loads the dead-man timer with its initial value.
       
  1246 *    When this timer expires the interrupt is asserted, and the
       
  1247 *    timer is reset each time a new packet is received.  (see
       
  1248 *    BUNDLEMAX below to set the limit on number of chained packets)
       
  1249 *    The current default is 0x600 or 1536.  Experiments show that
       
  1250 *    the value should probably stay within the 0x200 - 0x1000.
       
  1251 *
       
  1252 *  BUNDLEMAX -
       
  1253 *    This sets the maximum number of frames that will be bundled.  In
       
  1254 *    some situations, such as the TCP windowing algorithm, it may be
       
  1255 *    better to limit the growth of the bundle size than let it go as
       
  1256 *    high as it can, because that could cause too much added latency.
       
  1257 *    The default is six, because this is the number of packets in the
       
  1258 *    default TCP window size.  A value of 1 would make CPUSaver indicate
       
  1259 *    an interrupt for every frame received.  If you do not want to put
       
  1260 *    a limit on the bundle size, set this value to xFFFF.
       
  1261 *
       
  1262 *  BUNDLESMALL -
       
  1263 *    This contains a bit-mask describing the minimum size frame that
       
  1264 *    will be bundled.  The default masks the lower 7 bits, which means
       
  1265 *    that any frame less than 128 bytes in length will not be bundled,
       
  1266 *    but will instead immediately generate an interrupt.  This does
       
  1267 *    not affect the current bundle in any way.  Any frame that is 128
       
   1268 *    bytes or larger will be bundled normally.  This feature is meant
       
  1269 *    to provide immediate indication of ACK frames in a TCP environment.
       
  1270 *    Customers were seeing poor performance when a machine with CPUSaver
       
  1271 *    enabled was sending but not receiving.  The delay introduced when
       
  1272 *    the ACKs were received was enough to reduce total throughput, because
       
  1273 *    the sender would sit idle until the ACK was finally seen.
       
  1274 *
       
  1275 *    The current default is 0xFF80, which masks out the lower 7 bits.
       
  1276 *    This means that any frame which is x7F (127) bytes or smaller
       
  1277 *    will cause an immediate interrupt.  Because this value must be a
       
  1278 *    bit mask, there are only a few valid values that can be used.  To
       
  1279 *    turn this feature off, the driver can write the value xFFFF to the
       
  1280 *    lower word of this instruction (in the same way that the other
       
  1281 *    parameters are used).  Likewise, a value of 0xF800 (2047) would
       
  1282 *    cause an interrupt to be generated for every frame, because all
       
  1283 *    standard Ethernet frames are <= 2047 bytes in length.
       
  1284 *************************************************************************/
       
  1285 
       
/* if you wish to disable the ucode functionality, while maintaining the
 * workarounds it provides, set the following defines to:
 * BUNDLESMALL 0
 * BUNDLEMAX 1
 * INTDELAY 1
 */
#define BUNDLESMALL 1           /* nonzero: small frames bundled too (mask 0xFFFF) */
#define BUNDLEMAX (u16)6        /* max frames chained before an interrupt */
#define INTDELAY (u16)1536 /* 0x600 */  /* dead-man timer initial value */
       
  1295 
       
  1296 /* Initialize firmware */
       
  1297 static const struct firmware *e100_request_firmware(struct nic *nic)
       
  1298 {
       
  1299 	const char *fw_name;
       
  1300 	const struct firmware *fw;
       
  1301 	u8 timer, bundle, min_size;
       
  1302 	int err;
       
  1303 
       
  1304 	/* do not load u-code for ICH devices */
       
  1305 	if (nic->flags & ich)
       
  1306 		return NULL;
       
  1307 
       
  1308 	/* Search for ucode match against h/w revision */
       
  1309 	if (nic->mac == mac_82559_D101M)
       
  1310 		fw_name = FIRMWARE_D101M;
       
  1311 	else if (nic->mac == mac_82559_D101S)
       
  1312 		fw_name = FIRMWARE_D101S;
       
  1313 	else if (nic->mac == mac_82551_F || nic->mac == mac_82551_10)
       
  1314 		fw_name = FIRMWARE_D102E;
       
  1315 	else /* No ucode on other devices */
       
  1316 		return NULL;
       
  1317 
       
  1318 	err = request_firmware(&fw, fw_name, &nic->pdev->dev);
       
  1319 	if (err) {
       
  1320 		DPRINTK(PROBE, ERR, "Failed to load firmware \"%s\": %d\n",
       
  1321 			fw_name, err);
       
  1322 		return ERR_PTR(err);
       
  1323 	}
       
  1324 	/* Firmware should be precisely UCODE_SIZE (words) plus three bytes
       
  1325 	   indicating the offsets for BUNDLESMALL, BUNDLEMAX, INTDELAY */
       
  1326 	if (fw->size != UCODE_SIZE * 4 + 3) {
       
  1327 		DPRINTK(PROBE, ERR, "Firmware \"%s\" has wrong size %zu\n",
       
  1328 			fw_name, fw->size);
       
  1329 		release_firmware(fw);
       
  1330 		return ERR_PTR(-EINVAL);
       
  1331 	}
       
  1332 
       
  1333 	/* Read timer, bundle and min_size from end of firmware blob */
       
  1334 	timer = fw->data[UCODE_SIZE * 4];
       
  1335 	bundle = fw->data[UCODE_SIZE * 4 + 1];
       
  1336 	min_size = fw->data[UCODE_SIZE * 4 + 2];
       
  1337 
       
  1338 	if (timer >= UCODE_SIZE || bundle >= UCODE_SIZE ||
       
  1339 	    min_size >= UCODE_SIZE) {
       
  1340 		DPRINTK(PROBE, ERR,
       
  1341 			"\"%s\" has bogus offset values (0x%x,0x%x,0x%x)\n",
       
  1342 			fw_name, timer, bundle, min_size);
       
  1343 		release_firmware(fw);
       
  1344 		return ERR_PTR(-EINVAL);
       
  1345 	}
       
  1346 	/* OK, firmware is validated and ready to use... */
       
  1347 	return fw;
       
  1348 }
       
  1349 
       
  1350 static void e100_setup_ucode(struct nic *nic, struct cb *cb,
       
  1351 			     struct sk_buff *skb)
       
  1352 {
       
  1353 	const struct firmware *fw = (void *)skb;
       
  1354 	u8 timer, bundle, min_size;
       
  1355 
       
  1356 	/* It's not a real skb; we just abused the fact that e100_exec_cb
       
  1357 	   will pass it through to here... */
       
  1358 	cb->skb = NULL;
       
  1359 
       
  1360 	/* firmware is stored as little endian already */
       
  1361 	memcpy(cb->u.ucode, fw->data, UCODE_SIZE * 4);
       
  1362 
       
  1363 	/* Read timer, bundle and min_size from end of firmware blob */
       
  1364 	timer = fw->data[UCODE_SIZE * 4];
       
  1365 	bundle = fw->data[UCODE_SIZE * 4 + 1];
       
  1366 	min_size = fw->data[UCODE_SIZE * 4 + 2];
       
  1367 
       
  1368 	/* Insert user-tunable settings in cb->u.ucode */
       
  1369 	cb->u.ucode[timer] &= cpu_to_le32(0xFFFF0000);
       
  1370 	cb->u.ucode[timer] |= cpu_to_le32(INTDELAY);
       
  1371 	cb->u.ucode[bundle] &= cpu_to_le32(0xFFFF0000);
       
  1372 	cb->u.ucode[bundle] |= cpu_to_le32(BUNDLEMAX);
       
  1373 	cb->u.ucode[min_size] &= cpu_to_le32(0xFFFF0000);
       
  1374 	cb->u.ucode[min_size] |= cpu_to_le32((BUNDLESMALL) ? 0xFFFF : 0xFF80);
       
  1375 
       
  1376 	cb->command = cpu_to_le16(cb_ucode | cb_el);
       
  1377 }
       
  1378 
       
/* Queue the microcode-load CB (via e100_setup_ucode) and busy-wait for the
 * device to complete it.
 *
 * Returns 0 on success -- including the case where the chip needs no
 * microcode at all -- or a negative errno on failure.  Note the subtle
 * contract below: e100_request_firmware() returning NULL means "no ucode
 * required", and PTR_ERR(NULL) evaluates to 0, so the early return covers
 * both the error and the no-op cases. */
static inline int e100_load_ucode_wait(struct nic *nic)
{
	const struct firmware *fw;
	int err = 0, counter = 50;
	/* The ucode CB we just queued is the next one to be cleaned;
	 * poll its status word for completion. */
	struct cb *cb = nic->cb_to_clean;

	fw = e100_request_firmware(nic);
	/* If it's NULL, then no ucode is required (PTR_ERR(NULL) == 0) */
	if (!fw || IS_ERR(fw))
		return PTR_ERR(fw);

	if ((err = e100_exec_cb(nic, (void *)fw, e100_setup_ucode)))
		DPRINTK(PROBE,ERR, "ucode cmd failed with error %d\n", err);

	/* must restart cuc */
	nic->cuc_cmd = cuc_start;

	/* wait for completion */
	e100_write_flush(nic);
	udelay(10);

	/* wait for possibly (ouch) 500ms; 50 iterations x 10ms sleep */
	while (!(cb->status & cpu_to_le16(cb_complete))) {
		msleep(10);
		if (!--counter) break;
	}

	/* ack any interrupts, something could have been set */
	iowrite8(~0, &nic->csr->scb.stat_ack);

	/* if the command failed (timeout, or completed without cb_ok),
	 * notify and return */
	if (!counter || !(cb->status & cpu_to_le16(cb_ok))) {
		DPRINTK(PROBE,ERR, "ucode load failed\n");
		err = -EPERM;
	}

	return err;
}
       
  1417 
       
  1418 static void e100_setup_iaaddr(struct nic *nic, struct cb *cb,
       
  1419 	struct sk_buff *skb)
       
  1420 {
       
  1421 	cb->command = cpu_to_le16(cb_iaaddr);
       
  1422 	memcpy(cb->u.iaaddr, nic->netdev->dev_addr, ETH_ALEN);
       
  1423 }
       
  1424 
       
  1425 static void e100_dump(struct nic *nic, struct cb *cb, struct sk_buff *skb)
       
  1426 {
       
  1427 	cb->command = cpu_to_le16(cb_dump);
       
  1428 	cb->u.dump_buffer_addr = cpu_to_le32(nic->dma_addr +
       
  1429 		offsetof(struct mem, dump_buf));
       
  1430 }
       
  1431 
       
  1432 static int e100_phy_check_without_mii(struct nic *nic)
       
  1433 {
       
  1434 	u8 phy_type;
       
  1435 	int without_mii;
       
  1436 
       
  1437 	phy_type = (nic->eeprom[eeprom_phy_iface] >> 8) & 0x0f;
       
  1438 
       
  1439 	switch (phy_type) {
       
  1440 	case NoSuchPhy: /* Non-MII PHY; UNTESTED! */
       
  1441 	case I82503: /* Non-MII PHY; UNTESTED! */
       
  1442 	case S80C24: /* Non-MII PHY; tested and working */
       
  1443 		/* paragraph from the FreeBSD driver, "FXP_PHY_80C24":
       
  1444 		 * The Seeq 80c24 AutoDUPLEX(tm) Ethernet Interface Adapter
       
  1445 		 * doesn't have a programming interface of any sort.  The
       
  1446 		 * media is sensed automatically based on how the link partner
       
  1447 		 * is configured.  This is, in essence, manual configuration.
       
  1448 		 */
       
  1449 		DPRINTK(PROBE, INFO,
       
  1450 			 "found MII-less i82503 or 80c24 or other PHY\n");
       
  1451 
       
  1452 		nic->mdio_ctrl = mdio_ctrl_phy_mii_emulated;
       
  1453 		nic->mii.phy_id = 0; /* is this ok for an MII-less PHY? */
       
  1454 
       
  1455 		/* these might be needed for certain MII-less cards...
       
  1456 		 * nic->flags |= ich;
       
  1457 		 * nic->flags |= ich_10h_workaround; */
       
  1458 
       
  1459 		without_mii = 1;
       
  1460 		break;
       
  1461 	default:
       
  1462 		without_mii = 0;
       
  1463 		break;
       
  1464 	}
       
  1465 	return without_mii;
       
  1466 }
       
  1467 
       
  1468 #define NCONFIG_AUTO_SWITCH	0x0080
       
  1469 #define MII_NSC_CONG		MII_RESV1
       
  1470 #define NSC_CONG_ENABLE		0x0100
       
  1471 #define NSC_CONG_TXREADY	0x0400
       
  1472 #define ADVERTISE_FC_SUPPORTED	0x0400
       
/* Discover and initialize the PHY attached to the MAC.
 *
 * Probes all 32 MDIO addresses (address 1 first, as it is the most
 * common), records the PHY ID, isolates all other PHYs, applies
 * per-PHY-model workarounds (National tx PHYs, 82552, MDI/MDI-X
 * auto-switching), and returns 0 on success.  Returns -EAGAIN if no
 * usable PHY can be located at all. */
static int e100_phy_init(struct nic *nic)
{
	struct net_device *netdev = nic->netdev;
	u32 addr;
	u16 bmcr, stat, id_lo, id_hi, cong;

	/* Discover phy addr by searching addrs in order {1,0,2,..., 31} */
	for (addr = 0; addr < 32; addr++) {
		nic->mii.phy_id = (addr == 0) ? 1 : (addr == 1) ? 0 : addr;
		bmcr = mdio_read(netdev, nic->mii.phy_id, MII_BMCR);
		/* BMSR is latched; read twice to get the current state */
		stat = mdio_read(netdev, nic->mii.phy_id, MII_BMSR);
		stat = mdio_read(netdev, nic->mii.phy_id, MII_BMSR);
		/* all-ones or all-zeros means nothing responded here */
		if (!((bmcr == 0xFFFF) || ((stat == 0) && (bmcr == 0))))
			break;
	}
	if (addr == 32) {
		/* uhoh, no PHY detected: check whether we seem to be some
		 * weird, rare variant which is *known* to not have any MII.
		 * But do this AFTER MII checking only, since this does
		 * lookup of EEPROM values which may easily be unreliable. */
		if (e100_phy_check_without_mii(nic))
			return 0; /* simply return and hope for the best */
		else {
			/* for unknown cases log a fatal error */
			DPRINTK(HW, ERR,
				"Failed to locate any known PHY, aborting.\n");
			return -EAGAIN;
		}
	} else
		DPRINTK(HW, DEBUG, "phy_addr = %d\n", nic->mii.phy_id);

	/* Get phy ID */
	id_lo = mdio_read(netdev, nic->mii.phy_id, MII_PHYSID1);
	id_hi = mdio_read(netdev, nic->mii.phy_id, MII_PHYSID2);
	nic->phy = (u32)id_hi << 16 | (u32)id_lo;
	DPRINTK(HW, DEBUG, "phy ID = 0x%08X\n", nic->phy);

	/* Select the phy and isolate the rest.  For the 82552 the
	 * un-isolate of the selected phy is deferred (see below), so
	 * bmcr keeps the value read in this loop for later use. */
	for (addr = 0; addr < 32; addr++) {
		if (addr != nic->mii.phy_id) {
			mdio_write(netdev, addr, MII_BMCR, BMCR_ISOLATE);
		} else if (nic->phy != phy_82552_v) {
			bmcr = mdio_read(netdev, addr, MII_BMCR);
			mdio_write(netdev, addr, MII_BMCR,
				bmcr & ~BMCR_ISOLATE);
		}
	}
	/*
	 * Workaround for 82552:
	 * Clear the ISOLATE bit on selected phy_id last (mirrored on all
	 * other phy_id's) using bmcr value from addr discovery loop above.
	 */
	if (nic->phy == phy_82552_v)
		mdio_write(netdev, nic->mii.phy_id, MII_BMCR,
			bmcr & ~BMCR_ISOLATE);

	/* Handle National tx phys */
#define NCS_PHY_MODEL_MASK	0xFFF0FFFF
	if ((nic->phy & NCS_PHY_MODEL_MASK) == phy_nsc_tx) {
		/* Disable congestion control */
		cong = mdio_read(netdev, nic->mii.phy_id, MII_NSC_CONG);
		cong |= NSC_CONG_TXREADY;
		cong &= ~NSC_CONG_ENABLE;
		mdio_write(netdev, nic->mii.phy_id, MII_NSC_CONG, cong);
	}

	if (nic->phy == phy_82552_v) {
		u16 advert = mdio_read(netdev, nic->mii.phy_id, MII_ADVERTISE);

		/* assign special tweaked mdio_ctrl() function */
		nic->mdio_ctrl = mdio_ctrl_phy_82552_v;

		/* Workaround Si not advertising flow-control during autoneg */
		advert |= ADVERTISE_PAUSE_CAP | ADVERTISE_PAUSE_ASYM;
		mdio_write(netdev, nic->mii.phy_id, MII_ADVERTISE, advert);

		/* Reset for the above changes to take effect */
		bmcr = mdio_read(netdev, nic->mii.phy_id, MII_BMCR);
		bmcr |= BMCR_RESET;
		mdio_write(netdev, nic->mii.phy_id, MII_BMCR, bmcr);
	} else if ((nic->mac >= mac_82550_D102) || ((nic->flags & ich) &&
	   (mdio_read(netdev, nic->mii.phy_id, MII_TPISTATUS) & 0x8000) &&
		!(nic->eeprom[eeprom_cnfg_mdix] & eeprom_mdix_enabled))) {
		/* enable/disable MDI/MDI-X auto-switching. */
		mdio_write(netdev, nic->mii.phy_id, MII_NCONFIG,
				nic->mii.force_media ? 0 : NCONFIG_AUTO_SWITCH);
	}

	return 0;
}
       
  1563 
       
/* Bring the hardware from reset to a fully configured, quiescent state.
 *
 * The command sequence (self-test, PHY init, CU/RU base load, microcode,
 * configure, station address, stats dump address) follows the 8255x
 * initialization order and must not be reordered.  Returns 0 on success
 * or the first failing step's negative errno.  IRQs are left disabled;
 * the caller enables them when ready. */
static int e100_hw_init(struct nic *nic)
{
	int err;

	e100_hw_reset(nic);

	/* NOTE(review): trace message logged at ERR level -- matches the
	 * stock e100 driver, kept as-is. */
	DPRINTK(HW, ERR, "e100_hw_init\n");
	/* self-test sleeps, so skip it when called from atomic context */
	if (!in_interrupt() && (err = e100_self_test(nic)))
		return err;

	if ((err = e100_phy_init(nic)))
		return err;
	/* load base (0) for the CU and RU linear address spaces */
	if ((err = e100_exec_cmd(nic, cuc_load_base, 0)))
		return err;
	if ((err = e100_exec_cmd(nic, ruc_load_base, 0)))
		return err;
	if ((err = e100_load_ucode_wait(nic)))
		return err;
	if ((err = e100_exec_cb(nic, NULL, e100_configure)))
		return err;
	if ((err = e100_exec_cb(nic, NULL, e100_setup_iaaddr)))
		return err;
	/* point the chip's statistics dump at mem::stats and reset them */
	if ((err = e100_exec_cmd(nic, cuc_dump_addr,
		nic->dma_addr + offsetof(struct mem, stats))))
		return err;
	if ((err = e100_exec_cmd(nic, cuc_dump_reset, 0)))
		return err;

	e100_disable_irq(nic);

	return 0;
}
       
  1596 
       
  1597 static void e100_multi(struct nic *nic, struct cb *cb, struct sk_buff *skb)
       
  1598 {
       
  1599 	struct net_device *netdev = nic->netdev;
       
  1600 	struct dev_mc_list *list = netdev->mc_list;
       
  1601 	u16 i, count = min(netdev->mc_count, E100_MAX_MULTICAST_ADDRS);
       
  1602 
       
  1603 	cb->command = cpu_to_le16(cb_multi);
       
  1604 	cb->u.multi.count = cpu_to_le16(count * ETH_ALEN);
       
  1605 	for (i = 0; list && i < count; i++, list = list->next)
       
  1606 		memcpy(&cb->u.multi.addr[i*ETH_ALEN], &list->dmi_addr,
       
  1607 			ETH_ALEN);
       
  1608 }
       
  1609 
       
  1610 static void e100_set_multicast_list(struct net_device *netdev)
       
  1611 {
       
  1612 	struct nic *nic = netdev_priv(netdev);
       
  1613 
       
  1614 	DPRINTK(HW, DEBUG, "mc_count=%d, flags=0x%04X\n",
       
  1615 		netdev->mc_count, netdev->flags);
       
  1616 
       
  1617 	if (netdev->flags & IFF_PROMISC)
       
  1618 		nic->flags |= promiscuous;
       
  1619 	else
       
  1620 		nic->flags &= ~promiscuous;
       
  1621 
       
  1622 	if (netdev->flags & IFF_ALLMULTI ||
       
  1623 		netdev->mc_count > E100_MAX_MULTICAST_ADDRS)
       
  1624 		nic->flags |= multicast_all;
       
  1625 	else
       
  1626 		nic->flags &= ~multicast_all;
       
  1627 
       
  1628 	e100_exec_cb(nic, NULL, e100_configure);
       
  1629 	e100_exec_cb(nic, NULL, e100_multi);
       
  1630 }
       
  1631 
       
/* Harvest the statistics block the chip DMAed into shared memory and fold
 * it into the net_device stats, then kick off the next dump+reset.
 *
 * The dump is asynchronous: each call consumes the results of the
 * *previous* cuc_dump_reset command (if complete) and issues a new one,
 * so stats are always one cycle behind. */
static void e100_update_stats(struct nic *nic)
{
	struct net_device *dev = nic->netdev;
	struct net_device_stats *ns = &dev->stats;
	struct stats *s = &nic->mem->stats;
	/* The chip writes its completion marker after the last counter it
	 * supports, which grew over MAC generations: pre-82558 ends at
	 * fc_xmt_pause, pre-82559 at xmt_tco_frames, newer at complete. */
	__le32 *complete = (nic->mac < mac_82558_D101_A4) ? &s->fc_xmt_pause :
		(nic->mac < mac_82559_D101M) ? (__le32 *)&s->xmt_tco_frames :
		&s->complete;

	/* Device's stats reporting may take several microseconds to
	 * complete, so we're always waiting for results of the
	 * previous command. */

	if (*complete == cpu_to_le32(cuc_dump_reset_complete)) {
		*complete = 0;
		nic->tx_frames = le32_to_cpu(s->tx_good_frames);
		nic->tx_collisions = le32_to_cpu(s->tx_total_collisions);
		ns->tx_aborted_errors += le32_to_cpu(s->tx_max_collisions);
		ns->tx_window_errors += le32_to_cpu(s->tx_late_collisions);
		ns->tx_carrier_errors += le32_to_cpu(s->tx_lost_crs);
		ns->tx_fifo_errors += le32_to_cpu(s->tx_underruns);
		ns->collisions += nic->tx_collisions;
		ns->tx_errors += le32_to_cpu(s->tx_max_collisions) +
			le32_to_cpu(s->tx_lost_crs);
		ns->rx_length_errors += le32_to_cpu(s->rx_short_frame_errors) +
			nic->rx_over_length_errors;
		ns->rx_crc_errors += le32_to_cpu(s->rx_crc_errors);
		ns->rx_frame_errors += le32_to_cpu(s->rx_alignment_errors);
		ns->rx_over_errors += le32_to_cpu(s->rx_overrun_errors);
		ns->rx_fifo_errors += le32_to_cpu(s->rx_overrun_errors);
		ns->rx_missed_errors += le32_to_cpu(s->rx_resource_errors);
		ns->rx_errors += le32_to_cpu(s->rx_crc_errors) +
			le32_to_cpu(s->rx_alignment_errors) +
			le32_to_cpu(s->rx_short_frame_errors) +
			le32_to_cpu(s->rx_cdt_errors);
		nic->tx_deferred += le32_to_cpu(s->tx_deferred);
		nic->tx_single_collisions +=
			le32_to_cpu(s->tx_single_collisions);
		nic->tx_multiple_collisions +=
			le32_to_cpu(s->tx_multiple_collisions);
		/* flow-control counters exist only from 82558 on */
		if (nic->mac >= mac_82558_D101_A4) {
			nic->tx_fc_pause += le32_to_cpu(s->fc_xmt_pause);
			nic->rx_fc_pause += le32_to_cpu(s->fc_rcv_pause);
			nic->rx_fc_unsupported +=
				le32_to_cpu(s->fc_rcv_unsupported);
			/* TCO counters exist only from 82559 on */
			if (nic->mac >= mac_82559_D101M) {
				nic->tx_tco_frames +=
					le16_to_cpu(s->xmt_tco_frames);
				nic->rx_tco_frames +=
					le16_to_cpu(s->rcv_tco_frames);
			}
		}
	}


	/* start the next dump cycle; counters are reset on the chip side */
	if (e100_exec_cmd(nic, cuc_dump_reset, 0))
		DPRINTK(TX_ERR, DEBUG, "exec cuc_dump_reset failed\n");
}
       
  1690 
       
  1691 static void e100_adjust_adaptive_ifs(struct nic *nic, int speed, int duplex)
       
  1692 {
       
  1693 	/* Adjust inter-frame-spacing (IFS) between two transmits if
       
  1694 	 * we're getting collisions on a half-duplex connection. */
       
  1695 
       
  1696 	if (duplex == DUPLEX_HALF) {
       
  1697 		u32 prev = nic->adaptive_ifs;
       
  1698 		u32 min_frames = (speed == SPEED_100) ? 1000 : 100;
       
  1699 
       
  1700 		if ((nic->tx_frames / 32 < nic->tx_collisions) &&
       
  1701 		   (nic->tx_frames > min_frames)) {
       
  1702 			if (nic->adaptive_ifs < 60)
       
  1703 				nic->adaptive_ifs += 5;
       
  1704 		} else if (nic->tx_frames < min_frames) {
       
  1705 			if (nic->adaptive_ifs >= 5)
       
  1706 				nic->adaptive_ifs -= 5;
       
  1707 		}
       
  1708 		if (nic->adaptive_ifs != prev)
       
  1709 			e100_exec_cb(nic, NULL, e100_configure);
       
  1710 	}
       
  1711 }
       
  1712 
       
/* Periodic watchdog timer callback.
 *
 * In EtherCAT mode (nic->ecdev) only the link state is reported to the
 * EtherCAT master -- note the timer is NOT rearmed in that branch, so
 * rearming must happen elsewhere in ecdev operation (TODO confirm against
 * the ecdev poll path).  In normal operation it maintains carrier state,
 * triggers a software interrupt to recover from Rx allocation failures,
 * refreshes statistics, adapts IFS, and applies 557/ICH workarounds. */
static void e100_watchdog(unsigned long data)
{
	struct nic *nic = (struct nic *)data;
	struct ethtool_cmd cmd;

	DPRINTK(TIMER, DEBUG, "right now = %ld\n", jiffies);

	/* mii library handles link maintenance tasks */

    if (nic->ecdev) {
		/* EtherCAT mode: just forward the PHY link state */
    	ecdev_set_link(nic->ecdev, mii_link_ok(&nic->mii) ? 1 : 0);
	} else {
		mii_ethtool_gset(&nic->mii, &cmd);

		if (mii_link_ok(&nic->mii) && !netif_carrier_ok(nic->netdev)) {
			printk(KERN_INFO "e100: %s NIC Link is Up %s Mbps %s Duplex\n",
					nic->netdev->name,
					cmd.speed == SPEED_100 ? "100" : "10",
					cmd.duplex == DUPLEX_FULL ? "Full" : "Half");
		} else if (!mii_link_ok(&nic->mii) && netif_carrier_ok(nic->netdev)) {
			printk(KERN_INFO "e100: %s NIC Link is Down\n",
					nic->netdev->name);
		}

		/* sync netif_carrier state with the PHY */
		mii_check_link(&nic->mii);

		/* Software generated interrupt to recover from (rare) Rx
		 * allocation failure.
		 * Unfortunately have to use a spinlock to not re-enable interrupts
		 * accidentally, due to hardware that shares a register between the
		 * interrupt mask bit and the SW Interrupt generation bit */
		spin_lock_irq(&nic->cmd_lock);
		iowrite8(ioread8(&nic->csr->scb.cmd_hi) | irq_sw_gen,&nic->csr->scb.cmd_hi);
		e100_write_flush(nic);
		spin_unlock_irq(&nic->cmd_lock);

		e100_update_stats(nic);
		e100_adjust_adaptive_ifs(nic, cmd.speed, cmd.duplex);

		if (nic->mac <= mac_82557_D100_C)
			/* Issue a multicast command to workaround a 557 lock up */
			e100_set_multicast_list(nic->netdev);

		if (nic->flags & ich && cmd.speed==SPEED_10 && cmd.duplex==DUPLEX_HALF)
			/* Need SW workaround for ICH[x] 10Mbps/half duplex Tx hang. */
			nic->flags |= ich_10h_workaround;
		else
			nic->flags &= ~ich_10h_workaround;

		/* rearm ourselves for the next period */
		mod_timer(&nic->watchdog,
				round_jiffies(jiffies + E100_WATCHDOG_PERIOD));
	}
}
       
  1766 
       
/* CB setup callback: fill a transmit command block for the given skb,
 * mapping the skb data for device DMA (single fragment only). */
static void e100_xmit_prepare(struct nic *nic, struct cb *cb,
	struct sk_buff *skb)
{
	cb->command = nic->tx_command;
	/* interrupt every 16 packets regardless of delay */
	if ((nic->cbs_avail & ~15) == nic->cbs_avail)
		cb->command |= cpu_to_le16(cb_i);
	/* the single TBD lives inline, right after the TCB in this cb */
	cb->u.tcb.tbd_array = cb->dma_addr + offsetof(struct cb, u.tcb.tbd);
	cb->u.tcb.tcb_byte_count = 0;
	cb->u.tcb.threshold = nic->tx_threshold;
	cb->u.tcb.tbd_count = 1;
	cb->u.tcb.tbd.buf_addr = cpu_to_le32(pci_map_single(nic->pdev,
		skb->data, skb->len, PCI_DMA_TODEVICE));
	/* check for mapping failure? NOTE(review): pci_map_single result
	 * is not checked here; a failed mapping would be handed to the
	 * device as a bogus bus address. */
	cb->u.tcb.tbd.size = cpu_to_le16(skb->len);
}
       
  1783 
       
  1784 static netdev_tx_t e100_xmit_frame(struct sk_buff *skb,
       
  1785 				   struct net_device *netdev)
       
  1786 {
       
  1787 	struct nic *nic = netdev_priv(netdev);
       
  1788 	int err;
       
  1789 
       
  1790 	if (nic->flags & ich_10h_workaround) {
       
  1791 		/* SW workaround for ICH[x] 10Mbps/half duplex Tx hang.
       
  1792 		   Issue a NOP command followed by a 1us delay before
       
  1793 		   issuing the Tx command. */
       
  1794 		if (e100_exec_cmd(nic, cuc_nop, 0))
       
  1795 			DPRINTK(TX_ERR, DEBUG, "exec cuc_nop failed\n");
       
  1796 		udelay(1);
       
  1797 	}
       
  1798 
       
  1799 	err = e100_exec_cb(nic, skb, e100_xmit_prepare);
       
  1800 
       
  1801 	switch (err) {
       
  1802 	case -ENOSPC:
       
  1803 		/* We queued the skb, but now we're out of space. */
       
  1804 		DPRINTK(TX_ERR, DEBUG, "No space for CB\n");
       
  1805 		if (!nic->ecdev)
       
  1806 			netif_stop_queue(netdev);
       
  1807 		break;
       
  1808 	case -ENOMEM:
       
  1809 		/* This is a hard error - log it. */
       
  1810 		DPRINTK(TX_ERR, DEBUG, "Out of Tx resources, returning skb\n");
       
  1811 		if (!nic->ecdev)
       
  1812 			netif_stop_queue(netdev);
       
  1813 		return NETDEV_TX_BUSY;
       
  1814 	}
       
  1815 
       
  1816 	netdev->trans_start = jiffies;
       
  1817 	return NETDEV_TX_OK;
       
  1818 }
       
  1819 
       
/* Reclaim transmit command blocks that the device has completed.
 *
 * Walks the CB ring from cb_to_clean, unmapping DMA buffers, updating
 * Tx counters and freeing skbs (skbs are not freed in EtherCAT mode --
 * the ecdev layer owns them).  Returns nonzero if anything was cleaned.
 * In EtherCAT mode the cb_lock is not taken: the caller is assumed to
 * serialize access (TODO confirm against the ecdev poll path). */
static int e100_tx_clean(struct nic *nic)
{
	struct net_device *dev = nic->netdev;
	struct cb *cb;
	int tx_cleaned = 0;

	if (!nic->ecdev)
		spin_lock(&nic->cb_lock);

	/* Clean CBs marked complete */
	for (cb = nic->cb_to_clean;
	    cb->status & cpu_to_le16(cb_complete);
	    cb = nic->cb_to_clean = cb->next) {
		DPRINTK(TX_DONE, DEBUG, "cb[%d]->status = 0x%04X\n",
		        (int)(((void*)cb - (void*)nic->cbs)/sizeof(struct cb)),
		        cb->status);

		/* non-Tx CBs (configure, multicast, ...) carry no skb */
		if (likely(cb->skb != NULL)) {
			dev->stats.tx_packets++;
			dev->stats.tx_bytes += cb->skb->len;

			pci_unmap_single(nic->pdev,
				le32_to_cpu(cb->u.tcb.tbd.buf_addr),
				le16_to_cpu(cb->u.tcb.tbd.size),
				PCI_DMA_TODEVICE);
			if (!nic->ecdev)
				dev_kfree_skb_any(cb->skb);
			cb->skb = NULL;
			tx_cleaned = 1;
		}
		cb->status = 0;
		nic->cbs_avail++;
	}

	if (!nic->ecdev) {
		spin_unlock(&nic->cb_lock);

		/* Recover from running out of Tx resources in xmit_frame */
		if (unlikely(tx_cleaned && netif_queue_stopped(nic->netdev)))
			netif_wake_queue(nic->netdev);
	}

	return tx_cleaned;
}
       
  1864 
       
  1865 static void e100_clean_cbs(struct nic *nic)
       
  1866 {
       
  1867 	if (nic->cbs) {
       
  1868 		while (nic->cbs_avail != nic->params.cbs.count) {
       
  1869 			struct cb *cb = nic->cb_to_clean;
       
  1870 			if (cb->skb) {
       
  1871 				pci_unmap_single(nic->pdev,
       
  1872 					le32_to_cpu(cb->u.tcb.tbd.buf_addr),
       
  1873 					le16_to_cpu(cb->u.tcb.tbd.size),
       
  1874 					PCI_DMA_TODEVICE);
       
  1875 				if (!nic->ecdev)
       
  1876 					dev_kfree_skb(cb->skb);
       
  1877 			}
       
  1878 			nic->cb_to_clean = nic->cb_to_clean->next;
       
  1879 			nic->cbs_avail++;
       
  1880 		}
       
  1881 		pci_pool_free(nic->cbs_pool, nic->cbs, nic->cbs_dma_addr);
       
  1882 		nic->cbs = NULL;
       
  1883 		nic->cbs_avail = 0;
       
  1884 	}
       
  1885 	nic->cuc_cmd = cuc_start;
       
  1886 	nic->cb_to_use = nic->cb_to_send = nic->cb_to_clean =
       
  1887 		nic->cbs;
       
  1888 }
       
  1889 
       
  1890 static int e100_alloc_cbs(struct nic *nic)
       
  1891 {
       
  1892 	struct cb *cb;
       
  1893 	unsigned int i, count = nic->params.cbs.count;
       
  1894 
       
  1895 	nic->cuc_cmd = cuc_start;
       
  1896 	nic->cb_to_use = nic->cb_to_send = nic->cb_to_clean = NULL;
       
  1897 	nic->cbs_avail = 0;
       
  1898 
       
  1899 	nic->cbs = pci_pool_alloc(nic->cbs_pool, GFP_KERNEL,
       
  1900 				  &nic->cbs_dma_addr);
       
  1901 	if (!nic->cbs)
       
  1902 		return -ENOMEM;
       
  1903 	memset(nic->cbs, 0, count * sizeof(struct cb));
       
  1904 
       
  1905 	for (cb = nic->cbs, i = 0; i < count; cb++, i++) {
       
  1906 		cb->next = (i + 1 < count) ? cb + 1 : nic->cbs;
       
  1907 		cb->prev = (i == 0) ? nic->cbs + count - 1 : cb - 1;
       
  1908 
       
  1909 		cb->dma_addr = nic->cbs_dma_addr + i * sizeof(struct cb);
       
  1910 		cb->link = cpu_to_le32(nic->cbs_dma_addr +
       
  1911 			((i+1) % count) * sizeof(struct cb));
       
  1912 	}
       
  1913 
       
  1914 	nic->cb_to_use = nic->cb_to_send = nic->cb_to_clean = nic->cbs;
       
  1915 	nic->cbs_avail = count;
       
  1916 
       
  1917 	return 0;
       
  1918 }
       
  1919 
       
  1920 static inline void e100_start_receiver(struct nic *nic, struct rx *rx)
       
  1921 {
       
  1922 	if (!nic->rxs) return;
       
  1923 	if (RU_SUSPENDED != nic->ru_running) return;
       
  1924 
       
  1925 	/* handle init time starts */
       
  1926 	if (!rx) rx = nic->rxs;
       
  1927 
       
  1928 	/* (Re)start RU if suspended or idle and RFA is non-NULL */
       
  1929 	if (rx->skb) {
       
  1930 		e100_exec_cmd(nic, ruc_start, rx->dma_addr);
       
  1931 		nic->ru_running = RU_RUNNING;
       
  1932 	}
       
  1933 }
       
  1934 
       
  1935 #define RFD_BUF_LEN (sizeof(struct rfd) + VLAN_ETH_FRAME_LEN)
       
/* Allocate and DMA-map a receive buffer for one Rx ring slot, and chain
 * it onto the end of the hardware RFA.  Returns 0 or -ENOMEM. */
static int e100_rx_alloc_skb(struct nic *nic, struct rx *rx)
{
	if (!(rx->skb = netdev_alloc_skb(nic->netdev, RFD_BUF_LEN + NET_IP_ALIGN)))
		return -ENOMEM;

	/* Align, init, and map the RFD.  The RFD header is seeded from the
	 * pre-built blank template so the device sees a clean descriptor. */
	skb_reserve(rx->skb, NET_IP_ALIGN);
	skb_copy_to_linear_data(rx->skb, &nic->blank_rfd, sizeof(struct rfd));
	/* bidirectional: the device both reads the RFD header and writes
	 * status + received data into the same buffer */
	rx->dma_addr = pci_map_single(nic->pdev, rx->skb->data,
		RFD_BUF_LEN, PCI_DMA_BIDIRECTIONAL);

	if (pci_dma_mapping_error(nic->pdev, rx->dma_addr)) {
		dev_kfree_skb_any(rx->skb);
		rx->skb = NULL;
		rx->dma_addr = 0;
		return -ENOMEM;
	}

	/* Link the RFD to end of RFA by linking previous RFD to
	 * this one.  We are safe to touch the previous RFD because
	 * it is protected by the before last buffer's el bit being set */
	if (rx->prev->skb) {
		struct rfd *prev_rfd = (struct rfd *)rx->prev->skb->data;
		/* link field may be unaligned within the skb data */
		put_unaligned_le32(rx->dma_addr, &prev_rfd->link);
		pci_dma_sync_single_for_device(nic->pdev, rx->prev->dma_addr,
			sizeof(struct rfd), PCI_DMA_BIDIRECTIONAL);
	}

	return 0;
}
       
  1966 
       
/* Examine one Rx ring slot and, if the device completed it, hand the
 * frame up (to the network stack, or to the EtherCAT master in ecdev
 * mode).
 *
 * Returns 0 when a frame was consumed, -ENODATA when the slot is not yet
 * complete, and -EAGAIN when the work quota is exhausted.  In ecdev mode
 * the skb is recycled in place (re-seeded with a blank RFD and remapped)
 * instead of being surrendered to the stack. */
static int e100_rx_indicate(struct nic *nic, struct rx *rx,
	unsigned int *work_done, unsigned int work_to_do)
{
	struct net_device *dev = nic->netdev;
	struct sk_buff *skb = rx->skb;
	struct rfd *rfd = (struct rfd *)skb->data;
	u16 rfd_status, actual_size;

	/* quota check only applies when the caller tracks work (NAPI) */
	if (unlikely(work_done && *work_done >= work_to_do))
		return -EAGAIN;

	/* Need to sync before taking a peek at cb_complete bit */
	pci_dma_sync_single_for_cpu(nic->pdev, rx->dma_addr,
		sizeof(struct rfd), PCI_DMA_BIDIRECTIONAL);
	rfd_status = le16_to_cpu(rfd->status);

	DPRINTK(RX_STATUS, DEBUG, "status=0x%04X\n", rfd_status);

	/* If data isn't ready, nothing to indicate */
	if (unlikely(!(rfd_status & cb_complete))) {
		/* If the next buffer has the el bit, but we think the receiver
		 * is still running, check to see if it really stopped while
		 * we had interrupts off.
		 * This allows for a fast restart without re-enabling
		 * interrupts */
		if ((le16_to_cpu(rfd->command) & cb_el) &&
		    (RU_RUNNING == nic->ru_running))

			if (ioread8(&nic->csr->scb.status) & rus_no_res)
				nic->ru_running = RU_SUSPENDED;
		/* give the descriptor back to the device untouched */
		pci_dma_sync_single_for_device(nic->pdev, rx->dma_addr,
					       sizeof(struct rfd),
					       PCI_DMA_FROMDEVICE);
		return -ENODATA;
	}

	/* Get actual data size (low 14 bits of the count field),
	 * clamped to the buffer capacity for safety */
	actual_size = le16_to_cpu(rfd->actual_size) & 0x3FFF;
	if (unlikely(actual_size > RFD_BUF_LEN - sizeof(struct rfd)))
		actual_size = RFD_BUF_LEN - sizeof(struct rfd);

	/* Get data */
	pci_unmap_single(nic->pdev, rx->dma_addr,
		RFD_BUF_LEN, PCI_DMA_BIDIRECTIONAL);

	/* If this buffer has the el bit, but we think the receiver
	 * is still running, check to see if it really stopped while
	 * we had interrupts off.
	 * This allows for a fast restart without re-enabling interrupts.
	 * This can happen when the RU sees the size change but also sees
	 * the el bit set. */
	if ((le16_to_cpu(rfd->command) & cb_el) &&
	    (RU_RUNNING == nic->ru_running)) {

	    if (ioread8(&nic->csr->scb.status) & rus_no_res)
		nic->ru_running = RU_SUSPENDED;
	}

	if (!nic->ecdev) {
		/* Pull off the RFD and put the actual data (minus eth hdr) */
		skb_reserve(skb, sizeof(struct rfd));
		skb_put(skb, actual_size);
		skb->protocol = eth_type_trans(skb, nic->netdev);
	}

	if (unlikely(!(rfd_status & cb_ok))) {
		if (!nic->ecdev) {
			/* Don't indicate if hardware indicates errors */
			dev_kfree_skb_any(skb);
		}
	} else if (actual_size > ETH_DATA_LEN + VLAN_ETH_HLEN) {
		/* Don't indicate oversized frames */
		nic->rx_over_length_errors++;
		if (!nic->ecdev)
			dev_kfree_skb_any(skb);
	} else {
		dev->stats.rx_packets++;
		dev->stats.rx_bytes += actual_size;
		if (nic->ecdev) {
			/* hand the raw frame (past the RFD header) to the
			 * EtherCAT master; the skb stays ours */
			ecdev_receive(nic->ecdev,
					skb->data + sizeof(struct rfd), actual_size);

			// No need to detect link status as
			// long as frames are received: Reset watchdog.
			nic->ec_watchdog_jiffies = jiffies;
		} else {
			netif_receive_skb(skb);
		}
		if (work_done)
			(*work_done)++;
	}

	if (nic->ecdev) {
		// make receive frame descriptior usable again
		memcpy(skb->data, &nic->blank_rfd, sizeof(struct rfd));
		rx->dma_addr = pci_map_single(nic->pdev, skb->data,
				RFD_BUF_LEN, PCI_DMA_BIDIRECTIONAL);
		/* NOTE(review): on mapping failure the slot is left with a
		 * zero dma_addr and no recovery path here */
		if (pci_dma_mapping_error(nic->pdev, rx->dma_addr)) {
			rx->dma_addr = 0;
		}

		/* Link the RFD to end of RFA by linking previous RFD to
		 * this one.  We are safe to touch the previous RFD because
		 * it is protected by the before last buffer's el bit being set */
		if (rx->prev->skb) {
			struct rfd *prev_rfd = (struct rfd *)rx->prev->skb->data;
			put_unaligned_le32(rx->dma_addr, &prev_rfd->link);
			pci_dma_sync_single_for_device(nic->pdev, rx->prev->dma_addr,
					sizeof(struct rfd), PCI_DMA_TODEVICE);
		}
	} else {
		/* stack owns the skb now; slot refilled by e100_rx_clean */
		rx->skb = NULL;
	}

	return 0;
}
       
  2083 
       
/* Indicate completed receive frames up the stack and replenish the RX
 * ring.  @work_done/@work_to_do implement the NAPI budget; @work_done
 * is NULL when called from the EtherCAT poll path (e100_ec_poll()).
 * Restarts the receive unit if it had stopped with "no resources". */
static void e100_rx_clean(struct nic *nic, unsigned int *work_done,
	unsigned int work_to_do)
{
	struct rx *rx;
	int restart_required = 0, err = 0;
	struct rx *old_before_last_rx, *new_before_last_rx;
	struct rfd *old_before_last_rfd, *new_before_last_rfd;

	/* Indicate newly arrived packets */
	for (rx = nic->rx_to_clean; rx->skb; rx = nic->rx_to_clean = rx->next) {
		err = e100_rx_indicate(nic, rx, work_done, work_to_do);
		/* Hit quota or no more to clean */
		if (-EAGAIN == err || -ENODATA == err)
			break;
	}


	/* On EAGAIN, hit quota so have more work to do, restart once
	 * cleanup is complete.
	 * Else, are we already rnr? then pay attention!!! this ensures that
	 * the state machine progression never allows a start with a
	 * partially cleaned list, avoiding a race between hardware
	 * and rx_to_clean when in NAPI mode */
	if (-EAGAIN != err && RU_SUSPENDED == nic->ru_running)
		restart_required = 1;

	old_before_last_rx = nic->rx_to_use->prev->prev;
	old_before_last_rfd = (struct rfd *)old_before_last_rx->skb->data;

	/* EtherCAT mode skips the refill: the ring's skbs are reused
	 * (presumably e100_rx_indicate() leaves them attached in that
	 * mode — confirm against the indicate path). */
	if (!nic->ecdev) {
		/* Alloc new skbs to refill list */
		for(rx = nic->rx_to_use; !rx->skb; rx = nic->rx_to_use = rx->next) {
			if(unlikely(e100_rx_alloc_skb(nic, rx)))
				break; /* Better luck next time (see watchdog) */
		}
	}

	new_before_last_rx = nic->rx_to_use->prev->prev;
	if (new_before_last_rx != old_before_last_rx) {
		/* Set the el-bit on the buffer that is before the last buffer.
		 * This lets us update the next pointer on the last buffer
		 * without worrying about hardware touching it.
		 * We set the size to 0 to prevent hardware from touching this
		 * buffer.
		 * When the hardware hits the before last buffer with el-bit
		 * and size of 0, it will RNR interrupt, the RUS will go into
		 * the No Resources state.  It will not complete nor write to
		 * this buffer. */
		new_before_last_rfd =
			(struct rfd *)new_before_last_rx->skb->data;
		new_before_last_rfd->size = 0;
		new_before_last_rfd->command |= cpu_to_le16(cb_el);
		pci_dma_sync_single_for_device(nic->pdev,
			new_before_last_rx->dma_addr, sizeof(struct rfd),
			PCI_DMA_BIDIRECTIONAL);

		/* Now that we have a new stopping point, we can clear the old
		 * stopping point.  We must sync twice to get the proper
		 * ordering on the hardware side of things. */
		old_before_last_rfd->command &= ~cpu_to_le16(cb_el);
		pci_dma_sync_single_for_device(nic->pdev,
			old_before_last_rx->dma_addr, sizeof(struct rfd),
			PCI_DMA_BIDIRECTIONAL);
		old_before_last_rfd->size = cpu_to_le16(VLAN_ETH_FRAME_LEN);
		pci_dma_sync_single_for_device(nic->pdev,
			old_before_last_rx->dma_addr, sizeof(struct rfd),
			PCI_DMA_BIDIRECTIONAL);
	}

	if (restart_required) {
		// ack the rnr?
		iowrite8(stat_ack_rnr, &nic->csr->scb.stat_ack);
		e100_start_receiver(nic, nic->rx_to_clean);
		if (work_done)
			(*work_done)++;
	}
}
       
  2161 
       
  2162 static void e100_rx_clean_list(struct nic *nic)
       
  2163 {
       
  2164 	struct rx *rx;
       
  2165 	unsigned int i, count = nic->params.rfds.count;
       
  2166 
       
  2167 	nic->ru_running = RU_UNINITIALIZED;
       
  2168 
       
  2169 	if (nic->rxs) {
       
  2170 		for (rx = nic->rxs, i = 0; i < count; rx++, i++) {
       
  2171 			if (rx->skb) {
       
  2172 				pci_unmap_single(nic->pdev, rx->dma_addr,
       
  2173 					RFD_BUF_LEN, PCI_DMA_BIDIRECTIONAL);
       
  2174 				dev_kfree_skb(rx->skb);
       
  2175 			}
       
  2176 		}
       
  2177 		kfree(nic->rxs);
       
  2178 		nic->rxs = NULL;
       
  2179 	}
       
  2180 
       
  2181 	nic->rx_to_use = nic->rx_to_clean = NULL;
       
  2182 }
       
  2183 
       
/* Build the circular RX descriptor list and attach an skb to every
 * entry.  Returns 0 on success or -ENOMEM, tearing down any partially
 * built list on failure.  Leaves the RU in RU_SUSPENDED; it is started
 * later via e100_start_receiver(). */
static int e100_rx_alloc_list(struct nic *nic)
{
	struct rx *rx;
	unsigned int i, count = nic->params.rfds.count;
	struct rfd *before_last;

	nic->rx_to_use = nic->rx_to_clean = NULL;
	nic->ru_running = RU_UNINITIALIZED;

	/* GFP_ATOMIC: this may run from non-sleeping contexts. */
	if (!(nic->rxs = kcalloc(count, sizeof(struct rx), GFP_ATOMIC)))
		return -ENOMEM;

	/* Link the entries into a doubly linked ring. */
	for (rx = nic->rxs, i = 0; i < count; rx++, i++) {
		rx->next = (i + 1 < count) ? rx + 1 : nic->rxs;
		rx->prev = (i == 0) ? nic->rxs + count - 1 : rx - 1;
		if (e100_rx_alloc_skb(nic, rx)) {
			e100_rx_clean_list(nic);
			return -ENOMEM;
		}
	}

	/* EtherCAT mode skips the el-bit stop marker (no RNR/NAPI
	 * handshake in polling mode — see e100_rx_clean()). */
	if (!nic->ecdev) {
		/* Set the el-bit on the buffer that is before the last buffer.
		 * This lets us update the next pointer on the last buffer without
		 * worrying about hardware touching it.
		 * We set the size to 0 to prevent hardware from touching this buffer.
		 * When the hardware hits the before last buffer with el-bit and size
		 * of 0, it will RNR interrupt, the RU will go into the No Resources
		 * state.  It will not complete nor write to this buffer. */
		rx = nic->rxs->prev->prev;
		before_last = (struct rfd *)rx->skb->data;
		before_last->command |= cpu_to_le16(cb_el);
		before_last->size = 0;
		pci_dma_sync_single_for_device(nic->pdev, rx->dma_addr,
				sizeof(struct rfd), PCI_DMA_BIDIRECTIONAL);
	}

	nic->rx_to_use = nic->rx_to_clean = nic->rxs;
	nic->ru_running = RU_SUSPENDED;

	return 0;
}
       
  2226 
       
/* Interrupt handler.  Acks all pending causes in one write; in normal
 * operation defers the work to NAPI, while in EtherCAT mode the cyclic
 * e100_ec_poll() performs the cleanup instead. */
static irqreturn_t e100_intr(int irq, void *dev_id)
{
	struct net_device *netdev = dev_id;
	struct nic *nic = netdev_priv(netdev);
	u8 stat_ack = ioread8(&nic->csr->scb.stat_ack);

	DPRINTK(INTR, DEBUG, "stat_ack = 0x%02X\n", stat_ack);

	if (stat_ack == stat_ack_not_ours ||	/* Not our interrupt */
	   stat_ack == stat_ack_not_present)	/* Hardware is ejected */
		return IRQ_NONE;

	/* Ack interrupt(s) */
	iowrite8(stat_ack, &nic->csr->scb.stat_ack);

	/* We hit Receive No Resource (RNR); restart RU after cleaning */
	if (stat_ack & stat_ack_rnr)
		nic->ru_running = RU_SUSPENDED;

	/* No NAPI context in EtherCAT mode; cleanup happens in
	 * e100_ec_poll(). */
	if (!nic->ecdev && likely(napi_schedule_prep(&nic->napi))) {
		e100_disable_irq(nic);
		__napi_schedule(&nic->napi);
	}

	return IRQ_HANDLED;
}
       
  2253 
       
  2254 void e100_ec_poll(struct net_device *netdev)
       
  2255 {
       
  2256 	struct nic *nic = netdev_priv(netdev);
       
  2257 
       
  2258 	e100_rx_clean(nic, NULL, 100);
       
  2259 	e100_tx_clean(nic);
       
  2260 
       
  2261     if (jiffies - nic->ec_watchdog_jiffies >= 2 * HZ) {
       
  2262         e100_watchdog((unsigned long) nic);
       
  2263         nic->ec_watchdog_jiffies = jiffies;
       
  2264     }
       
  2265 }
       
  2266 
       
  2267 
       
/* NAPI poll callback: clean received frames up to @budget and reap
 * finished transmits.  Interrupts are re-enabled only when the budget
 * was not exhausted (i.e. polling is complete). */
static int e100_poll(struct napi_struct *napi, int budget)
{
	struct nic *nic = container_of(napi, struct nic, napi);
	unsigned int work_done = 0;

	e100_rx_clean(nic, &work_done, budget);
	e100_tx_clean(nic);

	/* If budget not fully consumed, exit the polling mode */
	if (work_done < budget) {
		napi_complete(napi);
		e100_enable_irq(nic);
	}

	return work_done;
}
       
  2284 
       
  2285 #ifdef CONFIG_NET_POLL_CONTROLLER
       
/* Polling "interrupt" for netconsole and friends: run the ISR with
 * device interrupts masked, then reap completed transmits. */
static void e100_netpoll(struct net_device *netdev)
{
	struct nic *nic = netdev_priv(netdev);

	e100_disable_irq(nic);
	e100_intr(nic->pdev->irq, netdev);
	e100_tx_clean(nic);
	e100_enable_irq(nic);
}
       
  2295 #endif
       
  2296 
       
  2297 static int e100_set_mac_address(struct net_device *netdev, void *p)
       
  2298 {
       
  2299 	struct nic *nic = netdev_priv(netdev);
       
  2300 	struct sockaddr *addr = p;
       
  2301 
       
  2302 	if (!is_valid_ether_addr(addr->sa_data))
       
  2303 		return -EADDRNOTAVAIL;
       
  2304 
       
  2305 	memcpy(netdev->dev_addr, addr->sa_data, netdev->addr_len);
       
  2306 	e100_exec_cb(nic, NULL, e100_setup_iaaddr);
       
  2307 
       
  2308 	return 0;
       
  2309 }
       
  2310 
       
  2311 static int e100_change_mtu(struct net_device *netdev, int new_mtu)
       
  2312 {
       
  2313 	if (new_mtu < ETH_ZLEN || new_mtu > ETH_DATA_LEN)
       
  2314 		return -EINVAL;
       
  2315 	netdev->mtu = new_mtu;
       
  2316 	return 0;
       
  2317 }
       
  2318 
       
  2319 static int e100_asf(struct nic *nic)
       
  2320 {
       
  2321 	/* ASF can be enabled from eeprom */
       
  2322 	return((nic->pdev->device >= 0x1050) && (nic->pdev->device <= 0x1057) &&
       
  2323 	   (nic->eeprom[eeprom_config_asf] & eeprom_asf) &&
       
  2324 	   !(nic->eeprom[eeprom_config_asf] & eeprom_gcl) &&
       
  2325 	   ((nic->eeprom[eeprom_smbus_addr] & 0xFF) != 0xFE));
       
  2326 }
       
  2327 
       
/* Bring the adapter fully up: RX ring, command blocks, hardware init,
 * receiver start, and IRQ.  Watchdog timer, TX queue and NAPI are only
 * handled when the device is not claimed by EtherCAT (nic->ecdev).
 * Returns 0 or a negative errno, unwinding prior steps on failure. */
static int e100_up(struct nic *nic)
{
	int err;

	if ((err = e100_rx_alloc_list(nic)))
		return err;
	if ((err = e100_alloc_cbs(nic)))
		goto err_rx_clean_list;
	if ((err = e100_hw_init(nic)))
		goto err_clean_cbs;
	e100_set_multicast_list(nic->netdev);
	e100_start_receiver(nic, NULL);
	if (!nic->ecdev) {
		mod_timer(&nic->watchdog, jiffies);
	}
	if ((err = request_irq(nic->pdev->irq, e100_intr, IRQF_SHARED,
		nic->netdev->name, nic->netdev)))
		goto err_no_irq;
	if (!nic->ecdev) {
		netif_wake_queue(nic->netdev);
		napi_enable(&nic->napi);
		/* enable ints _after_ enabling poll, preventing a race between
		 * disable ints+schedule */
		e100_enable_irq(nic);
	}
	return 0;

err_no_irq:
	if (!nic->ecdev)
		del_timer_sync(&nic->watchdog);
err_clean_cbs:
	e100_clean_cbs(nic);
err_rx_clean_list:
	e100_rx_clean_list(nic);
	return err;
}
       
  2364 
       
/* Tear the adapter down, mirroring e100_up() in reverse: quiesce NAPI
 * and the TX queue (non-EtherCAT only), reset the hardware, release
 * the IRQ, and free command blocks and the RX ring. */
static void e100_down(struct nic *nic)
{
	if (!nic->ecdev) {
		/* wait here for poll to complete */
		napi_disable(&nic->napi);
		netif_stop_queue(nic->netdev);
	}
	e100_hw_reset(nic);
	free_irq(nic->pdev->irq, nic->netdev);
	if (!nic->ecdev) {
		del_timer_sync(&nic->watchdog);
		netif_carrier_off(nic->netdev);
	}
	e100_clean_cbs(nic);
	e100_rx_clean_list(nic);
}
       
  2381 
       
  2382 static void e100_tx_timeout(struct net_device *netdev)
       
  2383 {
       
  2384 	struct nic *nic = netdev_priv(netdev);
       
  2385 
       
  2386 	/* Reset outside of interrupt context, to avoid request_irq
       
  2387 	 * in interrupt context */
       
  2388 	schedule_work(&nic->tx_timeout_task);
       
  2389 }
       
  2390 
       
  2391 static void e100_tx_timeout_task(struct work_struct *work)
       
  2392 {
       
  2393 	struct nic *nic = container_of(work, struct nic, tx_timeout_task);
       
  2394 	struct net_device *netdev = nic->netdev;
       
  2395 
       
  2396 	DPRINTK(TX_ERR, DEBUG, "scb.status=0x%02X\n",
       
  2397 		ioread8(&nic->csr->scb.status));
       
  2398 	e100_down(netdev_priv(netdev));
       
  2399 	e100_up(netdev_priv(netdev));
       
  2400 }
       
  2401 
       
/* Run an internal MAC or PHY loopback test.  Returns 0 on success,
 * -EAGAIN if the looped-back frame does not match the sent one, or
 * another negative errno on setup failure. */
static int e100_loopback_test(struct nic *nic, enum loopback loopback_mode)
{
	int err;
	struct sk_buff *skb;

	/* Use driver resources to perform internal MAC or PHY
	 * loopback test.  A single packet is prepared and transmitted
	 * in loopback mode, and the test passes if the received
	 * packet compares byte-for-byte to the transmitted packet. */

	if ((err = e100_rx_alloc_list(nic)))
		return err;
	if ((err = e100_alloc_cbs(nic)))
		goto err_clean_rx;

	/* ICH PHY loopback is broken so do MAC loopback instead */
	if (nic->flags & ich && loopback_mode == lb_phy)
		loopback_mode = lb_mac;

	nic->loopback = loopback_mode;
	if ((err = e100_hw_init(nic)))
		goto err_loopback_none;

	if (loopback_mode == lb_phy)
		mdio_write(nic->netdev, nic->mii.phy_id, MII_BMCR,
			BMCR_LOOPBACK);

	e100_start_receiver(nic, NULL);

	if (!(skb = netdev_alloc_skb(nic->netdev, ETH_DATA_LEN))) {
		err = -ENOMEM;
		goto err_loopback_none;
	}
	skb_put(skb, ETH_DATA_LEN);
	memset(skb->data, 0xFF, ETH_DATA_LEN);
	e100_xmit_frame(skb, nic->netdev);

	/* Give the frame time to loop back into the first RX buffer. */
	msleep(10);

	pci_dma_sync_single_for_cpu(nic->pdev, nic->rx_to_clean->dma_addr,
			RFD_BUF_LEN, PCI_DMA_BIDIRECTIONAL);

	/* Compare the received payload (after the RFD header) with the
	 * transmitted pattern. */
	if (memcmp(nic->rx_to_clean->skb->data + sizeof(struct rfd),
	   skb->data, ETH_DATA_LEN))
		err = -EAGAIN;

err_loopback_none:
	mdio_write(nic->netdev, nic->mii.phy_id, MII_BMCR, 0);
	nic->loopback = lb_none;
	e100_clean_cbs(nic);
	e100_hw_reset(nic);
err_clean_rx:
	e100_rx_clean_list(nic);
	return err;
}
       
  2457 
       
  2458 #define MII_LED_CONTROL	0x1B
       
  2459 #define E100_82552_LED_OVERRIDE 0x19
       
  2460 #define E100_82552_LED_ON       0x000F /* LEDTX and LED_RX both on */
       
  2461 #define E100_82552_LED_OFF      0x000A /* LEDTX and LED_RX both off */
       
  2462 static void e100_blink_led(unsigned long data)
       
  2463 {
       
  2464 	struct nic *nic = (struct nic *)data;
       
  2465 	enum led_state {
       
  2466 		led_on     = 0x01,
       
  2467 		led_off    = 0x04,
       
  2468 		led_on_559 = 0x05,
       
  2469 		led_on_557 = 0x07,
       
  2470 	};
       
  2471 	u16 led_reg = MII_LED_CONTROL;
       
  2472 
       
  2473 	if (nic->phy == phy_82552_v) {
       
  2474 		led_reg = E100_82552_LED_OVERRIDE;
       
  2475 
       
  2476 		nic->leds = (nic->leds == E100_82552_LED_ON) ?
       
  2477 		            E100_82552_LED_OFF : E100_82552_LED_ON;
       
  2478 	} else {
       
  2479 		nic->leds = (nic->leds & led_on) ? led_off :
       
  2480 		            (nic->mac < mac_82559_D101M) ? led_on_557 :
       
  2481 		            led_on_559;
       
  2482 	}
       
  2483 	mdio_write(nic->netdev, nic->mii.phy_id, led_reg, nic->leds);
       
  2484 	mod_timer(&nic->blink_timer, jiffies + HZ / 4);
       
  2485 }
       
  2486 
       
  2487 static int e100_get_settings(struct net_device *netdev, struct ethtool_cmd *cmd)
       
  2488 {
       
  2489 	struct nic *nic = netdev_priv(netdev);
       
  2490 	return mii_ethtool_gset(&nic->mii, cmd);
       
  2491 }
       
  2492 
       
  2493 static int e100_set_settings(struct net_device *netdev, struct ethtool_cmd *cmd)
       
  2494 {
       
  2495 	struct nic *nic = netdev_priv(netdev);
       
  2496 	int err;
       
  2497 
       
  2498 	mdio_write(netdev, nic->mii.phy_id, MII_BMCR, BMCR_RESET);
       
  2499 	err = mii_ethtool_sset(&nic->mii, cmd);
       
  2500 	e100_exec_cb(nic, NULL, e100_configure);
       
  2501 
       
  2502 	return err;
       
  2503 }
       
  2504 
       
  2505 static void e100_get_drvinfo(struct net_device *netdev,
       
  2506 	struct ethtool_drvinfo *info)
       
  2507 {
       
  2508 	struct nic *nic = netdev_priv(netdev);
       
  2509 	strcpy(info->driver, DRV_NAME);
       
  2510 	strcpy(info->version, DRV_VERSION);
       
  2511 	strcpy(info->fw_version, "N/A");
       
  2512 	strcpy(info->bus_info, pci_name(nic->pdev));
       
  2513 }
       
  2514 
       
  2515 #define E100_PHY_REGS 0x1C
       
  2516 static int e100_get_regs_len(struct net_device *netdev)
       
  2517 {
       
  2518 	struct nic *nic = netdev_priv(netdev);
       
  2519 	return 1 + E100_PHY_REGS + sizeof(nic->mem->dump_buf);
       
  2520 }
       
  2521 
       
/* ethtool get_regs: dump SCB command/status (1 u32), the PHY
 * registers E100_PHY_REGS..0 (E100_PHY_REGS + 1 u32s), then the
 * firmware dump buffer.  NOTE(review): the total written here is
 * (2 + E100_PHY_REGS) u32s plus sizeof(dump_buf) — verify that
 * e100_get_regs_len() reports at least that many bytes. */
static void e100_get_regs(struct net_device *netdev,
	struct ethtool_regs *regs, void *p)
{
	struct nic *nic = netdev_priv(netdev);
	u32 *buff = p;
	int i;

	regs->version = (1 << 24) | nic->pdev->revision;
	buff[0] = ioread8(&nic->csr->scb.cmd_hi) << 24 |
		ioread8(&nic->csr->scb.cmd_lo) << 16 |
		ioread16(&nic->csr->scb.status);
	for (i = E100_PHY_REGS; i >= 0; i--)
		buff[1 + E100_PHY_REGS - i] =
			mdio_read(netdev, nic->mii.phy_id, i);
	memset(nic->mem->dump_buf, 0, sizeof(nic->mem->dump_buf));
	e100_exec_cb(nic, NULL, e100_dump);
	/* Give the dump command time to complete before copying. */
	msleep(10);
	memcpy(&buff[2 + E100_PHY_REGS], nic->mem->dump_buf,
		sizeof(nic->mem->dump_buf));
}
       
  2542 
       
  2543 static void e100_get_wol(struct net_device *netdev, struct ethtool_wolinfo *wol)
       
  2544 {
       
  2545 	struct nic *nic = netdev_priv(netdev);
       
  2546 	wol->supported = (nic->mac >= mac_82558_D101_A4) ?  WAKE_MAGIC : 0;
       
  2547 	wol->wolopts = (nic->flags & wol_magic) ? WAKE_MAGIC : 0;
       
  2548 }
       
  2549 
       
  2550 static int e100_set_wol(struct net_device *netdev, struct ethtool_wolinfo *wol)
       
  2551 {
       
  2552 	struct nic *nic = netdev_priv(netdev);
       
  2553 
       
  2554 	if ((wol->wolopts && wol->wolopts != WAKE_MAGIC) ||
       
  2555 	    !device_can_wakeup(&nic->pdev->dev))
       
  2556 		return -EOPNOTSUPP;
       
  2557 
       
  2558 	if (wol->wolopts)
       
  2559 		nic->flags |= wol_magic;
       
  2560 	else
       
  2561 		nic->flags &= ~wol_magic;
       
  2562 
       
  2563 	device_set_wakeup_enable(&nic->pdev->dev, wol->wolopts);
       
  2564 
       
  2565 	e100_exec_cb(nic, NULL, e100_configure);
       
  2566 
       
  2567 	return 0;
       
  2568 }
       
  2569 
       
  2570 static u32 e100_get_msglevel(struct net_device *netdev)
       
  2571 {
       
  2572 	struct nic *nic = netdev_priv(netdev);
       
  2573 	return nic->msg_enable;
       
  2574 }
       
  2575 
       
  2576 static void e100_set_msglevel(struct net_device *netdev, u32 value)
       
  2577 {
       
  2578 	struct nic *nic = netdev_priv(netdev);
       
  2579 	nic->msg_enable = value;
       
  2580 }
       
  2581 
       
  2582 static int e100_nway_reset(struct net_device *netdev)
       
  2583 {
       
  2584 	struct nic *nic = netdev_priv(netdev);
       
  2585 	return mii_nway_restart(&nic->mii);
       
  2586 }
       
  2587 
       
  2588 static u32 e100_get_link(struct net_device *netdev)
       
  2589 {
       
  2590 	struct nic *nic = netdev_priv(netdev);
       
  2591 	return mii_link_ok(&nic->mii);
       
  2592 }
       
  2593 
       
  2594 static int e100_get_eeprom_len(struct net_device *netdev)
       
  2595 {
       
  2596 	struct nic *nic = netdev_priv(netdev);
       
  2597 	return nic->eeprom_wc << 1;
       
  2598 }
       
  2599 
       
  2600 #define E100_EEPROM_MAGIC	0x1234
       
  2601 static int e100_get_eeprom(struct net_device *netdev,
       
  2602 	struct ethtool_eeprom *eeprom, u8 *bytes)
       
  2603 {
       
  2604 	struct nic *nic = netdev_priv(netdev);
       
  2605 
       
  2606 	eeprom->magic = E100_EEPROM_MAGIC;
       
  2607 	memcpy(bytes, &((u8 *)nic->eeprom)[eeprom->offset], eeprom->len);
       
  2608 
       
  2609 	return 0;
       
  2610 }
       
  2611 
       
  2612 static int e100_set_eeprom(struct net_device *netdev,
       
  2613 	struct ethtool_eeprom *eeprom, u8 *bytes)
       
  2614 {
       
  2615 	struct nic *nic = netdev_priv(netdev);
       
  2616 
       
  2617 	if (eeprom->magic != E100_EEPROM_MAGIC)
       
  2618 		return -EINVAL;
       
  2619 
       
  2620 	memcpy(&((u8 *)nic->eeprom)[eeprom->offset], bytes, eeprom->len);
       
  2621 
       
  2622 	return e100_eeprom_save(nic, eeprom->offset >> 1,
       
  2623 		(eeprom->len >> 1) + 1);
       
  2624 }
       
  2625 
       
  2626 static void e100_get_ringparam(struct net_device *netdev,
       
  2627 	struct ethtool_ringparam *ring)
       
  2628 {
       
  2629 	struct nic *nic = netdev_priv(netdev);
       
  2630 	struct param_range *rfds = &nic->params.rfds;
       
  2631 	struct param_range *cbs = &nic->params.cbs;
       
  2632 
       
  2633 	ring->rx_max_pending = rfds->max;
       
  2634 	ring->tx_max_pending = cbs->max;
       
  2635 	ring->rx_mini_max_pending = 0;
       
  2636 	ring->rx_jumbo_max_pending = 0;
       
  2637 	ring->rx_pending = rfds->count;
       
  2638 	ring->tx_pending = cbs->count;
       
  2639 	ring->rx_mini_pending = 0;
       
  2640 	ring->rx_jumbo_pending = 0;
       
  2641 }
       
  2642 
       
  2643 static int e100_set_ringparam(struct net_device *netdev,
       
  2644 	struct ethtool_ringparam *ring)
       
  2645 {
       
  2646 	struct nic *nic = netdev_priv(netdev);
       
  2647 	struct param_range *rfds = &nic->params.rfds;
       
  2648 	struct param_range *cbs = &nic->params.cbs;
       
  2649 
       
  2650 	if ((ring->rx_mini_pending) || (ring->rx_jumbo_pending))
       
  2651 		return -EINVAL;
       
  2652 
       
  2653 	if (netif_running(netdev))
       
  2654 		e100_down(nic);
       
  2655 	rfds->count = max(ring->rx_pending, rfds->min);
       
  2656 	rfds->count = min(rfds->count, rfds->max);
       
  2657 	cbs->count = max(ring->tx_pending, cbs->min);
       
  2658 	cbs->count = min(cbs->count, cbs->max);
       
  2659 	DPRINTK(DRV, INFO, "Ring Param settings: rx: %d, tx %d\n",
       
  2660 	        rfds->count, cbs->count);
       
  2661 	if (netif_running(netdev))
       
  2662 		e100_up(nic);
       
  2663 
       
  2664 	return 0;
       
  2665 }
       
  2666 
       
/* Names for the ethtool self-test results, in the exact order that
 * e100_diag_test() fills its data[] array. */
static const char e100_gstrings_test[][ETH_GSTRING_LEN] = {
	"Link test     (on/offline)",
	"Eeprom test   (on/offline)",
	"Self test        (offline)",
	"Mac loopback     (offline)",
	"Phy loopback     (offline)",
};
#define E100_TEST_LEN	ARRAY_SIZE(e100_gstrings_test)
       
  2675 
       
  2676 static void e100_diag_test(struct net_device *netdev,
       
  2677 	struct ethtool_test *test, u64 *data)
       
  2678 {
       
  2679 	struct ethtool_cmd cmd;
       
  2680 	struct nic *nic = netdev_priv(netdev);
       
  2681 	int i, err;
       
  2682 
       
  2683 	memset(data, 0, E100_TEST_LEN * sizeof(u64));
       
  2684 	data[0] = !mii_link_ok(&nic->mii);
       
  2685 	data[1] = e100_eeprom_load(nic);
       
  2686 	if (test->flags & ETH_TEST_FL_OFFLINE) {
       
  2687 
       
  2688 		/* save speed, duplex & autoneg settings */
       
  2689 		err = mii_ethtool_gset(&nic->mii, &cmd);
       
  2690 
       
  2691 		if (netif_running(netdev))
       
  2692 			e100_down(nic);
       
  2693 		data[2] = e100_self_test(nic);
       
  2694 		data[3] = e100_loopback_test(nic, lb_mac);
       
  2695 		data[4] = e100_loopback_test(nic, lb_phy);
       
  2696 
       
  2697 		/* restore speed, duplex & autoneg settings */
       
  2698 		err = mii_ethtool_sset(&nic->mii, &cmd);
       
  2699 
       
  2700 		if (netif_running(netdev))
       
  2701 			e100_up(nic);
       
  2702 	}
       
  2703 	for (i = 0; i < E100_TEST_LEN; i++)
       
  2704 		test->flags |= data[i] ? ETH_TEST_FL_FAILED : 0;
       
  2705 
       
  2706 	msleep_interruptible(4 * 1000);
       
  2707 }
       
  2708 
       
/* ethtool phys_id: blink the LEDs for @data seconds (0 = as long as
 * possible) so the operator can locate the NIC.  Blinking is driven
 * by nic->blink_timer / e100_blink_led(). */
static int e100_phys_id(struct net_device *netdev, u32 data)
{
	struct nic *nic = netdev_priv(netdev);
	u16 led_reg = (nic->phy == phy_82552_v) ? E100_82552_LED_OVERRIDE :
	              MII_LED_CONTROL;

	/* Clamp the duration to the maximum schedulable timeout. */
	if (!data || data > (u32)(MAX_SCHEDULE_TIMEOUT / HZ))
		data = (u32)(MAX_SCHEDULE_TIMEOUT / HZ);
	mod_timer(&nic->blink_timer, jiffies);
	msleep_interruptible(data * 1000);
	del_timer_sync(&nic->blink_timer);
	/* Restore default LED behavior. */
	mdio_write(netdev, nic->mii.phy_id, led_reg, 0);

	return 0;
}
       
  2724 
       
/* Names for the ethtool statistics, matching the order in which
 * e100_get_ethtool_stats() emits values: first the generic netdev
 * counters, then the device-specific ones. */
static const char e100_gstrings_stats[][ETH_GSTRING_LEN] = {
	"rx_packets", "tx_packets", "rx_bytes", "tx_bytes", "rx_errors",
	"tx_errors", "rx_dropped", "tx_dropped", "multicast", "collisions",
	"rx_length_errors", "rx_over_errors", "rx_crc_errors",
	"rx_frame_errors", "rx_fifo_errors", "rx_missed_errors",
	"tx_aborted_errors", "tx_carrier_errors", "tx_fifo_errors",
	"tx_heartbeat_errors", "tx_window_errors",
	/* device-specific stats */
	"tx_deferred", "tx_single_collisions", "tx_multi_collisions",
	"tx_flow_control_pause", "rx_flow_control_pause",
	"rx_flow_control_unsupported", "tx_tco_packets", "rx_tco_packets",
};
/* Number of entries above that come from struct net_device_stats. */
#define E100_NET_STATS_LEN	21
#define E100_STATS_LEN	ARRAY_SIZE(e100_gstrings_stats)
       
  2739 
       
  2740 static int e100_get_sset_count(struct net_device *netdev, int sset)
       
  2741 {
       
  2742 	switch (sset) {
       
  2743 	case ETH_SS_TEST:
       
  2744 		return E100_TEST_LEN;
       
  2745 	case ETH_SS_STATS:
       
  2746 		return E100_STATS_LEN;
       
  2747 	default:
       
  2748 		return -EOPNOTSUPP;
       
  2749 	}
       
  2750 }
       
  2751 
       
/* ethtool get_ethtool_stats: fill @data in the order named by
 * e100_gstrings_stats — generic netdev counters first, then the
 * device-specific ones. */
static void e100_get_ethtool_stats(struct net_device *netdev,
	struct ethtool_stats *stats, u64 *data)
{
	struct nic *nic = netdev_priv(netdev);
	int i;

	/* Treats struct net_device_stats as an array of unsigned long —
	 * assumes its first E100_NET_STATS_LEN members are unsigned long
	 * in declaration order; TODO confirm against the struct layout
	 * for this kernel version. */
	for (i = 0; i < E100_NET_STATS_LEN; i++)
		data[i] = ((unsigned long *)&netdev->stats)[i];

	data[i++] = nic->tx_deferred;
	data[i++] = nic->tx_single_collisions;
	data[i++] = nic->tx_multiple_collisions;
	data[i++] = nic->tx_fc_pause;
	data[i++] = nic->rx_fc_pause;
	data[i++] = nic->rx_fc_unsupported;
	data[i++] = nic->tx_tco_frames;
	data[i++] = nic->rx_tco_frames;
}
       
  2770 
       
  2771 static void e100_get_strings(struct net_device *netdev, u32 stringset, u8 *data)
       
  2772 {
       
  2773 	switch (stringset) {
       
  2774 	case ETH_SS_TEST:
       
  2775 		memcpy(data, *e100_gstrings_test, sizeof(e100_gstrings_test));
       
  2776 		break;
       
  2777 	case ETH_SS_STATS:
       
  2778 		memcpy(data, *e100_gstrings_stats, sizeof(e100_gstrings_stats));
       
  2779 		break;
       
  2780 	}
       
  2781 }
       
  2782 
       
/* ethtool entry points for the e100 driver. */
static const struct ethtool_ops e100_ethtool_ops = {
	.get_settings		= e100_get_settings,
	.set_settings		= e100_set_settings,
	.get_drvinfo		= e100_get_drvinfo,
	.get_regs_len		= e100_get_regs_len,
	.get_regs		= e100_get_regs,
	.get_wol		= e100_get_wol,
	.set_wol		= e100_set_wol,
	.get_msglevel		= e100_get_msglevel,
	.set_msglevel		= e100_set_msglevel,
	.nway_reset		= e100_nway_reset,
	.get_link		= e100_get_link,
	.get_eeprom_len		= e100_get_eeprom_len,
	.get_eeprom		= e100_get_eeprom,
	.set_eeprom		= e100_set_eeprom,
	.get_ringparam		= e100_get_ringparam,
	.set_ringparam		= e100_set_ringparam,
	.self_test		= e100_diag_test,
	.get_strings		= e100_get_strings,
	.phys_id		= e100_phys_id,
	.get_ethtool_stats	= e100_get_ethtool_stats,
	.get_sset_count		= e100_get_sset_count,
};
       
  2806 
       
  2807 static int e100_do_ioctl(struct net_device *netdev, struct ifreq *ifr, int cmd)
       
  2808 {
       
  2809 	struct nic *nic = netdev_priv(netdev);
       
  2810 
       
  2811 	return generic_mii_ioctl(&nic->mii, if_mii(ifr), cmd, NULL);
       
  2812 }
       
  2813 
       
  2814 static int e100_alloc(struct nic *nic)
       
  2815 {
       
  2816 	nic->mem = pci_alloc_consistent(nic->pdev, sizeof(struct mem),
       
  2817 		&nic->dma_addr);
       
  2818 	return nic->mem ? 0 : -ENOMEM;
       
  2819 }
       
  2820 
       
  2821 static void e100_free(struct nic *nic)
       
  2822 {
       
  2823 	if (nic->mem) {
       
  2824 		pci_free_consistent(nic->pdev, sizeof(struct mem),
       
  2825 			nic->mem, nic->dma_addr);
       
  2826 		nic->mem = NULL;
       
  2827 	}
       
  2828 }
       
  2829 
       
  2830 static int e100_open(struct net_device *netdev)
       
  2831 {
       
  2832 	struct nic *nic = netdev_priv(netdev);
       
  2833 	int err = 0;
       
  2834 
       
  2835 	if (!nic->ecdev)
       
  2836 		netif_carrier_off(netdev);
       
  2837 	if ((err = e100_up(nic)))
       
  2838 		DPRINTK(IFUP, ERR, "Cannot open interface, aborting.\n");
       
  2839 	return err;
       
  2840 }
       
  2841 
       
  2842 static int e100_close(struct net_device *netdev)
       
  2843 {
       
  2844 	e100_down(netdev_priv(netdev));
       
  2845 	return 0;
       
  2846 }
       
  2847 
       
/* Network stack entry points for the e100 driver. */
static const struct net_device_ops e100_netdev_ops = {
	.ndo_open		= e100_open,
	.ndo_stop		= e100_close,
	.ndo_start_xmit		= e100_xmit_frame,
	.ndo_validate_addr	= eth_validate_addr,
	.ndo_set_multicast_list	= e100_set_multicast_list,
	.ndo_set_mac_address	= e100_set_mac_address,
	.ndo_change_mtu		= e100_change_mtu,
	.ndo_do_ioctl		= e100_do_ioctl,
	.ndo_tx_timeout		= e100_tx_timeout,
#ifdef CONFIG_NET_POLL_CONTROLLER
	.ndo_poll_controller	= e100_netpoll,
#endif
};
       
  2862 
       
  2863 static int __devinit e100_probe(struct pci_dev *pdev,
       
  2864 	const struct pci_device_id *ent)
       
  2865 {
       
  2866 	struct net_device *netdev;
       
  2867 	struct nic *nic;
       
  2868 	int err;
       
  2869 
       
  2870 	if (!(netdev = alloc_etherdev(sizeof(struct nic)))) {
       
  2871 		if (((1 << debug) - 1) & NETIF_MSG_PROBE)
       
  2872 			printk(KERN_ERR PFX "Etherdev alloc failed, abort.\n");
       
  2873 		return -ENOMEM;
       
  2874 	}
       
  2875 
       
  2876 	netdev->netdev_ops = &e100_netdev_ops;
       
  2877 	SET_ETHTOOL_OPS(netdev, &e100_ethtool_ops);
       
  2878 	netdev->watchdog_timeo = E100_WATCHDOG_PERIOD;
       
  2879 	strncpy(netdev->name, pci_name(pdev), sizeof(netdev->name) - 1);
       
  2880 
       
  2881 	nic = netdev_priv(netdev);
       
  2882 	netif_napi_add(netdev, &nic->napi, e100_poll, E100_NAPI_WEIGHT);
       
  2883 	nic->netdev = netdev;
       
  2884 	nic->pdev = pdev;
       
  2885 	nic->msg_enable = (1 << debug) - 1;
       
  2886 	nic->mdio_ctrl = mdio_ctrl_hw;
       
  2887 	pci_set_drvdata(pdev, netdev);
       
  2888 
       
  2889 	if ((err = pci_enable_device(pdev))) {
       
  2890 		DPRINTK(PROBE, ERR, "Cannot enable PCI device, aborting.\n");
       
  2891 		goto err_out_free_dev;
       
  2892 	}
       
  2893 
       
  2894 	if (!(pci_resource_flags(pdev, 0) & IORESOURCE_MEM)) {
       
  2895 		DPRINTK(PROBE, ERR, "Cannot find proper PCI device "
       
  2896 			"base address, aborting.\n");
       
  2897 		err = -ENODEV;
       
  2898 		goto err_out_disable_pdev;
       
  2899 	}
       
  2900 
       
  2901 	if ((err = pci_request_regions(pdev, DRV_NAME))) {
       
  2902 		DPRINTK(PROBE, ERR, "Cannot obtain PCI resources, aborting.\n");
       
  2903 		goto err_out_disable_pdev;
       
  2904 	}
       
  2905 
       
  2906 	if ((err = pci_set_dma_mask(pdev, DMA_BIT_MASK(32)))) {
       
  2907 		DPRINTK(PROBE, ERR, "No usable DMA configuration, aborting.\n");
       
  2908 		goto err_out_free_res;
       
  2909 	}
       
  2910 
       
  2911 	SET_NETDEV_DEV(netdev, &pdev->dev);
       
  2912 
       
  2913 	if (use_io)
       
  2914 		DPRINTK(PROBE, INFO, "using i/o access mode\n");
       
  2915 
       
  2916 	nic->csr = pci_iomap(pdev, (use_io ? 1 : 0), sizeof(struct csr));
       
  2917 	if (!nic->csr) {
       
  2918 		DPRINTK(PROBE, ERR, "Cannot map device registers, aborting.\n");
       
  2919 		err = -ENOMEM;
       
  2920 		goto err_out_free_res;
       
  2921 	}
       
  2922 
       
  2923 	if (ent->driver_data)
       
  2924 		nic->flags |= ich;
       
  2925 	else
       
  2926 		nic->flags &= ~ich;
       
  2927 
       
  2928 	e100_get_defaults(nic);
       
  2929 
       
  2930 	/* locks must be initialized before calling hw_reset */
       
  2931 	spin_lock_init(&nic->cb_lock);
       
  2932 	spin_lock_init(&nic->cmd_lock);
       
  2933 	spin_lock_init(&nic->mdio_lock);
       
  2934 
       
  2935 	/* Reset the device before pci_set_master() in case device is in some
       
  2936 	 * funky state and has an interrupt pending - hint: we don't have the
       
  2937 	 * interrupt handler registered yet. */
       
  2938 	e100_hw_reset(nic);
       
  2939 
       
  2940 	pci_set_master(pdev);
       
  2941 
       
  2942 	init_timer(&nic->watchdog);
       
  2943 	nic->watchdog.function = e100_watchdog;
       
  2944 	nic->watchdog.data = (unsigned long)nic;
       
  2945 	init_timer(&nic->blink_timer);
       
  2946 	nic->blink_timer.function = e100_blink_led;
       
  2947 	nic->blink_timer.data = (unsigned long)nic;
       
  2948 
       
  2949 	INIT_WORK(&nic->tx_timeout_task, e100_tx_timeout_task);
       
  2950 
       
  2951 	if ((err = e100_alloc(nic))) {
       
  2952 		DPRINTK(PROBE, ERR, "Cannot alloc driver memory, aborting.\n");
       
  2953 		goto err_out_iounmap;
       
  2954 	}
       
  2955 
       
  2956 	if ((err = e100_eeprom_load(nic)))
       
  2957 		goto err_out_free;
       
  2958 
       
  2959 	e100_phy_init(nic);
       
  2960 
       
  2961 	memcpy(netdev->dev_addr, nic->eeprom, ETH_ALEN);
       
  2962 	memcpy(netdev->perm_addr, nic->eeprom, ETH_ALEN);
       
  2963 	if (!is_valid_ether_addr(netdev->perm_addr)) {
       
  2964 		if (!eeprom_bad_csum_allow) {
       
  2965 			DPRINTK(PROBE, ERR, "Invalid MAC address from "
       
  2966 			        "EEPROM, aborting.\n");
       
  2967 			err = -EAGAIN;
       
  2968 			goto err_out_free;
       
  2969 		} else {
       
  2970 			DPRINTK(PROBE, ERR, "Invalid MAC address from EEPROM, "
       
  2971 			        "you MUST configure one.\n");
       
  2972 		}
       
  2973 	}
       
  2974 
       
  2975 	/* Wol magic packet can be enabled from eeprom */
       
  2976 	if ((nic->mac >= mac_82558_D101_A4) &&
       
  2977 	   (nic->eeprom[eeprom_id] & eeprom_id_wol)) {
       
  2978 		nic->flags |= wol_magic;
       
  2979 		device_set_wakeup_enable(&pdev->dev, true);
       
  2980 	}
       
  2981 
       
  2982 	/* ack any pending wake events, disable PME */
       
  2983 	pci_pme_active(pdev, false);
       
  2984 
       
  2985 	// offer device to EtherCAT master module
       
  2986 	nic->ecdev = ecdev_offer(netdev, e100_ec_poll, THIS_MODULE);
       
  2987 	if (nic->ecdev) {
       
  2988 		if (ecdev_open(nic->ecdev)) {
       
  2989 			ecdev_withdraw(nic->ecdev);
       
  2990 			goto err_out_free;
       
  2991 		}
       
  2992 	} else {
       
  2993 		strcpy(netdev->name, "eth%d");
       
  2994 		if((err = register_netdev(netdev))) {
       
  2995 			DPRINTK(PROBE, ERR, "Cannot register net device, aborting.\n");
       
  2996 			goto err_out_free;
       
  2997 		}
       
  2998 	}
       
  2999 	nic->cbs_pool = pci_pool_create(netdev->name,
       
  3000 			   nic->pdev,
       
  3001 			   nic->params.cbs.count * sizeof(struct cb),
       
  3002 			   sizeof(u32),
       
  3003 			   0);
       
  3004 	DPRINTK(PROBE, INFO, "addr 0x%llx, irq %d, MAC addr %pM\n",
       
  3005 		(unsigned long long)pci_resource_start(pdev, use_io ? 1 : 0),
       
  3006 		pdev->irq, netdev->dev_addr);
       
  3007 
       
  3008 	return 0;
       
  3009 
       
  3010 err_out_free:
       
  3011 	e100_free(nic);
       
  3012 err_out_iounmap:
       
  3013 	pci_iounmap(pdev, nic->csr);
       
  3014 err_out_free_res:
       
  3015 	pci_release_regions(pdev);
       
  3016 err_out_disable_pdev:
       
  3017 	pci_disable_device(pdev);
       
  3018 err_out_free_dev:
       
  3019 	pci_set_drvdata(pdev, NULL);
       
  3020 	free_netdev(netdev);
       
  3021 	return err;
       
  3022 }
       
  3023 
       
/* PCI remove: release everything acquired in e100_probe(), in reverse
 * order.  An EtherCAT-claimed device is closed and withdrawn from the
 * master; only stack-owned devices were ever registered and therefore
 * need unregister_netdev(). */
static void __devexit e100_remove(struct pci_dev *pdev)
{
	struct net_device *netdev = pci_get_drvdata(pdev);

	if (netdev) {
		struct nic *nic = netdev_priv(netdev);
		if (nic->ecdev) {
			ecdev_close(nic->ecdev);
			ecdev_withdraw(nic->ecdev);
		} else {
			unregister_netdev(netdev);
		}

		e100_free(nic);
		pci_iounmap(pdev, nic->csr);
		/* nic lives inside netdev, so destroy the pool before
		 * free_netdev() invalidates it. */
		pci_pool_destroy(nic->cbs_pool);
		free_netdev(netdev);
		pci_release_regions(pdev);
		pci_disable_device(pdev);
		pci_set_drvdata(pdev, NULL);
	}
}
       
  3046 
       
#define E100_82552_SMARTSPEED   0x14   /* SmartSpeed Ctrl register */
#define E100_82552_REV_ANEG     0x0200 /* Reverse auto-negotiation */
#define E100_82552_ANEG_NOW     0x0400 /* Auto-negotiate now */
/* Common suspend/shutdown path: stop the interface, save PCI config
 * space, and report through *enable_wake whether WoL/ASF wakeup should
 * stay armed.  NOTE(review): unlike most paths in this EtherCAT port
 * there is no nic->ecdev guard here -- confirm that suspending a
 * master-claimed device is intended. */
static void __e100_shutdown(struct pci_dev *pdev, bool *enable_wake)
{
	struct net_device *netdev = pci_get_drvdata(pdev);
	struct nic *nic = netdev_priv(netdev);

	if (netif_running(netdev))
		e100_down(nic);
	netif_device_detach(netdev);

	pci_save_state(pdev);

	/* Wake is wanted if the WoL magic-packet flag is set or ASF is
	 * active (bitwise | evaluates both operands; equivalent to || here
	 * assuming e100_asf() returns 0/non-0 -- verify). */
	if ((nic->flags & wol_magic) | e100_asf(nic)) {
		/* enable reverse auto-negotiation */
		if (nic->phy == phy_82552_v) {
			u16 smartspeed = mdio_read(netdev, nic->mii.phy_id,
			                           E100_82552_SMARTSPEED);

			mdio_write(netdev, nic->mii.phy_id,
			           E100_82552_SMARTSPEED, smartspeed |
			           E100_82552_REV_ANEG | E100_82552_ANEG_NOW);
		}
		*enable_wake = true;
	} else {
		*enable_wake = false;
	}

	pci_disable_device(pdev);
}
       
  3078 
       
  3079 static int __e100_power_off(struct pci_dev *pdev, bool wake)
       
  3080 {
       
  3081 	if (wake)
       
  3082 		return pci_prepare_to_sleep(pdev);
       
  3083 
       
  3084 	pci_wake_from_d3(pdev, false);
       
  3085 	pci_set_power_state(pdev, PCI_D3hot);
       
  3086 
       
  3087 	return 0;
       
  3088 }
       
  3089 
       
  3090 #ifdef CONFIG_PM
       
  3091 static int e100_suspend(struct pci_dev *pdev, pm_message_t state)
       
  3092 {
       
  3093 	bool wake;
       
  3094 	__e100_shutdown(pdev, &wake);
       
  3095 	return __e100_power_off(pdev, wake);
       
  3096 }
       
  3097 
       
/* PM resume hook: restore PCI state, disarm PME, undo the reverse
 * auto-negotiation armed in __e100_shutdown() on 82552 PHYs, and restart
 * the interface if it was running before suspend.  Always returns 0. */
static int e100_resume(struct pci_dev *pdev)
{
	struct net_device *netdev = pci_get_drvdata(pdev);
	struct nic *nic = netdev_priv(netdev);

	pci_set_power_state(pdev, PCI_D0);
	pci_restore_state(pdev);
	/* ack any pending wake events, disable PME */
	pci_enable_wake(pdev, 0, 0);

	/* disable reverse auto-negotiation */
	if (nic->phy == phy_82552_v) {
		u16 smartspeed = mdio_read(netdev, nic->mii.phy_id,
		                           E100_82552_SMARTSPEED);

		mdio_write(netdev, nic->mii.phy_id,
		           E100_82552_SMARTSPEED,
		           smartspeed & ~(E100_82552_REV_ANEG));
	}

	netif_device_attach(netdev);
	if (netif_running(netdev))
		e100_up(nic);

	return 0;
}
       
  3124 #endif /* CONFIG_PM */
       
  3125 
       
  3126 static void e100_shutdown(struct pci_dev *pdev)
       
  3127 {
       
  3128 	bool wake;
       
  3129 	__e100_shutdown(pdev, &wake);
       
  3130 	if (system_state == SYSTEM_POWER_OFF)
       
  3131 		__e100_power_off(pdev, wake);
       
  3132 }
       
  3133 
       
  3134 /* ------------------ PCI Error Recovery infrastructure  -------------- */
       
  3135 /**
       
  3136  * e100_io_error_detected - called when PCI error is detected.
       
  3137  * @pdev: Pointer to PCI device
       
  3138  * @state: The current pci connection state
       
  3139  */
       
  3140 static pci_ers_result_t e100_io_error_detected(struct pci_dev *pdev, pci_channel_state_t state)
       
  3141 {
       
  3142 	struct net_device *netdev = pci_get_drvdata(pdev);
       
  3143 	struct nic *nic = netdev_priv(netdev);
       
  3144 	if (nic->ecdev)
       
  3145 		return -EBUSY;
       
  3146 
       
  3147 
       
  3148 	netif_device_detach(netdev);
       
  3149 
       
  3150 	if (state == pci_channel_io_perm_failure)
       
  3151 		return PCI_ERS_RESULT_DISCONNECT;
       
  3152 
       
  3153 	if (netif_running(netdev))
       
  3154 		e100_down(nic);
       
  3155 	pci_disable_device(pdev);
       
  3156 
       
  3157 	/* Request a slot reset. */
       
  3158 	return PCI_ERS_RESULT_NEED_RESET;
       
  3159 }
       
  3160 
       
  3161 /**
       
  3162  * e100_io_slot_reset - called after the pci bus has been reset.
       
  3163  * @pdev: Pointer to PCI device
       
  3164  *
       
  3165  * Restart the card from scratch.
       
  3166  */
       
  3167 static pci_ers_result_t e100_io_slot_reset(struct pci_dev *pdev)
       
  3168 {
       
  3169 	struct net_device *netdev = pci_get_drvdata(pdev);
       
  3170 	struct nic *nic = netdev_priv(netdev);
       
  3171 	if (nic->ecdev)
       
  3172 		return -EBUSY;
       
  3173 
       
  3174 
       
  3175 	if (pci_enable_device(pdev)) {
       
  3176 		printk(KERN_ERR "e100: Cannot re-enable PCI device after reset.\n");
       
  3177 		return PCI_ERS_RESULT_DISCONNECT;
       
  3178 	}
       
  3179 	pci_set_master(pdev);
       
  3180 
       
  3181 	/* Only one device per card can do a reset */
       
  3182 	if (0 != PCI_FUNC(pdev->devfn))
       
  3183 		return PCI_ERS_RESULT_RECOVERED;
       
  3184 	e100_hw_reset(nic);
       
  3185 	e100_phy_init(nic);
       
  3186 
       
  3187 	return PCI_ERS_RESULT_RECOVERED;
       
  3188 }
       
  3189 
       
  3190 /**
       
  3191  * e100_io_resume - resume normal operations
       
  3192  * @pdev: Pointer to PCI device
       
  3193  *
       
  3194  * Resume normal operations after an error recovery
       
  3195  * sequence has been completed.
       
  3196  */
       
  3197 static void e100_io_resume(struct pci_dev *pdev)
       
  3198 {
       
  3199 	struct net_device *netdev = pci_get_drvdata(pdev);
       
  3200 	struct nic *nic = netdev_priv(netdev);
       
  3201 
       
  3202 	/* ack any pending wake events, disable PME */
       
  3203 	pci_enable_wake(pdev, 0, 0);
       
  3204 
       
  3205 	if (!nic->ecdev)
       
  3206 		netif_device_attach(netdev);
       
  3207 	if (nic->ecdev || netif_running(netdev)) {
       
  3208 		e100_open(netdev);
       
  3209 		if (!nic->ecdev)
       
  3210 			mod_timer(&nic->watchdog, jiffies);
       
  3211 	}
       
  3212 }
       
  3213 
       
/* PCI Advanced Error Reporting callbacks (detect -> slot reset -> resume). */
static struct pci_error_handlers e100_err_handler = {
	.error_detected = e100_io_error_detected,
	.slot_reset = e100_io_slot_reset,
	.resume = e100_io_resume,
};
       
  3219 
       
/* PCI driver glue: probe/remove, optional PM hooks, shutdown and AER
 * error handling for all IDs in e100_id_table. */
static struct pci_driver e100_driver = {
	.name =         DRV_NAME,
	.id_table =     e100_id_table,
	.probe =        e100_probe,
	.remove =       __devexit_p(e100_remove),
#ifdef CONFIG_PM
	/* Power Management hooks */
	.suspend =      e100_suspend,
	.resume =       e100_resume,
#endif
	.shutdown =     e100_shutdown,
	.err_handler = &e100_err_handler,
};
       
  3233 
       
  3234 static int __init e100_init_module(void)
       
  3235 {
       
  3236 	printk(KERN_INFO DRV_NAME " " DRV_DESCRIPTION " " DRV_VERSION
       
  3237 			", master " EC_MASTER_VERSION "\n");
       
  3238  
       
  3239  	return pci_register_driver(&e100_driver);
       
  3240 }
       
  3241 
       
/* Module exit point: unregister from the PCI core, which triggers
 * e100_remove() for every bound device. */
static void __exit e100_cleanup_module(void)
{
	printk(KERN_INFO DRV_NAME " cleaning up module...\n");
	pci_unregister_driver(&e100_driver);
	printk(KERN_INFO DRV_NAME " module cleaned up.\n");
}

module_init(e100_init_module);
module_exit(e100_cleanup_module);