devices/e100-2.6.20-ethercat.c
changeset 1227 471ceaf7f89d
child 1228 6d9c686f922e
equal deleted inserted replaced
1226:afb189516fcf 1227:471ceaf7f89d
       
     1 /******************************************************************************
       
     2  *
       
     3  *  $Id$
       
     4  *
       
     5  *  Copyright (C) 2007  Florian Pose, Ingenieurgemeinschaft IgH
       
     6  *
       
     7  *  This file is part of the IgH EtherCAT Master.
       
     8  *
       
     9  *  The IgH EtherCAT Master is free software; you can redistribute it
       
    10  *  and/or modify it under the terms of the GNU General Public License
       
    11  *  as published by the Free Software Foundation; either version 2 of the
       
    12  *  License, or (at your option) any later version.
       
    13  *
       
    14  *  The IgH EtherCAT Master is distributed in the hope that it will be
       
    15  *  useful, but WITHOUT ANY WARRANTY; without even the implied warranty of
       
    16  *  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
       
    17  *  GNU General Public License for more details.
       
    18  *
       
    19  *  You should have received a copy of the GNU General Public License
       
    20  *  along with the IgH EtherCAT Master; if not, write to the Free Software
       
    21  *  Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA  02110-1301  USA
       
    22  *
       
    23  *  The right to use EtherCAT Technology is granted and comes free of
       
    24  *  charge under condition of compatibility of product made by
       
    25  *  Licensee. People intending to distribute/sell products based on the
       
    26  *  code, have to sign an agreement to guarantee that products using
       
    27  *  software based on IgH EtherCAT master stay compatible with the actual
       
    28  *  EtherCAT specification (which are released themselves as an open
       
    29  *  standard) as the (only) precondition to have the right to use EtherCAT
       
    30  *  Technology, IP and trade marks.
       
    31  *
       
    32  *  vim: noexpandtab
       
    33  *
       
    34  *****************************************************************************/
       
    35 
       
    36 /**
       
    37    \file
       
    38    EtherCAT driver for e100-compatible NICs.
       
    39 */
       
    40 
       
    41 /* Former documentation: */
       
    42 
       
    43 /*******************************************************************************
       
    44   Intel PRO/100 Linux driver
       
    45   Copyright(c) 1999 - 2006 Intel Corporation.
       
    46 
       
    47   This program is free software; you can redistribute it and/or modify it
       
    48   under the terms and conditions of the GNU General Public License,
       
    49   version 2, as published by the Free Software Foundation.
       
    50 
       
    51   This program is distributed in the hope it will be useful, but WITHOUT
       
    52   ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
       
    53   FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
       
    54   more details.
       
    55 
       
    56   You should have received a copy of the GNU General Public License along with
       
    57   this program; if not, write to the Free Software Foundation, Inc.,
       
    58   51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
       
    59 
       
    60   The full GNU General Public License is included in this distribution in
       
    61   the file called "COPYING".
       
    62 
       
    63   Contact Information:
       
    64   Linux NICS <linux.nics@intel.com>
       
    65   e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
       
    66   Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
       
    67 
       
    68 *******************************************************************************/
       
    69 
       
    70 /*
       
    71  *	e100.c: Intel(R) PRO/100 ethernet driver
       
    72  *
       
    73  *	(Re)written 2003 by scott.feldman@intel.com.  Based loosely on
       
    74  *	original e100 driver, but better described as a munging of
       
    75  *	e100, e1000, eepro100, tg3, 8139cp, and other drivers.
       
    76  *
       
    77  *	References:
       
    78  *		Intel 8255x 10/100 Mbps Ethernet Controller Family,
       
    79  *		Open Source Software Developers Manual,
       
    80  *		http://sourceforge.net/projects/e1000
       
    81  *
       
    82  *
       
    83  *	                      Theory of Operation
       
    84  *
       
    85  *	I.   General
       
    86  *
       
    87  *	The driver supports Intel(R) 10/100 Mbps PCI Fast Ethernet
       
    88  *	controller family, which includes the 82557, 82558, 82559, 82550,
       
    89  *	82551, and 82562 devices.  82558 and greater controllers
       
    90  *	integrate the Intel 82555 PHY.  The controllers are used in
       
    91  *	server and client network interface cards, as well as in
       
    92  *	LAN-On-Motherboard (LOM), CardBus, MiniPCI, and ICHx
       
    93  *	configurations.  8255x supports a 32-bit linear addressing
       
    94  *	mode and operates at 33Mhz PCI clock rate.
       
    95  *
       
    96  *	II.  Driver Operation
       
    97  *
       
    98  *	Memory-mapped mode is used exclusively to access the device's
       
    99  *	shared-memory structure, the Control/Status Registers (CSR). All
       
   100  *	setup, configuration, and control of the device, including queuing
       
   101  *	of Tx, Rx, and configuration commands is through the CSR.
       
   102  *	cmd_lock serializes accesses to the CSR command register.  cb_lock
       
   103  *	protects the shared Command Block List (CBL).
       
   104  *
       
   105  *	8255x is highly MII-compliant and all access to the PHY go
       
   106  *	through the Management Data Interface (MDI).  Consequently, the
       
   107  *	driver leverages the mii.c library shared with other MII-compliant
       
   108  *	devices.
       
   109  *
       
   110  *	Big- and Little-Endian byte order as well as 32- and 64-bit
       
   111  *	archs are supported.  Weak-ordered memory and non-cache-coherent
       
   112  *	archs are supported.
       
   113  *
       
   114  *	III. Transmit
       
   115  *
       
   116  *	A Tx skb is mapped and hangs off of a TCB.  TCBs are linked
       
   117  *	together in a fixed-size ring (CBL) thus forming the flexible mode
       
   118  *	memory structure.  A TCB marked with the suspend-bit indicates
       
   119  *	the end of the ring.  The last TCB processed suspends the
       
    120  *	controller, and the controller can be restarted by issuing a CU
       
   121  *	resume command to continue from the suspend point, or a CU start
       
   122  *	command to start at a given position in the ring.
       
   123  *
       
   124  *	Non-Tx commands (config, multicast setup, etc) are linked
       
   125  *	into the CBL ring along with Tx commands.  The common structure
       
   126  *	used for both Tx and non-Tx commands is the Command Block (CB).
       
   127  *
       
   128  *	cb_to_use is the next CB to use for queuing a command; cb_to_clean
       
   129  *	is the next CB to check for completion; cb_to_send is the first
       
   130  *	CB to start on in case of a previous failure to resume.  CB clean
       
   131  *	up happens in interrupt context in response to a CU interrupt.
       
   132  *	cbs_avail keeps track of number of free CB resources available.
       
   133  *
       
   134  * 	Hardware padding of short packets to minimum packet size is
       
   135  * 	enabled.  82557 pads with 7Eh, while the later controllers pad
       
   136  * 	with 00h.
       
   137  *
       
    138  *	IV.  Receive
       
   139  *
       
   140  *	The Receive Frame Area (RFA) comprises a ring of Receive Frame
       
   141  *	Descriptors (RFD) + data buffer, thus forming the simplified mode
       
   142  *	memory structure.  Rx skbs are allocated to contain both the RFD
       
   143  *	and the data buffer, but the RFD is pulled off before the skb is
       
   144  *	indicated.  The data buffer is aligned such that encapsulated
       
   145  *	protocol headers are u32-aligned.  Since the RFD is part of the
       
   146  *	mapped shared memory, and completion status is contained within
       
   147  *	the RFD, the RFD must be dma_sync'ed to maintain a consistent
       
   148  *	view from software and hardware.
       
   149  *
       
   150  *	Under typical operation, the  receive unit (RU) is start once,
       
   151  *	and the controller happily fills RFDs as frames arrive.  If
       
   152  *	replacement RFDs cannot be allocated, or the RU goes non-active,
       
   153  *	the RU must be restarted.  Frame arrival generates an interrupt,
       
   154  *	and Rx indication and re-allocation happen in the same context,
       
   155  *	therefore no locking is required.  A software-generated interrupt
       
   156  *	is generated from the watchdog to recover from a failed allocation
       
    157  *	scenario where all Rx resources have been indicated and none re-
       
   158  *	placed.
       
   159  *
       
   160  *	V.   Miscellaneous
       
   161  *
       
   162  * 	VLAN offloading of tagging, stripping and filtering is not
       
   163  * 	supported, but driver will accommodate the extra 4-byte VLAN tag
       
   164  * 	for processing by upper layers.  Tx/Rx Checksum offloading is not
       
   165  * 	supported.  Tx Scatter/Gather is not supported.  Jumbo Frames is
       
   166  * 	not supported (hardware limitation).
       
   167  *
       
   168  * 	MagicPacket(tm) WoL support is enabled/disabled via ethtool.
       
   169  *
       
   170  * 	Thanks to JC (jchapman@katalix.com) for helping with
       
   171  * 	testing/troubleshooting the development driver.
       
   172  *
       
   173  * 	TODO:
       
   174  * 	o several entry points race with dev->close
       
   175  * 	o check for tx-no-resources/stop Q races with tx clean/wake Q
       
   176  *
       
   177  *	FIXES:
       
   178  * 2005/12/02 - Michael O'Donnell <Michael.ODonnell at stratus dot com>
       
   179  *	- Stratus87247: protect MDI control register manipulations
       
   180  */
       
   181 
       
   182 #include <linux/module.h>
       
   183 #include <linux/moduleparam.h>
       
   184 #include <linux/kernel.h>
       
   185 #include <linux/types.h>
       
   186 #include <linux/slab.h>
       
   187 #include <linux/delay.h>
       
   188 #include <linux/init.h>
       
   189 #include <linux/pci.h>
       
   190 #include <linux/dma-mapping.h>
       
   191 #include <linux/netdevice.h>
       
   192 #include <linux/etherdevice.h>
       
   193 #include <linux/mii.h>
       
   194 #include <linux/if_vlan.h>
       
   195 #include <linux/skbuff.h>
       
   196 #include <linux/ethtool.h>
       
   197 #include <linux/string.h>
       
   198 #include <asm/unaligned.h>
       
   199 
       
   200 // EtherCAT includes
       
   201 #include "../globals.h"
       
   202 #include "ecdev.h"
       
   203 
       
   204 #define DRV_NAME		"ec_e100"
       
   205 #define DRV_EXT			"-NAPI"
       
   206 #define DRV_VERSION		"3.5.17-k2"DRV_EXT
       
   207 #define DRV_DESCRIPTION		"Intel(R) PRO/100 Network Driver"
       
   208 #define DRV_COPYRIGHT		"Copyright(c) 1999-2006 Intel Corporation"
       
   209 #define PFX			DRV_NAME ": "
       
   210 
       
   211 #define E100_WATCHDOG_PERIOD	(2 * HZ)
       
   212 #define E100_NAPI_WEIGHT	16
       
   213 
       
   214 MODULE_DESCRIPTION(DRV_DESCRIPTION);
       
   215 MODULE_AUTHOR("Florian Pose <fp@igh-essen.com>");
       
   216 MODULE_LICENSE("GPL");
       
   217 MODULE_VERSION(DRV_VERSION ", master " EC_MASTER_VERSION);
       
   218 
       
   219 void e100_ec_poll(struct net_device *);
       
   220 
       
   221 static int debug = 3;
       
   222 static int eeprom_bad_csum_allow = 0;
       
   223 module_param(debug, int, 0);
       
   224 module_param(eeprom_bad_csum_allow, int, 0);
       
   225 MODULE_PARM_DESC(debug, "Debug level (0=none,...,16=all)");
       
   226 MODULE_PARM_DESC(eeprom_bad_csum_allow, "Allow bad eeprom checksums");
       
   227 #define DPRINTK(nlevel, klevel, fmt, args...) \
       
   228 	(void)((NETIF_MSG_##nlevel & nic->msg_enable) && \
       
   229 	printk(KERN_##klevel PFX "%s: %s: " fmt, nic->netdev->name, \
       
   230 		__FUNCTION__ , ## args))
       
   231 
       
   232 #define INTEL_8255X_ETHERNET_DEVICE(device_id, ich) {\
       
   233 	PCI_VENDOR_ID_INTEL, device_id, PCI_ANY_ID, PCI_ANY_ID, \
       
   234 	PCI_CLASS_NETWORK_ETHERNET << 8, 0xFFFF00, ich }
       
   235 static struct pci_device_id e100_id_table[] = {
       
   236 	INTEL_8255X_ETHERNET_DEVICE(0x1029, 0),
       
   237 	INTEL_8255X_ETHERNET_DEVICE(0x1030, 0),
       
   238 	INTEL_8255X_ETHERNET_DEVICE(0x1031, 3),
       
   239 	INTEL_8255X_ETHERNET_DEVICE(0x1032, 3),
       
   240 	INTEL_8255X_ETHERNET_DEVICE(0x1033, 3),
       
   241 	INTEL_8255X_ETHERNET_DEVICE(0x1034, 3),
       
   242 	INTEL_8255X_ETHERNET_DEVICE(0x1038, 3),
       
   243 	INTEL_8255X_ETHERNET_DEVICE(0x1039, 4),
       
   244 	INTEL_8255X_ETHERNET_DEVICE(0x103A, 4),
       
   245 	INTEL_8255X_ETHERNET_DEVICE(0x103B, 4),
       
   246 	INTEL_8255X_ETHERNET_DEVICE(0x103C, 4),
       
   247 	INTEL_8255X_ETHERNET_DEVICE(0x103D, 4),
       
   248 	INTEL_8255X_ETHERNET_DEVICE(0x103E, 4),
       
   249 	INTEL_8255X_ETHERNET_DEVICE(0x1050, 5),
       
   250 	INTEL_8255X_ETHERNET_DEVICE(0x1051, 5),
       
   251 	INTEL_8255X_ETHERNET_DEVICE(0x1052, 5),
       
   252 	INTEL_8255X_ETHERNET_DEVICE(0x1053, 5),
       
   253 	INTEL_8255X_ETHERNET_DEVICE(0x1054, 5),
       
   254 	INTEL_8255X_ETHERNET_DEVICE(0x1055, 5),
       
   255 	INTEL_8255X_ETHERNET_DEVICE(0x1056, 5),
       
   256 	INTEL_8255X_ETHERNET_DEVICE(0x1057, 5),
       
   257 	INTEL_8255X_ETHERNET_DEVICE(0x1059, 0),
       
   258 	INTEL_8255X_ETHERNET_DEVICE(0x1064, 6),
       
   259 	INTEL_8255X_ETHERNET_DEVICE(0x1065, 6),
       
   260 	INTEL_8255X_ETHERNET_DEVICE(0x1066, 6),
       
   261 	INTEL_8255X_ETHERNET_DEVICE(0x1067, 6),
       
   262 	INTEL_8255X_ETHERNET_DEVICE(0x1068, 6),
       
   263 	INTEL_8255X_ETHERNET_DEVICE(0x1069, 6),
       
   264 	INTEL_8255X_ETHERNET_DEVICE(0x106A, 6),
       
   265 	INTEL_8255X_ETHERNET_DEVICE(0x106B, 6),
       
   266 	INTEL_8255X_ETHERNET_DEVICE(0x1091, 7),
       
   267 	INTEL_8255X_ETHERNET_DEVICE(0x1092, 7),
       
   268 	INTEL_8255X_ETHERNET_DEVICE(0x1093, 7),
       
   269 	INTEL_8255X_ETHERNET_DEVICE(0x1094, 7),
       
   270 	INTEL_8255X_ETHERNET_DEVICE(0x1095, 7),
       
   271 	INTEL_8255X_ETHERNET_DEVICE(0x1209, 0),
       
   272 	INTEL_8255X_ETHERNET_DEVICE(0x1229, 0),
       
   273 	INTEL_8255X_ETHERNET_DEVICE(0x2449, 2),
       
   274 	INTEL_8255X_ETHERNET_DEVICE(0x2459, 2),
       
   275 	INTEL_8255X_ETHERNET_DEVICE(0x245D, 2),
       
   276 	INTEL_8255X_ETHERNET_DEVICE(0x27DC, 7),
       
   277 	{ 0, }
       
   278 };
       
   279 // prevent from being loaded automatically
       
   280 //MODULE_DEVICE_TABLE(pci, e100_id_table);
       
   281 
       
   282 enum mac {
       
   283 	mac_82557_D100_A  = 0,
       
   284 	mac_82557_D100_B  = 1,
       
   285 	mac_82557_D100_C  = 2,
       
   286 	mac_82558_D101_A4 = 4,
       
   287 	mac_82558_D101_B0 = 5,
       
   288 	mac_82559_D101M   = 8,
       
   289 	mac_82559_D101S   = 9,
       
   290 	mac_82550_D102    = 12,
       
   291 	mac_82550_D102_C  = 13,
       
   292 	mac_82551_E       = 14,
       
   293 	mac_82551_F       = 15,
       
   294 	mac_82551_10      = 16,
       
   295 	mac_unknown       = 0xFF,
       
   296 };
       
   297 
       
   298 enum phy {
       
   299 	phy_100a     = 0x000003E0,
       
   300 	phy_100c     = 0x035002A8,
       
   301 	phy_82555_tx = 0x015002A8,
       
   302 	phy_nsc_tx   = 0x5C002000,
       
   303 	phy_82562_et = 0x033002A8,
       
   304 	phy_82562_em = 0x032002A8,
       
   305 	phy_82562_ek = 0x031002A8,
       
   306 	phy_82562_eh = 0x017002A8,
       
   307 	phy_unknown  = 0xFFFFFFFF,
       
   308 };
       
   309 
       
   310 /* CSR (Control/Status Registers) */
       
   311 struct csr {
       
   312 	struct {
       
   313 		u8 status;
       
   314 		u8 stat_ack;
       
   315 		u8 cmd_lo;
       
   316 		u8 cmd_hi;
       
   317 		u32 gen_ptr;
       
   318 	} scb;
       
   319 	u32 port;
       
   320 	u16 flash_ctrl;
       
   321 	u8 eeprom_ctrl_lo;
       
   322 	u8 eeprom_ctrl_hi;
       
   323 	u32 mdi_ctrl;
       
   324 	u32 rx_dma_count;
       
   325 };
       
   326 
       
   327 enum scb_status {
       
   328 	rus_ready        = 0x10,
       
   329 	rus_mask         = 0x3C,
       
   330 };
       
   331 
       
   332 enum ru_state  {
       
   333 	RU_SUSPENDED = 0,
       
   334 	RU_RUNNING	 = 1,
       
   335 	RU_UNINITIALIZED = -1,
       
   336 };
       
   337 
       
   338 enum scb_stat_ack {
       
   339 	stat_ack_not_ours    = 0x00,
       
   340 	stat_ack_sw_gen      = 0x04,
       
   341 	stat_ack_rnr         = 0x10,
       
   342 	stat_ack_cu_idle     = 0x20,
       
   343 	stat_ack_frame_rx    = 0x40,
       
   344 	stat_ack_cu_cmd_done = 0x80,
       
   345 	stat_ack_not_present = 0xFF,
       
   346 	stat_ack_rx = (stat_ack_sw_gen | stat_ack_rnr | stat_ack_frame_rx),
       
   347 	stat_ack_tx = (stat_ack_cu_idle | stat_ack_cu_cmd_done),
       
   348 };
       
   349 
       
   350 enum scb_cmd_hi {
       
   351 	irq_mask_none = 0x00,
       
   352 	irq_mask_all  = 0x01,
       
   353 	irq_sw_gen    = 0x02,
       
   354 };
       
   355 
       
   356 enum scb_cmd_lo {
       
   357 	cuc_nop        = 0x00,
       
   358 	ruc_start      = 0x01,
       
   359 	ruc_load_base  = 0x06,
       
   360 	cuc_start      = 0x10,
       
   361 	cuc_resume     = 0x20,
       
   362 	cuc_dump_addr  = 0x40,
       
   363 	cuc_dump_stats = 0x50,
       
   364 	cuc_load_base  = 0x60,
       
   365 	cuc_dump_reset = 0x70,
       
   366 };
       
   367 
       
   368 enum cuc_dump {
       
   369 	cuc_dump_complete       = 0x0000A005,
       
   370 	cuc_dump_reset_complete = 0x0000A007,
       
   371 };
       
   372 
       
   373 enum port {
       
   374 	software_reset  = 0x0000,
       
   375 	selftest        = 0x0001,
       
   376 	selective_reset = 0x0002,
       
   377 };
       
   378 
       
   379 enum eeprom_ctrl_lo {
       
   380 	eesk = 0x01,
       
   381 	eecs = 0x02,
       
   382 	eedi = 0x04,
       
   383 	eedo = 0x08,
       
   384 };
       
   385 
       
   386 enum mdi_ctrl {
       
   387 	mdi_write = 0x04000000,
       
   388 	mdi_read  = 0x08000000,
       
   389 	mdi_ready = 0x10000000,
       
   390 };
       
   391 
       
   392 enum eeprom_op {
       
   393 	op_write = 0x05,
       
   394 	op_read  = 0x06,
       
   395 	op_ewds  = 0x10,
       
   396 	op_ewen  = 0x13,
       
   397 };
       
   398 
       
   399 enum eeprom_offsets {
       
   400 	eeprom_cnfg_mdix  = 0x03,
       
   401 	eeprom_id         = 0x0A,
       
   402 	eeprom_config_asf = 0x0D,
       
   403 	eeprom_smbus_addr = 0x90,
       
   404 };
       
   405 
       
   406 enum eeprom_cnfg_mdix {
       
   407 	eeprom_mdix_enabled = 0x0080,
       
   408 };
       
   409 
       
   410 enum eeprom_id {
       
   411 	eeprom_id_wol = 0x0020,
       
   412 };
       
   413 
       
   414 enum eeprom_config_asf {
       
   415 	eeprom_asf = 0x8000,
       
   416 	eeprom_gcl = 0x4000,
       
   417 };
       
   418 
       
   419 enum cb_status {
       
   420 	cb_complete = 0x8000,
       
   421 	cb_ok       = 0x2000,
       
   422 };
       
   423 
       
   424 enum cb_command {
       
   425 	cb_nop    = 0x0000,
       
   426 	cb_iaaddr = 0x0001,
       
   427 	cb_config = 0x0002,
       
   428 	cb_multi  = 0x0003,
       
   429 	cb_tx     = 0x0004,
       
   430 	cb_ucode  = 0x0005,
       
   431 	cb_dump   = 0x0006,
       
   432 	cb_tx_sf  = 0x0008,
       
   433 	cb_cid    = 0x1f00,
       
   434 	cb_i      = 0x2000,
       
   435 	cb_s      = 0x4000,
       
   436 	cb_el     = 0x8000,
       
   437 };
       
   438 
       
   439 struct rfd {
       
   440 	u16 status;
       
   441 	u16 command;
       
   442 	u32 link;
       
   443 	u32 rbd;
       
   444 	u16 actual_size;
       
   445 	u16 size;
       
   446 };
       
   447 
       
   448 struct rx {
       
   449 	struct rx *next, *prev;
       
   450 	struct sk_buff *skb;
       
   451 	dma_addr_t dma_addr;
       
   452 };
       
   453 
       
   454 #if defined(__BIG_ENDIAN_BITFIELD)
       
   455 #define X(a,b)	b,a
       
   456 #else
       
   457 #define X(a,b)	a,b
       
   458 #endif
       
   459 struct config {
       
   460 /*0*/	u8 X(byte_count:6, pad0:2);
       
   461 /*1*/	u8 X(X(rx_fifo_limit:4, tx_fifo_limit:3), pad1:1);
       
   462 /*2*/	u8 adaptive_ifs;
       
   463 /*3*/	u8 X(X(X(X(mwi_enable:1, type_enable:1), read_align_enable:1),
       
   464 	   term_write_cache_line:1), pad3:4);
       
   465 /*4*/	u8 X(rx_dma_max_count:7, pad4:1);
       
   466 /*5*/	u8 X(tx_dma_max_count:7, dma_max_count_enable:1);
       
   467 /*6*/	u8 X(X(X(X(X(X(X(late_scb_update:1, direct_rx_dma:1),
       
   468 	   tno_intr:1), cna_intr:1), standard_tcb:1), standard_stat_counter:1),
       
   469 	   rx_discard_overruns:1), rx_save_bad_frames:1);
       
   470 /*7*/	u8 X(X(X(X(X(rx_discard_short_frames:1, tx_underrun_retry:2),
       
   471 	   pad7:2), rx_extended_rfd:1), tx_two_frames_in_fifo:1),
       
   472 	   tx_dynamic_tbd:1);
       
   473 /*8*/	u8 X(X(mii_mode:1, pad8:6), csma_disabled:1);
       
   474 /*9*/	u8 X(X(X(X(X(rx_tcpudp_checksum:1, pad9:3), vlan_arp_tco:1),
       
   475 	   link_status_wake:1), arp_wake:1), mcmatch_wake:1);
       
   476 /*10*/	u8 X(X(X(pad10:3, no_source_addr_insertion:1), preamble_length:2),
       
   477 	   loopback:2);
       
   478 /*11*/	u8 X(linear_priority:3, pad11:5);
       
   479 /*12*/	u8 X(X(linear_priority_mode:1, pad12:3), ifs:4);
       
   480 /*13*/	u8 ip_addr_lo;
       
   481 /*14*/	u8 ip_addr_hi;
       
   482 /*15*/	u8 X(X(X(X(X(X(X(promiscuous_mode:1, broadcast_disabled:1),
       
   483 	   wait_after_win:1), pad15_1:1), ignore_ul_bit:1), crc_16_bit:1),
       
   484 	   pad15_2:1), crs_or_cdt:1);
       
   485 /*16*/	u8 fc_delay_lo;
       
   486 /*17*/	u8 fc_delay_hi;
       
   487 /*18*/	u8 X(X(X(X(X(rx_stripping:1, tx_padding:1), rx_crc_transfer:1),
       
   488 	   rx_long_ok:1), fc_priority_threshold:3), pad18:1);
       
   489 /*19*/	u8 X(X(X(X(X(X(X(addr_wake:1, magic_packet_disable:1),
       
   490 	   fc_disable:1), fc_restop:1), fc_restart:1), fc_reject:1),
       
   491 	   full_duplex_force:1), full_duplex_pin:1);
       
   492 /*20*/	u8 X(X(X(pad20_1:5, fc_priority_location:1), multi_ia:1), pad20_2:1);
       
   493 /*21*/	u8 X(X(pad21_1:3, multicast_all:1), pad21_2:4);
       
   494 /*22*/	u8 X(X(rx_d102_mode:1, rx_vlan_drop:1), pad22:6);
       
   495 	u8 pad_d102[9];
       
   496 };
       
   497 
       
   498 #define E100_MAX_MULTICAST_ADDRS	64
       
   499 struct multi {
       
   500 	u16 count;
       
   501 	u8 addr[E100_MAX_MULTICAST_ADDRS * ETH_ALEN + 2/*pad*/];
       
   502 };
       
   503 
       
/* Important: keep total struct u32-aligned */
#define UCODE_SIZE			134
/* Command Block: one entry of the CBL ring, shared with the hardware.
 * The payload union is interpreted according to the cb_command value
 * (cb_iaaddr, cb_ucode, cb_config, cb_multi, cb_tx, cb_dump). */
struct cb {
	u16 status;		/* completion bits, see enum cb_status */
	u16 command;		/* command + control bits, see enum cb_command */
	u32 link;		/* bus address of the next CB in the ring */
	union {
		u8 iaaddr[ETH_ALEN];	/* cb_iaaddr: individual (MAC) address */
		u32 ucode[UCODE_SIZE];	/* cb_ucode: microcode image */
		struct config config;	/* cb_config: device configuration */
		struct multi multi;	/* cb_multi: multicast address list */
		struct {		/* cb_tx: transmit command block (TCB) */
			u32 tbd_array;
			u16 tcb_byte_count;
			u8 threshold;
			u8 tbd_count;
			struct {
				u32 buf_addr;	/* bus address of skb data */
				u16 size;
				u16 eol;
			} tbd;
		} tcb;
		u32 dump_buffer_addr;	/* cb_dump: where to dump stats */
	} u;
	/* Driver bookkeeping below — presumably ignored by the device,
	 * which follows the 'link' bus addresses instead (TODO confirm
	 * against the 8255x manual). */
	struct cb *next, *prev;
	dma_addr_t dma_addr;	/* bus address of this CB */
	struct sk_buff *skb;	/* Tx skb mapped to this CB, if any */
};
       
   532 
       
   533 enum loopback {
       
   534 	lb_none = 0, lb_mac = 1, lb_phy = 3,
       
   535 };
       
   536 
       
   537 struct stats {
       
   538 	u32 tx_good_frames, tx_max_collisions, tx_late_collisions,
       
   539 		tx_underruns, tx_lost_crs, tx_deferred, tx_single_collisions,
       
   540 		tx_multiple_collisions, tx_total_collisions;
       
   541 	u32 rx_good_frames, rx_crc_errors, rx_alignment_errors,
       
   542 		rx_resource_errors, rx_overrun_errors, rx_cdt_errors,
       
   543 		rx_short_frame_errors;
       
   544 	u32 fc_xmt_pause, fc_rcv_pause, fc_rcv_unsupported;
       
   545 	u16 xmt_tco_frames, rcv_tco_frames;
       
   546 	u32 complete;
       
   547 };
       
   548 
       
   549 struct mem {
       
   550 	struct {
       
   551 		u32 signature;
       
   552 		u32 result;
       
   553 	} selftest;
       
   554 	struct stats stats;
       
   555 	u8 dump_buf[596];
       
   556 };
       
   557 
       
   558 struct param_range {
       
   559 	u32 min;
       
   560 	u32 max;
       
   561 	u32 count;
       
   562 };
       
   563 
       
   564 struct params {
       
   565 	struct param_range rfds;
       
   566 	struct param_range cbs;
       
   567 };
       
   568 
       
/* Per-adapter driver state.  One instance per probed device. */
struct nic {
	/* Begin: frequently used values: keep adjacent for cache effect */
	u32 msg_enable				____cacheline_aligned;	/* NETIF_MSG_* mask used by DPRINTK */
	struct net_device *netdev;
	struct pci_dev *pdev;

	struct rx *rxs				____cacheline_aligned;	/* RFD ring storage (see "IV. Receive") */
	struct rx *rx_to_use;		/* next RFD slot to hand to hardware */
	struct rx *rx_to_clean;		/* next RFD to check for completion */
	struct rfd blank_rfd;		/* template used when arming fresh RFDs */
	enum ru_state ru_running;	/* receive unit state */

	spinlock_t cb_lock			____cacheline_aligned;	/* protects the shared CBL */
	spinlock_t cmd_lock;		/* serializes SCB command register access */
	struct csr __iomem *csr;	/* memory-mapped Control/Status Registers */
	enum scb_cmd_lo cuc_cmd;	/* CU command to issue next — presumably
					 * cuc_start vs. cuc_resume; confirm at
					 * call sites */
	unsigned int cbs_avail;		/* number of free CB resources */
	struct cb *cbs;			/* Command Block List (ring) */
	struct cb *cb_to_use;		/* next CB to use for queuing a command */
	struct cb *cb_to_send;		/* first CB to start on after failed resume */
	struct cb *cb_to_clean;		/* next CB to check for completion */
	u16 tx_command;			/* cb_command value used for Tx CBs */
	/* End: frequently used values: keep adjacent for cache effect */

	enum {
		ich                = (1 << 0),
		promiscuous        = (1 << 1),
		multicast_all      = (1 << 2),
		wol_magic          = (1 << 3),
		ich_10h_workaround = (1 << 4),
	} flags					____cacheline_aligned;

	enum mac mac;			/* controller revision, see enum mac */
	enum phy phy;			/* PHY id, see enum phy */
	struct params params;		/* RFD/CB ring sizing */
	struct net_device_stats net_stats;
	struct timer_list watchdog;
	struct timer_list blink_timer;	/* for ethtool LED identify blinking */
	struct mii_if_info mii;
	struct work_struct tx_timeout_task;
	enum loopback loopback;

	struct mem *mem;		/* DMA area: selftest result, stats, dump buf */
	dma_addr_t dma_addr;		/* bus address of *mem */

	dma_addr_t cbs_dma_addr;	/* bus address of the CB ring */
	u8 adaptive_ifs;
	u8 tx_threshold;
	u32 tx_frames;
	u32 tx_collisions;
	u32 tx_deferred;
	u32 tx_single_collisions;
	u32 tx_multiple_collisions;
	u32 tx_fc_pause;
	u32 tx_tco_frames;

	u32 rx_fc_pause;
	u32 rx_fc_unsupported;
	u32 rx_tco_frames;
	u32 rx_over_length_errors;

	u8 rev_id;			/* PCI revision id */
	u16 leds;
	u16 eeprom_wc;			/* EEPROM word count */
	u16 eeprom[256];		/* cached EEPROM contents */
	spinlock_t mdio_lock;		/* Stratus87247: protects MDI control
					 * register manipulations (see FIXES) */

	/* EtherCAT: non-NULL when this NIC is claimed by the EtherCAT
	 * master; normal netdev IRQ handling is then bypassed. */
    ec_device_t *ecdev;
    unsigned long ec_watchdog_jiffies;	/* jiffies of last EtherCAT watchdog run */
};
       
   639 
       
/** Force posted PCI writes to reach the device.
 *
 * MMIO writes may be buffered by intermediate bridges; reading back
 * any CSR byte forces them to complete before we proceed.
 */
static inline void e100_write_flush(struct nic *nic)
{
	/* Flush previous PCI writes through intermediate bridges
	 * by doing a benign read */
	(void)readb(&nic->csr->scb.status);
}
       
   646 
       
/** Unmask the adapter's interrupt line.
 *
 * No-op when the device is claimed by the EtherCAT master
 * (nic->ecdev set): the master polls, so interrupts stay masked.
 */
static void e100_enable_irq(struct nic *nic)
{
	unsigned long flags;

    if (nic->ecdev)
        return;

	/* cmd_lock serializes writes to the SCB command register */
	spin_lock_irqsave(&nic->cmd_lock, flags);
	writeb(irq_mask_none, &nic->csr->scb.cmd_hi);
	e100_write_flush(nic);
	spin_unlock_irqrestore(&nic->cmd_lock, flags);
}
       
   659 
       
/** Mask the adapter's interrupt line.
 *
 * No-op when the device is claimed by the EtherCAT master
 * (nic->ecdev set): interrupts are already left masked in that mode.
 */
static void e100_disable_irq(struct nic *nic)
{
	unsigned long flags;

    if (nic->ecdev)
        return;

	/* cmd_lock serializes writes to the SCB command register */
	spin_lock_irqsave(&nic->cmd_lock, flags);
	writeb(irq_mask_all, &nic->csr->scb.cmd_hi);
	e100_write_flush(nic);
	spin_unlock_irqrestore(&nic->cmd_lock, flags);
}
       
   672 
       
/** Fully reset the controller.
 *
 * Sequence matters: a selective reset first idles the CU/RU and gets
 * the device off the PCI bus, then a software reset clears all state,
 * and finally the interrupt line is re-masked because reset leaves it
 * unmasked.
 */
static void e100_hw_reset(struct nic *nic)
{
	/* Put CU and RU into idle with a selective reset to get
	 * device off of PCI bus */
	writel(selective_reset, &nic->csr->port);
	e100_write_flush(nic); udelay(20);

	/* Now fully reset device */
	writel(software_reset, &nic->csr->port);
	e100_write_flush(nic); udelay(20);

	/* Mask off our interrupt line - it's unmasked after reset */
	e100_disable_irq(nic);
}
       
   687 
       
/** Run the controller's built-in self-test.
 *
 * The device DMAs its signature and result into nic->mem->selftest,
 * so a pass also demonstrates working DMA to/from host memory.
 *
 * @return 0 on success, -ETIMEDOUT if the test failed or never
 *         completed (signature not written back).
 */
static int e100_self_test(struct nic *nic)
{
	u32 dma_addr = nic->dma_addr + offsetof(struct mem, selftest);

	/* Passing the self-test is a pretty good indication
	 * that the device can DMA to/from host memory */

	/* Pre-fill with values the device must overwrite */
	nic->mem->selftest.signature = 0;
	nic->mem->selftest.result = 0xFFFFFFFF;

	writel(selftest | dma_addr, &nic->csr->port);
	e100_write_flush(nic);
	/* Wait 10 msec for self-test to complete */
	msleep(10);

	/* Interrupts are enabled after self-test */
	e100_disable_irq(nic);

	/* Check results of self-test */
	if(nic->mem->selftest.result != 0) {
		DPRINTK(HW, ERR, "Self-test failed: result=0x%08X\n",
			nic->mem->selftest.result);
		return -ETIMEDOUT;
	}
	if(nic->mem->selftest.signature == 0) {
		/* Device never wrote its signature back: timed out */
		DPRINTK(HW, ERR, "Self-test failed: timed out\n");
		return -ETIMEDOUT;
	}

	return 0;
}
       
   719 
       
   720 static void e100_eeprom_write(struct nic *nic, u16 addr_len, u16 addr, u16 data)
       
   721 {
       
   722 	u32 cmd_addr_data[3];
       
   723 	u8 ctrl;
       
   724 	int i, j;
       
   725 
       
   726 	/* Three cmds: write/erase enable, write data, write/erase disable */
       
   727 	cmd_addr_data[0] = op_ewen << (addr_len - 2);
       
   728 	cmd_addr_data[1] = (((op_write << addr_len) | addr) << 16) |
       
   729 		cpu_to_le16(data);
       
   730 	cmd_addr_data[2] = op_ewds << (addr_len - 2);
       
   731 
       
   732 	/* Bit-bang cmds to write word to eeprom */
       
   733 	for(j = 0; j < 3; j++) {
       
   734 
       
   735 		/* Chip select */
       
   736 		writeb(eecs | eesk, &nic->csr->eeprom_ctrl_lo);
       
   737 		e100_write_flush(nic); udelay(4);
       
   738 
       
   739 		for(i = 31; i >= 0; i--) {
       
   740 			ctrl = (cmd_addr_data[j] & (1 << i)) ?
       
   741 				eecs | eedi : eecs;
       
   742 			writeb(ctrl, &nic->csr->eeprom_ctrl_lo);
       
   743 			e100_write_flush(nic); udelay(4);
       
   744 
       
   745 			writeb(ctrl | eesk, &nic->csr->eeprom_ctrl_lo);
       
   746 			e100_write_flush(nic); udelay(4);
       
   747 		}
       
   748 		/* Wait 10 msec for cmd to complete */
       
   749 		msleep(10);
       
   750 
       
   751 		/* Chip deselect */
       
   752 		writeb(0, &nic->csr->eeprom_ctrl_lo);
       
   753 		e100_write_flush(nic); udelay(4);
       
   754 	}
       
   755 };
       
   756 
       
   757 /* General technique stolen from the eepro100 driver - very clever */
       
   758 static u16 e100_eeprom_read(struct nic *nic, u16 *addr_len, u16 addr)
       
   759 {
       
   760 	u32 cmd_addr_data;
       
   761 	u16 data = 0;
       
   762 	u8 ctrl;
       
   763 	int i;
       
   764 
       
   765 	cmd_addr_data = ((op_read << *addr_len) | addr) << 16;
       
   766 
       
   767 	/* Chip select */
       
   768 	writeb(eecs | eesk, &nic->csr->eeprom_ctrl_lo);
       
   769 	e100_write_flush(nic); udelay(4);
       
   770 
       
   771 	/* Bit-bang to read word from eeprom */
       
   772 	for(i = 31; i >= 0; i--) {
       
   773 		ctrl = (cmd_addr_data & (1 << i)) ? eecs | eedi : eecs;
       
   774 		writeb(ctrl, &nic->csr->eeprom_ctrl_lo);
       
   775 		e100_write_flush(nic); udelay(4);
       
   776 
       
   777 		writeb(ctrl | eesk, &nic->csr->eeprom_ctrl_lo);
       
   778 		e100_write_flush(nic); udelay(4);
       
   779 
       
   780 		/* Eeprom drives a dummy zero to EEDO after receiving
       
   781 		 * complete address.  Use this to adjust addr_len. */
       
   782 		ctrl = readb(&nic->csr->eeprom_ctrl_lo);
       
   783 		if(!(ctrl & eedo) && i > 16) {
       
   784 			*addr_len -= (i - 16);
       
   785 			i = 17;
       
   786 		}
       
   787 
       
   788 		data = (data << 1) | (ctrl & eedo ? 1 : 0);
       
   789 	}
       
   790 
       
   791 	/* Chip deselect */
       
   792 	writeb(0, &nic->csr->eeprom_ctrl_lo);
       
   793 	e100_write_flush(nic); udelay(4);
       
   794 
       
   795 	return le16_to_cpu(data);
       
   796 };
       
   797 
       
/* Load entire EEPROM image into driver cache and validate checksum */
/* Returns 0 on success; -EAGAIN if the checksum is bad and the
 * eeprom_bad_csum_allow override (module parameter, defined elsewhere
 * in this file) is not set. */
static int e100_eeprom_load(struct nic *nic)
{
	u16 addr, addr_len = 8, checksum = 0;

	/* Try reading with an 8-bit addr len to discover actual addr len */
	e100_eeprom_read(nic, &addr_len, 0);
	nic->eeprom_wc = 1 << addr_len;	/* word count = 2^addr_len */

	for(addr = 0; addr < nic->eeprom_wc; addr++) {
		nic->eeprom[addr] = e100_eeprom_read(nic, &addr_len, addr);
		/* last word holds the checksum itself - exclude it from the sum */
		if(addr < nic->eeprom_wc - 1)
			checksum += cpu_to_le16(nic->eeprom[addr]);
	}

	/* The checksum, stored in the last word, is calculated such that
	 * the sum of words should be 0xBABA */
	checksum = le16_to_cpu(0xBABA - checksum);
	if(checksum != nic->eeprom[nic->eeprom_wc - 1]) {
		DPRINTK(PROBE, ERR, "EEPROM corrupted\n");
		if (!eeprom_bad_csum_allow)
			return -EAGAIN;
	}

	return 0;
}
       
   824 
       
/* Save (portion of) driver EEPROM cache to device and update checksum */
/* @start/@count select the word range to flush.  The range must not
 * reach the final word, which is reserved for the checksum and is
 * recomputed and rewritten here.  Returns 0 or -EINVAL. */
static int e100_eeprom_save(struct nic *nic, u16 start, u16 count)
{
	u16 addr, addr_len = 8, checksum = 0;

	/* Try reading with an 8-bit addr len to discover actual addr len */
	e100_eeprom_read(nic, &addr_len, 0);
	nic->eeprom_wc = 1 << addr_len;

	/* reject ranges that would touch the checksum word (last word) */
	if(start + count >= nic->eeprom_wc)
		return -EINVAL;

	for(addr = start; addr < start + count; addr++)
		e100_eeprom_write(nic, addr_len, addr, nic->eeprom[addr]);

	/* The checksum, stored in the last word, is calculated such that
	 * the sum of words should be 0xBABA */
	for(addr = 0; addr < nic->eeprom_wc - 1; addr++)
		checksum += cpu_to_le16(nic->eeprom[addr]);
	nic->eeprom[nic->eeprom_wc - 1] = le16_to_cpu(0xBABA - checksum);
	e100_eeprom_write(nic, addr_len, nic->eeprom_wc - 1,
		nic->eeprom[nic->eeprom_wc - 1]);

	return 0;
}
       
   850 
       
#define E100_WAIT_SCB_TIMEOUT 20000 /* we might have to wait 100ms!!! */
#define E100_WAIT_SCB_FAST 20       /* delay like the old code */
/* Issue a command through the System Control Block (SCB).
 * Waits for the previous command to be accepted (cmd_lo reads back 0),
 * loads the general pointer unless resuming, then writes the command.
 * Returns 0 on success or -EAGAIN if the SCB never cleared.
 * NOTE(review): in EtherCAT mode (nic->ecdev set) cmd_lock is skipped -
 * presumably all access is from a single context; confirm with callers. */
static int e100_exec_cmd(struct nic *nic, u8 cmd, dma_addr_t dma_addr)
{
	unsigned long flags = 0;
	unsigned int i;
	int err = 0;

	if (!nic->ecdev)
		spin_lock_irqsave(&nic->cmd_lock, flags);

	/* Previous command is accepted when SCB clears */
	for(i = 0; i < E100_WAIT_SCB_TIMEOUT; i++) {
		if(likely(!readb(&nic->csr->scb.cmd_lo)))
			break;
		cpu_relax();
		/* spin the first E100_WAIT_SCB_FAST iterations, then back off */
		if(unlikely(i > E100_WAIT_SCB_FAST))
			udelay(5);
	}
	if(unlikely(i == E100_WAIT_SCB_TIMEOUT)) {
		err = -EAGAIN;
		goto err_unlock;
	}

	/* a resume must not disturb the general pointer */
	if(unlikely(cmd != cuc_resume))
		writel(dma_addr, &nic->csr->scb.gen_ptr);
	writeb(cmd, &nic->csr->scb.cmd_lo);

err_unlock:
	if (!nic->ecdev)
		spin_unlock_irqrestore(&nic->cmd_lock, flags);

	return err;
}
       
   885 
       
/* Take the next control block from the ring, let @cb_prepare fill it
 * in, and drive the CU to execute pending cbs.
 * Returns 0 on success, -ENOMEM when no cb is available, or -ENOSPC
 * when this cb consumed the last slot (the cb is still submitted).
 * NOTE(review): cb_lock is skipped in EtherCAT mode (nic->ecdev). */
static int e100_exec_cb(struct nic *nic, struct sk_buff *skb,
	void (*cb_prepare)(struct nic *, struct cb *, struct sk_buff *))
{
	struct cb *cb;
	unsigned long flags = 0;
	int err = 0;

	if (!nic->ecdev)
		spin_lock_irqsave(&nic->cb_lock, flags);

	if(unlikely(!nic->cbs_avail)) {
		err = -ENOMEM;
		goto err_unlock;
	}

	cb = nic->cb_to_use;
	nic->cb_to_use = cb->next;
	nic->cbs_avail--;
	cb->skb = skb;

	/* ring is now full: report -ENOSPC but still process this cb */
	if(unlikely(!nic->cbs_avail))
		err = -ENOSPC;

	cb_prepare(nic, cb, skb);

	/* Order is important otherwise we'll be in a race with h/w:
	 * set S-bit in current first, then clear S-bit in previous. */
	cb->command |= cpu_to_le16(cb_s);
	wmb();
	cb->prev->command &= cpu_to_le16(~cb_s);

	while(nic->cb_to_send != nic->cb_to_use) {
		if(unlikely(e100_exec_cmd(nic, nic->cuc_cmd,
			nic->cb_to_send->dma_addr))) {
			/* Ok, here's where things get sticky.  It's
			 * possible that we can't schedule the command
			 * because the controller is too busy, so
			 * let's just queue the command and try again
			 * when another command is scheduled. */
			if(err == -ENOSPC) {
				//request a reset
				schedule_work(&nic->tx_timeout_task);
			}
			break;
		} else {
			nic->cuc_cmd = cuc_resume;
			nic->cb_to_send = nic->cb_to_send->next;
		}
	}

err_unlock:
	if (!nic->ecdev)
		spin_unlock_irqrestore(&nic->cb_lock, flags);

	return err;
}
       
   942 
       
   943 static u16 mdio_ctrl(struct nic *nic, u32 addr, u32 dir, u32 reg, u16 data)
       
   944 {
       
   945 	u32 data_out = 0;
       
   946 	unsigned int i;
       
   947 	unsigned long flags = 0;
       
   948 
       
   949 
       
   950 	/*
       
   951 	 * Stratus87247: we shouldn't be writing the MDI control
       
   952 	 * register until the Ready bit shows True.  Also, since
       
   953 	 * manipulation of the MDI control registers is a multi-step
       
   954 	 * procedure it should be done under lock.
       
   955 	 */
       
   956 	if (!nic->ecdev)
       
   957 		spin_lock_irqsave(&nic->mdio_lock, flags);
       
   958 	for (i = 100; i; --i) {
       
   959 		if (readl(&nic->csr->mdi_ctrl) & mdi_ready)
       
   960 			break;
       
   961 		udelay(20);
       
   962 	}
       
   963 	if (unlikely(!i)) {
       
   964 		printk("e100.mdio_ctrl(%s) won't go Ready\n",
       
   965 			nic->netdev->name );
       
   966 		if (!nic->ecdev)
       
   967 			spin_unlock_irqrestore(&nic->mdio_lock, flags);
       
   968 		return 0;		/* No way to indicate timeout error */
       
   969 	}
       
   970 	writel((reg << 16) | (addr << 21) | dir | data, &nic->csr->mdi_ctrl);
       
   971 
       
   972 	for (i = 0; i < 100; i++) {
       
   973 		udelay(20);
       
   974 		if ((data_out = readl(&nic->csr->mdi_ctrl)) & mdi_ready)
       
   975 			break;
       
   976 	}
       
   977 	if (!nic->ecdev)
       
   978 		spin_unlock_irqrestore(&nic->mdio_lock, flags);
       
   979 	DPRINTK(HW, DEBUG,
       
   980 		"%s:addr=%d, reg=%d, data_in=0x%04X, data_out=0x%04X\n",
       
   981 		dir == mdi_read ? "READ" : "WRITE", addr, reg, data, data_out);
       
   982 	return (u16)data_out;
       
   983 }
       
   984 
       
   985 static int mdio_read(struct net_device *netdev, int addr, int reg)
       
   986 {
       
   987 	return mdio_ctrl(netdev_priv(netdev), addr, mdi_read, reg, 0);
       
   988 }
       
   989 
       
   990 static void mdio_write(struct net_device *netdev, int addr, int reg, int data)
       
   991 {
       
   992 	mdio_ctrl(netdev_priv(netdev), addr, mdi_write, reg, data);
       
   993 }
       
   994 
       
/* Fill in soft-state defaults: ring-size parameters, MAC type derived
 * from the PCI revision ID, tx tuning values, the blank-RFD template,
 * and the generic-MII glue. */
static void e100_get_defaults(struct nic *nic)
{
	struct param_range rfds = { .min = 16, .max = 256, .count = 256 };
	struct param_range cbs  = { .min = 64, .max = 256, .count = 128 };

	pci_read_config_byte(nic->pdev, PCI_REVISION_ID, &nic->rev_id);
	/* MAC type is encoded as rev ID; exception: ICH is treated as 82559 */
	nic->mac = (nic->flags & ich) ? mac_82559_D101M : nic->rev_id;
	if(nic->mac == mac_unknown)
		nic->mac = mac_82557_D100_A;

	nic->params.rfds = rfds;
	nic->params.cbs = cbs;

	/* Quadwords to DMA into FIFO before starting frame transmit */
	nic->tx_threshold = 0xE0;

	/* no interrupt for every tx completion, delay = 256us if not 557*/
	nic->tx_command = cpu_to_le16(cb_tx | cb_tx_sf |
		((nic->mac >= mac_82558_D101_A4) ? cb_cid : cb_i));

	/* Template for a freshly allocated RFD */
	nic->blank_rfd.command = cpu_to_le16(cb_el);
	/* 0xFFFFFFFF: presumably the "no RBD" marker - confirm vs 8255x manual */
	nic->blank_rfd.rbd = 0xFFFFFFFF;
	nic->blank_rfd.size = cpu_to_le16(VLAN_ETH_FRAME_LEN);

	/* MII setup */
	nic->mii.phy_id_mask = 0x1F;
	nic->mii.reg_num_mask = 0x1F;
	nic->mii.dev = nic->netdev;
	nic->mii.mdio_read = mdio_read;
	nic->mii.mdio_write = mdio_write;
}
       
  1028 
       
/* cb_prepare callback: build the 22-byte CONFIGURE command image in
 * @cb.  Base values are set first, then adjusted for forced duplex,
 * promiscuous/loopback, multicast-all, Wake-on-LAN state, and MAC
 * generation.  The resulting bytes are dumped at HW/DEBUG level. */
static void e100_configure(struct nic *nic, struct cb *cb, struct sk_buff *skb)
{
	struct config *config = &cb->u.config;
	u8 *c = (u8 *)config;

	cb->command = cpu_to_le16(cb_config);

	memset(config, 0, sizeof(struct config));

	config->byte_count = 0x16;		/* bytes in this struct */
	config->rx_fifo_limit = 0x8;		/* bytes in FIFO before DMA */
	config->direct_rx_dma = 0x1;		/* reserved */
	config->standard_tcb = 0x1;		/* 1=standard, 0=extended */
	config->standard_stat_counter = 0x1;	/* 1=standard, 0=extended */
	config->rx_discard_short_frames = 0x1;	/* 1=discard, 0=pass */
	config->tx_underrun_retry = 0x3;	/* # of underrun retries */
	config->mii_mode = 0x1;			/* 1=MII mode, 0=503 mode */
	config->pad10 = 0x6;
	config->no_source_addr_insertion = 0x1;	/* 1=no, 0=yes */
	config->preamble_length = 0x2;		/* 0=1, 1=3, 2=7, 3=15 bytes */
	config->ifs = 0x6;			/* x16 = inter frame spacing */
	config->ip_addr_hi = 0xF2;		/* ARP IP filter - not used */
	config->pad15_1 = 0x1;
	config->pad15_2 = 0x1;
	config->crs_or_cdt = 0x0;		/* 0=CRS only, 1=CRS or CDT */
	config->fc_delay_hi = 0x40;		/* time delay for fc frame */
	config->tx_padding = 0x1;		/* 1=pad short frames */
	config->fc_priority_threshold = 0x7;	/* 7=priority fc disabled */
	config->pad18 = 0x1;
	config->full_duplex_pin = 0x1;		/* 1=examine FDX# pin */
	config->pad20_1 = 0x1F;
	config->fc_priority_location = 0x1;	/* 1=byte#31, 0=byte#19 */
	config->pad21_1 = 0x5;

	/* runtime-tuned values carried in the soft state */
	config->adaptive_ifs = nic->adaptive_ifs;
	config->loopback = nic->loopback;

	if(nic->mii.force_media && nic->mii.full_duplex)
		config->full_duplex_force = 0x1;	/* 1=force, 0=auto */

	if(nic->flags & promiscuous || nic->loopback) {
		config->rx_save_bad_frames = 0x1;	/* 1=save, 0=discard */
		config->rx_discard_short_frames = 0x0;	/* 1=discard, 0=save */
		config->promiscuous_mode = 0x1;		/* 1=on, 0=off */
	}

	if(nic->flags & multicast_all)
		config->multicast_all = 0x1;		/* 1=accept, 0=no */

	/* disable WoL when up */
	/* EtherCAT devices (nic->ecdev) always run with WoL disabled */
	if (nic->ecdev ||
            (netif_running(nic->netdev) || !(nic->flags & wol_magic)))
		config->magic_packet_disable = 0x1;	/* 1=off, 0=on */

	/* 82558 (D101) and newer support extra features */
	if(nic->mac >= mac_82558_D101_A4) {
		config->fc_disable = 0x1;	/* 1=Tx fc off, 0=Tx fc on */
		config->mwi_enable = 0x1;	/* 1=enable, 0=disable */
		config->standard_tcb = 0x0;	/* 1=standard, 0=extended */
		config->rx_long_ok = 0x1;	/* 1=VLANs ok, 0=standard */
		if(nic->mac >= mac_82559_D101M)
			config->tno_intr = 0x1;		/* TCO stats enable */
		else
			config->standard_stat_counter = 0x0;
	}

	DPRINTK(HW, DEBUG, "[00-07]=%02X:%02X:%02X:%02X:%02X:%02X:%02X:%02X\n",
		c[0], c[1], c[2], c[3], c[4], c[5], c[6], c[7]);
	DPRINTK(HW, DEBUG, "[08-15]=%02X:%02X:%02X:%02X:%02X:%02X:%02X:%02X\n",
		c[8], c[9], c[10], c[11], c[12], c[13], c[14], c[15]);
	DPRINTK(HW, DEBUG, "[16-23]=%02X:%02X:%02X:%02X:%02X:%02X:%02X:%02X\n",
		c[16], c[17], c[18], c[19], c[20], c[21], c[22], c[23]);
}
       
  1101 
       
/********************************************************/
/*  Micro code for 8086:1229 Rev 8                      */
/********************************************************/

/*  Parameter values for the D101M B-step  */
/* Dword indices into the D101M ucode image below of the three 16-bit
 * CPUSaver literals that e100_setup_ucode() patches in: the interrupt
 * delay timer, the max bundled-frame count, and the min-size mask. */
#define D101M_CPUSAVER_TIMER_DWORD		78
#define D101M_CPUSAVER_BUNDLE_DWORD		65
#define D101M_CPUSAVER_MIN_SIZE_DWORD		126
       
  1110 
       
  1111 #define D101M_B_RCVBUNDLE_UCODE \
       
  1112 {\
       
  1113 0x00550215, 0xFFFF0437, 0xFFFFFFFF, 0x06A70789, 0xFFFFFFFF, 0x0558FFFF, \
       
  1114 0x000C0001, 0x00101312, 0x000C0008, 0x00380216, \
       
  1115 0x0010009C, 0x00204056, 0x002380CC, 0x00380056, \
       
  1116 0x0010009C, 0x00244C0B, 0x00000800, 0x00124818, \
       
  1117 0x00380438, 0x00000000, 0x00140000, 0x00380555, \
       
  1118 0x00308000, 0x00100662, 0x00100561, 0x000E0408, \
       
  1119 0x00134861, 0x000C0002, 0x00103093, 0x00308000, \
       
  1120 0x00100624, 0x00100561, 0x000E0408, 0x00100861, \
       
  1121 0x000C007E, 0x00222C21, 0x000C0002, 0x00103093, \
       
  1122 0x00380C7A, 0x00080000, 0x00103090, 0x00380C7A, \
       
  1123 0x00000000, 0x00000000, 0x00000000, 0x00000000, \
       
  1124 0x0010009C, 0x00244C2D, 0x00010004, 0x00041000, \
       
  1125 0x003A0437, 0x00044010, 0x0038078A, 0x00000000, \
       
  1126 0x00100099, 0x00206C7A, 0x0010009C, 0x00244C48, \
       
  1127 0x00130824, 0x000C0001, 0x00101213, 0x00260C75, \
       
  1128 0x00041000, 0x00010004, 0x00130826, 0x000C0006, \
       
  1129 0x002206A8, 0x0013C926, 0x00101313, 0x003806A8, \
       
  1130 0x00000000, 0x00000000, 0x00000000, 0x00000000, \
       
  1131 0x00000000, 0x00000000, 0x00000000, 0x00000000, \
       
  1132 0x00080600, 0x00101B10, 0x00050004, 0x00100826, \
       
  1133 0x00101210, 0x00380C34, 0x00000000, 0x00000000, \
       
  1134 0x0021155B, 0x00100099, 0x00206559, 0x0010009C, \
       
  1135 0x00244559, 0x00130836, 0x000C0000, 0x00220C62, \
       
  1136 0x000C0001, 0x00101B13, 0x00229C0E, 0x00210C0E, \
       
  1137 0x00226C0E, 0x00216C0E, 0x0022FC0E, 0x00215C0E, \
       
  1138 0x00214C0E, 0x00380555, 0x00010004, 0x00041000, \
       
  1139 0x00278C67, 0x00040800, 0x00018100, 0x003A0437, \
       
  1140 0x00130826, 0x000C0001, 0x00220559, 0x00101313, \
       
  1141 0x00380559, 0x00000000, 0x00000000, 0x00000000, \
       
  1142 0x00000000, 0x00000000, 0x00000000, 0x00000000, \
       
  1143 0x00000000, 0x00130831, 0x0010090B, 0x00124813, \
       
  1144 0x000CFF80, 0x002606AB, 0x00041000, 0x00010004, \
       
  1145 0x003806A8, 0x00000000, 0x00000000, 0x00000000, \
       
  1146 }
       
  1147 
       
/********************************************************/
/*  Micro code for 8086:1229 Rev 9                      */
/********************************************************/

/*  Parameter values for the D101S  */
/* Dword indices into the D101S ucode image below of the three 16-bit
 * CPUSaver literals that e100_setup_ucode() patches in. */
#define D101S_CPUSAVER_TIMER_DWORD		78
#define D101S_CPUSAVER_BUNDLE_DWORD		67
#define D101S_CPUSAVER_MIN_SIZE_DWORD		128
       
  1156 
       
  1157 #define D101S_RCVBUNDLE_UCODE \
       
  1158 {\
       
  1159 0x00550242, 0xFFFF047E, 0xFFFFFFFF, 0x06FF0818, 0xFFFFFFFF, 0x05A6FFFF, \
       
  1160 0x000C0001, 0x00101312, 0x000C0008, 0x00380243, \
       
  1161 0x0010009C, 0x00204056, 0x002380D0, 0x00380056, \
       
  1162 0x0010009C, 0x00244F8B, 0x00000800, 0x00124818, \
       
  1163 0x0038047F, 0x00000000, 0x00140000, 0x003805A3, \
       
  1164 0x00308000, 0x00100610, 0x00100561, 0x000E0408, \
       
  1165 0x00134861, 0x000C0002, 0x00103093, 0x00308000, \
       
  1166 0x00100624, 0x00100561, 0x000E0408, 0x00100861, \
       
  1167 0x000C007E, 0x00222FA1, 0x000C0002, 0x00103093, \
       
  1168 0x00380F90, 0x00080000, 0x00103090, 0x00380F90, \
       
  1169 0x00000000, 0x00000000, 0x00000000, 0x00000000, \
       
  1170 0x0010009C, 0x00244FAD, 0x00010004, 0x00041000, \
       
  1171 0x003A047E, 0x00044010, 0x00380819, 0x00000000, \
       
  1172 0x00100099, 0x00206FFD, 0x0010009A, 0x0020AFFD, \
       
  1173 0x0010009C, 0x00244FC8, 0x00130824, 0x000C0001, \
       
  1174 0x00101213, 0x00260FF7, 0x00041000, 0x00010004, \
       
  1175 0x00130826, 0x000C0006, 0x00220700, 0x0013C926, \
       
  1176 0x00101313, 0x00380700, 0x00000000, 0x00000000, \
       
  1177 0x00000000, 0x00000000, 0x00000000, 0x00000000, \
       
  1178 0x00080600, 0x00101B10, 0x00050004, 0x00100826, \
       
  1179 0x00101210, 0x00380FB6, 0x00000000, 0x00000000, \
       
  1180 0x002115A9, 0x00100099, 0x002065A7, 0x0010009A, \
       
  1181 0x0020A5A7, 0x0010009C, 0x002445A7, 0x00130836, \
       
  1182 0x000C0000, 0x00220FE4, 0x000C0001, 0x00101B13, \
       
  1183 0x00229F8E, 0x00210F8E, 0x00226F8E, 0x00216F8E, \
       
  1184 0x0022FF8E, 0x00215F8E, 0x00214F8E, 0x003805A3, \
       
  1185 0x00010004, 0x00041000, 0x00278FE9, 0x00040800, \
       
  1186 0x00018100, 0x003A047E, 0x00130826, 0x000C0001, \
       
  1187 0x002205A7, 0x00101313, 0x003805A7, 0x00000000, \
       
  1188 0x00000000, 0x00000000, 0x00000000, 0x00000000, \
       
  1189 0x00000000, 0x00000000, 0x00000000, 0x00130831, \
       
  1190 0x0010090B, 0x00124813, 0x000CFF80, 0x00260703, \
       
  1191 0x00041000, 0x00010004, 0x00380700  \
       
  1192 }
       
  1193 
       
/********************************************************/
/*  Micro code for the 8086:1229 Rev F/10               */
/********************************************************/

/*  Parameter values for the D102 E-step  */
/* Dword indices into the D102 E-step ucode image below of the three
 * 16-bit CPUSaver literals that e100_setup_ucode() patches in. */
#define D102_E_CPUSAVER_TIMER_DWORD		42
#define D102_E_CPUSAVER_BUNDLE_DWORD		54
#define D102_E_CPUSAVER_MIN_SIZE_DWORD		46
       
  1202 
       
  1203 #define     D102_E_RCVBUNDLE_UCODE \
       
  1204 {\
       
  1205 0x007D028F, 0x0E4204F9, 0x14ED0C85, 0x14FA14E9, 0x0EF70E36, 0x1FFF1FFF, \
       
  1206 0x00E014B9, 0x00000000, 0x00000000, 0x00000000, \
       
  1207 0x00E014BD, 0x00000000, 0x00000000, 0x00000000, \
       
  1208 0x00E014D5, 0x00000000, 0x00000000, 0x00000000, \
       
  1209 0x00000000, 0x00000000, 0x00000000, 0x00000000, \
       
  1210 0x00E014C1, 0x00000000, 0x00000000, 0x00000000, \
       
  1211 0x00000000, 0x00000000, 0x00000000, 0x00000000, \
       
  1212 0x00000000, 0x00000000, 0x00000000, 0x00000000, \
       
  1213 0x00000000, 0x00000000, 0x00000000, 0x00000000, \
       
  1214 0x00E014C8, 0x00000000, 0x00000000, 0x00000000, \
       
  1215 0x00200600, 0x00E014EE, 0x00000000, 0x00000000, \
       
  1216 0x0030FF80, 0x00940E46, 0x00038200, 0x00102000, \
       
  1217 0x00E00E43, 0x00000000, 0x00000000, 0x00000000, \
       
  1218 0x00300006, 0x00E014FB, 0x00000000, 0x00000000, \
       
  1219 0x00000000, 0x00000000, 0x00000000, 0x00000000, \
       
  1220 0x00000000, 0x00000000, 0x00000000, 0x00000000, \
       
  1221 0x00000000, 0x00000000, 0x00000000, 0x00000000, \
       
  1222 0x00906E41, 0x00800E3C, 0x00E00E39, 0x00000000, \
       
  1223 0x00906EFD, 0x00900EFD, 0x00E00EF8, 0x00000000, \
       
  1224 0x00000000, 0x00000000, 0x00000000, 0x00000000, \
       
  1225 0x00000000, 0x00000000, 0x00000000, 0x00000000, \
       
  1226 0x00000000, 0x00000000, 0x00000000, 0x00000000, \
       
  1227 0x00000000, 0x00000000, 0x00000000, 0x00000000, \
       
  1228 0x00000000, 0x00000000, 0x00000000, 0x00000000, \
       
  1229 0x00000000, 0x00000000, 0x00000000, 0x00000000, \
       
  1230 0x00000000, 0x00000000, 0x00000000, 0x00000000, \
       
  1231 0x00000000, 0x00000000, 0x00000000, 0x00000000, \
       
  1232 0x00000000, 0x00000000, 0x00000000, 0x00000000, \
       
  1233 0x00000000, 0x00000000, 0x00000000, 0x00000000, \
       
  1234 0x00000000, 0x00000000, 0x00000000, 0x00000000, \
       
  1235 0x00000000, 0x00000000, 0x00000000, 0x00000000, \
       
  1236 0x00000000, 0x00000000, 0x00000000, 0x00000000, \
       
  1237 0x00000000, 0x00000000, 0x00000000, 0x00000000, \
       
  1238 }
       
  1239 
       
/* cb_prepare callback: load the CPUSaver receive-bundling microcode
 * matching nic->mac into @cb, after patching in the three tunables
 * (INTDELAY/BUNDLEMAX/BUNDLESMALL) at the per-variant dword offsets.
 * ICH parts and unknown MACs get a NOP command instead. */
static void e100_setup_ucode(struct nic *nic, struct cb *cb, struct sk_buff *skb)
{
/* *INDENT-OFF* */
	/* per-MAC-variant ucode images plus patch offsets; zero mac ends it */
	static struct {
		u32 ucode[UCODE_SIZE + 1];
		u8 mac;
		u8 timer_dword;
		u8 bundle_dword;
		u8 min_size_dword;
	} ucode_opts[] = {
		{ D101M_B_RCVBUNDLE_UCODE,
		  mac_82559_D101M,
		  D101M_CPUSAVER_TIMER_DWORD,
		  D101M_CPUSAVER_BUNDLE_DWORD,
		  D101M_CPUSAVER_MIN_SIZE_DWORD },
		{ D101S_RCVBUNDLE_UCODE,
		  mac_82559_D101S,
		  D101S_CPUSAVER_TIMER_DWORD,
		  D101S_CPUSAVER_BUNDLE_DWORD,
		  D101S_CPUSAVER_MIN_SIZE_DWORD },
		{ D102_E_RCVBUNDLE_UCODE,
		  mac_82551_F,
		  D102_E_CPUSAVER_TIMER_DWORD,
		  D102_E_CPUSAVER_BUNDLE_DWORD,
		  D102_E_CPUSAVER_MIN_SIZE_DWORD },
		{ D102_E_RCVBUNDLE_UCODE,
		  mac_82551_10,
		  D102_E_CPUSAVER_TIMER_DWORD,
		  D102_E_CPUSAVER_BUNDLE_DWORD,
		  D102_E_CPUSAVER_MIN_SIZE_DWORD },
		{ {0}, 0, 0, 0, 0}
	}, *opts;
/* *INDENT-ON* */

/*************************************************************************
*  CPUSaver parameters
*
*  All CPUSaver parameters are 16-bit literals that are part of a
*  "move immediate value" instruction.  By changing the value of
*  the literal in the instruction before the code is loaded, the
*  driver can change the algorithm.
*
*  INTDELAY - This loads the dead-man timer with its initial value.
*    When this timer expires the interrupt is asserted, and the
*    timer is reset each time a new packet is received.  (see
*    BUNDLEMAX below to set the limit on number of chained packets)
*    The current default is 0x600 or 1536.  Experiments show that
*    the value should probably stay within the 0x200 - 0x1000.
*
*  BUNDLEMAX -
*    This sets the maximum number of frames that will be bundled.  In
*    some situations, such as the TCP windowing algorithm, it may be
*    better to limit the growth of the bundle size than let it go as
*    high as it can, because that could cause too much added latency.
*    The default is six, because this is the number of packets in the
*    default TCP window size.  A value of 1 would make CPUSaver indicate
*    an interrupt for every frame received.  If you do not want to put
*    a limit on the bundle size, set this value to xFFFF.
*
*  BUNDLESMALL -
*    This contains a bit-mask describing the minimum size frame that
*    will be bundled.  The default masks the lower 7 bits, which means
*    that any frame less than 128 bytes in length will not be bundled,
*    but will instead immediately generate an interrupt.  This does
*    not affect the current bundle in any way.  Any frame that is 128
*    bytes or large will be bundled normally.  This feature is meant
*    to provide immediate indication of ACK frames in a TCP environment.
*    Customers were seeing poor performance when a machine with CPUSaver
*    enabled was sending but not receiving.  The delay introduced when
*    the ACKs were received was enough to reduce total throughput, because
*    the sender would sit idle until the ACK was finally seen.
*
*    The current default is 0xFF80, which masks out the lower 7 bits.
*    This means that any frame which is x7F (127) bytes or smaller
*    will cause an immediate interrupt.  Because this value must be a
*    bit mask, there are only a few valid values that can be used.  To
*    turn this feature off, the driver can write the value xFFFF to the
*    lower word of this instruction (in the same way that the other
*    parameters are used).  Likewise, a value of 0xF800 (2047) would
*    cause an interrupt to be generated for every frame, because all
*    standard Ethernet frames are <= 2047 bytes in length.
*************************************************************************/

/* if you wish to disable the ucode functionality, while maintaining the
 * workarounds it provides, set the following defines to:
 * BUNDLESMALL 0
 * BUNDLEMAX 1
 * INTDELAY 1
 */
#define BUNDLESMALL 1
#define BUNDLEMAX (u16)6
#define INTDELAY (u16)1536 /* 0x600 */

	/* do not load u-code for ICH devices */
	if (nic->flags & ich)
		goto noloaducode;

	/* Search for ucode match against h/w rev_id */
	for (opts = ucode_opts; opts->mac; opts++) {
		int i;
		u32 *ucode = opts->ucode;
		if (nic->mac != opts->mac)
			continue;

		/* Insert user-tunable settings */
		ucode[opts->timer_dword] &= 0xFFFF0000;
		ucode[opts->timer_dword] |= INTDELAY;
		ucode[opts->bundle_dword] &= 0xFFFF0000;
		ucode[opts->bundle_dword] |= BUNDLEMAX;
		ucode[opts->min_size_dword] &= 0xFFFF0000;
		ucode[opts->min_size_dword] |= (BUNDLESMALL) ? 0xFFFF : 0xFF80;

		/* copy patched image into the cb, converting to LE dwords */
		for (i = 0; i < UCODE_SIZE; i++)
			cb->u.ucode[i] = cpu_to_le32(ucode[i]);
		cb->command = cpu_to_le16(cb_ucode | cb_el);
		return;
	}

noloaducode:
	cb->command = cpu_to_le16(cb_nop | cb_el);
}
       
  1361 
       
/* Submit the microcode-load cb and sleep-wait for it to complete.
 * NOTE(review): the skb/cb_prepare parameters are accepted but unused -
 * the call is hard-wired to e100_setup_ucode; confirm callers expect this.
 * Returns 0 on success, or a negative error (-EPERM on load failure). */
static inline int e100_exec_cb_wait(struct nic *nic, struct sk_buff *skb,
	void (*cb_prepare)(struct nic *, struct cb *, struct sk_buff *))
{
	int err = 0, counter = 50;
	/* cb_to_clean is the cb e100_exec_cb() is about to use */
	struct cb *cb = nic->cb_to_clean;

	if ((err = e100_exec_cb(nic, NULL, e100_setup_ucode)))
		DPRINTK(PROBE,ERR, "ucode cmd failed with error %d\n", err);

	/* must restart cuc */
	nic->cuc_cmd = cuc_start;

	/* wait for completion */
	e100_write_flush(nic);
	udelay(10);

	/* wait for possibly (ouch) 500ms */
	while (!(cb->status & cpu_to_le16(cb_complete))) {
		msleep(10);
		if (!--counter) break;
	}

	/* ack any interrupts, something could have been set */
	writeb(~0, &nic->csr->scb.stat_ack);

	/* if the command failed, or is not OK, notify and return */
	if (!counter || !(cb->status & cpu_to_le16(cb_ok))) {
		DPRINTK(PROBE,ERR, "ucode load failed\n");
		err = -EPERM;
	}

	return err;
}
       
  1395 
       
  1396 static void e100_setup_iaaddr(struct nic *nic, struct cb *cb,
       
  1397 	struct sk_buff *skb)
       
  1398 {
       
  1399 	cb->command = cpu_to_le16(cb_iaaddr);
       
  1400 	memcpy(cb->u.iaaddr, nic->netdev->dev_addr, ETH_ALEN);
       
  1401 }
       
  1402 
       
  1403 static void e100_dump(struct nic *nic, struct cb *cb, struct sk_buff *skb)
       
  1404 {
       
  1405 	cb->command = cpu_to_le16(cb_dump);
       
  1406 	cb->u.dump_buffer_addr = cpu_to_le32(nic->dma_addr +
       
  1407 		offsetof(struct mem, dump_buf));
       
  1408 }
       
  1409 
       
/* PHY register bits used during initialization below. */
#define NCONFIG_AUTO_SWITCH	0x0080
#define MII_NSC_CONG		MII_RESV1
#define NSC_CONG_ENABLE		0x0100
#define NSC_CONG_TXREADY	0x0400
#define ADVERTISE_FC_SUPPORTED	0x0400
/* Discover the PHY on the MDIO bus, isolate all others, and apply
 * PHY-specific workarounds (National TX congestion control, MDI/MDI-X
 * auto-switching).
 * Returns 0 on success, -EAGAIN if no PHY responds at any address. */
static int e100_phy_init(struct nic *nic)
{
	struct net_device *netdev = nic->netdev;
	u32 addr;
	u16 bmcr, stat, id_lo, id_hi, cong;

	/* Discover phy addr by searching addrs in order {1,0,2,..., 31} */
	for(addr = 0; addr < 32; addr++) {
		nic->mii.phy_id = (addr == 0) ? 1 : (addr == 1) ? 0 : addr;
		bmcr = mdio_read(netdev, nic->mii.phy_id, MII_BMCR);
		/* BMSR is read twice on purpose: link bits are latched,
		 * so the first read may carry stale values */
		stat = mdio_read(netdev, nic->mii.phy_id, MII_BMSR);
		stat = mdio_read(netdev, nic->mii.phy_id, MII_BMSR);
		/* all-ones BMCR or all-zeros BMCR+BMSR means nothing is
		 * responding at this address */
		if(!((bmcr == 0xFFFF) || ((stat == 0) && (bmcr == 0))))
			break;
	}
	DPRINTK(HW, DEBUG, "phy_addr = %d\n", nic->mii.phy_id);
	if(addr == 32)
		return -EAGAIN;

	/* Selected the phy and isolate the rest */
	for(addr = 0; addr < 32; addr++) {
		if(addr != nic->mii.phy_id) {
			mdio_write(netdev, addr, MII_BMCR, BMCR_ISOLATE);
		} else {
			bmcr = mdio_read(netdev, addr, MII_BMCR);
			mdio_write(netdev, addr, MII_BMCR,
				bmcr & ~BMCR_ISOLATE);
		}
	}

	/* Get phy ID */
	id_lo = mdio_read(netdev, nic->mii.phy_id, MII_PHYSID1);
	id_hi = mdio_read(netdev, nic->mii.phy_id, MII_PHYSID2);
	nic->phy = (u32)id_hi << 16 | (u32)id_lo;
	DPRINTK(HW, DEBUG, "phy ID = 0x%08X\n", nic->phy);

	/* Handle National tx phys */
#define NCS_PHY_MODEL_MASK	0xFFF0FFFF
	if((nic->phy & NCS_PHY_MODEL_MASK) == phy_nsc_tx) {
		/* Disable congestion control */
		cong = mdio_read(netdev, nic->mii.phy_id, MII_NSC_CONG);
		cong |= NSC_CONG_TXREADY;
		cong &= ~NSC_CONG_ENABLE;
		mdio_write(netdev, nic->mii.phy_id, MII_NSC_CONG, cong);
	}

	/* NOTE(review): applies on 82550 D102 or newer, or on ICH parts
	 * with TPI status bit 15 set and MDI-X not forced via EEPROM --
	 * confirm against the 8255x developer's manual */
	if((nic->mac >= mac_82550_D102) || ((nic->flags & ich) &&
	   (mdio_read(netdev, nic->mii.phy_id, MII_TPISTATUS) & 0x8000) &&
		!(nic->eeprom[eeprom_cnfg_mdix] & eeprom_mdix_enabled))) {
		/* enable/disable MDI/MDI-X auto-switching. */
		mdio_write(netdev, nic->mii.phy_id, MII_NCONFIG,
				nic->mii.force_media ? 0 : NCONFIG_AUTO_SWITCH);
	}

	return 0;
}
       
  1471 
       
/* Bring the adapter from reset to a fully configured state:
 * self-test, PHY init, CU/RU base setup, microcode load, device
 * configure, station address, and statistics dump setup.
 * Returns 0 on success or the errno of the first failing step. */
static int e100_hw_init(struct nic *nic)
{
	int err;

	e100_hw_reset(nic);

	/* NOTE(review): ERR level looks like bring-up tracing left in --
	 * consider demoting to DEBUG */
	DPRINTK(HW, ERR, "e100_hw_init\n");
	/* the self test sleeps, so it is skipped in atomic context */
	if(!in_interrupt() && (err = e100_self_test(nic)))
		return err;

	if((err = e100_phy_init(nic)))
		return err;
	/* load base 0 so CB/RFD link fields are absolute bus addresses */
	if((err = e100_exec_cmd(nic, cuc_load_base, 0)))
		return err;
	if((err = e100_exec_cmd(nic, ruc_load_base, 0)))
		return err;
	/* load microcode and wait for the command to complete */
	if ((err = e100_exec_cb_wait(nic, NULL, e100_setup_ucode)))
		return err;
	if((err = e100_exec_cb(nic, NULL, e100_configure)))
		return err;
	if((err = e100_exec_cb(nic, NULL, e100_setup_iaaddr)))
		return err;
	/* point the device at the stats area, then reset the counters */
	if((err = e100_exec_cmd(nic, cuc_dump_addr,
		nic->dma_addr + offsetof(struct mem, stats))))
		return err;
	if((err = e100_exec_cmd(nic, cuc_dump_reset, 0)))
		return err;

	e100_disable_irq(nic);

	return 0;
}
       
  1504 
       
  1505 static void e100_multi(struct nic *nic, struct cb *cb, struct sk_buff *skb)
       
  1506 {
       
  1507 	struct net_device *netdev = nic->netdev;
       
  1508 	struct dev_mc_list *list = netdev->mc_list;
       
  1509 	u16 i, count = min(netdev->mc_count, E100_MAX_MULTICAST_ADDRS);
       
  1510 
       
  1511 	cb->command = cpu_to_le16(cb_multi);
       
  1512 	cb->u.multi.count = cpu_to_le16(count * ETH_ALEN);
       
  1513 	for(i = 0; list && i < count; i++, list = list->next)
       
  1514 		memcpy(&cb->u.multi.addr[i*ETH_ALEN], &list->dmi_addr,
       
  1515 			ETH_ALEN);
       
  1516 }
       
  1517 
       
  1518 static void e100_set_multicast_list(struct net_device *netdev)
       
  1519 {
       
  1520 	struct nic *nic = netdev_priv(netdev);
       
  1521 
       
  1522 	DPRINTK(HW, DEBUG, "mc_count=%d, flags=0x%04X\n",
       
  1523 		netdev->mc_count, netdev->flags);
       
  1524 
       
  1525 	if(netdev->flags & IFF_PROMISC)
       
  1526 		nic->flags |= promiscuous;
       
  1527 	else
       
  1528 		nic->flags &= ~promiscuous;
       
  1529 
       
  1530 	if(netdev->flags & IFF_ALLMULTI ||
       
  1531 		netdev->mc_count > E100_MAX_MULTICAST_ADDRS)
       
  1532 		nic->flags |= multicast_all;
       
  1533 	else
       
  1534 		nic->flags &= ~multicast_all;
       
  1535 
       
  1536 	e100_exec_cb(nic, NULL, e100_configure);
       
  1537 	e100_exec_cb(nic, NULL, e100_multi);
       
  1538 }
       
  1539 
       
/* Harvest the device's statistics dump buffer into net_device_stats
 * and the driver's private counters, then issue the next
 * dump-and-reset command so fresh numbers are ready for the next
 * watchdog pass. */
static void e100_update_stats(struct nic *nic)
{
	struct net_device_stats *ns = &nic->net_stats;
	struct stats *s = &nic->mem->stats;
	/* the dump area grew over controller generations, so the
	 * completion marker sits at a mac-revision-dependent offset */
	u32 *complete = (nic->mac < mac_82558_D101_A4) ? &s->fc_xmt_pause :
		(nic->mac < mac_82559_D101M) ? (u32 *)&s->xmt_tco_frames :
		&s->complete;

	/* Device's stats reporting may take several microseconds to
	 * complete, so we're always consuming the results of the
	 * previous command here. */

	if(*complete == le32_to_cpu(cuc_dump_reset_complete)) {
		*complete = 0;
		/* snapshot counters used by the adaptive-IFS logic */
		nic->tx_frames = le32_to_cpu(s->tx_good_frames);
		nic->tx_collisions = le32_to_cpu(s->tx_total_collisions);
		ns->tx_aborted_errors += le32_to_cpu(s->tx_max_collisions);
		ns->tx_window_errors += le32_to_cpu(s->tx_late_collisions);
		ns->tx_carrier_errors += le32_to_cpu(s->tx_lost_crs);
		ns->tx_fifo_errors += le32_to_cpu(s->tx_underruns);
		ns->collisions += nic->tx_collisions;
		ns->tx_errors += le32_to_cpu(s->tx_max_collisions) +
			le32_to_cpu(s->tx_lost_crs);
		ns->rx_length_errors += le32_to_cpu(s->rx_short_frame_errors) +
			nic->rx_over_length_errors;
		ns->rx_crc_errors += le32_to_cpu(s->rx_crc_errors);
		ns->rx_frame_errors += le32_to_cpu(s->rx_alignment_errors);
		ns->rx_over_errors += le32_to_cpu(s->rx_overrun_errors);
		ns->rx_fifo_errors += le32_to_cpu(s->rx_overrun_errors);
		ns->rx_missed_errors += le32_to_cpu(s->rx_resource_errors);
		ns->rx_errors += le32_to_cpu(s->rx_crc_errors) +
			le32_to_cpu(s->rx_alignment_errors) +
			le32_to_cpu(s->rx_short_frame_errors) +
			le32_to_cpu(s->rx_cdt_errors);
		nic->tx_deferred += le32_to_cpu(s->tx_deferred);
		nic->tx_single_collisions +=
			le32_to_cpu(s->tx_single_collisions);
		nic->tx_multiple_collisions +=
			le32_to_cpu(s->tx_multiple_collisions);
		/* newer macs report flow-control and TCO counters too */
		if(nic->mac >= mac_82558_D101_A4) {
			nic->tx_fc_pause += le32_to_cpu(s->fc_xmt_pause);
			nic->rx_fc_pause += le32_to_cpu(s->fc_rcv_pause);
			nic->rx_fc_unsupported +=
				le32_to_cpu(s->fc_rcv_unsupported);
			if(nic->mac >= mac_82559_D101M) {
				nic->tx_tco_frames +=
					le16_to_cpu(s->xmt_tco_frames);
				nic->rx_tco_frames +=
					le16_to_cpu(s->rcv_tco_frames);
			}
		}
	}


	if(e100_exec_cmd(nic, cuc_dump_reset, 0))
		DPRINTK(TX_ERR, DEBUG, "exec cuc_dump_reset failed\n");
}
       
  1597 
       
  1598 static void e100_adjust_adaptive_ifs(struct nic *nic, int speed, int duplex)
       
  1599 {
       
  1600 	/* Adjust inter-frame-spacing (IFS) between two transmits if
       
  1601 	 * we're getting collisions on a half-duplex connection. */
       
  1602 
       
  1603 	if(duplex == DUPLEX_HALF) {
       
  1604 		u32 prev = nic->adaptive_ifs;
       
  1605 		u32 min_frames = (speed == SPEED_100) ? 1000 : 100;
       
  1606 
       
  1607 		if((nic->tx_frames / 32 < nic->tx_collisions) &&
       
  1608 		   (nic->tx_frames > min_frames)) {
       
  1609 			if(nic->adaptive_ifs < 60)
       
  1610 				nic->adaptive_ifs += 5;
       
  1611 		} else if (nic->tx_frames < min_frames) {
       
  1612 			if(nic->adaptive_ifs >= 5)
       
  1613 				nic->adaptive_ifs -= 5;
       
  1614 		}
       
  1615 		if(nic->adaptive_ifs != prev)
       
  1616 			e100_exec_cb(nic, NULL, e100_configure);
       
  1617 	}
       
  1618 }
       
  1619 
       
  1620 static void e100_watchdog(unsigned long data)
       
  1621 {
       
  1622 	struct nic *nic = (struct nic *)data;
       
  1623 	struct ethtool_cmd cmd;
       
  1624 
       
  1625 	DPRINTK(TIMER, DEBUG, "right now = %ld\n", jiffies);
       
  1626 	printk(KERN_INFO "ec_e100: watchdog\n");
       
  1627 
       
  1628 	/* mii library handles link maintenance tasks */
       
  1629 
       
  1630     if (nic->ecdev) {
       
  1631 		ecdev_set_link(nic->ecdev, mii_link_ok(&nic->mii) ? 1 : 0);
       
  1632     } else {
       
  1633 		mii_ethtool_gset(&nic->mii, &cmd);
       
  1634 
       
  1635 		if(mii_link_ok(&nic->mii) && !netif_carrier_ok(nic->netdev)) {
       
  1636 			DPRINTK(LINK, INFO, "link up, %sMbps, %s-duplex\n",
       
  1637 					cmd.speed == SPEED_100 ? "100" : "10",
       
  1638 					cmd.duplex == DUPLEX_FULL ? "full" : "half");
       
  1639 		} else if(!mii_link_ok(&nic->mii) && netif_carrier_ok(nic->netdev)) {
       
  1640 			DPRINTK(LINK, INFO, "link down\n");
       
  1641 		}
       
  1642 	}
       
  1643 
       
  1644 	mii_check_link(&nic->mii);
       
  1645 
       
  1646 	/* Software generated interrupt to recover from (rare) Rx
       
  1647 	 * allocation failure.
       
  1648 	 * Unfortunately have to use a spinlock to not re-enable interrupts
       
  1649 	 * accidentally, due to hardware that shares a register between the
       
  1650 	 * interrupt mask bit and the SW Interrupt generation bit */
       
  1651 	if (!nic->ecdev)
       
  1652 		spin_lock_irq(&nic->cmd_lock);
       
  1653 	writeb(readb(&nic->csr->scb.cmd_hi) | irq_sw_gen,&nic->csr->scb.cmd_hi);
       
  1654 	e100_write_flush(nic);
       
  1655 	if (!nic->ecdev)
       
  1656 		spin_unlock_irq(&nic->cmd_lock);
       
  1657 
       
  1658 	e100_update_stats(nic);
       
  1659 	e100_adjust_adaptive_ifs(nic, cmd.speed, cmd.duplex);
       
  1660 
       
  1661 	if(nic->mac <= mac_82557_D100_C)
       
  1662 		/* Issue a multicast command to workaround a 557 lock up */
       
  1663 		e100_set_multicast_list(nic->netdev);
       
  1664 
       
  1665 	if(nic->flags & ich && cmd.speed==SPEED_10 && cmd.duplex==DUPLEX_HALF)
       
  1666 		/* Need SW workaround for ICH[x] 10Mbps/half duplex Tx hang. */
       
  1667 		nic->flags |= ich_10h_workaround;
       
  1668 	else
       
  1669 		nic->flags &= ~ich_10h_workaround;
       
  1670 
       
  1671     if (!nic->ecdev)
       
  1672 		mod_timer(&nic->watchdog, jiffies + E100_WATCHDOG_PERIOD);
       
  1673 }
       
  1674 
       
/* cb setup routine: build a transmit command block for @skb using the
 * simplified TCB layout -- a single TBD pointing at the linear skb
 * data, DMA-mapped here. */
static void e100_xmit_prepare(struct nic *nic, struct cb *cb,
	struct sk_buff *skb)
{
	cb->command = nic->tx_command;
	/* interrupt every 16 packets regardless of delay */
	if((nic->cbs_avail & ~15) == nic->cbs_avail)
		cb->command |= cpu_to_le16(cb_i);
	cb->u.tcb.tbd_array = cb->dma_addr + offsetof(struct cb, u.tcb.tbd);
	cb->u.tcb.tcb_byte_count = 0;
	cb->u.tcb.threshold = nic->tx_threshold;
	cb->u.tcb.tbd_count = 1;
	/* NOTE(review): pci_map_single() result is never checked for a
	 * mapping error -- the open question below still stands */
	cb->u.tcb.tbd.buf_addr = cpu_to_le32(pci_map_single(nic->pdev,
		skb->data, skb->len, PCI_DMA_TODEVICE));
	/* check for mapping failure? */
	cb->u.tcb.tbd.size = cpu_to_le16(skb->len);
}
       
  1691 
       
  1692 static int e100_xmit_frame(struct sk_buff *skb, struct net_device *netdev)
       
  1693 {
       
  1694 	struct nic *nic = netdev_priv(netdev);
       
  1695 	int err;
       
  1696 
       
  1697 	if(nic->flags & ich_10h_workaround) {
       
  1698 		/* SW workaround for ICH[x] 10Mbps/half duplex Tx hang.
       
  1699 		   Issue a NOP command followed by a 1us delay before
       
  1700 		   issuing the Tx command. */
       
  1701 		if(e100_exec_cmd(nic, cuc_nop, 0))
       
  1702 			DPRINTK(TX_ERR, DEBUG, "exec cuc_nop failed\n");
       
  1703 		udelay(1);
       
  1704 	}
       
  1705 
       
  1706 	err = e100_exec_cb(nic, skb, e100_xmit_prepare);
       
  1707 
       
  1708 	switch(err) {
       
  1709 	case -ENOSPC:
       
  1710 		/* We queued the skb, but now we're out of space. */
       
  1711 		DPRINTK(TX_ERR, DEBUG, "No space for CB\n");
       
  1712         if (!nic->ecdev)
       
  1713             netif_stop_queue(netdev);
       
  1714 		break;
       
  1715 	case -ENOMEM:
       
  1716 		/* This is a hard error - log it. */
       
  1717 		DPRINTK(TX_ERR, DEBUG, "Out of Tx resources, returning skb\n");
       
  1718         if (!nic->ecdev)
       
  1719             netif_stop_queue(netdev);
       
  1720 		return 1;
       
  1721 	}
       
  1722 
       
  1723 	netdev->trans_start = jiffies;
       
  1724 	return 0;
       
  1725 }
       
  1726 
       
/* Reclaim completed transmit command blocks: unmap buffers, count
 * stats, free skbs (non-EtherCAT mode only) and return them to the
 * available pool.  Returns non-zero if anything was cleaned, so the
 * caller can wake a stopped queue. */
static int e100_tx_clean(struct nic *nic)
{
	struct cb *cb;
	int tx_cleaned = 0;

	/* EtherCAT operation is single-threaded, no lock needed there */
	if (!nic->ecdev)
		spin_lock(&nic->cb_lock);

	/* Clean CBs marked complete */
	for(cb = nic->cb_to_clean;
	    cb->status & cpu_to_le16(cb_complete);
	    cb = nic->cb_to_clean = cb->next) {
		DPRINTK(TX_DONE, DEBUG, "cb[%d]->status = 0x%04X\n",
		        (int)(((void*)cb - (void*)nic->cbs)/sizeof(struct cb)),
		        cb->status);

		/* only data-bearing CBs (transmits) have an skb attached */
		if(likely(cb->skb != NULL)) {
			nic->net_stats.tx_packets++;
			nic->net_stats.tx_bytes += cb->skb->len;

			pci_unmap_single(nic->pdev,
				le32_to_cpu(cb->u.tcb.tbd.buf_addr),
				le16_to_cpu(cb->u.tcb.tbd.size),
				PCI_DMA_TODEVICE);
            if (!nic->ecdev) {
                dev_kfree_skb_any(cb->skb);
				cb->skb = NULL;
			}
			/* NOTE(review): in EtherCAT mode cb->skb is left
			 * non-NULL here -- presumably released later by
			 * e100_clean_cbs(); confirm ownership */
			tx_cleaned = 1;
		}
		cb->status = 0;
		nic->cbs_avail++;
	}

	if (!nic->ecdev) {
		spin_unlock(&nic->cb_lock);

		/* Recover from running out of Tx resources in xmit_frame */
		if(unlikely(tx_cleaned && netif_queue_stopped(nic->netdev)))
			netif_wake_queue(nic->netdev);
	}

	return tx_cleaned;
}
       
  1771 
       
/* Tear down the CB ring: unmap and free any skbs still attached to
 * unreclaimed CBs, release the DMA-coherent ring memory, and reset the
 * ring bookkeeping to a pristine (empty) state. */
static void e100_clean_cbs(struct nic *nic)
{
	if(nic->cbs) {
		/* walk every CB that was never reclaimed by tx_clean */
		while(nic->cbs_avail != nic->params.cbs.count) {
			struct cb *cb = nic->cb_to_clean;
			if(cb->skb) {
				pci_unmap_single(nic->pdev,
					le32_to_cpu(cb->u.tcb.tbd.buf_addr),
					le16_to_cpu(cb->u.tcb.tbd.size),
					PCI_DMA_TODEVICE);
				dev_kfree_skb(cb->skb);
			}
			nic->cb_to_clean = nic->cb_to_clean->next;
			nic->cbs_avail++;
		}
		pci_free_consistent(nic->pdev,
			sizeof(struct cb) * nic->params.cbs.count,
			nic->cbs, nic->cbs_dma_addr);
		nic->cbs = NULL;
		nic->cbs_avail = 0;
	}
	/* next exec_cb must (re)start the CU rather than resume it */
	nic->cuc_cmd = cuc_start;
	nic->cb_to_use = nic->cb_to_send = nic->cb_to_clean =
		nic->cbs;
}
       
  1797 
       
  1798 static int e100_alloc_cbs(struct nic *nic)
       
  1799 {
       
  1800 	struct cb *cb;
       
  1801 	unsigned int i, count = nic->params.cbs.count;
       
  1802 
       
  1803 	nic->cuc_cmd = cuc_start;
       
  1804 	nic->cb_to_use = nic->cb_to_send = nic->cb_to_clean = NULL;
       
  1805 	nic->cbs_avail = 0;
       
  1806 
       
  1807 	nic->cbs = pci_alloc_consistent(nic->pdev,
       
  1808 		sizeof(struct cb) * count, &nic->cbs_dma_addr);
       
  1809 	if(!nic->cbs)
       
  1810 		return -ENOMEM;
       
  1811 
       
  1812 	for(cb = nic->cbs, i = 0; i < count; cb++, i++) {
       
  1813 		cb->next = (i + 1 < count) ? cb + 1 : nic->cbs;
       
  1814 		cb->prev = (i == 0) ? nic->cbs + count - 1 : cb - 1;
       
  1815 
       
  1816 		cb->dma_addr = nic->cbs_dma_addr + i * sizeof(struct cb);
       
  1817 		cb->link = cpu_to_le32(nic->cbs_dma_addr +
       
  1818 			((i+1) % count) * sizeof(struct cb));
       
  1819 		cb->skb = NULL;
       
  1820 	}
       
  1821 
       
  1822 	nic->cb_to_use = nic->cb_to_send = nic->cb_to_clean = nic->cbs;
       
  1823 	nic->cbs_avail = count;
       
  1824 
       
  1825 	return 0;
       
  1826 }
       
  1827 
       
  1828 static inline void e100_start_receiver(struct nic *nic, struct rx *rx)
       
  1829 {
       
  1830 	if(!nic->rxs) return;
       
  1831 	if(RU_SUSPENDED != nic->ru_running) return;
       
  1832 
       
  1833 	/* handle init time starts */
       
  1834 	if(!rx) rx = nic->rxs;
       
  1835 
       
  1836 	/* (Re)start RU if suspended or idle and RFA is non-NULL */
       
  1837 	if(rx->skb) {
       
  1838 		e100_exec_cmd(nic, ruc_start, rx->dma_addr);
       
  1839 		nic->ru_running = RU_RUNNING;
       
  1840 	}
       
  1841 }
       
  1842 
       
/* Each receive buffer holds the in-band RFD header plus a max-size
 * VLAN Ethernet frame. */
#define RFD_BUF_LEN (sizeof(struct rfd) + VLAN_ETH_FRAME_LEN)
/* Allocate, initialize and DMA-map one receive buffer, then splice it
 * onto the tail of the hardware RFA by pointing the previous RFD's
 * link at it and clearing the previous RFD's end-of-list (EL) bit.
 * Returns 0 on success or -ENOMEM. */
static int e100_rx_alloc_skb(struct nic *nic, struct rx *rx)
{
	if(!(rx->skb = netdev_alloc_skb(nic->netdev, RFD_BUF_LEN + NET_IP_ALIGN)))
		return -ENOMEM;

	/* Align, init, and map the RFD. */
	skb_reserve(rx->skb, NET_IP_ALIGN);
	memcpy(rx->skb->data, &nic->blank_rfd, sizeof(struct rfd));
	rx->dma_addr = pci_map_single(nic->pdev, rx->skb->data,
		RFD_BUF_LEN, PCI_DMA_BIDIRECTIONAL);

	if(pci_dma_mapping_error(rx->dma_addr)) {
		dev_kfree_skb_any(rx->skb);
		rx->skb = NULL;
		rx->dma_addr = 0;
		return -ENOMEM;
	}

	/* Link the RFD to end of RFA by linking previous RFD to
	 * this one, and clearing EL bit of previous.  */
	if(rx->prev->skb) {
		struct rfd *prev_rfd = (struct rfd *)rx->prev->skb->data;
		/* RFD lives inside skb data and may be unaligned */
		put_unaligned(cpu_to_le32(rx->dma_addr),
			(u32 *)&prev_rfd->link);
		/* the link must be visible before the EL bit is cleared,
		 * or the device could run off the end of the list */
		wmb();
		prev_rfd->command &= ~cpu_to_le16(cb_el);
		pci_dma_sync_single_for_device(nic->pdev, rx->prev->dma_addr,
			sizeof(struct rfd), PCI_DMA_TODEVICE);
	}

	return 0;
}
       
  1876 
       
  1877 static int e100_rx_indicate(struct nic *nic, struct rx *rx,
       
  1878 	unsigned int *work_done, unsigned int work_to_do)
       
  1879 {
       
  1880 	struct sk_buff *skb = rx->skb;
       
  1881 	struct rfd *rfd = (struct rfd *)skb->data;
       
  1882 	u16 rfd_status, actual_size;
       
  1883 
       
  1884 	if(unlikely(work_done && *work_done >= work_to_do))
       
  1885 		return -EAGAIN;
       
  1886 
       
  1887 	return -ENODATA; // FIXME
       
  1888 
       
  1889 	/* Need to sync before taking a peek at cb_complete bit */
       
  1890 	pci_dma_sync_single_for_cpu(nic->pdev, rx->dma_addr,
       
  1891 		sizeof(struct rfd), PCI_DMA_FROMDEVICE);
       
  1892 	rfd_status = le16_to_cpu(rfd->status);
       
  1893 
       
  1894 	DPRINTK(RX_STATUS, DEBUG, "status=0x%04X\n", rfd_status);
       
  1895 
       
  1896 	/* If data isn't ready, nothing to indicate */
       
  1897 	if(unlikely(!(rfd_status & cb_complete)))
       
  1898 		return -ENODATA;
       
  1899 
       
  1900 	/* Get actual data size */
       
  1901 	actual_size = le16_to_cpu(rfd->actual_size) & 0x3FFF;
       
  1902 	if(unlikely(actual_size > RFD_BUF_LEN - sizeof(struct rfd)))
       
  1903 		actual_size = RFD_BUF_LEN - sizeof(struct rfd);
       
  1904 
       
  1905 	printk(KERN_INFO "ec_e100 rx %p rec %u\n", rx, actual_size);
       
  1906 	msleep(500); // FIXME
       
  1907 
       
  1908 	/* Get data */
       
  1909 	pci_unmap_single(nic->pdev, rx->dma_addr,
       
  1910 		RFD_BUF_LEN, PCI_DMA_FROMDEVICE);
       
  1911 
       
  1912 	/* this allows for a fast restart without re-enabling interrupts */
       
  1913 	if(le16_to_cpu(rfd->command) & cb_el)
       
  1914 		nic->ru_running = RU_SUSPENDED;
       
  1915 
       
  1916 	if (!nic->ecdev) {
       
  1917 		/* Pull off the RFD and put the actual data (minus eth hdr) */
       
  1918 		skb_reserve(skb, sizeof(struct rfd));
       
  1919 		skb_put(skb, actual_size);
       
  1920 		skb->protocol = eth_type_trans(skb, nic->netdev);
       
  1921 	}
       
  1922 
       
  1923 	if(unlikely(!(rfd_status & cb_ok))) {
       
  1924 		if (!nic->ecdev) {
       
  1925 			/* Don't indicate if hardware indicates errors */
       
  1926 			dev_kfree_skb_any(skb);
       
  1927 		}
       
  1928 	} else if(actual_size > ETH_DATA_LEN + VLAN_ETH_HLEN) {
       
  1929 		/* Don't indicate oversized frames */
       
  1930 		nic->rx_over_length_errors++;
       
  1931 		if (!nic->ecdev)
       
  1932 			dev_kfree_skb_any(skb);
       
  1933 	} else {
       
  1934 		nic->net_stats.rx_packets++;
       
  1935 		nic->net_stats.rx_bytes += actual_size;
       
  1936 		nic->netdev->last_rx = jiffies;
       
  1937 		if (nic->ecdev) {
       
  1938 #if 0
       
  1939 			ecdev_receive(nic->ecdev,
       
  1940 					skb->data + sizeof(struct rfd), actual_size);
       
  1941 #endif
       
  1942 			// No need to detect link status as
       
  1943 			// long as frames are received: Reset watchdog.
       
  1944 #if 0
       
  1945 			nic->ec_watchdog_jiffies = jiffies;
       
  1946 #endif
       
  1947 		} else {
       
  1948 			netif_receive_skb(skb);
       
  1949 		}
       
  1950 		if(work_done)
       
  1951 			(*work_done)++;
       
  1952 	}
       
  1953 
       
  1954 	if (!nic->ecdev)
       
  1955 		rx->skb = NULL;
       
  1956 
       
  1957 	return 0;
       
  1958 }
       
  1959 
       
/* Drain completed RFDs from the ring (bounded by @work_to_do when
 * called from the NAPI poll path), refill the ring with fresh skbs
 * (non-EtherCAT mode), and restart the receive unit if it had stopped
 * on a resource shortage. */
static void e100_rx_clean(struct nic *nic, unsigned int *work_done,
	unsigned int work_to_do)
{
	struct rx *rx;
	int restart_required = 0;
	struct rx *rx_to_start = NULL;

	/* are we already rnr? then pay attention!!! this ensures that
	 * the state machine progression never allows a start with a
	 * partially cleaned list, avoiding a race between hardware
	 * and rx_to_clean when in NAPI mode */
	if(RU_SUSPENDED == nic->ru_running)
		restart_required = 1;

	/* Indicate newly arrived packets */
	for(rx = nic->rx_to_clean; rx->skb; rx = nic->rx_to_clean = rx->next) {
		int err = e100_rx_indicate(nic, rx, work_done, work_to_do);
		if(-EAGAIN == err) {
			/* hit quota so have more work to do, restart once
			 * cleanup is complete */
			restart_required = 0;
			break;
		} else if(-ENODATA == err)
			break; /* No more to clean */
	}

	/* save our starting point as the place we'll restart the receiver */
	if(restart_required)
		rx_to_start = nic->rx_to_clean;

	if (!nic->ecdev) {
		/* Alloc new skbs to refill list */
		for(rx = nic->rx_to_use; !rx->skb; rx = nic->rx_to_use = rx->next) {
			if(unlikely(e100_rx_alloc_skb(nic, rx)))
				break; /* Better luck next time (see watchdog) */
		}
	}

	if(restart_required) {
		// ack the rnr?
		writeb(stat_ack_rnr, &nic->csr->scb.stat_ack);
		e100_start_receiver(nic, rx_to_start);
		if(work_done)
			(*work_done)++;
	}
}
       
  2006 
       
  2007 static void e100_rx_clean_list(struct nic *nic)
       
  2008 {
       
  2009 	struct rx *rx;
       
  2010 	unsigned int i, count = nic->params.rfds.count;
       
  2011 
       
  2012 	nic->ru_running = RU_UNINITIALIZED;
       
  2013 
       
  2014 	if(nic->rxs) {
       
  2015 		for(rx = nic->rxs, i = 0; i < count; rx++, i++) {
       
  2016 			if(rx->skb) {
       
  2017 				pci_unmap_single(nic->pdev, rx->dma_addr,
       
  2018 					RFD_BUF_LEN, PCI_DMA_FROMDEVICE);
       
  2019 				dev_kfree_skb(rx->skb);
       
  2020 			}
       
  2021 		}
       
  2022 		kfree(nic->rxs);
       
  2023 		nic->rxs = NULL;
       
  2024 	}
       
  2025 
       
  2026 	nic->rx_to_use = nic->rx_to_clean = NULL;
       
  2027 }
       
  2028 
       
  2029 static int e100_rx_alloc_list(struct nic *nic)
       
  2030 {
       
  2031 	struct rx *rx;
       
  2032 	unsigned int i, count = nic->params.rfds.count;
       
  2033 
       
  2034 	nic->rx_to_use = nic->rx_to_clean = NULL;
       
  2035 	nic->ru_running = RU_UNINITIALIZED;
       
  2036 
       
  2037 	if(!(nic->rxs = kcalloc(count, sizeof(struct rx), GFP_ATOMIC)))
       
  2038 		return -ENOMEM;
       
  2039 
       
  2040 	for(rx = nic->rxs, i = 0; i < count; rx++, i++) {
       
  2041 		rx->next = (i + 1 < count) ? rx + 1 : nic->rxs;
       
  2042 		rx->prev = (i == 0) ? nic->rxs + count - 1 : rx - 1;
       
  2043 		if(e100_rx_alloc_skb(nic, rx)) {
       
  2044 			e100_rx_clean_list(nic);
       
  2045 			return -ENOMEM;
       
  2046 		}
       
  2047 	}
       
  2048 
       
  2049 	nic->rx_to_use = nic->rx_to_clean = nic->rxs;
       
  2050 	nic->ru_running = RU_SUSPENDED;
       
  2051 
       
  2052 	return 0;
       
  2053 }
       
  2054 
       
/* Interrupt handler: ack the SCB status bits, note a receive-no-
 * resource condition for later RU restart, and (non-EtherCAT mode
 * only) mask the IRQ and schedule NAPI polling. */
static irqreturn_t e100_intr(int irq, void *dev_id)
{
	struct net_device *netdev = dev_id;
	struct nic *nic = netdev_priv(netdev);
	u8 stat_ack = readb(&nic->csr->scb.stat_ack);

	DPRINTK(INTR, DEBUG, "stat_ack = 0x%02X\n", stat_ack);

	if(stat_ack == stat_ack_not_ours ||	/* Not our interrupt */
	   stat_ack == stat_ack_not_present)	/* Hardware is ejected */
		return IRQ_NONE;

	/* Ack interrupt(s) */
	writeb(stat_ack, &nic->csr->scb.stat_ack);

	/* We hit Receive No Resource (RNR); restart RU after cleaning */
	if(stat_ack & stat_ack_rnr)
		nic->ru_running = RU_SUSPENDED;

	/* in EtherCAT mode rx/tx cleanup happens in e100_ec_poll(),
	 * not via NAPI */
	if(!nic->ecdev && likely(netif_rx_schedule_prep(netdev))) {
		e100_disable_irq(nic);
		__netif_rx_schedule(netdev);
	}

	return IRQ_HANDLED;
}
       
  2081 
       
  2082 void e100_ec_poll(struct net_device *netdev)
       
  2083 {
       
  2084 	struct nic *nic = netdev_priv(netdev);
       
  2085 
       
  2086 	e100_rx_clean(nic, NULL, 100); // FIXME
       
  2087 	e100_tx_clean(nic);
       
  2088 
       
  2089     if (jiffies - nic->ec_watchdog_jiffies >= 2 * HZ) {
       
  2090         e100_watchdog((unsigned long) nic);
       
  2091         nic->ec_watchdog_jiffies = jiffies;
       
  2092     }
       
  2093 }
       
  2094 
       
/* NAPI poll (old-style, pre-napi_struct API): clean rx up to the
 * budget/quota and reclaim tx.  Returns 1 while more work remains,
 * 0 after completing the poll and re-enabling interrupts. */
static int e100_poll(struct net_device *netdev, int *budget)
{
	struct nic *nic = netdev_priv(netdev);
	unsigned int work_to_do = min(netdev->quota, *budget);
	unsigned int work_done = 0;
	int tx_cleaned;

	e100_rx_clean(nic, &work_done, work_to_do);
	tx_cleaned = e100_tx_clean(nic);

	/* If no Rx and Tx cleanup work was done, exit polling mode. */
	if((!tx_cleaned && (work_done == 0)) || !netif_running(netdev)) {
		netif_rx_complete(netdev);
		e100_enable_irq(nic);
		return 0;
	}

	/* still busy: account the work and stay in polling mode */
	*budget -= work_done;
	netdev->quota -= work_done;

	return 1;
}
       
  2117 
       
#ifdef CONFIG_NET_POLL_CONTROLLER
/* netpoll/netconsole hook: run the interrupt handler synchronously
 * with device interrupts masked.  Skipped entirely for EtherCAT-claimed
 * devices, which are serviced via e100_ec_poll(). */
static void e100_netpoll(struct net_device *netdev)
{
	struct nic *nic = netdev_priv(netdev);

	if (nic->ecdev)
		return;

	e100_disable_irq(nic);
	e100_intr(nic->pdev->irq, netdev);
	e100_tx_clean(nic);
	e100_enable_irq(nic);
}
#endif
       
  2132 
       
  2133 static struct net_device_stats *e100_get_stats(struct net_device *netdev)
       
  2134 {
       
  2135 	struct nic *nic = netdev_priv(netdev);
       
  2136 	return &nic->net_stats;
       
  2137 }
       
  2138 
       
/* Set a new station MAC address: validate it, copy it into the netdev,
 * then issue an individual-address-setup command block so the hardware
 * starts filtering on the new address. */
static int e100_set_mac_address(struct net_device *netdev, void *p)
{
	struct nic *nic = netdev_priv(netdev);
	struct sockaddr *addr = p;

	if (!is_valid_ether_addr(addr->sa_data))
		return -EADDRNOTAVAIL;

	memcpy(netdev->dev_addr, addr->sa_data, netdev->addr_len);
	e100_exec_cb(nic, NULL, e100_setup_iaaddr);

	return 0;
}
       
  2152 
       
  2153 static int e100_change_mtu(struct net_device *netdev, int new_mtu)
       
  2154 {
       
  2155 	if(new_mtu < ETH_ZLEN || new_mtu > ETH_DATA_LEN)
       
  2156 		return -EINVAL;
       
  2157 	netdev->mtu = new_mtu;
       
  2158 	return 0;
       
  2159 }
       
  2160 
       
  2161 static int e100_asf(struct nic *nic)
       
  2162 {
       
  2163 	/* ASF can be enabled from eeprom */
       
  2164 	return((nic->pdev->device >= 0x1050) && (nic->pdev->device <= 0x1057) &&
       
  2165 	   (nic->eeprom[eeprom_config_asf] & eeprom_asf) &&
       
  2166 	   !(nic->eeprom[eeprom_config_asf] & eeprom_gcl) &&
       
  2167 	   ((nic->eeprom[eeprom_smbus_addr] & 0xFF) != 0xFE));
       
  2168 }
       
  2169 
       
/* Bring the adapter up: allocate rx/tx rings, initialize the hardware
 * and start the receiver.  The watchdog timer, irq and NAPI are only
 * set up when the device is NOT claimed by EtherCAT (nic->ecdev), which
 * drives the device through e100_ec_poll() instead.
 * Returns 0 or a negative errno, unwinding via the goto ladder. */
static int e100_up(struct nic *nic)
{
	int err;

	if((err = e100_rx_alloc_list(nic)))
		return err;
	if((err = e100_alloc_cbs(nic)))
		goto err_rx_clean_list;
	if((err = e100_hw_init(nic)))
		goto err_clean_cbs;
	e100_set_multicast_list(nic->netdev);
	e100_start_receiver(nic, NULL);
	if (!nic->ecdev) {
		mod_timer(&nic->watchdog, jiffies);
		if((err = request_irq(nic->pdev->irq, e100_intr, IRQF_SHARED,
				nic->netdev->name, nic->netdev)))
			goto err_no_irq;
		netif_wake_queue(nic->netdev);
		netif_poll_enable(nic->netdev);
		/* enable ints _after_ enabling poll, preventing a race between
		 * disable ints+schedule */
		e100_enable_irq(nic);
	}
	return 0;

err_no_irq:
	if (!nic->ecdev)
		del_timer_sync(&nic->watchdog);
err_clean_cbs:
	e100_clean_cbs(nic);
err_rx_clean_list:
	e100_rx_clean_list(nic);
	return err;
}
       
  2204 
       
/* Tear down an active adapter: quiesce the software paths (non-EtherCAT
 * only -- EtherCAT devices have no irq/NAPI/queue state to stop), reset
 * the hardware, then release irq, timer and ring resources. */
static void e100_down(struct nic *nic)
{
	if (!nic->ecdev) {
		/* wait here for poll to complete */
		netif_poll_disable(nic->netdev);
		netif_stop_queue(nic->netdev);
	}
	e100_hw_reset(nic);
	if (!nic->ecdev) {
		free_irq(nic->pdev->irq, nic->netdev);
		del_timer_sync(&nic->watchdog);
		netif_carrier_off(nic->netdev);
	}
	e100_clean_cbs(nic);
	e100_rx_clean_list(nic);
}
       
  2221 
       
/* net_device tx-timeout hook: defer recovery to process context, since
 * the recovery path (e100_up) calls request_irq(). */
static void e100_tx_timeout(struct net_device *netdev)
{
	struct nic *nic = netdev_priv(netdev);

	/* Reset outside of interrupt context, to avoid request_irq
	 * in interrupt context */
	schedule_work(&nic->tx_timeout_task);
}
       
  2230 
       
  2231 static void e100_tx_timeout_task(struct work_struct *work)
       
  2232 {
       
  2233 	struct nic *nic = container_of(work, struct nic, tx_timeout_task);
       
  2234 	struct net_device *netdev = nic->netdev;
       
  2235 
       
  2236 	DPRINTK(TX_ERR, DEBUG, "scb.status=0x%02X\n",
       
  2237 		readb(&nic->csr->scb.status));
       
  2238 	e100_down(netdev_priv(netdev));
       
  2239 	e100_up(netdev_priv(netdev));
       
  2240 }
       
  2241 
       
/* Internal MAC or PHY loopback test: transmit one 0xFF-filled frame and
 * compare the received copy byte-for-byte.  Returns 0 on success,
 * -EAGAIN on a data mismatch, or a negative errno on setup failure. */
static int e100_loopback_test(struct nic *nic, enum loopback loopback_mode)
{
	int err;
	struct sk_buff *skb;

	/* Use driver resources to perform internal MAC or PHY
	 * loopback test.  A single packet is prepared and transmitted
	 * in loopback mode, and the test passes if the received
	 * packet compares byte-for-byte to the transmitted packet. */

	if((err = e100_rx_alloc_list(nic)))
		return err;
	if((err = e100_alloc_cbs(nic)))
		goto err_clean_rx;

	/* ICH PHY loopback is broken so do MAC loopback instead */
	if(nic->flags & ich && loopback_mode == lb_phy)
		loopback_mode = lb_mac;

	nic->loopback = loopback_mode;
	if((err = e100_hw_init(nic)))
		goto err_loopback_none;

	if(loopback_mode == lb_phy)
		mdio_write(nic->netdev, nic->mii.phy_id, MII_BMCR,
			BMCR_LOOPBACK);

	e100_start_receiver(nic, NULL);

	if(!(skb = netdev_alloc_skb(nic->netdev, ETH_DATA_LEN))) {
		err = -ENOMEM;
		goto err_loopback_none;
	}
	skb_put(skb, ETH_DATA_LEN);
	memset(skb->data, 0xFF, ETH_DATA_LEN);
	e100_xmit_frame(skb, nic->netdev);

	/* fixed grace period for the frame to loop back -- no completion
	 * check is done here */
	msleep(10);

	pci_dma_sync_single_for_cpu(nic->pdev, nic->rx_to_clean->dma_addr,
			RFD_BUF_LEN, PCI_DMA_FROMDEVICE);

	/* received data follows the RFD header in the rx buffer */
	if(memcmp(nic->rx_to_clean->skb->data + sizeof(struct rfd),
	   skb->data, ETH_DATA_LEN))
		err = -EAGAIN;

err_loopback_none:
	mdio_write(nic->netdev, nic->mii.phy_id, MII_BMCR, 0);
	nic->loopback = lb_none;
	e100_clean_cbs(nic);
	e100_hw_reset(nic);
err_clean_rx:
	e100_rx_clean_list(nic);
	return err;
}
       
  2297 
       
/* Vendor LED-control register in the PHY's MII space. */
#define MII_LED_CONTROL	0x1B
/* Timer callback for ethtool PHY identification: toggle the LED every
 * HZ/4 jiffies and re-arm the timer.  The "on" value differs between
 * pre- and post-82559 MACs. */
static void e100_blink_led(unsigned long data)
{
	struct nic *nic = (struct nic *)data;
	enum led_state {
		led_on     = 0x01,
		led_off    = 0x04,
		led_on_559 = 0x05,
		led_on_557 = 0x07,
	};

	/* alternate between off and the MAC-specific on value */
	nic->leds = (nic->leds & led_on) ? led_off :
		(nic->mac < mac_82559_D101M) ? led_on_557 : led_on_559;
	mdio_write(nic->netdev, nic->mii.phy_id, MII_LED_CONTROL, nic->leds);
	mod_timer(&nic->blink_timer, jiffies + HZ / 4);
}
       
  2314 
       
  2315 static int e100_get_settings(struct net_device *netdev, struct ethtool_cmd *cmd)
       
  2316 {
       
  2317 	struct nic *nic = netdev_priv(netdev);
       
  2318 	return mii_ethtool_gset(&nic->mii, cmd);
       
  2319 }
       
  2320 
       
/* ethtool set_settings: reset the PHY, apply the new link settings via
 * the generic MII helper, then re-issue the configure command block.
 * NOTE(review): the BMCR reset before sset mirrors the upstream e100
 * driver; the sset result is returned even though configure is always
 * executed. */
static int e100_set_settings(struct net_device *netdev, struct ethtool_cmd *cmd)
{
	struct nic *nic = netdev_priv(netdev);
	int err;

	mdio_write(netdev, nic->mii.phy_id, MII_BMCR, BMCR_RESET);
	err = mii_ethtool_sset(&nic->mii, cmd);
	e100_exec_cb(nic, NULL, e100_configure);

	return err;
}
       
  2332 
       
  2333 static void e100_get_drvinfo(struct net_device *netdev,
       
  2334 	struct ethtool_drvinfo *info)
       
  2335 {
       
  2336 	struct nic *nic = netdev_priv(netdev);
       
  2337 	strcpy(info->driver, DRV_NAME);
       
  2338 	strcpy(info->version, DRV_VERSION);
       
  2339 	strcpy(info->fw_version, "N/A");
       
  2340 	strcpy(info->bus_info, pci_name(nic->pdev));
       
  2341 }
       
  2342 
       
  2343 static int e100_get_regs_len(struct net_device *netdev)
       
  2344 {
       
  2345 	struct nic *nic = netdev_priv(netdev);
       
  2346 #define E100_PHY_REGS		0x1C
       
  2347 #define E100_REGS_LEN		1 + E100_PHY_REGS + \
       
  2348 	sizeof(nic->mem->dump_buf) / sizeof(u32)
       
  2349 	return E100_REGS_LEN * sizeof(u32);
       
  2350 }
       
  2351 
       
/* ethtool register dump: word 0 packs the SCB command/status registers,
 * words 1..1+E100_PHY_REGS hold the PHY registers (read from the highest
 * register number down), and from word 2+E100_PHY_REGS on the hardware
 * dump buffer produced by the e100_dump command (after a fixed 10 ms
 * wait -- no completion check). */
static void e100_get_regs(struct net_device *netdev,
	struct ethtool_regs *regs, void *p)
{
	struct nic *nic = netdev_priv(netdev);
	u32 *buff = p;
	int i;

	regs->version = (1 << 24) | nic->rev_id;
	buff[0] = readb(&nic->csr->scb.cmd_hi) << 24 |
		readb(&nic->csr->scb.cmd_lo) << 16 |
		readw(&nic->csr->scb.status);
	for(i = E100_PHY_REGS; i >= 0; i--)
		buff[1 + E100_PHY_REGS - i] =
			mdio_read(netdev, nic->mii.phy_id, i);
	memset(nic->mem->dump_buf, 0, sizeof(nic->mem->dump_buf));
	e100_exec_cb(nic, NULL, e100_dump);
	msleep(10);
	memcpy(&buff[2 + E100_PHY_REGS], nic->mem->dump_buf,
		sizeof(nic->mem->dump_buf));
}
       
  2372 
       
  2373 static void e100_get_wol(struct net_device *netdev, struct ethtool_wolinfo *wol)
       
  2374 {
       
  2375 	struct nic *nic = netdev_priv(netdev);
       
  2376 	wol->supported = (nic->mac >= mac_82558_D101_A4) ?  WAKE_MAGIC : 0;
       
  2377 	wol->wolopts = (nic->flags & wol_magic) ? WAKE_MAGIC : 0;
       
  2378 }
       
  2379 
       
  2380 static int e100_set_wol(struct net_device *netdev, struct ethtool_wolinfo *wol)
       
  2381 {
       
  2382 	struct nic *nic = netdev_priv(netdev);
       
  2383 
       
  2384 	if(wol->wolopts != WAKE_MAGIC && wol->wolopts != 0)
       
  2385 		return -EOPNOTSUPP;
       
  2386 
       
  2387 	if(wol->wolopts)
       
  2388 		nic->flags |= wol_magic;
       
  2389 	else
       
  2390 		nic->flags &= ~wol_magic;
       
  2391 
       
  2392 	e100_exec_cb(nic, NULL, e100_configure);
       
  2393 
       
  2394 	return 0;
       
  2395 }
       
  2396 
       
  2397 static u32 e100_get_msglevel(struct net_device *netdev)
       
  2398 {
       
  2399 	struct nic *nic = netdev_priv(netdev);
       
  2400 	return nic->msg_enable;
       
  2401 }
       
  2402 
       
  2403 static void e100_set_msglevel(struct net_device *netdev, u32 value)
       
  2404 {
       
  2405 	struct nic *nic = netdev_priv(netdev);
       
  2406 	nic->msg_enable = value;
       
  2407 }
       
  2408 
       
  2409 static int e100_nway_reset(struct net_device *netdev)
       
  2410 {
       
  2411 	struct nic *nic = netdev_priv(netdev);
       
  2412 	return mii_nway_restart(&nic->mii);
       
  2413 }
       
  2414 
       
  2415 static u32 e100_get_link(struct net_device *netdev)
       
  2416 {
       
  2417 	struct nic *nic = netdev_priv(netdev);
       
  2418 	return mii_link_ok(&nic->mii);
       
  2419 }
       
  2420 
       
  2421 static int e100_get_eeprom_len(struct net_device *netdev)
       
  2422 {
       
  2423 	struct nic *nic = netdev_priv(netdev);
       
  2424 	return nic->eeprom_wc << 1;
       
  2425 }
       
  2426 
       
/* Magic value identifying e100 EEPROM images to ethtool. */
#define E100_EEPROM_MAGIC	0x1234
/* ethtool EEPROM read: copy from the driver's in-memory EEPROM shadow.
 * NOTE(review): offset/len are presumably range-checked by the ethtool
 * core against e100_get_eeprom_len() before this is called -- confirm. */
static int e100_get_eeprom(struct net_device *netdev,
	struct ethtool_eeprom *eeprom, u8 *bytes)
{
	struct nic *nic = netdev_priv(netdev);

	eeprom->magic = E100_EEPROM_MAGIC;
	memcpy(bytes, &((u8 *)nic->eeprom)[eeprom->offset], eeprom->len);

	return 0;
}
       
  2438 
       
/* ethtool EEPROM write: verify the magic, update the in-memory shadow,
 * then save the touched 16-bit words back to the physical EEPROM.
 * The "+ 1" word presumably covers a write straddling a word boundary
 * -- NOTE(review): confirm against e100_eeprom_save(). */
static int e100_set_eeprom(struct net_device *netdev,
	struct ethtool_eeprom *eeprom, u8 *bytes)
{
	struct nic *nic = netdev_priv(netdev);

	if(eeprom->magic != E100_EEPROM_MAGIC)
		return -EINVAL;

	memcpy(&((u8 *)nic->eeprom)[eeprom->offset], bytes, eeprom->len);

	return e100_eeprom_save(nic, eeprom->offset >> 1,
		(eeprom->len >> 1) + 1);
}
       
  2452 
       
  2453 static void e100_get_ringparam(struct net_device *netdev,
       
  2454 	struct ethtool_ringparam *ring)
       
  2455 {
       
  2456 	struct nic *nic = netdev_priv(netdev);
       
  2457 	struct param_range *rfds = &nic->params.rfds;
       
  2458 	struct param_range *cbs = &nic->params.cbs;
       
  2459 
       
  2460 	ring->rx_max_pending = rfds->max;
       
  2461 	ring->tx_max_pending = cbs->max;
       
  2462 	ring->rx_mini_max_pending = 0;
       
  2463 	ring->rx_jumbo_max_pending = 0;
       
  2464 	ring->rx_pending = rfds->count;
       
  2465 	ring->tx_pending = cbs->count;
       
  2466 	ring->rx_mini_pending = 0;
       
  2467 	ring->rx_jumbo_pending = 0;
       
  2468 }
       
  2469 
       
/* ethtool set_ringparam: clamp the requested rx/tx counts into the
 * driver's min/max ranges and, if the interface is running, restart it
 * so the new ring sizes take effect.  Mini/jumbo rings unsupported. */
static int e100_set_ringparam(struct net_device *netdev,
	struct ethtool_ringparam *ring)
{
	struct nic *nic = netdev_priv(netdev);
	struct param_range *rfds = &nic->params.rfds;
	struct param_range *cbs = &nic->params.cbs;

	if ((ring->rx_mini_pending) || (ring->rx_jumbo_pending))
		return -EINVAL;

	if(netif_running(netdev))
		e100_down(nic);
	/* clamp into [min, max] for each ring */
	rfds->count = max(ring->rx_pending, rfds->min);
	rfds->count = min(rfds->count, rfds->max);
	cbs->count = max(ring->tx_pending, cbs->min);
	cbs->count = min(cbs->count, cbs->max);
	DPRINTK(DRV, INFO, "Ring Param settings: rx: %d, tx %d\n",
	        rfds->count, cbs->count);
	if(netif_running(netdev))
		e100_up(nic);

	return 0;
}
       
  2493 
       
  2494 static const char e100_gstrings_test[][ETH_GSTRING_LEN] = {
       
  2495 	"Link test     (on/offline)",
       
  2496 	"Eeprom test   (on/offline)",
       
  2497 	"Self test        (offline)",
       
  2498 	"Mac loopback     (offline)",
       
  2499 	"Phy loopback     (offline)",
       
  2500 };
       
  2501 #define E100_TEST_LEN	sizeof(e100_gstrings_test) / ETH_GSTRING_LEN
       
  2502 
       
/* ethtool self_test_count: number of entries in e100_gstrings_test. */
static int e100_diag_test_count(struct net_device *netdev)
{
	return E100_TEST_LEN;
}
       
  2507 
       
/* ethtool self-test.  Online tests: link status (data[0]) and EEPROM
 * load (data[1]).  When offline testing is requested, also run the chip
 * self-test and MAC/PHY loopback tests, bracketed by a save/restore of
 * the MII link settings and a down/up of a running interface.  Any
 * nonzero data[] entry marks the whole run as failed. */
static void e100_diag_test(struct net_device *netdev,
	struct ethtool_test *test, u64 *data)
{
	struct ethtool_cmd cmd;
	struct nic *nic = netdev_priv(netdev);
	int i, err;

	memset(data, 0, E100_TEST_LEN * sizeof(u64));
	data[0] = !mii_link_ok(&nic->mii);
	data[1] = e100_eeprom_load(nic);
	if(test->flags & ETH_TEST_FL_OFFLINE) {

		/* save speed, duplex & autoneg settings */
		err = mii_ethtool_gset(&nic->mii, &cmd);

		if(netif_running(netdev))
			e100_down(nic);
		data[2] = e100_self_test(nic);
		data[3] = e100_loopback_test(nic, lb_mac);
		data[4] = e100_loopback_test(nic, lb_phy);

		/* restore speed, duplex & autoneg settings */
		err = mii_ethtool_sset(&nic->mii, &cmd);

		if(netif_running(netdev))
			e100_up(nic);
	}
	for(i = 0; i < E100_TEST_LEN; i++)
		test->flags |= data[i] ? ETH_TEST_FL_FAILED : 0;

	/* NOTE(review): fixed 4 s delay, mirrors upstream e100 -- purpose
	 * not evident from this code */
	msleep_interruptible(4 * 1000);
}
       
  2540 
       
/* ethtool "identify NIC": blink the LED for `data` seconds (0 or an
 * over-large value means the maximum schedulable duration), then stop
 * the blink timer and force the LED off. */
static int e100_phys_id(struct net_device *netdev, u32 data)
{
	struct nic *nic = netdev_priv(netdev);

	if(!data || data > (u32)(MAX_SCHEDULE_TIMEOUT / HZ))
		data = (u32)(MAX_SCHEDULE_TIMEOUT / HZ);
	mod_timer(&nic->blink_timer, jiffies);
	msleep_interruptible(data * 1000);
	del_timer_sync(&nic->blink_timer);
	mdio_write(netdev, nic->mii.phy_id, MII_LED_CONTROL, 0);

	return 0;
}
       
  2554 
       
  2555 static const char e100_gstrings_stats[][ETH_GSTRING_LEN] = {
       
  2556 	"rx_packets", "tx_packets", "rx_bytes", "tx_bytes", "rx_errors",
       
  2557 	"tx_errors", "rx_dropped", "tx_dropped", "multicast", "collisions",
       
  2558 	"rx_length_errors", "rx_over_errors", "rx_crc_errors",
       
  2559 	"rx_frame_errors", "rx_fifo_errors", "rx_missed_errors",
       
  2560 	"tx_aborted_errors", "tx_carrier_errors", "tx_fifo_errors",
       
  2561 	"tx_heartbeat_errors", "tx_window_errors",
       
  2562 	/* device-specific stats */
       
  2563 	"tx_deferred", "tx_single_collisions", "tx_multi_collisions",
       
  2564 	"tx_flow_control_pause", "rx_flow_control_pause",
       
  2565 	"rx_flow_control_unsupported", "tx_tco_packets", "rx_tco_packets",
       
  2566 };
       
  2567 #define E100_NET_STATS_LEN	21
       
  2568 #define E100_STATS_LEN	sizeof(e100_gstrings_stats) / ETH_GSTRING_LEN
       
  2569 
       
/* ethtool get_stats_count: number of entries in e100_gstrings_stats. */
static int e100_get_stats_count(struct net_device *netdev)
{
	return E100_STATS_LEN;
}
       
  2574 
       
/* Fill the ethtool statistics array.  The first E100_NET_STATS_LEN
 * values are read by treating nic->net_stats as an array of unsigned
 * long -- NOTE(review): this relies on struct net_device_stats being a
 * plain sequence of unsigned-long fields matching the first 21 string
 * names; layout-dependent.  The remaining entries are driver-private
 * counters in e100_gstrings_stats order. */
static void e100_get_ethtool_stats(struct net_device *netdev,
	struct ethtool_stats *stats, u64 *data)
{
	struct nic *nic = netdev_priv(netdev);
	int i;

	for(i = 0; i < E100_NET_STATS_LEN; i++)
		data[i] = ((unsigned long *)&nic->net_stats)[i];

	data[i++] = nic->tx_deferred;
	data[i++] = nic->tx_single_collisions;
	data[i++] = nic->tx_multiple_collisions;
	data[i++] = nic->tx_fc_pause;
	data[i++] = nic->rx_fc_pause;
	data[i++] = nic->rx_fc_unsupported;
	data[i++] = nic->tx_tco_frames;
	data[i++] = nic->rx_tco_frames;
}
       
  2593 
       
  2594 static void e100_get_strings(struct net_device *netdev, u32 stringset, u8 *data)
       
  2595 {
       
  2596 	switch(stringset) {
       
  2597 	case ETH_SS_TEST:
       
  2598 		memcpy(data, *e100_gstrings_test, sizeof(e100_gstrings_test));
       
  2599 		break;
       
  2600 	case ETH_SS_STATS:
       
  2601 		memcpy(data, *e100_gstrings_stats, sizeof(e100_gstrings_stats));
       
  2602 		break;
       
  2603 	}
       
  2604 }
       
  2605 
       
/* ethtool operations table; ops not listed here fall back to the
 * ethtool core defaults. */
static const struct ethtool_ops e100_ethtool_ops = {
	.get_settings		= e100_get_settings,
	.set_settings		= e100_set_settings,
	.get_drvinfo		= e100_get_drvinfo,
	.get_regs_len		= e100_get_regs_len,
	.get_regs		= e100_get_regs,
	.get_wol		= e100_get_wol,
	.set_wol		= e100_set_wol,
	.get_msglevel		= e100_get_msglevel,
	.set_msglevel		= e100_set_msglevel,
	.nway_reset		= e100_nway_reset,
	.get_link		= e100_get_link,
	.get_eeprom_len		= e100_get_eeprom_len,
	.get_eeprom		= e100_get_eeprom,
	.set_eeprom		= e100_set_eeprom,
	.get_ringparam		= e100_get_ringparam,
	.set_ringparam		= e100_set_ringparam,
	.self_test_count	= e100_diag_test_count,
	.self_test		= e100_diag_test,
	.get_strings		= e100_get_strings,
	.phys_id		= e100_phys_id,
	.get_stats_count	= e100_get_stats_count,
	.get_ethtool_stats	= e100_get_ethtool_stats,
	.get_perm_addr		= ethtool_op_get_perm_addr,
};
       
  2631 
       
  2632 static int e100_do_ioctl(struct net_device *netdev, struct ifreq *ifr, int cmd)
       
  2633 {
       
  2634 	struct nic *nic = netdev_priv(netdev);
       
  2635 
       
  2636 	return generic_mii_ioctl(&nic->mii, if_mii(ifr), cmd, NULL);
       
  2637 }
       
  2638 
       
  2639 static int e100_alloc(struct nic *nic)
       
  2640 {
       
  2641 	nic->mem = pci_alloc_consistent(nic->pdev, sizeof(struct mem),
       
  2642 		&nic->dma_addr);
       
  2643 	return nic->mem ? 0 : -ENOMEM;
       
  2644 }
       
  2645 
       
  2646 static void e100_free(struct nic *nic)
       
  2647 {
       
  2648 	if(nic->mem) {
       
  2649 		pci_free_consistent(nic->pdev, sizeof(struct mem),
       
  2650 			nic->mem, nic->dma_addr);
       
  2651 		nic->mem = NULL;
       
  2652 	}
       
  2653 }
       
  2654 
       
/* net_device open hook: clear the carrier flag (non-EtherCAT devices
 * only), then bring the adapter up via e100_up(). */
static int e100_open(struct net_device *netdev)
{
	struct nic *nic = netdev_priv(netdev);
	int err = 0;

	if (!nic->ecdev)
		netif_carrier_off(netdev);
	if((err = e100_up(nic)))
		DPRINTK(IFUP, ERR, "Cannot open interface, aborting.\n");
	return err;
}
       
  2666 
       
  2667 static int e100_close(struct net_device *netdev)
       
  2668 {
       
  2669 	e100_down(netdev_priv(netdev));
       
  2670 	return 0;
       
  2671 }
       
  2672 
       
/* PCI probe: allocate and wire up the net_device, map the CSR BAR, load
 * the EEPROM and MAC address, then either hand the device to the
 * EtherCAT master (ecdev_offer) or register it as a normal Linux
 * network interface.  Unwinds via the goto ladder on failure. */
static int __devinit e100_probe(struct pci_dev *pdev,
	const struct pci_device_id *ent)
{
	struct net_device *netdev;
	struct nic *nic;
	int err;

	if(!(netdev = alloc_etherdev(sizeof(struct nic)))) {
		if(((1 << debug) - 1) & NETIF_MSG_PROBE)
			printk(KERN_ERR PFX "Etherdev alloc failed, abort.\n");
		return -ENOMEM;
	}

	/* wire up net_device operations (pre-net_device_ops kernel) */
	netdev->open = e100_open;
	netdev->stop = e100_close;
	netdev->hard_start_xmit = e100_xmit_frame;
	netdev->get_stats = e100_get_stats;
	netdev->set_multicast_list = e100_set_multicast_list;
	netdev->set_mac_address = e100_set_mac_address;
	netdev->change_mtu = e100_change_mtu;
	netdev->do_ioctl = e100_do_ioctl;
	SET_ETHTOOL_OPS(netdev, &e100_ethtool_ops);
	netdev->tx_timeout = e100_tx_timeout;
	netdev->watchdog_timeo = E100_WATCHDOG_PERIOD;
	netdev->poll = e100_poll;
	netdev->weight = E100_NAPI_WEIGHT;
#ifdef CONFIG_NET_POLL_CONTROLLER
	netdev->poll_controller = e100_netpoll;
#endif
	strncpy(netdev->name, pci_name(pdev), sizeof(netdev->name) - 1);

	nic = netdev_priv(netdev);
	nic->netdev = netdev;
	nic->pdev = pdev;
	nic->msg_enable = (1 << debug) - 1;
	pci_set_drvdata(pdev, netdev);

	if((err = pci_enable_device(pdev))) {
		DPRINTK(PROBE, ERR, "Cannot enable PCI device, aborting.\n");
		goto err_out_free_dev;
	}

	if(!(pci_resource_flags(pdev, 0) & IORESOURCE_MEM)) {
		DPRINTK(PROBE, ERR, "Cannot find proper PCI device "
			"base address, aborting.\n");
		err = -ENODEV;
		goto err_out_disable_pdev;
	}

	if((err = pci_request_regions(pdev, DRV_NAME))) {
		DPRINTK(PROBE, ERR, "Cannot obtain PCI resources, aborting.\n");
		goto err_out_disable_pdev;
	}

	if((err = pci_set_dma_mask(pdev, DMA_32BIT_MASK))) {
		DPRINTK(PROBE, ERR, "No usable DMA configuration, aborting.\n");
		goto err_out_free_res;
	}

	SET_MODULE_OWNER(netdev);
	SET_NETDEV_DEV(netdev, &pdev->dev);

	/* only the CSR block (BAR 0) is memory-mapped */
	nic->csr = ioremap(pci_resource_start(pdev, 0), sizeof(struct csr));
	if(!nic->csr) {
		DPRINTK(PROBE, ERR, "Cannot map device registers, aborting.\n");
		err = -ENOMEM;
		goto err_out_free_res;
	}

	/* driver_data marks ICH chipset variants in the id table */
	if(ent->driver_data)
		nic->flags |= ich;
	else
		nic->flags &= ~ich;

	e100_get_defaults(nic);

	/* locks must be initialized before calling hw_reset */
	spin_lock_init(&nic->cb_lock);
	spin_lock_init(&nic->cmd_lock);
	spin_lock_init(&nic->mdio_lock);

	/* Reset the device before pci_set_master() in case device is in some
	 * funky state and has an interrupt pending - hint: we don't have the
	 * interrupt handler registered yet. */
	e100_hw_reset(nic);

	pci_set_master(pdev);

	init_timer(&nic->watchdog);
	nic->watchdog.function = e100_watchdog;
	nic->watchdog.data = (unsigned long)nic;
	init_timer(&nic->blink_timer);
	nic->blink_timer.function = e100_blink_led;
	nic->blink_timer.data = (unsigned long)nic;

	INIT_WORK(&nic->tx_timeout_task, e100_tx_timeout_task);

	if((err = e100_alloc(nic))) {
		DPRINTK(PROBE, ERR, "Cannot alloc driver memory, aborting.\n");
		goto err_out_iounmap;
	}

	if((err = e100_eeprom_load(nic)))
		goto err_out_free;

	e100_phy_init(nic);

	/* MAC address comes from the first EEPROM words */
	memcpy(netdev->dev_addr, nic->eeprom, ETH_ALEN);
	memcpy(netdev->perm_addr, nic->eeprom, ETH_ALEN);
	if(!is_valid_ether_addr(netdev->perm_addr)) {
		DPRINTK(PROBE, ERR, "Invalid MAC address from "
			"EEPROM, aborting.\n");
		err = -EAGAIN;
		goto err_out_free;
	}

	/* Wol magic packet can be enabled from eeprom */
	if((nic->mac >= mac_82558_D101_A4) &&
	   (nic->eeprom[eeprom_id] & eeprom_id_wol))
		nic->flags |= wol_magic;

	/* ack any pending wake events, disable PME */
	err = pci_enable_wake(pdev, 0, 0);
	if (err)
		DPRINTK(PROBE, ERR, "Error clearing wake event\n");

	// offer device to EtherCAT master module
	nic->ecdev = ecdev_offer(netdev, e100_ec_poll, THIS_MODULE);
	if (nic->ecdev) {
		/* claimed by EtherCAT: no netdev registration */
		strcpy(netdev->name, "ec0");
		if (ecdev_open(nic->ecdev)) {
			/* NOTE(review): err may still be 0 here (from the
			 * pci_enable_wake() call above) -- confirm intent */
			ecdev_withdraw(nic->ecdev);
			goto err_out_free;
		}
	} else {
		strcpy(netdev->name, "eth%d");
		if((err = register_netdev(netdev))) {
			DPRINTK(PROBE, ERR, "Cannot register net device, aborting.\n");
			goto err_out_free;
		}
	}

	DPRINTK(PROBE, INFO, "addr 0x%llx, irq %d, "
		"MAC addr %02X:%02X:%02X:%02X:%02X:%02X\n",
		(unsigned long long)pci_resource_start(pdev, 0), pdev->irq,
		netdev->dev_addr[0], netdev->dev_addr[1], netdev->dev_addr[2],
		netdev->dev_addr[3], netdev->dev_addr[4], netdev->dev_addr[5]);

	return 0;

err_out_free:
	e100_free(nic);
err_out_iounmap:
	iounmap(nic->csr);
err_out_free_res:
	pci_release_regions(pdev);
err_out_disable_pdev:
	pci_disable_device(pdev);
err_out_free_dev:
	pci_set_drvdata(pdev, NULL);
	free_netdev(netdev);
	return err;
}
       
  2836 
       
/* PCI remove: give the device back (close/withdraw from the EtherCAT
 * master, or unregister the netdev) and undo all probe-time setup. */
static void __devexit e100_remove(struct pci_dev *pdev)
{
	struct net_device *netdev = pci_get_drvdata(pdev);

	if(netdev) {
		struct nic *nic = netdev_priv(netdev);
		if (nic->ecdev) {
			ecdev_close(nic->ecdev);
			ecdev_withdraw(nic->ecdev);
		} else {
			unregister_netdev(netdev);
		}
		e100_free(nic);
		iounmap(nic->csr);
		free_netdev(netdev);
		pci_release_regions(pdev);
		pci_disable_device(pdev);
		pci_set_drvdata(pdev, NULL);
	}
}
       
  2857 
       
  2858 #ifdef CONFIG_PM
       
  2859 static int e100_suspend(struct pci_dev *pdev, pm_message_t state)
       
  2860 {
       
  2861 	struct net_device *netdev = pci_get_drvdata(pdev);
       
  2862 	struct nic *nic = netdev_priv(netdev);
       
  2863 
       
  2864 	if (nic->ecdev)
       
  2865 		return;
       
  2866 
       
  2867 	if (netif_running(netdev))
       
  2868 		netif_poll_disable(nic->netdev);
       
  2869 	del_timer_sync(&nic->watchdog);
       
  2870 	netif_carrier_off(nic->netdev);
       
  2871 	netif_device_detach(netdev);
       
  2872 
       
  2873 	pci_save_state(pdev);
       
  2874 
       
  2875 	if ((nic->flags & wol_magic) | e100_asf(nic)) {
       
  2876 		pci_enable_wake(pdev, PCI_D3hot, 1);
       
  2877 		pci_enable_wake(pdev, PCI_D3cold, 1);
       
  2878 	} else {
       
  2879 		pci_enable_wake(pdev, PCI_D3hot, 0);
       
  2880 		pci_enable_wake(pdev, PCI_D3cold, 0);
       
  2881 	}
       
  2882 
       
  2883 	pci_disable_device(pdev);
       
  2884 	free_irq(pdev->irq, netdev);
       
  2885 	pci_set_power_state(pdev, PCI_D3hot);
       
  2886 
       
  2887 	return 0;
       
  2888 }
       
  2889 
       
  2890 static int e100_resume(struct pci_dev *pdev)
       
  2891 {
       
  2892 	struct net_device *netdev = pci_get_drvdata(pdev);
       
  2893 	struct nic *nic = netdev_priv(netdev);
       
  2894 
       
  2895 	if (nic->ecdev)
       
  2896 		return;
       
  2897 
       
  2898 	pci_set_power_state(pdev, PCI_D0);
       
  2899 	pci_restore_state(pdev);
       
  2900 	/* ack any pending wake events, disable PME */
       
  2901 	pci_enable_wake(pdev, 0, 0);
       
  2902 
       
  2903 	netif_device_attach(netdev);
       
  2904 	if (netif_running(netdev))
       
  2905 		e100_up(nic);
       
  2906 
       
  2907 	return 0;
       
  2908 }
       
  2909 #endif /* CONFIG_PM */
       
  2910 
       
  2911 static void e100_shutdown(struct pci_dev *pdev)
       
  2912 {
       
  2913 	struct net_device *netdev = pci_get_drvdata(pdev);
       
  2914 	struct nic *nic = netdev_priv(netdev);
       
  2915 
       
  2916 	if (nic->ecdev)
       
  2917 		return;
       
  2918 
       
  2919 	if (netif_running(netdev))
       
  2920 		netif_poll_disable(nic->netdev);
       
  2921 	del_timer_sync(&nic->watchdog);
       
  2922 	netif_carrier_off(nic->netdev);
       
  2923 
       
  2924 	if ((nic->flags & wol_magic) | e100_asf(nic)) {
       
  2925 		pci_enable_wake(pdev, PCI_D3hot, 1);
       
  2926 		pci_enable_wake(pdev, PCI_D3cold, 1);
       
  2927 	} else {
       
  2928 		pci_enable_wake(pdev, PCI_D3hot, 0);
       
  2929 		pci_enable_wake(pdev, PCI_D3cold, 0);
       
  2930 	}
       
  2931 
       
  2932 	pci_disable_device(pdev);
       
  2933 	pci_set_power_state(pdev, PCI_D3hot);
       
  2934 }
       
  2935 
       
  2936 /* ------------------ PCI Error Recovery infrastructure  -------------- */
       
  2937 /**
       
  2938  * e100_io_error_detected - called when PCI error is detected.
       
  2939  * @pdev: Pointer to PCI device
       
  2940  * @state: The current pci conneection state
       
  2941  */
       
  2942 static pci_ers_result_t e100_io_error_detected(struct pci_dev *pdev, pci_channel_state_t state)
       
  2943 {
       
  2944 	struct net_device *netdev = pci_get_drvdata(pdev);
       
  2945 	struct nic *nic = netdev_priv(netdev);
       
  2946 
       
  2947 	/* Similar to calling e100_down(), but avoids adpater I/O. */
       
  2948 	netdev->stop(netdev);
       
  2949 
       
  2950     if (!nic->ecdev) {
       
  2951         /* Detach; put netif into state similar to hotplug unplug. */
       
  2952         netif_poll_enable(netdev);
       
  2953         netif_device_detach(netdev);
       
  2954     }
       
  2955 	pci_disable_device(pdev);
       
  2956 
       
  2957 	/* Request a slot reset. */
       
  2958 	return PCI_ERS_RESULT_NEED_RESET;
       
  2959 }
       
  2960 
       
  2961 /**
       
  2962  * e100_io_slot_reset - called after the pci bus has been reset.
       
  2963  * @pdev: Pointer to PCI device
       
  2964  *
       
  2965  * Restart the card from scratch.
       
  2966  */
       
  2967 static pci_ers_result_t e100_io_slot_reset(struct pci_dev *pdev)
       
  2968 {
       
  2969 	struct net_device *netdev = pci_get_drvdata(pdev);
       
  2970 	struct nic *nic = netdev_priv(netdev);
       
  2971 
       
  2972 	if (pci_enable_device(pdev)) {
       
  2973 		printk(KERN_ERR "e100: Cannot re-enable PCI device after reset.\n");
       
  2974 		return PCI_ERS_RESULT_DISCONNECT;
       
  2975 	}
       
  2976 	pci_set_master(pdev);
       
  2977 
       
  2978 	/* Only one device per card can do a reset */
       
  2979 	if (0 != PCI_FUNC(pdev->devfn))
       
  2980 		return PCI_ERS_RESULT_RECOVERED;
       
  2981 	e100_hw_reset(nic);
       
  2982 	e100_phy_init(nic);
       
  2983 
       
  2984 	return PCI_ERS_RESULT_RECOVERED;
       
  2985 }
       
  2986 
       
  2987 /**
       
  2988  * e100_io_resume - resume normal operations
       
  2989  * @pdev: Pointer to PCI device
       
  2990  *
       
  2991  * Resume normal operations after an error recovery
       
  2992  * sequence has been completed.
       
  2993  */
       
  2994 static void e100_io_resume(struct pci_dev *pdev)
       
  2995 {
       
  2996 	struct net_device *netdev = pci_get_drvdata(pdev);
       
  2997 	struct nic *nic = netdev_priv(netdev);
       
  2998 
       
  2999 	/* ack any pending wake events, disable PME */
       
  3000 	pci_enable_wake(pdev, 0, 0);
       
  3001 
       
  3002     if (!nic->ecdev)
       
  3003         netif_device_attach(netdev);
       
  3004 	if (nic->ecdev || netif_running(netdev)) {
       
  3005 		e100_open(netdev);
       
  3006 		if (!nic->ecdev)
       
  3007 			mod_timer(&nic->watchdog, jiffies);
       
  3008 	}
       
  3009 }
       
  3010 
       
/* Hooks wiring this driver into the PCI error-recovery framework
 * (see the three e100_io_* functions above). */
static struct pci_error_handlers e100_err_handler = {
	.error_detected = e100_io_error_detected,
	.slot_reset = e100_io_slot_reset,
	.resume = e100_io_resume,
};
       
  3016 
       
/* PCI driver descriptor: probe/remove, optional power management,
 * shutdown and error-recovery entry points for all matching devices. */
static struct pci_driver e100_driver = {
	.name =         DRV_NAME,
	.id_table =     e100_id_table,
	.probe =        e100_probe,
	.remove =       __devexit_p(e100_remove),
#ifdef CONFIG_PM
	/* Power Management hooks */
	.suspend =      e100_suspend,
	.resume =       e100_resume,
#endif
	.shutdown =     e100_shutdown,
	.err_handler = &e100_err_handler,
};
       
  3030 
       
  3031 static int __init e100_init_module(void)
       
  3032 {
       
  3033     printk(KERN_INFO DRV_NAME " " DRV_DESCRIPTION " " DRV_VERSION
       
  3034             ", master " EC_MASTER_VERSION "\n");
       
  3035 
       
  3036 	return pci_register_driver(&e100_driver);
       
  3037 }
       
  3038 
       
/* Module exit point: unregister the PCI driver, which triggers
 * e100_remove() for every bound device. */
static void __exit e100_cleanup_module(void)
{
	printk(KERN_INFO DRV_NAME " cleaning up module...\n");
	pci_unregister_driver(&e100_driver);
	printk(KERN_INFO DRV_NAME " module cleaned up.\n");
}
       
  3045 
       
/* Register module load/unload entry points with the kernel. */
module_init(e100_init_module);
module_exit(e100_cleanup_module);