devices/e100-2.6.20-ethercat.c
branchstable-1.3
changeset 1759 c3b4d3a50ac6
equal deleted inserted replaced
1758:2f7f5fa7b870 1759:c3b4d3a50ac6
       
     1 /******************************************************************************
       
     2  *
       
     3  *  $Id$
       
     4  *
       
     5  *  Copyright (C) 2007  Florian Pose, Ingenieurgemeinschaft IgH
       
     6  *
       
     7  *  This file is part of the IgH EtherCAT Master.
       
     8  *
       
     9  *  The IgH EtherCAT Master is free software; you can redistribute it
       
    10  *  and/or modify it under the terms of the GNU General Public License
       
    11  *  as published by the Free Software Foundation; either version 2 of the
       
    12  *  License, or (at your option) any later version.
       
    13  *
       
    14  *  The IgH EtherCAT Master is distributed in the hope that it will be
       
    15  *  useful, but WITHOUT ANY WARRANTY; without even the implied warranty of
       
    16  *  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
       
    17  *  GNU General Public License for more details.
       
    18  *
       
    19  *  You should have received a copy of the GNU General Public License
       
    20  *  along with the IgH EtherCAT Master; if not, write to the Free Software
       
    21  *  Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA  02110-1301  USA
       
    22  *
       
    23  *  The right to use EtherCAT Technology is granted and comes free of
       
    24  *  charge under condition of compatibility of product made by
       
    25  *  Licensee. People intending to distribute/sell products based on the
       
    26  *  code, have to sign an agreement to guarantee that products using
       
    27  *  software based on IgH EtherCAT master stay compatible with the actual
       
    28  *  EtherCAT specification (which are released themselves as an open
       
    29  *  standard) as the (only) precondition to have the right to use EtherCAT
       
    30  *  Technology, IP and trade marks.
       
    31  *
       
    32  *  vim: noexpandtab
       
    33  *
       
    34  *****************************************************************************/
       
    35 
       
    36 /**
       
    37    \file
       
    38    EtherCAT driver for e100-compatible NICs.
       
    39 */
       
    40 
       
    41 /* Former documentation: */
       
    42 
       
    43 /*******************************************************************************
       
    44   Intel PRO/100 Linux driver
       
    45   Copyright(c) 1999 - 2006 Intel Corporation.
       
    46 
       
    47   This program is free software; you can redistribute it and/or modify it
       
    48   under the terms and conditions of the GNU General Public License,
       
    49   version 2, as published by the Free Software Foundation.
       
    50 
       
    51   This program is distributed in the hope it will be useful, but WITHOUT
       
    52   ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
       
    53   FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
       
    54   more details.
       
    55 
       
    56   You should have received a copy of the GNU General Public License along with
       
    57   this program; if not, write to the Free Software Foundation, Inc.,
       
    58   51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
       
    59 
       
    60   The full GNU General Public License is included in this distribution in
       
    61   the file called "COPYING".
       
    62 
       
    63   Contact Information:
       
    64   Linux NICS <linux.nics@intel.com>
       
    65   e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
       
    66   Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
       
    67 
       
    68 *******************************************************************************/
       
    69 
       
    70 /*
       
    71  *	e100.c: Intel(R) PRO/100 ethernet driver
       
    72  *
       
    73  *	(Re)written 2003 by scott.feldman@intel.com.  Based loosely on
       
    74  *	original e100 driver, but better described as a munging of
       
    75  *	e100, e1000, eepro100, tg3, 8139cp, and other drivers.
       
    76  *
       
    77  *	References:
       
    78  *		Intel 8255x 10/100 Mbps Ethernet Controller Family,
       
    79  *		Open Source Software Developers Manual,
       
    80  *		http://sourceforge.net/projects/e1000
       
    81  *
       
    82  *
       
    83  *	                      Theory of Operation
       
    84  *
       
    85  *	I.   General
       
    86  *
       
    87  *	The driver supports Intel(R) 10/100 Mbps PCI Fast Ethernet
       
    88  *	controller family, which includes the 82557, 82558, 82559, 82550,
       
    89  *	82551, and 82562 devices.  82558 and greater controllers
       
    90  *	integrate the Intel 82555 PHY.  The controllers are used in
       
    91  *	server and client network interface cards, as well as in
       
    92  *	LAN-On-Motherboard (LOM), CardBus, MiniPCI, and ICHx
       
    93  *	configurations.  8255x supports a 32-bit linear addressing
       
    94  *	mode and operates at 33Mhz PCI clock rate.
       
    95  *
       
    96  *	II.  Driver Operation
       
    97  *
       
    98  *	Memory-mapped mode is used exclusively to access the device's
       
    99  *	shared-memory structure, the Control/Status Registers (CSR). All
       
   100  *	setup, configuration, and control of the device, including queuing
       
   101  *	of Tx, Rx, and configuration commands is through the CSR.
       
   102  *	cmd_lock serializes accesses to the CSR command register.  cb_lock
       
   103  *	protects the shared Command Block List (CBL).
       
   104  *
       
   105  *	8255x is highly MII-compliant and all access to the PHY go
       
   106  *	through the Management Data Interface (MDI).  Consequently, the
       
   107  *	driver leverages the mii.c library shared with other MII-compliant
       
   108  *	devices.
       
   109  *
       
   110  *	Big- and Little-Endian byte order as well as 32- and 64-bit
       
   111  *	archs are supported.  Weak-ordered memory and non-cache-coherent
       
   112  *	archs are supported.
       
   113  *
       
   114  *	III. Transmit
       
   115  *
       
   116  *	A Tx skb is mapped and hangs off of a TCB.  TCBs are linked
       
   117  *	together in a fixed-size ring (CBL) thus forming the flexible mode
       
   118  *	memory structure.  A TCB marked with the suspend-bit indicates
       
   119  *	the end of the ring.  The last TCB processed suspends the
       
   120  *	controller, and the controller can be restarted by issuing a CU
       
   121  *	resume command to continue from the suspend point, or a CU start
       
   122  *	command to start at a given position in the ring.
       
   123  *
       
   124  *	Non-Tx commands (config, multicast setup, etc) are linked
       
   125  *	into the CBL ring along with Tx commands.  The common structure
       
   126  *	used for both Tx and non-Tx commands is the Command Block (CB).
       
   127  *
       
   128  *	cb_to_use is the next CB to use for queuing a command; cb_to_clean
       
   129  *	is the next CB to check for completion; cb_to_send is the first
       
   130  *	CB to start on in case of a previous failure to resume.  CB clean
       
   131  *	up happens in interrupt context in response to a CU interrupt.
       
   132  *	cbs_avail keeps track of number of free CB resources available.
       
   133  *
       
   134  * 	Hardware padding of short packets to minimum packet size is
       
   135  * 	enabled.  82557 pads with 7Eh, while the later controllers pad
       
   136  * 	with 00h.
       
   137  *
       
   138  *	IV.  Receive
       
   139  *
       
   140  *	The Receive Frame Area (RFA) comprises a ring of Receive Frame
       
   141  *	Descriptors (RFD) + data buffer, thus forming the simplified mode
       
   142  *	memory structure.  Rx skbs are allocated to contain both the RFD
       
   143  *	and the data buffer, but the RFD is pulled off before the skb is
       
   144  *	indicated.  The data buffer is aligned such that encapsulated
       
   145  *	protocol headers are u32-aligned.  Since the RFD is part of the
       
   146  *	mapped shared memory, and completion status is contained within
       
   147  *	the RFD, the RFD must be dma_sync'ed to maintain a consistent
       
   148  *	view from software and hardware.
       
   149  *
       
   150  *	Under typical operation, the  receive unit (RU) is start once,
       
   151  *	and the controller happily fills RFDs as frames arrive.  If
       
   152  *	replacement RFDs cannot be allocated, or the RU goes non-active,
       
   153  *	the RU must be restarted.  Frame arrival generates an interrupt,
       
   154  *	and Rx indication and re-allocation happen in the same context,
       
   155  *	therefore no locking is required.  A software-generated interrupt
       
   156  *	is generated from the watchdog to recover from a failed allocation
       
   157  *	scenario where all Rx resources have been indicated and none re-
       
   158  *	placed.
       
   159  *
       
   160  *	V.   Miscellaneous
       
   161  *
       
   162  * 	VLAN offloading of tagging, stripping and filtering is not
       
   163  * 	supported, but driver will accommodate the extra 4-byte VLAN tag
       
   164  * 	for processing by upper layers.  Tx/Rx Checksum offloading is not
       
   165  * 	supported.  Tx Scatter/Gather is not supported.  Jumbo Frames is
       
   166  * 	not supported (hardware limitation).
       
   167  *
       
   168  * 	MagicPacket(tm) WoL support is enabled/disabled via ethtool.
       
   169  *
       
   170  * 	Thanks to JC (jchapman@katalix.com) for helping with
       
   171  * 	testing/troubleshooting the development driver.
       
   172  *
       
   173  * 	TODO:
       
   174  * 	o several entry points race with dev->close
       
   175  * 	o check for tx-no-resources/stop Q races with tx clean/wake Q
       
   176  *
       
   177  *	FIXES:
       
   178  * 2005/12/02 - Michael O'Donnell <Michael.ODonnell at stratus dot com>
       
   179  *	- Stratus87247: protect MDI control register manipulations
       
   180  */
       
   181 
       
   182 #include <linux/module.h>
       
   183 #include <linux/moduleparam.h>
       
   184 #include <linux/kernel.h>
       
   185 #include <linux/types.h>
       
   186 #include <linux/slab.h>
       
   187 #include <linux/delay.h>
       
   188 #include <linux/init.h>
       
   189 #include <linux/pci.h>
       
   190 #include <linux/dma-mapping.h>
       
   191 #include <linux/netdevice.h>
       
   192 #include <linux/etherdevice.h>
       
   193 #include <linux/mii.h>
       
   194 #include <linux/if_vlan.h>
       
   195 #include <linux/skbuff.h>
       
   196 #include <linux/ethtool.h>
       
   197 #include <linux/string.h>
       
   198 #include <asm/unaligned.h>
       
   199 
       
   200 // EtherCAT includes
       
   201 #include "../globals.h"
       
   202 #include "ecdev.h"
       
   203 
       
   204 #define DRV_NAME		"ec_e100"
       
   205 #define DRV_EXT			"-NAPI"
       
   206 #define DRV_VERSION		"3.5.17-k2"DRV_EXT
       
   207 #define DRV_DESCRIPTION		"Intel(R) PRO/100 Network Driver"
       
   208 #define DRV_COPYRIGHT		"Copyright(c) 1999-2006 Intel Corporation"
       
   209 #define PFX			DRV_NAME ": "
       
   210 
       
   211 #define E100_WATCHDOG_PERIOD	(2 * HZ)
       
   212 #define E100_NAPI_WEIGHT	16
       
   213 
       
   214 MODULE_DESCRIPTION(DRV_DESCRIPTION);
       
   215 MODULE_AUTHOR("Florian Pose <fp@igh-essen.com>");
       
   216 MODULE_LICENSE("GPL");
       
   217 MODULE_VERSION(DRV_VERSION ", master " EC_MASTER_VERSION);
       
   218 
       
/* EtherCAT master entry point: cyclic poll routine called by the master
 * instead of the interrupt handler (defined later in this file). */
void e100_ec_poll(struct net_device *);

/* Module parameters: message verbosity and whether to tolerate a bad
 * EEPROM checksum (presumably for boards with blank or corrupted
 * EEPROMs — see eeprom_bad_csum_allow usage in the probe path). */
static int debug = 3;
static int eeprom_bad_csum_allow = 0;
module_param(debug, int, 0);
module_param(eeprom_bad_csum_allow, int, 0);
MODULE_PARM_DESC(debug, "Debug level (0=none,...,16=all)");
MODULE_PARM_DESC(eeprom_bad_csum_allow, "Allow bad eeprom checksums");
/* Conditional debug printk: emits only when the NETIF_MSG_<nlevel> bit
 * is set in nic->msg_enable.  Requires a local variable 'nic' in scope
 * at the expansion site. */
#define DPRINTK(nlevel, klevel, fmt, args...) \
	(void)((NETIF_MSG_##nlevel & nic->msg_enable) && \
	printk(KERN_##klevel PFX "%s: %s: " fmt, nic->netdev->name, \
		__FUNCTION__ , ## args))

/* PCI match entry for one Intel 8255x device ID; 'ich' lands in the
 * table's driver_data slot (assumed to select ICH-specific handling —
 * confirm against the probe code, which is outside this view). */
#define INTEL_8255X_ETHERNET_DEVICE(device_id, ich) {\
	PCI_VENDOR_ID_INTEL, device_id, PCI_ANY_ID, PCI_ANY_ID, \
	PCI_CLASS_NETWORK_ETHERNET << 8, 0xFFFF00, ich }
static struct pci_device_id e100_id_table[] = {
	INTEL_8255X_ETHERNET_DEVICE(0x1029, 0),
	INTEL_8255X_ETHERNET_DEVICE(0x1030, 0),
	INTEL_8255X_ETHERNET_DEVICE(0x1031, 3),
	INTEL_8255X_ETHERNET_DEVICE(0x1032, 3),
	INTEL_8255X_ETHERNET_DEVICE(0x1033, 3),
	INTEL_8255X_ETHERNET_DEVICE(0x1034, 3),
	INTEL_8255X_ETHERNET_DEVICE(0x1038, 3),
	INTEL_8255X_ETHERNET_DEVICE(0x1039, 4),
	INTEL_8255X_ETHERNET_DEVICE(0x103A, 4),
	INTEL_8255X_ETHERNET_DEVICE(0x103B, 4),
	INTEL_8255X_ETHERNET_DEVICE(0x103C, 4),
	INTEL_8255X_ETHERNET_DEVICE(0x103D, 4),
	INTEL_8255X_ETHERNET_DEVICE(0x103E, 4),
	INTEL_8255X_ETHERNET_DEVICE(0x1050, 5),
	INTEL_8255X_ETHERNET_DEVICE(0x1051, 5),
	INTEL_8255X_ETHERNET_DEVICE(0x1052, 5),
	INTEL_8255X_ETHERNET_DEVICE(0x1053, 5),
	INTEL_8255X_ETHERNET_DEVICE(0x1054, 5),
	INTEL_8255X_ETHERNET_DEVICE(0x1055, 5),
	INTEL_8255X_ETHERNET_DEVICE(0x1056, 5),
	INTEL_8255X_ETHERNET_DEVICE(0x1057, 5),
	INTEL_8255X_ETHERNET_DEVICE(0x1059, 0),
	INTEL_8255X_ETHERNET_DEVICE(0x1064, 6),
	INTEL_8255X_ETHERNET_DEVICE(0x1065, 6),
	INTEL_8255X_ETHERNET_DEVICE(0x1066, 6),
	INTEL_8255X_ETHERNET_DEVICE(0x1067, 6),
	INTEL_8255X_ETHERNET_DEVICE(0x1068, 6),
	INTEL_8255X_ETHERNET_DEVICE(0x1069, 6),
	INTEL_8255X_ETHERNET_DEVICE(0x106A, 6),
	INTEL_8255X_ETHERNET_DEVICE(0x106B, 6),
	INTEL_8255X_ETHERNET_DEVICE(0x1091, 7),
	INTEL_8255X_ETHERNET_DEVICE(0x1092, 7),
	INTEL_8255X_ETHERNET_DEVICE(0x1093, 7),
	INTEL_8255X_ETHERNET_DEVICE(0x1094, 7),
	INTEL_8255X_ETHERNET_DEVICE(0x1095, 7),
	INTEL_8255X_ETHERNET_DEVICE(0x1209, 0),
	INTEL_8255X_ETHERNET_DEVICE(0x1229, 0),
	INTEL_8255X_ETHERNET_DEVICE(0x2449, 2),
	INTEL_8255X_ETHERNET_DEVICE(0x2459, 2),
	INTEL_8255X_ETHERNET_DEVICE(0x245D, 2),
	INTEL_8255X_ETHERNET_DEVICE(0x27DC, 7),
	{ 0, }
};
// prevent from being loaded automatically
//MODULE_DEVICE_TABLE(pci, e100_id_table);
       
   281 
       
/* MAC variants of the 8255x family (82557 through 82551/ICH).
 * NOTE(review): the numeric values look like PCI revision IDs mapped
 * onto controller steppings — confirm against the probe code, which
 * is outside this view. */
enum mac {
	mac_82557_D100_A  = 0,
	mac_82557_D100_B  = 1,
	mac_82557_D100_C  = 2,
	mac_82558_D101_A4 = 4,
	mac_82558_D101_B0 = 5,
	mac_82559_D101M   = 8,
	mac_82559_D101S   = 9,
	mac_82550_D102    = 12,
	mac_82550_D102_C  = 13,
	mac_82551_E       = 14,
	mac_82551_F       = 15,
	mac_82551_10      = 16,
	mac_unknown       = 0xFF,
};

/* Known PHY identifiers; presumably the concatenated MII PHY ID
 * registers — verify against the PHY detection code. */
enum phy {
	phy_100a     = 0x000003E0,
	phy_100c     = 0x035002A8,
	phy_82555_tx = 0x015002A8,
	phy_nsc_tx   = 0x5C002000,
	phy_82562_et = 0x033002A8,
	phy_82562_em = 0x032002A8,
	phy_82562_ek = 0x031002A8,
	phy_82562_eh = 0x017002A8,
	phy_unknown  = 0xFFFFFFFF,
};
       
   309 
       
/* CSR (Control/Status Registers) — memory-mapped register layout of
 * the 8255x.  Field order and sizes mirror the hardware; do not
 * reorder or pad. */
struct csr {
	struct {
		u8 status;      /* SCB status byte (see enum scb_status) */
		u8 stat_ack;    /* interrupt status/ack (enum scb_stat_ack) */
		u8 cmd_lo;      /* CU/RU commands (enum scb_cmd_lo) */
		u8 cmd_hi;      /* interrupt masking (enum scb_cmd_hi) */
		u32 gen_ptr;    /* general pointer, DMA address operand */
	} scb;
	u32 port;               /* PORT register: reset/self-test commands */
	u16 flash_ctrl;
	u8 eeprom_ctrl_lo;      /* bit-bang EEPROM interface (enum eeprom_ctrl_lo) */
	u8 eeprom_ctrl_hi;
	u32 mdi_ctrl;           /* MDI (PHY) access register (enum mdi_ctrl) */
	u32 rx_dma_count;
};

/* SCB status: receive unit state lives in bits 2..5. */
enum scb_status {
	rus_ready        = 0x10,
	rus_mask         = 0x3C,
};

/* Software-tracked receive unit state. */
enum ru_state  {
	RU_SUSPENDED = 0,
	RU_RUNNING	 = 1,
	RU_UNINITIALIZED = -1,
};

/* Interrupt cause bits in scb.stat_ack; writing a bit back acks it. */
enum scb_stat_ack {
	stat_ack_not_ours    = 0x00,
	stat_ack_sw_gen      = 0x04,
	stat_ack_rnr         = 0x10,
	stat_ack_cu_idle     = 0x20,
	stat_ack_frame_rx    = 0x40,
	stat_ack_cu_cmd_done = 0x80,
	stat_ack_not_present = 0xFF,
	stat_ack_rx = (stat_ack_sw_gen | stat_ack_rnr | stat_ack_frame_rx),
	stat_ack_tx = (stat_ack_cu_idle | stat_ack_cu_cmd_done),
};

/* scb.cmd_hi values: mask/unmask the interrupt line or raise a
 * software-generated interrupt. */
enum scb_cmd_hi {
	irq_mask_none = 0x00,
	irq_mask_all  = 0x01,
	irq_sw_gen    = 0x02,
};

/* scb.cmd_lo values: commands for the command unit (cuc_*) and
 * receive unit (ruc_*). */
enum scb_cmd_lo {
	cuc_nop        = 0x00,
	ruc_start      = 0x01,
	ruc_load_base  = 0x06,
	cuc_start      = 0x10,
	cuc_resume     = 0x20,
	cuc_dump_addr  = 0x40,
	cuc_dump_stats = 0x50,
	cuc_load_base  = 0x60,
	cuc_dump_reset = 0x70,
};

/* Completion signatures the controller writes after a stats dump. */
enum cuc_dump {
	cuc_dump_complete       = 0x0000A005,
	cuc_dump_reset_complete = 0x0000A007,
};

/* Commands written to the PORT register. */
enum port {
	software_reset  = 0x0000,
	selftest        = 0x0001,
	selective_reset = 0x0002,
};

/* Bit-bang lines of the EEPROM interface: clock, chip select,
 * data in (to EEPROM), data out (from EEPROM). */
enum eeprom_ctrl_lo {
	eesk = 0x01,
	eecs = 0x02,
	eedi = 0x04,
	eedo = 0x08,
};

/* MDI control register bits: operation select plus the ready flag
 * the hardware sets when an MDI cycle completes. */
enum mdi_ctrl {
	mdi_write = 0x04000000,
	mdi_read  = 0x08000000,
	mdi_ready = 0x10000000,
};

/* Serial EEPROM opcodes (93Cxx-style command set). */
enum eeprom_op {
	op_write = 0x05,
	op_read  = 0x06,
	op_ewds  = 0x10,
	op_ewen  = 0x13,
};

/* Word offsets of interesting EEPROM fields. */
enum eeprom_offsets {
	eeprom_cnfg_mdix  = 0x03,
	eeprom_id         = 0x0A,
	eeprom_config_asf = 0x0D,
	eeprom_smbus_addr = 0x90,
};

enum eeprom_cnfg_mdix {
	eeprom_mdix_enabled = 0x0080,
};

enum eeprom_id {
	eeprom_id_wol = 0x0020,
};

enum eeprom_config_asf {
	eeprom_asf = 0x8000,
	eeprom_gcl = 0x4000,
};
       
/* Command block completion bits set by hardware in cb->status. */
enum cb_status {
	cb_complete = 0x8000,
	cb_ok       = 0x2000,
};

/* Command block command word: low bits select the command type,
 * high bits are control flags (interrupt, suspend, end-of-list). */
enum cb_command {
	cb_nop    = 0x0000,
	cb_iaaddr = 0x0001,
	cb_config = 0x0002,
	cb_multi  = 0x0003,
	cb_tx     = 0x0004,
	cb_ucode  = 0x0005,
	cb_dump   = 0x0006,
	cb_tx_sf  = 0x0008,
	cb_cid    = 0x1f00,
	cb_i      = 0x2000,
	cb_s      = 0x4000,
	cb_el     = 0x8000,
};

/* Receive Frame Descriptor — shared with hardware, so it must be
 * dma_sync'ed before the status/actual_size fields are read. */
struct rfd {
	u16 status;
	u16 command;
	u32 link;           /* bus address of the next RFD in the ring */
	u32 rbd;
	u16 actual_size;    /* bytes actually received */
	u16 size;           /* buffer size available to hardware */
};

/* Software bookkeeping for one receive buffer: ring linkage, the skb
 * holding RFD + data, and its DMA mapping. */
struct rx {
	struct rx *next, *prev;
	struct sk_buff *skb;
	dma_addr_t dma_addr;
};

/* X(a,b) swaps bitfield declaration order on big-endian machines so
 * struct config bytes have the same on-wire layout on either
 * endianness. */
#if defined(__BIG_ENDIAN_BITFIELD)
#define X(a,b)	b,a
#else
#define X(a,b)	a,b
#endif
/* Hardware configuration block, issued via a cb_config command.
 * One bitfield group per config byte; the leading comment on each
 * line is the byte offset. */
struct config {
/*0*/	u8 X(byte_count:6, pad0:2);
/*1*/	u8 X(X(rx_fifo_limit:4, tx_fifo_limit:3), pad1:1);
/*2*/	u8 adaptive_ifs;
/*3*/	u8 X(X(X(X(mwi_enable:1, type_enable:1), read_align_enable:1),
	   term_write_cache_line:1), pad3:4);
/*4*/	u8 X(rx_dma_max_count:7, pad4:1);
/*5*/	u8 X(tx_dma_max_count:7, dma_max_count_enable:1);
/*6*/	u8 X(X(X(X(X(X(X(late_scb_update:1, direct_rx_dma:1),
	   tno_intr:1), cna_intr:1), standard_tcb:1), standard_stat_counter:1),
	   rx_discard_overruns:1), rx_save_bad_frames:1);
/*7*/	u8 X(X(X(X(X(rx_discard_short_frames:1, tx_underrun_retry:2),
	   pad7:2), rx_extended_rfd:1), tx_two_frames_in_fifo:1),
	   tx_dynamic_tbd:1);
/*8*/	u8 X(X(mii_mode:1, pad8:6), csma_disabled:1);
/*9*/	u8 X(X(X(X(X(rx_tcpudp_checksum:1, pad9:3), vlan_arp_tco:1),
	   link_status_wake:1), arp_wake:1), mcmatch_wake:1);
/*10*/	u8 X(X(X(pad10:3, no_source_addr_insertion:1), preamble_length:2),
	   loopback:2);
/*11*/	u8 X(linear_priority:3, pad11:5);
/*12*/	u8 X(X(linear_priority_mode:1, pad12:3), ifs:4);
/*13*/	u8 ip_addr_lo;
/*14*/	u8 ip_addr_hi;
/*15*/	u8 X(X(X(X(X(X(X(promiscuous_mode:1, broadcast_disabled:1),
	   wait_after_win:1), pad15_1:1), ignore_ul_bit:1), crc_16_bit:1),
	   pad15_2:1), crs_or_cdt:1);
/*16*/	u8 fc_delay_lo;
/*17*/	u8 fc_delay_hi;
/*18*/	u8 X(X(X(X(X(rx_stripping:1, tx_padding:1), rx_crc_transfer:1),
	   rx_long_ok:1), fc_priority_threshold:3), pad18:1);
/*19*/	u8 X(X(X(X(X(X(X(addr_wake:1, magic_packet_disable:1),
	   fc_disable:1), fc_restop:1), fc_restart:1), fc_reject:1),
	   full_duplex_force:1), full_duplex_pin:1);
/*20*/	u8 X(X(X(pad20_1:5, fc_priority_location:1), multi_ia:1), pad20_2:1);
/*21*/	u8 X(X(pad21_1:3, multicast_all:1), pad21_2:4);
/*22*/	u8 X(X(rx_d102_mode:1, rx_vlan_drop:1), pad22:6);
	u8 pad_d102[9];
};
       
   497 
       
#define E100_MAX_MULTICAST_ADDRS	64
/* Multicast address list for a cb_multi command: count in bytes,
 * followed by packed 6-byte addresses. */
struct multi {
	u16 count;
	u8 addr[E100_MAX_MULTICAST_ADDRS * ETH_ALEN + 2/*pad*/];
};

/* Important: keep total struct u32-aligned */
#define UCODE_SIZE			134
/* Command Block: one entry of the CBL ring shared with hardware.
 * The leading status/command/link fields match the hardware layout;
 * the union carries the per-command payload; the trailing fields
 * (next/prev/dma_addr/skb) are software-only bookkeeping. */
struct cb {
	u16 status;
	u16 command;
	u32 link;           /* bus address of the next CB */
	union {
		u8 iaaddr[ETH_ALEN];        /* cb_iaaddr: individual address */
		u32 ucode[UCODE_SIZE];      /* cb_ucode: microcode download */
		struct config config;       /* cb_config */
		struct multi multi;         /* cb_multi */
		struct {                    /* cb_tx: transmit control block */
			u32 tbd_array;
			u16 tcb_byte_count;
			u8 threshold;
			u8 tbd_count;
			struct {
				u32 buf_addr;   /* DMA address of skb data */
				u16 size;
				u16 eol;
			} tbd;
		} tcb;
		u32 dump_buffer_addr;       /* cb_dump */
	} u;
	struct cb *next, *prev;
	dma_addr_t dma_addr;
	struct sk_buff *skb;            /* Tx skb to free on completion */
};

/* Loopback modes selectable via the config block's loopback field. */
enum loopback {
	lb_none = 0, lb_mac = 1, lb_phy = 3,
};

/* Hardware statistics dump layout (cuc_dump_stats); 'complete' is
 * overwritten with a cuc_dump signature when the dump finishes. */
struct stats {
	u32 tx_good_frames, tx_max_collisions, tx_late_collisions,
		tx_underruns, tx_lost_crs, tx_deferred, tx_single_collisions,
		tx_multiple_collisions, tx_total_collisions;
	u32 rx_good_frames, rx_crc_errors, rx_alignment_errors,
		rx_resource_errors, rx_overrun_errors, rx_cdt_errors,
		rx_short_frame_errors;
	u32 fc_xmt_pause, fc_rcv_pause, fc_rcv_unsupported;
	u16 xmt_tco_frames, rcv_tco_frames;
	u32 complete;
};

/* DMA-coherent scratch memory shared with the controller: self-test
 * result area, statistics, and a register-dump buffer. */
struct mem {
	struct {
		u32 signature;      /* written non-zero by a passing self-test */
		u32 result;         /* 0 on success */
	} selftest;
	struct stats stats;
	u8 dump_buf[596];
};

/* Bounds and current value for a tunable ring size. */
struct param_range {
	u32 min;
	u32 max;
	u32 count;
};

/* Tunable ring sizes: receive frame descriptors and command blocks. */
struct params {
	struct param_range rfds;
	struct param_range cbs;
};
       
   568 
       
/* Per-adapter driver state. */
struct nic {
	/* Begin: frequently used values: keep adjacent for cache effect */
	u32 msg_enable				____cacheline_aligned;
	struct net_device *netdev;
	struct pci_dev *pdev;

	struct rx *rxs				____cacheline_aligned;
	struct rx *rx_to_use;       /* next RFD to hand to hardware */
	struct rx *rx_to_clean;     /* next RFD to check for completion */
	struct rfd blank_rfd;       /* template copied into fresh Rx skbs */
	enum ru_state ru_running;

	spinlock_t cb_lock			____cacheline_aligned;
	spinlock_t cmd_lock;        /* serializes SCB command register access */
	struct csr __iomem *csr;    /* mapped Control/Status Registers */
	enum scb_cmd_lo cuc_cmd;    /* cuc_start or cuc_resume, per MAC type */
	unsigned int cbs_avail;     /* free CBs remaining in the ring */
	struct cb *cbs;
	struct cb *cb_to_use;       /* next CB to queue a command into */
	struct cb *cb_to_send;      /* first CB to (re)start on resume failure */
	struct cb *cb_to_clean;     /* next CB to check for completion */
	u16 tx_command;             /* cb_tx or cb_tx_sf */
	/* End: frequently used values: keep adjacent for cache effect */

	enum {
		ich                = (1 << 0),
		promiscuous        = (1 << 1),
		multicast_all      = (1 << 2),
		wol_magic          = (1 << 3),
		ich_10h_workaround = (1 << 4),
	} flags					____cacheline_aligned;

	enum mac mac;
	enum phy phy;
	struct params params;
	struct net_device_stats net_stats;
	struct timer_list watchdog;
	struct timer_list blink_timer;
	struct mii_if_info mii;
	struct work_struct tx_timeout_task;
	enum loopback loopback;

	struct mem *mem;            /* DMA-coherent scratch area */
	dma_addr_t dma_addr;        /* bus address of 'mem' */

	dma_addr_t cbs_dma_addr;    /* bus address of the CB ring */
	u8 adaptive_ifs;
	u8 tx_threshold;
	u32 tx_frames;
	u32 tx_collisions;
	u32 tx_deferred;
	u32 tx_single_collisions;
	u32 tx_multiple_collisions;
	u32 tx_fc_pause;
	u32 tx_tco_frames;

	u32 rx_fc_pause;
	u32 rx_fc_unsupported;
	u32 rx_tco_frames;
	u32 rx_over_length_errors;

	u8 rev_id;
	u16 leds;
	u16 eeprom_wc;              /* EEPROM word count */
	u16 eeprom[256];            /* cached EEPROM contents */
	spinlock_t mdio_lock;       /* Stratus87247: protects MDI ctrl register */

    /* EtherCAT: non-NULL when this NIC is claimed by the EtherCAT
     * master; the driver then skips IRQ/netdev handling. */
    ec_device_t *ecdev;
    /* EtherCAT: jiffies timestamp of the last watchdog-style poll. */
    unsigned long ec_watchdog_jiffies;
};
       
   639 
       
/* Force posted PCI writes to complete by performing a benign read of
 * the SCB status register; call after MMIO writes whose ordering
 * relative to a following delay matters. */
static inline void e100_write_flush(struct nic *nic)
{
	/* Flush previous PCI writes through intermediate bridges
	 * by doing a benign read */
	(void)readb(&nic->csr->scb.status);
}
       
   646 
       
   647 static void e100_enable_irq(struct nic *nic)
       
   648 {
       
   649 	unsigned long flags;
       
   650 
       
   651     if (nic->ecdev)
       
   652         return;
       
   653 
       
   654 	spin_lock_irqsave(&nic->cmd_lock, flags);
       
   655 	writeb(irq_mask_none, &nic->csr->scb.cmd_hi);
       
   656 	e100_write_flush(nic);
       
   657 	spin_unlock_irqrestore(&nic->cmd_lock, flags);
       
   658 }
       
   659 
       
   660 static void e100_disable_irq(struct nic *nic)
       
   661 {
       
   662 	unsigned long flags;
       
   663 
       
   664     if (nic->ecdev)
       
   665         return;
       
   666 
       
   667 	spin_lock_irqsave(&nic->cmd_lock, flags);
       
   668 	writeb(irq_mask_all, &nic->csr->scb.cmd_hi);
       
   669 	e100_write_flush(nic);
       
   670 	spin_unlock_irqrestore(&nic->cmd_lock, flags);
       
   671 }
       
   672 
       
/* Fully reset the controller.
 *
 * A selective reset first idles the CU and RU so the device stops
 * mastering the PCI bus; a software reset then returns it to power-on
 * defaults.  Each PORT write is flushed and followed by a 20 us delay
 * before the next step.  The interrupt line is re-masked at the end
 * because a reset leaves it unmasked.
 */
static void e100_hw_reset(struct nic *nic)
{
	/* Put CU and RU into idle with a selective reset to get
	 * device off of PCI bus */
	writel(selective_reset, &nic->csr->port);
	e100_write_flush(nic); udelay(20);

	/* Now fully reset device */
	writel(software_reset, &nic->csr->port);
	e100_write_flush(nic); udelay(20);

	/* Mask off our interrupt line - it's unmasked after reset */
	e100_disable_irq(nic);
}
       
   687 
       
/* Run the controller's built-in self-test.
 *
 * The self-test DMAs its results into nic->mem->selftest, so passing
 * also proves the device can DMA to/from host memory.  The result
 * fields are pre-seeded (signature=0, result=all-ones) so that a
 * device that never writes back is detected as a timeout.
 *
 * Returns 0 on success, -ETIMEDOUT on failure or timeout.
 */
static int e100_self_test(struct nic *nic)
{
	u32 dma_addr = nic->dma_addr + offsetof(struct mem, selftest);

	/* Passing the self-test is a pretty good indication
	 * that the device can DMA to/from host memory */

	nic->mem->selftest.signature = 0;
	nic->mem->selftest.result = 0xFFFFFFFF;

	writel(selftest | dma_addr, &nic->csr->port);
	e100_write_flush(nic);
	/* Wait 10 msec for self-test to complete */
	msleep(10);

	/* Interrupts are enabled after self-test */
	e100_disable_irq(nic);

	/* Check results of self-test */
	if(nic->mem->selftest.result != 0) {
		DPRINTK(HW, ERR, "Self-test failed: result=0x%08X\n",
			nic->mem->selftest.result);
		return -ETIMEDOUT;
	}
	if(nic->mem->selftest.signature == 0) {
		DPRINTK(HW, ERR, "Self-test failed: timed out\n");
		return -ETIMEDOUT;
	}

	return 0;
}
       
   719 
       
   720 static void e100_eeprom_write(struct nic *nic, u16 addr_len, u16 addr, u16 data)
       
   721 {
       
   722 	u32 cmd_addr_data[3];
       
   723 	u8 ctrl;
       
   724 	int i, j;
       
   725 
       
   726 	/* Three cmds: write/erase enable, write data, write/erase disable */
       
   727 	cmd_addr_data[0] = op_ewen << (addr_len - 2);
       
   728 	cmd_addr_data[1] = (((op_write << addr_len) | addr) << 16) |
       
   729 		cpu_to_le16(data);
       
   730 	cmd_addr_data[2] = op_ewds << (addr_len - 2);
       
   731 
       
   732 	/* Bit-bang cmds to write word to eeprom */
       
   733 	for(j = 0; j < 3; j++) {
       
   734 
       
   735 		/* Chip select */
       
   736 		writeb(eecs | eesk, &nic->csr->eeprom_ctrl_lo);
       
   737 		e100_write_flush(nic); udelay(4);
       
   738 
       
   739 		for(i = 31; i >= 0; i--) {
       
   740 			ctrl = (cmd_addr_data[j] & (1 << i)) ?
       
   741 				eecs | eedi : eecs;
       
   742 			writeb(ctrl, &nic->csr->eeprom_ctrl_lo);
       
   743 			e100_write_flush(nic); udelay(4);
       
   744 
       
   745 			writeb(ctrl | eesk, &nic->csr->eeprom_ctrl_lo);
       
   746 			e100_write_flush(nic); udelay(4);
       
   747 		}
       
   748 		/* Wait 10 msec for cmd to complete */
       
   749 		msleep(10);
       
   750 
       
   751 		/* Chip deselect */
       
   752 		writeb(0, &nic->csr->eeprom_ctrl_lo);
       
   753 		e100_write_flush(nic); udelay(4);
       
   754 	}
       
   755 };
       
   756 
       
   757 /* General technique stolen from the eepro100 driver - very clever */
       
   758 static u16 e100_eeprom_read(struct nic *nic, u16 *addr_len, u16 addr)
       
   759 {
       
   760 	u32 cmd_addr_data;
       
   761 	u16 data = 0;
       
   762 	u8 ctrl;
       
   763 	int i;
       
   764 
       
   765 	cmd_addr_data = ((op_read << *addr_len) | addr) << 16;
       
   766 
       
   767 	/* Chip select */
       
   768 	writeb(eecs | eesk, &nic->csr->eeprom_ctrl_lo);
       
   769 	e100_write_flush(nic); udelay(4);
       
   770 
       
   771 	/* Bit-bang to read word from eeprom */
       
   772 	for(i = 31; i >= 0; i--) {
       
   773 		ctrl = (cmd_addr_data & (1 << i)) ? eecs | eedi : eecs;
       
   774 		writeb(ctrl, &nic->csr->eeprom_ctrl_lo);
       
   775 		e100_write_flush(nic); udelay(4);
       
   776 
       
   777 		writeb(ctrl | eesk, &nic->csr->eeprom_ctrl_lo);
       
   778 		e100_write_flush(nic); udelay(4);
       
   779 
       
   780 		/* Eeprom drives a dummy zero to EEDO after receiving
       
   781 		 * complete address.  Use this to adjust addr_len. */
       
   782 		ctrl = readb(&nic->csr->eeprom_ctrl_lo);
       
   783 		if(!(ctrl & eedo) && i > 16) {
       
   784 			*addr_len -= (i - 16);
       
   785 			i = 17;
       
   786 		}
       
   787 
       
   788 		data = (data << 1) | (ctrl & eedo ? 1 : 0);
       
   789 	}
       
   790 
       
   791 	/* Chip deselect */
       
   792 	writeb(0, &nic->csr->eeprom_ctrl_lo);
       
   793 	e100_write_flush(nic); udelay(4);
       
   794 
       
   795 	return le16_to_cpu(data);
       
   796 };
       
   797 
       
/* Load entire EEPROM image into driver cache and validate checksum */
/* Returns 0 on success (or on bad checksum when the
 * eeprom_bad_csum_allow module parameter is set), -EAGAIN otherwise. */
static int e100_eeprom_load(struct nic *nic)
{
	u16 addr, addr_len = 8, checksum = 0;

	/* Try reading with an 8-bit addr len to discover actual addr len */
	e100_eeprom_read(nic, &addr_len, 0);
	nic->eeprom_wc = 1 << addr_len;	/* word count = 2^addr_len */

	for(addr = 0; addr < nic->eeprom_wc; addr++) {
		nic->eeprom[addr] = e100_eeprom_read(nic, &addr_len, addr);
		/* The last word holds the checksum itself; don't sum it */
		if(addr < nic->eeprom_wc - 1)
			checksum += cpu_to_le16(nic->eeprom[addr]);
	}

	/* The checksum, stored in the last word, is calculated such that
	 * the sum of words should be 0xBABA */
	/* NOTE(review): the sum is accumulated in little-endian word
	 * order (cpu_to_le16 above) and converted back here, matching
	 * the raw words cached in nic->eeprom[]. */
	checksum = le16_to_cpu(0xBABA - checksum);
	if(checksum != nic->eeprom[nic->eeprom_wc - 1]) {
		DPRINTK(PROBE, ERR, "EEPROM corrupted\n");
		if (!eeprom_bad_csum_allow)
			return -EAGAIN;
	}

	return 0;
}
       
   824 
       
/* Save (portion of) driver EEPROM cache to device and update checksum */
/* Writes @count cached words starting at @start back to the EEPROM,
 * then recomputes and writes the checksum word.  Returns 0 on success,
 * -EINVAL if the requested range would reach the checksum word (the
 * last word), which this function maintains itself. */
static int e100_eeprom_save(struct nic *nic, u16 start, u16 count)
{
	u16 addr, addr_len = 8, checksum = 0;

	/* Try reading with an 8-bit addr len to discover actual addr len */
	e100_eeprom_read(nic, &addr_len, 0);
	nic->eeprom_wc = 1 << addr_len;

	/* Callers may write at most up to word eeprom_wc - 2; the last
	 * word is reserved for the checksum written below. */
	if(start + count >= nic->eeprom_wc)
		return -EINVAL;

	for(addr = start; addr < start + count; addr++)
		e100_eeprom_write(nic, addr_len, addr, nic->eeprom[addr]);

	/* The checksum, stored in the last word, is calculated such that
	 * the sum of words should be 0xBABA */
	for(addr = 0; addr < nic->eeprom_wc - 1; addr++)
		checksum += cpu_to_le16(nic->eeprom[addr]);
	nic->eeprom[nic->eeprom_wc - 1] = le16_to_cpu(0xBABA - checksum);
	e100_eeprom_write(nic, addr_len, nic->eeprom_wc - 1,
		nic->eeprom[nic->eeprom_wc - 1]);

	return 0;
}
       
   850 
       
   851 #define E100_WAIT_SCB_TIMEOUT 20000 /* we might have to wait 100ms!!! */
       
   852 #define E100_WAIT_SCB_FAST 20       /* delay like the old code */
       
/* Issue a command to the device's System Control Block (SCB): wait for
 * the previous command to be accepted (cmd_lo reads back 0), load the
 * general pointer if the command needs one, then write the command
 * byte.  Returns 0 on success, -EAGAIN if the SCB never cleared.
 * NOTE(review): cmd_lock is skipped when the device is attached to an
 * EtherCAT master (nic->ecdev) — presumably access is then serialized
 * by the EtherCAT stack; confirm against the ecdev call paths. */
static int e100_exec_cmd(struct nic *nic, u8 cmd, dma_addr_t dma_addr)
{
	unsigned long flags = 0;
	unsigned int i;
	int err = 0;

	if (!nic->ecdev)
		spin_lock_irqsave(&nic->cmd_lock, flags);

	/* Previous command is accepted when SCB clears */
	for(i = 0; i < E100_WAIT_SCB_TIMEOUT; i++) {
		if(likely(!readb(&nic->csr->scb.cmd_lo)))
			break;
		cpu_relax();
		/* Spin fast for the first E100_WAIT_SCB_FAST
		 * iterations, then back off with udelay(5). */
		if(unlikely(i > E100_WAIT_SCB_FAST))
			udelay(5);
	}
	if(unlikely(i == E100_WAIT_SCB_TIMEOUT)) {
		err = -EAGAIN;
		goto err_unlock;
	}

	/* A resume reuses the pointer loaded by the previous command */
	if(unlikely(cmd != cuc_resume))
		writel(dma_addr, &nic->csr->scb.gen_ptr);
	writeb(cmd, &nic->csr->scb.cmd_lo);

err_unlock:
	if (!nic->ecdev)
		spin_unlock_irqrestore(&nic->cmd_lock, flags);

	return err;
}
       
   885 
       
/* Take the next free command block from the ring, let @cb_prepare fill
 * it in, and kick the command unit.  Returns 0 on success, -ENOMEM if
 * no cb was free, or -ENOSPC when this call consumed the last free cb
 * (the command is still queued; callers use -ENOSPC to stop the tx
 * queue).  NOTE(review): cb_lock is skipped for EtherCAT-attached
 * devices (nic->ecdev) — presumably serialized by the EtherCAT stack;
 * confirm against the ecdev call paths. */
static int e100_exec_cb(struct nic *nic, struct sk_buff *skb,
	void (*cb_prepare)(struct nic *, struct cb *, struct sk_buff *))
{
	struct cb *cb;
	unsigned long flags = 0;
	int err = 0;

	if (!nic->ecdev)
		spin_lock_irqsave(&nic->cb_lock, flags);

	if(unlikely(!nic->cbs_avail)) {
		err = -ENOMEM;
		goto err_unlock;
	}

	cb = nic->cb_to_use;
	nic->cb_to_use = cb->next;
	nic->cbs_avail--;
	cb->skb = skb;

	if(unlikely(!nic->cbs_avail))
		err = -ENOSPC;

	cb_prepare(nic, cb, skb);

	/* Order is important otherwise we'll be in a race with h/w:
	 * set S-bit in current first, then clear S-bit in previous. */
	cb->command |= cpu_to_le16(cb_s);
	wmb();
	cb->prev->command &= cpu_to_le16(~cb_s);

	/* Push any not-yet-issued cbs to the hardware */
	while(nic->cb_to_send != nic->cb_to_use) {
		if(unlikely(e100_exec_cmd(nic, nic->cuc_cmd,
			nic->cb_to_send->dma_addr))) {
			/* Ok, here's where things get sticky.  It's
			 * possible that we can't schedule the command
			 * because the controller is too busy, so
			 * let's just queue the command and try again
			 * when another command is scheduled. */
			if(err == -ENOSPC) {
				//request a reset
				schedule_work(&nic->tx_timeout_task);
			}
			break;
		} else {
			nic->cuc_cmd = cuc_resume;
			nic->cb_to_send = nic->cb_to_send->next;
		}
	}

err_unlock:
	if (!nic->ecdev)
		spin_unlock_irqrestore(&nic->cb_lock, flags);

	return err;
}
       
   942 
       
/* Read or write a PHY register through the MDI control register.
 * @dir is mdi_read or mdi_write.  Returns the data word read back from
 * mdi_ctrl, or 0 on ready-bit timeout — there is no separate error
 * indication.  NOTE(review): mdio_lock is skipped for EtherCAT-attached
 * devices (nic->ecdev) — presumably serialized by the EtherCAT stack;
 * confirm against the ecdev call paths. */
static u16 mdio_ctrl(struct nic *nic, u32 addr, u32 dir, u32 reg, u16 data)
{
	u32 data_out = 0;
	unsigned int i;
	unsigned long flags = 0;


	/*
	 * Stratus87247: we shouldn't be writing the MDI control
	 * register until the Ready bit shows True.  Also, since
	 * manipulation of the MDI control registers is a multi-step
	 * procedure it should be done under lock.
	 */
	if (!nic->ecdev)
		spin_lock_irqsave(&nic->mdio_lock, flags);
	for (i = 100; i; --i) {
		if (readl(&nic->csr->mdi_ctrl) & mdi_ready)
			break;
		udelay(20);
	}
	if (unlikely(!i)) {
		printk("e100.mdio_ctrl(%s) won't go Ready\n",
			nic->netdev->name );
		if (!nic->ecdev)
			spin_unlock_irqrestore(&nic->mdio_lock, flags);
		return 0;		/* No way to indicate timeout error */
	}
	writel((reg << 16) | (addr << 21) | dir | data, &nic->csr->mdi_ctrl);

	/* Poll for command completion (Ready bit set again) */
	for (i = 0; i < 100; i++) {
		udelay(20);
		if ((data_out = readl(&nic->csr->mdi_ctrl)) & mdi_ready)
			break;
	}
	if (!nic->ecdev)
		spin_unlock_irqrestore(&nic->mdio_lock, flags);
	DPRINTK(HW, DEBUG,
		"%s:addr=%d, reg=%d, data_in=0x%04X, data_out=0x%04X\n",
		dir == mdi_read ? "READ" : "WRITE", addr, reg, data, data_out);
	return (u16)data_out;
}
       
   984 
       
   985 static int mdio_read(struct net_device *netdev, int addr, int reg)
       
   986 {
       
   987 	return mdio_ctrl(netdev_priv(netdev), addr, mdi_read, reg, 0);
       
   988 }
       
   989 
       
   990 static void mdio_write(struct net_device *netdev, int addr, int reg, int data)
       
   991 {
       
   992 	mdio_ctrl(netdev_priv(netdev), addr, mdi_write, reg, data);
       
   993 }
       
   994 
       
   995 static void e100_get_defaults(struct nic *nic)
       
   996 {
       
   997 	struct param_range rfds = { .min = 16, .max = 256, .count = 256 };
       
   998 	struct param_range cbs  = { .min = 64, .max = 256, .count = 128 };
       
   999 
       
  1000 	pci_read_config_byte(nic->pdev, PCI_REVISION_ID, &nic->rev_id);
       
  1001 	/* MAC type is encoded as rev ID; exception: ICH is treated as 82559 */
       
  1002 	nic->mac = (nic->flags & ich) ? mac_82559_D101M : nic->rev_id;
       
  1003 	if(nic->mac == mac_unknown)
       
  1004 		nic->mac = mac_82557_D100_A;
       
  1005 
       
  1006 	nic->params.rfds = rfds;
       
  1007 	nic->params.cbs = cbs;
       
  1008 
       
  1009 	/* Quadwords to DMA into FIFO before starting frame transmit */
       
  1010 	nic->tx_threshold = 0xE0;
       
  1011 
       
  1012 	/* no interrupt for every tx completion, delay = 256us if not 557*/
       
  1013 	nic->tx_command = cpu_to_le16(cb_tx | cb_tx_sf |
       
  1014 		((nic->mac >= mac_82558_D101_A4) ? cb_cid : cb_i));
       
  1015 
       
  1016 	/* Template for a freshly allocated RFD */
       
  1017 	nic->blank_rfd.command = cpu_to_le16(cb_el);
       
  1018 	nic->blank_rfd.rbd = 0xFFFFFFFF;
       
  1019 	nic->blank_rfd.size = cpu_to_le16(VLAN_ETH_FRAME_LEN);
       
  1020 
       
  1021 	/* MII setup */
       
  1022 	nic->mii.phy_id_mask = 0x1F;
       
  1023 	nic->mii.reg_num_mask = 0x1F;
       
  1024 	nic->mii.dev = nic->netdev;
       
  1025 	nic->mii.mdio_read = mdio_read;
       
  1026 	nic->mii.mdio_write = mdio_write;
       
  1027 }
       
  1028 
       
/* cb_prepare callback: build a configure command block.  Fills
 * cb->u.config with the 8255x configuration bytes, then adjusts them
 * for adaptive IFS/loopback, forced full duplex, promiscuous and
 * multicast modes, Wake-on-LAN, and MAC-revision capabilities.  WoL is
 * always disabled for EtherCAT-attached devices (nic->ecdev). */
static void e100_configure(struct nic *nic, struct cb *cb, struct sk_buff *skb)
{
	struct config *config = &cb->u.config;
	u8 *c = (u8 *)config;

	cb->command = cpu_to_le16(cb_config);

	memset(config, 0, sizeof(struct config));

	config->byte_count = 0x16;		/* bytes in this struct */
	config->rx_fifo_limit = 0x8;		/* bytes in FIFO before DMA */
	config->direct_rx_dma = 0x1;		/* reserved */
	config->standard_tcb = 0x1;		/* 1=standard, 0=extended */
	config->standard_stat_counter = 0x1;	/* 1=standard, 0=extended */
	config->rx_discard_short_frames = 0x1;	/* 1=discard, 0=pass */
	config->tx_underrun_retry = 0x3;	/* # of underrun retries */
	config->mii_mode = 0x1;			/* 1=MII mode, 0=503 mode */
	config->pad10 = 0x6;
	config->no_source_addr_insertion = 0x1;	/* 1=no, 0=yes */
	config->preamble_length = 0x2;		/* 0=1, 1=3, 2=7, 3=15 bytes */
	config->ifs = 0x6;			/* x16 = inter frame spacing */
	config->ip_addr_hi = 0xF2;		/* ARP IP filter - not used */
	config->pad15_1 = 0x1;
	config->pad15_2 = 0x1;
	config->crs_or_cdt = 0x0;		/* 0=CRS only, 1=CRS or CDT */
	config->fc_delay_hi = 0x40;		/* time delay for fc frame */
	config->tx_padding = 0x1;		/* 1=pad short frames */
	config->fc_priority_threshold = 0x7;	/* 7=priority fc disabled */
	config->pad18 = 0x1;
	config->full_duplex_pin = 0x1;		/* 1=examine FDX# pin */
	config->pad20_1 = 0x1F;
	config->fc_priority_location = 0x1;	/* 1=byte#31, 0=byte#19 */
	config->pad21_1 = 0x5;

	config->adaptive_ifs = nic->adaptive_ifs;
	config->loopback = nic->loopback;

	if(nic->mii.force_media && nic->mii.full_duplex)
		config->full_duplex_force = 0x1;	/* 1=force, 0=auto */

	if(nic->flags & promiscuous || nic->loopback) {
		config->rx_save_bad_frames = 0x1;	/* 1=save, 0=discard */
		config->rx_discard_short_frames = 0x0;	/* 1=discard, 0=save */
		config->promiscuous_mode = 0x1;		/* 1=on, 0=off */
	}

	if(nic->flags & multicast_all)
		config->multicast_all = 0x1;		/* 1=accept, 0=no */

	/* disable WoL when up */
	if (nic->ecdev ||
            (netif_running(nic->netdev) || !(nic->flags & wol_magic)))
		config->magic_packet_disable = 0x1;	/* 1=off, 0=on */

	/* Newer (82558+) parts support extra features */
	if(nic->mac >= mac_82558_D101_A4) {
		config->fc_disable = 0x1;	/* 1=Tx fc off, 0=Tx fc on */
		config->mwi_enable = 0x1;	/* 1=enable, 0=disable */
		config->standard_tcb = 0x0;	/* 1=standard, 0=extended */
		config->rx_long_ok = 0x1;	/* 1=VLANs ok, 0=standard */
		if(nic->mac >= mac_82559_D101M)
			config->tno_intr = 0x1;		/* TCO stats enable */
		else
			config->standard_stat_counter = 0x0;
	}

	DPRINTK(HW, DEBUG, "[00-07]=%02X:%02X:%02X:%02X:%02X:%02X:%02X:%02X\n",
		c[0], c[1], c[2], c[3], c[4], c[5], c[6], c[7]);
	DPRINTK(HW, DEBUG, "[08-15]=%02X:%02X:%02X:%02X:%02X:%02X:%02X:%02X\n",
		c[8], c[9], c[10], c[11], c[12], c[13], c[14], c[15]);
	DPRINTK(HW, DEBUG, "[16-23]=%02X:%02X:%02X:%02X:%02X:%02X:%02X:%02X\n",
		c[16], c[17], c[18], c[19], c[20], c[21], c[22], c[23]);
}
       
  1101 
       
  1102 /********************************************************/
       
  1103 /*  Micro code for 8086:1229 Rev 8                      */
       
  1104 /********************************************************/
       
  1105 
       
  1106 /*  Parameter values for the D101M B-step  */
       
  1107 #define D101M_CPUSAVER_TIMER_DWORD		78
       
  1108 #define D101M_CPUSAVER_BUNDLE_DWORD		65
       
  1109 #define D101M_CPUSAVER_MIN_SIZE_DWORD		126
       
  1110 
       
  1111 #define D101M_B_RCVBUNDLE_UCODE \
       
  1112 {\
       
  1113 0x00550215, 0xFFFF0437, 0xFFFFFFFF, 0x06A70789, 0xFFFFFFFF, 0x0558FFFF, \
       
  1114 0x000C0001, 0x00101312, 0x000C0008, 0x00380216, \
       
  1115 0x0010009C, 0x00204056, 0x002380CC, 0x00380056, \
       
  1116 0x0010009C, 0x00244C0B, 0x00000800, 0x00124818, \
       
  1117 0x00380438, 0x00000000, 0x00140000, 0x00380555, \
       
  1118 0x00308000, 0x00100662, 0x00100561, 0x000E0408, \
       
  1119 0x00134861, 0x000C0002, 0x00103093, 0x00308000, \
       
  1120 0x00100624, 0x00100561, 0x000E0408, 0x00100861, \
       
  1121 0x000C007E, 0x00222C21, 0x000C0002, 0x00103093, \
       
  1122 0x00380C7A, 0x00080000, 0x00103090, 0x00380C7A, \
       
  1123 0x00000000, 0x00000000, 0x00000000, 0x00000000, \
       
  1124 0x0010009C, 0x00244C2D, 0x00010004, 0x00041000, \
       
  1125 0x003A0437, 0x00044010, 0x0038078A, 0x00000000, \
       
  1126 0x00100099, 0x00206C7A, 0x0010009C, 0x00244C48, \
       
  1127 0x00130824, 0x000C0001, 0x00101213, 0x00260C75, \
       
  1128 0x00041000, 0x00010004, 0x00130826, 0x000C0006, \
       
  1129 0x002206A8, 0x0013C926, 0x00101313, 0x003806A8, \
       
  1130 0x00000000, 0x00000000, 0x00000000, 0x00000000, \
       
  1131 0x00000000, 0x00000000, 0x00000000, 0x00000000, \
       
  1132 0x00080600, 0x00101B10, 0x00050004, 0x00100826, \
       
  1133 0x00101210, 0x00380C34, 0x00000000, 0x00000000, \
       
  1134 0x0021155B, 0x00100099, 0x00206559, 0x0010009C, \
       
  1135 0x00244559, 0x00130836, 0x000C0000, 0x00220C62, \
       
  1136 0x000C0001, 0x00101B13, 0x00229C0E, 0x00210C0E, \
       
  1137 0x00226C0E, 0x00216C0E, 0x0022FC0E, 0x00215C0E, \
       
  1138 0x00214C0E, 0x00380555, 0x00010004, 0x00041000, \
       
  1139 0x00278C67, 0x00040800, 0x00018100, 0x003A0437, \
       
  1140 0x00130826, 0x000C0001, 0x00220559, 0x00101313, \
       
  1141 0x00380559, 0x00000000, 0x00000000, 0x00000000, \
       
  1142 0x00000000, 0x00000000, 0x00000000, 0x00000000, \
       
  1143 0x00000000, 0x00130831, 0x0010090B, 0x00124813, \
       
  1144 0x000CFF80, 0x002606AB, 0x00041000, 0x00010004, \
       
  1145 0x003806A8, 0x00000000, 0x00000000, 0x00000000, \
       
  1146 }
       
  1147 
       
  1148 /********************************************************/
       
  1149 /*  Micro code for 8086:1229 Rev 9                      */
       
  1150 /********************************************************/
       
  1151 
       
  1152 /*  Parameter values for the D101S  */
       
  1153 #define D101S_CPUSAVER_TIMER_DWORD		78
       
  1154 #define D101S_CPUSAVER_BUNDLE_DWORD		67
       
  1155 #define D101S_CPUSAVER_MIN_SIZE_DWORD		128
       
  1156 
       
  1157 #define D101S_RCVBUNDLE_UCODE \
       
  1158 {\
       
  1159 0x00550242, 0xFFFF047E, 0xFFFFFFFF, 0x06FF0818, 0xFFFFFFFF, 0x05A6FFFF, \
       
  1160 0x000C0001, 0x00101312, 0x000C0008, 0x00380243, \
       
  1161 0x0010009C, 0x00204056, 0x002380D0, 0x00380056, \
       
  1162 0x0010009C, 0x00244F8B, 0x00000800, 0x00124818, \
       
  1163 0x0038047F, 0x00000000, 0x00140000, 0x003805A3, \
       
  1164 0x00308000, 0x00100610, 0x00100561, 0x000E0408, \
       
  1165 0x00134861, 0x000C0002, 0x00103093, 0x00308000, \
       
  1166 0x00100624, 0x00100561, 0x000E0408, 0x00100861, \
       
  1167 0x000C007E, 0x00222FA1, 0x000C0002, 0x00103093, \
       
  1168 0x00380F90, 0x00080000, 0x00103090, 0x00380F90, \
       
  1169 0x00000000, 0x00000000, 0x00000000, 0x00000000, \
       
  1170 0x0010009C, 0x00244FAD, 0x00010004, 0x00041000, \
       
  1171 0x003A047E, 0x00044010, 0x00380819, 0x00000000, \
       
  1172 0x00100099, 0x00206FFD, 0x0010009A, 0x0020AFFD, \
       
  1173 0x0010009C, 0x00244FC8, 0x00130824, 0x000C0001, \
       
  1174 0x00101213, 0x00260FF7, 0x00041000, 0x00010004, \
       
  1175 0x00130826, 0x000C0006, 0x00220700, 0x0013C926, \
       
  1176 0x00101313, 0x00380700, 0x00000000, 0x00000000, \
       
  1177 0x00000000, 0x00000000, 0x00000000, 0x00000000, \
       
  1178 0x00080600, 0x00101B10, 0x00050004, 0x00100826, \
       
  1179 0x00101210, 0x00380FB6, 0x00000000, 0x00000000, \
       
  1180 0x002115A9, 0x00100099, 0x002065A7, 0x0010009A, \
       
  1181 0x0020A5A7, 0x0010009C, 0x002445A7, 0x00130836, \
       
  1182 0x000C0000, 0x00220FE4, 0x000C0001, 0x00101B13, \
       
  1183 0x00229F8E, 0x00210F8E, 0x00226F8E, 0x00216F8E, \
       
  1184 0x0022FF8E, 0x00215F8E, 0x00214F8E, 0x003805A3, \
       
  1185 0x00010004, 0x00041000, 0x00278FE9, 0x00040800, \
       
  1186 0x00018100, 0x003A047E, 0x00130826, 0x000C0001, \
       
  1187 0x002205A7, 0x00101313, 0x003805A7, 0x00000000, \
       
  1188 0x00000000, 0x00000000, 0x00000000, 0x00000000, \
       
  1189 0x00000000, 0x00000000, 0x00000000, 0x00130831, \
       
  1190 0x0010090B, 0x00124813, 0x000CFF80, 0x00260703, \
       
  1191 0x00041000, 0x00010004, 0x00380700  \
       
  1192 }
       
  1193 
       
  1194 /********************************************************/
       
  1195 /*  Micro code for the 8086:1229 Rev F/10               */
       
  1196 /********************************************************/
       
  1197 
       
  1198 /*  Parameter values for the D102 E-step  */
       
  1199 #define D102_E_CPUSAVER_TIMER_DWORD		42
       
  1200 #define D102_E_CPUSAVER_BUNDLE_DWORD		54
       
  1201 #define D102_E_CPUSAVER_MIN_SIZE_DWORD		46
       
  1202 
       
  1203 #define     D102_E_RCVBUNDLE_UCODE \
       
  1204 {\
       
  1205 0x007D028F, 0x0E4204F9, 0x14ED0C85, 0x14FA14E9, 0x0EF70E36, 0x1FFF1FFF, \
       
  1206 0x00E014B9, 0x00000000, 0x00000000, 0x00000000, \
       
  1207 0x00E014BD, 0x00000000, 0x00000000, 0x00000000, \
       
  1208 0x00E014D5, 0x00000000, 0x00000000, 0x00000000, \
       
  1209 0x00000000, 0x00000000, 0x00000000, 0x00000000, \
       
  1210 0x00E014C1, 0x00000000, 0x00000000, 0x00000000, \
       
  1211 0x00000000, 0x00000000, 0x00000000, 0x00000000, \
       
  1212 0x00000000, 0x00000000, 0x00000000, 0x00000000, \
       
  1213 0x00000000, 0x00000000, 0x00000000, 0x00000000, \
       
  1214 0x00E014C8, 0x00000000, 0x00000000, 0x00000000, \
       
  1215 0x00200600, 0x00E014EE, 0x00000000, 0x00000000, \
       
  1216 0x0030FF80, 0x00940E46, 0x00038200, 0x00102000, \
       
  1217 0x00E00E43, 0x00000000, 0x00000000, 0x00000000, \
       
  1218 0x00300006, 0x00E014FB, 0x00000000, 0x00000000, \
       
  1219 0x00000000, 0x00000000, 0x00000000, 0x00000000, \
       
  1220 0x00000000, 0x00000000, 0x00000000, 0x00000000, \
       
  1221 0x00000000, 0x00000000, 0x00000000, 0x00000000, \
       
  1222 0x00906E41, 0x00800E3C, 0x00E00E39, 0x00000000, \
       
  1223 0x00906EFD, 0x00900EFD, 0x00E00EF8, 0x00000000, \
       
  1224 0x00000000, 0x00000000, 0x00000000, 0x00000000, \
       
  1225 0x00000000, 0x00000000, 0x00000000, 0x00000000, \
       
  1226 0x00000000, 0x00000000, 0x00000000, 0x00000000, \
       
  1227 0x00000000, 0x00000000, 0x00000000, 0x00000000, \
       
  1228 0x00000000, 0x00000000, 0x00000000, 0x00000000, \
       
  1229 0x00000000, 0x00000000, 0x00000000, 0x00000000, \
       
  1230 0x00000000, 0x00000000, 0x00000000, 0x00000000, \
       
  1231 0x00000000, 0x00000000, 0x00000000, 0x00000000, \
       
  1232 0x00000000, 0x00000000, 0x00000000, 0x00000000, \
       
  1233 0x00000000, 0x00000000, 0x00000000, 0x00000000, \
       
  1234 0x00000000, 0x00000000, 0x00000000, 0x00000000, \
       
  1235 0x00000000, 0x00000000, 0x00000000, 0x00000000, \
       
  1236 0x00000000, 0x00000000, 0x00000000, 0x00000000, \
       
  1237 0x00000000, 0x00000000, 0x00000000, 0x00000000, \
       
  1238 }
       
  1239 
       
/* cb_prepare callback: build a load-microcode command block.  Selects
 * the CPUSaver receive-bundling microcode matching nic->mac, patches in
 * the INTDELAY/BUNDLEMAX/BUNDLESMALL tunables, and copies the image
 * into the cb.  ICH parts and MACs with no matching image get a NOP
 * command instead. */
static void e100_setup_ucode(struct nic *nic, struct cb *cb, struct sk_buff *skb)
{
/* *INDENT-OFF* */
	static struct {
		u32 ucode[UCODE_SIZE + 1];
		u8 mac;
		u8 timer_dword;
		u8 bundle_dword;
		u8 min_size_dword;
	} ucode_opts[] = {
		{ D101M_B_RCVBUNDLE_UCODE,
		  mac_82559_D101M,
		  D101M_CPUSAVER_TIMER_DWORD,
		  D101M_CPUSAVER_BUNDLE_DWORD,
		  D101M_CPUSAVER_MIN_SIZE_DWORD },
		{ D101S_RCVBUNDLE_UCODE,
		  mac_82559_D101S,
		  D101S_CPUSAVER_TIMER_DWORD,
		  D101S_CPUSAVER_BUNDLE_DWORD,
		  D101S_CPUSAVER_MIN_SIZE_DWORD },
		{ D102_E_RCVBUNDLE_UCODE,
		  mac_82551_F,
		  D102_E_CPUSAVER_TIMER_DWORD,
		  D102_E_CPUSAVER_BUNDLE_DWORD,
		  D102_E_CPUSAVER_MIN_SIZE_DWORD },
		{ D102_E_RCVBUNDLE_UCODE,
		  mac_82551_10,
		  D102_E_CPUSAVER_TIMER_DWORD,
		  D102_E_CPUSAVER_BUNDLE_DWORD,
		  D102_E_CPUSAVER_MIN_SIZE_DWORD },
		{ {0}, 0, 0, 0, 0}
	}, *opts;
/* *INDENT-ON* */

/*************************************************************************
*  CPUSaver parameters
*
*  All CPUSaver parameters are 16-bit literals that are part of a
*  "move immediate value" instruction.  By changing the value of
*  the literal in the instruction before the code is loaded, the
*  driver can change the algorithm.
*
*  INTDELAY - This loads the dead-man timer with its initial value.
*    When this timer expires the interrupt is asserted, and the
*    timer is reset each time a new packet is received.  (see
*    BUNDLEMAX below to set the limit on number of chained packets)
*    The current default is 0x600 or 1536.  Experiments show that
*    the value should probably stay within the 0x200 - 0x1000.
*
*  BUNDLEMAX -
*    This sets the maximum number of frames that will be bundled.  In
*    some situations, such as the TCP windowing algorithm, it may be
*    better to limit the growth of the bundle size than let it go as
*    high as it can, because that could cause too much added latency.
*    The default is six, because this is the number of packets in the
*    default TCP window size.  A value of 1 would make CPUSaver indicate
*    an interrupt for every frame received.  If you do not want to put
*    a limit on the bundle size, set this value to xFFFF.
*
*  BUNDLESMALL -
*    This contains a bit-mask describing the minimum size frame that
*    will be bundled.  The default masks the lower 7 bits, which means
*    that any frame less than 128 bytes in length will not be bundled,
*    but will instead immediately generate an interrupt.  This does
*    not affect the current bundle in any way.  Any frame that is 128
*    bytes or large will be bundled normally.  This feature is meant
*    to provide immediate indication of ACK frames in a TCP environment.
*    Customers were seeing poor performance when a machine with CPUSaver
*    enabled was sending but not receiving.  The delay introduced when
*    the ACKs were received was enough to reduce total throughput, because
*    the sender would sit idle until the ACK was finally seen.
*
*    The current default is 0xFF80, which masks out the lower 7 bits.
*    This means that any frame which is x7F (127) bytes or smaller
*    will cause an immediate interrupt.  Because this value must be a
*    bit mask, there are only a few valid values that can be used.  To
*    turn this feature off, the driver can write the value xFFFF to the
*    lower word of this instruction (in the same way that the other
*    parameters are used).  Likewise, a value of 0xF800 (2047) would
*    cause an interrupt to be generated for every frame, because all
*    standard Ethernet frames are <= 2047 bytes in length.
*************************************************************************/

/* if you wish to disable the ucode functionality, while maintaining the
 * workarounds it provides, set the following defines to:
 * BUNDLESMALL 0
 * BUNDLEMAX 1
 * INTDELAY 1
 */
#define BUNDLESMALL 1
#define BUNDLEMAX (u16)6
#define INTDELAY (u16)1536 /* 0x600 */

	/* do not load u-code for ICH devices */
	if (nic->flags & ich)
		goto noloaducode;

	/* Search for ucode match against h/w rev_id */
	for (opts = ucode_opts; opts->mac; opts++) {
		int i;
		u32 *ucode = opts->ucode;
		if (nic->mac != opts->mac)
			continue;

		/* Insert user-tunable settings */
		/* (the literals live in the low 16 bits of the
		 * respective "move immediate" instruction dwords) */
		ucode[opts->timer_dword] &= 0xFFFF0000;
		ucode[opts->timer_dword] |= INTDELAY;
		ucode[opts->bundle_dword] &= 0xFFFF0000;
		ucode[opts->bundle_dword] |= BUNDLEMAX;
		ucode[opts->min_size_dword] &= 0xFFFF0000;
		ucode[opts->min_size_dword] |= (BUNDLESMALL) ? 0xFFFF : 0xFF80;

		for (i = 0; i < UCODE_SIZE; i++)
			cb->u.ucode[i] = cpu_to_le32(ucode[i]);
		cb->command = cpu_to_le16(cb_ucode | cb_el);
		return;
	}

noloaducode:
	cb->command = cpu_to_le16(cb_nop | cb_el);
}
       
  1361 
       
  1362 static inline int e100_exec_cb_wait(struct nic *nic, struct sk_buff *skb,
       
  1363 	void (*cb_prepare)(struct nic *, struct cb *, struct sk_buff *))
       
  1364 {
       
  1365 	int err = 0, counter = 50;
       
  1366 	struct cb *cb = nic->cb_to_clean;
       
  1367 
       
  1368 	if ((err = e100_exec_cb(nic, NULL, e100_setup_ucode)))
       
  1369 		DPRINTK(PROBE,ERR, "ucode cmd failed with error %d\n", err);
       
  1370 
       
  1371 	/* must restart cuc */
       
  1372 	nic->cuc_cmd = cuc_start;
       
  1373 
       
  1374 	/* wait for completion */
       
  1375 	e100_write_flush(nic);
       
  1376 	udelay(10);
       
  1377 
       
  1378 	/* wait for possibly (ouch) 500ms */
       
  1379 	while (!(cb->status & cpu_to_le16(cb_complete))) {
       
  1380 		msleep(10);
       
  1381 		if (!--counter) break;
       
  1382 	}
       
  1383 
       
  1384 	/* ack any interupts, something could have been set */
       
  1385 	writeb(~0, &nic->csr->scb.stat_ack);
       
  1386 
       
  1387 	/* if the command failed, or is not OK, notify and return */
       
  1388 	if (!counter || !(cb->status & cpu_to_le16(cb_ok))) {
       
  1389 		DPRINTK(PROBE,ERR, "ucode load failed\n");
       
  1390 		err = -EPERM;
       
  1391 	}
       
  1392 
       
  1393 	return err;
       
  1394 }
       
  1395 
       
  1396 static void e100_setup_iaaddr(struct nic *nic, struct cb *cb,
       
  1397 	struct sk_buff *skb)
       
  1398 {
       
  1399 	cb->command = cpu_to_le16(cb_iaaddr);
       
  1400 	memcpy(cb->u.iaaddr, nic->netdev->dev_addr, ETH_ALEN);
       
  1401 }
       
  1402 
       
  1403 static void e100_dump(struct nic *nic, struct cb *cb, struct sk_buff *skb)
       
  1404 {
       
  1405 	cb->command = cpu_to_le16(cb_dump);
       
  1406 	cb->u.dump_buffer_addr = cpu_to_le32(nic->dma_addr +
       
  1407 		offsetof(struct mem, dump_buf));
       
  1408 }
       
  1409 
       
#define NCONFIG_AUTO_SWITCH	0x0080
#define MII_NSC_CONG		MII_RESV1
#define NSC_CONG_ENABLE		0x0100
#define NSC_CONG_TXREADY	0x0400
#define ADVERTISE_FC_SUPPORTED	0x0400
/* Locate and initialise the PHY over MDIO: probe all 32 addresses,
 * isolate the unused PHYs, record the PHY ID, apply the National
 * tx-PHY congestion-control quirk and enable MDI/MDI-X auto-switching
 * where the hardware supports it.
 * Returns 0 on success, -EAGAIN if no PHY responds. */
static int e100_phy_init(struct nic *nic)
{
	struct net_device *netdev = nic->netdev;
	u32 addr;
	u16 bmcr, stat, id_lo, id_hi, cong;

	/* Discover phy addr by searching addrs in order {1,0,2,..., 31} */
	for(addr = 0; addr < 32; addr++) {
		nic->mii.phy_id = (addr == 0) ? 1 : (addr == 1) ? 0 : addr;
		bmcr = mdio_read(netdev, nic->mii.phy_id, MII_BMCR);
		/* BMSR is read twice — presumably to flush latched status
		 * bits and observe the current state; TODO confirm */
		stat = mdio_read(netdev, nic->mii.phy_id, MII_BMSR);
		stat = mdio_read(netdev, nic->mii.phy_id, MII_BMSR);
		/* all-ones BMCR or all-zero BMCR+BMSR: nothing answered */
		if(!((bmcr == 0xFFFF) || ((stat == 0) && (bmcr == 0))))
			break;
	}
	DPRINTK(HW, DEBUG, "phy_addr = %d\n", nic->mii.phy_id);
	if(addr == 32)
		return -EAGAIN;

	/* Selected the phy and isolate the rest */
	for(addr = 0; addr < 32; addr++) {
		if(addr != nic->mii.phy_id) {
			mdio_write(netdev, addr, MII_BMCR, BMCR_ISOLATE);
		} else {
			/* clear ISOLATE on the selected PHY */
			bmcr = mdio_read(netdev, addr, MII_BMCR);
			mdio_write(netdev, addr, MII_BMCR,
				bmcr & ~BMCR_ISOLATE);
		}
	}

	/* Get phy ID */
	id_lo = mdio_read(netdev, nic->mii.phy_id, MII_PHYSID1);
	id_hi = mdio_read(netdev, nic->mii.phy_id, MII_PHYSID2);
	nic->phy = (u32)id_hi << 16 | (u32)id_lo;
	DPRINTK(HW, DEBUG, "phy ID = 0x%08X\n", nic->phy);

	/* Handle National tx phys */
#define NCS_PHY_MODEL_MASK	0xFFF0FFFF
	if((nic->phy & NCS_PHY_MODEL_MASK) == phy_nsc_tx) {
		/* Disable congestion control */
		cong = mdio_read(netdev, nic->mii.phy_id, MII_NSC_CONG);
		cong |= NSC_CONG_TXREADY;
		cong &= ~NSC_CONG_ENABLE;
		mdio_write(netdev, nic->mii.phy_id, MII_NSC_CONG, cong);
	}

	/* NOTE(review): the || binds the MAC-revision check against the
	 * whole ich/eeprom clause — verify this matches the intended
	 * MDI-X capability condition */
	if((nic->mac >= mac_82550_D102) || ((nic->flags & ich) &&
	   (mdio_read(netdev, nic->mii.phy_id, MII_TPISTATUS) & 0x8000) &&
		!(nic->eeprom[eeprom_cnfg_mdix] & eeprom_mdix_enabled))) {
		/* enable/disable MDI/MDI-X auto-switching. */
		mdio_write(netdev, nic->mii.phy_id, MII_NCONFIG,
				nic->mii.force_media ? 0 : NCONFIG_AUTO_SWITCH);
	}

	return 0;
}
       
  1471 
       
/* Bring the controller to an operational state: reset, self-test (only
 * outside interrupt context), PHY init, CU/RU base loading, microcode
 * download, configuration, MAC address setup and statistics dump-area
 * registration.  The steps are issued strictly in this order; the
 * first failure aborts and its errno is returned.  Ends with
 * interrupts disabled. */
static int e100_hw_init(struct nic *nic)
{
	int err;

	e100_hw_reset(nic);

	/* NOTE(review): ERR level here looks like leftover debug output */
	DPRINTK(HW, ERR, "e100_hw_init\n");
	if(!in_interrupt() && (err = e100_self_test(nic)))
		return err;

	if((err = e100_phy_init(nic)))
		return err;
	/* tell the command and receive units their structures start at 0 */
	if((err = e100_exec_cmd(nic, cuc_load_base, 0)))
		return err;
	if((err = e100_exec_cmd(nic, ruc_load_base, 0)))
		return err;
	/* download microcode and wait for it to complete */
	if ((err = e100_exec_cb_wait(nic, NULL, e100_setup_ucode)))
		return err;
	if((err = e100_exec_cb(nic, NULL, e100_configure)))
		return err;
	if((err = e100_exec_cb(nic, NULL, e100_setup_iaaddr)))
		return err;
	/* point the controller at the DMA area used for statistics dumps */
	if((err = e100_exec_cmd(nic, cuc_dump_addr,
		nic->dma_addr + offsetof(struct mem, stats))))
		return err;
	if((err = e100_exec_cmd(nic, cuc_dump_reset, 0)))
		return err;

	e100_disable_irq(nic);

	return 0;
}
       
  1504 
       
  1505 static void e100_multi(struct nic *nic, struct cb *cb, struct sk_buff *skb)
       
  1506 {
       
  1507 	struct net_device *netdev = nic->netdev;
       
  1508 	struct dev_mc_list *list = netdev->mc_list;
       
  1509 	u16 i, count = min(netdev->mc_count, E100_MAX_MULTICAST_ADDRS);
       
  1510 
       
  1511 	cb->command = cpu_to_le16(cb_multi);
       
  1512 	cb->u.multi.count = cpu_to_le16(count * ETH_ALEN);
       
  1513 	for(i = 0; list && i < count; i++, list = list->next)
       
  1514 		memcpy(&cb->u.multi.addr[i*ETH_ALEN], &list->dmi_addr,
       
  1515 			ETH_ALEN);
       
  1516 }
       
  1517 
       
  1518 static void e100_set_multicast_list(struct net_device *netdev)
       
  1519 {
       
  1520 	struct nic *nic = netdev_priv(netdev);
       
  1521 
       
  1522 	DPRINTK(HW, DEBUG, "mc_count=%d, flags=0x%04X\n",
       
  1523 		netdev->mc_count, netdev->flags);
       
  1524 
       
  1525 	if(netdev->flags & IFF_PROMISC)
       
  1526 		nic->flags |= promiscuous;
       
  1527 	else
       
  1528 		nic->flags &= ~promiscuous;
       
  1529 
       
  1530 	if(netdev->flags & IFF_ALLMULTI ||
       
  1531 		netdev->mc_count > E100_MAX_MULTICAST_ADDRS)
       
  1532 		nic->flags |= multicast_all;
       
  1533 	else
       
  1534 		nic->flags &= ~multicast_all;
       
  1535 
       
  1536 	e100_exec_cb(nic, NULL, e100_configure);
       
  1537 	e100_exec_cb(nic, NULL, e100_multi);
       
  1538 }
       
  1539 
       
/* Harvest the counters the controller dumped into shared memory (the
 * dump was started by a previous cuc_dump_reset command), fold them
 * into net_device_stats and the adaptive-IFS bookkeeping, then start
 * the next dump+reset cycle. */
static void e100_update_stats(struct nic *nic)
{
	struct net_device_stats *ns = &nic->net_stats;
	struct stats *s = &nic->mem->stats;
	/* the dump-complete marker sits right after the last counter the
	 * given MAC generation reports */
	u32 *complete = (nic->mac < mac_82558_D101_A4) ? &s->fc_xmt_pause :
		(nic->mac < mac_82559_D101M) ? (u32 *)&s->xmt_tco_frames :
		&s->complete;

	/* Device's stats reporting may take several microseconds to
	 * complete, so we're always waiting for results of the
	 * previous command. */

	if(*complete == le32_to_cpu(cuc_dump_reset_complete)) {
		*complete = 0;
		nic->tx_frames = le32_to_cpu(s->tx_good_frames);
		nic->tx_collisions = le32_to_cpu(s->tx_total_collisions);
		ns->tx_aborted_errors += le32_to_cpu(s->tx_max_collisions);
		ns->tx_window_errors += le32_to_cpu(s->tx_late_collisions);
		ns->tx_carrier_errors += le32_to_cpu(s->tx_lost_crs);
		ns->tx_fifo_errors += le32_to_cpu(s->tx_underruns);
		ns->collisions += nic->tx_collisions;
		ns->tx_errors += le32_to_cpu(s->tx_max_collisions) +
			le32_to_cpu(s->tx_lost_crs);
		ns->rx_length_errors += le32_to_cpu(s->rx_short_frame_errors) +
			nic->rx_over_length_errors;
		ns->rx_crc_errors += le32_to_cpu(s->rx_crc_errors);
		ns->rx_frame_errors += le32_to_cpu(s->rx_alignment_errors);
		ns->rx_over_errors += le32_to_cpu(s->rx_overrun_errors);
		ns->rx_fifo_errors += le32_to_cpu(s->rx_overrun_errors);
		ns->rx_missed_errors += le32_to_cpu(s->rx_resource_errors);
		ns->rx_errors += le32_to_cpu(s->rx_crc_errors) +
			le32_to_cpu(s->rx_alignment_errors) +
			le32_to_cpu(s->rx_short_frame_errors) +
			le32_to_cpu(s->rx_cdt_errors);
		nic->tx_deferred += le32_to_cpu(s->tx_deferred);
		nic->tx_single_collisions +=
			le32_to_cpu(s->tx_single_collisions);
		nic->tx_multiple_collisions +=
			le32_to_cpu(s->tx_multiple_collisions);
		/* flow-control counters only from 82558 D101 A4 onwards */
		if(nic->mac >= mac_82558_D101_A4) {
			nic->tx_fc_pause += le32_to_cpu(s->fc_xmt_pause);
			nic->rx_fc_pause += le32_to_cpu(s->fc_rcv_pause);
			nic->rx_fc_unsupported +=
				le32_to_cpu(s->fc_rcv_unsupported);
			/* TCO counters only from 82559 D101M onwards */
			if(nic->mac >= mac_82559_D101M) {
				nic->tx_tco_frames +=
					le16_to_cpu(s->xmt_tco_frames);
				nic->rx_tco_frames +=
					le16_to_cpu(s->rcv_tco_frames);
			}
		}
	}


	/* kick off the next dump+reset cycle */
	if(e100_exec_cmd(nic, cuc_dump_reset, 0))
		DPRINTK(TX_ERR, DEBUG, "exec cuc_dump_reset failed\n");
}
       
  1597 
       
  1598 static void e100_adjust_adaptive_ifs(struct nic *nic, int speed, int duplex)
       
  1599 {
       
  1600 	/* Adjust inter-frame-spacing (IFS) between two transmits if
       
  1601 	 * we're getting collisions on a half-duplex connection. */
       
  1602 
       
  1603 	if(duplex == DUPLEX_HALF) {
       
  1604 		u32 prev = nic->adaptive_ifs;
       
  1605 		u32 min_frames = (speed == SPEED_100) ? 1000 : 100;
       
  1606 
       
  1607 		if((nic->tx_frames / 32 < nic->tx_collisions) &&
       
  1608 		   (nic->tx_frames > min_frames)) {
       
  1609 			if(nic->adaptive_ifs < 60)
       
  1610 				nic->adaptive_ifs += 5;
       
  1611 		} else if (nic->tx_frames < min_frames) {
       
  1612 			if(nic->adaptive_ifs >= 5)
       
  1613 				nic->adaptive_ifs -= 5;
       
  1614 		}
       
  1615 		if(nic->adaptive_ifs != prev)
       
  1616 			e100_exec_cb(nic, NULL, e100_configure);
       
  1617 	}
       
  1618 }
       
  1619 
       
  1620 static void e100_watchdog(unsigned long data)
       
  1621 {
       
  1622 	struct nic *nic = (struct nic *)data;
       
  1623 	struct ethtool_cmd cmd;
       
  1624 
       
  1625 	DPRINTK(TIMER, DEBUG, "right now = %ld\n", jiffies);
       
  1626 
       
  1627 	/* mii library handles link maintenance tasks */
       
  1628 
       
  1629     if (nic->ecdev) {
       
  1630 		ecdev_set_link(nic->ecdev, mii_link_ok(&nic->mii) ? 1 : 0);
       
  1631     } else {
       
  1632 		mii_ethtool_gset(&nic->mii, &cmd);
       
  1633 
       
  1634 		if(mii_link_ok(&nic->mii) && !netif_carrier_ok(nic->netdev)) {
       
  1635 			DPRINTK(LINK, INFO, "link up, %sMbps, %s-duplex\n",
       
  1636 					cmd.speed == SPEED_100 ? "100" : "10",
       
  1637 					cmd.duplex == DUPLEX_FULL ? "full" : "half");
       
  1638 		} else if(!mii_link_ok(&nic->mii) && netif_carrier_ok(nic->netdev)) {
       
  1639 			DPRINTK(LINK, INFO, "link down\n");
       
  1640 		}
       
  1641 	}
       
  1642 
       
  1643 	mii_check_link(&nic->mii);
       
  1644 
       
  1645 	/* Software generated interrupt to recover from (rare) Rx
       
  1646 	 * allocation failure.
       
  1647 	 * Unfortunately have to use a spinlock to not re-enable interrupts
       
  1648 	 * accidentally, due to hardware that shares a register between the
       
  1649 	 * interrupt mask bit and the SW Interrupt generation bit */
       
  1650 	if (!nic->ecdev)
       
  1651 		spin_lock_irq(&nic->cmd_lock);
       
  1652 	writeb(readb(&nic->csr->scb.cmd_hi) | irq_sw_gen,&nic->csr->scb.cmd_hi);
       
  1653 	e100_write_flush(nic);
       
  1654 	if (!nic->ecdev)
       
  1655 		spin_unlock_irq(&nic->cmd_lock);
       
  1656 
       
  1657 	e100_update_stats(nic);
       
  1658 	e100_adjust_adaptive_ifs(nic, cmd.speed, cmd.duplex);
       
  1659 
       
  1660 	if(nic->mac <= mac_82557_D100_C)
       
  1661 		/* Issue a multicast command to workaround a 557 lock up */
       
  1662 		e100_set_multicast_list(nic->netdev);
       
  1663 
       
  1664 	if(nic->flags & ich && cmd.speed==SPEED_10 && cmd.duplex==DUPLEX_HALF)
       
  1665 		/* Need SW workaround for ICH[x] 10Mbps/half duplex Tx hang. */
       
  1666 		nic->flags |= ich_10h_workaround;
       
  1667 	else
       
  1668 		nic->flags &= ~ich_10h_workaround;
       
  1669 
       
  1670     if (!nic->ecdev)
       
  1671 		mod_timer(&nic->watchdog, jiffies + E100_WATCHDOG_PERIOD);
       
  1672 }
       
  1673 
       
/* cb_prepare callback for transmit: fill in a TCB whose single TBD
 * describes the skb data, DMA-mapped for device reads. */
static void e100_xmit_prepare(struct nic *nic, struct cb *cb,
	struct sk_buff *skb)
{
	cb->command = nic->tx_command;
	/* interrupt every 16 packets regardless of delay */
	/* (true exactly when cbs_avail is a multiple of 16) */
	if((nic->cbs_avail & ~15) == nic->cbs_avail)
		cb->command |= cpu_to_le16(cb_i);
	/* the TBD array lives inside the CB itself, right after the TCB */
	cb->u.tcb.tbd_array = cb->dma_addr + offsetof(struct cb, u.tcb.tbd);
	cb->u.tcb.tcb_byte_count = 0;
	cb->u.tcb.threshold = nic->tx_threshold;
	cb->u.tcb.tbd_count = 1;
	cb->u.tcb.tbd.buf_addr = cpu_to_le32(pci_map_single(nic->pdev,
		skb->data, skb->len, PCI_DMA_TODEVICE));
	/* check for mapping failure? */
	cb->u.tcb.tbd.size = cpu_to_le16(skb->len);
}
       
  1690 
       
/* net_device hard_start_xmit hook: queue one skb for transmission via
 * a command block.  Returns 0 on success (or after -ENOSPC, where the
 * skb WAS queued), 1 when out of Tx resources so the stack requeues
 * the skb.  In EtherCAT mode the netif queue is never stopped. */
static int e100_xmit_frame(struct sk_buff *skb, struct net_device *netdev)
{
	struct nic *nic = netdev_priv(netdev);
	int err;

	if(nic->flags & ich_10h_workaround) {
		/* SW workaround for ICH[x] 10Mbps/half duplex Tx hang.
		   Issue a NOP command followed by a 1us delay before
		   issuing the Tx command. */
		if(e100_exec_cmd(nic, cuc_nop, 0))
			DPRINTK(TX_ERR, DEBUG, "exec cuc_nop failed\n");
		udelay(1);
	}

	err = e100_exec_cb(nic, skb, e100_xmit_prepare);

	switch(err) {
	case -ENOSPC:
		/* We queued the skb, but now we're out of space. */
		DPRINTK(TX_ERR, DEBUG, "No space for CB\n");
        if (!nic->ecdev)
            netif_stop_queue(netdev);
		break;
	case -ENOMEM:
		/* This is a hard error - log it. */
		DPRINTK(TX_ERR, DEBUG, "Out of Tx resources, returning skb\n");
        if (!nic->ecdev)
            netif_stop_queue(netdev);
		return 1;
	}

	netdev->trans_start = jiffies;
	return 0;
}
       
  1725 
       
/* Reclaim completed transmit command blocks: account Tx stats, unmap
 * the DMA buffers, free the skbs (not in EtherCAT mode — presumably
 * the master owns/reuses those buffers; confirm) and return the CBs
 * to the available pool.  Returns non-zero when at least one CB was
 * cleaned (used to wake a stopped queue). */
static int e100_tx_clean(struct nic *nic)
{
	struct cb *cb;
	int tx_cleaned = 0;

	/* EtherCAT operation runs without the CB lock */
	if (!nic->ecdev)
		spin_lock(&nic->cb_lock);

	/* Clean CBs marked complete */
	for(cb = nic->cb_to_clean;
	    cb->status & cpu_to_le16(cb_complete);
	    cb = nic->cb_to_clean = cb->next) {
		DPRINTK(TX_DONE, DEBUG, "cb[%d]->status = 0x%04X\n",
		        (int)(((void*)cb - (void*)nic->cbs)/sizeof(struct cb)),
		        cb->status);

		if(likely(cb->skb != NULL)) {
			nic->net_stats.tx_packets++;
			nic->net_stats.tx_bytes += cb->skb->len;

			pci_unmap_single(nic->pdev,
				le32_to_cpu(cb->u.tcb.tbd.buf_addr),
				le16_to_cpu(cb->u.tcb.tbd.size),
				PCI_DMA_TODEVICE);
			if (!nic->ecdev)
				dev_kfree_skb_any(cb->skb);
			cb->skb = NULL;
			tx_cleaned = 1;
		}
		cb->status = 0;
		nic->cbs_avail++;
	}

	if (!nic->ecdev) {
		spin_unlock(&nic->cb_lock);

		/* Recover from running out of Tx resources in xmit_frame */
		if(unlikely(tx_cleaned && netif_queue_stopped(nic->netdev)))
			netif_wake_queue(nic->netdev);
	}

	return tx_cleaned;
}
       
  1769 
       
/* Tear down the command block ring: walk any still-outstanding CBs,
 * unmap their DMA buffers and free their skbs, then release the
 * DMA-coherent ring and reset the ring pointers/counters. */
static void e100_clean_cbs(struct nic *nic)
{
	if(nic->cbs) {
		/* drain until every CB is back in the available pool */
		while(nic->cbs_avail != nic->params.cbs.count) {
			struct cb *cb = nic->cb_to_clean;
			if(cb->skb) {
				pci_unmap_single(nic->pdev,
					le32_to_cpu(cb->u.tcb.tbd.buf_addr),
					le16_to_cpu(cb->u.tcb.tbd.size),
					PCI_DMA_TODEVICE);
				/* NOTE(review): in EtherCAT mode the skb is not
				 * freed here — confirm ownership lies with the
				 * master */
				if (!nic->ecdev)
					dev_kfree_skb(cb->skb);
			}
			nic->cb_to_clean = nic->cb_to_clean->next;
			nic->cbs_avail++;
		}
		pci_free_consistent(nic->pdev,
			sizeof(struct cb) * nic->params.cbs.count,
			nic->cbs, nic->cbs_dma_addr);
		nic->cbs = NULL;
		nic->cbs_avail = 0;
	}
	nic->cuc_cmd = cuc_start;
	/* nic->cbs is NULL here, so all ring pointers end up NULL too */
	nic->cb_to_use = nic->cb_to_send = nic->cb_to_clean =
		nic->cbs;
}
       
  1796 
       
  1797 static int e100_alloc_cbs(struct nic *nic)
       
  1798 {
       
  1799 	struct cb *cb;
       
  1800 	unsigned int i, count = nic->params.cbs.count;
       
  1801 
       
  1802 	nic->cuc_cmd = cuc_start;
       
  1803 	nic->cb_to_use = nic->cb_to_send = nic->cb_to_clean = NULL;
       
  1804 	nic->cbs_avail = 0;
       
  1805 
       
  1806 	nic->cbs = pci_alloc_consistent(nic->pdev,
       
  1807 		sizeof(struct cb) * count, &nic->cbs_dma_addr);
       
  1808 	if(!nic->cbs)
       
  1809 		return -ENOMEM;
       
  1810 
       
  1811 	for(cb = nic->cbs, i = 0; i < count; cb++, i++) {
       
  1812 		cb->next = (i + 1 < count) ? cb + 1 : nic->cbs;
       
  1813 		cb->prev = (i == 0) ? nic->cbs + count - 1 : cb - 1;
       
  1814 
       
  1815 		cb->dma_addr = nic->cbs_dma_addr + i * sizeof(struct cb);
       
  1816 		cb->link = cpu_to_le32(nic->cbs_dma_addr +
       
  1817 			((i+1) % count) * sizeof(struct cb));
       
  1818 		cb->skb = NULL;
       
  1819 	}
       
  1820 
       
  1821 	nic->cb_to_use = nic->cb_to_send = nic->cb_to_clean = nic->cbs;
       
  1822 	nic->cbs_avail = count;
       
  1823 
       
  1824 	return 0;
       
  1825 }
       
  1826 
       
  1827 static inline void e100_start_receiver(struct nic *nic, struct rx *rx)
       
  1828 {
       
  1829 	if(!nic->rxs) return;
       
  1830 	if(RU_SUSPENDED != nic->ru_running) return;
       
  1831 
       
  1832 	/* handle init time starts */
       
  1833 	if(!rx) rx = nic->rxs;
       
  1834 
       
  1835 	/* (Re)start RU if suspended or idle and RFA is non-NULL */
       
  1836 	if(rx->skb) {
       
  1837 		e100_exec_cmd(nic, ruc_start, rx->dma_addr);
       
  1838 		nic->ru_running = RU_RUNNING;
       
  1839 	}
       
  1840 }
       
  1841 
       
/* One receive buffer holds the RFD header immediately followed by a
 * max-size VLAN Ethernet frame. */
#define RFD_BUF_LEN (sizeof(struct rfd) + VLAN_ETH_FRAME_LEN)
/* Allocate and DMA-map one receive buffer, initialise its RFD from the
 * blank template and chain it onto the end of the RFA by patching the
 * previous RFD's link pointer and clearing its EL (end-of-list) bit.
 * Returns 0 or -ENOMEM. */
static int e100_rx_alloc_skb(struct nic *nic, struct rx *rx)
{
	if(!(rx->skb = netdev_alloc_skb(nic->netdev, RFD_BUF_LEN + NET_IP_ALIGN)))
		return -ENOMEM;

	/* Align, init, and map the RFD. */
	skb_reserve(rx->skb, NET_IP_ALIGN);
	memcpy(rx->skb->data, &nic->blank_rfd, sizeof(struct rfd));
	rx->dma_addr = pci_map_single(nic->pdev, rx->skb->data,
		RFD_BUF_LEN, PCI_DMA_BIDIRECTIONAL);

	if(pci_dma_mapping_error(rx->dma_addr)) {
		dev_kfree_skb_any(rx->skb);
		rx->skb = NULL;
		rx->dma_addr = 0;
		return -ENOMEM;
	}

	/* Link the RFD to end of RFA by linking previous RFD to
	 * this one, and clearing EL bit of previous.  */
	if(rx->prev->skb) {
		struct rfd *prev_rfd = (struct rfd *)rx->prev->skb->data;
		/* link may be unaligned inside the RFD */
		put_unaligned(cpu_to_le32(rx->dma_addr),
			(u32 *)&prev_rfd->link);
		/* write the new link before clearing EL, so the device
		 * never follows a stale link past a cleared end mark */
		wmb();
		prev_rfd->command &= ~cpu_to_le16(cb_el);
		pci_dma_sync_single_for_device(nic->pdev, rx->prev->dma_addr,
			sizeof(struct rfd), PCI_DMA_TODEVICE);
	}

	return 0;
}
       
  1875 
       
/* Examine one receive slot and, if the hardware completed it, hand the
 * frame to the network stack (or to the EtherCAT master).  In EtherCAT
 * mode the same buffer is re-initialised and re-mapped in place; in
 * kernel mode rx->skb is surrendered and later refilled by
 * e100_rx_clean().
 * Returns 0 when a frame was consumed, -EAGAIN when the work quota is
 * exhausted, -ENODATA when the slot is not yet complete. */
static int e100_rx_indicate(struct nic *nic, struct rx *rx,
	unsigned int *work_done, unsigned int work_to_do)
{
	struct sk_buff *skb = rx->skb;
	struct rfd *rfd = (struct rfd *)skb->data;
	u16 rfd_status, actual_size;

	if(unlikely(work_done && *work_done >= work_to_do))
		return -EAGAIN;

	/* Need to sync before taking a peek at cb_complete bit */
	pci_dma_sync_single_for_cpu(nic->pdev, rx->dma_addr,
		sizeof(struct rfd), PCI_DMA_FROMDEVICE);
	rfd_status = le16_to_cpu(rfd->status);

	DPRINTK(RX_STATUS, DEBUG, "status=0x%04X\n", rfd_status);

	/* If data isn't ready, nothing to indicate */
	if(unlikely(!(rfd_status & cb_complete)))
		return -ENODATA;

	/* Get actual data size */
	actual_size = le16_to_cpu(rfd->actual_size) & 0x3FFF;
	/* clamp to the buffer in case the hardware reports garbage */
	if(unlikely(actual_size > RFD_BUF_LEN - sizeof(struct rfd)))
		actual_size = RFD_BUF_LEN - sizeof(struct rfd);

	/* Get data */
	pci_unmap_single(nic->pdev, rx->dma_addr,
		RFD_BUF_LEN, PCI_DMA_FROMDEVICE);

	/* this allows for a fast restart without re-enabling interrupts */
	if(le16_to_cpu(rfd->command) & cb_el)
		nic->ru_running = RU_SUSPENDED;

	if (!nic->ecdev) {
		/* Pull off the RFD and put the actual data (minus eth hdr) */
		skb_reserve(skb, sizeof(struct rfd));
		skb_put(skb, actual_size);
		skb->protocol = eth_type_trans(skb, nic->netdev);
	}

	if(unlikely(!(rfd_status & cb_ok))) {
		if (!nic->ecdev) {
			/* Don't indicate if hardware indicates errors */
			dev_kfree_skb_any(skb);
		}
	} else if(actual_size > ETH_DATA_LEN + VLAN_ETH_HLEN) {
		/* Don't indicate oversized frames */
		nic->rx_over_length_errors++;
		if (!nic->ecdev)
			dev_kfree_skb_any(skb);
	} else {
		nic->net_stats.rx_packets++;
		nic->net_stats.rx_bytes += actual_size;
		nic->netdev->last_rx = jiffies;
		if (nic->ecdev) {
			/* frame data starts right after the RFD header */
			ecdev_receive(nic->ecdev,
					skb->data + sizeof(struct rfd), actual_size);

			// No need to detect link status as
			// long as frames are received: Reset watchdog.
			nic->ec_watchdog_jiffies = jiffies;
		} else {
			netif_receive_skb(skb);
		}
		if(work_done)
			(*work_done)++;
	}

	if (nic->ecdev) {
		// make receive frame descriptior usable again
		memcpy(skb->data, &nic->blank_rfd, sizeof(struct rfd));
		rx->dma_addr = pci_map_single(nic->pdev, skb->data,
				RFD_BUF_LEN, PCI_DMA_BIDIRECTIONAL);
		/* NOTE(review): a mapping failure only zeroes dma_addr;
		 * there is no error propagation here — confirm intended */
		if(pci_dma_mapping_error(rx->dma_addr)) {
			rx->dma_addr = 0;
		}

		/* Link the RFD to end of RFA by linking previous RFD to
		 * this one, and clearing EL bit of previous.  */
		if(rx->prev->skb) {
			struct rfd *prev_rfd = (struct rfd *)rx->prev->skb->data;
			put_unaligned(cpu_to_le32(rx->dma_addr),
					(u32 *)&prev_rfd->link);
			wmb();
			prev_rfd->command &= ~cpu_to_le16(cb_el);
			pci_dma_sync_single_for_device(nic->pdev, rx->prev->dma_addr,
					sizeof(struct rfd), PCI_DMA_TODEVICE);
		}
	} else {
		/* buffer handed off to the stack; refilled by rx_clean */
		rx->skb = NULL;
	}

	return 0;
}
       
  1971 
       
/* Indicate received frames up to @work_to_do, refill the RFA with
 * fresh buffers (kernel mode only — EtherCAT mode reuses buffers in
 * place) and restart the receive unit when it had run out of
 * resources. */
static void e100_rx_clean(struct nic *nic, unsigned int *work_done,
	unsigned int work_to_do)
{
	struct rx *rx;
	int restart_required = 0;
	struct rx *rx_to_start = NULL;

	/* are we already rnr? then pay attention!!! this ensures that
	 * the state machine progression never allows a start with a
	 * partially cleaned list, avoiding a race between hardware
	 * and rx_to_clean when in NAPI mode */
	if(RU_SUSPENDED == nic->ru_running)
		restart_required = 1;

	/* Indicate newly arrived packets */
	for(rx = nic->rx_to_clean; rx->skb; rx = nic->rx_to_clean = rx->next) {
		int err = e100_rx_indicate(nic, rx, work_done, work_to_do);
		if(-EAGAIN == err) {
			/* hit quota so have more work to do, restart once
			 * cleanup is complete */
			restart_required = 0;
			break;
		} else if(-ENODATA == err)
			break; /* No more to clean */
	}

	/* save our starting point as the place we'll restart the receiver */
	if(restart_required)
		rx_to_start = nic->rx_to_clean;

	if (!nic->ecdev) {
		/* Alloc new skbs to refill list */
		for(rx = nic->rx_to_use; !rx->skb; rx = nic->rx_to_use = rx->next) {
			if(unlikely(e100_rx_alloc_skb(nic, rx)))
				break; /* Better luck next time (see watchdog) */
		}
	}

	if(restart_required) {
		// ack the rnr?
		writeb(stat_ack_rnr, &nic->csr->scb.stat_ack);
		e100_start_receiver(nic, rx_to_start);
		if(work_done)
			(*work_done)++;
	}
}
       
  2018 
       
  2019 static void e100_rx_clean_list(struct nic *nic)
       
  2020 {
       
  2021 	struct rx *rx;
       
  2022 	unsigned int i, count = nic->params.rfds.count;
       
  2023 
       
  2024 	nic->ru_running = RU_UNINITIALIZED;
       
  2025 
       
  2026 	if(nic->rxs) {
       
  2027 		for(rx = nic->rxs, i = 0; i < count; rx++, i++) {
       
  2028 			if(rx->skb) {
       
  2029 				pci_unmap_single(nic->pdev, rx->dma_addr,
       
  2030 					RFD_BUF_LEN, PCI_DMA_FROMDEVICE);
       
  2031 				dev_kfree_skb(rx->skb);
       
  2032 			}
       
  2033 		}
       
  2034 		kfree(nic->rxs);
       
  2035 		nic->rxs = NULL;
       
  2036 	}
       
  2037 
       
  2038 	nic->rx_to_use = nic->rx_to_clean = NULL;
       
  2039 }
       
  2040 
       
  2041 static int e100_rx_alloc_list(struct nic *nic)
       
  2042 {
       
  2043 	struct rx *rx;
       
  2044 	unsigned int i, count = nic->params.rfds.count;
       
  2045 
       
  2046 	nic->rx_to_use = nic->rx_to_clean = NULL;
       
  2047 	nic->ru_running = RU_UNINITIALIZED;
       
  2048 
       
  2049 	if(!(nic->rxs = kcalloc(count, sizeof(struct rx), GFP_ATOMIC)))
       
  2050 		return -ENOMEM;
       
  2051 
       
  2052 	for(rx = nic->rxs, i = 0; i < count; rx++, i++) {
       
  2053 		rx->next = (i + 1 < count) ? rx + 1 : nic->rxs;
       
  2054 		rx->prev = (i == 0) ? nic->rxs + count - 1 : rx - 1;
       
  2055 		if(e100_rx_alloc_skb(nic, rx)) {
       
  2056 			e100_rx_clean_list(nic);
       
  2057 			return -ENOMEM;
       
  2058 		}
       
  2059 	}
       
  2060 
       
  2061 	nic->rx_to_use = nic->rx_to_clean = nic->rxs;
       
  2062 	nic->ru_running = RU_SUSPENDED;
       
  2063 
       
  2064 	return 0;
       
  2065 }
       
  2066 
       
/* Interrupt handler: acknowledge all asserted causes, note RNR
 * (receive-no-resource) so the RU gets restarted after cleanup, and
 * schedule NAPI polling — except in EtherCAT mode, which polls
 * explicitly via e100_ec_poll(). */
static irqreturn_t e100_intr(int irq, void *dev_id)
{
	struct net_device *netdev = dev_id;
	struct nic *nic = netdev_priv(netdev);
	u8 stat_ack = readb(&nic->csr->scb.stat_ack);

	DPRINTK(INTR, DEBUG, "stat_ack = 0x%02X\n", stat_ack);

	if(stat_ack == stat_ack_not_ours ||	/* Not our interrupt */
	   stat_ack == stat_ack_not_present)	/* Hardware is ejected */
		return IRQ_NONE;

	/* Ack interrupt(s) */
	writeb(stat_ack, &nic->csr->scb.stat_ack);

	/* We hit Receive No Resource (RNR); restart RU after cleaning */
	if(stat_ack & stat_ack_rnr)
		nic->ru_running = RU_SUSPENDED;

	if(!nic->ecdev && likely(netif_rx_schedule_prep(netdev))) {
		/* irqs stay masked until e100_poll() re-enables them */
		e100_disable_irq(nic);
		__netif_rx_schedule(netdev);
	}

	return IRQ_HANDLED;
}
       
  2093 
       
  2094 void e100_ec_poll(struct net_device *netdev)
       
  2095 {
       
  2096 	struct nic *nic = netdev_priv(netdev);
       
  2097 
       
  2098 	e100_rx_clean(nic, NULL, 100); // FIXME
       
  2099 	e100_tx_clean(nic);
       
  2100 
       
  2101     if (jiffies - nic->ec_watchdog_jiffies >= 2 * HZ) {
       
  2102         e100_watchdog((unsigned long) nic);
       
  2103         nic->ec_watchdog_jiffies = jiffies;
       
  2104     }
       
  2105 }
       
  2106 
       
  2107 static int e100_poll(struct net_device *netdev, int *budget)
       
  2108 {
       
  2109 	struct nic *nic = netdev_priv(netdev);
       
  2110 	unsigned int work_to_do = min(netdev->quota, *budget);
       
  2111 	unsigned int work_done = 0;
       
  2112 	int tx_cleaned;
       
  2113 
       
  2114 	e100_rx_clean(nic, &work_done, work_to_do);
       
  2115 	tx_cleaned = e100_tx_clean(nic);
       
  2116 
       
  2117 	/* If no Rx and Tx cleanup work was done, exit polling mode. */
       
  2118 	if((!tx_cleaned && (work_done == 0)) || !netif_running(netdev)) {
       
  2119 		netif_rx_complete(netdev);
       
  2120 		e100_enable_irq(nic);
       
  2121 		return 0;
       
  2122 	}
       
  2123 
       
  2124 	*budget -= work_done;
       
  2125 	netdev->quota -= work_done;
       
  2126 
       
  2127 	return 1;
       
  2128 }
       
  2129 
       
  2130 #ifdef CONFIG_NET_POLL_CONTROLLER
       
  2131 static void e100_netpoll(struct net_device *netdev)
       
  2132 {
       
  2133 	struct nic *nic = netdev_priv(netdev);
       
  2134 
       
  2135     if (nic->ecdev)
       
  2136         return;
       
  2137 
       
  2138 	e100_disable_irq(nic);
       
  2139 	e100_intr(nic->pdev->irq, netdev);
       
  2140 	e100_tx_clean(nic);
       
  2141 	e100_enable_irq(nic);
       
  2142 }
       
  2143 #endif
       
  2144 
       
  2145 static struct net_device_stats *e100_get_stats(struct net_device *netdev)
       
  2146 {
       
  2147 	struct nic *nic = netdev_priv(netdev);
       
  2148 	return &nic->net_stats;
       
  2149 }
       
  2150 
       
  2151 static int e100_set_mac_address(struct net_device *netdev, void *p)
       
  2152 {
       
  2153 	struct nic *nic = netdev_priv(netdev);
       
  2154 	struct sockaddr *addr = p;
       
  2155 
       
  2156 	if (!is_valid_ether_addr(addr->sa_data))
       
  2157 		return -EADDRNOTAVAIL;
       
  2158 
       
  2159 	memcpy(netdev->dev_addr, addr->sa_data, netdev->addr_len);
       
  2160 	e100_exec_cb(nic, NULL, e100_setup_iaaddr);
       
  2161 
       
  2162 	return 0;
       
  2163 }
       
  2164 
       
  2165 static int e100_change_mtu(struct net_device *netdev, int new_mtu)
       
  2166 {
       
  2167 	if(new_mtu < ETH_ZLEN || new_mtu > ETH_DATA_LEN)
       
  2168 		return -EINVAL;
       
  2169 	netdev->mtu = new_mtu;
       
  2170 	return 0;
       
  2171 }
       
  2172 
       
  2173 static int e100_asf(struct nic *nic)
       
  2174 {
       
  2175 	/* ASF can be enabled from eeprom */
       
  2176 	return((nic->pdev->device >= 0x1050) && (nic->pdev->device <= 0x1057) &&
       
  2177 	   (nic->eeprom[eeprom_config_asf] & eeprom_asf) &&
       
  2178 	   !(nic->eeprom[eeprom_config_asf] & eeprom_gcl) &&
       
  2179 	   ((nic->eeprom[eeprom_smbus_addr] & 0xFF) != 0xFE));
       
  2180 }
       
  2181 
       
/* Bring the interface up: allocate the RX and command-block rings,
 * initialize the hardware and start reception.  In EtherCAT mode
 * (nic->ecdev set) no IRQ, watchdog timer or net-stack queueing is
 * set up - the master drives the device by polling instead.
 * Returns 0 or a negative errno; partial allocations are undone via
 * the goto-cleanup chain below. */
static int e100_up(struct nic *nic)
{
	int err;

	if((err = e100_rx_alloc_list(nic)))
		return err;
	if((err = e100_alloc_cbs(nic)))
		goto err_rx_clean_list;
	if((err = e100_hw_init(nic)))
		goto err_clean_cbs;
	e100_set_multicast_list(nic->netdev);
	e100_start_receiver(nic, NULL);
	if (!nic->ecdev) {
		mod_timer(&nic->watchdog, jiffies);
		if((err = request_irq(nic->pdev->irq, e100_intr, IRQF_SHARED,
				nic->netdev->name, nic->netdev)))
			goto err_no_irq;
		netif_wake_queue(nic->netdev);
		netif_poll_enable(nic->netdev);
		/* enable ints _after_ enabling poll, preventing a race between
		 * disable ints+schedule */
		e100_enable_irq(nic);
	}
	return 0;

err_no_irq:
	if (!nic->ecdev)
		del_timer_sync(&nic->watchdog);
err_clean_cbs:
	e100_clean_cbs(nic);
err_rx_clean_list:
	e100_rx_clean_list(nic);
	return err;
}
       
  2216 
       
/* Tear the interface down in reverse order of e100_up().  The
 * net-stack / IRQ / watchdog steps are skipped in EtherCAT mode,
 * where they were never set up. */
static void e100_down(struct nic *nic)
{
	if (!nic->ecdev) {
		/* wait here for poll to complete */
		netif_poll_disable(nic->netdev);
		netif_stop_queue(nic->netdev);
	}
	e100_hw_reset(nic);
	if (!nic->ecdev) {
		free_irq(nic->pdev->irq, nic->netdev);
		del_timer_sync(&nic->watchdog);
		netif_carrier_off(nic->netdev);
	}
	e100_clean_cbs(nic);
	e100_rx_clean_list(nic);
}
       
  2233 
       
  2234 static void e100_tx_timeout(struct net_device *netdev)
       
  2235 {
       
  2236 	struct nic *nic = netdev_priv(netdev);
       
  2237 
       
  2238 	/* Reset outside of interrupt context, to avoid request_irq
       
  2239 	 * in interrupt context */
       
  2240 	schedule_work(&nic->tx_timeout_task);
       
  2241 }
       
  2242 
       
  2243 static void e100_tx_timeout_task(struct work_struct *work)
       
  2244 {
       
  2245 	struct nic *nic = container_of(work, struct nic, tx_timeout_task);
       
  2246 	struct net_device *netdev = nic->netdev;
       
  2247 
       
  2248 	DPRINTK(TX_ERR, DEBUG, "scb.status=0x%02X\n",
       
  2249 		readb(&nic->csr->scb.status));
       
  2250 	e100_down(netdev_priv(netdev));
       
  2251 	e100_up(netdev_priv(netdev));
       
  2252 }
       
  2253 
       
/* Run a MAC or PHY loopback self-test.  Returns 0 on success,
 * -ENOMEM on allocation failure, or -EAGAIN when the looped-back
 * frame does not match what was sent. */
static int e100_loopback_test(struct nic *nic, enum loopback loopback_mode)
{
	int err;
	struct sk_buff *skb;

	/* Use driver resources to perform internal MAC or PHY
	 * loopback test.  A single packet is prepared and transmitted
	 * in loopback mode, and the test passes if the received
	 * packet compares byte-for-byte to the transmitted packet. */

	if((err = e100_rx_alloc_list(nic)))
		return err;
	if((err = e100_alloc_cbs(nic)))
		goto err_clean_rx;

	/* ICH PHY loopback is broken so do MAC loopback instead */
	if(nic->flags & ich && loopback_mode == lb_phy)
		loopback_mode = lb_mac;

	nic->loopback = loopback_mode;
	if((err = e100_hw_init(nic)))
		goto err_loopback_none;

	if(loopback_mode == lb_phy)
		mdio_write(nic->netdev, nic->mii.phy_id, MII_BMCR,
			BMCR_LOOPBACK);

	e100_start_receiver(nic, NULL);

	if(!(skb = netdev_alloc_skb(nic->netdev, ETH_DATA_LEN))) {
		err = -ENOMEM;
		goto err_loopback_none;
	}
	skb_put(skb, ETH_DATA_LEN);
	memset(skb->data, 0xFF, ETH_DATA_LEN);
	e100_xmit_frame(skb, nic->netdev);

	/* give the frame time to loop back into the RX ring */
	msleep(10);

	pci_dma_sync_single_for_cpu(nic->pdev, nic->rx_to_clean->dma_addr,
			RFD_BUF_LEN, PCI_DMA_FROMDEVICE);

	/* skip the RFD header; compare payload only */
	if(memcmp(nic->rx_to_clean->skb->data + sizeof(struct rfd),
	   skb->data, ETH_DATA_LEN))
		err = -EAGAIN;

err_loopback_none:
	/* undo PHY loopback (harmless if it was never set) */
	mdio_write(nic->netdev, nic->mii.phy_id, MII_BMCR, 0);
	nic->loopback = lb_none;
	e100_clean_cbs(nic);
	e100_hw_reset(nic);
err_clean_rx:
	e100_rx_clean_list(nic);
	return err;
}
       
  2309 
       
  2310 #define MII_LED_CONTROL	0x1B
       
  2311 static void e100_blink_led(unsigned long data)
       
  2312 {
       
  2313 	struct nic *nic = (struct nic *)data;
       
  2314 	enum led_state {
       
  2315 		led_on     = 0x01,
       
  2316 		led_off    = 0x04,
       
  2317 		led_on_559 = 0x05,
       
  2318 		led_on_557 = 0x07,
       
  2319 	};
       
  2320 
       
  2321 	nic->leds = (nic->leds & led_on) ? led_off :
       
  2322 		(nic->mac < mac_82559_D101M) ? led_on_557 : led_on_559;
       
  2323 	mdio_write(nic->netdev, nic->mii.phy_id, MII_LED_CONTROL, nic->leds);
       
  2324 	mod_timer(&nic->blink_timer, jiffies + HZ / 4);
       
  2325 }
       
  2326 
       
  2327 static int e100_get_settings(struct net_device *netdev, struct ethtool_cmd *cmd)
       
  2328 {
       
  2329 	struct nic *nic = netdev_priv(netdev);
       
  2330 	return mii_ethtool_gset(&nic->mii, cmd);
       
  2331 }
       
  2332 
       
  2333 static int e100_set_settings(struct net_device *netdev, struct ethtool_cmd *cmd)
       
  2334 {
       
  2335 	struct nic *nic = netdev_priv(netdev);
       
  2336 	int err;
       
  2337 
       
  2338 	mdio_write(netdev, nic->mii.phy_id, MII_BMCR, BMCR_RESET);
       
  2339 	err = mii_ethtool_sset(&nic->mii, cmd);
       
  2340 	e100_exec_cb(nic, NULL, e100_configure);
       
  2341 
       
  2342 	return err;
       
  2343 }
       
  2344 
       
  2345 static void e100_get_drvinfo(struct net_device *netdev,
       
  2346 	struct ethtool_drvinfo *info)
       
  2347 {
       
  2348 	struct nic *nic = netdev_priv(netdev);
       
  2349 	strcpy(info->driver, DRV_NAME);
       
  2350 	strcpy(info->version, DRV_VERSION);
       
  2351 	strcpy(info->fw_version, "N/A");
       
  2352 	strcpy(info->bus_info, pci_name(nic->pdev));
       
  2353 }
       
  2354 
       
  2355 static int e100_get_regs_len(struct net_device *netdev)
       
  2356 {
       
  2357 	struct nic *nic = netdev_priv(netdev);
       
  2358 #define E100_PHY_REGS		0x1C
       
  2359 #define E100_REGS_LEN		1 + E100_PHY_REGS + \
       
  2360 	sizeof(nic->mem->dump_buf) / sizeof(u32)
       
  2361 	return E100_REGS_LEN * sizeof(u32);
       
  2362 }
       
  2363 
       
/* ethtool register dump: SCB command/status snapshot in buff[0], MII
 * PHY registers E100_PHY_REGS..0 in buff[1..1 + E100_PHY_REGS], then
 * the controller dump buffer from buff[2 + E100_PHY_REGS] onward.
 * NOTE(review): this writes (2 + E100_PHY_REGS) u32 words plus
 * sizeof(dump_buf) bytes; e100_get_regs_len() must report at least
 * that much - verify the two stay in sync. */
static void e100_get_regs(struct net_device *netdev,
	struct ethtool_regs *regs, void *p)
{
	struct nic *nic = netdev_priv(netdev);
	u32 *buff = p;
	int i;

	/* version: 1 in the high byte, silicon revision in the low bits */
	regs->version = (1 << 24) | nic->rev_id;
	buff[0] = readb(&nic->csr->scb.cmd_hi) << 24 |
		readb(&nic->csr->scb.cmd_lo) << 16 |
		readw(&nic->csr->scb.status);
	for(i = E100_PHY_REGS; i >= 0; i--)
		buff[1 + E100_PHY_REGS - i] =
			mdio_read(netdev, nic->mii.phy_id, i);
	memset(nic->mem->dump_buf, 0, sizeof(nic->mem->dump_buf));
	/* ask the controller to dump its internal state, give it 10 ms */
	e100_exec_cb(nic, NULL, e100_dump);
	msleep(10);
	memcpy(&buff[2 + E100_PHY_REGS], nic->mem->dump_buf,
		sizeof(nic->mem->dump_buf));
}
       
  2384 
       
  2385 static void e100_get_wol(struct net_device *netdev, struct ethtool_wolinfo *wol)
       
  2386 {
       
  2387 	struct nic *nic = netdev_priv(netdev);
       
  2388 	wol->supported = (nic->mac >= mac_82558_D101_A4) ?  WAKE_MAGIC : 0;
       
  2389 	wol->wolopts = (nic->flags & wol_magic) ? WAKE_MAGIC : 0;
       
  2390 }
       
  2391 
       
  2392 static int e100_set_wol(struct net_device *netdev, struct ethtool_wolinfo *wol)
       
  2393 {
       
  2394 	struct nic *nic = netdev_priv(netdev);
       
  2395 
       
  2396 	if(wol->wolopts != WAKE_MAGIC && wol->wolopts != 0)
       
  2397 		return -EOPNOTSUPP;
       
  2398 
       
  2399 	if(wol->wolopts)
       
  2400 		nic->flags |= wol_magic;
       
  2401 	else
       
  2402 		nic->flags &= ~wol_magic;
       
  2403 
       
  2404 	e100_exec_cb(nic, NULL, e100_configure);
       
  2405 
       
  2406 	return 0;
       
  2407 }
       
  2408 
       
  2409 static u32 e100_get_msglevel(struct net_device *netdev)
       
  2410 {
       
  2411 	struct nic *nic = netdev_priv(netdev);
       
  2412 	return nic->msg_enable;
       
  2413 }
       
  2414 
       
  2415 static void e100_set_msglevel(struct net_device *netdev, u32 value)
       
  2416 {
       
  2417 	struct nic *nic = netdev_priv(netdev);
       
  2418 	nic->msg_enable = value;
       
  2419 }
       
  2420 
       
  2421 static int e100_nway_reset(struct net_device *netdev)
       
  2422 {
       
  2423 	struct nic *nic = netdev_priv(netdev);
       
  2424 	return mii_nway_restart(&nic->mii);
       
  2425 }
       
  2426 
       
  2427 static u32 e100_get_link(struct net_device *netdev)
       
  2428 {
       
  2429 	struct nic *nic = netdev_priv(netdev);
       
  2430 	return mii_link_ok(&nic->mii);
       
  2431 }
       
  2432 
       
  2433 static int e100_get_eeprom_len(struct net_device *netdev)
       
  2434 {
       
  2435 	struct nic *nic = netdev_priv(netdev);
       
  2436 	return nic->eeprom_wc << 1;
       
  2437 }
       
  2438 
       
  2439 #define E100_EEPROM_MAGIC	0x1234
       
  2440 static int e100_get_eeprom(struct net_device *netdev,
       
  2441 	struct ethtool_eeprom *eeprom, u8 *bytes)
       
  2442 {
       
  2443 	struct nic *nic = netdev_priv(netdev);
       
  2444 
       
  2445 	eeprom->magic = E100_EEPROM_MAGIC;
       
  2446 	memcpy(bytes, &((u8 *)nic->eeprom)[eeprom->offset], eeprom->len);
       
  2447 
       
  2448 	return 0;
       
  2449 }
       
  2450 
       
/* ethtool EEPROM write: update the in-memory shadow and flush the
 * touched words to the physical EEPROM.  ethtool validates offset/len
 * against e100_get_eeprom_len() before calling in. */
static int e100_set_eeprom(struct net_device *netdev,
	struct ethtool_eeprom *eeprom, u8 *bytes)
{
	struct nic *nic = netdev_priv(netdev);

	/* the magic guards against accidental writes by foreign tools */
	if(eeprom->magic != E100_EEPROM_MAGIC)
		return -EINVAL;

	memcpy(&((u8 *)nic->eeprom)[eeprom->offset], bytes, eeprom->len);

	/* the EEPROM is word-addressed: start at the first touched word;
	 * (len >> 1) + 1 words cover the range even for odd offset/len */
	return e100_eeprom_save(nic, eeprom->offset >> 1,
		(eeprom->len >> 1) + 1);
}
       
  2464 
       
  2465 static void e100_get_ringparam(struct net_device *netdev,
       
  2466 	struct ethtool_ringparam *ring)
       
  2467 {
       
  2468 	struct nic *nic = netdev_priv(netdev);
       
  2469 	struct param_range *rfds = &nic->params.rfds;
       
  2470 	struct param_range *cbs = &nic->params.cbs;
       
  2471 
       
  2472 	ring->rx_max_pending = rfds->max;
       
  2473 	ring->tx_max_pending = cbs->max;
       
  2474 	ring->rx_mini_max_pending = 0;
       
  2475 	ring->rx_jumbo_max_pending = 0;
       
  2476 	ring->rx_pending = rfds->count;
       
  2477 	ring->tx_pending = cbs->count;
       
  2478 	ring->rx_mini_pending = 0;
       
  2479 	ring->rx_jumbo_pending = 0;
       
  2480 }
       
  2481 
       
  2482 static int e100_set_ringparam(struct net_device *netdev,
       
  2483 	struct ethtool_ringparam *ring)
       
  2484 {
       
  2485 	struct nic *nic = netdev_priv(netdev);
       
  2486 	struct param_range *rfds = &nic->params.rfds;
       
  2487 	struct param_range *cbs = &nic->params.cbs;
       
  2488 
       
  2489 	if ((ring->rx_mini_pending) || (ring->rx_jumbo_pending))
       
  2490 		return -EINVAL;
       
  2491 
       
  2492 	if(netif_running(netdev))
       
  2493 		e100_down(nic);
       
  2494 	rfds->count = max(ring->rx_pending, rfds->min);
       
  2495 	rfds->count = min(rfds->count, rfds->max);
       
  2496 	cbs->count = max(ring->tx_pending, cbs->min);
       
  2497 	cbs->count = min(cbs->count, cbs->max);
       
  2498 	DPRINTK(DRV, INFO, "Ring Param settings: rx: %d, tx %d\n",
       
  2499 	        rfds->count, cbs->count);
       
  2500 	if(netif_running(netdev))
       
  2501 		e100_up(nic);
       
  2502 
       
  2503 	return 0;
       
  2504 }
       
  2505 
       
/* Names reported for the ethtool self-test results; the order must
 * match the data[] indices filled in by e100_diag_test(). */
static const char e100_gstrings_test[][ETH_GSTRING_LEN] = {
	"Link test     (on/offline)",
	"Eeprom test   (on/offline)",
	"Self test        (offline)",
	"Mac loopback     (offline)",
	"Phy loopback     (offline)",
};
#define E100_TEST_LEN	sizeof(e100_gstrings_test) / ETH_GSTRING_LEN
       
  2514 
       
  2515 static int e100_diag_test_count(struct net_device *netdev)
       
  2516 {
       
  2517 	return E100_TEST_LEN;
       
  2518 }
       
  2519 
       
/* ethtool self-test entry point.  The online tests (link, EEPROM)
 * always run; offline tests additionally take the interface down,
 * run self-/loopback tests and bring it back up.  A non-zero data[i]
 * marks test i as failed. */
static void e100_diag_test(struct net_device *netdev,
	struct ethtool_test *test, u64 *data)
{
	struct ethtool_cmd cmd;
	struct nic *nic = netdev_priv(netdev);
	int i, err;

	memset(data, 0, E100_TEST_LEN * sizeof(u64));
	data[0] = !mii_link_ok(&nic->mii);
	data[1] = e100_eeprom_load(nic);
	if(test->flags & ETH_TEST_FL_OFFLINE) {

		/* save speed, duplex & autoneg settings */
		err = mii_ethtool_gset(&nic->mii, &cmd);

		if(netif_running(netdev))
			e100_down(nic);
		data[2] = e100_self_test(nic);
		data[3] = e100_loopback_test(nic, lb_mac);
		data[4] = e100_loopback_test(nic, lb_phy);

		/* restore speed, duplex & autoneg settings */
		err = mii_ethtool_sset(&nic->mii, &cmd);

		if(netif_running(netdev))
			e100_up(nic);
	}
	for(i = 0; i < E100_TEST_LEN; i++)
		test->flags |= data[i] ? ETH_TEST_FL_FAILED : 0;

	/* give the PHY time to settle after the resets above */
	msleep_interruptible(4 * 1000);
}
       
  2552 
       
  2553 static int e100_phys_id(struct net_device *netdev, u32 data)
       
  2554 {
       
  2555 	struct nic *nic = netdev_priv(netdev);
       
  2556 
       
  2557 	if(!data || data > (u32)(MAX_SCHEDULE_TIMEOUT / HZ))
       
  2558 		data = (u32)(MAX_SCHEDULE_TIMEOUT / HZ);
       
  2559 	mod_timer(&nic->blink_timer, jiffies);
       
  2560 	msleep_interruptible(data * 1000);
       
  2561 	del_timer_sync(&nic->blink_timer);
       
  2562 	mdio_write(netdev, nic->mii.phy_id, MII_LED_CONTROL, 0);
       
  2563 
       
  2564 	return 0;
       
  2565 }
       
  2566 
       
/* Names for ethtool -S.  The first E100_NET_STATS_LEN entries mirror
 * the struct net_device_stats field order (e100_get_ethtool_stats()
 * copies them as an unsigned long array); the device-specific entries
 * follow in the same order used there. */
static const char e100_gstrings_stats[][ETH_GSTRING_LEN] = {
	"rx_packets", "tx_packets", "rx_bytes", "tx_bytes", "rx_errors",
	"tx_errors", "rx_dropped", "tx_dropped", "multicast", "collisions",
	"rx_length_errors", "rx_over_errors", "rx_crc_errors",
	"rx_frame_errors", "rx_fifo_errors", "rx_missed_errors",
	"tx_aborted_errors", "tx_carrier_errors", "tx_fifo_errors",
	"tx_heartbeat_errors", "tx_window_errors",
	/* device-specific stats */
	"tx_deferred", "tx_single_collisions", "tx_multi_collisions",
	"tx_flow_control_pause", "rx_flow_control_pause",
	"rx_flow_control_unsupported", "tx_tco_packets", "rx_tco_packets",
};
/* number of struct net_device_stats fields exported above */
#define E100_NET_STATS_LEN	21
#define E100_STATS_LEN	sizeof(e100_gstrings_stats) / ETH_GSTRING_LEN
       
  2581 
       
  2582 static int e100_get_stats_count(struct net_device *netdev)
       
  2583 {
       
  2584 	return E100_STATS_LEN;
       
  2585 }
       
  2586 
       
/* Fill data[] for ethtool -S: first the generic net_device_stats
 * counters, then the device-specific ones, in e100_gstrings_stats
 * order. */
static void e100_get_ethtool_stats(struct net_device *netdev,
	struct ethtool_stats *stats, u64 *data)
{
	struct nic *nic = netdev_priv(netdev);
	int i;

	/* NOTE(review): relies on struct net_device_stats laying out its
	 * first E100_NET_STATS_LEN fields as consecutive unsigned longs
	 * matching e100_gstrings_stats - verify when changing either. */
	for(i = 0; i < E100_NET_STATS_LEN; i++)
		data[i] = ((unsigned long *)&nic->net_stats)[i];

	data[i++] = nic->tx_deferred;
	data[i++] = nic->tx_single_collisions;
	data[i++] = nic->tx_multiple_collisions;
	data[i++] = nic->tx_fc_pause;
	data[i++] = nic->rx_fc_pause;
	data[i++] = nic->rx_fc_unsupported;
	data[i++] = nic->tx_tco_frames;
	data[i++] = nic->rx_tco_frames;
}
       
  2605 
       
  2606 static void e100_get_strings(struct net_device *netdev, u32 stringset, u8 *data)
       
  2607 {
       
  2608 	switch(stringset) {
       
  2609 	case ETH_SS_TEST:
       
  2610 		memcpy(data, *e100_gstrings_test, sizeof(e100_gstrings_test));
       
  2611 		break;
       
  2612 	case ETH_SS_STATS:
       
  2613 		memcpy(data, *e100_gstrings_stats, sizeof(e100_gstrings_stats));
       
  2614 		break;
       
  2615 	}
       
  2616 }
       
  2617 
       
/* ethtool entry points.  EtherCAT-claimed devices are never
 * registered with the net stack, so these only run for normally
 * registered ethX interfaces. */
static const struct ethtool_ops e100_ethtool_ops = {
	.get_settings		= e100_get_settings,
	.set_settings		= e100_set_settings,
	.get_drvinfo		= e100_get_drvinfo,
	.get_regs_len		= e100_get_regs_len,
	.get_regs		= e100_get_regs,
	.get_wol		= e100_get_wol,
	.set_wol		= e100_set_wol,
	.get_msglevel		= e100_get_msglevel,
	.set_msglevel		= e100_set_msglevel,
	.nway_reset		= e100_nway_reset,
	.get_link		= e100_get_link,
	.get_eeprom_len		= e100_get_eeprom_len,
	.get_eeprom		= e100_get_eeprom,
	.set_eeprom		= e100_set_eeprom,
	.get_ringparam		= e100_get_ringparam,
	.set_ringparam		= e100_set_ringparam,
	.self_test_count	= e100_diag_test_count,
	.self_test		= e100_diag_test,
	.get_strings		= e100_get_strings,
	.phys_id		= e100_phys_id,
	.get_stats_count	= e100_get_stats_count,
	.get_ethtool_stats	= e100_get_ethtool_stats,
	.get_perm_addr		= ethtool_op_get_perm_addr,
};
       
  2643 
       
  2644 static int e100_do_ioctl(struct net_device *netdev, struct ifreq *ifr, int cmd)
       
  2645 {
       
  2646 	struct nic *nic = netdev_priv(netdev);
       
  2647 
       
  2648 	return generic_mii_ioctl(&nic->mii, if_mii(ifr), cmd, NULL);
       
  2649 }
       
  2650 
       
  2651 static int e100_alloc(struct nic *nic)
       
  2652 {
       
  2653 	nic->mem = pci_alloc_consistent(nic->pdev, sizeof(struct mem),
       
  2654 		&nic->dma_addr);
       
  2655 	return nic->mem ? 0 : -ENOMEM;
       
  2656 }
       
  2657 
       
  2658 static void e100_free(struct nic *nic)
       
  2659 {
       
  2660 	if(nic->mem) {
       
  2661 		pci_free_consistent(nic->pdev, sizeof(struct mem),
       
  2662 			nic->mem, nic->dma_addr);
       
  2663 		nic->mem = NULL;
       
  2664 	}
       
  2665 }
       
  2666 
       
  2667 static int e100_open(struct net_device *netdev)
       
  2668 {
       
  2669 	struct nic *nic = netdev_priv(netdev);
       
  2670 	int err = 0;
       
  2671 
       
  2672     if (!nic->ecdev)
       
  2673         netif_carrier_off(netdev);
       
  2674 	if((err = e100_up(nic)))
       
  2675 		DPRINTK(IFUP, ERR, "Cannot open interface, aborting.\n");
       
  2676 	return err;
       
  2677 }
       
  2678 
       
  2679 static int e100_close(struct net_device *netdev)
       
  2680 {
       
  2681 	e100_down(netdev_priv(netdev));
       
  2682 	return 0;
       
  2683 }
       
  2684 
       
  2685 static int __devinit e100_probe(struct pci_dev *pdev,
       
  2686 	const struct pci_device_id *ent)
       
  2687 {
       
  2688 	struct net_device *netdev;
       
  2689 	struct nic *nic;
       
  2690 	int err;
       
  2691 
       
  2692 	if(!(netdev = alloc_etherdev(sizeof(struct nic)))) {
       
  2693 		if(((1 << debug) - 1) & NETIF_MSG_PROBE)
       
  2694 			printk(KERN_ERR PFX "Etherdev alloc failed, abort.\n");
       
  2695 		return -ENOMEM;
       
  2696 	}
       
  2697 
       
  2698 	netdev->open = e100_open;
       
  2699 	netdev->stop = e100_close;
       
  2700 	netdev->hard_start_xmit = e100_xmit_frame;
       
  2701 	netdev->get_stats = e100_get_stats;
       
  2702 	netdev->set_multicast_list = e100_set_multicast_list;
       
  2703 	netdev->set_mac_address = e100_set_mac_address;
       
  2704 	netdev->change_mtu = e100_change_mtu;
       
  2705 	netdev->do_ioctl = e100_do_ioctl;
       
  2706 	SET_ETHTOOL_OPS(netdev, &e100_ethtool_ops);
       
  2707 	netdev->tx_timeout = e100_tx_timeout;
       
  2708 	netdev->watchdog_timeo = E100_WATCHDOG_PERIOD;
       
  2709 	netdev->poll = e100_poll;
       
  2710 	netdev->weight = E100_NAPI_WEIGHT;
       
  2711 #ifdef CONFIG_NET_POLL_CONTROLLER
       
  2712 	netdev->poll_controller = e100_netpoll;
       
  2713 #endif
       
  2714 	strncpy(netdev->name, pci_name(pdev), sizeof(netdev->name) - 1);
       
  2715 
       
  2716 	nic = netdev_priv(netdev);
       
  2717 	nic->netdev = netdev;
       
  2718 	nic->pdev = pdev;
       
  2719 	nic->msg_enable = (1 << debug) - 1;
       
  2720 	pci_set_drvdata(pdev, netdev);
       
  2721 
       
  2722 	if((err = pci_enable_device(pdev))) {
       
  2723 		DPRINTK(PROBE, ERR, "Cannot enable PCI device, aborting.\n");
       
  2724 		goto err_out_free_dev;
       
  2725 	}
       
  2726 
       
  2727 	if(!(pci_resource_flags(pdev, 0) & IORESOURCE_MEM)) {
       
  2728 		DPRINTK(PROBE, ERR, "Cannot find proper PCI device "
       
  2729 			"base address, aborting.\n");
       
  2730 		err = -ENODEV;
       
  2731 		goto err_out_disable_pdev;
       
  2732 	}
       
  2733 
       
  2734 	if((err = pci_request_regions(pdev, DRV_NAME))) {
       
  2735 		DPRINTK(PROBE, ERR, "Cannot obtain PCI resources, aborting.\n");
       
  2736 		goto err_out_disable_pdev;
       
  2737 	}
       
  2738 
       
  2739 	if((err = pci_set_dma_mask(pdev, DMA_32BIT_MASK))) {
       
  2740 		DPRINTK(PROBE, ERR, "No usable DMA configuration, aborting.\n");
       
  2741 		goto err_out_free_res;
       
  2742 	}
       
  2743 
       
  2744 	SET_MODULE_OWNER(netdev);
       
  2745 	SET_NETDEV_DEV(netdev, &pdev->dev);
       
  2746 
       
  2747 	nic->csr = ioremap(pci_resource_start(pdev, 0), sizeof(struct csr));
       
  2748 	if(!nic->csr) {
       
  2749 		DPRINTK(PROBE, ERR, "Cannot map device registers, aborting.\n");
       
  2750 		err = -ENOMEM;
       
  2751 		goto err_out_free_res;
       
  2752 	}
       
  2753 
       
  2754 	if(ent->driver_data)
       
  2755 		nic->flags |= ich;
       
  2756 	else
       
  2757 		nic->flags &= ~ich;
       
  2758 
       
  2759 	e100_get_defaults(nic);
       
  2760 
       
  2761 	/* locks must be initialized before calling hw_reset */
       
  2762 	spin_lock_init(&nic->cb_lock);
       
  2763 	spin_lock_init(&nic->cmd_lock);
       
  2764 	spin_lock_init(&nic->mdio_lock);
       
  2765 
       
  2766 	/* Reset the device before pci_set_master() in case device is in some
       
  2767 	 * funky state and has an interrupt pending - hint: we don't have the
       
  2768 	 * interrupt handler registered yet. */
       
  2769 	e100_hw_reset(nic);
       
  2770 
       
  2771 	pci_set_master(pdev);
       
  2772 
       
  2773 	init_timer(&nic->watchdog);
       
  2774 	nic->watchdog.function = e100_watchdog;
       
  2775 	nic->watchdog.data = (unsigned long)nic;
       
  2776 	init_timer(&nic->blink_timer);
       
  2777 	nic->blink_timer.function = e100_blink_led;
       
  2778 	nic->blink_timer.data = (unsigned long)nic;
       
  2779 
       
  2780 	INIT_WORK(&nic->tx_timeout_task, e100_tx_timeout_task);
       
  2781 
       
  2782 	if((err = e100_alloc(nic))) {
       
  2783 		DPRINTK(PROBE, ERR, "Cannot alloc driver memory, aborting.\n");
       
  2784 		goto err_out_iounmap;
       
  2785 	}
       
  2786 
       
  2787 	if((err = e100_eeprom_load(nic)))
       
  2788 		goto err_out_free;
       
  2789 
       
  2790 	e100_phy_init(nic);
       
  2791 
       
  2792 	memcpy(netdev->dev_addr, nic->eeprom, ETH_ALEN);
       
  2793 	memcpy(netdev->perm_addr, nic->eeprom, ETH_ALEN);
       
  2794 	if(!is_valid_ether_addr(netdev->perm_addr)) {
       
  2795 		DPRINTK(PROBE, ERR, "Invalid MAC address from "
       
  2796 			"EEPROM, aborting.\n");
       
  2797 		err = -EAGAIN;
       
  2798 		goto err_out_free;
       
  2799 	}
       
  2800 
       
  2801 	/* Wol magic packet can be enabled from eeprom */
       
  2802 	if((nic->mac >= mac_82558_D101_A4) &&
       
  2803 	   (nic->eeprom[eeprom_id] & eeprom_id_wol))
       
  2804 		nic->flags |= wol_magic;
       
  2805 
       
  2806 	/* ack any pending wake events, disable PME */
       
  2807 	err = pci_enable_wake(pdev, 0, 0);
       
  2808 	if (err)
       
  2809 		DPRINTK(PROBE, ERR, "Error clearing wake event\n");
       
  2810 
       
  2811 	// offer device to EtherCAT master module
       
  2812 	if (ecdev_offer(netdev, e100_ec_poll, THIS_MODULE, &nic->ecdev))
       
  2813 		goto err_out_free;
       
  2814 
       
  2815     if (nic->ecdev) {
       
  2816         strcpy(netdev->name, "ec0");
       
  2817 		if (ecdev_open(nic->ecdev)) {
       
  2818 			ecdev_withdraw(nic->ecdev);
       
  2819 			goto err_out_free;
       
  2820 		}
       
  2821 	} else {
       
  2822         strcpy(netdev->name, "eth%d");
       
  2823         if((err = register_netdev(netdev))) {
       
  2824             DPRINTK(PROBE, ERR, "Cannot register net device, aborting.\n");
       
  2825             goto err_out_free;
       
  2826         }
       
  2827     }
       
  2828 
       
  2829 	DPRINTK(PROBE, INFO, "addr 0x%llx, irq %d, "
       
  2830 		"MAC addr %02X:%02X:%02X:%02X:%02X:%02X\n",
       
  2831 		(unsigned long long)pci_resource_start(pdev, 0), pdev->irq,
       
  2832 		netdev->dev_addr[0], netdev->dev_addr[1], netdev->dev_addr[2],
       
  2833 		netdev->dev_addr[3], netdev->dev_addr[4], netdev->dev_addr[5]);
       
  2834 
       
  2835 	return 0;
       
  2836 
       
  2837 err_out_free:
       
  2838 	e100_free(nic);
       
  2839 err_out_iounmap:
       
  2840 	iounmap(nic->csr);
       
  2841 err_out_free_res:
       
  2842 	pci_release_regions(pdev);
       
  2843 err_out_disable_pdev:
       
  2844 	pci_disable_device(pdev);
       
  2845 err_out_free_dev:
       
  2846 	pci_set_drvdata(pdev, NULL);
       
  2847 	free_netdev(netdev);
       
  2848 	return err;
       
  2849 }
       
  2850 
       
/* PCI removal: detach from either the EtherCAT master or the net
 * stack, then release all resources acquired in e100_probe(). */
static void __devexit e100_remove(struct pci_dev *pdev)
{
	struct net_device *netdev = pci_get_drvdata(pdev);

	if(netdev) {
		struct nic *nic = netdev_priv(netdev);
		if (nic->ecdev) {
			/* claimed by EtherCAT: close and hand it back */
			ecdev_close(nic->ecdev);
			ecdev_withdraw(nic->ecdev);
		} else {
			unregister_netdev(netdev);
		}
		e100_free(nic);
		iounmap(nic->csr);
		free_netdev(netdev);
		pci_release_regions(pdev);
		pci_disable_device(pdev);
		pci_set_drvdata(pdev, NULL);
	}
}
       
  2871 
       
  2872 #ifdef CONFIG_PM
       
/**
 * e100_suspend - power-management suspend hook.
 * @pdev: pointer to the PCI device.
 * @state: target system power state (contract with the PCI core).
 *
 * Always returns 0.  In EtherCAT mode the master owns the device and
 * suspend is a no-op.  Otherwise the interface is quiesced, wake-on-LAN
 * is armed when configured, and the device is placed in D3hot.
 */
static int e100_suspend(struct pci_dev *pdev, pm_message_t state)
{
	struct net_device *netdev = pci_get_drvdata(pdev);
	struct nic *nic = netdev_priv(netdev);

	if (nic->ecdev)
		return 0;

	if (netif_running(netdev))
		netif_poll_disable(nic->netdev);
	del_timer_sync(&nic->watchdog);
	netif_carrier_off(nic->netdev);
	netif_device_detach(netdev);

	pci_save_state(pdev);

	/* Bitwise '|' (not '||'): both operands are evaluated
	 * unconditionally, so e100_asf() always runs even when the
	 * WoL magic flag is already set. */
	if ((nic->flags & wol_magic) | e100_asf(nic)) {
		pci_enable_wake(pdev, PCI_D3hot, 1);
		pci_enable_wake(pdev, PCI_D3cold, 1);
	} else {
		pci_enable_wake(pdev, PCI_D3hot, 0);
		pci_enable_wake(pdev, PCI_D3cold, 0);
	}

	/* NOTE(review): the IRQ is freed here after pci_disable_device()
	 * but never re-requested in e100_resume() below — presumably
	 * e100_up() re-requests it; verify against e100_up()/e100_open(). */
	pci_disable_device(pdev);
	free_irq(pdev->irq, netdev);
	pci_set_power_state(pdev, PCI_D3hot);

	return 0;
}
       
  2903 
       
  2904 static int e100_resume(struct pci_dev *pdev)
       
  2905 {
       
  2906 	struct net_device *netdev = pci_get_drvdata(pdev);
       
  2907 	struct nic *nic = netdev_priv(netdev);
       
  2908 
       
  2909 	if (nic->ecdev)
       
  2910 		return 0;
       
  2911 
       
  2912 	pci_set_power_state(pdev, PCI_D0);
       
  2913 	pci_restore_state(pdev);
       
  2914 	/* ack any pending wake events, disable PME */
       
  2915 	pci_enable_wake(pdev, 0, 0);
       
  2916 
       
  2917 	netif_device_attach(netdev);
       
  2918 	if (netif_running(netdev))
       
  2919 		e100_up(nic);
       
  2920 
       
  2921 	return 0;
       
  2922 }
       
  2923 #endif /* CONFIG_PM */
       
  2924 
       
/**
 * e100_shutdown - prepare the adapter for system shutdown/reboot.
 * @pdev: pointer to the PCI device.
 *
 * No-op in EtherCAT mode.  Otherwise quiesces the interface, arms
 * wake-on-LAN when configured, and puts the device into D3hot.  Same
 * WoL/ASF decision as e100_suspend(), minus state save and IRQ release.
 */
static void e100_shutdown(struct pci_dev *pdev)
{
	struct net_device *netdev = pci_get_drvdata(pdev);
	struct nic *nic = netdev_priv(netdev);

	if (nic->ecdev)
		return;

	if (netif_running(netdev))
		netif_poll_disable(nic->netdev);
	del_timer_sync(&nic->watchdog);
	netif_carrier_off(nic->netdev);

	/* Bitwise '|' evaluates both operands; see e100_suspend(). */
	if ((nic->flags & wol_magic) | e100_asf(nic)) {
		pci_enable_wake(pdev, PCI_D3hot, 1);
		pci_enable_wake(pdev, PCI_D3cold, 1);
	} else {
		pci_enable_wake(pdev, PCI_D3hot, 0);
		pci_enable_wake(pdev, PCI_D3cold, 0);
	}

	pci_disable_device(pdev);
	pci_set_power_state(pdev, PCI_D3hot);
}
       
  2949 
       
/* ------------------ PCI Error Recovery infrastructure  -------------- */
/**
 * e100_io_error_detected - called when PCI error is detected.
 * @pdev: Pointer to PCI device
 * @state: The current pci connection state
 *
 * Quiesces the device without touching the (possibly inaccessible)
 * adapter, then asks the PCI core for a slot reset.
 */
static pci_ers_result_t e100_io_error_detected(struct pci_dev *pdev, pci_channel_state_t state)
{
	struct net_device *netdev = pci_get_drvdata(pdev);
	struct nic *nic = netdev_priv(netdev);

	/* Similar to calling e100_down(), but avoids adapter I/O. */
	netdev->stop(netdev);

    if (!nic->ecdev) {
        /* Detach; put netif into state similar to hotplug unplug.
         * Poll is re-enabled because ->stop() disabled it; the detach
         * keeps the stack from touching the device meanwhile. */
        netif_poll_enable(netdev);
        netif_device_detach(netdev);
    }
	pci_disable_device(pdev);

	/* Request a slot reset. */
	return PCI_ERS_RESULT_NEED_RESET;
}
       
  2974 
       
  2975 /**
       
  2976  * e100_io_slot_reset - called after the pci bus has been reset.
       
  2977  * @pdev: Pointer to PCI device
       
  2978  *
       
  2979  * Restart the card from scratch.
       
  2980  */
       
  2981 static pci_ers_result_t e100_io_slot_reset(struct pci_dev *pdev)
       
  2982 {
       
  2983 	struct net_device *netdev = pci_get_drvdata(pdev);
       
  2984 	struct nic *nic = netdev_priv(netdev);
       
  2985 
       
  2986 	if (pci_enable_device(pdev)) {
       
  2987 		printk(KERN_ERR "e100: Cannot re-enable PCI device after reset.\n");
       
  2988 		return PCI_ERS_RESULT_DISCONNECT;
       
  2989 	}
       
  2990 	pci_set_master(pdev);
       
  2991 
       
  2992 	/* Only one device per card can do a reset */
       
  2993 	if (0 != PCI_FUNC(pdev->devfn))
       
  2994 		return PCI_ERS_RESULT_RECOVERED;
       
  2995 	e100_hw_reset(nic);
       
  2996 	e100_phy_init(nic);
       
  2997 
       
  2998 	return PCI_ERS_RESULT_RECOVERED;
       
  2999 }
       
  3000 
       
  3001 /**
       
  3002  * e100_io_resume - resume normal operations
       
  3003  * @pdev: Pointer to PCI device
       
  3004  *
       
  3005  * Resume normal operations after an error recovery
       
  3006  * sequence has been completed.
       
  3007  */
       
  3008 static void e100_io_resume(struct pci_dev *pdev)
       
  3009 {
       
  3010 	struct net_device *netdev = pci_get_drvdata(pdev);
       
  3011 	struct nic *nic = netdev_priv(netdev);
       
  3012 
       
  3013 	/* ack any pending wake events, disable PME */
       
  3014 	pci_enable_wake(pdev, 0, 0);
       
  3015 
       
  3016     if (!nic->ecdev)
       
  3017         netif_device_attach(netdev);
       
  3018 	if (nic->ecdev || netif_running(netdev)) {
       
  3019 		e100_open(netdev);
       
  3020 		if (!nic->ecdev)
       
  3021 			mod_timer(&nic->watchdog, jiffies);
       
  3022 	}
       
  3023 }
       
  3024 
       
  3025 static struct pci_error_handlers e100_err_handler = {
       
  3026 	.error_detected = e100_io_error_detected,
       
  3027 	.slot_reset = e100_io_slot_reset,
       
  3028 	.resume = e100_io_resume,
       
  3029 };
       
  3030 
       
  3031 static struct pci_driver e100_driver = {
       
  3032 	.name =         DRV_NAME,
       
  3033 	.id_table =     e100_id_table,
       
  3034 	.probe =        e100_probe,
       
  3035 	.remove =       __devexit_p(e100_remove),
       
  3036 #ifdef CONFIG_PM
       
  3037 	/* Power Management hooks */
       
  3038 	.suspend =      e100_suspend,
       
  3039 	.resume =       e100_resume,
       
  3040 #endif
       
  3041 	.shutdown =     e100_shutdown,
       
  3042 	.err_handler = &e100_err_handler,
       
  3043 };
       
  3044 
       
/**
 * e100_init_module - module entry point.
 *
 * Announces driver and master versions, then registers the PCI driver.
 * Returns the result of pci_register_driver() (0 on success).
 */
static int __init e100_init_module(void)
{
    printk(KERN_INFO DRV_NAME " " DRV_DESCRIPTION " " DRV_VERSION
            ", master " EC_MASTER_VERSION "\n");

	return pci_register_driver(&e100_driver);
}
       
  3052 
       
/**
 * e100_cleanup_module - module exit point.
 *
 * Unregisters the PCI driver, which triggers e100_remove() for every
 * bound device.
 */
static void __exit e100_cleanup_module(void)
{
	printk(KERN_INFO DRV_NAME " cleaning up module...\n");
	pci_unregister_driver(&e100_driver);
	printk(KERN_INFO DRV_NAME " module cleaned up.\n");
}
       
  3059 
       
/* Register module entry/exit points with the kernel. */
module_init(e100_init_module);
module_exit(e100_cleanup_module);