devices/e100-2.6.28-ethercat.c
changeset 1477 76a463df511b
child 1502 9715a5599590
equal deleted inserted replaced
1476:1da1c72fd495 1477:76a463df511b
       
     1 /******************************************************************************
       
     2  *
       
     3  *  $Id$
       
     4  *
       
     5  *  Copyright (C) 2007-2008  Florian Pose, Ingenieurgemeinschaft IgH
       
     6  *
       
     7  *  This file is part of the IgH EtherCAT Master.
       
     8  *
       
     9  *  The IgH EtherCAT Master is free software; you can redistribute it and/or
       
    10  *  modify it under the terms of the GNU General Public License version 2, as
       
    11  *  published by the Free Software Foundation.
       
    12  *
       
    13  *  The IgH EtherCAT Master is distributed in the hope that it will be useful,
       
    14  *  but WITHOUT ANY WARRANTY; without even the implied warranty of
       
    15  *  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General
       
    16  *  Public License for more details.
       
    17  *
       
    18  *  You should have received a copy of the GNU General Public License along
       
    19  *  with the IgH EtherCAT Master; if not, write to the Free Software
       
    20  *  Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA  02110-1301  USA
       
    21  *
       
    22  *  ---
       
    23  *
       
    24  *  The license mentioned above concerns the source code only. Using the
       
    25  *  EtherCAT technology and brand is only permitted in compliance with the
       
    26  *  industrial property and similar rights of Beckhoff Automation GmbH.
       
    27  *
       
    28  *  ---
       
    29  *
       
    30  *  vim: noexpandtab
       
    31  *
       
    32  *****************************************************************************/
       
    33 
       
    34 /**
       
    35    \file
       
    36    EtherCAT driver for e100-compatible NICs.
       
    37 */
       
    38 
       
    39 /* Former documentation: */
       
    40 
       
    41 /*******************************************************************************
       
    42 
       
    43   Intel PRO/100 Linux driver
       
    44   Copyright(c) 1999 - 2006 Intel Corporation.
       
    45 
       
    46   This program is free software; you can redistribute it and/or modify it
       
    47   under the terms and conditions of the GNU General Public License,
       
    48   version 2, as published by the Free Software Foundation.
       
    49 
       
    50   This program is distributed in the hope it will be useful, but WITHOUT
       
    51   ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
       
    52   FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
       
    53   more details.
       
    54 
       
    55   You should have received a copy of the GNU General Public License along with
       
    56   this program; if not, write to the Free Software Foundation, Inc.,
       
    57   51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
       
    58 
       
    59   The full GNU General Public License is included in this distribution in
       
    60   the file called "COPYING".
       
    61 
       
    62   Contact Information:
       
    63   Linux NICS <linux.nics@intel.com>
       
    64   e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
       
    65   Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
       
    66 
       
    67 *******************************************************************************/
       
    68 
       
    69 /*
       
    70  *	e100.c: Intel(R) PRO/100 ethernet driver
       
    71  *
       
    72  *	(Re)written 2003 by scott.feldman@intel.com.  Based loosely on
       
    73  *	original e100 driver, but better described as a munging of
       
    74  *	e100, e1000, eepro100, tg3, 8139cp, and other drivers.
       
    75  *
       
    76  *	References:
       
    77  *		Intel 8255x 10/100 Mbps Ethernet Controller Family,
       
    78  *		Open Source Software Developers Manual,
       
    79  *		http://sourceforge.net/projects/e1000
       
    80  *
       
    81  *
       
    82  *	                      Theory of Operation
       
    83  *
       
    84  *	I.   General
       
    85  *
       
    86  *	The driver supports Intel(R) 10/100 Mbps PCI Fast Ethernet
       
    87  *	controller family, which includes the 82557, 82558, 82559, 82550,
       
    88  *	82551, and 82562 devices.  82558 and greater controllers
       
    89  *	integrate the Intel 82555 PHY.  The controllers are used in
       
    90  *	server and client network interface cards, as well as in
       
    91  *	LAN-On-Motherboard (LOM), CardBus, MiniPCI, and ICHx
       
    92  *	configurations.  8255x supports a 32-bit linear addressing
       
    93  *	mode and operates at 33Mhz PCI clock rate.
       
    94  *
       
    95  *	II.  Driver Operation
       
    96  *
       
    97  *	Memory-mapped mode is used exclusively to access the device's
       
    98  *	shared-memory structure, the Control/Status Registers (CSR). All
       
    99  *	setup, configuration, and control of the device, including queuing
       
   100  *	of Tx, Rx, and configuration commands is through the CSR.
       
   101  *	cmd_lock serializes accesses to the CSR command register.  cb_lock
       
   102  *	protects the shared Command Block List (CBL).
       
   103  *
       
   104  *	8255x is highly MII-compliant and all access to the PHY go
       
   105  *	through the Management Data Interface (MDI).  Consequently, the
       
   106  *	driver leverages the mii.c library shared with other MII-compliant
       
   107  *	devices.
       
   108  *
       
   109  *	Big- and Little-Endian byte order as well as 32- and 64-bit
       
   110  *	archs are supported.  Weak-ordered memory and non-cache-coherent
       
   111  *	archs are supported.
       
   112  *
       
   113  *	III. Transmit
       
   114  *
       
   115  *	A Tx skb is mapped and hangs off of a TCB.  TCBs are linked
       
   116  *	together in a fixed-size ring (CBL) thus forming the flexible mode
       
   117  *	memory structure.  A TCB marked with the suspend-bit indicates
       
   118  *	the end of the ring.  The last TCB processed suspends the
       
   119  *	controller, and the controller can be restarted by issue a CU
       
   120  *	resume command to continue from the suspend point, or a CU start
       
   121  *	command to start at a given position in the ring.
       
   122  *
       
   123  *	Non-Tx commands (config, multicast setup, etc) are linked
       
   124  *	into the CBL ring along with Tx commands.  The common structure
       
   125  *	used for both Tx and non-Tx commands is the Command Block (CB).
       
   126  *
       
   127  *	cb_to_use is the next CB to use for queuing a command; cb_to_clean
       
   128  *	is the next CB to check for completion; cb_to_send is the first
       
   129  *	CB to start on in case of a previous failure to resume.  CB clean
       
   130  *	up happens in interrupt context in response to a CU interrupt.
       
   131  *	cbs_avail keeps track of number of free CB resources available.
       
   132  *
       
   133  * 	Hardware padding of short packets to minimum packet size is
       
   134  * 	enabled.  82557 pads with 7Eh, while the later controllers pad
       
   135  * 	with 00h.
       
   136  *
       
   137  *	IV.  Receive
       
   138  *
       
   139  *	The Receive Frame Area (RFA) comprises a ring of Receive Frame
       
   140  *	Descriptors (RFD) + data buffer, thus forming the simplified mode
       
   141  *	memory structure.  Rx skbs are allocated to contain both the RFD
       
   142  *	and the data buffer, but the RFD is pulled off before the skb is
       
   143  *	indicated.  The data buffer is aligned such that encapsulated
       
   144  *	protocol headers are u32-aligned.  Since the RFD is part of the
       
   145  *	mapped shared memory, and completion status is contained within
       
   146  *	the RFD, the RFD must be dma_sync'ed to maintain a consistent
       
   147  *	view from software and hardware.
       
   148  *
       
   149  *	In order to keep updates to the RFD link field from colliding with
       
   150  *	hardware writes to mark packets complete, we use the feature that
       
   151  *	hardware will not write to a size 0 descriptor and mark the previous
       
   152  *	packet as end-of-list (EL).   After updating the link, we remove EL
       
   153  *	and only then restore the size such that hardware may use the
       
   154  *	previous-to-end RFD.
       
   155  *
       
   156  *	Under typical operation, the  receive unit (RU) is start once,
       
   157  *	and the controller happily fills RFDs as frames arrive.  If
       
   158  *	replacement RFDs cannot be allocated, or the RU goes non-active,
       
   159  *	the RU must be restarted.  Frame arrival generates an interrupt,
       
   160  *	and Rx indication and re-allocation happen in the same context,
       
   161  *	therefore no locking is required.  A software-generated interrupt
       
   162  *	is generated from the watchdog to recover from a failed allocation
       
   163  *	scenario where all Rx resources have been indicated and none re-
       
   164  *	placed.
       
   165  *
       
   166  *	V.   Miscellaneous
       
   167  *
       
   168  * 	VLAN offloading of tagging, stripping and filtering is not
       
   169  * 	supported, but driver will accommodate the extra 4-byte VLAN tag
       
   170  * 	for processing by upper layers.  Tx/Rx Checksum offloading is not
       
   171  * 	supported.  Tx Scatter/Gather is not supported.  Jumbo Frames is
       
   172  * 	not supported (hardware limitation).
       
   173  *
       
   174  * 	MagicPacket(tm) WoL support is enabled/disabled via ethtool.
       
   175  *
       
   176  * 	Thanks to JC (jchapman@katalix.com) for helping with
       
   177  * 	testing/troubleshooting the development driver.
       
   178  *
       
   179  * 	TODO:
       
   180  * 	o several entry points race with dev->close
       
   181  * 	o check for tx-no-resources/stop Q races with tx clean/wake Q
       
   182  *
       
   183  *	FIXES:
       
   184  * 2005/12/02 - Michael O'Donnell <Michael.ODonnell at stratus dot com>
       
   185  *	- Stratus87247: protect MDI control register manipulations
       
   186  */
       
   187 
       
   188 #include <linux/module.h>
       
   189 #include <linux/moduleparam.h>
       
   190 #include <linux/kernel.h>
       
   191 #include <linux/types.h>
       
   192 #include <linux/slab.h>
       
   193 #include <linux/delay.h>
       
   194 #include <linux/init.h>
       
   195 #include <linux/pci.h>
       
   196 #include <linux/dma-mapping.h>
       
   197 #include <linux/netdevice.h>
       
   198 #include <linux/etherdevice.h>
       
   199 #include <linux/mii.h>
       
   200 #include <linux/if_vlan.h>
       
   201 #include <linux/skbuff.h>
       
   202 #include <linux/ethtool.h>
       
   203 #include <linux/string.h>
       
   204 #include <asm/unaligned.h>
       
   205 
       
   206 // EtherCAT includes
       
   207 #include "../globals.h"
       
   208 #include "ecdev.h"
       
   209 
       
   210 #define DRV_NAME		"ec_e100"
       
   211 
       
   212 #define DRV_EXT			"-NAPI"
       
   213 #define DRV_VERSION		"3.5.23-k6"DRV_EXT
       
   214 #define DRV_DESCRIPTION		"Intel(R) PRO/100 Network Driver"
       
   215 #define DRV_COPYRIGHT		"Copyright(c) 1999-2006 Intel Corporation"
       
   216 #define PFX			DRV_NAME ": "
       
   217 
       
   218 #define E100_WATCHDOG_PERIOD	(2 * HZ)
       
   219 #define E100_NAPI_WEIGHT	16
       
   220 
       
   221 MODULE_DESCRIPTION(DRV_DESCRIPTION);
       
   222 MODULE_AUTHOR(DRV_COPYRIGHT);
       
   223 MODULE_LICENSE("GPL");
       
   224 MODULE_VERSION(DRV_VERSION);
       
   225 
       
   226 MODULE_DESCRIPTION(DRV_DESCRIPTION);
       
   227 MODULE_AUTHOR("Mario Witkowski <mario.witkowski@w4systems.de>");
       
   228 MODULE_LICENSE("GPL");
       
   229 MODULE_VERSION(DRV_VERSION ", master " EC_MASTER_VERSION);
       
   230 
       
   231 void e100_ec_poll(struct net_device *);
       
   232 
       
   233 static int debug = 3;
       
   234 static int eeprom_bad_csum_allow = 0;
       
   235 static int use_io = 0;
       
   236 module_param(debug, int, 0);
       
   237 module_param(eeprom_bad_csum_allow, int, 0);
       
   238 module_param(use_io, int, 0);
       
   239 MODULE_PARM_DESC(debug, "Debug level (0=none,...,16=all)");
       
   240 MODULE_PARM_DESC(eeprom_bad_csum_allow, "Allow bad eeprom checksums");
       
   241 MODULE_PARM_DESC(use_io, "Force use of i/o access mode");
       
   242 #define DPRINTK(nlevel, klevel, fmt, args...) \
       
   243 	(void)((NETIF_MSG_##nlevel & nic->msg_enable) && \
       
   244 	printk(KERN_##klevel PFX "%s: %s: " fmt, nic->netdev->name, \
       
   245 		__func__ , ## args))
       
   246 
       
   247 #define INTEL_8255X_ETHERNET_DEVICE(device_id, ich) {\
       
   248 	PCI_VENDOR_ID_INTEL, device_id, PCI_ANY_ID, PCI_ANY_ID, \
       
   249 	PCI_CLASS_NETWORK_ETHERNET << 8, 0xFFFF00, ich }
       
   250 static struct pci_device_id e100_id_table[] = {
       
   251 	INTEL_8255X_ETHERNET_DEVICE(0x1029, 0),
       
   252 	INTEL_8255X_ETHERNET_DEVICE(0x1030, 0),
       
   253 	INTEL_8255X_ETHERNET_DEVICE(0x1031, 3),
       
   254 	INTEL_8255X_ETHERNET_DEVICE(0x1032, 3),
       
   255 	INTEL_8255X_ETHERNET_DEVICE(0x1033, 3),
       
   256 	INTEL_8255X_ETHERNET_DEVICE(0x1034, 3),
       
   257 	INTEL_8255X_ETHERNET_DEVICE(0x1038, 3),
       
   258 	INTEL_8255X_ETHERNET_DEVICE(0x1039, 4),
       
   259 	INTEL_8255X_ETHERNET_DEVICE(0x103A, 4),
       
   260 	INTEL_8255X_ETHERNET_DEVICE(0x103B, 4),
       
   261 	INTEL_8255X_ETHERNET_DEVICE(0x103C, 4),
       
   262 	INTEL_8255X_ETHERNET_DEVICE(0x103D, 4),
       
   263 	INTEL_8255X_ETHERNET_DEVICE(0x103E, 4),
       
   264 	INTEL_8255X_ETHERNET_DEVICE(0x1050, 5),
       
   265 	INTEL_8255X_ETHERNET_DEVICE(0x1051, 5),
       
   266 	INTEL_8255X_ETHERNET_DEVICE(0x1052, 5),
       
   267 	INTEL_8255X_ETHERNET_DEVICE(0x1053, 5),
       
   268 	INTEL_8255X_ETHERNET_DEVICE(0x1054, 5),
       
   269 	INTEL_8255X_ETHERNET_DEVICE(0x1055, 5),
       
   270 	INTEL_8255X_ETHERNET_DEVICE(0x1056, 5),
       
   271 	INTEL_8255X_ETHERNET_DEVICE(0x1057, 5),
       
   272 	INTEL_8255X_ETHERNET_DEVICE(0x1059, 0),
       
   273 	INTEL_8255X_ETHERNET_DEVICE(0x1064, 6),
       
   274 	INTEL_8255X_ETHERNET_DEVICE(0x1065, 6),
       
   275 	INTEL_8255X_ETHERNET_DEVICE(0x1066, 6),
       
   276 	INTEL_8255X_ETHERNET_DEVICE(0x1067, 6),
       
   277 	INTEL_8255X_ETHERNET_DEVICE(0x1068, 6),
       
   278 	INTEL_8255X_ETHERNET_DEVICE(0x1069, 6),
       
   279 	INTEL_8255X_ETHERNET_DEVICE(0x106A, 6),
       
   280 	INTEL_8255X_ETHERNET_DEVICE(0x106B, 6),
       
   281 	INTEL_8255X_ETHERNET_DEVICE(0x1091, 7),
       
   282 	INTEL_8255X_ETHERNET_DEVICE(0x1092, 7),
       
   283 	INTEL_8255X_ETHERNET_DEVICE(0x1093, 7),
       
   284 	INTEL_8255X_ETHERNET_DEVICE(0x1094, 7),
       
   285 	INTEL_8255X_ETHERNET_DEVICE(0x1095, 7),
       
   286 	INTEL_8255X_ETHERNET_DEVICE(0x1209, 0),
       
   287 	INTEL_8255X_ETHERNET_DEVICE(0x1229, 0),
       
   288 	INTEL_8255X_ETHERNET_DEVICE(0x2449, 2),
       
   289 	INTEL_8255X_ETHERNET_DEVICE(0x2459, 2),
       
   290 	INTEL_8255X_ETHERNET_DEVICE(0x245D, 2),
       
   291 	INTEL_8255X_ETHERNET_DEVICE(0x27DC, 7),
       
   292 	{ 0, }
       
   293 };
       
   294 
       
   295 // prevent from being loaded automatically
       
   296 //MODULE_DEVICE_TABLE(pci, e100_id_table);
       
   297 
       
   298 enum mac {
       
   299 	mac_82557_D100_A  = 0,
       
   300 	mac_82557_D100_B  = 1,
       
   301 	mac_82557_D100_C  = 2,
       
   302 	mac_82558_D101_A4 = 4,
       
   303 	mac_82558_D101_B0 = 5,
       
   304 	mac_82559_D101M   = 8,
       
   305 	mac_82559_D101S   = 9,
       
   306 	mac_82550_D102    = 12,
       
   307 	mac_82550_D102_C  = 13,
       
   308 	mac_82551_E       = 14,
       
   309 	mac_82551_F       = 15,
       
   310 	mac_82551_10      = 16,
       
   311 	mac_unknown       = 0xFF,
       
   312 };
       
   313 
       
   314 enum phy {
       
   315 	phy_100a     = 0x000003E0,
       
   316 	phy_100c     = 0x035002A8,
       
   317 	phy_82555_tx = 0x015002A8,
       
   318 	phy_nsc_tx   = 0x5C002000,
       
   319 	phy_82562_et = 0x033002A8,
       
   320 	phy_82562_em = 0x032002A8,
       
   321 	phy_82562_ek = 0x031002A8,
       
   322 	phy_82562_eh = 0x017002A8,
       
   323 	phy_unknown  = 0xFFFFFFFF,
       
   324 };
       
   325 
       
   326 /* CSR (Control/Status Registers) */
       
   327 struct csr {
       
   328 	struct {
       
   329 		u8 status;
       
   330 		u8 stat_ack;
       
   331 		u8 cmd_lo;
       
   332 		u8 cmd_hi;
       
   333 		u32 gen_ptr;
       
   334 	} scb;
       
   335 	u32 port;
       
   336 	u16 flash_ctrl;
       
   337 	u8 eeprom_ctrl_lo;
       
   338 	u8 eeprom_ctrl_hi;
       
   339 	u32 mdi_ctrl;
       
   340 	u32 rx_dma_count;
       
   341 };
       
   342 
       
   343 enum scb_status {
       
   344 	rus_no_res       = 0x08,
       
   345 	rus_ready        = 0x10,
       
   346 	rus_mask         = 0x3C,
       
   347 };
       
   348 
       
   349 enum ru_state  {
       
   350 	RU_SUSPENDED = 0,
       
   351 	RU_RUNNING	 = 1,
       
   352 	RU_UNINITIALIZED = -1,
       
   353 };
       
   354 
       
   355 enum scb_stat_ack {
       
   356 	stat_ack_not_ours    = 0x00,
       
   357 	stat_ack_sw_gen      = 0x04,
       
   358 	stat_ack_rnr         = 0x10,
       
   359 	stat_ack_cu_idle     = 0x20,
       
   360 	stat_ack_frame_rx    = 0x40,
       
   361 	stat_ack_cu_cmd_done = 0x80,
       
   362 	stat_ack_not_present = 0xFF,
       
   363 	stat_ack_rx = (stat_ack_sw_gen | stat_ack_rnr | stat_ack_frame_rx),
       
   364 	stat_ack_tx = (stat_ack_cu_idle | stat_ack_cu_cmd_done),
       
   365 };
       
   366 
       
   367 enum scb_cmd_hi {
       
   368 	irq_mask_none = 0x00,
       
   369 	irq_mask_all  = 0x01,
       
   370 	irq_sw_gen    = 0x02,
       
   371 };
       
   372 
       
   373 enum scb_cmd_lo {
       
   374 	cuc_nop        = 0x00,
       
   375 	ruc_start      = 0x01,
       
   376 	ruc_load_base  = 0x06,
       
   377 	cuc_start      = 0x10,
       
   378 	cuc_resume     = 0x20,
       
   379 	cuc_dump_addr  = 0x40,
       
   380 	cuc_dump_stats = 0x50,
       
   381 	cuc_load_base  = 0x60,
       
   382 	cuc_dump_reset = 0x70,
       
   383 };
       
   384 
       
   385 enum cuc_dump {
       
   386 	cuc_dump_complete       = 0x0000A005,
       
   387 	cuc_dump_reset_complete = 0x0000A007,
       
   388 };
       
   389 
       
   390 enum port {
       
   391 	software_reset  = 0x0000,
       
   392 	selftest        = 0x0001,
       
   393 	selective_reset = 0x0002,
       
   394 };
       
   395 
       
   396 enum eeprom_ctrl_lo {
       
   397 	eesk = 0x01,
       
   398 	eecs = 0x02,
       
   399 	eedi = 0x04,
       
   400 	eedo = 0x08,
       
   401 };
       
   402 
       
   403 enum mdi_ctrl {
       
   404 	mdi_write = 0x04000000,
       
   405 	mdi_read  = 0x08000000,
       
   406 	mdi_ready = 0x10000000,
       
   407 };
       
   408 
       
   409 enum eeprom_op {
       
   410 	op_write = 0x05,
       
   411 	op_read  = 0x06,
       
   412 	op_ewds  = 0x10,
       
   413 	op_ewen  = 0x13,
       
   414 };
       
   415 
       
   416 enum eeprom_offsets {
       
   417 	eeprom_cnfg_mdix  = 0x03,
       
   418 	eeprom_id         = 0x0A,
       
   419 	eeprom_config_asf = 0x0D,
       
   420 	eeprom_smbus_addr = 0x90,
       
   421 };
       
   422 
       
   423 enum eeprom_cnfg_mdix {
       
   424 	eeprom_mdix_enabled = 0x0080,
       
   425 };
       
   426 
       
   427 enum eeprom_id {
       
   428 	eeprom_id_wol = 0x0020,
       
   429 };
       
   430 
       
   431 enum eeprom_config_asf {
       
   432 	eeprom_asf = 0x8000,
       
   433 	eeprom_gcl = 0x4000,
       
   434 };
       
   435 
       
   436 enum cb_status {
       
   437 	cb_complete = 0x8000,
       
   438 	cb_ok       = 0x2000,
       
   439 };
       
   440 
       
   441 enum cb_command {
       
   442 	cb_nop    = 0x0000,
       
   443 	cb_iaaddr = 0x0001,
       
   444 	cb_config = 0x0002,
       
   445 	cb_multi  = 0x0003,
       
   446 	cb_tx     = 0x0004,
       
   447 	cb_ucode  = 0x0005,
       
   448 	cb_dump   = 0x0006,
       
   449 	cb_tx_sf  = 0x0008,
       
   450 	cb_cid    = 0x1f00,
       
   451 	cb_i      = 0x2000,
       
   452 	cb_s      = 0x4000,
       
   453 	cb_el     = 0x8000,
       
   454 };
       
   455 
       
   456 struct rfd {
       
   457 	__le16 status;
       
   458 	__le16 command;
       
   459 	__le32 link;
       
   460 	__le32 rbd;
       
   461 	__le16 actual_size;
       
   462 	__le16 size;
       
   463 };
       
   464 
       
   465 struct rx {
       
   466 	struct rx *next, *prev;
       
   467 	struct sk_buff *skb;
       
   468 	dma_addr_t dma_addr;
       
   469 };
       
   470 
       
   471 #if defined(__BIG_ENDIAN_BITFIELD)
       
   472 #define X(a,b)	b,a
       
   473 #else
       
   474 #define X(a,b)	a,b
       
   475 #endif
       
   476 struct config {
       
   477 /*0*/	u8 X(byte_count:6, pad0:2);
       
   478 /*1*/	u8 X(X(rx_fifo_limit:4, tx_fifo_limit:3), pad1:1);
       
   479 /*2*/	u8 adaptive_ifs;
       
   480 /*3*/	u8 X(X(X(X(mwi_enable:1, type_enable:1), read_align_enable:1),
       
   481 	   term_write_cache_line:1), pad3:4);
       
   482 /*4*/	u8 X(rx_dma_max_count:7, pad4:1);
       
   483 /*5*/	u8 X(tx_dma_max_count:7, dma_max_count_enable:1);
       
   484 /*6*/	u8 X(X(X(X(X(X(X(late_scb_update:1, direct_rx_dma:1),
       
   485 	   tno_intr:1), cna_intr:1), standard_tcb:1), standard_stat_counter:1),
       
   486 	   rx_discard_overruns:1), rx_save_bad_frames:1);
       
   487 /*7*/	u8 X(X(X(X(X(rx_discard_short_frames:1, tx_underrun_retry:2),
       
   488 	   pad7:2), rx_extended_rfd:1), tx_two_frames_in_fifo:1),
       
   489 	   tx_dynamic_tbd:1);
       
   490 /*8*/	u8 X(X(mii_mode:1, pad8:6), csma_disabled:1);
       
   491 /*9*/	u8 X(X(X(X(X(rx_tcpudp_checksum:1, pad9:3), vlan_arp_tco:1),
       
   492 	   link_status_wake:1), arp_wake:1), mcmatch_wake:1);
       
   493 /*10*/	u8 X(X(X(pad10:3, no_source_addr_insertion:1), preamble_length:2),
       
   494 	   loopback:2);
       
   495 /*11*/	u8 X(linear_priority:3, pad11:5);
       
   496 /*12*/	u8 X(X(linear_priority_mode:1, pad12:3), ifs:4);
       
   497 /*13*/	u8 ip_addr_lo;
       
   498 /*14*/	u8 ip_addr_hi;
       
   499 /*15*/	u8 X(X(X(X(X(X(X(promiscuous_mode:1, broadcast_disabled:1),
       
   500 	   wait_after_win:1), pad15_1:1), ignore_ul_bit:1), crc_16_bit:1),
       
   501 	   pad15_2:1), crs_or_cdt:1);
       
   502 /*16*/	u8 fc_delay_lo;
       
   503 /*17*/	u8 fc_delay_hi;
       
   504 /*18*/	u8 X(X(X(X(X(rx_stripping:1, tx_padding:1), rx_crc_transfer:1),
       
   505 	   rx_long_ok:1), fc_priority_threshold:3), pad18:1);
       
   506 /*19*/	u8 X(X(X(X(X(X(X(addr_wake:1, magic_packet_disable:1),
       
   507 	   fc_disable:1), fc_restop:1), fc_restart:1), fc_reject:1),
       
   508 	   full_duplex_force:1), full_duplex_pin:1);
       
   509 /*20*/	u8 X(X(X(pad20_1:5, fc_priority_location:1), multi_ia:1), pad20_2:1);
       
   510 /*21*/	u8 X(X(pad21_1:3, multicast_all:1), pad21_2:4);
       
   511 /*22*/	u8 X(X(rx_d102_mode:1, rx_vlan_drop:1), pad22:6);
       
   512 	u8 pad_d102[9];
       
   513 };
       
   514 
       
   515 #define E100_MAX_MULTICAST_ADDRS	64
       
   516 struct multi {
       
   517 	__le16 count;
       
   518 	u8 addr[E100_MAX_MULTICAST_ADDRS * ETH_ALEN + 2/*pad*/];
       
   519 };
       
   520 
       
   521 /* Important: keep total struct u32-aligned */
       
   522 #define UCODE_SIZE			134
       
   523 struct cb {
       
   524 	__le16 status;
       
   525 	__le16 command;
       
   526 	__le32 link;
       
   527 	union {
       
   528 		u8 iaaddr[ETH_ALEN];
       
   529 		__le32 ucode[UCODE_SIZE];
       
   530 		struct config config;
       
   531 		struct multi multi;
       
   532 		struct {
       
   533 			u32 tbd_array;
       
   534 			u16 tcb_byte_count;
       
   535 			u8 threshold;
       
   536 			u8 tbd_count;
       
   537 			struct {
       
   538 				__le32 buf_addr;
       
   539 				__le16 size;
       
   540 				u16 eol;
       
   541 			} tbd;
       
   542 		} tcb;
       
   543 		__le32 dump_buffer_addr;
       
   544 	} u;
       
   545 	struct cb *next, *prev;
       
   546 	dma_addr_t dma_addr;
       
   547 	struct sk_buff *skb;
       
   548 };
       
   549 
       
   550 enum loopback {
       
   551 	lb_none = 0, lb_mac = 1, lb_phy = 3,
       
   552 };
       
   553 
       
   554 struct stats {
       
   555 	__le32 tx_good_frames, tx_max_collisions, tx_late_collisions,
       
   556 		tx_underruns, tx_lost_crs, tx_deferred, tx_single_collisions,
       
   557 		tx_multiple_collisions, tx_total_collisions;
       
   558 	__le32 rx_good_frames, rx_crc_errors, rx_alignment_errors,
       
   559 		rx_resource_errors, rx_overrun_errors, rx_cdt_errors,
       
   560 		rx_short_frame_errors;
       
   561 	__le32 fc_xmt_pause, fc_rcv_pause, fc_rcv_unsupported;
       
   562 	__le16 xmt_tco_frames, rcv_tco_frames;
       
   563 	__le32 complete;
       
   564 };
       
   565 
       
   566 struct mem {
       
   567 	struct {
       
   568 		u32 signature;
       
   569 		u32 result;
       
   570 	} selftest;
       
   571 	struct stats stats;
       
   572 	u8 dump_buf[596];
       
   573 };
       
   574 
       
   575 struct param_range {
       
   576 	u32 min;
       
   577 	u32 max;
       
   578 	u32 count;
       
   579 };
       
   580 
       
   581 struct params {
       
   582 	struct param_range rfds;
       
   583 	struct param_range cbs;
       
   584 };
       
   585 
       
   586 struct nic {
       
   587 	/* Begin: frequently used values: keep adjacent for cache effect */
       
   588 	u32 msg_enable				____cacheline_aligned;
       
   589 	struct net_device *netdev;
       
   590 	struct pci_dev *pdev;
       
   591 
       
   592 	struct rx *rxs				____cacheline_aligned;
       
   593 	struct rx *rx_to_use;
       
   594 	struct rx *rx_to_clean;
       
   595 	struct rfd blank_rfd;
       
   596 	enum ru_state ru_running;
       
   597 
       
   598 	spinlock_t cb_lock			____cacheline_aligned;
       
   599 	spinlock_t cmd_lock;
       
   600 	struct csr __iomem *csr;
       
   601 	enum scb_cmd_lo cuc_cmd;
       
   602 	unsigned int cbs_avail;
       
   603 	struct napi_struct napi;
       
   604 	struct cb *cbs;
       
   605 	struct cb *cb_to_use;
       
   606 	struct cb *cb_to_send;
       
   607 	struct cb *cb_to_clean;
       
   608 	__le16 tx_command;
       
   609 	/* End: frequently used values: keep adjacent for cache effect */
       
   610 
       
   611 	enum {
       
   612 		ich                = (1 << 0),
       
   613 		promiscuous        = (1 << 1),
       
   614 		multicast_all      = (1 << 2),
       
   615 		wol_magic          = (1 << 3),
       
   616 		ich_10h_workaround = (1 << 4),
       
   617 	} flags					____cacheline_aligned;
       
   618 
       
   619 	enum mac mac;
       
   620 	enum phy phy;
       
   621 	struct params params;
       
   622 	struct timer_list watchdog;
       
   623 	struct timer_list blink_timer;
       
   624 	struct mii_if_info mii;
       
   625 	struct work_struct tx_timeout_task;
       
   626 	enum loopback loopback;
       
   627 
       
   628 	struct mem *mem;
       
   629 	dma_addr_t dma_addr;
       
   630 
       
   631 	dma_addr_t cbs_dma_addr;
       
   632 	u8 adaptive_ifs;
       
   633 	u8 tx_threshold;
       
   634 	u32 tx_frames;
       
   635 	u32 tx_collisions;
       
   636 	u32 tx_deferred;
       
   637 	u32 tx_single_collisions;
       
   638 	u32 tx_multiple_collisions;
       
   639 	u32 tx_fc_pause;
       
   640 	u32 tx_tco_frames;
       
   641 
       
   642 	u32 rx_fc_pause;
       
   643 	u32 rx_fc_unsupported;
       
   644 	u32 rx_tco_frames;
       
   645 	u32 rx_over_length_errors;
       
   646 
       
   647 	u16 leds;
       
   648 	u16 eeprom_wc;
       
   649 	__le16 eeprom[256];
       
   650 	spinlock_t mdio_lock;
       
   651 
       
   652 	ec_device_t *ecdev;
       
   653 	unsigned long ec_watchdog_jiffies;
       
   654 };
       
   655 
       
   656 static inline void e100_write_flush(struct nic *nic)
       
   657 {
       
   658 	/* Flush previous PCI writes through intermediate bridges
       
   659 	 * by doing a benign read */
       
   660 	(void)ioread8(&nic->csr->scb.status);
       
   661 }
       
   662 
       
   663 static void e100_enable_irq(struct nic *nic)
       
   664 {
       
   665 	unsigned long flags;
       
   666 
       
   667 	if (nic->ecdev)
       
   668 		return;
       
   669 
       
   670 	spin_lock_irqsave(&nic->cmd_lock, flags);
       
   671 	iowrite8(irq_mask_none, &nic->csr->scb.cmd_hi);
       
   672 	e100_write_flush(nic);
       
   673 	spin_unlock_irqrestore(&nic->cmd_lock, flags);
       
   674 }
       
   675 
       
   676 static void e100_disable_irq(struct nic *nic)
       
   677 {
       
   678 	unsigned long flags = 0;
       
   679 
       
   680 	if (!nic->ecdev)
       
   681 		spin_lock_irqsave(&nic->cmd_lock, flags);
       
   682 	iowrite8(irq_mask_all, &nic->csr->scb.cmd_hi);
       
   683 	e100_write_flush(nic);
       
   684 	if (!nic->ecdev)
       
   685 		spin_unlock_irqrestore(&nic->cmd_lock, flags);
       
   686 }
       
   687 
       
   688 static void e100_hw_reset(struct nic *nic)
       
   689 {
       
   690 	/* Put CU and RU into idle with a selective reset to get
       
   691 	 * device off of PCI bus */
       
   692 	iowrite32(selective_reset, &nic->csr->port);
       
   693 	e100_write_flush(nic); udelay(20);
       
   694 
       
   695 	/* Now fully reset device */
       
   696 	iowrite32(software_reset, &nic->csr->port);
       
   697 	e100_write_flush(nic); udelay(20);
       
   698 
       
   699 	/* Mask off our interrupt line - it's unmasked after reset */
       
   700 	e100_disable_irq(nic);
       
   701 }
       
   702 
       
/* Run the controller's built-in self-test via the PORT register.
 * Returns 0 on success, -ETIMEDOUT if the test reports failure or the
 * device never writes the result area. */
static int e100_self_test(struct nic *nic)
{
	u32 dma_addr = nic->dma_addr + offsetof(struct mem, selftest);

	/* Passing the self-test is a pretty good indication
	 * that the device can DMA to/from host memory */

	/* Seed the result area so we can tell what the device wrote:
	 * signature stays 0 on timeout, result stays all-ones on failure. */
	nic->mem->selftest.signature = 0;
	nic->mem->selftest.result = 0xFFFFFFFF;

	iowrite32(selftest | dma_addr, &nic->csr->port);
	e100_write_flush(nic);
	/* Wait 10 msec for self-test to complete */
	msleep(10);

	/* Interrupts are enabled after self-test */
	e100_disable_irq(nic);

	/* Check results of self-test */
	if(nic->mem->selftest.result != 0) {
		DPRINTK(HW, ERR, "Self-test failed: result=0x%08X\n",
			nic->mem->selftest.result);
		return -ETIMEDOUT;
	}
	if(nic->mem->selftest.signature == 0) {
		DPRINTK(HW, ERR, "Self-test failed: timed out\n");
		return -ETIMEDOUT;
	}

	return 0;
}
       
   734 
       
   735 static void e100_eeprom_write(struct nic *nic, u16 addr_len, u16 addr, __le16 data)
       
   736 {
       
   737 	u32 cmd_addr_data[3];
       
   738 	u8 ctrl;
       
   739 	int i, j;
       
   740 
       
   741 	/* Three cmds: write/erase enable, write data, write/erase disable */
       
   742 	cmd_addr_data[0] = op_ewen << (addr_len - 2);
       
   743 	cmd_addr_data[1] = (((op_write << addr_len) | addr) << 16) |
       
   744 		le16_to_cpu(data);
       
   745 	cmd_addr_data[2] = op_ewds << (addr_len - 2);
       
   746 
       
   747 	/* Bit-bang cmds to write word to eeprom */
       
   748 	for(j = 0; j < 3; j++) {
       
   749 
       
   750 		/* Chip select */
       
   751 		iowrite8(eecs | eesk, &nic->csr->eeprom_ctrl_lo);
       
   752 		e100_write_flush(nic); udelay(4);
       
   753 
       
   754 		for(i = 31; i >= 0; i--) {
       
   755 			ctrl = (cmd_addr_data[j] & (1 << i)) ?
       
   756 				eecs | eedi : eecs;
       
   757 			iowrite8(ctrl, &nic->csr->eeprom_ctrl_lo);
       
   758 			e100_write_flush(nic); udelay(4);
       
   759 
       
   760 			iowrite8(ctrl | eesk, &nic->csr->eeprom_ctrl_lo);
       
   761 			e100_write_flush(nic); udelay(4);
       
   762 		}
       
   763 		/* Wait 10 msec for cmd to complete */
       
   764 		msleep(10);
       
   765 
       
   766 		/* Chip deselect */
       
   767 		iowrite8(0, &nic->csr->eeprom_ctrl_lo);
       
   768 		e100_write_flush(nic); udelay(4);
       
   769 	}
       
   770 };
       
   771 
       
   772 /* General technique stolen from the eepro100 driver - very clever */
       
   773 static __le16 e100_eeprom_read(struct nic *nic, u16 *addr_len, u16 addr)
       
   774 {
       
   775 	u32 cmd_addr_data;
       
   776 	u16 data = 0;
       
   777 	u8 ctrl;
       
   778 	int i;
       
   779 
       
   780 	cmd_addr_data = ((op_read << *addr_len) | addr) << 16;
       
   781 
       
   782 	/* Chip select */
       
   783 	iowrite8(eecs | eesk, &nic->csr->eeprom_ctrl_lo);
       
   784 	e100_write_flush(nic); udelay(4);
       
   785 
       
   786 	/* Bit-bang to read word from eeprom */
       
   787 	for(i = 31; i >= 0; i--) {
       
   788 		ctrl = (cmd_addr_data & (1 << i)) ? eecs | eedi : eecs;
       
   789 		iowrite8(ctrl, &nic->csr->eeprom_ctrl_lo);
       
   790 		e100_write_flush(nic); udelay(4);
       
   791 
       
   792 		iowrite8(ctrl | eesk, &nic->csr->eeprom_ctrl_lo);
       
   793 		e100_write_flush(nic); udelay(4);
       
   794 
       
   795 		/* Eeprom drives a dummy zero to EEDO after receiving
       
   796 		 * complete address.  Use this to adjust addr_len. */
       
   797 		ctrl = ioread8(&nic->csr->eeprom_ctrl_lo);
       
   798 		if(!(ctrl & eedo) && i > 16) {
       
   799 			*addr_len -= (i - 16);
       
   800 			i = 17;
       
   801 		}
       
   802 
       
   803 		data = (data << 1) | (ctrl & eedo ? 1 : 0);
       
   804 	}
       
   805 
       
   806 	/* Chip deselect */
       
   807 	iowrite8(0, &nic->csr->eeprom_ctrl_lo);
       
   808 	e100_write_flush(nic); udelay(4);
       
   809 
       
   810 	return cpu_to_le16(data);
       
   811 };
       
   812 
       
   813 /* Load entire EEPROM image into driver cache and validate checksum */
       
/* Read the entire EEPROM into nic->eeprom and validate the checksum.
 * Returns 0 on success (or on a bad checksum when the
 * eeprom_bad_csum_allow module parameter is set), -EAGAIN otherwise. */
static int e100_eeprom_load(struct nic *nic)
{
	u16 addr, addr_len = 8, checksum = 0;

	/* Try reading with an 8-bit addr len to discover actual addr len */
	/* (the dummy read corrects addr_len through the pointer) */
	e100_eeprom_read(nic, &addr_len, 0);
	nic->eeprom_wc = 1 << addr_len;	/* word count */

	for(addr = 0; addr < nic->eeprom_wc; addr++) {
		nic->eeprom[addr] = e100_eeprom_read(nic, &addr_len, addr);
		/* last word holds the checksum - exclude it from the sum */
		if(addr < nic->eeprom_wc - 1)
			checksum += le16_to_cpu(nic->eeprom[addr]);
	}

	/* The checksum, stored in the last word, is calculated such that
	 * the sum of words should be 0xBABA */
	if (cpu_to_le16(0xBABA - checksum) != nic->eeprom[nic->eeprom_wc - 1]) {
		DPRINTK(PROBE, ERR, "EEPROM corrupted\n");
		if (!eeprom_bad_csum_allow)
			return -EAGAIN;
	}

	return 0;
}
       
   838 
       
   839 /* Save (portion of) driver EEPROM cache to device and update checksum */
       
/* Write words [start, start+count) of the driver's EEPROM cache to the
 * device, then recompute and write the checksum word (last word, chosen
 * so all words sum to 0xBABA).  Returns 0 on success, -EINVAL if the
 * range would touch the checksum word or run past the EEPROM. */
static int e100_eeprom_save(struct nic *nic, u16 start, u16 count)
{
	u16 addr, addr_len = 8, checksum = 0;

	/* Try reading with an 8-bit addr len to discover actual addr len */
	e100_eeprom_read(nic, &addr_len, 0);
	nic->eeprom_wc = 1 << addr_len;

	if(start + count >= nic->eeprom_wc)
		return -EINVAL;

	for(addr = start; addr < start + count; addr++)
		e100_eeprom_write(nic, addr_len, addr, nic->eeprom[addr]);

	/* The checksum, stored in the last word, is calculated such that
	 * the sum of words should be 0xBABA */
	for(addr = 0; addr < nic->eeprom_wc - 1; addr++)
		checksum += le16_to_cpu(nic->eeprom[addr]);
	nic->eeprom[nic->eeprom_wc - 1] = cpu_to_le16(0xBABA - checksum);
	e100_eeprom_write(nic, addr_len, nic->eeprom_wc - 1,
		nic->eeprom[nic->eeprom_wc - 1]);

	return 0;
}
       
   864 
       
#define E100_WAIT_SCB_TIMEOUT 20000 /* we might have to wait 100ms!!! */
#define E100_WAIT_SCB_FAST 20       /* delay like the old code */
/* Issue one SCB command to the controller, optionally loading gen_ptr
 * with @dma_addr first (skipped for cuc_resume, which ignores it).
 * Spins until the previous command is accepted (cmd_lo reads 0).
 * Returns 0 on success, -EAGAIN if the SCB never clears.
 * cmd_lock is skipped when the NIC is run by the EtherCAT master. */
static int e100_exec_cmd(struct nic *nic, u8 cmd, dma_addr_t dma_addr)
{
	unsigned long flags = 0;
	unsigned int i;
	int err = 0;

	if (!nic->ecdev)
		spin_lock_irqsave(&nic->cmd_lock, flags);

	/* Previous command is accepted when SCB clears */
	for(i = 0; i < E100_WAIT_SCB_TIMEOUT; i++) {
		if(likely(!ioread8(&nic->csr->scb.cmd_lo)))
			break;
		cpu_relax();
		/* spin hard for the first few iterations, then back off */
		if(unlikely(i > E100_WAIT_SCB_FAST))
			udelay(5);
	}
	if(unlikely(i == E100_WAIT_SCB_TIMEOUT)) {
		err = -EAGAIN;
		goto err_unlock;
	}

	if(unlikely(cmd != cuc_resume))
		iowrite32(dma_addr, &nic->csr->scb.gen_ptr);
	iowrite8(cmd, &nic->csr->scb.cmd_lo);

err_unlock:
	if (!nic->ecdev)
		spin_unlock_irqrestore(&nic->cmd_lock, flags);

	return err;
}
       
   899 
       
/* Take the next free command block, have @cb_prepare fill it in, and
 * kick the CU to process all not-yet-submitted CBs.
 * Returns 0 on success, -ENOMEM when no CB is free, or -ENOSPC when
 * this command consumed the last free CB (the command is still queued;
 * on a busy controller a reset is requested via tx_timeout_task).
 * cb_lock is skipped when the NIC is run by the EtherCAT master. */
static int e100_exec_cb(struct nic *nic, struct sk_buff *skb,
	void (*cb_prepare)(struct nic *, struct cb *, struct sk_buff *))
{
	struct cb *cb;
	unsigned long flags = 0;
	int err = 0;

	if (!nic->ecdev)
		spin_lock_irqsave(&nic->cb_lock, flags);

	if(unlikely(!nic->cbs_avail)) {
		err = -ENOMEM;
		goto err_unlock;
	}

	cb = nic->cb_to_use;
	nic->cb_to_use = cb->next;
	nic->cbs_avail--;
	cb->skb = skb;

	if(unlikely(!nic->cbs_avail))
		err = -ENOSPC;

	cb_prepare(nic, cb, skb);

	/* Order is important otherwise we'll be in a race with h/w:
	 * set S-bit in current first, then clear S-bit in previous. */
	cb->command |= cpu_to_le16(cb_s);
	wmb();
	cb->prev->command &= cpu_to_le16(~cb_s);

	while(nic->cb_to_send != nic->cb_to_use) {
		if(unlikely(e100_exec_cmd(nic, nic->cuc_cmd,
			nic->cb_to_send->dma_addr))) {
			/* Ok, here's where things get sticky.  It's
			 * possible that we can't schedule the command
			 * because the controller is too busy, so
			 * let's just queue the command and try again
			 * when another command is scheduled. */
			if(err == -ENOSPC) {
				//request a reset
				schedule_work(&nic->tx_timeout_task);
			}
			break;
		} else {
			/* after the first start, resume is sufficient */
			nic->cuc_cmd = cuc_resume;
			nic->cb_to_send = nic->cb_to_send->next;
		}
	}

err_unlock:
	if (!nic->ecdev)
		spin_unlock_irqrestore(&nic->cb_lock, flags);

	return err;
}
       
   956 
       
   957 static u16 mdio_ctrl(struct nic *nic, u32 addr, u32 dir, u32 reg, u16 data)
       
   958 {
       
   959 	u32 data_out = 0;
       
   960 	unsigned int i;
       
   961 	unsigned long flags = 0;
       
   962 
       
   963 
       
   964 	/*
       
   965 	 * Stratus87247: we shouldn't be writing the MDI control
       
   966 	 * register until the Ready bit shows True.  Also, since
       
   967 	 * manipulation of the MDI control registers is a multi-step
       
   968 	 * procedure it should be done under lock.
       
   969 	 */
       
   970 	if (!nic->ecdev)
       
   971 		spin_lock_irqsave(&nic->mdio_lock, flags);
       
   972 	for (i = 100; i; --i) {
       
   973 		if (ioread32(&nic->csr->mdi_ctrl) & mdi_ready)
       
   974 			break;
       
   975 		udelay(20);
       
   976 	}
       
   977 	if (unlikely(!i)) {
       
   978 		printk("e100.mdio_ctrl(%s) won't go Ready\n",
       
   979 			nic->netdev->name );
       
   980 		if (!nic->ecdev)
       
   981 			spin_unlock_irqrestore(&nic->mdio_lock, flags);
       
   982 		return 0;		/* No way to indicate timeout error */
       
   983 	}
       
   984 	iowrite32((reg << 16) | (addr << 21) | dir | data, &nic->csr->mdi_ctrl);
       
   985 
       
   986 	for (i = 0; i < 100; i++) {
       
   987 		udelay(20);
       
   988 		if ((data_out = ioread32(&nic->csr->mdi_ctrl)) & mdi_ready)
       
   989 			break;
       
   990 	}
       
   991 	if (!nic->ecdev)
       
   992 		spin_unlock_irqrestore(&nic->mdio_lock, flags);
       
   993 	DPRINTK(HW, DEBUG,
       
   994 		"%s:addr=%d, reg=%d, data_in=0x%04X, data_out=0x%04X\n",
       
   995 		dir == mdi_read ? "READ" : "WRITE", addr, reg, data, data_out);
       
   996 	return (u16)data_out;
       
   997 }
       
   998 
       
   999 static int mdio_read(struct net_device *netdev, int addr, int reg)
       
  1000 {
       
  1001 	return mdio_ctrl(netdev_priv(netdev), addr, mdi_read, reg, 0);
       
  1002 }
       
  1003 
       
  1004 static void mdio_write(struct net_device *netdev, int addr, int reg, int data)
       
  1005 {
       
  1006 	mdio_ctrl(netdev_priv(netdev), addr, mdi_write, reg, data);
       
  1007 }
       
  1008 
       
  1009 static void e100_get_defaults(struct nic *nic)
       
  1010 {
       
  1011 	struct param_range rfds = { .min = 16, .max = 256, .count = 256 };
       
  1012 	struct param_range cbs  = { .min = 64, .max = 256, .count = 128 };
       
  1013 
       
  1014 	/* MAC type is encoded as rev ID; exception: ICH is treated as 82559 */
       
  1015 	nic->mac = (nic->flags & ich) ? mac_82559_D101M : nic->pdev->revision;
       
  1016 	if(nic->mac == mac_unknown)
       
  1017 		nic->mac = mac_82557_D100_A;
       
  1018 
       
  1019 	nic->params.rfds = rfds;
       
  1020 	nic->params.cbs = cbs;
       
  1021 
       
  1022 	/* Quadwords to DMA into FIFO before starting frame transmit */
       
  1023 	nic->tx_threshold = 0xE0;
       
  1024 
       
  1025 	/* no interrupt for every tx completion, delay = 256us if not 557 */
       
  1026 	nic->tx_command = cpu_to_le16(cb_tx | cb_tx_sf |
       
  1027 		((nic->mac >= mac_82558_D101_A4) ? cb_cid : cb_i));
       
  1028 
       
  1029 	/* Template for a freshly allocated RFD */
       
  1030 	nic->blank_rfd.command = 0;
       
  1031 	nic->blank_rfd.rbd = cpu_to_le32(0xFFFFFFFF);
       
  1032 	nic->blank_rfd.size = cpu_to_le16(VLAN_ETH_FRAME_LEN);
       
  1033 
       
  1034 	/* MII setup */
       
  1035 	nic->mii.phy_id_mask = 0x1F;
       
  1036 	nic->mii.reg_num_mask = 0x1F;
       
  1037 	nic->mii.dev = nic->netdev;
       
  1038 	nic->mii.mdio_read = mdio_read;
       
  1039 	nic->mii.mdio_write = mdio_write;
       
  1040 }
       
  1041 
       
/* cb_prepare callback: build a cb_config command block programming the
 * controller's configure area.  The skb argument is unused.  Settings
 * vary with MAC revision and driver flags; for EtherCAT devices (and
 * whenever the interface is up), wake-on-LAN magic packets are off. */
static void e100_configure(struct nic *nic, struct cb *cb, struct sk_buff *skb)
{
	struct config *config = &cb->u.config;
	u8 *c = (u8 *)config;	/* byte view for the debug dump at the end */

	cb->command = cpu_to_le16(cb_config);

	memset(config, 0, sizeof(struct config));

	config->byte_count = 0x16;		/* bytes in this struct */
	config->rx_fifo_limit = 0x8;		/* bytes in FIFO before DMA */
	config->direct_rx_dma = 0x1;		/* reserved */
	config->standard_tcb = 0x1;		/* 1=standard, 0=extended */
	config->standard_stat_counter = 0x1;	/* 1=standard, 0=extended */
	config->rx_discard_short_frames = 0x1;	/* 1=discard, 0=pass */
	config->tx_underrun_retry = 0x3;	/* # of underrun retries */
	config->mii_mode = 0x1;			/* 1=MII mode, 0=503 mode */
	config->pad10 = 0x6;
	config->no_source_addr_insertion = 0x1;	/* 1=no, 0=yes */
	config->preamble_length = 0x2;		/* 0=1, 1=3, 2=7, 3=15 bytes */
	config->ifs = 0x6;			/* x16 = inter frame spacing */
	config->ip_addr_hi = 0xF2;		/* ARP IP filter - not used */
	config->pad15_1 = 0x1;
	config->pad15_2 = 0x1;
	config->crs_or_cdt = 0x0;		/* 0=CRS only, 1=CRS or CDT */
	config->fc_delay_hi = 0x40;		/* time delay for fc frame */
	config->tx_padding = 0x1;		/* 1=pad short frames */
	config->fc_priority_threshold = 0x7;	/* 7=priority fc disabled */
	config->pad18 = 0x1;
	config->full_duplex_pin = 0x1;		/* 1=examine FDX# pin */
	config->pad20_1 = 0x1F;
	config->fc_priority_location = 0x1;	/* 1=byte#31, 0=byte#19 */
	config->pad21_1 = 0x5;

	config->adaptive_ifs = nic->adaptive_ifs;
	config->loopback = nic->loopback;

	if(nic->mii.force_media && nic->mii.full_duplex)
		config->full_duplex_force = 0x1;	/* 1=force, 0=auto */

	if(nic->flags & promiscuous || nic->loopback) {
		config->rx_save_bad_frames = 0x1;	/* 1=save, 0=discard */
		config->rx_discard_short_frames = 0x0;	/* 1=discard, 0=save */
		config->promiscuous_mode = 0x1;		/* 1=on, 0=off */
	}

	if(nic->flags & multicast_all)
		config->multicast_all = 0x1;		/* 1=accept, 0=no */

	/* disable WoL when up */
	if (nic->ecdev ||
			(netif_running(nic->netdev) || !(nic->flags & wol_magic)))
		config->magic_packet_disable = 0x1;	/* 1=off, 0=on */

	/* revision-dependent extensions (82558 and newer) */
	if(nic->mac >= mac_82558_D101_A4) {
		config->fc_disable = 0x1;	/* 1=Tx fc off, 0=Tx fc on */
		config->mwi_enable = 0x1;	/* 1=enable, 0=disable */
		config->standard_tcb = 0x0;	/* 1=standard, 0=extended */
		config->rx_long_ok = 0x1;	/* 1=VLANs ok, 0=standard */
		if (nic->mac >= mac_82559_D101M) {
			config->tno_intr = 0x1;		/* TCO stats enable */
			/* Enable TCO in extended config */
			if (nic->mac >= mac_82551_10) {
				config->byte_count = 0x20; /* extended bytes */
				config->rx_d102_mode = 0x1; /* GMRC for TCO */
			}
		} else {
			config->standard_stat_counter = 0x0;
		}
	}

	DPRINTK(HW, DEBUG, "[00-07]=%02X:%02X:%02X:%02X:%02X:%02X:%02X:%02X\n",
		c[0], c[1], c[2], c[3], c[4], c[5], c[6], c[7]);
	DPRINTK(HW, DEBUG, "[08-15]=%02X:%02X:%02X:%02X:%02X:%02X:%02X:%02X\n",
		c[8], c[9], c[10], c[11], c[12], c[13], c[14], c[15]);
	DPRINTK(HW, DEBUG, "[16-23]=%02X:%02X:%02X:%02X:%02X:%02X:%02X:%02X\n",
		c[16], c[17], c[18], c[19], c[20], c[21], c[22], c[23]);
}
       
  1120 
       
  1121 /********************************************************/
       
  1122 /*  Micro code for 8086:1229 Rev 8                      */
       
  1123 /********************************************************/
       
  1124 
       
  1125 /*  Parameter values for the D101M B-step  */
       
  1126 #define D101M_CPUSAVER_TIMER_DWORD		78
       
  1127 #define D101M_CPUSAVER_BUNDLE_DWORD		65
       
  1128 #define D101M_CPUSAVER_MIN_SIZE_DWORD		126
       
  1129 
       
  1130 #define D101M_B_RCVBUNDLE_UCODE \
       
  1131 {\
       
  1132 0x00550215, 0xFFFF0437, 0xFFFFFFFF, 0x06A70789, 0xFFFFFFFF, 0x0558FFFF, \
       
  1133 0x000C0001, 0x00101312, 0x000C0008, 0x00380216, \
       
  1134 0x0010009C, 0x00204056, 0x002380CC, 0x00380056, \
       
  1135 0x0010009C, 0x00244C0B, 0x00000800, 0x00124818, \
       
  1136 0x00380438, 0x00000000, 0x00140000, 0x00380555, \
       
  1137 0x00308000, 0x00100662, 0x00100561, 0x000E0408, \
       
  1138 0x00134861, 0x000C0002, 0x00103093, 0x00308000, \
       
  1139 0x00100624, 0x00100561, 0x000E0408, 0x00100861, \
       
  1140 0x000C007E, 0x00222C21, 0x000C0002, 0x00103093, \
       
  1141 0x00380C7A, 0x00080000, 0x00103090, 0x00380C7A, \
       
  1142 0x00000000, 0x00000000, 0x00000000, 0x00000000, \
       
  1143 0x0010009C, 0x00244C2D, 0x00010004, 0x00041000, \
       
  1144 0x003A0437, 0x00044010, 0x0038078A, 0x00000000, \
       
  1145 0x00100099, 0x00206C7A, 0x0010009C, 0x00244C48, \
       
  1146 0x00130824, 0x000C0001, 0x00101213, 0x00260C75, \
       
  1147 0x00041000, 0x00010004, 0x00130826, 0x000C0006, \
       
  1148 0x002206A8, 0x0013C926, 0x00101313, 0x003806A8, \
       
  1149 0x00000000, 0x00000000, 0x00000000, 0x00000000, \
       
  1150 0x00000000, 0x00000000, 0x00000000, 0x00000000, \
       
  1151 0x00080600, 0x00101B10, 0x00050004, 0x00100826, \
       
  1152 0x00101210, 0x00380C34, 0x00000000, 0x00000000, \
       
  1153 0x0021155B, 0x00100099, 0x00206559, 0x0010009C, \
       
  1154 0x00244559, 0x00130836, 0x000C0000, 0x00220C62, \
       
  1155 0x000C0001, 0x00101B13, 0x00229C0E, 0x00210C0E, \
       
  1156 0x00226C0E, 0x00216C0E, 0x0022FC0E, 0x00215C0E, \
       
  1157 0x00214C0E, 0x00380555, 0x00010004, 0x00041000, \
       
  1158 0x00278C67, 0x00040800, 0x00018100, 0x003A0437, \
       
  1159 0x00130826, 0x000C0001, 0x00220559, 0x00101313, \
       
  1160 0x00380559, 0x00000000, 0x00000000, 0x00000000, \
       
  1161 0x00000000, 0x00000000, 0x00000000, 0x00000000, \
       
  1162 0x00000000, 0x00130831, 0x0010090B, 0x00124813, \
       
  1163 0x000CFF80, 0x002606AB, 0x00041000, 0x00010004, \
       
  1164 0x003806A8, 0x00000000, 0x00000000, 0x00000000, \
       
  1165 }
       
  1166 
       
  1167 /********************************************************/
       
  1168 /*  Micro code for 8086:1229 Rev 9                      */
       
  1169 /********************************************************/
       
  1170 
       
  1171 /*  Parameter values for the D101S  */
       
  1172 #define D101S_CPUSAVER_TIMER_DWORD		78
       
  1173 #define D101S_CPUSAVER_BUNDLE_DWORD		67
       
  1174 #define D101S_CPUSAVER_MIN_SIZE_DWORD		128
       
  1175 
       
  1176 #define D101S_RCVBUNDLE_UCODE \
       
  1177 {\
       
  1178 0x00550242, 0xFFFF047E, 0xFFFFFFFF, 0x06FF0818, 0xFFFFFFFF, 0x05A6FFFF, \
       
  1179 0x000C0001, 0x00101312, 0x000C0008, 0x00380243, \
       
  1180 0x0010009C, 0x00204056, 0x002380D0, 0x00380056, \
       
  1181 0x0010009C, 0x00244F8B, 0x00000800, 0x00124818, \
       
  1182 0x0038047F, 0x00000000, 0x00140000, 0x003805A3, \
       
  1183 0x00308000, 0x00100610, 0x00100561, 0x000E0408, \
       
  1184 0x00134861, 0x000C0002, 0x00103093, 0x00308000, \
       
  1185 0x00100624, 0x00100561, 0x000E0408, 0x00100861, \
       
  1186 0x000C007E, 0x00222FA1, 0x000C0002, 0x00103093, \
       
  1187 0x00380F90, 0x00080000, 0x00103090, 0x00380F90, \
       
  1188 0x00000000, 0x00000000, 0x00000000, 0x00000000, \
       
  1189 0x0010009C, 0x00244FAD, 0x00010004, 0x00041000, \
       
  1190 0x003A047E, 0x00044010, 0x00380819, 0x00000000, \
       
  1191 0x00100099, 0x00206FFD, 0x0010009A, 0x0020AFFD, \
       
  1192 0x0010009C, 0x00244FC8, 0x00130824, 0x000C0001, \
       
  1193 0x00101213, 0x00260FF7, 0x00041000, 0x00010004, \
       
  1194 0x00130826, 0x000C0006, 0x00220700, 0x0013C926, \
       
  1195 0x00101313, 0x00380700, 0x00000000, 0x00000000, \
       
  1196 0x00000000, 0x00000000, 0x00000000, 0x00000000, \
       
  1197 0x00080600, 0x00101B10, 0x00050004, 0x00100826, \
       
  1198 0x00101210, 0x00380FB6, 0x00000000, 0x00000000, \
       
  1199 0x002115A9, 0x00100099, 0x002065A7, 0x0010009A, \
       
  1200 0x0020A5A7, 0x0010009C, 0x002445A7, 0x00130836, \
       
  1201 0x000C0000, 0x00220FE4, 0x000C0001, 0x00101B13, \
       
  1202 0x00229F8E, 0x00210F8E, 0x00226F8E, 0x00216F8E, \
       
  1203 0x0022FF8E, 0x00215F8E, 0x00214F8E, 0x003805A3, \
       
  1204 0x00010004, 0x00041000, 0x00278FE9, 0x00040800, \
       
  1205 0x00018100, 0x003A047E, 0x00130826, 0x000C0001, \
       
  1206 0x002205A7, 0x00101313, 0x003805A7, 0x00000000, \
       
  1207 0x00000000, 0x00000000, 0x00000000, 0x00000000, \
       
  1208 0x00000000, 0x00000000, 0x00000000, 0x00130831, \
       
  1209 0x0010090B, 0x00124813, 0x000CFF80, 0x00260703, \
       
  1210 0x00041000, 0x00010004, 0x00380700  \
       
  1211 }
       
  1212 
       
  1213 /********************************************************/
       
  1214 /*  Micro code for the 8086:1229 Rev F/10               */
       
  1215 /********************************************************/
       
  1216 
       
  1217 /*  Parameter values for the D102 E-step  */
       
  1218 #define D102_E_CPUSAVER_TIMER_DWORD		42
       
  1219 #define D102_E_CPUSAVER_BUNDLE_DWORD		54
       
  1220 #define D102_E_CPUSAVER_MIN_SIZE_DWORD		46
       
  1221 
       
  1222 #define     D102_E_RCVBUNDLE_UCODE \
       
  1223 {\
       
  1224 0x007D028F, 0x0E4204F9, 0x14ED0C85, 0x14FA14E9, 0x0EF70E36, 0x1FFF1FFF, \
       
  1225 0x00E014B9, 0x00000000, 0x00000000, 0x00000000, \
       
  1226 0x00E014BD, 0x00000000, 0x00000000, 0x00000000, \
       
  1227 0x00E014D5, 0x00000000, 0x00000000, 0x00000000, \
       
  1228 0x00000000, 0x00000000, 0x00000000, 0x00000000, \
       
  1229 0x00E014C1, 0x00000000, 0x00000000, 0x00000000, \
       
  1230 0x00000000, 0x00000000, 0x00000000, 0x00000000, \
       
  1231 0x00000000, 0x00000000, 0x00000000, 0x00000000, \
       
  1232 0x00000000, 0x00000000, 0x00000000, 0x00000000, \
       
  1233 0x00E014C8, 0x00000000, 0x00000000, 0x00000000, \
       
  1234 0x00200600, 0x00E014EE, 0x00000000, 0x00000000, \
       
  1235 0x0030FF80, 0x00940E46, 0x00038200, 0x00102000, \
       
  1236 0x00E00E43, 0x00000000, 0x00000000, 0x00000000, \
       
  1237 0x00300006, 0x00E014FB, 0x00000000, 0x00000000, \
       
  1238 0x00000000, 0x00000000, 0x00000000, 0x00000000, \
       
  1239 0x00000000, 0x00000000, 0x00000000, 0x00000000, \
       
  1240 0x00000000, 0x00000000, 0x00000000, 0x00000000, \
       
  1241 0x00906E41, 0x00800E3C, 0x00E00E39, 0x00000000, \
       
  1242 0x00906EFD, 0x00900EFD, 0x00E00EF8, 0x00000000, \
       
  1243 0x00000000, 0x00000000, 0x00000000, 0x00000000, \
       
  1244 0x00000000, 0x00000000, 0x00000000, 0x00000000, \
       
  1245 0x00000000, 0x00000000, 0x00000000, 0x00000000, \
       
  1246 0x00000000, 0x00000000, 0x00000000, 0x00000000, \
       
  1247 0x00000000, 0x00000000, 0x00000000, 0x00000000, \
       
  1248 0x00000000, 0x00000000, 0x00000000, 0x00000000, \
       
  1249 0x00000000, 0x00000000, 0x00000000, 0x00000000, \
       
  1250 0x00000000, 0x00000000, 0x00000000, 0x00000000, \
       
  1251 0x00000000, 0x00000000, 0x00000000, 0x00000000, \
       
  1252 0x00000000, 0x00000000, 0x00000000, 0x00000000, \
       
  1253 0x00000000, 0x00000000, 0x00000000, 0x00000000, \
       
  1254 0x00000000, 0x00000000, 0x00000000, 0x00000000, \
       
  1255 0x00000000, 0x00000000, 0x00000000, 0x00000000, \
       
  1256 0x00000000, 0x00000000, 0x00000000, 0x00000000, \
       
  1257 }
       
  1258 
       
/* cb_prepare callback: build a cb_ucode command block loading the
 * CPUSaver receive-bundling microcode matching nic->mac, with the
 * tunable INTDELAY/BUNDLEMAX/BUNDLESMALL literals patched in.  The skb
 * argument is unused.  ICH devices and unknown revisions get a NOP CB.
 * NOTE(review): the patching writes into the static ucode_opts table,
 * so it persists across calls (idempotent, but shared state). */
static void e100_setup_ucode(struct nic *nic, struct cb *cb, struct sk_buff *skb)
{
/* *INDENT-OFF* */
	static struct {
		u32 ucode[UCODE_SIZE + 1];
		u8 mac;
		u8 timer_dword;
		u8 bundle_dword;
		u8 min_size_dword;
	} ucode_opts[] = {
		{ D101M_B_RCVBUNDLE_UCODE,
		  mac_82559_D101M,
		  D101M_CPUSAVER_TIMER_DWORD,
		  D101M_CPUSAVER_BUNDLE_DWORD,
		  D101M_CPUSAVER_MIN_SIZE_DWORD },
		{ D101S_RCVBUNDLE_UCODE,
		  mac_82559_D101S,
		  D101S_CPUSAVER_TIMER_DWORD,
		  D101S_CPUSAVER_BUNDLE_DWORD,
		  D101S_CPUSAVER_MIN_SIZE_DWORD },
		{ D102_E_RCVBUNDLE_UCODE,
		  mac_82551_F,
		  D102_E_CPUSAVER_TIMER_DWORD,
		  D102_E_CPUSAVER_BUNDLE_DWORD,
		  D102_E_CPUSAVER_MIN_SIZE_DWORD },
		{ D102_E_RCVBUNDLE_UCODE,
		  mac_82551_10,
		  D102_E_CPUSAVER_TIMER_DWORD,
		  D102_E_CPUSAVER_BUNDLE_DWORD,
		  D102_E_CPUSAVER_MIN_SIZE_DWORD },
		{ {0}, 0, 0, 0, 0}
	}, *opts;
/* *INDENT-ON* */

/*************************************************************************
*  CPUSaver parameters
*
*  All CPUSaver parameters are 16-bit literals that are part of a
*  "move immediate value" instruction.  By changing the value of
*  the literal in the instruction before the code is loaded, the
*  driver can change the algorithm.
*
*  INTDELAY - This loads the dead-man timer with its initial value.
*    When this timer expires the interrupt is asserted, and the
*    timer is reset each time a new packet is received.  (see
*    BUNDLEMAX below to set the limit on number of chained packets)
*    The current default is 0x600 or 1536.  Experiments show that
*    the value should probably stay within the 0x200 - 0x1000.
*
*  BUNDLEMAX -
*    This sets the maximum number of frames that will be bundled.  In
*    some situations, such as the TCP windowing algorithm, it may be
*    better to limit the growth of the bundle size than let it go as
*    high as it can, because that could cause too much added latency.
*    The default is six, because this is the number of packets in the
*    default TCP window size.  A value of 1 would make CPUSaver indicate
*    an interrupt for every frame received.  If you do not want to put
*    a limit on the bundle size, set this value to xFFFF.
*
*  BUNDLESMALL -
*    This contains a bit-mask describing the minimum size frame that
*    will be bundled.  The default masks the lower 7 bits, which means
*    that any frame less than 128 bytes in length will not be bundled,
*    but will instead immediately generate an interrupt.  This does
*    not affect the current bundle in any way.  Any frame that is 128
*    bytes or larger will be bundled normally.  This feature is meant
*    to provide immediate indication of ACK frames in a TCP environment.
*    Customers were seeing poor performance when a machine with CPUSaver
*    enabled was sending but not receiving.  The delay introduced when
*    the ACKs were received was enough to reduce total throughput, because
*    the sender would sit idle until the ACK was finally seen.
*
*    The current default is 0xFF80, which masks out the lower 7 bits.
*    This means that any frame which is x7F (127) bytes or smaller
*    will cause an immediate interrupt.  Because this value must be a
*    bit mask, there are only a few valid values that can be used.  To
*    turn this feature off, the driver can write the value xFFFF to the
*    lower word of this instruction (in the same way that the other
*    parameters are used).  Likewise, a value of 0xF800 (2047) would
*    cause an interrupt to be generated for every frame, because all
*    standard Ethernet frames are <= 2047 bytes in length.
*************************************************************************/

/* if you wish to disable the ucode functionality, while maintaining the
 * workarounds it provides, set the following defines to:
 * BUNDLESMALL 0
 * BUNDLEMAX 1
 * INTDELAY 1
 */
#define BUNDLESMALL 1
#define BUNDLEMAX (u16)6
#define INTDELAY (u16)1536 /* 0x600 */

	/* do not load u-code for ICH devices */
	if (nic->flags & ich)
		goto noloaducode;

	/* Search for ucode match against h/w revision */
	for (opts = ucode_opts; opts->mac; opts++) {
		int i;
		u32 *ucode = opts->ucode;
		if (nic->mac != opts->mac)
			continue;

		/* Insert user-tunable settings */
		ucode[opts->timer_dword] &= 0xFFFF0000;
		ucode[opts->timer_dword] |= INTDELAY;
		ucode[opts->bundle_dword] &= 0xFFFF0000;
		ucode[opts->bundle_dword] |= BUNDLEMAX;
		ucode[opts->min_size_dword] &= 0xFFFF0000;
		ucode[opts->min_size_dword] |= (BUNDLESMALL) ? 0xFFFF : 0xFF80;

		for (i = 0; i < UCODE_SIZE; i++)
			cb->u.ucode[i] = cpu_to_le32(ucode[i]);
		cb->command = cpu_to_le16(cb_ucode | cb_el);
		return;
	}

noloaducode:
	/* no microcode for this chip: issue a NOP so the CB still completes */
	cb->command = cpu_to_le16(cb_nop | cb_el);
}
       
  1380 
       
  1381 static inline int e100_exec_cb_wait(struct nic *nic, struct sk_buff *skb,
       
  1382 	void (*cb_prepare)(struct nic *, struct cb *, struct sk_buff *))
       
  1383 {
       
  1384 	int err = 0, counter = 50;
       
  1385 	struct cb *cb = nic->cb_to_clean;
       
  1386 
       
  1387 	if ((err = e100_exec_cb(nic, NULL, e100_setup_ucode)))
       
  1388 		DPRINTK(PROBE,ERR, "ucode cmd failed with error %d\n", err);
       
  1389 
       
  1390 	/* must restart cuc */
       
  1391 	nic->cuc_cmd = cuc_start;
       
  1392 
       
  1393 	/* wait for completion */
       
  1394 	e100_write_flush(nic);
       
  1395 	udelay(10);
       
  1396 
       
  1397 	/* wait for possibly (ouch) 500ms */
       
  1398 	while (!(cb->status & cpu_to_le16(cb_complete))) {
       
  1399 		msleep(10);
       
  1400 		if (!--counter) break;
       
  1401 	}
       
  1402 
       
  1403 	/* ack any interrupts, something could have been set */
       
  1404 	iowrite8(~0, &nic->csr->scb.stat_ack);
       
  1405 
       
  1406 	/* if the command failed, or is not OK, notify and return */
       
  1407 	if (!counter || !(cb->status & cpu_to_le16(cb_ok))) {
       
  1408 		DPRINTK(PROBE,ERR, "ucode load failed\n");
       
  1409 		err = -EPERM;
       
  1410 	}
       
  1411 
       
  1412 	return err;
       
  1413 }
       
  1414 
       
  1415 static void e100_setup_iaaddr(struct nic *nic, struct cb *cb,
       
  1416 	struct sk_buff *skb)
       
  1417 {
       
  1418 	cb->command = cpu_to_le16(cb_iaaddr);
       
  1419 	memcpy(cb->u.iaaddr, nic->netdev->dev_addr, ETH_ALEN);
       
  1420 }
       
  1421 
       
  1422 static void e100_dump(struct nic *nic, struct cb *cb, struct sk_buff *skb)
       
  1423 {
       
  1424 	cb->command = cpu_to_le16(cb_dump);
       
  1425 	cb->u.dump_buffer_addr = cpu_to_le32(nic->dma_addr +
       
  1426 		offsetof(struct mem, dump_buf));
       
  1427 }
       
  1428 
       
#define NCONFIG_AUTO_SWITCH	0x0080	/* MDI/MDI-X auto-switch bit written to MII_NCONFIG */
#define MII_NSC_CONG		MII_RESV1	/* National PHY congestion-control register */
#define NSC_CONG_ENABLE		0x0100	/* enable congestion control */
#define NSC_CONG_TXREADY	0x0400	/* congestion TX-ready bit */
#define ADVERTISE_FC_SUPPORTED	0x0400	/* flow control supported (autoneg advertise) */
       
/* Probe for the PHY, isolate all other PHY addresses, record the PHY ID,
 * and apply PHY-specific fixups (National TX congestion control and
 * MDI/MDI-X auto-switching).  Returns 0 on success or -EAGAIN when no
 * PHY responds at any address. */
static int e100_phy_init(struct nic *nic)
{
	struct net_device *netdev = nic->netdev;
	u32 addr;
	u16 bmcr, stat, id_lo, id_hi, cong;

	/* Discover phy addr by searching addrs in order {1,0,2,..., 31} */
	for(addr = 0; addr < 32; addr++) {
		nic->mii.phy_id = (addr == 0) ? 1 : (addr == 1) ? 0 : addr;
		bmcr = mdio_read(netdev, nic->mii.phy_id, MII_BMCR);
		/* BMSR is read twice; several of its bits are latched and
		 * only reflect current state on the second read */
		stat = mdio_read(netdev, nic->mii.phy_id, MII_BMSR);
		stat = mdio_read(netdev, nic->mii.phy_id, MII_BMSR);
		/* all-ones BMCR or all-zero BMCR+BMSR means nothing is
		 * responding at this address */
		if(!((bmcr == 0xFFFF) || ((stat == 0) && (bmcr == 0))))
			break;
	}
	DPRINTK(HW, DEBUG, "phy_addr = %d\n", nic->mii.phy_id);
	if(addr == 32)
		return -EAGAIN;

	/* Selected the phy and isolate the rest */
	for(addr = 0; addr < 32; addr++) {
		if(addr != nic->mii.phy_id) {
			mdio_write(netdev, addr, MII_BMCR, BMCR_ISOLATE);
		} else {
			/* clear ISOLATE on the chosen PHY, preserving the
			 * rest of its control bits */
			bmcr = mdio_read(netdev, addr, MII_BMCR);
			mdio_write(netdev, addr, MII_BMCR,
				bmcr & ~BMCR_ISOLATE);
		}
	}

	/* Get phy ID */
	id_lo = mdio_read(netdev, nic->mii.phy_id, MII_PHYSID1);
	id_hi = mdio_read(netdev, nic->mii.phy_id, MII_PHYSID2);
	nic->phy = (u32)id_hi << 16 | (u32)id_lo;
	DPRINTK(HW, DEBUG, "phy ID = 0x%08X\n", nic->phy);

	/* Handle National tx phys */
#define NCS_PHY_MODEL_MASK	0xFFF0FFFF
	if((nic->phy & NCS_PHY_MODEL_MASK) == phy_nsc_tx) {
		/* Disable congestion control */
		cong = mdio_read(netdev, nic->mii.phy_id, MII_NSC_CONG);
		cong |= NSC_CONG_TXREADY;
		cong &= ~NSC_CONG_ENABLE;
		mdio_write(netdev, nic->mii.phy_id, MII_NSC_CONG, cong);
	}

	/* 82550 and later always support auto-switching; ICH parts only
	 * when the PHY reports it and the EEPROM has not disabled MDI-X */
	if((nic->mac >= mac_82550_D102) || ((nic->flags & ich) &&
	   (mdio_read(netdev, nic->mii.phy_id, MII_TPISTATUS) & 0x8000) &&
		!(nic->eeprom[eeprom_cnfg_mdix] & eeprom_mdix_enabled))) {
		/* enable/disable MDI/MDI-X auto-switching. */
		mdio_write(netdev, nic->mii.phy_id, MII_NCONFIG,
				nic->mii.force_media ? 0 : NCONFIG_AUTO_SWITCH);
	}

	return 0;
}
       
  1490 
       
/* Bring the controller from reset to a configured, quiescent state:
 * self-test, PHY setup, CU/RU base loading, microcode load, configure
 * and station-address CBs, then statistics dump setup.  Interrupts are
 * left disabled on return.  Returns 0 or the first error encountered. */
static int e100_hw_init(struct nic *nic)
{
	int err;

	e100_hw_reset(nic);

	/* NOTE(review): ERR level here looks like leftover debug output */
	DPRINTK(HW, ERR, "e100_hw_init\n");
	/* self-test sleeps, so it may only run in process context */
	if(!in_interrupt() && (err = e100_self_test(nic)))
		return err;

	if((err = e100_phy_init(nic)))
		return err;
	/* load 0 as CU/RU base so CB/RFD bus addresses are absolute */
	if((err = e100_exec_cmd(nic, cuc_load_base, 0)))
		return err;
	if((err = e100_exec_cmd(nic, ruc_load_base, 0)))
		return err;
	/* microcode load must complete before further CU commands */
	if ((err = e100_exec_cb_wait(nic, NULL, e100_setup_ucode)))
		return err;
	if((err = e100_exec_cb(nic, NULL, e100_configure)))
		return err;
	if((err = e100_exec_cb(nic, NULL, e100_setup_iaaddr)))
		return err;
	/* tell the controller where to dump its statistics counters */
	if((err = e100_exec_cmd(nic, cuc_dump_addr,
		nic->dma_addr + offsetof(struct mem, stats))))
		return err;
	if((err = e100_exec_cmd(nic, cuc_dump_reset, 0)))
		return err;

	e100_disable_irq(nic);

	return 0;
}
       
  1523 
       
  1524 static void e100_multi(struct nic *nic, struct cb *cb, struct sk_buff *skb)
       
  1525 {
       
  1526 	struct net_device *netdev = nic->netdev;
       
  1527 	struct dev_mc_list *list = netdev->mc_list;
       
  1528 	u16 i, count = min(netdev->mc_count, E100_MAX_MULTICAST_ADDRS);
       
  1529 
       
  1530 	cb->command = cpu_to_le16(cb_multi);
       
  1531 	cb->u.multi.count = cpu_to_le16(count * ETH_ALEN);
       
  1532 	for(i = 0; list && i < count; i++, list = list->next)
       
  1533 		memcpy(&cb->u.multi.addr[i*ETH_ALEN], &list->dmi_addr,
       
  1534 			ETH_ALEN);
       
  1535 }
       
  1536 
       
  1537 static void e100_set_multicast_list(struct net_device *netdev)
       
  1538 {
       
  1539 	struct nic *nic = netdev_priv(netdev);
       
  1540 
       
  1541 	DPRINTK(HW, DEBUG, "mc_count=%d, flags=0x%04X\n",
       
  1542 		netdev->mc_count, netdev->flags);
       
  1543 
       
  1544 	if(netdev->flags & IFF_PROMISC)
       
  1545 		nic->flags |= promiscuous;
       
  1546 	else
       
  1547 		nic->flags &= ~promiscuous;
       
  1548 
       
  1549 	if(netdev->flags & IFF_ALLMULTI ||
       
  1550 		netdev->mc_count > E100_MAX_MULTICAST_ADDRS)
       
  1551 		nic->flags |= multicast_all;
       
  1552 	else
       
  1553 		nic->flags &= ~multicast_all;
       
  1554 
       
  1555 	e100_exec_cb(nic, NULL, e100_configure);
       
  1556 	e100_exec_cb(nic, NULL, e100_multi);
       
  1557 }
       
  1558 
       
/* Harvest the statistics dump the controller wrote into shared memory
 * and fold the counters into the netdev and driver stats.  The dump is
 * only consumed once the generation-specific completion marker shows
 * the previous cuc_dump_reset finished; a new dump+reset is then
 * triggered for the next pass. */
static void e100_update_stats(struct nic *nic)
{
	struct net_device *dev = nic->netdev;
	struct net_device_stats *ns = &dev->stats;
	struct stats *s = &nic->mem->stats;
	/* The dump area grew with each MAC generation; the completion
	 * word sits right after the last counter the chip supports. */
	__le32 *complete = (nic->mac < mac_82558_D101_A4) ? &s->fc_xmt_pause :
		(nic->mac < mac_82559_D101M) ? (__le32 *)&s->xmt_tco_frames :
		&s->complete;

	/* Device's stats reporting may take several microseconds to
	 * complete, so we're always waiting for results of the
	 * previous command. */

	if(*complete == cpu_to_le32(cuc_dump_reset_complete)) {
		*complete = 0;
		nic->tx_frames = le32_to_cpu(s->tx_good_frames);
		nic->tx_collisions = le32_to_cpu(s->tx_total_collisions);
		ns->tx_aborted_errors += le32_to_cpu(s->tx_max_collisions);
		ns->tx_window_errors += le32_to_cpu(s->tx_late_collisions);
		ns->tx_carrier_errors += le32_to_cpu(s->tx_lost_crs);
		ns->tx_fifo_errors += le32_to_cpu(s->tx_underruns);
		ns->collisions += nic->tx_collisions;
		ns->tx_errors += le32_to_cpu(s->tx_max_collisions) +
			le32_to_cpu(s->tx_lost_crs);
		ns->rx_length_errors += le32_to_cpu(s->rx_short_frame_errors) +
			nic->rx_over_length_errors;
		ns->rx_crc_errors += le32_to_cpu(s->rx_crc_errors);
		ns->rx_frame_errors += le32_to_cpu(s->rx_alignment_errors);
		ns->rx_over_errors += le32_to_cpu(s->rx_overrun_errors);
		ns->rx_fifo_errors += le32_to_cpu(s->rx_overrun_errors);
		ns->rx_missed_errors += le32_to_cpu(s->rx_resource_errors);
		ns->rx_errors += le32_to_cpu(s->rx_crc_errors) +
			le32_to_cpu(s->rx_alignment_errors) +
			le32_to_cpu(s->rx_short_frame_errors) +
			le32_to_cpu(s->rx_cdt_errors);
		nic->tx_deferred += le32_to_cpu(s->tx_deferred);
		nic->tx_single_collisions +=
			le32_to_cpu(s->tx_single_collisions);
		nic->tx_multiple_collisions +=
			le32_to_cpu(s->tx_multiple_collisions);
		/* the counters below only exist on newer MAC revisions */
		if(nic->mac >= mac_82558_D101_A4) {
			nic->tx_fc_pause += le32_to_cpu(s->fc_xmt_pause);
			nic->rx_fc_pause += le32_to_cpu(s->fc_rcv_pause);
			nic->rx_fc_unsupported +=
				le32_to_cpu(s->fc_rcv_unsupported);
			if(nic->mac >= mac_82559_D101M) {
				nic->tx_tco_frames +=
					le16_to_cpu(s->xmt_tco_frames);
				nic->rx_tco_frames +=
					le16_to_cpu(s->rcv_tco_frames);
			}
		}
	}


	/* kick off the next dump+reset; results are read on the next call */
	if(e100_exec_cmd(nic, cuc_dump_reset, 0))
		DPRINTK(TX_ERR, DEBUG, "exec cuc_dump_reset failed\n");
}
       
  1617 
       
  1618 static void e100_adjust_adaptive_ifs(struct nic *nic, int speed, int duplex)
       
  1619 {
       
  1620 	/* Adjust inter-frame-spacing (IFS) between two transmits if
       
  1621 	 * we're getting collisions on a half-duplex connection. */
       
  1622 
       
  1623 	if(duplex == DUPLEX_HALF) {
       
  1624 		u32 prev = nic->adaptive_ifs;
       
  1625 		u32 min_frames = (speed == SPEED_100) ? 1000 : 100;
       
  1626 
       
  1627 		if((nic->tx_frames / 32 < nic->tx_collisions) &&
       
  1628 		   (nic->tx_frames > min_frames)) {
       
  1629 			if(nic->adaptive_ifs < 60)
       
  1630 				nic->adaptive_ifs += 5;
       
  1631 		} else if (nic->tx_frames < min_frames) {
       
  1632 			if(nic->adaptive_ifs >= 5)
       
  1633 				nic->adaptive_ifs -= 5;
       
  1634 		}
       
  1635 		if(nic->adaptive_ifs != prev)
       
  1636 			e100_exec_cb(nic, NULL, e100_configure);
       
  1637 	}
       
  1638 }
       
  1639 
       
  1640 static void e100_watchdog(unsigned long data)
       
  1641 {
       
  1642 	struct nic *nic = (struct nic *)data;
       
  1643 	struct ethtool_cmd cmd;
       
  1644 
       
  1645 	DPRINTK(TIMER, DEBUG, "right now = %ld\n", jiffies);
       
  1646 
       
  1647 	/* mii library handles link maintenance tasks */
       
  1648 
       
  1649 	if (nic->ecdev) {
       
  1650 		ecdev_set_link(nic->ecdev, mii_link_ok(&nic->mii) ? 1 : 0);
       
  1651 	} else {
       
  1652 		mii_ethtool_gset(&nic->mii, &cmd);
       
  1653 
       
  1654 		if(mii_link_ok(&nic->mii) && !netif_carrier_ok(nic->netdev)) {
       
  1655 			DPRINTK(LINK, INFO, "link up, %sMbps, %s-duplex\n",
       
  1656 					cmd.speed == SPEED_100 ? "100" : "10",
       
  1657 					cmd.duplex == DUPLEX_FULL ? "full" : "half");
       
  1658 		} else if(!mii_link_ok(&nic->mii) && netif_carrier_ok(nic->netdev)) {
       
  1659 			DPRINTK(LINK, INFO, "link down\n");
       
  1660 		}
       
  1661 	}
       
  1662 
       
  1663 	mii_check_link(&nic->mii);
       
  1664 
       
  1665 	if (!nic->ecdev) {
       
  1666 		/* Software generated interrupt to recover from (rare) Rx
       
  1667 		 * allocation failure.
       
  1668 		 * Unfortunately have to use a spinlock to not re-enable interrupts
       
  1669 		 * accidentally, due to hardware that shares a register between the
       
  1670 		 * interrupt mask bit and the SW Interrupt generation bit */
       
  1671 		spin_lock_irq(&nic->cmd_lock);
       
  1672 		iowrite8(ioread8(&nic->csr->scb.cmd_hi) | irq_sw_gen,&nic->csr->scb.cmd_hi);
       
  1673 		e100_write_flush(nic);
       
  1674 		spin_unlock_irq(&nic->cmd_lock);
       
  1675 	}
       
  1676 
       
  1677 	e100_update_stats(nic);
       
  1678 	e100_adjust_adaptive_ifs(nic, cmd.speed, cmd.duplex);
       
  1679 
       
  1680 	if(nic->mac <= mac_82557_D100_C)
       
  1681 		/* Issue a multicast command to workaround a 557 lock up */
       
  1682 		e100_set_multicast_list(nic->netdev);
       
  1683 
       
  1684 	if(nic->flags & ich && cmd.speed==SPEED_10 && cmd.duplex==DUPLEX_HALF)
       
  1685 		/* Need SW workaround for ICH[x] 10Mbps/half duplex Tx hang. */
       
  1686 		nic->flags |= ich_10h_workaround;
       
  1687 	else
       
  1688 		nic->flags &= ~ich_10h_workaround;
       
  1689 
       
  1690 	if (!nic->ecdev)
       
  1691 		mod_timer(&nic->watchdog,
       
  1692 				round_jiffies(jiffies + E100_WATCHDOG_PERIOD));
       
  1693 }
       
  1694 
       
  1695 static void e100_xmit_prepare(struct nic *nic, struct cb *cb,
       
  1696 	struct sk_buff *skb)
       
  1697 {
       
  1698 	cb->command = nic->tx_command;
       
  1699 	/* interrupt every 16 packets regardless of delay */
       
  1700 	if((nic->cbs_avail & ~15) == nic->cbs_avail)
       
  1701 		cb->command |= cpu_to_le16(cb_i);
       
  1702 	cb->u.tcb.tbd_array = cb->dma_addr + offsetof(struct cb, u.tcb.tbd);
       
  1703 	cb->u.tcb.tcb_byte_count = 0;
       
  1704 	cb->u.tcb.threshold = nic->tx_threshold;
       
  1705 	cb->u.tcb.tbd_count = 1;
       
  1706 	cb->u.tcb.tbd.buf_addr = cpu_to_le32(pci_map_single(nic->pdev,
       
  1707 		skb->data, skb->len, PCI_DMA_TODEVICE));
       
  1708 	/* check for mapping failure? */
       
  1709 	cb->u.tcb.tbd.size = cpu_to_le16(skb->len);
       
  1710 }
       
  1711 
       
  1712 static int e100_xmit_frame(struct sk_buff *skb, struct net_device *netdev)
       
  1713 {
       
  1714 	struct nic *nic = netdev_priv(netdev);
       
  1715 	int err;
       
  1716 
       
  1717 	if(nic->flags & ich_10h_workaround) {
       
  1718 		/* SW workaround for ICH[x] 10Mbps/half duplex Tx hang.
       
  1719 		   Issue a NOP command followed by a 1us delay before
       
  1720 		   issuing the Tx command. */
       
  1721 		if(e100_exec_cmd(nic, cuc_nop, 0))
       
  1722 			DPRINTK(TX_ERR, DEBUG, "exec cuc_nop failed\n");
       
  1723 		udelay(1);
       
  1724 	}
       
  1725 
       
  1726 	err = e100_exec_cb(nic, skb, e100_xmit_prepare);
       
  1727 
       
  1728 	switch(err) {
       
  1729 	case -ENOSPC:
       
  1730 		/* We queued the skb, but now we're out of space. */
       
  1731 		DPRINTK(TX_ERR, DEBUG, "No space for CB\n");
       
  1732 		if (!nic->ecdev)
       
  1733 			netif_stop_queue(netdev);
       
  1734 		break;
       
  1735 	case -ENOMEM:
       
  1736 		/* This is a hard error - log it. */
       
  1737 		DPRINTK(TX_ERR, DEBUG, "Out of Tx resources, returning skb\n");
       
  1738 		if (!nic->ecdev)
       
  1739 			netif_stop_queue(netdev);
       
  1740 		return 1;
       
  1741 	}
       
  1742 
       
  1743 	netdev->trans_start = jiffies;
       
  1744 	return 0;
       
  1745 }
       
  1746 
       
  1747 static int e100_tx_clean(struct nic *nic)
       
  1748 {
       
  1749 	struct net_device *dev = nic->netdev;
       
  1750 	struct cb *cb;
       
  1751 	int tx_cleaned = 0;
       
  1752 
       
  1753 	if (!nic->ecdev)
       
  1754 		spin_lock(&nic->cb_lock);
       
  1755 
       
  1756 	/* Clean CBs marked complete */
       
  1757 	for(cb = nic->cb_to_clean;
       
  1758 	    cb->status & cpu_to_le16(cb_complete);
       
  1759 	    cb = nic->cb_to_clean = cb->next) {
       
  1760 		DPRINTK(TX_DONE, DEBUG, "cb[%d]->status = 0x%04X\n",
       
  1761 		        (int)(((void*)cb - (void*)nic->cbs)/sizeof(struct cb)),
       
  1762 		        cb->status);
       
  1763 
       
  1764 		if(likely(cb->skb != NULL)) {
       
  1765 			dev->stats.tx_packets++;
       
  1766 			dev->stats.tx_bytes += cb->skb->len;
       
  1767 
       
  1768 			pci_unmap_single(nic->pdev,
       
  1769 				le32_to_cpu(cb->u.tcb.tbd.buf_addr),
       
  1770 				le16_to_cpu(cb->u.tcb.tbd.size),
       
  1771 				PCI_DMA_TODEVICE);
       
  1772 			if (!nic->ecdev)
       
  1773 				dev_kfree_skb_any(cb->skb);
       
  1774 			cb->skb = NULL;
       
  1775 			tx_cleaned = 1;
       
  1776 		}
       
  1777 		cb->status = 0;
       
  1778 		nic->cbs_avail++;
       
  1779 	}
       
  1780 
       
  1781 	if (!nic->ecdev) {
       
  1782 		spin_unlock(&nic->cb_lock);
       
  1783 
       
  1784 		/* Recover from running out of Tx resources in xmit_frame */
       
  1785 		if(unlikely(tx_cleaned && netif_queue_stopped(nic->netdev)))
       
  1786 			netif_wake_queue(nic->netdev);
       
  1787 	}
       
  1788 
       
  1789 	return tx_cleaned;
       
  1790 }
       
  1791 
       
  1792 static void e100_clean_cbs(struct nic *nic)
       
  1793 {
       
  1794 	if(nic->cbs) {
       
  1795 		while(nic->cbs_avail != nic->params.cbs.count) {
       
  1796 			struct cb *cb = nic->cb_to_clean;
       
  1797 			if(cb->skb) {
       
  1798 				pci_unmap_single(nic->pdev,
       
  1799 					le32_to_cpu(cb->u.tcb.tbd.buf_addr),
       
  1800 					le16_to_cpu(cb->u.tcb.tbd.size),
       
  1801 					PCI_DMA_TODEVICE);
       
  1802 				if (!nic->ecdev)
       
  1803 					dev_kfree_skb(cb->skb);
       
  1804 			}
       
  1805 			nic->cb_to_clean = nic->cb_to_clean->next;
       
  1806 			nic->cbs_avail++;
       
  1807 		}
       
  1808 		pci_free_consistent(nic->pdev,
       
  1809 			sizeof(struct cb) * nic->params.cbs.count,
       
  1810 			nic->cbs, nic->cbs_dma_addr);
       
  1811 		nic->cbs = NULL;
       
  1812 		nic->cbs_avail = 0;
       
  1813 	}
       
  1814 	nic->cuc_cmd = cuc_start;
       
  1815 	nic->cb_to_use = nic->cb_to_send = nic->cb_to_clean =
       
  1816 		nic->cbs;
       
  1817 }
       
  1818 
       
  1819 static int e100_alloc_cbs(struct nic *nic)
       
  1820 {
       
  1821 	struct cb *cb;
       
  1822 	unsigned int i, count = nic->params.cbs.count;
       
  1823 
       
  1824 	nic->cuc_cmd = cuc_start;
       
  1825 	nic->cb_to_use = nic->cb_to_send = nic->cb_to_clean = NULL;
       
  1826 	nic->cbs_avail = 0;
       
  1827 
       
  1828 	nic->cbs = pci_alloc_consistent(nic->pdev,
       
  1829 		sizeof(struct cb) * count, &nic->cbs_dma_addr);
       
  1830 	if(!nic->cbs)
       
  1831 		return -ENOMEM;
       
  1832 
       
  1833 	for(cb = nic->cbs, i = 0; i < count; cb++, i++) {
       
  1834 		cb->next = (i + 1 < count) ? cb + 1 : nic->cbs;
       
  1835 		cb->prev = (i == 0) ? nic->cbs + count - 1 : cb - 1;
       
  1836 
       
  1837 		cb->dma_addr = nic->cbs_dma_addr + i * sizeof(struct cb);
       
  1838 		cb->link = cpu_to_le32(nic->cbs_dma_addr +
       
  1839 			((i+1) % count) * sizeof(struct cb));
       
  1840 		cb->skb = NULL;
       
  1841 	}
       
  1842 
       
  1843 	nic->cb_to_use = nic->cb_to_send = nic->cb_to_clean = nic->cbs;
       
  1844 	nic->cbs_avail = count;
       
  1845 
       
  1846 	return 0;
       
  1847 }
       
  1848 
       
  1849 static inline void e100_start_receiver(struct nic *nic, struct rx *rx)
       
  1850 {
       
  1851 	if(!nic->rxs) return;
       
  1852 	if(RU_SUSPENDED != nic->ru_running) return;
       
  1853 
       
  1854 	/* handle init time starts */
       
  1855 	if(!rx) rx = nic->rxs;
       
  1856 
       
  1857 	/* (Re)start RU if suspended or idle and RFA is non-NULL */
       
  1858 	if(rx->skb) {
       
  1859 		e100_exec_cmd(nic, ruc_start, rx->dma_addr);
       
  1860 		nic->ru_running = RU_RUNNING;
       
  1861 	}
       
  1862 }
       
  1863 
       
  1864 #define RFD_BUF_LEN (sizeof(struct rfd) + VLAN_ETH_FRAME_LEN)
       
  1865 static int e100_rx_alloc_skb(struct nic *nic, struct rx *rx)
       
  1866 {
       
  1867 	if(!(rx->skb = netdev_alloc_skb(nic->netdev, RFD_BUF_LEN + NET_IP_ALIGN)))
       
  1868 		return -ENOMEM;
       
  1869 
       
  1870 	/* Align, init, and map the RFD. */
       
  1871 	skb_reserve(rx->skb, NET_IP_ALIGN);
       
  1872 	skb_copy_to_linear_data(rx->skb, &nic->blank_rfd, sizeof(struct rfd));
       
  1873 	rx->dma_addr = pci_map_single(nic->pdev, rx->skb->data,
       
  1874 		RFD_BUF_LEN, PCI_DMA_BIDIRECTIONAL);
       
  1875 
       
  1876 	if (pci_dma_mapping_error(nic->pdev, rx->dma_addr)) {
       
  1877 		dev_kfree_skb_any(rx->skb);
       
  1878 		rx->skb = NULL;
       
  1879 		rx->dma_addr = 0;
       
  1880 		return -ENOMEM;
       
  1881 	}
       
  1882 
       
  1883 	/* Link the RFD to end of RFA by linking previous RFD to
       
  1884 	 * this one.  We are safe to touch the previous RFD because
       
  1885 	 * it is protected by the before last buffer's el bit being set */
       
  1886 	if (rx->prev->skb) {
       
  1887 		struct rfd *prev_rfd = (struct rfd *)rx->prev->skb->data;
       
  1888 		put_unaligned_le32(rx->dma_addr, &prev_rfd->link);
       
  1889 		pci_dma_sync_single_for_device(nic->pdev, rx->prev->dma_addr,
       
  1890 			sizeof(struct rfd), PCI_DMA_BIDIRECTIONAL);
       
  1891 	}
       
  1892 
       
  1893 	return 0;
       
  1894 }
       
  1895 
       
/* Hand one completed receive buffer up the stack (or to the EtherCAT
 * master).  In EtherCAT mode the skb is recycled in place instead of
 * being replaced.  Returns 0 on success, -EAGAIN when the NAPI quota is
 * exhausted, or -ENODATA when the buffer has not been filled yet. */
static int e100_rx_indicate(struct nic *nic, struct rx *rx,
	unsigned int *work_done, unsigned int work_to_do)
{
	struct net_device *dev = nic->netdev;
	struct sk_buff *skb = rx->skb;
	struct rfd *rfd = (struct rfd *)skb->data;
	u16 rfd_status, actual_size;

	if(unlikely(work_done && *work_done >= work_to_do))
		return -EAGAIN;

	/* Need to sync before taking a peek at cb_complete bit */
	pci_dma_sync_single_for_cpu(nic->pdev, rx->dma_addr,
		sizeof(struct rfd), PCI_DMA_BIDIRECTIONAL);
	rfd_status = le16_to_cpu(rfd->status);

	DPRINTK(RX_STATUS, DEBUG, "status=0x%04X\n", rfd_status);

	/* If data isn't ready, nothing to indicate */
	if (unlikely(!(rfd_status & cb_complete))) {
		/* If the next buffer has the el bit, but we think the receiver
		 * is still running, check to see if it really stopped while
		 * we had interrupts off.
		 * This allows for a fast restart without re-enabling
		 * interrupts */
		if ((le16_to_cpu(rfd->command) & cb_el) &&
		    (RU_RUNNING == nic->ru_running))

			if (ioread8(&nic->csr->scb.status) & rus_no_res)
				nic->ru_running = RU_SUSPENDED;
		return -ENODATA;
	}

	/* Get actual data size (low 14 bits of the actual-count field) */
	actual_size = le16_to_cpu(rfd->actual_size) & 0x3FFF;
	if(unlikely(actual_size > RFD_BUF_LEN - sizeof(struct rfd)))
		actual_size = RFD_BUF_LEN - sizeof(struct rfd);

	/* Get data */
	pci_unmap_single(nic->pdev, rx->dma_addr,
		RFD_BUF_LEN, PCI_DMA_BIDIRECTIONAL);

	/* If this buffer has the el bit, but we think the receiver
	 * is still running, check to see if it really stopped while
	 * we had interrupts off.
	 * This allows for a fast restart without re-enabling interrupts.
	 * This can happen when the RU sees the size change but also sees
	 * the el bit set. */
	if ((le16_to_cpu(rfd->command) & cb_el) &&
	    (RU_RUNNING == nic->ru_running)) {

	    if (ioread8(&nic->csr->scb.status) & rus_no_res)
		nic->ru_running = RU_SUSPENDED;
	}

	if (!nic->ecdev) {
		/* Pull off the RFD and put the actual data (minus eth hdr) */
		skb_reserve(skb, sizeof(struct rfd));
		skb_put(skb, actual_size);
		skb->protocol = eth_type_trans(skb, nic->netdev);
	}

	if(unlikely(!(rfd_status & cb_ok))) {
		if (!nic->ecdev) {
			/* Don't indicate if hardware indicates errors */
			dev_kfree_skb_any(skb);
		}
	} else if(actual_size > ETH_DATA_LEN + VLAN_ETH_HLEN) {
		/* Don't indicate oversized frames */
		nic->rx_over_length_errors++;
		if (!nic->ecdev)
			dev_kfree_skb_any(skb);
	} else {
		dev->stats.rx_packets++;
		dev->stats.rx_bytes += actual_size;
		nic->netdev->last_rx = jiffies;
		if (nic->ecdev) {
			/* hand the payload (past the RFD header) to the
			 * EtherCAT master; the skb stays with the driver */
			ecdev_receive(nic->ecdev,
					skb->data + sizeof(struct rfd), actual_size);

			// No need to detect link status as
			// long as frames are received: Reset watchdog.
			nic->ec_watchdog_jiffies = jiffies;
		} else {
			netif_receive_skb(skb);
		}
		if(work_done)
			(*work_done)++;
	}

	if (nic->ecdev) {
		// make the receive frame descriptor usable again
		memcpy(skb->data, &nic->blank_rfd, sizeof(struct rfd));
		rx->dma_addr = pci_map_single(nic->pdev, skb->data,
				RFD_BUF_LEN, PCI_DMA_BIDIRECTIONAL);
		if (pci_dma_mapping_error(nic->pdev, rx->dma_addr)) {
			rx->dma_addr = 0;
		}

		/* Link the RFD to end of RFA by linking previous RFD to
		 * this one.  We are safe to touch the previous RFD because
		 * it is protected by the before last buffer's el bit being set */
		if (rx->prev->skb) {
			struct rfd *prev_rfd = (struct rfd *)rx->prev->skb->data;
			put_unaligned_le32(rx->dma_addr, &prev_rfd->link);
			pci_dma_sync_single_for_device(nic->pdev, rx->prev->dma_addr,
					sizeof(struct rfd), PCI_DMA_TODEVICE);
		}
	} else {
		/* ownership passed to the stack; refilled in e100_rx_clean */
		rx->skb = NULL;
	}

	return 0;
}
       
  2010 
       
/* Process completed RFDs, refill the ring with new skbs (non-EtherCAT
 * mode only), advance the el-bit "stopping point" two buffers before
 * the end of the list, and restart the RU if it dropped into the No
 * Resources state while interrupts were off. */
static void e100_rx_clean(struct nic *nic, unsigned int *work_done,
	unsigned int work_to_do)
{
	struct rx *rx;
	int restart_required = 0, err = 0;
	struct rx *old_before_last_rx, *new_before_last_rx;
	struct rfd *old_before_last_rfd, *new_before_last_rfd;

	/* Indicate newly arrived packets */
	for(rx = nic->rx_to_clean; rx->skb; rx = nic->rx_to_clean = rx->next) {
		err = e100_rx_indicate(nic, rx, work_done, work_to_do);
		/* Hit quota or no more to clean */
		if (-EAGAIN == err || -ENODATA == err)
			break;
	}


	/* On EAGAIN, hit quota so have more work to do, restart once
	 * cleanup is complete.
	 * Else, are we already rnr? then pay attention!!! this ensures that
	 * the state machine progression never allows a start with a
	 * partially cleaned list, avoiding a race between hardware
	 * and rx_to_clean when in NAPI mode */
	if (-EAGAIN != err && RU_SUSPENDED == nic->ru_running)
		restart_required = 1;

	old_before_last_rx = nic->rx_to_use->prev->prev;
	old_before_last_rfd = (struct rfd *)old_before_last_rx->skb->data;

	if (!nic->ecdev) {
		/* Alloc new skbs to refill list */
		for(rx = nic->rx_to_use; !rx->skb; rx = nic->rx_to_use = rx->next) {
			if(unlikely(e100_rx_alloc_skb(nic, rx)))
				break; /* Better luck next time (see watchdog) */
		}
	}

	new_before_last_rx = nic->rx_to_use->prev->prev;
	if (new_before_last_rx != old_before_last_rx) {
		/* Set the el-bit on the buffer that is before the last buffer.
		 * This lets us update the next pointer on the last buffer
		 * without worrying about hardware touching it.
		 * We set the size to 0 to prevent hardware from touching this
		 * buffer.
		 * When the hardware hits the before last buffer with el-bit
		 * and size of 0, it will RNR interrupt, the RUS will go into
		 * the No Resources state.  It will not complete nor write to
		 * this buffer. */
		new_before_last_rfd =
			(struct rfd *)new_before_last_rx->skb->data;
		new_before_last_rfd->size = 0;
		new_before_last_rfd->command |= cpu_to_le16(cb_el);
		pci_dma_sync_single_for_device(nic->pdev,
			new_before_last_rx->dma_addr, sizeof(struct rfd),
			PCI_DMA_BIDIRECTIONAL);

		/* Now that we have a new stopping point, we can clear the old
		 * stopping point.  We must sync twice to get the proper
		 * ordering on the hardware side of things. */
		old_before_last_rfd->command &= ~cpu_to_le16(cb_el);
		pci_dma_sync_single_for_device(nic->pdev,
			old_before_last_rx->dma_addr, sizeof(struct rfd),
			PCI_DMA_BIDIRECTIONAL);
		old_before_last_rfd->size = cpu_to_le16(VLAN_ETH_FRAME_LEN);
		pci_dma_sync_single_for_device(nic->pdev,
			old_before_last_rx->dma_addr, sizeof(struct rfd),
			PCI_DMA_BIDIRECTIONAL);
	}

	if(restart_required) {
		// ack the rnr?
		iowrite8(stat_ack_rnr, &nic->csr->scb.stat_ack);
		e100_start_receiver(nic, nic->rx_to_clean);
		if(work_done)
			(*work_done)++;
	}
}
       
  2088 
       
  2089 static void e100_rx_clean_list(struct nic *nic)
       
  2090 {
       
  2091 	struct rx *rx;
       
  2092 	unsigned int i, count = nic->params.rfds.count;
       
  2093 
       
  2094 	nic->ru_running = RU_UNINITIALIZED;
       
  2095 
       
  2096 	if(nic->rxs) {
       
  2097 		for(rx = nic->rxs, i = 0; i < count; rx++, i++) {
       
  2098 			if(rx->skb) {
       
  2099 				pci_unmap_single(nic->pdev, rx->dma_addr,
       
  2100 					RFD_BUF_LEN, PCI_DMA_BIDIRECTIONAL);
       
  2101 				dev_kfree_skb(rx->skb);
       
  2102 			}
       
  2103 		}
       
  2104 		kfree(nic->rxs);
       
  2105 		nic->rxs = NULL;
       
  2106 	}
       
  2107 
       
  2108 	nic->rx_to_use = nic->rx_to_clean = NULL;
       
  2109 }
       
  2110 
       
/* Allocate and link the circular receive ring.  On success the ring is
 * fully populated with skbs and left in RU_SUSPENDED, ready for
 * e100_start_receiver(); on any allocation failure everything is
 * unwound via e100_rx_clean_list() and -ENOMEM is returned. */
static int e100_rx_alloc_list(struct nic *nic)
{
	struct rx *rx;
	unsigned int i, count = nic->params.rfds.count;
	struct rfd *before_last;

	nic->rx_to_use = nic->rx_to_clean = NULL;
	nic->ru_running = RU_UNINITIALIZED;

	/* GFP_ATOMIC: presumably callable from non-sleeping context --
	 * TODO confirm against callers. */
	if(!(nic->rxs = kcalloc(count, sizeof(struct rx), GFP_ATOMIC)))
		return -ENOMEM;

	/* Build a doubly-linked circular list and attach one skb per node. */
	for(rx = nic->rxs, i = 0; i < count; rx++, i++) {
		rx->next = (i + 1 < count) ? rx + 1 : nic->rxs;
		rx->prev = (i == 0) ? nic->rxs + count - 1 : rx - 1;
		if(e100_rx_alloc_skb(nic, rx)) {
			e100_rx_clean_list(nic);
			return -ENOMEM;
		}
	}

	/* The el-bit stop marker is only needed on the normal kernel path;
	 * in EtherCAT mode (nic->ecdev) this step is skipped. */
	if (!nic->ecdev) {
		/* Set the el-bit on the buffer that is before the last buffer.
		 * This lets us update the next pointer on the last buffer without
		 * worrying about hardware touching it.
		 * We set the size to 0 to prevent hardware from touching this buffer.
		 * When the hardware hits the before last buffer with el-bit and size
		 * of 0, it will RNR interrupt, the RU will go into the No Resources
		 * state.  It will not complete nor write to this buffer. */
		rx = nic->rxs->prev->prev;
		before_last = (struct rfd *)rx->skb->data;
		before_last->command |= cpu_to_le16(cb_el);
		before_last->size = 0;
		pci_dma_sync_single_for_device(nic->pdev, rx->dma_addr,
				sizeof(struct rfd), PCI_DMA_BIDIRECTIONAL);
	}

	nic->rx_to_use = nic->rx_to_clean = nic->rxs;
	nic->ru_running = RU_SUSPENDED;

	return 0;
}
       
  2153 
       
/* Interrupt handler.  Acks all pending events; on the normal kernel
 * path the actual RX/TX work is deferred to NAPI.  In EtherCAT mode
 * (nic->ecdev) nothing is scheduled here -- processing is driven by
 * e100_ec_poll() instead. */
static irqreturn_t e100_intr(int irq, void *dev_id)
{
	struct net_device *netdev = dev_id;
	struct nic *nic = netdev_priv(netdev);
	u8 stat_ack = ioread8(&nic->csr->scb.stat_ack);

	DPRINTK(INTR, DEBUG, "stat_ack = 0x%02X\n", stat_ack);

	if(stat_ack == stat_ack_not_ours ||	/* Not our interrupt */
	   stat_ack == stat_ack_not_present)	/* Hardware is ejected */
		return IRQ_NONE;

	/* Ack interrupt(s) */
	iowrite8(stat_ack, &nic->csr->scb.stat_ack);

	/* We hit Receive No Resource (RNR); restart RU after cleaning */
	if(stat_ack & stat_ack_rnr)
		nic->ru_running = RU_SUSPENDED;

	/* Disable irqs before scheduling NAPI so the device cannot
	 * re-interrupt while the poll is pending. */
	if(!nic->ecdev && likely(netif_rx_schedule_prep(netdev, &nic->napi))) {
		e100_disable_irq(nic);
		__netif_rx_schedule(netdev, &nic->napi);
	}

	return IRQ_HANDLED;
}
       
  2180 
       
  2181 void e100_ec_poll(struct net_device *netdev)
       
  2182 {
       
  2183 	struct nic *nic = netdev_priv(netdev);
       
  2184 
       
  2185 	e100_rx_clean(nic, NULL, 100);
       
  2186 	e100_tx_clean(nic);
       
  2187 
       
  2188     if (jiffies - nic->ec_watchdog_jiffies >= 2 * HZ) {
       
  2189         e100_watchdog((unsigned long) nic);
       
  2190         nic->ec_watchdog_jiffies = jiffies;
       
  2191     }
       
  2192 }
       
  2193 
       
  2194 
       
  2195 static int e100_poll(struct napi_struct *napi, int budget)
       
  2196 {
       
  2197 	struct nic *nic = container_of(napi, struct nic, napi);
       
  2198 	struct net_device *netdev = nic->netdev;
       
  2199 	unsigned int work_done = 0;
       
  2200 
       
  2201 	e100_rx_clean(nic, &work_done, budget);
       
  2202 	e100_tx_clean(nic);
       
  2203 
       
  2204 	/* If budget not fully consumed, exit the polling mode */
       
  2205 	if (work_done < budget) {
       
  2206 		netif_rx_complete(netdev, napi);
       
  2207 		e100_enable_irq(nic);
       
  2208 	}
       
  2209 
       
  2210 	return work_done;
       
  2211 }
       
  2212 
       
  2213 #ifdef CONFIG_NET_POLL_CONTROLLER
       
  2214 static void e100_netpoll(struct net_device *netdev)
       
  2215 {
       
  2216 	struct nic *nic = netdev_priv(netdev);
       
  2217 
       
  2218 	e100_disable_irq(nic);
       
  2219 	e100_intr(nic->pdev->irq, netdev);
       
  2220 	e100_tx_clean(nic);
       
  2221 	e100_enable_irq(nic);
       
  2222 }
       
  2223 #endif
       
  2224 
       
  2225 static int e100_set_mac_address(struct net_device *netdev, void *p)
       
  2226 {
       
  2227 	struct nic *nic = netdev_priv(netdev);
       
  2228 	struct sockaddr *addr = p;
       
  2229 
       
  2230 	if (!is_valid_ether_addr(addr->sa_data))
       
  2231 		return -EADDRNOTAVAIL;
       
  2232 
       
  2233 	memcpy(netdev->dev_addr, addr->sa_data, netdev->addr_len);
       
  2234 	e100_exec_cb(nic, NULL, e100_setup_iaaddr);
       
  2235 
       
  2236 	return 0;
       
  2237 }
       
  2238 
       
  2239 static int e100_change_mtu(struct net_device *netdev, int new_mtu)
       
  2240 {
       
  2241 	if(new_mtu < ETH_ZLEN || new_mtu > ETH_DATA_LEN)
       
  2242 		return -EINVAL;
       
  2243 	netdev->mtu = new_mtu;
       
  2244 	return 0;
       
  2245 }
       
  2246 
       
  2247 static int e100_asf(struct nic *nic)
       
  2248 {
       
  2249 	/* ASF can be enabled from eeprom */
       
  2250 	return((nic->pdev->device >= 0x1050) && (nic->pdev->device <= 0x1057) &&
       
  2251 	   (nic->eeprom[eeprom_config_asf] & eeprom_asf) &&
       
  2252 	   !(nic->eeprom[eeprom_config_asf] & eeprom_gcl) &&
       
  2253 	   ((nic->eeprom[eeprom_smbus_addr] & 0xFF) != 0xFE));
       
  2254 }
       
  2255 
       
/* Bring the adapter up: allocate RX ring and cb list, initialize the
 * hardware, start the receiver and request the irq.  The steps that
 * belong to the normal kernel datapath (watchdog timer, queueing,
 * NAPI, irq enable) are skipped when the device is attached to the
 * EtherCAT master (nic->ecdev).  Error paths unwind in reverse order. */
static int e100_up(struct nic *nic)
{
	int err;

	if((err = e100_rx_alloc_list(nic)))
		return err;
	if((err = e100_alloc_cbs(nic)))
		goto err_rx_clean_list;
	if((err = e100_hw_init(nic)))
		goto err_clean_cbs;
	e100_set_multicast_list(nic->netdev);
	e100_start_receiver(nic, NULL);
	if (!nic->ecdev) {
		mod_timer(&nic->watchdog, jiffies);
	}
	/* NOTE(review): the irq is requested even in EtherCAT mode here,
	 * but e100_down() only frees it when !nic->ecdev -- verify. */
	if((err = request_irq(nic->pdev->irq, e100_intr, IRQF_SHARED,
					nic->netdev->name, nic->netdev)))
		goto err_no_irq;
	if (!nic->ecdev) {
		netif_wake_queue(nic->netdev);
		napi_enable(&nic->napi);
		/* enable ints _after_ enabling poll, preventing a race between
		 * disable ints+schedule */
		e100_enable_irq(nic);
	}
	return 0;

err_no_irq:
	if (!nic->ecdev)
		del_timer_sync(&nic->watchdog);
err_clean_cbs:
	e100_clean_cbs(nic);
err_rx_clean_list:
	e100_rx_clean_list(nic);
	return err;
}
       
  2292 
       
/* Tear the adapter down, reversing e100_up().  NAPI is disabled first
 * (this waits for a running poll) before the hardware reset; the irq
 * and watchdog are released afterwards.  The netif/NAPI/irq steps are
 * skipped in EtherCAT mode, where they were never set up. */
static void e100_down(struct nic *nic)
{
	if (!nic->ecdev) {
		/* wait here for poll to complete */
		napi_disable(&nic->napi);
		netif_stop_queue(nic->netdev);
	}
	e100_hw_reset(nic);
	/* NOTE(review): free_irq only runs when !ecdev although e100_up()
	 * requests the irq unconditionally -- possible leak in EtherCAT
	 * mode; verify against the rest of the driver. */
	if (!nic->ecdev) {
		free_irq(nic->pdev->irq, nic->netdev);
		del_timer_sync(&nic->watchdog);
		netif_carrier_off(nic->netdev);
	}
	e100_clean_cbs(nic);
	e100_rx_clean_list(nic);
}
       
  2309 
       
  2310 static void e100_tx_timeout(struct net_device *netdev)
       
  2311 {
       
  2312 	struct nic *nic = netdev_priv(netdev);
       
  2313 
       
  2314 	/* Reset outside of interrupt context, to avoid request_irq
       
  2315 	 * in interrupt context */
       
  2316 	schedule_work(&nic->tx_timeout_task);
       
  2317 }
       
  2318 
       
/* Process-context half of TX timeout handling: log the SCB status and
 * bounce the interface (down/up) to recover the hardware. */
static void e100_tx_timeout_task(struct work_struct *work)
{
	struct nic *nic = container_of(work, struct nic, tx_timeout_task);
	struct net_device *netdev = nic->netdev;

	DPRINTK(TX_ERR, DEBUG, "scb.status=0x%02X\n",
		ioread8(&nic->csr->scb.status));
	e100_down(netdev_priv(netdev));
	e100_up(netdev_priv(netdev));
}
       
  2329 
       
/* MAC/PHY loopback self-test.  Transmits one 0xFF-filled frame with
 * loopback enabled and compares the received payload byte-for-byte.
 * Returns 0 on success, -EAGAIN on a payload mismatch, or a setup
 * error code.  Cleanup uses goto-unwind in reverse acquisition order. */
static int e100_loopback_test(struct nic *nic, enum loopback loopback_mode)
{
	int err;
	struct sk_buff *skb;

	/* Use driver resources to perform internal MAC or PHY
	 * loopback test.  A single packet is prepared and transmitted
	 * in loopback mode, and the test passes if the received
	 * packet compares byte-for-byte to the transmitted packet. */

	if((err = e100_rx_alloc_list(nic)))
		return err;
	if((err = e100_alloc_cbs(nic)))
		goto err_clean_rx;

	/* ICH PHY loopback is broken so do MAC loopback instead */
	if(nic->flags & ich && loopback_mode == lb_phy)
		loopback_mode = lb_mac;

	nic->loopback = loopback_mode;
	if((err = e100_hw_init(nic)))
		goto err_loopback_none;

	if(loopback_mode == lb_phy)
		mdio_write(nic->netdev, nic->mii.phy_id, MII_BMCR,
			BMCR_LOOPBACK);

	e100_start_receiver(nic, NULL);

	if(!(skb = netdev_alloc_skb(nic->netdev, ETH_DATA_LEN))) {
		err = -ENOMEM;
		goto err_loopback_none;
	}
	skb_put(skb, ETH_DATA_LEN);
	memset(skb->data, 0xFF, ETH_DATA_LEN);
	e100_xmit_frame(skb, nic->netdev);

	/* give the frame time to loop back into the first RFD */
	msleep(10);

	pci_dma_sync_single_for_cpu(nic->pdev, nic->rx_to_clean->dma_addr,
			RFD_BUF_LEN, PCI_DMA_BIDIRECTIONAL);

	/* received payload starts after the RFD header in the buffer */
	if(memcmp(nic->rx_to_clean->skb->data + sizeof(struct rfd),
	   skb->data, ETH_DATA_LEN))
		err = -EAGAIN;

err_loopback_none:
	mdio_write(nic->netdev, nic->mii.phy_id, MII_BMCR, 0);
	nic->loopback = lb_none;
	e100_clean_cbs(nic);
	e100_hw_reset(nic);
err_clean_rx:
	e100_rx_clean_list(nic);
	return err;
}
       
  2385 
       
  2386 #define MII_LED_CONTROL	0x1B
       
  2387 static void e100_blink_led(unsigned long data)
       
  2388 {
       
  2389 	struct nic *nic = (struct nic *)data;
       
  2390 	enum led_state {
       
  2391 		led_on     = 0x01,
       
  2392 		led_off    = 0x04,
       
  2393 		led_on_559 = 0x05,
       
  2394 		led_on_557 = 0x07,
       
  2395 	};
       
  2396 
       
  2397 	nic->leds = (nic->leds & led_on) ? led_off :
       
  2398 		(nic->mac < mac_82559_D101M) ? led_on_557 : led_on_559;
       
  2399 	mdio_write(nic->netdev, nic->mii.phy_id, MII_LED_CONTROL, nic->leds);
       
  2400 	mod_timer(&nic->blink_timer, jiffies + HZ / 4);
       
  2401 }
       
  2402 
       
  2403 static int e100_get_settings(struct net_device *netdev, struct ethtool_cmd *cmd)
       
  2404 {
       
  2405 	struct nic *nic = netdev_priv(netdev);
       
  2406 	return mii_ethtool_gset(&nic->mii, cmd);
       
  2407 }
       
  2408 
       
  2409 static int e100_set_settings(struct net_device *netdev, struct ethtool_cmd *cmd)
       
  2410 {
       
  2411 	struct nic *nic = netdev_priv(netdev);
       
  2412 	int err;
       
  2413 
       
  2414 	mdio_write(netdev, nic->mii.phy_id, MII_BMCR, BMCR_RESET);
       
  2415 	err = mii_ethtool_sset(&nic->mii, cmd);
       
  2416 	e100_exec_cb(nic, NULL, e100_configure);
       
  2417 
       
  2418 	return err;
       
  2419 }
       
  2420 
       
  2421 static void e100_get_drvinfo(struct net_device *netdev,
       
  2422 	struct ethtool_drvinfo *info)
       
  2423 {
       
  2424 	struct nic *nic = netdev_priv(netdev);
       
  2425 	strcpy(info->driver, DRV_NAME);
       
  2426 	strcpy(info->version, DRV_VERSION);
       
  2427 	strcpy(info->fw_version, "N/A");
       
  2428 	strcpy(info->bus_info, pci_name(nic->pdev));
       
  2429 }
       
  2430 
       
  2431 #define E100_PHY_REGS 0x1C
       
  2432 static int e100_get_regs_len(struct net_device *netdev)
       
  2433 {
       
  2434 	struct nic *nic = netdev_priv(netdev);
       
  2435 	return 1 + E100_PHY_REGS + sizeof(nic->mem->dump_buf);
       
  2436 }
       
  2437 
       
/* ethtool get_regs: dump one word of SCB state, the first
 * E100_PHY_REGS MII registers (stored highest register first) and the
 * controller dump buffer produced by an e100_dump cb command. */
static void e100_get_regs(struct net_device *netdev,
	struct ethtool_regs *regs, void *p)
{
	struct nic *nic = netdev_priv(netdev);
	u32 *buff = p;
	int i;

	regs->version = (1 << 24) | nic->pdev->revision;
	/* word 0: SCB command hi/lo bytes and 16-bit status */
	buff[0] = ioread8(&nic->csr->scb.cmd_hi) << 24 |
		ioread8(&nic->csr->scb.cmd_lo) << 16 |
		ioread16(&nic->csr->scb.status);
	for(i = E100_PHY_REGS; i >= 0; i--)
		buff[1 + E100_PHY_REGS - i] =
			mdio_read(netdev, nic->mii.phy_id, i);
	memset(nic->mem->dump_buf, 0, sizeof(nic->mem->dump_buf));
	e100_exec_cb(nic, NULL, e100_dump);
	/* NOTE(review): 10ms is assumed sufficient for the dump command
	 * to complete; completion is not verified. */
	msleep(10);
	memcpy(&buff[2 + E100_PHY_REGS], nic->mem->dump_buf,
		sizeof(nic->mem->dump_buf));
}
       
  2458 
       
  2459 static void e100_get_wol(struct net_device *netdev, struct ethtool_wolinfo *wol)
       
  2460 {
       
  2461 	struct nic *nic = netdev_priv(netdev);
       
  2462 	wol->supported = (nic->mac >= mac_82558_D101_A4) ?  WAKE_MAGIC : 0;
       
  2463 	wol->wolopts = (nic->flags & wol_magic) ? WAKE_MAGIC : 0;
       
  2464 }
       
  2465 
       
  2466 static int e100_set_wol(struct net_device *netdev, struct ethtool_wolinfo *wol)
       
  2467 {
       
  2468 	struct nic *nic = netdev_priv(netdev);
       
  2469 
       
  2470 	if(wol->wolopts != WAKE_MAGIC && wol->wolopts != 0)
       
  2471 		return -EOPNOTSUPP;
       
  2472 
       
  2473 	if(wol->wolopts)
       
  2474 		nic->flags |= wol_magic;
       
  2475 	else
       
  2476 		nic->flags &= ~wol_magic;
       
  2477 
       
  2478 	e100_exec_cb(nic, NULL, e100_configure);
       
  2479 
       
  2480 	return 0;
       
  2481 }
       
  2482 
       
  2483 static u32 e100_get_msglevel(struct net_device *netdev)
       
  2484 {
       
  2485 	struct nic *nic = netdev_priv(netdev);
       
  2486 	return nic->msg_enable;
       
  2487 }
       
  2488 
       
  2489 static void e100_set_msglevel(struct net_device *netdev, u32 value)
       
  2490 {
       
  2491 	struct nic *nic = netdev_priv(netdev);
       
  2492 	nic->msg_enable = value;
       
  2493 }
       
  2494 
       
  2495 static int e100_nway_reset(struct net_device *netdev)
       
  2496 {
       
  2497 	struct nic *nic = netdev_priv(netdev);
       
  2498 	return mii_nway_restart(&nic->mii);
       
  2499 }
       
  2500 
       
  2501 static u32 e100_get_link(struct net_device *netdev)
       
  2502 {
       
  2503 	struct nic *nic = netdev_priv(netdev);
       
  2504 	return mii_link_ok(&nic->mii);
       
  2505 }
       
  2506 
       
  2507 static int e100_get_eeprom_len(struct net_device *netdev)
       
  2508 {
       
  2509 	struct nic *nic = netdev_priv(netdev);
       
  2510 	return nic->eeprom_wc << 1;
       
  2511 }
       
  2512 
       
  2513 #define E100_EEPROM_MAGIC	0x1234
       
  2514 static int e100_get_eeprom(struct net_device *netdev,
       
  2515 	struct ethtool_eeprom *eeprom, u8 *bytes)
       
  2516 {
       
  2517 	struct nic *nic = netdev_priv(netdev);
       
  2518 
       
  2519 	eeprom->magic = E100_EEPROM_MAGIC;
       
  2520 	memcpy(bytes, &((u8 *)nic->eeprom)[eeprom->offset], eeprom->len);
       
  2521 
       
  2522 	return 0;
       
  2523 }
       
  2524 
       
  2525 static int e100_set_eeprom(struct net_device *netdev,
       
  2526 	struct ethtool_eeprom *eeprom, u8 *bytes)
       
  2527 {
       
  2528 	struct nic *nic = netdev_priv(netdev);
       
  2529 
       
  2530 	if(eeprom->magic != E100_EEPROM_MAGIC)
       
  2531 		return -EINVAL;
       
  2532 
       
  2533 	memcpy(&((u8 *)nic->eeprom)[eeprom->offset], bytes, eeprom->len);
       
  2534 
       
  2535 	return e100_eeprom_save(nic, eeprom->offset >> 1,
       
  2536 		(eeprom->len >> 1) + 1);
       
  2537 }
       
  2538 
       
  2539 static void e100_get_ringparam(struct net_device *netdev,
       
  2540 	struct ethtool_ringparam *ring)
       
  2541 {
       
  2542 	struct nic *nic = netdev_priv(netdev);
       
  2543 	struct param_range *rfds = &nic->params.rfds;
       
  2544 	struct param_range *cbs = &nic->params.cbs;
       
  2545 
       
  2546 	ring->rx_max_pending = rfds->max;
       
  2547 	ring->tx_max_pending = cbs->max;
       
  2548 	ring->rx_mini_max_pending = 0;
       
  2549 	ring->rx_jumbo_max_pending = 0;
       
  2550 	ring->rx_pending = rfds->count;
       
  2551 	ring->tx_pending = cbs->count;
       
  2552 	ring->rx_mini_pending = 0;
       
  2553 	ring->rx_jumbo_pending = 0;
       
  2554 }
       
  2555 
       
  2556 static int e100_set_ringparam(struct net_device *netdev,
       
  2557 	struct ethtool_ringparam *ring)
       
  2558 {
       
  2559 	struct nic *nic = netdev_priv(netdev);
       
  2560 	struct param_range *rfds = &nic->params.rfds;
       
  2561 	struct param_range *cbs = &nic->params.cbs;
       
  2562 
       
  2563 	if ((ring->rx_mini_pending) || (ring->rx_jumbo_pending))
       
  2564 		return -EINVAL;
       
  2565 
       
  2566 	if(netif_running(netdev))
       
  2567 		e100_down(nic);
       
  2568 	rfds->count = max(ring->rx_pending, rfds->min);
       
  2569 	rfds->count = min(rfds->count, rfds->max);
       
  2570 	cbs->count = max(ring->tx_pending, cbs->min);
       
  2571 	cbs->count = min(cbs->count, cbs->max);
       
  2572 	DPRINTK(DRV, INFO, "Ring Param settings: rx: %d, tx %d\n",
       
  2573 	        rfds->count, cbs->count);
       
  2574 	if(netif_running(netdev))
       
  2575 		e100_up(nic);
       
  2576 
       
  2577 	return 0;
       
  2578 }
       
  2579 
       
/* Test names for ethtool self-test output; index order must match the
 * data[] slots filled in by e100_diag_test(). */
static const char e100_gstrings_test[][ETH_GSTRING_LEN] = {
	"Link test     (on/offline)",
	"Eeprom test   (on/offline)",
	"Self test        (offline)",
	"Mac loopback     (offline)",
	"Phy loopback     (offline)",
};
#define E100_TEST_LEN	ARRAY_SIZE(e100_gstrings_test)
       
  2588 
       
/* ethtool self-test.  The link and EEPROM checks always run; with
 * ETH_TEST_FL_OFFLINE the interface is additionally taken down, the
 * self/loopback tests run, and the PHY settings are restored.  A
 * non-zero data[] entry marks that test as failed. */
static void e100_diag_test(struct net_device *netdev,
	struct ethtool_test *test, u64 *data)
{
	struct ethtool_cmd cmd;
	struct nic *nic = netdev_priv(netdev);
	int i, err;

	memset(data, 0, E100_TEST_LEN * sizeof(u64));
	data[0] = !mii_link_ok(&nic->mii);
	data[1] = e100_eeprom_load(nic);
	if(test->flags & ETH_TEST_FL_OFFLINE) {

		/* save speed, duplex & autoneg settings */
		err = mii_ethtool_gset(&nic->mii, &cmd);

		if(netif_running(netdev))
			e100_down(nic);
		data[2] = e100_self_test(nic);
		data[3] = e100_loopback_test(nic, lb_mac);
		data[4] = e100_loopback_test(nic, lb_phy);

		/* restore speed, duplex & autoneg settings */
		err = mii_ethtool_sset(&nic->mii, &cmd);

		if(netif_running(netdev))
			e100_up(nic);
	}
	for(i = 0; i < E100_TEST_LEN; i++)
		test->flags |= data[i] ? ETH_TEST_FL_FAILED : 0;

	/* NOTE(review): the purpose of this 4s delay is not evident from
	 * this file -- presumably to let the link re-establish after the
	 * tests; confirm before removing. */
	msleep_interruptible(4 * 1000);
}
       
  2621 
       
  2622 static int e100_phys_id(struct net_device *netdev, u32 data)
       
  2623 {
       
  2624 	struct nic *nic = netdev_priv(netdev);
       
  2625 
       
  2626 	if(!data || data > (u32)(MAX_SCHEDULE_TIMEOUT / HZ))
       
  2627 		data = (u32)(MAX_SCHEDULE_TIMEOUT / HZ);
       
  2628 	mod_timer(&nic->blink_timer, jiffies);
       
  2629 	msleep_interruptible(data * 1000);
       
  2630 	del_timer_sync(&nic->blink_timer);
       
  2631 	mdio_write(netdev, nic->mii.phy_id, MII_LED_CONTROL, 0);
       
  2632 
       
  2633 	return 0;
       
  2634 }
       
  2635 
       
/* Counter names for ethtool -S.  The first E100_NET_STATS_LEN entries
 * correspond to generic net_device counters copied in bulk by
 * e100_get_ethtool_stats(); the rest are driver-private counters
 * appended in the same order. */
static const char e100_gstrings_stats[][ETH_GSTRING_LEN] = {
	"rx_packets", "tx_packets", "rx_bytes", "tx_bytes", "rx_errors",
	"tx_errors", "rx_dropped", "tx_dropped", "multicast", "collisions",
	"rx_length_errors", "rx_over_errors", "rx_crc_errors",
	"rx_frame_errors", "rx_fifo_errors", "rx_missed_errors",
	"tx_aborted_errors", "tx_carrier_errors", "tx_fifo_errors",
	"tx_heartbeat_errors", "tx_window_errors",
	/* device-specific stats */
	"tx_deferred", "tx_single_collisions", "tx_multi_collisions",
	"tx_flow_control_pause", "rx_flow_control_pause",
	"rx_flow_control_unsupported", "tx_tco_packets", "rx_tco_packets",
};
#define E100_NET_STATS_LEN	21
#define E100_STATS_LEN	ARRAY_SIZE(e100_gstrings_stats)
       
  2650 
       
  2651 static int e100_get_sset_count(struct net_device *netdev, int sset)
       
  2652 {
       
  2653 	switch (sset) {
       
  2654 	case ETH_SS_TEST:
       
  2655 		return E100_TEST_LEN;
       
  2656 	case ETH_SS_STATS:
       
  2657 		return E100_STATS_LEN;
       
  2658 	default:
       
  2659 		return -EOPNOTSUPP;
       
  2660 	}
       
  2661 }
       
  2662 
       
/* ethtool get_stats: copy the generic netdev counters first, then the
 * driver-private counters, matching e100_gstrings_stats order. */
static void e100_get_ethtool_stats(struct net_device *netdev,
	struct ethtool_stats *stats, u64 *data)
{
	struct nic *nic = netdev_priv(netdev);
	int i;

	/* Assumes the first E100_NET_STATS_LEN members of netdev->stats
	 * are consecutive unsigned longs laid out in e100_gstrings_stats
	 * order -- TODO confirm against struct net_device_stats. */
	for(i = 0; i < E100_NET_STATS_LEN; i++)
		data[i] = ((unsigned long *)&netdev->stats)[i];

	data[i++] = nic->tx_deferred;
	data[i++] = nic->tx_single_collisions;
	data[i++] = nic->tx_multiple_collisions;
	data[i++] = nic->tx_fc_pause;
	data[i++] = nic->rx_fc_pause;
	data[i++] = nic->rx_fc_unsupported;
	data[i++] = nic->tx_tco_frames;
	data[i++] = nic->rx_tco_frames;
}
       
  2681 
       
  2682 static void e100_get_strings(struct net_device *netdev, u32 stringset, u8 *data)
       
  2683 {
       
  2684 	switch(stringset) {
       
  2685 	case ETH_SS_TEST:
       
  2686 		memcpy(data, *e100_gstrings_test, sizeof(e100_gstrings_test));
       
  2687 		break;
       
  2688 	case ETH_SS_STATS:
       
  2689 		memcpy(data, *e100_gstrings_stats, sizeof(e100_gstrings_stats));
       
  2690 		break;
       
  2691 	}
       
  2692 }
       
  2693 
       
/* ethtool entry points supported by this driver. */
static const struct ethtool_ops e100_ethtool_ops = {
	.get_settings		= e100_get_settings,
	.set_settings		= e100_set_settings,
	.get_drvinfo		= e100_get_drvinfo,
	.get_regs_len		= e100_get_regs_len,
	.get_regs		= e100_get_regs,
	.get_wol		= e100_get_wol,
	.set_wol		= e100_set_wol,
	.get_msglevel		= e100_get_msglevel,
	.set_msglevel		= e100_set_msglevel,
	.nway_reset		= e100_nway_reset,
	.get_link		= e100_get_link,
	.get_eeprom_len		= e100_get_eeprom_len,
	.get_eeprom		= e100_get_eeprom,
	.set_eeprom		= e100_set_eeprom,
	.get_ringparam		= e100_get_ringparam,
	.set_ringparam		= e100_set_ringparam,
	.self_test		= e100_diag_test,
	.get_strings		= e100_get_strings,
	.phys_id		= e100_phys_id,
	.get_ethtool_stats	= e100_get_ethtool_stats,
	.get_sset_count		= e100_get_sset_count,
};
       
  2717 
       
  2718 static int e100_do_ioctl(struct net_device *netdev, struct ifreq *ifr, int cmd)
       
  2719 {
       
  2720 	struct nic *nic = netdev_priv(netdev);
       
  2721 
       
  2722 	return generic_mii_ioctl(&nic->mii, if_mii(ifr), cmd, NULL);
       
  2723 }
       
  2724 
       
  2725 static int e100_alloc(struct nic *nic)
       
  2726 {
       
  2727 	nic->mem = pci_alloc_consistent(nic->pdev, sizeof(struct mem),
       
  2728 		&nic->dma_addr);
       
  2729 	return nic->mem ? 0 : -ENOMEM;
       
  2730 }
       
  2731 
       
  2732 static void e100_free(struct nic *nic)
       
  2733 {
       
  2734 	if(nic->mem) {
       
  2735 		pci_free_consistent(nic->pdev, sizeof(struct mem),
       
  2736 			nic->mem, nic->dma_addr);
       
  2737 		nic->mem = NULL;
       
  2738 	}
       
  2739 }
       
  2740 
       
  2741 static int e100_open(struct net_device *netdev)
       
  2742 {
       
  2743 	struct nic *nic = netdev_priv(netdev);
       
  2744 	int err = 0;
       
  2745 
       
  2746 	if (!nic->ecdev)
       
  2747 		netif_carrier_off(netdev);
       
  2748 	if((err = e100_up(nic)))
       
  2749 		DPRINTK(IFUP, ERR, "Cannot open interface, aborting.\n");
       
  2750 	return err;
       
  2751 }
       
  2752 
       
  2753 static int e100_close(struct net_device *netdev)
       
  2754 {
       
  2755 	e100_down(netdev_priv(netdev));
       
  2756 	return 0;
       
  2757 }
       
  2758 
       
  2759 static int __devinit e100_probe(struct pci_dev *pdev,
       
  2760 	const struct pci_device_id *ent)
       
  2761 {
       
  2762 	struct net_device *netdev;
       
  2763 	struct nic *nic;
       
  2764 	int err;
       
  2765 	DECLARE_MAC_BUF(mac);
       
  2766 
       
  2767 	if(!(netdev = alloc_etherdev(sizeof(struct nic)))) {
       
  2768 		if(((1 << debug) - 1) & NETIF_MSG_PROBE)
       
  2769 			printk(KERN_ERR PFX "Etherdev alloc failed, abort.\n");
       
  2770 		return -ENOMEM;
       
  2771 	}
       
  2772 
       
  2773 	netdev->open = e100_open;
       
  2774 	netdev->stop = e100_close;
       
  2775 	netdev->hard_start_xmit = e100_xmit_frame;
       
  2776 	netdev->set_multicast_list = e100_set_multicast_list;
       
  2777 	netdev->set_mac_address = e100_set_mac_address;
       
  2778 	netdev->change_mtu = e100_change_mtu;
       
  2779 	netdev->do_ioctl = e100_do_ioctl;
       
  2780 	SET_ETHTOOL_OPS(netdev, &e100_ethtool_ops);
       
  2781 	netdev->tx_timeout = e100_tx_timeout;
       
  2782 	netdev->watchdog_timeo = E100_WATCHDOG_PERIOD;
       
  2783 #ifdef CONFIG_NET_POLL_CONTROLLER
       
  2784 	netdev->poll_controller = e100_netpoll;
       
  2785 #endif
       
  2786 	strncpy(netdev->name, pci_name(pdev), sizeof(netdev->name) - 1);
       
  2787 
       
  2788 	nic = netdev_priv(netdev);
       
  2789 	netif_napi_add(netdev, &nic->napi, e100_poll, E100_NAPI_WEIGHT);
       
  2790 	nic->netdev = netdev;
       
  2791 	nic->pdev = pdev;
       
  2792 	nic->msg_enable = (1 << debug) - 1;
       
  2793 	pci_set_drvdata(pdev, netdev);
       
  2794 
       
  2795 	if((err = pci_enable_device(pdev))) {
       
  2796 		DPRINTK(PROBE, ERR, "Cannot enable PCI device, aborting.\n");
       
  2797 		goto err_out_free_dev;
       
  2798 	}
       
  2799 
       
  2800 	if(!(pci_resource_flags(pdev, 0) & IORESOURCE_MEM)) {
       
  2801 		DPRINTK(PROBE, ERR, "Cannot find proper PCI device "
       
  2802 			"base address, aborting.\n");
       
  2803 		err = -ENODEV;
       
  2804 		goto err_out_disable_pdev;
       
  2805 	}
       
  2806 
       
  2807 	if((err = pci_request_regions(pdev, DRV_NAME))) {
       
  2808 		DPRINTK(PROBE, ERR, "Cannot obtain PCI resources, aborting.\n");
       
  2809 		goto err_out_disable_pdev;
       
  2810 	}
       
  2811 
       
  2812 	if((err = pci_set_dma_mask(pdev, DMA_32BIT_MASK))) {
       
  2813 		DPRINTK(PROBE, ERR, "No usable DMA configuration, aborting.\n");
       
  2814 		goto err_out_free_res;
       
  2815 	}
       
  2816 
       
  2817 	SET_NETDEV_DEV(netdev, &pdev->dev);
       
  2818 
       
  2819 	if (use_io)
       
  2820 		DPRINTK(PROBE, INFO, "using i/o access mode\n");
       
  2821 
       
  2822 	nic->csr = pci_iomap(pdev, (use_io ? 1 : 0), sizeof(struct csr));
       
  2823 	if(!nic->csr) {
       
  2824 		DPRINTK(PROBE, ERR, "Cannot map device registers, aborting.\n");
       
  2825 		err = -ENOMEM;
       
  2826 		goto err_out_free_res;
       
  2827 	}
       
  2828 
       
  2829 	if(ent->driver_data)
       
  2830 		nic->flags |= ich;
       
  2831 	else
       
  2832 		nic->flags &= ~ich;
       
  2833 
       
  2834 	e100_get_defaults(nic);
       
  2835 
       
  2836 	/* locks must be initialized before calling hw_reset */
       
  2837 	spin_lock_init(&nic->cb_lock);
       
  2838 	spin_lock_init(&nic->cmd_lock);
       
  2839 	spin_lock_init(&nic->mdio_lock);
       
  2840 
       
  2841 	/* Reset the device before pci_set_master() in case device is in some
       
  2842 	 * funky state and has an interrupt pending - hint: we don't have the
       
  2843 	 * interrupt handler registered yet. */
       
  2844 	e100_hw_reset(nic);
       
  2845 
       
  2846 	pci_set_master(pdev);
       
  2847 
       
  2848 	init_timer(&nic->watchdog);
       
  2849 	nic->watchdog.function = e100_watchdog;
       
  2850 	nic->watchdog.data = (unsigned long)nic;
       
  2851 	init_timer(&nic->blink_timer);
       
  2852 	nic->blink_timer.function = e100_blink_led;
       
  2853 	nic->blink_timer.data = (unsigned long)nic;
       
  2854 
       
  2855 	INIT_WORK(&nic->tx_timeout_task, e100_tx_timeout_task);
       
  2856 
       
  2857 	if((err = e100_alloc(nic))) {
       
  2858 		DPRINTK(PROBE, ERR, "Cannot alloc driver memory, aborting.\n");
       
  2859 		goto err_out_iounmap;
       
  2860 	}
       
  2861 
       
  2862 	if((err = e100_eeprom_load(nic)))
       
  2863 		goto err_out_free;
       
  2864 
       
  2865 	e100_phy_init(nic);
       
  2866 
       
  2867 	memcpy(netdev->dev_addr, nic->eeprom, ETH_ALEN);
       
  2868 	memcpy(netdev->perm_addr, nic->eeprom, ETH_ALEN);
       
  2869 	if (!is_valid_ether_addr(netdev->perm_addr)) {
       
  2870 		if (!eeprom_bad_csum_allow) {
       
  2871 			DPRINTK(PROBE, ERR, "Invalid MAC address from "
       
  2872 			        "EEPROM, aborting.\n");
       
  2873 			err = -EAGAIN;
       
  2874 			goto err_out_free;
       
  2875 		} else {
       
  2876 			DPRINTK(PROBE, ERR, "Invalid MAC address from EEPROM, "
       
  2877 			        "you MUST configure one.\n");
       
  2878 		}
       
  2879 	}
       
  2880 
       
  2881 	/* Wol magic packet can be enabled from eeprom */
       
  2882 	if((nic->mac >= mac_82558_D101_A4) &&
       
  2883 	   (nic->eeprom[eeprom_id] & eeprom_id_wol))
       
  2884 		nic->flags |= wol_magic;
       
  2885 
       
  2886 	/* ack any pending wake events, disable PME */
       
  2887 	pci_pme_active(pdev, false);
       
  2888 
       
  2889 	// offer device to EtherCAT master module
       
  2890 	nic->ecdev = ecdev_offer(netdev, e100_ec_poll, THIS_MODULE);
       
  2891 	if (nic->ecdev) {
       
  2892 		if (ecdev_open(nic->ecdev)) {
       
  2893 			ecdev_withdraw(nic->ecdev);
       
  2894 			goto err_out_free;
       
  2895 		}
       
  2896 	} else {
       
  2897 		strcpy(netdev->name, "eth%d");
       
  2898 		if((err = register_netdev(netdev))) {
       
  2899 			DPRINTK(PROBE, ERR, "Cannot register net device, aborting.\n");
       
  2900 			goto err_out_free;
       
  2901 		}
       
  2902 	}
       
  2903 
       
  2904 	DPRINTK(PROBE, INFO, "addr 0x%llx, irq %d, MAC addr %s\n",
       
  2905 		(unsigned long long)pci_resource_start(pdev, use_io ? 1 : 0),
       
  2906 		pdev->irq, print_mac(mac, netdev->dev_addr));
       
  2907 
       
  2908 	return 0;
       
  2909 
       
  2910 err_out_free:
       
  2911 	e100_free(nic);
       
  2912 err_out_iounmap:
       
  2913 	pci_iounmap(pdev, nic->csr);
       
  2914 err_out_free_res:
       
  2915 	pci_release_regions(pdev);
       
  2916 err_out_disable_pdev:
       
  2917 	pci_disable_device(pdev);
       
  2918 err_out_free_dev:
       
  2919 	pci_set_drvdata(pdev, NULL);
       
  2920 	free_netdev(netdev);
       
  2921 	return err;
       
  2922 }
       
  2923 
       
  2924 static void __devexit e100_remove(struct pci_dev *pdev)
       
  2925 {
       
  2926 	struct net_device *netdev = pci_get_drvdata(pdev);
       
  2927 
       
  2928 	if(netdev) {
       
  2929 		struct nic *nic = netdev_priv(netdev);
       
  2930 		if (nic->ecdev) {
       
  2931 			ecdev_close(nic->ecdev);
       
  2932 			ecdev_withdraw(nic->ecdev);
       
  2933 		} else {
       
  2934 			unregister_netdev(netdev);
       
  2935 		}
       
  2936 
       
  2937 		e100_free(nic);
       
  2938 		pci_iounmap(pdev, nic->csr);
       
  2939 		free_netdev(netdev);
       
  2940 		pci_release_regions(pdev);
       
  2941 		pci_disable_device(pdev);
       
  2942 		pci_set_drvdata(pdev, NULL);
       
  2943 	}
       
  2944 }
       
  2945 
       
  2946 static int e100_suspend(struct pci_dev *pdev, pm_message_t state)
       
  2947 {
       
  2948 	struct net_device *netdev = pci_get_drvdata(pdev);
       
  2949 	struct nic *nic = netdev_priv(netdev);
       
  2950 
       
  2951 	if (nic->ecdev)
       
  2952 		return 0;
       
  2953 
       
  2954 	if (netif_running(netdev))
       
  2955 		e100_down(nic);
       
  2956 	netif_device_detach(netdev);
       
  2957 
       
  2958 	pci_save_state(pdev);
       
  2959 
       
  2960 	if ((nic->flags & wol_magic) | e100_asf(nic)) {
       
  2961 		pci_enable_wake(pdev, PCI_D3hot, 1);
       
  2962 		pci_enable_wake(pdev, PCI_D3cold, 1);
       
  2963 	} else {
       
  2964 		pci_enable_wake(pdev, PCI_D3hot, 0);
       
  2965 		pci_enable_wake(pdev, PCI_D3cold, 0);
       
  2966 	}
       
  2967 
       
  2968 	pci_disable_device(pdev);
       
  2969 	pci_set_power_state(pdev, PCI_D3hot);
       
  2970 
       
  2971 	return 0;
       
  2972 }
       
  2973 
       
  2974 #ifdef CONFIG_PM
       
/* PCI resume handler: restore power state and PCI configuration, then
 * re-attach the interface and bring it back up if it was running.
 *
 * EtherCAT-claimed devices return immediately: e100_suspend() never
 * suspended them in the first place.
 */
static int e100_resume(struct pci_dev *pdev)
{
	struct net_device *netdev = pci_get_drvdata(pdev);
	struct nic *nic = netdev_priv(netdev);

	if (nic->ecdev)
		return 0;

	pci_set_power_state(pdev, PCI_D0);
	pci_restore_state(pdev);
	/* ack any pending wake events, disable PME */
	pci_enable_wake(pdev, 0, 0);

	netif_device_attach(netdev);
	if (netif_running(netdev))
		e100_up(nic);

	return 0;
}
       
  2994 #endif /* CONFIG_PM */
       
  2995 
       
/* PCI shutdown hook: reuse the suspend path to quiesce the device on
 * system power-off/reboot (return value intentionally ignored). */
static void e100_shutdown(struct pci_dev *pdev)
{
	e100_suspend(pdev, PMSG_SUSPEND);
}
       
  3000 
       
  3001 /* ------------------ PCI Error Recovery infrastructure  -------------- */
       
  3002 /**
       
  3003  * e100_io_error_detected - called when PCI error is detected.
       
  3004  * @pdev: Pointer to PCI device
       
  3005  * @state: The current pci connection state
       
  3006  */
       
  3007 static pci_ers_result_t e100_io_error_detected(struct pci_dev *pdev, pci_channel_state_t state)
       
  3008 {
       
  3009 	struct net_device *netdev = pci_get_drvdata(pdev);
       
  3010 	struct nic *nic = netdev_priv(netdev);
       
  3011 
       
  3012 	/* Similar to calling e100_down(), but avoids adapter I/O. */
       
  3013 	netdev->stop(netdev);
       
  3014 
       
  3015 	if (!nic->ecdev) {
       
  3016 		/* Detach; put netif into a state similar to hotplug unplug. */
       
  3017 		napi_enable(&nic->napi);
       
  3018 		netif_device_detach(netdev);
       
  3019 	}
       
  3020 	pci_disable_device(pdev);
       
  3021 
       
  3022 	/* Request a slot reset. */
       
  3023 	return PCI_ERS_RESULT_NEED_RESET;
       
  3024 }
       
  3025 
       
  3026 /**
       
  3027  * e100_io_slot_reset - called after the pci bus has been reset.
       
  3028  * @pdev: Pointer to PCI device
       
  3029  *
       
  3030  * Restart the card from scratch.
       
  3031  */
       
  3032 static pci_ers_result_t e100_io_slot_reset(struct pci_dev *pdev)
       
  3033 {
       
  3034 	struct net_device *netdev = pci_get_drvdata(pdev);
       
  3035 	struct nic *nic = netdev_priv(netdev);
       
  3036 
       
  3037 	if (pci_enable_device(pdev)) {
       
  3038 		printk(KERN_ERR "e100: Cannot re-enable PCI device after reset.\n");
       
  3039 		return PCI_ERS_RESULT_DISCONNECT;
       
  3040 	}
       
  3041 	pci_set_master(pdev);
       
  3042 
       
  3043 	/* Only one device per card can do a reset */
       
  3044 	if (0 != PCI_FUNC(pdev->devfn))
       
  3045 		return PCI_ERS_RESULT_RECOVERED;
       
  3046 	e100_hw_reset(nic);
       
  3047 	e100_phy_init(nic);
       
  3048 
       
  3049 	return PCI_ERS_RESULT_RECOVERED;
       
  3050 }
       
  3051 
       
  3052 /**
       
  3053  * e100_io_resume - resume normal operations
       
  3054  * @pdev: Pointer to PCI device
       
  3055  *
       
  3056  * Resume normal operations after an error recovery
       
  3057  * sequence has been completed.
       
  3058  */
       
  3059 static void e100_io_resume(struct pci_dev *pdev)
       
  3060 {
       
  3061 	struct net_device *netdev = pci_get_drvdata(pdev);
       
  3062 	struct nic *nic = netdev_priv(netdev);
       
  3063 
       
  3064 	/* ack any pending wake events, disable PME */
       
  3065 	pci_enable_wake(pdev, 0, 0);
       
  3066 
       
  3067 	if (!nic->ecdev)
       
  3068 		netif_device_attach(netdev);
       
  3069 	if (nic->ecdev || netif_running(netdev)) {
       
  3070 		e100_open(netdev);
       
  3071 		if (!nic->ecdev)
       
  3072 			mod_timer(&nic->watchdog, jiffies);
       
  3073 	}
       
  3074 }
       
  3075 
       
/* PCI error-recovery callbacks (see the e100_io_* handlers above). */
static struct pci_error_handlers e100_err_handler = {
	.error_detected = e100_io_error_detected,
	.slot_reset = e100_io_slot_reset,
	.resume = e100_io_resume,
};
       
  3081 
       
/* PCI driver glue: probe/remove, power-management hooks (only when the
 * kernel is built with CONFIG_PM), shutdown and error recovery. */
static struct pci_driver e100_driver = {
	.name =         DRV_NAME,
	.id_table =     e100_id_table,
	.probe =        e100_probe,
	.remove =       __devexit_p(e100_remove),
#ifdef CONFIG_PM
	/* Power Management hooks */
	.suspend =      e100_suspend,
	.resume =       e100_resume,
#endif
	.shutdown =     e100_shutdown,
	.err_handler = &e100_err_handler,
};
       
  3095 
       
  3096 static int __init e100_init_module(void)
       
  3097 {
       
  3098     printk(KERN_INFO DRV_NAME " " DRV_DESCRIPTION " " DRV_VERSION
       
  3099             ", master " EC_MASTER_VERSION "\n");
       
  3100 
       
  3101 	return pci_register_driver(&e100_driver);
       
  3102 }
       
  3103 
       
/* Module exit: unregister the PCI driver, which triggers e100_remove()
 * for every bound device. */
static void __exit e100_cleanup_module(void)
{
	printk(KERN_INFO DRV_NAME " cleaning up module...\n");
	pci_unregister_driver(&e100_driver);
	printk(KERN_INFO DRV_NAME " module cleaned up.\n");
}
       
  3110 
       
/* Register the module's init/exit entry points with the kernel. */
module_init(e100_init_module);
module_exit(e100_cleanup_module);