devices/e100-2.6.26-ethercat.c
changeset 1251 3c3f8cb76748
child 1274 d5ddf04c76fc
equal deleted inserted replaced
1250:642048176899 1251:3c3f8cb76748
       
     1 /******************************************************************************
       
     2  *
       
     3  *  $Id$
       
     4  *
       
     5  *  Copyright (C) 2007  Florian Pose, Ingenieurgemeinschaft IgH
       
     6  *
       
     7  *  This file is part of the IgH EtherCAT Master.
       
     8  *
       
     9  *  The IgH EtherCAT Master is free software; you can redistribute it
       
    10  *  and/or modify it under the terms of the GNU General Public License
       
    11  *  as published by the Free Software Foundation; either version 2 of the
       
    12  *  License, or (at your option) any later version.
       
    13  *
       
    14  *  The IgH EtherCAT Master is distributed in the hope that it will be
       
    15  *  useful, but WITHOUT ANY WARRANTY; without even the implied warranty of
       
    16  *  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
       
    17  *  GNU General Public License for more details.
       
    18  *
       
    19  *  You should have received a copy of the GNU General Public License
       
    20  *  along with the IgH EtherCAT Master; if not, write to the Free Software
       
    21  *  Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA  02110-1301  USA
       
    22  *
       
    23  *  The right to use EtherCAT Technology is granted and comes free of
       
    24  *  charge under condition of compatibility of product made by
       
    25  *  Licensee. People intending to distribute/sell products based on the
       
    26  *  code, have to sign an agreement to guarantee that products using
       
    27  *  software based on IgH EtherCAT master stay compatible with the actual
       
    28  *  EtherCAT specification (which are released themselves as an open
       
    29  *  standard) as the (only) precondition to have the right to use EtherCAT
       
    30  *  Technology, IP and trade marks.
       
    31  *
       
    32  *  vim: noexpandtab
       
    33  *
       
    34  *****************************************************************************/
       
    35 
       
    36 /**
       
    37    \file
       
    38    EtherCAT driver for e100-compatible NICs.
       
    39 */
       
    40 
       
    41 /* Former documentation: */
       
    42 
       
    43 /*******************************************************************************
       
    44 
       
    45   Intel PRO/100 Linux driver
       
    46   Copyright(c) 1999 - 2006 Intel Corporation.
       
    47 
       
    48   This program is free software; you can redistribute it and/or modify it
       
    49   under the terms and conditions of the GNU General Public License,
       
    50   version 2, as published by the Free Software Foundation.
       
    51 
       
    52   This program is distributed in the hope it will be useful, but WITHOUT
       
    53   ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
       
    54   FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
       
    55   more details.
       
    56 
       
    57   You should have received a copy of the GNU General Public License along with
       
    58   this program; if not, write to the Free Software Foundation, Inc.,
       
    59   51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
       
    60 
       
    61   The full GNU General Public License is included in this distribution in
       
    62   the file called "COPYING".
       
    63 
       
    64   Contact Information:
       
    65   Linux NICS <linux.nics@intel.com>
       
    66   e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
       
    67   Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
       
    68 
       
    69 *******************************************************************************/
       
    70 
       
    71 /*
       
    72  *	e100.c: Intel(R) PRO/100 ethernet driver
       
    73  *
       
    74  *	(Re)written 2003 by scott.feldman@intel.com.  Based loosely on
       
    75  *	original e100 driver, but better described as a munging of
       
    76  *	e100, e1000, eepro100, tg3, 8139cp, and other drivers.
       
    77  *
       
    78  *	References:
       
    79  *		Intel 8255x 10/100 Mbps Ethernet Controller Family,
       
    80  *		Open Source Software Developers Manual,
       
    81  *		http://sourceforge.net/projects/e1000
       
    82  *
       
    83  *
       
    84  *	                      Theory of Operation
       
    85  *
       
    86  *	I.   General
       
    87  *
       
    88  *	The driver supports Intel(R) 10/100 Mbps PCI Fast Ethernet
       
    89  *	controller family, which includes the 82557, 82558, 82559, 82550,
       
    90  *	82551, and 82562 devices.  82558 and greater controllers
       
    91  *	integrate the Intel 82555 PHY.  The controllers are used in
       
    92  *	server and client network interface cards, as well as in
       
    93  *	LAN-On-Motherboard (LOM), CardBus, MiniPCI, and ICHx
       
    94  *	configurations.  8255x supports a 32-bit linear addressing
       
    95  *	mode and operates at 33Mhz PCI clock rate.
       
    96  *
       
    97  *	II.  Driver Operation
       
    98  *
       
    99  *	Memory-mapped mode is used exclusively to access the device's
       
   100  *	shared-memory structure, the Control/Status Registers (CSR). All
       
   101  *	setup, configuration, and control of the device, including queuing
       
   102  *	of Tx, Rx, and configuration commands is through the CSR.
       
   103  *	cmd_lock serializes accesses to the CSR command register.  cb_lock
       
   104  *	protects the shared Command Block List (CBL).
       
   105  *
       
   106  *	8255x is highly MII-compliant and all access to the PHY go
       
   107  *	through the Management Data Interface (MDI).  Consequently, the
       
   108  *	driver leverages the mii.c library shared with other MII-compliant
       
   109  *	devices.
       
   110  *
       
   111  *	Big- and Little-Endian byte order as well as 32- and 64-bit
       
   112  *	archs are supported.  Weak-ordered memory and non-cache-coherent
       
   113  *	archs are supported.
       
   114  *
       
   115  *	III. Transmit
       
   116  *
       
   117  *	A Tx skb is mapped and hangs off of a TCB.  TCBs are linked
       
   118  *	together in a fixed-size ring (CBL) thus forming the flexible mode
       
   119  *	memory structure.  A TCB marked with the suspend-bit indicates
       
   120  *	the end of the ring.  The last TCB processed suspends the
       
    121  *	controller, and the controller can be restarted by issuing a CU
       
   122  *	resume command to continue from the suspend point, or a CU start
       
   123  *	command to start at a given position in the ring.
       
   124  *
       
   125  *	Non-Tx commands (config, multicast setup, etc) are linked
       
   126  *	into the CBL ring along with Tx commands.  The common structure
       
   127  *	used for both Tx and non-Tx commands is the Command Block (CB).
       
   128  *
       
   129  *	cb_to_use is the next CB to use for queuing a command; cb_to_clean
       
   130  *	is the next CB to check for completion; cb_to_send is the first
       
   131  *	CB to start on in case of a previous failure to resume.  CB clean
       
   132  *	up happens in interrupt context in response to a CU interrupt.
       
   133  *	cbs_avail keeps track of number of free CB resources available.
       
   134  *
       
   135  * 	Hardware padding of short packets to minimum packet size is
       
   136  * 	enabled.  82557 pads with 7Eh, while the later controllers pad
       
   137  * 	with 00h.
       
   138  *
       
   139  *	IV.  Receive
       
   140  *
       
   141  *	The Receive Frame Area (RFA) comprises a ring of Receive Frame
       
   142  *	Descriptors (RFD) + data buffer, thus forming the simplified mode
       
   143  *	memory structure.  Rx skbs are allocated to contain both the RFD
       
   144  *	and the data buffer, but the RFD is pulled off before the skb is
       
   145  *	indicated.  The data buffer is aligned such that encapsulated
       
   146  *	protocol headers are u32-aligned.  Since the RFD is part of the
       
   147  *	mapped shared memory, and completion status is contained within
       
   148  *	the RFD, the RFD must be dma_sync'ed to maintain a consistent
       
   149  *	view from software and hardware.
       
   150  *
       
   151  *	In order to keep updates to the RFD link field from colliding with
       
   152  *	hardware writes to mark packets complete, we use the feature that
       
   153  *	hardware will not write to a size 0 descriptor and mark the previous
       
   154  *	packet as end-of-list (EL).   After updating the link, we remove EL
       
   155  *	and only then restore the size such that hardware may use the
       
   156  *	previous-to-end RFD.
       
   157  *
       
    158  *	Under typical operation, the receive unit (RU) is started once,
       
   159  *	and the controller happily fills RFDs as frames arrive.  If
       
   160  *	replacement RFDs cannot be allocated, or the RU goes non-active,
       
   161  *	the RU must be restarted.  Frame arrival generates an interrupt,
       
   162  *	and Rx indication and re-allocation happen in the same context,
       
   163  *	therefore no locking is required.  A software-generated interrupt
       
   164  *	is generated from the watchdog to recover from a failed allocation
       
   165  *	scenario where all Rx resources have been indicated and none re-
       
   166  *	placed.
       
   167  *
       
   168  *	V.   Miscellaneous
       
   169  *
       
   170  * 	VLAN offloading of tagging, stripping and filtering is not
       
   171  * 	supported, but driver will accommodate the extra 4-byte VLAN tag
       
   172  * 	for processing by upper layers.  Tx/Rx Checksum offloading is not
       
   173  * 	supported.  Tx Scatter/Gather is not supported.  Jumbo Frames is
       
   174  * 	not supported (hardware limitation).
       
   175  *
       
   176  * 	MagicPacket(tm) WoL support is enabled/disabled via ethtool.
       
   177  *
       
   178  * 	Thanks to JC (jchapman@katalix.com) for helping with
       
   179  * 	testing/troubleshooting the development driver.
       
   180  *
       
   181  * 	TODO:
       
   182  * 	o several entry points race with dev->close
       
   183  * 	o check for tx-no-resources/stop Q races with tx clean/wake Q
       
   184  *
       
   185  *	FIXES:
       
   186  * 2005/12/02 - Michael O'Donnell <Michael.ODonnell at stratus dot com>
       
   187  *	- Stratus87247: protect MDI control register manipulations
       
   188  */
       
   189 
       
   190 #include <linux/module.h>
       
   191 #include <linux/moduleparam.h>
       
   192 #include <linux/kernel.h>
       
   193 #include <linux/types.h>
       
   194 #include <linux/slab.h>
       
   195 #include <linux/delay.h>
       
   196 #include <linux/init.h>
       
   197 #include <linux/pci.h>
       
   198 #include <linux/dma-mapping.h>
       
   199 #include <linux/netdevice.h>
       
   200 #include <linux/etherdevice.h>
       
   201 #include <linux/mii.h>
       
   202 #include <linux/if_vlan.h>
       
   203 #include <linux/skbuff.h>
       
   204 #include <linux/ethtool.h>
       
   205 #include <linux/string.h>
       
   206 #include <asm/unaligned.h>
       
   207 
       
   208 // EtherCAT includes
       
   209 #include "../globals.h"
       
   210 #include "ecdev.h"
       
   211 
       
   212 #define DRV_NAME		"ec_e100"
       
   213 
       
   214 #define DRV_EXT			"-NAPI"
       
   215 #define DRV_VERSION		"3.5.23-k4"DRV_EXT
       
   216 #define DRV_DESCRIPTION		"Intel(R) PRO/100 Network Driver"
       
   217 #define DRV_COPYRIGHT		"Copyright(c) 1999-2006 Intel Corporation"
       
   218 #define PFX			DRV_NAME ": "
       
   219 
       
   220 #define E100_WATCHDOG_PERIOD	(2 * HZ)
       
   221 #define E100_NAPI_WEIGHT	16
       
   222 
       
   223 MODULE_DESCRIPTION(DRV_DESCRIPTION);
       
   224 MODULE_AUTHOR(DRV_COPYRIGHT);
       
   225 MODULE_LICENSE("GPL");
       
   226 MODULE_VERSION(DRV_VERSION);
       
   227 
       
   228 MODULE_DESCRIPTION(DRV_DESCRIPTION);
       
   229 MODULE_AUTHOR("Mario Witkowski <mario.witkowski@w4systems.de>");
       
   230 MODULE_LICENSE("GPL");
       
   231 MODULE_VERSION(DRV_VERSION ", master " EC_MASTER_VERSION);
       
   232 
       
/* EtherCAT cyclic poll entry point (defined later in this file);
 * used by the EtherCAT master instead of the normal IRQ path. */
void e100_ec_poll(struct net_device *);

/* Module parameters (all read-only after load, perm 0). */
static int debug = 3;			/* default message level */
static int eeprom_bad_csum_allow = 0;	/* tolerate bad EEPROM checksum */
static int use_io = 0;			/* force port I/O instead of MMIO */
module_param(debug, int, 0);
module_param(eeprom_bad_csum_allow, int, 0);
module_param(use_io, int, 0);
MODULE_PARM_DESC(debug, "Debug level (0=none,...,16=all)");
MODULE_PARM_DESC(eeprom_bad_csum_allow, "Allow bad eeprom checksums");
MODULE_PARM_DESC(use_io, "Force use of i/o access mode");
       
/* Conditional driver printk: emits only when the NETIF_MSG_<nlevel>
 * bit is set in nic->msg_enable.  Expects a local 'nic' in scope. */
#define DPRINTK(nlevel, klevel, fmt, args...) \
	(void)((NETIF_MSG_##nlevel & nic->msg_enable) && \
	printk(KERN_##klevel PFX "%s: %s: " fmt, nic->netdev->name, \
		__FUNCTION__ , ## args))
       
   248 
       
/* PCI match entry for an 8255x device; 'ich' (stored in driver_data)
 * marks ICH-chipset variants needing workarounds. */
#define INTEL_8255X_ETHERNET_DEVICE(device_id, ich) {\
	PCI_VENDOR_ID_INTEL, device_id, PCI_ANY_ID, PCI_ANY_ID, \
	PCI_CLASS_NETWORK_ETHERNET << 8, 0xFFFF00, ich }
/* Supported Intel PRO/100 device IDs. */
static struct pci_device_id e100_id_table[] = {
	INTEL_8255X_ETHERNET_DEVICE(0x1029, 0),
	INTEL_8255X_ETHERNET_DEVICE(0x1030, 0),
	INTEL_8255X_ETHERNET_DEVICE(0x1031, 3),
	INTEL_8255X_ETHERNET_DEVICE(0x1032, 3),
	INTEL_8255X_ETHERNET_DEVICE(0x1033, 3),
	INTEL_8255X_ETHERNET_DEVICE(0x1034, 3),
	INTEL_8255X_ETHERNET_DEVICE(0x1038, 3),
	INTEL_8255X_ETHERNET_DEVICE(0x1039, 4),
	INTEL_8255X_ETHERNET_DEVICE(0x103A, 4),
	INTEL_8255X_ETHERNET_DEVICE(0x103B, 4),
	INTEL_8255X_ETHERNET_DEVICE(0x103C, 4),
	INTEL_8255X_ETHERNET_DEVICE(0x103D, 4),
	INTEL_8255X_ETHERNET_DEVICE(0x103E, 4),
	INTEL_8255X_ETHERNET_DEVICE(0x1050, 5),
	INTEL_8255X_ETHERNET_DEVICE(0x1051, 5),
	INTEL_8255X_ETHERNET_DEVICE(0x1052, 5),
	INTEL_8255X_ETHERNET_DEVICE(0x1053, 5),
	INTEL_8255X_ETHERNET_DEVICE(0x1054, 5),
	INTEL_8255X_ETHERNET_DEVICE(0x1055, 5),
	INTEL_8255X_ETHERNET_DEVICE(0x1056, 5),
	INTEL_8255X_ETHERNET_DEVICE(0x1057, 5),
	INTEL_8255X_ETHERNET_DEVICE(0x1059, 0),
	INTEL_8255X_ETHERNET_DEVICE(0x1064, 6),
	INTEL_8255X_ETHERNET_DEVICE(0x1065, 6),
	INTEL_8255X_ETHERNET_DEVICE(0x1066, 6),
	INTEL_8255X_ETHERNET_DEVICE(0x1067, 6),
	INTEL_8255X_ETHERNET_DEVICE(0x1068, 6),
	INTEL_8255X_ETHERNET_DEVICE(0x1069, 6),
	INTEL_8255X_ETHERNET_DEVICE(0x106A, 6),
	INTEL_8255X_ETHERNET_DEVICE(0x106B, 6),
	INTEL_8255X_ETHERNET_DEVICE(0x1091, 7),
	INTEL_8255X_ETHERNET_DEVICE(0x1092, 7),
	INTEL_8255X_ETHERNET_DEVICE(0x1093, 7),
	INTEL_8255X_ETHERNET_DEVICE(0x1094, 7),
	INTEL_8255X_ETHERNET_DEVICE(0x1095, 7),
	INTEL_8255X_ETHERNET_DEVICE(0x1209, 0),
	INTEL_8255X_ETHERNET_DEVICE(0x1229, 0),
	INTEL_8255X_ETHERNET_DEVICE(0x2449, 2),
	INTEL_8255X_ETHERNET_DEVICE(0x2459, 2),
	INTEL_8255X_ETHERNET_DEVICE(0x245D, 2),
	INTEL_8255X_ETHERNET_DEVICE(0x27DC, 7),
	{ 0, }
};
       
   296 
       
   297 // prevent from being loaded automatically
       
   298 //MODULE_DEVICE_TABLE(pci, e100_id_table);
       
   299 
       
/* MAC revision identifiers for the 8255x controller family
 * (82557/82558/82559/82550/82551 steppings). */
enum mac {
	mac_82557_D100_A  = 0,
	mac_82557_D100_B  = 1,
	mac_82557_D100_C  = 2,
	mac_82558_D101_A4 = 4,
	mac_82558_D101_B0 = 5,
	mac_82559_D101M   = 8,
	mac_82559_D101S   = 9,
	mac_82550_D102    = 12,
	mac_82550_D102_C  = 13,
	mac_82551_E       = 14,
	mac_82551_F       = 15,
	mac_82551_10      = 16,
	mac_unknown       = 0xFF,	/* unrecognized revision */
};
       
   315 
       
/* PHY identifiers (presumably assembled from the MII PHY ID registers —
 * confirm against the PHY probe code elsewhere in this driver). */
enum phy {
	phy_100a     = 0x000003E0,
	phy_100c     = 0x035002A8,
	phy_82555_tx = 0x015002A8,
	phy_nsc_tx   = 0x5C002000,
	phy_82562_et = 0x033002A8,
	phy_82562_em = 0x032002A8,
	phy_82562_ek = 0x031002A8,
	phy_82562_eh = 0x017002A8,
	phy_unknown  = 0xFFFFFFFF,
};
       
   327 
       
/* CSR (Control/Status Registers)
 *
 * Memory layout mirrors the device's register file and is accessed via
 * nic->csr (__iomem) — field order and sizes must not change. */
struct csr {
	struct {
		u8 status;	/* SCB status */
		u8 stat_ack;	/* interrupt status/ack bits */
		u8 cmd_lo;	/* CU/RU command opcode */
		u8 cmd_hi;	/* interrupt mask control */
		u32 gen_ptr;	/* general pointer (command operand) */
	} scb;
	u32 port;		/* PORT register: reset/self-test triggers */
	u16 flash_ctrl;
	u8 eeprom_ctrl_lo;	/* bit-banged EEPROM interface */
	u8 eeprom_ctrl_hi;
	u32 mdi_ctrl;		/* MDI (PHY management) control */
	u32 rx_dma_count;
};
       
   344 
       
/* Receive-unit status field within the SCB status byte. */
enum scb_status {
	rus_no_res       = 0x08,	/* RU out of resources */
	rus_ready        = 0x10,	/* RU ready/running */
	rus_mask         = 0x3C,	/* mask for the RU status bits */
};
       
   350 
       
/* Software-tracked state of the receive unit (nic->ru_running). */
enum ru_state  {
	RU_SUSPENDED = 0,
	RU_RUNNING	 = 1,
	RU_UNINITIALIZED = -1,
};
       
   356 
       
/* Interrupt cause bits in scb.stat_ack; writing a bit back acks it. */
enum scb_stat_ack {
	stat_ack_not_ours    = 0x00,
	stat_ack_sw_gen      = 0x04,	/* software-generated interrupt */
	stat_ack_rnr         = 0x10,	/* RU not ready */
	stat_ack_cu_idle     = 0x20,
	stat_ack_frame_rx    = 0x40,
	stat_ack_cu_cmd_done = 0x80,
	stat_ack_not_present = 0xFF,	/* all-ones read: device gone */
	/* composite masks for rx- and tx-related causes */
	stat_ack_rx = (stat_ack_sw_gen | stat_ack_rnr | stat_ack_frame_rx),
	stat_ack_tx = (stat_ack_cu_idle | stat_ack_cu_cmd_done),
};
       
   368 
       
/* Values written to scb.cmd_hi to mask/unmask or raise interrupts. */
enum scb_cmd_hi {
	irq_mask_none = 0x00,	/* enable interrupts */
	irq_mask_all  = 0x01,	/* disable interrupts */
	irq_sw_gen    = 0x02,	/* trigger a software interrupt */
};
       
   374 
       
/* SCB command opcodes (scb.cmd_lo): ruc_* drive the receive unit,
 * cuc_* the command unit. */
enum scb_cmd_lo {
	cuc_nop        = 0x00,
	ruc_start      = 0x01,
	ruc_load_base  = 0x06,
	cuc_start      = 0x10,
	cuc_resume     = 0x20,
	cuc_dump_addr  = 0x40,
	cuc_dump_stats = 0x50,
	cuc_load_base  = 0x60,
	cuc_dump_reset = 0x70,
};
       
   386 
       
/* Completion markers written into the stats dump buffer by the device. */
enum cuc_dump {
	cuc_dump_complete       = 0x0000A005,
	cuc_dump_reset_complete = 0x0000A007,
};
       
   391 
       
/* Commands written to the PORT register (csr->port). */
enum port {
	software_reset  = 0x0000,	/* full device reset */
	selftest        = 0x0001,	/* run internal self-test */
	selective_reset = 0x0002,	/* idle CU/RU without full reset */
};
       
   397 
       
/* Bit-bang lines of the serial EEPROM interface (eeprom_ctrl_lo). */
enum eeprom_ctrl_lo {
	eesk = 0x01,	/* serial clock */
	eecs = 0x02,	/* chip select */
	eedi = 0x04,	/* data in (to EEPROM) */
	eedo = 0x08,	/* data out (from EEPROM) */
};
       
   404 
       
/* Control/status bits of the MDI register (csr->mdi_ctrl). */
enum mdi_ctrl {
	mdi_write = 0x04000000,
	mdi_read  = 0x08000000,
	mdi_ready = 0x10000000,	/* set when the MDI cycle completes */
};
       
   410 
       
/* Serial EEPROM opcodes (bit-banged in e100_eeprom_write/read). */
enum eeprom_op {
	op_write = 0x05,
	op_read  = 0x06,
	op_ewds  = 0x10,	/* erase/write disable */
	op_ewen  = 0x13,	/* erase/write enable */
};
       
   417 
       
/* Word offsets of interest within the EEPROM contents. */
enum eeprom_offsets {
	eeprom_cnfg_mdix  = 0x03,
	eeprom_id         = 0x0A,
	eeprom_config_asf = 0x0D,
	eeprom_smbus_addr = 0x90,
};
       
   424 
       
/* Flag within the eeprom_cnfg_mdix word. */
enum eeprom_cnfg_mdix {
	eeprom_mdix_enabled = 0x0080,
};
       
   428 
       
/* Flag within the eeprom_id word. */
enum eeprom_id {
	eeprom_id_wol = 0x0020,	/* Wake-on-LAN capable */
};
       
   432 
       
/* Flags within the eeprom_config_asf word. */
enum eeprom_config_asf {
	eeprom_asf = 0x8000,
	eeprom_gcl = 0x4000,
};
       
   437 
       
/* Command Block status bits, written back by the device. */
enum cb_status {
	cb_complete = 0x8000,	/* command finished */
	cb_ok       = 0x2000,	/* command succeeded */
};
       
   442 
       
/* Command Block command field: low bits select the operation,
 * high bits are control flags (interrupt, suspend, end-of-list). */
enum cb_command {
	cb_nop    = 0x0000,
	cb_iaaddr = 0x0001,	/* set individual address */
	cb_config = 0x0002,
	cb_multi  = 0x0003,	/* multicast setup */
	cb_tx     = 0x0004,
	cb_ucode  = 0x0005,	/* microcode load */
	cb_dump   = 0x0006,
	cb_tx_sf  = 0x0008,	/* tx flexible mode flag */
	cb_cid    = 0x1f00,
	cb_i      = 0x2000,	/* interrupt on completion */
	cb_s      = 0x4000,	/* suspend after this CB */
	cb_el     = 0x8000,	/* end of list */
};
       
   457 
       
/* Receive Frame Descriptor — shared with the device via DMA
 * (simplified memory mode); layout is fixed by hardware. */
struct rfd {
	__le16 status;
	__le16 command;
	__le32 link;		/* bus address of next RFD */
	__le32 rbd;
	__le16 actual_size;	/* bytes received */
	__le16 size;		/* buffer size; 0 stalls hardware (see header) */
};
       
   466 
       
/* Software bookkeeping for one receive buffer in the RFA ring. */
struct rx {
	struct rx *next, *prev;
	struct sk_buff *skb;	/* holds the RFD + data buffer */
	dma_addr_t dma_addr;
};
       
   472 
       
/* Order a pair of bitfields so struct config matches the wire/device
 * layout on both big- and little-endian hosts. */
#if defined(__BIG_ENDIAN_BITFIELD)
#define X(a,b)	b,a
#else
#define X(a,b)	a,b
#endif
       
/* Device configuration block (cb_config payload), DMA'd to hardware.
 * The /*N*/ markers are byte offsets; the X() macro fixes bitfield
 * order per endianness.  Layout is hardware-defined — do not reorder. */
struct config {
/*0*/	u8 X(byte_count:6, pad0:2);
/*1*/	u8 X(X(rx_fifo_limit:4, tx_fifo_limit:3), pad1:1);
/*2*/	u8 adaptive_ifs;
/*3*/	u8 X(X(X(X(mwi_enable:1, type_enable:1), read_align_enable:1),
	   term_write_cache_line:1), pad3:4);
/*4*/	u8 X(rx_dma_max_count:7, pad4:1);
/*5*/	u8 X(tx_dma_max_count:7, dma_max_count_enable:1);
/*6*/	u8 X(X(X(X(X(X(X(late_scb_update:1, direct_rx_dma:1),
	   tno_intr:1), cna_intr:1), standard_tcb:1), standard_stat_counter:1),
	   rx_discard_overruns:1), rx_save_bad_frames:1);
/*7*/	u8 X(X(X(X(X(rx_discard_short_frames:1, tx_underrun_retry:2),
	   pad7:2), rx_extended_rfd:1), tx_two_frames_in_fifo:1),
	   tx_dynamic_tbd:1);
/*8*/	u8 X(X(mii_mode:1, pad8:6), csma_disabled:1);
/*9*/	u8 X(X(X(X(X(rx_tcpudp_checksum:1, pad9:3), vlan_arp_tco:1),
	   link_status_wake:1), arp_wake:1), mcmatch_wake:1);
/*10*/	u8 X(X(X(pad10:3, no_source_addr_insertion:1), preamble_length:2),
	   loopback:2);
/*11*/	u8 X(linear_priority:3, pad11:5);
/*12*/	u8 X(X(linear_priority_mode:1, pad12:3), ifs:4);
/*13*/	u8 ip_addr_lo;
/*14*/	u8 ip_addr_hi;
/*15*/	u8 X(X(X(X(X(X(X(promiscuous_mode:1, broadcast_disabled:1),
	   wait_after_win:1), pad15_1:1), ignore_ul_bit:1), crc_16_bit:1),
	   pad15_2:1), crs_or_cdt:1);
/*16*/	u8 fc_delay_lo;
/*17*/	u8 fc_delay_hi;
/*18*/	u8 X(X(X(X(X(rx_stripping:1, tx_padding:1), rx_crc_transfer:1),
	   rx_long_ok:1), fc_priority_threshold:3), pad18:1);
/*19*/	u8 X(X(X(X(X(X(X(addr_wake:1, magic_packet_disable:1),
	   fc_disable:1), fc_restop:1), fc_restart:1), fc_reject:1),
	   full_duplex_force:1), full_duplex_pin:1);
/*20*/	u8 X(X(X(pad20_1:5, fc_priority_location:1), multi_ia:1), pad20_2:1);
/*21*/	u8 X(X(pad21_1:3, multicast_all:1), pad21_2:4);
/*22*/	u8 X(X(rx_d102_mode:1, rx_vlan_drop:1), pad22:6);
	u8 pad_d102[9];
};
       
   516 
       
#define E100_MAX_MULTICAST_ADDRS	64
/* Multicast setup command payload (cb_multi). */
struct multi {
	__le16 count;	/* number of valid address bytes in addr[] */
	u8 addr[E100_MAX_MULTICAST_ADDRS * ETH_ALEN + 2/*pad*/];
};
       
   522 
       
/* Important: keep total struct u32-aligned */
#define UCODE_SIZE			134
/* Command Block: hardware-visible head (status/command/link/u) followed
 * by software-only bookkeeping (next/prev/dma_addr/skb).  The union
 * carries the payload for whichever cb_command is issued. */
struct cb {
	__le16 status;
	__le16 command;
	__le32 link;		/* bus address of next CB */
	union {
		u8 iaaddr[ETH_ALEN];		/* cb_iaaddr */
		__le32 ucode[UCODE_SIZE];	/* cb_ucode */
		struct config config;		/* cb_config */
		struct multi multi;		/* cb_multi */
		struct {			/* cb_tx / cb_tx_sf */
			u32 tbd_array;
			u16 tcb_byte_count;
			u8 threshold;
			u8 tbd_count;
			struct {
				__le32 buf_addr;
				__le16 size;
				u16 eol;
			} tbd;
		} tcb;
		__le32 dump_buffer_addr;	/* cb_dump */
	} u;
	/* software-only fields below — not read by hardware */
	struct cb *next, *prev;
	dma_addr_t dma_addr;
	struct sk_buff *skb;
};
       
   551 
       
/* Loopback test modes. */
enum loopback {
	lb_none = 0, lb_mac = 1, lb_phy = 3,
};
       
   555 
       
/* Statistics dump area filled by the device (cuc_dump_stats);
 * 'complete' receives a cuc_dump marker when the dump finishes. */
struct stats {
	__le32 tx_good_frames, tx_max_collisions, tx_late_collisions,
		tx_underruns, tx_lost_crs, tx_deferred, tx_single_collisions,
		tx_multiple_collisions, tx_total_collisions;
	__le32 rx_good_frames, rx_crc_errors, rx_alignment_errors,
		rx_resource_errors, rx_overrun_errors, rx_cdt_errors,
		rx_short_frame_errors;
	__le32 fc_xmt_pause, fc_rcv_pause, fc_rcv_unsupported;
	__le16 xmt_tco_frames, rcv_tco_frames;
	__le32 complete;
};
       
   567 
       
/* DMA-coherent scratch memory shared with the device:
 * self-test result area, statistics dump, and register dump buffer. */
struct mem {
	struct {
		u32 signature;	/* written non-zero by hw when test ran */
		u32 result;	/* 0 on pass */
	} selftest;
	struct stats stats;
	u8 dump_buf[596];
};
       
   576 
       
/* Bounds and chosen value for a tunable ring size. */
struct param_range {
	u32 min;
	u32 max;
	u32 count;	/* currently configured count */
};
       
   582 
       
/* Ring-size parameters: receive frame descriptors and command blocks. */
struct params {
	struct param_range rfds;
	struct param_range cbs;
};
       
   587 
       
/* Per-adapter driver state.  Hot fields are grouped and cacheline-
 * aligned; the trailing ecdev fields are the EtherCAT additions. */
struct nic {
	/* Begin: frequently used values: keep adjacent for cache effect */
	u32 msg_enable				____cacheline_aligned;
	struct net_device *netdev;
	struct pci_dev *pdev;

	/* Receive ring state */
	struct rx *rxs				____cacheline_aligned;
	struct rx *rx_to_use;		/* next rx slot to hand to hw */
	struct rx *rx_to_clean;		/* next rx slot to check */
	struct rfd blank_rfd;		/* template for fresh RFDs */
	enum ru_state ru_running;

	/* Command-unit / CB ring state (see Theory of Operation III) */
	spinlock_t cb_lock			____cacheline_aligned;
	spinlock_t cmd_lock;		/* serializes SCB command register */
	struct csr __iomem *csr;	/* mapped device registers */
	enum scb_cmd_lo cuc_cmd;	/* cuc_start or cuc_resume */
	unsigned int cbs_avail;		/* free CBs in the ring */
	struct napi_struct napi;
	struct cb *cbs;
	struct cb *cb_to_use;		/* next CB for queuing a command */
	struct cb *cb_to_send;		/* first CB to (re)start on */
	struct cb *cb_to_clean;		/* next CB to check for completion */
	__le16 tx_command;		/* cb_tx or cb_tx_sf */
	/* End: frequently used values: keep adjacent for cache effect */

	enum {
		ich                = (1 << 0),
		promiscuous        = (1 << 1),
		multicast_all      = (1 << 2),
		wol_magic          = (1 << 3),
		ich_10h_workaround = (1 << 4),
	} flags					____cacheline_aligned;

	enum mac mac;
	enum phy phy;
	struct params params;
	struct timer_list watchdog;
	struct timer_list blink_timer;
	struct mii_if_info mii;
	struct work_struct tx_timeout_task;
	enum loopback loopback;

	struct mem *mem;		/* DMA-coherent scratch area */
	dma_addr_t dma_addr;		/* bus address of *mem */

	dma_addr_t cbs_dma_addr;	/* bus address of the CB ring */
	u8 adaptive_ifs;
	u8 tx_threshold;
	u32 tx_frames;
	u32 tx_collisions;
	u32 tx_deferred;
	u32 tx_single_collisions;
	u32 tx_multiple_collisions;
	u32 tx_fc_pause;
	u32 tx_tco_frames;

	u32 rx_fc_pause;
	u32 rx_fc_unsupported;
	u32 rx_tco_frames;
	u32 rx_over_length_errors;

	u16 leds;
	u16 eeprom_wc;			/* EEPROM word count */
	__le16 eeprom[256];		/* cached EEPROM contents */
	spinlock_t mdio_lock;		/* Stratus87247: protects MDI ctrl */

	/* EtherCAT: non-NULL when the device is claimed by the master */
	ec_device_t *ecdev;
	unsigned long ec_watchdog_jiffies;
};
       
   657 
       
/* Force posted PCI writes to reach the device before continuing. */
static inline void e100_write_flush(struct nic *nic)
{
	/* Flush previous PCI writes through intermediate bridges
	 * by doing a benign read */
	(void)ioread8(&nic->csr->scb.status);
}
       
   664 
       
   665 static void e100_enable_irq(struct nic *nic)
       
   666 {
       
   667 	unsigned long flags;
       
   668 
       
   669 	if (nic->ecdev)
       
   670 		return;
       
   671 
       
   672 	spin_lock_irqsave(&nic->cmd_lock, flags);
       
   673 	iowrite8(irq_mask_none, &nic->csr->scb.cmd_hi);
       
   674 	e100_write_flush(nic);
       
   675 	spin_unlock_irqrestore(&nic->cmd_lock, flags);
       
   676 }
       
   677 
       
   678 static void e100_disable_irq(struct nic *nic)
       
   679 {
       
   680 	unsigned long flags;
       
   681 
       
   682 	if (nic->ecdev)
       
   683 		return;
       
   684 
       
   685 	spin_lock_irqsave(&nic->cmd_lock, flags);
       
   686 	iowrite8(irq_mask_all, &nic->csr->scb.cmd_hi);
       
   687 	e100_write_flush(nic);
       
   688 	spin_unlock_irqrestore(&nic->cmd_lock, flags);
       
   689 }
       
   690 
       
/* Reset the controller: selective reset first to quiesce CU/RU and get
 * the device off the PCI bus, then a full software reset.  The udelay
 * after each PORT write gives the hardware time to complete; the
 * ordering of the three steps is required and must not change. */
static void e100_hw_reset(struct nic *nic)
{
	/* Put CU and RU into idle with a selective reset to get
	 * device off of PCI bus */
	iowrite32(selective_reset, &nic->csr->port);
	e100_write_flush(nic); udelay(20);

	/* Now fully reset device */
	iowrite32(software_reset, &nic->csr->port);
	e100_write_flush(nic); udelay(20);

	/* Mask off our interrupt line - it's unmasked after reset */
	e100_disable_irq(nic);
}
       
   705 
       
/* Run the controller's built-in self-test.
 *
 * The device DMAs its result into nic->mem->selftest, so a pass also
 * proves that host-memory DMA works.  Returns 0 on success or
 * -ETIMEDOUT when the test reports failure or never completes
 * (signature still 0 after the wait). */
static int e100_self_test(struct nic *nic)
{
	u32 dma_addr = nic->dma_addr + offsetof(struct mem, selftest);

	/* Passing the self-test is a pretty good indication
	 * that the device can DMA to/from host memory */

	/* Pre-set both fields so we can detect whether the device wrote them */
	nic->mem->selftest.signature = 0;
	nic->mem->selftest.result = 0xFFFFFFFF;

	iowrite32(selftest | dma_addr, &nic->csr->port);
	e100_write_flush(nic);
	/* Wait 10 msec for self-test to complete */
	msleep(10);

	/* Interrupts are enabled after self-test */
	e100_disable_irq(nic);

	/* Check results of self-test */
	if(nic->mem->selftest.result != 0) {
		DPRINTK(HW, ERR, "Self-test failed: result=0x%08X\n",
			nic->mem->selftest.result);
		return -ETIMEDOUT;
	}
	/* Device never wrote a signature -> it never ran the test */
	if(nic->mem->selftest.signature == 0) {
		DPRINTK(HW, ERR, "Self-test failed: timed out\n");
		return -ETIMEDOUT;
	}

	return 0;
}
       
   737 
       
   738 static void e100_eeprom_write(struct nic *nic, u16 addr_len, u16 addr, __le16 data)
       
   739 {
       
   740 	u32 cmd_addr_data[3];
       
   741 	u8 ctrl;
       
   742 	int i, j;
       
   743 
       
   744 	/* Three cmds: write/erase enable, write data, write/erase disable */
       
   745 	cmd_addr_data[0] = op_ewen << (addr_len - 2);
       
   746 	cmd_addr_data[1] = (((op_write << addr_len) | addr) << 16) |
       
   747 		le16_to_cpu(data);
       
   748 	cmd_addr_data[2] = op_ewds << (addr_len - 2);
       
   749 
       
   750 	/* Bit-bang cmds to write word to eeprom */
       
   751 	for(j = 0; j < 3; j++) {
       
   752 
       
   753 		/* Chip select */
       
   754 		iowrite8(eecs | eesk, &nic->csr->eeprom_ctrl_lo);
       
   755 		e100_write_flush(nic); udelay(4);
       
   756 
       
   757 		for(i = 31; i >= 0; i--) {
       
   758 			ctrl = (cmd_addr_data[j] & (1 << i)) ?
       
   759 				eecs | eedi : eecs;
       
   760 			iowrite8(ctrl, &nic->csr->eeprom_ctrl_lo);
       
   761 			e100_write_flush(nic); udelay(4);
       
   762 
       
   763 			iowrite8(ctrl | eesk, &nic->csr->eeprom_ctrl_lo);
       
   764 			e100_write_flush(nic); udelay(4);
       
   765 		}
       
   766 		/* Wait 10 msec for cmd to complete */
       
   767 		msleep(10);
       
   768 
       
   769 		/* Chip deselect */
       
   770 		iowrite8(0, &nic->csr->eeprom_ctrl_lo);
       
   771 		e100_write_flush(nic); udelay(4);
       
   772 	}
       
   773 };
       
   774 
       
   775 /* General technique stolen from the eepro100 driver - very clever */
       
   776 static __le16 e100_eeprom_read(struct nic *nic, u16 *addr_len, u16 addr)
       
   777 {
       
   778 	u32 cmd_addr_data;
       
   779 	u16 data = 0;
       
   780 	u8 ctrl;
       
   781 	int i;
       
   782 
       
   783 	cmd_addr_data = ((op_read << *addr_len) | addr) << 16;
       
   784 
       
   785 	/* Chip select */
       
   786 	iowrite8(eecs | eesk, &nic->csr->eeprom_ctrl_lo);
       
   787 	e100_write_flush(nic); udelay(4);
       
   788 
       
   789 	/* Bit-bang to read word from eeprom */
       
   790 	for(i = 31; i >= 0; i--) {
       
   791 		ctrl = (cmd_addr_data & (1 << i)) ? eecs | eedi : eecs;
       
   792 		iowrite8(ctrl, &nic->csr->eeprom_ctrl_lo);
       
   793 		e100_write_flush(nic); udelay(4);
       
   794 
       
   795 		iowrite8(ctrl | eesk, &nic->csr->eeprom_ctrl_lo);
       
   796 		e100_write_flush(nic); udelay(4);
       
   797 
       
   798 		/* Eeprom drives a dummy zero to EEDO after receiving
       
   799 		 * complete address.  Use this to adjust addr_len. */
       
   800 		ctrl = ioread8(&nic->csr->eeprom_ctrl_lo);
       
   801 		if(!(ctrl & eedo) && i > 16) {
       
   802 			*addr_len -= (i - 16);
       
   803 			i = 17;
       
   804 		}
       
   805 
       
   806 		data = (data << 1) | (ctrl & eedo ? 1 : 0);
       
   807 	}
       
   808 
       
   809 	/* Chip deselect */
       
   810 	iowrite8(0, &nic->csr->eeprom_ctrl_lo);
       
   811 	e100_write_flush(nic); udelay(4);
       
   812 
       
   813 	return cpu_to_le16(data);
       
   814 };
       
   815 
       
   816 /* Load entire EEPROM image into driver cache and validate checksum */
       
static int e100_eeprom_load(struct nic *nic)
{
	/* Reads the whole EEPROM into nic->eeprom[] and verifies the
	 * checksum word.  Returns 0 on success, -EAGAIN on a bad checksum
	 * (unless overridden by eeprom_bad_csum_allow). */
	u16 addr, addr_len = 8, checksum = 0;

	/* Try reading with an 8-bit addr len to discover actual addr len */
	e100_eeprom_read(nic, &addr_len, 0);
	nic->eeprom_wc = 1 << addr_len;	/* word count implied by addr width */

	for(addr = 0; addr < nic->eeprom_wc; addr++) {
		nic->eeprom[addr] = e100_eeprom_read(nic, &addr_len, addr);
		/* every word except the final checksum word contributes */
		if(addr < nic->eeprom_wc - 1)
			checksum += le16_to_cpu(nic->eeprom[addr]);
	}

	/* The checksum, stored in the last word, is calculated such that
	 * the sum of words should be 0xBABA */
	if (cpu_to_le16(0xBABA - checksum) != nic->eeprom[nic->eeprom_wc - 1]) {
		DPRINTK(PROBE, ERR, "EEPROM corrupted\n");
		if (!eeprom_bad_csum_allow)
			return -EAGAIN;
	}

	return 0;
}
       
   841 
       
   842 /* Save (portion of) driver EEPROM cache to device and update checksum */
       
static int e100_eeprom_save(struct nic *nic, u16 start, u16 count)
{
	/* Writes nic->eeprom[start..start+count) back to the device, then
	 * recomputes and writes the checksum word.  Returns 0 on success,
	 * -EINVAL if the range would reach the checksum word. */
	u16 addr, addr_len = 8, checksum = 0;

	/* Try reading with an 8-bit addr len to discover actual addr len */
	e100_eeprom_read(nic, &addr_len, 0);
	nic->eeprom_wc = 1 << addr_len;

	/* The last word is the checksum; callers may not write it directly */
	if(start + count >= nic->eeprom_wc)
		return -EINVAL;

	for(addr = start; addr < start + count; addr++)
		e100_eeprom_write(nic, addr_len, addr, nic->eeprom[addr]);

	/* The checksum, stored in the last word, is calculated such that
	 * the sum of words should be 0xBABA */
	for(addr = 0; addr < nic->eeprom_wc - 1; addr++)
		checksum += le16_to_cpu(nic->eeprom[addr]);
	nic->eeprom[nic->eeprom_wc - 1] = cpu_to_le16(0xBABA - checksum);
	e100_eeprom_write(nic, addr_len, nic->eeprom_wc - 1,
		nic->eeprom[nic->eeprom_wc - 1]);

	return 0;
}
       
   867 
       
   868 #define E100_WAIT_SCB_TIMEOUT 20000 /* we might have to wait 100ms!!! */
       
   869 #define E100_WAIT_SCB_FAST 20       /* delay like the old code */
       
/* Issue a command through the SCB (System Control Block).
 *
 * Polls until the previous command has been accepted (cmd_lo reads
 * back 0), loads the general pointer for non-resume commands, then
 * writes the command byte.  Returns 0 on success, -EAGAIN when the
 * SCB never clears.
 *
 * In EtherCAT mode (nic->ecdev) cmd_lock is not taken — NOTE(review):
 * presumably serialization is provided by the EtherCAT master's
 * calling context; confirm before relying on it. */
static int e100_exec_cmd(struct nic *nic, u8 cmd, dma_addr_t dma_addr)
{
	unsigned long flags = 0;
	unsigned int i;
	int err = 0;

	if (!nic->ecdev)
		spin_lock_irqsave(&nic->cmd_lock, flags);

	/* Previous command is accepted when SCB clears */
	for(i = 0; i < E100_WAIT_SCB_TIMEOUT; i++) {
		if(likely(!ioread8(&nic->csr->scb.cmd_lo)))
			break;
		cpu_relax();
		/* after the first E100_WAIT_SCB_FAST fast polls, back off */
		if(unlikely(i > E100_WAIT_SCB_FAST))
			udelay(5);
	}
	if(unlikely(i == E100_WAIT_SCB_TIMEOUT)) {
		err = -EAGAIN;
		goto err_unlock;
	}

	/* the general pointer is only loaded for non-resume commands */
	if(unlikely(cmd != cuc_resume))
		iowrite32(dma_addr, &nic->csr->scb.gen_ptr);
	iowrite8(cmd, &nic->csr->scb.cmd_lo);

err_unlock:
	if (!nic->ecdev)
		spin_unlock_irqrestore(&nic->cmd_lock, flags);

	return err;
}
       
   902 
       
/* Take the next free control block from the ring, let @cb_prepare fill
 * it in, and kick the CU.
 *
 * Returns 0 on success, -ENOMEM when no CB is free, and -ENOSPC when
 * this call consumed the last free CB (the command itself was still
 * queued).  In EtherCAT mode cb_lock is not taken — NOTE(review):
 * presumably the ecdev calling context serializes access; confirm. */
static int e100_exec_cb(struct nic *nic, struct sk_buff *skb,
	void (*cb_prepare)(struct nic *, struct cb *, struct sk_buff *))
{
	struct cb *cb;
	unsigned long flags = 0;
	int err = 0;

	if (!nic->ecdev)
		spin_lock_irqsave(&nic->cb_lock, flags);

	if(unlikely(!nic->cbs_avail)) {
		err = -ENOMEM;
		goto err_unlock;
	}

	cb = nic->cb_to_use;
	nic->cb_to_use = cb->next;
	nic->cbs_avail--;
	cb->skb = skb;

	/* ring is now full: report it, but still submit this command */
	if(unlikely(!nic->cbs_avail))
		err = -ENOSPC;

	cb_prepare(nic, cb, skb);

	/* Order is important otherwise we'll be in a race with h/w:
	 * set S-bit in current first, then clear S-bit in previous. */
	cb->command |= cpu_to_le16(cb_s);
	wmb();
	cb->prev->command &= cpu_to_le16(~cb_s);

	/* Push all not-yet-submitted CBs to the device */
	while(nic->cb_to_send != nic->cb_to_use) {
		if(unlikely(e100_exec_cmd(nic, nic->cuc_cmd,
			nic->cb_to_send->dma_addr))) {
			/* Ok, here's where things get sticky.  It's
			 * possible that we can't schedule the command
			 * because the controller is too busy, so
			 * let's just queue the command and try again
			 * when another command is scheduled. */
			if(err == -ENOSPC) {
				/* ring full AND controller busy: request a reset */
				schedule_work(&nic->tx_timeout_task);
			}
			break;
		} else {
			/* after the first start, subsequent kicks are resumes */
			nic->cuc_cmd = cuc_resume;
			nic->cb_to_send = nic->cb_to_send->next;
		}
	}

err_unlock:
	if (!nic->ecdev)
		spin_unlock_irqrestore(&nic->cb_lock, flags);

	return err;
}
       
   959 
       
   960 static u16 mdio_ctrl(struct nic *nic, u32 addr, u32 dir, u32 reg, u16 data)
       
   961 {
       
   962 	u32 data_out = 0;
       
   963 	unsigned int i;
       
   964 	unsigned long flags = 0;
       
   965 
       
   966 
       
   967 	/*
       
   968 	 * Stratus87247: we shouldn't be writing the MDI control
       
   969 	 * register until the Ready bit shows True.  Also, since
       
   970 	 * manipulation of the MDI control registers is a multi-step
       
   971 	 * procedure it should be done under lock.
       
   972 	 */
       
   973 	if (!nic->ecdev)
       
   974 		spin_lock_irqsave(&nic->mdio_lock, flags);
       
   975 	for (i = 100; i; --i) {
       
   976 		if (ioread32(&nic->csr->mdi_ctrl) & mdi_ready)
       
   977 			break;
       
   978 		udelay(20);
       
   979 	}
       
   980 	if (unlikely(!i)) {
       
   981 		printk("e100.mdio_ctrl(%s) won't go Ready\n",
       
   982 			nic->netdev->name );
       
   983 		if (!nic->ecdev)
       
   984 			spin_unlock_irqrestore(&nic->mdio_lock, flags);
       
   985 		return 0;		/* No way to indicate timeout error */
       
   986 	}
       
   987 	iowrite32((reg << 16) | (addr << 21) | dir | data, &nic->csr->mdi_ctrl);
       
   988 
       
   989 	for (i = 0; i < 100; i++) {
       
   990 		udelay(20);
       
   991 		if ((data_out = ioread32(&nic->csr->mdi_ctrl)) & mdi_ready)
       
   992 			break;
       
   993 	}
       
   994 	if (!nic->ecdev)
       
   995 		spin_unlock_irqrestore(&nic->mdio_lock, flags);
       
   996 	DPRINTK(HW, DEBUG,
       
   997 		"%s:addr=%d, reg=%d, data_in=0x%04X, data_out=0x%04X\n",
       
   998 		dir == mdi_read ? "READ" : "WRITE", addr, reg, data, data_out);
       
   999 	return (u16)data_out;
       
  1000 }
       
  1001 
       
  1002 static int mdio_read(struct net_device *netdev, int addr, int reg)
       
  1003 {
       
  1004 	return mdio_ctrl(netdev_priv(netdev), addr, mdi_read, reg, 0);
       
  1005 }
       
  1006 
       
  1007 static void mdio_write(struct net_device *netdev, int addr, int reg, int data)
       
  1008 {
       
  1009 	mdio_ctrl(netdev_priv(netdev), addr, mdi_write, reg, data);
       
  1010 }
       
  1011 
       
  1012 static void e100_get_defaults(struct nic *nic)
       
  1013 {
       
  1014 	struct param_range rfds = { .min = 16, .max = 256, .count = 256 };
       
  1015 	struct param_range cbs  = { .min = 64, .max = 256, .count = 128 };
       
  1016 
       
  1017 	/* MAC type is encoded as rev ID; exception: ICH is treated as 82559 */
       
  1018 	nic->mac = (nic->flags & ich) ? mac_82559_D101M : nic->pdev->revision;
       
  1019 	if(nic->mac == mac_unknown)
       
  1020 		nic->mac = mac_82557_D100_A;
       
  1021 
       
  1022 	nic->params.rfds = rfds;
       
  1023 	nic->params.cbs = cbs;
       
  1024 
       
  1025 	/* Quadwords to DMA into FIFO before starting frame transmit */
       
  1026 	nic->tx_threshold = 0xE0;
       
  1027 
       
  1028 	/* no interrupt for every tx completion, delay = 256us if not 557 */
       
  1029 	nic->tx_command = cpu_to_le16(cb_tx | cb_tx_sf |
       
  1030 		((nic->mac >= mac_82558_D101_A4) ? cb_cid : cb_i));
       
  1031 
       
  1032 	/* Template for a freshly allocated RFD */
       
  1033 	nic->blank_rfd.command = 0;
       
  1034 	nic->blank_rfd.rbd = cpu_to_le32(0xFFFFFFFF);
       
  1035 	nic->blank_rfd.size = cpu_to_le16(VLAN_ETH_FRAME_LEN);
       
  1036 
       
  1037 	/* MII setup */
       
  1038 	nic->mii.phy_id_mask = 0x1F;
       
  1039 	nic->mii.reg_num_mask = 0x1F;
       
  1040 	nic->mii.dev = nic->netdev;
       
  1041 	nic->mii.mdio_read = mdio_read;
       
  1042 	nic->mii.mdio_write = mdio_write;
       
  1043 }
       
  1044 
       
/* cb_prepare handler: build a cb_config control block carrying the
 * 8255x "Configure" command.  Starts from an all-zero config struct,
 * sets the recommended defaults byte by byte, then adjusts for the
 * current mode (duplex forcing, promiscuous/loopback, multicast-all,
 * WoL, and MAC-revision-specific features).  @skb is unused. */
static void e100_configure(struct nic *nic, struct cb *cb, struct sk_buff *skb)
{
	struct config *config = &cb->u.config;
	u8 *c = (u8 *)config;	/* byte view, only for the debug dump below */

	cb->command = cpu_to_le16(cb_config);

	memset(config, 0, sizeof(struct config));

	config->byte_count = 0x16;		/* bytes in this struct */
	config->rx_fifo_limit = 0x8;		/* bytes in FIFO before DMA */
	config->direct_rx_dma = 0x1;		/* reserved */
	config->standard_tcb = 0x1;		/* 1=standard, 0=extended */
	config->standard_stat_counter = 0x1;	/* 1=standard, 0=extended */
	config->rx_discard_short_frames = 0x1;	/* 1=discard, 0=pass */
	config->tx_underrun_retry = 0x3;	/* # of underrun retries */
	config->mii_mode = 0x1;			/* 1=MII mode, 0=503 mode */
	config->pad10 = 0x6;
	config->no_source_addr_insertion = 0x1;	/* 1=no, 0=yes */
	config->preamble_length = 0x2;		/* 0=1, 1=3, 2=7, 3=15 bytes */
	config->ifs = 0x6;			/* x16 = inter frame spacing */
	config->ip_addr_hi = 0xF2;		/* ARP IP filter - not used */
	config->pad15_1 = 0x1;
	config->pad15_2 = 0x1;
	config->crs_or_cdt = 0x0;		/* 0=CRS only, 1=CRS or CDT */
	config->fc_delay_hi = 0x40;		/* time delay for fc frame */
	config->tx_padding = 0x1;		/* 1=pad short frames */
	config->fc_priority_threshold = 0x7;	/* 7=priority fc disabled */
	config->pad18 = 0x1;
	config->full_duplex_pin = 0x1;		/* 1=examine FDX# pin */
	config->pad20_1 = 0x1F;
	config->fc_priority_location = 0x1;	/* 1=byte#31, 0=byte#19 */
	config->pad21_1 = 0x5;

	config->adaptive_ifs = nic->adaptive_ifs;
	config->loopback = nic->loopback;

	if(nic->mii.force_media && nic->mii.full_duplex)
		config->full_duplex_force = 0x1;	/* 1=force, 0=auto */

	if(nic->flags & promiscuous || nic->loopback) {
		config->rx_save_bad_frames = 0x1;	/* 1=save, 0=discard */
		config->rx_discard_short_frames = 0x0;	/* 1=discard, 0=save */
		config->promiscuous_mode = 0x1;		/* 1=on, 0=off */
	}

	if(nic->flags & multicast_all)
		config->multicast_all = 0x1;		/* 1=accept, 0=no */

	/* disable WoL when up (always disabled in EtherCAT mode) */
	if (nic->ecdev ||
			(netif_running(nic->netdev) || !(nic->flags & wol_magic)))
		config->magic_packet_disable = 0x1;	/* 1=off, 0=on */

	/* Features available from the 82558 D101 A4 step onwards */
	if(nic->mac >= mac_82558_D101_A4) {
		config->fc_disable = 0x1;	/* 1=Tx fc off, 0=Tx fc on */
		config->mwi_enable = 0x1;	/* 1=enable, 0=disable */
		config->standard_tcb = 0x0;	/* 1=standard, 0=extended */
		config->rx_long_ok = 0x1;	/* 1=VLANs ok, 0=standard */
		if (nic->mac >= mac_82559_D101M) {
			config->tno_intr = 0x1;		/* TCO stats enable */
			/* Enable TCO in extended config */
			if (nic->mac >= mac_82551_10) {
				config->byte_count = 0x20; /* extended bytes */
				config->rx_d102_mode = 0x1; /* GMRC for TCO */
			}
		} else {
			config->standard_stat_counter = 0x0;
		}
	}

	/* Dump the first 24 config bytes for debugging */
	DPRINTK(HW, DEBUG, "[00-07]=%02X:%02X:%02X:%02X:%02X:%02X:%02X:%02X\n",
		c[0], c[1], c[2], c[3], c[4], c[5], c[6], c[7]);
	DPRINTK(HW, DEBUG, "[08-15]=%02X:%02X:%02X:%02X:%02X:%02X:%02X:%02X\n",
		c[8], c[9], c[10], c[11], c[12], c[13], c[14], c[15]);
	DPRINTK(HW, DEBUG, "[16-23]=%02X:%02X:%02X:%02X:%02X:%02X:%02X:%02X\n",
		c[16], c[17], c[18], c[19], c[20], c[21], c[22], c[23]);
}
       
  1123 
       
  1124 /********************************************************/
       
  1125 /*  Micro code for 8086:1229 Rev 8                      */
       
  1126 /********************************************************/
       
  1127 
       
  1128 /*  Parameter values for the D101M B-step  */
       
  1129 #define D101M_CPUSAVER_TIMER_DWORD		78
       
  1130 #define D101M_CPUSAVER_BUNDLE_DWORD		65
       
  1131 #define D101M_CPUSAVER_MIN_SIZE_DWORD		126
       
  1132 
       
  1133 #define D101M_B_RCVBUNDLE_UCODE \
       
  1134 {\
       
  1135 0x00550215, 0xFFFF0437, 0xFFFFFFFF, 0x06A70789, 0xFFFFFFFF, 0x0558FFFF, \
       
  1136 0x000C0001, 0x00101312, 0x000C0008, 0x00380216, \
       
  1137 0x0010009C, 0x00204056, 0x002380CC, 0x00380056, \
       
  1138 0x0010009C, 0x00244C0B, 0x00000800, 0x00124818, \
       
  1139 0x00380438, 0x00000000, 0x00140000, 0x00380555, \
       
  1140 0x00308000, 0x00100662, 0x00100561, 0x000E0408, \
       
  1141 0x00134861, 0x000C0002, 0x00103093, 0x00308000, \
       
  1142 0x00100624, 0x00100561, 0x000E0408, 0x00100861, \
       
  1143 0x000C007E, 0x00222C21, 0x000C0002, 0x00103093, \
       
  1144 0x00380C7A, 0x00080000, 0x00103090, 0x00380C7A, \
       
  1145 0x00000000, 0x00000000, 0x00000000, 0x00000000, \
       
  1146 0x0010009C, 0x00244C2D, 0x00010004, 0x00041000, \
       
  1147 0x003A0437, 0x00044010, 0x0038078A, 0x00000000, \
       
  1148 0x00100099, 0x00206C7A, 0x0010009C, 0x00244C48, \
       
  1149 0x00130824, 0x000C0001, 0x00101213, 0x00260C75, \
       
  1150 0x00041000, 0x00010004, 0x00130826, 0x000C0006, \
       
  1151 0x002206A8, 0x0013C926, 0x00101313, 0x003806A8, \
       
  1152 0x00000000, 0x00000000, 0x00000000, 0x00000000, \
       
  1153 0x00000000, 0x00000000, 0x00000000, 0x00000000, \
       
  1154 0x00080600, 0x00101B10, 0x00050004, 0x00100826, \
       
  1155 0x00101210, 0x00380C34, 0x00000000, 0x00000000, \
       
  1156 0x0021155B, 0x00100099, 0x00206559, 0x0010009C, \
       
  1157 0x00244559, 0x00130836, 0x000C0000, 0x00220C62, \
       
  1158 0x000C0001, 0x00101B13, 0x00229C0E, 0x00210C0E, \
       
  1159 0x00226C0E, 0x00216C0E, 0x0022FC0E, 0x00215C0E, \
       
  1160 0x00214C0E, 0x00380555, 0x00010004, 0x00041000, \
       
  1161 0x00278C67, 0x00040800, 0x00018100, 0x003A0437, \
       
  1162 0x00130826, 0x000C0001, 0x00220559, 0x00101313, \
       
  1163 0x00380559, 0x00000000, 0x00000000, 0x00000000, \
       
  1164 0x00000000, 0x00000000, 0x00000000, 0x00000000, \
       
  1165 0x00000000, 0x00130831, 0x0010090B, 0x00124813, \
       
  1166 0x000CFF80, 0x002606AB, 0x00041000, 0x00010004, \
       
  1167 0x003806A8, 0x00000000, 0x00000000, 0x00000000, \
       
  1168 }
       
  1169 
       
  1170 /********************************************************/
       
  1171 /*  Micro code for 8086:1229 Rev 9                      */
       
  1172 /********************************************************/
       
  1173 
       
  1174 /*  Parameter values for the D101S  */
       
  1175 #define D101S_CPUSAVER_TIMER_DWORD		78
       
  1176 #define D101S_CPUSAVER_BUNDLE_DWORD		67
       
  1177 #define D101S_CPUSAVER_MIN_SIZE_DWORD		128
       
  1178 
       
  1179 #define D101S_RCVBUNDLE_UCODE \
       
  1180 {\
       
  1181 0x00550242, 0xFFFF047E, 0xFFFFFFFF, 0x06FF0818, 0xFFFFFFFF, 0x05A6FFFF, \
       
  1182 0x000C0001, 0x00101312, 0x000C0008, 0x00380243, \
       
  1183 0x0010009C, 0x00204056, 0x002380D0, 0x00380056, \
       
  1184 0x0010009C, 0x00244F8B, 0x00000800, 0x00124818, \
       
  1185 0x0038047F, 0x00000000, 0x00140000, 0x003805A3, \
       
  1186 0x00308000, 0x00100610, 0x00100561, 0x000E0408, \
       
  1187 0x00134861, 0x000C0002, 0x00103093, 0x00308000, \
       
  1188 0x00100624, 0x00100561, 0x000E0408, 0x00100861, \
       
  1189 0x000C007E, 0x00222FA1, 0x000C0002, 0x00103093, \
       
  1190 0x00380F90, 0x00080000, 0x00103090, 0x00380F90, \
       
  1191 0x00000000, 0x00000000, 0x00000000, 0x00000000, \
       
  1192 0x0010009C, 0x00244FAD, 0x00010004, 0x00041000, \
       
  1193 0x003A047E, 0x00044010, 0x00380819, 0x00000000, \
       
  1194 0x00100099, 0x00206FFD, 0x0010009A, 0x0020AFFD, \
       
  1195 0x0010009C, 0x00244FC8, 0x00130824, 0x000C0001, \
       
  1196 0x00101213, 0x00260FF7, 0x00041000, 0x00010004, \
       
  1197 0x00130826, 0x000C0006, 0x00220700, 0x0013C926, \
       
  1198 0x00101313, 0x00380700, 0x00000000, 0x00000000, \
       
  1199 0x00000000, 0x00000000, 0x00000000, 0x00000000, \
       
  1200 0x00080600, 0x00101B10, 0x00050004, 0x00100826, \
       
  1201 0x00101210, 0x00380FB6, 0x00000000, 0x00000000, \
       
  1202 0x002115A9, 0x00100099, 0x002065A7, 0x0010009A, \
       
  1203 0x0020A5A7, 0x0010009C, 0x002445A7, 0x00130836, \
       
  1204 0x000C0000, 0x00220FE4, 0x000C0001, 0x00101B13, \
       
  1205 0x00229F8E, 0x00210F8E, 0x00226F8E, 0x00216F8E, \
       
  1206 0x0022FF8E, 0x00215F8E, 0x00214F8E, 0x003805A3, \
       
  1207 0x00010004, 0x00041000, 0x00278FE9, 0x00040800, \
       
  1208 0x00018100, 0x003A047E, 0x00130826, 0x000C0001, \
       
  1209 0x002205A7, 0x00101313, 0x003805A7, 0x00000000, \
       
  1210 0x00000000, 0x00000000, 0x00000000, 0x00000000, \
       
  1211 0x00000000, 0x00000000, 0x00000000, 0x00130831, \
       
  1212 0x0010090B, 0x00124813, 0x000CFF80, 0x00260703, \
       
  1213 0x00041000, 0x00010004, 0x00380700  \
       
  1214 }
       
  1215 
       
  1216 /********************************************************/
       
  1217 /*  Micro code for the 8086:1229 Rev F/10               */
       
  1218 /********************************************************/
       
  1219 
       
  1220 /*  Parameter values for the D102 E-step  */
       
  1221 #define D102_E_CPUSAVER_TIMER_DWORD		42
       
  1222 #define D102_E_CPUSAVER_BUNDLE_DWORD		54
       
  1223 #define D102_E_CPUSAVER_MIN_SIZE_DWORD		46
       
  1224 
       
  1225 #define     D102_E_RCVBUNDLE_UCODE \
       
  1226 {\
       
  1227 0x007D028F, 0x0E4204F9, 0x14ED0C85, 0x14FA14E9, 0x0EF70E36, 0x1FFF1FFF, \
       
  1228 0x00E014B9, 0x00000000, 0x00000000, 0x00000000, \
       
  1229 0x00E014BD, 0x00000000, 0x00000000, 0x00000000, \
       
  1230 0x00E014D5, 0x00000000, 0x00000000, 0x00000000, \
       
  1231 0x00000000, 0x00000000, 0x00000000, 0x00000000, \
       
  1232 0x00E014C1, 0x00000000, 0x00000000, 0x00000000, \
       
  1233 0x00000000, 0x00000000, 0x00000000, 0x00000000, \
       
  1234 0x00000000, 0x00000000, 0x00000000, 0x00000000, \
       
  1235 0x00000000, 0x00000000, 0x00000000, 0x00000000, \
       
  1236 0x00E014C8, 0x00000000, 0x00000000, 0x00000000, \
       
  1237 0x00200600, 0x00E014EE, 0x00000000, 0x00000000, \
       
  1238 0x0030FF80, 0x00940E46, 0x00038200, 0x00102000, \
       
  1239 0x00E00E43, 0x00000000, 0x00000000, 0x00000000, \
       
  1240 0x00300006, 0x00E014FB, 0x00000000, 0x00000000, \
       
  1241 0x00000000, 0x00000000, 0x00000000, 0x00000000, \
       
  1242 0x00000000, 0x00000000, 0x00000000, 0x00000000, \
       
  1243 0x00000000, 0x00000000, 0x00000000, 0x00000000, \
       
  1244 0x00906E41, 0x00800E3C, 0x00E00E39, 0x00000000, \
       
  1245 0x00906EFD, 0x00900EFD, 0x00E00EF8, 0x00000000, \
       
  1246 0x00000000, 0x00000000, 0x00000000, 0x00000000, \
       
  1247 0x00000000, 0x00000000, 0x00000000, 0x00000000, \
       
  1248 0x00000000, 0x00000000, 0x00000000, 0x00000000, \
       
  1249 0x00000000, 0x00000000, 0x00000000, 0x00000000, \
       
  1250 0x00000000, 0x00000000, 0x00000000, 0x00000000, \
       
  1251 0x00000000, 0x00000000, 0x00000000, 0x00000000, \
       
  1252 0x00000000, 0x00000000, 0x00000000, 0x00000000, \
       
  1253 0x00000000, 0x00000000, 0x00000000, 0x00000000, \
       
  1254 0x00000000, 0x00000000, 0x00000000, 0x00000000, \
       
  1255 0x00000000, 0x00000000, 0x00000000, 0x00000000, \
       
  1256 0x00000000, 0x00000000, 0x00000000, 0x00000000, \
       
  1257 0x00000000, 0x00000000, 0x00000000, 0x00000000, \
       
  1258 0x00000000, 0x00000000, 0x00000000, 0x00000000, \
       
  1259 0x00000000, 0x00000000, 0x00000000, 0x00000000, \
       
  1260 }
       
  1261 
       
/* cb_prepare handler: build a cb_ucode control block that loads the
 * CPUSaver receive-bundling microcode matching nic->mac, after patching
 * the three user-tunable 16-bit parameters (INTDELAY, BUNDLEMAX,
 * BUNDLESMALL) into the ucode image at the per-revision dword offsets.
 * Falls back to a NOP control block for ICH parts or unknown MAC
 * revisions.  @skb is unused. */
static void e100_setup_ucode(struct nic *nic, struct cb *cb, struct sk_buff *skb)
{
/* *INDENT-OFF* */
	static struct {
		u32 ucode[UCODE_SIZE + 1];
		u8 mac;			/* MAC revision this image applies to */
		u8 timer_dword;		/* dword index of the INTDELAY literal */
		u8 bundle_dword;	/* dword index of the BUNDLEMAX literal */
		u8 min_size_dword;	/* dword index of the BUNDLESMALL mask */
	} ucode_opts[] = {
		{ D101M_B_RCVBUNDLE_UCODE,
		  mac_82559_D101M,
		  D101M_CPUSAVER_TIMER_DWORD,
		  D101M_CPUSAVER_BUNDLE_DWORD,
		  D101M_CPUSAVER_MIN_SIZE_DWORD },
		{ D101S_RCVBUNDLE_UCODE,
		  mac_82559_D101S,
		  D101S_CPUSAVER_TIMER_DWORD,
		  D101S_CPUSAVER_BUNDLE_DWORD,
		  D101S_CPUSAVER_MIN_SIZE_DWORD },
		{ D102_E_RCVBUNDLE_UCODE,
		  mac_82551_F,
		  D102_E_CPUSAVER_TIMER_DWORD,
		  D102_E_CPUSAVER_BUNDLE_DWORD,
		  D102_E_CPUSAVER_MIN_SIZE_DWORD },
		{ D102_E_RCVBUNDLE_UCODE,
		  mac_82551_10,
		  D102_E_CPUSAVER_TIMER_DWORD,
		  D102_E_CPUSAVER_BUNDLE_DWORD,
		  D102_E_CPUSAVER_MIN_SIZE_DWORD },
		{ {0}, 0, 0, 0, 0}	/* sentinel: mac == 0 ends the search */
	}, *opts;
/* *INDENT-ON* */

/*************************************************************************
*  CPUSaver parameters
*
*  All CPUSaver parameters are 16-bit literals that are part of a
*  "move immediate value" instruction.  By changing the value of
*  the literal in the instruction before the code is loaded, the
*  driver can change the algorithm.
*
*  INTDELAY - This loads the dead-man timer with its initial value.
*    When this timer expires the interrupt is asserted, and the
*    timer is reset each time a new packet is received.  (see
*    BUNDLEMAX below to set the limit on number of chained packets)
*    The current default is 0x600 or 1536.  Experiments show that
*    the value should probably stay within the 0x200 - 0x1000.
*
*  BUNDLEMAX -
*    This sets the maximum number of frames that will be bundled.  In
*    some situations, such as the TCP windowing algorithm, it may be
*    better to limit the growth of the bundle size than let it go as
*    high as it can, because that could cause too much added latency.
*    The default is six, because this is the number of packets in the
*    default TCP window size.  A value of 1 would make CPUSaver indicate
*    an interrupt for every frame received.  If you do not want to put
*    a limit on the bundle size, set this value to xFFFF.
*
*  BUNDLESMALL -
*    This contains a bit-mask describing the minimum size frame that
*    will be bundled.  The default masks the lower 7 bits, which means
*    that any frame less than 128 bytes in length will not be bundled,
*    but will instead immediately generate an interrupt.  This does
*    not affect the current bundle in any way.  Any frame that is 128
*    bytes or large will be bundled normally.  This feature is meant
*    to provide immediate indication of ACK frames in a TCP environment.
*    Customers were seeing poor performance when a machine with CPUSaver
*    enabled was sending but not receiving.  The delay introduced when
*    the ACKs were received was enough to reduce total throughput, because
*    the sender would sit idle until the ACK was finally seen.
*
*    The current default is 0xFF80, which masks out the lower 7 bits.
*    This means that any frame which is x7F (127) bytes or smaller
*    will cause an immediate interrupt.  Because this value must be a
*    bit mask, there are only a few valid values that can be used.  To
*    turn this feature off, the driver can write the value xFFFF to the
*    lower word of this instruction (in the same way that the other
*    parameters are used).  Likewise, a value of 0xF800 (2047) would
*    cause an interrupt to be generated for every frame, because all
*    standard Ethernet frames are <= 2047 bytes in length.
*************************************************************************/

/* if you wish to disable the ucode functionality, while maintaining the
 * workarounds it provides, set the following defines to:
 * BUNDLESMALL 0
 * BUNDLEMAX 1
 * INTDELAY 1
 */
#define BUNDLESMALL 1
#define BUNDLEMAX (u16)6
#define INTDELAY (u16)1536 /* 0x600 */

	/* do not load u-code for ICH devices */
	if (nic->flags & ich)
		goto noloaducode;

	/* Search for ucode match against h/w revision */
	for (opts = ucode_opts; opts->mac; opts++) {
		int i;
		u32 *ucode = opts->ucode;
		if (nic->mac != opts->mac)
			continue;

		/* Insert user-tunable settings: each parameter replaces
		 * the low 16 bits of its "move immediate" instruction */
		ucode[opts->timer_dword] &= 0xFFFF0000;
		ucode[opts->timer_dword] |= INTDELAY;
		ucode[opts->bundle_dword] &= 0xFFFF0000;
		ucode[opts->bundle_dword] |= BUNDLEMAX;
		ucode[opts->min_size_dword] &= 0xFFFF0000;
		ucode[opts->min_size_dword] |= (BUNDLESMALL) ? 0xFFFF : 0xFF80;

		/* Copy the patched image into the control block */
		for (i = 0; i < UCODE_SIZE; i++)
			cb->u.ucode[i] = cpu_to_le32(ucode[i]);
		cb->command = cpu_to_le16(cb_ucode | cb_el);
		return;
	}

noloaducode:
	cb->command = cpu_to_le16(cb_nop | cb_el);
}
       
  1383 
       
  1384 static inline int e100_exec_cb_wait(struct nic *nic, struct sk_buff *skb,
       
  1385 	void (*cb_prepare)(struct nic *, struct cb *, struct sk_buff *))
       
  1386 {
       
  1387 	int err = 0, counter = 50;
       
  1388 	struct cb *cb = nic->cb_to_clean;
       
  1389 
       
  1390 	if ((err = e100_exec_cb(nic, NULL, e100_setup_ucode)))
       
  1391 		DPRINTK(PROBE,ERR, "ucode cmd failed with error %d\n", err);
       
  1392 
       
  1393 	/* must restart cuc */
       
  1394 	nic->cuc_cmd = cuc_start;
       
  1395 
       
  1396 	/* wait for completion */
       
  1397 	e100_write_flush(nic);
       
  1398 	udelay(10);
       
  1399 
       
  1400 	/* wait for possibly (ouch) 500ms */
       
  1401 	while (!(cb->status & cpu_to_le16(cb_complete))) {
       
  1402 		msleep(10);
       
  1403 		if (!--counter) break;
       
  1404 	}
       
  1405 
       
  1406 	/* ack any interrupts, something could have been set */
       
  1407 	iowrite8(~0, &nic->csr->scb.stat_ack);
       
  1408 
       
  1409 	/* if the command failed, or is not OK, notify and return */
       
  1410 	if (!counter || !(cb->status & cpu_to_le16(cb_ok))) {
       
  1411 		DPRINTK(PROBE,ERR, "ucode load failed\n");
       
  1412 		err = -EPERM;
       
  1413 	}
       
  1414 
       
  1415 	return err;
       
  1416 }
       
  1417 
       
  1418 static void e100_setup_iaaddr(struct nic *nic, struct cb *cb,
       
  1419 	struct sk_buff *skb)
       
  1420 {
       
  1421 	cb->command = cpu_to_le16(cb_iaaddr);
       
  1422 	memcpy(cb->u.iaaddr, nic->netdev->dev_addr, ETH_ALEN);
       
  1423 }
       
  1424 
       
  1425 static void e100_dump(struct nic *nic, struct cb *cb, struct sk_buff *skb)
       
  1426 {
       
  1427 	cb->command = cpu_to_le16(cb_dump);
       
  1428 	cb->u.dump_buffer_addr = cpu_to_le32(nic->dma_addr +
       
  1429 		offsetof(struct mem, dump_buf));
       
  1430 }
       
  1431 
       
  1432 #define NCONFIG_AUTO_SWITCH	0x0080
       
  1433 #define MII_NSC_CONG		MII_RESV1
       
  1434 #define NSC_CONG_ENABLE		0x0100
       
  1435 #define NSC_CONG_TXREADY	0x0400
       
  1436 #define ADVERTISE_FC_SUPPORTED	0x0400
       
/* Locate the PHY on the MDIO bus, isolate all other addresses, and
 * apply PHY-specific fixups (National TX congestion control and
 * MDI/MDI-X auto-switch configuration).
 *
 * Returns 0 on success or -EAGAIN if no responding PHY was found.
 */
static int e100_phy_init(struct nic *nic)
{
	struct net_device *netdev = nic->netdev;
	u32 addr;
	u16 bmcr, stat, id_lo, id_hi, cong;

	/* Discover phy addr by searching addrs in order {1,0,2,..., 31} */
	for(addr = 0; addr < 32; addr++) {
		nic->mii.phy_id = (addr == 0) ? 1 : (addr == 1) ? 0 : addr;
		bmcr = mdio_read(netdev, nic->mii.phy_id, MII_BMCR);
		/* BMSR is read twice; presumably because its status bits
		 * are latched and the first read can be stale --
		 * TODO(review): confirm */
		stat = mdio_read(netdev, nic->mii.phy_id, MII_BMSR);
		stat = mdio_read(netdev, nic->mii.phy_id, MII_BMSR);
		/* A non-responding address reads back as all-ones (or
		 * all-zeros); any other pattern means a PHY answered. */
		if(!((bmcr == 0xFFFF) || ((stat == 0) && (bmcr == 0))))
			break;
	}
	DPRINTK(HW, DEBUG, "phy_addr = %d\n", nic->mii.phy_id);
	if(addr == 32)
		return -EAGAIN;

	/* Selected the phy and isolate the rest */
	for(addr = 0; addr < 32; addr++) {
		if(addr != nic->mii.phy_id) {
			mdio_write(netdev, addr, MII_BMCR, BMCR_ISOLATE);
		} else {
			/* Clear the isolate bit on the PHY we chose. */
			bmcr = mdio_read(netdev, addr, MII_BMCR);
			mdio_write(netdev, addr, MII_BMCR,
				bmcr & ~BMCR_ISOLATE);
		}
	}

	/* Get phy ID */
	id_lo = mdio_read(netdev, nic->mii.phy_id, MII_PHYSID1);
	id_hi = mdio_read(netdev, nic->mii.phy_id, MII_PHYSID2);
	nic->phy = (u32)id_hi << 16 | (u32)id_lo;
	DPRINTK(HW, DEBUG, "phy ID = 0x%08X\n", nic->phy);

	/* Handle National tx phys */
#define NCS_PHY_MODEL_MASK	0xFFF0FFFF
	if((nic->phy & NCS_PHY_MODEL_MASK) == phy_nsc_tx) {
		/* Disable congestion control */
		cong = mdio_read(netdev, nic->mii.phy_id, MII_NSC_CONG);
		cong |= NSC_CONG_TXREADY;
		cong &= ~NSC_CONG_ENABLE;
		mdio_write(netdev, nic->mii.phy_id, MII_NSC_CONG, cong);
	}

	/* Configure MDI/MDI-X auto-switching on newer MACs, or on ICH
	 * parts when the PHY reports support and the EEPROM has not
	 * already enabled MDI-X. */
	if((nic->mac >= mac_82550_D102) || ((nic->flags & ich) &&
	   (mdio_read(netdev, nic->mii.phy_id, MII_TPISTATUS) & 0x8000) &&
		!(nic->eeprom[eeprom_cnfg_mdix] & eeprom_mdix_enabled))) {
		/* enable/disable MDI/MDI-X auto-switching. */
		mdio_write(netdev, nic->mii.phy_id, MII_NCONFIG,
				nic->mii.force_media ? 0 : NCONFIG_AUTO_SWITCH);
	}

	return 0;
}
       
  1493 
       
/* Bring the controller from reset to an operational (but IRQ-masked)
 * state: self-test, PHY setup, CU/RU base loading, microcode load,
 * device configuration, MAC address setup and statistics dump setup.
 *
 * Returns 0 on success or the first error encountered; IRQs are left
 * disabled on success.
 */
static int e100_hw_init(struct nic *nic)
{
	int err;

	e100_hw_reset(nic);

	DPRINTK(HW, ERR, "e100_hw_init\n");
	/* The self-test sleeps, so skip it when called from atomic
	 * context. */
	if(!in_interrupt() && (err = e100_self_test(nic)))
		return err;

	if((err = e100_phy_init(nic)))
		return err;
	/* Load 0 as the CU and RU base addresses. */
	if((err = e100_exec_cmd(nic, cuc_load_base, 0)))
		return err;
	if((err = e100_exec_cmd(nic, ruc_load_base, 0)))
		return err;
	/* Microcode load must complete before continuing, hence the
	 * waiting variant. */
	if ((err = e100_exec_cb_wait(nic, NULL, e100_setup_ucode)))
		return err;
	if((err = e100_exec_cb(nic, NULL, e100_configure)))
		return err;
	if((err = e100_exec_cb(nic, NULL, e100_setup_iaaddr)))
		return err;
	/* Point the controller at the stats area, then reset the
	 * hardware counters. */
	if((err = e100_exec_cmd(nic, cuc_dump_addr,
		nic->dma_addr + offsetof(struct mem, stats))))
		return err;
	if((err = e100_exec_cmd(nic, cuc_dump_reset, 0)))
		return err;

	e100_disable_irq(nic);

	return 0;
}
       
  1526 
       
  1527 static void e100_multi(struct nic *nic, struct cb *cb, struct sk_buff *skb)
       
  1528 {
       
  1529 	struct net_device *netdev = nic->netdev;
       
  1530 	struct dev_mc_list *list = netdev->mc_list;
       
  1531 	u16 i, count = min(netdev->mc_count, E100_MAX_MULTICAST_ADDRS);
       
  1532 
       
  1533 	cb->command = cpu_to_le16(cb_multi);
       
  1534 	cb->u.multi.count = cpu_to_le16(count * ETH_ALEN);
       
  1535 	for(i = 0; list && i < count; i++, list = list->next)
       
  1536 		memcpy(&cb->u.multi.addr[i*ETH_ALEN], &list->dmi_addr,
       
  1537 			ETH_ALEN);
       
  1538 }
       
  1539 
       
  1540 static void e100_set_multicast_list(struct net_device *netdev)
       
  1541 {
       
  1542 	struct nic *nic = netdev_priv(netdev);
       
  1543 
       
  1544 	DPRINTK(HW, DEBUG, "mc_count=%d, flags=0x%04X\n",
       
  1545 		netdev->mc_count, netdev->flags);
       
  1546 
       
  1547 	if(netdev->flags & IFF_PROMISC)
       
  1548 		nic->flags |= promiscuous;
       
  1549 	else
       
  1550 		nic->flags &= ~promiscuous;
       
  1551 
       
  1552 	if(netdev->flags & IFF_ALLMULTI ||
       
  1553 		netdev->mc_count > E100_MAX_MULTICAST_ADDRS)
       
  1554 		nic->flags |= multicast_all;
       
  1555 	else
       
  1556 		nic->flags &= ~multicast_all;
       
  1557 
       
  1558 	e100_exec_cb(nic, NULL, e100_configure);
       
  1559 	e100_exec_cb(nic, NULL, e100_multi);
       
  1560 }
       
  1561 
       
/* Harvest the controller's statistics dump area into the netdev stats
 * and the driver's private counters, then kick off the next
 * dump-and-reset cycle.  Results are consumed one watchdog period
 * after the command that produced them.
 */
static void e100_update_stats(struct nic *nic)
{
	struct net_device *dev = nic->netdev;
	struct net_device_stats *ns = &dev->stats;
	struct stats *s = &nic->mem->stats;
	/* The "dump complete" marker sits at a different offset
	 * depending on how large a stats block this MAC generation
	 * writes. */
	__le32 *complete = (nic->mac < mac_82558_D101_A4) ? &s->fc_xmt_pause :
		(nic->mac < mac_82559_D101M) ? (__le32 *)&s->xmt_tco_frames :
		&s->complete;

	/* Device's stats reporting may take several microseconds to
	 * complete, so we're always waiting for results of the
	 * previous command. */

	if(*complete == cpu_to_le32(cuc_dump_reset_complete)) {
		/* Clear the marker so a stale block is not re-consumed. */
		*complete = 0;
		nic->tx_frames = le32_to_cpu(s->tx_good_frames);
		nic->tx_collisions = le32_to_cpu(s->tx_total_collisions);
		ns->tx_aborted_errors += le32_to_cpu(s->tx_max_collisions);
		ns->tx_window_errors += le32_to_cpu(s->tx_late_collisions);
		ns->tx_carrier_errors += le32_to_cpu(s->tx_lost_crs);
		ns->tx_fifo_errors += le32_to_cpu(s->tx_underruns);
		ns->collisions += nic->tx_collisions;
		ns->tx_errors += le32_to_cpu(s->tx_max_collisions) +
			le32_to_cpu(s->tx_lost_crs);
		ns->rx_length_errors += le32_to_cpu(s->rx_short_frame_errors) +
			nic->rx_over_length_errors;
		ns->rx_crc_errors += le32_to_cpu(s->rx_crc_errors);
		ns->rx_frame_errors += le32_to_cpu(s->rx_alignment_errors);
		ns->rx_over_errors += le32_to_cpu(s->rx_overrun_errors);
		ns->rx_fifo_errors += le32_to_cpu(s->rx_overrun_errors);
		ns->rx_missed_errors += le32_to_cpu(s->rx_resource_errors);
		ns->rx_errors += le32_to_cpu(s->rx_crc_errors) +
			le32_to_cpu(s->rx_alignment_errors) +
			le32_to_cpu(s->rx_short_frame_errors) +
			le32_to_cpu(s->rx_cdt_errors);
		nic->tx_deferred += le32_to_cpu(s->tx_deferred);
		nic->tx_single_collisions +=
			le32_to_cpu(s->tx_single_collisions);
		nic->tx_multiple_collisions +=
			le32_to_cpu(s->tx_multiple_collisions);
		/* Newer MACs report additional counters. */
		if(nic->mac >= mac_82558_D101_A4) {
			nic->tx_fc_pause += le32_to_cpu(s->fc_xmt_pause);
			nic->rx_fc_pause += le32_to_cpu(s->fc_rcv_pause);
			nic->rx_fc_unsupported +=
				le32_to_cpu(s->fc_rcv_unsupported);
			if(nic->mac >= mac_82559_D101M) {
				nic->tx_tco_frames +=
					le16_to_cpu(s->xmt_tco_frames);
				nic->rx_tco_frames +=
					le16_to_cpu(s->rcv_tco_frames);
			}
		}
	}


	/* Start the next dump; its result is picked up on the following
	 * call. */
	if(e100_exec_cmd(nic, cuc_dump_reset, 0))
		DPRINTK(TX_ERR, DEBUG, "exec cuc_dump_reset failed\n");
}
       
  1620 
       
  1621 static void e100_adjust_adaptive_ifs(struct nic *nic, int speed, int duplex)
       
  1622 {
       
  1623 	/* Adjust inter-frame-spacing (IFS) between two transmits if
       
  1624 	 * we're getting collisions on a half-duplex connection. */
       
  1625 
       
  1626 	if(duplex == DUPLEX_HALF) {
       
  1627 		u32 prev = nic->adaptive_ifs;
       
  1628 		u32 min_frames = (speed == SPEED_100) ? 1000 : 100;
       
  1629 
       
  1630 		if((nic->tx_frames / 32 < nic->tx_collisions) &&
       
  1631 		   (nic->tx_frames > min_frames)) {
       
  1632 			if(nic->adaptive_ifs < 60)
       
  1633 				nic->adaptive_ifs += 5;
       
  1634 		} else if (nic->tx_frames < min_frames) {
       
  1635 			if(nic->adaptive_ifs >= 5)
       
  1636 				nic->adaptive_ifs -= 5;
       
  1637 		}
       
  1638 		if(nic->adaptive_ifs != prev)
       
  1639 			e100_exec_cb(nic, NULL, e100_configure);
       
  1640 	}
       
  1641 }
       
  1642 
       
  1643 static void e100_watchdog(unsigned long data)
       
  1644 {
       
  1645 	struct nic *nic = (struct nic *)data;
       
  1646 	struct ethtool_cmd cmd;
       
  1647 
       
  1648 	DPRINTK(TIMER, DEBUG, "right now = %ld\n", jiffies);
       
  1649 
       
  1650 	/* mii library handles link maintenance tasks */
       
  1651 
       
  1652 	if (nic->ecdev) {
       
  1653 		ecdev_set_link(nic->ecdev, mii_link_ok(&nic->mii) ? 1 : 0);
       
  1654 	} else {
       
  1655 		mii_ethtool_gset(&nic->mii, &cmd);
       
  1656 
       
  1657 		if(mii_link_ok(&nic->mii) && !netif_carrier_ok(nic->netdev)) {
       
  1658 			DPRINTK(LINK, INFO, "link up, %sMbps, %s-duplex\n",
       
  1659 					cmd.speed == SPEED_100 ? "100" : "10",
       
  1660 					cmd.duplex == DUPLEX_FULL ? "full" : "half");
       
  1661 		} else if(!mii_link_ok(&nic->mii) && netif_carrier_ok(nic->netdev)) {
       
  1662 			DPRINTK(LINK, INFO, "link down\n");
       
  1663 		}
       
  1664 	}
       
  1665 
       
  1666 	mii_check_link(&nic->mii);
       
  1667 
       
  1668 	/* Software generated interrupt to recover from (rare) Rx
       
  1669 	 * allocation failure.
       
  1670 	 * Unfortunately have to use a spinlock to not re-enable interrupts
       
  1671 	 * accidentally, due to hardware that shares a register between the
       
  1672 	 * interrupt mask bit and the SW Interrupt generation bit */
       
  1673 	if (!nic->ecdev)
       
  1674 		spin_lock_irq(&nic->cmd_lock);
       
  1675 	iowrite8(ioread8(&nic->csr->scb.cmd_hi) | irq_sw_gen,&nic->csr->scb.cmd_hi);
       
  1676 	e100_write_flush(nic);
       
  1677 	if (!nic->ecdev)
       
  1678 		spin_unlock_irq(&nic->cmd_lock);
       
  1679 
       
  1680 	e100_update_stats(nic);
       
  1681 	e100_adjust_adaptive_ifs(nic, cmd.speed, cmd.duplex);
       
  1682 
       
  1683 	if(nic->mac <= mac_82557_D100_C)
       
  1684 		/* Issue a multicast command to workaround a 557 lock up */
       
  1685 		e100_set_multicast_list(nic->netdev);
       
  1686 
       
  1687 	if(nic->flags & ich && cmd.speed==SPEED_10 && cmd.duplex==DUPLEX_HALF)
       
  1688 		/* Need SW workaround for ICH[x] 10Mbps/half duplex Tx hang. */
       
  1689 		nic->flags |= ich_10h_workaround;
       
  1690 	else
       
  1691 		nic->flags &= ~ich_10h_workaround;
       
  1692 
       
  1693 	if (!nic->ecdev)
       
  1694 		mod_timer(&nic->watchdog,
       
  1695 				round_jiffies(jiffies + E100_WATCHDOG_PERIOD));
       
  1696 }
       
  1697 
       
  1698 static void e100_xmit_prepare(struct nic *nic, struct cb *cb,
       
  1699 	struct sk_buff *skb)
       
  1700 {
       
  1701 	cb->command = nic->tx_command;
       
  1702 	/* interrupt every 16 packets regardless of delay */
       
  1703 	if((nic->cbs_avail & ~15) == nic->cbs_avail)
       
  1704 		cb->command |= cpu_to_le16(cb_i);
       
  1705 	cb->u.tcb.tbd_array = cb->dma_addr + offsetof(struct cb, u.tcb.tbd);
       
  1706 	cb->u.tcb.tcb_byte_count = 0;
       
  1707 	cb->u.tcb.threshold = nic->tx_threshold;
       
  1708 	cb->u.tcb.tbd_count = 1;
       
  1709 	cb->u.tcb.tbd.buf_addr = cpu_to_le32(pci_map_single(nic->pdev,
       
  1710 		skb->data, skb->len, PCI_DMA_TODEVICE));
       
  1711 	/* check for mapping failure? */
       
  1712 	cb->u.tcb.tbd.size = cpu_to_le16(skb->len);
       
  1713 }
       
  1714 
       
  1715 static int e100_xmit_frame(struct sk_buff *skb, struct net_device *netdev)
       
  1716 {
       
  1717 	struct nic *nic = netdev_priv(netdev);
       
  1718 	int err;
       
  1719 
       
  1720 	if(nic->flags & ich_10h_workaround) {
       
  1721 		/* SW workaround for ICH[x] 10Mbps/half duplex Tx hang.
       
  1722 		   Issue a NOP command followed by a 1us delay before
       
  1723 		   issuing the Tx command. */
       
  1724 		if(e100_exec_cmd(nic, cuc_nop, 0))
       
  1725 			DPRINTK(TX_ERR, DEBUG, "exec cuc_nop failed\n");
       
  1726 		udelay(1);
       
  1727 	}
       
  1728 
       
  1729 	err = e100_exec_cb(nic, skb, e100_xmit_prepare);
       
  1730 
       
  1731 	switch(err) {
       
  1732 	case -ENOSPC:
       
  1733 		/* We queued the skb, but now we're out of space. */
       
  1734 		DPRINTK(TX_ERR, DEBUG, "No space for CB\n");
       
  1735 		if (!nic->ecdev)
       
  1736 			netif_stop_queue(netdev);
       
  1737 		break;
       
  1738 	case -ENOMEM:
       
  1739 		/* This is a hard error - log it. */
       
  1740 		DPRINTK(TX_ERR, DEBUG, "Out of Tx resources, returning skb\n");
       
  1741 		if (!nic->ecdev)
       
  1742 			netif_stop_queue(netdev);
       
  1743 		return 1;
       
  1744 	}
       
  1745 
       
  1746 	netdev->trans_start = jiffies;
       
  1747 	return 0;
       
  1748 }
       
  1749 
       
/* Reclaim completed transmit command blocks: account stats, unmap the
 * DMA buffers, free the skbs (non-EtherCAT only) and return the CBs to
 * the available pool.
 *
 * Returns nonzero if at least one CB was cleaned (used to decide
 * whether to wake a stopped Tx queue).
 */
static int e100_tx_clean(struct nic *nic)
{
	struct net_device *dev = nic->netdev;
	struct cb *cb;
	int tx_cleaned = 0;

	/* EtherCAT mode runs without the cb_lock (single poller). */
	if (!nic->ecdev)
		spin_lock(&nic->cb_lock);

	/* Clean CBs marked complete */
	/* Note: the loop header advances nic->cb_to_clean in lockstep
	 * with cb, so the clean pointer always tracks progress. */
	for(cb = nic->cb_to_clean;
	    cb->status & cpu_to_le16(cb_complete);
	    cb = nic->cb_to_clean = cb->next) {
		DPRINTK(TX_DONE, DEBUG, "cb[%d]->status = 0x%04X\n",
		        (int)(((void*)cb - (void*)nic->cbs)/sizeof(struct cb)),
		        cb->status);

		/* Only CBs that carried an skb (i.e. Tx frames, not
		 * control commands) count towards stats. */
		if(likely(cb->skb != NULL)) {
			dev->stats.tx_packets++;
			dev->stats.tx_bytes += cb->skb->len;

			pci_unmap_single(nic->pdev,
				le32_to_cpu(cb->u.tcb.tbd.buf_addr),
				le16_to_cpu(cb->u.tcb.tbd.size),
				PCI_DMA_TODEVICE);
			/* In EtherCAT mode the skb is owned by the
			 * master stack; do not free it here. */
			if (!nic->ecdev)
				dev_kfree_skb_any(cb->skb);
			cb->skb = NULL;
			tx_cleaned = 1;
		}
		cb->status = 0;
		nic->cbs_avail++;
	}

	if (!nic->ecdev) {
		spin_unlock(&nic->cb_lock);

		/* Recover from running out of Tx resources in xmit_frame */
		if(unlikely(tx_cleaned && netif_queue_stopped(nic->netdev)))
			netif_wake_queue(nic->netdev);
	}

	return tx_cleaned;
}
       
  1794 
       
/* Tear down the command block ring: unmap and free any in-flight Tx
 * skbs, release the coherent CB memory, and reset all ring pointers
 * and the CU command state.
 */
static void e100_clean_cbs(struct nic *nic)
{
	if(nic->cbs) {
		/* Walk from cb_to_clean until every CB is accounted
		 * for, releasing any skbs still attached. */
		while(nic->cbs_avail != nic->params.cbs.count) {
			struct cb *cb = nic->cb_to_clean;
			if(cb->skb) {
				pci_unmap_single(nic->pdev,
					le32_to_cpu(cb->u.tcb.tbd.buf_addr),
					le16_to_cpu(cb->u.tcb.tbd.size),
					PCI_DMA_TODEVICE);
				/* EtherCAT-owned skbs are not freed by
				 * the driver. */
				if (!nic->ecdev)
					dev_kfree_skb(cb->skb);
			}
			nic->cb_to_clean = nic->cb_to_clean->next;
			nic->cbs_avail++;
		}
		pci_free_consistent(nic->pdev,
			sizeof(struct cb) * nic->params.cbs.count,
			nic->cbs, nic->cbs_dma_addr);
		nic->cbs = NULL;
		nic->cbs_avail = 0;
	}
	nic->cuc_cmd = cuc_start;
	/* nic->cbs is NULL at this point (or was already NULL), so all
	 * ring pointers are reset to NULL until e100_alloc_cbs(). */
	nic->cb_to_use = nic->cb_to_send = nic->cb_to_clean =
		nic->cbs;
}
       
  1821 
       
  1822 static int e100_alloc_cbs(struct nic *nic)
       
  1823 {
       
  1824 	struct cb *cb;
       
  1825 	unsigned int i, count = nic->params.cbs.count;
       
  1826 
       
  1827 	nic->cuc_cmd = cuc_start;
       
  1828 	nic->cb_to_use = nic->cb_to_send = nic->cb_to_clean = NULL;
       
  1829 	nic->cbs_avail = 0;
       
  1830 
       
  1831 	nic->cbs = pci_alloc_consistent(nic->pdev,
       
  1832 		sizeof(struct cb) * count, &nic->cbs_dma_addr);
       
  1833 	if(!nic->cbs)
       
  1834 		return -ENOMEM;
       
  1835 
       
  1836 	for(cb = nic->cbs, i = 0; i < count; cb++, i++) {
       
  1837 		cb->next = (i + 1 < count) ? cb + 1 : nic->cbs;
       
  1838 		cb->prev = (i == 0) ? nic->cbs + count - 1 : cb - 1;
       
  1839 
       
  1840 		cb->dma_addr = nic->cbs_dma_addr + i * sizeof(struct cb);
       
  1841 		cb->link = cpu_to_le32(nic->cbs_dma_addr +
       
  1842 			((i+1) % count) * sizeof(struct cb));
       
  1843 		cb->skb = NULL;
       
  1844 	}
       
  1845 
       
  1846 	nic->cb_to_use = nic->cb_to_send = nic->cb_to_clean = nic->cbs;
       
  1847 	nic->cbs_avail = count;
       
  1848 
       
  1849 	return 0;
       
  1850 }
       
  1851 
       
  1852 static inline void e100_start_receiver(struct nic *nic, struct rx *rx)
       
  1853 {
       
  1854 	if(!nic->rxs) return;
       
  1855 	if(RU_SUSPENDED != nic->ru_running) return;
       
  1856 
       
  1857 	/* handle init time starts */
       
  1858 	if(!rx) rx = nic->rxs;
       
  1859 
       
  1860 	/* (Re)start RU if suspended or idle and RFA is non-NULL */
       
  1861 	if(rx->skb) {
       
  1862 		e100_exec_cmd(nic, ruc_start, rx->dma_addr);
       
  1863 		nic->ru_running = RU_RUNNING;
       
  1864 	}
       
  1865 }
       
  1866 
       
  1867 #define RFD_BUF_LEN (sizeof(struct rfd) + VLAN_ETH_FRAME_LEN)
       
/* Allocate and DMA-map one receive buffer (RFD header + frame space)
 * and splice it onto the end of the receive frame area by patching the
 * previous RFD's link field.
 *
 * Returns 0 on success or -ENOMEM on allocation/mapping failure (rx is
 * left with skb == NULL in that case).
 */
static int e100_rx_alloc_skb(struct nic *nic, struct rx *rx)
{
	if(!(rx->skb = netdev_alloc_skb(nic->netdev, RFD_BUF_LEN + NET_IP_ALIGN)))
		return -ENOMEM;

	/* Align, init, and map the RFD. */
	skb_reserve(rx->skb, NET_IP_ALIGN);
	/* Seed the buffer head with the template RFD. */
	skb_copy_to_linear_data(rx->skb, &nic->blank_rfd, sizeof(struct rfd));
	/* Bidirectional: the CPU writes the RFD header, the device
	 * writes status and frame data into the same buffer. */
	rx->dma_addr = pci_map_single(nic->pdev, rx->skb->data,
		RFD_BUF_LEN, PCI_DMA_BIDIRECTIONAL);

	if (pci_dma_mapping_error(rx->dma_addr)) {
		dev_kfree_skb_any(rx->skb);
		rx->skb = NULL;
		rx->dma_addr = 0;
		return -ENOMEM;
	}

	/* Link the RFD to end of RFA by linking previous RFD to
	 * this one.  We are safe to touch the previous RFD because
	 * it is protected by the before last buffer's el bit being set */
	if (rx->prev->skb) {
		struct rfd *prev_rfd = (struct rfd *)rx->prev->skb->data;
		put_unaligned_le32(rx->dma_addr, &prev_rfd->link);
		/* Push the updated link out to the device. */
		pci_dma_sync_single_for_device(nic->pdev, rx->prev->dma_addr,
			sizeof(struct rfd), PCI_DMA_TODEVICE);
	}

	return 0;
}
       
  1898 
       
/* Examine one receive buffer and, if the hardware completed it, hand
 * the frame up (to the network stack, or to the EtherCAT master in
 * ecdev mode).  In ecdev mode the buffer is immediately recycled; in
 * normal mode rx->skb is cleared so e100_rx_clean() refills it.
 *
 * Returns 0 when a buffer was consumed, -EAGAIN when the work quota is
 * exhausted, or -ENODATA when the buffer is not ready yet.
 */
static int e100_rx_indicate(struct nic *nic, struct rx *rx,
	unsigned int *work_done, unsigned int work_to_do)
{
	struct net_device *dev = nic->netdev;
	struct sk_buff *skb = rx->skb;
	struct rfd *rfd = (struct rfd *)skb->data;
	u16 rfd_status, actual_size;

	if(unlikely(work_done && *work_done >= work_to_do))
		return -EAGAIN;

	/* Need to sync before taking a peek at cb_complete bit */
	pci_dma_sync_single_for_cpu(nic->pdev, rx->dma_addr,
		sizeof(struct rfd), PCI_DMA_FROMDEVICE);
	rfd_status = le16_to_cpu(rfd->status);

	DPRINTK(RX_STATUS, DEBUG, "status=0x%04X\n", rfd_status);

	/* If data isn't ready, nothing to indicate */
	if (unlikely(!(rfd_status & cb_complete))) {
		/* If the next buffer has the el bit, but we think the receiver
		 * is still running, check to see if it really stopped while
		 * we had interrupts off.
		 * This allows for a fast restart without re-enabling
		 * interrupts */
		if ((le16_to_cpu(rfd->command) & cb_el) &&
		    (RU_RUNNING == nic->ru_running))

			if (readb(&nic->csr->scb.status) & rus_no_res)
				nic->ru_running = RU_SUSPENDED;
		return -ENODATA;
	}

	/* Get actual data size */
	/* The low 14 bits carry the byte count; clamp defensively to
	 * the buffer capacity. */
	actual_size = le16_to_cpu(rfd->actual_size) & 0x3FFF;
	if(unlikely(actual_size > RFD_BUF_LEN - sizeof(struct rfd)))
		actual_size = RFD_BUF_LEN - sizeof(struct rfd);

	/* Get data */
	pci_unmap_single(nic->pdev, rx->dma_addr,
		RFD_BUF_LEN, PCI_DMA_FROMDEVICE);

	/* If this buffer has the el bit, but we think the receiver
	 * is still running, check to see if it really stopped while
	 * we had interrupts off.
	 * This allows for a fast restart without re-enabling interrupts.
	 * This can happen when the RU sees the size change but also sees
	 * the el bit set. */
	if ((le16_to_cpu(rfd->command) & cb_el) &&
	    (RU_RUNNING == nic->ru_running)) {

	    if (readb(&nic->csr->scb.status) & rus_no_res)
		nic->ru_running = RU_SUSPENDED;
	}

	if (!nic->ecdev) {
		/* Pull off the RFD and put the actual data (minus eth hdr) */
		skb_reserve(skb, sizeof(struct rfd));
		skb_put(skb, actual_size);
		skb->protocol = eth_type_trans(skb, nic->netdev);
	}

	if(unlikely(!(rfd_status & cb_ok))) {
		if (!nic->ecdev) {
			/* Don't indicate if hardware indicates errors */
			dev_kfree_skb_any(skb);
		}
	} else if(actual_size > ETH_DATA_LEN + VLAN_ETH_HLEN) {
		/* Don't indicate oversized frames */
		nic->rx_over_length_errors++;
		if (!nic->ecdev)
			dev_kfree_skb_any(skb);
	} else {
		dev->stats.rx_packets++;
		dev->stats.rx_bytes += actual_size;
		nic->netdev->last_rx = jiffies;
		if (nic->ecdev) {
			/* Hand the payload (after the RFD header) to the
			 * EtherCAT master; the skb stays with the driver. */
			ecdev_receive(nic->ecdev,
					skb->data + sizeof(struct rfd), actual_size);

			// No need to detect link status as
			// long as frames are received: Reset watchdog.
			nic->ec_watchdog_jiffies = jiffies;
		} else {
			netif_receive_skb(skb);
		}
		if(work_done)
			(*work_done)++;
	}

	if (nic->ecdev) {
		// make receive frame descriptior usable again
		/* Recycle in place: re-seed the RFD template and remap
		 * the same skb data for the device. */
		memcpy(skb->data, &nic->blank_rfd, sizeof(struct rfd));
		rx->dma_addr = pci_map_single(nic->pdev, skb->data,
				RFD_BUF_LEN, PCI_DMA_BIDIRECTIONAL);
		if(pci_dma_mapping_error(rx->dma_addr)) {
			rx->dma_addr = 0;
		}

		/* Link the RFD to end of RFA by linking previous RFD to
		 * this one, and clearing EL bit of previous.  */
		if(rx->prev->skb) {
			struct rfd *prev_rfd = (struct rfd *)rx->prev->skb->data;
			put_unaligned(cpu_to_le32(rx->dma_addr),
					(u32 *)&prev_rfd->link);
			/* Ensure the link write is visible before the el
			 * bit is cleared. */
			wmb();
			prev_rfd->command &= ~cpu_to_le16(cb_el);
			pci_dma_sync_single_for_device(nic->pdev, rx->prev->dma_addr,
					sizeof(struct rfd), PCI_DMA_TODEVICE);
		}
	} else {
		/* Ownership passed to the stack; e100_rx_clean() will
		 * allocate a replacement. */
		rx->skb = NULL;
	}

	return 0;
}
       
  2015 
       
/* Process completed receive buffers up to the work quota, refill the
 * ring (non-EtherCAT mode), move the el-bit "stopping point" forward,
 * and restart the receive unit if it went into No Resources while we
 * were cleaning.
 */
static void e100_rx_clean(struct nic *nic, unsigned int *work_done,
	unsigned int work_to_do)
{
	struct rx *rx;
	int restart_required = 0, err = 0;
	struct rx *old_before_last_rx, *new_before_last_rx;
	struct rfd *old_before_last_rfd, *new_before_last_rfd;

	/* Indicate newly arrived packets */
	for(rx = nic->rx_to_clean; rx->skb; rx = nic->rx_to_clean = rx->next) {
		err = e100_rx_indicate(nic, rx, work_done, work_to_do);
		/* Hit quota or no more to clean */
		if (-EAGAIN == err || -ENODATA == err)
			break;
	}


	/* On EAGAIN, hit quota so have more work to do, restart once
	 * cleanup is complete.
	 * Else, are we already rnr? then pay attention!!! this ensures that
	 * the state machine progression never allows a start with a
	 * partially cleaned list, avoiding a race between hardware
	 * and rx_to_clean when in NAPI mode */
	if (-EAGAIN != err && RU_SUSPENDED == nic->ru_running)
		restart_required = 1;

	/* Remember the current stopping point (the el-bit buffer two
	 * before rx_to_use).  NOTE(review): assumes that buffer has an
	 * skb attached -- appears guaranteed by the refill scheme. */
	old_before_last_rx = nic->rx_to_use->prev->prev;
	old_before_last_rfd = (struct rfd *)old_before_last_rx->skb->data;

	/* In EtherCAT mode buffers are recycled in e100_rx_indicate(),
	 * so no refill is needed. */
	if (!nic->ecdev) {
		/* Alloc new skbs to refill list */
		for(rx = nic->rx_to_use; !rx->skb; rx = nic->rx_to_use = rx->next) {
			if(unlikely(e100_rx_alloc_skb(nic, rx)))
				break; /* Better luck next time (see watchdog) */
		}
	}

	new_before_last_rx = nic->rx_to_use->prev->prev;
	if (new_before_last_rx != old_before_last_rx) {
		/* Set the el-bit on the buffer that is before the last buffer.
		 * This lets us update the next pointer on the last buffer
		 * without worrying about hardware touching it.
		 * We set the size to 0 to prevent hardware from touching this
		 * buffer.
		 * When the hardware hits the before last buffer with el-bit
		 * and size of 0, it will RNR interrupt, the RUS will go into
		 * the No Resources state.  It will not complete nor write to
		 * this buffer. */
		new_before_last_rfd =
			(struct rfd *)new_before_last_rx->skb->data;
		new_before_last_rfd->size = 0;
		new_before_last_rfd->command |= cpu_to_le16(cb_el);
		pci_dma_sync_single_for_device(nic->pdev,
			new_before_last_rx->dma_addr, sizeof(struct rfd),
			PCI_DMA_TODEVICE);

		/* Now that we have a new stopping point, we can clear the old
		 * stopping point.  We must sync twice to get the proper
		 * ordering on the hardware side of things. */
		old_before_last_rfd->command &= ~cpu_to_le16(cb_el);
		pci_dma_sync_single_for_device(nic->pdev,
			old_before_last_rx->dma_addr, sizeof(struct rfd),
			PCI_DMA_TODEVICE);
		old_before_last_rfd->size = cpu_to_le16(VLAN_ETH_FRAME_LEN);
		pci_dma_sync_single_for_device(nic->pdev,
			old_before_last_rx->dma_addr, sizeof(struct rfd),
			PCI_DMA_TODEVICE);
	}

	if(restart_required) {
		// ack the rnr?
		iowrite8(stat_ack_rnr, &nic->csr->scb.stat_ack);
		e100_start_receiver(nic, nic->rx_to_clean);
		if(work_done)
			(*work_done)++;
	}
}
       
  2093 
       
  2094 static void e100_rx_clean_list(struct nic *nic)
       
  2095 {
       
  2096 	struct rx *rx;
       
  2097 	unsigned int i, count = nic->params.rfds.count;
       
  2098 
       
  2099 	nic->ru_running = RU_UNINITIALIZED;
       
  2100 
       
  2101 	if(nic->rxs) {
       
  2102 		for(rx = nic->rxs, i = 0; i < count; rx++, i++) {
       
  2103 			if(rx->skb) {
       
  2104 				pci_unmap_single(nic->pdev, rx->dma_addr,
       
  2105 					RFD_BUF_LEN, PCI_DMA_FROMDEVICE);
       
  2106 				dev_kfree_skb(rx->skb);
       
  2107 			}
       
  2108 		}
       
  2109 		kfree(nic->rxs);
       
  2110 		nic->rxs = NULL;
       
  2111 	}
       
  2112 
       
  2113 	nic->rx_to_use = nic->rx_to_clean = NULL;
       
  2114 }
       
  2115 
       
/* Allocate and initialize the circular RX buffer ring.
 *
 * Builds a doubly-linked ring of nic->params.rfds.count struct rx slots,
 * attaches a DMA-mapped skb/RFD to each, and marks the buffer *before*
 * the last one as the hardware stopping point (el-bit set, size 0) so
 * the receive unit suspends there instead of racing the driver on the
 * ring tail.
 *
 * Returns 0 on success or -ENOMEM (with all partial allocations undone)
 * on failure. */
static int e100_rx_alloc_list(struct nic *nic)
{
	struct rx *rx;
	unsigned int i, count = nic->params.rfds.count;
	struct rfd *before_last;

	nic->rx_to_use = nic->rx_to_clean = NULL;
	nic->ru_running = RU_UNINITIALIZED;

	if(!(nic->rxs = kcalloc(count, sizeof(struct rx), GFP_ATOMIC)))
		return -ENOMEM;

	/* Link the slots into a circular doubly-linked list and give each
	 * one a receive buffer. */
	for(rx = nic->rxs, i = 0; i < count; rx++, i++) {
		rx->next = (i + 1 < count) ? rx + 1 : nic->rxs;
		rx->prev = (i == 0) ? nic->rxs + count - 1 : rx - 1;
		if(e100_rx_alloc_skb(nic, rx)) {
			e100_rx_clean_list(nic);
			return -ENOMEM;
		}
	}
	/* Set the el-bit on the buffer that is before the last buffer.
	 * This lets us update the next pointer on the last buffer without
	 * worrying about hardware touching it.
	 * We set the size to 0 to prevent hardware from touching this buffer.
	 * When the hardware hits the before last buffer with el-bit and size
	 * of 0, it will RNR interrupt, the RU will go into the No Resources
	 * state.  It will not complete nor write to this buffer. */
	rx = nic->rxs->prev->prev;
	before_last = (struct rfd *)rx->skb->data;
	before_last->command |= cpu_to_le16(cb_el);
	before_last->size = 0;
	/* Flush the modified RFD to the device before it can fetch it. */
	pci_dma_sync_single_for_device(nic->pdev, rx->dma_addr,
		sizeof(struct rfd), PCI_DMA_TODEVICE);

	nic->rx_to_use = nic->rx_to_clean = nic->rxs;
	nic->ru_running = RU_SUSPENDED;

	return 0;
}
       
  2155 
       
/* Interrupt handler (IRQF_SHARED).  Acknowledges all asserted SCB
 * events, records an RNR (receive unit out of resources) condition for
 * the RX cleanup path, and defers the real work to NAPI polling.  When
 * the device is claimed by the EtherCAT master (nic->ecdev set), no
 * NAPI scheduling happens — e100_ec_poll() services the rings instead. */
static irqreturn_t e100_intr(int irq, void *dev_id)
{
	struct net_device *netdev = dev_id;
	struct nic *nic = netdev_priv(netdev);
	u8 stat_ack = ioread8(&nic->csr->scb.stat_ack);

	DPRINTK(INTR, DEBUG, "stat_ack = 0x%02X\n", stat_ack);

	if(stat_ack == stat_ack_not_ours ||	/* Not our interrupt */
	   stat_ack == stat_ack_not_present)	/* Hardware is ejected */
		return IRQ_NONE;

	/* Ack interrupt(s) */
	iowrite8(stat_ack, &nic->csr->scb.stat_ack);

	/* We hit Receive No Resource (RNR); restart RU after cleaning */
	if(stat_ack & stat_ack_rnr)
		nic->ru_running = RU_SUSPENDED;

	/* Mask further interrupts and hand off to the NAPI poll routine
	 * (non-EtherCAT operation only). */
	if(!nic->ecdev && likely(netif_rx_schedule_prep(netdev, &nic->napi))) {
		e100_disable_irq(nic);
		__netif_rx_schedule(netdev, &nic->napi);
	}

	return IRQ_HANDLED;
}
       
  2182 
       
  2183 void e100_ec_poll(struct net_device *netdev)
       
  2184 {
       
  2185 	struct nic *nic = netdev_priv(netdev);
       
  2186 
       
  2187 	e100_rx_clean(nic, NULL, 100); // FIXME
       
  2188 	e100_tx_clean(nic);
       
  2189 
       
  2190     if (jiffies - nic->ec_watchdog_jiffies >= 2 * HZ) {
       
  2191         e100_watchdog((unsigned long) nic);
       
  2192         nic->ec_watchdog_jiffies = jiffies;
       
  2193     }
       
  2194 }
       
  2195 
       
  2196 
       
/* NAPI poll routine: clean received frames (up to @budget) and
 * completed TX control blocks.  Returns the number of RX frames
 * processed.  If the budget was not exhausted there is no more RX work
 * pending, so polling mode is left — completion must happen before
 * interrupts are re-enabled. */
static int e100_poll(struct napi_struct *napi, int budget)
{
	struct nic *nic = container_of(napi, struct nic, napi);
	struct net_device *netdev = nic->netdev;
	unsigned int work_done = 0;

	e100_rx_clean(nic, &work_done, budget);
	e100_tx_clean(nic);

	/* If budget not fully consumed, exit the polling mode */
	if (work_done < budget) {
		netif_rx_complete(netdev, napi);
		e100_enable_irq(nic);
	}

	return work_done;
}
       
  2214 
       
  2215 #ifdef CONFIG_NET_POLL_CONTROLLER
       
/* Netpoll hook (netconsole etc.): service the device synchronously with
 * interrupts masked.  Skipped entirely when the device is claimed by
 * the EtherCAT master, which polls via e100_ec_poll() instead. */
static void e100_netpoll(struct net_device *netdev)
{
	struct nic *nic = netdev_priv(netdev);

	if (nic->ecdev)
		return;

	e100_disable_irq(nic);
	e100_intr(nic->pdev->irq, netdev);
	e100_tx_clean(nic);
	e100_enable_irq(nic);
}
       
  2228 #endif
       
  2229 
       
  2230 static int e100_set_mac_address(struct net_device *netdev, void *p)
       
  2231 {
       
  2232 	struct nic *nic = netdev_priv(netdev);
       
  2233 	struct sockaddr *addr = p;
       
  2234 
       
  2235 	if (!is_valid_ether_addr(addr->sa_data))
       
  2236 		return -EADDRNOTAVAIL;
       
  2237 
       
  2238 	memcpy(netdev->dev_addr, addr->sa_data, netdev->addr_len);
       
  2239 	e100_exec_cb(nic, NULL, e100_setup_iaaddr);
       
  2240 
       
  2241 	return 0;
       
  2242 }
       
  2243 
       
  2244 static int e100_change_mtu(struct net_device *netdev, int new_mtu)
       
  2245 {
       
  2246 	if(new_mtu < ETH_ZLEN || new_mtu > ETH_DATA_LEN)
       
  2247 		return -EINVAL;
       
  2248 	netdev->mtu = new_mtu;
       
  2249 	return 0;
       
  2250 }
       
  2251 
       
  2252 static int e100_asf(struct nic *nic)
       
  2253 {
       
  2254 	/* ASF can be enabled from eeprom */
       
  2255 	return((nic->pdev->device >= 0x1050) && (nic->pdev->device <= 0x1057) &&
       
  2256 	   (nic->eeprom[eeprom_config_asf] & eeprom_asf) &&
       
  2257 	   !(nic->eeprom[eeprom_config_asf] & eeprom_gcl) &&
       
  2258 	   ((nic->eeprom[eeprom_smbus_addr] & 0xFF) != 0xFE));
       
  2259 }
       
  2260 
       
/* Bring the adapter to an operational state: allocate the RX ring and
 * control blocks, initialize the hardware, program the multicast list
 * and start the receiver.  In normal (non-EtherCAT) operation also arm
 * the watchdog, register the interrupt handler, wake the TX queue and
 * enable NAPI before unmasking interrupts.  Unwinds with gotos on
 * failure.  Returns 0 or a negative errno. */
static int e100_up(struct nic *nic)
{
	int err;

	if((err = e100_rx_alloc_list(nic)))
		return err;
	if((err = e100_alloc_cbs(nic)))
		goto err_rx_clean_list;
	if((err = e100_hw_init(nic)))
		goto err_clean_cbs;
	e100_set_multicast_list(nic->netdev);
	e100_start_receiver(nic, NULL);
	/* EtherCAT operation uses neither the watchdog timer, the IRQ,
	 * the TX queue nor NAPI — the master polls instead. */
	if (!nic->ecdev) {
		mod_timer(&nic->watchdog, jiffies);
		if((err = request_irq(nic->pdev->irq, e100_intr, IRQF_SHARED,
						nic->netdev->name, nic->netdev)))
			goto err_no_irq;
		netif_wake_queue(nic->netdev);
		napi_enable(&nic->napi);
		/* enable ints _after_ enabling poll, preventing a race between
		 * disable ints+schedule */
		e100_enable_irq(nic);
	}
	return 0;

err_no_irq:
	if (!nic->ecdev)
		del_timer_sync(&nic->watchdog);
err_clean_cbs:
	e100_clean_cbs(nic);
err_rx_clean_list:
	e100_rx_clean_list(nic);
	return err;
}
       
  2295 
       
/* Take the adapter down: stop NAPI polling and the TX queue first (so
 * no new work arrives), reset the hardware, then release the IRQ,
 * watchdog and carrier, and finally free the CB and RX rings.  The
 * NAPI/IRQ/queue steps are skipped in EtherCAT operation where they
 * were never set up. */
static void e100_down(struct nic *nic)
{
	if (!nic->ecdev) {
		/* wait here for poll to complete */
		napi_disable(&nic->napi);
		netif_stop_queue(nic->netdev);
	}
	e100_hw_reset(nic);
	if (!nic->ecdev) {
		free_irq(nic->pdev->irq, nic->netdev);
		del_timer_sync(&nic->watchdog);
		netif_carrier_off(nic->netdev);
	}
	e100_clean_cbs(nic);
	e100_rx_clean_list(nic);
}
       
  2312 
       
  2313 static void e100_tx_timeout(struct net_device *netdev)
       
  2314 {
       
  2315 	struct nic *nic = netdev_priv(netdev);
       
  2316 
       
  2317 	/* Reset outside of interrupt context, to avoid request_irq
       
  2318 	 * in interrupt context */
       
  2319 	schedule_work(&nic->tx_timeout_task);
       
  2320 }
       
  2321 
       
  2322 static void e100_tx_timeout_task(struct work_struct *work)
       
  2323 {
       
  2324 	struct nic *nic = container_of(work, struct nic, tx_timeout_task);
       
  2325 	struct net_device *netdev = nic->netdev;
       
  2326 
       
  2327 	DPRINTK(TX_ERR, DEBUG, "scb.status=0x%02X\n",
       
  2328 		ioread8(&nic->csr->scb.status));
       
  2329 	e100_down(netdev_priv(netdev));
       
  2330 	e100_up(netdev_priv(netdev));
       
  2331 }
       
  2332 
       
/* Run an internal MAC or PHY loopback test.
 *
 * Sets up minimal RX/CB rings, puts the MAC (or PHY, via BMCR_LOOPBACK)
 * into loopback, transmits one all-0xFF frame and compares the received
 * copy byte-for-byte.  Returns 0 on success, -EAGAIN on data mismatch,
 * or a negative errno on setup failure.  All resources are released on
 * every exit path via the goto unwind. */
static int e100_loopback_test(struct nic *nic, enum loopback loopback_mode)
{
	int err;
	struct sk_buff *skb;

	/* Use driver resources to perform internal MAC or PHY
	 * loopback test.  A single packet is prepared and transmitted
	 * in loopback mode, and the test passes if the received
	 * packet compares byte-for-byte to the transmitted packet. */

	if((err = e100_rx_alloc_list(nic)))
		return err;
	if((err = e100_alloc_cbs(nic)))
		goto err_clean_rx;

	/* ICH PHY loopback is broken so do MAC loopback instead */
	if(nic->flags & ich && loopback_mode == lb_phy)
		loopback_mode = lb_mac;

	nic->loopback = loopback_mode;
	if((err = e100_hw_init(nic)))
		goto err_loopback_none;

	if(loopback_mode == lb_phy)
		mdio_write(nic->netdev, nic->mii.phy_id, MII_BMCR,
			BMCR_LOOPBACK);

	e100_start_receiver(nic, NULL);

	if(!(skb = netdev_alloc_skb(nic->netdev, ETH_DATA_LEN))) {
		err = -ENOMEM;
		goto err_loopback_none;
	}
	skb_put(skb, ETH_DATA_LEN);
	memset(skb->data, 0xFF, ETH_DATA_LEN);
	e100_xmit_frame(skb, nic->netdev);

	/* Give the frame time to loop back into the first RX buffer. */
	msleep(10);

	/* Make the device-written RX data visible to the CPU before
	 * comparing. */
	pci_dma_sync_single_for_cpu(nic->pdev, nic->rx_to_clean->dma_addr,
			RFD_BUF_LEN, PCI_DMA_FROMDEVICE);

	/* Payload starts after the RFD header in the receive buffer. */
	if(memcmp(nic->rx_to_clean->skb->data + sizeof(struct rfd),
	   skb->data, ETH_DATA_LEN))
		err = -EAGAIN;

err_loopback_none:
	mdio_write(nic->netdev, nic->mii.phy_id, MII_BMCR, 0);
	nic->loopback = lb_none;
	e100_clean_cbs(nic);
	e100_hw_reset(nic);
err_clean_rx:
	e100_rx_clean_list(nic);
	return err;
}
       
  2388 
       
  2389 #define MII_LED_CONTROL	0x1B
       
  2390 static void e100_blink_led(unsigned long data)
       
  2391 {
       
  2392 	struct nic *nic = (struct nic *)data;
       
  2393 	enum led_state {
       
  2394 		led_on     = 0x01,
       
  2395 		led_off    = 0x04,
       
  2396 		led_on_559 = 0x05,
       
  2397 		led_on_557 = 0x07,
       
  2398 	};
       
  2399 
       
  2400 	nic->leds = (nic->leds & led_on) ? led_off :
       
  2401 		(nic->mac < mac_82559_D101M) ? led_on_557 : led_on_559;
       
  2402 	mdio_write(nic->netdev, nic->mii.phy_id, MII_LED_CONTROL, nic->leds);
       
  2403 	mod_timer(&nic->blink_timer, jiffies + HZ / 4);
       
  2404 }
       
  2405 
       
/* ethtool get_settings: report link parameters via the generic MII
 * library using the driver's cached MII state. */
static int e100_get_settings(struct net_device *netdev, struct ethtool_cmd *cmd)
{
	struct nic *nic = netdev_priv(netdev);
	return mii_ethtool_gset(&nic->mii, cmd);
}
       
  2411 
       
/* ethtool set_settings: reset the PHY, apply the requested
 * speed/duplex/autoneg through the MII library, then re-issue the
 * CONFIGURE command so the MAC picks up the new link parameters. */
static int e100_set_settings(struct net_device *netdev, struct ethtool_cmd *cmd)
{
	struct nic *nic = netdev_priv(netdev);
	int err;

	mdio_write(netdev, nic->mii.phy_id, MII_BMCR, BMCR_RESET);
	err = mii_ethtool_sset(&nic->mii, cmd);
	e100_exec_cb(nic, NULL, e100_configure);

	return err;
}
       
  2423 
       
  2424 static void e100_get_drvinfo(struct net_device *netdev,
       
  2425 	struct ethtool_drvinfo *info)
       
  2426 {
       
  2427 	struct nic *nic = netdev_priv(netdev);
       
  2428 	strcpy(info->driver, DRV_NAME);
       
  2429 	strcpy(info->version, DRV_VERSION);
       
  2430 	strcpy(info->fw_version, "N/A");
       
  2431 	strcpy(info->bus_info, pci_name(nic->pdev));
       
  2432 }
       
  2433 
       
  2434 #define E100_PHY_REGS 0x1C
       
  2435 static int e100_get_regs_len(struct net_device *netdev)
       
  2436 {
       
  2437 	struct nic *nic = netdev_priv(netdev);
       
  2438 	return 1 + E100_PHY_REGS + sizeof(nic->mem->dump_buf);
       
  2439 }
       
  2440 
       
/* ethtool get_regs: dump device registers as u32s.
 * Layout: buff[0] = SCB command/status snapshot; buff[1]..buff[1 +
 * E100_PHY_REGS] = MDIO registers E100_PHY_REGS down to 0 (note the
 * reverse order); the firmware dump area (fetched via a DUMP control
 * block, with a 10 ms wait for completion) follows at
 * &buff[2 + E100_PHY_REGS].  e100_get_regs_len() must account for all
 * of these u32 slots in bytes. */
static void e100_get_regs(struct net_device *netdev,
	struct ethtool_regs *regs, void *p)
{
	struct nic *nic = netdev_priv(netdev);
	u32 *buff = p;
	int i;

	regs->version = (1 << 24) | nic->pdev->revision;
	buff[0] = ioread8(&nic->csr->scb.cmd_hi) << 24 |
		ioread8(&nic->csr->scb.cmd_lo) << 16 |
		ioread16(&nic->csr->scb.status);
	for(i = E100_PHY_REGS; i >= 0; i--)
		buff[1 + E100_PHY_REGS - i] =
			mdio_read(netdev, nic->mii.phy_id, i);
	memset(nic->mem->dump_buf, 0, sizeof(nic->mem->dump_buf));
	e100_exec_cb(nic, NULL, e100_dump);
	/* Give the DUMP command time to fill dump_buf before copying. */
	msleep(10);
	memcpy(&buff[2 + E100_PHY_REGS], nic->mem->dump_buf,
		sizeof(nic->mem->dump_buf));
}
       
  2461 
       
  2462 static void e100_get_wol(struct net_device *netdev, struct ethtool_wolinfo *wol)
       
  2463 {
       
  2464 	struct nic *nic = netdev_priv(netdev);
       
  2465 	wol->supported = (nic->mac >= mac_82558_D101_A4) ?  WAKE_MAGIC : 0;
       
  2466 	wol->wolopts = (nic->flags & wol_magic) ? WAKE_MAGIC : 0;
       
  2467 }
       
  2468 
       
  2469 static int e100_set_wol(struct net_device *netdev, struct ethtool_wolinfo *wol)
       
  2470 {
       
  2471 	struct nic *nic = netdev_priv(netdev);
       
  2472 
       
  2473 	if(wol->wolopts != WAKE_MAGIC && wol->wolopts != 0)
       
  2474 		return -EOPNOTSUPP;
       
  2475 
       
  2476 	if(wol->wolopts)
       
  2477 		nic->flags |= wol_magic;
       
  2478 	else
       
  2479 		nic->flags &= ~wol_magic;
       
  2480 
       
  2481 	e100_exec_cb(nic, NULL, e100_configure);
       
  2482 
       
  2483 	return 0;
       
  2484 }
       
  2485 
       
/* ethtool get_msglevel: return the driver's message-enable bitmask. */
static u32 e100_get_msglevel(struct net_device *netdev)
{
	struct nic *nic = netdev_priv(netdev);
	return nic->msg_enable;
}
       
  2491 
       
/* ethtool set_msglevel: replace the driver's message-enable bitmask. */
static void e100_set_msglevel(struct net_device *netdev, u32 value)
{
	struct nic *nic = netdev_priv(netdev);
	nic->msg_enable = value;
}
       
  2497 
       
/* ethtool nway_reset: restart PHY autonegotiation via the MII library. */
static int e100_nway_reset(struct net_device *netdev)
{
	struct nic *nic = netdev_priv(netdev);
	return mii_nway_restart(&nic->mii);
}
       
  2503 
       
/* ethtool get_link: report link state from the MII library. */
static u32 e100_get_link(struct net_device *netdev)
{
	struct nic *nic = netdev_priv(netdev);
	return mii_link_ok(&nic->mii);
}
       
  2509 
       
/* ethtool get_eeprom_len: EEPROM size in bytes — eeprom_wc is a count
 * of 16-bit words, hence the shift. */
static int e100_get_eeprom_len(struct net_device *netdev)
{
	struct nic *nic = netdev_priv(netdev);
	return nic->eeprom_wc << 1;
}
       
  2515 
       
#define E100_EEPROM_MAGIC	0x1234
/* ethtool get_eeprom: copy the requested window out of the driver's
 * cached EEPROM image.
 * NOTE(review): offset/len are assumed to be pre-validated against
 * get_eeprom_len() by the ethtool core — confirm before relying on
 * this from other callers. */
static int e100_get_eeprom(struct net_device *netdev,
	struct ethtool_eeprom *eeprom, u8 *bytes)
{
	struct nic *nic = netdev_priv(netdev);

	eeprom->magic = E100_EEPROM_MAGIC;
	memcpy(bytes, &((u8 *)nic->eeprom)[eeprom->offset], eeprom->len);

	return 0;
}
       
  2527 
       
/* ethtool set_eeprom: patch the cached EEPROM image, then write the
 * affected 16-bit words back to the device via e100_eeprom_save().
 * The magic value guards against accidental writes.  The word count
 * (len >> 1) + 1 rounds up so a byte range with an odd offset or
 * length still covers every touched word. */
static int e100_set_eeprom(struct net_device *netdev,
	struct ethtool_eeprom *eeprom, u8 *bytes)
{
	struct nic *nic = netdev_priv(netdev);

	if(eeprom->magic != E100_EEPROM_MAGIC)
		return -EINVAL;

	memcpy(&((u8 *)nic->eeprom)[eeprom->offset], bytes, eeprom->len);

	return e100_eeprom_save(nic, eeprom->offset >> 1,
		(eeprom->len >> 1) + 1);
}
       
  2541 
       
  2542 static void e100_get_ringparam(struct net_device *netdev,
       
  2543 	struct ethtool_ringparam *ring)
       
  2544 {
       
  2545 	struct nic *nic = netdev_priv(netdev);
       
  2546 	struct param_range *rfds = &nic->params.rfds;
       
  2547 	struct param_range *cbs = &nic->params.cbs;
       
  2548 
       
  2549 	ring->rx_max_pending = rfds->max;
       
  2550 	ring->tx_max_pending = cbs->max;
       
  2551 	ring->rx_mini_max_pending = 0;
       
  2552 	ring->rx_jumbo_max_pending = 0;
       
  2553 	ring->rx_pending = rfds->count;
       
  2554 	ring->tx_pending = cbs->count;
       
  2555 	ring->rx_mini_pending = 0;
       
  2556 	ring->rx_jumbo_pending = 0;
       
  2557 }
       
  2558 
       
  2559 static int e100_set_ringparam(struct net_device *netdev,
       
  2560 	struct ethtool_ringparam *ring)
       
  2561 {
       
  2562 	struct nic *nic = netdev_priv(netdev);
       
  2563 	struct param_range *rfds = &nic->params.rfds;
       
  2564 	struct param_range *cbs = &nic->params.cbs;
       
  2565 
       
  2566 	if ((ring->rx_mini_pending) || (ring->rx_jumbo_pending))
       
  2567 		return -EINVAL;
       
  2568 
       
  2569 	if(netif_running(netdev))
       
  2570 		e100_down(nic);
       
  2571 	rfds->count = max(ring->rx_pending, rfds->min);
       
  2572 	rfds->count = min(rfds->count, rfds->max);
       
  2573 	cbs->count = max(ring->tx_pending, cbs->min);
       
  2574 	cbs->count = min(cbs->count, cbs->max);
       
  2575 	DPRINTK(DRV, INFO, "Ring Param settings: rx: %d, tx %d\n",
       
  2576 	        rfds->count, cbs->count);
       
  2577 	if(netif_running(netdev))
       
  2578 		e100_up(nic);
       
  2579 
       
  2580 	return 0;
       
  2581 }
       
  2582 
       
/* Names for the ethtool self-test results, in the order the data[]
 * slots are filled by e100_diag_test(). */
static const char e100_gstrings_test[][ETH_GSTRING_LEN] = {
	"Link test     (on/offline)",
	"Eeprom test   (on/offline)",
	"Self test        (offline)",
	"Mac loopback     (offline)",
	"Phy loopback     (offline)",
};
#define E100_TEST_LEN	ARRAY_SIZE(e100_gstrings_test)
       
  2591 
       
/* ethtool self-test.  data[0] = link down?, data[1] = EEPROM checksum
 * failure, data[2..4] = self-test / MAC loopback / PHY loopback (run
 * only for offline tests, with the interface cycled down and back up
 * around them).  Any non-zero entry marks the whole test failed. */
static void e100_diag_test(struct net_device *netdev,
	struct ethtool_test *test, u64 *data)
{
	struct ethtool_cmd cmd;
	struct nic *nic = netdev_priv(netdev);
	int i, err;

	memset(data, 0, E100_TEST_LEN * sizeof(u64));
	data[0] = !mii_link_ok(&nic->mii);
	data[1] = e100_eeprom_load(nic);
	if(test->flags & ETH_TEST_FL_OFFLINE) {

		/* save speed, duplex & autoneg settings */
		err = mii_ethtool_gset(&nic->mii, &cmd);

		if(netif_running(netdev))
			e100_down(nic);
		data[2] = e100_self_test(nic);
		data[3] = e100_loopback_test(nic, lb_mac);
		data[4] = e100_loopback_test(nic, lb_phy);

		/* restore speed, duplex & autoneg settings */
		/* NOTE(review): err from gset/sset is ignored — a failed
		 * save/restore of link settings is not reported. */
		err = mii_ethtool_sset(&nic->mii, &cmd);

		if(netif_running(netdev))
			e100_up(nic);
	}
	for(i = 0; i < E100_TEST_LEN; i++)
		test->flags |= data[i] ? ETH_TEST_FL_FAILED : 0;

	/* NOTE(review): purpose of this fixed 4 s delay is not evident
	 * from this code — presumably settle time after the tests. */
	msleep_interruptible(4 * 1000);
}
       
  2624 
       
/* ethtool phys_id: blink the port LED for @data seconds to identify the
 * adapter.  Zero or over-large values are clamped to the scheduler
 * maximum.  Blinking is driven by nic->blink_timer (e100_blink_led);
 * afterwards LED control is handed back to the hardware. */
static int e100_phys_id(struct net_device *netdev, u32 data)
{
	struct nic *nic = netdev_priv(netdev);

	if(!data || data > (u32)(MAX_SCHEDULE_TIMEOUT / HZ))
		data = (u32)(MAX_SCHEDULE_TIMEOUT / HZ);
	mod_timer(&nic->blink_timer, jiffies);
	msleep_interruptible(data * 1000);
	del_timer_sync(&nic->blink_timer);
	mdio_write(netdev, nic->mii.phy_id, MII_LED_CONTROL, 0);

	return 0;
}
       
  2638 
       
/* Names for the ethtool statistics, in the order they are emitted by
 * e100_get_ethtool_stats(): first the E100_NET_STATS_LEN generic netdev
 * counters, then the device-specific ones. */
static const char e100_gstrings_stats[][ETH_GSTRING_LEN] = {
	"rx_packets", "tx_packets", "rx_bytes", "tx_bytes", "rx_errors",
	"tx_errors", "rx_dropped", "tx_dropped", "multicast", "collisions",
	"rx_length_errors", "rx_over_errors", "rx_crc_errors",
	"rx_frame_errors", "rx_fifo_errors", "rx_missed_errors",
	"tx_aborted_errors", "tx_carrier_errors", "tx_fifo_errors",
	"tx_heartbeat_errors", "tx_window_errors",
	/* device-specific stats */
	"tx_deferred", "tx_single_collisions", "tx_multi_collisions",
	"tx_flow_control_pause", "rx_flow_control_pause",
	"rx_flow_control_unsupported", "tx_tco_packets", "rx_tco_packets",
};
/* Count of leading entries sourced from struct net_device_stats. */
#define E100_NET_STATS_LEN	21
#define E100_STATS_LEN	ARRAY_SIZE(e100_gstrings_stats)
       
  2653 
       
  2654 static int e100_get_sset_count(struct net_device *netdev, int sset)
       
  2655 {
       
  2656 	switch (sset) {
       
  2657 	case ETH_SS_TEST:
       
  2658 		return E100_TEST_LEN;
       
  2659 	case ETH_SS_STATS:
       
  2660 		return E100_STATS_LEN;
       
  2661 	default:
       
  2662 		return -EOPNOTSUPP;
       
  2663 	}
       
  2664 }
       
  2665 
       
/* ethtool get_ethtool_stats: export counters in the exact order of
 * e100_gstrings_stats — the generic netdev counters first, then the
 * device-specific ones.
 * NOTE(review): the generic counters are read by indexing
 * netdev->stats as an array of unsigned long; this assumes the first
 * E100_NET_STATS_LEN members of struct net_device_stats are unsigned
 * longs in declaration order — verify against the kernel headers if
 * the struct ever changes. */
static void e100_get_ethtool_stats(struct net_device *netdev,
	struct ethtool_stats *stats, u64 *data)
{
	struct nic *nic = netdev_priv(netdev);
	int i;

	for(i = 0; i < E100_NET_STATS_LEN; i++)
		data[i] = ((unsigned long *)&netdev->stats)[i];

	data[i++] = nic->tx_deferred;
	data[i++] = nic->tx_single_collisions;
	data[i++] = nic->tx_multiple_collisions;
	data[i++] = nic->tx_fc_pause;
	data[i++] = nic->rx_fc_pause;
	data[i++] = nic->rx_fc_unsupported;
	data[i++] = nic->tx_tco_frames;
	data[i++] = nic->rx_tco_frames;
}
       
  2684 
       
  2685 static void e100_get_strings(struct net_device *netdev, u32 stringset, u8 *data)
       
  2686 {
       
  2687 	switch(stringset) {
       
  2688 	case ETH_SS_TEST:
       
  2689 		memcpy(data, *e100_gstrings_test, sizeof(e100_gstrings_test));
       
  2690 		break;
       
  2691 	case ETH_SS_STATS:
       
  2692 		memcpy(data, *e100_gstrings_stats, sizeof(e100_gstrings_stats));
       
  2693 		break;
       
  2694 	}
       
  2695 }
       
  2696 
       
/* ethtool operations table wired into the net_device in e100_probe(). */
static const struct ethtool_ops e100_ethtool_ops = {
	.get_settings		= e100_get_settings,
	.set_settings		= e100_set_settings,
	.get_drvinfo		= e100_get_drvinfo,
	.get_regs_len		= e100_get_regs_len,
	.get_regs		= e100_get_regs,
	.get_wol		= e100_get_wol,
	.set_wol		= e100_set_wol,
	.get_msglevel		= e100_get_msglevel,
	.set_msglevel		= e100_set_msglevel,
	.nway_reset		= e100_nway_reset,
	.get_link		= e100_get_link,
	.get_eeprom_len		= e100_get_eeprom_len,
	.get_eeprom		= e100_get_eeprom,
	.set_eeprom		= e100_set_eeprom,
	.get_ringparam		= e100_get_ringparam,
	.set_ringparam		= e100_set_ringparam,
	.self_test		= e100_diag_test,
	.get_strings		= e100_get_strings,
	.phys_id		= e100_phys_id,
	.get_ethtool_stats	= e100_get_ethtool_stats,
	.get_sset_count		= e100_get_sset_count,
};
       
  2720 
       
/* net_device ioctl hook: forward MII ioctls (SIOCGMIIPHY etc.) to the
 * generic MII library; everything else is rejected by it. */
static int e100_do_ioctl(struct net_device *netdev, struct ifreq *ifr, int cmd)
{
	struct nic *nic = netdev_priv(netdev);

	return generic_mii_ioctl(&nic->mii, if_mii(ifr), cmd, NULL);
}
       
  2727 
       
  2728 static int e100_alloc(struct nic *nic)
       
  2729 {
       
  2730 	nic->mem = pci_alloc_consistent(nic->pdev, sizeof(struct mem),
       
  2731 		&nic->dma_addr);
       
  2732 	return nic->mem ? 0 : -ENOMEM;
       
  2733 }
       
  2734 
       
  2735 static void e100_free(struct nic *nic)
       
  2736 {
       
  2737 	if(nic->mem) {
       
  2738 		pci_free_consistent(nic->pdev, sizeof(struct mem),
       
  2739 			nic->mem, nic->dma_addr);
       
  2740 		nic->mem = NULL;
       
  2741 	}
       
  2742 }
       
  2743 
       
  2744 static int e100_open(struct net_device *netdev)
       
  2745 {
       
  2746 	struct nic *nic = netdev_priv(netdev);
       
  2747 	int err = 0;
       
  2748 
       
  2749 	if (!nic->ecdev)
       
  2750 		netif_carrier_off(netdev);
       
  2751 	if((err = e100_up(nic)))
       
  2752 		DPRINTK(IFUP, ERR, "Cannot open interface, aborting.\n");
       
  2753 	return err;
       
  2754 }
       
  2755 
       
/* net_device stop hook: tear the adapter down.  Always succeeds. */
static int e100_close(struct net_device *netdev)
{
	e100_down(netdev_priv(netdev));
	return 0;
}
       
  2761 
       
  2762 static int __devinit e100_probe(struct pci_dev *pdev,
       
  2763 	const struct pci_device_id *ent)
       
  2764 {
       
  2765 	struct net_device *netdev;
       
  2766 	struct nic *nic;
       
  2767 	int err;
       
  2768 	DECLARE_MAC_BUF(mac);
       
  2769 
       
  2770 	if(!(netdev = alloc_etherdev(sizeof(struct nic)))) {
       
  2771 		if(((1 << debug) - 1) & NETIF_MSG_PROBE)
       
  2772 			printk(KERN_ERR PFX "Etherdev alloc failed, abort.\n");
       
  2773 		return -ENOMEM;
       
  2774 	}
       
  2775 
       
  2776 	netdev->open = e100_open;
       
  2777 	netdev->stop = e100_close;
       
  2778 	netdev->hard_start_xmit = e100_xmit_frame;
       
  2779 	netdev->set_multicast_list = e100_set_multicast_list;
       
  2780 	netdev->set_mac_address = e100_set_mac_address;
       
  2781 	netdev->change_mtu = e100_change_mtu;
       
  2782 	netdev->do_ioctl = e100_do_ioctl;
       
  2783 	SET_ETHTOOL_OPS(netdev, &e100_ethtool_ops);
       
  2784 	netdev->tx_timeout = e100_tx_timeout;
       
  2785 	netdev->watchdog_timeo = E100_WATCHDOG_PERIOD;
       
  2786 #ifdef CONFIG_NET_POLL_CONTROLLER
       
  2787 	netdev->poll_controller = e100_netpoll;
       
  2788 #endif
       
  2789 	strncpy(netdev->name, pci_name(pdev), sizeof(netdev->name) - 1);
       
  2790 
       
  2791 	nic = netdev_priv(netdev);
       
  2792 	netif_napi_add(netdev, &nic->napi, e100_poll, E100_NAPI_WEIGHT);
       
  2793 	nic->netdev = netdev;
       
  2794 	nic->pdev = pdev;
       
  2795 	nic->msg_enable = (1 << debug) - 1;
       
  2796 	pci_set_drvdata(pdev, netdev);
       
  2797 
       
  2798 	if((err = pci_enable_device(pdev))) {
       
  2799 		DPRINTK(PROBE, ERR, "Cannot enable PCI device, aborting.\n");
       
  2800 		goto err_out_free_dev;
       
  2801 	}
       
  2802 
       
  2803 	if(!(pci_resource_flags(pdev, 0) & IORESOURCE_MEM)) {
       
  2804 		DPRINTK(PROBE, ERR, "Cannot find proper PCI device "
       
  2805 			"base address, aborting.\n");
       
  2806 		err = -ENODEV;
       
  2807 		goto err_out_disable_pdev;
       
  2808 	}
       
  2809 
       
  2810 	if((err = pci_request_regions(pdev, DRV_NAME))) {
       
  2811 		DPRINTK(PROBE, ERR, "Cannot obtain PCI resources, aborting.\n");
       
  2812 		goto err_out_disable_pdev;
       
  2813 	}
       
  2814 
       
  2815 	if((err = pci_set_dma_mask(pdev, DMA_32BIT_MASK))) {
       
  2816 		DPRINTK(PROBE, ERR, "No usable DMA configuration, aborting.\n");
       
  2817 		goto err_out_free_res;
       
  2818 	}
       
  2819 
       
  2820 	SET_NETDEV_DEV(netdev, &pdev->dev);
       
  2821 
       
  2822 	if (use_io)
       
  2823 		DPRINTK(PROBE, INFO, "using i/o access mode\n");
       
  2824 
       
  2825 	nic->csr = pci_iomap(pdev, (use_io ? 1 : 0), sizeof(struct csr));
       
  2826 	if(!nic->csr) {
       
  2827 		DPRINTK(PROBE, ERR, "Cannot map device registers, aborting.\n");
       
  2828 		err = -ENOMEM;
       
  2829 		goto err_out_free_res;
       
  2830 	}
       
  2831 
       
  2832 	if(ent->driver_data)
       
  2833 		nic->flags |= ich;
       
  2834 	else
       
  2835 		nic->flags &= ~ich;
       
  2836 
       
  2837 	e100_get_defaults(nic);
       
  2838 
       
  2839 	/* locks must be initialized before calling hw_reset */
       
  2840 	spin_lock_init(&nic->cb_lock);
       
  2841 	spin_lock_init(&nic->cmd_lock);
       
  2842 	spin_lock_init(&nic->mdio_lock);
       
  2843 
       
  2844 	/* Reset the device before pci_set_master() in case device is in some
       
  2845 	 * funky state and has an interrupt pending - hint: we don't have the
       
  2846 	 * interrupt handler registered yet. */
       
  2847 	e100_hw_reset(nic);
       
  2848 
       
  2849 	pci_set_master(pdev);
       
  2850 
       
  2851 	init_timer(&nic->watchdog);
       
  2852 	nic->watchdog.function = e100_watchdog;
       
  2853 	nic->watchdog.data = (unsigned long)nic;
       
  2854 	init_timer(&nic->blink_timer);
       
  2855 	nic->blink_timer.function = e100_blink_led;
       
  2856 	nic->blink_timer.data = (unsigned long)nic;
       
  2857 
       
  2858 	INIT_WORK(&nic->tx_timeout_task, e100_tx_timeout_task);
       
  2859 
       
  2860 	if((err = e100_alloc(nic))) {
       
  2861 		DPRINTK(PROBE, ERR, "Cannot alloc driver memory, aborting.\n");
       
  2862 		goto err_out_iounmap;
       
  2863 	}
       
  2864 
       
  2865 	if((err = e100_eeprom_load(nic)))
       
  2866 		goto err_out_free;
       
  2867 
       
  2868 	e100_phy_init(nic);
       
  2869 
       
  2870 	memcpy(netdev->dev_addr, nic->eeprom, ETH_ALEN);
       
  2871 	memcpy(netdev->perm_addr, nic->eeprom, ETH_ALEN);
       
  2872 	if (!is_valid_ether_addr(netdev->perm_addr)) {
       
  2873 		if (!eeprom_bad_csum_allow) {
       
  2874 			DPRINTK(PROBE, ERR, "Invalid MAC address from "
       
  2875 			        "EEPROM, aborting.\n");
       
  2876 			err = -EAGAIN;
       
  2877 			goto err_out_free;
       
  2878 		} else {
       
  2879 			DPRINTK(PROBE, ERR, "Invalid MAC address from EEPROM, "
       
  2880 			        "you MUST configure one.\n");
       
  2881 		}
       
  2882 	}
       
  2883 
       
  2884 	/* Wol magic packet can be enabled from eeprom */
       
  2885 	if((nic->mac >= mac_82558_D101_A4) &&
       
  2886 	   (nic->eeprom[eeprom_id] & eeprom_id_wol))
       
  2887 		nic->flags |= wol_magic;
       
  2888 
       
  2889 	/* ack any pending wake events, disable PME */
       
  2890 	err = pci_enable_wake(pdev, 0, 0);
       
  2891 	if (err)
       
  2892 		DPRINTK(PROBE, ERR, "Error clearing wake event\n");
       
  2893 
       
  2894 	// offer device to EtherCAT master module
       
  2895 	nic->ecdev = ecdev_offer(netdev, e100_ec_poll, THIS_MODULE);
       
  2896 	if (nic->ecdev) {
       
  2897 		strcpy(netdev->name, "ec0");
       
  2898 		if (ecdev_open(nic->ecdev)) {
       
  2899 			ecdev_withdraw(nic->ecdev);
       
  2900 			goto err_out_free;
       
  2901 		}
       
  2902 	} else {
       
  2903 		strcpy(netdev->name, "eth%d");
       
  2904 		if((err = register_netdev(netdev))) {
       
  2905 			DPRINTK(PROBE, ERR, "Cannot register net device, aborting.\n");
       
  2906 			goto err_out_free;
       
  2907 		}
       
  2908 	}
       
  2909 
       
  2910 	DPRINTK(PROBE, INFO, "addr 0x%llx, irq %d, MAC addr %s\n",
       
  2911 		(unsigned long long)pci_resource_start(pdev, use_io ? 1 : 0),
       
  2912 		pdev->irq, print_mac(mac, netdev->dev_addr));
       
  2913 
       
  2914 	return 0;
       
  2915 
       
  2916 err_out_free:
       
  2917 	e100_free(nic);
       
  2918 err_out_iounmap:
       
  2919 	pci_iounmap(pdev, nic->csr);
       
  2920 err_out_free_res:
       
  2921 	pci_release_regions(pdev);
       
  2922 err_out_disable_pdev:
       
  2923 	pci_disable_device(pdev);
       
  2924 err_out_free_dev:
       
  2925 	pci_set_drvdata(pdev, NULL);
       
  2926 	free_netdev(netdev);
       
  2927 	return err;
       
  2928 }
       
  2929 
       
  2930 static void __devexit e100_remove(struct pci_dev *pdev)
       
  2931 {
       
  2932 	struct net_device *netdev = pci_get_drvdata(pdev);
       
  2933 
       
  2934 	if(netdev) {
       
  2935 		struct nic *nic = netdev_priv(netdev);
       
  2936 		if (nic->ecdev) {
       
  2937 			ecdev_close(nic->ecdev);
       
  2938 			ecdev_withdraw(nic->ecdev);
       
  2939 		} else {
       
  2940 			unregister_netdev(netdev);
       
  2941 		}
       
  2942 
       
  2943 		e100_free(nic);
       
  2944 		pci_iounmap(pdev, nic->csr);
       
  2945 		free_netdev(netdev);
       
  2946 		pci_release_regions(pdev);
       
  2947 		pci_disable_device(pdev);
       
  2948 		pci_set_drvdata(pdev, NULL);
       
  2949 	}
       
  2950 }
       
  2951 
       
  2952 static int e100_suspend(struct pci_dev *pdev, pm_message_t state)
       
  2953 {
       
  2954 	struct net_device *netdev = pci_get_drvdata(pdev);
       
  2955 	struct nic *nic = netdev_priv(netdev);
       
  2956 
       
  2957 	if (nic->ecdev)
       
  2958 		return 0;
       
  2959 
       
  2960 	if (netif_running(netdev))
       
  2961 		e100_down(nic);
       
  2962 	netif_device_detach(netdev);
       
  2963 
       
  2964 	pci_save_state(pdev);
       
  2965 
       
  2966 	if ((nic->flags & wol_magic) | e100_asf(nic)) {
       
  2967 		pci_enable_wake(pdev, PCI_D3hot, 1);
       
  2968 		pci_enable_wake(pdev, PCI_D3cold, 1);
       
  2969 	} else {
       
  2970 		pci_enable_wake(pdev, PCI_D3hot, 0);
       
  2971 		pci_enable_wake(pdev, PCI_D3cold, 0);
       
  2972 	}
       
  2973 
       
  2974 	pci_disable_device(pdev);
       
  2975 	pci_set_power_state(pdev, PCI_D3hot);
       
  2976 
       
  2977 	return 0;
       
  2978 }
       
  2979 
       
  2980 #ifdef CONFIG_PM
       
  2981 static int e100_resume(struct pci_dev *pdev)
       
  2982 {
       
  2983 	struct net_device *netdev = pci_get_drvdata(pdev);
       
  2984 	struct nic *nic = netdev_priv(netdev);
       
  2985 
       
  2986 	if (nic->ecdev)
       
  2987 		return 0;
       
  2988 
       
  2989 	pci_set_power_state(pdev, PCI_D0);
       
  2990 	pci_restore_state(pdev);
       
  2991 	/* ack any pending wake events, disable PME */
       
  2992 	pci_enable_wake(pdev, 0, 0);
       
  2993 
       
  2994 	netif_device_attach(netdev);
       
  2995 	if (netif_running(netdev))
       
  2996 		e100_up(nic);
       
  2997 
       
  2998 	return 0;
       
  2999 }
       
  3000 #endif /* CONFIG_PM */
       
  3001 
       
/* e100_shutdown - reboot/poweroff hook.
 *
 * Routes through the suspend path, which stops the NIC and programs
 * the wake-on-LAN state before the machine goes down. */
static void e100_shutdown(struct pci_dev *pdev)
{
	e100_suspend(pdev, PMSG_SUSPEND);
}
       
  3006 
       
  3007 /* ------------------ PCI Error Recovery infrastructure  -------------- */
       
  3008 /**
       
  3009  * e100_io_error_detected - called when PCI error is detected.
       
  3010  * @pdev: Pointer to PCI device
       
  3011  * @state: The current pci connection state
       
  3012  */
       
static pci_ers_result_t e100_io_error_detected(struct pci_dev *pdev, pci_channel_state_t state)
{
	struct net_device *netdev = pci_get_drvdata(pdev);
	struct nic *nic = netdev_priv(netdev);

	/* Similar to calling e100_down(), but avoids adapter I/O. */
	netdev->stop(netdev);

	/* Only a non-EtherCAT device is registered with the net stack,
	 * so only then is there a netif to detach. */
	if (!nic->ecdev) {
		/* Detach; put netif into a state similar to hotplug unplug.
		 * NOTE(review): napi_enable() here presumably re-balances a
		 * napi_disable() done in the stop path so that the later
		 * recovery e100_up() doesn't double-enable — confirm against
		 * e100_down()/e100_up() elsewhere in this file. */
		napi_enable(&nic->napi);
		netif_device_detach(netdev);
	}
	pci_disable_device(pdev);

	/* Request a slot reset. */
	return PCI_ERS_RESULT_NEED_RESET;
}
       
  3031 
       
  3032 /**
       
  3033  * e100_io_slot_reset - called after the pci bus has been reset.
       
  3034  * @pdev: Pointer to PCI device
       
  3035  *
       
  3036  * Restart the card from scratch.
       
  3037  */
       
static pci_ers_result_t e100_io_slot_reset(struct pci_dev *pdev)
{
	struct net_device *netdev = pci_get_drvdata(pdev);
	struct nic *nic = netdev_priv(netdev);

	/* Re-enable the device first; without that no register access
	 * (and hence no reset) is possible. */
	if (pci_enable_device(pdev)) {
		printk(KERN_ERR "e100: Cannot re-enable PCI device after reset.\n");
		return PCI_ERS_RESULT_DISCONNECT;
	}
	pci_set_master(pdev);

	/* Only one device per card can do a reset */
	if (0 != PCI_FUNC(pdev->devfn))
		return PCI_ERS_RESULT_RECOVERED;
	/* Function 0 resets the hardware and re-initializes the PHY for
	 * the whole card. */
	e100_hw_reset(nic);
	e100_phy_init(nic);

	return PCI_ERS_RESULT_RECOVERED;
}
       
  3057 
       
  3058 /**
       
  3059  * e100_io_resume - resume normal operations
       
  3060  * @pdev: Pointer to PCI device
       
  3061  *
       
  3062  * Resume normal operations after an error recovery
       
  3063  * sequence has been completed.
       
  3064  */
       
static void e100_io_resume(struct pci_dev *pdev)
{
	struct net_device *netdev = pci_get_drvdata(pdev);
	struct nic *nic = netdev_priv(netdev);

	/* ack any pending wake events, disable PME */
	pci_enable_wake(pdev, 0, 0);

	/* An EtherCAT-claimed device was never detached from the net
	 * stack (see e100_io_error_detected), so only re-attach the
	 * ordinary case. */
	if (!nic->ecdev)
		netif_device_attach(netdev);
	/* EtherCAT devices are always considered "running"; ordinary
	 * devices are reopened only if they were up before the error. */
	if (nic->ecdev || netif_running(netdev)) {
		e100_open(netdev);
		/* The EtherCAT master polls the device itself; the
		 * watchdog timer is only used in normal operation. */
		if (!nic->ecdev)
			mod_timer(&nic->watchdog, jiffies);
	}
}
       
  3081 
       
/* PCI error recovery callbacks (see e100_io_* above): detect the
 * error, reset the slot, then resume normal operation. */
static struct pci_error_handlers e100_err_handler = {
	.error_detected = e100_io_error_detected,
	.slot_reset = e100_io_slot_reset,
	.resume = e100_io_resume,
};
       
  3087 
       
/* PCI driver descriptor tying the probe/remove/PM/error-recovery
 * entry points to the device IDs in e100_id_table. */
static struct pci_driver e100_driver = {
	.name =         DRV_NAME,
	.id_table =     e100_id_table,
	.probe =        e100_probe,
	.remove =       __devexit_p(e100_remove),
#ifdef CONFIG_PM
	/* Power Management hooks */
	.suspend =      e100_suspend,
	.resume =       e100_resume,
#endif
	.shutdown =     e100_shutdown,
	.err_handler = &e100_err_handler,
};
       
  3101 
       
  3102 static int __init e100_init_module(void)
       
  3103 {
       
  3104     printk(KERN_INFO DRV_NAME " " DRV_DESCRIPTION " " DRV_VERSION
       
  3105             ", master " EC_MASTER_VERSION "\n");
       
  3106 
       
  3107 	return pci_register_driver(&e100_driver);
       
  3108 }
       
  3109 
       
/* e100_cleanup_module - module exit point.
 *
 * Unregisters the PCI driver, which triggers e100_remove() for every
 * bound device; the surrounding printks bracket the teardown in the
 * kernel log. */
static void __exit e100_cleanup_module(void)
{
	printk(KERN_INFO DRV_NAME " cleaning up module...\n");
	pci_unregister_driver(&e100_driver);
	printk(KERN_INFO DRV_NAME " module cleaned up.\n");
}
       
  3116 
       
/* Register the module's load/unload entry points with the kernel. */
module_init(e100_init_module);
module_exit(e100_cleanup_module);