devices/e100-2.6.33-ethercat.c
changeset 2080 42fbd117c3e3
parent 2053 cceb9aacf4a6
child 2183 b784af1203a4
equal deleted inserted replaced
2079:56993027a2d0 2080:42fbd117c3e3
       
     1 /******************************************************************************
       
     2  *
       
     3  *  $Id$
       
     4  *
       
     5  *  Copyright (C) 2007-2008  Florian Pose, Ingenieurgemeinschaft IgH
       
     6  *
       
     7  *  This file is part of the IgH EtherCAT Master.
       
     8  *
       
     9  *  The IgH EtherCAT Master is free software; you can redistribute it and/or
       
    10  *  modify it under the terms of the GNU General Public License version 2, as
       
    11  *  published by the Free Software Foundation.
       
    12  *
       
    13  *  The IgH EtherCAT Master is distributed in the hope that it will be useful,
       
    14  *  but WITHOUT ANY WARRANTY; without even the implied warranty of
       
    15  *  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General
       
    16  *  Public License for more details.
       
    17  *
       
    18  *  You should have received a copy of the GNU General Public License along
       
    19  *  with the IgH EtherCAT Master; if not, write to the Free Software
       
    20  *  Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA  02110-1301  USA
       
    21  *
       
    22  *  ---
       
    23  *
       
    24  *  The license mentioned above concerns the source code only. Using the
       
    25  *  EtherCAT technology and brand is only permitted in compliance with the
       
    26  *  industrial property and similar rights of Beckhoff Automation GmbH.
       
    27  *
       
    28  *  ---
       
    29  *
       
    30  *  vim: noexpandtab
       
    31  *
       
    32  *****************************************************************************/
       
    33 
       
    34 /**
       
    35    \file
       
    36    EtherCAT driver for e100-compatible NICs.
       
    37 */
       
    38 
       
    39 /* Former documentation: */
       
    40 
       
    41 /*******************************************************************************
       
    42 
       
    43   Intel PRO/100 Linux driver
       
    44   Copyright(c) 1999 - 2006 Intel Corporation.
       
    45 
       
    46   This program is free software; you can redistribute it and/or modify it
       
    47   under the terms and conditions of the GNU General Public License,
       
    48   version 2, as published by the Free Software Foundation.
       
    49 
       
    50   This program is distributed in the hope it will be useful, but WITHOUT
       
    51   ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
       
    52   FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
       
    53   more details.
       
    54 
       
    55   You should have received a copy of the GNU General Public License along with
       
    56   this program; if not, write to the Free Software Foundation, Inc.,
       
    57   51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
       
    58 
       
    59   The full GNU General Public License is included in this distribution in
       
    60   the file called "COPYING".
       
    61 
       
    62   Contact Information:
       
    63   Linux NICS <linux.nics@intel.com>
       
    64   e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
       
    65   Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
       
    66 
       
    67 *******************************************************************************/
       
    68 
       
    69 /*
       
    70  *	e100.c: Intel(R) PRO/100 ethernet driver
       
    71  *
       
    72  *	(Re)written 2003 by scott.feldman@intel.com.  Based loosely on
       
    73  *	original e100 driver, but better described as a munging of
       
    74  *	e100, e1000, eepro100, tg3, 8139cp, and other drivers.
       
    75  *
       
    76  *	References:
       
    77  *		Intel 8255x 10/100 Mbps Ethernet Controller Family,
       
    78  *		Open Source Software Developers Manual,
       
    79  *		http://sourceforge.net/projects/e1000
       
    80  *
       
    81  *
       
    82  *	                      Theory of Operation
       
    83  *
       
    84  *	I.   General
       
    85  *
       
    86  *	The driver supports Intel(R) 10/100 Mbps PCI Fast Ethernet
       
    87  *	controller family, which includes the 82557, 82558, 82559, 82550,
       
    88  *	82551, and 82562 devices.  82558 and greater controllers
       
    89  *	integrate the Intel 82555 PHY.  The controllers are used in
       
    90  *	server and client network interface cards, as well as in
       
    91  *	LAN-On-Motherboard (LOM), CardBus, MiniPCI, and ICHx
       
    92  *	configurations.  8255x supports a 32-bit linear addressing
       
    93  *	mode and operates at 33Mhz PCI clock rate.
       
    94  *
       
    95  *	II.  Driver Operation
       
    96  *
       
    97  *	Memory-mapped mode is used exclusively to access the device's
       
    98  *	shared-memory structure, the Control/Status Registers (CSR). All
       
    99  *	setup, configuration, and control of the device, including queuing
       
   100  *	of Tx, Rx, and configuration commands is through the CSR.
       
   101  *	cmd_lock serializes accesses to the CSR command register.  cb_lock
       
   102  *	protects the shared Command Block List (CBL).
       
   103  *
       
   104  *	8255x is highly MII-compliant and all access to the PHY go
       
   105  *	through the Management Data Interface (MDI).  Consequently, the
       
   106  *	driver leverages the mii.c library shared with other MII-compliant
       
   107  *	devices.
       
   108  *
       
   109  *	Big- and Little-Endian byte order as well as 32- and 64-bit
       
   110  *	archs are supported.  Weak-ordered memory and non-cache-coherent
       
   111  *	archs are supported.
       
   112  *
       
   113  *	III. Transmit
       
   114  *
       
   115  *	A Tx skb is mapped and hangs off of a TCB.  TCBs are linked
       
   116  *	together in a fixed-size ring (CBL) thus forming the flexible mode
       
   117  *	memory structure.  A TCB marked with the suspend-bit indicates
       
   118  *	the end of the ring.  The last TCB processed suspends the
       
 *	controller, and the controller can be restarted by issuing a CU
       
   120  *	resume command to continue from the suspend point, or a CU start
       
   121  *	command to start at a given position in the ring.
       
   122  *
       
   123  *	Non-Tx commands (config, multicast setup, etc) are linked
       
   124  *	into the CBL ring along with Tx commands.  The common structure
       
   125  *	used for both Tx and non-Tx commands is the Command Block (CB).
       
   126  *
       
   127  *	cb_to_use is the next CB to use for queuing a command; cb_to_clean
       
   128  *	is the next CB to check for completion; cb_to_send is the first
       
   129  *	CB to start on in case of a previous failure to resume.  CB clean
       
   130  *	up happens in interrupt context in response to a CU interrupt.
       
   131  *	cbs_avail keeps track of number of free CB resources available.
       
   132  *
       
   133  * 	Hardware padding of short packets to minimum packet size is
       
   134  * 	enabled.  82557 pads with 7Eh, while the later controllers pad
       
   135  * 	with 00h.
       
   136  *
       
   137  *	IV.  Receive
       
   138  *
       
   139  *	The Receive Frame Area (RFA) comprises a ring of Receive Frame
       
   140  *	Descriptors (RFD) + data buffer, thus forming the simplified mode
       
   141  *	memory structure.  Rx skbs are allocated to contain both the RFD
       
   142  *	and the data buffer, but the RFD is pulled off before the skb is
       
   143  *	indicated.  The data buffer is aligned such that encapsulated
       
   144  *	protocol headers are u32-aligned.  Since the RFD is part of the
       
   145  *	mapped shared memory, and completion status is contained within
       
   146  *	the RFD, the RFD must be dma_sync'ed to maintain a consistent
       
   147  *	view from software and hardware.
       
   148  *
       
   149  *	In order to keep updates to the RFD link field from colliding with
       
   150  *	hardware writes to mark packets complete, we use the feature that
       
   151  *	hardware will not write to a size 0 descriptor and mark the previous
       
   152  *	packet as end-of-list (EL).   After updating the link, we remove EL
       
   153  *	and only then restore the size such that hardware may use the
       
   154  *	previous-to-end RFD.
       
   155  *
       
 *	Under typical operation, the receive unit (RU) is started once,
       
   157  *	and the controller happily fills RFDs as frames arrive.  If
       
   158  *	replacement RFDs cannot be allocated, or the RU goes non-active,
       
   159  *	the RU must be restarted.  Frame arrival generates an interrupt,
       
   160  *	and Rx indication and re-allocation happen in the same context,
       
   161  *	therefore no locking is required.  A software-generated interrupt
       
   162  *	is generated from the watchdog to recover from a failed allocation
       
   163  *	scenario where all Rx resources have been indicated and none re-
       
   164  *	placed.
       
   165  *
       
   166  *	V.   Miscellaneous
       
   167  *
       
   168  * 	VLAN offloading of tagging, stripping and filtering is not
       
   169  * 	supported, but driver will accommodate the extra 4-byte VLAN tag
       
   170  * 	for processing by upper layers.  Tx/Rx Checksum offloading is not
       
   171  * 	supported.  Tx Scatter/Gather is not supported.  Jumbo Frames is
       
   172  * 	not supported (hardware limitation).
       
   173  *
       
   174  * 	MagicPacket(tm) WoL support is enabled/disabled via ethtool.
       
   175  *
       
   176  * 	Thanks to JC (jchapman@katalix.com) for helping with
       
   177  * 	testing/troubleshooting the development driver.
       
   178  *
       
   179  * 	TODO:
       
   180  * 	o several entry points race with dev->close
       
   181  * 	o check for tx-no-resources/stop Q races with tx clean/wake Q
       
   182  *
       
   183  *	FIXES:
       
   184  * 2005/12/02 - Michael O'Donnell <Michael.ODonnell at stratus dot com>
       
   185  *	- Stratus87247: protect MDI control register manipulations
       
   186  * 2009/06/01 - Andreas Mohr <andi at lisas dot de>
       
   187  *      - add clean lowlevel I/O emulation for cards with MII-lacking PHYs
       
   188  */
       
   189 
       
   190 #include <linux/module.h>
       
   191 #include <linux/moduleparam.h>
       
   192 #include <linux/kernel.h>
       
   193 #include <linux/types.h>
       
   194 #include <linux/sched.h>
       
   195 #include <linux/slab.h>
       
   196 #include <linux/delay.h>
       
   197 #include <linux/init.h>
       
   198 #include <linux/pci.h>
       
   199 #include <linux/dma-mapping.h>
       
   200 #include <linux/dmapool.h>
       
   201 #include <linux/netdevice.h>
       
   202 #include <linux/etherdevice.h>
       
   203 #include <linux/mii.h>
       
   204 #include <linux/if_vlan.h>
       
   205 #include <linux/skbuff.h>
       
   206 #include <linux/ethtool.h>
       
   207 #include <linux/string.h>
       
   208 #include <linux/firmware.h>
       
   209 
       
   210 // EtherCAT includes
       
   211 #include "../globals.h"
       
   212 #include "ecdev.h"
       
   213 
       
   214 #define DRV_NAME		"ec_e100"
       
   215 #include <asm/unaligned.h>
       
   216 
       
   217 
       
   218 #define DRV_EXT			"-NAPI"
       
   219 #define DRV_VERSION		"3.5.24-k2"DRV_EXT
       
   220 #define DRV_DESCRIPTION		"Intel(R) PRO/100 Network Driver"
       
   221 #define DRV_COPYRIGHT		"Copyright(c) 1999-2006 Intel Corporation"
       
   222 #define PFX			DRV_NAME ": "
       
   223 
       
   224 #define E100_WATCHDOG_PERIOD	(2 * HZ)
       
   225 #define E100_NAPI_WEIGHT	16
       
   226 
       
   227 #define FIRMWARE_D101M		"e100/d101m_ucode.bin"
       
   228 #define FIRMWARE_D101S		"e100/d101s_ucode.bin"
       
   229 #define FIRMWARE_D102E		"e100/d102e_ucode.bin"
       
   230 
       
   231 MODULE_DESCRIPTION(DRV_DESCRIPTION);
       
   232 MODULE_AUTHOR(DRV_COPYRIGHT);
       
   233 MODULE_LICENSE("GPL");
       
   234 MODULE_VERSION(DRV_VERSION);
       
   235 MODULE_FIRMWARE(FIRMWARE_D101M);
       
   236 MODULE_FIRMWARE(FIRMWARE_D101S);
       
   237 MODULE_FIRMWARE(FIRMWARE_D102E);
       
   238 
       
   239 MODULE_DESCRIPTION(DRV_DESCRIPTION);
       
   240 MODULE_AUTHOR("Florian Pose <fp@igh-essen.com>");
       
   241 MODULE_LICENSE("GPL");
       
   242 MODULE_VERSION(DRV_VERSION ", master " EC_MASTER_VERSION);
       
   243 
       
/* EtherCAT cyclic poll handler, called by the master instead of the IRQ
 * path when the device is attached as an EtherCAT device (defined below). */
void e100_ec_poll(struct net_device *);
       
   245 
       
/* Module parameters (all read-only after load, perm 0). */
static int debug = 3;			/* default message level, see DPRINTK */
static int eeprom_bad_csum_allow = 0;	/* tolerate bad EEPROM checksum */
static int use_io = 0;			/* force port I/O instead of MMIO */
module_param(debug, int, 0);
module_param(eeprom_bad_csum_allow, int, 0);
module_param(use_io, int, 0);
MODULE_PARM_DESC(debug, "Debug level (0=none,...,16=all)");
MODULE_PARM_DESC(eeprom_bad_csum_allow, "Allow bad eeprom checksums");
MODULE_PARM_DESC(use_io, "Force use of i/o access mode");
       
/* Conditional driver printk: emits only when the NETIF_MSG_<nlevel> bit is
 * set in nic->msg_enable; expects a local 'nic' in scope at the call site. */
#define DPRINTK(nlevel, klevel, fmt, args...) \
	(void)((NETIF_MSG_##nlevel & nic->msg_enable) && \
	printk(KERN_##klevel PFX "%s: %s: " fmt, nic->netdev->name, \
		__func__ , ## args))
       
   259 
       
/* PCI match entry for an 8255x device; 'ich' is per-device driver_data
 * identifying the ICH chipset generation (used for workarounds). */
#define INTEL_8255X_ETHERNET_DEVICE(device_id, ich) {\
	PCI_VENDOR_ID_INTEL, device_id, PCI_ANY_ID, PCI_ANY_ID, \
	PCI_CLASS_NETWORK_ETHERNET << 8, 0xFFFF00, ich }
/* Supported Intel PRO/100 device IDs. */
static struct pci_device_id e100_id_table[] = {
	INTEL_8255X_ETHERNET_DEVICE(0x1029, 0),
	INTEL_8255X_ETHERNET_DEVICE(0x1030, 0),
	INTEL_8255X_ETHERNET_DEVICE(0x1031, 3),
	INTEL_8255X_ETHERNET_DEVICE(0x1032, 3),
	INTEL_8255X_ETHERNET_DEVICE(0x1033, 3),
	INTEL_8255X_ETHERNET_DEVICE(0x1034, 3),
	INTEL_8255X_ETHERNET_DEVICE(0x1038, 3),
	INTEL_8255X_ETHERNET_DEVICE(0x1039, 4),
	INTEL_8255X_ETHERNET_DEVICE(0x103A, 4),
	INTEL_8255X_ETHERNET_DEVICE(0x103B, 4),
	INTEL_8255X_ETHERNET_DEVICE(0x103C, 4),
	INTEL_8255X_ETHERNET_DEVICE(0x103D, 4),
	INTEL_8255X_ETHERNET_DEVICE(0x103E, 4),
	INTEL_8255X_ETHERNET_DEVICE(0x1050, 5),
	INTEL_8255X_ETHERNET_DEVICE(0x1051, 5),
	INTEL_8255X_ETHERNET_DEVICE(0x1052, 5),
	INTEL_8255X_ETHERNET_DEVICE(0x1053, 5),
	INTEL_8255X_ETHERNET_DEVICE(0x1054, 5),
	INTEL_8255X_ETHERNET_DEVICE(0x1055, 5),
	INTEL_8255X_ETHERNET_DEVICE(0x1056, 5),
	INTEL_8255X_ETHERNET_DEVICE(0x1057, 5),
	INTEL_8255X_ETHERNET_DEVICE(0x1059, 0),
	INTEL_8255X_ETHERNET_DEVICE(0x1064, 6),
	INTEL_8255X_ETHERNET_DEVICE(0x1065, 6),
	INTEL_8255X_ETHERNET_DEVICE(0x1066, 6),
	INTEL_8255X_ETHERNET_DEVICE(0x1067, 6),
	INTEL_8255X_ETHERNET_DEVICE(0x1068, 6),
	INTEL_8255X_ETHERNET_DEVICE(0x1069, 6),
	INTEL_8255X_ETHERNET_DEVICE(0x106A, 6),
	INTEL_8255X_ETHERNET_DEVICE(0x106B, 6),
	INTEL_8255X_ETHERNET_DEVICE(0x1091, 7),
	INTEL_8255X_ETHERNET_DEVICE(0x1092, 7),
	INTEL_8255X_ETHERNET_DEVICE(0x1093, 7),
	INTEL_8255X_ETHERNET_DEVICE(0x1094, 7),
	INTEL_8255X_ETHERNET_DEVICE(0x1095, 7),
	INTEL_8255X_ETHERNET_DEVICE(0x10fe, 7),
	INTEL_8255X_ETHERNET_DEVICE(0x1209, 0),
	INTEL_8255X_ETHERNET_DEVICE(0x1229, 0),
	INTEL_8255X_ETHERNET_DEVICE(0x2449, 2),
	INTEL_8255X_ETHERNET_DEVICE(0x2459, 2),
	INTEL_8255X_ETHERNET_DEVICE(0x245D, 2),
	INTEL_8255X_ETHERNET_DEVICE(0x27DC, 7),
	{ 0, }	/* sentinel */
};
       
   308 
       
   309 // prevent from being loaded automatically
       
   310 //MODULE_DEVICE_TABLE(pci, e100_id_table);
       
   311 
       
/* Controller (MAC) revision IDs as read from PCI revision; gaps are
 * revisions this driver does not distinguish. */
enum mac {
	mac_82557_D100_A  = 0,
	mac_82557_D100_B  = 1,
	mac_82557_D100_C  = 2,
	mac_82558_D101_A4 = 4,
	mac_82558_D101_B0 = 5,
	mac_82559_D101M   = 8,
	mac_82559_D101S   = 9,
	mac_82550_D102    = 12,
	mac_82550_D102_C  = 13,
	mac_82551_E       = 14,
	mac_82551_F       = 15,
	mac_82551_10      = 16,
	mac_unknown       = 0xFF,
};
       
   327 
       
/* Known PHY identifiers (concatenated MII PHY ID registers). */
enum phy {
	phy_100a     = 0x000003E0,
	phy_100c     = 0x035002A8,
	phy_82555_tx = 0x015002A8,
	phy_nsc_tx   = 0x5C002000,
	phy_82562_et = 0x033002A8,
	phy_82562_em = 0x032002A8,
	phy_82562_ek = 0x031002A8,
	phy_82562_eh = 0x017002A8,
	phy_82552_v  = 0xd061004d,
	phy_unknown  = 0xFFFFFFFF,
};
       
   340 
       
/* CSR (Control/Status Registers) — memory-mapped register layout of the
 * 8255x; field order and sizes mirror the hardware and must not change. */
struct csr {
	struct {
		u8 status;	/* SCB status */
		u8 stat_ack;	/* interrupt status/acknowledge bits */
		u8 cmd_lo;	/* CU/RU command (enum scb_cmd_lo) */
		u8 cmd_hi;	/* interrupt mask (enum scb_cmd_hi) */
		u32 gen_ptr;	/* general pointer (DMA address operand) */
	} scb;
	u32 port;		/* reset/self-test port (enum port) */
	u16 flash_ctrl;
	u8 eeprom_ctrl_lo;	/* EEPROM bit-bang interface */
	u8 eeprom_ctrl_hi;
	u32 mdi_ctrl;		/* MDI (PHY management) interface */
	u32 rx_dma_count;
};
       
   357 
       
/* Receive-unit status bits within the SCB status register. */
enum scb_status {
	rus_no_res       = 0x08,	/* RU out of resources */
	rus_ready        = 0x10,	/* RU ready */
	rus_mask         = 0x3C,	/* mask of the RU status field */
};
       
   363 
       
/* Software-tracked state of the receive unit. */
enum ru_state  {
	RU_SUSPENDED = 0,
	RU_RUNNING	 = 1,
	RU_UNINITIALIZED = -1,
};
       
   369 
       
/* Interrupt cause bits in scb.stat_ack; writing a bit back acknowledges it. */
enum scb_stat_ack {
	stat_ack_not_ours    = 0x00,
	stat_ack_sw_gen      = 0x04,	/* software-generated interrupt */
	stat_ack_rnr         = 0x10,	/* RU not ready */
	stat_ack_cu_idle     = 0x20,
	stat_ack_frame_rx    = 0x40,
	stat_ack_cu_cmd_done = 0x80,
	stat_ack_not_present = 0xFF,	/* all-ones: device likely gone */
	stat_ack_rx = (stat_ack_sw_gen | stat_ack_rnr | stat_ack_frame_rx),
	stat_ack_tx = (stat_ack_cu_idle | stat_ack_cu_cmd_done),
};
       
   381 
       
/* Values for scb.cmd_hi: interrupt masking and SW interrupt generation. */
enum scb_cmd_hi {
	irq_mask_none = 0x00,
	irq_mask_all  = 0x01,
	irq_sw_gen    = 0x02,
};
       
   387 
       
/* Values for scb.cmd_lo: RU commands (low nibble) and CU commands. */
enum scb_cmd_lo {
	cuc_nop        = 0x00,
	ruc_start      = 0x01,
	ruc_load_base  = 0x06,
	cuc_start      = 0x10,
	cuc_resume     = 0x20,
	cuc_dump_addr  = 0x40,
	cuc_dump_stats = 0x50,
	cuc_load_base  = 0x60,
	cuc_dump_reset = 0x70,	/* dump stats, then reset counters */
};
       
   399 
       
/* Completion magic written by hardware at the end of a stats dump. */
enum cuc_dump {
	cuc_dump_complete       = 0x0000A005,
	cuc_dump_reset_complete = 0x0000A007,
};
       
   404 
       
/* Commands for the PORT register (csr->port). */
enum port {
	software_reset  = 0x0000,
	selftest        = 0x0001,
	selective_reset = 0x0002,
};
       
   410 
       
/* Bit-bang lines of the EEPROM control register. */
enum eeprom_ctrl_lo {
	eesk = 0x01,	/* serial clock */
	eecs = 0x02,	/* chip select */
	eedi = 0x04,	/* data in (to EEPROM) */
	eedo = 0x08,	/* data out (from EEPROM) */
};
       
   417 
       
/* MDI control register bits for PHY register access. */
enum mdi_ctrl {
	mdi_write = 0x04000000,
	mdi_read  = 0x08000000,
	mdi_ready = 0x10000000,	/* transaction complete */
};
       
   423 
       
/* Serial EEPROM opcodes. */
enum eeprom_op {
	op_write = 0x05,
	op_read  = 0x06,
	op_ewds  = 0x10,	/* erase/write disable */
	op_ewen  = 0x13,	/* erase/write enable */
};
       
   430 
       
/* Word offsets of interesting fields within the EEPROM image. */
enum eeprom_offsets {
	eeprom_cnfg_mdix  = 0x03,
	eeprom_phy_iface  = 0x06,
	eeprom_id         = 0x0A,
	eeprom_config_asf = 0x0D,
	eeprom_smbus_addr = 0x90,
};
       
   438 
       
/* Flag in the EEPROM MDI-X configuration word. */
enum eeprom_cnfg_mdix {
	eeprom_mdix_enabled = 0x0080,
};
       
   442 
       
/* PHY interface types as encoded in the EEPROM. */
enum eeprom_phy_iface {
	NoSuchPhy = 0,
	I82553AB,
	I82553C,
	I82503,
	DP83840,
	S80C240,
	S80C24,
	I82555,
	DP83840A = 10,
};
       
   454 
       
/* Flag in the EEPROM ID word. */
enum eeprom_id {
	eeprom_id_wol = 0x0020,	/* Wake-on-LAN capable */
};
       
   458 
       
/* Flags in the EEPROM ASF configuration word. */
enum eeprom_config_asf {
	eeprom_asf = 0x8000,
	eeprom_gcl = 0x4000,
};
       
   463 
       
/* Command Block status bits, written back by hardware. */
enum cb_status {
	cb_complete = 0x8000,
	cb_ok       = 0x2000,
};
       
   468 
       
/* Command Block command word: opcode (low bits) plus control flags. */
enum cb_command {
	cb_nop    = 0x0000,
	cb_iaaddr = 0x0001,	/* set individual (MAC) address */
	cb_config = 0x0002,
	cb_multi  = 0x0003,	/* multicast address list setup */
	cb_tx     = 0x0004,
	cb_ucode  = 0x0005,	/* load microcode */
	cb_dump   = 0x0006,
	cb_tx_sf  = 0x0008,	/* tx: simplified-mode flag */
	cb_cid    = 0x1f00,
	cb_i      = 0x2000,	/* generate interrupt on completion */
	cb_s      = 0x4000,	/* suspend CU after this CB */
	cb_el     = 0x8000,	/* end of list */
};
       
   483 
       
/* Receive Frame Descriptor, shared with hardware (little-endian fields);
 * layout is fixed by the 8255x simplified receive model. */
struct rfd {
	__le16 status;
	__le16 command;
	__le32 link;		/* bus address of next RFD */
	__le32 rbd;
	__le16 actual_size;	/* bytes received, written by hardware */
	__le16 size;		/* buffer size; 0 blocks hardware writes */
};
       
   492 
       
/* Software bookkeeping for one receive buffer in the RFD ring. */
struct rx {
	struct rx *next, *prev;
	struct sk_buff *skb;	/* holds the RFD followed by packet data */
	dma_addr_t dma_addr;	/* mapped address of the skb data */
};
       
   498 
       
/* X(a,b) orders adjacent bitfields so the struct matches the hardware
 * configure-command byte layout on both bitfield endiannesses. */
#if defined(__BIG_ENDIAN_BITFIELD)
#define X(a,b)	b,a
#else
#define X(a,b)	a,b
#endif
/* Body of the configure command (cb_config); the /*n*/ markers give the
 * byte offset of each field within the command block. */
struct config {
/*0*/	u8 X(byte_count:6, pad0:2);
/*1*/	u8 X(X(rx_fifo_limit:4, tx_fifo_limit:3), pad1:1);
/*2*/	u8 adaptive_ifs;
/*3*/	u8 X(X(X(X(mwi_enable:1, type_enable:1), read_align_enable:1),
	   term_write_cache_line:1), pad3:4);
/*4*/	u8 X(rx_dma_max_count:7, pad4:1);
/*5*/	u8 X(tx_dma_max_count:7, dma_max_count_enable:1);
/*6*/	u8 X(X(X(X(X(X(X(late_scb_update:1, direct_rx_dma:1),
	   tno_intr:1), cna_intr:1), standard_tcb:1), standard_stat_counter:1),
	   rx_discard_overruns:1), rx_save_bad_frames:1);
/*7*/	u8 X(X(X(X(X(rx_discard_short_frames:1, tx_underrun_retry:2),
	   pad7:2), rx_extended_rfd:1), tx_two_frames_in_fifo:1),
	   tx_dynamic_tbd:1);
/*8*/	u8 X(X(mii_mode:1, pad8:6), csma_disabled:1);
/*9*/	u8 X(X(X(X(X(rx_tcpudp_checksum:1, pad9:3), vlan_arp_tco:1),
	   link_status_wake:1), arp_wake:1), mcmatch_wake:1);
/*10*/	u8 X(X(X(pad10:3, no_source_addr_insertion:1), preamble_length:2),
	   loopback:2);
/*11*/	u8 X(linear_priority:3, pad11:5);
/*12*/	u8 X(X(linear_priority_mode:1, pad12:3), ifs:4);
/*13*/	u8 ip_addr_lo;
/*14*/	u8 ip_addr_hi;
/*15*/	u8 X(X(X(X(X(X(X(promiscuous_mode:1, broadcast_disabled:1),
	   wait_after_win:1), pad15_1:1), ignore_ul_bit:1), crc_16_bit:1),
	   pad15_2:1), crs_or_cdt:1);
/*16*/	u8 fc_delay_lo;
/*17*/	u8 fc_delay_hi;
/*18*/	u8 X(X(X(X(X(rx_stripping:1, tx_padding:1), rx_crc_transfer:1),
	   rx_long_ok:1), fc_priority_threshold:3), pad18:1);
/*19*/	u8 X(X(X(X(X(X(X(addr_wake:1, magic_packet_disable:1),
	   fc_disable:1), fc_restop:1), fc_restart:1), fc_reject:1),
	   full_duplex_force:1), full_duplex_pin:1);
/*20*/	u8 X(X(X(pad20_1:5, fc_priority_location:1), multi_ia:1), pad20_2:1);
/*21*/	u8 X(X(pad21_1:3, multicast_all:1), pad21_2:4);
/*22*/	u8 X(X(rx_d102_mode:1, rx_vlan_drop:1), pad22:6);
	u8 pad_d102[9];
};
       
   542 
       
/* Body of the multicast-setup command (cb_multi). */
#define E100_MAX_MULTICAST_ADDRS	64
struct multi {
	__le16 count;	/* byte count of the address list */
	u8 addr[E100_MAX_MULTICAST_ADDRS * ETH_ALEN + 2/*pad*/];
};
       
   548 
       
/* Important: keep total struct u32-aligned */
#define UCODE_SIZE			134
/* Command Block: hardware-visible header (status/command/link + payload
 * union) followed by software bookkeeping fields the hardware never reads. */
struct cb {
	__le16 status;
	__le16 command;
	__le32 link;	/* bus address of the next CB in the ring */
	union {
		u8 iaaddr[ETH_ALEN];		/* cb_iaaddr */
		__le32 ucode[UCODE_SIZE];	/* cb_ucode */
		struct config config;		/* cb_config */
		struct multi multi;		/* cb_multi */
		struct {			/* cb_tx: TCB */
			u32 tbd_array;
			u16 tcb_byte_count;
			u8 threshold;
			u8 tbd_count;
			struct {
				__le32 buf_addr;
				__le16 size;
				u16 eol;
			} tbd;
		} tcb;
		__le32 dump_buffer_addr;	/* cb_dump */
	} u;
	/* Software-only fields below. */
	struct cb *next, *prev;
	dma_addr_t dma_addr;
	struct sk_buff *skb;
};
       
   577 
       
/* Loopback mode for the configure command's 2-bit loopback field. */
enum loopback {
	lb_none = 0, lb_mac = 1, lb_phy = 3,
};
       
   581 
       
/* Hardware statistics dump area (cuc_dump_stats); layout fixed by the
 * 8255x, 'complete' receives the cuc_dump magic when the dump finishes. */
struct stats {
	__le32 tx_good_frames, tx_max_collisions, tx_late_collisions,
		tx_underruns, tx_lost_crs, tx_deferred, tx_single_collisions,
		tx_multiple_collisions, tx_total_collisions;
	__le32 rx_good_frames, rx_crc_errors, rx_alignment_errors,
		rx_resource_errors, rx_overrun_errors, rx_cdt_errors,
		rx_short_frame_errors;
	__le32 fc_xmt_pause, fc_rcv_pause, fc_rcv_unsupported;
	__le16 xmt_tco_frames, rcv_tco_frames;
	__le32 complete;
};
       
   593 
       
/* DMA-coherent scratch memory shared with the hardware. */
struct mem {
	struct {
		u32 signature;	/* written non-zero by hardware self-test */
		u32 result;	/* 0 on self-test pass */
	} selftest;
	struct stats stats;
	u8 dump_buf[596];
};
       
   602 
       
/* Bounded tunable: allowed [min, max] and the currently configured count. */
struct param_range {
	u32 min;
	u32 max;
	u32 count;
};
       
   608 
       
/* Ring-size parameters: receive frame descriptors and command blocks. */
struct params {
	struct param_range rfds;
	struct param_range cbs;
};
       
   613 
       
/* Per-adapter driver state. */
struct nic {
	/* Begin: frequently used values: keep adjacent for cache effect */
	u32 msg_enable				____cacheline_aligned;
	struct net_device *netdev;
	struct pci_dev *pdev;
	/* MDI access op, selected per PHY (real MDI or emulated) */
	u16 (*mdio_ctrl)(struct nic *nic, u32 addr, u32 dir, u32 reg, u16 data);

	struct rx *rxs				____cacheline_aligned;
	struct rx *rx_to_use;		/* next rx slot to refill */
	struct rx *rx_to_clean;		/* next rx slot to indicate */
	struct rfd blank_rfd;		/* template for fresh RFDs */
	enum ru_state ru_running;

	spinlock_t cb_lock			____cacheline_aligned;
	spinlock_t cmd_lock;		/* serializes SCB command register */
	struct csr __iomem *csr;	/* mapped device registers */
	enum scb_cmd_lo cuc_cmd;	/* cuc_start or cuc_resume */
	unsigned int cbs_avail;		/* free command blocks */
	struct napi_struct napi;
	struct cb *cbs;
	struct cb *cb_to_use;		/* next CB to queue a command into */
	struct cb *cb_to_send;		/* first CB to (re)start CU on */
	struct cb *cb_to_clean;		/* next CB to check for completion */
	__le16 tx_command;		/* cb_tx or cb_tx_sf (+flags) */
	/* End: frequently used values: keep adjacent for cache effect */

	enum {
		ich                = (1 << 0),
		promiscuous        = (1 << 1),
		multicast_all      = (1 << 2),
		wol_magic          = (1 << 3),
		ich_10h_workaround = (1 << 4),
	} flags					____cacheline_aligned;

	enum mac mac;
	enum phy phy;
	struct params params;
	struct timer_list watchdog;
	struct timer_list blink_timer;
	struct mii_if_info mii;
	struct work_struct tx_timeout_task;
	enum loopback loopback;

	struct mem *mem;		/* shared selftest/stats/dump area */
	dma_addr_t dma_addr;		/* bus address of 'mem' */

	struct pci_pool *cbs_pool;
	dma_addr_t cbs_dma_addr;
	u8 adaptive_ifs;
	u8 tx_threshold;
	u32 tx_frames;
	u32 tx_collisions;

	u32 tx_deferred;
	u32 tx_single_collisions;
	u32 tx_multiple_collisions;
	u32 tx_fc_pause;
	u32 tx_tco_frames;

	u32 rx_fc_pause;
	u32 rx_fc_unsupported;
	u32 rx_tco_frames;
	u32 rx_over_length_errors;

	u16 leds;
	u16 eeprom_wc;		/* EEPROM word count */

	__le16 eeprom[256];	/* cached EEPROM image */
	spinlock_t mdio_lock;
	const struct firmware *fw;
	/* EtherCAT: non-NULL when attached to a master; the normal IRQ/NAPI
	 * paths are bypassed and e100_ec_poll() drives the device instead. */
	ec_device_t *ecdev;
	unsigned long ec_watchdog_jiffies;
};
       
   687 
       
/* Force posted CSR writes to reach the device: a read from the device
 * flushes the write buffers of any intermediate PCI bridges. */
static inline void e100_write_flush(struct nic *nic)
{
	/* Flush previous PCI writes through intermediate bridges
	 * by doing a benign read */
	(void)ioread8(&nic->csr->scb.status);
}
       
   694 
       
/* Unmask the controller's interrupt.
 *
 * No-op in EtherCAT mode (nic->ecdev set): the master polls the device
 * and interrupts must stay masked. */
static void e100_enable_irq(struct nic *nic)
{
	unsigned long flags;

	if (nic->ecdev)
		return;

	/* cmd_lock serializes accesses to the SCB command register */
	spin_lock_irqsave(&nic->cmd_lock, flags);
	iowrite8(irq_mask_none, &nic->csr->scb.cmd_hi);
	e100_write_flush(nic);
	spin_unlock_irqrestore(&nic->cmd_lock, flags);
}
       
   707 
       
/* Mask the controller's interrupt.
 *
 * Unlike e100_enable_irq(), the write is performed even in EtherCAT mode,
 * but cmd_lock is skipped there: without interrupts there is no concurrent
 * IRQ path to serialize against. */
static void e100_disable_irq(struct nic *nic)
{
	unsigned long flags = 0;	/* initialized: locking may be skipped */

	if (!nic->ecdev)
		spin_lock_irqsave(&nic->cmd_lock, flags);
	iowrite8(irq_mask_all, &nic->csr->scb.cmd_hi);
	e100_write_flush(nic);
	if (!nic->ecdev)
		spin_unlock_irqrestore(&nic->cmd_lock, flags);
}
       
   719 
       
/* Fully reset the controller: selective reset first to quiesce DMA, then
 * a software reset.  Each write is flushed and followed by a settle delay;
 * the ordering and the udelay(20) pauses are required by the hardware. */
static void e100_hw_reset(struct nic *nic)
{
	/* Put CU and RU into idle with a selective reset to get
	 * device off of PCI bus */
	iowrite32(selective_reset, &nic->csr->port);
	e100_write_flush(nic); udelay(20);

	/* Now fully reset device */
	iowrite32(software_reset, &nic->csr->port);
	e100_write_flush(nic); udelay(20);

	/* Mask off our interrupt line - it's unmasked after reset */
	e100_disable_irq(nic);
}
       
   734 
       
/* Run the controller's built-in self-test.
 *
 * The chip DMAs its result into nic->mem->selftest, so a passing test
 * also proves host<->device DMA works.  Returns 0 on success or
 * -ETIMEDOUT if the test failed or never completed. */
static int e100_self_test(struct nic *nic)
{
	/* NOTE(review): dma_addr_t is truncated to u32 here; the PORT
	 * register is 32 bits wide — assumes selftest memory is in the
	 * low 4GB (consistent with the rest of this driver). */
	u32 dma_addr = nic->dma_addr + offsetof(struct mem, selftest);

	/* Passing the self-test is a pretty good indication
	 * that the device can DMA to/from host memory */

	/* Pre-set sentinel values the chip must overwrite */
	nic->mem->selftest.signature = 0;
	nic->mem->selftest.result = 0xFFFFFFFF;

	iowrite32(selftest | dma_addr, &nic->csr->port);
	e100_write_flush(nic);
	/* Wait 10 msec for self-test to complete */
	msleep(10);

	/* Interrupts are enabled after self-test */
	e100_disable_irq(nic);

	/* Check results of self-test */
	if (nic->mem->selftest.result != 0) {
		DPRINTK(HW, ERR, "Self-test failed: result=0x%08X\n",
			nic->mem->selftest.result);
		return -ETIMEDOUT;
	}
	if (nic->mem->selftest.signature == 0) {
		/* Signature still 0 => the chip never wrote back */
		DPRINTK(HW, ERR, "Self-test failed: timed out\n");
		return -ETIMEDOUT;
	}

	return 0;
}
       
   766 
       
   767 static void e100_eeprom_write(struct nic *nic, u16 addr_len, u16 addr, __le16 data)
       
   768 {
       
   769 	u32 cmd_addr_data[3];
       
   770 	u8 ctrl;
       
   771 	int i, j;
       
   772 
       
   773 	/* Three cmds: write/erase enable, write data, write/erase disable */
       
   774 	cmd_addr_data[0] = op_ewen << (addr_len - 2);
       
   775 	cmd_addr_data[1] = (((op_write << addr_len) | addr) << 16) |
       
   776 		le16_to_cpu(data);
       
   777 	cmd_addr_data[2] = op_ewds << (addr_len - 2);
       
   778 
       
   779 	/* Bit-bang cmds to write word to eeprom */
       
   780 	for (j = 0; j < 3; j++) {
       
   781 
       
   782 		/* Chip select */
       
   783 		iowrite8(eecs | eesk, &nic->csr->eeprom_ctrl_lo);
       
   784 		e100_write_flush(nic); udelay(4);
       
   785 
       
   786 		for (i = 31; i >= 0; i--) {
       
   787 			ctrl = (cmd_addr_data[j] & (1 << i)) ?
       
   788 				eecs | eedi : eecs;
       
   789 			iowrite8(ctrl, &nic->csr->eeprom_ctrl_lo);
       
   790 			e100_write_flush(nic); udelay(4);
       
   791 
       
   792 			iowrite8(ctrl | eesk, &nic->csr->eeprom_ctrl_lo);
       
   793 			e100_write_flush(nic); udelay(4);
       
   794 		}
       
   795 		/* Wait 10 msec for cmd to complete */
       
   796 		msleep(10);
       
   797 
       
   798 		/* Chip deselect */
       
   799 		iowrite8(0, &nic->csr->eeprom_ctrl_lo);
       
   800 		e100_write_flush(nic); udelay(4);
       
   801 	}
       
   802 };
       
   803 
       
   804 /* General technique stolen from the eepro100 driver - very clever */
       
   805 static __le16 e100_eeprom_read(struct nic *nic, u16 *addr_len, u16 addr)
       
   806 {
       
   807 	u32 cmd_addr_data;
       
   808 	u16 data = 0;
       
   809 	u8 ctrl;
       
   810 	int i;
       
   811 
       
   812 	cmd_addr_data = ((op_read << *addr_len) | addr) << 16;
       
   813 
       
   814 	/* Chip select */
       
   815 	iowrite8(eecs | eesk, &nic->csr->eeprom_ctrl_lo);
       
   816 	e100_write_flush(nic); udelay(4);
       
   817 
       
   818 	/* Bit-bang to read word from eeprom */
       
   819 	for (i = 31; i >= 0; i--) {
       
   820 		ctrl = (cmd_addr_data & (1 << i)) ? eecs | eedi : eecs;
       
   821 		iowrite8(ctrl, &nic->csr->eeprom_ctrl_lo);
       
   822 		e100_write_flush(nic); udelay(4);
       
   823 
       
   824 		iowrite8(ctrl | eesk, &nic->csr->eeprom_ctrl_lo);
       
   825 		e100_write_flush(nic); udelay(4);
       
   826 
       
   827 		/* Eeprom drives a dummy zero to EEDO after receiving
       
   828 		 * complete address.  Use this to adjust addr_len. */
       
   829 		ctrl = ioread8(&nic->csr->eeprom_ctrl_lo);
       
   830 		if (!(ctrl & eedo) && i > 16) {
       
   831 			*addr_len -= (i - 16);
       
   832 			i = 17;
       
   833 		}
       
   834 
       
   835 		data = (data << 1) | (ctrl & eedo ? 1 : 0);
       
   836 	}
       
   837 
       
   838 	/* Chip deselect */
       
   839 	iowrite8(0, &nic->csr->eeprom_ctrl_lo);
       
   840 	e100_write_flush(nic); udelay(4);
       
   841 
       
   842 	return cpu_to_le16(data);
       
   843 };
       
   844 
       
/* Load entire EEPROM image into driver cache and validate checksum */
/* Returns 0 on success; -EAGAIN if the checksum is bad and the
 * eeprom_bad_csum_allow module parameter is not set. */
static int e100_eeprom_load(struct nic *nic)
{
	u16 addr, addr_len = 8, checksum = 0;

	/* Try reading with an 8-bit addr len to discover actual addr len */
	e100_eeprom_read(nic, &addr_len, 0);
	nic->eeprom_wc = 1 << addr_len;	/* word count, e.g. 64 or 256 */

	for (addr = 0; addr < nic->eeprom_wc; addr++) {
		nic->eeprom[addr] = e100_eeprom_read(nic, &addr_len, addr);
		/* Last word holds the checksum itself — exclude it */
		if (addr < nic->eeprom_wc - 1)
			checksum += le16_to_cpu(nic->eeprom[addr]);
	}

	/* The checksum, stored in the last word, is calculated such that
	 * the sum of words should be 0xBABA */
	if (cpu_to_le16(0xBABA - checksum) != nic->eeprom[nic->eeprom_wc - 1]) {
		DPRINTK(PROBE, ERR, "EEPROM corrupted\n");
		if (!eeprom_bad_csum_allow)
			return -EAGAIN;
	}

	return 0;
}
       
   870 
       
/* Save (portion of) driver EEPROM cache to device and update checksum */
/* Writes words [start, start+count) from nic->eeprom[] to the part,
 * then recomputes and writes the checksum word (last word, chosen so
 * the sum of all words is 0xBABA).  Returns 0 or -EINVAL when the
 * requested range would overlap the checksum word. */
static int e100_eeprom_save(struct nic *nic, u16 start, u16 count)
{
	u16 addr, addr_len = 8, checksum = 0;

	/* Try reading with an 8-bit addr len to discover actual addr len */
	e100_eeprom_read(nic, &addr_len, 0);
	nic->eeprom_wc = 1 << addr_len;

	/* The last word (eeprom_wc - 1) is reserved for the checksum */
	if (start + count >= nic->eeprom_wc)
		return -EINVAL;

	for (addr = start; addr < start + count; addr++)
		e100_eeprom_write(nic, addr_len, addr, nic->eeprom[addr]);

	/* The checksum, stored in the last word, is calculated such that
	 * the sum of words should be 0xBABA */
	for (addr = 0; addr < nic->eeprom_wc - 1; addr++)
		checksum += le16_to_cpu(nic->eeprom[addr]);
	nic->eeprom[nic->eeprom_wc - 1] = cpu_to_le16(0xBABA - checksum);
	e100_eeprom_write(nic, addr_len, nic->eeprom_wc - 1,
		nic->eeprom[nic->eeprom_wc - 1]);

	return 0;
}
       
   896 
       
   897 #define E100_WAIT_SCB_TIMEOUT 20000 /* we might have to wait 100ms!!! */
       
   898 #define E100_WAIT_SCB_FAST 20       /* delay like the old code */
       
/* Issue one command to the System Control Block (SCB).
 *
 * Waits for the previous command to be accepted (cmd_lo reads 0),
 * optionally loads the general pointer, then writes the command byte.
 * Returns 0 on success or -EAGAIN if the SCB never cleared.
 *
 * In EtherCAT mode (nic->ecdev) the cmd_lock is skipped — the master
 * serializes access itself. */
static int e100_exec_cmd(struct nic *nic, u8 cmd, dma_addr_t dma_addr)
{
	unsigned long flags = 0;
	unsigned int i;
	int err = 0;

	if (!nic->ecdev)
		spin_lock_irqsave(&nic->cmd_lock, flags);

	/* Previous command is accepted when SCB clears */
	for (i = 0; i < E100_WAIT_SCB_TIMEOUT; i++) {
		if (likely(!ioread8(&nic->csr->scb.cmd_lo)))
			break;
		cpu_relax();
		/* Spin fast at first, then back off with 5us delays */
		if (unlikely(i > E100_WAIT_SCB_FAST))
			udelay(5);
	}
	if (unlikely(i == E100_WAIT_SCB_TIMEOUT)) {
		err = -EAGAIN;
		goto err_unlock;
	}

	/* cuc_resume continues from the current CB; no pointer needed */
	if (unlikely(cmd != cuc_resume))
		iowrite32(dma_addr, &nic->csr->scb.gen_ptr);
	iowrite8(cmd, &nic->csr->scb.cmd_lo);

err_unlock:
	if (!nic->ecdev)
		spin_unlock_irqrestore(&nic->cmd_lock, flags);

	return err;
}
       
   931 
       
/* Take the next free command block (CB) from the ring, let cb_prepare
 * fill it in, and kick the Command Unit.
 *
 * @skb: passed through to cb_prepare; may be an abused non-skb pointer
 *       (see e100_load_ucode_wait) and may be NULL.
 *
 * Returns 0 on success, -ENOMEM when no CB is free, or -ENOSPC when
 * this call consumed the last free CB (command was still queued).
 * EtherCAT mode runs without the cb_lock. */
static int e100_exec_cb(struct nic *nic, struct sk_buff *skb,
	void (*cb_prepare)(struct nic *, struct cb *, struct sk_buff *))
{
	struct cb *cb;
	unsigned long flags = 0;
	int err = 0;

	if (!nic->ecdev)
		spin_lock_irqsave(&nic->cb_lock, flags);

	if (unlikely(!nic->cbs_avail)) {
		err = -ENOMEM;
		goto err_unlock;
	}

	/* Claim the next CB from the ring */
	cb = nic->cb_to_use;
	nic->cb_to_use = cb->next;
	nic->cbs_avail--;
	cb->skb = skb;

	/* Ring now full: report it, but still submit this command */
	if (unlikely(!nic->cbs_avail))
		err = -ENOSPC;

	cb_prepare(nic, cb, skb);

	/* Order is important otherwise we'll be in a race with h/w:
	 * set S-bit in current first, then clear S-bit in previous. */
	cb->command |= cpu_to_le16(cb_s);
	wmb();
	cb->prev->command &= cpu_to_le16(~cb_s);

	while (nic->cb_to_send != nic->cb_to_use) {
		if (unlikely(e100_exec_cmd(nic, nic->cuc_cmd,
			nic->cb_to_send->dma_addr))) {
			/* Ok, here's where things get sticky.  It's
			 * possible that we can't schedule the command
			 * because the controller is too busy, so
			 * let's just queue the command and try again
			 * when another command is scheduled. */
			if (err == -ENOSPC) {
				//request a reset
				schedule_work(&nic->tx_timeout_task);
			}
			break;
		} else {
			/* After the first start, subsequent CBs resume */
			nic->cuc_cmd = cuc_resume;
			nic->cb_to_send = nic->cb_to_send->next;
		}
	}

err_unlock:
	if (!nic->ecdev)
		spin_unlock_irqrestore(&nic->cb_lock, flags);

	return err;
}
       
   988 
       
   989 static int mdio_read(struct net_device *netdev, int addr, int reg)
       
   990 {
       
   991 	struct nic *nic = netdev_priv(netdev);
       
   992 	return nic->mdio_ctrl(nic, addr, mdi_read, reg, 0);
       
   993 }
       
   994 
       
   995 static void mdio_write(struct net_device *netdev, int addr, int reg, int data)
       
   996 {
       
   997 	struct nic *nic = netdev_priv(netdev);
       
   998 
       
   999 	nic->mdio_ctrl(nic, addr, mdi_write, reg, data);
       
  1000 }
       
  1001 
       
/* the standard mdio_ctrl() function for usual MII-compliant hardware */
/* Performs one MDI read or write via the MDI control register.
 * Returns the data read (or echoed) on success; returns 0 on timeout,
 * which is indistinguishable from a legitimate zero read.
 * In EtherCAT mode the mdio_lock is skipped (lockless operation). */
static u16 mdio_ctrl_hw(struct nic *nic, u32 addr, u32 dir, u32 reg, u16 data)
{
	u32 data_out = 0;
	unsigned int i;
	unsigned long flags = 0;


	/*
	 * Stratus87247: we shouldn't be writing the MDI control
	 * register until the Ready bit shows True.  Also, since
	 * manipulation of the MDI control registers is a multi-step
	 * procedure it should be done under lock.
	 */
	if (!nic->ecdev)
		spin_lock_irqsave(&nic->mdio_lock, flags);
	/* Poll up to 100 * 20us = 2ms for the Ready bit */
	for (i = 100; i; --i) {
		if (ioread32(&nic->csr->mdi_ctrl) & mdi_ready)
			break;
		udelay(20);
	}
	if (unlikely(!i)) {
		printk("e100.mdio_ctrl(%s) won't go Ready\n",
			nic->netdev->name );
		if (!nic->ecdev)
			spin_unlock_irqrestore(&nic->mdio_lock, flags);
		return 0;		/* No way to indicate timeout error */
	}
	iowrite32((reg << 16) | (addr << 21) | dir | data, &nic->csr->mdi_ctrl);

	/* Wait for the operation itself to complete (again up to 2ms) */
	for (i = 0; i < 100; i++) {
		udelay(20);
		if ((data_out = ioread32(&nic->csr->mdi_ctrl)) & mdi_ready)
			break;
	}
	if (!nic->ecdev)
		spin_unlock_irqrestore(&nic->mdio_lock, flags);
	DPRINTK(HW, DEBUG,
		"%s:addr=%d, reg=%d, data_in=0x%04X, data_out=0x%04X\n",
		dir == mdi_read ? "READ" : "WRITE", addr, reg, data, data_out);
	return (u16)data_out;
}
       
  1044 
       
  1045 /* slightly tweaked mdio_ctrl() function for phy_82552_v specifics */
       
  1046 static u16 mdio_ctrl_phy_82552_v(struct nic *nic,
       
  1047 				 u32 addr,
       
  1048 				 u32 dir,
       
  1049 				 u32 reg,
       
  1050 				 u16 data)
       
  1051 {
       
  1052 	if ((reg == MII_BMCR) && (dir == mdi_write)) {
       
  1053 		if (data & (BMCR_ANRESTART | BMCR_ANENABLE)) {
       
  1054 			u16 advert = mdio_read(nic->netdev, nic->mii.phy_id,
       
  1055 							MII_ADVERTISE);
       
  1056 
       
  1057 			/*
       
  1058 			 * Workaround Si issue where sometimes the part will not
       
  1059 			 * autoneg to 100Mbps even when advertised.
       
  1060 			 */
       
  1061 			if (advert & ADVERTISE_100FULL)
       
  1062 				data |= BMCR_SPEED100 | BMCR_FULLDPLX;
       
  1063 			else if (advert & ADVERTISE_100HALF)
       
  1064 				data |= BMCR_SPEED100;
       
  1065 		}
       
  1066 	}
       
  1067 	return mdio_ctrl_hw(nic, addr, dir, reg, data);
       
  1068 }
       
  1069 
       
  1070 /* Fully software-emulated mdio_ctrl() function for cards without
       
  1071  * MII-compliant PHYs.
       
  1072  * For now, this is mainly geared towards 80c24 support; in case of further
       
  1073  * requirements for other types (i82503, ...?) either extend this mechanism
       
  1074  * or split it, whichever is cleaner.
       
  1075  */
       
  1076 static u16 mdio_ctrl_phy_mii_emulated(struct nic *nic,
       
  1077 				      u32 addr,
       
  1078 				      u32 dir,
       
  1079 				      u32 reg,
       
  1080 				      u16 data)
       
  1081 {
       
  1082 	/* might need to allocate a netdev_priv'ed register array eventually
       
  1083 	 * to be able to record state changes, but for now
       
  1084 	 * some fully hardcoded register handling ought to be ok I guess. */
       
  1085 
       
  1086 	if (dir == mdi_read) {
       
  1087 		switch (reg) {
       
  1088 		case MII_BMCR:
       
  1089 			/* Auto-negotiation, right? */
       
  1090 			return  BMCR_ANENABLE |
       
  1091 				BMCR_FULLDPLX;
       
  1092 		case MII_BMSR:
       
  1093 			return	BMSR_LSTATUS /* for mii_link_ok() */ |
       
  1094 				BMSR_ANEGCAPABLE |
       
  1095 				BMSR_10FULL;
       
  1096 		case MII_ADVERTISE:
       
  1097 			/* 80c24 is a "combo card" PHY, right? */
       
  1098 			return	ADVERTISE_10HALF |
       
  1099 				ADVERTISE_10FULL;
       
  1100 		default:
       
  1101 			DPRINTK(HW, DEBUG,
       
  1102 		"%s:addr=%d, reg=%d, data=0x%04X: unimplemented emulation!\n",
       
  1103 		dir == mdi_read ? "READ" : "WRITE", addr, reg, data);
       
  1104 			return 0xFFFF;
       
  1105 		}
       
  1106 	} else {
       
  1107 		switch (reg) {
       
  1108 		default:
       
  1109 			DPRINTK(HW, DEBUG,
       
  1110 		"%s:addr=%d, reg=%d, data=0x%04X: unimplemented emulation!\n",
       
  1111 		dir == mdi_read ? "READ" : "WRITE", addr, reg, data);
       
  1112 			return 0xFFFF;
       
  1113 		}
       
  1114 	}
       
  1115 }
       
  1116 static inline int e100_phy_supports_mii(struct nic *nic)
       
  1117 {
       
  1118 	/* for now, just check it by comparing whether we
       
  1119 	   are using MII software emulation.
       
  1120 	*/
       
  1121 	return (nic->mdio_ctrl != mdio_ctrl_phy_mii_emulated);
       
  1122 }
       
  1123 
       
/* Initialize driver defaults: MAC revision detection, ring-size
 * parameters, TX tuning values, the blank RFD template, and the
 * mii_if_info callbacks.  Called before the device is brought up. */
static void e100_get_defaults(struct nic *nic)
{
	/* RFD/CB ring limits and default counts */
	struct param_range rfds = { .min = 16, .max = 256, .count = 256 };
	struct param_range cbs  = { .min = 64, .max = 256, .count = 128 };

	/* MAC type is encoded as rev ID; exception: ICH is treated as 82559 */
	nic->mac = (nic->flags & ich) ? mac_82559_D101M : nic->pdev->revision;
	if (nic->mac == mac_unknown)
		nic->mac = mac_82557_D100_A;

	nic->params.rfds = rfds;
	nic->params.cbs = cbs;

	/* Quadwords to DMA into FIFO before starting frame transmit */
	nic->tx_threshold = 0xE0;

	/* no interrupt for every tx completion, delay = 256us if not 557 */
	nic->tx_command = cpu_to_le16(cb_tx | cb_tx_sf |
		((nic->mac >= mac_82558_D101_A4) ? cb_cid : cb_i));

	/* Template for a freshly allocated RFD */
	nic->blank_rfd.command = 0;
	nic->blank_rfd.rbd = cpu_to_le32(0xFFFFFFFF);	/* no RBD chained */
	nic->blank_rfd.size = cpu_to_le16(VLAN_ETH_FRAME_LEN);

	/* MII setup */
	nic->mii.phy_id_mask = 0x1F;
	nic->mii.reg_num_mask = 0x1F;
	nic->mii.dev = nic->netdev;
	nic->mii.mdio_read = mdio_read;
	nic->mii.mdio_write = mdio_write;
}
       
  1156 
       
/* cb_prepare handler: build the Configure command that programs the
 * controller's operating parameters (FIFO thresholds, MII mode,
 * duplex, promiscuity, flow control, WoL, per-MAC-revision extras).
 * The byte-by-byte layout follows the 8255x configure block format.
 * Note: WoL magic packets are always disabled in EtherCAT mode. */
static void e100_configure(struct nic *nic, struct cb *cb, struct sk_buff *skb)
{
	struct config *config = &cb->u.config;
	u8 *c = (u8 *)config;	/* byte view, for the debug dump below */

	cb->command = cpu_to_le16(cb_config);

	memset(config, 0, sizeof(struct config));

	config->byte_count = 0x16;		/* bytes in this struct */
	config->rx_fifo_limit = 0x8;		/* bytes in FIFO before DMA */
	config->direct_rx_dma = 0x1;		/* reserved */
	config->standard_tcb = 0x1;		/* 1=standard, 0=extended */
	config->standard_stat_counter = 0x1;	/* 1=standard, 0=extended */
	config->rx_discard_short_frames = 0x1;	/* 1=discard, 0=pass */
	config->tx_underrun_retry = 0x3;	/* # of underrun retries */
	if (e100_phy_supports_mii(nic))
		config->mii_mode = 1;           /* 1=MII mode, 0=i82503 mode */
	config->pad10 = 0x6;
	config->no_source_addr_insertion = 0x1;	/* 1=no, 0=yes */
	config->preamble_length = 0x2;		/* 0=1, 1=3, 2=7, 3=15 bytes */
	config->ifs = 0x6;			/* x16 = inter frame spacing */
	config->ip_addr_hi = 0xF2;		/* ARP IP filter - not used */
	config->pad15_1 = 0x1;
	config->pad15_2 = 0x1;
	config->crs_or_cdt = 0x0;		/* 0=CRS only, 1=CRS or CDT */
	config->fc_delay_hi = 0x40;		/* time delay for fc frame */
	config->tx_padding = 0x1;		/* 1=pad short frames */
	config->fc_priority_threshold = 0x7;	/* 7=priority fc disabled */
	config->pad18 = 0x1;
	config->full_duplex_pin = 0x1;		/* 1=examine FDX# pin */
	config->pad20_1 = 0x1F;
	config->fc_priority_location = 0x1;	/* 1=byte#31, 0=byte#19 */
	config->pad21_1 = 0x5;

	config->adaptive_ifs = nic->adaptive_ifs;
	config->loopback = nic->loopback;

	if (nic->mii.force_media && nic->mii.full_duplex)
		config->full_duplex_force = 0x1;	/* 1=force, 0=auto */

	if (nic->flags & promiscuous || nic->loopback) {
		config->rx_save_bad_frames = 0x1;	/* 1=save, 0=discard */
		config->rx_discard_short_frames = 0x0;	/* 1=discard, 0=save */
		config->promiscuous_mode = 0x1;		/* 1=on, 0=off */
	}

	if (nic->flags & multicast_all)
		config->multicast_all = 0x1;		/* 1=accept, 0=no */

	/* disable WoL when up */
	if (nic->ecdev || 
			(netif_running(nic->netdev) || !(nic->flags & wol_magic)))
		config->magic_packet_disable = 0x1;	/* 1=off, 0=on */

	if (nic->mac >= mac_82558_D101_A4) {
		config->fc_disable = 0x1;	/* 1=Tx fc off, 0=Tx fc on */
		config->mwi_enable = 0x1;	/* 1=enable, 0=disable */
		config->standard_tcb = 0x0;	/* 1=standard, 0=extended */
		config->rx_long_ok = 0x1;	/* 1=VLANs ok, 0=standard */
		if (nic->mac >= mac_82559_D101M) {
			config->tno_intr = 0x1;		/* TCO stats enable */
			/* Enable TCO in extended config */
			if (nic->mac >= mac_82551_10) {
				config->byte_count = 0x20; /* extended bytes */
				config->rx_d102_mode = 0x1; /* GMRC for TCO */
			}
		} else {
			config->standard_stat_counter = 0x0;
		}
	}

	DPRINTK(HW, DEBUG, "[00-07]=%02X:%02X:%02X:%02X:%02X:%02X:%02X:%02X\n",
		c[0], c[1], c[2], c[3], c[4], c[5], c[6], c[7]);
	DPRINTK(HW, DEBUG, "[08-15]=%02X:%02X:%02X:%02X:%02X:%02X:%02X:%02X\n",
		c[8], c[9], c[10], c[11], c[12], c[13], c[14], c[15]);
	DPRINTK(HW, DEBUG, "[16-23]=%02X:%02X:%02X:%02X:%02X:%02X:%02X:%02X\n",
		c[16], c[17], c[18], c[19], c[20], c[21], c[22], c[23]);
}
       
  1236 
       
  1237 /*************************************************************************
       
  1238 *  CPUSaver parameters
       
  1239 *
       
  1240 *  All CPUSaver parameters are 16-bit literals that are part of a
       
  1241 *  "move immediate value" instruction.  By changing the value of
       
  1242 *  the literal in the instruction before the code is loaded, the
       
  1243 *  driver can change the algorithm.
       
  1244 *
       
  1245 *  INTDELAY - This loads the dead-man timer with its initial value.
       
  1246 *    When this timer expires the interrupt is asserted, and the
       
  1247 *    timer is reset each time a new packet is received.  (see
       
  1248 *    BUNDLEMAX below to set the limit on number of chained packets)
       
  1249 *    The current default is 0x600 or 1536.  Experiments show that
       
  1250 *    the value should probably stay within the 0x200 - 0x1000.
       
  1251 *
       
  1252 *  BUNDLEMAX -
       
  1253 *    This sets the maximum number of frames that will be bundled.  In
       
  1254 *    some situations, such as the TCP windowing algorithm, it may be
       
  1255 *    better to limit the growth of the bundle size than let it go as
       
  1256 *    high as it can, because that could cause too much added latency.
       
  1257 *    The default is six, because this is the number of packets in the
       
  1258 *    default TCP window size.  A value of 1 would make CPUSaver indicate
       
  1259 *    an interrupt for every frame received.  If you do not want to put
       
  1260 *    a limit on the bundle size, set this value to xFFFF.
       
  1261 *
       
  1262 *  BUNDLESMALL -
       
  1263 *    This contains a bit-mask describing the minimum size frame that
       
  1264 *    will be bundled.  The default masks the lower 7 bits, which means
       
  1265 *    that any frame less than 128 bytes in length will not be bundled,
       
  1266 *    but will instead immediately generate an interrupt.  This does
       
  1267 *    not affect the current bundle in any way.  Any frame that is 128
       
  1268 *    bytes or large will be bundled normally.  This feature is meant
       
  1269 *    to provide immediate indication of ACK frames in a TCP environment.
       
  1270 *    Customers were seeing poor performance when a machine with CPUSaver
       
  1271 *    enabled was sending but not receiving.  The delay introduced when
       
  1272 *    the ACKs were received was enough to reduce total throughput, because
       
  1273 *    the sender would sit idle until the ACK was finally seen.
       
  1274 *
       
  1275 *    The current default is 0xFF80, which masks out the lower 7 bits.
       
  1276 *    This means that any frame which is x7F (127) bytes or smaller
       
  1277 *    will cause an immediate interrupt.  Because this value must be a
       
  1278 *    bit mask, there are only a few valid values that can be used.  To
       
  1279 *    turn this feature off, the driver can write the value xFFFF to the
       
  1280 *    lower word of this instruction (in the same way that the other
       
  1281 *    parameters are used).  Likewise, a value of 0xF800 (2047) would
       
  1282 *    cause an interrupt to be generated for every frame, because all
       
  1283 *    standard Ethernet frames are <= 2047 bytes in length.
       
  1284 *************************************************************************/
       
  1285 
       
  1286 /* if you wish to disable the ucode functionality, while maintaining the
       
  1287  * workarounds it provides, set the following defines to:
       
  1288  * BUNDLESMALL 0
       
  1289  * BUNDLEMAX 1
       
  1290  * INTDELAY 1
       
  1291  */
       
  1292 #define BUNDLESMALL 1
       
  1293 #define BUNDLEMAX (u16)6
       
  1294 #define INTDELAY (u16)1536 /* 0x600 */
       
  1295 
       
/* Initialize firmware */
/* Select, load and validate the CPUSaver microcode blob for this MAC
 * revision.  Returns NULL when no ucode applies (ICH or unsupported
 * revisions), an ERR_PTR on failure, or a cached/validated firmware
 * pointer which is also stored in nic->fw for later reinit. */
static const struct firmware *e100_request_firmware(struct nic *nic)
{
	const char *fw_name;
	const struct firmware *fw = nic->fw;
	u8 timer, bundle, min_size;
	int err = 0;

	/* do not load u-code for ICH devices */
	if (nic->flags & ich)
		return NULL;

	/* Search for ucode match against h/w revision */
	if (nic->mac == mac_82559_D101M)
		fw_name = FIRMWARE_D101M;
	else if (nic->mac == mac_82559_D101S)
		fw_name = FIRMWARE_D101S;
	else if (nic->mac == mac_82551_F || nic->mac == mac_82551_10)
		fw_name = FIRMWARE_D102E;
	else /* No ucode on other devices */
		return NULL;

	/* If the firmware has not previously been loaded, request a pointer
	 * to it. If it was previously loaded, we are reinitializing the
	 * adapter, possibly in a resume from hibernate, in which case
	 * request_firmware() cannot be used.
	 */
	if (!fw)
		err = request_firmware(&fw, fw_name, &nic->pdev->dev);

	if (err) {
		DPRINTK(PROBE, ERR, "Failed to load firmware \"%s\": %d\n",
			fw_name, err);
		return ERR_PTR(err);
	}

	/* Firmware should be precisely UCODE_SIZE (words) plus three bytes
	   indicating the offsets for BUNDLESMALL, BUNDLEMAX, INTDELAY */
	if (fw->size != UCODE_SIZE * 4 + 3) {
		DPRINTK(PROBE, ERR, "Firmware \"%s\" has wrong size %zu\n",
			fw_name, fw->size);
		release_firmware(fw);
		return ERR_PTR(-EINVAL);
	}

	/* Read timer, bundle and min_size from end of firmware blob */
	timer = fw->data[UCODE_SIZE * 4];
	bundle = fw->data[UCODE_SIZE * 4 + 1];
	min_size = fw->data[UCODE_SIZE * 4 + 2];

	/* Offsets index into the ucode word array — reject out-of-range */
	if (timer >= UCODE_SIZE || bundle >= UCODE_SIZE ||
	    min_size >= UCODE_SIZE) {
		DPRINTK(PROBE, ERR,
			"\"%s\" has bogus offset values (0x%x,0x%x,0x%x)\n",
			fw_name, timer, bundle, min_size);
		release_firmware(fw);
		return ERR_PTR(-EINVAL);
	}

	/* OK, firmware is validated and ready to use. Save a pointer
	 * to it in the nic */
	nic->fw = fw;
	return fw;
}
       
  1360 
       
/* cb_prepare handler: copy the validated ucode into the command block
 * and patch in the CPUSaver tunables (INTDELAY, BUNDLEMAX,
 * BUNDLESMALL) at the offsets carried in the firmware's trailer. */
static void e100_setup_ucode(struct nic *nic, struct cb *cb,
			     struct sk_buff *skb)
{
	const struct firmware *fw = (void *)skb;
	u8 timer, bundle, min_size;

	/* It's not a real skb; we just abused the fact that e100_exec_cb
	   will pass it through to here... */
	cb->skb = NULL;

	/* firmware is stored as little endian already */
	memcpy(cb->u.ucode, fw->data, UCODE_SIZE * 4);

	/* Read timer, bundle and min_size from end of firmware blob */
	timer = fw->data[UCODE_SIZE * 4];
	bundle = fw->data[UCODE_SIZE * 4 + 1];
	min_size = fw->data[UCODE_SIZE * 4 + 2];

	/* Insert user-tunable settings in cb->u.ucode: each tunable is
	 * the low 16 bits of a "move immediate" instruction word */
	cb->u.ucode[timer] &= cpu_to_le32(0xFFFF0000);
	cb->u.ucode[timer] |= cpu_to_le32(INTDELAY);
	cb->u.ucode[bundle] &= cpu_to_le32(0xFFFF0000);
	cb->u.ucode[bundle] |= cpu_to_le32(BUNDLEMAX);
	cb->u.ucode[min_size] &= cpu_to_le32(0xFFFF0000);
	cb->u.ucode[min_size] |= cpu_to_le32((BUNDLESMALL) ? 0xFFFF : 0xFF80);

	/* cb_el: interrupt/end-of-list so completion can be detected */
	cb->command = cpu_to_le16(cb_ucode | cb_el);
}
       
  1389 
       
/* Load the CPUSaver microcode (if any applies) and busy-wait for the
 * load command to complete.  Returns 0 on success (or when no ucode
 * is needed), a negative errno from firmware request/exec, or -EPERM
 * if the ucode command timed out or completed without cb_ok. */
static inline int e100_load_ucode_wait(struct nic *nic)
{
	const struct firmware *fw;
	int err = 0, counter = 50;
	struct cb *cb = nic->cb_to_clean;

	fw = e100_request_firmware(nic);
	/* If it's NULL, then no ucode is required */
	/* (PTR_ERR(NULL) == 0, so the no-ucode case returns success) */
	if (!fw || IS_ERR(fw))
		return PTR_ERR(fw);

	if ((err = e100_exec_cb(nic, (void *)fw, e100_setup_ucode)))
		DPRINTK(PROBE,ERR, "ucode cmd failed with error %d\n", err);

	/* must restart cuc */
	nic->cuc_cmd = cuc_start;

	/* wait for completion */
	e100_write_flush(nic);
	udelay(10);

	/* wait for possibly (ouch) 500ms */
	while (!(cb->status & cpu_to_le16(cb_complete))) {
		msleep(10);
		if (!--counter) break;
	}

	/* ack any interrupts, something could have been set */
	iowrite8(~0, &nic->csr->scb.stat_ack);

	/* if the command failed, or is not OK, notify and return */
	if (!counter || !(cb->status & cpu_to_le16(cb_ok))) {
		DPRINTK(PROBE,ERR, "ucode load failed\n");
		err = -EPERM;
	}

	return err;
}
       
  1428 
       
  1429 static void e100_setup_iaaddr(struct nic *nic, struct cb *cb,
       
  1430 	struct sk_buff *skb)
       
  1431 {
       
  1432 	cb->command = cpu_to_le16(cb_iaaddr);
       
  1433 	memcpy(cb->u.iaaddr, nic->netdev->dev_addr, ETH_ALEN);
       
  1434 }
       
  1435 
       
  1436 static void e100_dump(struct nic *nic, struct cb *cb, struct sk_buff *skb)
       
  1437 {
       
  1438 	cb->command = cpu_to_le16(cb_dump);
       
  1439 	cb->u.dump_buffer_addr = cpu_to_le32(nic->dma_addr +
       
  1440 		offsetof(struct mem, dump_buf));
       
  1441 }
       
  1442 
       
  1443 static int e100_phy_check_without_mii(struct nic *nic)
       
  1444 {
       
  1445 	u8 phy_type;
       
  1446 	int without_mii;
       
  1447 
       
  1448 	phy_type = (nic->eeprom[eeprom_phy_iface] >> 8) & 0x0f;
       
  1449 
       
  1450 	switch (phy_type) {
       
  1451 	case NoSuchPhy: /* Non-MII PHY; UNTESTED! */
       
  1452 	case I82503: /* Non-MII PHY; UNTESTED! */
       
  1453 	case S80C24: /* Non-MII PHY; tested and working */
       
  1454 		/* paragraph from the FreeBSD driver, "FXP_PHY_80C24":
       
  1455 		 * The Seeq 80c24 AutoDUPLEX(tm) Ethernet Interface Adapter
       
  1456 		 * doesn't have a programming interface of any sort.  The
       
  1457 		 * media is sensed automatically based on how the link partner
       
  1458 		 * is configured.  This is, in essence, manual configuration.
       
  1459 		 */
       
  1460 		DPRINTK(PROBE, INFO,
       
  1461 			 "found MII-less i82503 or 80c24 or other PHY\n");
       
  1462 
       
  1463 		nic->mdio_ctrl = mdio_ctrl_phy_mii_emulated;
       
  1464 		nic->mii.phy_id = 0; /* is this ok for an MII-less PHY? */
       
  1465 
       
  1466 		/* these might be needed for certain MII-less cards...
       
  1467 		 * nic->flags |= ich;
       
  1468 		 * nic->flags |= ich_10h_workaround; */
       
  1469 
       
  1470 		without_mii = 1;
       
  1471 		break;
       
  1472 	default:
       
  1473 		without_mii = 0;
       
  1474 		break;
       
  1475 	}
       
  1476 	return without_mii;
       
  1477 }
       
  1478 
       
  1479 #define NCONFIG_AUTO_SWITCH	0x0080
       
  1480 #define MII_NSC_CONG		MII_RESV1
       
  1481 #define NSC_CONG_ENABLE		0x0100
       
  1482 #define NSC_CONG_TXREADY	0x0400
       
  1483 #define ADVERTISE_FC_SUPPORTED	0x0400
       
/* Locate and initialize the PHY behind the MAC.
 *
 * Scans all 32 MDIO addresses (in order 1, 0, 2..31) for a responding
 * PHY, falling back to the EEPROM-based MII-less check when none
 * answers.  Isolates all non-selected PHYs, applies per-PHY
 * workarounds (82552 flow control, National tx congestion control)
 * and enables MDI/MDI-X auto-switching where supported.
 *
 * Returns 0 on success, -EAGAIN when no usable PHY could be found.
 */
static int e100_phy_init(struct nic *nic)
{
	struct net_device *netdev = nic->netdev;
	u32 addr;
	u16 bmcr, stat, id_lo, id_hi, cong;

	/* Discover phy addr by searching addrs in order {1,0,2,..., 31} */
	for (addr = 0; addr < 32; addr++) {
		nic->mii.phy_id = (addr == 0) ? 1 : (addr == 1) ? 0 : addr;
		bmcr = mdio_read(netdev, nic->mii.phy_id, MII_BMCR);
		/* BMSR is read twice — presumably because the status bits
		 * are latched and only the second read reflects the current
		 * state (standard MII behavior; confirm if modifying). */
		stat = mdio_read(netdev, nic->mii.phy_id, MII_BMSR);
		stat = mdio_read(netdev, nic->mii.phy_id, MII_BMSR);
		/* all-ones or all-zeros from both registers means nothing
		 * is answering at this address */
		if (!((bmcr == 0xFFFF) || ((stat == 0) && (bmcr == 0))))
			break;
	}
	if (addr == 32) {
		/* uhoh, no PHY detected: check whether we seem to be some
		 * weird, rare variant which is *known* to not have any MII.
		 * But do this AFTER MII checking only, since this does
		 * lookup of EEPROM values which may easily be unreliable. */
		if (e100_phy_check_without_mii(nic))
			return 0; /* simply return and hope for the best */
		else {
			/* for unknown cases log a fatal error */
			DPRINTK(HW, ERR,
				"Failed to locate any known PHY, aborting.\n");
			return -EAGAIN;
		}
	} else
		DPRINTK(HW, DEBUG, "phy_addr = %d\n", nic->mii.phy_id);

	/* Get phy ID */
	id_lo = mdio_read(netdev, nic->mii.phy_id, MII_PHYSID1);
	id_hi = mdio_read(netdev, nic->mii.phy_id, MII_PHYSID2);
	nic->phy = (u32)id_hi << 16 | (u32)id_lo;
	DPRINTK(HW, DEBUG, "phy ID = 0x%08X\n", nic->phy);

	/* Select the phy and isolate the rest */
	for (addr = 0; addr < 32; addr++) {
		if (addr != nic->mii.phy_id) {
			mdio_write(netdev, addr, MII_BMCR, BMCR_ISOLATE);
		} else if (nic->phy != phy_82552_v) {
			bmcr = mdio_read(netdev, addr, MII_BMCR);
			mdio_write(netdev, addr, MII_BMCR,
				bmcr & ~BMCR_ISOLATE);
		}
	}
	/*
	 * Workaround for 82552:
	 * Clear the ISOLATE bit on selected phy_id last (mirrored on all
	 * other phy_id's) using bmcr value from addr discovery loop above.
	 */
	if (nic->phy == phy_82552_v)
		mdio_write(netdev, nic->mii.phy_id, MII_BMCR,
			bmcr & ~BMCR_ISOLATE);

	/* Handle National tx phys */
#define NCS_PHY_MODEL_MASK	0xFFF0FFFF
	if ((nic->phy & NCS_PHY_MODEL_MASK) == phy_nsc_tx) {
		/* Disable congestion control */
		cong = mdio_read(netdev, nic->mii.phy_id, MII_NSC_CONG);
		cong |= NSC_CONG_TXREADY;
		cong &= ~NSC_CONG_ENABLE;
		mdio_write(netdev, nic->mii.phy_id, MII_NSC_CONG, cong);
	}

	if (nic->phy == phy_82552_v) {
		u16 advert = mdio_read(netdev, nic->mii.phy_id, MII_ADVERTISE);

		/* assign special tweaked mdio_ctrl() function */
		nic->mdio_ctrl = mdio_ctrl_phy_82552_v;

		/* Workaround Si not advertising flow-control during autoneg */
		advert |= ADVERTISE_PAUSE_CAP | ADVERTISE_PAUSE_ASYM;
		mdio_write(netdev, nic->mii.phy_id, MII_ADVERTISE, advert);

		/* Reset for the above changes to take effect */
		bmcr = mdio_read(netdev, nic->mii.phy_id, MII_BMCR);
		bmcr |= BMCR_RESET;
		mdio_write(netdev, nic->mii.phy_id, MII_BMCR, bmcr);
	} else if ((nic->mac >= mac_82550_D102) || ((nic->flags & ich) &&
	   (mdio_read(netdev, nic->mii.phy_id, MII_TPISTATUS) & 0x8000) &&
		!(nic->eeprom[eeprom_cnfg_mdix] & eeprom_mdix_enabled))) {
		/* enable/disable MDI/MDI-X auto-switching. */
		mdio_write(netdev, nic->mii.phy_id, MII_NCONFIG,
				nic->mii.force_media ? 0 : NCONFIG_AUTO_SWITCH);
	}

	return 0;
}
       
  1574 
       
  1575 static int e100_hw_init(struct nic *nic)
       
  1576 {
       
  1577 	int err;
       
  1578 
       
  1579 	e100_hw_reset(nic);
       
  1580 
       
  1581 	DPRINTK(HW, ERR, "e100_hw_init\n");
       
  1582 	if (!in_interrupt() && (err = e100_self_test(nic)))
       
  1583 		return err;
       
  1584 
       
  1585 	if ((err = e100_phy_init(nic)))
       
  1586 		return err;
       
  1587 	if ((err = e100_exec_cmd(nic, cuc_load_base, 0)))
       
  1588 		return err;
       
  1589 	if ((err = e100_exec_cmd(nic, ruc_load_base, 0)))
       
  1590 		return err;
       
  1591 	if ((err = e100_load_ucode_wait(nic)))
       
  1592 		return err;
       
  1593 	if ((err = e100_exec_cb(nic, NULL, e100_configure)))
       
  1594 		return err;
       
  1595 	if ((err = e100_exec_cb(nic, NULL, e100_setup_iaaddr)))
       
  1596 		return err;
       
  1597 	if ((err = e100_exec_cmd(nic, cuc_dump_addr,
       
  1598 		nic->dma_addr + offsetof(struct mem, stats))))
       
  1599 		return err;
       
  1600 	if ((err = e100_exec_cmd(nic, cuc_dump_reset, 0)))
       
  1601 		return err;
       
  1602 
       
  1603 	e100_disable_irq(nic);
       
  1604 
       
  1605 	return 0;
       
  1606 }
       
  1607 
       
  1608 static void e100_multi(struct nic *nic, struct cb *cb, struct sk_buff *skb)
       
  1609 {
       
  1610 	struct net_device *netdev = nic->netdev;
       
  1611 	struct dev_mc_list *list = netdev->mc_list;
       
  1612 	u16 i, count = min(netdev->mc_count, E100_MAX_MULTICAST_ADDRS);
       
  1613 
       
  1614 	cb->command = cpu_to_le16(cb_multi);
       
  1615 	cb->u.multi.count = cpu_to_le16(count * ETH_ALEN);
       
  1616 	for (i = 0; list && i < count; i++, list = list->next)
       
  1617 		memcpy(&cb->u.multi.addr[i*ETH_ALEN], &list->dmi_addr,
       
  1618 			ETH_ALEN);
       
  1619 }
       
  1620 
       
  1621 static void e100_set_multicast_list(struct net_device *netdev)
       
  1622 {
       
  1623 	struct nic *nic = netdev_priv(netdev);
       
  1624 
       
  1625 	DPRINTK(HW, DEBUG, "mc_count=%d, flags=0x%04X\n",
       
  1626 		netdev->mc_count, netdev->flags);
       
  1627 
       
  1628 	if (netdev->flags & IFF_PROMISC)
       
  1629 		nic->flags |= promiscuous;
       
  1630 	else
       
  1631 		nic->flags &= ~promiscuous;
       
  1632 
       
  1633 	if (netdev->flags & IFF_ALLMULTI ||
       
  1634 		netdev->mc_count > E100_MAX_MULTICAST_ADDRS)
       
  1635 		nic->flags |= multicast_all;
       
  1636 	else
       
  1637 		nic->flags &= ~multicast_all;
       
  1638 
       
  1639 	e100_exec_cb(nic, NULL, e100_configure);
       
  1640 	e100_exec_cb(nic, NULL, e100_multi);
       
  1641 }
       
  1642 
       
/* Harvest the hardware statistics dump area into netdev and driver
 * counters, then kick off the next dump-and-reset cycle.
 *
 * The "dump complete" marker lives at a MAC-generation-dependent
 * offset in struct stats, because newer MACs append extra counters:
 * pre-82558 dumps end at fc_xmt_pause, pre-82559 at xmt_tco_frames,
 * and 82559+ at the dedicated 'complete' field. */
static void e100_update_stats(struct nic *nic)
{
	struct net_device *dev = nic->netdev;
	struct net_device_stats *ns = &dev->stats;
	struct stats *s = &nic->mem->stats;
	__le32 *complete = (nic->mac < mac_82558_D101_A4) ? &s->fc_xmt_pause :
		(nic->mac < mac_82559_D101M) ? (__le32 *)&s->xmt_tco_frames :
		&s->complete;

	/* Device's stats reporting may take several microseconds to
	 * complete, so we're always waiting for results of the
	 * previous command. */

	if (*complete == cpu_to_le32(cuc_dump_reset_complete)) {
		*complete = 0; /* consume marker so this dump is read once */
		nic->tx_frames = le32_to_cpu(s->tx_good_frames);
		nic->tx_collisions = le32_to_cpu(s->tx_total_collisions);
		ns->tx_aborted_errors += le32_to_cpu(s->tx_max_collisions);
		ns->tx_window_errors += le32_to_cpu(s->tx_late_collisions);
		ns->tx_carrier_errors += le32_to_cpu(s->tx_lost_crs);
		ns->tx_fifo_errors += le32_to_cpu(s->tx_underruns);
		ns->collisions += nic->tx_collisions;
		ns->tx_errors += le32_to_cpu(s->tx_max_collisions) +
			le32_to_cpu(s->tx_lost_crs);
		ns->rx_length_errors += le32_to_cpu(s->rx_short_frame_errors) +
			nic->rx_over_length_errors;
		ns->rx_crc_errors += le32_to_cpu(s->rx_crc_errors);
		ns->rx_frame_errors += le32_to_cpu(s->rx_alignment_errors);
		ns->rx_over_errors += le32_to_cpu(s->rx_overrun_errors);
		ns->rx_fifo_errors += le32_to_cpu(s->rx_overrun_errors);
		ns->rx_missed_errors += le32_to_cpu(s->rx_resource_errors);
		ns->rx_errors += le32_to_cpu(s->rx_crc_errors) +
			le32_to_cpu(s->rx_alignment_errors) +
			le32_to_cpu(s->rx_short_frame_errors) +
			le32_to_cpu(s->rx_cdt_errors);
		nic->tx_deferred += le32_to_cpu(s->tx_deferred);
		nic->tx_single_collisions +=
			le32_to_cpu(s->tx_single_collisions);
		nic->tx_multiple_collisions +=
			le32_to_cpu(s->tx_multiple_collisions);
		/* extended counters only exist on newer MAC generations */
		if (nic->mac >= mac_82558_D101_A4) {
			nic->tx_fc_pause += le32_to_cpu(s->fc_xmt_pause);
			nic->rx_fc_pause += le32_to_cpu(s->fc_rcv_pause);
			nic->rx_fc_unsupported +=
				le32_to_cpu(s->fc_rcv_unsupported);
			if (nic->mac >= mac_82559_D101M) {
				nic->tx_tco_frames +=
					le16_to_cpu(s->xmt_tco_frames);
				nic->rx_tco_frames +=
					le16_to_cpu(s->rcv_tco_frames);
			}
		}
	}


	/* request the next dump; failure only means stats stay stale */
	if (e100_exec_cmd(nic, cuc_dump_reset, 0))
		DPRINTK(TX_ERR, DEBUG, "exec cuc_dump_reset failed\n");
}
       
  1701 
       
  1702 static void e100_adjust_adaptive_ifs(struct nic *nic, int speed, int duplex)
       
  1703 {
       
  1704 	/* Adjust inter-frame-spacing (IFS) between two transmits if
       
  1705 	 * we're getting collisions on a half-duplex connection. */
       
  1706 
       
  1707 	if (duplex == DUPLEX_HALF) {
       
  1708 		u32 prev = nic->adaptive_ifs;
       
  1709 		u32 min_frames = (speed == SPEED_100) ? 1000 : 100;
       
  1710 
       
  1711 		if ((nic->tx_frames / 32 < nic->tx_collisions) &&
       
  1712 		   (nic->tx_frames > min_frames)) {
       
  1713 			if (nic->adaptive_ifs < 60)
       
  1714 				nic->adaptive_ifs += 5;
       
  1715 		} else if (nic->tx_frames < min_frames) {
       
  1716 			if (nic->adaptive_ifs >= 5)
       
  1717 				nic->adaptive_ifs -= 5;
       
  1718 		}
       
  1719 		if (nic->adaptive_ifs != prev)
       
  1720 			e100_exec_cb(nic, NULL, e100_configure);
       
  1721 	}
       
  1722 }
       
  1723 
       
/* Timer-driven link maintenance.
 *
 * In EtherCAT mode only the MII link state is propagated to the
 * EtherCAT master; note the timer is NOT re-armed in that branch.
 * In normal operation this logs link transitions, raises a software
 * interrupt (to recover from rare Rx allocation failures), refreshes
 * statistics, adapts IFS, applies MAC-specific workarounds and
 * re-arms itself. */
static void e100_watchdog(unsigned long data)
{
	struct nic *nic = (struct nic *)data;
	struct ethtool_cmd cmd;

	DPRINTK(TIMER, DEBUG, "right now = %ld\n", jiffies);

	/* mii library handles link maintenance tasks */

	if (nic->ecdev) {
		ecdev_set_link(nic->ecdev, mii_link_ok(&nic->mii) ? 1 : 0);
	} else {
		mii_ethtool_gset(&nic->mii, &cmd);

		/* log only on link-state transitions (carrier vs. MII) */
		if (mii_link_ok(&nic->mii) && !netif_carrier_ok(nic->netdev)) {
			printk(KERN_INFO "e100: %s NIC Link is Up %s Mbps %s Duplex\n",
					nic->netdev->name,
					cmd.speed == SPEED_100 ? "100" : "10",
					cmd.duplex == DUPLEX_FULL ? "Full" : "Half");
		} else if (!mii_link_ok(&nic->mii) && netif_carrier_ok(nic->netdev)) {
			printk(KERN_INFO "e100: %s NIC Link is Down\n",
					nic->netdev->name);
		}

		mii_check_link(&nic->mii);

		/* Software generated interrupt to recover from (rare) Rx
		 * allocation failure.
		 * Unfortunately have to use a spinlock to not re-enable interrupts
		 * accidentally, due to hardware that shares a register between the
		 * interrupt mask bit and the SW Interrupt generation bit */
		spin_lock_irq(&nic->cmd_lock);
		iowrite8(ioread8(&nic->csr->scb.cmd_hi) | irq_sw_gen,&nic->csr->scb.cmd_hi);
		e100_write_flush(nic);
		spin_unlock_irq(&nic->cmd_lock);

		e100_update_stats(nic);
		e100_adjust_adaptive_ifs(nic, cmd.speed, cmd.duplex);

		if (nic->mac <= mac_82557_D100_C)
			/* Issue a multicast command to workaround a 557 lock up */
			e100_set_multicast_list(nic->netdev);

		if (nic->flags & ich && cmd.speed==SPEED_10 && cmd.duplex==DUPLEX_HALF)
			/* Need SW workaround for ICH[x] 10Mbps/half duplex Tx hang. */
			nic->flags |= ich_10h_workaround;
		else
			nic->flags &= ~ich_10h_workaround;

		/* re-arm, rounded to reduce wakeups */
		mod_timer(&nic->watchdog,
				round_jiffies(jiffies + E100_WATCHDOG_PERIOD));
	}
}
       
  1777 
       
/* cb setup callback for transmit: fill a control block with a Tx
 * command describing one skb as a single TBD, DMA-mapping the frame
 * data for device reads. */
static void e100_xmit_prepare(struct nic *nic, struct cb *cb,
	struct sk_buff *skb)
{
	cb->command = nic->tx_command;
	/* interrupt every 16 packets regardless of delay: the test is
	 * true exactly when cbs_avail is a multiple of 16 */
	if ((nic->cbs_avail & ~15) == nic->cbs_avail)
		cb->command |= cpu_to_le16(cb_i);
	cb->u.tcb.tbd_array = cb->dma_addr + offsetof(struct cb, u.tcb.tbd);
	cb->u.tcb.tcb_byte_count = 0;
	cb->u.tcb.threshold = nic->tx_threshold;
	cb->u.tcb.tbd_count = 1;
	cb->u.tcb.tbd.buf_addr = cpu_to_le32(pci_map_single(nic->pdev,
		skb->data, skb->len, PCI_DMA_TODEVICE));
	/* check for mapping failure? */
	cb->u.tcb.tbd.size = cpu_to_le16(skb->len);
}
       
  1794 
       
  1795 static netdev_tx_t e100_xmit_frame(struct sk_buff *skb,
       
  1796 				   struct net_device *netdev)
       
  1797 {
       
  1798 	struct nic *nic = netdev_priv(netdev);
       
  1799 	int err;
       
  1800 
       
  1801 	if (nic->flags & ich_10h_workaround) {
       
  1802 		/* SW workaround for ICH[x] 10Mbps/half duplex Tx hang.
       
  1803 		   Issue a NOP command followed by a 1us delay before
       
  1804 		   issuing the Tx command. */
       
  1805 		if (e100_exec_cmd(nic, cuc_nop, 0))
       
  1806 			DPRINTK(TX_ERR, DEBUG, "exec cuc_nop failed\n");
       
  1807 		udelay(1);
       
  1808 	}
       
  1809 
       
  1810 	err = e100_exec_cb(nic, skb, e100_xmit_prepare);
       
  1811 
       
  1812 	switch (err) {
       
  1813 	case -ENOSPC:
       
  1814 		/* We queued the skb, but now we're out of space. */
       
  1815 		DPRINTK(TX_ERR, DEBUG, "No space for CB\n");
       
  1816 		if (!nic->ecdev)
       
  1817 			netif_stop_queue(netdev);
       
  1818 		break;
       
  1819 	case -ENOMEM:
       
  1820 		/* This is a hard error - log it. */
       
  1821 		DPRINTK(TX_ERR, DEBUG, "Out of Tx resources, returning skb\n");
       
  1822 		if (!nic->ecdev)
       
  1823 			netif_stop_queue(netdev);
       
  1824 		return NETDEV_TX_BUSY;
       
  1825 	}
       
  1826 
       
  1827 	netdev->trans_start = jiffies;
       
  1828 	return NETDEV_TX_OK;
       
  1829 }
       
  1830 
       
/* Reclaim every Tx control block the hardware has marked complete:
 * account Tx statistics, unmap the frame DMA buffer, free the skb (in
 * non-EtherCAT mode only) and return the CB to the available pool.
 *
 * Returns non-zero when at least one CB was cleaned, so callers can
 * decide whether to wake a stopped Tx queue. */
static int e100_tx_clean(struct nic *nic)
{
	struct net_device *dev = nic->netdev;
	struct cb *cb;
	int tx_cleaned = 0;

	/* in EtherCAT mode the CB lock is skipped (ecdev operation
	 * appears to run lock-free — see other !nic->ecdev guards) */
	if (!nic->ecdev)
		spin_lock(&nic->cb_lock);

	/* Clean CBs marked complete */
	for (cb = nic->cb_to_clean;
	    cb->status & cpu_to_le16(cb_complete);
	    cb = nic->cb_to_clean = cb->next) {
		DPRINTK(TX_DONE, DEBUG, "cb[%d]->status = 0x%04X\n",
		        (int)(((void*)cb - (void*)nic->cbs)/sizeof(struct cb)),
		        cb->status);

		/* only transmit CBs carry an skb; non-Tx commands
		 * (configure, multicast, ...) leave cb->skb == NULL */
		if (likely(cb->skb != NULL)) {
			dev->stats.tx_packets++;
			dev->stats.tx_bytes += cb->skb->len;

			pci_unmap_single(nic->pdev,
				le32_to_cpu(cb->u.tcb.tbd.buf_addr),
				le16_to_cpu(cb->u.tcb.tbd.size),
				PCI_DMA_TODEVICE);
			/* EtherCAT frames are not freed here — presumably
			 * the ecdev layer owns them; verify against master */
			if (!nic->ecdev)
				dev_kfree_skb_any(cb->skb);
			cb->skb = NULL;
			tx_cleaned = 1;
		}
		cb->status = 0;
		nic->cbs_avail++;
	}

	if (!nic->ecdev) {
		spin_unlock(&nic->cb_lock);

		/* Recover from running out of Tx resources in xmit_frame */
		if (unlikely(tx_cleaned && netif_queue_stopped(nic->netdev)))
			netif_wake_queue(nic->netdev);
	}

	return tx_cleaned;
}
       
  1875 
       
/* Tear down the control-block ring: unmap any Tx buffers still in
 * flight, free their skbs (non-EtherCAT mode only), return the CB
 * array to its PCI pool and reset all ring pointers/state. */
static void e100_clean_cbs(struct nic *nic)
{
	if (nic->cbs) {
		/* walk until every CB is accounted back as available */
		while (nic->cbs_avail != nic->params.cbs.count) {
			struct cb *cb = nic->cb_to_clean;
			if (cb->skb) {
				pci_unmap_single(nic->pdev,
					le32_to_cpu(cb->u.tcb.tbd.buf_addr),
					le16_to_cpu(cb->u.tcb.tbd.size),
					PCI_DMA_TODEVICE);
				/* EtherCAT frames are not freed here —
				 * presumably owned by the ecdev layer */
				if (!nic->ecdev)
					dev_kfree_skb(cb->skb);
			}
			nic->cb_to_clean = nic->cb_to_clean->next;
			nic->cbs_avail++;
		}
		pci_pool_free(nic->cbs_pool, nic->cbs, nic->cbs_dma_addr);
		nic->cbs = NULL;
		nic->cbs_avail = 0;
	}
	nic->cuc_cmd = cuc_start;
	/* nic->cbs is NULL here when the pool buffer was freed above */
	nic->cb_to_use = nic->cb_to_send = nic->cb_to_clean =
		nic->cbs;
}
       
  1900 
       
/* Allocate and initialize the circular control-block (CB) ring from
 * the PCI pool: chain each CB to its neighbours with virtual next/prev
 * pointers and with little-endian DMA 'link' addresses the hardware
 * follows.
 *
 * Returns 0 on success, -ENOMEM if the pool allocation fails. */
static int e100_alloc_cbs(struct nic *nic)
{
	struct cb *cb;
	unsigned int i, count = nic->params.cbs.count;

	nic->cuc_cmd = cuc_start;
	nic->cb_to_use = nic->cb_to_send = nic->cb_to_clean = NULL;
	nic->cbs_avail = 0;

	nic->cbs = pci_pool_alloc(nic->cbs_pool, GFP_KERNEL,
				  &nic->cbs_dma_addr);
	if (!nic->cbs)
		return -ENOMEM;
	memset(nic->cbs, 0, count * sizeof(struct cb));

	for (cb = nic->cbs, i = 0; i < count; cb++, i++) {
		/* next/prev wrap around so the array forms a ring */
		cb->next = (i + 1 < count) ? cb + 1 : nic->cbs;
		cb->prev = (i == 0) ? nic->cbs + count - 1 : cb - 1;

		cb->dma_addr = nic->cbs_dma_addr + i * sizeof(struct cb);
		/* hardware-visible link to the next CB's DMA address */
		cb->link = cpu_to_le32(nic->cbs_dma_addr +
			((i+1) % count) * sizeof(struct cb));
	}

	nic->cb_to_use = nic->cb_to_send = nic->cb_to_clean = nic->cbs;
	nic->cbs_avail = count;

	return 0;
}
       
  1930 
       
  1931 static inline void e100_start_receiver(struct nic *nic, struct rx *rx)
       
  1932 {
       
  1933 	if (!nic->rxs) return;
       
  1934 	if (RU_SUSPENDED != nic->ru_running) return;
       
  1935 
       
  1936 	/* handle init time starts */
       
  1937 	if (!rx) rx = nic->rxs;
       
  1938 
       
  1939 	/* (Re)start RU if suspended or idle and RFA is non-NULL */
       
  1940 	if (rx->skb) {
       
  1941 		e100_exec_cmd(nic, ruc_start, rx->dma_addr);
       
  1942 		nic->ru_running = RU_RUNNING;
       
  1943 	}
       
  1944 }
       
  1945 
       
  1946 #define RFD_BUF_LEN (sizeof(struct rfd) + VLAN_ETH_FRAME_LEN)
       
/* Allocate and map one Rx buffer: copy the blank RFD template into a
 * fresh skb, DMA-map it bidirectionally, and splice it onto the end
 * of the RFA by patching the previous RFD's link pointer.
 *
 * Returns 0 on success, -ENOMEM on allocation or mapping failure. */
static int e100_rx_alloc_skb(struct nic *nic, struct rx *rx)
{
	if (!(rx->skb = netdev_alloc_skb_ip_align(nic->netdev, RFD_BUF_LEN)))
		return -ENOMEM;

	/* Init, and map the RFD. */
	skb_copy_to_linear_data(rx->skb, &nic->blank_rfd, sizeof(struct rfd));
	rx->dma_addr = pci_map_single(nic->pdev, rx->skb->data,
		RFD_BUF_LEN, PCI_DMA_BIDIRECTIONAL);

	if (pci_dma_mapping_error(nic->pdev, rx->dma_addr)) {
		dev_kfree_skb_any(rx->skb);
		rx->skb = NULL;
		rx->dma_addr = 0;
		return -ENOMEM;
	}

	/* Link the RFD to end of RFA by linking previous RFD to
	 * this one.  We are safe to touch the previous RFD because
	 * it is protected by the before last buffer's el bit being set */
	if (rx->prev->skb) {
		struct rfd *prev_rfd = (struct rfd *)rx->prev->skb->data;
		/* unaligned store: the link field lives inside skb data */
		put_unaligned_le32(rx->dma_addr, &prev_rfd->link);
		pci_dma_sync_single_for_device(nic->pdev, rx->prev->dma_addr,
			sizeof(struct rfd), PCI_DMA_BIDIRECTIONAL);
	}

	return 0;
}
       
  1976 
       
  1977 static int e100_rx_indicate(struct nic *nic, struct rx *rx,
       
  1978 	unsigned int *work_done, unsigned int work_to_do)
       
  1979 {
       
  1980 	struct net_device *dev = nic->netdev;
       
  1981 	struct sk_buff *skb = rx->skb;
       
  1982 	struct rfd *rfd = (struct rfd *)skb->data;
       
  1983 	u16 rfd_status, actual_size;
       
  1984 
       
  1985 	if (unlikely(work_done && *work_done >= work_to_do))
       
  1986 		return -EAGAIN;
       
  1987 
       
  1988 	/* Need to sync before taking a peek at cb_complete bit */
       
  1989 	pci_dma_sync_single_for_cpu(nic->pdev, rx->dma_addr,
       
  1990 		sizeof(struct rfd), PCI_DMA_BIDIRECTIONAL);
       
  1991 	rfd_status = le16_to_cpu(rfd->status);
       
  1992 
       
  1993 	DPRINTK(RX_STATUS, DEBUG, "status=0x%04X\n", rfd_status);
       
  1994 
       
  1995 	/* If data isn't ready, nothing to indicate */
       
  1996 	if (unlikely(!(rfd_status & cb_complete))) {
       
  1997 		/* If the next buffer has the el bit, but we think the receiver
       
  1998 		 * is still running, check to see if it really stopped while
       
  1999 		 * we had interrupts off.
       
  2000 		 * This allows for a fast restart without re-enabling
       
  2001 		 * interrupts */
       
  2002 		if ((le16_to_cpu(rfd->command) & cb_el) &&
       
  2003 		    (RU_RUNNING == nic->ru_running))
       
  2004 
       
  2005 			if (ioread8(&nic->csr->scb.status) & rus_no_res)
       
  2006 				nic->ru_running = RU_SUSPENDED;
       
  2007 		pci_dma_sync_single_for_device(nic->pdev, rx->dma_addr,
       
  2008 					       sizeof(struct rfd),
       
  2009 					       PCI_DMA_FROMDEVICE);
       
  2010 		return -ENODATA;
       
  2011 	}
       
  2012 
       
  2013 	/* Get actual data size */
       
  2014 	actual_size = le16_to_cpu(rfd->actual_size) & 0x3FFF;
       
  2015 	if (unlikely(actual_size > RFD_BUF_LEN - sizeof(struct rfd)))
       
  2016 		actual_size = RFD_BUF_LEN - sizeof(struct rfd);
       
  2017 
       
  2018 	/* Get data */
       
  2019 	pci_unmap_single(nic->pdev, rx->dma_addr,
       
  2020 		RFD_BUF_LEN, PCI_DMA_BIDIRECTIONAL);
       
  2021 
       
  2022 	/* If this buffer has the el bit, but we think the receiver
       
  2023 	 * is still running, check to see if it really stopped while
       
  2024 	 * we had interrupts off.
       
  2025 	 * This allows for a fast restart without re-enabling interrupts.
       
  2026 	 * This can happen when the RU sees the size change but also sees
       
  2027 	 * the el bit set. */
       
  2028 	if ((le16_to_cpu(rfd->command) & cb_el) &&
       
  2029 	    (RU_RUNNING == nic->ru_running)) {
       
  2030 
       
  2031 	    if (ioread8(&nic->csr->scb.status) & rus_no_res)
       
  2032 		nic->ru_running = RU_SUSPENDED;
       
  2033 	}
       
  2034 
       
  2035 	if (!nic->ecdev) {
       
  2036 		/* Pull off the RFD and put the actual data (minus eth hdr) */
       
  2037 		skb_reserve(skb, sizeof(struct rfd));
       
  2038 		skb_put(skb, actual_size);
       
  2039 		skb->protocol = eth_type_trans(skb, nic->netdev);
       
  2040 	}
       
  2041 
       
  2042 	if (unlikely(!(rfd_status & cb_ok))) {
       
  2043 		if (!nic->ecdev) {
       
  2044 			/* Don't indicate if hardware indicates errors */
       
  2045 			dev_kfree_skb_any(skb);
       
  2046 		}
       
  2047 	} else if (actual_size > ETH_DATA_LEN + VLAN_ETH_HLEN) {
       
  2048 		/* Don't indicate oversized frames */
       
  2049 		nic->rx_over_length_errors++;
       
  2050 		if (!nic->ecdev)
       
  2051 			dev_kfree_skb_any(skb);
       
  2052 	} else {
       
  2053 		dev->stats.rx_packets++;
       
  2054 		dev->stats.rx_bytes += actual_size;
       
  2055 		if (nic->ecdev) {
       
  2056 			ecdev_receive(nic->ecdev,
       
  2057 					skb->data + sizeof(struct rfd), actual_size);
       
  2058 
       
  2059 			// No need to detect link status as
       
  2060 			// long as frames are received: Reset watchdog.
       
  2061 			nic->ec_watchdog_jiffies = jiffies;
       
  2062 		} else {
       
  2063 			netif_receive_skb(skb);
       
  2064 		}
       
  2065 		if (work_done)
       
  2066 			(*work_done)++;
       
  2067 	}
       
  2068 
       
  2069 	if (nic->ecdev) {
       
  2070 		// make receive frame descriptior usable again
       
  2071 		memcpy(skb->data, &nic->blank_rfd, sizeof(struct rfd));
       
  2072 		rx->dma_addr = pci_map_single(nic->pdev, skb->data,
       
  2073 				RFD_BUF_LEN, PCI_DMA_BIDIRECTIONAL);
       
  2074 		if (pci_dma_mapping_error(nic->pdev, rx->dma_addr)) {
       
  2075 			rx->dma_addr = 0;
       
  2076 		}
       
  2077 
       
  2078 		/* Link the RFD to end of RFA by linking previous RFD to
       
  2079 		 * this one.  We are safe to touch the previous RFD because
       
  2080 		 * it is protected by the before last buffer's el bit being set */
       
  2081 		if (rx->prev->skb) {
       
  2082 			struct rfd *prev_rfd = (struct rfd *)rx->prev->skb->data;
       
  2083 			put_unaligned_le32(rx->dma_addr, &prev_rfd->link);
       
  2084 			pci_dma_sync_single_for_device(nic->pdev, rx->prev->dma_addr,
       
  2085 					sizeof(struct rfd), PCI_DMA_TODEVICE);
       
  2086 		}
       
  2087 	} else {
       
  2088 		rx->skb = NULL;
       
  2089 	}
       
  2090 
       
  2091 	return 0;
       
  2092 }
       
  2093 
       
/* Pass received frames up the stack, refill the RFD list with fresh skbs
 * (non-EtherCAT mode only), advance the el-bit "stopping point" in the
 * ring, and restart the receive unit if it went into the No Resources
 * state.  work_done may be NULL (EtherCAT polling path via e100_ec_poll). */
static void e100_rx_clean(struct nic *nic, unsigned int *work_done,
	unsigned int work_to_do)
{
	struct rx *rx;
	int restart_required = 0, err = 0;
	struct rx *old_before_last_rx, *new_before_last_rx;
	struct rfd *old_before_last_rfd, *new_before_last_rfd;

	/* Indicate newly arrived packets */
	for (rx = nic->rx_to_clean; rx->skb; rx = nic->rx_to_clean = rx->next) {
		err = e100_rx_indicate(nic, rx, work_done, work_to_do);
		/* Hit quota or no more to clean */
		if (-EAGAIN == err || -ENODATA == err)
			break;
	}


	/* On EAGAIN, hit quota so have more work to do, restart once
	 * cleanup is complete.
	 * Else, are we already rnr? then pay attention!!! this ensures that
	 * the state machine progression never allows a start with a
	 * partially cleaned list, avoiding a race between hardware
	 * and rx_to_clean when in NAPI mode */
	if (-EAGAIN != err && RU_SUSPENDED == nic->ru_running)
		restart_required = 1;

	/* Remember the current el-bit holder before possibly refilling. */
	old_before_last_rx = nic->rx_to_use->prev->prev;
	old_before_last_rfd = (struct rfd *)old_before_last_rx->skb->data;

	if (!nic->ecdev) {
		/* Alloc new skbs to refill list */
		for(rx = nic->rx_to_use; !rx->skb; rx = nic->rx_to_use = rx->next) {
			if(unlikely(e100_rx_alloc_skb(nic, rx)))
				break; /* Better luck next time (see watchdog) */
		}
	}

	new_before_last_rx = nic->rx_to_use->prev->prev;
	if (new_before_last_rx != old_before_last_rx) {
		/* Set the el-bit on the buffer that is before the last buffer.
		 * This lets us update the next pointer on the last buffer
		 * without worrying about hardware touching it.
		 * We set the size to 0 to prevent hardware from touching this
		 * buffer.
		 * When the hardware hits the before last buffer with el-bit
		 * and size of 0, it will RNR interrupt, the RUS will go into
		 * the No Resources state.  It will not complete nor write to
		 * this buffer. */
		new_before_last_rfd =
			(struct rfd *)new_before_last_rx->skb->data;
		new_before_last_rfd->size = 0;
		new_before_last_rfd->command |= cpu_to_le16(cb_el);
		pci_dma_sync_single_for_device(nic->pdev,
			new_before_last_rx->dma_addr, sizeof(struct rfd),
			PCI_DMA_BIDIRECTIONAL);

		/* Now that we have a new stopping point, we can clear the old
		 * stopping point.  We must sync twice to get the proper
		 * ordering on the hardware side of things. */
		old_before_last_rfd->command &= ~cpu_to_le16(cb_el);
		pci_dma_sync_single_for_device(nic->pdev,
			old_before_last_rx->dma_addr, sizeof(struct rfd),
			PCI_DMA_BIDIRECTIONAL);
		old_before_last_rfd->size = cpu_to_le16(VLAN_ETH_FRAME_LEN);
		pci_dma_sync_single_for_device(nic->pdev,
			old_before_last_rx->dma_addr, sizeof(struct rfd),
			PCI_DMA_BIDIRECTIONAL);
	}

	if (restart_required) {
		// ack the rnr?
		iowrite8(stat_ack_rnr, &nic->csr->scb.stat_ack);
		e100_start_receiver(nic, nic->rx_to_clean);
		if (work_done)
			(*work_done)++;
	}
}
       
  2171 
       
  2172 static void e100_rx_clean_list(struct nic *nic)
       
  2173 {
       
  2174 	struct rx *rx;
       
  2175 	unsigned int i, count = nic->params.rfds.count;
       
  2176 
       
  2177 	nic->ru_running = RU_UNINITIALIZED;
       
  2178 
       
  2179 	if (nic->rxs) {
       
  2180 		for (rx = nic->rxs, i = 0; i < count; rx++, i++) {
       
  2181 			if (rx->skb) {
       
  2182 				pci_unmap_single(nic->pdev, rx->dma_addr,
       
  2183 					RFD_BUF_LEN, PCI_DMA_BIDIRECTIONAL);
       
  2184 				dev_kfree_skb(rx->skb);
       
  2185 			}
       
  2186 		}
       
  2187 		kfree(nic->rxs);
       
  2188 		nic->rxs = NULL;
       
  2189 	}
       
  2190 
       
  2191 	nic->rx_to_use = nic->rx_to_clean = NULL;
       
  2192 }
       
  2193 
       
/* Allocate the circular receive ring: "count" struct rx nodes linked
 * prev/next into a ring, each backed by an skb-mapped RFD.  In
 * non-EtherCAT mode the before-last RFD gets the el-bit and size 0 so
 * the hardware stops there (RNR) rather than touching the last buffer
 * while the driver updates its link pointer.
 * Returns 0 on success, -ENOMEM on allocation failure (everything
 * allocated so far is released). */
static int e100_rx_alloc_list(struct nic *nic)
{
	struct rx *rx;
	unsigned int i, count = nic->params.rfds.count;
	struct rfd *before_last;

	nic->rx_to_use = nic->rx_to_clean = NULL;
	nic->ru_running = RU_UNINITIALIZED;

	/* GFP_ATOMIC: this may run in a context that cannot sleep */
	if (!(nic->rxs = kcalloc(count, sizeof(struct rx), GFP_ATOMIC)))
		return -ENOMEM;

	for (rx = nic->rxs, i = 0; i < count; rx++, i++) {
		rx->next = (i + 1 < count) ? rx + 1 : nic->rxs;
		rx->prev = (i == 0) ? nic->rxs + count - 1 : rx - 1;
		if (e100_rx_alloc_skb(nic, rx)) {
			/* Partial allocation: undo what we have so far */
			e100_rx_clean_list(nic);
			return -ENOMEM;
		}
	}

	if (!nic->ecdev) {
		/* Set the el-bit on the buffer that is before the last buffer.
		 * This lets us update the next pointer on the last buffer without
		 * worrying about hardware touching it.
		 * We set the size to 0 to prevent hardware from touching this buffer.
		 * When the hardware hits the before last buffer with el-bit and size
		 * of 0, it will RNR interrupt, the RU will go into the No Resources
		 * state.  It will not complete nor write to this buffer. */
		rx = nic->rxs->prev->prev;
		before_last = (struct rfd *)rx->skb->data;
		before_last->command |= cpu_to_le16(cb_el);
		before_last->size = 0;
		pci_dma_sync_single_for_device(nic->pdev, rx->dma_addr,
				sizeof(struct rfd), PCI_DMA_BIDIRECTIONAL);
	}

	nic->rx_to_use = nic->rx_to_clean = nic->rxs;
	nic->ru_running = RU_SUSPENDED;

	return 0;
}
       
  2236 
       
/* Interrupt handler: acknowledge the chip's status bits, record an RNR
 * (receive out of resources) condition so the cleanup path can restart
 * the receive unit, and schedule NAPI — unless the device is claimed by
 * the EtherCAT master, which polls via e100_ec_poll() instead. */
static irqreturn_t e100_intr(int irq, void *dev_id)
{
	struct net_device *netdev = dev_id;
	struct nic *nic = netdev_priv(netdev);
	u8 stat_ack = ioread8(&nic->csr->scb.stat_ack);

	DPRINTK(INTR, DEBUG, "stat_ack = 0x%02X\n", stat_ack);

	if (stat_ack == stat_ack_not_ours ||	/* Not our interrupt */
	   stat_ack == stat_ack_not_present)	/* Hardware is ejected */
		return IRQ_NONE;

	/* Ack interrupt(s) */
	iowrite8(stat_ack, &nic->csr->scb.stat_ack);

	/* We hit Receive No Resource (RNR); restart RU after cleaning */
	if (stat_ack & stat_ack_rnr)
		nic->ru_running = RU_SUSPENDED;

	/* EtherCAT devices have no NAPI context; cleanup happens in
	 * e100_ec_poll() instead of the softirq poll loop. */
	if (!nic->ecdev && likely(napi_schedule_prep(&nic->napi))) {
		e100_disable_irq(nic);
		__napi_schedule(&nic->napi);
	}

	return IRQ_HANDLED;
}
       
  2263 
       
  2264 void e100_ec_poll(struct net_device *netdev)
       
  2265 {
       
  2266 	struct nic *nic = netdev_priv(netdev);
       
  2267 
       
  2268 	e100_rx_clean(nic, NULL, 100);
       
  2269 	e100_tx_clean(nic);
       
  2270 
       
  2271     if (jiffies - nic->ec_watchdog_jiffies >= 2 * HZ) {
       
  2272         e100_watchdog((unsigned long) nic);
       
  2273         nic->ec_watchdog_jiffies = jiffies;
       
  2274     }
       
  2275 }
       
  2276 
       
  2277 
       
  2278 static int e100_poll(struct napi_struct *napi, int budget)
       
  2279 {
       
  2280 	struct nic *nic = container_of(napi, struct nic, napi);
       
  2281 	unsigned int work_done = 0;
       
  2282 
       
  2283 	e100_rx_clean(nic, &work_done, budget);
       
  2284 	e100_tx_clean(nic);
       
  2285 
       
  2286 	/* If budget not fully consumed, exit the polling mode */
       
  2287 	if (work_done < budget) {
       
  2288 		napi_complete(napi);
       
  2289 		e100_enable_irq(nic);
       
  2290 	}
       
  2291 
       
  2292 	return work_done;
       
  2293 }
       
  2294 
       
  2295 #ifdef CONFIG_NET_POLL_CONTROLLER
       
/* netpoll hook (netconsole etc.): with interrupts masked, run the ISR
 * by hand and reap completed transmits, then unmask. */
static void e100_netpoll(struct net_device *netdev)
{
	struct nic *nic = netdev_priv(netdev);

	e100_disable_irq(nic);
	e100_intr(nic->pdev->irq, netdev);
	e100_tx_clean(nic);
	e100_enable_irq(nic);
}
       
  2305 #endif
       
  2306 
       
  2307 static int e100_set_mac_address(struct net_device *netdev, void *p)
       
  2308 {
       
  2309 	struct nic *nic = netdev_priv(netdev);
       
  2310 	struct sockaddr *addr = p;
       
  2311 
       
  2312 	if (!is_valid_ether_addr(addr->sa_data))
       
  2313 		return -EADDRNOTAVAIL;
       
  2314 
       
  2315 	memcpy(netdev->dev_addr, addr->sa_data, netdev->addr_len);
       
  2316 	e100_exec_cb(nic, NULL, e100_setup_iaaddr);
       
  2317 
       
  2318 	return 0;
       
  2319 }
       
  2320 
       
  2321 static int e100_change_mtu(struct net_device *netdev, int new_mtu)
       
  2322 {
       
  2323 	if (new_mtu < ETH_ZLEN || new_mtu > ETH_DATA_LEN)
       
  2324 		return -EINVAL;
       
  2325 	netdev->mtu = new_mtu;
       
  2326 	return 0;
       
  2327 }
       
  2328 
       
  2329 static int e100_asf(struct nic *nic)
       
  2330 {
       
  2331 	/* ASF can be enabled from eeprom */
       
  2332 	return((nic->pdev->device >= 0x1050) && (nic->pdev->device <= 0x1057) &&
       
  2333 	   (nic->eeprom[eeprom_config_asf] & eeprom_asf) &&
       
  2334 	   !(nic->eeprom[eeprom_config_asf] & eeprom_gcl) &&
       
  2335 	   ((nic->eeprom[eeprom_smbus_addr] & 0xFF) != 0xFE));
       
  2336 }
       
  2337 
       
/* Bring the interface up: allocate the RX ring and command blocks,
 * initialize the hardware, start the receiver and hook up the
 * interrupt.  The watchdog timer, queue wakeup and NAPI/IRQ enabling
 * are skipped in EtherCAT mode, where e100_ec_poll() drives the device.
 * Returns 0 on success or a negative errno; on failure everything
 * allocated so far is unwound through the goto ladder. */
static int e100_up(struct nic *nic)
{
	int err;

	if ((err = e100_rx_alloc_list(nic)))
		return err;
	if ((err = e100_alloc_cbs(nic)))
		goto err_rx_clean_list;
	if ((err = e100_hw_init(nic)))
		goto err_clean_cbs;
	e100_set_multicast_list(nic->netdev);
	e100_start_receiver(nic, NULL);
	if (!nic->ecdev) {
		mod_timer(&nic->watchdog, jiffies);
	}
	if ((err = request_irq(nic->pdev->irq, e100_intr, IRQF_SHARED,
		nic->netdev->name, nic->netdev)))
		goto err_no_irq;
	if (!nic->ecdev) {
		netif_wake_queue(nic->netdev);
		napi_enable(&nic->napi);
		/* enable ints _after_ enabling poll, preventing a race between
		 * disable ints+schedule */
		e100_enable_irq(nic);
	}
	return 0;

err_no_irq:
	if (!nic->ecdev)
		del_timer_sync(&nic->watchdog);
err_clean_cbs:
	e100_clean_cbs(nic);
err_rx_clean_list:
	e100_rx_clean_list(nic);
	return err;
}
       
  2374 
       
/* Tear the interface down in (roughly) the reverse order of e100_up().
 * NAPI, the netif queue, the watchdog and carrier state are only
 * touched in non-EtherCAT mode, matching what e100_up() enabled. */
static void e100_down(struct nic *nic)
{
	if (!nic->ecdev) {
		/* wait here for poll to complete */
		napi_disable(&nic->napi);
		netif_stop_queue(nic->netdev);
	}
	e100_hw_reset(nic);
	free_irq(nic->pdev->irq, nic->netdev);
	if (!nic->ecdev) {
		del_timer_sync(&nic->watchdog);
		netif_carrier_off(nic->netdev);
	}
	e100_clean_cbs(nic);
	e100_rx_clean_list(nic);
}
       
  2391 
       
/* net_device tx_timeout hook: defer recovery to process context — the
 * recovery path (e100_up) calls request_irq(), which must not run in
 * interrupt context. */
static void e100_tx_timeout(struct net_device *netdev)
{
	struct nic *nic = netdev_priv(netdev);

	/* Reset outside of interrupt context, to avoid request_irq
	 * in interrupt context */
	schedule_work(&nic->tx_timeout_task);
}
       
  2400 
       
/* Workqueue body for e100_tx_timeout(): recover from a TX hang by
 * fully tearing the interface down and bringing it back up. */
static void e100_tx_timeout_task(struct work_struct *work)
{
	struct nic *nic = container_of(work, struct nic, tx_timeout_task);
	struct net_device *netdev = nic->netdev;

	DPRINTK(TX_ERR, DEBUG, "scb.status=0x%02X\n",
		ioread8(&nic->csr->scb.status));
	e100_down(netdev_priv(netdev));
	e100_up(netdev_priv(netdev));
}
       
  2411 
       
/* Internal MAC or PHY loopback self-test: transmit one all-0xFF frame
 * in loopback mode and compare the received copy byte-for-byte.
 * Returns 0 on success, -EAGAIN on data mismatch, or a negative errno
 * on setup failure.  All driver resources are released before return. */
static int e100_loopback_test(struct nic *nic, enum loopback loopback_mode)
{
	int err;
	struct sk_buff *skb;

	/* Use driver resources to perform internal MAC or PHY
	 * loopback test.  A single packet is prepared and transmitted
	 * in loopback mode, and the test passes if the received
	 * packet compares byte-for-byte to the transmitted packet. */

	if ((err = e100_rx_alloc_list(nic)))
		return err;
	if ((err = e100_alloc_cbs(nic)))
		goto err_clean_rx;

	/* ICH PHY loopback is broken so do MAC loopback instead */
	if (nic->flags & ich && loopback_mode == lb_phy)
		loopback_mode = lb_mac;

	nic->loopback = loopback_mode;
	if ((err = e100_hw_init(nic)))
		goto err_loopback_none;

	if (loopback_mode == lb_phy)
		mdio_write(nic->netdev, nic->mii.phy_id, MII_BMCR,
			BMCR_LOOPBACK);

	e100_start_receiver(nic, NULL);

	if (!(skb = netdev_alloc_skb(nic->netdev, ETH_DATA_LEN))) {
		err = -ENOMEM;
		goto err_loopback_none;
	}
	skb_put(skb, ETH_DATA_LEN);
	memset(skb->data, 0xFF, ETH_DATA_LEN);
	e100_xmit_frame(skb, nic->netdev);

	/* Give the frame time to loop back before inspecting the ring. */
	msleep(10);

	pci_dma_sync_single_for_cpu(nic->pdev, nic->rx_to_clean->dma_addr,
			RFD_BUF_LEN, PCI_DMA_BIDIRECTIONAL);

	/* Payload starts after the RFD header in the receive buffer. */
	if (memcmp(nic->rx_to_clean->skb->data + sizeof(struct rfd),
	   skb->data, ETH_DATA_LEN))
		err = -EAGAIN;

err_loopback_none:
	mdio_write(nic->netdev, nic->mii.phy_id, MII_BMCR, 0);
	nic->loopback = lb_none;
	e100_clean_cbs(nic);
	e100_hw_reset(nic);
err_clean_rx:
	e100_rx_clean_list(nic);
	return err;
}
       
  2467 
       
  2468 #define MII_LED_CONTROL	0x1B
       
  2469 #define E100_82552_LED_OVERRIDE 0x19
       
  2470 #define E100_82552_LED_ON       0x000F /* LEDTX and LED_RX both on */
       
  2471 #define E100_82552_LED_OFF      0x000A /* LEDTX and LED_RX both off */
       
/* Timer callback that toggles the LED for ethtool's identify-NIC blink
 * and reschedules itself every quarter second until the caller
 * (e100_phys_id) deletes the timer.  The 82552 PHY uses a dedicated
 * LED-override register; older PHYs use the MII LED control register
 * with mac-generation-specific "on" values. */
static void e100_blink_led(unsigned long data)
{
	struct nic *nic = (struct nic *)data;
	enum led_state {
		led_on     = 0x01,
		led_off    = 0x04,
		led_on_559 = 0x05,
		led_on_557 = 0x07,
	};
	u16 led_reg = MII_LED_CONTROL;

	if (nic->phy == phy_82552_v) {
		led_reg = E100_82552_LED_OVERRIDE;

		nic->leds = (nic->leds == E100_82552_LED_ON) ?
		            E100_82552_LED_OFF : E100_82552_LED_ON;
	} else {
		/* Toggle: if currently on, switch off; otherwise pick the
		 * "on" value appropriate for this mac generation. */
		nic->leds = (nic->leds & led_on) ? led_off :
		            (nic->mac < mac_82559_D101M) ? led_on_557 :
		            led_on_559;
	}
	mdio_write(nic->netdev, nic->mii.phy_id, led_reg, nic->leds);
	mod_timer(&nic->blink_timer, jiffies + HZ / 4);
}
       
  2496 
       
/* ethtool get_settings: report link parameters via the generic MII layer. */
static int e100_get_settings(struct net_device *netdev, struct ethtool_cmd *cmd)
{
	struct nic *nic = netdev_priv(netdev);
	return mii_ethtool_gset(&nic->mii, cmd);
}
       
  2502 
       
/* ethtool set_settings: reset the PHY, apply the new MII parameters,
 * then reissue the configure command block so the MAC matches. */
static int e100_set_settings(struct net_device *netdev, struct ethtool_cmd *cmd)
{
	struct nic *nic = netdev_priv(netdev);
	int err;

	mdio_write(netdev, nic->mii.phy_id, MII_BMCR, BMCR_RESET);
	err = mii_ethtool_sset(&nic->mii, cmd);
	e100_exec_cb(nic, NULL, e100_configure);

	return err;
}
       
  2514 
       
  2515 static void e100_get_drvinfo(struct net_device *netdev,
       
  2516 	struct ethtool_drvinfo *info)
       
  2517 {
       
  2518 	struct nic *nic = netdev_priv(netdev);
       
  2519 	strcpy(info->driver, DRV_NAME);
       
  2520 	strcpy(info->version, DRV_VERSION);
       
  2521 	strcpy(info->fw_version, "N/A");
       
  2522 	strcpy(info->bus_info, pci_name(nic->pdev));
       
  2523 }
       
  2524 
       
  2525 #define E100_PHY_REGS 0x1C
       
  2526 static int e100_get_regs_len(struct net_device *netdev)
       
  2527 {
       
  2528 	struct nic *nic = netdev_priv(netdev);
       
  2529 	return 1 + E100_PHY_REGS + sizeof(nic->mem->dump_buf);
       
  2530 }
       
  2531 
       
/* ethtool get_regs: dump one 32-bit word of SCB command/status state,
 * every PHY (MDIO) register 0..E100_PHY_REGS (stored highest register
 * first), and finally the chip's dump buffer produced by the e100_dump
 * command block. */
static void e100_get_regs(struct net_device *netdev,
	struct ethtool_regs *regs, void *p)
{
	struct nic *nic = netdev_priv(netdev);
	u32 *buff = p;
	int i;

	regs->version = (1 << 24) | nic->pdev->revision;
	buff[0] = ioread8(&nic->csr->scb.cmd_hi) << 24 |
		ioread8(&nic->csr->scb.cmd_lo) << 16 |
		ioread16(&nic->csr->scb.status);
	for (i = E100_PHY_REGS; i >= 0; i--)
		buff[1 + E100_PHY_REGS - i] =
			mdio_read(netdev, nic->mii.phy_id, i);
	memset(nic->mem->dump_buf, 0, sizeof(nic->mem->dump_buf));
	e100_exec_cb(nic, NULL, e100_dump);
	/* Sleep presumably to let the dump command complete before the
	 * buffer is copied out — TODO confirm against chip docs. */
	msleep(10);
	memcpy(&buff[2 + E100_PHY_REGS], nic->mem->dump_buf,
		sizeof(nic->mem->dump_buf));
}
       
  2552 
       
  2553 static void e100_get_wol(struct net_device *netdev, struct ethtool_wolinfo *wol)
       
  2554 {
       
  2555 	struct nic *nic = netdev_priv(netdev);
       
  2556 	wol->supported = (nic->mac >= mac_82558_D101_A4) ?  WAKE_MAGIC : 0;
       
  2557 	wol->wolopts = (nic->flags & wol_magic) ? WAKE_MAGIC : 0;
       
  2558 }
       
  2559 
       
  2560 static int e100_set_wol(struct net_device *netdev, struct ethtool_wolinfo *wol)
       
  2561 {
       
  2562 	struct nic *nic = netdev_priv(netdev);
       
  2563 
       
  2564 	if ((wol->wolopts && wol->wolopts != WAKE_MAGIC) ||
       
  2565 	    !device_can_wakeup(&nic->pdev->dev))
       
  2566 		return -EOPNOTSUPP;
       
  2567 
       
  2568 	if (wol->wolopts)
       
  2569 		nic->flags |= wol_magic;
       
  2570 	else
       
  2571 		nic->flags &= ~wol_magic;
       
  2572 
       
  2573 	device_set_wakeup_enable(&nic->pdev->dev, wol->wolopts);
       
  2574 
       
  2575 	e100_exec_cb(nic, NULL, e100_configure);
       
  2576 
       
  2577 	return 0;
       
  2578 }
       
  2579 
       
/* ethtool get_msglevel: current netif message-enable bitmap. */
static u32 e100_get_msglevel(struct net_device *netdev)
{
	struct nic *nic = netdev_priv(netdev);
	return nic->msg_enable;
}
       
  2585 
       
/* ethtool set_msglevel: replace the netif message-enable bitmap. */
static void e100_set_msglevel(struct net_device *netdev, u32 value)
{
	struct nic *nic = netdev_priv(netdev);
	nic->msg_enable = value;
}
       
  2591 
       
/* ethtool nway_reset: restart link autonegotiation via the MII layer. */
static int e100_nway_reset(struct net_device *netdev)
{
	struct nic *nic = netdev_priv(netdev);
	return mii_nway_restart(&nic->mii);
}
       
  2597 
       
/* ethtool get_link: non-zero if the PHY reports link up. */
static u32 e100_get_link(struct net_device *netdev)
{
	struct nic *nic = netdev_priv(netdev);
	return mii_link_ok(&nic->mii);
}
       
  2603 
       
/* ethtool get_eeprom_len: EEPROM size in bytes (word count * 2). */
static int e100_get_eeprom_len(struct net_device *netdev)
{
	struct nic *nic = netdev_priv(netdev);
	return nic->eeprom_wc << 1;
}
       
  2609 
       
  2610 #define E100_EEPROM_MAGIC	0x1234
       
/* ethtool get_eeprom: copy the requested range out of the driver's
 * cached EEPROM image.  NOTE(review): offset/len are assumed to be
 * pre-validated against get_eeprom_len by the ethtool core — confirm. */
static int e100_get_eeprom(struct net_device *netdev,
	struct ethtool_eeprom *eeprom, u8 *bytes)
{
	struct nic *nic = netdev_priv(netdev);

	eeprom->magic = E100_EEPROM_MAGIC;
	memcpy(bytes, &((u8 *)nic->eeprom)[eeprom->offset], eeprom->len);

	return 0;
}
       
  2621 
       
/* ethtool set_eeprom: update the cached EEPROM image, then write the
 * affected word range back to the physical EEPROM.  The magic value
 * guards against accidental writes. */
static int e100_set_eeprom(struct net_device *netdev,
	struct ethtool_eeprom *eeprom, u8 *bytes)
{
	struct nic *nic = netdev_priv(netdev);

	if (eeprom->magic != E100_EEPROM_MAGIC)
		return -EINVAL;

	memcpy(&((u8 *)nic->eeprom)[eeprom->offset], bytes, eeprom->len);

	/* "+ 1" word: a byte range may straddle a 16-bit word boundary. */
	return e100_eeprom_save(nic, eeprom->offset >> 1,
		(eeprom->len >> 1) + 1);
}
       
  2635 
       
  2636 static void e100_get_ringparam(struct net_device *netdev,
       
  2637 	struct ethtool_ringparam *ring)
       
  2638 {
       
  2639 	struct nic *nic = netdev_priv(netdev);
       
  2640 	struct param_range *rfds = &nic->params.rfds;
       
  2641 	struct param_range *cbs = &nic->params.cbs;
       
  2642 
       
  2643 	ring->rx_max_pending = rfds->max;
       
  2644 	ring->tx_max_pending = cbs->max;
       
  2645 	ring->rx_mini_max_pending = 0;
       
  2646 	ring->rx_jumbo_max_pending = 0;
       
  2647 	ring->rx_pending = rfds->count;
       
  2648 	ring->tx_pending = cbs->count;
       
  2649 	ring->rx_mini_pending = 0;
       
  2650 	ring->rx_jumbo_pending = 0;
       
  2651 }
       
  2652 
       
/* ethtool set_ringparam: clamp the requested RX/TX ring sizes to the
 * driver's [min, max] ranges and bounce the interface so the new
 * sizes take effect.  Mini/jumbo rings are not supported. */
static int e100_set_ringparam(struct net_device *netdev,
	struct ethtool_ringparam *ring)
{
	struct nic *nic = netdev_priv(netdev);
	struct param_range *rfds = &nic->params.rfds;
	struct param_range *cbs = &nic->params.cbs;

	if ((ring->rx_mini_pending) || (ring->rx_jumbo_pending))
		return -EINVAL;

	if (netif_running(netdev))
		e100_down(nic);
	/* Clamp each count into its valid range. */
	rfds->count = max(ring->rx_pending, rfds->min);
	rfds->count = min(rfds->count, rfds->max);
	cbs->count = max(ring->tx_pending, cbs->min);
	cbs->count = min(cbs->count, cbs->max);
	DPRINTK(DRV, INFO, "Ring Param settings: rx: %d, tx %d\n",
	        rfds->count, cbs->count);
	if (netif_running(netdev))
		e100_up(nic);

	return 0;
}
       
  2676 
       
  2677 static const char e100_gstrings_test[][ETH_GSTRING_LEN] = {
       
  2678 	"Link test     (on/offline)",
       
  2679 	"Eeprom test   (on/offline)",
       
  2680 	"Self test        (offline)",
       
  2681 	"Mac loopback     (offline)",
       
  2682 	"Phy loopback     (offline)",
       
  2683 };
       
  2684 #define E100_TEST_LEN	ARRAY_SIZE(e100_gstrings_test)
       
  2685 
       
/* ethtool self-test: the link and EEPROM checks always run; the
 * offline part additionally runs the chip self-test plus MAC and PHY
 * loopback tests, saving and restoring the MII settings around them.
 * Result convention: data[i] != 0 means test i failed.
 * NOTE(review): err from mii_ethtool_gset/sset is deliberately not
 * propagated — the save/restore is best-effort. */
static void e100_diag_test(struct net_device *netdev,
	struct ethtool_test *test, u64 *data)
{
	struct ethtool_cmd cmd;
	struct nic *nic = netdev_priv(netdev);
	int i, err;

	memset(data, 0, E100_TEST_LEN * sizeof(u64));
	data[0] = !mii_link_ok(&nic->mii);
	data[1] = e100_eeprom_load(nic);
	if (test->flags & ETH_TEST_FL_OFFLINE) {

		/* save speed, duplex & autoneg settings */
		err = mii_ethtool_gset(&nic->mii, &cmd);

		if (netif_running(netdev))
			e100_down(nic);
		data[2] = e100_self_test(nic);
		data[3] = e100_loopback_test(nic, lb_mac);
		data[4] = e100_loopback_test(nic, lb_phy);

		/* restore speed, duplex & autoneg settings */
		err = mii_ethtool_sset(&nic->mii, &cmd);

		if (netif_running(netdev))
			e100_up(nic);
	}
	for (i = 0; i < E100_TEST_LEN; i++)
		test->flags |= data[i] ? ETH_TEST_FL_FAILED : 0;

	msleep_interruptible(4 * 1000);
}
       
  2718 
       
/* ethtool phys_id: blink the LED for "data" seconds to identify the
 * NIC (0 means "as long as the scheduler allows"), then restore the
 * LED register to its default behavior. */
static int e100_phys_id(struct net_device *netdev, u32 data)
{
	struct nic *nic = netdev_priv(netdev);
	/* The 82552 PHY has a dedicated LED-override register. */
	u16 led_reg = (nic->phy == phy_82552_v) ? E100_82552_LED_OVERRIDE :
	              MII_LED_CONTROL;

	if (!data || data > (u32)(MAX_SCHEDULE_TIMEOUT / HZ))
		data = (u32)(MAX_SCHEDULE_TIMEOUT / HZ);
	/* e100_blink_led() re-arms itself until the timer is deleted. */
	mod_timer(&nic->blink_timer, jiffies);
	msleep_interruptible(data * 1000);
	del_timer_sync(&nic->blink_timer);
	mdio_write(netdev, nic->mii.phy_id, led_reg, 0);

	return 0;
}
       
  2734 
       
  2735 static const char e100_gstrings_stats[][ETH_GSTRING_LEN] = {
       
  2736 	"rx_packets", "tx_packets", "rx_bytes", "tx_bytes", "rx_errors",
       
  2737 	"tx_errors", "rx_dropped", "tx_dropped", "multicast", "collisions",
       
  2738 	"rx_length_errors", "rx_over_errors", "rx_crc_errors",
       
  2739 	"rx_frame_errors", "rx_fifo_errors", "rx_missed_errors",
       
  2740 	"tx_aborted_errors", "tx_carrier_errors", "tx_fifo_errors",
       
  2741 	"tx_heartbeat_errors", "tx_window_errors",
       
  2742 	/* device-specific stats */
       
  2743 	"tx_deferred", "tx_single_collisions", "tx_multi_collisions",
       
  2744 	"tx_flow_control_pause", "rx_flow_control_pause",
       
  2745 	"rx_flow_control_unsupported", "tx_tco_packets", "rx_tco_packets",
       
  2746 };
       
  2747 #define E100_NET_STATS_LEN	21
       
  2748 #define E100_STATS_LEN	ARRAY_SIZE(e100_gstrings_stats)
       
  2749 
       
  2750 static int e100_get_sset_count(struct net_device *netdev, int sset)
       
  2751 {
       
  2752 	switch (sset) {
       
  2753 	case ETH_SS_TEST:
       
  2754 		return E100_TEST_LEN;
       
  2755 	case ETH_SS_STATS:
       
  2756 		return E100_STATS_LEN;
       
  2757 	default:
       
  2758 		return -EOPNOTSUPP;
       
  2759 	}
       
  2760 }
       
  2761 
       
/* ethtool get_ethtool_stats: the generic netdev counters first, then
 * the device-specific counters.  The order here MUST match
 * e100_gstrings_stats[] exactly. */
static void e100_get_ethtool_stats(struct net_device *netdev,
	struct ethtool_stats *stats, u64 *data)
{
	struct nic *nic = netdev_priv(netdev);
	int i;

	for (i = 0; i < E100_NET_STATS_LEN; i++)
		data[i] = ((unsigned long *)&netdev->stats)[i];

	data[i++] = nic->tx_deferred;
	data[i++] = nic->tx_single_collisions;
	data[i++] = nic->tx_multiple_collisions;
	data[i++] = nic->tx_fc_pause;
	data[i++] = nic->rx_fc_pause;
	data[i++] = nic->rx_fc_unsupported;
	data[i++] = nic->tx_tco_frames;
	data[i++] = nic->rx_tco_frames;
}
       
  2780 
       
  2781 static void e100_get_strings(struct net_device *netdev, u32 stringset, u8 *data)
       
  2782 {
       
  2783 	switch (stringset) {
       
  2784 	case ETH_SS_TEST:
       
  2785 		memcpy(data, *e100_gstrings_test, sizeof(e100_gstrings_test));
       
  2786 		break;
       
  2787 	case ETH_SS_STATS:
       
  2788 		memcpy(data, *e100_gstrings_stats, sizeof(e100_gstrings_stats));
       
  2789 		break;
       
  2790 	}
       
  2791 }
       
  2792 
       
  2793 static const struct ethtool_ops e100_ethtool_ops = {
       
  2794 	.get_settings		= e100_get_settings,
       
  2795 	.set_settings		= e100_set_settings,
       
  2796 	.get_drvinfo		= e100_get_drvinfo,
       
  2797 	.get_regs_len		= e100_get_regs_len,
       
  2798 	.get_regs		= e100_get_regs,
       
  2799 	.get_wol		= e100_get_wol,
       
  2800 	.set_wol		= e100_set_wol,
       
  2801 	.get_msglevel		= e100_get_msglevel,
       
  2802 	.set_msglevel		= e100_set_msglevel,
       
  2803 	.nway_reset		= e100_nway_reset,
       
  2804 	.get_link		= e100_get_link,
       
  2805 	.get_eeprom_len		= e100_get_eeprom_len,
       
  2806 	.get_eeprom		= e100_get_eeprom,
       
  2807 	.set_eeprom		= e100_set_eeprom,
       
  2808 	.get_ringparam		= e100_get_ringparam,
       
  2809 	.set_ringparam		= e100_set_ringparam,
       
  2810 	.self_test		= e100_diag_test,
       
  2811 	.get_strings		= e100_get_strings,
       
  2812 	.phys_id		= e100_phys_id,
       
  2813 	.get_ethtool_stats	= e100_get_ethtool_stats,
       
  2814 	.get_sset_count		= e100_get_sset_count,
       
  2815 };
       
  2816 
       
/* net_device ioctl hook: delegate MII ioctls to the generic helper. */
static int e100_do_ioctl(struct net_device *netdev, struct ifreq *ifr, int cmd)
{
	struct nic *nic = netdev_priv(netdev);

	return generic_mii_ioctl(&nic->mii, if_mii(ifr), cmd, NULL);
}
       
  2823 
       
  2824 static int e100_alloc(struct nic *nic)
       
  2825 {
       
  2826 	nic->mem = pci_alloc_consistent(nic->pdev, sizeof(struct mem),
       
  2827 		&nic->dma_addr);
       
  2828 	return nic->mem ? 0 : -ENOMEM;
       
  2829 }
       
  2830 
       
  2831 static void e100_free(struct nic *nic)
       
  2832 {
       
  2833 	if (nic->mem) {
       
  2834 		pci_free_consistent(nic->pdev, sizeof(struct mem),
       
  2835 			nic->mem, nic->dma_addr);
       
  2836 		nic->mem = NULL;
       
  2837 	}
       
  2838 }
       
  2839 
       
/* net_device open hook: start with carrier off (non-EtherCAT only;
 * link state is updated later), then bring the interface up. */
static int e100_open(struct net_device *netdev)
{
	struct nic *nic = netdev_priv(netdev);
	int err = 0;

	if (!nic->ecdev)
		netif_carrier_off(netdev);
	if ((err = e100_up(nic)))
		DPRINTK(IFUP, ERR, "Cannot open interface, aborting.\n");
	return err;
}
       
  2851 
       
  2852 static int e100_close(struct net_device *netdev)
       
  2853 {
       
  2854 	e100_down(netdev_priv(netdev));
       
  2855 	return 0;
       
  2856 }
       
  2857 
       
/* Standard net_device callbacks. These are only exercised when the
 * NIC is registered as a regular Linux interface, i.e. when the
 * EtherCAT master declined the device in e100_probe(). */
static const struct net_device_ops e100_netdev_ops = {
	.ndo_open		= e100_open,
	.ndo_stop		= e100_close,
	.ndo_start_xmit		= e100_xmit_frame,
	.ndo_validate_addr	= eth_validate_addr,
	.ndo_set_multicast_list	= e100_set_multicast_list,
	.ndo_set_mac_address	= e100_set_mac_address,
	.ndo_change_mtu		= e100_change_mtu,
	.ndo_do_ioctl		= e100_do_ioctl,
	.ndo_tx_timeout		= e100_tx_timeout,
#ifdef CONFIG_NET_POLL_CONTROLLER
	.ndo_poll_controller	= e100_netpoll,
#endif
};
       
  2872 
       
  2873 static int __devinit e100_probe(struct pci_dev *pdev,
       
  2874 	const struct pci_device_id *ent)
       
  2875 {
       
  2876 	struct net_device *netdev;
       
  2877 	struct nic *nic;
       
  2878 	int err;
       
  2879 
       
  2880 	if (!(netdev = alloc_etherdev(sizeof(struct nic)))) {
       
  2881 		if (((1 << debug) - 1) & NETIF_MSG_PROBE)
       
  2882 			printk(KERN_ERR PFX "Etherdev alloc failed, abort.\n");
       
  2883 		return -ENOMEM;
       
  2884 	}
       
  2885 
       
  2886 	netdev->netdev_ops = &e100_netdev_ops;
       
  2887 	SET_ETHTOOL_OPS(netdev, &e100_ethtool_ops);
       
  2888 	netdev->watchdog_timeo = E100_WATCHDOG_PERIOD;
       
  2889 	strncpy(netdev->name, pci_name(pdev), sizeof(netdev->name) - 1);
       
  2890 
       
  2891 	nic = netdev_priv(netdev);
       
  2892 	netif_napi_add(netdev, &nic->napi, e100_poll, E100_NAPI_WEIGHT);
       
  2893 	nic->netdev = netdev;
       
  2894 	nic->pdev = pdev;
       
  2895 	nic->msg_enable = (1 << debug) - 1;
       
  2896 	nic->mdio_ctrl = mdio_ctrl_hw;
       
  2897 	pci_set_drvdata(pdev, netdev);
       
  2898 
       
  2899 	if ((err = pci_enable_device(pdev))) {
       
  2900 		DPRINTK(PROBE, ERR, "Cannot enable PCI device, aborting.\n");
       
  2901 		goto err_out_free_dev;
       
  2902 	}
       
  2903 
       
  2904 	if (!(pci_resource_flags(pdev, 0) & IORESOURCE_MEM)) {
       
  2905 		DPRINTK(PROBE, ERR, "Cannot find proper PCI device "
       
  2906 			"base address, aborting.\n");
       
  2907 		err = -ENODEV;
       
  2908 		goto err_out_disable_pdev;
       
  2909 	}
       
  2910 
       
  2911 	if ((err = pci_request_regions(pdev, DRV_NAME))) {
       
  2912 		DPRINTK(PROBE, ERR, "Cannot obtain PCI resources, aborting.\n");
       
  2913 		goto err_out_disable_pdev;
       
  2914 	}
       
  2915 
       
  2916 	if ((err = pci_set_dma_mask(pdev, DMA_BIT_MASK(32)))) {
       
  2917 		DPRINTK(PROBE, ERR, "No usable DMA configuration, aborting.\n");
       
  2918 		goto err_out_free_res;
       
  2919 	}
       
  2920 
       
  2921 	SET_NETDEV_DEV(netdev, &pdev->dev);
       
  2922 
       
  2923 	if (use_io)
       
  2924 		DPRINTK(PROBE, INFO, "using i/o access mode\n");
       
  2925 
       
  2926 	nic->csr = pci_iomap(pdev, (use_io ? 1 : 0), sizeof(struct csr));
       
  2927 	if (!nic->csr) {
       
  2928 		DPRINTK(PROBE, ERR, "Cannot map device registers, aborting.\n");
       
  2929 		err = -ENOMEM;
       
  2930 		goto err_out_free_res;
       
  2931 	}
       
  2932 
       
  2933 	if (ent->driver_data)
       
  2934 		nic->flags |= ich;
       
  2935 	else
       
  2936 		nic->flags &= ~ich;
       
  2937 
       
  2938 	e100_get_defaults(nic);
       
  2939 
       
  2940 	/* locks must be initialized before calling hw_reset */
       
  2941 	spin_lock_init(&nic->cb_lock);
       
  2942 	spin_lock_init(&nic->cmd_lock);
       
  2943 	spin_lock_init(&nic->mdio_lock);
       
  2944 
       
  2945 	/* Reset the device before pci_set_master() in case device is in some
       
  2946 	 * funky state and has an interrupt pending - hint: we don't have the
       
  2947 	 * interrupt handler registered yet. */
       
  2948 	e100_hw_reset(nic);
       
  2949 
       
  2950 	pci_set_master(pdev);
       
  2951 
       
  2952 	init_timer(&nic->watchdog);
       
  2953 	nic->watchdog.function = e100_watchdog;
       
  2954 	nic->watchdog.data = (unsigned long)nic;
       
  2955 	init_timer(&nic->blink_timer);
       
  2956 	nic->blink_timer.function = e100_blink_led;
       
  2957 	nic->blink_timer.data = (unsigned long)nic;
       
  2958 
       
  2959 	INIT_WORK(&nic->tx_timeout_task, e100_tx_timeout_task);
       
  2960 
       
  2961 	if ((err = e100_alloc(nic))) {
       
  2962 		DPRINTK(PROBE, ERR, "Cannot alloc driver memory, aborting.\n");
       
  2963 		goto err_out_iounmap;
       
  2964 	}
       
  2965 
       
  2966 	if ((err = e100_eeprom_load(nic)))
       
  2967 		goto err_out_free;
       
  2968 
       
  2969 	e100_phy_init(nic);
       
  2970 
       
  2971 	memcpy(netdev->dev_addr, nic->eeprom, ETH_ALEN);
       
  2972 	memcpy(netdev->perm_addr, nic->eeprom, ETH_ALEN);
       
  2973 	if (!is_valid_ether_addr(netdev->perm_addr)) {
       
  2974 		if (!eeprom_bad_csum_allow) {
       
  2975 			DPRINTK(PROBE, ERR, "Invalid MAC address from "
       
  2976 			        "EEPROM, aborting.\n");
       
  2977 			err = -EAGAIN;
       
  2978 			goto err_out_free;
       
  2979 		} else {
       
  2980 			DPRINTK(PROBE, ERR, "Invalid MAC address from EEPROM, "
       
  2981 			        "you MUST configure one.\n");
       
  2982 		}
       
  2983 	}
       
  2984 
       
  2985 	/* Wol magic packet can be enabled from eeprom */
       
  2986 	if ((nic->mac >= mac_82558_D101_A4) &&
       
  2987 	   (nic->eeprom[eeprom_id] & eeprom_id_wol)) {
       
  2988 		nic->flags |= wol_magic;
       
  2989 		device_set_wakeup_enable(&pdev->dev, true);
       
  2990 	}
       
  2991 
       
  2992 	/* ack any pending wake events, disable PME */
       
  2993 	pci_pme_active(pdev, false);
       
  2994 
       
  2995 	// offer device to EtherCAT master module
       
  2996 	nic->ecdev = ecdev_offer(netdev, e100_ec_poll, THIS_MODULE);
       
  2997 	if (nic->ecdev) {
       
  2998 		if (ecdev_open(nic->ecdev)) {
       
  2999 			ecdev_withdraw(nic->ecdev);
       
  3000 			goto err_out_free;
       
  3001 		}
       
  3002 	} else {
       
  3003 		strcpy(netdev->name, "eth%d");
       
  3004 		if((err = register_netdev(netdev))) {
       
  3005 			DPRINTK(PROBE, ERR, "Cannot register net device, aborting.\n");
       
  3006 			goto err_out_free;
       
  3007 		}
       
  3008 		nic->cbs_pool = pci_pool_create(netdev->name,
       
  3009 			   nic->pdev,
       
  3010 			   nic->params.cbs.max * sizeof(struct cb),
       
  3011 			   sizeof(u32),
       
  3012 			   0);
       
  3013 	}
       
  3014 	DPRINTK(PROBE, INFO, "addr 0x%llx, irq %d, MAC addr %pM\n",
       
  3015 		(unsigned long long)pci_resource_start(pdev, use_io ? 1 : 0),
       
  3016 		pdev->irq, netdev->dev_addr);
       
  3017 
       
  3018 	return 0;
       
  3019 
       
  3020 err_out_free:
       
  3021 	e100_free(nic);
       
  3022 err_out_iounmap:
       
  3023 	pci_iounmap(pdev, nic->csr);
       
  3024 err_out_free_res:
       
  3025 	pci_release_regions(pdev);
       
  3026 err_out_disable_pdev:
       
  3027 	pci_disable_device(pdev);
       
  3028 err_out_free_dev:
       
  3029 	pci_set_drvdata(pdev, NULL);
       
  3030 	free_netdev(netdev);
       
  3031 	return err;
       
  3032 }
       
  3033 
       
/* Tear down one device instance. Mirrors e100_probe() in reverse:
 * detach from EtherCAT or unregister the netdev first, then release
 * DMA memory, register mapping, DMA pool and the PCI resources.
 * A NULL drvdata means probe never completed; nothing to do. */
static void __devexit e100_remove(struct pci_dev *pdev)
{
	struct net_device *netdev = pci_get_drvdata(pdev);

	if (netdev) {
		struct nic *nic = netdev_priv(netdev);
		if (nic->ecdev) {
			/* Device was claimed by the EtherCAT master. */
			ecdev_close(nic->ecdev);
			ecdev_withdraw(nic->ecdev);
		} else {
			unregister_netdev(netdev);
		}

		e100_free(nic);
		pci_iounmap(pdev, nic->csr);
		/* NULL-safe in the EtherCAT case (pool never created). */
		pci_pool_destroy(nic->cbs_pool);
		free_netdev(netdev);
		pci_release_regions(pdev);
		pci_disable_device(pdev);
		pci_set_drvdata(pdev, NULL);
	}
}
       
  3056 
       
#define E100_82552_SMARTSPEED   0x14   /* SmartSpeed Ctrl register */
#define E100_82552_REV_ANEG     0x0200 /* Reverse auto-negotiation */
#define E100_82552_ANEG_NOW     0x0400 /* Auto-negotiate now */
/* Common shutdown path for suspend and poweroff: stop the interface,
 * save PCI config state, and decide whether wake-up (PME) should stay
 * armed. *enable_wake is set to true when WoL magic packets or ASF
 * management require the device to remain wakeable. */
static void __e100_shutdown(struct pci_dev *pdev, bool *enable_wake)
{
	struct net_device *netdev = pci_get_drvdata(pdev);
	struct nic *nic = netdev_priv(netdev);

	if (netif_running(netdev))
		e100_down(nic);
	netif_device_detach(netdev);

	pci_save_state(pdev);

	if ((nic->flags & wol_magic) | e100_asf(nic)) {
		/* enable reverse auto-negotiation */
		if (nic->phy == phy_82552_v) {
			u16 smartspeed = mdio_read(netdev, nic->mii.phy_id,
			                           E100_82552_SMARTSPEED);

			mdio_write(netdev, nic->mii.phy_id,
			           E100_82552_SMARTSPEED, smartspeed |
			           E100_82552_REV_ANEG | E100_82552_ANEG_NOW);
		}
		*enable_wake = true;
	} else {
		*enable_wake = false;
	}

	pci_disable_device(pdev);
}
       
  3088 
       
  3089 static int __e100_power_off(struct pci_dev *pdev, bool wake)
       
  3090 {
       
  3091 	if (wake)
       
  3092 		return pci_prepare_to_sleep(pdev);
       
  3093 
       
  3094 	pci_wake_from_d3(pdev, false);
       
  3095 	pci_set_power_state(pdev, PCI_D3hot);
       
  3096 
       
  3097 	return 0;
       
  3098 }
       
  3099 
       
  3100 #ifdef CONFIG_PM
       
/* PM suspend hook: shut the device down, then power it off honoring
 * the wake decision made by __e100_shutdown(). */
static int e100_suspend(struct pci_dev *pdev, pm_message_t state)
{
	bool wake;
	__e100_shutdown(pdev, &wake);
	return __e100_power_off(pdev, wake);
}
       
  3107 
       
/* PM resume hook: restore power state and PCI config, undo the
 * reverse auto-negotiation set up at suspend time on 82552 PHYs,
 * and bring the interface back up if it was running. */
static int e100_resume(struct pci_dev *pdev)
{
	struct net_device *netdev = pci_get_drvdata(pdev);
	struct nic *nic = netdev_priv(netdev);

	pci_set_power_state(pdev, PCI_D0);
	pci_restore_state(pdev);
	/* ack any pending wake events, disable PME */
	pci_enable_wake(pdev, 0, 0);

	/* disable reverse auto-negotiation */
	if (nic->phy == phy_82552_v) {
		u16 smartspeed = mdio_read(netdev, nic->mii.phy_id,
		                           E100_82552_SMARTSPEED);

		mdio_write(netdev, nic->mii.phy_id,
		           E100_82552_SMARTSPEED,
		           smartspeed & ~(E100_82552_REV_ANEG));
	}

	netif_device_attach(netdev);
	if (netif_running(netdev))
		e100_up(nic);

	return 0;
}
       
  3134 #endif /* CONFIG_PM */
       
  3135 
       
/* System shutdown/reboot hook. Only power off (possibly arming wake)
 * when the machine is actually powering down; on reboot the device is
 * left in D0 so the BIOS/firmware can find it. */
static void e100_shutdown(struct pci_dev *pdev)
{
	bool wake;
	__e100_shutdown(pdev, &wake);
	if (system_state == SYSTEM_POWER_OFF)
		__e100_power_off(pdev, wake);
}
       
  3143 
       
  3144 /* ------------------ PCI Error Recovery infrastructure  -------------- */
       
  3145 /**
       
  3146  * e100_io_error_detected - called when PCI error is detected.
       
  3147  * @pdev: Pointer to PCI device
       
  3148  * @state: The current pci connection state
       
  3149  */
       
static pci_ers_result_t e100_io_error_detected(struct pci_dev *pdev, pci_channel_state_t state)
{
	struct net_device *netdev = pci_get_drvdata(pdev);
	struct nic *nic = netdev_priv(netdev);

	/* EtherCAT-claimed devices opt out of AER recovery.
	 * NOTE(review): -EBUSY is not a valid pci_ers_result_t value;
	 * PCI_ERS_RESULT_NONE or _DISCONNECT is probably intended —
	 * confirm against the AER core's handling before changing. */
	if (nic->ecdev)
		return -EBUSY;

	netif_device_detach(netdev);

	/* Link is permanently dead — tell the core to give up. */
	if (state == pci_channel_io_perm_failure)
		return PCI_ERS_RESULT_DISCONNECT;

	if (netif_running(netdev))
		e100_down(nic);
	pci_disable_device(pdev);

	/* Request a slot reset. */
	return PCI_ERS_RESULT_NEED_RESET;
}
       
  3170 
       
  3171 /**
       
  3172  * e100_io_slot_reset - called after the pci bus has been reset.
       
  3173  * @pdev: Pointer to PCI device
       
  3174  *
       
  3175  * Restart the card from scratch.
       
  3176  */
       
static pci_ers_result_t e100_io_slot_reset(struct pci_dev *pdev)
{
	struct net_device *netdev = pci_get_drvdata(pdev);
	struct nic *nic = netdev_priv(netdev);

	/* NOTE(review): -EBUSY is not a valid pci_ers_result_t value;
	 * see the matching note in e100_io_error_detected(). */
	if (nic->ecdev)
		return -EBUSY;

	if (pci_enable_device(pdev)) {
		printk(KERN_ERR "e100: Cannot re-enable PCI device after reset.\n");
		return PCI_ERS_RESULT_DISCONNECT;
	}
	pci_set_master(pdev);

	/* Only one device per card can do a reset */
	if (0 != PCI_FUNC(pdev->devfn))
		return PCI_ERS_RESULT_RECOVERED;
	e100_hw_reset(nic);
	e100_phy_init(nic);

	return PCI_ERS_RESULT_RECOVERED;
}
       
  3199 
       
  3200 /**
       
  3201  * e100_io_resume - resume normal operations
       
  3202  * @pdev: Pointer to PCI device
       
  3203  *
       
  3204  * Resume normal operations after an error recovery
       
  3205  * sequence has been completed.
       
  3206  */
       
static void e100_io_resume(struct pci_dev *pdev)
{
	struct net_device *netdev = pci_get_drvdata(pdev);
	struct nic *nic = netdev_priv(netdev);

	/* ack any pending wake events, disable PME */
	pci_enable_wake(pdev, 0, 0);

	/* EtherCAT devices are not attached to the net core, and their
	 * watchdog timer is not used; restart only what applies. */
	if (!nic->ecdev)
		netif_device_attach(netdev);
	if (nic->ecdev || netif_running(netdev)) {
		e100_open(netdev);
		if (!nic->ecdev)
			mod_timer(&nic->watchdog, jiffies);
	}
}
       
  3223 
       
/* AER (PCI error recovery) callbacks, wired into e100_driver below. */
static struct pci_error_handlers e100_err_handler = {
	.error_detected = e100_io_error_detected,
	.slot_reset = e100_io_slot_reset,
	.resume = e100_io_resume,
};
       
  3229 
       
/* PCI driver descriptor: probe/remove lifecycle, optional power
 * management hooks, shutdown and error recovery. */
static struct pci_driver e100_driver = {
	.name =         DRV_NAME,
	.id_table =     e100_id_table,
	.probe =        e100_probe,
	.remove =       __devexit_p(e100_remove),
#ifdef CONFIG_PM
	/* Power Management hooks */
	.suspend =      e100_suspend,
	.resume =       e100_resume,
#endif
	.shutdown =     e100_shutdown,
	.err_handler = &e100_err_handler,
};
       
  3243 
       
  3244 static int __init e100_init_module(void)
       
  3245 {
       
  3246 	printk(KERN_INFO DRV_NAME " " DRV_DESCRIPTION " " DRV_VERSION
       
  3247 			", master " EC_MASTER_VERSION "\n");
       
  3248  
       
  3249  	return pci_register_driver(&e100_driver);
       
  3250 }
       
  3251 
       
  3252 static void __exit e100_cleanup_module(void)
       
  3253 {
       
  3254 	printk(KERN_INFO DRV_NAME " cleaning up module...\n");
       
  3255 	pci_unregister_driver(&e100_driver);
       
  3256 	printk(KERN_INFO DRV_NAME " module cleaned up.\n");
       
  3257 }
       
  3258 
       
/* Register the module entry/exit points with the kernel. */
module_init(e100_init_module);
module_exit(e100_cleanup_module);