devices/e1000/e1000_main-2.6.28-orig.c
changeset 2051 3a066ec73fb2
       
     1 /*******************************************************************************
       
     2 
       
     3   Intel PRO/1000 Linux driver
       
     4   Copyright(c) 1999 - 2006 Intel Corporation.
       
     5 
       
     6   This program is free software; you can redistribute it and/or modify it
       
     7   under the terms and conditions of the GNU General Public License,
       
     8   version 2, as published by the Free Software Foundation.
       
     9 
       
    10   This program is distributed in the hope it will be useful, but WITHOUT
       
    11   ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
       
    12   FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
       
    13   more details.
       
    14 
       
    15   You should have received a copy of the GNU General Public License along with
       
    16   this program; if not, write to the Free Software Foundation, Inc.,
       
    17   51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
       
    18 
       
    19   The full GNU General Public License is included in this distribution in
       
    20   the file called "COPYING".
       
    21 
       
    22   Contact Information:
       
    23   Linux NICS <linux.nics@intel.com>
       
    24   e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
       
    25   Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
       
    26 
       
    27 *******************************************************************************/
       
    28 
       
    29 #include "e1000.h"
       
    30 #include <net/ip6_checksum.h>
       
    31 
       
    32 char e1000_driver_name[] = "e1000";
       
    33 static char e1000_driver_string[] = "Intel(R) PRO/1000 Network Driver";
       
    34 #define DRV_VERSION "7.3.21-k3-NAPI"
       
    35 const char e1000_driver_version[] = DRV_VERSION;
       
    36 static const char e1000_copyright[] = "Copyright (c) 1999-2006 Intel Corporation.";
       
    37 
       
    38 /* e1000_pci_tbl - PCI Device ID Table
       
    39  *
       
    40  * Last entry must be all 0s
       
    41  *
       
    42  * Macro expands to...
       
    43  *   {PCI_DEVICE(PCI_VENDOR_ID_INTEL, device_id)}
       
    44  */
       
    45 static struct pci_device_id e1000_pci_tbl[] = {
       
    46 	INTEL_E1000_ETHERNET_DEVICE(0x1000),
       
    47 	INTEL_E1000_ETHERNET_DEVICE(0x1001),
       
    48 	INTEL_E1000_ETHERNET_DEVICE(0x1004),
       
    49 	INTEL_E1000_ETHERNET_DEVICE(0x1008),
       
    50 	INTEL_E1000_ETHERNET_DEVICE(0x1009),
       
    51 	INTEL_E1000_ETHERNET_DEVICE(0x100C),
       
    52 	INTEL_E1000_ETHERNET_DEVICE(0x100D),
       
    53 	INTEL_E1000_ETHERNET_DEVICE(0x100E),
       
    54 	INTEL_E1000_ETHERNET_DEVICE(0x100F),
       
    55 	INTEL_E1000_ETHERNET_DEVICE(0x1010),
       
    56 	INTEL_E1000_ETHERNET_DEVICE(0x1011),
       
    57 	INTEL_E1000_ETHERNET_DEVICE(0x1012),
       
    58 	INTEL_E1000_ETHERNET_DEVICE(0x1013),
       
    59 	INTEL_E1000_ETHERNET_DEVICE(0x1014),
       
    60 	INTEL_E1000_ETHERNET_DEVICE(0x1015),
       
    61 	INTEL_E1000_ETHERNET_DEVICE(0x1016),
       
    62 	INTEL_E1000_ETHERNET_DEVICE(0x1017),
       
    63 	INTEL_E1000_ETHERNET_DEVICE(0x1018),
       
    64 	INTEL_E1000_ETHERNET_DEVICE(0x1019),
       
    65 	INTEL_E1000_ETHERNET_DEVICE(0x101A),
       
    66 	INTEL_E1000_ETHERNET_DEVICE(0x101D),
       
    67 	INTEL_E1000_ETHERNET_DEVICE(0x101E),
       
    68 	INTEL_E1000_ETHERNET_DEVICE(0x1026),
       
    69 	INTEL_E1000_ETHERNET_DEVICE(0x1027),
       
    70 	INTEL_E1000_ETHERNET_DEVICE(0x1028),
       
    71 	INTEL_E1000_ETHERNET_DEVICE(0x1075),
       
    72 	INTEL_E1000_ETHERNET_DEVICE(0x1076),
       
    73 	INTEL_E1000_ETHERNET_DEVICE(0x1077),
       
    74 	INTEL_E1000_ETHERNET_DEVICE(0x1078),
       
    75 	INTEL_E1000_ETHERNET_DEVICE(0x1079),
       
    76 	INTEL_E1000_ETHERNET_DEVICE(0x107A),
       
    77 	INTEL_E1000_ETHERNET_DEVICE(0x107B),
       
    78 	INTEL_E1000_ETHERNET_DEVICE(0x107C),
       
    79 	INTEL_E1000_ETHERNET_DEVICE(0x108A),
       
    80 	INTEL_E1000_ETHERNET_DEVICE(0x1099),
       
    81 	INTEL_E1000_ETHERNET_DEVICE(0x10B5),
       
    82 	/* required last entry */
       
    83 	{0,}
       
    84 };
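
/* Editor's note: PCI_DEVICE(vend, dev) expands to ".vendor = (vend),
 * .device = (dev), .subvendor = PCI_ANY_ID, .subdevice = PCI_ANY_ID",
 * so every subsystem variant of each listed device id is matched.
 */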
       
    85 
       
    86 MODULE_DEVICE_TABLE(pci, e1000_pci_tbl);
       
    87 
       
    88 int e1000_up(struct e1000_adapter *adapter);
       
    89 void e1000_down(struct e1000_adapter *adapter);
       
    90 void e1000_reinit_locked(struct e1000_adapter *adapter);
       
    91 void e1000_reset(struct e1000_adapter *adapter);
       
    92 int e1000_set_spd_dplx(struct e1000_adapter *adapter, u16 spddplx);
       
    93 int e1000_setup_all_tx_resources(struct e1000_adapter *adapter);
       
    94 int e1000_setup_all_rx_resources(struct e1000_adapter *adapter);
       
    95 void e1000_free_all_tx_resources(struct e1000_adapter *adapter);
       
    96 void e1000_free_all_rx_resources(struct e1000_adapter *adapter);
       
    97 static int e1000_setup_tx_resources(struct e1000_adapter *adapter,
       
    98                              struct e1000_tx_ring *txdr);
       
    99 static int e1000_setup_rx_resources(struct e1000_adapter *adapter,
       
   100                              struct e1000_rx_ring *rxdr);
       
   101 static void e1000_free_tx_resources(struct e1000_adapter *adapter,
       
   102                              struct e1000_tx_ring *tx_ring);
       
   103 static void e1000_free_rx_resources(struct e1000_adapter *adapter,
       
   104                              struct e1000_rx_ring *rx_ring);
       
   105 void e1000_update_stats(struct e1000_adapter *adapter);
       
   106 
       
   107 static int e1000_init_module(void);
       
   108 static void e1000_exit_module(void);
       
   109 static int e1000_probe(struct pci_dev *pdev, const struct pci_device_id *ent);
       
   110 static void __devexit e1000_remove(struct pci_dev *pdev);
       
   111 static int e1000_alloc_queues(struct e1000_adapter *adapter);
       
   112 static int e1000_sw_init(struct e1000_adapter *adapter);
       
   113 static int e1000_open(struct net_device *netdev);
       
   114 static int e1000_close(struct net_device *netdev);
       
   115 static void e1000_configure_tx(struct e1000_adapter *adapter);
       
   116 static void e1000_configure_rx(struct e1000_adapter *adapter);
       
   117 static void e1000_setup_rctl(struct e1000_adapter *adapter);
       
   118 static void e1000_clean_all_tx_rings(struct e1000_adapter *adapter);
       
   119 static void e1000_clean_all_rx_rings(struct e1000_adapter *adapter);
       
   120 static void e1000_clean_tx_ring(struct e1000_adapter *adapter,
       
   121                                 struct e1000_tx_ring *tx_ring);
       
   122 static void e1000_clean_rx_ring(struct e1000_adapter *adapter,
       
   123                                 struct e1000_rx_ring *rx_ring);
       
   124 static void e1000_set_rx_mode(struct net_device *netdev);
       
   125 static void e1000_update_phy_info(unsigned long data);
       
   126 static void e1000_watchdog(unsigned long data);
       
   127 static void e1000_82547_tx_fifo_stall(unsigned long data);
       
   128 static int e1000_xmit_frame(struct sk_buff *skb, struct net_device *netdev);
       
   129 static struct net_device_stats * e1000_get_stats(struct net_device *netdev);
       
   130 static int e1000_change_mtu(struct net_device *netdev, int new_mtu);
       
   131 static int e1000_set_mac(struct net_device *netdev, void *p);
       
   132 static irqreturn_t e1000_intr(int irq, void *data);
       
   133 static irqreturn_t e1000_intr_msi(int irq, void *data);
       
   134 static bool e1000_clean_tx_irq(struct e1000_adapter *adapter,
       
   135 			       struct e1000_tx_ring *tx_ring);
       
   136 static int e1000_clean(struct napi_struct *napi, int budget);
       
   137 static bool e1000_clean_rx_irq(struct e1000_adapter *adapter,
       
   138 			       struct e1000_rx_ring *rx_ring,
       
   139 			       int *work_done, int work_to_do);
       
   140 static void e1000_alloc_rx_buffers(struct e1000_adapter *adapter,
       
   141                                    struct e1000_rx_ring *rx_ring,
       
   142 				   int cleaned_count);
       
   143 static int e1000_ioctl(struct net_device *netdev, struct ifreq *ifr, int cmd);
       
   144 static int e1000_mii_ioctl(struct net_device *netdev, struct ifreq *ifr,
       
   145 			   int cmd);
       
   146 static void e1000_enter_82542_rst(struct e1000_adapter *adapter);
       
   147 static void e1000_leave_82542_rst(struct e1000_adapter *adapter);
       
   148 static void e1000_tx_timeout(struct net_device *dev);
       
   149 static void e1000_reset_task(struct work_struct *work);
       
   150 static void e1000_smartspeed(struct e1000_adapter *adapter);
       
   151 static int e1000_82547_fifo_workaround(struct e1000_adapter *adapter,
       
   152                                        struct sk_buff *skb);
       
   153 
       
   154 static void e1000_vlan_rx_register(struct net_device *netdev, struct vlan_group *grp);
       
   155 static void e1000_vlan_rx_add_vid(struct net_device *netdev, u16 vid);
       
   156 static void e1000_vlan_rx_kill_vid(struct net_device *netdev, u16 vid);
       
   157 static void e1000_restore_vlan(struct e1000_adapter *adapter);
       
   158 
       
   159 static int e1000_suspend(struct pci_dev *pdev, pm_message_t state);
       
   160 #ifdef CONFIG_PM
       
   161 static int e1000_resume(struct pci_dev *pdev);
       
   162 #endif
       
   163 static void e1000_shutdown(struct pci_dev *pdev);
       
   164 
       
   165 #ifdef CONFIG_NET_POLL_CONTROLLER
       
   166 /* for netdump / net console */
       
   167 static void e1000_netpoll (struct net_device *netdev);
       
   168 #endif
       
   169 
       
   170 #define COPYBREAK_DEFAULT 256
       
   171 static unsigned int copybreak __read_mostly = COPYBREAK_DEFAULT;
       
   172 module_param(copybreak, uint, 0644);
       
   173 MODULE_PARM_DESC(copybreak,
       
   174 	"Maximum size of packet that is copied to a new buffer on receive");
       
   175 
       
   176 static pci_ers_result_t e1000_io_error_detected(struct pci_dev *pdev,
       
   177                      pci_channel_state_t state);
       
   178 static pci_ers_result_t e1000_io_slot_reset(struct pci_dev *pdev);
       
   179 static void e1000_io_resume(struct pci_dev *pdev);
       
   180 
       
   181 static struct pci_error_handlers e1000_err_handler = {
       
   182 	.error_detected = e1000_io_error_detected,
       
   183 	.slot_reset = e1000_io_slot_reset,
       
   184 	.resume = e1000_io_resume,
       
   185 };
       
   186 
       
   187 static struct pci_driver e1000_driver = {
       
   188 	.name     = e1000_driver_name,
       
   189 	.id_table = e1000_pci_tbl,
       
   190 	.probe    = e1000_probe,
       
   191 	.remove   = __devexit_p(e1000_remove),
       
   192 #ifdef CONFIG_PM
       
    193 	/* Power Management Hooks */
       
   194 	.suspend  = e1000_suspend,
       
   195 	.resume   = e1000_resume,
       
   196 #endif
       
   197 	.shutdown = e1000_shutdown,
       
   198 	.err_handler = &e1000_err_handler
       
   199 };
       
   200 
       
   201 MODULE_AUTHOR("Intel Corporation, <linux.nics@intel.com>");
       
   202 MODULE_DESCRIPTION("Intel(R) PRO/1000 Network Driver");
       
   203 MODULE_LICENSE("GPL");
       
   204 MODULE_VERSION(DRV_VERSION);
       
   205 
       
   206 static int debug = NETIF_MSG_DRV | NETIF_MSG_PROBE;
       
   207 module_param(debug, int, 0);
       
   208 MODULE_PARM_DESC(debug, "Debug level (0=none,...,16=all)");
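
/* Editor's note: e1000_probe() converts this level to a netif message
 * bitmap via "adapter->msg_enable = (1 << debug) - 1", so each level
 * enables all lower ones.  E.g. "modprobe e1000 debug=3" yields
 * msg_enable = 0x7 (NETIF_MSG_DRV | NETIF_MSG_PROBE | NETIF_MSG_LINK).
 */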
       
   209 
       
   210 /**
       
   211  * e1000_init_module - Driver Registration Routine
       
   212  *
       
   213  * e1000_init_module is the first routine called when the driver is
       
   214  * loaded. All it does is register with the PCI subsystem.
       
   215  **/
       
   216 
       
   217 static int __init e1000_init_module(void)
       
   218 {
       
   219 	int ret;
       
   220 	printk(KERN_INFO "%s - version %s\n",
       
   221 	       e1000_driver_string, e1000_driver_version);
       
   222 
       
   223 	printk(KERN_INFO "%s\n", e1000_copyright);
       
   224 
       
   225 	ret = pci_register_driver(&e1000_driver);
       
   226 	if (copybreak != COPYBREAK_DEFAULT) {
       
   227 		if (copybreak == 0)
       
   228 			printk(KERN_INFO "e1000: copybreak disabled\n");
       
   229 		else
       
   230 			printk(KERN_INFO "e1000: copybreak enabled for "
       
   231 			       "packets <= %u bytes\n", copybreak);
       
   232 	}
       
   233 	return ret;
       
   234 }
       
   235 
       
   236 module_init(e1000_init_module);
       
   237 
       
   238 /**
       
   239  * e1000_exit_module - Driver Exit Cleanup Routine
       
   240  *
       
   241  * e1000_exit_module is called just before the driver is removed
       
   242  * from memory.
       
   243  **/
       
   244 
       
   245 static void __exit e1000_exit_module(void)
       
   246 {
       
   247 	pci_unregister_driver(&e1000_driver);
       
   248 }
       
   249 
       
   250 module_exit(e1000_exit_module);
       
   251 
       
   252 static int e1000_request_irq(struct e1000_adapter *adapter)
       
   253 {
       
   254 	struct e1000_hw *hw = &adapter->hw;
       
   255 	struct net_device *netdev = adapter->netdev;
       
   256 	irq_handler_t handler = e1000_intr;
       
   257 	int irq_flags = IRQF_SHARED;
       
   258 	int err;
       
   259 
       
   260 	if (hw->mac_type >= e1000_82571) {
       
   261 		adapter->have_msi = !pci_enable_msi(adapter->pdev);
       
   262 		if (adapter->have_msi) {
       
   263 			handler = e1000_intr_msi;
       
   264 			irq_flags = 0;
       
   265 		}
       
   266 	}
       
   267 
       
   268 	err = request_irq(adapter->pdev->irq, handler, irq_flags, netdev->name,
       
   269 	                  netdev);
       
   270 	if (err) {
       
   271 		if (adapter->have_msi)
       
   272 			pci_disable_msi(adapter->pdev);
       
   273 		DPRINTK(PROBE, ERR,
       
   274 		        "Unable to allocate interrupt Error: %d\n", err);
       
   275 	}
       
   276 
       
   277 	return err;
       
   278 }
       
   279 
       
   280 static void e1000_free_irq(struct e1000_adapter *adapter)
       
   281 {
       
   282 	struct net_device *netdev = adapter->netdev;
       
   283 
       
   284 	free_irq(adapter->pdev->irq, netdev);
       
   285 
       
   286 	if (adapter->have_msi)
       
   287 		pci_disable_msi(adapter->pdev);
       
   288 }
       
   289 
       
   290 /**
       
   291  * e1000_irq_disable - Mask off interrupt generation on the NIC
       
   292  * @adapter: board private structure
       
   293  **/
       
   294 
       
   295 static void e1000_irq_disable(struct e1000_adapter *adapter)
       
   296 {
       
   297 	struct e1000_hw *hw = &adapter->hw;
       
   298 
       
   299 	ew32(IMC, ~0);
       
   300 	E1000_WRITE_FLUSH();
       
   301 	synchronize_irq(adapter->pdev->irq);
       
   302 }
       
   303 
       
   304 /**
       
   305  * e1000_irq_enable - Enable default interrupt generation settings
       
   306  * @adapter: board private structure
       
   307  **/
       
   308 
       
   309 static void e1000_irq_enable(struct e1000_adapter *adapter)
       
   310 {
       
   311 	struct e1000_hw *hw = &adapter->hw;
       
   312 
       
   313 	ew32(IMS, IMS_ENABLE_MASK);
       
   314 	E1000_WRITE_FLUSH();
       
   315 }
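
/* Editor's note: er32()/ew32() are this driver's MMIO register
 * accessors (see e1000.h); they reduce to readl()/writel() on
 * hw->hw_addr plus the register offset.  E1000_WRITE_FLUSH() is a
 * dummy read (of STATUS) that forces posted PCI writes out to the
 * device before the code continues.
 */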
       
   316 
       
   317 static void e1000_update_mng_vlan(struct e1000_adapter *adapter)
       
   318 {
       
   319 	struct e1000_hw *hw = &adapter->hw;
       
   320 	struct net_device *netdev = adapter->netdev;
       
   321 	u16 vid = hw->mng_cookie.vlan_id;
       
   322 	u16 old_vid = adapter->mng_vlan_id;
       
   323 	if (adapter->vlgrp) {
       
   324 		if (!vlan_group_get_device(adapter->vlgrp, vid)) {
       
   325 			if (hw->mng_cookie.status &
       
   326 				E1000_MNG_DHCP_COOKIE_STATUS_VLAN_SUPPORT) {
       
   327 				e1000_vlan_rx_add_vid(netdev, vid);
       
   328 				adapter->mng_vlan_id = vid;
       
   329 			} else
       
   330 				adapter->mng_vlan_id = E1000_MNG_VLAN_NONE;
       
   331 
       
   332 			if ((old_vid != (u16)E1000_MNG_VLAN_NONE) &&
       
   333 					(vid != old_vid) &&
       
   334 			    !vlan_group_get_device(adapter->vlgrp, old_vid))
       
   335 				e1000_vlan_rx_kill_vid(netdev, old_vid);
       
   336 		} else
       
   337 			adapter->mng_vlan_id = vid;
       
   338 	}
       
   339 }
       
   340 
       
   341 /**
       
   342  * e1000_release_hw_control - release control of the h/w to f/w
       
   343  * @adapter: address of board private structure
       
   344  *
       
   345  * e1000_release_hw_control resets {CTRL_EXT|FWSM}:DRV_LOAD bit.
       
   346  * For ASF and Pass Through versions of f/w this means that the
       
    347  * driver is no longer loaded. For AMT version (only with 82573)
       
   348  * of the f/w this means that the network i/f is closed.
       
   349  *
       
   350  **/
       
   351 
       
   352 static void e1000_release_hw_control(struct e1000_adapter *adapter)
       
   353 {
       
   354 	u32 ctrl_ext;
       
   355 	u32 swsm;
       
   356 	struct e1000_hw *hw = &adapter->hw;
       
   357 
       
    358 	/* Let firmware take over control of h/w */
       
   359 	switch (hw->mac_type) {
       
   360 	case e1000_82573:
       
   361 		swsm = er32(SWSM);
       
   362 		ew32(SWSM, swsm & ~E1000_SWSM_DRV_LOAD);
       
   363 		break;
       
   364 	case e1000_82571:
       
   365 	case e1000_82572:
       
   366 	case e1000_80003es2lan:
       
   367 	case e1000_ich8lan:
       
   368 		ctrl_ext = er32(CTRL_EXT);
       
   369 		ew32(CTRL_EXT, ctrl_ext & ~E1000_CTRL_EXT_DRV_LOAD);
       
   370 		break;
       
   371 	default:
       
   372 		break;
       
   373 	}
       
   374 }
       
   375 
       
   376 /**
       
   377  * e1000_get_hw_control - get control of the h/w from f/w
       
   378  * @adapter: address of board private structure
       
   379  *
       
   380  * e1000_get_hw_control sets {CTRL_EXT|FWSM}:DRV_LOAD bit.
       
   381  * For ASF and Pass Through versions of f/w this means that
       
   382  * the driver is loaded. For AMT version (only with 82573)
       
   383  * of the f/w this means that the network i/f is open.
       
   384  *
       
   385  **/
       
   386 
       
   387 static void e1000_get_hw_control(struct e1000_adapter *adapter)
       
   388 {
       
   389 	u32 ctrl_ext;
       
   390 	u32 swsm;
       
   391 	struct e1000_hw *hw = &adapter->hw;
       
   392 
       
   393 	/* Let firmware know the driver has taken over */
       
   394 	switch (hw->mac_type) {
       
   395 	case e1000_82573:
       
   396 		swsm = er32(SWSM);
       
   397 		ew32(SWSM, swsm | E1000_SWSM_DRV_LOAD);
       
   398 		break;
       
   399 	case e1000_82571:
       
   400 	case e1000_82572:
       
   401 	case e1000_80003es2lan:
       
   402 	case e1000_ich8lan:
       
   403 		ctrl_ext = er32(CTRL_EXT);
       
   404 		ew32(CTRL_EXT, ctrl_ext | E1000_CTRL_EXT_DRV_LOAD);
       
   405 		break;
       
   406 	default:
       
   407 		break;
       
   408 	}
       
   409 }
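
/* Editor's note: e1000_get_hw_control()/e1000_release_hw_control()
 * form a handshake with the management firmware: DRV_LOAD is asserted
 * from probe/open and cleared from close/remove, so AMT-capable parts
 * know when the firmware must handle management traffic itself.
 */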
       
   410 
       
   411 static void e1000_init_manageability(struct e1000_adapter *adapter)
       
   412 {
       
   413 	struct e1000_hw *hw = &adapter->hw;
       
   414 
       
   415 	if (adapter->en_mng_pt) {
       
   416 		u32 manc = er32(MANC);
       
   417 
       
   418 		/* disable hardware interception of ARP */
       
   419 		manc &= ~(E1000_MANC_ARP_EN);
       
   420 
       
   421 		/* enable receiving management packets to the host */
       
   422 		/* this will probably generate destination unreachable messages
       
   423 		 * from the host OS, but the packets will be handled on SMBUS */
       
   424 		if (hw->has_manc2h) {
       
   425 			u32 manc2h = er32(MANC2H);
       
   426 
       
   427 			manc |= E1000_MANC_EN_MNG2HOST;
       
   428 #define E1000_MNG2HOST_PORT_623 (1 << 5)
       
   429 #define E1000_MNG2HOST_PORT_664 (1 << 6)
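			/* UDP ports 623 and 664 carry RMCP/ASF management
			 * traffic (e.g. IPMI); route them up to the host */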
       
   430 			manc2h |= E1000_MNG2HOST_PORT_623;
       
   431 			manc2h |= E1000_MNG2HOST_PORT_664;
       
   432 			ew32(MANC2H, manc2h);
       
   433 		}
       
   434 
       
   435 		ew32(MANC, manc);
       
   436 	}
       
   437 }
       
   438 
       
   439 static void e1000_release_manageability(struct e1000_adapter *adapter)
       
   440 {
       
   441 	struct e1000_hw *hw = &adapter->hw;
       
   442 
       
   443 	if (adapter->en_mng_pt) {
       
   444 		u32 manc = er32(MANC);
       
   445 
       
   446 		/* re-enable hardware interception of ARP */
       
   447 		manc |= E1000_MANC_ARP_EN;
       
   448 
       
   449 		if (hw->has_manc2h)
       
   450 			manc &= ~E1000_MANC_EN_MNG2HOST;
       
   451 
       
   452 		/* don't explicitly have to mess with MANC2H since
       
    453 		 * MANC has an enable/disable bit that gates MANC2H */
       
   454 
       
   455 		ew32(MANC, manc);
       
   456 	}
       
   457 }
       
   458 
       
   459 /**
       
   460  * e1000_configure - configure the hardware for RX and TX
       
    461  * @adapter: board private structure
       
   462  **/
       
   463 static void e1000_configure(struct e1000_adapter *adapter)
       
   464 {
       
   465 	struct net_device *netdev = adapter->netdev;
       
   466 	int i;
       
   467 
       
   468 	e1000_set_rx_mode(netdev);
       
   469 
       
   470 	e1000_restore_vlan(adapter);
       
   471 	e1000_init_manageability(adapter);
       
   472 
       
   473 	e1000_configure_tx(adapter);
       
   474 	e1000_setup_rctl(adapter);
       
   475 	e1000_configure_rx(adapter);
       
   476 	/* call E1000_DESC_UNUSED which always leaves
       
   477 	 * at least 1 descriptor unused to make sure
       
   478 	 * next_to_use != next_to_clean */
       
   479 	for (i = 0; i < adapter->num_rx_queues; i++) {
       
   480 		struct e1000_rx_ring *ring = &adapter->rx_ring[i];
       
   481 		adapter->alloc_rx_buf(adapter, ring,
       
   482 		                      E1000_DESC_UNUSED(ring));
       
   483 	}
       
   484 
       
   485 	adapter->tx_queue_len = netdev->tx_queue_len;
       
   486 }
       
   487 
       
   488 int e1000_up(struct e1000_adapter *adapter)
       
   489 {
       
   490 	struct e1000_hw *hw = &adapter->hw;
       
   491 
       
   492 	/* hardware has been reset, we need to reload some things */
       
   493 	e1000_configure(adapter);
       
   494 
       
   495 	clear_bit(__E1000_DOWN, &adapter->flags);
       
   496 
       
   497 	napi_enable(&adapter->napi);
       
   498 
       
   499 	e1000_irq_enable(adapter);
       
   500 
       
   501 	/* fire a link change interrupt to start the watchdog */
       
   502 	ew32(ICS, E1000_ICS_LSC);
       
   503 	return 0;
       
   504 }
       
   505 
       
   506 /**
       
   507  * e1000_power_up_phy - restore link in case the phy was powered down
       
   508  * @adapter: address of board private structure
       
   509  *
       
   510  * The phy may be powered down to save power and turn off link when the
       
   511  * driver is unloaded and wake on lan is not enabled (among others)
       
   512  * *** this routine MUST be followed by a call to e1000_reset ***
       
   513  *
       
   514  **/
       
   515 
       
   516 void e1000_power_up_phy(struct e1000_adapter *adapter)
       
   517 {
       
   518 	struct e1000_hw *hw = &adapter->hw;
       
   519 	u16 mii_reg = 0;
       
   520 
       
   521 	/* Just clear the power down bit to wake the phy back up */
       
   522 	if (hw->media_type == e1000_media_type_copper) {
       
   523 		/* according to the manual, the phy will retain its
       
   524 		 * settings across a power-down/up cycle */
       
   525 		e1000_read_phy_reg(hw, PHY_CTRL, &mii_reg);
       
   526 		mii_reg &= ~MII_CR_POWER_DOWN;
       
   527 		e1000_write_phy_reg(hw, PHY_CTRL, mii_reg);
       
   528 	}
       
   529 }
       
   530 
       
   531 static void e1000_power_down_phy(struct e1000_adapter *adapter)
       
   532 {
       
   533 	struct e1000_hw *hw = &adapter->hw;
       
   534 
       
    535 	/* Power down the PHY so no link is implied when interface is down.

    536 	 * The PHY cannot be powered down if any of the following is true:
       
   537 	 * (a) WoL is enabled
       
   538 	 * (b) AMT is active
       
   539 	 * (c) SoL/IDER session is active */
       
   540 	if (!adapter->wol && hw->mac_type >= e1000_82540 &&
       
   541 	   hw->media_type == e1000_media_type_copper) {
       
   542 		u16 mii_reg = 0;
       
   543 
       
   544 		switch (hw->mac_type) {
       
   545 		case e1000_82540:
       
   546 		case e1000_82545:
       
   547 		case e1000_82545_rev_3:
       
   548 		case e1000_82546:
       
   549 		case e1000_82546_rev_3:
       
   550 		case e1000_82541:
       
   551 		case e1000_82541_rev_2:
       
   552 		case e1000_82547:
       
   553 		case e1000_82547_rev_2:
       
   554 			if (er32(MANC) & E1000_MANC_SMBUS_EN)
       
   555 				goto out;
       
   556 			break;
       
   557 		case e1000_82571:
       
   558 		case e1000_82572:
       
   559 		case e1000_82573:
       
   560 		case e1000_80003es2lan:
       
   561 		case e1000_ich8lan:
       
   562 			if (e1000_check_mng_mode(hw) ||
       
   563 			    e1000_check_phy_reset_block(hw))
       
   564 				goto out;
       
   565 			break;
       
   566 		default:
       
   567 			goto out;
       
   568 		}
       
   569 		e1000_read_phy_reg(hw, PHY_CTRL, &mii_reg);
       
   570 		mii_reg |= MII_CR_POWER_DOWN;
       
   571 		e1000_write_phy_reg(hw, PHY_CTRL, mii_reg);
       
   572 		mdelay(1);
       
   573 	}
       
   574 out:
       
   575 	return;
       
   576 }
       
   577 
       
   578 void e1000_down(struct e1000_adapter *adapter)
       
   579 {
       
   580 	struct net_device *netdev = adapter->netdev;
       
   581 
       
   582 	/* signal that we're down so the interrupt handler does not
       
   583 	 * reschedule our watchdog timer */
       
   584 	set_bit(__E1000_DOWN, &adapter->flags);
       
   585 
       
   586 	napi_disable(&adapter->napi);
       
   587 
       
   588 	e1000_irq_disable(adapter);
       
   589 
       
   590 	del_timer_sync(&adapter->tx_fifo_stall_timer);
       
   591 	del_timer_sync(&adapter->watchdog_timer);
       
   592 	del_timer_sync(&adapter->phy_info_timer);
       
   593 
       
   594 	netdev->tx_queue_len = adapter->tx_queue_len;
       
   595 	adapter->link_speed = 0;
       
   596 	adapter->link_duplex = 0;
       
   597 	netif_carrier_off(netdev);
       
   598 	netif_stop_queue(netdev);
       
   599 
       
   600 	e1000_reset(adapter);
       
   601 	e1000_clean_all_tx_rings(adapter);
       
   602 	e1000_clean_all_rx_rings(adapter);
       
   603 }
       
   604 
       
   605 void e1000_reinit_locked(struct e1000_adapter *adapter)
       
   606 {
       
   607 	WARN_ON(in_interrupt());
       
   608 	while (test_and_set_bit(__E1000_RESETTING, &adapter->flags))
       
   609 		msleep(1);
       
   610 	e1000_down(adapter);
       
   611 	e1000_up(adapter);
       
   612 	clear_bit(__E1000_RESETTING, &adapter->flags);
       
   613 }
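
/* Editor's note: __E1000_RESETTING serializes resets without a mutex;
 * concurrent callers (e.g. ethtool paths and the reset_task worker)
 * spin in 1 ms sleeps until the bit owner finishes e1000_down/up.
 */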
       
   614 
       
   615 void e1000_reset(struct e1000_adapter *adapter)
       
   616 {
       
   617 	struct e1000_hw *hw = &adapter->hw;
       
   618 	u32 pba = 0, tx_space, min_tx_space, min_rx_space;
       
   619 	u16 fc_high_water_mark = E1000_FC_HIGH_DIFF;
       
   620 	bool legacy_pba_adjust = false;
       
   621 
       
    622 	/* Repartition the PBA for a greater-than-9k MTU.

    623 	 * CTRL.RST is required for the change to take effect.
       
   624 	 */
       
   625 
       
   626 	switch (hw->mac_type) {
       
   627 	case e1000_82542_rev2_0:
       
   628 	case e1000_82542_rev2_1:
       
   629 	case e1000_82543:
       
   630 	case e1000_82544:
       
   631 	case e1000_82540:
       
   632 	case e1000_82541:
       
   633 	case e1000_82541_rev_2:
       
   634 		legacy_pba_adjust = true;
       
   635 		pba = E1000_PBA_48K;
       
   636 		break;
       
   637 	case e1000_82545:
       
   638 	case e1000_82545_rev_3:
       
   639 	case e1000_82546:
       
   640 	case e1000_82546_rev_3:
       
   641 		pba = E1000_PBA_48K;
       
   642 		break;
       
   643 	case e1000_82547:
       
   644 	case e1000_82547_rev_2:
       
   645 		legacy_pba_adjust = true;
       
   646 		pba = E1000_PBA_30K;
       
   647 		break;
       
   648 	case e1000_82571:
       
   649 	case e1000_82572:
       
   650 	case e1000_80003es2lan:
       
   651 		pba = E1000_PBA_38K;
       
   652 		break;
       
   653 	case e1000_82573:
       
   654 		pba = E1000_PBA_20K;
       
   655 		break;
       
   656 	case e1000_ich8lan:
       
   657 		pba = E1000_PBA_8K;
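		/* fall through */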
       
   658 	case e1000_undefined:
       
   659 	case e1000_num_macs:
       
   660 		break;
       
   661 	}
       
   662 
       
   663 	if (legacy_pba_adjust) {
       
   664 		if (adapter->netdev->mtu > E1000_RXBUFFER_8192)
       
   665 			pba -= 8; /* allocate more FIFO for Tx */
       
   666 
       
   667 		if (hw->mac_type == e1000_82547) {
       
   668 			adapter->tx_fifo_head = 0;
       
   669 			adapter->tx_head_addr = pba << E1000_TX_HEAD_ADDR_SHIFT;
       
   670 			adapter->tx_fifo_size =
       
   671 				(E1000_PBA_40K - pba) << E1000_PBA_BYTES_SHIFT;
       
   672 			atomic_set(&adapter->tx_fifo_stall, 0);
       
   673 		}
       
   674 	} else if (hw->max_frame_size > MAXIMUM_ETHERNET_FRAME_SIZE) {
       
   675 		/* adjust PBA for jumbo frames */
       
   676 		ew32(PBA, pba);
       
   677 
       
   678 		/* To maintain wire speed transmits, the Tx FIFO should be
       
    679 		 * large enough to accommodate two full transmit packets,
       
   680 		 * rounded up to the next 1KB and expressed in KB.  Likewise,
       
    681 		 * the Rx FIFO should be large enough to accommodate at least
       
   682 		 * one full receive packet and is similarly rounded up and
       
   683 		 * expressed in KB. */
       
   684 		pba = er32(PBA);
       
   685 		/* upper 16 bits has Tx packet buffer allocation size in KB */
       
   686 		tx_space = pba >> 16;
       
   687 		/* lower 16 bits has Rx packet buffer allocation size in KB */
       
   688 		pba &= 0xffff;
       
   689 		/* don't include ethernet FCS because hardware appends/strips */
       
   690 		min_rx_space = adapter->netdev->mtu + ENET_HEADER_SIZE +
       
   691 		               VLAN_TAG_SIZE;
       
   692 		min_tx_space = min_rx_space;
       
   693 		min_tx_space *= 2;
       
   694 		min_tx_space = ALIGN(min_tx_space, 1024);
       
   695 		min_tx_space >>= 10;
       
   696 		min_rx_space = ALIGN(min_rx_space, 1024);
       
   697 		min_rx_space >>= 10;
       
   698 
       
   699 		/* If current Tx allocation is less than the min Tx FIFO size,
       
   700 		 * and the min Tx FIFO size is less than the current Rx FIFO
       
   701 		 * allocation, take space away from current Rx allocation */
       
   702 		if (tx_space < min_tx_space &&
       
   703 		    ((min_tx_space - tx_space) < pba)) {
       
   704 			pba = pba - (min_tx_space - tx_space);
       
   705 
       
   706 			/* PCI/PCIx hardware has PBA alignment constraints */
       
   707 			switch (hw->mac_type) {
       
   708 			case e1000_82545 ... e1000_82546_rev_3:
       
   709 				pba &= ~(E1000_PBA_8K - 1);
       
   710 				break;
       
   711 			default:
       
   712 				break;
       
   713 			}
       
   714 
       
   715 			/* if short on rx space, rx wins and must trump tx
       
   716 			 * adjustment or use Early Receive if available */
       
   717 			if (pba < min_rx_space) {
       
   718 				switch (hw->mac_type) {
       
   719 				case e1000_82573:
       
   720 					/* ERT enabled in e1000_configure_rx */
       
   721 					break;
       
   722 				default:
       
   723 					pba = min_rx_space;
       
   724 					break;
       
   725 				}
       
   726 			}
       
   727 		}
       
   728 	}
       
   729 
       
   730 	ew32(PBA, pba);
       
   731 
       
   732 	/* flow control settings */
       
   733 	/* Set the FC high water mark to 90% of the FIFO size.
       
    734 	 * The register requires the low 3 bits clear, hence the mask */
       
   735 	fc_high_water_mark = ((pba * 9216)/10) & 0xFFF8;
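	/* e.g. pba = 48 (KB): 48 * 9216 / 10 = 44236, masked to 44232
	 * bytes, i.e. ~90% of the 49152-byte FIFO on an 8-byte boundary */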
       
   736 	/* We can't use 90% on small FIFOs because the remainder
       
   737 	 * would be less than 1 full frame.  In this case, we size
       
   738 	 * it to allow at least a full frame above the high water
       
   739 	 *  mark. */
       
   740 	if (pba < E1000_PBA_16K)
       
   741 		fc_high_water_mark = (pba * 1024) - 1600;
       
   742 
       
   743 	hw->fc_high_water = fc_high_water_mark;
       
   744 	hw->fc_low_water = fc_high_water_mark - 8;
       
   745 	if (hw->mac_type == e1000_80003es2lan)
       
   746 		hw->fc_pause_time = 0xFFFF;
       
   747 	else
       
   748 		hw->fc_pause_time = E1000_FC_PAUSE_TIME;
       
   749 	hw->fc_send_xon = 1;
       
   750 	hw->fc = hw->original_fc;
       
   751 
       
   752 	/* Allow time for pending master requests to run */
       
   753 	e1000_reset_hw(hw);
       
   754 	if (hw->mac_type >= e1000_82544)
       
   755 		ew32(WUC, 0);
       
   756 
       
   757 	if (e1000_init_hw(hw))
       
   758 		DPRINTK(PROBE, ERR, "Hardware Error\n");
       
   759 	e1000_update_mng_vlan(adapter);
       
   760 
       
   761 	/* if (adapter->hwflags & HWFLAGS_PHY_PWR_BIT) { */
       
   762 	if (hw->mac_type >= e1000_82544 &&
       
   763 	    hw->mac_type <= e1000_82547_rev_2 &&
       
   764 	    hw->autoneg == 1 &&
       
   765 	    hw->autoneg_advertised == ADVERTISE_1000_FULL) {
       
   766 		u32 ctrl = er32(CTRL);
       
   767 		/* clear phy power management bit if we are in gig only mode,
       
   768 		 * which if enabled will attempt negotiation to 100Mb, which
       
   769 		 * can cause a loss of link at power off or driver unload */
       
   770 		ctrl &= ~E1000_CTRL_SWDPIN3;
       
   771 		ew32(CTRL, ctrl);
       
   772 	}
       
   773 
       
   774 	/* Enable h/w to recognize an 802.1Q VLAN Ethernet packet */
       
   775 	ew32(VET, ETHERNET_IEEE_VLAN_TYPE);
       
   776 
       
   777 	e1000_reset_adaptive(hw);
       
   778 	e1000_phy_get_info(hw, &adapter->phy_info);
       
   779 
       
   780 	if (!adapter->smart_power_down &&
       
   781 	    (hw->mac_type == e1000_82571 ||
       
   782 	     hw->mac_type == e1000_82572)) {
       
   783 		u16 phy_data = 0;
       
   784 		/* speed up time to link by disabling smart power down, ignore
       
   785 		 * the return value of this function because there is nothing
       
   786 		 * different we would do if it failed */
       
   787 		e1000_read_phy_reg(hw, IGP02E1000_PHY_POWER_MGMT,
       
   788 		                   &phy_data);
       
   789 		phy_data &= ~IGP02E1000_PM_SPD;
       
   790 		e1000_write_phy_reg(hw, IGP02E1000_PHY_POWER_MGMT,
       
   791 		                    phy_data);
       
   792 	}
       
   793 
       
   794 	e1000_release_manageability(adapter);
       
   795 }
       
   796 
       
   797 /**
       
    798  * e1000_dump_eeprom - dump the eeprom for users having checksum issues
       
   799  **/
       
   800 static void e1000_dump_eeprom(struct e1000_adapter *adapter)
       
   801 {
       
   802 	struct net_device *netdev = adapter->netdev;
       
   803 	struct ethtool_eeprom eeprom;
       
   804 	const struct ethtool_ops *ops = netdev->ethtool_ops;
       
   805 	u8 *data;
       
   806 	int i;
       
   807 	u16 csum_old, csum_new = 0;
       
   808 
       
   809 	eeprom.len = ops->get_eeprom_len(netdev);
       
   810 	eeprom.offset = 0;
       
   811 
       
   812 	data = kmalloc(eeprom.len, GFP_KERNEL);
       
   813 	if (!data) {
       
   814 		printk(KERN_ERR "Unable to allocate memory to dump EEPROM"
       
   815 		       " data\n");
       
   816 		return;
       
   817 	}
       
   818 
       
   819 	ops->get_eeprom(netdev, &eeprom, data);
       
   820 
       
   821 	csum_old = (data[EEPROM_CHECKSUM_REG * 2]) +
       
   822 		   (data[EEPROM_CHECKSUM_REG * 2 + 1] << 8);
       
   823 	for (i = 0; i < EEPROM_CHECKSUM_REG * 2; i += 2)
       
   824 		csum_new += data[i] + (data[i + 1] << 8);
       
   825 	csum_new = EEPROM_SUM - csum_new;
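	/* words 0x00-0x3F of a good EEPROM sum to EEPROM_SUM (0xBABA);
	 * the expected checksum word is therefore EEPROM_SUM minus the
	 * sum of the preceding words computed above */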
       
   826 
       
   827 	printk(KERN_ERR "/*********************/\n");
       
   828 	printk(KERN_ERR "Current EEPROM Checksum : 0x%04x\n", csum_old);
       
   829 	printk(KERN_ERR "Calculated              : 0x%04x\n", csum_new);
       
   830 
       
   831 	printk(KERN_ERR "Offset    Values\n");
       
   832 	printk(KERN_ERR "========  ======\n");
       
   833 	print_hex_dump(KERN_ERR, "", DUMP_PREFIX_OFFSET, 16, 1, data, 128, 0);
       
   834 
       
   835 	printk(KERN_ERR "Include this output when contacting your support "
       
   836 	       "provider.\n");
       
   837 	printk(KERN_ERR "This is not a software error! Something bad "
       
   838 	       "happened to your hardware or\n");
       
   839 	printk(KERN_ERR "EEPROM image. Ignoring this "
       
   840 	       "problem could result in further problems,\n");
       
   841 	printk(KERN_ERR "possibly loss of data, corruption or system hangs!\n");
       
   842 	printk(KERN_ERR "The MAC Address will be reset to 00:00:00:00:00:00, "
       
   843 	       "which is invalid\n");
       
   844 	printk(KERN_ERR "and requires you to set the proper MAC "
       
   845 	       "address manually before continuing\n");
       
   846 	printk(KERN_ERR "to enable this network device.\n");
       
   847 	printk(KERN_ERR "Please inspect the EEPROM dump and report the issue "
       
   848 	       "to your hardware vendor\n");
       
   849 	printk(KERN_ERR "or Intel Customer Support.\n");
       
   850 	printk(KERN_ERR "/*********************/\n");
       
   851 
       
   852 	kfree(data);
       
   853 }
       
   854 
       
   855 /**
       
   856  * e1000_is_need_ioport - determine if an adapter needs ioport resources or not
       
   857  * @pdev: PCI device information struct
       
   858  *
       
   859  * Return true if an adapter needs ioport resources
       
   860  **/
       
   861 static int e1000_is_need_ioport(struct pci_dev *pdev)
       
   862 {
       
   863 	switch (pdev->device) {
       
   864 	case E1000_DEV_ID_82540EM:
       
   865 	case E1000_DEV_ID_82540EM_LOM:
       
   866 	case E1000_DEV_ID_82540EP:
       
   867 	case E1000_DEV_ID_82540EP_LOM:
       
   868 	case E1000_DEV_ID_82540EP_LP:
       
   869 	case E1000_DEV_ID_82541EI:
       
   870 	case E1000_DEV_ID_82541EI_MOBILE:
       
   871 	case E1000_DEV_ID_82541ER:
       
   872 	case E1000_DEV_ID_82541ER_LOM:
       
   873 	case E1000_DEV_ID_82541GI:
       
   874 	case E1000_DEV_ID_82541GI_LF:
       
   875 	case E1000_DEV_ID_82541GI_MOBILE:
       
   876 	case E1000_DEV_ID_82544EI_COPPER:
       
   877 	case E1000_DEV_ID_82544EI_FIBER:
       
   878 	case E1000_DEV_ID_82544GC_COPPER:
       
   879 	case E1000_DEV_ID_82544GC_LOM:
       
   880 	case E1000_DEV_ID_82545EM_COPPER:
       
   881 	case E1000_DEV_ID_82545EM_FIBER:
       
   882 	case E1000_DEV_ID_82546EB_COPPER:
       
   883 	case E1000_DEV_ID_82546EB_FIBER:
       
   884 	case E1000_DEV_ID_82546EB_QUAD_COPPER:
       
   885 		return true;
       
   886 	default:
       
   887 		return false;
       
   888 	}
       
   889 }
       
   890 
       
   891 /**
       
   892  * e1000_probe - Device Initialization Routine
       
   893  * @pdev: PCI device information struct
       
   894  * @ent: entry in e1000_pci_tbl
       
   895  *
       
   896  * Returns 0 on success, negative on failure
       
   897  *
       
   898  * e1000_probe initializes an adapter identified by a pci_dev structure.
       
   899  * The OS initialization, configuring of the adapter private structure,
       
   900  * and a hardware reset occur.
       
   901  **/
       
   902 static int __devinit e1000_probe(struct pci_dev *pdev,
       
   903 				 const struct pci_device_id *ent)
       
   904 {
       
   905 	struct net_device *netdev;
       
   906 	struct e1000_adapter *adapter;
       
   907 	struct e1000_hw *hw;
       
   908 
       
   909 	static int cards_found = 0;
       
   910 	static int global_quad_port_a = 0; /* global ksp3 port a indication */
       
   911 	int i, err, pci_using_dac;
       
   912 	u16 eeprom_data = 0;
       
   913 	u16 eeprom_apme_mask = E1000_EEPROM_APME;
       
   914 	int bars, need_ioport;
       
   915 	DECLARE_MAC_BUF(mac);
       
   916 
       
   917 	/* do not allocate ioport bars when not needed */
       
   918 	need_ioport = e1000_is_need_ioport(pdev);
       
   919 	if (need_ioport) {
       
   920 		bars = pci_select_bars(pdev, IORESOURCE_MEM | IORESOURCE_IO);
       
   921 		err = pci_enable_device(pdev);
       
   922 	} else {
       
   923 		bars = pci_select_bars(pdev, IORESOURCE_MEM);
       
   924 		err = pci_enable_device_mem(pdev);
       
   925 	}
       
   926 	if (err)
       
   927 		return err;
       
   928 
       
   929 	if (!pci_set_dma_mask(pdev, DMA_64BIT_MASK) &&
       
   930 	    !pci_set_consistent_dma_mask(pdev, DMA_64BIT_MASK)) {
       
   931 		pci_using_dac = 1;
       
   932 	} else {
       
   933 		err = pci_set_dma_mask(pdev, DMA_32BIT_MASK);
       
   934 		if (err) {
       
   935 			err = pci_set_consistent_dma_mask(pdev, DMA_32BIT_MASK);
       
   936 			if (err) {
       
   937 				E1000_ERR("No usable DMA configuration, "
       
   938 					  "aborting\n");
       
   939 				goto err_dma;
       
   940 			}
       
   941 		}
       
   942 		pci_using_dac = 0;
       
   943 	}
       
   944 
       
   945 	err = pci_request_selected_regions(pdev, bars, e1000_driver_name);
       
   946 	if (err)
       
   947 		goto err_pci_reg;
       
   948 
       
   949 	pci_set_master(pdev);
       
   950 
       
   951 	err = -ENOMEM;
       
   952 	netdev = alloc_etherdev(sizeof(struct e1000_adapter));
       
   953 	if (!netdev)
       
   954 		goto err_alloc_etherdev;
       
   955 
       
   956 	SET_NETDEV_DEV(netdev, &pdev->dev);
       
   957 
       
   958 	pci_set_drvdata(pdev, netdev);
       
   959 	adapter = netdev_priv(netdev);
       
   960 	adapter->netdev = netdev;
       
   961 	adapter->pdev = pdev;
       
   962 	adapter->msg_enable = (1 << debug) - 1;
       
   963 	adapter->bars = bars;
       
   964 	adapter->need_ioport = need_ioport;
       
   965 
       
   966 	hw = &adapter->hw;
       
   967 	hw->back = adapter;
       
   968 
       
   969 	err = -EIO;
       
   970 	hw->hw_addr = ioremap(pci_resource_start(pdev, BAR_0),
       
   971 			      pci_resource_len(pdev, BAR_0));
       
   972 	if (!hw->hw_addr)
       
   973 		goto err_ioremap;
       
   974 
       
   975 	if (adapter->need_ioport) {
       
   976 		for (i = BAR_1; i <= BAR_5; i++) {
       
   977 			if (pci_resource_len(pdev, i) == 0)
       
   978 				continue;
       
   979 			if (pci_resource_flags(pdev, i) & IORESOURCE_IO) {
       
   980 				hw->io_base = pci_resource_start(pdev, i);
       
   981 				break;
       
   982 			}
       
   983 		}
       
   984 	}
       
   985 
       
   986 	netdev->open = &e1000_open;
       
   987 	netdev->stop = &e1000_close;
       
   988 	netdev->hard_start_xmit = &e1000_xmit_frame;
       
   989 	netdev->get_stats = &e1000_get_stats;
       
   990 	netdev->set_rx_mode = &e1000_set_rx_mode;
       
   991 	netdev->set_mac_address = &e1000_set_mac;
       
   992 	netdev->change_mtu = &e1000_change_mtu;
       
   993 	netdev->do_ioctl = &e1000_ioctl;
       
   994 	e1000_set_ethtool_ops(netdev);
       
   995 	netdev->tx_timeout = &e1000_tx_timeout;
       
   996 	netdev->watchdog_timeo = 5 * HZ;
       
   997 	netif_napi_add(netdev, &adapter->napi, e1000_clean, 64);
       
   998 	netdev->vlan_rx_register = e1000_vlan_rx_register;
       
   999 	netdev->vlan_rx_add_vid = e1000_vlan_rx_add_vid;
       
  1000 	netdev->vlan_rx_kill_vid = e1000_vlan_rx_kill_vid;
       
  1001 #ifdef CONFIG_NET_POLL_CONTROLLER
       
  1002 	netdev->poll_controller = e1000_netpoll;
       
  1003 #endif
       
  1004 	strncpy(netdev->name, pci_name(pdev), sizeof(netdev->name) - 1);
       
  1005 
       
  1006 	adapter->bd_number = cards_found;
       
  1007 
       
  1008 	/* setup the private structure */
       
  1009 
       
  1010 	err = e1000_sw_init(adapter);
       
  1011 	if (err)
       
  1012 		goto err_sw_init;
       
  1013 
       
  1014 	err = -EIO;
       
  1015 	/* Flash BAR mapping must happen after e1000_sw_init
       
  1016 	 * because it depends on mac_type */
       
  1017 	if ((hw->mac_type == e1000_ich8lan) &&
       
  1018 	   (pci_resource_flags(pdev, 1) & IORESOURCE_MEM)) {
       
  1019 		hw->flash_address =
       
  1020 			ioremap(pci_resource_start(pdev, 1),
       
  1021 				pci_resource_len(pdev, 1));
       
  1022 		if (!hw->flash_address)
       
  1023 			goto err_flashmap;
       
  1024 	}
       
  1025 
       
  1026 	if (e1000_check_phy_reset_block(hw))
       
  1027 		DPRINTK(PROBE, INFO, "PHY reset is blocked due to SOL/IDER session.\n");
       
  1028 
       
  1029 	if (hw->mac_type >= e1000_82543) {
       
  1030 		netdev->features = NETIF_F_SG |
       
  1031 				   NETIF_F_HW_CSUM |
       
  1032 				   NETIF_F_HW_VLAN_TX |
       
  1033 				   NETIF_F_HW_VLAN_RX |
       
  1034 				   NETIF_F_HW_VLAN_FILTER;
       
  1035 		if (hw->mac_type == e1000_ich8lan)
       
  1036 			netdev->features &= ~NETIF_F_HW_VLAN_FILTER;
       
  1037 	}
       
  1038 
       
  1039 	if ((hw->mac_type >= e1000_82544) &&
       
  1040 	   (hw->mac_type != e1000_82547))
       
  1041 		netdev->features |= NETIF_F_TSO;
       
  1042 
       
  1043 	if (hw->mac_type > e1000_82547_rev_2)
       
  1044 		netdev->features |= NETIF_F_TSO6;
       
  1045 	if (pci_using_dac)
       
  1046 		netdev->features |= NETIF_F_HIGHDMA;
       
  1047 
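	/* NETIF_F_LLTX: the driver performs its own tx serialization in
	 * e1000_xmit_frame() instead of relying on the core xmit lock */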
       
  1048 	netdev->features |= NETIF_F_LLTX;
       
  1049 
       
  1050 	netdev->vlan_features |= NETIF_F_TSO;
       
  1051 	netdev->vlan_features |= NETIF_F_TSO6;
       
  1052 	netdev->vlan_features |= NETIF_F_HW_CSUM;
       
  1053 	netdev->vlan_features |= NETIF_F_SG;
       
  1054 
       
  1055 	adapter->en_mng_pt = e1000_enable_mng_pass_thru(hw);
       
  1056 
       
  1057 	/* initialize eeprom parameters */
       
  1058 	if (e1000_init_eeprom_params(hw)) {
       
  1059 		E1000_ERR("EEPROM initialization failed\n");
       
  1060 		goto err_eeprom;
       
  1061 	}
       
  1062 
       
  1063 	/* before reading the EEPROM, reset the controller to
       
  1064 	 * put the device in a known good starting state */
       
  1065 
       
  1066 	e1000_reset_hw(hw);
       
  1067 
       
  1068 	/* make sure the EEPROM is good */
       
  1069 	if (e1000_validate_eeprom_checksum(hw) < 0) {
       
  1070 		DPRINTK(PROBE, ERR, "The EEPROM Checksum Is Not Valid\n");
       
  1071 		e1000_dump_eeprom(adapter);
       
  1072 		/*
       
   1073 		 * set MAC address to all zeroes to invalidate it and temporarily
       
  1074 		 * disable this device for the user. This blocks regular
       
  1075 		 * traffic while still permitting ethtool ioctls from reaching
       
  1076 		 * the hardware as well as allowing the user to run the
       
  1077 		 * interface after manually setting a hw addr using
       
   1078 		 * `ip link set address`
       
  1079 		 */
       
  1080 		memset(hw->mac_addr, 0, netdev->addr_len);
       
  1081 	} else {
       
  1082 		/* copy the MAC address out of the EEPROM */
       
  1083 		if (e1000_read_mac_addr(hw))
       
  1084 			DPRINTK(PROBE, ERR, "EEPROM Read Error\n");
       
  1085 	}
       
   1086 	/* don't block initialization here due to bad MAC address */
       
  1087 	memcpy(netdev->dev_addr, hw->mac_addr, netdev->addr_len);
       
  1088 	memcpy(netdev->perm_addr, hw->mac_addr, netdev->addr_len);
       
  1089 
       
  1090 	if (!is_valid_ether_addr(netdev->perm_addr))
       
  1091 		DPRINTK(PROBE, ERR, "Invalid MAC Address\n");
       
  1092 
       
  1093 	e1000_get_bus_info(hw);
       
  1094 
       
  1095 	init_timer(&adapter->tx_fifo_stall_timer);
       
  1096 	adapter->tx_fifo_stall_timer.function = &e1000_82547_tx_fifo_stall;
       
  1097 	adapter->tx_fifo_stall_timer.data = (unsigned long)adapter;
       
  1098 
       
  1099 	init_timer(&adapter->watchdog_timer);
       
  1100 	adapter->watchdog_timer.function = &e1000_watchdog;
       
  1101 	adapter->watchdog_timer.data = (unsigned long) adapter;
       
  1102 
       
  1103 	init_timer(&adapter->phy_info_timer);
       
  1104 	adapter->phy_info_timer.function = &e1000_update_phy_info;
       
  1105 	adapter->phy_info_timer.data = (unsigned long)adapter;
       
  1106 
       
  1107 	INIT_WORK(&adapter->reset_task, e1000_reset_task);
       
  1108 
       
  1109 	e1000_check_options(adapter);
       
  1110 
       
  1111 	/* Initial Wake on LAN setting
       
  1112 	 * If APM wake is enabled in the EEPROM,
       
  1113 	 * enable the ACPI Magic Packet filter
       
  1114 	 */
       
  1115 
       
  1116 	switch (hw->mac_type) {
       
  1117 	case e1000_82542_rev2_0:
       
  1118 	case e1000_82542_rev2_1:
       
  1119 	case e1000_82543:
       
  1120 		break;
       
  1121 	case e1000_82544:
       
  1122 		e1000_read_eeprom(hw,
       
  1123 			EEPROM_INIT_CONTROL2_REG, 1, &eeprom_data);
       
  1124 		eeprom_apme_mask = E1000_EEPROM_82544_APM;
       
  1125 		break;
       
  1126 	case e1000_ich8lan:
       
  1127 		e1000_read_eeprom(hw,
       
  1128 			EEPROM_INIT_CONTROL1_REG, 1, &eeprom_data);
       
  1129 		eeprom_apme_mask = E1000_EEPROM_ICH8_APME;
       
  1130 		break;
       
  1131 	case e1000_82546:
       
  1132 	case e1000_82546_rev_3:
       
  1133 	case e1000_82571:
       
  1134 	case e1000_80003es2lan:
       
   1135 		if (er32(STATUS) & E1000_STATUS_FUNC_1) {
       
  1136 			e1000_read_eeprom(hw,
       
  1137 				EEPROM_INIT_CONTROL3_PORT_B, 1, &eeprom_data);
       
  1138 			break;
       
  1139 		}
       
  1140 		/* Fall Through */
       
  1141 	default:
       
  1142 		e1000_read_eeprom(hw,
       
  1143 			EEPROM_INIT_CONTROL3_PORT_A, 1, &eeprom_data);
       
  1144 		break;
       
  1145 	}
       
  1146 	if (eeprom_data & eeprom_apme_mask)
       
  1147 		adapter->eeprom_wol |= E1000_WUFC_MAG;
       
  1148 
       
  1149 	/* now that we have the eeprom settings, apply the special cases
       
  1150 	 * where the eeprom may be wrong or the board simply won't support
       
  1151 	 * wake on lan on a particular port */
       
  1152 	switch (pdev->device) {
       
  1153 	case E1000_DEV_ID_82546GB_PCIE:
       
  1154 		adapter->eeprom_wol = 0;
       
  1155 		break;
       
  1156 	case E1000_DEV_ID_82546EB_FIBER:
       
  1157 	case E1000_DEV_ID_82546GB_FIBER:
       
  1158 	case E1000_DEV_ID_82571EB_FIBER:
       
  1159 		/* Wake events only supported on port A for dual fiber
       
  1160 		 * regardless of eeprom setting */
       
  1161 		if (er32(STATUS) & E1000_STATUS_FUNC_1)
       
  1162 			adapter->eeprom_wol = 0;
       
  1163 		break;
       
  1164 	case E1000_DEV_ID_82546GB_QUAD_COPPER_KSP3:
       
  1165 	case E1000_DEV_ID_82571EB_QUAD_COPPER:
       
  1166 	case E1000_DEV_ID_82571EB_QUAD_FIBER:
       
  1167 	case E1000_DEV_ID_82571EB_QUAD_COPPER_LOWPROFILE:
       
  1168 	case E1000_DEV_ID_82571PT_QUAD_COPPER:
       
  1169 		/* if quad port adapter, disable WoL on all but port A */
       
  1170 		if (global_quad_port_a != 0)
       
  1171 			adapter->eeprom_wol = 0;
       
  1172 		else
       
  1173 			adapter->quad_port_a = 1;
       
  1174 		/* Reset for multiple quad port adapters */
       
  1175 		if (++global_quad_port_a == 4)
       
  1176 			global_quad_port_a = 0;
       
  1177 		break;
       
  1178 	}
       
  1179 
       
  1180 	/* initialize the wol settings based on the eeprom settings */
       
  1181 	adapter->wol = adapter->eeprom_wol;
       
  1182 	device_set_wakeup_enable(&adapter->pdev->dev, adapter->wol);
       
  1183 
       
  1184 	/* print bus type/speed/width info */
       
  1185 	DPRINTK(PROBE, INFO, "(PCI%s:%s:%s) ",
       
  1186 		((hw->bus_type == e1000_bus_type_pcix) ? "-X" :
       
  1187 		 (hw->bus_type == e1000_bus_type_pci_express ? " Express":"")),
       
  1188 		((hw->bus_speed == e1000_bus_speed_2500) ? "2.5Gb/s" :
       
  1189 		 (hw->bus_speed == e1000_bus_speed_133) ? "133MHz" :
       
  1190 		 (hw->bus_speed == e1000_bus_speed_120) ? "120MHz" :
       
  1191 		 (hw->bus_speed == e1000_bus_speed_100) ? "100MHz" :
       
  1192 		 (hw->bus_speed == e1000_bus_speed_66) ? "66MHz" : "33MHz"),
       
  1193 		((hw->bus_width == e1000_bus_width_64) ? "64-bit" :
       
  1194 		 (hw->bus_width == e1000_bus_width_pciex_4) ? "Width x4" :
       
  1195 		 (hw->bus_width == e1000_bus_width_pciex_1) ? "Width x1" :
       
  1196 		 "32-bit"));
       
  1197 
       
  1198 	printk("%s\n", print_mac(mac, netdev->dev_addr));
       
  1199 
       
  1200 	if (hw->bus_type == e1000_bus_type_pci_express) {
       
  1201 		DPRINTK(PROBE, WARNING, "This device (id %04x:%04x) will no "
       
  1202 			"longer be supported by this driver in the future.\n",
       
  1203 			pdev->vendor, pdev->device);
       
  1204 		DPRINTK(PROBE, WARNING, "please use the \"e1000e\" "
       
  1205 			"driver instead.\n");
       
  1206 	}
       
  1207 
       
  1208 	/* reset the hardware with the new settings */
       
  1209 	e1000_reset(adapter);
       
  1210 
       
  1211 	/* If the controller is 82573 and f/w is AMT, do not set
       
  1212 	 * DRV_LOAD until the interface is up.  For all other cases,
       
  1213 	 * let the f/w know that the h/w is now under the control
       
  1214 	 * of the driver. */
       
  1215 	if (hw->mac_type != e1000_82573 ||
       
  1216 	    !e1000_check_mng_mode(hw))
       
  1217 		e1000_get_hw_control(adapter);
       
  1218 
       
  1219 	/* tell the stack to leave us alone until e1000_open() is called */
       
  1220 	netif_carrier_off(netdev);
       
  1221 	netif_stop_queue(netdev);
       
  1222 
       
  1223 	strcpy(netdev->name, "eth%d");
       
  1224 	err = register_netdev(netdev);
       
  1225 	if (err)
       
  1226 		goto err_register;
       
  1227 
       
  1228 	DPRINTK(PROBE, INFO, "Intel(R) PRO/1000 Network Connection\n");
       
  1229 
       
  1230 	cards_found++;
       
  1231 	return 0;
       
  1232 
       
  1233 err_register:
       
  1234 	e1000_release_hw_control(adapter);
       
  1235 err_eeprom:
       
  1236 	if (!e1000_check_phy_reset_block(hw))
       
  1237 		e1000_phy_hw_reset(hw);
       
  1238 
       
  1239 	if (hw->flash_address)
       
  1240 		iounmap(hw->flash_address);
       
  1241 err_flashmap:
       
  1242 	for (i = 0; i < adapter->num_rx_queues; i++)
       
  1243 		dev_put(&adapter->polling_netdev[i]);
       
  1244 
       
  1245 	kfree(adapter->tx_ring);
       
  1246 	kfree(adapter->rx_ring);
       
  1247 	kfree(adapter->polling_netdev);
       
  1248 err_sw_init:
       
  1249 	iounmap(hw->hw_addr);
       
  1250 err_ioremap:
       
  1251 	free_netdev(netdev);
       
  1252 err_alloc_etherdev:
       
  1253 	pci_release_selected_regions(pdev, bars);
       
  1254 err_pci_reg:
       
  1255 err_dma:
       
  1256 	pci_disable_device(pdev);
       
  1257 	return err;
       
  1258 }
       
  1259 
       
  1260 /**
       
  1261  * e1000_remove - Device Removal Routine
       
  1262  * @pdev: PCI device information struct
       
  1263  *
       
  1264  * e1000_remove is called by the PCI subsystem to alert the driver
       
  1265  * that it should release a PCI device.  This could be caused by a
       
  1266  * Hot-Plug event, or because the driver is going to be removed from
       
  1267  * memory.
       
  1268  **/
       
  1269 
       
  1270 static void __devexit e1000_remove(struct pci_dev *pdev)
       
  1271 {
       
  1272 	struct net_device *netdev = pci_get_drvdata(pdev);
       
  1273 	struct e1000_adapter *adapter = netdev_priv(netdev);
       
  1274 	struct e1000_hw *hw = &adapter->hw;
       
  1275 	int i;
       
  1276 
       
  1277 	cancel_work_sync(&adapter->reset_task);
       
  1278 
       
  1279 	e1000_release_manageability(adapter);
       
  1280 
       
  1281 	/* Release control of h/w to f/w.  If f/w is AMT enabled, this
       
  1282 	 * would have already happened in close and is redundant. */
       
  1283 	e1000_release_hw_control(adapter);
       
  1284 
       
  1285 	for (i = 0; i < adapter->num_rx_queues; i++)
       
  1286 		dev_put(&adapter->polling_netdev[i]);
       
  1287 
       
  1288 	unregister_netdev(netdev);
       
  1289 
       
  1290 	if (!e1000_check_phy_reset_block(hw))
       
  1291 		e1000_phy_hw_reset(hw);
       
  1292 
       
  1293 	kfree(adapter->tx_ring);
       
  1294 	kfree(adapter->rx_ring);
       
  1295 	kfree(adapter->polling_netdev);
       
  1296 
       
  1297 	iounmap(hw->hw_addr);
       
  1298 	if (hw->flash_address)
       
  1299 		iounmap(hw->flash_address);
       
  1300 	pci_release_selected_regions(pdev, adapter->bars);
       
  1301 
       
  1302 	free_netdev(netdev);
       
  1303 
       
  1304 	pci_disable_device(pdev);
       
  1305 }
       
  1306 
       
  1307 /**
       
  1308  * e1000_sw_init - Initialize general software structures (struct e1000_adapter)
       
  1309  * @adapter: board private structure to initialize
       
  1310  *
       
  1311  * e1000_sw_init initializes the Adapter private data structure.
       
  1312  * Fields are initialized based on PCI device information and
       
  1313  * OS network device settings (MTU size).
       
  1314  **/
       
  1315 
       
  1316 static int __devinit e1000_sw_init(struct e1000_adapter *adapter)
       
  1317 {
       
  1318 	struct e1000_hw *hw = &adapter->hw;
       
  1319 	struct net_device *netdev = adapter->netdev;
       
  1320 	struct pci_dev *pdev = adapter->pdev;
       
  1321 	int i;
       
  1322 
       
  1323 	/* PCI config space info */
       
  1324 
       
  1325 	hw->vendor_id = pdev->vendor;
       
  1326 	hw->device_id = pdev->device;
       
  1327 	hw->subsystem_vendor_id = pdev->subsystem_vendor;
       
  1328 	hw->subsystem_id = pdev->subsystem_device;
       
  1329 	hw->revision_id = pdev->revision;
       
  1330 
       
  1331 	pci_read_config_word(pdev, PCI_COMMAND, &hw->pci_cmd_word);
       
  1332 
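       
       	/* Frame sizing: the MTU plus the 14-byte Ethernet header plus the
       
       	 * 4-byte FCS; the default receive buffer holds a full
       
       	 * VLAN-tagged frame. */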
       
  1333 	adapter->rx_buffer_len = MAXIMUM_ETHERNET_VLAN_SIZE;
       
  1334 	hw->max_frame_size = netdev->mtu +
       
  1335 			     ENET_HEADER_SIZE + ETHERNET_FCS_SIZE;
       
  1336 	hw->min_frame_size = MINIMUM_ETHERNET_FRAME_SIZE;
       
  1337 
       
  1338 	/* identify the MAC */
       
  1339 
       
  1340 	if (e1000_set_mac_type(hw)) {
       
  1341 		DPRINTK(PROBE, ERR, "Unknown MAC Type\n");
       
  1342 		return -EIO;
       
  1343 	}
       
  1344 
       
  1345 	switch (hw->mac_type) {
       
  1346 	default:
       
  1347 		break;
       
  1348 	case e1000_82541:
       
  1349 	case e1000_82547:
       
  1350 	case e1000_82541_rev_2:
       
  1351 	case e1000_82547_rev_2:
       
  1352 		hw->phy_init_script = 1;
       
  1353 		break;
       
  1354 	}
       
  1355 
       
  1356 	e1000_set_media_type(hw);
       
  1357 
       
  1358 	hw->wait_autoneg_complete = false;
       
  1359 	hw->tbi_compatibility_en = true;
       
  1360 	hw->adaptive_ifs = true;
       
  1361 
       
  1362 	/* Copper options */
       
  1363 
       
  1364 	if (hw->media_type == e1000_media_type_copper) {
       
  1365 		hw->mdix = AUTO_ALL_MODES;
       
  1366 		hw->disable_polarity_correction = false;
       
  1367 		hw->master_slave = E1000_MASTER_SLAVE;
       
  1368 	}
       
  1369 
       
  1370 	adapter->num_tx_queues = 1;
       
  1371 	adapter->num_rx_queues = 1;
       
  1372 
       
  1373 	if (e1000_alloc_queues(adapter)) {
       
  1374 		DPRINTK(PROBE, ERR, "Unable to allocate memory for queues\n");
       
  1375 		return -ENOMEM;
       
  1376 	}
       
  1377 
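       
       	/* The dummy polling_netdev entries exist for the multiqueue case
       
       	 * (see the note in e1000_alloc_queues); dev_hold() pins each one
       
       	 * and __LINK_STATE_START marks it as running for the polling
       
       	 * helpers. */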
       
  1378 	for (i = 0; i < adapter->num_rx_queues; i++) {
       
  1379 		adapter->polling_netdev[i].priv = adapter;
       
  1380 		dev_hold(&adapter->polling_netdev[i]);
       
  1381 		set_bit(__LINK_STATE_START, &adapter->polling_netdev[i].state);
       
  1382 	}
       
  1383 	spin_lock_init(&adapter->tx_queue_lock);
       
  1384 
       
  1385 	/* Explicitly disable IRQ since the NIC can be in any state. */
       
  1386 	e1000_irq_disable(adapter);
       
  1387 
       
  1388 	spin_lock_init(&adapter->stats_lock);
       
  1389 
       
  1390 	set_bit(__E1000_DOWN, &adapter->flags);
       
  1391 
       
  1392 	return 0;
       
  1393 }
       
  1394 
       
  1395 /**
       
  1396  * e1000_alloc_queues - Allocate memory for all rings
       
  1397  * @adapter: board private structure to initialize
       
  1398  *
       
  1399  * We allocate one ring per queue at run-time since we don't know the
       
  1400  * number of queues at compile-time.  The polling_netdev array is
       
  1401  * intended for Multiqueue, but should work fine with a single queue.
       
  1402  **/
       
  1403 
       
  1404 static int __devinit e1000_alloc_queues(struct e1000_adapter *adapter)
       
  1405 {
       
  1406 	adapter->tx_ring = kcalloc(adapter->num_tx_queues,
       
  1407 	                           sizeof(struct e1000_tx_ring), GFP_KERNEL);
       
  1408 	if (!adapter->tx_ring)
       
  1409 		return -ENOMEM;
       
  1410 
       
  1411 	adapter->rx_ring = kcalloc(adapter->num_rx_queues,
       
  1412 	                           sizeof(struct e1000_rx_ring), GFP_KERNEL);
       
  1413 	if (!adapter->rx_ring) {
       
  1414 		kfree(adapter->tx_ring);
       
  1415 		return -ENOMEM;
       
  1416 	}
       
  1417 
       
  1418 	adapter->polling_netdev = kcalloc(adapter->num_rx_queues,
       
  1419 	                                  sizeof(struct net_device),
       
  1420 	                                  GFP_KERNEL);
       
  1421 	if (!adapter->polling_netdev) {
       
  1422 		kfree(adapter->tx_ring);
       
  1423 		kfree(adapter->rx_ring);
       
  1424 		return -ENOMEM;
       
  1425 	}
       
  1426 
       
  1427 	return E1000_SUCCESS;
       
  1428 }
       
  1429 
       
  1430 /**
       
  1431  * e1000_open - Called when a network interface is made active
       
  1432  * @netdev: network interface device structure
       
  1433  *
       
  1434  * Returns 0 on success, negative value on failure
       
  1435  *
       
  1436  * The open entry point is called when a network interface is made
       
  1437  * active by the system (IFF_UP).  At this point all resources needed
       
  1438  * for transmit and receive operations are allocated, the interrupt
       
  1439  * handler is registered with the OS, the watchdog timer is started,
       
  1440  * and the stack is notified that the interface is ready.
       
  1441  **/
       
  1442 
       
  1443 static int e1000_open(struct net_device *netdev)
       
  1444 {
       
  1445 	struct e1000_adapter *adapter = netdev_priv(netdev);
       
  1446 	struct e1000_hw *hw = &adapter->hw;
       
  1447 	int err;
       
  1448 
       
  1449 	/* disallow open during test */
       
  1450 	if (test_bit(__E1000_TESTING, &adapter->flags))
       
  1451 		return -EBUSY;
       
  1452 
       
  1453 	/* allocate transmit descriptors */
       
  1454 	err = e1000_setup_all_tx_resources(adapter);
       
  1455 	if (err)
       
  1456 		goto err_setup_tx;
       
  1457 
       
  1458 	/* allocate receive descriptors */
       
  1459 	err = e1000_setup_all_rx_resources(adapter);
       
  1460 	if (err)
       
  1461 		goto err_setup_rx;
       
  1462 
       
  1463 	e1000_power_up_phy(adapter);
       
  1464 
       
  1465 	adapter->mng_vlan_id = E1000_MNG_VLAN_NONE;
       
  1466 	if ((hw->mng_cookie.status &
       
  1467 			  E1000_MNG_DHCP_COOKIE_STATUS_VLAN_SUPPORT)) {
       
  1468 		e1000_update_mng_vlan(adapter);
       
  1469 	}
       
  1470 
       
  1471 	/* If AMT is enabled, let the firmware know that the network
       
  1472 	 * interface is now open */
       
  1473 	if (hw->mac_type == e1000_82573 &&
       
  1474 	    e1000_check_mng_mode(hw))
       
  1475 		e1000_get_hw_control(adapter);
       
  1476 
       
  1477 	/* before we allocate an interrupt, we must be ready to handle it.
       
  1478 	 * Setting DEBUG_SHIRQ in the kernel makes it fire an interrupt
       
  1479 	 * as soon as we call pci_request_irq, so we have to set up our
       
  1480 	 * clean_rx handler before we do so.  */
       
  1481 	e1000_configure(adapter);
       
  1482 
       
  1483 	err = e1000_request_irq(adapter);
       
  1484 	if (err)
       
  1485 		goto err_req_irq;
       
  1486 
       
  1487 	/* From here on the code is the same as e1000_up() */
       
  1488 	clear_bit(__E1000_DOWN, &adapter->flags);
       
  1489 
       
  1490 	napi_enable(&adapter->napi);
       
  1491 
       
  1492 	e1000_irq_enable(adapter);
       
  1493 
       
  1494 	netif_start_queue(netdev);
       
  1495 
       
  1496 	/* fire a link status change interrupt to start the watchdog */
       
  1497 	ew32(ICS, E1000_ICS_LSC);
       
  1498 
       
  1499 	return E1000_SUCCESS;
       
  1500 
       
  1501 err_req_irq:
       
  1502 	e1000_release_hw_control(adapter);
       
  1503 	e1000_power_down_phy(adapter);
       
  1504 	e1000_free_all_rx_resources(adapter);
       
  1505 err_setup_rx:
       
  1506 	e1000_free_all_tx_resources(adapter);
       
  1507 err_setup_tx:
       
  1508 	e1000_reset(adapter);
       
  1509 
       
  1510 	return err;
       
  1511 }
       
  1512 
       
  1513 /**
       
  1514  * e1000_close - Disables a network interface
       
  1515  * @netdev: network interface device structure
       
  1516  *
       
  1517  * Returns 0, this is not allowed to fail
       
  1518  *
       
  1519  * The close entry point is called when an interface is de-activated
       
  1520  * by the OS.  The hardware is still under the driver's control, but
       
  1521  * needs to be disabled.  A global MAC reset is issued to stop the
       
  1522  * hardware, and all transmit and receive resources are freed.
       
  1523  **/
       
  1524 
       
  1525 static int e1000_close(struct net_device *netdev)
       
  1526 {
       
  1527 	struct e1000_adapter *adapter = netdev_priv(netdev);
       
  1528 	struct e1000_hw *hw = &adapter->hw;
       
  1529 
       
  1530 	WARN_ON(test_bit(__E1000_RESETTING, &adapter->flags));
       
  1531 	e1000_down(adapter);
       
  1532 	e1000_power_down_phy(adapter);
       
  1533 	e1000_free_irq(adapter);
       
  1534 
       
  1535 	e1000_free_all_tx_resources(adapter);
       
  1536 	e1000_free_all_rx_resources(adapter);
       
  1537 
       
  1538 	/* kill manageability vlan ID if supported, but not if a vlan with
       
  1539 	 * the same ID is registered on the host OS (let 8021q kill it) */
       
  1540 	if ((hw->mng_cookie.status &
       
  1541 			  E1000_MNG_DHCP_COOKIE_STATUS_VLAN_SUPPORT) &&
       
  1542 	     !(adapter->vlgrp &&
       
  1543 	       vlan_group_get_device(adapter->vlgrp, adapter->mng_vlan_id))) {
       
  1544 		e1000_vlan_rx_kill_vid(netdev, adapter->mng_vlan_id);
       
  1545 	}
       
  1546 
       
  1547 	/* If AMT is enabled, let the firmware know that the network
       
  1548 	 * interface is now closed */
       
  1549 	if (hw->mac_type == e1000_82573 &&
       
  1550 	    e1000_check_mng_mode(hw))
       
  1551 		e1000_release_hw_control(adapter);
       
  1552 
       
  1553 	return 0;
       
  1554 }
       
  1555 
       
  1556 /**
       
  1557  * e1000_check_64k_bound - check that memory doesn't cross 64kB boundary
       
  1558  * @adapter: address of board private structure
       
  1559  * @start: address of beginning of memory
       
  1560  * @len: length of memory
       
  1561  **/
       
  1562 static bool e1000_check_64k_bound(struct e1000_adapter *adapter, void *start,
       
  1563 				  unsigned long len)
       
  1564 {
       
  1565 	struct e1000_hw *hw = &adapter->hw;
       
  1566 	unsigned long begin = (unsigned long)start;
       
  1567 	unsigned long end = begin + len;
       
  1568 
       
  1569 	/* First-revision 82545 and 82546 parts must not let any memory
       
  1570 	 * write cross a 64 KiB boundary, per errata 23 */
       
  1571 	if (hw->mac_type == e1000_82545 ||
       
  1572 	    hw->mac_type == e1000_82546) {
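       
       		/* begin and (end - 1) sit in the same 64 KiB page exactly
       
       		 * when their upper bits agree, so a non-zero XOR shifted
       
       		 * right by 16 flags a boundary crossing. */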
       
  1573 		return ((begin ^ (end - 1)) >> 16) == 0;
       
  1574 	}
       
  1575 
       
  1576 	return true;
       
  1577 }
       
  1578 
       
  1579 /**
       
  1580  * e1000_setup_tx_resources - allocate Tx resources (Descriptors)
       
  1581  * @adapter: board private structure
       
  1582  * @txdr:    tx descriptor ring (for a specific queue) to setup
       
  1583  *
       
  1584  * Return 0 on success, negative on failure
       
  1585  **/
       
  1586 
       
  1587 static int e1000_setup_tx_resources(struct e1000_adapter *adapter,
       
  1588 				    struct e1000_tx_ring *txdr)
       
  1589 {
       
  1590 	struct pci_dev *pdev = adapter->pdev;
       
  1591 	int size;
       
  1592 
       
  1593 	size = sizeof(struct e1000_buffer) * txdr->count;
       
  1594 	txdr->buffer_info = vmalloc(size);
       
  1595 	if (!txdr->buffer_info) {
       
  1596 		DPRINTK(PROBE, ERR,
       
  1597 		"Unable to allocate memory for the transmit descriptor ring\n");
       
  1598 		return -ENOMEM;
       
  1599 	}
       
  1600 	memset(txdr->buffer_info, 0, size);
       
  1601 
       
  1602 	/* round up to nearest 4K */
       
  1603 
       
  1604 	txdr->size = txdr->count * sizeof(struct e1000_tx_desc);
       
  1605 	txdr->size = ALIGN(txdr->size, 4096);
       
  1606 
       
  1607 	txdr->desc = pci_alloc_consistent(pdev, txdr->size, &txdr->dma);
       
  1608 	if (!txdr->desc) {
       
  1609 setup_tx_desc_die:
       
  1610 		vfree(txdr->buffer_info);
       
  1611 		DPRINTK(PROBE, ERR,
       
  1612 		"Unable to allocate memory for the transmit descriptor ring\n");
       
  1613 		return -ENOMEM;
       
  1614 	}
       
  1615 
       
  1616 	/* Fix for errata 23, can't cross 64kB boundary */
       
  1617 	if (!e1000_check_64k_bound(adapter, txdr->desc, txdr->size)) {
       
  1618 		void *olddesc = txdr->desc;
       
  1619 		dma_addr_t olddma = txdr->dma;
       
  1620 		DPRINTK(TX_ERR, ERR, "txdr align check failed: %u bytes "
       
  1621 				     "at %p\n", txdr->size, txdr->desc);
       
  1622 		/* Try again, without freeing the previous */
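       
       		/* (Freeing the old block first could hand back the same
       
       		 * misaligned region; holding on to it forces the allocator
       
       		 * to pick a different one.) */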
       
  1623 		txdr->desc = pci_alloc_consistent(pdev, txdr->size, &txdr->dma);
       
  1624 		/* Failed allocation, critical failure */
       
  1625 		if (!txdr->desc) {
       
  1626 			pci_free_consistent(pdev, txdr->size, olddesc, olddma);
       
  1627 			goto setup_tx_desc_die;
       
  1628 		}
       
  1629 
       
  1630 		if (!e1000_check_64k_bound(adapter, txdr->desc, txdr->size)) {
       
  1631 			/* give up */
       
  1632 			pci_free_consistent(pdev, txdr->size, txdr->desc,
       
  1633 					    txdr->dma);
       
  1634 			pci_free_consistent(pdev, txdr->size, olddesc, olddma);
       
  1635 			DPRINTK(PROBE, ERR,
       
  1636 				"Unable to allocate aligned memory "
       
  1637 				"for the transmit descriptor ring\n");
       
  1638 			vfree(txdr->buffer_info);
       
  1639 			return -ENOMEM;
       
  1640 		} else {
       
  1641 			/* Free old allocation, new allocation was successful */
       
  1642 			pci_free_consistent(pdev, txdr->size, olddesc, olddma);
       
  1643 		}
       
  1644 	}
       
  1645 	memset(txdr->desc, 0, txdr->size);
       
  1646 
       
  1647 	txdr->next_to_use = 0;
       
  1648 	txdr->next_to_clean = 0;
       
  1649 	spin_lock_init(&txdr->tx_lock);
       
  1650 
       
  1651 	return 0;
       
  1652 }
       
  1653 
       
  1654 /**
       
  1655  * e1000_setup_all_tx_resources - wrapper to allocate Tx resources
       
  1656  * 				  (Descriptors) for all queues
       
  1657  * @adapter: board private structure
       
  1658  *
       
  1659  * Return 0 on success, negative on failure
       
  1660  **/
       
  1661 
       
  1662 int e1000_setup_all_tx_resources(struct e1000_adapter *adapter)
       
  1663 {
       
  1664 	int i, err = 0;
       
  1665 
       
  1666 	for (i = 0; i < adapter->num_tx_queues; i++) {
       
  1667 		err = e1000_setup_tx_resources(adapter, &adapter->tx_ring[i]);
       
  1668 		if (err) {
       
  1669 			DPRINTK(PROBE, ERR,
       
  1670 				"Allocation for Tx Queue %u failed\n", i);
       
  1671 			for (i--; i >= 0; i--)
       
  1672 				e1000_free_tx_resources(adapter,
       
  1673 							&adapter->tx_ring[i]);
       
  1674 			break;
       
  1675 		}
       
  1676 	}
       
  1677 
       
  1678 	return err;
       
  1679 }
       
  1680 
       
  1681 /**
       
  1682  * e1000_configure_tx - Configure 8254x Transmit Unit after Reset
       
  1683  * @adapter: board private structure
       
  1684  *
       
  1685  * Configure the Tx unit of the MAC after a reset.
       
  1686  **/
       
  1687 
       
  1688 static void e1000_configure_tx(struct e1000_adapter *adapter)
       
  1689 {
       
  1690 	u64 tdba;
       
  1691 	struct e1000_hw *hw = &adapter->hw;
       
  1692 	u32 tdlen, tctl, tipg, tarc;
       
  1693 	u32 ipgr1, ipgr2;
       
  1694 
       
  1695 	/* Setup the HW Tx Head and Tail descriptor pointers */
       
  1696 
       
  1697 	switch (adapter->num_tx_queues) {
       
  1698 	case 1:
       
  1699 	default:
       
  1700 		tdba = adapter->tx_ring[0].dma;
       
  1701 		tdlen = adapter->tx_ring[0].count *
       
  1702 			sizeof(struct e1000_tx_desc);
       
  1703 		ew32(TDLEN, tdlen);
       
  1704 		ew32(TDBAH, (tdba >> 32));
       
  1705 		ew32(TDBAL, (tdba & 0x00000000ffffffffULL));
       
  1706 		ew32(TDT, 0);
       
  1707 		ew32(TDH, 0);
       
  1708 		adapter->tx_ring[0].tdh = ((hw->mac_type >= e1000_82543) ? E1000_TDH : E1000_82542_TDH);
       
  1709 		adapter->tx_ring[0].tdt = ((hw->mac_type >= e1000_82543) ? E1000_TDT : E1000_82542_TDT);
       
  1710 		break;
       
  1711 	}
       
  1712 
       
  1713 	/* Set the default values for the Tx Inter Packet Gap timer */
       
  1714 	if (hw->mac_type <= e1000_82547_rev_2 &&
       
  1715 	    (hw->media_type == e1000_media_type_fiber ||
       
  1716 	     hw->media_type == e1000_media_type_internal_serdes))
       
  1717 		tipg = DEFAULT_82543_TIPG_IPGT_FIBER;
       
  1718 	else
       
  1719 		tipg = DEFAULT_82543_TIPG_IPGT_COPPER;
       
  1720 
       
  1721 	switch (hw->mac_type) {
       
  1722 	case e1000_82542_rev2_0:
       
  1723 	case e1000_82542_rev2_1:
       
  1724 		tipg = DEFAULT_82542_TIPG_IPGT;
       
  1725 		ipgr1 = DEFAULT_82542_TIPG_IPGR1;
       
  1726 		ipgr2 = DEFAULT_82542_TIPG_IPGR2;
       
  1727 		break;
       
  1728 	case e1000_80003es2lan:
       
  1729 		ipgr1 = DEFAULT_82543_TIPG_IPGR1;
       
  1730 		ipgr2 = DEFAULT_80003ES2LAN_TIPG_IPGR2;
       
  1731 		break;
       
  1732 	default:
       
  1733 		ipgr1 = DEFAULT_82543_TIPG_IPGR1;
       
  1734 		ipgr2 = DEFAULT_82543_TIPG_IPGR2;
       
  1735 		break;
       
  1736 	}
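       
       	/* TIPG packs all three inter-packet-gap fields into one register:
       
       	 * IPGT in bits 9:0, IPGR1 in bits 19:10 and IPGR2 in bits 29:20
       
       	 * per the 8254x manuals. */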
       
  1737 	tipg |= ipgr1 << E1000_TIPG_IPGR1_SHIFT;
       
  1738 	tipg |= ipgr2 << E1000_TIPG_IPGR2_SHIFT;
       
  1739 	ew32(TIPG, tipg);
       
  1740 
       
  1741 	/* Set the Tx Interrupt Delay register */
       
  1742 
       
  1743 	ew32(TIDV, adapter->tx_int_delay);
       
  1744 	if (hw->mac_type >= e1000_82540)
       
  1745 		ew32(TADV, adapter->tx_abs_int_delay);
       
  1746 
       
  1747 	/* Program the Transmit Control Register */
       
  1748 
       
  1749 	tctl = er32(TCTL);
       
  1750 	tctl &= ~E1000_TCTL_CT;
       
  1751 	tctl |= E1000_TCTL_PSP | E1000_TCTL_RTLC |
       
  1752 		(E1000_COLLISION_THRESHOLD << E1000_CT_SHIFT);
       
  1753 
       
  1754 	if (hw->mac_type == e1000_82571 || hw->mac_type == e1000_82572) {
       
  1755 		tarc = er32(TARC0);
       
  1756 		/* set the speed mode bit, we'll clear it if we're not at
       
  1757 		 * gigabit link later */
       
  1758 		tarc |= (1 << 21);
       
  1759 		ew32(TARC0, tarc);
       
  1760 	} else if (hw->mac_type == e1000_80003es2lan) {
       
  1761 		tarc = er32(TARC0);
       
  1762 		tarc |= 1;
       
  1763 		ew32(TARC0, tarc);
       
  1764 		tarc = er32(TARC1);
       
  1765 		tarc |= 1;
       
  1766 		ew32(TARC1, tarc);
       
  1767 	}
       
  1768 
       
  1769 	e1000_config_collision_dist(hw);
       
  1770 
       
  1771 	/* Setup Transmit Descriptor Settings for eop descriptor */
       
  1772 	adapter->txd_cmd = E1000_TXD_CMD_EOP | E1000_TXD_CMD_IFCS;
       
  1773 
       
  1774 	/* only set IDE if we are delaying interrupts using the timers */
       
  1775 	if (adapter->tx_int_delay)
       
  1776 		adapter->txd_cmd |= E1000_TXD_CMD_IDE;
       
  1777 
       
  1778 	if (hw->mac_type < e1000_82543)
       
  1779 		adapter->txd_cmd |= E1000_TXD_CMD_RPS;
       
  1780 	else
       
  1781 		adapter->txd_cmd |= E1000_TXD_CMD_RS;
       
  1782 
       
  1783 	/* Cache if we're 82544 running in PCI-X because we'll
       
  1784 	 * need this to apply a workaround later in the send path. */
       
  1785 	if (hw->mac_type == e1000_82544 &&
       
  1786 	    hw->bus_type == e1000_bus_type_pcix)
       
  1787 		adapter->pcix_82544 = 1;
       
  1788 
       
  1789 	ew32(TCTL, tctl);
       
  1790 
       
  1791 }
       
  1792 
       
  1793 /**
       
  1794  * e1000_setup_rx_resources - allocate Rx resources (Descriptors)
       
  1795  * @adapter: board private structure
       
  1796  * @rxdr:    rx descriptor ring (for a specific queue) to setup
       
  1797  *
       
  1798  * Returns 0 on success, negative on failure
       
  1799  **/
       
  1800 
       
  1801 static int e1000_setup_rx_resources(struct e1000_adapter *adapter,
       
  1802 				    struct e1000_rx_ring *rxdr)
       
  1803 {
       
  1804 	struct e1000_hw *hw = &adapter->hw;
       
  1805 	struct pci_dev *pdev = adapter->pdev;
       
  1806 	int size, desc_len;
       
  1807 
       
  1808 	size = sizeof(struct e1000_buffer) * rxdr->count;
       
  1809 	rxdr->buffer_info = vmalloc(size);
       
  1810 	if (!rxdr->buffer_info) {
       
  1811 		DPRINTK(PROBE, ERR,
       
  1812 		"Unable to allocate memory for the receive descriptor ring\n");
       
  1813 		return -ENOMEM;
       
  1814 	}
       
  1815 	memset(rxdr->buffer_info, 0, size);
       
  1816 
       
  1817 	if (hw->mac_type <= e1000_82547_rev_2)
       
  1818 		desc_len = sizeof(struct e1000_rx_desc);
       
  1819 	else
       
  1820 		desc_len = sizeof(union e1000_rx_desc_packet_split);
       
  1821 
       
  1822 	/* Round up to nearest 4K */
       
  1823 
       
  1824 	rxdr->size = rxdr->count * desc_len;
       
  1825 	rxdr->size = ALIGN(rxdr->size, 4096);
       
  1826 
       
  1827 	rxdr->desc = pci_alloc_consistent(pdev, rxdr->size, &rxdr->dma);
       
  1828 
       
  1829 	if (!rxdr->desc) {
       
  1830 		DPRINTK(PROBE, ERR,
       
  1831 		"Unable to allocate memory for the receive descriptor ring\n");
       
  1832 setup_rx_desc_die:
       
  1833 		vfree(rxdr->buffer_info);
       
  1834 		return -ENOMEM;
       
  1835 	}
       
  1836 
       
  1837 	/* Fix for errata 23, can't cross 64kB boundary */
       
  1838 	if (!e1000_check_64k_bound(adapter, rxdr->desc, rxdr->size)) {
       
  1839 		void *olddesc = rxdr->desc;
       
  1840 		dma_addr_t olddma = rxdr->dma;
       
  1841 		DPRINTK(RX_ERR, ERR, "rxdr align check failed: %u bytes "
       
  1842 				     "at %p\n", rxdr->size, rxdr->desc);
       
  1843 		/* Try again, without freeing the previous */
       
  1844 		rxdr->desc = pci_alloc_consistent(pdev, rxdr->size, &rxdr->dma);
       
  1845 		/* Failed allocation, critical failure */
       
  1846 		if (!rxdr->desc) {
       
  1847 			pci_free_consistent(pdev, rxdr->size, olddesc, olddma);
       
  1848 			DPRINTK(PROBE, ERR,
       
  1849 				"Unable to allocate memory "
       
  1850 				"for the receive descriptor ring\n");
       
  1851 			goto setup_rx_desc_die;
       
  1852 		}
       
  1853 
       
  1854 		if (!e1000_check_64k_bound(adapter, rxdr->desc, rxdr->size)) {
       
  1855 			/* give up */
       
  1856 			pci_free_consistent(pdev, rxdr->size, rxdr->desc,
       
  1857 					    rxdr->dma);
       
  1858 			pci_free_consistent(pdev, rxdr->size, olddesc, olddma);
       
  1859 			DPRINTK(PROBE, ERR,
       
  1860 				"Unable to allocate aligned memory "
       
  1861 				"for the receive descriptor ring\n");
       
  1862 			goto setup_rx_desc_die;
       
  1863 		} else {
       
  1864 			/* Free old allocation, new allocation was successful */
       
  1865 			pci_free_consistent(pdev, rxdr->size, olddesc, olddma);
       
  1866 		}
       
  1867 	}
       
  1868 	memset(rxdr->desc, 0, rxdr->size);
       
  1869 
       
  1870 	rxdr->next_to_clean = 0;
       
  1871 	rxdr->next_to_use = 0;
       
  1872 
       
  1873 	return 0;
       
  1874 }
       
  1875 
       
  1876 /**
       
  1877  * e1000_setup_all_rx_resources - wrapper to allocate Rx resources
       
  1878  * 				  (Descriptors) for all queues
       
  1879  * @adapter: board private structure
       
  1880  *
       
  1881  * Return 0 on success, negative on failure
       
  1882  **/
       
  1883 
       
  1884 int e1000_setup_all_rx_resources(struct e1000_adapter *adapter)
       
  1885 {
       
  1886 	int i, err = 0;
       
  1887 
       
  1888 	for (i = 0; i < adapter->num_rx_queues; i++) {
       
  1889 		err = e1000_setup_rx_resources(adapter, &adapter->rx_ring[i]);
       
  1890 		if (err) {
       
  1891 			DPRINTK(PROBE, ERR,
       
  1892 				"Allocation for Rx Queue %u failed\n", i);
       
  1893 			for (i--; i >= 0; i--)
       
  1894 				e1000_free_rx_resources(adapter,
       
  1895 							&adapter->rx_ring[i]);
       
  1896 			break;
       
  1897 		}
       
  1898 	}
       
  1899 
       
  1900 	return err;
       
  1901 }
       
  1902 
       
  1903 /**
       
  1904  * e1000_setup_rctl - configure the receive control registers
       
  1905  * @adapter: Board private structure
       
  1906  **/
       
  1907 #define PAGE_USE_COUNT(S) (((S) >> PAGE_SHIFT) + \
       
  1908 			(((S) & (PAGE_SIZE - 1)) ? 1 : 0))
       
  1909 static void e1000_setup_rctl(struct e1000_adapter *adapter)
       
  1910 {
       
  1911 	struct e1000_hw *hw = &adapter->hw;
       
  1912 	u32 rctl;
       
  1913 
       
  1914 	rctl = er32(RCTL);
       
  1915 
       
  1916 	rctl &= ~(3 << E1000_RCTL_MO_SHIFT);
       
  1917 
       
  1918 	rctl |= E1000_RCTL_EN | E1000_RCTL_BAM |
       
  1919 		E1000_RCTL_LBM_NO | E1000_RCTL_RDMTS_HALF |
       
  1920 		(hw->mc_filter_type << E1000_RCTL_MO_SHIFT);
       
  1921 
       
  1922 	if (hw->tbi_compatibility_on == 1)
       
  1923 		rctl |= E1000_RCTL_SBP;
       
  1924 	else
       
  1925 		rctl &= ~E1000_RCTL_SBP;
       
  1926 
       
  1927 	if (adapter->netdev->mtu <= ETH_DATA_LEN)
       
  1928 		rctl &= ~E1000_RCTL_LPE;
       
  1929 	else
       
  1930 		rctl |= E1000_RCTL_LPE;
       
  1931 
       
  1932 	/* Setup buffer sizes */
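       
       	/* BSEX scales the SZ encoding by 16: the cases up to 2048 clear
       
       	 * it, while 4096/8192/16384 reuse the 256/512/1024 encodings with
       
       	 * BSEX left set. */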
       
  1933 	rctl &= ~E1000_RCTL_SZ_4096;
       
  1934 	rctl |= E1000_RCTL_BSEX;
       
  1935 	switch (adapter->rx_buffer_len) {
       
  1936 		case E1000_RXBUFFER_256:
       
  1937 			rctl |= E1000_RCTL_SZ_256;
       
  1938 			rctl &= ~E1000_RCTL_BSEX;
       
  1939 			break;
       
  1940 		case E1000_RXBUFFER_512:
       
  1941 			rctl |= E1000_RCTL_SZ_512;
       
  1942 			rctl &= ~E1000_RCTL_BSEX;
       
  1943 			break;
       
  1944 		case E1000_RXBUFFER_1024:
       
  1945 			rctl |= E1000_RCTL_SZ_1024;
       
  1946 			rctl &= ~E1000_RCTL_BSEX;
       
  1947 			break;
       
  1948 		case E1000_RXBUFFER_2048:
       
  1949 		default:
       
  1950 			rctl |= E1000_RCTL_SZ_2048;
       
  1951 			rctl &= ~E1000_RCTL_BSEX;
       
  1952 			break;
       
  1953 		case E1000_RXBUFFER_4096:
       
  1954 			rctl |= E1000_RCTL_SZ_4096;
       
  1955 			break;
       
  1956 		case E1000_RXBUFFER_8192:
       
  1957 			rctl |= E1000_RCTL_SZ_8192;
       
  1958 			break;
       
  1959 		case E1000_RXBUFFER_16384:
       
  1960 			rctl |= E1000_RCTL_SZ_16384;
       
  1961 			break;
       
  1962 	}
       
  1963 
       
  1964 	ew32(RCTL, rctl);
       
  1965 }
       
  1966 
       
  1967 /**
       
  1968  * e1000_configure_rx - Configure 8254x Receive Unit after Reset
       
  1969  * @adapter: board private structure
       
  1970  *
       
  1971  * Configure the Rx unit of the MAC after a reset.
       
  1972  **/
       
  1973 
       
  1974 static void e1000_configure_rx(struct e1000_adapter *adapter)
       
  1975 {
       
  1976 	u64 rdba;
       
  1977 	struct e1000_hw *hw = &adapter->hw;
       
  1978 	u32 rdlen, rctl, rxcsum, ctrl_ext;
       
  1979 
       
  1980 	rdlen = adapter->rx_ring[0].count *
       
  1981 		sizeof(struct e1000_rx_desc);
       
  1982 	adapter->clean_rx = e1000_clean_rx_irq;
       
  1983 	adapter->alloc_rx_buf = e1000_alloc_rx_buffers;
       
  1984 
       
  1985 	/* disable receives while setting up the descriptors */
       
  1986 	rctl = er32(RCTL);
       
  1987 	ew32(RCTL, rctl & ~E1000_RCTL_EN);
       
  1988 
       
  1989 	/* set the Receive Delay Timer Register */
       
  1990 	ew32(RDTR, adapter->rx_int_delay);
       
  1991 
       
  1992 	if (hw->mac_type >= e1000_82540) {
       
  1993 		ew32(RADV, adapter->rx_abs_int_delay);
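       
       		/* The ITR register counts in 256 ns units, so this turns the
       
       		 * stored interrupts/sec target into a hardware interval. */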
       
  1994 		if (adapter->itr_setting != 0)
       
  1995 			ew32(ITR, 1000000000 / (adapter->itr * 256));
       
  1996 	}
       
  1997 
       
  1998 	if (hw->mac_type >= e1000_82571) {
       
  1999 		ctrl_ext = er32(CTRL_EXT);
       
  2000 		/* Reset delay timers after every interrupt */
       
  2001 		ctrl_ext |= E1000_CTRL_EXT_INT_TIMER_CLR;
       
  2002 		/* Auto-Mask interrupts upon ICR access */
       
  2003 		ctrl_ext |= E1000_CTRL_EXT_IAME;
       
  2004 		ew32(IAM, 0xffffffff);
       
  2005 		ew32(CTRL_EXT, ctrl_ext);
       
  2006 		E1000_WRITE_FLUSH();
       
  2007 	}
       
  2008 
       
  2009 	/* Setup the HW Rx Head and Tail Descriptor Pointers and
       
  2010 	 * the Base and Length of the Rx Descriptor Ring */
       
  2011 	switch (adapter->num_rx_queues) {
       
  2012 	case 1:
       
  2013 	default:
       
  2014 		rdba = adapter->rx_ring[0].dma;
       
  2015 		ew32(RDLEN, rdlen);
       
  2016 		ew32(RDBAH, (rdba >> 32));
       
  2017 		ew32(RDBAL, (rdba & 0x00000000ffffffffULL));
       
  2018 		ew32(RDT, 0);
       
  2019 		ew32(RDH, 0);
       
  2020 		adapter->rx_ring[0].rdh = ((hw->mac_type >= e1000_82543) ? E1000_RDH : E1000_82542_RDH);
       
  2021 		adapter->rx_ring[0].rdt = ((hw->mac_type >= e1000_82543) ? E1000_RDT : E1000_82542_RDT);
       
  2022 		break;
       
  2023 	}
       
  2024 
       
  2025 	/* Enable 82543 Receive Checksum Offload for TCP and UDP */
       
  2026 	if (hw->mac_type >= e1000_82543) {
       
  2027 		rxcsum = er32(RXCSUM);
       
  2028 		if (adapter->rx_csum)
       
  2029 			rxcsum |= E1000_RXCSUM_TUOFL;
       
  2030 		else
       
  2031 			/* don't need to clear IPPCSE as it defaults to 0 */
       
  2032 			rxcsum &= ~E1000_RXCSUM_TUOFL;
       
  2033 		ew32(RXCSUM, rxcsum);
       
  2034 	}
       
  2035 
       
  2036 	/* Enable Receives */
       
  2037 	ew32(RCTL, rctl);
       
  2038 }
       
  2039 
       
  2040 /**
       
  2041  * e1000_free_tx_resources - Free Tx Resources per Queue
       
  2042  * @adapter: board private structure
       
  2043  * @tx_ring: Tx descriptor ring for a specific queue
       
  2044  *
       
  2045  * Free all transmit software resources
       
  2046  **/
       
  2047 
       
  2048 static void e1000_free_tx_resources(struct e1000_adapter *adapter,
       
  2049 				    struct e1000_tx_ring *tx_ring)
       
  2050 {
       
  2051 	struct pci_dev *pdev = adapter->pdev;
       
  2052 
       
  2053 	e1000_clean_tx_ring(adapter, tx_ring);
       
  2054 
       
  2055 	vfree(tx_ring->buffer_info);
       
  2056 	tx_ring->buffer_info = NULL;
       
  2057 
       
  2058 	pci_free_consistent(pdev, tx_ring->size, tx_ring->desc, tx_ring->dma);
       
  2059 
       
  2060 	tx_ring->desc = NULL;
       
  2061 }
       
  2062 
       
  2063 /**
       
  2064  * e1000_free_all_tx_resources - Free Tx Resources for All Queues
       
  2065  * @adapter: board private structure
       
  2066  *
       
  2067  * Free all transmit software resources
       
  2068  **/
       
  2069 
       
  2070 void e1000_free_all_tx_resources(struct e1000_adapter *adapter)
       
  2071 {
       
  2072 	int i;
       
  2073 
       
  2074 	for (i = 0; i < adapter->num_tx_queues; i++)
       
  2075 		e1000_free_tx_resources(adapter, &adapter->tx_ring[i]);
       
  2076 }
       
  2077 
       
  2078 static void e1000_unmap_and_free_tx_resource(struct e1000_adapter *adapter,
       
  2079 					     struct e1000_buffer *buffer_info)
       
  2080 {
       
  2081 	if (buffer_info->dma) {
       
  2082 		pci_unmap_page(adapter->pdev,
       
  2083 				buffer_info->dma,
       
  2084 				buffer_info->length,
       
  2085 				PCI_DMA_TODEVICE);
       
  2086 		buffer_info->dma = 0;
       
  2087 	}
       
  2088 	if (buffer_info->skb) {
       
  2089 		dev_kfree_skb_any(buffer_info->skb);
       
  2090 		buffer_info->skb = NULL;
       
  2091 	}
       
  2092 	/* buffer_info must be completely set up in the transmit path */
       
  2093 }
       
  2094 
       
  2095 /**
       
  2096  * e1000_clean_tx_ring - Free Tx Buffers
       
  2097  * @adapter: board private structure
       
  2098  * @tx_ring: ring to be cleaned
       
  2099  **/
       
  2100 
       
  2101 static void e1000_clean_tx_ring(struct e1000_adapter *adapter,
       
  2102 				struct e1000_tx_ring *tx_ring)
       
  2103 {
       
  2104 	struct e1000_hw *hw = &adapter->hw;
       
  2105 	struct e1000_buffer *buffer_info;
       
  2106 	unsigned long size;
       
  2107 	unsigned int i;
       
  2108 
       
  2109 	/* Free all the Tx ring sk_buffs */
       
  2110 
       
  2111 	for (i = 0; i < tx_ring->count; i++) {
       
  2112 		buffer_info = &tx_ring->buffer_info[i];
       
  2113 		e1000_unmap_and_free_tx_resource(adapter, buffer_info);
       
  2114 	}
       
  2115 
       
  2116 	size = sizeof(struct e1000_buffer) * tx_ring->count;
       
  2117 	memset(tx_ring->buffer_info, 0, size);
       
  2118 
       
  2119 	/* Zero out the descriptor ring */
       
  2120 
       
  2121 	memset(tx_ring->desc, 0, tx_ring->size);
       
  2122 
       
  2123 	tx_ring->next_to_use = 0;
       
  2124 	tx_ring->next_to_clean = 0;
       
  2125 	tx_ring->last_tx_tso = 0;
       
  2126 
       
  2127 	writel(0, hw->hw_addr + tx_ring->tdh);
       
  2128 	writel(0, hw->hw_addr + tx_ring->tdt);
       
  2129 }
       
  2130 
       
  2131 /**
       
  2132  * e1000_clean_all_tx_rings - Free Tx Buffers for all queues
       
  2133  * @adapter: board private structure
       
  2134  **/
       
  2135 
       
  2136 static void e1000_clean_all_tx_rings(struct e1000_adapter *adapter)
       
  2137 {
       
  2138 	int i;
       
  2139 
       
  2140 	for (i = 0; i < adapter->num_tx_queues; i++)
       
  2141 		e1000_clean_tx_ring(adapter, &adapter->tx_ring[i]);
       
  2142 }
       
  2143 
       
  2144 /**
       
  2145  * e1000_free_rx_resources - Free Rx Resources
       
  2146  * @adapter: board private structure
       
  2147  * @rx_ring: ring to clean the resources from
       
  2148  *
       
  2149  * Free all receive software resources
       
  2150  **/
       
  2151 
       
  2152 static void e1000_free_rx_resources(struct e1000_adapter *adapter,
       
  2153 				    struct e1000_rx_ring *rx_ring)
       
  2154 {
       
  2155 	struct pci_dev *pdev = adapter->pdev;
       
  2156 
       
  2157 	e1000_clean_rx_ring(adapter, rx_ring);
       
  2158 
       
  2159 	vfree(rx_ring->buffer_info);
       
  2160 	rx_ring->buffer_info = NULL;
       
  2161 
       
  2162 	pci_free_consistent(pdev, rx_ring->size, rx_ring->desc, rx_ring->dma);
       
  2163 
       
  2164 	rx_ring->desc = NULL;
       
  2165 }
       
  2166 
       
  2167 /**
       
  2168  * e1000_free_all_rx_resources - Free Rx Resources for All Queues
       
  2169  * @adapter: board private structure
       
  2170  *
       
  2171  * Free all receive software resources
       
  2172  **/
       
  2173 
       
  2174 void e1000_free_all_rx_resources(struct e1000_adapter *adapter)
       
  2175 {
       
  2176 	int i;
       
  2177 
       
  2178 	for (i = 0; i < adapter->num_rx_queues; i++)
       
  2179 		e1000_free_rx_resources(adapter, &adapter->rx_ring[i]);
       
  2180 }
       
  2181 
       
  2182 /**
       
  2183  * e1000_clean_rx_ring - Free Rx Buffers per Queue
       
  2184  * @adapter: board private structure
       
  2185  * @rx_ring: ring to free buffers from
       
  2186  **/
       
  2187 
       
  2188 static void e1000_clean_rx_ring(struct e1000_adapter *adapter,
       
  2189 				struct e1000_rx_ring *rx_ring)
       
  2190 {
       
  2191 	struct e1000_hw *hw = &adapter->hw;
       
  2192 	struct e1000_buffer *buffer_info;
       
  2193 	struct pci_dev *pdev = adapter->pdev;
       
  2194 	unsigned long size;
       
  2195 	unsigned int i;
       
  2196 
       
  2197 	/* Free all the Rx ring sk_buffs */
       
  2198 	for (i = 0; i < rx_ring->count; i++) {
       
  2199 		buffer_info = &rx_ring->buffer_info[i];
       
  2200 		if (buffer_info->skb) {
       
  2201 			pci_unmap_single(pdev,
       
  2202 					 buffer_info->dma,
       
  2203 					 buffer_info->length,
       
  2204 					 PCI_DMA_FROMDEVICE);
       
  2205 
       
  2206 			dev_kfree_skb(buffer_info->skb);
       
  2207 			buffer_info->skb = NULL;
       
  2208 		}
       
  2209 	}
       
  2210 
       
  2211 	size = sizeof(struct e1000_buffer) * rx_ring->count;
       
  2212 	memset(rx_ring->buffer_info, 0, size);
       
  2213 
       
  2214 	/* Zero out the descriptor ring */
       
  2215 
       
  2216 	memset(rx_ring->desc, 0, rx_ring->size);
       
  2217 
       
  2218 	rx_ring->next_to_clean = 0;
       
  2219 	rx_ring->next_to_use = 0;
       
  2220 
       
  2221 	writel(0, hw->hw_addr + rx_ring->rdh);
       
  2222 	writel(0, hw->hw_addr + rx_ring->rdt);
       
  2223 }
       
  2224 
       
  2225 /**
       
  2226  * e1000_clean_all_rx_rings - Free Rx Buffers for all queues
       
  2227  * @adapter: board private structure
       
  2228  **/
       
  2229 
       
  2230 static void e1000_clean_all_rx_rings(struct e1000_adapter *adapter)
       
  2231 {
       
  2232 	int i;
       
  2233 
       
  2234 	for (i = 0; i < adapter->num_rx_queues; i++)
       
  2235 		e1000_clean_rx_ring(adapter, &adapter->rx_ring[i]);
       
  2236 }
       
  2237 
       
  2238 /* The 82542 2.0 (revision 2) needs to have the receive unit in reset
       
  2239  * and memory write and invalidate disabled for certain operations
       
  2240  */
       
  2241 static void e1000_enter_82542_rst(struct e1000_adapter *adapter)
       
  2242 {
       
  2243 	struct e1000_hw *hw = &adapter->hw;
       
  2244 	struct net_device *netdev = adapter->netdev;
       
  2245 	u32 rctl;
       
  2246 
       
  2247 	e1000_pci_clear_mwi(hw);
       
  2248 
       
  2249 	rctl = er32(RCTL);
       
  2250 	rctl |= E1000_RCTL_RST;
       
  2251 	ew32(RCTL, rctl);
       
  2252 	E1000_WRITE_FLUSH();
       
  2253 	mdelay(5);
       
  2254 
       
  2255 	if (netif_running(netdev))
       
  2256 		e1000_clean_all_rx_rings(adapter);
       
  2257 }
       
  2258 
       
  2259 static void e1000_leave_82542_rst(struct e1000_adapter *adapter)
       
  2260 {
       
  2261 	struct e1000_hw *hw = &adapter->hw;
       
  2262 	struct net_device *netdev = adapter->netdev;
       
  2263 	u32 rctl;
       
  2264 
       
  2265 	rctl = er32(RCTL);
       
  2266 	rctl &= ~E1000_RCTL_RST;
       
  2267 	ew32(RCTL, rctl);
       
  2268 	E1000_WRITE_FLUSH();
       
  2269 	mdelay(5);
       
  2270 
       
  2271 	if (hw->pci_cmd_word & PCI_COMMAND_INVALIDATE)
       
  2272 		e1000_pci_set_mwi(hw);
       
  2273 
       
  2274 	if (netif_running(netdev)) {
       
  2275 		/* No need to loop, because 82542 supports only 1 queue */
       
  2276 		struct e1000_rx_ring *ring = &adapter->rx_ring[0];
       
  2277 		e1000_configure_rx(adapter);
       
  2278 		adapter->alloc_rx_buf(adapter, ring, E1000_DESC_UNUSED(ring));
       
  2279 	}
       
  2280 }
       
  2281 
       
  2282 /**
       
  2283  * e1000_set_mac - Change the Ethernet Address of the NIC
       
  2284  * @netdev: network interface device structure
       
  2285  * @p: pointer to an address structure
       
  2286  *
       
  2287  * Returns 0 on success, negative on failure
       
  2288  **/
       
  2289 
       
  2290 static int e1000_set_mac(struct net_device *netdev, void *p)
       
  2291 {
       
  2292 	struct e1000_adapter *adapter = netdev_priv(netdev);
       
  2293 	struct e1000_hw *hw = &adapter->hw;
       
  2294 	struct sockaddr *addr = p;
       
  2295 
       
  2296 	if (!is_valid_ether_addr(addr->sa_data))
       
  2297 		return -EADDRNOTAVAIL;
       
  2298 
       
  2299 	/* 82542 2.0 needs to be in reset to write receive address registers */
       
  2300 
       
  2301 	if (hw->mac_type == e1000_82542_rev2_0)
       
  2302 		e1000_enter_82542_rst(adapter);
       
  2303 
       
  2304 	memcpy(netdev->dev_addr, addr->sa_data, netdev->addr_len);
       
  2305 	memcpy(hw->mac_addr, addr->sa_data, netdev->addr_len);
       
  2306 
       
  2307 	e1000_rar_set(hw, hw->mac_addr, 0);
       
  2308 
       
  2309 	/* With 82571 controllers, LAA may be overwritten (with the default)
       
  2310 	 * due to controller reset from the other port. */
       
  2311 	if (hw->mac_type == e1000_82571) {
       
  2312 		/* activate the work around */
       
  2313 		hw->laa_is_present = 1;
       
  2314 
       
  2315 		/* Hold a copy of the LAA in RAR[14].  This is done so that
       
  2316 		 * between the time RAR[0] gets clobbered and the time it
       
  2317 		 * gets fixed (in e1000_watchdog), the actual LAA is in one
       
  2318 		 * of the RARs and no incoming packets directed to this port
       
  2319 		 * are dropped. Eventually the LAA will be in both RAR[0] and
       
  2320 		 * RAR[14]. */
       
  2321 		e1000_rar_set(hw, hw->mac_addr,
       
  2322 					E1000_RAR_ENTRIES - 1);
       
  2323 	}
       
  2324 
       
  2325 	if (hw->mac_type == e1000_82542_rev2_0)
       
  2326 		e1000_leave_82542_rst(adapter);
       
  2327 
       
  2328 	return 0;
       
  2329 }
       
  2330 
       
  2331 /**
       
  2332  * e1000_set_rx_mode - Secondary Unicast, Multicast and Promiscuous mode set
       
  2333  * @netdev: network interface device structure
       
  2334  *
       
  2335  * The set_rx_mode entry point is called whenever the unicast or multicast
       
  2336  * address lists or the network interface flags are updated. This routine is
       
  2337  * responsible for configuring the hardware for proper unicast, multicast,
       
  2338  * promiscuous mode, and all-multi behavior.
       
  2339  **/
       
  2340 
       
  2341 static void e1000_set_rx_mode(struct net_device *netdev)
       
  2342 {
       
  2343 	struct e1000_adapter *adapter = netdev_priv(netdev);
       
  2344 	struct e1000_hw *hw = &adapter->hw;
       
  2345 	struct dev_addr_list *uc_ptr;
       
  2346 	struct dev_addr_list *mc_ptr;
       
  2347 	u32 rctl;
       
  2348 	u32 hash_value;
       
  2349 	int i, rar_entries = E1000_RAR_ENTRIES;
       
  2350 	int mta_reg_count = (hw->mac_type == e1000_ich8lan) ?
       
  2351 				E1000_NUM_MTA_REGISTERS_ICH8LAN :
       
  2352 				E1000_NUM_MTA_REGISTERS;
       
  2353 
       
  2354 	if (hw->mac_type == e1000_ich8lan)
       
  2355 		rar_entries = E1000_RAR_ENTRIES_ICH8LAN;
       
  2356 
       
  2357 	/* reserve RAR[14] for LAA over-write work-around */
       
  2358 	if (hw->mac_type == e1000_82571)
       
  2359 		rar_entries--;
       
  2360 
       
  2361 	/* Check for Promiscuous and All Multicast modes */
       
  2362 
       
  2363 	rctl = er32(RCTL);
       
  2364 
       
  2365 	if (netdev->flags & IFF_PROMISC) {
       
  2366 		rctl |= (E1000_RCTL_UPE | E1000_RCTL_MPE);
       
  2367 		rctl &= ~E1000_RCTL_VFE;
       
  2368 	} else {
       
  2369 		if (netdev->flags & IFF_ALLMULTI) {
       
  2370 			rctl |= E1000_RCTL_MPE;
       
  2371 		} else {
       
  2372 			rctl &= ~E1000_RCTL_MPE;
       
  2373 		}
       
  2374 		if (adapter->hw.mac_type != e1000_ich8lan)
       
  2375 			rctl |= E1000_RCTL_VFE;
       
  2376 	}
       
  2377 
       
  2378 	uc_ptr = NULL;
       
  2379 	if (netdev->uc_count > rar_entries - 1) {
       
  2380 		rctl |= E1000_RCTL_UPE;
       
  2381 	} else if (!(netdev->flags & IFF_PROMISC)) {
       
  2382 		rctl &= ~E1000_RCTL_UPE;
       
  2383 		uc_ptr = netdev->uc_list;
       
  2384 	}
       
  2385 
       
  2386 	ew32(RCTL, rctl);
       
  2387 
       
  2388 	/* 82542 2.0 needs to be in reset to write receive address registers */
       
  2389 
       
  2390 	if (hw->mac_type == e1000_82542_rev2_0)
       
  2391 		e1000_enter_82542_rst(adapter);
       
  2392 
       
  2393 	/* load the first 14 addresses into the exact filters 1-14. Unicast
       
  2394 	 * addresses take precedence to avoid disabling unicast filtering
       
  2395 	 * when possible.
       
  2396 	 *
       
  2397 	 * RAR 0 is used for the station MAC address.
       
  2398 	 * If there are fewer than 14 addresses, clear the unused filters
       
  2399 	 * -- with 82571 controllers only entries 0-13 are filled here.
       
  2400 	 */
       
  2401 	mc_ptr = netdev->mc_list;
       
  2402 
       
  2403 	for (i = 1; i < rar_entries; i++) {
       
  2404 		if (uc_ptr) {
       
  2405 			e1000_rar_set(hw, uc_ptr->da_addr, i);
       
  2406 			uc_ptr = uc_ptr->next;
       
  2407 		} else if (mc_ptr) {
       
  2408 			e1000_rar_set(hw, mc_ptr->da_addr, i);
       
  2409 			mc_ptr = mc_ptr->next;
       
  2410 		} else {
       
  2411 			E1000_WRITE_REG_ARRAY(hw, RA, i << 1, 0);
       
  2412 			E1000_WRITE_FLUSH();
       
  2413 			E1000_WRITE_REG_ARRAY(hw, RA, (i << 1) + 1, 0);
       
  2414 			E1000_WRITE_FLUSH();
       
  2415 		}
       
  2416 	}
       
  2417 	WARN_ON(uc_ptr != NULL);
       
  2418 
       
  2419 	/* clear the old settings from the multicast hash table */
       
  2420 
       
  2421 	for (i = 0; i < mta_reg_count; i++) {
       
  2422 		E1000_WRITE_REG_ARRAY(hw, MTA, i, 0);
       
  2423 		E1000_WRITE_FLUSH();
       
  2424 	}
       
  2425 
       
  2426 	/* load any remaining addresses into the hash table */
       
  2427 
       
  2428 	for (; mc_ptr; mc_ptr = mc_ptr->next) {
       
  2429 		hash_value = e1000_hash_mc_addr(hw, mc_ptr->da_addr);
       
  2430 		e1000_mta_set(hw, hash_value);
       
  2431 	}
       
  2432 
       
  2433 	if (hw->mac_type == e1000_82542_rev2_0)
       
  2434 		e1000_leave_82542_rst(adapter);
       
  2435 }
       
  2436 
       
  2437 /* Need to wait a few seconds after link up to get diagnostic information from
       
  2438  * the phy */
       
  2439 
       
  2440 static void e1000_update_phy_info(unsigned long data)
       
  2441 {
       
  2442 	struct e1000_adapter *adapter = (struct e1000_adapter *)data;
       
  2443 	struct e1000_hw *hw = &adapter->hw;
       
  2444 	e1000_phy_get_info(hw, &adapter->phy_info);
       
  2445 }
       
  2446 
       
  2447 /**
       
  2448  * e1000_82547_tx_fifo_stall - Timer Call-back
       
  2449  * @data: pointer to adapter cast into an unsigned long
       
  2450  **/
       
  2451 
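       
       /* 82547 workaround: once both the descriptor ring and the on-chip FIFO
       
        * have drained (head == tail for each), transmits are briefly disabled
       
        * so the FIFO pointers can be rewound to tx_head_addr; otherwise the
       
        * timer re-arms itself one jiffy later. */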
       
  2452 static void e1000_82547_tx_fifo_stall(unsigned long data)
       
  2453 {
       
  2454 	struct e1000_adapter *adapter = (struct e1000_adapter *)data;
       
  2455 	struct e1000_hw *hw = &adapter->hw;
       
  2456 	struct net_device *netdev = adapter->netdev;
       
  2457 	u32 tctl;
       
  2458 
       
  2459 	if (atomic_read(&adapter->tx_fifo_stall)) {
       
  2460 		if ((er32(TDT) == er32(TDH)) &&
       
  2461 		   (er32(TDFT) == er32(TDFH)) &&
       
  2462 		   (er32(TDFTS) == er32(TDFHS))) {
       
  2463 			tctl = er32(TCTL);
       
  2464 			ew32(TCTL, tctl & ~E1000_TCTL_EN);
       
  2465 			ew32(TDFT, adapter->tx_head_addr);
       
  2466 			ew32(TDFH, adapter->tx_head_addr);
       
  2467 			ew32(TDFTS, adapter->tx_head_addr);
       
  2468 			ew32(TDFHS, adapter->tx_head_addr);
       
  2469 			ew32(TCTL, tctl);
       
  2470 			E1000_WRITE_FLUSH();
       
  2471 
       
  2472 			adapter->tx_fifo_head = 0;
       
  2473 			atomic_set(&adapter->tx_fifo_stall, 0);
       
  2474 			netif_wake_queue(netdev);
       
  2475 		} else {
       
  2476 			mod_timer(&adapter->tx_fifo_stall_timer, jiffies + 1);
       
  2477 		}
       
  2478 	}
       
  2479 }
       
  2480 
       
  2481 /**
       
  2482  * e1000_watchdog - Timer Call-back
       
  2483  * @data: pointer to adapter cast into an unsigned long
       
  2484  **/
       
  2485 static void e1000_watchdog(unsigned long data)
       
  2486 {
       
  2487 	struct e1000_adapter *adapter = (struct e1000_adapter *)data;
       
  2488 	struct e1000_hw *hw = &adapter->hw;
       
  2489 	struct net_device *netdev = adapter->netdev;
       
  2490 	struct e1000_tx_ring *txdr = adapter->tx_ring;
       
  2491 	u32 link, tctl;
       
  2492 	s32 ret_val;
       
  2493 
       
  2494 	ret_val = e1000_check_for_link(hw);
       
  2495 	if ((ret_val == E1000_ERR_PHY) &&
       
  2496 	    (hw->phy_type == e1000_phy_igp_3) &&
       
  2497 	    (er32(CTRL) & E1000_PHY_CTRL_GBE_DISABLE)) {
       
  2498 		/* See e1000_kumeran_lock_loss_workaround() */
       
  2499 		DPRINTK(LINK, INFO,
       
  2500 			"Gigabit has been disabled, downgrading speed\n");
       
  2501 	}
       
  2502 
       
  2503 	if (hw->mac_type == e1000_82573) {
       
  2504 		e1000_enable_tx_pkt_filtering(hw);
       
  2505 		if (adapter->mng_vlan_id != hw->mng_cookie.vlan_id)
       
  2506 			e1000_update_mng_vlan(adapter);
       
  2507 	}
       
  2508 
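       
       	/* On an internal serdes link with autonegotiation disabled
       
       	 * (TXCW.ANE clear), STATUS.LU may not track the real link state,
       
       	 * so rely on the driver's own serdes_link_down bookkeeping. */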
       
  2509 	if ((hw->media_type == e1000_media_type_internal_serdes) &&
       
  2510 	   !(er32(TXCW) & E1000_TXCW_ANE))
       
  2511 		link = !hw->serdes_link_down;
       
  2512 	else
       
  2513 		link = er32(STATUS) & E1000_STATUS_LU;
       
  2514 
       
  2515 	if (link) {
       
  2516 		if (!netif_carrier_ok(netdev)) {
       
  2517 			u32 ctrl;
       
  2518 			bool txb2b = true;
       
  2519 			e1000_get_speed_and_duplex(hw,
       
  2520 			                           &adapter->link_speed,
       
  2521 			                           &adapter->link_duplex);
       
  2522 
       
  2523 			ctrl = er32(CTRL);
       
  2524 			DPRINTK(LINK, INFO, "NIC Link is Up %d Mbps %s, "
       
  2525 			        "Flow Control: %s\n",
       
  2526 			        adapter->link_speed,
       
  2527 			        adapter->link_duplex == FULL_DUPLEX ?
       
  2528 			        "Full Duplex" : "Half Duplex",
       
  2529 			        ((ctrl & E1000_CTRL_TFCE) && (ctrl &
       
  2530 			        E1000_CTRL_RFCE)) ? "RX/TX" : ((ctrl &
       
  2531 			        E1000_CTRL_RFCE) ? "RX" : ((ctrl &
       
  2532 			        E1000_CTRL_TFCE) ? "TX" : "None" )));
       
  2533 
       
  2534 			/* tweak tx_queue_len according to speed/duplex
       
  2535 			 * and adjust the timeout factor */
       
  2536 			netdev->tx_queue_len = adapter->tx_queue_len;
       
  2537 			adapter->tx_timeout_factor = 1;
       
  2538 			switch (adapter->link_speed) {
       
  2539 			case SPEED_10:
       
  2540 				txb2b = false;
       
  2541 				netdev->tx_queue_len = 10;
       
  2542 				adapter->tx_timeout_factor = 8;
       
  2543 				break;
       
  2544 			case SPEED_100:
       
  2545 				txb2b = false;
       
  2546 				netdev->tx_queue_len = 100;
       
  2547 				/* maybe add some timeout factor ? */
       
  2548 				break;
       
  2549 			}
       
  2550 
       
  2551 			if ((hw->mac_type == e1000_82571 ||
       
  2552 			     hw->mac_type == e1000_82572) &&
       
  2553 			    !txb2b) {
       
  2554 				u32 tarc0;
       
  2555 				tarc0 = er32(TARC0);
       
  2556 				tarc0 &= ~(1 << 21);
       
  2557 				ew32(TARC0, tarc0);
       
  2558 			}
       
  2559 
       
  2560 			/* disable TSO for PCIe and 10/100 speeds, to avoid
       
  2561 			 * some hardware issues */
       
  2562 			if (!adapter->tso_force &&
       
  2563 			    hw->bus_type == e1000_bus_type_pci_express){
       
  2564 				switch (adapter->link_speed) {
       
  2565 				case SPEED_10:
       
  2566 				case SPEED_100:
       
  2567 					DPRINTK(PROBE, INFO,
       
  2568 						"10/100 speed: disabling TSO\n");
       
  2569 					netdev->features &= ~NETIF_F_TSO;
       
  2570 					netdev->features &= ~NETIF_F_TSO6;
       
  2571 					break;
       
  2572 				case SPEED_1000:
       
  2573 					netdev->features |= NETIF_F_TSO;
       
  2574 					netdev->features |= NETIF_F_TSO6;
       
  2575 					break;
       
  2576 				default:
       
  2577 					/* oops */
       
  2578 					break;
       
  2579 				}
       
  2580 			}
       
  2581 
       
  2582 			/* enable transmits in the hardware, need to do this
       
  2583 			 * after setting TARC0 */
       
  2584 			tctl = er32(TCTL);
       
  2585 			tctl |= E1000_TCTL_EN;
       
  2586 			ew32(TCTL, tctl);
       
  2587 
       
  2588 			netif_carrier_on(netdev);
       
  2589 			netif_wake_queue(netdev);
       
  2590 			mod_timer(&adapter->phy_info_timer, round_jiffies(jiffies + 2 * HZ));
       
  2591 			adapter->smartspeed = 0;
       
  2592 		} else {
       
  2593 			/* make sure the receive unit is started */
       
  2594 			if (hw->rx_needs_kicking) {
       
  2595 				u32 rctl = er32(RCTL);
       
  2596 				ew32(RCTL, rctl | E1000_RCTL_EN);
       
  2597 			}
       
  2598 		}
       
  2599 	} else {
       
  2600 		if (netif_carrier_ok(netdev)) {
       
  2601 			adapter->link_speed = 0;
       
  2602 			adapter->link_duplex = 0;
       
  2603 			DPRINTK(LINK, INFO, "NIC Link is Down\n");
       
  2604 			netif_carrier_off(netdev);
       
  2605 			netif_stop_queue(netdev);
       
  2606 			mod_timer(&adapter->phy_info_timer, round_jiffies(jiffies + 2 * HZ));
       
  2607 
       
  2608 			/* 80003ES2LAN workaround--
       
  2609 			 * For packet buffer work-around on link down event;
       
  2610 			 * disable receives in the ISR and
       
  2611 			 * reset device here in the watchdog
       
  2612 			 */
       
  2613 			if (hw->mac_type == e1000_80003es2lan)
       
  2614 				/* reset device */
       
  2615 				schedule_work(&adapter->reset_task);
       
  2616 		}
       
  2617 
       
  2618 		e1000_smartspeed(adapter);
       
  2619 	}
       
  2620 
       
  2621 	e1000_update_stats(adapter);
       
  2622 
       
  2623 	hw->tx_packet_delta = adapter->stats.tpt - adapter->tpt_old;
       
  2624 	adapter->tpt_old = adapter->stats.tpt;
       
  2625 	hw->collision_delta = adapter->stats.colc - adapter->colc_old;
       
  2626 	adapter->colc_old = adapter->stats.colc;
       
  2627 
       
  2628 	adapter->gorcl = adapter->stats.gorcl - adapter->gorcl_old;
       
  2629 	adapter->gorcl_old = adapter->stats.gorcl;
       
  2630 	adapter->gotcl = adapter->stats.gotcl - adapter->gotcl_old;
       
  2631 	adapter->gotcl_old = adapter->stats.gotcl;
       
  2632 
       
  2633 	e1000_update_adaptive(hw);
       
  2634 
       
  2635 	if (!netif_carrier_ok(netdev)) {
       
  2636 		if (E1000_DESC_UNUSED(txdr) + 1 < txdr->count) {
       
  2637 			/* We've lost link, so the controller stops DMA,
       
  2638 			 * but we've got queued Tx work that's never going
       
  2639 			 * to get done, so reset controller to flush Tx.
       
  2640 			 * (Do the reset outside of interrupt context). */
       
  2641 			adapter->tx_timeout_count++;
       
  2642 			schedule_work(&adapter->reset_task);
       
  2643 		}
       
  2644 	}
       
  2645 
       
  2646 	/* Cause software interrupt to ensure rx ring is cleaned */
       
  2647 	ew32(ICS, E1000_ICS_RXDMT0);
       
  2648 
       
  2649 	/* Force detection of hung controller every watchdog period */
       
  2650 	adapter->detect_tx_hung = true;
       
  2651 
       
  2652 	/* With 82571 controllers, LAA may be overwritten due to controller
       
  2653 	 * reset from the other port. Set the appropriate LAA in RAR[0] */
       
  2654 	if (hw->mac_type == e1000_82571 && hw->laa_is_present)
       
  2655 		e1000_rar_set(hw, hw->mac_addr, 0);
       
  2656 
       
  2657 	/* Reset the timer */
       
  2658 	mod_timer(&adapter->watchdog_timer, round_jiffies(jiffies + 2 * HZ));
       
  2659 }
       
  2660 
       
  2661 enum latency_range {
       
  2662 	lowest_latency = 0,
       
  2663 	low_latency = 1,
       
  2664 	bulk_latency = 2,
       
  2665 	latency_invalid = 255
       
  2666 };
       
  2667 
       
   2668 /**
       
   2669  * e1000_update_itr - update the dynamic ITR value based on statistics
       
   2670  * @adapter: pointer to adapter
       
   2671  * @itr_setting: current adapter->itr
       
   2672  * @packets: the number of packets during this measurement interval
       
   2673  * @bytes: the number of bytes during this measurement interval
       
   2674  *
       
   2675  *      Stores a new ITR value based on packet and byte counts during
       
   2676  *      the last interrupt; per-interrupt computation gives faster
       
   2677  *      updates and a more accurate ITR for the current traffic pattern.
       
   2678  *      Constants in this function were computed from theoretical maximum
       
   2679  *      wire speed, and thresholds were set from testing data so as to
       
   2680  *      minimize response time while increasing bulk throughput.
       
   2681  *      This functionality is controlled by the InterruptThrottleRate
       
   2682  *      module parameter (see e1000_param.c).
       
   2683  **/
       
  2684 static unsigned int e1000_update_itr(struct e1000_adapter *adapter,
       
  2685 				     u16 itr_setting, int packets, int bytes)
       
  2686 {
       
  2687 	unsigned int retval = itr_setting;
       
  2688 	struct e1000_hw *hw = &adapter->hw;
       
  2689 
       
  2690 	if (unlikely(hw->mac_type < e1000_82540))
       
  2691 		goto update_itr_done;
       
  2692 
       
  2693 	if (packets == 0)
       
  2694 		goto update_itr_done;
       
  2695 
       
  2696 	switch (itr_setting) {
       
  2697 	case lowest_latency:
       
   2698 		/* jumbo frames get bulk treatment */
       
  2699 		if (bytes/packets > 8000)
       
  2700 			retval = bulk_latency;
       
  2701 		else if ((packets < 5) && (bytes > 512))
       
  2702 			retval = low_latency;
       
  2703 		break;
       
  2704 	case low_latency:  /* 50 usec aka 20000 ints/s */
       
  2705 		if (bytes > 10000) {
       
  2706 			/* jumbo frames need bulk latency setting */
       
  2707 			if (bytes/packets > 8000)
       
  2708 				retval = bulk_latency;
       
  2709 			else if ((packets < 10) || ((bytes/packets) > 1200))
       
  2710 				retval = bulk_latency;
       
  2711 			else if ((packets > 35))
       
  2712 				retval = lowest_latency;
       
  2713 		} else if (bytes/packets > 2000)
       
  2714 			retval = bulk_latency;
       
  2715 		else if (packets <= 2 && bytes < 512)
       
  2716 			retval = lowest_latency;
       
  2717 		break;
       
  2718 	case bulk_latency: /* 250 usec aka 4000 ints/s */
       
  2719 		if (bytes > 25000) {
       
  2720 			if (packets > 35)
       
  2721 				retval = low_latency;
       
  2722 		} else if (bytes < 6000) {
       
  2723 			retval = low_latency;
       
  2724 		}
       
  2725 		break;
       
  2726 	}
       
  2727 
       
  2728 update_itr_done:
       
  2729 	return retval;
       
  2730 }
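       
        /* Editor's note -- a worked example of the classifier above, with
         * illustrative numbers (not from the source): starting in
         * low_latency with packets = 60 and bytes = 60000, bytes > 10000,
         * bytes/packets = 1000 triggers neither bulk test, and packets > 35
         * steps the ring up to lowest_latency.  Two jumbo frames instead
         * (packets = 2, bytes = 18000) give bytes/packets = 9000 > 8000 and
         * drop straight to bulk_latency. */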
       
  2731 
       
  2732 static void e1000_set_itr(struct e1000_adapter *adapter)
       
  2733 {
       
  2734 	struct e1000_hw *hw = &adapter->hw;
       
  2735 	u16 current_itr;
       
  2736 	u32 new_itr = adapter->itr;
       
  2737 
       
  2738 	if (unlikely(hw->mac_type < e1000_82540))
       
  2739 		return;
       
  2740 
       
  2741 	/* for non-gigabit speeds, just fix the interrupt rate at 4000 */
       
  2742 	if (unlikely(adapter->link_speed != SPEED_1000)) {
       
  2743 		current_itr = 0;
       
  2744 		new_itr = 4000;
       
  2745 		goto set_itr_now;
       
  2746 	}
       
  2747 
       
  2748 	adapter->tx_itr = e1000_update_itr(adapter,
       
  2749 	                            adapter->tx_itr,
       
  2750 	                            adapter->total_tx_packets,
       
  2751 	                            adapter->total_tx_bytes);
       
  2752 	/* conservative mode (itr 3) eliminates the lowest_latency setting */
       
  2753 	if (adapter->itr_setting == 3 && adapter->tx_itr == lowest_latency)
       
  2754 		adapter->tx_itr = low_latency;
       
  2755 
       
  2756 	adapter->rx_itr = e1000_update_itr(adapter,
       
  2757 	                            adapter->rx_itr,
       
  2758 	                            adapter->total_rx_packets,
       
  2759 	                            adapter->total_rx_bytes);
       
  2760 	/* conservative mode (itr 3) eliminates the lowest_latency setting */
       
  2761 	if (adapter->itr_setting == 3 && adapter->rx_itr == lowest_latency)
       
  2762 		adapter->rx_itr = low_latency;
       
  2763 
       
  2764 	current_itr = max(adapter->rx_itr, adapter->tx_itr);
       
  2765 
       
  2766 	switch (current_itr) {
       
  2767 	/* counts and packets in update_itr are dependent on these numbers */
       
  2768 	case lowest_latency:
       
  2769 		new_itr = 70000;
       
  2770 		break;
       
  2771 	case low_latency:
       
  2772 		new_itr = 20000; /* aka hwitr = ~200 */
       
  2773 		break;
       
  2774 	case bulk_latency:
       
  2775 		new_itr = 4000;
       
  2776 		break;
       
  2777 	default:
       
  2778 		break;
       
  2779 	}
       
  2780 
       
  2781 set_itr_now:
       
  2782 	if (new_itr != adapter->itr) {
       
  2783 		/* this attempts to bias the interrupt rate towards Bulk
       
  2784 		 * by adding intermediate steps when interrupt rate is
       
  2785 		 * increasing */
       
  2786 		new_itr = new_itr > adapter->itr ?
       
  2787 		             min(adapter->itr + (new_itr >> 2), new_itr) :
       
  2788 		             new_itr;
       
  2789 		adapter->itr = new_itr;
       
  2790 		ew32(ITR, 1000000000 / (new_itr * 256));
       
  2791 	}
       
  2792 
       
  2793 	return;
       
  2794 }
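       
        /* Editor's note -- the ITR register holds the minimum interval
         * between interrupts in 256 ns units, so the ew32(ITR, ...) above
         * converts an interrupts-per-second target into interval units.
         * A minimal sketch of the same conversion (the helper name is
         * ours, not the driver's):
         */
        static inline u32 itr_rate_to_reg(u32 ints_per_sec)
        {
        	/* e.g. 20000 ints/s -> 10^9 / (20000 * 256) = 195, and
        	 * 195 * 256 ns is roughly the 50 us that the low_latency
        	 * comment above refers to */
        	return 1000000000 / (ints_per_sec * 256);
        }
        /* The ramp-up damping works the same way: moving from itr = 4000
         * toward 20000 first lands on min(4000 + (20000 >> 2), 20000) =
         * 9000, biasing the rate toward bulk while it climbs. */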
       
  2795 
       
  2796 #define E1000_TX_FLAGS_CSUM		0x00000001
       
  2797 #define E1000_TX_FLAGS_VLAN		0x00000002
       
  2798 #define E1000_TX_FLAGS_TSO		0x00000004
       
  2799 #define E1000_TX_FLAGS_IPV4		0x00000008
       
  2800 #define E1000_TX_FLAGS_VLAN_MASK	0xffff0000
       
  2801 #define E1000_TX_FLAGS_VLAN_SHIFT	16
       
  2802 
       
  2803 static int e1000_tso(struct e1000_adapter *adapter,
       
  2804 		     struct e1000_tx_ring *tx_ring, struct sk_buff *skb)
       
  2805 {
       
  2806 	struct e1000_context_desc *context_desc;
       
  2807 	struct e1000_buffer *buffer_info;
       
  2808 	unsigned int i;
       
  2809 	u32 cmd_length = 0;
       
  2810 	u16 ipcse = 0, tucse, mss;
       
  2811 	u8 ipcss, ipcso, tucss, tucso, hdr_len;
       
  2812 	int err;
       
  2813 
       
  2814 	if (skb_is_gso(skb)) {
       
  2815 		if (skb_header_cloned(skb)) {
       
  2816 			err = pskb_expand_head(skb, 0, 0, GFP_ATOMIC);
       
  2817 			if (err)
       
  2818 				return err;
       
  2819 		}
       
  2820 
       
  2821 		hdr_len = skb_transport_offset(skb) + tcp_hdrlen(skb);
       
  2822 		mss = skb_shinfo(skb)->gso_size;
       
  2823 		if (skb->protocol == htons(ETH_P_IP)) {
       
  2824 			struct iphdr *iph = ip_hdr(skb);
       
  2825 			iph->tot_len = 0;
       
  2826 			iph->check = 0;
       
  2827 			tcp_hdr(skb)->check = ~csum_tcpudp_magic(iph->saddr,
       
  2828 								 iph->daddr, 0,
       
  2829 								 IPPROTO_TCP,
       
  2830 								 0);
       
  2831 			cmd_length = E1000_TXD_CMD_IP;
       
  2832 			ipcse = skb_transport_offset(skb) - 1;
       
  2833 		} else if (skb->protocol == htons(ETH_P_IPV6)) {
       
  2834 			ipv6_hdr(skb)->payload_len = 0;
       
  2835 			tcp_hdr(skb)->check =
       
  2836 				~csum_ipv6_magic(&ipv6_hdr(skb)->saddr,
       
  2837 						 &ipv6_hdr(skb)->daddr,
       
  2838 						 0, IPPROTO_TCP, 0);
       
  2839 			ipcse = 0;
       
  2840 		}
       
  2841 		ipcss = skb_network_offset(skb);
       
  2842 		ipcso = (void *)&(ip_hdr(skb)->check) - (void *)skb->data;
       
  2843 		tucss = skb_transport_offset(skb);
       
  2844 		tucso = (void *)&(tcp_hdr(skb)->check) - (void *)skb->data;
       
  2845 		tucse = 0;
       
  2846 
       
  2847 		cmd_length |= (E1000_TXD_CMD_DEXT | E1000_TXD_CMD_TSE |
       
  2848 			       E1000_TXD_CMD_TCP | (skb->len - (hdr_len)));
       
  2849 
       
  2850 		i = tx_ring->next_to_use;
       
  2851 		context_desc = E1000_CONTEXT_DESC(*tx_ring, i);
       
  2852 		buffer_info = &tx_ring->buffer_info[i];
       
  2853 
       
  2854 		context_desc->lower_setup.ip_fields.ipcss  = ipcss;
       
  2855 		context_desc->lower_setup.ip_fields.ipcso  = ipcso;
       
  2856 		context_desc->lower_setup.ip_fields.ipcse  = cpu_to_le16(ipcse);
       
  2857 		context_desc->upper_setup.tcp_fields.tucss = tucss;
       
  2858 		context_desc->upper_setup.tcp_fields.tucso = tucso;
       
  2859 		context_desc->upper_setup.tcp_fields.tucse = cpu_to_le16(tucse);
       
  2860 		context_desc->tcp_seg_setup.fields.mss     = cpu_to_le16(mss);
       
  2861 		context_desc->tcp_seg_setup.fields.hdr_len = hdr_len;
       
  2862 		context_desc->cmd_and_length = cpu_to_le32(cmd_length);
       
  2863 
       
  2864 		buffer_info->time_stamp = jiffies;
       
  2865 		buffer_info->next_to_watch = i;
       
  2866 
       
  2867 		if (++i == tx_ring->count) i = 0;
       
  2868 		tx_ring->next_to_use = i;
       
  2869 
       
  2870 		return true;
       
  2871 	}
       
  2872 	return false;
       
  2873 }
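       
        /* Editor's note -- for a plain TCP/IPv4 frame with no VLAN tag and
         * no IP/TCP options, the offsets computed above work out to (derived
         * values, not constants from the source): ipcss = 14 (start of the
         * IP header), ipcso = 24 (IP checksum field), ipcse = 33 (last IP
         * header byte), tucss = 34, tucso = 50 (TCP checksum field),
         * hdr_len = 54, and cmd_and_length carries skb->len - 54 as the
         * TSO payload length. */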
       
  2874 
       
  2875 static bool e1000_tx_csum(struct e1000_adapter *adapter,
       
  2876 			  struct e1000_tx_ring *tx_ring, struct sk_buff *skb)
       
  2877 {
       
  2878 	struct e1000_context_desc *context_desc;
       
  2879 	struct e1000_buffer *buffer_info;
       
  2880 	unsigned int i;
       
  2881 	u8 css;
       
  2882 	u32 cmd_len = E1000_TXD_CMD_DEXT;
       
  2883 
       
  2884 	if (skb->ip_summed != CHECKSUM_PARTIAL)
       
  2885 		return false;
       
  2886 
       
  2887 	switch (skb->protocol) {
       
  2888 	case __constant_htons(ETH_P_IP):
       
  2889 		if (ip_hdr(skb)->protocol == IPPROTO_TCP)
       
  2890 			cmd_len |= E1000_TXD_CMD_TCP;
       
  2891 		break;
       
  2892 	case __constant_htons(ETH_P_IPV6):
       
  2893 		/* XXX not handling all IPV6 headers */
       
  2894 		if (ipv6_hdr(skb)->nexthdr == IPPROTO_TCP)
       
  2895 			cmd_len |= E1000_TXD_CMD_TCP;
       
  2896 		break;
       
  2897 	default:
       
  2898 		if (unlikely(net_ratelimit()))
       
  2899 			DPRINTK(DRV, WARNING,
       
  2900 			        "checksum_partial proto=%x!\n", skb->protocol);
       
  2901 		break;
       
  2902 	}
       
  2903 
       
  2904 	css = skb_transport_offset(skb);
       
  2905 
       
  2906 	i = tx_ring->next_to_use;
       
  2907 	buffer_info = &tx_ring->buffer_info[i];
       
  2908 	context_desc = E1000_CONTEXT_DESC(*tx_ring, i);
       
  2909 
       
  2910 	context_desc->lower_setup.ip_config = 0;
       
  2911 	context_desc->upper_setup.tcp_fields.tucss = css;
       
  2912 	context_desc->upper_setup.tcp_fields.tucso =
       
  2913 		css + skb->csum_offset;
       
  2914 	context_desc->upper_setup.tcp_fields.tucse = 0;
       
  2915 	context_desc->tcp_seg_setup.data = 0;
       
  2916 	context_desc->cmd_and_length = cpu_to_le32(cmd_len);
       
  2917 
       
  2918 	buffer_info->time_stamp = jiffies;
       
  2919 	buffer_info->next_to_watch = i;
       
  2920 
       
  2921 	if (unlikely(++i == tx_ring->count)) i = 0;
       
  2922 	tx_ring->next_to_use = i;
       
  2923 
       
  2924 	return true;
       
  2925 }
       
  2926 
       
  2927 #define E1000_MAX_TXD_PWR	12
       
  2928 #define E1000_MAX_DATA_PER_TXD	(1<<E1000_MAX_TXD_PWR)
       
  2929 
       
  2930 static int e1000_tx_map(struct e1000_adapter *adapter,
       
  2931 			struct e1000_tx_ring *tx_ring,
       
  2932 			struct sk_buff *skb, unsigned int first,
       
  2933 			unsigned int max_per_txd, unsigned int nr_frags,
       
  2934 			unsigned int mss)
       
  2935 {
       
  2936 	struct e1000_hw *hw = &adapter->hw;
       
  2937 	struct e1000_buffer *buffer_info;
       
  2938 	unsigned int len = skb->len;
       
  2939 	unsigned int offset = 0, size, count = 0, i;
       
  2940 	unsigned int f;
       
  2941 	len -= skb->data_len;
       
  2942 
       
  2943 	i = tx_ring->next_to_use;
       
  2944 
       
  2945 	while (len) {
       
  2946 		buffer_info = &tx_ring->buffer_info[i];
       
  2947 		size = min(len, max_per_txd);
       
  2948 		/* Workaround for Controller erratum --
       
  2949 		 * descriptor for non-tso packet in a linear SKB that follows a
       
  2950 		 * tso gets written back prematurely before the data is fully
       
  2951 		 * DMA'd to the controller */
       
  2952 		if (!skb->data_len && tx_ring->last_tx_tso &&
       
  2953 		    !skb_is_gso(skb)) {
       
  2954 			tx_ring->last_tx_tso = 0;
       
  2955 			size -= 4;
       
  2956 		}
       
  2957 
       
  2958 		/* Workaround for premature desc write-backs
       
  2959 		 * in TSO mode.  Append 4-byte sentinel desc */
       
  2960 		if (unlikely(mss && !nr_frags && size == len && size > 8))
       
  2961 			size -= 4;
       
  2962 		/* work-around for errata 10 and it applies
       
  2963 		 * to all controllers in PCI-X mode
       
  2964 		 * The fix is to make sure that the first descriptor of a
       
  2965 		 * packet is smaller than 2048 - 16 - 16 (or 2016) bytes
       
  2966 		 */
       
   2967 		if (unlikely((hw->bus_type == e1000_bus_type_pcix) &&
       
   2968 			      (size > 2015) && count == 0))
       
   2969 			size = 2015;
       
  2970 
       
  2971 		/* Workaround for potential 82544 hang in PCI-X.  Avoid
       
  2972 		 * terminating buffers within evenly-aligned dwords. */
       
  2973 		if (unlikely(adapter->pcix_82544 &&
       
  2974 		   !((unsigned long)(skb->data + offset + size - 1) & 4) &&
       
  2975 		   size > 4))
       
  2976 			size -= 4;
       
  2977 
       
  2978 		buffer_info->length = size;
       
  2979 		buffer_info->dma =
       
  2980 			pci_map_single(adapter->pdev,
       
  2981 				skb->data + offset,
       
  2982 				size,
       
  2983 				PCI_DMA_TODEVICE);
       
  2984 		buffer_info->time_stamp = jiffies;
       
  2985 		buffer_info->next_to_watch = i;
       
  2986 
       
  2987 		len -= size;
       
  2988 		offset += size;
       
  2989 		count++;
       
  2990 		if (unlikely(++i == tx_ring->count)) i = 0;
       
  2991 	}
       
  2992 
       
  2993 	for (f = 0; f < nr_frags; f++) {
       
  2994 		struct skb_frag_struct *frag;
       
  2995 
       
  2996 		frag = &skb_shinfo(skb)->frags[f];
       
  2997 		len = frag->size;
       
  2998 		offset = frag->page_offset;
       
  2999 
       
  3000 		while (len) {
       
  3001 			buffer_info = &tx_ring->buffer_info[i];
       
  3002 			size = min(len, max_per_txd);
       
  3003 			/* Workaround for premature desc write-backs
       
  3004 			 * in TSO mode.  Append 4-byte sentinel desc */
       
  3005 			if (unlikely(mss && f == (nr_frags-1) && size == len && size > 8))
       
  3006 				size -= 4;
       
  3007 			/* Workaround for potential 82544 hang in PCI-X.
       
  3008 			 * Avoid terminating buffers within evenly-aligned
       
  3009 			 * dwords. */
       
  3010 			if (unlikely(adapter->pcix_82544 &&
       
   3011 			   !((unsigned long)(frag->page + offset + size - 1) & 4) &&
       
  3012 			   size > 4))
       
  3013 				size -= 4;
       
  3014 
       
  3015 			buffer_info->length = size;
       
  3016 			buffer_info->dma =
       
  3017 				pci_map_page(adapter->pdev,
       
  3018 					frag->page,
       
  3019 					offset,
       
  3020 					size,
       
  3021 					PCI_DMA_TODEVICE);
       
  3022 			buffer_info->time_stamp = jiffies;
       
  3023 			buffer_info->next_to_watch = i;
       
  3024 
       
  3025 			len -= size;
       
  3026 			offset += size;
       
  3027 			count++;
       
  3028 			if (unlikely(++i == tx_ring->count)) i = 0;
       
  3029 		}
       
  3030 	}
       
  3031 
       
  3032 	i = (i == 0) ? tx_ring->count - 1 : i - 1;
       
  3033 	tx_ring->buffer_info[i].skb = skb;
       
  3034 	tx_ring->buffer_info[first].next_to_watch = i;
       
  3035 
       
  3036 	return count;
       
  3037 }
       
  3038 
       
  3039 static void e1000_tx_queue(struct e1000_adapter *adapter,
       
  3040 			   struct e1000_tx_ring *tx_ring, int tx_flags,
       
  3041 			   int count)
       
  3042 {
       
  3043 	struct e1000_hw *hw = &adapter->hw;
       
  3044 	struct e1000_tx_desc *tx_desc = NULL;
       
  3045 	struct e1000_buffer *buffer_info;
       
  3046 	u32 txd_upper = 0, txd_lower = E1000_TXD_CMD_IFCS;
       
  3047 	unsigned int i;
       
  3048 
       
  3049 	if (likely(tx_flags & E1000_TX_FLAGS_TSO)) {
       
  3050 		txd_lower |= E1000_TXD_CMD_DEXT | E1000_TXD_DTYP_D |
       
  3051 		             E1000_TXD_CMD_TSE;
       
  3052 		txd_upper |= E1000_TXD_POPTS_TXSM << 8;
       
  3053 
       
  3054 		if (likely(tx_flags & E1000_TX_FLAGS_IPV4))
       
  3055 			txd_upper |= E1000_TXD_POPTS_IXSM << 8;
       
  3056 	}
       
  3057 
       
  3058 	if (likely(tx_flags & E1000_TX_FLAGS_CSUM)) {
       
  3059 		txd_lower |= E1000_TXD_CMD_DEXT | E1000_TXD_DTYP_D;
       
  3060 		txd_upper |= E1000_TXD_POPTS_TXSM << 8;
       
  3061 	}
       
  3062 
       
  3063 	if (unlikely(tx_flags & E1000_TX_FLAGS_VLAN)) {
       
  3064 		txd_lower |= E1000_TXD_CMD_VLE;
       
  3065 		txd_upper |= (tx_flags & E1000_TX_FLAGS_VLAN_MASK);
       
  3066 	}
       
  3067 
       
  3068 	i = tx_ring->next_to_use;
       
  3069 
       
  3070 	while (count--) {
       
  3071 		buffer_info = &tx_ring->buffer_info[i];
       
  3072 		tx_desc = E1000_TX_DESC(*tx_ring, i);
       
  3073 		tx_desc->buffer_addr = cpu_to_le64(buffer_info->dma);
       
  3074 		tx_desc->lower.data =
       
  3075 			cpu_to_le32(txd_lower | buffer_info->length);
       
  3076 		tx_desc->upper.data = cpu_to_le32(txd_upper);
       
  3077 		if (unlikely(++i == tx_ring->count)) i = 0;
       
  3078 	}
       
  3079 
       
  3080 	tx_desc->lower.data |= cpu_to_le32(adapter->txd_cmd);
       
  3081 
       
  3082 	/* Force memory writes to complete before letting h/w
       
  3083 	 * know there are new descriptors to fetch.  (Only
       
  3084 	 * applicable for weak-ordered memory model archs,
       
  3085 	 * such as IA-64). */
       
  3086 	wmb();
       
  3087 
       
  3088 	tx_ring->next_to_use = i;
       
  3089 	writel(i, hw->hw_addr + tx_ring->tdt);
       
  3090 	/* we need this if more than one processor can write to our tail
       
   3091 	 * at a time, it synchronizes IO on IA64/Altix systems */
       
  3092 	mmiowb();
       
  3093 }
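       
        /* Editor's note -- the wmb()/writel()/mmiowb() tail above is the
         * standard descriptor-ring doorbell pattern; a minimal sketch, with
         * illustrative names:
         *
         *	desc[i].addr = cpu_to_le64(dma);     // publish the payload
         *	desc[i].cmd  = cpu_to_le32(flags);
         *	wmb();                               // descriptors before tail
         *	writel(i, ring->tail_reg);           // hw may fetch from here
         *	mmiowb();                            // order MMIO across CPUs
         */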
       
  3094 
       
  3095 /**
       
  3096  * 82547 workaround to avoid controller hang in half-duplex environment.
       
  3097  * The workaround is to avoid queuing a large packet that would span
       
  3098  * the internal Tx FIFO ring boundary by notifying the stack to resend
       
  3099  * the packet at a later time.  This gives the Tx FIFO an opportunity to
       
  3100  * flush all packets.  When that occurs, we reset the Tx FIFO pointers
       
  3101  * to the beginning of the Tx FIFO.
       
  3102  **/
       
  3103 
       
  3104 #define E1000_FIFO_HDR			0x10
       
  3105 #define E1000_82547_PAD_LEN		0x3E0
       
  3106 
       
  3107 static int e1000_82547_fifo_workaround(struct e1000_adapter *adapter,
       
  3108 				       struct sk_buff *skb)
       
  3109 {
       
  3110 	u32 fifo_space = adapter->tx_fifo_size - adapter->tx_fifo_head;
       
  3111 	u32 skb_fifo_len = skb->len + E1000_FIFO_HDR;
       
  3112 
       
  3113 	skb_fifo_len = ALIGN(skb_fifo_len, E1000_FIFO_HDR);
       
  3114 
       
  3115 	if (adapter->link_duplex != HALF_DUPLEX)
       
  3116 		goto no_fifo_stall_required;
       
  3117 
       
  3118 	if (atomic_read(&adapter->tx_fifo_stall))
       
  3119 		return 1;
       
  3120 
       
  3121 	if (skb_fifo_len >= (E1000_82547_PAD_LEN + fifo_space)) {
       
  3122 		atomic_set(&adapter->tx_fifo_stall, 1);
       
  3123 		return 1;
       
  3124 	}
       
  3125 
       
  3126 no_fifo_stall_required:
       
  3127 	adapter->tx_fifo_head += skb_fifo_len;
       
  3128 	if (adapter->tx_fifo_head >= adapter->tx_fifo_size)
       
  3129 		adapter->tx_fifo_head -= adapter->tx_fifo_size;
       
  3130 	return 0;
       
  3131 }
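       
        /* Editor's note -- illustrative numbers for the check above: a
         * 1514-byte frame becomes 1514 + 16 = 1530, which ALIGN() rounds up
         * to 1536 FIFO bytes.  With E1000_82547_PAD_LEN = 0x3E0 (992), a
         * half-duplex link, and only 256 bytes of space left before the
         * FIFO wraps, 1536 >= 992 + 256 holds and the stall flag is set
         * until the FIFO drains and the head pointer is reset. */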
       
  3132 
       
  3133 #define MINIMUM_DHCP_PACKET_SIZE 282
       
  3134 static int e1000_transfer_dhcp_info(struct e1000_adapter *adapter,
       
  3135 				    struct sk_buff *skb)
       
  3136 {
       
   3137 	struct e1000_hw *hw = &adapter->hw;
       
  3138 	u16 length, offset;
       
  3139 	if (vlan_tx_tag_present(skb)) {
       
  3140 		if (!((vlan_tx_tag_get(skb) == hw->mng_cookie.vlan_id) &&
       
   3141 		      (hw->mng_cookie.status &
       
   3142 		       E1000_MNG_DHCP_COOKIE_STATUS_VLAN_SUPPORT)))
       
  3143 			return 0;
       
  3144 	}
       
  3145 	if (skb->len > MINIMUM_DHCP_PACKET_SIZE) {
       
  3146 		struct ethhdr *eth = (struct ethhdr *)skb->data;
       
   3147 		if (htons(ETH_P_IP) == eth->h_proto) {
       
   3148 			const struct iphdr *ip =
       
   3149 				(struct iphdr *)((u8 *)skb->data + 14);
       
  3150 			if (IPPROTO_UDP == ip->protocol) {
       
  3151 				struct udphdr *udp =
       
  3152 					(struct udphdr *)((u8 *)ip +
       
  3153 						(ip->ihl << 2));
       
  3154 				if (ntohs(udp->dest) == 67) {
       
  3155 					offset = (u8 *)udp + 8 - skb->data;
       
  3156 					length = skb->len - offset;
       
  3157 
       
  3158 					return e1000_mng_write_dhcp_info(hw,
       
  3159 							(u8 *)udp + 8,
       
  3160 							length);
       
  3161 				}
       
  3162 			}
       
  3163 		}
       
  3164 	}
       
  3165 	return 0;
       
  3166 }
       
  3167 
       
  3168 static int __e1000_maybe_stop_tx(struct net_device *netdev, int size)
       
  3169 {
       
  3170 	struct e1000_adapter *adapter = netdev_priv(netdev);
       
  3171 	struct e1000_tx_ring *tx_ring = adapter->tx_ring;
       
  3172 
       
  3173 	netif_stop_queue(netdev);
       
  3174 	/* Herbert's original patch had:
       
  3175 	 *  smp_mb__after_netif_stop_queue();
       
  3176 	 * but since that doesn't exist yet, just open code it. */
       
  3177 	smp_mb();
       
  3178 
       
  3179 	/* We need to check again in a case another CPU has just
       
  3180 	 * made room available. */
       
  3181 	if (likely(E1000_DESC_UNUSED(tx_ring) < size))
       
  3182 		return -EBUSY;
       
  3183 
       
  3184 	/* A reprieve! */
       
  3185 	netif_start_queue(netdev);
       
  3186 	++adapter->restart_queue;
       
  3187 	return 0;
       
  3188 }
       
  3189 
       
  3190 static int e1000_maybe_stop_tx(struct net_device *netdev,
       
  3191                                struct e1000_tx_ring *tx_ring, int size)
       
  3192 {
       
  3193 	if (likely(E1000_DESC_UNUSED(tx_ring) >= size))
       
  3194 		return 0;
       
  3195 	return __e1000_maybe_stop_tx(netdev, size);
       
  3196 }
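       
        /* Editor's note -- the smp_mb() in __e1000_maybe_stop_tx() pairs
         * with the one in e1000_clean_tx_irq(): the sender orders "queue
         * stopped" before re-reading the free-descriptor count, and the
         * cleaner orders its next_to_clean update before re-checking the
         * stopped state, so at least one side always observes the other's
         * write and the queue cannot stall with descriptors available. */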
       
  3197 
       
   3198 #define TXD_USE_COUNT(S, X) (((S) >> (X)) + 1)
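        /* Editor's note -- TXD_USE_COUNT deliberately over-reserves: with
         * E1000_MAX_TXD_PWR = 12 (4096-byte chunks), a 9000-byte buffer
         * needs ceil(9000/4096) = 3 descriptors and (9000 >> 12) + 1 = 3,
         * but an exact multiple such as 8192 also reserves (8192 >> 12) + 1
         * = 3 instead of 2.  The spare slot only affects the early
         * queue-stop decision, never the descriptors actually written. */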
       
  3199 static int e1000_xmit_frame(struct sk_buff *skb, struct net_device *netdev)
       
  3200 {
       
  3201 	struct e1000_adapter *adapter = netdev_priv(netdev);
       
  3202 	struct e1000_hw *hw = &adapter->hw;
       
  3203 	struct e1000_tx_ring *tx_ring;
       
  3204 	unsigned int first, max_per_txd = E1000_MAX_DATA_PER_TXD;
       
  3205 	unsigned int max_txd_pwr = E1000_MAX_TXD_PWR;
       
  3206 	unsigned int tx_flags = 0;
       
  3207 	unsigned int len = skb->len - skb->data_len;
       
  3208 	unsigned long flags;
       
  3209 	unsigned int nr_frags;
       
  3210 	unsigned int mss;
       
  3211 	int count = 0;
       
  3212 	int tso;
       
  3213 	unsigned int f;
       
  3214 
       
  3215 	/* This goes back to the question of how to logically map a tx queue
       
  3216 	 * to a flow.  Right now, performance is impacted slightly negatively
       
  3217 	 * if using multiple tx queues.  If the stack breaks away from a
       
  3218 	 * single qdisc implementation, we can look at this again. */
       
  3219 	tx_ring = adapter->tx_ring;
       
  3220 
       
  3221 	if (unlikely(skb->len <= 0)) {
       
  3222 		dev_kfree_skb_any(skb);
       
  3223 		return NETDEV_TX_OK;
       
  3224 	}
       
  3225 
       
   3226 	/* 82571 and newer controllers don't need the workaround that
       
   3227 	 * limited descriptor length to 4kB */
       
  3228 	if (hw->mac_type >= e1000_82571)
       
  3229 		max_per_txd = 8192;
       
  3230 
       
  3231 	mss = skb_shinfo(skb)->gso_size;
       
   3232 	/* The controller does a simple calculation to
       
   3233 	 * make sure there is enough room in the FIFO before
       
   3234 	 * initiating the DMA for each buffer.  The calculation
       
   3235 	 * assumes ceil(buffer len/mss) <= 4, so to make sure we
       
   3236 	 * don't overrun the FIFO, adjust the max buffer len if
       
   3237 	 * mss drops. */
       
  3238 	if (mss) {
       
  3239 		u8 hdr_len;
       
  3240 		max_per_txd = min(mss << 2, max_per_txd);
       
  3241 		max_txd_pwr = fls(max_per_txd) - 1;
       
  3242 
       
  3243 		/* TSO Workaround for 82571/2/3 Controllers -- if skb->data
       
   3244 		 * points to just header, pull a few bytes of payload from
       
   3245 		 * frags into skb->data */
       
  3246 		hdr_len = skb_transport_offset(skb) + tcp_hdrlen(skb);
       
  3247 		if (skb->data_len && hdr_len == len) {
       
  3248 			switch (hw->mac_type) {
       
  3249 				unsigned int pull_size;
       
  3250 			case e1000_82544:
       
  3251 				/* Make sure we have room to chop off 4 bytes,
       
  3252 				 * and that the end alignment will work out to
       
  3253 				 * this hardware's requirements
       
  3254 				 * NOTE: this is a TSO only workaround
       
  3255 				 * if end byte alignment not correct move us
       
  3256 				 * into the next dword */
       
  3257 				if ((unsigned long)(skb_tail_pointer(skb) - 1) & 4)
       
  3258 					break;
       
  3259 				/* fall through */
       
  3260 			case e1000_82571:
       
  3261 			case e1000_82572:
       
  3262 			case e1000_82573:
       
  3263 			case e1000_ich8lan:
       
  3264 				pull_size = min((unsigned int)4, skb->data_len);
       
  3265 				if (!__pskb_pull_tail(skb, pull_size)) {
       
  3266 					DPRINTK(DRV, ERR,
       
  3267 						"__pskb_pull_tail failed.\n");
       
  3268 					dev_kfree_skb_any(skb);
       
  3269 					return NETDEV_TX_OK;
       
  3270 				}
       
  3271 				len = skb->len - skb->data_len;
       
  3272 				break;
       
  3273 			default:
       
  3274 				/* do nothing */
       
  3275 				break;
       
  3276 			}
       
  3277 		}
       
  3278 	}
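       
        	/* Editor's note -- with an illustrative mss of 1448, the
        	 * clamp above yields max_per_txd = min(1448 << 2, 8192) =
        	 * 5792 and max_txd_pwr = fls(5792) - 1 = 12, so no single
        	 * descriptor spans more than four segments and the
        	 * controller's ceil(len/mss) <= 4 FIFO assumption holds. */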
       
  3279 
       
  3280 	/* reserve a descriptor for the offload context */
       
  3281 	if ((mss) || (skb->ip_summed == CHECKSUM_PARTIAL))
       
  3282 		count++;
       
  3283 	count++;
       
  3284 
       
  3285 	/* Controller Erratum workaround */
       
  3286 	if (!skb->data_len && tx_ring->last_tx_tso && !skb_is_gso(skb))
       
  3287 		count++;
       
  3288 
       
  3289 	count += TXD_USE_COUNT(len, max_txd_pwr);
       
  3290 
       
  3291 	if (adapter->pcix_82544)
       
  3292 		count++;
       
  3293 
       
  3294 	/* work-around for errata 10 and it applies to all controllers
       
  3295 	 * in PCI-X mode, so add one more descriptor to the count
       
  3296 	 */
       
  3297 	if (unlikely((hw->bus_type == e1000_bus_type_pcix) &&
       
  3298 			(len > 2015)))
       
  3299 		count++;
       
  3300 
       
  3301 	nr_frags = skb_shinfo(skb)->nr_frags;
       
  3302 	for (f = 0; f < nr_frags; f++)
       
  3303 		count += TXD_USE_COUNT(skb_shinfo(skb)->frags[f].size,
       
  3304 				       max_txd_pwr);
       
  3305 	if (adapter->pcix_82544)
       
  3306 		count += nr_frags;
       
  3307 
       
  3308 
       
  3309 	if (hw->tx_pkt_filtering &&
       
  3310 	    (hw->mac_type == e1000_82573))
       
  3311 		e1000_transfer_dhcp_info(adapter, skb);
       
  3312 
       
  3313 	if (!spin_trylock_irqsave(&tx_ring->tx_lock, flags))
       
  3314 		/* Collision - tell upper layer to requeue */
       
  3315 		return NETDEV_TX_LOCKED;
       
  3316 
       
  3317 	/* need: count + 2 desc gap to keep tail from touching
       
  3318 	 * head, otherwise try next time */
       
  3319 	if (unlikely(e1000_maybe_stop_tx(netdev, tx_ring, count + 2))) {
       
  3320 		spin_unlock_irqrestore(&tx_ring->tx_lock, flags);
       
  3321 		return NETDEV_TX_BUSY;
       
  3322 	}
       
  3323 
       
  3324 	if (unlikely(hw->mac_type == e1000_82547)) {
       
  3325 		if (unlikely(e1000_82547_fifo_workaround(adapter, skb))) {
       
  3326 			netif_stop_queue(netdev);
       
  3327 			mod_timer(&adapter->tx_fifo_stall_timer, jiffies + 1);
       
  3328 			spin_unlock_irqrestore(&tx_ring->tx_lock, flags);
       
  3329 			return NETDEV_TX_BUSY;
       
  3330 		}
       
  3331 	}
       
  3332 
       
  3333 	if (unlikely(adapter->vlgrp && vlan_tx_tag_present(skb))) {
       
  3334 		tx_flags |= E1000_TX_FLAGS_VLAN;
       
  3335 		tx_flags |= (vlan_tx_tag_get(skb) << E1000_TX_FLAGS_VLAN_SHIFT);
       
  3336 	}
       
  3337 
       
  3338 	first = tx_ring->next_to_use;
       
  3339 
       
  3340 	tso = e1000_tso(adapter, tx_ring, skb);
       
  3341 	if (tso < 0) {
       
  3342 		dev_kfree_skb_any(skb);
       
  3343 		spin_unlock_irqrestore(&tx_ring->tx_lock, flags);
       
  3344 		return NETDEV_TX_OK;
       
  3345 	}
       
  3346 
       
  3347 	if (likely(tso)) {
       
  3348 		tx_ring->last_tx_tso = 1;
       
  3349 		tx_flags |= E1000_TX_FLAGS_TSO;
       
  3350 	} else if (likely(e1000_tx_csum(adapter, tx_ring, skb)))
       
  3351 		tx_flags |= E1000_TX_FLAGS_CSUM;
       
  3352 
       
   3353 	/* Old method was to assume an IPv4 packet by default if TSO was
       
   3354 	 * enabled.  82571 hardware supports TSO for IPv6 as well, so we
       
   3355 	 * can no longer assume; check the protocol explicitly. */
       
  3356 	if (likely(skb->protocol == htons(ETH_P_IP)))
       
  3357 		tx_flags |= E1000_TX_FLAGS_IPV4;
       
  3358 
       
  3359 	e1000_tx_queue(adapter, tx_ring, tx_flags,
       
  3360 	               e1000_tx_map(adapter, tx_ring, skb, first,
       
  3361 	                            max_per_txd, nr_frags, mss));
       
  3362 
       
  3363 	netdev->trans_start = jiffies;
       
  3364 
       
  3365 	/* Make sure there is space in the ring for the next send. */
       
  3366 	e1000_maybe_stop_tx(netdev, tx_ring, MAX_SKB_FRAGS + 2);
       
  3367 
       
  3368 	spin_unlock_irqrestore(&tx_ring->tx_lock, flags);
       
  3369 	return NETDEV_TX_OK;
       
  3370 }
       
  3371 
       
  3372 /**
       
  3373  * e1000_tx_timeout - Respond to a Tx Hang
       
  3374  * @netdev: network interface device structure
       
  3375  **/
       
  3376 
       
  3377 static void e1000_tx_timeout(struct net_device *netdev)
       
  3378 {
       
  3379 	struct e1000_adapter *adapter = netdev_priv(netdev);
       
  3380 
       
  3381 	/* Do the reset outside of interrupt context */
       
  3382 	adapter->tx_timeout_count++;
       
  3383 	schedule_work(&adapter->reset_task);
       
  3384 }
       
  3385 
       
  3386 static void e1000_reset_task(struct work_struct *work)
       
  3387 {
       
  3388 	struct e1000_adapter *adapter =
       
  3389 		container_of(work, struct e1000_adapter, reset_task);
       
  3390 
       
  3391 	e1000_reinit_locked(adapter);
       
  3392 }
       
  3393 
       
  3394 /**
       
  3395  * e1000_get_stats - Get System Network Statistics
       
  3396  * @netdev: network interface device structure
       
  3397  *
       
  3398  * Returns the address of the device statistics structure.
       
  3399  * The statistics are actually updated from the timer callback.
       
  3400  **/
       
  3401 
       
  3402 static struct net_device_stats *e1000_get_stats(struct net_device *netdev)
       
  3403 {
       
  3404 	struct e1000_adapter *adapter = netdev_priv(netdev);
       
  3405 
       
  3406 	/* only return the current stats */
       
  3407 	return &adapter->net_stats;
       
  3408 }
       
  3409 
       
  3410 /**
       
  3411  * e1000_change_mtu - Change the Maximum Transfer Unit
       
  3412  * @netdev: network interface device structure
       
  3413  * @new_mtu: new value for maximum frame size
       
  3414  *
       
  3415  * Returns 0 on success, negative on failure
       
  3416  **/
       
  3417 
       
  3418 static int e1000_change_mtu(struct net_device *netdev, int new_mtu)
       
  3419 {
       
  3420 	struct e1000_adapter *adapter = netdev_priv(netdev);
       
  3421 	struct e1000_hw *hw = &adapter->hw;
       
  3422 	int max_frame = new_mtu + ENET_HEADER_SIZE + ETHERNET_FCS_SIZE;
       
  3423 	u16 eeprom_data = 0;
       
  3424 
       
  3425 	if ((max_frame < MINIMUM_ETHERNET_FRAME_SIZE) ||
       
  3426 	    (max_frame > MAX_JUMBO_FRAME_SIZE)) {
       
  3427 		DPRINTK(PROBE, ERR, "Invalid MTU setting\n");
       
  3428 		return -EINVAL;
       
  3429 	}
       
  3430 
       
  3431 	/* Adapter-specific max frame size limits. */
       
  3432 	switch (hw->mac_type) {
       
  3433 	case e1000_undefined ... e1000_82542_rev2_1:
       
  3434 	case e1000_ich8lan:
       
  3435 		if (max_frame > MAXIMUM_ETHERNET_FRAME_SIZE) {
       
  3436 			DPRINTK(PROBE, ERR, "Jumbo Frames not supported.\n");
       
  3437 			return -EINVAL;
       
  3438 		}
       
  3439 		break;
       
  3440 	case e1000_82573:
       
  3441 		/* Jumbo Frames not supported if:
       
  3442 		 * - this is not an 82573L device
       
  3443 		 * - ASPM is enabled in any way (0x1A bits 3:2) */
       
  3444 		e1000_read_eeprom(hw, EEPROM_INIT_3GIO_3, 1,
       
  3445 		                  &eeprom_data);
       
  3446 		if ((hw->device_id != E1000_DEV_ID_82573L) ||
       
  3447 		    (eeprom_data & EEPROM_WORD1A_ASPM_MASK)) {
       
  3448 			if (max_frame > MAXIMUM_ETHERNET_FRAME_SIZE) {
       
  3449 				DPRINTK(PROBE, ERR,
       
   3450 					"Jumbo Frames not supported.\n");
       
  3451 				return -EINVAL;
       
  3452 			}
       
  3453 			break;
       
  3454 		}
       
  3455 		/* ERT will be enabled later to enable wire speed receives */
       
  3456 
       
  3457 		/* fall through to get support */
       
  3458 	case e1000_82571:
       
  3459 	case e1000_82572:
       
  3460 	case e1000_80003es2lan:
       
  3461 #define MAX_STD_JUMBO_FRAME_SIZE 9234
       
  3462 		if (max_frame > MAX_STD_JUMBO_FRAME_SIZE) {
       
  3463 			DPRINTK(PROBE, ERR, "MTU > 9216 not supported.\n");
       
  3464 			return -EINVAL;
       
  3465 		}
       
  3466 		break;
       
  3467 	default:
       
  3468 		/* Capable of supporting up to MAX_JUMBO_FRAME_SIZE limit. */
       
  3469 		break;
       
  3470 	}
       
  3471 
       
  3472 	/* NOTE: netdev_alloc_skb reserves 16 bytes, and typically NET_IP_ALIGN
       
  3473 	 * means we reserve 2 more, this pushes us to allocate from the next
       
  3474 	 * larger slab size
       
  3475 	 * i.e. RXBUFFER_2048 --> size-4096 slab */
       
  3476 
       
  3477 	if (max_frame <= E1000_RXBUFFER_256)
       
  3478 		adapter->rx_buffer_len = E1000_RXBUFFER_256;
       
  3479 	else if (max_frame <= E1000_RXBUFFER_512)
       
  3480 		adapter->rx_buffer_len = E1000_RXBUFFER_512;
       
  3481 	else if (max_frame <= E1000_RXBUFFER_1024)
       
  3482 		adapter->rx_buffer_len = E1000_RXBUFFER_1024;
       
  3483 	else if (max_frame <= E1000_RXBUFFER_2048)
       
  3484 		adapter->rx_buffer_len = E1000_RXBUFFER_2048;
       
  3485 	else if (max_frame <= E1000_RXBUFFER_4096)
       
  3486 		adapter->rx_buffer_len = E1000_RXBUFFER_4096;
       
  3487 	else if (max_frame <= E1000_RXBUFFER_8192)
       
  3488 		adapter->rx_buffer_len = E1000_RXBUFFER_8192;
       
  3489 	else if (max_frame <= E1000_RXBUFFER_16384)
       
  3490 		adapter->rx_buffer_len = E1000_RXBUFFER_16384;
       
  3491 
       
  3492 	/* adjust allocation if LPE protects us, and we aren't using SBP */
       
  3493 	if (!hw->tbi_compatibility_on &&
       
  3494 	    ((max_frame == MAXIMUM_ETHERNET_FRAME_SIZE) ||
       
  3495 	     (max_frame == MAXIMUM_ETHERNET_VLAN_SIZE)))
       
  3496 		adapter->rx_buffer_len = MAXIMUM_ETHERNET_VLAN_SIZE;
       
  3497 
       
  3498 	netdev->mtu = new_mtu;
       
  3499 	hw->max_frame_size = max_frame;
       
  3500 
       
  3501 	if (netif_running(netdev))
       
  3502 		e1000_reinit_locked(adapter);
       
  3503 
       
  3504 	return 0;
       
  3505 }
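       
        /* Editor's note -- an illustrative walk through the sizing above:
         * new_mtu = 1500 gives max_frame = 1500 + 14 + 4 = 1518, which
         * first selects the E1000_RXBUFFER_2048 bucket; with TBI
         * compatibility off, the final adjustment then trims the
         * allocation to MAXIMUM_ETHERNET_VLAN_SIZE so the 16-byte
         * netdev_alloc_skb reserve cannot push it into the next slab. */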
       
  3506 
       
  3507 /**
       
  3508  * e1000_update_stats - Update the board statistics counters
       
  3509  * @adapter: board private structure
       
  3510  **/
       
  3511 
       
  3512 void e1000_update_stats(struct e1000_adapter *adapter)
       
  3513 {
       
  3514 	struct e1000_hw *hw = &adapter->hw;
       
  3515 	struct pci_dev *pdev = adapter->pdev;
       
  3516 	unsigned long flags;
       
  3517 	u16 phy_tmp;
       
  3518 
       
  3519 #define PHY_IDLE_ERROR_COUNT_MASK 0x00FF
       
  3520 
       
  3521 	/*
       
  3522 	 * Prevent stats update while adapter is being reset, or if the pci
       
  3523 	 * connection is down.
       
  3524 	 */
       
  3525 	if (adapter->link_speed == 0)
       
  3526 		return;
       
  3527 	if (pci_channel_offline(pdev))
       
  3528 		return;
       
  3529 
       
  3530 	spin_lock_irqsave(&adapter->stats_lock, flags);
       
  3531 
       
  3532 	/* these counters are modified from e1000_tbi_adjust_stats,
       
  3533 	 * called from the interrupt context, so they must only
       
  3534 	 * be written while holding adapter->stats_lock
       
  3535 	 */
       
  3536 
       
  3537 	adapter->stats.crcerrs += er32(CRCERRS);
       
  3538 	adapter->stats.gprc += er32(GPRC);
       
  3539 	adapter->stats.gorcl += er32(GORCL);
       
  3540 	adapter->stats.gorch += er32(GORCH);
       
  3541 	adapter->stats.bprc += er32(BPRC);
       
  3542 	adapter->stats.mprc += er32(MPRC);
       
  3543 	adapter->stats.roc += er32(ROC);
       
  3544 
       
  3545 	if (hw->mac_type != e1000_ich8lan) {
       
  3546 		adapter->stats.prc64 += er32(PRC64);
       
  3547 		adapter->stats.prc127 += er32(PRC127);
       
  3548 		adapter->stats.prc255 += er32(PRC255);
       
  3549 		adapter->stats.prc511 += er32(PRC511);
       
  3550 		adapter->stats.prc1023 += er32(PRC1023);
       
  3551 		adapter->stats.prc1522 += er32(PRC1522);
       
  3552 	}
       
  3553 
       
  3554 	adapter->stats.symerrs += er32(SYMERRS);
       
  3555 	adapter->stats.mpc += er32(MPC);
       
  3556 	adapter->stats.scc += er32(SCC);
       
  3557 	adapter->stats.ecol += er32(ECOL);
       
  3558 	adapter->stats.mcc += er32(MCC);
       
  3559 	adapter->stats.latecol += er32(LATECOL);
       
  3560 	adapter->stats.dc += er32(DC);
       
  3561 	adapter->stats.sec += er32(SEC);
       
  3562 	adapter->stats.rlec += er32(RLEC);
       
  3563 	adapter->stats.xonrxc += er32(XONRXC);
       
  3564 	adapter->stats.xontxc += er32(XONTXC);
       
  3565 	adapter->stats.xoffrxc += er32(XOFFRXC);
       
  3566 	adapter->stats.xofftxc += er32(XOFFTXC);
       
  3567 	adapter->stats.fcruc += er32(FCRUC);
       
  3568 	adapter->stats.gptc += er32(GPTC);
       
  3569 	adapter->stats.gotcl += er32(GOTCL);
       
  3570 	adapter->stats.gotch += er32(GOTCH);
       
  3571 	adapter->stats.rnbc += er32(RNBC);
       
  3572 	adapter->stats.ruc += er32(RUC);
       
  3573 	adapter->stats.rfc += er32(RFC);
       
  3574 	adapter->stats.rjc += er32(RJC);
       
  3575 	adapter->stats.torl += er32(TORL);
       
  3576 	adapter->stats.torh += er32(TORH);
       
  3577 	adapter->stats.totl += er32(TOTL);
       
  3578 	adapter->stats.toth += er32(TOTH);
       
  3579 	adapter->stats.tpr += er32(TPR);
       
  3580 
       
  3581 	if (hw->mac_type != e1000_ich8lan) {
       
  3582 		adapter->stats.ptc64 += er32(PTC64);
       
  3583 		adapter->stats.ptc127 += er32(PTC127);
       
  3584 		adapter->stats.ptc255 += er32(PTC255);
       
  3585 		adapter->stats.ptc511 += er32(PTC511);
       
  3586 		adapter->stats.ptc1023 += er32(PTC1023);
       
  3587 		adapter->stats.ptc1522 += er32(PTC1522);
       
  3588 	}
       
  3589 
       
  3590 	adapter->stats.mptc += er32(MPTC);
       
  3591 	adapter->stats.bptc += er32(BPTC);
       
  3592 
       
  3593 	/* used for adaptive IFS */
       
  3594 
       
  3595 	hw->tx_packet_delta = er32(TPT);
       
  3596 	adapter->stats.tpt += hw->tx_packet_delta;
       
  3597 	hw->collision_delta = er32(COLC);
       
  3598 	adapter->stats.colc += hw->collision_delta;
       
  3599 
       
  3600 	if (hw->mac_type >= e1000_82543) {
       
  3601 		adapter->stats.algnerrc += er32(ALGNERRC);
       
  3602 		adapter->stats.rxerrc += er32(RXERRC);
       
  3603 		adapter->stats.tncrs += er32(TNCRS);
       
  3604 		adapter->stats.cexterr += er32(CEXTERR);
       
  3605 		adapter->stats.tsctc += er32(TSCTC);
       
  3606 		adapter->stats.tsctfc += er32(TSCTFC);
       
  3607 	}
       
  3608 	if (hw->mac_type > e1000_82547_rev_2) {
       
  3609 		adapter->stats.iac += er32(IAC);
       
  3610 		adapter->stats.icrxoc += er32(ICRXOC);
       
  3611 
       
  3612 		if (hw->mac_type != e1000_ich8lan) {
       
  3613 			adapter->stats.icrxptc += er32(ICRXPTC);
       
  3614 			adapter->stats.icrxatc += er32(ICRXATC);
       
  3615 			adapter->stats.ictxptc += er32(ICTXPTC);
       
  3616 			adapter->stats.ictxatc += er32(ICTXATC);
       
  3617 			adapter->stats.ictxqec += er32(ICTXQEC);
       
  3618 			adapter->stats.ictxqmtc += er32(ICTXQMTC);
       
  3619 			adapter->stats.icrxdmtc += er32(ICRXDMTC);
       
  3620 		}
       
  3621 	}
       
  3622 
       
  3623 	/* Fill out the OS statistics structure */
       
  3624 	adapter->net_stats.multicast = adapter->stats.mprc;
       
  3625 	adapter->net_stats.collisions = adapter->stats.colc;
       
  3626 
       
  3627 	/* Rx Errors */
       
  3628 
       
  3629 	/* RLEC on some newer hardware can be incorrect so build
       
   3630 	 * our own version based on RUC and ROC */
       
  3631 	adapter->net_stats.rx_errors = adapter->stats.rxerrc +
       
  3632 		adapter->stats.crcerrs + adapter->stats.algnerrc +
       
  3633 		adapter->stats.ruc + adapter->stats.roc +
       
  3634 		adapter->stats.cexterr;
       
  3635 	adapter->stats.rlerrc = adapter->stats.ruc + adapter->stats.roc;
       
  3636 	adapter->net_stats.rx_length_errors = adapter->stats.rlerrc;
       
  3637 	adapter->net_stats.rx_crc_errors = adapter->stats.crcerrs;
       
  3638 	adapter->net_stats.rx_frame_errors = adapter->stats.algnerrc;
       
  3639 	adapter->net_stats.rx_missed_errors = adapter->stats.mpc;
       
  3640 
       
  3641 	/* Tx Errors */
       
  3642 	adapter->stats.txerrc = adapter->stats.ecol + adapter->stats.latecol;
       
  3643 	adapter->net_stats.tx_errors = adapter->stats.txerrc;
       
  3644 	adapter->net_stats.tx_aborted_errors = adapter->stats.ecol;
       
  3645 	adapter->net_stats.tx_window_errors = adapter->stats.latecol;
       
  3646 	adapter->net_stats.tx_carrier_errors = adapter->stats.tncrs;
       
  3647 	if (hw->bad_tx_carr_stats_fd &&
       
  3648 	    adapter->link_duplex == FULL_DUPLEX) {
       
  3649 		adapter->net_stats.tx_carrier_errors = 0;
       
  3650 		adapter->stats.tncrs = 0;
       
  3651 	}
       
  3652 
       
  3653 	/* Tx Dropped needs to be maintained elsewhere */
       
  3654 
       
  3655 	/* Phy Stats */
       
  3656 	if (hw->media_type == e1000_media_type_copper) {
       
  3657 		if ((adapter->link_speed == SPEED_1000) &&
       
  3658 		   (!e1000_read_phy_reg(hw, PHY_1000T_STATUS, &phy_tmp))) {
       
  3659 			phy_tmp &= PHY_IDLE_ERROR_COUNT_MASK;
       
  3660 			adapter->phy_stats.idle_errors += phy_tmp;
       
  3661 		}
       
  3662 
       
  3663 		if ((hw->mac_type <= e1000_82546) &&
       
  3664 		   (hw->phy_type == e1000_phy_m88) &&
       
  3665 		   !e1000_read_phy_reg(hw, M88E1000_RX_ERR_CNTR, &phy_tmp))
       
  3666 			adapter->phy_stats.receive_errors += phy_tmp;
       
  3667 	}
       
  3668 
       
  3669 	/* Management Stats */
       
  3670 	if (hw->has_smbus) {
       
  3671 		adapter->stats.mgptc += er32(MGTPTC);
       
  3672 		adapter->stats.mgprc += er32(MGTPRC);
       
  3673 		adapter->stats.mgpdc += er32(MGTPDC);
       
  3674 	}
       
  3675 
       
  3676 	spin_unlock_irqrestore(&adapter->stats_lock, flags);
       
  3677 }
       
  3678 
       
  3679 /**
       
  3680  * e1000_intr_msi - Interrupt Handler
       
  3681  * @irq: interrupt number
       
  3682  * @data: pointer to a network interface device structure
       
  3683  **/
       
  3684 
       
  3685 static irqreturn_t e1000_intr_msi(int irq, void *data)
       
  3686 {
       
  3687 	struct net_device *netdev = data;
       
  3688 	struct e1000_adapter *adapter = netdev_priv(netdev);
       
  3689 	struct e1000_hw *hw = &adapter->hw;
       
  3690 	u32 icr = er32(ICR);
       
  3691 
       
   3692 	/* in NAPI mode, reading ICR disables interrupts using IAM */
       
  3693 
       
  3694 	if (icr & (E1000_ICR_RXSEQ | E1000_ICR_LSC)) {
       
  3695 		hw->get_link_status = 1;
       
  3696 		/* 80003ES2LAN workaround-- For packet buffer work-around on
       
  3697 		 * link down event; disable receives here in the ISR and reset
       
  3698 		 * adapter in watchdog */
       
  3699 		if (netif_carrier_ok(netdev) &&
       
  3700 		    (hw->mac_type == e1000_80003es2lan)) {
       
  3701 			/* disable receives */
       
  3702 			u32 rctl = er32(RCTL);
       
  3703 			ew32(RCTL, rctl & ~E1000_RCTL_EN);
       
  3704 		}
       
  3705 		/* guard against interrupt when we're going down */
       
  3706 		if (!test_bit(__E1000_DOWN, &adapter->flags))
       
  3707 			mod_timer(&adapter->watchdog_timer, jiffies + 1);
       
  3708 	}
       
  3709 
       
  3710 	if (likely(netif_rx_schedule_prep(netdev, &adapter->napi))) {
       
  3711 		adapter->total_tx_bytes = 0;
       
  3712 		adapter->total_tx_packets = 0;
       
  3713 		adapter->total_rx_bytes = 0;
       
  3714 		adapter->total_rx_packets = 0;
       
  3715 		__netif_rx_schedule(netdev, &adapter->napi);
       
  3716 	} else
       
  3717 		e1000_irq_enable(adapter);
       
  3718 
       
  3719 	return IRQ_HANDLED;
       
  3720 }
       
  3721 
       
  3722 /**
       
  3723  * e1000_intr - Interrupt Handler
       
  3724  * @irq: interrupt number
       
  3725  * @data: pointer to a network interface device structure
       
  3726  **/
       
  3727 
       
  3728 static irqreturn_t e1000_intr(int irq, void *data)
       
  3729 {
       
  3730 	struct net_device *netdev = data;
       
  3731 	struct e1000_adapter *adapter = netdev_priv(netdev);
       
  3732 	struct e1000_hw *hw = &adapter->hw;
       
  3733 	u32 rctl, icr = er32(ICR);
       
  3734 
       
  3735 	if (unlikely((!icr) || test_bit(__E1000_RESETTING, &adapter->flags)))
       
  3736 		return IRQ_NONE;  /* Not our interrupt */
       
  3737 
       
  3738 	/* IMS will not auto-mask if INT_ASSERTED is not set, and if it is
       
  3739 	 * not set, then the adapter didn't send an interrupt */
       
  3740 	if (unlikely(hw->mac_type >= e1000_82571 &&
       
  3741 	             !(icr & E1000_ICR_INT_ASSERTED)))
       
  3742 		return IRQ_NONE;
       
  3743 
       
  3744 	/* Interrupt Auto-Mask...upon reading ICR, interrupts are masked.  No
       
  3745 	 * need for the IMC write */
       
  3746 
       
  3747 	if (unlikely(icr & (E1000_ICR_RXSEQ | E1000_ICR_LSC))) {
       
  3748 		hw->get_link_status = 1;
       
  3749 		/* 80003ES2LAN workaround--
       
  3750 		 * For packet buffer work-around on link down event;
       
  3751 		 * disable receives here in the ISR and
       
  3752 		 * reset adapter in watchdog
       
  3753 		 */
       
  3754 		if (netif_carrier_ok(netdev) &&
       
  3755 		    (hw->mac_type == e1000_80003es2lan)) {
       
  3756 			/* disable receives */
       
  3757 			rctl = er32(RCTL);
       
  3758 			ew32(RCTL, rctl & ~E1000_RCTL_EN);
       
  3759 		}
       
  3760 		/* guard against interrupt when we're going down */
       
  3761 		if (!test_bit(__E1000_DOWN, &adapter->flags))
       
  3762 			mod_timer(&adapter->watchdog_timer, jiffies + 1);
       
  3763 	}
       
  3764 
       
  3765 	if (unlikely(hw->mac_type < e1000_82571)) {
       
  3766 		/* disable interrupts, without the synchronize_irq bit */
       
  3767 		ew32(IMC, ~0);
       
  3768 		E1000_WRITE_FLUSH();
       
  3769 	}
       
  3770 	if (likely(netif_rx_schedule_prep(netdev, &adapter->napi))) {
       
  3771 		adapter->total_tx_bytes = 0;
       
  3772 		adapter->total_tx_packets = 0;
       
  3773 		adapter->total_rx_bytes = 0;
       
  3774 		adapter->total_rx_packets = 0;
       
  3775 		__netif_rx_schedule(netdev, &adapter->napi);
       
  3776 	} else
       
  3777 		/* this really should not happen! if it does it is basically a
       
  3778 		 * bug, but not a hard error, so enable ints and continue */
       
  3779 		e1000_irq_enable(adapter);
       
  3780 
       
  3781 	return IRQ_HANDLED;
       
  3782 }
       
  3783 
       
  3784 /**
       
  3785  * e1000_clean - NAPI Rx polling callback
       
   3786  * @napi: napi struct of the adapter being polled; @budget: Rx work limit
       
  3787  **/
       
  3788 static int e1000_clean(struct napi_struct *napi, int budget)
       
  3789 {
       
  3790 	struct e1000_adapter *adapter = container_of(napi, struct e1000_adapter, napi);
       
  3791 	struct net_device *poll_dev = adapter->netdev;
       
  3792 	int tx_cleaned = 0, work_done = 0;
       
  3793 
       
  3794 	/* Must NOT use netdev_priv macro here. */
       
  3795 	adapter = poll_dev->priv;
       
  3796 
       
  3797 	/* e1000_clean is called per-cpu.  This lock protects
       
  3798 	 * tx_ring[0] from being cleaned by multiple cpus
       
  3799 	 * simultaneously.  A failure obtaining the lock means
       
  3800 	 * tx_ring[0] is currently being cleaned anyway. */
       
  3801 	if (spin_trylock(&adapter->tx_queue_lock)) {
       
  3802 		tx_cleaned = e1000_clean_tx_irq(adapter,
       
  3803 						&adapter->tx_ring[0]);
       
  3804 		spin_unlock(&adapter->tx_queue_lock);
       
  3805 	}
       
  3806 
       
  3807 	adapter->clean_rx(adapter, &adapter->rx_ring[0],
       
  3808 	                  &work_done, budget);
       
  3809 
       
  3810 	if (tx_cleaned)
       
  3811 		work_done = budget;
       
  3812 
       
  3813 	/* If budget not fully consumed, exit the polling mode */
       
  3814 	if (work_done < budget) {
       
  3815 		if (likely(adapter->itr_setting & 3))
       
  3816 			e1000_set_itr(adapter);
       
  3817 		netif_rx_complete(poll_dev, napi);
       
  3818 		e1000_irq_enable(adapter);
       
  3819 	}
       
  3820 
       
  3821 	return work_done;
       
  3822 }
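       
        /* Editor's note -- the NAPI contract implemented above: returning
         * work_done == budget keeps the device on the poll list with
         * interrupts still masked, while work_done < budget means the
         * rings drained, so the poll is completed and interrupts are
         * re-enabled.  Treating any Tx cleanup as a full budget is a
         * conservative way to stay in polling mode while transmit work
         * remains. */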
       
  3823 
       
  3824 /**
       
  3825  * e1000_clean_tx_irq - Reclaim resources after transmit completes
       
  3826  * @adapter: board private structure
       
  3827  **/
       
  3828 static bool e1000_clean_tx_irq(struct e1000_adapter *adapter,
       
  3829 			       struct e1000_tx_ring *tx_ring)
       
  3830 {
       
  3831 	struct e1000_hw *hw = &adapter->hw;
       
  3832 	struct net_device *netdev = adapter->netdev;
       
  3833 	struct e1000_tx_desc *tx_desc, *eop_desc;
       
  3834 	struct e1000_buffer *buffer_info;
       
  3835 	unsigned int i, eop;
       
  3836 	unsigned int count = 0;
       
  3837 	bool cleaned = false;
       
   3838 	unsigned int total_tx_bytes = 0, total_tx_packets = 0;
       
  3839 
       
  3840 	i = tx_ring->next_to_clean;
       
  3841 	eop = tx_ring->buffer_info[i].next_to_watch;
       
  3842 	eop_desc = E1000_TX_DESC(*tx_ring, eop);
       
  3843 
       
  3844 	while (eop_desc->upper.data & cpu_to_le32(E1000_TXD_STAT_DD)) {
       
  3845 		for (cleaned = false; !cleaned; ) {
       
  3846 			tx_desc = E1000_TX_DESC(*tx_ring, i);
       
  3847 			buffer_info = &tx_ring->buffer_info[i];
       
  3848 			cleaned = (i == eop);
       
  3849 
       
  3850 			if (cleaned) {
       
  3851 				struct sk_buff *skb = buffer_info->skb;
       
  3852 				unsigned int segs, bytecount;
       
  3853 				segs = skb_shinfo(skb)->gso_segs ?: 1;
       
  3854 				/* multiply data chunks by size of headers */
       
  3855 				bytecount = ((segs - 1) * skb_headlen(skb)) +
       
  3856 				            skb->len;
       
  3857 				total_tx_packets += segs;
       
  3858 				total_tx_bytes += bytecount;
       
  3859 			}
       
  3860 			e1000_unmap_and_free_tx_resource(adapter, buffer_info);
       
  3861 			tx_desc->upper.data = 0;
       
  3862 
       
  3863 			if (unlikely(++i == tx_ring->count)) i = 0;
       
  3864 		}
       
  3865 
       
  3866 		eop = tx_ring->buffer_info[i].next_to_watch;
       
  3867 		eop_desc = E1000_TX_DESC(*tx_ring, eop);
       
  3868 #define E1000_TX_WEIGHT 64
       
  3869 		/* weight of a sort for tx, to avoid endless transmit cleanup */
       
  3870 		if (count++ == E1000_TX_WEIGHT)
       
  3871 			break;
       
  3872 	}
       
  3873 
       
  3874 	tx_ring->next_to_clean = i;
       
  3875 
       
  3876 #define TX_WAKE_THRESHOLD 32
       
  3877 	if (unlikely(cleaned && netif_carrier_ok(netdev) &&
       
  3878 		     E1000_DESC_UNUSED(tx_ring) >= TX_WAKE_THRESHOLD)) {
       
  3879 		/* Make sure that anybody stopping the queue after this
       
  3880 		 * sees the new next_to_clean.
       
  3881 		 */
       
  3882 		smp_mb();
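        		/* presumed to pair with the barrier in the transmit
        		 * path's queue-stop check, closing the race between
        		 * stopping and waking the queue */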
       
  3883 		if (netif_queue_stopped(netdev)) {
       
  3884 			netif_wake_queue(netdev);
       
  3885 			++adapter->restart_queue;
       
  3886 		}
       
  3887 	}
       
  3888 
       
  3889 	if (adapter->detect_tx_hung) {
       
  3890 		/* Detect a transmit hang in hardware, this serializes the
       
  3891 		 * check with the clearing of time_stamp and movement of i */
       
  3892 		adapter->detect_tx_hung = false;
       
  3893 		if (tx_ring->buffer_info[eop].dma &&
       
  3894 		    time_after(jiffies, tx_ring->buffer_info[eop].time_stamp +
       
  3895 		               (adapter->tx_timeout_factor * HZ))
       
  3896 		    && !(er32(STATUS) & E1000_STATUS_TXOFF)) {
       
  3897 
       
  3898 			/* detected Tx unit hang */
       
  3899 			DPRINTK(DRV, ERR, "Detected Tx Unit Hang\n"
       
  3900 					"  Tx Queue             <%lu>\n"
       
  3901 					"  TDH                  <%x>\n"
       
  3902 					"  TDT                  <%x>\n"
       
  3903 					"  next_to_use          <%x>\n"
       
  3904 					"  next_to_clean        <%x>\n"
       
  3905 					"buffer_info[next_to_clean]\n"
       
  3906 					"  time_stamp           <%lx>\n"
       
  3907 					"  next_to_watch        <%x>\n"
       
  3908 					"  jiffies              <%lx>\n"
       
  3909 					"  next_to_watch.status <%x>\n",
       
   3910 				/* pointer subtraction already yields an
        				 * element index; dividing by the struct
        				 * size again would always print 0 */
        				(unsigned long)(tx_ring - adapter->tx_ring),
       
  3912 				readl(hw->hw_addr + tx_ring->tdh),
       
  3913 				readl(hw->hw_addr + tx_ring->tdt),
       
  3914 				tx_ring->next_to_use,
       
  3915 				tx_ring->next_to_clean,
       
  3916 				tx_ring->buffer_info[eop].time_stamp,
       
  3917 				eop,
       
  3918 				jiffies,
       
  3919 				eop_desc->upper.fields.status);
       
  3920 			netif_stop_queue(netdev);
       
  3921 		}
       
  3922 	}
       
  3923 	adapter->total_tx_bytes += total_tx_bytes;
       
  3924 	adapter->total_tx_packets += total_tx_packets;
       
  3925 	adapter->net_stats.tx_bytes += total_tx_bytes;
       
  3926 	adapter->net_stats.tx_packets += total_tx_packets;
       
  3927 	return cleaned;
       
  3928 }
       
  3929 
       
  3930 /**
       
   3931  * e1000_rx_checksum - Receive Checksum Offload for 82543 and newer
       
  3932  * @adapter:     board private structure
       
  3933  * @status_err:  receive descriptor status and error fields
       
  3934  * @csum:        receive descriptor csum field
       
  3935  * @sk_buff:     socket buffer with received data
       
  3936  **/
       
  3937 
       
  3938 static void e1000_rx_checksum(struct e1000_adapter *adapter, u32 status_err,
       
  3939 			      u32 csum, struct sk_buff *skb)
       
  3940 {
       
  3941 	struct e1000_hw *hw = &adapter->hw;
       
  3942 	u16 status = (u16)status_err;
       
  3943 	u8 errors = (u8)(status_err >> 24);
       
  3944 	skb->ip_summed = CHECKSUM_NONE;
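        	/* start from "no offload result": unless the hardware vouches
        	 * for the checksum below, the stack verifies it in software */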
       
  3945 
       
  3946 	/* 82543 or newer only */
       
  3947 	if (unlikely(hw->mac_type < e1000_82543)) return;
       
  3948 	/* Ignore Checksum bit is set */
       
  3949 	if (unlikely(status & E1000_RXD_STAT_IXSM)) return;
       
  3950 	/* TCP/UDP checksum error bit is set */
       
  3951 	if (unlikely(errors & E1000_RXD_ERR_TCPE)) {
       
  3952 		/* let the stack verify checksum errors */
       
  3953 		adapter->hw_csum_err++;
       
  3954 		return;
       
  3955 	}
       
  3956 	/* TCP/UDP Checksum has not been calculated */
       
  3957 	if (hw->mac_type <= e1000_82547_rev_2) {
       
  3958 		if (!(status & E1000_RXD_STAT_TCPCS))
       
  3959 			return;
       
  3960 	} else {
       
  3961 		if (!(status & (E1000_RXD_STAT_TCPCS | E1000_RXD_STAT_UDPCS)))
       
  3962 			return;
       
  3963 	}
       
  3964 	/* It must be a TCP or UDP packet with a valid checksum */
       
  3965 	if (likely(status & E1000_RXD_STAT_TCPCS)) {
       
  3966 		/* TCP checksum is good */
       
  3967 		skb->ip_summed = CHECKSUM_UNNECESSARY;
       
  3968 	} else if (hw->mac_type > e1000_82547_rev_2) {
       
  3969 		/* IP fragment with UDP payload */
       
  3970 		/* Hardware complements the payload checksum, so we undo it
       
  3971 		 * and then put the value in host order for further stack use.
       
  3972 		 */
       
  3973 		__sum16 sum = (__force __sum16)htons(csum);
       
  3974 		skb->csum = csum_unfold(~sum);
       
  3975 		skb->ip_summed = CHECKSUM_COMPLETE;
       
  3976 	}
       
  3977 	adapter->hw_csum_good++;
       
  3978 }
       
  3979 
       
  3980 /**
       
  3981  * e1000_clean_rx_irq - Send received data up the network stack; legacy
       
   3982  * @adapter: board private structure
         * @rx_ring: rx ring to clean
         * @work_done: incremented for each packet passed up the stack
         * @work_to_do: remaining NAPI budget
       
  3983  **/
       
  3984 static bool e1000_clean_rx_irq(struct e1000_adapter *adapter,
       
  3985 			       struct e1000_rx_ring *rx_ring,
       
  3986 			       int *work_done, int work_to_do)
       
  3987 {
       
  3988 	struct e1000_hw *hw = &adapter->hw;
       
  3989 	struct net_device *netdev = adapter->netdev;
       
  3990 	struct pci_dev *pdev = adapter->pdev;
       
  3991 	struct e1000_rx_desc *rx_desc, *next_rxd;
       
  3992 	struct e1000_buffer *buffer_info, *next_buffer;
       
  3993 	unsigned long flags;
       
  3994 	u32 length;
       
  3995 	u8 last_byte;
       
  3996 	unsigned int i;
       
  3997 	int cleaned_count = 0;
       
  3998 	bool cleaned = false;
       
   3999 	unsigned int total_rx_bytes = 0, total_rx_packets = 0;
       
  4000 
       
  4001 	i = rx_ring->next_to_clean;
       
  4002 	rx_desc = E1000_RX_DESC(*rx_ring, i);
       
  4003 	buffer_info = &rx_ring->buffer_info[i];
       
  4004 
       
  4005 	while (rx_desc->status & E1000_RXD_STAT_DD) {
       
  4006 		struct sk_buff *skb;
       
  4007 		u8 status;
       
  4008 
       
  4009 		if (*work_done >= work_to_do)
       
  4010 			break;
       
  4011 		(*work_done)++;
       
  4012 
       
  4013 		status = rx_desc->status;
       
  4014 		skb = buffer_info->skb;
       
  4015 		buffer_info->skb = NULL;
       
  4016 
       
  4017 		prefetch(skb->data - NET_IP_ALIGN);
       
  4018 
       
  4019 		if (++i == rx_ring->count) i = 0;
       
  4020 		next_rxd = E1000_RX_DESC(*rx_ring, i);
       
  4021 		prefetch(next_rxd);
       
  4022 
       
  4023 		next_buffer = &rx_ring->buffer_info[i];
       
  4024 
       
  4025 		cleaned = true;
       
  4026 		cleaned_count++;
       
  4027 		pci_unmap_single(pdev,
       
  4028 		                 buffer_info->dma,
       
  4029 		                 buffer_info->length,
       
  4030 		                 PCI_DMA_FROMDEVICE);
       
  4031 
       
  4032 		length = le16_to_cpu(rx_desc->length);
       
  4033 
       
  4034 		if (unlikely(!(status & E1000_RXD_STAT_EOP))) {
       
  4035 			/* All receives must fit into a single buffer */
       
  4036 			E1000_DBG("%s: Receive packet consumed multiple"
       
  4037 				  " buffers\n", netdev->name);
       
  4038 			/* recycle */
       
  4039 			buffer_info->skb = skb;
       
  4040 			goto next_desc;
       
  4041 		}
       
  4042 
       
  4043 		if (unlikely(rx_desc->errors & E1000_RXD_ERR_FRAME_ERR_MASK)) {
       
  4044 			last_byte = *(skb->data + length - 1);
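        			/* TBI workaround: a frame whose only defect is
        			 * a carrier-extend symbol in its last byte is
        			 * still good; accept it and trim that byte */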
       
  4045 			if (TBI_ACCEPT(hw, status, rx_desc->errors, length,
       
  4046 				       last_byte)) {
       
  4047 				spin_lock_irqsave(&adapter->stats_lock, flags);
       
  4048 				e1000_tbi_adjust_stats(hw, &adapter->stats,
       
  4049 				                       length, skb->data);
       
  4050 				spin_unlock_irqrestore(&adapter->stats_lock,
       
  4051 				                       flags);
       
  4052 				length--;
       
  4053 			} else {
       
  4054 				/* recycle */
       
  4055 				buffer_info->skb = skb;
       
  4056 				goto next_desc;
       
  4057 			}
       
  4058 		}
       
  4059 
       
  4060 		/* adjust length to remove Ethernet CRC, this must be
       
  4061 		 * done after the TBI_ACCEPT workaround above */
       
  4062 		length -= 4;
       
  4063 
       
  4064 		/* probably a little skewed due to removing CRC */
       
  4065 		total_rx_bytes += length;
       
  4066 		total_rx_packets++;
       
  4067 
       
  4068 		/* code added for copybreak, this should improve
       
  4069 		 * performance for small packets with large amounts
       
  4070 		 * of reassembly being done in the stack */
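        		/* packets shorter than copybreak (a module parameter,
        		 * 256 bytes by default) are copied into a fresh small
        		 * skb so the full-sized rx buffer can be recycled
        		 * immediately */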
       
  4071 		if (length < copybreak) {
       
  4072 			struct sk_buff *new_skb =
       
  4073 			    netdev_alloc_skb(netdev, length + NET_IP_ALIGN);
       
  4074 			if (new_skb) {
       
  4075 				skb_reserve(new_skb, NET_IP_ALIGN);
       
  4076 				skb_copy_to_linear_data_offset(new_skb,
       
  4077 							       -NET_IP_ALIGN,
       
  4078 							       (skb->data -
       
  4079 							        NET_IP_ALIGN),
       
  4080 							       (length +
       
  4081 							        NET_IP_ALIGN));
       
  4082 				/* save the skb in buffer_info as good */
       
  4083 				buffer_info->skb = skb;
       
  4084 				skb = new_skb;
       
  4085 			}
       
  4086 			/* else just continue with the old one */
       
  4087 		}
       
  4088 		/* end copybreak code */
       
  4089 		skb_put(skb, length);
       
  4090 
       
  4091 		/* Receive Checksum Offload */
       
  4092 		e1000_rx_checksum(adapter,
       
  4093 				  (u32)(status) |
       
  4094 				  ((u32)(rx_desc->errors) << 24),
       
  4095 				  le16_to_cpu(rx_desc->csum), skb);
       
  4096 
       
  4097 		skb->protocol = eth_type_trans(skb, netdev);
       
  4098 
       
  4099 		if (unlikely(adapter->vlgrp &&
       
  4100 			    (status & E1000_RXD_STAT_VP))) {
       
  4101 			vlan_hwaccel_receive_skb(skb, adapter->vlgrp,
       
  4102 						 le16_to_cpu(rx_desc->special));
       
  4103 		} else {
       
  4104 			netif_receive_skb(skb);
       
  4105 		}
       
  4106 
       
  4107 		netdev->last_rx = jiffies;
       
  4108 
       
  4109 next_desc:
       
  4110 		rx_desc->status = 0;
       
  4111 
       
  4112 		/* return some buffers to hardware, one at a time is too slow */
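        		/* batching E1000_RX_BUFFER_WRITE descriptors per refill
        		 * amortizes the MMIO write to the tail register */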
       
  4113 		if (unlikely(cleaned_count >= E1000_RX_BUFFER_WRITE)) {
       
  4114 			adapter->alloc_rx_buf(adapter, rx_ring, cleaned_count);
       
  4115 			cleaned_count = 0;
       
  4116 		}
       
  4117 
       
  4118 		/* use prefetched values */
       
  4119 		rx_desc = next_rxd;
       
  4120 		buffer_info = next_buffer;
       
  4121 	}
       
  4122 	rx_ring->next_to_clean = i;
       
  4123 
       
  4124 	cleaned_count = E1000_DESC_UNUSED(rx_ring);
       
  4125 	if (cleaned_count)
       
  4126 		adapter->alloc_rx_buf(adapter, rx_ring, cleaned_count);
       
  4127 
       
  4128 	adapter->total_rx_packets += total_rx_packets;
       
  4129 	adapter->total_rx_bytes += total_rx_bytes;
       
  4130 	adapter->net_stats.rx_bytes += total_rx_bytes;
       
  4131 	adapter->net_stats.rx_packets += total_rx_packets;
       
  4132 	return cleaned;
       
  4133 }
       
  4134 
       
  4135 /**
       
  4136  * e1000_alloc_rx_buffers - Replace used receive buffers; legacy & extended
       
   4137  * @adapter: address of board private structure
         * @rx_ring: rx ring to refill
         * @cleaned_count: number of buffers to allocate
       
  4138  **/
       
  4139 
       
  4140 static void e1000_alloc_rx_buffers(struct e1000_adapter *adapter,
       
  4141 				   struct e1000_rx_ring *rx_ring,
       
  4142 				   int cleaned_count)
       
  4143 {
       
  4144 	struct e1000_hw *hw = &adapter->hw;
       
  4145 	struct net_device *netdev = adapter->netdev;
       
  4146 	struct pci_dev *pdev = adapter->pdev;
       
  4147 	struct e1000_rx_desc *rx_desc;
       
  4148 	struct e1000_buffer *buffer_info;
       
  4149 	struct sk_buff *skb;
       
  4150 	unsigned int i;
       
  4151 	unsigned int bufsz = adapter->rx_buffer_len + NET_IP_ALIGN;
       
  4152 
       
  4153 	i = rx_ring->next_to_use;
       
  4154 	buffer_info = &rx_ring->buffer_info[i];
       
  4155 
       
  4156 	while (cleaned_count--) {
       
  4157 		skb = buffer_info->skb;
       
  4158 		if (skb) {
       
  4159 			skb_trim(skb, 0);
       
  4160 			goto map_skb;
       
  4161 		}
       
  4162 
       
  4163 		skb = netdev_alloc_skb(netdev, bufsz);
       
  4164 		if (unlikely(!skb)) {
       
  4165 			/* Better luck next round */
       
  4166 			adapter->alloc_rx_buff_failed++;
       
  4167 			break;
       
  4168 		}
       
  4169 
       
  4170 		/* Fix for errata 23, can't cross 64kB boundary */
       
  4171 		if (!e1000_check_64k_bound(adapter, skb->data, bufsz)) {
       
  4172 			struct sk_buff *oldskb = skb;
       
  4173 			DPRINTK(RX_ERR, ERR, "skb align check failed: %u bytes "
       
  4174 					     "at %p\n", bufsz, skb->data);
       
  4175 			/* Try again, without freeing the previous */
       
  4176 			skb = netdev_alloc_skb(netdev, bufsz);
       
  4177 			/* Failed allocation, critical failure */
       
  4178 			if (!skb) {
       
  4179 				dev_kfree_skb(oldskb);
       
  4180 				break;
       
  4181 			}
       
  4182 
       
  4183 			if (!e1000_check_64k_bound(adapter, skb->data, bufsz)) {
       
  4184 				/* give up */
       
  4185 				dev_kfree_skb(skb);
       
  4186 				dev_kfree_skb(oldskb);
       
  4187 				break; /* while !buffer_info->skb */
       
  4188 			}
       
  4189 
       
  4190 			/* Use new allocation */
       
  4191 			dev_kfree_skb(oldskb);
       
  4192 		}
       
  4193 		/* Make buffer alignment 2 beyond a 16 byte boundary
       
  4194 		 * this will result in a 16 byte aligned IP header after
       
  4195 		 * the 14 byte MAC header is removed
       
  4196 		 */
       
  4197 		skb_reserve(skb, NET_IP_ALIGN);
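        		/* NET_IP_ALIGN is 2 on most architectures: 2 bytes of
        		 * headroom plus the 14 byte Ethernet header leaves the
        		 * IP header 16 byte aligned */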
       
  4198 
       
  4199 		buffer_info->skb = skb;
       
  4200 		buffer_info->length = adapter->rx_buffer_len;
       
  4201 map_skb:
       
  4202 		buffer_info->dma = pci_map_single(pdev,
       
  4203 						  skb->data,
       
  4204 						  adapter->rx_buffer_len,
       
  4205 						  PCI_DMA_FROMDEVICE);
       
  4206 
       
  4207 		/* Fix for errata 23, can't cross 64kB boundary */
       
  4208 		if (!e1000_check_64k_bound(adapter,
       
  4209 					(void *)(unsigned long)buffer_info->dma,
       
  4210 					adapter->rx_buffer_len)) {
       
  4211 			DPRINTK(RX_ERR, ERR,
       
  4212 				"dma align check failed: %u bytes at %p\n",
       
  4213 				adapter->rx_buffer_len,
       
  4214 				(void *)(unsigned long)buffer_info->dma);
       
  4215 			dev_kfree_skb(skb);
       
  4216 			buffer_info->skb = NULL;
       
  4217 
       
  4218 			pci_unmap_single(pdev, buffer_info->dma,
       
  4219 					 adapter->rx_buffer_len,
       
  4220 					 PCI_DMA_FROMDEVICE);
       
  4221 
       
  4222 			break; /* while !buffer_info->skb */
       
  4223 		}
       
  4224 		rx_desc = E1000_RX_DESC(*rx_ring, i);
       
  4225 		rx_desc->buffer_addr = cpu_to_le64(buffer_info->dma);
       
  4226 
       
  4227 		if (unlikely(++i == rx_ring->count))
       
  4228 			i = 0;
       
  4229 		buffer_info = &rx_ring->buffer_info[i];
       
  4230 	}
       
  4231 
       
  4232 	if (likely(rx_ring->next_to_use != i)) {
       
  4233 		rx_ring->next_to_use = i;
       
  4234 		if (unlikely(i-- == 0))
       
  4235 			i = (rx_ring->count - 1);
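        		/* i is now the last descriptor actually prepared; the
        		 * tail is kept one behind next_to_use so the ring always
        		 * has one unused slot and full never looks like empty */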
       
  4236 
       
  4237 		/* Force memory writes to complete before letting h/w
       
  4238 		 * know there are new descriptors to fetch.  (Only
       
  4239 		 * applicable for weak-ordered memory model archs,
       
  4240 		 * such as IA-64). */
       
  4241 		wmb();
       
  4242 		writel(i, hw->hw_addr + rx_ring->rdt);
       
  4243 	}
       
  4244 }
       
  4245 
       
  4246 /**
       
  4247  * e1000_smartspeed - Workaround for SmartSpeed on 82541 and 82547 controllers.
       
   4248  * @adapter: board private structure
       
  4249  **/
       
  4250 
       
  4251 static void e1000_smartspeed(struct e1000_adapter *adapter)
       
  4252 {
       
  4253 	struct e1000_hw *hw = &adapter->hw;
       
  4254 	u16 phy_status;
       
  4255 	u16 phy_ctrl;
       
  4256 
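        	/* SmartSpeed: if gigabit master/slave negotiation keeps
        	 * faulting, clear the manual master/slave setting and restart
        	 * autoneg; if there is still no link after
        	 * E1000_SMARTSPEED_DOWNSHIFT polls (perhaps a 2/3 pair cable),
        	 * turn the manual setting back on and try again */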
       
  4257 	if ((hw->phy_type != e1000_phy_igp) || !hw->autoneg ||
       
  4258 	   !(hw->autoneg_advertised & ADVERTISE_1000_FULL))
       
  4259 		return;
       
  4260 
       
  4261 	if (adapter->smartspeed == 0) {
       
  4262 		/* If Master/Slave config fault is asserted twice,
       
  4263 		 * we assume back-to-back */
       
  4264 		e1000_read_phy_reg(hw, PHY_1000T_STATUS, &phy_status);
       
  4265 		if (!(phy_status & SR_1000T_MS_CONFIG_FAULT)) return;
       
  4266 		e1000_read_phy_reg(hw, PHY_1000T_STATUS, &phy_status);
       
  4267 		if (!(phy_status & SR_1000T_MS_CONFIG_FAULT)) return;
       
  4268 		e1000_read_phy_reg(hw, PHY_1000T_CTRL, &phy_ctrl);
       
  4269 		if (phy_ctrl & CR_1000T_MS_ENABLE) {
       
  4270 			phy_ctrl &= ~CR_1000T_MS_ENABLE;
       
  4271 			e1000_write_phy_reg(hw, PHY_1000T_CTRL,
       
  4272 					    phy_ctrl);
       
  4273 			adapter->smartspeed++;
       
  4274 			if (!e1000_phy_setup_autoneg(hw) &&
       
  4275 			   !e1000_read_phy_reg(hw, PHY_CTRL,
       
  4276 				   	       &phy_ctrl)) {
       
  4277 				phy_ctrl |= (MII_CR_AUTO_NEG_EN |
       
  4278 					     MII_CR_RESTART_AUTO_NEG);
       
  4279 				e1000_write_phy_reg(hw, PHY_CTRL,
       
  4280 						    phy_ctrl);
       
  4281 			}
       
  4282 		}
       
  4283 		return;
       
  4284 	} else if (adapter->smartspeed == E1000_SMARTSPEED_DOWNSHIFT) {
       
  4285 		/* If still no link, perhaps using 2/3 pair cable */
       
  4286 		e1000_read_phy_reg(hw, PHY_1000T_CTRL, &phy_ctrl);
       
  4287 		phy_ctrl |= CR_1000T_MS_ENABLE;
       
  4288 		e1000_write_phy_reg(hw, PHY_1000T_CTRL, phy_ctrl);
       
  4289 		if (!e1000_phy_setup_autoneg(hw) &&
       
  4290 		   !e1000_read_phy_reg(hw, PHY_CTRL, &phy_ctrl)) {
       
  4291 			phy_ctrl |= (MII_CR_AUTO_NEG_EN |
       
  4292 				     MII_CR_RESTART_AUTO_NEG);
       
  4293 			e1000_write_phy_reg(hw, PHY_CTRL, phy_ctrl);
       
  4294 		}
       
  4295 	}
       
  4296 	/* Restart process after E1000_SMARTSPEED_MAX iterations */
       
  4297 	if (adapter->smartspeed++ == E1000_SMARTSPEED_MAX)
       
  4298 		adapter->smartspeed = 0;
       
  4299 }
       
  4300 
       
  4301 /**
       
   4302  * e1000_ioctl - handle ioctl calls for the net device
       
   4303  * @netdev: network interface device structure
       
   4304  * @ifr: pointer to the interface request structure
       
   4305  * @cmd: ioctl command number
       
  4306  **/
       
  4307 
       
  4308 static int e1000_ioctl(struct net_device *netdev, struct ifreq *ifr, int cmd)
       
  4309 {
       
  4310 	switch (cmd) {
       
  4311 	case SIOCGMIIPHY:
       
  4312 	case SIOCGMIIREG:
       
  4313 	case SIOCSMIIREG:
       
  4314 		return e1000_mii_ioctl(netdev, ifr, cmd);
       
  4315 	default:
       
  4316 		return -EOPNOTSUPP;
       
  4317 	}
       
  4318 }
       
  4319 
       
  4320 /**
       
   4321  * e1000_mii_ioctl - handle MII register ioctls
       
   4322  * @netdev: network interface device structure
       
   4323  * @ifr: pointer to the interface request structure
       
   4324  * @cmd: SIOCGMIIPHY, SIOCGMIIREG or SIOCSMIIREG
       
  4325  **/
       
  4326 
       
  4327 static int e1000_mii_ioctl(struct net_device *netdev, struct ifreq *ifr,
       
  4328 			   int cmd)
       
  4329 {
       
  4330 	struct e1000_adapter *adapter = netdev_priv(netdev);
       
  4331 	struct e1000_hw *hw = &adapter->hw;
       
  4332 	struct mii_ioctl_data *data = if_mii(ifr);
       
  4333 	int retval;
       
  4334 	u16 mii_reg;
       
  4335 	u16 spddplx;
       
  4336 	unsigned long flags;
       
  4337 
       
  4338 	if (hw->media_type != e1000_media_type_copper)
       
  4339 		return -EOPNOTSUPP;
       
  4340 
       
  4341 	switch (cmd) {
       
  4342 	case SIOCGMIIPHY:
       
  4343 		data->phy_id = hw->phy_addr;
       
  4344 		break;
       
  4345 	case SIOCGMIIREG:
       
  4346 		if (!capable(CAP_NET_ADMIN))
       
  4347 			return -EPERM;
       
  4348 		spin_lock_irqsave(&adapter->stats_lock, flags);
       
  4349 		if (e1000_read_phy_reg(hw, data->reg_num & 0x1F,
       
  4350 				   &data->val_out)) {
       
  4351 			spin_unlock_irqrestore(&adapter->stats_lock, flags);
       
  4352 			return -EIO;
       
  4353 		}
       
  4354 		spin_unlock_irqrestore(&adapter->stats_lock, flags);
       
  4355 		break;
       
  4356 	case SIOCSMIIREG:
       
  4357 		if (!capable(CAP_NET_ADMIN))
       
  4358 			return -EPERM;
       
  4359 		if (data->reg_num & ~(0x1F))
       
  4360 			return -EFAULT;
       
  4361 		mii_reg = data->val_in;
       
  4362 		spin_lock_irqsave(&adapter->stats_lock, flags);
       
  4363 		if (e1000_write_phy_reg(hw, data->reg_num,
       
  4364 					mii_reg)) {
       
  4365 			spin_unlock_irqrestore(&adapter->stats_lock, flags);
       
  4366 			return -EIO;
       
  4367 		}
       
  4368 		spin_unlock_irqrestore(&adapter->stats_lock, flags);
       
  4369 		if (hw->media_type == e1000_media_type_copper) {
       
  4370 			switch (data->reg_num) {
       
  4371 			case PHY_CTRL:
       
  4372 				if (mii_reg & MII_CR_POWER_DOWN)
       
  4373 					break;
       
  4374 				if (mii_reg & MII_CR_AUTO_NEG_EN) {
       
  4375 					hw->autoneg = 1;
       
   4376 					hw->autoneg_advertised = 0x2F;	/* 10/100 half+full, 1000 full */
       
  4377 				} else {
       
   4378 					if (mii_reg & 0x40)	/* speed select MSB: 1000 Mb/s */
       
  4379 						spddplx = SPEED_1000;
       
   4380 					else if (mii_reg & 0x2000)	/* speed select LSB: 100 Mb/s */
       
  4381 						spddplx = SPEED_100;
       
  4382 					else
       
  4383 						spddplx = SPEED_10;
       
   4384 					spddplx += (mii_reg & 0x100)	/* duplex bit */
       
  4385 						   ? DUPLEX_FULL :
       
  4386 						   DUPLEX_HALF;
       
  4387 					retval = e1000_set_spd_dplx(adapter,
       
  4388 								    spddplx);
       
  4389 					if (retval)
       
  4390 						return retval;
       
  4391 				}
       
  4392 				if (netif_running(adapter->netdev))
       
  4393 					e1000_reinit_locked(adapter);
       
  4394 				else
       
  4395 					e1000_reset(adapter);
       
  4396 				break;
       
  4397 			case M88E1000_PHY_SPEC_CTRL:
       
  4398 			case M88E1000_EXT_PHY_SPEC_CTRL:
       
  4399 				if (e1000_phy_reset(hw))
       
  4400 					return -EIO;
       
  4401 				break;
       
  4402 			}
       
  4403 		} else {
       
  4404 			switch (data->reg_num) {
       
  4405 			case PHY_CTRL:
       
  4406 				if (mii_reg & MII_CR_POWER_DOWN)
       
  4407 					break;
       
  4408 				if (netif_running(adapter->netdev))
       
  4409 					e1000_reinit_locked(adapter);
       
  4410 				else
       
  4411 					e1000_reset(adapter);
       
  4412 				break;
       
  4413 			}
       
  4414 		}
       
  4415 		break;
       
  4416 	default:
       
  4417 		return -EOPNOTSUPP;
       
  4418 	}
       
  4419 	return E1000_SUCCESS;
       
  4420 }
       
  4421 
       
  4422 void e1000_pci_set_mwi(struct e1000_hw *hw)
       
  4423 {
       
  4424 	struct e1000_adapter *adapter = hw->back;
       
  4425 	int ret_val = pci_set_mwi(adapter->pdev);
       
  4426 
       
  4427 	if (ret_val)
       
  4428 		DPRINTK(PROBE, ERR, "Error in setting MWI\n");
       
  4429 }
       
  4430 
       
  4431 void e1000_pci_clear_mwi(struct e1000_hw *hw)
       
  4432 {
       
  4433 	struct e1000_adapter *adapter = hw->back;
       
  4434 
       
  4435 	pci_clear_mwi(adapter->pdev);
       
  4436 }
       
  4437 
       
  4438 int e1000_pcix_get_mmrbc(struct e1000_hw *hw)
       
  4439 {
       
  4440 	struct e1000_adapter *adapter = hw->back;
       
  4441 	return pcix_get_mmrbc(adapter->pdev);
       
  4442 }
       
  4443 
       
  4444 void e1000_pcix_set_mmrbc(struct e1000_hw *hw, int mmrbc)
       
  4445 {
       
  4446 	struct e1000_adapter *adapter = hw->back;
       
  4447 	pcix_set_mmrbc(adapter->pdev, mmrbc);
       
  4448 }
       
  4449 
       
   4450 s32 e1000_read_pcie_cap_reg(struct e1000_hw *hw, u32 reg, u16 *value)
       
   4451 {
       
   4452 	struct e1000_adapter *adapter = hw->back;
       
   4453 	u16 cap_offset;
       
   4454 
       
   4455 	cap_offset = pci_find_capability(adapter->pdev, PCI_CAP_ID_EXP);
       
   4456 	if (!cap_offset)
       
   4457 		return -E1000_ERR_CONFIG;
       
   4458 
       
   4459 	pci_read_config_word(adapter->pdev, cap_offset + reg, value);
       
   4460 
       
   4461 	return E1000_SUCCESS;
       
   4462 }
       
  4463 
       
  4464 void e1000_io_write(struct e1000_hw *hw, unsigned long port, u32 value)
       
  4465 {
       
  4466 	outl(value, port);
       
  4467 }
       
  4468 
       
  4469 static void e1000_vlan_rx_register(struct net_device *netdev,
       
  4470 				   struct vlan_group *grp)
       
  4471 {
       
  4472 	struct e1000_adapter *adapter = netdev_priv(netdev);
       
  4473 	struct e1000_hw *hw = &adapter->hw;
       
  4474 	u32 ctrl, rctl;
       
  4475 
       
  4476 	if (!test_bit(__E1000_DOWN, &adapter->flags))
       
  4477 		e1000_irq_disable(adapter);
       
  4478 	adapter->vlgrp = grp;
       
  4479 
       
  4480 	if (grp) {
       
  4481 		/* enable VLAN tag insert/strip */
       
  4482 		ctrl = er32(CTRL);
       
  4483 		ctrl |= E1000_CTRL_VME;
       
  4484 		ew32(CTRL, ctrl);
       
  4485 
       
  4486 		if (adapter->hw.mac_type != e1000_ich8lan) {
       
  4487 			/* enable VLAN receive filtering */
       
  4488 			rctl = er32(RCTL);
       
  4489 			rctl &= ~E1000_RCTL_CFIEN;
       
  4490 			ew32(RCTL, rctl);
       
  4491 			e1000_update_mng_vlan(adapter);
       
  4492 		}
       
  4493 	} else {
       
  4494 		/* disable VLAN tag insert/strip */
       
  4495 		ctrl = er32(CTRL);
       
  4496 		ctrl &= ~E1000_CTRL_VME;
       
  4497 		ew32(CTRL, ctrl);
       
  4498 
       
  4499 		if (adapter->hw.mac_type != e1000_ich8lan) {
       
  4500 			if (adapter->mng_vlan_id !=
       
  4501 			    (u16)E1000_MNG_VLAN_NONE) {
       
  4502 				e1000_vlan_rx_kill_vid(netdev,
       
  4503 				                       adapter->mng_vlan_id);
       
  4504 				adapter->mng_vlan_id = E1000_MNG_VLAN_NONE;
       
  4505 			}
       
  4506 		}
       
  4507 	}
       
  4508 
       
  4509 	if (!test_bit(__E1000_DOWN, &adapter->flags))
       
  4510 		e1000_irq_enable(adapter);
       
  4511 }
       
  4512 
       
  4513 static void e1000_vlan_rx_add_vid(struct net_device *netdev, u16 vid)
       
  4514 {
       
  4515 	struct e1000_adapter *adapter = netdev_priv(netdev);
       
  4516 	struct e1000_hw *hw = &adapter->hw;
       
  4517 	u32 vfta, index;
       
  4518 
       
  4519 	if ((hw->mng_cookie.status &
       
  4520 	     E1000_MNG_DHCP_COOKIE_STATUS_VLAN_SUPPORT) &&
       
  4521 	    (vid == adapter->mng_vlan_id))
       
  4522 		return;
       
  4523 	/* add VID to filter table */
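        	/* the 4096 possible VLAN IDs map onto 128 32-bit VFTA
        	 * registers: vid >> 5 selects the register, vid & 0x1F the
        	 * bit; e.g. vid 100 -> register 3, bit 4 */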
       
  4524 	index = (vid >> 5) & 0x7F;
       
  4525 	vfta = E1000_READ_REG_ARRAY(hw, VFTA, index);
       
  4526 	vfta |= (1 << (vid & 0x1F));
       
  4527 	e1000_write_vfta(hw, index, vfta);
       
  4528 }
       
  4529 
       
  4530 static void e1000_vlan_rx_kill_vid(struct net_device *netdev, u16 vid)
       
  4531 {
       
  4532 	struct e1000_adapter *adapter = netdev_priv(netdev);
       
  4533 	struct e1000_hw *hw = &adapter->hw;
       
  4534 	u32 vfta, index;
       
  4535 
       
  4536 	if (!test_bit(__E1000_DOWN, &adapter->flags))
       
  4537 		e1000_irq_disable(adapter);
       
  4538 	vlan_group_set_device(adapter->vlgrp, vid, NULL);
       
  4539 	if (!test_bit(__E1000_DOWN, &adapter->flags))
       
  4540 		e1000_irq_enable(adapter);
       
  4541 
       
  4542 	if ((hw->mng_cookie.status &
       
  4543 	     E1000_MNG_DHCP_COOKIE_STATUS_VLAN_SUPPORT) &&
       
  4544 	    (vid == adapter->mng_vlan_id)) {
       
  4545 		/* release control to f/w */
       
  4546 		e1000_release_hw_control(adapter);
       
  4547 		return;
       
  4548 	}
       
  4549 
       
  4550 	/* remove VID from filter table */
       
  4551 	index = (vid >> 5) & 0x7F;
       
  4552 	vfta = E1000_READ_REG_ARRAY(hw, VFTA, index);
       
  4553 	vfta &= ~(1 << (vid & 0x1F));
       
  4554 	e1000_write_vfta(hw, index, vfta);
       
  4555 }
       
  4556 
       
  4557 static void e1000_restore_vlan(struct e1000_adapter *adapter)
       
  4558 {
       
  4559 	e1000_vlan_rx_register(adapter->netdev, adapter->vlgrp);
       
  4560 
       
  4561 	if (adapter->vlgrp) {
       
  4562 		u16 vid;
       
  4563 		for (vid = 0; vid < VLAN_GROUP_ARRAY_LEN; vid++) {
       
  4564 			if (!vlan_group_get_device(adapter->vlgrp, vid))
       
  4565 				continue;
       
  4566 			e1000_vlan_rx_add_vid(adapter->netdev, vid);
       
  4567 		}
       
  4568 	}
       
  4569 }
       
  4570 
       
  4571 int e1000_set_spd_dplx(struct e1000_adapter *adapter, u16 spddplx)
       
  4572 {
       
  4573 	struct e1000_hw *hw = &adapter->hw;
       
  4574 
       
  4575 	hw->autoneg = 0;
       
  4576 
       
   4577 	/* Fiber NICs only allow 1000 Mbps full duplex */
       
  4578 	if ((hw->media_type == e1000_media_type_fiber) &&
       
  4579 		spddplx != (SPEED_1000 + DUPLEX_FULL)) {
       
  4580 		DPRINTK(PROBE, ERR, "Unsupported Speed/Duplex configuration\n");
       
  4581 		return -EINVAL;
       
  4582 	}
       
  4583 
       
  4584 	switch (spddplx) {
       
  4585 	case SPEED_10 + DUPLEX_HALF:
       
  4586 		hw->forced_speed_duplex = e1000_10_half;
       
  4587 		break;
       
  4588 	case SPEED_10 + DUPLEX_FULL:
       
  4589 		hw->forced_speed_duplex = e1000_10_full;
       
  4590 		break;
       
  4591 	case SPEED_100 + DUPLEX_HALF:
       
  4592 		hw->forced_speed_duplex = e1000_100_half;
       
  4593 		break;
       
  4594 	case SPEED_100 + DUPLEX_FULL:
       
  4595 		hw->forced_speed_duplex = e1000_100_full;
       
  4596 		break;
       
  4597 	case SPEED_1000 + DUPLEX_FULL:
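        		/* 1000BASE-T requires autonegotiation for master/slave
        		 * resolution, so advertise 1000/full rather than
        		 * forcing the speed */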
       
  4598 		hw->autoneg = 1;
       
  4599 		hw->autoneg_advertised = ADVERTISE_1000_FULL;
       
  4600 		break;
       
  4601 	case SPEED_1000 + DUPLEX_HALF: /* not supported */
       
  4602 	default:
       
  4603 		DPRINTK(PROBE, ERR, "Unsupported Speed/Duplex configuration\n");
       
  4604 		return -EINVAL;
       
  4605 	}
       
  4606 	return 0;
       
  4607 }
       
  4608 
       
  4609 static int e1000_suspend(struct pci_dev *pdev, pm_message_t state)
       
  4610 {
       
  4611 	struct net_device *netdev = pci_get_drvdata(pdev);
       
  4612 	struct e1000_adapter *adapter = netdev_priv(netdev);
       
  4613 	struct e1000_hw *hw = &adapter->hw;
       
  4614 	u32 ctrl, ctrl_ext, rctl, status;
       
  4615 	u32 wufc = adapter->wol;
       
  4616 #ifdef CONFIG_PM
       
  4617 	int retval = 0;
       
  4618 #endif
       
  4619 
       
  4620 	netif_device_detach(netdev);
       
  4621 
       
  4622 	if (netif_running(netdev)) {
       
  4623 		WARN_ON(test_bit(__E1000_RESETTING, &adapter->flags));
       
  4624 		e1000_down(adapter);
       
  4625 	}
       
  4626 
       
  4627 #ifdef CONFIG_PM
       
  4628 	retval = pci_save_state(pdev);
       
  4629 	if (retval)
       
  4630 		return retval;
       
  4631 #endif
       
  4632 
       
  4633 	status = er32(STATUS);
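        	/* if link is already up, drop the link-change wake trigger,
        	 * presumably so the link bounce caused by suspending cannot
        	 * immediately wake the system */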
       
  4634 	if (status & E1000_STATUS_LU)
       
  4635 		wufc &= ~E1000_WUFC_LNKC;
       
  4636 
       
  4637 	if (wufc) {
       
  4638 		e1000_setup_rctl(adapter);
       
  4639 		e1000_set_rx_mode(netdev);
       
  4640 
       
  4641 		/* turn on all-multi mode if wake on multicast is enabled */
       
  4642 		if (wufc & E1000_WUFC_MC) {
       
  4643 			rctl = er32(RCTL);
       
  4644 			rctl |= E1000_RCTL_MPE;
       
  4645 			ew32(RCTL, rctl);
       
  4646 		}
       
  4647 
       
  4648 		if (hw->mac_type >= e1000_82540) {
       
  4649 			ctrl = er32(CTRL);
       
  4650 			/* advertise wake from D3Cold */
       
  4651 			#define E1000_CTRL_ADVD3WUC 0x00100000
       
  4652 			/* phy power management enable */
       
  4653 			#define E1000_CTRL_EN_PHY_PWR_MGMT 0x00200000
       
  4654 			ctrl |= E1000_CTRL_ADVD3WUC |
       
  4655 				E1000_CTRL_EN_PHY_PWR_MGMT;
       
  4656 			ew32(CTRL, ctrl);
       
  4657 		}
       
  4658 
       
  4659 		if (hw->media_type == e1000_media_type_fiber ||
       
  4660 		   hw->media_type == e1000_media_type_internal_serdes) {
       
  4661 			/* keep the laser running in D3 */
       
  4662 			ctrl_ext = er32(CTRL_EXT);
       
  4663 			ctrl_ext |= E1000_CTRL_EXT_SDP7_DATA;
       
  4664 			ew32(CTRL_EXT, ctrl_ext);
       
  4665 		}
       
  4666 
       
  4667 		/* Allow time for pending master requests to run */
       
  4668 		e1000_disable_pciex_master(hw);
       
  4669 
       
  4670 		ew32(WUC, E1000_WUC_PME_EN);
       
  4671 		ew32(WUFC, wufc);
       
  4672 		pci_enable_wake(pdev, PCI_D3hot, 1);
       
  4673 		pci_enable_wake(pdev, PCI_D3cold, 1);
       
  4674 	} else {
       
  4675 		ew32(WUC, 0);
       
  4676 		ew32(WUFC, 0);
       
  4677 		pci_enable_wake(pdev, PCI_D3hot, 0);
       
  4678 		pci_enable_wake(pdev, PCI_D3cold, 0);
       
  4679 	}
       
  4680 
       
  4681 	e1000_release_manageability(adapter);
       
  4682 
       
  4683 	/* make sure adapter isn't asleep if manageability is enabled */
       
  4684 	if (adapter->en_mng_pt) {
       
  4685 		pci_enable_wake(pdev, PCI_D3hot, 1);
       
  4686 		pci_enable_wake(pdev, PCI_D3cold, 1);
       
  4687 	}
       
  4688 
       
  4689 	if (hw->phy_type == e1000_phy_igp_3)
       
  4690 		e1000_phy_powerdown_workaround(hw);
       
  4691 
       
  4692 	if (netif_running(netdev))
       
  4693 		e1000_free_irq(adapter);
       
  4694 
       
  4695 	/* Release control of h/w to f/w.  If f/w is AMT enabled, this
       
  4696 	 * would have already happened in close and is redundant. */
       
  4697 	e1000_release_hw_control(adapter);
       
  4698 
       
  4699 	pci_disable_device(pdev);
       
  4700 
       
  4701 	pci_set_power_state(pdev, pci_choose_state(pdev, state));
       
  4702 
       
  4703 	return 0;
       
  4704 }
       
  4705 
       
  4706 #ifdef CONFIG_PM
       
  4707 static int e1000_resume(struct pci_dev *pdev)
       
  4708 {
       
  4709 	struct net_device *netdev = pci_get_drvdata(pdev);
       
  4710 	struct e1000_adapter *adapter = netdev_priv(netdev);
       
  4711 	struct e1000_hw *hw = &adapter->hw;
       
   4712 	int err;
       
  4713 
       
  4714 	pci_set_power_state(pdev, PCI_D0);
       
  4715 	pci_restore_state(pdev);
       
  4716 
       
  4717 	if (adapter->need_ioport)
       
  4718 		err = pci_enable_device(pdev);
       
  4719 	else
       
  4720 		err = pci_enable_device_mem(pdev);
       
  4721 	if (err) {
       
  4722 		printk(KERN_ERR "e1000: Cannot enable PCI device from suspend\n");
       
  4723 		return err;
       
  4724 	}
       
  4725 	pci_set_master(pdev);
       
  4726 
       
  4727 	pci_enable_wake(pdev, PCI_D3hot, 0);
       
  4728 	pci_enable_wake(pdev, PCI_D3cold, 0);
       
  4729 
       
  4730 	if (netif_running(netdev)) {
       
  4731 		err = e1000_request_irq(adapter);
       
  4732 		if (err)
       
  4733 			return err;
       
  4734 	}
       
  4735 
       
  4736 	e1000_power_up_phy(adapter);
       
  4737 	e1000_reset(adapter);
       
  4738 	ew32(WUS, ~0);
       
  4739 
       
  4740 	e1000_init_manageability(adapter);
       
  4741 
       
  4742 	if (netif_running(netdev))
       
  4743 		e1000_up(adapter);
       
  4744 
       
  4745 	netif_device_attach(netdev);
       
  4746 
       
  4747 	/* If the controller is 82573 and f/w is AMT, do not set
       
  4748 	 * DRV_LOAD until the interface is up.  For all other cases,
       
  4749 	 * let the f/w know that the h/w is now under the control
       
  4750 	 * of the driver. */
       
  4751 	if (hw->mac_type != e1000_82573 ||
       
  4752 	    !e1000_check_mng_mode(hw))
       
  4753 		e1000_get_hw_control(adapter);
       
  4754 
       
  4755 	return 0;
       
  4756 }
       
  4757 #endif
       
  4758 
       
  4759 static void e1000_shutdown(struct pci_dev *pdev)
       
  4760 {
       
  4761 	e1000_suspend(pdev, PMSG_SUSPEND);
       
  4762 }
       
  4763 
       
  4764 #ifdef CONFIG_NET_POLL_CONTROLLER
       
  4765 /*
       
  4766  * Polling 'interrupt' - used by things like netconsole to send skbs
       
  4767  * without having to re-enable interrupts. It's not called while
       
  4768  * the interrupt routine is executing.
       
  4769  */
       
  4770 static void e1000_netpoll(struct net_device *netdev)
       
  4771 {
       
  4772 	struct e1000_adapter *adapter = netdev_priv(netdev);
       
  4773 
       
  4774 	disable_irq(adapter->pdev->irq);
       
  4775 	e1000_intr(adapter->pdev->irq, netdev);
       
  4776 	enable_irq(adapter->pdev->irq);
       
  4777 }
       
  4778 #endif
       
  4779 
       
  4780 /**
       
  4781  * e1000_io_error_detected - called when PCI error is detected
       
  4782  * @pdev: Pointer to PCI device
       
   4783  * @state: the current PCI connection state
       
  4784  *
       
  4785  * This function is called after a PCI bus error affecting
       
  4786  * this device has been detected.
       
  4787  */
       
  4788 static pci_ers_result_t e1000_io_error_detected(struct pci_dev *pdev,
       
  4789 						pci_channel_state_t state)
       
  4790 {
       
  4791 	struct net_device *netdev = pci_get_drvdata(pdev);
       
   4792 	struct e1000_adapter *adapter = netdev_priv(netdev);
       
  4793 
       
  4794 	netif_device_detach(netdev);
       
  4795 
       
  4796 	if (netif_running(netdev))
       
  4797 		e1000_down(adapter);
       
  4798 	pci_disable_device(pdev);
       
  4799 
       
   4800 	/* Request a slot reset. */
       
  4801 	return PCI_ERS_RESULT_NEED_RESET;
       
  4802 }
       
  4803 
       
  4804 /**
       
  4805  * e1000_io_slot_reset - called after the pci bus has been reset.
       
  4806  * @pdev: Pointer to PCI device
       
  4807  *
       
  4808  * Restart the card from scratch, as if from a cold-boot. Implementation
       
  4809  * resembles the first-half of the e1000_resume routine.
       
  4810  */
       
  4811 static pci_ers_result_t e1000_io_slot_reset(struct pci_dev *pdev)
       
  4812 {
       
  4813 	struct net_device *netdev = pci_get_drvdata(pdev);
       
   4814 	struct e1000_adapter *adapter = netdev_priv(netdev);
       
  4815 	struct e1000_hw *hw = &adapter->hw;
       
  4816 	int err;
       
  4817 
       
  4818 	if (adapter->need_ioport)
       
  4819 		err = pci_enable_device(pdev);
       
  4820 	else
       
  4821 		err = pci_enable_device_mem(pdev);
       
  4822 	if (err) {
       
  4823 		printk(KERN_ERR "e1000: Cannot re-enable PCI device after reset.\n");
       
  4824 		return PCI_ERS_RESULT_DISCONNECT;
       
  4825 	}
       
  4826 	pci_set_master(pdev);
       
  4827 
       
  4828 	pci_enable_wake(pdev, PCI_D3hot, 0);
       
  4829 	pci_enable_wake(pdev, PCI_D3cold, 0);
       
  4830 
       
  4831 	e1000_reset(adapter);
       
  4832 	ew32(WUS, ~0);
       
  4833 
       
  4834 	return PCI_ERS_RESULT_RECOVERED;
       
  4835 }
       
  4836 
       
  4837 /**
       
  4838  * e1000_io_resume - called when traffic can start flowing again.
       
  4839  * @pdev: Pointer to PCI device
       
  4840  *
       
  4841  * This callback is called when the error recovery driver tells us that
       
   4842  * it's OK to resume normal operation. Implementation resembles the
       
  4843  * second-half of the e1000_resume routine.
       
  4844  */
       
  4845 static void e1000_io_resume(struct pci_dev *pdev)
       
  4846 {
       
  4847 	struct net_device *netdev = pci_get_drvdata(pdev);
       
   4848 	struct e1000_adapter *adapter = netdev_priv(netdev);
       
  4849 	struct e1000_hw *hw = &adapter->hw;
       
  4850 
       
  4851 	e1000_init_manageability(adapter);
       
  4852 
       
  4853 	if (netif_running(netdev)) {
       
  4854 		if (e1000_up(adapter)) {
       
   4855 			printk(KERN_ERR "e1000: can't bring device back up after reset\n");
       
  4856 			return;
       
  4857 		}
       
  4858 	}
       
  4859 
       
  4860 	netif_device_attach(netdev);
       
  4861 
       
  4862 	/* If the controller is 82573 and f/w is AMT, do not set
       
  4863 	 * DRV_LOAD until the interface is up.  For all other cases,
       
  4864 	 * let the f/w know that the h/w is now under the control
       
  4865 	 * of the driver. */
       
  4866 	if (hw->mac_type != e1000_82573 ||
       
  4867 	    !e1000_check_mng_mode(hw))
       
  4868 		e1000_get_hw_control(adapter);
       
  4869 
       
  4870 }
       
  4871 
       
  4872 /* e1000_main.c */