devices/e1000/e1000_main-3.4-ethercat.c
       
     1 /*******************************************************************************
       
     2 
       
     3   Intel PRO/1000 Linux driver
       
     4   Copyright(c) 1999 - 2006 Intel Corporation.
       
     5 
       
     6   This program is free software; you can redistribute it and/or modify it
       
     7   under the terms and conditions of the GNU General Public License,
       
     8   version 2, as published by the Free Software Foundation.
       
     9 
       
    10   This program is distributed in the hope it will be useful, but WITHOUT
       
    11   ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
       
    12   FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
       
    13   more details.
       
    14 
       
    15   You should have received a copy of the GNU General Public License along with
       
    16   this program; if not, write to the Free Software Foundation, Inc.,
       
    17   51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
       
    18 
       
    19   The full GNU General Public License is included in this distribution in
       
    20   the file called "COPYING".
       
    21 
       
    22   Contact Information:
       
    23   Linux NICS <linux.nics@intel.com>
       
    24   e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
       
    25   Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
       
    26 
       
    27   vim: noexpandtab
       
    28 
       
    29 *******************************************************************************/
       
    30 
       
    31 #include "e1000-3.4-ethercat.h"
       
    32 #include <net/ip6_checksum.h>
       
    33 #include <linux/io.h>
       
    34 #include <linux/prefetch.h>
       
    35 #include <linux/bitops.h>
       
    36 #include <linux/if_vlan.h>
       
    37 
       
    38 char e1000_driver_name[] = "ec_e1000";
       
    39 static char e1000_driver_string[] = "Intel(R) PRO/1000 Network Driver";
       
    40 #define DRV_VERSION "7.3.21-k8-NAPI"
       
    41 const char e1000_driver_version[] = DRV_VERSION;
       
    42 static const char e1000_copyright[] = "Copyright (c) 1999-2006 Intel Corporation.";
       
    43 
       
    44 /* e1000_pci_tbl - PCI Device ID Table
       
    45  *
       
    46  * Last entry must be all 0s
       
    47  *
       
    48  * Macro expands to...
       
    49  *   {PCI_DEVICE(PCI_VENDOR_ID_INTEL, device_id)}
       
    50  */
       
    51 static DEFINE_PCI_DEVICE_TABLE(e1000_pci_tbl) = {
       
    52 	INTEL_E1000_ETHERNET_DEVICE(0x1000),
       
    53 	INTEL_E1000_ETHERNET_DEVICE(0x1001),
       
    54 	INTEL_E1000_ETHERNET_DEVICE(0x1004),
       
    55 	INTEL_E1000_ETHERNET_DEVICE(0x1008),
       
    56 	INTEL_E1000_ETHERNET_DEVICE(0x1009),
       
    57 	INTEL_E1000_ETHERNET_DEVICE(0x100C),
       
    58 	INTEL_E1000_ETHERNET_DEVICE(0x100D),
       
    59 	INTEL_E1000_ETHERNET_DEVICE(0x100E),
       
    60 	INTEL_E1000_ETHERNET_DEVICE(0x100F),
       
    61 	INTEL_E1000_ETHERNET_DEVICE(0x1010),
       
    62 	INTEL_E1000_ETHERNET_DEVICE(0x1011),
       
    63 	INTEL_E1000_ETHERNET_DEVICE(0x1012),
       
    64 	INTEL_E1000_ETHERNET_DEVICE(0x1013),
       
    65 	INTEL_E1000_ETHERNET_DEVICE(0x1014),
       
    66 	INTEL_E1000_ETHERNET_DEVICE(0x1015),
       
    67 	INTEL_E1000_ETHERNET_DEVICE(0x1016),
       
    68 	INTEL_E1000_ETHERNET_DEVICE(0x1017),
       
    69 	INTEL_E1000_ETHERNET_DEVICE(0x1018),
       
    70 	INTEL_E1000_ETHERNET_DEVICE(0x1019),
       
    71 	INTEL_E1000_ETHERNET_DEVICE(0x101A),
       
    72 	INTEL_E1000_ETHERNET_DEVICE(0x101D),
       
    73 	INTEL_E1000_ETHERNET_DEVICE(0x101E),
       
    74 	INTEL_E1000_ETHERNET_DEVICE(0x1026),
       
    75 	INTEL_E1000_ETHERNET_DEVICE(0x1027),
       
    76 	INTEL_E1000_ETHERNET_DEVICE(0x1028),
       
    77 	INTEL_E1000_ETHERNET_DEVICE(0x1075),
       
    78 	INTEL_E1000_ETHERNET_DEVICE(0x1076),
       
    79 	INTEL_E1000_ETHERNET_DEVICE(0x1077),
       
    80 	INTEL_E1000_ETHERNET_DEVICE(0x1078),
       
    81 	INTEL_E1000_ETHERNET_DEVICE(0x1079),
       
    82 	INTEL_E1000_ETHERNET_DEVICE(0x107A),
       
    83 	INTEL_E1000_ETHERNET_DEVICE(0x107B),
       
    84 	INTEL_E1000_ETHERNET_DEVICE(0x107C),
       
    85 	INTEL_E1000_ETHERNET_DEVICE(0x108A),
       
    86 	INTEL_E1000_ETHERNET_DEVICE(0x1099),
       
    87 	INTEL_E1000_ETHERNET_DEVICE(0x10B5),
       
    88 	INTEL_E1000_ETHERNET_DEVICE(0x2E6E),
       
    89 	/* required last entry */
       
    90 	{0,}
       
    91 };
       
    92 
       
     93 // do not auto-load the driver: the device table is deliberately left unregistered
       
     94 // MODULE_DEVICE_TABLE(pci, e1000_pci_tbl);
       
    95 
       
    96 int e1000_up(struct e1000_adapter *adapter);
       
    97 void e1000_down(struct e1000_adapter *adapter);
       
    98 void e1000_reinit_locked(struct e1000_adapter *adapter);
       
    99 void e1000_reset(struct e1000_adapter *adapter);
       
   100 int e1000_setup_all_tx_resources(struct e1000_adapter *adapter);
       
   101 int e1000_setup_all_rx_resources(struct e1000_adapter *adapter);
       
   102 void e1000_free_all_tx_resources(struct e1000_adapter *adapter);
       
   103 void e1000_free_all_rx_resources(struct e1000_adapter *adapter);
       
   104 static int e1000_setup_tx_resources(struct e1000_adapter *adapter,
       
   105                              struct e1000_tx_ring *txdr);
       
   106 static int e1000_setup_rx_resources(struct e1000_adapter *adapter,
       
   107                              struct e1000_rx_ring *rxdr);
       
   108 static void e1000_free_tx_resources(struct e1000_adapter *adapter,
       
   109                              struct e1000_tx_ring *tx_ring);
       
   110 static void e1000_free_rx_resources(struct e1000_adapter *adapter,
       
   111                              struct e1000_rx_ring *rx_ring);
       
   112 void e1000_update_stats(struct e1000_adapter *adapter);
       
   113 
       
   114 static int e1000_init_module(void);
       
   115 static void e1000_exit_module(void);
       
   116 static int e1000_probe(struct pci_dev *pdev, const struct pci_device_id *ent);
       
   117 static void __devexit e1000_remove(struct pci_dev *pdev);
       
   118 static int e1000_alloc_queues(struct e1000_adapter *adapter);
       
   119 static int e1000_sw_init(struct e1000_adapter *adapter);
       
   120 static int e1000_open(struct net_device *netdev);
       
   121 static int e1000_close(struct net_device *netdev);
       
   122 static void e1000_configure_tx(struct e1000_adapter *adapter);
       
   123 static void e1000_configure_rx(struct e1000_adapter *adapter);
       
   124 static void e1000_setup_rctl(struct e1000_adapter *adapter);
       
   125 static void e1000_clean_all_tx_rings(struct e1000_adapter *adapter);
       
   126 static void e1000_clean_all_rx_rings(struct e1000_adapter *adapter);
       
   127 static void e1000_clean_tx_ring(struct e1000_adapter *adapter,
       
   128                                 struct e1000_tx_ring *tx_ring);
       
   129 static void e1000_clean_rx_ring(struct e1000_adapter *adapter,
       
   130                                 struct e1000_rx_ring *rx_ring);
       
   131 static void e1000_set_rx_mode(struct net_device *netdev);
       
   132 static void e1000_update_phy_info_task(struct work_struct *work);
       
   133 static void e1000_watchdog(struct work_struct *work);
       
   134 static void e1000_82547_tx_fifo_stall_task(struct work_struct *work);
       
   135 static netdev_tx_t e1000_xmit_frame(struct sk_buff *skb,
       
   136 				    struct net_device *netdev);
       
   137 static struct net_device_stats * e1000_get_stats(struct net_device *netdev);
       
   138 static int e1000_change_mtu(struct net_device *netdev, int new_mtu);
       
   139 static int e1000_set_mac(struct net_device *netdev, void *p);
       
    140 void ec_poll(struct net_device *netdev);
       
   141 static irqreturn_t e1000_intr(int irq, void *data);
       
   142 static bool e1000_clean_tx_irq(struct e1000_adapter *adapter,
       
   143 			       struct e1000_tx_ring *tx_ring);
       
   144 static int e1000_clean(struct napi_struct *napi, int budget);
       
   145 static bool e1000_clean_rx_irq(struct e1000_adapter *adapter,
       
   146 			       struct e1000_rx_ring *rx_ring,
       
   147 			       int *work_done, int work_to_do);
       
   148 static bool e1000_clean_jumbo_rx_irq(struct e1000_adapter *adapter,
       
   149 				     struct e1000_rx_ring *rx_ring,
       
   150 				     int *work_done, int work_to_do);
       
   151 static void e1000_alloc_rx_buffers(struct e1000_adapter *adapter,
       
   152 				   struct e1000_rx_ring *rx_ring,
       
   153 				   int cleaned_count);
       
   154 static void e1000_alloc_jumbo_rx_buffers(struct e1000_adapter *adapter,
       
   155 					 struct e1000_rx_ring *rx_ring,
       
   156 					 int cleaned_count);
       
   157 static int e1000_ioctl(struct net_device *netdev, struct ifreq *ifr, int cmd);
       
   158 static int e1000_mii_ioctl(struct net_device *netdev, struct ifreq *ifr,
       
   159 			   int cmd);
       
   160 static void e1000_enter_82542_rst(struct e1000_adapter *adapter);
       
   161 static void e1000_leave_82542_rst(struct e1000_adapter *adapter);
       
   162 static void e1000_tx_timeout(struct net_device *dev);
       
   163 static void e1000_reset_task(struct work_struct *work);
       
   164 static void e1000_smartspeed(struct e1000_adapter *adapter);
       
   165 static int e1000_82547_fifo_workaround(struct e1000_adapter *adapter,
       
   166                                        struct sk_buff *skb);
       
   167 
       
   168 static bool e1000_vlan_used(struct e1000_adapter *adapter);
       
   169 static void e1000_vlan_mode(struct net_device *netdev,
       
   170 			    netdev_features_t features);
       
   171 static void e1000_vlan_filter_on_off(struct e1000_adapter *adapter,
       
   172 				     bool filter_on);
       
   173 static int e1000_vlan_rx_add_vid(struct net_device *netdev, u16 vid);
       
   174 static int e1000_vlan_rx_kill_vid(struct net_device *netdev, u16 vid);
       
   175 static void e1000_restore_vlan(struct e1000_adapter *adapter);
       
   176 
       
   177 #ifdef CONFIG_PM
       
   178 static int e1000_suspend(struct pci_dev *pdev, pm_message_t state);
       
   179 static int e1000_resume(struct pci_dev *pdev);
       
   180 #endif
       
   181 static void e1000_shutdown(struct pci_dev *pdev);
       
   182 
       
   183 #ifdef CONFIG_NET_POLL_CONTROLLER
       
   184 /* for netdump / net console */
       
   185 static void e1000_netpoll (struct net_device *netdev);
       
   186 #endif
       
   187 
       
   188 #define COPYBREAK_DEFAULT 256
       
   189 static unsigned int copybreak __read_mostly = COPYBREAK_DEFAULT;
       
   190 module_param(copybreak, uint, 0644);
       
   191 MODULE_PARM_DESC(copybreak,
       
   192 	"Maximum size of packet that is copied to a new buffer on receive");
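
/* Note on copybreak: received frames no longer than this threshold are copied
 * into a freshly allocated skb so the original DMA buffer can be recycled
 * immediately, trading one memcpy for better buffer reuse on small packets.
 * A usage sketch (the parameter is declared above; the module name follows
 * e1000_driver_name, "ec_e1000"):
 *
 *   modprobe ec_e1000 copybreak=128   // copy frames of <= 128 bytes
 *   modprobe ec_e1000 copybreak=0     // never copy, always flip buffers
 */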
       
   193 
       
   194 static pci_ers_result_t e1000_io_error_detected(struct pci_dev *pdev,
       
   195                      pci_channel_state_t state);
       
   196 static pci_ers_result_t e1000_io_slot_reset(struct pci_dev *pdev);
       
   197 static void e1000_io_resume(struct pci_dev *pdev);
       
   198 
       
   199 static struct pci_error_handlers e1000_err_handler = {
       
   200 	.error_detected = e1000_io_error_detected,
       
   201 	.slot_reset = e1000_io_slot_reset,
       
   202 	.resume = e1000_io_resume,
       
   203 };
       
   204 
       
   205 static struct pci_driver e1000_driver = {
       
   206 	.name     = e1000_driver_name,
       
   207 	.id_table = e1000_pci_tbl,
       
   208 	.probe    = e1000_probe,
       
   209 	.remove   = __devexit_p(e1000_remove),
       
   210 #ifdef CONFIG_PM
       
   211 	/* Power Management Hooks */
       
   212 	.suspend  = e1000_suspend,
       
   213 	.resume   = e1000_resume,
       
   214 #endif
       
   215 	.shutdown = e1000_shutdown,
       
   216 	.err_handler = &e1000_err_handler
       
   217 };
       
   218 
       
   219 MODULE_AUTHOR("Florian Pose <fp@igh-essen.com>");
       
   220 MODULE_DESCRIPTION("EtherCAT-capable Intel(R) PRO/1000 Network Driver");
       
   221 MODULE_LICENSE("GPL");
       
   222 MODULE_VERSION(DRV_VERSION);
       
   223 
       
   224 #define DEFAULT_MSG_ENABLE (NETIF_MSG_DRV|NETIF_MSG_PROBE|NETIF_MSG_LINK)
       
   225 static int debug = -1;
       
   226 module_param(debug, int, 0);
       
   227 MODULE_PARM_DESC(debug, "Debug level (0=none,...,16=all)");
       
   228 
       
    229 /**
       
    230  * e1000_get_hw_dev - return the net_device backing the hw struct
       
    231  * @hw: pointer to the hardware structure
       
    232  * used by the hardware layer to print debugging information
       
    233  **/
       
   234 struct net_device *e1000_get_hw_dev(struct e1000_hw *hw)
       
   235 {
       
   236 	struct e1000_adapter *adapter = hw->back;
       
   237 	return adapter->netdev;
       
   238 }
       
   239 
       
   240 /**
       
   241  * e1000_init_module - Driver Registration Routine
       
   242  *
       
   243  * e1000_init_module is the first routine called when the driver is
       
   244  * loaded. All it does is register with the PCI subsystem.
       
   245  **/
       
   246 
       
   247 static int __init e1000_init_module(void)
       
   248 {
       
   249 	int ret;
       
   250 	pr_info("%s - version %s\n", e1000_driver_string, e1000_driver_version);
       
   251 
       
   252 	pr_info("%s\n", e1000_copyright);
       
   253 
       
   254 	ret = pci_register_driver(&e1000_driver);
       
   255 	if (copybreak != COPYBREAK_DEFAULT) {
       
   256 		if (copybreak == 0)
       
   257 			pr_info("copybreak disabled\n");
       
   258 		else
       
   259 			pr_info("copybreak enabled for "
       
   260 				   "packets <= %u bytes\n", copybreak);
       
   261 	}
       
   262 	return ret;
       
   263 }
       
   264 
       
   265 module_init(e1000_init_module);
       
   266 
       
   267 /**
       
   268  * e1000_exit_module - Driver Exit Cleanup Routine
       
   269  *
       
   270  * e1000_exit_module is called just before the driver is removed
       
   271  * from memory.
       
   272  **/
       
   273 
       
   274 static void __exit e1000_exit_module(void)
       
   275 {
       
   276 	pci_unregister_driver(&e1000_driver);
       
   277 }
       
   278 
       
   279 module_exit(e1000_exit_module);
       
   280 
       
   281 static int e1000_request_irq(struct e1000_adapter *adapter)
       
   282 {
       
   283 	struct net_device *netdev = adapter->netdev;
       
   284 	irq_handler_t handler = e1000_intr;
       
   285 	int irq_flags = IRQF_SHARED;
       
   286 	int err;
       
   287 
       
   288 	if (adapter->ecdev) {
       
   289 		return 0;
       
   290 	}
       
   291 
       
   292 	err = request_irq(adapter->pdev->irq, handler, irq_flags, netdev->name,
       
   293 	                  netdev);
       
   294 	if (err) {
       
    295 		e_err(probe, "Unable to allocate interrupt, Error: %d\n", err);
       
   296 	}
       
   297 
       
   298 	return err;
       
   299 }
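
/* When the EtherCAT master has claimed the NIC (adapter->ecdev != NULL), the
 * device operates without an interrupt handler: the master invokes ec_poll()
 * cyclically instead, which is why e1000_request_irq() and e1000_free_irq()
 * (below) are no-ops for EtherCAT-operated devices.
 */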
       
   300 
       
   301 static void e1000_free_irq(struct e1000_adapter *adapter)
       
   302 {
       
   303 	struct net_device *netdev = adapter->netdev;
       
   304 
       
   305 	if (adapter->ecdev) {
       
   306 		return;
       
   307 	}
       
   308 
       
   309 	free_irq(adapter->pdev->irq, netdev);
       
   310 }
       
   311 
       
   312 /**
       
   313  * e1000_irq_disable - Mask off interrupt generation on the NIC
       
   314  * @adapter: board private structure
       
   315  **/
       
   316 
       
   317 static void e1000_irq_disable(struct e1000_adapter *adapter)
       
   318 {
       
   319 	struct e1000_hw *hw = &adapter->hw;
       
   320 
       
   321 	if (adapter->ecdev) {
       
   322 		return;
       
   323 	}
       
   324 
       
   325 	ew32(IMC, ~0);
       
   326 	E1000_WRITE_FLUSH();
       
   327 	synchronize_irq(adapter->pdev->irq);
       
   328 }
       
   329 
       
   330 /**
       
   331  * e1000_irq_enable - Enable default interrupt generation settings
       
   332  * @adapter: board private structure
       
   333  **/
       
   334 
       
   335 static void e1000_irq_enable(struct e1000_adapter *adapter)
       
   336 {
       
   337 	struct e1000_hw *hw = &adapter->hw;
       
   338 
       
   339 	if (adapter->ecdev) {
       
   340 		return;
       
   341 	}
       
   342 
       
   343 	ew32(IMS, IMS_ENABLE_MASK);
       
   344 	E1000_WRITE_FLUSH();
       
   345 }
       
   346 
       
   347 static void e1000_update_mng_vlan(struct e1000_adapter *adapter)
       
   348 {
       
   349 	struct e1000_hw *hw = &adapter->hw;
       
   350 	struct net_device *netdev = adapter->netdev;
       
   351 	u16 vid = hw->mng_cookie.vlan_id;
       
   352 	u16 old_vid = adapter->mng_vlan_id;
       
   353 
       
   354 	if (!e1000_vlan_used(adapter))
       
   355 		return;
       
   356 
       
   357 	if (!test_bit(vid, adapter->active_vlans)) {
       
   358 		if (hw->mng_cookie.status &
       
   359 		    E1000_MNG_DHCP_COOKIE_STATUS_VLAN_SUPPORT) {
       
   360 			e1000_vlan_rx_add_vid(netdev, vid);
       
   361 			adapter->mng_vlan_id = vid;
       
   362 		} else {
       
   363 			adapter->mng_vlan_id = E1000_MNG_VLAN_NONE;
       
   364 		}
       
   365 		if ((old_vid != (u16)E1000_MNG_VLAN_NONE) &&
       
   366 		    (vid != old_vid) &&
       
   367 		    !test_bit(old_vid, adapter->active_vlans))
       
   368 			e1000_vlan_rx_kill_vid(netdev, old_vid);
       
   369 	} else {
       
   370 		adapter->mng_vlan_id = vid;
       
   371 	}
       
   372 }
       
   373 
       
   374 static void e1000_init_manageability(struct e1000_adapter *adapter)
       
   375 {
       
   376 	struct e1000_hw *hw = &adapter->hw;
       
   377 
       
   378 	if (adapter->en_mng_pt) {
       
   379 		u32 manc = er32(MANC);
       
   380 
       
   381 		/* disable hardware interception of ARP */
       
   382 		manc &= ~(E1000_MANC_ARP_EN);
       
   383 
       
   384 		ew32(MANC, manc);
       
   385 	}
       
   386 }
       
   387 
       
   388 static void e1000_release_manageability(struct e1000_adapter *adapter)
       
   389 {
       
   390 	struct e1000_hw *hw = &adapter->hw;
       
   391 
       
   392 	if (adapter->en_mng_pt) {
       
   393 		u32 manc = er32(MANC);
       
   394 
       
   395 		/* re-enable hardware interception of ARP */
       
   396 		manc |= E1000_MANC_ARP_EN;
       
   397 
       
   398 		ew32(MANC, manc);
       
   399 	}
       
   400 }
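
/* Manageability note: on ports with management pass-through enabled
 * (adapter->en_mng_pt), the MANC register decides which traffic the firmware
 * (e.g. a BMC) may intercept. The driver clears E1000_MANC_ARP_EN while it
 * owns the device so ARP reaches the host stack, and restores the bit in
 * e1000_release_manageability() so out-of-band management keeps working.
 */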
       
   401 
       
   402 /**
       
   403  * e1000_configure - configure the hardware for RX and TX
       
    404  * @adapter: board private structure
       
   405  **/
       
   406 static void e1000_configure(struct e1000_adapter *adapter)
       
   407 {
       
   408 	struct net_device *netdev = adapter->netdev;
       
   409 	int i;
       
   410 
       
   411 	e1000_set_rx_mode(netdev);
       
   412 
       
   413 	e1000_restore_vlan(adapter);
       
   414 	e1000_init_manageability(adapter);
       
   415 
       
   416 	e1000_configure_tx(adapter);
       
   417 	e1000_setup_rctl(adapter);
       
   418 	e1000_configure_rx(adapter);
       
   419 	/* call E1000_DESC_UNUSED which always leaves
       
   420 	 * at least 1 descriptor unused to make sure
       
   421 	 * next_to_use != next_to_clean */
       
   422 	for (i = 0; i < adapter->num_rx_queues; i++) {
       
   423 		struct e1000_rx_ring *ring = &adapter->rx_ring[i];
       
   424 		if (adapter->ecdev) {
       
   425 			/* fill rx ring completely! */
       
   426 			adapter->alloc_rx_buf(adapter, ring, ring->count);
       
   427 		} else {
       
   428 			/* this one leaves the last ring element unallocated! */
       
   429 			adapter->alloc_rx_buf(adapter, ring,
       
   430 					E1000_DESC_UNUSED(ring));
       
   431 		}
       
   432 	}
       
   433 }
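
/* The non-EtherCAT branch above leaves one descriptor unused so that
 * next_to_use can never catch up with next_to_clean. For reference, a rough
 * sketch of the ring-accounting macro as defined in the driver header:
 *
 *   #define E1000_DESC_UNUSED(R) \
 *       ((((R)->next_to_clean > (R)->next_to_use) ? 0 : (R)->count) + \
 *        (R)->next_to_clean - (R)->next_to_use - 1)
 *
 * The EtherCAT branch may fill the ring completely, presumably because
 * ec_poll() consumes and refills descriptors synchronously within the
 * master's cycle rather than racing an interrupt handler.
 */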
       
   434 
       
   435 int e1000_up(struct e1000_adapter *adapter)
       
   436 {
       
   437 	struct e1000_hw *hw = &adapter->hw;
       
   438 
       
   439 	/* hardware has been reset, we need to reload some things */
       
   440 	e1000_configure(adapter);
       
   441 
       
   442 	clear_bit(__E1000_DOWN, &adapter->flags);
       
   443 
       
   444 	if (!adapter->ecdev) {
       
   445 		napi_enable(&adapter->napi);
       
   446 
       
   447 		e1000_irq_enable(adapter);
       
   448 
       
   449 		netif_wake_queue(adapter->netdev);
       
   450 
       
   451 		/* fire a link change interrupt to start the watchdog */
       
   452 		ew32(ICS, E1000_ICS_LSC);
       
   453 	}
       
   454 	return 0;
       
   455 }
       
   456 
       
   457 /**
       
   458  * e1000_power_up_phy - restore link in case the phy was powered down
       
   459  * @adapter: address of board private structure
       
   460  *
       
   461  * The phy may be powered down to save power and turn off link when the
       
   462  * driver is unloaded and wake on lan is not enabled (among others)
       
   463  * *** this routine MUST be followed by a call to e1000_reset ***
       
   464  *
       
   465  **/
       
   466 
       
   467 void e1000_power_up_phy(struct e1000_adapter *adapter)
       
   468 {
       
   469 	struct e1000_hw *hw = &adapter->hw;
       
   470 	u16 mii_reg = 0;
       
   471 
       
   472 	/* Just clear the power down bit to wake the phy back up */
       
   473 	if (hw->media_type == e1000_media_type_copper) {
       
   474 		/* according to the manual, the phy will retain its
       
   475 		 * settings across a power-down/up cycle */
       
   476 		e1000_read_phy_reg(hw, PHY_CTRL, &mii_reg);
       
   477 		mii_reg &= ~MII_CR_POWER_DOWN;
       
   478 		e1000_write_phy_reg(hw, PHY_CTRL, mii_reg);
       
   479 	}
       
   480 }
       
   481 
       
   482 static void e1000_power_down_phy(struct e1000_adapter *adapter)
       
   483 {
       
   484 	struct e1000_hw *hw = &adapter->hw;
       
   485 
       
    486 	/* Power down the PHY so no link is implied when interface is down.
       
    487 	 * The PHY cannot be powered down if any of the following is true:
       
    488 	 * (a) WoL is enabled
       
    489 	 * (b) AMT is active
       
    490 	 * (c) SoL/IDER session is active */
       
   491 	if (!adapter->wol && hw->mac_type >= e1000_82540 &&
       
   492 	   hw->media_type == e1000_media_type_copper) {
       
   493 		u16 mii_reg = 0;
       
   494 
       
   495 		switch (hw->mac_type) {
       
   496 		case e1000_82540:
       
   497 		case e1000_82545:
       
   498 		case e1000_82545_rev_3:
       
   499 		case e1000_82546:
       
   500 		case e1000_ce4100:
       
   501 		case e1000_82546_rev_3:
       
   502 		case e1000_82541:
       
   503 		case e1000_82541_rev_2:
       
   504 		case e1000_82547:
       
   505 		case e1000_82547_rev_2:
       
   506 			if (er32(MANC) & E1000_MANC_SMBUS_EN)
       
   507 				goto out;
       
   508 			break;
       
   509 		default:
       
   510 			goto out;
       
   511 		}
       
   512 		e1000_read_phy_reg(hw, PHY_CTRL, &mii_reg);
       
   513 		mii_reg |= MII_CR_POWER_DOWN;
       
   514 		e1000_write_phy_reg(hw, PHY_CTRL, mii_reg);
       
   515 		msleep(1);
       
   516 	}
       
   517 out:
       
   518 	return;
       
   519 }
       
   520 
       
   521 static void e1000_down_and_stop(struct e1000_adapter *adapter)
       
   522 {
       
   523 	set_bit(__E1000_DOWN, &adapter->flags);
       
   524 
       
   525 	/* Only kill reset task if adapter is not resetting */
       
   526 	if (!test_bit(__E1000_RESETTING, &adapter->flags))
       
   527 		cancel_work_sync(&adapter->reset_task);
       
   528 
       
   529 	if (!adapter->ecdev) {
       
   530 		cancel_delayed_work_sync(&adapter->watchdog_task);
       
   531 		cancel_delayed_work_sync(&adapter->phy_info_task);
       
   532 		cancel_delayed_work_sync(&adapter->fifo_stall_task);
       
   533 	}
       
   534 }
       
   535 
       
   536 void e1000_down(struct e1000_adapter *adapter)
       
   537 {
       
   538 	struct e1000_hw *hw = &adapter->hw;
       
   539 	struct net_device *netdev = adapter->netdev;
       
   540 	u32 rctl, tctl;
       
   541 
       
   542 
       
    543 	/* disable receives in the hardware */
       
   544 	rctl = er32(RCTL);
       
   545 	ew32(RCTL, rctl & ~E1000_RCTL_EN);
       
   546 
       
   547 	if (!adapter->ecdev) {
       
   548 		/* flush and sleep below */
       
   549 		netif_tx_disable(netdev);
       
   550 	}
       
   551 
       
   552 	/* disable transmits in the hardware */
       
   553 	tctl = er32(TCTL);
       
   554 	tctl &= ~E1000_TCTL_EN;
       
   555 	ew32(TCTL, tctl);
       
   556 	/* flush both disables and wait for them to finish */
       
   557 	E1000_WRITE_FLUSH();
       
   558 	msleep(10);
       
   559 
       
   560 	if (!adapter->ecdev) {
       
   561 		napi_disable(&adapter->napi);
       
   562 
       
   563 		e1000_irq_disable(adapter);
       
   564 	}
       
   565 
       
   566 	/*
       
   567 	 * Setting DOWN must be after irq_disable to prevent
       
   568 	 * a screaming interrupt.  Setting DOWN also prevents
       
   569 	 * tasks from rescheduling.
       
   570 	 */
       
   571 	e1000_down_and_stop(adapter);
       
   572 
       
   573 	adapter->link_speed = 0;
       
   574 	adapter->link_duplex = 0;
       
   575 
       
   576 	if (!adapter->ecdev) {
       
   577 		netif_carrier_off(netdev);
       
   578 	}
       
   579 
       
   580 	e1000_reset(adapter);
       
   581 	e1000_clean_all_tx_rings(adapter);
       
   582 	e1000_clean_all_rx_rings(adapter);
       
   583 }
       
   584 
       
   585 static void e1000_reinit_safe(struct e1000_adapter *adapter)
       
   586 {
       
   587 	while (test_and_set_bit(__E1000_RESETTING, &adapter->flags))
       
   588 		msleep(1);
       
   589 	mutex_lock(&adapter->mutex);
       
   590 	e1000_down(adapter);
       
   591 	e1000_up(adapter);
       
   592 	mutex_unlock(&adapter->mutex);
       
   593 	clear_bit(__E1000_RESETTING, &adapter->flags);
       
   594 }
       
   595 
       
   596 void e1000_reinit_locked(struct e1000_adapter *adapter)
       
   597 {
       
   598 	/* if rtnl_lock is not held the call path is bogus */
       
   599 	ASSERT_RTNL();
       
   600 	WARN_ON(in_interrupt());
       
   601 	while (test_and_set_bit(__E1000_RESETTING, &adapter->flags))
       
   602 		msleep(1);
       
   603 	e1000_down(adapter);
       
   604 	e1000_up(adapter);
       
   605 	clear_bit(__E1000_RESETTING, &adapter->flags);
       
   606 }
       
   607 
       
   608 void e1000_reset(struct e1000_adapter *adapter)
       
   609 {
       
   610 	struct e1000_hw *hw = &adapter->hw;
       
   611 	u32 pba = 0, tx_space, min_tx_space, min_rx_space;
       
   612 	bool legacy_pba_adjust = false;
       
   613 	u16 hwm;
       
   614 
       
    615 	/* Repartition the PBA for MTUs greater than 9k.
       
    616 	 * CTRL.RST must be asserted for the change to take effect.
       
    617 	 */
       
   618 
       
   619 	switch (hw->mac_type) {
       
   620 	case e1000_82542_rev2_0:
       
   621 	case e1000_82542_rev2_1:
       
   622 	case e1000_82543:
       
   623 	case e1000_82544:
       
   624 	case e1000_82540:
       
   625 	case e1000_82541:
       
   626 	case e1000_82541_rev_2:
       
   627 		legacy_pba_adjust = true;
       
   628 		pba = E1000_PBA_48K;
       
   629 		break;
       
   630 	case e1000_82545:
       
   631 	case e1000_82545_rev_3:
       
   632 	case e1000_82546:
       
   633 	case e1000_ce4100:
       
   634 	case e1000_82546_rev_3:
       
   635 		pba = E1000_PBA_48K;
       
   636 		break;
       
   637 	case e1000_82547:
       
   638 	case e1000_82547_rev_2:
       
   639 		legacy_pba_adjust = true;
       
   640 		pba = E1000_PBA_30K;
       
   641 		break;
       
   642 	case e1000_undefined:
       
   643 	case e1000_num_macs:
       
   644 		break;
       
   645 	}
       
   646 
       
   647 	if (legacy_pba_adjust) {
       
   648 		if (hw->max_frame_size > E1000_RXBUFFER_8192)
       
   649 			pba -= 8; /* allocate more FIFO for Tx */
       
   650 
       
   651 		if (hw->mac_type == e1000_82547) {
       
   652 			adapter->tx_fifo_head = 0;
       
   653 			adapter->tx_head_addr = pba << E1000_TX_HEAD_ADDR_SHIFT;
       
   654 			adapter->tx_fifo_size =
       
   655 				(E1000_PBA_40K - pba) << E1000_PBA_BYTES_SHIFT;
       
   656 			atomic_set(&adapter->tx_fifo_stall, 0);
       
   657 		}
       
    658 	} else if (hw->max_frame_size > ETH_FRAME_LEN + ETH_FCS_LEN) {
       
   659 		/* adjust PBA for jumbo frames */
       
   660 		ew32(PBA, pba);
       
   661 
       
   662 		/* To maintain wire speed transmits, the Tx FIFO should be
       
   663 		 * large enough to accommodate two full transmit packets,
       
   664 		 * rounded up to the next 1KB and expressed in KB.  Likewise,
       
   665 		 * the Rx FIFO should be large enough to accommodate at least
       
   666 		 * one full receive packet and is similarly rounded up and
       
   667 		 * expressed in KB. */
       
   668 		pba = er32(PBA);
       
    669 		/* the upper 16 bits hold the Tx packet buffer allocation size in KB */
       
    670 		tx_space = pba >> 16;
       
    671 		/* the lower 16 bits hold the Rx packet buffer allocation size in KB */
       
   672 		pba &= 0xffff;
       
   673 		/*
       
    674 		 * the Tx FIFO also stores 16 bytes of information about each Tx
       
    675 		 * packet, but the Ethernet FCS is excluded because hardware appends it
       
   676 		 */
       
   677 		min_tx_space = (hw->max_frame_size +
       
   678 		                sizeof(struct e1000_tx_desc) -
       
   679 		                ETH_FCS_LEN) * 2;
       
   680 		min_tx_space = ALIGN(min_tx_space, 1024);
       
   681 		min_tx_space >>= 10;
       
   682 		/* software strips receive CRC, so leave room for it */
       
   683 		min_rx_space = hw->max_frame_size;
       
   684 		min_rx_space = ALIGN(min_rx_space, 1024);
       
   685 		min_rx_space >>= 10;
       
   686 
       
   687 		/* If current Tx allocation is less than the min Tx FIFO size,
       
   688 		 * and the min Tx FIFO size is less than the current Rx FIFO
       
   689 		 * allocation, take space away from current Rx allocation */
       
   690 		if (tx_space < min_tx_space &&
       
   691 		    ((min_tx_space - tx_space) < pba)) {
       
   692 			pba = pba - (min_tx_space - tx_space);
       
   693 
       
   694 			/* PCI/PCIx hardware has PBA alignment constraints */
       
   695 			switch (hw->mac_type) {
       
   696 			case e1000_82545 ... e1000_82546_rev_3:
       
   697 				pba &= ~(E1000_PBA_8K - 1);
       
   698 				break;
       
   699 			default:
       
   700 				break;
       
   701 			}
       
   702 
       
   703 			/* if short on rx space, rx wins and must trump tx
       
   704 			 * adjustment or use Early Receive if available */
       
   705 			if (pba < min_rx_space)
       
   706 				pba = min_rx_space;
       
   707 		}
       
   708 	}
       
   709 
       
   710 	ew32(PBA, pba);
       
   711 
       
   712 	/*
       
   713 	 * flow control settings:
       
   714 	 * The high water mark must be low enough to fit one full frame
       
   715 	 * (or the size used for early receive) above it in the Rx FIFO.
       
   716 	 * Set it to the lower of:
       
   717 	 * - 90% of the Rx FIFO size, and
       
   718 	 * - the full Rx FIFO size minus the early receive size (for parts
       
   719 	 *   with ERT support assuming ERT set to E1000_ERT_2048), or
       
   720 	 * - the full Rx FIFO size minus one full frame
       
   721 	 */
       
   722 	hwm = min(((pba << 10) * 9 / 10),
       
   723 		  ((pba << 10) - hw->max_frame_size));
       
   724 
       
   725 	hw->fc_high_water = hwm & 0xFFF8;	/* 8-byte granularity */
       
   726 	hw->fc_low_water = hw->fc_high_water - 8;
       
   727 	hw->fc_pause_time = E1000_FC_PAUSE_TIME;
       
   728 	hw->fc_send_xon = 1;
       
   729 	hw->fc = hw->original_fc;
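
	/* Worked example with assumed values: for pba = 48 (KB of Rx FIFO)
	 * and max_frame_size = 1518, the candidates are
	 * 49152 * 9 / 10 = 44236 and 49152 - 1518 = 47634, so hwm = 44236;
	 * the 8-byte mask then yields fc_high_water = 44232 and
	 * fc_low_water = 44224.
	 */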
       
   730 
       
   731 	/* Allow time for pending master requests to run */
       
   732 	e1000_reset_hw(hw);
       
   733 	if (hw->mac_type >= e1000_82544)
       
   734 		ew32(WUC, 0);
       
   735 
       
   736 	if (e1000_init_hw(hw))
       
   737 		e_dev_err("Hardware Error\n");
       
   738 	e1000_update_mng_vlan(adapter);
       
   739 
       
   740 	/* if (adapter->hwflags & HWFLAGS_PHY_PWR_BIT) { */
       
   741 	if (hw->mac_type >= e1000_82544 &&
       
   742 	    hw->autoneg == 1 &&
       
   743 	    hw->autoneg_advertised == ADVERTISE_1000_FULL) {
       
   744 		u32 ctrl = er32(CTRL);
       
   745 		/* clear phy power management bit if we are in gig only mode,
       
   746 		 * which if enabled will attempt negotiation to 100Mb, which
       
   747 		 * can cause a loss of link at power off or driver unload */
       
   748 		ctrl &= ~E1000_CTRL_SWDPIN3;
       
   749 		ew32(CTRL, ctrl);
       
   750 	}
       
   751 
       
   752 	/* Enable h/w to recognize an 802.1Q VLAN Ethernet packet */
       
   753 	ew32(VET, ETHERNET_IEEE_VLAN_TYPE);
       
   754 
       
   755 	e1000_reset_adaptive(hw);
       
   756 	e1000_phy_get_info(hw, &adapter->phy_info);
       
   757 
       
   758 	e1000_release_manageability(adapter);
       
   759 }
       
   760 
       
   761 /**
       
    762  * e1000_dump_eeprom - dump the EEPROM for users having checksum issues
       
   763  **/
       
   764 static void e1000_dump_eeprom(struct e1000_adapter *adapter)
       
   765 {
       
   766 	struct net_device *netdev = adapter->netdev;
       
   767 	struct ethtool_eeprom eeprom;
       
   768 	const struct ethtool_ops *ops = netdev->ethtool_ops;
       
   769 	u8 *data;
       
   770 	int i;
       
   771 	u16 csum_old, csum_new = 0;
       
   772 
       
   773 	eeprom.len = ops->get_eeprom_len(netdev);
       
   774 	eeprom.offset = 0;
       
   775 
       
   776 	data = kmalloc(eeprom.len, GFP_KERNEL);
       
   777 	if (!data)
       
   778 		return;
       
   779 
       
   780 	ops->get_eeprom(netdev, &eeprom, data);
       
   781 
       
   782 	csum_old = (data[EEPROM_CHECKSUM_REG * 2]) +
       
   783 		   (data[EEPROM_CHECKSUM_REG * 2 + 1] << 8);
       
   784 	for (i = 0; i < EEPROM_CHECKSUM_REG * 2; i += 2)
       
   785 		csum_new += data[i] + (data[i + 1] << 8);
       
   786 	csum_new = EEPROM_SUM - csum_new;
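
	/* EEPROM checksum scheme: the 16-bit words from offset 0 up to and
	 * including EEPROM_CHECKSUM_REG must sum (modulo 2^16) to EEPROM_SUM
	 * (0xBABA on these parts), so the expected checksum word computed
	 * above is EEPROM_SUM minus the sum of all preceding words.
	 */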
       
   787 
       
   788 	pr_err("/*********************/\n");
       
   789 	pr_err("Current EEPROM Checksum : 0x%04x\n", csum_old);
       
   790 	pr_err("Calculated              : 0x%04x\n", csum_new);
       
   791 
       
   792 	pr_err("Offset    Values\n");
       
   793 	pr_err("========  ======\n");
       
   794 	print_hex_dump(KERN_ERR, "", DUMP_PREFIX_OFFSET, 16, 1, data, 128, 0);
       
   795 
       
   796 	pr_err("Include this output when contacting your support provider.\n");
       
   797 	pr_err("This is not a software error! Something bad happened to\n");
       
   798 	pr_err("your hardware or EEPROM image. Ignoring this problem could\n");
       
   799 	pr_err("result in further problems, possibly loss of data,\n");
       
   800 	pr_err("corruption or system hangs!\n");
       
   801 	pr_err("The MAC Address will be reset to 00:00:00:00:00:00,\n");
       
   802 	pr_err("which is invalid and requires you to set the proper MAC\n");
       
   803 	pr_err("address manually before continuing to enable this network\n");
       
   804 	pr_err("device. Please inspect the EEPROM dump and report the\n");
       
   805 	pr_err("issue to your hardware vendor or Intel Customer Support.\n");
       
   806 	pr_err("/*********************/\n");
       
   807 
       
   808 	kfree(data);
       
   809 }
       
   810 
       
   811 /**
       
   812  * e1000_is_need_ioport - determine if an adapter needs ioport resources or not
       
   813  * @pdev: PCI device information struct
       
   814  *
       
   815  * Return true if an adapter needs ioport resources
       
   816  **/
       
   817 static int e1000_is_need_ioport(struct pci_dev *pdev)
       
   818 {
       
   819 	switch (pdev->device) {
       
   820 	case E1000_DEV_ID_82540EM:
       
   821 	case E1000_DEV_ID_82540EM_LOM:
       
   822 	case E1000_DEV_ID_82540EP:
       
   823 	case E1000_DEV_ID_82540EP_LOM:
       
   824 	case E1000_DEV_ID_82540EP_LP:
       
   825 	case E1000_DEV_ID_82541EI:
       
   826 	case E1000_DEV_ID_82541EI_MOBILE:
       
   827 	case E1000_DEV_ID_82541ER:
       
   828 	case E1000_DEV_ID_82541ER_LOM:
       
   829 	case E1000_DEV_ID_82541GI:
       
   830 	case E1000_DEV_ID_82541GI_LF:
       
   831 	case E1000_DEV_ID_82541GI_MOBILE:
       
   832 	case E1000_DEV_ID_82544EI_COPPER:
       
   833 	case E1000_DEV_ID_82544EI_FIBER:
       
   834 	case E1000_DEV_ID_82544GC_COPPER:
       
   835 	case E1000_DEV_ID_82544GC_LOM:
       
   836 	case E1000_DEV_ID_82545EM_COPPER:
       
   837 	case E1000_DEV_ID_82545EM_FIBER:
       
   838 	case E1000_DEV_ID_82546EB_COPPER:
       
   839 	case E1000_DEV_ID_82546EB_FIBER:
       
   840 	case E1000_DEV_ID_82546EB_QUAD_COPPER:
       
   841 		return true;
       
   842 	default:
       
   843 		return false;
       
   844 	}
       
   845 }
       
   846 
       
   847 static netdev_features_t e1000_fix_features(struct net_device *netdev,
       
   848 	netdev_features_t features)
       
   849 {
       
   850 	/*
       
   851 	 * Since there is no support for separate rx/tx vlan accel
       
   852 	 * enable/disable make sure tx flag is always in same state as rx.
       
   853 	 */
       
   854 	if (features & NETIF_F_HW_VLAN_RX)
       
   855 		features |= NETIF_F_HW_VLAN_TX;
       
   856 	else
       
   857 		features &= ~NETIF_F_HW_VLAN_TX;
       
   858 
       
   859 	return features;
       
   860 }
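
/* The coupling enforced above mirrors the hardware: VLAN offload on these
 * parts hangs off a single control bit (CTRL.VME), so receive tag stripping
 * and transmit tag insertion cannot be toggled independently, and
 * e1000_fix_features() keeps the two feature flags in lockstep.
 */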
       
   861 
       
   862 static int e1000_set_features(struct net_device *netdev,
       
   863 	netdev_features_t features)
       
   864 {
       
   865 	struct e1000_adapter *adapter = netdev_priv(netdev);
       
   866 	netdev_features_t changed = features ^ netdev->features;
       
   867 
       
   868 	if (changed & NETIF_F_HW_VLAN_RX)
       
   869 		e1000_vlan_mode(netdev, features);
       
   870 
       
   871 	if (!(changed & NETIF_F_RXCSUM))
       
   872 		return 0;
       
   873 
       
   874 	adapter->rx_csum = !!(features & NETIF_F_RXCSUM);
       
   875 
       
   876 	if (netif_running(netdev))
       
   877 		e1000_reinit_locked(adapter);
       
   878 	else
       
   879 		e1000_reset(adapter);
       
   880 
       
   881 	return 0;
       
   882 }
       
   883 
       
   884 static const struct net_device_ops e1000_netdev_ops = {
       
   885 	.ndo_open		= e1000_open,
       
   886 	.ndo_stop		= e1000_close,
       
   887 	.ndo_start_xmit		= e1000_xmit_frame,
       
   888 	.ndo_get_stats		= e1000_get_stats,
       
   889 	.ndo_set_rx_mode	= e1000_set_rx_mode,
       
   890 	.ndo_set_mac_address	= e1000_set_mac,
       
   891 	.ndo_tx_timeout		= e1000_tx_timeout,
       
   892 	.ndo_change_mtu		= e1000_change_mtu,
       
   893 	.ndo_do_ioctl		= e1000_ioctl,
       
   894 	.ndo_validate_addr	= eth_validate_addr,
       
   895 	.ndo_vlan_rx_add_vid	= e1000_vlan_rx_add_vid,
       
   896 	.ndo_vlan_rx_kill_vid	= e1000_vlan_rx_kill_vid,
       
   897 #ifdef CONFIG_NET_POLL_CONTROLLER
       
   898 	.ndo_poll_controller	= e1000_netpoll,
       
   899 #endif
       
   900 	.ndo_fix_features	= e1000_fix_features,
       
   901 	.ndo_set_features	= e1000_set_features,
       
   902 };
       
   903 
       
   904 /**
       
   905  * e1000_init_hw_struct - initialize members of hw struct
       
   906  * @adapter: board private struct
       
   907  * @hw: structure used by e1000_hw.c
       
   908  *
       
   909  * Factors out initialization of the e1000_hw struct to its own function
       
   910  * that can be called very early at init (just after struct allocation).
       
   911  * Fields are initialized based on PCI device information and
       
   912  * OS network device settings (MTU size).
       
   913  * Returns negative error codes if MAC type setup fails.
       
   914  */
       
   915 static int e1000_init_hw_struct(struct e1000_adapter *adapter,
       
   916 				struct e1000_hw *hw)
       
   917 {
       
   918 	struct pci_dev *pdev = adapter->pdev;
       
   919 
       
   920 	/* PCI config space info */
       
   921 	hw->vendor_id = pdev->vendor;
       
   922 	hw->device_id = pdev->device;
       
   923 	hw->subsystem_vendor_id = pdev->subsystem_vendor;
       
   924 	hw->subsystem_id = pdev->subsystem_device;
       
   925 	hw->revision_id = pdev->revision;
       
   926 
       
   927 	pci_read_config_word(pdev, PCI_COMMAND, &hw->pci_cmd_word);
       
   928 
       
   929 	hw->max_frame_size = adapter->netdev->mtu +
       
   930 			     ENET_HEADER_SIZE + ETHERNET_FCS_SIZE;
       
   931 	hw->min_frame_size = MINIMUM_ETHERNET_FRAME_SIZE;
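
	/* For example, at the default MTU of 1500 this gives a max_frame_size
	 * of 1500 + 14 (Ethernet header) + 4 (FCS) = 1518 bytes, the classic
	 * maximum untagged Ethernet frame.
	 */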
       
   932 
       
   933 	/* identify the MAC */
       
   934 	if (e1000_set_mac_type(hw)) {
       
   935 		e_err(probe, "Unknown MAC Type\n");
       
   936 		return -EIO;
       
   937 	}
       
   938 
       
   939 	switch (hw->mac_type) {
       
   940 	default:
       
   941 		break;
       
   942 	case e1000_82541:
       
   943 	case e1000_82547:
       
   944 	case e1000_82541_rev_2:
       
   945 	case e1000_82547_rev_2:
       
   946 		hw->phy_init_script = 1;
       
   947 		break;
       
   948 	}
       
   949 
       
   950 	e1000_set_media_type(hw);
       
   951 	e1000_get_bus_info(hw);
       
   952 
       
   953 	hw->wait_autoneg_complete = false;
       
   954 	hw->tbi_compatibility_en = true;
       
   955 	hw->adaptive_ifs = true;
       
   956 
       
   957 	/* Copper options */
       
   958 
       
   959 	if (hw->media_type == e1000_media_type_copper) {
       
   960 		hw->mdix = AUTO_ALL_MODES;
       
   961 		hw->disable_polarity_correction = false;
       
   962 		hw->master_slave = E1000_MASTER_SLAVE;
       
   963 	}
       
   964 
       
   965 	return 0;
       
   966 }
       
   967 
       
   968 /**
       
   969  * e1000_probe - Device Initialization Routine
       
   970  * @pdev: PCI device information struct
       
   971  * @ent: entry in e1000_pci_tbl
       
   972  *
       
   973  * Returns 0 on success, negative on failure
       
   974  *
       
   975  * e1000_probe initializes an adapter identified by a pci_dev structure.
       
   976  * The OS initialization, configuring of the adapter private structure,
       
   977  * and a hardware reset occur.
       
   978  **/
       
   979 static int __devinit e1000_probe(struct pci_dev *pdev,
       
   980 				 const struct pci_device_id *ent)
       
   981 {
       
   982 	struct net_device *netdev;
       
   983 	struct e1000_adapter *adapter;
       
   984 	struct e1000_hw *hw;
       
   985 
       
   986 	static int cards_found = 0;
       
   987 	static int global_quad_port_a = 0; /* global ksp3 port a indication */
       
   988 	int i, err, pci_using_dac;
       
   989 	u16 eeprom_data = 0;
       
   990 	u16 tmp = 0;
       
   991 	u16 eeprom_apme_mask = E1000_EEPROM_APME;
       
   992 	int bars, need_ioport;
       
   993 
       
   994 	/* do not allocate ioport bars when not needed */
       
   995 	need_ioport = e1000_is_need_ioport(pdev);
       
   996 	if (need_ioport) {
       
   997 		bars = pci_select_bars(pdev, IORESOURCE_MEM | IORESOURCE_IO);
       
   998 		err = pci_enable_device(pdev);
       
   999 	} else {
       
  1000 		bars = pci_select_bars(pdev, IORESOURCE_MEM);
       
  1001 		err = pci_enable_device_mem(pdev);
       
  1002 	}
       
  1003 	if (err)
       
  1004 		return err;
       
  1005 
       
  1006 	err = pci_request_selected_regions(pdev, bars, e1000_driver_name);
       
  1007 	if (err)
       
  1008 		goto err_pci_reg;
       
  1009 
       
  1010 	pci_set_master(pdev);
       
  1011 	err = pci_save_state(pdev);
       
  1012 	if (err)
       
  1013 		goto err_alloc_etherdev;
       
  1014 
       
  1015 	err = -ENOMEM;
       
  1016 	netdev = alloc_etherdev(sizeof(struct e1000_adapter));
       
  1017 	if (!netdev)
       
  1018 		goto err_alloc_etherdev;
       
  1019 
       
  1020 	SET_NETDEV_DEV(netdev, &pdev->dev);
       
  1021 
       
  1022 	pci_set_drvdata(pdev, netdev);
       
  1023 	adapter = netdev_priv(netdev);
       
  1024 	adapter->netdev = netdev;
       
  1025 	adapter->pdev = pdev;
       
  1026 	adapter->msg_enable = netif_msg_init(debug, DEFAULT_MSG_ENABLE);
       
  1027 	adapter->bars = bars;
       
  1028 	adapter->need_ioport = need_ioport;
       
  1029 
       
  1030 	hw = &adapter->hw;
       
  1031 	hw->back = adapter;
       
  1032 
       
  1033 	err = -EIO;
       
  1034 	hw->hw_addr = pci_ioremap_bar(pdev, BAR_0);
       
  1035 	if (!hw->hw_addr)
       
  1036 		goto err_ioremap;
       
  1037 
       
  1038 	if (adapter->need_ioport) {
       
  1039 		for (i = BAR_1; i <= BAR_5; i++) {
       
  1040 			if (pci_resource_len(pdev, i) == 0)
       
  1041 				continue;
       
  1042 			if (pci_resource_flags(pdev, i) & IORESOURCE_IO) {
       
  1043 				hw->io_base = pci_resource_start(pdev, i);
       
  1044 				break;
       
  1045 			}
       
  1046 		}
       
  1047 	}
       
  1048 
       
   1049 	/* initialize the hw struct so the if (hw->...) tests below are valid */
       
  1050 	err = e1000_init_hw_struct(adapter, hw);
       
  1051 	if (err)
       
  1052 		goto err_sw_init;
       
  1053 
       
  1054 	/*
       
  1055 	 * there is a workaround being applied below that limits
       
  1056 	 * 64-bit DMA addresses to 64-bit hardware.  There are some
       
  1057 	 * 32-bit adapters that Tx hang when given 64-bit DMA addresses
       
  1058 	 */
       
  1059 	pci_using_dac = 0;
       
  1060 	if ((hw->bus_type == e1000_bus_type_pcix) &&
       
  1061 	    !dma_set_mask(&pdev->dev, DMA_BIT_MASK(64))) {
       
  1062 		/*
       
  1063 		 * according to DMA-API-HOWTO, coherent calls will always
       
  1064 		 * succeed if the set call did
       
  1065 		 */
       
  1066 		dma_set_coherent_mask(&pdev->dev, DMA_BIT_MASK(64));
       
  1067 		pci_using_dac = 1;
       
  1068 	} else {
       
  1069 		err = dma_set_mask(&pdev->dev, DMA_BIT_MASK(32));
       
  1070 		if (err) {
       
  1071 			pr_err("No usable DMA config, aborting\n");
       
  1072 			goto err_dma;
       
  1073 		}
       
  1074 		dma_set_coherent_mask(&pdev->dev, DMA_BIT_MASK(32));
       
  1075 	}
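
	/* pci_using_dac records whether the 64-bit mask was accepted; it is
	 * consulted further down to advertise NETIF_F_HIGHDMA, allowing the
	 * stack to hand over skbs located in high memory. The 32-bit fallback
	 * keeps all DMA buffers below 4 GB instead.
	 */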
       
  1076 
       
  1077 	netdev->netdev_ops = &e1000_netdev_ops;
       
  1078 	e1000_set_ethtool_ops(netdev);
       
  1079 	netdev->watchdog_timeo = 5 * HZ;
       
  1080 	netif_napi_add(netdev, &adapter->napi, e1000_clean, 64);
       
  1081 
       
  1082 	strncpy(netdev->name, pci_name(pdev), sizeof(netdev->name) - 1);
       
  1083 
       
  1084 	adapter->bd_number = cards_found;
       
  1085 
       
  1086 	/* setup the private structure */
       
  1087 
       
  1088 	err = e1000_sw_init(adapter);
       
  1089 	if (err)
       
  1090 		goto err_sw_init;
       
  1091 
       
  1092 	err = -EIO;
       
  1093 	if (hw->mac_type == e1000_ce4100) {
       
  1094 		hw->ce4100_gbe_mdio_base_virt =
       
  1095 					ioremap(pci_resource_start(pdev, BAR_1),
       
  1096 		                                pci_resource_len(pdev, BAR_1));
       
  1097 
       
  1098 		if (!hw->ce4100_gbe_mdio_base_virt)
       
  1099 			goto err_mdio_ioremap;
       
  1100 	}
       
  1101 
       
  1102 	if (hw->mac_type >= e1000_82543) {
       
  1103 		netdev->hw_features = NETIF_F_SG |
       
  1104 				   NETIF_F_HW_CSUM |
       
  1105 				   NETIF_F_HW_VLAN_RX;
       
  1106 		netdev->features = NETIF_F_HW_VLAN_TX |
       
  1107 				   NETIF_F_HW_VLAN_FILTER;
       
  1108 	}
       
  1109 
       
  1110 	if ((hw->mac_type >= e1000_82544) &&
       
  1111 	   (hw->mac_type != e1000_82547))
       
  1112 		netdev->hw_features |= NETIF_F_TSO;
       
  1113 
       
  1114 	netdev->priv_flags |= IFF_SUPP_NOFCS;
       
  1115 
       
  1116 	netdev->features |= netdev->hw_features;
       
  1117 	netdev->hw_features |= NETIF_F_RXCSUM;
       
  1118 	netdev->hw_features |= NETIF_F_RXFCS;
       
  1119 
       
  1120 	if (pci_using_dac) {
       
  1121 		netdev->features |= NETIF_F_HIGHDMA;
       
  1122 		netdev->vlan_features |= NETIF_F_HIGHDMA;
       
  1123 	}
       
  1124 
       
  1125 	netdev->vlan_features |= NETIF_F_TSO;
       
  1126 	netdev->vlan_features |= NETIF_F_HW_CSUM;
       
  1127 	netdev->vlan_features |= NETIF_F_SG;
       
  1128 
       
  1129 	netdev->priv_flags |= IFF_UNICAST_FLT;
       
  1130 
       
  1131 	adapter->en_mng_pt = e1000_enable_mng_pass_thru(hw);
       
  1132 
       
  1133 	/* initialize eeprom parameters */
       
  1134 	if (e1000_init_eeprom_params(hw)) {
       
  1135 		e_err(probe, "EEPROM initialization failed\n");
       
  1136 		goto err_eeprom;
       
  1137 	}
       
  1138 
       
  1139 	/* before reading the EEPROM, reset the controller to
       
  1140 	 * put the device in a known good starting state */
       
  1141 
       
  1142 	e1000_reset_hw(hw);
       
  1143 
       
  1144 	/* make sure the EEPROM is good */
       
  1145 	if (e1000_validate_eeprom_checksum(hw) < 0) {
       
  1146 		e_err(probe, "The EEPROM Checksum Is Not Valid\n");
       
  1147 		e1000_dump_eeprom(adapter);
       
  1148 		/*
       
   1149 		 * set MAC address to all zeroes to invalidate and temporarily
       
   1150 		 * disable this device for the user. This blocks regular
       
   1151 		 * traffic while still permitting ethtool ioctls from reaching
       
   1152 		 * the hardware as well as allowing the user to run the
       
   1153 		 * interface after manually setting a hw addr using
       
   1154 		 * `ip link set address`
       
  1155 		 */
       
  1156 		memset(hw->mac_addr, 0, netdev->addr_len);
       
  1157 	} else {
       
  1158 		/* copy the MAC address out of the EEPROM */
       
  1159 		if (e1000_read_mac_addr(hw))
       
  1160 			e_err(probe, "EEPROM Read Error\n");
       
  1161 	}
       
   1162 	/* don't block initialization here due to bad MAC address */
       
  1163 	memcpy(netdev->dev_addr, hw->mac_addr, netdev->addr_len);
       
  1164 	memcpy(netdev->perm_addr, hw->mac_addr, netdev->addr_len);
       
  1165 
       
  1166 	if (!is_valid_ether_addr(netdev->perm_addr))
       
  1167 		e_err(probe, "Invalid MAC Address\n");
       
  1168 
       
  1169 
       
  1170 	INIT_DELAYED_WORK(&adapter->watchdog_task, e1000_watchdog);
       
  1171 	INIT_DELAYED_WORK(&adapter->fifo_stall_task,
       
  1172 			  e1000_82547_tx_fifo_stall_task);
       
  1173 	INIT_DELAYED_WORK(&adapter->phy_info_task, e1000_update_phy_info_task);
       
  1174 	INIT_WORK(&adapter->reset_task, e1000_reset_task);
       
  1175 
       
  1176 	e1000_check_options(adapter);
       
  1177 
       
  1178 	/* Initial Wake on LAN setting
       
  1179 	 * If APM wake is enabled in the EEPROM,
       
  1180 	 * enable the ACPI Magic Packet filter
       
  1181 	 */
       
  1182 
       
  1183 	switch (hw->mac_type) {
       
  1184 	case e1000_82542_rev2_0:
       
  1185 	case e1000_82542_rev2_1:
       
  1186 	case e1000_82543:
       
  1187 		break;
       
  1188 	case e1000_82544:
       
  1189 		e1000_read_eeprom(hw,
       
  1190 			EEPROM_INIT_CONTROL2_REG, 1, &eeprom_data);
       
  1191 		eeprom_apme_mask = E1000_EEPROM_82544_APM;
       
  1192 		break;
       
  1193 	case e1000_82546:
       
  1194 	case e1000_82546_rev_3:
       
  1195 		if (er32(STATUS) & E1000_STATUS_FUNC_1){
       
  1196 			e1000_read_eeprom(hw,
       
  1197 				EEPROM_INIT_CONTROL3_PORT_B, 1, &eeprom_data);
       
  1198 			break;
       
  1199 		}
       
  1200 		/* Fall Through */
       
  1201 	default:
       
  1202 		e1000_read_eeprom(hw,
       
  1203 			EEPROM_INIT_CONTROL3_PORT_A, 1, &eeprom_data);
       
  1204 		break;
       
  1205 	}
       
  1206 	if (eeprom_data & eeprom_apme_mask)
       
  1207 		adapter->eeprom_wol |= E1000_WUFC_MAG;
       
  1208 
       
  1209 	/* now that we have the eeprom settings, apply the special cases
       
  1210 	 * where the eeprom may be wrong or the board simply won't support
       
  1211 	 * wake on lan on a particular port */
       
  1212 	switch (pdev->device) {
       
  1213 	case E1000_DEV_ID_82546GB_PCIE:
       
  1214 		adapter->eeprom_wol = 0;
       
  1215 		break;
       
  1216 	case E1000_DEV_ID_82546EB_FIBER:
       
  1217 	case E1000_DEV_ID_82546GB_FIBER:
       
  1218 		/* Wake events only supported on port A for dual fiber
       
  1219 		 * regardless of eeprom setting */
       
  1220 		if (er32(STATUS) & E1000_STATUS_FUNC_1)
       
  1221 			adapter->eeprom_wol = 0;
       
  1222 		break;
       
  1223 	case E1000_DEV_ID_82546GB_QUAD_COPPER_KSP3:
       
  1224 		/* if quad port adapter, disable WoL on all but port A */
       
  1225 		if (global_quad_port_a != 0)
       
  1226 			adapter->eeprom_wol = 0;
       
  1227 		else
       
  1228 			adapter->quad_port_a = true;
       
  1229 		/* Reset for multiple quad port adapters */
       
  1230 		if (++global_quad_port_a == 4)
       
  1231 			global_quad_port_a = 0;
       
  1232 		break;
       
  1233 	}
       
  1234 
       
  1235 	/* initialize the wol settings based on the eeprom settings */
       
  1236 	adapter->wol = adapter->eeprom_wol;
       
  1237 	device_set_wakeup_enable(&adapter->pdev->dev, adapter->wol);
       
  1238 
       
  1239 	/* Auto detect PHY address */
       
  1240 	if (hw->mac_type == e1000_ce4100) {
       
  1241 		for (i = 0; i < 32; i++) {
       
  1242 			hw->phy_addr = i;
       
  1243 			e1000_read_phy_reg(hw, PHY_ID2, &tmp);
       
  1244 			if (tmp == 0 || tmp == 0xFF) {
       
  1245 				if (i == 31)
       
  1246 					goto err_eeprom;
       
  1247 				continue;
       
  1248 			} else
       
  1249 				break;
       
  1250 		}
       
  1251 	}
       
  1252 
       
  1253 	/* reset the hardware with the new settings */
       
  1254 	e1000_reset(adapter);
       
  1255 
       
   1256 	// offer the device to the EtherCAT master module
       
  1257 	adapter->ecdev = ecdev_offer(netdev, ec_poll, THIS_MODULE);
       
  1258 	if (adapter->ecdev) {
       
  1259 		if (ecdev_open(adapter->ecdev)) {
       
  1260 			ecdev_withdraw(adapter->ecdev);
       
  1261 			goto err_register;
       
  1262 		}
       
  1263 	} else {
       
  1264 		strcpy(netdev->name, "eth%d");
       
  1265 		err = register_netdev(netdev);
       
  1266 		if (err)
       
  1267 			goto err_register;
       
  1268 	}
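
	/* EtherCAT handover: ecdev_offer() returns a non-NULL handle when the
	 * master claims this NIC (typically matched against a configured MAC
	 * address), in which case the net device is never registered with the
	 * kernel and the master drives the card exclusively through ec_poll().
	 * ecdev_withdraw() is the counterpart, used here on open failure and
	 * again in e1000_remove().
	 */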
       
  1269 
       
  1270 	e1000_vlan_filter_on_off(adapter, false);
       
  1271 
       
  1272 	/* print bus type/speed/width info */
       
  1273 	e_info(probe, "(PCI%s:%dMHz:%d-bit) %pM\n",
       
  1274 	       ((hw->bus_type == e1000_bus_type_pcix) ? "-X" : ""),
       
  1275 	       ((hw->bus_speed == e1000_bus_speed_133) ? 133 :
       
  1276 		(hw->bus_speed == e1000_bus_speed_120) ? 120 :
       
  1277 		(hw->bus_speed == e1000_bus_speed_100) ? 100 :
       
  1278 		(hw->bus_speed == e1000_bus_speed_66) ? 66 : 33),
       
  1279 	       ((hw->bus_width == e1000_bus_width_64) ? 64 : 32),
       
  1280 	       netdev->dev_addr);
       
  1281 
       
  1282 	if (!adapter->ecdev) {
       
  1283 		/* carrier off reporting is important to ethtool even BEFORE open */
       
  1284 		netif_carrier_off(netdev);
       
  1285 	}
       
  1286 
       
  1287 	e_info(probe, "Intel(R) PRO/1000 Network Connection\n");
       
  1288 
       
  1289 	cards_found++;
       
  1290 	return 0;
       
  1291 
       
  1292 err_register:
       
  1293 err_eeprom:
       
  1294 	e1000_phy_hw_reset(hw);
       
  1295 
       
  1296 	if (hw->flash_address)
       
  1297 		iounmap(hw->flash_address);
       
  1298 	kfree(adapter->tx_ring);
       
  1299 	kfree(adapter->rx_ring);
       
  1300 err_dma:
       
  1301 err_sw_init:
       
  1302 err_mdio_ioremap:
       
  1303 	iounmap(hw->ce4100_gbe_mdio_base_virt);
       
  1304 	iounmap(hw->hw_addr);
       
  1305 err_ioremap:
       
  1306 	free_netdev(netdev);
       
  1307 err_alloc_etherdev:
       
  1308 	pci_release_selected_regions(pdev, bars);
       
  1309 err_pci_reg:
       
  1310 	pci_disable_device(pdev);
       
  1311 	return err;
       
  1312 }
       
  1313 
       
  1314 /**
       
  1315  * e1000_remove - Device Removal Routine
       
  1316  * @pdev: PCI device information struct
       
  1317  *
       
  1318  * e1000_remove is called by the PCI subsystem to alert the driver
       
  1319  * that it should release a PCI device.  This could be caused by a
       
  1320  * Hot-Plug event, or because the driver is going to be removed from
       
  1321  * memory.
       
  1322  **/
       
  1323 
       
  1324 static void __devexit e1000_remove(struct pci_dev *pdev)
       
  1325 {
       
  1326 	struct net_device *netdev = pci_get_drvdata(pdev);
       
  1327 	struct e1000_adapter *adapter = netdev_priv(netdev);
       
  1328 	struct e1000_hw *hw = &adapter->hw;
       
  1329 
       
  1330 	e1000_down_and_stop(adapter);
       
  1331 	e1000_release_manageability(adapter);
       
  1332 
       
  1333 	if (adapter->ecdev) {
       
  1334 		ecdev_close(adapter->ecdev);
       
  1335 		ecdev_withdraw(adapter->ecdev);
       
  1336 	} else {
       
  1337 		unregister_netdev(netdev);
       
  1338 	}
       
  1339 
       
  1340 	e1000_phy_hw_reset(hw);
       
  1341 
       
  1342 	kfree(adapter->tx_ring);
       
  1343 	kfree(adapter->rx_ring);
       
  1344 
       
  1345 	if (hw->mac_type == e1000_ce4100)
       
  1346 		iounmap(hw->ce4100_gbe_mdio_base_virt);
       
  1347 	iounmap(hw->hw_addr);
       
  1348 	if (hw->flash_address)
       
  1349 		iounmap(hw->flash_address);
       
  1350 	pci_release_selected_regions(pdev, adapter->bars);
       
  1351 
       
  1352 	free_netdev(netdev);
       
  1353 
       
  1354 	pci_disable_device(pdev);
       
  1355 }
       
  1356 
       
  1357 /**
       
  1358  * e1000_sw_init - Initialize general software structures (struct e1000_adapter)
       
  1359  * @adapter: board private structure to initialize
       
  1360  *
       
  1361  * e1000_sw_init initializes the Adapter private data structure.
       
  1362  * e1000_init_hw_struct MUST be called before this function
       
  1363  **/
       
  1364 
       
  1365 static int __devinit e1000_sw_init(struct e1000_adapter *adapter)
       
  1366 {
       
  1367 	adapter->rx_buffer_len = MAXIMUM_ETHERNET_VLAN_SIZE;
       
  1368 
       
  1369 	adapter->num_tx_queues = 1;
       
  1370 	adapter->num_rx_queues = 1;
       
  1371 
       
  1372 	if (e1000_alloc_queues(adapter)) {
       
  1373 		e_err(probe, "Unable to allocate memory for queues\n");
       
  1374 		return -ENOMEM;
       
  1375 	}
       
  1376 
       
  1377 	/* Explicitly disable IRQ since the NIC can be in any state. */
       
  1378 	e1000_irq_disable(adapter);
       
  1379 
       
  1380 	spin_lock_init(&adapter->stats_lock);
       
  1381 	mutex_init(&adapter->mutex);
       
  1382 
       
  1383 	set_bit(__E1000_DOWN, &adapter->flags);
       
  1384 
       
  1385 	return 0;
       
  1386 }
       
  1387 
       
  1388 /**
       
  1389  * e1000_alloc_queues - Allocate memory for all rings
       
  1390  * @adapter: board private structure to initialize
       
  1391  *
       
  1392  * We allocate one ring per queue at run-time since we don't know the
       
  1393  * number of queues at compile-time.
       
  1394  **/
       
  1395 
       
  1396 static int __devinit e1000_alloc_queues(struct e1000_adapter *adapter)
       
  1397 {
       
  1398 	adapter->tx_ring = kcalloc(adapter->num_tx_queues,
       
  1399 	                           sizeof(struct e1000_tx_ring), GFP_KERNEL);
       
  1400 	if (!adapter->tx_ring)
       
  1401 		return -ENOMEM;
       
  1402 
       
  1403 	adapter->rx_ring = kcalloc(adapter->num_rx_queues,
       
  1404 	                           sizeof(struct e1000_rx_ring), GFP_KERNEL);
       
  1405 	if (!adapter->rx_ring) {
       
  1406 		kfree(adapter->tx_ring);
       
  1407 		return -ENOMEM;
       
  1408 	}
       
  1409 
       
  1410 	return E1000_SUCCESS;
       
  1411 }
       
  1412 
       
  1413 /**
       
  1414  * e1000_open - Called when a network interface is made active
       
  1415  * @netdev: network interface device structure
       
  1416  *
       
  1417  * Returns 0 on success, negative value on failure
       
  1418  *
       
  1419  * The open entry point is called when a network interface is made
       
  1420  * active by the system (IFF_UP).  At this point all resources needed
       
  1421  * for transmit and receive operations are allocated, the interrupt
       
  1422  * handler is registered with the OS, the watchdog task is started,
       
  1423  * and the stack is notified that the interface is ready.
       
  1424  **/
       
  1425 
       
  1426 static int e1000_open(struct net_device *netdev)
       
  1427 {
       
  1428 	struct e1000_adapter *adapter = netdev_priv(netdev);
       
  1429 	struct e1000_hw *hw = &adapter->hw;
       
  1430 	int err;
       
  1431 
       
  1432 	/* disallow open during test */
       
  1433 	if (test_bit(__E1000_TESTING, &adapter->flags))
       
  1434 		return -EBUSY;
       
  1435 
       
  1436 	netif_carrier_off(netdev);
       
  1437 
       
  1438 	/* allocate transmit descriptors */
       
  1439 	err = e1000_setup_all_tx_resources(adapter);
       
  1440 	if (err)
       
  1441 		goto err_setup_tx;
       
  1442 
       
  1443 	/* allocate receive descriptors */
       
  1444 	err = e1000_setup_all_rx_resources(adapter);
       
  1445 	if (err)
       
  1446 		goto err_setup_rx;
       
  1447 
       
  1448 	e1000_power_up_phy(adapter);
       
  1449 
       
  1450 	adapter->mng_vlan_id = E1000_MNG_VLAN_NONE;
       
  1451 	if ((hw->mng_cookie.status &
       
  1452 			  E1000_MNG_DHCP_COOKIE_STATUS_VLAN_SUPPORT)) {
       
  1453 		e1000_update_mng_vlan(adapter);
       
  1454 	}
       
  1455 
       
  1456 	/* before we allocate an interrupt, we must be ready to handle it.
       
  1457 	 * Setting DEBUG_SHIRQ in the kernel makes it fire an interrupt
       
  1458 	 * as soon as we call request_irq, so we have to set up our
       
  1459 	 * clean_rx handler before we do so.  */
       
  1460 	e1000_configure(adapter);
       
  1461 
       
  1462 	err = e1000_request_irq(adapter);
       
  1463 	if (err)
       
  1464 		goto err_req_irq;
       
  1465 
       
  1466 	/* From here on the code is the same as e1000_up() */
       
  1467 	clear_bit(__E1000_DOWN, &adapter->flags);
       
  1468 
       
  1469 	if (!adapter->ecdev) {
       
  1470 		napi_enable(&adapter->napi);
       
  1471 
       
  1472 		e1000_irq_enable(adapter);
       
  1473 
       
  1474 		netif_start_queue(netdev);
       
  1475 	}
       
  1476 
       
  1477 	/* fire a link status change interrupt to start the watchdog */
       
  1478 	ew32(ICS, E1000_ICS_LSC);
       
  1479 
       
  1480 	return E1000_SUCCESS;
       
  1481 
       
  1482 err_req_irq:
       
  1483 	e1000_power_down_phy(adapter);
       
  1484 	e1000_free_all_rx_resources(adapter);
       
  1485 err_setup_rx:
       
  1486 	e1000_free_all_tx_resources(adapter);
       
  1487 err_setup_tx:
       
  1488 	e1000_reset(adapter);
       
  1489 
       
  1490 	return err;
       
  1491 }
       
  1492 
       
  1493 /**
       
  1494  * e1000_close - Disables a network interface
       
  1495  * @netdev: network interface device structure
       
  1496  *
       
  1497  * Returns 0, this is not allowed to fail
       
  1498  *
       
  1499  * The close entry point is called when an interface is de-activated
       
  1500  * by the OS.  The hardware is still under the drivers control, but
       
  1501  * needs to be disabled.  A global MAC reset is issued to stop the
       
  1502  * hardware, and all transmit and receive resources are freed.
       
  1503  **/
       
  1504 
       
  1505 static int e1000_close(struct net_device *netdev)
       
  1506 {
       
  1507 	struct e1000_adapter *adapter = netdev_priv(netdev);
       
  1508 	struct e1000_hw *hw = &adapter->hw;
       
  1509 
       
  1510 	WARN_ON(test_bit(__E1000_RESETTING, &adapter->flags));
       
  1511 	e1000_down(adapter);
       
  1512 	e1000_power_down_phy(adapter);
       
  1513 	e1000_free_irq(adapter);
       
  1514 
       
  1515 	e1000_free_all_tx_resources(adapter);
       
  1516 	e1000_free_all_rx_resources(adapter);
       
  1517 
       
  1518 	/* kill manageability vlan ID if supported, but not if a vlan with
       
  1519 	 * the same ID is registered on the host OS (let 8021q kill it) */
       
  1520 	if ((hw->mng_cookie.status &
       
  1521 			  E1000_MNG_DHCP_COOKIE_STATUS_VLAN_SUPPORT) &&
       
  1522 	     !test_bit(adapter->mng_vlan_id, adapter->active_vlans)) {
       
  1523 		e1000_vlan_rx_kill_vid(netdev, adapter->mng_vlan_id);
       
  1524 	}
       
  1525 
       
  1526 	return 0;
       
  1527 }
       
  1528 
       
  1529 /**
       
  1530  * e1000_check_64k_bound - check that memory doesn't cross 64kB boundary
       
  1531  * @adapter: address of board private structure
       
  1532  * @start: address of beginning of memory
       
  1533  * @len: length of memory
       
  1534  **/
       
  1535 static bool e1000_check_64k_bound(struct e1000_adapter *adapter, void *start,
       
  1536 				  unsigned long len)
       
  1537 {
       
  1538 	struct e1000_hw *hw = &adapter->hw;
       
  1539 	unsigned long begin = (unsigned long)start;
       
  1540 	unsigned long end = begin + len;
       
  1541 
       
  1542 	/* First rev 82545 and 82546 need to not allow any memory
       
  1543 	 * write location to cross 64k boundary due to errata 23 */
       
  1544 	if (hw->mac_type == e1000_82545 ||
       
  1545 	    hw->mac_type == e1000_ce4100 ||
       
  1546 	    hw->mac_type == e1000_82546) {
       
  1547 		return ((begin ^ (end - 1)) >> 16) == 0;
       
  1548 	}
       
  1549 
       
  1550 	return true;
       
  1551 }
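/* Illustrative sketch (not driver code): two addresses share one 64 KiB
 * region exactly when their upper bits agree, i.e. when
 * ((begin ^ (end - 1)) >> 16) == 0.  With hypothetical values:
 *
 *   begin = 0x0000f000, len = 0x2000  ->  end - 1 = 0x00010fff
 *   begin ^ (end - 1)  = 0x0001ffff, >> 16 = 1   ->  crosses a boundary
 *
 *   begin = 0x00010000, len = 0x2000  ->  end - 1 = 0x00011fff
 *   begin ^ (end - 1)  = 0x00001fff, >> 16 = 0   ->  stays inside 64 KiB
 */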
       
  1552 
       
  1553 /**
       
  1554  * e1000_setup_tx_resources - allocate Tx resources (Descriptors)
       
  1555  * @adapter: board private structure
       
  1556  * @txdr:    tx descriptor ring (for a specific queue) to setup
       
  1557  *
       
  1558  * Return 0 on success, negative on failure
       
  1559  **/
       
  1560 
       
  1561 static int e1000_setup_tx_resources(struct e1000_adapter *adapter,
       
  1562 				    struct e1000_tx_ring *txdr)
       
  1563 {
       
  1564 	struct pci_dev *pdev = adapter->pdev;
       
  1565 	int size;
       
  1566 
       
  1567 	size = sizeof(struct e1000_buffer) * txdr->count;
       
  1568 	txdr->buffer_info = vzalloc(size);
       
  1569 	if (!txdr->buffer_info) {
       
  1570 		e_err(probe, "Unable to allocate memory for the Tx descriptor "
       
  1571 		      "ring\n");
       
  1572 		return -ENOMEM;
       
  1573 	}
       
  1574 
       
  1575 	/* round up to nearest 4K */
       
  1576 
       
  1577 	txdr->size = txdr->count * sizeof(struct e1000_tx_desc);
       
  1578 	txdr->size = ALIGN(txdr->size, 4096);
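	/* Worked example (illustrative): with the default 256 Tx descriptors,
	 * 256 * sizeof(struct e1000_tx_desc) = 256 * 16 = 4096 bytes, which
	 * is already 4 KiB aligned; a hypothetical count of 320 would give
	 * 5120 bytes, and ALIGN(5120, 4096) rounds that up to 8192.
	 */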
       
  1579 
       
  1580 	txdr->desc = dma_alloc_coherent(&pdev->dev, txdr->size, &txdr->dma,
       
  1581 					GFP_KERNEL);
       
  1582 	if (!txdr->desc) {
       
  1583 setup_tx_desc_die:
       
  1584 		vfree(txdr->buffer_info);
       
  1585 		e_err(probe, "Unable to allocate memory for the Tx descriptor "
       
  1586 		      "ring\n");
       
  1587 		return -ENOMEM;
       
  1588 	}
       
  1589 
       
  1590 	/* Fix for errata 23, can't cross 64kB boundary */
       
  1591 	if (!e1000_check_64k_bound(adapter, txdr->desc, txdr->size)) {
       
  1592 		void *olddesc = txdr->desc;
       
  1593 		dma_addr_t olddma = txdr->dma;
       
  1594 		e_err(tx_err, "txdr align check failed: %u bytes at %p\n",
       
  1595 		      txdr->size, txdr->desc);
       
  1596 		/* Try again, without freeing the previous */
       
  1597 		txdr->desc = dma_alloc_coherent(&pdev->dev, txdr->size,
       
  1598 						&txdr->dma, GFP_KERNEL);
       
  1599 		/* Failed allocation, critical failure */
       
  1600 		if (!txdr->desc) {
       
  1601 			dma_free_coherent(&pdev->dev, txdr->size, olddesc,
       
  1602 					  olddma);
       
  1603 			goto setup_tx_desc_die;
       
  1604 		}
       
  1605 
       
  1606 		if (!e1000_check_64k_bound(adapter, txdr->desc, txdr->size)) {
       
  1607 			/* give up */
       
  1608 			dma_free_coherent(&pdev->dev, txdr->size, txdr->desc,
       
  1609 					  txdr->dma);
       
  1610 			dma_free_coherent(&pdev->dev, txdr->size, olddesc,
       
  1611 					  olddma);
       
  1612 			e_err(probe, "Unable to allocate aligned memory "
       
  1613 			      "for the transmit descriptor ring\n");
       
  1614 			vfree(txdr->buffer_info);
       
  1615 			return -ENOMEM;
       
  1616 		} else {
       
  1617 			/* Free old allocation, new allocation was successful */
       
  1618 			dma_free_coherent(&pdev->dev, txdr->size, olddesc,
       
  1619 					  olddma);
       
  1620 		}
       
  1621 	}
       
  1622 	memset(txdr->desc, 0, txdr->size);
       
  1623 
       
  1624 	txdr->next_to_use = 0;
       
  1625 	txdr->next_to_clean = 0;
       
  1626 
       
  1627 	return 0;
       
  1628 }
       
  1629 
       
  1630 /**
       
  1631  * e1000_setup_all_tx_resources - wrapper to allocate Tx resources
       
  1632  * 				  (Descriptors) for all queues
       
  1633  * @adapter: board private structure
       
  1634  *
       
  1635  * Return 0 on success, negative on failure
       
  1636  **/
       
  1637 
       
  1638 int e1000_setup_all_tx_resources(struct e1000_adapter *adapter)
       
  1639 {
       
  1640 	int i, err = 0;
       
  1641 
       
  1642 	for (i = 0; i < adapter->num_tx_queues; i++) {
       
  1643 		err = e1000_setup_tx_resources(adapter, &adapter->tx_ring[i]);
       
  1644 		if (err) {
       
  1645 			e_err(probe, "Allocation for Tx Queue %u failed\n", i);
       
  1646 			for (i-- ; i >= 0; i--)
       
  1647 				e1000_free_tx_resources(adapter,
       
  1648 							&adapter->tx_ring[i]);
       
  1649 			break;
       
  1650 		}
       
  1651 	}
       
  1652 
       
  1653 	return err;
       
  1654 }
       
  1655 
       
  1656 /**
       
  1657  * e1000_configure_tx - Configure 8254x Transmit Unit after Reset
       
  1658  * @adapter: board private structure
       
  1659  *
       
  1660  * Configure the Tx unit of the MAC after a reset.
       
  1661  **/
       
  1662 
       
  1663 static void e1000_configure_tx(struct e1000_adapter *adapter)
       
  1664 {
       
  1665 	u64 tdba;
       
  1666 	struct e1000_hw *hw = &adapter->hw;
       
  1667 	u32 tdlen, tctl, tipg;
       
  1668 	u32 ipgr1, ipgr2;
       
  1669 
       
  1670 	/* Setup the HW Tx Head and Tail descriptor pointers */
       
  1671 
       
  1672 	switch (adapter->num_tx_queues) {
       
  1673 	case 1:
       
  1674 	default:
       
  1675 		tdba = adapter->tx_ring[0].dma;
       
  1676 		tdlen = adapter->tx_ring[0].count *
       
  1677 			sizeof(struct e1000_tx_desc);
       
  1678 		ew32(TDLEN, tdlen);
       
  1679 		ew32(TDBAH, (tdba >> 32));
       
  1680 		ew32(TDBAL, (tdba & 0x00000000ffffffffULL));
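		/* Illustrative: a 64-bit ring base of 0x0000000123456000 is
		 * split as TDBAH = 0x00000001 (tdba >> 32) and
		 * TDBAL = 0x23456000 (tdba & 0xffffffff). */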
       
  1681 		ew32(TDT, 0);
       
  1682 		ew32(TDH, 0);
       
  1683 		adapter->tx_ring[0].tdh = ((hw->mac_type >= e1000_82543) ? E1000_TDH : E1000_82542_TDH);
       
  1684 		adapter->tx_ring[0].tdt = ((hw->mac_type >= e1000_82543) ? E1000_TDT : E1000_82542_TDT);
       
  1685 		break;
       
  1686 	}
       
  1687 
       
  1688 	/* Set the default values for the Tx Inter Packet Gap timer */
       
  1689 	if ((hw->media_type == e1000_media_type_fiber ||
       
  1690 	     hw->media_type == e1000_media_type_internal_serdes))
       
  1691 		tipg = DEFAULT_82543_TIPG_IPGT_FIBER;
       
  1692 	else
       
  1693 		tipg = DEFAULT_82543_TIPG_IPGT_COPPER;
       
  1694 
       
  1695 	switch (hw->mac_type) {
       
  1696 	case e1000_82542_rev2_0:
       
  1697 	case e1000_82542_rev2_1:
       
  1698 		tipg = DEFAULT_82542_TIPG_IPGT;
       
  1699 		ipgr1 = DEFAULT_82542_TIPG_IPGR1;
       
  1700 		ipgr2 = DEFAULT_82542_TIPG_IPGR2;
       
  1701 		break;
       
  1702 	default:
       
  1703 		ipgr1 = DEFAULT_82543_TIPG_IPGR1;
       
  1704 		ipgr2 = DEFAULT_82543_TIPG_IPGR2;
       
  1705 		break;
       
  1706 	}
       
  1707 	tipg |= ipgr1 << E1000_TIPG_IPGR1_SHIFT;
       
  1708 	tipg |= ipgr2 << E1000_TIPG_IPGR2_SHIFT;
       
  1709 	ew32(TIPG, tipg);
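	/* Illustrative: for copper 82543+ parts the defaults above are
	 * IPGT = 8, IPGR1 = 8 and IPGR2 = 6, so the value written is
	 * 8 | (8 << 10) | (6 << 20) = 0x00602008.
	 */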
       
  1710 
       
  1711 	/* Set the Tx Interrupt Delay register */
       
  1712 
       
  1713 	ew32(TIDV, adapter->tx_int_delay);
       
  1714 	if (hw->mac_type >= e1000_82540)
       
  1715 		ew32(TADV, adapter->tx_abs_int_delay);
       
  1716 
       
  1717 	/* Program the Transmit Control Register */
       
  1718 
       
  1719 	tctl = er32(TCTL);
       
  1720 	tctl &= ~E1000_TCTL_CT;
       
  1721 	tctl |= E1000_TCTL_PSP | E1000_TCTL_RTLC |
       
  1722 		(E1000_COLLISION_THRESHOLD << E1000_CT_SHIFT);
       
  1723 
       
  1724 	e1000_config_collision_dist(hw);
       
  1725 
       
  1726 	/* Setup Transmit Descriptor Settings for eop descriptor */
       
  1727 	adapter->txd_cmd = E1000_TXD_CMD_EOP | E1000_TXD_CMD_IFCS;
       
  1728 
       
  1729 	/* only set IDE if we are delaying interrupts using the timers */
       
  1730 	if (adapter->tx_int_delay)
       
  1731 		adapter->txd_cmd |= E1000_TXD_CMD_IDE;
       
  1732 
       
  1733 	if (hw->mac_type < e1000_82543)
       
  1734 		adapter->txd_cmd |= E1000_TXD_CMD_RPS;
       
  1735 	else
       
  1736 		adapter->txd_cmd |= E1000_TXD_CMD_RS;
       
  1737 
       
  1738 	/* Cache if we're 82544 running in PCI-X because we'll
       
  1739 	 * need this to apply a workaround later in the send path. */
       
  1740 	if (hw->mac_type == e1000_82544 &&
       
  1741 	    hw->bus_type == e1000_bus_type_pcix)
       
  1742 		adapter->pcix_82544 = true;
       
  1743 
       
  1744 	ew32(TCTL, tctl);
       
  1745 
       
  1746 }
       
  1747 
       
  1748 /**
       
  1749  * e1000_setup_rx_resources - allocate Rx resources (Descriptors)
       
  1750  * @adapter: board private structure
       
  1751  * @rxdr:    rx descriptor ring (for a specific queue) to setup
       
  1752  *
       
  1753  * Returns 0 on success, negative on failure
       
  1754  **/
       
  1755 
       
  1756 static int e1000_setup_rx_resources(struct e1000_adapter *adapter,
       
  1757 				    struct e1000_rx_ring *rxdr)
       
  1758 {
       
  1759 	struct pci_dev *pdev = adapter->pdev;
       
  1760 	int size, desc_len;
       
  1761 
       
  1762 	size = sizeof(struct e1000_buffer) * rxdr->count;
       
  1763 	rxdr->buffer_info = vzalloc(size);
       
  1764 	if (!rxdr->buffer_info) {
       
  1765 		e_err(probe, "Unable to allocate memory for the Rx descriptor "
       
  1766 		      "ring\n");
       
  1767 		return -ENOMEM;
       
  1768 	}
       
  1769 
       
  1770 	desc_len = sizeof(struct e1000_rx_desc);
       
  1771 
       
  1772 	/* Round up to nearest 4K */
       
  1773 
       
  1774 	rxdr->size = rxdr->count * desc_len;
       
  1775 	rxdr->size = ALIGN(rxdr->size, 4096);
       
  1776 
       
  1777 	rxdr->desc = dma_alloc_coherent(&pdev->dev, rxdr->size, &rxdr->dma,
       
  1778 					GFP_KERNEL);
       
  1779 
       
  1780 	if (!rxdr->desc) {
       
  1781 		e_err(probe, "Unable to allocate memory for the Rx descriptor "
       
  1782 		      "ring\n");
       
  1783 setup_rx_desc_die:
       
  1784 		vfree(rxdr->buffer_info);
       
  1785 		return -ENOMEM;
       
  1786 	}
       
  1787 
       
  1788 	/* Fix for errata 23, can't cross 64kB boundary */
       
  1789 	if (!e1000_check_64k_bound(adapter, rxdr->desc, rxdr->size)) {
       
  1790 		void *olddesc = rxdr->desc;
       
  1791 		dma_addr_t olddma = rxdr->dma;
       
  1792 		e_err(rx_err, "rxdr align check failed: %u bytes at %p\n",
       
  1793 		      rxdr->size, rxdr->desc);
       
  1794 		/* Try again, without freeing the previous */
       
  1795 		rxdr->desc = dma_alloc_coherent(&pdev->dev, rxdr->size,
       
  1796 						&rxdr->dma, GFP_KERNEL);
       
  1797 		/* Failed allocation, critical failure */
       
  1798 		if (!rxdr->desc) {
       
  1799 			dma_free_coherent(&pdev->dev, rxdr->size, olddesc,
       
  1800 					  olddma);
       
  1801 			e_err(probe, "Unable to allocate memory for the Rx "
       
  1802 			      "descriptor ring\n");
       
  1803 			goto setup_rx_desc_die;
       
  1804 		}
       
  1805 
       
  1806 		if (!e1000_check_64k_bound(adapter, rxdr->desc, rxdr->size)) {
       
  1807 			/* give up */
       
  1808 			dma_free_coherent(&pdev->dev, rxdr->size, rxdr->desc,
       
  1809 					  rxdr->dma);
       
  1810 			dma_free_coherent(&pdev->dev, rxdr->size, olddesc,
       
  1811 					  olddma);
       
  1812 			e_err(probe, "Unable to allocate aligned memory for "
       
  1813 			      "the Rx descriptor ring\n");
       
  1814 			goto setup_rx_desc_die;
       
  1815 		} else {
       
  1816 			/* Free old allocation, new allocation was successful */
       
  1817 			dma_free_coherent(&pdev->dev, rxdr->size, olddesc,
       
  1818 					  olddma);
       
  1819 		}
       
  1820 	}
       
  1821 	memset(rxdr->desc, 0, rxdr->size);
       
  1822 
       
  1823 	rxdr->next_to_clean = 0;
       
  1824 	rxdr->next_to_use = 0;
       
  1825 	rxdr->rx_skb_top = NULL;
       
  1826 
       
  1827 	return 0;
       
  1828 }
       
  1829 
       
  1830 /**
       
  1831  * e1000_setup_all_rx_resources - wrapper to allocate Rx resources
       
  1832  * 				  (Descriptors) for all queues
       
  1833  * @adapter: board private structure
       
  1834  *
       
  1835  * Return 0 on success, negative on failure
       
  1836  **/
       
  1837 
       
  1838 int e1000_setup_all_rx_resources(struct e1000_adapter *adapter)
       
  1839 {
       
  1840 	int i, err = 0;
       
  1841 
       
  1842 	for (i = 0; i < adapter->num_rx_queues; i++) {
       
  1843 		err = e1000_setup_rx_resources(adapter, &adapter->rx_ring[i]);
       
  1844 		if (err) {
       
  1845 			e_err(probe, "Allocation for Rx Queue %u failed\n", i);
       
  1846 			for (i-- ; i >= 0; i--)
       
  1847 				e1000_free_rx_resources(adapter,
       
  1848 							&adapter->rx_ring[i]);
       
  1849 			break;
       
  1850 		}
       
  1851 	}
       
  1852 
       
  1853 	return err;
       
  1854 }
       
  1855 
       
  1856 /**
       
  1857  * e1000_setup_rctl - configure the receive control registers
       
  1858  * @adapter: Board private structure
       
  1859  **/
       
  1860 static void e1000_setup_rctl(struct e1000_adapter *adapter)
       
  1861 {
       
  1862 	struct e1000_hw *hw = &adapter->hw;
       
  1863 	u32 rctl;
       
  1864 
       
  1865 	rctl = er32(RCTL);
       
  1866 
       
  1867 	rctl &= ~(3 << E1000_RCTL_MO_SHIFT);
       
  1868 
       
  1869 	rctl |= E1000_RCTL_BAM | E1000_RCTL_LBM_NO |
       
  1870 		E1000_RCTL_RDMTS_HALF |
       
  1871 		(hw->mc_filter_type << E1000_RCTL_MO_SHIFT);
       
  1872 
       
  1873 	if (hw->tbi_compatibility_on == 1)
       
  1874 		rctl |= E1000_RCTL_SBP;
       
  1875 	else
       
  1876 		rctl &= ~E1000_RCTL_SBP;
       
  1877 
       
  1878 	if (adapter->netdev->mtu <= ETH_DATA_LEN)
       
  1879 		rctl &= ~E1000_RCTL_LPE;
       
  1880 	else
       
  1881 		rctl |= E1000_RCTL_LPE;
       
  1882 
       
  1883 	/* Setup buffer sizes */
       
  1884 	rctl &= ~E1000_RCTL_SZ_4096;
       
  1885 	rctl |= E1000_RCTL_BSEX;
       
  1886 	switch (adapter->rx_buffer_len) {
       
  1887 		case E1000_RXBUFFER_2048:
       
  1888 		default:
       
  1889 			rctl |= E1000_RCTL_SZ_2048;
       
  1890 			rctl &= ~E1000_RCTL_BSEX;
       
  1891 			break;
       
  1892 		case E1000_RXBUFFER_4096:
       
  1893 			rctl |= E1000_RCTL_SZ_4096;
       
  1894 			break;
       
  1895 		case E1000_RXBUFFER_8192:
       
  1896 			rctl |= E1000_RCTL_SZ_8192;
       
  1897 			break;
       
  1898 		case E1000_RXBUFFER_16384:
       
  1899 			rctl |= E1000_RCTL_SZ_16384;
       
  1900 			break;
       
  1901 	}
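	/* Illustrative: when BSEX is set the SZ field is scaled by 16, so the
	 * bit pattern that normally selects 256-byte buffers yields
	 * 256 * 16 = 4096 (E1000_RCTL_SZ_4096) and the 1024-byte pattern
	 * yields 16384; only the 2048-byte case above clears BSEX.
	 */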
       
  1902 
       
  1903 	ew32(RCTL, rctl);
       
  1904 }
       
  1905 
       
  1906 /**
       
  1907  * e1000_configure_rx - Configure 8254x Receive Unit after Reset
       
  1908  * @adapter: board private structure
       
  1909  *
       
  1910  * Configure the Rx unit of the MAC after a reset.
       
  1911  **/
       
  1912 
       
  1913 static void e1000_configure_rx(struct e1000_adapter *adapter)
       
  1914 {
       
  1915 	u64 rdba;
       
  1916 	struct e1000_hw *hw = &adapter->hw;
       
  1917 	u32 rdlen, rctl, rxcsum;
       
  1918 
       
  1919 	if (adapter->netdev->mtu > ETH_DATA_LEN) {
       
  1920 		rdlen = adapter->rx_ring[0].count *
       
  1921 		        sizeof(struct e1000_rx_desc);
       
  1922 		adapter->clean_rx = e1000_clean_jumbo_rx_irq;
       
  1923 		adapter->alloc_rx_buf = e1000_alloc_jumbo_rx_buffers;
       
  1924 	} else {
       
  1925 		rdlen = adapter->rx_ring[0].count *
       
  1926 		        sizeof(struct e1000_rx_desc);
       
  1927 		adapter->clean_rx = e1000_clean_rx_irq;
       
  1928 		adapter->alloc_rx_buf = e1000_alloc_rx_buffers;
       
  1929 	}
       
  1930 
       
  1931 	/* disable receives while setting up the descriptors */
       
  1932 	rctl = er32(RCTL);
       
  1933 	ew32(RCTL, rctl & ~E1000_RCTL_EN);
       
  1934 
       
  1935 	/* set the Receive Delay Timer Register */
       
  1936 	ew32(RDTR, adapter->rx_int_delay);
       
  1937 
       
  1938 	if (hw->mac_type >= e1000_82540) {
       
  1939 		ew32(RADV, adapter->rx_abs_int_delay);
       
  1940 		if (adapter->itr_setting != 0)
       
  1941 			ew32(ITR, 1000000000 / (adapter->itr * 256));
       
  1942 	}
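	/* The ITR register counts in 256 ns units, so the write above is
	 * (1 / itr seconds) expressed in those units.  Sketch, assuming an
	 * adapter->itr of 20000 interrupts/s:
	 *
	 *   1000000000 / (20000 * 256) = 195  ->  roughly 50 us per interrupt
	 */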
       
  1943 
       
  1944 	/* Setup the HW Rx Head and Tail Descriptor Pointers and
       
  1945 	 * the Base and Length of the Rx Descriptor Ring */
       
  1946 	switch (adapter->num_rx_queues) {
       
  1947 	case 1:
       
  1948 	default:
       
  1949 		rdba = adapter->rx_ring[0].dma;
       
  1950 		ew32(RDLEN, rdlen);
       
  1951 		ew32(RDBAH, (rdba >> 32));
       
  1952 		ew32(RDBAL, (rdba & 0x00000000ffffffffULL));
       
  1953 		ew32(RDT, 0);
       
  1954 		ew32(RDH, 0);
       
  1955 		adapter->rx_ring[0].rdh = ((hw->mac_type >= e1000_82543) ? E1000_RDH : E1000_82542_RDH);
       
  1956 		adapter->rx_ring[0].rdt = ((hw->mac_type >= e1000_82543) ? E1000_RDT : E1000_82542_RDT);
       
  1957 		break;
       
  1958 	}
       
  1959 
       
  1960 	/* Enable 82543 Receive Checksum Offload for TCP and UDP */
       
  1961 	if (hw->mac_type >= e1000_82543) {
       
  1962 		rxcsum = er32(RXCSUM);
       
  1963 		if (adapter->rx_csum)
       
  1964 			rxcsum |= E1000_RXCSUM_TUOFL;
       
  1965 		else
       
  1966 			/* don't need to clear IPPCSE as it defaults to 0 */
       
  1967 			rxcsum &= ~E1000_RXCSUM_TUOFL;
       
  1968 		ew32(RXCSUM, rxcsum);
       
  1969 	}
       
  1970 
       
  1971 	/* Enable Receives */
       
  1972 	ew32(RCTL, rctl | E1000_RCTL_EN);
       
  1973 }
       
  1974 
       
  1975 /**
       
  1976  * e1000_free_tx_resources - Free Tx Resources per Queue
       
  1977  * @adapter: board private structure
       
  1978  * @tx_ring: Tx descriptor ring for a specific queue
       
  1979  *
       
  1980  * Free all transmit software resources
       
  1981  **/
       
  1982 
       
  1983 static void e1000_free_tx_resources(struct e1000_adapter *adapter,
       
  1984 				    struct e1000_tx_ring *tx_ring)
       
  1985 {
       
  1986 	struct pci_dev *pdev = adapter->pdev;
       
  1987 
       
  1988 	e1000_clean_tx_ring(adapter, tx_ring);
       
  1989 
       
  1990 	vfree(tx_ring->buffer_info);
       
  1991 	tx_ring->buffer_info = NULL;
       
  1992 
       
  1993 	dma_free_coherent(&pdev->dev, tx_ring->size, tx_ring->desc,
       
  1994 			  tx_ring->dma);
       
  1995 
       
  1996 	tx_ring->desc = NULL;
       
  1997 }
       
  1998 
       
  1999 /**
       
  2000  * e1000_free_all_tx_resources - Free Tx Resources for All Queues
       
  2001  * @adapter: board private structure
       
  2002  *
       
  2003  * Free all transmit software resources
       
  2004  **/
       
  2005 
       
  2006 void e1000_free_all_tx_resources(struct e1000_adapter *adapter)
       
  2007 {
       
  2008 	int i;
       
  2009 
       
  2010 	for (i = 0; i < adapter->num_tx_queues; i++)
       
  2011 		e1000_free_tx_resources(adapter, &adapter->tx_ring[i]);
       
  2012 }
       
  2013 
       
  2014 static void e1000_unmap_and_free_tx_resource(struct e1000_adapter *adapter,
       
  2015 					     struct e1000_buffer *buffer_info)
       
  2016 {
       
  2017 	if (adapter->ecdev) {
       
  2018 		return;
       
  2019 	}
       
  2020 
       
  2021 	if (buffer_info->dma) {
       
  2022 		if (buffer_info->mapped_as_page)
       
  2023 			dma_unmap_page(&adapter->pdev->dev, buffer_info->dma,
       
  2024 				       buffer_info->length, DMA_TO_DEVICE);
       
  2025 		else
       
  2026 			dma_unmap_single(&adapter->pdev->dev, buffer_info->dma,
       
  2027 					 buffer_info->length,
       
  2028 					 DMA_TO_DEVICE);
       
  2029 		buffer_info->dma = 0;
       
  2030 	}
       
  2031 	if (buffer_info->skb) {
       
  2032 		dev_kfree_skb_any(buffer_info->skb);
       
  2033 		buffer_info->skb = NULL;
       
  2034 	}
       
  2035 	buffer_info->time_stamp = 0;
       
  2036 	/* buffer_info must be completely set up in the transmit path */
       
  2037 }
       
  2038 
       
  2039 /**
       
  2040  * e1000_clean_tx_ring - Free Tx Buffers
       
  2041  * @adapter: board private structure
       
  2042  * @tx_ring: ring to be cleaned
       
  2043  **/
       
  2044 
       
  2045 static void e1000_clean_tx_ring(struct e1000_adapter *adapter,
       
  2046 				struct e1000_tx_ring *tx_ring)
       
  2047 {
       
  2048 	struct e1000_hw *hw = &adapter->hw;
       
  2049 	struct e1000_buffer *buffer_info;
       
  2050 	unsigned long size;
       
  2051 	unsigned int i;
       
  2052 
       
  2053 	/* Free all the Tx ring sk_buffs */
       
  2054 
       
  2055 	for (i = 0; i < tx_ring->count; i++) {
       
  2056 		buffer_info = &tx_ring->buffer_info[i];
       
  2057 		e1000_unmap_and_free_tx_resource(adapter, buffer_info);
       
  2058 	}
       
  2059 
       
  2060 	size = sizeof(struct e1000_buffer) * tx_ring->count;
       
  2061 	memset(tx_ring->buffer_info, 0, size);
       
  2062 
       
  2063 	/* Zero out the descriptor ring */
       
  2064 
       
  2065 	memset(tx_ring->desc, 0, tx_ring->size);
       
  2066 
       
  2067 	tx_ring->next_to_use = 0;
       
  2068 	tx_ring->next_to_clean = 0;
       
  2069 	tx_ring->last_tx_tso = false;
       
  2070 
       
  2071 	writel(0, hw->hw_addr + tx_ring->tdh);
       
  2072 	writel(0, hw->hw_addr + tx_ring->tdt);
       
  2073 }
       
  2074 
       
  2075 /**
       
  2076  * e1000_clean_all_tx_rings - Free Tx Buffers for all queues
       
  2077  * @adapter: board private structure
       
  2078  **/
       
  2079 
       
  2080 static void e1000_clean_all_tx_rings(struct e1000_adapter *adapter)
       
  2081 {
       
  2082 	int i;
       
  2083 
       
  2084 	for (i = 0; i < adapter->num_tx_queues; i++)
       
  2085 		e1000_clean_tx_ring(adapter, &adapter->tx_ring[i]);
       
  2086 }
       
  2087 
       
  2088 /**
       
  2089  * e1000_free_rx_resources - Free Rx Resources
       
  2090  * @adapter: board private structure
       
  2091  * @rx_ring: ring to clean the resources from
       
  2092  *
       
  2093  * Free all receive software resources
       
  2094  **/
       
  2095 
       
  2096 static void e1000_free_rx_resources(struct e1000_adapter *adapter,
       
  2097 				    struct e1000_rx_ring *rx_ring)
       
  2098 {
       
  2099 	struct pci_dev *pdev = adapter->pdev;
       
  2100 
       
  2101 	e1000_clean_rx_ring(adapter, rx_ring);
       
  2102 
       
  2103 	vfree(rx_ring->buffer_info);
       
  2104 	rx_ring->buffer_info = NULL;
       
  2105 
       
  2106 	dma_free_coherent(&pdev->dev, rx_ring->size, rx_ring->desc,
       
  2107 			  rx_ring->dma);
       
  2108 
       
  2109 	rx_ring->desc = NULL;
       
  2110 }
       
  2111 
       
  2112 /**
       
  2113  * e1000_free_all_rx_resources - Free Rx Resources for All Queues
       
  2114  * @adapter: board private structure
       
  2115  *
       
  2116  * Free all receive software resources
       
  2117  **/
       
  2118 
       
  2119 void e1000_free_all_rx_resources(struct e1000_adapter *adapter)
       
  2120 {
       
  2121 	int i;
       
  2122 
       
  2123 	for (i = 0; i < adapter->num_rx_queues; i++)
       
  2124 		e1000_free_rx_resources(adapter, &adapter->rx_ring[i]);
       
  2125 }
       
  2126 
       
  2127 /**
       
  2128  * e1000_clean_rx_ring - Free Rx Buffers per Queue
       
  2129  * @adapter: board private structure
       
  2130  * @rx_ring: ring to free buffers from
       
  2131  **/
       
  2132 
       
  2133 static void e1000_clean_rx_ring(struct e1000_adapter *adapter,
       
  2134 				struct e1000_rx_ring *rx_ring)
       
  2135 {
       
  2136 	struct e1000_hw *hw = &adapter->hw;
       
  2137 	struct e1000_buffer *buffer_info;
       
  2138 	struct pci_dev *pdev = adapter->pdev;
       
  2139 	unsigned long size;
       
  2140 	unsigned int i;
       
  2141 
       
  2142 	/* Free all the Rx ring sk_buffs */
       
  2143 	for (i = 0; i < rx_ring->count; i++) {
       
  2144 		buffer_info = &rx_ring->buffer_info[i];
       
  2145 		if (buffer_info->dma &&
       
  2146 		    adapter->clean_rx == e1000_clean_rx_irq) {
       
  2147 			dma_unmap_single(&pdev->dev, buffer_info->dma,
       
  2148 			                 buffer_info->length,
       
  2149 					 DMA_FROM_DEVICE);
       
  2150 		} else if (buffer_info->dma &&
       
  2151 		           adapter->clean_rx == e1000_clean_jumbo_rx_irq) {
       
  2152 			dma_unmap_page(&pdev->dev, buffer_info->dma,
       
  2153 				       buffer_info->length,
       
  2154 				       DMA_FROM_DEVICE);
       
  2155 		}
       
  2156 
       
  2157 		buffer_info->dma = 0;
       
  2158 		if (buffer_info->page) {
       
  2159 			put_page(buffer_info->page);
       
  2160 			buffer_info->page = NULL;
       
  2161 		}
       
  2162 		if (buffer_info->skb) {
       
  2163 			dev_kfree_skb(buffer_info->skb);
       
  2164 			buffer_info->skb = NULL;
       
  2165 		}
       
  2166 	}
       
  2167 
       
  2168 	/* there also may be some cached data from a chained receive */
       
  2169 	if (rx_ring->rx_skb_top) {
       
  2170 		dev_kfree_skb(rx_ring->rx_skb_top);
       
  2171 		rx_ring->rx_skb_top = NULL;
       
  2172 	}
       
  2173 
       
  2174 	size = sizeof(struct e1000_buffer) * rx_ring->count;
       
  2175 	memset(rx_ring->buffer_info, 0, size);
       
  2176 
       
  2177 	/* Zero out the descriptor ring */
       
  2178 	memset(rx_ring->desc, 0, rx_ring->size);
       
  2179 
       
  2180 	rx_ring->next_to_clean = 0;
       
  2181 	rx_ring->next_to_use = 0;
       
  2182 
       
  2183 	writel(0, hw->hw_addr + rx_ring->rdh);
       
  2184 	writel(0, hw->hw_addr + rx_ring->rdt);
       
  2185 }
       
  2186 
       
  2187 /**
       
  2188  * e1000_clean_all_rx_rings - Free Rx Buffers for all queues
       
  2189  * @adapter: board private structure
       
  2190  **/
       
  2191 
       
  2192 static void e1000_clean_all_rx_rings(struct e1000_adapter *adapter)
       
  2193 {
       
  2194 	int i;
       
  2195 
       
  2196 	for (i = 0; i < adapter->num_rx_queues; i++)
       
  2197 		e1000_clean_rx_ring(adapter, &adapter->rx_ring[i]);
       
  2198 }
       
  2199 
       
  2200 /* The 82542 2.0 (revision 2) needs to have the receive unit in reset
       
  2201  * and memory write and invalidate disabled for certain operations
       
  2202  */
       
  2203 static void e1000_enter_82542_rst(struct e1000_adapter *adapter)
       
  2204 {
       
  2205 	struct e1000_hw *hw = &adapter->hw;
       
  2206 	struct net_device *netdev = adapter->netdev;
       
  2207 	u32 rctl;
       
  2208 
       
  2209 	e1000_pci_clear_mwi(hw);
       
  2210 
       
  2211 	rctl = er32(RCTL);
       
  2212 	rctl |= E1000_RCTL_RST;
       
  2213 	ew32(RCTL, rctl);
       
  2214 	E1000_WRITE_FLUSH();
       
  2215 	mdelay(5);
       
  2216 
       
  2217 	if (!adapter->ecdev && netif_running(netdev))
       
  2218 		e1000_clean_all_rx_rings(adapter);
       
  2219 }
       
  2220 
       
  2221 static void e1000_leave_82542_rst(struct e1000_adapter *adapter)
       
  2222 {
       
  2223 	struct e1000_hw *hw = &adapter->hw;
       
  2224 	struct net_device *netdev = adapter->netdev;
       
  2225 	u32 rctl;
       
  2226 
       
  2227 	rctl = er32(RCTL);
       
  2228 	rctl &= ~E1000_RCTL_RST;
       
  2229 	ew32(RCTL, rctl);
       
  2230 	E1000_WRITE_FLUSH();
       
  2231 	mdelay(5);
       
  2232 
       
  2233 	if (hw->pci_cmd_word & PCI_COMMAND_INVALIDATE)
       
  2234 		e1000_pci_set_mwi(hw);
       
  2235 
       
  2236 	if (adapter->ecdev || netif_running(netdev)) {
       
  2237 		/* No need to loop, because 82542 supports only 1 queue */
       
  2238 		struct e1000_rx_ring *ring = &adapter->rx_ring[0];
       
  2239 		e1000_configure_rx(adapter);
       
  2240 		if (adapter->ecdev) {
       
  2241 			/* fill rx ring completely! */
       
  2242 			adapter->alloc_rx_buf(adapter, ring, ring->count);
       
  2243 		} else {
       
  2244 			/* this one leaves the last ring element unallocated! */
       
  2245 			adapter->alloc_rx_buf(adapter, ring, E1000_DESC_UNUSED(ring));
       
  2246 		}
       
  2247 
       
  2248 	}
       
  2249 }
       
  2250 
       
  2251 /**
       
  2252  * e1000_set_mac - Change the Ethernet Address of the NIC
       
  2253  * @netdev: network interface device structure
       
  2254  * @p: pointer to an address structure
       
  2255  *
       
  2256  * Returns 0 on success, negative on failure
       
  2257  **/
       
  2258 
       
  2259 static int e1000_set_mac(struct net_device *netdev, void *p)
       
  2260 {
       
  2261 	struct e1000_adapter *adapter = netdev_priv(netdev);
       
  2262 	struct e1000_hw *hw = &adapter->hw;
       
  2263 	struct sockaddr *addr = p;
       
  2264 
       
  2265 	if (!is_valid_ether_addr(addr->sa_data))
       
  2266 		return -EADDRNOTAVAIL;
       
  2267 
       
  2268 	/* 82542 2.0 needs to be in reset to write receive address registers */
       
  2269 
       
  2270 	if (hw->mac_type == e1000_82542_rev2_0)
       
  2271 		e1000_enter_82542_rst(adapter);
       
  2272 
       
  2273 	memcpy(netdev->dev_addr, addr->sa_data, netdev->addr_len);
       
  2274 	memcpy(hw->mac_addr, addr->sa_data, netdev->addr_len);
       
  2275 
       
  2276 	e1000_rar_set(hw, hw->mac_addr, 0);
       
  2277 
       
  2278 	if (hw->mac_type == e1000_82542_rev2_0)
       
  2279 		e1000_leave_82542_rst(adapter);
       
  2280 
       
  2281 	return 0;
       
  2282 }
       
  2283 
       
  2284 /**
       
  2285  * e1000_set_rx_mode - Secondary Unicast, Multicast and Promiscuous mode set
       
  2286  * @netdev: network interface device structure
       
  2287  *
       
  2288  * The set_rx_mode entry point is called whenever the unicast or multicast
       
  2289  * address lists or the network interface flags are updated. This routine is
       
  2290  * responsible for configuring the hardware for proper unicast, multicast,
       
  2291  * promiscuous mode, and all-multi behavior.
       
  2292  **/
       
  2293 
       
  2294 static void e1000_set_rx_mode(struct net_device *netdev)
       
  2295 {
       
  2296 	struct e1000_adapter *adapter = netdev_priv(netdev);
       
  2297 	struct e1000_hw *hw = &adapter->hw;
       
  2298 	struct netdev_hw_addr *ha;
       
  2299 	bool use_uc = false;
       
  2300 	u32 rctl;
       
  2301 	u32 hash_value;
       
  2302 	int i, rar_entries = E1000_RAR_ENTRIES;
       
  2303 	int mta_reg_count = E1000_NUM_MTA_REGISTERS;
       
  2304 	u32 *mcarray = kcalloc(mta_reg_count, sizeof(u32), GFP_ATOMIC);
       
  2305 
       
  2306 	if (!mcarray) {
       
  2307 		e_err(probe, "memory allocation failed\n");
       
  2308 		return;
       
  2309 	}
       
  2310 
       
  2311 	/* Check for Promiscuous and All Multicast modes */
       
  2312 
       
  2313 	rctl = er32(RCTL);
       
  2314 
       
  2315 	if (netdev->flags & IFF_PROMISC) {
       
  2316 		rctl |= (E1000_RCTL_UPE | E1000_RCTL_MPE);
       
  2317 		rctl &= ~E1000_RCTL_VFE;
       
  2318 	} else {
       
  2319 		if (netdev->flags & IFF_ALLMULTI)
       
  2320 			rctl |= E1000_RCTL_MPE;
       
  2321 		else
       
  2322 			rctl &= ~E1000_RCTL_MPE;
       
  2323 		/* Enable VLAN filter if there is a VLAN */
       
  2324 		if (e1000_vlan_used(adapter))
       
  2325 			rctl |= E1000_RCTL_VFE;
       
  2326 	}
       
  2327 
       
  2328 	if (netdev_uc_count(netdev) > rar_entries - 1) {
       
  2329 		rctl |= E1000_RCTL_UPE;
       
  2330 	} else if (!(netdev->flags & IFF_PROMISC)) {
       
  2331 		rctl &= ~E1000_RCTL_UPE;
       
  2332 		use_uc = true;
       
  2333 	}
       
  2334 
       
  2335 	ew32(RCTL, rctl);
       
  2336 
       
  2337 	/* 82542 2.0 needs to be in reset to write receive address registers */
       
  2338 
       
  2339 	if (hw->mac_type == e1000_82542_rev2_0)
       
  2340 		e1000_enter_82542_rst(adapter);
       
  2341 
       
  2342 	/* load the first 14 addresses into the exact filters 1-14. Unicast
       
  2343 	 * addresses take precedence to avoid disabling unicast filtering
       
  2344 	 * when possible.
       
  2345 	 *
       
  2346 	 * RAR 0 is used for the station MAC address.

  2347 	 * If there are fewer than 14 addresses, clear the unused filters.
       
  2348 	 */
       
  2349 	i = 1;
       
  2350 	if (use_uc)
       
  2351 		netdev_for_each_uc_addr(ha, netdev) {
       
  2352 			if (i == rar_entries)
       
  2353 				break;
       
  2354 			e1000_rar_set(hw, ha->addr, i++);
       
  2355 		}
       
  2356 
       
  2357 	netdev_for_each_mc_addr(ha, netdev) {
       
  2358 		if (i == rar_entries) {
       
  2359 			/* load any remaining addresses into the hash table */
       
  2360 			u32 hash_reg, hash_bit, mta;
       
  2361 			hash_value = e1000_hash_mc_addr(hw, ha->addr);
       
  2362 			hash_reg = (hash_value >> 5) & 0x7F;
       
  2363 			hash_bit = hash_value & 0x1F;
       
  2364 			mta = (1 << hash_bit);
       
  2365 			mcarray[hash_reg] |= mta;
       
  2366 		} else {
       
  2367 			e1000_rar_set(hw, ha->addr, i++);
       
  2368 		}
       
  2369 	}
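	/* Worked example (illustrative): a hash_value of 0x0a43 selects one
	 * bit of the 4096-bit multicast table:
	 *
	 *   hash_reg = (0x0a43 >> 5) & 0x7F = 0x52   (MTA register 82)
	 *   hash_bit =  0x0a43 & 0x1F       = 0x03   (bit 3)
	 *   mcarray[0x52] |= 1 << 3;
	 */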
       
  2370 
       
  2371 	for (; i < rar_entries; i++) {
       
  2372 		E1000_WRITE_REG_ARRAY(hw, RA, i << 1, 0);
       
  2373 		E1000_WRITE_FLUSH();
       
  2374 		E1000_WRITE_REG_ARRAY(hw, RA, (i << 1) + 1, 0);
       
  2375 		E1000_WRITE_FLUSH();
       
  2376 	}
       
  2377 
       
  2378 	/* write the hash table completely; writing from the bottom avoids

  2379 	 * both quirky write-combining chipsets and flushing each write */
       
  2380 	for (i = mta_reg_count - 1; i >= 0 ; i--) {
       
  2381 		/*

  2382 		 * The 82544 has an erratum where writing odd

  2383 		 * offsets overwrites the previous even offset; writing

  2384 		 * backwards over the range avoids the issue by always

  2385 		 * writing the odd offset first.

  2386 		 */
       
  2387 		E1000_WRITE_REG_ARRAY(hw, MTA, i, mcarray[i]);
       
  2388 	}
       
  2389 	E1000_WRITE_FLUSH();
       
  2390 
       
  2391 	if (hw->mac_type == e1000_82542_rev2_0)
       
  2392 		e1000_leave_82542_rst(adapter);
       
  2393 
       
  2394 	kfree(mcarray);
       
  2395 }
       
  2396 
       
  2397 /**
       
  2398  * e1000_update_phy_info_task - get phy info
       
  2399  * @work: work struct contained inside adapter struct
       
  2400  *
       
  2401  * Need to wait a few seconds after link up to get diagnostic information from
       
  2402  * the phy
       
  2403  */
       
  2404 static void e1000_update_phy_info_task(struct work_struct *work)
       
  2405 {
       
  2406 	struct e1000_adapter *adapter = container_of(work,
       
  2407 						     struct e1000_adapter,
       
  2408 						     phy_info_task.work);
       
  2409 	if (test_bit(__E1000_DOWN, &adapter->flags))
       
  2410 		return;
       
  2411 	mutex_lock(&adapter->mutex);
       
  2412 	e1000_phy_get_info(&adapter->hw, &adapter->phy_info);
       
  2413 	mutex_unlock(&adapter->mutex);
       
  2414 }
       
  2415 
       
  2416 /**
       
  2417  * e1000_82547_tx_fifo_stall_task - task to complete work
       
  2418  * @work: work struct contained inside adapter struct
       
  2419  **/
       
  2420 static void e1000_82547_tx_fifo_stall_task(struct work_struct *work)
       
  2421 {
       
  2422 	struct e1000_adapter *adapter = container_of(work,
       
  2423 						     struct e1000_adapter,
       
  2424 						     fifo_stall_task.work);
       
  2425 	struct e1000_hw *hw = &adapter->hw;
       
  2426 	struct net_device *netdev = adapter->netdev;
       
  2427 	u32 tctl;
       
  2428 
       
  2429 	if (test_bit(__E1000_DOWN, &adapter->flags))
       
  2430 		return;
       
  2431 	mutex_lock(&adapter->mutex);
       
  2432 	if (atomic_read(&adapter->tx_fifo_stall)) {
       
  2433 		if ((er32(TDT) == er32(TDH)) &&
       
  2434 		   (er32(TDFT) == er32(TDFH)) &&
       
  2435 		   (er32(TDFTS) == er32(TDFHS))) {
       
  2436 			tctl = er32(TCTL);
       
  2437 			ew32(TCTL, tctl & ~E1000_TCTL_EN);
       
  2438 			ew32(TDFT, adapter->tx_head_addr);
       
  2439 			ew32(TDFH, adapter->tx_head_addr);
       
  2440 			ew32(TDFTS, adapter->tx_head_addr);
       
  2441 			ew32(TDFHS, adapter->tx_head_addr);
       
  2442 			ew32(TCTL, tctl);
       
  2443 			E1000_WRITE_FLUSH();
       
  2444 
       
  2445 			adapter->tx_fifo_head = 0;
       
  2446 			atomic_set(&adapter->tx_fifo_stall, 0);
       
  2447 			netif_wake_queue(netdev);
       
  2448 		} else if (!test_bit(__E1000_DOWN, &adapter->flags)) {
       
  2449 			schedule_delayed_work(&adapter->fifo_stall_task, 1);
       
  2450 		}
       
  2451 	}
       
  2452 	mutex_unlock(&adapter->mutex);
       
  2453 }
       
  2454 
       
  2455 bool e1000_has_link(struct e1000_adapter *adapter)
       
  2456 {
       
  2457 	struct e1000_hw *hw = &adapter->hw;
       
  2458 	bool link_active = false;
       
  2459 
       
  2460 	/* get_link_status is set on LSC (link status) interrupt or rx
       
  2461 	 * sequence error interrupt (except on intel ce4100).
       
  2462 	 * get_link_status will stay false until

  2463 	 * e1000_check_for_link establishes link; this applies to copper

  2464 	 * adapters only.
       
  2465 	 */
       
  2466 	switch (hw->media_type) {
       
  2467 	case e1000_media_type_copper:
       
  2468 		if (hw->mac_type == e1000_ce4100)
       
  2469 			hw->get_link_status = 1;
       
  2470 		if (hw->get_link_status) {
       
  2471 			e1000_check_for_link(hw);
       
  2472 			link_active = !hw->get_link_status;
       
  2473 		} else {
       
  2474 			link_active = true;
       
  2475 		}
       
  2476 		break;
       
  2477 	case e1000_media_type_fiber:
       
  2478 		e1000_check_for_link(hw);
       
  2479 		link_active = !!(er32(STATUS) & E1000_STATUS_LU);
       
  2480 		break;
       
  2481 	case e1000_media_type_internal_serdes:
       
  2482 		e1000_check_for_link(hw);
       
  2483 		link_active = hw->serdes_has_link;
       
  2484 		break;
       
  2485 	default:
       
  2486 		break;
       
  2487 	}
       
  2488 
       
  2489 	return link_active;
       
  2490 }
       
  2491 
       
  2492 /**
       
  2493  * e1000_watchdog - work function
       
  2494  * @work: work struct contained inside adapter struct
       
  2495  **/
       
  2496 static void e1000_watchdog(struct work_struct *work)
       
  2497 {
       
  2498 	struct e1000_adapter *adapter = container_of(work,
       
  2499 						     struct e1000_adapter,
       
  2500 						     watchdog_task.work);
       
  2501 	struct e1000_hw *hw = &adapter->hw;
       
  2502 	struct net_device *netdev = adapter->netdev;
       
  2503 	struct e1000_tx_ring *txdr = adapter->tx_ring;
       
  2504 	u32 link, tctl;
       
  2505 
       
  2506 	if (test_bit(__E1000_DOWN, &adapter->flags))
       
  2507 		return;
       
  2508 
       
  2509 	mutex_lock(&adapter->mutex);
       
  2510 	link = e1000_has_link(adapter);
       
  2511 	if (!adapter->ecdev && (netif_carrier_ok(netdev)) && link)
       
  2512 		goto link_up;
       
  2513 
       
  2514 	if (link) {
       
  2515 		if ((adapter->ecdev && !ecdev_get_link(adapter->ecdev))
       
  2516 				|| (!adapter->ecdev && !netif_carrier_ok(netdev))) {
       
  2517 			u32 ctrl;
       
  2518 			bool txb2b __attribute__ ((unused)) = true;
       
  2519 			/* update snapshot of PHY registers on LSC */
       
  2520 			e1000_get_speed_and_duplex(hw,
       
  2521 			                           &adapter->link_speed,
       
  2522 			                           &adapter->link_duplex);
       
  2523 
       
  2524 			ctrl = er32(CTRL);
       
  2525 			pr_info("%s NIC Link is Up %d Mbps %s, "
       
  2526 				"Flow Control: %s\n",
       
  2527 				netdev->name,
       
  2528 				adapter->link_speed,
       
  2529 				adapter->link_duplex == FULL_DUPLEX ?
       
  2530 				"Full Duplex" : "Half Duplex",
       
  2531 				((ctrl & E1000_CTRL_TFCE) && (ctrl &
       
  2532 				E1000_CTRL_RFCE)) ? "RX/TX" : ((ctrl &
       
  2533 				E1000_CTRL_RFCE) ? "RX" : ((ctrl &
       
  2534 				E1000_CTRL_TFCE) ? "TX" : "None")));
       
  2535 
       
  2536 			/* adjust timeout factor according to speed/duplex */
       
  2537 			adapter->tx_timeout_factor = 1;
       
  2538 			switch (adapter->link_speed) {
       
  2539 			case SPEED_10:
       
  2540 				txb2b = false;
       
  2541 				adapter->tx_timeout_factor = 16;
       
  2542 				break;
       
  2543 			case SPEED_100:
       
  2544 				txb2b = false;
       
  2545 				/* maybe add some timeout factor ? */
       
  2546 				break;
       
  2547 			}
       
  2548 
       
  2549 			/* enable transmits in the hardware */
       
  2550 			tctl = er32(TCTL);
       
  2551 			tctl |= E1000_TCTL_EN;
       
  2552 			ew32(TCTL, tctl);
       
  2553 
       
  2554 			if (adapter->ecdev) {
       
  2555 				ecdev_set_link(adapter->ecdev, 1);
       
  2556 			} else {
       
  2558 				netif_carrier_on(netdev);
       
  2559 				if (!test_bit(__E1000_DOWN, &adapter->flags))
       
  2560 					schedule_delayed_work(&adapter->phy_info_task,
       
  2561 							2 * HZ);
       
  2562 			}
       
  2563 			adapter->smartspeed = 0;
       
  2564 		}
       
  2565 	} else {
       
  2566 		if ((adapter->ecdev && ecdev_get_link(adapter->ecdev))
       
  2567 				|| (!adapter->ecdev && netif_carrier_ok(netdev))) {
       
  2568 			adapter->link_speed = 0;
       
  2569 			adapter->link_duplex = 0;
       
  2570 			pr_info("%s NIC Link is Down\n",
       
  2571 				netdev->name);
       
  2572 
       
  2573 			if (adapter->ecdev) {
       
  2574 				ecdev_set_link(adapter->ecdev, 0);
       
  2575 			} else {
       
  2576 				netif_carrier_off(netdev);
       
  2577 
       
  2578 				if (!test_bit(__E1000_DOWN, &adapter->flags))
       
  2579 					schedule_delayed_work(&adapter->phy_info_task,
       
  2580 							2 * HZ);
       
  2581 			}
       
  2582 		}
       
  2583 
       
  2584 		e1000_smartspeed(adapter);
       
  2585 	}
       
  2586 
       
  2587 link_up:
       
  2588 	e1000_update_stats(adapter);
       
  2589 
       
  2590 	hw->tx_packet_delta = adapter->stats.tpt - adapter->tpt_old;
       
  2591 	adapter->tpt_old = adapter->stats.tpt;
       
  2592 	hw->collision_delta = adapter->stats.colc - adapter->colc_old;
       
  2593 	adapter->colc_old = adapter->stats.colc;
       
  2594 
       
  2595 	adapter->gorcl = adapter->stats.gorcl - adapter->gorcl_old;
       
  2596 	adapter->gorcl_old = adapter->stats.gorcl;
       
  2597 	adapter->gotcl = adapter->stats.gotcl - adapter->gotcl_old;
       
  2598 	adapter->gotcl_old = adapter->stats.gotcl;
       
  2599 
       
  2600 	e1000_update_adaptive(hw);
       
  2601 
       
  2602 	if (!adapter->ecdev && !netif_carrier_ok(netdev)) {
       
  2603 		if (E1000_DESC_UNUSED(txdr) + 1 < txdr->count) {
       
  2604 			/* We've lost link, so the controller stops DMA,
       
  2605 			 * but we've got queued Tx work that's never going
       
  2606 			 * to get done, so reset controller to flush Tx.
       
  2607 			 * (Do the reset outside of interrupt context). */
       
  2608 			adapter->tx_timeout_count++;
       
  2609 			schedule_work(&adapter->reset_task);
       
  2610 			/* exit immediately since reset is imminent */
       
  2611 			goto unlock;
       
  2612 		}
       
  2613 	}
       
  2614 
       
  2615 	/* Simple mode for Interrupt Throttle Rate (ITR) */
       
  2616 	if (hw->mac_type >= e1000_82540 && adapter->itr_setting == 4) {
       
  2617 		/*
       
  2618 		 * Symmetric Tx/Rx gets a reduced ITR=2000;
       
  2619 		 * Total asymmetrical Tx or Rx gets ITR=8000;
       
  2620 		 * everyone else is between 2000-8000.
       
  2621 		 */
       
  2622 		u32 goc = (adapter->gotcl + adapter->gorcl) / 10000;
       
  2623 		u32 dif = (adapter->gotcl > adapter->gorcl ?
       
  2624 			    adapter->gotcl - adapter->gorcl :
       
  2625 			    adapter->gorcl - adapter->gotcl) / 10000;
       
  2626 		u32 itr = goc > 0 ? (dif * 6000 / goc + 2000) : 8000;
       
  2627 
       
  2628 		ew32(ITR, 1000000000 / (itr * 256));
       
  2629 	}
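	/* Sketch with assumed numbers: 40 MB received and 10 MB sent since
	 * the last run give goc = 50000000 / 10000 = 5000 and
	 * dif = 30000000 / 10000 = 3000, so
	 * itr = 3000 * 6000 / 5000 + 2000 = 5600 ints/s, between the
	 * symmetric (2000) and fully asymmetric (8000) endpoints.
	 */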
       
  2630 
       
  2631 	/* Cause software interrupt to ensure rx ring is cleaned */
       
  2632 	ew32(ICS, E1000_ICS_RXDMT0);
       
  2633 
       
  2634 	/* Force detection of hung controller every watchdog period */
       
  2635 	adapter->detect_tx_hung = true;
       
  2636 
       
  2637 	/* Reschedule the task */
       
  2638 	if (!adapter->ecdev && !test_bit(__E1000_DOWN, &adapter->flags))
       
  2639 		schedule_delayed_work(&adapter->watchdog_task, 2 * HZ);
       
  2640 
       
  2641 unlock:
       
  2642 	mutex_unlock(&adapter->mutex);
       
  2643 }
       
  2644 
       
  2645 enum latency_range {
       
  2646 	lowest_latency = 0,
       
  2647 	low_latency = 1,
       
  2648 	bulk_latency = 2,
       
  2649 	latency_invalid = 255
       
  2650 };
       
  2651 
       
  2652 /**
       
  2653  * e1000_update_itr - update the dynamic ITR value based on statistics
       
  2654  * @adapter: pointer to adapter
       
  2655  * @itr_setting: current adapter->itr
       
  2656  * @packets: the number of packets during this measurement interval
       
  2657  * @bytes: the number of bytes during this measurement interval
       
  2658  *
       
  2659  *      Stores a new ITR value based on packets and byte
       
  2660  *      counts during the last interrupt.  The advantage of per interrupt
       
  2661  *      computation is faster updates and more accurate ITR for the current
       
  2662  *      traffic pattern.  Constants in this function were computed
       
  2663  *      based on theoretical maximum wire speed and thresholds were set based
       
  2664  *      on testing data as well as attempting to minimize response time
       
  2665  *      while increasing bulk throughput.
       
   2666  *      This functionality is controlled by the InterruptThrottleRate module
       
  2667  *      parameter (see e1000_param.c)
       
  2668  **/
       
  2669 static unsigned int e1000_update_itr(struct e1000_adapter *adapter,
       
  2670 				     u16 itr_setting, int packets, int bytes)
       
  2671 {
       
  2672 	unsigned int retval = itr_setting;
       
  2673 	struct e1000_hw *hw = &adapter->hw;
       
  2674 
       
  2675 	if (unlikely(hw->mac_type < e1000_82540))
       
  2676 		goto update_itr_done;
       
  2677 
       
  2678 	if (packets == 0)
       
  2679 		goto update_itr_done;
       
  2680 
       
  2681 	switch (itr_setting) {
       
  2682 	case lowest_latency:
       
   2683 		/* jumbo frames get bulk treatment */
       
  2684 		if (bytes/packets > 8000)
       
  2685 			retval = bulk_latency;
       
  2686 		else if ((packets < 5) && (bytes > 512))
       
  2687 			retval = low_latency;
       
  2688 		break;
       
  2689 	case low_latency:  /* 50 usec aka 20000 ints/s */
       
  2690 		if (bytes > 10000) {
       
  2691 			/* jumbo frames need bulk latency setting */
       
  2692 			if (bytes/packets > 8000)
       
  2693 				retval = bulk_latency;
       
  2694 			else if ((packets < 10) || ((bytes/packets) > 1200))
       
  2695 				retval = bulk_latency;
       
  2696 			else if ((packets > 35))
       
  2697 				retval = lowest_latency;
       
  2698 		} else if (bytes/packets > 2000)
       
  2699 			retval = bulk_latency;
       
  2700 		else if (packets <= 2 && bytes < 512)
       
  2701 			retval = lowest_latency;
       
  2702 		break;
       
  2703 	case bulk_latency: /* 250 usec aka 4000 ints/s */
       
  2704 		if (bytes > 25000) {
       
  2705 			if (packets > 35)
       
  2706 				retval = low_latency;
       
  2707 		} else if (bytes < 6000) {
       
  2708 			retval = low_latency;
       
  2709 		}
       
  2710 		break;
       
  2711 	}
       
  2712 
       
  2713 update_itr_done:
       
  2714 	return retval;
       
  2715 }
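
/* Worked example (illustrative): one pass through the heuristic above,
 * assuming an interval with 40 packets / 50000 bytes while in the
 * low_latency state:
 *
 *   bytes > 10000                    -> first branch taken
 *   bytes/packets = 1250, not > 8000 -> no jumbo shortcut
 *   packets >= 10 but 1250 > 1200    -> retval = bulk_latency
 *
 * Many mid-sized frames per interrupt are thus classed as bulk traffic,
 * and e1000_set_itr() below lowers the interrupt rate accordingly.
 */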
       
  2716 
       
  2717 static void e1000_set_itr(struct e1000_adapter *adapter)
       
  2718 {
       
  2719 	struct e1000_hw *hw = &adapter->hw;
       
  2720 	u16 current_itr;
       
  2721 	u32 new_itr = adapter->itr;
       
  2722 
       
  2723 	if (unlikely(hw->mac_type < e1000_82540))
       
  2724 		return;
       
  2725 
       
  2726 	/* for non-gigabit speeds, just fix the interrupt rate at 4000 */
       
  2727 	if (unlikely(adapter->link_speed != SPEED_1000)) {
       
  2728 		current_itr = 0;
       
  2729 		new_itr = 4000;
       
  2730 		goto set_itr_now;
       
  2731 	}
       
  2732 
       
  2733 	adapter->tx_itr = e1000_update_itr(adapter,
       
  2734 	                            adapter->tx_itr,
       
  2735 	                            adapter->total_tx_packets,
       
  2736 	                            adapter->total_tx_bytes);
       
  2737 	/* conservative mode (itr 3) eliminates the lowest_latency setting */
       
  2738 	if (adapter->itr_setting == 3 && adapter->tx_itr == lowest_latency)
       
  2739 		adapter->tx_itr = low_latency;
       
  2740 
       
  2741 	adapter->rx_itr = e1000_update_itr(adapter,
       
  2742 	                            adapter->rx_itr,
       
  2743 	                            adapter->total_rx_packets,
       
  2744 	                            adapter->total_rx_bytes);
       
  2745 	/* conservative mode (itr 3) eliminates the lowest_latency setting */
       
  2746 	if (adapter->itr_setting == 3 && adapter->rx_itr == lowest_latency)
       
  2747 		adapter->rx_itr = low_latency;
       
  2748 
       
  2749 	current_itr = max(adapter->rx_itr, adapter->tx_itr);
       
  2750 
       
  2751 	switch (current_itr) {
       
  2752 	/* counts and packets in update_itr are dependent on these numbers */
       
  2753 	case lowest_latency:
       
  2754 		new_itr = 70000;
       
  2755 		break;
       
  2756 	case low_latency:
       
  2757 		new_itr = 20000; /* aka hwitr = ~200 */
       
  2758 		break;
       
  2759 	case bulk_latency:
       
  2760 		new_itr = 4000;
       
  2761 		break;
       
  2762 	default:
       
  2763 		break;
       
  2764 	}
       
  2765 
       
  2766 set_itr_now:
       
  2767 	if (new_itr != adapter->itr) {
       
  2768 		/* this attempts to bias the interrupt rate towards Bulk
       
  2769 		 * by adding intermediate steps when interrupt rate is
       
  2770 		 * increasing */
       
  2771 		new_itr = new_itr > adapter->itr ?
       
  2772 		             min(adapter->itr + (new_itr >> 2), new_itr) :
       
  2773 		             new_itr;
       
  2774 		adapter->itr = new_itr;
       
  2775 		ew32(ITR, 1000000000 / (new_itr * 256));
       
  2776 	}
       
  2777 }
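
/* Worked example (illustrative): the bias-towards-bulk step above.
 * Assuming adapter->itr = 4000 and a new target of 20000:
 *
 *   min(4000 + (20000 >> 2), 20000) = min(9000, 20000) = 9000
 *
 * so the rate climbs over several rounds, while any decrease
 * (e.g. 20000 -> 4000) is applied in one step.
 */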
       
  2778 
       
  2779 #define E1000_TX_FLAGS_CSUM		0x00000001
       
  2780 #define E1000_TX_FLAGS_VLAN		0x00000002
       
  2781 #define E1000_TX_FLAGS_TSO		0x00000004
       
  2782 #define E1000_TX_FLAGS_IPV4		0x00000008
       
  2783 #define E1000_TX_FLAGS_NO_FCS		0x00000010
       
  2784 #define E1000_TX_FLAGS_VLAN_MASK	0xffff0000
       
  2785 #define E1000_TX_FLAGS_VLAN_SHIFT	16
       
  2786 
       
  2787 static int e1000_tso(struct e1000_adapter *adapter,
       
  2788 		     struct e1000_tx_ring *tx_ring, struct sk_buff *skb)
       
  2789 {
       
  2790 	struct e1000_context_desc *context_desc;
       
  2791 	struct e1000_buffer *buffer_info;
       
  2792 	unsigned int i;
       
  2793 	u32 cmd_length = 0;
       
  2794 	u16 ipcse = 0, tucse, mss;
       
  2795 	u8 ipcss, ipcso, tucss, tucso, hdr_len;
       
  2796 	int err;
       
  2797 
       
  2798 	if (skb_is_gso(skb)) {
       
  2799 		if (skb_header_cloned(skb)) {
       
  2800 			err = pskb_expand_head(skb, 0, 0, GFP_ATOMIC);
       
  2801 			if (err)
       
  2802 				return err;
       
  2803 		}
       
  2804 
       
  2805 		hdr_len = skb_transport_offset(skb) + tcp_hdrlen(skb);
       
  2806 		mss = skb_shinfo(skb)->gso_size;
       
  2807 		if (skb->protocol == htons(ETH_P_IP)) {
       
  2808 			struct iphdr *iph = ip_hdr(skb);
       
  2809 			iph->tot_len = 0;
       
  2810 			iph->check = 0;
       
  2811 			tcp_hdr(skb)->check = ~csum_tcpudp_magic(iph->saddr,
       
  2812 								 iph->daddr, 0,
       
  2813 								 IPPROTO_TCP,
       
  2814 								 0);
       
  2815 			cmd_length = E1000_TXD_CMD_IP;
       
  2816 			ipcse = skb_transport_offset(skb) - 1;
       
  2817 		} else if (skb->protocol == htons(ETH_P_IPV6)) {
       
  2818 			ipv6_hdr(skb)->payload_len = 0;
       
  2819 			tcp_hdr(skb)->check =
       
  2820 				~csum_ipv6_magic(&ipv6_hdr(skb)->saddr,
       
  2821 						 &ipv6_hdr(skb)->daddr,
       
  2822 						 0, IPPROTO_TCP, 0);
       
  2823 			ipcse = 0;
       
  2824 		}
       
  2825 		ipcss = skb_network_offset(skb);
       
  2826 		ipcso = (void *)&(ip_hdr(skb)->check) - (void *)skb->data;
       
  2827 		tucss = skb_transport_offset(skb);
       
  2828 		tucso = (void *)&(tcp_hdr(skb)->check) - (void *)skb->data;
       
  2829 		tucse = 0;
       
  2830 
       
  2831 		cmd_length |= (E1000_TXD_CMD_DEXT | E1000_TXD_CMD_TSE |
       
  2832 			       E1000_TXD_CMD_TCP | (skb->len - (hdr_len)));
       
  2833 
       
  2834 		i = tx_ring->next_to_use;
       
  2835 		context_desc = E1000_CONTEXT_DESC(*tx_ring, i);
       
  2836 		buffer_info = &tx_ring->buffer_info[i];
       
  2837 
       
  2838 		context_desc->lower_setup.ip_fields.ipcss  = ipcss;
       
  2839 		context_desc->lower_setup.ip_fields.ipcso  = ipcso;
       
  2840 		context_desc->lower_setup.ip_fields.ipcse  = cpu_to_le16(ipcse);
       
  2841 		context_desc->upper_setup.tcp_fields.tucss = tucss;
       
  2842 		context_desc->upper_setup.tcp_fields.tucso = tucso;
       
  2843 		context_desc->upper_setup.tcp_fields.tucse = cpu_to_le16(tucse);
       
  2844 		context_desc->tcp_seg_setup.fields.mss     = cpu_to_le16(mss);
       
  2845 		context_desc->tcp_seg_setup.fields.hdr_len = hdr_len;
       
  2846 		context_desc->cmd_and_length = cpu_to_le32(cmd_length);
       
  2847 
       
  2848 		buffer_info->time_stamp = jiffies;
       
  2849 		buffer_info->next_to_watch = i;
       
  2850 
       
  2851 		if (++i == tx_ring->count) i = 0;
       
  2852 		tx_ring->next_to_use = i;
       
  2853 
       
  2854 		return true;
       
  2855 	}
       
  2856 	return false;
       
  2857 }
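
/* Worked example (illustrative; the frame layout is an assumption):
 * the offsets programmed into the TSO context descriptor above, for an
 * Ethernet + IPv4 + TCP frame without options:
 *
 *   ipcss   = 14            start of the IP header
 *   ipcso   = 14 + 10 = 24  location of iph->check
 *   ipcse   = 34 - 1 = 33   last byte of the IP header
 *   tucss   = 34            start of the TCP header
 *   tucso   = 34 + 16 = 50  location of tcph->check
 *   hdr_len = 14 + 20 + 20 = 54
 *
 * mss comes from skb_shinfo(skb)->gso_size; the hardware replays these
 * headers in front of every mss-sized chunk of payload.
 */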
       
  2858 
       
  2859 static bool e1000_tx_csum(struct e1000_adapter *adapter,
       
  2860 			  struct e1000_tx_ring *tx_ring, struct sk_buff *skb)
       
  2861 {
       
  2862 	struct e1000_context_desc *context_desc;
       
  2863 	struct e1000_buffer *buffer_info;
       
  2864 	unsigned int i;
       
  2865 	u8 css;
       
  2866 	u32 cmd_len = E1000_TXD_CMD_DEXT;
       
  2867 
       
  2868 	if (skb->ip_summed != CHECKSUM_PARTIAL)
       
  2869 		return false;
       
  2870 
       
  2871 	switch (skb->protocol) {
       
  2872 	case cpu_to_be16(ETH_P_IP):
       
  2873 		if (ip_hdr(skb)->protocol == IPPROTO_TCP)
       
  2874 			cmd_len |= E1000_TXD_CMD_TCP;
       
  2875 		break;
       
  2876 	case cpu_to_be16(ETH_P_IPV6):
       
  2877 		/* XXX not handling all IPV6 headers */
       
  2878 		if (ipv6_hdr(skb)->nexthdr == IPPROTO_TCP)
       
  2879 			cmd_len |= E1000_TXD_CMD_TCP;
       
  2880 		break;
       
  2881 	default:
       
  2882 		if (unlikely(net_ratelimit()))
       
  2883 			e_warn(drv, "checksum_partial proto=%x!\n",
       
  2884 			       skb->protocol);
       
  2885 		break;
       
  2886 	}
       
  2887 
       
  2888 	css = skb_checksum_start_offset(skb);
       
  2889 
       
  2890 	i = tx_ring->next_to_use;
       
  2891 	buffer_info = &tx_ring->buffer_info[i];
       
  2892 	context_desc = E1000_CONTEXT_DESC(*tx_ring, i);
       
  2893 
       
  2894 	context_desc->lower_setup.ip_config = 0;
       
  2895 	context_desc->upper_setup.tcp_fields.tucss = css;
       
  2896 	context_desc->upper_setup.tcp_fields.tucso =
       
  2897 		css + skb->csum_offset;
       
  2898 	context_desc->upper_setup.tcp_fields.tucse = 0;
       
  2899 	context_desc->tcp_seg_setup.data = 0;
       
  2900 	context_desc->cmd_and_length = cpu_to_le32(cmd_len);
       
  2901 
       
  2902 	buffer_info->time_stamp = jiffies;
       
  2903 	buffer_info->next_to_watch = i;
       
  2904 
       
  2905 	if (unlikely(++i == tx_ring->count)) i = 0;
       
  2906 	tx_ring->next_to_use = i;
       
  2907 
       
  2908 	return true;
       
  2909 }
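
/* Worked example (illustrative): for CHECKSUM_PARTIAL the stack has
 * already chosen csum_start/csum_offset and the fields above just
 * mirror them.  Assuming TCP over IPv4 without options:
 *
 *   css   = skb_checksum_start_offset(skb) = 34
 *   tucss = 34            checksum coverage starts here
 *   tucso = 34 + 16 = 50  where the hardware stores the result
 *   tucse = 0             cover through the end of the packet
 */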
       
  2910 
       
  2911 #define E1000_MAX_TXD_PWR	12
       
  2912 #define E1000_MAX_DATA_PER_TXD	(1<<E1000_MAX_TXD_PWR)
       
  2913 
       
  2914 static int e1000_tx_map(struct e1000_adapter *adapter,
       
  2915 			struct e1000_tx_ring *tx_ring,
       
  2916 			struct sk_buff *skb, unsigned int first,
       
  2917 			unsigned int max_per_txd, unsigned int nr_frags,
       
  2918 			unsigned int mss)
       
  2919 {
       
  2920 	struct e1000_hw *hw = &adapter->hw;
       
  2921 	struct pci_dev *pdev = adapter->pdev;
       
  2922 	struct e1000_buffer *buffer_info;
       
  2923 	unsigned int len = skb_headlen(skb);
       
  2924 	unsigned int offset = 0, size, count = 0, i;
       
  2925 	unsigned int f, bytecount, segs;
       
  2926 
       
  2927 	i = tx_ring->next_to_use;
       
  2928 
       
  2929 	while (len) {
       
  2930 		buffer_info = &tx_ring->buffer_info[i];
       
  2931 		size = min(len, max_per_txd);
       
   2932 		/* Workaround for controller erratum --

   2933 		 * the descriptor for a non-TSO packet in a linear SKB that

   2934 		 * follows a TSO gets written back prematurely before the data

   2935 		 * is fully DMA'd to the controller */
       
  2936 		if (!skb->data_len && tx_ring->last_tx_tso &&
       
  2937 		    !skb_is_gso(skb)) {
       
  2938 			tx_ring->last_tx_tso = false;
       
  2939 			size -= 4;
       
  2940 		}
       
  2941 
       
  2942 		/* Workaround for premature desc write-backs
       
  2943 		 * in TSO mode.  Append 4-byte sentinel desc */
       
  2944 		if (unlikely(mss && !nr_frags && size == len && size > 8))
       
  2945 			size -= 4;
       
   2946 		/* Work-around for errata 10; it applies to all

   2947 		 * controllers in PCI-X mode.

   2948 		 * The fix is to make sure that the first descriptor of a

   2949 		 * packet is smaller than 2048 - 16 - 16 (or 2016) bytes.

   2950 		 */
       
  2951 		if (unlikely((hw->bus_type == e1000_bus_type_pcix) &&
       
  2952 		                (size > 2015) && count == 0))
       
  2953 		        size = 2015;
       
  2954 
       
  2955 		/* Workaround for potential 82544 hang in PCI-X.  Avoid
       
  2956 		 * terminating buffers within evenly-aligned dwords. */
       
  2957 		if (unlikely(adapter->pcix_82544 &&
       
  2958 		   !((unsigned long)(skb->data + offset + size - 1) & 4) &&
       
  2959 		   size > 4))
       
  2960 			size -= 4;
       
  2961 
       
  2962 		buffer_info->length = size;
       
  2963 		/* set time_stamp *before* dma to help avoid a possible race */
       
  2964 		buffer_info->time_stamp = jiffies;
       
  2965 		buffer_info->mapped_as_page = false;
       
  2966 		buffer_info->dma = dma_map_single(&pdev->dev,
       
  2967 						  skb->data + offset,
       
  2968 						  size,	DMA_TO_DEVICE);
       
  2969 		if (dma_mapping_error(&pdev->dev, buffer_info->dma))
       
  2970 			goto dma_error;
       
  2971 		buffer_info->next_to_watch = i;
       
  2972 
       
  2973 		len -= size;
       
  2974 		offset += size;
       
  2975 		count++;
       
  2976 		if (len) {
       
  2977 			i++;
       
  2978 			if (unlikely(i == tx_ring->count))
       
  2979 				i = 0;
       
  2980 		}
       
  2981 	}
       
  2982 
       
  2983 	for (f = 0; f < nr_frags; f++) {
       
  2984 		const struct skb_frag_struct *frag;
       
  2985 
       
  2986 		frag = &skb_shinfo(skb)->frags[f];
       
  2987 		len = skb_frag_size(frag);
       
  2988 		offset = 0;
       
  2989 
       
  2990 		while (len) {
       
  2991 			unsigned long bufend;
       
  2992 			i++;
       
  2993 			if (unlikely(i == tx_ring->count))
       
  2994 				i = 0;
       
  2995 
       
  2996 			buffer_info = &tx_ring->buffer_info[i];
       
  2997 			size = min(len, max_per_txd);
       
  2998 			/* Workaround for premature desc write-backs
       
  2999 			 * in TSO mode.  Append 4-byte sentinel desc */
       
  3000 			if (unlikely(mss && f == (nr_frags-1) && size == len && size > 8))
       
  3001 				size -= 4;
       
  3002 			/* Workaround for potential 82544 hang in PCI-X.
       
  3003 			 * Avoid terminating buffers within evenly-aligned
       
  3004 			 * dwords. */
       
  3005 			bufend = (unsigned long)
       
  3006 				page_to_phys(skb_frag_page(frag));
       
  3007 			bufend += offset + size - 1;
       
  3008 			if (unlikely(adapter->pcix_82544 &&
       
  3009 				     !(bufend & 4) &&
       
  3010 				     size > 4))
       
  3011 				size -= 4;
       
  3012 
       
  3013 			buffer_info->length = size;
       
  3014 			buffer_info->time_stamp = jiffies;
       
  3015 			buffer_info->mapped_as_page = true;
       
  3016 			buffer_info->dma = skb_frag_dma_map(&pdev->dev, frag,
       
  3017 						offset, size, DMA_TO_DEVICE);
       
  3018 			if (dma_mapping_error(&pdev->dev, buffer_info->dma))
       
  3019 				goto dma_error;
       
  3020 			buffer_info->next_to_watch = i;
       
  3021 
       
  3022 			len -= size;
       
  3023 			offset += size;
       
  3024 			count++;
       
  3025 		}
       
  3026 	}
       
  3027 
       
  3028 	segs = skb_shinfo(skb)->gso_segs ?: 1;
       
  3029 	/* multiply data chunks by size of headers */
       
  3030 	bytecount = ((segs - 1) * skb_headlen(skb)) + skb->len;
       
  3031 
       
  3032 	tx_ring->buffer_info[i].skb = skb;
       
  3033 	tx_ring->buffer_info[i].segs = segs;
       
  3034 	tx_ring->buffer_info[i].bytecount = bytecount;
       
  3035 	tx_ring->buffer_info[first].next_to_watch = i;
       
  3036 
       
  3037 	return count;
       
  3038 
       
  3039 dma_error:
       
  3040 	dev_err(&pdev->dev, "TX DMA map failed\n");
       
  3041 	buffer_info->dma = 0;
       
  3042 	if (count)
       
  3043 		count--;
       
  3044 
       
  3045 	while (count--) {
       
   3046 		if (i == 0)
       
  3047 			i += tx_ring->count;
       
  3048 		i--;
       
  3049 		buffer_info = &tx_ring->buffer_info[i];
       
  3050 		e1000_unmap_and_free_tx_resource(adapter, buffer_info);
       
  3051 	}
       
  3052 
       
  3053 	return 0;
       
  3054 }
       
  3055 
       
  3056 static void e1000_tx_queue(struct e1000_adapter *adapter,
       
  3057 			   struct e1000_tx_ring *tx_ring, int tx_flags,
       
  3058 			   int count)
       
  3059 {
       
  3060 	struct e1000_hw *hw = &adapter->hw;
       
  3061 	struct e1000_tx_desc *tx_desc = NULL;
       
  3062 	struct e1000_buffer *buffer_info;
       
  3063 	u32 txd_upper = 0, txd_lower = E1000_TXD_CMD_IFCS;
       
  3064 	unsigned int i;
       
  3065 
       
  3066 	if (likely(tx_flags & E1000_TX_FLAGS_TSO)) {
       
  3067 		txd_lower |= E1000_TXD_CMD_DEXT | E1000_TXD_DTYP_D |
       
  3068 		             E1000_TXD_CMD_TSE;
       
  3069 		txd_upper |= E1000_TXD_POPTS_TXSM << 8;
       
  3070 
       
  3071 		if (likely(tx_flags & E1000_TX_FLAGS_IPV4))
       
  3072 			txd_upper |= E1000_TXD_POPTS_IXSM << 8;
       
  3073 	}
       
  3074 
       
  3075 	if (likely(tx_flags & E1000_TX_FLAGS_CSUM)) {
       
  3076 		txd_lower |= E1000_TXD_CMD_DEXT | E1000_TXD_DTYP_D;
       
  3077 		txd_upper |= E1000_TXD_POPTS_TXSM << 8;
       
  3078 	}
       
  3079 
       
  3080 	if (unlikely(tx_flags & E1000_TX_FLAGS_VLAN)) {
       
  3081 		txd_lower |= E1000_TXD_CMD_VLE;
       
  3082 		txd_upper |= (tx_flags & E1000_TX_FLAGS_VLAN_MASK);
       
  3083 	}
       
  3084 
       
  3085 	if (unlikely(tx_flags & E1000_TX_FLAGS_NO_FCS))
       
  3086 		txd_lower &= ~(E1000_TXD_CMD_IFCS);
       
  3087 
       
  3088 	i = tx_ring->next_to_use;
       
  3089 
       
  3090 	while (count--) {
       
  3091 		buffer_info = &tx_ring->buffer_info[i];
       
  3092 		tx_desc = E1000_TX_DESC(*tx_ring, i);
       
  3093 		tx_desc->buffer_addr = cpu_to_le64(buffer_info->dma);
       
  3094 		tx_desc->lower.data =
       
  3095 			cpu_to_le32(txd_lower | buffer_info->length);
       
  3096 		tx_desc->upper.data = cpu_to_le32(txd_upper);
       
  3097 		if (unlikely(++i == tx_ring->count)) i = 0;
       
  3098 	}
       
  3099 
       
  3100 	tx_desc->lower.data |= cpu_to_le32(adapter->txd_cmd);
       
  3101 
       
  3102 	/* txd_cmd re-enables FCS, so we'll re-disable it here as desired. */
       
  3103 	if (unlikely(tx_flags & E1000_TX_FLAGS_NO_FCS))
       
  3104 		tx_desc->lower.data &= ~(cpu_to_le32(E1000_TXD_CMD_IFCS));
       
  3105 
       
  3106 	/* Force memory writes to complete before letting h/w
       
  3107 	 * know there are new descriptors to fetch.  (Only
       
  3108 	 * applicable for weak-ordered memory model archs,
       
  3109 	 * such as IA-64). */
       
  3110 	wmb();
       
  3111 
       
  3112 	tx_ring->next_to_use = i;
       
  3113 	writel(i, hw->hw_addr + tx_ring->tdt);
       
   3114 	/* We need this if more than one processor can write to our tail

   3115 	 * at a time; it synchronizes IO on IA64/Altix systems */
       
  3116 	mmiowb();
       
  3117 }
       
  3118 
       
  3119 /**
       
  3120  * 82547 workaround to avoid controller hang in half-duplex environment.
       
  3121  * The workaround is to avoid queuing a large packet that would span
       
  3122  * the internal Tx FIFO ring boundary by notifying the stack to resend
       
  3123  * the packet at a later time.  This gives the Tx FIFO an opportunity to
       
  3124  * flush all packets.  When that occurs, we reset the Tx FIFO pointers
       
  3125  * to the beginning of the Tx FIFO.
       
  3126  **/
       
  3127 
       
  3128 #define E1000_FIFO_HDR			0x10
       
  3129 #define E1000_82547_PAD_LEN		0x3E0
       
  3130 
       
  3131 static int e1000_82547_fifo_workaround(struct e1000_adapter *adapter,
       
  3132 				       struct sk_buff *skb)
       
  3133 {
       
  3134 	u32 fifo_space = adapter->tx_fifo_size - adapter->tx_fifo_head;
       
  3135 	u32 skb_fifo_len = skb->len + E1000_FIFO_HDR;
       
  3136 
       
  3137 	skb_fifo_len = ALIGN(skb_fifo_len, E1000_FIFO_HDR);
       
  3138 
       
  3139 	if (adapter->link_duplex != HALF_DUPLEX)
       
  3140 		goto no_fifo_stall_required;
       
  3141 
       
  3142 	if (atomic_read(&adapter->tx_fifo_stall))
       
  3143 		return 1;
       
  3144 
       
  3145 	if (skb_fifo_len >= (E1000_82547_PAD_LEN + fifo_space)) {
       
  3146 		atomic_set(&adapter->tx_fifo_stall, 1);
       
  3147 		return 1;
       
  3148 	}
       
  3149 
       
  3150 no_fifo_stall_required:
       
  3151 	adapter->tx_fifo_head += skb_fifo_len;
       
  3152 	if (adapter->tx_fifo_head >= adapter->tx_fifo_size)
       
  3153 		adapter->tx_fifo_head -= adapter->tx_fifo_size;
       
  3154 	return 0;
       
  3155 }
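
/* Worked example (illustrative): the FIFO accounting above for a
 * full-sized 1514-byte frame, with E1000_FIFO_HDR = 0x10:
 *
 *   skb_fifo_len = ALIGN(1514 + 16, 16) = 1536
 *
 * The frame is queued only while 1536 < E1000_82547_PAD_LEN (992) +
 * fifo_space; otherwise tx_fifo_stall is set and the caller returns
 * NETDEV_TX_BUSY until the fifo_stall task resets the FIFO pointers.
 */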
       
  3156 
       
  3157 static int __e1000_maybe_stop_tx(struct net_device *netdev, int size)
       
  3158 {
       
  3159 	struct e1000_adapter *adapter = netdev_priv(netdev);
       
  3160 	struct e1000_tx_ring *tx_ring = adapter->tx_ring;
       
  3161 
       
  3162 	if (adapter->ecdev) {
       
  3163 		return -EBUSY;
       
  3164 	}
       
  3165 
       
  3166 	netif_stop_queue(netdev);
       
  3167 	/* Herbert's original patch had:
       
  3168 	 *  smp_mb__after_netif_stop_queue();
       
  3169 	 * but since that doesn't exist yet, just open code it. */
       
  3170 	smp_mb();
       
  3171 
       
   3172 	/* We need to check again in case another CPU has just
       
  3173 	 * made room available. */
       
  3174 	if (likely(E1000_DESC_UNUSED(tx_ring) < size))
       
  3175 		return -EBUSY;
       
  3176 
       
  3177 	/* A reprieve! */
       
  3178 	netif_start_queue(netdev);
       
  3179 	++adapter->restart_queue;
       
  3180 	return 0;
       
  3181 }
       
  3182 
       
  3183 static int e1000_maybe_stop_tx(struct net_device *netdev,
       
  3184                                struct e1000_tx_ring *tx_ring, int size)
       
  3185 {
       
  3186 	if (likely(E1000_DESC_UNUSED(tx_ring) >= size))
       
  3187 		return 0;
       
  3188 	return __e1000_maybe_stop_tx(netdev, size);
       
  3189 }
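
/* Illustrative sketch (pattern summary, not new driver code): the
 * stop side above pairs with the wake side in e1000_clean_tx_irq().
 * The barriers make the hand-off safe:
 *
 *	producer (xmit path)		consumer (clean_tx_irq)
 *	--------------------		-----------------------
 *	netif_stop_queue()		advance next_to_clean
 *	smp_mb()			smp_mb()
 *	re-check unused descriptors	if stopped && room, wake queue
 *
 * Without the barriers the producer could miss descriptors freed
 * between its first check and the stop, and the queue could stay
 * stopped forever.
 */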
       
  3190 
       
   3191 #define TXD_USE_COUNT(S, X) (((S) >> (X)) + 1)
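
/* Worked example (illustrative): TXD_USE_COUNT deliberately rounds up
 * by a full descriptor when S is an exact multiple of the chunk size.
 * With X = E1000_MAX_TXD_PWR = 12 (4096-byte chunks):
 *
 *   TXD_USE_COUNT(1514, 12) = (1514 >> 12) + 1 = 1
 *   TXD_USE_COUNT(6000, 12) = (6000 >> 12) + 1 = 2
 *   TXD_USE_COUNT(4096, 12) = (4096 >> 12) + 1 = 2  (one spare slot)
 */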
       
  3192 static netdev_tx_t e1000_xmit_frame(struct sk_buff *skb,
       
  3193 				    struct net_device *netdev)
       
  3194 {
       
  3195 	struct e1000_adapter *adapter = netdev_priv(netdev);
       
  3196 	struct e1000_hw *hw = &adapter->hw;
       
  3197 	struct e1000_tx_ring *tx_ring;
       
  3198 	unsigned int first, max_per_txd = E1000_MAX_DATA_PER_TXD;
       
  3199 	unsigned int max_txd_pwr = E1000_MAX_TXD_PWR;
       
  3200 	unsigned int tx_flags = 0;
       
  3201 	unsigned int len = skb_headlen(skb);
       
  3202 	unsigned int nr_frags;
       
  3203 	unsigned int mss;
       
  3204 	int count = 0;
       
  3205 	int tso;
       
  3206 	unsigned int f;
       
  3207 
       
  3208 	/* This goes back to the question of how to logically map a tx queue
       
  3209 	 * to a flow.  Right now, performance is impacted slightly negatively
       
  3210 	 * if using multiple tx queues.  If the stack breaks away from a
       
  3211 	 * single qdisc implementation, we can look at this again. */
       
  3212 	tx_ring = adapter->tx_ring;
       
  3213 
       
  3214 	if (unlikely(skb->len <= 0)) {
       
  3215 		if (!adapter->ecdev) {
       
  3216 			dev_kfree_skb_any(skb);
       
  3217 		}
       
  3218 		return NETDEV_TX_OK;
       
  3219 	}
       
  3220 
       
  3221 	mss = skb_shinfo(skb)->gso_size;
       
  3222 	/* The controller does a simple calculation to
       
  3223 	 * make sure there is enough room in the FIFO before
       
   3224 	 * initiating the DMA for each buffer.  The constraint is

   3225 	 * ceil(buffer len/mss) <= 4.  To make sure we don't
       
  3226 	 * overrun the FIFO, adjust the max buffer len if mss
       
  3227 	 * drops. */
       
  3228 	if (mss) {
       
  3229 		u8 hdr_len;
       
  3230 		max_per_txd = min(mss << 2, max_per_txd);
       
  3231 		max_txd_pwr = fls(max_per_txd) - 1;
       
  3232 
       
  3233 		hdr_len = skb_transport_offset(skb) + tcp_hdrlen(skb);
       
  3234 		if (skb->data_len && hdr_len == len) {
       
  3235 			switch (hw->mac_type) {
       
  3236 				unsigned int pull_size;
       
  3237 			case e1000_82544:
       
   3238 				/* Make sure we have room to chop off 4 bytes,

   3239 				 * and that the end alignment will work out to

   3240 				 * this hardware's requirements.

   3241 				 * NOTE: this is a TSO-only workaround;

   3242 				 * if the end byte alignment is not correct, move us

   3243 				 * into the next dword */
       
  3244 				if ((unsigned long)(skb_tail_pointer(skb) - 1) & 4)
       
  3245 					break;
       
  3246 				/* fall through */
       
  3247 				pull_size = min((unsigned int)4, skb->data_len);
       
  3248 				if (!__pskb_pull_tail(skb, pull_size)) {
       
  3249 					e_err(drv, "__pskb_pull_tail "
       
  3250 					      "failed.\n");
       
  3251 					if (!adapter->ecdev) {
       
  3252 						dev_kfree_skb_any(skb);
       
  3253 					}
       
  3254 					return NETDEV_TX_OK;
       
  3255 				}
       
  3256 				len = skb_headlen(skb);
       
  3257 				break;
       
  3258 			default:
       
  3259 				/* do nothing */
       
  3260 				break;
       
  3261 			}
       
  3262 		}
       
  3263 	}
       
  3264 
       
  3265 	/* reserve a descriptor for the offload context */
       
  3266 	if ((mss) || (skb->ip_summed == CHECKSUM_PARTIAL))
       
  3267 		count++;
       
  3268 	count++;
       
  3269 
       
  3270 	/* Controller Erratum workaround */
       
  3271 	if (!skb->data_len && tx_ring->last_tx_tso && !skb_is_gso(skb))
       
  3272 		count++;
       
  3273 
       
  3274 	count += TXD_USE_COUNT(len, max_txd_pwr);
       
  3275 
       
  3276 	if (adapter->pcix_82544)
       
  3277 		count++;
       
  3278 
       
  3279 	/* work-around for errata 10 and it applies to all controllers
       
  3280 	 * in PCI-X mode, so add one more descriptor to the count
       
  3281 	 */
       
  3282 	if (unlikely((hw->bus_type == e1000_bus_type_pcix) &&
       
  3283 			(len > 2015)))
       
  3284 		count++;
       
  3285 
       
  3286 	nr_frags = skb_shinfo(skb)->nr_frags;
       
  3287 	for (f = 0; f < nr_frags; f++)
       
  3288 		count += TXD_USE_COUNT(skb_frag_size(&skb_shinfo(skb)->frags[f]),
       
  3289 				       max_txd_pwr);
       
  3290 	if (adapter->pcix_82544)
       
  3291 		count += nr_frags;
       
  3292 
       
  3293 	/* need: count + 2 desc gap to keep tail from touching
       
   3294 	 * head; otherwise try next time */
       
  3295 	if (unlikely(e1000_maybe_stop_tx(netdev, tx_ring, count + 2)))
       
  3296 		return NETDEV_TX_BUSY;
       
  3297 
       
  3298 	if (unlikely((hw->mac_type == e1000_82547) &&
       
  3299 		     (e1000_82547_fifo_workaround(adapter, skb)))) {
       
  3300 		if (!adapter->ecdev) {
       
  3301 			netif_stop_queue(netdev);
       
  3302 			if (!test_bit(__E1000_DOWN, &adapter->flags))
       
  3303 				schedule_delayed_work(&adapter->fifo_stall_task, 1);
       
  3304 		}
       
  3305 		return NETDEV_TX_BUSY;
       
  3306 	}
       
  3307 
       
  3308 	if (vlan_tx_tag_present(skb)) {
       
  3309 		tx_flags |= E1000_TX_FLAGS_VLAN;
       
  3310 		tx_flags |= (vlan_tx_tag_get(skb) << E1000_TX_FLAGS_VLAN_SHIFT);
       
  3311 	}
       
  3312 
       
  3313 	first = tx_ring->next_to_use;
       
  3314 
       
  3315 	tso = e1000_tso(adapter, tx_ring, skb);
       
  3316 	if (tso < 0) {
       
  3317 		if (!adapter->ecdev) {
       
  3318 			dev_kfree_skb_any(skb);
       
  3319 		}
       
  3320 		return NETDEV_TX_OK;
       
  3321 	}
       
  3322 
       
  3323 	if (likely(tso)) {
       
  3324 		if (likely(hw->mac_type != e1000_82544))
       
  3325 			tx_ring->last_tx_tso = true;
       
  3326 		tx_flags |= E1000_TX_FLAGS_TSO;
       
  3327 	} else if (likely(e1000_tx_csum(adapter, tx_ring, skb)))
       
  3328 		tx_flags |= E1000_TX_FLAGS_CSUM;
       
  3329 
       
  3330 	if (likely(skb->protocol == htons(ETH_P_IP)))
       
  3331 		tx_flags |= E1000_TX_FLAGS_IPV4;
       
  3332 
       
  3333 	if (unlikely(skb->no_fcs))
       
  3334 		tx_flags |= E1000_TX_FLAGS_NO_FCS;
       
  3335 
       
  3336 	count = e1000_tx_map(adapter, tx_ring, skb, first, max_per_txd,
       
  3337 	                     nr_frags, mss);
       
  3338 
       
  3339 	if (count) {
       
  3340 		e1000_tx_queue(adapter, tx_ring, tx_flags, count);
       
  3341 		if (!adapter->ecdev) {
       
  3342 			/* Make sure there is space in the ring for the next send. */
       
  3343 			e1000_maybe_stop_tx(netdev, tx_ring, MAX_SKB_FRAGS + 2);
       
  3344 		}
       
  3345 
       
  3346 	} else {
       
  3347 		if (!adapter->ecdev) {
       
  3348 			dev_kfree_skb_any(skb);
       
  3349 		}
       
  3350 		tx_ring->buffer_info[first].time_stamp = 0;
       
  3351 		tx_ring->next_to_use = first;
       
  3352 	}
       
  3353 
       
  3354 	return NETDEV_TX_OK;
       
  3355 }
       
  3356 
       
   3357 #define NUM_REGS 38 /* 1-based count */
       
  3358 static void e1000_regdump(struct e1000_adapter *adapter)
       
  3359 {
       
  3360 	struct e1000_hw *hw = &adapter->hw;
       
  3361 	u32 regs[NUM_REGS];
       
  3362 	u32 *regs_buff = regs;
       
  3363 	int i = 0;
       
  3364 
       
  3365 	static const char * const reg_name[] = {
       
  3366 		"CTRL",  "STATUS",
       
  3367 		"RCTL", "RDLEN", "RDH", "RDT", "RDTR",
       
  3368 		"TCTL", "TDBAL", "TDBAH", "TDLEN", "TDH", "TDT",
       
  3369 		"TIDV", "TXDCTL", "TADV", "TARC0",
       
  3370 		"TDBAL1", "TDBAH1", "TDLEN1", "TDH1", "TDT1",
       
  3371 		"TXDCTL1", "TARC1",
       
  3372 		"CTRL_EXT", "ERT", "RDBAL", "RDBAH",
       
  3373 		"TDFH", "TDFT", "TDFHS", "TDFTS", "TDFPC",
       
  3374 		"RDFH", "RDFT", "RDFHS", "RDFTS", "RDFPC"
       
  3375 	};
       
  3376 
       
  3377 	regs_buff[0]  = er32(CTRL);
       
  3378 	regs_buff[1]  = er32(STATUS);
       
  3379 
       
  3380 	regs_buff[2]  = er32(RCTL);
       
  3381 	regs_buff[3]  = er32(RDLEN);
       
  3382 	regs_buff[4]  = er32(RDH);
       
  3383 	regs_buff[5]  = er32(RDT);
       
  3384 	regs_buff[6]  = er32(RDTR);
       
  3385 
       
  3386 	regs_buff[7]  = er32(TCTL);
       
  3387 	regs_buff[8]  = er32(TDBAL);
       
  3388 	regs_buff[9]  = er32(TDBAH);
       
  3389 	regs_buff[10] = er32(TDLEN);
       
  3390 	regs_buff[11] = er32(TDH);
       
  3391 	regs_buff[12] = er32(TDT);
       
  3392 	regs_buff[13] = er32(TIDV);
       
  3393 	regs_buff[14] = er32(TXDCTL);
       
  3394 	regs_buff[15] = er32(TADV);
       
  3395 	regs_buff[16] = er32(TARC0);
       
  3396 
       
  3397 	regs_buff[17] = er32(TDBAL1);
       
  3398 	regs_buff[18] = er32(TDBAH1);
       
  3399 	regs_buff[19] = er32(TDLEN1);
       
  3400 	regs_buff[20] = er32(TDH1);
       
  3401 	regs_buff[21] = er32(TDT1);
       
  3402 	regs_buff[22] = er32(TXDCTL1);
       
  3403 	regs_buff[23] = er32(TARC1);
       
  3404 	regs_buff[24] = er32(CTRL_EXT);
       
  3405 	regs_buff[25] = er32(ERT);
       
  3406 	regs_buff[26] = er32(RDBAL0);
       
  3407 	regs_buff[27] = er32(RDBAH0);
       
  3408 	regs_buff[28] = er32(TDFH);
       
  3409 	regs_buff[29] = er32(TDFT);
       
  3410 	regs_buff[30] = er32(TDFHS);
       
  3411 	regs_buff[31] = er32(TDFTS);
       
  3412 	regs_buff[32] = er32(TDFPC);
       
  3413 	regs_buff[33] = er32(RDFH);
       
  3414 	regs_buff[34] = er32(RDFT);
       
  3415 	regs_buff[35] = er32(RDFHS);
       
  3416 	regs_buff[36] = er32(RDFTS);
       
  3417 	regs_buff[37] = er32(RDFPC);
       
  3418 
       
  3419 	pr_info("Register dump\n");
       
  3420 	for (i = 0; i < NUM_REGS; i++)
       
  3421 		pr_info("%-15s  %08x\n", reg_name[i], regs_buff[i]);
       
  3422 }
       
  3423 
       
  3424 /*
       
  3425  * e1000_dump: Print registers, tx ring and rx ring
       
  3426  */
       
  3427 static void e1000_dump(struct e1000_adapter *adapter)
       
  3428 {
       
  3429 	/* this code doesn't handle multiple rings */
       
  3430 	struct e1000_tx_ring *tx_ring = adapter->tx_ring;
       
  3431 	struct e1000_rx_ring *rx_ring = adapter->rx_ring;
       
  3432 	int i;
       
  3433 
       
  3434 	if (!netif_msg_hw(adapter))
       
  3435 		return;
       
  3436 
       
  3437 	/* Print Registers */
       
  3438 	e1000_regdump(adapter);
       
  3439 
       
  3440 	/*
       
  3441 	 * transmit dump
       
  3442 	 */
       
  3443 	pr_info("TX Desc ring0 dump\n");
       
  3444 
       
  3445 	/* Transmit Descriptor Formats - DEXT[29] is 0 (Legacy) or 1 (Extended)
       
  3446 	 *
       
  3447 	 * Legacy Transmit Descriptor
       
  3448 	 *   +--------------------------------------------------------------+
       
  3449 	 * 0 |         Buffer Address [63:0] (Reserved on Write Back)       |
       
  3450 	 *   +--------------------------------------------------------------+
       
  3451 	 * 8 | Special  |    CSS     | Status |  CMD    |  CSO   |  Length  |
       
  3452 	 *   +--------------------------------------------------------------+
       
  3453 	 *   63       48 47        36 35    32 31     24 23    16 15        0
       
  3454 	 *
       
  3455 	 * Extended Context Descriptor (DTYP=0x0) for TSO or checksum offload
       
  3456 	 *   63      48 47    40 39       32 31             16 15    8 7      0
       
  3457 	 *   +----------------------------------------------------------------+
       
   3458 	 * 0 |  TUCSE  | TUCSO  |   TUCSS   |     IPCSE       | IPCSO | IPCSS |
       
  3459 	 *   +----------------------------------------------------------------+
       
  3460 	 * 8 |   MSS   | HDRLEN | RSV | STA | TUCMD | DTYP |      PAYLEN      |
       
  3461 	 *   +----------------------------------------------------------------+
       
  3462 	 *   63      48 47    40 39 36 35 32 31   24 23  20 19                0
       
  3463 	 *
       
  3464 	 * Extended Data Descriptor (DTYP=0x1)
       
  3465 	 *   +----------------------------------------------------------------+
       
  3466 	 * 0 |                     Buffer Address [63:0]                      |
       
  3467 	 *   +----------------------------------------------------------------+
       
  3468 	 * 8 | VLAN tag |  POPTS  | Rsvd | Status | Command | DTYP |  DTALEN  |
       
  3469 	 *   +----------------------------------------------------------------+
       
  3470 	 *   63       48 47     40 39  36 35    32 31     24 23  20 19        0
       
  3471 	 */
       
  3472 	pr_info("Tc[desc]     [Ce CoCsIpceCoS] [MssHlRSCm0Plen] [bi->dma       ] leng  ntw timestmp         bi->skb\n");
       
  3473 	pr_info("Td[desc]     [address 63:0  ] [VlaPoRSCm1Dlen] [bi->dma       ] leng  ntw timestmp         bi->skb\n");
       
  3474 
       
  3475 	if (!netif_msg_tx_done(adapter))
       
  3476 		goto rx_ring_summary;
       
  3477 
       
  3478 	for (i = 0; tx_ring->desc && (i < tx_ring->count); i++) {
       
  3479 		struct e1000_tx_desc *tx_desc = E1000_TX_DESC(*tx_ring, i);
       
  3480 		struct e1000_buffer *buffer_info = &tx_ring->buffer_info[i];
       
  3481 		struct my_u { __le64 a; __le64 b; };
       
  3482 		struct my_u *u = (struct my_u *)tx_desc;
       
  3483 		const char *type;
       
  3484 
       
  3485 		if (i == tx_ring->next_to_use && i == tx_ring->next_to_clean)
       
  3486 			type = "NTC/U";
       
  3487 		else if (i == tx_ring->next_to_use)
       
  3488 			type = "NTU";
       
  3489 		else if (i == tx_ring->next_to_clean)
       
  3490 			type = "NTC";
       
  3491 		else
       
  3492 			type = "";
       
  3493 
       
  3494 		pr_info("T%c[0x%03X]    %016llX %016llX %016llX %04X  %3X %016llX %p %s\n",
       
  3495 			((le64_to_cpu(u->b) & (1<<20)) ? 'd' : 'c'), i,
       
  3496 			le64_to_cpu(u->a), le64_to_cpu(u->b),
       
  3497 			(u64)buffer_info->dma, buffer_info->length,
       
  3498 			buffer_info->next_to_watch,
       
  3499 			(u64)buffer_info->time_stamp, buffer_info->skb, type);
       
  3500 	}
       
  3501 
       
  3502 rx_ring_summary:
       
  3503 	/*
       
  3504 	 * receive dump
       
  3505 	 */
       
  3506 	pr_info("\nRX Desc ring dump\n");
       
  3507 
       
  3508 	/* Legacy Receive Descriptor Format
       
  3509 	 *
       
  3510 	 * +-----------------------------------------------------+
       
  3511 	 * |                Buffer Address [63:0]                |
       
  3512 	 * +-----------------------------------------------------+
       
  3513 	 * | VLAN Tag | Errors | Status 0 | Packet csum | Length |
       
  3514 	 * +-----------------------------------------------------+
       
  3515 	 * 63       48 47    40 39      32 31         16 15      0
       
  3516 	 */
       
  3517 	pr_info("R[desc]      [address 63:0  ] [vl er S cks ln] [bi->dma       ] [bi->skb]\n");
       
  3518 
       
  3519 	if (!netif_msg_rx_status(adapter))
       
  3520 		goto exit;
       
  3521 
       
  3522 	for (i = 0; rx_ring->desc && (i < rx_ring->count); i++) {
       
  3523 		struct e1000_rx_desc *rx_desc = E1000_RX_DESC(*rx_ring, i);
       
  3524 		struct e1000_buffer *buffer_info = &rx_ring->buffer_info[i];
       
  3525 		struct my_u { __le64 a; __le64 b; };
       
  3526 		struct my_u *u = (struct my_u *)rx_desc;
       
  3527 		const char *type;
       
  3528 
       
  3529 		if (i == rx_ring->next_to_use)
       
  3530 			type = "NTU";
       
  3531 		else if (i == rx_ring->next_to_clean)
       
  3532 			type = "NTC";
       
  3533 		else
       
  3534 			type = "";
       
  3535 
       
  3536 		pr_info("R[0x%03X]     %016llX %016llX %016llX %p %s\n",
       
  3537 			i, le64_to_cpu(u->a), le64_to_cpu(u->b),
       
  3538 			(u64)buffer_info->dma, buffer_info->skb, type);
       
  3539 	} /* for */
       
  3540 
       
  3541 	/* dump the descriptor caches */
       
  3542 	/* rx */
       
  3543 	pr_info("Rx descriptor cache in 64bit format\n");
       
  3544 	for (i = 0x6000; i <= 0x63FF ; i += 0x10) {
       
  3545 		pr_info("R%04X: %08X|%08X %08X|%08X\n",
       
  3546 			i,
       
  3547 			readl(adapter->hw.hw_addr + i+4),
       
  3548 			readl(adapter->hw.hw_addr + i),
       
  3549 			readl(adapter->hw.hw_addr + i+12),
       
  3550 			readl(adapter->hw.hw_addr + i+8));
       
  3551 	}
       
  3552 	/* tx */
       
  3553 	pr_info("Tx descriptor cache in 64bit format\n");
       
  3554 	for (i = 0x7000; i <= 0x73FF ; i += 0x10) {
       
  3555 		pr_info("T%04X: %08X|%08X %08X|%08X\n",
       
  3556 			i,
       
  3557 			readl(adapter->hw.hw_addr + i+4),
       
  3558 			readl(adapter->hw.hw_addr + i),
       
  3559 			readl(adapter->hw.hw_addr + i+12),
       
  3560 			readl(adapter->hw.hw_addr + i+8));
       
  3561 	}
       
  3562 exit:
       
  3563 	return;
       
  3564 }
       
  3565 
       
  3566 /**
       
  3567  * e1000_tx_timeout - Respond to a Tx Hang
       
  3568  * @netdev: network interface device structure
       
  3569  **/
       
  3570 
       
  3571 static void e1000_tx_timeout(struct net_device *netdev)
       
  3572 {
       
  3573 	struct e1000_adapter *adapter = netdev_priv(netdev);
       
  3574 
       
  3575 	/* Do the reset outside of interrupt context */
       
  3576 	adapter->tx_timeout_count++;
       
  3577 	schedule_work(&adapter->reset_task);
       
  3578 }
       
  3579 
       
  3580 static void e1000_reset_task(struct work_struct *work)
       
  3581 {
       
  3582 	struct e1000_adapter *adapter =
       
  3583 		container_of(work, struct e1000_adapter, reset_task);
       
  3584 
       
  3585 	if (test_bit(__E1000_DOWN, &adapter->flags))
       
  3586 		return;
       
  3587 	e_err(drv, "Reset adapter\n");
       
  3588 	e1000_reinit_safe(adapter);
       
  3589 }
       
  3590 
       
  3591 /**
       
  3592  * e1000_get_stats - Get System Network Statistics
       
  3593  * @netdev: network interface device structure
       
  3594  *
       
  3595  * Returns the address of the device statistics structure.
       
  3596  * The statistics are actually updated from the watchdog.
       
  3597  **/
       
  3598 
       
  3599 static struct net_device_stats *e1000_get_stats(struct net_device *netdev)
       
  3600 {
       
  3601 	/* only return the current stats */
       
  3602 	return &netdev->stats;
       
  3603 }
       
  3604 
       
  3605 /**
       
  3606  * e1000_change_mtu - Change the Maximum Transfer Unit
       
  3607  * @netdev: network interface device structure
       
  3608  * @new_mtu: new value for maximum frame size
       
  3609  *
       
  3610  * Returns 0 on success, negative on failure
       
  3611  **/
       
  3612 
       
  3613 static int e1000_change_mtu(struct net_device *netdev, int new_mtu)
       
  3614 {
       
  3615 	struct e1000_adapter *adapter = netdev_priv(netdev);
       
  3616 	struct e1000_hw *hw = &adapter->hw;
       
  3617 	int max_frame = new_mtu + ENET_HEADER_SIZE + ETHERNET_FCS_SIZE;
       
  3618 
       
  3619 	if (adapter->ecdev) {
       
  3620 		return -EBUSY;
       
  3621 	}
       
  3622 
       
  3623 	if ((max_frame < MINIMUM_ETHERNET_FRAME_SIZE) ||
       
  3624 	    (max_frame > MAX_JUMBO_FRAME_SIZE)) {
       
  3625 		e_err(probe, "Invalid MTU setting\n");
       
  3626 		return -EINVAL;
       
  3627 	}
       
  3628 
       
  3629 	/* Adapter-specific max frame size limits. */
       
  3630 	switch (hw->mac_type) {
       
  3631 	case e1000_undefined ... e1000_82542_rev2_1:
       
  3632 		if (max_frame > (ETH_FRAME_LEN + ETH_FCS_LEN)) {
       
  3633 			e_err(probe, "Jumbo Frames not supported.\n");
       
  3634 			return -EINVAL;
       
  3635 		}
       
  3636 		break;
       
  3637 	default:
       
  3638 		/* Capable of supporting up to MAX_JUMBO_FRAME_SIZE limit. */
       
  3639 		break;
       
  3640 	}
       
  3641 
       
  3642 	while (test_and_set_bit(__E1000_RESETTING, &adapter->flags))
       
  3643 		msleep(1);
       
  3644 	/* e1000_down has a dependency on max_frame_size */
       
  3645 	hw->max_frame_size = max_frame;
       
  3646 	if (netif_running(netdev))
       
  3647 		e1000_down(adapter);
       
  3648 
       
   3649 	/* NOTE: netdev_alloc_skb reserves 16 bytes, and typically NET_IP_ALIGN

   3650 	 * means we reserve 2 more; this pushes us to allocate from the next

   3651 	 * larger slab size.

   3652 	 * i.e. RXBUFFER_2048 --> size-4096 slab

   3653 	 * However, with the new *_jumbo_rx* routines, jumbo receives will use

   3654 	 * fragmented skbs */
       
  3655 
       
  3656 	if (max_frame <= E1000_RXBUFFER_2048)
       
  3657 		adapter->rx_buffer_len = E1000_RXBUFFER_2048;
       
  3658 	else
       
  3659 #if (PAGE_SIZE >= E1000_RXBUFFER_16384)
       
  3660 		adapter->rx_buffer_len = E1000_RXBUFFER_16384;
       
  3661 #elif (PAGE_SIZE >= E1000_RXBUFFER_4096)
       
  3662 		adapter->rx_buffer_len = PAGE_SIZE;
       
  3663 #endif
       
  3664 
       
  3665 	/* adjust allocation if LPE protects us, and we aren't using SBP */
       
  3666 	if (!hw->tbi_compatibility_on &&
       
  3667 	    ((max_frame == (ETH_FRAME_LEN + ETH_FCS_LEN)) ||
       
  3668 	     (max_frame == MAXIMUM_ETHERNET_VLAN_SIZE)))
       
  3669 		adapter->rx_buffer_len = MAXIMUM_ETHERNET_VLAN_SIZE;
       
  3670 
       
  3671 	pr_info("%s changing MTU from %d to %d\n",
       
  3672 		netdev->name, netdev->mtu, new_mtu);
       
  3673 	netdev->mtu = new_mtu;
       
  3674 
       
  3675 	if (netif_running(netdev))
       
  3676 		e1000_up(adapter);
       
  3677 	else
       
  3678 		e1000_reset(adapter);
       
  3679 
       
  3680 	clear_bit(__E1000_RESETTING, &adapter->flags);
       
  3681 
       
  3682 	return 0;
       
  3683 }
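
/* Worked example (illustrative, assuming ENET_HEADER_SIZE = 14 and
 * ETHERNET_FCS_SIZE = 4): how new_mtu maps to rx_buffer_len above:
 *
 *   MTU 1500 -> max_frame 1518 -> E1000_RXBUFFER_2048
 *   MTU 9000 -> max_frame 9018 -> E1000_RXBUFFER_16384, or PAGE_SIZE
 *               on 4 KiB-page builds, where the *_jumbo_rx* routines
 *               assemble the frame from page fragments
 */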
       
  3684 
       
  3685 /**
       
  3686  * e1000_update_stats - Update the board statistics counters
       
  3687  * @adapter: board private structure
       
  3688  **/
       
  3689 
       
  3690 void e1000_update_stats(struct e1000_adapter *adapter)
       
  3691 {
       
  3692 	struct net_device *netdev = adapter->netdev;
       
  3693 	struct e1000_hw *hw = &adapter->hw;
       
  3694 	struct pci_dev *pdev = adapter->pdev;
       
  3695 	unsigned long flags = 0;
       
  3696 	u16 phy_tmp;
       
  3697 
       
  3698 #define PHY_IDLE_ERROR_COUNT_MASK 0x00FF
       
  3699 
       
  3700 	/*
       
  3701 	 * Prevent stats update while adapter is being reset, or if the pci
       
  3702 	 * connection is down.
       
  3703 	 */
       
  3704 	if (adapter->link_speed == 0)
       
  3705 		return;
       
  3706 	if (pci_channel_offline(pdev))
       
  3707 		return;
       
  3708 
       
  3709 	if (!adapter->ecdev) {
       
  3710 		spin_lock_irqsave(&adapter->stats_lock, flags);
       
  3711 	}
       
  3712 
       
  3713 	/* these counters are modified from e1000_tbi_adjust_stats,
       
  3714 	 * called from the interrupt context, so they must only
       
  3715 	 * be written while holding adapter->stats_lock
       
  3716 	 */
       
  3717 
       
  3718 	adapter->stats.crcerrs += er32(CRCERRS);
       
  3719 	adapter->stats.gprc += er32(GPRC);
       
  3720 	adapter->stats.gorcl += er32(GORCL);
       
  3721 	adapter->stats.gorch += er32(GORCH);
       
  3722 	adapter->stats.bprc += er32(BPRC);
       
  3723 	adapter->stats.mprc += er32(MPRC);
       
  3724 	adapter->stats.roc += er32(ROC);
       
  3725 
       
  3726 	adapter->stats.prc64 += er32(PRC64);
       
  3727 	adapter->stats.prc127 += er32(PRC127);
       
  3728 	adapter->stats.prc255 += er32(PRC255);
       
  3729 	adapter->stats.prc511 += er32(PRC511);
       
  3730 	adapter->stats.prc1023 += er32(PRC1023);
       
  3731 	adapter->stats.prc1522 += er32(PRC1522);
       
  3732 
       
  3733 	adapter->stats.symerrs += er32(SYMERRS);
       
  3734 	adapter->stats.mpc += er32(MPC);
       
  3735 	adapter->stats.scc += er32(SCC);
       
  3736 	adapter->stats.ecol += er32(ECOL);
       
  3737 	adapter->stats.mcc += er32(MCC);
       
  3738 	adapter->stats.latecol += er32(LATECOL);
       
  3739 	adapter->stats.dc += er32(DC);
       
  3740 	adapter->stats.sec += er32(SEC);
       
  3741 	adapter->stats.rlec += er32(RLEC);
       
  3742 	adapter->stats.xonrxc += er32(XONRXC);
       
  3743 	adapter->stats.xontxc += er32(XONTXC);
       
  3744 	adapter->stats.xoffrxc += er32(XOFFRXC);
       
  3745 	adapter->stats.xofftxc += er32(XOFFTXC);
       
  3746 	adapter->stats.fcruc += er32(FCRUC);
       
  3747 	adapter->stats.gptc += er32(GPTC);
       
  3748 	adapter->stats.gotcl += er32(GOTCL);
       
  3749 	adapter->stats.gotch += er32(GOTCH);
       
  3750 	adapter->stats.rnbc += er32(RNBC);
       
  3751 	adapter->stats.ruc += er32(RUC);
       
  3752 	adapter->stats.rfc += er32(RFC);
       
  3753 	adapter->stats.rjc += er32(RJC);
       
  3754 	adapter->stats.torl += er32(TORL);
       
  3755 	adapter->stats.torh += er32(TORH);
       
  3756 	adapter->stats.totl += er32(TOTL);
       
  3757 	adapter->stats.toth += er32(TOTH);
       
  3758 	adapter->stats.tpr += er32(TPR);
       
  3759 
       
  3760 	adapter->stats.ptc64 += er32(PTC64);
       
  3761 	adapter->stats.ptc127 += er32(PTC127);
       
  3762 	adapter->stats.ptc255 += er32(PTC255);
       
  3763 	adapter->stats.ptc511 += er32(PTC511);
       
  3764 	adapter->stats.ptc1023 += er32(PTC1023);
       
  3765 	adapter->stats.ptc1522 += er32(PTC1522);
       
  3766 
       
  3767 	adapter->stats.mptc += er32(MPTC);
       
  3768 	adapter->stats.bptc += er32(BPTC);
       
  3769 
       
  3770 	/* used for adaptive IFS */
       
  3771 
       
  3772 	hw->tx_packet_delta = er32(TPT);
       
  3773 	adapter->stats.tpt += hw->tx_packet_delta;
       
  3774 	hw->collision_delta = er32(COLC);
       
  3775 	adapter->stats.colc += hw->collision_delta;
       
  3776 
       
  3777 	if (hw->mac_type >= e1000_82543) {
       
  3778 		adapter->stats.algnerrc += er32(ALGNERRC);
       
  3779 		adapter->stats.rxerrc += er32(RXERRC);
       
  3780 		adapter->stats.tncrs += er32(TNCRS);
       
  3781 		adapter->stats.cexterr += er32(CEXTERR);
       
  3782 		adapter->stats.tsctc += er32(TSCTC);
       
  3783 		adapter->stats.tsctfc += er32(TSCTFC);
       
  3784 	}
       
  3785 
       
  3786 	/* Fill out the OS statistics structure */
       
  3787 	netdev->stats.multicast = adapter->stats.mprc;
       
  3788 	netdev->stats.collisions = adapter->stats.colc;
       
  3789 
       
  3790 	/* Rx Errors */
       
  3791 
       
  3792 	/* RLEC on some newer hardware can be incorrect so build
       
   3793 	 * our own version based on RUC and ROC */
       
  3794 	netdev->stats.rx_errors = adapter->stats.rxerrc +
       
  3795 		adapter->stats.crcerrs + adapter->stats.algnerrc +
       
  3796 		adapter->stats.ruc + adapter->stats.roc +
       
  3797 		adapter->stats.cexterr;
       
  3798 	adapter->stats.rlerrc = adapter->stats.ruc + adapter->stats.roc;
       
  3799 	netdev->stats.rx_length_errors = adapter->stats.rlerrc;
       
  3800 	netdev->stats.rx_crc_errors = adapter->stats.crcerrs;
       
  3801 	netdev->stats.rx_frame_errors = adapter->stats.algnerrc;
       
  3802 	netdev->stats.rx_missed_errors = adapter->stats.mpc;
       
  3803 
       
  3804 	/* Tx Errors */
       
  3805 	adapter->stats.txerrc = adapter->stats.ecol + adapter->stats.latecol;
       
  3806 	netdev->stats.tx_errors = adapter->stats.txerrc;
       
  3807 	netdev->stats.tx_aborted_errors = adapter->stats.ecol;
       
  3808 	netdev->stats.tx_window_errors = adapter->stats.latecol;
       
  3809 	netdev->stats.tx_carrier_errors = adapter->stats.tncrs;
       
  3810 	if (hw->bad_tx_carr_stats_fd &&
       
  3811 	    adapter->link_duplex == FULL_DUPLEX) {
       
  3812 		netdev->stats.tx_carrier_errors = 0;
       
  3813 		adapter->stats.tncrs = 0;
       
  3814 	}
       
  3815 
       
  3816 	/* Tx Dropped needs to be maintained elsewhere */
       
  3817 
       
  3818 	/* Phy Stats */
       
  3819 	if (hw->media_type == e1000_media_type_copper) {
       
  3820 		if ((adapter->link_speed == SPEED_1000) &&
       
  3821 		   (!e1000_read_phy_reg(hw, PHY_1000T_STATUS, &phy_tmp))) {
       
  3822 			phy_tmp &= PHY_IDLE_ERROR_COUNT_MASK;
       
  3823 			adapter->phy_stats.idle_errors += phy_tmp;
       
  3824 		}
       
  3825 
       
  3826 		if ((hw->mac_type <= e1000_82546) &&
       
  3827 		   (hw->phy_type == e1000_phy_m88) &&
       
  3828 		   !e1000_read_phy_reg(hw, M88E1000_RX_ERR_CNTR, &phy_tmp))
       
  3829 			adapter->phy_stats.receive_errors += phy_tmp;
       
  3830 	}
       
  3831 
       
  3832 	/* Management Stats */
       
  3833 	if (hw->has_smbus) {
       
  3834 		adapter->stats.mgptc += er32(MGTPTC);
       
  3835 		adapter->stats.mgprc += er32(MGTPRC);
       
  3836 		adapter->stats.mgpdc += er32(MGTPDC);
       
  3837 	}
       
  3838 
       
  3839 	if (!adapter->ecdev) {
       
  3840 		spin_unlock_irqrestore(&adapter->stats_lock, flags);
       
  3841 	}
       
  3842 }
       
  3843 
       
  3844 void ec_poll(struct net_device *netdev)
       
  3845 {
       
  3846 	struct e1000_adapter *adapter = netdev_priv(netdev);
       
  3847 	if (jiffies - adapter->ec_watchdog_jiffies >= 2 * HZ) {
       
  3848 		e1000_watchdog(&adapter->watchdog_task.work);
       
  3849 		adapter->ec_watchdog_jiffies = jiffies;
       
  3850 	}
       
  3851 
       
  3852 	e1000_intr(0, netdev);
       
  3853 }
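
/* Illustrative sketch (hypothetical caller, not from this file): with
 * adapter->ecdev set, IRQs and NAPI are bypassed and the EtherCAT
 * master drives the NIC by calling ec_poll() from its cyclic task.
 */
#if 0	/* sketch; master_cycle() is an assumed name */
void master_cycle(struct net_device *netdev)
{
	ec_poll(netdev);	/* rx/tx processing + 2 s watchdog cadence */
	/* ... exchange process data via the ecdev interface ... */
}
#endif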
       
  3854 
       
  3855 /**
       
  3856  * e1000_intr - Interrupt Handler
       
  3857  * @irq: interrupt number
       
  3858  * @data: pointer to a network interface device structure
       
  3859  **/
       
  3860 
       
  3861 static irqreturn_t e1000_intr(int irq, void *data)
       
  3862 {
       
  3863 	struct net_device *netdev = data;
       
  3864 	struct e1000_adapter *adapter = netdev_priv(netdev);
       
  3865 	struct e1000_hw *hw = &adapter->hw;
       
  3866 	u32 icr = er32(ICR);
       
  3867 
       
   3868 	if (unlikely(!icr))
       
  3869 		return IRQ_NONE;  /* Not our interrupt */
       
  3870 
       
  3871 	/*
       
   3872 	 * We might have caused the interrupt, but the above

   3873 	 * read cleared it.  Just in case the driver is

   3874 	 * down, there is nothing to do, so return handled.
       
  3875 	 */
       
  3876 	if (unlikely(test_bit(__E1000_DOWN, &adapter->flags)))
       
  3877 		return IRQ_HANDLED;
       
  3878 
       
  3879 	if (unlikely(icr & (E1000_ICR_RXSEQ | E1000_ICR_LSC))) {
       
  3880 		hw->get_link_status = 1;
       
  3881 		/* guard against interrupt when we're going down */
       
  3882 		if (!adapter->ecdev && !test_bit(__E1000_DOWN, &adapter->flags))
       
  3883 			schedule_delayed_work(&adapter->watchdog_task, 1);
       
  3884 	}
       
  3885 
       
  3886 	if (adapter->ecdev) {
       
  3887 		int i, ec_work_done = 0;
       
  3888 		for (i = 0; i < E1000_MAX_INTR; i++) {
       
  3889 			if (unlikely(!adapter->clean_rx(adapter, adapter->rx_ring,
       
  3890 							&ec_work_done, 100) &&
       
  3891 						!e1000_clean_tx_irq(adapter, adapter->tx_ring))) {
       
  3892 				break;
       
  3893 			}
       
  3894 		}
       
   3895 	} else {
       
  3896 		/* disable interrupts, without the synchronize_irq bit */
       
  3897 		ew32(IMC, ~0);
       
  3898 		E1000_WRITE_FLUSH();
       
  3899 
       
  3900 		if (likely(napi_schedule_prep(&adapter->napi))) {
       
  3901 			adapter->total_tx_bytes = 0;
       
  3902 			adapter->total_tx_packets = 0;
       
  3903 			adapter->total_rx_bytes = 0;
       
  3904 			adapter->total_rx_packets = 0;
       
  3905 			__napi_schedule(&adapter->napi);
       
  3906 		} else {
       
  3907 			/* this really should not happen! if it does it is basically a
       
  3908 			 * bug, but not a hard error, so enable ints and continue */
       
  3909 			if (!test_bit(__E1000_DOWN, &adapter->flags))
       
  3910 				e1000_irq_enable(adapter);
       
  3911 		}
       
  3912 	}
       
  3913 
       
  3914 	return IRQ_HANDLED;
       
  3915 }
       
  3916 
       
  3917 /**
       
  3918  * e1000_clean - NAPI Rx polling callback
       
   3919  * @napi: NAPI structure embedded in the board private structure
       
  3920  * EtherCAT: never called
       
  3921  **/
       
  3922 static int e1000_clean(struct napi_struct *napi, int budget)
       
  3923 {
       
  3924 	struct e1000_adapter *adapter = container_of(napi, struct e1000_adapter, napi);
       
  3925 	int tx_clean_complete = 0, work_done = 0;
       
  3926 
       
  3927 	tx_clean_complete = e1000_clean_tx_irq(adapter, &adapter->tx_ring[0]);
       
  3928 
       
  3929 	adapter->clean_rx(adapter, &adapter->rx_ring[0], &work_done, budget);
       
  3930 
       
  3931 	if (!tx_clean_complete)
       
  3932 		work_done = budget;
       
  3933 
       
  3934 	/* If budget not fully consumed, exit the polling mode */
       
  3935 	if (work_done < budget) {
       
  3936 		if (likely(adapter->itr_setting & 3))
       
  3937 			e1000_set_itr(adapter);
       
  3938 		napi_complete(napi);
       
  3939 		if (!test_bit(__E1000_DOWN, &adapter->flags))
       
  3940 			e1000_irq_enable(adapter);
       
  3941 	}
       
  3942 
       
  3943 	return work_done;
       
  3944 }
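/*
 * NAPI contract as used above: a return value below budget signals
 * that polling is finished and interrupts may be re-enabled; returning
 * exactly budget keeps the adapter on the poll list.  Forcing
 * work_done = budget when TX cleaning is incomplete is what keeps NAPI
 * polling until the TX ring drains.
 */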
       
  3945 
       
  3946 /**
       
  3947  * e1000_clean_tx_irq - Reclaim resources after transmit completes
       
  3948  * @adapter: board private structure
       
  3949  **/
       
  3950 static bool e1000_clean_tx_irq(struct e1000_adapter *adapter,
       
  3951 			       struct e1000_tx_ring *tx_ring)
       
  3952 {
       
  3953 	struct e1000_hw *hw = &adapter->hw;
       
  3954 	struct net_device *netdev = adapter->netdev;
       
  3955 	struct e1000_tx_desc *tx_desc, *eop_desc;
       
  3956 	struct e1000_buffer *buffer_info;
       
  3957 	unsigned int i, eop;
       
  3958 	unsigned int count = 0;
       
   3959 	unsigned int total_tx_bytes = 0, total_tx_packets = 0;
       
  3960 
       
  3961 	i = tx_ring->next_to_clean;
       
  3962 	eop = tx_ring->buffer_info[i].next_to_watch;
       
  3963 	eop_desc = E1000_TX_DESC(*tx_ring, eop);
       
  3964 
       
  3965 	while ((eop_desc->upper.data & cpu_to_le32(E1000_TXD_STAT_DD)) &&
       
  3966 	       (count < tx_ring->count)) {
       
  3967 		bool cleaned = false;
       
  3968 		rmb();	/* read buffer_info after eop_desc */
       
  3969 		for ( ; !cleaned; count++) {
       
  3970 			tx_desc = E1000_TX_DESC(*tx_ring, i);
       
  3971 			buffer_info = &tx_ring->buffer_info[i];
       
  3972 			cleaned = (i == eop);
       
  3973 
       
  3974 			if (cleaned) {
       
  3975 				total_tx_packets += buffer_info->segs;
       
  3976 				total_tx_bytes += buffer_info->bytecount;
       
  3977 			}
       
  3978 			e1000_unmap_and_free_tx_resource(adapter, buffer_info);
       
  3979 			tx_desc->upper.data = 0;
       
  3980 
       
  3981 			if (unlikely(++i == tx_ring->count)) i = 0;
       
  3982 		}
       
  3983 
       
  3984 		eop = tx_ring->buffer_info[i].next_to_watch;
       
  3985 		eop_desc = E1000_TX_DESC(*tx_ring, eop);
       
  3986 	}
       
  3987 
       
  3988 	tx_ring->next_to_clean = i;
       
  3989 
       
  3990 #define TX_WAKE_THRESHOLD 32
       
  3991 	if (!adapter->ecdev && unlikely(count && netif_carrier_ok(netdev) &&
       
  3992 		     E1000_DESC_UNUSED(tx_ring) >= TX_WAKE_THRESHOLD)) {
       
  3993 		/* Make sure that anybody stopping the queue after this
       
  3994 		 * sees the new next_to_clean.
       
  3995 		 */
       
  3996 		smp_mb();
       
  3997 
       
  3998 		if (netif_queue_stopped(netdev) &&
       
  3999 		    !(test_bit(__E1000_DOWN, &adapter->flags))) {
       
  4000 			netif_wake_queue(netdev);
       
  4001 			++adapter->restart_queue;
       
  4002 		}
       
  4003 	}
       
  4004 
       
  4005 	if (!adapter->ecdev && adapter->detect_tx_hung) {
       
  4006 		/* Detect a transmit hang in hardware, this serializes the
       
  4007 		 * check with the clearing of time_stamp and movement of i */
       
  4008 		adapter->detect_tx_hung = false;
       
  4009 		if (tx_ring->buffer_info[eop].time_stamp &&
       
  4010 		    time_after(jiffies, tx_ring->buffer_info[eop].time_stamp +
       
  4011 		               (adapter->tx_timeout_factor * HZ)) &&
       
  4012 		    !(er32(STATUS) & E1000_STATUS_TXOFF)) {
       
  4013 
       
  4014 			/* detected Tx unit hang */
       
  4015 			e_err(drv, "Detected Tx Unit Hang\n"
       
  4016 			      "  Tx Queue             <%lu>\n"
       
  4017 			      "  TDH                  <%x>\n"
       
  4018 			      "  TDT                  <%x>\n"
       
  4019 			      "  next_to_use          <%x>\n"
       
  4020 			      "  next_to_clean        <%x>\n"
       
  4021 			      "buffer_info[next_to_clean]\n"
       
  4022 			      "  time_stamp           <%lx>\n"
       
  4023 			      "  next_to_watch        <%x>\n"
       
  4024 			      "  jiffies              <%lx>\n"
       
  4025 			      "  next_to_watch.status <%x>\n",
       
   4026 				/* pointer difference is already in ring-sized units */
       
   4027 				(unsigned long)(tx_ring - adapter->tx_ring),
       
  4028 				readl(hw->hw_addr + tx_ring->tdh),
       
  4029 				readl(hw->hw_addr + tx_ring->tdt),
       
  4030 				tx_ring->next_to_use,
       
  4031 				tx_ring->next_to_clean,
       
  4032 				tx_ring->buffer_info[eop].time_stamp,
       
  4033 				eop,
       
  4034 				jiffies,
       
  4035 				eop_desc->upper.fields.status);
       
  4036 			e1000_dump(adapter);
       
  4037 			netif_stop_queue(netdev);
       
  4038 		}
       
  4039 	}
       
  4040 	adapter->total_tx_bytes += total_tx_bytes;
       
  4041 	adapter->total_tx_packets += total_tx_packets;
       
  4042 	netdev->stats.tx_bytes += total_tx_bytes;
       
  4043 	netdev->stats.tx_packets += total_tx_packets;
       
  4044 	return count < tx_ring->count;
       
  4045 }
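/*
 * Ring bookkeeping sketch for the cleaner above: next_to_clean chases
 * next_to_watch, the EOP descriptor index recorded at transmit time,
 * and all indices wrap modulo the ring size:
 *
 *	if (unlikely(++i == tx_ring->count))
 *		i = 0;
 *
 * The DD bit in eop_desc->upper.data is the only completion signal
 * written back by hardware; everything else is driver state.
 */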
       
  4046 
       
  4047 /**
       
  4048  * e1000_rx_checksum - Receive Checksum Offload for 82543
       
  4049  * @adapter:     board private structure
       
  4050  * @status_err:  receive descriptor status and error fields
       
  4051  * @csum:        receive descriptor csum field
       
   4052  * @skb:         socket buffer with received data
       
  4053  **/
       
  4054 
       
  4055 static void e1000_rx_checksum(struct e1000_adapter *adapter, u32 status_err,
       
  4056 			      u32 csum, struct sk_buff *skb)
       
  4057 {
       
  4058 	struct e1000_hw *hw = &adapter->hw;
       
  4059 	u16 status = (u16)status_err;
       
  4060 	u8 errors = (u8)(status_err >> 24);
       
  4061 
       
  4062 	skb_checksum_none_assert(skb);
       
  4063 
       
  4064 	/* 82543 or newer only */
       
  4065 	if (unlikely(hw->mac_type < e1000_82543)) return;
       
  4066 	/* Ignore Checksum bit is set */
       
  4067 	if (unlikely(status & E1000_RXD_STAT_IXSM)) return;
       
  4068 	/* TCP/UDP checksum error bit is set */
       
  4069 	if (unlikely(errors & E1000_RXD_ERR_TCPE)) {
       
  4070 		/* let the stack verify checksum errors */
       
  4071 		adapter->hw_csum_err++;
       
  4072 		return;
       
  4073 	}
       
  4074 	/* TCP/UDP Checksum has not been calculated */
       
  4075 	if (!(status & E1000_RXD_STAT_TCPCS))
       
  4076 		return;
       
  4077 
       
   4078 	/* It must be a TCP or UDP packet with a valid checksum;
       
   4079 	 * TCPCS is known to be set after the early return above */
       
   4080 	skb->ip_summed = CHECKSUM_UNNECESSARY;
       
  4083 	adapter->hw_csum_good++;
       
  4084 }
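/*
 * Callers pack the descriptor status byte and error byte into the
 * status_err argument, errors in bits 24-31, e.g.:
 *
 *	e1000_rx_checksum(adapter,
 *	                  (u32)status | ((u32)rx_desc->errors << 24),
 *	                  le16_to_cpu(rx_desc->csum), skb);
 */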
       
  4085 
       
  4086 /**
       
  4087  * e1000_consume_page - helper function
       
  4088  **/
       
  4089 static void e1000_consume_page(struct e1000_buffer *bi, struct sk_buff *skb,
       
  4090                                u16 length)
       
  4091 {
       
  4092 	bi->page = NULL;
       
  4093 	skb->len += length;
       
  4094 	skb->data_len += length;
       
  4095 	skb->truesize += PAGE_SIZE;
       
  4096 }
       
  4097 
       
  4098 /**
       
  4099  * e1000_receive_skb - helper function to handle rx indications
       
  4100  * @adapter: board private structure
       
  4101  * @status: descriptor status field as written by hardware
       
  4102  * @vlan: descriptor vlan field as written by hardware (no le/be conversion)
       
  4103  * @skb: pointer to sk_buff to be indicated to stack
       
  4104  */
       
  4105 static void e1000_receive_skb(struct e1000_adapter *adapter, u8 status,
       
  4106 			      __le16 vlan, struct sk_buff *skb)
       
  4107 {
       
  4108 	skb->protocol = eth_type_trans(skb, adapter->netdev);
       
  4109 
       
  4110 	if (status & E1000_RXD_STAT_VP) {
       
  4111 		u16 vid = le16_to_cpu(vlan) & E1000_RXD_SPC_VLAN_MASK;
       
  4112 
       
  4113 		__vlan_hwaccel_put_tag(skb, vid);
       
  4114 	}
       
  4115 	napi_gro_receive(&adapter->napi, skb);
       
  4116 }
       
  4117 
       
  4118 /**
       
  4119  * e1000_clean_jumbo_rx_irq - Send received data up the network stack; legacy
       
  4120  * @adapter: board private structure
       
  4121  * @rx_ring: ring to clean
       
  4122  * @work_done: amount of napi work completed this call
       
  4123  * @work_to_do: max amount of work allowed for this call to do
       
  4124  *
       
  4125  * the return value indicates whether actual cleaning was done, there
       
  4126  * is no guarantee that everything was cleaned
       
  4127  */
       
  4128 static bool e1000_clean_jumbo_rx_irq(struct e1000_adapter *adapter,
       
  4129 				     struct e1000_rx_ring *rx_ring,
       
  4130 				     int *work_done, int work_to_do)
       
  4131 {
       
  4132 	struct e1000_hw *hw = &adapter->hw;
       
  4133 	struct net_device *netdev = adapter->netdev;
       
  4134 	struct pci_dev *pdev = adapter->pdev;
       
  4135 	struct e1000_rx_desc *rx_desc, *next_rxd;
       
  4136 	struct e1000_buffer *buffer_info, *next_buffer;
       
  4137 	unsigned long irq_flags;
       
  4138 	u32 length;
       
  4139 	unsigned int i;
       
  4140 	int cleaned_count = 0;
       
  4141 	bool cleaned = false;
       
   4142 	unsigned int total_rx_bytes = 0, total_rx_packets = 0;
       
  4143 
       
  4144 	i = rx_ring->next_to_clean;
       
  4145 	rx_desc = E1000_RX_DESC(*rx_ring, i);
       
  4146 	buffer_info = &rx_ring->buffer_info[i];
       
  4147 
       
  4148 	while (rx_desc->status & E1000_RXD_STAT_DD) {
       
  4149 		struct sk_buff *skb;
       
  4150 		u8 status;
       
  4151 
       
  4152 		if (*work_done >= work_to_do)
       
  4153 			break;
       
  4154 		(*work_done)++;
       
  4155 		rmb(); /* read descriptor and rx_buffer_info after status DD */
       
  4156 
       
  4157 		status = rx_desc->status;
       
  4158 		skb = buffer_info->skb;
       
  4159 		if (!adapter->ecdev) {
       
  4160 			buffer_info->skb = NULL;
       
  4161 		}
       
  4162 
       
  4163 		if (++i == rx_ring->count) i = 0;
       
  4164 		next_rxd = E1000_RX_DESC(*rx_ring, i);
       
  4165 		prefetch(next_rxd);
       
  4166 
       
  4167 		next_buffer = &rx_ring->buffer_info[i];
       
  4168 
       
  4169 		cleaned = true;
       
  4170 		cleaned_count++;
       
  4171 		dma_unmap_page(&pdev->dev, buffer_info->dma,
       
  4172 			       buffer_info->length, DMA_FROM_DEVICE);
       
  4173 		buffer_info->dma = 0;
       
  4174 
       
  4175 		length = le16_to_cpu(rx_desc->length);
       
  4176 
       
  4177 		/* errors is only valid for DD + EOP descriptors */
       
  4178 		if (!adapter->ecdev &&
       
  4179 		    unlikely((status & E1000_RXD_STAT_EOP) &&
       
  4180 		    (rx_desc->errors & E1000_RXD_ERR_FRAME_ERR_MASK))) {
       
  4181 			u8 last_byte = *(skb->data + length - 1);
       
  4182 			if (TBI_ACCEPT(hw, status, rx_desc->errors, length,
       
  4183 				       last_byte)) {
       
  4184 				spin_lock_irqsave(&adapter->stats_lock,
       
  4185 				                  irq_flags);
       
  4186 				e1000_tbi_adjust_stats(hw, &adapter->stats,
       
  4187 				                       length, skb->data);
       
  4188 				spin_unlock_irqrestore(&adapter->stats_lock,
       
  4189 				                       irq_flags);
       
  4190 				length--;
       
  4191 			} else {
       
  4192 				/* recycle both page and skb */
       
  4193 				buffer_info->skb = skb;
       
  4194 				/* an error means any chain goes out the window
       
  4195 				 * too */
       
  4196 				if (rx_ring->rx_skb_top)
       
  4197 					dev_kfree_skb(rx_ring->rx_skb_top);
       
  4198 				rx_ring->rx_skb_top = NULL;
       
  4199 				goto next_desc;
       
  4200 			}
       
  4201 		}
       
  4202 
       
  4203 #define rxtop rx_ring->rx_skb_top
       
  4204 		if (!(status & E1000_RXD_STAT_EOP)) {
       
  4205 			/* this descriptor is only the beginning (or middle) */
       
  4206 			if (!rxtop) {
       
  4207 				/* this is the beginning of a chain */
       
  4208 				rxtop = skb;
       
  4209 				skb_fill_page_desc(rxtop, 0, buffer_info->page,
       
  4210 				                   0, length);
       
  4211 			} else {
       
  4212 				/* this is the middle of a chain */
       
  4213 				skb_fill_page_desc(rxtop,
       
  4214 				    skb_shinfo(rxtop)->nr_frags,
       
  4215 				    buffer_info->page, 0, length);
       
  4216 				/* re-use the skb, only consumed the page */
       
  4217 				buffer_info->skb = skb;
       
  4218 			}
       
  4219 			e1000_consume_page(buffer_info, rxtop, length);
       
  4220 			goto next_desc;
       
  4221 		} else {
       
  4222 			if (rxtop) {
       
  4223 				/* end of the chain */
       
  4224 				skb_fill_page_desc(rxtop,
       
  4225 				    skb_shinfo(rxtop)->nr_frags,
       
  4226 				    buffer_info->page, 0, length);
       
  4227 				/* re-use the current skb, we only consumed the
       
  4228 				 * page */
       
  4229 				buffer_info->skb = skb;
       
  4230 				skb = rxtop;
       
  4231 				rxtop = NULL;
       
  4232 				e1000_consume_page(buffer_info, skb, length);
       
  4233 			} else {
       
   4234 				/* no chain, got EOP; this buf is the whole packet.
       
   4235 				 * copybreak to save the put_page/alloc_page */
       
  4236 				if (length <= copybreak &&
       
  4237 				    skb_tailroom(skb) >= length) {
       
  4238 					u8 *vaddr;
       
  4239 					vaddr = kmap_atomic(buffer_info->page);
       
  4240 					memcpy(skb_tail_pointer(skb), vaddr, length);
       
  4241 					kunmap_atomic(vaddr);
       
  4242 					/* re-use the page, so don't erase
       
  4243 					 * buffer_info->page */
       
  4244 					skb_put(skb, length);
       
  4245 				} else {
       
  4246 					skb_fill_page_desc(skb, 0,
       
  4247 					                   buffer_info->page, 0,
       
  4248 				                           length);
       
  4249 					e1000_consume_page(buffer_info, skb,
       
  4250 					                   length);
       
  4251 				}
       
  4252 			}
       
  4253 		}
       
  4254 
       
  4255 		/* Receive Checksum Offload XXX recompute due to CRC strip? */
       
  4256 		e1000_rx_checksum(adapter,
       
  4257 		                  (u32)(status) |
       
  4258 		                  ((u32)(rx_desc->errors) << 24),
       
  4259 		                  le16_to_cpu(rx_desc->csum), skb);
       
  4260 
       
  4261 		total_rx_bytes += (skb->len - 4); /* don't count FCS */
       
  4262 		if (likely(!(netdev->features & NETIF_F_RXFCS)))
       
  4263 			pskb_trim(skb, skb->len - 4);
       
  4264 		total_rx_packets++;
       
  4265 
       
  4266 		/* eth type trans needs skb->data to point to something */
       
  4267 		if (!pskb_may_pull(skb, ETH_HLEN)) {
       
  4268 			e_err(drv, "pskb_may_pull failed.\n");
       
  4269 			if (!adapter->ecdev) {
       
  4270 				dev_kfree_skb(skb);
       
  4271 			}
       
  4272 			goto next_desc;
       
  4273 		}
       
  4274 
       
  4275 		if (adapter->ecdev) {
       
  4276 			ecdev_receive(adapter->ecdev, skb->data, length);
       
  4277 
       
  4278 			// No need to detect link status as
       
  4279 			// long as frames are received: Reset watchdog.
       
  4280 			adapter->ec_watchdog_jiffies = jiffies;
       
  4281 		} else {
       
  4282 			e1000_receive_skb(adapter, status, rx_desc->special, skb);
       
  4283 		}
       
  4284 
       
  4285 next_desc:
       
  4286 		rx_desc->status = 0;
       
  4287 
       
  4288 		/* return some buffers to hardware, one at a time is too slow */
       
  4289 		if (unlikely(cleaned_count >= E1000_RX_BUFFER_WRITE)) {
       
  4290 			adapter->alloc_rx_buf(adapter, rx_ring, cleaned_count);
       
  4291 			cleaned_count = 0;
       
  4292 		}
       
  4293 
       
  4294 		/* use prefetched values */
       
  4295 		rx_desc = next_rxd;
       
  4296 		buffer_info = next_buffer;
       
  4297 	}
       
  4298 	rx_ring->next_to_clean = i;
       
  4299 
       
  4300 	cleaned_count = E1000_DESC_UNUSED(rx_ring);
       
  4301 	if (cleaned_count)
       
  4302 		adapter->alloc_rx_buf(adapter, rx_ring, cleaned_count);
       
  4303 
       
  4304 	adapter->total_rx_packets += total_rx_packets;
       
  4305 	adapter->total_rx_bytes += total_rx_bytes;
       
  4306 	netdev->stats.rx_bytes += total_rx_bytes;
       
  4307 	netdev->stats.rx_packets += total_rx_packets;
       
  4308 	return cleaned;
       
  4309 }
       
  4310 
       
  4311 /*
       
  4312  * this should improve performance for small packets with large amounts
       
  4313  * of reassembly being done in the stack
       
  4314  */
       
  4315 static void e1000_check_copybreak(struct net_device *netdev,
       
  4316 				 struct e1000_buffer *buffer_info,
       
  4317 				 u32 length, struct sk_buff **skb)
       
  4318 {
       
  4319 	struct e1000_adapter *adapter = netdev_priv(netdev);
       
  4320 	struct sk_buff *new_skb;
       
  4321 
       
  4322 	if (adapter->ecdev || length > copybreak)
       
  4323 		return;
       
  4324 
       
  4325 	new_skb = netdev_alloc_skb_ip_align(netdev, length);
       
  4326 	if (!new_skb)
       
  4327 		return;
       
  4328 
       
  4329 	skb_copy_to_linear_data_offset(new_skb, -NET_IP_ALIGN,
       
  4330 				       (*skb)->data - NET_IP_ALIGN,
       
  4331 				       length + NET_IP_ALIGN);
       
  4332 	/* save the skb in buffer_info as good */
       
  4333 	buffer_info->skb = *skb;
       
  4334 	*skb = new_skb;
       
  4335 }
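/*
 * copybreak (a module-wide threshold defined earlier in this file)
 * trades one memcpy for buffer recycling: packets no longer than
 * copybreak are copied into a fresh, right-sized skb and the original
 * DMA buffer is handed back to the ring via buffer_info->skb.
 * EtherCAT devices skip this, as their buffers are recycled anyway.
 */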
       
  4336 
       
  4337 /**
       
  4338  * e1000_clean_rx_irq - Send received data up the network stack; legacy
       
  4339  * @adapter: board private structure
       
  4340  * @rx_ring: ring to clean
       
  4341  * @work_done: amount of napi work completed this call
       
  4342  * @work_to_do: max amount of work allowed for this call to do
       
  4343  */
       
  4344 static bool e1000_clean_rx_irq(struct e1000_adapter *adapter,
       
  4345 			       struct e1000_rx_ring *rx_ring,
       
  4346 			       int *work_done, int work_to_do)
       
  4347 {
       
  4348 	struct e1000_hw *hw = &adapter->hw;
       
  4349 	struct net_device *netdev = adapter->netdev;
       
  4350 	struct pci_dev *pdev = adapter->pdev;
       
  4351 	struct e1000_rx_desc *rx_desc, *next_rxd;
       
  4352 	struct e1000_buffer *buffer_info, *next_buffer;
       
  4353 	unsigned long flags;
       
  4354 	u32 length;
       
  4355 	unsigned int i;
       
  4356 	int cleaned_count = 0;
       
  4357 	bool cleaned = false;
       
   4358 	unsigned int total_rx_bytes = 0, total_rx_packets = 0;
       
  4359 
       
  4360 	i = rx_ring->next_to_clean;
       
  4361 	rx_desc = E1000_RX_DESC(*rx_ring, i);
       
  4362 	buffer_info = &rx_ring->buffer_info[i];
       
  4363 
       
  4364 	while (rx_desc->status & E1000_RXD_STAT_DD) {
       
  4365 		struct sk_buff *skb;
       
  4366 		u8 status;
       
  4367 
       
  4368 		if (*work_done >= work_to_do)
       
  4369 			break;
       
  4370 		(*work_done)++;
       
  4371 		rmb(); /* read descriptor and rx_buffer_info after status DD */
       
  4372 
       
  4373 		status = rx_desc->status;
       
  4374 		skb = buffer_info->skb;
       
  4375 		if (!adapter->ecdev) {
       
  4376 			buffer_info->skb = NULL;
       
  4377 		}
       
  4378 
       
  4379 		prefetch(skb->data - NET_IP_ALIGN);
       
  4380 
       
  4381 		if (++i == rx_ring->count) i = 0;
       
  4382 		next_rxd = E1000_RX_DESC(*rx_ring, i);
       
  4383 		prefetch(next_rxd);
       
  4384 
       
  4385 		next_buffer = &rx_ring->buffer_info[i];
       
  4386 
       
  4387 		cleaned = true;
       
  4388 		cleaned_count++;
       
  4389 		dma_unmap_single(&pdev->dev, buffer_info->dma,
       
  4390 				 buffer_info->length, DMA_FROM_DEVICE);
       
  4391 		buffer_info->dma = 0;
       
  4392 
       
  4393 		length = le16_to_cpu(rx_desc->length);
       
  4394 		/* !EOP means multiple descriptors were used to store a single
       
   4395 		 * packet; if that's the case we need to toss it.  In fact, we
       
   4396 		 * need to toss every packet with the EOP bit clear and the next
       
  4397 		 * frame that _does_ have the EOP bit set, as it is by
       
  4398 		 * definition only a frame fragment
       
  4399 		 */
       
  4400 		if (unlikely(!(status & E1000_RXD_STAT_EOP)))
       
  4401 			adapter->discarding = true;
       
  4402 
       
  4403 		if (adapter->discarding) {
       
  4404 			/* All receives must fit into a single buffer */
       
  4405 			e_dbg("Receive packet consumed multiple buffers\n");
       
  4406 			/* recycle */
       
  4407 			buffer_info->skb = skb;
       
  4408 			if (status & E1000_RXD_STAT_EOP)
       
  4409 				adapter->discarding = false;
       
  4410 			goto next_desc;
       
  4411 		}
       
  4412 
       
  4413 		if (!adapter->ecdev &&
       
  4414 		    unlikely(rx_desc->errors & E1000_RXD_ERR_FRAME_ERR_MASK)) {
       
  4415 			u8 last_byte = *(skb->data + length - 1);
       
  4416 			if (TBI_ACCEPT(hw, status, rx_desc->errors, length,
       
  4417 				       last_byte)) {
       
  4418 				spin_lock_irqsave(&adapter->stats_lock, flags);
       
  4419 				e1000_tbi_adjust_stats(hw, &adapter->stats,
       
  4420 				                       length, skb->data);
       
  4421 				spin_unlock_irqrestore(&adapter->stats_lock,
       
  4422 				                       flags);
       
  4423 				length--;
       
  4424 			} else {
       
  4425 				/* recycle */
       
  4426 				buffer_info->skb = skb;
       
  4427 				goto next_desc;
       
  4428 			}
       
  4429 		}
       
  4430 
       
  4431 		total_rx_bytes += (length - 4); /* don't count FCS */
       
  4432 		total_rx_packets++;
       
  4433 
       
  4434 		if (likely(!(netdev->features & NETIF_F_RXFCS)))
       
  4435 			/* adjust length to remove Ethernet CRC, this must be
       
  4436 			 * done after the TBI_ACCEPT workaround above
       
  4437 			 */
       
  4438 			length -= 4;
       
  4439 
       
  4440 		e1000_check_copybreak(netdev, buffer_info, length, &skb);
       
  4441 
       
  4442 		skb_put(skb, length);
       
  4443 
       
  4444 		/* Receive Checksum Offload */
       
  4445 		e1000_rx_checksum(adapter,
       
  4446 				  (u32)(status) |
       
  4447 				  ((u32)(rx_desc->errors) << 24),
       
  4448 				  le16_to_cpu(rx_desc->csum), skb);
       
  4449 
       
  4450 		if (adapter->ecdev) {
       
  4451 			ecdev_receive(adapter->ecdev, skb->data, length);
       
  4452 
       
  4453 			// No need to detect link status as
       
  4454 			// long as frames are received: Reset watchdog.
       
  4455 			adapter->ec_watchdog_jiffies = jiffies;
       
  4456 		} else {
       
  4457 			e1000_receive_skb(adapter, status, rx_desc->special, skb);
       
  4458 		}
       
  4459 
       
  4460 next_desc:
       
  4461 		rx_desc->status = 0;
       
  4462 
       
  4463 		/* return some buffers to hardware, one at a time is too slow */
       
  4464 		if (unlikely(cleaned_count >= E1000_RX_BUFFER_WRITE)) {
       
  4465 			adapter->alloc_rx_buf(adapter, rx_ring, cleaned_count);
       
  4466 			cleaned_count = 0;
       
  4467 		}
       
  4468 
       
  4469 		/* use prefetched values */
       
  4470 		rx_desc = next_rxd;
       
  4471 		buffer_info = next_buffer;
       
  4472 	}
       
  4473 	rx_ring->next_to_clean = i;
       
  4474 
       
  4475 	cleaned_count = E1000_DESC_UNUSED(rx_ring);
       
  4476 	if (cleaned_count)
       
  4477 		adapter->alloc_rx_buf(adapter, rx_ring, cleaned_count);
       
  4478 
       
  4479 	adapter->total_rx_packets += total_rx_packets;
       
  4480 	adapter->total_rx_bytes += total_rx_bytes;
       
  4481 	netdev->stats.rx_bytes += total_rx_bytes;
       
  4482 	netdev->stats.rx_packets += total_rx_packets;
       
  4483 	return cleaned;
       
  4484 }
       
  4485 
       
  4486 /**
       
  4487  * e1000_alloc_jumbo_rx_buffers - Replace used jumbo receive buffers
       
  4488  * @adapter: address of board private structure
       
  4489  * @rx_ring: pointer to receive ring structure
       
  4490  * @cleaned_count: number of buffers to allocate this pass
       
  4491  **/
       
  4492 
       
  4493 static void
       
  4494 e1000_alloc_jumbo_rx_buffers(struct e1000_adapter *adapter,
       
  4495                              struct e1000_rx_ring *rx_ring, int cleaned_count)
       
  4496 {
       
  4497 	struct net_device *netdev = adapter->netdev;
       
  4498 	struct pci_dev *pdev = adapter->pdev;
       
  4499 	struct e1000_rx_desc *rx_desc;
       
  4500 	struct e1000_buffer *buffer_info;
       
  4501 	struct sk_buff *skb;
       
  4502 	unsigned int i;
       
   4503 	unsigned int bufsz = 256 - 16; /* for skb_reserve */
       
  4504 
       
  4505 	i = rx_ring->next_to_use;
       
  4506 	buffer_info = &rx_ring->buffer_info[i];
       
  4507 
       
  4508 	while (cleaned_count--) {
       
  4509 		skb = buffer_info->skb;
       
  4510 		if (skb) {
       
  4511 			skb_trim(skb, 0);
       
  4512 			goto check_page;
       
  4513 		}
       
  4514 
       
  4515 		skb = netdev_alloc_skb_ip_align(netdev, bufsz);
       
  4516 		if (unlikely(!skb)) {
       
  4517 			/* Better luck next round */
       
  4518 			adapter->alloc_rx_buff_failed++;
       
  4519 			break;
       
  4520 		}
       
  4521 
       
  4522 		/* Fix for errata 23, can't cross 64kB boundary */
       
  4523 		if (!e1000_check_64k_bound(adapter, skb->data, bufsz)) {
       
  4524 			struct sk_buff *oldskb = skb;
       
  4525 			e_err(rx_err, "skb align check failed: %u bytes at "
       
  4526 			      "%p\n", bufsz, skb->data);
       
  4527 			/* Try again, without freeing the previous */
       
  4528 			skb = netdev_alloc_skb_ip_align(netdev, bufsz);
       
  4529 			/* Failed allocation, critical failure */
       
  4530 			if (!skb) {
       
  4531 				dev_kfree_skb(oldskb);
       
  4532 				adapter->alloc_rx_buff_failed++;
       
  4533 				break;
       
  4534 			}
       
  4535 
       
  4536 			if (!e1000_check_64k_bound(adapter, skb->data, bufsz)) {
       
  4537 				/* give up */
       
  4538 				dev_kfree_skb(skb);
       
  4539 				dev_kfree_skb(oldskb);
       
  4540 				break; /* while (cleaned_count--) */
       
  4541 			}
       
  4542 
       
  4543 			/* Use new allocation */
       
  4544 			dev_kfree_skb(oldskb);
       
  4545 		}
       
  4546 		buffer_info->skb = skb;
       
  4547 		buffer_info->length = adapter->rx_buffer_len;
       
  4548 check_page:
       
  4549 		/* allocate a new page if necessary */
       
  4550 		if (!buffer_info->page) {
       
  4551 			buffer_info->page = alloc_page(GFP_ATOMIC);
       
  4552 			if (unlikely(!buffer_info->page)) {
       
  4553 				adapter->alloc_rx_buff_failed++;
       
  4554 				break;
       
  4555 			}
       
  4556 		}
       
  4557 
       
  4558 		if (!buffer_info->dma) {
       
  4559 			buffer_info->dma = dma_map_page(&pdev->dev,
       
  4560 			                                buffer_info->page, 0,
       
  4561 							buffer_info->length,
       
  4562 							DMA_FROM_DEVICE);
       
  4563 			if (dma_mapping_error(&pdev->dev, buffer_info->dma)) {
       
  4564 				put_page(buffer_info->page);
       
  4565 				dev_kfree_skb(skb);
       
  4566 				buffer_info->page = NULL;
       
  4567 				buffer_info->skb = NULL;
       
  4568 				buffer_info->dma = 0;
       
  4569 				adapter->alloc_rx_buff_failed++;
       
  4570 				break; /* while !buffer_info->skb */
       
  4571 			}
       
  4572 		}
       
  4573 
       
  4574 		rx_desc = E1000_RX_DESC(*rx_ring, i);
       
  4575 		rx_desc->buffer_addr = cpu_to_le64(buffer_info->dma);
       
  4576 
       
  4577 		if (unlikely(++i == rx_ring->count))
       
  4578 			i = 0;
       
  4579 		buffer_info = &rx_ring->buffer_info[i];
       
  4580 	}
       
  4581 
       
  4582 	if (likely(rx_ring->next_to_use != i)) {
       
  4583 		rx_ring->next_to_use = i;
       
  4584 		if (unlikely(i-- == 0))
       
  4585 			i = (rx_ring->count - 1);
       
  4586 
       
  4587 		/* Force memory writes to complete before letting h/w
       
  4588 		 * know there are new descriptors to fetch.  (Only
       
  4589 		 * applicable for weak-ordered memory model archs,
       
  4590 		 * such as IA-64). */
       
  4591 		wmb();
       
  4592 		writel(i, adapter->hw.hw_addr + rx_ring->rdt);
       
  4593 	}
       
  4594 }
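/*
 * Errata 23 strategy used above: a receive buffer must not cross a
 * 64 kB boundary, so the second allocation is made while the offending
 * skb is still held (preventing the allocator from returning the same
 * region), and the loser is freed afterwards.
 */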
       
  4595 
       
  4596 /**
       
  4597  * e1000_alloc_rx_buffers - Replace used receive buffers; legacy & extended
       
   4598  * @adapter: address of board private structure
 * @rx_ring: pointer to receive ring structure
 * @cleaned_count: number of buffers to allocate this pass
       
  4599  **/
       
  4600 
       
  4601 static void e1000_alloc_rx_buffers(struct e1000_adapter *adapter,
       
  4602 				   struct e1000_rx_ring *rx_ring,
       
  4603 				   int cleaned_count)
       
  4604 {
       
  4605 	struct e1000_hw *hw = &adapter->hw;
       
  4606 	struct net_device *netdev = adapter->netdev;
       
  4607 	struct pci_dev *pdev = adapter->pdev;
       
  4608 	struct e1000_rx_desc *rx_desc;
       
  4609 	struct e1000_buffer *buffer_info;
       
  4610 	struct sk_buff *skb;
       
  4611 	unsigned int i;
       
  4612 	unsigned int bufsz = adapter->rx_buffer_len;
       
  4613 
       
  4614 	i = rx_ring->next_to_use;
       
  4615 	buffer_info = &rx_ring->buffer_info[i];
       
  4616 
       
  4617 	while (cleaned_count--) {
       
  4618 		skb = buffer_info->skb;
       
  4619 		if (skb) {
       
  4620 			skb_trim(skb, 0);
       
  4621 			goto map_skb;
       
  4622 		}
       
  4623 
       
  4624 		skb = netdev_alloc_skb_ip_align(netdev, bufsz);
       
  4625 		if (unlikely(!skb)) {
       
  4626 			/* Better luck next round */
       
  4627 			adapter->alloc_rx_buff_failed++;
       
  4628 			break;
       
  4629 		}
       
  4630 
       
  4631 		/* Fix for errata 23, can't cross 64kB boundary */
       
  4632 		if (!e1000_check_64k_bound(adapter, skb->data, bufsz)) {
       
  4633 			struct sk_buff *oldskb = skb;
       
  4634 			e_err(rx_err, "skb align check failed: %u bytes at "
       
  4635 			      "%p\n", bufsz, skb->data);
       
  4636 			/* Try again, without freeing the previous */
       
  4637 			skb = netdev_alloc_skb_ip_align(netdev, bufsz);
       
  4638 			/* Failed allocation, critical failure */
       
  4639 			if (!skb) {
       
  4640 				dev_kfree_skb(oldskb);
       
  4641 				adapter->alloc_rx_buff_failed++;
       
  4642 				break;
       
  4643 			}
       
  4644 
       
  4645 			if (!e1000_check_64k_bound(adapter, skb->data, bufsz)) {
       
  4646 				/* give up */
       
  4647 				dev_kfree_skb(skb);
       
  4648 				dev_kfree_skb(oldskb);
       
  4649 				adapter->alloc_rx_buff_failed++;
       
  4650 				break; /* while !buffer_info->skb */
       
  4651 			}
       
  4652 
       
  4653 			/* Use new allocation */
       
  4654 			dev_kfree_skb(oldskb);
       
  4655 		}
       
  4656 		buffer_info->skb = skb;
       
  4657 		buffer_info->length = adapter->rx_buffer_len;
       
  4658 map_skb:
       
  4659 		buffer_info->dma = dma_map_single(&pdev->dev,
       
  4660 						  skb->data,
       
  4661 						  buffer_info->length,
       
  4662 						  DMA_FROM_DEVICE);
       
  4663 		if (dma_mapping_error(&pdev->dev, buffer_info->dma)) {
       
  4664 			dev_kfree_skb(skb);
       
  4665 			buffer_info->skb = NULL;
       
  4666 			buffer_info->dma = 0;
       
  4667 			adapter->alloc_rx_buff_failed++;
       
  4668 			break; /* while !buffer_info->skb */
       
  4669 		}
       
  4670 
       
  4671 		/*
       
  4672 		 * XXX if it was allocated cleanly it will never map to a
       
  4673 		 * boundary crossing
       
  4674 		 */
       
  4675 
       
  4676 		/* Fix for errata 23, can't cross 64kB boundary */
       
  4677 		if (!e1000_check_64k_bound(adapter,
       
  4678 					(void *)(unsigned long)buffer_info->dma,
       
  4679 					adapter->rx_buffer_len)) {
       
  4680 			e_err(rx_err, "dma align check failed: %u bytes at "
       
  4681 			      "%p\n", adapter->rx_buffer_len,
       
  4682 			      (void *)(unsigned long)buffer_info->dma);
       
  4683 			dev_kfree_skb(skb);
       
  4684 			buffer_info->skb = NULL;
       
  4685 
       
  4686 			dma_unmap_single(&pdev->dev, buffer_info->dma,
       
  4687 					 adapter->rx_buffer_len,
       
  4688 					 DMA_FROM_DEVICE);
       
  4689 			buffer_info->dma = 0;
       
  4690 
       
  4691 			adapter->alloc_rx_buff_failed++;
       
  4692 			break; /* while !buffer_info->skb */
       
  4693 		}
       
  4694 		rx_desc = E1000_RX_DESC(*rx_ring, i);
       
  4695 		rx_desc->buffer_addr = cpu_to_le64(buffer_info->dma);
       
  4696 
       
  4697 		if (unlikely(++i == rx_ring->count))
       
  4698 			i = 0;
       
  4699 		buffer_info = &rx_ring->buffer_info[i];
       
  4700 	}
       
  4701 
       
  4702 	if (likely(rx_ring->next_to_use != i)) {
       
  4703 		rx_ring->next_to_use = i;
       
  4704 		if (unlikely(i-- == 0))
       
  4705 			i = (rx_ring->count - 1);
       
  4706 
       
  4707 		/* Force memory writes to complete before letting h/w
       
  4708 		 * know there are new descriptors to fetch.  (Only
       
  4709 		 * applicable for weak-ordered memory model archs,
       
  4710 		 * such as IA-64). */
       
  4711 		wmb();
       
  4712 		writel(i, hw->hw_addr + rx_ring->rdt);
       
  4713 	}
       
  4714 }
       
  4715 
       
  4716 /**
       
  4717  * e1000_smartspeed - Workaround for SmartSpeed on 82541 and 82547 controllers.
       
   4718  * @adapter: board private structure
       
  4719  **/
       
  4720 
       
  4721 static void e1000_smartspeed(struct e1000_adapter *adapter)
       
  4722 {
       
  4723 	struct e1000_hw *hw = &adapter->hw;
       
  4724 	u16 phy_status;
       
  4725 	u16 phy_ctrl;
       
  4726 
       
  4727 	if ((hw->phy_type != e1000_phy_igp) || !hw->autoneg ||
       
  4728 	   !(hw->autoneg_advertised & ADVERTISE_1000_FULL))
       
  4729 		return;
       
  4730 
       
  4731 	if (adapter->smartspeed == 0) {
       
   4732 		/* If the Master/Slave config fault is asserted twice,
       
   4733 		 * we assume back-to-back faults */
       
  4734 		e1000_read_phy_reg(hw, PHY_1000T_STATUS, &phy_status);
       
  4735 		if (!(phy_status & SR_1000T_MS_CONFIG_FAULT)) return;
       
  4736 		e1000_read_phy_reg(hw, PHY_1000T_STATUS, &phy_status);
       
  4737 		if (!(phy_status & SR_1000T_MS_CONFIG_FAULT)) return;
       
  4738 		e1000_read_phy_reg(hw, PHY_1000T_CTRL, &phy_ctrl);
       
  4739 		if (phy_ctrl & CR_1000T_MS_ENABLE) {
       
  4740 			phy_ctrl &= ~CR_1000T_MS_ENABLE;
       
  4741 			e1000_write_phy_reg(hw, PHY_1000T_CTRL,
       
  4742 					    phy_ctrl);
       
  4743 			adapter->smartspeed++;
       
  4744 			if (!e1000_phy_setup_autoneg(hw) &&
       
  4745 			   !e1000_read_phy_reg(hw, PHY_CTRL,
       
  4746 				   	       &phy_ctrl)) {
       
  4747 				phy_ctrl |= (MII_CR_AUTO_NEG_EN |
       
  4748 					     MII_CR_RESTART_AUTO_NEG);
       
  4749 				e1000_write_phy_reg(hw, PHY_CTRL,
       
  4750 						    phy_ctrl);
       
  4751 			}
       
  4752 		}
       
  4753 		return;
       
  4754 	} else if (adapter->smartspeed == E1000_SMARTSPEED_DOWNSHIFT) {
       
  4755 		/* If still no link, perhaps using 2/3 pair cable */
       
  4756 		e1000_read_phy_reg(hw, PHY_1000T_CTRL, &phy_ctrl);
       
  4757 		phy_ctrl |= CR_1000T_MS_ENABLE;
       
  4758 		e1000_write_phy_reg(hw, PHY_1000T_CTRL, phy_ctrl);
       
  4759 		if (!e1000_phy_setup_autoneg(hw) &&
       
  4760 		   !e1000_read_phy_reg(hw, PHY_CTRL, &phy_ctrl)) {
       
  4761 			phy_ctrl |= (MII_CR_AUTO_NEG_EN |
       
  4762 				     MII_CR_RESTART_AUTO_NEG);
       
  4763 			e1000_write_phy_reg(hw, PHY_CTRL, phy_ctrl);
       
  4764 		}
       
  4765 	}
       
  4766 	/* Restart process after E1000_SMARTSPEED_MAX iterations */
       
  4767 	if (adapter->smartspeed++ == E1000_SMARTSPEED_MAX)
       
  4768 		adapter->smartspeed = 0;
       
  4769 }
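/*
 * SmartSpeed summary: a repeated 1000BASE-T master/slave configuration
 * fault is first worked around by clearing CR_1000T_MS_ENABLE and
 * restarting autonegotiation; if the link is still down after
 * E1000_SMARTSPEED_DOWNSHIFT polls, manual master mode is forced
 * instead, which helps on 2- or 3-pair cabling.
 */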
       
  4770 
       
  4771 /**
       
   4772  * e1000_ioctl - handle ioctl calls
       
   4773  * @netdev: network interface device structure
       
   4774  * @ifr: interface request structure
       
   4775  * @cmd: ioctl command to execute
       
  4776  **/
       
  4777 
       
  4778 static int e1000_ioctl(struct net_device *netdev, struct ifreq *ifr, int cmd)
       
  4779 {
       
  4780 	switch (cmd) {
       
  4781 	case SIOCGMIIPHY:
       
  4782 	case SIOCGMIIREG:
       
  4783 	case SIOCSMIIREG:
       
  4784 		return e1000_mii_ioctl(netdev, ifr, cmd);
       
  4785 	default:
       
  4786 		return -EOPNOTSUPP;
       
  4787 	}
       
  4788 }
       
  4789 
       
  4790 /**
       
   4791  * e1000_mii_ioctl - handle MII ioctl calls
       
   4792  * @netdev: network interface device structure
       
   4793  * @ifr: interface request structure
       
   4794  * @cmd: SIOCGMIIPHY, SIOCGMIIREG or SIOCSMIIREG
       
  4795  **/
       
  4796 
       
  4797 static int e1000_mii_ioctl(struct net_device *netdev, struct ifreq *ifr,
       
  4798 			   int cmd)
       
  4799 {
       
  4800 	struct e1000_adapter *adapter = netdev_priv(netdev);
       
  4801 	struct e1000_hw *hw = &adapter->hw;
       
  4802 	struct mii_ioctl_data *data = if_mii(ifr);
       
  4803 	int retval;
       
  4804 	u16 mii_reg;
       
  4805 	unsigned long flags;
       
  4806 
       
  4807 	if (hw->media_type != e1000_media_type_copper)
       
  4808 		return -EOPNOTSUPP;
       
  4809 
       
  4810 	switch (cmd) {
       
  4811 	case SIOCGMIIPHY:
       
  4812 		data->phy_id = hw->phy_addr;
       
  4813 		break;
       
  4814 	case SIOCGMIIREG:
       
  4815 		if (adapter->ecdev) {
       
  4816 			return -EPERM;
       
  4817 		}
       
  4818 		spin_lock_irqsave(&adapter->stats_lock, flags);
       
  4819 		if (e1000_read_phy_reg(hw, data->reg_num & 0x1F,
       
  4820 				   &data->val_out)) {
       
  4821 			spin_unlock_irqrestore(&adapter->stats_lock, flags);
       
  4822 			return -EIO;
       
  4823 		}
       
  4824 		spin_unlock_irqrestore(&adapter->stats_lock, flags);
       
  4825 		break;
       
  4826 	case SIOCSMIIREG:
       
  4827 		if (adapter->ecdev) {
       
  4828 			return -EPERM;
       
  4829 		}
       
  4830 		if (data->reg_num & ~(0x1F))
       
  4831 			return -EFAULT;
       
  4832 		mii_reg = data->val_in;
       
  4833 		spin_lock_irqsave(&adapter->stats_lock, flags);
       
  4834 		if (e1000_write_phy_reg(hw, data->reg_num,
       
  4835 					mii_reg)) {
       
  4836 			spin_unlock_irqrestore(&adapter->stats_lock, flags);
       
  4837 			return -EIO;
       
  4838 		}
       
  4839 		spin_unlock_irqrestore(&adapter->stats_lock, flags);
       
  4840 		if (hw->media_type == e1000_media_type_copper) {
       
  4841 			switch (data->reg_num) {
       
  4842 			case PHY_CTRL:
       
  4843 				if (mii_reg & MII_CR_POWER_DOWN)
       
  4844 					break;
       
  4845 				if (mii_reg & MII_CR_AUTO_NEG_EN) {
       
  4846 					hw->autoneg = 1;
       
  4847 					hw->autoneg_advertised = 0x2F;
       
  4848 				} else {
       
  4849 					u32 speed;
       
  4850 					if (mii_reg & 0x40)
       
  4851 						speed = SPEED_1000;
       
  4852 					else if (mii_reg & 0x2000)
       
  4853 						speed = SPEED_100;
       
  4854 					else
       
  4855 						speed = SPEED_10;
       
  4856 					retval = e1000_set_spd_dplx(
       
  4857 						adapter, speed,
       
  4858 						((mii_reg & 0x100)
       
  4859 						 ? DUPLEX_FULL :
       
  4860 						 DUPLEX_HALF));
       
  4861 					if (retval)
       
  4862 						return retval;
       
  4863 				}
       
  4864 				if (netif_running(adapter->netdev))
       
  4865 					e1000_reinit_locked(adapter);
       
  4866 				else
       
  4867 					e1000_reset(adapter);
       
  4868 				break;
       
  4869 			case M88E1000_PHY_SPEC_CTRL:
       
  4870 			case M88E1000_EXT_PHY_SPEC_CTRL:
       
  4871 				if (e1000_phy_reset(hw))
       
  4872 					return -EIO;
       
  4873 				break;
       
  4874 			}
       
  4875 		} else {
       
  4876 			switch (data->reg_num) {
       
  4877 			case PHY_CTRL:
       
  4878 				if (mii_reg & MII_CR_POWER_DOWN)
       
  4879 					break;
       
  4880 				if (netif_running(adapter->netdev))
       
  4881 					e1000_reinit_locked(adapter);
       
  4882 				else
       
  4883 					e1000_reset(adapter);
       
  4884 				break;
       
  4885 			}
       
  4886 		}
       
  4887 		break;
       
  4888 	default:
       
  4889 		return -EOPNOTSUPP;
       
  4890 	}
       
  4891 	return E1000_SUCCESS;
       
  4892 }
       
  4893 
       
  4894 void e1000_pci_set_mwi(struct e1000_hw *hw)
       
  4895 {
       
  4896 	struct e1000_adapter *adapter = hw->back;
       
  4897 	int ret_val = pci_set_mwi(adapter->pdev);
       
  4898 
       
  4899 	if (ret_val)
       
  4900 		e_err(probe, "Error in setting MWI\n");
       
  4901 }
       
  4902 
       
  4903 void e1000_pci_clear_mwi(struct e1000_hw *hw)
       
  4904 {
       
  4905 	struct e1000_adapter *adapter = hw->back;
       
  4906 
       
  4907 	pci_clear_mwi(adapter->pdev);
       
  4908 }
       
  4909 
       
  4910 int e1000_pcix_get_mmrbc(struct e1000_hw *hw)
       
  4911 {
       
  4912 	struct e1000_adapter *adapter = hw->back;
       
  4913 	return pcix_get_mmrbc(adapter->pdev);
       
  4914 }
       
  4915 
       
  4916 void e1000_pcix_set_mmrbc(struct e1000_hw *hw, int mmrbc)
       
  4917 {
       
  4918 	struct e1000_adapter *adapter = hw->back;
       
  4919 	pcix_set_mmrbc(adapter->pdev, mmrbc);
       
  4920 }
       
  4921 
       
  4922 void e1000_io_write(struct e1000_hw *hw, unsigned long port, u32 value)
       
  4923 {
       
  4924 	outl(value, port);
       
  4925 }
       
  4926 
       
  4927 static bool e1000_vlan_used(struct e1000_adapter *adapter)
       
  4928 {
       
  4929 	u16 vid;
       
  4930 
       
  4931 	for_each_set_bit(vid, adapter->active_vlans, VLAN_N_VID)
       
  4932 		return true;
       
  4933 	return false;
       
  4934 }
       
  4935 
       
  4936 static void __e1000_vlan_mode(struct e1000_adapter *adapter,
       
  4937 			      netdev_features_t features)
       
  4938 {
       
  4939 	struct e1000_hw *hw = &adapter->hw;
       
  4940 	u32 ctrl;
       
  4941 
       
  4942 	ctrl = er32(CTRL);
       
  4943 	if (features & NETIF_F_HW_VLAN_RX) {
       
  4944 		/* enable VLAN tag insert/strip */
       
  4945 		ctrl |= E1000_CTRL_VME;
       
  4946 	} else {
       
  4947 		/* disable VLAN tag insert/strip */
       
  4948 		ctrl &= ~E1000_CTRL_VME;
       
  4949 	}
       
  4950 	ew32(CTRL, ctrl);
       
  4951 }
       
  4952 static void e1000_vlan_filter_on_off(struct e1000_adapter *adapter,
       
  4953 				     bool filter_on)
       
  4954 {
       
  4955 	struct e1000_hw *hw = &adapter->hw;
       
  4956 	u32 rctl;
       
  4957 
       
  4958 	if (!test_bit(__E1000_DOWN, &adapter->flags))
       
  4959 		e1000_irq_disable(adapter);
       
  4960 
       
  4961 	__e1000_vlan_mode(adapter, adapter->netdev->features);
       
  4962 	if (filter_on) {
       
  4963 		/* enable VLAN receive filtering */
       
  4964 		rctl = er32(RCTL);
       
  4965 		rctl &= ~E1000_RCTL_CFIEN;
       
  4966 		if (!(adapter->netdev->flags & IFF_PROMISC))
       
  4967 			rctl |= E1000_RCTL_VFE;
       
  4968 		ew32(RCTL, rctl);
       
  4969 		e1000_update_mng_vlan(adapter);
       
  4970 	} else {
       
  4971 		/* disable VLAN receive filtering */
       
  4972 		rctl = er32(RCTL);
       
  4973 		rctl &= ~E1000_RCTL_VFE;
       
  4974 		ew32(RCTL, rctl);
       
  4975 	}
       
  4976 
       
  4977 	if (!test_bit(__E1000_DOWN, &adapter->flags))
       
  4978 		e1000_irq_enable(adapter);
       
  4979 }
       
  4980 
       
  4981 static void e1000_vlan_mode(struct net_device *netdev,
       
  4982 			    netdev_features_t features)
       
  4983 {
       
  4984 	struct e1000_adapter *adapter = netdev_priv(netdev);
       
  4985 
       
  4986 	if (!test_bit(__E1000_DOWN, &adapter->flags))
       
  4987 		e1000_irq_disable(adapter);
       
  4988 
       
  4989 	__e1000_vlan_mode(adapter, features);
       
  4990 
       
  4991 	if (!test_bit(__E1000_DOWN, &adapter->flags))
       
  4992 		e1000_irq_enable(adapter);
       
  4993 }
       
  4994 
       
  4995 static int e1000_vlan_rx_add_vid(struct net_device *netdev, u16 vid)
       
  4996 {
       
  4997 	struct e1000_adapter *adapter = netdev_priv(netdev);
       
  4998 	struct e1000_hw *hw = &adapter->hw;
       
  4999 	u32 vfta, index;
       
  5000 
       
  5001 	if ((hw->mng_cookie.status &
       
  5002 	     E1000_MNG_DHCP_COOKIE_STATUS_VLAN_SUPPORT) &&
       
  5003 	    (vid == adapter->mng_vlan_id))
       
  5004 		return 0;
       
  5005 
       
  5006 	if (!e1000_vlan_used(adapter))
       
  5007 		e1000_vlan_filter_on_off(adapter, true);
       
  5008 
       
  5009 	/* add VID to filter table */
       
  5010 	index = (vid >> 5) & 0x7F;
       
  5011 	vfta = E1000_READ_REG_ARRAY(hw, VFTA, index);
       
  5012 	vfta |= (1 << (vid & 0x1F));
       
  5013 	e1000_write_vfta(hw, index, vfta);
       
  5014 
       
  5015 	set_bit(vid, adapter->active_vlans);
       
  5016 
       
  5017 	return 0;
       
  5018 }
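/*
 * VFTA layout: 128 32-bit registers provide one bit per possible VLAN
 * ID (4096 bits total).  Bits 5-11 of the vid select the register and
 * bits 0-4 select the bit within it:
 *
 *	index = (vid >> 5) & 0x7F;      selects the VFTA register
 *	vfta |= (1 << (vid & 0x1F));    sets the bit inside it
 */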
       
  5019 
       
  5020 static int e1000_vlan_rx_kill_vid(struct net_device *netdev, u16 vid)
       
  5021 {
       
  5022 	struct e1000_adapter *adapter = netdev_priv(netdev);
       
  5023 	struct e1000_hw *hw = &adapter->hw;
       
  5024 	u32 vfta, index;
       
  5025 
       
  5026 	if (!test_bit(__E1000_DOWN, &adapter->flags))
       
  5027 		e1000_irq_disable(adapter);
       
  5028 	if (!test_bit(__E1000_DOWN, &adapter->flags))
       
  5029 		e1000_irq_enable(adapter);
       
  5030 
       
  5031 	/* remove VID from filter table */
       
  5032 	index = (vid >> 5) & 0x7F;
       
  5033 	vfta = E1000_READ_REG_ARRAY(hw, VFTA, index);
       
  5034 	vfta &= ~(1 << (vid & 0x1F));
       
  5035 	e1000_write_vfta(hw, index, vfta);
       
  5036 
       
  5037 	clear_bit(vid, adapter->active_vlans);
       
  5038 
       
  5039 	if (!e1000_vlan_used(adapter))
       
  5040 		e1000_vlan_filter_on_off(adapter, false);
       
  5041 
       
  5042 	return 0;
       
  5043 }
       
  5044 
       
  5045 static void e1000_restore_vlan(struct e1000_adapter *adapter)
       
  5046 {
       
  5047 	u16 vid;
       
  5048 
       
  5049 	if (!e1000_vlan_used(adapter))
       
  5050 		return;
       
  5051 
       
  5052 	e1000_vlan_filter_on_off(adapter, true);
       
  5053 	for_each_set_bit(vid, adapter->active_vlans, VLAN_N_VID)
       
  5054 		e1000_vlan_rx_add_vid(adapter->netdev, vid);
       
  5055 }
       
  5056 
       
  5057 int e1000_set_spd_dplx(struct e1000_adapter *adapter, u32 spd, u8 dplx)
       
  5058 {
       
  5059 	struct e1000_hw *hw = &adapter->hw;
       
  5060 
       
  5061 	hw->autoneg = 0;
       
  5062 
       
  5063 	/* Make sure dplx is at most 1 bit and lsb of speed is not set
       
  5064 	 * for the switch() below to work */
       
  5065 	if ((spd & 1) || (dplx & ~1))
       
  5066 		goto err_inval;
       
  5067 
       
   5068 	/* Fiber NICs only allow 1000 Mbps full duplex */
       
  5069 	if ((hw->media_type == e1000_media_type_fiber) &&
       
  5070 	    spd != SPEED_1000 &&
       
  5071 	    dplx != DUPLEX_FULL)
       
  5072 		goto err_inval;
       
  5073 
       
  5074 	switch (spd + dplx) {
       
  5075 	case SPEED_10 + DUPLEX_HALF:
       
  5076 		hw->forced_speed_duplex = e1000_10_half;
       
  5077 		break;
       
  5078 	case SPEED_10 + DUPLEX_FULL:
       
  5079 		hw->forced_speed_duplex = e1000_10_full;
       
  5080 		break;
       
  5081 	case SPEED_100 + DUPLEX_HALF:
       
  5082 		hw->forced_speed_duplex = e1000_100_half;
       
  5083 		break;
       
  5084 	case SPEED_100 + DUPLEX_FULL:
       
  5085 		hw->forced_speed_duplex = e1000_100_full;
       
  5086 		break;
       
  5087 	case SPEED_1000 + DUPLEX_FULL:
       
  5088 		hw->autoneg = 1;
       
  5089 		hw->autoneg_advertised = ADVERTISE_1000_FULL;
       
  5090 		break;
       
  5091 	case SPEED_1000 + DUPLEX_HALF: /* not supported */
       
  5092 	default:
       
  5093 		goto err_inval;
       
  5094 	}
       
  5095 	return 0;
       
  5096 
       
  5097 err_inval:
       
  5098 	e_err(probe, "Unsupported Speed/Duplex configuration\n");
       
  5099 	return -EINVAL;
       
  5100 }
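/*
 * The switch above works because SPEED_10/100/1000 are even and
 * DUPLEX_HALF/DUPLEX_FULL are 0/1, so spd + dplx is unique for every
 * supported combination (e.g. SPEED_100 + DUPLEX_FULL == 101); the
 * (spd & 1) || (dplx & ~1) test rejects anything that would break
 * that encoding.
 */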
       
  5101 
       
  5102 static int __e1000_shutdown(struct pci_dev *pdev, bool *enable_wake)
       
  5103 {
       
  5104 	struct net_device *netdev = pci_get_drvdata(pdev);
       
  5105 	struct e1000_adapter *adapter = netdev_priv(netdev);
       
  5106 	struct e1000_hw *hw = &adapter->hw;
       
  5107 	u32 ctrl, ctrl_ext, rctl, status;
       
  5108 	u32 wufc = adapter->wol;
       
  5109 #ifdef CONFIG_PM
       
  5110 	int retval = 0;
       
  5111 #endif
       
  5112 
       
  5113 	if (adapter->ecdev) {
       
  5114 		return -EBUSY;
       
  5115 	}
       
  5116 
       
  5117 	netif_device_detach(netdev);
       
  5118 
       
  5119 	if (netif_running(netdev)) {
       
  5120 		WARN_ON(test_bit(__E1000_RESETTING, &adapter->flags));
       
  5121 		e1000_down(adapter);
       
  5122 	}
       
  5123 
       
  5124 #ifdef CONFIG_PM
       
  5125 	retval = pci_save_state(pdev);
       
  5126 	if (retval)
       
  5127 		return retval;
       
  5128 #endif
       
  5129 
       
  5130 	status = er32(STATUS);
       
  5131 	if (status & E1000_STATUS_LU)
       
  5132 		wufc &= ~E1000_WUFC_LNKC;
       
  5133 
       
  5134 	if (wufc) {
       
  5135 		e1000_setup_rctl(adapter);
       
  5136 		e1000_set_rx_mode(netdev);
       
  5137 
       
  5138 		rctl = er32(RCTL);
       
  5139 
       
  5140 		/* turn on all-multi mode if wake on multicast is enabled */
       
  5141 		if (wufc & E1000_WUFC_MC)
       
  5142 			rctl |= E1000_RCTL_MPE;
       
  5143 
       
  5144 		/* enable receives in the hardware */
       
  5145 		ew32(RCTL, rctl | E1000_RCTL_EN);
       
  5146 
       
  5147 		if (hw->mac_type >= e1000_82540) {
       
  5148 			ctrl = er32(CTRL);
       
  5149 			/* advertise wake from D3Cold */
       
  5150 			#define E1000_CTRL_ADVD3WUC 0x00100000
       
  5151 			/* phy power management enable */
       
  5152 			#define E1000_CTRL_EN_PHY_PWR_MGMT 0x00200000
       
  5153 			ctrl |= E1000_CTRL_ADVD3WUC |
       
  5154 				E1000_CTRL_EN_PHY_PWR_MGMT;
       
  5155 			ew32(CTRL, ctrl);
       
  5156 		}
       
  5157 
       
  5158 		if (hw->media_type == e1000_media_type_fiber ||
       
  5159 		    hw->media_type == e1000_media_type_internal_serdes) {
       
  5160 			/* keep the laser running in D3 */
       
  5161 			ctrl_ext = er32(CTRL_EXT);
       
  5162 			ctrl_ext |= E1000_CTRL_EXT_SDP7_DATA;
       
  5163 			ew32(CTRL_EXT, ctrl_ext);
       
  5164 		}
       
  5165 
       
  5166 		ew32(WUC, E1000_WUC_PME_EN);
       
  5167 		ew32(WUFC, wufc);
       
  5168 	} else {
       
  5169 		ew32(WUC, 0);
       
  5170 		ew32(WUFC, 0);
       
  5171 	}
       
  5172 
       
  5173 	e1000_release_manageability(adapter);
       
  5174 
       
  5175 	*enable_wake = !!wufc;
       
  5176 
       
  5177 	/* make sure adapter isn't asleep if manageability is enabled */
       
  5178 	if (adapter->en_mng_pt)
       
  5179 		*enable_wake = true;
       
  5180 
       
  5181 	if (netif_running(netdev))
       
  5182 		e1000_free_irq(adapter);
       
  5183 
       
  5184 	pci_disable_device(pdev);
       
  5185 
       
  5186 	return 0;
       
  5187 }
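/*
 * Wake-up summary for __e1000_shutdown(): wufc carries the user's wake
 * filter bits, the link-change filter is dropped while the link is
 * already up, and *enable_wake is reported true for any remaining
 * filter or whenever manageability must keep the NIC powered.
 */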
       
  5188 
       
  5189 #ifdef CONFIG_PM
       
  5190 static int e1000_suspend(struct pci_dev *pdev, pm_message_t state)
       
  5191 {
       
  5192 	int retval;
       
  5193 	bool wake;
       
  5194 
       
  5195 	retval = __e1000_shutdown(pdev, &wake);
       
  5196 	if (retval)
       
  5197 		return retval;
       
  5198 
       
  5199 	if (wake) {
       
  5200 		pci_prepare_to_sleep(pdev);
       
  5201 	} else {
       
  5202 		pci_wake_from_d3(pdev, false);
       
  5203 		pci_set_power_state(pdev, PCI_D3hot);
       
  5204 	}
       
  5205 
       
  5206 	return 0;
       
  5207 }
       
  5208 
       
  5209 static int e1000_resume(struct pci_dev *pdev)
       
  5210 {
       
  5211 	struct net_device *netdev = pci_get_drvdata(pdev);
       
  5212 	struct e1000_adapter *adapter = netdev_priv(netdev);
       
  5213 	struct e1000_hw *hw = &adapter->hw;
       
  5214 	u32 err;
       
  5215 
       
  5216 	if (adapter->ecdev) {
       
  5217 		return -EBUSY;
       
  5218 	}
       
  5219 
       
  5220 	pci_set_power_state(pdev, PCI_D0);
       
  5221 	pci_restore_state(pdev);
       
  5222 	pci_save_state(pdev);
       
  5223 
       
  5224 	if (adapter->need_ioport)
       
  5225 		err = pci_enable_device(pdev);
       
  5226 	else
       
  5227 		err = pci_enable_device_mem(pdev);
       
  5228 	if (err) {
       
  5229 		pr_err("Cannot enable PCI device from suspend\n");
       
  5230 		return err;
       
  5231 	}
       
  5232 	pci_set_master(pdev);
       
  5233 
       
  5234 	pci_enable_wake(pdev, PCI_D3hot, 0);
       
  5235 	pci_enable_wake(pdev, PCI_D3cold, 0);
       
  5236 
       
  5237 	if (netif_running(netdev)) {
       
  5238 		err = e1000_request_irq(adapter);
       
  5239 		if (err)
       
  5240 			return err;
       
  5241 	}
       
  5242 
       
  5243 	e1000_power_up_phy(adapter);
       
  5244 	e1000_reset(adapter);
       
  5245 	ew32(WUS, ~0);
       
  5246 
       
  5247 	e1000_init_manageability(adapter);
       
  5248 
       
  5249 	if (netif_running(netdev))
       
  5250 		e1000_up(adapter);
       
  5251 
       
  5252 	if (!adapter->ecdev) {
       
  5253 		netif_device_attach(netdev);
       
  5254 	}
       
  5255 
       
  5256 	return 0;
       
  5257 }
       
  5258 #endif
       
  5259 
       
  5260 static void e1000_shutdown(struct pci_dev *pdev)
       
  5261 {
       
  5262 	bool wake;
       
  5263 
       
  5264 	__e1000_shutdown(pdev, &wake);
       
  5265 
       
  5266 	if (system_state == SYSTEM_POWER_OFF) {
       
  5267 		pci_wake_from_d3(pdev, wake);
       
  5268 		pci_set_power_state(pdev, PCI_D3hot);
       
  5269 	}
       
  5270 }
       
  5271 
       
  5272 #ifdef CONFIG_NET_POLL_CONTROLLER
       
  5273 /*
       
  5274  * Polling 'interrupt' - used by things like netconsole to send skbs
       
  5275  * without having to re-enable interrupts. It's not called while
       
  5276  * the interrupt routine is executing.
       
  5277  */
       
  5278 static void e1000_netpoll(struct net_device *netdev)
       
  5279 {
       
  5280 	struct e1000_adapter *adapter = netdev_priv(netdev);
       
  5281 
       
  5282 	disable_irq(adapter->pdev->irq);
       
  5283 	e1000_intr(adapter->pdev->irq, netdev);
       
  5284 	enable_irq(adapter->pdev->irq);
       
  5285 }
       
  5286 #endif
       
  5287 
       
  5288 /**
       
  5289  * e1000_io_error_detected - called when PCI error is detected
       
  5290  * @pdev: Pointer to PCI device
       
  5291  * @state: The current pci connection state
       
  5292  *
       
  5293  * This function is called after a PCI bus error affecting
       
  5294  * this device has been detected.
       
  5295  */
       
  5296 static pci_ers_result_t e1000_io_error_detected(struct pci_dev *pdev,
       
  5297 						pci_channel_state_t state)
       
  5298 {
       
  5299 	struct net_device *netdev = pci_get_drvdata(pdev);
       
  5300 	struct e1000_adapter *adapter = netdev_priv(netdev);
       
  5301 
       
  5302 	netif_device_detach(netdev);
       
  5303 
       
  5304 	if (state == pci_channel_io_perm_failure)
       
  5305 		return PCI_ERS_RESULT_DISCONNECT;
       
  5306 
       
  5307 	if (netif_running(netdev))
       
  5308 		e1000_down(adapter);
       
  5309 	pci_disable_device(pdev);
       
  5310 
       
   5311 	/* Request a slot reset. */
       
  5312 	return PCI_ERS_RESULT_NEED_RESET;
       
  5313 }
       
  5314 
       
  5315 /**
       
  5316  * e1000_io_slot_reset - called after the pci bus has been reset.
       
  5317  * @pdev: Pointer to PCI device
       
  5318  *
       
  5319  * Restart the card from scratch, as if from a cold-boot. Implementation
       
  5320  * resembles the first-half of the e1000_resume routine.
       
  5321  */
       
  5322 static pci_ers_result_t e1000_io_slot_reset(struct pci_dev *pdev)
       
  5323 {
       
  5324 	struct net_device *netdev = pci_get_drvdata(pdev);
       
  5325 	struct e1000_adapter *adapter = netdev_priv(netdev);
       
  5326 	struct e1000_hw *hw = &adapter->hw;
       
  5327 	int err;
       
  5328 
       
  5329 	if (adapter->need_ioport)
       
  5330 		err = pci_enable_device(pdev);
       
  5331 	else
       
  5332 		err = pci_enable_device_mem(pdev);
       
  5333 	if (err) {
       
  5334 		pr_err("Cannot re-enable PCI device after reset.\n");
       
  5335 		return PCI_ERS_RESULT_DISCONNECT;
       
  5336 	}
       
  5337 	pci_set_master(pdev);
       
  5338 
       
  5339 	pci_enable_wake(pdev, PCI_D3hot, 0);
       
  5340 	pci_enable_wake(pdev, PCI_D3cold, 0);
       
  5341 
       
  5342 	e1000_reset(adapter);
       
  5343 	ew32(WUS, ~0);
       
  5344 
       
  5345 	return PCI_ERS_RESULT_RECOVERED;
       
  5346 }
       
  5347 
       
  5348 /**
       
  5349  * e1000_io_resume - called when traffic can start flowing again.
       
  5350  * @pdev: Pointer to PCI device
       
  5351  *
       
  5352  * This callback is called when the error recovery driver tells us that
       
   5353  * it's OK to resume normal operation. Implementation resembles the
       
  5354  * second-half of the e1000_resume routine.
       
  5355  */
       
  5356 static void e1000_io_resume(struct pci_dev *pdev)
       
  5357 {
       
  5358 	struct net_device *netdev = pci_get_drvdata(pdev);
       
  5359 	struct e1000_adapter *adapter = netdev_priv(netdev);
       
  5360 
       
  5361 	e1000_init_manageability(adapter);
       
  5362 
       
  5363 	if (netif_running(netdev)) {
       
  5364 		if (e1000_up(adapter)) {
       
  5365 			pr_info("can't bring device back up after reset\n");
       
  5366 			return;
       
  5367 		}
       
  5368 	}
       
  5369 
       
  5370 	netif_device_attach(netdev);
       
  5371 }
       
  5372 
       
  5373 /* e1000_main.c */