devices/e1000/e1000_main-3.6-ethercat.c
/*******************************************************************************

  Intel PRO/1000 Linux driver
  Copyright(c) 1999 - 2006 Intel Corporation.

  This program is free software; you can redistribute it and/or modify it
  under the terms and conditions of the GNU General Public License,
  version 2, as published by the Free Software Foundation.

  This program is distributed in the hope it will be useful, but WITHOUT
  ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
  FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
  more details.

  You should have received a copy of the GNU General Public License along with
  this program; if not, write to the Free Software Foundation, Inc.,
  51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.

  The full GNU General Public License is included in this distribution in
  the file called "COPYING".

  Contact Information:
  Linux NICS <linux.nics@intel.com>
  e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
  Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497

  vim: noexpandtab

*******************************************************************************/

#include "e1000-3.6-ethercat.h"
#include <net/ip6_checksum.h>
#include <linux/io.h>
#include <linux/prefetch.h>
#include <linux/bitops.h>
#include <linux/if_vlan.h>

char e1000_driver_name[] = "ec_e1000";
static char e1000_driver_string[] = "Intel(R) PRO/1000 Network Driver";
#define DRV_VERSION "7.3.21-k8-NAPI"
const char e1000_driver_version[] = DRV_VERSION;
static const char e1000_copyright[] = "Copyright (c) 1999-2006 Intel Corporation.";

/* e1000_pci_tbl - PCI Device ID Table
 *
 * Last entry must be all 0s
 *
 * Macro expands to...
 *   {PCI_DEVICE(PCI_VENDOR_ID_INTEL, device_id)}
 */
static DEFINE_PCI_DEVICE_TABLE(e1000_pci_tbl) = {
	INTEL_E1000_ETHERNET_DEVICE(0x1000),
	INTEL_E1000_ETHERNET_DEVICE(0x1001),
	INTEL_E1000_ETHERNET_DEVICE(0x1004),
	INTEL_E1000_ETHERNET_DEVICE(0x1008),
	INTEL_E1000_ETHERNET_DEVICE(0x1009),
	INTEL_E1000_ETHERNET_DEVICE(0x100C),
	INTEL_E1000_ETHERNET_DEVICE(0x100D),
	INTEL_E1000_ETHERNET_DEVICE(0x100E),
	INTEL_E1000_ETHERNET_DEVICE(0x100F),
	INTEL_E1000_ETHERNET_DEVICE(0x1010),
	INTEL_E1000_ETHERNET_DEVICE(0x1011),
	INTEL_E1000_ETHERNET_DEVICE(0x1012),
	INTEL_E1000_ETHERNET_DEVICE(0x1013),
	INTEL_E1000_ETHERNET_DEVICE(0x1014),
	INTEL_E1000_ETHERNET_DEVICE(0x1015),
	INTEL_E1000_ETHERNET_DEVICE(0x1016),
	INTEL_E1000_ETHERNET_DEVICE(0x1017),
	INTEL_E1000_ETHERNET_DEVICE(0x1018),
	INTEL_E1000_ETHERNET_DEVICE(0x1019),
	INTEL_E1000_ETHERNET_DEVICE(0x101A),
	INTEL_E1000_ETHERNET_DEVICE(0x101D),
	INTEL_E1000_ETHERNET_DEVICE(0x101E),
	INTEL_E1000_ETHERNET_DEVICE(0x1026),
	INTEL_E1000_ETHERNET_DEVICE(0x1027),
	INTEL_E1000_ETHERNET_DEVICE(0x1028),
	INTEL_E1000_ETHERNET_DEVICE(0x1075),
	INTEL_E1000_ETHERNET_DEVICE(0x1076),
	INTEL_E1000_ETHERNET_DEVICE(0x1077),
	INTEL_E1000_ETHERNET_DEVICE(0x1078),
	INTEL_E1000_ETHERNET_DEVICE(0x1079),
	INTEL_E1000_ETHERNET_DEVICE(0x107A),
	INTEL_E1000_ETHERNET_DEVICE(0x107B),
	INTEL_E1000_ETHERNET_DEVICE(0x107C),
	INTEL_E1000_ETHERNET_DEVICE(0x108A),
	INTEL_E1000_ETHERNET_DEVICE(0x1099),
	INTEL_E1000_ETHERNET_DEVICE(0x10B5),
	INTEL_E1000_ETHERNET_DEVICE(0x2E6E),
	/* required last entry */
	{0,}
};

// do not auto-load driver
// MODULE_DEVICE_TABLE(pci, e1000_pci_tbl);
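// Without MODULE_DEVICE_TABLE() no PCI modalias is exported, so
// udev/modprobe cannot auto-load this driver by device ID; it must be
// loaded explicitly (typically by the EtherCAT master's startup).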
       
int e1000_up(struct e1000_adapter *adapter);
void e1000_down(struct e1000_adapter *adapter);
void e1000_reinit_locked(struct e1000_adapter *adapter);
void e1000_reset(struct e1000_adapter *adapter);
int e1000_setup_all_tx_resources(struct e1000_adapter *adapter);
int e1000_setup_all_rx_resources(struct e1000_adapter *adapter);
void e1000_free_all_tx_resources(struct e1000_adapter *adapter);
void e1000_free_all_rx_resources(struct e1000_adapter *adapter);
static int e1000_setup_tx_resources(struct e1000_adapter *adapter,
                             struct e1000_tx_ring *txdr);
static int e1000_setup_rx_resources(struct e1000_adapter *adapter,
                             struct e1000_rx_ring *rxdr);
static void e1000_free_tx_resources(struct e1000_adapter *adapter,
                             struct e1000_tx_ring *tx_ring);
static void e1000_free_rx_resources(struct e1000_adapter *adapter,
                             struct e1000_rx_ring *rx_ring);
void e1000_update_stats(struct e1000_adapter *adapter);

static int e1000_init_module(void);
static void e1000_exit_module(void);
static int e1000_probe(struct pci_dev *pdev, const struct pci_device_id *ent);
static void __devexit e1000_remove(struct pci_dev *pdev);
static int e1000_alloc_queues(struct e1000_adapter *adapter);
static int e1000_sw_init(struct e1000_adapter *adapter);
static int e1000_open(struct net_device *netdev);
static int e1000_close(struct net_device *netdev);
static void e1000_configure_tx(struct e1000_adapter *adapter);
static void e1000_configure_rx(struct e1000_adapter *adapter);
static void e1000_setup_rctl(struct e1000_adapter *adapter);
static void e1000_clean_all_tx_rings(struct e1000_adapter *adapter);
static void e1000_clean_all_rx_rings(struct e1000_adapter *adapter);
static void e1000_clean_tx_ring(struct e1000_adapter *adapter,
                                struct e1000_tx_ring *tx_ring);
static void e1000_clean_rx_ring(struct e1000_adapter *adapter,
                                struct e1000_rx_ring *rx_ring);
static void e1000_set_rx_mode(struct net_device *netdev);
static void e1000_update_phy_info_task(struct work_struct *work);
static void e1000_watchdog(struct work_struct *work);
static void e1000_82547_tx_fifo_stall_task(struct work_struct *work);
static netdev_tx_t e1000_xmit_frame(struct sk_buff *skb,
				    struct net_device *netdev);
static struct net_device_stats *e1000_get_stats(struct net_device *netdev);
static int e1000_change_mtu(struct net_device *netdev, int new_mtu);
static int e1000_set_mac(struct net_device *netdev, void *p);
void ec_poll(struct net_device *);
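// ec_poll() (above) is the cyclic callback passed to ecdev_offer() in
// e1000_probe(). When the EtherCAT master claims the device, all RX/TX
// processing is driven through this function instead of the interrupt
// handler -- see the adapter->ecdev checks in e1000_request_irq() and
// e1000_irq_enable()/e1000_irq_disable().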
       
static irqreturn_t e1000_intr(int irq, void *data);
static bool e1000_clean_tx_irq(struct e1000_adapter *adapter,
			       struct e1000_tx_ring *tx_ring);
static int e1000_clean(struct napi_struct *napi, int budget);
static bool e1000_clean_rx_irq(struct e1000_adapter *adapter,
			       struct e1000_rx_ring *rx_ring,
			       int *work_done, int work_to_do);
static bool e1000_clean_jumbo_rx_irq(struct e1000_adapter *adapter,
				     struct e1000_rx_ring *rx_ring,
				     int *work_done, int work_to_do);
static void e1000_alloc_rx_buffers(struct e1000_adapter *adapter,
				   struct e1000_rx_ring *rx_ring,
				   int cleaned_count);
static void e1000_alloc_jumbo_rx_buffers(struct e1000_adapter *adapter,
					 struct e1000_rx_ring *rx_ring,
					 int cleaned_count);
static int e1000_ioctl(struct net_device *netdev, struct ifreq *ifr, int cmd);
static int e1000_mii_ioctl(struct net_device *netdev, struct ifreq *ifr,
			   int cmd);
static void e1000_enter_82542_rst(struct e1000_adapter *adapter);
static void e1000_leave_82542_rst(struct e1000_adapter *adapter);
static void e1000_tx_timeout(struct net_device *dev);
static void e1000_reset_task(struct work_struct *work);
static void e1000_smartspeed(struct e1000_adapter *adapter);
static int e1000_82547_fifo_workaround(struct e1000_adapter *adapter,
                                       struct sk_buff *skb);

static bool e1000_vlan_used(struct e1000_adapter *adapter);
static void e1000_vlan_mode(struct net_device *netdev,
			    netdev_features_t features);
static void e1000_vlan_filter_on_off(struct e1000_adapter *adapter,
				     bool filter_on);
static int e1000_vlan_rx_add_vid(struct net_device *netdev, u16 vid);
static int e1000_vlan_rx_kill_vid(struct net_device *netdev, u16 vid);
static void e1000_restore_vlan(struct e1000_adapter *adapter);

#ifdef CONFIG_PM
static int e1000_suspend(struct pci_dev *pdev, pm_message_t state);
static int e1000_resume(struct pci_dev *pdev);
#endif
static void e1000_shutdown(struct pci_dev *pdev);

#ifdef CONFIG_NET_POLL_CONTROLLER
/* for netdump / net console */
static void e1000_netpoll(struct net_device *netdev);
#endif

#define COPYBREAK_DEFAULT 256
static unsigned int copybreak __read_mostly = COPYBREAK_DEFAULT;
module_param(copybreak, uint, 0644);
MODULE_PARM_DESC(copybreak,
	"Maximum size of packet that is copied to a new buffer on receive");
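// copybreak can be set at load time or, thanks to the 0644 mode above,
// changed at runtime via sysfs (paths assume the module is built as
// ec_e1000.ko):
//   modprobe ec_e1000 copybreak=0      # disable copying entirely
//   echo 128 > /sys/module/ec_e1000/parameters/copybreak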
       
static pci_ers_result_t e1000_io_error_detected(struct pci_dev *pdev,
                     pci_channel_state_t state);
static pci_ers_result_t e1000_io_slot_reset(struct pci_dev *pdev);
static void e1000_io_resume(struct pci_dev *pdev);

static struct pci_error_handlers e1000_err_handler = {
	.error_detected = e1000_io_error_detected,
	.slot_reset = e1000_io_slot_reset,
	.resume = e1000_io_resume,
};

static struct pci_driver e1000_driver = {
	.name     = e1000_driver_name,
	.id_table = e1000_pci_tbl,
	.probe    = e1000_probe,
	.remove   = __devexit_p(e1000_remove),
#ifdef CONFIG_PM
	/* Power Management Hooks */
	.suspend  = e1000_suspend,
	.resume   = e1000_resume,
#endif
	.shutdown = e1000_shutdown,
	.err_handler = &e1000_err_handler
};

MODULE_AUTHOR("Florian Pose <fp@igh-essen.com>");
MODULE_DESCRIPTION("EtherCAT-capable Intel(R) PRO/1000 Network Driver");
MODULE_LICENSE("GPL");
MODULE_VERSION(DRV_VERSION);

#define DEFAULT_MSG_ENABLE (NETIF_MSG_DRV|NETIF_MSG_PROBE|NETIF_MSG_LINK)
static int debug = -1;
module_param(debug, int, 0);
MODULE_PARM_DESC(debug, "Debug level (0=none,...,16=all)");
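// A negative default means "unset": netif_msg_init() in e1000_probe()
// then falls back to DEFAULT_MSG_ENABLE (driver, probe and link messages).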
       
/**
 * e1000_get_hw_dev - return the net_device
 * @hw: hardware structure
 *
 * Used by the hardware layer to print debugging information.
 **/
struct net_device *e1000_get_hw_dev(struct e1000_hw *hw)
{
	struct e1000_adapter *adapter = hw->back;
	return adapter->netdev;
}

/**
 * e1000_init_module - Driver Registration Routine
 *
 * e1000_init_module is the first routine called when the driver is
 * loaded. All it does is register with the PCI subsystem.
 **/

static int __init e1000_init_module(void)
{
	int ret;
	pr_info("%s - version %s\n", e1000_driver_string, e1000_driver_version);

	pr_info("%s\n", e1000_copyright);

	ret = pci_register_driver(&e1000_driver);
	if (copybreak != COPYBREAK_DEFAULT) {
		if (copybreak == 0)
			pr_info("copybreak disabled\n");
		else
			pr_info("copybreak enabled for "
				   "packets <= %u bytes\n", copybreak);
	}
	return ret;
}

module_init(e1000_init_module);

/**
 * e1000_exit_module - Driver Exit Cleanup Routine
 *
 * e1000_exit_module is called just before the driver is removed
 * from memory.
 **/

static void __exit e1000_exit_module(void)
{
	pci_unregister_driver(&e1000_driver);
}

module_exit(e1000_exit_module);

static int e1000_request_irq(struct e1000_adapter *adapter)
{
	struct net_device *netdev = adapter->netdev;
	irq_handler_t handler = e1000_intr;
	int irq_flags = IRQF_SHARED;
	int err;

	if (adapter->ecdev) {
		return 0;
	}

	err = request_irq(adapter->pdev->irq, handler, irq_flags, netdev->name,
	                  netdev);
	if (err) {
		e_err(probe, "Unable to allocate interrupt, Error: %d\n", err);
	}

	return err;
}

static void e1000_free_irq(struct e1000_adapter *adapter)
{
	struct net_device *netdev = adapter->netdev;

	if (adapter->ecdev) {
		return;
	}

	free_irq(adapter->pdev->irq, netdev);
}

/**
 * e1000_irq_disable - Mask off interrupt generation on the NIC
 * @adapter: board private structure
 **/

static void e1000_irq_disable(struct e1000_adapter *adapter)
{
	struct e1000_hw *hw = &adapter->hw;

	if (adapter->ecdev) {
		return;
	}

	ew32(IMC, ~0);
	E1000_WRITE_FLUSH();
	synchronize_irq(adapter->pdev->irq);
}

/**
 * e1000_irq_enable - Enable default interrupt generation settings
 * @adapter: board private structure
 **/

static void e1000_irq_enable(struct e1000_adapter *adapter)
{
	struct e1000_hw *hw = &adapter->hw;

	if (adapter->ecdev) {
		return;
	}

	ew32(IMS, IMS_ENABLE_MASK);
	E1000_WRITE_FLUSH();
}

static void e1000_update_mng_vlan(struct e1000_adapter *adapter)
{
	struct e1000_hw *hw = &adapter->hw;
	struct net_device *netdev = adapter->netdev;
	u16 vid = hw->mng_cookie.vlan_id;
	u16 old_vid = adapter->mng_vlan_id;

	if (!e1000_vlan_used(adapter))
		return;

	if (!test_bit(vid, adapter->active_vlans)) {
		if (hw->mng_cookie.status &
		    E1000_MNG_DHCP_COOKIE_STATUS_VLAN_SUPPORT) {
			e1000_vlan_rx_add_vid(netdev, vid);
			adapter->mng_vlan_id = vid;
		} else {
			adapter->mng_vlan_id = E1000_MNG_VLAN_NONE;
		}
		if ((old_vid != (u16)E1000_MNG_VLAN_NONE) &&
		    (vid != old_vid) &&
		    !test_bit(old_vid, adapter->active_vlans))
			e1000_vlan_rx_kill_vid(netdev, old_vid);
	} else {
		adapter->mng_vlan_id = vid;
	}
}

static void e1000_init_manageability(struct e1000_adapter *adapter)
{
	struct e1000_hw *hw = &adapter->hw;

	if (adapter->en_mng_pt) {
		u32 manc = er32(MANC);

		/* disable hardware interception of ARP */
		manc &= ~(E1000_MANC_ARP_EN);

		ew32(MANC, manc);
	}
}

static void e1000_release_manageability(struct e1000_adapter *adapter)
{
	struct e1000_hw *hw = &adapter->hw;

	if (adapter->en_mng_pt) {
		u32 manc = er32(MANC);

		/* re-enable hardware interception of ARP */
		manc |= E1000_MANC_ARP_EN;

		ew32(MANC, manc);
	}
}

/**
 * e1000_configure - configure the hardware for RX and TX
 * @adapter: private board structure
 **/
static void e1000_configure(struct e1000_adapter *adapter)
{
	struct net_device *netdev = adapter->netdev;
	int i;

	e1000_set_rx_mode(netdev);

	e1000_restore_vlan(adapter);
	e1000_init_manageability(adapter);

	e1000_configure_tx(adapter);
	e1000_setup_rctl(adapter);
	e1000_configure_rx(adapter);
	/* call E1000_DESC_UNUSED which always leaves
	 * at least 1 descriptor unused to make sure
	 * next_to_use != next_to_clean */
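	/* Keeping one descriptor unused makes the full and empty ring
	 * states distinguishable: next_to_use == next_to_clean then
	 * unambiguously means "empty". The EtherCAT path below
	 * deliberately fills the ring completely instead. */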
       
	for (i = 0; i < adapter->num_rx_queues; i++) {
		struct e1000_rx_ring *ring = &adapter->rx_ring[i];
		if (adapter->ecdev) {
			/* fill rx ring completely! */
			adapter->alloc_rx_buf(adapter, ring, ring->count);
		} else {
			/* this one leaves the last ring element unallocated! */
			adapter->alloc_rx_buf(adapter, ring,
					E1000_DESC_UNUSED(ring));
		}
	}
}

int e1000_up(struct e1000_adapter *adapter)
{
	struct e1000_hw *hw = &adapter->hw;

	/* hardware has been reset, we need to reload some things */
	e1000_configure(adapter);

	clear_bit(__E1000_DOWN, &adapter->flags);

	if (!adapter->ecdev) {
		napi_enable(&adapter->napi);

		e1000_irq_enable(adapter);

		netif_wake_queue(adapter->netdev);

		/* fire a link change interrupt to start the watchdog */
		ew32(ICS, E1000_ICS_LSC);
	}
	return 0;
}

/**
 * e1000_power_up_phy - restore link in case the phy was powered down
 * @adapter: address of board private structure
 *
 * The phy may be powered down to save power and turn off link when the
 * driver is unloaded and wake on lan is not enabled (among others)
 * *** this routine MUST be followed by a call to e1000_reset ***
 *
 **/

void e1000_power_up_phy(struct e1000_adapter *adapter)
{
	struct e1000_hw *hw = &adapter->hw;
	u16 mii_reg = 0;

	/* Just clear the power down bit to wake the phy back up */
	if (hw->media_type == e1000_media_type_copper) {
		/* according to the manual, the phy will retain its
		 * settings across a power-down/up cycle */
		e1000_read_phy_reg(hw, PHY_CTRL, &mii_reg);
		mii_reg &= ~MII_CR_POWER_DOWN;
		e1000_write_phy_reg(hw, PHY_CTRL, mii_reg);
	}
}

static void e1000_power_down_phy(struct e1000_adapter *adapter)
{
	struct e1000_hw *hw = &adapter->hw;

	/* Power down the PHY so no link is implied when interface is down *
	 * The PHY cannot be powered down if any of the following is true *
	 * (a) WoL is enabled
	 * (b) AMT is active
	 * (c) SoL/IDER session is active */
	if (!adapter->wol && hw->mac_type >= e1000_82540 &&
	   hw->media_type == e1000_media_type_copper) {
		u16 mii_reg = 0;

		switch (hw->mac_type) {
		case e1000_82540:
		case e1000_82545:
		case e1000_82545_rev_3:
		case e1000_82546:
		case e1000_ce4100:
		case e1000_82546_rev_3:
		case e1000_82541:
		case e1000_82541_rev_2:
		case e1000_82547:
		case e1000_82547_rev_2:
			if (er32(MANC) & E1000_MANC_SMBUS_EN)
				goto out;
			break;
		default:
			goto out;
		}
		e1000_read_phy_reg(hw, PHY_CTRL, &mii_reg);
		mii_reg |= MII_CR_POWER_DOWN;
		e1000_write_phy_reg(hw, PHY_CTRL, mii_reg);
		msleep(1);
	}
out:
	return;
}

static void e1000_down_and_stop(struct e1000_adapter *adapter)
{
	set_bit(__E1000_DOWN, &adapter->flags);

	/* Only kill reset task if adapter is not resetting */
	if (!test_bit(__E1000_RESETTING, &adapter->flags))
		cancel_work_sync(&adapter->reset_task);

	if (!adapter->ecdev) {
		cancel_delayed_work_sync(&adapter->watchdog_task);
		cancel_delayed_work_sync(&adapter->phy_info_task);
		cancel_delayed_work_sync(&adapter->fifo_stall_task);
	}
}

void e1000_down(struct e1000_adapter *adapter)
{
	struct e1000_hw *hw = &adapter->hw;
	struct net_device *netdev = adapter->netdev;
	u32 rctl, tctl;

	/* disable receives in the hardware */
	rctl = er32(RCTL);
	ew32(RCTL, rctl & ~E1000_RCTL_EN);

	if (!adapter->ecdev) {
		/* flush and sleep below */
		netif_tx_disable(netdev);
	}

	/* disable transmits in the hardware */
	tctl = er32(TCTL);
	tctl &= ~E1000_TCTL_EN;
	ew32(TCTL, tctl);
	/* flush both disables and wait for them to finish */
	E1000_WRITE_FLUSH();
	msleep(10);

	if (!adapter->ecdev) {
		napi_disable(&adapter->napi);

		e1000_irq_disable(adapter);
	}

	/*
	 * Setting DOWN must be after irq_disable to prevent
	 * a screaming interrupt.  Setting DOWN also prevents
	 * tasks from rescheduling.
	 */
	e1000_down_and_stop(adapter);

	adapter->link_speed = 0;
	adapter->link_duplex = 0;

	if (!adapter->ecdev) {
		netif_carrier_off(netdev);
	}

	e1000_reset(adapter);
	e1000_clean_all_tx_rings(adapter);
	e1000_clean_all_rx_rings(adapter);
}

static void e1000_reinit_safe(struct e1000_adapter *adapter)
{
	while (test_and_set_bit(__E1000_RESETTING, &adapter->flags))
		msleep(1);
	mutex_lock(&adapter->mutex);
	e1000_down(adapter);
	e1000_up(adapter);
	mutex_unlock(&adapter->mutex);
	clear_bit(__E1000_RESETTING, &adapter->flags);
}

void e1000_reinit_locked(struct e1000_adapter *adapter)
{
	/* if rtnl_lock is not held the call path is bogus */
	ASSERT_RTNL();
	WARN_ON(in_interrupt());
	while (test_and_set_bit(__E1000_RESETTING, &adapter->flags))
		msleep(1);
	e1000_down(adapter);
	e1000_up(adapter);
	clear_bit(__E1000_RESETTING, &adapter->flags);
}

void e1000_reset(struct e1000_adapter *adapter)
{
	struct e1000_hw *hw = &adapter->hw;
	u32 pba = 0, tx_space, min_tx_space, min_rx_space;
	bool legacy_pba_adjust = false;
	u16 hwm;

	/* Repartition PBA for MTUs greater than 9k.
	 * CTRL.RST is required for the change to take effect.
	 */

	switch (hw->mac_type) {
	case e1000_82542_rev2_0:
	case e1000_82542_rev2_1:
	case e1000_82543:
	case e1000_82544:
	case e1000_82540:
	case e1000_82541:
	case e1000_82541_rev_2:
		legacy_pba_adjust = true;
		pba = E1000_PBA_48K;
		break;
	case e1000_82545:
	case e1000_82545_rev_3:
	case e1000_82546:
	case e1000_ce4100:
	case e1000_82546_rev_3:
		pba = E1000_PBA_48K;
		break;
	case e1000_82547:
	case e1000_82547_rev_2:
		legacy_pba_adjust = true;
		pba = E1000_PBA_30K;
		break;
	case e1000_undefined:
	case e1000_num_macs:
		break;
	}

	if (legacy_pba_adjust) {
		if (hw->max_frame_size > E1000_RXBUFFER_8192)
			pba -= 8; /* allocate more FIFO for Tx */

		if (hw->mac_type == e1000_82547) {
			adapter->tx_fifo_head = 0;
			adapter->tx_head_addr = pba << E1000_TX_HEAD_ADDR_SHIFT;
			adapter->tx_fifo_size =
				(E1000_PBA_40K - pba) << E1000_PBA_BYTES_SHIFT;
			atomic_set(&adapter->tx_fifo_stall, 0);
		}
	} else if (hw->max_frame_size > ETH_FRAME_LEN + ETH_FCS_LEN) {
		/* adjust PBA for jumbo frames */
		ew32(PBA, pba);

		/* To maintain wire speed transmits, the Tx FIFO should be
		 * large enough to accommodate two full transmit packets,
		 * rounded up to the next 1KB and expressed in KB.  Likewise,
		 * the Rx FIFO should be large enough to accommodate at least
		 * one full receive packet and is similarly rounded up and
		 * expressed in KB. */
		pba = er32(PBA);
		/* upper 16 bits has Tx packet buffer allocation size in KB */
		tx_space = pba >> 16;
		/* lower 16 bits has Rx packet buffer allocation size in KB */
		pba &= 0xffff;
		/*
		 * the Tx FIFO also stores 16 bytes of information about the
		 * packet, but doesn't include the Ethernet FCS because the
		 * hardware appends it
		 */
		min_tx_space = (hw->max_frame_size +
		                sizeof(struct e1000_tx_desc) -
		                ETH_FCS_LEN) * 2;
		min_tx_space = ALIGN(min_tx_space, 1024);
		min_tx_space >>= 10;
		/* software strips receive CRC, so leave room for it */
		min_rx_space = hw->max_frame_size;
		min_rx_space = ALIGN(min_rx_space, 1024);
		min_rx_space >>= 10;

		/* If current Tx allocation is less than the min Tx FIFO size,
		 * and the min Tx FIFO size is less than the current Rx FIFO
		 * allocation, take space away from current Rx allocation */
		if (tx_space < min_tx_space &&
		    ((min_tx_space - tx_space) < pba)) {
			pba = pba - (min_tx_space - tx_space);

			/* PCI/PCIx hardware has PBA alignment constraints */
			switch (hw->mac_type) {
			case e1000_82545 ... e1000_82546_rev_3:
				pba &= ~(E1000_PBA_8K - 1);
				break;
			default:
				break;
			}

			/* if short on Rx space, Rx wins and must trump Tx
			 * adjustment or use Early Receive if available */
			if (pba < min_rx_space)
				pba = min_rx_space;
		}
	}

	ew32(PBA, pba);

	/*
	 * flow control settings:
	 * The high water mark must be low enough to fit one full frame
	 * (or the size used for early receive) above it in the Rx FIFO.
	 * Set it to the lower of:
	 * - 90% of the Rx FIFO size, and
	 * - the full Rx FIFO size minus the early receive size (for parts
	 *   with ERT support assuming ERT set to E1000_ERT_2048), or
	 * - the full Rx FIFO size minus one full frame
	 */
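	/* Worked example (assuming the 48KB PBA and a standard 1500-byte
	 * MTU, i.e. max_frame_size = 1518): 90% of 49152 is 44236 and
	 * 49152 - 1518 is 47634, so hwm = 44236; masking to 8-byte
	 * granularity then gives fc_high_water = 44232 and
	 * fc_low_water = 44224. */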
       
	hwm = min(((pba << 10) * 9 / 10),
		  ((pba << 10) - hw->max_frame_size));

	hw->fc_high_water = hwm & 0xFFF8;	/* 8-byte granularity */
	hw->fc_low_water = hw->fc_high_water - 8;
	hw->fc_pause_time = E1000_FC_PAUSE_TIME;
	hw->fc_send_xon = 1;
	hw->fc = hw->original_fc;

	/* Allow time for pending master requests to run */
	e1000_reset_hw(hw);
	if (hw->mac_type >= e1000_82544)
		ew32(WUC, 0);

	if (e1000_init_hw(hw))
		e_dev_err("Hardware Error\n");
	e1000_update_mng_vlan(adapter);

	/* if (adapter->hwflags & HWFLAGS_PHY_PWR_BIT) { */
	if (hw->mac_type >= e1000_82544 &&
	    hw->autoneg == 1 &&
	    hw->autoneg_advertised == ADVERTISE_1000_FULL) {
		u32 ctrl = er32(CTRL);
		/* clear phy power management bit if we are in gig only mode,
		 * which if enabled will attempt negotiation to 100Mb, which
		 * can cause a loss of link at power off or driver unload */
		ctrl &= ~E1000_CTRL_SWDPIN3;
		ew32(CTRL, ctrl);
	}

	/* Enable h/w to recognize an 802.1Q VLAN Ethernet packet */
	ew32(VET, ETHERNET_IEEE_VLAN_TYPE);

	e1000_reset_adaptive(hw);
	e1000_phy_get_info(hw, &adapter->phy_info);

	e1000_release_manageability(adapter);
}

/* Dump the eeprom for users having checksum issues */
static void e1000_dump_eeprom(struct e1000_adapter *adapter)
{
	struct net_device *netdev = adapter->netdev;
	struct ethtool_eeprom eeprom;
	const struct ethtool_ops *ops = netdev->ethtool_ops;
	u8 *data;
	int i;
	u16 csum_old, csum_new = 0;

	eeprom.len = ops->get_eeprom_len(netdev);
	eeprom.offset = 0;

	data = kmalloc(eeprom.len, GFP_KERNEL);
	if (!data)
		return;

	ops->get_eeprom(netdev, &eeprom, data);

	csum_old = (data[EEPROM_CHECKSUM_REG * 2]) +
		   (data[EEPROM_CHECKSUM_REG * 2 + 1] << 8);
	for (i = 0; i < EEPROM_CHECKSUM_REG * 2; i += 2)
		csum_new += data[i] + (data[i + 1] << 8);
	csum_new = EEPROM_SUM - csum_new;
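	/* EEPROM convention: all 16-bit words up to and including the
	 * checksum word must sum to EEPROM_SUM (0xBABA), so the expected
	 * checksum computed above is EEPROM_SUM minus the sum of the
	 * data words. */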
       
	pr_err("/*********************/\n");
	pr_err("Current EEPROM Checksum : 0x%04x\n", csum_old);
	pr_err("Calculated              : 0x%04x\n", csum_new);

	pr_err("Offset    Values\n");
	pr_err("========  ======\n");
	print_hex_dump(KERN_ERR, "", DUMP_PREFIX_OFFSET, 16, 1, data, 128, 0);

	pr_err("Include this output when contacting your support provider.\n");
	pr_err("This is not a software error! Something bad happened to\n");
	pr_err("your hardware or EEPROM image. Ignoring this problem could\n");
	pr_err("result in further problems, possibly loss of data,\n");
	pr_err("corruption or system hangs!\n");
	pr_err("The MAC Address will be reset to 00:00:00:00:00:00,\n");
	pr_err("which is invalid and requires you to set the proper MAC\n");
	pr_err("address manually before continuing to enable this network\n");
	pr_err("device. Please inspect the EEPROM dump and report the\n");
	pr_err("issue to your hardware vendor or Intel Customer Support.\n");
	pr_err("/*********************/\n");

	kfree(data);
}

/**
 * e1000_is_need_ioport - determine if an adapter needs ioport resources or not
 * @pdev: PCI device information struct
 *
 * Return true if an adapter needs ioport resources
 **/
static int e1000_is_need_ioport(struct pci_dev *pdev)
{
	switch (pdev->device) {
	case E1000_DEV_ID_82540EM:
	case E1000_DEV_ID_82540EM_LOM:
	case E1000_DEV_ID_82540EP:
	case E1000_DEV_ID_82540EP_LOM:
	case E1000_DEV_ID_82540EP_LP:
	case E1000_DEV_ID_82541EI:
	case E1000_DEV_ID_82541EI_MOBILE:
	case E1000_DEV_ID_82541ER:
	case E1000_DEV_ID_82541ER_LOM:
	case E1000_DEV_ID_82541GI:
	case E1000_DEV_ID_82541GI_LF:
	case E1000_DEV_ID_82541GI_MOBILE:
	case E1000_DEV_ID_82544EI_COPPER:
	case E1000_DEV_ID_82544EI_FIBER:
	case E1000_DEV_ID_82544GC_COPPER:
	case E1000_DEV_ID_82544GC_LOM:
	case E1000_DEV_ID_82545EM_COPPER:
	case E1000_DEV_ID_82545EM_FIBER:
	case E1000_DEV_ID_82546EB_COPPER:
	case E1000_DEV_ID_82546EB_FIBER:
	case E1000_DEV_ID_82546EB_QUAD_COPPER:
		return true;
	default:
		return false;
	}
}

static netdev_features_t e1000_fix_features(struct net_device *netdev,
	netdev_features_t features)
{
	/*
	 * Since there is no support for separate Rx/Tx VLAN accel
	 * enable/disable, make sure the Tx flag is always in the same
	 * state as the Rx flag.
	 */
	if (features & NETIF_F_HW_VLAN_RX)
		features |= NETIF_F_HW_VLAN_TX;
	else
		features &= ~NETIF_F_HW_VLAN_TX;

	return features;
}

static int e1000_set_features(struct net_device *netdev,
	netdev_features_t features)
{
	struct e1000_adapter *adapter = netdev_priv(netdev);
	netdev_features_t changed = features ^ netdev->features;

	if (changed & NETIF_F_HW_VLAN_RX)
		e1000_vlan_mode(netdev, features);

	if (!(changed & (NETIF_F_RXCSUM | NETIF_F_RXALL)))
		return 0;

	netdev->features = features;
	adapter->rx_csum = !!(features & NETIF_F_RXCSUM);

	if (netif_running(netdev))
		e1000_reinit_locked(adapter);
	else
		e1000_reset(adapter);

	return 0;
}

static const struct net_device_ops e1000_netdev_ops = {
	.ndo_open		= e1000_open,
	.ndo_stop		= e1000_close,
	.ndo_start_xmit		= e1000_xmit_frame,
	.ndo_get_stats		= e1000_get_stats,
	.ndo_set_rx_mode	= e1000_set_rx_mode,
	.ndo_set_mac_address	= e1000_set_mac,
	.ndo_tx_timeout		= e1000_tx_timeout,
	.ndo_change_mtu		= e1000_change_mtu,
	.ndo_do_ioctl		= e1000_ioctl,
	.ndo_validate_addr	= eth_validate_addr,
	.ndo_vlan_rx_add_vid	= e1000_vlan_rx_add_vid,
	.ndo_vlan_rx_kill_vid	= e1000_vlan_rx_kill_vid,
#ifdef CONFIG_NET_POLL_CONTROLLER
	.ndo_poll_controller	= e1000_netpoll,
#endif
	.ndo_fix_features	= e1000_fix_features,
	.ndo_set_features	= e1000_set_features,
};

/**
 * e1000_init_hw_struct - initialize members of hw struct
 * @adapter: board private struct
 * @hw: structure used by e1000_hw.c
 *
 * Factors out initialization of the e1000_hw struct to its own function
 * that can be called very early at init (just after struct allocation).
 * Fields are initialized based on PCI device information and
 * OS network device settings (MTU size).
 * Returns negative error codes if MAC type setup fails.
 */
static int e1000_init_hw_struct(struct e1000_adapter *adapter,
				struct e1000_hw *hw)
{
	struct pci_dev *pdev = adapter->pdev;

	/* PCI config space info */
	hw->vendor_id = pdev->vendor;
	hw->device_id = pdev->device;
	hw->subsystem_vendor_id = pdev->subsystem_vendor;
	hw->subsystem_id = pdev->subsystem_device;
	hw->revision_id = pdev->revision;

	pci_read_config_word(pdev, PCI_COMMAND, &hw->pci_cmd_word);

	hw->max_frame_size = adapter->netdev->mtu +
			     ENET_HEADER_SIZE + ETHERNET_FCS_SIZE;
	hw->min_frame_size = MINIMUM_ETHERNET_FRAME_SIZE;
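	/* e.g. the default 1500-byte MTU yields a 1518-byte max_frame_size
	 * (14-byte Ethernet header + 4-byte FCS) */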
       
	/* identify the MAC */
	if (e1000_set_mac_type(hw)) {
		e_err(probe, "Unknown MAC Type\n");
		return -EIO;
	}

	switch (hw->mac_type) {
	default:
		break;
	case e1000_82541:
	case e1000_82547:
	case e1000_82541_rev_2:
	case e1000_82547_rev_2:
		hw->phy_init_script = 1;
		break;
	}

	e1000_set_media_type(hw);
	e1000_get_bus_info(hw);

	hw->wait_autoneg_complete = false;
	hw->tbi_compatibility_en = true;
	hw->adaptive_ifs = true;

	/* Copper options */

	if (hw->media_type == e1000_media_type_copper) {
		hw->mdix = AUTO_ALL_MODES;
		hw->disable_polarity_correction = false;
		hw->master_slave = E1000_MASTER_SLAVE;
	}

	return 0;
}

/**
 * e1000_probe - Device Initialization Routine
 * @pdev: PCI device information struct
 * @ent: entry in e1000_pci_tbl
 *
 * Returns 0 on success, negative on failure
 *
 * e1000_probe initializes an adapter identified by a pci_dev structure.
 * The OS initialization, configuring of the adapter private structure,
 * and a hardware reset occur.
 **/
static int __devinit e1000_probe(struct pci_dev *pdev,
				 const struct pci_device_id *ent)
{
	struct net_device *netdev;
	struct e1000_adapter *adapter;
	struct e1000_hw *hw;

	static int cards_found = 0;
	static int global_quad_port_a = 0; /* global ksp3 port a indication */
	int i, err, pci_using_dac;
	u16 eeprom_data = 0;
	u16 tmp = 0;
	u16 eeprom_apme_mask = E1000_EEPROM_APME;
	int bars, need_ioport;

	/* do not allocate ioport bars when not needed */
	need_ioport = e1000_is_need_ioport(pdev);
	if (need_ioport) {
		bars = pci_select_bars(pdev, IORESOURCE_MEM | IORESOURCE_IO);
		err = pci_enable_device(pdev);
	} else {
		bars = pci_select_bars(pdev, IORESOURCE_MEM);
		err = pci_enable_device_mem(pdev);
	}
	if (err)
		return err;

	err = pci_request_selected_regions(pdev, bars, e1000_driver_name);
	if (err)
		goto err_pci_reg;

	pci_set_master(pdev);
	err = pci_save_state(pdev);
	if (err)
		goto err_alloc_etherdev;

	err = -ENOMEM;
	netdev = alloc_etherdev(sizeof(struct e1000_adapter));
	if (!netdev)
		goto err_alloc_etherdev;

	SET_NETDEV_DEV(netdev, &pdev->dev);

	pci_set_drvdata(pdev, netdev);
	adapter = netdev_priv(netdev);
	adapter->netdev = netdev;
	adapter->pdev = pdev;
	adapter->msg_enable = netif_msg_init(debug, DEFAULT_MSG_ENABLE);
	adapter->bars = bars;
	adapter->need_ioport = need_ioport;

	hw = &adapter->hw;
	hw->back = adapter;

	err = -EIO;
	hw->hw_addr = pci_ioremap_bar(pdev, BAR_0);
	if (!hw->hw_addr)
		goto err_ioremap;

	if (adapter->need_ioport) {
		for (i = BAR_1; i <= BAR_5; i++) {
			if (pci_resource_len(pdev, i) == 0)
				continue;
			if (pci_resource_flags(pdev, i) & IORESOURCE_IO) {
				hw->io_base = pci_resource_start(pdev, i);
				break;
			}
		}
	}

	/* make ready for any if (hw->...) below */
	err = e1000_init_hw_struct(adapter, hw);
	if (err)
		goto err_sw_init;

	/*
	 * there is a workaround being applied below that limits
	 * 64-bit DMA addresses to 64-bit hardware.  Some 32-bit
	 * adapters hang on Tx when given 64-bit DMA addresses.
	 */
	pci_using_dac = 0;
	if ((hw->bus_type == e1000_bus_type_pcix) &&
	    !dma_set_mask(&pdev->dev, DMA_BIT_MASK(64))) {
		/*
		 * according to DMA-API-HOWTO, coherent calls will always
		 * succeed if the set call did
		 */
		dma_set_coherent_mask(&pdev->dev, DMA_BIT_MASK(64));
		pci_using_dac = 1;
	} else {
		err = dma_set_mask(&pdev->dev, DMA_BIT_MASK(32));
		if (err) {
			pr_err("No usable DMA config, aborting\n");
			goto err_dma;
		}
		dma_set_coherent_mask(&pdev->dev, DMA_BIT_MASK(32));
	}

	netdev->netdev_ops = &e1000_netdev_ops;
	e1000_set_ethtool_ops(netdev);
	netdev->watchdog_timeo = 5 * HZ;
	netif_napi_add(netdev, &adapter->napi, e1000_clean, 64);

	strncpy(netdev->name, pci_name(pdev), sizeof(netdev->name) - 1);
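	/* the PCI-derived name set above is temporary; the non-EtherCAT
	 * path below renames the interface to "eth%d" before
	 * register_netdev() */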
       
	adapter->bd_number = cards_found;

	/* setup the private structure */

	err = e1000_sw_init(adapter);
	if (err)
		goto err_sw_init;

	err = -EIO;
	if (hw->mac_type == e1000_ce4100) {
		hw->ce4100_gbe_mdio_base_virt =
					ioremap(pci_resource_start(pdev, BAR_1),
		                                pci_resource_len(pdev, BAR_1));

		if (!hw->ce4100_gbe_mdio_base_virt)
			goto err_mdio_ioremap;
	}

	if (hw->mac_type >= e1000_82543) {
		netdev->hw_features = NETIF_F_SG |
				   NETIF_F_HW_CSUM |
				   NETIF_F_HW_VLAN_RX;
		netdev->features = NETIF_F_HW_VLAN_TX |
				   NETIF_F_HW_VLAN_FILTER;
	}

	if ((hw->mac_type >= e1000_82544) &&
	   (hw->mac_type != e1000_82547))
		netdev->hw_features |= NETIF_F_TSO;

	netdev->priv_flags |= IFF_SUPP_NOFCS;

	netdev->features |= netdev->hw_features;
	netdev->hw_features |= (NETIF_F_RXCSUM |
				NETIF_F_RXALL |
				NETIF_F_RXFCS);

	if (pci_using_dac) {
		netdev->features |= NETIF_F_HIGHDMA;
		netdev->vlan_features |= NETIF_F_HIGHDMA;
	}

	netdev->vlan_features |= (NETIF_F_TSO |
				  NETIF_F_HW_CSUM |
				  NETIF_F_SG);

	netdev->priv_flags |= IFF_UNICAST_FLT;

	adapter->en_mng_pt = e1000_enable_mng_pass_thru(hw);

	/* initialize eeprom parameters */
	if (e1000_init_eeprom_params(hw)) {
		e_err(probe, "EEPROM initialization failed\n");
		goto err_eeprom;
	}

	/* before reading the EEPROM, reset the controller to
	 * put the device in a known good starting state */

	e1000_reset_hw(hw);

	/* make sure the EEPROM is good */
	if (e1000_validate_eeprom_checksum(hw) < 0) {
		e_err(probe, "The EEPROM Checksum Is Not Valid\n");
		e1000_dump_eeprom(adapter);
		/*
		 * set MAC address to all zeroes to invalidate and temporarily
		 * disable this device for the user. This blocks regular
		 * traffic while still permitting ethtool ioctls from reaching
		 * the hardware as well as allowing the user to run the
		 * interface after manually setting a hw addr using
		 * `ip link set address`
		 */
		memset(hw->mac_addr, 0, netdev->addr_len);
	} else {
		/* copy the MAC address out of the EEPROM */
		if (e1000_read_mac_addr(hw))
			e_err(probe, "EEPROM Read Error\n");
	}
	/* don't block initialization here due to a bad MAC address */
	memcpy(netdev->dev_addr, hw->mac_addr, netdev->addr_len);
	memcpy(netdev->perm_addr, hw->mac_addr, netdev->addr_len);

	if (!is_valid_ether_addr(netdev->perm_addr))
		e_err(probe, "Invalid MAC Address\n");

	INIT_DELAYED_WORK(&adapter->watchdog_task, e1000_watchdog);
	INIT_DELAYED_WORK(&adapter->fifo_stall_task,
			  e1000_82547_tx_fifo_stall_task);
	INIT_DELAYED_WORK(&adapter->phy_info_task, e1000_update_phy_info_task);
	INIT_WORK(&adapter->reset_task, e1000_reset_task);

	e1000_check_options(adapter);

	/* Initial Wake on LAN setting
	 * If APM wake is enabled in the EEPROM,
	 * enable the ACPI Magic Packet filter
	 */

	switch (hw->mac_type) {
	case e1000_82542_rev2_0:
	case e1000_82542_rev2_1:
	case e1000_82543:
		break;
	case e1000_82544:
		e1000_read_eeprom(hw,
			EEPROM_INIT_CONTROL2_REG, 1, &eeprom_data);
		eeprom_apme_mask = E1000_EEPROM_82544_APM;
		break;
	case e1000_82546:
	case e1000_82546_rev_3:
		if (er32(STATUS) & E1000_STATUS_FUNC_1) {
			e1000_read_eeprom(hw,
				EEPROM_INIT_CONTROL3_PORT_B, 1, &eeprom_data);
			break;
		}
		/* Fall Through */
	default:
		e1000_read_eeprom(hw,
			EEPROM_INIT_CONTROL3_PORT_A, 1, &eeprom_data);
		break;
	}
	if (eeprom_data & eeprom_apme_mask)
		adapter->eeprom_wol |= E1000_WUFC_MAG;

	/* now that we have the eeprom settings, apply the special cases
	 * where the eeprom may be wrong or the board simply won't support
	 * wake on lan on a particular port */
	switch (pdev->device) {
	case E1000_DEV_ID_82546GB_PCIE:
		adapter->eeprom_wol = 0;
		break;
	case E1000_DEV_ID_82546EB_FIBER:
	case E1000_DEV_ID_82546GB_FIBER:
		/* Wake events only supported on port A for dual fiber
		 * regardless of eeprom setting */
		if (er32(STATUS) & E1000_STATUS_FUNC_1)
			adapter->eeprom_wol = 0;
		break;
	case E1000_DEV_ID_82546GB_QUAD_COPPER_KSP3:
		/* if quad port adapter, disable WoL on all but port A */
		if (global_quad_port_a != 0)
			adapter->eeprom_wol = 0;
		else
			adapter->quad_port_a = true;
		/* Reset for multiple quad port adapters */
		if (++global_quad_port_a == 4)
			global_quad_port_a = 0;
		break;
	}

	/* initialize the wol settings based on the eeprom settings */
	adapter->wol = adapter->eeprom_wol;
	device_set_wakeup_enable(&adapter->pdev->dev, adapter->wol);

	/* Auto detect PHY address */
	if (hw->mac_type == e1000_ce4100) {
		for (i = 0; i < 32; i++) {
			hw->phy_addr = i;
			e1000_read_phy_reg(hw, PHY_ID2, &tmp);
			if (tmp == 0 || tmp == 0xFF) {
				if (i == 31)
					goto err_eeprom;
				continue;
			} else
				break;
		}
	}

	/* reset the hardware with the new settings */
	e1000_reset(adapter);

	// offer device to EtherCAT master module
	adapter->ecdev = ecdev_offer(netdev, ec_poll, THIS_MODULE);
	if (adapter->ecdev) {
		err = ecdev_open(adapter->ecdev);
		if (err) {
			ecdev_withdraw(adapter->ecdev);
			goto err_register;
		}
	} else {
		strcpy(netdev->name, "eth%d");
		err = register_netdev(netdev);
		if (err)
			goto err_register;
	}
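	/* Note: when the EtherCAT master accepts the offer,
	 * register_netdev() is never called, so the NIC stays invisible
	 * to the normal network stack and is reachable only through
	 * the master. */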
       
  1270 
       
  1271 	e1000_vlan_filter_on_off(adapter, false);
       
  1272 
       
  1273 	/* print bus type/speed/width info */
       
  1274 	e_info(probe, "(PCI%s:%dMHz:%d-bit) %pM\n",
       
  1275 	       ((hw->bus_type == e1000_bus_type_pcix) ? "-X" : ""),
       
  1276 	       ((hw->bus_speed == e1000_bus_speed_133) ? 133 :
       
  1277 		(hw->bus_speed == e1000_bus_speed_120) ? 120 :
       
  1278 		(hw->bus_speed == e1000_bus_speed_100) ? 100 :
       
  1279 		(hw->bus_speed == e1000_bus_speed_66) ? 66 : 33),
       
  1280 	       ((hw->bus_width == e1000_bus_width_64) ? 64 : 32),
       
  1281 	       netdev->dev_addr);
       
  1282 
       
  1283 	if (!adapter->ecdev) {
       
  1284 		/* carrier off reporting is important to ethtool even BEFORE open */
       
  1285 		netif_carrier_off(netdev);
       
  1286 	}
       
  1287 
       
  1288 	e_info(probe, "Intel(R) PRO/1000 Network Connection\n");
       
  1289 
       
  1290 	cards_found++;
       
  1291 	return 0;
       
  1292 
       
  1293 err_register:
       
  1294 err_eeprom:
       
  1295 	e1000_phy_hw_reset(hw);
       
  1296 
       
  1297 	if (hw->flash_address)
       
  1298 		iounmap(hw->flash_address);
       
  1299 	kfree(adapter->tx_ring);
       
  1300 	kfree(adapter->rx_ring);
       
  1301 err_dma:
       
  1302 err_sw_init:
       
  1303 err_mdio_ioremap:
       
  1304 	iounmap(hw->ce4100_gbe_mdio_base_virt);
       
  1305 	iounmap(hw->hw_addr);
       
  1306 err_ioremap:
       
  1307 	free_netdev(netdev);
       
  1308 err_alloc_etherdev:
       
  1309 	pci_release_selected_regions(pdev, bars);
       
  1310 err_pci_reg:
       
  1311 	pci_disable_device(pdev);
       
  1312 	return err;
       
  1313 }
       
  1314 
       
  1315 /**
       
  1316  * e1000_remove - Device Removal Routine
       
  1317  * @pdev: PCI device information struct
       
  1318  *
       
  1319  * e1000_remove is called by the PCI subsystem to alert the driver
       
   1320  * that it should release a PCI device.  This could be caused by a
       
  1321  * Hot-Plug event, or because the driver is going to be removed from
       
  1322  * memory.
       
  1323  **/
       
  1324 
       
  1325 static void __devexit e1000_remove(struct pci_dev *pdev)
       
  1326 {
       
  1327 	struct net_device *netdev = pci_get_drvdata(pdev);
       
  1328 	struct e1000_adapter *adapter = netdev_priv(netdev);
       
  1329 	struct e1000_hw *hw = &adapter->hw;
       
  1330 
       
  1331 	e1000_down_and_stop(adapter);
       
  1332 	e1000_release_manageability(adapter);
       
  1333 
       
  1334 	if (adapter->ecdev) {
       
  1335 		ecdev_close(adapter->ecdev);
       
  1336 		ecdev_withdraw(adapter->ecdev);
       
  1337 	} else {
       
  1338 		unregister_netdev(netdev);
       
  1339 	}
       
  1340 
       
  1341 	e1000_phy_hw_reset(hw);
       
  1342 
       
  1343 	kfree(adapter->tx_ring);
       
  1344 	kfree(adapter->rx_ring);
       
  1345 
       
  1346 	if (hw->mac_type == e1000_ce4100)
       
  1347 		iounmap(hw->ce4100_gbe_mdio_base_virt);
       
  1348 	iounmap(hw->hw_addr);
       
  1349 	if (hw->flash_address)
       
  1350 		iounmap(hw->flash_address);
       
  1351 	pci_release_selected_regions(pdev, adapter->bars);
       
  1352 
       
  1353 	free_netdev(netdev);
       
  1354 
       
  1355 	pci_disable_device(pdev);
       
  1356 }
       
  1357 
       
  1358 /**
       
  1359  * e1000_sw_init - Initialize general software structures (struct e1000_adapter)
       
  1360  * @adapter: board private structure to initialize
       
  1361  *
       
  1362  * e1000_sw_init initializes the Adapter private data structure.
       
  1363  * e1000_init_hw_struct MUST be called before this function
       
  1364  **/
       
  1365 
       
  1366 static int __devinit e1000_sw_init(struct e1000_adapter *adapter)
       
  1367 {
       
  1368 	adapter->rx_buffer_len = MAXIMUM_ETHERNET_VLAN_SIZE;
       
  1369 
       
  1370 	adapter->num_tx_queues = 1;
       
  1371 	adapter->num_rx_queues = 1;
       
  1372 
       
  1373 	if (e1000_alloc_queues(adapter)) {
       
  1374 		e_err(probe, "Unable to allocate memory for queues\n");
       
  1375 		return -ENOMEM;
       
  1376 	}
       
  1377 
       
  1378 	/* Explicitly disable IRQ since the NIC can be in any state. */
       
  1379 	e1000_irq_disable(adapter);
       
  1380 
       
  1381 	spin_lock_init(&adapter->stats_lock);
       
  1382 	mutex_init(&adapter->mutex);
       
  1383 
       
  1384 	set_bit(__E1000_DOWN, &adapter->flags);
       
  1385 
       
  1386 	return 0;
       
  1387 }
       
  1388 
       
  1389 /**
       
  1390  * e1000_alloc_queues - Allocate memory for all rings
       
  1391  * @adapter: board private structure to initialize
       
  1392  *
       
  1393  * We allocate one ring per queue at run-time since we don't know the
       
  1394  * number of queues at compile-time.
       
  1395  **/
       
  1396 
       
  1397 static int __devinit e1000_alloc_queues(struct e1000_adapter *adapter)
       
  1398 {
       
  1399 	adapter->tx_ring = kcalloc(adapter->num_tx_queues,
       
  1400 	                           sizeof(struct e1000_tx_ring), GFP_KERNEL);
       
  1401 	if (!adapter->tx_ring)
       
  1402 		return -ENOMEM;
       
  1403 
       
  1404 	adapter->rx_ring = kcalloc(adapter->num_rx_queues,
       
  1405 	                           sizeof(struct e1000_rx_ring), GFP_KERNEL);
       
  1406 	if (!adapter->rx_ring) {
       
  1407 		kfree(adapter->tx_ring);
       
  1408 		return -ENOMEM;
       
  1409 	}
       
  1410 
       
  1411 	return E1000_SUCCESS;
       
  1412 }
       
  1413 
       
  1414 /**
       
  1415  * e1000_open - Called when a network interface is made active
       
  1416  * @netdev: network interface device structure
       
  1417  *
       
  1418  * Returns 0 on success, negative value on failure
       
  1419  *
       
  1420  * The open entry point is called when a network interface is made
       
  1421  * active by the system (IFF_UP).  At this point all resources needed
       
  1422  * for transmit and receive operations are allocated, the interrupt
       
  1423  * handler is registered with the OS, the watchdog task is started,
       
  1424  * and the stack is notified that the interface is ready.
       
  1425  **/
       
  1426 
       
  1427 static int e1000_open(struct net_device *netdev)
       
  1428 {
       
  1429 	struct e1000_adapter *adapter = netdev_priv(netdev);
       
  1430 	struct e1000_hw *hw = &adapter->hw;
       
  1431 	int err;
       
  1432 
       
  1433 	/* disallow open during test */
       
  1434 	if (test_bit(__E1000_TESTING, &adapter->flags))
       
  1435 		return -EBUSY;
       
  1436 
       
  1437 	netif_carrier_off(netdev);
       
  1438 
       
  1439 	/* allocate transmit descriptors */
       
  1440 	err = e1000_setup_all_tx_resources(adapter);
       
  1441 	if (err)
       
  1442 		goto err_setup_tx;
       
  1443 
       
  1444 	/* allocate receive descriptors */
       
  1445 	err = e1000_setup_all_rx_resources(adapter);
       
  1446 	if (err)
       
  1447 		goto err_setup_rx;
       
  1448 
       
  1449 	e1000_power_up_phy(adapter);
       
  1450 
       
  1451 	adapter->mng_vlan_id = E1000_MNG_VLAN_NONE;
       
  1452 	if ((hw->mng_cookie.status &
       
  1453 			  E1000_MNG_DHCP_COOKIE_STATUS_VLAN_SUPPORT)) {
       
  1454 		e1000_update_mng_vlan(adapter);
       
  1455 	}
       
  1456 
       
  1457 	/* before we allocate an interrupt, we must be ready to handle it.
       
  1458 	 * Setting DEBUG_SHIRQ in the kernel makes it fire an interrupt
       
   1459 	 * as soon as we call request_irq, so we have to set up our
       
  1460 	 * clean_rx handler before we do so.  */
       
  1461 	e1000_configure(adapter);
       
  1462 
       
  1463 	err = e1000_request_irq(adapter);
       
  1464 	if (err)
       
  1465 		goto err_req_irq;
       
  1466 
       
  1467 	/* From here on the code is the same as e1000_up() */
       
  1468 	clear_bit(__E1000_DOWN, &adapter->flags);
       
  1469 
       
  1470 	if (!adapter->ecdev) {
       
  1471 		napi_enable(&adapter->napi);
       
  1472 
       
  1473 		e1000_irq_enable(adapter);
       
  1474 
       
  1475 		netif_start_queue(netdev);
       
  1476 	}
       
  1477 
       
  1478 	/* fire a link status change interrupt to start the watchdog */
       
  1479 	ew32(ICS, E1000_ICS_LSC);
       
  1480 
       
  1481 	return E1000_SUCCESS;
       
  1482 
       
  1483 err_req_irq:
       
  1484 	e1000_power_down_phy(adapter);
       
  1485 	e1000_free_all_rx_resources(adapter);
       
  1486 err_setup_rx:
       
  1487 	e1000_free_all_tx_resources(adapter);
       
  1488 err_setup_tx:
       
  1489 	e1000_reset(adapter);
       
  1490 
       
  1491 	return err;
       
  1492 }
       
  1493 
       
  1494 /**
       
  1495  * e1000_close - Disables a network interface
       
  1496  * @netdev: network interface device structure
       
  1497  *
       
   1498  * Returns 0; closing is not allowed to fail
       
  1499  *
       
  1500  * The close entry point is called when an interface is de-activated
       
  1501  * by the OS.  The hardware is still under the drivers control, but
       
  1502  * needs to be disabled.  A global MAC reset is issued to stop the
       
  1503  * hardware, and all transmit and receive resources are freed.
       
  1504  **/
       
  1505 
       
  1506 static int e1000_close(struct net_device *netdev)
       
  1507 {
       
  1508 	struct e1000_adapter *adapter = netdev_priv(netdev);
       
  1509 	struct e1000_hw *hw = &adapter->hw;
       
  1510 
       
  1511 	WARN_ON(test_bit(__E1000_RESETTING, &adapter->flags));
       
  1512 	e1000_down(adapter);
       
  1513 	e1000_power_down_phy(adapter);
       
  1514 	e1000_free_irq(adapter);
       
  1515 
       
  1516 	e1000_free_all_tx_resources(adapter);
       
  1517 	e1000_free_all_rx_resources(adapter);
       
  1518 
       
  1519 	/* kill manageability vlan ID if supported, but not if a vlan with
       
  1520 	 * the same ID is registered on the host OS (let 8021q kill it) */
       
  1521 	if ((hw->mng_cookie.status &
       
  1522 			  E1000_MNG_DHCP_COOKIE_STATUS_VLAN_SUPPORT) &&
       
  1523 	     !test_bit(adapter->mng_vlan_id, adapter->active_vlans)) {
       
  1524 		e1000_vlan_rx_kill_vid(netdev, adapter->mng_vlan_id);
       
  1525 	}
       
  1526 
       
  1527 	return 0;
       
  1528 }
       
  1529 
       
  1530 /**
       
  1531  * e1000_check_64k_bound - check that memory doesn't cross 64kB boundary
       
  1532  * @adapter: address of board private structure
       
  1533  * @start: address of beginning of memory
       
  1534  * @len: length of memory
       
  1535  **/
       
  1536 static bool e1000_check_64k_bound(struct e1000_adapter *adapter, void *start,
       
  1537 				  unsigned long len)
       
  1538 {
       
  1539 	struct e1000_hw *hw = &adapter->hw;
       
  1540 	unsigned long begin = (unsigned long)start;
       
  1541 	unsigned long end = begin + len;
       
  1542 
       
   1543 	/* First revs of the 82545 and 82546 must not allow any memory
       
   1544 	 * write to cross a 64 kB boundary, due to errata 23 */
       
  1545 	if (hw->mac_type == e1000_82545 ||
       
  1546 	    hw->mac_type == e1000_ce4100 ||
       
  1547 	    hw->mac_type == e1000_82546) {
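        		/* begin and end - 1 lie in the same 64 kB region exactly
        		 * when their address bits above bit 15 agree, i.e. when
        		 * XORing them and shifting right by 16 yields zero. */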
       
   1548 		return ((begin ^ (end - 1)) >> 16) == 0;
       
  1549 	}
       
  1550 
       
  1551 	return true;
       
  1552 }
       
  1553 
       
  1554 /**
       
  1555  * e1000_setup_tx_resources - allocate Tx resources (Descriptors)
       
  1556  * @adapter: board private structure
       
  1557  * @txdr:    tx descriptor ring (for a specific queue) to setup
       
  1558  *
       
  1559  * Return 0 on success, negative on failure
       
  1560  **/
       
  1561 
       
  1562 static int e1000_setup_tx_resources(struct e1000_adapter *adapter,
       
  1563 				    struct e1000_tx_ring *txdr)
       
  1564 {
       
  1565 	struct pci_dev *pdev = adapter->pdev;
       
  1566 	int size;
       
  1567 
       
  1568 	size = sizeof(struct e1000_buffer) * txdr->count;
       
  1569 	txdr->buffer_info = vzalloc(size);
       
  1570 	if (!txdr->buffer_info) {
       
  1571 		e_err(probe, "Unable to allocate memory for the Tx descriptor "
       
  1572 		      "ring\n");
       
  1573 		return -ENOMEM;
       
  1574 	}
       
  1575 
       
  1576 	/* round up to nearest 4K */
       
  1577 
       
  1578 	txdr->size = txdr->count * sizeof(struct e1000_tx_desc);
       
  1579 	txdr->size = ALIGN(txdr->size, 4096);
       
  1580 
       
  1581 	txdr->desc = dma_alloc_coherent(&pdev->dev, txdr->size, &txdr->dma,
       
  1582 					GFP_KERNEL);
       
  1583 	if (!txdr->desc) {
       
  1584 setup_tx_desc_die:
       
  1585 		vfree(txdr->buffer_info);
       
  1586 		e_err(probe, "Unable to allocate memory for the Tx descriptor "
       
  1587 		      "ring\n");
       
  1588 		return -ENOMEM;
       
  1589 	}
       
  1590 
       
  1591 	/* Fix for errata 23, can't cross 64kB boundary */
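        	/* Recovery: keep holding the misaligned buffer while
        	 * allocating a replacement, so dma_alloc_coherent() cannot
        	 * hand back the same region, then free whichever of the two
        	 * allocations is not used. */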
       
  1592 	if (!e1000_check_64k_bound(adapter, txdr->desc, txdr->size)) {
       
  1593 		void *olddesc = txdr->desc;
       
  1594 		dma_addr_t olddma = txdr->dma;
       
  1595 		e_err(tx_err, "txdr align check failed: %u bytes at %p\n",
       
  1596 		      txdr->size, txdr->desc);
       
  1597 		/* Try again, without freeing the previous */
       
  1598 		txdr->desc = dma_alloc_coherent(&pdev->dev, txdr->size,
       
  1599 						&txdr->dma, GFP_KERNEL);
       
  1600 		/* Failed allocation, critical failure */
       
  1601 		if (!txdr->desc) {
       
  1602 			dma_free_coherent(&pdev->dev, txdr->size, olddesc,
       
  1603 					  olddma);
       
  1604 			goto setup_tx_desc_die;
       
  1605 		}
       
  1606 
       
  1607 		if (!e1000_check_64k_bound(adapter, txdr->desc, txdr->size)) {
       
  1608 			/* give up */
       
  1609 			dma_free_coherent(&pdev->dev, txdr->size, txdr->desc,
       
  1610 					  txdr->dma);
       
  1611 			dma_free_coherent(&pdev->dev, txdr->size, olddesc,
       
  1612 					  olddma);
       
  1613 			e_err(probe, "Unable to allocate aligned memory "
       
  1614 			      "for the transmit descriptor ring\n");
       
  1615 			vfree(txdr->buffer_info);
       
  1616 			return -ENOMEM;
       
  1617 		} else {
       
  1618 			/* Free old allocation, new allocation was successful */
       
  1619 			dma_free_coherent(&pdev->dev, txdr->size, olddesc,
       
  1620 					  olddma);
       
  1621 		}
       
  1622 	}
       
  1623 	memset(txdr->desc, 0, txdr->size);
       
  1624 
       
  1625 	txdr->next_to_use = 0;
       
  1626 	txdr->next_to_clean = 0;
       
  1627 
       
  1628 	return 0;
       
  1629 }
       
  1630 
       
  1631 /**
       
  1632  * e1000_setup_all_tx_resources - wrapper to allocate Tx resources
       
  1633  * 				  (Descriptors) for all queues
       
  1634  * @adapter: board private structure
       
  1635  *
       
  1636  * Return 0 on success, negative on failure
       
  1637  **/
       
  1638 
       
  1639 int e1000_setup_all_tx_resources(struct e1000_adapter *adapter)
       
  1640 {
       
  1641 	int i, err = 0;
       
  1642 
       
  1643 	for (i = 0; i < adapter->num_tx_queues; i++) {
       
  1644 		err = e1000_setup_tx_resources(adapter, &adapter->tx_ring[i]);
       
  1645 		if (err) {
       
  1646 			e_err(probe, "Allocation for Tx Queue %u failed\n", i);
       
   1647 			for (i--; i >= 0; i--)
       
  1648 				e1000_free_tx_resources(adapter,
       
  1649 							&adapter->tx_ring[i]);
       
  1650 			break;
       
  1651 		}
       
  1652 	}
       
  1653 
       
  1654 	return err;
       
  1655 }
       
  1656 
       
  1657 /**
       
  1658  * e1000_configure_tx - Configure 8254x Transmit Unit after Reset
       
  1659  * @adapter: board private structure
       
  1660  *
       
  1661  * Configure the Tx unit of the MAC after a reset.
       
  1662  **/
       
  1663 
       
  1664 static void e1000_configure_tx(struct e1000_adapter *adapter)
       
  1665 {
       
  1666 	u64 tdba;
       
  1667 	struct e1000_hw *hw = &adapter->hw;
       
  1668 	u32 tdlen, tctl, tipg;
       
  1669 	u32 ipgr1, ipgr2;
       
  1670 
       
  1671 	/* Setup the HW Tx Head and Tail descriptor pointers */
       
  1672 
       
  1673 	switch (adapter->num_tx_queues) {
       
  1674 	case 1:
       
  1675 	default:
       
  1676 		tdba = adapter->tx_ring[0].dma;
       
  1677 		tdlen = adapter->tx_ring[0].count *
       
  1678 			sizeof(struct e1000_tx_desc);
       
  1679 		ew32(TDLEN, tdlen);
       
  1680 		ew32(TDBAH, (tdba >> 32));
       
  1681 		ew32(TDBAL, (tdba & 0x00000000ffffffffULL));
       
  1682 		ew32(TDT, 0);
       
  1683 		ew32(TDH, 0);
       
  1684 		adapter->tx_ring[0].tdh = ((hw->mac_type >= e1000_82543) ? E1000_TDH : E1000_82542_TDH);
       
  1685 		adapter->tx_ring[0].tdt = ((hw->mac_type >= e1000_82543) ? E1000_TDT : E1000_82542_TDT);
       
  1686 		break;
       
  1687 	}
       
  1688 
       
  1689 	/* Set the default values for the Tx Inter Packet Gap timer */
       
  1690 	if ((hw->media_type == e1000_media_type_fiber ||
       
  1691 	     hw->media_type == e1000_media_type_internal_serdes))
       
  1692 		tipg = DEFAULT_82543_TIPG_IPGT_FIBER;
       
  1693 	else
       
  1694 		tipg = DEFAULT_82543_TIPG_IPGT_COPPER;
       
  1695 
       
  1696 	switch (hw->mac_type) {
       
  1697 	case e1000_82542_rev2_0:
       
  1698 	case e1000_82542_rev2_1:
       
  1699 		tipg = DEFAULT_82542_TIPG_IPGT;
       
  1700 		ipgr1 = DEFAULT_82542_TIPG_IPGR1;
       
  1701 		ipgr2 = DEFAULT_82542_TIPG_IPGR2;
       
  1702 		break;
       
  1703 	default:
       
  1704 		ipgr1 = DEFAULT_82543_TIPG_IPGR1;
       
  1705 		ipgr2 = DEFAULT_82543_TIPG_IPGR2;
       
  1706 		break;
       
  1707 	}
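        	/* Assemble the three inter-packet-gap fields into one
        	 * register value: IPGT occupies the low bits, IPGR1 and
        	 * IPGR2 are shifted into their respective fields. */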
       
  1708 	tipg |= ipgr1 << E1000_TIPG_IPGR1_SHIFT;
       
  1709 	tipg |= ipgr2 << E1000_TIPG_IPGR2_SHIFT;
       
  1710 	ew32(TIPG, tipg);
       
  1711 
       
  1712 	/* Set the Tx Interrupt Delay register */
       
  1713 
       
  1714 	ew32(TIDV, adapter->tx_int_delay);
       
  1715 	if (hw->mac_type >= e1000_82540)
       
  1716 		ew32(TADV, adapter->tx_abs_int_delay);
       
  1717 
       
  1718 	/* Program the Transmit Control Register */
       
  1719 
       
  1720 	tctl = er32(TCTL);
       
  1721 	tctl &= ~E1000_TCTL_CT;
       
  1722 	tctl |= E1000_TCTL_PSP | E1000_TCTL_RTLC |
       
  1723 		(E1000_COLLISION_THRESHOLD << E1000_CT_SHIFT);
       
  1724 
       
  1725 	e1000_config_collision_dist(hw);
       
  1726 
       
  1727 	/* Setup Transmit Descriptor Settings for eop descriptor */
       
  1728 	adapter->txd_cmd = E1000_TXD_CMD_EOP | E1000_TXD_CMD_IFCS;
       
  1729 
       
  1730 	/* only set IDE if we are delaying interrupts using the timers */
       
  1731 	if (adapter->tx_int_delay)
       
  1732 		adapter->txd_cmd |= E1000_TXD_CMD_IDE;
       
  1733 
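        	/* The 82542 only supports the older Report Packet Sent (RPS)
        	 * form of descriptor write-back; 82543 and newer request it
        	 * per descriptor via Report Status (RS). */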
       
  1734 	if (hw->mac_type < e1000_82543)
       
  1735 		adapter->txd_cmd |= E1000_TXD_CMD_RPS;
       
  1736 	else
       
  1737 		adapter->txd_cmd |= E1000_TXD_CMD_RS;
       
  1738 
       
  1739 	/* Cache if we're 82544 running in PCI-X because we'll
       
  1740 	 * need this to apply a workaround later in the send path. */
       
  1741 	if (hw->mac_type == e1000_82544 &&
       
  1742 	    hw->bus_type == e1000_bus_type_pcix)
       
  1743 		adapter->pcix_82544 = true;
       
  1744 
       
  1745 	ew32(TCTL, tctl);
       
  1746 
       
  1747 }
       
  1748 
       
  1749 /**
       
  1750  * e1000_setup_rx_resources - allocate Rx resources (Descriptors)
       
  1751  * @adapter: board private structure
       
  1752  * @rxdr:    rx descriptor ring (for a specific queue) to setup
       
  1753  *
       
  1754  * Returns 0 on success, negative on failure
       
  1755  **/
       
  1756 
       
  1757 static int e1000_setup_rx_resources(struct e1000_adapter *adapter,
       
  1758 				    struct e1000_rx_ring *rxdr)
       
  1759 {
       
  1760 	struct pci_dev *pdev = adapter->pdev;
       
  1761 	int size, desc_len;
       
  1762 
       
  1763 	size = sizeof(struct e1000_buffer) * rxdr->count;
       
  1764 	rxdr->buffer_info = vzalloc(size);
       
  1765 	if (!rxdr->buffer_info) {
       
  1766 		e_err(probe, "Unable to allocate memory for the Rx descriptor "
       
  1767 		      "ring\n");
       
  1768 		return -ENOMEM;
       
  1769 	}
       
  1770 
       
  1771 	desc_len = sizeof(struct e1000_rx_desc);
       
  1772 
       
  1773 	/* Round up to nearest 4K */
       
  1774 
       
  1775 	rxdr->size = rxdr->count * desc_len;
       
  1776 	rxdr->size = ALIGN(rxdr->size, 4096);
       
  1777 
       
  1778 	rxdr->desc = dma_alloc_coherent(&pdev->dev, rxdr->size, &rxdr->dma,
       
  1779 					GFP_KERNEL);
       
  1780 
       
  1781 	if (!rxdr->desc) {
       
  1782 		e_err(probe, "Unable to allocate memory for the Rx descriptor "
       
  1783 		      "ring\n");
       
  1784 setup_rx_desc_die:
       
  1785 		vfree(rxdr->buffer_info);
       
  1786 		return -ENOMEM;
       
  1787 	}
       
  1788 
       
  1789 	/* Fix for errata 23, can't cross 64kB boundary */
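        	/* Same recovery strategy as in e1000_setup_tx_resources():
        	 * hold the misaligned buffer while retrying so that the
        	 * replacement lands elsewhere. */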
       
  1790 	if (!e1000_check_64k_bound(adapter, rxdr->desc, rxdr->size)) {
       
  1791 		void *olddesc = rxdr->desc;
       
  1792 		dma_addr_t olddma = rxdr->dma;
       
  1793 		e_err(rx_err, "rxdr align check failed: %u bytes at %p\n",
       
  1794 		      rxdr->size, rxdr->desc);
       
  1795 		/* Try again, without freeing the previous */
       
  1796 		rxdr->desc = dma_alloc_coherent(&pdev->dev, rxdr->size,
       
  1797 						&rxdr->dma, GFP_KERNEL);
       
  1798 		/* Failed allocation, critical failure */
       
  1799 		if (!rxdr->desc) {
       
  1800 			dma_free_coherent(&pdev->dev, rxdr->size, olddesc,
       
  1801 					  olddma);
       
  1802 			e_err(probe, "Unable to allocate memory for the Rx "
       
  1803 			      "descriptor ring\n");
       
  1804 			goto setup_rx_desc_die;
       
  1805 		}
       
  1806 
       
  1807 		if (!e1000_check_64k_bound(adapter, rxdr->desc, rxdr->size)) {
       
  1808 			/* give up */
       
  1809 			dma_free_coherent(&pdev->dev, rxdr->size, rxdr->desc,
       
  1810 					  rxdr->dma);
       
  1811 			dma_free_coherent(&pdev->dev, rxdr->size, olddesc,
       
  1812 					  olddma);
       
  1813 			e_err(probe, "Unable to allocate aligned memory for "
       
  1814 			      "the Rx descriptor ring\n");
       
  1815 			goto setup_rx_desc_die;
       
  1816 		} else {
       
  1817 			/* Free old allocation, new allocation was successful */
       
  1818 			dma_free_coherent(&pdev->dev, rxdr->size, olddesc,
       
  1819 					  olddma);
       
  1820 		}
       
  1821 	}
       
  1822 	memset(rxdr->desc, 0, rxdr->size);
       
  1823 
       
  1824 	rxdr->next_to_clean = 0;
       
  1825 	rxdr->next_to_use = 0;
       
  1826 	rxdr->rx_skb_top = NULL;
       
  1827 
       
  1828 	return 0;
       
  1829 }
       
  1830 
       
  1831 /**
       
  1832  * e1000_setup_all_rx_resources - wrapper to allocate Rx resources
       
  1833  * 				  (Descriptors) for all queues
       
  1834  * @adapter: board private structure
       
  1835  *
       
  1836  * Return 0 on success, negative on failure
       
  1837  **/
       
  1838 
       
  1839 int e1000_setup_all_rx_resources(struct e1000_adapter *adapter)
       
  1840 {
       
  1841 	int i, err = 0;
       
  1842 
       
  1843 	for (i = 0; i < adapter->num_rx_queues; i++) {
       
  1844 		err = e1000_setup_rx_resources(adapter, &adapter->rx_ring[i]);
       
  1845 		if (err) {
       
  1846 			e_err(probe, "Allocation for Rx Queue %u failed\n", i);
       
   1847 			for (i--; i >= 0; i--)
       
  1848 				e1000_free_rx_resources(adapter,
       
  1849 							&adapter->rx_ring[i]);
       
  1850 			break;
       
  1851 		}
       
  1852 	}
       
  1853 
       
  1854 	return err;
       
  1855 }
       
  1856 
       
  1857 /**
       
  1858  * e1000_setup_rctl - configure the receive control registers
       
  1859  * @adapter: Board private structure
       
  1860  **/
       
  1861 static void e1000_setup_rctl(struct e1000_adapter *adapter)
       
  1862 {
       
  1863 	struct e1000_hw *hw = &adapter->hw;
       
  1864 	u32 rctl;
       
  1865 
       
  1866 	rctl = er32(RCTL);
       
  1867 
       
  1868 	rctl &= ~(3 << E1000_RCTL_MO_SHIFT);
       
  1869 
       
  1870 	rctl |= E1000_RCTL_BAM | E1000_RCTL_LBM_NO |
       
  1871 		E1000_RCTL_RDMTS_HALF |
       
  1872 		(hw->mc_filter_type << E1000_RCTL_MO_SHIFT);
       
  1873 
       
  1874 	if (hw->tbi_compatibility_on == 1)
       
  1875 		rctl |= E1000_RCTL_SBP;
       
  1876 	else
       
  1877 		rctl &= ~E1000_RCTL_SBP;
       
  1878 
       
  1879 	if (adapter->netdev->mtu <= ETH_DATA_LEN)
       
  1880 		rctl &= ~E1000_RCTL_LPE;
       
  1881 	else
       
  1882 		rctl |= E1000_RCTL_LPE;
       
  1883 
       
  1884 	/* Setup buffer sizes */
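        	/* BSEX selects the extended buffer-size encoding; it is set
        	 * here by default and cleared again in the 2048-byte case,
        	 * which uses the base encoding. */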
       
  1885 	rctl &= ~E1000_RCTL_SZ_4096;
       
  1886 	rctl |= E1000_RCTL_BSEX;
       
   1887 	switch (adapter->rx_buffer_len) {
       
   1888 	case E1000_RXBUFFER_2048:
       
   1889 	default:
       
   1890 		rctl |= E1000_RCTL_SZ_2048;
       
   1891 		rctl &= ~E1000_RCTL_BSEX;
       
   1892 		break;
       
   1893 	case E1000_RXBUFFER_4096:
       
   1894 		rctl |= E1000_RCTL_SZ_4096;
       
   1895 		break;
       
   1896 	case E1000_RXBUFFER_8192:
       
   1897 		rctl |= E1000_RCTL_SZ_8192;
       
   1898 		break;
       
   1899 	case E1000_RXBUFFER_16384:
       
   1900 		rctl |= E1000_RCTL_SZ_16384;
       
   1901 		break;
       
   1902 	}
       
  1903 
       
  1904 	/* This is useful for sniffing bad packets. */
       
  1905 	if (adapter->netdev->features & NETIF_F_RXALL) {
       
  1906 		/* UPE and MPE will be handled by normal PROMISC logic
       
   1907 		 * in e1000_set_rx_mode() */
       
  1908 		rctl |= (E1000_RCTL_SBP | /* Receive bad packets */
       
  1909 			 E1000_RCTL_BAM | /* RX All Bcast Pkts */
       
  1910 			 E1000_RCTL_PMCF); /* RX All MAC Ctrl Pkts */
       
  1911 
       
  1912 		rctl &= ~(E1000_RCTL_VFE | /* Disable VLAN filter */
       
  1913 			  E1000_RCTL_DPF | /* Allow filtered pause */
       
  1914 			  E1000_RCTL_CFIEN); /* Dis VLAN CFIEN Filter */
       
  1915 		/* Do not mess with E1000_CTRL_VME, it affects transmit as well,
       
  1916 		 * and that breaks VLANs.
       
  1917 		 */
       
  1918 	}
       
  1919 
       
  1920 	ew32(RCTL, rctl);
       
  1921 }
       
  1922 
       
  1923 /**
       
  1924  * e1000_configure_rx - Configure 8254x Receive Unit after Reset
       
  1925  * @adapter: board private structure
       
  1926  *
       
  1927  * Configure the Rx unit of the MAC after a reset.
       
  1928  **/
       
  1929 
       
  1930 static void e1000_configure_rx(struct e1000_adapter *adapter)
       
  1931 {
       
  1932 	u64 rdba;
       
  1933 	struct e1000_hw *hw = &adapter->hw;
       
  1934 	u32 rdlen, rctl, rxcsum;
       
  1935 
       
  1936 	if (adapter->netdev->mtu > ETH_DATA_LEN) {
       
  1937 		rdlen = adapter->rx_ring[0].count *
       
  1938 		        sizeof(struct e1000_rx_desc);
       
  1939 		adapter->clean_rx = e1000_clean_jumbo_rx_irq;
       
  1940 		adapter->alloc_rx_buf = e1000_alloc_jumbo_rx_buffers;
       
  1941 	} else {
       
  1942 		rdlen = adapter->rx_ring[0].count *
       
  1943 		        sizeof(struct e1000_rx_desc);
       
  1944 		adapter->clean_rx = e1000_clean_rx_irq;
       
  1945 		adapter->alloc_rx_buf = e1000_alloc_rx_buffers;
       
  1946 	}
       
  1947 
       
  1948 	/* disable receives while setting up the descriptors */
       
  1949 	rctl = er32(RCTL);
       
  1950 	ew32(RCTL, rctl & ~E1000_RCTL_EN);
       
  1951 
       
  1952 	/* set the Receive Delay Timer Register */
       
  1953 	ew32(RDTR, adapter->rx_int_delay);
       
  1954 
       
  1955 	if (hw->mac_type >= e1000_82540) {
       
  1956 		ew32(RADV, adapter->rx_abs_int_delay);
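        		/* adapter->itr is a rate in interrupts/s, while the ITR
        		 * register takes the minimum interval between interrupts
        		 * in 256 ns units, hence 10^9 / (itr * 256). */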
       
  1957 		if (adapter->itr_setting != 0)
       
  1958 			ew32(ITR, 1000000000 / (adapter->itr * 256));
       
  1959 	}
       
  1960 
       
  1961 	/* Setup the HW Rx Head and Tail Descriptor Pointers and
       
  1962 	 * the Base and Length of the Rx Descriptor Ring */
       
  1963 	switch (adapter->num_rx_queues) {
       
  1964 	case 1:
       
  1965 	default:
       
  1966 		rdba = adapter->rx_ring[0].dma;
       
  1967 		ew32(RDLEN, rdlen);
       
  1968 		ew32(RDBAH, (rdba >> 32));
       
  1969 		ew32(RDBAL, (rdba & 0x00000000ffffffffULL));
       
  1970 		ew32(RDT, 0);
       
  1971 		ew32(RDH, 0);
       
  1972 		adapter->rx_ring[0].rdh = ((hw->mac_type >= e1000_82543) ? E1000_RDH : E1000_82542_RDH);
       
  1973 		adapter->rx_ring[0].rdt = ((hw->mac_type >= e1000_82543) ? E1000_RDT : E1000_82542_RDT);
       
  1974 		break;
       
  1975 	}
       
  1976 
       
  1977 	/* Enable 82543 Receive Checksum Offload for TCP and UDP */
       
  1978 	if (hw->mac_type >= e1000_82543) {
       
  1979 		rxcsum = er32(RXCSUM);
       
  1980 		if (adapter->rx_csum)
       
  1981 			rxcsum |= E1000_RXCSUM_TUOFL;
       
  1982 		else
       
  1983 			/* don't need to clear IPPCSE as it defaults to 0 */
       
  1984 			rxcsum &= ~E1000_RXCSUM_TUOFL;
       
  1985 		ew32(RXCSUM, rxcsum);
       
  1986 	}
       
  1987 
       
  1988 	/* Enable Receives */
       
  1989 	ew32(RCTL, rctl | E1000_RCTL_EN);
       
  1990 }
       
  1991 
       
  1992 /**
       
  1993  * e1000_free_tx_resources - Free Tx Resources per Queue
       
  1994  * @adapter: board private structure
       
  1995  * @tx_ring: Tx descriptor ring for a specific queue
       
  1996  *
       
  1997  * Free all transmit software resources
       
  1998  **/
       
  1999 
       
  2000 static void e1000_free_tx_resources(struct e1000_adapter *adapter,
       
  2001 				    struct e1000_tx_ring *tx_ring)
       
  2002 {
       
  2003 	struct pci_dev *pdev = adapter->pdev;
       
  2004 
       
  2005 	e1000_clean_tx_ring(adapter, tx_ring);
       
  2006 
       
  2007 	vfree(tx_ring->buffer_info);
       
  2008 	tx_ring->buffer_info = NULL;
       
  2009 
       
  2010 	dma_free_coherent(&pdev->dev, tx_ring->size, tx_ring->desc,
       
  2011 			  tx_ring->dma);
       
  2012 
       
  2013 	tx_ring->desc = NULL;
       
  2014 }
       
  2015 
       
  2016 /**
       
  2017  * e1000_free_all_tx_resources - Free Tx Resources for All Queues
       
  2018  * @adapter: board private structure
       
  2019  *
       
  2020  * Free all transmit software resources
       
  2021  **/
       
  2022 
       
  2023 void e1000_free_all_tx_resources(struct e1000_adapter *adapter)
       
  2024 {
       
  2025 	int i;
       
  2026 
       
  2027 	for (i = 0; i < adapter->num_tx_queues; i++)
       
  2028 		e1000_free_tx_resources(adapter, &adapter->tx_ring[i]);
       
  2029 }
       
  2030 
       
  2031 static void e1000_unmap_and_free_tx_resource(struct e1000_adapter *adapter,
       
  2032 					     struct e1000_buffer *buffer_info)
       
  2033 {
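        	/* In EtherCAT mode nothing is unmapped or freed here,
        	 * presumably because the master module owns and reuses its
        	 * transmit frames instead of allocating an skb per packet. */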
       
  2034 	if (adapter->ecdev) {
       
  2035 		return;
       
  2036 	}
       
  2037 
       
  2038 	if (buffer_info->dma) {
       
  2039 		if (buffer_info->mapped_as_page)
       
  2040 			dma_unmap_page(&adapter->pdev->dev, buffer_info->dma,
       
  2041 				       buffer_info->length, DMA_TO_DEVICE);
       
  2042 		else
       
  2043 			dma_unmap_single(&adapter->pdev->dev, buffer_info->dma,
       
  2044 					 buffer_info->length,
       
  2045 					 DMA_TO_DEVICE);
       
  2046 		buffer_info->dma = 0;
       
  2047 	}
       
  2048 	if (buffer_info->skb) {
       
  2049 		dev_kfree_skb_any(buffer_info->skb);
       
  2050 		buffer_info->skb = NULL;
       
  2051 	}
       
  2052 	buffer_info->time_stamp = 0;
       
  2053 	/* buffer_info must be completely set up in the transmit path */
       
  2054 }
       
  2055 
       
  2056 /**
       
  2057  * e1000_clean_tx_ring - Free Tx Buffers
       
  2058  * @adapter: board private structure
       
  2059  * @tx_ring: ring to be cleaned
       
  2060  **/
       
  2061 
       
  2062 static void e1000_clean_tx_ring(struct e1000_adapter *adapter,
       
  2063 				struct e1000_tx_ring *tx_ring)
       
  2064 {
       
  2065 	struct e1000_hw *hw = &adapter->hw;
       
  2066 	struct e1000_buffer *buffer_info;
       
  2067 	unsigned long size;
       
  2068 	unsigned int i;
       
  2069 
       
  2070 	/* Free all the Tx ring sk_buffs */
       
  2071 
       
  2072 	for (i = 0; i < tx_ring->count; i++) {
       
  2073 		buffer_info = &tx_ring->buffer_info[i];
       
  2074 		e1000_unmap_and_free_tx_resource(adapter, buffer_info);
       
  2075 	}
       
  2076 
       
  2077 	size = sizeof(struct e1000_buffer) * tx_ring->count;
       
  2078 	memset(tx_ring->buffer_info, 0, size);
       
  2079 
       
  2080 	/* Zero out the descriptor ring */
       
  2081 
       
  2082 	memset(tx_ring->desc, 0, tx_ring->size);
       
  2083 
       
  2084 	tx_ring->next_to_use = 0;
       
  2085 	tx_ring->next_to_clean = 0;
       
  2086 	tx_ring->last_tx_tso = false;
       
  2087 
       
  2088 	writel(0, hw->hw_addr + tx_ring->tdh);
       
  2089 	writel(0, hw->hw_addr + tx_ring->tdt);
       
  2090 }
       
  2091 
       
  2092 /**
       
  2093  * e1000_clean_all_tx_rings - Free Tx Buffers for all queues
       
  2094  * @adapter: board private structure
       
  2095  **/
       
  2096 
       
  2097 static void e1000_clean_all_tx_rings(struct e1000_adapter *adapter)
       
  2098 {
       
  2099 	int i;
       
  2100 
       
  2101 	for (i = 0; i < adapter->num_tx_queues; i++)
       
  2102 		e1000_clean_tx_ring(adapter, &adapter->tx_ring[i]);
       
  2103 }
       
  2104 
       
  2105 /**
       
  2106  * e1000_free_rx_resources - Free Rx Resources
       
  2107  * @adapter: board private structure
       
  2108  * @rx_ring: ring to clean the resources from
       
  2109  *
       
  2110  * Free all receive software resources
       
  2111  **/
       
  2112 
       
  2113 static void e1000_free_rx_resources(struct e1000_adapter *adapter,
       
  2114 				    struct e1000_rx_ring *rx_ring)
       
  2115 {
       
  2116 	struct pci_dev *pdev = adapter->pdev;
       
  2117 
       
  2118 	e1000_clean_rx_ring(adapter, rx_ring);
       
  2119 
       
  2120 	vfree(rx_ring->buffer_info);
       
  2121 	rx_ring->buffer_info = NULL;
       
  2122 
       
  2123 	dma_free_coherent(&pdev->dev, rx_ring->size, rx_ring->desc,
       
  2124 			  rx_ring->dma);
       
  2125 
       
  2126 	rx_ring->desc = NULL;
       
  2127 }
       
  2128 
       
  2129 /**
       
  2130  * e1000_free_all_rx_resources - Free Rx Resources for All Queues
       
  2131  * @adapter: board private structure
       
  2132  *
       
  2133  * Free all receive software resources
       
  2134  **/
       
  2135 
       
  2136 void e1000_free_all_rx_resources(struct e1000_adapter *adapter)
       
  2137 {
       
  2138 	int i;
       
  2139 
       
  2140 	for (i = 0; i < adapter->num_rx_queues; i++)
       
  2141 		e1000_free_rx_resources(adapter, &adapter->rx_ring[i]);
       
  2142 }
       
  2143 
       
  2144 /**
       
  2145  * e1000_clean_rx_ring - Free Rx Buffers per Queue
       
  2146  * @adapter: board private structure
       
  2147  * @rx_ring: ring to free buffers from
       
  2148  **/
       
  2149 
       
  2150 static void e1000_clean_rx_ring(struct e1000_adapter *adapter,
       
  2151 				struct e1000_rx_ring *rx_ring)
       
  2152 {
       
  2153 	struct e1000_hw *hw = &adapter->hw;
       
  2154 	struct e1000_buffer *buffer_info;
       
  2155 	struct pci_dev *pdev = adapter->pdev;
       
  2156 	unsigned long size;
       
  2157 	unsigned int i;
       
  2158 
       
  2159 	/* Free all the Rx ring sk_buffs */
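        	/* Unmapping must match how the active receive path mapped the
        	 * buffer: dma_map_single() in the normal path, dma_map_page()
        	 * in the jumbo path. */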
       
  2160 	for (i = 0; i < rx_ring->count; i++) {
       
  2161 		buffer_info = &rx_ring->buffer_info[i];
       
  2162 		if (buffer_info->dma &&
       
  2163 		    adapter->clean_rx == e1000_clean_rx_irq) {
       
  2164 			dma_unmap_single(&pdev->dev, buffer_info->dma,
       
  2165 			                 buffer_info->length,
       
  2166 					 DMA_FROM_DEVICE);
       
  2167 		} else if (buffer_info->dma &&
       
  2168 		           adapter->clean_rx == e1000_clean_jumbo_rx_irq) {
       
  2169 			dma_unmap_page(&pdev->dev, buffer_info->dma,
       
  2170 				       buffer_info->length,
       
  2171 				       DMA_FROM_DEVICE);
       
  2172 		}
       
  2173 
       
  2174 		buffer_info->dma = 0;
       
  2175 		if (buffer_info->page) {
       
  2176 			put_page(buffer_info->page);
       
  2177 			buffer_info->page = NULL;
       
  2178 		}
       
  2179 		if (buffer_info->skb) {
       
  2180 			dev_kfree_skb(buffer_info->skb);
       
  2181 			buffer_info->skb = NULL;
       
  2182 		}
       
  2183 	}
       
  2184 
       
  2185 	/* there also may be some cached data from a chained receive */
       
  2186 	if (rx_ring->rx_skb_top) {
       
  2187 		dev_kfree_skb(rx_ring->rx_skb_top);
       
  2188 		rx_ring->rx_skb_top = NULL;
       
  2189 	}
       
  2190 
       
  2191 	size = sizeof(struct e1000_buffer) * rx_ring->count;
       
  2192 	memset(rx_ring->buffer_info, 0, size);
       
  2193 
       
  2194 	/* Zero out the descriptor ring */
       
  2195 	memset(rx_ring->desc, 0, rx_ring->size);
       
  2196 
       
  2197 	rx_ring->next_to_clean = 0;
       
  2198 	rx_ring->next_to_use = 0;
       
  2199 
       
  2200 	writel(0, hw->hw_addr + rx_ring->rdh);
       
  2201 	writel(0, hw->hw_addr + rx_ring->rdt);
       
  2202 }
       
  2203 
       
  2204 /**
       
  2205  * e1000_clean_all_rx_rings - Free Rx Buffers for all queues
       
  2206  * @adapter: board private structure
       
  2207  **/
       
  2208 
       
  2209 static void e1000_clean_all_rx_rings(struct e1000_adapter *adapter)
       
  2210 {
       
  2211 	int i;
       
  2212 
       
  2213 	for (i = 0; i < adapter->num_rx_queues; i++)
       
  2214 		e1000_clean_rx_ring(adapter, &adapter->rx_ring[i]);
       
  2215 }
       
  2216 
       
  2217 /* The 82542 2.0 (revision 2) needs to have the receive unit in reset
       
  2218  * and memory write and invalidate disabled for certain operations
       
  2219  */
       
  2220 static void e1000_enter_82542_rst(struct e1000_adapter *adapter)
       
  2221 {
       
  2222 	struct e1000_hw *hw = &adapter->hw;
       
  2223 	struct net_device *netdev = adapter->netdev;
       
  2224 	u32 rctl;
       
  2225 
       
  2226 	e1000_pci_clear_mwi(hw);
       
  2227 
       
  2228 	rctl = er32(RCTL);
       
  2229 	rctl |= E1000_RCTL_RST;
       
  2230 	ew32(RCTL, rctl);
       
  2231 	E1000_WRITE_FLUSH();
       
  2232 	mdelay(5);
       
  2233 
       
  2234 	if (!adapter->ecdev && netif_running(netdev))
       
  2235 		e1000_clean_all_rx_rings(adapter);
       
  2236 }
       
  2237 
       
  2238 static void e1000_leave_82542_rst(struct e1000_adapter *adapter)
       
  2239 {
       
  2240 	struct e1000_hw *hw = &adapter->hw;
       
  2241 	struct net_device *netdev = adapter->netdev;
       
  2242 	u32 rctl;
       
  2243 
       
  2244 	rctl = er32(RCTL);
       
  2245 	rctl &= ~E1000_RCTL_RST;
       
  2246 	ew32(RCTL, rctl);
       
  2247 	E1000_WRITE_FLUSH();
       
  2248 	mdelay(5);
       
  2249 
       
  2250 	if (hw->pci_cmd_word & PCI_COMMAND_INVALIDATE)
       
  2251 		e1000_pci_set_mwi(hw);
       
  2252 
       
   2253 	if (adapter->ecdev || netif_running(netdev)) {
       
  2254 		/* No need to loop, because 82542 supports only 1 queue */
       
  2255 		struct e1000_rx_ring *ring = &adapter->rx_ring[0];
       
  2256 		e1000_configure_rx(adapter);
       
  2257 		if (adapter->ecdev) {
       
  2258 			/* fill rx ring completely! */
       
  2259 			adapter->alloc_rx_buf(adapter, ring, ring->count);
       
  2260 		} else {
       
  2261 			/* this one leaves the last ring element unallocated! */
       
  2262 			adapter->alloc_rx_buf(adapter, ring, E1000_DESC_UNUSED(ring));
       
  2263 		}
       
  2264 
       
  2265 	}
       
  2266 }
       
  2267 
       
  2268 /**
       
  2269  * e1000_set_mac - Change the Ethernet Address of the NIC
       
  2270  * @netdev: network interface device structure
       
  2271  * @p: pointer to an address structure
       
  2272  *
       
  2273  * Returns 0 on success, negative on failure
       
  2274  **/
       
  2275 
       
  2276 static int e1000_set_mac(struct net_device *netdev, void *p)
       
  2277 {
       
  2278 	struct e1000_adapter *adapter = netdev_priv(netdev);
       
  2279 	struct e1000_hw *hw = &adapter->hw;
       
  2280 	struct sockaddr *addr = p;
       
  2281 
       
  2282 	if (!is_valid_ether_addr(addr->sa_data))
       
  2283 		return -EADDRNOTAVAIL;
       
  2284 
       
  2285 	/* 82542 2.0 needs to be in reset to write receive address registers */
       
  2286 
       
  2287 	if (hw->mac_type == e1000_82542_rev2_0)
       
  2288 		e1000_enter_82542_rst(adapter);
       
  2289 
       
  2290 	memcpy(netdev->dev_addr, addr->sa_data, netdev->addr_len);
       
  2291 	memcpy(hw->mac_addr, addr->sa_data, netdev->addr_len);
       
  2292 
       
  2293 	e1000_rar_set(hw, hw->mac_addr, 0);
       
  2294 
       
  2295 	if (hw->mac_type == e1000_82542_rev2_0)
       
  2296 		e1000_leave_82542_rst(adapter);
       
  2297 
       
  2298 	return 0;
       
  2299 }
       
  2300 
       
  2301 /**
       
  2302  * e1000_set_rx_mode - Secondary Unicast, Multicast and Promiscuous mode set
       
  2303  * @netdev: network interface device structure
       
  2304  *
       
  2305  * The set_rx_mode entry point is called whenever the unicast or multicast
       
  2306  * address lists or the network interface flags are updated. This routine is
       
  2307  * responsible for configuring the hardware for proper unicast, multicast,
       
  2308  * promiscuous mode, and all-multi behavior.
       
  2309  **/
       
  2310 
       
  2311 static void e1000_set_rx_mode(struct net_device *netdev)
       
  2312 {
       
  2313 	struct e1000_adapter *adapter = netdev_priv(netdev);
       
  2314 	struct e1000_hw *hw = &adapter->hw;
       
  2315 	struct netdev_hw_addr *ha;
       
  2316 	bool use_uc = false;
       
  2317 	u32 rctl;
       
  2318 	u32 hash_value;
       
  2319 	int i, rar_entries = E1000_RAR_ENTRIES;
       
  2320 	int mta_reg_count = E1000_NUM_MTA_REGISTERS;
       
  2321 	u32 *mcarray = kcalloc(mta_reg_count, sizeof(u32), GFP_ATOMIC);
       
  2322 
       
  2323 	if (!mcarray) {
       
  2324 		e_err(probe, "memory allocation failed\n");
       
  2325 		return;
       
  2326 	}
       
  2327 
       
  2328 	/* Check for Promiscuous and All Multicast modes */
       
  2329 
       
  2330 	rctl = er32(RCTL);
       
  2331 
       
  2332 	if (netdev->flags & IFF_PROMISC) {
       
  2333 		rctl |= (E1000_RCTL_UPE | E1000_RCTL_MPE);
       
  2334 		rctl &= ~E1000_RCTL_VFE;
       
  2335 	} else {
       
  2336 		if (netdev->flags & IFF_ALLMULTI)
       
  2337 			rctl |= E1000_RCTL_MPE;
       
  2338 		else
       
  2339 			rctl &= ~E1000_RCTL_MPE;
       
  2340 		/* Enable VLAN filter if there is a VLAN */
       
  2341 		if (e1000_vlan_used(adapter))
       
  2342 			rctl |= E1000_RCTL_VFE;
       
  2343 	}
       
  2344 
       
  2345 	if (netdev_uc_count(netdev) > rar_entries - 1) {
       
  2346 		rctl |= E1000_RCTL_UPE;
       
  2347 	} else if (!(netdev->flags & IFF_PROMISC)) {
       
  2348 		rctl &= ~E1000_RCTL_UPE;
       
  2349 		use_uc = true;
       
  2350 	}
       
  2351 
       
  2352 	ew32(RCTL, rctl);
       
  2353 
       
  2354 	/* 82542 2.0 needs to be in reset to write receive address registers */
       
  2355 
       
  2356 	if (hw->mac_type == e1000_82542_rev2_0)
       
  2357 		e1000_enter_82542_rst(adapter);
       
  2358 
       
  2359 	/* load the first 14 addresses into the exact filters 1-14. Unicast
       
  2360 	 * addresses take precedence to avoid disabling unicast filtering
       
  2361 	 * when possible.
       
  2362 	 *
       
   2363 	 * RAR 0 is used for the station MAC address.
       
   2364 	 * If there are not 14 addresses, go ahead and clear the filters
       
  2365 	 */
       
  2366 	i = 1;
       
  2367 	if (use_uc)
       
  2368 		netdev_for_each_uc_addr(ha, netdev) {
       
  2369 			if (i == rar_entries)
       
  2370 				break;
       
  2371 			e1000_rar_set(hw, ha->addr, i++);
       
  2372 		}
       
  2373 
       
  2374 	netdev_for_each_mc_addr(ha, netdev) {
       
  2375 		if (i == rar_entries) {
       
  2376 			/* load any remaining addresses into the hash table */
       
  2377 			u32 hash_reg, hash_bit, mta;
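        			/* The upper 7 bits of the hash select one of the
        			 * 128 MTA registers, the lower 5 bits select a bit
        			 * within that register. */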
       
  2378 			hash_value = e1000_hash_mc_addr(hw, ha->addr);
       
  2379 			hash_reg = (hash_value >> 5) & 0x7F;
       
  2380 			hash_bit = hash_value & 0x1F;
       
  2381 			mta = (1 << hash_bit);
       
  2382 			mcarray[hash_reg] |= mta;
       
  2383 		} else {
       
  2384 			e1000_rar_set(hw, ha->addr, i++);
       
  2385 		}
       
  2386 	}
       
  2387 
       
  2388 	for (; i < rar_entries; i++) {
       
  2389 		E1000_WRITE_REG_ARRAY(hw, RA, i << 1, 0);
       
  2390 		E1000_WRITE_FLUSH();
       
  2391 		E1000_WRITE_REG_ARRAY(hw, RA, (i << 1) + 1, 0);
       
  2392 		E1000_WRITE_FLUSH();
       
  2393 	}
       
  2394 
       
   2395 	/* write the hash table completely; writing from the bottom avoids
       
   2396 	 * broken write-combining chipsets and the need to flush each write */
       
   2397 	for (i = mta_reg_count - 1; i >= 0; i--) {
       
   2398 		/*
       
   2399 		 * The 82544 has an erratum where writing an odd offset
       
   2400 		 * overwrites the previous even offset; writing backwards
       
   2401 		 * over the range avoids this because the odd offset is
       
   2402 		 * always written first
       
  2403 		 */
       
  2404 		E1000_WRITE_REG_ARRAY(hw, MTA, i, mcarray[i]);
       
  2405 	}
       
  2406 	E1000_WRITE_FLUSH();
       
  2407 
       
  2408 	if (hw->mac_type == e1000_82542_rev2_0)
       
  2409 		e1000_leave_82542_rst(adapter);
       
  2410 
       
  2411 	kfree(mcarray);
       
  2412 }
       
  2413 
       
  2414 /**
       
  2415  * e1000_update_phy_info_task - get phy info
       
  2416  * @work: work struct contained inside adapter struct
       
  2417  *
       
  2418  * Need to wait a few seconds after link up to get diagnostic information from
       
  2419  * the phy
       
  2420  */
       
  2421 static void e1000_update_phy_info_task(struct work_struct *work)
       
  2422 {
       
  2423 	struct e1000_adapter *adapter = container_of(work,
       
  2424 						     struct e1000_adapter,
       
  2425 						     phy_info_task.work);
       
  2426 	if (test_bit(__E1000_DOWN, &adapter->flags))
       
  2427 		return;
       
  2428 	mutex_lock(&adapter->mutex);
       
  2429 	e1000_phy_get_info(&adapter->hw, &adapter->phy_info);
       
  2430 	mutex_unlock(&adapter->mutex);
       
  2431 }
       
  2432 
       
  2433 /**
       
  2434  * e1000_82547_tx_fifo_stall_task - task to complete work
       
  2435  * @work: work struct contained inside adapter struct
       
  2436  **/
       
  2437 static void e1000_82547_tx_fifo_stall_task(struct work_struct *work)
       
  2438 {
       
  2439 	struct e1000_adapter *adapter = container_of(work,
       
  2440 						     struct e1000_adapter,
       
  2441 						     fifo_stall_task.work);
       
  2442 	struct e1000_hw *hw = &adapter->hw;
       
  2443 	struct net_device *netdev = adapter->netdev;
       
  2444 	u32 tctl;
       
  2445 
       
  2446 	if (test_bit(__E1000_DOWN, &adapter->flags))
       
  2447 		return;
       
  2448 	mutex_lock(&adapter->mutex);
       
  2449 	if (atomic_read(&adapter->tx_fifo_stall)) {
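        		/* Only rewind the on-chip FIFO pointers once both the
        		 * descriptor ring (TDT/TDH) and the data FIFO (TDFT/TDFH,
        		 * TDFTS/TDFHS) report head == tail, i.e. the transmit
        		 * unit is idle. */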
       
  2450 		if ((er32(TDT) == er32(TDH)) &&
       
  2451 		   (er32(TDFT) == er32(TDFH)) &&
       
  2452 		   (er32(TDFTS) == er32(TDFHS))) {
       
  2453 			tctl = er32(TCTL);
       
  2454 			ew32(TCTL, tctl & ~E1000_TCTL_EN);
       
  2455 			ew32(TDFT, adapter->tx_head_addr);
       
  2456 			ew32(TDFH, adapter->tx_head_addr);
       
  2457 			ew32(TDFTS, adapter->tx_head_addr);
       
  2458 			ew32(TDFHS, adapter->tx_head_addr);
       
  2459 			ew32(TCTL, tctl);
       
  2460 			E1000_WRITE_FLUSH();
       
  2461 
       
  2462 			adapter->tx_fifo_head = 0;
       
  2463 			atomic_set(&adapter->tx_fifo_stall, 0);
       
  2464 			netif_wake_queue(netdev);
       
  2465 		} else if (!test_bit(__E1000_DOWN, &adapter->flags)) {
       
  2466 			schedule_delayed_work(&adapter->fifo_stall_task, 1);
       
  2467 		}
       
  2468 	}
       
  2469 	mutex_unlock(&adapter->mutex);
       
  2470 }
       
  2471 
       
  2472 bool e1000_has_link(struct e1000_adapter *adapter)
       
  2473 {
       
  2474 	struct e1000_hw *hw = &adapter->hw;
       
  2475 	bool link_active = false;
       
  2476 
       
   2477 	/* get_link_status is set on an LSC (link status change) interrupt
       
   2478 	 * or an rx sequence error interrupt (except on the Intel ce4100,
       
   2479 	 * where it is forced below). It stays set until
       
   2480 	 * e1000_check_for_link() establishes link, for copper adapters
       
   2481 	 * ONLY
       
  2482 	 */
       
  2483 	switch (hw->media_type) {
       
  2484 	case e1000_media_type_copper:
       
  2485 		if (hw->mac_type == e1000_ce4100)
       
  2486 			hw->get_link_status = 1;
       
  2487 		if (hw->get_link_status) {
       
  2488 			e1000_check_for_link(hw);
       
  2489 			link_active = !hw->get_link_status;
       
  2490 		} else {
       
  2491 			link_active = true;
       
  2492 		}
       
  2493 		break;
       
  2494 	case e1000_media_type_fiber:
       
  2495 		e1000_check_for_link(hw);
       
  2496 		link_active = !!(er32(STATUS) & E1000_STATUS_LU);
       
  2497 		break;
       
  2498 	case e1000_media_type_internal_serdes:
       
  2499 		e1000_check_for_link(hw);
       
  2500 		link_active = hw->serdes_has_link;
       
  2501 		break;
       
  2502 	default:
       
  2503 		break;
       
  2504 	}
       
  2505 
       
  2506 	return link_active;
       
  2507 }
       
  2508 
       
  2509 /**
       
  2510  * e1000_watchdog - work function
       
  2511  * @work: work struct contained inside adapter struct
       
  2512  **/
       
  2513 static void e1000_watchdog(struct work_struct *work)
       
  2514 {
       
  2515 	struct e1000_adapter *adapter = container_of(work,
       
  2516 						     struct e1000_adapter,
       
  2517 						     watchdog_task.work);
       
  2518 	struct e1000_hw *hw = &adapter->hw;
       
  2519 	struct net_device *netdev = adapter->netdev;
       
  2520 	struct e1000_tx_ring *txdr = adapter->tx_ring;
       
  2521 	u32 link, tctl;
       
  2522 
       
  2523 	if (test_bit(__E1000_DOWN, &adapter->flags))
       
  2524 		return;
       
  2525 
       
  2526 	mutex_lock(&adapter->mutex);
       
  2527 	link = e1000_has_link(adapter);
       
   2528 	if (!adapter->ecdev && netif_carrier_ok(netdev) && link)
       
  2529 		goto link_up;
       
  2530 
       
  2531 	if (link) {
       
  2532 		if ((adapter->ecdev && !ecdev_get_link(adapter->ecdev))
       
  2533 				|| (!adapter->ecdev && !netif_carrier_ok(netdev))) {
       
  2534 			u32 ctrl;
       
  2535 			bool txb2b __attribute__ ((unused)) = true;
       
  2536 			/* update snapshot of PHY registers on LSC */
       
  2537 			e1000_get_speed_and_duplex(hw,
       
  2538 			                           &adapter->link_speed,
       
  2539 			                           &adapter->link_duplex);
       
  2540 
       
  2541 			ctrl = er32(CTRL);
       
  2542 			pr_info("%s NIC Link is Up %d Mbps %s, "
       
  2543 				"Flow Control: %s\n",
       
  2544 				netdev->name,
       
  2545 				adapter->link_speed,
       
  2546 				adapter->link_duplex == FULL_DUPLEX ?
       
  2547 				"Full Duplex" : "Half Duplex",
       
  2548 				((ctrl & E1000_CTRL_TFCE) && (ctrl &
       
  2549 				E1000_CTRL_RFCE)) ? "RX/TX" : ((ctrl &
       
  2550 				E1000_CTRL_RFCE) ? "RX" : ((ctrl &
       
  2551 				E1000_CTRL_TFCE) ? "TX" : "None")));
       
  2552 
       
  2553 			/* adjust timeout factor according to speed/duplex */
       
  2554 			adapter->tx_timeout_factor = 1;
       
  2555 			switch (adapter->link_speed) {
       
  2556 			case SPEED_10:
       
  2557 				txb2b = false;
       
  2558 				adapter->tx_timeout_factor = 16;
       
  2559 				break;
       
  2560 			case SPEED_100:
       
  2561 				txb2b = false;
       
  2562 				/* maybe add some timeout factor ? */
       
  2563 				break;
       
  2564 			}
       
  2565 
       
  2566 			/* enable transmits in the hardware */
       
  2567 			tctl = er32(TCTL);
       
  2568 			tctl |= E1000_TCTL_EN;
       
  2569 			ew32(TCTL, tctl);
       
  2570 
       
  2571 			if (adapter->ecdev) {
       
  2572 				ecdev_set_link(adapter->ecdev, 1);
       
   2573 			} else {
       
  2575 				netif_carrier_on(netdev);
       
  2576 				if (!test_bit(__E1000_DOWN, &adapter->flags))
       
  2577 					schedule_delayed_work(&adapter->phy_info_task,
       
  2578 							2 * HZ);
       
  2579 			}
       
  2580 			adapter->smartspeed = 0;
       
  2581 		}
       
  2582 	} else {
       
  2583 		if ((adapter->ecdev && ecdev_get_link(adapter->ecdev))
       
  2584 				|| (!adapter->ecdev && netif_carrier_ok(netdev))) {
       
  2585 			adapter->link_speed = 0;
       
  2586 			adapter->link_duplex = 0;
       
  2587 			pr_info("%s NIC Link is Down\n",
       
  2588 				netdev->name);
       
  2589 
       
  2590 			if (adapter->ecdev) {
       
  2591 				ecdev_set_link(adapter->ecdev, 0);
       
  2592 			} else {
       
  2593 				netif_carrier_off(netdev);
       
  2594 
       
  2595 				if (!test_bit(__E1000_DOWN, &adapter->flags))
       
  2596 					schedule_delayed_work(&adapter->phy_info_task,
       
  2597 							2 * HZ);
       
  2598 			}
       
  2599 		}
       
  2600 
       
  2601 		e1000_smartspeed(adapter);
       
  2602 	}
       
  2603 
       
  2604 link_up:
       
  2605 	e1000_update_stats(adapter);
       
  2606 
       
  2607 	hw->tx_packet_delta = adapter->stats.tpt - adapter->tpt_old;
       
  2608 	adapter->tpt_old = adapter->stats.tpt;
       
  2609 	hw->collision_delta = adapter->stats.colc - adapter->colc_old;
       
  2610 	adapter->colc_old = adapter->stats.colc;
       
  2611 
       
  2612 	adapter->gorcl = adapter->stats.gorcl - adapter->gorcl_old;
       
  2613 	adapter->gorcl_old = adapter->stats.gorcl;
       
  2614 	adapter->gotcl = adapter->stats.gotcl - adapter->gotcl_old;
       
  2615 	adapter->gotcl_old = adapter->stats.gotcl;
       
  2616 
       
  2617 	e1000_update_adaptive(hw);
       
  2618 
       
  2619 	if (!adapter->ecdev && !netif_carrier_ok(netdev)) {
       
  2620 		if (E1000_DESC_UNUSED(txdr) + 1 < txdr->count) {
       
  2621 			/* We've lost link, so the controller stops DMA,
       
  2622 			 * but we've got queued Tx work that's never going
       
  2623 			 * to get done, so reset controller to flush Tx.
       
  2624 			 * (Do the reset outside of interrupt context). */
       
  2625 			adapter->tx_timeout_count++;
       
  2626 			schedule_work(&adapter->reset_task);
       
  2627 			/* exit immediately since reset is imminent */
       
  2628 			goto unlock;
       
  2629 		}
       
  2630 	}
       
  2631 
       
  2632 	/* Simple mode for Interrupt Throttle Rate (ITR) */
       
  2633 	if (hw->mac_type >= e1000_82540 && adapter->itr_setting == 4) {
       
  2634 		/*
       
  2635 		 * Symmetric Tx/Rx gets a reduced ITR=2000;
       
  2636 		 * Total asymmetrical Tx or Rx gets ITR=8000;
       
  2637 		 * everyone else is between 2000-8000.
       
  2638 		 */
       
  2639 		u32 goc = (adapter->gotcl + adapter->gorcl) / 10000;
       
  2640 		u32 dif = (adapter->gotcl > adapter->gorcl ?
       
  2641 			    adapter->gotcl - adapter->gorcl :
       
  2642 			    adapter->gorcl - adapter->gotcl) / 10000;
       
  2643 		u32 itr = goc > 0 ? (dif * 6000 / goc + 2000) : 8000;
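        		/* dif/goc is the Tx/Rx asymmetry ratio in [0, 1], so itr
        		 * interpolates linearly from 2000 ints/s for symmetric
        		 * traffic up to 8000 for fully asymmetric traffic. */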
       
  2644 
       
  2645 		ew32(ITR, 1000000000 / (itr * 256));
       
  2646 	}
       
  2647 
       
  2648 	/* Cause software interrupt to ensure rx ring is cleaned */
       
  2649 	ew32(ICS, E1000_ICS_RXDMT0);
       
  2650 
       
  2651 	/* Force detection of hung controller every watchdog period */
       
  2652 	adapter->detect_tx_hung = true;
       
  2653 
       
  2654 	/* Reschedule the task */
       
  2655 	if (!adapter->ecdev && !test_bit(__E1000_DOWN, &adapter->flags))
       
  2656 		schedule_delayed_work(&adapter->watchdog_task, 2 * HZ);
       
  2657 
       
  2658 unlock:
       
  2659 	mutex_unlock(&adapter->mutex);
       
  2660 }
       
  2661 
       
  2662 enum latency_range {
       
  2663 	lowest_latency = 0,
       
  2664 	low_latency = 1,
       
  2665 	bulk_latency = 2,
       
  2666 	latency_invalid = 255
       
  2667 };
       
  2668 
       
  2669 /**
       
  2670  * e1000_update_itr - update the dynamic ITR value based on statistics
       
  2671  * @adapter: pointer to adapter
       
  2672  * @itr_setting: current adapter->itr
       
  2673  * @packets: the number of packets during this measurement interval
       
  2674  * @bytes: the number of bytes during this measurement interval
       
  2675  *
       
   2676  *      Stores a new ITR value based on packet and byte
       
   2677  *      counts during the last interrupt.  The advantage of per-interrupt
       
  2678  *      computation is faster updates and more accurate ITR for the current
       
  2679  *      traffic pattern.  Constants in this function were computed
       
  2680  *      based on theoretical maximum wire speed and thresholds were set based
       
  2681  *      on testing data as well as attempting to minimize response time
       
  2682  *      while increasing bulk throughput.
       
   2683  *      This functionality is controlled by the InterruptThrottleRate module
       
   2684  *      parameter (see e1000_param.c).
       
  2685  **/
       
  2686 static unsigned int e1000_update_itr(struct e1000_adapter *adapter,
       
  2687 				     u16 itr_setting, int packets, int bytes)
       
  2688 {
       
  2689 	unsigned int retval = itr_setting;
       
  2690 	struct e1000_hw *hw = &adapter->hw;
       
  2691 
       
  2692 	if (unlikely(hw->mac_type < e1000_82540))
       
  2693 		goto update_itr_done;
       
  2694 
       
  2695 	if (packets == 0)
       
  2696 		goto update_itr_done;
       
  2697 
       
  2698 	switch (itr_setting) {
       
  2699 	case lowest_latency:
       
   2700 		/* jumbo frames get bulk treatment */
       
  2701 		if (bytes/packets > 8000)
       
  2702 			retval = bulk_latency;
       
  2703 		else if ((packets < 5) && (bytes > 512))
       
  2704 			retval = low_latency;
       
  2705 		break;
       
  2706 	case low_latency:  /* 50 usec aka 20000 ints/s */
       
  2707 		if (bytes > 10000) {
       
  2708 			/* jumbo frames need bulk latency setting */
       
  2709 			if (bytes/packets > 8000)
       
  2710 				retval = bulk_latency;
       
  2711 			else if ((packets < 10) || ((bytes/packets) > 1200))
       
  2712 				retval = bulk_latency;
       
   2713 			else if (packets > 35)
       
  2714 				retval = lowest_latency;
       
  2715 		} else if (bytes/packets > 2000)
       
  2716 			retval = bulk_latency;
       
  2717 		else if (packets <= 2 && bytes < 512)
       
  2718 			retval = lowest_latency;
       
  2719 		break;
       
  2720 	case bulk_latency: /* 250 usec aka 4000 ints/s */
       
  2721 		if (bytes > 25000) {
       
  2722 			if (packets > 35)
       
  2723 				retval = low_latency;
       
  2724 		} else if (bytes < 6000) {
       
  2725 			retval = low_latency;
       
  2726 		}
       
  2727 		break;
       
  2728 	}
       
  2729 
       
  2730 update_itr_done:
       
  2731 	return retval;
       
  2732 }
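        /* Example walk through the table above (illustrative): an interrupt
         * that cleaned 40 packets / 50000 bytes while in bulk_latency moves
         * the ring to low_latency (bytes > 25000 && packets > 35); a later
         * interval of 2 packets / 400 bytes then drops to lowest_latency. */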
       
  2733 
       
  2734 static void e1000_set_itr(struct e1000_adapter *adapter)
       
  2735 {
       
  2736 	struct e1000_hw *hw = &adapter->hw;
       
  2737 	u16 current_itr;
       
  2738 	u32 new_itr = adapter->itr;
       
  2739 
       
  2740 	if (unlikely(hw->mac_type < e1000_82540))
       
  2741 		return;
       
  2742 
       
  2743 	/* for non-gigabit speeds, just fix the interrupt rate at 4000 */
       
  2744 	if (unlikely(adapter->link_speed != SPEED_1000)) {
       
  2745 		current_itr = 0;
       
  2746 		new_itr = 4000;
       
  2747 		goto set_itr_now;
       
  2748 	}
       
  2749 
       
  2750 	adapter->tx_itr = e1000_update_itr(adapter,
       
  2751 	                            adapter->tx_itr,
       
  2752 	                            adapter->total_tx_packets,
       
  2753 	                            adapter->total_tx_bytes);
       
  2754 	/* conservative mode (itr 3) eliminates the lowest_latency setting */
       
  2755 	if (adapter->itr_setting == 3 && adapter->tx_itr == lowest_latency)
       
  2756 		adapter->tx_itr = low_latency;
       
  2757 
       
  2758 	adapter->rx_itr = e1000_update_itr(adapter,
       
  2759 	                            adapter->rx_itr,
       
  2760 	                            adapter->total_rx_packets,
       
  2761 	                            adapter->total_rx_bytes);
       
  2762 	/* conservative mode (itr 3) eliminates the lowest_latency setting */
       
  2763 	if (adapter->itr_setting == 3 && adapter->rx_itr == lowest_latency)
       
  2764 		adapter->rx_itr = low_latency;
       
  2765 
       
  2766 	current_itr = max(adapter->rx_itr, adapter->tx_itr);
       
  2767 
       
  2768 	switch (current_itr) {
       
  2769 	/* counts and packets in update_itr are dependent on these numbers */
       
  2770 	case lowest_latency:
       
  2771 		new_itr = 70000;
       
  2772 		break;
       
  2773 	case low_latency:
       
  2774 		new_itr = 20000; /* aka hwitr = ~200 */
       
  2775 		break;
       
  2776 	case bulk_latency:
       
  2777 		new_itr = 4000;
       
  2778 		break;
       
  2779 	default:
       
  2780 		break;
       
  2781 	}
       
  2782 
       
  2783 set_itr_now:
       
  2784 	if (new_itr != adapter->itr) {
       
  2785 		/* this attempts to bias the interrupt rate towards Bulk
       
  2786 		 * by adding intermediate steps when interrupt rate is
       
  2787 		 * increasing */
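        		/* e.g. (illustrative): stepping from itr = 4000 toward a
        		 * target of 20000 first yields min(4000 + (20000 >> 2),
        		 * 20000) = 9000 ints/s; decreases are taken in one step. */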
       
  2788 		new_itr = new_itr > adapter->itr ?
       
  2789 		             min(adapter->itr + (new_itr >> 2), new_itr) :
       
  2790 		             new_itr;
       
  2791 		adapter->itr = new_itr;
       
  2792 		ew32(ITR, 1000000000 / (new_itr * 256));
       
  2793 	}
       
  2794 }
       
  2795 
       
  2796 #define E1000_TX_FLAGS_CSUM		0x00000001
       
  2797 #define E1000_TX_FLAGS_VLAN		0x00000002
       
  2798 #define E1000_TX_FLAGS_TSO		0x00000004
       
  2799 #define E1000_TX_FLAGS_IPV4		0x00000008
       
  2800 #define E1000_TX_FLAGS_NO_FCS		0x00000010
       
  2801 #define E1000_TX_FLAGS_VLAN_MASK	0xffff0000
       
  2802 #define E1000_TX_FLAGS_VLAN_SHIFT	16
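        /* The 802.1Q tag travels in the upper 16 bits of tx_flags: e.g.
         * (illustrative) tag 0x0123 is stored as 0x01230000 and carried
         * into the descriptor's VLAN field via
         * (tx_flags & E1000_TX_FLAGS_VLAN_MASK) in e1000_tx_queue(). */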
       
  2803 
       
  2804 static int e1000_tso(struct e1000_adapter *adapter,
       
  2805 		     struct e1000_tx_ring *tx_ring, struct sk_buff *skb)
       
  2806 {
       
  2807 	struct e1000_context_desc *context_desc;
       
  2808 	struct e1000_buffer *buffer_info;
       
  2809 	unsigned int i;
       
  2810 	u32 cmd_length = 0;
       
  2811 	u16 ipcse = 0, tucse, mss;
       
  2812 	u8 ipcss, ipcso, tucss, tucso, hdr_len;
       
  2813 	int err;
       
  2814 
       
  2815 	if (skb_is_gso(skb)) {
       
  2816 		if (skb_header_cloned(skb)) {
       
  2817 			err = pskb_expand_head(skb, 0, 0, GFP_ATOMIC);
       
  2818 			if (err)
       
  2819 				return err;
       
  2820 		}
       
  2821 
       
  2822 		hdr_len = skb_transport_offset(skb) + tcp_hdrlen(skb);
       
  2823 		mss = skb_shinfo(skb)->gso_size;
       
  2824 		if (skb->protocol == htons(ETH_P_IP)) {
       
  2825 			struct iphdr *iph = ip_hdr(skb);
       
  2826 			iph->tot_len = 0;
       
  2827 			iph->check = 0;
       
  2828 			tcp_hdr(skb)->check = ~csum_tcpudp_magic(iph->saddr,
       
  2829 								 iph->daddr, 0,
       
  2830 								 IPPROTO_TCP,
       
  2831 								 0);
       
  2832 			cmd_length = E1000_TXD_CMD_IP;
       
  2833 			ipcse = skb_transport_offset(skb) - 1;
       
  2834 		} else if (skb->protocol == htons(ETH_P_IPV6)) {
       
  2835 			ipv6_hdr(skb)->payload_len = 0;
       
  2836 			tcp_hdr(skb)->check =
       
  2837 				~csum_ipv6_magic(&ipv6_hdr(skb)->saddr,
       
  2838 						 &ipv6_hdr(skb)->daddr,
       
  2839 						 0, IPPROTO_TCP, 0);
       
  2840 			ipcse = 0;
       
  2841 		}
       
  2842 		ipcss = skb_network_offset(skb);
       
  2843 		ipcso = (void *)&(ip_hdr(skb)->check) - (void *)skb->data;
       
  2844 		tucss = skb_transport_offset(skb);
       
  2845 		tucso = (void *)&(tcp_hdr(skb)->check) - (void *)skb->data;
       
  2846 		tucse = 0;
       
  2847 
       
  2848 		cmd_length |= (E1000_TXD_CMD_DEXT | E1000_TXD_CMD_TSE |
       
  2849 			       E1000_TXD_CMD_TCP | (skb->len - (hdr_len)));
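        		/* e.g. (illustrative): a TSO skb with 54 bytes of headers
        		 * and 2920 bytes of payload encodes PAYLEN = skb->len -
        		 * hdr_len = 2920 in the low bits of cmd_and_length; with
        		 * mss = 1460 the hardware emits two full segments. */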
       
  2850 
       
  2851 		i = tx_ring->next_to_use;
       
  2852 		context_desc = E1000_CONTEXT_DESC(*tx_ring, i);
       
  2853 		buffer_info = &tx_ring->buffer_info[i];
       
  2854 
       
  2855 		context_desc->lower_setup.ip_fields.ipcss  = ipcss;
       
  2856 		context_desc->lower_setup.ip_fields.ipcso  = ipcso;
       
  2857 		context_desc->lower_setup.ip_fields.ipcse  = cpu_to_le16(ipcse);
       
  2858 		context_desc->upper_setup.tcp_fields.tucss = tucss;
       
  2859 		context_desc->upper_setup.tcp_fields.tucso = tucso;
       
  2860 		context_desc->upper_setup.tcp_fields.tucse = cpu_to_le16(tucse);
       
  2861 		context_desc->tcp_seg_setup.fields.mss     = cpu_to_le16(mss);
       
  2862 		context_desc->tcp_seg_setup.fields.hdr_len = hdr_len;
       
  2863 		context_desc->cmd_and_length = cpu_to_le32(cmd_length);
       
  2864 
       
  2865 		buffer_info->time_stamp = jiffies;
       
  2866 		buffer_info->next_to_watch = i;
       
  2867 
       
  2868 		if (++i == tx_ring->count) i = 0;
       
  2869 		tx_ring->next_to_use = i;
       
  2870 
       
  2871 		return true;
       
  2872 	}
       
  2873 	return false;
       
  2874 }
       
  2875 
       
  2876 static bool e1000_tx_csum(struct e1000_adapter *adapter,
       
  2877 			  struct e1000_tx_ring *tx_ring, struct sk_buff *skb)
       
  2878 {
       
  2879 	struct e1000_context_desc *context_desc;
       
  2880 	struct e1000_buffer *buffer_info;
       
  2881 	unsigned int i;
       
  2882 	u8 css;
       
  2883 	u32 cmd_len = E1000_TXD_CMD_DEXT;
       
  2884 
       
  2885 	if (skb->ip_summed != CHECKSUM_PARTIAL)
       
  2886 		return false;
       
  2887 
       
  2888 	switch (skb->protocol) {
       
  2889 	case cpu_to_be16(ETH_P_IP):
       
  2890 		if (ip_hdr(skb)->protocol == IPPROTO_TCP)
       
  2891 			cmd_len |= E1000_TXD_CMD_TCP;
       
  2892 		break;
       
  2893 	case cpu_to_be16(ETH_P_IPV6):
       
  2894 		/* XXX not handling all IPV6 headers */
       
  2895 		if (ipv6_hdr(skb)->nexthdr == IPPROTO_TCP)
       
  2896 			cmd_len |= E1000_TXD_CMD_TCP;
       
  2897 		break;
       
  2898 	default:
       
  2899 		if (unlikely(net_ratelimit()))
       
  2900 			e_warn(drv, "checksum_partial proto=%x!\n",
       
  2901 			       skb->protocol);
       
  2902 		break;
       
  2903 	}
       
  2904 
       
  2905 	css = skb_checksum_start_offset(skb);
       
  2906 
       
  2907 	i = tx_ring->next_to_use;
       
  2908 	buffer_info = &tx_ring->buffer_info[i];
       
  2909 	context_desc = E1000_CONTEXT_DESC(*tx_ring, i);
       
  2910 
       
  2911 	context_desc->lower_setup.ip_config = 0;
       
  2912 	context_desc->upper_setup.tcp_fields.tucss = css;
       
  2913 	context_desc->upper_setup.tcp_fields.tucso =
       
  2914 		css + skb->csum_offset;
       
  2915 	context_desc->upper_setup.tcp_fields.tucse = 0;
       
  2916 	context_desc->tcp_seg_setup.data = 0;
       
  2917 	context_desc->cmd_and_length = cpu_to_le32(cmd_len);
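        	/* e.g. (illustrative): for TCP over IPv4 without options,
        	 * css = 14 (Ethernet) + 20 (IP) = 34 and tucso = css +
        	 * offsetof(struct tcphdr, check) = 50: where the hardware
        	 * starts summing and where it stores the result. */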
       
  2918 
       
  2919 	buffer_info->time_stamp = jiffies;
       
  2920 	buffer_info->next_to_watch = i;
       
  2921 
       
  2922 	if (unlikely(++i == tx_ring->count)) i = 0;
       
  2923 	tx_ring->next_to_use = i;
       
  2924 
       
  2925 	return true;
       
  2926 }
       
  2927 
       
  2928 #define E1000_MAX_TXD_PWR	12
       
  2929 #define E1000_MAX_DATA_PER_TXD	(1<<E1000_MAX_TXD_PWR)
       
  2930 
       
  2931 static int e1000_tx_map(struct e1000_adapter *adapter,
       
  2932 			struct e1000_tx_ring *tx_ring,
       
  2933 			struct sk_buff *skb, unsigned int first,
       
  2934 			unsigned int max_per_txd, unsigned int nr_frags,
       
  2935 			unsigned int mss)
       
  2936 {
       
  2937 	struct e1000_hw *hw = &adapter->hw;
       
  2938 	struct pci_dev *pdev = adapter->pdev;
       
  2939 	struct e1000_buffer *buffer_info;
       
  2940 	unsigned int len = skb_headlen(skb);
       
  2941 	unsigned int offset = 0, size, count = 0, i;
       
  2942 	unsigned int f, bytecount, segs;
       
  2943 
       
  2944 	i = tx_ring->next_to_use;
       
  2945 
       
  2946 	while (len) {
       
  2947 		buffer_info = &tx_ring->buffer_info[i];
       
  2948 		size = min(len, max_per_txd);
       
   2949 		/* Workaround for controller erratum: the descriptor for a
       
   2950 		 * non-TSO packet in a linear skb that follows a TSO packet
       
   2951 		 * gets written back prematurely, before the data is fully
       
   2952 		 * DMA'd to the controller */
       
  2953 		if (!skb->data_len && tx_ring->last_tx_tso &&
       
  2954 		    !skb_is_gso(skb)) {
       
  2955 			tx_ring->last_tx_tso = false;
       
  2956 			size -= 4;
       
  2957 		}
       
  2958 
       
  2959 		/* Workaround for premature desc write-backs
       
  2960 		 * in TSO mode.  Append 4-byte sentinel desc */
       
  2961 		if (unlikely(mss && !nr_frags && size == len && size > 8))
       
  2962 			size -= 4;
       
   2963 		/* Workaround for errata 10, which applies to all
       
   2964 		 * controllers in PCI-X mode.
       
   2965 		 * The fix is to make sure that the first descriptor of a
       
   2966 		 * packet is smaller than 2048 - 16 - 16 (or 2016) bytes.
       
   2967 		 */
       
  2968 		if (unlikely((hw->bus_type == e1000_bus_type_pcix) &&
       
   2969 			     (size > 2015) && count == 0))
       
   2970 			size = 2015;
       
  2971 
       
  2972 		/* Workaround for potential 82544 hang in PCI-X.  Avoid
       
  2973 		 * terminating buffers within evenly-aligned dwords. */
       
  2974 		if (unlikely(adapter->pcix_82544 &&
       
  2975 		   !((unsigned long)(skb->data + offset + size - 1) & 4) &&
       
  2976 		   size > 4))
       
  2977 			size -= 4;
       
  2978 
       
  2979 		buffer_info->length = size;
       
  2980 		/* set time_stamp *before* dma to help avoid a possible race */
       
  2981 		buffer_info->time_stamp = jiffies;
       
  2982 		buffer_info->mapped_as_page = false;
       
  2983 		buffer_info->dma = dma_map_single(&pdev->dev,
       
  2984 						  skb->data + offset,
       
  2985 						  size,	DMA_TO_DEVICE);
       
  2986 		if (dma_mapping_error(&pdev->dev, buffer_info->dma))
       
  2987 			goto dma_error;
       
  2988 		buffer_info->next_to_watch = i;
       
  2989 
       
  2990 		len -= size;
       
  2991 		offset += size;
       
  2992 		count++;
       
  2993 		if (len) {
       
  2994 			i++;
       
  2995 			if (unlikely(i == tx_ring->count))
       
  2996 				i = 0;
       
  2997 		}
       
  2998 	}
       
  2999 
       
  3000 	for (f = 0; f < nr_frags; f++) {
       
  3001 		const struct skb_frag_struct *frag;
       
  3002 
       
  3003 		frag = &skb_shinfo(skb)->frags[f];
       
  3004 		len = skb_frag_size(frag);
       
  3005 		offset = 0;
       
  3006 
       
  3007 		while (len) {
       
  3008 			unsigned long bufend;
       
  3009 			i++;
       
  3010 			if (unlikely(i == tx_ring->count))
       
  3011 				i = 0;
       
  3012 
       
  3013 			buffer_info = &tx_ring->buffer_info[i];
       
  3014 			size = min(len, max_per_txd);
       
  3015 			/* Workaround for premature desc write-backs
       
  3016 			 * in TSO mode.  Append 4-byte sentinel desc */
       
  3017 			if (unlikely(mss && f == (nr_frags-1) && size == len && size > 8))
       
  3018 				size -= 4;
       
  3019 			/* Workaround for potential 82544 hang in PCI-X.
       
  3020 			 * Avoid terminating buffers within evenly-aligned
       
  3021 			 * dwords. */
       
  3022 			bufend = (unsigned long)
       
  3023 				page_to_phys(skb_frag_page(frag));
       
  3024 			bufend += offset + size - 1;
       
  3025 			if (unlikely(adapter->pcix_82544 &&
       
  3026 				     !(bufend & 4) &&
       
  3027 				     size > 4))
       
  3028 				size -= 4;
       
  3029 
       
  3030 			buffer_info->length = size;
       
  3031 			buffer_info->time_stamp = jiffies;
       
  3032 			buffer_info->mapped_as_page = true;
       
  3033 			buffer_info->dma = skb_frag_dma_map(&pdev->dev, frag,
       
  3034 						offset, size, DMA_TO_DEVICE);
       
  3035 			if (dma_mapping_error(&pdev->dev, buffer_info->dma))
       
  3036 				goto dma_error;
       
  3037 			buffer_info->next_to_watch = i;
       
  3038 
       
  3039 			len -= size;
       
  3040 			offset += size;
       
  3041 			count++;
       
  3042 		}
       
  3043 	}
       
  3044 
       
  3045 	segs = skb_shinfo(skb)->gso_segs ?: 1;
       
  3046 	/* multiply data chunks by size of headers */
       
  3047 	bytecount = ((segs - 1) * skb_headlen(skb)) + skb->len;
       
  3048 
       
  3049 	tx_ring->buffer_info[i].skb = skb;
       
  3050 	tx_ring->buffer_info[i].segs = segs;
       
  3051 	tx_ring->buffer_info[i].bytecount = bytecount;
       
  3052 	tx_ring->buffer_info[first].next_to_watch = i;
       
  3053 
       
  3054 	return count;
       
  3055 
       
  3056 dma_error:
       
  3057 	dev_err(&pdev->dev, "TX DMA map failed\n");
       
  3058 	buffer_info->dma = 0;
       
  3059 	if (count)
       
  3060 		count--;
       
  3061 
       
  3062 	while (count--) {
       
   3063 		if (i == 0)
       
  3064 			i += tx_ring->count;
       
  3065 		i--;
       
  3066 		buffer_info = &tx_ring->buffer_info[i];
       
  3067 		e1000_unmap_and_free_tx_resource(adapter, buffer_info);
       
  3068 	}
       
  3069 
       
  3070 	return 0;
       
  3071 }
       
  3072 
       
  3073 static void e1000_tx_queue(struct e1000_adapter *adapter,
       
  3074 			   struct e1000_tx_ring *tx_ring, int tx_flags,
       
  3075 			   int count)
       
  3076 {
       
  3077 	struct e1000_hw *hw = &adapter->hw;
       
  3078 	struct e1000_tx_desc *tx_desc = NULL;
       
  3079 	struct e1000_buffer *buffer_info;
       
  3080 	u32 txd_upper = 0, txd_lower = E1000_TXD_CMD_IFCS;
       
  3081 	unsigned int i;
       
  3082 
       
  3083 	if (likely(tx_flags & E1000_TX_FLAGS_TSO)) {
       
  3084 		txd_lower |= E1000_TXD_CMD_DEXT | E1000_TXD_DTYP_D |
       
  3085 		             E1000_TXD_CMD_TSE;
       
  3086 		txd_upper |= E1000_TXD_POPTS_TXSM << 8;
       
  3087 
       
  3088 		if (likely(tx_flags & E1000_TX_FLAGS_IPV4))
       
  3089 			txd_upper |= E1000_TXD_POPTS_IXSM << 8;
       
  3090 	}
       
  3091 
       
  3092 	if (likely(tx_flags & E1000_TX_FLAGS_CSUM)) {
       
  3093 		txd_lower |= E1000_TXD_CMD_DEXT | E1000_TXD_DTYP_D;
       
  3094 		txd_upper |= E1000_TXD_POPTS_TXSM << 8;
       
  3095 	}
       
  3096 
       
  3097 	if (unlikely(tx_flags & E1000_TX_FLAGS_VLAN)) {
       
  3098 		txd_lower |= E1000_TXD_CMD_VLE;
       
  3099 		txd_upper |= (tx_flags & E1000_TX_FLAGS_VLAN_MASK);
       
  3100 	}
       
  3101 
       
  3102 	if (unlikely(tx_flags & E1000_TX_FLAGS_NO_FCS))
       
  3103 		txd_lower &= ~(E1000_TXD_CMD_IFCS);
       
  3104 
       
  3105 	i = tx_ring->next_to_use;
       
  3106 
       
  3107 	while (count--) {
       
  3108 		buffer_info = &tx_ring->buffer_info[i];
       
  3109 		tx_desc = E1000_TX_DESC(*tx_ring, i);
       
  3110 		tx_desc->buffer_addr = cpu_to_le64(buffer_info->dma);
       
  3111 		tx_desc->lower.data =
       
  3112 			cpu_to_le32(txd_lower | buffer_info->length);
       
  3113 		tx_desc->upper.data = cpu_to_le32(txd_upper);
       
  3114 		if (unlikely(++i == tx_ring->count)) i = 0;
       
  3115 	}
       
  3116 
       
  3117 	tx_desc->lower.data |= cpu_to_le32(adapter->txd_cmd);
       
  3118 
       
  3119 	/* txd_cmd re-enables FCS, so we'll re-disable it here as desired. */
       
  3120 	if (unlikely(tx_flags & E1000_TX_FLAGS_NO_FCS))
       
  3121 		tx_desc->lower.data &= ~(cpu_to_le32(E1000_TXD_CMD_IFCS));
       
  3122 
       
  3123 	/* Force memory writes to complete before letting h/w
       
  3124 	 * know there are new descriptors to fetch.  (Only
       
  3125 	 * applicable for weak-ordered memory model archs,
       
  3126 	 * such as IA-64). */
       
  3127 	wmb();
       
  3128 
       
  3129 	tx_ring->next_to_use = i;
       
  3130 	writel(i, hw->hw_addr + tx_ring->tdt);
       
  3131 	/* we need this if more than one processor can write to our tail
       
   3132 	 * at a time; it synchronizes IO on IA64/Altix systems */
       
  3133 	mmiowb();
       
  3134 }
       
  3135 
       
  3136 /* 82547 workaround to avoid controller hang in half-duplex environment.
       
  3137  * The workaround is to avoid queuing a large packet that would span
       
  3138  * the internal Tx FIFO ring boundary by notifying the stack to resend
       
  3139  * the packet at a later time.  This gives the Tx FIFO an opportunity to
       
  3140  * flush all packets.  When that occurs, we reset the Tx FIFO pointers
       
  3141  * to the beginning of the Tx FIFO.
       
  3142  */
       
  3143 
       
  3144 #define E1000_FIFO_HDR			0x10
       
  3145 #define E1000_82547_PAD_LEN		0x3E0
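        /* e.g. (illustrative): with an 8 KB Tx FIFO and tx_fifo_head at
         * 7.5 KB, fifo_space is 512 bytes.  A 1514-byte frame rounds up to
         * skb_fifo_len = ALIGN(1514 + 16, 16) = 1536, which is >=
         * E1000_82547_PAD_LEN (0x3E0 = 992) + 512 = 1504, so the queue
         * stalls until the FIFO drains. */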
       
  3146 
       
  3147 static int e1000_82547_fifo_workaround(struct e1000_adapter *adapter,
       
  3148 				       struct sk_buff *skb)
       
  3149 {
       
  3150 	u32 fifo_space = adapter->tx_fifo_size - adapter->tx_fifo_head;
       
  3151 	u32 skb_fifo_len = skb->len + E1000_FIFO_HDR;
       
  3152 
       
  3153 	skb_fifo_len = ALIGN(skb_fifo_len, E1000_FIFO_HDR);
       
  3154 
       
  3155 	if (adapter->link_duplex != HALF_DUPLEX)
       
  3156 		goto no_fifo_stall_required;
       
  3157 
       
  3158 	if (atomic_read(&adapter->tx_fifo_stall))
       
  3159 		return 1;
       
  3160 
       
  3161 	if (skb_fifo_len >= (E1000_82547_PAD_LEN + fifo_space)) {
       
  3162 		atomic_set(&adapter->tx_fifo_stall, 1);
       
  3163 		return 1;
       
  3164 	}
       
  3165 
       
  3166 no_fifo_stall_required:
       
  3167 	adapter->tx_fifo_head += skb_fifo_len;
       
  3168 	if (adapter->tx_fifo_head >= adapter->tx_fifo_size)
       
  3169 		adapter->tx_fifo_head -= adapter->tx_fifo_size;
       
  3170 	return 0;
       
  3171 }
       
  3172 
       
  3173 static int __e1000_maybe_stop_tx(struct net_device *netdev, int size)
       
  3174 {
       
  3175 	struct e1000_adapter *adapter = netdev_priv(netdev);
       
  3176 	struct e1000_tx_ring *tx_ring = adapter->tx_ring;
       
  3177 
       
  3178 	if (adapter->ecdev) {
       
  3179 		return -EBUSY;
       
  3180 	}
       
  3181 
       
  3182 	netif_stop_queue(netdev);
       
  3183 	/* Herbert's original patch had:
       
  3184 	 *  smp_mb__after_netif_stop_queue();
       
  3185 	 * but since that doesn't exist yet, just open code it. */
       
  3186 	smp_mb();
       
  3187 
       
  3188 	/* We need to check again in a case another CPU has just
       
  3189 	 * made room available. */
       
  3190 	if (likely(E1000_DESC_UNUSED(tx_ring) < size))
       
  3191 		return -EBUSY;
       
  3192 
       
  3193 	/* A reprieve! */
       
  3194 	netif_start_queue(netdev);
       
  3195 	++adapter->restart_queue;
       
  3196 	return 0;
       
  3197 }
       
  3198 
       
  3199 static int e1000_maybe_stop_tx(struct net_device *netdev,
       
  3200                                struct e1000_tx_ring *tx_ring, int size)
       
  3201 {
       
  3202 	if (likely(E1000_DESC_UNUSED(tx_ring) >= size))
       
  3203 		return 0;
       
  3204 	return __e1000_maybe_stop_tx(netdev, size);
       
  3205 }
       
  3206 
       
   3207 #define TXD_USE_COUNT(S, X) (((S) >> (X)) + 1)
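        /* Number of descriptors needed for a buffer of S bytes when each
         * descriptor carries at most 2^X bytes: e.g. (illustrative)
         * TXD_USE_COUNT(6000, 12) = (6000 >> 12) + 1 = 2 descriptors of up
         * to 4096 bytes each; exact multiples overcount by one, which only
         * makes the ring-space estimate conservative. */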
       
  3208 static netdev_tx_t e1000_xmit_frame(struct sk_buff *skb,
       
  3209 				    struct net_device *netdev)
       
  3210 {
       
  3211 	struct e1000_adapter *adapter = netdev_priv(netdev);
       
  3212 	struct e1000_hw *hw = &adapter->hw;
       
  3213 	struct e1000_tx_ring *tx_ring;
       
  3214 	unsigned int first, max_per_txd = E1000_MAX_DATA_PER_TXD;
       
  3215 	unsigned int max_txd_pwr = E1000_MAX_TXD_PWR;
       
  3216 	unsigned int tx_flags = 0;
       
  3217 	unsigned int len = skb_headlen(skb);
       
  3218 	unsigned int nr_frags;
       
  3219 	unsigned int mss;
       
  3220 	int count = 0;
       
  3221 	int tso;
       
  3222 	unsigned int f;
       
  3223 
       
  3224 	/* This goes back to the question of how to logically map a tx queue
       
  3225 	 * to a flow.  Right now, performance is impacted slightly negatively
       
  3226 	 * if using multiple tx queues.  If the stack breaks away from a
       
  3227 	 * single qdisc implementation, we can look at this again. */
       
  3228 	tx_ring = adapter->tx_ring;
       
  3229 
       
  3230 	if (unlikely(skb->len <= 0)) {
       
  3231 		if (!adapter->ecdev) {
       
  3232 			dev_kfree_skb_any(skb);
       
  3233 		}
       
  3234 		return NETDEV_TX_OK;
       
  3235 	}
       
  3236 
       
  3237 	/* On PCI/PCI-X HW, if packet size is less than ETH_ZLEN,
       
  3238 	 * packets may get corrupted during padding by HW.
       
   3239 	 * To work around this issue, pad all small packets manually.
       
  3240 	 */
       
  3241 	if (skb->len < ETH_ZLEN) {
       
  3242 		if (skb_pad(skb, ETH_ZLEN - skb->len))
       
  3243 			return NETDEV_TX_OK;
       
  3244 		skb->len = ETH_ZLEN;
       
  3245 		skb_set_tail_pointer(skb, ETH_ZLEN);
       
  3246 	}
       
  3247 
       
  3248 	mss = skb_shinfo(skb)->gso_size;
       
   3249 	/* The controller does a simple calculation to
       
   3250 	 * make sure there is enough room in the FIFO before
       
   3251 	 * initiating the DMA for each buffer.  The calc is:
       
   3252 	 * 4 = ceil(buffer len/mss), i.e. each buffer may span at
       
   3253 	 * most four MSS-sized chunks.  To make sure we don't
       
   3254 	 * overrun the FIFO, adjust the max buffer len if mss drops. */
       
  3255 	if (mss) {
       
  3256 		u8 hdr_len;
       
  3257 		max_per_txd = min(mss << 2, max_per_txd);
       
  3258 		max_txd_pwr = fls(max_per_txd) - 1;
       
  3259 
       
  3260 		hdr_len = skb_transport_offset(skb) + tcp_hdrlen(skb);
       
  3261 		if (skb->data_len && hdr_len == len) {
       
  3262 			switch (hw->mac_type) {
       
  3263 				unsigned int pull_size;
       
  3264 			case e1000_82544:
       
  3265 				/* Make sure we have room to chop off 4 bytes,
       
  3266 				 * and that the end alignment will work out to
       
  3267 				 * this hardware's requirements
       
  3268 				 * NOTE: this is a TSO only workaround
       
  3269 				 * if end byte alignment not correct move us
       
  3270 				 * into the next dword */
       
  3271 				if ((unsigned long)(skb_tail_pointer(skb) - 1) & 4)
       
  3272 					break;
       
   3273 				/* not a case fall-through; reached only when the break above is not taken */
       
  3274 				pull_size = min((unsigned int)4, skb->data_len);
       
  3275 				if (!__pskb_pull_tail(skb, pull_size)) {
       
  3276 					e_err(drv, "__pskb_pull_tail "
       
  3277 					      "failed.\n");
       
  3278 					if (!adapter->ecdev) {
       
  3279 						dev_kfree_skb_any(skb);
       
  3280 					}
       
  3281 					return NETDEV_TX_OK;
       
  3282 				}
       
  3283 				len = skb_headlen(skb);
       
  3284 				break;
       
  3285 			default:
       
  3286 				/* do nothing */
       
  3287 				break;
       
  3288 			}
       
  3289 		}
       
  3290 	}
       
  3291 
       
  3292 	/* reserve a descriptor for the offload context */
       
  3293 	if ((mss) || (skb->ip_summed == CHECKSUM_PARTIAL))
       
  3294 		count++;
       
  3295 	count++;
       
  3296 
       
  3297 	/* Controller Erratum workaround */
       
  3298 	if (!skb->data_len && tx_ring->last_tx_tso && !skb_is_gso(skb))
       
  3299 		count++;
       
  3300 
       
  3301 	count += TXD_USE_COUNT(len, max_txd_pwr);
       
  3302 
       
  3303 	if (adapter->pcix_82544)
       
  3304 		count++;
       
  3305 
       
  3306 	/* work-around for errata 10 and it applies to all controllers
       
  3307 	 * in PCI-X mode, so add one more descriptor to the count
       
  3308 	 */
       
  3309 	if (unlikely((hw->bus_type == e1000_bus_type_pcix) &&
       
  3310 			(len > 2015)))
       
  3311 		count++;
       
  3312 
       
  3313 	nr_frags = skb_shinfo(skb)->nr_frags;
       
  3314 	for (f = 0; f < nr_frags; f++)
       
  3315 		count += TXD_USE_COUNT(skb_frag_size(&skb_shinfo(skb)->frags[f]),
       
  3316 				       max_txd_pwr);
       
  3317 	if (adapter->pcix_82544)
       
  3318 		count += nr_frags;
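        	/* Worked example (illustrative, non-PCI-X hardware): a
        	 * 6000-byte linear TSO skb with mss 1460 gives max_per_txd =
        	 * min(1460 << 2, 4096) = 4096 and max_txd_pwr = 12, so the
        	 * head needs TXD_USE_COUNT(6000, 12) = 2 descriptors; with
        	 * the context descriptor and the extra reserved slot,
        	 * count = 4 before the +2 gap check below. */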
       
  3319 
       
  3320 	/* need: count + 2 desc gap to keep tail from touching
       
  3321 	 * head, otherwise try next time */
       
  3322 	if (unlikely(e1000_maybe_stop_tx(netdev, tx_ring, count + 2)))
       
  3323 		return NETDEV_TX_BUSY;
       
  3324 
       
  3325 	if (unlikely((hw->mac_type == e1000_82547) &&
       
  3326 		     (e1000_82547_fifo_workaround(adapter, skb)))) {
       
  3327 		if (!adapter->ecdev) {
       
  3328 			netif_stop_queue(netdev);
       
  3329 			if (!test_bit(__E1000_DOWN, &adapter->flags))
       
  3330 				schedule_delayed_work(&adapter->fifo_stall_task, 1);
       
  3331 		}
       
  3332 		return NETDEV_TX_BUSY;
       
  3333 	}
       
  3334 
       
  3335 	if (vlan_tx_tag_present(skb)) {
       
  3336 		tx_flags |= E1000_TX_FLAGS_VLAN;
       
  3337 		tx_flags |= (vlan_tx_tag_get(skb) << E1000_TX_FLAGS_VLAN_SHIFT);
       
  3338 	}
       
  3339 
       
  3340 	first = tx_ring->next_to_use;
       
  3341 
       
  3342 	tso = e1000_tso(adapter, tx_ring, skb);
       
  3343 	if (tso < 0) {
       
  3344 		if (!adapter->ecdev) {
       
  3345 			dev_kfree_skb_any(skb);
       
  3346 		}
       
  3347 		return NETDEV_TX_OK;
       
  3348 	}
       
  3349 
       
  3350 	if (likely(tso)) {
       
  3351 		if (likely(hw->mac_type != e1000_82544))
       
  3352 			tx_ring->last_tx_tso = true;
       
  3353 		tx_flags |= E1000_TX_FLAGS_TSO;
       
  3354 	} else if (likely(e1000_tx_csum(adapter, tx_ring, skb)))
       
  3355 		tx_flags |= E1000_TX_FLAGS_CSUM;
       
  3356 
       
  3357 	if (likely(skb->protocol == htons(ETH_P_IP)))
       
  3358 		tx_flags |= E1000_TX_FLAGS_IPV4;
       
  3359 
       
  3360 	if (unlikely(skb->no_fcs))
       
  3361 		tx_flags |= E1000_TX_FLAGS_NO_FCS;
       
  3362 
       
  3363 	count = e1000_tx_map(adapter, tx_ring, skb, first, max_per_txd,
       
  3364 	                     nr_frags, mss);
       
  3365 
       
  3366 	if (count) {
       
  3367 		skb_tx_timestamp(skb);
       
  3368 
       
  3369 		e1000_tx_queue(adapter, tx_ring, tx_flags, count);
       
  3370 		if (!adapter->ecdev) {
       
  3371 			/* Make sure there is space in the ring for the next send. */
       
  3372 			e1000_maybe_stop_tx(netdev, tx_ring, MAX_SKB_FRAGS + 2);
       
  3373 		}
       
  3374 
       
  3375 	} else {
       
  3376 		if (!adapter->ecdev) {
       
  3377 			dev_kfree_skb_any(skb);
       
  3378 		}
       
  3379 		tx_ring->buffer_info[first].time_stamp = 0;
       
  3380 		tx_ring->next_to_use = first;
       
  3381 	}
       
  3382 
       
  3383 	return NETDEV_TX_OK;
       
  3384 }
       
  3385 
       
   3386 #define NUM_REGS 38 /* 1-based count */
       
  3387 static void e1000_regdump(struct e1000_adapter *adapter)
       
  3388 {
       
  3389 	struct e1000_hw *hw = &adapter->hw;
       
  3390 	u32 regs[NUM_REGS];
       
  3391 	u32 *regs_buff = regs;
       
  3392 	int i = 0;
       
  3393 
       
  3394 	static const char * const reg_name[] = {
       
  3395 		"CTRL",  "STATUS",
       
  3396 		"RCTL", "RDLEN", "RDH", "RDT", "RDTR",
       
  3397 		"TCTL", "TDBAL", "TDBAH", "TDLEN", "TDH", "TDT",
       
  3398 		"TIDV", "TXDCTL", "TADV", "TARC0",
       
  3399 		"TDBAL1", "TDBAH1", "TDLEN1", "TDH1", "TDT1",
       
  3400 		"TXDCTL1", "TARC1",
       
  3401 		"CTRL_EXT", "ERT", "RDBAL", "RDBAH",
       
  3402 		"TDFH", "TDFT", "TDFHS", "TDFTS", "TDFPC",
       
  3403 		"RDFH", "RDFT", "RDFHS", "RDFTS", "RDFPC"
       
  3404 	};
       
  3405 
       
  3406 	regs_buff[0]  = er32(CTRL);
       
  3407 	regs_buff[1]  = er32(STATUS);
       
  3408 
       
  3409 	regs_buff[2]  = er32(RCTL);
       
  3410 	regs_buff[3]  = er32(RDLEN);
       
  3411 	regs_buff[4]  = er32(RDH);
       
  3412 	regs_buff[5]  = er32(RDT);
       
  3413 	regs_buff[6]  = er32(RDTR);
       
  3414 
       
  3415 	regs_buff[7]  = er32(TCTL);
       
  3416 	regs_buff[8]  = er32(TDBAL);
       
  3417 	regs_buff[9]  = er32(TDBAH);
       
  3418 	regs_buff[10] = er32(TDLEN);
       
  3419 	regs_buff[11] = er32(TDH);
       
  3420 	regs_buff[12] = er32(TDT);
       
  3421 	regs_buff[13] = er32(TIDV);
       
  3422 	regs_buff[14] = er32(TXDCTL);
       
  3423 	regs_buff[15] = er32(TADV);
       
  3424 	regs_buff[16] = er32(TARC0);
       
  3425 
       
  3426 	regs_buff[17] = er32(TDBAL1);
       
  3427 	regs_buff[18] = er32(TDBAH1);
       
  3428 	regs_buff[19] = er32(TDLEN1);
       
  3429 	regs_buff[20] = er32(TDH1);
       
  3430 	regs_buff[21] = er32(TDT1);
       
  3431 	regs_buff[22] = er32(TXDCTL1);
       
  3432 	regs_buff[23] = er32(TARC1);
       
  3433 	regs_buff[24] = er32(CTRL_EXT);
       
  3434 	regs_buff[25] = er32(ERT);
       
  3435 	regs_buff[26] = er32(RDBAL0);
       
  3436 	regs_buff[27] = er32(RDBAH0);
       
  3437 	regs_buff[28] = er32(TDFH);
       
  3438 	regs_buff[29] = er32(TDFT);
       
  3439 	regs_buff[30] = er32(TDFHS);
       
  3440 	regs_buff[31] = er32(TDFTS);
       
  3441 	regs_buff[32] = er32(TDFPC);
       
  3442 	regs_buff[33] = er32(RDFH);
       
  3443 	regs_buff[34] = er32(RDFT);
       
  3444 	regs_buff[35] = er32(RDFHS);
       
  3445 	regs_buff[36] = er32(RDFTS);
       
  3446 	regs_buff[37] = er32(RDFPC);
       
  3447 
       
  3448 	pr_info("Register dump\n");
       
  3449 	for (i = 0; i < NUM_REGS; i++)
       
  3450 		pr_info("%-15s  %08x\n", reg_name[i], regs_buff[i]);
       
  3451 }
       
  3452 
       
   3453 /**
       
   3454  * e1000_dump - Print registers, Tx ring and Rx ring
       
   3455  */
       
  3456 static void e1000_dump(struct e1000_adapter *adapter)
       
  3457 {
       
  3458 	/* this code doesn't handle multiple rings */
       
  3459 	struct e1000_tx_ring *tx_ring = adapter->tx_ring;
       
  3460 	struct e1000_rx_ring *rx_ring = adapter->rx_ring;
       
  3461 	int i;
       
  3462 
       
  3463 	if (!netif_msg_hw(adapter))
       
  3464 		return;
       
  3465 
       
  3466 	/* Print Registers */
       
  3467 	e1000_regdump(adapter);
       
  3468 
       
  3469 	/*
       
  3470 	 * transmit dump
       
  3471 	 */
       
  3472 	pr_info("TX Desc ring0 dump\n");
       
  3473 
       
  3474 	/* Transmit Descriptor Formats - DEXT[29] is 0 (Legacy) or 1 (Extended)
       
  3475 	 *
       
  3476 	 * Legacy Transmit Descriptor
       
  3477 	 *   +--------------------------------------------------------------+
       
  3478 	 * 0 |         Buffer Address [63:0] (Reserved on Write Back)       |
       
  3479 	 *   +--------------------------------------------------------------+
       
  3480 	 * 8 | Special  |    CSS     | Status |  CMD    |  CSO   |  Length  |
       
  3481 	 *   +--------------------------------------------------------------+
       
  3482 	 *   63       48 47        36 35    32 31     24 23    16 15        0
       
  3483 	 *
       
  3484 	 * Extended Context Descriptor (DTYP=0x0) for TSO or checksum offload
       
  3485 	 *   63      48 47    40 39       32 31             16 15    8 7      0
       
  3486 	 *   +----------------------------------------------------------------+
       
  3487 	 * 0 |  TUCSE  | TUCS0  |   TUCSS   |     IPCSE       | IPCS0 | IPCSS |
       
  3488 	 *   +----------------------------------------------------------------+
       
  3489 	 * 8 |   MSS   | HDRLEN | RSV | STA | TUCMD | DTYP |      PAYLEN      |
       
  3490 	 *   +----------------------------------------------------------------+
       
  3491 	 *   63      48 47    40 39 36 35 32 31   24 23  20 19                0
       
  3492 	 *
       
  3493 	 * Extended Data Descriptor (DTYP=0x1)
       
  3494 	 *   +----------------------------------------------------------------+
       
  3495 	 * 0 |                     Buffer Address [63:0]                      |
       
  3496 	 *   +----------------------------------------------------------------+
       
  3497 	 * 8 | VLAN tag |  POPTS  | Rsvd | Status | Command | DTYP |  DTALEN  |
       
  3498 	 *   +----------------------------------------------------------------+
       
  3499 	 *   63       48 47     40 39  36 35    32 31     24 23  20 19        0
       
  3500 	 */
       
  3501 	pr_info("Tc[desc]     [Ce CoCsIpceCoS] [MssHlRSCm0Plen] [bi->dma       ] leng  ntw timestmp         bi->skb\n");
       
  3502 	pr_info("Td[desc]     [address 63:0  ] [VlaPoRSCm1Dlen] [bi->dma       ] leng  ntw timestmp         bi->skb\n");
       
  3503 
       
  3504 	if (!netif_msg_tx_done(adapter))
       
  3505 		goto rx_ring_summary;
       
  3506 
       
  3507 	for (i = 0; tx_ring->desc && (i < tx_ring->count); i++) {
       
  3508 		struct e1000_tx_desc *tx_desc = E1000_TX_DESC(*tx_ring, i);
       
  3509 		struct e1000_buffer *buffer_info = &tx_ring->buffer_info[i];
       
  3510 		struct my_u { __le64 a; __le64 b; };
       
  3511 		struct my_u *u = (struct my_u *)tx_desc;
       
  3512 		const char *type;
       
  3513 
       
  3514 		if (i == tx_ring->next_to_use && i == tx_ring->next_to_clean)
       
  3515 			type = "NTC/U";
       
  3516 		else if (i == tx_ring->next_to_use)
       
  3517 			type = "NTU";
       
  3518 		else if (i == tx_ring->next_to_clean)
       
  3519 			type = "NTC";
       
  3520 		else
       
  3521 			type = "";
       
  3522 
       
  3523 		pr_info("T%c[0x%03X]    %016llX %016llX %016llX %04X  %3X %016llX %p %s\n",
       
  3524 			((le64_to_cpu(u->b) & (1<<20)) ? 'd' : 'c'), i,
       
  3525 			le64_to_cpu(u->a), le64_to_cpu(u->b),
       
  3526 			(u64)buffer_info->dma, buffer_info->length,
       
  3527 			buffer_info->next_to_watch,
       
  3528 			(u64)buffer_info->time_stamp, buffer_info->skb, type);
       
  3529 	}
       
  3530 
       
  3531 rx_ring_summary:
       
  3532 	/*
       
  3533 	 * receive dump
       
  3534 	 */
       
  3535 	pr_info("\nRX Desc ring dump\n");
       
  3536 
       
  3537 	/* Legacy Receive Descriptor Format
       
  3538 	 *
       
  3539 	 * +-----------------------------------------------------+
       
  3540 	 * |                Buffer Address [63:0]                |
       
  3541 	 * +-----------------------------------------------------+
       
  3542 	 * | VLAN Tag | Errors | Status 0 | Packet csum | Length |
       
  3543 	 * +-----------------------------------------------------+
       
  3544 	 * 63       48 47    40 39      32 31         16 15      0
       
  3545 	 */
       
  3546 	pr_info("R[desc]      [address 63:0  ] [vl er S cks ln] [bi->dma       ] [bi->skb]\n");
       
  3547 
       
  3548 	if (!netif_msg_rx_status(adapter))
       
  3549 		goto exit;
       
  3550 
       
  3551 	for (i = 0; rx_ring->desc && (i < rx_ring->count); i++) {
       
  3552 		struct e1000_rx_desc *rx_desc = E1000_RX_DESC(*rx_ring, i);
       
  3553 		struct e1000_buffer *buffer_info = &rx_ring->buffer_info[i];
       
  3554 		struct my_u { __le64 a; __le64 b; };
       
  3555 		struct my_u *u = (struct my_u *)rx_desc;
       
  3556 		const char *type;
       
  3557 
       
  3558 		if (i == rx_ring->next_to_use)
       
  3559 			type = "NTU";
       
  3560 		else if (i == rx_ring->next_to_clean)
       
  3561 			type = "NTC";
       
  3562 		else
       
  3563 			type = "";
       
  3564 
       
  3565 		pr_info("R[0x%03X]     %016llX %016llX %016llX %p %s\n",
       
  3566 			i, le64_to_cpu(u->a), le64_to_cpu(u->b),
       
  3567 			(u64)buffer_info->dma, buffer_info->skb, type);
       
  3568 	} /* for */
       
  3569 
       
  3570 	/* dump the descriptor caches */
       
  3571 	/* rx */
       
  3572 	pr_info("Rx descriptor cache in 64bit format\n");
       
  3573 	for (i = 0x6000; i <= 0x63FF ; i += 0x10) {
       
  3574 		pr_info("R%04X: %08X|%08X %08X|%08X\n",
       
  3575 			i,
       
  3576 			readl(adapter->hw.hw_addr + i+4),
       
  3577 			readl(adapter->hw.hw_addr + i),
       
  3578 			readl(adapter->hw.hw_addr + i+12),
       
  3579 			readl(adapter->hw.hw_addr + i+8));
       
  3580 	}
       
  3581 	/* tx */
       
  3582 	pr_info("Tx descriptor cache in 64bit format\n");
       
  3583 	for (i = 0x7000; i <= 0x73FF ; i += 0x10) {
       
  3584 		pr_info("T%04X: %08X|%08X %08X|%08X\n",
       
  3585 			i,
       
  3586 			readl(adapter->hw.hw_addr + i+4),
       
  3587 			readl(adapter->hw.hw_addr + i),
       
  3588 			readl(adapter->hw.hw_addr + i+12),
       
  3589 			readl(adapter->hw.hw_addr + i+8));
       
  3590 	}
       
  3591 exit:
       
  3592 	return;
       
  3593 }
       
  3594 
       
  3595 /**
       
  3596  * e1000_tx_timeout - Respond to a Tx Hang
       
  3597  * @netdev: network interface device structure
       
  3598  **/
       
  3599 
       
  3600 static void e1000_tx_timeout(struct net_device *netdev)
       
  3601 {
       
  3602 	struct e1000_adapter *adapter = netdev_priv(netdev);
       
  3603 
       
  3604 	/* Do the reset outside of interrupt context */
       
  3605 	adapter->tx_timeout_count++;
       
  3606 	schedule_work(&adapter->reset_task);
       
  3607 }
       
  3608 
       
  3609 static void e1000_reset_task(struct work_struct *work)
       
  3610 {
       
  3611 	struct e1000_adapter *adapter =
       
  3612 		container_of(work, struct e1000_adapter, reset_task);
       
  3613 
       
  3614 	if (test_bit(__E1000_DOWN, &adapter->flags))
       
  3615 		return;
       
  3616 	e_err(drv, "Reset adapter\n");
       
  3617 	e1000_reinit_safe(adapter);
       
  3618 }
       
  3619 
       
  3620 /**
       
  3621  * e1000_get_stats - Get System Network Statistics
       
  3622  * @netdev: network interface device structure
       
  3623  *
       
  3624  * Returns the address of the device statistics structure.
       
  3625  * The statistics are actually updated from the watchdog.
       
  3626  **/
       
  3627 
       
  3628 static struct net_device_stats *e1000_get_stats(struct net_device *netdev)
       
  3629 {
       
  3630 	/* only return the current stats */
       
  3631 	return &netdev->stats;
       
  3632 }
       
  3633 
       
  3634 /**
       
  3635  * e1000_change_mtu - Change the Maximum Transfer Unit
       
  3636  * @netdev: network interface device structure
       
  3637  * @new_mtu: new value for maximum frame size
       
  3638  *
       
  3639  * Returns 0 on success, negative on failure
       
  3640  **/
       
  3641 
       
  3642 static int e1000_change_mtu(struct net_device *netdev, int new_mtu)
       
  3643 {
       
  3644 	struct e1000_adapter *adapter = netdev_priv(netdev);
       
  3645 	struct e1000_hw *hw = &adapter->hw;
       
  3646 	int max_frame = new_mtu + ENET_HEADER_SIZE + ETHERNET_FCS_SIZE;
       
  3647 
       
  3648 	if (adapter->ecdev) {
       
  3649 		return -EBUSY;
       
  3650 	}
       
  3651 
       
  3652 	if ((max_frame < MINIMUM_ETHERNET_FRAME_SIZE) ||
       
  3653 	    (max_frame > MAX_JUMBO_FRAME_SIZE)) {
       
  3654 		e_err(probe, "Invalid MTU setting\n");
       
  3655 		return -EINVAL;
       
  3656 	}
       
  3657 
       
  3658 	/* Adapter-specific max frame size limits. */
       
  3659 	switch (hw->mac_type) {
       
  3660 	case e1000_undefined ... e1000_82542_rev2_1:
       
  3661 		if (max_frame > (ETH_FRAME_LEN + ETH_FCS_LEN)) {
       
  3662 			e_err(probe, "Jumbo Frames not supported.\n");
       
  3663 			return -EINVAL;
       
  3664 		}
       
  3665 		break;
       
  3666 	default:
       
  3667 		/* Capable of supporting up to MAX_JUMBO_FRAME_SIZE limit. */
       
  3668 		break;
       
  3669 	}
       
  3670 
       
  3671 	while (test_and_set_bit(__E1000_RESETTING, &adapter->flags))
       
  3672 		msleep(1);
       
  3673 	/* e1000_down has a dependency on max_frame_size */
       
  3674 	hw->max_frame_size = max_frame;
       
  3675 	if (netif_running(netdev))
       
  3676 		e1000_down(adapter);
       
  3677 
       
  3678 	/* NOTE: netdev_alloc_skb reserves 16 bytes, and typically NET_IP_ALIGN
       
  3679 	 * means we reserve 2 more, this pushes us to allocate from the next
       
  3680 	 * larger slab size.
       
  3681 	 * i.e. RXBUFFER_2048 --> size-4096 slab
       
  3682 	 *  however with the new *_jumbo_rx* routines, jumbo receives will use
       
  3683 	 *  fragmented skbs */
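        	/* e.g. (illustrative): MTU 1500 gives max_frame = 1518, so
        	 * rx_buffer_len stays at E1000_RXBUFFER_2048; a 9000-byte
        	 * jumbo MTU gives max_frame = 9018 and, on 4 KB-page systems,
        	 * rx_buffer_len = PAGE_SIZE with the rest handled by the
        	 * jumbo receive path's fragmented skbs. */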
       
  3684 
       
  3685 	if (max_frame <= E1000_RXBUFFER_2048)
       
  3686 		adapter->rx_buffer_len = E1000_RXBUFFER_2048;
       
  3687 	else
       
  3688 #if (PAGE_SIZE >= E1000_RXBUFFER_16384)
       
  3689 		adapter->rx_buffer_len = E1000_RXBUFFER_16384;
       
  3690 #elif (PAGE_SIZE >= E1000_RXBUFFER_4096)
       
  3691 		adapter->rx_buffer_len = PAGE_SIZE;
       
  3692 #endif
       
  3693 
       
  3694 	/* adjust allocation if LPE protects us, and we aren't using SBP */
       
  3695 	if (!hw->tbi_compatibility_on &&
       
  3696 	    ((max_frame == (ETH_FRAME_LEN + ETH_FCS_LEN)) ||
       
  3697 	     (max_frame == MAXIMUM_ETHERNET_VLAN_SIZE)))
       
  3698 		adapter->rx_buffer_len = MAXIMUM_ETHERNET_VLAN_SIZE;
       
  3699 
       
  3700 	pr_info("%s changing MTU from %d to %d\n",
       
  3701 		netdev->name, netdev->mtu, new_mtu);
       
  3702 	netdev->mtu = new_mtu;
       
  3703 
       
  3704 	if (netif_running(netdev))
       
  3705 		e1000_up(adapter);
       
  3706 	else
       
  3707 		e1000_reset(adapter);
       
  3708 
       
  3709 	clear_bit(__E1000_RESETTING, &adapter->flags);
       
  3710 
       
  3711 	return 0;
       
  3712 }
       
  3713 
       
  3714 /**
       
  3715  * e1000_update_stats - Update the board statistics counters
       
  3716  * @adapter: board private structure
       
  3717  **/
       
  3718 
       
  3719 void e1000_update_stats(struct e1000_adapter *adapter)
       
  3720 {
       
  3721 	struct net_device *netdev = adapter->netdev;
       
  3722 	struct e1000_hw *hw = &adapter->hw;
       
  3723 	struct pci_dev *pdev = adapter->pdev;
       
  3724 	unsigned long flags = 0;
       
  3725 	u16 phy_tmp;
       
  3726 
       
  3727 #define PHY_IDLE_ERROR_COUNT_MASK 0x00FF
       
  3728 
       
  3729 	/*
       
  3730 	 * Prevent stats update while adapter is being reset, or if the pci
       
  3731 	 * connection is down.
       
  3732 	 */
       
  3733 	if (adapter->link_speed == 0)
       
  3734 		return;
       
  3735 	if (pci_channel_offline(pdev))
       
  3736 		return;
       
  3737 
       
  3738 	if (!adapter->ecdev) {
       
  3739 		spin_lock_irqsave(&adapter->stats_lock, flags);
       
  3740 	}
       
  3741 
       
  3742 	/* these counters are modified from e1000_tbi_adjust_stats,
       
  3743 	 * called from the interrupt context, so they must only
       
  3744 	 * be written while holding adapter->stats_lock
       
  3745 	 */
       
  3746 
       
  3747 	adapter->stats.crcerrs += er32(CRCERRS);
       
  3748 	adapter->stats.gprc += er32(GPRC);
       
  3749 	adapter->stats.gorcl += er32(GORCL);
       
  3750 	adapter->stats.gorch += er32(GORCH);
       
  3751 	adapter->stats.bprc += er32(BPRC);
       
  3752 	adapter->stats.mprc += er32(MPRC);
       
  3753 	adapter->stats.roc += er32(ROC);
       
  3754 
       
  3755 	adapter->stats.prc64 += er32(PRC64);
       
  3756 	adapter->stats.prc127 += er32(PRC127);
       
  3757 	adapter->stats.prc255 += er32(PRC255);
       
  3758 	adapter->stats.prc511 += er32(PRC511);
       
  3759 	adapter->stats.prc1023 += er32(PRC1023);
       
  3760 	adapter->stats.prc1522 += er32(PRC1522);
       
  3761 
       
  3762 	adapter->stats.symerrs += er32(SYMERRS);
       
  3763 	adapter->stats.mpc += er32(MPC);
       
  3764 	adapter->stats.scc += er32(SCC);
       
  3765 	adapter->stats.ecol += er32(ECOL);
       
  3766 	adapter->stats.mcc += er32(MCC);
       
  3767 	adapter->stats.latecol += er32(LATECOL);
       
  3768 	adapter->stats.dc += er32(DC);
       
  3769 	adapter->stats.sec += er32(SEC);
       
  3770 	adapter->stats.rlec += er32(RLEC);
       
  3771 	adapter->stats.xonrxc += er32(XONRXC);
       
  3772 	adapter->stats.xontxc += er32(XONTXC);
       
  3773 	adapter->stats.xoffrxc += er32(XOFFRXC);
       
  3774 	adapter->stats.xofftxc += er32(XOFFTXC);
       
  3775 	adapter->stats.fcruc += er32(FCRUC);
       
  3776 	adapter->stats.gptc += er32(GPTC);
       
  3777 	adapter->stats.gotcl += er32(GOTCL);
       
  3778 	adapter->stats.gotch += er32(GOTCH);
       
  3779 	adapter->stats.rnbc += er32(RNBC);
       
  3780 	adapter->stats.ruc += er32(RUC);
       
  3781 	adapter->stats.rfc += er32(RFC);
       
  3782 	adapter->stats.rjc += er32(RJC);
       
  3783 	adapter->stats.torl += er32(TORL);
       
  3784 	adapter->stats.torh += er32(TORH);
       
  3785 	adapter->stats.totl += er32(TOTL);
       
  3786 	adapter->stats.toth += er32(TOTH);
       
  3787 	adapter->stats.tpr += er32(TPR);
       
  3788 
       
  3789 	adapter->stats.ptc64 += er32(PTC64);
       
  3790 	adapter->stats.ptc127 += er32(PTC127);
       
  3791 	adapter->stats.ptc255 += er32(PTC255);
       
  3792 	adapter->stats.ptc511 += er32(PTC511);
       
  3793 	adapter->stats.ptc1023 += er32(PTC1023);
       
  3794 	adapter->stats.ptc1522 += er32(PTC1522);
       
  3795 
       
  3796 	adapter->stats.mptc += er32(MPTC);
       
  3797 	adapter->stats.bptc += er32(BPTC);
       
  3798 
       
  3799 	/* used for adaptive IFS */
       
  3800 
       
  3801 	hw->tx_packet_delta = er32(TPT);
       
  3802 	adapter->stats.tpt += hw->tx_packet_delta;
       
  3803 	hw->collision_delta = er32(COLC);
       
  3804 	adapter->stats.colc += hw->collision_delta;
       
  3805 
       
  3806 	if (hw->mac_type >= e1000_82543) {
       
  3807 		adapter->stats.algnerrc += er32(ALGNERRC);
       
  3808 		adapter->stats.rxerrc += er32(RXERRC);
       
  3809 		adapter->stats.tncrs += er32(TNCRS);
       
  3810 		adapter->stats.cexterr += er32(CEXTERR);
       
  3811 		adapter->stats.tsctc += er32(TSCTC);
       
  3812 		adapter->stats.tsctfc += er32(TSCTFC);
       
  3813 	}
       
  3814 
       
  3815 	/* Fill out the OS statistics structure */
       
  3816 	netdev->stats.multicast = adapter->stats.mprc;
       
  3817 	netdev->stats.collisions = adapter->stats.colc;
       
  3818 
       
  3819 	/* Rx Errors */
       
  3820 
       
  3821 	/* RLEC on some newer hardware can be incorrect so build
       
   3822 	 * our own version based on RUC and ROC */
       
  3823 	netdev->stats.rx_errors = adapter->stats.rxerrc +
       
  3824 		adapter->stats.crcerrs + adapter->stats.algnerrc +
       
  3825 		adapter->stats.ruc + adapter->stats.roc +
       
  3826 		adapter->stats.cexterr;
       
  3827 	adapter->stats.rlerrc = adapter->stats.ruc + adapter->stats.roc;
       
  3828 	netdev->stats.rx_length_errors = adapter->stats.rlerrc;
       
  3829 	netdev->stats.rx_crc_errors = adapter->stats.crcerrs;
       
  3830 	netdev->stats.rx_frame_errors = adapter->stats.algnerrc;
       
  3831 	netdev->stats.rx_missed_errors = adapter->stats.mpc;
       
  3832 
       
  3833 	/* Tx Errors */
       
  3834 	adapter->stats.txerrc = adapter->stats.ecol + adapter->stats.latecol;
       
  3835 	netdev->stats.tx_errors = adapter->stats.txerrc;
       
  3836 	netdev->stats.tx_aborted_errors = adapter->stats.ecol;
       
  3837 	netdev->stats.tx_window_errors = adapter->stats.latecol;
       
  3838 	netdev->stats.tx_carrier_errors = adapter->stats.tncrs;
       
  3839 	if (hw->bad_tx_carr_stats_fd &&
       
  3840 	    adapter->link_duplex == FULL_DUPLEX) {
       
  3841 		netdev->stats.tx_carrier_errors = 0;
       
  3842 		adapter->stats.tncrs = 0;
       
  3843 	}
       
  3844 
       
  3845 	/* Tx Dropped needs to be maintained elsewhere */
       
  3846 
       
  3847 	/* Phy Stats */
       
  3848 	if (hw->media_type == e1000_media_type_copper) {
       
  3849 		if ((adapter->link_speed == SPEED_1000) &&
       
  3850 		   (!e1000_read_phy_reg(hw, PHY_1000T_STATUS, &phy_tmp))) {
       
  3851 			phy_tmp &= PHY_IDLE_ERROR_COUNT_MASK;
       
  3852 			adapter->phy_stats.idle_errors += phy_tmp;
       
  3853 		}
       
  3854 
       
  3855 		if ((hw->mac_type <= e1000_82546) &&
       
  3856 		   (hw->phy_type == e1000_phy_m88) &&
       
  3857 		   !e1000_read_phy_reg(hw, M88E1000_RX_ERR_CNTR, &phy_tmp))
       
  3858 			adapter->phy_stats.receive_errors += phy_tmp;
       
  3859 	}
       
  3860 
       
  3861 	/* Management Stats */
       
  3862 	if (hw->has_smbus) {
       
  3863 		adapter->stats.mgptc += er32(MGTPTC);
       
  3864 		adapter->stats.mgprc += er32(MGTPRC);
       
  3865 		adapter->stats.mgpdc += er32(MGTPDC);
       
  3866 	}
       
  3867 
       
  3868 	if (!adapter->ecdev) {
       
  3869 		spin_unlock_irqrestore(&adapter->stats_lock, flags);
       
  3870 	}
       
  3871 }
       
  3872 
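        /**
         * ec_poll - EtherCAT poll routine, called in place of the ISR
         * @netdev: network interface device structure
         *
         * Presumably invoked cyclically by the EtherCAT master: it mimics
         * the interrupt path and runs the watchdog every 2 * HZ, the same
         * interval the non-EtherCAT path uses for its delayed work.
         **/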
       
  3873 void ec_poll(struct net_device *netdev)
       
  3874 {
       
  3875 	struct e1000_adapter *adapter = netdev_priv(netdev);
       
  3876 	if (jiffies - adapter->ec_watchdog_jiffies >= 2 * HZ) {
       
  3877 		e1000_watchdog(&adapter->watchdog_task.work);
       
  3878 		adapter->ec_watchdog_jiffies = jiffies;
       
  3879 	}
       
  3880 
       
  3881 	e1000_intr(0, netdev);
       
  3882 }
       
  3883 
       
  3884 /**
       
  3885  * e1000_intr - Interrupt Handler
       
  3886  * @irq: interrupt number
       
  3887  * @data: pointer to a network interface device structure
       
  3888  **/
       
  3889 
       
  3890 static irqreturn_t e1000_intr(int irq, void *data)
       
  3891 {
       
  3892 	struct net_device *netdev = data;
       
  3893 	struct e1000_adapter *adapter = netdev_priv(netdev);
       
  3894 	struct e1000_hw *hw = &adapter->hw;
       
  3895 	u32 icr = er32(ICR);
       
  3896 
       
   3897 	if (unlikely(!icr))
       
  3898 		return IRQ_NONE;  /* Not our interrupt */
       
  3899 
       
  3900 	/*
       
  3901 	 * we might have caused the interrupt, but the above
       
  3902 	 * read cleared it, and just in case the driver is
       
  3903 	 * down there is nothing to do so return handled
       
  3904 	 */
       
  3905 	if (unlikely(test_bit(__E1000_DOWN, &adapter->flags)))
       
  3906 		return IRQ_HANDLED;
       
  3907 
       
  3908 	if (unlikely(icr & (E1000_ICR_RXSEQ | E1000_ICR_LSC))) {
       
  3909 		hw->get_link_status = 1;
       
  3910 		/* guard against interrupt when we're going down */
       
  3911 		if (!adapter->ecdev && !test_bit(__E1000_DOWN, &adapter->flags))
       
  3912 			schedule_delayed_work(&adapter->watchdog_task, 1);
       
  3913 	}
       
  3914 
       
  3915 	if (adapter->ecdev) {
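        		/* EtherCAT: no NAPI context is available, so the Rx and
        		 * Tx rings are drained synchronously here, making at most
        		 * E1000_MAX_INTR passes before giving up. */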
       
  3916 		int i, ec_work_done = 0;
       
  3917 		for (i = 0; i < E1000_MAX_INTR; i++) {
       
  3918 			if (unlikely(!adapter->clean_rx(adapter, adapter->rx_ring,
       
  3919 							&ec_work_done, 100) &&
       
  3920 						!e1000_clean_tx_irq(adapter, adapter->tx_ring))) {
       
  3921 				break;
       
  3922 			}
       
  3923 		}
       
  3924  	} else {
       
  3925 		/* disable interrupts, without the synchronize_irq bit */
       
  3926 		ew32(IMC, ~0);
       
  3927 		E1000_WRITE_FLUSH();
       
  3928 
       
  3929 		if (likely(napi_schedule_prep(&adapter->napi))) {
       
  3930 			adapter->total_tx_bytes = 0;
       
  3931 			adapter->total_tx_packets = 0;
       
  3932 			adapter->total_rx_bytes = 0;
       
  3933 			adapter->total_rx_packets = 0;
       
  3934 			__napi_schedule(&adapter->napi);
       
  3935 		} else {
       
  3936 			/* this really should not happen! if it does it is basically a
       
  3937 			 * bug, but not a hard error, so enable ints and continue */
       
  3938 			if (!test_bit(__E1000_DOWN, &adapter->flags))
       
  3939 				e1000_irq_enable(adapter);
       
  3940 		}
       
  3941 	}
       
  3942 
       
  3943 	return IRQ_HANDLED;
       
  3944 }
       

/**
 * e1000_clean - NAPI Rx polling callback
 * @adapter: board private structure
 * EtherCAT: never called
 **/
static int e1000_clean(struct napi_struct *napi, int budget)
{
	struct e1000_adapter *adapter = container_of(napi, struct e1000_adapter, napi);
	int tx_clean_complete = 0, work_done = 0;

	tx_clean_complete = e1000_clean_tx_irq(adapter, &adapter->tx_ring[0]);

	adapter->clean_rx(adapter, &adapter->rx_ring[0], &work_done, budget);

	if (!tx_clean_complete)
		work_done = budget;

	/* If budget not fully consumed, exit the polling mode */
	if (work_done < budget) {
		if (likely(adapter->itr_setting & 3))
			e1000_set_itr(adapter);
		napi_complete(napi);
		if (!test_bit(__E1000_DOWN, &adapter->flags))
			e1000_irq_enable(adapter);
	}

	return work_done;
}

/**
 * e1000_clean_tx_irq - Reclaim resources after transmit completes
 * @adapter: board private structure
 * @tx_ring: ring to clean
 **/
static bool e1000_clean_tx_irq(struct e1000_adapter *adapter,
			       struct e1000_tx_ring *tx_ring)
{
	struct e1000_hw *hw = &adapter->hw;
	struct net_device *netdev = adapter->netdev;
	struct e1000_tx_desc *tx_desc, *eop_desc;
	struct e1000_buffer *buffer_info;
	unsigned int i, eop;
	unsigned int count = 0;
	unsigned int total_tx_bytes = 0, total_tx_packets = 0;

	i = tx_ring->next_to_clean;
	eop = tx_ring->buffer_info[i].next_to_watch;
	eop_desc = E1000_TX_DESC(*tx_ring, eop);

	while ((eop_desc->upper.data & cpu_to_le32(E1000_TXD_STAT_DD)) &&
	       (count < tx_ring->count)) {
		bool cleaned = false;
		rmb();	/* read buffer_info after eop_desc */
		for ( ; !cleaned; count++) {
			tx_desc = E1000_TX_DESC(*tx_ring, i);
			buffer_info = &tx_ring->buffer_info[i];
			cleaned = (i == eop);

			if (cleaned) {
				total_tx_packets += buffer_info->segs;
				total_tx_bytes += buffer_info->bytecount;
			}
			e1000_unmap_and_free_tx_resource(adapter, buffer_info);
			tx_desc->upper.data = 0;

			if (unlikely(++i == tx_ring->count))
				i = 0;
		}

		eop = tx_ring->buffer_info[i].next_to_watch;
		eop_desc = E1000_TX_DESC(*tx_ring, eop);
	}

	tx_ring->next_to_clean = i;

#define TX_WAKE_THRESHOLD 32
	if (!adapter->ecdev && unlikely(count && netif_carrier_ok(netdev) &&
		     E1000_DESC_UNUSED(tx_ring) >= TX_WAKE_THRESHOLD)) {
		/* Make sure that anybody stopping the queue after this
		 * sees the new next_to_clean.
		 */
		smp_mb();

		if (netif_queue_stopped(netdev) &&
		    !(test_bit(__E1000_DOWN, &adapter->flags))) {
			netif_wake_queue(netdev);
			++adapter->restart_queue;
		}
	}

	if (!adapter->ecdev && adapter->detect_tx_hung) {
		/* Detect a transmit hang in hardware, this serializes the
		 * check with the clearing of time_stamp and movement of i */
		adapter->detect_tx_hung = false;
		if (tx_ring->buffer_info[eop].time_stamp &&
		    time_after(jiffies, tx_ring->buffer_info[eop].time_stamp +
		               (adapter->tx_timeout_factor * HZ)) &&
		    !(er32(STATUS) & E1000_STATUS_TXOFF)) {

			/* detected Tx unit hang */
			e_err(drv, "Detected Tx Unit Hang\n"
			      "  Tx Queue             <%lu>\n"
			      "  TDH                  <%x>\n"
			      "  TDT                  <%x>\n"
			      "  next_to_use          <%x>\n"
			      "  next_to_clean        <%x>\n"
			      "buffer_info[next_to_clean]\n"
			      "  time_stamp           <%lx>\n"
			      "  next_to_watch        <%x>\n"
			      "  jiffies              <%lx>\n"
			      "  next_to_watch.status <%x>\n",
				(unsigned long)((tx_ring - adapter->tx_ring) /
					sizeof(struct e1000_tx_ring)),
				readl(hw->hw_addr + tx_ring->tdh),
				readl(hw->hw_addr + tx_ring->tdt),
				tx_ring->next_to_use,
				tx_ring->next_to_clean,
				tx_ring->buffer_info[eop].time_stamp,
				eop,
				jiffies,
				eop_desc->upper.fields.status);
			e1000_dump(adapter);
			netif_stop_queue(netdev);
		}
	}
	adapter->total_tx_bytes += total_tx_bytes;
	adapter->total_tx_packets += total_tx_packets;
	netdev->stats.tx_bytes += total_tx_bytes;
	netdev->stats.tx_packets += total_tx_packets;
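	/* true: all pending work fit into this pass; false: the per-call
	 * budget of one full ring was exhausted and Tx work may remain. */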
	return count < tx_ring->count;
}

/**
 * e1000_rx_checksum - Receive Checksum Offload for 82543
 * @adapter:     board private structure
 * @status_err:  receive descriptor status and error fields
 * @csum:        receive descriptor csum field
 * @skb:         socket buffer with received data
 **/

static void e1000_rx_checksum(struct e1000_adapter *adapter, u32 status_err,
			      u32 csum, struct sk_buff *skb)
{
	struct e1000_hw *hw = &adapter->hw;
	u16 status = (u16)status_err;
	u8 errors = (u8)(status_err >> 24);

	skb_checksum_none_assert(skb);

	/* 82543 or newer only */
	if (unlikely(hw->mac_type < e1000_82543))
		return;
	/* Ignore Checksum bit is set */
	if (unlikely(status & E1000_RXD_STAT_IXSM))
		return;
	/* TCP/UDP checksum error bit is set */
	if (unlikely(errors & E1000_RXD_ERR_TCPE)) {
		/* let the stack verify checksum errors */
		adapter->hw_csum_err++;
		return;
	}
	/* TCP/UDP Checksum has not been calculated */
	if (!(status & E1000_RXD_STAT_TCPCS))
		return;

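	/* At this point E1000_RXD_STAT_TCPCS is known to be set, so the
	 * condition below is effectively always true; it is kept as in
	 * the original driver. */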
	/* It must be a TCP or UDP packet with a valid checksum */
	if (likely(status & E1000_RXD_STAT_TCPCS)) {
		/* TCP checksum is good */
		skb->ip_summed = CHECKSUM_UNNECESSARY;
	}
	adapter->hw_csum_good++;
}

/**
 * e1000_consume_page - helper to account a used receive page
 * @bi: buffer info whose page has been consumed
 * @skb: socket buffer the page data was attached to
 * @length: number of bytes taken from the page
 **/
static void e1000_consume_page(struct e1000_buffer *bi, struct sk_buff *skb,
                               u16 length)
{
	bi->page = NULL;
	skb->len += length;
	skb->data_len += length;
	skb->truesize += PAGE_SIZE;
}

/**
 * e1000_receive_skb - helper function to handle rx indications
 * @adapter: board private structure
 * @status: descriptor status field as written by hardware
 * @vlan: descriptor vlan field as written by hardware (no le/be conversion)
 * @skb: pointer to sk_buff to be indicated to stack
 */
static void e1000_receive_skb(struct e1000_adapter *adapter, u8 status,
			      __le16 vlan, struct sk_buff *skb)
{
	skb->protocol = eth_type_trans(skb, adapter->netdev);

	if (status & E1000_RXD_STAT_VP) {
		u16 vid = le16_to_cpu(vlan) & E1000_RXD_SPC_VLAN_MASK;

		__vlan_hwaccel_put_tag(skb, vid);
	}
	napi_gro_receive(&adapter->napi, skb);
}

/**
 * e1000_clean_jumbo_rx_irq - Send received data up the network stack; legacy
 * @adapter: board private structure
 * @rx_ring: ring to clean
 * @work_done: amount of napi work completed this call
 * @work_to_do: max amount of work allowed for this call to do
 *
 * the return value indicates whether actual cleaning was done, there
 * is no guarantee that everything was cleaned
 */
static bool e1000_clean_jumbo_rx_irq(struct e1000_adapter *adapter,
				     struct e1000_rx_ring *rx_ring,
				     int *work_done, int work_to_do)
{
	struct e1000_hw *hw = &adapter->hw;
	struct net_device *netdev = adapter->netdev;
	struct pci_dev *pdev = adapter->pdev;
	struct e1000_rx_desc *rx_desc, *next_rxd;
	struct e1000_buffer *buffer_info, *next_buffer;
	unsigned long irq_flags;
	u32 length;
	unsigned int i;
	int cleaned_count = 0;
	bool cleaned = false;
	unsigned int total_rx_bytes = 0, total_rx_packets = 0;

	i = rx_ring->next_to_clean;
	rx_desc = E1000_RX_DESC(*rx_ring, i);
	buffer_info = &rx_ring->buffer_info[i];

	while (rx_desc->status & E1000_RXD_STAT_DD) {
		struct sk_buff *skb;
		u8 status;

		if (*work_done >= work_to_do)
			break;
		(*work_done)++;
		rmb(); /* read descriptor and rx_buffer_info after status DD */

		status = rx_desc->status;
		skb = buffer_info->skb;
		if (!adapter->ecdev) {
			buffer_info->skb = NULL;
		}

		if (++i == rx_ring->count)
			i = 0;
		next_rxd = E1000_RX_DESC(*rx_ring, i);
		prefetch(next_rxd);

		next_buffer = &rx_ring->buffer_info[i];

		cleaned = true;
		cleaned_count++;
		dma_unmap_page(&pdev->dev, buffer_info->dma,
			       buffer_info->length, DMA_FROM_DEVICE);
		buffer_info->dma = 0;

		length = le16_to_cpu(rx_desc->length);

		/* errors is only valid for DD + EOP descriptors */
		if (!adapter->ecdev &&
		    unlikely((status & E1000_RXD_STAT_EOP) &&
		    (rx_desc->errors & E1000_RXD_ERR_FRAME_ERR_MASK))) {
			u8 *mapped;
			u8 last_byte;

			mapped = page_address(buffer_info->page);
			last_byte = *(mapped + length - 1);
			if (TBI_ACCEPT(hw, status, rx_desc->errors, length,
				       last_byte)) {
				spin_lock_irqsave(&adapter->stats_lock,
				                  irq_flags);
				e1000_tbi_adjust_stats(hw, &adapter->stats,
						       length, mapped);
				spin_unlock_irqrestore(&adapter->stats_lock,
				                       irq_flags);
				length--;
			} else {
				if (netdev->features & NETIF_F_RXALL)
					goto process_skb;
				/* recycle both page and skb */
				buffer_info->skb = skb;
				/* an error means any chain goes out the window
				 * too */
				if (rx_ring->rx_skb_top)
					dev_kfree_skb(rx_ring->rx_skb_top);
				rx_ring->rx_skb_top = NULL;
				goto next_desc;
			}
		}

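/* rx_skb_top holds an in-progress multi-descriptor (jumbo) frame; page
 * fragments are appended to it until a descriptor with EOP set completes
 * the chain. */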
#define rxtop rx_ring->rx_skb_top
process_skb:
		if (!(status & E1000_RXD_STAT_EOP)) {
			/* this descriptor is only the beginning (or middle) */
			if (!rxtop) {
				/* this is the beginning of a chain */
				rxtop = skb;
				skb_fill_page_desc(rxtop, 0, buffer_info->page,
				                   0, length);
			} else {
				/* this is the middle of a chain */
				skb_fill_page_desc(rxtop,
				    skb_shinfo(rxtop)->nr_frags,
				    buffer_info->page, 0, length);
				/* re-use the skb, only consumed the page */
				buffer_info->skb = skb;
			}
			e1000_consume_page(buffer_info, rxtop, length);
			goto next_desc;
		} else {
			if (rxtop) {
				/* end of the chain */
				skb_fill_page_desc(rxtop,
				    skb_shinfo(rxtop)->nr_frags,
				    buffer_info->page, 0, length);
				/* re-use the current skb, we only consumed the
				 * page */
				buffer_info->skb = skb;
				skb = rxtop;
				rxtop = NULL;
				e1000_consume_page(buffer_info, skb, length);
			} else {
				/* no chain, got EOP, this buf is the packet
				 * copybreak to save the put_page/alloc_page */
				if (length <= copybreak &&
				    skb_tailroom(skb) >= length) {
					u8 *vaddr;
					vaddr = kmap_atomic(buffer_info->page);
					memcpy(skb_tail_pointer(skb), vaddr, length);
					kunmap_atomic(vaddr);
					/* re-use the page, so don't erase
					 * buffer_info->page */
					skb_put(skb, length);
				} else {
					skb_fill_page_desc(skb, 0,
					                   buffer_info->page, 0,
					                   length);
					e1000_consume_page(buffer_info, skb,
					                   length);
				}
			}
		}

		/* Receive Checksum Offload XXX recompute due to CRC strip? */
		e1000_rx_checksum(adapter,
		                  (u32)(status) |
		                  ((u32)(rx_desc->errors) << 24),
		                  le16_to_cpu(rx_desc->csum), skb);

		total_rx_bytes += (skb->len - 4); /* don't count FCS */
		if (likely(!(netdev->features & NETIF_F_RXFCS)))
			pskb_trim(skb, skb->len - 4);
		total_rx_packets++;

		/* eth type trans needs skb->data to point to something */
		if (!pskb_may_pull(skb, ETH_HLEN)) {
			e_err(drv, "pskb_may_pull failed.\n");
			if (!adapter->ecdev) {
				dev_kfree_skb(skb);
			}
			goto next_desc;
		}

		if (adapter->ecdev) {
			ecdev_receive(adapter->ecdev, skb->data, length);

			/* No need to detect link status as long as frames
			 * are received: Reset watchdog. */
			adapter->ec_watchdog_jiffies = jiffies;
		} else {
			e1000_receive_skb(adapter, status, rx_desc->special, skb);
		}

next_desc:
		rx_desc->status = 0;

		/* return some buffers to hardware, one at a time is too slow */
		if (unlikely(cleaned_count >= E1000_RX_BUFFER_WRITE)) {
			adapter->alloc_rx_buf(adapter, rx_ring, cleaned_count);
			cleaned_count = 0;
		}

		/* use prefetched values */
		rx_desc = next_rxd;
		buffer_info = next_buffer;
	}
	rx_ring->next_to_clean = i;

	cleaned_count = E1000_DESC_UNUSED(rx_ring);
	if (cleaned_count)
		adapter->alloc_rx_buf(adapter, rx_ring, cleaned_count);

	adapter->total_rx_packets += total_rx_packets;
	adapter->total_rx_bytes += total_rx_bytes;
	netdev->stats.rx_bytes += total_rx_bytes;
	netdev->stats.rx_packets += total_rx_packets;
	return cleaned;
}

/*
 * this should improve performance for small packets with large amounts
 * of reassembly being done in the stack
 */
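/* EtherCAT note: in ecdev mode the copybreak copy is skipped entirely,
 * since received frames are handed to the master in place and never
 * enter the network stack. */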
static void e1000_check_copybreak(struct net_device *netdev,
				 struct e1000_buffer *buffer_info,
				 u32 length, struct sk_buff **skb)
{
	struct e1000_adapter *adapter = netdev_priv(netdev);
	struct sk_buff *new_skb;

	if (adapter->ecdev || length > copybreak)
		return;

	new_skb = netdev_alloc_skb_ip_align(netdev, length);
	if (!new_skb)
		return;

	skb_copy_to_linear_data_offset(new_skb, -NET_IP_ALIGN,
				       (*skb)->data - NET_IP_ALIGN,
				       length + NET_IP_ALIGN);
	/* save the skb in buffer_info as good */
	buffer_info->skb = *skb;
	*skb = new_skb;
}

/**
 * e1000_clean_rx_irq - Send received data up the network stack; legacy
 * @adapter: board private structure
 * @rx_ring: ring to clean
 * @work_done: amount of napi work completed this call
 * @work_to_do: max amount of work allowed for this call to do
 */
static bool e1000_clean_rx_irq(struct e1000_adapter *adapter,
			       struct e1000_rx_ring *rx_ring,
			       int *work_done, int work_to_do)
{
	struct e1000_hw *hw = &adapter->hw;
	struct net_device *netdev = adapter->netdev;
	struct pci_dev *pdev = adapter->pdev;
	struct e1000_rx_desc *rx_desc, *next_rxd;
	struct e1000_buffer *buffer_info, *next_buffer;
	unsigned long flags;
	u32 length;
	unsigned int i;
	int cleaned_count = 0;
	bool cleaned = false;
	unsigned int total_rx_bytes = 0, total_rx_packets = 0;

	i = rx_ring->next_to_clean;
	rx_desc = E1000_RX_DESC(*rx_ring, i);
	buffer_info = &rx_ring->buffer_info[i];

	while (rx_desc->status & E1000_RXD_STAT_DD) {
		struct sk_buff *skb;
		u8 status;

		if (*work_done >= work_to_do)
			break;
		(*work_done)++;
		rmb(); /* read descriptor and rx_buffer_info after status DD */

		status = rx_desc->status;
		skb = buffer_info->skb;
		if (!adapter->ecdev) {
			buffer_info->skb = NULL;
		}

		prefetch(skb->data - NET_IP_ALIGN);

		if (++i == rx_ring->count)
			i = 0;
		next_rxd = E1000_RX_DESC(*rx_ring, i);
		prefetch(next_rxd);

		next_buffer = &rx_ring->buffer_info[i];

		cleaned = true;
		cleaned_count++;
		dma_unmap_single(&pdev->dev, buffer_info->dma,
				 buffer_info->length, DMA_FROM_DEVICE);
		buffer_info->dma = 0;

		length = le16_to_cpu(rx_desc->length);
		/* !EOP means multiple descriptors were used to store a single
		 * packet; if that's the case we need to toss it.  In fact, we
		 * need to toss every packet with the EOP bit clear and the
		 * next frame that _does_ have the EOP bit set, as it is by
		 * definition only a frame fragment
		 */
		if (unlikely(!(status & E1000_RXD_STAT_EOP)))
			adapter->discarding = true;

		if (adapter->discarding) {
			/* All receives must fit into a single buffer */
			e_dbg("Receive packet consumed multiple buffers\n");
			/* recycle */
			buffer_info->skb = skb;
			if (status & E1000_RXD_STAT_EOP)
				adapter->discarding = false;
			goto next_desc;
		}

		if (!adapter->ecdev &&
		    unlikely(rx_desc->errors & E1000_RXD_ERR_FRAME_ERR_MASK)) {
			u8 last_byte = *(skb->data + length - 1);
			if (TBI_ACCEPT(hw, status, rx_desc->errors, length,
				       last_byte)) {
				spin_lock_irqsave(&adapter->stats_lock, flags);
				e1000_tbi_adjust_stats(hw, &adapter->stats,
				                       length, skb->data);
				spin_unlock_irqrestore(&adapter->stats_lock,
				                       flags);
				length--;
			} else {
				if (netdev->features & NETIF_F_RXALL)
					goto process_skb;
				/* recycle */
				buffer_info->skb = skb;
				goto next_desc;
			}
		}

process_skb:
		total_rx_bytes += (length - 4); /* don't count FCS */
		total_rx_packets++;

		if (likely(!(netdev->features & NETIF_F_RXFCS)))
			/* adjust length to remove Ethernet CRC, this must be
			 * done after the TBI_ACCEPT workaround above
			 */
			length -= 4;

		e1000_check_copybreak(netdev, buffer_info, length, &skb);

		skb_put(skb, length);

		/* Receive Checksum Offload */
		e1000_rx_checksum(adapter,
				  (u32)(status) |
				  ((u32)(rx_desc->errors) << 24),
				  le16_to_cpu(rx_desc->csum), skb);

		if (adapter->ecdev) {
			ecdev_receive(adapter->ecdev, skb->data, length);

			/* No need to detect link status as long as frames
			 * are received: Reset watchdog. */
			adapter->ec_watchdog_jiffies = jiffies;
		} else {
			e1000_receive_skb(adapter, status, rx_desc->special, skb);
		}

next_desc:
		rx_desc->status = 0;

		/* return some buffers to hardware, one at a time is too slow */
		if (unlikely(cleaned_count >= E1000_RX_BUFFER_WRITE)) {
			adapter->alloc_rx_buf(adapter, rx_ring, cleaned_count);
			cleaned_count = 0;
		}

		/* use prefetched values */
		rx_desc = next_rxd;
		buffer_info = next_buffer;
	}
	rx_ring->next_to_clean = i;

	cleaned_count = E1000_DESC_UNUSED(rx_ring);
	if (cleaned_count)
		adapter->alloc_rx_buf(adapter, rx_ring, cleaned_count);

	adapter->total_rx_packets += total_rx_packets;
	adapter->total_rx_bytes += total_rx_bytes;
	netdev->stats.rx_bytes += total_rx_bytes;
	netdev->stats.rx_packets += total_rx_packets;
	return cleaned;
}

/**
 * e1000_alloc_jumbo_rx_buffers - Replace used jumbo receive buffers
 * @adapter: address of board private structure
 * @rx_ring: pointer to receive ring structure
 * @cleaned_count: number of buffers to allocate this pass
 **/

static void
e1000_alloc_jumbo_rx_buffers(struct e1000_adapter *adapter,
                             struct e1000_rx_ring *rx_ring, int cleaned_count)
{
	struct net_device *netdev = adapter->netdev;
	struct pci_dev *pdev = adapter->pdev;
	struct e1000_rx_desc *rx_desc;
	struct e1000_buffer *buffer_info;
	struct sk_buff *skb;
	unsigned int i;
	unsigned int bufsz = 256 - 16; /* for skb_reserve */

	i = rx_ring->next_to_use;
	buffer_info = &rx_ring->buffer_info[i];

	while (cleaned_count--) {
		skb = buffer_info->skb;
		if (skb) {
			skb_trim(skb, 0);
			goto check_page;
		}

		skb = netdev_alloc_skb_ip_align(netdev, bufsz);
		if (unlikely(!skb)) {
			/* Better luck next round */
			adapter->alloc_rx_buff_failed++;
			break;
		}

		buffer_info->skb = skb;
		buffer_info->length = adapter->rx_buffer_len;
check_page:
		/* allocate a new page if necessary */
		if (!buffer_info->page) {
			buffer_info->page = alloc_page(GFP_ATOMIC);
			if (unlikely(!buffer_info->page)) {
				adapter->alloc_rx_buff_failed++;
				break;
			}
		}

		if (!buffer_info->dma) {
			buffer_info->dma = dma_map_page(&pdev->dev,
			                                buffer_info->page, 0,
							buffer_info->length,
							DMA_FROM_DEVICE);
			if (dma_mapping_error(&pdev->dev, buffer_info->dma)) {
				put_page(buffer_info->page);
				dev_kfree_skb(skb);
				buffer_info->page = NULL;
				buffer_info->skb = NULL;
				buffer_info->dma = 0;
				adapter->alloc_rx_buff_failed++;
				break; /* while !buffer_info->skb */
			}
		}

		rx_desc = E1000_RX_DESC(*rx_ring, i);
		rx_desc->buffer_addr = cpu_to_le64(buffer_info->dma);

		if (unlikely(++i == rx_ring->count))
			i = 0;
		buffer_info = &rx_ring->buffer_info[i];
	}

	if (likely(rx_ring->next_to_use != i)) {
		rx_ring->next_to_use = i;
		if (unlikely(i-- == 0))
			i = (rx_ring->count - 1);

		/* Force memory writes to complete before letting h/w
		 * know there are new descriptors to fetch.  (Only
		 * applicable for weak-ordered memory model archs,
		 * such as IA-64). */
		wmb();
		writel(i, adapter->hw.hw_addr + rx_ring->rdt);
	}
}

/**
 * e1000_alloc_rx_buffers - Replace used receive buffers; legacy & extended
 * @adapter: address of board private structure
 * @rx_ring: pointer to receive ring structure
 * @cleaned_count: number of buffers to allocate this pass
 **/

static void e1000_alloc_rx_buffers(struct e1000_adapter *adapter,
				   struct e1000_rx_ring *rx_ring,
				   int cleaned_count)
{
	struct e1000_hw *hw = &adapter->hw;
	struct net_device *netdev = adapter->netdev;
	struct pci_dev *pdev = adapter->pdev;
	struct e1000_rx_desc *rx_desc;
	struct e1000_buffer *buffer_info;
	struct sk_buff *skb;
	unsigned int i;
	unsigned int bufsz = adapter->rx_buffer_len;

	i = rx_ring->next_to_use;
	buffer_info = &rx_ring->buffer_info[i];

	while (cleaned_count--) {
		skb = buffer_info->skb;
		if (skb) {
			skb_trim(skb, 0);
			goto map_skb;
		}

		skb = netdev_alloc_skb_ip_align(netdev, bufsz);
		if (unlikely(!skb)) {
			/* Better luck next round */
			adapter->alloc_rx_buff_failed++;
			break;
		}

		/* Fix for errata 23, can't cross 64kB boundary */
		if (!e1000_check_64k_bound(adapter, skb->data, bufsz)) {
			struct sk_buff *oldskb = skb;
			e_err(rx_err, "skb align check failed: %u bytes at "
			      "%p\n", bufsz, skb->data);
			/* Try again, without freeing the previous */
			skb = netdev_alloc_skb_ip_align(netdev, bufsz);
			/* Failed allocation, critical failure */
			if (!skb) {
				dev_kfree_skb(oldskb);
				adapter->alloc_rx_buff_failed++;
				break;
			}

			if (!e1000_check_64k_bound(adapter, skb->data, bufsz)) {
				/* give up */
				dev_kfree_skb(skb);
				dev_kfree_skb(oldskb);
				adapter->alloc_rx_buff_failed++;
				break; /* while !buffer_info->skb */
			}

			/* Use new allocation */
			dev_kfree_skb(oldskb);
		}
		buffer_info->skb = skb;
		buffer_info->length = adapter->rx_buffer_len;
map_skb:
		buffer_info->dma = dma_map_single(&pdev->dev,
						  skb->data,
						  buffer_info->length,
						  DMA_FROM_DEVICE);
		if (dma_mapping_error(&pdev->dev, buffer_info->dma)) {
			dev_kfree_skb(skb);
			buffer_info->skb = NULL;
			buffer_info->dma = 0;
			adapter->alloc_rx_buff_failed++;
			break; /* while !buffer_info->skb */
		}

		/*
		 * XXX if it was allocated cleanly it will never map to a
		 * boundary crossing
		 */

		/* Fix for errata 23, can't cross 64kB boundary */
		if (!e1000_check_64k_bound(adapter,
					(void *)(unsigned long)buffer_info->dma,
					adapter->rx_buffer_len)) {
			e_err(rx_err, "dma align check failed: %u bytes at "
			      "%p\n", adapter->rx_buffer_len,
			      (void *)(unsigned long)buffer_info->dma);
			dev_kfree_skb(skb);
			buffer_info->skb = NULL;

			dma_unmap_single(&pdev->dev, buffer_info->dma,
					 adapter->rx_buffer_len,
					 DMA_FROM_DEVICE);
			buffer_info->dma = 0;

			adapter->alloc_rx_buff_failed++;
			break; /* while !buffer_info->skb */
		}
		rx_desc = E1000_RX_DESC(*rx_ring, i);
		rx_desc->buffer_addr = cpu_to_le64(buffer_info->dma);

		if (unlikely(++i == rx_ring->count))
			i = 0;
		buffer_info = &rx_ring->buffer_info[i];
	}

	if (likely(rx_ring->next_to_use != i)) {
		rx_ring->next_to_use = i;
		if (unlikely(i-- == 0))
			i = (rx_ring->count - 1);

		/* Force memory writes to complete before letting h/w
		 * know there are new descriptors to fetch.  (Only
		 * applicable for weak-ordered memory model archs,
		 * such as IA-64). */
		wmb();
		writel(i, hw->hw_addr + rx_ring->rdt);
	}
}

/**
 * e1000_smartspeed - Workaround for SmartSpeed on 82541 and 82547 controllers.
 * @adapter: board private structure
 **/

static void e1000_smartspeed(struct e1000_adapter *adapter)
{
	struct e1000_hw *hw = &adapter->hw;
	u16 phy_status;
	u16 phy_ctrl;

	if ((hw->phy_type != e1000_phy_igp) || !hw->autoneg ||
	   !(hw->autoneg_advertised & ADVERTISE_1000_FULL))
		return;

	if (adapter->smartspeed == 0) {
		/* If Master/Slave config fault is asserted twice,
		 * we assume back-to-back */
		e1000_read_phy_reg(hw, PHY_1000T_STATUS, &phy_status);
		if (!(phy_status & SR_1000T_MS_CONFIG_FAULT))
			return;
		e1000_read_phy_reg(hw, PHY_1000T_STATUS, &phy_status);
		if (!(phy_status & SR_1000T_MS_CONFIG_FAULT))
			return;
		e1000_read_phy_reg(hw, PHY_1000T_CTRL, &phy_ctrl);
		if (phy_ctrl & CR_1000T_MS_ENABLE) {
			phy_ctrl &= ~CR_1000T_MS_ENABLE;
			e1000_write_phy_reg(hw, PHY_1000T_CTRL,
					    phy_ctrl);
			adapter->smartspeed++;
			if (!e1000_phy_setup_autoneg(hw) &&
			   !e1000_read_phy_reg(hw, PHY_CTRL,
					       &phy_ctrl)) {
				phy_ctrl |= (MII_CR_AUTO_NEG_EN |
					     MII_CR_RESTART_AUTO_NEG);
				e1000_write_phy_reg(hw, PHY_CTRL,
						    phy_ctrl);
			}
		}
		return;
	} else if (adapter->smartspeed == E1000_SMARTSPEED_DOWNSHIFT) {
		/* If still no link, perhaps using 2/3 pair cable */
		e1000_read_phy_reg(hw, PHY_1000T_CTRL, &phy_ctrl);
		phy_ctrl |= CR_1000T_MS_ENABLE;
		e1000_write_phy_reg(hw, PHY_1000T_CTRL, phy_ctrl);
		if (!e1000_phy_setup_autoneg(hw) &&
		   !e1000_read_phy_reg(hw, PHY_CTRL, &phy_ctrl)) {
			phy_ctrl |= (MII_CR_AUTO_NEG_EN |
				     MII_CR_RESTART_AUTO_NEG);
			e1000_write_phy_reg(hw, PHY_CTRL, phy_ctrl);
		}
	}
	/* Restart process after E1000_SMARTSPEED_MAX iterations */
	if (adapter->smartspeed++ == E1000_SMARTSPEED_MAX)
		adapter->smartspeed = 0;
}

/**
 * e1000_ioctl - handle device ioctls
 * @netdev: network interface device structure
 * @ifr: pointer to the interface request
 * @cmd: ioctl command
 **/

static int e1000_ioctl(struct net_device *netdev, struct ifreq *ifr, int cmd)
{
	switch (cmd) {
	case SIOCGMIIPHY:
	case SIOCGMIIREG:
	case SIOCSMIIREG:
		return e1000_mii_ioctl(netdev, ifr, cmd);
	default:
		return -EOPNOTSUPP;
	}
}

/**
 * e1000_mii_ioctl - handle MII ioctls
 * @netdev: network interface device structure
 * @ifr: pointer to the interface request
 * @cmd: ioctl command
 **/

static int e1000_mii_ioctl(struct net_device *netdev, struct ifreq *ifr,
			   int cmd)
{
	struct e1000_adapter *adapter = netdev_priv(netdev);
	struct e1000_hw *hw = &adapter->hw;
	struct mii_ioctl_data *data = if_mii(ifr);
	int retval;
	u16 mii_reg;
	unsigned long flags;

	if (hw->media_type != e1000_media_type_copper)
		return -EOPNOTSUPP;

	switch (cmd) {
	case SIOCGMIIPHY:
		data->phy_id = hw->phy_addr;
		break;
	case SIOCGMIIREG:
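		/* PHY registers must not be accessed from user space while
		 * the device is in use by EtherCAT */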
		if (adapter->ecdev) {
			return -EPERM;
		}
		spin_lock_irqsave(&adapter->stats_lock, flags);
		if (e1000_read_phy_reg(hw, data->reg_num & 0x1F,
				   &data->val_out)) {
			spin_unlock_irqrestore(&adapter->stats_lock, flags);
			return -EIO;
		}
		spin_unlock_irqrestore(&adapter->stats_lock, flags);
		break;
	case SIOCSMIIREG:
		if (adapter->ecdev) {
			return -EPERM;
		}
		if (data->reg_num & ~(0x1F))
			return -EFAULT;
		mii_reg = data->val_in;
		spin_lock_irqsave(&adapter->stats_lock, flags);
		if (e1000_write_phy_reg(hw, data->reg_num,
					mii_reg)) {
			spin_unlock_irqrestore(&adapter->stats_lock, flags);
			return -EIO;
		}
		spin_unlock_irqrestore(&adapter->stats_lock, flags);
		if (hw->media_type == e1000_media_type_copper) {
			switch (data->reg_num) {
			case PHY_CTRL:
				if (mii_reg & MII_CR_POWER_DOWN)
					break;
				if (mii_reg & MII_CR_AUTO_NEG_EN) {
					hw->autoneg = 1;
					hw->autoneg_advertised = 0x2F;
				} else {
					u32 speed;
					if (mii_reg & 0x40)
						speed = SPEED_1000;
					else if (mii_reg & 0x2000)
						speed = SPEED_100;
					else
						speed = SPEED_10;
					retval = e1000_set_spd_dplx(
						adapter, speed,
						((mii_reg & 0x100)
						 ? DUPLEX_FULL :
						 DUPLEX_HALF));
					if (retval)
						return retval;
				}
				if (netif_running(adapter->netdev))
					e1000_reinit_locked(adapter);
				else
					e1000_reset(adapter);
				break;
			case M88E1000_PHY_SPEC_CTRL:
			case M88E1000_EXT_PHY_SPEC_CTRL:
				if (e1000_phy_reset(hw))
					return -EIO;
				break;
			}
		} else {
			switch (data->reg_num) {
			case PHY_CTRL:
				if (mii_reg & MII_CR_POWER_DOWN)
					break;
				if (netif_running(adapter->netdev))
					e1000_reinit_locked(adapter);
				else
					e1000_reset(adapter);
				break;
			}
		}
		break;
	default:
		return -EOPNOTSUPP;
	}
	return E1000_SUCCESS;
}

void e1000_pci_set_mwi(struct e1000_hw *hw)
{
	struct e1000_adapter *adapter = hw->back;
	int ret_val = pci_set_mwi(adapter->pdev);

	if (ret_val)
		e_err(probe, "Error in setting MWI\n");
}

void e1000_pci_clear_mwi(struct e1000_hw *hw)
{
	struct e1000_adapter *adapter = hw->back;

	pci_clear_mwi(adapter->pdev);
}

int e1000_pcix_get_mmrbc(struct e1000_hw *hw)
{
	struct e1000_adapter *adapter = hw->back;
	return pcix_get_mmrbc(adapter->pdev);
}

void e1000_pcix_set_mmrbc(struct e1000_hw *hw, int mmrbc)
{
	struct e1000_adapter *adapter = hw->back;
	pcix_set_mmrbc(adapter->pdev, mmrbc);
}

void e1000_io_write(struct e1000_hw *hw, unsigned long port, u32 value)
{
	outl(value, port);
}

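/* Return true if at least one VLAN ID is currently registered on the
 * adapter. */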
static bool e1000_vlan_used(struct e1000_adapter *adapter)
{
	u16 vid;

	for_each_set_bit(vid, adapter->active_vlans, VLAN_N_VID)
		return true;
	return false;
}

static void __e1000_vlan_mode(struct e1000_adapter *adapter,
			      netdev_features_t features)
{
	struct e1000_hw *hw = &adapter->hw;
	u32 ctrl;

	ctrl = er32(CTRL);
	if (features & NETIF_F_HW_VLAN_RX) {
		/* enable VLAN tag insert/strip */
		ctrl |= E1000_CTRL_VME;
	} else {
		/* disable VLAN tag insert/strip */
		ctrl &= ~E1000_CTRL_VME;
	}
	ew32(CTRL, ctrl);
}
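
/* Enable or disable hardware VLAN receive filtering; VLAN tag stripping
 * itself is controlled separately via CTRL.VME in __e1000_vlan_mode(). */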
static void e1000_vlan_filter_on_off(struct e1000_adapter *adapter,
				     bool filter_on)
{
	struct e1000_hw *hw = &adapter->hw;
	u32 rctl;

	if (!test_bit(__E1000_DOWN, &adapter->flags))
		e1000_irq_disable(adapter);

	__e1000_vlan_mode(adapter, adapter->netdev->features);
	if (filter_on) {
		/* enable VLAN receive filtering */
		rctl = er32(RCTL);
		rctl &= ~E1000_RCTL_CFIEN;
		if (!(adapter->netdev->flags & IFF_PROMISC))
			rctl |= E1000_RCTL_VFE;
		ew32(RCTL, rctl);
		e1000_update_mng_vlan(adapter);
	} else {
		/* disable VLAN receive filtering */
		rctl = er32(RCTL);
		rctl &= ~E1000_RCTL_VFE;
		ew32(RCTL, rctl);
	}

	if (!test_bit(__E1000_DOWN, &adapter->flags))
		e1000_irq_enable(adapter);
}

static void e1000_vlan_mode(struct net_device *netdev,
			    netdev_features_t features)
{
	struct e1000_adapter *adapter = netdev_priv(netdev);

	if (!test_bit(__E1000_DOWN, &adapter->flags))
		e1000_irq_disable(adapter);

	__e1000_vlan_mode(adapter, features);

	if (!test_bit(__E1000_DOWN, &adapter->flags))
		e1000_irq_enable(adapter);
}

static int e1000_vlan_rx_add_vid(struct net_device *netdev, u16 vid)
{
	struct e1000_adapter *adapter = netdev_priv(netdev);
	struct e1000_hw *hw = &adapter->hw;
	u32 vfta, index;

	if ((hw->mng_cookie.status &
	     E1000_MNG_DHCP_COOKIE_STATUS_VLAN_SUPPORT) &&
	    (vid == adapter->mng_vlan_id))
		return 0;

	if (!e1000_vlan_used(adapter))
		e1000_vlan_filter_on_off(adapter, true);

	/* add VID to filter table */
	index = (vid >> 5) & 0x7F;
	vfta = E1000_READ_REG_ARRAY(hw, VFTA, index);
	vfta |= (1 << (vid & 0x1F));
	e1000_write_vfta(hw, index, vfta);

	set_bit(vid, adapter->active_vlans);

	return 0;
}

static int e1000_vlan_rx_kill_vid(struct net_device *netdev, u16 vid)
{
	struct e1000_adapter *adapter = netdev_priv(netdev);
	struct e1000_hw *hw = &adapter->hw;
	u32 vfta, index;

	if (!test_bit(__E1000_DOWN, &adapter->flags))
		e1000_irq_disable(adapter);
	if (!test_bit(__E1000_DOWN, &adapter->flags))
		e1000_irq_enable(adapter);

	/* remove VID from filter table */
	index = (vid >> 5) & 0x7F;
	vfta = E1000_READ_REG_ARRAY(hw, VFTA, index);
	vfta &= ~(1 << (vid & 0x1F));
	e1000_write_vfta(hw, index, vfta);

	clear_bit(vid, adapter->active_vlans);

	if (!e1000_vlan_used(adapter))
		e1000_vlan_filter_on_off(adapter, false);

	return 0;
}

static void e1000_restore_vlan(struct e1000_adapter *adapter)
{
	u16 vid;

	if (!e1000_vlan_used(adapter))
		return;

	e1000_vlan_filter_on_off(adapter, true);
	for_each_set_bit(vid, adapter->active_vlans, VLAN_N_VID)
		e1000_vlan_rx_add_vid(adapter->netdev, vid);
}

int e1000_set_spd_dplx(struct e1000_adapter *adapter, u32 spd, u8 dplx)
{
	struct e1000_hw *hw = &adapter->hw;

	hw->autoneg = 0;

	/* Make sure dplx is at most 1 bit and lsb of speed is not set
	 * for the switch() below to work */
	if ((spd & 1) || (dplx & ~1))
		goto err_inval;

	/* Fiber NICs only allow 1000 Mbps full duplex */
	if ((hw->media_type == e1000_media_type_fiber) &&
	    spd != SPEED_1000 &&
	    dplx != DUPLEX_FULL)
		goto err_inval;

	switch (spd + dplx) {
	case SPEED_10 + DUPLEX_HALF:
		hw->forced_speed_duplex = e1000_10_half;
		break;
	case SPEED_10 + DUPLEX_FULL:
		hw->forced_speed_duplex = e1000_10_full;
		break;
	case SPEED_100 + DUPLEX_HALF:
		hw->forced_speed_duplex = e1000_100_half;
		break;
	case SPEED_100 + DUPLEX_FULL:
		hw->forced_speed_duplex = e1000_100_full;
		break;
	case SPEED_1000 + DUPLEX_FULL:
		hw->autoneg = 1;
		hw->autoneg_advertised = ADVERTISE_1000_FULL;
		break;
	case SPEED_1000 + DUPLEX_HALF: /* not supported */
	default:
		goto err_inval;
	}
	return 0;

err_inval:
	e_err(probe, "Unsupported Speed/Duplex configuration\n");
	return -EINVAL;
}

static int __e1000_shutdown(struct pci_dev *pdev, bool *enable_wake)
{
	struct net_device *netdev = pci_get_drvdata(pdev);
	struct e1000_adapter *adapter = netdev_priv(netdev);
	struct e1000_hw *hw = &adapter->hw;
	u32 ctrl, ctrl_ext, rctl, status;
	u32 wufc = adapter->wol;
#ifdef CONFIG_PM
	int retval = 0;
#endif

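	/* A device that is in use by EtherCAT must never be powered down
	 * or suspended */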
	if (adapter->ecdev) {
		return -EBUSY;
	}

	netif_device_detach(netdev);

	if (netif_running(netdev)) {
		WARN_ON(test_bit(__E1000_RESETTING, &adapter->flags));
		e1000_down(adapter);
	}

#ifdef CONFIG_PM
	retval = pci_save_state(pdev);
	if (retval)
		return retval;
#endif

	status = er32(STATUS);
	if (status & E1000_STATUS_LU)
		wufc &= ~E1000_WUFC_LNKC;

	if (wufc) {
		e1000_setup_rctl(adapter);
		e1000_set_rx_mode(netdev);

		rctl = er32(RCTL);

		/* turn on all-multi mode if wake on multicast is enabled */
		if (wufc & E1000_WUFC_MC)
			rctl |= E1000_RCTL_MPE;

		/* enable receives in the hardware */
		ew32(RCTL, rctl | E1000_RCTL_EN);

		if (hw->mac_type >= e1000_82540) {
			ctrl = er32(CTRL);
			/* advertise wake from D3Cold */
			#define E1000_CTRL_ADVD3WUC 0x00100000
			/* phy power management enable */
			#define E1000_CTRL_EN_PHY_PWR_MGMT 0x00200000
			ctrl |= E1000_CTRL_ADVD3WUC |
				E1000_CTRL_EN_PHY_PWR_MGMT;
			ew32(CTRL, ctrl);
		}

		if (hw->media_type == e1000_media_type_fiber ||
		    hw->media_type == e1000_media_type_internal_serdes) {
			/* keep the laser running in D3 */
			ctrl_ext = er32(CTRL_EXT);
			ctrl_ext |= E1000_CTRL_EXT_SDP7_DATA;
			ew32(CTRL_EXT, ctrl_ext);
		}

		ew32(WUC, E1000_WUC_PME_EN);
		ew32(WUFC, wufc);
	} else {
		ew32(WUC, 0);
		ew32(WUFC, 0);
	}

	e1000_release_manageability(adapter);

	*enable_wake = !!wufc;

	/* make sure adapter isn't asleep if manageability is enabled */
	if (adapter->en_mng_pt)
		*enable_wake = true;

	if (netif_running(netdev))
		e1000_free_irq(adapter);

	pci_disable_device(pdev);

	return 0;
}

#ifdef CONFIG_PM
static int e1000_suspend(struct pci_dev *pdev, pm_message_t state)
{
	int retval;
	bool wake;

	retval = __e1000_shutdown(pdev, &wake);
	if (retval)
		return retval;

	if (wake) {
		pci_prepare_to_sleep(pdev);
	} else {
		pci_wake_from_d3(pdev, false);
		pci_set_power_state(pdev, PCI_D3hot);
	}

	return 0;
}

static int e1000_resume(struct pci_dev *pdev)
{
	struct net_device *netdev = pci_get_drvdata(pdev);
	struct e1000_adapter *adapter = netdev_priv(netdev);
	struct e1000_hw *hw = &adapter->hw;
	u32 err;

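	/* EtherCAT devices refuse to suspend (__e1000_shutdown() returns
	 * -EBUSY), so there is nothing to resume for them either */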
	if (adapter->ecdev) {
		return -EBUSY;
	}

	pci_set_power_state(pdev, PCI_D0);
	pci_restore_state(pdev);
	pci_save_state(pdev);

	if (adapter->need_ioport)
		err = pci_enable_device(pdev);
	else
		err = pci_enable_device_mem(pdev);
	if (err) {
		pr_err("Cannot enable PCI device from suspend\n");
		return err;
	}
	pci_set_master(pdev);

	pci_enable_wake(pdev, PCI_D3hot, 0);
	pci_enable_wake(pdev, PCI_D3cold, 0);

	if (netif_running(netdev)) {
		err = e1000_request_irq(adapter);
		if (err)
			return err;
	}

	e1000_power_up_phy(adapter);
	e1000_reset(adapter);
	ew32(WUS, ~0);

	e1000_init_manageability(adapter);

	if (netif_running(netdev))
		e1000_up(adapter);

	if (!adapter->ecdev) {
		netif_device_attach(netdev);
	}

	return 0;
}
#endif
  5274 
       
  5275 static void e1000_shutdown(struct pci_dev *pdev)
       
  5276 {
       
  5277 	bool wake;
       
  5278 
       
  5279 	__e1000_shutdown(pdev, &wake);
       
  5280 
       
  5281 	if (system_state == SYSTEM_POWER_OFF) {
       
  5282 		pci_wake_from_d3(pdev, wake);
       
  5283 		pci_set_power_state(pdev, PCI_D3hot);
       
  5284 	}
       
  5285 }
       
  5286 
       
#ifdef CONFIG_NET_POLL_CONTROLLER
/*
 * Polling 'interrupt' - used by things like netconsole to send skbs
 * without having to re-enable interrupts. It's not called while
 * the interrupt routine is executing.
 */
static void e1000_netpoll(struct net_device *netdev)
{
	struct e1000_adapter *adapter = netdev_priv(netdev);

	disable_irq(adapter->pdev->irq);
	e1000_intr(adapter->pdev->irq, netdev);
	enable_irq(adapter->pdev->irq);
}
#endif

/**
 * e1000_io_error_detected - called when PCI error is detected
 * @pdev: Pointer to PCI device
 * @state: The current PCI connection state
 *
 * This function is called after a PCI bus error affecting
 * this device has been detected.
 */
static pci_ers_result_t e1000_io_error_detected(struct pci_dev *pdev,
						pci_channel_state_t state)
{
	struct net_device *netdev = pci_get_drvdata(pdev);
	struct e1000_adapter *adapter = netdev_priv(netdev);

	netif_device_detach(netdev);

	if (state == pci_channel_io_perm_failure)
		return PCI_ERS_RESULT_DISCONNECT;

	if (netif_running(netdev))
		e1000_down(adapter);
	pci_disable_device(pdev);

	/* Request a slot reset. */
	return PCI_ERS_RESULT_NEED_RESET;
}

/**
 * e1000_io_slot_reset - called after the PCI bus has been reset.
 * @pdev: Pointer to PCI device
 *
 * Restart the card from scratch, as if from a cold boot. Implementation
 * resembles the first half of the e1000_resume routine.
 */
static pci_ers_result_t e1000_io_slot_reset(struct pci_dev *pdev)
{
	struct net_device *netdev = pci_get_drvdata(pdev);
	struct e1000_adapter *adapter = netdev_priv(netdev);
	struct e1000_hw *hw = &adapter->hw;
	int err;

	if (adapter->need_ioport)
		err = pci_enable_device(pdev);
	else
		err = pci_enable_device_mem(pdev);
	if (err) {
		pr_err("Cannot re-enable PCI device after reset.\n");
		return PCI_ERS_RESULT_DISCONNECT;
	}
	pci_set_master(pdev);

	pci_enable_wake(pdev, PCI_D3hot, 0);
	pci_enable_wake(pdev, PCI_D3cold, 0);

	e1000_reset(adapter);
	ew32(WUS, ~0);	/* clear any pending wake-up status */

	return PCI_ERS_RESULT_RECOVERED;
}

/**
 * e1000_io_resume - called when traffic can start flowing again.
 * @pdev: Pointer to PCI device
 *
 * This callback is called when the error recovery driver tells us that
 * it's OK to resume normal operation. Implementation resembles the
 * second half of the e1000_resume routine.
 */
static void e1000_io_resume(struct pci_dev *pdev)
{
	struct net_device *netdev = pci_get_drvdata(pdev);
	struct e1000_adapter *adapter = netdev_priv(netdev);

	e1000_init_manageability(adapter);

	if (netif_running(netdev)) {
		if (e1000_up(adapter)) {
			pr_info("can't bring device back up after reset\n");
			return;
		}
	}

	netif_device_attach(netdev);
}

/* e1000_main.c */