devices/e1000/e1000_main-3.4-ethercat.c
     1 /*******************************************************************************
       
     2 
       
     3   Intel PRO/1000 Linux driver
       
     4   Copyright(c) 1999 - 2006 Intel Corporation.
       
     5 
       
     6   This program is free software; you can redistribute it and/or modify it
       
     7   under the terms and conditions of the GNU General Public License,
       
     8   version 2, as published by the Free Software Foundation.
       
     9 
       
    10   This program is distributed in the hope it will be useful, but WITHOUT
       
    11   ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
       
    12   FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
       
    13   more details.
       
    14 
       
    15   You should have received a copy of the GNU General Public License along with
       
    16   this program; if not, write to the Free Software Foundation, Inc.,
       
    17   51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
       
    18 
       
    19   The full GNU General Public License is included in this distribution in
       
    20   the file called "COPYING".
       
    21 
       
    22   Contact Information:
       
    23   Linux NICS <linux.nics@intel.com>
       
    24   e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
       
    25   Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
       
    26 
       
    27   vim: noexpandtab
       
    28 
       
    29 *******************************************************************************/
       
    30 
       
    31 #include "e1000-3.4-ethercat.h"
       
    32 #include <net/ip6_checksum.h>
       
    33 #include <linux/io.h>
       
    34 #include <linux/prefetch.h>
       
    35 #include <linux/bitops.h>
       
    36 #include <linux/if_vlan.h>
       
    37 
       
    38 char e1000_driver_name[] = "ec_e1000";
       
    39 static char e1000_driver_string[] = "Intel(R) PRO/1000 Network Driver";
       
    40 #define DRV_VERSION "7.3.21-k8-NAPI"
       
    41 const char e1000_driver_version[] = DRV_VERSION;
       
    42 static const char e1000_copyright[] = "Copyright (c) 1999-2006 Intel Corporation.";
       
    43 
       
    44 /* e1000_pci_tbl - PCI Device ID Table
       
    45  *
       
    46  * Last entry must be all 0s
       
    47  *
       
    48  * Macro expands to...
       
    49  *   {PCI_DEVICE(PCI_VENDOR_ID_INTEL, device_id)}
       
    50  */
       
    51 static DEFINE_PCI_DEVICE_TABLE(e1000_pci_tbl) = {
       
    52 	INTEL_E1000_ETHERNET_DEVICE(0x1000),
       
    53 	INTEL_E1000_ETHERNET_DEVICE(0x1001),
       
    54 	INTEL_E1000_ETHERNET_DEVICE(0x1004),
       
    55 	INTEL_E1000_ETHERNET_DEVICE(0x1008),
       
    56 	INTEL_E1000_ETHERNET_DEVICE(0x1009),
       
    57 	INTEL_E1000_ETHERNET_DEVICE(0x100C),
       
    58 	INTEL_E1000_ETHERNET_DEVICE(0x100D),
       
    59 	INTEL_E1000_ETHERNET_DEVICE(0x100E),
       
    60 	INTEL_E1000_ETHERNET_DEVICE(0x100F),
       
    61 	INTEL_E1000_ETHERNET_DEVICE(0x1010),
       
    62 	INTEL_E1000_ETHERNET_DEVICE(0x1011),
       
    63 	INTEL_E1000_ETHERNET_DEVICE(0x1012),
       
    64 	INTEL_E1000_ETHERNET_DEVICE(0x1013),
       
    65 	INTEL_E1000_ETHERNET_DEVICE(0x1014),
       
    66 	INTEL_E1000_ETHERNET_DEVICE(0x1015),
       
    67 	INTEL_E1000_ETHERNET_DEVICE(0x1016),
       
    68 	INTEL_E1000_ETHERNET_DEVICE(0x1017),
       
    69 	INTEL_E1000_ETHERNET_DEVICE(0x1018),
       
    70 	INTEL_E1000_ETHERNET_DEVICE(0x1019),
       
    71 	INTEL_E1000_ETHERNET_DEVICE(0x101A),
       
    72 	INTEL_E1000_ETHERNET_DEVICE(0x101D),
       
    73 	INTEL_E1000_ETHERNET_DEVICE(0x101E),
       
    74 	INTEL_E1000_ETHERNET_DEVICE(0x1026),
       
    75 	INTEL_E1000_ETHERNET_DEVICE(0x1027),
       
    76 	INTEL_E1000_ETHERNET_DEVICE(0x1028),
       
    77 	INTEL_E1000_ETHERNET_DEVICE(0x1075),
       
    78 	INTEL_E1000_ETHERNET_DEVICE(0x1076),
       
    79 	INTEL_E1000_ETHERNET_DEVICE(0x1077),
       
    80 	INTEL_E1000_ETHERNET_DEVICE(0x1078),
       
    81 	INTEL_E1000_ETHERNET_DEVICE(0x1079),
       
    82 	INTEL_E1000_ETHERNET_DEVICE(0x107A),
       
    83 	INTEL_E1000_ETHERNET_DEVICE(0x107B),
       
    84 	INTEL_E1000_ETHERNET_DEVICE(0x107C),
       
    85 	INTEL_E1000_ETHERNET_DEVICE(0x108A),
       
    86 	INTEL_E1000_ETHERNET_DEVICE(0x1099),
       
    87 	INTEL_E1000_ETHERNET_DEVICE(0x10B5),
       
    88 	INTEL_E1000_ETHERNET_DEVICE(0x2E6E),
       
    89 	/* required last entry */
       
    90 	{0,}
       
    91 };
       
    92 
       
    93 // do not auto-load driver
       
    94 // MODULE_DEVICE_TABLE(pci, e1000_pci_tbl);
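// Leaving MODULE_DEVICE_TABLE() commented out keeps udev/modprobe from
// auto-binding this driver to every matching NIC; the EtherCAT-capable
// driver is intended to be loaded explicitly, leaving non-EtherCAT ports
// to the standard e1000 driver.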
       
    95 
       
    96 int e1000_up(struct e1000_adapter *adapter);
       
    97 void e1000_down(struct e1000_adapter *adapter);
       
    98 void e1000_reinit_locked(struct e1000_adapter *adapter);
       
    99 void e1000_reset(struct e1000_adapter *adapter);
       
   100 int e1000_setup_all_tx_resources(struct e1000_adapter *adapter);
       
   101 int e1000_setup_all_rx_resources(struct e1000_adapter *adapter);
       
   102 void e1000_free_all_tx_resources(struct e1000_adapter *adapter);
       
   103 void e1000_free_all_rx_resources(struct e1000_adapter *adapter);
       
   104 static int e1000_setup_tx_resources(struct e1000_adapter *adapter,
       
   105                              struct e1000_tx_ring *txdr);
       
   106 static int e1000_setup_rx_resources(struct e1000_adapter *adapter,
       
   107                              struct e1000_rx_ring *rxdr);
       
   108 static void e1000_free_tx_resources(struct e1000_adapter *adapter,
       
   109                              struct e1000_tx_ring *tx_ring);
       
   110 static void e1000_free_rx_resources(struct e1000_adapter *adapter,
       
   111                              struct e1000_rx_ring *rx_ring);
       
   112 void e1000_update_stats(struct e1000_adapter *adapter);
       
   113 
       
   114 static int e1000_init_module(void);
       
   115 static void e1000_exit_module(void);
       
   116 static int e1000_probe(struct pci_dev *pdev, const struct pci_device_id *ent);
       
   117 static void __devexit e1000_remove(struct pci_dev *pdev);
       
   118 static int e1000_alloc_queues(struct e1000_adapter *adapter);
       
   119 static int e1000_sw_init(struct e1000_adapter *adapter);
       
   120 static int e1000_open(struct net_device *netdev);
       
   121 static int e1000_close(struct net_device *netdev);
       
   122 static void e1000_configure_tx(struct e1000_adapter *adapter);
       
   123 static void e1000_configure_rx(struct e1000_adapter *adapter);
       
   124 static void e1000_setup_rctl(struct e1000_adapter *adapter);
       
   125 static void e1000_clean_all_tx_rings(struct e1000_adapter *adapter);
       
   126 static void e1000_clean_all_rx_rings(struct e1000_adapter *adapter);
       
   127 static void e1000_clean_tx_ring(struct e1000_adapter *adapter,
       
   128                                 struct e1000_tx_ring *tx_ring);
       
   129 static void e1000_clean_rx_ring(struct e1000_adapter *adapter,
       
   130                                 struct e1000_rx_ring *rx_ring);
       
   131 static void e1000_set_rx_mode(struct net_device *netdev);
       
   132 static void e1000_update_phy_info_task(struct work_struct *work);
       
   133 static void e1000_watchdog(struct work_struct *work);
       
   134 static void e1000_82547_tx_fifo_stall_task(struct work_struct *work);
       
   135 static netdev_tx_t e1000_xmit_frame(struct sk_buff *skb,
       
   136 				    struct net_device *netdev);
       
   137 static struct net_device_stats * e1000_get_stats(struct net_device *netdev);
       
   138 static int e1000_change_mtu(struct net_device *netdev, int new_mtu);
       
   139 static int e1000_set_mac(struct net_device *netdev, void *p);
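/* ec_poll() is the cyclic polling entry point used by the EtherCAT master
 * in place of the interrupt/NAPI path once the device has been claimed. */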
       
   140 void ec_poll(struct net_device *);
       
   141 static irqreturn_t e1000_intr(int irq, void *data);
       
   142 static bool e1000_clean_tx_irq(struct e1000_adapter *adapter,
       
   143 			       struct e1000_tx_ring *tx_ring);
       
   144 static int e1000_clean(struct napi_struct *napi, int budget);
       
   145 static bool e1000_clean_rx_irq(struct e1000_adapter *adapter,
       
   146 			       struct e1000_rx_ring *rx_ring,
       
   147 			       int *work_done, int work_to_do);
       
   148 static bool e1000_clean_jumbo_rx_irq(struct e1000_adapter *adapter,
       
   149 				     struct e1000_rx_ring *rx_ring,
       
   150 				     int *work_done, int work_to_do);
       
   151 static void e1000_alloc_rx_buffers(struct e1000_adapter *adapter,
       
   152 				   struct e1000_rx_ring *rx_ring,
       
   153 				   int cleaned_count);
       
   154 static void e1000_alloc_jumbo_rx_buffers(struct e1000_adapter *adapter,
       
   155 					 struct e1000_rx_ring *rx_ring,
       
   156 					 int cleaned_count);
       
   157 static int e1000_ioctl(struct net_device *netdev, struct ifreq *ifr, int cmd);
       
   158 static int e1000_mii_ioctl(struct net_device *netdev, struct ifreq *ifr,
       
   159 			   int cmd);
       
   160 static void e1000_enter_82542_rst(struct e1000_adapter *adapter);
       
   161 static void e1000_leave_82542_rst(struct e1000_adapter *adapter);
       
   162 static void e1000_tx_timeout(struct net_device *dev);
       
   163 static void e1000_reset_task(struct work_struct *work);
       
   164 static void e1000_smartspeed(struct e1000_adapter *adapter);
       
   165 static int e1000_82547_fifo_workaround(struct e1000_adapter *adapter,
       
   166                                        struct sk_buff *skb);
       
   167 
       
   168 static bool e1000_vlan_used(struct e1000_adapter *adapter);
       
   169 static void e1000_vlan_mode(struct net_device *netdev,
       
   170 			    netdev_features_t features);
       
   171 static void e1000_vlan_filter_on_off(struct e1000_adapter *adapter,
       
   172 				     bool filter_on);
       
   173 static int e1000_vlan_rx_add_vid(struct net_device *netdev, u16 vid);
       
   174 static int e1000_vlan_rx_kill_vid(struct net_device *netdev, u16 vid);
       
   175 static void e1000_restore_vlan(struct e1000_adapter *adapter);
       
   176 
       
   177 #ifdef CONFIG_PM
       
   178 static int e1000_suspend(struct pci_dev *pdev, pm_message_t state);
       
   179 static int e1000_resume(struct pci_dev *pdev);
       
   180 #endif
       
   181 static void e1000_shutdown(struct pci_dev *pdev);
       
   182 
       
   183 #ifdef CONFIG_NET_POLL_CONTROLLER
       
   184 /* for netdump / net console */
       
   185 static void e1000_netpoll (struct net_device *netdev);
       
   186 #endif
       
   187 
       
   188 #define COPYBREAK_DEFAULT 256
       
   189 static unsigned int copybreak __read_mostly = COPYBREAK_DEFAULT;
       
   190 module_param(copybreak, uint, 0644);
       
   191 MODULE_PARM_DESC(copybreak,
       
   192 	"Maximum size of packet that is copied to a new buffer on receive");
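/* Received frames no larger than copybreak are copied into a small, freshly
 * allocated skb so the original ring buffer can be reused immediately. */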
       
   193 
       
   194 static pci_ers_result_t e1000_io_error_detected(struct pci_dev *pdev,
       
   195                      pci_channel_state_t state);
       
   196 static pci_ers_result_t e1000_io_slot_reset(struct pci_dev *pdev);
       
   197 static void e1000_io_resume(struct pci_dev *pdev);
       
   198 
       
   199 static struct pci_error_handlers e1000_err_handler = {
       
   200 	.error_detected = e1000_io_error_detected,
       
   201 	.slot_reset = e1000_io_slot_reset,
       
   202 	.resume = e1000_io_resume,
       
   203 };
       
   204 
       
   205 static struct pci_driver e1000_driver = {
       
   206 	.name     = e1000_driver_name,
       
   207 	.id_table = e1000_pci_tbl,
       
   208 	.probe    = e1000_probe,
       
   209 	.remove   = __devexit_p(e1000_remove),
       
   210 #ifdef CONFIG_PM
       
   211 	/* Power Management Hooks */
       
   212 	.suspend  = e1000_suspend,
       
   213 	.resume   = e1000_resume,
       
   214 #endif
       
   215 	.shutdown = e1000_shutdown,
       
   216 	.err_handler = &e1000_err_handler
       
   217 };
       
   218 
       
   219 MODULE_AUTHOR("Florian Pose <fp@igh-essen.com>");
       
   220 MODULE_DESCRIPTION("EtherCAT-capable Intel(R) PRO/1000 Network Driver");
       
   221 MODULE_LICENSE("GPL");
       
   222 MODULE_VERSION(DRV_VERSION);
       
   223 
       
   224 #define DEFAULT_MSG_ENABLE (NETIF_MSG_DRV|NETIF_MSG_PROBE|NETIF_MSG_LINK)
       
   225 static int debug = -1;
       
   226 module_param(debug, int, 0);
       
   227 MODULE_PARM_DESC(debug, "Debug level (0=none,...,16=all)");
       
   228 
       
   229 /**
       
   230  * e1000_get_hw_dev - return device
       
   231  * used by hardware layer to print debugging information
       
   232  *
       
   233  **/
       
   234 struct net_device *e1000_get_hw_dev(struct e1000_hw *hw)
       
   235 {
       
   236 	struct e1000_adapter *adapter = hw->back;
       
   237 	return adapter->netdev;
       
   238 }
       
   239 
       
   240 /**
       
   241  * e1000_init_module - Driver Registration Routine
       
   242  *
       
   243  * e1000_init_module is the first routine called when the driver is
       
   244  * loaded. All it does is register with the PCI subsystem.
       
   245  **/
       
   246 
       
   247 static int __init e1000_init_module(void)
       
   248 {
       
   249 	int ret;
       
   250 	pr_info("%s - version %s\n", e1000_driver_string, e1000_driver_version);
       
   251 
       
   252 	pr_info("%s\n", e1000_copyright);
       
   253 
       
   254 	ret = pci_register_driver(&e1000_driver);
       
   255 	if (copybreak != COPYBREAK_DEFAULT) {
       
   256 		if (copybreak == 0)
       
   257 			pr_info("copybreak disabled\n");
       
   258 		else
       
   259 			pr_info("copybreak enabled for "
       
   260 				   "packets <= %u bytes\n", copybreak);
       
   261 	}
       
   262 	return ret;
       
   263 }
       
   264 
       
   265 module_init(e1000_init_module);
       
   266 
       
   267 /**
       
   268  * e1000_exit_module - Driver Exit Cleanup Routine
       
   269  *
       
   270  * e1000_exit_module is called just before the driver is removed
       
   271  * from memory.
       
   272  **/
       
   273 
       
   274 static void __exit e1000_exit_module(void)
       
   275 {
       
   276 	pci_unregister_driver(&e1000_driver);
       
   277 }
       
   278 
       
   279 module_exit(e1000_exit_module);
       
   280 
       
   281 static int e1000_request_irq(struct e1000_adapter *adapter)
       
   282 {
       
   283 	struct net_device *netdev = adapter->netdev;
       
   284 	irq_handler_t handler = e1000_intr;
       
   285 	int irq_flags = IRQF_SHARED;
       
   286 	int err;
       
   287 
       
   288 	if (adapter->ecdev) {
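		/* EtherCAT-claimed devices are driven by ec_poll() from the
		 * master; no Linux interrupt is requested for them. */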
       
   289 		return 0;
       
   290 	}
       
   291 
       
   292 	err = request_irq(adapter->pdev->irq, handler, irq_flags, netdev->name,
       
   293 	                  netdev);
       
   294 	if (err) {
       
    295 		e_err(probe, "Unable to allocate interrupt, Error: %d\n", err);
       
   296 	}
       
   297 
       
   298 	return err;
       
   299 }
       
   300 
       
   301 static void e1000_free_irq(struct e1000_adapter *adapter)
       
   302 {
       
   303 	struct net_device *netdev = adapter->netdev;
       
   304 
       
   305 	if (adapter->ecdev) {
       
   306 		return;
       
   307 	}
       
   308 
       
   309 	free_irq(adapter->pdev->irq, netdev);
       
   310 }
       
   311 
       
   312 /**
       
   313  * e1000_irq_disable - Mask off interrupt generation on the NIC
       
   314  * @adapter: board private structure
       
   315  **/
       
   316 
       
   317 static void e1000_irq_disable(struct e1000_adapter *adapter)
       
   318 {
       
   319 	struct e1000_hw *hw = &adapter->hw;
       
   320 
       
   321 	if (adapter->ecdev) {
       
   322 		return;
       
   323 	}
       
   324 
       
   325 	ew32(IMC, ~0);
       
   326 	E1000_WRITE_FLUSH();
       
   327 	synchronize_irq(adapter->pdev->irq);
       
   328 }
       
   329 
       
   330 /**
       
   331  * e1000_irq_enable - Enable default interrupt generation settings
       
   332  * @adapter: board private structure
       
   333  **/
       
   334 
       
   335 static void e1000_irq_enable(struct e1000_adapter *adapter)
       
   336 {
       
   337 	struct e1000_hw *hw = &adapter->hw;
       
   338 
       
   339 	if (adapter->ecdev) {
       
   340 		return;
       
   341 	}
       
   342 
       
   343 	ew32(IMS, IMS_ENABLE_MASK);
       
   344 	E1000_WRITE_FLUSH();
       
   345 }
       
   346 
       
   347 static void e1000_update_mng_vlan(struct e1000_adapter *adapter)
       
   348 {
       
   349 	struct e1000_hw *hw = &adapter->hw;
       
   350 	struct net_device *netdev = adapter->netdev;
       
   351 	u16 vid = hw->mng_cookie.vlan_id;
       
   352 	u16 old_vid = adapter->mng_vlan_id;
       
   353 
       
   354 	if (!e1000_vlan_used(adapter))
       
   355 		return;
       
   356 
       
   357 	if (!test_bit(vid, adapter->active_vlans)) {
       
   358 		if (hw->mng_cookie.status &
       
   359 		    E1000_MNG_DHCP_COOKIE_STATUS_VLAN_SUPPORT) {
       
   360 			e1000_vlan_rx_add_vid(netdev, vid);
       
   361 			adapter->mng_vlan_id = vid;
       
   362 		} else {
       
   363 			adapter->mng_vlan_id = E1000_MNG_VLAN_NONE;
       
   364 		}
       
   365 		if ((old_vid != (u16)E1000_MNG_VLAN_NONE) &&
       
   366 		    (vid != old_vid) &&
       
   367 		    !test_bit(old_vid, adapter->active_vlans))
       
   368 			e1000_vlan_rx_kill_vid(netdev, old_vid);
       
   369 	} else {
       
   370 		adapter->mng_vlan_id = vid;
       
   371 	}
       
   372 }
       
   373 
       
   374 static void e1000_init_manageability(struct e1000_adapter *adapter)
       
   375 {
       
   376 	struct e1000_hw *hw = &adapter->hw;
       
   377 
       
   378 	if (adapter->en_mng_pt) {
       
   379 		u32 manc = er32(MANC);
       
   380 
       
   381 		/* disable hardware interception of ARP */
       
   382 		manc &= ~(E1000_MANC_ARP_EN);
       
   383 
       
   384 		ew32(MANC, manc);
       
   385 	}
       
   386 }
       
   387 
       
   388 static void e1000_release_manageability(struct e1000_adapter *adapter)
       
   389 {
       
   390 	struct e1000_hw *hw = &adapter->hw;
       
   391 
       
   392 	if (adapter->en_mng_pt) {
       
   393 		u32 manc = er32(MANC);
       
   394 
       
   395 		/* re-enable hardware interception of ARP */
       
   396 		manc |= E1000_MANC_ARP_EN;
       
   397 
       
   398 		ew32(MANC, manc);
       
   399 	}
       
   400 }
       
   401 
       
   402 /**
       
   403  * e1000_configure - configure the hardware for RX and TX
       
    404  * @adapter: board private structure
       
   405  **/
       
   406 static void e1000_configure(struct e1000_adapter *adapter)
       
   407 {
       
   408 	struct net_device *netdev = adapter->netdev;
       
   409 	int i;
       
   410 
       
   411 	e1000_set_rx_mode(netdev);
       
   412 
       
   413 	e1000_restore_vlan(adapter);
       
   414 	e1000_init_manageability(adapter);
       
   415 
       
   416 	e1000_configure_tx(adapter);
       
   417 	e1000_setup_rctl(adapter);
       
   418 	e1000_configure_rx(adapter);
       
   419 	/* call E1000_DESC_UNUSED which always leaves
       
   420 	 * at least 1 descriptor unused to make sure
       
   421 	 * next_to_use != next_to_clean */
       
   422 	for (i = 0; i < adapter->num_rx_queues; i++) {
       
   423 		struct e1000_rx_ring *ring = &adapter->rx_ring[i];
       
   424 		if (adapter->ecdev) {
       
   425 			/* fill rx ring completely! */
       
   426 			adapter->alloc_rx_buf(adapter, ring, ring->count);
       
   427 		} else {
       
   428 			/* this one leaves the last ring element unallocated! */
       
   429 			adapter->alloc_rx_buf(adapter, ring,
       
   430 					E1000_DESC_UNUSED(ring));
       
   431 		}
       
   432 	}
       
   433 }
       
   434 
       
   435 int e1000_up(struct e1000_adapter *adapter)
       
   436 {
       
   437 	struct e1000_hw *hw = &adapter->hw;
       
   438 
       
   439 	/* hardware has been reset, we need to reload some things */
       
   440 	e1000_configure(adapter);
       
   441 
       
   442 	clear_bit(__E1000_DOWN, &adapter->flags);
       
   443 
       
   444 	if (!adapter->ecdev) {
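		/* NAPI, interrupts and the netdev TX queue are only used in
		 * native mode; in EtherCAT mode the master polls the device. */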
       
   445 		napi_enable(&adapter->napi);
       
   446 
       
   447 		e1000_irq_enable(adapter);
       
   448 
       
   449 		netif_wake_queue(adapter->netdev);
       
   450 
       
   451 		/* fire a link change interrupt to start the watchdog */
       
   452 		ew32(ICS, E1000_ICS_LSC);
       
   453 	}
       
   454 	return 0;
       
   455 }
       
   456 
       
   457 /**
       
   458  * e1000_power_up_phy - restore link in case the phy was powered down
       
   459  * @adapter: address of board private structure
       
   460  *
       
   461  * The phy may be powered down to save power and turn off link when the
       
   462  * driver is unloaded and wake on lan is not enabled (among others)
       
   463  * *** this routine MUST be followed by a call to e1000_reset ***
       
   464  *
       
   465  **/
       
   466 
       
   467 void e1000_power_up_phy(struct e1000_adapter *adapter)
       
   468 {
       
   469 	struct e1000_hw *hw = &adapter->hw;
       
   470 	u16 mii_reg = 0;
       
   471 
       
   472 	/* Just clear the power down bit to wake the phy back up */
       
   473 	if (hw->media_type == e1000_media_type_copper) {
       
   474 		/* according to the manual, the phy will retain its
       
   475 		 * settings across a power-down/up cycle */
       
   476 		e1000_read_phy_reg(hw, PHY_CTRL, &mii_reg);
       
   477 		mii_reg &= ~MII_CR_POWER_DOWN;
       
   478 		e1000_write_phy_reg(hw, PHY_CTRL, mii_reg);
       
   479 	}
       
   480 }
       
   481 
       
   482 static void e1000_power_down_phy(struct e1000_adapter *adapter)
       
   483 {
       
   484 	struct e1000_hw *hw = &adapter->hw;
       
   485 
       
   486 	/* Power down the PHY so no link is implied when interface is down *
       
   487 	 * The PHY cannot be powered down if any of the following is true *
       
   488 	 * (a) WoL is enabled
       
   489 	 * (b) AMT is active
       
   490 	 * (c) SoL/IDER session is active */
       
   491 	if (!adapter->wol && hw->mac_type >= e1000_82540 &&
       
   492 	   hw->media_type == e1000_media_type_copper) {
       
   493 		u16 mii_reg = 0;
       
   494 
       
   495 		switch (hw->mac_type) {
       
   496 		case e1000_82540:
       
   497 		case e1000_82545:
       
   498 		case e1000_82545_rev_3:
       
   499 		case e1000_82546:
       
   500 		case e1000_ce4100:
       
   501 		case e1000_82546_rev_3:
       
   502 		case e1000_82541:
       
   503 		case e1000_82541_rev_2:
       
   504 		case e1000_82547:
       
   505 		case e1000_82547_rev_2:
       
   506 			if (er32(MANC) & E1000_MANC_SMBUS_EN)
       
   507 				goto out;
       
   508 			break;
       
   509 		default:
       
   510 			goto out;
       
   511 		}
       
   512 		e1000_read_phy_reg(hw, PHY_CTRL, &mii_reg);
       
   513 		mii_reg |= MII_CR_POWER_DOWN;
       
   514 		e1000_write_phy_reg(hw, PHY_CTRL, mii_reg);
       
   515 		msleep(1);
       
   516 	}
       
   517 out:
       
   518 	return;
       
   519 }
       
   520 
       
   521 static void e1000_down_and_stop(struct e1000_adapter *adapter)
       
   522 {
       
   523 	set_bit(__E1000_DOWN, &adapter->flags);
       
   524 
       
   525 	/* Only kill reset task if adapter is not resetting */
       
   526 	if (!test_bit(__E1000_RESETTING, &adapter->flags))
       
   527 		cancel_work_sync(&adapter->reset_task);
       
   528 
       
   529 	if (!adapter->ecdev) {
       
   530 		cancel_delayed_work_sync(&adapter->watchdog_task);
       
   531 		cancel_delayed_work_sync(&adapter->phy_info_task);
       
   532 		cancel_delayed_work_sync(&adapter->fifo_stall_task);
       
   533 	}
       
   534 }
       
   535 
       
   536 void e1000_down(struct e1000_adapter *adapter)
       
   537 {
       
   538 	struct e1000_hw *hw = &adapter->hw;
       
   539 	struct net_device *netdev = adapter->netdev;
       
   540 	u32 rctl, tctl;
       
   541 
       
   542 
       
   543 	/* disable receives in the hardware */	
       
   544 	rctl = er32(RCTL);
       
   545 	ew32(RCTL, rctl & ~E1000_RCTL_EN);
       
   546 
       
   547 	if (!adapter->ecdev) {
       
   548 		/* flush and sleep below */
       
   549 		netif_tx_disable(netdev);
       
   550 	}
       
   551 
       
   552 	/* disable transmits in the hardware */
       
   553 	tctl = er32(TCTL);
       
   554 	tctl &= ~E1000_TCTL_EN;
       
   555 	ew32(TCTL, tctl);
       
   556 	/* flush both disables and wait for them to finish */
       
   557 	E1000_WRITE_FLUSH();
       
   558 	msleep(10);
       
   559 
       
   560 	if (!adapter->ecdev) {
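		/* NAPI and interrupts were never enabled for EtherCAT devices,
		 * so they are only torn down in native mode. */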
       
   561 		napi_disable(&adapter->napi);
       
   562 
       
   563 		e1000_irq_disable(adapter);
       
   564 	}
       
   565 
       
   566 	/*
       
   567 	 * Setting DOWN must be after irq_disable to prevent
       
   568 	 * a screaming interrupt.  Setting DOWN also prevents
       
   569 	 * tasks from rescheduling.
       
   570 	 */
       
   571 	e1000_down_and_stop(adapter);
       
   572 
       
   573 	adapter->link_speed = 0;
       
   574 	adapter->link_duplex = 0;
       
   575 
       
   576 	if (!adapter->ecdev) {
       
   577 		netif_carrier_off(netdev);
       
   578 	}
       
   579 
       
   580 	e1000_reset(adapter);
       
   581 	e1000_clean_all_tx_rings(adapter);
       
   582 	e1000_clean_all_rx_rings(adapter);
       
   583 }
       
   584 
       
   585 static void e1000_reinit_safe(struct e1000_adapter *adapter)
       
   586 {
       
   587 	while (test_and_set_bit(__E1000_RESETTING, &adapter->flags))
       
   588 		msleep(1);
       
   589 	mutex_lock(&adapter->mutex);
       
   590 	e1000_down(adapter);
       
   591 	e1000_up(adapter);
       
   592 	mutex_unlock(&adapter->mutex);
       
   593 	clear_bit(__E1000_RESETTING, &adapter->flags);
       
   594 }
       
   595 
       
   596 void e1000_reinit_locked(struct e1000_adapter *adapter)
       
   597 {
       
   598 	/* if rtnl_lock is not held the call path is bogus */
       
   599 	ASSERT_RTNL();
       
   600 	WARN_ON(in_interrupt());
       
   601 	while (test_and_set_bit(__E1000_RESETTING, &adapter->flags))
       
   602 		msleep(1);
       
   603 	e1000_down(adapter);
       
   604 	e1000_up(adapter);
       
   605 	clear_bit(__E1000_RESETTING, &adapter->flags);
       
   606 }
       
   607 
       
   608 void e1000_reset(struct e1000_adapter *adapter)
       
   609 {
       
   610 	struct e1000_hw *hw = &adapter->hw;
       
   611 	u32 pba = 0, tx_space, min_tx_space, min_rx_space;
       
   612 	bool legacy_pba_adjust = false;
       
   613 	u16 hwm;
       
   614 
       
    615 	/* Repartition the PBA for MTUs greater than 9K;
       
    616 	 * CTRL.RST is required for the change to take effect.
       
   617 	 */
       
   618 
       
   619 	switch (hw->mac_type) {
       
   620 	case e1000_82542_rev2_0:
       
   621 	case e1000_82542_rev2_1:
       
   622 	case e1000_82543:
       
   623 	case e1000_82544:
       
   624 	case e1000_82540:
       
   625 	case e1000_82541:
       
   626 	case e1000_82541_rev_2:
       
   627 		legacy_pba_adjust = true;
       
   628 		pba = E1000_PBA_48K;
       
   629 		break;
       
   630 	case e1000_82545:
       
   631 	case e1000_82545_rev_3:
       
   632 	case e1000_82546:
       
   633 	case e1000_ce4100:
       
   634 	case e1000_82546_rev_3:
       
   635 		pba = E1000_PBA_48K;
       
   636 		break;
       
   637 	case e1000_82547:
       
   638 	case e1000_82547_rev_2:
       
   639 		legacy_pba_adjust = true;
       
   640 		pba = E1000_PBA_30K;
       
   641 		break;
       
   642 	case e1000_undefined:
       
   643 	case e1000_num_macs:
       
   644 		break;
       
   645 	}
       
   646 
       
   647 	if (legacy_pba_adjust) {
       
   648 		if (hw->max_frame_size > E1000_RXBUFFER_8192)
       
   649 			pba -= 8; /* allocate more FIFO for Tx */
       
   650 
       
   651 		if (hw->mac_type == e1000_82547) {
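			/* 82547: track the FIFO region above the Rx PBA as a
			 * software-managed Tx FIFO; see
			 * e1000_82547_fifo_workaround() and the stall task. */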
       
   652 			adapter->tx_fifo_head = 0;
       
   653 			adapter->tx_head_addr = pba << E1000_TX_HEAD_ADDR_SHIFT;
       
   654 			adapter->tx_fifo_size =
       
   655 				(E1000_PBA_40K - pba) << E1000_PBA_BYTES_SHIFT;
       
   656 			atomic_set(&adapter->tx_fifo_stall, 0);
       
   657 		}
       
   658 	} else if (hw->max_frame_size >  ETH_FRAME_LEN + ETH_FCS_LEN) {
       
   659 		/* adjust PBA for jumbo frames */
       
   660 		ew32(PBA, pba);
       
   661 
       
   662 		/* To maintain wire speed transmits, the Tx FIFO should be
       
   663 		 * large enough to accommodate two full transmit packets,
       
   664 		 * rounded up to the next 1KB and expressed in KB.  Likewise,
       
   665 		 * the Rx FIFO should be large enough to accommodate at least
       
   666 		 * one full receive packet and is similarly rounded up and
       
   667 		 * expressed in KB. */
       
   668 		pba = er32(PBA);
       
   669 		/* upper 16 bits has Tx packet buffer allocation size in KB */
       
   670 		tx_space = pba >> 16;
       
   671 		/* lower 16 bits has Rx packet buffer allocation size in KB */
       
   672 		pba &= 0xffff;
       
   673 		/*
       
   674 		 * the tx fifo also stores 16 bytes of information about the tx
       
   675 		 * but don't include ethernet FCS because hardware appends it
       
   676 		 */
       
   677 		min_tx_space = (hw->max_frame_size +
       
   678 		                sizeof(struct e1000_tx_desc) -
       
   679 		                ETH_FCS_LEN) * 2;
       
   680 		min_tx_space = ALIGN(min_tx_space, 1024);
       
   681 		min_tx_space >>= 10;
       
   682 		/* software strips receive CRC, so leave room for it */
       
   683 		min_rx_space = hw->max_frame_size;
       
   684 		min_rx_space = ALIGN(min_rx_space, 1024);
       
   685 		min_rx_space >>= 10;
       
   686 
       
   687 		/* If current Tx allocation is less than the min Tx FIFO size,
       
   688 		 * and the min Tx FIFO size is less than the current Rx FIFO
       
   689 		 * allocation, take space away from current Rx allocation */
       
   690 		if (tx_space < min_tx_space &&
       
   691 		    ((min_tx_space - tx_space) < pba)) {
       
   692 			pba = pba - (min_tx_space - tx_space);
       
   693 
       
   694 			/* PCI/PCIx hardware has PBA alignment constraints */
       
   695 			switch (hw->mac_type) {
       
   696 			case e1000_82545 ... e1000_82546_rev_3:
       
   697 				pba &= ~(E1000_PBA_8K - 1);
       
   698 				break;
       
   699 			default:
       
   700 				break;
       
   701 			}
       
   702 
       
   703 			/* if short on rx space, rx wins and must trump tx
       
   704 			 * adjustment or use Early Receive if available */
       
   705 			if (pba < min_rx_space)
       
   706 				pba = min_rx_space;
       
   707 		}
       
   708 	}
       
   709 
       
   710 	ew32(PBA, pba);
       
   711 
       
   712 	/*
       
   713 	 * flow control settings:
       
   714 	 * The high water mark must be low enough to fit one full frame
       
   715 	 * (or the size used for early receive) above it in the Rx FIFO.
       
   716 	 * Set it to the lower of:
       
   717 	 * - 90% of the Rx FIFO size, and
       
   718 	 * - the full Rx FIFO size minus the early receive size (for parts
       
   719 	 *   with ERT support assuming ERT set to E1000_ERT_2048), or
       
   720 	 * - the full Rx FIFO size minus one full frame
       
   721 	 */
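	/* pba is expressed in KB here; (pba << 10) converts it to bytes so it
	 * can be compared against max_frame_size. */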
       
   722 	hwm = min(((pba << 10) * 9 / 10),
       
   723 		  ((pba << 10) - hw->max_frame_size));
       
   724 
       
   725 	hw->fc_high_water = hwm & 0xFFF8;	/* 8-byte granularity */
       
   726 	hw->fc_low_water = hw->fc_high_water - 8;
       
   727 	hw->fc_pause_time = E1000_FC_PAUSE_TIME;
       
   728 	hw->fc_send_xon = 1;
       
   729 	hw->fc = hw->original_fc;
       
   730 
       
   731 	/* Allow time for pending master requests to run */
       
   732 	e1000_reset_hw(hw);
       
   733 	if (hw->mac_type >= e1000_82544)
       
   734 		ew32(WUC, 0);
       
   735 
       
   736 	if (e1000_init_hw(hw))
       
   737 		e_dev_err("Hardware Error\n");
       
   738 	e1000_update_mng_vlan(adapter);
       
   739 
       
   740 	/* if (adapter->hwflags & HWFLAGS_PHY_PWR_BIT) { */
       
   741 	if (hw->mac_type >= e1000_82544 &&
       
   742 	    hw->autoneg == 1 &&
       
   743 	    hw->autoneg_advertised == ADVERTISE_1000_FULL) {
       
   744 		u32 ctrl = er32(CTRL);
       
   745 		/* clear phy power management bit if we are in gig only mode,
       
   746 		 * which if enabled will attempt negotiation to 100Mb, which
       
   747 		 * can cause a loss of link at power off or driver unload */
       
   748 		ctrl &= ~E1000_CTRL_SWDPIN3;
       
   749 		ew32(CTRL, ctrl);
       
   750 	}
       
   751 
       
   752 	/* Enable h/w to recognize an 802.1Q VLAN Ethernet packet */
       
   753 	ew32(VET, ETHERNET_IEEE_VLAN_TYPE);
       
   754 
       
   755 	e1000_reset_adaptive(hw);
       
   756 	e1000_phy_get_info(hw, &adapter->phy_info);
       
   757 
       
   758 	e1000_release_manageability(adapter);
       
   759 }
       
   760 
       
   761 /**
       
    762  * e1000_dump_eeprom - dump the EEPROM for users having checksum issues
       
   763  **/
       
   764 static void e1000_dump_eeprom(struct e1000_adapter *adapter)
       
   765 {
       
   766 	struct net_device *netdev = adapter->netdev;
       
   767 	struct ethtool_eeprom eeprom;
       
   768 	const struct ethtool_ops *ops = netdev->ethtool_ops;
       
   769 	u8 *data;
       
   770 	int i;
       
   771 	u16 csum_old, csum_new = 0;
       
   772 
       
   773 	eeprom.len = ops->get_eeprom_len(netdev);
       
   774 	eeprom.offset = 0;
       
   775 
       
   776 	data = kmalloc(eeprom.len, GFP_KERNEL);
       
   777 	if (!data)
       
   778 		return;
       
   779 
       
   780 	ops->get_eeprom(netdev, &eeprom, data);
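	/* Recompute the checksum over the words preceding EEPROM_CHECKSUM_REG;
	 * csum_new is the value the stored checksum word would need so that
	 * all words sum to EEPROM_SUM. */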
       
   781 
       
   782 	csum_old = (data[EEPROM_CHECKSUM_REG * 2]) +
       
   783 		   (data[EEPROM_CHECKSUM_REG * 2 + 1] << 8);
       
   784 	for (i = 0; i < EEPROM_CHECKSUM_REG * 2; i += 2)
       
   785 		csum_new += data[i] + (data[i + 1] << 8);
       
   786 	csum_new = EEPROM_SUM - csum_new;
       
   787 
       
   788 	pr_err("/*********************/\n");
       
   789 	pr_err("Current EEPROM Checksum : 0x%04x\n", csum_old);
       
   790 	pr_err("Calculated              : 0x%04x\n", csum_new);
       
   791 
       
   792 	pr_err("Offset    Values\n");
       
   793 	pr_err("========  ======\n");
       
   794 	print_hex_dump(KERN_ERR, "", DUMP_PREFIX_OFFSET, 16, 1, data, 128, 0);
       
   795 
       
   796 	pr_err("Include this output when contacting your support provider.\n");
       
   797 	pr_err("This is not a software error! Something bad happened to\n");
       
   798 	pr_err("your hardware or EEPROM image. Ignoring this problem could\n");
       
   799 	pr_err("result in further problems, possibly loss of data,\n");
       
   800 	pr_err("corruption or system hangs!\n");
       
   801 	pr_err("The MAC Address will be reset to 00:00:00:00:00:00,\n");
       
   802 	pr_err("which is invalid and requires you to set the proper MAC\n");
       
   803 	pr_err("address manually before continuing to enable this network\n");
       
   804 	pr_err("device. Please inspect the EEPROM dump and report the\n");
       
   805 	pr_err("issue to your hardware vendor or Intel Customer Support.\n");
       
   806 	pr_err("/*********************/\n");
       
   807 
       
   808 	kfree(data);
       
   809 }
       
   810 
       
   811 /**
       
   812  * e1000_is_need_ioport - determine if an adapter needs ioport resources or not
       
   813  * @pdev: PCI device information struct
       
   814  *
       
   815  * Return true if an adapter needs ioport resources
       
   816  **/
       
   817 static int e1000_is_need_ioport(struct pci_dev *pdev)
       
   818 {
       
   819 	switch (pdev->device) {
       
   820 	case E1000_DEV_ID_82540EM:
       
   821 	case E1000_DEV_ID_82540EM_LOM:
       
   822 	case E1000_DEV_ID_82540EP:
       
   823 	case E1000_DEV_ID_82540EP_LOM:
       
   824 	case E1000_DEV_ID_82540EP_LP:
       
   825 	case E1000_DEV_ID_82541EI:
       
   826 	case E1000_DEV_ID_82541EI_MOBILE:
       
   827 	case E1000_DEV_ID_82541ER:
       
   828 	case E1000_DEV_ID_82541ER_LOM:
       
   829 	case E1000_DEV_ID_82541GI:
       
   830 	case E1000_DEV_ID_82541GI_LF:
       
   831 	case E1000_DEV_ID_82541GI_MOBILE:
       
   832 	case E1000_DEV_ID_82544EI_COPPER:
       
   833 	case E1000_DEV_ID_82544EI_FIBER:
       
   834 	case E1000_DEV_ID_82544GC_COPPER:
       
   835 	case E1000_DEV_ID_82544GC_LOM:
       
   836 	case E1000_DEV_ID_82545EM_COPPER:
       
   837 	case E1000_DEV_ID_82545EM_FIBER:
       
   838 	case E1000_DEV_ID_82546EB_COPPER:
       
   839 	case E1000_DEV_ID_82546EB_FIBER:
       
   840 	case E1000_DEV_ID_82546EB_QUAD_COPPER:
       
   841 		return true;
       
   842 	default:
       
   843 		return false;
       
   844 	}
       
   845 }
       
   846 
       
   847 static netdev_features_t e1000_fix_features(struct net_device *netdev,
       
   848 	netdev_features_t features)
       
   849 {
       
   850 	/*
       
   851 	 * Since there is no support for separate rx/tx vlan accel
       
   852 	 * enable/disable make sure tx flag is always in same state as rx.
       
   853 	 */
       
   854 	if (features & NETIF_F_HW_VLAN_RX)
       
   855 		features |= NETIF_F_HW_VLAN_TX;
       
   856 	else
       
   857 		features &= ~NETIF_F_HW_VLAN_TX;
       
   858 
       
   859 	return features;
       
   860 }
       
   861 
       
   862 static int e1000_set_features(struct net_device *netdev,
       
   863 	netdev_features_t features)
       
   864 {
       
   865 	struct e1000_adapter *adapter = netdev_priv(netdev);
       
   866 	netdev_features_t changed = features ^ netdev->features;
       
   867 
       
   868 	if (changed & NETIF_F_HW_VLAN_RX)
       
   869 		e1000_vlan_mode(netdev, features);
       
   870 
       
   871 	if (!(changed & NETIF_F_RXCSUM))
       
   872 		return 0;
       
   873 
       
   874 	adapter->rx_csum = !!(features & NETIF_F_RXCSUM);
       
   875 
       
   876 	if (netif_running(netdev))
       
   877 		e1000_reinit_locked(adapter);
       
   878 	else
       
   879 		e1000_reset(adapter);
       
   880 
       
   881 	return 0;
       
   882 }
       
   883 
       
   884 static const struct net_device_ops e1000_netdev_ops = {
       
   885 	.ndo_open		= e1000_open,
       
   886 	.ndo_stop		= e1000_close,
       
   887 	.ndo_start_xmit		= e1000_xmit_frame,
       
   888 	.ndo_get_stats		= e1000_get_stats,
       
   889 	.ndo_set_rx_mode	= e1000_set_rx_mode,
       
   890 	.ndo_set_mac_address	= e1000_set_mac,
       
   891 	.ndo_tx_timeout		= e1000_tx_timeout,
       
   892 	.ndo_change_mtu		= e1000_change_mtu,
       
   893 	.ndo_do_ioctl		= e1000_ioctl,
       
   894 	.ndo_validate_addr	= eth_validate_addr,
       
   895 	.ndo_vlan_rx_add_vid	= e1000_vlan_rx_add_vid,
       
   896 	.ndo_vlan_rx_kill_vid	= e1000_vlan_rx_kill_vid,
       
   897 #ifdef CONFIG_NET_POLL_CONTROLLER
       
   898 	.ndo_poll_controller	= e1000_netpoll,
       
   899 #endif
       
   900 	.ndo_fix_features	= e1000_fix_features,
       
   901 	.ndo_set_features	= e1000_set_features,
       
   902 };
       
   903 
       
   904 /**
       
   905  * e1000_init_hw_struct - initialize members of hw struct
       
   906  * @adapter: board private struct
       
   907  * @hw: structure used by e1000_hw.c
       
   908  *
       
   909  * Factors out initialization of the e1000_hw struct to its own function
       
   910  * that can be called very early at init (just after struct allocation).
       
   911  * Fields are initialized based on PCI device information and
       
   912  * OS network device settings (MTU size).
       
   913  * Returns negative error codes if MAC type setup fails.
       
   914  */
       
   915 static int e1000_init_hw_struct(struct e1000_adapter *adapter,
       
   916 				struct e1000_hw *hw)
       
   917 {
       
   918 	struct pci_dev *pdev = adapter->pdev;
       
   919 
       
   920 	/* PCI config space info */
       
   921 	hw->vendor_id = pdev->vendor;
       
   922 	hw->device_id = pdev->device;
       
   923 	hw->subsystem_vendor_id = pdev->subsystem_vendor;
       
   924 	hw->subsystem_id = pdev->subsystem_device;
       
   925 	hw->revision_id = pdev->revision;
       
   926 
       
   927 	pci_read_config_word(pdev, PCI_COMMAND, &hw->pci_cmd_word);
       
   928 
       
   929 	hw->max_frame_size = adapter->netdev->mtu +
       
   930 			     ENET_HEADER_SIZE + ETHERNET_FCS_SIZE;
       
   931 	hw->min_frame_size = MINIMUM_ETHERNET_FRAME_SIZE;
       
   932 
       
   933 	/* identify the MAC */
       
   934 	if (e1000_set_mac_type(hw)) {
       
   935 		e_err(probe, "Unknown MAC Type\n");
       
   936 		return -EIO;
       
   937 	}
       
   938 
       
   939 	switch (hw->mac_type) {
       
   940 	default:
       
   941 		break;
       
   942 	case e1000_82541:
       
   943 	case e1000_82547:
       
   944 	case e1000_82541_rev_2:
       
   945 	case e1000_82547_rev_2:
       
   946 		hw->phy_init_script = 1;
       
   947 		break;
       
   948 	}
       
   949 
       
   950 	e1000_set_media_type(hw);
       
   951 	e1000_get_bus_info(hw);
       
   952 
       
   953 	hw->wait_autoneg_complete = false;
       
   954 	hw->tbi_compatibility_en = true;
       
   955 	hw->adaptive_ifs = true;
       
   956 
       
   957 	/* Copper options */
       
   958 
       
   959 	if (hw->media_type == e1000_media_type_copper) {
       
   960 		hw->mdix = AUTO_ALL_MODES;
       
   961 		hw->disable_polarity_correction = false;
       
   962 		hw->master_slave = E1000_MASTER_SLAVE;
       
   963 	}
       
   964 
       
   965 	return 0;
       
   966 }
       
   967 
       
   968 /**
       
   969  * e1000_probe - Device Initialization Routine
       
   970  * @pdev: PCI device information struct
       
   971  * @ent: entry in e1000_pci_tbl
       
   972  *
       
   973  * Returns 0 on success, negative on failure
       
   974  *
       
   975  * e1000_probe initializes an adapter identified by a pci_dev structure.
       
   976  * The OS initialization, configuring of the adapter private structure,
       
   977  * and a hardware reset occur.
       
   978  **/
       
   979 static int __devinit e1000_probe(struct pci_dev *pdev,
       
   980 				 const struct pci_device_id *ent)
       
   981 {
       
   982 	struct net_device *netdev;
       
   983 	struct e1000_adapter *adapter;
       
   984 	struct e1000_hw *hw;
       
   985 
       
   986 	static int cards_found = 0;
       
   987 	static int global_quad_port_a = 0; /* global ksp3 port a indication */
       
   988 	int i, err, pci_using_dac;
       
   989 	u16 eeprom_data = 0;
       
   990 	u16 tmp = 0;
       
   991 	u16 eeprom_apme_mask = E1000_EEPROM_APME;
       
   992 	int bars, need_ioport;
       
   993 
       
   994 	/* do not allocate ioport bars when not needed */
       
   995 	need_ioport = e1000_is_need_ioport(pdev);
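	/* The pre-PCIe parts listed in e1000_is_need_ioport() also decode an
	 * I/O BAR, so both memory and I/O resources are requested for them. */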
       
   996 	if (need_ioport) {
       
   997 		bars = pci_select_bars(pdev, IORESOURCE_MEM | IORESOURCE_IO);
       
   998 		err = pci_enable_device(pdev);
       
   999 	} else {
       
  1000 		bars = pci_select_bars(pdev, IORESOURCE_MEM);
       
  1001 		err = pci_enable_device_mem(pdev);
       
  1002 	}
       
  1003 	if (err)
       
  1004 		return err;
       
  1005 
       
  1006 	err = pci_request_selected_regions(pdev, bars, e1000_driver_name);
       
  1007 	if (err)
       
  1008 		goto err_pci_reg;
       
  1009 
       
  1010 	pci_set_master(pdev);
       
  1011 	err = pci_save_state(pdev);
       
  1012 	if (err)
       
  1013 		goto err_alloc_etherdev;
       
  1014 
       
  1015 	err = -ENOMEM;
       
  1016 	netdev = alloc_etherdev(sizeof(struct e1000_adapter));
       
  1017 	if (!netdev)
       
  1018 		goto err_alloc_etherdev;
       
  1019 
       
  1020 	SET_NETDEV_DEV(netdev, &pdev->dev);
       
  1021 
       
  1022 	pci_set_drvdata(pdev, netdev);
       
  1023 	adapter = netdev_priv(netdev);
       
  1024 	adapter->netdev = netdev;
       
  1025 	adapter->pdev = pdev;
       
  1026 	adapter->msg_enable = netif_msg_init(debug, DEFAULT_MSG_ENABLE);
       
  1027 	adapter->bars = bars;
       
  1028 	adapter->need_ioport = need_ioport;
       
  1029 
       
  1030 	hw = &adapter->hw;
       
  1031 	hw->back = adapter;
       
  1032 
       
  1033 	err = -EIO;
       
  1034 	hw->hw_addr = pci_ioremap_bar(pdev, BAR_0);
       
  1035 	if (!hw->hw_addr)
       
  1036 		goto err_ioremap;
       
  1037 
       
  1038 	if (adapter->need_ioport) {
       
  1039 		for (i = BAR_1; i <= BAR_5; i++) {
       
  1040 			if (pci_resource_len(pdev, i) == 0)
       
  1041 				continue;
       
  1042 			if (pci_resource_flags(pdev, i) & IORESOURCE_IO) {
       
  1043 				hw->io_base = pci_resource_start(pdev, i);
       
  1044 				break;
       
  1045 			}
       
  1046 		}
       
  1047 	}
       
  1048 
       
  1049 	/* make ready for any if (hw->...) below */
       
  1050 	err = e1000_init_hw_struct(adapter, hw);
       
  1051 	if (err)
       
  1052 		goto err_sw_init;
       
  1053 
       
  1054 	/*
       
  1055 	 * there is a workaround being applied below that limits
       
  1056 	 * 64-bit DMA addresses to 64-bit hardware.  There are some
       
  1057 	 * 32-bit adapters that Tx hang when given 64-bit DMA addresses
       
  1058 	 */
       
  1059 	pci_using_dac = 0;
       
  1060 	if ((hw->bus_type == e1000_bus_type_pcix) &&
       
  1061 	    !dma_set_mask(&pdev->dev, DMA_BIT_MASK(64))) {
       
  1062 		/*
       
  1063 		 * according to DMA-API-HOWTO, coherent calls will always
       
  1064 		 * succeed if the set call did
       
  1065 		 */
       
  1066 		dma_set_coherent_mask(&pdev->dev, DMA_BIT_MASK(64));
       
  1067 		pci_using_dac = 1;
       
  1068 	} else {
       
  1069 		err = dma_set_mask(&pdev->dev, DMA_BIT_MASK(32));
       
  1070 		if (err) {
       
  1071 			pr_err("No usable DMA config, aborting\n");
       
  1072 			goto err_dma;
       
  1073 		}
       
  1074 		dma_set_coherent_mask(&pdev->dev, DMA_BIT_MASK(32));
       
  1075 	}
       
  1076 
       
  1077 	netdev->netdev_ops = &e1000_netdev_ops;
       
  1078 	e1000_set_ethtool_ops(netdev);
       
  1079 	netdev->watchdog_timeo = 5 * HZ;
       
  1080 	netif_napi_add(netdev, &adapter->napi, e1000_clean, 64);
       
  1081 
       
  1082 	strncpy(netdev->name, pci_name(pdev), sizeof(netdev->name) - 1);
       
  1083 
       
  1084 	adapter->bd_number = cards_found;
       
  1085 
       
  1086 	/* setup the private structure */
       
  1087 
       
  1088 	err = e1000_sw_init(adapter);
       
  1089 	if (err)
       
  1090 		goto err_sw_init;
       
  1091 
       
  1092 	err = -EIO;
       
  1093 	if (hw->mac_type == e1000_ce4100) {
       
  1094 		hw->ce4100_gbe_mdio_base_virt =
       
  1095 					ioremap(pci_resource_start(pdev, BAR_1),
       
  1096 		                                pci_resource_len(pdev, BAR_1));
       
  1097 
       
  1098 		if (!hw->ce4100_gbe_mdio_base_virt)
       
  1099 			goto err_mdio_ioremap;
       
  1100 	}
       
  1101 
       
  1102 	if (hw->mac_type >= e1000_82543) {
       
  1103 		netdev->hw_features = NETIF_F_SG |
       
  1104 				   NETIF_F_HW_CSUM |
       
  1105 				   NETIF_F_HW_VLAN_RX;
       
  1106 		netdev->features = NETIF_F_HW_VLAN_TX |
       
  1107 				   NETIF_F_HW_VLAN_FILTER;
       
  1108 	}
       
  1109 
       
  1110 	if ((hw->mac_type >= e1000_82544) &&
       
  1111 	   (hw->mac_type != e1000_82547))
       
  1112 		netdev->hw_features |= NETIF_F_TSO;
       
  1113 
       
  1114 	netdev->priv_flags |= IFF_SUPP_NOFCS;
       
  1115 
       
  1116 	netdev->features |= netdev->hw_features;
       
  1117 	netdev->hw_features |= NETIF_F_RXCSUM;
       
  1118 	netdev->hw_features |= NETIF_F_RXFCS;
       
  1119 
       
  1120 	if (pci_using_dac) {
       
  1121 		netdev->features |= NETIF_F_HIGHDMA;
       
  1122 		netdev->vlan_features |= NETIF_F_HIGHDMA;
       
  1123 	}
       
  1124 
       
  1125 	netdev->vlan_features |= NETIF_F_TSO;
       
  1126 	netdev->vlan_features |= NETIF_F_HW_CSUM;
       
  1127 	netdev->vlan_features |= NETIF_F_SG;
       
  1128 
       
  1129 	netdev->priv_flags |= IFF_UNICAST_FLT;
       
  1130 
       
  1131 	adapter->en_mng_pt = e1000_enable_mng_pass_thru(hw);
       
  1132 
       
  1133 	/* initialize eeprom parameters */
       
  1134 	if (e1000_init_eeprom_params(hw)) {
       
  1135 		e_err(probe, "EEPROM initialization failed\n");
       
  1136 		goto err_eeprom;
       
  1137 	}
       
  1138 
       
  1139 	/* before reading the EEPROM, reset the controller to
       
  1140 	 * put the device in a known good starting state */
       
  1141 
       
  1142 	e1000_reset_hw(hw);
       
  1143 
       
  1144 	/* make sure the EEPROM is good */
       
  1145 	if (e1000_validate_eeprom_checksum(hw) < 0) {
       
  1146 		e_err(probe, "The EEPROM Checksum Is Not Valid\n");
       
  1147 		e1000_dump_eeprom(adapter);
       
  1148 		/*
       
   1149 		 * set the MAC address to all zeroes to invalidate it and temporarily
       
  1150 		 * disable this device for the user. This blocks regular
       
  1151 		 * traffic while still permitting ethtool ioctls from reaching
       
  1152 		 * the hardware as well as allowing the user to run the
       
  1153 		 * interface after manually setting a hw addr using
       
  1154 		 * `ip set address`
       
  1155 		 */
       
  1156 		memset(hw->mac_addr, 0, netdev->addr_len);
       
  1157 	} else {
       
  1158 		/* copy the MAC address out of the EEPROM */
       
  1159 		if (e1000_read_mac_addr(hw))
       
  1160 			e_err(probe, "EEPROM Read Error\n");
       
  1161 	}
       
   1162 	/* don't block initialization here due to a bad MAC address */
       
  1163 	memcpy(netdev->dev_addr, hw->mac_addr, netdev->addr_len);
       
  1164 	memcpy(netdev->perm_addr, hw->mac_addr, netdev->addr_len);
       
  1165 
       
  1166 	if (!is_valid_ether_addr(netdev->perm_addr))
       
  1167 		e_err(probe, "Invalid MAC Address\n");
       
  1168 
       
  1169 
       
  1170 	INIT_DELAYED_WORK(&adapter->watchdog_task, e1000_watchdog);
       
  1171 	INIT_DELAYED_WORK(&adapter->fifo_stall_task,
       
  1172 			  e1000_82547_tx_fifo_stall_task);
       
  1173 	INIT_DELAYED_WORK(&adapter->phy_info_task, e1000_update_phy_info_task);
       
  1174 	INIT_WORK(&adapter->reset_task, e1000_reset_task);
       
  1175 
       
  1176 	e1000_check_options(adapter);
       
  1177 
       
  1178 	/* Initial Wake on LAN setting
       
  1179 	 * If APM wake is enabled in the EEPROM,
       
  1180 	 * enable the ACPI Magic Packet filter
       
  1181 	 */
       
  1182 
       
  1183 	switch (hw->mac_type) {
       
  1184 	case e1000_82542_rev2_0:
       
  1185 	case e1000_82542_rev2_1:
       
  1186 	case e1000_82543:
       
  1187 		break;
       
  1188 	case e1000_82544:
       
  1189 		e1000_read_eeprom(hw,
       
  1190 			EEPROM_INIT_CONTROL2_REG, 1, &eeprom_data);
       
  1191 		eeprom_apme_mask = E1000_EEPROM_82544_APM;
       
  1192 		break;
       
  1193 	case e1000_82546:
       
  1194 	case e1000_82546_rev_3:
       
  1195 		if (er32(STATUS) & E1000_STATUS_FUNC_1){
       
  1196 			e1000_read_eeprom(hw,
       
  1197 				EEPROM_INIT_CONTROL3_PORT_B, 1, &eeprom_data);
       
  1198 			break;
       
  1199 		}
       
  1200 		/* Fall Through */
       
  1201 	default:
       
  1202 		e1000_read_eeprom(hw,
       
  1203 			EEPROM_INIT_CONTROL3_PORT_A, 1, &eeprom_data);
       
  1204 		break;
       
  1205 	}
       
  1206 	if (eeprom_data & eeprom_apme_mask)
       
  1207 		adapter->eeprom_wol |= E1000_WUFC_MAG;
       
  1208 
       
  1209 	/* now that we have the eeprom settings, apply the special cases
       
  1210 	 * where the eeprom may be wrong or the board simply won't support
       
  1211 	 * wake on lan on a particular port */
       
  1212 	switch (pdev->device) {
       
  1213 	case E1000_DEV_ID_82546GB_PCIE:
       
  1214 		adapter->eeprom_wol = 0;
       
  1215 		break;
       
  1216 	case E1000_DEV_ID_82546EB_FIBER:
       
  1217 	case E1000_DEV_ID_82546GB_FIBER:
       
  1218 		/* Wake events only supported on port A for dual fiber
       
  1219 		 * regardless of eeprom setting */
       
  1220 		if (er32(STATUS) & E1000_STATUS_FUNC_1)
       
  1221 			adapter->eeprom_wol = 0;
       
  1222 		break;
       
  1223 	case E1000_DEV_ID_82546GB_QUAD_COPPER_KSP3:
       
  1224 		/* if quad port adapter, disable WoL on all but port A */
       
  1225 		if (global_quad_port_a != 0)
       
  1226 			adapter->eeprom_wol = 0;
       
  1227 		else
       
  1228 			adapter->quad_port_a = true;
       
  1229 		/* Reset for multiple quad port adapters */
       
  1230 		if (++global_quad_port_a == 4)
       
  1231 			global_quad_port_a = 0;
       
  1232 		break;
       
  1233 	}
       
  1234 
       
  1235 	/* initialize the wol settings based on the eeprom settings */
       
  1236 	adapter->wol = adapter->eeprom_wol;
       
  1237 	device_set_wakeup_enable(&adapter->pdev->dev, adapter->wol);
       
  1238 
       
  1239 	/* Auto detect PHY address */
       
  1240 	if (hw->mac_type == e1000_ce4100) {
       
  1241 		for (i = 0; i < 32; i++) {
       
  1242 			hw->phy_addr = i;
       
  1243 			e1000_read_phy_reg(hw, PHY_ID2, &tmp);
       
  1244 			if (tmp == 0 || tmp == 0xFF) {
       
  1245 				if (i == 31)
       
  1246 					goto err_eeprom;
       
  1247 				continue;
       
  1248 			} else
       
  1249 				break;
       
  1250 		}
       
  1251 	}
       
  1252 
       
  1253 	/* reset the hardware with the new settings */
       
  1254 	e1000_reset(adapter);
       
  1255 
       
   1256 	// offer the device to the EtherCAT master module
       
  1257 	adapter->ecdev = ecdev_offer(netdev, ec_poll, THIS_MODULE);
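	/* If the master claims the device, it is opened for EtherCAT operation
	 * and never registered with the network stack; otherwise it is
	 * registered as a regular eth%d interface below. */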
       
  1258 	if (adapter->ecdev) {
       
  1259 		err = ecdev_open(adapter->ecdev);
       
  1260 		if (err) {
       
  1261 			ecdev_withdraw(adapter->ecdev);
       
  1262 			goto err_register;
       
  1263 		}
       
  1264 	} else {
       
  1265 		strcpy(netdev->name, "eth%d");
       
  1266 		err = register_netdev(netdev);
       
  1267 		if (err)
       
  1268 			goto err_register;
       
  1269 	}
       
  1270 
       
  1271 	e1000_vlan_filter_on_off(adapter, false);
       
  1272 
       
  1273 	/* print bus type/speed/width info */
       
  1274 	e_info(probe, "(PCI%s:%dMHz:%d-bit) %pM\n",
       
  1275 	       ((hw->bus_type == e1000_bus_type_pcix) ? "-X" : ""),
       
  1276 	       ((hw->bus_speed == e1000_bus_speed_133) ? 133 :
       
  1277 		(hw->bus_speed == e1000_bus_speed_120) ? 120 :
       
  1278 		(hw->bus_speed == e1000_bus_speed_100) ? 100 :
       
  1279 		(hw->bus_speed == e1000_bus_speed_66) ? 66 : 33),
       
  1280 	       ((hw->bus_width == e1000_bus_width_64) ? 64 : 32),
       
  1281 	       netdev->dev_addr);
       
  1282 
       
  1283 	if (!adapter->ecdev) {
       
  1284 		/* carrier off reporting is important to ethtool even BEFORE open */
       
  1285 		netif_carrier_off(netdev);
       
  1286 	}
       
  1287 
       
  1288 	e_info(probe, "Intel(R) PRO/1000 Network Connection\n");
       
  1289 
       
  1290 	cards_found++;
       
  1291 	return 0;
       
  1292 
       
  1293 err_register:
       
  1294 err_eeprom:
       
  1295 	e1000_phy_hw_reset(hw);
       
  1296 
       
  1297 	if (hw->flash_address)
       
  1298 		iounmap(hw->flash_address);
       
  1299 	kfree(adapter->tx_ring);
       
  1300 	kfree(adapter->rx_ring);
       
  1301 err_dma:
       
  1302 err_sw_init:
       
  1303 err_mdio_ioremap:
       
  1304 	iounmap(hw->ce4100_gbe_mdio_base_virt);
       
  1305 	iounmap(hw->hw_addr);
       
  1306 err_ioremap:
       
  1307 	free_netdev(netdev);
       
  1308 err_alloc_etherdev:
       
  1309 	pci_release_selected_regions(pdev, bars);
       
  1310 err_pci_reg:
       
  1311 	pci_disable_device(pdev);
       
  1312 	return err;
       
  1313 }
       
  1314 
       
  1315 /**
       
  1316  * e1000_remove - Device Removal Routine
       
  1317  * @pdev: PCI device information struct
       
  1318  *
       
  1319  * e1000_remove is called by the PCI subsystem to alert the driver
       
  1320  * that it should release a PCI device.  This could be caused by a
       
  1321  * Hot-Plug event, or because the driver is going to be removed from
       
  1322  * memory.
       
  1323  **/
       
  1324 
       
  1325 static void __devexit e1000_remove(struct pci_dev *pdev)
       
  1326 {
       
  1327 	struct net_device *netdev = pci_get_drvdata(pdev);
       
  1328 	struct e1000_adapter *adapter = netdev_priv(netdev);
       
  1329 	struct e1000_hw *hw = &adapter->hw;
       
  1330 
       
  1331 	e1000_down_and_stop(adapter);
       
  1332 	e1000_release_manageability(adapter);
       
  1333 
       
  1334 	if (adapter->ecdev) {
       
  1335 		ecdev_close(adapter->ecdev);
       
  1336 		ecdev_withdraw(adapter->ecdev);
       
  1337 	} else {
       
  1338 		unregister_netdev(netdev);
       
  1339 	}
       
  1340 
       
  1341 	e1000_phy_hw_reset(hw);
       
  1342 
       
  1343 	kfree(adapter->tx_ring);
       
  1344 	kfree(adapter->rx_ring);
       
  1345 
       
  1346 	if (hw->mac_type == e1000_ce4100)
       
  1347 		iounmap(hw->ce4100_gbe_mdio_base_virt);
       
  1348 	iounmap(hw->hw_addr);
       
  1349 	if (hw->flash_address)
       
  1350 		iounmap(hw->flash_address);
       
  1351 	pci_release_selected_regions(pdev, adapter->bars);
       
  1352 
       
  1353 	free_netdev(netdev);
       
  1354 
       
  1355 	pci_disable_device(pdev);
       
  1356 }
       
  1357 
       
  1358 /**
       
  1359  * e1000_sw_init - Initialize general software structures (struct e1000_adapter)
       
  1360  * @adapter: board private structure to initialize
       
  1361  *
       
  1362  * e1000_sw_init initializes the Adapter private data structure.
       
  1363  * e1000_init_hw_struct MUST be called before this function
       
  1364  **/
       
  1365 
       
  1366 static int __devinit e1000_sw_init(struct e1000_adapter *adapter)
       
  1367 {
       
  1368 	adapter->rx_buffer_len = MAXIMUM_ETHERNET_VLAN_SIZE;
       
  1369 
       
  1370 	adapter->num_tx_queues = 1;
       
  1371 	adapter->num_rx_queues = 1;
       
  1372 
       
  1373 	if (e1000_alloc_queues(adapter)) {
       
  1374 		e_err(probe, "Unable to allocate memory for queues\n");
       
  1375 		return -ENOMEM;
       
  1376 	}
       
  1377 
       
  1378 	/* Explicitly disable IRQ since the NIC can be in any state. */
       
  1379 	e1000_irq_disable(adapter);
       
  1380 
       
  1381 	spin_lock_init(&adapter->stats_lock);
       
  1382 	mutex_init(&adapter->mutex);
       
  1383 
       
  1384 	set_bit(__E1000_DOWN, &adapter->flags);
       
  1385 
       
  1386 	return 0;
       
  1387 }
       
  1388 
       
  1389 /**
       
  1390  * e1000_alloc_queues - Allocate memory for all rings
       
  1391  * @adapter: board private structure to initialize
       
  1392  *
       
  1393  * We allocate one ring per queue at run-time since we don't know the
       
  1394  * number of queues at compile-time.
       
  1395  **/
       
  1396 
       
  1397 static int __devinit e1000_alloc_queues(struct e1000_adapter *adapter)
       
  1398 {
       
  1399 	adapter->tx_ring = kcalloc(adapter->num_tx_queues,
       
  1400 	                           sizeof(struct e1000_tx_ring), GFP_KERNEL);
       
  1401 	if (!adapter->tx_ring)
       
  1402 		return -ENOMEM;
       
  1403 
       
  1404 	adapter->rx_ring = kcalloc(adapter->num_rx_queues,
       
  1405 	                           sizeof(struct e1000_rx_ring), GFP_KERNEL);
       
  1406 	if (!adapter->rx_ring) {
       
  1407 		kfree(adapter->tx_ring);
       
  1408 		return -ENOMEM;
       
  1409 	}
       
  1410 
       
  1411 	return E1000_SUCCESS;
       
  1412 }
       
  1413 
       
  1414 /**
       
  1415  * e1000_open - Called when a network interface is made active
       
  1416  * @netdev: network interface device structure
       
  1417  *
       
  1418  * Returns 0 on success, negative value on failure
       
  1419  *
       
  1420  * The open entry point is called when a network interface is made
       
  1421  * active by the system (IFF_UP).  At this point all resources needed
       
  1422  * for transmit and receive operations are allocated, the interrupt
       
  1423  * handler is registered with the OS, the watchdog task is started,
       
  1424  * and the stack is notified that the interface is ready.
       
  1425  **/
       
  1426 
       
  1427 static int e1000_open(struct net_device *netdev)
       
  1428 {
       
  1429 	struct e1000_adapter *adapter = netdev_priv(netdev);
       
  1430 	struct e1000_hw *hw = &adapter->hw;
       
  1431 	int err;
       
  1432 
       
  1433 	/* disallow open during test */
       
  1434 	if (test_bit(__E1000_TESTING, &adapter->flags))
       
  1435 		return -EBUSY;
       
  1436 
       
  1437 	netif_carrier_off(netdev);
       
  1438 
       
  1439 	/* allocate transmit descriptors */
       
  1440 	err = e1000_setup_all_tx_resources(adapter);
       
  1441 	if (err)
       
  1442 		goto err_setup_tx;
       
  1443 
       
  1444 	/* allocate receive descriptors */
       
  1445 	err = e1000_setup_all_rx_resources(adapter);
       
  1446 	if (err)
       
  1447 		goto err_setup_rx;
       
  1448 
       
  1449 	e1000_power_up_phy(adapter);
       
  1450 
       
  1451 	adapter->mng_vlan_id = E1000_MNG_VLAN_NONE;
       
  1452 	if ((hw->mng_cookie.status &
       
  1453 			  E1000_MNG_DHCP_COOKIE_STATUS_VLAN_SUPPORT)) {
       
  1454 		e1000_update_mng_vlan(adapter);
       
  1455 	}
       
  1456 
       
  1457 	/* before we allocate an interrupt, we must be ready to handle it.
       
  1458 	 * Setting DEBUG_SHIRQ in the kernel makes it fire an interrupt
       
  1459 	 * as soon as we call pci_request_irq, so we have to set up our
       
  1460 	 * clean_rx handler before we do so.  */
       
  1461 	e1000_configure(adapter);
       
  1462 
       
  1463 	err = e1000_request_irq(adapter);
       
  1464 	if (err)
       
  1465 		goto err_req_irq;
       
  1466 
       
  1467 	/* From here on the code is the same as e1000_up() */
       
  1468 	clear_bit(__E1000_DOWN, &adapter->flags);
       
  1469 
       
  1470 	if (!adapter->ecdev) {
       
  1471 		napi_enable(&adapter->napi);
       
  1472 
       
  1473 		e1000_irq_enable(adapter);
       
  1474 
       
  1475 		netif_start_queue(netdev);
       
  1476 	}
       
  1477 
       
  1478 	/* fire a link status change interrupt to start the watchdog */
       
  1479 	ew32(ICS, E1000_ICS_LSC);
       
  1480 
       
  1481 	return E1000_SUCCESS;
       
  1482 
       
  1483 err_req_irq:
       
  1484 	e1000_power_down_phy(adapter);
       
  1485 	e1000_free_all_rx_resources(adapter);
       
  1486 err_setup_rx:
       
  1487 	e1000_free_all_tx_resources(adapter);
       
  1488 err_setup_tx:
       
  1489 	e1000_reset(adapter);
       
  1490 
       
  1491 	return err;
       
  1492 }
       
  1493 
       
  1494 /**
       
  1495  * e1000_close - Disables a network interface
       
  1496  * @netdev: network interface device structure
       
  1497  *
       
  1498  * Returns 0, this is not allowed to fail
       
  1499  *
       
  1500  * The close entry point is called when an interface is de-activated
       
  1501 	 * by the OS.  The hardware is still under the driver's control, but
       
  1502  * needs to be disabled.  A global MAC reset is issued to stop the
       
  1503  * hardware, and all transmit and receive resources are freed.
       
  1504  **/
       
  1505 
       
  1506 static int e1000_close(struct net_device *netdev)
       
  1507 {
       
  1508 	struct e1000_adapter *adapter = netdev_priv(netdev);
       
  1509 	struct e1000_hw *hw = &adapter->hw;
       
  1510 
       
  1511 	WARN_ON(test_bit(__E1000_RESETTING, &adapter->flags));
       
  1512 	e1000_down(adapter);
       
  1513 	e1000_power_down_phy(adapter);
       
  1514 	e1000_free_irq(adapter);
       
  1515 
       
  1516 	e1000_free_all_tx_resources(adapter);
       
  1517 	e1000_free_all_rx_resources(adapter);
       
  1518 
       
  1519 	/* kill manageability vlan ID if supported, but not if a vlan with
       
  1520 	 * the same ID is registered on the host OS (let 8021q kill it) */
       
  1521 	if ((hw->mng_cookie.status &
       
  1522 			  E1000_MNG_DHCP_COOKIE_STATUS_VLAN_SUPPORT) &&
       
  1523 	     !test_bit(adapter->mng_vlan_id, adapter->active_vlans)) {
       
  1524 		e1000_vlan_rx_kill_vid(netdev, adapter->mng_vlan_id);
       
  1525 	}
       
  1526 
       
  1527 	return 0;
       
  1528 }
       
  1529 
       
  1530 /**
       
  1531  * e1000_check_64k_bound - check that memory doesn't cross 64kB boundary
       
  1532  * @adapter: address of board private structure
       
  1533  * @start: address of beginning of memory
       
  1534  * @len: length of memory
       
  1535  **/
       
  1536 static bool e1000_check_64k_bound(struct e1000_adapter *adapter, void *start,
       
  1537 				  unsigned long len)
       
  1538 {
       
  1539 	struct e1000_hw *hw = &adapter->hw;
       
  1540 	unsigned long begin = (unsigned long)start;
       
  1541 	unsigned long end = begin + len;
       
  1542 
       
  1543 	/* First revs of 82545 and 82546 must not allow any memory
       
  1544 	 * write location to cross 64k boundary due to errata 23 */
       
  1545 	if (hw->mac_type == e1000_82545 ||
       
  1546 	    hw->mac_type == e1000_ce4100 ||
       
  1547 	    hw->mac_type == e1000_82546) {
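       		/* begin and end-1 lie in the same 64 kB page iff their XOR has
       		 * no bits set at or above bit 16 */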
       
  1548 		return ((begin ^ (end - 1)) >> 16) != 0 ? false : true;
       
  1549 	}
       
  1550 
       
  1551 	return true;
       
  1552 }
       
  1553 
       
  1554 /**
       
  1555  * e1000_setup_tx_resources - allocate Tx resources (Descriptors)
       
  1556  * @adapter: board private structure
       
  1557  * @txdr:    tx descriptor ring (for a specific queue) to setup
       
  1558  *
       
  1559  * Return 0 on success, negative on failure
       
  1560  **/
       
  1561 
       
  1562 static int e1000_setup_tx_resources(struct e1000_adapter *adapter,
       
  1563 				    struct e1000_tx_ring *txdr)
       
  1564 {
       
  1565 	struct pci_dev *pdev = adapter->pdev;
       
  1566 	int size;
       
  1567 
       
  1568 	size = sizeof(struct e1000_buffer) * txdr->count;
       
  1569 	txdr->buffer_info = vzalloc(size);
       
  1570 	if (!txdr->buffer_info) {
       
  1571 		e_err(probe, "Unable to allocate memory for the Tx descriptor "
       
  1572 		      "ring\n");
       
  1573 		return -ENOMEM;
       
  1574 	}
       
  1575 
       
  1576 	/* round up to nearest 4K */
       
  1577 
       
  1578 	txdr->size = txdr->count * sizeof(struct e1000_tx_desc);
       
  1579 	txdr->size = ALIGN(txdr->size, 4096);
       
  1580 
       
  1581 	txdr->desc = dma_alloc_coherent(&pdev->dev, txdr->size, &txdr->dma,
       
  1582 					GFP_KERNEL);
       
  1583 	if (!txdr->desc) {
       
  1584 setup_tx_desc_die:
       
  1585 		vfree(txdr->buffer_info);
       
  1586 		e_err(probe, "Unable to allocate memory for the Tx descriptor "
       
  1587 		      "ring\n");
       
  1588 		return -ENOMEM;
       
  1589 	}
       
  1590 
       
  1591 	/* Fix for errata 23, can't cross 64kB boundary */
       
  1592 	if (!e1000_check_64k_bound(adapter, txdr->desc, txdr->size)) {
       
  1593 		void *olddesc = txdr->desc;
       
  1594 		dma_addr_t olddma = txdr->dma;
       
  1595 		e_err(tx_err, "txdr align check failed: %u bytes at %p\n",
       
  1596 		      txdr->size, txdr->desc);
       
  1597 		/* Try again, without freeing the previous */
       
  1598 		txdr->desc = dma_alloc_coherent(&pdev->dev, txdr->size,
       
  1599 						&txdr->dma, GFP_KERNEL);
       
  1600 		/* Failed allocation, critical failure */
       
  1601 		if (!txdr->desc) {
       
  1602 			dma_free_coherent(&pdev->dev, txdr->size, olddesc,
       
  1603 					  olddma);
       
  1604 			goto setup_tx_desc_die;
       
  1605 		}
       
  1606 
       
  1607 		if (!e1000_check_64k_bound(adapter, txdr->desc, txdr->size)) {
       
  1608 			/* give up */
       
  1609 			dma_free_coherent(&pdev->dev, txdr->size, txdr->desc,
       
  1610 					  txdr->dma);
       
  1611 			dma_free_coherent(&pdev->dev, txdr->size, olddesc,
       
  1612 					  olddma);
       
  1613 			e_err(probe, "Unable to allocate aligned memory "
       
  1614 			      "for the transmit descriptor ring\n");
       
  1615 			vfree(txdr->buffer_info);
       
  1616 			return -ENOMEM;
       
  1617 		} else {
       
  1618 			/* Free old allocation, new allocation was successful */
       
  1619 			dma_free_coherent(&pdev->dev, txdr->size, olddesc,
       
  1620 					  olddma);
       
  1621 		}
       
  1622 	}
       
  1623 	memset(txdr->desc, 0, txdr->size);
       
  1624 
       
  1625 	txdr->next_to_use = 0;
       
  1626 	txdr->next_to_clean = 0;
       
  1627 
       
  1628 	return 0;
       
  1629 }
       
  1630 
       
  1631 /**
       
  1632  * e1000_setup_all_tx_resources - wrapper to allocate Tx resources
       
  1633  * 				  (Descriptors) for all queues
       
  1634  * @adapter: board private structure
       
  1635  *
       
  1636  * Return 0 on success, negative on failure
       
  1637  **/
       
  1638 
       
  1639 int e1000_setup_all_tx_resources(struct e1000_adapter *adapter)
       
  1640 {
       
  1641 	int i, err = 0;
       
  1642 
       
  1643 	for (i = 0; i < adapter->num_tx_queues; i++) {
       
  1644 		err = e1000_setup_tx_resources(adapter, &adapter->tx_ring[i]);
       
  1645 		if (err) {
       
  1646 			e_err(probe, "Allocation for Tx Queue %u failed\n", i);
       
  1647 			for (i-- ; i >= 0; i--)
       
  1648 				e1000_free_tx_resources(adapter,
       
  1649 							&adapter->tx_ring[i]);
       
  1650 			break;
       
  1651 		}
       
  1652 	}
       
  1653 
       
  1654 	return err;
       
  1655 }
       
  1656 
       
  1657 /**
       
  1658  * e1000_configure_tx - Configure 8254x Transmit Unit after Reset
       
  1659  * @adapter: board private structure
       
  1660  *
       
  1661  * Configure the Tx unit of the MAC after a reset.
       
  1662  **/
       
  1663 
       
  1664 static void e1000_configure_tx(struct e1000_adapter *adapter)
       
  1665 {
       
  1666 	u64 tdba;
       
  1667 	struct e1000_hw *hw = &adapter->hw;
       
  1668 	u32 tdlen, tctl, tipg;
       
  1669 	u32 ipgr1, ipgr2;
       
  1670 
       
  1671 	/* Setup the HW Tx Head and Tail descriptor pointers */
       
  1672 
       
  1673 	switch (adapter->num_tx_queues) {
       
  1674 	case 1:
       
  1675 	default:
       
  1676 		tdba = adapter->tx_ring[0].dma;
       
  1677 		tdlen = adapter->tx_ring[0].count *
       
  1678 			sizeof(struct e1000_tx_desc);
       
  1679 		ew32(TDLEN, tdlen);
       
  1680 		ew32(TDBAH, (tdba >> 32));
       
  1681 		ew32(TDBAL, (tdba & 0x00000000ffffffffULL));
       
  1682 		ew32(TDT, 0);
       
  1683 		ew32(TDH, 0);
       
  1684 		adapter->tx_ring[0].tdh = ((hw->mac_type >= e1000_82543) ? E1000_TDH : E1000_82542_TDH);
       
  1685 		adapter->tx_ring[0].tdt = ((hw->mac_type >= e1000_82543) ? E1000_TDT : E1000_82542_TDT);
       
  1686 		break;
       
  1687 	}
       
  1688 
       
  1689 	/* Set the default values for the Tx Inter Packet Gap timer */
       
  1690 	if ((hw->media_type == e1000_media_type_fiber ||
       
  1691 	     hw->media_type == e1000_media_type_internal_serdes))
       
  1692 		tipg = DEFAULT_82543_TIPG_IPGT_FIBER;
       
  1693 	else
       
  1694 		tipg = DEFAULT_82543_TIPG_IPGT_COPPER;
       
  1695 
       
  1696 	switch (hw->mac_type) {
       
  1697 	case e1000_82542_rev2_0:
       
  1698 	case e1000_82542_rev2_1:
       
  1699 		tipg = DEFAULT_82542_TIPG_IPGT;
       
  1700 		ipgr1 = DEFAULT_82542_TIPG_IPGR1;
       
  1701 		ipgr2 = DEFAULT_82542_TIPG_IPGR2;
       
  1702 		break;
       
  1703 	default:
       
  1704 		ipgr1 = DEFAULT_82543_TIPG_IPGR1;
       
  1705 		ipgr2 = DEFAULT_82543_TIPG_IPGR2;
       
  1706 		break;
       
  1707 	}
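       	/* TIPG packs the three inter-packet gap fields: IPGT in the low
       	 * bits, with IPGR1 and IPGR2 shifted in above it */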
       
  1708 	tipg |= ipgr1 << E1000_TIPG_IPGR1_SHIFT;
       
  1709 	tipg |= ipgr2 << E1000_TIPG_IPGR2_SHIFT;
       
  1710 	ew32(TIPG, tipg);
       
  1711 
       
  1712 	/* Set the Tx Interrupt Delay register */
       
  1713 
       
  1714 	ew32(TIDV, adapter->tx_int_delay);
       
  1715 	if (hw->mac_type >= e1000_82540)
       
  1716 		ew32(TADV, adapter->tx_abs_int_delay);
       
  1717 
       
  1718 	/* Program the Transmit Control Register */
       
  1719 
       
  1720 	tctl = er32(TCTL);
       
  1721 	tctl &= ~E1000_TCTL_CT;
       
  1722 	tctl |= E1000_TCTL_PSP | E1000_TCTL_RTLC |
       
  1723 		(E1000_COLLISION_THRESHOLD << E1000_CT_SHIFT);
       
  1724 
       
  1725 	e1000_config_collision_dist(hw);
       
  1726 
       
  1727 	/* Setup Transmit Descriptor Settings for eop descriptor */
       
  1728 	adapter->txd_cmd = E1000_TXD_CMD_EOP | E1000_TXD_CMD_IFCS;
       
  1729 
       
  1730 	/* only set IDE if we are delaying interrupts using the timers */
       
  1731 	if (adapter->tx_int_delay)
       
  1732 		adapter->txd_cmd |= E1000_TXD_CMD_IDE;
       
  1733 
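       	/* older MACs use the RPS (report packet sent) completion bit;
       	 * newer ones use RS (report status) descriptor write-back */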
       
  1734 	if (hw->mac_type < e1000_82543)
       
  1735 		adapter->txd_cmd |= E1000_TXD_CMD_RPS;
       
  1736 	else
       
  1737 		adapter->txd_cmd |= E1000_TXD_CMD_RS;
       
  1738 
       
  1739 	/* Cache if we're 82544 running in PCI-X because we'll
       
  1740 	 * need this to apply a workaround later in the send path. */
       
  1741 	if (hw->mac_type == e1000_82544 &&
       
  1742 	    hw->bus_type == e1000_bus_type_pcix)
       
  1743 		adapter->pcix_82544 = true;
       
  1744 
       
  1745 	ew32(TCTL, tctl);
       
  1746 
       
  1747 }
       
  1748 
       
  1749 /**
       
  1750  * e1000_setup_rx_resources - allocate Rx resources (Descriptors)
       
  1751  * @adapter: board private structure
       
  1752  * @rxdr:    rx descriptor ring (for a specific queue) to setup
       
  1753  *
       
  1754  * Returns 0 on success, negative on failure
       
  1755  **/
       
  1756 
       
  1757 static int e1000_setup_rx_resources(struct e1000_adapter *adapter,
       
  1758 				    struct e1000_rx_ring *rxdr)
       
  1759 {
       
  1760 	struct pci_dev *pdev = adapter->pdev;
       
  1761 	int size, desc_len;
       
  1762 
       
  1763 	size = sizeof(struct e1000_buffer) * rxdr->count;
       
  1764 	rxdr->buffer_info = vzalloc(size);
       
  1765 	if (!rxdr->buffer_info) {
       
  1766 		e_err(probe, "Unable to allocate memory for the Rx descriptor "
       
  1767 		      "ring\n");
       
  1768 		return -ENOMEM;
       
  1769 	}
       
  1770 
       
  1771 	desc_len = sizeof(struct e1000_rx_desc);
       
  1772 
       
  1773 	/* Round up to nearest 4K */
       
  1774 
       
  1775 	rxdr->size = rxdr->count * desc_len;
       
  1776 	rxdr->size = ALIGN(rxdr->size, 4096);
       
  1777 
       
  1778 	rxdr->desc = dma_alloc_coherent(&pdev->dev, rxdr->size, &rxdr->dma,
       
  1779 					GFP_KERNEL);
       
  1780 
       
  1781 	if (!rxdr->desc) {
       
  1782 		e_err(probe, "Unable to allocate memory for the Rx descriptor "
       
  1783 		      "ring\n");
       
  1784 setup_rx_desc_die:
       
  1785 		vfree(rxdr->buffer_info);
       
  1786 		return -ENOMEM;
       
  1787 	}
       
  1788 
       
  1789 	/* Fix for errata 23, can't cross 64kB boundary */
       
  1790 	if (!e1000_check_64k_bound(adapter, rxdr->desc, rxdr->size)) {
       
  1791 		void *olddesc = rxdr->desc;
       
  1792 		dma_addr_t olddma = rxdr->dma;
       
  1793 		e_err(rx_err, "rxdr align check failed: %u bytes at %p\n",
       
  1794 		      rxdr->size, rxdr->desc);
       
  1795 		/* Try again, without freeing the previous */
       
  1796 		rxdr->desc = dma_alloc_coherent(&pdev->dev, rxdr->size,
       
  1797 						&rxdr->dma, GFP_KERNEL);
       
  1798 		/* Failed allocation, critical failure */
       
  1799 		if (!rxdr->desc) {
       
  1800 			dma_free_coherent(&pdev->dev, rxdr->size, olddesc,
       
  1801 					  olddma);
       
  1802 			e_err(probe, "Unable to allocate memory for the Rx "
       
  1803 			      "descriptor ring\n");
       
  1804 			goto setup_rx_desc_die;
       
  1805 		}
       
  1806 
       
  1807 		if (!e1000_check_64k_bound(adapter, rxdr->desc, rxdr->size)) {
       
  1808 			/* give up */
       
  1809 			dma_free_coherent(&pdev->dev, rxdr->size, rxdr->desc,
       
  1810 					  rxdr->dma);
       
  1811 			dma_free_coherent(&pdev->dev, rxdr->size, olddesc,
       
  1812 					  olddma);
       
  1813 			e_err(probe, "Unable to allocate aligned memory for "
       
  1814 			      "the Rx descriptor ring\n");
       
  1815 			goto setup_rx_desc_die;
       
  1816 		} else {
       
  1817 			/* Free old allocation, new allocation was successful */
       
  1818 			dma_free_coherent(&pdev->dev, rxdr->size, olddesc,
       
  1819 					  olddma);
       
  1820 		}
       
  1821 	}
       
  1822 	memset(rxdr->desc, 0, rxdr->size);
       
  1823 
       
  1824 	rxdr->next_to_clean = 0;
       
  1825 	rxdr->next_to_use = 0;
       
  1826 	rxdr->rx_skb_top = NULL;
       
  1827 
       
  1828 	return 0;
       
  1829 }
       
  1830 
       
  1831 /**
       
  1832  * e1000_setup_all_rx_resources - wrapper to allocate Rx resources
       
  1833  * 				  (Descriptors) for all queues
       
  1834  * @adapter: board private structure
       
  1835  *
       
  1836  * Return 0 on success, negative on failure
       
  1837  **/
       
  1838 
       
  1839 int e1000_setup_all_rx_resources(struct e1000_adapter *adapter)
       
  1840 {
       
  1841 	int i, err = 0;
       
  1842 
       
  1843 	for (i = 0; i < adapter->num_rx_queues; i++) {
       
  1844 		err = e1000_setup_rx_resources(adapter, &adapter->rx_ring[i]);
       
  1845 		if (err) {
       
  1846 			e_err(probe, "Allocation for Rx Queue %u failed\n", i);
       
  1847 			for (i-- ; i >= 0; i--)
       
  1848 				e1000_free_rx_resources(adapter,
       
  1849 							&adapter->rx_ring[i]);
       
  1850 			break;
       
  1851 		}
       
  1852 	}
       
  1853 
       
  1854 	return err;
       
  1855 }
       
  1856 
       
  1857 /**
       
  1858  * e1000_setup_rctl - configure the receive control registers
       
  1859  * @adapter: Board private structure
       
  1860  **/
       
  1861 static void e1000_setup_rctl(struct e1000_adapter *adapter)
       
  1862 {
       
  1863 	struct e1000_hw *hw = &adapter->hw;
       
  1864 	u32 rctl;
       
  1865 
       
  1866 	rctl = er32(RCTL);
       
  1867 
       
  1868 	rctl &= ~(3 << E1000_RCTL_MO_SHIFT);
       
  1869 
       
  1870 	rctl |= E1000_RCTL_BAM | E1000_RCTL_LBM_NO |
       
  1871 		E1000_RCTL_RDMTS_HALF |
       
  1872 		(hw->mc_filter_type << E1000_RCTL_MO_SHIFT);
       
  1873 
       
  1874 	if (hw->tbi_compatibility_on == 1)
       
  1875 		rctl |= E1000_RCTL_SBP;
       
  1876 	else
       
  1877 		rctl &= ~E1000_RCTL_SBP;
       
  1878 
       
  1879 	if (adapter->netdev->mtu <= ETH_DATA_LEN)
       
  1880 		rctl &= ~E1000_RCTL_LPE;
       
  1881 	else
       
  1882 		rctl |= E1000_RCTL_LPE;
       
  1883 
       
  1884 	/* Setup buffer sizes */
       
  1885 	rctl &= ~E1000_RCTL_SZ_4096;
       
  1886 	rctl |= E1000_RCTL_BSEX;
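       	/* BSEX scales the RCTL size encoding by 16; it is cleared again
       	 * below for the default 2048 byte buffers */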
       
  1887 	switch (adapter->rx_buffer_len) {
       
  1888 		case E1000_RXBUFFER_2048:
       
  1889 		default:
       
  1890 			rctl |= E1000_RCTL_SZ_2048;
       
  1891 			rctl &= ~E1000_RCTL_BSEX;
       
  1892 			break;
       
  1893 		case E1000_RXBUFFER_4096:
       
  1894 			rctl |= E1000_RCTL_SZ_4096;
       
  1895 			break;
       
  1896 		case E1000_RXBUFFER_8192:
       
  1897 			rctl |= E1000_RCTL_SZ_8192;
       
  1898 			break;
       
  1899 		case E1000_RXBUFFER_16384:
       
  1900 			rctl |= E1000_RCTL_SZ_16384;
       
  1901 			break;
       
  1902 	}
       
  1903 
       
  1904 	ew32(RCTL, rctl);
       
  1905 }
       
  1906 
       
  1907 /**
       
  1908  * e1000_configure_rx - Configure 8254x Receive Unit after Reset
       
  1909  * @adapter: board private structure
       
  1910  *
       
  1911  * Configure the Rx unit of the MAC after a reset.
       
  1912  **/
       
  1913 
       
  1914 static void e1000_configure_rx(struct e1000_adapter *adapter)
       
  1915 {
       
  1916 	u64 rdba;
       
  1917 	struct e1000_hw *hw = &adapter->hw;
       
  1918 	u32 rdlen, rctl, rxcsum;
       
  1919 
       
  1920 	if (adapter->netdev->mtu > ETH_DATA_LEN) {
       
  1921 		rdlen = adapter->rx_ring[0].count *
       
  1922 		        sizeof(struct e1000_rx_desc);
       
  1923 		adapter->clean_rx = e1000_clean_jumbo_rx_irq;
       
  1924 		adapter->alloc_rx_buf = e1000_alloc_jumbo_rx_buffers;
       
  1925 	} else {
       
  1926 		rdlen = adapter->rx_ring[0].count *
       
  1927 		        sizeof(struct e1000_rx_desc);
       
  1928 		adapter->clean_rx = e1000_clean_rx_irq;
       
  1929 		adapter->alloc_rx_buf = e1000_alloc_rx_buffers;
       
  1930 	}
       
  1931 
       
  1932 	/* disable receives while setting up the descriptors */
       
  1933 	rctl = er32(RCTL);
       
  1934 	ew32(RCTL, rctl & ~E1000_RCTL_EN);
       
  1935 
       
  1936 	/* set the Receive Delay Timer Register */
       
  1937 	ew32(RDTR, adapter->rx_int_delay);
       
  1938 
       
  1939 	if (hw->mac_type >= e1000_82540) {
       
  1940 		ew32(RADV, adapter->rx_abs_int_delay);
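       		/* the ITR register counts in 256 ns units, so this converts
       		 * adapter->itr (interrupts/s) into that granularity */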
       
  1941 		if (adapter->itr_setting != 0)
       
  1942 			ew32(ITR, 1000000000 / (adapter->itr * 256));
       
  1943 	}
       
  1944 
       
  1945 	/* Setup the HW Rx Head and Tail Descriptor Pointers and
       
  1946 	 * the Base and Length of the Rx Descriptor Ring */
       
  1947 	switch (adapter->num_rx_queues) {
       
  1948 	case 1:
       
  1949 	default:
       
  1950 		rdba = adapter->rx_ring[0].dma;
       
  1951 		ew32(RDLEN, rdlen);
       
  1952 		ew32(RDBAH, (rdba >> 32));
       
  1953 		ew32(RDBAL, (rdba & 0x00000000ffffffffULL));
       
  1954 		ew32(RDT, 0);
       
  1955 		ew32(RDH, 0);
       
  1956 		adapter->rx_ring[0].rdh = ((hw->mac_type >= e1000_82543) ? E1000_RDH : E1000_82542_RDH);
       
  1957 		adapter->rx_ring[0].rdt = ((hw->mac_type >= e1000_82543) ? E1000_RDT : E1000_82542_RDT);
       
  1958 		break;
       
  1959 	}
       
  1960 
       
  1961 	/* Enable 82543 Receive Checksum Offload for TCP and UDP */
       
  1962 	if (hw->mac_type >= e1000_82543) {
       
  1963 		rxcsum = er32(RXCSUM);
       
  1964 		if (adapter->rx_csum)
       
  1965 			rxcsum |= E1000_RXCSUM_TUOFL;
       
  1966 		else
       
  1967 			/* don't need to clear IPPCSE as it defaults to 0 */
       
  1968 			rxcsum &= ~E1000_RXCSUM_TUOFL;
       
  1969 		ew32(RXCSUM, rxcsum);
       
  1970 	}
       
  1971 
       
  1972 	/* Enable Receives */
       
  1973 	ew32(RCTL, rctl | E1000_RCTL_EN);
       
  1974 }
       
  1975 
       
  1976 /**
       
  1977  * e1000_free_tx_resources - Free Tx Resources per Queue
       
  1978  * @adapter: board private structure
       
  1979  * @tx_ring: Tx descriptor ring for a specific queue
       
  1980  *
       
  1981  * Free all transmit software resources
       
  1982  **/
       
  1983 
       
  1984 static void e1000_free_tx_resources(struct e1000_adapter *adapter,
       
  1985 				    struct e1000_tx_ring *tx_ring)
       
  1986 {
       
  1987 	struct pci_dev *pdev = adapter->pdev;
       
  1988 
       
  1989 	e1000_clean_tx_ring(adapter, tx_ring);
       
  1990 
       
  1991 	vfree(tx_ring->buffer_info);
       
  1992 	tx_ring->buffer_info = NULL;
       
  1993 
       
  1994 	dma_free_coherent(&pdev->dev, tx_ring->size, tx_ring->desc,
       
  1995 			  tx_ring->dma);
       
  1996 
       
  1997 	tx_ring->desc = NULL;
       
  1998 }
       
  1999 
       
  2000 /**
       
  2001  * e1000_free_all_tx_resources - Free Tx Resources for All Queues
       
  2002  * @adapter: board private structure
       
  2003  *
       
  2004  * Free all transmit software resources
       
  2005  **/
       
  2006 
       
  2007 void e1000_free_all_tx_resources(struct e1000_adapter *adapter)
       
  2008 {
       
  2009 	int i;
       
  2010 
       
  2011 	for (i = 0; i < adapter->num_tx_queues; i++)
       
  2012 		e1000_free_tx_resources(adapter, &adapter->tx_ring[i]);
       
  2013 }
       
  2014 
       
  2015 static void e1000_unmap_and_free_tx_resource(struct e1000_adapter *adapter,
       
  2016 					     struct e1000_buffer *buffer_info)
       
  2017 {
       
  2018 	if (adapter->ecdev) {
       
  2019 		return;
       
  2020 	}
       
  2021 
       
  2022 	if (buffer_info->dma) {
       
  2023 		if (buffer_info->mapped_as_page)
       
  2024 			dma_unmap_page(&adapter->pdev->dev, buffer_info->dma,
       
  2025 				       buffer_info->length, DMA_TO_DEVICE);
       
  2026 		else
       
  2027 			dma_unmap_single(&adapter->pdev->dev, buffer_info->dma,
       
  2028 					 buffer_info->length,
       
  2029 					 DMA_TO_DEVICE);
       
  2030 		buffer_info->dma = 0;
       
  2031 	}
       
  2032 	if (buffer_info->skb) {
       
  2033 		dev_kfree_skb_any(buffer_info->skb);
       
  2034 		buffer_info->skb = NULL;
       
  2035 	}
       
  2036 	buffer_info->time_stamp = 0;
       
  2037 	/* buffer_info must be completely set up in the transmit path */
       
  2038 }
       
  2039 
       
  2040 /**
       
  2041  * e1000_clean_tx_ring - Free Tx Buffers
       
  2042  * @adapter: board private structure
       
  2043  * @tx_ring: ring to be cleaned
       
  2044  **/
       
  2045 
       
  2046 static void e1000_clean_tx_ring(struct e1000_adapter *adapter,
       
  2047 				struct e1000_tx_ring *tx_ring)
       
  2048 {
       
  2049 	struct e1000_hw *hw = &adapter->hw;
       
  2050 	struct e1000_buffer *buffer_info;
       
  2051 	unsigned long size;
       
  2052 	unsigned int i;
       
  2053 
       
  2054 	/* Free all the Tx ring sk_buffs */
       
  2055 
       
  2056 	for (i = 0; i < tx_ring->count; i++) {
       
  2057 		buffer_info = &tx_ring->buffer_info[i];
       
  2058 		e1000_unmap_and_free_tx_resource(adapter, buffer_info);
       
  2059 	}
       
  2060 
       
  2061 	size = sizeof(struct e1000_buffer) * tx_ring->count;
       
  2062 	memset(tx_ring->buffer_info, 0, size);
       
  2063 
       
  2064 	/* Zero out the descriptor ring */
       
  2065 
       
  2066 	memset(tx_ring->desc, 0, tx_ring->size);
       
  2067 
       
  2068 	tx_ring->next_to_use = 0;
       
  2069 	tx_ring->next_to_clean = 0;
       
  2070 	tx_ring->last_tx_tso = false;
       
  2071 
       
  2072 	writel(0, hw->hw_addr + tx_ring->tdh);
       
  2073 	writel(0, hw->hw_addr + tx_ring->tdt);
       
  2074 }
       
  2075 
       
  2076 /**
       
  2077  * e1000_clean_all_tx_rings - Free Tx Buffers for all queues
       
  2078  * @adapter: board private structure
       
  2079  **/
       
  2080 
       
  2081 static void e1000_clean_all_tx_rings(struct e1000_adapter *adapter)
       
  2082 {
       
  2083 	int i;
       
  2084 
       
  2085 	for (i = 0; i < adapter->num_tx_queues; i++)
       
  2086 		e1000_clean_tx_ring(adapter, &adapter->tx_ring[i]);
       
  2087 }
       
  2088 
       
  2089 /**
       
  2090  * e1000_free_rx_resources - Free Rx Resources
       
  2091  * @adapter: board private structure
       
  2092  * @rx_ring: ring to clean the resources from
       
  2093  *
       
  2094  * Free all receive software resources
       
  2095  **/
       
  2096 
       
  2097 static void e1000_free_rx_resources(struct e1000_adapter *adapter,
       
  2098 				    struct e1000_rx_ring *rx_ring)
       
  2099 {
       
  2100 	struct pci_dev *pdev = adapter->pdev;
       
  2101 
       
  2102 	e1000_clean_rx_ring(adapter, rx_ring);
       
  2103 
       
  2104 	vfree(rx_ring->buffer_info);
       
  2105 	rx_ring->buffer_info = NULL;
       
  2106 
       
  2107 	dma_free_coherent(&pdev->dev, rx_ring->size, rx_ring->desc,
       
  2108 			  rx_ring->dma);
       
  2109 
       
  2110 	rx_ring->desc = NULL;
       
  2111 }
       
  2112 
       
  2113 /**
       
  2114  * e1000_free_all_rx_resources - Free Rx Resources for All Queues
       
  2115  * @adapter: board private structure
       
  2116  *
       
  2117  * Free all receive software resources
       
  2118  **/
       
  2119 
       
  2120 void e1000_free_all_rx_resources(struct e1000_adapter *adapter)
       
  2121 {
       
  2122 	int i;
       
  2123 
       
  2124 	for (i = 0; i < adapter->num_rx_queues; i++)
       
  2125 		e1000_free_rx_resources(adapter, &adapter->rx_ring[i]);
       
  2126 }
       
  2127 
       
  2128 /**
       
  2129  * e1000_clean_rx_ring - Free Rx Buffers per Queue
       
  2130  * @adapter: board private structure
       
  2131  * @rx_ring: ring to free buffers from
       
  2132  **/
       
  2133 
       
  2134 static void e1000_clean_rx_ring(struct e1000_adapter *adapter,
       
  2135 				struct e1000_rx_ring *rx_ring)
       
  2136 {
       
  2137 	struct e1000_hw *hw = &adapter->hw;
       
  2138 	struct e1000_buffer *buffer_info;
       
  2139 	struct pci_dev *pdev = adapter->pdev;
       
  2140 	unsigned long size;
       
  2141 	unsigned int i;
       
  2142 
       
  2143 	/* Free all the Rx ring sk_buffs */
       
  2144 	for (i = 0; i < rx_ring->count; i++) {
       
  2145 		buffer_info = &rx_ring->buffer_info[i];
       
  2146 		if (buffer_info->dma &&
       
  2147 		    adapter->clean_rx == e1000_clean_rx_irq) {
       
  2148 			dma_unmap_single(&pdev->dev, buffer_info->dma,
       
  2149 			                 buffer_info->length,
       
  2150 					 DMA_FROM_DEVICE);
       
  2151 		} else if (buffer_info->dma &&
       
  2152 		           adapter->clean_rx == e1000_clean_jumbo_rx_irq) {
       
  2153 			dma_unmap_page(&pdev->dev, buffer_info->dma,
       
  2154 				       buffer_info->length,
       
  2155 				       DMA_FROM_DEVICE);
       
  2156 		}
       
  2157 
       
  2158 		buffer_info->dma = 0;
       
  2159 		if (buffer_info->page) {
       
  2160 			put_page(buffer_info->page);
       
  2161 			buffer_info->page = NULL;
       
  2162 		}
       
  2163 		if (buffer_info->skb) {
       
  2164 			dev_kfree_skb(buffer_info->skb);
       
  2165 			buffer_info->skb = NULL;
       
  2166 		}
       
  2167 	}
       
  2168 
       
  2169 	/* there also may be some cached data from a chained receive */
       
  2170 	if (rx_ring->rx_skb_top) {
       
  2171 		dev_kfree_skb(rx_ring->rx_skb_top);
       
  2172 		rx_ring->rx_skb_top = NULL;
       
  2173 	}
       
  2174 
       
  2175 	size = sizeof(struct e1000_buffer) * rx_ring->count;
       
  2176 	memset(rx_ring->buffer_info, 0, size);
       
  2177 
       
  2178 	/* Zero out the descriptor ring */
       
  2179 	memset(rx_ring->desc, 0, rx_ring->size);
       
  2180 
       
  2181 	rx_ring->next_to_clean = 0;
       
  2182 	rx_ring->next_to_use = 0;
       
  2183 
       
  2184 	writel(0, hw->hw_addr + rx_ring->rdh);
       
  2185 	writel(0, hw->hw_addr + rx_ring->rdt);
       
  2186 }
       
  2187 
       
  2188 /**
       
  2189  * e1000_clean_all_rx_rings - Free Rx Buffers for all queues
       
  2190  * @adapter: board private structure
       
  2191  **/
       
  2192 
       
  2193 static void e1000_clean_all_rx_rings(struct e1000_adapter *adapter)
       
  2194 {
       
  2195 	int i;
       
  2196 
       
  2197 	for (i = 0; i < adapter->num_rx_queues; i++)
       
  2198 		e1000_clean_rx_ring(adapter, &adapter->rx_ring[i]);
       
  2199 }
       
  2200 
       
  2201 /* The 82542 2.0 (revision 2) needs to have the receive unit in reset
       
  2202  * and memory write and invalidate disabled for certain operations
       
  2203  */
       
  2204 static void e1000_enter_82542_rst(struct e1000_adapter *adapter)
       
  2205 {
       
  2206 	struct e1000_hw *hw = &adapter->hw;
       
  2207 	struct net_device *netdev = adapter->netdev;
       
  2208 	u32 rctl;
       
  2209 
       
  2210 	e1000_pci_clear_mwi(hw);
       
  2211 
       
  2212 	rctl = er32(RCTL);
       
  2213 	rctl |= E1000_RCTL_RST;
       
  2214 	ew32(RCTL, rctl);
       
  2215 	E1000_WRITE_FLUSH();
       
  2216 	mdelay(5);
       
  2217 
       
  2218 	if (!adapter->ecdev && netif_running(netdev))
       
  2219 		e1000_clean_all_rx_rings(adapter);
       
  2220 }
       
  2221 
       
  2222 static void e1000_leave_82542_rst(struct e1000_adapter *adapter)
       
  2223 {
       
  2224 	struct e1000_hw *hw = &adapter->hw;
       
  2225 	struct net_device *netdev = adapter->netdev;
       
  2226 	u32 rctl;
       
  2227 
       
  2228 	rctl = er32(RCTL);
       
  2229 	rctl &= ~E1000_RCTL_RST;
       
  2230 	ew32(RCTL, rctl);
       
  2231 	E1000_WRITE_FLUSH();
       
  2232 	mdelay(5);
       
  2233 
       
  2234 	if (hw->pci_cmd_word & PCI_COMMAND_INVALIDATE)
       
  2235 		e1000_pci_set_mwi(hw);
       
  2236 
       
  2237 	if (adapter->ecdev || netif_running(netdev)) {
       
  2238 		/* No need to loop, because 82542 supports only 1 queue */
       
  2239 		struct e1000_rx_ring *ring = &adapter->rx_ring[0];
       
  2240 		e1000_configure_rx(adapter);
       
  2241 		if (adapter->ecdev) {
       
  2242 			/* fill rx ring completely! */
       
  2243 			adapter->alloc_rx_buf(adapter, ring, ring->count);
       
  2244 		} else {
       
  2245 			/* this one leaves the last ring element unallocated! */
       
  2246 			adapter->alloc_rx_buf(adapter, ring, E1000_DESC_UNUSED(ring));
       
  2247 		}
       
  2248 
       
  2249 	}
       
  2250 }
       
  2251 
       
  2252 /**
       
  2253  * e1000_set_mac - Change the Ethernet Address of the NIC
       
  2254  * @netdev: network interface device structure
       
  2255  * @p: pointer to an address structure
       
  2256  *
       
  2257  * Returns 0 on success, negative on failure
       
  2258  **/
       
  2259 
       
  2260 static int e1000_set_mac(struct net_device *netdev, void *p)
       
  2261 {
       
  2262 	struct e1000_adapter *adapter = netdev_priv(netdev);
       
  2263 	struct e1000_hw *hw = &adapter->hw;
       
  2264 	struct sockaddr *addr = p;
       
  2265 
       
  2266 	if (!is_valid_ether_addr(addr->sa_data))
       
  2267 		return -EADDRNOTAVAIL;
       
  2268 
       
  2269 	/* 82542 2.0 needs to be in reset to write receive address registers */
       
  2270 
       
  2271 	if (hw->mac_type == e1000_82542_rev2_0)
       
  2272 		e1000_enter_82542_rst(adapter);
       
  2273 
       
  2274 	memcpy(netdev->dev_addr, addr->sa_data, netdev->addr_len);
       
  2275 	memcpy(hw->mac_addr, addr->sa_data, netdev->addr_len);
       
  2276 
       
  2277 	e1000_rar_set(hw, hw->mac_addr, 0);
       
  2278 
       
  2279 	if (hw->mac_type == e1000_82542_rev2_0)
       
  2280 		e1000_leave_82542_rst(adapter);
       
  2281 
       
  2282 	return 0;
       
  2283 }
       
  2284 
       
  2285 /**
       
  2286  * e1000_set_rx_mode - Secondary Unicast, Multicast and Promiscuous mode set
       
  2287  * @netdev: network interface device structure
       
  2288  *
       
  2289  * The set_rx_mode entry point is called whenever the unicast or multicast
       
  2290  * address lists or the network interface flags are updated. This routine is
       
  2291  * responsible for configuring the hardware for proper unicast, multicast,
       
  2292  * promiscuous mode, and all-multi behavior.
       
  2293  **/
       
  2294 
       
  2295 static void e1000_set_rx_mode(struct net_device *netdev)
       
  2296 {
       
  2297 	struct e1000_adapter *adapter = netdev_priv(netdev);
       
  2298 	struct e1000_hw *hw = &adapter->hw;
       
  2299 	struct netdev_hw_addr *ha;
       
  2300 	bool use_uc = false;
       
  2301 	u32 rctl;
       
  2302 	u32 hash_value;
       
  2303 	int i, rar_entries = E1000_RAR_ENTRIES;
       
  2304 	int mta_reg_count = E1000_NUM_MTA_REGISTERS;
       
  2305 	u32 *mcarray = kcalloc(mta_reg_count, sizeof(u32), GFP_ATOMIC);
       
  2306 
       
  2307 	if (!mcarray) {
       
  2308 		e_err(probe, "memory allocation failed\n");
       
  2309 		return;
       
  2310 	}
       
  2311 
       
  2312 	/* Check for Promiscuous and All Multicast modes */
       
  2313 
       
  2314 	rctl = er32(RCTL);
       
  2315 
       
  2316 	if (netdev->flags & IFF_PROMISC) {
       
  2317 		rctl |= (E1000_RCTL_UPE | E1000_RCTL_MPE);
       
  2318 		rctl &= ~E1000_RCTL_VFE;
       
  2319 	} else {
       
  2320 		if (netdev->flags & IFF_ALLMULTI)
       
  2321 			rctl |= E1000_RCTL_MPE;
       
  2322 		else
       
  2323 			rctl &= ~E1000_RCTL_MPE;
       
  2324 		/* Enable VLAN filter if there is a VLAN */
       
  2325 		if (e1000_vlan_used(adapter))
       
  2326 			rctl |= E1000_RCTL_VFE;
       
  2327 	}
       
  2328 
       
  2329 	if (netdev_uc_count(netdev) > rar_entries - 1) {
       
  2330 		rctl |= E1000_RCTL_UPE;
       
  2331 	} else if (!(netdev->flags & IFF_PROMISC)) {
       
  2332 		rctl &= ~E1000_RCTL_UPE;
       
  2333 		use_uc = true;
       
  2334 	}
       
  2335 
       
  2336 	ew32(RCTL, rctl);
       
  2337 
       
  2338 	/* 82542 2.0 needs to be in reset to write receive address registers */
       
  2339 
       
  2340 	if (hw->mac_type == e1000_82542_rev2_0)
       
  2341 		e1000_enter_82542_rst(adapter);
       
  2342 
       
  2343 	/* load the first 14 addresses into the exact filters 1-14. Unicast
       
  2344 	 * addresses take precedence to avoid disabling unicast filtering
       
  2345 	 * when possible.
       
  2346 	 *
       
  2347 	 * RAR 0 is used for the station MAC address.
       
  2348 	 * If there are not 14 addresses, go ahead and clear the filters
       
  2349 	 */
       
  2350 	i = 1;
       
  2351 	if (use_uc)
       
  2352 		netdev_for_each_uc_addr(ha, netdev) {
       
  2353 			if (i == rar_entries)
       
  2354 				break;
       
  2355 			e1000_rar_set(hw, ha->addr, i++);
       
  2356 		}
       
  2357 
       
  2358 	netdev_for_each_mc_addr(ha, netdev) {
       
  2359 		if (i == rar_entries) {
       
  2360 			/* load any remaining addresses into the hash table */
       
  2361 			u32 hash_reg, hash_bit, mta;
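       			/* bits 5-11 of the hash pick one of 128 MTA registers,
       			 * bits 0-4 pick the bit within that register */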
       
  2362 			hash_value = e1000_hash_mc_addr(hw, ha->addr);
       
  2363 			hash_reg = (hash_value >> 5) & 0x7F;
       
  2364 			hash_bit = hash_value & 0x1F;
       
  2365 			mta = (1 << hash_bit);
       
  2366 			mcarray[hash_reg] |= mta;
       
  2367 		} else {
       
  2368 			e1000_rar_set(hw, ha->addr, i++);
       
  2369 		}
       
  2370 	}
       
  2371 
       
  2372 	for (; i < rar_entries; i++) {
       
  2373 		E1000_WRITE_REG_ARRAY(hw, RA, i << 1, 0);
       
  2374 		E1000_WRITE_FLUSH();
       
  2375 		E1000_WRITE_REG_ARRAY(hw, RA, (i << 1) + 1, 0);
       
  2376 		E1000_WRITE_FLUSH();
       
  2377 	}
       
  2378 
       
  2379 	/* write the hash table completely; writing from the bottom avoids
       
  2380 	 * problems with write-combining chipsets and flushing each write */
       
  2381 	for (i = mta_reg_count - 1; i >= 0 ; i--) {
       
  2382 		/*
       
  2383 		 * The 82544 has an erratum where writing odd
       
  2384 		 * offsets overwrites the previous even offset, but writing
       
  2385 		 * backwards over the range solves the issue by always
       
  2386 		 * writing the odd offset first
       
  2387 		 */
       
  2388 		E1000_WRITE_REG_ARRAY(hw, MTA, i, mcarray[i]);
       
  2389 	}
       
  2390 	E1000_WRITE_FLUSH();
       
  2391 
       
  2392 	if (hw->mac_type == e1000_82542_rev2_0)
       
  2393 		e1000_leave_82542_rst(adapter);
       
  2394 
       
  2395 	kfree(mcarray);
       
  2396 }
       
  2397 
       
  2398 /**
       
  2399  * e1000_update_phy_info_task - get phy info
       
  2400  * @work: work struct contained inside adapter struct
       
  2401  *
       
  2402  * Need to wait a few seconds after link up to get diagnostic information from
       
  2403  * the phy
       
  2404  */
       
  2405 static void e1000_update_phy_info_task(struct work_struct *work)
       
  2406 {
       
  2407 	struct e1000_adapter *adapter = container_of(work,
       
  2408 						     struct e1000_adapter,
       
  2409 						     phy_info_task.work);
       
  2410 	if (test_bit(__E1000_DOWN, &adapter->flags))
       
  2411 		return;
       
  2412 	mutex_lock(&adapter->mutex);
       
  2413 	e1000_phy_get_info(&adapter->hw, &adapter->phy_info);
       
  2414 	mutex_unlock(&adapter->mutex);
       
  2415 }
       
  2416 
       
  2417 /**
       
  2418  * e1000_82547_tx_fifo_stall_task - task to complete work
       
  2419  * @work: work struct contained inside adapter struct
       
  2420  **/
       
  2421 static void e1000_82547_tx_fifo_stall_task(struct work_struct *work)
       
  2422 {
       
  2423 	struct e1000_adapter *adapter = container_of(work,
       
  2424 						     struct e1000_adapter,
       
  2425 						     fifo_stall_task.work);
       
  2426 	struct e1000_hw *hw = &adapter->hw;
       
  2427 	struct net_device *netdev = adapter->netdev;
       
  2428 	u32 tctl;
       
  2429 
       
  2430 	if (test_bit(__E1000_DOWN, &adapter->flags))
       
  2431 		return;
       
  2432 	mutex_lock(&adapter->mutex);
       
  2433 	if (atomic_read(&adapter->tx_fifo_stall)) {
       
  2434 		if ((er32(TDT) == er32(TDH)) &&
       
  2435 		   (er32(TDFT) == er32(TDFH)) &&
       
  2436 		   (er32(TDFTS) == er32(TDFHS))) {
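       			/* descriptor ring and Tx FIFO are drained, so it is safe
       			 * to reset the FIFO pointers and re-enable transmits */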
       
  2437 			tctl = er32(TCTL);
       
  2438 			ew32(TCTL, tctl & ~E1000_TCTL_EN);
       
  2439 			ew32(TDFT, adapter->tx_head_addr);
       
  2440 			ew32(TDFH, adapter->tx_head_addr);
       
  2441 			ew32(TDFTS, adapter->tx_head_addr);
       
  2442 			ew32(TDFHS, adapter->tx_head_addr);
       
  2443 			ew32(TCTL, tctl);
       
  2444 			E1000_WRITE_FLUSH();
       
  2445 
       
  2446 			adapter->tx_fifo_head = 0;
       
  2447 			atomic_set(&adapter->tx_fifo_stall, 0);
       
  2448 			netif_wake_queue(netdev);
       
  2449 		} else if (!test_bit(__E1000_DOWN, &adapter->flags)) {
       
  2450 			schedule_delayed_work(&adapter->fifo_stall_task, 1);
       
  2451 		}
       
  2452 	}
       
  2453 	mutex_unlock(&adapter->mutex);
       
  2454 }
       
  2455 
       
  2456 bool e1000_has_link(struct e1000_adapter *adapter)
       
  2457 {
       
  2458 	struct e1000_hw *hw = &adapter->hw;
       
  2459 	bool link_active = false;
       
  2460 
       
  2461 	/* get_link_status is set on LSC (link status) interrupt or rx
       
  2462 	 * sequence error interrupt (except on intel ce4100).
       
  2463 	 * get_link_status will stay false until the
       
  2464 	 * e1000_check_for_link establishes link for copper adapters
       
  2465 	 * ONLY
       
  2466 	 */
       
  2467 	switch (hw->media_type) {
       
  2468 	case e1000_media_type_copper:
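       		/* ce4100 does not update get_link_status from interrupts, so
       		 * force a PHY poll on every call (see comment above) */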
       
  2469 		if (hw->mac_type == e1000_ce4100)
       
  2470 			hw->get_link_status = 1;
       
  2471 		if (hw->get_link_status) {
       
  2472 			e1000_check_for_link(hw);
       
  2473 			link_active = !hw->get_link_status;
       
  2474 		} else {
       
  2475 			link_active = true;
       
  2476 		}
       
  2477 		break;
       
  2478 	case e1000_media_type_fiber:
       
  2479 		e1000_check_for_link(hw);
       
  2480 		link_active = !!(er32(STATUS) & E1000_STATUS_LU);
       
  2481 		break;
       
  2482 	case e1000_media_type_internal_serdes:
       
  2483 		e1000_check_for_link(hw);
       
  2484 		link_active = hw->serdes_has_link;
       
  2485 		break;
       
  2486 	default:
       
  2487 		break;
       
  2488 	}
       
  2489 
       
  2490 	return link_active;
       
  2491 }
       
  2492 
       
  2493 /**
       
  2494  * e1000_watchdog - work function
       
  2495  * @work: work struct contained inside adapter struct
       
  2496  **/
       
  2497 static void e1000_watchdog(struct work_struct *work)
       
  2498 {
       
  2499 	struct e1000_adapter *adapter = container_of(work,
       
  2500 						     struct e1000_adapter,
       
  2501 						     watchdog_task.work);
       
  2502 	struct e1000_hw *hw = &adapter->hw;
       
  2503 	struct net_device *netdev = adapter->netdev;
       
  2504 	struct e1000_tx_ring *txdr = adapter->tx_ring;
       
  2505 	u32 link, tctl;
       
  2506 
       
  2507 	if (test_bit(__E1000_DOWN, &adapter->flags))
       
  2508 		return;
       
  2509 
       
  2510 	mutex_lock(&adapter->mutex);
       
  2511 	link = e1000_has_link(adapter);
       
  2512 	if (!adapter->ecdev && (netif_carrier_ok(netdev)) && link)
       
  2513 		goto link_up;
       
  2514 
       
  2515 	if (link) {
       
  2516 		if ((adapter->ecdev && !ecdev_get_link(adapter->ecdev))
       
  2517 				|| (!adapter->ecdev && !netif_carrier_ok(netdev))) {
       
  2518 			u32 ctrl;
       
  2519 			bool txb2b __attribute__ ((unused)) = true;
       
  2520 			/* update snapshot of PHY registers on LSC */
       
  2521 			e1000_get_speed_and_duplex(hw,
       
  2522 			                           &adapter->link_speed,
       
  2523 			                           &adapter->link_duplex);
       
  2524 
       
  2525 			ctrl = er32(CTRL);
       
  2526 			pr_info("%s NIC Link is Up %d Mbps %s, "
       
  2527 				"Flow Control: %s\n",
       
  2528 				netdev->name,
       
  2529 				adapter->link_speed,
       
  2530 				adapter->link_duplex == FULL_DUPLEX ?
       
  2531 				"Full Duplex" : "Half Duplex",
       
  2532 				((ctrl & E1000_CTRL_TFCE) && (ctrl &
       
  2533 				E1000_CTRL_RFCE)) ? "RX/TX" : ((ctrl &
       
  2534 				E1000_CTRL_RFCE) ? "RX" : ((ctrl &
       
  2535 				E1000_CTRL_TFCE) ? "TX" : "None")));
       
  2536 
       
  2537 			/* adjust timeout factor according to speed/duplex */
       
  2538 			adapter->tx_timeout_factor = 1;
       
  2539 			switch (adapter->link_speed) {
       
  2540 			case SPEED_10:
       
  2541 				txb2b = false;
       
  2542 				adapter->tx_timeout_factor = 16;
       
  2543 				break;
       
  2544 			case SPEED_100:
       
  2545 				txb2b = false;
       
  2546 				/* maybe add some timeout factor ? */
       
  2547 				break;
       
  2548 			}
       
  2549 
       
  2550 			/* enable transmits in the hardware */
       
  2551 			tctl = er32(TCTL);
       
  2552 			tctl |= E1000_TCTL_EN;
       
  2553 			ew32(TCTL, tctl);
       
  2554 
       
  2555 			if (adapter->ecdev) {
       
  2556 				ecdev_set_link(adapter->ecdev, 1);
       
  2557 			}
       
  2558 			else {
       
  2559 				netif_carrier_on(netdev);
       
  2560 				if (!test_bit(__E1000_DOWN, &adapter->flags))
       
  2561 					schedule_delayed_work(&adapter->phy_info_task,
       
  2562 							2 * HZ);
       
  2563 			}
       
  2564 			adapter->smartspeed = 0;
       
  2565 		}
       
  2566 	} else {
       
  2567 		if ((adapter->ecdev && ecdev_get_link(adapter->ecdev))
       
  2568 				|| (!adapter->ecdev && netif_carrier_ok(netdev))) {
       
  2569 			adapter->link_speed = 0;
       
  2570 			adapter->link_duplex = 0;
       
  2571 			pr_info("%s NIC Link is Down\n",
       
  2572 				netdev->name);
       
  2573 
       
  2574 			if (adapter->ecdev) {
       
  2575 				ecdev_set_link(adapter->ecdev, 0);
       
  2576 			} else {
       
  2577 				netif_carrier_off(netdev);
       
  2578 
       
  2579 				if (!test_bit(__E1000_DOWN, &adapter->flags))
       
  2580 					schedule_delayed_work(&adapter->phy_info_task,
       
  2581 							2 * HZ);
       
  2582 			}
       
  2583 		}
       
  2584 
       
  2585 		e1000_smartspeed(adapter);
       
  2586 	}
       
  2587 
       
  2588 link_up:
       
  2589 	e1000_update_stats(adapter);
       
  2590 
       
  2591 	hw->tx_packet_delta = adapter->stats.tpt - adapter->tpt_old;
       
  2592 	adapter->tpt_old = adapter->stats.tpt;
       
  2593 	hw->collision_delta = adapter->stats.colc - adapter->colc_old;
       
  2594 	adapter->colc_old = adapter->stats.colc;
       
  2595 
       
  2596 	adapter->gorcl = adapter->stats.gorcl - adapter->gorcl_old;
       
  2597 	adapter->gorcl_old = adapter->stats.gorcl;
       
  2598 	adapter->gotcl = adapter->stats.gotcl - adapter->gotcl_old;
       
  2599 	adapter->gotcl_old = adapter->stats.gotcl;
       
  2600 
       
  2601 	e1000_update_adaptive(hw);
       
  2602 
       
  2603 	if (!adapter->ecdev && !netif_carrier_ok(netdev)) {
       
  2604 		if (E1000_DESC_UNUSED(txdr) + 1 < txdr->count) {
       
  2605 			/* We've lost link, so the controller stops DMA,
       
  2606 			 * but we've got queued Tx work that's never going
       
  2607 			 * to get done, so reset controller to flush Tx.
       
  2608 			 * (Do the reset outside of interrupt context). */
       
  2609 			adapter->tx_timeout_count++;
       
  2610 			schedule_work(&adapter->reset_task);
       
  2611 			/* exit immediately since reset is imminent */
       
  2612 			goto unlock;
       
  2613 		}
       
  2614 	}
       
  2615 
       
  2616 	/* Simple mode for Interrupt Throttle Rate (ITR) */
       
  2617 	if (hw->mac_type >= e1000_82540 && adapter->itr_setting == 4) {
       
  2618 		/*
       
  2619 		 * Symmetric Tx/Rx gets a reduced ITR=2000;
       
  2620 		 * Total asymmetrical Tx or Rx gets ITR=8000;
       
  2621 		 * everyone else is between 2000-8000.
       
  2622 		 */
       
  2623 		u32 goc = (adapter->gotcl + adapter->gorcl) / 10000;
       
  2624 		u32 dif = (adapter->gotcl > adapter->gorcl ?
       
  2625 			    adapter->gotcl - adapter->gorcl :
       
  2626 			    adapter->gorcl - adapter->gotcl) / 10000;
       
  2627 		u32 itr = goc > 0 ? (dif * 6000 / goc + 2000) : 8000;
       
  2628 
       
  2629 		ew32(ITR, 1000000000 / (itr * 256));
       
  2630 	}
       
  2631 
       
  2632 	/* Cause software interrupt to ensure rx ring is cleaned */
       
  2633 	ew32(ICS, E1000_ICS_RXDMT0);
       
  2634 
       
  2635 	/* Force detection of hung controller every watchdog period */
       
  2636 	adapter->detect_tx_hung = true;
       
  2637 
       
  2638 	/* Reschedule the task */
       
  2639 	if (!adapter->ecdev && !test_bit(__E1000_DOWN, &adapter->flags))
       
  2640 		schedule_delayed_work(&adapter->watchdog_task, 2 * HZ);
       
  2641 
       
  2642 unlock:
       
  2643 	mutex_unlock(&adapter->mutex);
       
  2644 }
       
  2645 
       
  2646 enum latency_range {
       
  2647 	lowest_latency = 0,
       
  2648 	low_latency = 1,
       
  2649 	bulk_latency = 2,
       
  2650 	latency_invalid = 255
       
  2651 };
       
  2652 
       
  2653 /**
       
  2654  * e1000_update_itr - update the dynamic ITR value based on statistics
       
  2655  * @adapter: pointer to adapter
       
  2656  * @itr_setting: current adapter->itr
       
  2657  * @packets: the number of packets during this measurement interval
       
  2658  * @bytes: the number of bytes during this measurement interval
       
  2659  *
       
   2660  *      Determines a new ITR value based on packet and byte
 
   2661  *      counts during the last interrupt.  The advantage of per-interrupt
 
   2662  *      computation is faster updates and a more accurate ITR for the current
 
   2663  *      traffic pattern.  Constants in this function were computed
 
   2664  *      based on theoretical maximum wire speed, and thresholds were set based
 
   2665  *      on testing data as well as attempting to minimize response time
 
   2666  *      while increasing bulk throughput.
 
   2667  *      This functionality is controlled by the InterruptThrottleRate module
 
   2668  *      parameter (see e1000_param.c).
       
  2669  **/
       
  2670 static unsigned int e1000_update_itr(struct e1000_adapter *adapter,
       
  2671 				     u16 itr_setting, int packets, int bytes)
       
  2672 {
       
  2673 	unsigned int retval = itr_setting;
       
  2674 	struct e1000_hw *hw = &adapter->hw;
       
  2675 
       
  2676 	if (unlikely(hw->mac_type < e1000_82540))
       
  2677 		goto update_itr_done;
       
  2678 
       
  2679 	if (packets == 0)
       
  2680 		goto update_itr_done;
       
  2681 
       
  2682 	switch (itr_setting) {
       
  2683 	case lowest_latency:
       
   2684 		/* jumbo frames get bulk treatment */
       
  2685 		if (bytes/packets > 8000)
       
  2686 			retval = bulk_latency;
       
  2687 		else if ((packets < 5) && (bytes > 512))
       
  2688 			retval = low_latency;
       
  2689 		break;
       
  2690 	case low_latency:  /* 50 usec aka 20000 ints/s */
       
  2691 		if (bytes > 10000) {
       
  2692 			/* jumbo frames need bulk latency setting */
       
  2693 			if (bytes/packets > 8000)
       
  2694 				retval = bulk_latency;
       
  2695 			else if ((packets < 10) || ((bytes/packets) > 1200))
       
  2696 				retval = bulk_latency;
       
  2697 			else if ((packets > 35))
       
  2698 				retval = lowest_latency;
       
  2699 		} else if (bytes/packets > 2000)
       
  2700 			retval = bulk_latency;
       
  2701 		else if (packets <= 2 && bytes < 512)
       
  2702 			retval = lowest_latency;
       
  2703 		break;
       
  2704 	case bulk_latency: /* 250 usec aka 4000 ints/s */
       
  2705 		if (bytes > 25000) {
       
  2706 			if (packets > 35)
       
  2707 				retval = low_latency;
       
  2708 		} else if (bytes < 6000) {
       
  2709 			retval = low_latency;
       
  2710 		}
       
  2711 		break;
       
  2712 	}
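        	/* Illustrative examples: at low_latency, 40 packets totalling
        	 * 40000 bytes (avg 1000 bytes/packet) promote to lowest_latency,
        	 * while 5 packets totalling 45000 bytes (avg 9000 bytes/packet,
        	 * jumbo) demote to bulk_latency.
        	 */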
       
  2713 
       
  2714 update_itr_done:
       
  2715 	return retval;
       
  2716 }
       
  2717 
       
  2718 static void e1000_set_itr(struct e1000_adapter *adapter)
       
  2719 {
       
  2720 	struct e1000_hw *hw = &adapter->hw;
       
  2721 	u16 current_itr;
       
  2722 	u32 new_itr = adapter->itr;
       
  2723 
       
  2724 	if (unlikely(hw->mac_type < e1000_82540))
       
  2725 		return;
       
  2726 
       
  2727 	/* for non-gigabit speeds, just fix the interrupt rate at 4000 */
       
  2728 	if (unlikely(adapter->link_speed != SPEED_1000)) {
       
  2729 		current_itr = 0;
       
  2730 		new_itr = 4000;
       
  2731 		goto set_itr_now;
       
  2732 	}
       
  2733 
       
  2734 	adapter->tx_itr = e1000_update_itr(adapter,
       
  2735 	                            adapter->tx_itr,
       
  2736 	                            adapter->total_tx_packets,
       
  2737 	                            adapter->total_tx_bytes);
       
  2738 	/* conservative mode (itr 3) eliminates the lowest_latency setting */
       
  2739 	if (adapter->itr_setting == 3 && adapter->tx_itr == lowest_latency)
       
  2740 		adapter->tx_itr = low_latency;
       
  2741 
       
  2742 	adapter->rx_itr = e1000_update_itr(adapter,
       
  2743 	                            adapter->rx_itr,
       
  2744 	                            adapter->total_rx_packets,
       
  2745 	                            adapter->total_rx_bytes);
       
  2746 	/* conservative mode (itr 3) eliminates the lowest_latency setting */
       
  2747 	if (adapter->itr_setting == 3 && adapter->rx_itr == lowest_latency)
       
  2748 		adapter->rx_itr = low_latency;
       
  2749 
       
  2750 	current_itr = max(adapter->rx_itr, adapter->tx_itr);
       
  2751 
       
  2752 	switch (current_itr) {
       
  2753 	/* counts and packets in update_itr are dependent on these numbers */
       
  2754 	case lowest_latency:
       
  2755 		new_itr = 70000;
       
  2756 		break;
       
  2757 	case low_latency:
       
  2758 		new_itr = 20000; /* aka hwitr = ~200 */
       
  2759 		break;
       
  2760 	case bulk_latency:
       
  2761 		new_itr = 4000;
       
  2762 		break;
       
  2763 	default:
       
  2764 		break;
       
  2765 	}
       
  2766 
       
  2767 set_itr_now:
       
  2768 	if (new_itr != adapter->itr) {
       
  2769 		/* this attempts to bias the interrupt rate towards Bulk
       
  2770 		 * by adding intermediate steps when interrupt rate is
       
  2771 		 * increasing */
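        		/* Illustrative example: stepping up from itr = 4000 towards a
        		 * target of 20000 yields min(4000 + 20000 / 4, 20000) = 9000 on
        		 * this pass; subsequent passes converge on 20000.
        		 */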
       
  2772 		new_itr = new_itr > adapter->itr ?
       
  2773 		             min(adapter->itr + (new_itr >> 2), new_itr) :
       
  2774 		             new_itr;
       
  2775 		adapter->itr = new_itr;
       
  2776 		ew32(ITR, 1000000000 / (new_itr * 256));
       
  2777 	}
       
  2778 }
       
  2779 
       
  2780 #define E1000_TX_FLAGS_CSUM		0x00000001
       
  2781 #define E1000_TX_FLAGS_VLAN		0x00000002
       
  2782 #define E1000_TX_FLAGS_TSO		0x00000004
       
  2783 #define E1000_TX_FLAGS_IPV4		0x00000008
       
  2784 #define E1000_TX_FLAGS_NO_FCS		0x00000010
       
  2785 #define E1000_TX_FLAGS_VLAN_MASK	0xffff0000
       
  2786 #define E1000_TX_FLAGS_VLAN_SHIFT	16
       
  2787 
       
  2788 static int e1000_tso(struct e1000_adapter *adapter,
       
  2789 		     struct e1000_tx_ring *tx_ring, struct sk_buff *skb)
       
  2790 {
       
  2791 	struct e1000_context_desc *context_desc;
       
  2792 	struct e1000_buffer *buffer_info;
       
  2793 	unsigned int i;
       
  2794 	u32 cmd_length = 0;
       
  2795 	u16 ipcse = 0, tucse, mss;
       
  2796 	u8 ipcss, ipcso, tucss, tucso, hdr_len;
       
  2797 	int err;
       
  2798 
       
  2799 	if (skb_is_gso(skb)) {
       
  2800 		if (skb_header_cloned(skb)) {
       
  2801 			err = pskb_expand_head(skb, 0, 0, GFP_ATOMIC);
       
  2802 			if (err)
       
  2803 				return err;
       
  2804 		}
       
  2805 
       
  2806 		hdr_len = skb_transport_offset(skb) + tcp_hdrlen(skb);
       
  2807 		mss = skb_shinfo(skb)->gso_size;
       
  2808 		if (skb->protocol == htons(ETH_P_IP)) {
       
  2809 			struct iphdr *iph = ip_hdr(skb);
       
  2810 			iph->tot_len = 0;
       
  2811 			iph->check = 0;
       
  2812 			tcp_hdr(skb)->check = ~csum_tcpudp_magic(iph->saddr,
       
  2813 								 iph->daddr, 0,
       
  2814 								 IPPROTO_TCP,
       
  2815 								 0);
       
  2816 			cmd_length = E1000_TXD_CMD_IP;
       
  2817 			ipcse = skb_transport_offset(skb) - 1;
       
  2818 		} else if (skb->protocol == htons(ETH_P_IPV6)) {
       
  2819 			ipv6_hdr(skb)->payload_len = 0;
       
  2820 			tcp_hdr(skb)->check =
       
  2821 				~csum_ipv6_magic(&ipv6_hdr(skb)->saddr,
       
  2822 						 &ipv6_hdr(skb)->daddr,
       
  2823 						 0, IPPROTO_TCP, 0);
       
  2824 			ipcse = 0;
       
  2825 		}
       
  2826 		ipcss = skb_network_offset(skb);
       
  2827 		ipcso = (void *)&(ip_hdr(skb)->check) - (void *)skb->data;
       
  2828 		tucss = skb_transport_offset(skb);
       
  2829 		tucso = (void *)&(tcp_hdr(skb)->check) - (void *)skb->data;
       
  2830 		tucse = 0;
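        		/* The context descriptor written below carries the header
        		 * offsets computed above; the low bits of cmd_and_length hold
        		 * the payload length (skb->len - hdr_len), i.e. the TCP payload
        		 * carried across all resulting segments.
        		 */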
       
  2831 
       
  2832 		cmd_length |= (E1000_TXD_CMD_DEXT | E1000_TXD_CMD_TSE |
       
  2833 			       E1000_TXD_CMD_TCP | (skb->len - (hdr_len)));
       
  2834 
       
  2835 		i = tx_ring->next_to_use;
       
  2836 		context_desc = E1000_CONTEXT_DESC(*tx_ring, i);
       
  2837 		buffer_info = &tx_ring->buffer_info[i];
       
  2838 
       
  2839 		context_desc->lower_setup.ip_fields.ipcss  = ipcss;
       
  2840 		context_desc->lower_setup.ip_fields.ipcso  = ipcso;
       
  2841 		context_desc->lower_setup.ip_fields.ipcse  = cpu_to_le16(ipcse);
       
  2842 		context_desc->upper_setup.tcp_fields.tucss = tucss;
       
  2843 		context_desc->upper_setup.tcp_fields.tucso = tucso;
       
  2844 		context_desc->upper_setup.tcp_fields.tucse = cpu_to_le16(tucse);
       
  2845 		context_desc->tcp_seg_setup.fields.mss     = cpu_to_le16(mss);
       
  2846 		context_desc->tcp_seg_setup.fields.hdr_len = hdr_len;
       
  2847 		context_desc->cmd_and_length = cpu_to_le32(cmd_length);
       
  2848 
       
  2849 		buffer_info->time_stamp = jiffies;
       
  2850 		buffer_info->next_to_watch = i;
       
  2851 
       
  2852 		if (++i == tx_ring->count) i = 0;
       
  2853 		tx_ring->next_to_use = i;
       
  2854 
       
  2855 		return true;
       
  2856 	}
       
  2857 	return false;
       
  2858 }
       
  2859 
       
  2860 static bool e1000_tx_csum(struct e1000_adapter *adapter,
       
  2861 			  struct e1000_tx_ring *tx_ring, struct sk_buff *skb)
       
  2862 {
       
  2863 	struct e1000_context_desc *context_desc;
       
  2864 	struct e1000_buffer *buffer_info;
       
  2865 	unsigned int i;
       
  2866 	u8 css;
       
  2867 	u32 cmd_len = E1000_TXD_CMD_DEXT;
       
  2868 
       
  2869 	if (skb->ip_summed != CHECKSUM_PARTIAL)
       
  2870 		return false;
       
  2871 
       
  2872 	switch (skb->protocol) {
       
  2873 	case cpu_to_be16(ETH_P_IP):
       
  2874 		if (ip_hdr(skb)->protocol == IPPROTO_TCP)
       
  2875 			cmd_len |= E1000_TXD_CMD_TCP;
       
  2876 		break;
       
  2877 	case cpu_to_be16(ETH_P_IPV6):
       
  2878 		/* XXX not handling all IPV6 headers */
       
  2879 		if (ipv6_hdr(skb)->nexthdr == IPPROTO_TCP)
       
  2880 			cmd_len |= E1000_TXD_CMD_TCP;
       
  2881 		break;
       
  2882 	default:
       
  2883 		if (unlikely(net_ratelimit()))
       
  2884 			e_warn(drv, "checksum_partial proto=%x!\n",
       
  2885 			       skb->protocol);
       
  2886 		break;
       
  2887 	}
       
  2888 
       
  2889 	css = skb_checksum_start_offset(skb);
       
  2890 
       
  2891 	i = tx_ring->next_to_use;
       
  2892 	buffer_info = &tx_ring->buffer_info[i];
       
  2893 	context_desc = E1000_CONTEXT_DESC(*tx_ring, i);
       
  2894 
       
  2895 	context_desc->lower_setup.ip_config = 0;
       
  2896 	context_desc->upper_setup.tcp_fields.tucss = css;
       
  2897 	context_desc->upper_setup.tcp_fields.tucso =
       
  2898 		css + skb->csum_offset;
       
  2899 	context_desc->upper_setup.tcp_fields.tucse = 0;
       
  2900 	context_desc->tcp_seg_setup.data = 0;
       
  2901 	context_desc->cmd_and_length = cpu_to_le32(cmd_len);
       
  2902 
       
  2903 	buffer_info->time_stamp = jiffies;
       
  2904 	buffer_info->next_to_watch = i;
       
  2905 
       
  2906 	if (unlikely(++i == tx_ring->count)) i = 0;
       
  2907 	tx_ring->next_to_use = i;
       
  2908 
       
  2909 	return true;
       
  2910 }
       
  2911 
       
  2912 #define E1000_MAX_TXD_PWR	12
       
  2913 #define E1000_MAX_DATA_PER_TXD	(1<<E1000_MAX_TXD_PWR)
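        /* 1 << 12 = 4096 bytes of data per transmit descriptor */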
       
  2914 
       
  2915 static int e1000_tx_map(struct e1000_adapter *adapter,
       
  2916 			struct e1000_tx_ring *tx_ring,
       
  2917 			struct sk_buff *skb, unsigned int first,
       
  2918 			unsigned int max_per_txd, unsigned int nr_frags,
       
  2919 			unsigned int mss)
       
  2920 {
       
  2921 	struct e1000_hw *hw = &adapter->hw;
       
  2922 	struct pci_dev *pdev = adapter->pdev;
       
  2923 	struct e1000_buffer *buffer_info;
       
  2924 	unsigned int len = skb_headlen(skb);
       
  2925 	unsigned int offset = 0, size, count = 0, i;
       
  2926 	unsigned int f, bytecount, segs;
       
  2927 
       
  2928 	i = tx_ring->next_to_use;
       
  2929 
       
  2930 	while (len) {
       
  2931 		buffer_info = &tx_ring->buffer_info[i];
       
  2932 		size = min(len, max_per_txd);
       
   2933 		/* Workaround for controller erratum: the descriptor for a
 
   2934 		 * non-TSO packet in a linear skb that follows a TSO packet can
 
   2935 		 * be written back prematurely, before the data has been fully
 
   2936 		 * DMA'd to the controller */
       
  2937 		if (!skb->data_len && tx_ring->last_tx_tso &&
       
  2938 		    !skb_is_gso(skb)) {
       
  2939 			tx_ring->last_tx_tso = false;
       
  2940 			size -= 4;
       
  2941 		}
       
  2942 
       
  2943 		/* Workaround for premature desc write-backs
       
  2944 		 * in TSO mode.  Append 4-byte sentinel desc */
       
  2945 		if (unlikely(mss && !nr_frags && size == len && size > 8))
       
  2946 			size -= 4;
       
   2947 		/* Workaround for errata 10, which applies to all controllers
 
   2948 		 * in PCI-X mode.
 
   2949 		 * The fix is to make sure that the first descriptor of a
 
   2950 		 * packet is smaller than 2048 - 16 - 16 (or 2016) bytes.
 
   2951 		 */
       
  2952 		if (unlikely((hw->bus_type == e1000_bus_type_pcix) &&
       
  2953 		                (size > 2015) && count == 0))
       
  2954 		        size = 2015;
       
  2955 
       
  2956 		/* Workaround for potential 82544 hang in PCI-X.  Avoid
       
  2957 		 * terminating buffers within evenly-aligned dwords. */
       
  2958 		if (unlikely(adapter->pcix_82544 &&
       
  2959 		   !((unsigned long)(skb->data + offset + size - 1) & 4) &&
       
  2960 		   size > 4))
       
  2961 			size -= 4;
       
  2962 
       
  2963 		buffer_info->length = size;
       
  2964 		/* set time_stamp *before* dma to help avoid a possible race */
       
  2965 		buffer_info->time_stamp = jiffies;
       
  2966 		buffer_info->mapped_as_page = false;
       
  2967 		buffer_info->dma = dma_map_single(&pdev->dev,
       
  2968 						  skb->data + offset,
       
  2969 						  size,	DMA_TO_DEVICE);
       
  2970 		if (dma_mapping_error(&pdev->dev, buffer_info->dma))
       
  2971 			goto dma_error;
       
  2972 		buffer_info->next_to_watch = i;
       
  2973 
       
  2974 		len -= size;
       
  2975 		offset += size;
       
  2976 		count++;
       
  2977 		if (len) {
       
  2978 			i++;
       
  2979 			if (unlikely(i == tx_ring->count))
       
  2980 				i = 0;
       
  2981 		}
       
  2982 	}
       
  2983 
       
  2984 	for (f = 0; f < nr_frags; f++) {
       
  2985 		const struct skb_frag_struct *frag;
       
  2986 
       
  2987 		frag = &skb_shinfo(skb)->frags[f];
       
  2988 		len = skb_frag_size(frag);
       
  2989 		offset = 0;
       
  2990 
       
  2991 		while (len) {
       
  2992 			unsigned long bufend;
       
  2993 			i++;
       
  2994 			if (unlikely(i == tx_ring->count))
       
  2995 				i = 0;
       
  2996 
       
  2997 			buffer_info = &tx_ring->buffer_info[i];
       
  2998 			size = min(len, max_per_txd);
       
  2999 			/* Workaround for premature desc write-backs
       
  3000 			 * in TSO mode.  Append 4-byte sentinel desc */
       
  3001 			if (unlikely(mss && f == (nr_frags-1) && size == len && size > 8))
       
  3002 				size -= 4;
       
  3003 			/* Workaround for potential 82544 hang in PCI-X.
       
  3004 			 * Avoid terminating buffers within evenly-aligned
       
  3005 			 * dwords. */
       
  3006 			bufend = (unsigned long)
       
  3007 				page_to_phys(skb_frag_page(frag));
       
  3008 			bufend += offset + size - 1;
       
  3009 			if (unlikely(adapter->pcix_82544 &&
       
  3010 				     !(bufend & 4) &&
       
  3011 				     size > 4))
       
  3012 				size -= 4;
       
  3013 
       
  3014 			buffer_info->length = size;
       
  3015 			buffer_info->time_stamp = jiffies;
       
  3016 			buffer_info->mapped_as_page = true;
       
  3017 			buffer_info->dma = skb_frag_dma_map(&pdev->dev, frag,
       
  3018 						offset, size, DMA_TO_DEVICE);
       
  3019 			if (dma_mapping_error(&pdev->dev, buffer_info->dma))
       
  3020 				goto dma_error;
       
  3021 			buffer_info->next_to_watch = i;
       
  3022 
       
  3023 			len -= size;
       
  3024 			offset += size;
       
  3025 			count++;
       
  3026 		}
       
  3027 	}
       
  3028 
       
  3029 	segs = skb_shinfo(skb)->gso_segs ?: 1;
       
  3030 	/* multiply data chunks by size of headers */
       
  3031 	bytecount = ((segs - 1) * skb_headlen(skb)) + skb->len;
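        	/* Illustrative example (assuming the linear area holds only the
        	 * headers): a 9000-byte GSO skb with 100 bytes of headers split
        	 * into 7 segments accounts for 6 * 100 + 9000 = 9600 on-wire
        	 * bytes, one replicated header per extra segment.
        	 */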
       
  3032 
       
  3033 	tx_ring->buffer_info[i].skb = skb;
       
  3034 	tx_ring->buffer_info[i].segs = segs;
       
  3035 	tx_ring->buffer_info[i].bytecount = bytecount;
       
  3036 	tx_ring->buffer_info[first].next_to_watch = i;
       
  3037 
       
  3038 	return count;
       
  3039 
       
  3040 dma_error:
       
  3041 	dev_err(&pdev->dev, "TX DMA map failed\n");
       
  3042 	buffer_info->dma = 0;
       
  3043 	if (count)
       
  3044 		count--;
       
  3045 
       
  3046 	while (count--) {
       
   3047 		if (i == 0)
       
  3048 			i += tx_ring->count;
       
  3049 		i--;
       
  3050 		buffer_info = &tx_ring->buffer_info[i];
       
  3051 		e1000_unmap_and_free_tx_resource(adapter, buffer_info);
       
  3052 	}
       
  3053 
       
  3054 	return 0;
       
  3055 }
       
  3056 
       
  3057 static void e1000_tx_queue(struct e1000_adapter *adapter,
       
  3058 			   struct e1000_tx_ring *tx_ring, int tx_flags,
       
  3059 			   int count)
       
  3060 {
       
  3061 	struct e1000_hw *hw = &adapter->hw;
       
  3062 	struct e1000_tx_desc *tx_desc = NULL;
       
  3063 	struct e1000_buffer *buffer_info;
       
  3064 	u32 txd_upper = 0, txd_lower = E1000_TXD_CMD_IFCS;
       
  3065 	unsigned int i;
       
  3066 
       
  3067 	if (likely(tx_flags & E1000_TX_FLAGS_TSO)) {
       
  3068 		txd_lower |= E1000_TXD_CMD_DEXT | E1000_TXD_DTYP_D |
       
  3069 		             E1000_TXD_CMD_TSE;
       
  3070 		txd_upper |= E1000_TXD_POPTS_TXSM << 8;
       
  3071 
       
  3072 		if (likely(tx_flags & E1000_TX_FLAGS_IPV4))
       
  3073 			txd_upper |= E1000_TXD_POPTS_IXSM << 8;
       
  3074 	}
       
  3075 
       
  3076 	if (likely(tx_flags & E1000_TX_FLAGS_CSUM)) {
       
  3077 		txd_lower |= E1000_TXD_CMD_DEXT | E1000_TXD_DTYP_D;
       
  3078 		txd_upper |= E1000_TXD_POPTS_TXSM << 8;
       
  3079 	}
       
  3080 
       
  3081 	if (unlikely(tx_flags & E1000_TX_FLAGS_VLAN)) {
       
  3082 		txd_lower |= E1000_TXD_CMD_VLE;
       
  3083 		txd_upper |= (tx_flags & E1000_TX_FLAGS_VLAN_MASK);
       
  3084 	}
       
  3085 
       
  3086 	if (unlikely(tx_flags & E1000_TX_FLAGS_NO_FCS))
       
  3087 		txd_lower &= ~(E1000_TXD_CMD_IFCS);
       
  3088 
       
  3089 	i = tx_ring->next_to_use;
       
  3090 
       
  3091 	while (count--) {
       
  3092 		buffer_info = &tx_ring->buffer_info[i];
       
  3093 		tx_desc = E1000_TX_DESC(*tx_ring, i);
       
  3094 		tx_desc->buffer_addr = cpu_to_le64(buffer_info->dma);
       
  3095 		tx_desc->lower.data =
       
  3096 			cpu_to_le32(txd_lower | buffer_info->length);
       
  3097 		tx_desc->upper.data = cpu_to_le32(txd_upper);
       
  3098 		if (unlikely(++i == tx_ring->count)) i = 0;
       
  3099 	}
       
  3100 
       
  3101 	tx_desc->lower.data |= cpu_to_le32(adapter->txd_cmd);
       
  3102 
       
  3103 	/* txd_cmd re-enables FCS, so we'll re-disable it here as desired. */
       
  3104 	if (unlikely(tx_flags & E1000_TX_FLAGS_NO_FCS))
       
  3105 		tx_desc->lower.data &= ~(cpu_to_le32(E1000_TXD_CMD_IFCS));
       
  3106 
       
  3107 	/* Force memory writes to complete before letting h/w
       
  3108 	 * know there are new descriptors to fetch.  (Only
       
  3109 	 * applicable for weak-ordered memory model archs,
       
  3110 	 * such as IA-64). */
       
  3111 	wmb();
       
  3112 
       
  3113 	tx_ring->next_to_use = i;
       
  3114 	writel(i, hw->hw_addr + tx_ring->tdt);
       
   3115 	/* We need this if more than one processor can write to our tail
 
   3116 	 * at a time; it synchronizes I/O on IA64/Altix systems */
       
  3117 	mmiowb();
       
  3118 }
       
  3119 
       
  3120 /**
       
  3121  * 82547 workaround to avoid controller hang in half-duplex environment.
       
  3122  * The workaround is to avoid queuing a large packet that would span
       
  3123  * the internal Tx FIFO ring boundary by notifying the stack to resend
       
  3124  * the packet at a later time.  This gives the Tx FIFO an opportunity to
       
  3125  * flush all packets.  When that occurs, we reset the Tx FIFO pointers
       
  3126  * to the beginning of the Tx FIFO.
       
  3127  **/
       
  3128 
       
  3129 #define E1000_FIFO_HDR			0x10
       
  3130 #define E1000_82547_PAD_LEN		0x3E0
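        /* Each packet occupies its length plus a 16-byte FIFO header, rounded
         * up to a 16-byte multiple; 0x3E0 (992 bytes) is the threshold used in
         * the wrap-around check below. */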
       
  3131 
       
  3132 static int e1000_82547_fifo_workaround(struct e1000_adapter *adapter,
       
  3133 				       struct sk_buff *skb)
       
  3134 {
       
  3135 	u32 fifo_space = adapter->tx_fifo_size - adapter->tx_fifo_head;
       
  3136 	u32 skb_fifo_len = skb->len + E1000_FIFO_HDR;
       
  3137 
       
  3138 	skb_fifo_len = ALIGN(skb_fifo_len, E1000_FIFO_HDR);
       
  3139 
       
  3140 	if (adapter->link_duplex != HALF_DUPLEX)
       
  3141 		goto no_fifo_stall_required;
       
  3142 
       
  3143 	if (atomic_read(&adapter->tx_fifo_stall))
       
  3144 		return 1;
       
  3145 
       
  3146 	if (skb_fifo_len >= (E1000_82547_PAD_LEN + fifo_space)) {
       
  3147 		atomic_set(&adapter->tx_fifo_stall, 1);
       
  3148 		return 1;
       
  3149 	}
       
  3150 
       
  3151 no_fifo_stall_required:
       
  3152 	adapter->tx_fifo_head += skb_fifo_len;
       
  3153 	if (adapter->tx_fifo_head >= adapter->tx_fifo_size)
       
  3154 		adapter->tx_fifo_head -= adapter->tx_fifo_size;
       
  3155 	return 0;
       
  3156 }
       
  3157 
       
  3158 static int __e1000_maybe_stop_tx(struct net_device *netdev, int size)
       
  3159 {
       
  3160 	struct e1000_adapter *adapter = netdev_priv(netdev);
       
  3161 	struct e1000_tx_ring *tx_ring = adapter->tx_ring;
       
  3162 
       
  3163 	if (adapter->ecdev) {
       
  3164 		return -EBUSY;
       
  3165 	}
       
  3166 
       
  3167 	netif_stop_queue(netdev);
       
  3168 	/* Herbert's original patch had:
       
  3169 	 *  smp_mb__after_netif_stop_queue();
       
  3170 	 * but since that doesn't exist yet, just open code it. */
       
  3171 	smp_mb();
       
  3172 
       
   3173 	/* We need to check again in case another CPU has just
 
   3174 	 * made room available. */
       
  3175 	if (likely(E1000_DESC_UNUSED(tx_ring) < size))
       
  3176 		return -EBUSY;
       
  3177 
       
  3178 	/* A reprieve! */
       
  3179 	netif_start_queue(netdev);
       
  3180 	++adapter->restart_queue;
       
  3181 	return 0;
       
  3182 }
       
  3183 
       
  3184 static int e1000_maybe_stop_tx(struct net_device *netdev,
       
  3185                                struct e1000_tx_ring *tx_ring, int size)
       
  3186 {
       
  3187 	if (likely(E1000_DESC_UNUSED(tx_ring) >= size))
       
  3188 		return 0;
       
  3189 	return __e1000_maybe_stop_tx(netdev, size);
       
  3190 }
       
  3191 
       
   3192 #define TXD_USE_COUNT(S, X) (((S) >> (X)) + 1)
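        /* e.g. TXD_USE_COUNT(6000, 12) = (6000 >> 12) + 1 = 2 descriptors */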
       
  3193 static netdev_tx_t e1000_xmit_frame(struct sk_buff *skb,
       
  3194 				    struct net_device *netdev)
       
  3195 {
       
  3196 	struct e1000_adapter *adapter = netdev_priv(netdev);
       
  3197 	struct e1000_hw *hw = &adapter->hw;
       
  3198 	struct e1000_tx_ring *tx_ring;
       
  3199 	unsigned int first, max_per_txd = E1000_MAX_DATA_PER_TXD;
       
  3200 	unsigned int max_txd_pwr = E1000_MAX_TXD_PWR;
       
  3201 	unsigned int tx_flags = 0;
       
  3202 	unsigned int len = skb_headlen(skb);
       
  3203 	unsigned int nr_frags;
       
  3204 	unsigned int mss;
       
  3205 	int count = 0;
       
  3206 	int tso;
       
  3207 	unsigned int f;
       
  3208 
       
  3209 	/* This goes back to the question of how to logically map a tx queue
       
  3210 	 * to a flow.  Right now, performance is impacted slightly negatively
       
  3211 	 * if using multiple tx queues.  If the stack breaks away from a
       
  3212 	 * single qdisc implementation, we can look at this again. */
       
  3213 	tx_ring = adapter->tx_ring;
       
  3214 
       
  3215 	if (unlikely(skb->len <= 0)) {
       
  3216 		if (!adapter->ecdev) {
       
  3217 			dev_kfree_skb_any(skb);
       
  3218 		}
       
  3219 		return NETDEV_TX_OK;
       
  3220 	}
       
  3221 
       
  3222 	mss = skb_shinfo(skb)->gso_size;
       
   3223 	/* The controller does a simple calculation to
 
   3224 	 * make sure there is enough room in the FIFO before
 
   3225 	 * initiating the DMA for each buffer.  The calculation
 
   3226 	 * assumes ceil(buffer len / mss) <= 4, so to make sure we
 
   3227 	 * don't overrun the FIFO, cap the max buffer length when
 
   3228 	 * mss drops. */
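        	/* Illustrative example: with mss = 1460 the cap stays at
        	 * min(1460 << 2, 4096) = 4096; with mss = 536 it drops to 2144
        	 * bytes per descriptor. */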
       
  3229 	if (mss) {
       
  3230 		u8 hdr_len;
       
  3231 		max_per_txd = min(mss << 2, max_per_txd);
       
  3232 		max_txd_pwr = fls(max_per_txd) - 1;
       
  3233 
       
  3234 		hdr_len = skb_transport_offset(skb) + tcp_hdrlen(skb);
       
  3235 		if (skb->data_len && hdr_len == len) {
       
  3236 			switch (hw->mac_type) {
       
  3237 				unsigned int pull_size;
       
  3238 			case e1000_82544:
       
  3239 				/* Make sure we have room to chop off 4 bytes,
       
  3240 				 * and that the end alignment will work out to
       
  3241 				 * this hardware's requirements
       
  3242 				 * NOTE: this is a TSO only workaround
       
  3243 				 * if end byte alignment not correct move us
       
  3244 				 * into the next dword */
       
  3245 				if ((unsigned long)(skb_tail_pointer(skb) - 1) & 4)
       
  3246 					break;
       
   3247 				/* otherwise fall through to the pull below */
       
  3248 				pull_size = min((unsigned int)4, skb->data_len);
       
  3249 				if (!__pskb_pull_tail(skb, pull_size)) {
       
  3250 					e_err(drv, "__pskb_pull_tail "
       
  3251 					      "failed.\n");
       
  3252 					if (!adapter->ecdev) {
       
  3253 						dev_kfree_skb_any(skb);
       
  3254 					}
       
  3255 					return NETDEV_TX_OK;
       
  3256 				}
       
  3257 				len = skb_headlen(skb);
       
  3258 				break;
       
  3259 			default:
       
  3260 				/* do nothing */
       
  3261 				break;
       
  3262 			}
       
  3263 		}
       
  3264 	}
       
  3265 
       
  3266 	/* reserve a descriptor for the offload context */
       
  3267 	if ((mss) || (skb->ip_summed == CHECKSUM_PARTIAL))
       
  3268 		count++;
       
  3269 	count++;
       
  3270 
       
  3271 	/* Controller Erratum workaround */
       
  3272 	if (!skb->data_len && tx_ring->last_tx_tso && !skb_is_gso(skb))
       
  3273 		count++;
       
  3274 
       
  3275 	count += TXD_USE_COUNT(len, max_txd_pwr);
       
  3276 
       
  3277 	if (adapter->pcix_82544)
       
  3278 		count++;
       
  3279 
       
   3280 	/* Workaround for errata 10, which applies to all controllers
 
   3281 	 * in PCI-X mode, so add one more descriptor to the count
 
   3282 	 */
       
  3283 	if (unlikely((hw->bus_type == e1000_bus_type_pcix) &&
       
  3284 			(len > 2015)))
       
  3285 		count++;
       
  3286 
       
  3287 	nr_frags = skb_shinfo(skb)->nr_frags;
       
  3288 	for (f = 0; f < nr_frags; f++)
       
  3289 		count += TXD_USE_COUNT(skb_frag_size(&skb_shinfo(skb)->frags[f]),
       
  3290 				       max_txd_pwr);
       
  3291 	if (adapter->pcix_82544)
       
  3292 		count += nr_frags;
       
  3293 
       
  3294 	/* need: count + 2 desc gap to keep tail from touching
       
  3295 	 * head, otherwise try next time */
       
  3296 	if (unlikely(e1000_maybe_stop_tx(netdev, tx_ring, count + 2)))
       
  3297 		return NETDEV_TX_BUSY;
       
  3298 
       
  3299 	if (unlikely((hw->mac_type == e1000_82547) &&
       
  3300 		     (e1000_82547_fifo_workaround(adapter, skb)))) {
       
  3301 		if (!adapter->ecdev) {
       
  3302 			netif_stop_queue(netdev);
       
  3303 			if (!test_bit(__E1000_DOWN, &adapter->flags))
       
  3304 				schedule_delayed_work(&adapter->fifo_stall_task, 1);
       
  3305 		}
       
  3306 		return NETDEV_TX_BUSY;
       
  3307 	}
       
  3308 
       
  3309 	if (vlan_tx_tag_present(skb)) {
       
  3310 		tx_flags |= E1000_TX_FLAGS_VLAN;
       
  3311 		tx_flags |= (vlan_tx_tag_get(skb) << E1000_TX_FLAGS_VLAN_SHIFT);
       
  3312 	}
       
  3313 
       
  3314 	first = tx_ring->next_to_use;
       
  3315 
       
  3316 	tso = e1000_tso(adapter, tx_ring, skb);
       
  3317 	if (tso < 0) {
       
  3318 		if (!adapter->ecdev) {
       
  3319 			dev_kfree_skb_any(skb);
       
  3320 		}
       
  3321 		return NETDEV_TX_OK;
       
  3322 	}
       
  3323 
       
  3324 	if (likely(tso)) {
       
  3325 		if (likely(hw->mac_type != e1000_82544))
       
  3326 			tx_ring->last_tx_tso = true;
       
  3327 		tx_flags |= E1000_TX_FLAGS_TSO;
       
  3328 	} else if (likely(e1000_tx_csum(adapter, tx_ring, skb)))
       
  3329 		tx_flags |= E1000_TX_FLAGS_CSUM;
       
  3330 
       
  3331 	if (likely(skb->protocol == htons(ETH_P_IP)))
       
  3332 		tx_flags |= E1000_TX_FLAGS_IPV4;
       
  3333 
       
  3334 	if (unlikely(skb->no_fcs))
       
  3335 		tx_flags |= E1000_TX_FLAGS_NO_FCS;
       
  3336 
       
  3337 	count = e1000_tx_map(adapter, tx_ring, skb, first, max_per_txd,
       
  3338 	                     nr_frags, mss);
       
  3339 
       
  3340 	if (count) {
       
  3341 		e1000_tx_queue(adapter, tx_ring, tx_flags, count);
       
  3342 		if (!adapter->ecdev) {
       
  3343 			/* Make sure there is space in the ring for the next send. */
       
  3344 			e1000_maybe_stop_tx(netdev, tx_ring, MAX_SKB_FRAGS + 2);
       
  3345 		}
       
  3346 
       
  3347 	} else {
       
  3348 		if (!adapter->ecdev) {
       
  3349 			dev_kfree_skb_any(skb);
       
  3350 		}
       
  3351 		tx_ring->buffer_info[first].time_stamp = 0;
       
  3352 		tx_ring->next_to_use = first;
       
  3353 	}
       
  3354 
       
  3355 	return NETDEV_TX_OK;
       
  3356 }
       
  3357 
       
  3358 #define NUM_REGS 38 /* 1 based count */
       
  3359 static void e1000_regdump(struct e1000_adapter *adapter)
       
  3360 {
       
  3361 	struct e1000_hw *hw = &adapter->hw;
       
  3362 	u32 regs[NUM_REGS];
       
  3363 	u32 *regs_buff = regs;
       
  3364 	int i = 0;
       
  3365 
       
  3366 	static const char * const reg_name[] = {
       
  3367 		"CTRL",  "STATUS",
       
  3368 		"RCTL", "RDLEN", "RDH", "RDT", "RDTR",
       
  3369 		"TCTL", "TDBAL", "TDBAH", "TDLEN", "TDH", "TDT",
       
  3370 		"TIDV", "TXDCTL", "TADV", "TARC0",
       
  3371 		"TDBAL1", "TDBAH1", "TDLEN1", "TDH1", "TDT1",
       
  3372 		"TXDCTL1", "TARC1",
       
  3373 		"CTRL_EXT", "ERT", "RDBAL", "RDBAH",
       
  3374 		"TDFH", "TDFT", "TDFHS", "TDFTS", "TDFPC",
       
  3375 		"RDFH", "RDFT", "RDFHS", "RDFTS", "RDFPC"
       
  3376 	};
       
  3377 
       
  3378 	regs_buff[0]  = er32(CTRL);
       
  3379 	regs_buff[1]  = er32(STATUS);
       
  3380 
       
  3381 	regs_buff[2]  = er32(RCTL);
       
  3382 	regs_buff[3]  = er32(RDLEN);
       
  3383 	regs_buff[4]  = er32(RDH);
       
  3384 	regs_buff[5]  = er32(RDT);
       
  3385 	regs_buff[6]  = er32(RDTR);
       
  3386 
       
  3387 	regs_buff[7]  = er32(TCTL);
       
  3388 	regs_buff[8]  = er32(TDBAL);
       
  3389 	regs_buff[9]  = er32(TDBAH);
       
  3390 	regs_buff[10] = er32(TDLEN);
       
  3391 	regs_buff[11] = er32(TDH);
       
  3392 	regs_buff[12] = er32(TDT);
       
  3393 	regs_buff[13] = er32(TIDV);
       
  3394 	regs_buff[14] = er32(TXDCTL);
       
  3395 	regs_buff[15] = er32(TADV);
       
  3396 	regs_buff[16] = er32(TARC0);
       
  3397 
       
  3398 	regs_buff[17] = er32(TDBAL1);
       
  3399 	regs_buff[18] = er32(TDBAH1);
       
  3400 	regs_buff[19] = er32(TDLEN1);
       
  3401 	regs_buff[20] = er32(TDH1);
       
  3402 	regs_buff[21] = er32(TDT1);
       
  3403 	regs_buff[22] = er32(TXDCTL1);
       
  3404 	regs_buff[23] = er32(TARC1);
       
  3405 	regs_buff[24] = er32(CTRL_EXT);
       
  3406 	regs_buff[25] = er32(ERT);
       
  3407 	regs_buff[26] = er32(RDBAL0);
       
  3408 	regs_buff[27] = er32(RDBAH0);
       
  3409 	regs_buff[28] = er32(TDFH);
       
  3410 	regs_buff[29] = er32(TDFT);
       
  3411 	regs_buff[30] = er32(TDFHS);
       
  3412 	regs_buff[31] = er32(TDFTS);
       
  3413 	regs_buff[32] = er32(TDFPC);
       
  3414 	regs_buff[33] = er32(RDFH);
       
  3415 	regs_buff[34] = er32(RDFT);
       
  3416 	regs_buff[35] = er32(RDFHS);
       
  3417 	regs_buff[36] = er32(RDFTS);
       
  3418 	regs_buff[37] = er32(RDFPC);
       
  3419 
       
  3420 	pr_info("Register dump\n");
       
  3421 	for (i = 0; i < NUM_REGS; i++)
       
  3422 		pr_info("%-15s  %08x\n", reg_name[i], regs_buff[i]);
       
  3423 }
       
  3424 
       
  3425 /*
       
  3426  * e1000_dump: Print registers, tx ring and rx ring
       
  3427  */
       
  3428 static void e1000_dump(struct e1000_adapter *adapter)
       
  3429 {
       
  3430 	/* this code doesn't handle multiple rings */
       
  3431 	struct e1000_tx_ring *tx_ring = adapter->tx_ring;
       
  3432 	struct e1000_rx_ring *rx_ring = adapter->rx_ring;
       
  3433 	int i;
       
  3434 
       
  3435 	if (!netif_msg_hw(adapter))
       
  3436 		return;
       
  3437 
       
  3438 	/* Print Registers */
       
  3439 	e1000_regdump(adapter);
       
  3440 
       
  3441 	/*
       
  3442 	 * transmit dump
       
  3443 	 */
       
  3444 	pr_info("TX Desc ring0 dump\n");
       
  3445 
       
  3446 	/* Transmit Descriptor Formats - DEXT[29] is 0 (Legacy) or 1 (Extended)
       
  3447 	 *
       
  3448 	 * Legacy Transmit Descriptor
       
  3449 	 *   +--------------------------------------------------------------+
       
  3450 	 * 0 |         Buffer Address [63:0] (Reserved on Write Back)       |
       
  3451 	 *   +--------------------------------------------------------------+
       
  3452 	 * 8 | Special  |    CSS     | Status |  CMD    |  CSO   |  Length  |
       
  3453 	 *   +--------------------------------------------------------------+
       
  3454 	 *   63       48 47        36 35    32 31     24 23    16 15        0
       
  3455 	 *
       
  3456 	 * Extended Context Descriptor (DTYP=0x0) for TSO or checksum offload
       
  3457 	 *   63      48 47    40 39       32 31             16 15    8 7      0
       
  3458 	 *   +----------------------------------------------------------------+
       
  3459 	 * 0 |  TUCSE  | TUCS0  |   TUCSS   |     IPCSE       | IPCS0 | IPCSS |
       
  3460 	 *   +----------------------------------------------------------------+
       
  3461 	 * 8 |   MSS   | HDRLEN | RSV | STA | TUCMD | DTYP |      PAYLEN      |
       
  3462 	 *   +----------------------------------------------------------------+
       
  3463 	 *   63      48 47    40 39 36 35 32 31   24 23  20 19                0
       
  3464 	 *
       
  3465 	 * Extended Data Descriptor (DTYP=0x1)
       
  3466 	 *   +----------------------------------------------------------------+
       
  3467 	 * 0 |                     Buffer Address [63:0]                      |
       
  3468 	 *   +----------------------------------------------------------------+
       
  3469 	 * 8 | VLAN tag |  POPTS  | Rsvd | Status | Command | DTYP |  DTALEN  |
       
  3470 	 *   +----------------------------------------------------------------+
       
  3471 	 *   63       48 47     40 39  36 35    32 31     24 23  20 19        0
       
  3472 	 */
       
  3473 	pr_info("Tc[desc]     [Ce CoCsIpceCoS] [MssHlRSCm0Plen] [bi->dma       ] leng  ntw timestmp         bi->skb\n");
       
  3474 	pr_info("Td[desc]     [address 63:0  ] [VlaPoRSCm1Dlen] [bi->dma       ] leng  ntw timestmp         bi->skb\n");
       
  3475 
       
  3476 	if (!netif_msg_tx_done(adapter))
       
  3477 		goto rx_ring_summary;
       
  3478 
       
  3479 	for (i = 0; tx_ring->desc && (i < tx_ring->count); i++) {
       
  3480 		struct e1000_tx_desc *tx_desc = E1000_TX_DESC(*tx_ring, i);
       
  3481 		struct e1000_buffer *buffer_info = &tx_ring->buffer_info[i];
       
  3482 		struct my_u { __le64 a; __le64 b; };
       
  3483 		struct my_u *u = (struct my_u *)tx_desc;
       
  3484 		const char *type;
       
  3485 
       
  3486 		if (i == tx_ring->next_to_use && i == tx_ring->next_to_clean)
       
  3487 			type = "NTC/U";
       
  3488 		else if (i == tx_ring->next_to_use)
       
  3489 			type = "NTU";
       
  3490 		else if (i == tx_ring->next_to_clean)
       
  3491 			type = "NTC";
       
  3492 		else
       
  3493 			type = "";
       
  3494 
       
  3495 		pr_info("T%c[0x%03X]    %016llX %016llX %016llX %04X  %3X %016llX %p %s\n",
       
  3496 			((le64_to_cpu(u->b) & (1<<20)) ? 'd' : 'c'), i,
       
  3497 			le64_to_cpu(u->a), le64_to_cpu(u->b),
       
  3498 			(u64)buffer_info->dma, buffer_info->length,
       
  3499 			buffer_info->next_to_watch,
       
  3500 			(u64)buffer_info->time_stamp, buffer_info->skb, type);
       
  3501 	}
       
  3502 
       
  3503 rx_ring_summary:
       
  3504 	/*
       
  3505 	 * receive dump
       
  3506 	 */
       
  3507 	pr_info("\nRX Desc ring dump\n");
       
  3508 
       
  3509 	/* Legacy Receive Descriptor Format
       
  3510 	 *
       
  3511 	 * +-----------------------------------------------------+
       
  3512 	 * |                Buffer Address [63:0]                |
       
  3513 	 * +-----------------------------------------------------+
       
  3514 	 * | VLAN Tag | Errors | Status 0 | Packet csum | Length |
       
  3515 	 * +-----------------------------------------------------+
       
  3516 	 * 63       48 47    40 39      32 31         16 15      0
       
  3517 	 */
       
  3518 	pr_info("R[desc]      [address 63:0  ] [vl er S cks ln] [bi->dma       ] [bi->skb]\n");
       
  3519 
       
  3520 	if (!netif_msg_rx_status(adapter))
       
  3521 		goto exit;
       
  3522 
       
  3523 	for (i = 0; rx_ring->desc && (i < rx_ring->count); i++) {
       
  3524 		struct e1000_rx_desc *rx_desc = E1000_RX_DESC(*rx_ring, i);
       
  3525 		struct e1000_buffer *buffer_info = &rx_ring->buffer_info[i];
       
  3526 		struct my_u { __le64 a; __le64 b; };
       
  3527 		struct my_u *u = (struct my_u *)rx_desc;
       
  3528 		const char *type;
       
  3529 
       
  3530 		if (i == rx_ring->next_to_use)
       
  3531 			type = "NTU";
       
  3532 		else if (i == rx_ring->next_to_clean)
       
  3533 			type = "NTC";
       
  3534 		else
       
  3535 			type = "";
       
  3536 
       
  3537 		pr_info("R[0x%03X]     %016llX %016llX %016llX %p %s\n",
       
  3538 			i, le64_to_cpu(u->a), le64_to_cpu(u->b),
       
  3539 			(u64)buffer_info->dma, buffer_info->skb, type);
       
  3540 	} /* for */
       
  3541 
       
  3542 	/* dump the descriptor caches */
       
  3543 	/* rx */
       
  3544 	pr_info("Rx descriptor cache in 64bit format\n");
       
  3545 	for (i = 0x6000; i <= 0x63FF ; i += 0x10) {
       
  3546 		pr_info("R%04X: %08X|%08X %08X|%08X\n",
       
  3547 			i,
       
  3548 			readl(adapter->hw.hw_addr + i+4),
       
  3549 			readl(adapter->hw.hw_addr + i),
       
  3550 			readl(adapter->hw.hw_addr + i+12),
       
  3551 			readl(adapter->hw.hw_addr + i+8));
       
  3552 	}
       
  3553 	/* tx */
       
  3554 	pr_info("Tx descriptor cache in 64bit format\n");
       
  3555 	for (i = 0x7000; i <= 0x73FF ; i += 0x10) {
       
  3556 		pr_info("T%04X: %08X|%08X %08X|%08X\n",
       
  3557 			i,
       
  3558 			readl(adapter->hw.hw_addr + i+4),
       
  3559 			readl(adapter->hw.hw_addr + i),
       
  3560 			readl(adapter->hw.hw_addr + i+12),
       
  3561 			readl(adapter->hw.hw_addr + i+8));
       
  3562 	}
       
  3563 exit:
       
  3564 	return;
       
  3565 }
       
  3566 
       
  3567 /**
       
  3568  * e1000_tx_timeout - Respond to a Tx Hang
       
  3569  * @netdev: network interface device structure
       
  3570  **/
       
  3571 
       
  3572 static void e1000_tx_timeout(struct net_device *netdev)
       
  3573 {
       
  3574 	struct e1000_adapter *adapter = netdev_priv(netdev);
       
  3575 
       
  3576 	/* Do the reset outside of interrupt context */
       
  3577 	adapter->tx_timeout_count++;
       
  3578 	schedule_work(&adapter->reset_task);
       
  3579 }
       
  3580 
       
  3581 static void e1000_reset_task(struct work_struct *work)
       
  3582 {
       
  3583 	struct e1000_adapter *adapter =
       
  3584 		container_of(work, struct e1000_adapter, reset_task);
       
  3585 
       
  3586 	if (test_bit(__E1000_DOWN, &adapter->flags))
       
  3587 		return;
       
  3588 	e_err(drv, "Reset adapter\n");
       
  3589 	e1000_reinit_safe(adapter);
       
  3590 }
       
  3591 
       
  3592 /**
       
  3593  * e1000_get_stats - Get System Network Statistics
       
  3594  * @netdev: network interface device structure
       
  3595  *
       
  3596  * Returns the address of the device statistics structure.
       
  3597  * The statistics are actually updated from the watchdog.
       
  3598  **/
       
  3599 
       
  3600 static struct net_device_stats *e1000_get_stats(struct net_device *netdev)
       
  3601 {
       
  3602 	/* only return the current stats */
       
  3603 	return &netdev->stats;
       
  3604 }
       
  3605 
       
  3606 /**
       
  3607  * e1000_change_mtu - Change the Maximum Transfer Unit
       
  3608  * @netdev: network interface device structure
       
  3609  * @new_mtu: new value for maximum frame size
       
  3610  *
       
  3611  * Returns 0 on success, negative on failure
       
  3612  **/
       
  3613 
       
  3614 static int e1000_change_mtu(struct net_device *netdev, int new_mtu)
       
  3615 {
       
  3616 	struct e1000_adapter *adapter = netdev_priv(netdev);
       
  3617 	struct e1000_hw *hw = &adapter->hw;
       
  3618 	int max_frame = new_mtu + ENET_HEADER_SIZE + ETHERNET_FCS_SIZE;
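        	/* e.g. the default MTU of 1500 gives a max_frame of
        	 * 1500 + 14 + 4 = 1518 bytes (Ethernet header plus FCS). */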
       
  3619 
       
  3620 	if (adapter->ecdev) {
       
  3621 		return -EBUSY;
       
  3622 	}
       
  3623 
       
  3624 	if ((max_frame < MINIMUM_ETHERNET_FRAME_SIZE) ||
       
  3625 	    (max_frame > MAX_JUMBO_FRAME_SIZE)) {
       
  3626 		e_err(probe, "Invalid MTU setting\n");
       
  3627 		return -EINVAL;
       
  3628 	}
       
  3629 
       
  3630 	/* Adapter-specific max frame size limits. */
       
  3631 	switch (hw->mac_type) {
       
  3632 	case e1000_undefined ... e1000_82542_rev2_1:
       
  3633 		if (max_frame > (ETH_FRAME_LEN + ETH_FCS_LEN)) {
       
  3634 			e_err(probe, "Jumbo Frames not supported.\n");
       
  3635 			return -EINVAL;
       
  3636 		}
       
  3637 		break;
       
  3638 	default:
       
  3639 		/* Capable of supporting up to MAX_JUMBO_FRAME_SIZE limit. */
       
  3640 		break;
       
  3641 	}
       
  3642 
       
  3643 	while (test_and_set_bit(__E1000_RESETTING, &adapter->flags))
       
  3644 		msleep(1);
       
  3645 	/* e1000_down has a dependency on max_frame_size */
       
  3646 	hw->max_frame_size = max_frame;
       
  3647 	if (netif_running(netdev))
       
  3648 		e1000_down(adapter);
       
  3649 
       
  3650 	/* NOTE: netdev_alloc_skb reserves 16 bytes, and typically NET_IP_ALIGN
       
  3651 	 * means we reserve 2 more, this pushes us to allocate from the next
       
  3652 	 * larger slab size.
       
  3653 	 * i.e. RXBUFFER_2048 --> size-4096 slab
       
  3654 	 *  however with the new *_jumbo_rx* routines, jumbo receives will use
       
  3655 	 *  fragmented skbs */
       
  3656 
       
  3657 	if (max_frame <= E1000_RXBUFFER_2048)
       
  3658 		adapter->rx_buffer_len = E1000_RXBUFFER_2048;
       
  3659 	else
       
  3660 #if (PAGE_SIZE >= E1000_RXBUFFER_16384)
       
  3661 		adapter->rx_buffer_len = E1000_RXBUFFER_16384;
       
  3662 #elif (PAGE_SIZE >= E1000_RXBUFFER_4096)
       
  3663 		adapter->rx_buffer_len = PAGE_SIZE;
       
  3664 #endif
       
  3665 
       
  3666 	/* adjust allocation if LPE protects us, and we aren't using SBP */
       
  3667 	if (!hw->tbi_compatibility_on &&
       
  3668 	    ((max_frame == (ETH_FRAME_LEN + ETH_FCS_LEN)) ||
       
  3669 	     (max_frame == MAXIMUM_ETHERNET_VLAN_SIZE)))
       
  3670 		adapter->rx_buffer_len = MAXIMUM_ETHERNET_VLAN_SIZE;
       
  3671 
       
  3672 	pr_info("%s changing MTU from %d to %d\n",
       
  3673 		netdev->name, netdev->mtu, new_mtu);
       
  3674 	netdev->mtu = new_mtu;
       
  3675 
       
  3676 	if (netif_running(netdev))
       
  3677 		e1000_up(adapter);
       
  3678 	else
       
  3679 		e1000_reset(adapter);
       
  3680 
       
  3681 	clear_bit(__E1000_RESETTING, &adapter->flags);
       
  3682 
       
  3683 	return 0;
       
  3684 }
       
  3685 
       
  3686 /**
       
  3687  * e1000_update_stats - Update the board statistics counters
       
  3688  * @adapter: board private structure
       
  3689  **/
       
  3690 
       
  3691 void e1000_update_stats(struct e1000_adapter *adapter)
       
  3692 {
       
  3693 	struct net_device *netdev = adapter->netdev;
       
  3694 	struct e1000_hw *hw = &adapter->hw;
       
  3695 	struct pci_dev *pdev = adapter->pdev;
       
  3696 	unsigned long flags = 0;
       
  3697 	u16 phy_tmp;
       
  3698 
       
  3699 #define PHY_IDLE_ERROR_COUNT_MASK 0x00FF
       
  3700 
       
  3701 	/*
       
  3702 	 * Prevent stats update while adapter is being reset, or if the pci
       
  3703 	 * connection is down.
       
  3704 	 */
       
  3705 	if (adapter->link_speed == 0)
       
  3706 		return;
       
  3707 	if (pci_channel_offline(pdev))
       
  3708 		return;
       
  3709 
       
  3710 	if (!adapter->ecdev) {
       
  3711 		spin_lock_irqsave(&adapter->stats_lock, flags);
       
  3712 	}
       
  3713 
       
  3714 	/* these counters are modified from e1000_tbi_adjust_stats,
       
  3715 	 * called from the interrupt context, so they must only
       
  3716 	 * be written while holding adapter->stats_lock
       
  3717 	 */
       
  3718 
       
  3719 	adapter->stats.crcerrs += er32(CRCERRS);
       
  3720 	adapter->stats.gprc += er32(GPRC);
       
  3721 	adapter->stats.gorcl += er32(GORCL);
       
  3722 	adapter->stats.gorch += er32(GORCH);
       
  3723 	adapter->stats.bprc += er32(BPRC);
       
  3724 	adapter->stats.mprc += er32(MPRC);
       
  3725 	adapter->stats.roc += er32(ROC);
       
  3726 
       
  3727 	adapter->stats.prc64 += er32(PRC64);
       
  3728 	adapter->stats.prc127 += er32(PRC127);
       
  3729 	adapter->stats.prc255 += er32(PRC255);
       
  3730 	adapter->stats.prc511 += er32(PRC511);
       
  3731 	adapter->stats.prc1023 += er32(PRC1023);
       
  3732 	adapter->stats.prc1522 += er32(PRC1522);
       
  3733 
       
  3734 	adapter->stats.symerrs += er32(SYMERRS);
       
  3735 	adapter->stats.mpc += er32(MPC);
       
  3736 	adapter->stats.scc += er32(SCC);
       
  3737 	adapter->stats.ecol += er32(ECOL);
       
  3738 	adapter->stats.mcc += er32(MCC);
       
  3739 	adapter->stats.latecol += er32(LATECOL);
       
  3740 	adapter->stats.dc += er32(DC);
       
  3741 	adapter->stats.sec += er32(SEC);
       
  3742 	adapter->stats.rlec += er32(RLEC);
       
  3743 	adapter->stats.xonrxc += er32(XONRXC);
       
  3744 	adapter->stats.xontxc += er32(XONTXC);
       
  3745 	adapter->stats.xoffrxc += er32(XOFFRXC);
       
  3746 	adapter->stats.xofftxc += er32(XOFFTXC);
       
  3747 	adapter->stats.fcruc += er32(FCRUC);
       
  3748 	adapter->stats.gptc += er32(GPTC);
       
  3749 	adapter->stats.gotcl += er32(GOTCL);
       
  3750 	adapter->stats.gotch += er32(GOTCH);
       
  3751 	adapter->stats.rnbc += er32(RNBC);
       
  3752 	adapter->stats.ruc += er32(RUC);
       
  3753 	adapter->stats.rfc += er32(RFC);
       
  3754 	adapter->stats.rjc += er32(RJC);
       
  3755 	adapter->stats.torl += er32(TORL);
       
  3756 	adapter->stats.torh += er32(TORH);
       
  3757 	adapter->stats.totl += er32(TOTL);
       
  3758 	adapter->stats.toth += er32(TOTH);
       
  3759 	adapter->stats.tpr += er32(TPR);
       
  3760 
       
  3761 	adapter->stats.ptc64 += er32(PTC64);
       
  3762 	adapter->stats.ptc127 += er32(PTC127);
       
  3763 	adapter->stats.ptc255 += er32(PTC255);
       
  3764 	adapter->stats.ptc511 += er32(PTC511);
       
  3765 	adapter->stats.ptc1023 += er32(PTC1023);
       
  3766 	adapter->stats.ptc1522 += er32(PTC1522);
       
  3767 
       
  3768 	adapter->stats.mptc += er32(MPTC);
       
  3769 	adapter->stats.bptc += er32(BPTC);
       
  3770 
       
  3771 	/* used for adaptive IFS */
       
  3772 
       
  3773 	hw->tx_packet_delta = er32(TPT);
       
  3774 	adapter->stats.tpt += hw->tx_packet_delta;
       
  3775 	hw->collision_delta = er32(COLC);
       
  3776 	adapter->stats.colc += hw->collision_delta;
       
  3777 
       
  3778 	if (hw->mac_type >= e1000_82543) {
       
  3779 		adapter->stats.algnerrc += er32(ALGNERRC);
       
  3780 		adapter->stats.rxerrc += er32(RXERRC);
       
  3781 		adapter->stats.tncrs += er32(TNCRS);
       
  3782 		adapter->stats.cexterr += er32(CEXTERR);
       
  3783 		adapter->stats.tsctc += er32(TSCTC);
       
  3784 		adapter->stats.tsctfc += er32(TSCTFC);
       
  3785 	}
       
  3786 
       
  3787 	/* Fill out the OS statistics structure */
       
  3788 	netdev->stats.multicast = adapter->stats.mprc;
       
  3789 	netdev->stats.collisions = adapter->stats.colc;
       
  3790 
       
  3791 	/* Rx Errors */
       
  3792 
       
  3793 	/* RLEC on some newer hardware can be incorrect so build
       
   3794 	 * our own version based on RUC and ROC */
       
  3795 	netdev->stats.rx_errors = adapter->stats.rxerrc +
       
  3796 		adapter->stats.crcerrs + adapter->stats.algnerrc +
       
  3797 		adapter->stats.ruc + adapter->stats.roc +
       
  3798 		adapter->stats.cexterr;
       
  3799 	adapter->stats.rlerrc = adapter->stats.ruc + adapter->stats.roc;
       
  3800 	netdev->stats.rx_length_errors = adapter->stats.rlerrc;
       
  3801 	netdev->stats.rx_crc_errors = adapter->stats.crcerrs;
       
  3802 	netdev->stats.rx_frame_errors = adapter->stats.algnerrc;
       
  3803 	netdev->stats.rx_missed_errors = adapter->stats.mpc;
       
  3804 
       
  3805 	/* Tx Errors */
       
  3806 	adapter->stats.txerrc = adapter->stats.ecol + adapter->stats.latecol;
       
  3807 	netdev->stats.tx_errors = adapter->stats.txerrc;
       
  3808 	netdev->stats.tx_aborted_errors = adapter->stats.ecol;
       
  3809 	netdev->stats.tx_window_errors = adapter->stats.latecol;
       
  3810 	netdev->stats.tx_carrier_errors = adapter->stats.tncrs;
       
  3811 	if (hw->bad_tx_carr_stats_fd &&
       
  3812 	    adapter->link_duplex == FULL_DUPLEX) {
       
  3813 		netdev->stats.tx_carrier_errors = 0;
       
  3814 		adapter->stats.tncrs = 0;
       
  3815 	}
       
  3816 
       
  3817 	/* Tx Dropped needs to be maintained elsewhere */
       
  3818 
       
  3819 	/* Phy Stats */
       
  3820 	if (hw->media_type == e1000_media_type_copper) {
       
  3821 		if ((adapter->link_speed == SPEED_1000) &&
       
  3822 		   (!e1000_read_phy_reg(hw, PHY_1000T_STATUS, &phy_tmp))) {
       
  3823 			phy_tmp &= PHY_IDLE_ERROR_COUNT_MASK;
       
  3824 			adapter->phy_stats.idle_errors += phy_tmp;
       
  3825 		}
       
  3826 
       
  3827 		if ((hw->mac_type <= e1000_82546) &&
       
  3828 		   (hw->phy_type == e1000_phy_m88) &&
       
  3829 		   !e1000_read_phy_reg(hw, M88E1000_RX_ERR_CNTR, &phy_tmp))
       
  3830 			adapter->phy_stats.receive_errors += phy_tmp;
       
  3831 	}
       
  3832 
       
  3833 	/* Management Stats */
       
  3834 	if (hw->has_smbus) {
       
  3835 		adapter->stats.mgptc += er32(MGTPTC);
       
  3836 		adapter->stats.mgprc += er32(MGTPRC);
       
  3837 		adapter->stats.mgpdc += er32(MGTPDC);
       
  3838 	}
       
  3839 
       
  3840 	if (!adapter->ecdev) {
       
  3841 		spin_unlock_irqrestore(&adapter->stats_lock, flags);
       
  3842 	}
       
  3843 }
       
  3844 
       
  3845 void ec_poll(struct net_device *netdev)
       
  3846 {
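        	/* Polling entry point for EtherCAT operation (adapter->ecdev),
        	 * typically called cyclically by the master in place of the
        	 * hardware interrupt: run the watchdog roughly every two seconds
        	 * and drive Rx/Tx processing by calling the interrupt handler
        	 * directly.
        	 */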
       
  3847 	struct e1000_adapter *adapter = netdev_priv(netdev);
       
  3848 	if (jiffies - adapter->ec_watchdog_jiffies >= 2 * HZ) {
       
  3849 		e1000_watchdog(&adapter->watchdog_task.work);
       
  3850 		adapter->ec_watchdog_jiffies = jiffies;
       
  3851 	}
       
  3852 
       
  3853 	e1000_intr(0, netdev);
       
  3854 }
       
  3855 
       
  3856 /**
       
  3857  * e1000_intr - Interrupt Handler
       
  3858  * @irq: interrupt number
       
  3859  * @data: pointer to a network interface device structure
       
  3860  **/
       
  3861 
       
  3862 static irqreturn_t e1000_intr(int irq, void *data)
       
  3863 {
       
  3864 	struct net_device *netdev = data;
       
  3865 	struct e1000_adapter *adapter = netdev_priv(netdev);
       
  3866 	struct e1000_hw *hw = &adapter->hw;
       
  3867 	u32 icr = er32(ICR);
       
  3868 
       
  3869 	if (unlikely((!icr)))
       
  3870 		return IRQ_NONE;  /* Not our interrupt */
       
  3871 
       
  3872 	/*
       
   3873 	 * We might have caused the interrupt, but the above
 
   3874 	 * read cleared it; just in case the driver is
 
   3875 	 * down, there is nothing to do, so return handled
       
  3876 	 */
       
  3877 	if (unlikely(test_bit(__E1000_DOWN, &adapter->flags)))
       
  3878 		return IRQ_HANDLED;
       
  3879 
       
  3880 	if (unlikely(icr & (E1000_ICR_RXSEQ | E1000_ICR_LSC))) {
       
  3881 		hw->get_link_status = 1;
       
  3882 		/* guard against interrupt when we're going down */
       
  3883 		if (!adapter->ecdev && !test_bit(__E1000_DOWN, &adapter->flags))
       
  3884 			schedule_delayed_work(&adapter->watchdog_task, 1);
       
  3885 	}
       
  3886 
       
  3887 	if (adapter->ecdev) {
       
  3888 		int i, ec_work_done = 0;
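        		/* EtherCAT operation: no NAPI context is available here, so
        		 * clean the Rx and Tx rings synchronously, making at most
        		 * E1000_MAX_INTR passes and stopping early once no more work
        		 * is reported. */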
       
  3889 		for (i = 0; i < E1000_MAX_INTR; i++) {
       
  3890 			if (unlikely(!adapter->clean_rx(adapter, adapter->rx_ring,
       
  3891 							&ec_work_done, 100) &&
       
  3892 						!e1000_clean_tx_irq(adapter, adapter->tx_ring))) {
       
  3893 				break;
       
  3894 			}
       
  3895 		}
       
   3896 	} else {
       
  3897 		/* disable interrupts, without the synchronize_irq bit */
       
  3898 		ew32(IMC, ~0);
       
  3899 		E1000_WRITE_FLUSH();
       
  3900 
       
  3901 		if (likely(napi_schedule_prep(&adapter->napi))) {
       
  3902 			adapter->total_tx_bytes = 0;
       
  3903 			adapter->total_tx_packets = 0;
       
  3904 			adapter->total_rx_bytes = 0;
       
  3905 			adapter->total_rx_packets = 0;
       
  3906 			__napi_schedule(&adapter->napi);
       
  3907 		} else {
       
   3908 			/* This really should not happen! If it does, it is basically a
 
   3909 			 * bug, but not a hard error, so enable interrupts and continue */
       
  3910 			if (!test_bit(__E1000_DOWN, &adapter->flags))
       
  3911 				e1000_irq_enable(adapter);
       
  3912 		}
       
  3913 	}
       
  3914 
       
  3915 	return IRQ_HANDLED;
       
  3916 }
       
  3917 
       
  3918 /**
       
  3919  * e1000_clean - NAPI Rx polling callback
       
  3920  * @adapter: board private structure
       
  3921  * EtherCAT: never called
       
  3922  **/
       
  3923 static int e1000_clean(struct napi_struct *napi, int budget)
       
  3924 {
       
  3925 	struct e1000_adapter *adapter = container_of(napi, struct e1000_adapter, napi);
       
  3926 	int tx_clean_complete = 0, work_done = 0;
       
  3927 
       
  3928 	tx_clean_complete = e1000_clean_tx_irq(adapter, &adapter->tx_ring[0]);
       
  3929 
       
  3930 	adapter->clean_rx(adapter, &adapter->rx_ring[0], &work_done, budget);
       
  3931 
       
  3932 	if (!tx_clean_complete)
       
  3933 		work_done = budget;
       
  3934 
       
  3935 	/* If budget not fully consumed, exit the polling mode */
       
  3936 	if (work_done < budget) {
       
  3937 		if (likely(adapter->itr_setting & 3))
       
  3938 			e1000_set_itr(adapter);
       
  3939 		napi_complete(napi);
       
  3940 		if (!test_bit(__E1000_DOWN, &adapter->flags))
       
  3941 			e1000_irq_enable(adapter);
       
  3942 	}
       
  3943 
       
  3944 	return work_done;
       
  3945 }
       
  3946 
       
  3947 /**
       
  3948  * e1000_clean_tx_irq - Reclaim resources after transmit completes
       
  3949  * @adapter: board private structure
       
  3950  **/
       
  3951 static bool e1000_clean_tx_irq(struct e1000_adapter *adapter,
       
  3952 			       struct e1000_tx_ring *tx_ring)
       
  3953 {
       
  3954 	struct e1000_hw *hw = &adapter->hw;
       
  3955 	struct net_device *netdev = adapter->netdev;
       
  3956 	struct e1000_tx_desc *tx_desc, *eop_desc;
       
  3957 	struct e1000_buffer *buffer_info;
       
  3958 	unsigned int i, eop;
       
  3959 	unsigned int count = 0;
       
  3960 	unsigned int total_tx_bytes=0, total_tx_packets=0;
       
  3961 
       
  3962 	i = tx_ring->next_to_clean;
       
  3963 	eop = tx_ring->buffer_info[i].next_to_watch;
       
  3964 	eop_desc = E1000_TX_DESC(*tx_ring, eop);
       
  3965 
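        	/* Reclaim descriptors that the hardware has marked done (DD bit),
        	 * advancing from EOP to EOP; count is capped at the ring size as
        	 * a safety bound. */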
       
  3966 	while ((eop_desc->upper.data & cpu_to_le32(E1000_TXD_STAT_DD)) &&
       
  3967 	       (count < tx_ring->count)) {
       
  3968 		bool cleaned = false;
       
  3969 		rmb();	/* read buffer_info after eop_desc */
       
  3970 		for ( ; !cleaned; count++) {
       
  3971 			tx_desc = E1000_TX_DESC(*tx_ring, i);
       
  3972 			buffer_info = &tx_ring->buffer_info[i];
       
  3973 			cleaned = (i == eop);
       
  3974 
       
  3975 			if (cleaned) {
       
  3976 				total_tx_packets += buffer_info->segs;
       
  3977 				total_tx_bytes += buffer_info->bytecount;
       
  3978 			}
       
  3979 			e1000_unmap_and_free_tx_resource(adapter, buffer_info);
       
  3980 			tx_desc->upper.data = 0;
       
  3981 
       
  3982 			if (unlikely(++i == tx_ring->count)) i = 0;
       
  3983 		}
       
  3984 
       
  3985 		eop = tx_ring->buffer_info[i].next_to_watch;
       
  3986 		eop_desc = E1000_TX_DESC(*tx_ring, eop);
       
  3987 	}
       
  3988 
       
  3989 	tx_ring->next_to_clean = i;
       
  3990 
       
  3991 #define TX_WAKE_THRESHOLD 32
       
  3992 	if (!adapter->ecdev && unlikely(count && netif_carrier_ok(netdev) &&
       
  3993 		     E1000_DESC_UNUSED(tx_ring) >= TX_WAKE_THRESHOLD)) {
       
  3994 		/* Make sure that anybody stopping the queue after this
       
  3995 		 * sees the new next_to_clean.
       
  3996 		 */
       
  3997 		smp_mb();
       
  3998 
       
  3999 		if (netif_queue_stopped(netdev) &&
       
  4000 		    !(test_bit(__E1000_DOWN, &adapter->flags))) {
       
  4001 			netif_wake_queue(netdev);
       
  4002 			++adapter->restart_queue;
       
  4003 		}
       
  4004 	}
       
  4005 
       
  4006 	if (!adapter->ecdev && adapter->detect_tx_hung) {
       
   4007 		/* Detect a transmit hang in hardware; this serializes the
       
  4008 		 * check with the clearing of time_stamp and movement of i */
       
  4009 		adapter->detect_tx_hung = false;
       
  4010 		if (tx_ring->buffer_info[eop].time_stamp &&
       
  4011 		    time_after(jiffies, tx_ring->buffer_info[eop].time_stamp +
       
  4012 		               (adapter->tx_timeout_factor * HZ)) &&
       
  4013 		    !(er32(STATUS) & E1000_STATUS_TXOFF)) {
       
  4014 
       
  4015 			/* detected Tx unit hang */
       
  4016 			e_err(drv, "Detected Tx Unit Hang\n"
       
  4017 			      "  Tx Queue             <%lu>\n"
       
  4018 			      "  TDH                  <%x>\n"
       
  4019 			      "  TDT                  <%x>\n"
       
  4020 			      "  next_to_use          <%x>\n"
       
  4021 			      "  next_to_clean        <%x>\n"
       
  4022 			      "buffer_info[next_to_clean]\n"
       
  4023 			      "  time_stamp           <%lx>\n"
       
  4024 			      "  next_to_watch        <%x>\n"
       
  4025 			      "  jiffies              <%lx>\n"
       
  4026 			      "  next_to_watch.status <%x>\n",
       
   4027 				(unsigned long)(tx_ring - adapter->tx_ring),
       
  4029 				readl(hw->hw_addr + tx_ring->tdh),
       
  4030 				readl(hw->hw_addr + tx_ring->tdt),
       
  4031 				tx_ring->next_to_use,
       
  4032 				tx_ring->next_to_clean,
       
  4033 				tx_ring->buffer_info[eop].time_stamp,
       
  4034 				eop,
       
  4035 				jiffies,
       
  4036 				eop_desc->upper.fields.status);
       
  4037 			e1000_dump(adapter);
       
  4038 			netif_stop_queue(netdev);
       
  4039 		}
       
  4040 	}
       
  4041 	adapter->total_tx_bytes += total_tx_bytes;
       
  4042 	adapter->total_tx_packets += total_tx_packets;
       
  4043 	netdev->stats.tx_bytes += total_tx_bytes;
       
  4044 	netdev->stats.tx_packets += total_tx_packets;
       
  4045 	return count < tx_ring->count;
       
  4046 }
       
  4047 
       
  4048 /**
       
  4049  * e1000_rx_checksum - Receive Checksum Offload for 82543
       
  4050  * @adapter:     board private structure
       
  4051  * @status_err:  receive descriptor status and error fields
       
  4052  * @csum:        receive descriptor csum field
       
   4053  * @skb:         socket buffer with received data
       
  4054  **/
       
  4055 
       
  4056 static void e1000_rx_checksum(struct e1000_adapter *adapter, u32 status_err,
       
  4057 			      u32 csum, struct sk_buff *skb)
       
  4058 {
       
  4059 	struct e1000_hw *hw = &adapter->hw;
       
  4060 	u16 status = (u16)status_err;
       
  4061 	u8 errors = (u8)(status_err >> 24);
       
  4062 
       
  4063 	skb_checksum_none_assert(skb);
       
  4064 
       
  4065 	/* 82543 or newer only */
       
  4066 	if (unlikely(hw->mac_type < e1000_82543)) return;
       
  4067 	/* Ignore Checksum bit is set */
       
  4068 	if (unlikely(status & E1000_RXD_STAT_IXSM)) return;
       
  4069 	/* TCP/UDP checksum error bit is set */
       
  4070 	if (unlikely(errors & E1000_RXD_ERR_TCPE)) {
       
  4071 		/* let the stack verify checksum errors */
       
  4072 		adapter->hw_csum_err++;
       
  4073 		return;
       
  4074 	}
       
  4075 	/* TCP/UDP Checksum has not been calculated */
       
  4076 	if (!(status & E1000_RXD_STAT_TCPCS))
       
  4077 		return;
       
  4078 
       
  4079 	/* It must be a TCP or UDP packet with a valid checksum */
       
  4080 	if (likely(status & E1000_RXD_STAT_TCPCS)) {
       
  4081 		/* TCP checksum is good */
       
  4082 		skb->ip_summed = CHECKSUM_UNNECESSARY;
       
  4083 	}
       
  4084 	adapter->hw_csum_good++;
       
  4085 }
       
  4086 
       
  4087 /**
       
  4088  * e1000_consume_page - helper function
       
  4089  **/
       
  4090 static void e1000_consume_page(struct e1000_buffer *bi, struct sk_buff *skb,
       
  4091                                u16 length)
       
  4092 {
       
  4093 	bi->page = NULL;
       
  4094 	skb->len += length;
       
  4095 	skb->data_len += length;
       
  4096 	skb->truesize += PAGE_SIZE;
       
  4097 }
       
  4098 
       
  4099 /**
       
  4100  * e1000_receive_skb - helper function to handle rx indications
       
  4101  * @adapter: board private structure
       
  4102  * @status: descriptor status field as written by hardware
       
  4103  * @vlan: descriptor vlan field as written by hardware (no le/be conversion)
       
  4104  * @skb: pointer to sk_buff to be indicated to stack
       
  4105  */
       
  4106 static void e1000_receive_skb(struct e1000_adapter *adapter, u8 status,
       
  4107 			      __le16 vlan, struct sk_buff *skb)
       
  4108 {
       
  4109 	skb->protocol = eth_type_trans(skb, adapter->netdev);
       
  4110 
       
  4111 	if (status & E1000_RXD_STAT_VP) {
       
  4112 		u16 vid = le16_to_cpu(vlan) & E1000_RXD_SPC_VLAN_MASK;
       
  4113 
       
  4114 		__vlan_hwaccel_put_tag(skb, vid);
       
  4115 	}
       
  4116 	napi_gro_receive(&adapter->napi, skb);
       
  4117 }
       
  4118 
       
  4119 /**
       
  4120  * e1000_clean_jumbo_rx_irq - Send received data up the network stack; legacy
       
  4121  * @adapter: board private structure
       
  4122  * @rx_ring: ring to clean
       
  4123  * @work_done: amount of napi work completed this call
       
  4124  * @work_to_do: max amount of work allowed for this call to do
       
  4125  *
       
  4126  * the return value indicates whether actual cleaning was done, there
       
  4127  * is no guarantee that everything was cleaned
       
  4128  */
       
  4129 static bool e1000_clean_jumbo_rx_irq(struct e1000_adapter *adapter,
       
  4130 				     struct e1000_rx_ring *rx_ring,
       
  4131 				     int *work_done, int work_to_do)
       
  4132 {
       
  4133 	struct e1000_hw *hw = &adapter->hw;
       
  4134 	struct net_device *netdev = adapter->netdev;
       
  4135 	struct pci_dev *pdev = adapter->pdev;
       
  4136 	struct e1000_rx_desc *rx_desc, *next_rxd;
       
  4137 	struct e1000_buffer *buffer_info, *next_buffer;
       
  4138 	unsigned long irq_flags;
       
  4139 	u32 length;
       
  4140 	unsigned int i;
       
  4141 	int cleaned_count = 0;
       
  4142 	bool cleaned = false;
       
  4143 	unsigned int total_rx_bytes=0, total_rx_packets=0;
       
  4144 
       
  4145 	i = rx_ring->next_to_clean;
       
  4146 	rx_desc = E1000_RX_DESC(*rx_ring, i);
       
  4147 	buffer_info = &rx_ring->buffer_info[i];
       
  4148 
       
  4149 	while (rx_desc->status & E1000_RXD_STAT_DD) {
       
  4150 		struct sk_buff *skb;
       
  4151 		u8 status;
       
  4152 
       
  4153 		if (*work_done >= work_to_do)
       
  4154 			break;
       
  4155 		(*work_done)++;
       
  4156 		rmb(); /* read descriptor and rx_buffer_info after status DD */
       
  4157 
       
  4158 		status = rx_desc->status;
       
  4159 		skb = buffer_info->skb;
       
  4160 		if (!adapter->ecdev) {
       
  4161 			buffer_info->skb = NULL;
       
  4162 		}
       
  4163 
       
  4164 		if (++i == rx_ring->count) i = 0;
       
  4165 		next_rxd = E1000_RX_DESC(*rx_ring, i);
       
  4166 		prefetch(next_rxd);
       
  4167 
       
  4168 		next_buffer = &rx_ring->buffer_info[i];
       
  4169 
       
  4170 		cleaned = true;
       
  4171 		cleaned_count++;
       
  4172 		dma_unmap_page(&pdev->dev, buffer_info->dma,
       
  4173 			       buffer_info->length, DMA_FROM_DEVICE);
       
  4174 		buffer_info->dma = 0;
       
  4175 
       
  4176 		length = le16_to_cpu(rx_desc->length);
       
  4177 
       
  4178 		/* errors is only valid for DD + EOP descriptors */
       
  4179 		if (!adapter->ecdev &&
       
  4180 		    unlikely((status & E1000_RXD_STAT_EOP) &&
       
  4181 		    (rx_desc->errors & E1000_RXD_ERR_FRAME_ERR_MASK))) {
       
  4182 			u8 last_byte = *(skb->data + length - 1);
       
  4183 			if (TBI_ACCEPT(hw, status, rx_desc->errors, length,
       
  4184 				       last_byte)) {
       
  4185 				spin_lock_irqsave(&adapter->stats_lock,
       
  4186 				                  irq_flags);
       
  4187 				e1000_tbi_adjust_stats(hw, &adapter->stats,
       
  4188 				                       length, skb->data);
       
  4189 				spin_unlock_irqrestore(&adapter->stats_lock,
       
  4190 				                       irq_flags);
       
  4191 				length--;
       
  4192 			} else {
       
  4193 				/* recycle both page and skb */
       
  4194 				buffer_info->skb = skb;
       
  4195 				/* an error means any chain goes out the window
       
  4196 				 * too */
       
  4197 				if (rx_ring->rx_skb_top)
       
  4198 					dev_kfree_skb(rx_ring->rx_skb_top);
       
  4199 				rx_ring->rx_skb_top = NULL;
       
  4200 				goto next_desc;
       
  4201 			}
       
  4202 		}
       
  4203 
       
  4204 #define rxtop rx_ring->rx_skb_top
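        		/* A frame may span several descriptors: buffers without EOP
        		 * are collected as page fragments on rx_skb_top (rxtop) until
        		 * the descriptor carrying EOP completes the frame. */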
       
  4205 		if (!(status & E1000_RXD_STAT_EOP)) {
       
  4206 			/* this descriptor is only the beginning (or middle) */
       
  4207 			if (!rxtop) {
       
  4208 				/* this is the beginning of a chain */
       
  4209 				rxtop = skb;
       
  4210 				skb_fill_page_desc(rxtop, 0, buffer_info->page,
       
  4211 				                   0, length);
       
  4212 			} else {
       
  4213 				/* this is the middle of a chain */
       
  4214 				skb_fill_page_desc(rxtop,
       
  4215 				    skb_shinfo(rxtop)->nr_frags,
       
  4216 				    buffer_info->page, 0, length);
       
  4217 				/* re-use the skb, only consumed the page */
       
  4218 				buffer_info->skb = skb;
       
  4219 			}
       
  4220 			e1000_consume_page(buffer_info, rxtop, length);
       
  4221 			goto next_desc;
       
  4222 		} else {
       
  4223 			if (rxtop) {
       
  4224 				/* end of the chain */
       
  4225 				skb_fill_page_desc(rxtop,
       
  4226 				    skb_shinfo(rxtop)->nr_frags,
       
  4227 				    buffer_info->page, 0, length);
       
  4228 				/* re-use the current skb, we only consumed the
       
  4229 				 * page */
       
  4230 				buffer_info->skb = skb;
       
  4231 				skb = rxtop;
       
  4232 				rxtop = NULL;
       
  4233 				e1000_consume_page(buffer_info, skb, length);
       
  4234 			} else {
       
   4235 				/* no chain, got EOP, this buf is the whole packet;

   4236 				 * use copybreak to save the put_page/alloc_page */
       
  4237 				if (length <= copybreak &&
       
  4238 				    skb_tailroom(skb) >= length) {
       
  4239 					u8 *vaddr;
       
  4240 					vaddr = kmap_atomic(buffer_info->page);
       
  4241 					memcpy(skb_tail_pointer(skb), vaddr, length);
       
  4242 					kunmap_atomic(vaddr);
       
  4243 					/* re-use the page, so don't erase
       
  4244 					 * buffer_info->page */
       
  4245 					skb_put(skb, length);
       
  4246 				} else {
       
  4247 					skb_fill_page_desc(skb, 0,
       
  4248 					                   buffer_info->page, 0,
       
  4249 				                           length);
       
  4250 					e1000_consume_page(buffer_info, skb,
       
  4251 					                   length);
       
  4252 				}
       
  4253 			}
       
  4254 		}
       
  4255 
       
  4256 		/* Receive Checksum Offload XXX recompute due to CRC strip? */
       
  4257 		e1000_rx_checksum(adapter,
       
  4258 		                  (u32)(status) |
       
  4259 		                  ((u32)(rx_desc->errors) << 24),
       
  4260 		                  le16_to_cpu(rx_desc->csum), skb);
       
  4261 
       
  4262 		total_rx_bytes += (skb->len - 4); /* don't count FCS */
       
  4263 		if (likely(!(netdev->features & NETIF_F_RXFCS)))
       
  4264 			pskb_trim(skb, skb->len - 4);
       
  4265 		total_rx_packets++;
       
  4266 
       
  4267 		/* eth type trans needs skb->data to point to something */
       
  4268 		if (!pskb_may_pull(skb, ETH_HLEN)) {
       
  4269 			e_err(drv, "pskb_may_pull failed.\n");
       
  4270 			if (!adapter->ecdev) {
       
  4271 				dev_kfree_skb(skb);
       
  4272 			}
       
  4273 			goto next_desc;
       
  4274 		}
       
  4275 
       
  4276 		if (adapter->ecdev) {
       
  4277 			ecdev_receive(adapter->ecdev, skb->data, length);
       
  4278 
       
  4279 			// No need to detect link status as
       
  4280 			// long as frames are received: Reset watchdog.
       
  4281 			adapter->ec_watchdog_jiffies = jiffies;
       
  4282 		} else {
       
  4283 			e1000_receive_skb(adapter, status, rx_desc->special, skb);
       
  4284 		}
       
  4285 
       
  4286 next_desc:
       
  4287 		rx_desc->status = 0;
       
  4288 
       
  4289 		/* return some buffers to hardware, one at a time is too slow */
       
  4290 		if (unlikely(cleaned_count >= E1000_RX_BUFFER_WRITE)) {
       
  4291 			adapter->alloc_rx_buf(adapter, rx_ring, cleaned_count);
       
  4292 			cleaned_count = 0;
       
  4293 		}
       
  4294 
       
  4295 		/* use prefetched values */
       
  4296 		rx_desc = next_rxd;
       
  4297 		buffer_info = next_buffer;
       
  4298 	}
       
  4299 	rx_ring->next_to_clean = i;
       
  4300 
       
  4301 	cleaned_count = E1000_DESC_UNUSED(rx_ring);
       
  4302 	if (cleaned_count)
       
  4303 		adapter->alloc_rx_buf(adapter, rx_ring, cleaned_count);
       
  4304 
       
  4305 	adapter->total_rx_packets += total_rx_packets;
       
  4306 	adapter->total_rx_bytes += total_rx_bytes;
       
  4307 	netdev->stats.rx_bytes += total_rx_bytes;
       
  4308 	netdev->stats.rx_packets += total_rx_packets;
       
  4309 	return cleaned;
       
  4310 }
       
  4311 
       
  4312 /*
       
  4313  * this should improve performance for small packets with large amounts
       
  4314  * of reassembly being done in the stack
       
  4315  */
       
  4316 static void e1000_check_copybreak(struct net_device *netdev,
       
  4317 				 struct e1000_buffer *buffer_info,
       
  4318 				 u32 length, struct sk_buff **skb)
       
  4319 {
       
  4320 	struct e1000_adapter *adapter = netdev_priv(netdev);
       
  4321 	struct sk_buff *new_skb;
       
  4322 
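        	/* Copybreak copies small frames into a freshly allocated skb so the
        	 * original receive buffer can be recycled.  Skip it in EtherCAT
        	 * mode, where the data is handed to ecdev_receive() and the
        	 * original skb is reused anyway, and for frames larger than
        	 * copybreak. */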
       
  4323 	if (adapter->ecdev || length > copybreak)
       
  4324 		return;
       
  4325 
       
  4326 	new_skb = netdev_alloc_skb_ip_align(netdev, length);
       
  4327 	if (!new_skb)
       
  4328 		return;
       
  4329 
       
  4330 	skb_copy_to_linear_data_offset(new_skb, -NET_IP_ALIGN,
       
  4331 				       (*skb)->data - NET_IP_ALIGN,
       
  4332 				       length + NET_IP_ALIGN);
       
  4333 	/* save the skb in buffer_info as good */
       
  4334 	buffer_info->skb = *skb;
       
  4335 	*skb = new_skb;
       
  4336 }
       
  4337 
       
  4338 /**
       
  4339  * e1000_clean_rx_irq - Send received data up the network stack; legacy
       
  4340  * @adapter: board private structure
       
  4341  * @rx_ring: ring to clean
       
  4342  * @work_done: amount of napi work completed this call
       
  4343  * @work_to_do: max amount of work allowed for this call to do
       
  4344  */
       
  4345 static bool e1000_clean_rx_irq(struct e1000_adapter *adapter,
       
  4346 			       struct e1000_rx_ring *rx_ring,
       
  4347 			       int *work_done, int work_to_do)
       
  4348 {
       
  4349 	struct e1000_hw *hw = &adapter->hw;
       
  4350 	struct net_device *netdev = adapter->netdev;
       
  4351 	struct pci_dev *pdev = adapter->pdev;
       
  4352 	struct e1000_rx_desc *rx_desc, *next_rxd;
       
  4353 	struct e1000_buffer *buffer_info, *next_buffer;
       
  4354 	unsigned long flags;
       
  4355 	u32 length;
       
  4356 	unsigned int i;
       
  4357 	int cleaned_count = 0;
       
  4358 	bool cleaned = false;
       
  4359 	unsigned int total_rx_bytes=0, total_rx_packets=0;
       
  4360 
       
  4361 	i = rx_ring->next_to_clean;
       
  4362 	rx_desc = E1000_RX_DESC(*rx_ring, i);
       
  4363 	buffer_info = &rx_ring->buffer_info[i];
       
  4364 
       
  4365 	while (rx_desc->status & E1000_RXD_STAT_DD) {
       
  4366 		struct sk_buff *skb;
       
  4367 		u8 status;
       
  4368 
       
  4369 		if (*work_done >= work_to_do)
       
  4370 			break;
       
  4371 		(*work_done)++;
       
  4372 		rmb(); /* read descriptor and rx_buffer_info after status DD */
       
  4373 
       
  4374 		status = rx_desc->status;
       
  4375 		skb = buffer_info->skb;
       
  4376 		if (!adapter->ecdev) {
       
  4377 			buffer_info->skb = NULL;
       
  4378 		}
       
  4379 
       
  4380 		prefetch(skb->data - NET_IP_ALIGN);
       
  4381 
       
  4382 		if (++i == rx_ring->count) i = 0;
       
  4383 		next_rxd = E1000_RX_DESC(*rx_ring, i);
       
  4384 		prefetch(next_rxd);
       
  4385 
       
  4386 		next_buffer = &rx_ring->buffer_info[i];
       
  4387 
       
  4388 		cleaned = true;
       
  4389 		cleaned_count++;
       
  4390 		dma_unmap_single(&pdev->dev, buffer_info->dma,
       
  4391 				 buffer_info->length, DMA_FROM_DEVICE);
       
  4392 		buffer_info->dma = 0;
       
  4393 
       
  4394 		length = le16_to_cpu(rx_desc->length);
       
  4395 		/* !EOP means multiple descriptors were used to store a single
       
   4396 		 * packet; if that's the case we need to toss it.  In fact, we need

   4397 		 * to toss every packet with the EOP bit clear and the next
       
  4398 		 * frame that _does_ have the EOP bit set, as it is by
       
  4399 		 * definition only a frame fragment
       
  4400 		 */
       
  4401 		if (unlikely(!(status & E1000_RXD_STAT_EOP)))
       
  4402 			adapter->discarding = true;
       
  4403 
       
  4404 		if (adapter->discarding) {
       
  4405 			/* All receives must fit into a single buffer */
       
  4406 			e_dbg("Receive packet consumed multiple buffers\n");
       
  4407 			/* recycle */
       
  4408 			buffer_info->skb = skb;
       
  4409 			if (status & E1000_RXD_STAT_EOP)
       
  4410 				adapter->discarding = false;
       
  4411 			goto next_desc;
       
  4412 		}
       
  4413 
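        		/* TBI compatibility workaround: a receive error caused only by
        		 * the very last byte (e.g. a carrier-extend symbol) may still
        		 * be an acceptable frame; if TBI_ACCEPT agrees, drop that byte
        		 * and adjust the statistics. */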
       
  4414 		if (!adapter->ecdev &&
       
  4415 		    unlikely(rx_desc->errors & E1000_RXD_ERR_FRAME_ERR_MASK)) {
       
  4416 			u8 last_byte = *(skb->data + length - 1);
       
  4417 			if (TBI_ACCEPT(hw, status, rx_desc->errors, length,
       
  4418 				       last_byte)) {
       
  4419 				spin_lock_irqsave(&adapter->stats_lock, flags);
       
  4420 				e1000_tbi_adjust_stats(hw, &adapter->stats,
       
  4421 				                       length, skb->data);
       
  4422 				spin_unlock_irqrestore(&adapter->stats_lock,
       
  4423 				                       flags);
       
  4424 				length--;
       
  4425 			} else {
       
  4426 				/* recycle */
       
  4427 				buffer_info->skb = skb;
       
  4428 				goto next_desc;
       
  4429 			}
       
  4430 		}
       
  4431 
       
  4432 		total_rx_bytes += (length - 4); /* don't count FCS */
       
  4433 		total_rx_packets++;
       
  4434 
       
  4435 		if (likely(!(netdev->features & NETIF_F_RXFCS)))
       
  4436 			/* adjust length to remove Ethernet CRC, this must be
       
  4437 			 * done after the TBI_ACCEPT workaround above
       
  4438 			 */
       
  4439 			length -= 4;
       
  4440 
       
  4441 		e1000_check_copybreak(netdev, buffer_info, length, &skb);
       
  4442 
       
  4443 		skb_put(skb, length);
       
  4444 
       
  4445 		/* Receive Checksum Offload */
       
  4446 		e1000_rx_checksum(adapter,
       
  4447 				  (u32)(status) |
       
  4448 				  ((u32)(rx_desc->errors) << 24),
       
  4449 				  le16_to_cpu(rx_desc->csum), skb);
       
  4450 
       
  4451 		if (adapter->ecdev) {
       
  4452 			ecdev_receive(adapter->ecdev, skb->data, length);
       
  4453 
       
  4454 			// No need to detect link status as
       
  4455 			// long as frames are received: Reset watchdog.
       
  4456 			adapter->ec_watchdog_jiffies = jiffies;
       
  4457 		} else {
       
  4458 			e1000_receive_skb(adapter, status, rx_desc->special, skb);
       
  4459 		}
       
  4460 
       
  4461 next_desc:
       
  4462 		rx_desc->status = 0;
       
  4463 
       
  4464 		/* return some buffers to hardware, one at a time is too slow */
       
  4465 		if (unlikely(cleaned_count >= E1000_RX_BUFFER_WRITE)) {
       
  4466 			adapter->alloc_rx_buf(adapter, rx_ring, cleaned_count);
       
  4467 			cleaned_count = 0;
       
  4468 		}
       
  4469 
       
  4470 		/* use prefetched values */
       
  4471 		rx_desc = next_rxd;
       
  4472 		buffer_info = next_buffer;
       
  4473 	}
       
  4474 	rx_ring->next_to_clean = i;
       
  4475 
       
  4476 	cleaned_count = E1000_DESC_UNUSED(rx_ring);
       
  4477 	if (cleaned_count)
       
  4478 		adapter->alloc_rx_buf(adapter, rx_ring, cleaned_count);
       
  4479 
       
  4480 	adapter->total_rx_packets += total_rx_packets;
       
  4481 	adapter->total_rx_bytes += total_rx_bytes;
       
  4482 	netdev->stats.rx_bytes += total_rx_bytes;
       
  4483 	netdev->stats.rx_packets += total_rx_packets;
       
  4484 	return cleaned;
       
  4485 }
       
  4486 
       
  4487 /**
       
  4488  * e1000_alloc_jumbo_rx_buffers - Replace used jumbo receive buffers
       
  4489  * @adapter: address of board private structure
       
  4490  * @rx_ring: pointer to receive ring structure
       
  4491  * @cleaned_count: number of buffers to allocate this pass
       
  4492  **/
       
  4493 
       
  4494 static void
       
  4495 e1000_alloc_jumbo_rx_buffers(struct e1000_adapter *adapter,
       
  4496                              struct e1000_rx_ring *rx_ring, int cleaned_count)
       
  4497 {
       
  4498 	struct net_device *netdev = adapter->netdev;
       
  4499 	struct pci_dev *pdev = adapter->pdev;
       
  4500 	struct e1000_rx_desc *rx_desc;
       
  4501 	struct e1000_buffer *buffer_info;
       
  4502 	struct sk_buff *skb;
       
  4503 	unsigned int i;
       
   4504 	unsigned int bufsz = 256 - 16; /* for skb_reserve */
       
  4505 
       
  4506 	i = rx_ring->next_to_use;
       
  4507 	buffer_info = &rx_ring->buffer_info[i];
       
  4508 
       
  4509 	while (cleaned_count--) {
       
  4510 		skb = buffer_info->skb;
       
  4511 		if (skb) {
       
  4512 			skb_trim(skb, 0);
       
  4513 			goto check_page;
       
  4514 		}
       
  4515 
       
  4516 		skb = netdev_alloc_skb_ip_align(netdev, bufsz);
       
  4517 		if (unlikely(!skb)) {
       
  4518 			/* Better luck next round */
       
  4519 			adapter->alloc_rx_buff_failed++;
       
  4520 			break;
       
  4521 		}
       
  4522 
       
  4523 		/* Fix for errata 23, can't cross 64kB boundary */
       
  4524 		if (!e1000_check_64k_bound(adapter, skb->data, bufsz)) {
       
  4525 			struct sk_buff *oldskb = skb;
       
  4526 			e_err(rx_err, "skb align check failed: %u bytes at "
       
  4527 			      "%p\n", bufsz, skb->data);
       
  4528 			/* Try again, without freeing the previous */
       
  4529 			skb = netdev_alloc_skb_ip_align(netdev, bufsz);
       
  4530 			/* Failed allocation, critical failure */
       
  4531 			if (!skb) {
       
  4532 				dev_kfree_skb(oldskb);
       
  4533 				adapter->alloc_rx_buff_failed++;
       
  4534 				break;
       
  4535 			}
       
  4536 
       
  4537 			if (!e1000_check_64k_bound(adapter, skb->data, bufsz)) {
       
  4538 				/* give up */
       
  4539 				dev_kfree_skb(skb);
       
  4540 				dev_kfree_skb(oldskb);
       
  4541 				break; /* while (cleaned_count--) */
       
  4542 			}
       
  4543 
       
  4544 			/* Use new allocation */
       
  4545 			dev_kfree_skb(oldskb);
       
  4546 		}
       
  4547 		buffer_info->skb = skb;
       
  4548 		buffer_info->length = adapter->rx_buffer_len;
       
  4549 check_page:
       
  4550 		/* allocate a new page if necessary */
       
  4551 		if (!buffer_info->page) {
       
  4552 			buffer_info->page = alloc_page(GFP_ATOMIC);
       
  4553 			if (unlikely(!buffer_info->page)) {
       
  4554 				adapter->alloc_rx_buff_failed++;
       
  4555 				break;
       
  4556 			}
       
  4557 		}
       
  4558 
       
  4559 		if (!buffer_info->dma) {
       
  4560 			buffer_info->dma = dma_map_page(&pdev->dev,
       
  4561 			                                buffer_info->page, 0,
       
  4562 							buffer_info->length,
       
  4563 							DMA_FROM_DEVICE);
       
  4564 			if (dma_mapping_error(&pdev->dev, buffer_info->dma)) {
       
  4565 				put_page(buffer_info->page);
       
  4566 				dev_kfree_skb(skb);
       
  4567 				buffer_info->page = NULL;
       
  4568 				buffer_info->skb = NULL;
       
  4569 				buffer_info->dma = 0;
       
  4570 				adapter->alloc_rx_buff_failed++;
       
  4571 				break; /* while !buffer_info->skb */
       
  4572 			}
       
  4573 		}
       
  4574 
       
  4575 		rx_desc = E1000_RX_DESC(*rx_ring, i);
       
  4576 		rx_desc->buffer_addr = cpu_to_le64(buffer_info->dma);
       
  4577 
       
  4578 		if (unlikely(++i == rx_ring->count))
       
  4579 			i = 0;
       
  4580 		buffer_info = &rx_ring->buffer_info[i];
       
  4581 	}
       
  4582 
       
  4583 	if (likely(rx_ring->next_to_use != i)) {
       
  4584 		rx_ring->next_to_use = i;
       
  4585 		if (unlikely(i-- == 0))
       
  4586 			i = (rx_ring->count - 1);
       
  4587 
       
  4588 		/* Force memory writes to complete before letting h/w
       
  4589 		 * know there are new descriptors to fetch.  (Only
       
  4590 		 * applicable for weak-ordered memory model archs,
       
  4591 		 * such as IA-64). */
       
  4592 		wmb();
       
  4593 		writel(i, adapter->hw.hw_addr + rx_ring->rdt);
       
  4594 	}
       
  4595 }
       
  4596 
       
  4597 /**
       
  4598  * e1000_alloc_rx_buffers - Replace used receive buffers; legacy & extended
       
  4599  * @adapter: address of board private structure
       
  4600  **/
       
  4601 
       
  4602 static void e1000_alloc_rx_buffers(struct e1000_adapter *adapter,
       
  4603 				   struct e1000_rx_ring *rx_ring,
       
  4604 				   int cleaned_count)
       
  4605 {
       
  4606 	struct e1000_hw *hw = &adapter->hw;
       
  4607 	struct net_device *netdev = adapter->netdev;
       
  4608 	struct pci_dev *pdev = adapter->pdev;
       
  4609 	struct e1000_rx_desc *rx_desc;
       
  4610 	struct e1000_buffer *buffer_info;
       
  4611 	struct sk_buff *skb;
       
  4612 	unsigned int i;
       
  4613 	unsigned int bufsz = adapter->rx_buffer_len;
       
  4614 
       
  4615 	i = rx_ring->next_to_use;
       
  4616 	buffer_info = &rx_ring->buffer_info[i];
       
  4617 
       
  4618 	while (cleaned_count--) {
       
  4619 		skb = buffer_info->skb;
       
  4620 		if (skb) {
       
  4621 			skb_trim(skb, 0);
       
  4622 			goto map_skb;
       
  4623 		}
       
  4624 
       
  4625 		skb = netdev_alloc_skb_ip_align(netdev, bufsz);
       
  4626 		if (unlikely(!skb)) {
       
  4627 			/* Better luck next round */
       
  4628 			adapter->alloc_rx_buff_failed++;
       
  4629 			break;
       
  4630 		}
       
  4631 
       
  4632 		/* Fix for errata 23, can't cross 64kB boundary */
       
  4633 		if (!e1000_check_64k_bound(adapter, skb->data, bufsz)) {
       
  4634 			struct sk_buff *oldskb = skb;
       
  4635 			e_err(rx_err, "skb align check failed: %u bytes at "
       
  4636 			      "%p\n", bufsz, skb->data);
       
  4637 			/* Try again, without freeing the previous */
       
  4638 			skb = netdev_alloc_skb_ip_align(netdev, bufsz);
       
  4639 			/* Failed allocation, critical failure */
       
  4640 			if (!skb) {
       
  4641 				dev_kfree_skb(oldskb);
       
  4642 				adapter->alloc_rx_buff_failed++;
       
  4643 				break;
       
  4644 			}
       
  4645 
       
  4646 			if (!e1000_check_64k_bound(adapter, skb->data, bufsz)) {
       
  4647 				/* give up */
       
  4648 				dev_kfree_skb(skb);
       
  4649 				dev_kfree_skb(oldskb);
       
  4650 				adapter->alloc_rx_buff_failed++;
       
  4651 				break; /* while !buffer_info->skb */
       
  4652 			}
       
  4653 
       
  4654 			/* Use new allocation */
       
  4655 			dev_kfree_skb(oldskb);
       
  4656 		}
       
  4657 		buffer_info->skb = skb;
       
  4658 		buffer_info->length = adapter->rx_buffer_len;
       
  4659 map_skb:
       
  4660 		buffer_info->dma = dma_map_single(&pdev->dev,
       
  4661 						  skb->data,
       
  4662 						  buffer_info->length,
       
  4663 						  DMA_FROM_DEVICE);
       
  4664 		if (dma_mapping_error(&pdev->dev, buffer_info->dma)) {
       
  4665 			dev_kfree_skb(skb);
       
  4666 			buffer_info->skb = NULL;
       
  4667 			buffer_info->dma = 0;
       
  4668 			adapter->alloc_rx_buff_failed++;
       
  4669 			break; /* while !buffer_info->skb */
       
  4670 		}
       
  4671 
       
  4672 		/*
       
  4673 		 * XXX if it was allocated cleanly it will never map to a
       
  4674 		 * boundary crossing
       
  4675 		 */
       
  4676 
       
  4677 		/* Fix for errata 23, can't cross 64kB boundary */
       
  4678 		if (!e1000_check_64k_bound(adapter,
       
  4679 					(void *)(unsigned long)buffer_info->dma,
       
  4680 					adapter->rx_buffer_len)) {
       
  4681 			e_err(rx_err, "dma align check failed: %u bytes at "
       
  4682 			      "%p\n", adapter->rx_buffer_len,
       
  4683 			      (void *)(unsigned long)buffer_info->dma);
       
  4684 			dev_kfree_skb(skb);
       
  4685 			buffer_info->skb = NULL;
       
  4686 
       
  4687 			dma_unmap_single(&pdev->dev, buffer_info->dma,
       
  4688 					 adapter->rx_buffer_len,
       
  4689 					 DMA_FROM_DEVICE);
       
  4690 			buffer_info->dma = 0;
       
  4691 
       
  4692 			adapter->alloc_rx_buff_failed++;
       
  4693 			break; /* while !buffer_info->skb */
       
  4694 		}
       
  4695 		rx_desc = E1000_RX_DESC(*rx_ring, i);
       
  4696 		rx_desc->buffer_addr = cpu_to_le64(buffer_info->dma);
       
  4697 
       
  4698 		if (unlikely(++i == rx_ring->count))
       
  4699 			i = 0;
       
  4700 		buffer_info = &rx_ring->buffer_info[i];
       
  4701 	}
       
  4702 
       
  4703 	if (likely(rx_ring->next_to_use != i)) {
       
  4704 		rx_ring->next_to_use = i;
       
  4705 		if (unlikely(i-- == 0))
       
  4706 			i = (rx_ring->count - 1);
       
  4707 
       
  4708 		/* Force memory writes to complete before letting h/w
       
  4709 		 * know there are new descriptors to fetch.  (Only
       
  4710 		 * applicable for weak-ordered memory model archs,
       
  4711 		 * such as IA-64). */
       
  4712 		wmb();
       
  4713 		writel(i, hw->hw_addr + rx_ring->rdt);
       
  4714 	}
       
  4715 }
       
  4716 
       
  4717 /**
       
  4718  * e1000_smartspeed - Workaround for SmartSpeed on 82541 and 82547 controllers.
       
   4719  * @adapter: board private structure
       
  4720  **/
       
  4721 
       
  4722 static void e1000_smartspeed(struct e1000_adapter *adapter)
       
  4723 {
       
  4724 	struct e1000_hw *hw = &adapter->hw;
       
  4725 	u16 phy_status;
       
  4726 	u16 phy_ctrl;
       
  4727 
       
  4728 	if ((hw->phy_type != e1000_phy_igp) || !hw->autoneg ||
       
  4729 	   !(hw->autoneg_advertised & ADVERTISE_1000_FULL))
       
  4730 		return;
       
  4731 
       
  4732 	if (adapter->smartspeed == 0) {
       
  4733 		/* If Master/Slave config fault is asserted twice,
       
  4734 		 * we assume back-to-back */
       
  4735 		e1000_read_phy_reg(hw, PHY_1000T_STATUS, &phy_status);
       
  4736 		if (!(phy_status & SR_1000T_MS_CONFIG_FAULT)) return;
       
  4737 		e1000_read_phy_reg(hw, PHY_1000T_STATUS, &phy_status);
       
  4738 		if (!(phy_status & SR_1000T_MS_CONFIG_FAULT)) return;
       
  4739 		e1000_read_phy_reg(hw, PHY_1000T_CTRL, &phy_ctrl);
       
  4740 		if (phy_ctrl & CR_1000T_MS_ENABLE) {
       
  4741 			phy_ctrl &= ~CR_1000T_MS_ENABLE;
       
  4742 			e1000_write_phy_reg(hw, PHY_1000T_CTRL,
       
  4743 					    phy_ctrl);
       
  4744 			adapter->smartspeed++;
       
  4745 			if (!e1000_phy_setup_autoneg(hw) &&
       
  4746 			   !e1000_read_phy_reg(hw, PHY_CTRL,
       
  4747 				   	       &phy_ctrl)) {
       
  4748 				phy_ctrl |= (MII_CR_AUTO_NEG_EN |
       
  4749 					     MII_CR_RESTART_AUTO_NEG);
       
  4750 				e1000_write_phy_reg(hw, PHY_CTRL,
       
  4751 						    phy_ctrl);
       
  4752 			}
       
  4753 		}
       
  4754 		return;
       
  4755 	} else if (adapter->smartspeed == E1000_SMARTSPEED_DOWNSHIFT) {
       
  4756 		/* If still no link, perhaps using 2/3 pair cable */
       
  4757 		e1000_read_phy_reg(hw, PHY_1000T_CTRL, &phy_ctrl);
       
  4758 		phy_ctrl |= CR_1000T_MS_ENABLE;
       
  4759 		e1000_write_phy_reg(hw, PHY_1000T_CTRL, phy_ctrl);
       
  4760 		if (!e1000_phy_setup_autoneg(hw) &&
       
  4761 		   !e1000_read_phy_reg(hw, PHY_CTRL, &phy_ctrl)) {
       
  4762 			phy_ctrl |= (MII_CR_AUTO_NEG_EN |
       
  4763 				     MII_CR_RESTART_AUTO_NEG);
       
  4764 			e1000_write_phy_reg(hw, PHY_CTRL, phy_ctrl);
       
  4765 		}
       
  4766 	}
       
  4767 	/* Restart process after E1000_SMARTSPEED_MAX iterations */
       
  4768 	if (adapter->smartspeed++ == E1000_SMARTSPEED_MAX)
       
  4769 		adapter->smartspeed = 0;
       
  4770 }
       
  4771 
       
  4772 /**
       
   4773  * e1000_ioctl - handle device-specific ioctl requests

   4774  * @netdev: network interface device structure

   4775  * @ifr: interface request data

   4776  * @cmd: ioctl command
       
  4777  **/
       
  4778 
       
  4779 static int e1000_ioctl(struct net_device *netdev, struct ifreq *ifr, int cmd)
       
  4780 {
       
  4781 	switch (cmd) {
       
  4782 	case SIOCGMIIPHY:
       
  4783 	case SIOCGMIIREG:
       
  4784 	case SIOCSMIIREG:
       
  4785 		return e1000_mii_ioctl(netdev, ifr, cmd);
       
  4786 	default:
       
  4787 		return -EOPNOTSUPP;
       
  4788 	}
       
  4789 }
       
  4790 
       
  4791 /**
       
   4792  * e1000_mii_ioctl - handle MII (PHY register) ioctl requests

   4793  * @netdev: network interface device structure

   4794  * @ifr: interface request data

   4795  * @cmd: ioctl command (SIOCGMIIPHY, SIOCGMIIREG or SIOCSMIIREG)
       
  4796  **/
       
  4797 
       
  4798 static int e1000_mii_ioctl(struct net_device *netdev, struct ifreq *ifr,
       
  4799 			   int cmd)
       
  4800 {
       
  4801 	struct e1000_adapter *adapter = netdev_priv(netdev);
       
  4802 	struct e1000_hw *hw = &adapter->hw;
       
  4803 	struct mii_ioctl_data *data = if_mii(ifr);
       
  4804 	int retval;
       
  4805 	u16 mii_reg;
       
  4806 	unsigned long flags;
       
  4807 
       
  4808 	if (hw->media_type != e1000_media_type_copper)
       
  4809 		return -EOPNOTSUPP;
       
  4810 
       
  4811 	switch (cmd) {
       
  4812 	case SIOCGMIIPHY:
       
  4813 		data->phy_id = hw->phy_addr;
       
  4814 		break;
       
  4815 	case SIOCGMIIREG:
       
  4816 		if (adapter->ecdev) {
       
  4817 			return -EPERM;
       
  4818 		}
       
  4819 		spin_lock_irqsave(&adapter->stats_lock, flags);
       
  4820 		if (e1000_read_phy_reg(hw, data->reg_num & 0x1F,
       
  4821 				   &data->val_out)) {
       
  4822 			spin_unlock_irqrestore(&adapter->stats_lock, flags);
       
  4823 			return -EIO;
       
  4824 		}
       
  4825 		spin_unlock_irqrestore(&adapter->stats_lock, flags);
       
  4826 		break;
       
  4827 	case SIOCSMIIREG:
       
  4828 		if (adapter->ecdev) {
       
  4829 			return -EPERM;
       
  4830 		}
       
  4831 		if (data->reg_num & ~(0x1F))
       
  4832 			return -EFAULT;
       
  4833 		mii_reg = data->val_in;
       
  4834 		spin_lock_irqsave(&adapter->stats_lock, flags);
       
  4835 		if (e1000_write_phy_reg(hw, data->reg_num,
       
  4836 					mii_reg)) {
       
  4837 			spin_unlock_irqrestore(&adapter->stats_lock, flags);
       
  4838 			return -EIO;
       
  4839 		}
       
  4840 		spin_unlock_irqrestore(&adapter->stats_lock, flags);
       
  4841 		if (hw->media_type == e1000_media_type_copper) {
       
  4842 			switch (data->reg_num) {
       
  4843 			case PHY_CTRL:
       
  4844 				if (mii_reg & MII_CR_POWER_DOWN)
       
  4845 					break;
       
  4846 				if (mii_reg & MII_CR_AUTO_NEG_EN) {
       
  4847 					hw->autoneg = 1;
       
  4848 					hw->autoneg_advertised = 0x2F;
       
  4849 				} else {
       
  4850 					u32 speed;
       
  4851 					if (mii_reg & 0x40)
       
  4852 						speed = SPEED_1000;
       
  4853 					else if (mii_reg & 0x2000)
       
  4854 						speed = SPEED_100;
       
  4855 					else
       
  4856 						speed = SPEED_10;
       
  4857 					retval = e1000_set_spd_dplx(
       
  4858 						adapter, speed,
       
  4859 						((mii_reg & 0x100)
       
  4860 						 ? DUPLEX_FULL :
       
  4861 						 DUPLEX_HALF));
       
  4862 					if (retval)
       
  4863 						return retval;
       
  4864 				}
       
  4865 				if (netif_running(adapter->netdev))
       
  4866 					e1000_reinit_locked(adapter);
       
  4867 				else
       
  4868 					e1000_reset(adapter);
       
  4869 				break;
       
  4870 			case M88E1000_PHY_SPEC_CTRL:
       
  4871 			case M88E1000_EXT_PHY_SPEC_CTRL:
       
  4872 				if (e1000_phy_reset(hw))
       
  4873 					return -EIO;
       
  4874 				break;
       
  4875 			}
       
  4876 		} else {
       
  4877 			switch (data->reg_num) {
       
  4878 			case PHY_CTRL:
       
  4879 				if (mii_reg & MII_CR_POWER_DOWN)
       
  4880 					break;
       
  4881 				if (netif_running(adapter->netdev))
       
  4882 					e1000_reinit_locked(adapter);
       
  4883 				else
       
  4884 					e1000_reset(adapter);
       
  4885 				break;
       
  4886 			}
       
  4887 		}
       
  4888 		break;
       
  4889 	default:
       
  4890 		return -EOPNOTSUPP;
       
  4891 	}
       
  4892 	return E1000_SUCCESS;
       
  4893 }
       
  4894 
       
  4895 void e1000_pci_set_mwi(struct e1000_hw *hw)
       
  4896 {
       
  4897 	struct e1000_adapter *adapter = hw->back;
       
  4898 	int ret_val = pci_set_mwi(adapter->pdev);
       
  4899 
       
  4900 	if (ret_val)
       
  4901 		e_err(probe, "Error in setting MWI\n");
       
  4902 }
       
  4903 
       
  4904 void e1000_pci_clear_mwi(struct e1000_hw *hw)
       
  4905 {
       
  4906 	struct e1000_adapter *adapter = hw->back;
       
  4907 
       
  4908 	pci_clear_mwi(adapter->pdev);
       
  4909 }
       
  4910 
       
  4911 int e1000_pcix_get_mmrbc(struct e1000_hw *hw)
       
  4912 {
       
  4913 	struct e1000_adapter *adapter = hw->back;
       
  4914 	return pcix_get_mmrbc(adapter->pdev);
       
  4915 }
       
  4916 
       
  4917 void e1000_pcix_set_mmrbc(struct e1000_hw *hw, int mmrbc)
       
  4918 {
       
  4919 	struct e1000_adapter *adapter = hw->back;
       
  4920 	pcix_set_mmrbc(adapter->pdev, mmrbc);
       
  4921 }
       
  4922 
       
  4923 void e1000_io_write(struct e1000_hw *hw, unsigned long port, u32 value)
       
  4924 {
       
  4925 	outl(value, port);
       
  4926 }
       
  4927 
       
  4928 static bool e1000_vlan_used(struct e1000_adapter *adapter)
       
  4929 {
       
  4930 	u16 vid;
       
  4931 
       
  4932 	for_each_set_bit(vid, adapter->active_vlans, VLAN_N_VID)
       
  4933 		return true;
       
  4934 	return false;
       
  4935 }
       
  4936 
       
  4937 static void __e1000_vlan_mode(struct e1000_adapter *adapter,
       
  4938 			      netdev_features_t features)
       
  4939 {
       
  4940 	struct e1000_hw *hw = &adapter->hw;
       
  4941 	u32 ctrl;
       
  4942 
       
  4943 	ctrl = er32(CTRL);
       
  4944 	if (features & NETIF_F_HW_VLAN_RX) {
       
  4945 		/* enable VLAN tag insert/strip */
       
  4946 		ctrl |= E1000_CTRL_VME;
       
  4947 	} else {
       
  4948 		/* disable VLAN tag insert/strip */
       
  4949 		ctrl &= ~E1000_CTRL_VME;
       
  4950 	}
       
  4951 	ew32(CTRL, ctrl);
       
  4952 }
       
  4953 static void e1000_vlan_filter_on_off(struct e1000_adapter *adapter,
       
  4954 				     bool filter_on)
       
  4955 {
       
  4956 	struct e1000_hw *hw = &adapter->hw;
       
  4957 	u32 rctl;
       
  4958 
       
  4959 	if (!test_bit(__E1000_DOWN, &adapter->flags))
       
  4960 		e1000_irq_disable(adapter);
       
  4961 
       
  4962 	__e1000_vlan_mode(adapter, adapter->netdev->features);
       
  4963 	if (filter_on) {
       
  4964 		/* enable VLAN receive filtering */
       
  4965 		rctl = er32(RCTL);
       
  4966 		rctl &= ~E1000_RCTL_CFIEN;
       
  4967 		if (!(adapter->netdev->flags & IFF_PROMISC))
       
  4968 			rctl |= E1000_RCTL_VFE;
       
  4969 		ew32(RCTL, rctl);
       
  4970 		e1000_update_mng_vlan(adapter);
       
  4971 	} else {
       
  4972 		/* disable VLAN receive filtering */
       
  4973 		rctl = er32(RCTL);
       
  4974 		rctl &= ~E1000_RCTL_VFE;
       
  4975 		ew32(RCTL, rctl);
       
  4976 	}
       
  4977 
       
  4978 	if (!test_bit(__E1000_DOWN, &adapter->flags))
       
  4979 		e1000_irq_enable(adapter);
       
  4980 }
       
  4981 
       
  4982 static void e1000_vlan_mode(struct net_device *netdev,
       
  4983 			    netdev_features_t features)
       
  4984 {
       
  4985 	struct e1000_adapter *adapter = netdev_priv(netdev);
       
  4986 
       
  4987 	if (!test_bit(__E1000_DOWN, &adapter->flags))
       
  4988 		e1000_irq_disable(adapter);
       
  4989 
       
  4990 	__e1000_vlan_mode(adapter, features);
       
  4991 
       
  4992 	if (!test_bit(__E1000_DOWN, &adapter->flags))
       
  4993 		e1000_irq_enable(adapter);
       
  4994 }
       
  4995 
       
  4996 static int e1000_vlan_rx_add_vid(struct net_device *netdev, u16 vid)
       
  4997 {
       
  4998 	struct e1000_adapter *adapter = netdev_priv(netdev);
       
  4999 	struct e1000_hw *hw = &adapter->hw;
       
  5000 	u32 vfta, index;
       
  5001 
       
  5002 	if ((hw->mng_cookie.status &
       
  5003 	     E1000_MNG_DHCP_COOKIE_STATUS_VLAN_SUPPORT) &&
       
  5004 	    (vid == adapter->mng_vlan_id))
       
  5005 		return 0;
       
  5006 
       
  5007 	if (!e1000_vlan_used(adapter))
       
  5008 		e1000_vlan_filter_on_off(adapter, true);
       
  5009 
       
  5010 	/* add VID to filter table */
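        	/* The VFTA is an array of 128 32-bit entries: VID bits 11:5 select
        	 * the entry, bits 4:0 select the bit within it. */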
       
  5011 	index = (vid >> 5) & 0x7F;
       
  5012 	vfta = E1000_READ_REG_ARRAY(hw, VFTA, index);
       
  5013 	vfta |= (1 << (vid & 0x1F));
       
  5014 	e1000_write_vfta(hw, index, vfta);
       
  5015 
       
  5016 	set_bit(vid, adapter->active_vlans);
       
  5017 
       
  5018 	return 0;
       
  5019 }
       
  5020 
       
  5021 static int e1000_vlan_rx_kill_vid(struct net_device *netdev, u16 vid)
       
  5022 {
       
  5023 	struct e1000_adapter *adapter = netdev_priv(netdev);
       
  5024 	struct e1000_hw *hw = &adapter->hw;
       
  5025 	u32 vfta, index;
       
  5026 
       
  5027 	if (!test_bit(__E1000_DOWN, &adapter->flags))
       
  5028 		e1000_irq_disable(adapter);
       
  5029 	if (!test_bit(__E1000_DOWN, &adapter->flags))
       
  5030 		e1000_irq_enable(adapter);
       
  5031 
       
  5032 	/* remove VID from filter table */
       
  5033 	index = (vid >> 5) & 0x7F;
       
  5034 	vfta = E1000_READ_REG_ARRAY(hw, VFTA, index);
       
  5035 	vfta &= ~(1 << (vid & 0x1F));
       
  5036 	e1000_write_vfta(hw, index, vfta);
       
  5037 
       
  5038 	clear_bit(vid, adapter->active_vlans);
       
  5039 
       
  5040 	if (!e1000_vlan_used(adapter))
       
  5041 		e1000_vlan_filter_on_off(adapter, false);
       
  5042 
       
  5043 	return 0;
       
  5044 }
       
  5045 
       
  5046 static void e1000_restore_vlan(struct e1000_adapter *adapter)
       
  5047 {
       
  5048 	u16 vid;
       
  5049 
       
  5050 	if (!e1000_vlan_used(adapter))
       
  5051 		return;
       
  5052 
       
  5053 	e1000_vlan_filter_on_off(adapter, true);
       
  5054 	for_each_set_bit(vid, adapter->active_vlans, VLAN_N_VID)
       
  5055 		e1000_vlan_rx_add_vid(adapter->netdev, vid);
       
  5056 }
       
  5057 
       
  5058 int e1000_set_spd_dplx(struct e1000_adapter *adapter, u32 spd, u8 dplx)
       
  5059 {
       
  5060 	struct e1000_hw *hw = &adapter->hw;
       
  5061 
       
  5062 	hw->autoneg = 0;
       
  5063 
       
  5064 	/* Make sure dplx is at most 1 bit and lsb of speed is not set
       
  5065 	 * for the switch() below to work */
       
  5066 	if ((spd & 1) || (dplx & ~1))
       
  5067 		goto err_inval;
       
  5068 
       
   5069 	/* Fiber NICs only allow 1000 Mbps full duplex */
       
  5070 	if ((hw->media_type == e1000_media_type_fiber) &&
       
  5071 	    spd != SPEED_1000 &&
       
  5072 	    dplx != DUPLEX_FULL)
       
  5073 		goto err_inval;
       
  5074 
       
  5075 	switch (spd + dplx) {
       
  5076 	case SPEED_10 + DUPLEX_HALF:
       
  5077 		hw->forced_speed_duplex = e1000_10_half;
       
  5078 		break;
       
  5079 	case SPEED_10 + DUPLEX_FULL:
       
  5080 		hw->forced_speed_duplex = e1000_10_full;
       
  5081 		break;
       
  5082 	case SPEED_100 + DUPLEX_HALF:
       
  5083 		hw->forced_speed_duplex = e1000_100_half;
       
  5084 		break;
       
  5085 	case SPEED_100 + DUPLEX_FULL:
       
  5086 		hw->forced_speed_duplex = e1000_100_full;
       
  5087 		break;
       
  5088 	case SPEED_1000 + DUPLEX_FULL:
       
  5089 		hw->autoneg = 1;
       
  5090 		hw->autoneg_advertised = ADVERTISE_1000_FULL;
       
  5091 		break;
       
  5092 	case SPEED_1000 + DUPLEX_HALF: /* not supported */
       
  5093 	default:
       
  5094 		goto err_inval;
       
  5095 	}
       
  5096 	return 0;
       
  5097 
       
  5098 err_inval:
       
  5099 	e_err(probe, "Unsupported Speed/Duplex configuration\n");
       
  5100 	return -EINVAL;
       
  5101 }
       
  5102 
       
  5103 static int __e1000_shutdown(struct pci_dev *pdev, bool *enable_wake)
       
  5104 {
       
  5105 	struct net_device *netdev = pci_get_drvdata(pdev);
       
  5106 	struct e1000_adapter *adapter = netdev_priv(netdev);
       
  5107 	struct e1000_hw *hw = &adapter->hw;
       
  5108 	u32 ctrl, ctrl_ext, rctl, status;
       
  5109 	u32 wufc = adapter->wol;
       
  5110 #ifdef CONFIG_PM
       
  5111 	int retval = 0;
       
  5112 #endif
       
  5113 
       
  5114 	if (adapter->ecdev) {
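        	/* EtherCAT: the device is in cyclic use by the master; refuse to
        	 * prepare it for suspend or power-off. */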
       
  5115 		return -EBUSY;
       
  5116 	}
       
  5117 
       
  5118 	netif_device_detach(netdev);
       
  5119 
       
  5120 	if (netif_running(netdev)) {
       
  5121 		WARN_ON(test_bit(__E1000_RESETTING, &adapter->flags));
       
  5122 		e1000_down(adapter);
       
  5123 	}
       
  5124 
       
  5125 #ifdef CONFIG_PM
       
  5126 	retval = pci_save_state(pdev);
       
  5127 	if (retval)
       
  5128 		return retval;
       
  5129 #endif
       
  5130 
       
  5131 	status = er32(STATUS);
       
  5132 	if (status & E1000_STATUS_LU)
       
  5133 		wufc &= ~E1000_WUFC_LNKC;
       
  5134 
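        	/* If any wake-up filters remain active, keep the receiver running
        	 * and arm PME through WUC/WUFC; otherwise clear the wake-up
        	 * registers. */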
       
  5135 	if (wufc) {
       
  5136 		e1000_setup_rctl(adapter);
       
  5137 		e1000_set_rx_mode(netdev);
       
  5138 
       
  5139 		rctl = er32(RCTL);
       
  5140 
       
  5141 		/* turn on all-multi mode if wake on multicast is enabled */
       
  5142 		if (wufc & E1000_WUFC_MC)
       
  5143 			rctl |= E1000_RCTL_MPE;
       
  5144 
       
  5145 		/* enable receives in the hardware */
       
  5146 		ew32(RCTL, rctl | E1000_RCTL_EN);
       
  5147 
       
  5148 		if (hw->mac_type >= e1000_82540) {
       
  5149 			ctrl = er32(CTRL);
       
  5150 			/* advertise wake from D3Cold */
       
  5151 			#define E1000_CTRL_ADVD3WUC 0x00100000
       
  5152 			/* phy power management enable */
       
  5153 			#define E1000_CTRL_EN_PHY_PWR_MGMT 0x00200000
       
  5154 			ctrl |= E1000_CTRL_ADVD3WUC |
       
  5155 				E1000_CTRL_EN_PHY_PWR_MGMT;
       
  5156 			ew32(CTRL, ctrl);
       
  5157 		}
       
  5158 
       
  5159 		if (hw->media_type == e1000_media_type_fiber ||
       
  5160 		    hw->media_type == e1000_media_type_internal_serdes) {
       
  5161 			/* keep the laser running in D3 */
       
  5162 			ctrl_ext = er32(CTRL_EXT);
       
  5163 			ctrl_ext |= E1000_CTRL_EXT_SDP7_DATA;
       
  5164 			ew32(CTRL_EXT, ctrl_ext);
       
  5165 		}
       
  5166 
       
  5167 		ew32(WUC, E1000_WUC_PME_EN);
       
  5168 		ew32(WUFC, wufc);
       
  5169 	} else {
       
  5170 		ew32(WUC, 0);
       
  5171 		ew32(WUFC, 0);
       
  5172 	}
       
  5173 
       
  5174 	e1000_release_manageability(adapter);
       
  5175 
       
  5176 	*enable_wake = !!wufc;
       
  5177 
       
  5178 	/* make sure adapter isn't asleep if manageability is enabled */
       
  5179 	if (adapter->en_mng_pt)
       
  5180 		*enable_wake = true;
       
  5181 
       
  5182 	if (netif_running(netdev))
       
  5183 		e1000_free_irq(adapter);
       
  5184 
       
  5185 	pci_disable_device(pdev);
       
  5186 
       
  5187 	return 0;
       
  5188 }
       
  5189 
       
  5190 #ifdef CONFIG_PM
       
  5191 static int e1000_suspend(struct pci_dev *pdev, pm_message_t state)
       
  5192 {
       
  5193 	int retval;
       
  5194 	bool wake;
       
  5195 
       
  5196 	retval = __e1000_shutdown(pdev, &wake);
       
  5197 	if (retval)
       
  5198 		return retval;
       
  5199 
       
  5200 	if (wake) {
       
  5201 		pci_prepare_to_sleep(pdev);
       
  5202 	} else {
       
  5203 		pci_wake_from_d3(pdev, false);
       
  5204 		pci_set_power_state(pdev, PCI_D3hot);
       
  5205 	}
       
  5206 
       
  5207 	return 0;
       
  5208 }
       
  5209 
       
  5210 static int e1000_resume(struct pci_dev *pdev)
       
  5211 {
       
  5212 	struct net_device *netdev = pci_get_drvdata(pdev);
       
  5213 	struct e1000_adapter *adapter = netdev_priv(netdev);
       
  5214 	struct e1000_hw *hw = &adapter->hw;
       
  5215 	u32 err;
       
  5216 
       
  5217 	if (adapter->ecdev) {
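        	/* EtherCAT devices are never suspended (__e1000_shutdown returns
        	 * -EBUSY for them), so there is nothing to resume here. */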
       
  5218 		return -EBUSY;
       
  5219 	}
       
  5220 
       
  5221 	pci_set_power_state(pdev, PCI_D0);
       
  5222 	pci_restore_state(pdev);
       
  5223 	pci_save_state(pdev);
       
  5224 
       
  5225 	if (adapter->need_ioport)
       
  5226 		err = pci_enable_device(pdev);
       
  5227 	else
       
  5228 		err = pci_enable_device_mem(pdev);
       
  5229 	if (err) {
       
  5230 		pr_err("Cannot enable PCI device from suspend\n");
       
  5231 		return err;
       
  5232 	}
       
  5233 	pci_set_master(pdev);
       
  5234 
       
  5235 	pci_enable_wake(pdev, PCI_D3hot, 0);
       
  5236 	pci_enable_wake(pdev, PCI_D3cold, 0);
       
  5237 
       
  5238 	if (netif_running(netdev)) {
       
  5239 		err = e1000_request_irq(adapter);
       
  5240 		if (err)
       
  5241 			return err;
       
  5242 	}
       
  5243 
       
  5244 	e1000_power_up_phy(adapter);
       
  5245 	e1000_reset(adapter);
       
  5246 	ew32(WUS, ~0);
       
  5247 
       
  5248 	e1000_init_manageability(adapter);
       
  5249 
       
  5250 	if (netif_running(netdev))
       
  5251 		e1000_up(adapter);
       
  5252 
       
  5253 	if (!adapter->ecdev) {
       
  5254 		netif_device_attach(netdev);
       
  5255 	}
       
  5256 
       
  5257 	return 0;
       
  5258 }
       
  5259 #endif
       
  5260 
       
  5261 static void e1000_shutdown(struct pci_dev *pdev)
       
  5262 {
       
  5263 	bool wake;
       
  5264 
       
  5265 	__e1000_shutdown(pdev, &wake);
       
  5266 
       
  5267 	if (system_state == SYSTEM_POWER_OFF) {
       
  5268 		pci_wake_from_d3(pdev, wake);
       
  5269 		pci_set_power_state(pdev, PCI_D3hot);
       
  5270 	}
       
  5271 }
       
  5272 
       
  5273 #ifdef CONFIG_NET_POLL_CONTROLLER
       
  5274 /*
       
  5275  * Polling 'interrupt' - used by things like netconsole to send skbs
       
  5276  * without having to re-enable interrupts. It's not called while
       
  5277  * the interrupt routine is executing.
       
  5278  */
       
  5279 static void e1000_netpoll(struct net_device *netdev)
       
  5280 {
       
  5281 	struct e1000_adapter *adapter = netdev_priv(netdev);
       
  5282 
       
  5283 	disable_irq(adapter->pdev->irq);
       
  5284 	e1000_intr(adapter->pdev->irq, netdev);
       
  5285 	enable_irq(adapter->pdev->irq);
       
  5286 }
       
  5287 #endif
       
  5288 
       
  5289 /**
       
  5290  * e1000_io_error_detected - called when PCI error is detected
       
  5291  * @pdev: Pointer to PCI device
       
  5292  * @state: The current pci connection state
       
  5293  *
       
  5294  * This function is called after a PCI bus error affecting
       
  5295  * this device has been detected.
       
  5296  */
       
  5297 static pci_ers_result_t e1000_io_error_detected(struct pci_dev *pdev,
       
  5298 						pci_channel_state_t state)
       
  5299 {
       
  5300 	struct net_device *netdev = pci_get_drvdata(pdev);
       
  5301 	struct e1000_adapter *adapter = netdev_priv(netdev);
       
  5302 
       
  5303 	netif_device_detach(netdev);
       
  5304 
       
  5305 	if (state == pci_channel_io_perm_failure)
       
  5306 		return PCI_ERS_RESULT_DISCONNECT;
       
  5307 
       
  5308 	if (netif_running(netdev))
       
  5309 		e1000_down(adapter);
       
  5310 	pci_disable_device(pdev);
       
  5311 
       
   5312 	/* Request a slot reset. */
       
  5313 	return PCI_ERS_RESULT_NEED_RESET;
       
  5314 }
       
  5315 
       
  5316 /**
       
  5317  * e1000_io_slot_reset - called after the pci bus has been reset.
       
  5318  * @pdev: Pointer to PCI device
       
  5319  *
       
  5320  * Restart the card from scratch, as if from a cold-boot. Implementation
       
  5321  * resembles the first-half of the e1000_resume routine.
       
  5322  */
       
  5323 static pci_ers_result_t e1000_io_slot_reset(struct pci_dev *pdev)
       
  5324 {
       
  5325 	struct net_device *netdev = pci_get_drvdata(pdev);
       
  5326 	struct e1000_adapter *adapter = netdev_priv(netdev);
       
  5327 	struct e1000_hw *hw = &adapter->hw;
       
  5328 	int err;
       
  5329 
       
  5330 	if (adapter->need_ioport)
       
  5331 		err = pci_enable_device(pdev);
       
  5332 	else
       
  5333 		err = pci_enable_device_mem(pdev);
       
  5334 	if (err) {
       
  5335 		pr_err("Cannot re-enable PCI device after reset.\n");
       
  5336 		return PCI_ERS_RESULT_DISCONNECT;
       
  5337 	}
       
  5338 	pci_set_master(pdev);
       
  5339 
       
  5340 	pci_enable_wake(pdev, PCI_D3hot, 0);
       
  5341 	pci_enable_wake(pdev, PCI_D3cold, 0);
       
  5342 
       
  5343 	e1000_reset(adapter);
       
  5344 	ew32(WUS, ~0);
       
  5345 
       
  5346 	return PCI_ERS_RESULT_RECOVERED;
       
  5347 }
       
  5348 
       
  5349 /**
       
  5350  * e1000_io_resume - called when traffic can start flowing again.
       
  5351  * @pdev: Pointer to PCI device
       
  5352  *
       
  5353  * This callback is called when the error recovery driver tells us that
       
  5354  * its OK to resume normal operation. Implementation resembles the
       
  5355  * second-half of the e1000_resume routine.
       
  5356  */
       
  5357 static void e1000_io_resume(struct pci_dev *pdev)
       
  5358 {
       
  5359 	struct net_device *netdev = pci_get_drvdata(pdev);
       
  5360 	struct e1000_adapter *adapter = netdev_priv(netdev);
       
  5361 
       
  5362 	e1000_init_manageability(adapter);
       
  5363 
       
  5364 	if (netif_running(netdev)) {
       
  5365 		if (e1000_up(adapter)) {
       
  5366 			pr_info("can't bring device back up after reset\n");
       
  5367 			return;
       
  5368 		}
       
  5369 	}
       
  5370 
       
  5371 	netif_device_attach(netdev);
       
  5372 }
       
  5373 
       
  5374 /* e1000_main.c */