devices/e1000/e1000_main-3.10-ethercat.c
branch: stable-1.5
changeset: 2585:26480934a057
child: 2598:19ff84bbbcb3
/*******************************************************************************

  Intel PRO/1000 Linux driver
  Copyright(c) 1999 - 2006 Intel Corporation.

  This program is free software; you can redistribute it and/or modify it
  under the terms and conditions of the GNU General Public License,
  version 2, as published by the Free Software Foundation.

  This program is distributed in the hope it will be useful, but WITHOUT
  ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
  FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
  more details.

  You should have received a copy of the GNU General Public License along with
  this program; if not, write to the Free Software Foundation, Inc.,
  51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.

  The full GNU General Public License is included in this distribution in
  the file called "COPYING".

  Contact Information:
  Linux NICS <linux.nics@intel.com>
  e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
  Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497

  vim: noexpandtab

*******************************************************************************/

#include "e1000-3.10-ethercat.h"
#include <net/ip6_checksum.h>
#include <linux/io.h>
#include <linux/prefetch.h>
#include <linux/bitops.h>
#include <linux/if_vlan.h>

char e1000_driver_name[] = "ec_e1000";
static char e1000_driver_string[] = "Intel(R) PRO/1000 Network Driver";
#define DRV_VERSION "7.3.21-k8-NAPI"
const char e1000_driver_version[] = DRV_VERSION;
static const char e1000_copyright[] = "Copyright (c) 1999-2006 Intel Corporation.";

/* e1000_pci_tbl - PCI Device ID Table
 *
 * Last entry must be all 0s
 *
 * Macro expands to...
 *   {PCI_DEVICE(PCI_VENDOR_ID_INTEL, device_id)}
 */
static DEFINE_PCI_DEVICE_TABLE(e1000_pci_tbl) = {
	INTEL_E1000_ETHERNET_DEVICE(0x1000),
	INTEL_E1000_ETHERNET_DEVICE(0x1001),
	INTEL_E1000_ETHERNET_DEVICE(0x1004),
	INTEL_E1000_ETHERNET_DEVICE(0x1008),
	INTEL_E1000_ETHERNET_DEVICE(0x1009),
	INTEL_E1000_ETHERNET_DEVICE(0x100C),
	INTEL_E1000_ETHERNET_DEVICE(0x100D),
	INTEL_E1000_ETHERNET_DEVICE(0x100E),
	INTEL_E1000_ETHERNET_DEVICE(0x100F),
	INTEL_E1000_ETHERNET_DEVICE(0x1010),
	INTEL_E1000_ETHERNET_DEVICE(0x1011),
	INTEL_E1000_ETHERNET_DEVICE(0x1012),
	INTEL_E1000_ETHERNET_DEVICE(0x1013),
	INTEL_E1000_ETHERNET_DEVICE(0x1014),
	INTEL_E1000_ETHERNET_DEVICE(0x1015),
	INTEL_E1000_ETHERNET_DEVICE(0x1016),
	INTEL_E1000_ETHERNET_DEVICE(0x1017),
	INTEL_E1000_ETHERNET_DEVICE(0x1018),
	INTEL_E1000_ETHERNET_DEVICE(0x1019),
	INTEL_E1000_ETHERNET_DEVICE(0x101A),
	INTEL_E1000_ETHERNET_DEVICE(0x101D),
	INTEL_E1000_ETHERNET_DEVICE(0x101E),
	INTEL_E1000_ETHERNET_DEVICE(0x1026),
	INTEL_E1000_ETHERNET_DEVICE(0x1027),
	INTEL_E1000_ETHERNET_DEVICE(0x1028),
	INTEL_E1000_ETHERNET_DEVICE(0x1075),
	INTEL_E1000_ETHERNET_DEVICE(0x1076),
	INTEL_E1000_ETHERNET_DEVICE(0x1077),
	INTEL_E1000_ETHERNET_DEVICE(0x1078),
	INTEL_E1000_ETHERNET_DEVICE(0x1079),
	INTEL_E1000_ETHERNET_DEVICE(0x107A),
	INTEL_E1000_ETHERNET_DEVICE(0x107B),
	INTEL_E1000_ETHERNET_DEVICE(0x107C),
	INTEL_E1000_ETHERNET_DEVICE(0x108A),
	INTEL_E1000_ETHERNET_DEVICE(0x1099),
	INTEL_E1000_ETHERNET_DEVICE(0x10B5),
	INTEL_E1000_ETHERNET_DEVICE(0x2E6E),
	/* required last entry */
	{0,}
};

// do not auto-load driver
// MODULE_DEVICE_TABLE(pci, e1000_pci_tbl);

int e1000_up(struct e1000_adapter *adapter);
void e1000_down(struct e1000_adapter *adapter);
void e1000_reinit_locked(struct e1000_adapter *adapter);
void e1000_reset(struct e1000_adapter *adapter);
int e1000_setup_all_tx_resources(struct e1000_adapter *adapter);
int e1000_setup_all_rx_resources(struct e1000_adapter *adapter);
void e1000_free_all_tx_resources(struct e1000_adapter *adapter);
void e1000_free_all_rx_resources(struct e1000_adapter *adapter);
static int e1000_setup_tx_resources(struct e1000_adapter *adapter,
                             struct e1000_tx_ring *txdr);
static int e1000_setup_rx_resources(struct e1000_adapter *adapter,
                             struct e1000_rx_ring *rxdr);
static void e1000_free_tx_resources(struct e1000_adapter *adapter,
                             struct e1000_tx_ring *tx_ring);
static void e1000_free_rx_resources(struct e1000_adapter *adapter,
                             struct e1000_rx_ring *rx_ring);
void e1000_update_stats(struct e1000_adapter *adapter);

static int e1000_init_module(void);
static void e1000_exit_module(void);
static int e1000_probe(struct pci_dev *pdev, const struct pci_device_id *ent);
static void e1000_remove(struct pci_dev *pdev);
static int e1000_alloc_queues(struct e1000_adapter *adapter);
static int e1000_sw_init(struct e1000_adapter *adapter);
static int e1000_open(struct net_device *netdev);
static int e1000_close(struct net_device *netdev);
static void e1000_configure_tx(struct e1000_adapter *adapter);
static void e1000_configure_rx(struct e1000_adapter *adapter);
static void e1000_setup_rctl(struct e1000_adapter *adapter);
static void e1000_clean_all_tx_rings(struct e1000_adapter *adapter);
static void e1000_clean_all_rx_rings(struct e1000_adapter *adapter);
static void e1000_clean_tx_ring(struct e1000_adapter *adapter,
                                struct e1000_tx_ring *tx_ring);
static void e1000_clean_rx_ring(struct e1000_adapter *adapter,
                                struct e1000_rx_ring *rx_ring);
static void e1000_set_rx_mode(struct net_device *netdev);
static void e1000_update_phy_info_task(struct work_struct *work);
static void e1000_watchdog(struct work_struct *work);
static void e1000_82547_tx_fifo_stall_task(struct work_struct *work);
static netdev_tx_t e1000_xmit_frame(struct sk_buff *skb,
				    struct net_device *netdev);
static struct net_device_stats *e1000_get_stats(struct net_device *netdev);
static int e1000_change_mtu(struct net_device *netdev, int new_mtu);
static int e1000_set_mac(struct net_device *netdev, void *p);
void ec_poll(struct net_device *);
static irqreturn_t e1000_intr(int irq, void *data);
static bool e1000_clean_tx_irq(struct e1000_adapter *adapter,
			       struct e1000_tx_ring *tx_ring);
static int e1000_clean(struct napi_struct *napi, int budget);
static bool e1000_clean_rx_irq(struct e1000_adapter *adapter,
			       struct e1000_rx_ring *rx_ring,
			       int *work_done, int work_to_do);
static bool e1000_clean_jumbo_rx_irq(struct e1000_adapter *adapter,
				     struct e1000_rx_ring *rx_ring,
				     int *work_done, int work_to_do);
static void e1000_alloc_rx_buffers(struct e1000_adapter *adapter,
				   struct e1000_rx_ring *rx_ring,
				   int cleaned_count);
static void e1000_alloc_jumbo_rx_buffers(struct e1000_adapter *adapter,
					 struct e1000_rx_ring *rx_ring,
					 int cleaned_count);
static int e1000_ioctl(struct net_device *netdev, struct ifreq *ifr, int cmd);
static int e1000_mii_ioctl(struct net_device *netdev, struct ifreq *ifr,
			   int cmd);
static void e1000_enter_82542_rst(struct e1000_adapter *adapter);
static void e1000_leave_82542_rst(struct e1000_adapter *adapter);
static void e1000_tx_timeout(struct net_device *dev);
static void e1000_reset_task(struct work_struct *work);
static void e1000_smartspeed(struct e1000_adapter *adapter);
static int e1000_82547_fifo_workaround(struct e1000_adapter *adapter,
                                       struct sk_buff *skb);

static bool e1000_vlan_used(struct e1000_adapter *adapter);
static void e1000_vlan_mode(struct net_device *netdev,
			    netdev_features_t features);
static void e1000_vlan_filter_on_off(struct e1000_adapter *adapter,
				     bool filter_on);
static int e1000_vlan_rx_add_vid(struct net_device *netdev,
				 __be16 proto, u16 vid);
static int e1000_vlan_rx_kill_vid(struct net_device *netdev,
				  __be16 proto, u16 vid);
static void e1000_restore_vlan(struct e1000_adapter *adapter);

#ifdef CONFIG_PM
static int e1000_suspend(struct pci_dev *pdev, pm_message_t state);
static int e1000_resume(struct pci_dev *pdev);
#endif
static void e1000_shutdown(struct pci_dev *pdev);

#ifdef CONFIG_NET_POLL_CONTROLLER
/* for netdump / net console */
static void e1000_netpoll(struct net_device *netdev);
#endif

#define COPYBREAK_DEFAULT 256
static unsigned int copybreak __read_mostly = COPYBREAK_DEFAULT;
module_param(copybreak, uint, 0644);
MODULE_PARM_DESC(copybreak,
	"Maximum size of packet that is copied to a new buffer on receive");

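/* copybreak is also writable at runtime (permissions 0644). Illustrative
 * usage, assuming the module file is named ec_e1000.ko to match
 * e1000_driver_name above:
 *
 *   # modprobe ec_e1000 copybreak=0   (disable receive copying entirely)
 *   # echo 128 > /sys/module/ec_e1000/parameters/copybreak
 */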
       
static pci_ers_result_t e1000_io_error_detected(struct pci_dev *pdev,
                     pci_channel_state_t state);
static pci_ers_result_t e1000_io_slot_reset(struct pci_dev *pdev);
static void e1000_io_resume(struct pci_dev *pdev);

static const struct pci_error_handlers e1000_err_handler = {
	.error_detected = e1000_io_error_detected,
	.slot_reset = e1000_io_slot_reset,
	.resume = e1000_io_resume,
};

static struct pci_driver e1000_driver = {
	.name     = e1000_driver_name,
	.id_table = e1000_pci_tbl,
	.probe    = e1000_probe,
	.remove   = e1000_remove,
#ifdef CONFIG_PM
	/* Power Management Hooks */
	.suspend  = e1000_suspend,
	.resume   = e1000_resume,
#endif
	.shutdown = e1000_shutdown,
	.err_handler = &e1000_err_handler
};

MODULE_AUTHOR("Florian Pose <fp@igh-essen.com>");
MODULE_DESCRIPTION("EtherCAT-capable Intel(R) PRO/1000 Network Driver");
MODULE_LICENSE("GPL");
MODULE_VERSION(DRV_VERSION);

#define DEFAULT_MSG_ENABLE (NETIF_MSG_DRV|NETIF_MSG_PROBE|NETIF_MSG_LINK)
static int debug = -1;
module_param(debug, int, 0);
MODULE_PARM_DESC(debug, "Debug level (0=none,...,16=all)");

/**
 * e1000_get_hw_dev - return the net_device backing a hw struct
 * @hw: pointer to the hardware structure
 *
 * Used by the hardware layer to print debugging information.
 **/
struct net_device *e1000_get_hw_dev(struct e1000_hw *hw)
{
	struct e1000_adapter *adapter = hw->back;
	return adapter->netdev;
}

/**
 * e1000_init_module - Driver Registration Routine
 *
 * e1000_init_module is the first routine called when the driver is
 * loaded. All it does is register with the PCI subsystem.
 **/
static int __init e1000_init_module(void)
{
	int ret;
	pr_info("%s - version %s\n", e1000_driver_string, e1000_driver_version);

	pr_info("%s\n", e1000_copyright);

	ret = pci_register_driver(&e1000_driver);
	if (copybreak != COPYBREAK_DEFAULT) {
		if (copybreak == 0)
			pr_info("copybreak disabled\n");
		else
			pr_info("copybreak enabled for "
				   "packets <= %u bytes\n", copybreak);
	}
	return ret;
}

module_init(e1000_init_module);

/**
 * e1000_exit_module - Driver Exit Cleanup Routine
 *
 * e1000_exit_module is called just before the driver is removed
 * from memory.
 **/
static void __exit e1000_exit_module(void)
{
	pci_unregister_driver(&e1000_driver);
}

module_exit(e1000_exit_module);

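/* Devices claimed by an EtherCAT master (adapter->ecdev set) run without
 * interrupts: the master drives the hardware synchronously through
 * ec_poll(). The IRQ helpers below therefore return early for such
 * devices.
 */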
       
static int e1000_request_irq(struct e1000_adapter *adapter)
{
	struct net_device *netdev = adapter->netdev;
	irq_handler_t handler = e1000_intr;
	int irq_flags = IRQF_SHARED;
	int err;

	if (adapter->ecdev) {
		return 0;
	}

	err = request_irq(adapter->pdev->irq, handler, irq_flags, netdev->name,
	                  netdev);
	if (err) {
		e_err(probe, "Unable to allocate interrupt Error: %d\n", err);
	}

	return err;
}

static void e1000_free_irq(struct e1000_adapter *adapter)
{
	struct net_device *netdev = adapter->netdev;

	if (adapter->ecdev) {
		return;
	}

	free_irq(adapter->pdev->irq, netdev);
}

/**
 * e1000_irq_disable - Mask off interrupt generation on the NIC
 * @adapter: board private structure
 **/
static void e1000_irq_disable(struct e1000_adapter *adapter)
{
	struct e1000_hw *hw = &adapter->hw;

	if (adapter->ecdev) {
		return;
	}

	ew32(IMC, ~0);
	E1000_WRITE_FLUSH();
	synchronize_irq(adapter->pdev->irq);
}

/**
 * e1000_irq_enable - Enable default interrupt generation settings
 * @adapter: board private structure
 **/
static void e1000_irq_enable(struct e1000_adapter *adapter)
{
	struct e1000_hw *hw = &adapter->hw;

	if (adapter->ecdev) {
		return;
	}

	ew32(IMS, IMS_ENABLE_MASK);
	E1000_WRITE_FLUSH();
}

static void e1000_update_mng_vlan(struct e1000_adapter *adapter)
{
	struct e1000_hw *hw = &adapter->hw;
	struct net_device *netdev = adapter->netdev;
	u16 vid = hw->mng_cookie.vlan_id;
	u16 old_vid = adapter->mng_vlan_id;

	if (!e1000_vlan_used(adapter))
		return;

	if (!test_bit(vid, adapter->active_vlans)) {
		if (hw->mng_cookie.status &
		    E1000_MNG_DHCP_COOKIE_STATUS_VLAN_SUPPORT) {
			e1000_vlan_rx_add_vid(netdev, htons(ETH_P_8021Q), vid);
			adapter->mng_vlan_id = vid;
		} else {
			adapter->mng_vlan_id = E1000_MNG_VLAN_NONE;
		}
		if ((old_vid != (u16)E1000_MNG_VLAN_NONE) &&
		    (vid != old_vid) &&
		    !test_bit(old_vid, adapter->active_vlans))
			e1000_vlan_rx_kill_vid(netdev, htons(ETH_P_8021Q),
					       old_vid);
	} else {
		adapter->mng_vlan_id = vid;
	}
}

static void e1000_init_manageability(struct e1000_adapter *adapter)
{
	struct e1000_hw *hw = &adapter->hw;

	if (adapter->en_mng_pt) {
		u32 manc = er32(MANC);

		/* disable hardware interception of ARP */
		manc &= ~(E1000_MANC_ARP_EN);

		ew32(MANC, manc);
	}
}

static void e1000_release_manageability(struct e1000_adapter *adapter)
{
	struct e1000_hw *hw = &adapter->hw;

	if (adapter->en_mng_pt) {
		u32 manc = er32(MANC);

		/* re-enable hardware interception of ARP */
		manc |= E1000_MANC_ARP_EN;

		ew32(MANC, manc);
	}
}

/**
 * e1000_configure - configure the hardware for RX and TX
 * @adapter: board private structure
 **/
static void e1000_configure(struct e1000_adapter *adapter)
{
	struct net_device *netdev = adapter->netdev;
	int i;

	e1000_set_rx_mode(netdev);

	e1000_restore_vlan(adapter);
	e1000_init_manageability(adapter);

	e1000_configure_tx(adapter);
	e1000_setup_rctl(adapter);
	e1000_configure_rx(adapter);
	/* call E1000_DESC_UNUSED which always leaves
	 * at least 1 descriptor unused to make sure
	 * next_to_use != next_to_clean
	 */
	for (i = 0; i < adapter->num_rx_queues; i++) {
		struct e1000_rx_ring *ring = &adapter->rx_ring[i];
		if (adapter->ecdev) {
			/* fill rx ring completely! */
			adapter->alloc_rx_buf(adapter, ring, ring->count);
		} else {
			/* this one leaves the last ring element unallocated! */
			adapter->alloc_rx_buf(adapter, ring,
					E1000_DESC_UNUSED(ring));
		}
	}
}

int e1000_up(struct e1000_adapter *adapter)
{
	struct e1000_hw *hw = &adapter->hw;

	/* hardware has been reset, we need to reload some things */
	e1000_configure(adapter);

	clear_bit(__E1000_DOWN, &adapter->flags);

	if (!adapter->ecdev) {
		napi_enable(&adapter->napi);

		e1000_irq_enable(adapter);

		netif_wake_queue(adapter->netdev);

		/* fire a link change interrupt to start the watchdog */
		ew32(ICS, E1000_ICS_LSC);
	}
	return 0;
}

/**
 * e1000_power_up_phy - restore link in case the phy was powered down
 * @adapter: address of board private structure
 *
 * The phy may be powered down to save power and turn off link when the
 * driver is unloaded and wake on lan is not enabled (among others)
 * *** this routine MUST be followed by a call to e1000_reset ***
 **/
void e1000_power_up_phy(struct e1000_adapter *adapter)
{
	struct e1000_hw *hw = &adapter->hw;
	u16 mii_reg = 0;

	/* Just clear the power down bit to wake the phy back up */
	if (hw->media_type == e1000_media_type_copper) {
		/* according to the manual, the phy will retain its
		 * settings across a power-down/up cycle
		 */
		e1000_read_phy_reg(hw, PHY_CTRL, &mii_reg);
		mii_reg &= ~MII_CR_POWER_DOWN;
		e1000_write_phy_reg(hw, PHY_CTRL, mii_reg);
	}
}

static void e1000_power_down_phy(struct e1000_adapter *adapter)
{
	struct e1000_hw *hw = &adapter->hw;

	/* Power down the PHY so no link is implied when interface is down.
	 * The PHY cannot be powered down if any of the following is true:
	 * (a) WoL is enabled
	 * (b) AMT is active
	 * (c) SoL/IDER session is active
	 */
	if (!adapter->wol && hw->mac_type >= e1000_82540 &&
	    hw->media_type == e1000_media_type_copper) {
		u16 mii_reg = 0;

		switch (hw->mac_type) {
		case e1000_82540:
		case e1000_82545:
		case e1000_82545_rev_3:
		case e1000_82546:
		case e1000_ce4100:
		case e1000_82546_rev_3:
		case e1000_82541:
		case e1000_82541_rev_2:
		case e1000_82547:
		case e1000_82547_rev_2:
			if (er32(MANC) & E1000_MANC_SMBUS_EN)
				goto out;
			break;
		default:
			goto out;
		}
		e1000_read_phy_reg(hw, PHY_CTRL, &mii_reg);
		mii_reg |= MII_CR_POWER_DOWN;
		e1000_write_phy_reg(hw, PHY_CTRL, mii_reg);
		msleep(1);
	}
out:
	return;
}

static void e1000_down_and_stop(struct e1000_adapter *adapter)
{
	set_bit(__E1000_DOWN, &adapter->flags);

	/* Only kill reset task if adapter is not resetting */
	if (!test_bit(__E1000_RESETTING, &adapter->flags))
		cancel_work_sync(&adapter->reset_task);

	if (!adapter->ecdev) {
		cancel_delayed_work_sync(&adapter->watchdog_task);
		cancel_delayed_work_sync(&adapter->phy_info_task);
		cancel_delayed_work_sync(&adapter->fifo_stall_task);
	}
}

void e1000_down(struct e1000_adapter *adapter)
{
	struct e1000_hw *hw = &adapter->hw;
	struct net_device *netdev = adapter->netdev;
	u32 rctl, tctl;

	/* disable receives in the hardware */
	rctl = er32(RCTL);
	ew32(RCTL, rctl & ~E1000_RCTL_EN);

	if (!adapter->ecdev) {
		/* flush and sleep below */
		netif_tx_disable(netdev);
	}

	/* disable transmits in the hardware */
	tctl = er32(TCTL);
	tctl &= ~E1000_TCTL_EN;
	ew32(TCTL, tctl);
	/* flush both disables and wait for them to finish */
	E1000_WRITE_FLUSH();
	msleep(10);

	if (!adapter->ecdev) {
		napi_disable(&adapter->napi);

		e1000_irq_disable(adapter);
	}

	/* Setting DOWN must be after irq_disable to prevent
	 * a screaming interrupt.  Setting DOWN also prevents
	 * tasks from rescheduling.
	 */
	e1000_down_and_stop(adapter);

	adapter->link_speed = 0;
	adapter->link_duplex = 0;

	if (!adapter->ecdev) {
		netif_carrier_off(netdev);
	}

	e1000_reset(adapter);
	e1000_clean_all_tx_rings(adapter);
	e1000_clean_all_rx_rings(adapter);
}

static void e1000_reinit_safe(struct e1000_adapter *adapter)
{
	while (test_and_set_bit(__E1000_RESETTING, &adapter->flags))
		msleep(1);
	mutex_lock(&adapter->mutex);
	e1000_down(adapter);
	e1000_up(adapter);
	mutex_unlock(&adapter->mutex);
	clear_bit(__E1000_RESETTING, &adapter->flags);
}

void e1000_reinit_locked(struct e1000_adapter *adapter)
{
	/* if rtnl_lock is not held the call path is bogus */
	ASSERT_RTNL();
	WARN_ON(in_interrupt());
	while (test_and_set_bit(__E1000_RESETTING, &adapter->flags))
		msleep(1);
	e1000_down(adapter);
	e1000_up(adapter);
	clear_bit(__E1000_RESETTING, &adapter->flags);
}

void e1000_reset(struct e1000_adapter *adapter)
{
	struct e1000_hw *hw = &adapter->hw;
	u32 pba = 0, tx_space, min_tx_space, min_rx_space;
	bool legacy_pba_adjust = false;
	u16 hwm;

	/* Repartition PBA for an MTU greater than 9k.
	 * To take effect, CTRL.RST is required.
	 */
	switch (hw->mac_type) {
	case e1000_82542_rev2_0:
	case e1000_82542_rev2_1:
	case e1000_82543:
	case e1000_82544:
	case e1000_82540:
	case e1000_82541:
	case e1000_82541_rev_2:
		legacy_pba_adjust = true;
		pba = E1000_PBA_48K;
		break;
	case e1000_82545:
	case e1000_82545_rev_3:
	case e1000_82546:
	case e1000_ce4100:
	case e1000_82546_rev_3:
		pba = E1000_PBA_48K;
		break;
	case e1000_82547:
	case e1000_82547_rev_2:
		legacy_pba_adjust = true;
		pba = E1000_PBA_30K;
		break;
	case e1000_undefined:
	case e1000_num_macs:
		break;
	}

	if (legacy_pba_adjust) {
		if (hw->max_frame_size > E1000_RXBUFFER_8192)
			pba -= 8; /* allocate more FIFO for Tx */

		if (hw->mac_type == e1000_82547) {
			adapter->tx_fifo_head = 0;
			adapter->tx_head_addr = pba << E1000_TX_HEAD_ADDR_SHIFT;
			adapter->tx_fifo_size =
				(E1000_PBA_40K - pba) << E1000_PBA_BYTES_SHIFT;
			atomic_set(&adapter->tx_fifo_stall, 0);
		}
	} else if (hw->max_frame_size > ETH_FRAME_LEN + ETH_FCS_LEN) {
		/* adjust PBA for jumbo frames */
		ew32(PBA, pba);

		/* To maintain wire speed transmits, the Tx FIFO should be
		 * large enough to accommodate two full transmit packets,
		 * rounded up to the next 1KB and expressed in KB.  Likewise,
		 * the Rx FIFO should be large enough to accommodate at least
		 * one full receive packet and is similarly rounded up and
		 * expressed in KB.
		 */
		pba = er32(PBA);
		/* upper 16 bits has Tx packet buffer allocation size in KB */
		tx_space = pba >> 16;
		/* lower 16 bits has Rx packet buffer allocation size in KB */
		pba &= 0xffff;
		/* the Tx fifo also stores 16 bytes of information about the Tx
		 * but don't include ethernet FCS because hardware appends it
		 */
		min_tx_space = (hw->max_frame_size +
		                sizeof(struct e1000_tx_desc) -
		                ETH_FCS_LEN) * 2;
		min_tx_space = ALIGN(min_tx_space, 1024);
		min_tx_space >>= 10;
		/* software strips receive CRC, so leave room for it */
		min_rx_space = hw->max_frame_size;
		min_rx_space = ALIGN(min_rx_space, 1024);
		min_rx_space >>= 10;

		/* If current Tx allocation is less than the min Tx FIFO size,
		 * and the min Tx FIFO size is less than the current Rx FIFO
		 * allocation, take space away from current Rx allocation
		 */
		if (tx_space < min_tx_space &&
		    ((min_tx_space - tx_space) < pba)) {
			pba = pba - (min_tx_space - tx_space);

			/* PCI/PCIx hardware has PBA alignment constraints */
			switch (hw->mac_type) {
			case e1000_82545 ... e1000_82546_rev_3:
				pba &= ~(E1000_PBA_8K - 1);
				break;
			default:
				break;
			}

			/* if short on Rx space, Rx wins and must trump Tx
			 * adjustment or use Early Receive if available
			 */
			if (pba < min_rx_space)
				pba = min_rx_space;
		}
	}

	ew32(PBA, pba);

	/* flow control settings:
	 * The high water mark must be low enough to fit one full frame
	 * (or the size used for early receive) above it in the Rx FIFO.
	 * Set it to the lower of:
	 * - 90% of the Rx FIFO size, and
	 * - the full Rx FIFO size minus the early receive size (for parts
	 *   with ERT support assuming ERT set to E1000_ERT_2048), or
	 * - the full Rx FIFO size minus one full frame
	 */
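	/* Worked example with illustrative numbers: for pba = 48 (KB) and
	 * max_frame_size = 1522, pba << 10 = 49152 bytes, so
	 * hwm = min(49152 * 9 / 10, 49152 - 1522) = min(44236, 47630)
	 *     = 44236. The 8-byte granularity masking below then gives
	 * fc_high_water = 44232 and fc_low_water = 44224.
	 */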
       
	hwm = min(((pba << 10) * 9 / 10),
		  ((pba << 10) - hw->max_frame_size));

	hw->fc_high_water = hwm & 0xFFF8;	/* 8-byte granularity */
	hw->fc_low_water = hw->fc_high_water - 8;
	hw->fc_pause_time = E1000_FC_PAUSE_TIME;
	hw->fc_send_xon = 1;
	hw->fc = hw->original_fc;

	/* Allow time for pending master requests to run */
	e1000_reset_hw(hw);
	if (hw->mac_type >= e1000_82544)
		ew32(WUC, 0);

	if (e1000_init_hw(hw))
		e_dev_err("Hardware Error\n");
	e1000_update_mng_vlan(adapter);

	/* if (adapter->hwflags & HWFLAGS_PHY_PWR_BIT) { */
	if (hw->mac_type >= e1000_82544 &&
	    hw->autoneg == 1 &&
	    hw->autoneg_advertised == ADVERTISE_1000_FULL) {
		u32 ctrl = er32(CTRL);
		/* clear phy power management bit if we are in gig only mode,
		 * which if enabled will attempt negotiation to 100Mb, which
		 * can cause a loss of link at power off or driver unload
		 */
		ctrl &= ~E1000_CTRL_SWDPIN3;
		ew32(CTRL, ctrl);
	}

	/* Enable h/w to recognize an 802.1Q VLAN Ethernet packet */
	ew32(VET, ETHERNET_IEEE_VLAN_TYPE);

	e1000_reset_adaptive(hw);
	e1000_phy_get_info(hw, &adapter->phy_info);

	e1000_release_manageability(adapter);
}

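/* Background for the dump below: the EEPROM is considered valid when the
 * 16-bit words from offset 0 up to and including the checksum word at
 * EEPROM_CHECKSUM_REG sum to EEPROM_SUM (0xBABA in the e1000 headers), so
 * csum_new is the value the checksum word would have to hold to make the
 * image consistent.
 */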
       
/* Dump the eeprom for users having checksum issues */
static void e1000_dump_eeprom(struct e1000_adapter *adapter)
{
	struct net_device *netdev = adapter->netdev;
	struct ethtool_eeprom eeprom;
	const struct ethtool_ops *ops = netdev->ethtool_ops;
	u8 *data;
	int i;
	u16 csum_old, csum_new = 0;

	eeprom.len = ops->get_eeprom_len(netdev);
	eeprom.offset = 0;

	data = kmalloc(eeprom.len, GFP_KERNEL);
	if (!data)
		return;

	ops->get_eeprom(netdev, &eeprom, data);

	csum_old = (data[EEPROM_CHECKSUM_REG * 2]) +
		   (data[EEPROM_CHECKSUM_REG * 2 + 1] << 8);
	for (i = 0; i < EEPROM_CHECKSUM_REG * 2; i += 2)
		csum_new += data[i] + (data[i + 1] << 8);
	csum_new = EEPROM_SUM - csum_new;

	pr_err("/*********************/\n");
	pr_err("Current EEPROM Checksum : 0x%04x\n", csum_old);
	pr_err("Calculated              : 0x%04x\n", csum_new);

	pr_err("Offset    Values\n");
	pr_err("========  ======\n");
	print_hex_dump(KERN_ERR, "", DUMP_PREFIX_OFFSET, 16, 1, data, 128, 0);

	pr_err("Include this output when contacting your support provider.\n");
	pr_err("This is not a software error! Something bad happened to\n");
	pr_err("your hardware or EEPROM image. Ignoring this problem could\n");
	pr_err("result in further problems, possibly loss of data,\n");
	pr_err("corruption or system hangs!\n");
	pr_err("The MAC Address will be reset to 00:00:00:00:00:00,\n");
	pr_err("which is invalid and requires you to set the proper MAC\n");
	pr_err("address manually before continuing to enable this network\n");
	pr_err("device. Please inspect the EEPROM dump and report the\n");
	pr_err("issue to your hardware vendor or Intel Customer Support.\n");
	pr_err("/*********************/\n");

	kfree(data);
}

/**
 * e1000_is_need_ioport - determine if an adapter needs ioport resources or not
 * @pdev: PCI device information struct
 *
 * Return true if an adapter needs ioport resources
 **/
static int e1000_is_need_ioport(struct pci_dev *pdev)
{
	switch (pdev->device) {
	case E1000_DEV_ID_82540EM:
	case E1000_DEV_ID_82540EM_LOM:
	case E1000_DEV_ID_82540EP:
	case E1000_DEV_ID_82540EP_LOM:
	case E1000_DEV_ID_82540EP_LP:
	case E1000_DEV_ID_82541EI:
	case E1000_DEV_ID_82541EI_MOBILE:
	case E1000_DEV_ID_82541ER:
	case E1000_DEV_ID_82541ER_LOM:
	case E1000_DEV_ID_82541GI:
	case E1000_DEV_ID_82541GI_LF:
	case E1000_DEV_ID_82541GI_MOBILE:
	case E1000_DEV_ID_82544EI_COPPER:
	case E1000_DEV_ID_82544EI_FIBER:
	case E1000_DEV_ID_82544GC_COPPER:
	case E1000_DEV_ID_82544GC_LOM:
	case E1000_DEV_ID_82545EM_COPPER:
	case E1000_DEV_ID_82545EM_FIBER:
	case E1000_DEV_ID_82546EB_COPPER:
	case E1000_DEV_ID_82546EB_FIBER:
	case E1000_DEV_ID_82546EB_QUAD_COPPER:
		return true;
	default:
		return false;
	}
}

static netdev_features_t e1000_fix_features(struct net_device *netdev,
	netdev_features_t features)
{
	/* Since there is no support for separate Rx/Tx vlan accel
	 * enable/disable make sure Tx flag is always in same state as Rx.
	 */
	if (features & NETIF_F_HW_VLAN_CTAG_RX)
		features |= NETIF_F_HW_VLAN_CTAG_TX;
	else
		features &= ~NETIF_F_HW_VLAN_CTAG_TX;

	return features;
}

static int e1000_set_features(struct net_device *netdev,
	netdev_features_t features)
{
	struct e1000_adapter *adapter = netdev_priv(netdev);
	netdev_features_t changed = features ^ netdev->features;

	if (changed & NETIF_F_HW_VLAN_CTAG_RX)
		e1000_vlan_mode(netdev, features);

	if (!(changed & (NETIF_F_RXCSUM | NETIF_F_RXALL)))
		return 0;

	netdev->features = features;
	adapter->rx_csum = !!(features & NETIF_F_RXCSUM);

	if (netif_running(netdev))
		e1000_reinit_locked(adapter);
	else
		e1000_reset(adapter);

	return 0;
}

static const struct net_device_ops e1000_netdev_ops = {
	.ndo_open		= e1000_open,
	.ndo_stop		= e1000_close,
	.ndo_start_xmit		= e1000_xmit_frame,
	.ndo_get_stats		= e1000_get_stats,
	.ndo_set_rx_mode	= e1000_set_rx_mode,
	.ndo_set_mac_address	= e1000_set_mac,
	.ndo_tx_timeout		= e1000_tx_timeout,
	.ndo_change_mtu		= e1000_change_mtu,
	.ndo_do_ioctl		= e1000_ioctl,
	.ndo_validate_addr	= eth_validate_addr,
	.ndo_vlan_rx_add_vid	= e1000_vlan_rx_add_vid,
	.ndo_vlan_rx_kill_vid	= e1000_vlan_rx_kill_vid,
#ifdef CONFIG_NET_POLL_CONTROLLER
	.ndo_poll_controller	= e1000_netpoll,
#endif
	.ndo_fix_features	= e1000_fix_features,
	.ndo_set_features	= e1000_set_features,
};

/**
 * e1000_init_hw_struct - initialize members of hw struct
 * @adapter: board private struct
 * @hw: structure used by e1000_hw.c
 *
 * Factors out initialization of the e1000_hw struct to its own function
 * that can be called very early at init (just after struct allocation).
 * Fields are initialized based on PCI device information and
 * OS network device settings (MTU size).
 * Returns negative error codes if MAC type setup fails.
 */
static int e1000_init_hw_struct(struct e1000_adapter *adapter,
				struct e1000_hw *hw)
{
	struct pci_dev *pdev = adapter->pdev;

	/* PCI config space info */
	hw->vendor_id = pdev->vendor;
	hw->device_id = pdev->device;
	hw->subsystem_vendor_id = pdev->subsystem_vendor;
	hw->subsystem_id = pdev->subsystem_device;
	hw->revision_id = pdev->revision;

	pci_read_config_word(pdev, PCI_COMMAND, &hw->pci_cmd_word);

	hw->max_frame_size = adapter->netdev->mtu +
			     ENET_HEADER_SIZE + ETHERNET_FCS_SIZE;
	hw->min_frame_size = MINIMUM_ETHERNET_FRAME_SIZE;

	/* identify the MAC */
	if (e1000_set_mac_type(hw)) {
		e_err(probe, "Unknown MAC Type\n");
		return -EIO;
	}

	switch (hw->mac_type) {
	default:
		break;
	case e1000_82541:
	case e1000_82547:
	case e1000_82541_rev_2:
	case e1000_82547_rev_2:
		hw->phy_init_script = 1;
		break;
	}

	e1000_set_media_type(hw);
	e1000_get_bus_info(hw);

	hw->wait_autoneg_complete = false;
	hw->tbi_compatibility_en = true;
	hw->adaptive_ifs = true;

	/* Copper options */

	if (hw->media_type == e1000_media_type_copper) {
		hw->mdix = AUTO_ALL_MODES;
		hw->disable_polarity_correction = false;
		hw->master_slave = E1000_MASTER_SLAVE;
	}

	return 0;
}

/**
 * e1000_probe - Device Initialization Routine
 * @pdev: PCI device information struct
 * @ent: entry in e1000_pci_tbl
 *
 * Returns 0 on success, negative on failure
 *
 * e1000_probe initializes an adapter identified by a pci_dev structure.
 * The OS initialization, configuring of the adapter private structure,
 * and a hardware reset occur.
 **/
static int e1000_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
{
	struct net_device *netdev;
	struct e1000_adapter *adapter;
	struct e1000_hw *hw;

	static int cards_found = 0;
	static int global_quad_port_a = 0; /* global ksp3 port a indication */
	int i, err, pci_using_dac;
	u16 eeprom_data = 0;
	u16 tmp = 0;
	u16 eeprom_apme_mask = E1000_EEPROM_APME;
	int bars, need_ioport;

	/* do not allocate ioport bars when not needed */
	need_ioport = e1000_is_need_ioport(pdev);
	if (need_ioport) {
		bars = pci_select_bars(pdev, IORESOURCE_MEM | IORESOURCE_IO);
		err = pci_enable_device(pdev);
	} else {
		bars = pci_select_bars(pdev, IORESOURCE_MEM);
		err = pci_enable_device_mem(pdev);
	}
	if (err)
		return err;

	err = pci_request_selected_regions(pdev, bars, e1000_driver_name);
	if (err)
		goto err_pci_reg;

	pci_set_master(pdev);
	err = pci_save_state(pdev);
	if (err)
		goto err_alloc_etherdev;

	err = -ENOMEM;
	netdev = alloc_etherdev(sizeof(struct e1000_adapter));
	if (!netdev)
		goto err_alloc_etherdev;

	SET_NETDEV_DEV(netdev, &pdev->dev);

	pci_set_drvdata(pdev, netdev);
	adapter = netdev_priv(netdev);
	adapter->netdev = netdev;
	adapter->pdev = pdev;
	adapter->msg_enable = netif_msg_init(debug, DEFAULT_MSG_ENABLE);
	adapter->bars = bars;
	adapter->need_ioport = need_ioport;

	hw = &adapter->hw;
	hw->back = adapter;

	err = -EIO;
	hw->hw_addr = pci_ioremap_bar(pdev, BAR_0);
	if (!hw->hw_addr)
		goto err_ioremap;

	if (adapter->need_ioport) {
		for (i = BAR_1; i <= BAR_5; i++) {
			if (pci_resource_len(pdev, i) == 0)
				continue;
			if (pci_resource_flags(pdev, i) & IORESOURCE_IO) {
				hw->io_base = pci_resource_start(pdev, i);
				break;
			}
		}
	}

	/* make ready for any if (hw->...) below */
	err = e1000_init_hw_struct(adapter, hw);
	if (err)
		goto err_sw_init;

	/* there is a workaround being applied below that limits
	 * 64-bit DMA addresses to 64-bit hardware.  There are some
	 * 32-bit adapters that Tx hang when given 64-bit DMA addresses
	 */
	pci_using_dac = 0;
	if ((hw->bus_type == e1000_bus_type_pcix) &&
	    !dma_set_mask(&pdev->dev, DMA_BIT_MASK(64))) {
		/* according to DMA-API-HOWTO, coherent calls will always
		 * succeed if the set call did
		 */
		dma_set_coherent_mask(&pdev->dev, DMA_BIT_MASK(64));
		pci_using_dac = 1;
	} else {
		err = dma_set_mask(&pdev->dev, DMA_BIT_MASK(32));
		if (err) {
			pr_err("No usable DMA config, aborting\n");
			goto err_dma;
		}
		dma_set_coherent_mask(&pdev->dev, DMA_BIT_MASK(32));
	}

	netdev->netdev_ops = &e1000_netdev_ops;
	e1000_set_ethtool_ops(netdev);
	netdev->watchdog_timeo = 5 * HZ;
	netif_napi_add(netdev, &adapter->napi, e1000_clean, 64);

	strncpy(netdev->name, pci_name(pdev), sizeof(netdev->name) - 1);

	adapter->bd_number = cards_found;

	/* setup the private structure */

	err = e1000_sw_init(adapter);
	if (err)
		goto err_sw_init;

	err = -EIO;
	if (hw->mac_type == e1000_ce4100) {
		hw->ce4100_gbe_mdio_base_virt =
					ioremap(pci_resource_start(pdev, BAR_1),
		                                pci_resource_len(pdev, BAR_1));

		if (!hw->ce4100_gbe_mdio_base_virt)
			goto err_mdio_ioremap;
	}

	if (hw->mac_type >= e1000_82543) {
		netdev->hw_features = NETIF_F_SG |
				   NETIF_F_HW_CSUM |
				   NETIF_F_HW_VLAN_CTAG_RX;
		netdev->features = NETIF_F_HW_VLAN_CTAG_TX |
				   NETIF_F_HW_VLAN_CTAG_FILTER;
	}

	if ((hw->mac_type >= e1000_82544) &&
	    (hw->mac_type != e1000_82547))
		netdev->hw_features |= NETIF_F_TSO;

	netdev->priv_flags |= IFF_SUPP_NOFCS;

	netdev->features |= netdev->hw_features;
	netdev->hw_features |= (NETIF_F_RXCSUM |
				NETIF_F_RXALL |
				NETIF_F_RXFCS);

	if (pci_using_dac) {
		netdev->features |= NETIF_F_HIGHDMA;
		netdev->vlan_features |= NETIF_F_HIGHDMA;
	}

	netdev->vlan_features |= (NETIF_F_TSO |
				  NETIF_F_HW_CSUM |
				  NETIF_F_SG);

	netdev->priv_flags |= IFF_UNICAST_FLT;

	adapter->en_mng_pt = e1000_enable_mng_pass_thru(hw);

	/* initialize eeprom parameters */
	if (e1000_init_eeprom_params(hw)) {
		e_err(probe, "EEPROM initialization failed\n");
		goto err_eeprom;
	}

	/* before reading the EEPROM, reset the controller to
	 * put the device in a known good starting state
	 */

	e1000_reset_hw(hw);

	/* make sure the EEPROM is good */
	if (e1000_validate_eeprom_checksum(hw) < 0) {
		e_err(probe, "The EEPROM Checksum Is Not Valid\n");
		e1000_dump_eeprom(adapter);
		/* set MAC address to all zeroes to invalidate and temporarily
		 * disable this device for the user. This blocks regular
		 * traffic while still permitting ethtool ioctls from reaching
		 * the hardware as well as allowing the user to run the
		 * interface after manually setting a hw addr using
		 * `ip link set address`
		 */
		memset(hw->mac_addr, 0, netdev->addr_len);
	} else {
		/* copy the MAC address out of the EEPROM */
		if (e1000_read_mac_addr(hw))
			e_err(probe, "EEPROM Read Error\n");
	}
	/* don't block initialization here due to a bad MAC address */
	memcpy(netdev->dev_addr, hw->mac_addr, netdev->addr_len);

	if (!is_valid_ether_addr(netdev->dev_addr))
		e_err(probe, "Invalid MAC Address\n");

	INIT_DELAYED_WORK(&adapter->watchdog_task, e1000_watchdog);
	INIT_DELAYED_WORK(&adapter->fifo_stall_task,
			  e1000_82547_tx_fifo_stall_task);
	INIT_DELAYED_WORK(&adapter->phy_info_task, e1000_update_phy_info_task);
	INIT_WORK(&adapter->reset_task, e1000_reset_task);

	e1000_check_options(adapter);

	/* Initial Wake on LAN setting
	 * If APM wake is enabled in the EEPROM,
	 * enable the ACPI Magic Packet filter
	 */

	switch (hw->mac_type) {
	case e1000_82542_rev2_0:
	case e1000_82542_rev2_1:
	case e1000_82543:
		break;
	case e1000_82544:
		e1000_read_eeprom(hw,
			EEPROM_INIT_CONTROL2_REG, 1, &eeprom_data);
		eeprom_apme_mask = E1000_EEPROM_82544_APM;
		break;
	case e1000_82546:
	case e1000_82546_rev_3:
		if (er32(STATUS) & E1000_STATUS_FUNC_1) {
			e1000_read_eeprom(hw,
				EEPROM_INIT_CONTROL3_PORT_B, 1, &eeprom_data);
			break;
		}
		/* Fall Through */
	default:
		e1000_read_eeprom(hw,
			EEPROM_INIT_CONTROL3_PORT_A, 1, &eeprom_data);
		break;
	}
	if (eeprom_data & eeprom_apme_mask)
		adapter->eeprom_wol |= E1000_WUFC_MAG;

	/* now that we have the eeprom settings, apply the special cases
	 * where the eeprom may be wrong or the board simply won't support
	 * wake on lan on a particular port
	 */
	switch (pdev->device) {
	case E1000_DEV_ID_82546GB_PCIE:
		adapter->eeprom_wol = 0;
		break;
	case E1000_DEV_ID_82546EB_FIBER:
	case E1000_DEV_ID_82546GB_FIBER:
		/* Wake events only supported on port A for dual fiber
		 * regardless of eeprom setting
		 */
		if (er32(STATUS) & E1000_STATUS_FUNC_1)
			adapter->eeprom_wol = 0;
		break;
	case E1000_DEV_ID_82546GB_QUAD_COPPER_KSP3:
		/* if quad port adapter, disable WoL on all but port A */
		if (global_quad_port_a != 0)
			adapter->eeprom_wol = 0;
		else
			adapter->quad_port_a = true;
		/* Reset for multiple quad port adapters */
		if (++global_quad_port_a == 4)
			global_quad_port_a = 0;
		break;
	}

	/* initialize the wol settings based on the eeprom settings */
	adapter->wol = adapter->eeprom_wol;
	device_set_wakeup_enable(&adapter->pdev->dev, adapter->wol);

	/* Auto detect PHY address */
	if (hw->mac_type == e1000_ce4100) {
		for (i = 0; i < 32; i++) {
			hw->phy_addr = i;
			e1000_read_phy_reg(hw, PHY_ID2, &tmp);
			if (tmp == 0 || tmp == 0xFF) {
				if (i == 31)
					goto err_eeprom;
				continue;
			} else
				break;
		}
	}

	/* reset the hardware with the new settings */
	e1000_reset(adapter);

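	/* If an EtherCAT master claims the device via ecdev_offer(), it is
	 * driven exclusively through ec_poll() and is never registered with
	 * the kernel network stack; otherwise it is registered as a normal
	 * eth%d interface below.
	 */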
       
  1254  	// offer device to EtherCAT master module
       
  1255 	adapter->ecdev = ecdev_offer(netdev, ec_poll, THIS_MODULE);
       
  1256 	if (adapter->ecdev) {
       
  1257 		err = ecdev_open(adapter->ecdev);
       
  1258 		if (err) {
       
  1259 			ecdev_withdraw(adapter->ecdev);
       
  1260 			goto err_register;
       
  1261 		}
       
  1262 	} else {
       
  1263 		strcpy(netdev->name, "eth%d");
       
  1264 		err = register_netdev(netdev);
       
  1265 		if (err)
       
  1266 			goto err_register;
       
  1267 	}
       
  1268 
       
  1269 	e1000_vlan_filter_on_off(adapter, false);
       
  1270 
       
  1271 	/* print bus type/speed/width info */
       
  1272 	e_info(probe, "(PCI%s:%dMHz:%d-bit) %pM\n",
       
  1273 	       ((hw->bus_type == e1000_bus_type_pcix) ? "-X" : ""),
       
  1274 	       ((hw->bus_speed == e1000_bus_speed_133) ? 133 :
       
  1275 		(hw->bus_speed == e1000_bus_speed_120) ? 120 :
       
  1276 		(hw->bus_speed == e1000_bus_speed_100) ? 100 :
       
  1277 		(hw->bus_speed == e1000_bus_speed_66) ? 66 : 33),
       
  1278 	       ((hw->bus_width == e1000_bus_width_64) ? 64 : 32),
       
  1279 	       netdev->dev_addr);
       
  1280 
       
  1281 	if (!adapter->ecdev) {
       
  1282 		/* carrier off reporting is important to ethtool even BEFORE open */
       
  1283 		netif_carrier_off(netdev);
       
  1284 	}
       
  1285 
       
  1286 	e_info(probe, "Intel(R) PRO/1000 Network Connection\n");
       
  1287 
       
  1288 	cards_found++;
       
  1289 	return 0;
       
  1290 
       
  1291 err_register:
       
  1292 err_eeprom:
       
  1293 	e1000_phy_hw_reset(hw);
       
  1294 
       
  1295 	if (hw->flash_address)
       
  1296 		iounmap(hw->flash_address);
       
  1297 	kfree(adapter->tx_ring);
       
  1298 	kfree(adapter->rx_ring);
       
  1299 err_dma:
       
  1300 err_sw_init:
       
  1301 err_mdio_ioremap:
       
  1302 	iounmap(hw->ce4100_gbe_mdio_base_virt);
       
  1303 	iounmap(hw->hw_addr);
       
  1304 err_ioremap:
       
  1305 	free_netdev(netdev);
       
  1306 err_alloc_etherdev:
       
  1307 	pci_release_selected_regions(pdev, bars);
       
  1308 err_pci_reg:
       
  1309 	pci_disable_device(pdev);
       
  1310 	return err;
       
  1311 }
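
/* Aside (illustrative, not part of the driver): the probe code above follows
 * the usual EtherCAT network-driver pattern. A minimal sketch, using only the
 * ecdev_* calls already present in this file:
 *
 *	adapter->ecdev = ecdev_offer(netdev, ec_poll, THIS_MODULE);
 *	if (adapter->ecdev) {			// master claimed the port
 *		if (ecdev_open(adapter->ecdev)) {
 *			ecdev_withdraw(adapter->ecdev);
 *			// unwind exactly like any other probe failure
 *		}
 *	} else {				// stock net-stack path
 *		err = register_netdev(netdev);
 *	}
 */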
       
  1312 
       
  1313 /**
       
  1314  * e1000_remove - Device Removal Routine
       
  1315  * @pdev: PCI device information struct
       
  1316  *
       
  1317  * e1000_remove is called by the PCI subsystem to alert the driver
       
  1318  * that it should release a PCI device.  This could be caused by a
       
  1319  * Hot-Plug event, or because the driver is going to be removed from
       
  1320  * memory.
       
  1321  **/
       
  1322 static void e1000_remove(struct pci_dev *pdev)
       
  1323 {
       
  1324 	struct net_device *netdev = pci_get_drvdata(pdev);
       
  1325 	struct e1000_adapter *adapter = netdev_priv(netdev);
       
  1326 	struct e1000_hw *hw = &adapter->hw;
       
  1327 
       
  1328 	e1000_down_and_stop(adapter);
       
  1329 	e1000_release_manageability(adapter);
       
  1330 
       
  1331 	if (adapter->ecdev) {
       
  1332 		ecdev_close(adapter->ecdev);
       
  1333 		ecdev_withdraw(adapter->ecdev);
       
  1334 	} else {
       
  1335 		unregister_netdev(netdev);
       
  1336 	}
       
  1337 
       
  1338 	e1000_phy_hw_reset(hw);
       
  1339 
       
  1340 	kfree(adapter->tx_ring);
       
  1341 	kfree(adapter->rx_ring);
       
  1342 
       
  1343 	if (hw->mac_type == e1000_ce4100)
       
  1344 		iounmap(hw->ce4100_gbe_mdio_base_virt);
       
  1345 	iounmap(hw->hw_addr);
       
  1346 	if (hw->flash_address)
       
  1347 		iounmap(hw->flash_address);
       
  1348 	pci_release_selected_regions(pdev, adapter->bars);
       
  1349 
       
  1350 	free_netdev(netdev);
       
  1351 
       
  1352 	pci_disable_device(pdev);
       
  1353 }
       
  1354 
       
  1355 /**
       
  1356  * e1000_sw_init - Initialize general software structures (struct e1000_adapter)
       
  1357  * @adapter: board private structure to initialize
       
  1358  *
       
  1359  * e1000_sw_init initializes the Adapter private data structure.
       
  1360  * e1000_init_hw_struct MUST be called before this function
       
  1361  **/
       
  1362 static int e1000_sw_init(struct e1000_adapter *adapter)
       
  1363 {
       
  1364 	adapter->rx_buffer_len = MAXIMUM_ETHERNET_VLAN_SIZE;
       
  1365 
       
  1366 	adapter->num_tx_queues = 1;
       
  1367 	adapter->num_rx_queues = 1;
       
  1368 
       
  1369 	if (e1000_alloc_queues(adapter)) {
       
  1370 		e_err(probe, "Unable to allocate memory for queues\n");
       
  1371 		return -ENOMEM;
       
  1372 	}
       
  1373 
       
  1374 	/* Explicitly disable IRQ since the NIC can be in any state. */
       
  1375 	e1000_irq_disable(adapter);
       
  1376 
       
  1377 	spin_lock_init(&adapter->stats_lock);
       
  1378 	mutex_init(&adapter->mutex);
       
  1379 
       
  1380 	set_bit(__E1000_DOWN, &adapter->flags);
       
  1381 
       
  1382 	return 0;
       
  1383 }
       
  1384 
       
  1385 /**
       
  1386  * e1000_alloc_queues - Allocate memory for all rings
       
  1387  * @adapter: board private structure to initialize
       
  1388  *
       
  1389  * We allocate one ring per queue at run-time since we don't know the
       
  1390  * number of queues at compile-time.
       
  1391  **/
       
  1392 static int e1000_alloc_queues(struct e1000_adapter *adapter)
       
  1393 {
       
  1394 	adapter->tx_ring = kcalloc(adapter->num_tx_queues,
       
  1395 	                           sizeof(struct e1000_tx_ring), GFP_KERNEL);
       
  1396 	if (!adapter->tx_ring)
       
  1397 		return -ENOMEM;
       
  1398 
       
  1399 	adapter->rx_ring = kcalloc(adapter->num_rx_queues,
       
  1400 	                           sizeof(struct e1000_rx_ring), GFP_KERNEL);
       
  1401 	if (!adapter->rx_ring) {
       
  1402 		kfree(adapter->tx_ring);
       
  1403 		return -ENOMEM;
       
  1404 	}
       
  1405 
       
  1406 	return E1000_SUCCESS;
       
  1407 }
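
/* Note (illustrative): kcalloc() is preferred here over an open-coded
 * kmalloc() both because the rings must start zeroed and because kcalloc()
 * checks the count * size multiplication for overflow. The unsafe
 * equivalent would be:
 *
 *	size_t size = adapter->num_tx_queues *
 *		      sizeof(struct e1000_tx_ring);	// may silently overflow
 *	adapter->tx_ring = kmalloc(size, GFP_KERNEL);	// not zeroed
 */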
       
  1408 
       
  1409 /**
       
  1410  * e1000_open - Called when a network interface is made active
       
  1411  * @netdev: network interface device structure
       
  1412  *
       
  1413  * Returns 0 on success, negative value on failure
       
  1414  *
       
  1415  * The open entry point is called when a network interface is made
       
  1416  * active by the system (IFF_UP).  At this point all resources needed
       
  1417  * for transmit and receive operations are allocated, the interrupt
       
  1418  * handler is registered with the OS, the watchdog task is started,
       
  1419  * and the stack is notified that the interface is ready.
       
  1420  **/
       
  1421 static int e1000_open(struct net_device *netdev)
       
  1422 {
       
  1423 	struct e1000_adapter *adapter = netdev_priv(netdev);
       
  1424 	struct e1000_hw *hw = &adapter->hw;
       
  1425 	int err;
       
  1426 
       
  1427 	/* disallow open during test */
       
  1428 	if (test_bit(__E1000_TESTING, &adapter->flags))
       
  1429 		return -EBUSY;
       
  1430 
       
  1431 	netif_carrier_off(netdev);
       
  1432 
       
  1433 	/* allocate transmit descriptors */
       
  1434 	err = e1000_setup_all_tx_resources(adapter);
       
  1435 	if (err)
       
  1436 		goto err_setup_tx;
       
  1437 
       
  1438 	/* allocate receive descriptors */
       
  1439 	err = e1000_setup_all_rx_resources(adapter);
       
  1440 	if (err)
       
  1441 		goto err_setup_rx;
       
  1442 
       
  1443 	e1000_power_up_phy(adapter);
       
  1444 
       
  1445 	adapter->mng_vlan_id = E1000_MNG_VLAN_NONE;
       
  1446 	if (hw->mng_cookie.status &

  1447 	    E1000_MNG_DHCP_COOKIE_STATUS_VLAN_SUPPORT) {
       
  1448 		e1000_update_mng_vlan(adapter);
       
  1449 	}
       
  1450 
       
  1451 	/* before we allocate an interrupt, we must be ready to handle it.
       
  1452 	 * Setting DEBUG_SHIRQ in the kernel makes it fire an interrupt
       
  1453 	 * as soon as we call request_irq, so we have to set up our
       
  1454 	 * clean_rx handler before we do so.
       
  1455 	 */
       
  1456 	e1000_configure(adapter);
       
  1457 
       
  1458 	err = e1000_request_irq(adapter);
       
  1459 	if (err)
       
  1460 		goto err_req_irq;
       
  1461 
       
  1462 	/* From here on the code is the same as e1000_up() */
       
  1463 	clear_bit(__E1000_DOWN, &adapter->flags);
       
  1464 
       
  1465 	if (!adapter->ecdev) {
       
  1466 		napi_enable(&adapter->napi);
       
  1467 
       
  1468 		e1000_irq_enable(adapter);
       
  1469 
       
  1470 		netif_start_queue(netdev);
       
  1471 	}
       
  1472 
       
  1473 	/* fire a link status change interrupt to start the watchdog */
       
  1474 	ew32(ICS, E1000_ICS_LSC);
       
  1475 
       
  1476 	return E1000_SUCCESS;
       
  1477 
       
  1478 err_req_irq:
       
  1479 	e1000_power_down_phy(adapter);
       
  1480 	e1000_free_all_rx_resources(adapter);
       
  1481 err_setup_rx:
       
  1482 	e1000_free_all_tx_resources(adapter);
       
  1483 err_setup_tx:
       
  1484 	e1000_reset(adapter);
       
  1485 
       
  1486 	return err;
       
  1487 }
       
  1488 
       
  1489 /**
       
  1490  * e1000_close - Disables a network interface
       
  1491  * @netdev: network interface device structure
       
  1492  *
       
  1493  * Returns 0, this is not allowed to fail
       
  1494  *
       
  1495  * The close entry point is called when an interface is de-activated
       
  1496  * by the OS.  The hardware is still under the drivers control, but
       
  1497  * needs to be disabled.  A global MAC reset is issued to stop the
       
  1498  * hardware, and all transmit and receive resources are freed.
       
  1499  **/
       
  1500 static int e1000_close(struct net_device *netdev)
       
  1501 {
       
  1502 	struct e1000_adapter *adapter = netdev_priv(netdev);
       
  1503 	struct e1000_hw *hw = &adapter->hw;
       
  1504 
       
  1505 	WARN_ON(test_bit(__E1000_RESETTING, &adapter->flags));
       
  1506 	e1000_down(adapter);
       
  1507 	e1000_power_down_phy(adapter);
       
  1508 	e1000_free_irq(adapter);
       
  1509 
       
  1510 	e1000_free_all_tx_resources(adapter);
       
  1511 	e1000_free_all_rx_resources(adapter);
       
  1512 
       
  1513 	/* kill manageability vlan ID if supported, but not if a vlan with
       
  1514 	 * the same ID is registered on the host OS (let 8021q kill it)
       
  1515 	 */
       
  1516 	if ((hw->mng_cookie.status &
       
  1517 	     E1000_MNG_DHCP_COOKIE_STATUS_VLAN_SUPPORT) &&
       
  1518 	    !test_bit(adapter->mng_vlan_id, adapter->active_vlans)) {
       
  1519 		e1000_vlan_rx_kill_vid(netdev, htons(ETH_P_8021Q),
       
  1520 				       adapter->mng_vlan_id);
       
  1521 	}
       
  1522 
       
  1523 	return 0;
       
  1524 }
       
  1525 
       
  1526 /**
       
  1527  * e1000_check_64k_bound - check that memory doesn't cross 64kB boundary
       
  1528  * @adapter: address of board private structure
       
  1529  * @start: address of beginning of memory
       
  1530  * @len: length of memory
       
  1531  **/
       
  1532 static bool e1000_check_64k_bound(struct e1000_adapter *adapter, void *start,
       
  1533 				  unsigned long len)
       
  1534 {
       
  1535 	struct e1000_hw *hw = &adapter->hw;
       
  1536 	unsigned long begin = (unsigned long)start;
       
  1537 	unsigned long end = begin + len;
       
  1538 
       
  1539 	/* First rev 82545 and 82546 need to not allow any memory
       
  1540 	 * write location to cross 64k boundary due to errata 23
       
  1541 	 */
       
  1542 	if (hw->mac_type == e1000_82545 ||
       
  1543 	    hw->mac_type == e1000_ce4100 ||
       
  1544 	    hw->mac_type == e1000_82546) {
       
  1545 		return ((begin ^ (end - 1)) >> 16) == 0;
       
  1546 	}
       
  1547 
       
  1548 	return true;
       
  1549 }
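
/* Worked example for the XOR test above (values illustrative):
 * begin = 0xFFF0, len = 0x20  ->  end - 1 = 0x1000F and
 * (0xFFF0 ^ 0x1000F) >> 16 = 1, so the buffer straddles a 64 KiB boundary
 * and the check fails; begin = 0x10000, len = 0x1000  ->  end - 1 = 0x10FFF
 * and (0x10000 ^ 0x10FFF) >> 16 = 0, so the buffer stays inside one 64 KiB
 * region and the check passes.
 */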
       
  1550 
       
  1551 /**
       
  1552  * e1000_setup_tx_resources - allocate Tx resources (Descriptors)
       
  1553  * @adapter: board private structure
       
  1554  * @txdr:    tx descriptor ring (for a specific queue) to setup
       
  1555  *
       
  1556  * Return 0 on success, negative on failure
       
  1557  **/
       
  1558 static int e1000_setup_tx_resources(struct e1000_adapter *adapter,
       
  1559 				    struct e1000_tx_ring *txdr)
       
  1560 {
       
  1561 	struct pci_dev *pdev = adapter->pdev;
       
  1562 	int size;
       
  1563 
       
  1564 	size = sizeof(struct e1000_buffer) * txdr->count;
       
  1565 	txdr->buffer_info = vzalloc(size);
       
  1566 	if (!txdr->buffer_info)
       
  1567 		return -ENOMEM;
       
  1568 
       
  1569 	/* round up to nearest 4K */
       
  1570 
       
  1571 	txdr->size = txdr->count * sizeof(struct e1000_tx_desc);
       
  1572 	txdr->size = ALIGN(txdr->size, 4096);
       
  1573 
       
  1574 	txdr->desc = dma_alloc_coherent(&pdev->dev, txdr->size, &txdr->dma,
       
  1575 					GFP_KERNEL);
       
  1576 	if (!txdr->desc) {
       
  1577 setup_tx_desc_die:
       
  1578 		vfree(txdr->buffer_info);
       
  1579 		return -ENOMEM;
       
  1580 	}
       
  1581 
       
  1582 	/* Fix for errata 23, can't cross 64kB boundary */
       
  1583 	if (!e1000_check_64k_bound(adapter, txdr->desc, txdr->size)) {
       
  1584 		void *olddesc = txdr->desc;
       
  1585 		dma_addr_t olddma = txdr->dma;
       
  1586 		e_err(tx_err, "txdr align check failed: %u bytes at %p\n",
       
  1587 		      txdr->size, txdr->desc);
       
  1588 		/* Try again, without freeing the previous */
       
  1589 		txdr->desc = dma_alloc_coherent(&pdev->dev, txdr->size,
       
  1590 						&txdr->dma, GFP_KERNEL);
       
  1591 		/* Failed allocation, critical failure */
       
  1592 		if (!txdr->desc) {
       
  1593 			dma_free_coherent(&pdev->dev, txdr->size, olddesc,
       
  1594 					  olddma);
       
  1595 			goto setup_tx_desc_die;
       
  1596 		}
       
  1597 
       
  1598 		if (!e1000_check_64k_bound(adapter, txdr->desc, txdr->size)) {
       
  1599 			/* give up */
       
  1600 			dma_free_coherent(&pdev->dev, txdr->size, txdr->desc,
       
  1601 					  txdr->dma);
       
  1602 			dma_free_coherent(&pdev->dev, txdr->size, olddesc,
       
  1603 					  olddma);
       
  1604 			e_err(probe, "Unable to allocate aligned memory "
       
  1605 			      "for the transmit descriptor ring\n");
       
  1606 			vfree(txdr->buffer_info);
       
  1607 			return -ENOMEM;
       
  1608 		} else {
       
  1609 			/* Free old allocation, new allocation was successful */
       
  1610 			dma_free_coherent(&pdev->dev, txdr->size, olddesc,
       
  1611 					  olddma);
       
  1612 		}
       
  1613 	}
       
  1614 	memset(txdr->desc, 0, txdr->size);
       
  1615 
       
  1616 	txdr->next_to_use = 0;
       
  1617 	txdr->next_to_clean = 0;
       
  1618 
       
  1619 	return 0;
       
  1620 }
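
/* Sizing example (illustrative): a legacy Tx descriptor is 16 bytes, so the
 * default ring of 256 descriptors needs 256 * 16 = 4096 bytes and is already
 * 4 KiB aligned, while a hypothetical ring of 80 descriptors (1280 bytes)
 * would be rounded up by ALIGN(1280, 4096) to a single 4096-byte page.
 */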
       
  1621 
       
  1622 /**
       
  1623  * e1000_setup_all_tx_resources - wrapper to allocate Tx resources
       
  1624  * 				  (Descriptors) for all queues
       
  1625  * @adapter: board private structure
       
  1626  *
       
  1627  * Return 0 on success, negative on failure
       
  1628  **/
       
  1629 int e1000_setup_all_tx_resources(struct e1000_adapter *adapter)
       
  1630 {
       
  1631 	int i, err = 0;
       
  1632 
       
  1633 	for (i = 0; i < adapter->num_tx_queues; i++) {
       
  1634 		err = e1000_setup_tx_resources(adapter, &adapter->tx_ring[i]);
       
  1635 		if (err) {
       
  1636 			e_err(probe, "Allocation for Tx Queue %u failed\n", i);
       
  1637 			for (i--; i >= 0; i--)
       
  1638 				e1000_free_tx_resources(adapter,
       
  1639 							&adapter->tx_ring[i]);
       
  1640 			break;
       
  1641 		}
       
  1642 	}
       
  1643 
       
  1644 	return err;
       
  1645 }
       
  1646 
       
  1647 /**
       
  1648  * e1000_configure_tx - Configure 8254x Transmit Unit after Reset
       
  1649  * @adapter: board private structure
       
  1650  *
       
  1651  * Configure the Tx unit of the MAC after a reset.
       
  1652  **/
       
  1653 static void e1000_configure_tx(struct e1000_adapter *adapter)
       
  1654 {
       
  1655 	u64 tdba;
       
  1656 	struct e1000_hw *hw = &adapter->hw;
       
  1657 	u32 tdlen, tctl, tipg;
       
  1658 	u32 ipgr1, ipgr2;
       
  1659 
       
  1660 	/* Setup the HW Tx Head and Tail descriptor pointers */
       
  1661 
       
  1662 	switch (adapter->num_tx_queues) {
       
  1663 	case 1:
       
  1664 	default:
       
  1665 		tdba = adapter->tx_ring[0].dma;
       
  1666 		tdlen = adapter->tx_ring[0].count *
       
  1667 			sizeof(struct e1000_tx_desc);
       
  1668 		ew32(TDLEN, tdlen);
       
  1669 		ew32(TDBAH, (tdba >> 32));
       
  1670 		ew32(TDBAL, (tdba & 0x00000000ffffffffULL));
       
  1671 		ew32(TDT, 0);
       
  1672 		ew32(TDH, 0);
       
  1673 		adapter->tx_ring[0].tdh = ((hw->mac_type >= e1000_82543) ?
       
  1674 					   E1000_TDH : E1000_82542_TDH);
       
  1675 		adapter->tx_ring[0].tdt = ((hw->mac_type >= e1000_82543) ?
       
  1676 					   E1000_TDT : E1000_82542_TDT);
       
  1677 		break;
       
  1678 	}
       
  1679 
       
  1680 	/* Set the default values for the Tx Inter Packet Gap timer */
       
  1681 	if ((hw->media_type == e1000_media_type_fiber ||
       
  1682 	     hw->media_type == e1000_media_type_internal_serdes))
       
  1683 		tipg = DEFAULT_82543_TIPG_IPGT_FIBER;
       
  1684 	else
       
  1685 		tipg = DEFAULT_82543_TIPG_IPGT_COPPER;
       
  1686 
       
  1687 	switch (hw->mac_type) {
       
  1688 	case e1000_82542_rev2_0:
       
  1689 	case e1000_82542_rev2_1:
       
  1690 		tipg = DEFAULT_82542_TIPG_IPGT;
       
  1691 		ipgr1 = DEFAULT_82542_TIPG_IPGR1;
       
  1692 		ipgr2 = DEFAULT_82542_TIPG_IPGR2;
       
  1693 		break;
       
  1694 	default:
       
  1695 		ipgr1 = DEFAULT_82543_TIPG_IPGR1;
       
  1696 		ipgr2 = DEFAULT_82543_TIPG_IPGR2;
       
  1697 		break;
       
  1698 	}
       
  1699 	tipg |= ipgr1 << E1000_TIPG_IPGR1_SHIFT;
       
  1700 	tipg |= ipgr2 << E1000_TIPG_IPGR2_SHIFT;
       
  1701 	ew32(TIPG, tipg);
       
  1702 
       
  1703 	/* Set the Tx Interrupt Delay register */
       
  1704 
       
  1705 	ew32(TIDV, adapter->tx_int_delay);
       
  1706 	if (hw->mac_type >= e1000_82540)
       
  1707 		ew32(TADV, adapter->tx_abs_int_delay);
       
  1708 
       
  1709 	/* Program the Transmit Control Register */
       
  1710 
       
  1711 	tctl = er32(TCTL);
       
  1712 	tctl &= ~E1000_TCTL_CT;
       
  1713 	tctl |= E1000_TCTL_PSP | E1000_TCTL_RTLC |
       
  1714 		(E1000_COLLISION_THRESHOLD << E1000_CT_SHIFT);
       
  1715 
       
  1716 	e1000_config_collision_dist(hw);
       
  1717 
       
  1718 	/* Setup Transmit Descriptor Settings for eop descriptor */
       
  1719 	adapter->txd_cmd = E1000_TXD_CMD_EOP | E1000_TXD_CMD_IFCS;
       
  1720 
       
  1721 	/* only set IDE if we are delaying interrupts using the timers */
       
  1722 	if (adapter->tx_int_delay)
       
  1723 		adapter->txd_cmd |= E1000_TXD_CMD_IDE;
       
  1724 
       
  1725 	if (hw->mac_type < e1000_82543)
       
  1726 		adapter->txd_cmd |= E1000_TXD_CMD_RPS;
       
  1727 	else
       
  1728 		adapter->txd_cmd |= E1000_TXD_CMD_RS;
       
  1729 
       
  1730 	/* Cache if we're 82544 running in PCI-X because we'll
       
  1731 	 * need this to apply a workaround later in the send path.
       
  1732 	 */
       
  1733 	if (hw->mac_type == e1000_82544 &&
       
  1734 	    hw->bus_type == e1000_bus_type_pcix)
       
  1735 		adapter->pcix_82544 = true;
       
  1736 
       
  1737 	ew32(TCTL, tctl);
       
  1738 
       
  1739 }
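
/* TIPG composition example (illustrative, assuming the usual 8254x copper
 * defaults IPGT = 8, IPGR1 = 8, IPGR2 = 6 and shift values of 10 and 20):
 *
 *	tipg = 8 | (8 << 10) | (6 << 20) = 0x00602008
 *
 * so bits 9:0 carry the transmit IPG, bits 19:10 IPGR1 and bits 29:20 IPGR2.
 */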
       
  1740 
       
  1741 /**
       
  1742  * e1000_setup_rx_resources - allocate Rx resources (Descriptors)
       
  1743  * @adapter: board private structure
       
  1744  * @rxdr:    rx descriptor ring (for a specific queue) to setup
       
  1745  *
       
  1746  * Returns 0 on success, negative on failure
       
  1747  **/
       
  1748 static int e1000_setup_rx_resources(struct e1000_adapter *adapter,
       
  1749 				    struct e1000_rx_ring *rxdr)
       
  1750 {
       
  1751 	struct pci_dev *pdev = adapter->pdev;
       
  1752 	int size, desc_len;
       
  1753 
       
  1754 	size = sizeof(struct e1000_buffer) * rxdr->count;
       
  1755 	rxdr->buffer_info = vzalloc(size);
       
  1756 	if (!rxdr->buffer_info)
       
  1757 		return -ENOMEM;
       
  1758 
       
  1759 	desc_len = sizeof(struct e1000_rx_desc);
       
  1760 
       
  1761 	/* Round up to nearest 4K */
       
  1762 
       
  1763 	rxdr->size = rxdr->count * desc_len;
       
  1764 	rxdr->size = ALIGN(rxdr->size, 4096);
       
  1765 
       
  1766 	rxdr->desc = dma_alloc_coherent(&pdev->dev, rxdr->size, &rxdr->dma,
       
  1767 					GFP_KERNEL);
       
  1768 	if (!rxdr->desc) {
       
  1769 setup_rx_desc_die:
       
  1770 		vfree(rxdr->buffer_info);
       
  1771 		return -ENOMEM;
       
  1772 	}
       
  1773 
       
  1774 	/* Fix for errata 23, can't cross 64kB boundary */
       
  1775 	if (!e1000_check_64k_bound(adapter, rxdr->desc, rxdr->size)) {
       
  1776 		void *olddesc = rxdr->desc;
       
  1777 		dma_addr_t olddma = rxdr->dma;
       
  1778 		e_err(rx_err, "rxdr align check failed: %u bytes at %p\n",
       
  1779 		      rxdr->size, rxdr->desc);
       
  1780 		/* Try again, without freeing the previous */
       
  1781 		rxdr->desc = dma_alloc_coherent(&pdev->dev, rxdr->size,
       
  1782 						&rxdr->dma, GFP_KERNEL);
       
  1783 		/* Failed allocation, critical failure */
       
  1784 		if (!rxdr->desc) {
       
  1785 			dma_free_coherent(&pdev->dev, rxdr->size, olddesc,
       
  1786 					  olddma);
       
  1787 			goto setup_rx_desc_die;
       
  1788 		}
       
  1789 
       
  1790 		if (!e1000_check_64k_bound(adapter, rxdr->desc, rxdr->size)) {
       
  1791 			/* give up */
       
  1792 			dma_free_coherent(&pdev->dev, rxdr->size, rxdr->desc,
       
  1793 					  rxdr->dma);
       
  1794 			dma_free_coherent(&pdev->dev, rxdr->size, olddesc,
       
  1795 					  olddma);
       
  1796 			e_err(probe, "Unable to allocate aligned memory for "
       
  1797 			      "the Rx descriptor ring\n");
       
  1798 			goto setup_rx_desc_die;
       
  1799 		} else {
       
  1800 			/* Free old allocation, new allocation was successful */
       
  1801 			dma_free_coherent(&pdev->dev, rxdr->size, olddesc,
       
  1802 					  olddma);
       
  1803 		}
       
  1804 	}
       
  1805 	memset(rxdr->desc, 0, rxdr->size);
       
  1806 
       
  1807 	rxdr->next_to_clean = 0;
       
  1808 	rxdr->next_to_use = 0;
       
  1809 	rxdr->rx_skb_top = NULL;
       
  1810 
       
  1811 	return 0;
       
  1812 }
       
  1813 
       
  1814 /**
       
  1815  * e1000_setup_all_rx_resources - wrapper to allocate Rx resources
       
  1816  * 				  (Descriptors) for all queues
       
  1817  * @adapter: board private structure
       
  1818  *
       
  1819  * Return 0 on success, negative on failure
       
  1820  **/
       
  1821 int e1000_setup_all_rx_resources(struct e1000_adapter *adapter)
       
  1822 {
       
  1823 	int i, err = 0;
       
  1824 
       
  1825 	for (i = 0; i < adapter->num_rx_queues; i++) {
       
  1826 		err = e1000_setup_rx_resources(adapter, &adapter->rx_ring[i]);
       
  1827 		if (err) {
       
  1828 			e_err(probe, "Allocation for Rx Queue %u failed\n", i);
       
  1829 			for (i--; i >= 0; i--)
       
  1830 				e1000_free_rx_resources(adapter,
       
  1831 							&adapter->rx_ring[i]);
       
  1832 			break;
       
  1833 		}
       
  1834 	}
       
  1835 
       
  1836 	return err;
       
  1837 }
       
  1838 
       
  1839 /**
       
  1840  * e1000_setup_rctl - configure the receive control registers
       
  1841  * @adapter: Board private structure
       
  1842  **/
       
  1843 static void e1000_setup_rctl(struct e1000_adapter *adapter)
       
  1844 {
       
  1845 	struct e1000_hw *hw = &adapter->hw;
       
  1846 	u32 rctl;
       
  1847 
       
  1848 	rctl = er32(RCTL);
       
  1849 
       
  1850 	rctl &= ~(3 << E1000_RCTL_MO_SHIFT);
       
  1851 
       
  1852 	rctl |= E1000_RCTL_BAM | E1000_RCTL_LBM_NO |
       
  1853 		E1000_RCTL_RDMTS_HALF |
       
  1854 		(hw->mc_filter_type << E1000_RCTL_MO_SHIFT);
       
  1855 
       
  1856 	if (hw->tbi_compatibility_on == 1)
       
  1857 		rctl |= E1000_RCTL_SBP;
       
  1858 	else
       
  1859 		rctl &= ~E1000_RCTL_SBP;
       
  1860 
       
  1861 	if (adapter->netdev->mtu <= ETH_DATA_LEN)
       
  1862 		rctl &= ~E1000_RCTL_LPE;
       
  1863 	else
       
  1864 		rctl |= E1000_RCTL_LPE;
       
  1865 
       
  1866 	/* Setup buffer sizes */
       
  1867 	rctl &= ~E1000_RCTL_SZ_4096;
       
  1868 	rctl |= E1000_RCTL_BSEX;
       
  1869 	switch (adapter->rx_buffer_len) {
       
  1870 	case E1000_RXBUFFER_2048:

  1871 	default:

  1872 		rctl |= E1000_RCTL_SZ_2048;

  1873 		rctl &= ~E1000_RCTL_BSEX;

  1874 		break;

  1875 	case E1000_RXBUFFER_4096:

  1876 		rctl |= E1000_RCTL_SZ_4096;

  1877 		break;

  1878 	case E1000_RXBUFFER_8192:

  1879 		rctl |= E1000_RCTL_SZ_8192;

  1880 		break;

  1881 	case E1000_RXBUFFER_16384:

  1882 		rctl |= E1000_RCTL_SZ_16384;

  1883 		break;
       
  1884 	}
       
  1885 
       
  1886 	/* This is useful for sniffing bad packets. */
       
  1887 	if (adapter->netdev->features & NETIF_F_RXALL) {
       
  1888 		/* UPE and MPE will be handled by normal PROMISC logic
       
  1889 		 * in e1000_set_rx_mode
       
  1890 		 */
       
  1891 		rctl |= (E1000_RCTL_SBP | /* Receive bad packets */
       
  1892 			 E1000_RCTL_BAM | /* RX All Bcast Pkts */
       
  1893 			 E1000_RCTL_PMCF); /* RX All MAC Ctrl Pkts */
       
  1894 
       
  1895 		rctl &= ~(E1000_RCTL_VFE | /* Disable VLAN filter */
       
  1896 			  E1000_RCTL_DPF | /* Allow filtered pause */
       
  1897 			  E1000_RCTL_CFIEN); /* Dis VLAN CFIEN Filter */
       
  1898 		/* Do not mess with E1000_CTRL_VME, it affects transmit as well,
       
  1899 		 * and that breaks VLANs.
       
  1900 		 */
       
  1901 	}
       
  1902 
       
  1903 	ew32(RCTL, rctl);
       
  1904 }
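
/* Buffer-size encoding note (illustrative): on its own, RCTL.BSIZE selects
 * 2048/1024/512/256-byte buffers; with RCTL.BSEX set the same encodings are
 * scaled by 16, which is how the 4096/8192/16384 cases above are expressed.
 * That is why the 2048 default case must clear BSEX again after it was set
 * unconditionally before the switch.
 */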
       
  1905 
       
  1906 /**
       
  1907  * e1000_configure_rx - Configure 8254x Receive Unit after Reset
       
  1908  * @adapter: board private structure
       
  1909  *
       
  1910  * Configure the Rx unit of the MAC after a reset.
       
  1911  **/
       
  1912 static void e1000_configure_rx(struct e1000_adapter *adapter)
       
  1913 {
       
  1914 	u64 rdba;
       
  1915 	struct e1000_hw *hw = &adapter->hw;
       
  1916 	u32 rdlen, rctl, rxcsum;
       
  1917 
       
  1918 	if (adapter->netdev->mtu > ETH_DATA_LEN) {
       
  1919 		rdlen = adapter->rx_ring[0].count *
       
  1920 		        sizeof(struct e1000_rx_desc);
       
  1921 		adapter->clean_rx = e1000_clean_jumbo_rx_irq;
       
  1922 		adapter->alloc_rx_buf = e1000_alloc_jumbo_rx_buffers;
       
  1923 	} else {
       
  1924 		rdlen = adapter->rx_ring[0].count *
       
  1925 		        sizeof(struct e1000_rx_desc);
       
  1926 		adapter->clean_rx = e1000_clean_rx_irq;
       
  1927 		adapter->alloc_rx_buf = e1000_alloc_rx_buffers;
       
  1928 	}
       
  1929 
       
  1930 	/* disable receives while setting up the descriptors */
       
  1931 	rctl = er32(RCTL);
       
  1932 	ew32(RCTL, rctl & ~E1000_RCTL_EN);
       
  1933 
       
  1934 	/* set the Receive Delay Timer Register */
       
  1935 	ew32(RDTR, adapter->rx_int_delay);
       
  1936 
       
  1937 	if (hw->mac_type >= e1000_82540) {
       
  1938 		ew32(RADV, adapter->rx_abs_int_delay);
       
  1939 		if (adapter->itr_setting != 0)
       
  1940 			ew32(ITR, 1000000000 / (adapter->itr * 256));
       
  1941 	}
       
  1942 
       
  1943 	/* Setup the HW Rx Head and Tail Descriptor Pointers and
       
  1944 	 * the Base and Length of the Rx Descriptor Ring
       
  1945 	 */
       
  1946 	switch (adapter->num_rx_queues) {
       
  1947 	case 1:
       
  1948 	default:
       
  1949 		rdba = adapter->rx_ring[0].dma;
       
  1950 		ew32(RDLEN, rdlen);
       
  1951 		ew32(RDBAH, (rdba >> 32));
       
  1952 		ew32(RDBAL, (rdba & 0x00000000ffffffffULL));
       
  1953 		ew32(RDT, 0);
       
  1954 		ew32(RDH, 0);
       
  1955 		adapter->rx_ring[0].rdh = ((hw->mac_type >= e1000_82543) ?
       
  1956 					   E1000_RDH : E1000_82542_RDH);
       
  1957 		adapter->rx_ring[0].rdt = ((hw->mac_type >= e1000_82543) ?
       
  1958 					   E1000_RDT : E1000_82542_RDT);
       
  1959 		break;
       
  1960 	}
       
  1961 
       
  1962 	/* Enable 82543 Receive Checksum Offload for TCP and UDP */
       
  1963 	if (hw->mac_type >= e1000_82543) {
       
  1964 		rxcsum = er32(RXCSUM);
       
  1965 		if (adapter->rx_csum)
       
  1966 			rxcsum |= E1000_RXCSUM_TUOFL;
       
  1967 		else
       
  1968 			/* don't need to clear IPPCSE as it defaults to 0 */
       
  1969 			rxcsum &= ~E1000_RXCSUM_TUOFL;
       
  1970 		ew32(RXCSUM, rxcsum);
       
  1971 	}
       
  1972 
       
  1973 	/* Enable Receives */
       
  1974 	ew32(RCTL, rctl | E1000_RCTL_EN);
       
  1975 }
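
/* ITR units example (illustrative): the ITR register holds the minimum
 * inter-interrupt interval in 256 ns increments, so a target rate of
 * adapter->itr = 8000 interrupts/s programs 1000000000 / (8000 * 256) ~= 488,
 * i.e. roughly 125 us between interrupts.
 */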
       
  1976 
       
  1977 /**
       
  1978  * e1000_free_tx_resources - Free Tx Resources per Queue
       
  1979  * @adapter: board private structure
       
  1980  * @tx_ring: Tx descriptor ring for a specific queue
       
  1981  *
       
  1982  * Free all transmit software resources
       
  1983  **/
       
  1984 static void e1000_free_tx_resources(struct e1000_adapter *adapter,
       
  1985 				    struct e1000_tx_ring *tx_ring)
       
  1986 {
       
  1987 	struct pci_dev *pdev = adapter->pdev;
       
  1988 
       
  1989 	e1000_clean_tx_ring(adapter, tx_ring);
       
  1990 
       
  1991 	vfree(tx_ring->buffer_info);
       
  1992 	tx_ring->buffer_info = NULL;
       
  1993 
       
  1994 	dma_free_coherent(&pdev->dev, tx_ring->size, tx_ring->desc,
       
  1995 			  tx_ring->dma);
       
  1996 
       
  1997 	tx_ring->desc = NULL;
       
  1998 }
       
  1999 
       
  2000 /**
       
  2001  * e1000_free_all_tx_resources - Free Tx Resources for All Queues
       
  2002  * @adapter: board private structure
       
  2003  *
       
  2004  * Free all transmit software resources
       
  2005  **/
       
  2006 void e1000_free_all_tx_resources(struct e1000_adapter *adapter)
       
  2007 {
       
  2008 	int i;
       
  2009 
       
  2010 	for (i = 0; i < adapter->num_tx_queues; i++)
       
  2011 		e1000_free_tx_resources(adapter, &adapter->tx_ring[i]);
       
  2012 }
       
  2013 
       
  2014 static void e1000_unmap_and_free_tx_resource(struct e1000_adapter *adapter,
       
  2015 					     struct e1000_buffer *buffer_info)
       
  2016 {
       
  2017 	if (adapter->ecdev) {
       
  2018 		return;
       
  2019 	}
       
  2020 
       
  2021 	if (buffer_info->dma) {
       
  2022 		if (buffer_info->mapped_as_page)
       
  2023 			dma_unmap_page(&adapter->pdev->dev, buffer_info->dma,
       
  2024 				       buffer_info->length, DMA_TO_DEVICE);
       
  2025 		else
       
  2026 			dma_unmap_single(&adapter->pdev->dev, buffer_info->dma,
       
  2027 					 buffer_info->length,
       
  2028 					 DMA_TO_DEVICE);
       
  2029 		buffer_info->dma = 0;
       
  2030 	}
       
  2031 	if (buffer_info->skb) {
       
  2032 		dev_kfree_skb_any(buffer_info->skb);
       
  2033 		buffer_info->skb = NULL;
       
  2034 	}
       
  2035 	buffer_info->time_stamp = 0;
       
  2036 	/* buffer_info must be completely set up in the transmit path */
       
  2037 }
       
  2038 
       
  2039 /**
       
  2040  * e1000_clean_tx_ring - Free Tx Buffers
       
  2041  * @adapter: board private structure
       
  2042  * @tx_ring: ring to be cleaned
       
  2043  **/
       
  2044 static void e1000_clean_tx_ring(struct e1000_adapter *adapter,
       
  2045 				struct e1000_tx_ring *tx_ring)
       
  2046 {
       
  2047 	struct e1000_hw *hw = &adapter->hw;
       
  2048 	struct e1000_buffer *buffer_info;
       
  2049 	unsigned long size;
       
  2050 	unsigned int i;
       
  2051 
       
  2052 	/* Free all the Tx ring sk_buffs */
       
  2053 
       
  2054 	for (i = 0; i < tx_ring->count; i++) {
       
  2055 		buffer_info = &tx_ring->buffer_info[i];
       
  2056 		e1000_unmap_and_free_tx_resource(adapter, buffer_info);
       
  2057 	}
       
  2058 
       
  2059 	netdev_reset_queue(adapter->netdev);
       
  2060 	size = sizeof(struct e1000_buffer) * tx_ring->count;
       
  2061 	memset(tx_ring->buffer_info, 0, size);
       
  2062 
       
  2063 	/* Zero out the descriptor ring */
       
  2064 
       
  2065 	memset(tx_ring->desc, 0, tx_ring->size);
       
  2066 
       
  2067 	tx_ring->next_to_use = 0;
       
  2068 	tx_ring->next_to_clean = 0;
       
  2069 	tx_ring->last_tx_tso = false;
       
  2070 
       
  2071 	writel(0, hw->hw_addr + tx_ring->tdh);
       
  2072 	writel(0, hw->hw_addr + tx_ring->tdt);
       
  2073 }
       
  2074 
       
  2075 /**
       
  2076  * e1000_clean_all_tx_rings - Free Tx Buffers for all queues
       
  2077  * @adapter: board private structure
       
  2078  **/
       
  2079 static void e1000_clean_all_tx_rings(struct e1000_adapter *adapter)
       
  2080 {
       
  2081 	int i;
       
  2082 
       
  2083 	for (i = 0; i < adapter->num_tx_queues; i++)
       
  2084 		e1000_clean_tx_ring(adapter, &adapter->tx_ring[i]);
       
  2085 }
       
  2086 
       
  2087 /**
       
  2088  * e1000_free_rx_resources - Free Rx Resources
       
  2089  * @adapter: board private structure
       
  2090  * @rx_ring: ring to clean the resources from
       
  2091  *
       
  2092  * Free all receive software resources
       
  2093  **/
       
  2094 static void e1000_free_rx_resources(struct e1000_adapter *adapter,
       
  2095 				    struct e1000_rx_ring *rx_ring)
       
  2096 {
       
  2097 	struct pci_dev *pdev = adapter->pdev;
       
  2098 
       
  2099 	e1000_clean_rx_ring(adapter, rx_ring);
       
  2100 
       
  2101 	vfree(rx_ring->buffer_info);
       
  2102 	rx_ring->buffer_info = NULL;
       
  2103 
       
  2104 	dma_free_coherent(&pdev->dev, rx_ring->size, rx_ring->desc,
       
  2105 			  rx_ring->dma);
       
  2106 
       
  2107 	rx_ring->desc = NULL;
       
  2108 }
       
  2109 
       
  2110 /**
       
  2111  * e1000_free_all_rx_resources - Free Rx Resources for All Queues
       
  2112  * @adapter: board private structure
       
  2113  *
       
  2114  * Free all receive software resources
       
  2115  **/
       
  2116 void e1000_free_all_rx_resources(struct e1000_adapter *adapter)
       
  2117 {
       
  2118 	int i;
       
  2119 
       
  2120 	for (i = 0; i < adapter->num_rx_queues; i++)
       
  2121 		e1000_free_rx_resources(adapter, &adapter->rx_ring[i]);
       
  2122 }
       
  2123 
       
  2124 /**
       
  2125  * e1000_clean_rx_ring - Free Rx Buffers per Queue
       
  2126  * @adapter: board private structure
       
  2127  * @rx_ring: ring to free buffers from
       
  2128  **/
       
  2129 static void e1000_clean_rx_ring(struct e1000_adapter *adapter,
       
  2130 				struct e1000_rx_ring *rx_ring)
       
  2131 {
       
  2132 	struct e1000_hw *hw = &adapter->hw;
       
  2133 	struct e1000_buffer *buffer_info;
       
  2134 	struct pci_dev *pdev = adapter->pdev;
       
  2135 	unsigned long size;
       
  2136 	unsigned int i;
       
  2137 
       
  2138 	/* Free all the Rx ring sk_buffs */
       
  2139 	for (i = 0; i < rx_ring->count; i++) {
       
  2140 		buffer_info = &rx_ring->buffer_info[i];
       
  2141 		if (buffer_info->dma &&
       
  2142 		    adapter->clean_rx == e1000_clean_rx_irq) {
       
  2143 			dma_unmap_single(&pdev->dev, buffer_info->dma,
       
  2144 			                 buffer_info->length,
       
  2145 					 DMA_FROM_DEVICE);
       
  2146 		} else if (buffer_info->dma &&
       
  2147 		           adapter->clean_rx == e1000_clean_jumbo_rx_irq) {
       
  2148 			dma_unmap_page(&pdev->dev, buffer_info->dma,
       
  2149 				       buffer_info->length,
       
  2150 				       DMA_FROM_DEVICE);
       
  2151 		}
       
  2152 
       
  2153 		buffer_info->dma = 0;
       
  2154 		if (buffer_info->page) {
       
  2155 			put_page(buffer_info->page);
       
  2156 			buffer_info->page = NULL;
       
  2157 		}
       
  2158 		if (buffer_info->skb) {
       
  2159 			dev_kfree_skb(buffer_info->skb);
       
  2160 			buffer_info->skb = NULL;
       
  2161 		}
       
  2162 	}
       
  2163 
       
  2164 	/* there also may be some cached data from a chained receive */
       
  2165 	if (rx_ring->rx_skb_top) {
       
  2166 		dev_kfree_skb(rx_ring->rx_skb_top);
       
  2167 		rx_ring->rx_skb_top = NULL;
       
  2168 	}
       
  2169 
       
  2170 	size = sizeof(struct e1000_buffer) * rx_ring->count;
       
  2171 	memset(rx_ring->buffer_info, 0, size);
       
  2172 
       
  2173 	/* Zero out the descriptor ring */
       
  2174 	memset(rx_ring->desc, 0, rx_ring->size);
       
  2175 
       
  2176 	rx_ring->next_to_clean = 0;
       
  2177 	rx_ring->next_to_use = 0;
       
  2178 
       
  2179 	writel(0, hw->hw_addr + rx_ring->rdh);
       
  2180 	writel(0, hw->hw_addr + rx_ring->rdt);
       
  2181 }
       
  2182 
       
  2183 /**
       
  2184  * e1000_clean_all_rx_rings - Free Rx Buffers for all queues
       
  2185  * @adapter: board private structure
       
  2186  **/
       
  2187 static void e1000_clean_all_rx_rings(struct e1000_adapter *adapter)
       
  2188 {
       
  2189 	int i;
       
  2190 
       
  2191 	for (i = 0; i < adapter->num_rx_queues; i++)
       
  2192 		e1000_clean_rx_ring(adapter, &adapter->rx_ring[i]);
       
  2193 }
       
  2194 
       
  2195 /* The 82542 2.0 (revision 2) needs to have the receive unit in reset
       
  2196  * and memory write and invalidate disabled for certain operations
       
  2197  */
       
  2198 static void e1000_enter_82542_rst(struct e1000_adapter *adapter)
       
  2199 {
       
  2200 	struct e1000_hw *hw = &adapter->hw;
       
  2201 	struct net_device *netdev = adapter->netdev;
       
  2202 	u32 rctl;
       
  2203 
       
  2204 	e1000_pci_clear_mwi(hw);
       
  2205 
       
  2206 	rctl = er32(RCTL);
       
  2207 	rctl |= E1000_RCTL_RST;
       
  2208 	ew32(RCTL, rctl);
       
  2209 	E1000_WRITE_FLUSH();
       
  2210 	mdelay(5);
       
  2211 
       
  2212 	if (!adapter->ecdev && netif_running(netdev))
       
  2213 		e1000_clean_all_rx_rings(adapter);
       
  2214 }
       
  2215 
       
  2216 static void e1000_leave_82542_rst(struct e1000_adapter *adapter)
       
  2217 {
       
  2218 	struct e1000_hw *hw = &adapter->hw;
       
  2219 	struct net_device *netdev = adapter->netdev;
       
  2220 	u32 rctl;
       
  2221 
       
  2222 	rctl = er32(RCTL);
       
  2223 	rctl &= ~E1000_RCTL_RST;
       
  2224 	ew32(RCTL, rctl);
       
  2225 	E1000_WRITE_FLUSH();
       
  2226 	mdelay(5);
       
  2227 
       
  2228 	if (hw->pci_cmd_word & PCI_COMMAND_INVALIDATE)
       
  2229 		e1000_pci_set_mwi(hw);
       
  2230 
       
  2231 	if (adapter->ecdev || netif_running(netdev)) {
       
  2232 		/* No need to loop, because 82542 supports only 1 queue */
       
  2233 		struct e1000_rx_ring *ring = &adapter->rx_ring[0];
       
  2234 		e1000_configure_rx(adapter);
       
  2235 		if (adapter->ecdev) {
       
  2236 			/* fill rx ring completely! */
       
  2237 			adapter->alloc_rx_buf(adapter, ring, ring->count);
       
  2238 		} else {
       
  2239 			/* this one leaves the last ring element unallocated! */
       
  2240 			adapter->alloc_rx_buf(adapter, ring, E1000_DESC_UNUSED(ring));
       
  2241 		}
       
  2242 
       
  2243 	}
       
  2244 }
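
/* Usage sketch (not new driver code): on 82542 rev 2.0, every write to the
 * receive address registers must be bracketed by the two helpers above:
 *
 *	if (hw->mac_type == e1000_82542_rev2_0)
 *		e1000_enter_82542_rst(adapter);
 *	e1000_rar_set(hw, hw->mac_addr, 0);
 *	if (hw->mac_type == e1000_82542_rev2_0)
 *		e1000_leave_82542_rst(adapter);
 *
 * e1000_set_mac() and e1000_set_rx_mode() below follow exactly this pattern.
 */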
       
  2245 
       
  2246 /**
       
  2247  * e1000_set_mac - Change the Ethernet Address of the NIC
       
  2248  * @netdev: network interface device structure
       
  2249  * @p: pointer to an address structure
       
  2250  *
       
  2251  * Returns 0 on success, negative on failure
       
  2252  **/
       
  2253 static int e1000_set_mac(struct net_device *netdev, void *p)
       
  2254 {
       
  2255 	struct e1000_adapter *adapter = netdev_priv(netdev);
       
  2256 	struct e1000_hw *hw = &adapter->hw;
       
  2257 	struct sockaddr *addr = p;
       
  2258 
       
  2259 	if (!is_valid_ether_addr(addr->sa_data))
       
  2260 		return -EADDRNOTAVAIL;
       
  2261 
       
  2262 	/* 82542 2.0 needs to be in reset to write receive address registers */
       
  2263 
       
  2264 	if (hw->mac_type == e1000_82542_rev2_0)
       
  2265 		e1000_enter_82542_rst(adapter);
       
  2266 
       
  2267 	memcpy(netdev->dev_addr, addr->sa_data, netdev->addr_len);
       
  2268 	memcpy(hw->mac_addr, addr->sa_data, netdev->addr_len);
       
  2269 
       
  2270 	e1000_rar_set(hw, hw->mac_addr, 0);
       
  2271 
       
  2272 	if (hw->mac_type == e1000_82542_rev2_0)
       
  2273 		e1000_leave_82542_rst(adapter);
       
  2274 
       
  2275 	return 0;
       
  2276 }
       
  2277 
       
  2278 /**
       
  2279  * e1000_set_rx_mode - Secondary Unicast, Multicast and Promiscuous mode set
       
  2280  * @netdev: network interface device structure
       
  2281  *
       
  2282  * The set_rx_mode entry point is called whenever the unicast or multicast
       
  2283  * address lists or the network interface flags are updated. This routine is
       
  2284  * responsible for configuring the hardware for proper unicast, multicast,
       
  2285  * promiscuous mode, and all-multi behavior.
       
  2286  **/
       
  2287 static void e1000_set_rx_mode(struct net_device *netdev)
       
  2288 {
       
  2289 	struct e1000_adapter *adapter = netdev_priv(netdev);
       
  2290 	struct e1000_hw *hw = &adapter->hw;
       
  2291 	struct netdev_hw_addr *ha;
       
  2292 	bool use_uc = false;
       
  2293 	u32 rctl;
       
  2294 	u32 hash_value;
       
  2295 	int i, rar_entries = E1000_RAR_ENTRIES;
       
  2296 	int mta_reg_count = E1000_NUM_MTA_REGISTERS;
       
  2297 	u32 *mcarray = kcalloc(mta_reg_count, sizeof(u32), GFP_ATOMIC);
       
  2298 
       
  2299 	if (!mcarray)
       
  2300 		return;
       
  2301 
       
  2302 	/* Check for Promiscuous and All Multicast modes */
       
  2303 
       
  2304 	rctl = er32(RCTL);
       
  2305 
       
  2306 	if (netdev->flags & IFF_PROMISC) {
       
  2307 		rctl |= (E1000_RCTL_UPE | E1000_RCTL_MPE);
       
  2308 		rctl &= ~E1000_RCTL_VFE;
       
  2309 	} else {
       
  2310 		if (netdev->flags & IFF_ALLMULTI)
       
  2311 			rctl |= E1000_RCTL_MPE;
       
  2312 		else
       
  2313 			rctl &= ~E1000_RCTL_MPE;
       
  2314 		/* Enable VLAN filter if there is a VLAN */
       
  2315 		if (e1000_vlan_used(adapter))
       
  2316 			rctl |= E1000_RCTL_VFE;
       
  2317 	}
       
  2318 
       
  2319 	if (netdev_uc_count(netdev) > rar_entries - 1) {
       
  2320 		rctl |= E1000_RCTL_UPE;
       
  2321 	} else if (!(netdev->flags & IFF_PROMISC)) {
       
  2322 		rctl &= ~E1000_RCTL_UPE;
       
  2323 		use_uc = true;
       
  2324 	}
       
  2325 
       
  2326 	ew32(RCTL, rctl);
       
  2327 
       
  2328 	/* 82542 2.0 needs to be in reset to write receive address registers */
       
  2329 
       
  2330 	if (hw->mac_type == e1000_82542_rev2_0)
       
  2331 		e1000_enter_82542_rst(adapter);
       
  2332 
       
  2333 	/* load the first 14 addresses into the exact filters 1-14. Unicast
       
  2334 	 * addresses take precedence to avoid disabling unicast filtering
       
  2335 	 * when possible.
       
  2336 	 *
       
  2337 	 * RAR 0 is used for the station MAC address
       
  2338 	 * if there are not 14 addresses, go ahead and clear the filters
       
  2339 	 */
       
  2340 	i = 1;
       
  2341 	if (use_uc)
       
  2342 		netdev_for_each_uc_addr(ha, netdev) {
       
  2343 			if (i == rar_entries)
       
  2344 				break;
       
  2345 			e1000_rar_set(hw, ha->addr, i++);
       
  2346 		}
       
  2347 
       
  2348 	netdev_for_each_mc_addr(ha, netdev) {
       
  2349 		if (i == rar_entries) {
       
  2350 			/* load any remaining addresses into the hash table */
       
  2351 			u32 hash_reg, hash_bit, mta;
       
  2352 			hash_value = e1000_hash_mc_addr(hw, ha->addr);
       
  2353 			hash_reg = (hash_value >> 5) & 0x7F;
       
  2354 			hash_bit = hash_value & 0x1F;
       
  2355 			mta = (1 << hash_bit);
       
  2356 			mcarray[hash_reg] |= mta;
       
  2357 		} else {
       
  2358 			e1000_rar_set(hw, ha->addr, i++);
       
  2359 		}
       
  2360 	}
       
  2361 
       
  2362 	for (; i < rar_entries; i++) {
       
  2363 		E1000_WRITE_REG_ARRAY(hw, RA, i << 1, 0);
       
  2364 		E1000_WRITE_FLUSH();
       
  2365 		E1000_WRITE_REG_ARRAY(hw, RA, (i << 1) + 1, 0);
       
  2366 		E1000_WRITE_FLUSH();
       
  2367 	}
       
  2368 
       
  2369 	/* write the hash table completely, write from bottom to avoid
       
  2370 	 * both stupid write combining chipsets, and flushing each write
       
  2371 	 */
       
  2372 	for (i = mta_reg_count - 1; i >= 0; i--) {

  2373 		/* The 82544 has an errata where writing odd offsets

  2374 		 * overwrites the previous even offset, but writing

  2375 		 * backwards over the range works around it by always

  2376 		 * writing the odd offset first
       
  2377 		 */
       
  2378 		E1000_WRITE_REG_ARRAY(hw, MTA, i, mcarray[i]);
       
  2379 	}
       
  2380 	E1000_WRITE_FLUSH();
       
  2381 
       
  2382 	if (hw->mac_type == e1000_82542_rev2_0)
       
  2383 		e1000_leave_82542_rst(adapter);
       
  2384 
       
  2385 	kfree(mcarray);
       
  2386 }
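
/* Hash placement example (illustrative): a multicast address hashing to
 * hash_value = 0x2E3 yields hash_reg = (0x2E3 >> 5) & 0x7F = 23 and
 * hash_bit = 0x2E3 & 0x1F = 3, so the loop above sets bit 3 of MTA
 * register 23 for that address.
 */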
       
  2387 
       
  2388 /**
       
  2389  * e1000_update_phy_info_task - get phy info
       
  2390  * @work: work struct contained inside adapter struct
       
  2391  *
       
  2392  * Need to wait a few seconds after link up to get diagnostic information from
       
  2393  * the phy
       
  2394  */
       
  2395 static void e1000_update_phy_info_task(struct work_struct *work)
       
  2396 {
       
  2397 	struct e1000_adapter *adapter = container_of(work,
       
  2398 						     struct e1000_adapter,
       
  2399 						     phy_info_task.work);
       
  2400 	if (test_bit(__E1000_DOWN, &adapter->flags))
       
  2401 		return;
       
  2402 	mutex_lock(&adapter->mutex);
       
  2403 	e1000_phy_get_info(&adapter->hw, &adapter->phy_info);
       
  2404 	mutex_unlock(&adapter->mutex);
       
  2405 }
       
  2406 
       
  2407 /**
       
  2408  * e1000_82547_tx_fifo_stall_task - task to complete work
       
  2409  * @work: work struct contained inside adapter struct
       
  2410  **/
       
  2411 static void e1000_82547_tx_fifo_stall_task(struct work_struct *work)
       
  2412 {
       
  2413 	struct e1000_adapter *adapter = container_of(work,
       
  2414 						     struct e1000_adapter,
       
  2415 						     fifo_stall_task.work);
       
  2416 	struct e1000_hw *hw = &adapter->hw;
       
  2417 	struct net_device *netdev = adapter->netdev;
       
  2418 	u32 tctl;
       
  2419 
       
  2420 	if (test_bit(__E1000_DOWN, &adapter->flags))
       
  2421 		return;
       
  2422 	mutex_lock(&adapter->mutex);
       
  2423 	if (atomic_read(&adapter->tx_fifo_stall)) {
       
  2424 		if ((er32(TDT) == er32(TDH)) &&
       
  2425 		    (er32(TDFT) == er32(TDFH)) &&

  2426 		    (er32(TDFTS) == er32(TDFHS))) {
       
  2427 			tctl = er32(TCTL);
       
  2428 			ew32(TCTL, tctl & ~E1000_TCTL_EN);
       
  2429 			ew32(TDFT, adapter->tx_head_addr);
       
  2430 			ew32(TDFH, adapter->tx_head_addr);
       
  2431 			ew32(TDFTS, adapter->tx_head_addr);
       
  2432 			ew32(TDFHS, adapter->tx_head_addr);
       
  2433 			ew32(TCTL, tctl);
       
  2434 			E1000_WRITE_FLUSH();
       
  2435 
       
  2436 			adapter->tx_fifo_head = 0;
       
  2437 			atomic_set(&adapter->tx_fifo_stall, 0);
       
  2438 			netif_wake_queue(netdev);
       
  2439 		} else if (!test_bit(__E1000_DOWN, &adapter->flags)) {
       
  2440 			schedule_delayed_work(&adapter->fifo_stall_task, 1);
       
  2441 		}
       
  2442 	}
       
  2443 	mutex_unlock(&adapter->mutex);
       
  2444 }
       
  2445 
       
  2446 bool e1000_has_link(struct e1000_adapter *adapter)
       
  2447 {
       
  2448 	struct e1000_hw *hw = &adapter->hw;
       
  2449 	bool link_active = false;
       
  2450 
       
  2451 	/* get_link_status is set on LSC (link status) interrupt or rx
       
  2452 	 * sequence error interrupt (except on intel ce4100).
       
  2453 	 * get_link_status will stay false until the
       
  2454 	 * e1000_check_for_link establishes link for copper adapters
       
  2455 	 * ONLY
       
  2456 	 */
       
  2457 	switch (hw->media_type) {
       
  2458 	case e1000_media_type_copper:
       
  2459 		if (hw->mac_type == e1000_ce4100)
       
  2460 			hw->get_link_status = 1;
       
  2461 		if (hw->get_link_status) {
       
  2462 			e1000_check_for_link(hw);
       
  2463 			link_active = !hw->get_link_status;
       
  2464 		} else {
       
  2465 			link_active = true;
       
  2466 		}
       
  2467 		break;
       
  2468 	case e1000_media_type_fiber:
       
  2469 		e1000_check_for_link(hw);
       
  2470 		link_active = !!(er32(STATUS) & E1000_STATUS_LU);
       
  2471 		break;
       
  2472 	case e1000_media_type_internal_serdes:
       
  2473 		e1000_check_for_link(hw);
       
  2474 		link_active = hw->serdes_has_link;
       
  2475 		break;
       
  2476 	default:
       
  2477 		break;
       
  2478 	}
       
  2479 
       
  2480 	return link_active;
       
  2481 }
       
  2482 
       
  2483 /**
       
  2484  * e1000_watchdog - work function
       
  2485  * @work: work struct contained inside adapter struct
       
  2486  **/
       
  2487 static void e1000_watchdog(struct work_struct *work)
       
  2488 {
       
  2489 	struct e1000_adapter *adapter = container_of(work,
       
  2490 						     struct e1000_adapter,
       
  2491 						     watchdog_task.work);
       
  2492 	struct e1000_hw *hw = &adapter->hw;
       
  2493 	struct net_device *netdev = adapter->netdev;
       
  2494 	struct e1000_tx_ring *txdr = adapter->tx_ring;
       
  2495 	u32 link, tctl;
       
  2496 
       
  2497 	if (test_bit(__E1000_DOWN, &adapter->flags))
       
  2498 		return;
       
  2499 
       
  2500 	mutex_lock(&adapter->mutex);
       
  2501 	link = e1000_has_link(adapter);
       
  2502 	if (!adapter->ecdev && (netif_carrier_ok(netdev)) && link)
       
  2503 		goto link_up;
       
  2504 
       
  2505 	if (link) {
       
  2506 		if ((adapter->ecdev && !ecdev_get_link(adapter->ecdev))
       
  2507 				|| (!adapter->ecdev && !netif_carrier_ok(netdev))) {
       
  2508 			u32 ctrl;
       
  2509 			bool txb2b __attribute__ ((unused)) = true;
       
  2510 			/* update snapshot of PHY registers on LSC */
       
  2511 			e1000_get_speed_and_duplex(hw,
       
  2512 						   &adapter->link_speed,
       
  2513 						   &adapter->link_duplex);
       
  2514 
       
  2515 			ctrl = er32(CTRL);
       
  2516 			pr_info("%s NIC Link is Up %d Mbps %s, "
       
  2517 				"Flow Control: %s\n",
       
  2518 				netdev->name,
       
  2519 				adapter->link_speed,
       
  2520 				adapter->link_duplex == FULL_DUPLEX ?
       
  2521 				"Full Duplex" : "Half Duplex",
       
  2522 				((ctrl & E1000_CTRL_TFCE) && (ctrl &
       
  2523 				E1000_CTRL_RFCE)) ? "RX/TX" : ((ctrl &
       
  2524 				E1000_CTRL_RFCE) ? "RX" : ((ctrl &
       
  2525 				E1000_CTRL_TFCE) ? "TX" : "None")));
       
  2526 
       
  2527 			/* adjust timeout factor according to speed/duplex */
       
  2528 			adapter->tx_timeout_factor = 1;
       
  2529 			switch (adapter->link_speed) {
       
  2530 			case SPEED_10:
       
  2531 				txb2b = false;
       
  2532 				adapter->tx_timeout_factor = 16;
       
  2533 				break;
       
  2534 			case SPEED_100:
       
  2535 				txb2b = false;
       
  2536 				/* maybe add some timeout factor ? */
       
  2537 				break;
       
  2538 			}
       
  2539 
       
  2540 			/* enable transmits in the hardware */
       
  2541 			tctl = er32(TCTL);
       
  2542 			tctl |= E1000_TCTL_EN;
       
  2543 			ew32(TCTL, tctl);
       
  2544 
       
  2545 			if (adapter->ecdev) {
       
  2546 				ecdev_set_link(adapter->ecdev, 1);
       
  2547 			} else {
       
  2549 				netif_carrier_on(netdev);
       
  2550 				if (!test_bit(__E1000_DOWN, &adapter->flags))
       
  2551 					schedule_delayed_work(&adapter->phy_info_task,
       
  2552 							2 * HZ);
       
  2553 			}
       
  2554 			adapter->smartspeed = 0;
       
  2555 		}
       
  2556 	} else {
       
  2557 		if ((adapter->ecdev && ecdev_get_link(adapter->ecdev))
       
  2558 				|| (!adapter->ecdev && netif_carrier_ok(netdev))) {
       
  2559 			adapter->link_speed = 0;
       
  2560 			adapter->link_duplex = 0;
       
  2561 			pr_info("%s NIC Link is Down\n",
       
  2562 				netdev->name);
       
  2563 
       
  2564 			if (adapter->ecdev) {
       
  2565 				ecdev_set_link(adapter->ecdev, 0);
       
  2566 			} else {
       
  2567 				netif_carrier_off(netdev);
       
  2568 
       
  2569 				if (!test_bit(__E1000_DOWN, &adapter->flags))
       
  2570 					schedule_delayed_work(&adapter->phy_info_task,
       
  2571 							2 * HZ);
       
  2572 			}
       
  2573 		}
       
  2574 
       
  2575 		e1000_smartspeed(adapter);
       
  2576 	}
       
  2577 
       
  2578 link_up:
       
  2579 	e1000_update_stats(adapter);
       
  2580 
       
  2581 	hw->tx_packet_delta = adapter->stats.tpt - adapter->tpt_old;
       
  2582 	adapter->tpt_old = adapter->stats.tpt;
       
  2583 	hw->collision_delta = adapter->stats.colc - adapter->colc_old;
       
  2584 	adapter->colc_old = adapter->stats.colc;
       
  2585 
       
  2586 	adapter->gorcl = adapter->stats.gorcl - adapter->gorcl_old;
       
  2587 	adapter->gorcl_old = adapter->stats.gorcl;
       
  2588 	adapter->gotcl = adapter->stats.gotcl - adapter->gotcl_old;
       
  2589 	adapter->gotcl_old = adapter->stats.gotcl;
       
  2590 
       
  2591 	e1000_update_adaptive(hw);
       
  2592 
       
  2593 	if (!adapter->ecdev && !netif_carrier_ok(netdev)) {
       
  2594 		if (E1000_DESC_UNUSED(txdr) + 1 < txdr->count) {
       
  2595 			/* We've lost link, so the controller stops DMA,
       
  2596 			 * but we've got queued Tx work that's never going
       
  2597 			 * to get done, so reset controller to flush Tx.
       
  2598 			 * (Do the reset outside of interrupt context).
       
  2599 			 */
       
  2600 			adapter->tx_timeout_count++;
       
  2601 			schedule_work(&adapter->reset_task);
       
  2602 			/* exit immediately since reset is imminent */
       
  2603 			goto unlock;
       
  2604 		}
       
  2605 	}
       
  2606 
       
  2607 	/* Simple mode for Interrupt Throttle Rate (ITR) */
       
  2608 	if (hw->mac_type >= e1000_82540 && adapter->itr_setting == 4) {
       
  2609 		/* Symmetric Tx/Rx gets a reduced ITR=2000;
       
  2610 		 * Total asymmetrical Tx or Rx gets ITR=8000;
       
  2611 		 * everyone else is between 2000-8000.
       
  2612 		 */
       
  2613 		u32 goc = (adapter->gotcl + adapter->gorcl) / 10000;
       
  2614 		u32 dif = (adapter->gotcl > adapter->gorcl ?
       
  2615 			    adapter->gotcl - adapter->gorcl :
       
  2616 			    adapter->gorcl - adapter->gotcl) / 10000;
       
  2617 		u32 itr = goc > 0 ? (dif * 6000 / goc + 2000) : 8000;
       
  2618 
       
  2619 		ew32(ITR, 1000000000 / (itr * 256));
       
  2620 	}
       
  2621 
       
  2622 	/* Cause software interrupt to ensure rx ring is cleaned */
       
  2623 	ew32(ICS, E1000_ICS_RXDMT0);
       
  2624 
       
  2625 	/* Force detection of hung controller every watchdog period */
       
  2626 	adapter->detect_tx_hung = true;
       
  2627 
       
  2628 	/* Reschedule the task */
       
  2629 	if (!adapter->ecdev && !test_bit(__E1000_DOWN, &adapter->flags))
       
  2630 		schedule_delayed_work(&adapter->watchdog_task, 2 * HZ);
       
  2631 
       
  2632 unlock:
       
  2633 	mutex_unlock(&adapter->mutex);
       
  2634 }
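
/* Simple-mode ITR example (illustrative): with symmetric traffic
 * (gotcl == gorcl) dif is 0 and itr = 2000 ints/s; with completely one-sided
 * traffic dif == goc and itr = 6000 + 2000 = 8000; mixed loads land in
 * between. The register write then converts interrupts/s into 256 ns units,
 * as in e1000_configure_rx().
 */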
       
  2635 
       
  2636 enum latency_range {
       
  2637 	lowest_latency = 0,
       
  2638 	low_latency = 1,
       
  2639 	bulk_latency = 2,
       
  2640 	latency_invalid = 255
       
  2641 };
       
  2642 
       
  2643 /**
       
  2644  * e1000_update_itr - update the dynamic ITR value based on statistics
       
  2645  * @adapter: pointer to adapter
       
   2646  * @itr_setting: current latency range setting (one of enum latency_range)
       
  2647  * @packets: the number of packets during this measurement interval
       
  2648  * @bytes: the number of bytes during this measurement interval
       
  2649  *
       
   2650  *      Stores a new ITR value based on packet and byte

   2651  *      counts during the last interrupt.  The advantage of per-interrupt

   2652  *      computation is faster updates and a more accurate ITR for the current

   2653  *      traffic pattern.  Constants in this function were computed

   2654  *      based on theoretical maximum wire speed, and thresholds were set

   2655  *      based on testing data so as to minimize response time

   2656  *      while increasing bulk throughput.

   2657  *      This functionality is controlled by the InterruptThrottleRate module

   2658  *      parameter (see e1000_param.c).
       
  2659  **/
       
  2660 static unsigned int e1000_update_itr(struct e1000_adapter *adapter,
       
  2661 				     u16 itr_setting, int packets, int bytes)
       
  2662 {
       
  2663 	unsigned int retval = itr_setting;
       
  2664 	struct e1000_hw *hw = &adapter->hw;
       
  2665 
       
  2666 	if (unlikely(hw->mac_type < e1000_82540))
       
  2667 		goto update_itr_done;
       
  2668 
       
  2669 	if (packets == 0)
       
  2670 		goto update_itr_done;
       
  2671 
       
  2672 	switch (itr_setting) {
       
  2673 	case lowest_latency:
       
   2674 		/* jumbo frames get bulk treatment */
       
  2675 		if (bytes/packets > 8000)
       
  2676 			retval = bulk_latency;
       
  2677 		else if ((packets < 5) && (bytes > 512))
       
  2678 			retval = low_latency;
       
  2679 		break;
       
  2680 	case low_latency:  /* 50 usec aka 20000 ints/s */
       
  2681 		if (bytes > 10000) {
       
  2682 			/* jumbo frames need bulk latency setting */
       
  2683 			if (bytes/packets > 8000)
       
  2684 				retval = bulk_latency;
       
  2685 			else if ((packets < 10) || ((bytes/packets) > 1200))
       
  2686 				retval = bulk_latency;
       
   2687 			else if (packets > 35)
       
  2688 				retval = lowest_latency;
       
  2689 		} else if (bytes/packets > 2000)
       
  2690 			retval = bulk_latency;
       
  2691 		else if (packets <= 2 && bytes < 512)
       
  2692 			retval = lowest_latency;
       
  2693 		break;
       
  2694 	case bulk_latency: /* 250 usec aka 4000 ints/s */
       
  2695 		if (bytes > 25000) {
       
  2696 			if (packets > 35)
       
  2697 				retval = low_latency;
       
  2698 		} else if (bytes < 6000) {
       
  2699 			retval = low_latency;
       
  2700 		}
       
  2701 		break;
       
  2702 	}
       
  2703 
       
  2704 update_itr_done:
       
  2705 	return retval;
       
  2706 }
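/* Editor's note: a worked example of the table above (sketch).  Starting
 * from low_latency with 40 packets / 30000 bytes in the interval:
 * bytes > 10000, bytes/packets = 750 (neither jumbo-sized nor > 1200), and
 * packets > 35, so the state moves to lowest_latency.  Starting from
 * lowest_latency with 3 packets / 27000 bytes: bytes/packets = 9000 > 8000
 * (jumbo-sized frames), so the state moves to bulk_latency.
 */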
       
  2707 
       
  2708 static void e1000_set_itr(struct e1000_adapter *adapter)
       
  2709 {
       
  2710 	struct e1000_hw *hw = &adapter->hw;
       
  2711 	u16 current_itr;
       
  2712 	u32 new_itr = adapter->itr;
       
  2713 
       
  2714 	if (unlikely(hw->mac_type < e1000_82540))
       
  2715 		return;
       
  2716 
       
  2717 	/* for non-gigabit speeds, just fix the interrupt rate at 4000 */
       
  2718 	if (unlikely(adapter->link_speed != SPEED_1000)) {
       
  2719 		current_itr = 0;
       
  2720 		new_itr = 4000;
       
  2721 		goto set_itr_now;
       
  2722 	}
       
  2723 
       
  2724 	adapter->tx_itr = e1000_update_itr(adapter, adapter->tx_itr,
       
  2725 					   adapter->total_tx_packets,
       
  2726 					   adapter->total_tx_bytes);
       
  2727 	/* conservative mode (itr 3) eliminates the lowest_latency setting */
       
  2728 	if (adapter->itr_setting == 3 && adapter->tx_itr == lowest_latency)
       
  2729 		adapter->tx_itr = low_latency;
       
  2730 
       
  2731 	adapter->rx_itr = e1000_update_itr(adapter, adapter->rx_itr,
       
  2732 					   adapter->total_rx_packets,
       
  2733 					   adapter->total_rx_bytes);
       
  2734 	/* conservative mode (itr 3) eliminates the lowest_latency setting */
       
  2735 	if (adapter->itr_setting == 3 && adapter->rx_itr == lowest_latency)
       
  2736 		adapter->rx_itr = low_latency;
       
  2737 
       
  2738 	current_itr = max(adapter->rx_itr, adapter->tx_itr);
       
  2739 
       
  2740 	switch (current_itr) {
       
  2741 	/* counts and packets in update_itr are dependent on these numbers */
       
  2742 	case lowest_latency:
       
  2743 		new_itr = 70000;
       
  2744 		break;
       
  2745 	case low_latency:
       
  2746 		new_itr = 20000; /* aka hwitr = ~200 */
       
  2747 		break;
       
  2748 	case bulk_latency:
       
  2749 		new_itr = 4000;
       
  2750 		break;
       
  2751 	default:
       
  2752 		break;
       
  2753 	}
       
  2754 
       
  2755 set_itr_now:
       
  2756 	if (new_itr != adapter->itr) {
       
   2757 		/* This attempts to bias the interrupt rate towards bulk

   2758 		 * by adding intermediate steps when the interrupt rate is

   2759 		 * increasing.
       
  2760 		 */
       
  2761 		new_itr = new_itr > adapter->itr ?
       
  2762 			  min(adapter->itr + (new_itr >> 2), new_itr) :
       
  2763 			  new_itr;
       
  2764 		adapter->itr = new_itr;
       
  2765 		ew32(ITR, 1000000000 / (new_itr * 256));
       
  2766 	}
       
  2767 }
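/* Editor's note: illustrative sketch of the set_itr_now smoothing step,
 * kept out of the build with #if 0.  When the target rate rises, the
 * driver climbs by a quarter of the target per invocation, which biases
 * the rate toward bulk_latency during bursts; decreases apply at once.
 */
#if 0
#include <stdio.h>
#include <stdint.h>

static uint32_t smooth_itr(uint32_t cur, uint32_t target)
{
	uint32_t step = cur + (target >> 2);

	if (target <= cur)	/* rate decreases are taken immediately */
		return target;
	return step < target ? step : target;
}

int main(void)
{
	uint32_t itr = 4000;	/* bulk_latency rate, target is low_latency */

	while (itr != 20000) {
		itr = smooth_itr(itr, 20000);
		printf("%u\n", itr);	/* 9000, 14000, 19000, 20000 */
	}
	return 0;
}
#endif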
       
  2768 
       
  2769 #define E1000_TX_FLAGS_CSUM		0x00000001
       
  2770 #define E1000_TX_FLAGS_VLAN		0x00000002
       
  2771 #define E1000_TX_FLAGS_TSO		0x00000004
       
  2772 #define E1000_TX_FLAGS_IPV4		0x00000008
       
  2773 #define E1000_TX_FLAGS_NO_FCS		0x00000010
       
  2774 #define E1000_TX_FLAGS_VLAN_MASK	0xffff0000
       
  2775 #define E1000_TX_FLAGS_VLAN_SHIFT	16
       
  2776 
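/* Editor's note: illustrative sketch (excluded from the build).  The low
 * bits of tx_flags carry booleans and the upper 16 bits carry the VLAN
 * tag, so a single u32 travels from e1000_xmit_frame() down to
 * e1000_tx_queue(), which copies the tag bits straight into txd_upper.
 */
#if 0
#include <assert.h>
#include <stdint.h>

int main(void)
{
	uint32_t tx_flags = 0;
	uint16_t vlan_tag = 100;	/* hypothetical tag */

	tx_flags |= 0x00000002;			/* E1000_TX_FLAGS_VLAN */
	tx_flags |= (uint32_t)vlan_tag << 16;	/* E1000_TX_FLAGS_VLAN_SHIFT */

	assert(((tx_flags & 0xffff0000) >> 16) == vlan_tag);
	return 0;
}
#endif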
       
  2777 static int e1000_tso(struct e1000_adapter *adapter,
       
  2778 		     struct e1000_tx_ring *tx_ring, struct sk_buff *skb)
       
  2779 {
       
  2780 	struct e1000_context_desc *context_desc;
       
  2781 	struct e1000_buffer *buffer_info;
       
  2782 	unsigned int i;
       
  2783 	u32 cmd_length = 0;
       
  2784 	u16 ipcse = 0, tucse, mss;
       
  2785 	u8 ipcss, ipcso, tucss, tucso, hdr_len;
       
  2786 	int err;
       
  2787 
       
  2788 	if (skb_is_gso(skb)) {
       
  2789 		if (skb_header_cloned(skb)) {
       
  2790 			err = pskb_expand_head(skb, 0, 0, GFP_ATOMIC);
       
  2791 			if (err)
       
  2792 				return err;
       
  2793 		}
       
  2794 
       
  2795 		hdr_len = skb_transport_offset(skb) + tcp_hdrlen(skb);
       
  2796 		mss = skb_shinfo(skb)->gso_size;
       
  2797 		if (skb->protocol == htons(ETH_P_IP)) {
       
  2798 			struct iphdr *iph = ip_hdr(skb);
       
  2799 			iph->tot_len = 0;
       
  2800 			iph->check = 0;
       
  2801 			tcp_hdr(skb)->check = ~csum_tcpudp_magic(iph->saddr,
       
  2802 								 iph->daddr, 0,
       
  2803 								 IPPROTO_TCP,
       
  2804 								 0);
       
  2805 			cmd_length = E1000_TXD_CMD_IP;
       
  2806 			ipcse = skb_transport_offset(skb) - 1;
       
  2807 		} else if (skb->protocol == htons(ETH_P_IPV6)) {
       
  2808 			ipv6_hdr(skb)->payload_len = 0;
       
  2809 			tcp_hdr(skb)->check =
       
  2810 				~csum_ipv6_magic(&ipv6_hdr(skb)->saddr,
       
  2811 						 &ipv6_hdr(skb)->daddr,
       
  2812 						 0, IPPROTO_TCP, 0);
       
  2813 			ipcse = 0;
       
  2814 		}
       
  2815 		ipcss = skb_network_offset(skb);
       
  2816 		ipcso = (void *)&(ip_hdr(skb)->check) - (void *)skb->data;
       
  2817 		tucss = skb_transport_offset(skb);
       
  2818 		tucso = (void *)&(tcp_hdr(skb)->check) - (void *)skb->data;
       
  2819 		tucse = 0;
       
  2820 
       
  2821 		cmd_length |= (E1000_TXD_CMD_DEXT | E1000_TXD_CMD_TSE |
       
  2822 			       E1000_TXD_CMD_TCP | (skb->len - (hdr_len)));
       
  2823 
       
  2824 		i = tx_ring->next_to_use;
       
  2825 		context_desc = E1000_CONTEXT_DESC(*tx_ring, i);
       
  2826 		buffer_info = &tx_ring->buffer_info[i];
       
  2827 
       
  2828 		context_desc->lower_setup.ip_fields.ipcss  = ipcss;
       
  2829 		context_desc->lower_setup.ip_fields.ipcso  = ipcso;
       
  2830 		context_desc->lower_setup.ip_fields.ipcse  = cpu_to_le16(ipcse);
       
  2831 		context_desc->upper_setup.tcp_fields.tucss = tucss;
       
  2832 		context_desc->upper_setup.tcp_fields.tucso = tucso;
       
  2833 		context_desc->upper_setup.tcp_fields.tucse = cpu_to_le16(tucse);
       
  2834 		context_desc->tcp_seg_setup.fields.mss     = cpu_to_le16(mss);
       
  2835 		context_desc->tcp_seg_setup.fields.hdr_len = hdr_len;
       
  2836 		context_desc->cmd_and_length = cpu_to_le32(cmd_length);
       
  2837 
       
  2838 		buffer_info->time_stamp = jiffies;
       
  2839 		buffer_info->next_to_watch = i;
       
  2840 
       
  2841 		if (++i == tx_ring->count) i = 0;
       
  2842 		tx_ring->next_to_use = i;
       
  2843 
       
  2844 		return true;
       
  2845 	}
       
  2846 	return false;
       
  2847 }
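/* Editor's note: a worked example of the offsets computed above (sketch,
 * assuming an untagged IPv4/TCP frame with no IP or TCP options):
 * ipcss = 14 (IP header follows the 14-byte Ethernet header),
 * ipcso = 24 (IP checksum at offset 10 within the IP header),
 * ipcse = 33 (last byte of the 20-byte IP header),
 * tucss = 34 and tucso = 50 (TCP checksum at offset 16), and
 * hdr_len = 54, so the PAYLEN part of cmd_and_length is skb->len - 54.
 */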
       
  2848 
       
  2849 static bool e1000_tx_csum(struct e1000_adapter *adapter,
       
  2850 			  struct e1000_tx_ring *tx_ring, struct sk_buff *skb)
       
  2851 {
       
  2852 	struct e1000_context_desc *context_desc;
       
  2853 	struct e1000_buffer *buffer_info;
       
  2854 	unsigned int i;
       
  2855 	u8 css;
       
  2856 	u32 cmd_len = E1000_TXD_CMD_DEXT;
       
  2857 
       
  2858 	if (skb->ip_summed != CHECKSUM_PARTIAL)
       
  2859 		return false;
       
  2860 
       
  2861 	switch (skb->protocol) {
       
  2862 	case cpu_to_be16(ETH_P_IP):
       
  2863 		if (ip_hdr(skb)->protocol == IPPROTO_TCP)
       
  2864 			cmd_len |= E1000_TXD_CMD_TCP;
       
  2865 		break;
       
  2866 	case cpu_to_be16(ETH_P_IPV6):
       
  2867 		/* XXX not handling all IPV6 headers */
       
  2868 		if (ipv6_hdr(skb)->nexthdr == IPPROTO_TCP)
       
  2869 			cmd_len |= E1000_TXD_CMD_TCP;
       
  2870 		break;
       
  2871 	default:
       
  2872 		if (unlikely(net_ratelimit()))
       
  2873 			e_warn(drv, "checksum_partial proto=%x!\n",
       
  2874 			       skb->protocol);
       
  2875 		break;
       
  2876 	}
       
  2877 
       
  2878 	css = skb_checksum_start_offset(skb);
       
  2879 
       
  2880 	i = tx_ring->next_to_use;
       
  2881 	buffer_info = &tx_ring->buffer_info[i];
       
  2882 	context_desc = E1000_CONTEXT_DESC(*tx_ring, i);
       
  2883 
       
  2884 	context_desc->lower_setup.ip_config = 0;
       
  2885 	context_desc->upper_setup.tcp_fields.tucss = css;
       
  2886 	context_desc->upper_setup.tcp_fields.tucso =
       
  2887 		css + skb->csum_offset;
       
  2888 	context_desc->upper_setup.tcp_fields.tucse = 0;
       
  2889 	context_desc->tcp_seg_setup.data = 0;
       
  2890 	context_desc->cmd_and_length = cpu_to_le32(cmd_len);
       
  2891 
       
  2892 	buffer_info->time_stamp = jiffies;
       
  2893 	buffer_info->next_to_watch = i;
       
  2894 
       
  2895 	if (unlikely(++i == tx_ring->count)) i = 0;
       
  2896 	tx_ring->next_to_use = i;
       
  2897 
       
  2898 	return true;
       
  2899 }
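/* Editor's note: worked example (sketch).  For CHECKSUM_PARTIAL the stack
 * supplies the geometry itself: on an IPv4/TCP frame,
 * skb_checksum_start_offset() is 34 and skb->csum_offset is 16, so the
 * context descriptor gets tucss = 34 and tucso = 50, while tucse = 0
 * tells the hardware to checksum through the end of the packet.
 */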
       
  2900 
       
  2901 #define E1000_MAX_TXD_PWR	12
       
  2902 #define E1000_MAX_DATA_PER_TXD	(1<<E1000_MAX_TXD_PWR)
       
  2903 
       
  2904 static int e1000_tx_map(struct e1000_adapter *adapter,
       
  2905 			struct e1000_tx_ring *tx_ring,
       
  2906 			struct sk_buff *skb, unsigned int first,
       
  2907 			unsigned int max_per_txd, unsigned int nr_frags,
       
  2908 			unsigned int mss)
       
  2909 {
       
  2910 	struct e1000_hw *hw = &adapter->hw;
       
  2911 	struct pci_dev *pdev = adapter->pdev;
       
  2912 	struct e1000_buffer *buffer_info;
       
  2913 	unsigned int len = skb_headlen(skb);
       
  2914 	unsigned int offset = 0, size, count = 0, i;
       
  2915 	unsigned int f, bytecount, segs;
       
  2916 
       
  2917 	i = tx_ring->next_to_use;
       
  2918 
       
  2919 	while (len) {
       
  2920 		buffer_info = &tx_ring->buffer_info[i];
       
  2921 		size = min(len, max_per_txd);
       
   2922 		/* Workaround for controller erratum --

   2923 		 * the descriptor for a non-TSO packet in a linear SKB that

   2924 		 * follows a TSO packet gets written back prematurely, before

   2925 		 * the data is fully DMA'd to the controller.
       
  2926 		 */
       
  2927 		if (!skb->data_len && tx_ring->last_tx_tso &&
       
  2928 		    !skb_is_gso(skb)) {
       
  2929 			tx_ring->last_tx_tso = false;
       
  2930 			size -= 4;
       
  2931 		}
       
  2932 
       
  2933 		/* Workaround for premature desc write-backs
       
  2934 		 * in TSO mode.  Append 4-byte sentinel desc
       
  2935 		 */
       
  2936 		if (unlikely(mss && !nr_frags && size == len && size > 8))
       
  2937 			size -= 4;
       
   2938 		/* Work-around for erratum 10; it applies

   2939 		 * to all controllers in PCI-X mode.

   2940 		 * The fix is to make sure that the first descriptor of a

   2941 		 * packet is smaller than 2048 - 16 - 16 (or 2016) bytes.
       
  2942 		 */
       
  2943 		if (unlikely((hw->bus_type == e1000_bus_type_pcix) &&
       
   2944 			     (size > 2015) && count == 0))
       
   2945 			size = 2015;
       
  2946 
       
  2947 		/* Workaround for potential 82544 hang in PCI-X.  Avoid
       
  2948 		 * terminating buffers within evenly-aligned dwords.
       
  2949 		 */
       
  2950 		if (unlikely(adapter->pcix_82544 &&
       
  2951 		   !((unsigned long)(skb->data + offset + size - 1) & 4) &&
       
  2952 		   size > 4))
       
  2953 			size -= 4;
       
  2954 
       
  2955 		buffer_info->length = size;
       
  2956 		/* set time_stamp *before* dma to help avoid a possible race */
       
  2957 		buffer_info->time_stamp = jiffies;
       
  2958 		buffer_info->mapped_as_page = false;
       
  2959 		buffer_info->dma = dma_map_single(&pdev->dev,
       
  2960 						  skb->data + offset,
       
  2961 						  size, DMA_TO_DEVICE);
       
  2962 		if (dma_mapping_error(&pdev->dev, buffer_info->dma))
       
  2963 			goto dma_error;
       
  2964 		buffer_info->next_to_watch = i;
       
  2965 
       
  2966 		len -= size;
       
  2967 		offset += size;
       
  2968 		count++;
       
  2969 		if (len) {
       
  2970 			i++;
       
  2971 			if (unlikely(i == tx_ring->count))
       
  2972 				i = 0;
       
  2973 		}
       
  2974 	}
       
  2975 
       
  2976 	for (f = 0; f < nr_frags; f++) {
       
  2977 		const struct skb_frag_struct *frag;
       
  2978 
       
  2979 		frag = &skb_shinfo(skb)->frags[f];
       
  2980 		len = skb_frag_size(frag);
       
  2981 		offset = 0;
       
  2982 
       
  2983 		while (len) {
       
  2984 			unsigned long bufend;
       
  2985 			i++;
       
  2986 			if (unlikely(i == tx_ring->count))
       
  2987 				i = 0;
       
  2988 
       
  2989 			buffer_info = &tx_ring->buffer_info[i];
       
  2990 			size = min(len, max_per_txd);
       
  2991 			/* Workaround for premature desc write-backs
       
  2992 			 * in TSO mode.  Append 4-byte sentinel desc
       
  2993 			 */
       
  2994 			if (unlikely(mss && f == (nr_frags-1) &&
       
  2995 			    size == len && size > 8))
       
  2996 				size -= 4;
       
  2997 			/* Workaround for potential 82544 hang in PCI-X.
       
  2998 			 * Avoid terminating buffers within evenly-aligned
       
  2999 			 * dwords.
       
  3000 			 */
       
  3001 			bufend = (unsigned long)
       
  3002 				page_to_phys(skb_frag_page(frag));
       
  3003 			bufend += offset + size - 1;
       
  3004 			if (unlikely(adapter->pcix_82544 &&
       
  3005 				     !(bufend & 4) &&
       
  3006 				     size > 4))
       
  3007 				size -= 4;
       
  3008 
       
  3009 			buffer_info->length = size;
       
  3010 			buffer_info->time_stamp = jiffies;
       
  3011 			buffer_info->mapped_as_page = true;
       
  3012 			buffer_info->dma = skb_frag_dma_map(&pdev->dev, frag,
       
  3013 						offset, size, DMA_TO_DEVICE);
       
  3014 			if (dma_mapping_error(&pdev->dev, buffer_info->dma))
       
  3015 				goto dma_error;
       
  3016 			buffer_info->next_to_watch = i;
       
  3017 
       
  3018 			len -= size;
       
  3019 			offset += size;
       
  3020 			count++;
       
  3021 		}
       
  3022 	}
       
  3023 
       
  3024 	segs = skb_shinfo(skb)->gso_segs ?: 1;
       
  3025 	/* multiply data chunks by size of headers */
       
  3026 	bytecount = ((segs - 1) * skb_headlen(skb)) + skb->len;
       
  3027 
       
  3028 	tx_ring->buffer_info[i].skb = skb;
       
  3029 	tx_ring->buffer_info[i].segs = segs;
       
  3030 	tx_ring->buffer_info[i].bytecount = bytecount;
       
  3031 	tx_ring->buffer_info[first].next_to_watch = i;
       
  3032 
       
  3033 	return count;
       
  3034 
       
  3035 dma_error:
       
  3036 	dev_err(&pdev->dev, "TX DMA map failed\n");
       
  3037 	buffer_info->dma = 0;
       
  3038 	if (count)
       
  3039 		count--;
       
  3040 
       
  3041 	while (count--) {
       
   3042 		if (i == 0)
       
  3043 			i += tx_ring->count;
       
  3044 		i--;
       
  3045 		buffer_info = &tx_ring->buffer_info[i];
       
  3046 		e1000_unmap_and_free_tx_resource(adapter, buffer_info);
       
  3047 	}
       
  3048 
       
  3049 	return 0;
       
  3050 }
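/* Editor's note: illustrative sketch (excluded from the build) of the
 * dma_error unwind above -- the index walks backwards through the ring,
 * wrapping from slot 0 back to slot ring_count - 1.
 */
#if 0
#include <stdio.h>

int main(void)
{
	unsigned int ring_count = 8;
	unsigned int i = 1;		/* index where mapping failed */
	unsigned int to_unwind = 3;	/* hypothetical mapped slot count */

	while (to_unwind--) {
		if (i == 0)
			i += ring_count;
		i--;
		printf("unmap slot %u\n", i);	/* prints 0, 7, 6 */
	}
	return 0;
}
#endif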
       
  3051 
       
  3052 static void e1000_tx_queue(struct e1000_adapter *adapter,
       
  3053 			   struct e1000_tx_ring *tx_ring, int tx_flags,
       
  3054 			   int count)
       
  3055 {
       
  3056 	struct e1000_hw *hw = &adapter->hw;
       
  3057 	struct e1000_tx_desc *tx_desc = NULL;
       
  3058 	struct e1000_buffer *buffer_info;
       
  3059 	u32 txd_upper = 0, txd_lower = E1000_TXD_CMD_IFCS;
       
  3060 	unsigned int i;
       
  3061 
       
  3062 	if (likely(tx_flags & E1000_TX_FLAGS_TSO)) {
       
  3063 		txd_lower |= E1000_TXD_CMD_DEXT | E1000_TXD_DTYP_D |
       
  3064 			     E1000_TXD_CMD_TSE;
       
  3065 		txd_upper |= E1000_TXD_POPTS_TXSM << 8;
       
  3066 
       
  3067 		if (likely(tx_flags & E1000_TX_FLAGS_IPV4))
       
  3068 			txd_upper |= E1000_TXD_POPTS_IXSM << 8;
       
  3069 	}
       
  3070 
       
  3071 	if (likely(tx_flags & E1000_TX_FLAGS_CSUM)) {
       
  3072 		txd_lower |= E1000_TXD_CMD_DEXT | E1000_TXD_DTYP_D;
       
  3073 		txd_upper |= E1000_TXD_POPTS_TXSM << 8;
       
  3074 	}
       
  3075 
       
  3076 	if (unlikely(tx_flags & E1000_TX_FLAGS_VLAN)) {
       
  3077 		txd_lower |= E1000_TXD_CMD_VLE;
       
  3078 		txd_upper |= (tx_flags & E1000_TX_FLAGS_VLAN_MASK);
       
  3079 	}
       
  3080 
       
  3081 	if (unlikely(tx_flags & E1000_TX_FLAGS_NO_FCS))
       
  3082 		txd_lower &= ~(E1000_TXD_CMD_IFCS);
       
  3083 
       
  3084 	i = tx_ring->next_to_use;
       
  3085 
       
  3086 	while (count--) {
       
  3087 		buffer_info = &tx_ring->buffer_info[i];
       
  3088 		tx_desc = E1000_TX_DESC(*tx_ring, i);
       
  3089 		tx_desc->buffer_addr = cpu_to_le64(buffer_info->dma);
       
  3090 		tx_desc->lower.data =
       
  3091 			cpu_to_le32(txd_lower | buffer_info->length);
       
  3092 		tx_desc->upper.data = cpu_to_le32(txd_upper);
       
  3093 		if (unlikely(++i == tx_ring->count)) i = 0;
       
  3094 	}
       
  3095 
       
  3096 	tx_desc->lower.data |= cpu_to_le32(adapter->txd_cmd);
       
  3097 
       
  3098 	/* txd_cmd re-enables FCS, so we'll re-disable it here as desired. */
       
  3099 	if (unlikely(tx_flags & E1000_TX_FLAGS_NO_FCS))
       
  3100 		tx_desc->lower.data &= ~(cpu_to_le32(E1000_TXD_CMD_IFCS));
       
  3101 
       
  3102 	/* Force memory writes to complete before letting h/w
       
  3103 	 * know there are new descriptors to fetch.  (Only
       
  3104 	 * applicable for weak-ordered memory model archs,
       
  3105 	 * such as IA-64).
       
  3106 	 */
       
  3107 	wmb();
       
  3108 
       
  3109 	tx_ring->next_to_use = i;
       
  3110 	writel(i, hw->hw_addr + tx_ring->tdt);
       
  3111 	/* we need this if more than one processor can write to our tail
       
  3112 	 * at a time, it synchronizes IO on IA64/Altix systems
       
  3113 	 */
       
  3114 	mmiowb();
       
  3115 }
       
  3116 
       
  3117 /* 82547 workaround to avoid controller hang in half-duplex environment.
       
  3118  * The workaround is to avoid queuing a large packet that would span
       
  3119  * the internal Tx FIFO ring boundary by notifying the stack to resend
       
  3120  * the packet at a later time.  This gives the Tx FIFO an opportunity to
       
  3121  * flush all packets.  When that occurs, we reset the Tx FIFO pointers
       
  3122  * to the beginning of the Tx FIFO.
       
  3123  */
       
  3124 
       
  3125 #define E1000_FIFO_HDR			0x10
       
  3126 #define E1000_82547_PAD_LEN		0x3E0
       
  3127 
       
  3128 static int e1000_82547_fifo_workaround(struct e1000_adapter *adapter,
       
  3129 				       struct sk_buff *skb)
       
  3130 {
       
  3131 	u32 fifo_space = adapter->tx_fifo_size - adapter->tx_fifo_head;
       
  3132 	u32 skb_fifo_len = skb->len + E1000_FIFO_HDR;
       
  3133 
       
  3134 	skb_fifo_len = ALIGN(skb_fifo_len, E1000_FIFO_HDR);
       
  3135 
       
  3136 	if (adapter->link_duplex != HALF_DUPLEX)
       
  3137 		goto no_fifo_stall_required;
       
  3138 
       
  3139 	if (atomic_read(&adapter->tx_fifo_stall))
       
  3140 		return 1;
       
  3141 
       
  3142 	if (skb_fifo_len >= (E1000_82547_PAD_LEN + fifo_space)) {
       
  3143 		atomic_set(&adapter->tx_fifo_stall, 1);
       
  3144 		return 1;
       
  3145 	}
       
  3146 
       
  3147 no_fifo_stall_required:
       
  3148 	adapter->tx_fifo_head += skb_fifo_len;
       
  3149 	if (adapter->tx_fifo_head >= adapter->tx_fifo_size)
       
  3150 		adapter->tx_fifo_head -= adapter->tx_fifo_size;
       
  3151 	return 0;
       
  3152 }
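/* Editor's note: worked example (sketch, with a hypothetical 8 KiB FIFO).
 * A full-size 1514-byte frame needs ALIGN(1514 + 0x10, 0x10) = 1536 bytes
 * of FIFO space.  With tx_fifo_size = 0x2000 and tx_fifo_head = 0x1c00,
 * fifo_space = 1024; since 1536 < 0x3E0 + 1024 = 2016 no stall is needed,
 * and the head advances to (0x1c00 + 1536) - 0x2000 = 0x200 after wrapping.
 */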
       
  3153 
       
  3154 static int __e1000_maybe_stop_tx(struct net_device *netdev, int size)
       
  3155 {
       
  3156 	struct e1000_adapter *adapter = netdev_priv(netdev);
       
  3157 	struct e1000_tx_ring *tx_ring = adapter->tx_ring;
       
  3158 
       
  3159 	if (adapter->ecdev) {
       
  3160 		return -EBUSY;
       
  3161 	}
       
  3162 
       
  3163 	netif_stop_queue(netdev);
       
  3164 	/* Herbert's original patch had:
       
  3165 	 *  smp_mb__after_netif_stop_queue();
       
  3166 	 * but since that doesn't exist yet, just open code it.
       
  3167 	 */
       
  3168 	smp_mb();
       
  3169 
       
   3170 	/* We need to check again in case another CPU has just
       
  3171 	 * made room available.
       
  3172 	 */
       
  3173 	if (likely(E1000_DESC_UNUSED(tx_ring) < size))
       
  3174 		return -EBUSY;
       
  3175 
       
  3176 	/* A reprieve! */
       
  3177 	netif_start_queue(netdev);
       
  3178 	++adapter->restart_queue;
       
  3179 	return 0;
       
  3180 }
       
  3181 
       
  3182 static int e1000_maybe_stop_tx(struct net_device *netdev,
       
  3183 			       struct e1000_tx_ring *tx_ring, int size)
       
  3184 {
       
  3185 	if (likely(E1000_DESC_UNUSED(tx_ring) >= size))
       
  3186 		return 0;
       
  3187 	return __e1000_maybe_stop_tx(netdev, size);
       
  3188 }
       
  3189 
       
   3190 #define TXD_USE_COUNT(S, X) (((S) >> (X)) + 1)
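/* Editor's note: worked example (sketch).  With max_txd_pwr = 12 (4096-byte
 * chunks), TXD_USE_COUNT(1514, 12) = (1514 >> 12) + 1 = 1 descriptor and
 * TXD_USE_COUNT(9014, 12) = (9014 >> 12) + 1 = 3.  The macro rounds up,
 * and deliberately charges one extra descriptor when the length is an
 * exact multiple of the chunk size.
 */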
       
  3191 static netdev_tx_t e1000_xmit_frame(struct sk_buff *skb,
       
  3192 				    struct net_device *netdev)
       
  3193 {
       
  3194 	struct e1000_adapter *adapter = netdev_priv(netdev);
       
  3195 	struct e1000_hw *hw = &adapter->hw;
       
  3196 	struct e1000_tx_ring *tx_ring;
       
  3197 	unsigned int first, max_per_txd = E1000_MAX_DATA_PER_TXD;
       
  3198 	unsigned int max_txd_pwr = E1000_MAX_TXD_PWR;
       
  3199 	unsigned int tx_flags = 0;
       
  3200 	unsigned int len = skb_headlen(skb);
       
  3201 	unsigned int nr_frags;
       
  3202 	unsigned int mss;
       
  3203 	int count = 0;
       
  3204 	int tso;
       
  3205 	unsigned int f;
       
  3206 
       
  3207 	/* This goes back to the question of how to logically map a Tx queue
       
  3208 	 * to a flow.  Right now, performance is impacted slightly negatively
       
  3209 	 * if using multiple Tx queues.  If the stack breaks away from a
       
  3210 	 * single qdisc implementation, we can look at this again.
       
  3211 	 */
       
  3212 	tx_ring = adapter->tx_ring;
       
  3213 
       
  3214 	if (unlikely(skb->len <= 0)) {
       
  3215 		if (!adapter->ecdev) {
       
  3216 			dev_kfree_skb_any(skb);
       
  3217 		}
       
  3218 		return NETDEV_TX_OK;
       
  3219 	}
       
  3220 
       
  3221 	/* On PCI/PCI-X HW, if packet size is less than ETH_ZLEN,
       
  3222 	 * packets may get corrupted during padding by HW.
       
   3223 	 * To work around this issue, pad all small packets manually.
       
  3224 	 */
       
  3225 	if (skb->len < ETH_ZLEN) {
       
  3226 		if (skb_pad(skb, ETH_ZLEN - skb->len))
       
  3227 			return NETDEV_TX_OK;
       
  3228 		skb->len = ETH_ZLEN;
       
  3229 		skb_set_tail_pointer(skb, ETH_ZLEN);
       
  3230 	}
       
  3231 
       
  3232 	mss = skb_shinfo(skb)->gso_size;
       
   3233 	/* The controller does a simple calculation to

   3234 	 * make sure there is enough room in the FIFO before

   3235 	 * initiating the DMA for each buffer.  It requires

   3236 	 * ceil(buffer len / mss) <= 4, so to make sure we

   3237 	 * don't overrun the FIFO, cap the max buffer length

   3238 	 * at 4 * mss if the mss drops.
       
  3239 	 */
       
  3240 	if (mss) {
       
  3241 		u8 hdr_len;
       
  3242 		max_per_txd = min(mss << 2, max_per_txd);
       
  3243 		max_txd_pwr = fls(max_per_txd) - 1;
       
  3244 
       
  3245 		hdr_len = skb_transport_offset(skb) + tcp_hdrlen(skb);
       
  3246 		if (skb->data_len && hdr_len == len) {
       
  3247 			switch (hw->mac_type) {
       
  3248 				unsigned int pull_size;
       
  3249 			case e1000_82544:
       
  3250 				/* Make sure we have room to chop off 4 bytes,
       
  3251 				 * and that the end alignment will work out to
       
  3252 				 * this hardware's requirements
       
  3253 				 * NOTE: this is a TSO only workaround
       
  3254 				 * if end byte alignment not correct move us
       
  3255 				 * into the next dword
       
  3256 				 */
       
  3257 				if ((unsigned long)(skb_tail_pointer(skb) - 1)
       
  3258 				    & 4)
       
  3259 					break;
       
  3260 				/* fall through */
       
  3261 				pull_size = min((unsigned int)4, skb->data_len);
       
  3262 				if (!__pskb_pull_tail(skb, pull_size)) {
       
  3263 					e_err(drv, "__pskb_pull_tail "
       
  3264 					      "failed.\n");
       
  3265 					if (!adapter->ecdev) {
       
  3266 						dev_kfree_skb_any(skb);
       
  3267 					}
       
  3268 					return NETDEV_TX_OK;
       
  3269 				}
       
  3270 				len = skb_headlen(skb);
       
  3271 				break;
       
  3272 			default:
       
  3273 				/* do nothing */
       
  3274 				break;
       
  3275 			}
       
  3276 		}
       
  3277 	}
       
  3278 
       
  3279 	/* reserve a descriptor for the offload context */
       
  3280 	if ((mss) || (skb->ip_summed == CHECKSUM_PARTIAL))
       
  3281 		count++;
       
  3282 	count++;
       
  3283 
       
  3284 	/* Controller Erratum workaround */
       
  3285 	if (!skb->data_len && tx_ring->last_tx_tso && !skb_is_gso(skb))
       
  3286 		count++;
       
  3287 
       
  3288 	count += TXD_USE_COUNT(len, max_txd_pwr);
       
  3289 
       
  3290 	if (adapter->pcix_82544)
       
  3291 		count++;
       
  3292 
       
   3293 	/* Work-around for erratum 10; it applies to all controllers

   3294 	 * in PCI-X mode, so add one more descriptor to the count.
       
  3295 	 */
       
  3296 	if (unlikely((hw->bus_type == e1000_bus_type_pcix) &&
       
  3297 			(len > 2015)))
       
  3298 		count++;
       
  3299 
       
  3300 	nr_frags = skb_shinfo(skb)->nr_frags;
       
  3301 	for (f = 0; f < nr_frags; f++)
       
  3302 		count += TXD_USE_COUNT(skb_frag_size(&skb_shinfo(skb)->frags[f]),
       
  3303 				       max_txd_pwr);
       
  3304 	if (adapter->pcix_82544)
       
  3305 		count += nr_frags;
       
  3306 
       
  3307 	/* need: count + 2 desc gap to keep tail from touching
       
  3308 	 * head, otherwise try next time
       
  3309 	 */
       
  3310 	if (unlikely(e1000_maybe_stop_tx(netdev, tx_ring, count + 2)))
       
  3311 		return NETDEV_TX_BUSY;
       
  3312 
       
  3313 	if (unlikely((hw->mac_type == e1000_82547) &&
       
  3314 		     (e1000_82547_fifo_workaround(adapter, skb)))) {
       
  3315 		if (!adapter->ecdev) {
       
  3316 			netif_stop_queue(netdev);
       
  3317 			if (!test_bit(__E1000_DOWN, &adapter->flags))
       
  3318 				schedule_delayed_work(&adapter->fifo_stall_task, 1);
       
  3319 		}
       
  3320 		return NETDEV_TX_BUSY;
       
  3321 	}
       
  3322 
       
  3323 	if (vlan_tx_tag_present(skb)) {
       
  3324 		tx_flags |= E1000_TX_FLAGS_VLAN;
       
  3325 		tx_flags |= (vlan_tx_tag_get(skb) << E1000_TX_FLAGS_VLAN_SHIFT);
       
  3326 	}
       
  3327 
       
  3328 	first = tx_ring->next_to_use;
       
  3329 
       
  3330 	tso = e1000_tso(adapter, tx_ring, skb);
       
  3331 	if (tso < 0) {
       
  3332 		if (!adapter->ecdev) {
       
  3333 			dev_kfree_skb_any(skb);
       
  3334 		}
       
  3335 		return NETDEV_TX_OK;
       
  3336 	}
       
  3337 
       
  3338 	if (likely(tso)) {
       
  3339 		if (likely(hw->mac_type != e1000_82544))
       
  3340 			tx_ring->last_tx_tso = true;
       
  3341 		tx_flags |= E1000_TX_FLAGS_TSO;
       
  3342 	} else if (likely(e1000_tx_csum(adapter, tx_ring, skb)))
       
  3343 		tx_flags |= E1000_TX_FLAGS_CSUM;
       
  3344 
       
  3345 	if (likely(skb->protocol == htons(ETH_P_IP)))
       
  3346 		tx_flags |= E1000_TX_FLAGS_IPV4;
       
  3347 
       
  3348 	if (unlikely(skb->no_fcs))
       
  3349 		tx_flags |= E1000_TX_FLAGS_NO_FCS;
       
  3350 
       
  3351 	count = e1000_tx_map(adapter, tx_ring, skb, first, max_per_txd,
       
  3352 			     nr_frags, mss);
       
  3353 
       
  3354 	if (count) {
       
  3355 		netdev_sent_queue(netdev, skb->len);
       
  3356 		skb_tx_timestamp(skb);
       
  3357 
       
  3358 		e1000_tx_queue(adapter, tx_ring, tx_flags, count);
       
  3359 		if (!adapter->ecdev) {
       
  3360 			/* Make sure there is space in the ring for the next send. */
       
  3361 			e1000_maybe_stop_tx(netdev, tx_ring, MAX_SKB_FRAGS + 2);
       
  3362 		}
       
  3363 
       
  3364 	} else {
       
  3365 		if (!adapter->ecdev) {
       
  3366 			dev_kfree_skb_any(skb);
       
  3367 		}
       
  3368 		tx_ring->buffer_info[first].time_stamp = 0;
       
  3369 		tx_ring->next_to_use = first;
       
  3370 	}
       
  3371 
       
  3372 	return NETDEV_TX_OK;
       
  3373 }
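/* Editor's note: worked example of the descriptor budgeting above (sketch).
 * A linear 1514-byte TCP frame with checksum offload on a plain PCI bus
 * reserves 1 context descriptor + 1 baseline + TXD_USE_COUNT(1514, 12) = 1
 * data descriptor, i.e. count = 3, and the frame is queued only if at
 * least count + 2 = 5 descriptors are unused in the ring.
 */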
       
  3374 
       
   3375 #define NUM_REGS 38 /* 1-based count */
       
  3376 static void e1000_regdump(struct e1000_adapter *adapter)
       
  3377 {
       
  3378 	struct e1000_hw *hw = &adapter->hw;
       
  3379 	u32 regs[NUM_REGS];
       
  3380 	u32 *regs_buff = regs;
       
  3381 	int i = 0;
       
  3382 
       
  3383 	static const char * const reg_name[] = {
       
  3384 		"CTRL",  "STATUS",
       
  3385 		"RCTL", "RDLEN", "RDH", "RDT", "RDTR",
       
  3386 		"TCTL", "TDBAL", "TDBAH", "TDLEN", "TDH", "TDT",
       
  3387 		"TIDV", "TXDCTL", "TADV", "TARC0",
       
  3388 		"TDBAL1", "TDBAH1", "TDLEN1", "TDH1", "TDT1",
       
  3389 		"TXDCTL1", "TARC1",
       
  3390 		"CTRL_EXT", "ERT", "RDBAL", "RDBAH",
       
  3391 		"TDFH", "TDFT", "TDFHS", "TDFTS", "TDFPC",
       
  3392 		"RDFH", "RDFT", "RDFHS", "RDFTS", "RDFPC"
       
  3393 	};
       
  3394 
       
  3395 	regs_buff[0]  = er32(CTRL);
       
  3396 	regs_buff[1]  = er32(STATUS);
       
  3397 
       
  3398 	regs_buff[2]  = er32(RCTL);
       
  3399 	regs_buff[3]  = er32(RDLEN);
       
  3400 	regs_buff[4]  = er32(RDH);
       
  3401 	regs_buff[5]  = er32(RDT);
       
  3402 	regs_buff[6]  = er32(RDTR);
       
  3403 
       
  3404 	regs_buff[7]  = er32(TCTL);
       
  3405 	regs_buff[8]  = er32(TDBAL);
       
  3406 	regs_buff[9]  = er32(TDBAH);
       
  3407 	regs_buff[10] = er32(TDLEN);
       
  3408 	regs_buff[11] = er32(TDH);
       
  3409 	regs_buff[12] = er32(TDT);
       
  3410 	regs_buff[13] = er32(TIDV);
       
  3411 	regs_buff[14] = er32(TXDCTL);
       
  3412 	regs_buff[15] = er32(TADV);
       
  3413 	regs_buff[16] = er32(TARC0);
       
  3414 
       
  3415 	regs_buff[17] = er32(TDBAL1);
       
  3416 	regs_buff[18] = er32(TDBAH1);
       
  3417 	regs_buff[19] = er32(TDLEN1);
       
  3418 	regs_buff[20] = er32(TDH1);
       
  3419 	regs_buff[21] = er32(TDT1);
       
  3420 	regs_buff[22] = er32(TXDCTL1);
       
  3421 	regs_buff[23] = er32(TARC1);
       
  3422 	regs_buff[24] = er32(CTRL_EXT);
       
  3423 	regs_buff[25] = er32(ERT);
       
  3424 	regs_buff[26] = er32(RDBAL0);
       
  3425 	regs_buff[27] = er32(RDBAH0);
       
  3426 	regs_buff[28] = er32(TDFH);
       
  3427 	regs_buff[29] = er32(TDFT);
       
  3428 	regs_buff[30] = er32(TDFHS);
       
  3429 	regs_buff[31] = er32(TDFTS);
       
  3430 	regs_buff[32] = er32(TDFPC);
       
  3431 	regs_buff[33] = er32(RDFH);
       
  3432 	regs_buff[34] = er32(RDFT);
       
  3433 	regs_buff[35] = er32(RDFHS);
       
  3434 	regs_buff[36] = er32(RDFTS);
       
  3435 	regs_buff[37] = er32(RDFPC);
       
  3436 
       
  3437 	pr_info("Register dump\n");
       
  3438 	for (i = 0; i < NUM_REGS; i++)
       
  3439 		pr_info("%-15s  %08x\n", reg_name[i], regs_buff[i]);
       
  3440 }
       
  3441 
       
  3442 /*
       
  3443  * e1000_dump: Print registers, tx ring and rx ring
       
  3444  */
       
  3445 static void e1000_dump(struct e1000_adapter *adapter)
       
  3446 {
       
  3447 	/* this code doesn't handle multiple rings */
       
  3448 	struct e1000_tx_ring *tx_ring = adapter->tx_ring;
       
  3449 	struct e1000_rx_ring *rx_ring = adapter->rx_ring;
       
  3450 	int i;
       
  3451 
       
  3452 	if (!netif_msg_hw(adapter))
       
  3453 		return;
       
  3454 
       
  3455 	/* Print Registers */
       
  3456 	e1000_regdump(adapter);
       
  3457 
       
  3458 	/* transmit dump */
       
  3459 	pr_info("TX Desc ring0 dump\n");
       
  3460 
       
  3461 	/* Transmit Descriptor Formats - DEXT[29] is 0 (Legacy) or 1 (Extended)
       
  3462 	 *
       
  3463 	 * Legacy Transmit Descriptor
       
  3464 	 *   +--------------------------------------------------------------+
       
  3465 	 * 0 |         Buffer Address [63:0] (Reserved on Write Back)       |
       
  3466 	 *   +--------------------------------------------------------------+
       
  3467 	 * 8 | Special  |    CSS     | Status |  CMD    |  CSO   |  Length  |
       
  3468 	 *   +--------------------------------------------------------------+
       
  3469 	 *   63       48 47        36 35    32 31     24 23    16 15        0
       
  3470 	 *
       
  3471 	 * Extended Context Descriptor (DTYP=0x0) for TSO or checksum offload
       
  3472 	 *   63      48 47    40 39       32 31             16 15    8 7      0
       
  3473 	 *   +----------------------------------------------------------------+
       
  3474 	 * 0 |  TUCSE  | TUCS0  |   TUCSS   |     IPCSE       | IPCS0 | IPCSS |
       
  3475 	 *   +----------------------------------------------------------------+
       
  3476 	 * 8 |   MSS   | HDRLEN | RSV | STA | TUCMD | DTYP |      PAYLEN      |
       
  3477 	 *   +----------------------------------------------------------------+
       
  3478 	 *   63      48 47    40 39 36 35 32 31   24 23  20 19                0
       
  3479 	 *
       
  3480 	 * Extended Data Descriptor (DTYP=0x1)
       
  3481 	 *   +----------------------------------------------------------------+
       
  3482 	 * 0 |                     Buffer Address [63:0]                      |
       
  3483 	 *   +----------------------------------------------------------------+
       
  3484 	 * 8 | VLAN tag |  POPTS  | Rsvd | Status | Command | DTYP |  DTALEN  |
       
  3485 	 *   +----------------------------------------------------------------+
       
  3486 	 *   63       48 47     40 39  36 35    32 31     24 23  20 19        0
       
  3487 	 */
       
  3488 	pr_info("Tc[desc]     [Ce CoCsIpceCoS] [MssHlRSCm0Plen] [bi->dma       ] leng  ntw timestmp         bi->skb\n");
       
  3489 	pr_info("Td[desc]     [address 63:0  ] [VlaPoRSCm1Dlen] [bi->dma       ] leng  ntw timestmp         bi->skb\n");
       
  3490 
       
  3491 	if (!netif_msg_tx_done(adapter))
       
  3492 		goto rx_ring_summary;
       
  3493 
       
  3494 	for (i = 0; tx_ring->desc && (i < tx_ring->count); i++) {
       
  3495 		struct e1000_tx_desc *tx_desc = E1000_TX_DESC(*tx_ring, i);
       
  3496 		struct e1000_buffer *buffer_info = &tx_ring->buffer_info[i];
       
  3497 		struct my_u { __le64 a; __le64 b; };
       
  3498 		struct my_u *u = (struct my_u *)tx_desc;
       
  3499 		const char *type;
       
  3500 
       
  3501 		if (i == tx_ring->next_to_use && i == tx_ring->next_to_clean)
       
  3502 			type = "NTC/U";
       
  3503 		else if (i == tx_ring->next_to_use)
       
  3504 			type = "NTU";
       
  3505 		else if (i == tx_ring->next_to_clean)
       
  3506 			type = "NTC";
       
  3507 		else
       
  3508 			type = "";
       
  3509 
       
  3510 		pr_info("T%c[0x%03X]    %016llX %016llX %016llX %04X  %3X %016llX %p %s\n",
       
  3511 			((le64_to_cpu(u->b) & (1<<20)) ? 'd' : 'c'), i,
       
  3512 			le64_to_cpu(u->a), le64_to_cpu(u->b),
       
  3513 			(u64)buffer_info->dma, buffer_info->length,
       
  3514 			buffer_info->next_to_watch,
       
  3515 			(u64)buffer_info->time_stamp, buffer_info->skb, type);
       
  3516 	}
       
  3517 
       
  3518 rx_ring_summary:
       
  3519 	/* receive dump */
       
  3520 	pr_info("\nRX Desc ring dump\n");
       
  3521 
       
  3522 	/* Legacy Receive Descriptor Format
       
  3523 	 *
       
  3524 	 * +-----------------------------------------------------+
       
  3525 	 * |                Buffer Address [63:0]                |
       
  3526 	 * +-----------------------------------------------------+
       
  3527 	 * | VLAN Tag | Errors | Status 0 | Packet csum | Length |
       
  3528 	 * +-----------------------------------------------------+
       
  3529 	 * 63       48 47    40 39      32 31         16 15      0
       
  3530 	 */
       
  3531 	pr_info("R[desc]      [address 63:0  ] [vl er S cks ln] [bi->dma       ] [bi->skb]\n");
       
  3532 
       
  3533 	if (!netif_msg_rx_status(adapter))
       
  3534 		goto exit;
       
  3535 
       
  3536 	for (i = 0; rx_ring->desc && (i < rx_ring->count); i++) {
       
  3537 		struct e1000_rx_desc *rx_desc = E1000_RX_DESC(*rx_ring, i);
       
  3538 		struct e1000_buffer *buffer_info = &rx_ring->buffer_info[i];
       
  3539 		struct my_u { __le64 a; __le64 b; };
       
  3540 		struct my_u *u = (struct my_u *)rx_desc;
       
  3541 		const char *type;
       
  3542 
       
  3543 		if (i == rx_ring->next_to_use)
       
  3544 			type = "NTU";
       
  3545 		else if (i == rx_ring->next_to_clean)
       
  3546 			type = "NTC";
       
  3547 		else
       
  3548 			type = "";
       
  3549 
       
  3550 		pr_info("R[0x%03X]     %016llX %016llX %016llX %p %s\n",
       
  3551 			i, le64_to_cpu(u->a), le64_to_cpu(u->b),
       
  3552 			(u64)buffer_info->dma, buffer_info->skb, type);
       
  3553 	} /* for */
       
  3554 
       
  3555 	/* dump the descriptor caches */
       
  3556 	/* rx */
       
  3557 	pr_info("Rx descriptor cache in 64bit format\n");
       
   3558 	for (i = 0x6000; i <= 0x63FF; i += 0x10) {
       
  3559 		pr_info("R%04X: %08X|%08X %08X|%08X\n",
       
  3560 			i,
       
  3561 			readl(adapter->hw.hw_addr + i+4),
       
  3562 			readl(adapter->hw.hw_addr + i),
       
  3563 			readl(adapter->hw.hw_addr + i+12),
       
  3564 			readl(adapter->hw.hw_addr + i+8));
       
  3565 	}
       
  3566 	/* tx */
       
  3567 	pr_info("Tx descriptor cache in 64bit format\n");
       
   3568 	for (i = 0x7000; i <= 0x73FF; i += 0x10) {
       
  3569 		pr_info("T%04X: %08X|%08X %08X|%08X\n",
       
  3570 			i,
       
  3571 			readl(adapter->hw.hw_addr + i+4),
       
  3572 			readl(adapter->hw.hw_addr + i),
       
  3573 			readl(adapter->hw.hw_addr + i+12),
       
  3574 			readl(adapter->hw.hw_addr + i+8));
       
  3575 	}
       
  3576 exit:
       
  3577 	return;
       
  3578 }
       
  3579 
       
  3580 /**
       
  3581  * e1000_tx_timeout - Respond to a Tx Hang
       
  3582  * @netdev: network interface device structure
       
  3583  **/
       
  3584 static void e1000_tx_timeout(struct net_device *netdev)
       
  3585 {
       
  3586 	struct e1000_adapter *adapter = netdev_priv(netdev);
       
  3587 
       
  3588 	/* Do the reset outside of interrupt context */
       
  3589 	adapter->tx_timeout_count++;
       
  3590 	schedule_work(&adapter->reset_task);
       
  3591 }
       
  3592 
       
  3593 static void e1000_reset_task(struct work_struct *work)
       
  3594 {
       
  3595 	struct e1000_adapter *adapter =
       
  3596 		container_of(work, struct e1000_adapter, reset_task);
       
  3597 
       
  3598 	if (test_bit(__E1000_DOWN, &adapter->flags))
       
  3599 		return;
       
  3600 	e_err(drv, "Reset adapter\n");
       
  3601 	e1000_reinit_safe(adapter);
       
  3602 }
       
  3603 
       
  3604 /**
       
  3605  * e1000_get_stats - Get System Network Statistics
       
  3606  * @netdev: network interface device structure
       
  3607  *
       
  3608  * Returns the address of the device statistics structure.
       
  3609  * The statistics are actually updated from the watchdog.
       
  3610  **/
       
  3611 static struct net_device_stats *e1000_get_stats(struct net_device *netdev)
       
  3612 {
       
  3613 	/* only return the current stats */
       
  3614 	return &netdev->stats;
       
  3615 }
       
  3616 
       
  3617 /**
       
  3618  * e1000_change_mtu - Change the Maximum Transfer Unit
       
  3619  * @netdev: network interface device structure
       
  3620  * @new_mtu: new value for maximum frame size
       
  3621  *
       
  3622  * Returns 0 on success, negative on failure
       
  3623  **/
       
  3624 static int e1000_change_mtu(struct net_device *netdev, int new_mtu)
       
  3625 {
       
  3626 	struct e1000_adapter *adapter = netdev_priv(netdev);
       
  3627 	struct e1000_hw *hw = &adapter->hw;
       
  3628 	int max_frame = new_mtu + ENET_HEADER_SIZE + ETHERNET_FCS_SIZE;
       
  3629 
       
  3630 	if (adapter->ecdev) {
       
  3631 		return -EBUSY;
       
  3632 	}
       
  3633 
       
  3634 	if ((max_frame < MINIMUM_ETHERNET_FRAME_SIZE) ||
       
  3635 	    (max_frame > MAX_JUMBO_FRAME_SIZE)) {
       
  3636 		e_err(probe, "Invalid MTU setting\n");
       
  3637 		return -EINVAL;
       
  3638 	}
       
  3639 
       
  3640 	/* Adapter-specific max frame size limits. */
       
  3641 	switch (hw->mac_type) {
       
  3642 	case e1000_undefined ... e1000_82542_rev2_1:
       
  3643 		if (max_frame > (ETH_FRAME_LEN + ETH_FCS_LEN)) {
       
  3644 			e_err(probe, "Jumbo Frames not supported.\n");
       
  3645 			return -EINVAL;
       
  3646 		}
       
  3647 		break;
       
  3648 	default:
       
  3649 		/* Capable of supporting up to MAX_JUMBO_FRAME_SIZE limit. */
       
  3650 		break;
       
  3651 	}
       
  3652 
       
  3653 	while (test_and_set_bit(__E1000_RESETTING, &adapter->flags))
       
  3654 		msleep(1);
       
  3655 	/* e1000_down has a dependency on max_frame_size */
       
  3656 	hw->max_frame_size = max_frame;
       
  3657 	if (netif_running(netdev))
       
  3658 		e1000_down(adapter);
       
  3659 
       
  3660 	/* NOTE: netdev_alloc_skb reserves 16 bytes, and typically NET_IP_ALIGN
       
  3661 	 * means we reserve 2 more, this pushes us to allocate from the next
       
  3662 	 * larger slab size.
       
  3663 	 * i.e. RXBUFFER_2048 --> size-4096 slab
       
  3664 	 * however with the new *_jumbo_rx* routines, jumbo receives will use
       
  3665 	 * fragmented skbs
       
  3666 	 */
       
  3667 
       
  3668 	if (max_frame <= E1000_RXBUFFER_2048)
       
  3669 		adapter->rx_buffer_len = E1000_RXBUFFER_2048;
       
  3670 	else
       
  3671 #if (PAGE_SIZE >= E1000_RXBUFFER_16384)
       
  3672 		adapter->rx_buffer_len = E1000_RXBUFFER_16384;
       
  3673 #elif (PAGE_SIZE >= E1000_RXBUFFER_4096)
       
  3674 		adapter->rx_buffer_len = PAGE_SIZE;
       
  3675 #endif
       
  3676 
       
  3677 	/* adjust allocation if LPE protects us, and we aren't using SBP */
       
  3678 	if (!hw->tbi_compatibility_on &&
       
  3679 	    ((max_frame == (ETH_FRAME_LEN + ETH_FCS_LEN)) ||
       
  3680 	     (max_frame == MAXIMUM_ETHERNET_VLAN_SIZE)))
       
  3681 		adapter->rx_buffer_len = MAXIMUM_ETHERNET_VLAN_SIZE;
       
  3682 
       
  3683 	pr_info("%s changing MTU from %d to %d\n",
       
  3684 		netdev->name, netdev->mtu, new_mtu);
       
  3685 	netdev->mtu = new_mtu;
       
  3686 
       
  3687 	if (netif_running(netdev))
       
  3688 		e1000_up(adapter);
       
  3689 	else
       
  3690 		e1000_reset(adapter);
       
  3691 
       
  3692 	clear_bit(__E1000_RESETTING, &adapter->flags);
       
  3693 
       
  3694 	return 0;
       
  3695 }
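/* Editor's note: worked example (sketch).  For new_mtu = 1500, max_frame =
 * 1500 + 14 + 4 = 1518, which selects E1000_RXBUFFER_2048 and, with TBI
 * compatibility off, is trimmed to MAXIMUM_ETHERNET_VLAN_SIZE.  For
 * new_mtu = 9000, max_frame = 9018, and the driver picks a 16384-byte or
 * PAGE_SIZE buffer depending on the architecture's page size.
 */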
       
  3696 
       
  3697 /**
       
  3698  * e1000_update_stats - Update the board statistics counters
       
  3699  * @adapter: board private structure
       
  3700  **/
       
  3701 void e1000_update_stats(struct e1000_adapter *adapter)
       
  3702 {
       
  3703 	struct net_device *netdev = adapter->netdev;
       
  3704 	struct e1000_hw *hw = &adapter->hw;
       
  3705 	struct pci_dev *pdev = adapter->pdev;
       
  3706 	unsigned long flags = 0;
       
  3707 	u16 phy_tmp;
       
  3708 
       
  3709 #define PHY_IDLE_ERROR_COUNT_MASK 0x00FF
       
  3710 
       
  3711 	/* Prevent stats update while adapter is being reset, or if the pci
       
  3712 	 * connection is down.
       
  3713 	 */
       
  3714 	if (adapter->link_speed == 0)
       
  3715 		return;
       
  3716 	if (pci_channel_offline(pdev))
       
  3717 		return;
       
  3718 
       
  3719 	if (!adapter->ecdev) {
       
  3720 		spin_lock_irqsave(&adapter->stats_lock, flags);
       
  3721 	}
       
  3722 
       
  3723 	/* these counters are modified from e1000_tbi_adjust_stats,
       
  3724 	 * called from the interrupt context, so they must only
       
  3725 	 * be written while holding adapter->stats_lock
       
  3726 	 */
       
  3727 
       
  3728 	adapter->stats.crcerrs += er32(CRCERRS);
       
  3729 	adapter->stats.gprc += er32(GPRC);
       
  3730 	adapter->stats.gorcl += er32(GORCL);
       
  3731 	adapter->stats.gorch += er32(GORCH);
       
  3732 	adapter->stats.bprc += er32(BPRC);
       
  3733 	adapter->stats.mprc += er32(MPRC);
       
  3734 	adapter->stats.roc += er32(ROC);
       
  3735 
       
  3736 	adapter->stats.prc64 += er32(PRC64);
       
  3737 	adapter->stats.prc127 += er32(PRC127);
       
  3738 	adapter->stats.prc255 += er32(PRC255);
       
  3739 	adapter->stats.prc511 += er32(PRC511);
       
  3740 	adapter->stats.prc1023 += er32(PRC1023);
       
  3741 	adapter->stats.prc1522 += er32(PRC1522);
       
  3742 
       
  3743 	adapter->stats.symerrs += er32(SYMERRS);
       
  3744 	adapter->stats.mpc += er32(MPC);
       
  3745 	adapter->stats.scc += er32(SCC);
       
  3746 	adapter->stats.ecol += er32(ECOL);
       
  3747 	adapter->stats.mcc += er32(MCC);
       
  3748 	adapter->stats.latecol += er32(LATECOL);
       
  3749 	adapter->stats.dc += er32(DC);
       
  3750 	adapter->stats.sec += er32(SEC);
       
  3751 	adapter->stats.rlec += er32(RLEC);
       
  3752 	adapter->stats.xonrxc += er32(XONRXC);
       
  3753 	adapter->stats.xontxc += er32(XONTXC);
       
  3754 	adapter->stats.xoffrxc += er32(XOFFRXC);
       
  3755 	adapter->stats.xofftxc += er32(XOFFTXC);
       
  3756 	adapter->stats.fcruc += er32(FCRUC);
       
  3757 	adapter->stats.gptc += er32(GPTC);
       
  3758 	adapter->stats.gotcl += er32(GOTCL);
       
  3759 	adapter->stats.gotch += er32(GOTCH);
       
  3760 	adapter->stats.rnbc += er32(RNBC);
       
  3761 	adapter->stats.ruc += er32(RUC);
       
  3762 	adapter->stats.rfc += er32(RFC);
       
  3763 	adapter->stats.rjc += er32(RJC);
       
  3764 	adapter->stats.torl += er32(TORL);
       
  3765 	adapter->stats.torh += er32(TORH);
       
  3766 	adapter->stats.totl += er32(TOTL);
       
  3767 	adapter->stats.toth += er32(TOTH);
       
  3768 	adapter->stats.tpr += er32(TPR);
       
  3769 
       
  3770 	adapter->stats.ptc64 += er32(PTC64);
       
  3771 	adapter->stats.ptc127 += er32(PTC127);
       
  3772 	adapter->stats.ptc255 += er32(PTC255);
       
  3773 	adapter->stats.ptc511 += er32(PTC511);
       
  3774 	adapter->stats.ptc1023 += er32(PTC1023);
       
  3775 	adapter->stats.ptc1522 += er32(PTC1522);
       
  3776 
       
  3777 	adapter->stats.mptc += er32(MPTC);
       
  3778 	adapter->stats.bptc += er32(BPTC);
       
  3779 
       
  3780 	/* used for adaptive IFS */
       
  3781 
       
  3782 	hw->tx_packet_delta = er32(TPT);
       
  3783 	adapter->stats.tpt += hw->tx_packet_delta;
       
  3784 	hw->collision_delta = er32(COLC);
       
  3785 	adapter->stats.colc += hw->collision_delta;
       
  3786 
       
  3787 	if (hw->mac_type >= e1000_82543) {
       
  3788 		adapter->stats.algnerrc += er32(ALGNERRC);
       
  3789 		adapter->stats.rxerrc += er32(RXERRC);
       
  3790 		adapter->stats.tncrs += er32(TNCRS);
       
  3791 		adapter->stats.cexterr += er32(CEXTERR);
       
  3792 		adapter->stats.tsctc += er32(TSCTC);
       
  3793 		adapter->stats.tsctfc += er32(TSCTFC);
       
  3794 	}
       
  3795 
       
  3796 	/* Fill out the OS statistics structure */
       
  3797 	netdev->stats.multicast = adapter->stats.mprc;
       
  3798 	netdev->stats.collisions = adapter->stats.colc;
       
  3799 
       
  3800 	/* Rx Errors */
       
  3801 
       
  3802 	/* RLEC on some newer hardware can be incorrect so build
       
  3803 	 * our own version based on RUC and ROC
       
  3804 	 */
       
  3805 	netdev->stats.rx_errors = adapter->stats.rxerrc +
       
  3806 		adapter->stats.crcerrs + adapter->stats.algnerrc +
       
  3807 		adapter->stats.ruc + adapter->stats.roc +
       
  3808 		adapter->stats.cexterr;
       
  3809 	adapter->stats.rlerrc = adapter->stats.ruc + adapter->stats.roc;
       
  3810 	netdev->stats.rx_length_errors = adapter->stats.rlerrc;
       
  3811 	netdev->stats.rx_crc_errors = adapter->stats.crcerrs;
       
  3812 	netdev->stats.rx_frame_errors = adapter->stats.algnerrc;
       
  3813 	netdev->stats.rx_missed_errors = adapter->stats.mpc;
       
  3814 
       
  3815 	/* Tx Errors */
       
  3816 	adapter->stats.txerrc = adapter->stats.ecol + adapter->stats.latecol;
       
  3817 	netdev->stats.tx_errors = adapter->stats.txerrc;
       
  3818 	netdev->stats.tx_aborted_errors = adapter->stats.ecol;
       
  3819 	netdev->stats.tx_window_errors = adapter->stats.latecol;
       
  3820 	netdev->stats.tx_carrier_errors = adapter->stats.tncrs;
       
  3821 	if (hw->bad_tx_carr_stats_fd &&
       
  3822 	    adapter->link_duplex == FULL_DUPLEX) {
       
  3823 		netdev->stats.tx_carrier_errors = 0;
       
  3824 		adapter->stats.tncrs = 0;
       
  3825 	}
       
  3826 
       
  3827 	/* Tx Dropped needs to be maintained elsewhere */
       
  3828 
       
  3829 	/* Phy Stats */
       
  3830 	if (hw->media_type == e1000_media_type_copper) {
       
  3831 		if ((adapter->link_speed == SPEED_1000) &&
       
  3832 		   (!e1000_read_phy_reg(hw, PHY_1000T_STATUS, &phy_tmp))) {
       
  3833 			phy_tmp &= PHY_IDLE_ERROR_COUNT_MASK;
       
  3834 			adapter->phy_stats.idle_errors += phy_tmp;
       
  3835 		}
       
  3836 
       
  3837 		if ((hw->mac_type <= e1000_82546) &&
       
  3838 		   (hw->phy_type == e1000_phy_m88) &&
       
  3839 		   !e1000_read_phy_reg(hw, M88E1000_RX_ERR_CNTR, &phy_tmp))
       
  3840 			adapter->phy_stats.receive_errors += phy_tmp;
       
  3841 	}
       
  3842 
       
  3843 	/* Management Stats */
       
  3844 	if (hw->has_smbus) {
       
  3845 		adapter->stats.mgptc += er32(MGTPTC);
       
  3846 		adapter->stats.mgprc += er32(MGTPRC);
       
  3847 		adapter->stats.mgpdc += er32(MGTPDC);
       
  3848 	}
       
  3849 
       
  3850 	if (!adapter->ecdev) {
       
  3851 		spin_unlock_irqrestore(&adapter->stats_lock, flags);
       
  3852 	}
       
  3853 }
       
  3854 
       
  3855 void ec_poll(struct net_device *netdev)
       
  3856 {
       
  3857 	struct e1000_adapter *adapter = netdev_priv(netdev);
       
  3858 	if (jiffies - adapter->ec_watchdog_jiffies >= 2 * HZ) {
       
  3859 		e1000_watchdog(&adapter->watchdog_task.work);
       
  3860 		adapter->ec_watchdog_jiffies = jiffies;
       
  3861 	}
       
  3862 
       
  3863 	e1000_intr(0, netdev);
       
  3864 }
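/* Editor's note: illustrative sketch (excluded from the build, names
 * hypothetical).  With an EtherCAT master attached, ec_poll() replaces the
 * hardware interrupt and is expected to be called from the master's cyclic
 * context, e.g.:
 */
#if 0
/* called at the application's cycle time, e.g. every millisecond */
static void cyclic_task(struct net_device *netdev)
{
	ec_poll(netdev);	/* runs e1000_intr(); every 2 s it also
				 * triggers the watchdog */
}
#endif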
       
  3865 
       
  3866 /**
       
  3867  * e1000_intr - Interrupt Handler
       
  3868  * @irq: interrupt number
       
  3869  * @data: pointer to a network interface device structure
       
  3870  **/
       
  3871 static irqreturn_t e1000_intr(int irq, void *data)
       
  3872 {
       
  3873 	struct net_device *netdev = data;
       
  3874 	struct e1000_adapter *adapter = netdev_priv(netdev);
       
  3875 	struct e1000_hw *hw = &adapter->hw;
       
  3876 	u32 icr = er32(ICR);
       
  3877 
       
   3878 	if (unlikely(!icr))
       
  3879 		return IRQ_NONE;  /* Not our interrupt */
       
  3880 
       
   3881 	/* We might have caused the interrupt, but the above

   3882 	 * read cleared it; and just in case the driver is

   3883 	 * down, there is nothing to do, so return handled.
       
  3884 	 */
       
  3885 	if (unlikely(test_bit(__E1000_DOWN, &adapter->flags)))
       
  3886 		return IRQ_HANDLED;
       
  3887 
       
  3888 	if (unlikely(icr & (E1000_ICR_RXSEQ | E1000_ICR_LSC))) {
       
  3889 		hw->get_link_status = 1;
       
  3890 		/* guard against interrupt when we're going down */
       
  3891 		if (!adapter->ecdev && !test_bit(__E1000_DOWN, &adapter->flags))
       
  3892 			schedule_delayed_work(&adapter->watchdog_task, 1);
       
  3893 	}
       
  3894 
       
  3895 	if (adapter->ecdev) {
       
  3896 		int i, ec_work_done = 0;
       
  3897 		for (i = 0; i < E1000_MAX_INTR; i++) {
       
  3898 			if (unlikely(!adapter->clean_rx(adapter, adapter->rx_ring,
       
  3899 							&ec_work_done, 100) &&
       
  3900 						!e1000_clean_tx_irq(adapter, adapter->tx_ring))) {
       
  3901 				break;
       
  3902 			}
       
  3903 		}
       
   3904 	} else {
       
  3905 		/* disable interrupts, without the synchronize_irq bit */
       
  3906 		ew32(IMC, ~0);
       
  3907 		E1000_WRITE_FLUSH();
       
  3908 
       
  3909 		if (likely(napi_schedule_prep(&adapter->napi))) {
       
  3910 			adapter->total_tx_bytes = 0;
       
  3911 			adapter->total_tx_packets = 0;
       
  3912 			adapter->total_rx_bytes = 0;
       
  3913 			adapter->total_rx_packets = 0;
       
  3914 			__napi_schedule(&adapter->napi);
       
  3915 		} else {
       
   3916 			/* This really should not happen!  If it does, it is basically

   3917 			 * a bug, but not a hard error, so enable interrupts and continue.
       
  3918 			 */
       
  3919 			if (!test_bit(__E1000_DOWN, &adapter->flags))
       
  3920 				e1000_irq_enable(adapter);
       
  3921 		}
       
  3922 	}
       
  3923 
       
  3924 	return IRQ_HANDLED;
       
  3925 }
       
  3926 
       
  3927 /**
       
  3928  * e1000_clean - NAPI Rx polling callback
       
  3929  * @adapter: board private structure
       
  3930  * EtherCAT: never called
       
  3931  **/
       
  3932 static int e1000_clean(struct napi_struct *napi, int budget)
       
  3933 {
       
  3934 	struct e1000_adapter *adapter = container_of(napi, struct e1000_adapter,
       
  3935 						     napi);
       
  3936 	int tx_clean_complete = 0, work_done = 0;
       
  3937 
       
  3938 	tx_clean_complete = e1000_clean_tx_irq(adapter, &adapter->tx_ring[0]);
       
  3939 
       
  3940 	adapter->clean_rx(adapter, &adapter->rx_ring[0], &work_done, budget);
       
  3941 
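        	/* if TX cleaning did not finish, claim the full budget so NAPI
        	 * polls again rather than re-enabling interrupts
        	 */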
       
  3942 	if (!tx_clean_complete)
       
  3943 		work_done = budget;
       
  3944 
       
  3945 	/* If budget not fully consumed, exit the polling mode */
       
  3946 	if (work_done < budget) {
       
  3947 		if (likely(adapter->itr_setting & 3))
       
  3948 			e1000_set_itr(adapter);
       
  3949 		napi_complete(napi);
       
  3950 		if (!test_bit(__E1000_DOWN, &adapter->flags))
       
  3951 			e1000_irq_enable(adapter);
       
  3952 	}
       
  3953 
       
  3954 	return work_done;
       
  3955 }
       
  3956 
       
  3957 /**
       
  3958  * e1000_clean_tx_irq - Reclaim resources after transmit completes
       
  3959  * @adapter: board private structure
       
  3960  **/
       
  3961 static bool e1000_clean_tx_irq(struct e1000_adapter *adapter,
       
  3962 			       struct e1000_tx_ring *tx_ring)
       
  3963 {
       
  3964 	struct e1000_hw *hw = &adapter->hw;
       
  3965 	struct net_device *netdev = adapter->netdev;
       
  3966 	struct e1000_tx_desc *tx_desc, *eop_desc;
       
  3967 	struct e1000_buffer *buffer_info;
       
  3968 	unsigned int i, eop;
       
  3969 	unsigned int count = 0;
       
   3970 	unsigned int total_tx_bytes = 0, total_tx_packets = 0;
       
  3971 	unsigned int bytes_compl = 0, pkts_compl = 0;
       
  3972 
       
  3973 	i = tx_ring->next_to_clean;
       
  3974 	eop = tx_ring->buffer_info[i].next_to_watch;
       
  3975 	eop_desc = E1000_TX_DESC(*tx_ring, eop);
       
  3976 
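        	/* hardware sets the DD bit in the end-of-packet descriptor
        	 * (next_to_watch) once it is done with the packet; reclaim every
        	 * buffer up to and including that descriptor
        	 */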
       
  3977 	while ((eop_desc->upper.data & cpu_to_le32(E1000_TXD_STAT_DD)) &&
       
  3978 	       (count < tx_ring->count)) {
       
  3979 		bool cleaned = false;
       
  3980 		rmb();	/* read buffer_info after eop_desc */
       
  3981 		for ( ; !cleaned; count++) {
       
  3982 			tx_desc = E1000_TX_DESC(*tx_ring, i);
       
  3983 			buffer_info = &tx_ring->buffer_info[i];
       
  3984 			cleaned = (i == eop);
       
  3985 
       
  3986 			if (cleaned) {
       
  3987 				total_tx_packets += buffer_info->segs;
       
  3988 				total_tx_bytes += buffer_info->bytecount;
       
  3989 				if (buffer_info->skb) {
       
  3990 					bytes_compl += buffer_info->skb->len;
       
  3991 					pkts_compl++;
       
  3992 				}
       
  3993 
       
  3994 			}
       
  3995 			e1000_unmap_and_free_tx_resource(adapter, buffer_info);
       
  3996 			tx_desc->upper.data = 0;
       
  3997 
       
  3998 			if (unlikely(++i == tx_ring->count)) i = 0;
       
  3999 		}
       
  4000 
       
  4001 		eop = tx_ring->buffer_info[i].next_to_watch;
       
  4002 		eop_desc = E1000_TX_DESC(*tx_ring, eop);
       
  4003 	}
       
  4004 
       
  4005 	tx_ring->next_to_clean = i;
       
  4006 
       
  4007 	netdev_completed_queue(netdev, pkts_compl, bytes_compl);
       
  4008 
       
  4009 #define TX_WAKE_THRESHOLD 32
       
  4010 	if (!adapter->ecdev && unlikely(count && netif_carrier_ok(netdev) &&
       
  4011 		     E1000_DESC_UNUSED(tx_ring) >= TX_WAKE_THRESHOLD)) {
       
  4012 		/* Make sure that anybody stopping the queue after this
       
  4013 		 * sees the new next_to_clean.
       
  4014 		 */
       
  4015 		smp_mb();
       
  4016 
       
  4017 		if (netif_queue_stopped(netdev) &&
       
  4018 		    !(test_bit(__E1000_DOWN, &adapter->flags))) {
       
  4019 			netif_wake_queue(netdev);
       
  4020 			++adapter->restart_queue;
       
  4021 		}
       
  4022 	}
       
  4023 
       
  4024 	if (!adapter->ecdev && adapter->detect_tx_hung) {
       
  4025 		/* Detect a transmit hang in hardware, this serializes the
       
  4026 		 * check with the clearing of time_stamp and movement of i
       
  4027 		 */
       
  4028 		adapter->detect_tx_hung = false;
       
  4029 		if (tx_ring->buffer_info[eop].time_stamp &&
       
  4030 		    time_after(jiffies, tx_ring->buffer_info[eop].time_stamp +
       
  4031 			       (adapter->tx_timeout_factor * HZ)) &&
       
  4032 		    !(er32(STATUS) & E1000_STATUS_TXOFF)) {
       
  4033 
       
  4034 			/* detected Tx unit hang */
       
  4035 			e_err(drv, "Detected Tx Unit Hang\n"
       
  4036 			      "  Tx Queue             <%lu>\n"
       
  4037 			      "  TDH                  <%x>\n"
       
  4038 			      "  TDT                  <%x>\n"
       
  4039 			      "  next_to_use          <%x>\n"
       
  4040 			      "  next_to_clean        <%x>\n"
       
  4041 			      "buffer_info[next_to_clean]\n"
       
  4042 			      "  time_stamp           <%lx>\n"
       
  4043 			      "  next_to_watch        <%x>\n"
       
  4044 			      "  jiffies              <%lx>\n"
       
  4045 			      "  next_to_watch.status <%x>\n",
       
   4046 				/* pointer subtraction yields an element count already */

   4047 				(unsigned long)(tx_ring - adapter->tx_ring),
       
  4048 				readl(hw->hw_addr + tx_ring->tdh),
       
  4049 				readl(hw->hw_addr + tx_ring->tdt),
       
  4050 				tx_ring->next_to_use,
       
  4051 				tx_ring->next_to_clean,
       
  4052 				tx_ring->buffer_info[eop].time_stamp,
       
  4053 				eop,
       
  4054 				jiffies,
       
  4055 				eop_desc->upper.fields.status);
       
  4056 			e1000_dump(adapter);
       
  4057 			netif_stop_queue(netdev);
       
  4058 		}
       
  4059 	}
       
  4060 	adapter->total_tx_bytes += total_tx_bytes;
       
  4061 	adapter->total_tx_packets += total_tx_packets;
       
  4062 	netdev->stats.tx_bytes += total_tx_bytes;
       
  4063 	netdev->stats.tx_packets += total_tx_packets;
       
  4064 	return count < tx_ring->count;
       
  4065 }
       
  4066 
       
  4067 /**
       
  4068  * e1000_rx_checksum - Receive Checksum Offload for 82543
       
  4069  * @adapter:     board private structure
       
  4070  * @status_err:  receive descriptor status and error fields
       
  4071  * @csum:        receive descriptor csum field
       
  4072  * @sk_buff:     socket buffer with received data
       
  4073  **/
       
  4074 static void e1000_rx_checksum(struct e1000_adapter *adapter, u32 status_err,
       
  4075 			      u32 csum, struct sk_buff *skb)
       
  4076 {
       
  4077 	struct e1000_hw *hw = &adapter->hw;
       
  4078 	u16 status = (u16)status_err;
       
  4079 	u8 errors = (u8)(status_err >> 24);
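        	/* callers pack the descriptor status in the low bits of
        	 * status_err and the error byte in bits 31:24
        	 */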
       
  4080 
       
  4081 	skb_checksum_none_assert(skb);
       
  4082 
       
  4083 	/* 82543 or newer only */
       
  4084 	if (unlikely(hw->mac_type < e1000_82543)) return;
       
  4085 	/* Ignore Checksum bit is set */
       
  4086 	if (unlikely(status & E1000_RXD_STAT_IXSM)) return;
       
  4087 	/* TCP/UDP checksum error bit is set */
       
  4088 	if (unlikely(errors & E1000_RXD_ERR_TCPE)) {
       
  4089 		/* let the stack verify checksum errors */
       
  4090 		adapter->hw_csum_err++;
       
  4091 		return;
       
  4092 	}
       
  4093 	/* TCP/UDP Checksum has not been calculated */
       
  4094 	if (!(status & E1000_RXD_STAT_TCPCS))
       
  4095 		return;
       
  4096 
       
  4097 	/* It must be a TCP or UDP packet with a valid checksum */
       
   4098 	/* TCP/UDP checksum is good; TCPCS was already verified above */

   4099 	skb->ip_summed = CHECKSUM_UNNECESSARY;
       
  4102 	adapter->hw_csum_good++;
       
  4103 }
       
  4104 
       
  4105 /**
       
   4106  * e1000_consume_page - account a received page fragment to an skb
       
  4107  **/
       
  4108 static void e1000_consume_page(struct e1000_buffer *bi, struct sk_buff *skb,
       
  4109 			       u16 length)
       
  4110 {
       
  4111 	bi->page = NULL;
       
  4112 	skb->len += length;
       
  4113 	skb->data_len += length;
       
  4114 	skb->truesize += PAGE_SIZE;
       
  4115 }
       
  4116 
       
  4117 /**
       
  4118  * e1000_receive_skb - helper function to handle rx indications
       
  4119  * @adapter: board private structure
       
  4120  * @status: descriptor status field as written by hardware
       
  4121  * @vlan: descriptor vlan field as written by hardware (no le/be conversion)
       
  4122  * @skb: pointer to sk_buff to be indicated to stack
       
  4123  */
       
  4124 static void e1000_receive_skb(struct e1000_adapter *adapter, u8 status,
       
  4125 			      __le16 vlan, struct sk_buff *skb)
       
  4126 {
       
  4127 	skb->protocol = eth_type_trans(skb, adapter->netdev);
       
  4128 
       
  4129 	if (status & E1000_RXD_STAT_VP) {
       
  4130 		u16 vid = le16_to_cpu(vlan) & E1000_RXD_SPC_VLAN_MASK;
       
  4131 
       
  4132 		__vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), vid);
       
  4133 	}
       
  4134 	napi_gro_receive(&adapter->napi, skb);
       
  4135 }
       
  4136 
       
  4137 /**
       
  4138  * e1000_clean_jumbo_rx_irq - Send received data up the network stack; legacy
       
  4139  * @adapter: board private structure
       
  4140  * @rx_ring: ring to clean
       
  4141  * @work_done: amount of napi work completed this call
       
  4142  * @work_to_do: max amount of work allowed for this call to do
       
  4143  *
       
  4144  * the return value indicates whether actual cleaning was done, there
       
  4145  * is no guarantee that everything was cleaned
       
  4146  */
       
  4147 static bool e1000_clean_jumbo_rx_irq(struct e1000_adapter *adapter,
       
  4148 				     struct e1000_rx_ring *rx_ring,
       
  4149 				     int *work_done, int work_to_do)
       
  4150 {
       
  4151 	struct e1000_hw *hw = &adapter->hw;
       
  4152 	struct net_device *netdev = adapter->netdev;
       
  4153 	struct pci_dev *pdev = adapter->pdev;
       
  4154 	struct e1000_rx_desc *rx_desc, *next_rxd;
       
  4155 	struct e1000_buffer *buffer_info, *next_buffer;
       
  4156 	unsigned long irq_flags;
       
  4157 	u32 length;
       
  4158 	unsigned int i;
       
  4159 	int cleaned_count = 0;
       
  4160 	bool cleaned = false;
       
   4161 	unsigned int total_rx_bytes = 0, total_rx_packets = 0;
       
  4162 
       
  4163 	i = rx_ring->next_to_clean;
       
  4164 	rx_desc = E1000_RX_DESC(*rx_ring, i);
       
  4165 	buffer_info = &rx_ring->buffer_info[i];
       
  4166 
       
  4167 	while (rx_desc->status & E1000_RXD_STAT_DD) {
       
  4168 		struct sk_buff *skb;
       
  4169 		u8 status;
       
  4170 
       
  4171 		if (*work_done >= work_to_do)
       
  4172 			break;
       
  4173 		(*work_done)++;
       
  4174 		rmb(); /* read descriptor and rx_buffer_info after status DD */
       
  4175 
       
  4176 		status = rx_desc->status;
       
  4177 		skb = buffer_info->skb;
       
  4178 		if (!adapter->ecdev) {
       
  4179 			buffer_info->skb = NULL;
       
  4180 		}
       
  4181 
       
  4182 		if (++i == rx_ring->count) i = 0;
       
  4183 		next_rxd = E1000_RX_DESC(*rx_ring, i);
       
  4184 		prefetch(next_rxd);
       
  4185 
       
  4186 		next_buffer = &rx_ring->buffer_info[i];
       
  4187 
       
  4188 		cleaned = true;
       
  4189 		cleaned_count++;
       
  4190 		dma_unmap_page(&pdev->dev, buffer_info->dma,
       
  4191 			       buffer_info->length, DMA_FROM_DEVICE);
       
  4192 		buffer_info->dma = 0;
       
  4193 
       
  4194 		length = le16_to_cpu(rx_desc->length);
       
  4195 
       
  4196 		/* errors is only valid for DD + EOP descriptors */
       
  4197 		if (!adapter->ecdev &&
       
  4198 		    unlikely((status & E1000_RXD_STAT_EOP) &&
       
  4199 		    (rx_desc->errors & E1000_RXD_ERR_FRAME_ERR_MASK))) {
       
  4200 			u8 *mapped;
       
  4201 			u8 last_byte;
       
  4202 
       
  4203 			mapped = page_address(buffer_info->page);
       
  4204 			last_byte = *(mapped + length - 1);
       
  4205 			if (TBI_ACCEPT(hw, status, rx_desc->errors, length,
       
  4206 				       last_byte)) {
       
  4207 				spin_lock_irqsave(&adapter->stats_lock,
       
  4208 						  irq_flags);
       
  4209 				e1000_tbi_adjust_stats(hw, &adapter->stats,
       
  4210 						       length, mapped);
       
  4211 				spin_unlock_irqrestore(&adapter->stats_lock,
       
  4212 						       irq_flags);
       
  4213 				length--;
       
  4214 			} else {
       
  4215 				if (netdev->features & NETIF_F_RXALL)
       
  4216 					goto process_skb;
       
  4217 				/* recycle both page and skb */
       
  4218 				buffer_info->skb = skb;
       
  4219 				/* an error means any chain goes out the window
       
  4220 				 * too
       
  4221 				 */
       
  4222 				if (rx_ring->rx_skb_top)
       
  4223 					dev_kfree_skb(rx_ring->rx_skb_top);
       
  4224 				rx_ring->rx_skb_top = NULL;
       
  4225 				goto next_desc;
       
  4226 			}
       
  4227 		}
       
  4228 
       
  4229 #define rxtop rx_ring->rx_skb_top
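        		/* rxtop heads a packet that spans several descriptors; page
        		 * fragments accumulate on it until the EOP descriptor arrives
        		 */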
       
  4230 process_skb:
       
  4231 		if (!(status & E1000_RXD_STAT_EOP)) {
       
  4232 			/* this descriptor is only the beginning (or middle) */
       
  4233 			if (!rxtop) {
       
  4234 				/* this is the beginning of a chain */
       
  4235 				rxtop = skb;
       
  4236 				skb_fill_page_desc(rxtop, 0, buffer_info->page,
       
  4237 						   0, length);
       
  4238 			} else {
       
  4239 				/* this is the middle of a chain */
       
  4240 				skb_fill_page_desc(rxtop,
       
  4241 				    skb_shinfo(rxtop)->nr_frags,
       
  4242 				    buffer_info->page, 0, length);
       
  4243 				/* re-use the skb, only consumed the page */
       
  4244 				buffer_info->skb = skb;
       
  4245 			}
       
  4246 			e1000_consume_page(buffer_info, rxtop, length);
       
  4247 			goto next_desc;
       
  4248 		} else {
       
  4249 			if (rxtop) {
       
  4250 				/* end of the chain */
       
  4251 				skb_fill_page_desc(rxtop,
       
  4252 				    skb_shinfo(rxtop)->nr_frags,
       
  4253 				    buffer_info->page, 0, length);
       
  4254 				/* re-use the current skb, we only consumed the
       
  4255 				 * page
       
  4256 				 */
       
  4257 				buffer_info->skb = skb;
       
  4258 				skb = rxtop;
       
  4259 				rxtop = NULL;
       
  4260 				e1000_consume_page(buffer_info, skb, length);
       
  4261 			} else {
       
  4262 				/* no chain, got EOP, this buf is the packet
       
  4263 				 * copybreak to save the put_page/alloc_page
       
  4264 				 */
       
  4265 				if (length <= copybreak &&
       
  4266 				    skb_tailroom(skb) >= length) {
       
  4267 					u8 *vaddr;
       
  4268 					vaddr = kmap_atomic(buffer_info->page);
       
  4269 					memcpy(skb_tail_pointer(skb), vaddr,
       
  4270 					       length);
       
  4271 					kunmap_atomic(vaddr);
       
  4272 					/* re-use the page, so don't erase
       
  4273 					 * buffer_info->page
       
  4274 					 */
       
  4275 					skb_put(skb, length);
       
  4276 				} else {
       
  4277 					skb_fill_page_desc(skb, 0,
       
  4278 							   buffer_info->page, 0,
       
  4279 							   length);
       
  4280 					e1000_consume_page(buffer_info, skb,
       
  4281 							   length);
       
  4282 				}
       
  4283 			}
       
  4284 		}
       
  4285 
       
  4286 		/* Receive Checksum Offload XXX recompute due to CRC strip? */
       
  4287 		e1000_rx_checksum(adapter,
       
  4288 				  (u32)(status) |
       
  4289 				  ((u32)(rx_desc->errors) << 24),
       
  4290 				  le16_to_cpu(rx_desc->csum), skb);
       
  4291 
       
  4292 		total_rx_bytes += (skb->len - 4); /* don't count FCS */
       
  4293 		if (likely(!(netdev->features & NETIF_F_RXFCS)))
       
  4294 			pskb_trim(skb, skb->len - 4);
       
  4295 		total_rx_packets++;
       
  4296 
       
  4297 		/* eth type trans needs skb->data to point to something */
       
  4298 		if (!pskb_may_pull(skb, ETH_HLEN)) {
       
  4299 			e_err(drv, "pskb_may_pull failed.\n");
       
  4300 			if (!adapter->ecdev) {
       
  4301 				dev_kfree_skb(skb);
       
  4302 			}
       
  4303 			goto next_desc;
       
  4304 		}
       
  4305 
       
  4306 		if (adapter->ecdev) {
       
  4307 			ecdev_receive(adapter->ecdev, skb->data, length);
       
  4308 
       
   4309 			/* no need to detect link status as long as frames

   4310 			 * are received: reset the watchdog */
       
  4311 			adapter->ec_watchdog_jiffies = jiffies;
       
  4312 		} else {
       
  4313 			e1000_receive_skb(adapter, status, rx_desc->special, skb);
       
  4314 		}
       
  4315 
       
  4316 next_desc:
       
  4317 		rx_desc->status = 0;
       
  4318 
       
  4319 		/* return some buffers to hardware, one at a time is too slow */
       
  4320 		if (unlikely(cleaned_count >= E1000_RX_BUFFER_WRITE)) {
       
  4321 			adapter->alloc_rx_buf(adapter, rx_ring, cleaned_count);
       
  4322 			cleaned_count = 0;
       
  4323 		}
       
  4324 
       
  4325 		/* use prefetched values */
       
  4326 		rx_desc = next_rxd;
       
  4327 		buffer_info = next_buffer;
       
  4328 	}
       
  4329 	rx_ring->next_to_clean = i;
       
  4330 
       
  4331 	cleaned_count = E1000_DESC_UNUSED(rx_ring);
       
  4332 	if (cleaned_count)
       
  4333 		adapter->alloc_rx_buf(adapter, rx_ring, cleaned_count);
       
  4334 
       
  4335 	adapter->total_rx_packets += total_rx_packets;
       
  4336 	adapter->total_rx_bytes += total_rx_bytes;
       
  4337 	netdev->stats.rx_bytes += total_rx_bytes;
       
  4338 	netdev->stats.rx_packets += total_rx_packets;
       
  4339 	return cleaned;
       
  4340 }
       
  4341 
       
  4342 /* this should improve performance for small packets with large amounts
       
  4343  * of reassembly being done in the stack
       
  4344  */
       
  4345 static void e1000_check_copybreak(struct net_device *netdev,
       
  4346 				 struct e1000_buffer *buffer_info,
       
  4347 				 u32 length, struct sk_buff **skb)
       
  4348 {
       
  4349 	struct e1000_adapter *adapter = netdev_priv(netdev);
       
  4350 	struct sk_buff *new_skb;
       
  4351 
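        	/* in EtherCAT mode the original skb is recycled in place, so the
        	 * copybreak copy would bring no benefit
        	 */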
       
  4352 	if (adapter->ecdev || length > copybreak)
       
  4353 		return;
       
  4354 
       
  4355 	new_skb = netdev_alloc_skb_ip_align(netdev, length);
       
  4356 	if (!new_skb)
       
  4357 		return;
       
  4358 
       
  4359 	skb_copy_to_linear_data_offset(new_skb, -NET_IP_ALIGN,
       
  4360 				       (*skb)->data - NET_IP_ALIGN,
       
  4361 				       length + NET_IP_ALIGN);
       
  4362 	/* save the skb in buffer_info as good */
       
  4363 	buffer_info->skb = *skb;
       
  4364 	*skb = new_skb;
       
  4365 }
       
  4366 
       
  4367 /**
       
  4368  * e1000_clean_rx_irq - Send received data up the network stack; legacy
       
  4369  * @adapter: board private structure
       
  4370  * @rx_ring: ring to clean
       
  4371  * @work_done: amount of napi work completed this call
       
  4372  * @work_to_do: max amount of work allowed for this call to do
       
  4373  */
       
  4374 static bool e1000_clean_rx_irq(struct e1000_adapter *adapter,
       
  4375 			       struct e1000_rx_ring *rx_ring,
       
  4376 			       int *work_done, int work_to_do)
       
  4377 {
       
  4378 	struct e1000_hw *hw = &adapter->hw;
       
  4379 	struct net_device *netdev = adapter->netdev;
       
  4380 	struct pci_dev *pdev = adapter->pdev;
       
  4381 	struct e1000_rx_desc *rx_desc, *next_rxd;
       
  4382 	struct e1000_buffer *buffer_info, *next_buffer;
       
  4383 	unsigned long flags;
       
  4384 	u32 length;
       
  4385 	unsigned int i;
       
  4386 	int cleaned_count = 0;
       
  4387 	bool cleaned = false;
       
   4388 	unsigned int total_rx_bytes = 0, total_rx_packets = 0;
       
  4389 
       
  4390 	i = rx_ring->next_to_clean;
       
  4391 	rx_desc = E1000_RX_DESC(*rx_ring, i);
       
  4392 	buffer_info = &rx_ring->buffer_info[i];
       
  4393 
       
  4394 	while (rx_desc->status & E1000_RXD_STAT_DD) {
       
  4395 		struct sk_buff *skb;
       
  4396 		u8 status;
       
  4397 
       
  4398 		if (*work_done >= work_to_do)
       
  4399 			break;
       
  4400 		(*work_done)++;
       
  4401 		rmb(); /* read descriptor and rx_buffer_info after status DD */
       
  4402 
       
  4403 		status = rx_desc->status;
       
  4404 		skb = buffer_info->skb;
       
  4405 		if (!adapter->ecdev) {
       
  4406 			buffer_info->skb = NULL;
       
  4407 		}
       
  4408 
       
  4409 		prefetch(skb->data - NET_IP_ALIGN);
       
  4410 
       
  4411 		if (++i == rx_ring->count) i = 0;
       
  4412 		next_rxd = E1000_RX_DESC(*rx_ring, i);
       
  4413 		prefetch(next_rxd);
       
  4414 
       
  4415 		next_buffer = &rx_ring->buffer_info[i];
       
  4416 
       
  4417 		cleaned = true;
       
  4418 		cleaned_count++;
       
  4419 		dma_unmap_single(&pdev->dev, buffer_info->dma,
       
  4420 				 buffer_info->length, DMA_FROM_DEVICE);
       
  4421 		buffer_info->dma = 0;
       
  4422 
       
  4423 		length = le16_to_cpu(rx_desc->length);
       
  4424 		/* !EOP means multiple descriptors were used to store a single
       
   4425 		 * packet; if that's the case we need to toss it.  In fact, we

   4426 		 * need to toss every packet with the EOP bit clear and the next
       
  4427 		 * frame that _does_ have the EOP bit set, as it is by
       
  4428 		 * definition only a frame fragment
       
  4429 		 */
       
  4430 		if (unlikely(!(status & E1000_RXD_STAT_EOP)))
       
  4431 			adapter->discarding = true;
       
  4432 
       
  4433 		if (adapter->discarding) {
       
  4434 			/* All receives must fit into a single buffer */
       
  4435 			e_dbg("Receive packet consumed multiple buffers\n");
       
  4436 			/* recycle */
       
  4437 			buffer_info->skb = skb;
       
  4438 			if (status & E1000_RXD_STAT_EOP)
       
  4439 				adapter->discarding = false;
       
  4440 			goto next_desc;
       
  4441 		}
       
  4442 
       
  4443 		if (!adapter->ecdev &&
       
  4444 		    unlikely(rx_desc->errors & E1000_RXD_ERR_FRAME_ERR_MASK)) {
       
  4445 			u8 last_byte = *(skb->data + length - 1);
       
  4446 			if (TBI_ACCEPT(hw, status, rx_desc->errors, length,
       
  4447 				       last_byte)) {
       
  4448 				spin_lock_irqsave(&adapter->stats_lock, flags);
       
  4449 				e1000_tbi_adjust_stats(hw, &adapter->stats,
       
  4450 						       length, skb->data);
       
  4451 				spin_unlock_irqrestore(&adapter->stats_lock,
       
  4452 						       flags);
       
  4453 				length--;
       
  4454 			} else {
       
  4455 				if (netdev->features & NETIF_F_RXALL)
       
  4456 					goto process_skb;
       
  4457 				/* recycle */
       
  4458 				buffer_info->skb = skb;
       
  4459 				goto next_desc;
       
  4460 			}
       
  4461 		}
       
  4462 
       
  4463 process_skb:
       
  4464 		total_rx_bytes += (length - 4); /* don't count FCS */
       
  4465 		total_rx_packets++;
       
  4466 
       
  4467 		if (likely(!(netdev->features & NETIF_F_RXFCS)))
       
  4468 			/* adjust length to remove Ethernet CRC, this must be
       
  4469 			 * done after the TBI_ACCEPT workaround above
       
  4470 			 */
       
  4471 			length -= 4;
       
  4472 
       
  4473 		e1000_check_copybreak(netdev, buffer_info, length, &skb);
       
  4474 
       
  4475 		skb_put(skb, length);
       
  4476 
       
  4477 		/* Receive Checksum Offload */
       
  4478 		e1000_rx_checksum(adapter,
       
  4479 				  (u32)(status) |
       
  4480 				  ((u32)(rx_desc->errors) << 24),
       
  4481 				  le16_to_cpu(rx_desc->csum), skb);
       
  4482 
       
  4483 		if (adapter->ecdev) {
       
  4484 			ecdev_receive(adapter->ecdev, skb->data, length);
       
  4485 
       
   4486 			/* no need to detect link status as long as frames

   4487 			 * are received: reset the watchdog */
       
  4488 			adapter->ec_watchdog_jiffies = jiffies;
       
  4489 		} else {
       
  4490 			e1000_receive_skb(adapter, status, rx_desc->special, skb);
       
  4491 		}
       
  4492 
       
  4493 next_desc:
       
  4494 		rx_desc->status = 0;
       
  4495 
       
  4496 		/* return some buffers to hardware, one at a time is too slow */
       
  4497 		if (unlikely(cleaned_count >= E1000_RX_BUFFER_WRITE)) {
       
  4498 			adapter->alloc_rx_buf(adapter, rx_ring, cleaned_count);
       
  4499 			cleaned_count = 0;
       
  4500 		}
       
  4501 
       
  4502 		/* use prefetched values */
       
  4503 		rx_desc = next_rxd;
       
  4504 		buffer_info = next_buffer;
       
  4505 	}
       
  4506 	rx_ring->next_to_clean = i;
       
  4507 
       
  4508 	cleaned_count = E1000_DESC_UNUSED(rx_ring);
       
  4509 	if (cleaned_count)
       
  4510 		adapter->alloc_rx_buf(adapter, rx_ring, cleaned_count);
       
  4511 
       
  4512 	adapter->total_rx_packets += total_rx_packets;
       
  4513 	adapter->total_rx_bytes += total_rx_bytes;
       
  4514 	netdev->stats.rx_bytes += total_rx_bytes;
       
  4515 	netdev->stats.rx_packets += total_rx_packets;
       
  4516 	return cleaned;
       
  4517 }
       
  4518 
       
  4519 /**
       
  4520  * e1000_alloc_jumbo_rx_buffers - Replace used jumbo receive buffers
       
  4521  * @adapter: address of board private structure
       
  4522  * @rx_ring: pointer to receive ring structure
       
  4523  * @cleaned_count: number of buffers to allocate this pass
       
  4524  **/
       
  4525 static void
       
  4526 e1000_alloc_jumbo_rx_buffers(struct e1000_adapter *adapter,
       
  4527 			     struct e1000_rx_ring *rx_ring, int cleaned_count)
       
  4528 {
       
  4529 	struct net_device *netdev = adapter->netdev;
       
  4530 	struct pci_dev *pdev = adapter->pdev;
       
  4531 	struct e1000_rx_desc *rx_desc;
       
  4532 	struct e1000_buffer *buffer_info;
       
  4533 	struct sk_buff *skb;
       
  4534 	unsigned int i;
       
   4535 	unsigned int bufsz = 256 - 16; /* room for skb_reserve */
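        	/* the jumbo path carries payload in page fragments, so the skb
        	 * linear area only needs room for headers pulled in later
        	 */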
       
  4536 
       
  4537 	i = rx_ring->next_to_use;
       
  4538 	buffer_info = &rx_ring->buffer_info[i];
       
  4539 
       
  4540 	while (cleaned_count--) {
       
  4541 		skb = buffer_info->skb;
       
  4542 		if (skb) {
       
  4543 			skb_trim(skb, 0);
       
  4544 			goto check_page;
       
  4545 		}
       
  4546 
       
  4547 		skb = netdev_alloc_skb_ip_align(netdev, bufsz);
       
  4548 		if (unlikely(!skb)) {
       
  4549 			/* Better luck next round */
       
  4550 			adapter->alloc_rx_buff_failed++;
       
  4551 			break;
       
  4552 		}
       
  4553 
       
  4554 		buffer_info->skb = skb;
       
  4555 		buffer_info->length = adapter->rx_buffer_len;
       
  4556 check_page:
       
  4557 		/* allocate a new page if necessary */
       
  4558 		if (!buffer_info->page) {
       
  4559 			buffer_info->page = alloc_page(GFP_ATOMIC);
       
  4560 			if (unlikely(!buffer_info->page)) {
       
  4561 				adapter->alloc_rx_buff_failed++;
       
  4562 				break;
       
  4563 			}
       
  4564 		}
       
  4565 
       
  4566 		if (!buffer_info->dma) {
       
  4567 			buffer_info->dma = dma_map_page(&pdev->dev,
       
  4568 							buffer_info->page, 0,
       
  4569 							buffer_info->length,
       
  4570 							DMA_FROM_DEVICE);
       
  4571 			if (dma_mapping_error(&pdev->dev, buffer_info->dma)) {
       
  4572 				put_page(buffer_info->page);
       
  4573 				dev_kfree_skb(skb);
       
  4574 				buffer_info->page = NULL;
       
  4575 				buffer_info->skb = NULL;
       
  4576 				buffer_info->dma = 0;
       
  4577 				adapter->alloc_rx_buff_failed++;
       
  4578 				break; /* while !buffer_info->skb */
       
  4579 			}
       
  4580 		}
       
  4581 
       
  4582 		rx_desc = E1000_RX_DESC(*rx_ring, i);
       
  4583 		rx_desc->buffer_addr = cpu_to_le64(buffer_info->dma);
       
  4584 
       
  4585 		if (unlikely(++i == rx_ring->count))
       
  4586 			i = 0;
       
  4587 		buffer_info = &rx_ring->buffer_info[i];
       
  4588 	}
       
  4589 
       
  4590 	if (likely(rx_ring->next_to_use != i)) {
       
  4591 		rx_ring->next_to_use = i;
       
  4592 		if (unlikely(i-- == 0))
       
  4593 			i = (rx_ring->count - 1);
       
  4594 
       
  4595 		/* Force memory writes to complete before letting h/w
       
  4596 		 * know there are new descriptors to fetch.  (Only
       
  4597 		 * applicable for weak-ordered memory model archs,
       
  4598 		 * such as IA-64).
       
  4599 		 */
       
  4600 		wmb();
       
  4601 		writel(i, adapter->hw.hw_addr + rx_ring->rdt);
       
  4602 	}
       
  4603 }
       
  4604 
       
  4605 /**
       
  4606  * e1000_alloc_rx_buffers - Replace used receive buffers; legacy & extended
       
  4607  * @adapter: address of board private structure
       
  4608  **/
       
  4609 static void e1000_alloc_rx_buffers(struct e1000_adapter *adapter,
       
  4610 				   struct e1000_rx_ring *rx_ring,
       
  4611 				   int cleaned_count)
       
  4612 {
       
  4613 	struct e1000_hw *hw = &adapter->hw;
       
  4614 	struct net_device *netdev = adapter->netdev;
       
  4615 	struct pci_dev *pdev = adapter->pdev;
       
  4616 	struct e1000_rx_desc *rx_desc;
       
  4617 	struct e1000_buffer *buffer_info;
       
  4618 	struct sk_buff *skb;
       
  4619 	unsigned int i;
       
  4620 	unsigned int bufsz = adapter->rx_buffer_len;
       
  4621 
       
  4622 	i = rx_ring->next_to_use;
       
  4623 	buffer_info = &rx_ring->buffer_info[i];
       
  4624 
       
  4625 	while (cleaned_count--) {
       
  4626 		skb = buffer_info->skb;
       
  4627 		if (skb) {
       
  4628 			skb_trim(skb, 0);
       
  4629 			goto map_skb;
       
  4630 		}
       
  4631 
       
  4632 		skb = netdev_alloc_skb_ip_align(netdev, bufsz);
       
  4633 		if (unlikely(!skb)) {
       
  4634 			/* Better luck next round */
       
  4635 			adapter->alloc_rx_buff_failed++;
       
  4636 			break;
       
  4637 		}
       
  4638 
       
  4639 		/* Fix for errata 23, can't cross 64kB boundary */
       
  4640 		if (!e1000_check_64k_bound(adapter, skb->data, bufsz)) {
       
  4641 			struct sk_buff *oldskb = skb;
       
  4642 			e_err(rx_err, "skb align check failed: %u bytes at "
       
  4643 			      "%p\n", bufsz, skb->data);
       
  4644 			/* Try again, without freeing the previous */
       
  4645 			skb = netdev_alloc_skb_ip_align(netdev, bufsz);
       
  4646 			/* Failed allocation, critical failure */
       
  4647 			if (!skb) {
       
  4648 				dev_kfree_skb(oldskb);
       
  4649 				adapter->alloc_rx_buff_failed++;
       
  4650 				break;
       
  4651 			}
       
  4652 
       
  4653 			if (!e1000_check_64k_bound(adapter, skb->data, bufsz)) {
       
  4654 				/* give up */
       
  4655 				dev_kfree_skb(skb);
       
  4656 				dev_kfree_skb(oldskb);
       
  4657 				adapter->alloc_rx_buff_failed++;
       
  4658 				break; /* while !buffer_info->skb */
       
  4659 			}
       
  4660 
       
  4661 			/* Use new allocation */
       
  4662 			dev_kfree_skb(oldskb);
       
  4663 		}
       
  4664 		buffer_info->skb = skb;
       
  4665 		buffer_info->length = adapter->rx_buffer_len;
       
  4666 map_skb:
       
  4667 		buffer_info->dma = dma_map_single(&pdev->dev,
       
  4668 						  skb->data,
       
  4669 						  buffer_info->length,
       
  4670 						  DMA_FROM_DEVICE);
       
  4671 		if (dma_mapping_error(&pdev->dev, buffer_info->dma)) {
       
  4672 			dev_kfree_skb(skb);
       
  4673 			buffer_info->skb = NULL;
       
  4674 			buffer_info->dma = 0;
       
  4675 			adapter->alloc_rx_buff_failed++;
       
  4676 			break; /* while !buffer_info->skb */
       
  4677 		}
       
  4678 
       
  4679 		/* XXX if it was allocated cleanly it will never map to a
       
  4680 		 * boundary crossing
       
  4681 		 */
       
  4682 
       
  4683 		/* Fix for errata 23, can't cross 64kB boundary */
       
  4684 		if (!e1000_check_64k_bound(adapter,
       
  4685 					(void *)(unsigned long)buffer_info->dma,
       
  4686 					adapter->rx_buffer_len)) {
       
  4687 			e_err(rx_err, "dma align check failed: %u bytes at "
       
  4688 			      "%p\n", adapter->rx_buffer_len,
       
  4689 			      (void *)(unsigned long)buffer_info->dma);
       
  4690 			dev_kfree_skb(skb);
       
  4691 			buffer_info->skb = NULL;
       
  4692 
       
  4693 			dma_unmap_single(&pdev->dev, buffer_info->dma,
       
  4694 					 adapter->rx_buffer_len,
       
  4695 					 DMA_FROM_DEVICE);
       
  4696 			buffer_info->dma = 0;
       
  4697 
       
  4698 			adapter->alloc_rx_buff_failed++;
       
  4699 			break; /* while !buffer_info->skb */
       
  4700 		}
       
  4701 		rx_desc = E1000_RX_DESC(*rx_ring, i);
       
  4702 		rx_desc->buffer_addr = cpu_to_le64(buffer_info->dma);
       
  4703 
       
  4704 		if (unlikely(++i == rx_ring->count))
       
  4705 			i = 0;
       
  4706 		buffer_info = &rx_ring->buffer_info[i];
       
  4707 	}
       
  4708 
       
  4709 	if (likely(rx_ring->next_to_use != i)) {
       
  4710 		rx_ring->next_to_use = i;
       
  4711 		if (unlikely(i-- == 0))
       
  4712 			i = (rx_ring->count - 1);
       
  4713 
       
  4714 		/* Force memory writes to complete before letting h/w
       
  4715 		 * know there are new descriptors to fetch.  (Only
       
  4716 		 * applicable for weak-ordered memory model archs,
       
  4717 		 * such as IA-64).
       
  4718 		 */
       
  4719 		wmb();
       
  4720 		writel(i, hw->hw_addr + rx_ring->rdt);
       
  4721 	}
       
  4722 }
       
  4723 
       
  4724 /**
       
  4725  * e1000_smartspeed - Workaround for SmartSpeed on 82541 and 82547 controllers.
       
   4726  * @adapter: board private structure
       
  4727  **/
       
  4728 static void e1000_smartspeed(struct e1000_adapter *adapter)
       
  4729 {
       
  4730 	struct e1000_hw *hw = &adapter->hw;
       
  4731 	u16 phy_status;
       
  4732 	u16 phy_ctrl;
       
  4733 
       
  4734 	if ((hw->phy_type != e1000_phy_igp) || !hw->autoneg ||
       
  4735 	   !(hw->autoneg_advertised & ADVERTISE_1000_FULL))
       
  4736 		return;
       
  4737 
       
  4738 	if (adapter->smartspeed == 0) {
       
  4739 		/* If Master/Slave config fault is asserted twice,
       
  4740 		 * we assume back-to-back
       
  4741 		 */
       
  4742 		e1000_read_phy_reg(hw, PHY_1000T_STATUS, &phy_status);
       
  4743 		if (!(phy_status & SR_1000T_MS_CONFIG_FAULT)) return;
       
  4744 		e1000_read_phy_reg(hw, PHY_1000T_STATUS, &phy_status);
       
  4745 		if (!(phy_status & SR_1000T_MS_CONFIG_FAULT)) return;
       
  4746 		e1000_read_phy_reg(hw, PHY_1000T_CTRL, &phy_ctrl);
       
  4747 		if (phy_ctrl & CR_1000T_MS_ENABLE) {
       
  4748 			phy_ctrl &= ~CR_1000T_MS_ENABLE;
       
  4749 			e1000_write_phy_reg(hw, PHY_1000T_CTRL,
       
  4750 					    phy_ctrl);
       
  4751 			adapter->smartspeed++;
       
  4752 			if (!e1000_phy_setup_autoneg(hw) &&
       
  4753 			   !e1000_read_phy_reg(hw, PHY_CTRL,
       
  4754 					       &phy_ctrl)) {
       
  4755 				phy_ctrl |= (MII_CR_AUTO_NEG_EN |
       
  4756 					     MII_CR_RESTART_AUTO_NEG);
       
  4757 				e1000_write_phy_reg(hw, PHY_CTRL,
       
  4758 						    phy_ctrl);
       
  4759 			}
       
  4760 		}
       
  4761 		return;
       
  4762 	} else if (adapter->smartspeed == E1000_SMARTSPEED_DOWNSHIFT) {
       
  4763 		/* If still no link, perhaps using 2/3 pair cable */
       
  4764 		e1000_read_phy_reg(hw, PHY_1000T_CTRL, &phy_ctrl);
       
  4765 		phy_ctrl |= CR_1000T_MS_ENABLE;
       
  4766 		e1000_write_phy_reg(hw, PHY_1000T_CTRL, phy_ctrl);
       
  4767 		if (!e1000_phy_setup_autoneg(hw) &&
       
  4768 		   !e1000_read_phy_reg(hw, PHY_CTRL, &phy_ctrl)) {
       
  4769 			phy_ctrl |= (MII_CR_AUTO_NEG_EN |
       
  4770 				     MII_CR_RESTART_AUTO_NEG);
       
  4771 			e1000_write_phy_reg(hw, PHY_CTRL, phy_ctrl);
       
  4772 		}
       
  4773 	}
       
  4774 	/* Restart process after E1000_SMARTSPEED_MAX iterations */
       
  4775 	if (adapter->smartspeed++ == E1000_SMARTSPEED_MAX)
       
  4776 		adapter->smartspeed = 0;
       
  4777 }
       
  4778 
       
  4779 /**
       
   4780  * e1000_ioctl - handle device-specific ioctls

   4781  * @netdev: network interface device structure

   4782  * @ifr: interface request structure

   4783  * @cmd: ioctl command to execute
       
  4784  **/
       
  4785 static int e1000_ioctl(struct net_device *netdev, struct ifreq *ifr, int cmd)
       
  4786 {
       
  4787 	switch (cmd) {
       
  4788 	case SIOCGMIIPHY:
       
  4789 	case SIOCGMIIREG:
       
  4790 	case SIOCSMIIREG:
       
  4791 		return e1000_mii_ioctl(netdev, ifr, cmd);
       
  4792 	default:
       
  4793 		return -EOPNOTSUPP;
       
  4794 	}
       
  4795 }
       
  4796 
       
  4797 /**
       
   4798  * e1000_mii_ioctl - handle MII register ioctls

   4799  * @netdev: network interface device structure

   4800  * @ifr: interface request structure carrying the MII data

   4801  * @cmd: SIOCGMIIPHY, SIOCGMIIREG or SIOCSMIIREG
       
  4802  **/
       
  4803 static int e1000_mii_ioctl(struct net_device *netdev, struct ifreq *ifr,
       
  4804 			   int cmd)
       
  4805 {
       
  4806 	struct e1000_adapter *adapter = netdev_priv(netdev);
       
  4807 	struct e1000_hw *hw = &adapter->hw;
       
  4808 	struct mii_ioctl_data *data = if_mii(ifr);
       
  4809 	int retval;
       
  4810 	u16 mii_reg;
       
  4811 	unsigned long flags;
       
  4812 
       
  4813 	if (hw->media_type != e1000_media_type_copper)
       
  4814 		return -EOPNOTSUPP;
       
  4815 
       
  4816 	switch (cmd) {
       
  4817 	case SIOCGMIIPHY:
       
  4818 		data->phy_id = hw->phy_addr;
       
  4819 		break;
       
  4820 	case SIOCGMIIREG:
       
  4821 		if (adapter->ecdev) {
       
  4822 			return -EPERM;
       
  4823 		}
       
  4824 		spin_lock_irqsave(&adapter->stats_lock, flags);
       
  4825 		if (e1000_read_phy_reg(hw, data->reg_num & 0x1F,
       
  4826 				   &data->val_out)) {
       
  4827 			spin_unlock_irqrestore(&adapter->stats_lock, flags);
       
  4828 			return -EIO;
       
  4829 		}
       
  4830 		spin_unlock_irqrestore(&adapter->stats_lock, flags);
       
  4831 		break;
       
  4832 	case SIOCSMIIREG:
       
  4833 		if (adapter->ecdev) {
       
  4834 			return -EPERM;
       
  4835 		}
       
  4836 		if (data->reg_num & ~(0x1F))
       
  4837 			return -EFAULT;
       
  4838 		mii_reg = data->val_in;
       
  4839 		spin_lock_irqsave(&adapter->stats_lock, flags);
       
  4840 		if (e1000_write_phy_reg(hw, data->reg_num,
       
  4841 					mii_reg)) {
       
  4842 			spin_unlock_irqrestore(&adapter->stats_lock, flags);
       
  4843 			return -EIO;
       
  4844 		}
       
  4845 		spin_unlock_irqrestore(&adapter->stats_lock, flags);
       
  4846 		if (hw->media_type == e1000_media_type_copper) {
       
  4847 			switch (data->reg_num) {
       
  4848 			case PHY_CTRL:
       
  4849 				if (mii_reg & MII_CR_POWER_DOWN)
       
  4850 					break;
       
  4851 				if (mii_reg & MII_CR_AUTO_NEG_EN) {
       
  4852 					hw->autoneg = 1;
       
  4853 					hw->autoneg_advertised = 0x2F;
       
  4854 				} else {
       
  4855 					u32 speed;
       
  4856 					if (mii_reg & 0x40)
       
  4857 						speed = SPEED_1000;
       
  4858 					else if (mii_reg & 0x2000)
       
  4859 						speed = SPEED_100;
       
  4860 					else
       
  4861 						speed = SPEED_10;
       
  4862 					retval = e1000_set_spd_dplx(
       
  4863 						adapter, speed,
       
  4864 						((mii_reg & 0x100)
       
  4865 						 ? DUPLEX_FULL :
       
  4866 						 DUPLEX_HALF));
       
  4867 					if (retval)
       
  4868 						return retval;
       
  4869 				}
       
  4870 				if (netif_running(adapter->netdev))
       
  4871 					e1000_reinit_locked(adapter);
       
  4872 				else
       
  4873 					e1000_reset(adapter);
       
  4874 				break;
       
  4875 			case M88E1000_PHY_SPEC_CTRL:
       
  4876 			case M88E1000_EXT_PHY_SPEC_CTRL:
       
  4877 				if (e1000_phy_reset(hw))
       
  4878 					return -EIO;
       
  4879 				break;
       
  4880 			}
       
  4881 		} else {
       
  4882 			switch (data->reg_num) {
       
  4883 			case PHY_CTRL:
       
  4884 				if (mii_reg & MII_CR_POWER_DOWN)
       
  4885 					break;
       
  4886 				if (netif_running(adapter->netdev))
       
  4887 					e1000_reinit_locked(adapter);
       
  4888 				else
       
  4889 					e1000_reset(adapter);
       
  4890 				break;
       
  4891 			}
       
  4892 		}
       
  4893 		break;
       
  4894 	default:
       
  4895 		return -EOPNOTSUPP;
       
  4896 	}
       
  4897 	return E1000_SUCCESS;
       
  4898 }
       
  4899 
       
  4900 void e1000_pci_set_mwi(struct e1000_hw *hw)
       
  4901 {
       
  4902 	struct e1000_adapter *adapter = hw->back;
       
  4903 	int ret_val = pci_set_mwi(adapter->pdev);
       
  4904 
       
  4905 	if (ret_val)
       
  4906 		e_err(probe, "Error in setting MWI\n");
       
  4907 }
       
  4908 
       
  4909 void e1000_pci_clear_mwi(struct e1000_hw *hw)
       
  4910 {
       
  4911 	struct e1000_adapter *adapter = hw->back;
       
  4912 
       
  4913 	pci_clear_mwi(adapter->pdev);
       
  4914 }
       
  4915 
       
  4916 int e1000_pcix_get_mmrbc(struct e1000_hw *hw)
       
  4917 {
       
  4918 	struct e1000_adapter *adapter = hw->back;
       
  4919 	return pcix_get_mmrbc(adapter->pdev);
       
  4920 }
       
  4921 
       
  4922 void e1000_pcix_set_mmrbc(struct e1000_hw *hw, int mmrbc)
       
  4923 {
       
  4924 	struct e1000_adapter *adapter = hw->back;
       
  4925 	pcix_set_mmrbc(adapter->pdev, mmrbc);
       
  4926 }
       
  4927 
       
  4928 void e1000_io_write(struct e1000_hw *hw, unsigned long port, u32 value)
       
  4929 {
       
  4930 	outl(value, port);
       
  4931 }
       
  4932 
       
  4933 static bool e1000_vlan_used(struct e1000_adapter *adapter)
       
  4934 {
       
  4935 	u16 vid;
       
  4936 
       
  4937 	for_each_set_bit(vid, adapter->active_vlans, VLAN_N_VID)
       
  4938 		return true;
       
  4939 	return false;
       
  4940 }
       
  4941 
       
  4942 static void __e1000_vlan_mode(struct e1000_adapter *adapter,
       
  4943 			      netdev_features_t features)
       
  4944 {
       
  4945 	struct e1000_hw *hw = &adapter->hw;
       
  4946 	u32 ctrl;
       
  4947 
       
  4948 	ctrl = er32(CTRL);
       
  4949 	if (features & NETIF_F_HW_VLAN_CTAG_RX) {
       
  4950 		/* enable VLAN tag insert/strip */
       
  4951 		ctrl |= E1000_CTRL_VME;
       
  4952 	} else {
       
  4953 		/* disable VLAN tag insert/strip */
       
  4954 		ctrl &= ~E1000_CTRL_VME;
       
  4955 	}
       
  4956 	ew32(CTRL, ctrl);
       
  4957 }
       
  4958 static void e1000_vlan_filter_on_off(struct e1000_adapter *adapter,
       
  4959 				     bool filter_on)
       
  4960 {
       
  4961 	struct e1000_hw *hw = &adapter->hw;
       
  4962 	u32 rctl;
       
  4963 
       
  4964 	if (!test_bit(__E1000_DOWN, &adapter->flags))
       
  4965 		e1000_irq_disable(adapter);
       
  4966 
       
  4967 	__e1000_vlan_mode(adapter, adapter->netdev->features);
       
  4968 	if (filter_on) {
       
  4969 		/* enable VLAN receive filtering */
       
  4970 		rctl = er32(RCTL);
       
  4971 		rctl &= ~E1000_RCTL_CFIEN;
       
  4972 		if (!(adapter->netdev->flags & IFF_PROMISC))
       
  4973 			rctl |= E1000_RCTL_VFE;
       
  4974 		ew32(RCTL, rctl);
       
  4975 		e1000_update_mng_vlan(adapter);
       
  4976 	} else {
       
  4977 		/* disable VLAN receive filtering */
       
  4978 		rctl = er32(RCTL);
       
  4979 		rctl &= ~E1000_RCTL_VFE;
       
  4980 		ew32(RCTL, rctl);
       
  4981 	}
       
  4982 
       
  4983 	if (!test_bit(__E1000_DOWN, &adapter->flags))
       
  4984 		e1000_irq_enable(adapter);
       
  4985 }
       
  4986 
       
  4987 static void e1000_vlan_mode(struct net_device *netdev,
       
  4988 			    netdev_features_t features)
       
  4989 {
       
  4990 	struct e1000_adapter *adapter = netdev_priv(netdev);
       
  4991 
       
  4992 	if (!test_bit(__E1000_DOWN, &adapter->flags))
       
  4993 		e1000_irq_disable(adapter);
       
  4994 
       
  4995 	__e1000_vlan_mode(adapter, features);
       
  4996 
       
  4997 	if (!test_bit(__E1000_DOWN, &adapter->flags))
       
  4998 		e1000_irq_enable(adapter);
       
  4999 }
       
  5000 
       
  5001 static int e1000_vlan_rx_add_vid(struct net_device *netdev,
       
  5002 				 __be16 proto, u16 vid)
       
  5003 {
       
  5004 	struct e1000_adapter *adapter = netdev_priv(netdev);
       
  5005 	struct e1000_hw *hw = &adapter->hw;
       
  5006 	u32 vfta, index;
       
  5007 
       
  5008 	if ((hw->mng_cookie.status &
       
  5009 	     E1000_MNG_DHCP_COOKIE_STATUS_VLAN_SUPPORT) &&
       
  5010 	    (vid == adapter->mng_vlan_id))
       
  5011 		return 0;
       
  5012 
       
  5013 	if (!e1000_vlan_used(adapter))
       
  5014 		e1000_vlan_filter_on_off(adapter, true);
       
  5015 
       
  5016 	/* add VID to filter table */
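        	/* VFTA is an array of 128 32-bit registers covering all 4096 VLAN
        	 * IDs: VID bits 11:5 select the register and bits 4:0 the bit
        	 * within it, e.g. VID 100 maps to register 3, bit 4
        	 */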
       
  5017 	index = (vid >> 5) & 0x7F;
       
  5018 	vfta = E1000_READ_REG_ARRAY(hw, VFTA, index);
       
  5019 	vfta |= (1 << (vid & 0x1F));
       
  5020 	e1000_write_vfta(hw, index, vfta);
       
  5021 
       
  5022 	set_bit(vid, adapter->active_vlans);
       
  5023 
       
  5024 	return 0;
       
  5025 }
       
  5026 
       
  5027 static int e1000_vlan_rx_kill_vid(struct net_device *netdev,
       
  5028 				  __be16 proto, u16 vid)
       
  5029 {
       
  5030 	struct e1000_adapter *adapter = netdev_priv(netdev);
       
  5031 	struct e1000_hw *hw = &adapter->hw;
       
  5032 	u32 vfta, index;
       
  5033 
       
  5034 	if (!test_bit(__E1000_DOWN, &adapter->flags))
       
  5035 		e1000_irq_disable(adapter);
       
  5036 	if (!test_bit(__E1000_DOWN, &adapter->flags))
       
  5037 		e1000_irq_enable(adapter);
       
  5038 
       
  5039 	/* remove VID from filter table */
       
  5040 	index = (vid >> 5) & 0x7F;
       
  5041 	vfta = E1000_READ_REG_ARRAY(hw, VFTA, index);
       
  5042 	vfta &= ~(1 << (vid & 0x1F));
       
  5043 	e1000_write_vfta(hw, index, vfta);
       
  5044 
       
  5045 	clear_bit(vid, adapter->active_vlans);
       
  5046 
       
  5047 	if (!e1000_vlan_used(adapter))
       
  5048 		e1000_vlan_filter_on_off(adapter, false);
       
  5049 
       
  5050 	return 0;
       
  5051 }
       
  5052 
       
  5053 static void e1000_restore_vlan(struct e1000_adapter *adapter)
       
  5054 {
       
  5055 	u16 vid;
       
  5056 
       
  5057 	if (!e1000_vlan_used(adapter))
       
  5058 		return;
       
  5059 
       
  5060 	e1000_vlan_filter_on_off(adapter, true);
       
  5061 	for_each_set_bit(vid, adapter->active_vlans, VLAN_N_VID)
       
  5062 		e1000_vlan_rx_add_vid(adapter->netdev, htons(ETH_P_8021Q), vid);
       
  5063 }
       
  5064 
       
  5065 int e1000_set_spd_dplx(struct e1000_adapter *adapter, u32 spd, u8 dplx)
       
  5066 {
       
  5067 	struct e1000_hw *hw = &adapter->hw;
       
  5068 
       
  5069 	hw->autoneg = 0;
       
  5070 
       
  5071 	/* Make sure dplx is at most 1 bit and lsb of speed is not set
       
  5072 	 * for the switch() below to work
       
  5073 	 */
       
  5074 	if ((spd & 1) || (dplx & ~1))
       
  5075 		goto err_inval;
       
  5076 
       
   5077 	/* Fiber NICs only support 1000 Mbps full duplex */
       
  5078 	if ((hw->media_type == e1000_media_type_fiber) &&
       
  5079 	    spd != SPEED_1000 &&
       
  5080 	    dplx != DUPLEX_FULL)
       
  5081 		goto err_inval;
       
  5082 
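        	/* SPEED_* values are even and DUPLEX_FULL is 1, so spd + dplx is
        	 * a unique key, e.g. SPEED_100 + DUPLEX_FULL == 101
        	 */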
       
  5083 	switch (spd + dplx) {
       
  5084 	case SPEED_10 + DUPLEX_HALF:
       
  5085 		hw->forced_speed_duplex = e1000_10_half;
       
  5086 		break;
       
  5087 	case SPEED_10 + DUPLEX_FULL:
       
  5088 		hw->forced_speed_duplex = e1000_10_full;
       
  5089 		break;
       
  5090 	case SPEED_100 + DUPLEX_HALF:
       
  5091 		hw->forced_speed_duplex = e1000_100_half;
       
  5092 		break;
       
  5093 	case SPEED_100 + DUPLEX_FULL:
       
  5094 		hw->forced_speed_duplex = e1000_100_full;
       
  5095 		break;
       
  5096 	case SPEED_1000 + DUPLEX_FULL:
       
  5097 		hw->autoneg = 1;
       
  5098 		hw->autoneg_advertised = ADVERTISE_1000_FULL;
       
  5099 		break;
       
  5100 	case SPEED_1000 + DUPLEX_HALF: /* not supported */
       
  5101 	default:
       
  5102 		goto err_inval;
       
  5103 	}
       
  5104 
       
  5105 	/* clear MDI, MDI(-X) override is only allowed when autoneg enabled */
       
  5106 	hw->mdix = AUTO_ALL_MODES;
       
  5107 
       
  5108 	return 0;
       
  5109 
       
  5110 err_inval:
       
  5111 	e_err(probe, "Unsupported Speed/Duplex configuration\n");
       
  5112 	return -EINVAL;
       
  5113 }
       
  5114 
       
  5115 static int __e1000_shutdown(struct pci_dev *pdev, bool *enable_wake)
       
  5116 {
       
  5117 	struct net_device *netdev = pci_get_drvdata(pdev);
       
  5118 	struct e1000_adapter *adapter = netdev_priv(netdev);
       
  5119 	struct e1000_hw *hw = &adapter->hw;
       
  5120 	u32 ctrl, ctrl_ext, rctl, status;
       
  5121 	u32 wufc = adapter->wol;
       
  5122 #ifdef CONFIG_PM
       
  5123 	int retval = 0;
       
  5124 #endif
       
  5125 
       
  5126 	if (adapter->ecdev) {
       
  5127 		return -EBUSY;
       
  5128 	}
       
  5129 
       
  5130 	netif_device_detach(netdev);
       
  5131 
       
  5132 	if (netif_running(netdev)) {
       
  5133 		WARN_ON(test_bit(__E1000_RESETTING, &adapter->flags));
       
  5134 		e1000_down(adapter);
       
  5135 	}
       
  5136 
       
  5137 #ifdef CONFIG_PM
       
  5138 	retval = pci_save_state(pdev);
       
  5139 	if (retval)
       
  5140 		return retval;
       
  5141 #endif
       
  5142 
       
  5143 	status = er32(STATUS);
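        	/* when the link is already up there is no point in waking on a
        	 * link-state change, so drop that bit from the wake-up filter
        	 */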
       
  5144 	if (status & E1000_STATUS_LU)
       
  5145 		wufc &= ~E1000_WUFC_LNKC;
       
  5146 
       
  5147 	if (wufc) {
       
  5148 		e1000_setup_rctl(adapter);
       
  5149 		e1000_set_rx_mode(netdev);
       
  5150 
       
  5151 		rctl = er32(RCTL);
       
  5152 
       
  5153 		/* turn on all-multi mode if wake on multicast is enabled */
       
  5154 		if (wufc & E1000_WUFC_MC)
       
  5155 			rctl |= E1000_RCTL_MPE;
       
  5156 
       
  5157 		/* enable receives in the hardware */
       
  5158 		ew32(RCTL, rctl | E1000_RCTL_EN);
       
  5159 
       
  5160 		if (hw->mac_type >= e1000_82540) {
       
  5161 			ctrl = er32(CTRL);
       
  5162 			/* advertise wake from D3Cold */
       
  5163 			#define E1000_CTRL_ADVD3WUC 0x00100000
       
  5164 			/* phy power management enable */
       
  5165 			#define E1000_CTRL_EN_PHY_PWR_MGMT 0x00200000
       
  5166 			ctrl |= E1000_CTRL_ADVD3WUC |
       
  5167 				E1000_CTRL_EN_PHY_PWR_MGMT;
       
  5168 			ew32(CTRL, ctrl);
       
  5169 		}
       
  5170 
       
  5171 		if (hw->media_type == e1000_media_type_fiber ||
       
  5172 		    hw->media_type == e1000_media_type_internal_serdes) {
       
  5173 			/* keep the laser running in D3 */
       
  5174 			ctrl_ext = er32(CTRL_EXT);
       
  5175 			ctrl_ext |= E1000_CTRL_EXT_SDP7_DATA;
       
  5176 			ew32(CTRL_EXT, ctrl_ext);
       
  5177 		}
       
  5178 
       
  5179 		ew32(WUC, E1000_WUC_PME_EN);
       
  5180 		ew32(WUFC, wufc);
       
  5181 	} else {
       
  5182 		ew32(WUC, 0);
       
  5183 		ew32(WUFC, 0);
       
  5184 	}
       
  5185 
       
  5186 	e1000_release_manageability(adapter);
       
  5187 
       
  5188 	*enable_wake = !!wufc;
       
  5189 
       
  5190 	/* make sure adapter isn't asleep if manageability is enabled */
       
  5191 	if (adapter->en_mng_pt)
       
  5192 		*enable_wake = true;
       
  5193 
       
  5194 	if (netif_running(netdev))
       
  5195 		e1000_free_irq(adapter);
       
  5196 
       
  5197 	pci_disable_device(pdev);
       
  5198 
       
  5199 	return 0;
       
  5200 }
       
  5201 
       
  5202 #ifdef CONFIG_PM
       
  5203 static int e1000_suspend(struct pci_dev *pdev, pm_message_t state)
       
  5204 {
       
  5205 	int retval;
       
  5206 	bool wake;
       
  5207 
       
  5208 	retval = __e1000_shutdown(pdev, &wake);
       
  5209 	if (retval)
       
  5210 		return retval;
       
  5211 
       
  5212 	if (wake) {
       
  5213 		pci_prepare_to_sleep(pdev);
       
  5214 	} else {
       
  5215 		pci_wake_from_d3(pdev, false);
       
  5216 		pci_set_power_state(pdev, PCI_D3hot);
       
  5217 	}
       
  5218 
       
  5219 	return 0;
       
  5220 }
       
  5221 
       
  5222 static int e1000_resume(struct pci_dev *pdev)
       
  5223 {
       
  5224 	struct net_device *netdev = pci_get_drvdata(pdev);
       
  5225 	struct e1000_adapter *adapter = netdev_priv(netdev);
       
  5226 	struct e1000_hw *hw = &adapter->hw;
       
   5227 	int err; /* holds negative errno values, so must be signed */
       
  5228 
       
  5229 	if (adapter->ecdev) {
       
  5230 		return -EBUSY;
       
  5231 	}
       
  5232 
       
  5233 	pci_set_power_state(pdev, PCI_D0);
       
  5234 	pci_restore_state(pdev);
       
  5235 	pci_save_state(pdev);
       
  5236 
       
  5237 	if (adapter->need_ioport)
       
  5238 		err = pci_enable_device(pdev);
       
  5239 	else
       
  5240 		err = pci_enable_device_mem(pdev);
       
  5241 	if (err) {
       
  5242 		pr_err("Cannot enable PCI device from suspend\n");
       
  5243 		return err;
       
  5244 	}
       
  5245 	pci_set_master(pdev);
       
  5246 
       
  5247 	pci_enable_wake(pdev, PCI_D3hot, 0);
       
  5248 	pci_enable_wake(pdev, PCI_D3cold, 0);
       
  5249 
       
  5250 	if (netif_running(netdev)) {
       
  5251 		err = e1000_request_irq(adapter);
       
  5252 		if (err)
       
  5253 			return err;
       
  5254 	}
       
  5255 
       
  5256 	e1000_power_up_phy(adapter);
       
  5257 	e1000_reset(adapter);
       
  5258 	ew32(WUS, ~0);
       
  5259 
       
  5260 	e1000_init_manageability(adapter);
       
  5261 
       
  5262 	if (netif_running(netdev))
       
  5263 		e1000_up(adapter);
       
  5264 
       
  5265 	if (!adapter->ecdev) {
       
  5266 		netif_device_attach(netdev);
       
  5267 	}
       
  5268 
       
  5269 	return 0;
       
  5270 }
       
  5271 #endif
       
  5272 
       
  5273 static void e1000_shutdown(struct pci_dev *pdev)
       
  5274 {
       
  5275 	bool wake;
       
  5276 
       
  5277 	__e1000_shutdown(pdev, &wake);
       
  5278 
       
  5279 	if (system_state == SYSTEM_POWER_OFF) {
       
  5280 		pci_wake_from_d3(pdev, wake);
       
  5281 		pci_set_power_state(pdev, PCI_D3hot);
       
  5282 	}
       
  5283 }
       
  5284 
       
  5285 #ifdef CONFIG_NET_POLL_CONTROLLER
       
  5286 /* Polling 'interrupt' - used by things like netconsole to send skbs
       
  5287  * without having to re-enable interrupts. It's not called while
       
  5288  * the interrupt routine is executing.
       
  5289  */
       
  5290 static void e1000_netpoll(struct net_device *netdev)
       
  5291 {
       
  5292 	struct e1000_adapter *adapter = netdev_priv(netdev);
       
  5293 
       
  5294 	disable_irq(adapter->pdev->irq);
       
  5295 	e1000_intr(adapter->pdev->irq, netdev);
       
  5296 	enable_irq(adapter->pdev->irq);
       
  5297 }
       
  5298 #endif
       
  5299 
       
  5300 /**
       
  5301  * e1000_io_error_detected - called when PCI error is detected
       
  5302  * @pdev: Pointer to PCI device
       
  5303  * @state: The current pci connection state
       
  5304  *
       
  5305  * This function is called after a PCI bus error affecting
       
  5306  * this device has been detected.
       
  5307  */
       
  5308 static pci_ers_result_t e1000_io_error_detected(struct pci_dev *pdev,
       
  5309 						pci_channel_state_t state)
       
  5310 {
       
  5311 	struct net_device *netdev = pci_get_drvdata(pdev);
       
  5312 	struct e1000_adapter *adapter = netdev_priv(netdev);
       
  5313 
       
  5314 	netif_device_detach(netdev);
       
  5315 
       
  5316 	if (state == pci_channel_io_perm_failure)
       
  5317 		return PCI_ERS_RESULT_DISCONNECT;
       
  5318 
       
  5319 	if (netif_running(netdev))
       
  5320 		e1000_down(adapter);
       
  5321 	pci_disable_device(pdev);
       
  5322 
       
   5323 	/* Request a slot reset. */
       
  5324 	return PCI_ERS_RESULT_NEED_RESET;
       
  5325 }
       
  5326 
       
  5327 /**
       
  5328  * e1000_io_slot_reset - called after the pci bus has been reset.
       
  5329  * @pdev: Pointer to PCI device
       
  5330  *
       
  5331  * Restart the card from scratch, as if from a cold-boot. Implementation
       
  5332  * resembles the first-half of the e1000_resume routine.
       
  5333  */
       
  5334 static pci_ers_result_t e1000_io_slot_reset(struct pci_dev *pdev)
       
  5335 {
       
  5336 	struct net_device *netdev = pci_get_drvdata(pdev);
       
  5337 	struct e1000_adapter *adapter = netdev_priv(netdev);
       
  5338 	struct e1000_hw *hw = &adapter->hw;
       
  5339 	int err;
       
  5340 
       
  5341 	if (adapter->need_ioport)
       
  5342 		err = pci_enable_device(pdev);
       
  5343 	else
       
  5344 		err = pci_enable_device_mem(pdev);
       
  5345 	if (err) {
       
  5346 		pr_err("Cannot re-enable PCI device after reset.\n");
       
  5347 		return PCI_ERS_RESULT_DISCONNECT;
       
  5348 	}
       
  5349 	pci_set_master(pdev);
       
  5350 
       
  5351 	pci_enable_wake(pdev, PCI_D3hot, 0);
       
  5352 	pci_enable_wake(pdev, PCI_D3cold, 0);
       
  5353 
       
  5354 	e1000_reset(adapter);
       
  5355 	ew32(WUS, ~0);
       
  5356 
       
  5357 	return PCI_ERS_RESULT_RECOVERED;
       
  5358 }
       
  5359 
       
  5360 /**
       
  5361  * e1000_io_resume - called when traffic can start flowing again.
       
  5362  * @pdev: Pointer to PCI device
       
  5363  *
       
  5364  * This callback is called when the error recovery driver tells us that
       
  5365  * its OK to resume normal operation. Implementation resembles the
       
  5366  * second-half of the e1000_resume routine.
       
  5367  */
       
  5368 static void e1000_io_resume(struct pci_dev *pdev)
       
  5369 {
       
  5370 	struct net_device *netdev = pci_get_drvdata(pdev);
       
  5371 	struct e1000_adapter *adapter = netdev_priv(netdev);
       
  5372 
       
  5373 	e1000_init_manageability(adapter);
       
  5374 
       
  5375 	if (netif_running(netdev)) {
       
  5376 		if (e1000_up(adapter)) {
       
  5377 			pr_info("can't bring device back up after reset\n");
       
  5378 			return;
       
  5379 		}
       
  5380 	}
       
  5381 
       
  5382 	netif_device_attach(netdev);
       
  5383 }
       
  5384 
       
  5385 /* e1000_main.c */