devices/e1000/e1000_main-3.6-ethercat.c
       
/*******************************************************************************

  Intel PRO/1000 Linux driver
  Copyright(c) 1999 - 2006 Intel Corporation.

  This program is free software; you can redistribute it and/or modify it
  under the terms and conditions of the GNU General Public License,
  version 2, as published by the Free Software Foundation.

  This program is distributed in the hope it will be useful, but WITHOUT
  ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
  FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
  more details.

  You should have received a copy of the GNU General Public License along with
  this program; if not, write to the Free Software Foundation, Inc.,
  51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.

  The full GNU General Public License is included in this distribution in
  the file called "COPYING".

  Contact Information:
  Linux NICS <linux.nics@intel.com>
  e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
  Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497

  vim: noexpandtab

*******************************************************************************/
       
#include "e1000-3.6-ethercat.h"
#include <net/ip6_checksum.h>
#include <linux/io.h>
#include <linux/prefetch.h>
#include <linux/bitops.h>
#include <linux/if_vlan.h>
       
char e1000_driver_name[] = "ec_e1000";
static char e1000_driver_string[] = "Intel(R) PRO/1000 Network Driver";
#define DRV_VERSION "7.3.21-k8-NAPI"
const char e1000_driver_version[] = DRV_VERSION;
static const char e1000_copyright[] = "Copyright (c) 1999-2006 Intel Corporation.";

/* e1000_pci_tbl - PCI Device ID Table
 *
 * Last entry must be all 0s
 *
 * Macro expands to...
 *   {PCI_DEVICE(PCI_VENDOR_ID_INTEL, device_id)}
 */
static DEFINE_PCI_DEVICE_TABLE(e1000_pci_tbl) = {
	INTEL_E1000_ETHERNET_DEVICE(0x1000),
	INTEL_E1000_ETHERNET_DEVICE(0x1001),
	INTEL_E1000_ETHERNET_DEVICE(0x1004),
	INTEL_E1000_ETHERNET_DEVICE(0x1008),
	INTEL_E1000_ETHERNET_DEVICE(0x1009),
	INTEL_E1000_ETHERNET_DEVICE(0x100C),
	INTEL_E1000_ETHERNET_DEVICE(0x100D),
	INTEL_E1000_ETHERNET_DEVICE(0x100E),
	INTEL_E1000_ETHERNET_DEVICE(0x100F),
	INTEL_E1000_ETHERNET_DEVICE(0x1010),
	INTEL_E1000_ETHERNET_DEVICE(0x1011),
	INTEL_E1000_ETHERNET_DEVICE(0x1012),
	INTEL_E1000_ETHERNET_DEVICE(0x1013),
	INTEL_E1000_ETHERNET_DEVICE(0x1014),
	INTEL_E1000_ETHERNET_DEVICE(0x1015),
	INTEL_E1000_ETHERNET_DEVICE(0x1016),
	INTEL_E1000_ETHERNET_DEVICE(0x1017),
	INTEL_E1000_ETHERNET_DEVICE(0x1018),
	INTEL_E1000_ETHERNET_DEVICE(0x1019),
	INTEL_E1000_ETHERNET_DEVICE(0x101A),
	INTEL_E1000_ETHERNET_DEVICE(0x101D),
	INTEL_E1000_ETHERNET_DEVICE(0x101E),
	INTEL_E1000_ETHERNET_DEVICE(0x1026),
	INTEL_E1000_ETHERNET_DEVICE(0x1027),
	INTEL_E1000_ETHERNET_DEVICE(0x1028),
	INTEL_E1000_ETHERNET_DEVICE(0x1075),
	INTEL_E1000_ETHERNET_DEVICE(0x1076),
	INTEL_E1000_ETHERNET_DEVICE(0x1077),
	INTEL_E1000_ETHERNET_DEVICE(0x1078),
	INTEL_E1000_ETHERNET_DEVICE(0x1079),
	INTEL_E1000_ETHERNET_DEVICE(0x107A),
	INTEL_E1000_ETHERNET_DEVICE(0x107B),
	INTEL_E1000_ETHERNET_DEVICE(0x107C),
	INTEL_E1000_ETHERNET_DEVICE(0x108A),
	INTEL_E1000_ETHERNET_DEVICE(0x1099),
	INTEL_E1000_ETHERNET_DEVICE(0x10B5),
	INTEL_E1000_ETHERNET_DEVICE(0x2E6E),
	/* required last entry */
	{0,}
};

// do not auto-load driver
// MODULE_DEVICE_TABLE(pci, e1000_pci_tbl);
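// (Without an exported device table no modalias information is generated,
// so udev never auto-loads this module; it has to be loaded explicitly,
// leaving it to the EtherCAT master setup to decide which ports to claim.)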
       
int e1000_up(struct e1000_adapter *adapter);
void e1000_down(struct e1000_adapter *adapter);
void e1000_reinit_locked(struct e1000_adapter *adapter);
void e1000_reset(struct e1000_adapter *adapter);
int e1000_setup_all_tx_resources(struct e1000_adapter *adapter);
int e1000_setup_all_rx_resources(struct e1000_adapter *adapter);
void e1000_free_all_tx_resources(struct e1000_adapter *adapter);
void e1000_free_all_rx_resources(struct e1000_adapter *adapter);
static int e1000_setup_tx_resources(struct e1000_adapter *adapter,
                             struct e1000_tx_ring *txdr);
static int e1000_setup_rx_resources(struct e1000_adapter *adapter,
                             struct e1000_rx_ring *rxdr);
static void e1000_free_tx_resources(struct e1000_adapter *adapter,
                             struct e1000_tx_ring *tx_ring);
static void e1000_free_rx_resources(struct e1000_adapter *adapter,
                             struct e1000_rx_ring *rx_ring);
void e1000_update_stats(struct e1000_adapter *adapter);

static int e1000_init_module(void);
static void e1000_exit_module(void);
static int e1000_probe(struct pci_dev *pdev, const struct pci_device_id *ent);
static void __devexit e1000_remove(struct pci_dev *pdev);
static int e1000_alloc_queues(struct e1000_adapter *adapter);
static int e1000_sw_init(struct e1000_adapter *adapter);
static int e1000_open(struct net_device *netdev);
static int e1000_close(struct net_device *netdev);
static void e1000_configure_tx(struct e1000_adapter *adapter);
static void e1000_configure_rx(struct e1000_adapter *adapter);
static void e1000_setup_rctl(struct e1000_adapter *adapter);
static void e1000_clean_all_tx_rings(struct e1000_adapter *adapter);
static void e1000_clean_all_rx_rings(struct e1000_adapter *adapter);
static void e1000_clean_tx_ring(struct e1000_adapter *adapter,
                                struct e1000_tx_ring *tx_ring);
static void e1000_clean_rx_ring(struct e1000_adapter *adapter,
                                struct e1000_rx_ring *rx_ring);
static void e1000_set_rx_mode(struct net_device *netdev);
static void e1000_update_phy_info_task(struct work_struct *work);
static void e1000_watchdog(struct work_struct *work);
static void e1000_82547_tx_fifo_stall_task(struct work_struct *work);
static netdev_tx_t e1000_xmit_frame(struct sk_buff *skb,
				    struct net_device *netdev);
static struct net_device_stats *e1000_get_stats(struct net_device *netdev);
static int e1000_change_mtu(struct net_device *netdev, int new_mtu);
static int e1000_set_mac(struct net_device *netdev, void *p);
void ec_poll(struct net_device *);
static irqreturn_t e1000_intr(int irq, void *data);
static bool e1000_clean_tx_irq(struct e1000_adapter *adapter,
			       struct e1000_tx_ring *tx_ring);
static int e1000_clean(struct napi_struct *napi, int budget);
static bool e1000_clean_rx_irq(struct e1000_adapter *adapter,
			       struct e1000_rx_ring *rx_ring,
			       int *work_done, int work_to_do);
static bool e1000_clean_jumbo_rx_irq(struct e1000_adapter *adapter,
				     struct e1000_rx_ring *rx_ring,
				     int *work_done, int work_to_do);
static void e1000_alloc_rx_buffers(struct e1000_adapter *adapter,
				   struct e1000_rx_ring *rx_ring,
				   int cleaned_count);
static void e1000_alloc_jumbo_rx_buffers(struct e1000_adapter *adapter,
					 struct e1000_rx_ring *rx_ring,
					 int cleaned_count);
static int e1000_ioctl(struct net_device *netdev, struct ifreq *ifr, int cmd);
static int e1000_mii_ioctl(struct net_device *netdev, struct ifreq *ifr,
			   int cmd);
static void e1000_enter_82542_rst(struct e1000_adapter *adapter);
static void e1000_leave_82542_rst(struct e1000_adapter *adapter);
static void e1000_tx_timeout(struct net_device *dev);
static void e1000_reset_task(struct work_struct *work);
static void e1000_smartspeed(struct e1000_adapter *adapter);
static int e1000_82547_fifo_workaround(struct e1000_adapter *adapter,
                                       struct sk_buff *skb);

static bool e1000_vlan_used(struct e1000_adapter *adapter);
static void e1000_vlan_mode(struct net_device *netdev,
			    netdev_features_t features);
static void e1000_vlan_filter_on_off(struct e1000_adapter *adapter,
				     bool filter_on);
static int e1000_vlan_rx_add_vid(struct net_device *netdev, u16 vid);
static int e1000_vlan_rx_kill_vid(struct net_device *netdev, u16 vid);
static void e1000_restore_vlan(struct e1000_adapter *adapter);

#ifdef CONFIG_PM
static int e1000_suspend(struct pci_dev *pdev, pm_message_t state);
static int e1000_resume(struct pci_dev *pdev);
#endif
static void e1000_shutdown(struct pci_dev *pdev);

#ifdef CONFIG_NET_POLL_CONTROLLER
/* for netdump / net console */
static void e1000_netpoll(struct net_device *netdev);
#endif
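
/* Frames no larger than "copybreak" bytes are copied into a freshly
 * allocated skb on receive so the original DMA buffer can be recycled
 * at once; setting the parameter to 0 disables the copy. */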
       
#define COPYBREAK_DEFAULT 256
static unsigned int copybreak __read_mostly = COPYBREAK_DEFAULT;
module_param(copybreak, uint, 0644);
MODULE_PARM_DESC(copybreak,
	"Maximum size of packet that is copied to a new buffer on receive");

static pci_ers_result_t e1000_io_error_detected(struct pci_dev *pdev,
                     pci_channel_state_t state);
static pci_ers_result_t e1000_io_slot_reset(struct pci_dev *pdev);
static void e1000_io_resume(struct pci_dev *pdev);

static struct pci_error_handlers e1000_err_handler = {
	.error_detected = e1000_io_error_detected,
	.slot_reset = e1000_io_slot_reset,
	.resume = e1000_io_resume,
};

static struct pci_driver e1000_driver = {
	.name     = e1000_driver_name,
	.id_table = e1000_pci_tbl,
	.probe    = e1000_probe,
	.remove   = __devexit_p(e1000_remove),
#ifdef CONFIG_PM
	/* Power Management Hooks */
	.suspend  = e1000_suspend,
	.resume   = e1000_resume,
#endif
	.shutdown = e1000_shutdown,
	.err_handler = &e1000_err_handler
};

MODULE_AUTHOR("Florian Pose <fp@igh-essen.com>");
MODULE_DESCRIPTION("EtherCAT-capable Intel(R) PRO/1000 Network Driver");
MODULE_LICENSE("GPL");
MODULE_VERSION(DRV_VERSION);

#define DEFAULT_MSG_ENABLE (NETIF_MSG_DRV|NETIF_MSG_PROBE|NETIF_MSG_LINK)
static int debug = -1;
module_param(debug, int, 0);
MODULE_PARM_DESC(debug, "Debug level (0=none,...,16=all)");

/**
 * e1000_get_hw_dev - return device
 * used by hardware layer to print debugging information
 *
 **/
struct net_device *e1000_get_hw_dev(struct e1000_hw *hw)
{
	struct e1000_adapter *adapter = hw->back;
	return adapter->netdev;
}

/**
 * e1000_init_module - Driver Registration Routine
 *
 * e1000_init_module is the first routine called when the driver is
 * loaded. All it does is register with the PCI subsystem.
 **/

static int __init e1000_init_module(void)
{
	int ret;
	pr_info("%s - version %s\n", e1000_driver_string, e1000_driver_version);

	pr_info("%s\n", e1000_copyright);

	ret = pci_register_driver(&e1000_driver);
	if (copybreak != COPYBREAK_DEFAULT) {
		if (copybreak == 0)
			pr_info("copybreak disabled\n");
		else
			pr_info("copybreak enabled for "
				   "packets <= %u bytes\n", copybreak);
	}
	return ret;
}

module_init(e1000_init_module);

/**
 * e1000_exit_module - Driver Exit Cleanup Routine
 *
 * e1000_exit_module is called just before the driver is removed
 * from memory.
 **/

static void __exit e1000_exit_module(void)
{
	pci_unregister_driver(&e1000_driver);
}

module_exit(e1000_exit_module);

static int e1000_request_irq(struct e1000_adapter *adapter)
{
	struct net_device *netdev = adapter->netdev;
	irq_handler_t handler = e1000_intr;
	int irq_flags = IRQF_SHARED;
	int err;
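
	/* An EtherCAT-claimed device is serviced synchronously by the
	 * master calling ec_poll() (see ecdev_offer() in e1000_probe()),
	 * so no interrupt line is requested for it. */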
       
	if (adapter->ecdev) {
		return 0;
	}

	err = request_irq(adapter->pdev->irq, handler, irq_flags, netdev->name,
	                  netdev);
	if (err) {
		e_err(probe, "Unable to allocate interrupt Error: %d\n", err);
	}

	return err;
}

static void e1000_free_irq(struct e1000_adapter *adapter)
{
	struct net_device *netdev = adapter->netdev;

	if (adapter->ecdev) {
		return;
	}

	free_irq(adapter->pdev->irq, netdev);
}

/**
 * e1000_irq_disable - Mask off interrupt generation on the NIC
 * @adapter: board private structure
 **/

static void e1000_irq_disable(struct e1000_adapter *adapter)
{
	struct e1000_hw *hw = &adapter->hw;
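
	/* Nothing to mask in EtherCAT mode: the interrupt sources of a
	 * claimed device are never enabled in the first place. */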
       
	if (adapter->ecdev) {
		return;
	}

	ew32(IMC, ~0);
	E1000_WRITE_FLUSH();
	synchronize_irq(adapter->pdev->irq);
}

/**
 * e1000_irq_enable - Enable default interrupt generation settings
 * @adapter: board private structure
 **/

static void e1000_irq_enable(struct e1000_adapter *adapter)
{
	struct e1000_hw *hw = &adapter->hw;

	if (adapter->ecdev) {
		return;
	}

	ew32(IMS, IMS_ENABLE_MASK);
	E1000_WRITE_FLUSH();
}
       
static void e1000_update_mng_vlan(struct e1000_adapter *adapter)
{
	struct e1000_hw *hw = &adapter->hw;
	struct net_device *netdev = adapter->netdev;
	u16 vid = hw->mng_cookie.vlan_id;
	u16 old_vid = adapter->mng_vlan_id;

	if (!e1000_vlan_used(adapter))
		return;

	if (!test_bit(vid, adapter->active_vlans)) {
		if (hw->mng_cookie.status &
		    E1000_MNG_DHCP_COOKIE_STATUS_VLAN_SUPPORT) {
			e1000_vlan_rx_add_vid(netdev, vid);
			adapter->mng_vlan_id = vid;
		} else {
			adapter->mng_vlan_id = E1000_MNG_VLAN_NONE;
		}
		if ((old_vid != (u16)E1000_MNG_VLAN_NONE) &&
		    (vid != old_vid) &&
		    !test_bit(old_vid, adapter->active_vlans))
			e1000_vlan_rx_kill_vid(netdev, old_vid);
	} else {
		adapter->mng_vlan_id = vid;
	}
}

static void e1000_init_manageability(struct e1000_adapter *adapter)
{
	struct e1000_hw *hw = &adapter->hw;

	if (adapter->en_mng_pt) {
		u32 manc = er32(MANC);

		/* disable hardware interception of ARP */
		manc &= ~(E1000_MANC_ARP_EN);

		ew32(MANC, manc);
	}
}

static void e1000_release_manageability(struct e1000_adapter *adapter)
{
	struct e1000_hw *hw = &adapter->hw;

	if (adapter->en_mng_pt) {
		u32 manc = er32(MANC);

		/* re-enable hardware interception of ARP */
		manc |= E1000_MANC_ARP_EN;

		ew32(MANC, manc);
	}
}

/**
 * e1000_configure - configure the hardware for RX and TX
 * @adapter: private board structure
 **/
static void e1000_configure(struct e1000_adapter *adapter)
{
	struct net_device *netdev = adapter->netdev;
	int i;

	e1000_set_rx_mode(netdev);

	e1000_restore_vlan(adapter);
	e1000_init_manageability(adapter);

	e1000_configure_tx(adapter);
	e1000_setup_rctl(adapter);
	e1000_configure_rx(adapter);
	/* call E1000_DESC_UNUSED which always leaves
	 * at least 1 descriptor unused to make sure
	 * next_to_use != next_to_clean */
	for (i = 0; i < adapter->num_rx_queues; i++) {
		struct e1000_rx_ring *ring = &adapter->rx_ring[i];
		if (adapter->ecdev) {
			/* fill rx ring completely! */
			adapter->alloc_rx_buf(adapter, ring, ring->count);
		} else {
			/* this one leaves the last ring element unallocated! */
			adapter->alloc_rx_buf(adapter, ring,
					E1000_DESC_UNUSED(ring));
		}
	}
}

int e1000_up(struct e1000_adapter *adapter)
{
	struct e1000_hw *hw = &adapter->hw;

	/* hardware has been reset, we need to reload some things */
	e1000_configure(adapter);

	clear_bit(__E1000_DOWN, &adapter->flags);
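
	/* NAPI, the IRQ and the link-change kick are only used in normal
	 * operation; an EtherCAT-claimed device is polled via ec_poll(). */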
       
	if (!adapter->ecdev) {
		napi_enable(&adapter->napi);

		e1000_irq_enable(adapter);

		netif_wake_queue(adapter->netdev);

		/* fire a link change interrupt to start the watchdog */
		ew32(ICS, E1000_ICS_LSC);
	}
	return 0;
}

/**
 * e1000_power_up_phy - restore link in case the phy was powered down
 * @adapter: address of board private structure
 *
 * The phy may be powered down to save power and turn off link when the
 * driver is unloaded and wake on lan is not enabled (among others)
 * *** this routine MUST be followed by a call to e1000_reset ***
 *
 **/

void e1000_power_up_phy(struct e1000_adapter *adapter)
{
	struct e1000_hw *hw = &adapter->hw;
	u16 mii_reg = 0;

	/* Just clear the power down bit to wake the phy back up */
	if (hw->media_type == e1000_media_type_copper) {
		/* according to the manual, the phy will retain its
		 * settings across a power-down/up cycle */
		e1000_read_phy_reg(hw, PHY_CTRL, &mii_reg);
		mii_reg &= ~MII_CR_POWER_DOWN;
		e1000_write_phy_reg(hw, PHY_CTRL, mii_reg);
	}
}

static void e1000_power_down_phy(struct e1000_adapter *adapter)
{
	struct e1000_hw *hw = &adapter->hw;

	/* Power down the PHY so no link is implied when interface is down *
	 * The PHY cannot be powered down if any of the following is true *
	 * (a) WoL is enabled
	 * (b) AMT is active
	 * (c) SoL/IDER session is active */
	if (!adapter->wol && hw->mac_type >= e1000_82540 &&
	   hw->media_type == e1000_media_type_copper) {
		u16 mii_reg = 0;

		switch (hw->mac_type) {
		case e1000_82540:
		case e1000_82545:
		case e1000_82545_rev_3:
		case e1000_82546:
		case e1000_ce4100:
		case e1000_82546_rev_3:
		case e1000_82541:
		case e1000_82541_rev_2:
		case e1000_82547:
		case e1000_82547_rev_2:
			if (er32(MANC) & E1000_MANC_SMBUS_EN)
				goto out;
			break;
		default:
			goto out;
		}
		e1000_read_phy_reg(hw, PHY_CTRL, &mii_reg);
		mii_reg |= MII_CR_POWER_DOWN;
		e1000_write_phy_reg(hw, PHY_CTRL, mii_reg);
		msleep(1);
	}
out:
	return;
}

static void e1000_down_and_stop(struct e1000_adapter *adapter)
{
	set_bit(__E1000_DOWN, &adapter->flags);

	/* Only kill reset task if adapter is not resetting */
	if (!test_bit(__E1000_RESETTING, &adapter->flags))
		cancel_work_sync(&adapter->reset_task);
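
	/* The watchdog, PHY info and FIFO stall tasks are only scheduled
	 * as delayed work in non-EtherCAT operation, so there is nothing
	 * to cancel for a device claimed by the master. */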
       
	if (!adapter->ecdev) {
		cancel_delayed_work_sync(&adapter->watchdog_task);
		cancel_delayed_work_sync(&adapter->phy_info_task);
		cancel_delayed_work_sync(&adapter->fifo_stall_task);
	}
}

void e1000_down(struct e1000_adapter *adapter)
{
	struct e1000_hw *hw = &adapter->hw;
	struct net_device *netdev = adapter->netdev;
	u32 rctl, tctl;

	/* disable receives in the hardware */
	rctl = er32(RCTL);
	ew32(RCTL, rctl & ~E1000_RCTL_EN);

	if (!adapter->ecdev) {
		/* flush and sleep below */
		netif_tx_disable(netdev);
	}

	/* disable transmits in the hardware */
	tctl = er32(TCTL);
	tctl &= ~E1000_TCTL_EN;
	ew32(TCTL, tctl);
	/* flush both disables and wait for them to finish */
	E1000_WRITE_FLUSH();
	msleep(10);

	if (!adapter->ecdev) {
		napi_disable(&adapter->napi);

		e1000_irq_disable(adapter);
	}

	/*
	 * Setting DOWN must be after irq_disable to prevent
	 * a screaming interrupt.  Setting DOWN also prevents
	 * tasks from rescheduling.
	 */
	e1000_down_and_stop(adapter);

	adapter->link_speed = 0;
	adapter->link_duplex = 0;

	if (!adapter->ecdev) {
		netif_carrier_off(netdev);
	}

	e1000_reset(adapter);
	e1000_clean_all_tx_rings(adapter);
	e1000_clean_all_rx_rings(adapter);
}

static void e1000_reinit_safe(struct e1000_adapter *adapter)
{
	while (test_and_set_bit(__E1000_RESETTING, &adapter->flags))
		msleep(1);
	mutex_lock(&adapter->mutex);
	e1000_down(adapter);
	e1000_up(adapter);
	mutex_unlock(&adapter->mutex);
	clear_bit(__E1000_RESETTING, &adapter->flags);
}

void e1000_reinit_locked(struct e1000_adapter *adapter)
{
	/* if rtnl_lock is not held the call path is bogus */
	ASSERT_RTNL();
	WARN_ON(in_interrupt());
	while (test_and_set_bit(__E1000_RESETTING, &adapter->flags))
		msleep(1);
	e1000_down(adapter);
	e1000_up(adapter);
	clear_bit(__E1000_RESETTING, &adapter->flags);
}

void e1000_reset(struct e1000_adapter *adapter)
{
	struct e1000_hw *hw = &adapter->hw;
	u32 pba = 0, tx_space, min_tx_space, min_rx_space;
	bool legacy_pba_adjust = false;
	u16 hwm;

	/* Repartition Pba for greater than 9k mtu
	 * To take effect CTRL.RST is required.
	 */

	switch (hw->mac_type) {
	case e1000_82542_rev2_0:
	case e1000_82542_rev2_1:
	case e1000_82543:
	case e1000_82544:
	case e1000_82540:
	case e1000_82541:
	case e1000_82541_rev_2:
		legacy_pba_adjust = true;
		pba = E1000_PBA_48K;
		break;
	case e1000_82545:
	case e1000_82545_rev_3:
	case e1000_82546:
	case e1000_ce4100:
	case e1000_82546_rev_3:
		pba = E1000_PBA_48K;
		break;
	case e1000_82547:
	case e1000_82547_rev_2:
		legacy_pba_adjust = true;
		pba = E1000_PBA_30K;
		break;
	case e1000_undefined:
	case e1000_num_macs:
		break;
	}

	if (legacy_pba_adjust) {
		if (hw->max_frame_size > E1000_RXBUFFER_8192)
			pba -= 8; /* allocate more FIFO for Tx */

		if (hw->mac_type == e1000_82547) {
			adapter->tx_fifo_head = 0;
			adapter->tx_head_addr = pba << E1000_TX_HEAD_ADDR_SHIFT;
			adapter->tx_fifo_size =
				(E1000_PBA_40K - pba) << E1000_PBA_BYTES_SHIFT;
			atomic_set(&adapter->tx_fifo_stall, 0);
		}
	} else if (hw->max_frame_size > ETH_FRAME_LEN + ETH_FCS_LEN) {
		/* adjust PBA for jumbo frames */
		ew32(PBA, pba);

		/* To maintain wire speed transmits, the Tx FIFO should be
		 * large enough to accommodate two full transmit packets,
		 * rounded up to the next 1KB and expressed in KB.  Likewise,
		 * the Rx FIFO should be large enough to accommodate at least
		 * one full receive packet and is similarly rounded up and
		 * expressed in KB. */
		pba = er32(PBA);
		/* upper 16 bits has Tx packet buffer allocation size in KB */
		tx_space = pba >> 16;
		/* lower 16 bits has Rx packet buffer allocation size in KB */
		pba &= 0xffff;
		/*
		 * the tx fifo also stores 16 bytes of information about the tx
		 * but don't include ethernet FCS because hardware appends it
		 */
		min_tx_space = (hw->max_frame_size +
		                sizeof(struct e1000_tx_desc) -
		                ETH_FCS_LEN) * 2;
		min_tx_space = ALIGN(min_tx_space, 1024);
		min_tx_space >>= 10;
		/* software strips receive CRC, so leave room for it */
		min_rx_space = hw->max_frame_size;
		min_rx_space = ALIGN(min_rx_space, 1024);
		min_rx_space >>= 10;

		/* If current Tx allocation is less than the min Tx FIFO size,
		 * and the min Tx FIFO size is less than the current Rx FIFO
		 * allocation, take space away from current Rx allocation */
		if (tx_space < min_tx_space &&
		    ((min_tx_space - tx_space) < pba)) {
			pba = pba - (min_tx_space - tx_space);

			/* PCI/PCIx hardware has PBA alignment constraints */
			switch (hw->mac_type) {
			case e1000_82545 ... e1000_82546_rev_3:
				pba &= ~(E1000_PBA_8K - 1);
				break;
			default:
				break;
			}

			/* if short on rx space, rx wins and must trump tx
			 * adjustment or use Early Receive if available */
			if (pba < min_rx_space)
				pba = min_rx_space;
		}
	}

	ew32(PBA, pba);

	/*
	 * flow control settings:
	 * The high water mark must be low enough to fit one full frame
	 * (or the size used for early receive) above it in the Rx FIFO.
	 * Set it to the lower of:
	 * - 90% of the Rx FIFO size, and
	 * - the full Rx FIFO size minus the early receive size (for parts
	 *   with ERT support assuming ERT set to E1000_ERT_2048), or
	 * - the full Rx FIFO size minus one full frame
	 */
	hwm = min(((pba << 10) * 9 / 10),
		  ((pba << 10) - hw->max_frame_size));

	hw->fc_high_water = hwm & 0xFFF8;	/* 8-byte granularity */
	hw->fc_low_water = hw->fc_high_water - 8;
	hw->fc_pause_time = E1000_FC_PAUSE_TIME;
	hw->fc_send_xon = 1;
	hw->fc = hw->original_fc;

	/* Allow time for pending master requests to run */
	e1000_reset_hw(hw);
	if (hw->mac_type >= e1000_82544)
		ew32(WUC, 0);

	if (e1000_init_hw(hw))
		e_dev_err("Hardware Error\n");
	e1000_update_mng_vlan(adapter);

	/* if (adapter->hwflags & HWFLAGS_PHY_PWR_BIT) { */
	if (hw->mac_type >= e1000_82544 &&
	    hw->autoneg == 1 &&
	    hw->autoneg_advertised == ADVERTISE_1000_FULL) {
		u32 ctrl = er32(CTRL);
		/* clear phy power management bit if we are in gig only mode,
		 * which if enabled will attempt negotiation to 100Mb, which
		 * can cause a loss of link at power off or driver unload */
		ctrl &= ~E1000_CTRL_SWDPIN3;
		ew32(CTRL, ctrl);
	}

	/* Enable h/w to recognize an 802.1Q VLAN Ethernet packet */
	ew32(VET, ETHERNET_IEEE_VLAN_TYPE);

	e1000_reset_adaptive(hw);
	e1000_phy_get_info(hw, &adapter->phy_info);

	e1000_release_manageability(adapter);
}

/* Dump the eeprom for users having checksum issues */
static void e1000_dump_eeprom(struct e1000_adapter *adapter)
{
	struct net_device *netdev = adapter->netdev;
	struct ethtool_eeprom eeprom;
	const struct ethtool_ops *ops = netdev->ethtool_ops;
	u8 *data;
	int i;
	u16 csum_old, csum_new = 0;

	eeprom.len = ops->get_eeprom_len(netdev);
	eeprom.offset = 0;

	data = kmalloc(eeprom.len, GFP_KERNEL);
	if (!data)
		return;

	ops->get_eeprom(netdev, &eeprom, data);

	csum_old = (data[EEPROM_CHECKSUM_REG * 2]) +
		   (data[EEPROM_CHECKSUM_REG * 2 + 1] << 8);
	for (i = 0; i < EEPROM_CHECKSUM_REG * 2; i += 2)
		csum_new += data[i] + (data[i + 1] << 8);
	csum_new = EEPROM_SUM - csum_new;

	pr_err("/*********************/\n");
	pr_err("Current EEPROM Checksum : 0x%04x\n", csum_old);
	pr_err("Calculated              : 0x%04x\n", csum_new);

	pr_err("Offset    Values\n");
	pr_err("========  ======\n");
	print_hex_dump(KERN_ERR, "", DUMP_PREFIX_OFFSET, 16, 1, data, 128, 0);

	pr_err("Include this output when contacting your support provider.\n");
	pr_err("This is not a software error! Something bad happened to\n");
	pr_err("your hardware or EEPROM image. Ignoring this problem could\n");
	pr_err("result in further problems, possibly loss of data,\n");
	pr_err("corruption or system hangs!\n");
	pr_err("The MAC Address will be reset to 00:00:00:00:00:00,\n");
	pr_err("which is invalid and requires you to set the proper MAC\n");
	pr_err("address manually before continuing to enable this network\n");
	pr_err("device. Please inspect the EEPROM dump and report the\n");
	pr_err("issue to your hardware vendor or Intel Customer Support.\n");
	pr_err("/*********************/\n");

	kfree(data);
}

/**
 * e1000_is_need_ioport - determine if an adapter needs ioport resources or not
 * @pdev: PCI device information struct
 *
 * Return true if an adapter needs ioport resources
 **/
static int e1000_is_need_ioport(struct pci_dev *pdev)
{
	switch (pdev->device) {
	case E1000_DEV_ID_82540EM:
	case E1000_DEV_ID_82540EM_LOM:
	case E1000_DEV_ID_82540EP:
	case E1000_DEV_ID_82540EP_LOM:
	case E1000_DEV_ID_82540EP_LP:
	case E1000_DEV_ID_82541EI:
	case E1000_DEV_ID_82541EI_MOBILE:
	case E1000_DEV_ID_82541ER:
	case E1000_DEV_ID_82541ER_LOM:
	case E1000_DEV_ID_82541GI:
	case E1000_DEV_ID_82541GI_LF:
	case E1000_DEV_ID_82541GI_MOBILE:
	case E1000_DEV_ID_82544EI_COPPER:
	case E1000_DEV_ID_82544EI_FIBER:
	case E1000_DEV_ID_82544GC_COPPER:
	case E1000_DEV_ID_82544GC_LOM:
	case E1000_DEV_ID_82545EM_COPPER:
	case E1000_DEV_ID_82545EM_FIBER:
	case E1000_DEV_ID_82546EB_COPPER:
	case E1000_DEV_ID_82546EB_FIBER:
	case E1000_DEV_ID_82546EB_QUAD_COPPER:
		return true;
	default:
		return false;
	}
}

static netdev_features_t e1000_fix_features(struct net_device *netdev,
	netdev_features_t features)
{
	/*
	 * Since there is no support for separate rx/tx vlan accel
	 * enable/disable make sure tx flag is always in same state as rx.
	 */
	if (features & NETIF_F_HW_VLAN_RX)
		features |= NETIF_F_HW_VLAN_TX;
	else
		features &= ~NETIF_F_HW_VLAN_TX;

	return features;
}

static int e1000_set_features(struct net_device *netdev,
	netdev_features_t features)
{
	struct e1000_adapter *adapter = netdev_priv(netdev);
	netdev_features_t changed = features ^ netdev->features;

	if (changed & NETIF_F_HW_VLAN_RX)
		e1000_vlan_mode(netdev, features);

	if (!(changed & (NETIF_F_RXCSUM | NETIF_F_RXALL)))
		return 0;

	netdev->features = features;
	adapter->rx_csum = !!(features & NETIF_F_RXCSUM);

	if (netif_running(netdev))
		e1000_reinit_locked(adapter);
	else
		e1000_reset(adapter);

	return 0;
}

static const struct net_device_ops e1000_netdev_ops = {
	.ndo_open		= e1000_open,
	.ndo_stop		= e1000_close,
	.ndo_start_xmit		= e1000_xmit_frame,
	.ndo_get_stats		= e1000_get_stats,
	.ndo_set_rx_mode	= e1000_set_rx_mode,
	.ndo_set_mac_address	= e1000_set_mac,
	.ndo_tx_timeout		= e1000_tx_timeout,
	.ndo_change_mtu		= e1000_change_mtu,
	.ndo_do_ioctl		= e1000_ioctl,
	.ndo_validate_addr	= eth_validate_addr,
	.ndo_vlan_rx_add_vid	= e1000_vlan_rx_add_vid,
	.ndo_vlan_rx_kill_vid	= e1000_vlan_rx_kill_vid,
#ifdef CONFIG_NET_POLL_CONTROLLER
	.ndo_poll_controller	= e1000_netpoll,
#endif
	.ndo_fix_features	= e1000_fix_features,
	.ndo_set_features	= e1000_set_features,
};

/**
 * e1000_init_hw_struct - initialize members of hw struct
 * @adapter: board private struct
 * @hw: structure used by e1000_hw.c
 *
 * Factors out initialization of the e1000_hw struct to its own function
 * that can be called very early at init (just after struct allocation).
 * Fields are initialized based on PCI device information and
 * OS network device settings (MTU size).
 * Returns negative error codes if MAC type setup fails.
 */
static int e1000_init_hw_struct(struct e1000_adapter *adapter,
				struct e1000_hw *hw)
{
	struct pci_dev *pdev = adapter->pdev;

	/* PCI config space info */
	hw->vendor_id = pdev->vendor;
	hw->device_id = pdev->device;
	hw->subsystem_vendor_id = pdev->subsystem_vendor;
	hw->subsystem_id = pdev->subsystem_device;
	hw->revision_id = pdev->revision;

	pci_read_config_word(pdev, PCI_COMMAND, &hw->pci_cmd_word);

	hw->max_frame_size = adapter->netdev->mtu +
			     ENET_HEADER_SIZE + ETHERNET_FCS_SIZE;
	hw->min_frame_size = MINIMUM_ETHERNET_FRAME_SIZE;

	/* identify the MAC */
	if (e1000_set_mac_type(hw)) {
		e_err(probe, "Unknown MAC Type\n");
		return -EIO;
	}

	switch (hw->mac_type) {
	default:
		break;
	case e1000_82541:
	case e1000_82547:
	case e1000_82541_rev_2:
	case e1000_82547_rev_2:
		hw->phy_init_script = 1;
		break;
	}

	e1000_set_media_type(hw);
	e1000_get_bus_info(hw);

	hw->wait_autoneg_complete = false;
	hw->tbi_compatibility_en = true;
	hw->adaptive_ifs = true;

	/* Copper options */

	if (hw->media_type == e1000_media_type_copper) {
		hw->mdix = AUTO_ALL_MODES;
		hw->disable_polarity_correction = false;
		hw->master_slave = E1000_MASTER_SLAVE;
	}

	return 0;
}

/**
 * e1000_probe - Device Initialization Routine
 * @pdev: PCI device information struct
 * @ent: entry in e1000_pci_tbl
 *
 * Returns 0 on success, negative on failure
 *
 * e1000_probe initializes an adapter identified by a pci_dev structure.
 * The OS initialization, configuring of the adapter private structure,
 * and a hardware reset occur.
 **/
static int __devinit e1000_probe(struct pci_dev *pdev,
				 const struct pci_device_id *ent)
{
	struct net_device *netdev;
	struct e1000_adapter *adapter;
	struct e1000_hw *hw;

	static int cards_found = 0;
	static int global_quad_port_a = 0; /* global ksp3 port a indication */
	int i, err, pci_using_dac;
	u16 eeprom_data = 0;
	u16 tmp = 0;
	u16 eeprom_apme_mask = E1000_EEPROM_APME;
	int bars, need_ioport;

	/* do not allocate ioport bars when not needed */
	need_ioport = e1000_is_need_ioport(pdev);
	if (need_ioport) {
		bars = pci_select_bars(pdev, IORESOURCE_MEM | IORESOURCE_IO);
		err = pci_enable_device(pdev);
	} else {
		bars = pci_select_bars(pdev, IORESOURCE_MEM);
		err = pci_enable_device_mem(pdev);
	}
	if (err)
		return err;

	err = pci_request_selected_regions(pdev, bars, e1000_driver_name);
	if (err)
		goto err_pci_reg;

	pci_set_master(pdev);
	err = pci_save_state(pdev);
	if (err)
		goto err_alloc_etherdev;

	err = -ENOMEM;
	netdev = alloc_etherdev(sizeof(struct e1000_adapter));
	if (!netdev)
		goto err_alloc_etherdev;

	SET_NETDEV_DEV(netdev, &pdev->dev);

	pci_set_drvdata(pdev, netdev);
	adapter = netdev_priv(netdev);
	adapter->netdev = netdev;
	adapter->pdev = pdev;
	adapter->msg_enable = netif_msg_init(debug, DEFAULT_MSG_ENABLE);
	adapter->bars = bars;
	adapter->need_ioport = need_ioport;

	hw = &adapter->hw;
	hw->back = adapter;

	err = -EIO;
	hw->hw_addr = pci_ioremap_bar(pdev, BAR_0);
	if (!hw->hw_addr)
		goto err_ioremap;

	if (adapter->need_ioport) {
		for (i = BAR_1; i <= BAR_5; i++) {
			if (pci_resource_len(pdev, i) == 0)
				continue;
			if (pci_resource_flags(pdev, i) & IORESOURCE_IO) {
				hw->io_base = pci_resource_start(pdev, i);
				break;
			}
		}
	}

	/* make ready for any if (hw->...) below */
	err = e1000_init_hw_struct(adapter, hw);
	if (err)
		goto err_sw_init;

	/*
	 * there is a workaround being applied below that limits
	 * 64-bit DMA addresses to 64-bit hardware.  There are some
	 * 32-bit adapters that Tx hang when given 64-bit DMA addresses
	 */
	pci_using_dac = 0;
	if ((hw->bus_type == e1000_bus_type_pcix) &&
	    !dma_set_mask(&pdev->dev, DMA_BIT_MASK(64))) {
		/*
		 * according to DMA-API-HOWTO, coherent calls will always
		 * succeed if the set call did
		 */
		dma_set_coherent_mask(&pdev->dev, DMA_BIT_MASK(64));
		pci_using_dac = 1;
	} else {
		err = dma_set_mask(&pdev->dev, DMA_BIT_MASK(32));
		if (err) {
			pr_err("No usable DMA config, aborting\n");
			goto err_dma;
		}
		dma_set_coherent_mask(&pdev->dev, DMA_BIT_MASK(32));
	}

	netdev->netdev_ops = &e1000_netdev_ops;
	e1000_set_ethtool_ops(netdev);
	netdev->watchdog_timeo = 5 * HZ;
	netif_napi_add(netdev, &adapter->napi, e1000_clean, 64);

	strncpy(netdev->name, pci_name(pdev), sizeof(netdev->name) - 1);

	adapter->bd_number = cards_found;

	/* setup the private structure */

	err = e1000_sw_init(adapter);
	if (err)
		goto err_sw_init;

	err = -EIO;
	if (hw->mac_type == e1000_ce4100) {
		hw->ce4100_gbe_mdio_base_virt =
					ioremap(pci_resource_start(pdev, BAR_1),
		                                pci_resource_len(pdev, BAR_1));

		if (!hw->ce4100_gbe_mdio_base_virt)
			goto err_mdio_ioremap;
	}

	if (hw->mac_type >= e1000_82543) {
		netdev->hw_features = NETIF_F_SG |
				   NETIF_F_HW_CSUM |
				   NETIF_F_HW_VLAN_RX;
		netdev->features = NETIF_F_HW_VLAN_TX |
				   NETIF_F_HW_VLAN_FILTER;
	}

	if ((hw->mac_type >= e1000_82544) &&
	   (hw->mac_type != e1000_82547))
		netdev->hw_features |= NETIF_F_TSO;

	netdev->priv_flags |= IFF_SUPP_NOFCS;

	netdev->features |= netdev->hw_features;
	netdev->hw_features |= (NETIF_F_RXCSUM |
				NETIF_F_RXALL |
				NETIF_F_RXFCS);

	if (pci_using_dac) {
		netdev->features |= NETIF_F_HIGHDMA;
		netdev->vlan_features |= NETIF_F_HIGHDMA;
	}

	netdev->vlan_features |= (NETIF_F_TSO |
				  NETIF_F_HW_CSUM |
				  NETIF_F_SG);

	netdev->priv_flags |= IFF_UNICAST_FLT;

	adapter->en_mng_pt = e1000_enable_mng_pass_thru(hw);

	/* initialize eeprom parameters */
	if (e1000_init_eeprom_params(hw)) {
		e_err(probe, "EEPROM initialization failed\n");
		goto err_eeprom;
	}
       
	/* before reading the EEPROM, reset the controller to
	 * put the device in a known good starting state */

	e1000_reset_hw(hw);

	/* make sure the EEPROM is good */
	if (e1000_validate_eeprom_checksum(hw) < 0) {
		e_err(probe, "The EEPROM Checksum Is Not Valid\n");
		e1000_dump_eeprom(adapter);
		/*
		 * set MAC address to all zeroes to invalidate and temporarily
		 * disable this device for the user. This blocks regular
		 * traffic while still permitting ethtool ioctls from reaching
		 * the hardware as well as allowing the user to run the
		 * interface after manually setting a hw addr using
		 * `ip set address`
		 */
		memset(hw->mac_addr, 0, netdev->addr_len);
	} else {
		/* copy the MAC address out of the EEPROM */
		if (e1000_read_mac_addr(hw))
			e_err(probe, "EEPROM Read Error\n");
	}
	/* don't block initialization here due to bad MAC address */
	memcpy(netdev->dev_addr, hw->mac_addr, netdev->addr_len);
	memcpy(netdev->perm_addr, hw->mac_addr, netdev->addr_len);
       
	if (!is_valid_ether_addr(netdev->perm_addr))
		e_err(probe, "Invalid MAC Address\n");

	INIT_DELAYED_WORK(&adapter->watchdog_task, e1000_watchdog);
	INIT_DELAYED_WORK(&adapter->fifo_stall_task,
			  e1000_82547_tx_fifo_stall_task);
	INIT_DELAYED_WORK(&adapter->phy_info_task, e1000_update_phy_info_task);
	INIT_WORK(&adapter->reset_task, e1000_reset_task);

	e1000_check_options(adapter);

	/* Initial Wake on LAN setting
	 * If APM wake is enabled in the EEPROM,
	 * enable the ACPI Magic Packet filter
	 */

	switch (hw->mac_type) {
	case e1000_82542_rev2_0:
	case e1000_82542_rev2_1:
	case e1000_82543:
		break;
	case e1000_82544:
		e1000_read_eeprom(hw,
			EEPROM_INIT_CONTROL2_REG, 1, &eeprom_data);
		eeprom_apme_mask = E1000_EEPROM_82544_APM;
		break;
	case e1000_82546:
	case e1000_82546_rev_3:
		if (er32(STATUS) & E1000_STATUS_FUNC_1) {
			e1000_read_eeprom(hw,
				EEPROM_INIT_CONTROL3_PORT_B, 1, &eeprom_data);
			break;
		}
		/* Fall Through */
	default:
		e1000_read_eeprom(hw,
			EEPROM_INIT_CONTROL3_PORT_A, 1, &eeprom_data);
		break;
	}
	if (eeprom_data & eeprom_apme_mask)
		adapter->eeprom_wol |= E1000_WUFC_MAG;

	/* now that we have the eeprom settings, apply the special cases
	 * where the eeprom may be wrong or the board simply won't support
	 * wake on lan on a particular port */
	switch (pdev->device) {
	case E1000_DEV_ID_82546GB_PCIE:
		adapter->eeprom_wol = 0;
		break;
	case E1000_DEV_ID_82546EB_FIBER:
	case E1000_DEV_ID_82546GB_FIBER:
		/* Wake events only supported on port A for dual fiber
		 * regardless of eeprom setting */
		if (er32(STATUS) & E1000_STATUS_FUNC_1)
			adapter->eeprom_wol = 0;
		break;
	case E1000_DEV_ID_82546GB_QUAD_COPPER_KSP3:
		/* if quad port adapter, disable WoL on all but port A */
		if (global_quad_port_a != 0)
			adapter->eeprom_wol = 0;
		else
			adapter->quad_port_a = true;
		/* Reset for multiple quad port adapters */
		if (++global_quad_port_a == 4)
			global_quad_port_a = 0;
		break;
	}

	/* initialize the wol settings based on the eeprom settings */
	adapter->wol = adapter->eeprom_wol;
	device_set_wakeup_enable(&adapter->pdev->dev, adapter->wol);

	/* Auto detect PHY address */
	if (hw->mac_type == e1000_ce4100) {
		for (i = 0; i < 32; i++) {
			hw->phy_addr = i;
			e1000_read_phy_reg(hw, PHY_ID2, &tmp);
			if (tmp == 0 || tmp == 0xFF) {
				if (i == 31)
					goto err_eeprom;
				continue;
			} else
				break;
		}
	}

	/* reset the hardware with the new settings */
	e1000_reset(adapter);

	// offer device to EtherCAT master module
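	// If the master claims the device, the net_device is not registered
	// with the kernel and the master becomes its only user; otherwise
	// the device is registered below as a regular ethX interface.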
       
  1257 	adapter->ecdev = ecdev_offer(netdev, ec_poll, THIS_MODULE);
       
  1258 	if (adapter->ecdev) {
       
  1259 		if (ecdev_open(adapter->ecdev)) {
       
  1260 			ecdev_withdraw(adapter->ecdev);
       
  1261 			goto err_register;
       
  1262 		}
       
  1263 	} else {
       
  1264 		strcpy(netdev->name, "eth%d");
       
  1265 		err = register_netdev(netdev);
       
  1266 		if (err)
       
  1267 			goto err_register;
       
  1268 	}
       
  1269 
       
  1270 	e1000_vlan_filter_on_off(adapter, false);
       
  1271 
       
  1272 	/* print bus type/speed/width info */
       
  1273 	e_info(probe, "(PCI%s:%dMHz:%d-bit) %pM\n",
       
  1274 	       ((hw->bus_type == e1000_bus_type_pcix) ? "-X" : ""),
       
  1275 	       ((hw->bus_speed == e1000_bus_speed_133) ? 133 :
       
  1276 		(hw->bus_speed == e1000_bus_speed_120) ? 120 :
       
  1277 		(hw->bus_speed == e1000_bus_speed_100) ? 100 :
       
  1278 		(hw->bus_speed == e1000_bus_speed_66) ? 66 : 33),
       
  1279 	       ((hw->bus_width == e1000_bus_width_64) ? 64 : 32),
       
  1280 	       netdev->dev_addr);
       
  1281 
       
  1282 	if (!adapter->ecdev) {
       
  1283 		/* carrier off reporting is important to ethtool even BEFORE open */
       
  1284 		netif_carrier_off(netdev);
       
  1285 	}
       
  1286 
       
  1287 	e_info(probe, "Intel(R) PRO/1000 Network Connection\n");
       
  1288 
       
  1289 	cards_found++;
       
  1290 	return 0;
       
  1291 
       
  1292 err_register:
       
  1293 err_eeprom:
       
  1294 	e1000_phy_hw_reset(hw);
       
  1295 
       
  1296 	if (hw->flash_address)
       
  1297 		iounmap(hw->flash_address);
       
  1298 	kfree(adapter->tx_ring);
       
  1299 	kfree(adapter->rx_ring);
       
  1300 err_dma:
       
  1301 err_sw_init:
       
  1302 err_mdio_ioremap:
       
  1303 	iounmap(hw->ce4100_gbe_mdio_base_virt);
       
  1304 	iounmap(hw->hw_addr);
       
  1305 err_ioremap:
       
  1306 	free_netdev(netdev);
       
  1307 err_alloc_etherdev:
       
  1308 	pci_release_selected_regions(pdev, bars);
       
  1309 err_pci_reg:
       
  1310 	pci_disable_device(pdev);
       
  1311 	return err;
       
  1312 }
       
  1313 
       
  1314 /**
       
  1315  * e1000_remove - Device Removal Routine
       
  1316  * @pdev: PCI device information struct
       
  1317  *
       
  1318  * e1000_remove is called by the PCI subsystem to alert the driver
       
  1319  * that it should release a PCI device.  This could be caused by a
       
  1320  * Hot-Plug event, or because the driver is going to be removed from
       
  1321  * memory.
       
  1322  **/
       
  1323 
       
  1324 static void __devexit e1000_remove(struct pci_dev *pdev)
       
  1325 {
       
  1326 	struct net_device *netdev = pci_get_drvdata(pdev);
       
  1327 	struct e1000_adapter *adapter = netdev_priv(netdev);
       
  1328 	struct e1000_hw *hw = &adapter->hw;
       
  1329 
       
  1330 	e1000_down_and_stop(adapter);
       
  1331 	e1000_release_manageability(adapter);
       
  1332 
       
  1333 	if (adapter->ecdev) {
       
  1334 		ecdev_close(adapter->ecdev);
       
  1335 		ecdev_withdraw(adapter->ecdev);
       
  1336 	} else {
       
  1337 		unregister_netdev(netdev);
       
  1338 	}
       
  1339 
       
  1340 	e1000_phy_hw_reset(hw);
       
  1341 
       
  1342 	kfree(adapter->tx_ring);
       
  1343 	kfree(adapter->rx_ring);
       
  1344 
       
  1345 	if (hw->mac_type == e1000_ce4100)
       
  1346 		iounmap(hw->ce4100_gbe_mdio_base_virt);
       
  1347 	iounmap(hw->hw_addr);
       
  1348 	if (hw->flash_address)
       
  1349 		iounmap(hw->flash_address);
       
  1350 	pci_release_selected_regions(pdev, adapter->bars);
       
  1351 
       
  1352 	free_netdev(netdev);
       
  1353 
       
  1354 	pci_disable_device(pdev);
       
  1355 }
       
  1356 
       
  1357 /**
       
  1358  * e1000_sw_init - Initialize general software structures (struct e1000_adapter)
       
  1359  * @adapter: board private structure to initialize
       
  1360  *
       
  1361  * e1000_sw_init initializes the Adapter private data structure.
       
  1362  * e1000_init_hw_struct MUST be called before this function
       
  1363  **/
       
  1364 
       
  1365 static int __devinit e1000_sw_init(struct e1000_adapter *adapter)
       
  1366 {
       
  1367 	adapter->rx_buffer_len = MAXIMUM_ETHERNET_VLAN_SIZE;
       
  1368 
       
  1369 	adapter->num_tx_queues = 1;
       
  1370 	adapter->num_rx_queues = 1;
       
  1371 
       
  1372 	if (e1000_alloc_queues(adapter)) {
       
  1373 		e_err(probe, "Unable to allocate memory for queues\n");
       
  1374 		return -ENOMEM;
       
  1375 	}
       
  1376 
       
  1377 	/* Explicitly disable IRQ since the NIC can be in any state. */
       
  1378 	e1000_irq_disable(adapter);
       
  1379 
       
  1380 	spin_lock_init(&adapter->stats_lock);
       
  1381 	mutex_init(&adapter->mutex);
       
  1382 
       
  1383 	set_bit(__E1000_DOWN, &adapter->flags);
       
  1384 
       
  1385 	return 0;
       
  1386 }
       
  1387 
       
  1388 /**
       
  1389  * e1000_alloc_queues - Allocate memory for all rings
       
  1390  * @adapter: board private structure to initialize
       
  1391  *
       
  1392  * We allocate one ring per queue at run-time since we don't know the
       
  1393  * number of queues at compile-time.
       
  1394  **/
       
  1395 
       
  1396 static int __devinit e1000_alloc_queues(struct e1000_adapter *adapter)
       
  1397 {
       
  1398 	adapter->tx_ring = kcalloc(adapter->num_tx_queues,
       
  1399 	                           sizeof(struct e1000_tx_ring), GFP_KERNEL);
       
  1400 	if (!adapter->tx_ring)
       
  1401 		return -ENOMEM;
       
  1402 
       
  1403 	adapter->rx_ring = kcalloc(adapter->num_rx_queues,
       
  1404 	                           sizeof(struct e1000_rx_ring), GFP_KERNEL);
       
  1405 	if (!adapter->rx_ring) {
       
  1406 		kfree(adapter->tx_ring);
       
  1407 		return -ENOMEM;
       
  1408 	}
       
  1409 
       
  1410 	return E1000_SUCCESS;
       
  1411 }
       
  1412 
       
  1413 /**
       
  1414  * e1000_open - Called when a network interface is made active
       
  1415  * @netdev: network interface device structure
       
  1416  *
       
  1417  * Returns 0 on success, negative value on failure
       
  1418  *
       
  1419  * The open entry point is called when a network interface is made
       
  1420  * active by the system (IFF_UP).  At this point all resources needed
       
  1421  * for transmit and receive operations are allocated, the interrupt
       
  1422  * handler is registered with the OS, the watchdog task is started,
       
  1423  * and the stack is notified that the interface is ready.
       
  1424  **/
       
  1425 
       
  1426 static int e1000_open(struct net_device *netdev)
       
  1427 {
       
  1428 	struct e1000_adapter *adapter = netdev_priv(netdev);
       
  1429 	struct e1000_hw *hw = &adapter->hw;
       
  1430 	int err;
       
  1431 
       
  1432 	/* disallow open during test */
       
  1433 	if (test_bit(__E1000_TESTING, &adapter->flags))
       
  1434 		return -EBUSY;
       
  1435 
       
  1436 	netif_carrier_off(netdev);
       
  1437 
       
  1438 	/* allocate transmit descriptors */
       
  1439 	err = e1000_setup_all_tx_resources(adapter);
       
  1440 	if (err)
       
  1441 		goto err_setup_tx;
       
  1442 
       
  1443 	/* allocate receive descriptors */
       
  1444 	err = e1000_setup_all_rx_resources(adapter);
       
  1445 	if (err)
       
  1446 		goto err_setup_rx;
       
  1447 
       
  1448 	e1000_power_up_phy(adapter);
       
  1449 
       
  1450 	adapter->mng_vlan_id = E1000_MNG_VLAN_NONE;
       
  1451 	if ((hw->mng_cookie.status &
       
  1452 			  E1000_MNG_DHCP_COOKIE_STATUS_VLAN_SUPPORT)) {
       
  1453 		e1000_update_mng_vlan(adapter);
       
  1454 	}
       
  1455 
       
  1456 	/* before we allocate an interrupt, we must be ready to handle it.
       
  1457 	 * Setting DEBUG_SHIRQ in the kernel makes it fire an interrupt
       
  1458 	 * as soon as we call pci_request_irq, so we have to set up our
       
  1459 	 * clean_rx handler before we do so.  */
       
  1460 	e1000_configure(adapter);
       
  1461 
       
  1462 	err = e1000_request_irq(adapter);
       
  1463 	if (err)
       
  1464 		goto err_req_irq;
       
  1465 
       
  1466 	/* From here on the code is the same as e1000_up() */
       
  1467 	clear_bit(__E1000_DOWN, &adapter->flags);
       
  1468 
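       	/* an EtherCAT-claimed device is polled by the master, so NAPI,
       	 * interrupts and the transmit queue stay disabled for it */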
       
  1469 	if (!adapter->ecdev) {
       
  1470 		napi_enable(&adapter->napi);
       
  1471 
       
  1472 		e1000_irq_enable(adapter);
       
  1473 
       
  1474 		netif_start_queue(netdev);
       
  1475 	}
       
  1476 
       
  1477 	/* fire a link status change interrupt to start the watchdog */
       
  1478 	ew32(ICS, E1000_ICS_LSC);
       
  1479 
       
  1480 	return E1000_SUCCESS;
       
  1481 
       
  1482 err_req_irq:
       
  1483 	e1000_power_down_phy(adapter);
       
  1484 	e1000_free_all_rx_resources(adapter);
       
  1485 err_setup_rx:
       
  1486 	e1000_free_all_tx_resources(adapter);
       
  1487 err_setup_tx:
       
  1488 	e1000_reset(adapter);
       
  1489 
       
  1490 	return err;
       
  1491 }
       
  1492 
       
  1493 /**
       
  1494  * e1000_close - Disables a network interface
       
  1495  * @netdev: network interface device structure
       
  1496  *
       
  1497  * Returns 0, this is not allowed to fail
       
  1498  *
       
  1499  * The close entry point is called when an interface is de-activated
       
  1500  * by the OS.  The hardware is still under the driver's control, but
       
  1501  * needs to be disabled.  A global MAC reset is issued to stop the
       
  1502  * hardware, and all transmit and receive resources are freed.
       
  1503  **/
       
  1504 
       
  1505 static int e1000_close(struct net_device *netdev)
       
  1506 {
       
  1507 	struct e1000_adapter *adapter = netdev_priv(netdev);
       
  1508 	struct e1000_hw *hw = &adapter->hw;
       
  1509 
       
  1510 	WARN_ON(test_bit(__E1000_RESETTING, &adapter->flags));
       
  1511 	e1000_down(adapter);
       
  1512 	e1000_power_down_phy(adapter);
       
  1513 	e1000_free_irq(adapter);
       
  1514 
       
  1515 	e1000_free_all_tx_resources(adapter);
       
  1516 	e1000_free_all_rx_resources(adapter);
       
  1517 
       
  1518 	/* kill manageability vlan ID if supported, but not if a vlan with
       
  1519 	 * the same ID is registered on the host OS (let 8021q kill it) */
       
  1520 	if ((hw->mng_cookie.status &
       
  1521 			  E1000_MNG_DHCP_COOKIE_STATUS_VLAN_SUPPORT) &&
       
  1522 	     !test_bit(adapter->mng_vlan_id, adapter->active_vlans)) {
       
  1523 		e1000_vlan_rx_kill_vid(netdev, adapter->mng_vlan_id);
       
  1524 	}
       
  1525 
       
  1526 	return 0;
       
  1527 }
       
  1528 
       
  1529 /**
       
  1530  * e1000_check_64k_bound - check that memory doesn't cross 64kB boundary
       
  1531  * @adapter: address of board private structure
       
  1532  * @start: address of beginning of memory
       
  1533  * @len: length of memory
       
  1534  **/
       
  1535 static bool e1000_check_64k_bound(struct e1000_adapter *adapter, void *start,
       
  1536 				  unsigned long len)
       
  1537 {
       
  1538 	struct e1000_hw *hw = &adapter->hw;
       
  1539 	unsigned long begin = (unsigned long)start;
       
  1540 	unsigned long end = begin + len;
       
  1541 
       
  1542 	/* First-rev 82545 and 82546 (and the CE4100) must not allow any
       
  1543 	 * memory write to cross a 64 kB boundary, due to erratum 23 */
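       	/* begin and (end - 1) lie within the same 64 kB region exactly
       	 * when their upper address bits agree, i.e. when XOR-ing them and
       	 * shifting right by 16 leaves zero. */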
       
  1544 	if (hw->mac_type == e1000_82545 ||
       
  1545 	    hw->mac_type == e1000_ce4100 ||
       
  1546 	    hw->mac_type == e1000_82546) {
       
  1547 		return ((begin ^ (end - 1)) >> 16) == 0;
       
  1548 	}
       
  1549 
       
  1550 	return true;
       
  1551 }
       
  1552 
       
  1553 /**
       
  1554  * e1000_setup_tx_resources - allocate Tx resources (Descriptors)
       
  1555  * @adapter: board private structure
       
  1556  * @txdr:    tx descriptor ring (for a specific queue) to setup
       
  1557  *
       
  1558  * Return 0 on success, negative on failure
       
  1559  **/
       
  1560 
       
  1561 static int e1000_setup_tx_resources(struct e1000_adapter *adapter,
       
  1562 				    struct e1000_tx_ring *txdr)
       
  1563 {
       
  1564 	struct pci_dev *pdev = adapter->pdev;
       
  1565 	int size;
       
  1566 
       
  1567 	size = sizeof(struct e1000_buffer) * txdr->count;
       
  1568 	txdr->buffer_info = vzalloc(size);
       
  1569 	if (!txdr->buffer_info) {
       
  1570 		e_err(probe, "Unable to allocate memory for the Tx descriptor "
       
  1571 		      "ring\n");
       
  1572 		return -ENOMEM;
       
  1573 	}
       
  1574 
       
  1575 	/* round up to nearest 4K */
       
  1576 
       
  1577 	txdr->size = txdr->count * sizeof(struct e1000_tx_desc);
       
  1578 	txdr->size = ALIGN(txdr->size, 4096);
       
  1579 
       
  1580 	txdr->desc = dma_alloc_coherent(&pdev->dev, txdr->size, &txdr->dma,
       
  1581 					GFP_KERNEL);
       
  1582 	if (!txdr->desc) {
       
  1583 setup_tx_desc_die:
       
  1584 		vfree(txdr->buffer_info);
       
  1585 		e_err(probe, "Unable to allocate memory for the Tx descriptor "
       
  1586 		      "ring\n");
       
  1587 		return -ENOMEM;
       
  1588 	}
       
  1589 
       
  1590 	/* Fix for errata 23, can't cross 64kB boundary */
       
  1591 	if (!e1000_check_64k_bound(adapter, txdr->desc, txdr->size)) {
       
  1592 		void *olddesc = txdr->desc;
       
  1593 		dma_addr_t olddma = txdr->dma;
       
  1594 		e_err(tx_err, "txdr align check failed: %u bytes at %p\n",
       
  1595 		      txdr->size, txdr->desc);
       
  1596 		/* Try again, without freeing the previous */
       
  1597 		txdr->desc = dma_alloc_coherent(&pdev->dev, txdr->size,
       
  1598 						&txdr->dma, GFP_KERNEL);
       
  1599 		/* Failed allocation, critical failure */
       
  1600 		if (!txdr->desc) {
       
  1601 			dma_free_coherent(&pdev->dev, txdr->size, olddesc,
       
  1602 					  olddma);
       
  1603 			goto setup_tx_desc_die;
       
  1604 		}
       
  1605 
       
  1606 		if (!e1000_check_64k_bound(adapter, txdr->desc, txdr->size)) {
       
  1607 			/* give up */
       
  1608 			dma_free_coherent(&pdev->dev, txdr->size, txdr->desc,
       
  1609 					  txdr->dma);
       
  1610 			dma_free_coherent(&pdev->dev, txdr->size, olddesc,
       
  1611 					  olddma);
       
  1612 			e_err(probe, "Unable to allocate aligned memory "
       
  1613 			      "for the transmit descriptor ring\n");
       
  1614 			vfree(txdr->buffer_info);
       
  1615 			return -ENOMEM;
       
  1616 		} else {
       
  1617 			/* Free old allocation, new allocation was successful */
       
  1618 			dma_free_coherent(&pdev->dev, txdr->size, olddesc,
       
  1619 					  olddma);
       
  1620 		}
       
  1621 	}
       
  1622 	memset(txdr->desc, 0, txdr->size);
       
  1623 
       
  1624 	txdr->next_to_use = 0;
       
  1625 	txdr->next_to_clean = 0;
       
  1626 
       
  1627 	return 0;
       
  1628 }
       
  1629 
       
  1630 /**
       
  1631  * e1000_setup_all_tx_resources - wrapper to allocate Tx resources
       
  1632  * 				  (Descriptors) for all queues
       
  1633  * @adapter: board private structure
       
  1634  *
       
  1635  * Return 0 on success, negative on failure
       
  1636  **/
       
  1637 
       
  1638 int e1000_setup_all_tx_resources(struct e1000_adapter *adapter)
       
  1639 {
       
  1640 	int i, err = 0;
       
  1641 
       
  1642 	for (i = 0; i < adapter->num_tx_queues; i++) {
       
  1643 		err = e1000_setup_tx_resources(adapter, &adapter->tx_ring[i]);
       
  1644 		if (err) {
       
  1645 			e_err(probe, "Allocation for Tx Queue %u failed\n", i);
       
  1646 			for (i--; i >= 0; i--)
       
  1647 				e1000_free_tx_resources(adapter,
       
  1648 							&adapter->tx_ring[i]);
       
  1649 			break;
       
  1650 		}
       
  1651 	}
       
  1652 
       
  1653 	return err;
       
  1654 }
       
  1655 
       
  1656 /**
       
  1657  * e1000_configure_tx - Configure 8254x Transmit Unit after Reset
       
  1658  * @adapter: board private structure
       
  1659  *
       
  1660  * Configure the Tx unit of the MAC after a reset.
       
  1661  **/
       
  1662 
       
  1663 static void e1000_configure_tx(struct e1000_adapter *adapter)
       
  1664 {
       
  1665 	u64 tdba;
       
  1666 	struct e1000_hw *hw = &adapter->hw;
       
  1667 	u32 tdlen, tctl, tipg;
       
  1668 	u32 ipgr1, ipgr2;
       
  1669 
       
  1670 	/* Setup the HW Tx Head and Tail descriptor pointers */
       
  1671 
       
  1672 	switch (adapter->num_tx_queues) {
       
  1673 	case 1:
       
  1674 	default:
       
  1675 		tdba = adapter->tx_ring[0].dma;
       
  1676 		tdlen = adapter->tx_ring[0].count *
       
  1677 			sizeof(struct e1000_tx_desc);
       
  1678 		ew32(TDLEN, tdlen);
       
  1679 		ew32(TDBAH, (tdba >> 32));
       
  1680 		ew32(TDBAL, (tdba & 0x00000000ffffffffULL));
       
  1681 		ew32(TDT, 0);
       
  1682 		ew32(TDH, 0);
       
  1683 		adapter->tx_ring[0].tdh = ((hw->mac_type >= e1000_82543) ? E1000_TDH : E1000_82542_TDH);
       
  1684 		adapter->tx_ring[0].tdt = ((hw->mac_type >= e1000_82543) ? E1000_TDT : E1000_82542_TDT);
       
  1685 		break;
       
  1686 	}
       
  1687 
       
  1688 	/* Set the default values for the Tx Inter Packet Gap timer */
       
  1689 	if ((hw->media_type == e1000_media_type_fiber ||
       
  1690 	     hw->media_type == e1000_media_type_internal_serdes))
       
  1691 		tipg = DEFAULT_82543_TIPG_IPGT_FIBER;
       
  1692 	else
       
  1693 		tipg = DEFAULT_82543_TIPG_IPGT_COPPER;
       
  1694 
       
  1695 	switch (hw->mac_type) {
       
  1696 	case e1000_82542_rev2_0:
       
  1697 	case e1000_82542_rev2_1:
       
  1698 		tipg = DEFAULT_82542_TIPG_IPGT;
       
  1699 		ipgr1 = DEFAULT_82542_TIPG_IPGR1;
       
  1700 		ipgr2 = DEFAULT_82542_TIPG_IPGR2;
       
  1701 		break;
       
  1702 	default:
       
  1703 		ipgr1 = DEFAULT_82543_TIPG_IPGR1;
       
  1704 		ipgr2 = DEFAULT_82543_TIPG_IPGR2;
       
  1705 		break;
       
  1706 	}
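       	/* merge the receive IPG values into their TIPG bit fields on top
       	 * of the transmit IPG chosen above */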
       
  1707 	tipg |= ipgr1 << E1000_TIPG_IPGR1_SHIFT;
       
  1708 	tipg |= ipgr2 << E1000_TIPG_IPGR2_SHIFT;
       
  1709 	ew32(TIPG, tipg);
       
  1710 
       
  1711 	/* Set the Tx Interrupt Delay register */
       
  1712 
       
  1713 	ew32(TIDV, adapter->tx_int_delay);
       
  1714 	if (hw->mac_type >= e1000_82540)
       
  1715 		ew32(TADV, adapter->tx_abs_int_delay);
       
  1716 
       
  1717 	/* Program the Transmit Control Register */
       
  1718 
       
  1719 	tctl = er32(TCTL);
       
  1720 	tctl &= ~E1000_TCTL_CT;
       
  1721 	tctl |= E1000_TCTL_PSP | E1000_TCTL_RTLC |
       
  1722 		(E1000_COLLISION_THRESHOLD << E1000_CT_SHIFT);
       
  1723 
       
  1724 	e1000_config_collision_dist(hw);
       
  1725 
       
  1726 	/* Setup Transmit Descriptor Settings for eop descriptor */
       
  1727 	adapter->txd_cmd = E1000_TXD_CMD_EOP | E1000_TXD_CMD_IFCS;
       
  1728 
       
  1729 	/* only set IDE if we are delaying interrupts using the timers */
       
  1730 	if (adapter->tx_int_delay)
       
  1731 		adapter->txd_cmd |= E1000_TXD_CMD_IDE;
       
  1732 
       
  1733 	if (hw->mac_type < e1000_82543)
       
  1734 		adapter->txd_cmd |= E1000_TXD_CMD_RPS;
       
  1735 	else
       
  1736 		adapter->txd_cmd |= E1000_TXD_CMD_RS;
       
  1737 
       
  1738 	/* Cache if we're 82544 running in PCI-X because we'll
       
  1739 	 * need this to apply a workaround later in the send path. */
       
  1740 	if (hw->mac_type == e1000_82544 &&
       
  1741 	    hw->bus_type == e1000_bus_type_pcix)
       
  1742 		adapter->pcix_82544 = true;
       
  1743 
       
  1744 	ew32(TCTL, tctl);
       
  1745 
       
  1746 }
       
  1747 
       
  1748 /**
       
  1749  * e1000_setup_rx_resources - allocate Rx resources (Descriptors)
       
  1750  * @adapter: board private structure
       
  1751  * @rxdr:    rx descriptor ring (for a specific queue) to setup
       
  1752  *
       
  1753  * Returns 0 on success, negative on failure
       
  1754  **/
       
  1755 
       
  1756 static int e1000_setup_rx_resources(struct e1000_adapter *adapter,
       
  1757 				    struct e1000_rx_ring *rxdr)
       
  1758 {
       
  1759 	struct pci_dev *pdev = adapter->pdev;
       
  1760 	int size, desc_len;
       
  1761 
       
  1762 	size = sizeof(struct e1000_buffer) * rxdr->count;
       
  1763 	rxdr->buffer_info = vzalloc(size);
       
  1764 	if (!rxdr->buffer_info) {
       
  1765 		e_err(probe, "Unable to allocate memory for the Rx descriptor "
       
  1766 		      "ring\n");
       
  1767 		return -ENOMEM;
       
  1768 	}
       
  1769 
       
  1770 	desc_len = sizeof(struct e1000_rx_desc);
       
  1771 
       
  1772 	/* Round up to nearest 4K */
       
  1773 
       
  1774 	rxdr->size = rxdr->count * desc_len;
       
  1775 	rxdr->size = ALIGN(rxdr->size, 4096);
       
  1776 
       
  1777 	rxdr->desc = dma_alloc_coherent(&pdev->dev, rxdr->size, &rxdr->dma,
       
  1778 					GFP_KERNEL);
       
  1779 
       
  1780 	if (!rxdr->desc) {
       
  1781 		e_err(probe, "Unable to allocate memory for the Rx descriptor "
       
  1782 		      "ring\n");
       
  1783 setup_rx_desc_die:
       
  1784 		vfree(rxdr->buffer_info);
       
  1785 		return -ENOMEM;
       
  1786 	}
       
  1787 
       
  1788 	/* Fix for errata 23, can't cross 64kB boundary */
       
  1789 	if (!e1000_check_64k_bound(adapter, rxdr->desc, rxdr->size)) {
       
  1790 		void *olddesc = rxdr->desc;
       
  1791 		dma_addr_t olddma = rxdr->dma;
       
  1792 		e_err(rx_err, "rxdr align check failed: %u bytes at %p\n",
       
  1793 		      rxdr->size, rxdr->desc);
       
  1794 		/* Try again, without freeing the previous */
       
  1795 		rxdr->desc = dma_alloc_coherent(&pdev->dev, rxdr->size,
       
  1796 						&rxdr->dma, GFP_KERNEL);
       
  1797 		/* Failed allocation, critical failure */
       
  1798 		if (!rxdr->desc) {
       
  1799 			dma_free_coherent(&pdev->dev, rxdr->size, olddesc,
       
  1800 					  olddma);
       
  1801 			e_err(probe, "Unable to allocate memory for the Rx "
       
  1802 			      "descriptor ring\n");
       
  1803 			goto setup_rx_desc_die;
       
  1804 		}
       
  1805 
       
  1806 		if (!e1000_check_64k_bound(adapter, rxdr->desc, rxdr->size)) {
       
  1807 			/* give up */
       
  1808 			dma_free_coherent(&pdev->dev, rxdr->size, rxdr->desc,
       
  1809 					  rxdr->dma);
       
  1810 			dma_free_coherent(&pdev->dev, rxdr->size, olddesc,
       
  1811 					  olddma);
       
  1812 			e_err(probe, "Unable to allocate aligned memory for "
       
  1813 			      "the Rx descriptor ring\n");
       
  1814 			goto setup_rx_desc_die;
       
  1815 		} else {
       
  1816 			/* Free old allocation, new allocation was successful */
       
  1817 			dma_free_coherent(&pdev->dev, rxdr->size, olddesc,
       
  1818 					  olddma);
       
  1819 		}
       
  1820 	}
       
  1821 	memset(rxdr->desc, 0, rxdr->size);
       
  1822 
       
  1823 	rxdr->next_to_clean = 0;
       
  1824 	rxdr->next_to_use = 0;
       
  1825 	rxdr->rx_skb_top = NULL;
       
  1826 
       
  1827 	return 0;
       
  1828 }
       
  1829 
       
  1830 /**
       
  1831  * e1000_setup_all_rx_resources - wrapper to allocate Rx resources
       
  1832  * 				  (Descriptors) for all queues
       
  1833  * @adapter: board private structure
       
  1834  *
       
  1835  * Return 0 on success, negative on failure
       
  1836  **/
       
  1837 
       
  1838 int e1000_setup_all_rx_resources(struct e1000_adapter *adapter)
       
  1839 {
       
  1840 	int i, err = 0;
       
  1841 
       
  1842 	for (i = 0; i < adapter->num_rx_queues; i++) {
       
  1843 		err = e1000_setup_rx_resources(adapter, &adapter->rx_ring[i]);
       
  1844 		if (err) {
       
  1845 			e_err(probe, "Allocation for Rx Queue %u failed\n", i);
       
  1846 			for (i--; i >= 0; i--)
       
  1847 				e1000_free_rx_resources(adapter,
       
  1848 							&adapter->rx_ring[i]);
       
  1849 			break;
       
  1850 		}
       
  1851 	}
       
  1852 
       
  1853 	return err;
       
  1854 }
       
  1855 
       
  1856 /**
       
  1857  * e1000_setup_rctl - configure the receive control registers
       
  1858  * @adapter: Board private structure
       
  1859  **/
       
  1860 static void e1000_setup_rctl(struct e1000_adapter *adapter)
       
  1861 {
       
  1862 	struct e1000_hw *hw = &adapter->hw;
       
  1863 	u32 rctl;
       
  1864 
       
  1865 	rctl = er32(RCTL);
       
  1866 
       
  1867 	rctl &= ~(3 << E1000_RCTL_MO_SHIFT);
       
  1868 
       
  1869 	rctl |= E1000_RCTL_BAM | E1000_RCTL_LBM_NO |
       
  1870 		E1000_RCTL_RDMTS_HALF |
       
  1871 		(hw->mc_filter_type << E1000_RCTL_MO_SHIFT);
       
  1872 
       
  1873 	if (hw->tbi_compatibility_on == 1)
       
  1874 		rctl |= E1000_RCTL_SBP;
       
  1875 	else
       
  1876 		rctl &= ~E1000_RCTL_SBP;
       
  1877 
       
  1878 	if (adapter->netdev->mtu <= ETH_DATA_LEN)
       
  1879 		rctl &= ~E1000_RCTL_LPE;
       
  1880 	else
       
  1881 		rctl |= E1000_RCTL_LPE;
       
  1882 
       
  1883 	/* Setup buffer sizes */
       
  1884 	rctl &= ~E1000_RCTL_SZ_4096;
       
  1885 	rctl |= E1000_RCTL_BSEX;
       
  1886 	switch (adapter->rx_buffer_len) {
       
  1887 		case E1000_RXBUFFER_2048:
       
  1888 		default:
       
  1889 			rctl |= E1000_RCTL_SZ_2048;
       
  1890 			rctl &= ~E1000_RCTL_BSEX;
       
  1891 			break;
       
  1892 		case E1000_RXBUFFER_4096:
       
  1893 			rctl |= E1000_RCTL_SZ_4096;
       
  1894 			break;
       
  1895 		case E1000_RXBUFFER_8192:
       
  1896 			rctl |= E1000_RCTL_SZ_8192;
       
  1897 			break;
       
  1898 		case E1000_RXBUFFER_16384:
       
  1899 			rctl |= E1000_RCTL_SZ_16384;
       
  1900 			break;
       
  1901 	}
       
  1902 
       
  1903 	/* This is useful for sniffing bad packets. */
       
  1904 	if (adapter->netdev->features & NETIF_F_RXALL) {
       
  1905 		/* UPE and MPE will be handled by normal PROMISC logic
       
  1906 		 * in e1000_set_rx_mode */
       
  1907 		rctl |= (E1000_RCTL_SBP | /* Receive bad packets */
       
  1908 			 E1000_RCTL_BAM | /* RX All Bcast Pkts */
       
  1909 			 E1000_RCTL_PMCF); /* RX All MAC Ctrl Pkts */
       
  1910 
       
  1911 		rctl &= ~(E1000_RCTL_VFE | /* Disable VLAN filter */
       
  1912 			  E1000_RCTL_DPF | /* Allow filtered pause */
       
  1913 			  E1000_RCTL_CFIEN); /* Dis VLAN CFIEN Filter */
       
  1914 		/* Do not mess with E1000_CTRL_VME, it affects transmit as well,
       
  1915 		 * and that breaks VLANs.
       
  1916 		 */
       
  1917 	}
       
  1918 
       
  1919 	ew32(RCTL, rctl);
       
  1920 }
       
  1921 
       
  1922 /**
       
  1923  * e1000_configure_rx - Configure 8254x Receive Unit after Reset
       
  1924  * @adapter: board private structure
       
  1925  *
       
  1926  * Configure the Rx unit of the MAC after a reset.
       
  1927  **/
       
  1928 
       
  1929 static void e1000_configure_rx(struct e1000_adapter *adapter)
       
  1930 {
       
  1931 	u64 rdba;
       
  1932 	struct e1000_hw *hw = &adapter->hw;
       
  1933 	u32 rdlen, rctl, rxcsum;
       
  1934 
       
  1935 	if (adapter->netdev->mtu > ETH_DATA_LEN) {
       
  1936 		rdlen = adapter->rx_ring[0].count *
       
  1937 		        sizeof(struct e1000_rx_desc);
       
  1938 		adapter->clean_rx = e1000_clean_jumbo_rx_irq;
       
  1939 		adapter->alloc_rx_buf = e1000_alloc_jumbo_rx_buffers;
       
  1940 	} else {
       
  1941 		rdlen = adapter->rx_ring[0].count *
       
  1942 		        sizeof(struct e1000_rx_desc);
       
  1943 		adapter->clean_rx = e1000_clean_rx_irq;
       
  1944 		adapter->alloc_rx_buf = e1000_alloc_rx_buffers;
       
  1945 	}
       
  1946 
       
  1947 	/* disable receives while setting up the descriptors */
       
  1948 	rctl = er32(RCTL);
       
  1949 	ew32(RCTL, rctl & ~E1000_RCTL_EN);
       
  1950 
       
  1951 	/* set the Receive Delay Timer Register */
       
  1952 	ew32(RDTR, adapter->rx_int_delay);
       
  1953 
       
  1954 	if (hw->mac_type >= e1000_82540) {
       
  1955 		ew32(RADV, adapter->rx_abs_int_delay);
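       		/* the ITR register counts the interrupt interval in 256 ns
       		 * units, so an interrupts-per-second target converts to
       		 * 10^9 / (itr * 256) register units */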
       
  1956 		if (adapter->itr_setting != 0)
       
  1957 			ew32(ITR, 1000000000 / (adapter->itr * 256));
       
  1958 	}
       
  1959 
       
  1960 	/* Setup the HW Rx Head and Tail Descriptor Pointers and
       
  1961 	 * the Base and Length of the Rx Descriptor Ring */
       
  1962 	switch (adapter->num_rx_queues) {
       
  1963 	case 1:
       
  1964 	default:
       
  1965 		rdba = adapter->rx_ring[0].dma;
       
  1966 		ew32(RDLEN, rdlen);
       
  1967 		ew32(RDBAH, (rdba >> 32));
       
  1968 		ew32(RDBAL, (rdba & 0x00000000ffffffffULL));
       
  1969 		ew32(RDT, 0);
       
  1970 		ew32(RDH, 0);
       
  1971 		adapter->rx_ring[0].rdh = ((hw->mac_type >= e1000_82543) ? E1000_RDH : E1000_82542_RDH);
       
  1972 		adapter->rx_ring[0].rdt = ((hw->mac_type >= e1000_82543) ? E1000_RDT : E1000_82542_RDT);
       
  1973 		break;
       
  1974 	}
       
  1975 
       
  1976 	/* Enable 82543 Receive Checksum Offload for TCP and UDP */
       
  1977 	if (hw->mac_type >= e1000_82543) {
       
  1978 		rxcsum = er32(RXCSUM);
       
  1979 		if (adapter->rx_csum)
       
  1980 			rxcsum |= E1000_RXCSUM_TUOFL;
       
  1981 		else
       
  1982 			/* don't need to clear IPPCSE as it defaults to 0 */
       
  1983 			rxcsum &= ~E1000_RXCSUM_TUOFL;
       
  1984 		ew32(RXCSUM, rxcsum);
       
  1985 	}
       
  1986 
       
  1987 	/* Enable Receives */
       
  1988 	ew32(RCTL, rctl | E1000_RCTL_EN);
       
  1989 }
       
  1990 
       
  1991 /**
       
  1992  * e1000_free_tx_resources - Free Tx Resources per Queue
       
  1993  * @adapter: board private structure
       
  1994  * @tx_ring: Tx descriptor ring for a specific queue
       
  1995  *
       
  1996  * Free all transmit software resources
       
  1997  **/
       
  1998 
       
  1999 static void e1000_free_tx_resources(struct e1000_adapter *adapter,
       
  2000 				    struct e1000_tx_ring *tx_ring)
       
  2001 {
       
  2002 	struct pci_dev *pdev = adapter->pdev;
       
  2003 
       
  2004 	e1000_clean_tx_ring(adapter, tx_ring);
       
  2005 
       
  2006 	vfree(tx_ring->buffer_info);
       
  2007 	tx_ring->buffer_info = NULL;
       
  2008 
       
  2009 	dma_free_coherent(&pdev->dev, tx_ring->size, tx_ring->desc,
       
  2010 			  tx_ring->dma);
       
  2011 
       
  2012 	tx_ring->desc = NULL;
       
  2013 }
       
  2014 
       
  2015 /**
       
  2016  * e1000_free_all_tx_resources - Free Tx Resources for All Queues
       
  2017  * @adapter: board private structure
       
  2018  *
       
  2019  * Free all transmit software resources
       
  2020  **/
       
  2021 
       
  2022 void e1000_free_all_tx_resources(struct e1000_adapter *adapter)
       
  2023 {
       
  2024 	int i;
       
  2025 
       
  2026 	for (i = 0; i < adapter->num_tx_queues; i++)
       
  2027 		e1000_free_tx_resources(adapter, &adapter->tx_ring[i]);
       
  2028 }
       
  2029 
       
  2030 static void e1000_unmap_and_free_tx_resource(struct e1000_adapter *adapter,
       
  2031 					     struct e1000_buffer *buffer_info)
       
  2032 {
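       	/* in EtherCAT mode the master manages the transmit frames itself,
       	 * so the driver must not unmap or free anything here */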
       
  2033 	if (adapter->ecdev) {
       
  2034 		return;
       
  2035 	}
       
  2036 
       
  2037 	if (buffer_info->dma) {
       
  2038 		if (buffer_info->mapped_as_page)
       
  2039 			dma_unmap_page(&adapter->pdev->dev, buffer_info->dma,
       
  2040 				       buffer_info->length, DMA_TO_DEVICE);
       
  2041 		else
       
  2042 			dma_unmap_single(&adapter->pdev->dev, buffer_info->dma,
       
  2043 					 buffer_info->length,
       
  2044 					 DMA_TO_DEVICE);
       
  2045 		buffer_info->dma = 0;
       
  2046 	}
       
  2047 	if (buffer_info->skb) {
       
  2048 		dev_kfree_skb_any(buffer_info->skb);
       
  2049 		buffer_info->skb = NULL;
       
  2050 	}
       
  2051 	buffer_info->time_stamp = 0;
       
  2052 	/* buffer_info must be completely set up in the transmit path */
       
  2053 }
       
  2054 
       
  2055 /**
       
  2056  * e1000_clean_tx_ring - Free Tx Buffers
       
  2057  * @adapter: board private structure
       
  2058  * @tx_ring: ring to be cleaned
       
  2059  **/
       
  2060 
       
  2061 static void e1000_clean_tx_ring(struct e1000_adapter *adapter,
       
  2062 				struct e1000_tx_ring *tx_ring)
       
  2063 {
       
  2064 	struct e1000_hw *hw = &adapter->hw;
       
  2065 	struct e1000_buffer *buffer_info;
       
  2066 	unsigned long size;
       
  2067 	unsigned int i;
       
  2068 
       
  2069 	/* Free all the Tx ring sk_buffs */
       
  2070 
       
  2071 	for (i = 0; i < tx_ring->count; i++) {
       
  2072 		buffer_info = &tx_ring->buffer_info[i];
       
  2073 		e1000_unmap_and_free_tx_resource(adapter, buffer_info);
       
  2074 	}
       
  2075 
       
  2076 	size = sizeof(struct e1000_buffer) * tx_ring->count;
       
  2077 	memset(tx_ring->buffer_info, 0, size);
       
  2078 
       
  2079 	/* Zero out the descriptor ring */
       
  2080 
       
  2081 	memset(tx_ring->desc, 0, tx_ring->size);
       
  2082 
       
  2083 	tx_ring->next_to_use = 0;
       
  2084 	tx_ring->next_to_clean = 0;
       
  2085 	tx_ring->last_tx_tso = false;
       
  2086 
       
  2087 	writel(0, hw->hw_addr + tx_ring->tdh);
       
  2088 	writel(0, hw->hw_addr + tx_ring->tdt);
       
  2089 }
       
  2090 
       
  2091 /**
       
  2092  * e1000_clean_all_tx_rings - Free Tx Buffers for all queues
       
  2093  * @adapter: board private structure
       
  2094  **/
       
  2095 
       
  2096 static void e1000_clean_all_tx_rings(struct e1000_adapter *adapter)
       
  2097 {
       
  2098 	int i;
       
  2099 
       
  2100 	for (i = 0; i < adapter->num_tx_queues; i++)
       
  2101 		e1000_clean_tx_ring(adapter, &adapter->tx_ring[i]);
       
  2102 }
       
  2103 
       
  2104 /**
       
  2105  * e1000_free_rx_resources - Free Rx Resources
       
  2106  * @adapter: board private structure
       
  2107  * @rx_ring: ring to clean the resources from
       
  2108  *
       
  2109  * Free all receive software resources
       
  2110  **/
       
  2111 
       
  2112 static void e1000_free_rx_resources(struct e1000_adapter *adapter,
       
  2113 				    struct e1000_rx_ring *rx_ring)
       
  2114 {
       
  2115 	struct pci_dev *pdev = adapter->pdev;
       
  2116 
       
  2117 	e1000_clean_rx_ring(adapter, rx_ring);
       
  2118 
       
  2119 	vfree(rx_ring->buffer_info);
       
  2120 	rx_ring->buffer_info = NULL;
       
  2121 
       
  2122 	dma_free_coherent(&pdev->dev, rx_ring->size, rx_ring->desc,
       
  2123 			  rx_ring->dma);
       
  2124 
       
  2125 	rx_ring->desc = NULL;
       
  2126 }
       
  2127 
       
  2128 /**
       
  2129  * e1000_free_all_rx_resources - Free Rx Resources for All Queues
       
  2130  * @adapter: board private structure
       
  2131  *
       
  2132  * Free all receive software resources
       
  2133  **/
       
  2134 
       
  2135 void e1000_free_all_rx_resources(struct e1000_adapter *adapter)
       
  2136 {
       
  2137 	int i;
       
  2138 
       
  2139 	for (i = 0; i < adapter->num_rx_queues; i++)
       
  2140 		e1000_free_rx_resources(adapter, &adapter->rx_ring[i]);
       
  2141 }
       
  2142 
       
  2143 /**
       
  2144  * e1000_clean_rx_ring - Free Rx Buffers per Queue
       
  2145  * @adapter: board private structure
       
  2146  * @rx_ring: ring to free buffers from
       
  2147  **/
       
  2148 
       
  2149 static void e1000_clean_rx_ring(struct e1000_adapter *adapter,
       
  2150 				struct e1000_rx_ring *rx_ring)
       
  2151 {
       
  2152 	struct e1000_hw *hw = &adapter->hw;
       
  2153 	struct e1000_buffer *buffer_info;
       
  2154 	struct pci_dev *pdev = adapter->pdev;
       
  2155 	unsigned long size;
       
  2156 	unsigned int i;
       
  2157 
       
  2158 	/* Free all the Rx ring sk_buffs */
       
  2159 	for (i = 0; i < rx_ring->count; i++) {
       
  2160 		buffer_info = &rx_ring->buffer_info[i];
       
  2161 		if (buffer_info->dma &&
       
  2162 		    adapter->clean_rx == e1000_clean_rx_irq) {
       
  2163 			dma_unmap_single(&pdev->dev, buffer_info->dma,
       
  2164 			                 buffer_info->length,
       
  2165 					 DMA_FROM_DEVICE);
       
  2166 		} else if (buffer_info->dma &&
       
  2167 		           adapter->clean_rx == e1000_clean_jumbo_rx_irq) {
       
  2168 			dma_unmap_page(&pdev->dev, buffer_info->dma,
       
  2169 				       buffer_info->length,
       
  2170 				       DMA_FROM_DEVICE);
       
  2171 		}
       
  2172 
       
  2173 		buffer_info->dma = 0;
       
  2174 		if (buffer_info->page) {
       
  2175 			put_page(buffer_info->page);
       
  2176 			buffer_info->page = NULL;
       
  2177 		}
       
  2178 		if (buffer_info->skb) {
       
  2179 			dev_kfree_skb(buffer_info->skb);
       
  2180 			buffer_info->skb = NULL;
       
  2181 		}
       
  2182 	}
       
  2183 
       
  2184 	/* there also may be some cached data from a chained receive */
       
  2185 	if (rx_ring->rx_skb_top) {
       
  2186 		dev_kfree_skb(rx_ring->rx_skb_top);
       
  2187 		rx_ring->rx_skb_top = NULL;
       
  2188 	}
       
  2189 
       
  2190 	size = sizeof(struct e1000_buffer) * rx_ring->count;
       
  2191 	memset(rx_ring->buffer_info, 0, size);
       
  2192 
       
  2193 	/* Zero out the descriptor ring */
       
  2194 	memset(rx_ring->desc, 0, rx_ring->size);
       
  2195 
       
  2196 	rx_ring->next_to_clean = 0;
       
  2197 	rx_ring->next_to_use = 0;
       
  2198 
       
  2199 	writel(0, hw->hw_addr + rx_ring->rdh);
       
  2200 	writel(0, hw->hw_addr + rx_ring->rdt);
       
  2201 }
       
  2202 
       
  2203 /**
       
  2204  * e1000_clean_all_rx_rings - Free Rx Buffers for all queues
       
  2205  * @adapter: board private structure
       
  2206  **/
       
  2207 
       
  2208 static void e1000_clean_all_rx_rings(struct e1000_adapter *adapter)
       
  2209 {
       
  2210 	int i;
       
  2211 
       
  2212 	for (i = 0; i < adapter->num_rx_queues; i++)
       
  2213 		e1000_clean_rx_ring(adapter, &adapter->rx_ring[i]);
       
  2214 }
       
  2215 
       
  2216 /* The 82542 2.0 (revision 2) needs to have the receive unit in reset
       
  2217  * and memory write and invalidate disabled for certain operations
       
  2218  */
       
  2219 static void e1000_enter_82542_rst(struct e1000_adapter *adapter)
       
  2220 {
       
  2221 	struct e1000_hw *hw = &adapter->hw;
       
  2222 	struct net_device *netdev = adapter->netdev;
       
  2223 	u32 rctl;
       
  2224 
       
  2225 	e1000_pci_clear_mwi(hw);
       
  2226 
       
  2227 	rctl = er32(RCTL);
       
  2228 	rctl |= E1000_RCTL_RST;
       
  2229 	ew32(RCTL, rctl);
       
  2230 	E1000_WRITE_FLUSH();
       
  2231 	mdelay(5);
       
  2232 
       
  2233 	if (!adapter->ecdev && netif_running(netdev))
       
  2234 		e1000_clean_all_rx_rings(adapter);
       
  2235 }
       
  2236 
       
  2237 static void e1000_leave_82542_rst(struct e1000_adapter *adapter)
       
  2238 {
       
  2239 	struct e1000_hw *hw = &adapter->hw;
       
  2240 	struct net_device *netdev = adapter->netdev;
       
  2241 	u32 rctl;
       
  2242 
       
  2243 	rctl = er32(RCTL);
       
  2244 	rctl &= ~E1000_RCTL_RST;
       
  2245 	ew32(RCTL, rctl);
       
  2246 	E1000_WRITE_FLUSH();
       
  2247 	mdelay(5);
       
  2248 
       
  2249 	if (hw->pci_cmd_word & PCI_COMMAND_INVALIDATE)
       
  2250 		e1000_pci_set_mwi(hw);
       
  2251 
       
  2252 	if (adapter->ecdev || netif_running(netdev)) {
       
  2253 		/* No need to loop, because 82542 supports only 1 queue */
       
  2254 		struct e1000_rx_ring *ring = &adapter->rx_ring[0];
       
  2255 		e1000_configure_rx(adapter);
       
  2256 		if (adapter->ecdev) {
       
  2257 			/* fill rx ring completely! */
       
  2258 			adapter->alloc_rx_buf(adapter, ring, ring->count);
       
  2259 		} else {
       
  2260 			/* this one leaves the last ring element unallocated! */
       
  2261 			adapter->alloc_rx_buf(adapter, ring, E1000_DESC_UNUSED(ring));
       
  2262 		}
       
  2263 
       
  2264 	}
       
  2265 }
       
  2266 
       
  2267 /**
       
  2268  * e1000_set_mac - Change the Ethernet Address of the NIC
       
  2269  * @netdev: network interface device structure
       
  2270  * @p: pointer to an address structure
       
  2271  *
       
  2272  * Returns 0 on success, negative on failure
       
  2273  **/
       
  2274 
       
  2275 static int e1000_set_mac(struct net_device *netdev, void *p)
       
  2276 {
       
  2277 	struct e1000_adapter *adapter = netdev_priv(netdev);
       
  2278 	struct e1000_hw *hw = &adapter->hw;
       
  2279 	struct sockaddr *addr = p;
       
  2280 
       
  2281 	if (!is_valid_ether_addr(addr->sa_data))
       
  2282 		return -EADDRNOTAVAIL;
       
  2283 
       
  2284 	/* 82542 2.0 needs to be in reset to write receive address registers */
       
  2285 
       
  2286 	if (hw->mac_type == e1000_82542_rev2_0)
       
  2287 		e1000_enter_82542_rst(adapter);
       
  2288 
       
  2289 	memcpy(netdev->dev_addr, addr->sa_data, netdev->addr_len);
       
  2290 	memcpy(hw->mac_addr, addr->sa_data, netdev->addr_len);
       
  2291 
       
  2292 	e1000_rar_set(hw, hw->mac_addr, 0);
       
  2293 
       
  2294 	if (hw->mac_type == e1000_82542_rev2_0)
       
  2295 		e1000_leave_82542_rst(adapter);
       
  2296 
       
  2297 	return 0;
       
  2298 }
       
  2299 
       
  2300 /**
       
  2301  * e1000_set_rx_mode - Secondary Unicast, Multicast and Promiscuous mode set
       
  2302  * @netdev: network interface device structure
       
  2303  *
       
  2304  * The set_rx_mode entry point is called whenever the unicast or multicast
       
  2305  * address lists or the network interface flags are updated. This routine is
       
  2306  * responsible for configuring the hardware for proper unicast, multicast,
       
  2307  * promiscuous mode, and all-multi behavior.
       
  2308  **/
       
  2309 
       
  2310 static void e1000_set_rx_mode(struct net_device *netdev)
       
  2311 {
       
  2312 	struct e1000_adapter *adapter = netdev_priv(netdev);
       
  2313 	struct e1000_hw *hw = &adapter->hw;
       
  2314 	struct netdev_hw_addr *ha;
       
  2315 	bool use_uc = false;
       
  2316 	u32 rctl;
       
  2317 	u32 hash_value;
       
  2318 	int i, rar_entries = E1000_RAR_ENTRIES;
       
  2319 	int mta_reg_count = E1000_NUM_MTA_REGISTERS;
       
  2320 	u32 *mcarray = kcalloc(mta_reg_count, sizeof(u32), GFP_ATOMIC);
       
  2321 
       
  2322 	if (!mcarray) {
       
  2323 		e_err(probe, "memory allocation failed\n");
       
  2324 		return;
       
  2325 	}
       
  2326 
       
  2327 	/* Check for Promiscuous and All Multicast modes */
       
  2328 
       
  2329 	rctl = er32(RCTL);
       
  2330 
       
  2331 	if (netdev->flags & IFF_PROMISC) {
       
  2332 		rctl |= (E1000_RCTL_UPE | E1000_RCTL_MPE);
       
  2333 		rctl &= ~E1000_RCTL_VFE;
       
  2334 	} else {
       
  2335 		if (netdev->flags & IFF_ALLMULTI)
       
  2336 			rctl |= E1000_RCTL_MPE;
       
  2337 		else
       
  2338 			rctl &= ~E1000_RCTL_MPE;
       
  2339 		/* Enable VLAN filter if there is a VLAN */
       
  2340 		if (e1000_vlan_used(adapter))
       
  2341 			rctl |= E1000_RCTL_VFE;
       
  2342 	}
       
  2343 
       
  2344 	if (netdev_uc_count(netdev) > rar_entries - 1) {
       
  2345 		rctl |= E1000_RCTL_UPE;
       
  2346 	} else if (!(netdev->flags & IFF_PROMISC)) {
       
  2347 		rctl &= ~E1000_RCTL_UPE;
       
  2348 		use_uc = true;
       
  2349 	}
       
  2350 
       
  2351 	ew32(RCTL, rctl);
       
  2352 
       
  2353 	/* 82542 2.0 needs to be in reset to write receive address registers */
       
  2354 
       
  2355 	if (hw->mac_type == e1000_82542_rev2_0)
       
  2356 		e1000_enter_82542_rst(adapter);
       
  2357 
       
  2358 	/* load the first 14 addresses into the exact filters 1-14. Unicast
       
  2359 	 * addresses take precedence to avoid disabling unicast filtering
       
  2360 	 * when possible.
       
  2361 	 *
       
  2362 	 * RAR 0 is used for the station MAC address; if there are fewer
       
  2363 	 * than 14 addresses, go ahead and clear the remaining filters
       
  2364 	 */
       
  2365 	i = 1;
       
  2366 	if (use_uc)
       
  2367 		netdev_for_each_uc_addr(ha, netdev) {
       
  2368 			if (i == rar_entries)
       
  2369 				break;
       
  2370 			e1000_rar_set(hw, ha->addr, i++);
       
  2371 		}
       
  2372 
       
  2373 	netdev_for_each_mc_addr(ha, netdev) {
       
  2374 		if (i == rar_entries) {
       
  2375 			/* load any remaining addresses into the hash table */
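       			/* bits [11:5] of the hash select one of the 128
       			 * 32-bit MTA registers, bits [4:0] the bit inside */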
       
  2376 			u32 hash_reg, hash_bit, mta;
       
  2377 			hash_value = e1000_hash_mc_addr(hw, ha->addr);
       
  2378 			hash_reg = (hash_value >> 5) & 0x7F;
       
  2379 			hash_bit = hash_value & 0x1F;
       
  2380 			mta = (1 << hash_bit);
       
  2381 			mcarray[hash_reg] |= mta;
       
  2382 		} else {
       
  2383 			e1000_rar_set(hw, ha->addr, i++);
       
  2384 		}
       
  2385 	}
       
  2386 
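       	/* each RAR entry is a pair of 32-bit registers (address low and
       	 * high/valid), hence the i << 1 indexing while clearing */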
       
  2387 	for (; i < rar_entries; i++) {
       
  2388 		E1000_WRITE_REG_ARRAY(hw, RA, i << 1, 0);
       
  2389 		E1000_WRITE_FLUSH();
       
  2390 		E1000_WRITE_REG_ARRAY(hw, RA, (i << 1) + 1, 0);
       
  2391 		E1000_WRITE_FLUSH();
       
  2392 	}
       
  2393 
       
  2394 	/* write the hash table completely, from the bottom up, to work
       
  2395 	 * around write-combining chipsets and to avoid flushing each write */
       
  2396 	for (i = mta_reg_count - 1; i >= 0 ; i--) {
       
  2397 		/*
       
  2398 		 * The 82544 has an erratum where writing an odd MTA
       
  2399 		 * offset overwrites the previous even offset; writing
       
  2400 		 * backwards over the range works around this by always
       
  2401 		 * writing the odd offset first
       
  2402 		 */
       
  2403 		E1000_WRITE_REG_ARRAY(hw, MTA, i, mcarray[i]);
       
  2404 	}
       
  2405 	E1000_WRITE_FLUSH();
       
  2406 
       
  2407 	if (hw->mac_type == e1000_82542_rev2_0)
       
  2408 		e1000_leave_82542_rst(adapter);
       
  2409 
       
  2410 	kfree(mcarray);
       
  2411 }
       
  2412 
       
  2413 /**
       
  2414  * e1000_update_phy_info_task - get phy info
       
  2415  * @work: work struct contained inside adapter struct
       
  2416  *
       
  2417  * Need to wait a few seconds after link up to get diagnostic information from
       
  2418  * the phy
       
  2419  */
       
  2420 static void e1000_update_phy_info_task(struct work_struct *work)
       
  2421 {
       
  2422 	struct e1000_adapter *adapter = container_of(work,
       
  2423 						     struct e1000_adapter,
       
  2424 						     phy_info_task.work);
       
  2425 	if (test_bit(__E1000_DOWN, &adapter->flags))
       
  2426 		return;
       
  2427 	mutex_lock(&adapter->mutex);
       
  2428 	e1000_phy_get_info(&adapter->hw, &adapter->phy_info);
       
  2429 	mutex_unlock(&adapter->mutex);
       
  2430 }
       
  2431 
       
  2432 /**
       
  2433  * e1000_82547_tx_fifo_stall_task - task to complete work
       
  2434  * @work: work struct contained inside adapter struct
       
  2435  **/
       
  2436 static void e1000_82547_tx_fifo_stall_task(struct work_struct *work)
       
  2437 {
       
  2438 	struct e1000_adapter *adapter = container_of(work,
       
  2439 						     struct e1000_adapter,
       
  2440 						     fifo_stall_task.work);
       
  2441 	struct e1000_hw *hw = &adapter->hw;
       
  2442 	struct net_device *netdev = adapter->netdev;
       
  2443 	u32 tctl;
       
  2444 
       
  2445 	if (test_bit(__E1000_DOWN, &adapter->flags))
       
  2446 		return;
       
  2447 	mutex_lock(&adapter->mutex);
       
  2448 	if (atomic_read(&adapter->tx_fifo_stall)) {
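       		/* only reset the FIFO pointers once both the descriptor
       		 * ring (TDT == TDH) and the transmit FIFO have drained */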
       
  2449 		if ((er32(TDT) == er32(TDH)) &&
       
  2450 		   (er32(TDFT) == er32(TDFH)) &&
       
  2451 		   (er32(TDFTS) == er32(TDFHS))) {
       
  2452 			tctl = er32(TCTL);
       
  2453 			ew32(TCTL, tctl & ~E1000_TCTL_EN);
       
  2454 			ew32(TDFT, adapter->tx_head_addr);
       
  2455 			ew32(TDFH, adapter->tx_head_addr);
       
  2456 			ew32(TDFTS, adapter->tx_head_addr);
       
  2457 			ew32(TDFHS, adapter->tx_head_addr);
       
  2458 			ew32(TCTL, tctl);
       
  2459 			E1000_WRITE_FLUSH();
       
  2460 
       
  2461 			adapter->tx_fifo_head = 0;
       
  2462 			atomic_set(&adapter->tx_fifo_stall, 0);
       
  2463 			netif_wake_queue(netdev);
       
  2464 		} else if (!test_bit(__E1000_DOWN, &adapter->flags)) {
       
  2465 			schedule_delayed_work(&adapter->fifo_stall_task, 1);
       
  2466 		}
       
  2467 	}
       
  2468 	mutex_unlock(&adapter->mutex);
       
  2469 }
       
  2470 
       
  2471 bool e1000_has_link(struct e1000_adapter *adapter)
       
  2472 {
       
  2473 	struct e1000_hw *hw = &adapter->hw;
       
  2474 	bool link_active = false;
       
  2475 
       
  2476 	/* get_link_status is set on LSC (link status) interrupt or rx
       
  2477 	 * sequence error interrupt (except on the Intel CE4100).
       
  2478 	 * get_link_status will stay false until
       
  2479 	 * e1000_check_for_link establishes link, for copper adapters
       
  2480 	 * ONLY
       
  2481 	 */
       
  2482 	switch (hw->media_type) {
       
  2483 	case e1000_media_type_copper:
       
  2484 		if (hw->mac_type == e1000_ce4100)
       
  2485 			hw->get_link_status = 1;
       
  2486 		if (hw->get_link_status) {
       
  2487 			e1000_check_for_link(hw);
       
  2488 			link_active = !hw->get_link_status;
       
  2489 		} else {
       
  2490 			link_active = true;
       
  2491 		}
       
  2492 		break;
       
  2493 	case e1000_media_type_fiber:
       
  2494 		e1000_check_for_link(hw);
       
  2495 		link_active = !!(er32(STATUS) & E1000_STATUS_LU);
       
  2496 		break;
       
  2497 	case e1000_media_type_internal_serdes:
       
  2498 		e1000_check_for_link(hw);
       
  2499 		link_active = hw->serdes_has_link;
       
  2500 		break;
       
  2501 	default:
       
  2502 		break;
       
  2503 	}
       
  2504 
       
  2505 	return link_active;
       
  2506 }
       
  2507 
       
  2508 /**
       
  2509  * e1000_watchdog - work function
       
  2510  * @work: work struct contained inside adapter struct
       
  2511  **/
       
  2512 static void e1000_watchdog(struct work_struct *work)
       
  2513 {
       
  2514 	struct e1000_adapter *adapter = container_of(work,
       
  2515 						     struct e1000_adapter,
       
  2516 						     watchdog_task.work);
       
  2517 	struct e1000_hw *hw = &adapter->hw;
       
  2518 	struct net_device *netdev = adapter->netdev;
       
  2519 	struct e1000_tx_ring *txdr = adapter->tx_ring;
       
  2520 	u32 link, tctl;
       
  2521 
       
  2522 	if (test_bit(__E1000_DOWN, &adapter->flags))
       
  2523 		return;
       
  2524 
       
  2525 	mutex_lock(&adapter->mutex);
       
  2526 	link = e1000_has_link(adapter);
       
  2527 	if (!adapter->ecdev && (netif_carrier_ok(netdev)) && link)
       
  2528 		goto link_up;
       
  2529 
       
  2530 	if (link) {
       
  2531 		if ((adapter->ecdev && !ecdev_get_link(adapter->ecdev))
       
  2532 				|| (!adapter->ecdev && !netif_carrier_ok(netdev))) {
       
  2533 			u32 ctrl;
       
  2534 			bool txb2b __attribute__ ((unused)) = true;
       
  2535 			/* update snapshot of PHY registers on LSC */
       
  2536 			e1000_get_speed_and_duplex(hw,
       
  2537 			                           &adapter->link_speed,
       
  2538 			                           &adapter->link_duplex);
       
  2539 
       
  2540 			ctrl = er32(CTRL);
       
  2541 			pr_info("%s NIC Link is Up %d Mbps %s, "
       
  2542 				"Flow Control: %s\n",
       
  2543 				netdev->name,
       
  2544 				adapter->link_speed,
       
  2545 				adapter->link_duplex == FULL_DUPLEX ?
       
  2546 				"Full Duplex" : "Half Duplex",
       
  2547 				((ctrl & E1000_CTRL_TFCE) && (ctrl &
       
  2548 				E1000_CTRL_RFCE)) ? "RX/TX" : ((ctrl &
       
  2549 				E1000_CTRL_RFCE) ? "RX" : ((ctrl &
       
  2550 				E1000_CTRL_TFCE) ? "TX" : "None")));
       
  2551 
       
  2552 			/* adjust timeout factor according to speed/duplex */
       
  2553 			adapter->tx_timeout_factor = 1;
       
  2554 			switch (adapter->link_speed) {
       
  2555 			case SPEED_10:
       
  2556 				txb2b = false;
       
  2557 				adapter->tx_timeout_factor = 16;
       
  2558 				break;
       
  2559 			case SPEED_100:
       
  2560 				txb2b = false;
       
  2561 				/* maybe add some timeout factor ? */
       
  2562 				break;
       
  2563 			}
       
  2564 
       
  2565 			/* enable transmits in the hardware */
       
  2566 			tctl = er32(TCTL);
       
  2567 			tctl |= E1000_TCTL_EN;
       
  2568 			ew32(TCTL, tctl);
       
  2569 
       
  2570 			if (adapter->ecdev) {
       
  2571 				ecdev_set_link(adapter->ecdev, 1);
       
  2572 			} else {
       
  2574 				netif_carrier_on(netdev);
       
  2575 				if (!test_bit(__E1000_DOWN, &adapter->flags))
       
  2576 					schedule_delayed_work(&adapter->phy_info_task,
       
  2577 							2 * HZ);
       
  2578 			}
       
  2579 			adapter->smartspeed = 0;
       
  2580 		}
       
  2581 	} else {
       
  2582 		if ((adapter->ecdev && ecdev_get_link(adapter->ecdev))
       
  2583 				|| (!adapter->ecdev && netif_carrier_ok(netdev))) {
       
  2584 			adapter->link_speed = 0;
       
  2585 			adapter->link_duplex = 0;
       
  2586 			pr_info("%s NIC Link is Down\n",
       
  2587 				netdev->name);
       
  2588 
       
  2589 			if (adapter->ecdev) {
       
  2590 				ecdev_set_link(adapter->ecdev, 0);
       
  2591 			} else {
       
  2592 				netif_carrier_off(netdev);
       
  2593 
       
  2594 				if (!test_bit(__E1000_DOWN, &adapter->flags))
       
  2595 					schedule_delayed_work(&adapter->phy_info_task,
       
  2596 							2 * HZ);
       
  2597 			}
       
  2598 		}
       
  2599 
       
  2600 		e1000_smartspeed(adapter);
       
  2601 	}
       
  2602 
       
  2603 link_up:
       
  2604 	e1000_update_stats(adapter);
       
  2605 
       
  2606 	hw->tx_packet_delta = adapter->stats.tpt - adapter->tpt_old;
       
  2607 	adapter->tpt_old = adapter->stats.tpt;
       
  2608 	hw->collision_delta = adapter->stats.colc - adapter->colc_old;
       
  2609 	adapter->colc_old = adapter->stats.colc;
       
  2610 
       
  2611 	adapter->gorcl = adapter->stats.gorcl - adapter->gorcl_old;
       
  2612 	adapter->gorcl_old = adapter->stats.gorcl;
       
  2613 	adapter->gotcl = adapter->stats.gotcl - adapter->gotcl_old;
       
  2614 	adapter->gotcl_old = adapter->stats.gotcl;
       
  2615 
       
  2616 	e1000_update_adaptive(hw);
       
  2617 
       
  2618 	if (!adapter->ecdev && !netif_carrier_ok(netdev)) {
       
  2619 		if (E1000_DESC_UNUSED(txdr) + 1 < txdr->count) {
       
  2620 			/* We've lost link, so the controller stops DMA,
       
  2621 			 * but we've got queued Tx work that's never going
       
  2622 			 * to get done, so reset controller to flush Tx.
       
  2623 			 * (Do the reset outside of interrupt context). */
       
  2624 			adapter->tx_timeout_count++;
       
  2625 			schedule_work(&adapter->reset_task);
       
  2626 			/* exit immediately since reset is imminent */
       
  2627 			goto unlock;
       
  2628 		}
       
  2629 	}
       
  2630 
       
  2631 	/* Simple mode for Interrupt Throttle Rate (ITR) */
       
  2632 	if (hw->mac_type >= e1000_82540 && adapter->itr_setting == 4) {
       
  2633 		/*
       
  2634 		 * Symmetric Tx/Rx gets a reduced ITR=2000;
       
  2635 		 * Total asymmetrical Tx or Rx gets ITR=8000;
       
  2636 		 * everyone else is between 2000-8000.
       
  2637 		 */
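       		/* e.g. symmetric traffic gives dif == 0 and thus ITR 2000,
       		 * fully one-sided traffic gives dif == goc and thus 8000 */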
       
  2638 		u32 goc = (adapter->gotcl + adapter->gorcl) / 10000;
       
  2639 		u32 dif = (adapter->gotcl > adapter->gorcl ?
       
  2640 			    adapter->gotcl - adapter->gorcl :
       
  2641 			    adapter->gorcl - adapter->gotcl) / 10000;
       
  2642 		u32 itr = goc > 0 ? (dif * 6000 / goc + 2000) : 8000;
       
  2643 
       
  2644 		ew32(ITR, 1000000000 / (itr * 256));
       
  2645 	}
       
  2646 
       
  2647 	/* Cause software interrupt to ensure rx ring is cleaned */
       
  2648 	ew32(ICS, E1000_ICS_RXDMT0);
       
  2649 
       
  2650 	/* Force detection of hung controller every watchdog period */
       
  2651 	adapter->detect_tx_hung = true;
       
  2652 
       
  2653 	/* Reschedule the task */
       
  2654 	if (!adapter->ecdev && !test_bit(__E1000_DOWN, &adapter->flags))
       
  2655 		schedule_delayed_work(&adapter->watchdog_task, 2 * HZ);
       
  2656 
       
  2657 unlock:
       
  2658 	mutex_unlock(&adapter->mutex);
       
  2659 }
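
/* A minimal sketch (not part of the driver) of the "simple mode" ITR
 * computation performed in the watchdog above; the function name is
 * hypothetical and the traffic figures are made up.  For example,
 * gotcl = 40,000,000 and gorcl = 20,000,000 bytes per interval give
 * goc = 6000, dif = 2000 and itr = 2000 * 6000 / 6000 + 2000 = 4000,
 * so ITR is written as 1000000000 / (4000 * 256) ~= 976 (256 ns units).
 */
static inline u32 e1000_example_simple_itr(u32 gotcl, u32 gorcl)
{
	u32 goc = (gotcl + gorcl) / 10000;
	u32 dif = (gotcl > gorcl ? gotcl - gorcl : gorcl - gotcl) / 10000;

	return goc > 0 ? (dif * 6000 / goc + 2000) : 8000;
}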
       
  2660 
       
  2661 enum latency_range {
       
  2662 	lowest_latency = 0,
       
  2663 	low_latency = 1,
       
  2664 	bulk_latency = 2,
       
  2665 	latency_invalid = 255
       
  2666 };
       
  2667 
       
  2668 /**
       
  2669  * e1000_update_itr - update the dynamic ITR value based on statistics
       
  2670  * @adapter: pointer to adapter
       
  2671  * @itr_setting: current adapter->itr
       
  2672  * @packets: the number of packets during this measurement interval
       
  2673  * @bytes: the number of bytes during this measurement interval
       
  2674  *
       
2675  *      Returns a new ITR latency range based on the packet and byte
2676  *      counts observed during the last interrupt.  The advantage of
2677  *      per-interrupt computation is faster updates and a more accurate
2678  *      ITR for the current traffic pattern.  Constants in this function
2679  *      were computed based on theoretical maximum wire speed, and
2680  *      thresholds were set based on testing data as well as attempting
2681  *      to minimize response time while increasing bulk throughput.
2682  *      This functionality is controlled by the InterruptThrottleRate
2683  *      module parameter (see e1000_param.c).
       
  2684  **/
       
  2685 static unsigned int e1000_update_itr(struct e1000_adapter *adapter,
       
  2686 				     u16 itr_setting, int packets, int bytes)
       
  2687 {
       
  2688 	unsigned int retval = itr_setting;
       
  2689 	struct e1000_hw *hw = &adapter->hw;
       
  2690 
       
  2691 	if (unlikely(hw->mac_type < e1000_82540))
       
  2692 		goto update_itr_done;
       
  2693 
       
  2694 	if (packets == 0)
       
  2695 		goto update_itr_done;
       
  2696 
       
  2697 	switch (itr_setting) {
       
  2698 	case lowest_latency:
       
2699 		/* jumbo frames get bulk treatment */
       
  2700 		if (bytes/packets > 8000)
       
  2701 			retval = bulk_latency;
       
  2702 		else if ((packets < 5) && (bytes > 512))
       
  2703 			retval = low_latency;
       
  2704 		break;
       
  2705 	case low_latency:  /* 50 usec aka 20000 ints/s */
       
  2706 		if (bytes > 10000) {
       
  2707 			/* jumbo frames need bulk latency setting */
       
  2708 			if (bytes/packets > 8000)
       
  2709 				retval = bulk_latency;
       
  2710 			else if ((packets < 10) || ((bytes/packets) > 1200))
       
  2711 				retval = bulk_latency;
       
  2712 			else if ((packets > 35))
       
  2713 				retval = lowest_latency;
       
  2714 		} else if (bytes/packets > 2000)
       
  2715 			retval = bulk_latency;
       
  2716 		else if (packets <= 2 && bytes < 512)
       
  2717 			retval = lowest_latency;
       
  2718 		break;
       
  2719 	case bulk_latency: /* 250 usec aka 4000 ints/s */
       
  2720 		if (bytes > 25000) {
       
  2721 			if (packets > 35)
       
  2722 				retval = low_latency;
       
  2723 		} else if (bytes < 6000) {
       
  2724 			retval = low_latency;
       
  2725 		}
       
  2726 		break;
       
  2727 	}
       
  2728 
       
  2729 update_itr_done:
       
  2730 	return retval;
       
  2731 }
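
/* Hedged example of the classification above (illustrative numbers, not
 * driver output): while in low_latency, an interval with 40 packets and
 * 48,000 bytes has bytes > 10000, bytes/packets = 1200 (not > 1200) and
 * packets > 35, so it is promoted to lowest_latency; an interval with
 * 2 packets and 400 bytes reaches lowest_latency via the small-traffic
 * branch instead.
 */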
       
  2732 
       
  2733 static void e1000_set_itr(struct e1000_adapter *adapter)
       
  2734 {
       
  2735 	struct e1000_hw *hw = &adapter->hw;
       
  2736 	u16 current_itr;
       
  2737 	u32 new_itr = adapter->itr;
       
  2738 
       
  2739 	if (unlikely(hw->mac_type < e1000_82540))
       
  2740 		return;
       
  2741 
       
  2742 	/* for non-gigabit speeds, just fix the interrupt rate at 4000 */
       
  2743 	if (unlikely(adapter->link_speed != SPEED_1000)) {
       
  2744 		current_itr = 0;
       
  2745 		new_itr = 4000;
       
  2746 		goto set_itr_now;
       
  2747 	}
       
  2748 
       
  2749 	adapter->tx_itr = e1000_update_itr(adapter,
       
  2750 	                            adapter->tx_itr,
       
  2751 	                            adapter->total_tx_packets,
       
  2752 	                            adapter->total_tx_bytes);
       
  2753 	/* conservative mode (itr 3) eliminates the lowest_latency setting */
       
  2754 	if (adapter->itr_setting == 3 && adapter->tx_itr == lowest_latency)
       
  2755 		adapter->tx_itr = low_latency;
       
  2756 
       
  2757 	adapter->rx_itr = e1000_update_itr(adapter,
       
  2758 	                            adapter->rx_itr,
       
  2759 	                            adapter->total_rx_packets,
       
  2760 	                            adapter->total_rx_bytes);
       
  2761 	/* conservative mode (itr 3) eliminates the lowest_latency setting */
       
  2762 	if (adapter->itr_setting == 3 && adapter->rx_itr == lowest_latency)
       
  2763 		adapter->rx_itr = low_latency;
       
  2764 
       
  2765 	current_itr = max(adapter->rx_itr, adapter->tx_itr);
       
  2766 
       
  2767 	switch (current_itr) {
       
  2768 	/* counts and packets in update_itr are dependent on these numbers */
       
  2769 	case lowest_latency:
       
  2770 		new_itr = 70000;
       
  2771 		break;
       
  2772 	case low_latency:
       
  2773 		new_itr = 20000; /* aka hwitr = ~200 */
       
  2774 		break;
       
  2775 	case bulk_latency:
       
  2776 		new_itr = 4000;
       
  2777 		break;
       
  2778 	default:
       
  2779 		break;
       
  2780 	}
       
  2781 
       
  2782 set_itr_now:
       
  2783 	if (new_itr != adapter->itr) {
       
2784 		/* This attempts to bias the interrupt rate towards Bulk
2785 		 * by adding intermediate steps when the interrupt rate is
2786 		 * increasing. */
       
  2787 		new_itr = new_itr > adapter->itr ?
       
  2788 		             min(adapter->itr + (new_itr >> 2), new_itr) :
       
  2789 		             new_itr;
       
  2790 		adapter->itr = new_itr;
       
  2791 		ew32(ITR, 1000000000 / (new_itr * 256));
       
  2792 	}
       
  2793 }
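
/* Worked example of the ramp-up damping above (hypothetical numbers):
 * moving from adapter->itr = 4000 towards a target of 20000 first yields
 * min(4000 + (20000 >> 2), 20000) = 9000, so the interrupt rate climbs
 * in steps over several invocations rather than jumping at once; drops
 * in rate are applied immediately.
 */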
       
  2794 
       
  2795 #define E1000_TX_FLAGS_CSUM		0x00000001
       
  2796 #define E1000_TX_FLAGS_VLAN		0x00000002
       
  2797 #define E1000_TX_FLAGS_TSO		0x00000004
       
  2798 #define E1000_TX_FLAGS_IPV4		0x00000008
       
  2799 #define E1000_TX_FLAGS_NO_FCS		0x00000010
       
  2800 #define E1000_TX_FLAGS_VLAN_MASK	0xffff0000
       
  2801 #define E1000_TX_FLAGS_VLAN_SHIFT	16
       
  2802 
       
  2803 static int e1000_tso(struct e1000_adapter *adapter,
       
  2804 		     struct e1000_tx_ring *tx_ring, struct sk_buff *skb)
       
  2805 {
       
  2806 	struct e1000_context_desc *context_desc;
       
  2807 	struct e1000_buffer *buffer_info;
       
  2808 	unsigned int i;
       
  2809 	u32 cmd_length = 0;
       
  2810 	u16 ipcse = 0, tucse, mss;
       
  2811 	u8 ipcss, ipcso, tucss, tucso, hdr_len;
       
  2812 	int err;
       
  2813 
       
  2814 	if (skb_is_gso(skb)) {
       
  2815 		if (skb_header_cloned(skb)) {
       
  2816 			err = pskb_expand_head(skb, 0, 0, GFP_ATOMIC);
       
  2817 			if (err)
       
  2818 				return err;
       
  2819 		}
       
  2820 
       
  2821 		hdr_len = skb_transport_offset(skb) + tcp_hdrlen(skb);
       
  2822 		mss = skb_shinfo(skb)->gso_size;
       
  2823 		if (skb->protocol == htons(ETH_P_IP)) {
       
  2824 			struct iphdr *iph = ip_hdr(skb);
       
  2825 			iph->tot_len = 0;
       
  2826 			iph->check = 0;
       
  2827 			tcp_hdr(skb)->check = ~csum_tcpudp_magic(iph->saddr,
       
  2828 								 iph->daddr, 0,
       
  2829 								 IPPROTO_TCP,
       
  2830 								 0);
       
  2831 			cmd_length = E1000_TXD_CMD_IP;
       
  2832 			ipcse = skb_transport_offset(skb) - 1;
       
  2833 		} else if (skb->protocol == htons(ETH_P_IPV6)) {
       
  2834 			ipv6_hdr(skb)->payload_len = 0;
       
  2835 			tcp_hdr(skb)->check =
       
  2836 				~csum_ipv6_magic(&ipv6_hdr(skb)->saddr,
       
  2837 						 &ipv6_hdr(skb)->daddr,
       
  2838 						 0, IPPROTO_TCP, 0);
       
  2839 			ipcse = 0;
       
  2840 		}
       
  2841 		ipcss = skb_network_offset(skb);
       
  2842 		ipcso = (void *)&(ip_hdr(skb)->check) - (void *)skb->data;
       
  2843 		tucss = skb_transport_offset(skb);
       
  2844 		tucso = (void *)&(tcp_hdr(skb)->check) - (void *)skb->data;
       
  2845 		tucse = 0;
       
  2846 
       
  2847 		cmd_length |= (E1000_TXD_CMD_DEXT | E1000_TXD_CMD_TSE |
       
  2848 			       E1000_TXD_CMD_TCP | (skb->len - (hdr_len)));
       
  2849 
       
  2850 		i = tx_ring->next_to_use;
       
  2851 		context_desc = E1000_CONTEXT_DESC(*tx_ring, i);
       
  2852 		buffer_info = &tx_ring->buffer_info[i];
       
  2853 
       
  2854 		context_desc->lower_setup.ip_fields.ipcss  = ipcss;
       
  2855 		context_desc->lower_setup.ip_fields.ipcso  = ipcso;
       
  2856 		context_desc->lower_setup.ip_fields.ipcse  = cpu_to_le16(ipcse);
       
  2857 		context_desc->upper_setup.tcp_fields.tucss = tucss;
       
  2858 		context_desc->upper_setup.tcp_fields.tucso = tucso;
       
  2859 		context_desc->upper_setup.tcp_fields.tucse = cpu_to_le16(tucse);
       
  2860 		context_desc->tcp_seg_setup.fields.mss     = cpu_to_le16(mss);
       
  2861 		context_desc->tcp_seg_setup.fields.hdr_len = hdr_len;
       
  2862 		context_desc->cmd_and_length = cpu_to_le32(cmd_length);
       
  2863 
       
  2864 		buffer_info->time_stamp = jiffies;
       
  2865 		buffer_info->next_to_watch = i;
       
  2866 
       
  2867 		if (++i == tx_ring->count) i = 0;
       
  2868 		tx_ring->next_to_use = i;
       
  2869 
       
  2870 		return true;
       
  2871 	}
       
  2872 	return false;
       
  2873 }
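
/* Hedged TSO example (illustrative frame, not driver output): a
 * 9014-byte IPv4/TCP skb with mss = 1460 and a 14 + 20 + 20 = 54 byte
 * header gives PAYLEN = skb->len - hdr_len = 8960 in cmd_and_length,
 * which the hardware slices into ceil(8960 / 1460) = 7 segments, each
 * carrying a copy of the 54-byte header built from this context
 * descriptor.
 */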
       
  2874 
       
  2875 static bool e1000_tx_csum(struct e1000_adapter *adapter,
       
  2876 			  struct e1000_tx_ring *tx_ring, struct sk_buff *skb)
       
  2877 {
       
  2878 	struct e1000_context_desc *context_desc;
       
  2879 	struct e1000_buffer *buffer_info;
       
  2880 	unsigned int i;
       
  2881 	u8 css;
       
  2882 	u32 cmd_len = E1000_TXD_CMD_DEXT;
       
  2883 
       
  2884 	if (skb->ip_summed != CHECKSUM_PARTIAL)
       
  2885 		return false;
       
  2886 
       
  2887 	switch (skb->protocol) {
       
  2888 	case cpu_to_be16(ETH_P_IP):
       
  2889 		if (ip_hdr(skb)->protocol == IPPROTO_TCP)
       
  2890 			cmd_len |= E1000_TXD_CMD_TCP;
       
  2891 		break;
       
  2892 	case cpu_to_be16(ETH_P_IPV6):
       
  2893 		/* XXX not handling all IPV6 headers */
       
  2894 		if (ipv6_hdr(skb)->nexthdr == IPPROTO_TCP)
       
  2895 			cmd_len |= E1000_TXD_CMD_TCP;
       
  2896 		break;
       
  2897 	default:
       
  2898 		if (unlikely(net_ratelimit()))
       
  2899 			e_warn(drv, "checksum_partial proto=%x!\n",
       
  2900 			       skb->protocol);
       
  2901 		break;
       
  2902 	}
       
  2903 
       
  2904 	css = skb_checksum_start_offset(skb);
       
  2905 
       
  2906 	i = tx_ring->next_to_use;
       
  2907 	buffer_info = &tx_ring->buffer_info[i];
       
  2908 	context_desc = E1000_CONTEXT_DESC(*tx_ring, i);
       
  2909 
       
  2910 	context_desc->lower_setup.ip_config = 0;
       
  2911 	context_desc->upper_setup.tcp_fields.tucss = css;
       
  2912 	context_desc->upper_setup.tcp_fields.tucso =
       
  2913 		css + skb->csum_offset;
       
  2914 	context_desc->upper_setup.tcp_fields.tucse = 0;
       
  2915 	context_desc->tcp_seg_setup.data = 0;
       
  2916 	context_desc->cmd_and_length = cpu_to_le32(cmd_len);
       
  2917 
       
  2918 	buffer_info->time_stamp = jiffies;
       
  2919 	buffer_info->next_to_watch = i;
       
  2920 
       
  2921 	if (unlikely(++i == tx_ring->count)) i = 0;
       
  2922 	tx_ring->next_to_use = i;
       
  2923 
       
  2924 	return true;
       
  2925 }
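
/* Hedged example (illustrative offsets): for an untagged IPv4/TCP frame,
 * skb_checksum_start_offset() is typically 14 + 20 = 34, so tucss = 34
 * and, with TCP's csum_offset of 16, tucso = 50 -- the byte where the
 * hardware stores the computed checksum.
 */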
       
  2926 
       
  2927 #define E1000_MAX_TXD_PWR	12
       
  2928 #define E1000_MAX_DATA_PER_TXD	(1<<E1000_MAX_TXD_PWR)
       
  2929 
       
  2930 static int e1000_tx_map(struct e1000_adapter *adapter,
       
  2931 			struct e1000_tx_ring *tx_ring,
       
  2932 			struct sk_buff *skb, unsigned int first,
       
  2933 			unsigned int max_per_txd, unsigned int nr_frags,
       
  2934 			unsigned int mss)
       
  2935 {
       
  2936 	struct e1000_hw *hw = &adapter->hw;
       
  2937 	struct pci_dev *pdev = adapter->pdev;
       
  2938 	struct e1000_buffer *buffer_info;
       
  2939 	unsigned int len = skb_headlen(skb);
       
  2940 	unsigned int offset = 0, size, count = 0, i;
       
  2941 	unsigned int f, bytecount, segs;
       
  2942 
       
  2943 	i = tx_ring->next_to_use;
       
  2944 
       
  2945 	while (len) {
       
  2946 		buffer_info = &tx_ring->buffer_info[i];
       
  2947 		size = min(len, max_per_txd);
       
2948 		/* Workaround for Controller erratum --
2949 		 * a descriptor for a non-tso packet in a linear SKB that follows
2950 		 * a tso packet gets written back prematurely before the data is
2951 		 * fully DMA'd to the controller. */
       
  2952 		if (!skb->data_len && tx_ring->last_tx_tso &&
       
  2953 		    !skb_is_gso(skb)) {
       
  2954 			tx_ring->last_tx_tso = false;
       
  2955 			size -= 4;
       
  2956 		}
       
  2957 
       
  2958 		/* Workaround for premature desc write-backs
       
  2959 		 * in TSO mode.  Append 4-byte sentinel desc */
       
  2960 		if (unlikely(mss && !nr_frags && size == len && size > 8))
       
  2961 			size -= 4;
       
2962 		/* Work-around for errata 10, which applies
2963 		 * to all controllers in PCI-X mode.
2964 		 * The fix is to make sure that the first descriptor of a
2965 		 * packet is smaller than 2048 - 16 - 16 (or 2016) bytes.
2966 		 */
       
  2967 		if (unlikely((hw->bus_type == e1000_bus_type_pcix) &&
       
  2968 		                (size > 2015) && count == 0))
       
  2969 		        size = 2015;
       
  2970 
       
  2971 		/* Workaround for potential 82544 hang in PCI-X.  Avoid
       
  2972 		 * terminating buffers within evenly-aligned dwords. */
       
  2973 		if (unlikely(adapter->pcix_82544 &&
       
  2974 		   !((unsigned long)(skb->data + offset + size - 1) & 4) &&
       
  2975 		   size > 4))
       
  2976 			size -= 4;
       
  2977 
       
  2978 		buffer_info->length = size;
       
  2979 		/* set time_stamp *before* dma to help avoid a possible race */
       
  2980 		buffer_info->time_stamp = jiffies;
       
  2981 		buffer_info->mapped_as_page = false;
       
  2982 		buffer_info->dma = dma_map_single(&pdev->dev,
       
  2983 						  skb->data + offset,
       
  2984 						  size,	DMA_TO_DEVICE);
       
  2985 		if (dma_mapping_error(&pdev->dev, buffer_info->dma))
       
  2986 			goto dma_error;
       
  2987 		buffer_info->next_to_watch = i;
       
  2988 
       
  2989 		len -= size;
       
  2990 		offset += size;
       
  2991 		count++;
       
  2992 		if (len) {
       
  2993 			i++;
       
  2994 			if (unlikely(i == tx_ring->count))
       
  2995 				i = 0;
       
  2996 		}
       
  2997 	}
       
  2998 
       
  2999 	for (f = 0; f < nr_frags; f++) {
       
  3000 		const struct skb_frag_struct *frag;
       
  3001 
       
  3002 		frag = &skb_shinfo(skb)->frags[f];
       
  3003 		len = skb_frag_size(frag);
       
  3004 		offset = 0;
       
  3005 
       
  3006 		while (len) {
       
  3007 			unsigned long bufend;
       
  3008 			i++;
       
  3009 			if (unlikely(i == tx_ring->count))
       
  3010 				i = 0;
       
  3011 
       
  3012 			buffer_info = &tx_ring->buffer_info[i];
       
  3013 			size = min(len, max_per_txd);
       
  3014 			/* Workaround for premature desc write-backs
       
  3015 			 * in TSO mode.  Append 4-byte sentinel desc */
       
  3016 			if (unlikely(mss && f == (nr_frags-1) && size == len && size > 8))
       
  3017 				size -= 4;
       
  3018 			/* Workaround for potential 82544 hang in PCI-X.
       
  3019 			 * Avoid terminating buffers within evenly-aligned
       
  3020 			 * dwords. */
       
  3021 			bufend = (unsigned long)
       
  3022 				page_to_phys(skb_frag_page(frag));
       
  3023 			bufend += offset + size - 1;
       
  3024 			if (unlikely(adapter->pcix_82544 &&
       
  3025 				     !(bufend & 4) &&
       
  3026 				     size > 4))
       
  3027 				size -= 4;
       
  3028 
       
  3029 			buffer_info->length = size;
       
  3030 			buffer_info->time_stamp = jiffies;
       
  3031 			buffer_info->mapped_as_page = true;
       
  3032 			buffer_info->dma = skb_frag_dma_map(&pdev->dev, frag,
       
  3033 						offset, size, DMA_TO_DEVICE);
       
  3034 			if (dma_mapping_error(&pdev->dev, buffer_info->dma))
       
  3035 				goto dma_error;
       
  3036 			buffer_info->next_to_watch = i;
       
  3037 
       
  3038 			len -= size;
       
  3039 			offset += size;
       
  3040 			count++;
       
  3041 		}
       
  3042 	}
       
  3043 
       
  3044 	segs = skb_shinfo(skb)->gso_segs ?: 1;
       
  3045 	/* multiply data chunks by size of headers */
       
  3046 	bytecount = ((segs - 1) * skb_headlen(skb)) + skb->len;
       
  3047 
       
  3048 	tx_ring->buffer_info[i].skb = skb;
       
  3049 	tx_ring->buffer_info[i].segs = segs;
       
  3050 	tx_ring->buffer_info[i].bytecount = bytecount;
       
  3051 	tx_ring->buffer_info[first].next_to_watch = i;
       
  3052 
       
  3053 	return count;
       
  3054 
       
  3055 dma_error:
       
  3056 	dev_err(&pdev->dev, "TX DMA map failed\n");
       
  3057 	buffer_info->dma = 0;
       
  3058 	if (count)
       
  3059 		count--;
       
  3060 
       
  3061 	while (count--) {
       
3062 		if (i == 0)
       
  3063 			i += tx_ring->count;
       
  3064 		i--;
       
  3065 		buffer_info = &tx_ring->buffer_info[i];
       
  3066 		e1000_unmap_and_free_tx_resource(adapter, buffer_info);
       
  3067 	}
       
  3068 
       
  3069 	return 0;
       
  3070 }
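
/* Failure-path sketch (hedged restatement of dma_error above): when a
 * mapping fails, the slot that failed is discounted and i is walked
 * backwards modulo tx_ring->count, unmapping every buffer set up so far
 * through e1000_unmap_and_free_tx_resource(), so a returned count of 0
 * leaves no stale DMA mappings behind.
 */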
       
  3071 
       
  3072 static void e1000_tx_queue(struct e1000_adapter *adapter,
       
  3073 			   struct e1000_tx_ring *tx_ring, int tx_flags,
       
  3074 			   int count)
       
  3075 {
       
  3076 	struct e1000_hw *hw = &adapter->hw;
       
  3077 	struct e1000_tx_desc *tx_desc = NULL;
       
  3078 	struct e1000_buffer *buffer_info;
       
  3079 	u32 txd_upper = 0, txd_lower = E1000_TXD_CMD_IFCS;
       
  3080 	unsigned int i;
       
  3081 
       
  3082 	if (likely(tx_flags & E1000_TX_FLAGS_TSO)) {
       
  3083 		txd_lower |= E1000_TXD_CMD_DEXT | E1000_TXD_DTYP_D |
       
  3084 		             E1000_TXD_CMD_TSE;
       
  3085 		txd_upper |= E1000_TXD_POPTS_TXSM << 8;
       
  3086 
       
  3087 		if (likely(tx_flags & E1000_TX_FLAGS_IPV4))
       
  3088 			txd_upper |= E1000_TXD_POPTS_IXSM << 8;
       
  3089 	}
       
  3090 
       
  3091 	if (likely(tx_flags & E1000_TX_FLAGS_CSUM)) {
       
  3092 		txd_lower |= E1000_TXD_CMD_DEXT | E1000_TXD_DTYP_D;
       
  3093 		txd_upper |= E1000_TXD_POPTS_TXSM << 8;
       
  3094 	}
       
  3095 
       
  3096 	if (unlikely(tx_flags & E1000_TX_FLAGS_VLAN)) {
       
  3097 		txd_lower |= E1000_TXD_CMD_VLE;
       
  3098 		txd_upper |= (tx_flags & E1000_TX_FLAGS_VLAN_MASK);
       
  3099 	}
       
  3100 
       
  3101 	if (unlikely(tx_flags & E1000_TX_FLAGS_NO_FCS))
       
  3102 		txd_lower &= ~(E1000_TXD_CMD_IFCS);
       
  3103 
       
  3104 	i = tx_ring->next_to_use;
       
  3105 
       
  3106 	while (count--) {
       
  3107 		buffer_info = &tx_ring->buffer_info[i];
       
  3108 		tx_desc = E1000_TX_DESC(*tx_ring, i);
       
  3109 		tx_desc->buffer_addr = cpu_to_le64(buffer_info->dma);
       
  3110 		tx_desc->lower.data =
       
  3111 			cpu_to_le32(txd_lower | buffer_info->length);
       
  3112 		tx_desc->upper.data = cpu_to_le32(txd_upper);
       
  3113 		if (unlikely(++i == tx_ring->count)) i = 0;
       
  3114 	}
       
  3115 
       
  3116 	tx_desc->lower.data |= cpu_to_le32(adapter->txd_cmd);
       
  3117 
       
  3118 	/* txd_cmd re-enables FCS, so we'll re-disable it here as desired. */
       
  3119 	if (unlikely(tx_flags & E1000_TX_FLAGS_NO_FCS))
       
  3120 		tx_desc->lower.data &= ~(cpu_to_le32(E1000_TXD_CMD_IFCS));
       
  3121 
       
  3122 	/* Force memory writes to complete before letting h/w
       
  3123 	 * know there are new descriptors to fetch.  (Only
       
  3124 	 * applicable for weak-ordered memory model archs,
       
  3125 	 * such as IA-64). */
       
  3126 	wmb();
       
  3127 
       
  3128 	tx_ring->next_to_use = i;
       
  3129 	writel(i, hw->hw_addr + tx_ring->tdt);
       
3130 	/* We need this if more than one processor can write to our tail
3131 	 * at a time; it synchronizes IO on IA64/Altix systems. */
       
  3132 	mmiowb();
       
  3133 }
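
/* Ordering note (hedged): the wmb() above ensures all descriptor writes
 * from the while loop are visible before the TDT doorbell write; without
 * it, a weakly-ordered arch such as IA-64 could let the hardware fetch a
 * descriptor whose buffer_addr or length had not yet reached memory.
 */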
       
  3134 
       
  3135 /* 82547 workaround to avoid controller hang in half-duplex environment.
       
  3136  * The workaround is to avoid queuing a large packet that would span
       
  3137  * the internal Tx FIFO ring boundary by notifying the stack to resend
       
  3138  * the packet at a later time.  This gives the Tx FIFO an opportunity to
       
  3139  * flush all packets.  When that occurs, we reset the Tx FIFO pointers
       
  3140  * to the beginning of the Tx FIFO.
       
  3141  */
       
  3142 
       
  3143 #define E1000_FIFO_HDR			0x10
       
  3144 #define E1000_82547_PAD_LEN		0x3E0
       
  3145 
       
  3146 static int e1000_82547_fifo_workaround(struct e1000_adapter *adapter,
       
  3147 				       struct sk_buff *skb)
       
  3148 {
       
  3149 	u32 fifo_space = adapter->tx_fifo_size - adapter->tx_fifo_head;
       
  3150 	u32 skb_fifo_len = skb->len + E1000_FIFO_HDR;
       
  3151 
       
  3152 	skb_fifo_len = ALIGN(skb_fifo_len, E1000_FIFO_HDR);
       
  3153 
       
  3154 	if (adapter->link_duplex != HALF_DUPLEX)
       
  3155 		goto no_fifo_stall_required;
       
  3156 
       
  3157 	if (atomic_read(&adapter->tx_fifo_stall))
       
  3158 		return 1;
       
  3159 
       
  3160 	if (skb_fifo_len >= (E1000_82547_PAD_LEN + fifo_space)) {
       
  3161 		atomic_set(&adapter->tx_fifo_stall, 1);
       
  3162 		return 1;
       
  3163 	}
       
  3164 
       
  3165 no_fifo_stall_required:
       
  3166 	adapter->tx_fifo_head += skb_fifo_len;
       
  3167 	if (adapter->tx_fifo_head >= adapter->tx_fifo_size)
       
  3168 		adapter->tx_fifo_head -= adapter->tx_fifo_size;
       
  3169 	return 0;
       
  3170 }
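
/* Worked example (hypothetical FIFO state): with tx_fifo_size = 0x1000
 * and tx_fifo_head = 0xF00, fifo_space = 0x100 = 256; a 960-byte
 * half-duplex frame gives skb_fifo_len = ALIGN(960 + 16, 16) = 976,
 * below the E1000_82547_PAD_LEN + fifo_space = 992 + 256 = 1248 stall
 * threshold, so the head advances and wraps to 0xF00 + 976 - 0x1000 =
 * 720.
 */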
       
  3171 
       
  3172 static int __e1000_maybe_stop_tx(struct net_device *netdev, int size)
       
  3173 {
       
  3174 	struct e1000_adapter *adapter = netdev_priv(netdev);
       
  3175 	struct e1000_tx_ring *tx_ring = adapter->tx_ring;
       
  3176 
       
  3177 	if (adapter->ecdev) {
       
  3178 		return -EBUSY;
       
  3179 	}
       
  3180 
       
  3181 	netif_stop_queue(netdev);
       
  3182 	/* Herbert's original patch had:
       
  3183 	 *  smp_mb__after_netif_stop_queue();
       
  3184 	 * but since that doesn't exist yet, just open code it. */
       
  3185 	smp_mb();
       
  3186 
       
3187 	/* We need to check again in case another CPU has just
       
  3188 	 * made room available. */
       
  3189 	if (likely(E1000_DESC_UNUSED(tx_ring) < size))
       
  3190 		return -EBUSY;
       
  3191 
       
  3192 	/* A reprieve! */
       
  3193 	netif_start_queue(netdev);
       
  3194 	++adapter->restart_queue;
       
  3195 	return 0;
       
  3196 }
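
/* Race-closing pattern above, summarized (hedged): e1000_clean_tx_irq()
 * may free descriptors between the caller's space check and
 * netif_stop_queue(); the smp_mb() plus the second E1000_DESC_UNUSED()
 * test closes that window, restarting the queue when room reappeared so
 * the Tx path cannot stall permanently.
 */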
       
  3197 
       
  3198 static int e1000_maybe_stop_tx(struct net_device *netdev,
       
  3199                                struct e1000_tx_ring *tx_ring, int size)
       
  3200 {
       
  3201 	if (likely(E1000_DESC_UNUSED(tx_ring) >= size))
       
  3202 		return 0;
       
  3203 	return __e1000_maybe_stop_tx(netdev, size);
       
  3204 }
       
  3205 
       
3206 #define TXD_USE_COUNT(S, X) (((S) >> (X)) + 1)
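/* For illustration (hypothetical sizes): with max_txd_pwr = 12,
 * TXD_USE_COUNT(6000, 12) = (6000 >> 12) + 1 = 2 descriptors for a
 * 6000-byte buffer; the +1 deliberately over-reserves by one descriptor
 * when the length is an exact multiple of 4096.
 */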
       
  3207 static netdev_tx_t e1000_xmit_frame(struct sk_buff *skb,
       
  3208 				    struct net_device *netdev)
       
  3209 {
       
  3210 	struct e1000_adapter *adapter = netdev_priv(netdev);
       
  3211 	struct e1000_hw *hw = &adapter->hw;
       
  3212 	struct e1000_tx_ring *tx_ring;
       
  3213 	unsigned int first, max_per_txd = E1000_MAX_DATA_PER_TXD;
       
  3214 	unsigned int max_txd_pwr = E1000_MAX_TXD_PWR;
       
  3215 	unsigned int tx_flags = 0;
       
  3216 	unsigned int len = skb_headlen(skb);
       
  3217 	unsigned int nr_frags;
       
  3218 	unsigned int mss;
       
  3219 	int count = 0;
       
  3220 	int tso;
       
  3221 	unsigned int f;
       
  3222 
       
  3223 	/* This goes back to the question of how to logically map a tx queue
       
  3224 	 * to a flow.  Right now, performance is impacted slightly negatively
       
  3225 	 * if using multiple tx queues.  If the stack breaks away from a
       
  3226 	 * single qdisc implementation, we can look at this again. */
       
  3227 	tx_ring = adapter->tx_ring;
       
  3228 
       
  3229 	if (unlikely(skb->len <= 0)) {
       
  3230 		if (!adapter->ecdev) {
       
  3231 			dev_kfree_skb_any(skb);
       
  3232 		}
       
  3233 		return NETDEV_TX_OK;
       
  3234 	}
       
  3235 
       
  3236 	/* On PCI/PCI-X HW, if packet size is less than ETH_ZLEN,
       
  3237 	 * packets may get corrupted during padding by HW.
       
3238 	 * To work around this issue, pad all small packets manually.
       
  3239 	 */
       
  3240 	if (skb->len < ETH_ZLEN) {
       
  3241 		if (skb_pad(skb, ETH_ZLEN - skb->len))
       
  3242 			return NETDEV_TX_OK;
       
  3243 		skb->len = ETH_ZLEN;
       
  3244 		skb_set_tail_pointer(skb, ETH_ZLEN);
       
  3245 	}
       
  3246 
       
  3247 	mss = skb_shinfo(skb)->gso_size;
       
  3248 	/* The controller does a simple calculation to
       
  3249 	 * make sure there is enough room in the FIFO before
       
  3250 	 * initiating the DMA for each buffer.  The calc is:
       
  3251 	 * 4 = ceil(buffer len/mss).  To make sure we don't
       
  3252 	 * overrun the FIFO, adjust the max buffer len if mss
       
  3253 	 * drops. */
       
  3254 	if (mss) {
       
  3255 		u8 hdr_len;
       
  3256 		max_per_txd = min(mss << 2, max_per_txd);
       
  3257 		max_txd_pwr = fls(max_per_txd) - 1;
       
  3258 
       
  3259 		hdr_len = skb_transport_offset(skb) + tcp_hdrlen(skb);
       
  3260 		if (skb->data_len && hdr_len == len) {
       
  3261 			switch (hw->mac_type) {
       
  3262 				unsigned int pull_size;
       
  3263 			case e1000_82544:
       
3264 				/* Make sure we have room to chop off 4 bytes,
3265 				 * and that the end alignment will work out to
3266 				 * this hardware's requirements.
3267 				 * NOTE: this is a TSO-only workaround;
3268 				 * if end byte alignment is not correct, move us
3269 				 * into the next dword. */
       
  3270 				if ((unsigned long)(skb_tail_pointer(skb) - 1) & 4)
       
  3271 					break;
       
  3272 				/* fall through */
       
  3273 				pull_size = min((unsigned int)4, skb->data_len);
       
  3274 				if (!__pskb_pull_tail(skb, pull_size)) {
       
  3275 					e_err(drv, "__pskb_pull_tail "
       
  3276 					      "failed.\n");
       
  3277 					if (!adapter->ecdev) {
       
  3278 						dev_kfree_skb_any(skb);
       
  3279 					}
       
  3280 					return NETDEV_TX_OK;
       
  3281 				}
       
  3282 				len = skb_headlen(skb);
       
  3283 				break;
       
  3284 			default:
       
  3285 				/* do nothing */
       
  3286 				break;
       
  3287 			}
       
  3288 		}
       
  3289 	}
       
  3290 
       
  3291 	/* reserve a descriptor for the offload context */
       
  3292 	if ((mss) || (skb->ip_summed == CHECKSUM_PARTIAL))
       
  3293 		count++;
       
  3294 	count++;
       
  3295 
       
  3296 	/* Controller Erratum workaround */
       
  3297 	if (!skb->data_len && tx_ring->last_tx_tso && !skb_is_gso(skb))
       
  3298 		count++;
       
  3299 
       
  3300 	count += TXD_USE_COUNT(len, max_txd_pwr);
       
  3301 
       
  3302 	if (adapter->pcix_82544)
       
  3303 		count++;
       
  3304 
       
3305 	/* Work-around for errata 10: it applies to all controllers
3306 	 * in PCI-X mode, so add one more descriptor to the count.
3307 	 */
       
  3308 	if (unlikely((hw->bus_type == e1000_bus_type_pcix) &&
       
  3309 			(len > 2015)))
       
  3310 		count++;
       
  3311 
       
  3312 	nr_frags = skb_shinfo(skb)->nr_frags;
       
  3313 	for (f = 0; f < nr_frags; f++)
       
  3314 		count += TXD_USE_COUNT(skb_frag_size(&skb_shinfo(skb)->frags[f]),
       
  3315 				       max_txd_pwr);
       
  3316 	if (adapter->pcix_82544)
       
  3317 		count += nr_frags;
       
  3318 
       
  3319 	/* need: count + 2 desc gap to keep tail from touching
       
  3320 	 * head, otherwise try next time */
       
  3321 	if (unlikely(e1000_maybe_stop_tx(netdev, tx_ring, count + 2)))
       
  3322 		return NETDEV_TX_BUSY;
       
  3323 
       
  3324 	if (unlikely((hw->mac_type == e1000_82547) &&
       
  3325 		     (e1000_82547_fifo_workaround(adapter, skb)))) {
       
  3326 		if (!adapter->ecdev) {
       
  3327 			netif_stop_queue(netdev);
       
  3328 			if (!test_bit(__E1000_DOWN, &adapter->flags))
       
  3329 				schedule_delayed_work(&adapter->fifo_stall_task, 1);
       
  3330 		}
       
  3331 		return NETDEV_TX_BUSY;
       
  3332 	}
       
  3333 
       
  3334 	if (vlan_tx_tag_present(skb)) {
       
  3335 		tx_flags |= E1000_TX_FLAGS_VLAN;
       
  3336 		tx_flags |= (vlan_tx_tag_get(skb) << E1000_TX_FLAGS_VLAN_SHIFT);
       
  3337 	}
       
  3338 
       
  3339 	first = tx_ring->next_to_use;
       
  3340 
       
  3341 	tso = e1000_tso(adapter, tx_ring, skb);
       
  3342 	if (tso < 0) {
       
  3343 		if (!adapter->ecdev) {
       
  3344 			dev_kfree_skb_any(skb);
       
  3345 		}
       
  3346 		return NETDEV_TX_OK;
       
  3347 	}
       
  3348 
       
  3349 	if (likely(tso)) {
       
  3350 		if (likely(hw->mac_type != e1000_82544))
       
  3351 			tx_ring->last_tx_tso = true;
       
  3352 		tx_flags |= E1000_TX_FLAGS_TSO;
       
  3353 	} else if (likely(e1000_tx_csum(adapter, tx_ring, skb)))
       
  3354 		tx_flags |= E1000_TX_FLAGS_CSUM;
       
  3355 
       
  3356 	if (likely(skb->protocol == htons(ETH_P_IP)))
       
  3357 		tx_flags |= E1000_TX_FLAGS_IPV4;
       
  3358 
       
  3359 	if (unlikely(skb->no_fcs))
       
  3360 		tx_flags |= E1000_TX_FLAGS_NO_FCS;
       
  3361 
       
  3362 	count = e1000_tx_map(adapter, tx_ring, skb, first, max_per_txd,
       
  3363 	                     nr_frags, mss);
       
  3364 
       
  3365 	if (count) {
       
  3366 		skb_tx_timestamp(skb);
       
  3367 
       
  3368 		e1000_tx_queue(adapter, tx_ring, tx_flags, count);
       
  3369 		if (!adapter->ecdev) {
       
  3370 			/* Make sure there is space in the ring for the next send. */
       
  3371 			e1000_maybe_stop_tx(netdev, tx_ring, MAX_SKB_FRAGS + 2);
       
  3372 		}
       
  3373 
       
  3374 	} else {
       
  3375 		if (!adapter->ecdev) {
       
  3376 			dev_kfree_skb_any(skb);
       
  3377 		}
       
  3378 		tx_ring->buffer_info[first].time_stamp = 0;
       
  3379 		tx_ring->next_to_use = first;
       
  3380 	}
       
  3381 
       
  3382 	return NETDEV_TX_OK;
       
  3383 }
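
/* Descriptor budgeting example for e1000_xmit_frame() above (hedged,
 * hypothetical frame): a 1448-byte linear TCP segment with checksum
 * offload counts 1 context descriptor + 1 always-reserved descriptor +
 * TXD_USE_COUNT(1448, 12) = 1 data descriptor, i.e. count = 3, and the
 * queue is stopped unless at least count + 2 = 5 descriptors are unused.
 */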
       
  3384 
       
  3385 #define NUM_REGS 38 /* 1 based count */
       
  3386 static void e1000_regdump(struct e1000_adapter *adapter)
       
  3387 {
       
  3388 	struct e1000_hw *hw = &adapter->hw;
       
  3389 	u32 regs[NUM_REGS];
       
  3390 	u32 *regs_buff = regs;
       
  3391 	int i = 0;
       
  3392 
       
  3393 	static const char * const reg_name[] = {
       
  3394 		"CTRL",  "STATUS",
       
  3395 		"RCTL", "RDLEN", "RDH", "RDT", "RDTR",
       
  3396 		"TCTL", "TDBAL", "TDBAH", "TDLEN", "TDH", "TDT",
       
  3397 		"TIDV", "TXDCTL", "TADV", "TARC0",
       
  3398 		"TDBAL1", "TDBAH1", "TDLEN1", "TDH1", "TDT1",
       
  3399 		"TXDCTL1", "TARC1",
       
  3400 		"CTRL_EXT", "ERT", "RDBAL", "RDBAH",
       
  3401 		"TDFH", "TDFT", "TDFHS", "TDFTS", "TDFPC",
       
  3402 		"RDFH", "RDFT", "RDFHS", "RDFTS", "RDFPC"
       
  3403 	};
       
  3404 
       
  3405 	regs_buff[0]  = er32(CTRL);
       
  3406 	regs_buff[1]  = er32(STATUS);
       
  3407 
       
  3408 	regs_buff[2]  = er32(RCTL);
       
  3409 	regs_buff[3]  = er32(RDLEN);
       
  3410 	regs_buff[4]  = er32(RDH);
       
  3411 	regs_buff[5]  = er32(RDT);
       
  3412 	regs_buff[6]  = er32(RDTR);
       
  3413 
       
  3414 	regs_buff[7]  = er32(TCTL);
       
  3415 	regs_buff[8]  = er32(TDBAL);
       
  3416 	regs_buff[9]  = er32(TDBAH);
       
  3417 	regs_buff[10] = er32(TDLEN);
       
  3418 	regs_buff[11] = er32(TDH);
       
  3419 	regs_buff[12] = er32(TDT);
       
  3420 	regs_buff[13] = er32(TIDV);
       
  3421 	regs_buff[14] = er32(TXDCTL);
       
  3422 	regs_buff[15] = er32(TADV);
       
  3423 	regs_buff[16] = er32(TARC0);
       
  3424 
       
  3425 	regs_buff[17] = er32(TDBAL1);
       
  3426 	regs_buff[18] = er32(TDBAH1);
       
  3427 	regs_buff[19] = er32(TDLEN1);
       
  3428 	regs_buff[20] = er32(TDH1);
       
  3429 	regs_buff[21] = er32(TDT1);
       
  3430 	regs_buff[22] = er32(TXDCTL1);
       
  3431 	regs_buff[23] = er32(TARC1);
       
  3432 	regs_buff[24] = er32(CTRL_EXT);
       
  3433 	regs_buff[25] = er32(ERT);
       
  3434 	regs_buff[26] = er32(RDBAL0);
       
  3435 	regs_buff[27] = er32(RDBAH0);
       
  3436 	regs_buff[28] = er32(TDFH);
       
  3437 	regs_buff[29] = er32(TDFT);
       
  3438 	regs_buff[30] = er32(TDFHS);
       
  3439 	regs_buff[31] = er32(TDFTS);
       
  3440 	regs_buff[32] = er32(TDFPC);
       
  3441 	regs_buff[33] = er32(RDFH);
       
  3442 	regs_buff[34] = er32(RDFT);
       
  3443 	regs_buff[35] = er32(RDFHS);
       
  3444 	regs_buff[36] = er32(RDFTS);
       
  3445 	regs_buff[37] = er32(RDFPC);
       
  3446 
       
  3447 	pr_info("Register dump\n");
       
  3448 	for (i = 0; i < NUM_REGS; i++)
       
  3449 		pr_info("%-15s  %08x\n", reg_name[i], regs_buff[i]);
       
  3450 }
       
  3451 
       
  3452 /*
       
  3453  * e1000_dump: Print registers, tx ring and rx ring
       
  3454  */
       
  3455 static void e1000_dump(struct e1000_adapter *adapter)
       
  3456 {
       
  3457 	/* this code doesn't handle multiple rings */
       
  3458 	struct e1000_tx_ring *tx_ring = adapter->tx_ring;
       
  3459 	struct e1000_rx_ring *rx_ring = adapter->rx_ring;
       
  3460 	int i;
       
  3461 
       
  3462 	if (!netif_msg_hw(adapter))
       
  3463 		return;
       
  3464 
       
  3465 	/* Print Registers */
       
  3466 	e1000_regdump(adapter);
       
  3467 
       
  3468 	/*
       
  3469 	 * transmit dump
       
  3470 	 */
       
  3471 	pr_info("TX Desc ring0 dump\n");
       
  3472 
       
  3473 	/* Transmit Descriptor Formats - DEXT[29] is 0 (Legacy) or 1 (Extended)
       
  3474 	 *
       
  3475 	 * Legacy Transmit Descriptor
       
  3476 	 *   +--------------------------------------------------------------+
       
  3477 	 * 0 |         Buffer Address [63:0] (Reserved on Write Back)       |
       
  3478 	 *   +--------------------------------------------------------------+
       
  3479 	 * 8 | Special  |    CSS     | Status |  CMD    |  CSO   |  Length  |
       
  3480 	 *   +--------------------------------------------------------------+
       
  3481 	 *   63       48 47        36 35    32 31     24 23    16 15        0
       
  3482 	 *
       
  3483 	 * Extended Context Descriptor (DTYP=0x0) for TSO or checksum offload
       
  3484 	 *   63      48 47    40 39       32 31             16 15    8 7      0
       
  3485 	 *   +----------------------------------------------------------------+
       
  3486 	 * 0 |  TUCSE  | TUCS0  |   TUCSS   |     IPCSE       | IPCS0 | IPCSS |
       
  3487 	 *   +----------------------------------------------------------------+
       
  3488 	 * 8 |   MSS   | HDRLEN | RSV | STA | TUCMD | DTYP |      PAYLEN      |
       
  3489 	 *   +----------------------------------------------------------------+
       
  3490 	 *   63      48 47    40 39 36 35 32 31   24 23  20 19                0
       
  3491 	 *
       
  3492 	 * Extended Data Descriptor (DTYP=0x1)
       
  3493 	 *   +----------------------------------------------------------------+
       
  3494 	 * 0 |                     Buffer Address [63:0]                      |
       
  3495 	 *   +----------------------------------------------------------------+
       
  3496 	 * 8 | VLAN tag |  POPTS  | Rsvd | Status | Command | DTYP |  DTALEN  |
       
  3497 	 *   +----------------------------------------------------------------+
       
  3498 	 *   63       48 47     40 39  36 35    32 31     24 23  20 19        0
       
  3499 	 */
       
  3500 	pr_info("Tc[desc]     [Ce CoCsIpceCoS] [MssHlRSCm0Plen] [bi->dma       ] leng  ntw timestmp         bi->skb\n");
       
  3501 	pr_info("Td[desc]     [address 63:0  ] [VlaPoRSCm1Dlen] [bi->dma       ] leng  ntw timestmp         bi->skb\n");
       
  3502 
       
  3503 	if (!netif_msg_tx_done(adapter))
       
  3504 		goto rx_ring_summary;
       
  3505 
       
  3506 	for (i = 0; tx_ring->desc && (i < tx_ring->count); i++) {
       
  3507 		struct e1000_tx_desc *tx_desc = E1000_TX_DESC(*tx_ring, i);
       
  3508 		struct e1000_buffer *buffer_info = &tx_ring->buffer_info[i];
       
  3509 		struct my_u { __le64 a; __le64 b; };
       
  3510 		struct my_u *u = (struct my_u *)tx_desc;
       
  3511 		const char *type;
       
  3512 
       
  3513 		if (i == tx_ring->next_to_use && i == tx_ring->next_to_clean)
       
  3514 			type = "NTC/U";
       
  3515 		else if (i == tx_ring->next_to_use)
       
  3516 			type = "NTU";
       
  3517 		else if (i == tx_ring->next_to_clean)
       
  3518 			type = "NTC";
       
  3519 		else
       
  3520 			type = "";
       
  3521 
       
  3522 		pr_info("T%c[0x%03X]    %016llX %016llX %016llX %04X  %3X %016llX %p %s\n",
       
  3523 			((le64_to_cpu(u->b) & (1<<20)) ? 'd' : 'c'), i,
       
  3524 			le64_to_cpu(u->a), le64_to_cpu(u->b),
       
  3525 			(u64)buffer_info->dma, buffer_info->length,
       
  3526 			buffer_info->next_to_watch,
       
  3527 			(u64)buffer_info->time_stamp, buffer_info->skb, type);
       
  3528 	}
       
  3529 
       
  3530 rx_ring_summary:
       
  3531 	/*
       
  3532 	 * receive dump
       
  3533 	 */
       
  3534 	pr_info("\nRX Desc ring dump\n");
       
  3535 
       
  3536 	/* Legacy Receive Descriptor Format
       
  3537 	 *
       
  3538 	 * +-----------------------------------------------------+
       
  3539 	 * |                Buffer Address [63:0]                |
       
  3540 	 * +-----------------------------------------------------+
       
  3541 	 * | VLAN Tag | Errors | Status 0 | Packet csum | Length |
       
  3542 	 * +-----------------------------------------------------+
       
  3543 	 * 63       48 47    40 39      32 31         16 15      0
       
  3544 	 */
       
  3545 	pr_info("R[desc]      [address 63:0  ] [vl er S cks ln] [bi->dma       ] [bi->skb]\n");
       
  3546 
       
  3547 	if (!netif_msg_rx_status(adapter))
       
  3548 		goto exit;
       
  3549 
       
  3550 	for (i = 0; rx_ring->desc && (i < rx_ring->count); i++) {
       
  3551 		struct e1000_rx_desc *rx_desc = E1000_RX_DESC(*rx_ring, i);
       
  3552 		struct e1000_buffer *buffer_info = &rx_ring->buffer_info[i];
       
  3553 		struct my_u { __le64 a; __le64 b; };
       
  3554 		struct my_u *u = (struct my_u *)rx_desc;
       
  3555 		const char *type;
       
  3556 
       
  3557 		if (i == rx_ring->next_to_use)
       
  3558 			type = "NTU";
       
  3559 		else if (i == rx_ring->next_to_clean)
       
  3560 			type = "NTC";
       
  3561 		else
       
  3562 			type = "";
       
  3563 
       
  3564 		pr_info("R[0x%03X]     %016llX %016llX %016llX %p %s\n",
       
  3565 			i, le64_to_cpu(u->a), le64_to_cpu(u->b),
       
  3566 			(u64)buffer_info->dma, buffer_info->skb, type);
       
  3567 	} /* for */
       
  3568 
       
  3569 	/* dump the descriptor caches */
       
  3570 	/* rx */
       
  3571 	pr_info("Rx descriptor cache in 64bit format\n");
       
  3572 	for (i = 0x6000; i <= 0x63FF ; i += 0x10) {
       
  3573 		pr_info("R%04X: %08X|%08X %08X|%08X\n",
       
  3574 			i,
       
  3575 			readl(adapter->hw.hw_addr + i+4),
       
  3576 			readl(adapter->hw.hw_addr + i),
       
  3577 			readl(adapter->hw.hw_addr + i+12),
       
  3578 			readl(adapter->hw.hw_addr + i+8));
       
  3579 	}
       
  3580 	/* tx */
       
  3581 	pr_info("Tx descriptor cache in 64bit format\n");
       
  3582 	for (i = 0x7000; i <= 0x73FF ; i += 0x10) {
       
  3583 		pr_info("T%04X: %08X|%08X %08X|%08X\n",
       
  3584 			i,
       
  3585 			readl(adapter->hw.hw_addr + i+4),
       
  3586 			readl(adapter->hw.hw_addr + i),
       
  3587 			readl(adapter->hw.hw_addr + i+12),
       
  3588 			readl(adapter->hw.hw_addr + i+8));
       
  3589 	}
       
  3590 exit:
       
  3591 	return;
       
  3592 }
       
  3593 
       
  3594 /**
       
  3595  * e1000_tx_timeout - Respond to a Tx Hang
       
  3596  * @netdev: network interface device structure
       
  3597  **/
       
  3598 
       
  3599 static void e1000_tx_timeout(struct net_device *netdev)
       
  3600 {
       
  3601 	struct e1000_adapter *adapter = netdev_priv(netdev);
       
  3602 
       
  3603 	/* Do the reset outside of interrupt context */
       
  3604 	adapter->tx_timeout_count++;
       
  3605 	schedule_work(&adapter->reset_task);
       
  3606 }
       
  3607 
       
  3608 static void e1000_reset_task(struct work_struct *work)
       
  3609 {
       
  3610 	struct e1000_adapter *adapter =
       
  3611 		container_of(work, struct e1000_adapter, reset_task);
       
  3612 
       
  3613 	if (test_bit(__E1000_DOWN, &adapter->flags))
       
  3614 		return;
       
  3615 	e_err(drv, "Reset adapter\n");
       
  3616 	e1000_reinit_safe(adapter);
       
  3617 }
       
  3618 
       
  3619 /**
       
  3620  * e1000_get_stats - Get System Network Statistics
       
  3621  * @netdev: network interface device structure
       
  3622  *
       
  3623  * Returns the address of the device statistics structure.
       
  3624  * The statistics are actually updated from the watchdog.
       
  3625  **/
       
  3626 
       
  3627 static struct net_device_stats *e1000_get_stats(struct net_device *netdev)
       
  3628 {
       
  3629 	/* only return the current stats */
       
  3630 	return &netdev->stats;
       
  3631 }
       
  3632 
       
  3633 /**
       
  3634  * e1000_change_mtu - Change the Maximum Transfer Unit
       
  3635  * @netdev: network interface device structure
       
  3636  * @new_mtu: new value for maximum frame size
       
  3637  *
       
  3638  * Returns 0 on success, negative on failure
       
  3639  **/
       
  3640 
       
  3641 static int e1000_change_mtu(struct net_device *netdev, int new_mtu)
       
  3642 {
       
  3643 	struct e1000_adapter *adapter = netdev_priv(netdev);
       
  3644 	struct e1000_hw *hw = &adapter->hw;
       
  3645 	int max_frame = new_mtu + ENET_HEADER_SIZE + ETHERNET_FCS_SIZE;
       
  3646 
       
  3647 	if (adapter->ecdev) {
       
  3648 		return -EBUSY;
       
  3649 	}
       
  3650 
       
  3651 	if ((max_frame < MINIMUM_ETHERNET_FRAME_SIZE) ||
       
  3652 	    (max_frame > MAX_JUMBO_FRAME_SIZE)) {
       
  3653 		e_err(probe, "Invalid MTU setting\n");
       
  3654 		return -EINVAL;
       
  3655 	}
       
  3656 
       
  3657 	/* Adapter-specific max frame size limits. */
       
  3658 	switch (hw->mac_type) {
       
  3659 	case e1000_undefined ... e1000_82542_rev2_1:
       
  3660 		if (max_frame > (ETH_FRAME_LEN + ETH_FCS_LEN)) {
       
  3661 			e_err(probe, "Jumbo Frames not supported.\n");
       
  3662 			return -EINVAL;
       
  3663 		}
       
  3664 		break;
       
  3665 	default:
       
  3666 		/* Capable of supporting up to MAX_JUMBO_FRAME_SIZE limit. */
       
  3667 		break;
       
  3668 	}
       
  3669 
       
  3670 	while (test_and_set_bit(__E1000_RESETTING, &adapter->flags))
       
  3671 		msleep(1);
       
  3672 	/* e1000_down has a dependency on max_frame_size */
       
  3673 	hw->max_frame_size = max_frame;
       
  3674 	if (netif_running(netdev))
       
  3675 		e1000_down(adapter);
       
  3676 
       
3677 	/* NOTE: netdev_alloc_skb reserves 16 bytes, and typically NET_IP_ALIGN
3678 	 * means we reserve 2 more; this pushes us to allocate from the next
3679 	 * larger slab size.
3680 	 * i.e. RXBUFFER_2048 --> size-4096 slab
3681 	 * However, with the new *_jumbo_rx* routines, jumbo receives will
3682 	 * use fragmented skbs. */
       
  3683 
       
  3684 	if (max_frame <= E1000_RXBUFFER_2048)
       
  3685 		adapter->rx_buffer_len = E1000_RXBUFFER_2048;
       
  3686 	else
       
  3687 #if (PAGE_SIZE >= E1000_RXBUFFER_16384)
       
  3688 		adapter->rx_buffer_len = E1000_RXBUFFER_16384;
       
  3689 #elif (PAGE_SIZE >= E1000_RXBUFFER_4096)
       
  3690 		adapter->rx_buffer_len = PAGE_SIZE;
       
  3691 #endif
       
  3692 
       
  3693 	/* adjust allocation if LPE protects us, and we aren't using SBP */
       
  3694 	if (!hw->tbi_compatibility_on &&
       
  3695 	    ((max_frame == (ETH_FRAME_LEN + ETH_FCS_LEN)) ||
       
  3696 	     (max_frame == MAXIMUM_ETHERNET_VLAN_SIZE)))
       
  3697 		adapter->rx_buffer_len = MAXIMUM_ETHERNET_VLAN_SIZE;
       
  3698 
       
  3699 	pr_info("%s changing MTU from %d to %d\n",
       
  3700 		netdev->name, netdev->mtu, new_mtu);
       
  3701 	netdev->mtu = new_mtu;
       
  3702 
       
  3703 	if (netif_running(netdev))
       
  3704 		e1000_up(adapter);
       
  3705 	else
       
  3706 		e1000_reset(adapter);
       
  3707 
       
  3708 	clear_bit(__E1000_RESETTING, &adapter->flags);
       
  3709 
       
  3710 	return 0;
       
  3711 }
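
/* Sizing example for the logic above (standard values): new_mtu = 1500
 * gives max_frame = 1500 + 14 + 4 = 1518, which fits E1000_RXBUFFER_2048;
 * a jumbo new_mtu = 9000 gives max_frame = 9018 and selects a 16 KB or
 * page-sized receive buffer depending on PAGE_SIZE.
 */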
       
  3712 
       
  3713 /**
       
  3714  * e1000_update_stats - Update the board statistics counters
       
  3715  * @adapter: board private structure
       
  3716  **/
       
  3717 
       
  3718 void e1000_update_stats(struct e1000_adapter *adapter)
       
  3719 {
       
  3720 	struct net_device *netdev = adapter->netdev;
       
  3721 	struct e1000_hw *hw = &adapter->hw;
       
  3722 	struct pci_dev *pdev = adapter->pdev;
       
  3723 	unsigned long flags = 0;
       
  3724 	u16 phy_tmp;
       
  3725 
       
  3726 #define PHY_IDLE_ERROR_COUNT_MASK 0x00FF
       
  3727 
       
  3728 	/*
       
  3729 	 * Prevent stats update while adapter is being reset, or if the pci
       
  3730 	 * connection is down.
       
  3731 	 */
       
  3732 	if (adapter->link_speed == 0)
       
  3733 		return;
       
  3734 	if (pci_channel_offline(pdev))
       
  3735 		return;
       
  3736 
       
  3737 	if (!adapter->ecdev) {
       
  3738 		spin_lock_irqsave(&adapter->stats_lock, flags);
       
  3739 	}
       
  3740 
       
  3741 	/* these counters are modified from e1000_tbi_adjust_stats,
       
  3742 	 * called from the interrupt context, so they must only
       
  3743 	 * be written while holding adapter->stats_lock
       
  3744 	 */
       
  3745 
       
  3746 	adapter->stats.crcerrs += er32(CRCERRS);
       
  3747 	adapter->stats.gprc += er32(GPRC);
       
  3748 	adapter->stats.gorcl += er32(GORCL);
       
  3749 	adapter->stats.gorch += er32(GORCH);
       
  3750 	adapter->stats.bprc += er32(BPRC);
       
  3751 	adapter->stats.mprc += er32(MPRC);
       
  3752 	adapter->stats.roc += er32(ROC);
       
  3753 
       
  3754 	adapter->stats.prc64 += er32(PRC64);
       
  3755 	adapter->stats.prc127 += er32(PRC127);
       
  3756 	adapter->stats.prc255 += er32(PRC255);
       
  3757 	adapter->stats.prc511 += er32(PRC511);
       
  3758 	adapter->stats.prc1023 += er32(PRC1023);
       
  3759 	adapter->stats.prc1522 += er32(PRC1522);
       
  3760 
       
  3761 	adapter->stats.symerrs += er32(SYMERRS);
       
  3762 	adapter->stats.mpc += er32(MPC);
       
  3763 	adapter->stats.scc += er32(SCC);
       
  3764 	adapter->stats.ecol += er32(ECOL);
       
  3765 	adapter->stats.mcc += er32(MCC);
       
  3766 	adapter->stats.latecol += er32(LATECOL);
       
  3767 	adapter->stats.dc += er32(DC);
       
  3768 	adapter->stats.sec += er32(SEC);
       
  3769 	adapter->stats.rlec += er32(RLEC);
       
  3770 	adapter->stats.xonrxc += er32(XONRXC);
       
  3771 	adapter->stats.xontxc += er32(XONTXC);
       
  3772 	adapter->stats.xoffrxc += er32(XOFFRXC);
       
  3773 	adapter->stats.xofftxc += er32(XOFFTXC);
       
  3774 	adapter->stats.fcruc += er32(FCRUC);
       
  3775 	adapter->stats.gptc += er32(GPTC);
       
  3776 	adapter->stats.gotcl += er32(GOTCL);
       
  3777 	adapter->stats.gotch += er32(GOTCH);
       
  3778 	adapter->stats.rnbc += er32(RNBC);
       
  3779 	adapter->stats.ruc += er32(RUC);
       
  3780 	adapter->stats.rfc += er32(RFC);
       
  3781 	adapter->stats.rjc += er32(RJC);
       
  3782 	adapter->stats.torl += er32(TORL);
       
  3783 	adapter->stats.torh += er32(TORH);
       
  3784 	adapter->stats.totl += er32(TOTL);
       
  3785 	adapter->stats.toth += er32(TOTH);
       
  3786 	adapter->stats.tpr += er32(TPR);
       
  3787 
       
  3788 	adapter->stats.ptc64 += er32(PTC64);
       
  3789 	adapter->stats.ptc127 += er32(PTC127);
       
  3790 	adapter->stats.ptc255 += er32(PTC255);
       
  3791 	adapter->stats.ptc511 += er32(PTC511);
       
  3792 	adapter->stats.ptc1023 += er32(PTC1023);
       
  3793 	adapter->stats.ptc1522 += er32(PTC1522);
       
  3794 
       
  3795 	adapter->stats.mptc += er32(MPTC);
       
  3796 	adapter->stats.bptc += er32(BPTC);
       
  3797 
       
  3798 	/* used for adaptive IFS */
       
  3799 
       
  3800 	hw->tx_packet_delta = er32(TPT);
       
  3801 	adapter->stats.tpt += hw->tx_packet_delta;
       
  3802 	hw->collision_delta = er32(COLC);
       
  3803 	adapter->stats.colc += hw->collision_delta;
       
  3804 
       
  3805 	if (hw->mac_type >= e1000_82543) {
       
  3806 		adapter->stats.algnerrc += er32(ALGNERRC);
       
  3807 		adapter->stats.rxerrc += er32(RXERRC);
       
  3808 		adapter->stats.tncrs += er32(TNCRS);
       
  3809 		adapter->stats.cexterr += er32(CEXTERR);
       
  3810 		adapter->stats.tsctc += er32(TSCTC);
       
  3811 		adapter->stats.tsctfc += er32(TSCTFC);
       
  3812 	}
       
  3813 
       
  3814 	/* Fill out the OS statistics structure */
       
  3815 	netdev->stats.multicast = adapter->stats.mprc;
       
  3816 	netdev->stats.collisions = adapter->stats.colc;
       
  3817 
       
  3818 	/* Rx Errors */
       
  3819 
       
3820 	/* RLEC on some newer hardware can be incorrect, so build
3821 	 * our own version based on RUC and ROC */
       
  3822 	netdev->stats.rx_errors = adapter->stats.rxerrc +
       
  3823 		adapter->stats.crcerrs + adapter->stats.algnerrc +
       
  3824 		adapter->stats.ruc + adapter->stats.roc +
       
  3825 		adapter->stats.cexterr;
       
  3826 	adapter->stats.rlerrc = adapter->stats.ruc + adapter->stats.roc;
       
  3827 	netdev->stats.rx_length_errors = adapter->stats.rlerrc;
       
  3828 	netdev->stats.rx_crc_errors = adapter->stats.crcerrs;
       
  3829 	netdev->stats.rx_frame_errors = adapter->stats.algnerrc;
       
  3830 	netdev->stats.rx_missed_errors = adapter->stats.mpc;
       
  3831 
       
  3832 	/* Tx Errors */
       
  3833 	adapter->stats.txerrc = adapter->stats.ecol + adapter->stats.latecol;
       
  3834 	netdev->stats.tx_errors = adapter->stats.txerrc;
       
  3835 	netdev->stats.tx_aborted_errors = adapter->stats.ecol;
       
  3836 	netdev->stats.tx_window_errors = adapter->stats.latecol;
       
  3837 	netdev->stats.tx_carrier_errors = adapter->stats.tncrs;
       
  3838 	if (hw->bad_tx_carr_stats_fd &&
       
  3839 	    adapter->link_duplex == FULL_DUPLEX) {
       
  3840 		netdev->stats.tx_carrier_errors = 0;
       
  3841 		adapter->stats.tncrs = 0;
       
  3842 	}
       
  3843 
       
  3844 	/* Tx Dropped needs to be maintained elsewhere */
       
  3845 
       
  3846 	/* Phy Stats */
       
  3847 	if (hw->media_type == e1000_media_type_copper) {
       
  3848 		if ((adapter->link_speed == SPEED_1000) &&
       
  3849 		   (!e1000_read_phy_reg(hw, PHY_1000T_STATUS, &phy_tmp))) {
       
  3850 			phy_tmp &= PHY_IDLE_ERROR_COUNT_MASK;
       
  3851 			adapter->phy_stats.idle_errors += phy_tmp;
       
  3852 		}
       
  3853 
       
  3854 		if ((hw->mac_type <= e1000_82546) &&
       
  3855 		   (hw->phy_type == e1000_phy_m88) &&
       
  3856 		   !e1000_read_phy_reg(hw, M88E1000_RX_ERR_CNTR, &phy_tmp))
       
  3857 			adapter->phy_stats.receive_errors += phy_tmp;
       
  3858 	}
       
  3859 
       
  3860 	/* Management Stats */
       
  3861 	if (hw->has_smbus) {
       
  3862 		adapter->stats.mgptc += er32(MGTPTC);
       
  3863 		adapter->stats.mgprc += er32(MGTPRC);
       
  3864 		adapter->stats.mgpdc += er32(MGTPDC);
       
  3865 	}
       
  3866 
       
  3867 	if (!adapter->ecdev) {
       
  3868 		spin_unlock_irqrestore(&adapter->stats_lock, flags);
       
  3869 	}
       
  3870 }
       
  3871 
       
  3872 void ec_poll(struct net_device *netdev)
       
  3873 {
       
  3874 	struct e1000_adapter *adapter = netdev_priv(netdev);
       
  3875 	if (jiffies - adapter->ec_watchdog_jiffies >= 2 * HZ) {
       
  3876 		e1000_watchdog(&adapter->watchdog_task.work);
       
  3877 		adapter->ec_watchdog_jiffies = jiffies;
       
  3878 	}
       
  3879 
       
  3880 	e1000_intr(0, netdev);
       
  3881 }
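
/* Usage sketch (hedged): the EtherCAT master calls ec_poll() cyclically
 * from its own context in place of the hardware interrupt; the guard
 * above re-runs the watchdog body at most every 2 * HZ jiffies, matching
 * the period the delayed work would otherwise use.
 */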
       
  3882 
       
  3883 /**
       
  3884  * e1000_intr - Interrupt Handler
       
  3885  * @irq: interrupt number
       
  3886  * @data: pointer to a network interface device structure
       
  3887  **/
       
  3888 
       
  3889 static irqreturn_t e1000_intr(int irq, void *data)
       
  3890 {
       
  3891 	struct net_device *netdev = data;
       
  3892 	struct e1000_adapter *adapter = netdev_priv(netdev);
       
  3893 	struct e1000_hw *hw = &adapter->hw;
       
  3894 	u32 icr = er32(ICR);
       
  3895 
       
3896 	if (unlikely(!icr))
       
  3897 		return IRQ_NONE;  /* Not our interrupt */
       
  3898 
       
  3899 	/*
       
  3900 	 * we might have caused the interrupt, but the above
       
  3901 	 * read cleared it, and just in case the driver is
       
  3902 	 * down there is nothing to do so return handled
       
  3903 	 */
       
  3904 	if (unlikely(test_bit(__E1000_DOWN, &adapter->flags)))
       
  3905 		return IRQ_HANDLED;
       
  3906 
       
  3907 	if (unlikely(icr & (E1000_ICR_RXSEQ | E1000_ICR_LSC))) {
       
  3908 		hw->get_link_status = 1;
       
  3909 		/* guard against interrupt when we're going down */
       
  3910 		if (!adapter->ecdev && !test_bit(__E1000_DOWN, &adapter->flags))
       
  3911 			schedule_delayed_work(&adapter->watchdog_task, 1);
       
  3912 	}
       
  3913 
       
	if (adapter->ecdev) {
		int i, ec_work_done = 0;

		for (i = 0; i < E1000_MAX_INTR; i++) {
			if (unlikely(!adapter->clean_rx(adapter, adapter->rx_ring,
							&ec_work_done, 100) &&
				     !e1000_clean_tx_irq(adapter, adapter->tx_ring))) {
				break;
			}
		}
	} else {
		/* disable interrupts, without the synchronize_irq bit */
		ew32(IMC, ~0);
		E1000_WRITE_FLUSH();

		if (likely(napi_schedule_prep(&adapter->napi))) {
			adapter->total_tx_bytes = 0;
			adapter->total_tx_packets = 0;
			adapter->total_rx_bytes = 0;
			adapter->total_rx_packets = 0;
			__napi_schedule(&adapter->napi);
		} else {
			/* this really should not happen! if it does it is basically a
			 * bug, but not a hard error, so enable ints and continue
			 */
			if (!test_bit(__E1000_DOWN, &adapter->flags))
				e1000_irq_enable(adapter);
		}
	}

	return IRQ_HANDLED;
}
       

/**
 * e1000_clean - NAPI Rx polling callback
 * @napi: napi struct containing our adapter
 * @budget: max number of packets we may clean in this call
 * EtherCAT: never called
 **/
static int e1000_clean(struct napi_struct *napi, int budget)
{
	struct e1000_adapter *adapter = container_of(napi, struct e1000_adapter,
						     napi);
	int tx_clean_complete = 0, work_done = 0;

	tx_clean_complete = e1000_clean_tx_irq(adapter, &adapter->tx_ring[0]);

	adapter->clean_rx(adapter, &adapter->rx_ring[0], &work_done, budget);

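	/* if TX cleaning did not complete, claim the full budget so NAPI
	 * keeps polling instead of re-enabling interrupts
	 */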
       
	if (!tx_clean_complete)
		work_done = budget;

	/* If budget not fully consumed, exit the polling mode */
	if (work_done < budget) {
		if (likely(adapter->itr_setting & 3))
			e1000_set_itr(adapter);
		napi_complete(napi);
		if (!test_bit(__E1000_DOWN, &adapter->flags))
			e1000_irq_enable(adapter);
	}

	return work_done;
}
       

/**
 * e1000_clean_tx_irq - Reclaim resources after transmit completes
 * @adapter: board private structure
 * @tx_ring: ring to clean
 **/
static bool e1000_clean_tx_irq(struct e1000_adapter *adapter,
			       struct e1000_tx_ring *tx_ring)
{
	struct e1000_hw *hw = &adapter->hw;
	struct net_device *netdev = adapter->netdev;
	struct e1000_tx_desc *tx_desc, *eop_desc;
	struct e1000_buffer *buffer_info;
	unsigned int i, eop;
	unsigned int count = 0;
	unsigned int total_tx_bytes = 0, total_tx_packets = 0;

	i = tx_ring->next_to_clean;
	eop = tx_ring->buffer_info[i].next_to_watch;
	eop_desc = E1000_TX_DESC(*tx_ring, eop);

	while ((eop_desc->upper.data & cpu_to_le32(E1000_TXD_STAT_DD)) &&
	       (count < tx_ring->count)) {
		bool cleaned = false;
		rmb();	/* read buffer_info after eop_desc */
		for ( ; !cleaned; count++) {
			tx_desc = E1000_TX_DESC(*tx_ring, i);
			buffer_info = &tx_ring->buffer_info[i];
			cleaned = (i == eop);

			if (cleaned) {
				total_tx_packets += buffer_info->segs;
				total_tx_bytes += buffer_info->bytecount;
			}
			e1000_unmap_and_free_tx_resource(adapter, buffer_info);
			tx_desc->upper.data = 0;

			if (unlikely(++i == tx_ring->count))
				i = 0;
		}

		eop = tx_ring->buffer_info[i].next_to_watch;
		eop_desc = E1000_TX_DESC(*tx_ring, eop);
	}

	tx_ring->next_to_clean = i;

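	/* restart the transmit queue once a healthy margin of
	 * descriptors has been reclaimed
	 */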
       
#define TX_WAKE_THRESHOLD 32
	if (!adapter->ecdev && unlikely(count && netif_carrier_ok(netdev) &&
		     E1000_DESC_UNUSED(tx_ring) >= TX_WAKE_THRESHOLD)) {
		/* Make sure that anybody stopping the queue after this
		 * sees the new next_to_clean.
		 */
		smp_mb();

		if (netif_queue_stopped(netdev) &&
		    !(test_bit(__E1000_DOWN, &adapter->flags))) {
			netif_wake_queue(netdev);
			++adapter->restart_queue;
		}
	}

	if (!adapter->ecdev && adapter->detect_tx_hung) {
		/* Detect a transmit hang in hardware, this serializes the
		 * check with the clearing of time_stamp and movement of i
		 */
		adapter->detect_tx_hung = false;
		if (tx_ring->buffer_info[eop].time_stamp &&
		    time_after(jiffies, tx_ring->buffer_info[eop].time_stamp +
		               (adapter->tx_timeout_factor * HZ)) &&
		    !(er32(STATUS) & E1000_STATUS_TXOFF)) {

			/* detected Tx unit hang */
			e_err(drv, "Detected Tx Unit Hang\n"
			      "  Tx Queue             <%lu>\n"
			      "  TDH                  <%x>\n"
			      "  TDT                  <%x>\n"
			      "  next_to_use          <%x>\n"
			      "  next_to_clean        <%x>\n"
			      "buffer_info[next_to_clean]\n"
			      "  time_stamp           <%lx>\n"
			      "  next_to_watch        <%x>\n"
			      "  jiffies              <%lx>\n"
			      "  next_to_watch.status <%x>\n",
				/* pointer subtraction already yields an
				 * element count; do not divide by the
				 * element size again
				 */
				(unsigned long)(tx_ring - adapter->tx_ring),
				readl(hw->hw_addr + tx_ring->tdh),
				readl(hw->hw_addr + tx_ring->tdt),
				tx_ring->next_to_use,
				tx_ring->next_to_clean,
				tx_ring->buffer_info[eop].time_stamp,
				eop,
				jiffies,
				eop_desc->upper.fields.status);
			e1000_dump(adapter);
			netif_stop_queue(netdev);
		}
	}
	adapter->total_tx_bytes += total_tx_bytes;
	adapter->total_tx_packets += total_tx_packets;
	netdev->stats.tx_bytes += total_tx_bytes;
	netdev->stats.tx_packets += total_tx_packets;
	return count < tx_ring->count;
}
       

/**
 * e1000_rx_checksum - Receive Checksum Offload for 82543
 * @adapter:     board private structure
 * @status_err:  receive descriptor status and error fields
 * @csum:        receive descriptor csum field
 * @skb:         socket buffer with received data
 **/
static void e1000_rx_checksum(struct e1000_adapter *adapter, u32 status_err,
			      u32 csum, struct sk_buff *skb)
{
	struct e1000_hw *hw = &adapter->hw;
	u16 status = (u16)status_err;
	u8 errors = (u8)(status_err >> 24);

	skb_checksum_none_assert(skb);

	/* 82543 or newer only */
	if (unlikely(hw->mac_type < e1000_82543))
		return;
	/* Ignore Checksum bit is set */
	if (unlikely(status & E1000_RXD_STAT_IXSM))
		return;
	/* TCP/UDP checksum error bit is set: let the stack verify it */
	if (unlikely(errors & E1000_RXD_ERR_TCPE)) {
		adapter->hw_csum_err++;
		return;
	}
	/* TCP/UDP Checksum has not been calculated */
	if (!(status & E1000_RXD_STAT_TCPCS))
		return;

	/* It must be a TCP or UDP packet with a valid checksum */
	skb->ip_summed = CHECKSUM_UNNECESSARY;
	adapter->hw_csum_good++;
}
       

/**
 * e1000_consume_page - helper function
 **/
static void e1000_consume_page(struct e1000_buffer *bi, struct sk_buff *skb,
			       u16 length)
{
	bi->page = NULL;
	skb->len += length;
	skb->data_len += length;
	skb->truesize += PAGE_SIZE;
}
       

/**
 * e1000_receive_skb - helper function to handle rx indications
 * @adapter: board private structure
 * @status: descriptor status field as written by hardware
 * @vlan: descriptor vlan field as written by hardware (no le/be conversion)
 * @skb: pointer to sk_buff to be indicated to stack
 */
static void e1000_receive_skb(struct e1000_adapter *adapter, u8 status,
			      __le16 vlan, struct sk_buff *skb)
{
	skb->protocol = eth_type_trans(skb, adapter->netdev);

	if (status & E1000_RXD_STAT_VP) {
		u16 vid = le16_to_cpu(vlan) & E1000_RXD_SPC_VLAN_MASK;

		__vlan_hwaccel_put_tag(skb, vid);
	}
	napi_gro_receive(&adapter->napi, skb);
}
       

/**
 * e1000_clean_jumbo_rx_irq - Send received data up the network stack; legacy
 * @adapter: board private structure
 * @rx_ring: ring to clean
 * @work_done: amount of napi work completed this call
 * @work_to_do: max amount of work allowed for this call to do
 *
 * the return value indicates whether actual cleaning was done, there
 * is no guarantee that everything was cleaned
 */
static bool e1000_clean_jumbo_rx_irq(struct e1000_adapter *adapter,
				     struct e1000_rx_ring *rx_ring,
				     int *work_done, int work_to_do)
{
	struct e1000_hw *hw = &adapter->hw;
	struct net_device *netdev = adapter->netdev;
	struct pci_dev *pdev = adapter->pdev;
	struct e1000_rx_desc *rx_desc, *next_rxd;
	struct e1000_buffer *buffer_info, *next_buffer;
	unsigned long irq_flags;
	u32 length;
	unsigned int i;
	int cleaned_count = 0;
	bool cleaned = false;
	unsigned int total_rx_bytes = 0, total_rx_packets = 0;

	i = rx_ring->next_to_clean;
	rx_desc = E1000_RX_DESC(*rx_ring, i);
	buffer_info = &rx_ring->buffer_info[i];

	while (rx_desc->status & E1000_RXD_STAT_DD) {
		struct sk_buff *skb;
		u8 status;

		if (*work_done >= work_to_do)
			break;
		(*work_done)++;
		rmb(); /* read descriptor and rx_buffer_info after status DD */

		status = rx_desc->status;
		skb = buffer_info->skb;
		if (!adapter->ecdev) {
			buffer_info->skb = NULL;
		}

		if (++i == rx_ring->count)
			i = 0;
		next_rxd = E1000_RX_DESC(*rx_ring, i);
		prefetch(next_rxd);

		next_buffer = &rx_ring->buffer_info[i];

		cleaned = true;
		cleaned_count++;
		dma_unmap_page(&pdev->dev, buffer_info->dma,
			       buffer_info->length, DMA_FROM_DEVICE);
		buffer_info->dma = 0;

		length = le16_to_cpu(rx_desc->length);

		/* errors is only valid for DD + EOP descriptors */
		if (!adapter->ecdev &&
		    unlikely((status & E1000_RXD_STAT_EOP) &&
		    (rx_desc->errors & E1000_RXD_ERR_FRAME_ERR_MASK))) {
			u8 *mapped;
			u8 last_byte;

			mapped = page_address(buffer_info->page);
			last_byte = *(mapped + length - 1);
			if (TBI_ACCEPT(hw, status, rx_desc->errors, length,
				       last_byte)) {
				spin_lock_irqsave(&adapter->stats_lock,
						  irq_flags);
				e1000_tbi_adjust_stats(hw, &adapter->stats,
						       length, mapped);
				spin_unlock_irqrestore(&adapter->stats_lock,
						       irq_flags);
				length--;
			} else {
				if (netdev->features & NETIF_F_RXALL)
					goto process_skb;
				/* recycle both page and skb */
				buffer_info->skb = skb;
				/* an error means any chain goes out the
				 * window too
				 */
				if (rx_ring->rx_skb_top)
					dev_kfree_skb(rx_ring->rx_skb_top);
				rx_ring->rx_skb_top = NULL;
				goto next_desc;
			}
		}

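	/* rx_skb_top carries a partially assembled jumbo frame: page
	 * fragments are appended descriptor by descriptor until a
	 * descriptor with the EOP bit completes the chain
	 */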
       
#define rxtop rx_ring->rx_skb_top
process_skb:
		if (!(status & E1000_RXD_STAT_EOP)) {
			/* this descriptor is only the beginning (or middle) */
			if (!rxtop) {
				/* this is the beginning of a chain */
				rxtop = skb;
				skb_fill_page_desc(rxtop, 0, buffer_info->page,
						   0, length);
			} else {
				/* this is the middle of a chain */
				skb_fill_page_desc(rxtop,
				    skb_shinfo(rxtop)->nr_frags,
				    buffer_info->page, 0, length);
				/* re-use the skb, only consumed the page */
				buffer_info->skb = skb;
			}
			e1000_consume_page(buffer_info, rxtop, length);
			goto next_desc;
		} else {
			if (rxtop) {
				/* end of the chain */
				skb_fill_page_desc(rxtop,
				    skb_shinfo(rxtop)->nr_frags,
				    buffer_info->page, 0, length);
				/* re-use the current skb, we only consumed the
				 * page
				 */
				buffer_info->skb = skb;
				skb = rxtop;
				rxtop = NULL;
				e1000_consume_page(buffer_info, skb, length);
			} else {
				/* no chain, got EOP, this buffer is the whole
				 * packet; copybreak to save the
				 * put_page/alloc_page
				 */
				if (length <= copybreak &&
				    skb_tailroom(skb) >= length) {
					u8 *vaddr;
					vaddr = kmap_atomic(buffer_info->page);
					memcpy(skb_tail_pointer(skb), vaddr,
					       length);
					kunmap_atomic(vaddr);
					/* re-use the page, so don't erase
					 * buffer_info->page
					 */
					skb_put(skb, length);
				} else {
					skb_fill_page_desc(skb, 0,
							   buffer_info->page, 0,
							   length);
					e1000_consume_page(buffer_info, skb,
							   length);
				}
			}
		}

		/* Receive Checksum Offload XXX recompute due to CRC strip? */
		e1000_rx_checksum(adapter,
				  (u32)(status) |
				  ((u32)(rx_desc->errors) << 24),
				  le16_to_cpu(rx_desc->csum), skb);

		total_rx_bytes += (skb->len - 4); /* don't count FCS */
		if (likely(!(netdev->features & NETIF_F_RXFCS)))
			pskb_trim(skb, skb->len - 4);
		total_rx_packets++;

		/* eth type trans needs skb->data to point to something */
		if (!pskb_may_pull(skb, ETH_HLEN)) {
			e_err(drv, "pskb_may_pull failed.\n");
			if (!adapter->ecdev) {
				dev_kfree_skb(skb);
			}
			goto next_desc;
		}

		if (adapter->ecdev) {
			ecdev_receive(adapter->ecdev, skb->data, length);

			// No need to detect link status as
			// long as frames are received: Reset watchdog.
			adapter->ec_watchdog_jiffies = jiffies;
		} else {
			e1000_receive_skb(adapter, status, rx_desc->special,
					  skb);
		}

next_desc:
		rx_desc->status = 0;

		/* return some buffers to hardware, one at a time is too slow */
		if (unlikely(cleaned_count >= E1000_RX_BUFFER_WRITE)) {
			adapter->alloc_rx_buf(adapter, rx_ring, cleaned_count);
			cleaned_count = 0;
		}

		/* use prefetched values */
		rx_desc = next_rxd;
		buffer_info = next_buffer;
	}
	rx_ring->next_to_clean = i;

	cleaned_count = E1000_DESC_UNUSED(rx_ring);
	if (cleaned_count)
		adapter->alloc_rx_buf(adapter, rx_ring, cleaned_count);

	adapter->total_rx_packets += total_rx_packets;
	adapter->total_rx_bytes += total_rx_bytes;
	netdev->stats.rx_bytes += total_rx_bytes;
	netdev->stats.rx_packets += total_rx_packets;
	return cleaned;
}
       

/*
 * this should improve performance for small packets with large amounts
 * of reassembly being done in the stack
 */
static void e1000_check_copybreak(struct net_device *netdev,
				  struct e1000_buffer *buffer_info,
				  u32 length, struct sk_buff **skb)
{
	struct e1000_adapter *adapter = netdev_priv(netdev);
	struct sk_buff *new_skb;

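	/* in EtherCAT operation the frame is consumed in place via
	 * ecdev_receive() and the ring skb is recycled anyway, so the
	 * copybreak copy is skipped
	 */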
       
	if (adapter->ecdev || length > copybreak)
		return;

	new_skb = netdev_alloc_skb_ip_align(netdev, length);
	if (!new_skb)
		return;

	skb_copy_to_linear_data_offset(new_skb, -NET_IP_ALIGN,
				       (*skb)->data - NET_IP_ALIGN,
				       length + NET_IP_ALIGN);
	/* save the skb in buffer_info as good */
	buffer_info->skb = *skb;
	*skb = new_skb;
}
       

/**
 * e1000_clean_rx_irq - Send received data up the network stack; legacy
 * @adapter: board private structure
 * @rx_ring: ring to clean
 * @work_done: amount of napi work completed this call
 * @work_to_do: max amount of work allowed for this call to do
 */
static bool e1000_clean_rx_irq(struct e1000_adapter *adapter,
			       struct e1000_rx_ring *rx_ring,
			       int *work_done, int work_to_do)
{
	struct e1000_hw *hw = &adapter->hw;
	struct net_device *netdev = adapter->netdev;
	struct pci_dev *pdev = adapter->pdev;
	struct e1000_rx_desc *rx_desc, *next_rxd;
	struct e1000_buffer *buffer_info, *next_buffer;
	unsigned long flags;
	u32 length;
	unsigned int i;
	int cleaned_count = 0;
	bool cleaned = false;
	unsigned int total_rx_bytes = 0, total_rx_packets = 0;

	i = rx_ring->next_to_clean;
	rx_desc = E1000_RX_DESC(*rx_ring, i);
	buffer_info = &rx_ring->buffer_info[i];

	while (rx_desc->status & E1000_RXD_STAT_DD) {
		struct sk_buff *skb;
		u8 status;

		if (*work_done >= work_to_do)
			break;
		(*work_done)++;
		rmb(); /* read descriptor and rx_buffer_info after status DD */

		status = rx_desc->status;
		skb = buffer_info->skb;
		if (!adapter->ecdev) {
			buffer_info->skb = NULL;
		}

		prefetch(skb->data - NET_IP_ALIGN);

		if (++i == rx_ring->count)
			i = 0;
		next_rxd = E1000_RX_DESC(*rx_ring, i);
		prefetch(next_rxd);

		next_buffer = &rx_ring->buffer_info[i];

		cleaned = true;
		cleaned_count++;
		dma_unmap_single(&pdev->dev, buffer_info->dma,
				 buffer_info->length, DMA_FROM_DEVICE);
		buffer_info->dma = 0;

		length = le16_to_cpu(rx_desc->length);
		/* !EOP means multiple descriptors were used to store a single
		 * packet, if that's the case we need to toss it.  In fact, we
		 * need to toss every packet with the EOP bit clear and the
		 * next frame that _does_ have the EOP bit set, as it is by
		 * definition only a frame fragment
		 */
		if (unlikely(!(status & E1000_RXD_STAT_EOP)))
			adapter->discarding = true;

		if (adapter->discarding) {
			/* All receives must fit into a single buffer */
			e_dbg("Receive packet consumed multiple buffers\n");
			/* recycle */
			buffer_info->skb = skb;
			if (status & E1000_RXD_STAT_EOP)
				adapter->discarding = false;
			goto next_desc;
		}

		if (!adapter->ecdev &&
		    unlikely(rx_desc->errors & E1000_RXD_ERR_FRAME_ERR_MASK)) {
			u8 last_byte = *(skb->data + length - 1);
			if (TBI_ACCEPT(hw, status, rx_desc->errors, length,
				       last_byte)) {
				spin_lock_irqsave(&adapter->stats_lock, flags);
				e1000_tbi_adjust_stats(hw, &adapter->stats,
						       length, skb->data);
				spin_unlock_irqrestore(&adapter->stats_lock,
						       flags);
				length--;
			} else {
				if (netdev->features & NETIF_F_RXALL)
					goto process_skb;
				/* recycle */
				buffer_info->skb = skb;
				goto next_desc;
			}
		}

process_skb:
		total_rx_bytes += (length - 4); /* don't count FCS */
		total_rx_packets++;

		if (likely(!(netdev->features & NETIF_F_RXFCS)))
			/* adjust length to remove Ethernet CRC, this must be
			 * done after the TBI_ACCEPT workaround above
			 */
			length -= 4;

		e1000_check_copybreak(netdev, buffer_info, length, &skb);

		skb_put(skb, length);

		/* Receive Checksum Offload */
		e1000_rx_checksum(adapter,
				  (u32)(status) |
				  ((u32)(rx_desc->errors) << 24),
				  le16_to_cpu(rx_desc->csum), skb);

		if (adapter->ecdev) {
			ecdev_receive(adapter->ecdev, skb->data, length);

			// No need to detect link status as
			// long as frames are received: Reset watchdog.
			adapter->ec_watchdog_jiffies = jiffies;
		} else {
			e1000_receive_skb(adapter, status, rx_desc->special,
					  skb);
		}

next_desc:
		rx_desc->status = 0;

		/* return some buffers to hardware, one at a time is too slow */
		if (unlikely(cleaned_count >= E1000_RX_BUFFER_WRITE)) {
			adapter->alloc_rx_buf(adapter, rx_ring, cleaned_count);
			cleaned_count = 0;
		}

		/* use prefetched values */
		rx_desc = next_rxd;
		buffer_info = next_buffer;
	}
	rx_ring->next_to_clean = i;

	cleaned_count = E1000_DESC_UNUSED(rx_ring);
	if (cleaned_count)
		adapter->alloc_rx_buf(adapter, rx_ring, cleaned_count);

	adapter->total_rx_packets += total_rx_packets;
	adapter->total_rx_bytes += total_rx_bytes;
	netdev->stats.rx_bytes += total_rx_bytes;
	netdev->stats.rx_packets += total_rx_packets;
	return cleaned;
}
       

/**
 * e1000_alloc_jumbo_rx_buffers - Replace used jumbo receive buffers
 * @adapter: address of board private structure
 * @rx_ring: pointer to receive ring structure
 * @cleaned_count: number of buffers to allocate this pass
 **/
static void
e1000_alloc_jumbo_rx_buffers(struct e1000_adapter *adapter,
			     struct e1000_rx_ring *rx_ring, int cleaned_count)
{
	struct net_device *netdev = adapter->netdev;
	struct pci_dev *pdev = adapter->pdev;
	struct e1000_rx_desc *rx_desc;
	struct e1000_buffer *buffer_info;
	struct sk_buff *skb;
	unsigned int i;
	unsigned int bufsz = 256 - 16 /* for skb_reserve */;

	i = rx_ring->next_to_use;
	buffer_info = &rx_ring->buffer_info[i];

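	/* each jumbo buffer pairs a small (256 byte) skb with a full page;
	 * packet data is DMA'd into the page, the linear skb area only
	 * holds headers pulled later or small frames copied out of the page
	 */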
       
	while (cleaned_count--) {
		skb = buffer_info->skb;
		if (skb) {
			skb_trim(skb, 0);
			goto check_page;
		}

		skb = netdev_alloc_skb_ip_align(netdev, bufsz);
		if (unlikely(!skb)) {
			/* Better luck next round */
			adapter->alloc_rx_buff_failed++;
			break;
		}

		buffer_info->skb = skb;
		buffer_info->length = adapter->rx_buffer_len;
check_page:
		/* allocate a new page if necessary */
		if (!buffer_info->page) {
			buffer_info->page = alloc_page(GFP_ATOMIC);
			if (unlikely(!buffer_info->page)) {
				adapter->alloc_rx_buff_failed++;
				break;
			}
		}

		if (!buffer_info->dma) {
			buffer_info->dma = dma_map_page(&pdev->dev,
							buffer_info->page, 0,
							buffer_info->length,
							DMA_FROM_DEVICE);
			if (dma_mapping_error(&pdev->dev, buffer_info->dma)) {
				put_page(buffer_info->page);
				dev_kfree_skb(skb);
				buffer_info->page = NULL;
				buffer_info->skb = NULL;
				buffer_info->dma = 0;
				adapter->alloc_rx_buff_failed++;
				break; /* while !buffer_info->skb */
			}
		}

		rx_desc = E1000_RX_DESC(*rx_ring, i);
		rx_desc->buffer_addr = cpu_to_le64(buffer_info->dma);

		if (unlikely(++i == rx_ring->count))
			i = 0;
		buffer_info = &rx_ring->buffer_info[i];
	}

	if (likely(rx_ring->next_to_use != i)) {
		rx_ring->next_to_use = i;
		if (unlikely(i-- == 0))
			i = (rx_ring->count - 1);

		/* Force memory writes to complete before letting h/w
		 * know there are new descriptors to fetch.  (Only
		 * applicable for weak-ordered memory model archs,
		 * such as IA-64).
		 */
		wmb();
		writel(i, adapter->hw.hw_addr + rx_ring->rdt);
	}
}
       

/**
 * e1000_alloc_rx_buffers - Replace used receive buffers; legacy & extended
 * @adapter: address of board private structure
 * @rx_ring: pointer to receive ring structure
 * @cleaned_count: number of buffers to allocate this pass
 **/
static void e1000_alloc_rx_buffers(struct e1000_adapter *adapter,
				   struct e1000_rx_ring *rx_ring,
				   int cleaned_count)
{
	struct e1000_hw *hw = &adapter->hw;
	struct net_device *netdev = adapter->netdev;
	struct pci_dev *pdev = adapter->pdev;
	struct e1000_rx_desc *rx_desc;
	struct e1000_buffer *buffer_info;
	struct sk_buff *skb;
	unsigned int i;
	unsigned int bufsz = adapter->rx_buffer_len;

	i = rx_ring->next_to_use;
	buffer_info = &rx_ring->buffer_info[i];

	while (cleaned_count--) {
		skb = buffer_info->skb;
		if (skb) {
			skb_trim(skb, 0);
			goto map_skb;
		}

		skb = netdev_alloc_skb_ip_align(netdev, bufsz);
		if (unlikely(!skb)) {
			/* Better luck next round */
			adapter->alloc_rx_buff_failed++;
			break;
		}

		/* Fix for errata 23, can't cross 64kB boundary */
		if (!e1000_check_64k_bound(adapter, skb->data, bufsz)) {
			struct sk_buff *oldskb = skb;
			e_err(rx_err, "skb align check failed: %u bytes at "
			      "%p\n", bufsz, skb->data);
			/* Try again, without freeing the previous */
			skb = netdev_alloc_skb_ip_align(netdev, bufsz);
			/* Failed allocation, critical failure */
			if (!skb) {
				dev_kfree_skb(oldskb);
				adapter->alloc_rx_buff_failed++;
				break;
			}

			if (!e1000_check_64k_bound(adapter, skb->data, bufsz)) {
				/* give up */
				dev_kfree_skb(skb);
				dev_kfree_skb(oldskb);
				adapter->alloc_rx_buff_failed++;
				break; /* while !buffer_info->skb */
			}

			/* Use new allocation */
			dev_kfree_skb(oldskb);
		}
		buffer_info->skb = skb;
		buffer_info->length = adapter->rx_buffer_len;
map_skb:
		buffer_info->dma = dma_map_single(&pdev->dev,
						  skb->data,
						  buffer_info->length,
						  DMA_FROM_DEVICE);
		if (dma_mapping_error(&pdev->dev, buffer_info->dma)) {
			dev_kfree_skb(skb);
			buffer_info->skb = NULL;
			buffer_info->dma = 0;
			adapter->alloc_rx_buff_failed++;
			break; /* while !buffer_info->skb */
		}

		/*
		 * XXX if it was allocated cleanly it will never map to a
		 * boundary crossing
		 */

		/* Fix for errata 23, can't cross 64kB boundary */
		if (!e1000_check_64k_bound(adapter,
					(void *)(unsigned long)buffer_info->dma,
					adapter->rx_buffer_len)) {
			e_err(rx_err, "dma align check failed: %u bytes at "
			      "%p\n", adapter->rx_buffer_len,
			      (void *)(unsigned long)buffer_info->dma);
			dev_kfree_skb(skb);
			buffer_info->skb = NULL;

			dma_unmap_single(&pdev->dev, buffer_info->dma,
					 adapter->rx_buffer_len,
					 DMA_FROM_DEVICE);
			buffer_info->dma = 0;

			adapter->alloc_rx_buff_failed++;
			break; /* while !buffer_info->skb */
		}
		rx_desc = E1000_RX_DESC(*rx_ring, i);
		rx_desc->buffer_addr = cpu_to_le64(buffer_info->dma);

		if (unlikely(++i == rx_ring->count))
			i = 0;
		buffer_info = &rx_ring->buffer_info[i];
	}

	if (likely(rx_ring->next_to_use != i)) {
		rx_ring->next_to_use = i;
		if (unlikely(i-- == 0))
			i = (rx_ring->count - 1);

		/* Force memory writes to complete before letting h/w
		 * know there are new descriptors to fetch.  (Only
		 * applicable for weak-ordered memory model archs,
		 * such as IA-64).
		 */
		wmb();
		writel(i, hw->hw_addr + rx_ring->rdt);
	}
}
       

/**
 * e1000_smartspeed - Workaround for SmartSpeed on 82541 and 82547 controllers.
 * @adapter: board private structure
 **/
static void e1000_smartspeed(struct e1000_adapter *adapter)
{
	struct e1000_hw *hw = &adapter->hw;
	u16 phy_status;
	u16 phy_ctrl;

	if ((hw->phy_type != e1000_phy_igp) || !hw->autoneg ||
	    !(hw->autoneg_advertised & ADVERTISE_1000_FULL))
		return;

	if (adapter->smartspeed == 0) {
		/* If Master/Slave config fault is asserted twice,
		 * we assume back-to-back faults
		 */
		e1000_read_phy_reg(hw, PHY_1000T_STATUS, &phy_status);
		if (!(phy_status & SR_1000T_MS_CONFIG_FAULT))
			return;
		e1000_read_phy_reg(hw, PHY_1000T_STATUS, &phy_status);
		if (!(phy_status & SR_1000T_MS_CONFIG_FAULT))
			return;
		e1000_read_phy_reg(hw, PHY_1000T_CTRL, &phy_ctrl);
		if (phy_ctrl & CR_1000T_MS_ENABLE) {
			phy_ctrl &= ~CR_1000T_MS_ENABLE;
			e1000_write_phy_reg(hw, PHY_1000T_CTRL,
					    phy_ctrl);
			adapter->smartspeed++;
			if (!e1000_phy_setup_autoneg(hw) &&
			    !e1000_read_phy_reg(hw, PHY_CTRL,
						&phy_ctrl)) {
				phy_ctrl |= (MII_CR_AUTO_NEG_EN |
					     MII_CR_RESTART_AUTO_NEG);
				e1000_write_phy_reg(hw, PHY_CTRL,
						    phy_ctrl);
			}
		}
		return;
	} else if (adapter->smartspeed == E1000_SMARTSPEED_DOWNSHIFT) {
		/* If still no link, perhaps using 2/3 pair cable */
		e1000_read_phy_reg(hw, PHY_1000T_CTRL, &phy_ctrl);
		phy_ctrl |= CR_1000T_MS_ENABLE;
		e1000_write_phy_reg(hw, PHY_1000T_CTRL, phy_ctrl);
		if (!e1000_phy_setup_autoneg(hw) &&
		    !e1000_read_phy_reg(hw, PHY_CTRL, &phy_ctrl)) {
			phy_ctrl |= (MII_CR_AUTO_NEG_EN |
				     MII_CR_RESTART_AUTO_NEG);
			e1000_write_phy_reg(hw, PHY_CTRL, phy_ctrl);
		}
	}
	/* Restart process after E1000_SMARTSPEED_MAX iterations */
	if (adapter->smartspeed++ == E1000_SMARTSPEED_MAX)
		adapter->smartspeed = 0;
}
       

/**
 * e1000_ioctl - handle device-specific ioctls
 * @netdev: network interface device structure
 * @ifr: interface request structure
 * @cmd: ioctl command
 **/
static int e1000_ioctl(struct net_device *netdev, struct ifreq *ifr, int cmd)
{
	switch (cmd) {
	case SIOCGMIIPHY:
	case SIOCGMIIREG:
	case SIOCSMIIREG:
		return e1000_mii_ioctl(netdev, ifr, cmd);
	default:
		return -EOPNOTSUPP;
	}
}
       

/**
 * e1000_mii_ioctl - handle MII register ioctls
 * @netdev: network interface device structure
 * @ifr: interface request structure carrying the mii_ioctl_data
 * @cmd: SIOCGMIIPHY, SIOCGMIIREG or SIOCSMIIREG
 **/
static int e1000_mii_ioctl(struct net_device *netdev, struct ifreq *ifr,
			   int cmd)
{
	struct e1000_adapter *adapter = netdev_priv(netdev);
	struct e1000_hw *hw = &adapter->hw;
	struct mii_ioctl_data *data = if_mii(ifr);
	int retval;
	u16 mii_reg;
	unsigned long flags;

	if (hw->media_type != e1000_media_type_copper)
		return -EOPNOTSUPP;

	switch (cmd) {
	case SIOCGMIIPHY:
		data->phy_id = hw->phy_addr;
		break;
	case SIOCGMIIREG:
		if (adapter->ecdev) {
			return -EPERM;
		}
		spin_lock_irqsave(&adapter->stats_lock, flags);
		if (e1000_read_phy_reg(hw, data->reg_num & 0x1F,
				       &data->val_out)) {
			spin_unlock_irqrestore(&adapter->stats_lock, flags);
			return -EIO;
		}
		spin_unlock_irqrestore(&adapter->stats_lock, flags);
		break;
	case SIOCSMIIREG:
		if (adapter->ecdev) {
			return -EPERM;
		}
		if (data->reg_num & ~(0x1F))
			return -EFAULT;
		mii_reg = data->val_in;
		spin_lock_irqsave(&adapter->stats_lock, flags);
		if (e1000_write_phy_reg(hw, data->reg_num,
					mii_reg)) {
			spin_unlock_irqrestore(&adapter->stats_lock, flags);
			return -EIO;
		}
		spin_unlock_irqrestore(&adapter->stats_lock, flags);
		if (hw->media_type == e1000_media_type_copper) {
			switch (data->reg_num) {
			case PHY_CTRL:
				if (mii_reg & MII_CR_POWER_DOWN)
					break;
				if (mii_reg & MII_CR_AUTO_NEG_EN) {
					hw->autoneg = 1;
					hw->autoneg_advertised = 0x2F;
				} else {
					u32 speed;
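
					/* decode a forced setting from the
					 * BMCR bits: 0x40 = speed MSB (1000),
					 * 0x2000 = speed LSB (100),
					 * 0x100 = full duplex
					 */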
       
					if (mii_reg & 0x40)
						speed = SPEED_1000;
					else if (mii_reg & 0x2000)
						speed = SPEED_100;
					else
						speed = SPEED_10;
					retval = e1000_set_spd_dplx(
						adapter, speed,
						((mii_reg & 0x100)
						 ? DUPLEX_FULL :
						 DUPLEX_HALF));
					if (retval)
						return retval;
				}
				if (netif_running(adapter->netdev))
					e1000_reinit_locked(adapter);
				else
					e1000_reset(adapter);
				break;
			case M88E1000_PHY_SPEC_CTRL:
			case M88E1000_EXT_PHY_SPEC_CTRL:
				if (e1000_phy_reset(hw))
					return -EIO;
				break;
			}
		} else {
			switch (data->reg_num) {
			case PHY_CTRL:
				if (mii_reg & MII_CR_POWER_DOWN)
					break;
				if (netif_running(adapter->netdev))
					e1000_reinit_locked(adapter);
				else
					e1000_reset(adapter);
				break;
			}
		}
		break;
	default:
		return -EOPNOTSUPP;
	}
	return E1000_SUCCESS;
}
       

void e1000_pci_set_mwi(struct e1000_hw *hw)
{
	struct e1000_adapter *adapter = hw->back;
	int ret_val = pci_set_mwi(adapter->pdev);

	if (ret_val)
		e_err(probe, "Error in setting MWI\n");
}

void e1000_pci_clear_mwi(struct e1000_hw *hw)
{
	struct e1000_adapter *adapter = hw->back;

	pci_clear_mwi(adapter->pdev);
}

int e1000_pcix_get_mmrbc(struct e1000_hw *hw)
{
	struct e1000_adapter *adapter = hw->back;

	return pcix_get_mmrbc(adapter->pdev);
}

void e1000_pcix_set_mmrbc(struct e1000_hw *hw, int mmrbc)
{
	struct e1000_adapter *adapter = hw->back;

	pcix_set_mmrbc(adapter->pdev, mmrbc);
}

void e1000_io_write(struct e1000_hw *hw, unsigned long port, u32 value)
{
	outl(value, port);
}
       

static bool e1000_vlan_used(struct e1000_adapter *adapter)
{
	u16 vid;

	for_each_set_bit(vid, adapter->active_vlans, VLAN_N_VID)
		return true;
	return false;
}
       

static void __e1000_vlan_mode(struct e1000_adapter *adapter,
			      netdev_features_t features)
{
	struct e1000_hw *hw = &adapter->hw;
	u32 ctrl;

	ctrl = er32(CTRL);
	if (features & NETIF_F_HW_VLAN_RX) {
		/* enable VLAN tag insert/strip */
		ctrl |= E1000_CTRL_VME;
	} else {
		/* disable VLAN tag insert/strip */
		ctrl &= ~E1000_CTRL_VME;
	}
	ew32(CTRL, ctrl);
}
       

static void e1000_vlan_filter_on_off(struct e1000_adapter *adapter,
				     bool filter_on)
{
	struct e1000_hw *hw = &adapter->hw;
	u32 rctl;

	if (!test_bit(__E1000_DOWN, &adapter->flags))
		e1000_irq_disable(adapter);

	__e1000_vlan_mode(adapter, adapter->netdev->features);
	if (filter_on) {
		/* enable VLAN receive filtering */
		rctl = er32(RCTL);
		rctl &= ~E1000_RCTL_CFIEN;
		if (!(adapter->netdev->flags & IFF_PROMISC))
			rctl |= E1000_RCTL_VFE;
		ew32(RCTL, rctl);
		e1000_update_mng_vlan(adapter);
	} else {
		/* disable VLAN receive filtering */
		rctl = er32(RCTL);
		rctl &= ~E1000_RCTL_VFE;
		ew32(RCTL, rctl);
	}

	if (!test_bit(__E1000_DOWN, &adapter->flags))
		e1000_irq_enable(adapter);
}
       

static void e1000_vlan_mode(struct net_device *netdev,
			    netdev_features_t features)
{
	struct e1000_adapter *adapter = netdev_priv(netdev);

	if (!test_bit(__E1000_DOWN, &adapter->flags))
		e1000_irq_disable(adapter);

	__e1000_vlan_mode(adapter, features);

	if (!test_bit(__E1000_DOWN, &adapter->flags))
		e1000_irq_enable(adapter);
}
       

static int e1000_vlan_rx_add_vid(struct net_device *netdev, u16 vid)
{
	struct e1000_adapter *adapter = netdev_priv(netdev);
	struct e1000_hw *hw = &adapter->hw;
	u32 vfta, index;

	if ((hw->mng_cookie.status &
	     E1000_MNG_DHCP_COOKIE_STATUS_VLAN_SUPPORT) &&
	    (vid == adapter->mng_vlan_id))
		return 0;

	if (!e1000_vlan_used(adapter))
		e1000_vlan_filter_on_off(adapter, true);

	/* add VID to filter table */
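	/* the VFTA is an array of 128 32-bit registers: VID bits [11:5]
	 * select the register, bits [4:0] the bit within it
	 */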
       
	index = (vid >> 5) & 0x7F;
	vfta = E1000_READ_REG_ARRAY(hw, VFTA, index);
	vfta |= (1 << (vid & 0x1F));
	e1000_write_vfta(hw, index, vfta);

	set_bit(vid, adapter->active_vlans);

	return 0;
}
       

static int e1000_vlan_rx_kill_vid(struct net_device *netdev, u16 vid)
{
	struct e1000_adapter *adapter = netdev_priv(netdev);
	struct e1000_hw *hw = &adapter->hw;
	u32 vfta, index;

	if (!test_bit(__E1000_DOWN, &adapter->flags))
		e1000_irq_disable(adapter);
	if (!test_bit(__E1000_DOWN, &adapter->flags))
		e1000_irq_enable(adapter);

	/* remove VID from filter table */
	index = (vid >> 5) & 0x7F;
	vfta = E1000_READ_REG_ARRAY(hw, VFTA, index);
	vfta &= ~(1 << (vid & 0x1F));
	e1000_write_vfta(hw, index, vfta);

	clear_bit(vid, adapter->active_vlans);

	if (!e1000_vlan_used(adapter))
		e1000_vlan_filter_on_off(adapter, false);

	return 0;
}
       

static void e1000_restore_vlan(struct e1000_adapter *adapter)
{
	u16 vid;

	if (!e1000_vlan_used(adapter))
		return;

	e1000_vlan_filter_on_off(adapter, true);
	for_each_set_bit(vid, adapter->active_vlans, VLAN_N_VID)
		e1000_vlan_rx_add_vid(adapter->netdev, vid);
}
       

int e1000_set_spd_dplx(struct e1000_adapter *adapter, u32 spd, u8 dplx)
{
	struct e1000_hw *hw = &adapter->hw;

	hw->autoneg = 0;

	/* Make sure dplx is at most 1 bit and lsb of speed is not set
	 * for the switch() below to work
	 */
	if ((spd & 1) || (dplx & ~1))
		goto err_inval;

	/* Fiber NICs only support 1000 Mbps full duplex */
	if ((hw->media_type == e1000_media_type_fiber) &&
	    spd != SPEED_1000 &&
	    dplx != DUPLEX_FULL)
		goto err_inval;

	switch (spd + dplx) {
	case SPEED_10 + DUPLEX_HALF:
		hw->forced_speed_duplex = e1000_10_half;
		break;
	case SPEED_10 + DUPLEX_FULL:
		hw->forced_speed_duplex = e1000_10_full;
		break;
	case SPEED_100 + DUPLEX_HALF:
		hw->forced_speed_duplex = e1000_100_half;
		break;
	case SPEED_100 + DUPLEX_FULL:
		hw->forced_speed_duplex = e1000_100_full;
		break;
	case SPEED_1000 + DUPLEX_FULL:
		hw->autoneg = 1;
		hw->autoneg_advertised = ADVERTISE_1000_FULL;
		break;
	case SPEED_1000 + DUPLEX_HALF: /* not supported */
	default:
		goto err_inval;
	}
	return 0;

err_inval:
	e_err(probe, "Unsupported Speed/Duplex configuration\n");
	return -EINVAL;
}
       

static int __e1000_shutdown(struct pci_dev *pdev, bool *enable_wake)
{
	struct net_device *netdev = pci_get_drvdata(pdev);
	struct e1000_adapter *adapter = netdev_priv(netdev);
	struct e1000_hw *hw = &adapter->hw;
	u32 ctrl, ctrl_ext, rctl, status;
	u32 wufc = adapter->wol;
#ifdef CONFIG_PM
	int retval = 0;
#endif

	if (adapter->ecdev) {
		return -EBUSY;
	}

	netif_device_detach(netdev);

	if (netif_running(netdev)) {
		WARN_ON(test_bit(__E1000_RESETTING, &adapter->flags));
		e1000_down(adapter);
	}

#ifdef CONFIG_PM
	retval = pci_save_state(pdev);
	if (retval)
		return retval;
#endif

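	/* do not arm link-change wake-up while the link is already up */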
       
	status = er32(STATUS);
	if (status & E1000_STATUS_LU)
		wufc &= ~E1000_WUFC_LNKC;

	if (wufc) {
		e1000_setup_rctl(adapter);
		e1000_set_rx_mode(netdev);

		rctl = er32(RCTL);

		/* turn on all-multi mode if wake on multicast is enabled */
		if (wufc & E1000_WUFC_MC)
			rctl |= E1000_RCTL_MPE;

		/* enable receives in the hardware */
		ew32(RCTL, rctl | E1000_RCTL_EN);

		if (hw->mac_type >= e1000_82540) {
			ctrl = er32(CTRL);
			/* advertise wake from D3Cold */
			#define E1000_CTRL_ADVD3WUC 0x00100000
			/* phy power management enable */
			#define E1000_CTRL_EN_PHY_PWR_MGMT 0x00200000
			ctrl |= E1000_CTRL_ADVD3WUC |
				E1000_CTRL_EN_PHY_PWR_MGMT;
			ew32(CTRL, ctrl);
		}

		if (hw->media_type == e1000_media_type_fiber ||
		    hw->media_type == e1000_media_type_internal_serdes) {
			/* keep the laser running in D3 */
			ctrl_ext = er32(CTRL_EXT);
			ctrl_ext |= E1000_CTRL_EXT_SDP7_DATA;
			ew32(CTRL_EXT, ctrl_ext);
		}

		ew32(WUC, E1000_WUC_PME_EN);
		ew32(WUFC, wufc);
	} else {
		ew32(WUC, 0);
		ew32(WUFC, 0);
	}

	e1000_release_manageability(adapter);

	*enable_wake = !!wufc;

	/* make sure adapter isn't asleep if manageability is enabled */
	if (adapter->en_mng_pt)
		*enable_wake = true;

	if (netif_running(netdev))
		e1000_free_irq(adapter);

	pci_disable_device(pdev);

	return 0;
}
       

#ifdef CONFIG_PM
static int e1000_suspend(struct pci_dev *pdev, pm_message_t state)
{
	int retval;
	bool wake;

	retval = __e1000_shutdown(pdev, &wake);
	if (retval)
		return retval;

	if (wake) {
		pci_prepare_to_sleep(pdev);
	} else {
		pci_wake_from_d3(pdev, false);
		pci_set_power_state(pdev, PCI_D3hot);
	}

	return 0;
}
       

static int e1000_resume(struct pci_dev *pdev)
{
	struct net_device *netdev = pci_get_drvdata(pdev);
	struct e1000_adapter *adapter = netdev_priv(netdev);
	struct e1000_hw *hw = &adapter->hw;
	u32 err;

	if (adapter->ecdev) {
		return -EBUSY;
	}

	pci_set_power_state(pdev, PCI_D0);
	pci_restore_state(pdev);
	pci_save_state(pdev);

	if (adapter->need_ioport)
		err = pci_enable_device(pdev);
	else
		err = pci_enable_device_mem(pdev);
	if (err) {
		pr_err("Cannot enable PCI device from suspend\n");
		return err;
	}
	pci_set_master(pdev);

	pci_enable_wake(pdev, PCI_D3hot, 0);
	pci_enable_wake(pdev, PCI_D3cold, 0);

	if (netif_running(netdev)) {
		err = e1000_request_irq(adapter);
		if (err)
			return err;
	}

	e1000_power_up_phy(adapter);
	e1000_reset(adapter);
	ew32(WUS, ~0);

	e1000_init_manageability(adapter);

	if (netif_running(netdev))
		e1000_up(adapter);

	if (!adapter->ecdev) {
		netif_device_attach(netdev);
	}

	return 0;
}
#endif
       

static void e1000_shutdown(struct pci_dev *pdev)
{
	bool wake;

	__e1000_shutdown(pdev, &wake);

	if (system_state == SYSTEM_POWER_OFF) {
		pci_wake_from_d3(pdev, wake);
		pci_set_power_state(pdev, PCI_D3hot);
	}
}
       
  5285 
  5286 #ifdef CONFIG_NET_POLL_CONTROLLER
  5287 /*
  5288  * Polling 'interrupt' - used by things like netconsole to send skbs
  5289  * without having to re-enable interrupts. It's not called while
  5290  * the interrupt routine is executing.
  5291  */
  5292 static void e1000_netpoll(struct net_device *netdev)
  5293 {
  5294 	struct e1000_adapter *adapter = netdev_priv(netdev);
  5295 
  5296 	disable_irq(adapter->pdev->irq);
  5297 	e1000_intr(adapter->pdev->irq, netdev);
  5298 	enable_irq(adapter->pdev->irq);
  5299 }
  5300 #endif
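/*
 * e1000_netpoll() is reached through the driver's net_device_ops. A
 * minimal sketch of the hookup (the complete ops table is defined
 * earlier in this file; .ndo_poll_controller is the standard
 * net_device_ops member for this callback):
 *
 *	static const struct net_device_ops e1000_netdev_ops = {
 *		.ndo_open            = e1000_open,
 *		.ndo_stop            = e1000_close,
 *	#ifdef CONFIG_NET_POLL_CONTROLLER
 *		.ndo_poll_controller = e1000_netpoll,
 *	#endif
 *	};
 */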
  5301 
  5302 /**
  5303  * e1000_io_error_detected - called when PCI error is detected
  5304  * @pdev: Pointer to PCI device
  5305  * @state: The current PCI connection state
  5306  *
  5307  * This function is called after a PCI bus error affecting
  5308  * this device has been detected.
  5309  */
  5310 static pci_ers_result_t e1000_io_error_detected(struct pci_dev *pdev,
  5311 						pci_channel_state_t state)
  5312 {
  5313 	struct net_device *netdev = pci_get_drvdata(pdev);
  5314 	struct e1000_adapter *adapter = netdev_priv(netdev);
  5315 
  5316 	netif_device_detach(netdev);
  5317 
  5318 	if (state == pci_channel_io_perm_failure)
  5319 		return PCI_ERS_RESULT_DISCONNECT;
  5320 
  5321 	if (netif_running(netdev))
  5322 		e1000_down(adapter);
  5323 	pci_disable_device(pdev);
  5324 
  5325 	/* Request a slot reset. */
  5326 	return PCI_ERS_RESULT_NEED_RESET;
  5327 }
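/*
 * e1000_io_error_detected(), e1000_io_slot_reset() and e1000_io_resume()
 * implement the PCI error recovery sequence (error_detected, then
 * slot_reset, then resume). They are registered through a struct
 * pci_error_handlers referenced from the pci_driver; a minimal sketch,
 * assuming the handler-table name used elsewhere in this driver:
 *
 *	static struct pci_error_handlers e1000_err_handler = {
 *		.error_detected = e1000_io_error_detected,
 *		.slot_reset     = e1000_io_slot_reset,
 *		.resume         = e1000_io_resume,
 *	};
 */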
  5328 
  5329 /**
  5330  * e1000_io_slot_reset - called after the PCI bus has been reset.
  5331  * @pdev: Pointer to PCI device
  5332  *
  5333  * Restart the card from scratch, as if from a cold-boot. Implementation
  5334  * resembles the first half of the e1000_resume routine.
  5335  */
  5336 static pci_ers_result_t e1000_io_slot_reset(struct pci_dev *pdev)
  5337 {
  5338 	struct net_device *netdev = pci_get_drvdata(pdev);
  5339 	struct e1000_adapter *adapter = netdev_priv(netdev);
  5340 	struct e1000_hw *hw = &adapter->hw;
  5341 	int err;
  5342 
  5343 	if (adapter->need_ioport)
  5344 		err = pci_enable_device(pdev);
  5345 	else
  5346 		err = pci_enable_device_mem(pdev);
  5347 	if (err) {
  5348 		pr_err("Cannot re-enable PCI device after reset.\n");
  5349 		return PCI_ERS_RESULT_DISCONNECT;
  5350 	}
  5351 	pci_set_master(pdev);
  5352 
  5353 	pci_enable_wake(pdev, PCI_D3hot, 0);
  5354 	pci_enable_wake(pdev, PCI_D3cold, 0);
  5355 
  5356 	e1000_reset(adapter);
  5357 	ew32(WUS, ~0);	/* clear all pending wake-up status bits */
  5358 
  5359 	return PCI_ERS_RESULT_RECOVERED;
  5360 }
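/*
 * Division of labour in the recovery path: e1000_io_slot_reset() only
 * re-arms the hardware (PCI enable, bus mastering, chip reset, wake
 * status clear); traffic does not restart until the PCI error-recovery
 * core invokes e1000_io_resume() below.
 */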
  5361 
  5362 /**
  5363  * e1000_io_resume - called when traffic can start flowing again.
  5364  * @pdev: Pointer to PCI device
  5365  *
  5366  * This callback is called when the error recovery driver tells us that
  5367  * it's OK to resume normal operation. Implementation resembles the
  5368  * second half of the e1000_resume routine.
  5369  */
  5370 static void e1000_io_resume(struct pci_dev *pdev)
  5371 {
  5372 	struct net_device *netdev = pci_get_drvdata(pdev);
  5373 	struct e1000_adapter *adapter = netdev_priv(netdev);
  5374 
  5375 	e1000_init_manageability(adapter);
  5376 
  5377 	if (netif_running(netdev)) {
  5378 		if (e1000_up(adapter)) {
  5379 			pr_err("can't bring device back up after reset\n");
  5380 			return;
  5381 		}
  5382 	}
  5383 
  5384 	netif_device_attach(netdev);
  5385 }
  5386 
  5387 /* e1000_main.c */