devices/e1000e/netdev-3.2-orig.c
branch stable-1.5, changeset 2407:35223d2e6e72
       
     1 /*******************************************************************************
       
     2 
       
     3   Intel PRO/1000 Linux driver
       
     4   Copyright(c) 1999 - 2011 Intel Corporation.
       
     5 
       
     6   This program is free software; you can redistribute it and/or modify it
       
     7   under the terms and conditions of the GNU General Public License,
       
     8   version 2, as published by the Free Software Foundation.
       
     9 
       
    10   This program is distributed in the hope it will be useful, but WITHOUT
       
    11   ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
       
    12   FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
       
    13   more details.
       
    14 
       
    15   You should have received a copy of the GNU General Public License along with
       
    16   this program; if not, write to the Free Software Foundation, Inc.,
       
    17   51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
       
    18 
       
    19   The full GNU General Public License is included in this distribution in
       
    20   the file called "COPYING".
       
    21 
       
    22   Contact Information:
       
    23   Linux NICS <linux.nics@intel.com>
       
    24   e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
       
    25   Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
       
    26 
       
    27 *******************************************************************************/
       
    28 
       
    29 #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
       
    30 
       
    31 #include <linux/module.h>
       
    32 #include <linux/types.h>
       
    33 #include <linux/init.h>
       
    34 #include <linux/pci.h>
       
    35 #include <linux/vmalloc.h>
       
    36 #include <linux/pagemap.h>
       
    37 #include <linux/delay.h>
       
    38 #include <linux/netdevice.h>
       
    39 #include <linux/interrupt.h>
       
    40 #include <linux/tcp.h>
       
    41 #include <linux/ipv6.h>
       
    42 #include <linux/slab.h>
       
    43 #include <net/checksum.h>
       
    44 #include <net/ip6_checksum.h>
       
    45 #include <linux/mii.h>
       
    46 #include <linux/ethtool.h>
       
    47 #include <linux/if_vlan.h>
       
    48 #include <linux/cpu.h>
       
    49 #include <linux/smp.h>
       
    50 #include <linux/pm_qos.h>
       
    51 #include <linux/pm_runtime.h>
       
    52 #include <linux/aer.h>
       
    53 #include <linux/prefetch.h>
       
    54 
       
    55 #include "e1000.h"
       
    56 
       
    57 #define DRV_EXTRAVERSION "-k"
       
    58 
       
    59 #define DRV_VERSION "1.5.1" DRV_EXTRAVERSION
       
    60 char e1000e_driver_name[] = "e1000e";
       
    61 const char e1000e_driver_version[] = DRV_VERSION;
       
    62 
       
    63 static void e1000e_disable_aspm(struct pci_dev *pdev, u16 state);
       
    64 
       
    65 static const struct e1000_info *e1000_info_tbl[] = {
       
    66 	[board_82571]		= &e1000_82571_info,
       
    67 	[board_82572]		= &e1000_82572_info,
       
    68 	[board_82573]		= &e1000_82573_info,
       
    69 	[board_82574]		= &e1000_82574_info,
       
    70 	[board_82583]		= &e1000_82583_info,
       
    71 	[board_80003es2lan]	= &e1000_es2_info,
       
    72 	[board_ich8lan]		= &e1000_ich8_info,
       
    73 	[board_ich9lan]		= &e1000_ich9_info,
       
    74 	[board_ich10lan]	= &e1000_ich10_info,
       
    75 	[board_pchlan]		= &e1000_pch_info,
       
    76 	[board_pch2lan]		= &e1000_pch2_info,
       
    77 };
       
    78 
       
    79 struct e1000_reg_info {
       
    80 	u32 ofs;
       
    81 	char *name;
       
    82 };
       
    83 
       
    84 #define E1000_RDFH	0x02410	/* Rx Data FIFO Head - RW */
       
    85 #define E1000_RDFT	0x02418	/* Rx Data FIFO Tail - RW */
       
    86 #define E1000_RDFHS	0x02420	/* Rx Data FIFO Head Saved - RW */
       
    87 #define E1000_RDFTS	0x02428	/* Rx Data FIFO Tail Saved - RW */
       
    88 #define E1000_RDFPC	0x02430	/* Rx Data FIFO Packet Count - RW */
       
    89 
       
    90 #define E1000_TDFH	0x03410	/* Tx Data FIFO Head - RW */
       
    91 #define E1000_TDFT	0x03418	/* Tx Data FIFO Tail - RW */
       
    92 #define E1000_TDFHS	0x03420	/* Tx Data FIFO Head Saved - RW */
       
    93 #define E1000_TDFTS	0x03428	/* Tx Data FIFO Tail Saved - RW */
       
    94 #define E1000_TDFPC	0x03430	/* Tx Data FIFO Packet Count - RW */
       
    95 
       
    96 static const struct e1000_reg_info e1000_reg_info_tbl[] = {
       
    97 
       
    98 	/* General Registers */
       
    99 	{E1000_CTRL, "CTRL"},
       
   100 	{E1000_STATUS, "STATUS"},
       
   101 	{E1000_CTRL_EXT, "CTRL_EXT"},
       
   102 
       
   103 	/* Interrupt Registers */
       
   104 	{E1000_ICR, "ICR"},
       
   105 
       
   106 	/* Rx Registers */
       
   107 	{E1000_RCTL, "RCTL"},
       
   108 	{E1000_RDLEN, "RDLEN"},
       
   109 	{E1000_RDH, "RDH"},
       
   110 	{E1000_RDT, "RDT"},
       
   111 	{E1000_RDTR, "RDTR"},
       
   112 	{E1000_RXDCTL(0), "RXDCTL"},
       
   113 	{E1000_ERT, "ERT"},
       
   114 	{E1000_RDBAL, "RDBAL"},
       
   115 	{E1000_RDBAH, "RDBAH"},
       
   116 	{E1000_RDFH, "RDFH"},
       
   117 	{E1000_RDFT, "RDFT"},
       
   118 	{E1000_RDFHS, "RDFHS"},
       
   119 	{E1000_RDFTS, "RDFTS"},
       
   120 	{E1000_RDFPC, "RDFPC"},
       
   121 
       
   122 	/* Tx Registers */
       
   123 	{E1000_TCTL, "TCTL"},
       
   124 	{E1000_TDBAL, "TDBAL"},
       
   125 	{E1000_TDBAH, "TDBAH"},
       
   126 	{E1000_TDLEN, "TDLEN"},
       
   127 	{E1000_TDH, "TDH"},
       
   128 	{E1000_TDT, "TDT"},
       
   129 	{E1000_TIDV, "TIDV"},
       
   130 	{E1000_TXDCTL(0), "TXDCTL"},
       
   131 	{E1000_TADV, "TADV"},
       
   132 	{E1000_TARC(0), "TARC"},
       
   133 	{E1000_TDFH, "TDFH"},
       
   134 	{E1000_TDFT, "TDFT"},
       
   135 	{E1000_TDFHS, "TDFHS"},
       
   136 	{E1000_TDFTS, "TDFTS"},
       
   137 	{E1000_TDFPC, "TDFPC"},
       
   138 
       
   139 	/* List Terminator */
       
   140 	{}
       
   141 };
       
   142 
       
   143 /*
       
   144  * e1000_regdump - register printout routine
       
   145  */
       
   146 static void e1000_regdump(struct e1000_hw *hw, struct e1000_reg_info *reginfo)
       
   147 {
       
   148 	int n = 0;
       
   149 	char rname[16];
       
   150 	u32 regs[8];
       
   151 
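        	/* RXDCTL, TXDCTL and TARC exist per queue; dump both instances */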
       
   152 	switch (reginfo->ofs) {
       
   153 	case E1000_RXDCTL(0):
       
   154 		for (n = 0; n < 2; n++)
       
   155 			regs[n] = __er32(hw, E1000_RXDCTL(n));
       
   156 		break;
       
   157 	case E1000_TXDCTL(0):
       
   158 		for (n = 0; n < 2; n++)
       
   159 			regs[n] = __er32(hw, E1000_TXDCTL(n));
       
   160 		break;
       
   161 	case E1000_TARC(0):
       
   162 		for (n = 0; n < 2; n++)
       
   163 			regs[n] = __er32(hw, E1000_TARC(n));
       
   164 		break;
       
   165 	default:
       
   166 		printk(KERN_INFO "%-15s %08x\n",
       
   167 		       reginfo->name, __er32(hw, reginfo->ofs));
       
   168 		return;
       
   169 	}
       
   170 
       
   171 	snprintf(rname, 16, "%s%s", reginfo->name, "[0-1]");
       
   172 	printk(KERN_INFO "%-15s ", rname);
       
   173 	for (n = 0; n < 2; n++)
       
   174 		printk(KERN_CONT "%08x ", regs[n]);
       
   175 	printk(KERN_CONT "\n");
       
   176 }
       
   177 
       
   178 /*
       
   179  * e1000e_dump - Print registers, Tx-ring and Rx-ring
       
   180  */
       
   181 static void e1000e_dump(struct e1000_adapter *adapter)
       
   182 {
       
   183 	struct net_device *netdev = adapter->netdev;
       
   184 	struct e1000_hw *hw = &adapter->hw;
       
   185 	struct e1000_reg_info *reginfo;
       
   186 	struct e1000_ring *tx_ring = adapter->tx_ring;
       
   187 	struct e1000_tx_desc *tx_desc;
       
   188 	struct my_u0 {
       
   189 		u64 a;
       
   190 		u64 b;
       
   191 	} *u0;
       
   192 	struct e1000_buffer *buffer_info;
       
   193 	struct e1000_ring *rx_ring = adapter->rx_ring;
       
   194 	union e1000_rx_desc_packet_split *rx_desc_ps;
       
   195 	union e1000_rx_desc_extended *rx_desc;
       
   196 	struct my_u1 {
       
   197 		u64 a;
       
   198 		u64 b;
       
   199 		u64 c;
       
   200 		u64 d;
       
   201 	} *u1;
       
   202 	u32 staterr;
       
   203 	int i = 0;
       
   204 
       
   205 	if (!netif_msg_hw(adapter))
       
   206 		return;
       
   207 
       
   208 	/* Print netdevice Info */
       
   209 	if (netdev) {
       
   210 		dev_info(&adapter->pdev->dev, "Net device Info\n");
       
   211 		printk(KERN_INFO "Device Name     state            "
       
   212 		       "trans_start      last_rx\n");
       
   213 		printk(KERN_INFO "%-15s %016lX %016lX %016lX\n",
       
   214 		       netdev->name, netdev->state, netdev->trans_start,
       
   215 		       netdev->last_rx);
       
   216 	}
       
   217 
       
   218 	/* Print Registers */
       
   219 	dev_info(&adapter->pdev->dev, "Register Dump\n");
       
   220 	printk(KERN_INFO " Register Name   Value\n");
       
   221 	for (reginfo = (struct e1000_reg_info *)e1000_reg_info_tbl;
       
   222 	     reginfo->name; reginfo++) {
       
   223 		e1000_regdump(hw, reginfo);
       
   224 	}
       
   225 
       
   226 	/* Print Tx Ring Summary */
       
   227 	if (!netdev || !netif_running(netdev))
       
   228 		goto exit;
       
   229 
       
   230 	dev_info(&adapter->pdev->dev, "Tx Ring Summary\n");
       
   231 	printk(KERN_INFO "Queue [NTU] [NTC] [bi(ntc)->dma  ]"
       
   232 	       " leng ntw timestamp\n");
       
   233 	buffer_info = &tx_ring->buffer_info[tx_ring->next_to_clean];
       
   234 	printk(KERN_INFO " %5d %5X %5X %016llX %04X %3X %016llX\n",
       
   235 	       0, tx_ring->next_to_use, tx_ring->next_to_clean,
       
   236 	       (unsigned long long)buffer_info->dma,
       
   237 	       buffer_info->length,
       
   238 	       buffer_info->next_to_watch,
       
   239 	       (unsigned long long)buffer_info->time_stamp);
       
   240 
       
   241 	/* Print Tx Ring */
       
   242 	if (!netif_msg_tx_done(adapter))
       
   243 		goto rx_ring_summary;
       
   244 
       
   245 	dev_info(&adapter->pdev->dev, "Tx Ring Dump\n");
       
   246 
       
   247 	/* Transmit Descriptor Formats - DEXT[29] is 0 (Legacy) or 1 (Extended)
       
   248 	 *
       
   249 	 * Legacy Transmit Descriptor
       
   250 	 *   +--------------------------------------------------------------+
       
   251 	 * 0 |         Buffer Address [63:0] (Reserved on Write Back)       |
       
   252 	 *   +--------------------------------------------------------------+
       
   253 	 * 8 | Special  |    CSS     | Status |  CMD    |  CSO   |  Length  |
       
   254 	 *   +--------------------------------------------------------------+
       
   255 	 *   63       48 47        36 35    32 31     24 23    16 15        0
       
   256 	 *
       
   257 	 * Extended Context Descriptor (DTYP=0x0) for TSO or checksum offload
       
   258 	 *   63      48 47    40 39       32 31             16 15    8 7      0
       
   259 	 *   +----------------------------------------------------------------+
       
   260 	 * 0 |  TUCSE  | TUCS0  |   TUCSS   |     IPCSE       | IPCS0 | IPCSS |
       
   261 	 *   +----------------------------------------------------------------+
       
   262 	 * 8 |   MSS   | HDRLEN | RSV | STA | TUCMD | DTYP |      PAYLEN      |
       
   263 	 *   +----------------------------------------------------------------+
       
   264 	 *   63      48 47    40 39 36 35 32 31   24 23  20 19                0
       
   265 	 *
       
   266 	 * Extended Data Descriptor (DTYP=0x1)
       
   267 	 *   +----------------------------------------------------------------+
       
   268 	 * 0 |                     Buffer Address [63:0]                      |
       
   269 	 *   +----------------------------------------------------------------+
       
   270 	 * 8 | VLAN tag |  POPTS  | Rsvd | Status | Command | DTYP |  DTALEN  |
       
   271 	 *   +----------------------------------------------------------------+
       
   272 	 *   63       48 47     40 39  36 35    32 31     24 23  20 19        0
       
   273 	 */
       
   274 	printk(KERN_INFO "Tl[desc]     [address 63:0  ] [SpeCssSCmCsLen]"
       
   275 	       " [bi->dma       ] leng  ntw timestamp        bi->skb "
       
   276 	       "<-- Legacy format\n");
       
   277 	printk(KERN_INFO "Tc[desc]     [Ce CoCsIpceCoS] [MssHlRSCm0Plen]"
       
   278 	       " [bi->dma       ] leng  ntw timestamp        bi->skb "
       
   279 	       "<-- Ext Context format\n");
       
   280 	printk(KERN_INFO "Td[desc]     [address 63:0  ] [VlaPoRSCm1Dlen]"
       
   281 	       " [bi->dma       ] leng  ntw timestamp        bi->skb "
       
   282 	       "<-- Ext Data format\n");
       
   283 	for (i = 0; tx_ring->desc && (i < tx_ring->count); i++) {
       
   284 		tx_desc = E1000_TX_DESC(*tx_ring, i);
       
   285 		buffer_info = &tx_ring->buffer_info[i];
       
   286 		u0 = (struct my_u0 *)tx_desc;
       
   287 		printk(KERN_INFO "T%c[0x%03X]    %016llX %016llX %016llX "
       
   288 		       "%04X  %3X %016llX %p",
       
   289 		       (!(le64_to_cpu(u0->b) & (1 << 29)) ? 'l' :
       
   290 			((le64_to_cpu(u0->b) & (1 << 20)) ? 'd' : 'c')), i,
       
   291 		       (unsigned long long)le64_to_cpu(u0->a),
       
   292 		       (unsigned long long)le64_to_cpu(u0->b),
       
   293 		       (unsigned long long)buffer_info->dma,
       
   294 		       buffer_info->length, buffer_info->next_to_watch,
       
   295 		       (unsigned long long)buffer_info->time_stamp,
       
   296 		       buffer_info->skb);
       
   297 		if (i == tx_ring->next_to_use && i == tx_ring->next_to_clean)
       
   298 			printk(KERN_CONT " NTC/U\n");
       
   299 		else if (i == tx_ring->next_to_use)
       
   300 			printk(KERN_CONT " NTU\n");
       
   301 		else if (i == tx_ring->next_to_clean)
       
   302 			printk(KERN_CONT " NTC\n");
       
   303 		else
       
   304 			printk(KERN_CONT "\n");
       
   305 
       
   306 		if (netif_msg_pktdata(adapter) && buffer_info->dma != 0)
       
   307 			print_hex_dump(KERN_INFO, "", DUMP_PREFIX_ADDRESS,
       
   308 				       16, 1, phys_to_virt(buffer_info->dma),
       
   309 				       buffer_info->length, true);
       
   310 	}
       
   311 
       
   312 	/* Print Rx Ring Summary */
       
   313 rx_ring_summary:
       
   314 	dev_info(&adapter->pdev->dev, "Rx Ring Summary\n");
       
   315 	printk(KERN_INFO "Queue [NTU] [NTC]\n");
       
   316 	printk(KERN_INFO " %5d %5X %5X\n", 0,
       
   317 	       rx_ring->next_to_use, rx_ring->next_to_clean);
       
   318 
       
   319 	/* Print Rx Ring */
       
   320 	if (!netif_msg_rx_status(adapter))
       
   321 		goto exit;
       
   322 
       
   323 	dev_info(&adapter->pdev->dev, "Rx Ring Dump\n");
       
   324 	switch (adapter->rx_ps_pages) {
       
   325 	case 1:
       
   326 	case 2:
       
   327 	case 3:
       
   328 		/* [Extended] Packet Split Receive Descriptor Format
       
   329 		 *
       
   330 		 *    +-----------------------------------------------------+
       
   331 		 *  0 |                Buffer Address 0 [63:0]              |
       
   332 		 *    +-----------------------------------------------------+
       
   333 		 *  8 |                Buffer Address 1 [63:0]              |
       
   334 		 *    +-----------------------------------------------------+
       
   335 		 * 16 |                Buffer Address 2 [63:0]              |
       
   336 		 *    +-----------------------------------------------------+
       
   337 		 * 24 |                Buffer Address 3 [63:0]              |
       
   338 		 *    +-----------------------------------------------------+
       
   339 		 */
       
   340 		printk(KERN_INFO "R  [desc]      [buffer 0 63:0 ] "
       
   341 		       "[buffer 1 63:0 ] "
       
   342 		       "[buffer 2 63:0 ] [buffer 3 63:0 ] [bi->dma       ] "
       
   343 		       "[bi->skb] <-- Ext Pkt Split format\n");
       
   344 		/* [Extended] Receive Descriptor (Write-Back) Format
       
   345 		 *
       
   346 		 *   63       48 47    32 31     13 12    8 7    4 3        0
       
   347 		 *   +------------------------------------------------------+
       
   348 		 * 0 | Packet   | IP     |  Rsvd   | MRQ   | Rsvd | MRQ RSS |
       
   349 		 *   | Checksum | Ident  |         | Queue |      |  Type   |
       
   350 		 *   +------------------------------------------------------+
       
   351 		 * 8 | VLAN Tag | Length | Extended Error | Extended Status |
       
   352 		 *   +------------------------------------------------------+
       
   353 		 *   63       48 47    32 31            20 19               0
       
   354 		 */
       
   355 		printk(KERN_INFO "RWB[desc]      [ck ipid mrqhsh] "
       
   356 		       "[vl   l0 ee  es] "
       
   357 		       "[ l3  l2  l1 hs] [reserved      ] ---------------- "
       
   358 		       "[bi->skb] <-- Ext Rx Write-Back format\n");
       
   359 		for (i = 0; i < rx_ring->count; i++) {
       
   360 			buffer_info = &rx_ring->buffer_info[i];
       
   361 			rx_desc_ps = E1000_RX_DESC_PS(*rx_ring, i);
       
   362 			u1 = (struct my_u1 *)rx_desc_ps;
       
   363 			staterr =
       
   364 			    le32_to_cpu(rx_desc_ps->wb.middle.status_error);
       
   365 			if (staterr & E1000_RXD_STAT_DD) {
       
   366 				/* Descriptor Done */
       
   367 				printk(KERN_INFO "RWB[0x%03X]     %016llX "
       
   368 				       "%016llX %016llX %016llX "
       
   369 				       "---------------- %p", i,
       
   370 				       (unsigned long long)le64_to_cpu(u1->a),
       
   371 				       (unsigned long long)le64_to_cpu(u1->b),
       
   372 				       (unsigned long long)le64_to_cpu(u1->c),
       
   373 				       (unsigned long long)le64_to_cpu(u1->d),
       
   374 				       buffer_info->skb);
       
   375 			} else {
       
   376 				printk(KERN_INFO "R  [0x%03X]     %016llX "
       
   377 				       "%016llX %016llX %016llX %016llX %p", i,
       
   378 				       (unsigned long long)le64_to_cpu(u1->a),
       
   379 				       (unsigned long long)le64_to_cpu(u1->b),
       
   380 				       (unsigned long long)le64_to_cpu(u1->c),
       
   381 				       (unsigned long long)le64_to_cpu(u1->d),
       
   382 				       (unsigned long long)buffer_info->dma,
       
   383 				       buffer_info->skb);
       
   384 
       
   385 				if (netif_msg_pktdata(adapter))
       
   386 					print_hex_dump(KERN_INFO, "",
       
   387 						DUMP_PREFIX_ADDRESS, 16, 1,
       
   388 						phys_to_virt(buffer_info->dma),
       
   389 						adapter->rx_ps_bsize0, true);
       
   390 			}
       
   391 
       
   392 			if (i == rx_ring->next_to_use)
       
   393 				printk(KERN_CONT " NTU\n");
       
   394 			else if (i == rx_ring->next_to_clean)
       
   395 				printk(KERN_CONT " NTC\n");
       
   396 			else
       
   397 				printk(KERN_CONT "\n");
       
   398 		}
       
   399 		break;
       
   400 	default:
       
   401 	case 0:
       
   402 		/* Extended Receive Descriptor (Read) Format
       
   403 		 *
       
   404 		 *   +-----------------------------------------------------+
       
   405 		 * 0 |                Buffer Address [63:0]                |
       
   406 		 *   +-----------------------------------------------------+
       
   407 		 * 8 |                      Reserved                       |
       
   408 		 *   +-----------------------------------------------------+
       
   409 		 */
       
   410 		printk(KERN_INFO "R  [desc]      [buf addr 63:0 ] "
       
   411 		       "[reserved 63:0 ] [bi->dma       ] "
       
   412 		       "[bi->skb] <-- Ext (Read) format\n");
       
   413 		/* Extended Receive Descriptor (Write-Back) Format
       
   414 		 *
       
   415 		 *   63       48 47    32 31    24 23            4 3        0
       
   416 		 *   +------------------------------------------------------+
       
   417 		 *   |     RSS Hash      |        |               |         |
       
   418 		 * 0 +-------------------+  Rsvd  |   Reserved    | MRQ RSS |
       
   419 		 *   | Packet   | IP     |        |               |  Type   |
       
   420 		 *   | Checksum | Ident  |        |               |         |
       
   421 		 *   +------------------------------------------------------+
       
   422 		 * 8 | VLAN Tag | Length | Extended Error | Extended Status |
       
   423 		 *   +------------------------------------------------------+
       
   424 		 *   63       48 47    32 31            20 19               0
       
   425 		 */
       
   426 		printk(KERN_INFO "RWB[desc]      [cs ipid    mrq] "
       
   427 		       "[vt   ln xe  xs] "
       
   428 		       "[bi->skb] <-- Ext (Write-Back) format\n");
       
   429 
       
   430 		for (i = 0; i < rx_ring->count; i++) {
       
   431 			buffer_info = &rx_ring->buffer_info[i];
       
   432 			rx_desc = E1000_RX_DESC_EXT(*rx_ring, i);
       
   433 			u1 = (struct my_u1 *)rx_desc;
       
   434 			staterr = le32_to_cpu(rx_desc->wb.upper.status_error);
       
   435 			if (staterr & E1000_RXD_STAT_DD) {
       
   436 				/* Descriptor Done */
       
   437 				printk(KERN_INFO "RWB[0x%03X]     %016llX "
       
   438 				       "%016llX ---------------- %p", i,
       
   439 				       (unsigned long long)le64_to_cpu(u1->a),
       
   440 				       (unsigned long long)le64_to_cpu(u1->b),
       
   441 				       buffer_info->skb);
       
   442 			} else {
       
   443 				printk(KERN_INFO "R  [0x%03X]     %016llX "
       
   444 				       "%016llX %016llX %p", i,
       
   445 				       (unsigned long long)le64_to_cpu(u1->a),
       
   446 				       (unsigned long long)le64_to_cpu(u1->b),
       
   447 				       (unsigned long long)buffer_info->dma,
       
   448 				       buffer_info->skb);
       
   449 
       
   450 				if (netif_msg_pktdata(adapter))
       
   451 					print_hex_dump(KERN_INFO, "",
       
   452 						       DUMP_PREFIX_ADDRESS, 16,
       
   453 						       1,
       
   454 						       phys_to_virt
       
   455 						       (buffer_info->dma),
       
   456 						       adapter->rx_buffer_len,
       
   457 						       true);
       
   458 			}
       
   459 
       
   460 			if (i == rx_ring->next_to_use)
       
   461 				printk(KERN_CONT " NTU\n");
       
   462 			else if (i == rx_ring->next_to_clean)
       
   463 				printk(KERN_CONT " NTC\n");
       
   464 			else
       
   465 				printk(KERN_CONT "\n");
       
   466 		}
       
   467 	}
       
   468 
       
   469 exit:
       
   470 	return;
       
   471 }
       
   472 
       
   473 /**
       
    474  * e1000_desc_unused - calculate the number of unused descriptors in the ring
       
   475  **/
       
   476 static int e1000_desc_unused(struct e1000_ring *ring)
       
   477 {
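        	/*
        	 * One descriptor is always left unused so that a full ring
        	 * (next_to_use one slot behind next_to_clean) can be distinguished
        	 * from an empty one (next_to_use == next_to_clean), hence the
        	 * "- 1" below.
        	 */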
       
   478 	if (ring->next_to_clean > ring->next_to_use)
       
   479 		return ring->next_to_clean - ring->next_to_use - 1;
       
   480 
       
   481 	return ring->count + ring->next_to_clean - ring->next_to_use - 1;
       
   482 }
       
   483 
       
   484 /**
       
   485  * e1000_receive_skb - helper function to handle Rx indications
       
   486  * @adapter: board private structure
       
   487  * @status: descriptor status field as written by hardware
       
   488  * @vlan: descriptor vlan field as written by hardware (no le/be conversion)
       
   489  * @skb: pointer to sk_buff to be indicated to stack
       
   490  **/
       
   491 static void e1000_receive_skb(struct e1000_adapter *adapter,
       
   492 			      struct net_device *netdev, struct sk_buff *skb,
       
   493 			      u8 status, __le16 vlan)
       
   494 {
       
   495 	u16 tag = le16_to_cpu(vlan);
       
   496 	skb->protocol = eth_type_trans(skb, netdev);
       
   497 
       
   498 	if (status & E1000_RXD_STAT_VP)
       
   499 		__vlan_hwaccel_put_tag(skb, tag);
       
   500 
       
   501 	napi_gro_receive(&adapter->napi, skb);
       
   502 }
       
   503 
       
   504 /**
       
   505  * e1000_rx_checksum - Receive Checksum Offload
       
   506  * @adapter:     board private structure
       
   507  * @status_err:  receive descriptor status and error fields
       
   508  * @csum:	receive descriptor csum field
       
   509  * @sk_buff:     socket buffer with received data
       
   510  **/
       
   511 static void e1000_rx_checksum(struct e1000_adapter *adapter, u32 status_err,
       
   512 			      u32 csum, struct sk_buff *skb)
       
   513 {
       
   514 	u16 status = (u16)status_err;
       
   515 	u8 errors = (u8)(status_err >> 24);
       
   516 
       
   517 	skb_checksum_none_assert(skb);
       
   518 
       
   519 	/* Ignore Checksum bit is set */
       
   520 	if (status & E1000_RXD_STAT_IXSM)
       
   521 		return;
       
   522 	/* TCP/UDP checksum error bit is set */
       
   523 	if (errors & E1000_RXD_ERR_TCPE) {
       
   524 		/* let the stack verify checksum errors */
       
   525 		adapter->hw_csum_err++;
       
   526 		return;
       
   527 	}
       
   528 
       
   529 	/* TCP/UDP Checksum has not been calculated */
       
   530 	if (!(status & (E1000_RXD_STAT_TCPCS | E1000_RXD_STAT_UDPCS)))
       
   531 		return;
       
   532 
       
   533 	/* It must be a TCP or UDP packet with a valid checksum */
       
   534 	if (status & E1000_RXD_STAT_TCPCS) {
       
   535 		/* TCP checksum is good */
       
   536 		skb->ip_summed = CHECKSUM_UNNECESSARY;
       
   537 	} else {
       
   538 		/*
       
   539 		 * IP fragment with UDP payload
       
   540 		 * Hardware complements the payload checksum, so we undo it
       
   541 		 * and then put the value in host order for further stack use.
       
   542 		 */
       
   543 		__sum16 sum = (__force __sum16)htons(csum);
       
   544 		skb->csum = csum_unfold(~sum);
       
   545 		skb->ip_summed = CHECKSUM_COMPLETE;
       
   546 	}
       
   547 	adapter->hw_csum_good++;
       
   548 }
       
   549 
       
   550 /**
       
   551  * e1000e_update_tail_wa - helper function for e1000e_update_[rt]dt_wa()
       
   552  * @hw: pointer to the HW structure
       
   553  * @tail: address of tail descriptor register
       
   554  * @i: value to write to tail descriptor register
       
   555  *
       
   556  * When updating the tail register, the ME could be accessing Host CSR
       
   557  * registers at the same time.  Normally, this is handled in h/w by an
       
   558  * arbiter but on some parts there is a bug that acknowledges Host accesses
       
    559  * later than it should, which can leave the descriptor register with an

    560  * incorrect value.  Work around this by checking the FWSM register, which

    561  * has bit 24 set while the ME is accessing Host CSR registers; if it is

    562  * set, wait and re-check a bounded number of times before writing the tail.
       
   563  **/
       
   564 static inline s32 e1000e_update_tail_wa(struct e1000_hw *hw, u8 __iomem * tail,
       
   565 					unsigned int i)
       
   566 {
       
   567 	unsigned int j = 0;
       
   568 
       
   569 	while ((j++ < E1000_ICH_FWSM_PCIM2PCI_COUNT) &&
       
   570 	       (er32(FWSM) & E1000_ICH_FWSM_PCIM2PCI))
       
   571 		udelay(50);
       
   572 
       
   573 	writel(i, tail);
       
   574 
       
   575 	if ((j == E1000_ICH_FWSM_PCIM2PCI_COUNT) && (i != readl(tail)))
       
   576 		return E1000_ERR_SWFW_SYNC;
       
   577 
       
   578 	return 0;
       
   579 }
       
   580 
       
   581 static void e1000e_update_rdt_wa(struct e1000_adapter *adapter, unsigned int i)
       
   582 {
       
   583 	u8 __iomem *tail = (adapter->hw.hw_addr + adapter->rx_ring->tail);
       
   584 	struct e1000_hw *hw = &adapter->hw;
       
   585 
       
   586 	if (e1000e_update_tail_wa(hw, tail, i)) {
       
   587 		u32 rctl = er32(RCTL);
       
   588 		ew32(RCTL, rctl & ~E1000_RCTL_EN);
       
   589 		e_err("ME firmware caused invalid RDT - resetting\n");
       
   590 		schedule_work(&adapter->reset_task);
       
   591 	}
       
   592 }
       
   593 
       
   594 static void e1000e_update_tdt_wa(struct e1000_adapter *adapter, unsigned int i)
       
   595 {
       
   596 	u8 __iomem *tail = (adapter->hw.hw_addr + adapter->tx_ring->tail);
       
   597 	struct e1000_hw *hw = &adapter->hw;
       
   598 
       
   599 	if (e1000e_update_tail_wa(hw, tail, i)) {
       
   600 		u32 tctl = er32(TCTL);
       
   601 		ew32(TCTL, tctl & ~E1000_TCTL_EN);
       
   602 		e_err("ME firmware caused invalid TDT - resetting\n");
       
   603 		schedule_work(&adapter->reset_task);
       
   604 	}
       
   605 }
       
   606 
       
   607 /**
       
   608  * e1000_alloc_rx_buffers - Replace used receive buffers
       
   609  * @adapter: address of board private structure
       
   610  **/
       
   611 static void e1000_alloc_rx_buffers(struct e1000_adapter *adapter,
       
   612 				   int cleaned_count, gfp_t gfp)
       
   613 {
       
   614 	struct net_device *netdev = adapter->netdev;
       
   615 	struct pci_dev *pdev = adapter->pdev;
       
   616 	struct e1000_ring *rx_ring = adapter->rx_ring;
       
   617 	union e1000_rx_desc_extended *rx_desc;
       
   618 	struct e1000_buffer *buffer_info;
       
   619 	struct sk_buff *skb;
       
   620 	unsigned int i;
       
   621 	unsigned int bufsz = adapter->rx_buffer_len;
       
   622 
       
   623 	i = rx_ring->next_to_use;
       
   624 	buffer_info = &rx_ring->buffer_info[i];
       
   625 
       
   626 	while (cleaned_count--) {
       
   627 		skb = buffer_info->skb;
       
   628 		if (skb) {
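        			/* recycle the skb left over from a previous receive;
        			 * reset its length and just remap the buffer below
        			 */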
       
   629 			skb_trim(skb, 0);
       
   630 			goto map_skb;
       
   631 		}
       
   632 
       
   633 		skb = __netdev_alloc_skb_ip_align(netdev, bufsz, gfp);
       
   634 		if (!skb) {
       
   635 			/* Better luck next round */
       
   636 			adapter->alloc_rx_buff_failed++;
       
   637 			break;
       
   638 		}
       
   639 
       
   640 		buffer_info->skb = skb;
       
   641 map_skb:
       
   642 		buffer_info->dma = dma_map_single(&pdev->dev, skb->data,
       
   643 						  adapter->rx_buffer_len,
       
   644 						  DMA_FROM_DEVICE);
       
   645 		if (dma_mapping_error(&pdev->dev, buffer_info->dma)) {
       
   646 			dev_err(&pdev->dev, "Rx DMA map failed\n");
       
   647 			adapter->rx_dma_failed++;
       
   648 			break;
       
   649 		}
       
   650 
       
   651 		rx_desc = E1000_RX_DESC_EXT(*rx_ring, i);
       
   652 		rx_desc->read.buffer_addr = cpu_to_le64(buffer_info->dma);
       
   653 
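        		/*
        		 * Bump the hardware tail only once every E1000_RX_BUFFER_WRITE
        		 * descriptors rather than after every buffer, limiting the
        		 * number of MMIO writes.
        		 */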
       
   654 		if (unlikely(!(i & (E1000_RX_BUFFER_WRITE - 1)))) {
       
   655 			/*
       
   656 			 * Force memory writes to complete before letting h/w
       
   657 			 * know there are new descriptors to fetch.  (Only
       
   658 			 * applicable for weak-ordered memory model archs,
       
   659 			 * such as IA-64).
       
   660 			 */
       
   661 			wmb();
       
   662 			if (adapter->flags2 & FLAG2_PCIM2PCI_ARBITER_WA)
       
   663 				e1000e_update_rdt_wa(adapter, i);
       
   664 			else
       
   665 				writel(i, adapter->hw.hw_addr + rx_ring->tail);
       
   666 		}
       
   667 		i++;
       
   668 		if (i == rx_ring->count)
       
   669 			i = 0;
       
   670 		buffer_info = &rx_ring->buffer_info[i];
       
   671 	}
       
   672 
       
   673 	rx_ring->next_to_use = i;
       
   674 }
       
   675 
       
   676 /**
       
   677  * e1000_alloc_rx_buffers_ps - Replace used receive buffers; packet split
       
   678  * @adapter: address of board private structure
       
   679  **/
       
   680 static void e1000_alloc_rx_buffers_ps(struct e1000_adapter *adapter,
       
   681 				      int cleaned_count, gfp_t gfp)
       
   682 {
       
   683 	struct net_device *netdev = adapter->netdev;
       
   684 	struct pci_dev *pdev = adapter->pdev;
       
   685 	union e1000_rx_desc_packet_split *rx_desc;
       
   686 	struct e1000_ring *rx_ring = adapter->rx_ring;
       
   687 	struct e1000_buffer *buffer_info;
       
   688 	struct e1000_ps_page *ps_page;
       
   689 	struct sk_buff *skb;
       
   690 	unsigned int i, j;
       
   691 
       
   692 	i = rx_ring->next_to_use;
       
   693 	buffer_info = &rx_ring->buffer_info[i];
       
   694 
       
   695 	while (cleaned_count--) {
       
   696 		rx_desc = E1000_RX_DESC_PS(*rx_ring, i);
       
   697 
       
   698 		for (j = 0; j < PS_PAGE_BUFFERS; j++) {
       
   699 			ps_page = &buffer_info->ps_pages[j];
       
   700 			if (j >= adapter->rx_ps_pages) {
       
   701 				/* all unused desc entries get hw null ptr */
       
   702 				rx_desc->read.buffer_addr[j + 1] =
       
   703 				    ~cpu_to_le64(0);
       
   704 				continue;
       
   705 			}
       
   706 			if (!ps_page->page) {
       
   707 				ps_page->page = alloc_page(gfp);
       
   708 				if (!ps_page->page) {
       
   709 					adapter->alloc_rx_buff_failed++;
       
   710 					goto no_buffers;
       
   711 				}
       
   712 				ps_page->dma = dma_map_page(&pdev->dev,
       
   713 							    ps_page->page,
       
   714 							    0, PAGE_SIZE,
       
   715 							    DMA_FROM_DEVICE);
       
   716 				if (dma_mapping_error(&pdev->dev,
       
   717 						      ps_page->dma)) {
       
   718 					dev_err(&adapter->pdev->dev,
       
   719 						"Rx DMA page map failed\n");
       
   720 					adapter->rx_dma_failed++;
       
   721 					goto no_buffers;
       
   722 				}
       
   723 			}
       
   724 			/*
       
   725 			 * Refresh the desc even if buffer_addrs
       
   726 			 * didn't change because each write-back
       
   727 			 * erases this info.
       
   728 			 */
       
   729 			rx_desc->read.buffer_addr[j + 1] =
       
   730 			    cpu_to_le64(ps_page->dma);
       
   731 		}
       
   732 
       
   733 		skb = __netdev_alloc_skb_ip_align(netdev,
       
   734 						  adapter->rx_ps_bsize0,
       
   735 						  gfp);
       
   736 
       
   737 		if (!skb) {
       
   738 			adapter->alloc_rx_buff_failed++;
       
   739 			break;
       
   740 		}
       
   741 
       
   742 		buffer_info->skb = skb;
       
   743 		buffer_info->dma = dma_map_single(&pdev->dev, skb->data,
       
   744 						  adapter->rx_ps_bsize0,
       
   745 						  DMA_FROM_DEVICE);
       
   746 		if (dma_mapping_error(&pdev->dev, buffer_info->dma)) {
       
   747 			dev_err(&pdev->dev, "Rx DMA map failed\n");
       
   748 			adapter->rx_dma_failed++;
       
   749 			/* cleanup skb */
       
   750 			dev_kfree_skb_any(skb);
       
   751 			buffer_info->skb = NULL;
       
   752 			break;
       
   753 		}
       
   754 
       
   755 		rx_desc->read.buffer_addr[0] = cpu_to_le64(buffer_info->dma);
       
   756 
       
   757 		if (unlikely(!(i & (E1000_RX_BUFFER_WRITE - 1)))) {
       
   758 			/*
       
   759 			 * Force memory writes to complete before letting h/w
       
   760 			 * know there are new descriptors to fetch.  (Only
       
   761 			 * applicable for weak-ordered memory model archs,
       
   762 			 * such as IA-64).
       
   763 			 */
       
   764 			wmb();
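        			/*
        			 * Packet split descriptors are 32 bytes, twice the
        			 * size of legacy descriptors, so the index written to
        			 * the tail register is doubled.
        			 */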
       
   765 			if (adapter->flags2 & FLAG2_PCIM2PCI_ARBITER_WA)
       
   766 				e1000e_update_rdt_wa(adapter, i << 1);
       
   767 			else
       
   768 				writel(i << 1,
       
   769 				       adapter->hw.hw_addr + rx_ring->tail);
       
   770 		}
       
   771 
       
   772 		i++;
       
   773 		if (i == rx_ring->count)
       
   774 			i = 0;
       
   775 		buffer_info = &rx_ring->buffer_info[i];
       
   776 	}
       
   777 
       
   778 no_buffers:
       
   779 	rx_ring->next_to_use = i;
       
   780 }
       
   781 
       
   782 /**
       
   783  * e1000_alloc_jumbo_rx_buffers - Replace used jumbo receive buffers
       
   784  * @adapter: address of board private structure
       
   785  * @cleaned_count: number of buffers to allocate this pass
       
   786  **/
       
   787 
       
   788 static void e1000_alloc_jumbo_rx_buffers(struct e1000_adapter *adapter,
       
   789 					 int cleaned_count, gfp_t gfp)
       
   790 {
       
   791 	struct net_device *netdev = adapter->netdev;
       
   792 	struct pci_dev *pdev = adapter->pdev;
       
   793 	union e1000_rx_desc_extended *rx_desc;
       
   794 	struct e1000_ring *rx_ring = adapter->rx_ring;
       
   795 	struct e1000_buffer *buffer_info;
       
   796 	struct sk_buff *skb;
       
   797 	unsigned int i;
       
   798 	unsigned int bufsz = 256 - 16 /* for skb_reserve */;
       
   799 
       
   800 	i = rx_ring->next_to_use;
       
   801 	buffer_info = &rx_ring->buffer_info[i];
       
   802 
       
   803 	while (cleaned_count--) {
       
   804 		skb = buffer_info->skb;
       
   805 		if (skb) {
       
   806 			skb_trim(skb, 0);
       
   807 			goto check_page;
       
   808 		}
       
   809 
       
   810 		skb = __netdev_alloc_skb_ip_align(netdev, bufsz, gfp);
       
   811 		if (unlikely(!skb)) {
       
   812 			/* Better luck next round */
       
   813 			adapter->alloc_rx_buff_failed++;
       
   814 			break;
       
   815 		}
       
   816 
       
   817 		buffer_info->skb = skb;
       
   818 check_page:
       
   819 		/* allocate a new page if necessary */
       
   820 		if (!buffer_info->page) {
       
   821 			buffer_info->page = alloc_page(gfp);
       
   822 			if (unlikely(!buffer_info->page)) {
       
   823 				adapter->alloc_rx_buff_failed++;
       
   824 				break;
       
   825 			}
       
   826 		}
       
   827 
       
   828 		if (!buffer_info->dma)
       
   829 			buffer_info->dma = dma_map_page(&pdev->dev,
       
   830 			                                buffer_info->page, 0,
       
   831 			                                PAGE_SIZE,
       
   832 							DMA_FROM_DEVICE);
       
   833 
       
   834 		rx_desc = E1000_RX_DESC_EXT(*rx_ring, i);
       
   835 		rx_desc->read.buffer_addr = cpu_to_le64(buffer_info->dma);
       
   836 
       
   837 		if (unlikely(++i == rx_ring->count))
       
   838 			i = 0;
       
   839 		buffer_info = &rx_ring->buffer_info[i];
       
   840 	}
       
   841 
       
   842 	if (likely(rx_ring->next_to_use != i)) {
       
   843 		rx_ring->next_to_use = i;
       
   844 		if (unlikely(i-- == 0))
       
   845 			i = (rx_ring->count - 1);
       
   846 
       
   847 		/* Force memory writes to complete before letting h/w
       
   848 		 * know there are new descriptors to fetch.  (Only
       
   849 		 * applicable for weak-ordered memory model archs,
       
   850 		 * such as IA-64). */
       
   851 		wmb();
       
   852 		if (adapter->flags2 & FLAG2_PCIM2PCI_ARBITER_WA)
       
   853 			e1000e_update_rdt_wa(adapter, i);
       
   854 		else
       
   855 			writel(i, adapter->hw.hw_addr + rx_ring->tail);
       
   856 	}
       
   857 }
       
   858 
       
   859 /**
       
   860  * e1000_clean_rx_irq - Send received data up the network stack; legacy
       
   861  * @adapter: board private structure
       
   862  *
       
    863  * the return value indicates whether actual cleaning was done; there
       
   864  * is no guarantee that everything was cleaned
       
   865  **/
       
   866 static bool e1000_clean_rx_irq(struct e1000_adapter *adapter,
       
   867 			       int *work_done, int work_to_do)
       
   868 {
       
   869 	struct net_device *netdev = adapter->netdev;
       
   870 	struct pci_dev *pdev = adapter->pdev;
       
   871 	struct e1000_hw *hw = &adapter->hw;
       
   872 	struct e1000_ring *rx_ring = adapter->rx_ring;
       
   873 	union e1000_rx_desc_extended *rx_desc, *next_rxd;
       
   874 	struct e1000_buffer *buffer_info, *next_buffer;
       
   875 	u32 length, staterr;
       
   876 	unsigned int i;
       
   877 	int cleaned_count = 0;
       
   878 	bool cleaned = 0;
       
   879 	unsigned int total_rx_bytes = 0, total_rx_packets = 0;
       
   880 
       
   881 	i = rx_ring->next_to_clean;
       
   882 	rx_desc = E1000_RX_DESC_EXT(*rx_ring, i);
       
   883 	staterr = le32_to_cpu(rx_desc->wb.upper.status_error);
       
   884 	buffer_info = &rx_ring->buffer_info[i];
       
   885 
       
   886 	while (staterr & E1000_RXD_STAT_DD) {
       
   887 		struct sk_buff *skb;
       
   888 
       
   889 		if (*work_done >= work_to_do)
       
   890 			break;
       
   891 		(*work_done)++;
       
   892 		rmb();	/* read descriptor and rx_buffer_info after status DD */
       
   893 
       
   894 		skb = buffer_info->skb;
       
   895 		buffer_info->skb = NULL;
       
   896 
       
   897 		prefetch(skb->data - NET_IP_ALIGN);
       
   898 
       
   899 		i++;
       
   900 		if (i == rx_ring->count)
       
   901 			i = 0;
       
   902 		next_rxd = E1000_RX_DESC_EXT(*rx_ring, i);
       
   903 		prefetch(next_rxd);
       
   904 
       
   905 		next_buffer = &rx_ring->buffer_info[i];
       
   906 
       
   907 		cleaned = 1;
       
   908 		cleaned_count++;
       
   909 		dma_unmap_single(&pdev->dev,
       
   910 				 buffer_info->dma,
       
   911 				 adapter->rx_buffer_len,
       
   912 				 DMA_FROM_DEVICE);
       
   913 		buffer_info->dma = 0;
       
   914 
       
   915 		length = le16_to_cpu(rx_desc->wb.upper.length);
       
   916 
       
   917 		/*
       
   918 		 * !EOP means multiple descriptors were used to store a single
       
   919 		 * packet, if that's the case we need to toss it.  In fact, we
       
   920 		 * need to toss every packet with the EOP bit clear and the
       
   921 		 * next frame that _does_ have the EOP bit set, as it is by
       
   922 		 * definition only a frame fragment
       
   923 		 */
       
   924 		if (unlikely(!(staterr & E1000_RXD_STAT_EOP)))
       
   925 			adapter->flags2 |= FLAG2_IS_DISCARDING;
       
   926 
       
   927 		if (adapter->flags2 & FLAG2_IS_DISCARDING) {
       
   928 			/* All receives must fit into a single buffer */
       
   929 			e_dbg("Receive packet consumed multiple buffers\n");
       
   930 			/* recycle */
       
   931 			buffer_info->skb = skb;
       
   932 			if (staterr & E1000_RXD_STAT_EOP)
       
   933 				adapter->flags2 &= ~FLAG2_IS_DISCARDING;
       
   934 			goto next_desc;
       
   935 		}
       
   936 
       
   937 		if (staterr & E1000_RXDEXT_ERR_FRAME_ERR_MASK) {
       
   938 			/* recycle */
       
   939 			buffer_info->skb = skb;
       
   940 			goto next_desc;
       
   941 		}
       
   942 
       
   943 		/* adjust length to remove Ethernet CRC */
       
   944 		if (!(adapter->flags2 & FLAG2_CRC_STRIPPING))
       
   945 			length -= 4;
       
   946 
       
   947 		total_rx_bytes += length;
       
   948 		total_rx_packets++;
       
   949 
       
   950 		/*
       
   951 		 * code added for copybreak, this should improve
       
   952 		 * performance for small packets with large amounts
       
   953 		 * of reassembly being done in the stack
       
   954 		 */
       
   955 		if (length < copybreak) {
       
   956 			struct sk_buff *new_skb =
       
   957 			    netdev_alloc_skb_ip_align(netdev, length);
       
   958 			if (new_skb) {
       
   959 				skb_copy_to_linear_data_offset(new_skb,
       
   960 							       -NET_IP_ALIGN,
       
   961 							       (skb->data -
       
   962 								NET_IP_ALIGN),
       
   963 							       (length +
       
   964 								NET_IP_ALIGN));
       
   965 				/* save the skb in buffer_info as good */
       
   966 				buffer_info->skb = skb;
       
   967 				skb = new_skb;
       
   968 			}
       
   969 			/* else just continue with the old one */
       
   970 		}
       
   971 		/* end copybreak code */
       
   972 		skb_put(skb, length);
       
   973 
       
   974 		/* Receive Checksum Offload */
       
   975 		e1000_rx_checksum(adapter, staterr,
       
   976 				  le16_to_cpu(rx_desc->wb.lower.hi_dword.
       
   977 					      csum_ip.csum), skb);
       
   978 
       
   979 		e1000_receive_skb(adapter, netdev, skb, staterr,
       
   980 				  rx_desc->wb.upper.vlan);
       
   981 
       
   982 next_desc:
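        		/* clear the status byte (including DD) so this descriptor is
        		 * not mistaken for a completed one when it is reused
        		 */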
       
   983 		rx_desc->wb.upper.status_error &= cpu_to_le32(~0xFF);
       
   984 
       
   985 		/* return some buffers to hardware, one at a time is too slow */
       
   986 		if (cleaned_count >= E1000_RX_BUFFER_WRITE) {
       
   987 			adapter->alloc_rx_buf(adapter, cleaned_count,
       
   988 					      GFP_ATOMIC);
       
   989 			cleaned_count = 0;
       
   990 		}
       
   991 
       
   992 		/* use prefetched values */
       
   993 		rx_desc = next_rxd;
       
   994 		buffer_info = next_buffer;
       
   995 
       
   996 		staterr = le32_to_cpu(rx_desc->wb.upper.status_error);
       
   997 	}
       
   998 	rx_ring->next_to_clean = i;
       
   999 
       
  1000 	cleaned_count = e1000_desc_unused(rx_ring);
       
  1001 	if (cleaned_count)
       
  1002 		adapter->alloc_rx_buf(adapter, cleaned_count, GFP_ATOMIC);
       
  1003 
       
  1004 	adapter->total_rx_bytes += total_rx_bytes;
       
  1005 	adapter->total_rx_packets += total_rx_packets;
       
  1006 	return cleaned;
       
  1007 }
       
  1008 
       
  1009 static void e1000_put_txbuf(struct e1000_adapter *adapter,
       
  1010 			     struct e1000_buffer *buffer_info)
       
  1011 {
       
  1012 	if (buffer_info->dma) {
       
  1013 		if (buffer_info->mapped_as_page)
       
  1014 			dma_unmap_page(&adapter->pdev->dev, buffer_info->dma,
       
  1015 				       buffer_info->length, DMA_TO_DEVICE);
       
  1016 		else
       
  1017 			dma_unmap_single(&adapter->pdev->dev, buffer_info->dma,
       
  1018 					 buffer_info->length, DMA_TO_DEVICE);
       
  1019 		buffer_info->dma = 0;
       
  1020 	}
       
  1021 	if (buffer_info->skb) {
       
  1022 		dev_kfree_skb_any(buffer_info->skb);
       
  1023 		buffer_info->skb = NULL;
       
  1024 	}
       
  1025 	buffer_info->time_stamp = 0;
       
  1026 }
       
  1027 
       
  1028 static void e1000_print_hw_hang(struct work_struct *work)
       
  1029 {
       
  1030 	struct e1000_adapter *adapter = container_of(work,
       
  1031 	                                             struct e1000_adapter,
       
  1032 	                                             print_hang_task);
       
  1033 	struct net_device *netdev = adapter->netdev;
       
  1034 	struct e1000_ring *tx_ring = adapter->tx_ring;
       
  1035 	unsigned int i = tx_ring->next_to_clean;
       
  1036 	unsigned int eop = tx_ring->buffer_info[i].next_to_watch;
       
  1037 	struct e1000_tx_desc *eop_desc = E1000_TX_DESC(*tx_ring, eop);
       
  1038 	struct e1000_hw *hw = &adapter->hw;
       
  1039 	u16 phy_status, phy_1000t_status, phy_ext_status;
       
  1040 	u16 pci_status;
       
  1041 
       
  1042 	if (test_bit(__E1000_DOWN, &adapter->state))
       
  1043 		return;
       
  1044 
       
  1045 	if (!adapter->tx_hang_recheck &&
       
  1046 	    (adapter->flags2 & FLAG2_DMA_BURST)) {
       
   1047 		/* May be blocked on write-back; flush pending descriptor

   1048 		 * writebacks to memory and detect again
       
  1049 		 */
       
  1050 		ew32(TIDV, adapter->tx_int_delay | E1000_TIDV_FPD);
       
  1051 		/* execute the writes immediately */
       
  1052 		e1e_flush();
       
  1053 		adapter->tx_hang_recheck = true;
       
  1054 		return;
       
  1055 	}
       
  1056 	/* Real hang detected */
       
  1057 	adapter->tx_hang_recheck = false;
       
  1058 	netif_stop_queue(netdev);
       
  1059 
       
  1060 	e1e_rphy(hw, PHY_STATUS, &phy_status);
       
  1061 	e1e_rphy(hw, PHY_1000T_STATUS, &phy_1000t_status);
       
  1062 	e1e_rphy(hw, PHY_EXT_STATUS, &phy_ext_status);
       
  1063 
       
  1064 	pci_read_config_word(adapter->pdev, PCI_STATUS, &pci_status);
       
  1065 
       
  1066 	/* detected Hardware unit hang */
       
  1067 	e_err("Detected Hardware Unit Hang:\n"
       
  1068 	      "  TDH                  <%x>\n"
       
  1069 	      "  TDT                  <%x>\n"
       
  1070 	      "  next_to_use          <%x>\n"
       
  1071 	      "  next_to_clean        <%x>\n"
       
  1072 	      "buffer_info[next_to_clean]:\n"
       
  1073 	      "  time_stamp           <%lx>\n"
       
  1074 	      "  next_to_watch        <%x>\n"
       
  1075 	      "  jiffies              <%lx>\n"
       
  1076 	      "  next_to_watch.status <%x>\n"
       
  1077 	      "MAC Status             <%x>\n"
       
  1078 	      "PHY Status             <%x>\n"
       
  1079 	      "PHY 1000BASE-T Status  <%x>\n"
       
  1080 	      "PHY Extended Status    <%x>\n"
       
  1081 	      "PCI Status             <%x>\n",
       
  1082 	      readl(adapter->hw.hw_addr + tx_ring->head),
       
  1083 	      readl(adapter->hw.hw_addr + tx_ring->tail),
       
  1084 	      tx_ring->next_to_use,
       
  1085 	      tx_ring->next_to_clean,
       
  1086 	      tx_ring->buffer_info[eop].time_stamp,
       
  1087 	      eop,
       
  1088 	      jiffies,
       
  1089 	      eop_desc->upper.fields.status,
       
  1090 	      er32(STATUS),
       
  1091 	      phy_status,
       
  1092 	      phy_1000t_status,
       
  1093 	      phy_ext_status,
       
  1094 	      pci_status);
       
  1095 }
       
  1096 
       
  1097 /**
       
  1098  * e1000_clean_tx_irq - Reclaim resources after transmit completes
       
  1099  * @adapter: board private structure
       
  1100  *
       
   1101  * the return value indicates whether actual cleaning was done; there
       
  1102  * is no guarantee that everything was cleaned
       
  1103  **/
       
  1104 static bool e1000_clean_tx_irq(struct e1000_adapter *adapter)
       
  1105 {
       
  1106 	struct net_device *netdev = adapter->netdev;
       
  1107 	struct e1000_hw *hw = &adapter->hw;
       
  1108 	struct e1000_ring *tx_ring = adapter->tx_ring;
       
  1109 	struct e1000_tx_desc *tx_desc, *eop_desc;
       
  1110 	struct e1000_buffer *buffer_info;
       
  1111 	unsigned int i, eop;
       
  1112 	unsigned int count = 0;
       
  1113 	unsigned int total_tx_bytes = 0, total_tx_packets = 0;
       
  1114 
       
  1115 	i = tx_ring->next_to_clean;
       
  1116 	eop = tx_ring->buffer_info[i].next_to_watch;
       
  1117 	eop_desc = E1000_TX_DESC(*tx_ring, eop);
       
  1118 
       
  1119 	while ((eop_desc->upper.data & cpu_to_le32(E1000_TXD_STAT_DD)) &&
       
  1120 	       (count < tx_ring->count)) {
       
  1121 		bool cleaned = false;
       
  1122 		rmb(); /* read buffer_info after eop_desc */
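        		/* unmap and free every buffer up to and including the EOP
        		 * descriptor for this packet
        		 */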
       
  1123 		for (; !cleaned; count++) {
       
  1124 			tx_desc = E1000_TX_DESC(*tx_ring, i);
       
  1125 			buffer_info = &tx_ring->buffer_info[i];
       
  1126 			cleaned = (i == eop);
       
  1127 
       
  1128 			if (cleaned) {
       
  1129 				total_tx_packets += buffer_info->segs;
       
  1130 				total_tx_bytes += buffer_info->bytecount;
       
  1131 			}
       
  1132 
       
  1133 			e1000_put_txbuf(adapter, buffer_info);
       
  1134 			tx_desc->upper.data = 0;
       
  1135 
       
  1136 			i++;
       
  1137 			if (i == tx_ring->count)
       
  1138 				i = 0;
       
  1139 		}
       
  1140 
       
  1141 		if (i == tx_ring->next_to_use)
       
  1142 			break;
       
  1143 		eop = tx_ring->buffer_info[i].next_to_watch;
       
  1144 		eop_desc = E1000_TX_DESC(*tx_ring, eop);
       
  1145 	}
       
  1146 
       
  1147 	tx_ring->next_to_clean = i;
       
  1148 
       
  1149 #define TX_WAKE_THRESHOLD 32
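        	/* wake the queue only once a healthy number of descriptors are
        	 * free again so it does not bounce between stopped and running
        	 */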
       
  1150 	if (count && netif_carrier_ok(netdev) &&
       
  1151 	    e1000_desc_unused(tx_ring) >= TX_WAKE_THRESHOLD) {
       
  1152 		/* Make sure that anybody stopping the queue after this
       
  1153 		 * sees the new next_to_clean.
       
  1154 		 */
       
  1155 		smp_mb();
       
  1156 
       
  1157 		if (netif_queue_stopped(netdev) &&
       
  1158 		    !(test_bit(__E1000_DOWN, &adapter->state))) {
       
  1159 			netif_wake_queue(netdev);
       
  1160 			++adapter->restart_queue;
       
  1161 		}
       
  1162 	}
       
  1163 
       
  1164 	if (adapter->detect_tx_hung) {
       
  1165 		/*
       
  1166 		 * Detect a transmit hang in hardware, this serializes the
       
  1167 		 * check with the clearing of time_stamp and movement of i
       
  1168 		 */
       
  1169 		adapter->detect_tx_hung = 0;
       
  1170 		if (tx_ring->buffer_info[i].time_stamp &&
       
  1171 		    time_after(jiffies, tx_ring->buffer_info[i].time_stamp
       
  1172 			       + (adapter->tx_timeout_factor * HZ)) &&
       
  1173 		    !(er32(STATUS) & E1000_STATUS_TXOFF))
       
  1174 			schedule_work(&adapter->print_hang_task);
       
  1175 		else
       
  1176 			adapter->tx_hang_recheck = false;
       
  1177 	}
       
  1178 	adapter->total_tx_bytes += total_tx_bytes;
       
  1179 	adapter->total_tx_packets += total_tx_packets;
       
  1180 	return count < tx_ring->count;
       
  1181 }
       
  1182 
       
  1183 /**
       
  1184  * e1000_clean_rx_irq_ps - Send received data up the network stack; packet split
       
  1185  * @adapter: board private structure
       
  1186  *
       
   1187  * the return value indicates whether actual cleaning was done; there
       
  1188  * is no guarantee that everything was cleaned
       
  1189  **/
       
  1190 static bool e1000_clean_rx_irq_ps(struct e1000_adapter *adapter,
       
  1191 				  int *work_done, int work_to_do)
       
  1192 {
       
  1193 	struct e1000_hw *hw = &adapter->hw;
       
  1194 	union e1000_rx_desc_packet_split *rx_desc, *next_rxd;
       
  1195 	struct net_device *netdev = adapter->netdev;
       
  1196 	struct pci_dev *pdev = adapter->pdev;
       
  1197 	struct e1000_ring *rx_ring = adapter->rx_ring;
       
  1198 	struct e1000_buffer *buffer_info, *next_buffer;
       
  1199 	struct e1000_ps_page *ps_page;
       
  1200 	struct sk_buff *skb;
       
  1201 	unsigned int i, j;
       
  1202 	u32 length, staterr;
       
  1203 	int cleaned_count = 0;
       
  1204 	bool cleaned = 0;
       
  1205 	unsigned int total_rx_bytes = 0, total_rx_packets = 0;
       
  1206 
       
  1207 	i = rx_ring->next_to_clean;
       
  1208 	rx_desc = E1000_RX_DESC_PS(*rx_ring, i);
       
  1209 	staterr = le32_to_cpu(rx_desc->wb.middle.status_error);
       
  1210 	buffer_info = &rx_ring->buffer_info[i];
       
  1211 
       
  1212 	while (staterr & E1000_RXD_STAT_DD) {
       
  1213 		if (*work_done >= work_to_do)
       
  1214 			break;
       
  1215 		(*work_done)++;
       
  1216 		skb = buffer_info->skb;
       
  1217 		rmb();	/* read descriptor and rx_buffer_info after status DD */
       
  1218 
       
  1219 		/* in the packet split case this is header only */
       
  1220 		prefetch(skb->data - NET_IP_ALIGN);
       
  1221 
       
  1222 		i++;
       
  1223 		if (i == rx_ring->count)
       
  1224 			i = 0;
       
  1225 		next_rxd = E1000_RX_DESC_PS(*rx_ring, i);
       
  1226 		prefetch(next_rxd);
       
  1227 
       
  1228 		next_buffer = &rx_ring->buffer_info[i];
       
  1229 
       
  1230 		cleaned = 1;
       
  1231 		cleaned_count++;
       
  1232 		dma_unmap_single(&pdev->dev, buffer_info->dma,
       
  1233 				 adapter->rx_ps_bsize0, DMA_FROM_DEVICE);
       
  1234 		buffer_info->dma = 0;
       
  1235 
       
  1236 		/* see !EOP comment in other Rx routine */
       
  1237 		if (!(staterr & E1000_RXD_STAT_EOP))
       
  1238 			adapter->flags2 |= FLAG2_IS_DISCARDING;
       
  1239 
       
  1240 		if (adapter->flags2 & FLAG2_IS_DISCARDING) {
       
  1241 			e_dbg("Packet Split buffers didn't pick up the full "
       
  1242 			      "packet\n");
       
  1243 			dev_kfree_skb_irq(skb);
       
  1244 			if (staterr & E1000_RXD_STAT_EOP)
       
  1245 				adapter->flags2 &= ~FLAG2_IS_DISCARDING;
       
  1246 			goto next_desc;
       
  1247 		}
       
  1248 
       
  1249 		if (staterr & E1000_RXDEXT_ERR_FRAME_ERR_MASK) {
       
  1250 			dev_kfree_skb_irq(skb);
       
  1251 			goto next_desc;
       
  1252 		}
       
  1253 
       
  1254 		length = le16_to_cpu(rx_desc->wb.middle.length0);
       
  1255 
       
  1256 		if (!length) {
       
  1257 			e_dbg("Last part of the packet spanning multiple "
       
  1258 			      "descriptors\n");
       
  1259 			dev_kfree_skb_irq(skb);
       
  1260 			goto next_desc;
       
  1261 		}
       
  1262 
       
  1263 		/* Good Receive */
       
  1264 		skb_put(skb, length);
       
  1265 
       
  1266 		{
       
  1267 		/*
       
  1268 		 * this looks ugly, but it seems compiler issues make it
       
  1269 		 * more efficient than reusing j
       
  1270 		 */
       
  1271 		int l1 = le16_to_cpu(rx_desc->wb.upper.length[0]);
       
  1272 
       
  1273 		/*
       
   1274 		 * page alloc/put takes too long and affects small packet
       
  1275 		 * throughput, so unsplit small packets and save the alloc/put
       
   1276 		 * kmap_* is only valid to call in softirq (napi) context
       
  1277 		 */
       
  1278 		if (l1 && (l1 <= copybreak) &&
       
  1279 		    ((length + l1) <= adapter->rx_ps_bsize0)) {
       
  1280 			u8 *vaddr;
       
  1281 
       
  1282 			ps_page = &buffer_info->ps_pages[0];
       
  1283 
       
  1284 			/*
       
  1285 			 * there is no documentation about how to call
       
  1286 			 * kmap_atomic, so we can't hold the mapping
       
  1287 			 * very long
       
  1288 			 */
       
  1289 			dma_sync_single_for_cpu(&pdev->dev, ps_page->dma,
       
  1290 						PAGE_SIZE, DMA_FROM_DEVICE);
       
  1291 			vaddr = kmap_atomic(ps_page->page, KM_SKB_DATA_SOFTIRQ);
       
  1292 			memcpy(skb_tail_pointer(skb), vaddr, l1);
       
  1293 			kunmap_atomic(vaddr, KM_SKB_DATA_SOFTIRQ);
       
  1294 			dma_sync_single_for_device(&pdev->dev, ps_page->dma,
       
  1295 						   PAGE_SIZE, DMA_FROM_DEVICE);
       
  1296 
       
  1297 			/* remove the CRC */
       
  1298 			if (!(adapter->flags2 & FLAG2_CRC_STRIPPING))
       
  1299 				l1 -= 4;
       
  1300 
       
  1301 			skb_put(skb, l1);
       
  1302 			goto copydone;
       
  1303 		} /* if */
       
  1304 		}
       
  1305 
       
  1306 		for (j = 0; j < PS_PAGE_BUFFERS; j++) {
       
  1307 			length = le16_to_cpu(rx_desc->wb.upper.length[j]);
       
  1308 			if (!length)
       
  1309 				break;
       
  1310 
       
  1311 			ps_page = &buffer_info->ps_pages[j];
       
  1312 			dma_unmap_page(&pdev->dev, ps_page->dma, PAGE_SIZE,
       
  1313 				       DMA_FROM_DEVICE);
       
  1314 			ps_page->dma = 0;
       
  1315 			skb_fill_page_desc(skb, j, ps_page->page, 0, length);
       
  1316 			ps_page->page = NULL;
       
  1317 			skb->len += length;
       
  1318 			skb->data_len += length;
       
  1319 			skb->truesize += PAGE_SIZE;
       
  1320 		}
       
  1321 
       
   1322 		/* strip the Ethernet CRC; the problem is we're using pages now so
       
  1323 		 * this whole operation can get a little cpu intensive
       
  1324 		 */
       
  1325 		if (!(adapter->flags2 & FLAG2_CRC_STRIPPING))
       
  1326 			pskb_trim(skb, skb->len - 4);
       
  1327 
       
  1328 copydone:
       
  1329 		total_rx_bytes += skb->len;
       
  1330 		total_rx_packets++;
       
  1331 
       
  1332 		e1000_rx_checksum(adapter, staterr, le16_to_cpu(
       
  1333 			rx_desc->wb.lower.hi_dword.csum_ip.csum), skb);
       
  1334 
       
  1335 		if (rx_desc->wb.upper.header_status &
       
  1336 			   cpu_to_le16(E1000_RXDPS_HDRSTAT_HDRSP))
       
  1337 			adapter->rx_hdr_split++;
       
  1338 
       
  1339 		e1000_receive_skb(adapter, netdev, skb,
       
  1340 				  staterr, rx_desc->wb.middle.vlan);
       
  1341 
       
  1342 next_desc:
       
  1343 		rx_desc->wb.middle.status_error &= cpu_to_le32(~0xFF);
       
  1344 		buffer_info->skb = NULL;
       
  1345 
       
  1346 		/* return some buffers to hardware, one at a time is too slow */
       
  1347 		if (cleaned_count >= E1000_RX_BUFFER_WRITE) {
       
  1348 			adapter->alloc_rx_buf(adapter, cleaned_count,
       
  1349 					      GFP_ATOMIC);
       
  1350 			cleaned_count = 0;
       
  1351 		}
       
  1352 
       
  1353 		/* use prefetched values */
       
  1354 		rx_desc = next_rxd;
       
  1355 		buffer_info = next_buffer;
       
  1356 
       
  1357 		staterr = le32_to_cpu(rx_desc->wb.middle.status_error);
       
  1358 	}
       
  1359 	rx_ring->next_to_clean = i;
       
  1360 
       
  1361 	cleaned_count = e1000_desc_unused(rx_ring);
       
  1362 	if (cleaned_count)
       
  1363 		adapter->alloc_rx_buf(adapter, cleaned_count, GFP_ATOMIC);
       
  1364 
       
  1365 	adapter->total_rx_bytes += total_rx_bytes;
       
  1366 	adapter->total_rx_packets += total_rx_packets;
       
  1367 	return cleaned;
       
  1368 }
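/*
 * Illustrative note (sketch, not driver code): in the packet-split path
 * above, a frame whose page-0 payload l1 satisfies
 *
 *	l1 <= copybreak && (header_len + l1) <= rx_ps_bsize0
 *
 * is memcpy()'d into the header buffer under kmap_atomic() and the page
 * mapping is synced back to the device for reuse, saving a
 * put_page()/alloc_page() round trip.  For example, a small TCP ACK is
 * unsplit this way, while a 1400-byte payload goes through the
 * skb_fill_page_desc() loop instead.  copybreak is a module parameter
 * defined elsewhere in this driver.
 */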
       
  1369 
       
  1370 /**
       
   1371  * e1000_consume_page - helper to account page data added to an skb
       
  1372  **/
       
  1373 static void e1000_consume_page(struct e1000_buffer *bi, struct sk_buff *skb,
       
  1374                                u16 length)
       
  1375 {
       
  1376 	bi->page = NULL;
       
  1377 	skb->len += length;
       
  1378 	skb->data_len += length;
       
  1379 	skb->truesize += PAGE_SIZE;
       
  1380 }
       
  1381 
       
  1382 /**
       
  1383  * e1000_clean_jumbo_rx_irq - Send received data up the network stack; legacy
       
  1384  * @adapter: board private structure
       
  1385  *
       
   1386  * The return value indicates whether actual cleaning was done; there
       
  1387  * is no guarantee that everything was cleaned
       
  1388  **/
       
  1389 
       
  1390 static bool e1000_clean_jumbo_rx_irq(struct e1000_adapter *adapter,
       
  1391                                      int *work_done, int work_to_do)
       
  1392 {
       
  1393 	struct net_device *netdev = adapter->netdev;
       
  1394 	struct pci_dev *pdev = adapter->pdev;
       
  1395 	struct e1000_ring *rx_ring = adapter->rx_ring;
       
  1396 	union e1000_rx_desc_extended *rx_desc, *next_rxd;
       
  1397 	struct e1000_buffer *buffer_info, *next_buffer;
       
  1398 	u32 length, staterr;
       
  1399 	unsigned int i;
       
  1400 	int cleaned_count = 0;
       
  1401 	bool cleaned = false;
       
   1402 	unsigned int total_rx_bytes = 0, total_rx_packets = 0;
       
  1403 
       
  1404 	i = rx_ring->next_to_clean;
       
  1405 	rx_desc = E1000_RX_DESC_EXT(*rx_ring, i);
       
  1406 	staterr = le32_to_cpu(rx_desc->wb.upper.status_error);
       
  1407 	buffer_info = &rx_ring->buffer_info[i];
       
  1408 
       
  1409 	while (staterr & E1000_RXD_STAT_DD) {
       
  1410 		struct sk_buff *skb;
       
  1411 
       
  1412 		if (*work_done >= work_to_do)
       
  1413 			break;
       
  1414 		(*work_done)++;
       
  1415 		rmb();	/* read descriptor and rx_buffer_info after status DD */
       
  1416 
       
  1417 		skb = buffer_info->skb;
       
  1418 		buffer_info->skb = NULL;
       
  1419 
       
  1420 		++i;
       
  1421 		if (i == rx_ring->count)
       
  1422 			i = 0;
       
  1423 		next_rxd = E1000_RX_DESC_EXT(*rx_ring, i);
       
  1424 		prefetch(next_rxd);
       
  1425 
       
  1426 		next_buffer = &rx_ring->buffer_info[i];
       
  1427 
       
  1428 		cleaned = true;
       
  1429 		cleaned_count++;
       
  1430 		dma_unmap_page(&pdev->dev, buffer_info->dma, PAGE_SIZE,
       
  1431 			       DMA_FROM_DEVICE);
       
  1432 		buffer_info->dma = 0;
       
  1433 
       
  1434 		length = le16_to_cpu(rx_desc->wb.upper.length);
       
  1435 
       
   1436 		/* error bits are only valid for DD + EOP descriptors */
       
  1437 		if (unlikely((staterr & E1000_RXD_STAT_EOP) &&
       
  1438 			     (staterr & E1000_RXDEXT_ERR_FRAME_ERR_MASK))) {
       
  1439 			/* recycle both page and skb */
       
  1440 			buffer_info->skb = skb;
       
  1441 			/* an error means any chain goes out the window too */
       
  1442 			if (rx_ring->rx_skb_top)
       
  1443 				dev_kfree_skb_irq(rx_ring->rx_skb_top);
       
  1444 			rx_ring->rx_skb_top = NULL;
       
  1445 			goto next_desc;
       
  1446 		}
       
  1447 
       
  1448 #define rxtop (rx_ring->rx_skb_top)
       
  1449 		if (!(staterr & E1000_RXD_STAT_EOP)) {
       
  1450 			/* this descriptor is only the beginning (or middle) */
       
  1451 			if (!rxtop) {
       
  1452 				/* this is the beginning of a chain */
       
  1453 				rxtop = skb;
       
  1454 				skb_fill_page_desc(rxtop, 0, buffer_info->page,
       
  1455 				                   0, length);
       
  1456 			} else {
       
  1457 				/* this is the middle of a chain */
       
  1458 				skb_fill_page_desc(rxtop,
       
  1459 				    skb_shinfo(rxtop)->nr_frags,
       
  1460 				    buffer_info->page, 0, length);
       
  1461 				/* re-use the skb, only consumed the page */
       
  1462 				buffer_info->skb = skb;
       
  1463 			}
       
  1464 			e1000_consume_page(buffer_info, rxtop, length);
       
  1465 			goto next_desc;
       
  1466 		} else {
       
  1467 			if (rxtop) {
       
  1468 				/* end of the chain */
       
  1469 				skb_fill_page_desc(rxtop,
       
  1470 				    skb_shinfo(rxtop)->nr_frags,
       
  1471 				    buffer_info->page, 0, length);
       
  1472 				/* re-use the current skb, we only consumed the
       
  1473 				 * page */
       
  1474 				buffer_info->skb = skb;
       
  1475 				skb = rxtop;
       
  1476 				rxtop = NULL;
       
  1477 				e1000_consume_page(buffer_info, skb, length);
       
  1478 			} else {
       
   1479 				/* no chain and got EOP: this buf is the whole
       
   1480 				 * packet; copybreak to save the put_page/alloc_page */
       
  1481 				if (length <= copybreak &&
       
  1482 				    skb_tailroom(skb) >= length) {
       
  1483 					u8 *vaddr;
       
  1484 					vaddr = kmap_atomic(buffer_info->page,
       
  1485 					                   KM_SKB_DATA_SOFTIRQ);
       
  1486 					memcpy(skb_tail_pointer(skb), vaddr,
       
  1487 					       length);
       
  1488 					kunmap_atomic(vaddr,
       
  1489 					              KM_SKB_DATA_SOFTIRQ);
       
  1490 					/* re-use the page, so don't erase
       
  1491 					 * buffer_info->page */
       
  1492 					skb_put(skb, length);
       
  1493 				} else {
       
  1494 					skb_fill_page_desc(skb, 0,
       
  1495 					                   buffer_info->page, 0,
       
  1496 				                           length);
       
  1497 					e1000_consume_page(buffer_info, skb,
       
  1498 					                   length);
       
  1499 				}
       
  1500 			}
       
  1501 		}
       
  1502 
       
  1503 		/* Receive Checksum Offload XXX recompute due to CRC strip? */
       
  1504 		e1000_rx_checksum(adapter, staterr,
       
  1505 				  le16_to_cpu(rx_desc->wb.lower.hi_dword.
       
  1506 					      csum_ip.csum), skb);
       
  1507 
       
  1508 		/* probably a little skewed due to removing CRC */
       
  1509 		total_rx_bytes += skb->len;
       
  1510 		total_rx_packets++;
       
  1511 
       
  1512 		/* eth type trans needs skb->data to point to something */
       
  1513 		if (!pskb_may_pull(skb, ETH_HLEN)) {
       
  1514 			e_err("pskb_may_pull failed.\n");
       
  1515 			dev_kfree_skb_irq(skb);
       
  1516 			goto next_desc;
       
  1517 		}
       
  1518 
       
  1519 		e1000_receive_skb(adapter, netdev, skb, staterr,
       
  1520 				  rx_desc->wb.upper.vlan);
       
  1521 
       
  1522 next_desc:
       
  1523 		rx_desc->wb.upper.status_error &= cpu_to_le32(~0xFF);
       
  1524 
       
  1525 		/* return some buffers to hardware, one at a time is too slow */
       
  1526 		if (unlikely(cleaned_count >= E1000_RX_BUFFER_WRITE)) {
       
  1527 			adapter->alloc_rx_buf(adapter, cleaned_count,
       
  1528 					      GFP_ATOMIC);
       
  1529 			cleaned_count = 0;
       
  1530 		}
       
  1531 
       
  1532 		/* use prefetched values */
       
  1533 		rx_desc = next_rxd;
       
  1534 		buffer_info = next_buffer;
       
  1535 
       
  1536 		staterr = le32_to_cpu(rx_desc->wb.upper.status_error);
       
  1537 	}
       
  1538 	rx_ring->next_to_clean = i;
       
  1539 
       
  1540 	cleaned_count = e1000_desc_unused(rx_ring);
       
  1541 	if (cleaned_count)
       
  1542 		adapter->alloc_rx_buf(adapter, cleaned_count, GFP_ATOMIC);
       
  1543 
       
  1544 	adapter->total_rx_bytes += total_rx_bytes;
       
  1545 	adapter->total_rx_packets += total_rx_packets;
       
  1546 	return cleaned;
       
  1547 }
       
  1548 
       
  1549 /**
       
  1550  * e1000_clean_rx_ring - Free Rx Buffers per Queue
       
  1551  * @adapter: board private structure
       
  1552  **/
       
  1553 static void e1000_clean_rx_ring(struct e1000_adapter *adapter)
       
  1554 {
       
  1555 	struct e1000_ring *rx_ring = adapter->rx_ring;
       
  1556 	struct e1000_buffer *buffer_info;
       
  1557 	struct e1000_ps_page *ps_page;
       
  1558 	struct pci_dev *pdev = adapter->pdev;
       
  1559 	unsigned int i, j;
       
  1560 
       
  1561 	/* Free all the Rx ring sk_buffs */
       
  1562 	for (i = 0; i < rx_ring->count; i++) {
       
  1563 		buffer_info = &rx_ring->buffer_info[i];
       
  1564 		if (buffer_info->dma) {
       
  1565 			if (adapter->clean_rx == e1000_clean_rx_irq)
       
  1566 				dma_unmap_single(&pdev->dev, buffer_info->dma,
       
  1567 						 adapter->rx_buffer_len,
       
  1568 						 DMA_FROM_DEVICE);
       
  1569 			else if (adapter->clean_rx == e1000_clean_jumbo_rx_irq)
       
  1570 				dma_unmap_page(&pdev->dev, buffer_info->dma,
       
  1571 				               PAGE_SIZE,
       
  1572 					       DMA_FROM_DEVICE);
       
  1573 			else if (adapter->clean_rx == e1000_clean_rx_irq_ps)
       
  1574 				dma_unmap_single(&pdev->dev, buffer_info->dma,
       
  1575 						 adapter->rx_ps_bsize0,
       
  1576 						 DMA_FROM_DEVICE);
       
  1577 			buffer_info->dma = 0;
       
  1578 		}
       
  1579 
       
  1580 		if (buffer_info->page) {
       
  1581 			put_page(buffer_info->page);
       
  1582 			buffer_info->page = NULL;
       
  1583 		}
       
  1584 
       
  1585 		if (buffer_info->skb) {
       
  1586 			dev_kfree_skb(buffer_info->skb);
       
  1587 			buffer_info->skb = NULL;
       
  1588 		}
       
  1589 
       
  1590 		for (j = 0; j < PS_PAGE_BUFFERS; j++) {
       
  1591 			ps_page = &buffer_info->ps_pages[j];
       
  1592 			if (!ps_page->page)
       
  1593 				break;
       
  1594 			dma_unmap_page(&pdev->dev, ps_page->dma, PAGE_SIZE,
       
  1595 				       DMA_FROM_DEVICE);
       
  1596 			ps_page->dma = 0;
       
  1597 			put_page(ps_page->page);
       
  1598 			ps_page->page = NULL;
       
  1599 		}
       
  1600 	}
       
  1601 
       
   1602 	/* there may also be some cached data from a chained receive */
       
  1603 	if (rx_ring->rx_skb_top) {
       
  1604 		dev_kfree_skb(rx_ring->rx_skb_top);
       
  1605 		rx_ring->rx_skb_top = NULL;
       
  1606 	}
       
  1607 
       
  1608 	/* Zero out the descriptor ring */
       
  1609 	memset(rx_ring->desc, 0, rx_ring->size);
       
  1610 
       
  1611 	rx_ring->next_to_clean = 0;
       
  1612 	rx_ring->next_to_use = 0;
       
  1613 	adapter->flags2 &= ~FLAG2_IS_DISCARDING;
       
  1614 
       
  1615 	writel(0, adapter->hw.hw_addr + rx_ring->head);
       
  1616 	writel(0, adapter->hw.hw_addr + rx_ring->tail);
       
  1617 }
       
  1618 
       
  1619 static void e1000e_downshift_workaround(struct work_struct *work)
       
  1620 {
       
  1621 	struct e1000_adapter *adapter = container_of(work,
       
  1622 					struct e1000_adapter, downshift_task);
       
  1623 
       
  1624 	if (test_bit(__E1000_DOWN, &adapter->state))
       
  1625 		return;
       
  1626 
       
  1627 	e1000e_gig_downshift_workaround_ich8lan(&adapter->hw);
       
  1628 }
       
  1629 
       
  1630 /**
       
  1631  * e1000_intr_msi - Interrupt Handler
       
  1632  * @irq: interrupt number
       
  1633  * @data: pointer to a network interface device structure
       
  1634  **/
       
  1635 static irqreturn_t e1000_intr_msi(int irq, void *data)
       
  1636 {
       
  1637 	struct net_device *netdev = data;
       
  1638 	struct e1000_adapter *adapter = netdev_priv(netdev);
       
  1639 	struct e1000_hw *hw = &adapter->hw;
       
  1640 	u32 icr = er32(ICR);
       
  1641 
       
  1642 	/*
       
   1643 	 * reading ICR disables interrupts using IAM
       
  1644 	 */
       
  1645 
       
  1646 	if (icr & E1000_ICR_LSC) {
       
  1647 		hw->mac.get_link_status = 1;
       
  1648 		/*
       
  1649 		 * ICH8 workaround-- Call gig speed drop workaround on cable
       
  1650 		 * disconnect (LSC) before accessing any PHY registers
       
  1651 		 */
       
  1652 		if ((adapter->flags & FLAG_LSC_GIG_SPEED_DROP) &&
       
  1653 		    (!(er32(STATUS) & E1000_STATUS_LU)))
       
  1654 			schedule_work(&adapter->downshift_task);
       
  1655 
       
  1656 		/*
       
  1657 		 * 80003ES2LAN workaround-- For packet buffer work-around on
       
  1658 		 * link down event; disable receives here in the ISR and reset
       
  1659 		 * adapter in watchdog
       
  1660 		 */
       
  1661 		if (netif_carrier_ok(netdev) &&
       
  1662 		    adapter->flags & FLAG_RX_NEEDS_RESTART) {
       
  1663 			/* disable receives */
       
  1664 			u32 rctl = er32(RCTL);
       
  1665 			ew32(RCTL, rctl & ~E1000_RCTL_EN);
       
  1666 			adapter->flags |= FLAG_RX_RESTART_NOW;
       
  1667 		}
       
  1668 		/* guard against interrupt when we're going down */
       
  1669 		if (!test_bit(__E1000_DOWN, &adapter->state))
       
  1670 			mod_timer(&adapter->watchdog_timer, jiffies + 1);
       
  1671 	}
       
  1672 
       
  1673 	if (napi_schedule_prep(&adapter->napi)) {
       
  1674 		adapter->total_tx_bytes = 0;
       
  1675 		adapter->total_tx_packets = 0;
       
  1676 		adapter->total_rx_bytes = 0;
       
  1677 		adapter->total_rx_packets = 0;
       
  1678 		__napi_schedule(&adapter->napi);
       
  1679 	}
       
  1680 
       
  1681 	return IRQ_HANDLED;
       
  1682 }
       
  1683 
       
  1684 /**
       
  1685  * e1000_intr - Interrupt Handler
       
  1686  * @irq: interrupt number
       
  1687  * @data: pointer to a network interface device structure
       
  1688  **/
       
  1689 static irqreturn_t e1000_intr(int irq, void *data)
       
  1690 {
       
  1691 	struct net_device *netdev = data;
       
  1692 	struct e1000_adapter *adapter = netdev_priv(netdev);
       
  1693 	struct e1000_hw *hw = &adapter->hw;
       
  1694 	u32 rctl, icr = er32(ICR);
       
  1695 
       
  1696 	if (!icr || test_bit(__E1000_DOWN, &adapter->state))
       
  1697 		return IRQ_NONE;  /* Not our interrupt */
       
  1698 
       
  1699 	/*
       
  1700 	 * IMS will not auto-mask if INT_ASSERTED is not set, and if it is
       
  1701 	 * not set, then the adapter didn't send an interrupt
       
  1702 	 */
       
  1703 	if (!(icr & E1000_ICR_INT_ASSERTED))
       
  1704 		return IRQ_NONE;
       
  1705 
       
  1706 	/*
       
  1707 	 * Interrupt Auto-Mask...upon reading ICR,
       
  1708 	 * interrupts are masked.  No need for the
       
  1709 	 * IMC write
       
  1710 	 */
       
  1711 
       
  1712 	if (icr & E1000_ICR_LSC) {
       
  1713 		hw->mac.get_link_status = 1;
       
  1714 		/*
       
  1715 		 * ICH8 workaround-- Call gig speed drop workaround on cable
       
  1716 		 * disconnect (LSC) before accessing any PHY registers
       
  1717 		 */
       
  1718 		if ((adapter->flags & FLAG_LSC_GIG_SPEED_DROP) &&
       
  1719 		    (!(er32(STATUS) & E1000_STATUS_LU)))
       
  1720 			schedule_work(&adapter->downshift_task);
       
  1721 
       
  1722 		/*
       
  1723 		 * 80003ES2LAN workaround--
       
  1724 		 * For packet buffer work-around on link down event;
       
  1725 		 * disable receives here in the ISR and
       
  1726 		 * reset adapter in watchdog
       
  1727 		 */
       
  1728 		if (netif_carrier_ok(netdev) &&
       
  1729 		    (adapter->flags & FLAG_RX_NEEDS_RESTART)) {
       
  1730 			/* disable receives */
       
  1731 			rctl = er32(RCTL);
       
  1732 			ew32(RCTL, rctl & ~E1000_RCTL_EN);
       
  1733 			adapter->flags |= FLAG_RX_RESTART_NOW;
       
  1734 		}
       
  1735 		/* guard against interrupt when we're going down */
       
  1736 		if (!test_bit(__E1000_DOWN, &adapter->state))
       
  1737 			mod_timer(&adapter->watchdog_timer, jiffies + 1);
       
  1738 	}
       
  1739 
       
  1740 	if (napi_schedule_prep(&adapter->napi)) {
       
  1741 		adapter->total_tx_bytes = 0;
       
  1742 		adapter->total_tx_packets = 0;
       
  1743 		adapter->total_rx_bytes = 0;
       
  1744 		adapter->total_rx_packets = 0;
       
  1745 		__napi_schedule(&adapter->napi);
       
  1746 	}
       
  1747 
       
  1748 	return IRQ_HANDLED;
       
  1749 }
       
  1750 
       
  1751 static irqreturn_t e1000_msix_other(int irq, void *data)
       
  1752 {
       
  1753 	struct net_device *netdev = data;
       
  1754 	struct e1000_adapter *adapter = netdev_priv(netdev);
       
  1755 	struct e1000_hw *hw = &adapter->hw;
       
  1756 	u32 icr = er32(ICR);
       
  1757 
       
  1758 	if (!(icr & E1000_ICR_INT_ASSERTED)) {
       
  1759 		if (!test_bit(__E1000_DOWN, &adapter->state))
       
  1760 			ew32(IMS, E1000_IMS_OTHER);
       
  1761 		return IRQ_NONE;
       
  1762 	}
       
  1763 
       
  1764 	if (icr & adapter->eiac_mask)
       
  1765 		ew32(ICS, (icr & adapter->eiac_mask));
       
  1766 
       
  1767 	if (icr & E1000_ICR_OTHER) {
       
  1768 		if (!(icr & E1000_ICR_LSC))
       
  1769 			goto no_link_interrupt;
       
  1770 		hw->mac.get_link_status = 1;
       
  1771 		/* guard against interrupt when we're going down */
       
  1772 		if (!test_bit(__E1000_DOWN, &adapter->state))
       
  1773 			mod_timer(&adapter->watchdog_timer, jiffies + 1);
       
  1774 	}
       
  1775 
       
  1776 no_link_interrupt:
       
  1777 	if (!test_bit(__E1000_DOWN, &adapter->state))
       
  1778 		ew32(IMS, E1000_IMS_LSC | E1000_IMS_OTHER);
       
  1779 
       
  1780 	return IRQ_HANDLED;
       
  1781 }
       
  1782 
       
  1783 
       
  1784 static irqreturn_t e1000_intr_msix_tx(int irq, void *data)
       
  1785 {
       
  1786 	struct net_device *netdev = data;
       
  1787 	struct e1000_adapter *adapter = netdev_priv(netdev);
       
  1788 	struct e1000_hw *hw = &adapter->hw;
       
  1789 	struct e1000_ring *tx_ring = adapter->tx_ring;
       
  1790 
       
  1791 
       
  1792 	adapter->total_tx_bytes = 0;
       
  1793 	adapter->total_tx_packets = 0;
       
  1794 
       
  1795 	if (!e1000_clean_tx_irq(adapter))
       
  1796 		/* Ring was not completely cleaned, so fire another interrupt */
       
  1797 		ew32(ICS, tx_ring->ims_val);
       
  1798 
       
  1799 	return IRQ_HANDLED;
       
  1800 }
       
  1801 
       
  1802 static irqreturn_t e1000_intr_msix_rx(int irq, void *data)
       
  1803 {
       
  1804 	struct net_device *netdev = data;
       
  1805 	struct e1000_adapter *adapter = netdev_priv(netdev);
       
  1806 
       
  1807 	/* Write the ITR value calculated at the end of the
       
  1808 	 * previous interrupt.
       
  1809 	 */
       
  1810 	if (adapter->rx_ring->set_itr) {
       
  1811 		writel(1000000000 / (adapter->rx_ring->itr_val * 256),
       
  1812 		       adapter->hw.hw_addr + adapter->rx_ring->itr_register);
       
  1813 		adapter->rx_ring->set_itr = 0;
       
  1814 	}
       
  1815 
       
  1816 	if (napi_schedule_prep(&adapter->napi)) {
       
  1817 		adapter->total_rx_bytes = 0;
       
  1818 		adapter->total_rx_packets = 0;
       
  1819 		__napi_schedule(&adapter->napi);
       
  1820 	}
       
  1821 	return IRQ_HANDLED;
       
  1822 }
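/*
 * Illustrative helper (sketch, not used by the driver): the writel()
 * above programs the per-vector EITR register with
 * 1000000000 / (itr_val * 256), i.e. the interrupt interval in 256 ns
 * units for a target of itr_val interrupts/sec.  For itr_val = 20000,
 * that is 1000000000 / 5120000 = 195, and 195 * 256 ns is roughly the
 * "50 usec aka 20000 ints/s" mentioned in e1000_update_itr() below.
 */
static inline u32 e1000e_example_itr_to_reg(u32 ints_per_sec)
{
	/* e.g. 20000 ints/s -> 195 (in 256 ns units) */
	return 1000000000 / (ints_per_sec * 256);
}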
       
  1823 
       
  1824 /**
       
  1825  * e1000_configure_msix - Configure MSI-X hardware
       
  1826  *
       
  1827  * e1000_configure_msix sets up the hardware to properly
       
  1828  * generate MSI-X interrupts.
       
  1829  **/
       
  1830 static void e1000_configure_msix(struct e1000_adapter *adapter)
       
  1831 {
       
  1832 	struct e1000_hw *hw = &adapter->hw;
       
  1833 	struct e1000_ring *rx_ring = adapter->rx_ring;
       
  1834 	struct e1000_ring *tx_ring = adapter->tx_ring;
       
  1835 	int vector = 0;
       
  1836 	u32 ctrl_ext, ivar = 0;
       
  1837 
       
  1838 	adapter->eiac_mask = 0;
       
  1839 
       
  1840 	/* Workaround issue with spurious interrupts on 82574 in MSI-X mode */
       
  1841 	if (hw->mac.type == e1000_82574) {
       
  1842 		u32 rfctl = er32(RFCTL);
       
  1843 		rfctl |= E1000_RFCTL_ACK_DIS;
       
  1844 		ew32(RFCTL, rfctl);
       
  1845 	}
       
  1846 
       
  1847 #define E1000_IVAR_INT_ALLOC_VALID	0x8
       
  1848 	/* Configure Rx vector */
       
  1849 	rx_ring->ims_val = E1000_IMS_RXQ0;
       
  1850 	adapter->eiac_mask |= rx_ring->ims_val;
       
  1851 	if (rx_ring->itr_val)
       
  1852 		writel(1000000000 / (rx_ring->itr_val * 256),
       
  1853 		       hw->hw_addr + rx_ring->itr_register);
       
  1854 	else
       
  1855 		writel(1, hw->hw_addr + rx_ring->itr_register);
       
  1856 	ivar = E1000_IVAR_INT_ALLOC_VALID | vector;
       
  1857 
       
  1858 	/* Configure Tx vector */
       
  1859 	tx_ring->ims_val = E1000_IMS_TXQ0;
       
  1860 	vector++;
       
  1861 	if (tx_ring->itr_val)
       
  1862 		writel(1000000000 / (tx_ring->itr_val * 256),
       
  1863 		       hw->hw_addr + tx_ring->itr_register);
       
  1864 	else
       
  1865 		writel(1, hw->hw_addr + tx_ring->itr_register);
       
  1866 	adapter->eiac_mask |= tx_ring->ims_val;
       
  1867 	ivar |= ((E1000_IVAR_INT_ALLOC_VALID | vector) << 8);
       
  1868 
       
  1869 	/* set vector for Other Causes, e.g. link changes */
       
  1870 	vector++;
       
  1871 	ivar |= ((E1000_IVAR_INT_ALLOC_VALID | vector) << 16);
       
  1872 	if (rx_ring->itr_val)
       
  1873 		writel(1000000000 / (rx_ring->itr_val * 256),
       
  1874 		       hw->hw_addr + E1000_EITR_82574(vector));
       
  1875 	else
       
  1876 		writel(1, hw->hw_addr + E1000_EITR_82574(vector));
       
  1877 
       
  1878 	/* Cause Tx interrupts on every write back */
       
  1879 	ivar |= (1 << 31);
       
  1880 
       
  1881 	ew32(IVAR, ivar);
       
  1882 
       
  1883 	/* enable MSI-X PBA support */
       
  1884 	ctrl_ext = er32(CTRL_EXT);
       
  1885 	ctrl_ext |= E1000_CTRL_EXT_PBA_CLR;
       
  1886 
       
  1887 	/* Auto-Mask Other interrupts upon ICR read */
       
  1888 #define E1000_EIAC_MASK_82574   0x01F00000
       
  1889 	ew32(IAM, ~E1000_EIAC_MASK_82574 | E1000_IMS_OTHER);
       
  1890 	ctrl_ext |= E1000_CTRL_EXT_EIAME;
       
  1891 	ew32(CTRL_EXT, ctrl_ext);
       
  1892 	e1e_flush();
       
  1893 }
       
  1894 
       
  1895 void e1000e_reset_interrupt_capability(struct e1000_adapter *adapter)
       
  1896 {
       
  1897 	if (adapter->msix_entries) {
       
  1898 		pci_disable_msix(adapter->pdev);
       
  1899 		kfree(adapter->msix_entries);
       
  1900 		adapter->msix_entries = NULL;
       
  1901 	} else if (adapter->flags & FLAG_MSI_ENABLED) {
       
  1902 		pci_disable_msi(adapter->pdev);
       
  1903 		adapter->flags &= ~FLAG_MSI_ENABLED;
       
  1904 	}
       
  1905 }
       
  1906 
       
  1907 /**
       
  1908  * e1000e_set_interrupt_capability - set MSI or MSI-X if supported
       
  1909  *
       
  1910  * Attempt to configure interrupts using the best available
       
  1911  * capabilities of the hardware and kernel.
       
  1912  **/
       
  1913 void e1000e_set_interrupt_capability(struct e1000_adapter *adapter)
       
  1914 {
       
  1915 	int err;
       
  1916 	int i;
       
  1917 
       
  1918 	switch (adapter->int_mode) {
       
  1919 	case E1000E_INT_MODE_MSIX:
       
  1920 		if (adapter->flags & FLAG_HAS_MSIX) {
       
  1921 			adapter->num_vectors = 3; /* RxQ0, TxQ0 and other */
       
  1922 			adapter->msix_entries = kcalloc(adapter->num_vectors,
       
  1923 						      sizeof(struct msix_entry),
       
  1924 						      GFP_KERNEL);
       
  1925 			if (adapter->msix_entries) {
       
  1926 				for (i = 0; i < adapter->num_vectors; i++)
       
  1927 					adapter->msix_entries[i].entry = i;
       
  1928 
       
  1929 				err = pci_enable_msix(adapter->pdev,
       
  1930 						      adapter->msix_entries,
       
  1931 						      adapter->num_vectors);
       
  1932 				if (err == 0)
       
  1933 					return;
       
  1934 			}
       
  1935 			/* MSI-X failed, so fall through and try MSI */
       
  1936 			e_err("Failed to initialize MSI-X interrupts.  "
       
  1937 			      "Falling back to MSI interrupts.\n");
       
  1938 			e1000e_reset_interrupt_capability(adapter);
       
  1939 		}
       
  1940 		adapter->int_mode = E1000E_INT_MODE_MSI;
       
  1941 		/* Fall through */
       
  1942 	case E1000E_INT_MODE_MSI:
       
  1943 		if (!pci_enable_msi(adapter->pdev)) {
       
  1944 			adapter->flags |= FLAG_MSI_ENABLED;
       
  1945 		} else {
       
  1946 			adapter->int_mode = E1000E_INT_MODE_LEGACY;
       
  1947 			e_err("Failed to initialize MSI interrupts.  Falling "
       
  1948 			      "back to legacy interrupts.\n");
       
  1949 		}
       
  1950 		/* Fall through */
       
  1951 	case E1000E_INT_MODE_LEGACY:
       
  1952 		/* Don't do anything; this is the system default */
       
  1953 		break;
       
  1954 	}
       
  1955 
       
  1956 	/* store the number of vectors being used */
       
  1957 	adapter->num_vectors = 1;
       
  1958 }
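/*
 * Illustrative summary (not driver code): interrupt mode selection above
 * degrades in order
 *
 *	MSI-X (3 vectors: RxQ0, TxQ0, other) -> MSI -> legacy INTx
 *
 * A failed pci_enable_msix() falls through to MSI, a failed
 * pci_enable_msi() falls through to legacy, and in the non-MSI-X modes
 * num_vectors is left at 1.
 */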
       
  1959 
       
  1960 /**
       
  1961  * e1000_request_msix - Initialize MSI-X interrupts
       
  1962  *
       
  1963  * e1000_request_msix allocates MSI-X vectors and requests interrupts from the
       
  1964  * kernel.
       
  1965  **/
       
  1966 static int e1000_request_msix(struct e1000_adapter *adapter)
       
  1967 {
       
  1968 	struct net_device *netdev = adapter->netdev;
       
  1969 	int err = 0, vector = 0;
       
  1970 
       
  1971 	if (strlen(netdev->name) < (IFNAMSIZ - 5))
       
  1972 		snprintf(adapter->rx_ring->name,
       
  1973 			 sizeof(adapter->rx_ring->name) - 1,
       
  1974 			 "%s-rx-0", netdev->name);
       
  1975 	else
       
  1976 		memcpy(adapter->rx_ring->name, netdev->name, IFNAMSIZ);
       
  1977 	err = request_irq(adapter->msix_entries[vector].vector,
       
  1978 			  e1000_intr_msix_rx, 0, adapter->rx_ring->name,
       
  1979 			  netdev);
       
  1980 	if (err)
       
  1981 		goto out;
       
  1982 	adapter->rx_ring->itr_register = E1000_EITR_82574(vector);
       
  1983 	adapter->rx_ring->itr_val = adapter->itr;
       
  1984 	vector++;
       
  1985 
       
  1986 	if (strlen(netdev->name) < (IFNAMSIZ - 5))
       
  1987 		snprintf(adapter->tx_ring->name,
       
  1988 			 sizeof(adapter->tx_ring->name) - 1,
       
  1989 			 "%s-tx-0", netdev->name);
       
  1990 	else
       
  1991 		memcpy(adapter->tx_ring->name, netdev->name, IFNAMSIZ);
       
  1992 	err = request_irq(adapter->msix_entries[vector].vector,
       
  1993 			  e1000_intr_msix_tx, 0, adapter->tx_ring->name,
       
  1994 			  netdev);
       
  1995 	if (err)
       
  1996 		goto out;
       
  1997 	adapter->tx_ring->itr_register = E1000_EITR_82574(vector);
       
  1998 	adapter->tx_ring->itr_val = adapter->itr;
       
  1999 	vector++;
       
  2000 
       
  2001 	err = request_irq(adapter->msix_entries[vector].vector,
       
  2002 			  e1000_msix_other, 0, netdev->name, netdev);
       
  2003 	if (err)
       
  2004 		goto out;
       
  2005 
       
  2006 	e1000_configure_msix(adapter);
       
  2007 	return 0;
       
  2008 out:
       
  2009 	return err;
       
  2010 }
       
  2011 
       
  2012 /**
       
  2013  * e1000_request_irq - initialize interrupts
       
  2014  *
       
  2015  * Attempts to configure interrupts using the best available
       
  2016  * capabilities of the hardware and kernel.
       
  2017  **/
       
  2018 static int e1000_request_irq(struct e1000_adapter *adapter)
       
  2019 {
       
  2020 	struct net_device *netdev = adapter->netdev;
       
  2021 	int err;
       
  2022 
       
  2023 	if (adapter->msix_entries) {
       
  2024 		err = e1000_request_msix(adapter);
       
  2025 		if (!err)
       
  2026 			return err;
       
  2027 		/* fall back to MSI */
       
  2028 		e1000e_reset_interrupt_capability(adapter);
       
  2029 		adapter->int_mode = E1000E_INT_MODE_MSI;
       
  2030 		e1000e_set_interrupt_capability(adapter);
       
  2031 	}
       
  2032 	if (adapter->flags & FLAG_MSI_ENABLED) {
       
  2033 		err = request_irq(adapter->pdev->irq, e1000_intr_msi, 0,
       
  2034 				  netdev->name, netdev);
       
  2035 		if (!err)
       
  2036 			return err;
       
  2037 
       
  2038 		/* fall back to legacy interrupt */
       
  2039 		e1000e_reset_interrupt_capability(adapter);
       
  2040 		adapter->int_mode = E1000E_INT_MODE_LEGACY;
       
  2041 	}
       
  2042 
       
  2043 	err = request_irq(adapter->pdev->irq, e1000_intr, IRQF_SHARED,
       
  2044 			  netdev->name, netdev);
       
  2045 	if (err)
       
  2046 		e_err("Unable to allocate interrupt, Error: %d\n", err);
       
  2047 
       
  2048 	return err;
       
  2049 }
       
  2050 
       
  2051 static void e1000_free_irq(struct e1000_adapter *adapter)
       
  2052 {
       
  2053 	struct net_device *netdev = adapter->netdev;
       
  2054 
       
  2055 	if (adapter->msix_entries) {
       
  2056 		int vector = 0;
       
  2057 
       
  2058 		free_irq(adapter->msix_entries[vector].vector, netdev);
       
  2059 		vector++;
       
  2060 
       
  2061 		free_irq(adapter->msix_entries[vector].vector, netdev);
       
  2062 		vector++;
       
  2063 
       
  2064 		/* Other Causes interrupt vector */
       
  2065 		free_irq(adapter->msix_entries[vector].vector, netdev);
       
  2066 		return;
       
  2067 	}
       
  2068 
       
  2069 	free_irq(adapter->pdev->irq, netdev);
       
  2070 }
       
  2071 
       
  2072 /**
       
  2073  * e1000_irq_disable - Mask off interrupt generation on the NIC
       
  2074  **/
       
  2075 static void e1000_irq_disable(struct e1000_adapter *adapter)
       
  2076 {
       
  2077 	struct e1000_hw *hw = &adapter->hw;
       
  2078 
       
  2079 	ew32(IMC, ~0);
       
  2080 	if (adapter->msix_entries)
       
  2081 		ew32(EIAC_82574, 0);
       
  2082 	e1e_flush();
       
  2083 
       
  2084 	if (adapter->msix_entries) {
       
  2085 		int i;
       
  2086 		for (i = 0; i < adapter->num_vectors; i++)
       
  2087 			synchronize_irq(adapter->msix_entries[i].vector);
       
  2088 	} else {
       
  2089 		synchronize_irq(adapter->pdev->irq);
       
  2090 	}
       
  2091 }
       
  2092 
       
  2093 /**
       
  2094  * e1000_irq_enable - Enable default interrupt generation settings
       
  2095  **/
       
  2096 static void e1000_irq_enable(struct e1000_adapter *adapter)
       
  2097 {
       
  2098 	struct e1000_hw *hw = &adapter->hw;
       
  2099 
       
  2100 	if (adapter->msix_entries) {
       
  2101 		ew32(EIAC_82574, adapter->eiac_mask & E1000_EIAC_MASK_82574);
       
  2102 		ew32(IMS, adapter->eiac_mask | E1000_IMS_OTHER | E1000_IMS_LSC);
       
  2103 	} else {
       
  2104 		ew32(IMS, IMS_ENABLE_MASK);
       
  2105 	}
       
  2106 	e1e_flush();
       
  2107 }
       
  2108 
       
  2109 /**
       
  2110  * e1000e_get_hw_control - get control of the h/w from f/w
       
  2111  * @adapter: address of board private structure
       
  2112  *
       
  2113  * e1000e_get_hw_control sets {CTRL_EXT|SWSM}:DRV_LOAD bit.
       
  2114  * For ASF and Pass Through versions of f/w this means that
       
  2115  * the driver is loaded. For AMT version (only with 82573)
       
  2116  * of the f/w this means that the network i/f is open.
       
  2117  **/
       
  2118 void e1000e_get_hw_control(struct e1000_adapter *adapter)
       
  2119 {
       
  2120 	struct e1000_hw *hw = &adapter->hw;
       
  2121 	u32 ctrl_ext;
       
  2122 	u32 swsm;
       
  2123 
       
  2124 	/* Let firmware know the driver has taken over */
       
  2125 	if (adapter->flags & FLAG_HAS_SWSM_ON_LOAD) {
       
  2126 		swsm = er32(SWSM);
       
  2127 		ew32(SWSM, swsm | E1000_SWSM_DRV_LOAD);
       
  2128 	} else if (adapter->flags & FLAG_HAS_CTRLEXT_ON_LOAD) {
       
  2129 		ctrl_ext = er32(CTRL_EXT);
       
  2130 		ew32(CTRL_EXT, ctrl_ext | E1000_CTRL_EXT_DRV_LOAD);
       
  2131 	}
       
  2132 }
       
  2133 
       
  2134 /**
       
  2135  * e1000e_release_hw_control - release control of the h/w to f/w
       
  2136  * @adapter: address of board private structure
       
  2137  *
       
  2138  * e1000e_release_hw_control resets {CTRL_EXT|SWSM}:DRV_LOAD bit.
       
  2139  * For ASF and Pass Through versions of f/w this means that the
       
   2140  * driver is no longer loaded. For AMT version (only with 82573)
       
  2141  * of the f/w this means that the network i/f is closed.
       
  2142  *
       
  2143  **/
       
  2144 void e1000e_release_hw_control(struct e1000_adapter *adapter)
       
  2145 {
       
  2146 	struct e1000_hw *hw = &adapter->hw;
       
  2147 	u32 ctrl_ext;
       
  2148 	u32 swsm;
       
  2149 
       
   2150 	/* Let firmware take over control of h/w */
       
  2151 	if (adapter->flags & FLAG_HAS_SWSM_ON_LOAD) {
       
  2152 		swsm = er32(SWSM);
       
  2153 		ew32(SWSM, swsm & ~E1000_SWSM_DRV_LOAD);
       
  2154 	} else if (adapter->flags & FLAG_HAS_CTRLEXT_ON_LOAD) {
       
  2155 		ctrl_ext = er32(CTRL_EXT);
       
  2156 		ew32(CTRL_EXT, ctrl_ext & ~E1000_CTRL_EXT_DRV_LOAD);
       
  2157 	}
       
  2158 }
       
  2159 
       
  2160 /**
       
   2161  * e1000_alloc_ring_dma - allocate memory for a ring structure
       
  2162  **/
       
  2163 static int e1000_alloc_ring_dma(struct e1000_adapter *adapter,
       
  2164 				struct e1000_ring *ring)
       
  2165 {
       
  2166 	struct pci_dev *pdev = adapter->pdev;
       
  2167 
       
  2168 	ring->desc = dma_alloc_coherent(&pdev->dev, ring->size, &ring->dma,
       
  2169 					GFP_KERNEL);
       
  2170 	if (!ring->desc)
       
  2171 		return -ENOMEM;
       
  2172 
       
  2173 	return 0;
       
  2174 }
       
  2175 
       
  2176 /**
       
  2177  * e1000e_setup_tx_resources - allocate Tx resources (Descriptors)
       
  2178  * @adapter: board private structure
       
  2179  *
       
  2180  * Return 0 on success, negative on failure
       
  2181  **/
       
  2182 int e1000e_setup_tx_resources(struct e1000_adapter *adapter)
       
  2183 {
       
  2184 	struct e1000_ring *tx_ring = adapter->tx_ring;
       
  2185 	int err = -ENOMEM, size;
       
  2186 
       
  2187 	size = sizeof(struct e1000_buffer) * tx_ring->count;
       
  2188 	tx_ring->buffer_info = vzalloc(size);
       
  2189 	if (!tx_ring->buffer_info)
       
  2190 		goto err;
       
  2191 
       
  2192 	/* round up to nearest 4K */
       
  2193 	tx_ring->size = tx_ring->count * sizeof(struct e1000_tx_desc);
       
  2194 	tx_ring->size = ALIGN(tx_ring->size, 4096);
       
  2195 
       
  2196 	err = e1000_alloc_ring_dma(adapter, tx_ring);
       
  2197 	if (err)
       
  2198 		goto err;
       
  2199 
       
  2200 	tx_ring->next_to_use = 0;
       
  2201 	tx_ring->next_to_clean = 0;
       
  2202 
       
  2203 	return 0;
       
  2204 err:
       
  2205 	vfree(tx_ring->buffer_info);
       
  2206 	e_err("Unable to allocate memory for the transmit descriptor ring\n");
       
  2207 	return err;
       
  2208 }
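/*
 * Worked example (illustrative; the ring size is an assumption, not a
 * value read from this file): with tx_ring->count = 256 and the 16-byte
 * legacy struct e1000_tx_desc,
 *
 *	256 * 16 = 4096 -> ALIGN(4096, 4096) = 4096
 *
 * so a single 4 KiB DMA-coherent allocation backs the ring; ALIGN() only
 * pads counts whose descriptor total is not already a 4096-byte multiple.
 */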
       
  2209 
       
  2210 /**
       
  2211  * e1000e_setup_rx_resources - allocate Rx resources (Descriptors)
       
  2212  * @adapter: board private structure
       
  2213  *
       
  2214  * Returns 0 on success, negative on failure
       
  2215  **/
       
  2216 int e1000e_setup_rx_resources(struct e1000_adapter *adapter)
       
  2217 {
       
  2218 	struct e1000_ring *rx_ring = adapter->rx_ring;
       
  2219 	struct e1000_buffer *buffer_info;
       
  2220 	int i, size, desc_len, err = -ENOMEM;
       
  2221 
       
  2222 	size = sizeof(struct e1000_buffer) * rx_ring->count;
       
  2223 	rx_ring->buffer_info = vzalloc(size);
       
  2224 	if (!rx_ring->buffer_info)
       
  2225 		goto err;
       
  2226 
       
  2227 	for (i = 0; i < rx_ring->count; i++) {
       
  2228 		buffer_info = &rx_ring->buffer_info[i];
       
  2229 		buffer_info->ps_pages = kcalloc(PS_PAGE_BUFFERS,
       
  2230 						sizeof(struct e1000_ps_page),
       
  2231 						GFP_KERNEL);
       
  2232 		if (!buffer_info->ps_pages)
       
  2233 			goto err_pages;
       
  2234 	}
       
  2235 
       
  2236 	desc_len = sizeof(union e1000_rx_desc_packet_split);
       
  2237 
       
  2238 	/* Round up to nearest 4K */
       
  2239 	rx_ring->size = rx_ring->count * desc_len;
       
  2240 	rx_ring->size = ALIGN(rx_ring->size, 4096);
       
  2241 
       
  2242 	err = e1000_alloc_ring_dma(adapter, rx_ring);
       
  2243 	if (err)
       
  2244 		goto err_pages;
       
  2245 
       
  2246 	rx_ring->next_to_clean = 0;
       
  2247 	rx_ring->next_to_use = 0;
       
  2248 	rx_ring->rx_skb_top = NULL;
       
  2249 
       
  2250 	return 0;
       
  2251 
       
  2252 err_pages:
       
  2253 	for (i = 0; i < rx_ring->count; i++) {
       
  2254 		buffer_info = &rx_ring->buffer_info[i];
       
  2255 		kfree(buffer_info->ps_pages);
       
  2256 	}
       
  2257 err:
       
  2258 	vfree(rx_ring->buffer_info);
       
  2259 	e_err("Unable to allocate memory for the receive descriptor ring\n");
       
  2260 	return err;
       
  2261 }
       
  2262 
       
  2263 /**
       
  2264  * e1000_clean_tx_ring - Free Tx Buffers
       
  2265  * @adapter: board private structure
       
  2266  **/
       
  2267 static void e1000_clean_tx_ring(struct e1000_adapter *adapter)
       
  2268 {
       
  2269 	struct e1000_ring *tx_ring = adapter->tx_ring;
       
  2270 	struct e1000_buffer *buffer_info;
       
  2271 	unsigned long size;
       
  2272 	unsigned int i;
       
  2273 
       
  2274 	for (i = 0; i < tx_ring->count; i++) {
       
  2275 		buffer_info = &tx_ring->buffer_info[i];
       
  2276 		e1000_put_txbuf(adapter, buffer_info);
       
  2277 	}
       
  2278 
       
  2279 	size = sizeof(struct e1000_buffer) * tx_ring->count;
       
  2280 	memset(tx_ring->buffer_info, 0, size);
       
  2281 
       
  2282 	memset(tx_ring->desc, 0, tx_ring->size);
       
  2283 
       
  2284 	tx_ring->next_to_use = 0;
       
  2285 	tx_ring->next_to_clean = 0;
       
  2286 
       
  2287 	writel(0, adapter->hw.hw_addr + tx_ring->head);
       
  2288 	writel(0, adapter->hw.hw_addr + tx_ring->tail);
       
  2289 }
       
  2290 
       
  2291 /**
       
  2292  * e1000e_free_tx_resources - Free Tx Resources per Queue
       
  2293  * @adapter: board private structure
       
  2294  *
       
  2295  * Free all transmit software resources
       
  2296  **/
       
  2297 void e1000e_free_tx_resources(struct e1000_adapter *adapter)
       
  2298 {
       
  2299 	struct pci_dev *pdev = adapter->pdev;
       
  2300 	struct e1000_ring *tx_ring = adapter->tx_ring;
       
  2301 
       
  2302 	e1000_clean_tx_ring(adapter);
       
  2303 
       
  2304 	vfree(tx_ring->buffer_info);
       
  2305 	tx_ring->buffer_info = NULL;
       
  2306 
       
  2307 	dma_free_coherent(&pdev->dev, tx_ring->size, tx_ring->desc,
       
  2308 			  tx_ring->dma);
       
  2309 	tx_ring->desc = NULL;
       
  2310 }
       
  2311 
       
  2312 /**
       
  2313  * e1000e_free_rx_resources - Free Rx Resources
       
  2314  * @adapter: board private structure
       
  2315  *
       
  2316  * Free all receive software resources
       
  2317  **/
       
  2318 
       
  2319 void e1000e_free_rx_resources(struct e1000_adapter *adapter)
       
  2320 {
       
  2321 	struct pci_dev *pdev = adapter->pdev;
       
  2322 	struct e1000_ring *rx_ring = adapter->rx_ring;
       
  2323 	int i;
       
  2324 
       
  2325 	e1000_clean_rx_ring(adapter);
       
  2326 
       
  2327 	for (i = 0; i < rx_ring->count; i++)
       
  2328 		kfree(rx_ring->buffer_info[i].ps_pages);
       
  2329 
       
  2330 	vfree(rx_ring->buffer_info);
       
  2331 	rx_ring->buffer_info = NULL;
       
  2332 
       
  2333 	dma_free_coherent(&pdev->dev, rx_ring->size, rx_ring->desc,
       
  2334 			  rx_ring->dma);
       
  2335 	rx_ring->desc = NULL;
       
  2336 }
       
  2337 
       
  2338 /**
       
  2339  * e1000_update_itr - update the dynamic ITR value based on statistics
       
  2340  * @adapter: pointer to adapter
       
  2341  * @itr_setting: current adapter->itr
       
  2342  * @packets: the number of packets during this measurement interval
       
  2343  * @bytes: the number of bytes during this measurement interval
       
  2344  *
       
  2345  *      Stores a new ITR value based on packets and byte
       
  2346  *      counts during the last interrupt.  The advantage of per interrupt
       
  2347  *      computation is faster updates and more accurate ITR for the current
       
  2348  *      traffic pattern.  Constants in this function were computed
       
  2349  *      based on theoretical maximum wire speed and thresholds were set based
       
  2350  *      on testing data as well as attempting to minimize response time
       
  2351  *      while increasing bulk throughput.  This functionality is controlled
       
  2352  *      by the InterruptThrottleRate module parameter.
       
  2353  **/
       
  2354 static unsigned int e1000_update_itr(struct e1000_adapter *adapter,
       
  2355 				     u16 itr_setting, int packets,
       
  2356 				     int bytes)
       
  2357 {
       
  2358 	unsigned int retval = itr_setting;
       
  2359 
       
  2360 	if (packets == 0)
       
  2361 		goto update_itr_done;
       
  2362 
       
  2363 	switch (itr_setting) {
       
  2364 	case lowest_latency:
       
  2365 		/* handle TSO and jumbo frames */
       
  2366 		if (bytes/packets > 8000)
       
  2367 			retval = bulk_latency;
       
  2368 		else if ((packets < 5) && (bytes > 512))
       
  2369 			retval = low_latency;
       
  2370 		break;
       
  2371 	case low_latency:  /* 50 usec aka 20000 ints/s */
       
  2372 		if (bytes > 10000) {
       
  2373 			/* this if handles the TSO accounting */
       
  2374 			if (bytes/packets > 8000)
       
  2375 				retval = bulk_latency;
       
  2376 			else if ((packets < 10) || ((bytes/packets) > 1200))
       
  2377 				retval = bulk_latency;
       
   2378 			else if (packets > 35)
       
  2379 				retval = lowest_latency;
       
  2380 		} else if (bytes/packets > 2000) {
       
  2381 			retval = bulk_latency;
       
  2382 		} else if (packets <= 2 && bytes < 512) {
       
  2383 			retval = lowest_latency;
       
  2384 		}
       
  2385 		break;
       
  2386 	case bulk_latency: /* 250 usec aka 4000 ints/s */
       
  2387 		if (bytes > 25000) {
       
  2388 			if (packets > 35)
       
  2389 				retval = low_latency;
       
  2390 		} else if (bytes < 6000) {
       
  2391 			retval = low_latency;
       
  2392 		}
       
  2393 		break;
       
  2394 	}
       
  2395 
       
  2396 update_itr_done:
       
  2397 	return retval;
       
  2398 }
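/*
 * Worked example (illustrative only): 40 packets totalling 50000 bytes
 * seen while in low_latency gives bytes > 10000 and
 * bytes/packets = 1250 > 1200, so the class is promoted to bulk_latency
 * (~4000 ints/s).  Conversely, 2 packets totalling 400 bytes drops it to
 * lowest_latency (~70000 ints/s).  The register update itself happens in
 * e1000_set_itr() below.
 */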
       
  2399 
       
  2400 static void e1000_set_itr(struct e1000_adapter *adapter)
       
  2401 {
       
  2402 	struct e1000_hw *hw = &adapter->hw;
       
  2403 	u16 current_itr;
       
  2404 	u32 new_itr = adapter->itr;
       
  2405 
       
  2406 	/* for non-gigabit speeds, just fix the interrupt rate at 4000 */
       
  2407 	if (adapter->link_speed != SPEED_1000) {
       
  2408 		current_itr = 0;
       
  2409 		new_itr = 4000;
       
  2410 		goto set_itr_now;
       
  2411 	}
       
  2412 
       
  2413 	if (adapter->flags2 & FLAG2_DISABLE_AIM) {
       
  2414 		new_itr = 0;
       
  2415 		goto set_itr_now;
       
  2416 	}
       
  2417 
       
  2418 	adapter->tx_itr = e1000_update_itr(adapter,
       
  2419 				    adapter->tx_itr,
       
  2420 				    adapter->total_tx_packets,
       
  2421 				    adapter->total_tx_bytes);
       
  2422 	/* conservative mode (itr 3) eliminates the lowest_latency setting */
       
  2423 	if (adapter->itr_setting == 3 && adapter->tx_itr == lowest_latency)
       
  2424 		adapter->tx_itr = low_latency;
       
  2425 
       
  2426 	adapter->rx_itr = e1000_update_itr(adapter,
       
  2427 				    adapter->rx_itr,
       
  2428 				    adapter->total_rx_packets,
       
  2429 				    adapter->total_rx_bytes);
       
  2430 	/* conservative mode (itr 3) eliminates the lowest_latency setting */
       
  2431 	if (adapter->itr_setting == 3 && adapter->rx_itr == lowest_latency)
       
  2432 		adapter->rx_itr = low_latency;
       
  2433 
       
  2434 	current_itr = max(adapter->rx_itr, adapter->tx_itr);
       
  2435 
       
  2436 	switch (current_itr) {
       
  2437 	/* counts and packets in update_itr are dependent on these numbers */
       
  2438 	case lowest_latency:
       
  2439 		new_itr = 70000;
       
  2440 		break;
       
  2441 	case low_latency:
       
  2442 		new_itr = 20000; /* aka hwitr = ~200 */
       
  2443 		break;
       
  2444 	case bulk_latency:
       
  2445 		new_itr = 4000;
       
  2446 		break;
       
  2447 	default:
       
  2448 		break;
       
  2449 	}
       
  2450 
       
  2451 set_itr_now:
       
  2452 	if (new_itr != adapter->itr) {
       
  2453 		/*
       
  2454 		 * this attempts to bias the interrupt rate towards Bulk
       
  2455 		 * by adding intermediate steps when interrupt rate is
       
  2456 		 * increasing
       
  2457 		 */
       
  2458 		new_itr = new_itr > adapter->itr ?
       
  2459 			     min(adapter->itr + (new_itr >> 2), new_itr) :
       
  2460 			     new_itr;
       
  2461 		adapter->itr = new_itr;
       
  2462 		adapter->rx_ring->itr_val = new_itr;
       
  2463 		if (adapter->msix_entries)
       
  2464 			adapter->rx_ring->set_itr = 1;
       
  2465 		else
       
  2466 			if (new_itr)
       
  2467 				ew32(ITR, 1000000000 / (new_itr * 256));
       
  2468 			else
       
  2469 				ew32(ITR, 0);
       
  2470 	}
       
  2471 }
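/*
 * Worked example of the ramp above (illustrative only): with
 * adapter->itr = 4000 and a new target of 20000,
 *
 *	min(4000 + (20000 >> 2), 20000) = min(9000, 20000) = 9000
 *
 * so the rate climbs toward low_latency over several intervals, while a
 * decrease (e.g. 20000 -> 4000) takes effect immediately.
 */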
       
  2472 
       
  2473 /**
       
  2474  * e1000_alloc_queues - Allocate memory for all rings
       
  2475  * @adapter: board private structure to initialize
       
  2476  **/
       
  2477 static int __devinit e1000_alloc_queues(struct e1000_adapter *adapter)
       
  2478 {
       
  2479 	adapter->tx_ring = kzalloc(sizeof(struct e1000_ring), GFP_KERNEL);
       
  2480 	if (!adapter->tx_ring)
       
  2481 		goto err;
       
  2482 
       
  2483 	adapter->rx_ring = kzalloc(sizeof(struct e1000_ring), GFP_KERNEL);
       
  2484 	if (!adapter->rx_ring)
       
  2485 		goto err;
       
  2486 
       
  2487 	return 0;
       
  2488 err:
       
  2489 	e_err("Unable to allocate memory for queues\n");
       
  2490 	kfree(adapter->rx_ring);
       
  2491 	kfree(adapter->tx_ring);
       
  2492 	return -ENOMEM;
       
  2493 }
       
  2494 
       
  2495 /**
       
  2496  * e1000_clean - NAPI Rx polling callback
       
  2497  * @napi: struct associated with this polling callback
       
   2498  * @budget: number of packets the driver is allowed to process this poll
       
  2499  **/
       
  2500 static int e1000_clean(struct napi_struct *napi, int budget)
       
  2501 {
       
  2502 	struct e1000_adapter *adapter = container_of(napi, struct e1000_adapter, napi);
       
  2503 	struct e1000_hw *hw = &adapter->hw;
       
  2504 	struct net_device *poll_dev = adapter->netdev;
       
  2505 	int tx_cleaned = 1, work_done = 0;
       
  2506 
       
  2507 	adapter = netdev_priv(poll_dev);
       
  2508 
       
  2509 	if (adapter->msix_entries &&
       
  2510 	    !(adapter->rx_ring->ims_val & adapter->tx_ring->ims_val))
       
  2511 		goto clean_rx;
       
  2512 
       
  2513 	tx_cleaned = e1000_clean_tx_irq(adapter);
       
  2514 
       
  2515 clean_rx:
       
  2516 	adapter->clean_rx(adapter, &work_done, budget);
       
  2517 
       
  2518 	if (!tx_cleaned)
       
  2519 		work_done = budget;
       
  2520 
       
  2521 	/* If budget not fully consumed, exit the polling mode */
       
  2522 	if (work_done < budget) {
       
  2523 		if (adapter->itr_setting & 3)
       
  2524 			e1000_set_itr(adapter);
       
  2525 		napi_complete(napi);
       
  2526 		if (!test_bit(__E1000_DOWN, &adapter->state)) {
       
  2527 			if (adapter->msix_entries)
       
  2528 				ew32(IMS, adapter->rx_ring->ims_val);
       
  2529 			else
       
  2530 				e1000_irq_enable(adapter);
       
  2531 		}
       
  2532 	}
       
  2533 
       
  2534 	return work_done;
       
  2535 }
       
  2536 
       
  2537 static void e1000_vlan_rx_add_vid(struct net_device *netdev, u16 vid)
       
  2538 {
       
  2539 	struct e1000_adapter *adapter = netdev_priv(netdev);
       
  2540 	struct e1000_hw *hw = &adapter->hw;
       
  2541 	u32 vfta, index;
       
  2542 
       
  2543 	/* don't update vlan cookie if already programmed */
       
  2544 	if ((adapter->hw.mng_cookie.status &
       
  2545 	     E1000_MNG_DHCP_COOKIE_STATUS_VLAN) &&
       
  2546 	    (vid == adapter->mng_vlan_id))
       
  2547 		return;
       
  2548 
       
  2549 	/* add VID to filter table */
       
  2550 	if (adapter->flags & FLAG_HAS_HW_VLAN_FILTER) {
       
  2551 		index = (vid >> 5) & 0x7F;
       
  2552 		vfta = E1000_READ_REG_ARRAY(hw, E1000_VFTA, index);
       
  2553 		vfta |= (1 << (vid & 0x1F));
       
  2554 		hw->mac.ops.write_vfta(hw, index, vfta);
       
  2555 	}
       
  2556 
       
  2557 	set_bit(vid, adapter->active_vlans);
       
  2558 }
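/*
 * Worked example (illustrative only): the VLAN filter table is 128
 * 32-bit registers covering all 4096 VLAN IDs.  For vid = 100:
 *
 *	index = (100 >> 5) & 0x7F = 3
 *	bit   = 100 & 0x1F       = 4
 *
 * so bit 4 of VFTA[3] is set here and cleared again in
 * e1000_vlan_rx_kill_vid() below.
 */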
       
  2559 
       
  2560 static void e1000_vlan_rx_kill_vid(struct net_device *netdev, u16 vid)
       
  2561 {
       
  2562 	struct e1000_adapter *adapter = netdev_priv(netdev);
       
  2563 	struct e1000_hw *hw = &adapter->hw;
       
  2564 	u32 vfta, index;
       
  2565 
       
  2566 	if ((adapter->hw.mng_cookie.status &
       
  2567 	     E1000_MNG_DHCP_COOKIE_STATUS_VLAN) &&
       
  2568 	    (vid == adapter->mng_vlan_id)) {
       
  2569 		/* release control to f/w */
       
  2570 		e1000e_release_hw_control(adapter);
       
  2571 		return;
       
  2572 	}
       
  2573 
       
  2574 	/* remove VID from filter table */
       
  2575 	if (adapter->flags & FLAG_HAS_HW_VLAN_FILTER) {
       
  2576 		index = (vid >> 5) & 0x7F;
       
  2577 		vfta = E1000_READ_REG_ARRAY(hw, E1000_VFTA, index);
       
  2578 		vfta &= ~(1 << (vid & 0x1F));
       
  2579 		hw->mac.ops.write_vfta(hw, index, vfta);
       
  2580 	}
       
  2581 
       
  2582 	clear_bit(vid, adapter->active_vlans);
       
  2583 }
       
  2584 
       
  2585 /**
       
  2586  * e1000e_vlan_filter_disable - helper to disable hw VLAN filtering
       
  2587  * @adapter: board private structure to initialize
       
  2588  **/
       
  2589 static void e1000e_vlan_filter_disable(struct e1000_adapter *adapter)
       
  2590 {
       
  2591 	struct net_device *netdev = adapter->netdev;
       
  2592 	struct e1000_hw *hw = &adapter->hw;
       
  2593 	u32 rctl;
       
  2594 
       
  2595 	if (adapter->flags & FLAG_HAS_HW_VLAN_FILTER) {
       
  2596 		/* disable VLAN receive filtering */
       
  2597 		rctl = er32(RCTL);
       
  2598 		rctl &= ~(E1000_RCTL_VFE | E1000_RCTL_CFIEN);
       
  2599 		ew32(RCTL, rctl);
       
  2600 
       
  2601 		if (adapter->mng_vlan_id != (u16)E1000_MNG_VLAN_NONE) {
       
  2602 			e1000_vlan_rx_kill_vid(netdev, adapter->mng_vlan_id);
       
  2603 			adapter->mng_vlan_id = E1000_MNG_VLAN_NONE;
       
  2604 		}
       
  2605 	}
       
  2606 }
       
  2607 
       
  2608 /**
       
  2609  * e1000e_vlan_filter_enable - helper to enable HW VLAN filtering
       
  2610  * @adapter: board private structure to initialize
       
  2611  **/
       
  2612 static void e1000e_vlan_filter_enable(struct e1000_adapter *adapter)
       
  2613 {
       
  2614 	struct e1000_hw *hw = &adapter->hw;
       
  2615 	u32 rctl;
       
  2616 
       
  2617 	if (adapter->flags & FLAG_HAS_HW_VLAN_FILTER) {
       
  2618 		/* enable VLAN receive filtering */
       
  2619 		rctl = er32(RCTL);
       
  2620 		rctl |= E1000_RCTL_VFE;
       
  2621 		rctl &= ~E1000_RCTL_CFIEN;
       
  2622 		ew32(RCTL, rctl);
       
  2623 	}
       
  2624 }
       
  2625 
       
  2626 /**
       
  2627  * e1000e_vlan_strip_disable - helper to disable HW VLAN stripping
       
  2628  * @adapter: board private structure to initialize
       
  2629  **/
       
  2630 static void e1000e_vlan_strip_disable(struct e1000_adapter *adapter)
       
  2631 {
       
  2632 	struct e1000_hw *hw = &adapter->hw;
       
  2633 	u32 ctrl;
       
  2634 
       
  2635 	/* disable VLAN tag insert/strip */
       
  2636 	ctrl = er32(CTRL);
       
  2637 	ctrl &= ~E1000_CTRL_VME;
       
  2638 	ew32(CTRL, ctrl);
       
  2639 }
       
  2640 
       
  2641 /**
       
  2642  * e1000e_vlan_strip_enable - helper to enable HW VLAN stripping
       
  2643  * @adapter: board private structure to initialize
       
  2644  **/
       
  2645 static void e1000e_vlan_strip_enable(struct e1000_adapter *adapter)
       
  2646 {
       
  2647 	struct e1000_hw *hw = &adapter->hw;
       
  2648 	u32 ctrl;
       
  2649 
       
  2650 	/* enable VLAN tag insert/strip */
       
  2651 	ctrl = er32(CTRL);
       
  2652 	ctrl |= E1000_CTRL_VME;
       
  2653 	ew32(CTRL, ctrl);
       
  2654 }
       
  2655 
       
  2656 static void e1000_update_mng_vlan(struct e1000_adapter *adapter)
       
  2657 {
       
  2658 	struct net_device *netdev = adapter->netdev;
       
  2659 	u16 vid = adapter->hw.mng_cookie.vlan_id;
       
  2660 	u16 old_vid = adapter->mng_vlan_id;
       
  2661 
       
  2662 	if (adapter->hw.mng_cookie.status &
       
  2663 	    E1000_MNG_DHCP_COOKIE_STATUS_VLAN) {
       
  2664 		e1000_vlan_rx_add_vid(netdev, vid);
       
  2665 		adapter->mng_vlan_id = vid;
       
  2666 	}
       
  2667 
       
  2668 	if ((old_vid != (u16)E1000_MNG_VLAN_NONE) && (vid != old_vid))
       
  2669 		e1000_vlan_rx_kill_vid(netdev, old_vid);
       
  2670 }
       
  2671 
       
  2672 static void e1000_restore_vlan(struct e1000_adapter *adapter)
       
  2673 {
       
  2674 	u16 vid;
       
  2675 
       
  2676 	e1000_vlan_rx_add_vid(adapter->netdev, 0);
       
  2677 
       
  2678 	for_each_set_bit(vid, adapter->active_vlans, VLAN_N_VID)
       
  2679 		e1000_vlan_rx_add_vid(adapter->netdev, vid);
       
  2680 }
       
  2681 
       
  2682 static void e1000_init_manageability_pt(struct e1000_adapter *adapter)
       
  2683 {
       
  2684 	struct e1000_hw *hw = &adapter->hw;
       
  2685 	u32 manc, manc2h, mdef, i, j;
       
  2686 
       
  2687 	if (!(adapter->flags & FLAG_MNG_PT_ENABLED))
       
  2688 		return;
       
  2689 
       
  2690 	manc = er32(MANC);
       
  2691 
       
  2692 	/*
       
  2693 	 * enable receiving management packets to the host. this will probably
       
  2694 	 * generate destination unreachable messages from the host OS, but
       
  2695 	 * the packets will be handled on SMBUS
       
  2696 	 */
       
  2697 	manc |= E1000_MANC_EN_MNG2HOST;
       
  2698 	manc2h = er32(MANC2H);
       
  2699 
       
  2700 	switch (hw->mac.type) {
       
  2701 	default:
       
  2702 		manc2h |= (E1000_MANC2H_PORT_623 | E1000_MANC2H_PORT_664);
       
  2703 		break;
       
  2704 	case e1000_82574:
       
  2705 	case e1000_82583:
       
  2706 		/*
       
  2707 		 * Check if IPMI pass-through decision filter already exists;
       
  2708 		 * if so, enable it.
       
  2709 		 */
       
  2710 		for (i = 0, j = 0; i < 8; i++) {
       
  2711 			mdef = er32(MDEF(i));
       
  2712 
       
  2713 			/* Ignore filters with anything other than IPMI ports */
       
  2714 			if (mdef & ~(E1000_MDEF_PORT_623 | E1000_MDEF_PORT_664))
       
  2715 				continue;
       
  2716 
       
  2717 			/* Enable this decision filter in MANC2H */
       
  2718 			if (mdef)
       
  2719 				manc2h |= (1 << i);
       
  2720 
       
  2721 			j |= mdef;
       
  2722 		}
       
  2723 
       
  2724 		if (j == (E1000_MDEF_PORT_623 | E1000_MDEF_PORT_664))
       
  2725 			break;
       
  2726 
       
  2727 		/* Create new decision filter in an empty filter */
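       		/* j is reused below as a created-filter count for the warning */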
       
  2728 		for (i = 0, j = 0; i < 8; i++)
       
  2729 			if (er32(MDEF(i)) == 0) {
       
  2730 				ew32(MDEF(i), (E1000_MDEF_PORT_623 |
       
  2731 					       E1000_MDEF_PORT_664));
       
  2732 				manc2h |= (1 << i);
       
  2733 				j++;
       
  2734 				break;
       
  2735 			}
       
  2736 
       
  2737 		if (!j)
       
  2738 			e_warn("Unable to create IPMI pass-through filter\n");
       
  2739 		break;
       
  2740 	}
       
  2741 
       
  2742 	ew32(MANC2H, manc2h);
       
  2743 	ew32(MANC, manc);
       
  2744 }
       
  2745 
       
  2746 /**
       
  2747  * e1000_configure_tx - Configure Transmit Unit after Reset
       
  2748  * @adapter: board private structure
       
  2749  *
       
  2750  * Configure the Tx unit of the MAC after a reset.
       
  2751  **/
       
  2752 static void e1000_configure_tx(struct e1000_adapter *adapter)
       
  2753 {
       
  2754 	struct e1000_hw *hw = &adapter->hw;
       
  2755 	struct e1000_ring *tx_ring = adapter->tx_ring;
       
  2756 	u64 tdba;
       
  2757 	u32 tdlen, tctl, tipg, tarc;
       
  2758 	u32 ipgr1, ipgr2;
       
  2759 
       
  2760 	/* Setup the HW Tx Head and Tail descriptor pointers */
       
  2761 	tdba = tx_ring->dma;
       
  2762 	tdlen = tx_ring->count * sizeof(struct e1000_tx_desc);
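       	/* e.g. 256 descriptors x 16 bytes each = a 4 KB descriptor ring */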
       
  2763 	ew32(TDBAL, (tdba & DMA_BIT_MASK(32)));
       
  2764 	ew32(TDBAH, (tdba >> 32));
       
  2765 	ew32(TDLEN, tdlen);
       
  2766 	ew32(TDH, 0);
       
  2767 	ew32(TDT, 0);
       
  2768 	tx_ring->head = E1000_TDH;
       
  2769 	tx_ring->tail = E1000_TDT;
       
  2770 
       
  2771 	/* Set the default values for the Tx Inter Packet Gap timer */
       
  2772 	tipg = DEFAULT_82543_TIPG_IPGT_COPPER;          /*  8  */
       
  2773 	ipgr1 = DEFAULT_82543_TIPG_IPGR1;               /*  8  */
       
  2774 	ipgr2 = DEFAULT_82543_TIPG_IPGR2;               /*  6  */
       
  2775 
       
  2776 	if (adapter->flags & FLAG_TIPG_MEDIUM_FOR_80003ESLAN)
       
  2777 		ipgr2 = DEFAULT_80003ES2LAN_TIPG_IPGR2; /*  7  */
       
  2778 
       
  2779 	tipg |= ipgr1 << E1000_TIPG_IPGR1_SHIFT;
       
  2780 	tipg |= ipgr2 << E1000_TIPG_IPGR2_SHIFT;
       
  2781 	ew32(TIPG, tipg);
       
  2782 
       
  2783 	/* Set the Tx Interrupt Delay register */
       
  2784 	ew32(TIDV, adapter->tx_int_delay);
       
  2785 	/* Tx irq moderation */
       
  2786 	ew32(TADV, adapter->tx_abs_int_delay);
       
  2787 
       
  2788 	if (adapter->flags2 & FLAG2_DMA_BURST) {
       
  2789 		u32 txdctl = er32(TXDCTL(0));
       
  2790 		txdctl &= ~(E1000_TXDCTL_PTHRESH | E1000_TXDCTL_HTHRESH |
       
  2791 			    E1000_TXDCTL_WTHRESH);
       
  2792 		/*
       
  2793 		 * set up some performance related parameters to encourage the
       
  2794 		 * hardware to use the bus more efficiently in bursts, depends
       
  2795 		 * on the tx_int_delay to be enabled,
       
  2796 		 * wthresh = 5 ==> burst write a cacheline (64 bytes) at a time
       
  2797 		 * hthresh = 1 ==> prefetch when one or more available
       
  2798 		 * pthresh = 0x1f ==> prefetch if internal cache 31 or less
       
  2799 		 * BEWARE: this seems to work but should be considered first if
       
  2800 		 * there are Tx hangs or other Tx related bugs
       
  2801 		 */
       
  2802 		txdctl |= E1000_TXDCTL_DMA_BURST_ENABLE;
       
  2803 		ew32(TXDCTL(0), txdctl);
       
  2804 		/* erratum work around: set txdctl the same for both queues */
       
  2805 		ew32(TXDCTL(1), txdctl);
       
  2806 	}
       
  2807 
       
  2808 	/* Program the Transmit Control Register */
       
  2809 	tctl = er32(TCTL);
       
  2810 	tctl &= ~E1000_TCTL_CT;
       
  2811 	tctl |= E1000_TCTL_PSP | E1000_TCTL_RTLC |
       
  2812 		(E1000_COLLISION_THRESHOLD << E1000_CT_SHIFT);
       
  2813 
       
  2814 	if (adapter->flags & FLAG_TARC_SPEED_MODE_BIT) {
       
  2815 		tarc = er32(TARC(0));
       
  2816 		/*
       
  2817 		 * set the speed mode bit, we'll clear it if we're not at
       
  2818 		 * gigabit link later
       
  2819 		 */
       
  2820 #define SPEED_MODE_BIT (1 << 21)
       
  2821 		tarc |= SPEED_MODE_BIT;
       
  2822 		ew32(TARC(0), tarc);
       
  2823 	}
       
  2824 
       
  2825 	/* errata: program both queues to unweighted RR */
       
  2826 	if (adapter->flags & FLAG_TARC_SET_BIT_ZERO) {
       
  2827 		tarc = er32(TARC(0));
       
  2828 		tarc |= 1;
       
  2829 		ew32(TARC(0), tarc);
       
  2830 		tarc = er32(TARC(1));
       
  2831 		tarc |= 1;
       
  2832 		ew32(TARC(1), tarc);
       
  2833 	}
       
  2834 
       
  2835 	/* Setup Transmit Descriptor Settings for eop descriptor */
       
  2836 	adapter->txd_cmd = E1000_TXD_CMD_EOP | E1000_TXD_CMD_IFCS;
       
  2837 
       
  2838 	/* only set IDE if we are delaying interrupts using the timers */
       
  2839 	if (adapter->tx_int_delay)
       
  2840 		adapter->txd_cmd |= E1000_TXD_CMD_IDE;
       
  2841 
       
  2842 	/* enable Report Status bit */
       
  2843 	adapter->txd_cmd |= E1000_TXD_CMD_RS;
       
  2844 
       
  2845 	ew32(TCTL, tctl);
       
  2846 
       
  2847 	e1000e_config_collision_dist(hw);
       
  2848 }
       
  2849 
       
  2850 /**
       
  2851  * e1000_setup_rctl - configure the receive control registers
       
  2852  * @adapter: Board private structure
       
  2853  **/
       
  2854 #define PAGE_USE_COUNT(S) (((S) >> PAGE_SHIFT) + \
       
  2855 			   (((S) & (PAGE_SIZE - 1)) ? 1 : 0))
       
  2856 static void e1000_setup_rctl(struct e1000_adapter *adapter)
       
  2857 {
       
  2858 	struct e1000_hw *hw = &adapter->hw;
       
  2859 	u32 rctl, rfctl;
       
  2860 	u32 pages = 0;
       
  2861 
       
  2862 	/* Workaround Si errata on 82579 - configure jumbo frame flow */
       
  2863 	if (hw->mac.type == e1000_pch2lan) {
       
  2864 		s32 ret_val;
       
  2865 
       
  2866 		if (adapter->netdev->mtu > ETH_DATA_LEN)
       
  2867 			ret_val = e1000_lv_jumbo_workaround_ich8lan(hw, true);
       
  2868 		else
       
  2869 			ret_val = e1000_lv_jumbo_workaround_ich8lan(hw, false);
       
  2870 
       
  2871 		if (ret_val)
       
  2872 			e_dbg("failed to apply the jumbo frame workaround\n");
       
  2873 	}
       
  2874 
       
  2875 	/* Program MC offset vector base */
       
  2876 	rctl = er32(RCTL);
       
  2877 	rctl &= ~(3 << E1000_RCTL_MO_SHIFT);
       
  2878 	rctl |= E1000_RCTL_EN | E1000_RCTL_BAM |
       
  2879 		E1000_RCTL_LBM_NO | E1000_RCTL_RDMTS_HALF |
       
  2880 		(adapter->hw.mac.mc_filter_type << E1000_RCTL_MO_SHIFT);
       
  2881 
       
  2882 	/* Do not Store bad packets */
       
  2883 	rctl &= ~E1000_RCTL_SBP;
       
  2884 
       
  2885 	/* Enable Long Packet receive */
       
  2886 	if (adapter->netdev->mtu <= ETH_DATA_LEN)
       
  2887 		rctl &= ~E1000_RCTL_LPE;
       
  2888 	else
       
  2889 		rctl |= E1000_RCTL_LPE;
       
  2890 
       
  2891 	/* Some systems expect that the CRC is included in SMBUS traffic. The
       
  2892 	 * hardware strips the CRC before sending to both SMBUS (BMC) and to
       
  2893 	 * host memory when this is enabled
       
  2894 	 */
       
  2895 	if (adapter->flags2 & FLAG2_CRC_STRIPPING)
       
  2896 		rctl |= E1000_RCTL_SECRC;
       
  2897 
       
  2898 	/* Workaround Si errata on 82577 PHY - configure IPG for jumbos */
       
  2899 	if ((hw->phy.type == e1000_phy_82577) && (rctl & E1000_RCTL_LPE)) {
       
  2900 		u16 phy_data;
       
  2901 
       
  2902 		e1e_rphy(hw, PHY_REG(770, 26), &phy_data);
       
  2903 		phy_data &= 0xfff8;
       
  2904 		phy_data |= (1 << 2);
       
  2905 		e1e_wphy(hw, PHY_REG(770, 26), phy_data);
       
  2906 
       
  2907 		e1e_rphy(hw, 22, &phy_data);
       
  2908 		phy_data &= 0x0fff;
       
  2909 		phy_data |= (1 << 14);
       
  2910 		e1e_wphy(hw, 0x10, 0x2823);
       
  2911 		e1e_wphy(hw, 0x11, 0x0003);
       
  2912 		e1e_wphy(hw, 22, phy_data);
       
  2913 	}
       
  2914 
       
  2915 	/* Setup buffer sizes */
       
  2916 	rctl &= ~E1000_RCTL_SZ_4096;
       
  2917 	rctl |= E1000_RCTL_BSEX;
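       	/*
       	 * BSEX scales up the SZ encoding: it stays set for the 4/8/16 KB
       	 * buffer sizes below and is cleared again for the default 2048.
       	 */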
       
  2918 	switch (adapter->rx_buffer_len) {
       
  2919 	case 2048:
       
  2920 	default:
       
  2921 		rctl |= E1000_RCTL_SZ_2048;
       
  2922 		rctl &= ~E1000_RCTL_BSEX;
       
  2923 		break;
       
  2924 	case 4096:
       
  2925 		rctl |= E1000_RCTL_SZ_4096;
       
  2926 		break;
       
  2927 	case 8192:
       
  2928 		rctl |= E1000_RCTL_SZ_8192;
       
  2929 		break;
       
  2930 	case 16384:
       
  2931 		rctl |= E1000_RCTL_SZ_16384;
       
  2932 		break;
       
  2933 	}
       
  2934 
       
  2935 	/* Enable Extended Status in all Receive Descriptors */
       
  2936 	rfctl = er32(RFCTL);
       
  2937 	rfctl |= E1000_RFCTL_EXTEN;
       
  2938 
       
  2939 	/*
       
  2940 	 * 82571 and greater support packet-split where the protocol
       
  2941 	 * header is placed in skb->data and the packet data is
       
  2942 	 * placed in pages hanging off of skb_shinfo(skb)->nr_frags.
       
  2943 	 * In the case of a non-split, skb->data is linearly filled,
       
  2944 	 * followed by the page buffers.  Therefore, skb->data is
       
  2945 	 * sized to hold the largest protocol header.
       
  2946 	 *
       
  2947 	 * allocations using alloc_page take too long for regular MTU
       
  2948 	 * so only enable packet split for jumbo frames
       
  2949 	 *
       
  2950 	 * Using pages when the page size is greater than 16k wastes
       
  2951 	 * a lot of memory, since we allocate 3 pages at all times
       
  2952 	 * per packet.
       
  2953 	 */
       
  2954 	pages = PAGE_USE_COUNT(adapter->netdev->mtu);
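       	/* e.g. a 9000-byte MTU with 4 KiB pages gives pages = 3 */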
       
  2955 	if (!(adapter->flags & FLAG_HAS_ERT) && (pages <= 3) &&
       
  2956 	    (PAGE_SIZE <= 16384) && (rctl & E1000_RCTL_LPE))
       
  2957 		adapter->rx_ps_pages = pages;
       
  2958 	else
       
  2959 		adapter->rx_ps_pages = 0;
       
  2960 
       
  2961 	if (adapter->rx_ps_pages) {
       
  2962 		u32 psrctl = 0;
       
  2963 
       
  2964 		/*
       
  2965 		 * disable packet split support for IPv6 extension headers,
       
  2966 		 * because some malformed IPv6 headers can hang the Rx
       
  2967 		 */
       
  2968 		rfctl |= (E1000_RFCTL_IPV6_EX_DIS |
       
  2969 			  E1000_RFCTL_NEW_IPV6_EXT_DIS);
       
  2970 
       
  2971 		/* Enable Packet split descriptors */
       
  2972 		rctl |= E1000_RCTL_DTYP_PS;
       
  2973 
       
  2974 		psrctl |= adapter->rx_ps_bsize0 >>
       
  2975 			E1000_PSRCTL_BSIZE0_SHIFT;
       
  2976 
       
  2977 		switch (adapter->rx_ps_pages) {
       
  2978 		case 3:
       
  2979 			psrctl |= PAGE_SIZE <<
       
  2980 				E1000_PSRCTL_BSIZE3_SHIFT;
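       			/* fall through */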
       
  2981 		case 2:
       
  2982 			psrctl |= PAGE_SIZE <<
       
  2983 				E1000_PSRCTL_BSIZE2_SHIFT;
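       			/* fall through */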
       
  2984 		case 1:
       
  2985 			psrctl |= PAGE_SIZE >>
       
  2986 				E1000_PSRCTL_BSIZE1_SHIFT;
       
  2987 			break;
       
  2988 		}
       
  2989 
       
  2990 		ew32(PSRCTL, psrctl);
       
  2991 	}
       
  2992 
       
  2993 	ew32(RFCTL, rfctl);
       
  2994 	ew32(RCTL, rctl);
       
  2995 	/* just started the receive unit, no need to restart */
       
  2996 	adapter->flags &= ~FLAG_RX_RESTART_NOW;
       
  2997 }
       
  2998 
       
  2999 /**
       
  3000  * e1000_configure_rx - Configure Receive Unit after Reset
       
  3001  * @adapter: board private structure
       
  3002  *
       
  3003  * Configure the Rx unit of the MAC after a reset.
       
  3004  **/
       
  3005 static void e1000_configure_rx(struct e1000_adapter *adapter)
       
  3006 {
       
  3007 	struct e1000_hw *hw = &adapter->hw;
       
  3008 	struct e1000_ring *rx_ring = adapter->rx_ring;
       
  3009 	u64 rdba;
       
  3010 	u32 rdlen, rctl, rxcsum, ctrl_ext;
       
  3011 
       
  3012 	if (adapter->rx_ps_pages) {
       
  3013 		/* this is a 32 byte descriptor */
       
  3014 		rdlen = rx_ring->count *
       
  3015 		    sizeof(union e1000_rx_desc_packet_split);
       
  3016 		adapter->clean_rx = e1000_clean_rx_irq_ps;
       
  3017 		adapter->alloc_rx_buf = e1000_alloc_rx_buffers_ps;
       
  3018 	} else if (adapter->netdev->mtu > ETH_FRAME_LEN + ETH_FCS_LEN) {
       
  3019 		rdlen = rx_ring->count * sizeof(union e1000_rx_desc_extended);
       
  3020 		adapter->clean_rx = e1000_clean_jumbo_rx_irq;
       
  3021 		adapter->alloc_rx_buf = e1000_alloc_jumbo_rx_buffers;
       
  3022 	} else {
       
  3023 		rdlen = rx_ring->count * sizeof(union e1000_rx_desc_extended);
       
  3024 		adapter->clean_rx = e1000_clean_rx_irq;
       
  3025 		adapter->alloc_rx_buf = e1000_alloc_rx_buffers;
       
  3026 	}
       
  3027 
       
  3028 	/* disable receives while setting up the descriptors */
       
  3029 	rctl = er32(RCTL);
       
  3030 	if (!(adapter->flags2 & FLAG2_NO_DISABLE_RX))
       
  3031 		ew32(RCTL, rctl & ~E1000_RCTL_EN);
       
  3032 	e1e_flush();
       
  3033 	usleep_range(10000, 20000);
       
  3034 
       
  3035 	if (adapter->flags2 & FLAG2_DMA_BURST) {
       
  3036 		/*
       
  3037 		 * set the writeback threshold (only takes effect if the RDTR
       
  3038 		 * is set). set GRAN=1 and write back up to 0x4 worth, and
       
  3039 		 * enable prefetching of 0x20 Rx descriptors
       
  3040 		 * granularity = 01
       
  3041 		 * wthresh = 04,
       
  3042 		 * hthresh = 04,
       
  3043 		 * pthresh = 0x20
       
  3044 		 */
       
  3045 		ew32(RXDCTL(0), E1000_RXDCTL_DMA_BURST_ENABLE);
       
  3046 		ew32(RXDCTL(1), E1000_RXDCTL_DMA_BURST_ENABLE);
       
  3047 
       
  3048 		/*
       
  3049 		 * override the delay timers for enabling bursting, only if
       
  3050 		 * the value was not set by the user via module options
       
  3051 		 */
       
  3052 		if (adapter->rx_int_delay == DEFAULT_RDTR)
       
  3053 			adapter->rx_int_delay = BURST_RDTR;
       
  3054 		if (adapter->rx_abs_int_delay == DEFAULT_RADV)
       
  3055 			adapter->rx_abs_int_delay = BURST_RADV;
       
  3056 	}
       
  3057 
       
  3058 	/* set the Receive Delay Timer Register */
       
  3059 	ew32(RDTR, adapter->rx_int_delay);
       
  3060 
       
  3061 	/* irq moderation */
       
  3062 	ew32(RADV, adapter->rx_abs_int_delay);
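       	/*
       	 * The ITR register counts in 256 ns units, so e.g. itr = 20000
       	 * interrupts/sec programs 1000000000 / (20000 * 256) ~= 195,
       	 * i.e. roughly 50 us between interrupts.
       	 */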
       
  3063 	if ((adapter->itr_setting != 0) && (adapter->itr != 0))
       
  3064 		ew32(ITR, 1000000000 / (adapter->itr * 256));
       
  3065 
       
  3066 	ctrl_ext = er32(CTRL_EXT);
       
  3067 	/* Auto-Mask interrupts upon ICR access */
       
  3068 	ctrl_ext |= E1000_CTRL_EXT_IAME;
       
  3069 	ew32(IAM, 0xffffffff);
       
  3070 	ew32(CTRL_EXT, ctrl_ext);
       
  3071 	e1e_flush();
       
  3072 
       
  3073 	/*
       
  3074 	 * Setup the HW Rx Head and Tail Descriptor Pointers and
       
  3075 	 * the Base and Length of the Rx Descriptor Ring
       
  3076 	 */
       
  3077 	rdba = rx_ring->dma;
       
  3078 	ew32(RDBAL, (rdba & DMA_BIT_MASK(32)));
       
  3079 	ew32(RDBAH, (rdba >> 32));
       
  3080 	ew32(RDLEN, rdlen);
       
  3081 	ew32(RDH, 0);
       
  3082 	ew32(RDT, 0);
       
  3083 	rx_ring->head = E1000_RDH;
       
  3084 	rx_ring->tail = E1000_RDT;
       
  3085 
       
  3086 	/* Enable Receive Checksum Offload for TCP and UDP */
       
  3087 	rxcsum = er32(RXCSUM);
       
  3088 	if (adapter->netdev->features & NETIF_F_RXCSUM) {
       
  3089 		rxcsum |= E1000_RXCSUM_TUOFL;
       
  3090 
       
  3091 		/*
       
  3092 		 * IPv4 payload checksum for UDP fragments must be
       
  3093 		 * used in conjunction with packet-split.
       
  3094 		 */
       
  3095 		if (adapter->rx_ps_pages)
       
  3096 			rxcsum |= E1000_RXCSUM_IPPCSE;
       
  3097 	} else {
       
  3098 		rxcsum &= ~E1000_RXCSUM_TUOFL;
       
  3099 		/* no need to clear IPPCSE as it defaults to 0 */
       
  3100 	}
       
  3101 	ew32(RXCSUM, rxcsum);
       
  3102 
       
  3103 	/*
       
  3104 	 * Enable early receives on supported devices, only takes effect when
       
  3105 	 * packet size is equal or larger than the specified value (in 8 byte
       
  3106 	 * units), e.g. using jumbo frames when setting to E1000_ERT_2048
       
  3107 	 */
       
  3108 	if ((adapter->flags & FLAG_HAS_ERT) ||
       
  3109 	    (adapter->hw.mac.type == e1000_pch2lan)) {
       
  3110 		if (adapter->netdev->mtu > ETH_DATA_LEN) {
       
  3111 			u32 rxdctl = er32(RXDCTL(0));
       
  3112 			ew32(RXDCTL(0), rxdctl | 0x3);
       
  3113 			if (adapter->flags & FLAG_HAS_ERT)
       
  3114 				ew32(ERT, E1000_ERT_2048 | (1 << 13));
       
  3115 			/*
       
  3116 			 * With jumbo frames and early-receive enabled,
       
  3117 			 * excessive C-state transition latencies result in
       
  3118 			 * dropped transactions.
       
  3119 			 */
       
  3120 			pm_qos_update_request(&adapter->netdev->pm_qos_req, 55);
       
  3121 		} else {
       
  3122 			pm_qos_update_request(&adapter->netdev->pm_qos_req,
       
  3123 					      PM_QOS_DEFAULT_VALUE);
       
  3124 		}
       
  3125 	}
       
  3126 
       
  3127 	/* Enable Receives */
       
  3128 	ew32(RCTL, rctl);
       
  3129 }
       
  3130 
       
  3131 /**
       
  3132  *  e1000_update_mc_addr_list - Update Multicast addresses
       
  3133  *  @hw: pointer to the HW structure
       
  3134  *  @mc_addr_list: array of multicast addresses to program
       
  3135  *  @mc_addr_count: number of multicast addresses to program
       
  3136  *
       
  3137  *  Updates the Multicast Table Array.
       
  3138  *  The caller must have a packed mc_addr_list of multicast addresses.
       
  3139  **/
       
  3140 static void e1000_update_mc_addr_list(struct e1000_hw *hw, u8 *mc_addr_list,
       
  3141 				      u32 mc_addr_count)
       
  3142 {
       
  3143 	hw->mac.ops.update_mc_addr_list(hw, mc_addr_list, mc_addr_count);
       
  3144 }
       
  3145 
       
  3146 /**
       
  3147  * e1000_set_multi - Multicast and Promiscuous mode set
       
  3148  * @netdev: network interface device structure
       
  3149  *
       
  3150  * The set_multi entry point is called whenever the multicast address
       
  3151  * list or the network interface flags are updated.  This routine is
       
  3152  * responsible for configuring the hardware for proper multicast,
       
  3153  * promiscuous mode, and all-multi behavior.
       
  3154  **/
       
  3155 static void e1000_set_multi(struct net_device *netdev)
       
  3156 {
       
  3157 	struct e1000_adapter *adapter = netdev_priv(netdev);
       
  3158 	struct e1000_hw *hw = &adapter->hw;
       
  3159 	struct netdev_hw_addr *ha;
       
  3160 	u8  *mta_list;
       
  3161 	u32 rctl;
       
  3162 
       
  3163 	/* Check for Promiscuous and All Multicast modes */
       
  3164 
       
  3165 	rctl = er32(RCTL);
       
  3166 
       
  3167 	if (netdev->flags & IFF_PROMISC) {
       
  3168 		rctl |= (E1000_RCTL_UPE | E1000_RCTL_MPE);
       
  3169 		rctl &= ~E1000_RCTL_VFE;
       
  3170 		/* Do not hardware filter VLANs in promisc mode */
       
  3171 		e1000e_vlan_filter_disable(adapter);
       
  3172 	} else {
       
  3173 		if (netdev->flags & IFF_ALLMULTI) {
       
  3174 			rctl |= E1000_RCTL_MPE;
       
  3175 			rctl &= ~E1000_RCTL_UPE;
       
  3176 		} else {
       
  3177 			rctl &= ~(E1000_RCTL_UPE | E1000_RCTL_MPE);
       
  3178 		}
       
  3179 		e1000e_vlan_filter_enable(adapter);
       
  3180 	}
       
  3181 
       
  3182 	ew32(RCTL, rctl);
       
  3183 
       
  3184 	if (!netdev_mc_empty(netdev)) {
       
  3185 		int i = 0;
       
  3186 
       
  3187 		mta_list = kmalloc(netdev_mc_count(netdev) * ETH_ALEN, GFP_ATOMIC);
       
  3188 		if (!mta_list)
       
  3189 			return;
       
  3190 
       
  3191 		/* prepare a packed array of only addresses. */
       
  3192 		netdev_for_each_mc_addr(ha, netdev)
       
  3193 			memcpy(mta_list + (i++ * ETH_ALEN), ha->addr, ETH_ALEN);
       
  3194 
       
  3195 		e1000_update_mc_addr_list(hw, mta_list, i);
       
  3196 		kfree(mta_list);
       
  3197 	} else {
       
  3198 		/*
       
  3199 		 * if we're called from probe, we might not have
       
  3200 		 * anything to do here, so clear out the list
       
  3201 		 */
       
  3202 		e1000_update_mc_addr_list(hw, NULL, 0);
       
  3203 	}
       
  3204 
       
  3205 	if (netdev->features & NETIF_F_HW_VLAN_RX)
       
  3206 		e1000e_vlan_strip_enable(adapter);
       
  3207 	else
       
  3208 		e1000e_vlan_strip_disable(adapter);
       
  3209 }
       
  3210 
       
  3211 /**
       
  3212  * e1000_configure - configure the hardware for Rx and Tx
       
  3213  * @adapter: private board structure
       
  3214  **/
       
  3215 static void e1000_configure(struct e1000_adapter *adapter)
       
  3216 {
       
  3217 	e1000_set_multi(adapter->netdev);
       
  3218 
       
  3219 	e1000_restore_vlan(adapter);
       
  3220 	e1000_init_manageability_pt(adapter);
       
  3221 
       
  3222 	e1000_configure_tx(adapter);
       
  3223 	e1000_setup_rctl(adapter);
       
  3224 	e1000_configure_rx(adapter);
       
  3225 	adapter->alloc_rx_buf(adapter, e1000_desc_unused(adapter->rx_ring),
       
  3226 			      GFP_KERNEL);
       
  3227 }
       
  3228 
       
  3229 /**
       
  3230  * e1000e_power_up_phy - restore link in case the phy was powered down
       
  3231  * @adapter: address of board private structure
       
  3232  *
       
  3233  * The phy may be powered down to save power and turn off link when the
       
  3234  * driver is unloaded and wake on lan is not enabled (among others)
       
  3235  * *** this routine MUST be followed by a call to e1000e_reset ***
       
  3236  **/
       
  3237 void e1000e_power_up_phy(struct e1000_adapter *adapter)
       
  3238 {
       
  3239 	if (adapter->hw.phy.ops.power_up)
       
  3240 		adapter->hw.phy.ops.power_up(&adapter->hw);
       
  3241 
       
  3242 	adapter->hw.mac.ops.setup_link(&adapter->hw);
       
  3243 }
       
  3244 
       
  3245 /**
       
  3246  * e1000_power_down_phy - Power down the PHY
       
  3247  *
       
  3248  * Power down the PHY so no link is implied when interface is down.
       
  3249  * The PHY cannot be powered down if management or WoL is active.
       
  3250  */
       
  3251 static void e1000_power_down_phy(struct e1000_adapter *adapter)
       
  3252 {
       
  3253 	/* WoL is enabled */
       
  3254 	if (adapter->wol)
       
  3255 		return;
       
  3256 
       
  3257 	if (adapter->hw.phy.ops.power_down)
       
  3258 		adapter->hw.phy.ops.power_down(&adapter->hw);
       
  3259 }
       
  3260 
       
  3261 /**
       
  3262  * e1000e_reset - bring the hardware into a known good state
       
  3263  *
       
  3264  * This function boots the hardware and enables some settings that
       
  3265  * require a configuration cycle of the hardware - those cannot be
       
  3266  * set/changed during runtime. After reset the device needs to be
       
  3267  * properly configured for Rx, Tx etc.
       
  3268  */
       
  3269 void e1000e_reset(struct e1000_adapter *adapter)
       
  3270 {
       
  3271 	struct e1000_mac_info *mac = &adapter->hw.mac;
       
  3272 	struct e1000_fc_info *fc = &adapter->hw.fc;
       
  3273 	struct e1000_hw *hw = &adapter->hw;
       
  3274 	u32 tx_space, min_tx_space, min_rx_space;
       
  3275 	u32 pba = adapter->pba;
       
  3276 	u16 hwm;
       
  3277 
       
  3278 	/* reset Packet Buffer Allocation to default */
       
  3279 	ew32(PBA, pba);
       
  3280 
       
  3281 	if (adapter->max_frame_size > ETH_FRAME_LEN + ETH_FCS_LEN) {
       
  3282 		/*
       
  3283 		 * To maintain wire speed transmits, the Tx FIFO should be
       
  3284 		 * large enough to accommodate two full transmit packets,
       
  3285 		 * rounded up to the next 1KB and expressed in KB.  Likewise,
       
  3286 		 * the Rx FIFO should be large enough to accommodate at least
       
  3287 		 * one full receive packet and is similarly rounded up and
       
  3288 		 * expressed in KB.
       
  3289 		 */
       
  3290 		pba = er32(PBA);
       
  3291 		/* upper 16 bits has Tx packet buffer allocation size in KB */
       
  3292 		tx_space = pba >> 16;
       
  3293 		/* lower 16 bits has Rx packet buffer allocation size in KB */
       
  3294 		pba &= 0xffff;
       
  3295 		/*
       
  3296 		 * the Tx fifo also stores 16 bytes of information about the Tx
       
  3297 		 * but don't include ethernet FCS because hardware appends it
       
  3298 		 */
       
  3299 		min_tx_space = (adapter->max_frame_size +
       
  3300 				sizeof(struct e1000_tx_desc) -
       
  3301 				ETH_FCS_LEN) * 2;
       
  3302 		min_tx_space = ALIGN(min_tx_space, 1024);
       
  3303 		min_tx_space >>= 10;
       
  3304 		/* software strips receive CRC, so leave room for it */
       
  3305 		min_rx_space = adapter->max_frame_size;
       
  3306 		min_rx_space = ALIGN(min_rx_space, 1024);
       
  3307 		min_rx_space >>= 10;
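       		/*
       		 * e.g. for a 9000-byte MTU (max_frame_size = 9018):
       		 * min_tx_space = ALIGN((9018 + 16 - 4) * 2, 1024) >> 10 = 18 KB
       		 * min_rx_space = ALIGN(9018, 1024) >> 10 = 9 KB
       		 */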
       
  3308 
       
  3309 		/*
       
  3310 		 * If current Tx allocation is less than the min Tx FIFO size,
       
  3311 		 * and the min Tx FIFO size is less than the current Rx FIFO
       
  3312 		 * allocation, take space away from current Rx allocation
       
  3313 		 */
       
  3314 		if ((tx_space < min_tx_space) &&
       
  3315 		    ((min_tx_space - tx_space) < pba)) {
       
  3316 			pba -= min_tx_space - tx_space;
       
  3317 
       
  3318 			/*
       
  3319 			 * if short on Rx space, Rx wins and must trump Tx
       
  3320 			 * adjustment or use Early Receive if available
       
  3321 			 */
       
  3322 			if ((pba < min_rx_space) &&
       
  3323 			    (!(adapter->flags & FLAG_HAS_ERT)))
       
  3324 				/* ERT enabled in e1000_configure_rx */
       
  3325 				pba = min_rx_space;
       
  3326 		}
       
  3327 
       
  3328 		ew32(PBA, pba);
       
  3329 	}
       
  3330 
       
  3331 	/*
       
  3332 	 * flow control settings
       
  3333 	 *
       
  3334 	 * The high water mark must be low enough to fit one full frame
       
  3335 	 * (or the size used for early receive) above it in the Rx FIFO.
       
  3336 	 * Set it to the lower of:
       
  3337 	 * - 90% of the Rx FIFO size, and
       
  3338 	 * - the full Rx FIFO size minus the early receive size (for parts
       
  3339 	 *   with ERT support assuming ERT set to E1000_ERT_2048), or
       
  3340 	 * - the full Rx FIFO size minus one full frame
       
  3341 	 */
       
  3342 	if (adapter->flags & FLAG_DISABLE_FC_PAUSE_TIME)
       
  3343 		fc->pause_time = 0xFFFF;
       
  3344 	else
       
  3345 		fc->pause_time = E1000_FC_PAUSE_TIME;
       
  3346 	fc->send_xon = 1;
       
  3347 	fc->current_mode = fc->requested_mode;
       
  3348 
       
  3349 	switch (hw->mac.type) {
       
  3350 	default:
       
  3351 		if ((adapter->flags & FLAG_HAS_ERT) &&
       
  3352 		    (adapter->netdev->mtu > ETH_DATA_LEN))
       
  3353 			hwm = min(((pba << 10) * 9 / 10),
       
  3354 				  ((pba << 10) - (E1000_ERT_2048 << 3)));
       
  3355 		else
       
  3356 			hwm = min(((pba << 10) * 9 / 10),
       
  3357 				  ((pba << 10) - adapter->max_frame_size));
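       		/*
       		 * e.g. pba = 20 KB with a 1500-byte MTU (1518-byte frames):
       		 * hwm = min(20480 * 9 / 10, 20480 - 1518) = 18432 bytes
       		 */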
       
  3358 
       
  3359 		fc->high_water = hwm & E1000_FCRTH_RTH; /* 8-byte granularity */
       
  3360 		fc->low_water = fc->high_water - 8;
       
  3361 		break;
       
  3362 	case e1000_pchlan:
       
  3363 		/*
       
  3364 		 * Workaround PCH LOM adapter hangs with certain network
       
  3365 		 * loads.  If hangs persist, try disabling Tx flow control.
       
  3366 		 */
       
  3367 		if (adapter->netdev->mtu > ETH_DATA_LEN) {
       
  3368 			fc->high_water = 0x3500;
       
  3369 			fc->low_water  = 0x1500;
       
  3370 		} else {
       
  3371 			fc->high_water = 0x5000;
       
  3372 			fc->low_water  = 0x3000;
       
  3373 		}
       
  3374 		fc->refresh_time = 0x1000;
       
  3375 		break;
       
  3376 	case e1000_pch2lan:
       
  3377 		fc->high_water = 0x05C20;
       
  3378 		fc->low_water = 0x05048;
       
  3379 		fc->pause_time = 0x0650;
       
  3380 		fc->refresh_time = 0x0400;
       
  3381 		if (adapter->netdev->mtu > ETH_DATA_LEN) {
       
  3382 			pba = 14;
       
  3383 			ew32(PBA, pba);
       
  3384 		}
       
  3385 		break;
       
  3386 	}
       
  3387 
       
  3388 	/*
       
  3389 	 * Disable Adaptive Interrupt Moderation if 2 full packets cannot
       
  3390 	 * fit in receive buffer and early-receive not supported.
       
  3391 	 */
       
  3392 	if (adapter->itr_setting & 0x3) {
       
  3393 		if (((adapter->max_frame_size * 2) > (pba << 10)) &&
       
  3394 		    !(adapter->flags & FLAG_HAS_ERT)) {
       
  3395 			if (!(adapter->flags2 & FLAG2_DISABLE_AIM)) {
       
  3396 				dev_info(&adapter->pdev->dev,
       
  3397 					"Interrupt Throttle Rate turned off\n");
       
  3398 				adapter->flags2 |= FLAG2_DISABLE_AIM;
       
  3399 				ew32(ITR, 0);
       
  3400 			}
       
  3401 		} else if (adapter->flags2 & FLAG2_DISABLE_AIM) {
       
  3402 			dev_info(&adapter->pdev->dev,
       
  3403 				 "Interrupt Throttle Rate turned on\n");
       
  3404 			adapter->flags2 &= ~FLAG2_DISABLE_AIM;
       
  3405 			adapter->itr = 20000;
       
  3406 			ew32(ITR, 1000000000 / (adapter->itr * 256));
       
  3407 		}
       
  3408 	}
       
  3409 
       
  3410 	/* Allow time for pending master requests to run */
       
  3411 	mac->ops.reset_hw(hw);
       
  3412 
       
  3413 	/*
       
  3414 	 * For parts with AMT enabled, let the firmware know
       
  3415 	 * that the network interface is in control
       
  3416 	 */
       
  3417 	if (adapter->flags & FLAG_HAS_AMT)
       
  3418 		e1000e_get_hw_control(adapter);
       
  3419 
       
  3420 	ew32(WUC, 0);
       
  3421 
       
  3422 	if (mac->ops.init_hw(hw))
       
  3423 		e_err("Hardware Error\n");
       
  3424 
       
  3425 	e1000_update_mng_vlan(adapter);
       
  3426 
       
  3427 	/* Enable h/w to recognize an 802.1Q VLAN Ethernet packet */
       
  3428 	ew32(VET, ETH_P_8021Q);
       
  3429 
       
  3430 	e1000e_reset_adaptive(hw);
       
  3431 
       
  3432 	if (!netif_running(adapter->netdev) &&
       
  3433 	    !test_bit(__E1000_TESTING, &adapter->state)) {
       
  3434 		e1000_power_down_phy(adapter);
       
  3435 		return;
       
  3436 	}
       
  3437 
       
  3438 	e1000_get_phy_info(hw);
       
  3439 
       
  3440 	if ((adapter->flags & FLAG_HAS_SMART_POWER_DOWN) &&
       
  3441 	    !(adapter->flags & FLAG_SMART_POWER_DOWN)) {
       
  3442 		u16 phy_data = 0;
       
  3443 		/*
       
  3444 		 * speed up time to link by disabling smart power down, ignore
       
  3445 		 * the return value of this function because there is nothing
       
  3446 		 * different we would do if it failed
       
  3447 		 */
       
  3448 		e1e_rphy(hw, IGP02E1000_PHY_POWER_MGMT, &phy_data);
       
  3449 		phy_data &= ~IGP02E1000_PM_SPD;
       
  3450 		e1e_wphy(hw, IGP02E1000_PHY_POWER_MGMT, phy_data);
       
  3451 	}
       
  3452 }
       
  3453 
       
  3454 int e1000e_up(struct e1000_adapter *adapter)
       
  3455 {
       
  3456 	struct e1000_hw *hw = &adapter->hw;
       
  3457 
       
  3458 	/* hardware has been reset, we need to reload some things */
       
  3459 	e1000_configure(adapter);
       
  3460 
       
  3461 	clear_bit(__E1000_DOWN, &adapter->state);
       
  3462 
       
  3463 	napi_enable(&adapter->napi);
       
  3464 	if (adapter->msix_entries)
       
  3465 		e1000_configure_msix(adapter);
       
  3466 	e1000_irq_enable(adapter);
       
  3467 
       
  3468 	netif_start_queue(adapter->netdev);
       
  3469 
       
  3470 	/* fire a link change interrupt to start the watchdog */
       
  3471 	if (adapter->msix_entries)
       
  3472 		ew32(ICS, E1000_ICS_LSC | E1000_ICR_OTHER);
       
  3473 	else
       
  3474 		ew32(ICS, E1000_ICS_LSC);
       
  3475 
       
  3476 	return 0;
       
  3477 }
       
  3478 
       
  3479 static void e1000e_flush_descriptors(struct e1000_adapter *adapter)
       
  3480 {
       
  3481 	struct e1000_hw *hw = &adapter->hw;
       
  3482 
       
  3483 	if (!(adapter->flags2 & FLAG2_DMA_BURST))
       
  3484 		return;
       
  3485 
       
  3486 	/* flush pending descriptor writebacks to memory */
       
  3487 	ew32(TIDV, adapter->tx_int_delay | E1000_TIDV_FPD);
       
  3488 	ew32(RDTR, adapter->rx_int_delay | E1000_RDTR_FPD);
       
  3489 
       
  3490 	/* execute the writes immediately */
       
  3491 	e1e_flush();
       
  3492 }
       
  3493 
       
  3494 static void e1000e_update_stats(struct e1000_adapter *adapter);
       
  3495 
       
  3496 void e1000e_down(struct e1000_adapter *adapter)
       
  3497 {
       
  3498 	struct net_device *netdev = adapter->netdev;
       
  3499 	struct e1000_hw *hw = &adapter->hw;
       
  3500 	u32 tctl, rctl;
       
  3501 
       
  3502 	/*
       
  3503 	 * signal that we're down so the interrupt handler does not
       
  3504 	 * reschedule our watchdog timer
       
  3505 	 */
       
  3506 	set_bit(__E1000_DOWN, &adapter->state);
       
  3507 
       
  3508 	/* disable receives in the hardware */
       
  3509 	rctl = er32(RCTL);
       
  3510 	if (!(adapter->flags2 & FLAG2_NO_DISABLE_RX))
       
  3511 		ew32(RCTL, rctl & ~E1000_RCTL_EN);
       
  3512 	/* flush and sleep below */
       
  3513 
       
  3514 	netif_stop_queue(netdev);
       
  3515 
       
  3516 	/* disable transmits in the hardware */
       
  3517 	tctl = er32(TCTL);
       
  3518 	tctl &= ~E1000_TCTL_EN;
       
  3519 	ew32(TCTL, tctl);
       
  3520 
       
  3521 	/* flush both disables and wait for them to finish */
       
  3522 	e1e_flush();
       
  3523 	usleep_range(10000, 20000);
       
  3524 
       
  3525 	napi_disable(&adapter->napi);
       
  3526 	e1000_irq_disable(adapter);
       
  3527 
       
  3528 	del_timer_sync(&adapter->watchdog_timer);
       
  3529 	del_timer_sync(&adapter->phy_info_timer);
       
  3530 
       
  3531 	netif_carrier_off(netdev);
       
  3532 
       
  3533 	spin_lock(&adapter->stats64_lock);
       
  3534 	e1000e_update_stats(adapter);
       
  3535 	spin_unlock(&adapter->stats64_lock);
       
  3536 
       
  3537 	e1000e_flush_descriptors(adapter);
       
  3538 	e1000_clean_tx_ring(adapter);
       
  3539 	e1000_clean_rx_ring(adapter);
       
  3540 
       
  3541 	adapter->link_speed = 0;
       
  3542 	adapter->link_duplex = 0;
       
  3543 
       
  3544 	if (!pci_channel_offline(adapter->pdev))
       
  3545 		e1000e_reset(adapter);
       
  3546 
       
  3547 	/*
       
  3548 	 * TODO: for power management, we could drop the link and
       
  3549 	 * pci_disable_device here.
       
  3550 	 */
       
  3551 }
       
  3552 
       
  3553 void e1000e_reinit_locked(struct e1000_adapter *adapter)
       
  3554 {
       
  3555 	might_sleep();
       
  3556 	while (test_and_set_bit(__E1000_RESETTING, &adapter->state))
       
  3557 		usleep_range(1000, 2000);
       
  3558 	e1000e_down(adapter);
       
  3559 	e1000e_up(adapter);
       
  3560 	clear_bit(__E1000_RESETTING, &adapter->state);
       
  3561 }
       
  3562 
       
  3563 /**
       
  3564  * e1000_sw_init - Initialize general software structures (struct e1000_adapter)
       
  3565  * @adapter: board private structure to initialize
       
  3566  *
       
  3567  * e1000_sw_init initializes the Adapter private data structure.
       
  3568  * Fields are initialized based on PCI device information and
       
  3569  * OS network device settings (MTU size).
       
  3570  **/
       
  3571 static int __devinit e1000_sw_init(struct e1000_adapter *adapter)
       
  3572 {
       
  3573 	struct net_device *netdev = adapter->netdev;
       
  3574 
       
  3575 	adapter->rx_buffer_len = ETH_FRAME_LEN + VLAN_HLEN + ETH_FCS_LEN;
       
  3576 	adapter->rx_ps_bsize0 = 128;
       
  3577 	adapter->max_frame_size = netdev->mtu + ETH_HLEN + ETH_FCS_LEN;
       
  3578 	adapter->min_frame_size = ETH_ZLEN + ETH_FCS_LEN;
       
  3579 
       
  3580 	spin_lock_init(&adapter->stats64_lock);
       
  3581 
       
  3582 	e1000e_set_interrupt_capability(adapter);
       
  3583 
       
  3584 	if (e1000_alloc_queues(adapter))
       
  3585 		return -ENOMEM;
       
  3586 
       
  3587 	/* Explicitly disable IRQ since the NIC can be in any state. */
       
  3588 	e1000_irq_disable(adapter);
       
  3589 
       
  3590 	set_bit(__E1000_DOWN, &adapter->state);
       
  3591 	return 0;
       
  3592 }
       
  3593 
       
  3594 /**
       
  3595  * e1000_intr_msi_test - Interrupt Handler
       
  3596  * @irq: interrupt number
       
  3597  * @data: pointer to a network interface device structure
       
  3598  **/
       
  3599 static irqreturn_t e1000_intr_msi_test(int irq, void *data)
       
  3600 {
       
  3601 	struct net_device *netdev = data;
       
  3602 	struct e1000_adapter *adapter = netdev_priv(netdev);
       
  3603 	struct e1000_hw *hw = &adapter->hw;
       
  3604 	u32 icr = er32(ICR);
       
  3605 
       
  3606 	e_dbg("icr is %08X\n", icr);
       
  3607 	if (icr & E1000_ICR_RXSEQ) {
       
  3608 		adapter->flags &= ~FLAG_MSI_TEST_FAILED;
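       		/* pairs with the rmb() in e1000_test_msi_interrupt() */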
       
  3609 		wmb();
       
  3610 	}
       
  3611 
       
  3612 	return IRQ_HANDLED;
       
  3613 }
       
  3614 
       
  3615 /**
       
  3616  * e1000_test_msi_interrupt - Returns 0 for successful test
       
  3617  * @adapter: board private struct
       
  3618  *
       
  3619  * code flow taken from tg3.c
       
  3620  **/
       
  3621 static int e1000_test_msi_interrupt(struct e1000_adapter *adapter)
       
  3622 {
       
  3623 	struct net_device *netdev = adapter->netdev;
       
  3624 	struct e1000_hw *hw = &adapter->hw;
       
  3625 	int err;
       
  3626 
       
  3627 	/* poll_enable hasn't been called yet, so don't need disable */
       
  3628 	/* clear any pending events */
       
  3629 	er32(ICR);
       
  3630 
       
  3631 	/* free the real vector and request a test handler */
       
  3632 	e1000_free_irq(adapter);
       
  3633 	e1000e_reset_interrupt_capability(adapter);
       
  3634 
       
  3635 	/* Assume that the test fails; if it succeeds, then the test
       
  3636 	 * MSI irq handler will unset this flag */
       
  3637 	adapter->flags |= FLAG_MSI_TEST_FAILED;
       
  3638 
       
  3639 	err = pci_enable_msi(adapter->pdev);
       
  3640 	if (err)
       
  3641 		goto msi_test_failed;
       
  3642 
       
  3643 	err = request_irq(adapter->pdev->irq, e1000_intr_msi_test, 0,
       
  3644 			  netdev->name, netdev);
       
  3645 	if (err) {
       
  3646 		pci_disable_msi(adapter->pdev);
       
  3647 		goto msi_test_failed;
       
  3648 	}
       
  3649 
       
  3650 	wmb();
       
  3651 
       
  3652 	e1000_irq_enable(adapter);
       
  3653 
       
  3654 	/* fire an unusual interrupt on the test handler */
       
  3655 	ew32(ICS, E1000_ICS_RXSEQ);
       
  3656 	e1e_flush();
       
  3657 	msleep(50);
       
  3658 
       
  3659 	e1000_irq_disable(adapter);
       
  3660 
       
  3661 	rmb();
       
  3662 
       
  3663 	if (adapter->flags & FLAG_MSI_TEST_FAILED) {
       
  3664 		adapter->int_mode = E1000E_INT_MODE_LEGACY;
       
  3665 		e_info("MSI interrupt test failed, using legacy interrupt.\n");
       
  3666 	} else
       
  3667 		e_dbg("MSI interrupt test succeeded!\n");
       
  3668 
       
  3669 	free_irq(adapter->pdev->irq, netdev);
       
  3670 	pci_disable_msi(adapter->pdev);
       
  3671 
       
  3672 msi_test_failed:
       
  3673 	e1000e_set_interrupt_capability(adapter);
       
  3674 	return e1000_request_irq(adapter);
       
  3675 }
       
  3676 
       
  3677 /**
       
  3678  * e1000_test_msi - Returns 0 if MSI test succeeds or INTx mode is restored
       
  3679  * @adapter: board private struct
       
  3680  *
       
  3681  * code flow taken from tg3.c, called with e1000 interrupts disabled.
       
  3682  **/
       
  3683 static int e1000_test_msi(struct e1000_adapter *adapter)
       
  3684 {
       
  3685 	int err;
       
  3686 	u16 pci_cmd;
       
  3687 
       
  3688 	if (!(adapter->flags & FLAG_MSI_ENABLED))
       
  3689 		return 0;
       
  3690 
       
  3691 	/* disable SERR in case the MSI write causes a master abort */
       
  3692 	pci_read_config_word(adapter->pdev, PCI_COMMAND, &pci_cmd);
       
  3693 	if (pci_cmd & PCI_COMMAND_SERR)
       
  3694 		pci_write_config_word(adapter->pdev, PCI_COMMAND,
       
  3695 				      pci_cmd & ~PCI_COMMAND_SERR);
       
  3696 
       
  3697 	err = e1000_test_msi_interrupt(adapter);
       
  3698 
       
  3699 	/* re-enable SERR */
       
  3700 	if (pci_cmd & PCI_COMMAND_SERR) {
       
  3701 		pci_read_config_word(adapter->pdev, PCI_COMMAND, &pci_cmd);
       
  3702 		pci_cmd |= PCI_COMMAND_SERR;
       
  3703 		pci_write_config_word(adapter->pdev, PCI_COMMAND, pci_cmd);
       
  3704 	}
       
  3705 
       
  3706 	return err;
       
  3707 }
       
  3708 
       
  3709 /**
       
  3710  * e1000_open - Called when a network interface is made active
       
  3711  * @netdev: network interface device structure
       
  3712  *
       
  3713  * Returns 0 on success, negative value on failure
       
  3714  *
       
  3715  * The open entry point is called when a network interface is made
       
  3716  * active by the system (IFF_UP).  At this point all resources needed
       
  3717  * for transmit and receive operations are allocated, the interrupt
       
  3718  * handler is registered with the OS, the watchdog timer is started,
       
  3719  * and the stack is notified that the interface is ready.
       
  3720  **/
       
  3721 static int e1000_open(struct net_device *netdev)
       
  3722 {
       
  3723 	struct e1000_adapter *adapter = netdev_priv(netdev);
       
  3724 	struct e1000_hw *hw = &adapter->hw;
       
  3725 	struct pci_dev *pdev = adapter->pdev;
       
  3726 	int err;
       
  3727 
       
  3728 	/* disallow open during test */
       
  3729 	if (test_bit(__E1000_TESTING, &adapter->state))
       
  3730 		return -EBUSY;
       
  3731 
       
  3732 	pm_runtime_get_sync(&pdev->dev);
       
  3733 
       
  3734 	netif_carrier_off(netdev);
       
  3735 
       
  3736 	/* allocate transmit descriptors */
       
  3737 	err = e1000e_setup_tx_resources(adapter);
       
  3738 	if (err)
       
  3739 		goto err_setup_tx;
       
  3740 
       
  3741 	/* allocate receive descriptors */
       
  3742 	err = e1000e_setup_rx_resources(adapter);
       
  3743 	if (err)
       
  3744 		goto err_setup_rx;
       
  3745 
       
  3746 	/*
       
  3747 	 * If AMT is enabled, let the firmware know that the network
       
  3748 	 * interface is now open and reset the part to a known state.
       
  3749 	 */
       
  3750 	if (adapter->flags & FLAG_HAS_AMT) {
       
  3751 		e1000e_get_hw_control(adapter);
       
  3752 		e1000e_reset(adapter);
       
  3753 	}
       
  3754 
       
  3755 	e1000e_power_up_phy(adapter);
       
  3756 
       
  3757 	adapter->mng_vlan_id = E1000_MNG_VLAN_NONE;
       
  3758 	if ((adapter->hw.mng_cookie.status &
       
  3759 	     E1000_MNG_DHCP_COOKIE_STATUS_VLAN))
       
  3760 		e1000_update_mng_vlan(adapter);
       
  3761 
       
  3762 	/* DMA latency requirement to workaround early-receive/jumbo issue */
       
  3763 	if ((adapter->flags & FLAG_HAS_ERT) ||
       
  3764 	    (adapter->hw.mac.type == e1000_pch2lan))
       
  3765 		pm_qos_add_request(&adapter->netdev->pm_qos_req,
       
  3766 				   PM_QOS_CPU_DMA_LATENCY,
       
  3767 				   PM_QOS_DEFAULT_VALUE);
       
  3768 
       
  3769 	/*
       
  3770 	 * before we allocate an interrupt, we must be ready to handle it.
       
  3771 	 * Setting DEBUG_SHIRQ in the kernel makes it fire an interrupt
       
  3772 	 * as soon as we request the IRQ, so we have to set up our
       
  3773 	 * clean_rx handler before we do so.
       
  3774 	 */
       
  3775 	e1000_configure(adapter);
       
  3776 
       
  3777 	err = e1000_request_irq(adapter);
       
  3778 	if (err)
       
  3779 		goto err_req_irq;
       
  3780 
       
  3781 	/*
       
  3782 	 * Work around PCIe errata with MSI interrupts causing some chipsets to
       
  3783 	 * ignore e1000e MSI messages, which means we need to test our MSI
       
  3784 	 * interrupt now
       
  3785 	 */
       
  3786 	if (adapter->int_mode != E1000E_INT_MODE_LEGACY) {
       
  3787 		err = e1000_test_msi(adapter);
       
  3788 		if (err) {
       
  3789 			e_err("Interrupt allocation failed\n");
       
  3790 			goto err_req_irq;
       
  3791 		}
       
  3792 	}
       
  3793 
       
  3794 	/* From here on the code is the same as e1000e_up() */
       
  3795 	clear_bit(__E1000_DOWN, &adapter->state);
       
  3796 
       
  3797 	napi_enable(&adapter->napi);
       
  3798 
       
  3799 	e1000_irq_enable(adapter);
       
  3800 
       
  3801 	adapter->tx_hang_recheck = false;
       
  3802 	netif_start_queue(netdev);
       
  3803 
       
  3804 	adapter->idle_check = true;
       
  3805 	pm_runtime_put(&pdev->dev);
       
  3806 
       
  3807 	/* fire a link status change interrupt to start the watchdog */
       
  3808 	if (adapter->msix_entries)
       
  3809 		ew32(ICS, E1000_ICS_LSC | E1000_ICR_OTHER);
       
  3810 	else
       
  3811 		ew32(ICS, E1000_ICS_LSC);
       
  3812 
       
  3813 	return 0;
       
  3814 
       
  3815 err_req_irq:
       
  3816 	e1000e_release_hw_control(adapter);
       
  3817 	e1000_power_down_phy(adapter);
       
  3818 	e1000e_free_rx_resources(adapter);
       
  3819 err_setup_rx:
       
  3820 	e1000e_free_tx_resources(adapter);
       
  3821 err_setup_tx:
       
  3822 	e1000e_reset(adapter);
       
  3823 	pm_runtime_put_sync(&pdev->dev);
       
  3824 
       
  3825 	return err;
       
  3826 }
       
  3827 
       
  3828 /**
       
  3829  * e1000_close - Disables a network interface
       
  3830  * @netdev: network interface device structure
       
  3831  *
       
  3832  * Returns 0, this is not allowed to fail
       
  3833  *
       
  3834  * The close entry point is called when an interface is de-activated
       
  3835  * by the OS.  The hardware is still under the drivers control, but
       
  3836  * needs to be disabled.  A global MAC reset is issued to stop the
       
  3837  * hardware, and all transmit and receive resources are freed.
       
  3838  **/
       
  3839 static int e1000_close(struct net_device *netdev)
       
  3840 {
       
  3841 	struct e1000_adapter *adapter = netdev_priv(netdev);
       
  3842 	struct pci_dev *pdev = adapter->pdev;
       
  3843 
       
  3844 	WARN_ON(test_bit(__E1000_RESETTING, &adapter->state));
       
  3845 
       
  3846 	pm_runtime_get_sync(&pdev->dev);
       
  3847 
       
  3848 	if (!test_bit(__E1000_DOWN, &adapter->state)) {
       
  3849 		e1000e_down(adapter);
       
  3850 		e1000_free_irq(adapter);
       
  3851 	}
       
  3852 	e1000_power_down_phy(adapter);
       
  3853 
       
  3854 	e1000e_free_tx_resources(adapter);
       
  3855 	e1000e_free_rx_resources(adapter);
       
  3856 
       
  3857 	/*
       
  3858 	 * kill manageability vlan ID if supported, but not if a vlan with
       
  3859 	 * the same ID is registered on the host OS (let 8021q kill it)
       
  3860 	 */
       
  3861 	if (adapter->hw.mng_cookie.status &
       
  3862 	    E1000_MNG_DHCP_COOKIE_STATUS_VLAN)
       
  3863 		e1000_vlan_rx_kill_vid(netdev, adapter->mng_vlan_id);
       
  3864 
       
  3865 	/*
       
  3866 	 * If AMT is enabled, let the firmware know that the network
       
  3867 	 * interface is now closed
       
  3868 	 */
       
  3869 	if ((adapter->flags & FLAG_HAS_AMT) &&
       
  3870 	    !test_bit(__E1000_TESTING, &adapter->state))
       
  3871 		e1000e_release_hw_control(adapter);
       
  3872 
       
  3873 	if ((adapter->flags & FLAG_HAS_ERT) ||
       
  3874 	    (adapter->hw.mac.type == e1000_pch2lan))
       
  3875 		pm_qos_remove_request(&adapter->netdev->pm_qos_req);
       
  3876 
       
  3877 	pm_runtime_put_sync(&pdev->dev);
       
  3878 
       
  3879 	return 0;
       
  3880 }
       
  3881 /**
       
  3882  * e1000_set_mac - Change the Ethernet Address of the NIC
       
  3883  * @netdev: network interface device structure
       
  3884  * @p: pointer to an address structure
       
  3885  *
       
  3886  * Returns 0 on success, negative on failure
       
  3887  **/
       
  3888 static int e1000_set_mac(struct net_device *netdev, void *p)
       
  3889 {
       
  3890 	struct e1000_adapter *adapter = netdev_priv(netdev);
       
  3891 	struct sockaddr *addr = p;
       
  3892 
       
  3893 	if (!is_valid_ether_addr(addr->sa_data))
       
  3894 		return -EADDRNOTAVAIL;
       
  3895 
       
  3896 	memcpy(netdev->dev_addr, addr->sa_data, netdev->addr_len);
       
  3897 	memcpy(adapter->hw.mac.addr, addr->sa_data, netdev->addr_len);
       
  3898 
       
  3899 	e1000e_rar_set(&adapter->hw, adapter->hw.mac.addr, 0);
       
  3900 
       
  3901 	if (adapter->flags & FLAG_RESET_OVERWRITES_LAA) {
       
  3902 		/* activate the work around */
       
  3903 		e1000e_set_laa_state_82571(&adapter->hw, 1);
       
  3904 
       
  3905 		/*
       
  3906 		 * Hold a copy of the LAA in RAR[14] This is done so that
       
  3907 		 * between the time RAR[0] gets clobbered  and the time it
       
  3908 		 * gets fixed (in e1000_watchdog), the actual LAA is in one
       
  3909 		 * of the RARs and no incoming packets directed to this port
       
  3910 		 * are dropped. Eventually the LAA will be in RAR[0] and
       
  3911 		 * RAR[14]
       
  3912 		 */
       
  3913 		e1000e_rar_set(&adapter->hw,
       
  3914 			      adapter->hw.mac.addr,
       
  3915 			      adapter->hw.mac.rar_entry_count - 1);
       
  3916 	}
       
  3917 
       
  3918 	return 0;
       
  3919 }
       
  3920 
       
  3921 /**
       
  3922  * e1000e_update_phy_task - work thread to update phy
       
  3923  * @work: pointer to our work struct
       
  3924  *
       
  3925  * this worker thread exists because we must acquire a
       
  3926  * semaphore to read the phy, and we may need to msleep while
       
  3927  * waiting for it, which we cannot do in a timer context.
       
  3928  **/
       
  3929 static void e1000e_update_phy_task(struct work_struct *work)
       
  3930 {
       
  3931 	struct e1000_adapter *adapter = container_of(work,
       
  3932 					struct e1000_adapter, update_phy_task);
       
  3933 
       
  3934 	if (test_bit(__E1000_DOWN, &adapter->state))
       
  3935 		return;
       
  3936 
       
  3937 	e1000_get_phy_info(&adapter->hw);
       
  3938 }
       
  3939 
       
  3940 /*
       
  3941  * Need to wait a few seconds after link up to get diagnostic information from
       
  3942  * the phy
       
  3943  */
       
  3944 static void e1000_update_phy_info(unsigned long data)
       
  3945 {
       
  3946 	struct e1000_adapter *adapter = (struct e1000_adapter *) data;
       
  3947 
       
  3948 	if (test_bit(__E1000_DOWN, &adapter->state))
       
  3949 		return;
       
  3950 
       
  3951 	schedule_work(&adapter->update_phy_task);
       
  3952 }
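/*
 * Editor's sketch (not part of the driver): the timer -> work item hand-off
 * used above, reduced to a self-contained illustration.  A timer callback
 * runs in softirq context and must not sleep, so it only schedules a work
 * item; the work item runs in process context where sleeping (e.g. while
 * acquiring the PHY semaphore) is allowed.  Names prefixed "example_" are
 * hypothetical and only show the pattern.
 */
#if 0	/* illustrative only */
struct example_priv {
	struct timer_list timer;	/* fires in softirq context */
	struct work_struct task;	/* runs in process context */
};

static void example_task(struct work_struct *work)
{
	struct example_priv *priv = container_of(work, struct example_priv,
						 task);
	/* safe to sleep here, e.g. msleep() or a sleeping lock */
	(void)priv;
}

static void example_timer(unsigned long data)
{
	struct example_priv *priv = (struct example_priv *)data;

	schedule_work(&priv->task);	/* defer the part that may sleep */
}

static void example_init(struct example_priv *priv)
{
	INIT_WORK(&priv->task, example_task);
	setup_timer(&priv->timer, example_timer, (unsigned long)priv);
	mod_timer(&priv->timer, round_jiffies(jiffies + 2 * HZ));
}
#endif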
       
  3953 
       
  3954 /**
       
  3955  * e1000e_update_phy_stats - Update the PHY statistics counters
       
  3956  * @adapter: board private structure
       
  3957  *
       
  3958  * Read/clear the upper 16-bit PHY statistics registers and read/accumulate the lower ones
       
  3959  **/
       
  3960 static void e1000e_update_phy_stats(struct e1000_adapter *adapter)
       
  3961 {
       
  3962 	struct e1000_hw *hw = &adapter->hw;
       
  3963 	s32 ret_val;
       
  3964 	u16 phy_data;
       
  3965 
       
  3966 	ret_val = hw->phy.ops.acquire(hw);
       
  3967 	if (ret_val)
       
  3968 		return;
       
  3969 
       
  3970 	/*
       
  3971 	 * A page set is expensive so check if already on desired page.
       
  3972 	 * If not, set to the page with the PHY status registers.
       
  3973 	 */
       
  3974 	hw->phy.addr = 1;
       
  3975 	ret_val = e1000e_read_phy_reg_mdic(hw, IGP01E1000_PHY_PAGE_SELECT,
       
  3976 					   &phy_data);
       
  3977 	if (ret_val)
       
  3978 		goto release;
       
  3979 	if (phy_data != (HV_STATS_PAGE << IGP_PAGE_SHIFT)) {
       
  3980 		ret_val = hw->phy.ops.set_page(hw,
       
  3981 					       HV_STATS_PAGE << IGP_PAGE_SHIFT);
       
  3982 		if (ret_val)
       
  3983 			goto release;
       
  3984 	}
       
  3985 
       
  3986 	/* Single Collision Count */
       
  3987 	hw->phy.ops.read_reg_page(hw, HV_SCC_UPPER, &phy_data);
       
  3988 	ret_val = hw->phy.ops.read_reg_page(hw, HV_SCC_LOWER, &phy_data);
       
  3989 	if (!ret_val)
       
  3990 		adapter->stats.scc += phy_data;
       
  3991 
       
  3992 	/* Excessive Collision Count */
       
  3993 	hw->phy.ops.read_reg_page(hw, HV_ECOL_UPPER, &phy_data);
       
  3994 	ret_val = hw->phy.ops.read_reg_page(hw, HV_ECOL_LOWER, &phy_data);
       
  3995 	if (!ret_val)
       
  3996 		adapter->stats.ecol += phy_data;
       
  3997 
       
  3998 	/* Multiple Collision Count */
       
  3999 	hw->phy.ops.read_reg_page(hw, HV_MCC_UPPER, &phy_data);
       
  4000 	ret_val = hw->phy.ops.read_reg_page(hw, HV_MCC_LOWER, &phy_data);
       
  4001 	if (!ret_val)
       
  4002 		adapter->stats.mcc += phy_data;
       
  4003 
       
  4004 	/* Late Collision Count */
       
  4005 	hw->phy.ops.read_reg_page(hw, HV_LATECOL_UPPER, &phy_data);
       
  4006 	ret_val = hw->phy.ops.read_reg_page(hw, HV_LATECOL_LOWER, &phy_data);
       
  4007 	if (!ret_val)
       
  4008 		adapter->stats.latecol += phy_data;
       
  4009 
       
  4010 	/* Collision Count - also used for adaptive IFS */
       
  4011 	hw->phy.ops.read_reg_page(hw, HV_COLC_UPPER, &phy_data);
       
  4012 	ret_val = hw->phy.ops.read_reg_page(hw, HV_COLC_LOWER, &phy_data);
       
  4013 	if (!ret_val)
       
  4014 		hw->mac.collision_delta = phy_data;
       
  4015 
       
  4016 	/* Defer Count */
       
  4017 	hw->phy.ops.read_reg_page(hw, HV_DC_UPPER, &phy_data);
       
  4018 	ret_val = hw->phy.ops.read_reg_page(hw, HV_DC_LOWER, &phy_data);
       
  4019 	if (!ret_val)
       
  4020 		adapter->stats.dc += phy_data;
       
  4021 
       
  4022 	/* Transmit with no CRS */
       
  4023 	hw->phy.ops.read_reg_page(hw, HV_TNCRS_UPPER, &phy_data);
       
  4024 	ret_val = hw->phy.ops.read_reg_page(hw, HV_TNCRS_LOWER, &phy_data);
       
  4025 	if (!ret_val)
       
  4026 		adapter->stats.tncrs += phy_data;
       
  4027 
       
  4028 release:
       
  4029 	hw->phy.ops.release(hw);
       
  4030 }
       
  4031 
       
  4032 /**
       
  4033  * e1000e_update_stats - Update the board statistics counters
       
  4034  * @adapter: board private structure
       
  4035  **/
       
  4036 static void e1000e_update_stats(struct e1000_adapter *adapter)
       
  4037 {
       
  4038 	struct net_device *netdev = adapter->netdev;
       
  4039 	struct e1000_hw *hw = &adapter->hw;
       
  4040 	struct pci_dev *pdev = adapter->pdev;
       
  4041 
       
  4042 	/*
       
  4043 	 * Prevent stats update while adapter is being reset, or if the pci
       
  4044 	 * connection is down.
       
  4045 	 */
       
  4046 	if (adapter->link_speed == 0)
       
  4047 		return;
       
  4048 	if (pci_channel_offline(pdev))
       
  4049 		return;
       
  4050 
       
  4051 	adapter->stats.crcerrs += er32(CRCERRS);
       
  4052 	adapter->stats.gprc += er32(GPRC);
       
  4053 	adapter->stats.gorc += er32(GORCL);
       
  4054 	er32(GORCH); /* Clear gorc */
       
  4055 	adapter->stats.bprc += er32(BPRC);
       
  4056 	adapter->stats.mprc += er32(MPRC);
       
  4057 	adapter->stats.roc += er32(ROC);
       
  4058 
       
  4059 	adapter->stats.mpc += er32(MPC);
       
  4060 
       
  4061 	/* Half-duplex statistics */
       
  4062 	if (adapter->link_duplex == HALF_DUPLEX) {
       
  4063 		if (adapter->flags2 & FLAG2_HAS_PHY_STATS) {
       
  4064 			e1000e_update_phy_stats(adapter);
       
  4065 		} else {
       
  4066 			adapter->stats.scc += er32(SCC);
       
  4067 			adapter->stats.ecol += er32(ECOL);
       
  4068 			adapter->stats.mcc += er32(MCC);
       
  4069 			adapter->stats.latecol += er32(LATECOL);
       
  4070 			adapter->stats.dc += er32(DC);
       
  4071 
       
  4072 			hw->mac.collision_delta = er32(COLC);
       
  4073 
       
  4074 			if ((hw->mac.type != e1000_82574) &&
       
  4075 			    (hw->mac.type != e1000_82583))
       
  4076 				adapter->stats.tncrs += er32(TNCRS);
       
  4077 		}
       
  4078 		adapter->stats.colc += hw->mac.collision_delta;
       
  4079 	}
       
  4080 
       
  4081 	adapter->stats.xonrxc += er32(XONRXC);
       
  4082 	adapter->stats.xontxc += er32(XONTXC);
       
  4083 	adapter->stats.xoffrxc += er32(XOFFRXC);
       
  4084 	adapter->stats.xofftxc += er32(XOFFTXC);
       
  4085 	adapter->stats.gptc += er32(GPTC);
       
  4086 	adapter->stats.gotc += er32(GOTCL);
       
  4087 	er32(GOTCH); /* Clear gotc */
       
  4088 	adapter->stats.rnbc += er32(RNBC);
       
  4089 	adapter->stats.ruc += er32(RUC);
       
  4090 
       
  4091 	adapter->stats.mptc += er32(MPTC);
       
  4092 	adapter->stats.bptc += er32(BPTC);
       
  4093 
       
  4094 	/* used for adaptive IFS */
       
  4095 
       
  4096 	hw->mac.tx_packet_delta = er32(TPT);
       
  4097 	adapter->stats.tpt += hw->mac.tx_packet_delta;
       
  4098 
       
  4099 	adapter->stats.algnerrc += er32(ALGNERRC);
       
  4100 	adapter->stats.rxerrc += er32(RXERRC);
       
  4101 	adapter->stats.cexterr += er32(CEXTERR);
       
  4102 	adapter->stats.tsctc += er32(TSCTC);
       
  4103 	adapter->stats.tsctfc += er32(TSCTFC);
       
  4104 
       
  4105 	/* Fill out the OS statistics structure */
       
  4106 	netdev->stats.multicast = adapter->stats.mprc;
       
  4107 	netdev->stats.collisions = adapter->stats.colc;
       
  4108 
       
  4109 	/* Rx Errors */
       
  4110 
       
  4111 	/*
       
  4112 	 * RLEC on some newer hardware can be incorrect so build
       
  4113 	 * our own version based on RUC and ROC
       
  4114 	 */
       
  4115 	netdev->stats.rx_errors = adapter->stats.rxerrc +
       
  4116 		adapter->stats.crcerrs + adapter->stats.algnerrc +
       
  4117 		adapter->stats.ruc + adapter->stats.roc +
       
  4118 		adapter->stats.cexterr;
       
  4119 	netdev->stats.rx_length_errors = adapter->stats.ruc +
       
  4120 					      adapter->stats.roc;
       
  4121 	netdev->stats.rx_crc_errors = adapter->stats.crcerrs;
       
  4122 	netdev->stats.rx_frame_errors = adapter->stats.algnerrc;
       
  4123 	netdev->stats.rx_missed_errors = adapter->stats.mpc;
       
  4124 
       
  4125 	/* Tx Errors */
       
  4126 	netdev->stats.tx_errors = adapter->stats.ecol +
       
  4127 				       adapter->stats.latecol;
       
  4128 	netdev->stats.tx_aborted_errors = adapter->stats.ecol;
       
  4129 	netdev->stats.tx_window_errors = adapter->stats.latecol;
       
  4130 	netdev->stats.tx_carrier_errors = adapter->stats.tncrs;
       
  4131 
       
  4132 	/* Tx Dropped needs to be maintained elsewhere */
       
  4133 
       
  4134 	/* Management Stats */
       
  4135 	adapter->stats.mgptc += er32(MGTPTC);
       
  4136 	adapter->stats.mgprc += er32(MGTPRC);
       
  4137 	adapter->stats.mgpdc += er32(MGTPDC);
       
  4138 }
       
  4139 
       
  4140 /**
       
  4141  * e1000_phy_read_status - Update the PHY register status snapshot
       
  4142  * @adapter: board private structure
       
  4143  **/
       
  4144 static void e1000_phy_read_status(struct e1000_adapter *adapter)
       
  4145 {
       
  4146 	struct e1000_hw *hw = &adapter->hw;
       
  4147 	struct e1000_phy_regs *phy = &adapter->phy_regs;
       
  4148 
       
  4149 	if ((er32(STATUS) & E1000_STATUS_LU) &&
       
  4150 	    (adapter->hw.phy.media_type == e1000_media_type_copper)) {
       
  4151 		int ret_val;
       
  4152 
       
  4153 		ret_val  = e1e_rphy(hw, PHY_CONTROL, &phy->bmcr);
       
  4154 		ret_val |= e1e_rphy(hw, PHY_STATUS, &phy->bmsr);
       
  4155 		ret_val |= e1e_rphy(hw, PHY_AUTONEG_ADV, &phy->advertise);
       
  4156 		ret_val |= e1e_rphy(hw, PHY_LP_ABILITY, &phy->lpa);
       
  4157 		ret_val |= e1e_rphy(hw, PHY_AUTONEG_EXP, &phy->expansion);
       
  4158 		ret_val |= e1e_rphy(hw, PHY_1000T_CTRL, &phy->ctrl1000);
       
  4159 		ret_val |= e1e_rphy(hw, PHY_1000T_STATUS, &phy->stat1000);
       
  4160 		ret_val |= e1e_rphy(hw, PHY_EXT_STATUS, &phy->estatus);
       
  4161 		if (ret_val)
       
  4162 			e_warn("Error reading PHY register\n");
       
  4163 	} else {
       
  4164 		/*
       
  4165 		 * Do not read PHY registers if link is not up
       
  4166 		 * Set values to typical power-on defaults
       
  4167 		 */
       
  4168 		phy->bmcr = (BMCR_SPEED1000 | BMCR_ANENABLE | BMCR_FULLDPLX);
       
  4169 		phy->bmsr = (BMSR_100FULL | BMSR_100HALF | BMSR_10FULL |
       
  4170 			     BMSR_10HALF | BMSR_ESTATEN | BMSR_ANEGCAPABLE |
       
  4171 			     BMSR_ERCAP);
       
  4172 		phy->advertise = (ADVERTISE_PAUSE_ASYM | ADVERTISE_PAUSE_CAP |
       
  4173 				  ADVERTISE_ALL | ADVERTISE_CSMA);
       
  4174 		phy->lpa = 0;
       
  4175 		phy->expansion = EXPANSION_ENABLENPAGE;
       
  4176 		phy->ctrl1000 = ADVERTISE_1000FULL;
       
  4177 		phy->stat1000 = 0;
       
  4178 		phy->estatus = (ESTATUS_1000_TFULL | ESTATUS_1000_THALF);
       
  4179 	}
       
  4180 }
       
  4181 
       
  4182 static void e1000_print_link_info(struct e1000_adapter *adapter)
       
  4183 {
       
  4184 	struct e1000_hw *hw = &adapter->hw;
       
  4185 	u32 ctrl = er32(CTRL);
       
  4186 
       
  4187 	/* Link status message must follow this format for user tools */
       
  4188 	printk(KERN_INFO "e1000e: %s NIC Link is Up %d Mbps %s, "
       
  4189 	       "Flow Control: %s\n",
       
  4190 	       adapter->netdev->name,
       
  4191 	       adapter->link_speed,
       
  4192 	       (adapter->link_duplex == FULL_DUPLEX) ?
       
  4193 	       "Full Duplex" : "Half Duplex",
       
  4194 	       ((ctrl & E1000_CTRL_TFCE) && (ctrl & E1000_CTRL_RFCE)) ?
       
  4195 	       "Rx/Tx" :
       
  4196 	       ((ctrl & E1000_CTRL_RFCE) ? "Rx" :
       
  4197 		((ctrl & E1000_CTRL_TFCE) ? "Tx" : "None")));
       
  4198 }
       
  4199 
       
  4200 static bool e1000e_has_link(struct e1000_adapter *adapter)
       
  4201 {
       
  4202 	struct e1000_hw *hw = &adapter->hw;
       
  4203 	bool link_active = false;
       
  4204 	s32 ret_val = 0;
       
  4205 
       
  4206 	/*
       
  4207 	 * get_link_status is set on LSC (link status) interrupt or
       
  4208 	 * Rx sequence error interrupt.  get_link_status will stay
       
  4209 	 * false until the check_for_link establishes link
       
  4210 	 * for copper adapters ONLY
       
  4211 	 */
       
  4212 	switch (hw->phy.media_type) {
       
  4213 	case e1000_media_type_copper:
       
  4214 		if (hw->mac.get_link_status) {
       
  4215 			ret_val = hw->mac.ops.check_for_link(hw);
       
  4216 			link_active = !hw->mac.get_link_status;
       
  4217 		} else {
       
  4218 			link_active = true;
       
  4219 		}
       
  4220 		break;
       
  4221 	case e1000_media_type_fiber:
       
  4222 		ret_val = hw->mac.ops.check_for_link(hw);
       
  4223 		link_active = !!(er32(STATUS) & E1000_STATUS_LU);
       
  4224 		break;
       
  4225 	case e1000_media_type_internal_serdes:
       
  4226 		ret_val = hw->mac.ops.check_for_link(hw);
       
  4227 		link_active = adapter->hw.mac.serdes_has_link;
       
  4228 		break;
       
  4229 	default:
       
  4230 	case e1000_media_type_unknown:
       
  4231 		break;
       
  4232 	}
       
  4233 
       
  4234 	if ((ret_val == E1000_ERR_PHY) && (hw->phy.type == e1000_phy_igp_3) &&
       
  4235 	    (er32(CTRL) & E1000_PHY_CTRL_GBE_DISABLE)) {
       
  4236 		/* See e1000_kmrn_lock_loss_workaround_ich8lan() */
       
  4237 		e_info("Gigabit has been disabled, downgrading speed\n");
       
  4238 	}
       
  4239 
       
  4240 	return link_active;
       
  4241 }
       
  4242 
       
  4243 static void e1000e_enable_receives(struct e1000_adapter *adapter)
       
  4244 {
       
  4245 	/* make sure the receive unit is started */
       
  4246 	if ((adapter->flags & FLAG_RX_NEEDS_RESTART) &&
       
  4247 	    (adapter->flags & FLAG_RX_RESTART_NOW)) {
       
  4248 		struct e1000_hw *hw = &adapter->hw;
       
  4249 		u32 rctl = er32(RCTL);
       
  4250 		ew32(RCTL, rctl | E1000_RCTL_EN);
       
  4251 		adapter->flags &= ~FLAG_RX_RESTART_NOW;
       
  4252 	}
       
  4253 }
       
  4254 
       
  4255 static void e1000e_check_82574_phy_workaround(struct e1000_adapter *adapter)
       
  4256 {
       
  4257 	struct e1000_hw *hw = &adapter->hw;
       
  4258 
       
  4259 	/*
       
  4260 	 * With 82574 controllers, PHY needs to be checked periodically
       
  4261 	 * for hung state and reset, if two calls return true
       
  4262 	 */
       
  4263 	if (e1000_check_phy_82574(hw))
       
  4264 		adapter->phy_hang_count++;
       
  4265 	else
       
  4266 		adapter->phy_hang_count = 0;
       
  4267 
       
  4268 	if (adapter->phy_hang_count > 1) {
       
  4269 		adapter->phy_hang_count = 0;
       
  4270 		schedule_work(&adapter->reset_task);
       
  4271 	}
       
  4272 }
       
  4273 
       
  4274 /**
       
  4275  * e1000_watchdog - Timer Call-back
       
  4276  * @data: pointer to adapter cast into an unsigned long
       
  4277  **/
       
  4278 static void e1000_watchdog(unsigned long data)
       
  4279 {
       
  4280 	struct e1000_adapter *adapter = (struct e1000_adapter *) data;
       
  4281 
       
  4282 	/* Do the rest outside of interrupt context */
       
  4283 	schedule_work(&adapter->watchdog_task);
       
  4284 
       
  4285 	/* TODO: make this use queue_delayed_work() */
       
  4286 }
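/*
 * Editor's sketch (not part of the driver): one way the TODO above could be
 * addressed with queue_delayed_work() instead of a timer plus a separate
 * work item.  This is only a hedged illustration of the generic delayed-work
 * API; "example_" names are hypothetical, and a real conversion would also
 * have to update every mod_timer() call site for the watchdog.
 */
#if 0	/* illustrative only */
static void example_watchdog_task(struct work_struct *work)
{
	struct delayed_work *dwork = to_delayed_work(work);

	/* ... do the periodic link/stats work here ... */

	/* re-arm for roughly two seconds from now */
	schedule_delayed_work(dwork, round_jiffies_relative(2 * HZ));
}

/* setup: INIT_DELAYED_WORK(&adapter->example_watchdog, example_watchdog_task);
 * start: schedule_delayed_work(&adapter->example_watchdog, 2 * HZ);
 * stop:  cancel_delayed_work_sync(&adapter->example_watchdog);
 */
#endif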
       
  4287 
       
  4288 static void e1000_watchdog_task(struct work_struct *work)
       
  4289 {
       
  4290 	struct e1000_adapter *adapter = container_of(work,
       
  4291 					struct e1000_adapter, watchdog_task);
       
  4292 	struct net_device *netdev = adapter->netdev;
       
  4293 	struct e1000_mac_info *mac = &adapter->hw.mac;
       
  4294 	struct e1000_phy_info *phy = &adapter->hw.phy;
       
  4295 	struct e1000_ring *tx_ring = adapter->tx_ring;
       
  4296 	struct e1000_hw *hw = &adapter->hw;
       
  4297 	u32 link, tctl;
       
  4298 
       
  4299 	if (test_bit(__E1000_DOWN, &adapter->state))
       
  4300 		return;
       
  4301 
       
  4302 	link = e1000e_has_link(adapter);
       
  4303 	if ((netif_carrier_ok(netdev)) && link) {
       
  4304 		/* Cancel scheduled suspend requests. */
       
  4305 		pm_runtime_resume(netdev->dev.parent);
       
  4306 
       
  4307 		e1000e_enable_receives(adapter);
       
  4308 		goto link_up;
       
  4309 	}
       
  4310 
       
  4311 	if ((e1000e_enable_tx_pkt_filtering(hw)) &&
       
  4312 	    (adapter->mng_vlan_id != adapter->hw.mng_cookie.vlan_id))
       
  4313 		e1000_update_mng_vlan(adapter);
       
  4314 
       
  4315 	if (link) {
       
  4316 		if (!netif_carrier_ok(netdev)) {
       
  4317 			bool txb2b = true;
       
  4318 
       
  4319 			/* Cancel scheduled suspend requests. */
       
  4320 			pm_runtime_resume(netdev->dev.parent);
       
  4321 
       
  4322 			/* update snapshot of PHY registers on LSC */
       
  4323 			e1000_phy_read_status(adapter);
       
  4324 			mac->ops.get_link_up_info(&adapter->hw,
       
  4325 						   &adapter->link_speed,
       
  4326 						   &adapter->link_duplex);
       
  4327 			e1000_print_link_info(adapter);
       
  4328 			/*
       
  4329 			 * On supported PHYs, check for duplex mismatch only
       
  4330 			 * if link has autonegotiated at 10/100 half
       
  4331 			 */
       
  4332 			if ((hw->phy.type == e1000_phy_igp_3 ||
       
  4333 			     hw->phy.type == e1000_phy_bm) &&
       
  4334 			    hw->mac.autoneg &&
       
  4335 			    (adapter->link_speed == SPEED_10 ||
       
  4336 			     adapter->link_speed == SPEED_100) &&
       
  4337 			    (adapter->link_duplex == HALF_DUPLEX)) {
       
  4338 				u16 autoneg_exp;
       
  4339 
       
  4340 				e1e_rphy(hw, PHY_AUTONEG_EXP, &autoneg_exp);
       
  4341 
       
  4342 				if (!(autoneg_exp & NWAY_ER_LP_NWAY_CAPS))
       
  4343 					e_info("Autonegotiated half duplex but"
       
  4344 					       " link partner cannot autoneg. "
       
  4345 					       " Try forcing full duplex if "
       
  4346 					       "link gets many collisions.\n");
       
  4347 			}
       
  4348 
       
  4349 			/* adjust timeout factor according to speed/duplex */
       
  4350 			adapter->tx_timeout_factor = 1;
       
  4351 			switch (adapter->link_speed) {
       
  4352 			case SPEED_10:
       
  4353 				txb2b = false;
       
  4354 				adapter->tx_timeout_factor = 16;
       
  4355 				break;
       
  4356 			case SPEED_100:
       
  4357 				txb2b = false;
       
  4358 				adapter->tx_timeout_factor = 10;
       
  4359 				break;
       
  4360 			}
       
  4361 
       
  4362 			/*
       
  4363 			 * workaround: re-program speed mode bit after
       
  4364 			 * link-up event
       
  4365 			 */
       
  4366 			if ((adapter->flags & FLAG_TARC_SPEED_MODE_BIT) &&
       
  4367 			    !txb2b) {
       
  4368 				u32 tarc0;
       
  4369 				tarc0 = er32(TARC(0));
       
  4370 				tarc0 &= ~SPEED_MODE_BIT;
       
  4371 				ew32(TARC(0), tarc0);
       
  4372 			}
       
  4373 
       
  4374 			/*
       
  4375 			 * disable TSO for pcie and 10/100 speeds, to avoid
       
  4376 			 * some hardware issues
       
  4377 			 */
       
  4378 			if (!(adapter->flags & FLAG_TSO_FORCE)) {
       
  4379 				switch (adapter->link_speed) {
       
  4380 				case SPEED_10:
       
  4381 				case SPEED_100:
       
  4382 					e_info("10/100 speed: disabling TSO\n");
       
  4383 					netdev->features &= ~NETIF_F_TSO;
       
  4384 					netdev->features &= ~NETIF_F_TSO6;
       
  4385 					break;
       
  4386 				case SPEED_1000:
       
  4387 					netdev->features |= NETIF_F_TSO;
       
  4388 					netdev->features |= NETIF_F_TSO6;
       
  4389 					break;
       
  4390 				default:
       
  4391 					/* oops */
       
  4392 					break;
       
  4393 				}
       
  4394 			}
       
  4395 
       
  4396 			/*
       
  4397 			 * enable transmits in the hardware, need to do this
       
  4398 			 * after setting TARC(0)
       
  4399 			 */
       
  4400 			tctl = er32(TCTL);
       
  4401 			tctl |= E1000_TCTL_EN;
       
  4402 			ew32(TCTL, tctl);
       
  4403 
       
  4404 			/*
       
  4405 			 * Perform any post-link-up configuration before
       
  4406 			 * reporting link up.
       
  4407 			 */
       
  4408 			if (phy->ops.cfg_on_link_up)
       
  4409 				phy->ops.cfg_on_link_up(hw);
       
  4410 
       
  4411 			netif_carrier_on(netdev);
       
  4412 
       
  4413 			if (!test_bit(__E1000_DOWN, &adapter->state))
       
  4414 				mod_timer(&adapter->phy_info_timer,
       
  4415 					  round_jiffies(jiffies + 2 * HZ));
       
  4416 		}
       
  4417 	} else {
       
  4418 		if (netif_carrier_ok(netdev)) {
       
  4419 			adapter->link_speed = 0;
       
  4420 			adapter->link_duplex = 0;
       
  4421 			/* Link status message must follow this format */
       
  4422 			printk(KERN_INFO "e1000e: %s NIC Link is Down\n",
       
  4423 			       adapter->netdev->name);
       
  4424 			netif_carrier_off(netdev);
       
  4425 			if (!test_bit(__E1000_DOWN, &adapter->state))
       
  4426 				mod_timer(&adapter->phy_info_timer,
       
  4427 					  round_jiffies(jiffies + 2 * HZ));
       
  4428 
       
  4429 			if (adapter->flags & FLAG_RX_NEEDS_RESTART)
       
  4430 				schedule_work(&adapter->reset_task);
       
  4431 			else
       
  4432 				pm_schedule_suspend(netdev->dev.parent,
       
  4433 							LINK_TIMEOUT);
       
  4434 		}
       
  4435 	}
       
  4436 
       
  4437 link_up:
       
  4438 	spin_lock(&adapter->stats64_lock);
       
  4439 	e1000e_update_stats(adapter);
       
  4440 
       
  4441 	mac->tx_packet_delta = adapter->stats.tpt - adapter->tpt_old;
       
  4442 	adapter->tpt_old = adapter->stats.tpt;
       
  4443 	mac->collision_delta = adapter->stats.colc - adapter->colc_old;
       
  4444 	adapter->colc_old = adapter->stats.colc;
       
  4445 
       
  4446 	adapter->gorc = adapter->stats.gorc - adapter->gorc_old;
       
  4447 	adapter->gorc_old = adapter->stats.gorc;
       
  4448 	adapter->gotc = adapter->stats.gotc - adapter->gotc_old;
       
  4449 	adapter->gotc_old = adapter->stats.gotc;
       
  4450 	spin_unlock(&adapter->stats64_lock);
       
  4451 
       
  4452 	e1000e_update_adaptive(&adapter->hw);
       
  4453 
       
  4454 	if (!netif_carrier_ok(netdev) &&
       
  4455 	    (e1000_desc_unused(tx_ring) + 1 < tx_ring->count)) {
       
  4456 		/*
       
  4457 		 * We've lost link, so the controller stops DMA,
       
  4458 		 * but we've got queued Tx work that's never going
       
  4459 		 * to get done, so reset controller to flush Tx.
       
  4460 		 * (Do the reset outside of interrupt context).
       
  4461 		 */
       
  4462 		schedule_work(&adapter->reset_task);
       
  4463 		/* return immediately since reset is imminent */
       
  4464 		return;
       
  4465 	}
       
  4466 
       
  4467 	/* Simple mode for Interrupt Throttle Rate (ITR) */
       
  4468 	if (adapter->itr_setting == 4) {
       
  4469 		/*
       
  4470 		 * Symmetric Tx/Rx gets a reduced ITR=2000;
       
  4471 		 * Total asymmetrical Tx or Rx gets ITR=8000;
       
  4472 		 * everyone else is between 2000-8000.
       
  4473 		 */
       
  4474 		u32 goc = (adapter->gotc + adapter->gorc) / 10000;
       
  4475 		u32 dif = (adapter->gotc > adapter->gorc ?
       
  4476 			    adapter->gotc - adapter->gorc :
       
  4477 			    adapter->gorc - adapter->gotc) / 10000;
       
  4478 		u32 itr = goc > 0 ? (dif * 6000 / goc + 2000) : 8000;
       
  4479 
       
  4480 		ew32(ITR, 1000000000 / (itr * 256));
       
  4481 	}
       
  4482 
       
  4483 	/* Cause software interrupt to ensure Rx ring is cleaned */
       
  4484 	if (adapter->msix_entries)
       
  4485 		ew32(ICS, adapter->rx_ring->ims_val);
       
  4486 	else
       
  4487 		ew32(ICS, E1000_ICS_RXDMT0);
       
  4488 
       
  4489 	/* flush pending descriptors to memory before detecting Tx hang */
       
  4490 	e1000e_flush_descriptors(adapter);
       
  4491 
       
  4492 	/* Force detection of hung controller every watchdog period */
       
  4493 	adapter->detect_tx_hung = 1;
       
  4494 
       
  4495 	/*
       
  4496 	 * With 82571 controllers, LAA may be overwritten due to controller
       
  4497 	 * reset from the other port. Set the appropriate LAA in RAR[0]
       
  4498 	 */
       
  4499 	if (e1000e_get_laa_state_82571(hw))
       
  4500 		e1000e_rar_set(hw, adapter->hw.mac.addr, 0);
       
  4501 
       
  4502 	if (adapter->flags2 & FLAG2_CHECK_PHY_HANG)
       
  4503 		e1000e_check_82574_phy_workaround(adapter);
       
  4504 
       
  4505 	/* Reset the timer */
       
  4506 	if (!test_bit(__E1000_DOWN, &adapter->state))
       
  4507 		mod_timer(&adapter->watchdog_timer,
       
  4508 			  round_jiffies(jiffies + 2 * HZ));
       
  4509 }
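/*
 * Editor's note: a worked example for the "simple mode" ITR calculation
 * above (itr_setting == 4), assuming gotc/gorc hold the bytes moved since
 * the last watchdog run.  With gotc = 6,000,000 and gorc = 2,000,000:
 *
 *   goc = (6,000,000 + 2,000,000) / 10000 = 800
 *   dif = (6,000,000 - 2,000,000) / 10000 = 400
 *   itr = 400 * 6000 / 800 + 2000        = 5000 interrupts/sec
 *   ITR register = 1,000,000,000 / (5000 * 256) ~= 781
 *
 * The helper below is a hypothetical, self-contained restatement of the
 * same arithmetic, not driver code.
 */
#if 0	/* illustrative only */
static u32 example_simple_itr(u32 gotc, u32 gorc)
{
	u32 goc = (gotc + gorc) / 10000;
	u32 dif = (gotc > gorc ? gotc - gorc : gorc - gotc) / 10000;
	u32 itr = goc > 0 ? (dif * 6000 / goc + 2000) : 8000;

	return 1000000000 / (itr * 256);	/* value written to ITR */
}
#endif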
       
  4510 
       
  4511 #define E1000_TX_FLAGS_CSUM		0x00000001
       
  4512 #define E1000_TX_FLAGS_VLAN		0x00000002
       
  4513 #define E1000_TX_FLAGS_TSO		0x00000004
       
  4514 #define E1000_TX_FLAGS_IPV4		0x00000008
       
  4515 #define E1000_TX_FLAGS_VLAN_MASK	0xffff0000
       
  4516 #define E1000_TX_FLAGS_VLAN_SHIFT	16
       
  4517 
       
  4518 static int e1000_tso(struct e1000_adapter *adapter,
       
  4519 		     struct sk_buff *skb)
       
  4520 {
       
  4521 	struct e1000_ring *tx_ring = adapter->tx_ring;
       
  4522 	struct e1000_context_desc *context_desc;
       
  4523 	struct e1000_buffer *buffer_info;
       
  4524 	unsigned int i;
       
  4525 	u32 cmd_length = 0;
       
  4526 	u16 ipcse = 0, tucse, mss;
       
  4527 	u8 ipcss, ipcso, tucss, tucso, hdr_len;
       
  4528 
       
  4529 	if (!skb_is_gso(skb))
       
  4530 		return 0;
       
  4531 
       
  4532 	if (skb_header_cloned(skb)) {
       
  4533 		int err = pskb_expand_head(skb, 0, 0, GFP_ATOMIC);
       
  4534 
       
  4535 		if (err)
       
  4536 			return err;
       
  4537 	}
       
  4538 
       
  4539 	hdr_len = skb_transport_offset(skb) + tcp_hdrlen(skb);
       
  4540 	mss = skb_shinfo(skb)->gso_size;
       
  4541 	if (skb->protocol == htons(ETH_P_IP)) {
       
  4542 		struct iphdr *iph = ip_hdr(skb);
       
  4543 		iph->tot_len = 0;
       
  4544 		iph->check = 0;
       
  4545 		tcp_hdr(skb)->check = ~csum_tcpudp_magic(iph->saddr, iph->daddr,
       
  4546 		                                         0, IPPROTO_TCP, 0);
       
  4547 		cmd_length = E1000_TXD_CMD_IP;
       
  4548 		ipcse = skb_transport_offset(skb) - 1;
       
  4549 	} else if (skb_is_gso_v6(skb)) {
       
  4550 		ipv6_hdr(skb)->payload_len = 0;
       
  4551 		tcp_hdr(skb)->check = ~csum_ipv6_magic(&ipv6_hdr(skb)->saddr,
       
  4552 		                                       &ipv6_hdr(skb)->daddr,
       
  4553 		                                       0, IPPROTO_TCP, 0);
       
  4554 		ipcse = 0;
       
  4555 	}
       
  4556 	ipcss = skb_network_offset(skb);
       
  4557 	ipcso = (void *)&(ip_hdr(skb)->check) - (void *)skb->data;
       
  4558 	tucss = skb_transport_offset(skb);
       
  4559 	tucso = (void *)&(tcp_hdr(skb)->check) - (void *)skb->data;
       
  4560 	tucse = 0;
       
  4561 
       
  4562 	cmd_length |= (E1000_TXD_CMD_DEXT | E1000_TXD_CMD_TSE |
       
  4563 	               E1000_TXD_CMD_TCP | (skb->len - (hdr_len)));
       
  4564 
       
  4565 	i = tx_ring->next_to_use;
       
  4566 	context_desc = E1000_CONTEXT_DESC(*tx_ring, i);
       
  4567 	buffer_info = &tx_ring->buffer_info[i];
       
  4568 
       
  4569 	context_desc->lower_setup.ip_fields.ipcss  = ipcss;
       
  4570 	context_desc->lower_setup.ip_fields.ipcso  = ipcso;
       
  4571 	context_desc->lower_setup.ip_fields.ipcse  = cpu_to_le16(ipcse);
       
  4572 	context_desc->upper_setup.tcp_fields.tucss = tucss;
       
  4573 	context_desc->upper_setup.tcp_fields.tucso = tucso;
       
  4574 	context_desc->upper_setup.tcp_fields.tucse = cpu_to_le16(tucse);
       
  4575 	context_desc->tcp_seg_setup.fields.mss     = cpu_to_le16(mss);
       
  4576 	context_desc->tcp_seg_setup.fields.hdr_len = hdr_len;
       
  4577 	context_desc->cmd_and_length = cpu_to_le32(cmd_length);
       
  4578 
       
  4579 	buffer_info->time_stamp = jiffies;
       
  4580 	buffer_info->next_to_watch = i;
       
  4581 
       
  4582 	i++;
       
  4583 	if (i == tx_ring->count)
       
  4584 		i = 0;
       
  4585 	tx_ring->next_to_use = i;
       
  4586 
       
  4587 	return 1;
       
  4588 }
       
  4589 
       
  4590 static bool e1000_tx_csum(struct e1000_adapter *adapter, struct sk_buff *skb)
       
  4591 {
       
  4592 	struct e1000_ring *tx_ring = adapter->tx_ring;
       
  4593 	struct e1000_context_desc *context_desc;
       
  4594 	struct e1000_buffer *buffer_info;
       
  4595 	unsigned int i;
       
  4596 	u8 css;
       
  4597 	u32 cmd_len = E1000_TXD_CMD_DEXT;
       
  4598 	__be16 protocol;
       
  4599 
       
  4600 	if (skb->ip_summed != CHECKSUM_PARTIAL)
       
  4601 		return 0;
       
  4602 
       
  4603 	if (skb->protocol == cpu_to_be16(ETH_P_8021Q))
       
  4604 		protocol = vlan_eth_hdr(skb)->h_vlan_encapsulated_proto;
       
  4605 	else
       
  4606 		protocol = skb->protocol;
       
  4607 
       
  4608 	switch (protocol) {
       
  4609 	case cpu_to_be16(ETH_P_IP):
       
  4610 		if (ip_hdr(skb)->protocol == IPPROTO_TCP)
       
  4611 			cmd_len |= E1000_TXD_CMD_TCP;
       
  4612 		break;
       
  4613 	case cpu_to_be16(ETH_P_IPV6):
       
  4614 		/* XXX not handling all IPV6 headers */
       
  4615 		if (ipv6_hdr(skb)->nexthdr == IPPROTO_TCP)
       
  4616 			cmd_len |= E1000_TXD_CMD_TCP;
       
  4617 		break;
       
  4618 	default:
       
  4619 		if (unlikely(net_ratelimit()))
       
  4620 			e_warn("checksum_partial proto=%x!\n",
       
  4621 			       be16_to_cpu(protocol));
       
  4622 		break;
       
  4623 	}
       
  4624 
       
  4625 	css = skb_checksum_start_offset(skb);
       
  4626 
       
  4627 	i = tx_ring->next_to_use;
       
  4628 	buffer_info = &tx_ring->buffer_info[i];
       
  4629 	context_desc = E1000_CONTEXT_DESC(*tx_ring, i);
       
  4630 
       
  4631 	context_desc->lower_setup.ip_config = 0;
       
  4632 	context_desc->upper_setup.tcp_fields.tucss = css;
       
  4633 	context_desc->upper_setup.tcp_fields.tucso =
       
  4634 				css + skb->csum_offset;
       
  4635 	context_desc->upper_setup.tcp_fields.tucse = 0;
       
  4636 	context_desc->tcp_seg_setup.data = 0;
       
  4637 	context_desc->cmd_and_length = cpu_to_le32(cmd_len);
       
  4638 
       
  4639 	buffer_info->time_stamp = jiffies;
       
  4640 	buffer_info->next_to_watch = i;
       
  4641 
       
  4642 	i++;
       
  4643 	if (i == tx_ring->count)
       
  4644 		i = 0;
       
  4645 	tx_ring->next_to_use = i;
       
  4646 
       
  4647 	return 1;
       
  4648 }
       
  4649 
       
  4650 #define E1000_MAX_PER_TXD	8192
       
  4651 #define E1000_MAX_TXD_PWR	12
       
  4652 
       
  4653 static int e1000_tx_map(struct e1000_adapter *adapter,
       
  4654 			struct sk_buff *skb, unsigned int first,
       
  4655 			unsigned int max_per_txd, unsigned int nr_frags,
       
  4656 			unsigned int mss)
       
  4657 {
       
  4658 	struct e1000_ring *tx_ring = adapter->tx_ring;
       
  4659 	struct pci_dev *pdev = adapter->pdev;
       
  4660 	struct e1000_buffer *buffer_info;
       
  4661 	unsigned int len = skb_headlen(skb);
       
  4662 	unsigned int offset = 0, size, count = 0, i;
       
  4663 	unsigned int f, bytecount, segs;
       
  4664 
       
  4665 	i = tx_ring->next_to_use;
       
  4666 
       
  4667 	while (len) {
       
  4668 		buffer_info = &tx_ring->buffer_info[i];
       
  4669 		size = min(len, max_per_txd);
       
  4670 
       
  4671 		buffer_info->length = size;
       
  4672 		buffer_info->time_stamp = jiffies;
       
  4673 		buffer_info->next_to_watch = i;
       
  4674 		buffer_info->dma = dma_map_single(&pdev->dev,
       
  4675 						  skb->data + offset,
       
  4676 						  size, DMA_TO_DEVICE);
       
  4677 		buffer_info->mapped_as_page = false;
       
  4678 		if (dma_mapping_error(&pdev->dev, buffer_info->dma))
       
  4679 			goto dma_error;
       
  4680 
       
  4681 		len -= size;
       
  4682 		offset += size;
       
  4683 		count++;
       
  4684 
       
  4685 		if (len) {
       
  4686 			i++;
       
  4687 			if (i == tx_ring->count)
       
  4688 				i = 0;
       
  4689 		}
       
  4690 	}
       
  4691 
       
  4692 	for (f = 0; f < nr_frags; f++) {
       
  4693 		const struct skb_frag_struct *frag;
       
  4694 
       
  4695 		frag = &skb_shinfo(skb)->frags[f];
       
  4696 		len = skb_frag_size(frag);
       
  4697 		offset = 0;
       
  4698 
       
  4699 		while (len) {
       
  4700 			i++;
       
  4701 			if (i == tx_ring->count)
       
  4702 				i = 0;
       
  4703 
       
  4704 			buffer_info = &tx_ring->buffer_info[i];
       
  4705 			size = min(len, max_per_txd);
       
  4706 
       
  4707 			buffer_info->length = size;
       
  4708 			buffer_info->time_stamp = jiffies;
       
  4709 			buffer_info->next_to_watch = i;
       
  4710 			buffer_info->dma = skb_frag_dma_map(&pdev->dev, frag,
       
  4711 						offset, size, DMA_TO_DEVICE);
       
  4712 			buffer_info->mapped_as_page = true;
       
  4713 			if (dma_mapping_error(&pdev->dev, buffer_info->dma))
       
  4714 				goto dma_error;
       
  4715 
       
  4716 			len -= size;
       
  4717 			offset += size;
       
  4718 			count++;
       
  4719 		}
       
  4720 	}
       
  4721 
       
  4722 	segs = skb_shinfo(skb)->gso_segs ? : 1;
       
  4723 	/* multiply data chunks by size of headers */
       
  4724 	bytecount = ((segs - 1) * skb_headlen(skb)) + skb->len;
       
  4725 
       
  4726 	tx_ring->buffer_info[i].skb = skb;
       
  4727 	tx_ring->buffer_info[i].segs = segs;
       
  4728 	tx_ring->buffer_info[i].bytecount = bytecount;
       
  4729 	tx_ring->buffer_info[first].next_to_watch = i;
       
  4730 
       
  4731 	return count;
       
  4732 
       
  4733 dma_error:
       
  4734 	dev_err(&pdev->dev, "Tx DMA map failed\n");
       
  4735 	buffer_info->dma = 0;
       
  4736 	if (count)
       
  4737 		count--;
       
  4738 
       
  4739 	while (count--) {
       
  4740 		if (i == 0)
       
  4741 			i += tx_ring->count;
       
  4742 		i--;
       
  4743 		buffer_info = &tx_ring->buffer_info[i];
       
  4744 		e1000_put_txbuf(adapter, buffer_info);
       
  4745 	}
       
  4746 
       
  4747 	return 0;
       
  4748 }
       
  4749 
       
  4750 static void e1000_tx_queue(struct e1000_adapter *adapter,
       
  4751 			   int tx_flags, int count)
       
  4752 {
       
  4753 	struct e1000_ring *tx_ring = adapter->tx_ring;
       
  4754 	struct e1000_tx_desc *tx_desc = NULL;
       
  4755 	struct e1000_buffer *buffer_info;
       
  4756 	u32 txd_upper = 0, txd_lower = E1000_TXD_CMD_IFCS;
       
  4757 	unsigned int i;
       
  4758 
       
  4759 	if (tx_flags & E1000_TX_FLAGS_TSO) {
       
  4760 		txd_lower |= E1000_TXD_CMD_DEXT | E1000_TXD_DTYP_D |
       
  4761 			     E1000_TXD_CMD_TSE;
       
  4762 		txd_upper |= E1000_TXD_POPTS_TXSM << 8;
       
  4763 
       
  4764 		if (tx_flags & E1000_TX_FLAGS_IPV4)
       
  4765 			txd_upper |= E1000_TXD_POPTS_IXSM << 8;
       
  4766 	}
       
  4767 
       
  4768 	if (tx_flags & E1000_TX_FLAGS_CSUM) {
       
  4769 		txd_lower |= E1000_TXD_CMD_DEXT | E1000_TXD_DTYP_D;
       
  4770 		txd_upper |= E1000_TXD_POPTS_TXSM << 8;
       
  4771 	}
       
  4772 
       
  4773 	if (tx_flags & E1000_TX_FLAGS_VLAN) {
       
  4774 		txd_lower |= E1000_TXD_CMD_VLE;
       
  4775 		txd_upper |= (tx_flags & E1000_TX_FLAGS_VLAN_MASK);
       
  4776 	}
       
  4777 
       
  4778 	i = tx_ring->next_to_use;
       
  4779 
       
  4780 	do {
       
  4781 		buffer_info = &tx_ring->buffer_info[i];
       
  4782 		tx_desc = E1000_TX_DESC(*tx_ring, i);
       
  4783 		tx_desc->buffer_addr = cpu_to_le64(buffer_info->dma);
       
  4784 		tx_desc->lower.data =
       
  4785 			cpu_to_le32(txd_lower | buffer_info->length);
       
  4786 		tx_desc->upper.data = cpu_to_le32(txd_upper);
       
  4787 
       
  4788 		i++;
       
  4789 		if (i == tx_ring->count)
       
  4790 			i = 0;
       
  4791 	} while (--count > 0);
       
  4792 
       
  4793 	tx_desc->lower.data |= cpu_to_le32(adapter->txd_cmd);
       
  4794 
       
  4795 	/*
       
  4796 	 * Force memory writes to complete before letting h/w
       
  4797 	 * know there are new descriptors to fetch.  (Only
       
  4798 	 * applicable for weak-ordered memory model archs,
       
  4799 	 * such as IA-64).
       
  4800 	 */
       
  4801 	wmb();
       
  4802 
       
  4803 	tx_ring->next_to_use = i;
       
  4804 
       
  4805 	if (adapter->flags2 & FLAG2_PCIM2PCI_ARBITER_WA)
       
  4806 		e1000e_update_tdt_wa(adapter, i);
       
  4807 	else
       
  4808 		writel(i, adapter->hw.hw_addr + tx_ring->tail);
       
  4809 
       
  4810 	/*
       
  4811 	 * we need this if more than one processor can write to our tail
       
  4812 	 * at a time, it synchronizes IO on IA64/Altix systems
       
  4813 	 */
       
  4814 	mmiowb();
       
  4815 }
       
  4816 
       
  4817 #define MINIMUM_DHCP_PACKET_SIZE 282
       
  4818 static int e1000_transfer_dhcp_info(struct e1000_adapter *adapter,
       
  4819 				    struct sk_buff *skb)
       
  4820 {
       
  4821 	struct e1000_hw *hw = &adapter->hw;
       
  4822 	u16 length, offset;
       
  4823 
       
  4824 	if (vlan_tx_tag_present(skb)) {
       
  4825 		if (!((vlan_tx_tag_get(skb) == adapter->hw.mng_cookie.vlan_id) &&
       
  4826 		    (adapter->hw.mng_cookie.status &
       
  4827 			E1000_MNG_DHCP_COOKIE_STATUS_VLAN)))
       
  4828 			return 0;
       
  4829 	}
       
  4830 
       
  4831 	if (skb->len <= MINIMUM_DHCP_PACKET_SIZE)
       
  4832 		return 0;
       
  4833 
       
  4834 	if (((struct ethhdr *) skb->data)->h_proto != htons(ETH_P_IP))
       
  4835 		return 0;
       
  4836 
       
  4837 	{
       
  4838 		const struct iphdr *ip = (struct iphdr *)((u8 *)skb->data+14);
       
  4839 		struct udphdr *udp;
       
  4840 
       
  4841 		if (ip->protocol != IPPROTO_UDP)
       
  4842 			return 0;
       
  4843 
       
  4844 		udp = (struct udphdr *)((u8 *)ip + (ip->ihl << 2));
       
  4845 		if (ntohs(udp->dest) != 67)
       
  4846 			return 0;
       
  4847 
       
  4848 		offset = (u8 *)udp + 8 - skb->data;
       
  4849 		length = skb->len - offset;
       
  4850 		return e1000e_mng_write_dhcp_info(hw, (u8 *)udp + 8, length);
       
  4851 	}
       
  4852 
       
  4853 	return 0;
       
  4854 }
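/*
 * Editor's note on the pointer arithmetic above, for clarity: the constant
 * 14 is the Ethernet header length (ETH_HLEN), "ip->ihl << 2" converts the
 * IP header length from 32-bit words to bytes, and "(u8 *)udp + 8" skips
 * the 8-byte UDP header so that only the DHCP payload of packets destined
 * to port 67 (the BOOTP/DHCP server port) is handed to the firmware.
 */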
       
  4855 
       
  4856 static int __e1000_maybe_stop_tx(struct net_device *netdev, int size)
       
  4857 {
       
  4858 	struct e1000_adapter *adapter = netdev_priv(netdev);
       
  4859 
       
  4860 	netif_stop_queue(netdev);
       
  4861 	/*
       
  4862 	 * Herbert's original patch had:
       
  4863 	 *  smp_mb__after_netif_stop_queue();
       
  4864 	 * but since that doesn't exist yet, just open code it.
       
  4865 	 */
       
  4866 	smp_mb();
       
  4867 
       
  4868 	/*
       
  4869 	 * We need to check again in a case another CPU has just
       
  4870 	 * made room available.
       
  4871 	 */
       
  4872 	if (e1000_desc_unused(adapter->tx_ring) < size)
       
  4873 		return -EBUSY;
       
  4874 
       
  4875 	/* A reprieve! */
       
  4876 	netif_start_queue(netdev);
       
  4877 	++adapter->restart_queue;
       
  4878 	return 0;
       
  4879 }
       
  4880 
       
  4881 static int e1000_maybe_stop_tx(struct net_device *netdev, int size)
       
  4882 {
       
  4883 	struct e1000_adapter *adapter = netdev_priv(netdev);
       
  4884 
       
  4885 	if (e1000_desc_unused(adapter->tx_ring) >= size)
       
  4886 		return 0;
       
  4887 	return __e1000_maybe_stop_tx(netdev, size);
       
  4888 }
       
  4889 
       
  4890 #define TXD_USE_COUNT(S, X) (((S) >> (X)) + 1)
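/*
 * Editor's note: TXD_USE_COUNT() computes how many descriptors a buffer of
 * S bytes needs when each descriptor may carry at most 2^X bytes.  For
 * example, with X = E1000_MAX_TXD_PWR = 12 (4096-byte descriptors), a
 * 9000-byte linear buffer costs (9000 >> 12) + 1 = 3 descriptors.  The
 * "+ 1" overcounts by one descriptor when S is an exact multiple of 2^X,
 * which only errs on the side of reserving too much ring space.
 */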
       
  4891 static netdev_tx_t e1000_xmit_frame(struct sk_buff *skb,
       
  4892 				    struct net_device *netdev)
       
  4893 {
       
  4894 	struct e1000_adapter *adapter = netdev_priv(netdev);
       
  4895 	struct e1000_ring *tx_ring = adapter->tx_ring;
       
  4896 	unsigned int first;
       
  4897 	unsigned int max_per_txd = E1000_MAX_PER_TXD;
       
  4898 	unsigned int max_txd_pwr = E1000_MAX_TXD_PWR;
       
  4899 	unsigned int tx_flags = 0;
       
  4900 	unsigned int len = skb_headlen(skb);
       
  4901 	unsigned int nr_frags;
       
  4902 	unsigned int mss;
       
  4903 	int count = 0;
       
  4904 	int tso;
       
  4905 	unsigned int f;
       
  4906 
       
  4907 	if (test_bit(__E1000_DOWN, &adapter->state)) {
       
  4908 		dev_kfree_skb_any(skb);
       
  4909 		return NETDEV_TX_OK;
       
  4910 	}
       
  4911 
       
  4912 	if (skb->len <= 0) {
       
  4913 		dev_kfree_skb_any(skb);
       
  4914 		return NETDEV_TX_OK;
       
  4915 	}
       
  4916 
       
  4917 	mss = skb_shinfo(skb)->gso_size;
       
  4918 	/*
       
  4919 	 * The controller does a simple calculation to

  4920 	 * make sure there is enough room in the FIFO before

  4921 	 * initiating the DMA for each buffer.  The calculation

  4922 	 * assumes ceil(buffer len / mss) is at most 4, so cap the

  4923 	 * max per-descriptor buffer length at 4 * mss whenever mss

  4924 	 * drops to avoid overrunning the FIFO.
       
  4925 	 */
       
  4926 	if (mss) {
       
  4927 		u8 hdr_len;
       
  4928 		max_per_txd = min(mss << 2, max_per_txd);
       
  4929 		max_txd_pwr = fls(max_per_txd) - 1;
       
  4930 
       
  4931 		/*
       
  4932 		 * TSO Workaround for 82571/2/3 Controllers -- if skb->data
       
  4933 		 * points to just header, pull a few bytes of payload from
       
  4934 		 * frags into skb->data
       
  4935 		 */
       
  4936 		hdr_len = skb_transport_offset(skb) + tcp_hdrlen(skb);
       
  4937 		/*
       
  4938 		 * we do this workaround for ES2LAN, but it is unnecessary;

  4939 		 * avoiding it could save a lot of cycles
       
  4940 		 */
       
  4941 		if (skb->data_len && (hdr_len == len)) {
       
  4942 			unsigned int pull_size;
       
  4943 
       
  4944 			pull_size = min((unsigned int)4, skb->data_len);
       
  4945 			if (!__pskb_pull_tail(skb, pull_size)) {
       
  4946 				e_err("__pskb_pull_tail failed.\n");
       
  4947 				dev_kfree_skb_any(skb);
       
  4948 				return NETDEV_TX_OK;
       
  4949 			}
       
  4950 			len = skb_headlen(skb);
       
  4951 		}
       
  4952 	}
       
  4953 
       
  4954 	/* reserve a descriptor for the offload context */
       
  4955 	if ((mss) || (skb->ip_summed == CHECKSUM_PARTIAL))
       
  4956 		count++;
       
  4957 	count++;
       
  4958 
       
  4959 	count += TXD_USE_COUNT(len, max_txd_pwr);
       
  4960 
       
  4961 	nr_frags = skb_shinfo(skb)->nr_frags;
       
  4962 	for (f = 0; f < nr_frags; f++)
       
  4963 		count += TXD_USE_COUNT(skb_frag_size(&skb_shinfo(skb)->frags[f]),
       
  4964 				       max_txd_pwr);
       
  4965 
       
  4966 	if (adapter->hw.mac.tx_pkt_filtering)
       
  4967 		e1000_transfer_dhcp_info(adapter, skb);
       
  4968 
       
  4969 	/*
       
  4970 	 * need: count + 2 desc gap to keep tail from touching
       
  4971 	 * head, otherwise try next time
       
  4972 	 */
       
  4973 	if (e1000_maybe_stop_tx(netdev, count + 2))
       
  4974 		return NETDEV_TX_BUSY;
       
  4975 
       
  4976 	if (vlan_tx_tag_present(skb)) {
       
  4977 		tx_flags |= E1000_TX_FLAGS_VLAN;
       
  4978 		tx_flags |= (vlan_tx_tag_get(skb) << E1000_TX_FLAGS_VLAN_SHIFT);
       
  4979 	}
       
  4980 
       
  4981 	first = tx_ring->next_to_use;
       
  4982 
       
  4983 	tso = e1000_tso(adapter, skb);
       
  4984 	if (tso < 0) {
       
  4985 		dev_kfree_skb_any(skb);
       
  4986 		return NETDEV_TX_OK;
       
  4987 	}
       
  4988 
       
  4989 	if (tso)
       
  4990 		tx_flags |= E1000_TX_FLAGS_TSO;
       
  4991 	else if (e1000_tx_csum(adapter, skb))
       
  4992 		tx_flags |= E1000_TX_FLAGS_CSUM;
       
  4993 
       
  4994 	/*
       
  4995 	 * The old method was to assume an IPv4 packet by default if TSO

  4996 	 * was enabled.  Since 82571 hardware supports TSO for IPv6 as

  4997 	 * well, we can no longer make that assumption.
       
  4998 	 */
       
  4999 	if (skb->protocol == htons(ETH_P_IP))
       
  5000 		tx_flags |= E1000_TX_FLAGS_IPV4;
       
  5001 
       
  5002 	/* if count is 0 then mapping error has occurred */
       
  5003 	count = e1000_tx_map(adapter, skb, first, max_per_txd, nr_frags, mss);
       
  5004 	if (count) {
       
  5005 		e1000_tx_queue(adapter, tx_flags, count);
       
  5006 		/* Make sure there is space in the ring for the next send. */
       
  5007 		e1000_maybe_stop_tx(netdev, MAX_SKB_FRAGS + 2);
       
  5008 
       
  5009 	} else {
       
  5010 		dev_kfree_skb_any(skb);
       
  5011 		tx_ring->buffer_info[first].time_stamp = 0;
       
  5012 		tx_ring->next_to_use = first;
       
  5013 	}
       
  5014 
       
  5015 	return NETDEV_TX_OK;
       
  5016 }
       
  5017 
       
  5018 /**
       
  5019  * e1000_tx_timeout - Respond to a Tx Hang
       
  5020  * @netdev: network interface device structure
       
  5021  **/
       
  5022 static void e1000_tx_timeout(struct net_device *netdev)
       
  5023 {
       
  5024 	struct e1000_adapter *adapter = netdev_priv(netdev);
       
  5025 
       
  5026 	/* Do the reset outside of interrupt context */
       
  5027 	adapter->tx_timeout_count++;
       
  5028 	schedule_work(&adapter->reset_task);
       
  5029 }
       
  5030 
       
  5031 static void e1000_reset_task(struct work_struct *work)
       
  5032 {
       
  5033 	struct e1000_adapter *adapter;
       
  5034 	adapter = container_of(work, struct e1000_adapter, reset_task);
       
  5035 
       
  5036 	/* don't run the task if already down */
       
  5037 	if (test_bit(__E1000_DOWN, &adapter->state))
       
  5038 		return;
       
  5039 
       
  5040 	if (!((adapter->flags & FLAG_RX_NEEDS_RESTART) &&
       
  5041 	      (adapter->flags & FLAG_RX_RESTART_NOW))) {
       
  5042 		e1000e_dump(adapter);
       
  5043 		e_err("Reset adapter\n");
       
  5044 	}
       
  5045 	e1000e_reinit_locked(adapter);
       
  5046 }
       
  5047 
       
  5048 /**
       
  5049  * e1000_get_stats64 - Get System Network Statistics
       
  5050  * @netdev: network interface device structure
       
  5051  * @stats: rtnl_link_stats64 pointer
       
  5052  *
       
  5053  * Returns the address of the device statistics structure.
       
  5054  **/
       
  5055 struct rtnl_link_stats64 *e1000e_get_stats64(struct net_device *netdev,
       
  5056                                              struct rtnl_link_stats64 *stats)
       
  5057 {
       
  5058 	struct e1000_adapter *adapter = netdev_priv(netdev);
       
  5059 
       
  5060 	memset(stats, 0, sizeof(struct rtnl_link_stats64));
       
  5061 	spin_lock(&adapter->stats64_lock);
       
  5062 	e1000e_update_stats(adapter);
       
  5063 	/* Fill out the OS statistics structure */
       
  5064 	stats->rx_bytes = adapter->stats.gorc;
       
  5065 	stats->rx_packets = adapter->stats.gprc;
       
  5066 	stats->tx_bytes = adapter->stats.gotc;
       
  5067 	stats->tx_packets = adapter->stats.gptc;
       
  5068 	stats->multicast = adapter->stats.mprc;
       
  5069 	stats->collisions = adapter->stats.colc;
       
  5070 
       
  5071 	/* Rx Errors */
       
  5072 
       
  5073 	/*
       
  5074 	 * RLEC on some newer hardware can be incorrect so build
       
  5075 	 * our own version based on RUC and ROC
       
  5076 	 */
       
  5077 	stats->rx_errors = adapter->stats.rxerrc +
       
  5078 		adapter->stats.crcerrs + adapter->stats.algnerrc +
       
  5079 		adapter->stats.ruc + adapter->stats.roc +
       
  5080 		adapter->stats.cexterr;
       
  5081 	stats->rx_length_errors = adapter->stats.ruc +
       
  5082 					      adapter->stats.roc;
       
  5083 	stats->rx_crc_errors = adapter->stats.crcerrs;
       
  5084 	stats->rx_frame_errors = adapter->stats.algnerrc;
       
  5085 	stats->rx_missed_errors = adapter->stats.mpc;
       
  5086 
       
  5087 	/* Tx Errors */
       
  5088 	stats->tx_errors = adapter->stats.ecol +
       
  5089 				       adapter->stats.latecol;
       
  5090 	stats->tx_aborted_errors = adapter->stats.ecol;
       
  5091 	stats->tx_window_errors = adapter->stats.latecol;
       
  5092 	stats->tx_carrier_errors = adapter->stats.tncrs;
       
  5093 
       
  5094 	/* Tx Dropped needs to be maintained elsewhere */
       
  5095 
       
  5096 	spin_unlock(&adapter->stats64_lock);
       
  5097 	return stats;
       
  5098 }
       
  5099 
       
  5100 /**
       
  5101  * e1000_change_mtu - Change the Maximum Transfer Unit
       
  5102  * @netdev: network interface device structure
       
  5103  * @new_mtu: new value for maximum frame size
       
  5104  *
       
  5105  * Returns 0 on success, negative on failure
       
  5106  **/
       
  5107 static int e1000_change_mtu(struct net_device *netdev, int new_mtu)
       
  5108 {
       
  5109 	struct e1000_adapter *adapter = netdev_priv(netdev);
       
  5110 	int max_frame = new_mtu + ETH_HLEN + ETH_FCS_LEN;
       
  5111 
       
  5112 	/* Jumbo frame support */
       
  5113 	if ((max_frame > ETH_FRAME_LEN + ETH_FCS_LEN) &&
       
  5114 	    !(adapter->flags & FLAG_HAS_JUMBO_FRAMES)) {
       
  5115 		e_err("Jumbo Frames not supported.\n");
       
  5116 		return -EINVAL;
       
  5117 	}
       
  5118 
       
  5119 	/* Supported frame sizes */
       
  5120 	if ((new_mtu < ETH_ZLEN + ETH_FCS_LEN + VLAN_HLEN) ||
       
  5121 	    (max_frame > adapter->max_hw_frame_size)) {
       
  5122 		e_err("Unsupported MTU setting\n");
       
  5123 		return -EINVAL;
       
  5124 	}
       
  5125 
       
  5126 	/* Jumbo frame workaround on 82579 requires CRC be stripped */
       
  5127 	if ((adapter->hw.mac.type == e1000_pch2lan) &&
       
  5128 	    !(adapter->flags2 & FLAG2_CRC_STRIPPING) &&
       
  5129 	    (new_mtu > ETH_DATA_LEN)) {
       
  5130 		e_err("Jumbo Frames not supported on 82579 when CRC "
       
  5131 		      "stripping is disabled.\n");
       
  5132 		return -EINVAL;
       
  5133 	}
       
  5134 
       
  5135 	/* 82573 Errata 17 */
       
  5136 	if (((adapter->hw.mac.type == e1000_82573) ||
       
  5137 	     (adapter->hw.mac.type == e1000_82574)) &&
       
  5138 	    (max_frame > ETH_FRAME_LEN + ETH_FCS_LEN)) {
       
  5139 		adapter->flags2 |= FLAG2_DISABLE_ASPM_L1;
       
  5140 		e1000e_disable_aspm(adapter->pdev, PCIE_LINK_STATE_L1);
       
  5141 	}
       
  5142 
       
  5143 	while (test_and_set_bit(__E1000_RESETTING, &adapter->state))
       
  5144 		usleep_range(1000, 2000);
       
  5145 	/* e1000e_down -> e1000e_reset dependent on max_frame_size & mtu */
       
  5146 	adapter->max_frame_size = max_frame;
       
  5147 	e_info("changing MTU from %d to %d\n", netdev->mtu, new_mtu);
       
  5148 	netdev->mtu = new_mtu;
       
  5149 	if (netif_running(netdev))
       
  5150 		e1000e_down(adapter);
       
  5151 
       
  5152 	/*
       
  5153 	 * NOTE: netdev_alloc_skb reserves 16 bytes, and typically NET_IP_ALIGN
       
  5154 	 * means we reserve 2 more, this pushes us to allocate from the next
       
  5155 	 * larger slab size.
       
  5156 	 * i.e. RXBUFFER_2048 --> size-4096 slab
       
  5157 	 * However with the new *_jumbo_rx* routines, jumbo receives will use
       
  5158 	 * fragmented skbs
       
  5159 	 */
       
  5160 
       
  5161 	if (max_frame <= 2048)
       
  5162 		adapter->rx_buffer_len = 2048;
       
  5163 	else
       
  5164 		adapter->rx_buffer_len = 4096;
       
  5165 
       
  5166 	/* adjust allocation if LPE protects us, and we aren't using SBP */
       
  5167 	if ((max_frame == ETH_FRAME_LEN + ETH_FCS_LEN) ||
       
  5168 	     (max_frame == ETH_FRAME_LEN + VLAN_HLEN + ETH_FCS_LEN))
       
  5169 		adapter->rx_buffer_len = ETH_FRAME_LEN + VLAN_HLEN
       
  5170 					 + ETH_FCS_LEN;
       
  5171 
       
  5172 	if (netif_running(netdev))
       
  5173 		e1000e_up(adapter);
       
  5174 	else
       
  5175 		e1000e_reset(adapter);
       
  5176 
       
  5177 	clear_bit(__E1000_RESETTING, &adapter->state);
       
  5178 
       
  5179 	return 0;
       
  5180 }
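/*
 * Editor's note: a quick sanity check of the sizing logic above.  For the
 * default MTU of 1500, max_frame = 1500 + ETH_HLEN (14) + ETH_FCS_LEN (4) =
 * 1518 bytes, which equals ETH_FRAME_LEN + ETH_FCS_LEN, so the initial
 * 2048-byte receive buffer is trimmed back to 1522 bytes
 * (ETH_FRAME_LEN + VLAN_HLEN + ETH_FCS_LEN) by the LPE adjustment.  A
 * 9000-byte jumbo MTU yields max_frame = 9018, a 4096-byte rx_buffer_len,
 * and the fragmented-skb jumbo receive path mentioned in the comment.
 */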
       
  5181 
       
  5182 static int e1000_mii_ioctl(struct net_device *netdev, struct ifreq *ifr,
       
  5183 			   int cmd)
       
  5184 {
       
  5185 	struct e1000_adapter *adapter = netdev_priv(netdev);
       
  5186 	struct mii_ioctl_data *data = if_mii(ifr);
       
  5187 
       
  5188 	if (adapter->hw.phy.media_type != e1000_media_type_copper)
       
  5189 		return -EOPNOTSUPP;
       
  5190 
       
  5191 	switch (cmd) {
       
  5192 	case SIOCGMIIPHY:
       
  5193 		data->phy_id = adapter->hw.phy.addr;
       
  5194 		break;
       
  5195 	case SIOCGMIIREG:
       
  5196 		e1000_phy_read_status(adapter);
       
  5197 
       
  5198 		switch (data->reg_num & 0x1F) {
       
  5199 		case MII_BMCR:
       
  5200 			data->val_out = adapter->phy_regs.bmcr;
       
  5201 			break;
       
  5202 		case MII_BMSR:
       
  5203 			data->val_out = adapter->phy_regs.bmsr;
       
  5204 			break;
       
  5205 		case MII_PHYSID1:
       
  5206 			data->val_out = (adapter->hw.phy.id >> 16);
       
  5207 			break;
       
  5208 		case MII_PHYSID2:
       
  5209 			data->val_out = (adapter->hw.phy.id & 0xFFFF);
       
  5210 			break;
       
  5211 		case MII_ADVERTISE:
       
  5212 			data->val_out = adapter->phy_regs.advertise;
       
  5213 			break;
       
  5214 		case MII_LPA:
       
  5215 			data->val_out = adapter->phy_regs.lpa;
       
  5216 			break;
       
  5217 		case MII_EXPANSION:
       
  5218 			data->val_out = adapter->phy_regs.expansion;
       
  5219 			break;
       
  5220 		case MII_CTRL1000:
       
  5221 			data->val_out = adapter->phy_regs.ctrl1000;
       
  5222 			break;
       
  5223 		case MII_STAT1000:
       
  5224 			data->val_out = adapter->phy_regs.stat1000;
       
  5225 			break;
       
  5226 		case MII_ESTATUS:
       
  5227 			data->val_out = adapter->phy_regs.estatus;
       
  5228 			break;
       
  5229 		default:
       
  5230 			return -EIO;
       
  5231 		}
       
  5232 		break;
       
  5233 	case SIOCSMIIREG:
       
  5234 	default:
       
  5235 		return -EOPNOTSUPP;
       
  5236 	}
       
  5237 	return 0;
       
  5238 }
       
  5239 
       
  5240 static int e1000_ioctl(struct net_device *netdev, struct ifreq *ifr, int cmd)
       
  5241 {
       
  5242 	switch (cmd) {
       
  5243 	case SIOCGMIIPHY:
       
  5244 	case SIOCGMIIREG:
       
  5245 	case SIOCSMIIREG:
       
  5246 		return e1000_mii_ioctl(netdev, ifr, cmd);
       
  5247 	default:
       
  5248 		return -EOPNOTSUPP;
       
  5249 	}
       
  5250 }
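/*
 * Editor's sketch (not part of the driver): how the SIOCGMIIPHY/SIOCGMIIREG
 * handlers above are typically exercised from user space, e.g. by tools
 * like mii-tool.  This is a hedged, self-contained user-space illustration
 * with minimal error handling; "eth0" is an assumed interface name.
 */
#if 0	/* illustrative user-space code only */
#include <stdio.h>
#include <string.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <sys/socket.h>
#include <net/if.h>
#include <linux/mii.h>
#include <linux/sockios.h>

int main(void)
{
	struct ifreq ifr;
	struct mii_ioctl_data *mii = (struct mii_ioctl_data *)&ifr.ifr_data;
	int fd = socket(AF_INET, SOCK_DGRAM, 0);

	memset(&ifr, 0, sizeof(ifr));
	strncpy(ifr.ifr_name, "eth0", IFNAMSIZ - 1);

	ioctl(fd, SIOCGMIIPHY, &ifr);		/* returns the PHY address */
	mii->reg_num = MII_BMSR;		/* then read a PHY register */
	ioctl(fd, SIOCGMIIREG, &ifr);
	printf("BMSR = 0x%04x\n", mii->val_out);

	close(fd);
	return 0;
}
#endif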
       
  5251 
       
  5252 static int e1000_init_phy_wakeup(struct e1000_adapter *adapter, u32 wufc)
       
  5253 {
       
  5254 	struct e1000_hw *hw = &adapter->hw;
       
  5255 	u32 i, mac_reg;
       
  5256 	u16 phy_reg, wuc_enable;
       
  5257 	int retval = 0;
       
  5258 
       
  5259 	/* copy MAC RARs to PHY RARs */
       
  5260 	e1000_copy_rx_addrs_to_phy_ich8lan(hw);
       
  5261 
       
  5262 	retval = hw->phy.ops.acquire(hw);
       
  5263 	if (retval) {
       
  5264 		e_err("Could not acquire PHY\n");
       
  5265 		return retval;
       
  5266 	}
       
  5267 
       
  5268 	/* Enable access to wakeup registers on and set page to BM_WUC_PAGE */
       
  5269 	retval = e1000_enable_phy_wakeup_reg_access_bm(hw, &wuc_enable);
       
  5270 	if (retval)
       
  5271 		goto out;
       
  5272 
       
  5273 	/* copy MAC MTA to PHY MTA - only needed for pchlan */
       
  5274 	for (i = 0; i < adapter->hw.mac.mta_reg_count; i++) {
       
  5275 		mac_reg = E1000_READ_REG_ARRAY(hw, E1000_MTA, i);
       
  5276 		hw->phy.ops.write_reg_page(hw, BM_MTA(i),
       
  5277 					   (u16)(mac_reg & 0xFFFF));
       
  5278 		hw->phy.ops.write_reg_page(hw, BM_MTA(i) + 1,
       
  5279 					   (u16)((mac_reg >> 16) & 0xFFFF));
       
  5280 	}
       
  5281 
       
  5282 	/* configure PHY Rx Control register */
       
  5283 	hw->phy.ops.read_reg_page(&adapter->hw, BM_RCTL, &phy_reg);
       
  5284 	mac_reg = er32(RCTL);
       
  5285 	if (mac_reg & E1000_RCTL_UPE)
       
  5286 		phy_reg |= BM_RCTL_UPE;
       
  5287 	if (mac_reg & E1000_RCTL_MPE)
       
  5288 		phy_reg |= BM_RCTL_MPE;
       
  5289 	phy_reg &= ~(BM_RCTL_MO_MASK);
       
  5290 	if (mac_reg & E1000_RCTL_MO_3)
       
  5291 		phy_reg |= (((mac_reg & E1000_RCTL_MO_3) >> E1000_RCTL_MO_SHIFT)
       
  5292 				<< BM_RCTL_MO_SHIFT);
       
  5293 	if (mac_reg & E1000_RCTL_BAM)
       
  5294 		phy_reg |= BM_RCTL_BAM;
       
  5295 	if (mac_reg & E1000_RCTL_PMCF)
       
  5296 		phy_reg |= BM_RCTL_PMCF;
       
  5297 	mac_reg = er32(CTRL);
       
  5298 	if (mac_reg & E1000_CTRL_RFCE)
       
  5299 		phy_reg |= BM_RCTL_RFCE;
       
  5300 	hw->phy.ops.write_reg_page(&adapter->hw, BM_RCTL, phy_reg);
       
  5301 
       
  5302 	/* enable PHY wakeup in MAC register */
       
  5303 	ew32(WUFC, wufc);
       
  5304 	ew32(WUC, E1000_WUC_PHY_WAKE | E1000_WUC_PME_EN);
       
  5305 
       
  5306 	/* configure and enable PHY wakeup in PHY registers */
       
  5307 	hw->phy.ops.write_reg_page(&adapter->hw, BM_WUFC, wufc);
       
  5308 	hw->phy.ops.write_reg_page(&adapter->hw, BM_WUC, E1000_WUC_PME_EN);
       
  5309 
       
  5310 	/* activate PHY wakeup */
       
  5311 	wuc_enable |= BM_WUC_ENABLE_BIT | BM_WUC_HOST_WU_BIT;
       
  5312 	retval = e1000_disable_phy_wakeup_reg_access_bm(hw, &wuc_enable);
       
  5313 	if (retval)
       
  5314 		e_err("Could not set PHY Host Wakeup bit\n");
       
  5315 out:
       
  5316 	hw->phy.ops.release(hw);
       
  5317 
       
  5318 	return retval;
       
  5319 }
       
  5320 
       
  5321 static int __e1000_shutdown(struct pci_dev *pdev, bool *enable_wake,
       
  5322 			    bool runtime)
       
  5323 {
       
  5324 	struct net_device *netdev = pci_get_drvdata(pdev);
       
  5325 	struct e1000_adapter *adapter = netdev_priv(netdev);
       
  5326 	struct e1000_hw *hw = &adapter->hw;
       
  5327 	u32 ctrl, ctrl_ext, rctl, status;
       
  5328 	/* Runtime suspend should only enable wakeup for link changes */
       
  5329 	u32 wufc = runtime ? E1000_WUFC_LNKC : adapter->wol;
       
  5330 	int retval = 0;
       
  5331 
       
  5332 	netif_device_detach(netdev);
       
  5333 
       
  5334 	if (netif_running(netdev)) {
       
  5335 		WARN_ON(test_bit(__E1000_RESETTING, &adapter->state));
       
  5336 		e1000e_down(adapter);
       
  5337 		e1000_free_irq(adapter);
       
  5338 	}
       
  5339 	e1000e_reset_interrupt_capability(adapter);
       
  5340 
       
  5341 	retval = pci_save_state(pdev);
       
  5342 	if (retval)
       
  5343 		return retval;
       
  5344 
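	/* the link is already up, so do not arm wake on link status change */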
       
  5345 	status = er32(STATUS);
       
  5346 	if (status & E1000_STATUS_LU)
       
  5347 		wufc &= ~E1000_WUFC_LNKC;
       
  5348 
       
  5349 	if (wufc) {
       
  5350 		e1000_setup_rctl(adapter);
       
  5351 		e1000_set_multi(netdev);
       
  5352 
       
  5353 		/* turn on all-multi mode if wake on multicast is enabled */
       
  5354 		if (wufc & E1000_WUFC_MC) {
       
  5355 			rctl = er32(RCTL);
       
  5356 			rctl |= E1000_RCTL_MPE;
       
  5357 			ew32(RCTL, rctl);
       
  5358 		}
       
  5359 
       
  5360 		ctrl = er32(CTRL);
       
  5361 		/* advertise wake from D3Cold */
       
  5362 		#define E1000_CTRL_ADVD3WUC 0x00100000
       
  5363 		/* phy power management enable */
       
  5364 		#define E1000_CTRL_EN_PHY_PWR_MGMT 0x00200000
       
  5365 		ctrl |= E1000_CTRL_ADVD3WUC;
       
  5366 		if (!(adapter->flags2 & FLAG2_HAS_PHY_WAKEUP))
       
  5367 			ctrl |= E1000_CTRL_EN_PHY_PWR_MGMT;
       
  5368 		ew32(CTRL, ctrl);
       
  5369 
       
  5370 		if (adapter->hw.phy.media_type == e1000_media_type_fiber ||
       
  5371 		    adapter->hw.phy.media_type ==
       
  5372 		    e1000_media_type_internal_serdes) {
       
  5373 			/* keep the laser running in D3 */
       
  5374 			ctrl_ext = er32(CTRL_EXT);
       
  5375 			ctrl_ext |= E1000_CTRL_EXT_SDP3_DATA;
       
  5376 			ew32(CTRL_EXT, ctrl_ext);
       
  5377 		}
       
  5378 
       
  5379 		if (adapter->flags & FLAG_IS_ICH)
       
  5380 			e1000_suspend_workarounds_ich8lan(&adapter->hw);
       
  5381 
       
  5382 		/* Allow time for pending master requests to run */
       
  5383 		e1000e_disable_pcie_master(&adapter->hw);
       
  5384 
       
  5385 		if (adapter->flags2 & FLAG2_HAS_PHY_WAKEUP) {
       
  5386 			/* enable wakeup by the PHY */
       
  5387 			retval = e1000_init_phy_wakeup(adapter, wufc);
       
  5388 			if (retval)
       
  5389 				return retval;
       
  5390 		} else {
       
  5391 			/* enable wakeup by the MAC */
       
  5392 			ew32(WUFC, wufc);
       
  5393 			ew32(WUC, E1000_WUC_PME_EN);
       
  5394 		}
       
  5395 	} else {
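		/* no wake sources requested - disable wakeup in the MAC */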
       
  5396 		ew32(WUC, 0);
       
  5397 		ew32(WUFC, 0);
       
  5398 	}
       
  5399 
       
  5400 	*enable_wake = !!wufc;
       
  5401 
       
  5402 	/* make sure adapter isn't asleep if manageability is enabled */
       
  5403 	if ((adapter->flags & FLAG_MNG_PT_ENABLED) ||
       
  5404 	    (hw->mac.ops.check_mng_mode(hw)))
       
  5405 		*enable_wake = true;
       
  5406 
       
  5407 	if (adapter->hw.phy.type == e1000_phy_igp_3)
       
  5408 		e1000e_igp3_phy_powerdown_workaround_ich8lan(&adapter->hw);
       
  5409 
       
  5410 	/*
       
  5411 	 * Release control of h/w to f/w.  If f/w is AMT enabled, this
       
  5412 	 * would have already happened in close and is redundant.
       
  5413 	 */
       
  5414 	e1000e_release_hw_control(adapter);
       
  5415 
       
  5416 	pci_disable_device(pdev);
       
  5417 
       
  5418 	return 0;
       
  5419 }
       
  5420 
       
  5421 static void e1000_power_off(struct pci_dev *pdev, bool sleep, bool wake)
       
  5422 {
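	/*
	 * For a system sleep with wakeup requested, let the PCI core pick
	 * the wake-capable low-power state; otherwise go to D3hot with
	 * wakeup armed or disarmed as requested.
	 */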
       
  5423 	if (sleep && wake) {
       
  5424 		pci_prepare_to_sleep(pdev);
       
  5425 		return;
       
  5426 	}
       
  5427 
       
  5428 	pci_wake_from_d3(pdev, wake);
       
  5429 	pci_set_power_state(pdev, PCI_D3hot);
       
  5430 }
       
  5431 
       
  5432 static void e1000_complete_shutdown(struct pci_dev *pdev, bool sleep,
       
  5433                                     bool wake)
       
  5434 {
       
  5435 	struct net_device *netdev = pci_get_drvdata(pdev);
       
  5436 	struct e1000_adapter *adapter = netdev_priv(netdev);
       
  5437 
       
  5438 	/*
       
  5439 	 * The pci-e switch on some quad port adapters will report a
       
  5440 	 * correctable error when the MAC transitions from D0 to D3.  To
       
  5441 	 * prevent this we need to mask off the correctable errors on the
       
  5442 	 * downstream port of the pci-e switch.
       
  5443 	 */
       
  5444 	if (adapter->flags & FLAG_IS_QUAD_PORT) {
       
  5445 		struct pci_dev *us_dev = pdev->bus->self;
       
  5446 		int pos = pci_pcie_cap(us_dev);
       
  5447 		u16 devctl;
       
  5448 
       
  5449 		pci_read_config_word(us_dev, pos + PCI_EXP_DEVCTL, &devctl);
       
  5450 		pci_write_config_word(us_dev, pos + PCI_EXP_DEVCTL,
       
  5451 		                      (devctl & ~PCI_EXP_DEVCTL_CERE));
       
  5452 
       
  5453 		e1000_power_off(pdev, sleep, wake);
       
  5454 
       
  5455 		pci_write_config_word(us_dev, pos + PCI_EXP_DEVCTL, devctl);
       
  5456 	} else {
       
  5457 		e1000_power_off(pdev, sleep, wake);
       
  5458 	}
       
  5459 }
       
  5460 
       
  5461 #ifdef CONFIG_PCIEASPM
       
  5462 static void __e1000e_disable_aspm(struct pci_dev *pdev, u16 state)
       
  5463 {
       
  5464 	pci_disable_link_state_locked(pdev, state);
       
  5465 }
       
  5466 #else
       
  5467 static void __e1000e_disable_aspm(struct pci_dev *pdev, u16 state)
       
  5468 {
       
  5469 	int pos;
       
  5470 	u16 reg16;
       
  5471 
       
  5472 	/*
       
  5473 	 * Both device and parent should have the same ASPM setting.
       
  5474 	 * Disable ASPM in downstream component first and then upstream.
       
  5475 	 */
       
  5476 	pos = pci_pcie_cap(pdev);
       
  5477 	pci_read_config_word(pdev, pos + PCI_EXP_LNKCTL, &reg16);
       
  5478 	reg16 &= ~state;
       
  5479 	pci_write_config_word(pdev, pos + PCI_EXP_LNKCTL, reg16);
       
  5480 
       
  5481 	if (!pdev->bus->self)
       
  5482 		return;
       
  5483 
       
  5484 	pos = pci_pcie_cap(pdev->bus->self);
       
  5485 	pci_read_config_word(pdev->bus->self, pos + PCI_EXP_LNKCTL, &reg16);
       
  5486 	reg16 &= ~state;
       
  5487 	pci_write_config_word(pdev->bus->self, pos + PCI_EXP_LNKCTL, reg16);
       
  5488 }
       
  5489 #endif
       
  5490 static void e1000e_disable_aspm(struct pci_dev *pdev, u16 state)
       
  5491 {
       
  5492 	dev_info(&pdev->dev, "Disabling ASPM %s %s\n",
       
  5493 		 (state & PCIE_LINK_STATE_L0S) ? "L0s" : "",
       
  5494 		 (state & PCIE_LINK_STATE_L1) ? "L1" : "");
       
  5495 
       
  5496 	__e1000e_disable_aspm(pdev, state);
       
  5497 }
       
  5498 
       
  5499 #ifdef CONFIG_PM
       
  5500 static bool e1000e_pm_ready(struct e1000_adapter *adapter)
       
  5501 {
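	/* the adapter is ready for PM transitions only once its Tx ring has been allocated */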
       
  5502 	return !!adapter->tx_ring->buffer_info;
       
  5503 }
       
  5504 
       
  5505 static int __e1000_resume(struct pci_dev *pdev)
       
  5506 {
       
  5507 	struct net_device *netdev = pci_get_drvdata(pdev);
       
  5508 	struct e1000_adapter *adapter = netdev_priv(netdev);
       
  5509 	struct e1000_hw *hw = &adapter->hw;
       
  5510 	u16 aspm_disable_flag = 0;
       
  5511 	u32 err;
       
  5512 
       
  5513 	if (adapter->flags2 & FLAG2_DISABLE_ASPM_L0S)
       
  5514 		aspm_disable_flag = PCIE_LINK_STATE_L0S;
       
  5515 	if (adapter->flags2 & FLAG2_DISABLE_ASPM_L1)
       
  5516 		aspm_disable_flag |= PCIE_LINK_STATE_L1;
       
  5517 	if (aspm_disable_flag)
       
  5518 		e1000e_disable_aspm(pdev, aspm_disable_flag);
       
  5519 
       
  5520 	pci_set_power_state(pdev, PCI_D0);
       
  5521 	pci_restore_state(pdev);
       
  5522 	pci_save_state(pdev);
       
  5523 
       
  5524 	e1000e_set_interrupt_capability(adapter);
       
  5525 	if (netif_running(netdev)) {
       
  5526 		err = e1000_request_irq(adapter);
       
  5527 		if (err)
       
  5528 			return err;
       
  5529 	}
       
  5530 
       
  5531 	if (hw->mac.type == e1000_pch2lan)
       
  5532 		e1000_resume_workarounds_pchlan(&adapter->hw);
       
  5533 
       
  5534 	e1000e_power_up_phy(adapter);
       
  5535 
       
  5536 	/* report the system wakeup cause from S3/S4 */
       
  5537 	if (adapter->flags2 & FLAG2_HAS_PHY_WAKEUP) {
       
  5538 		u16 phy_data;
       
  5539 
       
  5540 		e1e_rphy(&adapter->hw, BM_WUS, &phy_data);
       
  5541 		if (phy_data) {
       
  5542 			e_info("PHY Wakeup cause - %s\n",
       
  5543 				phy_data & E1000_WUS_EX ? "Unicast Packet" :
       
  5544 				phy_data & E1000_WUS_MC ? "Multicast Packet" :
       
  5545 				phy_data & E1000_WUS_BC ? "Broadcast Packet" :
       
  5546 				phy_data & E1000_WUS_MAG ? "Magic Packet" :
       
  5547 				phy_data & E1000_WUS_LNKC ? "Link Status Change" :
       
  5548 				"other");
       
  5549 		}
       
  5550 		e1e_wphy(&adapter->hw, BM_WUS, ~0);
       
  5551 	} else {
       
  5552 		u32 wus = er32(WUS);
       
  5553 		if (wus) {
       
  5554 			e_info("MAC Wakeup cause - %s\n",
       
  5555 				wus & E1000_WUS_EX ? "Unicast Packet" :
       
  5556 				wus & E1000_WUS_MC ? "Multicast Packet" :
       
  5557 				wus & E1000_WUS_BC ? "Broadcast Packet" :
       
  5558 				wus & E1000_WUS_MAG ? "Magic Packet" :
       
  5559 				wus & E1000_WUS_LNKC ? "Link Status Change" :
       
  5560 				"other");
       
  5561 		}
       
  5562 		ew32(WUS, ~0);
       
  5563 	}
       
  5564 
       
  5565 	e1000e_reset(adapter);
       
  5566 
       
  5567 	e1000_init_manageability_pt(adapter);
       
  5568 
       
  5569 	if (netif_running(netdev))
       
  5570 		e1000e_up(adapter);
       
  5571 
       
  5572 	netif_device_attach(netdev);
       
  5573 
       
  5574 	/*
       
  5575 	 * If the controller has AMT, do not set DRV_LOAD until the interface
       
  5576 	 * is up.  For all other cases, let the f/w know that the h/w is now
       
  5577 	 * under the control of the driver.
       
  5578 	 */
       
  5579 	if (!(adapter->flags & FLAG_HAS_AMT))
       
  5580 		e1000e_get_hw_control(adapter);
       
  5581 
       
  5582 	return 0;
       
  5583 }
       
  5584 
       
  5585 #ifdef CONFIG_PM_SLEEP
       
  5586 static int e1000_suspend(struct device *dev)
       
  5587 {
       
  5588 	struct pci_dev *pdev = to_pci_dev(dev);
       
  5589 	int retval;
       
  5590 	bool wake;
       
  5591 
       
  5592 	retval = __e1000_shutdown(pdev, &wake, false);
       
  5593 	if (!retval)
       
  5594 		e1000_complete_shutdown(pdev, true, wake);
       
  5595 
       
  5596 	return retval;
       
  5597 }
       
  5598 
       
  5599 static int e1000_resume(struct device *dev)
       
  5600 {
       
  5601 	struct pci_dev *pdev = to_pci_dev(dev);
       
  5602 	struct net_device *netdev = pci_get_drvdata(pdev);
       
  5603 	struct e1000_adapter *adapter = netdev_priv(netdev);
       
  5604 
       
  5605 	if (e1000e_pm_ready(adapter))
       
  5606 		adapter->idle_check = true;
       
  5607 
       
  5608 	return __e1000_resume(pdev);
       
  5609 }
       
  5610 #endif /* CONFIG_PM_SLEEP */
       
  5611 
       
  5612 #ifdef CONFIG_PM_RUNTIME
       
  5613 static int e1000_runtime_suspend(struct device *dev)
       
  5614 {
       
  5615 	struct pci_dev *pdev = to_pci_dev(dev);
       
  5616 	struct net_device *netdev = pci_get_drvdata(pdev);
       
  5617 	struct e1000_adapter *adapter = netdev_priv(netdev);
       
  5618 
       
  5619 	if (e1000e_pm_ready(adapter)) {
       
  5620 		bool wake;
       
  5621 
       
  5622 		__e1000_shutdown(pdev, &wake, true);
       
  5623 	}
       
  5624 
       
  5625 	return 0;
       
  5626 }
       
  5627 
       
  5628 static int e1000_idle(struct device *dev)
       
  5629 {
       
  5630 	struct pci_dev *pdev = to_pci_dev(dev);
       
  5631 	struct net_device *netdev = pci_get_drvdata(pdev);
       
  5632 	struct e1000_adapter *adapter = netdev_priv(netdev);
       
  5633 
       
  5634 	if (!e1000e_pm_ready(adapter))
       
  5635 		return 0;
       
  5636 
       
  5637 	if (adapter->idle_check) {
       
  5638 		adapter->idle_check = false;
       
  5639 		if (!e1000e_has_link(adapter))
       
  5640 			pm_schedule_suspend(dev, MSEC_PER_SEC);
       
  5641 	}
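	/* a non-zero return prevents an immediate runtime suspend; any suspend was scheduled above */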
       
  5642 
       
  5643 	return -EBUSY;
       
  5644 }
       
  5645 
       
  5646 static int e1000_runtime_resume(struct device *dev)
       
  5647 {
       
  5648 	struct pci_dev *pdev = to_pci_dev(dev);
       
  5649 	struct net_device *netdev = pci_get_drvdata(pdev);
       
  5650 	struct e1000_adapter *adapter = netdev_priv(netdev);
       
  5651 
       
  5652 	if (!e1000e_pm_ready(adapter))
       
  5653 		return 0;
       
  5654 
       
  5655 	adapter->idle_check = !dev->power.runtime_auto;
       
  5656 	return __e1000_resume(pdev);
       
  5657 }
       
  5658 #endif /* CONFIG_PM_RUNTIME */
       
  5659 #endif /* CONFIG_PM */
       
  5660 
       
  5661 static void e1000_shutdown(struct pci_dev *pdev)
       
  5662 {
       
  5663 	bool wake = false;
       
  5664 
       
  5665 	__e1000_shutdown(pdev, &wake, false);
       
  5666 
       
  5667 	if (system_state == SYSTEM_POWER_OFF)
       
  5668 		e1000_complete_shutdown(pdev, false, wake);
       
  5669 }
       
  5670 
       
  5671 #ifdef CONFIG_NET_POLL_CONTROLLER
       
  5672 
       
  5673 static irqreturn_t e1000_intr_msix(int irq, void *data)
       
  5674 {
       
  5675 	struct net_device *netdev = data;
       
  5676 	struct e1000_adapter *adapter = netdev_priv(netdev);
       
  5677 
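	/*
	 * Service the Rx, Tx and "other" MSI-X vectors in turn, masking
	 * each vector's IRQ while its handler runs.
	 */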
       
  5678 	if (adapter->msix_entries) {
       
  5679 		int vector, msix_irq;
       
  5680 
       
  5681 		vector = 0;
       
  5682 		msix_irq = adapter->msix_entries[vector].vector;
       
  5683 		disable_irq(msix_irq);
       
  5684 		e1000_intr_msix_rx(msix_irq, netdev);
       
  5685 		enable_irq(msix_irq);
       
  5686 
       
  5687 		vector++;
       
  5688 		msix_irq = adapter->msix_entries[vector].vector;
       
  5689 		disable_irq(msix_irq);
       
  5690 		e1000_intr_msix_tx(msix_irq, netdev);
       
  5691 		enable_irq(msix_irq);
       
  5692 
       
  5693 		vector++;
       
  5694 		msix_irq = adapter->msix_entries[vector].vector;
       
  5695 		disable_irq(msix_irq);
       
  5696 		e1000_msix_other(msix_irq, netdev);
       
  5697 		enable_irq(msix_irq);
       
  5698 	}
       
  5699 
       
  5700 	return IRQ_HANDLED;
       
  5701 }
       
  5702 
       
  5703 /*
       
  5704  * Polling 'interrupt' - used by things like netconsole to send skbs
       
  5705  * without having to re-enable interrupts. It's not called while
       
  5706  * the interrupt routine is executing.
       
  5707  */
       
  5708 static void e1000_netpoll(struct net_device *netdev)
       
  5709 {
       
  5710 	struct e1000_adapter *adapter = netdev_priv(netdev);
       
  5711 
       
  5712 	switch (adapter->int_mode) {
       
  5713 	case E1000E_INT_MODE_MSIX:
       
  5714 		e1000_intr_msix(adapter->pdev->irq, netdev);
       
  5715 		break;
       
  5716 	case E1000E_INT_MODE_MSI:
       
  5717 		disable_irq(adapter->pdev->irq);
       
  5718 		e1000_intr_msi(adapter->pdev->irq, netdev);
       
  5719 		enable_irq(adapter->pdev->irq);
       
  5720 		break;
       
  5721 	default: /* E1000E_INT_MODE_LEGACY */
       
  5722 		disable_irq(adapter->pdev->irq);
       
  5723 		e1000_intr(adapter->pdev->irq, netdev);
       
  5724 		enable_irq(adapter->pdev->irq);
       
  5725 		break;
       
  5726 	}
       
  5727 }
       
  5728 #endif
       
  5729 
       
  5730 /**
       
  5731  * e1000_io_error_detected - called when PCI error is detected
       
  5732  * @pdev: Pointer to PCI device
       
  5733  * @state: The current pci connection state
       
  5734  *
       
  5735  * This function is called after a PCI bus error affecting
       
  5736  * this device has been detected.
       
  5737  */
       
  5738 static pci_ers_result_t e1000_io_error_detected(struct pci_dev *pdev,
       
  5739 						pci_channel_state_t state)
       
  5740 {
       
  5741 	struct net_device *netdev = pci_get_drvdata(pdev);
       
  5742 	struct e1000_adapter *adapter = netdev_priv(netdev);
       
  5743 
       
  5744 	netif_device_detach(netdev);
       
  5745 
       
  5746 	if (state == pci_channel_io_perm_failure)
       
  5747 		return PCI_ERS_RESULT_DISCONNECT;
       
  5748 
       
  5749 	if (netif_running(netdev))
       
  5750 		e1000e_down(adapter);
       
  5751 	pci_disable_device(pdev);
       
  5752 
       
  5753 	/* Request a slot reset. */
       
  5754 	return PCI_ERS_RESULT_NEED_RESET;
       
  5755 }
       
  5756 
       
  5757 /**
       
  5758  * e1000_io_slot_reset - called after the pci bus has been reset.
       
  5759  * @pdev: Pointer to PCI device
       
  5760  *
       
  5761  * Restart the card from scratch, as if from a cold boot. Implementation
       
  5762  * resembles the first half of the e1000_resume routine.
       
  5763  */
       
  5764 static pci_ers_result_t e1000_io_slot_reset(struct pci_dev *pdev)
       
  5765 {
       
  5766 	struct net_device *netdev = pci_get_drvdata(pdev);
       
  5767 	struct e1000_adapter *adapter = netdev_priv(netdev);
       
  5768 	struct e1000_hw *hw = &adapter->hw;
       
  5769 	u16 aspm_disable_flag = 0;
       
  5770 	int err;
       
  5771 	pci_ers_result_t result;
       
  5772 
       
  5773 	if (adapter->flags2 & FLAG2_DISABLE_ASPM_L0S)
       
  5774 		aspm_disable_flag = PCIE_LINK_STATE_L0S;
       
  5775 	if (adapter->flags2 & FLAG2_DISABLE_ASPM_L1)
       
  5776 		aspm_disable_flag |= PCIE_LINK_STATE_L1;
       
  5777 	if (aspm_disable_flag)
       
  5778 		e1000e_disable_aspm(pdev, aspm_disable_flag);
       
  5779 
       
  5780 	err = pci_enable_device_mem(pdev);
       
  5781 	if (err) {
       
  5782 		dev_err(&pdev->dev,
       
  5783 			"Cannot re-enable PCI device after reset.\n");
       
  5784 		result = PCI_ERS_RESULT_DISCONNECT;
       
  5785 	} else {
       
  5786 		pci_set_master(pdev);
       
  5787 		pdev->state_saved = true;
       
  5788 		pci_restore_state(pdev);
       
  5789 
       
  5790 		pci_enable_wake(pdev, PCI_D3hot, 0);
       
  5791 		pci_enable_wake(pdev, PCI_D3cold, 0);
       
  5792 
       
  5793 		e1000e_reset(adapter);
       
  5794 		ew32(WUS, ~0);
       
  5795 		result = PCI_ERS_RESULT_RECOVERED;
       
  5796 	}
       
  5797 
       
  5798 	pci_cleanup_aer_uncorrect_error_status(pdev);
       
  5799 
       
  5800 	return result;
       
  5801 }
       
  5802 
       
  5803 /**
       
  5804  * e1000_io_resume - called when traffic can start flowing again.
       
  5805  * @pdev: Pointer to PCI device
       
  5806  *
       
  5807  * This callback is called when the error recovery driver tells us that
       
  5808  * it's OK to resume normal operation. Implementation resembles the
       
  5809  * second half of the e1000_resume routine.
       
  5810  */
       
  5811 static void e1000_io_resume(struct pci_dev *pdev)
       
  5812 {
       
  5813 	struct net_device *netdev = pci_get_drvdata(pdev);
       
  5814 	struct e1000_adapter *adapter = netdev_priv(netdev);
       
  5815 
       
  5816 	e1000_init_manageability_pt(adapter);
       
  5817 
       
  5818 	if (netif_running(netdev)) {
       
  5819 		if (e1000e_up(adapter)) {
       
  5820 			dev_err(&pdev->dev,
       
  5821 				"can't bring device back up after reset\n");
       
  5822 			return;
       
  5823 		}
       
  5824 	}
       
  5825 
       
  5826 	netif_device_attach(netdev);
       
  5827 
       
  5828 	/*
       
  5829 	 * If the controller has AMT, do not set DRV_LOAD until the interface
       
  5830 	 * is up.  For all other cases, let the f/w know that the h/w is now
       
  5831 	 * under the control of the driver.
       
  5832 	 */
       
  5833 	if (!(adapter->flags & FLAG_HAS_AMT))
       
  5834 		e1000e_get_hw_control(adapter);
       
  5835 
       
  5836 }
       
  5837 
       
  5838 static void e1000_print_device_info(struct e1000_adapter *adapter)
       
  5839 {
       
  5840 	struct e1000_hw *hw = &adapter->hw;
       
  5841 	struct net_device *netdev = adapter->netdev;
       
  5842 	u32 ret_val;
       
  5843 	u8 pba_str[E1000_PBANUM_LENGTH];
       
  5844 
       
  5845 	/* print bus type/speed/width info */
       
  5846 	e_info("(PCI Express:2.5GT/s:%s) %pM\n",
       
  5847 	       /* bus width */
       
  5848 	       ((hw->bus.width == e1000_bus_width_pcie_x4) ? "Width x4" :
       
  5849 	        "Width x1"),
       
  5850 	       /* MAC address */
       
  5851 	       netdev->dev_addr);
       
  5852 	e_info("Intel(R) PRO/%s Network Connection\n",
       
  5853 	       (hw->phy.type == e1000_phy_ife) ? "10/100" : "1000");
       
  5854 	ret_val = e1000_read_pba_string_generic(hw, pba_str,
       
  5855 						E1000_PBANUM_LENGTH);
       
  5856 	if (ret_val)
       
  5857 		strncpy((char *)pba_str, "Unknown", sizeof(pba_str) - 1);
       
  5858 	e_info("MAC: %d, PHY: %d, PBA No: %s\n",
       
  5859 	       hw->mac.type, hw->phy.type, pba_str);
       
  5860 }
       
  5861 
       
  5862 static void e1000_eeprom_checks(struct e1000_adapter *adapter)
       
  5863 {
       
  5864 	struct e1000_hw *hw = &adapter->hw;
       
  5865 	int ret_val;
       
  5866 	u16 buf = 0;
       
  5867 
       
  5868 	if (hw->mac.type != e1000_82573)
       
  5869 		return;
       
  5870 
       
  5871 	ret_val = e1000_read_nvm(hw, NVM_INIT_CONTROL2_REG, 1, &buf);
       
  5872 	if (!ret_val && (!(le16_to_cpu(buf) & (1 << 0)))) {
       
  5873 		/* Deep Smart Power Down (DSPD) */
       
  5874 		dev_warn(&adapter->pdev->dev,
       
  5875 			 "Warning: detected DSPD enabled in EEPROM\n");
       
  5876 	}
       
  5877 }
       
  5878 
       
  5879 static int e1000_set_features(struct net_device *netdev, u32 features)
       
  5880 {
       
  5881 	struct e1000_adapter *adapter = netdev_priv(netdev);
       
  5882 	u32 changed = features ^ netdev->features;
       
  5883 
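	/* TSO was changed explicitly by the user - flag it so the driver keeps that choice */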
       
  5884 	if (changed & (NETIF_F_TSO | NETIF_F_TSO6))
       
  5885 		adapter->flags |= FLAG_TSO_FORCE;
       
  5886 
       
  5887 	if (!(changed & (NETIF_F_HW_VLAN_RX | NETIF_F_HW_VLAN_TX |
       
  5888 			 NETIF_F_RXCSUM)))
       
  5889 		return 0;
       
  5890 
       
  5891 	if (netif_running(netdev))
       
  5892 		e1000e_reinit_locked(adapter);
       
  5893 	else
       
  5894 		e1000e_reset(adapter);
       
  5895 
       
  5896 	return 0;
       
  5897 }
       
  5898 
       
  5899 static const struct net_device_ops e1000e_netdev_ops = {
       
  5900 	.ndo_open		= e1000_open,
       
  5901 	.ndo_stop		= e1000_close,
       
  5902 	.ndo_start_xmit		= e1000_xmit_frame,
       
  5903 	.ndo_get_stats64	= e1000e_get_stats64,
       
  5904 	.ndo_set_rx_mode	= e1000_set_multi,
       
  5905 	.ndo_set_mac_address	= e1000_set_mac,
       
  5906 	.ndo_change_mtu		= e1000_change_mtu,
       
  5907 	.ndo_do_ioctl		= e1000_ioctl,
       
  5908 	.ndo_tx_timeout		= e1000_tx_timeout,
       
  5909 	.ndo_validate_addr	= eth_validate_addr,
       
  5910 
       
  5911 	.ndo_vlan_rx_add_vid	= e1000_vlan_rx_add_vid,
       
  5912 	.ndo_vlan_rx_kill_vid	= e1000_vlan_rx_kill_vid,
       
  5913 #ifdef CONFIG_NET_POLL_CONTROLLER
       
  5914 	.ndo_poll_controller	= e1000_netpoll,
       
  5915 #endif
       
  5916 	.ndo_set_features = e1000_set_features,
       
  5917 };
       
  5918 
       
  5919 /**
       
  5920  * e1000_probe - Device Initialization Routine
       
  5921  * @pdev: PCI device information struct
       
  5922  * @ent: entry in e1000_pci_tbl
       
  5923  *
       
  5924  * Returns 0 on success, negative on failure
       
  5925  *
       
  5926  * e1000_probe initializes an adapter identified by a pci_dev structure.
       
  5927  * The OS initialization, configuring of the adapter private structure,
       
  5928  * and a hardware reset occur.
       
  5929  **/
       
  5930 static int __devinit e1000_probe(struct pci_dev *pdev,
       
  5931 				 const struct pci_device_id *ent)
       
  5932 {
       
  5933 	struct net_device *netdev;
       
  5934 	struct e1000_adapter *adapter;
       
  5935 	struct e1000_hw *hw;
       
  5936 	const struct e1000_info *ei = e1000_info_tbl[ent->driver_data];
       
  5937 	resource_size_t mmio_start, mmio_len;
       
  5938 	resource_size_t flash_start, flash_len;
       
  5939 
       
  5940 	static int cards_found;
       
  5941 	u16 aspm_disable_flag = 0;
       
  5942 	int i, err, pci_using_dac;
       
  5943 	u16 eeprom_data = 0;
       
  5944 	u16 eeprom_apme_mask = E1000_EEPROM_APME;
       
  5945 
       
  5946 	if (ei->flags2 & FLAG2_DISABLE_ASPM_L0S)
       
  5947 		aspm_disable_flag = PCIE_LINK_STATE_L0S;
       
  5948 	if (ei->flags2 & FLAG2_DISABLE_ASPM_L1)
       
  5949 		aspm_disable_flag |= PCIE_LINK_STATE_L1;
       
  5950 	if (aspm_disable_flag)
       
  5951 		e1000e_disable_aspm(pdev, aspm_disable_flag);
       
  5952 
       
  5953 	err = pci_enable_device_mem(pdev);
       
  5954 	if (err)
       
  5955 		return err;
       
  5956 
       
  5957 	pci_using_dac = 0;
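	/* try a 64-bit DMA mask first and fall back to 32-bit if that fails */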
       
  5958 	err = dma_set_mask(&pdev->dev, DMA_BIT_MASK(64));
       
  5959 	if (!err) {
       
  5960 		err = dma_set_coherent_mask(&pdev->dev, DMA_BIT_MASK(64));
       
  5961 		if (!err)
       
  5962 			pci_using_dac = 1;
       
  5963 	} else {
       
  5964 		err = dma_set_mask(&pdev->dev, DMA_BIT_MASK(32));
       
  5965 		if (err) {
       
  5966 			err = dma_set_coherent_mask(&pdev->dev,
       
  5967 						    DMA_BIT_MASK(32));
       
  5968 			if (err) {
       
  5969 				dev_err(&pdev->dev, "No usable DMA "
       
  5970 					"configuration, aborting\n");
       
  5971 				goto err_dma;
       
  5972 			}
       
  5973 		}
       
  5974 	}
       
  5975 
       
  5976 	err = pci_request_selected_regions_exclusive(pdev,
       
  5977 	                                  pci_select_bars(pdev, IORESOURCE_MEM),
       
  5978 	                                  e1000e_driver_name);
       
  5979 	if (err)
       
  5980 		goto err_pci_reg;
       
  5981 
       
  5982 	/* AER (Advanced Error Reporting) hooks */
       
  5983 	pci_enable_pcie_error_reporting(pdev);
       
  5984 
       
  5985 	pci_set_master(pdev);
       
  5986 	/* PCI config space info */
       
  5987 	err = pci_save_state(pdev);
       
  5988 	if (err)
       
  5989 		goto err_alloc_etherdev;
       
  5990 
       
  5991 	err = -ENOMEM;
       
  5992 	netdev = alloc_etherdev(sizeof(struct e1000_adapter));
       
  5993 	if (!netdev)
       
  5994 		goto err_alloc_etherdev;
       
  5995 
       
  5996 	SET_NETDEV_DEV(netdev, &pdev->dev);
       
  5997 
       
  5998 	netdev->irq = pdev->irq;
       
  5999 
       
  6000 	pci_set_drvdata(pdev, netdev);
       
  6001 	adapter = netdev_priv(netdev);
       
  6002 	hw = &adapter->hw;
       
  6003 	adapter->netdev = netdev;
       
  6004 	adapter->pdev = pdev;
       
  6005 	adapter->ei = ei;
       
  6006 	adapter->pba = ei->pba;
       
  6007 	adapter->flags = ei->flags;
       
  6008 	adapter->flags2 = ei->flags2;
       
  6009 	adapter->hw.adapter = adapter;
       
  6010 	adapter->hw.mac.type = ei->mac;
       
  6011 	adapter->max_hw_frame_size = ei->max_hw_frame_size;
       
  6012 	adapter->msg_enable = (1 << NETIF_MSG_DRV | NETIF_MSG_PROBE) - 1;
       
  6013 
       
  6014 	mmio_start = pci_resource_start(pdev, 0);
       
  6015 	mmio_len = pci_resource_len(pdev, 0);
       
  6016 
       
  6017 	err = -EIO;
       
  6018 	adapter->hw.hw_addr = ioremap(mmio_start, mmio_len);
       
  6019 	if (!adapter->hw.hw_addr)
       
  6020 		goto err_ioremap;
       
  6021 
       
  6022 	if ((adapter->flags & FLAG_HAS_FLASH) &&
       
  6023 	    (pci_resource_flags(pdev, 1) & IORESOURCE_MEM)) {
       
  6024 		flash_start = pci_resource_start(pdev, 1);
       
  6025 		flash_len = pci_resource_len(pdev, 1);
       
  6026 		adapter->hw.flash_address = ioremap(flash_start, flash_len);
       
  6027 		if (!adapter->hw.flash_address)
       
  6028 			goto err_flashmap;
       
  6029 	}
       
  6030 
       
  6031 	/* construct the net_device struct */
       
  6032 	netdev->netdev_ops		= &e1000e_netdev_ops;
       
  6033 	e1000e_set_ethtool_ops(netdev);
       
  6034 	netdev->watchdog_timeo		= 5 * HZ;
       
  6035 	netif_napi_add(netdev, &adapter->napi, e1000_clean, 64);
       
  6036 	strncpy(netdev->name, pci_name(pdev), sizeof(netdev->name) - 1);
       
  6037 
       
  6038 	netdev->mem_start = mmio_start;
       
  6039 	netdev->mem_end = mmio_start + mmio_len;
       
  6040 
       
  6041 	adapter->bd_number = cards_found++;
       
  6042 
       
  6043 	e1000e_check_options(adapter);
       
  6044 
       
  6045 	/* setup adapter struct */
       
  6046 	err = e1000_sw_init(adapter);
       
  6047 	if (err)
       
  6048 		goto err_sw_init;
       
  6049 
       
  6050 	memcpy(&hw->mac.ops, ei->mac_ops, sizeof(hw->mac.ops));
       
  6051 	memcpy(&hw->nvm.ops, ei->nvm_ops, sizeof(hw->nvm.ops));
       
  6052 	memcpy(&hw->phy.ops, ei->phy_ops, sizeof(hw->phy.ops));
       
  6053 
       
  6054 	err = ei->get_variants(adapter);
       
  6055 	if (err)
       
  6056 		goto err_hw_init;
       
  6057 
       
  6058 	if ((adapter->flags & FLAG_IS_ICH) &&
       
  6059 	    (adapter->flags & FLAG_READ_ONLY_NVM))
       
  6060 		e1000e_write_protect_nvm_ich8lan(&adapter->hw);
       
  6061 
       
  6062 	hw->mac.ops.get_bus_info(&adapter->hw);
       
  6063 
       
  6064 	adapter->hw.phy.autoneg_wait_to_complete = 0;
       
  6065 
       
  6066 	/* Copper options */
       
  6067 	if (adapter->hw.phy.media_type == e1000_media_type_copper) {
       
  6068 		adapter->hw.phy.mdix = AUTO_ALL_MODES;
       
  6069 		adapter->hw.phy.disable_polarity_correction = 0;
       
  6070 		adapter->hw.phy.ms_type = e1000_ms_hw_default;
       
  6071 	}
       
  6072 
       
  6073 	if (e1000_check_reset_block(&adapter->hw))
       
  6074 		e_info("PHY reset is blocked due to SOL/IDER session.\n");
       
  6075 
       
  6076 	/* Set initial default active device features */
       
  6077 	netdev->features = (NETIF_F_SG |
       
  6078 			    NETIF_F_HW_VLAN_RX |
       
  6079 			    NETIF_F_HW_VLAN_TX |
       
  6080 			    NETIF_F_TSO |
       
  6081 			    NETIF_F_TSO6 |
       
  6082 			    NETIF_F_RXCSUM |
       
  6083 			    NETIF_F_HW_CSUM);
       
  6084 
       
  6085 	/* Set user-changeable features (subset of all device features) */
       
  6086 	netdev->hw_features = netdev->features;
       
  6087 
       
  6088 	if (adapter->flags & FLAG_HAS_HW_VLAN_FILTER)
       
  6089 		netdev->features |= NETIF_F_HW_VLAN_FILTER;
       
  6090 
       
  6091 	netdev->vlan_features |= (NETIF_F_SG |
       
  6092 				  NETIF_F_TSO |
       
  6093 				  NETIF_F_TSO6 |
       
  6094 				  NETIF_F_HW_CSUM);
       
  6095 
       
  6096 	if (pci_using_dac) {
       
  6097 		netdev->features |= NETIF_F_HIGHDMA;
       
  6098 		netdev->vlan_features |= NETIF_F_HIGHDMA;
       
  6099 	}
       
  6100 
       
  6101 	if (e1000e_enable_mng_pass_thru(&adapter->hw))
       
  6102 		adapter->flags |= FLAG_MNG_PT_ENABLED;
       
  6103 
       
  6104 	/*
       
  6105 	 * before reading the NVM, reset the controller to
       
  6106 	 * put the device in a known good starting state
       
  6107 	 */
       
  6108 	adapter->hw.mac.ops.reset_hw(&adapter->hw);
       
  6109 
       
  6110 	/*
       
  6111 	 * systems with ASPM and others may see the checksum fail on the first
       
  6112 	 * attempt. Let's give it a few tries
       
  6113 	 */
       
  6114 	for (i = 0;; i++) {
       
  6115 		if (e1000_validate_nvm_checksum(&adapter->hw) >= 0)
       
  6116 			break;
       
  6117 		if (i == 2) {
       
  6118 			e_err("The NVM Checksum Is Not Valid\n");
       
  6119 			err = -EIO;
       
  6120 			goto err_eeprom;
       
  6121 		}
       
  6122 	}
       
  6123 
       
  6124 	e1000_eeprom_checks(adapter);
       
  6125 
       
  6126 	/* copy the MAC address */
       
  6127 	if (e1000e_read_mac_addr(&adapter->hw))
       
  6128 		e_err("NVM Read Error while reading MAC address\n");
       
  6129 
       
  6130 	memcpy(netdev->dev_addr, adapter->hw.mac.addr, netdev->addr_len);
       
  6131 	memcpy(netdev->perm_addr, adapter->hw.mac.addr, netdev->addr_len);
       
  6132 
       
  6133 	if (!is_valid_ether_addr(netdev->perm_addr)) {
       
  6134 		e_err("Invalid MAC Address: %pM\n", netdev->perm_addr);
       
  6135 		err = -EIO;
       
  6136 		goto err_eeprom;
       
  6137 	}
       
  6138 
       
  6139 	init_timer(&adapter->watchdog_timer);
       
  6140 	adapter->watchdog_timer.function = e1000_watchdog;
       
  6141 	adapter->watchdog_timer.data = (unsigned long) adapter;
       
  6142 
       
  6143 	init_timer(&adapter->phy_info_timer);
       
  6144 	adapter->phy_info_timer.function = e1000_update_phy_info;
       
  6145 	adapter->phy_info_timer.data = (unsigned long) adapter;
       
  6146 
       
  6147 	INIT_WORK(&adapter->reset_task, e1000_reset_task);
       
  6148 	INIT_WORK(&adapter->watchdog_task, e1000_watchdog_task);
       
  6149 	INIT_WORK(&adapter->downshift_task, e1000e_downshift_workaround);
       
  6150 	INIT_WORK(&adapter->update_phy_task, e1000e_update_phy_task);
       
  6151 	INIT_WORK(&adapter->print_hang_task, e1000_print_hw_hang);
       
  6152 
       
  6153 	/* Initialize link parameters. User can change them with ethtool */
       
  6154 	adapter->hw.mac.autoneg = 1;
       
  6155 	adapter->fc_autoneg = 1;
       
  6156 	adapter->hw.fc.requested_mode = e1000_fc_default;
       
  6157 	adapter->hw.fc.current_mode = e1000_fc_default;
       
  6158 	adapter->hw.phy.autoneg_advertised = 0x2f;
       
  6159 
       
  6160 	/* ring size defaults */
       
  6161 	adapter->rx_ring->count = 256;
       
  6162 	adapter->tx_ring->count = 256;
       
  6163 
       
  6164 	/*
       
  6165 	 * Initial Wake on LAN setting - If APM wake is enabled in
       
  6166 	 * the EEPROM, enable the ACPI Magic Packet filter
       
  6167 	 */
       
  6168 	if (adapter->flags & FLAG_APME_IN_WUC) {
       
  6169 		/* APME bit in EEPROM is mapped to WUC.APME */
       
  6170 		eeprom_data = er32(WUC);
       
  6171 		eeprom_apme_mask = E1000_WUC_APME;
       
  6172 		if ((hw->mac.type > e1000_ich10lan) &&
       
  6173 		    (eeprom_data & E1000_WUC_PHY_WAKE))
       
  6174 			adapter->flags2 |= FLAG2_HAS_PHY_WAKEUP;
       
  6175 	} else if (adapter->flags & FLAG_APME_IN_CTRL3) {
       
  6176 		if (adapter->flags & FLAG_APME_CHECK_PORT_B &&
       
  6177 		    (adapter->hw.bus.func == 1))
       
  6178 			e1000_read_nvm(&adapter->hw,
       
  6179 				NVM_INIT_CONTROL3_PORT_B, 1, &eeprom_data);
       
  6180 		else
       
  6181 			e1000_read_nvm(&adapter->hw,
       
  6182 				NVM_INIT_CONTROL3_PORT_A, 1, &eeprom_data);
       
  6183 	}
       
  6184 
       
  6185 	/* fetch WoL from EEPROM */
       
  6186 	if (eeprom_data & eeprom_apme_mask)
       
  6187 		adapter->eeprom_wol |= E1000_WUFC_MAG;
       
  6188 
       
  6189 	/*
       
  6190 	 * now that we have the eeprom settings, apply the special cases
       
  6191 	 * where the eeprom may be wrong or the board simply won't support
       
  6192 	 * wake on lan on a particular port
       
  6193 	 */
       
  6194 	if (!(adapter->flags & FLAG_HAS_WOL))
       
  6195 		adapter->eeprom_wol = 0;
       
  6196 
       
  6197 	/* initialize the wol settings based on the eeprom settings */
       
  6198 	adapter->wol = adapter->eeprom_wol;
       
  6199 	device_set_wakeup_enable(&adapter->pdev->dev, adapter->wol);
       
  6200 
       
  6201 	/* save off EEPROM version number */
       
  6202 	e1000_read_nvm(&adapter->hw, 5, 1, &adapter->eeprom_vers);
       
  6203 
       
  6204 	/* reset the hardware with the new settings */
       
  6205 	e1000e_reset(adapter);
       
  6206 
       
  6207 	/*
       
  6208 	 * If the controller has AMT, do not set DRV_LOAD until the interface
       
  6209 	 * is up.  For all other cases, let the f/w know that the h/w is now
       
  6210 	 * under the control of the driver.
       
  6211 	 */
       
  6212 	if (!(adapter->flags & FLAG_HAS_AMT))
       
  6213 		e1000e_get_hw_control(adapter);
       
  6214 
       
  6215 	strncpy(netdev->name, "eth%d", sizeof(netdev->name) - 1);
       
  6216 	err = register_netdev(netdev);
       
  6217 	if (err)
       
  6218 		goto err_register;
       
  6219 
       
  6220 	/* carrier off reporting is important to ethtool even BEFORE open */
       
  6221 	netif_carrier_off(netdev);
       
  6222 
       
  6223 	e1000_print_device_info(adapter);
       
  6224 
       
  6225 	if (pci_dev_run_wake(pdev))
       
  6226 		pm_runtime_put_noidle(&pdev->dev);
       
  6227 
       
  6228 	return 0;
       
  6229 
       
  6230 err_register:
       
  6231 	if (!(adapter->flags & FLAG_HAS_AMT))
       
  6232 		e1000e_release_hw_control(adapter);
       
  6233 err_eeprom:
       
  6234 	if (!e1000_check_reset_block(&adapter->hw))
       
  6235 		e1000_phy_hw_reset(&adapter->hw);
       
  6236 err_hw_init:
       
  6237 	kfree(adapter->tx_ring);
       
  6238 	kfree(adapter->rx_ring);
       
  6239 err_sw_init:
       
  6240 	if (adapter->hw.flash_address)
       
  6241 		iounmap(adapter->hw.flash_address);
       
  6242 	e1000e_reset_interrupt_capability(adapter);
       
  6243 err_flashmap:
       
  6244 	iounmap(adapter->hw.hw_addr);
       
  6245 err_ioremap:
       
  6246 	free_netdev(netdev);
       
  6247 err_alloc_etherdev:
       
  6248 	pci_release_selected_regions(pdev,
       
  6249 	                             pci_select_bars(pdev, IORESOURCE_MEM));
       
  6250 err_pci_reg:
       
  6251 err_dma:
       
  6252 	pci_disable_device(pdev);
       
  6253 	return err;
       
  6254 }
       
  6255 
       
  6256 /**
       
  6257  * e1000_remove - Device Removal Routine
       
  6258  * @pdev: PCI device information struct
       
  6259  *
       
  6260  * e1000_remove is called by the PCI subsystem to alert the driver
       
  6261  * that it should release a PCI device.  This could be caused by a
       
  6262  * Hot-Plug event, or because the driver is going to be removed from
       
  6263  * memory.
       
  6264  **/
       
  6265 static void __devexit e1000_remove(struct pci_dev *pdev)
       
  6266 {
       
  6267 	struct net_device *netdev = pci_get_drvdata(pdev);
       
  6268 	struct e1000_adapter *adapter = netdev_priv(netdev);
       
  6269 	bool down = test_bit(__E1000_DOWN, &adapter->state);
       
  6270 
       
  6271 	/*
       
  6272 	 * The timer callbacks may reschedule themselves, so explicitly
       
  6273 	 * prevent them from being rescheduled.
       
  6274 	 */
       
  6275 	if (!down)
       
  6276 		set_bit(__E1000_DOWN, &adapter->state);
       
  6277 	del_timer_sync(&adapter->watchdog_timer);
       
  6278 	del_timer_sync(&adapter->phy_info_timer);
       
  6279 
       
  6280 	cancel_work_sync(&adapter->reset_task);
       
  6281 	cancel_work_sync(&adapter->watchdog_task);
       
  6282 	cancel_work_sync(&adapter->downshift_task);
       
  6283 	cancel_work_sync(&adapter->update_phy_task);
       
  6284 	cancel_work_sync(&adapter->print_hang_task);
       
  6285 
       
  6286 	if (!(netdev->flags & IFF_UP))
       
  6287 		e1000_power_down_phy(adapter);
       
  6288 
       
  6289 	/* Don't lie to e1000_close() down the road. */
       
  6290 	if (!down)
       
  6291 		clear_bit(__E1000_DOWN, &adapter->state);
       
  6292 	unregister_netdev(netdev);
       
  6293 
       
  6294 	if (pci_dev_run_wake(pdev))
       
  6295 		pm_runtime_get_noresume(&pdev->dev);
       
  6296 
       
  6297 	/*
       
  6298 	 * Release control of h/w to f/w.  If f/w is AMT enabled, this
       
  6299 	 * would have already happened in close and is redundant.
       
  6300 	 */
       
  6301 	e1000e_release_hw_control(adapter);
       
  6302 
       
  6303 	e1000e_reset_interrupt_capability(adapter);
       
  6304 	kfree(adapter->tx_ring);
       
  6305 	kfree(adapter->rx_ring);
       
  6306 
       
  6307 	iounmap(adapter->hw.hw_addr);
       
  6308 	if (adapter->hw.flash_address)
       
  6309 		iounmap(adapter->hw.flash_address);
       
  6310 	pci_release_selected_regions(pdev,
       
  6311 	                             pci_select_bars(pdev, IORESOURCE_MEM));
       
  6312 
       
  6313 	free_netdev(netdev);
       
  6314 
       
  6315 	/* AER disable */
       
  6316 	pci_disable_pcie_error_reporting(pdev);
       
  6317 
       
  6318 	pci_disable_device(pdev);
       
  6319 }
       
  6320 
       
  6321 /* PCI Error Recovery (ERS) */
       
  6322 static struct pci_error_handlers e1000_err_handler = {
       
  6323 	.error_detected = e1000_io_error_detected,
       
  6324 	.slot_reset = e1000_io_slot_reset,
       
  6325 	.resume = e1000_io_resume,
       
  6326 };
       
  6327 
       
  6328 static DEFINE_PCI_DEVICE_TABLE(e1000_pci_tbl) = {
       
  6329 	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_82571EB_COPPER), board_82571 },
       
  6330 	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_82571EB_FIBER), board_82571 },
       
  6331 	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_82571EB_QUAD_COPPER), board_82571 },
       
  6332 	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_82571EB_QUAD_COPPER_LP), board_82571 },
       
  6333 	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_82571EB_QUAD_FIBER), board_82571 },
       
  6334 	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_82571EB_SERDES), board_82571 },
       
  6335 	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_82571EB_SERDES_DUAL), board_82571 },
       
  6336 	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_82571EB_SERDES_QUAD), board_82571 },
       
  6337 	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_82571PT_QUAD_COPPER), board_82571 },
       
  6338 
       
  6339 	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_82572EI), board_82572 },
       
  6340 	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_82572EI_COPPER), board_82572 },
       
  6341 	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_82572EI_FIBER), board_82572 },
       
  6342 	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_82572EI_SERDES), board_82572 },
       
  6343 
       
  6344 	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_82573E), board_82573 },
       
  6345 	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_82573E_IAMT), board_82573 },
       
  6346 	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_82573L), board_82573 },
       
  6347 
       
  6348 	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_82574L), board_82574 },
       
  6349 	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_82574LA), board_82574 },
       
  6350 	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_82583V), board_82583 },
       
  6351 
       
  6352 	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_80003ES2LAN_COPPER_DPT),
       
  6353 	  board_80003es2lan },
       
  6354 	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_80003ES2LAN_COPPER_SPT),
       
  6355 	  board_80003es2lan },
       
  6356 	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_80003ES2LAN_SERDES_DPT),
       
  6357 	  board_80003es2lan },
       
  6358 	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_80003ES2LAN_SERDES_SPT),
       
  6359 	  board_80003es2lan },
       
  6360 
       
  6361 	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_ICH8_IFE), board_ich8lan },
       
  6362 	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_ICH8_IFE_G), board_ich8lan },
       
  6363 	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_ICH8_IFE_GT), board_ich8lan },
       
  6364 	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_ICH8_IGP_AMT), board_ich8lan },
       
  6365 	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_ICH8_IGP_C), board_ich8lan },
       
  6366 	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_ICH8_IGP_M), board_ich8lan },
       
  6367 	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_ICH8_IGP_M_AMT), board_ich8lan },
       
  6368 	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_ICH8_82567V_3), board_ich8lan },
       
  6369 
       
  6370 	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_ICH9_IFE), board_ich9lan },
       
  6371 	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_ICH9_IFE_G), board_ich9lan },
       
  6372 	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_ICH9_IFE_GT), board_ich9lan },
       
  6373 	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_ICH9_IGP_AMT), board_ich9lan },
       
  6374 	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_ICH9_IGP_C), board_ich9lan },
       
  6375 	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_ICH9_BM), board_ich9lan },
       
  6376 	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_ICH9_IGP_M), board_ich9lan },
       
  6377 	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_ICH9_IGP_M_AMT), board_ich9lan },
       
  6378 	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_ICH9_IGP_M_V), board_ich9lan },
       
  6379 
       
  6380 	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_ICH10_R_BM_LM), board_ich9lan },
       
  6381 	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_ICH10_R_BM_LF), board_ich9lan },
       
  6382 	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_ICH10_R_BM_V), board_ich9lan },
       
  6383 
       
  6384 	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_ICH10_D_BM_LM), board_ich10lan },
       
  6385 	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_ICH10_D_BM_LF), board_ich10lan },
       
  6386 	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_ICH10_D_BM_V), board_ich10lan },
       
  6387 
       
  6388 	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_PCH_M_HV_LM), board_pchlan },
       
  6389 	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_PCH_M_HV_LC), board_pchlan },
       
  6390 	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_PCH_D_HV_DM), board_pchlan },
       
  6391 	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_PCH_D_HV_DC), board_pchlan },
       
  6392 
       
  6393 	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_PCH2_LV_LM), board_pch2lan },
       
  6394 	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_PCH2_LV_V), board_pch2lan },
       
  6395 
       
  6396 	{ }	/* terminate list */
       
  6397 };
       
  6398 MODULE_DEVICE_TABLE(pci, e1000_pci_tbl);
       
  6399 
       
  6400 #ifdef CONFIG_PM
       
  6401 static const struct dev_pm_ops e1000_pm_ops = {
       
  6402 	SET_SYSTEM_SLEEP_PM_OPS(e1000_suspend, e1000_resume)
       
  6403 	SET_RUNTIME_PM_OPS(e1000_runtime_suspend,
       
  6404 				e1000_runtime_resume, e1000_idle)
       
  6405 };
       
  6406 #endif
       
  6407 
       
  6408 /* PCI Device API Driver */
       
  6409 static struct pci_driver e1000_driver = {
       
  6410 	.name     = e1000e_driver_name,
       
  6411 	.id_table = e1000_pci_tbl,
       
  6412 	.probe    = e1000_probe,
       
  6413 	.remove   = __devexit_p(e1000_remove),
       
  6414 #ifdef CONFIG_PM
       
  6415 	.driver.pm = &e1000_pm_ops,
       
  6416 #endif
       
  6417 	.shutdown = e1000_shutdown,
       
  6418 	.err_handler = &e1000_err_handler
       
  6419 };
       
  6420 
       
  6421 /**
       
  6422  * e1000_init_module - Driver Registration Routine
       
  6423  *
       
  6424  * e1000_init_module is the first routine called when the driver is
       
  6425  * loaded. All it does is register with the PCI subsystem.
       
  6426  **/
       
  6427 static int __init e1000_init_module(void)
       
  6428 {
       
  6429 	int ret;
       
  6430 	pr_info("Intel(R) PRO/1000 Network Driver - %s\n",
       
  6431 		e1000e_driver_version);
       
  6432 	pr_info("Copyright(c) 1999 - 2011 Intel Corporation.\n");
       
  6433 	ret = pci_register_driver(&e1000_driver);
       
  6434 
       
  6435 	return ret;
       
  6436 }
       
  6437 module_init(e1000_init_module);
       
  6438 
       
  6439 /**
       
  6440  * e1000_exit_module - Driver Exit Cleanup Routine
       
  6441  *
       
  6442  * e1000_exit_module is called just before the driver is removed
       
  6443  * from memory.
       
  6444  **/
       
  6445 static void __exit e1000_exit_module(void)
       
  6446 {
       
  6447 	pci_unregister_driver(&e1000_driver);
       
  6448 }
       
  6449 module_exit(e1000_exit_module);
       
  6450 
       
  6451 
       
  6452 MODULE_AUTHOR("Intel Corporation, <linux.nics@intel.com>");
       
  6453 MODULE_DESCRIPTION("Intel(R) PRO/1000 Network Driver");
       
  6454 MODULE_LICENSE("GPL");
       
  6455 MODULE_VERSION(DRV_VERSION);
       
  6456 
       
  6457 /* e1000_main.c */