/*******************************************************************************

  Intel PRO/1000 Linux driver
  Copyright(c) 1999 - 2009 Intel Corporation.

  This program is free software; you can redistribute it and/or modify it
  under the terms and conditions of the GNU General Public License,
  version 2, as published by the Free Software Foundation.

  This program is distributed in the hope it will be useful, but WITHOUT
  ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
  FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
  more details.

  You should have received a copy of the GNU General Public License along with
  this program; if not, write to the Free Software Foundation, Inc.,
  51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.

  The full GNU General Public License is included in this distribution in
  the file called "COPYING".

  Contact Information:
  Linux NICS <linux.nics@intel.com>
  e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
  Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497

*******************************************************************************/

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/module.h>
#include <linux/types.h>
#include <linux/init.h>
#include <linux/pci.h>
#include <linux/vmalloc.h>
#include <linux/pagemap.h>
#include <linux/delay.h>
#include <linux/netdevice.h>
#include <linux/tcp.h>
#include <linux/ipv6.h>
#include <linux/slab.h>
#include <net/checksum.h>
#include <net/ip6_checksum.h>
#include <linux/mii.h>
#include <linux/ethtool.h>
#include <linux/if_vlan.h>
#include <linux/cpu.h>
#include <linux/smp.h>
#include <linux/pm_qos_params.h>
#include <linux/pm_runtime.h>
#include <linux/aer.h>

#include "e1000.h"

#define DRV_VERSION "1.0.2-k4"
char e1000e_driver_name[] = "e1000e";
const char e1000e_driver_version[] = DRV_VERSION;

static const struct e1000_info *e1000_info_tbl[] = {
	[board_82571]		= &e1000_82571_info,
	[board_82572]		= &e1000_82572_info,
	[board_82573]		= &e1000_82573_info,
	[board_82574]		= &e1000_82574_info,
	[board_82583]		= &e1000_82583_info,
	[board_80003es2lan]	= &e1000_es2_info,
	[board_ich8lan]		= &e1000_ich8_info,
	[board_ich9lan]		= &e1000_ich9_info,
	[board_ich10lan]	= &e1000_ich10_info,
	[board_pchlan]		= &e1000_pch_info,
};

struct e1000_reg_info {
	u32 ofs;
	char *name;
};

#define E1000_RDFH	0x02410	/* Rx Data FIFO Head - RW */
#define E1000_RDFT	0x02418	/* Rx Data FIFO Tail - RW */
#define E1000_RDFHS	0x02420	/* Rx Data FIFO Head Saved - RW */
#define E1000_RDFTS	0x02428	/* Rx Data FIFO Tail Saved - RW */
#define E1000_RDFPC	0x02430	/* Rx Data FIFO Packet Count - RW */

#define E1000_TDFH	0x03410	/* Tx Data FIFO Head - RW */
#define E1000_TDFT	0x03418	/* Tx Data FIFO Tail - RW */
#define E1000_TDFHS	0x03420	/* Tx Data FIFO Head Saved - RW */
#define E1000_TDFTS	0x03428	/* Tx Data FIFO Tail Saved - RW */
#define E1000_TDFPC	0x03430	/* Tx Data FIFO Packet Count - RW */
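/*
 * The Rx/Tx data FIFO registers above are diagnostic-only and do not
 * appear in the shared register headers, so they are defined locally
 * for use by the register dump table below.
 */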
static const struct e1000_reg_info e1000_reg_info_tbl[] = {

	/* General Registers */
	{E1000_CTRL, "CTRL"},
	{E1000_STATUS, "STATUS"},
	{E1000_CTRL_EXT, "CTRL_EXT"},

	/* Interrupt Registers */
	{E1000_ICR, "ICR"},

	/* RX Registers */
	{E1000_RCTL, "RCTL"},
	{E1000_RDLEN, "RDLEN"},
	{E1000_RDH, "RDH"},
	{E1000_RDT, "RDT"},
	{E1000_RDTR, "RDTR"},
	{E1000_RXDCTL(0), "RXDCTL"},
	{E1000_ERT, "ERT"},
	{E1000_RDBAL, "RDBAL"},
	{E1000_RDBAH, "RDBAH"},
	{E1000_RDFH, "RDFH"},
	{E1000_RDFT, "RDFT"},
	{E1000_RDFHS, "RDFHS"},
	{E1000_RDFTS, "RDFTS"},
	{E1000_RDFPC, "RDFPC"},

	/* TX Registers */
	{E1000_TCTL, "TCTL"},
	{E1000_TDBAL, "TDBAL"},
	{E1000_TDBAH, "TDBAH"},
	{E1000_TDLEN, "TDLEN"},
	{E1000_TDH, "TDH"},
	{E1000_TDT, "TDT"},
	{E1000_TIDV, "TIDV"},
	{E1000_TXDCTL(0), "TXDCTL"},
	{E1000_TADV, "TADV"},
	{E1000_TARC(0), "TARC"},
	{E1000_TDFH, "TDFH"},
	{E1000_TDFT, "TDFT"},
	{E1000_TDFHS, "TDFHS"},
	{E1000_TDFTS, "TDFTS"},
	{E1000_TDFPC, "TDFPC"},

	/* List Terminator */
	{}
};

/*
 * e1000_regdump - register printout routine
 */
static void e1000_regdump(struct e1000_hw *hw, struct e1000_reg_info *reginfo)
{
	int n = 0;
	char rname[16];
	u32 regs[8];

	switch (reginfo->ofs) {
	case E1000_RXDCTL(0):
		for (n = 0; n < 2; n++)
			regs[n] = __er32(hw, E1000_RXDCTL(n));
		break;
	case E1000_TXDCTL(0):
		for (n = 0; n < 2; n++)
			regs[n] = __er32(hw, E1000_TXDCTL(n));
		break;
	case E1000_TARC(0):
		for (n = 0; n < 2; n++)
			regs[n] = __er32(hw, E1000_TARC(n));
		break;
	default:
		printk(KERN_INFO "%-15s %08x\n",
		       reginfo->name, __er32(hw, reginfo->ofs));
		return;
	}

	snprintf(rname, 16, "%s%s", reginfo->name, "[0-1]");
	printk(KERN_INFO "%-15s ", rname);
	for (n = 0; n < 2; n++)
		printk(KERN_CONT "%08x ", regs[n]);
	printk(KERN_CONT "\n");
}
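/*
 * Example output: a plain register prints as a single name/value pair
 * ("CTRL            <value>"), while the three per-queue cases above
 * print both queue instances on one line ("TXDCTL[0-1]     <q0> <q1>").
 */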
/*
 * e1000e_dump - Print registers, tx-ring and rx-ring
 */
static void e1000e_dump(struct e1000_adapter *adapter)
{
	struct net_device *netdev = adapter->netdev;
	struct e1000_hw *hw = &adapter->hw;
	struct e1000_reg_info *reginfo;
	struct e1000_ring *tx_ring = adapter->tx_ring;
	struct e1000_tx_desc *tx_desc;
	struct my_u0 { u64 a; u64 b; } *u0;
	struct e1000_buffer *buffer_info;
	struct e1000_ring *rx_ring = adapter->rx_ring;
	union e1000_rx_desc_packet_split *rx_desc_ps;
	struct e1000_rx_desc *rx_desc;
	struct my_u1 { u64 a; u64 b; u64 c; u64 d; } *u1;
	u32 staterr;
	int i = 0;

	if (!netif_msg_hw(adapter))
		return;

	/* Print netdevice Info */
	if (netdev) {
		dev_info(&adapter->pdev->dev, "Net device Info\n");
		printk(KERN_INFO "Device Name     state            "
		       "trans_start      last_rx\n");
		printk(KERN_INFO "%-15s %016lX %016lX %016lX\n",
		       netdev->name,
		       netdev->state,
		       netdev->trans_start,
		       netdev->last_rx);
	}

	/* Print Registers */
	dev_info(&adapter->pdev->dev, "Register Dump\n");
	printk(KERN_INFO " Register Name   Value\n");
	for (reginfo = (struct e1000_reg_info *)e1000_reg_info_tbl;
	     reginfo->name; reginfo++) {
		e1000_regdump(hw, reginfo);
	}

	/* Print TX Ring Summary */
	if (!netdev || !netif_running(netdev))
		goto exit;

	dev_info(&adapter->pdev->dev, "TX Rings Summary\n");
	printk(KERN_INFO "Queue [NTU] [NTC] [bi(ntc)->dma  ]"
	       " leng ntw timestamp\n");
	buffer_info = &tx_ring->buffer_info[tx_ring->next_to_clean];
	printk(KERN_INFO " %5d %5X %5X %016llX %04X %3X %016llX\n",
	       0, tx_ring->next_to_use, tx_ring->next_to_clean,
	       (u64)buffer_info->dma,
	       buffer_info->length,
	       buffer_info->next_to_watch,
	       (u64)buffer_info->time_stamp);

	/* Print TX Rings */
	if (!netif_msg_tx_done(adapter))
		goto rx_ring_summary;

	dev_info(&adapter->pdev->dev, "TX Rings Dump\n");

	/* Transmit Descriptor Formats - DEXT[29] is 0 (Legacy) or 1 (Extended)
	 *
	 * Legacy Transmit Descriptor
	 *   +--------------------------------------------------------------+
	 * 0 |         Buffer Address [63:0] (Reserved on Write Back)       |
	 *   +--------------------------------------------------------------+
	 * 8 | Special  |    CSS     | Status |  CMD    |  CSO   |  Length  |
	 *   +--------------------------------------------------------------+
	 *   63       48 47        36 35    32 31     24 23    16 15        0
	 *
	 * Extended Context Descriptor (DTYP=0x0) for TSO or checksum offload
	 *   63      48 47    40 39       32 31             16 15    8 7      0
	 *   +----------------------------------------------------------------+
	 * 0 |  TUCSE  | TUCS0  |   TUCSS   |     IPCSE       | IPCS0 | IPCSS  |
	 *   +----------------------------------------------------------------+
	 * 8 |   MSS   | HDRLEN | RSV | STA | TUCMD | DTYP |      PAYLEN      |
	 *   +----------------------------------------------------------------+
	 *   63      48 47    40 39 36 35 32 31   24 23  20 19                0
	 *
	 * Extended Data Descriptor (DTYP=0x1)
	 *   +----------------------------------------------------------------+
	 * 0 |                     Buffer Address [63:0]                      |
	 *   +----------------------------------------------------------------+
	 * 8 | VLAN tag |  POPTS  | Rsvd | Status | Command | DTYP |  DTALEN  |
	 *   +----------------------------------------------------------------+
	 *   63       48 47     40 39  36 35    32 31     24 23  20 19        0
	 */
	printk(KERN_INFO "Tl[desc]     [address 63:0  ] [SpeCssSCmCsLen]"
	       " [bi->dma       ] leng  ntw timestamp        bi->skb "
	       "<-- Legacy format\n");
	printk(KERN_INFO "Tc[desc]     [Ce CoCsIpceCoS] [MssHlRSCm0Plen]"
	       " [bi->dma       ] leng  ntw timestamp        bi->skb "
	       "<-- Ext Context format\n");
	printk(KERN_INFO "Td[desc]     [address 63:0  ] [VlaPoRSCm1Dlen]"
	       " [bi->dma       ] leng  ntw timestamp        bi->skb "
	       "<-- Ext Data format\n");
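	/*
	 * The loop below tags each line with the descriptor type decoded
	 * from the second quadword: bit 29 (DEXT) clear means legacy
	 * ('l'); with DEXT set, bit 20 distinguishes extended data ('d')
	 * from extended context ('c'), matching the three formats above.
	 */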
	for (i = 0; tx_ring->desc && (i < tx_ring->count); i++) {
		tx_desc = E1000_TX_DESC(*tx_ring, i);
		buffer_info = &tx_ring->buffer_info[i];
		u0 = (struct my_u0 *)tx_desc;
		printk(KERN_INFO "T%c[0x%03X]    %016llX %016llX %016llX "
		       "%04X  %3X %016llX %p",
		       (!(le64_to_cpu(u0->b) & (1<<29)) ? 'l' :
			((le64_to_cpu(u0->b) & (1<<20)) ? 'd' : 'c')), i,
		       le64_to_cpu(u0->a), le64_to_cpu(u0->b),
		       (u64)buffer_info->dma, buffer_info->length,
		       buffer_info->next_to_watch, (u64)buffer_info->time_stamp,
		       buffer_info->skb);
		if (i == tx_ring->next_to_use && i == tx_ring->next_to_clean)
			printk(KERN_CONT " NTC/U\n");
		else if (i == tx_ring->next_to_use)
			printk(KERN_CONT " NTU\n");
		else if (i == tx_ring->next_to_clean)
			printk(KERN_CONT " NTC\n");
		else
			printk(KERN_CONT "\n");

		if (netif_msg_pktdata(adapter) && buffer_info->dma != 0)
			print_hex_dump(KERN_INFO, "", DUMP_PREFIX_ADDRESS,
				       16, 1, phys_to_virt(buffer_info->dma),
				       buffer_info->length, true);
	}

	/* Print RX Rings Summary */
rx_ring_summary:
	dev_info(&adapter->pdev->dev, "RX Rings Summary\n");
	printk(KERN_INFO "Queue [NTU] [NTC]\n");
	printk(KERN_INFO " %5d %5X %5X\n", 0,
	       rx_ring->next_to_use, rx_ring->next_to_clean);

	/* Print RX Rings */
	if (!netif_msg_rx_status(adapter))
		goto exit;

	dev_info(&adapter->pdev->dev, "RX Rings Dump\n");
	switch (adapter->rx_ps_pages) {
	case 1:
	case 2:
	case 3:
		/* [Extended] Packet Split Receive Descriptor Format
		 *
		 *    +-----------------------------------------------------+
		 *  0 |                Buffer Address 0 [63:0]              |
		 *    +-----------------------------------------------------+
		 *  8 |                Buffer Address 1 [63:0]              |
		 *    +-----------------------------------------------------+
		 * 16 |                Buffer Address 2 [63:0]              |
		 *    +-----------------------------------------------------+
		 * 24 |                Buffer Address 3 [63:0]              |
		 *    +-----------------------------------------------------+
		 */
		printk(KERN_INFO "R  [desc]      [buffer 0 63:0 ] "
		       "[buffer 1 63:0 ] "
		       "[buffer 2 63:0 ] [buffer 3 63:0 ] [bi->dma       ] "
		       "[bi->skb] <-- Ext Pkt Split format\n");
		/* [Extended] Receive Descriptor (Write-Back) Format
		 *
		 *   63       48 47    32 31     13 12    8 7    4 3        0
		 *   +------------------------------------------------------+
		 * 0 | Packet   | IP     |  Rsvd   | MRQ   | Rsvd | MRQ RSS  |
		 *   | Checksum | Ident  |         | Queue |      |  Type    |
		 *   +------------------------------------------------------+
		 * 8 | VLAN Tag | Length | Extended Error | Extended Status  |
		 *   +------------------------------------------------------+
		 *   63       48 47    32 31            20 19               0
		 */
		printk(KERN_INFO "RWB[desc]      [ck ipid mrqhsh] "
		       "[vl   l0 ee  es] "
		       "[ l3  l2  l1 hs] [reserved      ] ---------------- "
		       "[bi->skb] <-- Ext Rx Write-Back format\n");
		for (i = 0; i < rx_ring->count; i++) {
			buffer_info = &rx_ring->buffer_info[i];
			rx_desc_ps = E1000_RX_DESC_PS(*rx_ring, i);
			u1 = (struct my_u1 *)rx_desc_ps;
			staterr =
			    le32_to_cpu(rx_desc_ps->wb.middle.status_error);
			if (staterr & E1000_RXD_STAT_DD) {
				/* Descriptor Done */
				printk(KERN_INFO "RWB[0x%03X]     %016llX "
				       "%016llX %016llX %016llX "
				       "---------------- %p", i,
				       le64_to_cpu(u1->a),
				       le64_to_cpu(u1->b),
				       le64_to_cpu(u1->c),
				       le64_to_cpu(u1->d),
				       buffer_info->skb);
			} else {
				printk(KERN_INFO "R  [0x%03X]     %016llX "
				       "%016llX %016llX %016llX %016llX %p", i,
				       le64_to_cpu(u1->a),
				       le64_to_cpu(u1->b),
				       le64_to_cpu(u1->c),
				       le64_to_cpu(u1->d),
				       (u64)buffer_info->dma,
				       buffer_info->skb);

				if (netif_msg_pktdata(adapter))
					print_hex_dump(KERN_INFO, "",
						DUMP_PREFIX_ADDRESS, 16, 1,
						phys_to_virt(buffer_info->dma),
						adapter->rx_ps_bsize0, true);
			}

			if (i == rx_ring->next_to_use)
				printk(KERN_CONT " NTU\n");
			else if (i == rx_ring->next_to_clean)
				printk(KERN_CONT " NTC\n");
			else
				printk(KERN_CONT "\n");
		}
		break;
	default:
	case 0:
		/* Legacy Receive Descriptor Format
		 *
		 *   +-----------------------------------------------------+
		 *   |                Buffer Address [63:0]                |
		 *   +-----------------------------------------------------+
		 *   | VLAN Tag | Errors | Status 0 | Packet csum | Length |
		 *   +-----------------------------------------------------+
		 *   63       48 47    40 39      32 31         16 15      0
		 */
		printk(KERN_INFO "Rl[desc]     [address 63:0  ] "
		       "[vl er S cks ln] [bi->dma       ] [bi->skb] "
		       "<-- Legacy format\n");
		for (i = 0; rx_ring->desc && (i < rx_ring->count); i++) {
			rx_desc = E1000_RX_DESC(*rx_ring, i);
			buffer_info = &rx_ring->buffer_info[i];
			u0 = (struct my_u0 *)rx_desc;
			printk(KERN_INFO "Rl[0x%03X]    %016llX %016llX "
			       "%016llX %p",
			       i, le64_to_cpu(u0->a), le64_to_cpu(u0->b),
			       (u64)buffer_info->dma, buffer_info->skb);
			if (i == rx_ring->next_to_use)
				printk(KERN_CONT " NTU\n");
			else if (i == rx_ring->next_to_clean)
				printk(KERN_CONT " NTC\n");
			else
				printk(KERN_CONT "\n");

			if (netif_msg_pktdata(adapter))
				print_hex_dump(KERN_INFO, "",
					       DUMP_PREFIX_ADDRESS,
					       16, 1,
					       phys_to_virt(buffer_info->dma),
					       adapter->rx_buffer_len, true);
		}
	}

exit:
	return;
}

/**
 * e1000_desc_unused - calculate if we have unused descriptors
 **/
static int e1000_desc_unused(struct e1000_ring *ring)
{
	if (ring->next_to_clean > ring->next_to_use)
		return ring->next_to_clean - ring->next_to_use - 1;

	return ring->count + ring->next_to_clean - ring->next_to_use - 1;
}
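/*
 * Example: with count = 256, next_to_clean = 10 and next_to_use = 250,
 * 256 + 10 - 250 - 1 = 15 descriptors are unused.  One slot is always
 * kept free so that a completely full ring can be distinguished from a
 * completely empty one (both would otherwise have head == tail).
 */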
/**
 * e1000_receive_skb - helper function to handle Rx indications
 * @adapter: board private structure
 * @status: descriptor status field as written by hardware
 * @vlan: descriptor vlan field as written by hardware (no le/be conversion)
 * @skb: pointer to sk_buff to be indicated to stack
 **/
static void e1000_receive_skb(struct e1000_adapter *adapter,
			      struct net_device *netdev,
			      struct sk_buff *skb,
			      u8 status, __le16 vlan)
{
	skb->protocol = eth_type_trans(skb, netdev);

	if (adapter->vlgrp && (status & E1000_RXD_STAT_VP))
		vlan_gro_receive(&adapter->napi, adapter->vlgrp,
				 le16_to_cpu(vlan), skb);
	else
		napi_gro_receive(&adapter->napi, skb);
}

/**
 * e1000_rx_checksum - Receive Checksum Offload for 82543
 * @adapter: board private structure
 * @status_err: receive descriptor status and error fields
 * @csum: receive descriptor csum field
 * @sk_buff: socket buffer with received data
 **/
static void e1000_rx_checksum(struct e1000_adapter *adapter, u32 status_err,
			      u32 csum, struct sk_buff *skb)
{
	u16 status = (u16)status_err;
	u8 errors = (u8)(status_err >> 24);

	skb->ip_summed = CHECKSUM_NONE;

	/* Ignore Checksum bit is set */
	if (status & E1000_RXD_STAT_IXSM)
		return;
	/* TCP/UDP checksum error bit is set */
	if (errors & E1000_RXD_ERR_TCPE) {
		/* let the stack verify checksum errors */
		adapter->hw_csum_err++;
		return;
	}

	/* TCP/UDP Checksum has not been calculated */
	if (!(status & (E1000_RXD_STAT_TCPCS | E1000_RXD_STAT_UDPCS)))
		return;

	/* It must be a TCP or UDP packet with a valid checksum */
	if (status & E1000_RXD_STAT_TCPCS) {
		/* TCP checksum is good */
		skb->ip_summed = CHECKSUM_UNNECESSARY;
	} else {
		/*
		 * IP fragment with UDP payload
		 * Hardware complements the payload checksum, so we undo it
		 * and then put the value in host order for further stack use.
		 */
		__sum16 sum = (__force __sum16)htons(csum);
		skb->csum = csum_unfold(~sum);
		skb->ip_summed = CHECKSUM_COMPLETE;
	}
	adapter->hw_csum_good++;
}
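/*
 * For the UDP-fragment path above: if the hardware reported a payload
 * checksum of, say, 0x1234, the driver folds ~htons(0x1234) back into
 * skb->csum and sets CHECKSUM_COMPLETE, so the stack can finish the
 * validation once all IP fragments have been reassembled.
 */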
/**
 * e1000_alloc_rx_buffers - Replace used receive buffers; legacy & extended
 * @adapter: address of board private structure
 **/
static void e1000_alloc_rx_buffers(struct e1000_adapter *adapter,
				   int cleaned_count)
{
	struct net_device *netdev = adapter->netdev;
	struct pci_dev *pdev = adapter->pdev;
	struct e1000_ring *rx_ring = adapter->rx_ring;
	struct e1000_rx_desc *rx_desc;
	struct e1000_buffer *buffer_info;
	struct sk_buff *skb;
	unsigned int i;
	unsigned int bufsz = adapter->rx_buffer_len;

	i = rx_ring->next_to_use;
	buffer_info = &rx_ring->buffer_info[i];

	while (cleaned_count--) {
		skb = buffer_info->skb;
		if (skb) {
			skb_trim(skb, 0);
			goto map_skb;
		}

		skb = netdev_alloc_skb_ip_align(netdev, bufsz);
		if (!skb) {
			/* Better luck next round */
			adapter->alloc_rx_buff_failed++;
			break;
		}

		buffer_info->skb = skb;
map_skb:
		buffer_info->dma = dma_map_single(&pdev->dev, skb->data,
						  adapter->rx_buffer_len,
						  DMA_FROM_DEVICE);
		if (dma_mapping_error(&pdev->dev, buffer_info->dma)) {
			dev_err(&pdev->dev, "RX DMA map failed\n");
			adapter->rx_dma_failed++;
			break;
		}

		rx_desc = E1000_RX_DESC(*rx_ring, i);
		rx_desc->buffer_addr = cpu_to_le64(buffer_info->dma);

		if (unlikely(!(i & (E1000_RX_BUFFER_WRITE - 1)))) {
			/*
			 * Force memory writes to complete before letting h/w
			 * know there are new descriptors to fetch.  (Only
			 * applicable for weak-ordered memory model archs,
			 * such as IA-64).
			 */
			wmb();
			writel(i, adapter->hw.hw_addr + rx_ring->tail);
		}
		i++;
		if (i == rx_ring->count)
			i = 0;
		buffer_info = &rx_ring->buffer_info[i];
	}

	rx_ring->next_to_use = i;
}
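/*
 * The tail register is only bumped every E1000_RX_BUFFER_WRITE (16)
 * descriptors: "i & (E1000_RX_BUFFER_WRITE - 1)" is zero when i is a
 * multiple of 16, so a burst of refills costs a single MMIO write
 * instead of one per descriptor.
 */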
/**
 * e1000_alloc_rx_buffers_ps - Replace used receive buffers; packet split
 * @adapter: address of board private structure
 **/
static void e1000_alloc_rx_buffers_ps(struct e1000_adapter *adapter,
				      int cleaned_count)
{
	struct net_device *netdev = adapter->netdev;
	struct pci_dev *pdev = adapter->pdev;
	union e1000_rx_desc_packet_split *rx_desc;
	struct e1000_ring *rx_ring = adapter->rx_ring;
	struct e1000_buffer *buffer_info;
	struct e1000_ps_page *ps_page;
	struct sk_buff *skb;
	unsigned int i, j;

	i = rx_ring->next_to_use;
	buffer_info = &rx_ring->buffer_info[i];

	while (cleaned_count--) {
		rx_desc = E1000_RX_DESC_PS(*rx_ring, i);

		for (j = 0; j < PS_PAGE_BUFFERS; j++) {
			ps_page = &buffer_info->ps_pages[j];
			if (j >= adapter->rx_ps_pages) {
				/* all unused desc entries get hw null ptr */
				rx_desc->read.buffer_addr[j + 1] =
				    ~cpu_to_le64(0);
				continue;
			}
			if (!ps_page->page) {
				ps_page->page = alloc_page(GFP_ATOMIC);
				if (!ps_page->page) {
					adapter->alloc_rx_buff_failed++;
					goto no_buffers;
				}
				ps_page->dma = dma_map_page(&pdev->dev,
							    ps_page->page,
							    0, PAGE_SIZE,
							    DMA_FROM_DEVICE);
				if (dma_mapping_error(&pdev->dev,
						      ps_page->dma)) {
					dev_err(&adapter->pdev->dev,
						"RX DMA page map failed\n");
					adapter->rx_dma_failed++;
					goto no_buffers;
				}
			}
			/*
			 * Refresh the desc even if buffer_addrs
			 * didn't change because each write-back
			 * erases this info.
			 */
			rx_desc->read.buffer_addr[j + 1] =
			    cpu_to_le64(ps_page->dma);
		}

		skb = netdev_alloc_skb_ip_align(netdev,
						adapter->rx_ps_bsize0);

		if (!skb) {
			adapter->alloc_rx_buff_failed++;
			break;
		}

		buffer_info->skb = skb;
		buffer_info->dma = dma_map_single(&pdev->dev, skb->data,
						  adapter->rx_ps_bsize0,
						  DMA_FROM_DEVICE);
		if (dma_mapping_error(&pdev->dev, buffer_info->dma)) {
			dev_err(&pdev->dev, "RX DMA map failed\n");
			adapter->rx_dma_failed++;
			/* cleanup skb */
			dev_kfree_skb_any(skb);
			buffer_info->skb = NULL;
			break;
		}

		rx_desc->read.buffer_addr[0] = cpu_to_le64(buffer_info->dma);

		if (unlikely(!(i & (E1000_RX_BUFFER_WRITE - 1)))) {
			/*
			 * Force memory writes to complete before letting h/w
			 * know there are new descriptors to fetch.  (Only
			 * applicable for weak-ordered memory model archs,
			 * such as IA-64).
			 */
			wmb();
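			/*
			 * Packet split descriptors are twice the size of
			 * legacy ones (32 vs. 16 bytes), which is presumably
			 * why the tail is written as i << 1 here.
			 */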
			writel(i << 1, adapter->hw.hw_addr + rx_ring->tail);
		}

		i++;
		if (i == rx_ring->count)
			i = 0;
		buffer_info = &rx_ring->buffer_info[i];
	}

no_buffers:
	rx_ring->next_to_use = i;
}

/**
 * e1000_alloc_jumbo_rx_buffers - Replace used jumbo receive buffers
 * @adapter: address of board private structure
 * @cleaned_count: number of buffers to allocate this pass
 **/

static void e1000_alloc_jumbo_rx_buffers(struct e1000_adapter *adapter,
					 int cleaned_count)
{
	struct net_device *netdev = adapter->netdev;
	struct pci_dev *pdev = adapter->pdev;
	struct e1000_rx_desc *rx_desc;
	struct e1000_ring *rx_ring = adapter->rx_ring;
	struct e1000_buffer *buffer_info;
	struct sk_buff *skb;
	unsigned int i;
	unsigned int bufsz = 256 - 16 /* for skb_reserve */;

	i = rx_ring->next_to_use;
	buffer_info = &rx_ring->buffer_info[i];

	while (cleaned_count--) {
		skb = buffer_info->skb;
		if (skb) {
			skb_trim(skb, 0);
			goto check_page;
		}

		skb = netdev_alloc_skb_ip_align(netdev, bufsz);
		if (unlikely(!skb)) {
			/* Better luck next round */
			adapter->alloc_rx_buff_failed++;
			break;
		}

		buffer_info->skb = skb;
check_page:
		/* allocate a new page if necessary */
		if (!buffer_info->page) {
			buffer_info->page = alloc_page(GFP_ATOMIC);
			if (unlikely(!buffer_info->page)) {
				adapter->alloc_rx_buff_failed++;
				break;
			}
		}

		if (!buffer_info->dma)
			buffer_info->dma = dma_map_page(&pdev->dev,
							buffer_info->page, 0,
							PAGE_SIZE,
							DMA_FROM_DEVICE);

		rx_desc = E1000_RX_DESC(*rx_ring, i);
		rx_desc->buffer_addr = cpu_to_le64(buffer_info->dma);

		if (unlikely(++i == rx_ring->count))
			i = 0;
		buffer_info = &rx_ring->buffer_info[i];
	}

	if (likely(rx_ring->next_to_use != i)) {
		rx_ring->next_to_use = i;
		if (unlikely(i-- == 0))
			i = (rx_ring->count - 1);

		/*
		 * Force memory writes to complete before letting h/w
		 * know there are new descriptors to fetch.  (Only
		 * applicable for weak-ordered memory model archs,
		 * such as IA-64).
		 */
		wmb();
		writel(i, adapter->hw.hw_addr + rx_ring->tail);
	}
}
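/*
 * Note the i-- above: unlike the other refill routines, the jumbo path
 * writes the index of the last initialized descriptor to the tail
 * register (wrapping to count - 1 when i is 0) rather than the next
 * free slot, and it only touches the register when something was
 * actually allocated.
 */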
/**
 * e1000_clean_rx_irq - Send received data up the network stack; legacy
 * @adapter: board private structure
 *
 * the return value indicates whether actual cleaning was done, there
 * is no guarantee that everything was cleaned
 **/
static bool e1000_clean_rx_irq(struct e1000_adapter *adapter,
			       int *work_done, int work_to_do)
{
	struct net_device *netdev = adapter->netdev;
	struct pci_dev *pdev = adapter->pdev;
	struct e1000_hw *hw = &adapter->hw;
	struct e1000_ring *rx_ring = adapter->rx_ring;
	struct e1000_rx_desc *rx_desc, *next_rxd;
	struct e1000_buffer *buffer_info, *next_buffer;
	u32 length;
	unsigned int i;
	int cleaned_count = 0;
	bool cleaned = 0;
	unsigned int total_rx_bytes = 0, total_rx_packets = 0;

	i = rx_ring->next_to_clean;
	rx_desc = E1000_RX_DESC(*rx_ring, i);
	buffer_info = &rx_ring->buffer_info[i];

	while (rx_desc->status & E1000_RXD_STAT_DD) {
		struct sk_buff *skb;
		u8 status;

		if (*work_done >= work_to_do)
			break;
		(*work_done)++;
		rmb();	/* read descriptor and rx_buffer_info after status DD */

		status = rx_desc->status;
		skb = buffer_info->skb;
		buffer_info->skb = NULL;

		prefetch(skb->data - NET_IP_ALIGN);

		i++;
		if (i == rx_ring->count)
			i = 0;
		next_rxd = E1000_RX_DESC(*rx_ring, i);
		prefetch(next_rxd);

		next_buffer = &rx_ring->buffer_info[i];

		cleaned = 1;
		cleaned_count++;
		dma_unmap_single(&pdev->dev,
				 buffer_info->dma,
				 adapter->rx_buffer_len,
				 DMA_FROM_DEVICE);
		buffer_info->dma = 0;

		length = le16_to_cpu(rx_desc->length);

		/*
		 * !EOP means multiple descriptors were used to store a single
		 * packet, if that's the case we need to toss it.  In fact, we
		 * need to toss every packet with the EOP bit clear and the
		 * next frame that _does_ have the EOP bit set, as it is by
		 * definition only a frame fragment
		 */
		if (unlikely(!(status & E1000_RXD_STAT_EOP)))
			adapter->flags2 |= FLAG2_IS_DISCARDING;

		if (adapter->flags2 & FLAG2_IS_DISCARDING) {
			/* All receives must fit into a single buffer */
			e_dbg("Receive packet consumed multiple buffers\n");
			/* recycle */
			buffer_info->skb = skb;
			if (status & E1000_RXD_STAT_EOP)
				adapter->flags2 &= ~FLAG2_IS_DISCARDING;
			goto next_desc;
		}

		if (rx_desc->errors & E1000_RXD_ERR_FRAME_ERR_MASK) {
			/* recycle */
			buffer_info->skb = skb;
			goto next_desc;
		}

		/* adjust length to remove Ethernet CRC */
		if (!(adapter->flags2 & FLAG2_CRC_STRIPPING))
			length -= 4;

		total_rx_bytes += length;
		total_rx_packets++;

		/*
		 * code added for copybreak, this should improve
		 * performance for small packets with large amounts
		 * of reassembly being done in the stack
		 */
		if (length < copybreak) {
			struct sk_buff *new_skb =
			    netdev_alloc_skb_ip_align(netdev, length);
			if (new_skb) {
				skb_copy_to_linear_data_offset(new_skb,
							       -NET_IP_ALIGN,
							       (skb->data -
								NET_IP_ALIGN),
							       (length +
								NET_IP_ALIGN));
				/* save the skb in buffer_info as good */
				buffer_info->skb = skb;
				skb = new_skb;
			}
			/* else just continue with the old one */
		}
		/* end copybreak code */
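		/*
		 * Example: with the default copybreak of 256 bytes, a
		 * 64-byte TCP ACK is copied into a fresh right-sized skb
		 * and the original full-size skb is handed back to the
		 * ring slot for reuse, so the stack never sits on a large
		 * buffer for a tiny packet.
		 */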
		skb_put(skb, length);

		/* Receive Checksum Offload */
		e1000_rx_checksum(adapter,
				  (u32)(status) |
				  ((u32)(rx_desc->errors) << 24),
				  le16_to_cpu(rx_desc->csum), skb);

		e1000_receive_skb(adapter, netdev, skb, status,
				  rx_desc->special);

next_desc:
		rx_desc->status = 0;

		/* return some buffers to hardware, one at a time is too slow */
		if (cleaned_count >= E1000_RX_BUFFER_WRITE) {
			adapter->alloc_rx_buf(adapter, cleaned_count);
			cleaned_count = 0;
		}

		/* use prefetched values */
		rx_desc = next_rxd;
		buffer_info = next_buffer;
	}
	rx_ring->next_to_clean = i;

	cleaned_count = e1000_desc_unused(rx_ring);
	if (cleaned_count)
		adapter->alloc_rx_buf(adapter, cleaned_count);

	adapter->total_rx_bytes += total_rx_bytes;
	adapter->total_rx_packets += total_rx_packets;
	netdev->stats.rx_bytes += total_rx_bytes;
	netdev->stats.rx_packets += total_rx_packets;
	return cleaned;
}

static void e1000_put_txbuf(struct e1000_adapter *adapter,
			    struct e1000_buffer *buffer_info)
{
	if (buffer_info->dma) {
		if (buffer_info->mapped_as_page)
			dma_unmap_page(&adapter->pdev->dev, buffer_info->dma,
				       buffer_info->length, DMA_TO_DEVICE);
		else
			dma_unmap_single(&adapter->pdev->dev, buffer_info->dma,
					 buffer_info->length, DMA_TO_DEVICE);
		buffer_info->dma = 0;
	}
	if (buffer_info->skb) {
		dev_kfree_skb_any(buffer_info->skb);
		buffer_info->skb = NULL;
	}
	buffer_info->time_stamp = 0;
}

static void e1000_print_hw_hang(struct work_struct *work)
{
	struct e1000_adapter *adapter = container_of(work,
						     struct e1000_adapter,
						     print_hang_task);
	struct e1000_ring *tx_ring = adapter->tx_ring;
	unsigned int i = tx_ring->next_to_clean;
	unsigned int eop = tx_ring->buffer_info[i].next_to_watch;
	struct e1000_tx_desc *eop_desc = E1000_TX_DESC(*tx_ring, eop);
	struct e1000_hw *hw = &adapter->hw;
	u16 phy_status, phy_1000t_status, phy_ext_status;
	u16 pci_status;

	e1e_rphy(hw, PHY_STATUS, &phy_status);
	e1e_rphy(hw, PHY_1000T_STATUS, &phy_1000t_status);
	e1e_rphy(hw, PHY_EXT_STATUS, &phy_ext_status);

	pci_read_config_word(adapter->pdev, PCI_STATUS, &pci_status);

	/* detected Hardware unit hang */
	e_err("Detected Hardware Unit Hang:\n"
	      "  TDH                  <%x>\n"
	      "  TDT                  <%x>\n"
	      "  next_to_use          <%x>\n"
	      "  next_to_clean        <%x>\n"
	      "buffer_info[next_to_clean]:\n"
	      "  time_stamp           <%lx>\n"
	      "  next_to_watch        <%x>\n"
	      "  jiffies              <%lx>\n"
	      "  next_to_watch.status <%x>\n"
	      "MAC Status             <%x>\n"
	      "PHY Status             <%x>\n"
	      "PHY 1000BASE-T Status  <%x>\n"
	      "PHY Extended Status    <%x>\n"
	      "PCI Status             <%x>\n",
	      readl(adapter->hw.hw_addr + tx_ring->head),
	      readl(adapter->hw.hw_addr + tx_ring->tail),
	      tx_ring->next_to_use,
	      tx_ring->next_to_clean,
	      tx_ring->buffer_info[eop].time_stamp,
	      eop,
	      jiffies,
	      eop_desc->upper.fields.status,
	      er32(STATUS),
	      phy_status,
	      phy_1000t_status,
	      phy_ext_status,
	      pci_status);
}

/**
 * e1000_clean_tx_irq - Reclaim resources after transmit completes
 * @adapter: board private structure
 *
 * the return value indicates whether actual cleaning was done, there
 * is no guarantee that everything was cleaned
 **/
static bool e1000_clean_tx_irq(struct e1000_adapter *adapter)
{
	struct net_device *netdev = adapter->netdev;
	struct e1000_hw *hw = &adapter->hw;
	struct e1000_ring *tx_ring = adapter->tx_ring;
	struct e1000_tx_desc *tx_desc, *eop_desc;
	struct e1000_buffer *buffer_info;
	unsigned int i, eop;
	unsigned int count = 0;
	unsigned int total_tx_bytes = 0, total_tx_packets = 0;

	i = tx_ring->next_to_clean;
	eop = tx_ring->buffer_info[i].next_to_watch;
	eop_desc = E1000_TX_DESC(*tx_ring, eop);

	while ((eop_desc->upper.data & cpu_to_le32(E1000_TXD_STAT_DD)) &&
	       (count < tx_ring->count)) {
		bool cleaned = false;
		rmb();	/* read buffer_info after eop_desc */
		for (; !cleaned; count++) {
			tx_desc = E1000_TX_DESC(*tx_ring, i);
			buffer_info = &tx_ring->buffer_info[i];
			cleaned = (i == eop);

			if (cleaned) {
				total_tx_packets += buffer_info->segs;
				total_tx_bytes += buffer_info->bytecount;
			}

			e1000_put_txbuf(adapter, buffer_info);
			tx_desc->upper.data = 0;

			i++;
			if (i == tx_ring->count)
				i = 0;
		}

		if (i == tx_ring->next_to_use)
			break;
		eop = tx_ring->buffer_info[i].next_to_watch;
		eop_desc = E1000_TX_DESC(*tx_ring, eop);
	}

	tx_ring->next_to_clean = i;
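	/*
	 * The queue is restarted below once at least TX_WAKE_THRESHOLD
	 * descriptors are free again; the smp_mb() pairs with the stop
	 * check in the transmit path so that a concurrently stopping
	 * queue cannot miss the updated next_to_clean.
	 */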
#define TX_WAKE_THRESHOLD 32
	if (count && netif_carrier_ok(netdev) &&
	    e1000_desc_unused(tx_ring) >= TX_WAKE_THRESHOLD) {
		/* Make sure that anybody stopping the queue after this
		 * sees the new next_to_clean.
		 */
		smp_mb();

		if (netif_queue_stopped(netdev) &&
		    !(test_bit(__E1000_DOWN, &adapter->state))) {
			netif_wake_queue(netdev);
			++adapter->restart_queue;
		}
	}

	if (adapter->detect_tx_hung) {
		/*
		 * Detect a transmit hang in hardware, this serializes the
		 * check with the clearing of time_stamp and movement of i
		 */
		adapter->detect_tx_hung = 0;
		if (tx_ring->buffer_info[i].time_stamp &&
		    time_after(jiffies, tx_ring->buffer_info[i].time_stamp
			       + (adapter->tx_timeout_factor * HZ)) &&
		    !(er32(STATUS) & E1000_STATUS_TXOFF)) {
			schedule_work(&adapter->print_hang_task);
			netif_stop_queue(netdev);
		}
	}
	adapter->total_tx_bytes += total_tx_bytes;
	adapter->total_tx_packets += total_tx_packets;
	netdev->stats.tx_bytes += total_tx_bytes;
	netdev->stats.tx_packets += total_tx_packets;
	return (count < tx_ring->count);
}

/**
 * e1000_clean_rx_irq_ps - Send received data up the network stack; packet split
 * @adapter: board private structure
 *
 * the return value indicates whether actual cleaning was done, there
 * is no guarantee that everything was cleaned
 **/
static bool e1000_clean_rx_irq_ps(struct e1000_adapter *adapter,
				  int *work_done, int work_to_do)
{
	struct e1000_hw *hw = &adapter->hw;
	union e1000_rx_desc_packet_split *rx_desc, *next_rxd;
	struct net_device *netdev = adapter->netdev;
	struct pci_dev *pdev = adapter->pdev;
	struct e1000_ring *rx_ring = adapter->rx_ring;
	struct e1000_buffer *buffer_info, *next_buffer;
	struct e1000_ps_page *ps_page;
	struct sk_buff *skb;
	unsigned int i, j;
	u32 length, staterr;
	int cleaned_count = 0;
	bool cleaned = 0;
	unsigned int total_rx_bytes = 0, total_rx_packets = 0;

	i = rx_ring->next_to_clean;
	rx_desc = E1000_RX_DESC_PS(*rx_ring, i);
	staterr = le32_to_cpu(rx_desc->wb.middle.status_error);
	buffer_info = &rx_ring->buffer_info[i];

	while (staterr & E1000_RXD_STAT_DD) {
		if (*work_done >= work_to_do)
			break;
		(*work_done)++;
		skb = buffer_info->skb;
		rmb();	/* read descriptor and rx_buffer_info after status DD */

		/* in the packet split case this is header only */
		prefetch(skb->data - NET_IP_ALIGN);

		i++;
		if (i == rx_ring->count)
			i = 0;
		next_rxd = E1000_RX_DESC_PS(*rx_ring, i);
		prefetch(next_rxd);

		next_buffer = &rx_ring->buffer_info[i];

		cleaned = 1;
		cleaned_count++;
		dma_unmap_single(&pdev->dev, buffer_info->dma,
				 adapter->rx_ps_bsize0,
				 DMA_FROM_DEVICE);
		buffer_info->dma = 0;

		/* see !EOP comment in other rx routine */
		if (!(staterr & E1000_RXD_STAT_EOP))
			adapter->flags2 |= FLAG2_IS_DISCARDING;

		if (adapter->flags2 & FLAG2_IS_DISCARDING) {
			e_dbg("Packet Split buffers didn't pick up the full "
			      "packet\n");
			dev_kfree_skb_irq(skb);
			if (staterr & E1000_RXD_STAT_EOP)
				adapter->flags2 &= ~FLAG2_IS_DISCARDING;
			goto next_desc;
		}

		if (staterr & E1000_RXDEXT_ERR_FRAME_ERR_MASK) {
			dev_kfree_skb_irq(skb);
			goto next_desc;
		}

		length = le16_to_cpu(rx_desc->wb.middle.length0);

		if (!length) {
			e_dbg("Last part of the packet spanning multiple "
			      "descriptors\n");
			dev_kfree_skb_irq(skb);
			goto next_desc;
		}

		/* Good Receive */
		skb_put(skb, length);

		{
			/*
			 * this looks ugly, but it seems compiler issues make
			 * it more efficient than reusing j
			 */
			int l1 = le16_to_cpu(rx_desc->wb.upper.length[0]);

			/*
			 * page alloc/put takes too long and affects small
			 * packet throughput, so unsplit small packets and
			 * save the alloc/put; only valid in softirq (napi)
			 * context to call kmap_*
			 */
			if (l1 && (l1 <= copybreak) &&
			    ((length + l1) <= adapter->rx_ps_bsize0)) {
				u8 *vaddr;

				ps_page = &buffer_info->ps_pages[0];

				/*
				 * there is no documentation about how to call
				 * kmap_atomic, so we can't hold the mapping
				 * very long
				 */
				dma_sync_single_for_cpu(&pdev->dev,
							ps_page->dma,
							PAGE_SIZE,
							DMA_FROM_DEVICE);
				vaddr = kmap_atomic(ps_page->page,
						    KM_SKB_DATA_SOFTIRQ);
				memcpy(skb_tail_pointer(skb), vaddr, l1);
				kunmap_atomic(vaddr, KM_SKB_DATA_SOFTIRQ);
				dma_sync_single_for_device(&pdev->dev,
							   ps_page->dma,
							   PAGE_SIZE,
							   DMA_FROM_DEVICE);

				/* remove the CRC */
				if (!(adapter->flags2 & FLAG2_CRC_STRIPPING))
					l1 -= 4;

				skb_put(skb, l1);
				goto copydone;
			} /* if */
		}

		for (j = 0; j < PS_PAGE_BUFFERS; j++) {
			length = le16_to_cpu(rx_desc->wb.upper.length[j]);
			if (!length)
				break;

			ps_page = &buffer_info->ps_pages[j];
			dma_unmap_page(&pdev->dev, ps_page->dma, PAGE_SIZE,
				       DMA_FROM_DEVICE);
			ps_page->dma = 0;
			skb_fill_page_desc(skb, j, ps_page->page, 0, length);
			ps_page->page = NULL;
			skb->len += length;
			skb->data_len += length;
			skb->truesize += length;
		}

		/*
		 * strip the ethernet crc, problem is we're using pages now so
		 * this whole operation can get a little cpu intensive
		 */
		if (!(adapter->flags2 & FLAG2_CRC_STRIPPING))
			pskb_trim(skb, skb->len - 4);

copydone:
		total_rx_bytes += skb->len;
		total_rx_packets++;

		e1000_rx_checksum(adapter, staterr, le16_to_cpu(
			rx_desc->wb.lower.hi_dword.csum_ip.csum), skb);

		if (rx_desc->wb.upper.header_status &
		    cpu_to_le16(E1000_RXDPS_HDRSTAT_HDRSP))
			adapter->rx_hdr_split++;

		e1000_receive_skb(adapter, netdev, skb,
				  staterr, rx_desc->wb.middle.vlan);

next_desc:
		rx_desc->wb.middle.status_error &= cpu_to_le32(~0xFF);
		buffer_info->skb = NULL;

		/* return some buffers to hardware, one at a time is too slow */
		if (cleaned_count >= E1000_RX_BUFFER_WRITE) {
			adapter->alloc_rx_buf(adapter, cleaned_count);
			cleaned_count = 0;
		}

		/* use prefetched values */
		rx_desc = next_rxd;
		buffer_info = next_buffer;

		staterr = le32_to_cpu(rx_desc->wb.middle.status_error);
	}
	rx_ring->next_to_clean = i;

	cleaned_count = e1000_desc_unused(rx_ring);
	if (cleaned_count)
		adapter->alloc_rx_buf(adapter, cleaned_count);

	adapter->total_rx_bytes += total_rx_bytes;
	adapter->total_rx_packets += total_rx_packets;
	netdev->stats.rx_bytes += total_rx_bytes;
	netdev->stats.rx_packets += total_rx_packets;
	return cleaned;
}

/**
 * e1000_consume_page - helper function
 **/
static void e1000_consume_page(struct e1000_buffer *bi, struct sk_buff *skb,
			       u16 length)
{
	bi->page = NULL;
	skb->len += length;
	skb->data_len += length;
	skb->truesize += length;
}

/**
 * e1000_clean_jumbo_rx_irq - Send received data up the network stack; legacy
 * @adapter: board private structure
 *
 * the return value indicates whether actual cleaning was done, there
 * is no guarantee that everything was cleaned
 **/

static bool e1000_clean_jumbo_rx_irq(struct e1000_adapter *adapter,
				     int *work_done, int work_to_do)
{
	struct net_device *netdev = adapter->netdev;
	struct pci_dev *pdev = adapter->pdev;
	struct e1000_ring *rx_ring = adapter->rx_ring;
	struct e1000_rx_desc *rx_desc, *next_rxd;
	struct e1000_buffer *buffer_info, *next_buffer;
	u32 length;
	unsigned int i;
	int cleaned_count = 0;
	bool cleaned = false;
	unsigned int total_rx_bytes = 0, total_rx_packets = 0;

	i = rx_ring->next_to_clean;
	rx_desc = E1000_RX_DESC(*rx_ring, i);
	buffer_info = &rx_ring->buffer_info[i];

	while (rx_desc->status & E1000_RXD_STAT_DD) {
		struct sk_buff *skb;
		u8 status;

		if (*work_done >= work_to_do)
			break;
		(*work_done)++;
		rmb();	/* read descriptor and rx_buffer_info after status DD */

		status = rx_desc->status;
		skb = buffer_info->skb;
		buffer_info->skb = NULL;

		++i;
		if (i == rx_ring->count)
			i = 0;
		next_rxd = E1000_RX_DESC(*rx_ring, i);
		prefetch(next_rxd);

		next_buffer = &rx_ring->buffer_info[i];

		cleaned = true;
		cleaned_count++;
		dma_unmap_page(&pdev->dev, buffer_info->dma, PAGE_SIZE,
			       DMA_FROM_DEVICE);
		buffer_info->dma = 0;

		length = le16_to_cpu(rx_desc->length);

		/* errors is only valid for DD + EOP descriptors */
		if (unlikely((status & E1000_RXD_STAT_EOP) &&
			     (rx_desc->errors & E1000_RXD_ERR_FRAME_ERR_MASK))) {
			/* recycle both page and skb */
			buffer_info->skb = skb;
			/* an error means any chain goes out the window too */
			if (rx_ring->rx_skb_top)
				dev_kfree_skb(rx_ring->rx_skb_top);
			rx_ring->rx_skb_top = NULL;
			goto next_desc;
		}

#define rxtop rx_ring->rx_skb_top
		if (!(status & E1000_RXD_STAT_EOP)) {
			/* this descriptor is only the beginning (or middle) */
			if (!rxtop) {
				/* this is the beginning of a chain */
				rxtop = skb;
				skb_fill_page_desc(rxtop, 0, buffer_info->page,
						   0, length);
			} else {
				/* this is the middle of a chain */
				skb_fill_page_desc(rxtop,
						   skb_shinfo(rxtop)->nr_frags,
						   buffer_info->page, 0, length);
				/* re-use the skb, only consumed the page */
				buffer_info->skb = skb;
			}
			e1000_consume_page(buffer_info, rxtop, length);
			goto next_desc;
		} else {
			if (rxtop) {
				/* end of the chain */
				skb_fill_page_desc(rxtop,
						   skb_shinfo(rxtop)->nr_frags,
						   buffer_info->page, 0, length);
				/*
				 * re-use the current skb, we only consumed
				 * the page
				 */
				buffer_info->skb = skb;
				skb = rxtop;
				rxtop = NULL;
				e1000_consume_page(buffer_info, skb, length);
			} else {
				/*
				 * no chain, got EOP, this buf is the packet
				 * copybreak to save the put_page/alloc_page
				 */
				if (length <= copybreak &&
				    skb_tailroom(skb) >= length) {
					u8 *vaddr;
					vaddr = kmap_atomic(buffer_info->page,
							    KM_SKB_DATA_SOFTIRQ);
					memcpy(skb_tail_pointer(skb), vaddr,
					       length);
					kunmap_atomic(vaddr,
						      KM_SKB_DATA_SOFTIRQ);
					/*
					 * re-use the page, so don't erase
					 * buffer_info->page
					 */
					skb_put(skb, length);
				} else {
					skb_fill_page_desc(skb, 0,
							   buffer_info->page, 0,
							   length);
					e1000_consume_page(buffer_info, skb,
							   length);
				}
			}
		}
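		/*
		 * At this point a frame that spanned several descriptors
		 * has been collapsed into one skb: the first slot's skb
		 * became the head, each PAGE_SIZE buffer was attached as a
		 * page fragment, and the small skbs of the later slots
		 * were handed back to their ring slots for reuse.
		 */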

		/* Receive Checksum Offload XXX recompute due to CRC strip? */
		e1000_rx_checksum(adapter,
				  (u32)(status) |
				  ((u32)(rx_desc->errors) << 24),
				  le16_to_cpu(rx_desc->csum), skb);

		/* probably a little skewed due to removing CRC */
		total_rx_bytes += skb->len;
		total_rx_packets++;

		/* eth type trans needs skb->data to point to something */
		if (!pskb_may_pull(skb, ETH_HLEN)) {
			e_err("pskb_may_pull failed.\n");
			dev_kfree_skb(skb);
			goto next_desc;
		}

		e1000_receive_skb(adapter, netdev, skb, status,
				  rx_desc->special);

next_desc:
		rx_desc->status = 0;

		/* return some buffers to hardware, one at a time is too slow */
		if (unlikely(cleaned_count >= E1000_RX_BUFFER_WRITE)) {
			adapter->alloc_rx_buf(adapter, cleaned_count);
			cleaned_count = 0;
		}

		/* use prefetched values */
		rx_desc = next_rxd;
		buffer_info = next_buffer;
	}
	rx_ring->next_to_clean = i;

	cleaned_count = e1000_desc_unused(rx_ring);
	if (cleaned_count)
		adapter->alloc_rx_buf(adapter, cleaned_count);

	adapter->total_rx_bytes += total_rx_bytes;
	adapter->total_rx_packets += total_rx_packets;
	netdev->stats.rx_bytes += total_rx_bytes;
	netdev->stats.rx_packets += total_rx_packets;
	return cleaned;
}

/**
 * e1000_clean_rx_ring - Free Rx Buffers per Queue
 * @adapter: board private structure
 **/
static void e1000_clean_rx_ring(struct e1000_adapter *adapter)
{
	struct e1000_ring *rx_ring = adapter->rx_ring;
	struct e1000_buffer *buffer_info;
	struct e1000_ps_page *ps_page;
	struct pci_dev *pdev = adapter->pdev;
	unsigned int i, j;

	/* Free all the Rx ring sk_buffs */
	for (i = 0; i < rx_ring->count; i++) {
		buffer_info = &rx_ring->buffer_info[i];
		if (buffer_info->dma) {
			if (adapter->clean_rx == e1000_clean_rx_irq)
				dma_unmap_single(&pdev->dev, buffer_info->dma,
						 adapter->rx_buffer_len,
						 DMA_FROM_DEVICE);
			else if (adapter->clean_rx == e1000_clean_jumbo_rx_irq)
				dma_unmap_page(&pdev->dev, buffer_info->dma,
					       PAGE_SIZE,
					       DMA_FROM_DEVICE);
			else if (adapter->clean_rx == e1000_clean_rx_irq_ps)
				dma_unmap_single(&pdev->dev, buffer_info->dma,
						 adapter->rx_ps_bsize0,
						 DMA_FROM_DEVICE);
			buffer_info->dma = 0;
		}

		if (buffer_info->page) {
			put_page(buffer_info->page);
			buffer_info->page = NULL;
		}

		if (buffer_info->skb) {
			dev_kfree_skb(buffer_info->skb);
			buffer_info->skb = NULL;
		}

		for (j = 0; j < PS_PAGE_BUFFERS; j++) {
			ps_page = &buffer_info->ps_pages[j];
			if (!ps_page->page)
				break;
			dma_unmap_page(&pdev->dev, ps_page->dma, PAGE_SIZE,
				       DMA_FROM_DEVICE);
			ps_page->dma = 0;
			put_page(ps_page->page);
			ps_page->page = NULL;
		}
	}

	/* there also may be some cached data from a chained receive */
	if (rx_ring->rx_skb_top) {
		dev_kfree_skb(rx_ring->rx_skb_top);
		rx_ring->rx_skb_top = NULL;
	}

	/* Zero out the descriptor ring */
	memset(rx_ring->desc, 0, rx_ring->size);

	rx_ring->next_to_clean = 0;
	rx_ring->next_to_use = 0;
	adapter->flags2 &= ~FLAG2_IS_DISCARDING;

	writel(0, adapter->hw.hw_addr + rx_ring->head);
	writel(0, adapter->hw.hw_addr + rx_ring->tail);
}

static void e1000e_downshift_workaround(struct work_struct *work)
{
	struct e1000_adapter *adapter = container_of(work,
					struct e1000_adapter, downshift_task);

	e1000e_gig_downshift_workaround_ich8lan(&adapter->hw);
}

/**
 * e1000_intr_msi - Interrupt Handler
 * @irq: interrupt number
 * @data: pointer to a network interface device structure
 **/
static irqreturn_t e1000_intr_msi(int irq, void *data)
{
	struct net_device *netdev = data;
	struct e1000_adapter *adapter = netdev_priv(netdev);
	struct e1000_hw *hw = &adapter->hw;
	u32 icr = er32(ICR);

	/*
	 * read ICR disables interrupts using IAM
	 */

	if (icr & E1000_ICR_LSC) {
		hw->mac.get_link_status = 1;
		/*
		 * ICH8 workaround-- Call gig speed drop workaround on cable
		 * disconnect (LSC) before accessing any PHY registers
		 */
		if ((adapter->flags & FLAG_LSC_GIG_SPEED_DROP) &&
		    (!(er32(STATUS) & E1000_STATUS_LU)))
			schedule_work(&adapter->downshift_task);

		/*
		 * 80003ES2LAN workaround-- For packet buffer work-around on
		 * link down event; disable receives here in the ISR and reset
		 * adapter in watchdog
		 */
		if (netif_carrier_ok(netdev) &&
		    adapter->flags & FLAG_RX_NEEDS_RESTART) {
			/* disable receives */
			u32 rctl = er32(RCTL);
			ew32(RCTL, rctl & ~E1000_RCTL_EN);
			adapter->flags |= FLAG_RX_RESTART_NOW;
		}
		/* guard against interrupt when we're going down */
		if (!test_bit(__E1000_DOWN, &adapter->state))
			mod_timer(&adapter->watchdog_timer, jiffies + 1);
	}

	if (napi_schedule_prep(&adapter->napi)) {
		adapter->total_tx_bytes = 0;
		adapter->total_tx_packets = 0;
		adapter->total_rx_bytes = 0;
		adapter->total_rx_packets = 0;
		__napi_schedule(&adapter->napi);
	}

	return IRQ_HANDLED;
}
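/*
 * Unlike the MSI handler above, the legacy interrupt line may be shared
 * with other devices, so e1000_intr() below first reads ICR and checks
 * INT_ASSERTED to decide whether the interrupt is ours at all.
 */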
/**
 * e1000_intr - Interrupt Handler
 * @irq: interrupt number
 * @data: pointer to a network interface device structure
 **/
static irqreturn_t e1000_intr(int irq, void *data)
{
	struct net_device *netdev = data;
	struct e1000_adapter *adapter = netdev_priv(netdev);
	struct e1000_hw *hw = &adapter->hw;
	u32 rctl, icr = er32(ICR);

	if (!icr || test_bit(__E1000_DOWN, &adapter->state))
		return IRQ_NONE;  /* Not our interrupt */

	/*
	 * IMS will not auto-mask if INT_ASSERTED is not set, and if it is
	 * not set, then the adapter didn't send an interrupt
	 */
	if (!(icr & E1000_ICR_INT_ASSERTED))
		return IRQ_NONE;

	/*
	 * Interrupt Auto-Mask...upon reading ICR,
	 * interrupts are masked.  No need for the
	 * IMC write
	 */

	if (icr & E1000_ICR_LSC) {
		hw->mac.get_link_status = 1;
		/*
		 * ICH8 workaround-- Call gig speed drop workaround on cable
		 * disconnect (LSC) before accessing any PHY registers
		 */
		if ((adapter->flags & FLAG_LSC_GIG_SPEED_DROP) &&
		    (!(er32(STATUS) & E1000_STATUS_LU)))
			schedule_work(&adapter->downshift_task);

		/*
		 * 80003ES2LAN workaround--
		 * For packet buffer work-around on link down event;
		 * disable receives here in the ISR and
		 * reset adapter in watchdog
		 */
		if (netif_carrier_ok(netdev) &&
		    (adapter->flags & FLAG_RX_NEEDS_RESTART)) {
			/* disable receives */
			rctl = er32(RCTL);
			ew32(RCTL, rctl & ~E1000_RCTL_EN);
			adapter->flags |= FLAG_RX_RESTART_NOW;
		}
		/* guard against interrupt when we're going down */
		if (!test_bit(__E1000_DOWN, &adapter->state))
			mod_timer(&adapter->watchdog_timer, jiffies + 1);
	}

	if (napi_schedule_prep(&adapter->napi)) {
		adapter->total_tx_bytes = 0;
		adapter->total_tx_packets = 0;
		adapter->total_rx_bytes = 0;
		adapter->total_rx_packets = 0;
		__napi_schedule(&adapter->napi);
	}

	return IRQ_HANDLED;
}

static irqreturn_t e1000_msix_other(int irq, void *data)
{
	struct net_device *netdev = data;
	struct e1000_adapter *adapter = netdev_priv(netdev);
	struct e1000_hw *hw = &adapter->hw;
	u32 icr = er32(ICR);

	if (!(icr & E1000_ICR_INT_ASSERTED)) {
		if (!test_bit(__E1000_DOWN, &adapter->state))
			ew32(IMS, E1000_IMS_OTHER);
		return IRQ_NONE;
	}

	if (icr & adapter->eiac_mask)
		ew32(ICS, (icr & adapter->eiac_mask));

	if (icr & E1000_ICR_OTHER) {
		if (!(icr & E1000_ICR_LSC))
			goto no_link_interrupt;
		hw->mac.get_link_status = 1;
		/* guard against interrupt when we're going down */
		if (!test_bit(__E1000_DOWN, &adapter->state))
			mod_timer(&adapter->watchdog_timer, jiffies + 1);
	}

no_link_interrupt:
	if (!test_bit(__E1000_DOWN, &adapter->state))
		ew32(IMS, E1000_IMS_LSC | E1000_IMS_OTHER);

	return IRQ_HANDLED;
}

static irqreturn_t e1000_intr_msix_tx(int irq, void *data)
{
	struct net_device *netdev = data;
	struct e1000_adapter *adapter = netdev_priv(netdev);
	struct e1000_hw *hw = &adapter->hw;
	struct e1000_ring *tx_ring = adapter->tx_ring;

	adapter->total_tx_bytes = 0;
	adapter->total_tx_packets = 0;

	if (!e1000_clean_tx_irq(adapter))
		/* Ring was not completely cleaned, so fire another interrupt */
		ew32(ICS, tx_ring->ims_val);

	return IRQ_HANDLED;
}

static irqreturn_t e1000_intr_msix_rx(int irq, void *data)
{
	struct net_device *netdev = data;
	struct e1000_adapter *adapter = netdev_priv(netdev);

	/*
	 * Write the ITR value calculated at the end of the
	 * previous interrupt.
	 */
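	/*
	 * The interrupt throttle registers count in 256 ns units, so an
	 * itr_val of N interrupts/sec becomes 10^9 / (N * 256) register
	 * units; e.g. itr_val = 20000 gives 1000000000 / (20000 * 256)
	 * = 195, i.e. roughly one interrupt every 50 us.
	 */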
fp@2359: */ fp@2359: if (adapter->rx_ring->set_itr) { fp@2359: writel(1000000000 / (adapter->rx_ring->itr_val * 256), fp@2359: adapter->hw.hw_addr + adapter->rx_ring->itr_register); fp@2359: adapter->rx_ring->set_itr = 0; fp@2359: } fp@2359: fp@2359: if (napi_schedule_prep(&adapter->napi)) { fp@2359: adapter->total_rx_bytes = 0; fp@2359: adapter->total_rx_packets = 0; fp@2359: __napi_schedule(&adapter->napi); fp@2359: } fp@2359: return IRQ_HANDLED; fp@2359: } fp@2359: fp@2359: /** fp@2359: * e1000_configure_msix - Configure MSI-X hardware fp@2359: * fp@2359: * e1000_configure_msix sets up the hardware to properly fp@2359: * generate MSI-X interrupts. fp@2359: **/ fp@2359: static void e1000_configure_msix(struct e1000_adapter *adapter) fp@2359: { fp@2359: struct e1000_hw *hw = &adapter->hw; fp@2359: struct e1000_ring *rx_ring = adapter->rx_ring; fp@2359: struct e1000_ring *tx_ring = adapter->tx_ring; fp@2359: int vector = 0; fp@2359: u32 ctrl_ext, ivar = 0; fp@2359: fp@2359: adapter->eiac_mask = 0; fp@2359: fp@2359: /* Workaround issue with spurious interrupts on 82574 in MSI-X mode */ fp@2359: if (hw->mac.type == e1000_82574) { fp@2359: u32 rfctl = er32(RFCTL); fp@2359: rfctl |= E1000_RFCTL_ACK_DIS; fp@2359: ew32(RFCTL, rfctl); fp@2359: } fp@2359: fp@2359: #define E1000_IVAR_INT_ALLOC_VALID 0x8 fp@2359: /* Configure Rx vector */ fp@2359: rx_ring->ims_val = E1000_IMS_RXQ0; fp@2359: adapter->eiac_mask |= rx_ring->ims_val; fp@2359: if (rx_ring->itr_val) fp@2359: writel(1000000000 / (rx_ring->itr_val * 256), fp@2359: hw->hw_addr + rx_ring->itr_register); fp@2359: else fp@2359: writel(1, hw->hw_addr + rx_ring->itr_register); fp@2359: ivar = E1000_IVAR_INT_ALLOC_VALID | vector; fp@2359: fp@2359: /* Configure Tx vector */ fp@2359: tx_ring->ims_val = E1000_IMS_TXQ0; fp@2359: vector++; fp@2359: if (tx_ring->itr_val) fp@2359: writel(1000000000 / (tx_ring->itr_val * 256), fp@2359: hw->hw_addr + tx_ring->itr_register); fp@2359: else fp@2359: writel(1, hw->hw_addr + tx_ring->itr_register); fp@2359: adapter->eiac_mask |= tx_ring->ims_val; fp@2359: ivar |= ((E1000_IVAR_INT_ALLOC_VALID | vector) << 8); fp@2359: fp@2359: /* set vector for Other Causes, e.g. 
link changes */ fp@2359: vector++; fp@2359: ivar |= ((E1000_IVAR_INT_ALLOC_VALID | vector) << 16); fp@2359: if (rx_ring->itr_val) fp@2359: writel(1000000000 / (rx_ring->itr_val * 256), fp@2359: hw->hw_addr + E1000_EITR_82574(vector)); fp@2359: else fp@2359: writel(1, hw->hw_addr + E1000_EITR_82574(vector)); fp@2359: fp@2359: /* Cause Tx interrupts on every write back */ fp@2359: ivar |= (1 << 31); fp@2359: fp@2359: ew32(IVAR, ivar); fp@2359: fp@2359: /* enable MSI-X PBA support */ fp@2359: ctrl_ext = er32(CTRL_EXT); fp@2359: ctrl_ext |= E1000_CTRL_EXT_PBA_CLR; fp@2359: fp@2359: /* Auto-Mask Other interrupts upon ICR read */ fp@2359: #define E1000_EIAC_MASK_82574 0x01F00000 fp@2359: ew32(IAM, ~E1000_EIAC_MASK_82574 | E1000_IMS_OTHER); fp@2359: ctrl_ext |= E1000_CTRL_EXT_EIAME; fp@2359: ew32(CTRL_EXT, ctrl_ext); fp@2359: e1e_flush(); fp@2359: } fp@2359: fp@2359: void e1000e_reset_interrupt_capability(struct e1000_adapter *adapter) fp@2359: { fp@2359: if (adapter->msix_entries) { fp@2359: pci_disable_msix(adapter->pdev); fp@2359: kfree(adapter->msix_entries); fp@2359: adapter->msix_entries = NULL; fp@2359: } else if (adapter->flags & FLAG_MSI_ENABLED) { fp@2359: pci_disable_msi(adapter->pdev); fp@2359: adapter->flags &= ~FLAG_MSI_ENABLED; fp@2359: } fp@2359: } fp@2359: fp@2359: /** fp@2359: * e1000e_set_interrupt_capability - set MSI or MSI-X if supported fp@2359: * fp@2359: * Attempt to configure interrupts using the best available fp@2359: * capabilities of the hardware and kernel. fp@2359: **/ fp@2359: void e1000e_set_interrupt_capability(struct e1000_adapter *adapter) fp@2359: { fp@2359: int err; fp@2359: int numvecs, i; fp@2359: fp@2359: fp@2359: switch (adapter->int_mode) { fp@2359: case E1000E_INT_MODE_MSIX: fp@2359: if (adapter->flags & FLAG_HAS_MSIX) { fp@2359: numvecs = 3; /* RxQ0, TxQ0 and other */ fp@2359: adapter->msix_entries = kcalloc(numvecs, fp@2359: sizeof(struct msix_entry), fp@2359: GFP_KERNEL); fp@2359: if (adapter->msix_entries) { fp@2359: for (i = 0; i < numvecs; i++) fp@2359: adapter->msix_entries[i].entry = i; fp@2359: fp@2359: err = pci_enable_msix(adapter->pdev, fp@2359: adapter->msix_entries, fp@2359: numvecs); fp@2359: if (err == 0) fp@2359: return; fp@2359: } fp@2359: /* MSI-X failed, so fall through and try MSI */ fp@2359: e_err("Failed to initialize MSI-X interrupts. " fp@2359: "Falling back to MSI interrupts.\n"); fp@2359: e1000e_reset_interrupt_capability(adapter); fp@2359: } fp@2359: adapter->int_mode = E1000E_INT_MODE_MSI; fp@2359: /* Fall through */ fp@2359: case E1000E_INT_MODE_MSI: fp@2359: if (!pci_enable_msi(adapter->pdev)) { fp@2359: adapter->flags |= FLAG_MSI_ENABLED; fp@2359: } else { fp@2359: adapter->int_mode = E1000E_INT_MODE_LEGACY; fp@2359: e_err("Failed to initialize MSI interrupts. Falling " fp@2359: "back to legacy interrupts.\n"); fp@2359: } fp@2359: /* Fall through */ fp@2359: case E1000E_INT_MODE_LEGACY: fp@2359: /* Don't do anything; this is the system default */ fp@2359: break; fp@2359: } fp@2359: } fp@2359: fp@2359: /** fp@2359: * e1000_request_msix - Initialize MSI-X interrupts fp@2359: * fp@2359: * e1000_request_msix allocates MSI-X vectors and requests interrupts from the fp@2359: * kernel. 
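fp@2359: *
fp@2359: * Vector 0 is requested for the Rx queue, vector 1 for the Tx queue
fp@2359: * and vector 2 for other causes such as link state changes, matching
fp@2359: * the IVAR layout programmed by e1000_configure_msix().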
fp@2359: **/ fp@2359: static int e1000_request_msix(struct e1000_adapter *adapter) fp@2359: { fp@2359: struct net_device *netdev = adapter->netdev; fp@2359: int err = 0, vector = 0; fp@2359: fp@2359: if (strlen(netdev->name) < (IFNAMSIZ - 5)) fp@2359: sprintf(adapter->rx_ring->name, "%s-rx-0", netdev->name); fp@2359: else fp@2359: memcpy(adapter->rx_ring->name, netdev->name, IFNAMSIZ); fp@2359: err = request_irq(adapter->msix_entries[vector].vector, fp@2359: e1000_intr_msix_rx, 0, adapter->rx_ring->name, fp@2359: netdev); fp@2359: if (err) fp@2359: goto out; fp@2359: adapter->rx_ring->itr_register = E1000_EITR_82574(vector); fp@2359: adapter->rx_ring->itr_val = adapter->itr; fp@2359: vector++; fp@2359: fp@2359: if (strlen(netdev->name) < (IFNAMSIZ - 5)) fp@2359: sprintf(adapter->tx_ring->name, "%s-tx-0", netdev->name); fp@2359: else fp@2359: memcpy(adapter->tx_ring->name, netdev->name, IFNAMSIZ); fp@2359: err = request_irq(adapter->msix_entries[vector].vector, fp@2359: e1000_intr_msix_tx, 0, adapter->tx_ring->name, fp@2359: netdev); fp@2359: if (err) fp@2359: goto out; fp@2359: adapter->tx_ring->itr_register = E1000_EITR_82574(vector); fp@2359: adapter->tx_ring->itr_val = adapter->itr; fp@2359: vector++; fp@2359: fp@2359: err = request_irq(adapter->msix_entries[vector].vector, fp@2359: e1000_msix_other, 0, netdev->name, netdev); fp@2359: if (err) fp@2359: goto out; fp@2359: fp@2359: e1000_configure_msix(adapter); fp@2359: return 0; fp@2359: out: fp@2359: return err; fp@2359: } fp@2359: fp@2359: /** fp@2359: * e1000_request_irq - initialize interrupts fp@2359: * fp@2359: * Attempts to configure interrupts using the best available fp@2359: * capabilities of the hardware and kernel. fp@2359: **/ fp@2359: static int e1000_request_irq(struct e1000_adapter *adapter) fp@2359: { fp@2359: struct net_device *netdev = adapter->netdev; fp@2359: int err; fp@2359: fp@2359: if (adapter->msix_entries) { fp@2359: err = e1000_request_msix(adapter); fp@2359: if (!err) fp@2359: return err; fp@2359: /* fall back to MSI */ fp@2359: e1000e_reset_interrupt_capability(adapter); fp@2359: adapter->int_mode = E1000E_INT_MODE_MSI; fp@2359: e1000e_set_interrupt_capability(adapter); fp@2359: } fp@2359: if (adapter->flags & FLAG_MSI_ENABLED) { fp@2359: err = request_irq(adapter->pdev->irq, e1000_intr_msi, 0, fp@2359: netdev->name, netdev); fp@2359: if (!err) fp@2359: return err; fp@2359: fp@2359: /* fall back to legacy interrupt */ fp@2359: e1000e_reset_interrupt_capability(adapter); fp@2359: adapter->int_mode = E1000E_INT_MODE_LEGACY; fp@2359: } fp@2359: fp@2359: err = request_irq(adapter->pdev->irq, e1000_intr, IRQF_SHARED, fp@2359: netdev->name, netdev); fp@2359: if (err) fp@2359: e_err("Unable to allocate interrupt, Error: %d\n", err); fp@2359: fp@2359: return err; fp@2359: } fp@2359: fp@2359: static void e1000_free_irq(struct e1000_adapter *adapter) fp@2359: { fp@2359: struct net_device *netdev = adapter->netdev; fp@2359: fp@2359: if (adapter->msix_entries) { fp@2359: int vector = 0; fp@2359: fp@2359: free_irq(adapter->msix_entries[vector].vector, netdev); fp@2359: vector++; fp@2359: fp@2359: free_irq(adapter->msix_entries[vector].vector, netdev); fp@2359: vector++; fp@2359: fp@2359: /* Other Causes interrupt vector */ fp@2359: free_irq(adapter->msix_entries[vector].vector, netdev); fp@2359: return; fp@2359: } fp@2359: fp@2359: free_irq(adapter->pdev->irq, netdev); fp@2359: } fp@2359: fp@2359: /** fp@2359: * e1000_irq_disable - Mask off interrupt generation on the NIC fp@2359: **/ fp@2359: static void 
e1000_irq_disable(struct e1000_adapter *adapter)
fp@2359: {
fp@2359: struct e1000_hw *hw = &adapter->hw;
fp@2359:
fp@2359: ew32(IMC, ~0);
fp@2359: if (adapter->msix_entries)
fp@2359: ew32(EIAC_82574, 0);
fp@2359: e1e_flush();
fp@2359: synchronize_irq(adapter->pdev->irq);
fp@2359: }
fp@2359:
fp@2359: /**
fp@2359: * e1000_irq_enable - Enable default interrupt generation settings
fp@2359: **/
fp@2359: static void e1000_irq_enable(struct e1000_adapter *adapter)
fp@2359: {
fp@2359: struct e1000_hw *hw = &adapter->hw;
fp@2359:
fp@2359: if (adapter->msix_entries) {
fp@2359: ew32(EIAC_82574, adapter->eiac_mask & E1000_EIAC_MASK_82574);
fp@2359: ew32(IMS, adapter->eiac_mask | E1000_IMS_OTHER | E1000_IMS_LSC);
fp@2359: } else {
fp@2359: ew32(IMS, IMS_ENABLE_MASK);
fp@2359: }
fp@2359: e1e_flush();
fp@2359: }
fp@2359:
fp@2359: /**
fp@2359: * e1000_get_hw_control - get control of the h/w from f/w
fp@2359: * @adapter: address of board private structure
fp@2359: *
fp@2359: * e1000_get_hw_control sets {CTRL_EXT|SWSM}:DRV_LOAD bit.
fp@2359: * For ASF and Pass Through versions of f/w this means that
fp@2359: * the driver is loaded. For AMT version (only with 82573)
fp@2359: * of the f/w this means that the network i/f is open.
fp@2359: **/
fp@2359: static void e1000_get_hw_control(struct e1000_adapter *adapter)
fp@2359: {
fp@2359: struct e1000_hw *hw = &adapter->hw;
fp@2359: u32 ctrl_ext;
fp@2359: u32 swsm;
fp@2359:
fp@2359: /* Let firmware know the driver has taken over */
fp@2359: if (adapter->flags & FLAG_HAS_SWSM_ON_LOAD) {
fp@2359: swsm = er32(SWSM);
fp@2359: ew32(SWSM, swsm | E1000_SWSM_DRV_LOAD);
fp@2359: } else if (adapter->flags & FLAG_HAS_CTRLEXT_ON_LOAD) {
fp@2359: ctrl_ext = er32(CTRL_EXT);
fp@2359: ew32(CTRL_EXT, ctrl_ext | E1000_CTRL_EXT_DRV_LOAD);
fp@2359: }
fp@2359: }
fp@2359:
fp@2359: /**
fp@2359: * e1000_release_hw_control - release control of the h/w to f/w
fp@2359: * @adapter: address of board private structure
fp@2359: *
fp@2359: * e1000_release_hw_control resets {CTRL_EXT|SWSM}:DRV_LOAD bit.
fp@2359: * For ASF and Pass Through versions of f/w this means that the
fp@2359: * driver is no longer loaded. For AMT version (only with 82573)
fp@2359: * of the f/w this means that the network i/f is closed.
fp@2359: *
fp@2359: **/
fp@2359: static void e1000_release_hw_control(struct e1000_adapter *adapter)
fp@2359: {
fp@2359: struct e1000_hw *hw = &adapter->hw;
fp@2359: u32 ctrl_ext;
fp@2359: u32 swsm;
fp@2359:
fp@2359: /* Let firmware take over control of the h/w */
fp@2359: if (adapter->flags & FLAG_HAS_SWSM_ON_LOAD) {
fp@2359: swsm = er32(SWSM);
fp@2359: ew32(SWSM, swsm & ~E1000_SWSM_DRV_LOAD);
fp@2359: } else if (adapter->flags & FLAG_HAS_CTRLEXT_ON_LOAD) {
fp@2359: ctrl_ext = er32(CTRL_EXT);
fp@2359: ew32(CTRL_EXT, ctrl_ext & ~E1000_CTRL_EXT_DRV_LOAD);
fp@2359: }
fp@2359: }
fp@2359:
fp@2359: /**
fp@2359: * e1000_alloc_ring_dma - allocate coherent DMA memory for a descriptor ring
fp@2359: **/
fp@2359: static int e1000_alloc_ring_dma(struct e1000_adapter *adapter,
fp@2359: struct e1000_ring *ring)
fp@2359: {
fp@2359: struct pci_dev *pdev = adapter->pdev;
fp@2359:
fp@2359: ring->desc = dma_alloc_coherent(&pdev->dev, ring->size, &ring->dma,
fp@2359: GFP_KERNEL);
fp@2359: if (!ring->desc)
fp@2359: return -ENOMEM;
fp@2359:
fp@2359: return 0;
fp@2359: }
fp@2359:
fp@2359: /**
fp@2359: * e1000e_setup_tx_resources - allocate Tx resources (Descriptors)
fp@2359: * @adapter: board private structure
fp@2359: *
fp@2359: * Return 0 on success, negative on failure
fp@2359: **/
fp@2359: int e1000e_setup_tx_resources(struct e1000_adapter *adapter)
fp@2359: {
fp@2359: struct e1000_ring *tx_ring = adapter->tx_ring;
fp@2359: int err = -ENOMEM, size;
fp@2359:
fp@2359: size = sizeof(struct e1000_buffer) * tx_ring->count;
fp@2359: tx_ring->buffer_info = vmalloc(size);
fp@2359: if (!tx_ring->buffer_info)
fp@2359: goto err;
fp@2359: memset(tx_ring->buffer_info, 0, size);
fp@2359:
fp@2359: /* round up to nearest 4K */
fp@2359: tx_ring->size = tx_ring->count * sizeof(struct e1000_tx_desc);
fp@2359: tx_ring->size = ALIGN(tx_ring->size, 4096);
fp@2359:
fp@2359: err = e1000_alloc_ring_dma(adapter, tx_ring);
fp@2359: if (err)
fp@2359: goto err;
fp@2359:
fp@2359: tx_ring->next_to_use = 0;
fp@2359: tx_ring->next_to_clean = 0;
fp@2359:
fp@2359: return 0;
fp@2359: err:
fp@2359: vfree(tx_ring->buffer_info);
fp@2359: e_err("Unable to allocate memory for the transmit descriptor ring\n");
fp@2359: return err;
fp@2359: }
fp@2359:
fp@2359: /**
fp@2359: * e1000e_setup_rx_resources - allocate Rx resources (Descriptors)
fp@2359: * @adapter: board private structure
fp@2359: *
fp@2359: * Returns 0 on success, negative on failure
fp@2359: **/
fp@2359: int e1000e_setup_rx_resources(struct e1000_adapter *adapter)
fp@2359: {
fp@2359: struct e1000_ring *rx_ring = adapter->rx_ring;
fp@2359: struct e1000_buffer *buffer_info;
fp@2359: int i, size, desc_len, err = -ENOMEM;
fp@2359:
fp@2359: size = sizeof(struct e1000_buffer) * rx_ring->count;
fp@2359: rx_ring->buffer_info = vmalloc(size);
fp@2359: if (!rx_ring->buffer_info)
fp@2359: goto err;
fp@2359: memset(rx_ring->buffer_info, 0, size);
fp@2359:
fp@2359: for (i = 0; i < rx_ring->count; i++) {
fp@2359: buffer_info = &rx_ring->buffer_info[i];
fp@2359: buffer_info->ps_pages = kcalloc(PS_PAGE_BUFFERS,
fp@2359: sizeof(struct e1000_ps_page),
fp@2359: GFP_KERNEL);
fp@2359: if (!buffer_info->ps_pages)
fp@2359: goto err_pages;
fp@2359: }
fp@2359:
fp@2359: desc_len = sizeof(union e1000_rx_desc_packet_split);
fp@2359:
fp@2359: /* Round up to nearest 4K */
fp@2359: rx_ring->size = rx_ring->count * desc_len;
fp@2359: rx_ring->size = ALIGN(rx_ring->size, 4096);
fp@2359:
fp@2359: err = e1000_alloc_ring_dma(adapter, rx_ring);
fp@2359: if (err)
fp@2359: goto err_pages;
fp@2359:
fp@2359: rx_ring->next_to_clean = 0;
fp@2359: rx_ring->next_to_use = 0;
fp@2359: rx_ring->rx_skb_top = NULL;
fp@2359:
fp@2359: return 0;
fp@2359:
fp@2359: err_pages:
fp@2359: for (i = 0; i < rx_ring->count; i++) {
fp@2359: buffer_info = &rx_ring->buffer_info[i];
fp@2359: kfree(buffer_info->ps_pages);
fp@2359: }
fp@2359: err:
fp@2359: vfree(rx_ring->buffer_info);
fp@2359: e_err("Unable to allocate memory for the receive descriptor ring\n");
fp@2359: return err;
fp@2359: }
fp@2359:
fp@2359: /**
fp@2359: * e1000_clean_tx_ring - Free Tx Buffers
fp@2359: * @adapter: board private structure
fp@2359: **/
fp@2359: static void e1000_clean_tx_ring(struct e1000_adapter *adapter)
fp@2359: {
fp@2359: struct e1000_ring *tx_ring = adapter->tx_ring;
fp@2359: struct e1000_buffer *buffer_info;
fp@2359: unsigned long size;
fp@2359: unsigned int i;
fp@2359:
fp@2359: for (i = 0; i < tx_ring->count; i++) {
fp@2359: buffer_info = &tx_ring->buffer_info[i];
fp@2359: e1000_put_txbuf(adapter, buffer_info);
fp@2359: }
fp@2359:
fp@2359: size = sizeof(struct e1000_buffer) * tx_ring->count;
fp@2359: memset(tx_ring->buffer_info, 0, size);
fp@2359:
fp@2359: memset(tx_ring->desc, 0, tx_ring->size);
fp@2359:
fp@2359: tx_ring->next_to_use = 0;
fp@2359: tx_ring->next_to_clean = 0;
fp@2359:
fp@2359: writel(0, adapter->hw.hw_addr + tx_ring->head);
fp@2359: writel(0, adapter->hw.hw_addr + tx_ring->tail);
fp@2359: }
fp@2359:
fp@2359: /**
fp@2359: * e1000e_free_tx_resources - Free Tx Resources per Queue
fp@2359: * @adapter: board private structure
fp@2359: *
fp@2359: * Free all transmit software resources
fp@2359: **/
fp@2359: void e1000e_free_tx_resources(struct e1000_adapter *adapter)
fp@2359: {
fp@2359: struct pci_dev *pdev = adapter->pdev;
fp@2359: struct e1000_ring *tx_ring = adapter->tx_ring;
fp@2359:
fp@2359: e1000_clean_tx_ring(adapter);
fp@2359:
fp@2359: vfree(tx_ring->buffer_info);
fp@2359: tx_ring->buffer_info = NULL;
fp@2359:
fp@2359: dma_free_coherent(&pdev->dev, tx_ring->size, tx_ring->desc,
fp@2359: tx_ring->dma);
fp@2359: tx_ring->desc = NULL;
fp@2359: }
fp@2359:
fp@2359: /**
fp@2359: * e1000e_free_rx_resources - Free Rx Resources
fp@2359: * @adapter: board private structure
fp@2359: *
fp@2359: * Free all receive software resources
fp@2359: **/
fp@2359:
fp@2359: void e1000e_free_rx_resources(struct e1000_adapter *adapter)
fp@2359: {
fp@2359: struct pci_dev *pdev = adapter->pdev;
fp@2359: struct e1000_ring *rx_ring = adapter->rx_ring;
fp@2359: int i;
fp@2359:
fp@2359: e1000_clean_rx_ring(adapter);
fp@2359:
fp@2359: for (i = 0; i < rx_ring->count; i++) {
fp@2359: kfree(rx_ring->buffer_info[i].ps_pages);
fp@2359: }
fp@2359:
fp@2359: vfree(rx_ring->buffer_info);
fp@2359: rx_ring->buffer_info = NULL;
fp@2359:
fp@2359: dma_free_coherent(&pdev->dev, rx_ring->size, rx_ring->desc,
fp@2359: rx_ring->dma);
fp@2359: rx_ring->desc = NULL;
fp@2359: }
fp@2359:
fp@2359: /**
fp@2359: * e1000_update_itr - update the dynamic ITR value based on statistics
fp@2359: * @adapter: pointer to adapter
fp@2359: * @itr_setting: current adapter->itr
fp@2359: * @packets: the number of packets during this measurement interval
fp@2359: * @bytes: the number of bytes during this measurement interval
fp@2359: *
fp@2359: * Stores a new ITR value based on packets and byte
fp@2359: * counts during the last interrupt. The advantage of per interrupt
fp@2359: * computation is faster updates and more accurate ITR for the current
fp@2359: * traffic pattern.
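fp@2359: * (The returned setting is one of three latency classes -
fp@2359: * lowest_latency, low_latency and bulk_latency - which
fp@2359: * e1000_set_itr() below maps to roughly 70000, 20000 and 4000
fp@2359: * interrupts/sec respectively.)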
Constants in this function were computed fp@2359: * based on theoretical maximum wire speed and thresholds were set based fp@2359: * on testing data as well as attempting to minimize response time fp@2359: * while increasing bulk throughput. This functionality is controlled fp@2359: * by the InterruptThrottleRate module parameter. fp@2359: **/ fp@2359: static unsigned int e1000_update_itr(struct e1000_adapter *adapter, fp@2359: u16 itr_setting, int packets, fp@2359: int bytes) fp@2359: { fp@2359: unsigned int retval = itr_setting; fp@2359: fp@2359: if (packets == 0) fp@2359: goto update_itr_done; fp@2359: fp@2359: switch (itr_setting) { fp@2359: case lowest_latency: fp@2359: /* handle TSO and jumbo frames */ fp@2359: if (bytes/packets > 8000) fp@2359: retval = bulk_latency; fp@2359: else if ((packets < 5) && (bytes > 512)) { fp@2359: retval = low_latency; fp@2359: } fp@2359: break; fp@2359: case low_latency: /* 50 usec aka 20000 ints/s */ fp@2359: if (bytes > 10000) { fp@2359: /* this if handles the TSO accounting */ fp@2359: if (bytes/packets > 8000) { fp@2359: retval = bulk_latency; fp@2359: } else if ((packets < 10) || ((bytes/packets) > 1200)) { fp@2359: retval = bulk_latency; fp@2359: } else if ((packets > 35)) { fp@2359: retval = lowest_latency; fp@2359: } fp@2359: } else if (bytes/packets > 2000) { fp@2359: retval = bulk_latency; fp@2359: } else if (packets <= 2 && bytes < 512) { fp@2359: retval = lowest_latency; fp@2359: } fp@2359: break; fp@2359: case bulk_latency: /* 250 usec aka 4000 ints/s */ fp@2359: if (bytes > 25000) { fp@2359: if (packets > 35) { fp@2359: retval = low_latency; fp@2359: } fp@2359: } else if (bytes < 6000) { fp@2359: retval = low_latency; fp@2359: } fp@2359: break; fp@2359: } fp@2359: fp@2359: update_itr_done: fp@2359: return retval; fp@2359: } fp@2359: fp@2359: static void e1000_set_itr(struct e1000_adapter *adapter) fp@2359: { fp@2359: struct e1000_hw *hw = &adapter->hw; fp@2359: u16 current_itr; fp@2359: u32 new_itr = adapter->itr; fp@2359: fp@2359: /* for non-gigabit speeds, just fix the interrupt rate at 4000 */ fp@2359: if (adapter->link_speed != SPEED_1000) { fp@2359: current_itr = 0; fp@2359: new_itr = 4000; fp@2359: goto set_itr_now; fp@2359: } fp@2359: fp@2359: adapter->tx_itr = e1000_update_itr(adapter, fp@2359: adapter->tx_itr, fp@2359: adapter->total_tx_packets, fp@2359: adapter->total_tx_bytes); fp@2359: /* conservative mode (itr 3) eliminates the lowest_latency setting */ fp@2359: if (adapter->itr_setting == 3 && adapter->tx_itr == lowest_latency) fp@2359: adapter->tx_itr = low_latency; fp@2359: fp@2359: adapter->rx_itr = e1000_update_itr(adapter, fp@2359: adapter->rx_itr, fp@2359: adapter->total_rx_packets, fp@2359: adapter->total_rx_bytes); fp@2359: /* conservative mode (itr 3) eliminates the lowest_latency setting */ fp@2359: if (adapter->itr_setting == 3 && adapter->rx_itr == lowest_latency) fp@2359: adapter->rx_itr = low_latency; fp@2359: fp@2359: current_itr = max(adapter->rx_itr, adapter->tx_itr); fp@2359: fp@2359: switch (current_itr) { fp@2359: /* counts and packets in update_itr are dependent on these numbers */ fp@2359: case lowest_latency: fp@2359: new_itr = 70000; fp@2359: break; fp@2359: case low_latency: fp@2359: new_itr = 20000; /* aka hwitr = ~200 */ fp@2359: break; fp@2359: case bulk_latency: fp@2359: new_itr = 4000; fp@2359: break; fp@2359: default: fp@2359: break; fp@2359: } fp@2359: fp@2359: set_itr_now: fp@2359: if (new_itr != adapter->itr) { fp@2359: /* fp@2359: * this attempts to bias the interrupt rate towards Bulk 
fp@2359: * by adding intermediate steps when interrupt rate is fp@2359: * increasing fp@2359: */ fp@2359: new_itr = new_itr > adapter->itr ? fp@2359: min(adapter->itr + (new_itr >> 2), new_itr) : fp@2359: new_itr; fp@2359: adapter->itr = new_itr; fp@2359: adapter->rx_ring->itr_val = new_itr; fp@2359: if (adapter->msix_entries) fp@2359: adapter->rx_ring->set_itr = 1; fp@2359: else fp@2359: ew32(ITR, 1000000000 / (new_itr * 256)); fp@2359: } fp@2359: } fp@2359: fp@2359: /** fp@2359: * e1000_alloc_queues - Allocate memory for all rings fp@2359: * @adapter: board private structure to initialize fp@2359: **/ fp@2359: static int __devinit e1000_alloc_queues(struct e1000_adapter *adapter) fp@2359: { fp@2359: adapter->tx_ring = kzalloc(sizeof(struct e1000_ring), GFP_KERNEL); fp@2359: if (!adapter->tx_ring) fp@2359: goto err; fp@2359: fp@2359: adapter->rx_ring = kzalloc(sizeof(struct e1000_ring), GFP_KERNEL); fp@2359: if (!adapter->rx_ring) fp@2359: goto err; fp@2359: fp@2359: return 0; fp@2359: err: fp@2359: e_err("Unable to allocate memory for queues\n"); fp@2359: kfree(adapter->rx_ring); fp@2359: kfree(adapter->tx_ring); fp@2359: return -ENOMEM; fp@2359: } fp@2359: fp@2359: /** fp@2359: * e1000_clean - NAPI Rx polling callback fp@2359: * @napi: struct associated with this polling callback fp@2359: * @budget: amount of packets driver is allowed to process this poll fp@2359: **/ fp@2359: static int e1000_clean(struct napi_struct *napi, int budget) fp@2359: { fp@2359: struct e1000_adapter *adapter = container_of(napi, struct e1000_adapter, napi); fp@2359: struct e1000_hw *hw = &adapter->hw; fp@2359: struct net_device *poll_dev = adapter->netdev; fp@2359: int tx_cleaned = 1, work_done = 0; fp@2359: fp@2359: adapter = netdev_priv(poll_dev); fp@2359: fp@2359: if (adapter->msix_entries && fp@2359: !(adapter->rx_ring->ims_val & adapter->tx_ring->ims_val)) fp@2359: goto clean_rx; fp@2359: fp@2359: tx_cleaned = e1000_clean_tx_irq(adapter); fp@2359: fp@2359: clean_rx: fp@2359: adapter->clean_rx(adapter, &work_done, budget); fp@2359: fp@2359: if (!tx_cleaned) fp@2359: work_done = budget; fp@2359: fp@2359: /* If budget not fully consumed, exit the polling mode */ fp@2359: if (work_done < budget) { fp@2359: if (adapter->itr_setting & 3) fp@2359: e1000_set_itr(adapter); fp@2359: napi_complete(napi); fp@2359: if (!test_bit(__E1000_DOWN, &adapter->state)) { fp@2359: if (adapter->msix_entries) fp@2359: ew32(IMS, adapter->rx_ring->ims_val); fp@2359: else fp@2359: e1000_irq_enable(adapter); fp@2359: } fp@2359: } fp@2359: fp@2359: return work_done; fp@2359: } fp@2359: fp@2359: static void e1000_vlan_rx_add_vid(struct net_device *netdev, u16 vid) fp@2359: { fp@2359: struct e1000_adapter *adapter = netdev_priv(netdev); fp@2359: struct e1000_hw *hw = &adapter->hw; fp@2359: u32 vfta, index; fp@2359: fp@2359: /* don't update vlan cookie if already programmed */ fp@2359: if ((adapter->hw.mng_cookie.status & fp@2359: E1000_MNG_DHCP_COOKIE_STATUS_VLAN) && fp@2359: (vid == adapter->mng_vlan_id)) fp@2359: return; fp@2359: fp@2359: /* add VID to filter table */ fp@2359: if (adapter->flags & FLAG_HAS_HW_VLAN_FILTER) { fp@2359: index = (vid >> 5) & 0x7F; fp@2359: vfta = E1000_READ_REG_ARRAY(hw, E1000_VFTA, index); fp@2359: vfta |= (1 << (vid & 0x1F)); fp@2359: hw->mac.ops.write_vfta(hw, index, vfta); fp@2359: } fp@2359: } fp@2359: fp@2359: static void e1000_vlan_rx_kill_vid(struct net_device *netdev, u16 vid) fp@2359: { fp@2359: struct e1000_adapter *adapter = netdev_priv(netdev); fp@2359: struct e1000_hw *hw = &adapter->hw; 
fp@2359: u32 vfta, index; fp@2359: fp@2359: if (!test_bit(__E1000_DOWN, &adapter->state)) fp@2359: e1000_irq_disable(adapter); fp@2359: vlan_group_set_device(adapter->vlgrp, vid, NULL); fp@2359: fp@2359: if (!test_bit(__E1000_DOWN, &adapter->state)) fp@2359: e1000_irq_enable(adapter); fp@2359: fp@2359: if ((adapter->hw.mng_cookie.status & fp@2359: E1000_MNG_DHCP_COOKIE_STATUS_VLAN) && fp@2359: (vid == adapter->mng_vlan_id)) { fp@2359: /* release control to f/w */ fp@2359: e1000_release_hw_control(adapter); fp@2359: return; fp@2359: } fp@2359: fp@2359: /* remove VID from filter table */ fp@2359: if (adapter->flags & FLAG_HAS_HW_VLAN_FILTER) { fp@2359: index = (vid >> 5) & 0x7F; fp@2359: vfta = E1000_READ_REG_ARRAY(hw, E1000_VFTA, index); fp@2359: vfta &= ~(1 << (vid & 0x1F)); fp@2359: hw->mac.ops.write_vfta(hw, index, vfta); fp@2359: } fp@2359: } fp@2359: fp@2359: static void e1000_update_mng_vlan(struct e1000_adapter *adapter) fp@2359: { fp@2359: struct net_device *netdev = adapter->netdev; fp@2359: u16 vid = adapter->hw.mng_cookie.vlan_id; fp@2359: u16 old_vid = adapter->mng_vlan_id; fp@2359: fp@2359: if (!adapter->vlgrp) fp@2359: return; fp@2359: fp@2359: if (!vlan_group_get_device(adapter->vlgrp, vid)) { fp@2359: adapter->mng_vlan_id = E1000_MNG_VLAN_NONE; fp@2359: if (adapter->hw.mng_cookie.status & fp@2359: E1000_MNG_DHCP_COOKIE_STATUS_VLAN) { fp@2359: e1000_vlan_rx_add_vid(netdev, vid); fp@2359: adapter->mng_vlan_id = vid; fp@2359: } fp@2359: fp@2359: if ((old_vid != (u16)E1000_MNG_VLAN_NONE) && fp@2359: (vid != old_vid) && fp@2359: !vlan_group_get_device(adapter->vlgrp, old_vid)) fp@2359: e1000_vlan_rx_kill_vid(netdev, old_vid); fp@2359: } else { fp@2359: adapter->mng_vlan_id = vid; fp@2359: } fp@2359: } fp@2359: fp@2359: fp@2359: static void e1000_vlan_rx_register(struct net_device *netdev, fp@2359: struct vlan_group *grp) fp@2359: { fp@2359: struct e1000_adapter *adapter = netdev_priv(netdev); fp@2359: struct e1000_hw *hw = &adapter->hw; fp@2359: u32 ctrl, rctl; fp@2359: fp@2359: if (!test_bit(__E1000_DOWN, &adapter->state)) fp@2359: e1000_irq_disable(adapter); fp@2359: adapter->vlgrp = grp; fp@2359: fp@2359: if (grp) { fp@2359: /* enable VLAN tag insert/strip */ fp@2359: ctrl = er32(CTRL); fp@2359: ctrl |= E1000_CTRL_VME; fp@2359: ew32(CTRL, ctrl); fp@2359: fp@2359: if (adapter->flags & FLAG_HAS_HW_VLAN_FILTER) { fp@2359: /* enable VLAN receive filtering */ fp@2359: rctl = er32(RCTL); fp@2359: rctl &= ~E1000_RCTL_CFIEN; fp@2359: ew32(RCTL, rctl); fp@2359: e1000_update_mng_vlan(adapter); fp@2359: } fp@2359: } else { fp@2359: /* disable VLAN tag insert/strip */ fp@2359: ctrl = er32(CTRL); fp@2359: ctrl &= ~E1000_CTRL_VME; fp@2359: ew32(CTRL, ctrl); fp@2359: fp@2359: if (adapter->flags & FLAG_HAS_HW_VLAN_FILTER) { fp@2359: if (adapter->mng_vlan_id != fp@2359: (u16)E1000_MNG_VLAN_NONE) { fp@2359: e1000_vlan_rx_kill_vid(netdev, fp@2359: adapter->mng_vlan_id); fp@2359: adapter->mng_vlan_id = E1000_MNG_VLAN_NONE; fp@2359: } fp@2359: } fp@2359: } fp@2359: fp@2359: if (!test_bit(__E1000_DOWN, &adapter->state)) fp@2359: e1000_irq_enable(adapter); fp@2359: } fp@2359: fp@2359: static void e1000_restore_vlan(struct e1000_adapter *adapter) fp@2359: { fp@2359: u16 vid; fp@2359: fp@2359: e1000_vlan_rx_register(adapter->netdev, adapter->vlgrp); fp@2359: fp@2359: if (!adapter->vlgrp) fp@2359: return; fp@2359: fp@2359: for (vid = 0; vid < VLAN_GROUP_ARRAY_LEN; vid++) { fp@2359: if (!vlan_group_get_device(adapter->vlgrp, vid)) fp@2359: continue; fp@2359: e1000_vlan_rx_add_vid(adapter->netdev, vid); 
fp@2359: }
fp@2359: }
fp@2359:
fp@2359: static void e1000_init_manageability_pt(struct e1000_adapter *adapter)
fp@2359: {
fp@2359: struct e1000_hw *hw = &adapter->hw;
fp@2359: u32 manc, manc2h, mdef, i, j;
fp@2359:
fp@2359: if (!(adapter->flags & FLAG_MNG_PT_ENABLED))
fp@2359: return;
fp@2359:
fp@2359: manc = er32(MANC);
fp@2359:
fp@2359: /*
fp@2359: * enable receiving management packets to the host. this will probably
fp@2359: * generate destination unreachable messages from the host OS, but
fp@2359: * the packets will be handled on SMBUS
fp@2359: */
fp@2359: manc |= E1000_MANC_EN_MNG2HOST;
fp@2359: manc2h = er32(MANC2H);
fp@2359:
fp@2359: switch (hw->mac.type) {
fp@2359: default:
fp@2359: manc2h |= (E1000_MANC2H_PORT_623 | E1000_MANC2H_PORT_664);
fp@2359: break;
fp@2359: case e1000_82574:
fp@2359: case e1000_82583:
fp@2359: /*
fp@2359: * Check if IPMI pass-through decision filter already exists;
fp@2359: * if so, enable it.
fp@2359: */
fp@2359: for (i = 0, j = 0; i < 8; i++) {
fp@2359: mdef = er32(MDEF(i));
fp@2359:
fp@2359: /* Ignore filters with anything other than IPMI ports */
fp@2359: if (mdef & ~(E1000_MDEF_PORT_623 | E1000_MDEF_PORT_664))
fp@2359: continue;
fp@2359:
fp@2359: /* Enable this decision filter in MANC2H */
fp@2359: if (mdef)
fp@2359: manc2h |= (1 << i);
fp@2359:
fp@2359: j |= mdef;
fp@2359: }
fp@2359:
fp@2359: if (j == (E1000_MDEF_PORT_623 | E1000_MDEF_PORT_664))
fp@2359: break;
fp@2359:
fp@2359: /* Create new decision filter in an empty filter */
fp@2359: for (i = 0, j = 0; i < 8; i++)
fp@2359: if (er32(MDEF(i)) == 0) {
fp@2359: ew32(MDEF(i), (E1000_MDEF_PORT_623 |
fp@2359: E1000_MDEF_PORT_664));
fp@2359: manc2h |= (1 << i);
fp@2359: j++;
fp@2359: break;
fp@2359: }
fp@2359:
fp@2359: if (!j)
fp@2359: e_warn("Unable to create IPMI pass-through filter\n");
fp@2359: break;
fp@2359: }
fp@2359:
fp@2359: ew32(MANC2H, manc2h);
fp@2359: ew32(MANC, manc);
fp@2359: }
fp@2359:
fp@2359: /**
fp@2359: * e1000_configure_tx - Configure Transmit Unit after Reset
fp@2359: * @adapter: board private structure
fp@2359: *
fp@2359: * Configure the Tx unit of the MAC after a reset.
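fp@2359: *
fp@2359: * In outline: the ring base and length are written to
fp@2359: * TDBAL/TDBAH/TDLEN, TDH/TDT are zeroed to start with an empty ring,
fp@2359: * TIPG sets the inter-packet gap, TIDV/TADV the interrupt delay
fp@2359: * timers, and TCTL the transmit control flags.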
fp@2359: **/ fp@2359: static void e1000_configure_tx(struct e1000_adapter *adapter) fp@2359: { fp@2359: struct e1000_hw *hw = &adapter->hw; fp@2359: struct e1000_ring *tx_ring = adapter->tx_ring; fp@2359: u64 tdba; fp@2359: u32 tdlen, tctl, tipg, tarc; fp@2359: u32 ipgr1, ipgr2; fp@2359: fp@2359: /* Setup the HW Tx Head and Tail descriptor pointers */ fp@2359: tdba = tx_ring->dma; fp@2359: tdlen = tx_ring->count * sizeof(struct e1000_tx_desc); fp@2359: ew32(TDBAL, (tdba & DMA_BIT_MASK(32))); fp@2359: ew32(TDBAH, (tdba >> 32)); fp@2359: ew32(TDLEN, tdlen); fp@2359: ew32(TDH, 0); fp@2359: ew32(TDT, 0); fp@2359: tx_ring->head = E1000_TDH; fp@2359: tx_ring->tail = E1000_TDT; fp@2359: fp@2359: /* Set the default values for the Tx Inter Packet Gap timer */ fp@2359: tipg = DEFAULT_82543_TIPG_IPGT_COPPER; /* 8 */ fp@2359: ipgr1 = DEFAULT_82543_TIPG_IPGR1; /* 8 */ fp@2359: ipgr2 = DEFAULT_82543_TIPG_IPGR2; /* 6 */ fp@2359: fp@2359: if (adapter->flags & FLAG_TIPG_MEDIUM_FOR_80003ESLAN) fp@2359: ipgr2 = DEFAULT_80003ES2LAN_TIPG_IPGR2; /* 7 */ fp@2359: fp@2359: tipg |= ipgr1 << E1000_TIPG_IPGR1_SHIFT; fp@2359: tipg |= ipgr2 << E1000_TIPG_IPGR2_SHIFT; fp@2359: ew32(TIPG, tipg); fp@2359: fp@2359: /* Set the Tx Interrupt Delay register */ fp@2359: ew32(TIDV, adapter->tx_int_delay); fp@2359: /* Tx irq moderation */ fp@2359: ew32(TADV, adapter->tx_abs_int_delay); fp@2359: fp@2359: /* Program the Transmit Control Register */ fp@2359: tctl = er32(TCTL); fp@2359: tctl &= ~E1000_TCTL_CT; fp@2359: tctl |= E1000_TCTL_PSP | E1000_TCTL_RTLC | fp@2359: (E1000_COLLISION_THRESHOLD << E1000_CT_SHIFT); fp@2359: fp@2359: if (adapter->flags & FLAG_TARC_SPEED_MODE_BIT) { fp@2359: tarc = er32(TARC(0)); fp@2359: /* fp@2359: * set the speed mode bit, we'll clear it if we're not at fp@2359: * gigabit link later fp@2359: */ fp@2359: #define SPEED_MODE_BIT (1 << 21) fp@2359: tarc |= SPEED_MODE_BIT; fp@2359: ew32(TARC(0), tarc); fp@2359: } fp@2359: fp@2359: /* errata: program both queues to unweighted RR */ fp@2359: if (adapter->flags & FLAG_TARC_SET_BIT_ZERO) { fp@2359: tarc = er32(TARC(0)); fp@2359: tarc |= 1; fp@2359: ew32(TARC(0), tarc); fp@2359: tarc = er32(TARC(1)); fp@2359: tarc |= 1; fp@2359: ew32(TARC(1), tarc); fp@2359: } fp@2359: fp@2359: /* Setup Transmit Descriptor Settings for eop descriptor */ fp@2359: adapter->txd_cmd = E1000_TXD_CMD_EOP | E1000_TXD_CMD_IFCS; fp@2359: fp@2359: /* only set IDE if we are delaying interrupts using the timers */ fp@2359: if (adapter->tx_int_delay) fp@2359: adapter->txd_cmd |= E1000_TXD_CMD_IDE; fp@2359: fp@2359: /* enable Report Status bit */ fp@2359: adapter->txd_cmd |= E1000_TXD_CMD_RS; fp@2359: fp@2359: ew32(TCTL, tctl); fp@2359: fp@2359: e1000e_config_collision_dist(hw); fp@2359: } fp@2359: fp@2359: /** fp@2359: * e1000_setup_rctl - configure the receive control registers fp@2359: * @adapter: Board private structure fp@2359: **/ fp@2359: #define PAGE_USE_COUNT(S) (((S) >> PAGE_SHIFT) + \ fp@2359: (((S) & (PAGE_SIZE - 1)) ? 
1 : 0)) fp@2359: static void e1000_setup_rctl(struct e1000_adapter *adapter) fp@2359: { fp@2359: struct e1000_hw *hw = &adapter->hw; fp@2359: u32 rctl, rfctl; fp@2359: u32 psrctl = 0; fp@2359: u32 pages = 0; fp@2359: fp@2359: /* Program MC offset vector base */ fp@2359: rctl = er32(RCTL); fp@2359: rctl &= ~(3 << E1000_RCTL_MO_SHIFT); fp@2359: rctl |= E1000_RCTL_EN | E1000_RCTL_BAM | fp@2359: E1000_RCTL_LBM_NO | E1000_RCTL_RDMTS_HALF | fp@2359: (adapter->hw.mac.mc_filter_type << E1000_RCTL_MO_SHIFT); fp@2359: fp@2359: /* Do not Store bad packets */ fp@2359: rctl &= ~E1000_RCTL_SBP; fp@2359: fp@2359: /* Enable Long Packet receive */ fp@2359: if (adapter->netdev->mtu <= ETH_DATA_LEN) fp@2359: rctl &= ~E1000_RCTL_LPE; fp@2359: else fp@2359: rctl |= E1000_RCTL_LPE; fp@2359: fp@2359: /* Some systems expect that the CRC is included in SMBUS traffic. The fp@2359: * hardware strips the CRC before sending to both SMBUS (BMC) and to fp@2359: * host memory when this is enabled fp@2359: */ fp@2359: if (adapter->flags2 & FLAG2_CRC_STRIPPING) fp@2359: rctl |= E1000_RCTL_SECRC; fp@2359: fp@2359: /* Workaround Si errata on 82577 PHY - configure IPG for jumbos */ fp@2359: if ((hw->phy.type == e1000_phy_82577) && (rctl & E1000_RCTL_LPE)) { fp@2359: u16 phy_data; fp@2359: fp@2359: e1e_rphy(hw, PHY_REG(770, 26), &phy_data); fp@2359: phy_data &= 0xfff8; fp@2359: phy_data |= (1 << 2); fp@2359: e1e_wphy(hw, PHY_REG(770, 26), phy_data); fp@2359: fp@2359: e1e_rphy(hw, 22, &phy_data); fp@2359: phy_data &= 0x0fff; fp@2359: phy_data |= (1 << 14); fp@2359: e1e_wphy(hw, 0x10, 0x2823); fp@2359: e1e_wphy(hw, 0x11, 0x0003); fp@2359: e1e_wphy(hw, 22, phy_data); fp@2359: } fp@2359: fp@2359: /* Setup buffer sizes */ fp@2359: rctl &= ~E1000_RCTL_SZ_4096; fp@2359: rctl |= E1000_RCTL_BSEX; fp@2359: switch (adapter->rx_buffer_len) { fp@2359: case 2048: fp@2359: default: fp@2359: rctl |= E1000_RCTL_SZ_2048; fp@2359: rctl &= ~E1000_RCTL_BSEX; fp@2359: break; fp@2359: case 4096: fp@2359: rctl |= E1000_RCTL_SZ_4096; fp@2359: break; fp@2359: case 8192: fp@2359: rctl |= E1000_RCTL_SZ_8192; fp@2359: break; fp@2359: case 16384: fp@2359: rctl |= E1000_RCTL_SZ_16384; fp@2359: break; fp@2359: } fp@2359: fp@2359: /* fp@2359: * 82571 and greater support packet-split where the protocol fp@2359: * header is placed in skb->data and the packet data is fp@2359: * placed in pages hanging off of skb_shinfo(skb)->nr_frags. fp@2359: * In the case of a non-split, skb->data is linearly filled, fp@2359: * followed by the page buffers. Therefore, skb->data is fp@2359: * sized to hold the largest protocol header. fp@2359: * fp@2359: * allocations using alloc_page take too long for regular MTU fp@2359: * so only enable packet split for jumbo frames fp@2359: * fp@2359: * Using pages when the page size is greater than 16k wastes fp@2359: * a lot of memory, since we allocate 3 pages at all times fp@2359: * per packet. 
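fp@2359: *
fp@2359: * For example, assuming 4 KB pages: a jumbo MTU of 9000 gives
fp@2359: * PAGE_USE_COUNT(9000) = 2 + 1 = 3, so packet split is enabled below
fp@2359: * (on non-ICH parts) with rx_ps_pages = 3, while the default
fp@2359: * 1500-byte MTU leaves E1000_RCTL_LPE clear and packet split off.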
fp@2359: */ fp@2359: pages = PAGE_USE_COUNT(adapter->netdev->mtu); fp@2359: if (!(adapter->flags & FLAG_IS_ICH) && (pages <= 3) && fp@2359: (PAGE_SIZE <= 16384) && (rctl & E1000_RCTL_LPE)) fp@2359: adapter->rx_ps_pages = pages; fp@2359: else fp@2359: adapter->rx_ps_pages = 0; fp@2359: fp@2359: if (adapter->rx_ps_pages) { fp@2359: /* Configure extra packet-split registers */ fp@2359: rfctl = er32(RFCTL); fp@2359: rfctl |= E1000_RFCTL_EXTEN; fp@2359: /* fp@2359: * disable packet split support for IPv6 extension headers, fp@2359: * because some malformed IPv6 headers can hang the Rx fp@2359: */ fp@2359: rfctl |= (E1000_RFCTL_IPV6_EX_DIS | fp@2359: E1000_RFCTL_NEW_IPV6_EXT_DIS); fp@2359: fp@2359: ew32(RFCTL, rfctl); fp@2359: fp@2359: /* Enable Packet split descriptors */ fp@2359: rctl |= E1000_RCTL_DTYP_PS; fp@2359: fp@2359: psrctl |= adapter->rx_ps_bsize0 >> fp@2359: E1000_PSRCTL_BSIZE0_SHIFT; fp@2359: fp@2359: switch (adapter->rx_ps_pages) { fp@2359: case 3: fp@2359: psrctl |= PAGE_SIZE << fp@2359: E1000_PSRCTL_BSIZE3_SHIFT; fp@2359: case 2: fp@2359: psrctl |= PAGE_SIZE << fp@2359: E1000_PSRCTL_BSIZE2_SHIFT; fp@2359: case 1: fp@2359: psrctl |= PAGE_SIZE >> fp@2359: E1000_PSRCTL_BSIZE1_SHIFT; fp@2359: break; fp@2359: } fp@2359: fp@2359: ew32(PSRCTL, psrctl); fp@2359: } fp@2359: fp@2359: ew32(RCTL, rctl); fp@2359: /* just started the receive unit, no need to restart */ fp@2359: adapter->flags &= ~FLAG_RX_RESTART_NOW; fp@2359: } fp@2359: fp@2359: /** fp@2359: * e1000_configure_rx - Configure Receive Unit after Reset fp@2359: * @adapter: board private structure fp@2359: * fp@2359: * Configure the Rx unit of the MAC after a reset. fp@2359: **/ fp@2359: static void e1000_configure_rx(struct e1000_adapter *adapter) fp@2359: { fp@2359: struct e1000_hw *hw = &adapter->hw; fp@2359: struct e1000_ring *rx_ring = adapter->rx_ring; fp@2359: u64 rdba; fp@2359: u32 rdlen, rctl, rxcsum, ctrl_ext; fp@2359: fp@2359: if (adapter->rx_ps_pages) { fp@2359: /* this is a 32 byte descriptor */ fp@2359: rdlen = rx_ring->count * fp@2359: sizeof(union e1000_rx_desc_packet_split); fp@2359: adapter->clean_rx = e1000_clean_rx_irq_ps; fp@2359: adapter->alloc_rx_buf = e1000_alloc_rx_buffers_ps; fp@2359: } else if (adapter->netdev->mtu > ETH_FRAME_LEN + ETH_FCS_LEN) { fp@2359: rdlen = rx_ring->count * sizeof(struct e1000_rx_desc); fp@2359: adapter->clean_rx = e1000_clean_jumbo_rx_irq; fp@2359: adapter->alloc_rx_buf = e1000_alloc_jumbo_rx_buffers; fp@2359: } else { fp@2359: rdlen = rx_ring->count * sizeof(struct e1000_rx_desc); fp@2359: adapter->clean_rx = e1000_clean_rx_irq; fp@2359: adapter->alloc_rx_buf = e1000_alloc_rx_buffers; fp@2359: } fp@2359: fp@2359: /* disable receives while setting up the descriptors */ fp@2359: rctl = er32(RCTL); fp@2359: ew32(RCTL, rctl & ~E1000_RCTL_EN); fp@2359: e1e_flush(); fp@2359: msleep(10); fp@2359: fp@2359: /* set the Receive Delay Timer Register */ fp@2359: ew32(RDTR, adapter->rx_int_delay); fp@2359: fp@2359: /* irq moderation */ fp@2359: ew32(RADV, adapter->rx_abs_int_delay); fp@2359: if (adapter->itr_setting != 0) fp@2359: ew32(ITR, 1000000000 / (adapter->itr * 256)); fp@2359: fp@2359: ctrl_ext = er32(CTRL_EXT); fp@2359: /* Auto-Mask interrupts upon ICR access */ fp@2359: ctrl_ext |= E1000_CTRL_EXT_IAME; fp@2359: ew32(IAM, 0xffffffff); fp@2359: ew32(CTRL_EXT, ctrl_ext); fp@2359: e1e_flush(); fp@2359: fp@2359: /* fp@2359: * Setup the HW Rx Head and Tail Descriptor Pointers and fp@2359: * the Base and Length of the Rx Descriptor Ring fp@2359: */ fp@2359: rdba = rx_ring->dma; fp@2359: 
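/* the 64-bit descriptor base address is written as two 32-bit halves */ fp@2359: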
ew32(RDBAL, (rdba & DMA_BIT_MASK(32))); fp@2359: ew32(RDBAH, (rdba >> 32)); fp@2359: ew32(RDLEN, rdlen); fp@2359: ew32(RDH, 0); fp@2359: ew32(RDT, 0); fp@2359: rx_ring->head = E1000_RDH; fp@2359: rx_ring->tail = E1000_RDT; fp@2359: fp@2359: /* Enable Receive Checksum Offload for TCP and UDP */ fp@2359: rxcsum = er32(RXCSUM); fp@2359: if (adapter->flags & FLAG_RX_CSUM_ENABLED) { fp@2359: rxcsum |= E1000_RXCSUM_TUOFL; fp@2359: fp@2359: /* fp@2359: * IPv4 payload checksum for UDP fragments must be fp@2359: * used in conjunction with packet-split. fp@2359: */ fp@2359: if (adapter->rx_ps_pages) fp@2359: rxcsum |= E1000_RXCSUM_IPPCSE; fp@2359: } else { fp@2359: rxcsum &= ~E1000_RXCSUM_TUOFL; fp@2359: /* no need to clear IPPCSE as it defaults to 0 */ fp@2359: } fp@2359: ew32(RXCSUM, rxcsum); fp@2359: fp@2359: /* fp@2359: * Enable early receives on supported devices, only takes effect when fp@2359: * packet size is equal or larger than the specified value (in 8 byte fp@2359: * units), e.g. using jumbo frames when setting to E1000_ERT_2048 fp@2359: */ fp@2359: if (adapter->flags & FLAG_HAS_ERT) { fp@2359: if (adapter->netdev->mtu > ETH_DATA_LEN) { fp@2359: u32 rxdctl = er32(RXDCTL(0)); fp@2359: ew32(RXDCTL(0), rxdctl | 0x3); fp@2359: ew32(ERT, E1000_ERT_2048 | (1 << 13)); fp@2359: /* fp@2359: * With jumbo frames and early-receive enabled, fp@2359: * excessive C-state transition latencies result in fp@2359: * dropped transactions. fp@2359: */ fp@2359: pm_qos_update_request( fp@2359: adapter->netdev->pm_qos_req, 55); fp@2359: } else { fp@2359: pm_qos_update_request( fp@2359: adapter->netdev->pm_qos_req, fp@2359: PM_QOS_DEFAULT_VALUE); fp@2359: } fp@2359: } fp@2359: fp@2359: /* Enable Receives */ fp@2359: ew32(RCTL, rctl); fp@2359: } fp@2359: fp@2359: /** fp@2359: * e1000_update_mc_addr_list - Update Multicast addresses fp@2359: * @hw: pointer to the HW structure fp@2359: * @mc_addr_list: array of multicast addresses to program fp@2359: * @mc_addr_count: number of multicast addresses to program fp@2359: * fp@2359: * Updates the Multicast Table Array. fp@2359: * The caller must have a packed mc_addr_list of multicast addresses. fp@2359: **/ fp@2359: static void e1000_update_mc_addr_list(struct e1000_hw *hw, u8 *mc_addr_list, fp@2359: u32 mc_addr_count) fp@2359: { fp@2359: hw->mac.ops.update_mc_addr_list(hw, mc_addr_list, mc_addr_count); fp@2359: } fp@2359: fp@2359: /** fp@2359: * e1000_set_multi - Multicast and Promiscuous mode set fp@2359: * @netdev: network interface device structure fp@2359: * fp@2359: * The set_multi entry point is called whenever the multicast address fp@2359: * list or the network interface flags are updated. This routine is fp@2359: * responsible for configuring the hardware for proper multicast, fp@2359: * promiscuous mode, and all-multi behavior. 
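fp@2359: *
fp@2359: * IFF_PROMISC sets both E1000_RCTL_UPE and E1000_RCTL_MPE,
fp@2359: * IFF_ALLMULTI sets only E1000_RCTL_MPE, and the hardware VLAN
fp@2359: * filter (E1000_RCTL_VFE) is enabled only when not in promiscuous
fp@2359: * mode on parts that have one.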
fp@2359: **/ fp@2359: static void e1000_set_multi(struct net_device *netdev) fp@2359: { fp@2359: struct e1000_adapter *adapter = netdev_priv(netdev); fp@2359: struct e1000_hw *hw = &adapter->hw; fp@2359: struct netdev_hw_addr *ha; fp@2359: u8 *mta_list; fp@2359: u32 rctl; fp@2359: int i; fp@2359: fp@2359: /* Check for Promiscuous and All Multicast modes */ fp@2359: fp@2359: rctl = er32(RCTL); fp@2359: fp@2359: if (netdev->flags & IFF_PROMISC) { fp@2359: rctl |= (E1000_RCTL_UPE | E1000_RCTL_MPE); fp@2359: rctl &= ~E1000_RCTL_VFE; fp@2359: } else { fp@2359: if (netdev->flags & IFF_ALLMULTI) { fp@2359: rctl |= E1000_RCTL_MPE; fp@2359: rctl &= ~E1000_RCTL_UPE; fp@2359: } else { fp@2359: rctl &= ~(E1000_RCTL_UPE | E1000_RCTL_MPE); fp@2359: } fp@2359: if (adapter->flags & FLAG_HAS_HW_VLAN_FILTER) fp@2359: rctl |= E1000_RCTL_VFE; fp@2359: } fp@2359: fp@2359: ew32(RCTL, rctl); fp@2359: fp@2359: if (!netdev_mc_empty(netdev)) { fp@2359: mta_list = kmalloc(netdev_mc_count(netdev) * 6, GFP_ATOMIC); fp@2359: if (!mta_list) fp@2359: return; fp@2359: fp@2359: /* prepare a packed array of only addresses. */ fp@2359: i = 0; fp@2359: netdev_for_each_mc_addr(ha, netdev) fp@2359: memcpy(mta_list + (i++ * ETH_ALEN), ha->addr, ETH_ALEN); fp@2359: fp@2359: e1000_update_mc_addr_list(hw, mta_list, i); fp@2359: kfree(mta_list); fp@2359: } else { fp@2359: /* fp@2359: * if we're called from probe, we might not have fp@2359: * anything to do here, so clear out the list fp@2359: */ fp@2359: e1000_update_mc_addr_list(hw, NULL, 0); fp@2359: } fp@2359: } fp@2359: fp@2359: /** fp@2359: * e1000_configure - configure the hardware for Rx and Tx fp@2359: * @adapter: private board structure fp@2359: **/ fp@2359: static void e1000_configure(struct e1000_adapter *adapter) fp@2359: { fp@2359: e1000_set_multi(adapter->netdev); fp@2359: fp@2359: e1000_restore_vlan(adapter); fp@2359: e1000_init_manageability_pt(adapter); fp@2359: fp@2359: e1000_configure_tx(adapter); fp@2359: e1000_setup_rctl(adapter); fp@2359: e1000_configure_rx(adapter); fp@2359: adapter->alloc_rx_buf(adapter, e1000_desc_unused(adapter->rx_ring)); fp@2359: } fp@2359: fp@2359: /** fp@2359: * e1000e_power_up_phy - restore link in case the phy was powered down fp@2359: * @adapter: address of board private structure fp@2359: * fp@2359: * The phy may be powered down to save power and turn off link when the fp@2359: * driver is unloaded and wake on lan is not enabled (among others) fp@2359: * *** this routine MUST be followed by a call to e1000e_reset *** fp@2359: **/ fp@2359: void e1000e_power_up_phy(struct e1000_adapter *adapter) fp@2359: { fp@2359: if (adapter->hw.phy.ops.power_up) fp@2359: adapter->hw.phy.ops.power_up(&adapter->hw); fp@2359: fp@2359: adapter->hw.mac.ops.setup_link(&adapter->hw); fp@2359: } fp@2359: fp@2359: /** fp@2359: * e1000_power_down_phy - Power down the PHY fp@2359: * fp@2359: * Power down the PHY so no link is implied when interface is down. fp@2359: * The PHY cannot be powered down if management or WoL is active. 
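fp@2359: * (Only adapter->wol is tested here; the management-mode check is
fp@2359: * expected to be handled by the PHY-specific power_down operation.)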
fp@2359: */ fp@2359: static void e1000_power_down_phy(struct e1000_adapter *adapter) fp@2359: { fp@2359: /* WoL is enabled */ fp@2359: if (adapter->wol) fp@2359: return; fp@2359: fp@2359: if (adapter->hw.phy.ops.power_down) fp@2359: adapter->hw.phy.ops.power_down(&adapter->hw); fp@2359: } fp@2359: fp@2359: /** fp@2359: * e1000e_reset - bring the hardware into a known good state fp@2359: * fp@2359: * This function boots the hardware and enables some settings that fp@2359: * require a configuration cycle of the hardware - those cannot be fp@2359: * set/changed during runtime. After reset the device needs to be fp@2359: * properly configured for Rx, Tx etc. fp@2359: */ fp@2359: void e1000e_reset(struct e1000_adapter *adapter) fp@2359: { fp@2359: struct e1000_mac_info *mac = &adapter->hw.mac; fp@2359: struct e1000_fc_info *fc = &adapter->hw.fc; fp@2359: struct e1000_hw *hw = &adapter->hw; fp@2359: u32 tx_space, min_tx_space, min_rx_space; fp@2359: u32 pba = adapter->pba; fp@2359: u16 hwm; fp@2359: fp@2359: /* reset Packet Buffer Allocation to default */ fp@2359: ew32(PBA, pba); fp@2359: fp@2359: if (adapter->max_frame_size > ETH_FRAME_LEN + ETH_FCS_LEN) { fp@2359: /* fp@2359: * To maintain wire speed transmits, the Tx FIFO should be fp@2359: * large enough to accommodate two full transmit packets, fp@2359: * rounded up to the next 1KB and expressed in KB. Likewise, fp@2359: * the Rx FIFO should be large enough to accommodate at least fp@2359: * one full receive packet and is similarly rounded up and fp@2359: * expressed in KB. fp@2359: */ fp@2359: pba = er32(PBA); fp@2359: /* upper 16 bits has Tx packet buffer allocation size in KB */ fp@2359: tx_space = pba >> 16; fp@2359: /* lower 16 bits has Rx packet buffer allocation size in KB */ fp@2359: pba &= 0xffff; fp@2359: /* fp@2359: * the Tx fifo also stores 16 bytes of information about the tx fp@2359: * but don't include ethernet FCS because hardware appends it fp@2359: */ fp@2359: min_tx_space = (adapter->max_frame_size + fp@2359: sizeof(struct e1000_tx_desc) - fp@2359: ETH_FCS_LEN) * 2; fp@2359: min_tx_space = ALIGN(min_tx_space, 1024); fp@2359: min_tx_space >>= 10; fp@2359: /* software strips receive CRC, so leave room for it */ fp@2359: min_rx_space = adapter->max_frame_size; fp@2359: min_rx_space = ALIGN(min_rx_space, 1024); fp@2359: min_rx_space >>= 10; fp@2359: fp@2359: /* fp@2359: * If current Tx allocation is less than the min Tx FIFO size, fp@2359: * and the min Tx FIFO size is less than the current Rx FIFO fp@2359: * allocation, take space away from current Rx allocation fp@2359: */ fp@2359: if ((tx_space < min_tx_space) && fp@2359: ((min_tx_space - tx_space) < pba)) { fp@2359: pba -= min_tx_space - tx_space; fp@2359: fp@2359: /* fp@2359: * if short on Rx space, Rx wins and must trump tx fp@2359: * adjustment or use Early Receive if available fp@2359: */ fp@2359: if ((pba < min_rx_space) && fp@2359: (!(adapter->flags & FLAG_HAS_ERT))) fp@2359: /* ERT enabled in e1000_configure_rx */ fp@2359: pba = min_rx_space; fp@2359: } fp@2359: fp@2359: ew32(PBA, pba); fp@2359: } fp@2359: fp@2359: fp@2359: /* fp@2359: * flow control settings fp@2359: * fp@2359: * The high water mark must be low enough to fit one full frame fp@2359: * (or the size used for early receive) above it in the Rx FIFO. 
fp@2359: * Set it to the lower of: fp@2359: * - 90% of the Rx FIFO size, and fp@2359: * - the full Rx FIFO size minus the early receive size (for parts fp@2359: * with ERT support assuming ERT set to E1000_ERT_2048), or fp@2359: * - the full Rx FIFO size minus one full frame fp@2359: */ fp@2359: if (hw->mac.type == e1000_pchlan) { fp@2359: /* fp@2359: * Workaround PCH LOM adapter hangs with certain network fp@2359: * loads. If hangs persist, try disabling Tx flow control. fp@2359: */ fp@2359: if (adapter->netdev->mtu > ETH_DATA_LEN) { fp@2359: fc->high_water = 0x3500; fp@2359: fc->low_water = 0x1500; fp@2359: } else { fp@2359: fc->high_water = 0x5000; fp@2359: fc->low_water = 0x3000; fp@2359: } fp@2359: fc->refresh_time = 0x1000; fp@2359: } else { fp@2359: if ((adapter->flags & FLAG_HAS_ERT) && fp@2359: (adapter->netdev->mtu > ETH_DATA_LEN)) fp@2359: hwm = min(((pba << 10) * 9 / 10), fp@2359: ((pba << 10) - (E1000_ERT_2048 << 3))); fp@2359: else fp@2359: hwm = min(((pba << 10) * 9 / 10), fp@2359: ((pba << 10) - adapter->max_frame_size)); fp@2359: fp@2359: fc->high_water = hwm & E1000_FCRTH_RTH; /* 8-byte granularity */ fp@2359: fc->low_water = fc->high_water - 8; fp@2359: } fp@2359: fp@2359: if (adapter->flags & FLAG_DISABLE_FC_PAUSE_TIME) fp@2359: fc->pause_time = 0xFFFF; fp@2359: else fp@2359: fc->pause_time = E1000_FC_PAUSE_TIME; fp@2359: fc->send_xon = 1; fp@2359: fc->current_mode = fc->requested_mode; fp@2359: fp@2359: /* Allow time for pending master requests to run */ fp@2359: mac->ops.reset_hw(hw); fp@2359: fp@2359: /* fp@2359: * For parts with AMT enabled, let the firmware know fp@2359: * that the network interface is in control fp@2359: */ fp@2359: if (adapter->flags & FLAG_HAS_AMT) fp@2359: e1000_get_hw_control(adapter); fp@2359: fp@2359: ew32(WUC, 0); fp@2359: if (adapter->flags2 & FLAG2_HAS_PHY_WAKEUP) fp@2359: e1e_wphy(&adapter->hw, BM_WUC, 0); fp@2359: fp@2359: if (mac->ops.init_hw(hw)) fp@2359: e_err("Hardware Error\n"); fp@2359: fp@2359: e1000_update_mng_vlan(adapter); fp@2359: fp@2359: /* Enable h/w to recognize an 802.1Q VLAN Ethernet packet */ fp@2359: ew32(VET, ETH_P_8021Q); fp@2359: fp@2359: e1000e_reset_adaptive(hw); fp@2359: e1000_get_phy_info(hw); fp@2359: fp@2359: if ((adapter->flags & FLAG_HAS_SMART_POWER_DOWN) && fp@2359: !(adapter->flags & FLAG_SMART_POWER_DOWN)) { fp@2359: u16 phy_data = 0; fp@2359: /* fp@2359: * speed up time to link by disabling smart power down, ignore fp@2359: * the return value of this function because there is nothing fp@2359: * different we would do if it failed fp@2359: */ fp@2359: e1e_rphy(hw, IGP02E1000_PHY_POWER_MGMT, &phy_data); fp@2359: phy_data &= ~IGP02E1000_PM_SPD; fp@2359: e1e_wphy(hw, IGP02E1000_PHY_POWER_MGMT, phy_data); fp@2359: } fp@2359: } fp@2359: fp@2359: int e1000e_up(struct e1000_adapter *adapter) fp@2359: { fp@2359: struct e1000_hw *hw = &adapter->hw; fp@2359: fp@2359: /* DMA latency requirement to workaround early-receive/jumbo issue */ fp@2359: if (adapter->flags & FLAG_HAS_ERT) fp@2359: adapter->netdev->pm_qos_req = fp@2359: pm_qos_add_request(PM_QOS_CPU_DMA_LATENCY, fp@2359: PM_QOS_DEFAULT_VALUE); fp@2359: fp@2359: /* hardware has been reset, we need to reload some things */ fp@2359: e1000_configure(adapter); fp@2359: fp@2359: clear_bit(__E1000_DOWN, &adapter->state); fp@2359: fp@2359: napi_enable(&adapter->napi); fp@2359: if (adapter->msix_entries) fp@2359: e1000_configure_msix(adapter); fp@2359: e1000_irq_enable(adapter); fp@2359: fp@2359: netif_wake_queue(adapter->netdev); fp@2359: fp@2359: /* fire a link 
change interrupt to start the watchdog */ fp@2359: if (adapter->msix_entries) fp@2359: ew32(ICS, E1000_ICS_LSC | E1000_ICR_OTHER); fp@2359: else fp@2359: ew32(ICS, E1000_ICS_LSC); fp@2359: fp@2359: return 0; fp@2359: } fp@2359: fp@2359: void e1000e_down(struct e1000_adapter *adapter) fp@2359: { fp@2359: struct net_device *netdev = adapter->netdev; fp@2359: struct e1000_hw *hw = &adapter->hw; fp@2359: u32 tctl, rctl; fp@2359: fp@2359: /* fp@2359: * signal that we're down so the interrupt handler does not fp@2359: * reschedule our watchdog timer fp@2359: */ fp@2359: set_bit(__E1000_DOWN, &adapter->state); fp@2359: fp@2359: /* disable receives in the hardware */ fp@2359: rctl = er32(RCTL); fp@2359: ew32(RCTL, rctl & ~E1000_RCTL_EN); fp@2359: /* flush and sleep below */ fp@2359: fp@2359: netif_stop_queue(netdev); fp@2359: fp@2359: /* disable transmits in the hardware */ fp@2359: tctl = er32(TCTL); fp@2359: tctl &= ~E1000_TCTL_EN; fp@2359: ew32(TCTL, tctl); fp@2359: /* flush both disables and wait for them to finish */ fp@2359: e1e_flush(); fp@2359: msleep(10); fp@2359: fp@2359: napi_disable(&adapter->napi); fp@2359: e1000_irq_disable(adapter); fp@2359: fp@2359: del_timer_sync(&adapter->watchdog_timer); fp@2359: del_timer_sync(&adapter->phy_info_timer); fp@2359: fp@2359: netif_carrier_off(netdev); fp@2359: adapter->link_speed = 0; fp@2359: adapter->link_duplex = 0; fp@2359: fp@2359: if (!pci_channel_offline(adapter->pdev)) fp@2359: e1000e_reset(adapter); fp@2359: e1000_clean_tx_ring(adapter); fp@2359: e1000_clean_rx_ring(adapter); fp@2359: fp@2359: if (adapter->flags & FLAG_HAS_ERT) { fp@2359: pm_qos_remove_request( fp@2359: adapter->netdev->pm_qos_req); fp@2359: adapter->netdev->pm_qos_req = NULL; fp@2359: } fp@2359: fp@2359: /* fp@2359: * TODO: for power management, we could drop the link and fp@2359: * pci_disable_device here. fp@2359: */ fp@2359: } fp@2359: fp@2359: void e1000e_reinit_locked(struct e1000_adapter *adapter) fp@2359: { fp@2359: might_sleep(); fp@2359: while (test_and_set_bit(__E1000_RESETTING, &adapter->state)) fp@2359: msleep(1); fp@2359: e1000e_down(adapter); fp@2359: e1000e_up(adapter); fp@2359: clear_bit(__E1000_RESETTING, &adapter->state); fp@2359: } fp@2359: fp@2359: /** fp@2359: * e1000_sw_init - Initialize general software structures (struct e1000_adapter) fp@2359: * @adapter: board private structure to initialize fp@2359: * fp@2359: * e1000_sw_init initializes the Adapter private data structure. fp@2359: * Fields are initialized based on PCI device information and fp@2359: * OS network device settings (MTU size). fp@2359: **/ fp@2359: static int __devinit e1000_sw_init(struct e1000_adapter *adapter) fp@2359: { fp@2359: struct net_device *netdev = adapter->netdev; fp@2359: fp@2359: adapter->rx_buffer_len = ETH_FRAME_LEN + VLAN_HLEN + ETH_FCS_LEN; fp@2359: adapter->rx_ps_bsize0 = 128; fp@2359: adapter->max_frame_size = netdev->mtu + ETH_HLEN + ETH_FCS_LEN; fp@2359: adapter->min_frame_size = ETH_ZLEN + ETH_FCS_LEN; fp@2359: fp@2359: e1000e_set_interrupt_capability(adapter); fp@2359: fp@2359: if (e1000_alloc_queues(adapter)) fp@2359: return -ENOMEM; fp@2359: fp@2359: /* Explicitly disable IRQ since the NIC can be in any state. 
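fp@2359: * (interrupt sources may have been left armed by the BIOS, firmware
fp@2359: * or a previously loaded driver)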
*/ fp@2359: e1000_irq_disable(adapter); fp@2359: fp@2359: set_bit(__E1000_DOWN, &adapter->state); fp@2359: return 0; fp@2359: } fp@2359: fp@2359: /** fp@2359: * e1000_intr_msi_test - Interrupt Handler fp@2359: * @irq: interrupt number fp@2359: * @data: pointer to a network interface device structure fp@2359: **/ fp@2359: static irqreturn_t e1000_intr_msi_test(int irq, void *data) fp@2359: { fp@2359: struct net_device *netdev = data; fp@2359: struct e1000_adapter *adapter = netdev_priv(netdev); fp@2359: struct e1000_hw *hw = &adapter->hw; fp@2359: u32 icr = er32(ICR); fp@2359: fp@2359: e_dbg("icr is %08X\n", icr); fp@2359: if (icr & E1000_ICR_RXSEQ) { fp@2359: adapter->flags &= ~FLAG_MSI_TEST_FAILED; fp@2359: wmb(); fp@2359: } fp@2359: fp@2359: return IRQ_HANDLED; fp@2359: } fp@2359: fp@2359: /** fp@2359: * e1000_test_msi_interrupt - Returns 0 for successful test fp@2359: * @adapter: board private struct fp@2359: * fp@2359: * code flow taken from tg3.c fp@2359: **/ fp@2359: static int e1000_test_msi_interrupt(struct e1000_adapter *adapter) fp@2359: { fp@2359: struct net_device *netdev = adapter->netdev; fp@2359: struct e1000_hw *hw = &adapter->hw; fp@2359: int err; fp@2359: fp@2359: /* poll_enable hasn't been called yet, so don't need disable */ fp@2359: /* clear any pending events */ fp@2359: er32(ICR); fp@2359: fp@2359: /* free the real vector and request a test handler */ fp@2359: e1000_free_irq(adapter); fp@2359: e1000e_reset_interrupt_capability(adapter); fp@2359: fp@2359: /* Assume that the test fails, if it succeeds then the test fp@2359: * MSI irq handler will unset this flag */ fp@2359: adapter->flags |= FLAG_MSI_TEST_FAILED; fp@2359: fp@2359: err = pci_enable_msi(adapter->pdev); fp@2359: if (err) fp@2359: goto msi_test_failed; fp@2359: fp@2359: err = request_irq(adapter->pdev->irq, e1000_intr_msi_test, 0, fp@2359: netdev->name, netdev); fp@2359: if (err) { fp@2359: pci_disable_msi(adapter->pdev); fp@2359: goto msi_test_failed; fp@2359: } fp@2359: fp@2359: wmb(); fp@2359: fp@2359: e1000_irq_enable(adapter); fp@2359: fp@2359: /* fire an unusual interrupt on the test handler */ fp@2359: ew32(ICS, E1000_ICS_RXSEQ); fp@2359: e1e_flush(); fp@2359: msleep(50); fp@2359: fp@2359: e1000_irq_disable(adapter); fp@2359: fp@2359: rmb(); fp@2359: fp@2359: if (adapter->flags & FLAG_MSI_TEST_FAILED) { fp@2359: adapter->int_mode = E1000E_INT_MODE_LEGACY; fp@2359: err = -EIO; fp@2359: e_info("MSI interrupt test failed!\n"); fp@2359: } fp@2359: fp@2359: free_irq(adapter->pdev->irq, netdev); fp@2359: pci_disable_msi(adapter->pdev); fp@2359: fp@2359: if (err == -EIO) fp@2359: goto msi_test_failed; fp@2359: fp@2359: /* okay so the test worked, restore settings */ fp@2359: e_dbg("MSI interrupt test succeeded!\n"); fp@2359: msi_test_failed: fp@2359: e1000e_set_interrupt_capability(adapter); fp@2359: e1000_request_irq(adapter); fp@2359: return err; fp@2359: } fp@2359: fp@2359: /** fp@2359: * e1000_test_msi - Returns 0 if MSI test succeeds or INTx mode is restored fp@2359: * @adapter: board private struct fp@2359: * fp@2359: * code flow taken from tg3.c, called with e1000 interrupts disabled. 
fp@2359: **/ fp@2359: static int e1000_test_msi(struct e1000_adapter *adapter) fp@2359: { fp@2359: int err; fp@2359: u16 pci_cmd; fp@2359: fp@2359: if (!(adapter->flags & FLAG_MSI_ENABLED)) fp@2359: return 0; fp@2359: fp@2359: /* disable SERR in case the MSI write causes a master abort */ fp@2359: pci_read_config_word(adapter->pdev, PCI_COMMAND, &pci_cmd); fp@2359: if (pci_cmd & PCI_COMMAND_SERR) fp@2359: pci_write_config_word(adapter->pdev, PCI_COMMAND, fp@2359: pci_cmd & ~PCI_COMMAND_SERR); fp@2359: fp@2359: err = e1000_test_msi_interrupt(adapter); fp@2359: fp@2359: /* re-enable SERR */ fp@2359: if (pci_cmd & PCI_COMMAND_SERR) { fp@2359: pci_read_config_word(adapter->pdev, PCI_COMMAND, &pci_cmd); fp@2359: pci_cmd |= PCI_COMMAND_SERR; fp@2359: pci_write_config_word(adapter->pdev, PCI_COMMAND, pci_cmd); fp@2359: } fp@2359: fp@2359: /* success ! */ fp@2359: if (!err) fp@2359: return 0; fp@2359: fp@2359: /* EIO means MSI test failed */ fp@2359: if (err != -EIO) fp@2359: return err; fp@2359: fp@2359: /* back to INTx mode */ fp@2359: e_warn("MSI interrupt test failed, using legacy interrupt.\n"); fp@2359: fp@2359: e1000_free_irq(adapter); fp@2359: fp@2359: err = e1000_request_irq(adapter); fp@2359: fp@2359: return err; fp@2359: } fp@2359: fp@2359: /** fp@2359: * e1000_open - Called when a network interface is made active fp@2359: * @netdev: network interface device structure fp@2359: * fp@2359: * Returns 0 on success, negative value on failure fp@2359: * fp@2359: * The open entry point is called when a network interface is made fp@2359: * active by the system (IFF_UP). At this point all resources needed fp@2359: * for transmit and receive operations are allocated, the interrupt fp@2359: * handler is registered with the OS, the watchdog timer is started, fp@2359: * and the stack is notified that the interface is ready. fp@2359: **/ fp@2359: static int e1000_open(struct net_device *netdev) fp@2359: { fp@2359: struct e1000_adapter *adapter = netdev_priv(netdev); fp@2359: struct e1000_hw *hw = &adapter->hw; fp@2359: struct pci_dev *pdev = adapter->pdev; fp@2359: int err; fp@2359: fp@2359: /* disallow open during test */ fp@2359: if (test_bit(__E1000_TESTING, &adapter->state)) fp@2359: return -EBUSY; fp@2359: fp@2359: pm_runtime_get_sync(&pdev->dev); fp@2359: fp@2359: netif_carrier_off(netdev); fp@2359: fp@2359: /* allocate transmit descriptors */ fp@2359: err = e1000e_setup_tx_resources(adapter); fp@2359: if (err) fp@2359: goto err_setup_tx; fp@2359: fp@2359: /* allocate receive descriptors */ fp@2359: err = e1000e_setup_rx_resources(adapter); fp@2359: if (err) fp@2359: goto err_setup_rx; fp@2359: fp@2359: /* fp@2359: * If AMT is enabled, let the firmware know that the network fp@2359: * interface is now open and reset the part to a known state. fp@2359: */ fp@2359: if (adapter->flags & FLAG_HAS_AMT) { fp@2359: e1000_get_hw_control(adapter); fp@2359: e1000e_reset(adapter); fp@2359: } fp@2359: fp@2359: e1000e_power_up_phy(adapter); fp@2359: fp@2359: adapter->mng_vlan_id = E1000_MNG_VLAN_NONE; fp@2359: if ((adapter->hw.mng_cookie.status & fp@2359: E1000_MNG_DHCP_COOKIE_STATUS_VLAN)) fp@2359: e1000_update_mng_vlan(adapter); fp@2359: fp@2359: /* fp@2359: * before we allocate an interrupt, we must be ready to handle it. fp@2359: * Setting DEBUG_SHIRQ in the kernel makes it fire an interrupt fp@2359: * as soon as we call pci_request_irq, so we have to setup our fp@2359: * clean_rx handler before we do so. 
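fp@2359: * e1000_configure() below performs that setup.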
fp@2359: */ fp@2359: e1000_configure(adapter); fp@2359: fp@2359: err = e1000_request_irq(adapter); fp@2359: if (err) fp@2359: goto err_req_irq; fp@2359: fp@2359: /* fp@2359: * Work around PCIe errata with MSI interrupts causing some chipsets to fp@2359: * ignore e1000e MSI messages, which means we need to test our MSI fp@2359: * interrupt now fp@2359: */ fp@2359: if (adapter->int_mode != E1000E_INT_MODE_LEGACY) { fp@2359: err = e1000_test_msi(adapter); fp@2359: if (err) { fp@2359: e_err("Interrupt allocation failed\n"); fp@2359: goto err_req_irq; fp@2359: } fp@2359: } fp@2359: fp@2359: /* From here on the code is the same as e1000e_up() */ fp@2359: clear_bit(__E1000_DOWN, &adapter->state); fp@2359: fp@2359: napi_enable(&adapter->napi); fp@2359: fp@2359: e1000_irq_enable(adapter); fp@2359: fp@2359: netif_start_queue(netdev); fp@2359: fp@2359: adapter->idle_check = true; fp@2359: pm_runtime_put(&pdev->dev); fp@2359: fp@2359: /* fire a link status change interrupt to start the watchdog */ fp@2359: if (adapter->msix_entries) fp@2359: ew32(ICS, E1000_ICS_LSC | E1000_ICR_OTHER); fp@2359: else fp@2359: ew32(ICS, E1000_ICS_LSC); fp@2359: fp@2359: return 0; fp@2359: fp@2359: err_req_irq: fp@2359: e1000_release_hw_control(adapter); fp@2359: e1000_power_down_phy(adapter); fp@2359: e1000e_free_rx_resources(adapter); fp@2359: err_setup_rx: fp@2359: e1000e_free_tx_resources(adapter); fp@2359: err_setup_tx: fp@2359: e1000e_reset(adapter); fp@2359: pm_runtime_put_sync(&pdev->dev); fp@2359: fp@2359: return err; fp@2359: } fp@2359: fp@2359: /** fp@2359: * e1000_close - Disables a network interface fp@2359: * @netdev: network interface device structure fp@2359: * fp@2359: * Returns 0, this is not allowed to fail fp@2359: * fp@2359: * The close entry point is called when an interface is de-activated fp@2359: * by the OS. The hardware is still under the drivers control, but fp@2359: * needs to be disabled. A global MAC reset is issued to stop the fp@2359: * hardware, and all transmit and receive resources are freed. 
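fp@2359: * Close must not race with a reset in flight; the WARN_ON in the
fp@2359: * body asserts that.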
fp@2359: **/
fp@2359: static int e1000_close(struct net_device *netdev)
fp@2359: {
fp@2359: struct e1000_adapter *adapter = netdev_priv(netdev);
fp@2359: struct pci_dev *pdev = adapter->pdev;
fp@2359:
fp@2359: WARN_ON(test_bit(__E1000_RESETTING, &adapter->state));
fp@2359:
fp@2359: pm_runtime_get_sync(&pdev->dev);
fp@2359:
fp@2359: if (!test_bit(__E1000_DOWN, &adapter->state)) {
fp@2359: e1000e_down(adapter);
fp@2359: e1000_free_irq(adapter);
fp@2359: }
fp@2359: e1000_power_down_phy(adapter);
fp@2359:
fp@2359: e1000e_free_tx_resources(adapter);
fp@2359: e1000e_free_rx_resources(adapter);
fp@2359:
fp@2359: /*
fp@2359: * kill manageability vlan ID if supported, but not if a vlan with
fp@2359: * the same ID is registered on the host OS (let 8021q kill it)
fp@2359: */
fp@2359: if ((adapter->hw.mng_cookie.status &
fp@2359: E1000_MNG_DHCP_COOKIE_STATUS_VLAN) &&
fp@2359: !(adapter->vlgrp &&
fp@2359: vlan_group_get_device(adapter->vlgrp, adapter->mng_vlan_id)))
fp@2359: e1000_vlan_rx_kill_vid(netdev, adapter->mng_vlan_id);
fp@2359:
fp@2359: /*
fp@2359: * If AMT is enabled, let the firmware know that the network
fp@2359: * interface is now closed
fp@2359: */
fp@2359: if (adapter->flags & FLAG_HAS_AMT)
fp@2359: e1000_release_hw_control(adapter);
fp@2359:
fp@2359: pm_runtime_put_sync(&pdev->dev);
fp@2359:
fp@2359: return 0;
fp@2359: }
fp@2359:
fp@2359: /**
fp@2359: * e1000_set_mac - Change the Ethernet Address of the NIC
fp@2359: * @netdev: network interface device structure
fp@2359: * @p: pointer to an address structure
fp@2359: *
fp@2359: * Returns 0 on success, negative on failure
fp@2359: **/
fp@2359: static int e1000_set_mac(struct net_device *netdev, void *p)
fp@2359: {
fp@2359: struct e1000_adapter *adapter = netdev_priv(netdev);
fp@2359: struct sockaddr *addr = p;
fp@2359:
fp@2359: if (!is_valid_ether_addr(addr->sa_data))
fp@2359: return -EADDRNOTAVAIL;
fp@2359:
fp@2359: memcpy(netdev->dev_addr, addr->sa_data, netdev->addr_len);
fp@2359: memcpy(adapter->hw.mac.addr, addr->sa_data, netdev->addr_len);
fp@2359:
fp@2359: e1000e_rar_set(&adapter->hw, adapter->hw.mac.addr, 0);
fp@2359:
fp@2359: if (adapter->flags & FLAG_RESET_OVERWRITES_LAA) {
fp@2359: /* activate the work around */
fp@2359: e1000e_set_laa_state_82571(&adapter->hw, 1);
fp@2359:
fp@2359: /*
fp@2359: * Hold a copy of the LAA in RAR[14]. This is done so that
fp@2359: * between the time RAR[0] gets clobbered and the time it
fp@2359: * gets fixed (in e1000_watchdog), the actual LAA is in one
fp@2359: * of the RARs and no incoming packets directed to this port
fp@2359: * are dropped. Eventually the LAA will be in RAR[0] and
fp@2359: * RAR[14].
fp@2359: */
fp@2359: e1000e_rar_set(&adapter->hw,
fp@2359: adapter->hw.mac.addr,
fp@2359: adapter->hw.mac.rar_entry_count - 1);
fp@2359: }
fp@2359:
fp@2359: return 0;
fp@2359: }
fp@2359:
fp@2359: /**
fp@2359: * e1000e_update_phy_task - work thread to update phy
fp@2359: * @work: pointer to our work struct
fp@2359: *
fp@2359: * this worker thread exists because we must acquire a
fp@2359: * semaphore to read the phy, which may msleep while we
fp@2359: * wait for it, and we can't msleep in a timer.
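fp@2359: * e1000_update_phy_info(), the timer callback below, therefore only
fp@2359: * schedules this work item rather than touching the PHY directly.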
fp@2359: **/ fp@2359: static void e1000e_update_phy_task(struct work_struct *work) fp@2359: { fp@2359: struct e1000_adapter *adapter = container_of(work, fp@2359: struct e1000_adapter, update_phy_task); fp@2359: e1000_get_phy_info(&adapter->hw); fp@2359: } fp@2359: fp@2359: /* fp@2359: * Need to wait a few seconds after link up to get diagnostic information from fp@2359: * the phy fp@2359: */ fp@2359: static void e1000_update_phy_info(unsigned long data) fp@2359: { fp@2359: struct e1000_adapter *adapter = (struct e1000_adapter *) data; fp@2359: schedule_work(&adapter->update_phy_task); fp@2359: } fp@2359: fp@2359: /** fp@2359: * e1000e_update_stats - Update the board statistics counters fp@2359: * @adapter: board private structure fp@2359: **/ fp@2359: void e1000e_update_stats(struct e1000_adapter *adapter) fp@2359: { fp@2359: struct net_device *netdev = adapter->netdev; fp@2359: struct e1000_hw *hw = &adapter->hw; fp@2359: struct pci_dev *pdev = adapter->pdev; fp@2359: u16 phy_data; fp@2359: fp@2359: /* fp@2359: * Prevent stats update while adapter is being reset, or if the pci fp@2359: * connection is down. fp@2359: */ fp@2359: if (adapter->link_speed == 0) fp@2359: return; fp@2359: if (pci_channel_offline(pdev)) fp@2359: return; fp@2359: fp@2359: adapter->stats.crcerrs += er32(CRCERRS); fp@2359: adapter->stats.gprc += er32(GPRC); fp@2359: adapter->stats.gorc += er32(GORCL); fp@2359: er32(GORCH); /* Clear gorc */ fp@2359: adapter->stats.bprc += er32(BPRC); fp@2359: adapter->stats.mprc += er32(MPRC); fp@2359: adapter->stats.roc += er32(ROC); fp@2359: fp@2359: adapter->stats.mpc += er32(MPC); fp@2359: if ((hw->phy.type == e1000_phy_82578) || fp@2359: (hw->phy.type == e1000_phy_82577)) { fp@2359: e1e_rphy(hw, HV_SCC_UPPER, &phy_data); fp@2359: if (!e1e_rphy(hw, HV_SCC_LOWER, &phy_data)) fp@2359: adapter->stats.scc += phy_data; fp@2359: fp@2359: e1e_rphy(hw, HV_ECOL_UPPER, &phy_data); fp@2359: if (!e1e_rphy(hw, HV_ECOL_LOWER, &phy_data)) fp@2359: adapter->stats.ecol += phy_data; fp@2359: fp@2359: e1e_rphy(hw, HV_MCC_UPPER, &phy_data); fp@2359: if (!e1e_rphy(hw, HV_MCC_LOWER, &phy_data)) fp@2359: adapter->stats.mcc += phy_data; fp@2359: fp@2359: e1e_rphy(hw, HV_LATECOL_UPPER, &phy_data); fp@2359: if (!e1e_rphy(hw, HV_LATECOL_LOWER, &phy_data)) fp@2359: adapter->stats.latecol += phy_data; fp@2359: fp@2359: e1e_rphy(hw, HV_DC_UPPER, &phy_data); fp@2359: if (!e1e_rphy(hw, HV_DC_LOWER, &phy_data)) fp@2359: adapter->stats.dc += phy_data; fp@2359: } else { fp@2359: adapter->stats.scc += er32(SCC); fp@2359: adapter->stats.ecol += er32(ECOL); fp@2359: adapter->stats.mcc += er32(MCC); fp@2359: adapter->stats.latecol += er32(LATECOL); fp@2359: adapter->stats.dc += er32(DC); fp@2359: } fp@2359: adapter->stats.xonrxc += er32(XONRXC); fp@2359: adapter->stats.xontxc += er32(XONTXC); fp@2359: adapter->stats.xoffrxc += er32(XOFFRXC); fp@2359: adapter->stats.xofftxc += er32(XOFFTXC); fp@2359: adapter->stats.gptc += er32(GPTC); fp@2359: adapter->stats.gotc += er32(GOTCL); fp@2359: er32(GOTCH); /* Clear gotc */ fp@2359: adapter->stats.rnbc += er32(RNBC); fp@2359: adapter->stats.ruc += er32(RUC); fp@2359: fp@2359: adapter->stats.mptc += er32(MPTC); fp@2359: adapter->stats.bptc += er32(BPTC); fp@2359: fp@2359: /* used for adaptive IFS */ fp@2359: fp@2359: hw->mac.tx_packet_delta = er32(TPT); fp@2359: adapter->stats.tpt += hw->mac.tx_packet_delta; fp@2359: if ((hw->phy.type == e1000_phy_82578) || fp@2359: (hw->phy.type == e1000_phy_82577)) { fp@2359: e1e_rphy(hw, HV_COLC_UPPER, &phy_data); fp@2359: if 
(!e1e_rphy(hw, HV_COLC_LOWER, &phy_data)) fp@2359: hw->mac.collision_delta = phy_data; fp@2359: } else { fp@2359: hw->mac.collision_delta = er32(COLC); fp@2359: } fp@2359: adapter->stats.colc += hw->mac.collision_delta; fp@2359: fp@2359: adapter->stats.algnerrc += er32(ALGNERRC); fp@2359: adapter->stats.rxerrc += er32(RXERRC); fp@2359: if ((hw->phy.type == e1000_phy_82578) || fp@2359: (hw->phy.type == e1000_phy_82577)) { fp@2359: e1e_rphy(hw, HV_TNCRS_UPPER, &phy_data); fp@2359: if (!e1e_rphy(hw, HV_TNCRS_LOWER, &phy_data)) fp@2359: adapter->stats.tncrs += phy_data; fp@2359: } else { fp@2359: if ((hw->mac.type != e1000_82574) && fp@2359: (hw->mac.type != e1000_82583)) fp@2359: adapter->stats.tncrs += er32(TNCRS); fp@2359: } fp@2359: adapter->stats.cexterr += er32(CEXTERR); fp@2359: adapter->stats.tsctc += er32(TSCTC); fp@2359: adapter->stats.tsctfc += er32(TSCTFC); fp@2359: fp@2359: /* Fill out the OS statistics structure */ fp@2359: netdev->stats.multicast = adapter->stats.mprc; fp@2359: netdev->stats.collisions = adapter->stats.colc; fp@2359: fp@2359: /* Rx Errors */ fp@2359: fp@2359: /* fp@2359: * RLEC on some newer hardware can be incorrect so build fp@2359: * our own version based on RUC and ROC fp@2359: */ fp@2359: netdev->stats.rx_errors = adapter->stats.rxerrc + fp@2359: adapter->stats.crcerrs + adapter->stats.algnerrc + fp@2359: adapter->stats.ruc + adapter->stats.roc + fp@2359: adapter->stats.cexterr; fp@2359: netdev->stats.rx_length_errors = adapter->stats.ruc + fp@2359: adapter->stats.roc; fp@2359: netdev->stats.rx_crc_errors = adapter->stats.crcerrs; fp@2359: netdev->stats.rx_frame_errors = adapter->stats.algnerrc; fp@2359: netdev->stats.rx_missed_errors = adapter->stats.mpc; fp@2359: fp@2359: /* Tx Errors */ fp@2359: netdev->stats.tx_errors = adapter->stats.ecol + fp@2359: adapter->stats.latecol; fp@2359: netdev->stats.tx_aborted_errors = adapter->stats.ecol; fp@2359: netdev->stats.tx_window_errors = adapter->stats.latecol; fp@2359: netdev->stats.tx_carrier_errors = adapter->stats.tncrs; fp@2359: fp@2359: /* Tx Dropped needs to be maintained elsewhere */ fp@2359: fp@2359: /* Management Stats */ fp@2359: adapter->stats.mgptc += er32(MGTPTC); fp@2359: adapter->stats.mgprc += er32(MGTPRC); fp@2359: adapter->stats.mgpdc += er32(MGTPDC); fp@2359: } fp@2359: fp@2359: /** fp@2359: * e1000_phy_read_status - Update the PHY register status snapshot fp@2359: * @adapter: board private structure fp@2359: **/ fp@2359: static void e1000_phy_read_status(struct e1000_adapter *adapter) fp@2359: { fp@2359: struct e1000_hw *hw = &adapter->hw; fp@2359: struct e1000_phy_regs *phy = &adapter->phy_regs; fp@2359: int ret_val; fp@2359: fp@2359: if ((er32(STATUS) & E1000_STATUS_LU) && fp@2359: (adapter->hw.phy.media_type == e1000_media_type_copper)) { fp@2359: ret_val = e1e_rphy(hw, PHY_CONTROL, &phy->bmcr); fp@2359: ret_val |= e1e_rphy(hw, PHY_STATUS, &phy->bmsr); fp@2359: ret_val |= e1e_rphy(hw, PHY_AUTONEG_ADV, &phy->advertise); fp@2359: ret_val |= e1e_rphy(hw, PHY_LP_ABILITY, &phy->lpa); fp@2359: ret_val |= e1e_rphy(hw, PHY_AUTONEG_EXP, &phy->expansion); fp@2359: ret_val |= e1e_rphy(hw, PHY_1000T_CTRL, &phy->ctrl1000); fp@2359: ret_val |= e1e_rphy(hw, PHY_1000T_STATUS, &phy->stat1000); fp@2359: ret_val |= e1e_rphy(hw, PHY_EXT_STATUS, &phy->estatus); fp@2359: if (ret_val) fp@2359: e_warn("Error reading PHY register\n"); fp@2359: } else { fp@2359: /* fp@2359: * Do not read PHY registers if link is not up fp@2359: * Set values to typical power-on defaults fp@2359: */ fp@2359: phy->bmcr = 
(BMCR_SPEED1000 | BMCR_ANENABLE | BMCR_FULLDPLX); fp@2359: phy->bmsr = (BMSR_100FULL | BMSR_100HALF | BMSR_10FULL | fp@2359: BMSR_10HALF | BMSR_ESTATEN | BMSR_ANEGCAPABLE | fp@2359: BMSR_ERCAP); fp@2359: phy->advertise = (ADVERTISE_PAUSE_ASYM | ADVERTISE_PAUSE_CAP | fp@2359: ADVERTISE_ALL | ADVERTISE_CSMA); fp@2359: phy->lpa = 0; fp@2359: phy->expansion = EXPANSION_ENABLENPAGE; fp@2359: phy->ctrl1000 = ADVERTISE_1000FULL; fp@2359: phy->stat1000 = 0; fp@2359: phy->estatus = (ESTATUS_1000_TFULL | ESTATUS_1000_THALF); fp@2359: } fp@2359: } fp@2359: fp@2359: static void e1000_print_link_info(struct e1000_adapter *adapter) fp@2359: { fp@2359: struct e1000_hw *hw = &adapter->hw; fp@2359: u32 ctrl = er32(CTRL); fp@2359: fp@2359: /* Link status message must follow this format for user tools */ fp@2359: printk(KERN_INFO "e1000e: %s NIC Link is Up %d Mbps %s, " fp@2359: "Flow Control: %s\n", fp@2359: adapter->netdev->name, fp@2359: adapter->link_speed, fp@2359: (adapter->link_duplex == FULL_DUPLEX) ? fp@2359: "Full Duplex" : "Half Duplex", fp@2359: ((ctrl & E1000_CTRL_TFCE) && (ctrl & E1000_CTRL_RFCE)) ? fp@2359: "RX/TX" : fp@2359: ((ctrl & E1000_CTRL_RFCE) ? "RX" : fp@2359: ((ctrl & E1000_CTRL_TFCE) ? "TX" : "None" ))); fp@2359: } fp@2359: fp@2359: bool e1000e_has_link(struct e1000_adapter *adapter) fp@2359: { fp@2359: struct e1000_hw *hw = &adapter->hw; fp@2359: bool link_active = 0; fp@2359: s32 ret_val = 0; fp@2359: fp@2359: /* fp@2359: * get_link_status is set on LSC (link status) interrupt or fp@2359: * Rx sequence error interrupt. get_link_status will stay fp@2359: * false until the check_for_link establishes link fp@2359: * for copper adapters ONLY fp@2359: */ fp@2359: switch (hw->phy.media_type) { fp@2359: case e1000_media_type_copper: fp@2359: if (hw->mac.get_link_status) { fp@2359: ret_val = hw->mac.ops.check_for_link(hw); fp@2359: link_active = !hw->mac.get_link_status; fp@2359: } else { fp@2359: link_active = 1; fp@2359: } fp@2359: break; fp@2359: case e1000_media_type_fiber: fp@2359: ret_val = hw->mac.ops.check_for_link(hw); fp@2359: link_active = !!(er32(STATUS) & E1000_STATUS_LU); fp@2359: break; fp@2359: case e1000_media_type_internal_serdes: fp@2359: ret_val = hw->mac.ops.check_for_link(hw); fp@2359: link_active = adapter->hw.mac.serdes_has_link; fp@2359: break; fp@2359: default: fp@2359: case e1000_media_type_unknown: fp@2359: break; fp@2359: } fp@2359: fp@2359: if ((ret_val == E1000_ERR_PHY) && (hw->phy.type == e1000_phy_igp_3) && fp@2359: (er32(CTRL) & E1000_PHY_CTRL_GBE_DISABLE)) { fp@2359: /* See e1000_kmrn_lock_loss_workaround_ich8lan() */ fp@2359: e_info("Gigabit has been disabled, downgrading speed\n"); fp@2359: } fp@2359: fp@2359: return link_active; fp@2359: } fp@2359: fp@2359: static void e1000e_enable_receives(struct e1000_adapter *adapter) fp@2359: { fp@2359: /* make sure the receive unit is started */ fp@2359: if ((adapter->flags & FLAG_RX_NEEDS_RESTART) && fp@2359: (adapter->flags & FLAG_RX_RESTART_NOW)) { fp@2359: struct e1000_hw *hw = &adapter->hw; fp@2359: u32 rctl = er32(RCTL); fp@2359: ew32(RCTL, rctl | E1000_RCTL_EN); fp@2359: adapter->flags &= ~FLAG_RX_RESTART_NOW; fp@2359: } fp@2359: } fp@2359: fp@2359: /** fp@2359: * e1000_watchdog - Timer Call-back fp@2359: * @data: pointer to adapter cast into an unsigned long fp@2359: **/ fp@2359: static void e1000_watchdog(unsigned long data) fp@2359: { fp@2359: struct e1000_adapter *adapter = (struct e1000_adapter *) data; fp@2359: fp@2359: /* Do the rest outside of interrupt context */ fp@2359: 
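fp@2359: /*
fp@2359: * (timer callbacks run in softirq context and must not sleep;
fp@2359: * the work item runs in process context, where the PHY semaphore
fp@2359: * can be taken and msleep() is allowed)
fp@2359: */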
schedule_work(&adapter->watchdog_task); fp@2359: fp@2359: /* TODO: make this use queue_delayed_work() */ fp@2359: } fp@2359: fp@2359: static void e1000_watchdog_task(struct work_struct *work) fp@2359: { fp@2359: struct e1000_adapter *adapter = container_of(work, fp@2359: struct e1000_adapter, watchdog_task); fp@2359: struct net_device *netdev = adapter->netdev; fp@2359: struct e1000_mac_info *mac = &adapter->hw.mac; fp@2359: struct e1000_phy_info *phy = &adapter->hw.phy; fp@2359: struct e1000_ring *tx_ring = adapter->tx_ring; fp@2359: struct e1000_hw *hw = &adapter->hw; fp@2359: u32 link, tctl; fp@2359: int tx_pending = 0; fp@2359: fp@2359: link = e1000e_has_link(adapter); fp@2359: if ((netif_carrier_ok(netdev)) && link) { fp@2359: /* Cancel scheduled suspend requests. */ fp@2359: pm_runtime_resume(netdev->dev.parent); fp@2359: fp@2359: e1000e_enable_receives(adapter); fp@2359: goto link_up; fp@2359: } fp@2359: fp@2359: if ((e1000e_enable_tx_pkt_filtering(hw)) && fp@2359: (adapter->mng_vlan_id != adapter->hw.mng_cookie.vlan_id)) fp@2359: e1000_update_mng_vlan(adapter); fp@2359: fp@2359: if (link) { fp@2359: if (!netif_carrier_ok(netdev)) { fp@2359: bool txb2b = 1; fp@2359: fp@2359: /* Cancel scheduled suspend requests. */ fp@2359: pm_runtime_resume(netdev->dev.parent); fp@2359: fp@2359: /* update snapshot of PHY registers on LSC */ fp@2359: e1000_phy_read_status(adapter); fp@2359: mac->ops.get_link_up_info(&adapter->hw, fp@2359: &adapter->link_speed, fp@2359: &adapter->link_duplex); fp@2359: e1000_print_link_info(adapter); fp@2359: /* fp@2359: * On supported PHYs, check for duplex mismatch only fp@2359: * if link has autonegotiated at 10/100 half fp@2359: */ fp@2359: if ((hw->phy.type == e1000_phy_igp_3 || fp@2359: hw->phy.type == e1000_phy_bm) && fp@2359: (hw->mac.autoneg == true) && fp@2359: (adapter->link_speed == SPEED_10 || fp@2359: adapter->link_speed == SPEED_100) && fp@2359: (adapter->link_duplex == HALF_DUPLEX)) { fp@2359: u16 autoneg_exp; fp@2359: fp@2359: e1e_rphy(hw, PHY_AUTONEG_EXP, &autoneg_exp); fp@2359: fp@2359: if (!(autoneg_exp & NWAY_ER_LP_NWAY_CAPS)) fp@2359: e_info("Autonegotiated half duplex but" fp@2359: " link partner cannot autoneg. 
" fp@2359: " Try forcing full duplex if " fp@2359: "link gets many collisions.\n"); fp@2359: } fp@2359: fp@2359: /* adjust timeout factor according to speed/duplex */ fp@2359: adapter->tx_timeout_factor = 1; fp@2359: switch (adapter->link_speed) { fp@2359: case SPEED_10: fp@2359: txb2b = 0; fp@2359: adapter->tx_timeout_factor = 16; fp@2359: break; fp@2359: case SPEED_100: fp@2359: txb2b = 0; fp@2359: adapter->tx_timeout_factor = 10; fp@2359: break; fp@2359: } fp@2359: fp@2359: /* fp@2359: * workaround: re-program speed mode bit after fp@2359: * link-up event fp@2359: */ fp@2359: if ((adapter->flags & FLAG_TARC_SPEED_MODE_BIT) && fp@2359: !txb2b) { fp@2359: u32 tarc0; fp@2359: tarc0 = er32(TARC(0)); fp@2359: tarc0 &= ~SPEED_MODE_BIT; fp@2359: ew32(TARC(0), tarc0); fp@2359: } fp@2359: fp@2359: /* fp@2359: * disable TSO for pcie and 10/100 speeds, to avoid fp@2359: * some hardware issues fp@2359: */ fp@2359: if (!(adapter->flags & FLAG_TSO_FORCE)) { fp@2359: switch (adapter->link_speed) { fp@2359: case SPEED_10: fp@2359: case SPEED_100: fp@2359: e_info("10/100 speed: disabling TSO\n"); fp@2359: netdev->features &= ~NETIF_F_TSO; fp@2359: netdev->features &= ~NETIF_F_TSO6; fp@2359: break; fp@2359: case SPEED_1000: fp@2359: netdev->features |= NETIF_F_TSO; fp@2359: netdev->features |= NETIF_F_TSO6; fp@2359: break; fp@2359: default: fp@2359: /* oops */ fp@2359: break; fp@2359: } fp@2359: } fp@2359: fp@2359: /* fp@2359: * enable transmits in the hardware, need to do this fp@2359: * after setting TARC(0) fp@2359: */ fp@2359: tctl = er32(TCTL); fp@2359: tctl |= E1000_TCTL_EN; fp@2359: ew32(TCTL, tctl); fp@2359: fp@2359: /* fp@2359: * Perform any post-link-up configuration before fp@2359: * reporting link up. fp@2359: */ fp@2359: if (phy->ops.cfg_on_link_up) fp@2359: phy->ops.cfg_on_link_up(hw); fp@2359: fp@2359: netif_carrier_on(netdev); fp@2359: fp@2359: if (!test_bit(__E1000_DOWN, &adapter->state)) fp@2359: mod_timer(&adapter->phy_info_timer, fp@2359: round_jiffies(jiffies + 2 * HZ)); fp@2359: } fp@2359: } else { fp@2359: if (netif_carrier_ok(netdev)) { fp@2359: adapter->link_speed = 0; fp@2359: adapter->link_duplex = 0; fp@2359: /* Link status message must follow this format */ fp@2359: printk(KERN_INFO "e1000e: %s NIC Link is Down\n", fp@2359: adapter->netdev->name); fp@2359: netif_carrier_off(netdev); fp@2359: if (!test_bit(__E1000_DOWN, &adapter->state)) fp@2359: mod_timer(&adapter->phy_info_timer, fp@2359: round_jiffies(jiffies + 2 * HZ)); fp@2359: fp@2359: if (adapter->flags & FLAG_RX_NEEDS_RESTART) fp@2359: schedule_work(&adapter->reset_task); fp@2359: else fp@2359: pm_schedule_suspend(netdev->dev.parent, fp@2359: LINK_TIMEOUT); fp@2359: } fp@2359: } fp@2359: fp@2359: link_up: fp@2359: e1000e_update_stats(adapter); fp@2359: fp@2359: mac->tx_packet_delta = adapter->stats.tpt - adapter->tpt_old; fp@2359: adapter->tpt_old = adapter->stats.tpt; fp@2359: mac->collision_delta = adapter->stats.colc - adapter->colc_old; fp@2359: adapter->colc_old = adapter->stats.colc; fp@2359: fp@2359: adapter->gorc = adapter->stats.gorc - adapter->gorc_old; fp@2359: adapter->gorc_old = adapter->stats.gorc; fp@2359: adapter->gotc = adapter->stats.gotc - adapter->gotc_old; fp@2359: adapter->gotc_old = adapter->stats.gotc; fp@2359: fp@2359: e1000e_update_adaptive(&adapter->hw); fp@2359: fp@2359: if (!netif_carrier_ok(netdev)) { fp@2359: tx_pending = (e1000_desc_unused(tx_ring) + 1 < fp@2359: tx_ring->count); fp@2359: if (tx_pending) { fp@2359: /* fp@2359: * We've lost link, so the controller stops DMA, fp@2359: * 
but we've got queued Tx work that's never going fp@2359: * to get done, so reset controller to flush Tx. fp@2359: * (Do the reset outside of interrupt context). fp@2359: */ fp@2359: adapter->tx_timeout_count++; fp@2359: schedule_work(&adapter->reset_task); fp@2359: /* return immediately since reset is imminent */ fp@2359: return; fp@2359: } fp@2359: } fp@2359: fp@2359: /* Simple mode for Interrupt Throttle Rate (ITR) */ fp@2359: if (adapter->itr_setting == 4) { fp@2359: /* fp@2359: * Symmetric Tx/Rx gets a reduced ITR=2000; fp@2359: * Total asymmetrical Tx or Rx gets ITR=8000; fp@2359: * everyone else is between 2000-8000. fp@2359: */ fp@2359: u32 goc = (adapter->gotc + adapter->gorc) / 10000; fp@2359: u32 dif = (adapter->gotc > adapter->gorc ? fp@2359: adapter->gotc - adapter->gorc : fp@2359: adapter->gorc - adapter->gotc) / 10000; fp@2359: u32 itr = goc > 0 ? (dif * 6000 / goc + 2000) : 8000; fp@2359: fp@2359: ew32(ITR, 1000000000 / (itr * 256)); fp@2359: } fp@2359: fp@2359: /* Cause software interrupt to ensure Rx ring is cleaned */ fp@2359: if (adapter->msix_entries) fp@2359: ew32(ICS, adapter->rx_ring->ims_val); fp@2359: else fp@2359: ew32(ICS, E1000_ICS_RXDMT0); fp@2359: fp@2359: /* Force detection of hung controller every watchdog period */ fp@2359: adapter->detect_tx_hung = 1; fp@2359: fp@2359: /* fp@2359: * With 82571 controllers, LAA may be overwritten due to controller fp@2359: * reset from the other port. Set the appropriate LAA in RAR[0] fp@2359: */ fp@2359: if (e1000e_get_laa_state_82571(hw)) fp@2359: e1000e_rar_set(hw, adapter->hw.mac.addr, 0); fp@2359: fp@2359: /* Reset the timer */ fp@2359: if (!test_bit(__E1000_DOWN, &adapter->state)) fp@2359: mod_timer(&adapter->watchdog_timer, fp@2359: round_jiffies(jiffies + 2 * HZ)); fp@2359: } fp@2359: fp@2359: #define E1000_TX_FLAGS_CSUM 0x00000001 fp@2359: #define E1000_TX_FLAGS_VLAN 0x00000002 fp@2359: #define E1000_TX_FLAGS_TSO 0x00000004 fp@2359: #define E1000_TX_FLAGS_IPV4 0x00000008 fp@2359: #define E1000_TX_FLAGS_VLAN_MASK 0xffff0000 fp@2359: #define E1000_TX_FLAGS_VLAN_SHIFT 16 fp@2359: fp@2359: static int e1000_tso(struct e1000_adapter *adapter, fp@2359: struct sk_buff *skb) fp@2359: { fp@2359: struct e1000_ring *tx_ring = adapter->tx_ring; fp@2359: struct e1000_context_desc *context_desc; fp@2359: struct e1000_buffer *buffer_info; fp@2359: unsigned int i; fp@2359: u32 cmd_length = 0; fp@2359: u16 ipcse = 0, tucse, mss; fp@2359: u8 ipcss, ipcso, tucss, tucso, hdr_len; fp@2359: int err; fp@2359: fp@2359: if (!skb_is_gso(skb)) fp@2359: return 0; fp@2359: fp@2359: if (skb_header_cloned(skb)) { fp@2359: err = pskb_expand_head(skb, 0, 0, GFP_ATOMIC); fp@2359: if (err) fp@2359: return err; fp@2359: } fp@2359: fp@2359: hdr_len = skb_transport_offset(skb) + tcp_hdrlen(skb); fp@2359: mss = skb_shinfo(skb)->gso_size; fp@2359: if (skb->protocol == htons(ETH_P_IP)) { fp@2359: struct iphdr *iph = ip_hdr(skb); fp@2359: iph->tot_len = 0; fp@2359: iph->check = 0; fp@2359: tcp_hdr(skb)->check = ~csum_tcpudp_magic(iph->saddr, iph->daddr, fp@2359: 0, IPPROTO_TCP, 0); fp@2359: cmd_length = E1000_TXD_CMD_IP; fp@2359: ipcse = skb_transport_offset(skb) - 1; fp@2359: } else if (skb_is_gso_v6(skb)) { fp@2359: ipv6_hdr(skb)->payload_len = 0; fp@2359: tcp_hdr(skb)->check = ~csum_ipv6_magic(&ipv6_hdr(skb)->saddr, fp@2359: &ipv6_hdr(skb)->daddr, fp@2359: 0, IPPROTO_TCP, 0); fp@2359: ipcse = 0; fp@2359: } fp@2359: ipcss = skb_network_offset(skb); fp@2359: ipcso = (void *)&(ip_hdr(skb)->check) - (void *)skb->data; fp@2359: tucss = 
skb_transport_offset(skb); fp@2359: tucso = (void *)&(tcp_hdr(skb)->check) - (void *)skb->data; fp@2359: tucse = 0; fp@2359: fp@2359: cmd_length |= (E1000_TXD_CMD_DEXT | E1000_TXD_CMD_TSE | fp@2359: E1000_TXD_CMD_TCP | (skb->len - (hdr_len))); fp@2359: fp@2359: i = tx_ring->next_to_use; fp@2359: context_desc = E1000_CONTEXT_DESC(*tx_ring, i); fp@2359: buffer_info = &tx_ring->buffer_info[i]; fp@2359: fp@2359: context_desc->lower_setup.ip_fields.ipcss = ipcss; fp@2359: context_desc->lower_setup.ip_fields.ipcso = ipcso; fp@2359: context_desc->lower_setup.ip_fields.ipcse = cpu_to_le16(ipcse); fp@2359: context_desc->upper_setup.tcp_fields.tucss = tucss; fp@2359: context_desc->upper_setup.tcp_fields.tucso = tucso; fp@2359: context_desc->upper_setup.tcp_fields.tucse = cpu_to_le16(tucse); fp@2359: context_desc->tcp_seg_setup.fields.mss = cpu_to_le16(mss); fp@2359: context_desc->tcp_seg_setup.fields.hdr_len = hdr_len; fp@2359: context_desc->cmd_and_length = cpu_to_le32(cmd_length); fp@2359: fp@2359: buffer_info->time_stamp = jiffies; fp@2359: buffer_info->next_to_watch = i; fp@2359: fp@2359: i++; fp@2359: if (i == tx_ring->count) fp@2359: i = 0; fp@2359: tx_ring->next_to_use = i; fp@2359: fp@2359: return 1; fp@2359: } fp@2359: fp@2359: static bool e1000_tx_csum(struct e1000_adapter *adapter, struct sk_buff *skb) fp@2359: { fp@2359: struct e1000_ring *tx_ring = adapter->tx_ring; fp@2359: struct e1000_context_desc *context_desc; fp@2359: struct e1000_buffer *buffer_info; fp@2359: unsigned int i; fp@2359: u8 css; fp@2359: u32 cmd_len = E1000_TXD_CMD_DEXT; fp@2359: __be16 protocol; fp@2359: fp@2359: if (skb->ip_summed != CHECKSUM_PARTIAL) fp@2359: return 0; fp@2359: fp@2359: if (skb->protocol == cpu_to_be16(ETH_P_8021Q)) fp@2359: protocol = vlan_eth_hdr(skb)->h_vlan_encapsulated_proto; fp@2359: else fp@2359: protocol = skb->protocol; fp@2359: fp@2359: switch (protocol) { fp@2359: case cpu_to_be16(ETH_P_IP): fp@2359: if (ip_hdr(skb)->protocol == IPPROTO_TCP) fp@2359: cmd_len |= E1000_TXD_CMD_TCP; fp@2359: break; fp@2359: case cpu_to_be16(ETH_P_IPV6): fp@2359: /* XXX not handling all IPV6 headers */ fp@2359: if (ipv6_hdr(skb)->nexthdr == IPPROTO_TCP) fp@2359: cmd_len |= E1000_TXD_CMD_TCP; fp@2359: break; fp@2359: default: fp@2359: if (unlikely(net_ratelimit())) fp@2359: e_warn("checksum_partial proto=%x!\n", fp@2359: be16_to_cpu(protocol)); fp@2359: break; fp@2359: } fp@2359: fp@2359: css = skb_transport_offset(skb); fp@2359: fp@2359: i = tx_ring->next_to_use; fp@2359: buffer_info = &tx_ring->buffer_info[i]; fp@2359: context_desc = E1000_CONTEXT_DESC(*tx_ring, i); fp@2359: fp@2359: context_desc->lower_setup.ip_config = 0; fp@2359: context_desc->upper_setup.tcp_fields.tucss = css; fp@2359: context_desc->upper_setup.tcp_fields.tucso = fp@2359: css + skb->csum_offset; fp@2359: context_desc->upper_setup.tcp_fields.tucse = 0; fp@2359: context_desc->tcp_seg_setup.data = 0; fp@2359: context_desc->cmd_and_length = cpu_to_le32(cmd_len); fp@2359: fp@2359: buffer_info->time_stamp = jiffies; fp@2359: buffer_info->next_to_watch = i; fp@2359: fp@2359: i++; fp@2359: if (i == tx_ring->count) fp@2359: i = 0; fp@2359: tx_ring->next_to_use = i; fp@2359: fp@2359: return 1; fp@2359: } fp@2359: fp@2359: #define E1000_MAX_PER_TXD 8192 fp@2359: #define E1000_MAX_TXD_PWR 12 fp@2359: fp@2359: static int e1000_tx_map(struct e1000_adapter *adapter, fp@2359: struct sk_buff *skb, unsigned int first, fp@2359: unsigned int max_per_txd, unsigned int nr_frags, fp@2359: unsigned int mss) fp@2359: { fp@2359: struct e1000_ring *tx_ring = 
adapter->tx_ring;
fp@2359: struct pci_dev *pdev = adapter->pdev;
fp@2359: struct e1000_buffer *buffer_info;
fp@2359: unsigned int len = skb_headlen(skb);
fp@2359: unsigned int offset = 0, size, count = 0, i;
fp@2359: unsigned int f, bytecount, segs;
fp@2359:
fp@2359: i = tx_ring->next_to_use;
fp@2359:
fp@2359: while (len) {
fp@2359: buffer_info = &tx_ring->buffer_info[i];
fp@2359: size = min(len, max_per_txd);
fp@2359:
fp@2359: buffer_info->length = size;
fp@2359: buffer_info->time_stamp = jiffies;
fp@2359: buffer_info->next_to_watch = i;
fp@2359: buffer_info->dma = dma_map_single(&pdev->dev,
fp@2359: skb->data + offset,
fp@2359: size, DMA_TO_DEVICE);
fp@2359: buffer_info->mapped_as_page = false;
fp@2359: if (dma_mapping_error(&pdev->dev, buffer_info->dma))
fp@2359: goto dma_error;
fp@2359:
fp@2359: len -= size;
fp@2359: offset += size;
fp@2359: count++;
fp@2359:
fp@2359: if (len) {
fp@2359: i++;
fp@2359: if (i == tx_ring->count)
fp@2359: i = 0;
fp@2359: }
fp@2359: }
fp@2359:
fp@2359: for (f = 0; f < nr_frags; f++) {
fp@2359: struct skb_frag_struct *frag;
fp@2359:
fp@2359: frag = &skb_shinfo(skb)->frags[f];
fp@2359: len = frag->size;
fp@2359: offset = frag->page_offset;
fp@2359:
fp@2359: while (len) {
fp@2359: i++;
fp@2359: if (i == tx_ring->count)
fp@2359: i = 0;
fp@2359:
fp@2359: buffer_info = &tx_ring->buffer_info[i];
fp@2359: size = min(len, max_per_txd);
fp@2359:
fp@2359: buffer_info->length = size;
fp@2359: buffer_info->time_stamp = jiffies;
fp@2359: buffer_info->next_to_watch = i;
fp@2359: buffer_info->dma = dma_map_page(&pdev->dev, frag->page,
fp@2359: offset, size,
fp@2359: DMA_TO_DEVICE);
fp@2359: buffer_info->mapped_as_page = true;
fp@2359: if (dma_mapping_error(&pdev->dev, buffer_info->dma))
fp@2359: goto dma_error;
fp@2359:
fp@2359: len -= size;
fp@2359: offset += size;
fp@2359: count++;
fp@2359: }
fp@2359: }
fp@2359:
fp@2359: segs = skb_shinfo(skb)->gso_segs ?: 1;
fp@2359: /* multiply data chunks by size of headers */
fp@2359: bytecount = ((segs - 1) * skb_headlen(skb)) + skb->len;
fp@2359:
fp@2359: tx_ring->buffer_info[i].skb = skb;
fp@2359: tx_ring->buffer_info[i].segs = segs;
fp@2359: tx_ring->buffer_info[i].bytecount = bytecount;
fp@2359: tx_ring->buffer_info[first].next_to_watch = i;
fp@2359:
fp@2359: return count;
fp@2359:
fp@2359: dma_error:
fp@2359: dev_err(&pdev->dev, "TX DMA map failed\n");
fp@2359: buffer_info->dma = 0;
fp@2359: if (count)
fp@2359: count--;
fp@2359:
fp@2359: while (count--) {
fp@2359: if (i == 0)
fp@2359: i += tx_ring->count;
fp@2359: i--;
fp@2359: buffer_info = &tx_ring->buffer_info[i];
fp@2359: e1000_put_txbuf(adapter, buffer_info);
fp@2359: }
fp@2359:
fp@2359: return 0;
fp@2359: }
fp@2359:
fp@2359: static void e1000_tx_queue(struct e1000_adapter *adapter,
fp@2359: int tx_flags, int count)
fp@2359: {
fp@2359: struct e1000_ring *tx_ring = adapter->tx_ring;
fp@2359: struct e1000_tx_desc *tx_desc = NULL;
fp@2359: struct e1000_buffer *buffer_info;
fp@2359: u32 txd_upper = 0, txd_lower = E1000_TXD_CMD_IFCS;
fp@2359: unsigned int i;
fp@2359:
fp@2359: if (tx_flags & E1000_TX_FLAGS_TSO) {
fp@2359: txd_lower |= E1000_TXD_CMD_DEXT | E1000_TXD_DTYP_D |
fp@2359: E1000_TXD_CMD_TSE;
fp@2359: txd_upper |= E1000_TXD_POPTS_TXSM << 8;
fp@2359:
fp@2359: if (tx_flags & E1000_TX_FLAGS_IPV4)
fp@2359: txd_upper |= E1000_TXD_POPTS_IXSM << 8;
fp@2359: }
fp@2359:
fp@2359: if (tx_flags & E1000_TX_FLAGS_CSUM) {
fp@2359: txd_lower |= E1000_TXD_CMD_DEXT | E1000_TXD_DTYP_D;
fp@2359: txd_upper |= E1000_TXD_POPTS_TXSM << 8;
fp@2359: }
fp@2359:
fp@2359: if (tx_flags &
E1000_TX_FLAGS_VLAN) { fp@2359: txd_lower |= E1000_TXD_CMD_VLE; fp@2359: txd_upper |= (tx_flags & E1000_TX_FLAGS_VLAN_MASK); fp@2359: } fp@2359: fp@2359: i = tx_ring->next_to_use; fp@2359: fp@2359: while (count--) { fp@2359: buffer_info = &tx_ring->buffer_info[i]; fp@2359: tx_desc = E1000_TX_DESC(*tx_ring, i); fp@2359: tx_desc->buffer_addr = cpu_to_le64(buffer_info->dma); fp@2359: tx_desc->lower.data = fp@2359: cpu_to_le32(txd_lower | buffer_info->length); fp@2359: tx_desc->upper.data = cpu_to_le32(txd_upper); fp@2359: fp@2359: i++; fp@2359: if (i == tx_ring->count) fp@2359: i = 0; fp@2359: } fp@2359: fp@2359: tx_desc->lower.data |= cpu_to_le32(adapter->txd_cmd); fp@2359: fp@2359: /* fp@2359: * Force memory writes to complete before letting h/w fp@2359: * know there are new descriptors to fetch. (Only fp@2359: * applicable for weak-ordered memory model archs, fp@2359: * such as IA-64). fp@2359: */ fp@2359: wmb(); fp@2359: fp@2359: tx_ring->next_to_use = i; fp@2359: writel(i, adapter->hw.hw_addr + tx_ring->tail); fp@2359: /* fp@2359: * we need this if more than one processor can write to our tail fp@2359: * at a time, it synchronizes IO on IA64/Altix systems fp@2359: */ fp@2359: mmiowb(); fp@2359: } fp@2359: fp@2359: #define MINIMUM_DHCP_PACKET_SIZE 282 fp@2359: static int e1000_transfer_dhcp_info(struct e1000_adapter *adapter, fp@2359: struct sk_buff *skb) fp@2359: { fp@2359: struct e1000_hw *hw = &adapter->hw; fp@2359: u16 length, offset; fp@2359: fp@2359: if (vlan_tx_tag_present(skb)) { fp@2359: if (!((vlan_tx_tag_get(skb) == adapter->hw.mng_cookie.vlan_id) && fp@2359: (adapter->hw.mng_cookie.status & fp@2359: E1000_MNG_DHCP_COOKIE_STATUS_VLAN))) fp@2359: return 0; fp@2359: } fp@2359: fp@2359: if (skb->len <= MINIMUM_DHCP_PACKET_SIZE) fp@2359: return 0; fp@2359: fp@2359: if (((struct ethhdr *) skb->data)->h_proto != htons(ETH_P_IP)) fp@2359: return 0; fp@2359: fp@2359: { fp@2359: const struct iphdr *ip = (struct iphdr *)((u8 *)skb->data+14); fp@2359: struct udphdr *udp; fp@2359: fp@2359: if (ip->protocol != IPPROTO_UDP) fp@2359: return 0; fp@2359: fp@2359: udp = (struct udphdr *)((u8 *)ip + (ip->ihl << 2)); fp@2359: if (ntohs(udp->dest) != 67) fp@2359: return 0; fp@2359: fp@2359: offset = (u8 *)udp + 8 - skb->data; fp@2359: length = skb->len - offset; fp@2359: return e1000e_mng_write_dhcp_info(hw, (u8 *)udp + 8, length); fp@2359: } fp@2359: fp@2359: return 0; fp@2359: } fp@2359: fp@2359: static int __e1000_maybe_stop_tx(struct net_device *netdev, int size) fp@2359: { fp@2359: struct e1000_adapter *adapter = netdev_priv(netdev); fp@2359: fp@2359: netif_stop_queue(netdev); fp@2359: /* fp@2359: * Herbert's original patch had: fp@2359: * smp_mb__after_netif_stop_queue(); fp@2359: * but since that doesn't exist yet, just open code it. fp@2359: */ fp@2359: smp_mb(); fp@2359: fp@2359: /* fp@2359: * We need to check again in a case another CPU has just fp@2359: * made room available. fp@2359: */ fp@2359: if (e1000_desc_unused(adapter->tx_ring) < size) fp@2359: return -EBUSY; fp@2359: fp@2359: /* A reprieve! 
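fp@2359: * Another CPU completed transmits and freed descriptors between
fp@2359: * our re-check and now, so it is safe to restart the queue.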
*/
fp@2359: netif_start_queue(netdev);
fp@2359: ++adapter->restart_queue;
fp@2359: return 0;
fp@2359: }
fp@2359:
fp@2359: static int e1000_maybe_stop_tx(struct net_device *netdev, int size)
fp@2359: {
fp@2359: struct e1000_adapter *adapter = netdev_priv(netdev);
fp@2359:
fp@2359: if (e1000_desc_unused(adapter->tx_ring) >= size)
fp@2359: return 0;
fp@2359: return __e1000_maybe_stop_tx(netdev, size);
fp@2359: }
fp@2359:
fp@2359: #define TXD_USE_COUNT(S, X) (((S) >> (X)) + 1)
fp@2359: static netdev_tx_t e1000_xmit_frame(struct sk_buff *skb,
fp@2359: struct net_device *netdev)
fp@2359: {
fp@2359: struct e1000_adapter *adapter = netdev_priv(netdev);
fp@2359: struct e1000_ring *tx_ring = adapter->tx_ring;
fp@2359: unsigned int first;
fp@2359: unsigned int max_per_txd = E1000_MAX_PER_TXD;
fp@2359: unsigned int max_txd_pwr = E1000_MAX_TXD_PWR;
fp@2359: unsigned int tx_flags = 0;
fp@2359: unsigned int len = skb_headlen(skb);
fp@2359: unsigned int nr_frags;
fp@2359: unsigned int mss;
fp@2359: int count = 0;
fp@2359: int tso;
fp@2359: unsigned int f;
fp@2359:
fp@2359: if (test_bit(__E1000_DOWN, &adapter->state)) {
fp@2359: dev_kfree_skb_any(skb);
fp@2359: return NETDEV_TX_OK;
fp@2359: }
fp@2359:
fp@2359: if (skb->len <= 0) {
fp@2359: dev_kfree_skb_any(skb);
fp@2359: return NETDEV_TX_OK;
fp@2359: }
fp@2359:
fp@2359: mss = skb_shinfo(skb)->gso_size;
fp@2359: /*
fp@2359: * The controller does a simple calculation to
fp@2359: * make sure there is enough room in the FIFO before
fp@2359: * initiating the DMA for each buffer. The calc is:
fp@2359: * 4 = ceil(buffer len/mss). To make sure we don't
fp@2359: * overrun the FIFO, adjust the max buffer len if mss
fp@2359: * drops.
fp@2359: */
fp@2359: if (mss) {
fp@2359: u8 hdr_len;
fp@2359: max_per_txd = min(mss << 2, max_per_txd);
fp@2359: max_txd_pwr = fls(max_per_txd) - 1;
fp@2359:
fp@2359: /*
fp@2359: * TSO Workaround for 82571/2/3 Controllers -- if skb->data
fp@2359: * points to just header, pull a few bytes of payload from
fp@2359: * frags into skb->data
fp@2359: */
fp@2359: hdr_len = skb_transport_offset(skb) + tcp_hdrlen(skb);
fp@2359: /*
fp@2359: * we do this workaround for ES2LAN, but it is unnecessary;
fp@2359: * avoiding it could save a lot of cycles
fp@2359: */
fp@2359: if (skb->data_len && (hdr_len == len)) {
fp@2359: unsigned int pull_size;
fp@2359:
fp@2359: pull_size = min((unsigned int)4, skb->data_len);
fp@2359: if (!__pskb_pull_tail(skb, pull_size)) {
fp@2359: e_err("__pskb_pull_tail failed.\n");
fp@2359: dev_kfree_skb_any(skb);
fp@2359: return NETDEV_TX_OK;
fp@2359: }
fp@2359: len = skb_headlen(skb);
fp@2359: }
fp@2359: }
fp@2359:
fp@2359: /* reserve a descriptor for the offload context */
fp@2359: if ((mss) || (skb->ip_summed == CHECKSUM_PARTIAL))
fp@2359: count++;
fp@2359: count++;
fp@2359:
fp@2359: count += TXD_USE_COUNT(len, max_txd_pwr);
fp@2359:
fp@2359: nr_frags = skb_shinfo(skb)->nr_frags;
fp@2359: for (f = 0; f < nr_frags; f++)
fp@2359: count += TXD_USE_COUNT(skb_shinfo(skb)->frags[f].size,
fp@2359: max_txd_pwr);
fp@2359:
fp@2359: if (adapter->hw.mac.tx_pkt_filtering)
fp@2359: e1000_transfer_dhcp_info(adapter, skb);
fp@2359:
fp@2359: /*
fp@2359: * need: count + 2 desc gap to keep tail from touching
fp@2359: * head, otherwise try next time
fp@2359: */
fp@2359: if (e1000_maybe_stop_tx(netdev, count + 2))
fp@2359: return NETDEV_TX_BUSY;
fp@2359:
fp@2359: if (adapter->vlgrp && vlan_tx_tag_present(skb)) {
fp@2359: tx_flags |= E1000_TX_FLAGS_VLAN;
fp@2359: tx_flags |= (vlan_tx_tag_get(skb) << E1000_TX_FLAGS_VLAN_SHIFT);
fp@2359: }
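fp@2359:
fp@2359: /*
fp@2359: * Record the ring position of the packet's first descriptor:
fp@2359: * e1000_tx_map() stores the last descriptor index in
fp@2359: * buffer_info[first].next_to_watch, and next_to_use is rewound
fp@2359: * to it if DMA mapping fails.
fp@2359: */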
fp@2359: first = tx_ring->next_to_use;
fp@2359:
fp@2359: tso = e1000_tso(adapter, skb);
fp@2359: if (tso < 0) {
fp@2359: dev_kfree_skb_any(skb);
fp@2359: return NETDEV_TX_OK;
fp@2359: }
fp@2359:
fp@2359: if (tso)
fp@2359: tx_flags |= E1000_TX_FLAGS_TSO;
fp@2359: else if (e1000_tx_csum(adapter, skb))
fp@2359: tx_flags |= E1000_TX_FLAGS_CSUM;
fp@2359:
fp@2359: /*
fp@2359: * Old method was to assume IPv4 packet by default if TSO was enabled.
fp@2359: * 82571 hardware supports TSO capabilities for IPv6 as well, so we
fp@2359: * can no longer assume IPv4 by default; check the protocol instead.
fp@2359: */
fp@2359: if (skb->protocol == htons(ETH_P_IP))
fp@2359: tx_flags |= E1000_TX_FLAGS_IPV4;
fp@2359:
fp@2359: /* if count is 0 then a DMA mapping error has occurred */
fp@2359: count = e1000_tx_map(adapter, skb, first, max_per_txd, nr_frags, mss);
fp@2359: if (count) {
fp@2359: e1000_tx_queue(adapter, tx_flags, count);
fp@2359: /* Make sure there is space in the ring for the next send. */
fp@2359: e1000_maybe_stop_tx(netdev, MAX_SKB_FRAGS + 2);
fp@2359:
fp@2359: } else {
fp@2359: dev_kfree_skb_any(skb);
fp@2359: tx_ring->buffer_info[first].time_stamp = 0;
fp@2359: tx_ring->next_to_use = first;
fp@2359: }
fp@2359:
fp@2359: return NETDEV_TX_OK;
fp@2359: }
fp@2359:
fp@2359: /**
fp@2359: * e1000_tx_timeout - Respond to a Tx Hang
fp@2359: * @netdev: network interface device structure
fp@2359: **/
fp@2359: static void e1000_tx_timeout(struct net_device *netdev)
fp@2359: {
fp@2359: struct e1000_adapter *adapter = netdev_priv(netdev);
fp@2359:
fp@2359: /* Do the reset outside of interrupt context */
fp@2359: adapter->tx_timeout_count++;
fp@2359: schedule_work(&adapter->reset_task);
fp@2359: }
fp@2359:
fp@2359: static void e1000_reset_task(struct work_struct *work)
fp@2359: {
fp@2359: struct e1000_adapter *adapter;
fp@2359: adapter = container_of(work, struct e1000_adapter, reset_task);
fp@2359:
fp@2359: e1000e_dump(adapter);
fp@2359: e_err("Reset adapter\n");
fp@2359: e1000e_reinit_locked(adapter);
fp@2359: }
fp@2359:
fp@2359: /**
fp@2359: * e1000_get_stats - Get System Network Statistics
fp@2359: * @netdev: network interface device structure
fp@2359: *
fp@2359: * Returns the address of the device statistics structure.
fp@2359: * The statistics are actually updated from the timer callback.
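fp@2359: * (e1000e_update_stats() runs from the watchdog task, roughly every
fp@2359: * two seconds while the interface is up)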
fp@2359: **/ fp@2359: static struct net_device_stats *e1000_get_stats(struct net_device *netdev) fp@2359: { fp@2359: /* only return the current stats */ fp@2359: return &netdev->stats; fp@2359: } fp@2359: fp@2359: /** fp@2359: * e1000_change_mtu - Change the Maximum Transfer Unit fp@2359: * @netdev: network interface device structure fp@2359: * @new_mtu: new value for maximum frame size fp@2359: * fp@2359: * Returns 0 on success, negative on failure fp@2359: **/ fp@2359: static int e1000_change_mtu(struct net_device *netdev, int new_mtu) fp@2359: { fp@2359: struct e1000_adapter *adapter = netdev_priv(netdev); fp@2359: int max_frame = new_mtu + ETH_HLEN + ETH_FCS_LEN; fp@2359: fp@2359: /* Jumbo frame support */ fp@2359: if ((max_frame > ETH_FRAME_LEN + ETH_FCS_LEN) && fp@2359: !(adapter->flags & FLAG_HAS_JUMBO_FRAMES)) { fp@2359: e_err("Jumbo Frames not supported.\n"); fp@2359: return -EINVAL; fp@2359: } fp@2359: fp@2359: /* Supported frame sizes */ fp@2359: if ((new_mtu < ETH_ZLEN + ETH_FCS_LEN + VLAN_HLEN) || fp@2359: (max_frame > adapter->max_hw_frame_size)) { fp@2359: e_err("Unsupported MTU setting\n"); fp@2359: return -EINVAL; fp@2359: } fp@2359: fp@2359: /* 82573 Errata 17 */ fp@2359: if (((adapter->hw.mac.type == e1000_82573) || fp@2359: (adapter->hw.mac.type == e1000_82574)) && fp@2359: (max_frame > ETH_FRAME_LEN + ETH_FCS_LEN)) { fp@2359: adapter->flags2 |= FLAG2_DISABLE_ASPM_L1; fp@2359: e1000e_disable_aspm(adapter->pdev, PCIE_LINK_STATE_L1); fp@2359: } fp@2359: fp@2359: while (test_and_set_bit(__E1000_RESETTING, &adapter->state)) fp@2359: msleep(1); fp@2359: /* e1000e_down -> e1000e_reset dependent on max_frame_size & mtu */ fp@2359: adapter->max_frame_size = max_frame; fp@2359: e_info("changing MTU from %d to %d\n", netdev->mtu, new_mtu); fp@2359: netdev->mtu = new_mtu; fp@2359: if (netif_running(netdev)) fp@2359: e1000e_down(adapter); fp@2359: fp@2359: /* fp@2359: * NOTE: netdev_alloc_skb reserves 16 bytes, and typically NET_IP_ALIGN fp@2359: * means we reserve 2 more, this pushes us to allocate from the next fp@2359: * larger slab size. fp@2359: * i.e. 
RXBUFFER_2048 --> size-4096 slab fp@2359: * However with the new *_jumbo_rx* routines, jumbo receives will use fp@2359: * fragmented skbs fp@2359: */ fp@2359: fp@2359: if (max_frame <= 2048) fp@2359: adapter->rx_buffer_len = 2048; fp@2359: else fp@2359: adapter->rx_buffer_len = 4096; fp@2359: fp@2359: /* adjust allocation if LPE protects us, and we aren't using SBP */ fp@2359: if ((max_frame == ETH_FRAME_LEN + ETH_FCS_LEN) || fp@2359: (max_frame == ETH_FRAME_LEN + VLAN_HLEN + ETH_FCS_LEN)) fp@2359: adapter->rx_buffer_len = ETH_FRAME_LEN + VLAN_HLEN fp@2359: + ETH_FCS_LEN; fp@2359: fp@2359: if (netif_running(netdev)) fp@2359: e1000e_up(adapter); fp@2359: else fp@2359: e1000e_reset(adapter); fp@2359: fp@2359: clear_bit(__E1000_RESETTING, &adapter->state); fp@2359: fp@2359: return 0; fp@2359: } fp@2359: fp@2359: static int e1000_mii_ioctl(struct net_device *netdev, struct ifreq *ifr, fp@2359: int cmd) fp@2359: { fp@2359: struct e1000_adapter *adapter = netdev_priv(netdev); fp@2359: struct mii_ioctl_data *data = if_mii(ifr); fp@2359: fp@2359: if (adapter->hw.phy.media_type != e1000_media_type_copper) fp@2359: return -EOPNOTSUPP; fp@2359: fp@2359: switch (cmd) { fp@2359: case SIOCGMIIPHY: fp@2359: data->phy_id = adapter->hw.phy.addr; fp@2359: break; fp@2359: case SIOCGMIIREG: fp@2359: e1000_phy_read_status(adapter); fp@2359: fp@2359: switch (data->reg_num & 0x1F) { fp@2359: case MII_BMCR: fp@2359: data->val_out = adapter->phy_regs.bmcr; fp@2359: break; fp@2359: case MII_BMSR: fp@2359: data->val_out = adapter->phy_regs.bmsr; fp@2359: break; fp@2359: case MII_PHYSID1: fp@2359: data->val_out = (adapter->hw.phy.id >> 16); fp@2359: break; fp@2359: case MII_PHYSID2: fp@2359: data->val_out = (adapter->hw.phy.id & 0xFFFF); fp@2359: break; fp@2359: case MII_ADVERTISE: fp@2359: data->val_out = adapter->phy_regs.advertise; fp@2359: break; fp@2359: case MII_LPA: fp@2359: data->val_out = adapter->phy_regs.lpa; fp@2359: break; fp@2359: case MII_EXPANSION: fp@2359: data->val_out = adapter->phy_regs.expansion; fp@2359: break; fp@2359: case MII_CTRL1000: fp@2359: data->val_out = adapter->phy_regs.ctrl1000; fp@2359: break; fp@2359: case MII_STAT1000: fp@2359: data->val_out = adapter->phy_regs.stat1000; fp@2359: break; fp@2359: case MII_ESTATUS: fp@2359: data->val_out = adapter->phy_regs.estatus; fp@2359: break; fp@2359: default: fp@2359: return -EIO; fp@2359: } fp@2359: break; fp@2359: case SIOCSMIIREG: fp@2359: default: fp@2359: return -EOPNOTSUPP; fp@2359: } fp@2359: return 0; fp@2359: } fp@2359: fp@2359: static int e1000_ioctl(struct net_device *netdev, struct ifreq *ifr, int cmd) fp@2359: { fp@2359: switch (cmd) { fp@2359: case SIOCGMIIPHY: fp@2359: case SIOCGMIIREG: fp@2359: case SIOCSMIIREG: fp@2359: return e1000_mii_ioctl(netdev, ifr, cmd); fp@2359: default: fp@2359: return -EOPNOTSUPP; fp@2359: } fp@2359: } fp@2359: fp@2359: static int e1000_init_phy_wakeup(struct e1000_adapter *adapter, u32 wufc) fp@2359: { fp@2359: struct e1000_hw *hw = &adapter->hw; fp@2359: u32 i, mac_reg; fp@2359: u16 phy_reg; fp@2359: int retval = 0; fp@2359: fp@2359: /* copy MAC RARs to PHY RARs */ fp@2359: for (i = 0; i < adapter->hw.mac.rar_entry_count; i++) { fp@2359: mac_reg = er32(RAL(i)); fp@2359: e1e_wphy(hw, BM_RAR_L(i), (u16)(mac_reg & 0xFFFF)); fp@2359: e1e_wphy(hw, BM_RAR_M(i), (u16)((mac_reg >> 16) & 0xFFFF)); fp@2359: mac_reg = er32(RAH(i)); fp@2359: e1e_wphy(hw, BM_RAR_H(i), (u16)(mac_reg & 0xFFFF)); fp@2359: e1e_wphy(hw, BM_RAR_CTRL(i), (u16)((mac_reg >> 16) & 0xFFFF)); fp@2359: } fp@2359: fp@2359: /* copy MAC MTA 
to PHY MTA */ fp@2359: for (i = 0; i < adapter->hw.mac.mta_reg_count; i++) { fp@2359: mac_reg = E1000_READ_REG_ARRAY(hw, E1000_MTA, i); fp@2359: e1e_wphy(hw, BM_MTA(i), (u16)(mac_reg & 0xFFFF)); fp@2359: e1e_wphy(hw, BM_MTA(i) + 1, (u16)((mac_reg >> 16) & 0xFFFF)); fp@2359: } fp@2359: fp@2359: /* configure PHY Rx Control register */ fp@2359: e1e_rphy(&adapter->hw, BM_RCTL, &phy_reg); fp@2359: mac_reg = er32(RCTL); fp@2359: if (mac_reg & E1000_RCTL_UPE) fp@2359: phy_reg |= BM_RCTL_UPE; fp@2359: if (mac_reg & E1000_RCTL_MPE) fp@2359: phy_reg |= BM_RCTL_MPE; fp@2359: phy_reg &= ~(BM_RCTL_MO_MASK); fp@2359: if (mac_reg & E1000_RCTL_MO_3) fp@2359: phy_reg |= (((mac_reg & E1000_RCTL_MO_3) >> E1000_RCTL_MO_SHIFT) fp@2359: << BM_RCTL_MO_SHIFT); fp@2359: if (mac_reg & E1000_RCTL_BAM) fp@2359: phy_reg |= BM_RCTL_BAM; fp@2359: if (mac_reg & E1000_RCTL_PMCF) fp@2359: phy_reg |= BM_RCTL_PMCF; fp@2359: mac_reg = er32(CTRL); fp@2359: if (mac_reg & E1000_CTRL_RFCE) fp@2359: phy_reg |= BM_RCTL_RFCE; fp@2359: e1e_wphy(&adapter->hw, BM_RCTL, phy_reg); fp@2359: fp@2359: /* enable PHY wakeup in MAC register */ fp@2359: ew32(WUFC, wufc); fp@2359: ew32(WUC, E1000_WUC_PHY_WAKE | E1000_WUC_PME_EN); fp@2359: fp@2359: /* configure and enable PHY wakeup in PHY registers */ fp@2359: e1e_wphy(&adapter->hw, BM_WUFC, wufc); fp@2359: e1e_wphy(&adapter->hw, BM_WUC, E1000_WUC_PME_EN); fp@2359: fp@2359: /* activate PHY wakeup */ fp@2359: retval = hw->phy.ops.acquire(hw); fp@2359: if (retval) { fp@2359: e_err("Could not acquire PHY\n"); fp@2359: return retval; fp@2359: } fp@2359: e1000e_write_phy_reg_mdic(hw, IGP01E1000_PHY_PAGE_SELECT, fp@2359: (BM_WUC_ENABLE_PAGE << IGP_PAGE_SHIFT)); fp@2359: retval = e1000e_read_phy_reg_mdic(hw, BM_WUC_ENABLE_REG, &phy_reg); fp@2359: if (retval) { fp@2359: e_err("Could not read PHY page 769\n"); fp@2359: goto out; fp@2359: } fp@2359: phy_reg |= BM_WUC_ENABLE_BIT | BM_WUC_HOST_WU_BIT; fp@2359: retval = e1000e_write_phy_reg_mdic(hw, BM_WUC_ENABLE_REG, phy_reg); fp@2359: if (retval) fp@2359: e_err("Could not set PHY Host Wakeup bit\n"); fp@2359: out: fp@2359: hw->phy.ops.release(hw); fp@2359: fp@2359: return retval; fp@2359: } fp@2359: fp@2359: static int __e1000_shutdown(struct pci_dev *pdev, bool *enable_wake, fp@2359: bool runtime) fp@2359: { fp@2359: struct net_device *netdev = pci_get_drvdata(pdev); fp@2359: struct e1000_adapter *adapter = netdev_priv(netdev); fp@2359: struct e1000_hw *hw = &adapter->hw; fp@2359: u32 ctrl, ctrl_ext, rctl, status; fp@2359: /* Runtime suspend should only enable wakeup for link changes */ fp@2359: u32 wufc = runtime ? 
E1000_WUFC_LNKC : adapter->wol; fp@2359: int retval = 0; fp@2359: fp@2359: netif_device_detach(netdev); fp@2359: fp@2359: if (netif_running(netdev)) { fp@2359: WARN_ON(test_bit(__E1000_RESETTING, &adapter->state)); fp@2359: e1000e_down(adapter); fp@2359: e1000_free_irq(adapter); fp@2359: } fp@2359: e1000e_reset_interrupt_capability(adapter); fp@2359: fp@2359: retval = pci_save_state(pdev); fp@2359: if (retval) fp@2359: return retval; fp@2359: fp@2359: status = er32(STATUS); fp@2359: if (status & E1000_STATUS_LU) fp@2359: wufc &= ~E1000_WUFC_LNKC; fp@2359: fp@2359: if (wufc) { fp@2359: e1000_setup_rctl(adapter); fp@2359: e1000_set_multi(netdev); fp@2359: fp@2359: /* turn on all-multi mode if wake on multicast is enabled */ fp@2359: if (wufc & E1000_WUFC_MC) { fp@2359: rctl = er32(RCTL); fp@2359: rctl |= E1000_RCTL_MPE; fp@2359: ew32(RCTL, rctl); fp@2359: } fp@2359: fp@2359: ctrl = er32(CTRL); fp@2359: /* advertise wake from D3Cold */ fp@2359: #define E1000_CTRL_ADVD3WUC 0x00100000 fp@2359: /* phy power management enable */ fp@2359: #define E1000_CTRL_EN_PHY_PWR_MGMT 0x00200000 fp@2359: ctrl |= E1000_CTRL_ADVD3WUC; fp@2359: if (!(adapter->flags2 & FLAG2_HAS_PHY_WAKEUP)) fp@2359: ctrl |= E1000_CTRL_EN_PHY_PWR_MGMT; fp@2359: ew32(CTRL, ctrl); fp@2359: fp@2359: if (adapter->hw.phy.media_type == e1000_media_type_fiber || fp@2359: adapter->hw.phy.media_type == fp@2359: e1000_media_type_internal_serdes) { fp@2359: /* keep the laser running in D3 */ fp@2359: ctrl_ext = er32(CTRL_EXT); fp@2359: ctrl_ext |= E1000_CTRL_EXT_SDP3_DATA; fp@2359: ew32(CTRL_EXT, ctrl_ext); fp@2359: } fp@2359: fp@2359: if (adapter->flags & FLAG_IS_ICH) fp@2359: e1000e_disable_gig_wol_ich8lan(&adapter->hw); fp@2359: fp@2359: /* Allow time for pending master requests to run */ fp@2359: e1000e_disable_pcie_master(&adapter->hw); fp@2359: fp@2359: if (adapter->flags2 & FLAG2_HAS_PHY_WAKEUP) { fp@2359: /* enable wakeup by the PHY */ fp@2359: retval = e1000_init_phy_wakeup(adapter, wufc); fp@2359: if (retval) fp@2359: return retval; fp@2359: } else { fp@2359: /* enable wakeup by the MAC */ fp@2359: ew32(WUFC, wufc); fp@2359: ew32(WUC, E1000_WUC_PME_EN); fp@2359: } fp@2359: } else { fp@2359: ew32(WUC, 0); fp@2359: ew32(WUFC, 0); fp@2359: } fp@2359: fp@2359: *enable_wake = !!wufc; fp@2359: fp@2359: /* make sure adapter isn't asleep if manageability is enabled */ fp@2359: if ((adapter->flags & FLAG_MNG_PT_ENABLED) || fp@2359: (hw->mac.ops.check_mng_mode(hw))) fp@2359: *enable_wake = true; fp@2359: fp@2359: if (adapter->hw.phy.type == e1000_phy_igp_3) fp@2359: e1000e_igp3_phy_powerdown_workaround_ich8lan(&adapter->hw); fp@2359: fp@2359: /* fp@2359: * Release control of h/w to f/w. If f/w is AMT enabled, this fp@2359: * would have already happened in close and is redundant. 
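fp@2359: * (repeating it here is harmless)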
fp@2359: */
fp@2359: e1000_release_hw_control(adapter);
fp@2359:
fp@2359: pci_disable_device(pdev);
fp@2359:
fp@2359: return 0;
fp@2359: }
fp@2359:
fp@2359: static void e1000_power_off(struct pci_dev *pdev, bool sleep, bool wake)
fp@2359: {
fp@2359: if (sleep && wake) {
fp@2359: pci_prepare_to_sleep(pdev);
fp@2359: return;
fp@2359: }
fp@2359:
fp@2359: pci_wake_from_d3(pdev, wake);
fp@2359: pci_set_power_state(pdev, PCI_D3hot);
fp@2359: }
fp@2359:
fp@2359: static void e1000_complete_shutdown(struct pci_dev *pdev, bool sleep,
fp@2359: bool wake)
fp@2359: {
fp@2359: struct net_device *netdev = pci_get_drvdata(pdev);
fp@2359: struct e1000_adapter *adapter = netdev_priv(netdev);
fp@2359:
fp@2359: /*
fp@2359: * The pci-e switch on some quad port adapters will report a
fp@2359: * correctable error when the MAC transitions from D0 to D3. To
fp@2359: * prevent this we need to mask off the correctable errors on the
fp@2359: * downstream port of the pci-e switch.
fp@2359: */
fp@2359: if (adapter->flags & FLAG_IS_QUAD_PORT) {
fp@2359: struct pci_dev *us_dev = pdev->bus->self;
fp@2359: int pos = pci_find_capability(us_dev, PCI_CAP_ID_EXP);
fp@2359: u16 devctl;
fp@2359:
fp@2359: pci_read_config_word(us_dev, pos + PCI_EXP_DEVCTL, &devctl);
fp@2359: pci_write_config_word(us_dev, pos + PCI_EXP_DEVCTL,
fp@2359: (devctl & ~PCI_EXP_DEVCTL_CERE));
fp@2359:
fp@2359: e1000_power_off(pdev, sleep, wake);
fp@2359:
fp@2359: pci_write_config_word(us_dev, pos + PCI_EXP_DEVCTL, devctl);
fp@2359: } else {
fp@2359: e1000_power_off(pdev, sleep, wake);
fp@2359: }
fp@2359: }
fp@2359:
fp@2359: #ifdef CONFIG_PCIEASPM
fp@2359: static void __e1000e_disable_aspm(struct pci_dev *pdev, u16 state)
fp@2359: {
fp@2359: pci_disable_link_state(pdev, state);
fp@2359: }
fp@2359: #else
fp@2359: static void __e1000e_disable_aspm(struct pci_dev *pdev, u16 state)
fp@2359: {
fp@2359: int pos;
fp@2359: u16 reg16;
fp@2359:
fp@2359: /*
fp@2359: * Both device and parent should have the same ASPM setting.
fp@2359: * Disable ASPM in downstream component first and then upstream.
fp@2359: */
fp@2359: pos = pci_pcie_cap(pdev);
fp@2359: pci_read_config_word(pdev, pos + PCI_EXP_LNKCTL, &reg16);
fp@2359: reg16 &= ~state;
fp@2359: pci_write_config_word(pdev, pos + PCI_EXP_LNKCTL, reg16);
fp@2359:
fp@2359: if (!pdev->bus->self)
fp@2359: return;
fp@2359:
fp@2359: pos = pci_pcie_cap(pdev->bus->self);
fp@2359: pci_read_config_word(pdev->bus->self, pos + PCI_EXP_LNKCTL, &reg16);
fp@2359: reg16 &= ~state;
fp@2359: pci_write_config_word(pdev->bus->self, pos + PCI_EXP_LNKCTL, reg16);
fp@2359: }
fp@2359: #endif
fp@2359: void e1000e_disable_aspm(struct pci_dev *pdev, u16 state)
fp@2359: {
fp@2359: dev_info(&pdev->dev, "Disabling ASPM %s %s\n",
fp@2359: (state & PCIE_LINK_STATE_L0S) ? "L0s" : "",
fp@2359: (state & PCIE_LINK_STATE_L1) ?
"L1" : ""); fp@2359: fp@2359: __e1000e_disable_aspm(pdev, state); fp@2359: } fp@2359: fp@2359: #ifdef CONFIG_PM_OPS fp@2359: static bool e1000e_pm_ready(struct e1000_adapter *adapter) fp@2359: { fp@2359: return !!adapter->tx_ring->buffer_info; fp@2359: } fp@2359: fp@2359: static int __e1000_resume(struct pci_dev *pdev) fp@2359: { fp@2359: struct net_device *netdev = pci_get_drvdata(pdev); fp@2359: struct e1000_adapter *adapter = netdev_priv(netdev); fp@2359: struct e1000_hw *hw = &adapter->hw; fp@2359: u32 err; fp@2359: fp@2359: pci_set_power_state(pdev, PCI_D0); fp@2359: pci_restore_state(pdev); fp@2359: pci_save_state(pdev); fp@2359: if (adapter->flags2 & FLAG2_DISABLE_ASPM_L1) fp@2359: e1000e_disable_aspm(pdev, PCIE_LINK_STATE_L1); fp@2359: fp@2359: e1000e_set_interrupt_capability(adapter); fp@2359: if (netif_running(netdev)) { fp@2359: err = e1000_request_irq(adapter); fp@2359: if (err) fp@2359: return err; fp@2359: } fp@2359: fp@2359: e1000e_power_up_phy(adapter); fp@2359: fp@2359: /* report the system wakeup cause from S3/S4 */ fp@2359: if (adapter->flags2 & FLAG2_HAS_PHY_WAKEUP) { fp@2359: u16 phy_data; fp@2359: fp@2359: e1e_rphy(&adapter->hw, BM_WUS, &phy_data); fp@2359: if (phy_data) { fp@2359: e_info("PHY Wakeup cause - %s\n", fp@2359: phy_data & E1000_WUS_EX ? "Unicast Packet" : fp@2359: phy_data & E1000_WUS_MC ? "Multicast Packet" : fp@2359: phy_data & E1000_WUS_BC ? "Broadcast Packet" : fp@2359: phy_data & E1000_WUS_MAG ? "Magic Packet" : fp@2359: phy_data & E1000_WUS_LNKC ? "Link Status " fp@2359: " Change" : "other"); fp@2359: } fp@2359: e1e_wphy(&adapter->hw, BM_WUS, ~0); fp@2359: } else { fp@2359: u32 wus = er32(WUS); fp@2359: if (wus) { fp@2359: e_info("MAC Wakeup cause - %s\n", fp@2359: wus & E1000_WUS_EX ? "Unicast Packet" : fp@2359: wus & E1000_WUS_MC ? "Multicast Packet" : fp@2359: wus & E1000_WUS_BC ? "Broadcast Packet" : fp@2359: wus & E1000_WUS_MAG ? "Magic Packet" : fp@2359: wus & E1000_WUS_LNKC ? "Link Status Change" : fp@2359: "other"); fp@2359: } fp@2359: ew32(WUS, ~0); fp@2359: } fp@2359: fp@2359: e1000e_reset(adapter); fp@2359: fp@2359: e1000_init_manageability_pt(adapter); fp@2359: fp@2359: if (netif_running(netdev)) fp@2359: e1000e_up(adapter); fp@2359: fp@2359: netif_device_attach(netdev); fp@2359: fp@2359: /* fp@2359: * If the controller has AMT, do not set DRV_LOAD until the interface fp@2359: * is up. For all other cases, let the f/w know that the h/w is now fp@2359: * under the control of the driver. 
fp@2359: */ fp@2359: if (!(adapter->flags & FLAG_HAS_AMT)) fp@2359: e1000_get_hw_control(adapter); fp@2359: fp@2359: return 0; fp@2359: } fp@2359: fp@2359: #ifdef CONFIG_PM_SLEEP fp@2359: static int e1000_suspend(struct device *dev) fp@2359: { fp@2359: struct pci_dev *pdev = to_pci_dev(dev); fp@2359: int retval; fp@2359: bool wake; fp@2359: fp@2359: retval = __e1000_shutdown(pdev, &wake, false); fp@2359: if (!retval) fp@2359: e1000_complete_shutdown(pdev, true, wake); fp@2359: fp@2359: return retval; fp@2359: } fp@2359: fp@2359: static int e1000_resume(struct device *dev) fp@2359: { fp@2359: struct pci_dev *pdev = to_pci_dev(dev); fp@2359: struct net_device *netdev = pci_get_drvdata(pdev); fp@2359: struct e1000_adapter *adapter = netdev_priv(netdev); fp@2359: fp@2359: if (e1000e_pm_ready(adapter)) fp@2359: adapter->idle_check = true; fp@2359: fp@2359: return __e1000_resume(pdev); fp@2359: } fp@2359: #endif /* CONFIG_PM_SLEEP */ fp@2359: fp@2359: #ifdef CONFIG_PM_RUNTIME fp@2359: static int e1000_runtime_suspend(struct device *dev) fp@2359: { fp@2359: struct pci_dev *pdev = to_pci_dev(dev); fp@2359: struct net_device *netdev = pci_get_drvdata(pdev); fp@2359: struct e1000_adapter *adapter = netdev_priv(netdev); fp@2359: fp@2359: if (e1000e_pm_ready(adapter)) { fp@2359: bool wake; fp@2359: fp@2359: __e1000_shutdown(pdev, &wake, true); fp@2359: } fp@2359: fp@2359: return 0; fp@2359: } fp@2359: fp@2359: static int e1000_idle(struct device *dev) fp@2359: { fp@2359: struct pci_dev *pdev = to_pci_dev(dev); fp@2359: struct net_device *netdev = pci_get_drvdata(pdev); fp@2359: struct e1000_adapter *adapter = netdev_priv(netdev); fp@2359: fp@2359: if (!e1000e_pm_ready(adapter)) fp@2359: return 0; fp@2359: fp@2359: if (adapter->idle_check) { fp@2359: adapter->idle_check = false; fp@2359: if (!e1000e_has_link(adapter)) fp@2359: pm_schedule_suspend(dev, MSEC_PER_SEC); fp@2359: } fp@2359: fp@2359: return -EBUSY; fp@2359: } fp@2359: fp@2359: static int e1000_runtime_resume(struct device *dev) fp@2359: { fp@2359: struct pci_dev *pdev = to_pci_dev(dev); fp@2359: struct net_device *netdev = pci_get_drvdata(pdev); fp@2359: struct e1000_adapter *adapter = netdev_priv(netdev); fp@2359: fp@2359: if (!e1000e_pm_ready(adapter)) fp@2359: return 0; fp@2359: fp@2359: adapter->idle_check = !dev->power.runtime_auto; fp@2359: return __e1000_resume(pdev); fp@2359: } fp@2359: #endif /* CONFIG_PM_RUNTIME */ fp@2359: #endif /* CONFIG_PM_OPS */ fp@2359: fp@2359: static void e1000_shutdown(struct pci_dev *pdev) fp@2359: { fp@2359: bool wake = false; fp@2359: fp@2359: __e1000_shutdown(pdev, &wake, false); fp@2359: fp@2359: if (system_state == SYSTEM_POWER_OFF) fp@2359: e1000_complete_shutdown(pdev, false, wake); fp@2359: } fp@2359: fp@2359: #ifdef CONFIG_NET_POLL_CONTROLLER fp@2359: /* fp@2359: * Polling 'interrupt' - used by things like netconsole to send skbs fp@2359: * without having to re-enable interrupts. It's not called while fp@2359: * the interrupt routine is executing. 
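fp@2359: * The IRQ line is disabled around the call below, so the handler fp@2359: * cannot run concurrently with itself.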
fp@2359: */ fp@2359: static void e1000_netpoll(struct net_device *netdev) fp@2359: { fp@2359: struct e1000_adapter *adapter = netdev_priv(netdev); fp@2359: fp@2359: disable_irq(adapter->pdev->irq); fp@2359: e1000_intr(adapter->pdev->irq, netdev); fp@2359: fp@2359: enable_irq(adapter->pdev->irq); fp@2359: } fp@2359: #endif fp@2359: fp@2359: /** fp@2359: * e1000_io_error_detected - called when PCI error is detected fp@2359: * @pdev: Pointer to PCI device fp@2359: * @state: The current pci connection state fp@2359: * fp@2359: * This function is called after a PCI bus error affecting fp@2359: * this device has been detected. fp@2359: */ fp@2359: static pci_ers_result_t e1000_io_error_detected(struct pci_dev *pdev, fp@2359: pci_channel_state_t state) fp@2359: { fp@2359: struct net_device *netdev = pci_get_drvdata(pdev); fp@2359: struct e1000_adapter *adapter = netdev_priv(netdev); fp@2359: fp@2359: netif_device_detach(netdev); fp@2359: fp@2359: if (state == pci_channel_io_perm_failure) fp@2359: return PCI_ERS_RESULT_DISCONNECT; fp@2359: fp@2359: if (netif_running(netdev)) fp@2359: e1000e_down(adapter); fp@2359: pci_disable_device(pdev); fp@2359: fp@2359: /* Request a slot reset. */ fp@2359: return PCI_ERS_RESULT_NEED_RESET; fp@2359: } fp@2359: fp@2359: /** fp@2359: * e1000_io_slot_reset - called after the pci bus has been reset. fp@2359: * @pdev: Pointer to PCI device fp@2359: * fp@2359: * Restart the card from scratch, as if from a cold-boot. Implementation fp@2359: * resembles the first-half of the e1000_resume routine. fp@2359: */ fp@2359: static pci_ers_result_t e1000_io_slot_reset(struct pci_dev *pdev) fp@2359: { fp@2359: struct net_device *netdev = pci_get_drvdata(pdev); fp@2359: struct e1000_adapter *adapter = netdev_priv(netdev); fp@2359: struct e1000_hw *hw = &adapter->hw; fp@2359: int err; fp@2359: pci_ers_result_t result; fp@2359: fp@2359: if (adapter->flags2 & FLAG2_DISABLE_ASPM_L1) fp@2359: e1000e_disable_aspm(pdev, PCIE_LINK_STATE_L1); fp@2359: err = pci_enable_device_mem(pdev); fp@2359: if (err) { fp@2359: dev_err(&pdev->dev, fp@2359: "Cannot re-enable PCI device after reset.\n"); fp@2359: result = PCI_ERS_RESULT_DISCONNECT; fp@2359: } else { fp@2359: pci_set_master(pdev); fp@2359: pdev->state_saved = true; fp@2359: pci_restore_state(pdev); fp@2359: fp@2359: pci_enable_wake(pdev, PCI_D3hot, 0); fp@2359: pci_enable_wake(pdev, PCI_D3cold, 0); fp@2359: fp@2359: e1000e_reset(adapter); fp@2359: ew32(WUS, ~0); fp@2359: result = PCI_ERS_RESULT_RECOVERED; fp@2359: } fp@2359: fp@2359: pci_cleanup_aer_uncorrect_error_status(pdev); fp@2359: fp@2359: return result; fp@2359: } fp@2359: fp@2359: /** fp@2359: * e1000_io_resume - called when traffic can start flowing again. fp@2359: * @pdev: Pointer to PCI device fp@2359: * fp@2359: * This callback is called when the error recovery driver tells us that fp@2359: * it's OK to resume normal operation. Implementation resembles the fp@2359: * second-half of the e1000_resume routine.
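fp@2359: * Manageability is re-initialized, the interface is restarted if it fp@2359: * was running, and non-AMT parts reassert DRV_LOAD right away.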
fp@2359: */ fp@2359: static void e1000_io_resume(struct pci_dev *pdev) fp@2359: { fp@2359: struct net_device *netdev = pci_get_drvdata(pdev); fp@2359: struct e1000_adapter *adapter = netdev_priv(netdev); fp@2359: fp@2359: e1000_init_manageability_pt(adapter); fp@2359: fp@2359: if (netif_running(netdev)) { fp@2359: if (e1000e_up(adapter)) { fp@2359: dev_err(&pdev->dev, fp@2359: "can't bring device back up after reset\n"); fp@2359: return; fp@2359: } fp@2359: } fp@2359: fp@2359: netif_device_attach(netdev); fp@2359: fp@2359: /* fp@2359: * If the controller has AMT, do not set DRV_LOAD until the interface fp@2359: * is up. For all other cases, let the f/w know that the h/w is now fp@2359: * under the control of the driver. fp@2359: */ fp@2359: if (!(adapter->flags & FLAG_HAS_AMT)) fp@2359: e1000_get_hw_control(adapter); fp@2359: fp@2359: } fp@2359: fp@2359: static void e1000_print_device_info(struct e1000_adapter *adapter) fp@2359: { fp@2359: struct e1000_hw *hw = &adapter->hw; fp@2359: struct net_device *netdev = adapter->netdev; fp@2359: u32 pba_num; fp@2359: fp@2359: /* print bus type/speed/width info */ fp@2359: e_info("(PCI Express:2.5GT/s:%s) %pM\n", fp@2359: /* bus width */ fp@2359: ((hw->bus.width == e1000_bus_width_pcie_x4) ? "Width x4" : fp@2359: "Width x1"), fp@2359: /* MAC address */ fp@2359: netdev->dev_addr); fp@2359: e_info("Intel(R) PRO/%s Network Connection\n", fp@2359: (hw->phy.type == e1000_phy_ife) ? "10/100" : "1000"); fp@2359: e1000e_read_pba_num(hw, &pba_num); fp@2359: e_info("MAC: %d, PHY: %d, PBA No: %06x-%03x\n", fp@2359: hw->mac.type, hw->phy.type, (pba_num >> 8), (pba_num & 0xff)); fp@2359: } fp@2359: fp@2359: static void e1000_eeprom_checks(struct e1000_adapter *adapter) fp@2359: { fp@2359: struct e1000_hw *hw = &adapter->hw; fp@2359: int ret_val; fp@2359: u16 buf = 0; fp@2359: fp@2359: if (hw->mac.type != e1000_82573) fp@2359: return; fp@2359: fp@2359: ret_val = e1000_read_nvm(hw, NVM_INIT_CONTROL2_REG, 1, &buf); fp@2359: if (!ret_val && (!(le16_to_cpu(buf) & (1 << 0)))) { fp@2359: /* Deep Smart Power Down (DSPD) */ fp@2359: dev_warn(&adapter->pdev->dev, fp@2359: "Warning: detected DSPD enabled in EEPROM\n"); fp@2359: } fp@2359: } fp@2359: fp@2359: static const struct net_device_ops e1000e_netdev_ops = { fp@2359: .ndo_open = e1000_open, fp@2359: .ndo_stop = e1000_close, fp@2359: .ndo_start_xmit = e1000_xmit_frame, fp@2359: .ndo_get_stats = e1000_get_stats, fp@2359: .ndo_set_multicast_list = e1000_set_multi, fp@2359: .ndo_set_mac_address = e1000_set_mac, fp@2359: .ndo_change_mtu = e1000_change_mtu, fp@2359: .ndo_do_ioctl = e1000_ioctl, fp@2359: .ndo_tx_timeout = e1000_tx_timeout, fp@2359: .ndo_validate_addr = eth_validate_addr, fp@2359: fp@2359: .ndo_vlan_rx_register = e1000_vlan_rx_register, fp@2359: .ndo_vlan_rx_add_vid = e1000_vlan_rx_add_vid, fp@2359: .ndo_vlan_rx_kill_vid = e1000_vlan_rx_kill_vid, fp@2359: #ifdef CONFIG_NET_POLL_CONTROLLER fp@2359: .ndo_poll_controller = e1000_netpoll, fp@2359: #endif fp@2359: }; fp@2359: fp@2359: /** fp@2359: * e1000_probe - Device Initialization Routine fp@2359: * @pdev: PCI device information struct fp@2359: * @ent: entry in e1000_pci_tbl fp@2359: * fp@2359: * Returns 0 on success, negative on failure fp@2359: * fp@2359: * e1000_probe initializes an adapter identified by a pci_dev structure. fp@2359: * The OS initialization, configuring of the adapter private structure, fp@2359: * and a hardware reset occur.
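fp@2359: * On failure, the err_* labels at the end of the function unwind the fp@2359: * setup steps in reverse order.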
fp@2359: **/ fp@2359: static int __devinit e1000_probe(struct pci_dev *pdev, fp@2359: const struct pci_device_id *ent) fp@2359: { fp@2359: struct net_device *netdev; fp@2359: struct e1000_adapter *adapter; fp@2359: struct e1000_hw *hw; fp@2359: const struct e1000_info *ei = e1000_info_tbl[ent->driver_data]; fp@2359: resource_size_t mmio_start, mmio_len; fp@2359: resource_size_t flash_start, flash_len; fp@2359: fp@2359: static int cards_found; fp@2359: int i, err, pci_using_dac; fp@2359: u16 eeprom_data = 0; fp@2359: u16 eeprom_apme_mask = E1000_EEPROM_APME; fp@2359: fp@2359: if (ei->flags2 & FLAG2_DISABLE_ASPM_L1) fp@2359: e1000e_disable_aspm(pdev, PCIE_LINK_STATE_L1); fp@2359: fp@2359: err = pci_enable_device_mem(pdev); fp@2359: if (err) fp@2359: return err; fp@2359: fp@2359: pci_using_dac = 0; fp@2359: err = dma_set_mask(&pdev->dev, DMA_BIT_MASK(64)); fp@2359: if (!err) { fp@2359: err = dma_set_coherent_mask(&pdev->dev, DMA_BIT_MASK(64)); fp@2359: if (!err) fp@2359: pci_using_dac = 1; fp@2359: } else { fp@2359: err = dma_set_mask(&pdev->dev, DMA_BIT_MASK(32)); fp@2359: if (err) { fp@2359: err = dma_set_coherent_mask(&pdev->dev, fp@2359: DMA_BIT_MASK(32)); fp@2359: if (err) { fp@2359: dev_err(&pdev->dev, "No usable DMA " fp@2359: "configuration, aborting\n"); fp@2359: goto err_dma; fp@2359: } fp@2359: } fp@2359: } fp@2359: fp@2359: err = pci_request_selected_regions_exclusive(pdev, fp@2359: pci_select_bars(pdev, IORESOURCE_MEM), fp@2359: e1000e_driver_name); fp@2359: if (err) fp@2359: goto err_pci_reg; fp@2359: fp@2359: /* AER (Advanced Error Reporting) hooks */ fp@2359: pci_enable_pcie_error_reporting(pdev); fp@2359: fp@2359: pci_set_master(pdev); fp@2359: /* PCI config space info */ fp@2359: err = pci_save_state(pdev); fp@2359: if (err) fp@2359: goto err_alloc_etherdev; fp@2359: fp@2359: err = -ENOMEM; fp@2359: netdev = alloc_etherdev(sizeof(struct e1000_adapter)); fp@2359: if (!netdev) fp@2359: goto err_alloc_etherdev; fp@2359: fp@2359: SET_NETDEV_DEV(netdev, &pdev->dev); fp@2359: fp@2359: netdev->irq = pdev->irq; fp@2359: fp@2359: pci_set_drvdata(pdev, netdev); fp@2359: adapter = netdev_priv(netdev); fp@2359: hw = &adapter->hw; fp@2359: adapter->netdev = netdev; fp@2359: adapter->pdev = pdev; fp@2359: adapter->ei = ei; fp@2359: adapter->pba = ei->pba; fp@2359: adapter->flags = ei->flags; fp@2359: adapter->flags2 = ei->flags2; fp@2359: adapter->hw.adapter = adapter; fp@2359: adapter->hw.mac.type = ei->mac; fp@2359: adapter->max_hw_frame_size = ei->max_hw_frame_size; fp@2359: adapter->msg_enable = (1 << NETIF_MSG_DRV | NETIF_MSG_PROBE) - 1; fp@2359: fp@2359: mmio_start = pci_resource_start(pdev, 0); fp@2359: mmio_len = pci_resource_len(pdev, 0); fp@2359: fp@2359: err = -EIO; fp@2359: adapter->hw.hw_addr = ioremap(mmio_start, mmio_len); fp@2359: if (!adapter->hw.hw_addr) fp@2359: goto err_ioremap; fp@2359: fp@2359: if ((adapter->flags & FLAG_HAS_FLASH) && fp@2359: (pci_resource_flags(pdev, 1) & IORESOURCE_MEM)) { fp@2359: flash_start = pci_resource_start(pdev, 1); fp@2359: flash_len = pci_resource_len(pdev, 1); fp@2359: adapter->hw.flash_address = ioremap(flash_start, flash_len); fp@2359: if (!adapter->hw.flash_address) fp@2359: goto err_flashmap; fp@2359: } fp@2359: fp@2359: /* construct the net_device struct */ fp@2359: netdev->netdev_ops = &e1000e_netdev_ops; fp@2359: e1000e_set_ethtool_ops(netdev); fp@2359: netdev->watchdog_timeo = 5 * HZ; fp@2359: netif_napi_add(netdev, &adapter->napi, e1000_clean, 64); fp@2359: strncpy(netdev->name, pci_name(pdev), sizeof(netdev->name) - 1); 
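fp@2359: /* fp@2359: * The PCI device name is only a placeholder here; netdev->name is fp@2359: * overwritten with "eth%d" immediately before register_netdev(). fp@2359: */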
fp@2359: fp@2359: netdev->mem_start = mmio_start; fp@2359: netdev->mem_end = mmio_start + mmio_len; fp@2359: fp@2359: adapter->bd_number = cards_found++; fp@2359: fp@2359: e1000e_check_options(adapter); fp@2359: fp@2359: /* setup adapter struct */ fp@2359: err = e1000_sw_init(adapter); fp@2359: if (err) fp@2359: goto err_sw_init; fp@2359: fp@2359: err = -EIO; fp@2359: fp@2359: memcpy(&hw->mac.ops, ei->mac_ops, sizeof(hw->mac.ops)); fp@2359: memcpy(&hw->nvm.ops, ei->nvm_ops, sizeof(hw->nvm.ops)); fp@2359: memcpy(&hw->phy.ops, ei->phy_ops, sizeof(hw->phy.ops)); fp@2359: fp@2359: err = ei->get_variants(adapter); fp@2359: if (err) fp@2359: goto err_hw_init; fp@2359: fp@2359: if ((adapter->flags & FLAG_IS_ICH) && fp@2359: (adapter->flags & FLAG_READ_ONLY_NVM)) fp@2359: e1000e_write_protect_nvm_ich8lan(&adapter->hw); fp@2359: fp@2359: hw->mac.ops.get_bus_info(&adapter->hw); fp@2359: fp@2359: adapter->hw.phy.autoneg_wait_to_complete = 0; fp@2359: fp@2359: /* Copper options */ fp@2359: if (adapter->hw.phy.media_type == e1000_media_type_copper) { fp@2359: adapter->hw.phy.mdix = AUTO_ALL_MODES; fp@2359: adapter->hw.phy.disable_polarity_correction = 0; fp@2359: adapter->hw.phy.ms_type = e1000_ms_hw_default; fp@2359: } fp@2359: fp@2359: if (e1000_check_reset_block(&adapter->hw)) fp@2359: e_info("PHY reset is blocked due to SOL/IDER session.\n"); fp@2359: fp@2359: netdev->features = NETIF_F_SG | fp@2359: NETIF_F_HW_CSUM | fp@2359: NETIF_F_HW_VLAN_TX | fp@2359: NETIF_F_HW_VLAN_RX; fp@2359: fp@2359: if (adapter->flags & FLAG_HAS_HW_VLAN_FILTER) fp@2359: netdev->features |= NETIF_F_HW_VLAN_FILTER; fp@2359: fp@2359: netdev->features |= NETIF_F_TSO; fp@2359: netdev->features |= NETIF_F_TSO6; fp@2359: fp@2359: netdev->vlan_features |= NETIF_F_TSO; fp@2359: netdev->vlan_features |= NETIF_F_TSO6; fp@2359: netdev->vlan_features |= NETIF_F_HW_CSUM; fp@2359: netdev->vlan_features |= NETIF_F_SG; fp@2359: fp@2359: if (pci_using_dac) fp@2359: netdev->features |= NETIF_F_HIGHDMA; fp@2359: fp@2359: if (e1000e_enable_mng_pass_thru(&adapter->hw)) fp@2359: adapter->flags |= FLAG_MNG_PT_ENABLED; fp@2359: fp@2359: /* fp@2359: * before reading the NVM, reset the controller to fp@2359: * put the device in a known good starting state fp@2359: */ fp@2359: adapter->hw.mac.ops.reset_hw(&adapter->hw); fp@2359: fp@2359: /* fp@2359: * systems with ASPM and others may see the checksum fail on the first fp@2359: * attempt. 
Let's give it a few tries fp@2359: */ fp@2359: for (i = 0;; i++) { fp@2359: if (e1000_validate_nvm_checksum(&adapter->hw) >= 0) fp@2359: break; fp@2359: if (i == 2) { fp@2359: e_err("The NVM Checksum Is Not Valid\n"); fp@2359: err = -EIO; fp@2359: goto err_eeprom; fp@2359: } fp@2359: } fp@2359: fp@2359: e1000_eeprom_checks(adapter); fp@2359: fp@2359: /* copy the MAC address */ fp@2359: if (e1000e_read_mac_addr(&adapter->hw)) fp@2359: e_err("NVM Read Error while reading MAC address\n"); fp@2359: fp@2359: memcpy(netdev->dev_addr, adapter->hw.mac.addr, netdev->addr_len); fp@2359: memcpy(netdev->perm_addr, adapter->hw.mac.addr, netdev->addr_len); fp@2359: fp@2359: if (!is_valid_ether_addr(netdev->perm_addr)) { fp@2359: e_err("Invalid MAC Address: %pM\n", netdev->perm_addr); fp@2359: err = -EIO; fp@2359: goto err_eeprom; fp@2359: } fp@2359: fp@2359: init_timer(&adapter->watchdog_timer); fp@2359: adapter->watchdog_timer.function = &e1000_watchdog; fp@2359: adapter->watchdog_timer.data = (unsigned long) adapter; fp@2359: fp@2359: init_timer(&adapter->phy_info_timer); fp@2359: adapter->phy_info_timer.function = &e1000_update_phy_info; fp@2359: adapter->phy_info_timer.data = (unsigned long) adapter; fp@2359: fp@2359: INIT_WORK(&adapter->reset_task, e1000_reset_task); fp@2359: INIT_WORK(&adapter->watchdog_task, e1000_watchdog_task); fp@2359: INIT_WORK(&adapter->downshift_task, e1000e_downshift_workaround); fp@2359: INIT_WORK(&adapter->update_phy_task, e1000e_update_phy_task); fp@2359: INIT_WORK(&adapter->print_hang_task, e1000_print_hw_hang); fp@2359: fp@2359: /* Initialize link parameters. User can change them with ethtool */ fp@2359: adapter->hw.mac.autoneg = 1; fp@2359: adapter->fc_autoneg = 1; fp@2359: adapter->hw.fc.requested_mode = e1000_fc_default; fp@2359: adapter->hw.fc.current_mode = e1000_fc_default; fp@2359: adapter->hw.phy.autoneg_advertised = 0x2f; fp@2359: fp@2359: /* ring size defaults */ fp@2359: adapter->rx_ring->count = 256; fp@2359: adapter->tx_ring->count = 256; fp@2359: fp@2359: /* fp@2359: * Initial Wake on LAN setting - If APM wake is enabled in fp@2359: * the EEPROM, enable the ACPI Magic Packet filter fp@2359: */ fp@2359: if (adapter->flags & FLAG_APME_IN_WUC) { fp@2359: /* APME bit in EEPROM is mapped to WUC.APME */ fp@2359: eeprom_data = er32(WUC); fp@2359: eeprom_apme_mask = E1000_WUC_APME; fp@2359: if (eeprom_data & E1000_WUC_PHY_WAKE) fp@2359: adapter->flags2 |= FLAG2_HAS_PHY_WAKEUP; fp@2359: } else if (adapter->flags & FLAG_APME_IN_CTRL3) { fp@2359: if (adapter->flags & FLAG_APME_CHECK_PORT_B && fp@2359: (adapter->hw.bus.func == 1)) fp@2359: e1000_read_nvm(&adapter->hw, fp@2359: NVM_INIT_CONTROL3_PORT_B, 1, &eeprom_data); fp@2359: else fp@2359: e1000_read_nvm(&adapter->hw, fp@2359: NVM_INIT_CONTROL3_PORT_A, 1, &eeprom_data); fp@2359: } fp@2359: fp@2359: /* fetch WoL from EEPROM */ fp@2359: if (eeprom_data & eeprom_apme_mask) fp@2359: adapter->eeprom_wol |= E1000_WUFC_MAG; fp@2359: fp@2359: /* fp@2359: * now that we have the eeprom settings, apply the special cases fp@2359: * where the eeprom may be wrong or the board simply won't support fp@2359: * wake on lan on a particular port fp@2359: */ fp@2359: if (!(adapter->flags & FLAG_HAS_WOL)) fp@2359: adapter->eeprom_wol = 0; fp@2359: fp@2359: /* initialize the wol settings based on the eeprom settings */ fp@2359: adapter->wol = adapter->eeprom_wol; fp@2359: device_set_wakeup_enable(&adapter->pdev->dev, adapter->wol); fp@2359: fp@2359: /* save off EEPROM version number */ fp@2359: e1000_read_nvm(&adapter->hw, 5, 1, 
&adapter->eeprom_vers); fp@2359: fp@2359: /* reset the hardware with the new settings */ fp@2359: e1000e_reset(adapter); fp@2359: fp@2359: /* fp@2359: * If the controller has AMT, do not set DRV_LOAD until the interface fp@2359: * is up. For all other cases, let the f/w know that the h/w is now fp@2359: * under the control of the driver. fp@2359: */ fp@2359: if (!(adapter->flags & FLAG_HAS_AMT)) fp@2359: e1000_get_hw_control(adapter); fp@2359: fp@2359: strcpy(netdev->name, "eth%d"); fp@2359: err = register_netdev(netdev); fp@2359: if (err) fp@2359: goto err_register; fp@2359: fp@2359: /* carrier off reporting is important to ethtool even BEFORE open */ fp@2359: netif_carrier_off(netdev); fp@2359: fp@2359: e1000_print_device_info(adapter); fp@2359: fp@2359: if (pci_dev_run_wake(pdev)) { fp@2359: pm_runtime_set_active(&pdev->dev); fp@2359: pm_runtime_enable(&pdev->dev); fp@2359: } fp@2359: pm_schedule_suspend(&pdev->dev, MSEC_PER_SEC); fp@2359: fp@2359: return 0; fp@2359: fp@2359: err_register: fp@2359: if (!(adapter->flags & FLAG_HAS_AMT)) fp@2359: e1000_release_hw_control(adapter); fp@2359: err_eeprom: fp@2359: if (!e1000_check_reset_block(&adapter->hw)) fp@2359: e1000_phy_hw_reset(&adapter->hw); fp@2359: err_hw_init: fp@2359: fp@2359: kfree(adapter->tx_ring); fp@2359: kfree(adapter->rx_ring); fp@2359: err_sw_init: fp@2359: if (adapter->hw.flash_address) fp@2359: iounmap(adapter->hw.flash_address); fp@2359: e1000e_reset_interrupt_capability(adapter); fp@2359: err_flashmap: fp@2359: iounmap(adapter->hw.hw_addr); fp@2359: err_ioremap: fp@2359: free_netdev(netdev); fp@2359: err_alloc_etherdev: fp@2359: pci_release_selected_regions(pdev, fp@2359: pci_select_bars(pdev, IORESOURCE_MEM)); fp@2359: err_pci_reg: fp@2359: err_dma: fp@2359: pci_disable_device(pdev); fp@2359: return err; fp@2359: } fp@2359: fp@2359: /** fp@2359: * e1000_remove - Device Removal Routine fp@2359: * @pdev: PCI device information struct fp@2359: * fp@2359: * e1000_remove is called by the PCI subsystem to alert the driver fp@2359: * that it should release a PCI device. This could be caused by a fp@2359: * Hot-Plug event, or because the driver is going to be removed from fp@2359: * memory. fp@2359: **/ fp@2359: static void __devexit e1000_remove(struct pci_dev *pdev) fp@2359: { fp@2359: struct net_device *netdev = pci_get_drvdata(pdev); fp@2359: struct e1000_adapter *adapter = netdev_priv(netdev); fp@2359: bool down = test_bit(__E1000_DOWN, &adapter->state); fp@2359: fp@2359: pm_runtime_get_sync(&pdev->dev); fp@2359: fp@2359: /* fp@2359: * flush_scheduled_work() may reschedule our watchdog task, so fp@2359: * explicitly disable watchdog tasks from being rescheduled fp@2359: */ fp@2359: if (!down) fp@2359: set_bit(__E1000_DOWN, &adapter->state); fp@2359: del_timer_sync(&adapter->watchdog_timer); fp@2359: del_timer_sync(&adapter->phy_info_timer); fp@2359: fp@2359: cancel_work_sync(&adapter->reset_task); fp@2359: cancel_work_sync(&adapter->watchdog_task); fp@2359: cancel_work_sync(&adapter->downshift_task); fp@2359: cancel_work_sync(&adapter->update_phy_task); fp@2359: cancel_work_sync(&adapter->print_hang_task); fp@2359: flush_scheduled_work(); fp@2359: fp@2359: if (!(netdev->flags & IFF_UP)) fp@2359: e1000_power_down_phy(adapter); fp@2359: fp@2359: /* Don't lie to e1000_close() down the road.
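fp@2359: * unregister_netdev() below ends up in e1000_close(), which expects fp@2359: * __E1000_DOWN to reflect the true device state.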
*/ fp@2359: if (!down) fp@2359: clear_bit(__E1000_DOWN, &adapter->state); fp@2359: unregister_netdev(netdev); fp@2359: fp@2359: if (pci_dev_run_wake(pdev)) { fp@2359: pm_runtime_disable(&pdev->dev); fp@2359: pm_runtime_set_suspended(&pdev->dev); fp@2359: } fp@2359: pm_runtime_put_noidle(&pdev->dev); fp@2359: fp@2359: /* fp@2359: * Release control of h/w to f/w. If f/w is AMT enabled, this fp@2359: * would have already happened in close and is redundant. fp@2359: */ fp@2359: e1000_release_hw_control(adapter); fp@2359: fp@2359: e1000e_reset_interrupt_capability(adapter); fp@2359: kfree(adapter->tx_ring); fp@2359: kfree(adapter->rx_ring); fp@2359: fp@2359: iounmap(adapter->hw.hw_addr); fp@2359: if (adapter->hw.flash_address) fp@2359: iounmap(adapter->hw.flash_address); fp@2359: pci_release_selected_regions(pdev, fp@2359: pci_select_bars(pdev, IORESOURCE_MEM)); fp@2359: fp@2359: free_netdev(netdev); fp@2359: fp@2359: /* AER disable */ fp@2359: pci_disable_pcie_error_reporting(pdev); fp@2359: fp@2359: pci_disable_device(pdev); fp@2359: } fp@2359: fp@2359: /* PCI Error Recovery (ERS) */ fp@2359: static struct pci_error_handlers e1000_err_handler = { fp@2359: .error_detected = e1000_io_error_detected, fp@2359: .slot_reset = e1000_io_slot_reset, fp@2359: .resume = e1000_io_resume, fp@2359: }; fp@2359: fp@2359: static DEFINE_PCI_DEVICE_TABLE(e1000_pci_tbl) = { fp@2359: { PCI_VDEVICE(INTEL, E1000_DEV_ID_82571EB_COPPER), board_82571 }, fp@2359: { PCI_VDEVICE(INTEL, E1000_DEV_ID_82571EB_FIBER), board_82571 }, fp@2359: { PCI_VDEVICE(INTEL, E1000_DEV_ID_82571EB_QUAD_COPPER), board_82571 }, fp@2359: { PCI_VDEVICE(INTEL, E1000_DEV_ID_82571EB_QUAD_COPPER_LP), board_82571 }, fp@2359: { PCI_VDEVICE(INTEL, E1000_DEV_ID_82571EB_QUAD_FIBER), board_82571 }, fp@2359: { PCI_VDEVICE(INTEL, E1000_DEV_ID_82571EB_SERDES), board_82571 }, fp@2359: { PCI_VDEVICE(INTEL, E1000_DEV_ID_82571EB_SERDES_DUAL), board_82571 }, fp@2359: { PCI_VDEVICE(INTEL, E1000_DEV_ID_82571EB_SERDES_QUAD), board_82571 }, fp@2359: { PCI_VDEVICE(INTEL, E1000_DEV_ID_82571PT_QUAD_COPPER), board_82571 }, fp@2359: fp@2359: { PCI_VDEVICE(INTEL, E1000_DEV_ID_82572EI), board_82572 }, fp@2359: { PCI_VDEVICE(INTEL, E1000_DEV_ID_82572EI_COPPER), board_82572 }, fp@2359: { PCI_VDEVICE(INTEL, E1000_DEV_ID_82572EI_FIBER), board_82572 }, fp@2359: { PCI_VDEVICE(INTEL, E1000_DEV_ID_82572EI_SERDES), board_82572 }, fp@2359: fp@2359: { PCI_VDEVICE(INTEL, E1000_DEV_ID_82573E), board_82573 }, fp@2359: { PCI_VDEVICE(INTEL, E1000_DEV_ID_82573E_IAMT), board_82573 }, fp@2359: { PCI_VDEVICE(INTEL, E1000_DEV_ID_82573L), board_82573 }, fp@2359: fp@2359: { PCI_VDEVICE(INTEL, E1000_DEV_ID_82574L), board_82574 }, fp@2359: { PCI_VDEVICE(INTEL, E1000_DEV_ID_82574LA), board_82574 }, fp@2359: { PCI_VDEVICE(INTEL, E1000_DEV_ID_82583V), board_82583 }, fp@2359: fp@2359: { PCI_VDEVICE(INTEL, E1000_DEV_ID_80003ES2LAN_COPPER_DPT), fp@2359: board_80003es2lan }, fp@2359: { PCI_VDEVICE(INTEL, E1000_DEV_ID_80003ES2LAN_COPPER_SPT), fp@2359: board_80003es2lan }, fp@2359: { PCI_VDEVICE(INTEL, E1000_DEV_ID_80003ES2LAN_SERDES_DPT), fp@2359: board_80003es2lan }, fp@2359: { PCI_VDEVICE(INTEL, E1000_DEV_ID_80003ES2LAN_SERDES_SPT), fp@2359: board_80003es2lan }, fp@2359: fp@2359: { PCI_VDEVICE(INTEL, E1000_DEV_ID_ICH8_IFE), board_ich8lan }, fp@2359: { PCI_VDEVICE(INTEL, E1000_DEV_ID_ICH8_IFE_G), board_ich8lan }, fp@2359: { PCI_VDEVICE(INTEL, E1000_DEV_ID_ICH8_IFE_GT), board_ich8lan }, fp@2359: { PCI_VDEVICE(INTEL, E1000_DEV_ID_ICH8_IGP_AMT), board_ich8lan }, fp@2359: { PCI_VDEVICE(INTEL, 
E1000_DEV_ID_ICH8_IGP_C), board_ich8lan }, fp@2359: { PCI_VDEVICE(INTEL, E1000_DEV_ID_ICH8_IGP_M), board_ich8lan }, fp@2359: { PCI_VDEVICE(INTEL, E1000_DEV_ID_ICH8_IGP_M_AMT), board_ich8lan }, fp@2359: { PCI_VDEVICE(INTEL, E1000_DEV_ID_ICH8_82567V_3), board_ich8lan }, fp@2359: fp@2359: { PCI_VDEVICE(INTEL, E1000_DEV_ID_ICH9_IFE), board_ich9lan }, fp@2359: { PCI_VDEVICE(INTEL, E1000_DEV_ID_ICH9_IFE_G), board_ich9lan }, fp@2359: { PCI_VDEVICE(INTEL, E1000_DEV_ID_ICH9_IFE_GT), board_ich9lan }, fp@2359: { PCI_VDEVICE(INTEL, E1000_DEV_ID_ICH9_IGP_AMT), board_ich9lan }, fp@2359: { PCI_VDEVICE(INTEL, E1000_DEV_ID_ICH9_IGP_C), board_ich9lan }, fp@2359: { PCI_VDEVICE(INTEL, E1000_DEV_ID_ICH9_BM), board_ich9lan }, fp@2359: { PCI_VDEVICE(INTEL, E1000_DEV_ID_ICH9_IGP_M), board_ich9lan }, fp@2359: { PCI_VDEVICE(INTEL, E1000_DEV_ID_ICH9_IGP_M_AMT), board_ich9lan }, fp@2359: { PCI_VDEVICE(INTEL, E1000_DEV_ID_ICH9_IGP_M_V), board_ich9lan }, fp@2359: fp@2359: { PCI_VDEVICE(INTEL, E1000_DEV_ID_ICH10_R_BM_LM), board_ich9lan }, fp@2359: { PCI_VDEVICE(INTEL, E1000_DEV_ID_ICH10_R_BM_LF), board_ich9lan }, fp@2359: { PCI_VDEVICE(INTEL, E1000_DEV_ID_ICH10_R_BM_V), board_ich9lan }, fp@2359: fp@2359: { PCI_VDEVICE(INTEL, E1000_DEV_ID_ICH10_D_BM_LM), board_ich10lan }, fp@2359: { PCI_VDEVICE(INTEL, E1000_DEV_ID_ICH10_D_BM_LF), board_ich10lan }, fp@2359: { PCI_VDEVICE(INTEL, E1000_DEV_ID_ICH10_D_BM_V), board_ich10lan }, fp@2359: fp@2359: { PCI_VDEVICE(INTEL, E1000_DEV_ID_PCH_M_HV_LM), board_pchlan }, fp@2359: { PCI_VDEVICE(INTEL, E1000_DEV_ID_PCH_M_HV_LC), board_pchlan }, fp@2359: { PCI_VDEVICE(INTEL, E1000_DEV_ID_PCH_D_HV_DM), board_pchlan }, fp@2359: { PCI_VDEVICE(INTEL, E1000_DEV_ID_PCH_D_HV_DC), board_pchlan }, fp@2359: fp@2359: { } /* terminate list */ fp@2359: }; fp@2359: MODULE_DEVICE_TABLE(pci, e1000_pci_tbl); fp@2359: fp@2359: #ifdef CONFIG_PM_OPS fp@2359: static const struct dev_pm_ops e1000_pm_ops = { fp@2359: SET_SYSTEM_SLEEP_PM_OPS(e1000_suspend, e1000_resume) fp@2359: SET_RUNTIME_PM_OPS(e1000_runtime_suspend, fp@2359: e1000_runtime_resume, e1000_idle) fp@2359: }; fp@2359: #endif fp@2359: fp@2359: /* PCI Device API Driver */ fp@2359: static struct pci_driver e1000_driver = { fp@2359: .name = e1000e_driver_name, fp@2359: .id_table = e1000_pci_tbl, fp@2359: .probe = e1000_probe, fp@2359: .remove = __devexit_p(e1000_remove), fp@2359: #ifdef CONFIG_PM_OPS fp@2359: .driver.pm = &e1000_pm_ops, fp@2359: #endif fp@2359: .shutdown = e1000_shutdown, fp@2359: .err_handler = &e1000_err_handler fp@2359: }; fp@2359: fp@2359: /** fp@2359: * e1000_init_module - Driver Registration Routine fp@2359: * fp@2359: * e1000_init_module is the first routine called when the driver is fp@2359: * loaded. All it does is register with the PCI subsystem. fp@2359: **/ fp@2359: static int __init e1000_init_module(void) fp@2359: { fp@2359: int ret; fp@2359: pr_info("Intel(R) PRO/1000 Network Driver - %s\n", fp@2359: e1000e_driver_version); fp@2359: pr_info("Copyright (c) 1999 - 2009 Intel Corporation.\n"); fp@2359: ret = pci_register_driver(&e1000_driver); fp@2359: fp@2359: return ret; fp@2359: } fp@2359: module_init(e1000_init_module); fp@2359: fp@2359: /** fp@2359: * e1000_exit_module - Driver Exit Cleanup Routine fp@2359: * fp@2359: * e1000_exit_module is called just before the driver is removed fp@2359: * from memory. 
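fp@2359: * Unregistering the PCI driver causes the PCI core to call fp@2359: * e1000_remove() for any devices still bound to it.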
fp@2359: **/ fp@2359: static void __exit e1000_exit_module(void) fp@2359: { fp@2359: pci_unregister_driver(&e1000_driver); fp@2359: } fp@2359: module_exit(e1000_exit_module); fp@2359: fp@2359: fp@2359: MODULE_AUTHOR("Intel Corporation, <linux.nics@intel.com>"); fp@2359: MODULE_DESCRIPTION("Intel(R) PRO/1000 Network Driver"); fp@2359: MODULE_LICENSE("GPL"); fp@2359: MODULE_VERSION(DRV_VERSION); fp@2359: fp@2359: /* e1000_main.c */