/*
 * forcedeth: Ethernet driver for NVIDIA nForce media access controllers.
 *
 * Note: This driver is a cleanroom reimplementation based on reverse
 * engineered documentation written by Carl-Daniel Hailfinger
 * and Andrew de Quincey. It's neither supported nor endorsed
 * by NVIDIA Corp. Use at your own risk.
 *
 * NVIDIA, nForce and other NVIDIA marks are trademarks or registered
 * trademarks of NVIDIA Corporation in the United States and other
 * countries.
 *
 * Copyright (C) 2003,4,5 Manfred Spraul
 * Copyright (C) 2004 Andrew de Quincey (wol support)
 * Copyright (C) 2004 Carl-Daniel Hailfinger (invalid MAC handling, insane
 *	IRQ rate fixes, bigendian fixes, cleanups, verification)
 * Copyright (c) 2004 NVIDIA Corporation
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
 *
 * Changelog:
 * 	0.01: 05 Oct 2003: First release that compiles without warnings.
 * 	0.02: 05 Oct 2003: Fix bug for nv_drain_tx: do not try to free NULL skbs.
 * 			   Check all PCI BARs for the register window.
 * 			   udelay added to mii_rw.
 * 	0.03: 06 Oct 2003: Initialize dev->irq.
 * 	0.04: 07 Oct 2003: Initialize np->lock, reduce handled irqs, add printks.
 * 	0.05: 09 Oct 2003: printk removed again, irq status print tx_timeout.
 * 	0.06: 10 Oct 2003: MAC Address read updated, pff flag generation updated,
 * 			   irq mask updated
 * 	0.07: 14 Oct 2003: Further irq mask updates.
 * 	0.08: 20 Oct 2003: rx_desc.Length initialization added, nv_alloc_rx refill
 * 			   added into irq handler, NULL check for drain_ring.
 * 	0.09: 20 Oct 2003: Basic link speed irq implementation. Only handle the
 * 			   requested interrupt sources.
 * 	0.10: 20 Oct 2003: First cleanup for release.
 * 	0.11: 21 Oct 2003: hexdump for tx added, rx buffer sizes increased.
 * 			   MAC Address init fix, set_multicast cleanup.
 * 	0.12: 23 Oct 2003: Cleanups for release.
 * 	0.13: 25 Oct 2003: Limit for concurrent tx packets increased to 10.
 * 			   Set link speed correctly. start rx before starting
 * 			   tx (nv_start_rx sets the link speed).
 * 	0.14: 25 Oct 2003: Nic dependent irq mask.
 * 	0.15: 08 Nov 2003: fix smp deadlock with set_multicast_list during
 * 			   open.
 * 	0.16: 15 Nov 2003: include file cleanup for ppc64, rx buffer size
 * 			   increased to 1628 bytes.
 * 	0.17: 16 Nov 2003: undo rx buffer size increase. Subtract 1 from
 * 			   the tx length.
 * 	0.18: 17 Nov 2003: fix oops due to late initialization of dev_stats
 * 	0.19: 29 Nov 2003: Handle RxNoBuf, detect & handle invalid mac
 * 			   addresses, really stop rx if already running
 * 			   in nv_start_rx, clean up a bit.
 * 	0.20: 07 Dec 2003: alloc fixes
 * 	0.21: 12 Jan 2004: additional alloc fix, nic polling fix.
 * 	0.22: 19 Jan 2004: reprogram timer to a sane rate, avoid lockup
 * 			   on close.
 * 	0.23: 26 Jan 2004: various small cleanups
 * 	0.24: 27 Feb 2004: make driver even less anonymous in backtraces
 * 	0.25: 09 Mar 2004: wol support
 * 	0.26: 03 Jun 2004: netdriver specific annotation, sparse-related fixes
 * 	0.27: 19 Jun 2004: Gigabit support, new descriptor rings,
 * 			   added CK804/MCP04 device IDs, code fixes
 * 			   for registers, link status and other minor fixes.
 * 	0.28: 21 Jun 2004: Big cleanup, making driver mostly endian safe
 * 	0.29: 31 Aug 2004: Add backup timer for link change notification.
 * 	0.30: 25 Sep 2004: rx checksum support for nf 250 Gb. Add rx reset
 * 			   into nv_close, otherwise reenabling for wol can
 * 			   cause DMA to kfree'd memory.
 * 	0.31: 14 Nov 2004: ethtool support for getting/setting link
 * 			   capabilities.
 * 	0.32: 16 Apr 2005: RX_ERROR4 handling added.
 * 	0.33: 16 May 2005: Support for MCP51 added.
 * 	0.34: 18 Jun 2005: Add DEV_NEED_LINKTIMER to all nForce nics.
 * 	0.35: 26 Jun 2005: Support for MCP55 added.
 * 	0.36: 28 Jun 2005: Add jumbo frame support.
 * 	0.37: 10 Jul 2005: Additional ethtool support, cleanup of pci id list
 * 	0.38: 16 Jul 2005: tx irq rewrite: Use global flags instead of
 * 			   per-packet flags.
 * 	0.39: 18 Jul 2005: Add 64bit descriptor support.
 * 	0.40: 19 Jul 2005: Add support for mac address change.
 * 	0.41: 30 Jul 2005: Write back original MAC in nv_close instead
 * 			   of nv_remove
 * 	0.42: 06 Aug 2005: Fix lack of link speed initialization
 * 			   in the second (and later) nv_open call
 * 	0.43: 10 Aug 2005: Add support for tx checksum.
 * 	0.44: 20 Aug 2005: Add support for scatter gather and segmentation.
 * 	0.45: 18 Sep 2005: Remove nv_stop/start_rx from every link check
 * 	0.46: 20 Oct 2005: Add irq optimization modes.
 * 	0.47: 26 Oct 2005: Add phyaddr 0 in phy scan.
 * 	0.48: 24 Dec 2005: Disable TSO, bugfix for pci_map_single
 * 	0.49: 10 Dec 2005: Fix tso for large buffers.
 * 	0.50: 20 Jan 2006: Add 8021pq tagging support.
 * 	0.51: 20 Jan 2006: Add 64bit consistent memory allocation for rings.
 * 	0.52: 20 Jan 2006: Add MSI/MSIX support.
 * 	0.53: 19 Mar 2006: Fix init from low power mode and add hw reset.
 * 	0.54: 21 Mar 2006: Fix spin locks for multi irqs and cleanup.
 *
 * Known bugs:
 * We suspect that on some hardware no TX done interrupts are generated.
 * This means recovery from netif_stop_queue only happens if the hw timer
 * interrupt fires (100 times/second, configurable with NVREG_POLL_DEFAULT)
 * and the timer is active in the IRQMask, or if a rx packet arrives by chance.
 * If your hardware reliably generates tx done interrupts, then you can remove
 * DEV_NEED_TIMERIRQ from the driver_data flags.
 * DEV_NEED_TIMERIRQ will not harm you on sane hardware, only generating a few
 * superfluous timer interrupts from the nic.
 */
#define FORCEDETH_VERSION		"0.54"
#define DRV_NAME			"forcedeth"

#include
#include
#include
#include
#include
#include
#include
#include
#include
#include
#include
#include
#include
#include
#include

#include
#include
#include
#include

#include "../globals.h"
#include "ecdev.h"

#if 0
#define dprintk			printk
#else
#define dprintk(x...)		do { } while (0)
#endif


/*
 * Hardware access:
 */

#define DEV_NEED_TIMERIRQ	0x0001  /* set the timer irq flag in the irq mask */
#define DEV_NEED_LINKTIMER	0x0002  /* poll link settings. Relies on the timer irq */
#define DEV_HAS_LARGEDESC	0x0004  /* device supports jumbo frames and needs packet format 2 */
#define DEV_HAS_HIGH_DMA	0x0008  /* device supports 64bit dma */
#define DEV_HAS_CHECKSUM	0x0010  /* device supports tx and rx checksum offloads */
#define DEV_HAS_VLAN		0x0020  /* device supports vlan tagging and striping */
#define DEV_HAS_MSI		0x0040  /* device supports MSI */
#define DEV_HAS_MSI_X		0x0080  /* device supports MSI-X */
#define DEV_HAS_POWER_CNTRL	0x0100  /* device supports power savings */

enum {
	NvRegIrqStatus = 0x000,
#define NVREG_IRQSTAT_MIIEVENT	0x040
#define NVREG_IRQSTAT_MASK	0x1ff
	NvRegIrqMask = 0x004,
#define NVREG_IRQ_RX_ERROR	0x0001
#define NVREG_IRQ_RX		0x0002
#define NVREG_IRQ_RX_NOBUF	0x0004
#define NVREG_IRQ_TX_ERR	0x0008
#define NVREG_IRQ_TX_OK		0x0010
#define NVREG_IRQ_TIMER		0x0020
#define NVREG_IRQ_LINK		0x0040
#define NVREG_IRQ_RX_FORCED	0x0080
#define NVREG_IRQ_TX_FORCED	0x0100
#define NVREG_IRQMASK_THROUGHPUT	0x00df
#define NVREG_IRQMASK_CPU	0x0040
#define NVREG_IRQ_TX_ALL	(NVREG_IRQ_TX_ERR|NVREG_IRQ_TX_OK|NVREG_IRQ_TX_FORCED)
#define NVREG_IRQ_RX_ALL	(NVREG_IRQ_RX_ERROR|NVREG_IRQ_RX|NVREG_IRQ_RX_NOBUF|NVREG_IRQ_RX_FORCED)
#define NVREG_IRQ_OTHER		(NVREG_IRQ_TIMER|NVREG_IRQ_LINK)

#define NVREG_IRQ_UNKNOWN	(~(NVREG_IRQ_RX_ERROR|NVREG_IRQ_RX|NVREG_IRQ_RX_NOBUF|NVREG_IRQ_TX_ERR| \
					NVREG_IRQ_TX_OK|NVREG_IRQ_TIMER|NVREG_IRQ_LINK|NVREG_IRQ_RX_FORCED| \
					NVREG_IRQ_TX_FORCED))

	NvRegUnknownSetupReg6 = 0x008,
#define NVREG_UNKSETUP6_VAL	3

/*
 * NVREG_POLL_DEFAULT is the interval length of the timer source on the nic
 * NVREG_POLL_DEFAULT=97 would result in an interval length of 1 ms
 */
	NvRegPollingInterval = 0x00c,
#define NVREG_POLL_DEFAULT_THROUGHPUT	970
#define NVREG_POLL_DEFAULT_CPU	13
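/*
 * A worked example of the formula documented at the poll_interval module
 * parameter below (value = time_in_micro_secs * 100 / 2^10): the throughput
 * default of 970 corresponds to roughly 970 * 1024 / 100 = 9932 us, i.e.
 * about 10 ms, and the cpu default of 13 to roughly 133 us.
 */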
	NvRegMSIMap0 = 0x020,
	NvRegMSIMap1 = 0x024,
	NvRegMSIIrqMask = 0x030,
#define NVREG_MSI_VECTOR_0_ENABLED	0x01
	NvRegMisc1 = 0x080,
#define NVREG_MISC1_HD		0x02
#define NVREG_MISC1_FORCE	0x3b0f3c

	NvRegMacReset = 0x3c,
#define NVREG_MAC_RESET_ASSERT	0x0F3
	NvRegTransmitterControl = 0x084,
#define NVREG_XMITCTL_START	0x01
	NvRegTransmitterStatus = 0x088,
#define NVREG_XMITSTAT_BUSY	0x01

	NvRegPacketFilterFlags = 0x8c,
#define NVREG_PFF_ALWAYS	0x7F0008
#define NVREG_PFF_PROMISC	0x80
#define NVREG_PFF_MYADDR	0x20

	NvRegOffloadConfig = 0x90,
#define NVREG_OFFLOAD_HOMEPHY	0x601
#define NVREG_OFFLOAD_NORMAL	RX_NIC_BUFSIZE
	NvRegReceiverControl = 0x094,
#define NVREG_RCVCTL_START	0x01
	NvRegReceiverStatus = 0x98,
#define NVREG_RCVSTAT_BUSY	0x01

	NvRegRandomSeed = 0x9c,
#define NVREG_RNDSEED_MASK	0x00ff
#define NVREG_RNDSEED_FORCE	0x7f00
#define NVREG_RNDSEED_FORCE2	0x2d00
#define NVREG_RNDSEED_FORCE3	0x7400

	NvRegUnknownSetupReg1 = 0xA0,
#define NVREG_UNKSETUP1_VAL	0x16070f
	NvRegUnknownSetupReg2 = 0xA4,
#define NVREG_UNKSETUP2_VAL	0x16
	NvRegMacAddrA = 0xA8,
	NvRegMacAddrB = 0xAC,
	NvRegMulticastAddrA = 0xB0,
#define NVREG_MCASTADDRA_FORCE	0x01
	NvRegMulticastAddrB = 0xB4,
	NvRegMulticastMaskA = 0xB8,
	NvRegMulticastMaskB = 0xBC,

	NvRegPhyInterface = 0xC0,
#define PHY_RGMII		0x10000000

	NvRegTxRingPhysAddr = 0x100,
	NvRegRxRingPhysAddr = 0x104,
	NvRegRingSizes = 0x108,
#define NVREG_RINGSZ_TXSHIFT	0
#define NVREG_RINGSZ_RXSHIFT	16
	NvRegUnknownTransmitterReg = 0x10c,
	NvRegLinkSpeed = 0x110,
#define NVREG_LINKSPEED_FORCE	0x10000
#define NVREG_LINKSPEED_10	1000
#define NVREG_LINKSPEED_100	100
#define NVREG_LINKSPEED_1000	50
#define NVREG_LINKSPEED_MASK	(0xFFF)
	NvRegUnknownSetupReg5 = 0x130,
#define NVREG_UNKSETUP5_BIT31	(1<<31)
	NvRegUnknownSetupReg3 = 0x13c,
#define NVREG_UNKSETUP3_VAL1	0x200010
	NvRegTxRxControl = 0x144,
#define NVREG_TXRXCTL_KICK	0x0001
#define NVREG_TXRXCTL_BIT1	0x0002
#define NVREG_TXRXCTL_BIT2	0x0004
#define NVREG_TXRXCTL_IDLE	0x0008
#define NVREG_TXRXCTL_RESET	0x0010
#define NVREG_TXRXCTL_RXCHECK	0x0400
#define NVREG_TXRXCTL_DESC_1	0
#define NVREG_TXRXCTL_DESC_2	0x02100
#define NVREG_TXRXCTL_DESC_3	0x02200
#define NVREG_TXRXCTL_VLANSTRIP	0x00040
#define NVREG_TXRXCTL_VLANINS	0x00080
	NvRegTxRingPhysAddrHigh = 0x148,
	NvRegRxRingPhysAddrHigh = 0x14C,
	NvRegMIIStatus = 0x180,
#define NVREG_MIISTAT_ERROR		0x0001
#define NVREG_MIISTAT_LINKCHANGE	0x0008
#define NVREG_MIISTAT_MASK		0x000f
#define NVREG_MIISTAT_MASK2		0x000f
	NvRegUnknownSetupReg4 = 0x184,
#define NVREG_UNKSETUP4_VAL	8

	NvRegAdapterControl = 0x188,
#define NVREG_ADAPTCTL_START	0x02
#define NVREG_ADAPTCTL_LINKUP	0x04
#define NVREG_ADAPTCTL_PHYVALID	0x40000
#define NVREG_ADAPTCTL_RUNNING	0x100000
#define NVREG_ADAPTCTL_PHYSHIFT	24
	NvRegMIISpeed = 0x18c,
#define NVREG_MIISPEED_BIT8	(1<<8)
#define NVREG_MIIDELAY		5
	NvRegMIIControl = 0x190,
#define NVREG_MIICTL_INUSE	0x08000
#define NVREG_MIICTL_WRITE	0x00400
#define NVREG_MIICTL_ADDRSHIFT	5
	NvRegMIIData = 0x194,
	NvRegWakeUpFlags = 0x200,
#define NVREG_WAKEUPFLAGS_VAL		0x7770
#define NVREG_WAKEUPFLAGS_BUSYSHIFT	24
#define NVREG_WAKEUPFLAGS_ENABLESHIFT	16
#define NVREG_WAKEUPFLAGS_D3SHIFT	12
#define NVREG_WAKEUPFLAGS_D2SHIFT	8
#define NVREG_WAKEUPFLAGS_D1SHIFT	4
#define NVREG_WAKEUPFLAGS_D0SHIFT	0
#define NVREG_WAKEUPFLAGS_ACCEPT_MAGPAT		0x01
#define NVREG_WAKEUPFLAGS_ACCEPT_WAKEUPPAT	0x02
#define NVREG_WAKEUPFLAGS_ACCEPT_LINKCHANGE	0x04
#define NVREG_WAKEUPFLAGS_ENABLE	0x1111

	NvRegPatternCRC = 0x204,
	NvRegPatternMask = 0x208,
	NvRegPowerCap = 0x268,
#define NVREG_POWERCAP_D3SUPP	(1<<30)
#define NVREG_POWERCAP_D2SUPP	(1<<26)
#define NVREG_POWERCAP_D1SUPP	(1<<25)
	NvRegPowerState = 0x26c,
#define NVREG_POWERSTATE_POWEREDUP	0x8000
#define NVREG_POWERSTATE_VALID		0x0100
#define NVREG_POWERSTATE_MASK		0x0003
#define NVREG_POWERSTATE_D0		0x0000
#define NVREG_POWERSTATE_D1		0x0001
#define NVREG_POWERSTATE_D2		0x0002
#define NVREG_POWERSTATE_D3		0x0003
	NvRegVlanControl = 0x300,
#define NVREG_VLANCONTROL_ENABLE	0x2000
	NvRegMSIXMap0 = 0x3e0,
	NvRegMSIXMap1 = 0x3e4,
	NvRegMSIXIrqStatus = 0x3f0,

	NvRegPowerState2 = 0x600,
#define NVREG_POWERSTATE2_POWERUP_MASK		0x0F11
#define NVREG_POWERSTATE2_POWERUP_REV_A3	0x0001
};

/* Big endian: should work, but is untested */
struct ring_desc {
	u32 PacketBuffer;
	u32 FlagLen;
};

struct ring_desc_ex {
	u32 PacketBufferHigh;
	u32 PacketBufferLow;
	u32 TxVlan;
	u32 FlagLen;
};

typedef union _ring_type {
	struct ring_desc* orig;
	struct ring_desc_ex* ex;
} ring_type;
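/*
 * Two descriptor layouts are used throughout: the two-word ring_desc for
 * the DESC_VER_1/DESC_VER_2 formats (32 bit buffer address) and the
 * four-word ring_desc_ex for the extended format (64 bit buffer address
 * plus a vlan word); np->desc_ver selects between them below. In both
 * cases FlagLen apparently packs the status/ownership flags into the
 * upper bits (FLAG_MASK_V1/V2) and the buffer length into the lower bits.
 */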
#define FLAG_MASK_V1	0xffff0000
#define FLAG_MASK_V2	0xffffc000
#define LEN_MASK_V1	(0xffffffff ^ FLAG_MASK_V1)
#define LEN_MASK_V2	(0xffffffff ^ FLAG_MASK_V2)

#define NV_TX_LASTPACKET	(1<<16)
#define NV_TX_RETRYERROR	(1<<19)
#define NV_TX_FORCED_INTERRUPT	(1<<24)
#define NV_TX_DEFERRED		(1<<26)
#define NV_TX_CARRIERLOST	(1<<27)
#define NV_TX_LATECOLLISION	(1<<28)
#define NV_TX_UNDERFLOW		(1<<29)
#define NV_TX_ERROR		(1<<30)
#define NV_TX_VALID		(1<<31)

#define NV_TX2_LASTPACKET	(1<<29)
#define NV_TX2_RETRYERROR	(1<<18)
#define NV_TX2_FORCED_INTERRUPT	(1<<30)
#define NV_TX2_DEFERRED		(1<<25)
#define NV_TX2_CARRIERLOST	(1<<26)
#define NV_TX2_LATECOLLISION	(1<<27)
#define NV_TX2_UNDERFLOW	(1<<28)
/* error and valid are the same for both */
#define NV_TX2_ERROR		(1<<30)
#define NV_TX2_VALID		(1<<31)
#define NV_TX2_TSO		(1<<28)
#define NV_TX2_TSO_SHIFT	14
#define NV_TX2_TSO_MAX_SHIFT	14
#define NV_TX2_TSO_MAX_SIZE	(1<<NV_TX2_TSO_MAX_SHIFT)

/* FIXME: MII defines that should be added to <linux/mii.h> */
#define MII_1000BT_CR	0x09
#define MII_1000BT_SR	0x0a
#define ADVERTISE_1000FULL	0x0200
#define ADVERTISE_1000HALF	0x0100
#define LPA_1000FULL	0x0800
#define LPA_1000HALF	0x0400

/* MSI/MSI-X defines */
#define NV_MSI_X_MAX_VECTORS	8
#define NV_MSI_X_VECTORS_MASK	0x000f
#define NV_MSI_CAPABLE		0x0010
#define NV_MSI_X_CAPABLE	0x0020
#define NV_MSI_ENABLED		0x0040
#define NV_MSI_X_ENABLED	0x0080

#define NV_MSI_X_VECTOR_ALL	0x0
#define NV_MSI_X_VECTOR_RX	0x0
#define NV_MSI_X_VECTOR_TX	0x1
#define NV_MSI_X_VECTOR_OTHER	0x2

/*
 * SMP locking:
 * All hardware access under dev->priv->lock, except the performance
 * critical parts:
 * - rx is (pseudo-) lockless: it relies on the single-threading provided
 *   by the arch code for interrupts.
 * - tx setup is lockless: it relies on dev->xmit_lock. Actual submission
 *   needs dev->priv->lock :-(
 * - set_multicast_list: preparation lockless, relies on dev->xmit_lock.
 */

/* in dev: base, irq */
struct fe_priv {
	spinlock_t lock;

	/* General data:
	 * Locking: spin_lock(&np->lock); */
	struct net_device_stats stats;
	int in_shutdown;
	u32 linkspeed;
	int duplex;
	int autoneg;
	int fixed_mode;
	int phyaddr;
	int wolenabled;
	unsigned int phy_oui;
	u16 gigabit;

	/* General data: RO fields */
	dma_addr_t ring_addr;
	struct pci_dev *pci_dev;
	u32 orig_mac[2];
	u32 irqmask;
	u32 desc_ver;
	u32 txrxctl_bits;
	u32 vlanctl_bits;
	u32 driver_data;
	u32 register_size;

	void __iomem *base;

	/* rx specific fields.
	 * Locking: Within irq handler or disable_irq+spin_lock(&np->lock);
	 */
	ring_type rx_ring;
	unsigned int cur_rx, refill_rx;
	struct sk_buff *rx_skbuff[RX_RING];
	dma_addr_t rx_dma[RX_RING];
	unsigned int rx_buf_sz;
	unsigned int pkt_limit;
	struct timer_list oom_kick;
	struct timer_list nic_poll;
	u32 nic_poll_irq;

	/* media detection workaround.
	 * Locking: Within irq handler or disable_irq+spin_lock(&np->lock);
	 */
	int need_linktimer;
	unsigned long link_timeout;
	/*
	 * tx specific fields.
	 */
	ring_type tx_ring;
	unsigned int next_tx, nic_tx;
	struct sk_buff *tx_skbuff[TX_RING];
	dma_addr_t tx_dma[TX_RING];
	unsigned int tx_dma_len[TX_RING];
	u32 tx_flags;

	/* vlan fields */
	struct vlan_group *vlangrp;

	/* msi/msi-x fields */
	u32 msi_flags;
	struct msix_entry msi_x_entry[NV_MSI_X_MAX_VECTORS];

	ec_device_t *ecdev;
};

/*
 * Maximum number of loops until we assume that a bit in the irq mask
 * is stuck. Overridable with module param.
 */
static int max_interrupt_work = 5;

/*
 * Optimization can be either throughput mode or cpu mode
 *
 * Throughput Mode: Every tx and rx packet will generate an interrupt.
 * CPU Mode: Interrupts are controlled by a timer.
 */
#define NV_OPTIMIZATION_MODE_THROUGHPUT	0
#define NV_OPTIMIZATION_MODE_CPU	1
static int optimization_mode = NV_OPTIMIZATION_MODE_THROUGHPUT;

/*
 * Poll interval for timer irq
 *
 * This interval determines how frequent an interrupt is generated.
 * This value is determined by [(time_in_micro_secs * 100) / (2^10)]
 * Min = 0, and Max = 65535
 */
static int poll_interval = -1;

/*
 * Disable MSI interrupts
 */
static int disable_msi = 0;

/*
 * Disable MSIX interrupts
 */
static int disable_msix = 0;

static int board_idx = -1;

static inline struct fe_priv *get_nvpriv(struct net_device *dev)
{
	return netdev_priv(dev);
}

static inline u8 __iomem *get_hwbase(struct net_device *dev)
{
	return ((struct fe_priv *)netdev_priv(dev))->base;
}

static inline void pci_push(u8 __iomem *base)
{
	/* force out pending posted writes */
	readl(base);
}

static inline u32 nv_descr_getlength(struct ring_desc *prd, u32 v)
{
	return le32_to_cpu(prd->FlagLen)
		& ((v == DESC_VER_1) ? LEN_MASK_V1 : LEN_MASK_V2);
}

static inline u32 nv_descr_getlength_ex(struct ring_desc_ex *prd, u32 v)
{
	return le32_to_cpu(prd->FlagLen) & LEN_MASK_V2;
}

static int reg_delay(struct net_device *dev, int offset, u32 mask, u32 target,
				int delay, int delaymax, const char *msg)
{
	u8 __iomem *base = get_hwbase(dev);

	pci_push(base);
	do {
		udelay(delay);
		delaymax -= delay;
		if (delaymax < 0) {
			if (msg)
				printk(msg);
			return 1;
		}
	} while ((readl(base + offset) & mask) != target);
	return 0;
}

#define NV_SETUP_RX_RING	0x01
#define NV_SETUP_TX_RING	0x02
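/*
 * Program the ring base addresses into the hardware. Judging from the
 * offsets used below, the rx ring starts at np->ring_addr and the tx ring
 * follows it after RX_RING descriptors; for the extended descriptor format
 * the high 32 bits are written to the *PhysAddrHigh registers as well.
 */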
static void setup_hw_rings(struct net_device *dev, int rxtx_flags)
{
	struct fe_priv *np = get_nvpriv(dev);
	u8 __iomem *base = get_hwbase(dev);

	if (np->desc_ver == DESC_VER_1 || np->desc_ver == DESC_VER_2) {
		if (rxtx_flags & NV_SETUP_RX_RING) {
			writel((u32) cpu_to_le64(np->ring_addr), base + NvRegRxRingPhysAddr);
		}
		if (rxtx_flags & NV_SETUP_TX_RING) {
			writel((u32) cpu_to_le64(np->ring_addr + RX_RING*sizeof(struct ring_desc)), base + NvRegTxRingPhysAddr);
		}
	} else {
		if (rxtx_flags & NV_SETUP_RX_RING) {
			writel((u32) cpu_to_le64(np->ring_addr), base + NvRegRxRingPhysAddr);
			writel((u32) (cpu_to_le64(np->ring_addr) >> 32), base + NvRegRxRingPhysAddrHigh);
		}
		if (rxtx_flags & NV_SETUP_TX_RING) {
			writel((u32) cpu_to_le64(np->ring_addr + RX_RING*sizeof(struct ring_desc_ex)), base + NvRegTxRingPhysAddr);
			writel((u32) (cpu_to_le64(np->ring_addr + RX_RING*sizeof(struct ring_desc_ex)) >> 32), base + NvRegTxRingPhysAddrHigh);
		}
	}
}

static int using_multi_irqs(struct net_device *dev)
{
	struct fe_priv *np = get_nvpriv(dev);

	if (!(np->msi_flags & NV_MSI_X_ENABLED) ||
	    ((np->msi_flags & NV_MSI_X_ENABLED) &&
	     ((np->msi_flags & NV_MSI_X_VECTORS_MASK) == 0x1)))
		return 0;
	else
		return 1;
}

static void nv_enable_irq(struct net_device *dev)
{
	struct fe_priv *np = get_nvpriv(dev);

	if (!using_multi_irqs(dev)) {
		if (np->msi_flags & NV_MSI_X_ENABLED)
			enable_irq(np->msi_x_entry[NV_MSI_X_VECTOR_ALL].vector);
		else
			enable_irq(dev->irq);
	} else {
		enable_irq(np->msi_x_entry[NV_MSI_X_VECTOR_RX].vector);
		enable_irq(np->msi_x_entry[NV_MSI_X_VECTOR_TX].vector);
		enable_irq(np->msi_x_entry[NV_MSI_X_VECTOR_OTHER].vector);
	}
}

static void nv_disable_irq(struct net_device *dev)
{
	struct fe_priv *np = get_nvpriv(dev);

	if (!using_multi_irqs(dev)) {
		if (np->msi_flags & NV_MSI_X_ENABLED)
			disable_irq(np->msi_x_entry[NV_MSI_X_VECTOR_ALL].vector);
		else
			disable_irq(dev->irq);
	} else {
		disable_irq(np->msi_x_entry[NV_MSI_X_VECTOR_RX].vector);
		disable_irq(np->msi_x_entry[NV_MSI_X_VECTOR_TX].vector);
		disable_irq(np->msi_x_entry[NV_MSI_X_VECTOR_OTHER].vector);
	}
}

/* In MSIX mode, a write to irqmask behaves as XOR */
static void nv_enable_hw_interrupts(struct net_device *dev, u32 mask)
{
	u8 __iomem *base = get_hwbase(dev);

	writel(mask, base + NvRegIrqMask);
}

static void nv_disable_hw_interrupts(struct net_device *dev, u32 mask)
{
	struct fe_priv *np = get_nvpriv(dev);
	u8 __iomem *base = get_hwbase(dev);

	if (np->msi_flags & NV_MSI_X_ENABLED) {
		writel(mask, base + NvRegIrqMask);
	} else {
		if (np->msi_flags & NV_MSI_ENABLED)
			writel(0, base + NvRegMSIIrqMask);
		writel(0, base + NvRegIrqMask);
	}
}

#define MII_READ	(-1)
/* mii_rw: read/write a register on the PHY.
 *
 * Caller must guarantee serialization
 */
static int mii_rw(struct net_device *dev, int addr, int miireg, int value)
{
	u8 __iomem *base = get_hwbase(dev);
	u32 reg;
	int retval;

	writel(NVREG_MIISTAT_MASK, base + NvRegMIIStatus);

	reg = readl(base + NvRegMIIControl);
	if (reg & NVREG_MIICTL_INUSE) {
		writel(NVREG_MIICTL_INUSE, base + NvRegMIIControl);
		udelay(NV_MIIBUSY_DELAY);
	}

	reg = (addr << NVREG_MIICTL_ADDRSHIFT) | miireg;
	if (value != MII_READ) {
		writel(value, base + NvRegMIIData);
		reg |= NVREG_MIICTL_WRITE;
	}
	writel(reg, base + NvRegMIIControl);

	if (reg_delay(dev, NvRegMIIControl, NVREG_MIICTL_INUSE, 0,
			NV_MIIPHY_DELAY, NV_MIIPHY_DELAYMAX, NULL)) {
		dprintk(KERN_DEBUG "%s: mii_rw of reg %d at PHY %d timed out.\n",
				dev->name, miireg, addr);
		retval = -1;
	} else if (value != MII_READ) {
		/* it was a write operation - fewer failures are detectable */
		dprintk(KERN_DEBUG "%s: mii_rw wrote 0x%x to reg %d at PHY %d\n",
				dev->name, value, miireg, addr);
		retval = 0;
	} else if (readl(base + NvRegMIIStatus) & NVREG_MIISTAT_ERROR) {
		dprintk(KERN_DEBUG "%s: mii_rw of reg %d at PHY %d failed.\n",
				dev->name, miireg, addr);
		retval = -1;
	} else {
		retval = readl(base + NvRegMIIData);
		dprintk(KERN_DEBUG "%s: mii_rw read from reg %d at PHY %d: 0x%x.\n",
				dev->name, miireg, addr, retval);
	}

	return retval;
}
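/*
 * Typical mii_rw usage, as in phy_reset/phy_init below: a read passes
 * MII_READ as the value and returns the register contents (or -1 on a
 * timeout/error), a write passes the new value and returns 0 on success:
 *
 *	reg = mii_rw(dev, np->phyaddr, MII_BMCR, MII_READ);
 *	if (mii_rw(dev, np->phyaddr, MII_BMCR, reg | BMCR_RESET))
 *		return PHY_ERROR;
 */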
static int phy_reset(struct net_device *dev)
{
	struct fe_priv *np = netdev_priv(dev);
	u32 miicontrol;
	unsigned int tries = 0;

	miicontrol = mii_rw(dev, np->phyaddr, MII_BMCR, MII_READ);
	miicontrol |= BMCR_RESET;
	if (mii_rw(dev, np->phyaddr, MII_BMCR, miicontrol)) {
		return -1;
	}

	/* wait for 500ms */
	msleep(500);

	/* must wait till reset is deasserted */
	while (miicontrol & BMCR_RESET) {
		msleep(10);
		miicontrol = mii_rw(dev, np->phyaddr, MII_BMCR, MII_READ);
		/* FIXME: 100 tries seem excessive */
		if (tries++ > 100)
			return -1;
	}
	return 0;
}

static int phy_init(struct net_device *dev)
{
	struct fe_priv *np = get_nvpriv(dev);
	u8 __iomem *base = get_hwbase(dev);
	u32 phyinterface, phy_reserved, mii_status, mii_control, mii_control_1000, reg;

	/* set advertise register */
	reg = mii_rw(dev, np->phyaddr, MII_ADVERTISE, MII_READ);
	reg |= (ADVERTISE_10HALF|ADVERTISE_10FULL|ADVERTISE_100HALF|ADVERTISE_100FULL|0x800|0x400);
	if (mii_rw(dev, np->phyaddr, MII_ADVERTISE, reg)) {
		printk(KERN_INFO "%s: phy write to advertise failed.\n", pci_name(np->pci_dev));
		return PHY_ERROR;
	}

	/* get phy interface type */
	phyinterface = readl(base + NvRegPhyInterface);

	/* see if gigabit phy */
	mii_status = mii_rw(dev, np->phyaddr, MII_BMSR, MII_READ);
	if (mii_status & PHY_GIGABIT) {
		np->gigabit = PHY_GIGABIT;
		mii_control_1000 = mii_rw(dev, np->phyaddr, MII_1000BT_CR, MII_READ);
		mii_control_1000 &= ~ADVERTISE_1000HALF;
		if (phyinterface & PHY_RGMII)
			mii_control_1000 |= ADVERTISE_1000FULL;
		else
			mii_control_1000 &= ~ADVERTISE_1000FULL;

		if (mii_rw(dev, np->phyaddr, MII_1000BT_CR, mii_control_1000)) {
			printk(KERN_INFO "%s: phy init failed.\n", pci_name(np->pci_dev));
			return PHY_ERROR;
		}
	}
	else
		np->gigabit = 0;

	/* reset the phy */
	if (phy_reset(dev)) {
		printk(KERN_INFO "%s: phy reset failed\n", pci_name(np->pci_dev));
		return PHY_ERROR;
	}

	/* phy vendor specific configuration */
	if ((np->phy_oui == PHY_OUI_CICADA) && (phyinterface & PHY_RGMII) ) {
		phy_reserved = mii_rw(dev, np->phyaddr, MII_RESV1, MII_READ);
		phy_reserved &= ~(PHY_INIT1 | PHY_INIT2);
		phy_reserved |= (PHY_INIT3 | PHY_INIT4);
		if (mii_rw(dev, np->phyaddr, MII_RESV1, phy_reserved)) {
			printk(KERN_INFO "%s: phy init failed.\n", pci_name(np->pci_dev));
			return PHY_ERROR;
		}
		phy_reserved = mii_rw(dev, np->phyaddr, MII_NCONFIG, MII_READ);
		phy_reserved |= PHY_INIT5;
		if (mii_rw(dev, np->phyaddr, MII_NCONFIG, phy_reserved)) {
			printk(KERN_INFO "%s: phy init failed.\n", pci_name(np->pci_dev));
			return PHY_ERROR;
		}
	}
	if (np->phy_oui == PHY_OUI_CICADA) {
		phy_reserved = mii_rw(dev, np->phyaddr, MII_SREVISION, MII_READ);
		phy_reserved |= PHY_INIT6;
		if (mii_rw(dev, np->phyaddr, MII_SREVISION, phy_reserved)) {
			printk(KERN_INFO "%s: phy init failed.\n", pci_name(np->pci_dev));
			return PHY_ERROR;
		}
	}

	/* restart auto negotiation */
	mii_control = mii_rw(dev, np->phyaddr, MII_BMCR, MII_READ);
	mii_control |= (BMCR_ANRESTART | BMCR_ANENABLE);
	if (mii_rw(dev, np->phyaddr, MII_BMCR, mii_control)) {
		return PHY_ERROR;
	}

	return 0;
}
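/*
 * Note that nv_start_rx (re)writes np->linkspeed to the NvRegLinkSpeed
 * register, so the link speed/duplex fields should be set up before the
 * receiver is started (cf. changelog entry 0.13 above).
 */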
static void nv_start_rx(struct net_device *dev)
{
	struct fe_priv *np = netdev_priv(dev);
	u8 __iomem *base = get_hwbase(dev);

	dprintk(KERN_DEBUG "%s: nv_start_rx\n", dev->name);
	/* Already running? Stop it. */
	if (readl(base + NvRegReceiverControl) & NVREG_RCVCTL_START) {
		writel(0, base + NvRegReceiverControl);
		pci_push(base);
	}
	writel(np->linkspeed, base + NvRegLinkSpeed);
	pci_push(base);
	writel(NVREG_RCVCTL_START, base + NvRegReceiverControl);
	dprintk(KERN_DEBUG "%s: nv_start_rx to duplex %d, speed 0x%08x.\n",
				dev->name, np->duplex, np->linkspeed);
	pci_push(base);
}

static void nv_stop_rx(struct net_device *dev)
{
	u8 __iomem *base = get_hwbase(dev);

	dprintk(KERN_DEBUG "%s: nv_stop_rx\n", dev->name);
	writel(0, base + NvRegReceiverControl);
	reg_delay(dev, NvRegReceiverStatus, NVREG_RCVSTAT_BUSY, 0,
			NV_RXSTOP_DELAY1, NV_RXSTOP_DELAY1MAX,
			KERN_INFO "nv_stop_rx: ReceiverStatus remained busy");

	udelay(NV_RXSTOP_DELAY2);
	writel(0, base + NvRegLinkSpeed);
}

static void nv_start_tx(struct net_device *dev)
{
	u8 __iomem *base = get_hwbase(dev);

	dprintk(KERN_DEBUG "%s: nv_start_tx\n", dev->name);
	writel(NVREG_XMITCTL_START, base + NvRegTransmitterControl);
	pci_push(base);
}

static void nv_stop_tx(struct net_device *dev)
{
	u8 __iomem *base = get_hwbase(dev);

	dprintk(KERN_DEBUG "%s: nv_stop_tx\n", dev->name);
	writel(0, base + NvRegTransmitterControl);
	reg_delay(dev, NvRegTransmitterStatus, NVREG_XMITSTAT_BUSY, 0,
			NV_TXSTOP_DELAY1, NV_TXSTOP_DELAY1MAX,
			KERN_INFO "nv_stop_tx: TransmitterStatus remained busy");

	udelay(NV_TXSTOP_DELAY2);
	writel(0, base + NvRegUnknownTransmitterReg);
}

static void nv_txrx_reset(struct net_device *dev)
{
	struct fe_priv *np = netdev_priv(dev);
	u8 __iomem *base = get_hwbase(dev);

	dprintk(KERN_DEBUG "%s: nv_txrx_reset\n", dev->name);
	writel(NVREG_TXRXCTL_BIT2 | NVREG_TXRXCTL_RESET | np->txrxctl_bits, base + NvRegTxRxControl);
	pci_push(base);
	udelay(NV_TXRX_RESET_DELAY);
	writel(NVREG_TXRXCTL_BIT2 | np->txrxctl_bits, base + NvRegTxRxControl);
	pci_push(base);
}

static void nv_mac_reset(struct net_device *dev)
{
	struct fe_priv *np = netdev_priv(dev);
	u8 __iomem *base = get_hwbase(dev);

	dprintk(KERN_DEBUG "%s: nv_mac_reset\n", dev->name);
	writel(NVREG_TXRXCTL_BIT2 | NVREG_TXRXCTL_RESET | np->txrxctl_bits, base + NvRegTxRxControl);
	pci_push(base);
	writel(NVREG_MAC_RESET_ASSERT, base + NvRegMacReset);
	pci_push(base);
	udelay(NV_MAC_RESET_DELAY);
	writel(0, base + NvRegMacReset);
	pci_push(base);
	udelay(NV_MAC_RESET_DELAY);
	writel(NVREG_TXRXCTL_BIT2 | np->txrxctl_bits, base + NvRegTxRxControl);
	pci_push(base);
}

/*
 * nv_get_stats: dev->get_stats function
 * Get latest stats value from the nic.
 * Called with read_lock(&dev_base_lock) held for read -
 * only synchronized against unregister_netdevice.
 */
static struct net_device_stats *nv_get_stats(struct net_device *dev)
{
	struct fe_priv *np = netdev_priv(dev);

	/* It seems that the nic always generates interrupts and doesn't
	 * accumulate errors internally. Thus the current values in np->stats
	 * are already up to date.
	 */
	return &np->stats;
}
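/*
 * Ring bookkeeping, as used by the rx/tx paths below: cur_rx/refill_rx and
 * next_tx/nic_tx are free-running counters, the actual ring slot is the
 * counter modulo RX_RING resp. TX_RING, and the difference between the two
 * counters of a pair gives the number of descriptors currently in flight.
 */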
/*
 * nv_alloc_rx: fill rx ring entries.
 * Return 1 if the allocations for the skbs failed and the
 * rx engine is without Available descriptors
 */
static int nv_alloc_rx(struct net_device *dev)
{
	struct fe_priv *np = netdev_priv(dev);
	unsigned int refill_rx = np->refill_rx;
	int nr;

	while (np->cur_rx != refill_rx) {
		struct sk_buff *skb;

		nr = refill_rx % RX_RING;
		if (np->rx_skbuff[nr] == NULL) {

			skb = dev_alloc_skb(np->rx_buf_sz + NV_RX_ALLOC_PAD);
			if (!skb)
				break;

			skb->dev = dev;
			np->rx_skbuff[nr] = skb;
		} else {
			skb = np->rx_skbuff[nr];
		}
		np->rx_dma[nr] = pci_map_single(np->pci_dev, skb->data,
					skb->end-skb->data, PCI_DMA_FROMDEVICE);
		if (np->desc_ver == DESC_VER_1 || np->desc_ver == DESC_VER_2) {
			np->rx_ring.orig[nr].PacketBuffer = cpu_to_le32(np->rx_dma[nr]);
			wmb();
			np->rx_ring.orig[nr].FlagLen = cpu_to_le32(np->rx_buf_sz | NV_RX_AVAIL);
		} else {
			np->rx_ring.ex[nr].PacketBufferHigh = cpu_to_le64(np->rx_dma[nr]) >> 32;
			np->rx_ring.ex[nr].PacketBufferLow = cpu_to_le64(np->rx_dma[nr]) & 0x0FFFFFFFF;
			wmb();
			np->rx_ring.ex[nr].FlagLen = cpu_to_le32(np->rx_buf_sz | NV_RX2_AVAIL);
		}
		dprintk(KERN_DEBUG "%s: nv_alloc_rx: Packet %d marked as Available\n",
					dev->name, refill_rx);
		refill_rx++;
	}
	np->refill_rx = refill_rx;
	if (np->cur_rx - refill_rx == RX_RING)
		return 1;
	return 0;
}

static void nv_do_rx_refill(unsigned long data)
{
	struct net_device *dev = (struct net_device *) data;
	struct fe_priv *np = netdev_priv(dev);

	if (!using_multi_irqs(dev)) {
		if (np->msi_flags & NV_MSI_X_ENABLED)
			disable_irq(np->msi_x_entry[NV_MSI_X_VECTOR_ALL].vector);
		else
			disable_irq(dev->irq);
	} else {
		disable_irq(np->msi_x_entry[NV_MSI_X_VECTOR_RX].vector);
	}
	if (nv_alloc_rx(dev)) {
		spin_lock_irq(&np->lock);
		if (!np->in_shutdown)
			mod_timer(&np->oom_kick, jiffies + OOM_REFILL);
		spin_unlock_irq(&np->lock);
	}
	if (!using_multi_irqs(dev)) {
		if (np->msi_flags & NV_MSI_X_ENABLED)
			enable_irq(np->msi_x_entry[NV_MSI_X_VECTOR_ALL].vector);
		else
			enable_irq(dev->irq);
	} else {
		enable_irq(np->msi_x_entry[NV_MSI_X_VECTOR_RX].vector);
	}
}

static void nv_init_rx(struct net_device *dev)
{
	struct fe_priv *np = netdev_priv(dev);
	int i;

	np->cur_rx = RX_RING;
	np->refill_rx = 0;
	for (i = 0; i < RX_RING; i++)
		if (np->desc_ver == DESC_VER_1 || np->desc_ver == DESC_VER_2)
			np->rx_ring.orig[i].FlagLen = 0;
		else
			np->rx_ring.ex[i].FlagLen = 0;
}
static void nv_init_tx(struct net_device *dev)
{
	struct fe_priv *np = netdev_priv(dev);
	int i;

	np->next_tx = np->nic_tx = 0;
	for (i = 0; i < TX_RING; i++) {
		if (np->desc_ver == DESC_VER_1 || np->desc_ver == DESC_VER_2)
			np->tx_ring.orig[i].FlagLen = 0;
		else
			np->tx_ring.ex[i].FlagLen = 0;
		np->tx_skbuff[i] = NULL;
		np->tx_dma[i] = 0;
	}
}

static int nv_init_ring(struct net_device *dev)
{
	nv_init_tx(dev);
	nv_init_rx(dev);
	return nv_alloc_rx(dev);
}

static int nv_release_txskb(struct net_device *dev, unsigned int skbnr)
{
	struct fe_priv *np = netdev_priv(dev);

	dprintk(KERN_INFO "%s: nv_release_txskb for skbnr %d\n",
		dev->name, skbnr);

	if (np->tx_dma[skbnr]) {
		pci_unmap_page(np->pci_dev, np->tx_dma[skbnr],
			       np->tx_dma_len[skbnr],
			       PCI_DMA_TODEVICE);
		np->tx_dma[skbnr] = 0;
	}

	if (np->tx_skbuff[skbnr]) {
		if (!np->ecdev) dev_kfree_skb_any(np->tx_skbuff[skbnr]);
		np->tx_skbuff[skbnr] = NULL;
		return 1;
	} else {
		return 0;
	}
}

static void nv_drain_tx(struct net_device *dev)
{
	struct fe_priv *np = netdev_priv(dev);
	unsigned int i;

	for (i = 0; i < TX_RING; i++) {
		if (np->desc_ver == DESC_VER_1 || np->desc_ver == DESC_VER_2)
			np->tx_ring.orig[i].FlagLen = 0;
		else
			np->tx_ring.ex[i].FlagLen = 0;
		if (nv_release_txskb(dev, i))
			np->stats.tx_dropped++;
	}
}

static void nv_drain_rx(struct net_device *dev)
{
	struct fe_priv *np = netdev_priv(dev);
	int i;
	for (i = 0; i < RX_RING; i++) {
		if (np->desc_ver == DESC_VER_1 || np->desc_ver == DESC_VER_2)
			np->rx_ring.orig[i].FlagLen = 0;
		else
			np->rx_ring.ex[i].FlagLen = 0;
		wmb();
		if (np->rx_skbuff[i]) {
			pci_unmap_single(np->pci_dev, np->rx_dma[i],
						np->rx_skbuff[i]->end-np->rx_skbuff[i]->data,
						PCI_DMA_FROMDEVICE);
			if (!np->ecdev) dev_kfree_skb(np->rx_skbuff[i]);
			np->rx_skbuff[i] = NULL;
		}
	}
}

static void drain_ring(struct net_device *dev)
{
	nv_drain_tx(dev);
	nv_drain_rx(dev);
}

/*
 * nv_start_xmit: dev->hard_start_xmit function
 * Called with dev->xmit_lock held.
 */
static int nv_start_xmit(struct sk_buff *skb, struct net_device *dev)
{
	struct fe_priv *np = netdev_priv(dev);
	u32 tx_flags = 0;
	u32 tx_flags_extra = (np->desc_ver == DESC_VER_1 ? NV_TX_LASTPACKET : NV_TX2_LASTPACKET);
	unsigned int fragments = skb_shinfo(skb)->nr_frags;
	unsigned int nr = (np->next_tx - 1) % TX_RING;
	unsigned int start_nr = np->next_tx % TX_RING;
	unsigned int i;
	u32 offset = 0;
	u32 bcnt;
	u32 size = skb->len-skb->data_len;
	u32 entries = (size >> NV_TX2_TSO_MAX_SHIFT) + ((size & (NV_TX2_TSO_MAX_SIZE-1)) ? 1 : 0);
	u32 tx_flags_vlan = 0;

	/* add fragments to entries count */
	for (i = 0; i < fragments; i++) {
		entries += (skb_shinfo(skb)->frags[i].size >> NV_TX2_TSO_MAX_SHIFT) +
			   ((skb_shinfo(skb)->frags[i].size & (NV_TX2_TSO_MAX_SIZE-1)) ? 1 : 0);
	}

	if (!np->ecdev) {
		spin_lock_irq(&np->lock);

		if ((np->next_tx - np->nic_tx + entries - 1) > TX_LIMIT_STOP) {
			spin_unlock_irq(&np->lock);
			netif_stop_queue(dev);
			return NETDEV_TX_BUSY;
		}
	}

	/* setup the header buffer */
	do {
		bcnt = (size > NV_TX2_TSO_MAX_SIZE) ? NV_TX2_TSO_MAX_SIZE : size;
		nr = (nr + 1) % TX_RING;

		np->tx_dma[nr] = pci_map_single(np->pci_dev, skb->data + offset, bcnt,
						PCI_DMA_TODEVICE);
		np->tx_dma_len[nr] = bcnt;

		if (np->desc_ver == DESC_VER_1 || np->desc_ver == DESC_VER_2) {
			np->tx_ring.orig[nr].PacketBuffer = cpu_to_le32(np->tx_dma[nr]);
			np->tx_ring.orig[nr].FlagLen = cpu_to_le32((bcnt-1) | tx_flags);
		} else {
			np->tx_ring.ex[nr].PacketBufferHigh = cpu_to_le64(np->tx_dma[nr]) >> 32;
			np->tx_ring.ex[nr].PacketBufferLow = cpu_to_le64(np->tx_dma[nr]) & 0x0FFFFFFFF;
			np->tx_ring.ex[nr].FlagLen = cpu_to_le32((bcnt-1) | tx_flags);
		}
		tx_flags = np->tx_flags;
		offset += bcnt;
		size -= bcnt;
	} while(size);

	/* setup the fragments */
	for (i = 0; i < fragments; i++) {
		skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
		u32 size = frag->size;
		offset = 0;

		do {
			bcnt = (size > NV_TX2_TSO_MAX_SIZE) ? NV_TX2_TSO_MAX_SIZE : size;
			nr = (nr + 1) % TX_RING;

			np->tx_dma[nr] = pci_map_page(np->pci_dev, frag->page, frag->page_offset+offset, bcnt,
						      PCI_DMA_TODEVICE);
			np->tx_dma_len[nr] = bcnt;

			if (np->desc_ver == DESC_VER_1 || np->desc_ver == DESC_VER_2) {
				np->tx_ring.orig[nr].PacketBuffer = cpu_to_le32(np->tx_dma[nr]);
				np->tx_ring.orig[nr].FlagLen = cpu_to_le32((bcnt-1) | tx_flags);
			} else {
				np->tx_ring.ex[nr].PacketBufferHigh = cpu_to_le64(np->tx_dma[nr]) >> 32;
				np->tx_ring.ex[nr].PacketBufferLow = cpu_to_le64(np->tx_dma[nr]) & 0x0FFFFFFFF;
				np->tx_ring.ex[nr].FlagLen = cpu_to_le32((bcnt-1) | tx_flags);
			}
			offset += bcnt;
			size -= bcnt;
		} while (size);
	}

	/* set last fragment flag */
	if (np->desc_ver == DESC_VER_1 || np->desc_ver == DESC_VER_2) {
		np->tx_ring.orig[nr].FlagLen |= cpu_to_le32(tx_flags_extra);
	} else {
		np->tx_ring.ex[nr].FlagLen |= cpu_to_le32(tx_flags_extra);
	}

	np->tx_skbuff[nr] = skb;

#ifdef NETIF_F_TSO
	if (skb_shinfo(skb)->tso_size)
		tx_flags_extra = NV_TX2_TSO | (skb_shinfo(skb)->tso_size << NV_TX2_TSO_SHIFT);
	else
#endif
	tx_flags_extra = (skb->ip_summed == CHECKSUM_HW ? (NV_TX2_CHECKSUM_L3|NV_TX2_CHECKSUM_L4) : 0);

	/* vlan tag */
	if (np->vlangrp && vlan_tx_tag_present(skb)) {
		tx_flags_vlan = NV_TX3_VLAN_TAG_PRESENT | vlan_tx_tag_get(skb);
	}
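	/*
	 * tx_flags is 0 for the first descriptor and np->tx_flags for all
	 * later ones, so the chain is built with the first descriptor still
	 * unflagged; only now are its flags or-ed in below, which apparently
	 * hands the complete chain over to the hardware in one step.
	 */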
	/* set tx flags */
	if (np->desc_ver == DESC_VER_1 || np->desc_ver == DESC_VER_2) {
		np->tx_ring.orig[start_nr].FlagLen |= cpu_to_le32(tx_flags | tx_flags_extra);
	} else {
		np->tx_ring.ex[start_nr].TxVlan = cpu_to_le32(tx_flags_vlan);
		np->tx_ring.ex[start_nr].FlagLen |= cpu_to_le32(tx_flags | tx_flags_extra);
	}

	dprintk(KERN_DEBUG "%s: nv_start_xmit: packet %d (entries %d) queued for transmission. tx_flags_extra: %x\n",
		dev->name, np->next_tx, entries, tx_flags_extra);
	{
		int j;
		for (j=0; j<64; j++) {
			if ((j%16) == 0)
				dprintk("\n%03x:", j);
			dprintk(" %02x", ((unsigned char*)skb->data)[j]);
		}
		dprintk("\n");
	}

	np->next_tx += entries;

	dev->trans_start = jiffies;
	if (!np->ecdev) spin_unlock_irq(&np->lock);
	writel(NVREG_TXRXCTL_KICK|np->txrxctl_bits, get_hwbase(dev) + NvRegTxRxControl);
	pci_push(get_hwbase(dev));
	return NETDEV_TX_OK;
}

/*
 * nv_tx_done: check for completed packets, release the skbs.
 *
 * Caller must own np->lock.
 */
static void nv_tx_done(struct net_device *dev)
{
	struct fe_priv *np = netdev_priv(dev);
	u32 Flags;
	unsigned int i;
	struct sk_buff *skb;

	while (np->nic_tx != np->next_tx) {
		i = np->nic_tx % TX_RING;

		if (np->desc_ver == DESC_VER_1 || np->desc_ver == DESC_VER_2)
			Flags = le32_to_cpu(np->tx_ring.orig[i].FlagLen);
		else
			Flags = le32_to_cpu(np->tx_ring.ex[i].FlagLen);

		dprintk(KERN_DEBUG "%s: nv_tx_done: looking at packet %d, Flags 0x%x.\n",
					dev->name, np->nic_tx, Flags);
		if (Flags & NV_TX_VALID)
			break;
		if (np->desc_ver == DESC_VER_1) {
			if (Flags & NV_TX_LASTPACKET) {
				skb = np->tx_skbuff[i];
				if (Flags & (NV_TX_RETRYERROR|NV_TX_CARRIERLOST|NV_TX_LATECOLLISION|
					     NV_TX_UNDERFLOW|NV_TX_ERROR)) {
					if (Flags & NV_TX_UNDERFLOW)
						np->stats.tx_fifo_errors++;
					if (Flags & NV_TX_CARRIERLOST)
						np->stats.tx_carrier_errors++;
					np->stats.tx_errors++;
				} else {
					np->stats.tx_packets++;
					np->stats.tx_bytes += skb->len;
				}
			}
		} else {
			if (Flags & NV_TX2_LASTPACKET) {
				skb = np->tx_skbuff[i];
				if (Flags & (NV_TX2_RETRYERROR|NV_TX2_CARRIERLOST|NV_TX2_LATECOLLISION|
					     NV_TX2_UNDERFLOW|NV_TX2_ERROR)) {
					if (Flags & NV_TX2_UNDERFLOW)
						np->stats.tx_fifo_errors++;
					if (Flags & NV_TX2_CARRIERLOST)
						np->stats.tx_carrier_errors++;
					np->stats.tx_errors++;
				} else {
					np->stats.tx_packets++;
					np->stats.tx_bytes += skb->len;
				}
			}
		}
		nv_release_txskb(dev, i);
		np->nic_tx++;
	}
	if (!np->ecdev && np->next_tx - np->nic_tx < TX_LIMIT_START)
		netif_wake_queue(dev);
}

/*
 * nv_tx_timeout: dev->tx_timeout function
 * Called with dev->xmit_lock held.
 */
static void nv_tx_timeout(struct net_device *dev)
{
	struct fe_priv *np = netdev_priv(dev);
	u8 __iomem *base = get_hwbase(dev);
	u32 status;

	if (np->msi_flags & NV_MSI_X_ENABLED)
		status = readl(base + NvRegMSIXIrqStatus) & NVREG_IRQSTAT_MASK;
	else
		status = readl(base + NvRegIrqStatus) & NVREG_IRQSTAT_MASK;
	printk(KERN_INFO "%s: Got tx_timeout. irq: %08x\n", dev->name, status);

	{
		int i;

		printk(KERN_INFO "%s: Ring at %lx: next %d nic %d\n",
				dev->name, (unsigned long)np->ring_addr,
				np->next_tx, np->nic_tx);
		printk(KERN_INFO "%s: Dumping tx registers\n", dev->name);
		for (i=0;i<=np->register_size;i+= 32) {
			printk(KERN_INFO "%3x: %08x %08x %08x %08x %08x %08x %08x %08x\n",
					i,
					readl(base + i + 0), readl(base + i + 4),
					readl(base + i + 8), readl(base + i + 12),
					readl(base + i + 16), readl(base + i + 20),
					readl(base + i + 24), readl(base + i + 28));
		}
		printk(KERN_INFO "%s: Dumping tx ring\n", dev->name);
		for (i=0;i<TX_RING;i+= 4) {
			if (np->desc_ver == DESC_VER_1 || np->desc_ver == DESC_VER_2) {
				printk(KERN_INFO "%03x: %08x %08x // %08x %08x // %08x %08x // %08x %08x\n",
				       i,
				       le32_to_cpu(np->tx_ring.orig[i].PacketBuffer),
				       le32_to_cpu(np->tx_ring.orig[i].FlagLen),
				       le32_to_cpu(np->tx_ring.orig[i+1].PacketBuffer),
				       le32_to_cpu(np->tx_ring.orig[i+1].FlagLen),
				       le32_to_cpu(np->tx_ring.orig[i+2].PacketBuffer),
				       le32_to_cpu(np->tx_ring.orig[i+2].FlagLen),
				       le32_to_cpu(np->tx_ring.orig[i+3].PacketBuffer),
				       le32_to_cpu(np->tx_ring.orig[i+3].FlagLen));
			} else {
				printk(KERN_INFO "%03x: %08x %08x %08x // %08x %08x %08x // %08x %08x %08x // %08x %08x %08x\n",
				       i,
				       le32_to_cpu(np->tx_ring.ex[i].PacketBufferHigh),
				       le32_to_cpu(np->tx_ring.ex[i].PacketBufferLow),
				       le32_to_cpu(np->tx_ring.ex[i].FlagLen),
				       le32_to_cpu(np->tx_ring.ex[i+1].PacketBufferHigh),
				       le32_to_cpu(np->tx_ring.ex[i+1].PacketBufferLow),
				       le32_to_cpu(np->tx_ring.ex[i+1].FlagLen),
				       le32_to_cpu(np->tx_ring.ex[i+2].PacketBufferHigh),
				       le32_to_cpu(np->tx_ring.ex[i+2].PacketBufferLow),
				       le32_to_cpu(np->tx_ring.ex[i+2].FlagLen),
				       le32_to_cpu(np->tx_ring.ex[i+3].PacketBufferHigh),
				       le32_to_cpu(np->tx_ring.ex[i+3].PacketBufferLow),
				       le32_to_cpu(np->tx_ring.ex[i+3].FlagLen));
			}
		}
	}

	if (!np->ecdev) spin_lock_irq(&np->lock);

	/* 1) stop tx engine */
	nv_stop_tx(dev);

	/* 2) check that the packets were not sent already: */
	nv_tx_done(dev);

	/* 3) if there are dead entries: clear everything */
	if (np->next_tx != np->nic_tx) {
		printk(KERN_DEBUG "%s: tx_timeout: dead entries!\n", dev->name);
		nv_drain_tx(dev);
		np->next_tx = np->nic_tx = 0;
		setup_hw_rings(dev, NV_SETUP_TX_RING);
		if (!np->ecdev) netif_wake_queue(dev);
	}

	/* 4) restart tx engine */
	nv_start_tx(dev);
	if (!np->ecdev) spin_unlock_irq(&np->lock);
}

/*
 * Called when the nic notices a mismatch between the actual data len on the
 * wire and the len indicated in the 802 header
 */
static int nv_getlen(struct net_device *dev, void *packet, int datalen)
{
	int hdrlen;	/* length of the 802 header */
	int protolen;	/* length as stored in the proto field */

	/* 1) calculate len according to header */
	if ( ((struct vlan_ethhdr *)packet)->h_vlan_proto == __constant_htons(ETH_P_8021Q)) {
		protolen = ntohs( ((struct vlan_ethhdr *)packet)->h_vlan_encapsulated_proto );
		hdrlen = VLAN_HLEN;
	} else {
		protolen = ntohs( ((struct ethhdr *)packet)->h_proto);
		hdrlen = ETH_HLEN;
	}
	dprintk(KERN_DEBUG "%s: nv_getlen: datalen %d, protolen %d, hdrlen %d\n",
				dev->name, datalen, protolen, hdrlen);
	if (protolen > ETH_DATA_LEN)
		return datalen; /* Value in proto field not a len, no checks possible */

	protolen += hdrlen;
	/* consistency checks: */
	if (datalen > ETH_ZLEN) {
		if (datalen >= protolen) {
			/* more data on wire than in 802 header, trim off
			 * additional data.
			 */
			dprintk(KERN_DEBUG "%s: nv_getlen: accepting %d bytes.\n",
					dev->name, protolen);
			return protolen;
		} else {
			/* less data on wire than mentioned in header.
			 * Discard the packet.
			 */
			dprintk(KERN_DEBUG "%s: nv_getlen: discarding long packet.\n",
					dev->name);
			return -1;
		}
	} else {
		/* short packet. Accept only if 802 values are also short */
		if (protolen > ETH_ZLEN) {
			dprintk(KERN_DEBUG "%s: nv_getlen: discarding short packet.\n",
					dev->name);
			return -1;
		}
		dprintk(KERN_DEBUG "%s: nv_getlen: accepting %d bytes.\n",
				dev->name, datalen);
		return datalen;
	}
}

static void nv_rx_process(struct net_device *dev)
{
	struct fe_priv *np = netdev_priv(dev);
	u32 Flags;
	u32 vlanflags = 0;


	for (;;) {
		struct sk_buff *skb;
		int len;
		int i;
		if (np->cur_rx - np->refill_rx >= RX_RING)
			break;	/* we scanned the whole ring - do not continue */

		i = np->cur_rx % RX_RING;
		if (np->desc_ver == DESC_VER_1 || np->desc_ver == DESC_VER_2) {
			Flags = le32_to_cpu(np->rx_ring.orig[i].FlagLen);
			len = nv_descr_getlength(&np->rx_ring.orig[i], np->desc_ver);
		} else {
			Flags = le32_to_cpu(np->rx_ring.ex[i].FlagLen);
			len = nv_descr_getlength_ex(&np->rx_ring.ex[i], np->desc_ver);
			vlanflags = le32_to_cpu(np->rx_ring.ex[i].PacketBufferLow);
		}

		dprintk(KERN_DEBUG "%s: nv_rx_process: looking at packet %d, Flags 0x%x.\n",
					dev->name, np->cur_rx, Flags);

		if (Flags & NV_RX_AVAIL)
			break;	/* still owned by hardware, */
		/*
		 * the packet is for us - immediately tear down the pci mapping.
		 * TODO: check if a prefetch of the first cacheline improves
		 * the performance.
		 */
		pci_unmap_single(np->pci_dev, np->rx_dma[i],
				np->rx_skbuff[i]->end-np->rx_skbuff[i]->data,
				PCI_DMA_FROMDEVICE);

		{
			int j;
			dprintk(KERN_DEBUG "Dumping packet (flags 0x%x).",Flags);
			for (j=0; j<64; j++) {
				if ((j%16) == 0)
					dprintk("\n%03x:", j);
				dprintk(" %02x", ((unsigned char*)np->rx_skbuff[i]->data)[j]);
			}
			dprintk("\n");
		}
		/* look at what we actually got: */
		if (np->desc_ver == DESC_VER_1) {
			if (!(Flags & NV_RX_DESCRIPTORVALID))
				goto next_pkt;

			if (Flags & NV_RX_ERROR) {
				if (Flags & NV_RX_MISSEDFRAME) {
					np->stats.rx_missed_errors++;
					np->stats.rx_errors++;
					goto next_pkt;
				}
				if (Flags & (NV_RX_ERROR1|NV_RX_ERROR2|NV_RX_ERROR3)) {
					np->stats.rx_errors++;
					goto next_pkt;
				}
				if (Flags & NV_RX_CRCERR) {
					np->stats.rx_crc_errors++;
					np->stats.rx_errors++;
					goto next_pkt;
				}
				if (Flags & NV_RX_OVERFLOW) {
					np->stats.rx_over_errors++;
					np->stats.rx_errors++;
					goto next_pkt;
				}
				if (Flags & NV_RX_ERROR4) {
					len = nv_getlen(dev, np->rx_skbuff[i]->data, len);
					if (len < 0) {
						np->stats.rx_errors++;
						goto next_pkt;
					}
				}
				/* framing errors are soft errors. */
				if (Flags & NV_RX_FRAMINGERR) {
					if (Flags & NV_RX_SUBSTRACT1) {
						len--;
					}
				}
			}
		} else {
			if (!(Flags & NV_RX2_DESCRIPTORVALID))
				goto next_pkt;

			if (Flags & NV_RX2_ERROR) {
				if (Flags & (NV_RX2_ERROR1|NV_RX2_ERROR2|NV_RX2_ERROR3)) {
					np->stats.rx_errors++;
					goto next_pkt;
				}
				if (Flags & NV_RX2_CRCERR) {
					np->stats.rx_crc_errors++;
					np->stats.rx_errors++;
					goto next_pkt;
				}
				if (Flags & NV_RX2_OVERFLOW) {
					np->stats.rx_over_errors++;
					np->stats.rx_errors++;
					goto next_pkt;
				}
				if (Flags & NV_RX2_ERROR4) {
					len = nv_getlen(dev, np->rx_skbuff[i]->data, len);
					if (len < 0) {
						np->stats.rx_errors++;
						goto next_pkt;
					}
				}
				/* framing errors are soft errors */
				if (Flags & NV_RX2_FRAMINGERR) {
					if (Flags & NV_RX2_SUBSTRACT1) {
						len--;
					}
				}
			}
			Flags &= NV_RX2_CHECKSUMMASK;
			if (Flags == NV_RX2_CHECKSUMOK1 ||
			    Flags == NV_RX2_CHECKSUMOK2 ||
			    Flags == NV_RX2_CHECKSUMOK3) {
				dprintk(KERN_DEBUG "%s: hw checksum hit!.\n", dev->name);
				np->rx_skbuff[i]->ip_summed = CHECKSUM_UNNECESSARY;
			} else {
				dprintk(KERN_DEBUG "%s: hwchecksum miss!.\n", dev->name);
			}
		}
		if (np->ecdev) {
			ecdev_receive(np->ecdev, np->rx_skbuff[i]->data, len);
		}
		else {
			/* got a valid packet - forward it to the network core */
			skb = np->rx_skbuff[i];
			np->rx_skbuff[i] = NULL;

			skb_put(skb, len);
			skb->protocol = eth_type_trans(skb, dev);
			dprintk(KERN_DEBUG "%s: nv_rx_process: packet %d with %d bytes, proto %d accepted.\n",
						dev->name, np->cur_rx, len, skb->protocol);
			if (np->vlangrp && (vlanflags & NV_RX3_VLAN_TAG_PRESENT)) {
				vlan_hwaccel_rx(skb, np->vlangrp, vlanflags & NV_RX3_VLAN_TAG_MASK);
			} else {
				netif_rx(skb);
			}
		}
		dev->last_rx = jiffies;
fp@581: np->stats.rx_packets++; fp@581: np->stats.rx_bytes += len; fp@581: next_pkt: fp@581: np->cur_rx++; fp@581: } fp@581: } fp@581: fp@581: static void set_bufsize(struct net_device *dev) fp@581: { fp@581: struct fe_priv *np = netdev_priv(dev); fp@581: fp@581: if (dev->mtu <= ETH_DATA_LEN) fp@581: np->rx_buf_sz = ETH_DATA_LEN + NV_RX_HEADERS; fp@581: else fp@581: np->rx_buf_sz = dev->mtu + NV_RX_HEADERS; fp@581: } fp@581: fp@581: /* fp@581: * nv_change_mtu: dev->change_mtu function fp@581: * Called with dev_base_lock held for read. fp@581: */ fp@581: static int nv_change_mtu(struct net_device *dev, int new_mtu) fp@581: { fp@581: struct fe_priv *np = netdev_priv(dev); fp@581: int old_mtu; fp@581: fp@581: if (new_mtu < 64 || new_mtu > np->pkt_limit) fp@581: return -EINVAL; fp@581: fp@581: old_mtu = dev->mtu; fp@581: dev->mtu = new_mtu; fp@581: fp@581: /* return early if the buffer sizes will not change */ fp@581: if (old_mtu <= ETH_DATA_LEN && new_mtu <= ETH_DATA_LEN) fp@581: return 0; fp@581: if (old_mtu == new_mtu) fp@581: return 0; fp@581: fp@581: /* synchronized against open : rtnl_lock() held by caller */ fp@581: if (netif_running(dev)) { fp@581: u8 __iomem *base = get_hwbase(dev); fp@581: /* fp@581: * It seems that the nic preloads valid ring entries into an fp@581: * internal buffer. The procedure for flushing everything is fp@581: * guessed, there is probably a simpler approach. fp@581: * Changing the MTU is a rare event, it shouldn't matter. fp@581: */ fp@581: nv_disable_irq(dev); fp@581: spin_lock_bh(&dev->xmit_lock); fp@581: spin_lock(&np->lock); fp@581: /* stop engines */ fp@581: nv_stop_rx(dev); fp@581: nv_stop_tx(dev); fp@581: nv_txrx_reset(dev); fp@581: /* drain rx queue */ fp@581: nv_drain_rx(dev); fp@581: nv_drain_tx(dev); fp@581: /* reinit driver view of the rx queue */ fp@581: nv_init_rx(dev); fp@581: nv_init_tx(dev); fp@581: /* alloc new rx buffers */ fp@581: set_bufsize(dev); fp@581: if (nv_alloc_rx(dev)) { fp@581: if (!np->in_shutdown) fp@581: mod_timer(&np->oom_kick, jiffies + OOM_REFILL); fp@581: } fp@581: /* reinit nic view of the rx queue */ fp@581: writel(np->rx_buf_sz, base + NvRegOffloadConfig); fp@581: setup_hw_rings(dev, NV_SETUP_RX_RING | NV_SETUP_TX_RING); fp@581: writel( ((RX_RING-1) << NVREG_RINGSZ_RXSHIFT) + ((TX_RING-1) << NVREG_RINGSZ_TXSHIFT), fp@581: base + NvRegRingSizes); fp@581: pci_push(base); fp@581: writel(NVREG_TXRXCTL_KICK|np->txrxctl_bits, get_hwbase(dev) + NvRegTxRxControl); fp@581: pci_push(base); fp@581: fp@581: /* restart rx engine */ fp@581: nv_start_rx(dev); fp@581: nv_start_tx(dev); fp@581: spin_unlock(&np->lock); fp@581: spin_unlock_bh(&dev->xmit_lock); fp@581: nv_enable_irq(dev); fp@581: } fp@581: return 0; fp@581: } fp@581: fp@581: static void nv_copy_mac_to_hw(struct net_device *dev) fp@581: { fp@581: u8 __iomem *base = get_hwbase(dev); fp@581: u32 mac[2]; fp@581: fp@581: mac[0] = (dev->dev_addr[0] << 0) + (dev->dev_addr[1] << 8) + fp@581: (dev->dev_addr[2] << 16) + (dev->dev_addr[3] << 24); fp@581: mac[1] = (dev->dev_addr[4] << 0) + (dev->dev_addr[5] << 8); fp@581: fp@581: writel(mac[0], base + NvRegMacAddrA); fp@581: writel(mac[1], base + NvRegMacAddrB); fp@581: } fp@581: fp@581: /* fp@581: * nv_set_mac_address: dev->set_mac_address function fp@581: * Called with rtnl_lock() held. 
fp@581: */ fp@581: static int nv_set_mac_address(struct net_device *dev, void *addr) fp@581: { fp@581: struct fe_priv *np = netdev_priv(dev); fp@581: struct sockaddr *macaddr = (struct sockaddr*)addr; fp@581: fp@581: if(!is_valid_ether_addr(macaddr->sa_data)) fp@581: return -EADDRNOTAVAIL; fp@581: fp@581: /* synchronized against open : rtnl_lock() held by caller */ fp@581: memcpy(dev->dev_addr, macaddr->sa_data, ETH_ALEN); fp@581: fp@581: if (netif_running(dev)) { fp@581: spin_lock_bh(&dev->xmit_lock); fp@581: spin_lock_irq(&np->lock); fp@581: fp@581: /* stop rx engine */ fp@581: nv_stop_rx(dev); fp@581: fp@581: /* set mac address */ fp@581: nv_copy_mac_to_hw(dev); fp@581: fp@581: /* restart rx engine */ fp@581: nv_start_rx(dev); fp@581: spin_unlock_irq(&np->lock); fp@581: spin_unlock_bh(&dev->xmit_lock); fp@581: } else { fp@581: nv_copy_mac_to_hw(dev); fp@581: } fp@581: return 0; fp@581: } fp@581: fp@581: /* fp@581: * nv_set_multicast: dev->set_multicast function fp@581: * Called with dev->xmit_lock held. fp@581: */ fp@581: static void nv_set_multicast(struct net_device *dev) fp@581: { fp@581: struct fe_priv *np = netdev_priv(dev); fp@581: u8 __iomem *base = get_hwbase(dev); fp@581: u32 addr[2]; fp@581: u32 mask[2]; fp@581: u32 pff; fp@581: fp@581: memset(addr, 0, sizeof(addr)); fp@581: memset(mask, 0, sizeof(mask)); fp@581: fp@581: if (dev->flags & IFF_PROMISC) { fp@581: printk(KERN_NOTICE "%s: Promiscuous mode enabled.\n", dev->name); fp@581: pff = NVREG_PFF_PROMISC; fp@581: } else { fp@581: pff = NVREG_PFF_MYADDR; fp@581: fp@581: if (dev->flags & IFF_ALLMULTI || dev->mc_list) { fp@581: u32 alwaysOff[2]; fp@581: u32 alwaysOn[2]; fp@581: fp@581: alwaysOn[0] = alwaysOn[1] = alwaysOff[0] = alwaysOff[1] = 0xffffffff; fp@581: if (dev->flags & IFF_ALLMULTI) { fp@581: alwaysOn[0] = alwaysOn[1] = alwaysOff[0] = alwaysOff[1] = 0; fp@581: } else { fp@581: struct dev_mc_list *walk; fp@581: fp@581: walk = dev->mc_list; fp@581: while (walk != NULL) { fp@581: u32 a, b; fp@581: a = le32_to_cpu(*(u32 *) walk->dmi_addr); fp@581: b = le16_to_cpu(*(u16 *) (&walk->dmi_addr[4])); fp@581: alwaysOn[0] &= a; fp@581: alwaysOff[0] &= ~a; fp@581: alwaysOn[1] &= b; fp@581: alwaysOff[1] &= ~b; fp@581: walk = walk->next; fp@581: } fp@581: } fp@581: addr[0] = alwaysOn[0]; fp@581: addr[1] = alwaysOn[1]; fp@581: mask[0] = alwaysOn[0] | alwaysOff[0]; fp@581: mask[1] = alwaysOn[1] | alwaysOff[1]; fp@581: } fp@581: } fp@581: addr[0] |= NVREG_MCASTADDRA_FORCE; fp@581: pff |= NVREG_PFF_ALWAYS; fp@581: spin_lock_irq(&np->lock); fp@581: nv_stop_rx(dev); fp@581: writel(addr[0], base + NvRegMulticastAddrA); fp@581: writel(addr[1], base + NvRegMulticastAddrB); fp@581: writel(mask[0], base + NvRegMulticastMaskA); fp@581: writel(mask[1], base + NvRegMulticastMaskB); fp@581: writel(pff, base + NvRegPacketFilterFlags); fp@581: dprintk(KERN_INFO "%s: reconfiguration for multicast lists.\n", fp@581: dev->name); fp@581: nv_start_rx(dev); fp@581: spin_unlock_irq(&np->lock); fp@581: } fp@581: fp@581: /** fp@581: * nv_update_linkspeed: Setup the MAC according to the link partner fp@581: * @dev: Network device to be configured fp@581: * fp@581: * The function queries the PHY and checks if there is a link partner. fp@581: * If yes, then it sets up the MAC accordingly. Otherwise, the MAC is fp@581: * set to 10 MBit HD. fp@581: * fp@581: * The function returns 0 if there is no link partner and 1 if there is fp@581: * a good link partner. 
fp@581: */ fp@581: static int nv_update_linkspeed(struct net_device *dev) fp@581: { fp@581: struct fe_priv *np = netdev_priv(dev); fp@581: u8 __iomem *base = get_hwbase(dev); fp@581: int adv, lpa; fp@581: int newls = np->linkspeed; fp@581: int newdup = np->duplex; fp@581: int mii_status; fp@581: int retval = 0; fp@581: u32 control_1000, status_1000, phyreg; fp@581: fp@581: /* BMSR_LSTATUS is latched, read it twice: fp@581: * we want the current value. fp@581: */ fp@581: mii_rw(dev, np->phyaddr, MII_BMSR, MII_READ); fp@581: mii_status = mii_rw(dev, np->phyaddr, MII_BMSR, MII_READ); fp@581: fp@581: if (!(mii_status & BMSR_LSTATUS)) { fp@581: dprintk(KERN_DEBUG "%s: no link detected by phy - falling back to 10HD.\n", fp@581: dev->name); fp@581: newls = NVREG_LINKSPEED_FORCE|NVREG_LINKSPEED_10; fp@581: newdup = 0; fp@581: retval = 0; fp@581: goto set_speed; fp@581: } fp@581: fp@581: if (np->autoneg == 0) { fp@581: dprintk(KERN_DEBUG "%s: nv_update_linkspeed: autoneg off, PHY set to 0x%04x.\n", fp@581: dev->name, np->fixed_mode); fp@581: if (np->fixed_mode & LPA_100FULL) { fp@581: newls = NVREG_LINKSPEED_FORCE|NVREG_LINKSPEED_100; fp@581: newdup = 1; fp@581: } else if (np->fixed_mode & LPA_100HALF) { fp@581: newls = NVREG_LINKSPEED_FORCE|NVREG_LINKSPEED_100; fp@581: newdup = 0; fp@581: } else if (np->fixed_mode & LPA_10FULL) { fp@581: newls = NVREG_LINKSPEED_FORCE|NVREG_LINKSPEED_10; fp@581: newdup = 1; fp@581: } else { fp@581: newls = NVREG_LINKSPEED_FORCE|NVREG_LINKSPEED_10; fp@581: newdup = 0; fp@581: } fp@581: retval = 1; fp@581: goto set_speed; fp@581: } fp@581: /* check auto negotiation is complete */ fp@581: if (!(mii_status & BMSR_ANEGCOMPLETE)) { fp@581: /* still in autonegotiation - configure nic for 10 MBit HD and wait. */ fp@581: newls = NVREG_LINKSPEED_FORCE|NVREG_LINKSPEED_10; fp@581: newdup = 0; fp@581: retval = 0; fp@581: dprintk(KERN_DEBUG "%s: autoneg not completed - falling back to 10HD.\n", dev->name); fp@581: goto set_speed; fp@581: } fp@581: fp@581: retval = 1; fp@581: if (np->gigabit == PHY_GIGABIT) { fp@581: control_1000 = mii_rw(dev, np->phyaddr, MII_1000BT_CR, MII_READ); fp@581: status_1000 = mii_rw(dev, np->phyaddr, MII_1000BT_SR, MII_READ); fp@581: fp@581: if ((control_1000 & ADVERTISE_1000FULL) && fp@581: (status_1000 & LPA_1000FULL)) { fp@581: dprintk(KERN_DEBUG "%s: nv_update_linkspeed: GBit ethernet detected.\n", fp@581: dev->name); fp@581: newls = NVREG_LINKSPEED_FORCE|NVREG_LINKSPEED_1000; fp@581: newdup = 1; fp@581: goto set_speed; fp@581: } fp@581: } fp@581: fp@581: adv = mii_rw(dev, np->phyaddr, MII_ADVERTISE, MII_READ); fp@581: lpa = mii_rw(dev, np->phyaddr, MII_LPA, MII_READ); fp@581: dprintk(KERN_DEBUG "%s: nv_update_linkspeed: PHY advertises 0x%04x, lpa 0x%04x.\n", fp@581: dev->name, adv, lpa); fp@581: fp@581: /* FIXME: handle parallel detection properly */ fp@581: lpa = lpa & adv; fp@581: if (lpa & LPA_100FULL) { fp@581: newls = NVREG_LINKSPEED_FORCE|NVREG_LINKSPEED_100; fp@581: newdup = 1; fp@581: } else if (lpa & LPA_100HALF) { fp@581: newls = NVREG_LINKSPEED_FORCE|NVREG_LINKSPEED_100; fp@581: newdup = 0; fp@581: } else if (lpa & LPA_10FULL) { fp@581: newls = NVREG_LINKSPEED_FORCE|NVREG_LINKSPEED_10; fp@581: newdup = 1; fp@581: } else if (lpa & LPA_10HALF) { fp@581: newls = NVREG_LINKSPEED_FORCE|NVREG_LINKSPEED_10; fp@581: newdup = 0; fp@581: } else { fp@581: dprintk(KERN_DEBUG "%s: bad ability %04x - falling back to 10HD.\n", dev->name, lpa); fp@581: newls = NVREG_LINKSPEED_FORCE|NVREG_LINKSPEED_10; fp@581: newdup = 0; fp@581: } fp@581: fp@581: 
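fp@581: /* set_speed: program the MAC (seed force bits, PHY interface mode, duplex and link speed registers) for the mode selected above. */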
set_speed: fp@581: if (np->duplex == newdup && np->linkspeed == newls) fp@581: return retval; fp@581: fp@581: dprintk(KERN_INFO "%s: changing link setting from %d/%d to %d/%d.\n", fp@581: dev->name, np->linkspeed, np->duplex, newls, newdup); fp@581: fp@581: np->duplex = newdup; fp@581: np->linkspeed = newls; fp@581: fp@581: if (np->gigabit == PHY_GIGABIT) { fp@581: phyreg = readl(base + NvRegRandomSeed); fp@581: phyreg &= ~(0x3FF00); fp@581: if ((np->linkspeed & 0xFFF) == NVREG_LINKSPEED_10) fp@581: phyreg |= NVREG_RNDSEED_FORCE3; fp@581: else if ((np->linkspeed & 0xFFF) == NVREG_LINKSPEED_100) fp@581: phyreg |= NVREG_RNDSEED_FORCE2; fp@581: else if ((np->linkspeed & 0xFFF) == NVREG_LINKSPEED_1000) fp@581: phyreg |= NVREG_RNDSEED_FORCE; fp@581: writel(phyreg, base + NvRegRandomSeed); fp@581: } fp@581: fp@581: phyreg = readl(base + NvRegPhyInterface); fp@581: phyreg &= ~(PHY_HALF|PHY_100|PHY_1000); fp@581: if (np->duplex == 0) fp@581: phyreg |= PHY_HALF; fp@581: if ((np->linkspeed & NVREG_LINKSPEED_MASK) == NVREG_LINKSPEED_100) fp@581: phyreg |= PHY_100; fp@581: else if ((np->linkspeed & NVREG_LINKSPEED_MASK) == NVREG_LINKSPEED_1000) fp@581: phyreg |= PHY_1000; fp@581: writel(phyreg, base + NvRegPhyInterface); fp@581: fp@581: writel(NVREG_MISC1_FORCE | ( np->duplex ? 0 : NVREG_MISC1_HD), fp@581: base + NvRegMisc1); fp@581: pci_push(base); fp@581: writel(np->linkspeed, base + NvRegLinkSpeed); fp@581: pci_push(base); fp@581: fp@581: return retval; fp@581: } fp@581: fp@581: static void nv_linkchange(struct net_device *dev) fp@581: { fp@581: struct fe_priv *np = netdev_priv(dev); fp@581: fp@581: if (np->ecdev) { fp@592: int link = nv_update_linkspeed(dev); fp@670: ecdev_set_link(np->ecdev, link); fp@581: return; fp@581: } fp@581: fp@581: if (nv_update_linkspeed(dev)) { fp@581: if (!netif_carrier_ok(dev)) { fp@581: netif_carrier_on(dev); fp@581: printk(KERN_INFO "%s: link up.\n", dev->name); fp@581: nv_start_rx(dev); fp@581: } fp@581: } else { fp@581: if (netif_carrier_ok(dev)) { fp@581: netif_carrier_off(dev); fp@581: printk(KERN_INFO "%s: link down.\n", dev->name); fp@581: nv_stop_rx(dev); fp@581: } fp@581: } fp@581: } fp@581: fp@581: static void nv_link_irq(struct net_device *dev) fp@581: { fp@581: u8 __iomem *base = get_hwbase(dev); fp@581: u32 miistat; fp@581: fp@581: miistat = readl(base + NvRegMIIStatus); fp@581: writel(NVREG_MIISTAT_MASK, base + NvRegMIIStatus); fp@581: dprintk(KERN_INFO "%s: link change irq, status 0x%x.\n", dev->name, miistat); fp@581: fp@581: if (miistat & (NVREG_MIISTAT_LINKCHANGE)) fp@581: nv_linkchange(dev); fp@581: dprintk(KERN_DEBUG "%s: link change notification done.\n", dev->name); fp@581: } fp@581: fp@581: static irqreturn_t nv_nic_irq(int foo, void *data, struct pt_regs *regs) fp@581: { fp@581: struct net_device *dev = (struct net_device *) data; fp@581: struct fe_priv *np = netdev_priv(dev); fp@581: u8 __iomem *base = get_hwbase(dev); fp@581: u32 events; fp@581: int i; fp@581: fp@581: dprintk(KERN_DEBUG "%s: nv_nic_irq\n", dev->name); fp@581: fp@581: for (i=0; ; i++) { fp@581: if (!(np->msi_flags & NV_MSI_X_ENABLED)) { fp@581: events = readl(base + NvRegIrqStatus) & NVREG_IRQSTAT_MASK; fp@581: writel(NVREG_IRQSTAT_MASK, base + NvRegIrqStatus); fp@581: } else { fp@581: events = readl(base + NvRegMSIXIrqStatus) & NVREG_IRQSTAT_MASK; fp@581: writel(NVREG_IRQSTAT_MASK, base + NvRegMSIXIrqStatus); fp@581: } fp@581: pci_push(base); fp@581: dprintk(KERN_DEBUG "%s: irq: %08x\n", dev->name, events); fp@581: if (!(events & np->irqmask)) fp@581: break; fp@581: fp@592: 
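fp@592: /* When the EtherCAT master owns the device (np->ecdev set), this handler is called from ec_poll() and np->lock is left untaken. */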
if (!np->ecdev) spin_lock(&np->lock); fp@581: nv_tx_done(dev); fp@592: if (!np->ecdev) spin_unlock(&np->lock); fp@581: fp@581: nv_rx_process(dev); fp@581: if (nv_alloc_rx(dev)) { fp@581: spin_lock(&np->lock); fp@581: if (!np->in_shutdown) fp@581: mod_timer(&np->oom_kick, jiffies + OOM_REFILL); fp@581: spin_unlock(&np->lock); fp@581: } fp@581: fp@581: if (events & NVREG_IRQ_LINK) { fp@592: if (!np->ecdev) spin_lock(&np->lock); fp@581: nv_link_irq(dev); fp@592: if (!np->ecdev) spin_unlock(&np->lock); fp@581: } fp@581: if (np->need_linktimer && time_after(jiffies, np->link_timeout)) { fp@592: if (!np->ecdev) spin_lock(&np->lock); fp@581: nv_linkchange(dev); fp@592: if (!np->ecdev) spin_unlock(&np->lock); fp@581: np->link_timeout = jiffies + LINK_TIMEOUT; fp@581: } fp@581: if (events & (NVREG_IRQ_TX_ERR)) { fp@581: dprintk(KERN_DEBUG "%s: received irq with events 0x%x. Probably TX fail.\n", fp@581: dev->name, events); fp@581: } fp@581: if (events & (NVREG_IRQ_UNKNOWN)) { fp@581: printk(KERN_DEBUG "%s: received irq with unknown events 0x%x. Please report\n", fp@581: dev->name, events); fp@581: } fp@581: if (i > max_interrupt_work) { fp@592: if (!np->ecdev) { fp@592: spin_lock(&np->lock); fp@592: /* disable interrupts on the nic */ fp@592: if (!(np->msi_flags & NV_MSI_X_ENABLED)) fp@592: writel(0, base + NvRegIrqMask); fp@592: else fp@592: writel(np->irqmask, base + NvRegIrqMask); fp@592: pci_push(base); fp@592: fp@592: if (!np->in_shutdown) { fp@592: np->nic_poll_irq = np->irqmask; fp@592: mod_timer(&np->nic_poll, jiffies + POLL_WAIT); fp@592: } fp@592: spin_unlock(&np->lock); fp@581: } fp@581: printk(KERN_DEBUG "%s: too many iterations (%d) in nv_nic_irq.\n", dev->name, i); fp@581: break; fp@581: } fp@581: fp@581: } fp@581: dprintk(KERN_DEBUG "%s: nv_nic_irq completed\n", dev->name); fp@581: fp@581: return IRQ_RETVAL(i); fp@581: } fp@581: fp@581: static irqreturn_t nv_nic_irq_tx(int foo, void *data, struct pt_regs *regs) fp@581: { fp@581: struct net_device *dev = (struct net_device *) data; fp@581: struct fe_priv *np = netdev_priv(dev); fp@581: u8 __iomem *base = get_hwbase(dev); fp@581: u32 events; fp@581: int i; fp@581: fp@581: dprintk(KERN_DEBUG "%s: nv_nic_irq_tx\n", dev->name); fp@581: fp@581: for (i=0; ; i++) { fp@581: events = readl(base + NvRegMSIXIrqStatus) & NVREG_IRQ_TX_ALL; fp@581: writel(NVREG_IRQ_TX_ALL, base + NvRegMSIXIrqStatus); fp@581: pci_push(base); fp@581: dprintk(KERN_DEBUG "%s: tx irq: %08x\n", dev->name, events); fp@581: if (!(events & np->irqmask)) fp@581: break; fp@581: fp@592: if (!np->ecdev) spin_lock_irq(&np->lock); fp@581: nv_tx_done(dev); fp@592: if (!np->ecdev) spin_unlock_irq(&np->lock); fp@581: fp@581: if (events & (NVREG_IRQ_TX_ERR)) { fp@581: dprintk(KERN_DEBUG "%s: received irq with events 0x%x. 
Probably TX fail.\n", fp@581: dev->name, events); fp@581: } fp@581: if (i > max_interrupt_work) { fp@592: if (!np->ecdev) { fp@592: spin_lock_irq(&np->lock); fp@592: /* disable interrupts on the nic */ fp@592: writel(NVREG_IRQ_TX_ALL, base + NvRegIrqMask); fp@592: pci_push(base); fp@592: fp@592: if (!np->in_shutdown) { fp@592: np->nic_poll_irq |= NVREG_IRQ_TX_ALL; fp@592: mod_timer(&np->nic_poll, jiffies + POLL_WAIT); fp@592: } fp@592: spin_unlock_irq(&np->lock); fp@581: } fp@581: printk(KERN_DEBUG "%s: too many iterations (%d) in nv_nic_irq_tx.\n", dev->name, i); fp@581: break; fp@581: } fp@581: fp@581: } fp@581: dprintk(KERN_DEBUG "%s: nv_nic_irq_tx completed\n", dev->name); fp@581: fp@581: return IRQ_RETVAL(i); fp@581: } fp@581: fp@581: static irqreturn_t nv_nic_irq_rx(int foo, void *data, struct pt_regs *regs) fp@581: { fp@581: struct net_device *dev = (struct net_device *) data; fp@581: struct fe_priv *np = netdev_priv(dev); fp@581: u8 __iomem *base = get_hwbase(dev); fp@581: u32 events; fp@581: int i; fp@581: fp@581: dprintk(KERN_DEBUG "%s: nv_nic_irq_rx\n", dev->name); fp@581: fp@581: for (i=0; ; i++) { fp@581: events = readl(base + NvRegMSIXIrqStatus) & NVREG_IRQ_RX_ALL; fp@581: writel(NVREG_IRQ_RX_ALL, base + NvRegMSIXIrqStatus); fp@581: pci_push(base); fp@581: dprintk(KERN_DEBUG "%s: rx irq: %08x\n", dev->name, events); fp@581: if (!(events & np->irqmask)) fp@581: break; fp@581: fp@581: nv_rx_process(dev); fp@592: if (nv_alloc_rx(dev) && !np->ecdev) { fp@581: spin_lock_irq(&np->lock); fp@581: if (!np->in_shutdown) fp@581: mod_timer(&np->oom_kick, jiffies + OOM_REFILL); fp@581: spin_unlock_irq(&np->lock); fp@581: } fp@581: fp@581: if (i > max_interrupt_work) { fp@592: if (!np->ecdev) { fp@592: spin_lock_irq(&np->lock); fp@592: /* disable interrupts on the nic */ fp@592: writel(NVREG_IRQ_RX_ALL, base + NvRegIrqMask); fp@592: pci_push(base); fp@592: fp@592: if (!np->in_shutdown) { fp@592: np->nic_poll_irq |= NVREG_IRQ_RX_ALL; fp@592: mod_timer(&np->nic_poll, jiffies + POLL_WAIT); fp@592: } fp@592: spin_unlock_irq(&np->lock); fp@581: } fp@581: printk(KERN_DEBUG "%s: too many iterations (%d) in nv_nic_irq_rx.\n", dev->name, i); fp@581: break; fp@581: } fp@581: fp@581: } fp@581: dprintk(KERN_DEBUG "%s: nv_nic_irq_rx completed\n", dev->name); fp@581: fp@581: return IRQ_RETVAL(i); fp@581: } fp@581: fp@581: static irqreturn_t nv_nic_irq_other(int foo, void *data, struct pt_regs *regs) fp@581: { fp@581: struct net_device *dev = (struct net_device *) data; fp@581: struct fe_priv *np = netdev_priv(dev); fp@581: u8 __iomem *base = get_hwbase(dev); fp@581: u32 events; fp@581: int i; fp@581: fp@581: dprintk(KERN_DEBUG "%s: nv_nic_irq_other\n", dev->name); fp@581: fp@581: for (i=0; ; i++) { fp@581: events = readl(base + NvRegMSIXIrqStatus) & NVREG_IRQ_OTHER; fp@581: writel(NVREG_IRQ_OTHER, base + NvRegMSIXIrqStatus); fp@581: pci_push(base); fp@581: dprintk(KERN_DEBUG "%s: irq: %08x\n", dev->name, events); fp@581: if (!(events & np->irqmask)) fp@581: break; fp@581: fp@581: if (events & NVREG_IRQ_LINK) { fp@592: if (!np->ecdev) spin_lock_irq(&np->lock); fp@581: nv_link_irq(dev); fp@592: if (!np->ecdev) spin_unlock_irq(&np->lock); fp@581: } fp@581: if (np->need_linktimer && time_after(jiffies, np->link_timeout)) { fp@592: if (!np->ecdev) spin_lock_irq(&np->lock); fp@581: nv_linkchange(dev); fp@592: if (!np->ecdev) spin_unlock_irq(&np->lock); fp@581: np->link_timeout = jiffies + LINK_TIMEOUT; fp@581: } fp@581: if (events & (NVREG_IRQ_UNKNOWN)) { fp@581: printk(KERN_DEBUG "%s: received irq with 
unknown events 0x%x. Please report\n", fp@581: dev->name, events); fp@581: } fp@581: if (i > max_interrupt_work) { fp@592: if (!np->ecdev) { fp@592: spin_lock_irq(&np->lock); fp@592: /* disable interrupts on the nic */ fp@592: writel(NVREG_IRQ_OTHER, base + NvRegIrqMask); fp@592: pci_push(base); fp@592: fp@592: if (!np->in_shutdown) { fp@592: np->nic_poll_irq |= NVREG_IRQ_OTHER; fp@592: mod_timer(&np->nic_poll, jiffies + POLL_WAIT); fp@592: } fp@592: spin_unlock_irq(&np->lock); fp@581: } fp@581: printk(KERN_DEBUG "%s: too many iterations (%d) in nv_nic_irq_other.\n", dev->name, i); fp@581: break; fp@581: } fp@581: fp@581: } fp@581: dprintk(KERN_DEBUG "%s: nv_nic_irq_other completed\n", dev->name); fp@581: fp@581: return IRQ_RETVAL(i); fp@581: } fp@581: fp@581: void ec_poll(struct net_device *dev) fp@581: { fp@592: struct fe_priv *np = netdev_priv(dev); fp@592: fp@592: if (!using_multi_irqs(dev)) { fp@592: nv_nic_irq((int) 0, dev, (struct pt_regs *) NULL); fp@592: } else { fp@592: if (np->nic_poll_irq & NVREG_IRQ_RX_ALL) { fp@592: nv_nic_irq_rx((int) 0, dev, (struct pt_regs *) NULL); fp@592: } fp@592: if (np->nic_poll_irq & NVREG_IRQ_TX_ALL) { fp@592: nv_nic_irq_tx((int) 0, dev, (struct pt_regs *) NULL); fp@592: } fp@592: if (np->nic_poll_irq & NVREG_IRQ_OTHER) { fp@592: nv_nic_irq_other((int) 0, dev, (struct pt_regs *) NULL); fp@592: } fp@592: } fp@581: } fp@581: fp@581: static void nv_do_nic_poll(unsigned long data) fp@581: { fp@581: struct net_device *dev = (struct net_device *) data; fp@581: struct fe_priv *np = netdev_priv(dev); fp@581: u8 __iomem *base = get_hwbase(dev); fp@581: u32 mask = 0; fp@581: fp@581: /* fp@581: * First disable irq(s) and then fp@581: * reenable interrupts on the nic, we have to do this before calling fp@581: * nv_nic_irq because that may decide to do otherwise fp@581: */ fp@581: fp@581: if (!using_multi_irqs(dev)) { fp@581: if (np->msi_flags & NV_MSI_X_ENABLED) fp@581: disable_irq(np->msi_x_entry[NV_MSI_X_VECTOR_ALL].vector); fp@581: else fp@581: disable_irq(dev->irq); fp@581: mask = np->irqmask; fp@581: } else { fp@581: if (np->nic_poll_irq & NVREG_IRQ_RX_ALL) { fp@581: disable_irq(np->msi_x_entry[NV_MSI_X_VECTOR_RX].vector); fp@581: mask |= NVREG_IRQ_RX_ALL; fp@581: } fp@581: if (np->nic_poll_irq & NVREG_IRQ_TX_ALL) { fp@581: disable_irq(np->msi_x_entry[NV_MSI_X_VECTOR_TX].vector); fp@581: mask |= NVREG_IRQ_TX_ALL; fp@581: } fp@581: if (np->nic_poll_irq & NVREG_IRQ_OTHER) { fp@581: disable_irq(np->msi_x_entry[NV_MSI_X_VECTOR_OTHER].vector); fp@581: mask |= NVREG_IRQ_OTHER; fp@581: } fp@581: } fp@581: np->nic_poll_irq = 0; fp@581: fp@581: /* FIXME: Do we need synchronize_irq(dev->irq) here? 
*/ fp@581: fp@581: writel(mask, base + NvRegIrqMask); fp@581: pci_push(base); fp@581: fp@581: if (!using_multi_irqs(dev)) { fp@581: nv_nic_irq((int) 0, (void *) data, (struct pt_regs *) NULL); fp@581: if (np->msi_flags & NV_MSI_X_ENABLED) fp@581: enable_irq(np->msi_x_entry[NV_MSI_X_VECTOR_ALL].vector); fp@581: else fp@581: enable_irq(dev->irq); fp@581: } else { fp@581: if (np->nic_poll_irq & NVREG_IRQ_RX_ALL) { fp@581: nv_nic_irq_rx((int) 0, (void *) data, (struct pt_regs *) NULL); fp@581: enable_irq(np->msi_x_entry[NV_MSI_X_VECTOR_RX].vector); fp@581: } fp@581: if (np->nic_poll_irq & NVREG_IRQ_TX_ALL) { fp@581: nv_nic_irq_tx((int) 0, (void *) data, (struct pt_regs *) NULL); fp@581: enable_irq(np->msi_x_entry[NV_MSI_X_VECTOR_TX].vector); fp@581: } fp@581: if (np->nic_poll_irq & NVREG_IRQ_OTHER) { fp@581: nv_nic_irq_other((int) 0, (void *) data, (struct pt_regs *) NULL); fp@581: enable_irq(np->msi_x_entry[NV_MSI_X_VECTOR_OTHER].vector); fp@581: } fp@581: } fp@581: } fp@581: fp@581: #ifdef CONFIG_NET_POLL_CONTROLLER fp@581: static void nv_poll_controller(struct net_device *dev) fp@581: { fp@581: nv_do_nic_poll((unsigned long) dev); fp@581: } fp@581: #endif fp@581: fp@581: static void nv_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info) fp@581: { fp@581: struct fe_priv *np = netdev_priv(dev); fp@581: strcpy(info->driver, "forcedeth"); fp@581: strcpy(info->version, FORCEDETH_VERSION); fp@581: strcpy(info->bus_info, pci_name(np->pci_dev)); fp@581: } fp@581: fp@581: static void nv_get_wol(struct net_device *dev, struct ethtool_wolinfo *wolinfo) fp@581: { fp@581: struct fe_priv *np = netdev_priv(dev); fp@581: wolinfo->supported = WAKE_MAGIC; fp@581: fp@581: spin_lock_irq(&np->lock); fp@581: if (np->wolenabled) fp@581: wolinfo->wolopts = WAKE_MAGIC; fp@581: spin_unlock_irq(&np->lock); fp@581: } fp@581: fp@581: static int nv_set_wol(struct net_device *dev, struct ethtool_wolinfo *wolinfo) fp@581: { fp@581: struct fe_priv *np = netdev_priv(dev); fp@581: u8 __iomem *base = get_hwbase(dev); fp@581: fp@581: spin_lock_irq(&np->lock); fp@581: if (wolinfo->wolopts == 0) { fp@581: writel(0, base + NvRegWakeUpFlags); fp@581: np->wolenabled = 0; fp@581: } fp@581: if (wolinfo->wolopts & WAKE_MAGIC) { fp@581: writel(NVREG_WAKEUPFLAGS_ENABLE, base + NvRegWakeUpFlags); fp@581: np->wolenabled = 1; fp@581: } fp@581: spin_unlock_irq(&np->lock); fp@581: return 0; fp@581: } fp@581: fp@581: static int nv_get_settings(struct net_device *dev, struct ethtool_cmd *ecmd) fp@581: { fp@581: struct fe_priv *np = netdev_priv(dev); fp@581: int adv; fp@581: fp@581: spin_lock_irq(&np->lock); fp@581: ecmd->port = PORT_MII; fp@581: if (!netif_running(dev)) { fp@581: /* We do not track link speed / duplex setting if the fp@581: * interface is disabled. 
Force a link check */ fp@581: nv_update_linkspeed(dev); fp@581: } fp@581: switch(np->linkspeed & (NVREG_LINKSPEED_MASK)) { fp@581: case NVREG_LINKSPEED_10: fp@581: ecmd->speed = SPEED_10; fp@581: break; fp@581: case NVREG_LINKSPEED_100: fp@581: ecmd->speed = SPEED_100; fp@581: break; fp@581: case NVREG_LINKSPEED_1000: fp@581: ecmd->speed = SPEED_1000; fp@581: break; fp@581: } fp@581: ecmd->duplex = DUPLEX_HALF; fp@581: if (np->duplex) fp@581: ecmd->duplex = DUPLEX_FULL; fp@581: fp@581: ecmd->autoneg = np->autoneg; fp@581: fp@581: ecmd->advertising = ADVERTISED_MII; fp@581: if (np->autoneg) { fp@581: ecmd->advertising |= ADVERTISED_Autoneg; fp@581: adv = mii_rw(dev, np->phyaddr, MII_ADVERTISE, MII_READ); fp@581: } else { fp@581: adv = np->fixed_mode; fp@581: } fp@581: if (adv & ADVERTISE_10HALF) fp@581: ecmd->advertising |= ADVERTISED_10baseT_Half; fp@581: if (adv & ADVERTISE_10FULL) fp@581: ecmd->advertising |= ADVERTISED_10baseT_Full; fp@581: if (adv & ADVERTISE_100HALF) fp@581: ecmd->advertising |= ADVERTISED_100baseT_Half; fp@581: if (adv & ADVERTISE_100FULL) fp@581: ecmd->advertising |= ADVERTISED_100baseT_Full; fp@581: if (np->autoneg && np->gigabit == PHY_GIGABIT) { fp@581: adv = mii_rw(dev, np->phyaddr, MII_1000BT_CR, MII_READ); fp@581: if (adv & ADVERTISE_1000FULL) fp@581: ecmd->advertising |= ADVERTISED_1000baseT_Full; fp@581: } fp@581: fp@581: ecmd->supported = (SUPPORTED_Autoneg | fp@581: SUPPORTED_10baseT_Half | SUPPORTED_10baseT_Full | fp@581: SUPPORTED_100baseT_Half | SUPPORTED_100baseT_Full | fp@581: SUPPORTED_MII); fp@581: if (np->gigabit == PHY_GIGABIT) fp@581: ecmd->supported |= SUPPORTED_1000baseT_Full; fp@581: fp@581: ecmd->phy_address = np->phyaddr; fp@581: ecmd->transceiver = XCVR_EXTERNAL; fp@581: fp@581: /* ignore maxtxpkt, maxrxpkt for now */ fp@581: spin_unlock_irq(&np->lock); fp@581: return 0; fp@581: } fp@581: fp@581: static int nv_set_settings(struct net_device *dev, struct ethtool_cmd *ecmd) fp@581: { fp@581: struct fe_priv *np = netdev_priv(dev); fp@581: fp@581: if (ecmd->port != PORT_MII) fp@581: return -EINVAL; fp@581: if (ecmd->transceiver != XCVR_EXTERNAL) fp@581: return -EINVAL; fp@581: if (ecmd->phy_address != np->phyaddr) { fp@581: /* TODO: support switching between multiple phys. Should be fp@581: * trivial, but not enabled due to lack of test hardware. */ fp@581: return -EINVAL; fp@581: } fp@581: if (ecmd->autoneg == AUTONEG_ENABLE) { fp@581: u32 mask; fp@581: fp@581: mask = ADVERTISED_10baseT_Half | ADVERTISED_10baseT_Full | fp@581: ADVERTISED_100baseT_Half | ADVERTISED_100baseT_Full; fp@581: if (np->gigabit == PHY_GIGABIT) fp@581: mask |= ADVERTISED_1000baseT_Full; fp@581: fp@581: if ((ecmd->advertising & mask) == 0) fp@581: return -EINVAL; fp@581: fp@581: } else if (ecmd->autoneg == AUTONEG_DISABLE) { fp@581: /* Note: autonegotiation disable, speed 1000 intentionally fp@581: * forbidden - noone should need that. 
*/ fp@581: fp@581: if (ecmd->speed != SPEED_10 && ecmd->speed != SPEED_100) fp@581: return -EINVAL; fp@581: if (ecmd->duplex != DUPLEX_HALF && ecmd->duplex != DUPLEX_FULL) fp@581: return -EINVAL; fp@581: } else { fp@581: return -EINVAL; fp@581: } fp@581: fp@581: spin_lock_irq(&np->lock); fp@581: if (ecmd->autoneg == AUTONEG_ENABLE) { fp@581: int adv, bmcr; fp@581: fp@581: np->autoneg = 1; fp@581: fp@581: /* advertise only what has been requested */ fp@581: adv = mii_rw(dev, np->phyaddr, MII_ADVERTISE, MII_READ); fp@581: adv &= ~(ADVERTISE_ALL | ADVERTISE_100BASE4); fp@581: if (ecmd->advertising & ADVERTISED_10baseT_Half) fp@581: adv |= ADVERTISE_10HALF; fp@581: if (ecmd->advertising & ADVERTISED_10baseT_Full) fp@581: adv |= ADVERTISE_10FULL; fp@581: if (ecmd->advertising & ADVERTISED_100baseT_Half) fp@581: adv |= ADVERTISE_100HALF; fp@581: if (ecmd->advertising & ADVERTISED_100baseT_Full) fp@581: adv |= ADVERTISE_100FULL; fp@581: mii_rw(dev, np->phyaddr, MII_ADVERTISE, adv); fp@581: fp@581: if (np->gigabit == PHY_GIGABIT) { fp@581: adv = mii_rw(dev, np->phyaddr, MII_1000BT_CR, MII_READ); fp@581: adv &= ~ADVERTISE_1000FULL; fp@581: if (ecmd->advertising & ADVERTISED_1000baseT_Full) fp@581: adv |= ADVERTISE_1000FULL; fp@581: mii_rw(dev, np->phyaddr, MII_1000BT_CR, adv); fp@581: } fp@581: fp@581: bmcr = mii_rw(dev, np->phyaddr, MII_BMCR, MII_READ); fp@581: bmcr |= (BMCR_ANENABLE | BMCR_ANRESTART); fp@581: mii_rw(dev, np->phyaddr, MII_BMCR, bmcr); fp@581: fp@581: } else { fp@581: int adv, bmcr; fp@581: fp@581: np->autoneg = 0; fp@581: fp@581: adv = mii_rw(dev, np->phyaddr, MII_ADVERTISE, MII_READ); fp@581: adv &= ~(ADVERTISE_ALL | ADVERTISE_100BASE4); fp@581: if (ecmd->speed == SPEED_10 && ecmd->duplex == DUPLEX_HALF) fp@581: adv |= ADVERTISE_10HALF; fp@581: if (ecmd->speed == SPEED_10 && ecmd->duplex == DUPLEX_FULL) fp@581: adv |= ADVERTISE_10FULL; fp@581: if (ecmd->speed == SPEED_100 && ecmd->duplex == DUPLEX_HALF) fp@581: adv |= ADVERTISE_100HALF; fp@581: if (ecmd->speed == SPEED_100 && ecmd->duplex == DUPLEX_FULL) fp@581: adv |= ADVERTISE_100FULL; fp@581: mii_rw(dev, np->phyaddr, MII_ADVERTISE, adv); fp@581: np->fixed_mode = adv; fp@581: fp@581: if (np->gigabit == PHY_GIGABIT) { fp@581: adv = mii_rw(dev, np->phyaddr, MII_1000BT_CR, MII_READ); fp@581: adv &= ~ADVERTISE_1000FULL; fp@581: mii_rw(dev, np->phyaddr, MII_1000BT_CR, adv); fp@581: } fp@581: fp@581: bmcr = mii_rw(dev, np->phyaddr, MII_BMCR, MII_READ); fp@581: bmcr &= ~(BMCR_ANENABLE|BMCR_SPEED100|BMCR_FULLDPLX); fp@581: if (adv & (ADVERTISE_10FULL|ADVERTISE_100FULL)) fp@581: bmcr |= BMCR_FULLDPLX; fp@581: if (adv & (ADVERTISE_100HALF|ADVERTISE_100FULL)) fp@581: bmcr |= BMCR_SPEED100; fp@581: mii_rw(dev, np->phyaddr, MII_BMCR, bmcr); fp@581: fp@581: if (netif_running(dev)) { fp@581: /* Wait a bit and then reconfigure the nic.
*/ fp@581: udelay(10); fp@581: nv_linkchange(dev); fp@581: } fp@581: } fp@581: spin_unlock_irq(&np->lock); fp@581: fp@581: return 0; fp@581: } fp@581: fp@581: #define FORCEDETH_REGS_VER 1 fp@581: fp@581: static int nv_get_regs_len(struct net_device *dev) fp@581: { fp@581: struct fe_priv *np = netdev_priv(dev); fp@581: return np->register_size; fp@581: } fp@581: fp@581: static void nv_get_regs(struct net_device *dev, struct ethtool_regs *regs, void *buf) fp@581: { fp@581: struct fe_priv *np = netdev_priv(dev); fp@581: u8 __iomem *base = get_hwbase(dev); fp@581: u32 *rbuf = buf; fp@581: int i; fp@581: fp@581: regs->version = FORCEDETH_REGS_VER; fp@581: spin_lock_irq(&np->lock); fp@581: for (i = 0;i <= np->register_size/sizeof(u32); i++) fp@581: rbuf[i] = readl(base + i*sizeof(u32)); fp@581: spin_unlock_irq(&np->lock); fp@581: } fp@581: fp@581: static int nv_nway_reset(struct net_device *dev) fp@581: { fp@581: struct fe_priv *np = netdev_priv(dev); fp@581: int ret; fp@581: fp@581: spin_lock_irq(&np->lock); fp@581: if (np->autoneg) { fp@581: int bmcr; fp@581: fp@581: bmcr = mii_rw(dev, np->phyaddr, MII_BMCR, MII_READ); fp@581: bmcr |= (BMCR_ANENABLE | BMCR_ANRESTART); fp@581: mii_rw(dev, np->phyaddr, MII_BMCR, bmcr); fp@581: fp@581: ret = 0; fp@581: } else { fp@581: ret = -EINVAL; fp@581: } fp@581: spin_unlock_irq(&np->lock); fp@581: fp@581: return ret; fp@581: } fp@581: fp@581: #ifdef NETIF_F_TSO fp@581: static int nv_set_tso(struct net_device *dev, u32 value) fp@581: { fp@581: struct fe_priv *np = netdev_priv(dev); fp@581: fp@581: if ((np->driver_data & DEV_HAS_CHECKSUM)) fp@581: return ethtool_op_set_tso(dev, value); fp@581: else fp@581: return value ? -EOPNOTSUPP : 0; fp@581: } fp@581: #endif fp@581: fp@581: static struct ethtool_ops ops = { fp@581: .get_drvinfo = nv_get_drvinfo, fp@581: .get_link = ethtool_op_get_link, fp@581: .get_wol = nv_get_wol, fp@581: .set_wol = nv_set_wol, fp@581: .get_settings = nv_get_settings, fp@581: .set_settings = nv_set_settings, fp@581: .get_regs_len = nv_get_regs_len, fp@581: .get_regs = nv_get_regs, fp@581: .nway_reset = nv_nway_reset, fp@581: .get_perm_addr = ethtool_op_get_perm_addr, fp@581: #ifdef NETIF_F_TSO fp@581: .get_tso = ethtool_op_get_tso, fp@581: .set_tso = nv_set_tso fp@581: #endif fp@581: }; fp@581: fp@581: static void nv_vlan_rx_register(struct net_device *dev, struct vlan_group *grp) fp@581: { fp@581: struct fe_priv *np = get_nvpriv(dev); fp@581: fp@581: spin_lock_irq(&np->lock); fp@581: fp@581: /* save vlan group */ fp@581: np->vlangrp = grp; fp@581: fp@581: if (grp) { fp@581: /* enable vlan on MAC */ fp@581: np->txrxctl_bits |= NVREG_TXRXCTL_VLANSTRIP | NVREG_TXRXCTL_VLANINS; fp@581: } else { fp@581: /* disable vlan on MAC */ fp@581: np->txrxctl_bits &= ~NVREG_TXRXCTL_VLANSTRIP; fp@581: np->txrxctl_bits &= ~NVREG_TXRXCTL_VLANINS; fp@581: } fp@581: fp@581: writel(np->txrxctl_bits, get_hwbase(dev) + NvRegTxRxControl); fp@581: fp@581: spin_unlock_irq(&np->lock); fp@581: }; fp@581: fp@581: static void nv_vlan_rx_kill_vid(struct net_device *dev, unsigned short vid) fp@581: { fp@581: /* nothing to do */ fp@581: }; fp@581: fp@581: static void set_msix_vector_map(struct net_device *dev, u32 vector, u32 irqmask) fp@581: { fp@581: u8 __iomem *base = get_hwbase(dev); fp@581: int i; fp@581: u32 msixmap = 0; fp@581: fp@581: /* Each interrupt bit can be mapped to a MSIX vector (4 bits). fp@581: * MSIXMap0 represents the first 8 interrupts and MSIXMap1 represents fp@581: * the remaining 8 interrupts. 
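fp@581: * E.g. assigning vector 1 to an irqmask with only bit 3 set ORs (1 << (3 << 2)) = 0x1000 into NvRegMSIXMap0, i.e. nibble 3 holds the vector number.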
fp@581: */ fp@581: for (i = 0; i < 8; i++) { fp@581: if ((irqmask >> i) & 0x1) { fp@581: msixmap |= vector << (i << 2); fp@581: } fp@581: } fp@581: writel(readl(base + NvRegMSIXMap0) | msixmap, base + NvRegMSIXMap0); fp@581: fp@581: msixmap = 0; fp@581: for (i = 0; i < 8; i++) { fp@581: if ((irqmask >> (i + 8)) & 0x1) { fp@581: msixmap |= vector << (i << 2); fp@581: } fp@581: } fp@581: writel(readl(base + NvRegMSIXMap1) | msixmap, base + NvRegMSIXMap1); fp@581: } fp@581: fp@581: static int nv_request_irq(struct net_device *dev) fp@581: { fp@581: struct fe_priv *np = get_nvpriv(dev); fp@581: u8 __iomem *base = get_hwbase(dev); fp@581: int ret = 1; fp@581: int i; fp@581: fp@581: if (np->msi_flags & NV_MSI_X_CAPABLE) { fp@581: for (i = 0; i < (np->msi_flags & NV_MSI_X_VECTORS_MASK); i++) { fp@581: np->msi_x_entry[i].entry = i; fp@581: } fp@581: if ((ret = pci_enable_msix(np->pci_dev, np->msi_x_entry, (np->msi_flags & NV_MSI_X_VECTORS_MASK))) == 0) { fp@581: np->msi_flags |= NV_MSI_X_ENABLED; fp@581: if (optimization_mode == NV_OPTIMIZATION_MODE_THROUGHPUT) { fp@581: /* Request irq for rx handling */ fp@581: if (request_irq(np->msi_x_entry[NV_MSI_X_VECTOR_RX].vector, &nv_nic_irq_rx, SA_SHIRQ, dev->name, dev) != 0) { fp@581: printk(KERN_INFO "forcedeth: request_irq failed for rx %d\n", ret); fp@581: pci_disable_msix(np->pci_dev); fp@581: np->msi_flags &= ~NV_MSI_X_ENABLED; fp@581: goto out_err; fp@581: } fp@581: /* Request irq for tx handling */ fp@581: if (request_irq(np->msi_x_entry[NV_MSI_X_VECTOR_TX].vector, &nv_nic_irq_tx, SA_SHIRQ, dev->name, dev) != 0) { fp@581: printk(KERN_INFO "forcedeth: request_irq failed for tx %d\n", ret); fp@581: pci_disable_msix(np->pci_dev); fp@581: np->msi_flags &= ~NV_MSI_X_ENABLED; fp@581: goto out_free_rx; fp@581: } fp@581: /* Request irq for link and timer handling */ fp@581: if (request_irq(np->msi_x_entry[NV_MSI_X_VECTOR_OTHER].vector, &nv_nic_irq_other, SA_SHIRQ, dev->name, dev) != 0) { fp@581: printk(KERN_INFO "forcedeth: request_irq failed for link %d\n", ret); fp@581: pci_disable_msix(np->pci_dev); fp@581: np->msi_flags &= ~NV_MSI_X_ENABLED; fp@581: goto out_free_tx; fp@581: } fp@581: /* map interrupts to their respective vector */ fp@581: writel(0, base + NvRegMSIXMap0); fp@581: writel(0, base + NvRegMSIXMap1); fp@581: set_msix_vector_map(dev, NV_MSI_X_VECTOR_RX, NVREG_IRQ_RX_ALL); fp@581: set_msix_vector_map(dev, NV_MSI_X_VECTOR_TX, NVREG_IRQ_TX_ALL); fp@581: set_msix_vector_map(dev, NV_MSI_X_VECTOR_OTHER, NVREG_IRQ_OTHER); fp@581: } else { fp@581: /* Request irq for all interrupts */ fp@581: if (request_irq(np->msi_x_entry[NV_MSI_X_VECTOR_ALL].vector, &nv_nic_irq, SA_SHIRQ, dev->name, dev) != 0) { fp@581: printk(KERN_INFO "forcedeth: request_irq failed %d\n", ret); fp@581: pci_disable_msix(np->pci_dev); fp@581: np->msi_flags &= ~NV_MSI_X_ENABLED; fp@581: goto out_err; fp@581: } fp@581: fp@581: /* map interrupts to vector 0 */ fp@581: writel(0, base + NvRegMSIXMap0); fp@581: writel(0, base + NvRegMSIXMap1); fp@581: } fp@581: } fp@581: } fp@581: if (ret != 0 && np->msi_flags & NV_MSI_CAPABLE) { fp@581: if ((ret = pci_enable_msi(np->pci_dev)) == 0) { fp@581: np->msi_flags |= NV_MSI_ENABLED; fp@581: if (request_irq(np->pci_dev->irq, &nv_nic_irq, SA_SHIRQ, dev->name, dev) != 0) { fp@581: printk(KERN_INFO "forcedeth: request_irq failed %d\n", ret); fp@581: pci_disable_msi(np->pci_dev); fp@581: np->msi_flags &= ~NV_MSI_ENABLED; fp@581: goto out_err; fp@581: } fp@581: fp@581: /* map interrupts to vector 0 */ fp@581: writel(0, base + NvRegMSIMap0); fp@581: 
writel(0, base + NvRegMSIMap1); fp@581: /* enable msi vector 0 */ fp@581: writel(NVREG_MSI_VECTOR_0_ENABLED, base + NvRegMSIIrqMask); fp@581: } fp@581: } fp@581: if (ret != 0) { fp@581: if (request_irq(np->pci_dev->irq, &nv_nic_irq, SA_SHIRQ, dev->name, dev) != 0) fp@581: goto out_err; fp@581: } fp@581: fp@581: return 0; fp@581: out_free_tx: fp@581: free_irq(np->msi_x_entry[NV_MSI_X_VECTOR_TX].vector, dev); fp@581: out_free_rx: fp@581: free_irq(np->msi_x_entry[NV_MSI_X_VECTOR_RX].vector, dev); fp@581: out_err: fp@581: return 1; fp@581: } fp@581: fp@581: static void nv_free_irq(struct net_device *dev) fp@581: { fp@581: struct fe_priv *np = get_nvpriv(dev); fp@581: int i; fp@581: fp@581: if (np->msi_flags & NV_MSI_X_ENABLED) { fp@581: for (i = 0; i < (np->msi_flags & NV_MSI_X_VECTORS_MASK); i++) { fp@581: free_irq(np->msi_x_entry[i].vector, dev); fp@581: } fp@581: pci_disable_msix(np->pci_dev); fp@581: np->msi_flags &= ~NV_MSI_X_ENABLED; fp@581: } else { fp@581: free_irq(np->pci_dev->irq, dev); fp@581: if (np->msi_flags & NV_MSI_ENABLED) { fp@581: pci_disable_msi(np->pci_dev); fp@581: np->msi_flags &= ~NV_MSI_ENABLED; fp@581: } fp@581: } fp@581: } fp@581: fp@581: static int nv_open(struct net_device *dev) fp@581: { fp@581: struct fe_priv *np = netdev_priv(dev); fp@581: u8 __iomem *base = get_hwbase(dev); fp@581: int ret = 1; fp@581: int oom, i; fp@581: fp@581: dprintk(KERN_DEBUG "nv_open: begin\n"); fp@581: fp@581: /* 1) erase previous misconfiguration */ fp@581: if (np->driver_data & DEV_HAS_POWER_CNTRL) fp@581: nv_mac_reset(dev); fp@581: /* 4.1-1: stop adapter: ignored, 4.3 seems to be overkill */ fp@581: writel(NVREG_MCASTADDRA_FORCE, base + NvRegMulticastAddrA); fp@581: writel(0, base + NvRegMulticastAddrB); fp@581: writel(0, base + NvRegMulticastMaskA); fp@581: writel(0, base + NvRegMulticastMaskB); fp@581: writel(0, base + NvRegPacketFilterFlags); fp@581: fp@581: writel(0, base + NvRegTransmitterControl); fp@581: writel(0, base + NvRegReceiverControl); fp@581: fp@581: writel(0, base + NvRegAdapterControl); fp@581: fp@581: /* 2) initialize descriptor rings */ fp@581: set_bufsize(dev); fp@581: oom = nv_init_ring(dev); fp@581: fp@581: writel(0, base + NvRegLinkSpeed); fp@581: writel(0, base + NvRegUnknownTransmitterReg); fp@581: nv_txrx_reset(dev); fp@581: writel(0, base + NvRegUnknownSetupReg6); fp@581: fp@581: np->in_shutdown = 0; fp@581: fp@581: /* 3) set mac address */ fp@581: nv_copy_mac_to_hw(dev); fp@581: fp@581: /* 4) give hw rings */ fp@581: setup_hw_rings(dev, NV_SETUP_RX_RING | NV_SETUP_TX_RING); fp@581: writel( ((RX_RING-1) << NVREG_RINGSZ_RXSHIFT) + ((TX_RING-1) << NVREG_RINGSZ_TXSHIFT), fp@581: base + NvRegRingSizes); fp@581: fp@581: /* 5) continue setup */ fp@581: writel(np->linkspeed, base + NvRegLinkSpeed); fp@581: writel(NVREG_UNKSETUP3_VAL1, base + NvRegUnknownSetupReg3); fp@581: writel(np->txrxctl_bits, base + NvRegTxRxControl); fp@581: writel(np->vlanctl_bits, base + NvRegVlanControl); fp@581: pci_push(base); fp@581: writel(NVREG_TXRXCTL_BIT1|np->txrxctl_bits, base + NvRegTxRxControl); fp@581: reg_delay(dev, NvRegUnknownSetupReg5, NVREG_UNKSETUP5_BIT31, NVREG_UNKSETUP5_BIT31, fp@581: NV_SETUP5_DELAY, NV_SETUP5_DELAYMAX, fp@581: KERN_INFO "open: SetupReg5, Bit 31 remained off\n"); fp@581: fp@581: writel(0, base + NvRegUnknownSetupReg4); fp@581: writel(NVREG_IRQSTAT_MASK, base + NvRegIrqStatus); fp@581: writel(NVREG_MIISTAT_MASK2, base + NvRegMIIStatus); fp@581: fp@581: /* 6) continue setup */ fp@581: writel(NVREG_MISC1_FORCE | NVREG_MISC1_HD, base + NvRegMisc1); 
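fp@581: /* Writing the read-back value presumably acknowledges and clears any pending transmitter/receiver status bits. */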
fp@581: writel(readl(base + NvRegTransmitterStatus), base + NvRegTransmitterStatus); fp@581: writel(NVREG_PFF_ALWAYS, base + NvRegPacketFilterFlags); fp@581: writel(np->rx_buf_sz, base + NvRegOffloadConfig); fp@581: fp@581: writel(readl(base + NvRegReceiverStatus), base + NvRegReceiverStatus); fp@581: get_random_bytes(&i, sizeof(i)); fp@581: writel(NVREG_RNDSEED_FORCE | (i&NVREG_RNDSEED_MASK), base + NvRegRandomSeed); fp@581: writel(NVREG_UNKSETUP1_VAL, base + NvRegUnknownSetupReg1); fp@581: writel(NVREG_UNKSETUP2_VAL, base + NvRegUnknownSetupReg2); fp@581: if (poll_interval == -1) { fp@581: if (optimization_mode == NV_OPTIMIZATION_MODE_THROUGHPUT) fp@581: writel(NVREG_POLL_DEFAULT_THROUGHPUT, base + NvRegPollingInterval); fp@581: else fp@581: writel(NVREG_POLL_DEFAULT_CPU, base + NvRegPollingInterval); fp@581: } fp@581: else fp@581: writel(poll_interval & 0xFFFF, base + NvRegPollingInterval); fp@581: writel(NVREG_UNKSETUP6_VAL, base + NvRegUnknownSetupReg6); fp@581: writel((np->phyaddr << NVREG_ADAPTCTL_PHYSHIFT)|NVREG_ADAPTCTL_PHYVALID|NVREG_ADAPTCTL_RUNNING, fp@581: base + NvRegAdapterControl); fp@581: writel(NVREG_MIISPEED_BIT8|NVREG_MIIDELAY, base + NvRegMIISpeed); fp@581: writel(NVREG_UNKSETUP4_VAL, base + NvRegUnknownSetupReg4); fp@581: writel(NVREG_WAKEUPFLAGS_VAL, base + NvRegWakeUpFlags); fp@581: fp@581: i = readl(base + NvRegPowerState); fp@581: if ( (i & NVREG_POWERSTATE_POWEREDUP) == 0) fp@581: writel(NVREG_POWERSTATE_POWEREDUP|i, base + NvRegPowerState); fp@581: fp@581: pci_push(base); fp@581: udelay(10); fp@581: writel(readl(base + NvRegPowerState) | NVREG_POWERSTATE_VALID, base + NvRegPowerState); fp@581: fp@581: nv_disable_hw_interrupts(dev, np->irqmask); fp@581: pci_push(base); fp@581: writel(NVREG_MIISTAT_MASK2, base + NvRegMIIStatus); fp@581: writel(NVREG_IRQSTAT_MASK, base + NvRegIrqStatus); fp@581: pci_push(base); fp@581: fp@592: if (!np->ecdev) { fp@592: if (nv_request_irq(dev)) { fp@592: goto out_drain; fp@592: } fp@592: fp@592: /* ask for interrupts */ fp@592: nv_enable_hw_interrupts(dev, np->irqmask); fp@592: fp@592: spin_lock_irq(&np->lock); fp@592: } fp@592: fp@581: writel(NVREG_MCASTADDRA_FORCE, base + NvRegMulticastAddrA); fp@581: writel(0, base + NvRegMulticastAddrB); fp@581: writel(0, base + NvRegMulticastMaskA); fp@581: writel(0, base + NvRegMulticastMaskB); fp@581: writel(NVREG_PFF_ALWAYS|NVREG_PFF_MYADDR, base + NvRegPacketFilterFlags); fp@581: /* One manual link speed update: Interrupts are enabled, future link fp@581: * speed changes cause interrupts and are handled by nv_link_irq(). 
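fp@592: * In EtherCAT mode no interrupt was requested above; the link state found here is reported to the master via ecdev_set_link() instead.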
fp@581: */ fp@581: { fp@581: u32 miistat; fp@581: miistat = readl(base + NvRegMIIStatus); fp@581: writel(NVREG_MIISTAT_MASK, base + NvRegMIIStatus); fp@581: dprintk(KERN_INFO "startup: got 0x%08x.\n", miistat); fp@581: } fp@581: /* set linkspeed to invalid value, thus force nv_update_linkspeed fp@581: * to init hw */ fp@581: np->linkspeed = 0; fp@581: ret = nv_update_linkspeed(dev); fp@581: nv_start_rx(dev); fp@581: nv_start_tx(dev); fp@592: fp@592: if (np->ecdev) { fp@670: ecdev_set_link(np->ecdev, ret); fp@592: } fp@592: else { fp@592: netif_start_queue(dev); fp@592: if (ret) { fp@592: netif_carrier_on(dev); fp@592: } else { fp@592: printk("%s: no link during initialization.\n", dev->name); fp@592: netif_carrier_off(dev); fp@592: } fp@592: if (oom) fp@592: mod_timer(&np->oom_kick, jiffies + OOM_REFILL); fp@592: spin_unlock_irq(&np->lock); fp@592: } fp@581: fp@581: return 0; fp@581: out_drain: fp@581: drain_ring(dev); fp@581: return ret; fp@581: } fp@581: fp@581: static int nv_close(struct net_device *dev) fp@581: { fp@581: struct fe_priv *np = netdev_priv(dev); fp@581: u8 __iomem *base; fp@581: fp@592: if (!np->ecdev) { fp@592: spin_lock_irq(&np->lock); fp@592: np->in_shutdown = 1; fp@592: spin_unlock_irq(&np->lock); fp@592: synchronize_irq(dev->irq); fp@592: fp@592: del_timer_sync(&np->oom_kick); fp@592: del_timer_sync(&np->nic_poll); fp@592: fp@592: netif_stop_queue(dev); fp@592: spin_lock_irq(&np->lock); fp@592: } fp@592: fp@581: nv_stop_tx(dev); fp@581: nv_stop_rx(dev); fp@581: nv_txrx_reset(dev); fp@581: fp@592: base = get_hwbase(dev); fp@592: fp@592: if (!np->ecdev) { fp@592: /* disable interrupts on the nic or we will lock up */ fp@592: nv_disable_hw_interrupts(dev, np->irqmask); fp@592: pci_push(base); fp@592: dprintk(KERN_INFO "%s: Irqmask is zero again\n", dev->name); fp@592: fp@592: spin_unlock_irq(&np->lock); fp@592: fp@592: nv_free_irq(dev); fp@592: } fp@581: fp@581: drain_ring(dev); fp@581: fp@581: if (np->wolenabled) fp@581: nv_start_rx(dev); fp@581: fp@581: /* special op: write back the misordered MAC address - otherwise fp@581: * the next nv_probe would see a wrong address. 
fp@581: */ fp@581: writel(np->orig_mac[0], base + NvRegMacAddrA); fp@581: writel(np->orig_mac[1], base + NvRegMacAddrB); fp@581: fp@581: /* FIXME: power down nic */ fp@581: fp@581: return 0; fp@581: } fp@581: fp@581: static int __devinit nv_probe(struct pci_dev *pci_dev, const struct pci_device_id *id) fp@581: { fp@581: struct net_device *dev; fp@581: struct fe_priv *np; fp@581: unsigned long addr; fp@581: u8 __iomem *base; fp@581: int err, i; fp@581: u32 powerstate; fp@581: fp@581: board_idx++; fp@581: fp@581: dev = alloc_etherdev(sizeof(struct fe_priv)); fp@581: err = -ENOMEM; fp@581: if (!dev) fp@581: goto out; fp@581: fp@581: np = netdev_priv(dev); fp@581: np->pci_dev = pci_dev; fp@581: spin_lock_init(&np->lock); fp@581: SET_MODULE_OWNER(dev); fp@581: SET_NETDEV_DEV(dev, &pci_dev->dev); fp@581: fp@581: init_timer(&np->oom_kick); fp@581: np->oom_kick.data = (unsigned long) dev; fp@581: np->oom_kick.function = &nv_do_rx_refill; /* timer handler */ fp@581: init_timer(&np->nic_poll); fp@581: np->nic_poll.data = (unsigned long) dev; fp@581: np->nic_poll.function = &nv_do_nic_poll; /* timer handler */ fp@581: fp@581: err = pci_enable_device(pci_dev); fp@581: if (err) { fp@581: printk(KERN_INFO "forcedeth: pci_enable_dev failed (%d) for device %s\n", fp@581: err, pci_name(pci_dev)); fp@581: goto out_free; fp@581: } fp@581: fp@581: pci_set_master(pci_dev); fp@581: fp@581: err = pci_request_regions(pci_dev, DRV_NAME); fp@581: if (err < 0) fp@581: goto out_disable; fp@581: fp@581: if (id->driver_data & (DEV_HAS_VLAN|DEV_HAS_MSI_X|DEV_HAS_POWER_CNTRL)) fp@581: np->register_size = NV_PCI_REGSZ_VER2; fp@581: else fp@581: np->register_size = NV_PCI_REGSZ_VER1; fp@581: fp@581: err = -EINVAL; fp@581: addr = 0; fp@581: for (i = 0; i < DEVICE_COUNT_RESOURCE; i++) { fp@581: dprintk(KERN_DEBUG "%s: resource %d start %p len %ld flags 0x%08lx.\n", fp@581: pci_name(pci_dev), i, (void*)pci_resource_start(pci_dev, i), fp@581: pci_resource_len(pci_dev, i), fp@581: pci_resource_flags(pci_dev, i)); fp@581: if (pci_resource_flags(pci_dev, i) & IORESOURCE_MEM && fp@581: pci_resource_len(pci_dev, i) >= np->register_size) { fp@581: addr = pci_resource_start(pci_dev, i); fp@581: break; fp@581: } fp@581: } fp@581: if (i == DEVICE_COUNT_RESOURCE) { fp@581: printk(KERN_INFO "forcedeth: Couldn't find register window for device %s.\n", fp@581: pci_name(pci_dev)); fp@581: goto out_relreg; fp@581: } fp@581: fp@581: /* copy of driver data */ fp@581: np->driver_data = id->driver_data; fp@581: fp@581: /* handle different descriptor versions */ fp@581: if (id->driver_data & DEV_HAS_HIGH_DMA) { fp@581: /* packet format 3: supports 40-bit addressing */ fp@581: np->desc_ver = DESC_VER_3; fp@581: np->txrxctl_bits = NVREG_TXRXCTL_DESC_3; fp@581: if (pci_set_dma_mask(pci_dev, DMA_39BIT_MASK)) { fp@581: printk(KERN_INFO "forcedeth: 64-bit DMA failed, using 32-bit addressing for device %s.\n", fp@581: pci_name(pci_dev)); fp@581: } else { fp@581: dev->features |= NETIF_F_HIGHDMA; fp@581: printk(KERN_INFO "forcedeth: using HIGHDMA\n"); fp@581: } fp@581: if (pci_set_consistent_dma_mask(pci_dev, 0x0000007fffffffffULL)) { fp@581: printk(KERN_INFO "forcedeth: 64-bit DMA (consistent) failed for device %s.\n", fp@581: pci_name(pci_dev)); fp@581: } fp@581: } else if (id->driver_data & DEV_HAS_LARGEDESC) { fp@581: /* packet format 2: supports jumbo frames */ fp@581: np->desc_ver = DESC_VER_2; fp@581: np->txrxctl_bits = NVREG_TXRXCTL_DESC_2; fp@581: } else { fp@581: /* original packet format */ fp@581: np->desc_ver = DESC_VER_1; fp@581: 
np->txrxctl_bits = NVREG_TXRXCTL_DESC_1; fp@581: } fp@581: fp@581: np->pkt_limit = NV_PKTLIMIT_1; fp@581: if (id->driver_data & DEV_HAS_LARGEDESC) fp@581: np->pkt_limit = NV_PKTLIMIT_2; fp@581: fp@581: if (id->driver_data & DEV_HAS_CHECKSUM) { fp@581: np->txrxctl_bits |= NVREG_TXRXCTL_RXCHECK; fp@581: dev->features |= NETIF_F_HW_CSUM | NETIF_F_SG; fp@581: #ifdef NETIF_F_TSO fp@581: dev->features |= NETIF_F_TSO; fp@581: #endif fp@581: } fp@581: fp@581: np->vlanctl_bits = 0; fp@581: if (id->driver_data & DEV_HAS_VLAN) { fp@581: np->vlanctl_bits = NVREG_VLANCONTROL_ENABLE; fp@581: dev->features |= NETIF_F_HW_VLAN_RX | NETIF_F_HW_VLAN_TX; fp@581: dev->vlan_rx_register = nv_vlan_rx_register; fp@581: dev->vlan_rx_kill_vid = nv_vlan_rx_kill_vid; fp@581: } fp@581: fp@581: np->msi_flags = 0; fp@581: if ((id->driver_data & DEV_HAS_MSI) && !disable_msi) { fp@581: np->msi_flags |= NV_MSI_CAPABLE; fp@581: } fp@581: if ((id->driver_data & DEV_HAS_MSI_X) && !disable_msix) { fp@581: np->msi_flags |= NV_MSI_X_CAPABLE; fp@581: } fp@581: fp@581: err = -ENOMEM; fp@581: np->base = ioremap(addr, np->register_size); fp@581: if (!np->base) fp@581: goto out_relreg; fp@581: dev->base_addr = (unsigned long)np->base; fp@581: fp@581: dev->irq = pci_dev->irq; fp@581: fp@581: if (np->desc_ver == DESC_VER_1 || np->desc_ver == DESC_VER_2) { fp@581: np->rx_ring.orig = pci_alloc_consistent(pci_dev, fp@581: sizeof(struct ring_desc) * (RX_RING + TX_RING), fp@581: &np->ring_addr); fp@581: if (!np->rx_ring.orig) fp@581: goto out_unmap; fp@581: np->tx_ring.orig = &np->rx_ring.orig[RX_RING]; fp@581: } else { fp@581: np->rx_ring.ex = pci_alloc_consistent(pci_dev, fp@581: sizeof(struct ring_desc_ex) * (RX_RING + TX_RING), fp@581: &np->ring_addr); fp@581: if (!np->rx_ring.ex) fp@581: goto out_unmap; fp@581: np->tx_ring.ex = &np->rx_ring.ex[RX_RING]; fp@581: } fp@581: fp@581: dev->open = nv_open; fp@581: dev->stop = nv_close; fp@581: dev->hard_start_xmit = nv_start_xmit; fp@581: dev->get_stats = nv_get_stats; fp@581: dev->change_mtu = nv_change_mtu; fp@581: dev->set_mac_address = nv_set_mac_address; fp@581: dev->set_multicast_list = nv_set_multicast; fp@581: #ifdef CONFIG_NET_POLL_CONTROLLER fp@581: dev->poll_controller = nv_poll_controller; fp@581: #endif fp@581: SET_ETHTOOL_OPS(dev, &ops); fp@581: dev->tx_timeout = nv_tx_timeout; fp@581: dev->watchdog_timeo = NV_WATCHDOG_TIMEO; fp@581: fp@581: pci_set_drvdata(pci_dev, dev); fp@581: fp@581: /* read the mac address */ fp@581: base = get_hwbase(dev); fp@581: np->orig_mac[0] = readl(base + NvRegMacAddrA); fp@581: np->orig_mac[1] = readl(base + NvRegMacAddrB); fp@581: fp@581: dev->dev_addr[0] = (np->orig_mac[1] >> 8) & 0xff; fp@581: dev->dev_addr[1] = (np->orig_mac[1] >> 0) & 0xff; fp@581: dev->dev_addr[2] = (np->orig_mac[0] >> 24) & 0xff; fp@581: dev->dev_addr[3] = (np->orig_mac[0] >> 16) & 0xff; fp@581: dev->dev_addr[4] = (np->orig_mac[0] >> 8) & 0xff; fp@581: dev->dev_addr[5] = (np->orig_mac[0] >> 0) & 0xff; fp@581: memcpy(dev->perm_addr, dev->dev_addr, dev->addr_len); fp@581: fp@581: if (!is_valid_ether_addr(dev->perm_addr)) { fp@581: /* fp@581: * Bad mac address. At least one bios sets the mac address fp@581: * to 01:23:45:67:89:ab fp@581: */ fp@581: printk(KERN_ERR "%s: Invalid Mac address detected: %02x:%02x:%02x:%02x:%02x:%02x\n", fp@581: pci_name(pci_dev), fp@581: dev->dev_addr[0], dev->dev_addr[1], dev->dev_addr[2], fp@581: dev->dev_addr[3], dev->dev_addr[4], dev->dev_addr[5]); fp@581: printk(KERN_ERR "Please complain to your hardware vendor. 
Switching to a random MAC.\n"); fp@581: dev->dev_addr[0] = 0x00; fp@581: dev->dev_addr[1] = 0x00; fp@581: dev->dev_addr[2] = 0x6c; fp@581: get_random_bytes(&dev->dev_addr[3], 3); fp@581: } fp@581: fp@581: dprintk(KERN_DEBUG "%s: MAC Address %02x:%02x:%02x:%02x:%02x:%02x\n", pci_name(pci_dev), fp@581: dev->dev_addr[0], dev->dev_addr[1], dev->dev_addr[2], fp@581: dev->dev_addr[3], dev->dev_addr[4], dev->dev_addr[5]); fp@581: fp@581: /* disable WOL */ fp@581: writel(0, base + NvRegWakeUpFlags); fp@581: np->wolenabled = 0; fp@581: fp@581: if (id->driver_data & DEV_HAS_POWER_CNTRL) { fp@581: u8 revision_id; fp@581: pci_read_config_byte(pci_dev, PCI_REVISION_ID, &revision_id); fp@581: fp@581: /* take phy and nic out of low power mode */ fp@581: powerstate = readl(base + NvRegPowerState2); fp@581: powerstate &= ~NVREG_POWERSTATE2_POWERUP_MASK; fp@581: if ((id->device == PCI_DEVICE_ID_NVIDIA_NVENET_12 || fp@581: id->device == PCI_DEVICE_ID_NVIDIA_NVENET_13) && fp@581: revision_id >= 0xA3) fp@581: powerstate |= NVREG_POWERSTATE2_POWERUP_REV_A3; fp@581: writel(powerstate, base + NvRegPowerState2); fp@581: } fp@581: fp@581: if (np->desc_ver == DESC_VER_1) { fp@581: np->tx_flags = NV_TX_VALID; fp@581: } else { fp@581: np->tx_flags = NV_TX2_VALID; fp@581: } fp@581: if (optimization_mode == NV_OPTIMIZATION_MODE_THROUGHPUT) { fp@581: np->irqmask = NVREG_IRQMASK_THROUGHPUT; fp@581: if (np->msi_flags & NV_MSI_X_CAPABLE) /* set number of vectors */ fp@581: np->msi_flags |= 0x0003; fp@581: } else { fp@581: np->irqmask = NVREG_IRQMASK_CPU; fp@581: if (np->msi_flags & NV_MSI_X_CAPABLE) /* set number of vectors */ fp@581: np->msi_flags |= 0x0001; fp@581: } fp@581: fp@581: if (id->driver_data & DEV_NEED_TIMERIRQ) fp@581: np->irqmask |= NVREG_IRQ_TIMER; fp@581: if (id->driver_data & DEV_NEED_LINKTIMER) { fp@581: dprintk(KERN_INFO "%s: link timer on.\n", pci_name(pci_dev)); fp@581: np->need_linktimer = 1; fp@581: np->link_timeout = jiffies + LINK_TIMEOUT; fp@581: } else { fp@581: dprintk(KERN_INFO "%s: link timer off.\n", pci_name(pci_dev)); fp@581: np->need_linktimer = 0; fp@581: } fp@581: fp@581: /* find a suitable phy */ fp@581: for (i = 1; i <= 32; i++) { fp@581: int id1, id2; fp@581: int phyaddr = i & 0x1F; fp@581: fp@581: spin_lock_irq(&np->lock); fp@581: id1 = mii_rw(dev, phyaddr, MII_PHYSID1, MII_READ); fp@581: spin_unlock_irq(&np->lock); fp@581: if (id1 < 0 || id1 == 0xffff) fp@581: continue; fp@581: spin_lock_irq(&np->lock); fp@581: id2 = mii_rw(dev, phyaddr, MII_PHYSID2, MII_READ); fp@581: spin_unlock_irq(&np->lock); fp@581: if (id2 < 0 || id2 == 0xffff) fp@581: continue; fp@581: fp@581: id1 = (id1 & PHYID1_OUI_MASK) << PHYID1_OUI_SHFT; fp@581: id2 = (id2 & PHYID2_OUI_MASK) >> PHYID2_OUI_SHFT; fp@581: dprintk(KERN_DEBUG "%s: open: Found PHY %04x:%04x at address %d.\n", fp@581: pci_name(pci_dev), id1, id2, phyaddr); fp@581: np->phyaddr = phyaddr; fp@581: np->phy_oui = id1 | id2; fp@581: break; fp@581: } fp@581: if (i == 33) { fp@581: printk(KERN_INFO "%s: open: Could not find a valid PHY.\n", fp@581: pci_name(pci_dev)); fp@581: goto out_freering; fp@581: } fp@581: fp@581: /* reset it */ fp@581: phy_init(dev); fp@581: fp@581: /* set default link speed settings */ fp@581: np->linkspeed = NVREG_LINKSPEED_FORCE|NVREG_LINKSPEED_10; fp@581: np->duplex = 0; fp@581: np->autoneg = 1; fp@581: fp@581: // offer device to EtherCAT master module fp@639: if (ecdev_offer(dev, ec_poll, THIS_MODULE, &np->ecdev)) { fp@581: printk(KERN_ERR "forcedeth: Failed to offer device.\n"); fp@581: goto out_freering; fp@581: } fp@581: 
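fp@592: /* If the EtherCAT master claimed the device, np->ecdev was set by ecdev_offer(): open it through ecdev_open() below instead of registering a normal net_device. */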
fp@592:     if (np->ecdev) {
fp@592:         if (ecdev_open(np->ecdev)) {
fp@592:             ecdev_withdraw(np->ecdev);
fp@592:             goto out_freering;
fp@592:         }
fp@592:     }
fp@592:     else {
fp@581:         err = register_netdev(dev);
fp@581:         if (err) {
fp@581:             printk(KERN_INFO "forcedeth: unable to register netdev: %d\n", err);
fp@581:             goto out_freering;
fp@581:         }
fp@581:     }
fp@581:     printk(KERN_INFO "%s: forcedeth.c: subsystem: %05x:%04x bound to %s\n",
fp@581:            dev->name, pci_dev->subsystem_vendor, pci_dev->subsystem_device,
fp@581:            pci_name(pci_dev));
fp@581:
fp@581:     return 0;
fp@581:
fp@581: out_freering:
fp@581:     if (np->desc_ver == DESC_VER_1 || np->desc_ver == DESC_VER_2)
fp@581:         pci_free_consistent(np->pci_dev, sizeof(struct ring_desc) * (RX_RING + TX_RING),
fp@581:                             np->rx_ring.orig, np->ring_addr);
fp@581:     else
fp@581:         pci_free_consistent(np->pci_dev, sizeof(struct ring_desc_ex) * (RX_RING + TX_RING),
fp@581:                             np->rx_ring.ex, np->ring_addr);
fp@581:     pci_set_drvdata(pci_dev, NULL);
fp@581: out_unmap:
fp@581:     iounmap(get_hwbase(dev));
fp@581: out_relreg:
fp@581:     pci_release_regions(pci_dev);
fp@581: out_disable:
fp@581:     pci_disable_device(pci_dev);
fp@581: out_free:
fp@581:     free_netdev(dev);
fp@581: out:
fp@581:     return err;
fp@581: }
fp@581:
fp@581: static void __devexit nv_remove(struct pci_dev *pci_dev)
fp@581: {
fp@581:     struct net_device *dev = pci_get_drvdata(pci_dev);
fp@581:     struct fe_priv *np = netdev_priv(dev);
fp@581:
fp@581:     if (np->ecdev) {
fp@581:         ecdev_close(np->ecdev);
fp@581:         ecdev_withdraw(np->ecdev);
fp@581:     }
fp@581:     else {
fp@581:         unregister_netdev(dev);
fp@581:     }
fp@581:
fp@581:     /* free all structures */
fp@581:     if (np->desc_ver == DESC_VER_1 || np->desc_ver == DESC_VER_2)
fp@581:         pci_free_consistent(np->pci_dev, sizeof(struct ring_desc) * (RX_RING + TX_RING), np->rx_ring.orig, np->ring_addr);
fp@581:     else
fp@581:         pci_free_consistent(np->pci_dev, sizeof(struct ring_desc_ex) * (RX_RING + TX_RING), np->rx_ring.ex, np->ring_addr);
fp@581:     iounmap(get_hwbase(dev));
fp@581:     pci_release_regions(pci_dev);
fp@581:     pci_disable_device(pci_dev);
fp@581:     free_netdev(dev);
fp@581:     pci_set_drvdata(pci_dev, NULL);
fp@581: }
fp@581:
fp@581: static struct pci_device_id pci_tbl[] = {
fp@581:     { /* nForce Ethernet Controller */
fp@581:         PCI_DEVICE(PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NVENET_1),
fp@581:         .driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER,
fp@581:     },
fp@581:     { /* nForce2 Ethernet Controller */
fp@581:         PCI_DEVICE(PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NVENET_2),
fp@581:         .driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER,
fp@581:     },
fp@581:     { /* nForce3 Ethernet Controller */
fp@581:         PCI_DEVICE(PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NVENET_3),
fp@581:         .driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER,
fp@581:     },
fp@581:     { /* nForce3 Ethernet Controller */
fp@581:         PCI_DEVICE(PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NVENET_4),
fp@581:         .driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER|DEV_HAS_LARGEDESC|DEV_HAS_CHECKSUM,
fp@581:     },
fp@581:     { /* nForce3 Ethernet Controller */
fp@581:         PCI_DEVICE(PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NVENET_5),
fp@581:         .driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER|DEV_HAS_LARGEDESC|DEV_HAS_CHECKSUM,
fp@581:     },
fp@581:     { /* nForce3 Ethernet Controller */
fp@581:         PCI_DEVICE(PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NVENET_6),
fp@581:         .driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER|DEV_HAS_LARGEDESC|DEV_HAS_CHECKSUM,
fp@581:     },
fp@581:     { /* nForce3 Ethernet Controller */
fp@581:         PCI_DEVICE(PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NVENET_7),
fp@581:         .driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER|DEV_HAS_LARGEDESC|DEV_HAS_CHECKSUM,
fp@581:     },
fp@581:     { /* CK804 Ethernet Controller */
fp@581:         PCI_DEVICE(PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NVENET_8),
fp@581:         .driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER|DEV_HAS_LARGEDESC|DEV_HAS_CHECKSUM|DEV_HAS_HIGH_DMA,
fp@581:     },
fp@581:     { /* CK804 Ethernet Controller */
fp@581:         PCI_DEVICE(PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NVENET_9),
fp@581:         .driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER|DEV_HAS_LARGEDESC|DEV_HAS_CHECKSUM|DEV_HAS_HIGH_DMA,
fp@581:     },
fp@581:     { /* MCP04 Ethernet Controller */
fp@581:         PCI_DEVICE(PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NVENET_10),
fp@581:         .driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER|DEV_HAS_LARGEDESC|DEV_HAS_CHECKSUM|DEV_HAS_HIGH_DMA,
fp@581:     },
fp@581:     { /* MCP04 Ethernet Controller */
fp@581:         PCI_DEVICE(PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NVENET_11),
fp@581:         .driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER|DEV_HAS_LARGEDESC|DEV_HAS_CHECKSUM|DEV_HAS_HIGH_DMA,
fp@581:     },
fp@581:     { /* MCP51 Ethernet Controller */
fp@581:         PCI_DEVICE(PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NVENET_12),
fp@581:         .driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER|DEV_HAS_HIGH_DMA|DEV_HAS_POWER_CNTRL,
fp@581:     },
fp@581:     { /* MCP51 Ethernet Controller */
fp@581:         PCI_DEVICE(PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NVENET_13),
fp@581:         .driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER|DEV_HAS_HIGH_DMA|DEV_HAS_POWER_CNTRL,
fp@581:     },
fp@581:     { /* MCP55 Ethernet Controller */
fp@581:         PCI_DEVICE(PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NVENET_14),
fp@581:         .driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER|DEV_HAS_LARGEDESC|DEV_HAS_CHECKSUM|DEV_HAS_HIGH_DMA|DEV_HAS_VLAN|DEV_HAS_MSI|DEV_HAS_MSI_X|DEV_HAS_POWER_CNTRL,
fp@581:     },
fp@581:     { /* MCP55 Ethernet Controller */
fp@581:         PCI_DEVICE(PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NVENET_15),
fp@581:         .driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER|DEV_HAS_LARGEDESC|DEV_HAS_CHECKSUM|DEV_HAS_HIGH_DMA|DEV_HAS_VLAN|DEV_HAS_MSI|DEV_HAS_MSI_X|DEV_HAS_POWER_CNTRL,
fp@581:     },
fp@581:     {0,},
fp@581: };
fp@581:
fp@581: static struct pci_driver driver = {
fp@581:     .name = "forcedeth",
fp@581:     .id_table = pci_tbl,
fp@581:     .probe = nv_probe,
fp@581:     .remove = __devexit_p(nv_remove),
fp@581: };
fp@581:
fp@581:
fp@581: static int __init init_nic(void)
fp@581: {
fp@592:     printk(KERN_INFO "forcedeth: EtherCAT-capable nForce ethernet driver."
fp@592:            " Version %s, master %s.\n",
fp@592:            FORCEDETH_VERSION, EC_MASTER_VERSION);
fp@581:     return pci_module_init(&driver);
fp@581: }
fp@581:
fp@581: static void __exit exit_nic(void)
fp@581: {
fp@581:     pci_unregister_driver(&driver);
fp@581: }
fp@581:
fp@581: module_param(max_interrupt_work, int, 0);
fp@581: MODULE_PARM_DESC(max_interrupt_work, "forcedeth maximum events handled per interrupt");
fp@581: module_param(optimization_mode, int, 0);
fp@581: MODULE_PARM_DESC(optimization_mode, "In throughput mode (0), every tx & rx packet will generate an interrupt. In CPU mode (1), interrupts are controlled by a timer.");
fp@581: module_param(poll_interval, int, 0);
fp@581: MODULE_PARM_DESC(poll_interval, "Interval determines how frequent timer interrupt is generated by [(time_in_micro_secs * 100) / (2^10)]. Min is 0 and Max is 65535.");
fp@581: module_param(disable_msi, int, 0);
fp@581: MODULE_PARM_DESC(disable_msi, "Disable MSI interrupts by setting to 1.");
fp@581: module_param(disable_msix, int, 0);
fp@581: MODULE_PARM_DESC(disable_msix, "Disable MSIX interrupts by setting to 1.");
fp@581:
fp@592: MODULE_AUTHOR("Dipl.-Ing. (FH) Florian Pose <fp@igh-essen.com>");
fp@592: MODULE_DESCRIPTION("EtherCAT-capable nForce ethernet driver");
fp@581: MODULE_LICENSE("GPL");
fp@581:
fp@581: //MODULE_DEVICE_TABLE(pci, pci_tbl); // prevent auto-loading
fp@581:
fp@581: module_init(init_nic);
fp@581: module_exit(exit_nic);
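/*
 * A worked example for the poll_interval parameter described above, derived
 * only from the formula quoted in its MODULE_PARM_DESC: the value passed is
 * (time_in_micro_secs * 100) / (2^10), so a timer period of 10000 us
 * (about 100 timer interrupts per second) corresponds to a value of roughly
 * (10000 * 100) / 1024, i.e. about 977.
 */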