@@ -276,12 +276,13 @@
     struct net_device *netdev = adapter->netdev;
     irq_handler_t handler = e1000_intr;
     int irq_flags = IRQF_SHARED;
     int err;
 
-    if (adapter->ecdev)
+    if (adapter->ecdev) {
         return 0;
+    }
 
     err = request_irq(adapter->pdev->irq, handler, irq_flags, netdev->name,
                       netdev);
     if (err) {
         e_err(probe, "Unable to allocate interrupt Error: %d\n", err);
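Note: this is the IRQ setup path (presumably e1000_request_irq()). For an EtherCAT-attached port (adapter->ecdev set) the driver returns early and never installs a kernel interrupt handler, since the master drives the hardware by polling via ec_poll() instead. Like every hunk in this section, the change itself is purely cosmetic: the single-statement if body gains braces, with no behavior change.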
@@ -1931,12 +1935,13 @@
 }
 
 static void e1000_unmap_and_free_tx_resource(struct e1000_adapter *adapter,
                                              struct e1000_buffer *buffer_info)
 {
-    if (adapter->ecdev)
+    if (adapter->ecdev) {
         return;
+    }
 
     if (buffer_info->dma) {
         if (buffer_info->mapped_as_page)
             dma_unmap_page(&adapter->pdev->dev, buffer_info->dma,
                            buffer_info->length, DMA_TO_DEVICE);
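Note: e1000_unmap_and_free_tx_resource() is a no-op in EtherCAT mode, presumably because the master manages the transmit frame memory itself; the hunk again only adds braces around the early return.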
@@ -2370,14 +2375,17 @@
             ew32(TCTL, tctl);
             E1000_WRITE_FLUSH();
 
             adapter->tx_fifo_head = 0;
             atomic_set(&adapter->tx_fifo_stall, 0);
-            if (!adapter->ecdev) netif_wake_queue(netdev);
+            if (!adapter->ecdev) {
+                netif_wake_queue(netdev);
+            }
         } else if (!test_bit(__E1000_DOWN, &adapter->flags)) {
-            if (!adapter->ecdev)
+            if (!adapter->ecdev) {
                 mod_timer(&adapter->tx_fifo_stall_timer, jiffies + 1);
+            }
         }
     }
     rtnl_unlock();
 }
 
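Note: 82547 TX-FIFO-stall recovery (likely e1000_82547_tx_fifo_stall_task(), given the rtnl_unlock()): waking the netdev queue and re-arming the stall timer remain guarded by !adapter->ecdev, since neither the kernel queue nor the timer is used for EtherCAT ports. Both single-statement guards gain braces.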
@@ -2433,11 +2441,11 @@
 
     if (link) {
         if ((adapter->ecdev && !ecdev_get_link(adapter->ecdev))
                 || (!adapter->ecdev && !netif_carrier_ok(netdev))) {
             u32 ctrl;
-            bool txb2b = true;
+            bool txb2b __attribute__ ((unused)) = true;
             /* update snapshot of PHY registers on LSC */
             e1000_get_speed_and_duplex(hw,
                                        &adapter->link_speed,
                                        &adapter->link_duplex);
 
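Note: link watch (likely e1000_watchdog()). Link state comes from ecdev_get_link() for EtherCAT devices and from netif_carrier_ok() otherwise. txb2b is now marked __attribute__ ((unused)), presumably because this variant only assigns the flag and newer compilers warn about set-but-unused variables.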
@@ -2548,11 +2556,13 @@
 
     /* Cause software interrupt to ensure rx ring is cleaned */
     ew32(ICS, E1000_ICS_RXDMT0);
 
     /* Force detection of hung controller every watchdog period */
-    if (!adapter->ecdev) adapter->detect_tx_hung = true;
+    if (!adapter->ecdev) {
+        adapter->detect_tx_hung = true;
+    }
 
     /* Reset the timer */
     if (!adapter->ecdev) {
         if (!test_bit(__E1000_DOWN, &adapter->flags))
             mod_timer(&adapter->watchdog_timer,
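Note: tail of the watchdog. TX-hang detection and the watchdog re-arm are both skipped for EtherCAT ports; only the first guard changes, gaining braces.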
@@ -3108,12 +3118,13 @@
      * if using multiple tx queues. If the stack breaks away from a
      * single qdisc implementation, we can look at this again. */
     tx_ring = adapter->tx_ring;
 
     if (unlikely(skb->len <= 0)) {
-        if (!adapter->ecdev)
+        if (!adapter->ecdev) {
             dev_kfree_skb_any(skb);
+        }
         return NETDEV_TX_OK;
     }
 
     mss = skb_shinfo(skb)->gso_size;
     /* The controller does a simple calculation to
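Note: transmit entry point (presumably e1000_xmit_frame()). Zero-length packets are rejected, but dev_kfree_skb_any() is only called in non-EtherCAT mode, presumably because frames handed over by the master are not skbs the driver owns.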
@@ -3236,11 +3247,13 @@
             /* Make sure there is space in the ring for the next send. */
             e1000_maybe_stop_tx(netdev, tx_ring, MAX_SKB_FRAGS + 2);
         }
 
     } else {
-        if (!adapter->ecdev) dev_kfree_skb_any(skb);
+        if (!adapter->ecdev) {
+            dev_kfree_skb_any(skb);
+        }
         tx_ring->buffer_info[first].time_stamp = 0;
         tx_ring->next_to_use = first;
     }
 
     return NETDEV_TX_OK;
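Note: error path of the same transmit function. When descriptor setup fails, the skb is freed only in non-EtherCAT mode before the ring state is rolled back to the first descriptor.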
@@ -3294,12 +3307,13 @@
 {
     struct e1000_adapter *adapter = netdev_priv(netdev);
     struct e1000_hw *hw = &adapter->hw;
     int max_frame = new_mtu + ENET_HEADER_SIZE + ETHERNET_FCS_SIZE;
 
-    if (adapter->ecdev)
+    if (adapter->ecdev) {
         return -EBUSY;
+    }
 
     if ((max_frame < MINIMUM_ETHERNET_FRAME_SIZE) ||
         (max_frame > MAX_JUMBO_FRAME_SIZE)) {
         e_err(probe, "Invalid MTU setting\n");
         return -EINVAL;
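Note: MTU handler (presumably e1000_change_mtu()). MTU changes are refused with -EBUSY while the device is under EtherCAT control, so the frame size the master configured cannot be changed from userspace.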
@@ -3383,12 +3397,13 @@
     if (adapter->link_speed == 0)
         return;
     if (pci_channel_offline(pdev))
         return;
 
-    if (!adapter->ecdev)
+    if (!adapter->ecdev) {
         spin_lock_irqsave(&adapter->stats_lock, flags);
+    }
 
     /* these counters are modified from e1000_tbi_adjust_stats,
      * called from the interrupt context, so they must only
      * be written while holding adapter->stats_lock
      */
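Note: head of the statistics update (presumably e1000_update_stats()). stats_lock is only taken in non-EtherCAT operation, apparently because in EtherCAT mode the function runs from the master's polling context where the interrupt-side contention the lock protects against does not occur.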
@@ -3512,12 +3527,13 @@
     adapter->stats.mgptc += er32(MGTPTC);
     adapter->stats.mgprc += er32(MGTPRC);
     adapter->stats.mgpdc += er32(MGTPDC);
     }
 
-    if (!adapter->ecdev)
+    if (!adapter->ecdev) {
         spin_unlock_irqrestore(&adapter->stats_lock, flags);
+    }
 }
 
 void ec_poll(struct net_device *netdev)
 {
     struct e1000_adapter *adapter = netdev_priv(netdev);
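Note: the matching unlock at the end of the statistics update, followed by the start of ec_poll(), the entry point the EtherCAT master calls cyclically in place of interrupt-driven processing.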
@@ -3828,11 +3844,13 @@
         (*work_done)++;
         rmb(); /* read descriptor and rx_buffer_info after status DD */
 
         status = rx_desc->status;
         skb = buffer_info->skb;
-        if (!adapter->ecdev) buffer_info->skb = NULL;
+        if (!adapter->ecdev) {
+            buffer_info->skb = NULL;
+        }
 
         if (++i == rx_ring->count) i = 0;
         next_rxd = E1000_RX_DESC(*rx_ring, i);
         prefetch(next_rxd);
 
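Note: RX descriptor cleanup. In EtherCAT mode buffer_info->skb is not detached, apparently because receive buffers are reused in place rather than handed up the network stack.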
@@ -4045,11 +4063,13 @@
         (*work_done)++;
         rmb(); /* read descriptor and rx_buffer_info after status DD */
 
         status = rx_desc->status;
         skb = buffer_info->skb;
-        if (!adapter->ecdev) buffer_info->skb = NULL;
+        if (!adapter->ecdev) {
+            buffer_info->skb = NULL;
+        }
 
         prefetch(skb->data - NET_IP_ALIGN);
 
         if (++i == rx_ring->count) i = 0;
         next_rxd = E1000_RX_DESC(*rx_ring, i);
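Note: the same pattern in the second receive cleanup path; the driver has separate routines for the normal and jumbo-frame RX rings, and both get the identical brace-only change.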
@@ -4483,21 +4503,25 @@
     switch (cmd) {
     case SIOCGMIIPHY:
         data->phy_id = hw->phy_addr;
         break;
     case SIOCGMIIREG:
-        if (adapter->ecdev) return -EPERM;
+        if (adapter->ecdev) {
+            return -EPERM;
+        }
         spin_lock_irqsave(&adapter->stats_lock, flags);
         if (e1000_read_phy_reg(hw, data->reg_num & 0x1F,
                                &data->val_out)) {
             spin_unlock_irqrestore(&adapter->stats_lock, flags);
             return -EIO;
         }
         spin_unlock_irqrestore(&adapter->stats_lock, flags);
         break;
     case SIOCSMIIREG:
-        if (adapter->ecdev) return -EPERM;
+        if (adapter->ecdev) {
+            return -EPERM;
+        }
         if (data->reg_num & ~(0x1F))
             return -EFAULT;
         mii_reg = data->val_in;
         spin_lock_irqsave(&adapter->stats_lock, flags);
         if (e1000_write_phy_reg(hw, data->reg_num,
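Note: MII ioctl handler (presumably e1000_mii_ioctl()). SIOCGMIIREG and SIOCSMIIREG return -EPERM for EtherCAT devices, keeping userspace MII access away from a PHY the master controls; both guards gain braces.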
@@ -4834,12 +4859,13 @@
     struct net_device *netdev = pci_get_drvdata(pdev);
     struct e1000_adapter *adapter = netdev_priv(netdev);
     struct e1000_hw *hw = &adapter->hw;
     u32 err;
 
-    if (adapter->ecdev)
+    if (adapter->ecdev) {
         return -EBUSY;
+    }
 
     pci_set_power_state(pdev, PCI_D0);
     pci_restore_state(pdev);
     pci_save_state(pdev);
 
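Note: power-management resume (likely e1000_resume()). Resuming via the PCI PM path is refused with -EBUSY while the device is attached to EtherCAT, consistent with the suspend-side guards elsewhere in the driver.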