devices/e1000/e1000_main-2.6.18-ethercat.c
changeset 668 09438628d4a3
parent 667 9feff35c9617
child 671 2223549df36c
@@ -2366,11 +2366,12 @@
 		link = !adapter->hw.serdes_link_down;
 	else
 		link = E1000_READ_REG(&adapter->hw, STATUS) & E1000_STATUS_LU;
 
 	if (link) {
-		if (!netif_carrier_ok(netdev)) {
+		if ((adapter->ecdev && !ecdev_link_up(adapter->ecdev))
+                || (!adapter->ecdev && !netif_carrier_ok(netdev))) {
 			boolean_t txb2b = 1;
 			e1000_get_speed_and_duplex(&adapter->hw,
 			                           &adapter->link_speed,
 			                           &adapter->link_duplex);
 
@@ -2432,22 +2433,31 @@
 			 * after setting TARC0 */
 			tctl = E1000_READ_REG(&adapter->hw, TCTL);
 			tctl |= E1000_TCTL_EN;
 			E1000_WRITE_REG(&adapter->hw, TCTL, tctl);
 
-			netif_carrier_on(netdev);
-			netif_wake_queue(netdev);
+			if (adapter->ecdev) {
+                ecdev_link_state(1);
+            } else {
+                netif_carrier_on(netdev);
+                netif_wake_queue(netdev);
+            }
 			mod_timer(&adapter->phy_info_timer, jiffies + 2 * HZ);
 			adapter->smartspeed = 0;
 		}
 	} else {
-		if (netif_carrier_ok(netdev)) {
+		if ((adapter->ecdev && ecdev_link_up(adapter->ecdev))
+                || (!adapter->ecdev && netif_carrier_ok(netdev))) {
 			adapter->link_speed = 0;
 			adapter->link_duplex = 0;
 			DPRINTK(LINK, INFO, "NIC Link is Down\n");
-			netif_carrier_off(netdev);
-			netif_stop_queue(netdev);
+            if (adapter->ecdev) {
+                ecdev_link_state(0);
+            } else {
+                netif_carrier_off(netdev);
+                netif_stop_queue(netdev);
+            }
 			mod_timer(&adapter->phy_info_timer, jiffies + 2 * HZ);
 
 			/* 80003ES2LAN workaround--
 			 * For packet buffer work-around on link down event;
 			 * disable receives in the ISR and
@@ -2474,11 +2484,12 @@
 	adapter->gotcl = adapter->stats.gotcl - adapter->gotcl_old;
 	adapter->gotcl_old = adapter->stats.gotcl;
 
 	e1000_update_adaptive(&adapter->hw);
 
-	if (!netif_carrier_ok(netdev)) {
+	if ((adapter->ecdev && !ecdev_link_up(adapter->ecdev))
+            || (!adapter->ecdev && !netif_carrier_ok(netdev))) {
 		if (E1000_DESC_UNUSED(txdr) + 1 < txdr->count) {
 			/* We've lost link, so the controller stops DMA,
 			 * but we've got queued Tx work that's never going
 			 * to get done, so reset controller to flush Tx.
 			 * (Do the reset outside of interrupt context). */
@@ -2891,11 +2902,12 @@
 	len -= skb->data_len;
 
 	tx_ring = adapter->tx_ring;
 
 	if (unlikely(skb->len <= 0)) {
-		dev_kfree_skb_any(skb);
+        if (!adapter->ecdev)
+            dev_kfree_skb_any(skb);
 		return NETDEV_TX_OK;
 	}
 
 #ifdef NETIF_F_TSO
 	mss = skb_shinfo(skb)->gso_size;
@@ -2923,11 +2935,12 @@
 			case e1000_ich8lan:
 				pull_size = min((unsigned int)4, skb->data_len);
 				if (!__pskb_pull_tail(skb, pull_size)) {
 					DPRINTK(DRV, ERR,
 						"__pskb_pull_tail failed.\n");
-					dev_kfree_skb_any(skb);
+					if (!adapter->ecdev)
+                        dev_kfree_skb_any(skb);
 					return NETDEV_TX_OK;
 				}
 				len = skb->len - skb->data_len;
 				break;
 			default:
@@ -2974,30 +2987,36 @@
 
 	if (adapter->hw.tx_pkt_filtering &&
 	    (adapter->hw.mac_type == e1000_82573))
 		e1000_transfer_dhcp_info(adapter, skb);
 
-	local_irq_save(flags);
-	if (!spin_trylock(&tx_ring->tx_lock)) {
-		/* Collision - tell upper layer to requeue */
-		local_irq_restore(flags);
-		return NETDEV_TX_LOCKED;
-	}
+    if (!adapter->ecdev) {
+        local_irq_save(flags);
+        if (!spin_trylock(&tx_ring->tx_lock)) {
+            /* Collision - tell upper layer to requeue */
+            local_irq_restore(flags);
+            return NETDEV_TX_LOCKED;
+        }
+    }
 
 	/* need: count + 2 desc gap to keep tail from touching
 	 * head, otherwise try next time */
 	if (unlikely(E1000_DESC_UNUSED(tx_ring) < count + 2)) {
-		netif_stop_queue(netdev);
-		spin_unlock_irqrestore(&tx_ring->tx_lock, flags);
+        if (!adapter->ecdev) {
+            netif_stop_queue(netdev);
+            spin_unlock_irqrestore(&tx_ring->tx_lock, flags);
+        }
 		return NETDEV_TX_BUSY;
 	}
 
 	if (unlikely(adapter->hw.mac_type == e1000_82547)) {
 		if (unlikely(e1000_82547_fifo_workaround(adapter, skb))) {
-			netif_stop_queue(netdev);
+            if (!adapter->ecdev)
+                netif_stop_queue(netdev);
 			mod_timer(&adapter->tx_fifo_stall_timer, jiffies);
-			spin_unlock_irqrestore(&tx_ring->tx_lock, flags);
+            if (!adapter->ecdev)
+                spin_unlock_irqrestore(&tx_ring->tx_lock, flags);
 			return NETDEV_TX_BUSY;
 		}
 	}
 
 	if (unlikely(adapter->vlgrp && vlan_tx_tag_present(skb))) {
@@ -3007,12 +3026,14 @@
 
 	first = tx_ring->next_to_use;
 
 	tso = e1000_tso(adapter, tx_ring, skb);
 	if (tso < 0) {
-		dev_kfree_skb_any(skb);
-		spin_unlock_irqrestore(&tx_ring->tx_lock, flags);
+        if (!adapter->ecdev) {
+            dev_kfree_skb_any(skb);
+            spin_unlock_irqrestore(&tx_ring->tx_lock, flags);
+        }
 		return NETDEV_TX_OK;
 	}
 
 	if (likely(tso)) {
 		tx_ring->last_tx_tso = 1;
@@ -3031,14 +3052,16 @@
 	                            max_per_txd, nr_frags, mss));
 
 	netdev->trans_start = jiffies;
 
 	/* Make sure there is space in the ring for the next send. */
-	if (unlikely(E1000_DESC_UNUSED(tx_ring) < MAX_SKB_FRAGS + 2))
+	if (!adapter->ecdev) {
+        if (unlikely(E1000_DESC_UNUSED(tx_ring) < MAX_SKB_FRAGS + 2))
 		netif_stop_queue(netdev);
 
-	spin_unlock_irqrestore(&tx_ring->tx_lock, flags);
+        spin_unlock_irqrestore(&tx_ring->tx_lock, flags);
+    }
 	return NETDEV_TX_OK;
 }
 
 /**
  * e1000_tx_timeout - Respond to a Tx Hang
@@ -3164,11 +3187,11 @@
 	     (max_frame == MAXIMUM_ETHERNET_VLAN_SIZE)))
 		adapter->rx_buffer_len = MAXIMUM_ETHERNET_VLAN_SIZE;
 
 	netdev->mtu = new_mtu;
 
-	if (netif_running(netdev))
+	if (adapter->ecdev || netif_running(netdev))
 		e1000_reinit_locked(adapter);
 
 	adapter->hw.max_frame_size = max_frame;
 
 	return 0;
@@ -3196,11 +3219,12 @@
 	if (adapter->link_speed == 0)
 		return;
 	if (pdev->error_state && pdev->error_state != pci_channel_io_normal)
 		return;
 
-	spin_lock_irqsave(&adapter->stats_lock, flags);
+    if (!adapter->ecdev)
+        spin_lock_irqsave(&adapter->stats_lock, flags);
 
 	/* these counters are modified from e1000_adjust_tbi_stats,
 	 * called from the interrupt context, so they must only
 	 * be written while holding adapter->stats_lock
 	 */
@@ -3337,11 +3361,12 @@
 		   (hw->phy_type == e1000_phy_m88) &&
 		   !e1000_read_phy_reg(hw, M88E1000_RX_ERR_CNTR, &phy_tmp))
 			adapter->phy_stats.receive_errors += phy_tmp;
 	}
 
-	spin_unlock_irqrestore(&adapter->stats_lock, flags);
+    if (!adapter->ecdev)
+        spin_unlock_irqrestore(&adapter->stats_lock, flags);
 }
 
 /**
  * e1000_intr - Interrupt Handler
  * @irq: interrupt number
@@ -3367,11 +3392,11 @@
 		atomic_inc(&adapter->irq_sem);
 #endif
 
 	if (unlikely(!icr)) {
 #ifdef CONFIG_E1000_NAPI
-		if (hw->mac_type >= e1000_82571)
+		if (!adapter->ecdev && hw->mac_type >= e1000_82571)
 			e1000_irq_enable(adapter);
 #endif
 		return IRQ_NONE;  /* Not our interrupt */
 	}
 
@@ -3380,11 +3405,12 @@
 		/* 80003ES2LAN workaround--
 		 * For packet buffer work-around on link down event;
 		 * disable receives here in the ISR and
 		 * reset adapter in watchdog
 		 */
-		if (netif_carrier_ok(netdev) &&
+		if (((adapter->ecdev && ecdev_link_up(adapter->ecdev))
+                    || (!adapter->ecdev && netif_carrier_ok(netdev))) &&
 		    (adapter->hw.mac_type == e1000_80003es2lan)) {
 			/* disable receives */
 			rctl = E1000_READ_REG(hw, RCTL);
 			E1000_WRITE_REG(hw, RCTL, rctl & ~E1000_RCTL_EN);
 		}
@@ -3395,17 +3421,19 @@
 	if (unlikely(hw->mac_type < e1000_82571)) {
 		atomic_inc(&adapter->irq_sem);
 		E1000_WRITE_REG(hw, IMC, ~0);
 		E1000_WRITE_FLUSH(hw);
 	}
-	if (likely(netif_rx_schedule_prep(netdev)))
-		__netif_rx_schedule(netdev);
-	else
-		e1000_irq_enable(adapter);
+    if (!adapter->ecdev) {
+        if (likely(netif_rx_schedule_prep(netdev)))
+            __netif_rx_schedule(netdev);
+        else
+            e1000_irq_enable(adapter);
+    }
 #else
-	/* Writing IMC and IMS is needed for 82547.
-	 * Due to Hub Link bus being occupied, an interrupt
+        /* Writing IMC and IMS is needed for 82547.
+         * Due to Hub Link bus being occupied, an interrupt
 	 * de-assertion message is not able to be sent.
 	 * When an interrupt assertion message is generated later,
 	 * two messages are re-ordered and sent out.
 	 * That causes APIC to think 82547 is in de-assertion
 	 * state, while 82547 is in assertion state, resulting
@@ -3420,11 +3448,12 @@
 	for (i = 0; i < E1000_MAX_INTR; i++)
 		if (unlikely(!adapter->clean_rx(adapter, adapter->rx_ring) &
 		   !e1000_clean_tx_irq(adapter, adapter->tx_ring)))
 			break;
 
-	if (hw->mac_type == e1000_82547 || hw->mac_type == e1000_82547_rev_2)
+	if (!adapter->ecdev
+            && (hw->mac_type == e1000_82547 || hw->mac_type == e1000_82547_rev_2))
 		e1000_irq_enable(adapter);
 
 #endif
 
 	return IRQ_HANDLED;
@@ -3435,11 +3464,11 @@
  * e1000_clean - NAPI Rx polling callback
  * @adapter: board private structure
  **/
 
 static int
-e1000_clean(struct net_device *poll_dev, int *budget)
+e1000_clean(struct net_device *poll_dev, int *budget) // never called for EtherCAT
 {
 	struct e1000_adapter *adapter;
 	int work_to_do = min(*budget, poll_dev->quota);
 	int tx_cleaned = 0, work_done = 0;
 
@@ -3524,11 +3553,11 @@
 	}
 
 	tx_ring->next_to_clean = i;
 
 #define TX_WAKE_THRESHOLD 32
-	if (unlikely(cleaned && netif_queue_stopped(netdev) &&
+	if (unlikely(!adapter->ecdev && cleaned && netif_queue_stopped(netdev) &&
 	             netif_carrier_ok(netdev))) {
 		spin_lock(&tx_ring->tx_lock);
 		if (netif_queue_stopped(netdev) &&
 		    (E1000_DESC_UNUSED(tx_ring) >= TX_WAKE_THRESHOLD))
 			netif_wake_queue(netdev);
@@ -3693,11 +3722,11 @@
 			/* recycle */
 			buffer_info-> skb = skb;
 			goto next_desc;
 		}
 
-		if (unlikely(rx_desc->errors & E1000_RXD_ERR_FRAME_ERR_MASK)) {
+		if (!adapter->ecdev && unlikely(rx_desc->errors & E1000_RXD_ERR_FRAME_ERR_MASK)) {
 			last_byte = *(skb->data + length - 1);
 			if (TBI_ACCEPT(&adapter->hw, status,
 			              rx_desc->errors, length, last_byte)) {
 				spin_lock_irqsave(&adapter->stats_lock, flags);
 				e1000_tbi_adjust_stats(&adapter->hw,
@@ -3715,11 +3744,11 @@
 
 		/* code added for copybreak, this should improve
 		 * performance for small packets with large amounts
 		 * of reassembly being done in the stack */
 #define E1000_CB_LENGTH 256
-		if (length < E1000_CB_LENGTH) {
+		if (!adapter->ecdev && length < E1000_CB_LENGTH) {
 			struct sk_buff *new_skb =
 			    netdev_alloc_skb(netdev, length + NET_IP_ALIGN);
 			if (new_skb) {
 				skb_reserve(new_skb, NET_IP_ALIGN);
 				new_skb->dev = netdev;