/*******************************************************************************


  Copyright(c) 1999 - 2005 Intel Corporation. All rights reserved.

  This program is free software; you can redistribute it and/or modify it
  under the terms of the GNU General Public License as published by the Free
  Software Foundation; either version 2 of the License, or (at your option)
  any later version.

  This program is distributed in the hope that it will be useful, but WITHOUT
  ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
  FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
  more details.

  You should have received a copy of the GNU General Public License along with
  this program; if not, write to the Free Software Foundation, Inc., 59
  Temple Place - Suite 330, Boston, MA 02111-1307, USA.

  The full GNU General Public License is included in this distribution in the
  file called LICENSE.

  Contact Information:
  Linux NICS <linux.nics@intel.com>
  Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497

*******************************************************************************/

#include "e1000-2.6.13-ethercat.h"

/* Change Log
 * 6.0.58	4/20/05
 *   o Accepted ethtool cleanup patch from Stephen Hemminger
 * 6.0.44+	2/15/05
 *   o Applied Anton's patch to resolve tx hang in hardware
 *   o Applied Andrew Morton's patch - e1000 stops working after resume
 */

char e1000_driver_name[] = "ec_e1000";
static char e1000_driver_string[] = "EtherCAT Intel(R) PRO/1000 Network Driver";
#ifndef CONFIG_E1000_NAPI
#define DRIVERNAPI
#else
#define DRIVERNAPI "-NAPI"
#endif
#define DRV_VERSION "6.0.60-k2"DRIVERNAPI
char e1000_driver_version[] = DRV_VERSION;
char e1000_copyright[] = "Copyright (c) 1999-2005 Intel Corporation.";

/* e1000_pci_tbl - PCI Device ID Table
 *
 * Last entry must be all 0s
 *
 * Macro expands to...
 *   {PCI_DEVICE(PCI_VENDOR_ID_INTEL, device_id)}
 */
static struct pci_device_id e1000_pci_tbl[] = {
	INTEL_E1000_ETHERNET_DEVICE(0x1000),
	INTEL_E1000_ETHERNET_DEVICE(0x1001),
	INTEL_E1000_ETHERNET_DEVICE(0x1004),
	INTEL_E1000_ETHERNET_DEVICE(0x1008),
	INTEL_E1000_ETHERNET_DEVICE(0x1009),
	INTEL_E1000_ETHERNET_DEVICE(0x100C),
	INTEL_E1000_ETHERNET_DEVICE(0x100D),
	INTEL_E1000_ETHERNET_DEVICE(0x100E),
	INTEL_E1000_ETHERNET_DEVICE(0x100F),
	INTEL_E1000_ETHERNET_DEVICE(0x1010),
	INTEL_E1000_ETHERNET_DEVICE(0x1011),
	INTEL_E1000_ETHERNET_DEVICE(0x1012),
	INTEL_E1000_ETHERNET_DEVICE(0x1013),
	INTEL_E1000_ETHERNET_DEVICE(0x1014),
	INTEL_E1000_ETHERNET_DEVICE(0x1015),
	INTEL_E1000_ETHERNET_DEVICE(0x1016),
	INTEL_E1000_ETHERNET_DEVICE(0x1017),
	INTEL_E1000_ETHERNET_DEVICE(0x1018),
	INTEL_E1000_ETHERNET_DEVICE(0x1019),
	INTEL_E1000_ETHERNET_DEVICE(0x101A),
	INTEL_E1000_ETHERNET_DEVICE(0x101D),
	INTEL_E1000_ETHERNET_DEVICE(0x101E),
	INTEL_E1000_ETHERNET_DEVICE(0x1026),
	INTEL_E1000_ETHERNET_DEVICE(0x1027),
	INTEL_E1000_ETHERNET_DEVICE(0x1028),
	INTEL_E1000_ETHERNET_DEVICE(0x1075),
	INTEL_E1000_ETHERNET_DEVICE(0x1076),
	INTEL_E1000_ETHERNET_DEVICE(0x1077),
	INTEL_E1000_ETHERNET_DEVICE(0x1078),
	INTEL_E1000_ETHERNET_DEVICE(0x1079),
	INTEL_E1000_ETHERNET_DEVICE(0x107A),
	INTEL_E1000_ETHERNET_DEVICE(0x107B),
	INTEL_E1000_ETHERNET_DEVICE(0x107C),
	INTEL_E1000_ETHERNET_DEVICE(0x108A),
	INTEL_E1000_ETHERNET_DEVICE(0x108B),
	INTEL_E1000_ETHERNET_DEVICE(0x108C),
	INTEL_E1000_ETHERNET_DEVICE(0x1099),
	/* required last entry */
	{0,}
};

// do not auto-load driver
// MODULE_DEVICE_TABLE(pci, e1000_pci_tbl);
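// (binding through the device table would attach this driver to every e1000
//  NIC at boot, before the EtherCAT master can decide which ports to claim)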

int e1000_up(struct e1000_adapter *adapter);
void e1000_down(struct e1000_adapter *adapter);
void e1000_reset(struct e1000_adapter *adapter);
int e1000_set_spd_dplx(struct e1000_adapter *adapter, uint16_t spddplx);
int e1000_setup_tx_resources(struct e1000_adapter *adapter);
int e1000_setup_rx_resources(struct e1000_adapter *adapter);
void e1000_free_tx_resources(struct e1000_adapter *adapter);
void e1000_free_rx_resources(struct e1000_adapter *adapter);
void e1000_update_stats(struct e1000_adapter *adapter);

/* Local Function Prototypes */

static int e1000_init_module(void);
static void e1000_exit_module(void);
static int e1000_probe(struct pci_dev *pdev, const struct pci_device_id *ent);
static void __devexit e1000_remove(struct pci_dev *pdev);
static int e1000_sw_init(struct e1000_adapter *adapter);
static int e1000_open(struct net_device *netdev);
static int e1000_close(struct net_device *netdev);
static void e1000_configure_tx(struct e1000_adapter *adapter);
static void e1000_configure_rx(struct e1000_adapter *adapter);
static void e1000_setup_rctl(struct e1000_adapter *adapter);
static void e1000_clean_tx_ring(struct e1000_adapter *adapter);
static void e1000_clean_rx_ring(struct e1000_adapter *adapter);
static void e1000_set_multi(struct net_device *netdev);
static void e1000_update_phy_info(unsigned long data);
static void e1000_watchdog(unsigned long data);
static void e1000_watchdog_task(struct e1000_adapter *adapter);
static void e1000_82547_tx_fifo_stall(unsigned long data);
static int e1000_xmit_frame(struct sk_buff *skb, struct net_device *netdev);
static struct net_device_stats * e1000_get_stats(struct net_device *netdev);
static int e1000_change_mtu(struct net_device *netdev, int new_mtu);
static int e1000_set_mac(struct net_device *netdev, void *p);
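/* EtherCAT: ec_poll() is called cyclically by the master instead of the
 * interrupt handler once the device has been claimed (adapter->ecdev set). */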
void ec_poll(struct net_device *);
static irqreturn_t e1000_intr(int irq, void *data, struct pt_regs *regs);
static boolean_t e1000_clean_tx_irq(struct e1000_adapter *adapter);
#ifdef CONFIG_E1000_NAPI
static int e1000_clean(struct net_device *netdev, int *budget);
static boolean_t e1000_clean_rx_irq(struct e1000_adapter *adapter,
                                    int *work_done, int work_to_do);
static boolean_t e1000_clean_rx_irq_ps(struct e1000_adapter *adapter,
                                       int *work_done, int work_to_do);
#else
static boolean_t e1000_clean_rx_irq(struct e1000_adapter *adapter);
static boolean_t e1000_clean_rx_irq_ps(struct e1000_adapter *adapter);
#endif
static void e1000_alloc_rx_buffers(struct e1000_adapter *adapter);
static void e1000_alloc_rx_buffers_ps(struct e1000_adapter *adapter);
static int e1000_ioctl(struct net_device *netdev, struct ifreq *ifr, int cmd);
static int e1000_mii_ioctl(struct net_device *netdev, struct ifreq *ifr,
                           int cmd);
void e1000_set_ethtool_ops(struct net_device *netdev);
static void e1000_enter_82542_rst(struct e1000_adapter *adapter);
static void e1000_leave_82542_rst(struct e1000_adapter *adapter);
static void e1000_tx_timeout(struct net_device *dev);
static void e1000_tx_timeout_task(struct net_device *dev);
static void e1000_smartspeed(struct e1000_adapter *adapter);
static inline int e1000_82547_fifo_workaround(struct e1000_adapter *adapter,
                                              struct sk_buff *skb);

static void e1000_vlan_rx_register(struct net_device *netdev, struct vlan_group *grp);
static void e1000_vlan_rx_add_vid(struct net_device *netdev, uint16_t vid);
static void e1000_vlan_rx_kill_vid(struct net_device *netdev, uint16_t vid);
static void e1000_restore_vlan(struct e1000_adapter *adapter);

static int e1000_suspend(struct pci_dev *pdev, uint32_t state);
#ifdef CONFIG_PM
static int e1000_resume(struct pci_dev *pdev);
#endif

#ifdef CONFIG_NET_POLL_CONTROLLER
/* for netdump / net console */
static void e1000_netpoll (struct net_device *netdev);
#endif

/* Exported from other modules */

extern void e1000_check_options(struct e1000_adapter *adapter);

static struct pci_driver e1000_driver = {
	.name     = e1000_driver_name,
	.id_table = e1000_pci_tbl,
	.probe    = e1000_probe,
	.remove   = __devexit_p(e1000_remove),
	/* Power Management Hooks */
#ifdef CONFIG_PM
	.suspend  = e1000_suspend,
	.resume   = e1000_resume
#endif
};

MODULE_AUTHOR("Florian Pose <fp@igh-essen.com>");
MODULE_DESCRIPTION("EtherCAT-capable Intel(R) PRO/1000 Network Driver");
MODULE_LICENSE("GPL");
MODULE_VERSION(DRV_VERSION);

static int debug = NETIF_MSG_DRV | NETIF_MSG_PROBE;
module_param(debug, int, 0);
MODULE_PARM_DESC(debug, "Debug level (0=none,...,16=all)");

/**
 * e1000_init_module - Driver Registration Routine
 *
 * e1000_init_module is the first routine called when the driver is
 * loaded. All it does is register with the PCI subsystem.
 **/

static int __init
e1000_init_module(void)
{
	int ret;
	printk(KERN_INFO "%s - version %s\n",
	       e1000_driver_string, e1000_driver_version);

	printk(KERN_INFO "%s\n", e1000_copyright);

	ret = pci_module_init(&e1000_driver);

	return ret;
}

module_init(e1000_init_module);

/**
 * e1000_exit_module - Driver Exit Cleanup Routine
 *
 * e1000_exit_module is called just before the driver is removed
 * from memory.
 **/

static void __exit
e1000_exit_module(void)
{
	pci_unregister_driver(&e1000_driver);
}

module_exit(e1000_exit_module);

/**
 * e1000_irq_disable - Mask off interrupt generation on the NIC
 * @adapter: board private structure
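 *
 * Interrupt masking nests via adapter->irq_sem: each disable increments the
 * counter, and e1000_irq_enable() only unmasks interrupts again once it has
 * dropped back to zero.  Both calls are no-ops in EtherCAT mode
 * (adapter->ecdev set), since no hardware interrupt is requested then.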
 **/

static inline void
e1000_irq_disable(struct e1000_adapter *adapter)
{
	if (adapter->ecdev)
		return;
	atomic_inc(&adapter->irq_sem);
	E1000_WRITE_REG(&adapter->hw, IMC, ~0);
	E1000_WRITE_FLUSH(&adapter->hw);
	synchronize_irq(adapter->pdev->irq);
}

/**
 * e1000_irq_enable - Enable default interrupt generation settings
 * @adapter: board private structure
 **/

static inline void
e1000_irq_enable(struct e1000_adapter *adapter)
{
	if (adapter->ecdev)
		return;
	if(likely(atomic_dec_and_test(&adapter->irq_sem))) {
		E1000_WRITE_REG(&adapter->hw, IMS, IMS_ENABLE_MASK);
		E1000_WRITE_FLUSH(&adapter->hw);
	}
}
void
e1000_update_mng_vlan(struct e1000_adapter *adapter)
{
	struct net_device *netdev = adapter->netdev;
	uint16_t vid = adapter->hw.mng_cookie.vlan_id;
	uint16_t old_vid = adapter->mng_vlan_id;
	if(adapter->vlgrp) {
		if(!adapter->vlgrp->vlan_devices[vid]) {
			if(adapter->hw.mng_cookie.status &
				E1000_MNG_DHCP_COOKIE_STATUS_VLAN_SUPPORT) {
				e1000_vlan_rx_add_vid(netdev, vid);
				adapter->mng_vlan_id = vid;
			} else
				adapter->mng_vlan_id = E1000_MNG_VLAN_NONE;

			if((old_vid != (uint16_t)E1000_MNG_VLAN_NONE) &&
					(vid != old_vid) &&
					!adapter->vlgrp->vlan_devices[old_vid])
				e1000_vlan_rx_kill_vid(netdev, old_vid);
		}
	}
}

int
e1000_up(struct e1000_adapter *adapter)
{
	struct net_device *netdev = adapter->netdev;
	int err;

	/* hardware has been reset, we need to reload some things */

	/* Reset the PHY if it was previously powered down */
	if(adapter->hw.media_type == e1000_media_type_copper) {
		uint16_t mii_reg;
		e1000_read_phy_reg(&adapter->hw, PHY_CTRL, &mii_reg);
		if(mii_reg & MII_CR_POWER_DOWN)
			e1000_phy_reset(&adapter->hw);
	}

	e1000_set_multi(netdev);

	e1000_restore_vlan(adapter);

	e1000_configure_tx(adapter);
	e1000_setup_rctl(adapter);
	e1000_configure_rx(adapter);
	adapter->alloc_rx_buf(adapter);

#ifdef CONFIG_PCI_MSI
	if(adapter->hw.mac_type > e1000_82547_rev_2) {
		adapter->have_msi = TRUE;
		if((err = pci_enable_msi(adapter->pdev))) {
			DPRINTK(PROBE, ERR,
				"Unable to allocate MSI interrupt Error: %d\n", err);
			adapter->have_msi = FALSE;
		}
	}
#endif
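	/* in EtherCAT mode neither an IRQ handler nor the watchdog timer nor
	 * NAPI polling is set up; the master drives the adapter exclusively
	 * through ec_poll() */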
	if (!adapter->ecdev) {
		if((err = request_irq(adapter->pdev->irq, &e1000_intr,
				      SA_SHIRQ | SA_SAMPLE_RANDOM,
				      netdev->name, netdev))) {
			DPRINTK(PROBE, ERR,
				"Unable to allocate interrupt Error: %d\n", err);
			return err;
		}

		mod_timer(&adapter->watchdog_timer, jiffies);

#ifdef CONFIG_E1000_NAPI
		netif_poll_enable(netdev);
#endif
		e1000_irq_enable(adapter);
	}

	return 0;
}

void
e1000_down(struct e1000_adapter *adapter)
{
	struct net_device *netdev = adapter->netdev;

	if (!adapter->ecdev) {
		e1000_irq_disable(adapter);
		free_irq(adapter->pdev->irq, netdev);
	}
#ifdef CONFIG_PCI_MSI
	if(adapter->hw.mac_type > e1000_82547_rev_2 &&
	   adapter->have_msi == TRUE)
		pci_disable_msi(adapter->pdev);
#endif
	if (!adapter->ecdev) {
		del_timer_sync(&adapter->tx_fifo_stall_timer);
		del_timer_sync(&adapter->watchdog_timer);
		del_timer_sync(&adapter->phy_info_timer);

#ifdef CONFIG_E1000_NAPI
		netif_poll_disable(netdev);
#endif
	}
	adapter->link_speed = 0;
	adapter->link_duplex = 0;
	if (!adapter->ecdev) {
		netif_carrier_off(netdev);
		netif_stop_queue(netdev);
	}

	e1000_reset(adapter);
	e1000_clean_tx_ring(adapter);
	e1000_clean_rx_ring(adapter);

	/* If WoL is not enabled and management mode is not IAMT,
	 * power down the PHY so no link is implied when the interface
	 * is down. */
	if(!adapter->wol && adapter->hw.mac_type >= e1000_82540 &&
	   adapter->hw.media_type == e1000_media_type_copper &&
	   !e1000_check_mng_mode(&adapter->hw) &&
	   !(E1000_READ_REG(&adapter->hw, MANC) & E1000_MANC_SMBUS_EN)) {
		uint16_t mii_reg;
		e1000_read_phy_reg(&adapter->hw, PHY_CTRL, &mii_reg);
		mii_reg |= MII_CR_POWER_DOWN;
		e1000_write_phy_reg(&adapter->hw, PHY_CTRL, mii_reg);
		mdelay(1);
	}
}

void
e1000_reset(struct e1000_adapter *adapter)
{
	struct net_device *netdev = adapter->netdev;
	uint32_t pba, manc;
	uint16_t fc_high_water_mark = E1000_FC_HIGH_DIFF;
	uint16_t fc_low_water_mark = E1000_FC_LOW_DIFF;

	/* Repartition Pba for greater than 9k mtu
	 * To take effect CTRL.RST is required.
	 */

	switch (adapter->hw.mac_type) {
	case e1000_82547:
	case e1000_82547_rev_2:
		pba = E1000_PBA_30K;
		break;
	case e1000_82573:
		pba = E1000_PBA_12K;
		break;
	default:
		pba = E1000_PBA_48K;
		break;
	}

	if((adapter->hw.mac_type != e1000_82573) &&
	   (adapter->rx_buffer_len > E1000_RXBUFFER_8192)) {
		pba -= 8; /* allocate more FIFO for Tx */
		/* send an XOFF when there is enough space in the
		 * Rx FIFO to hold one extra full size Rx packet
		 */
		fc_high_water_mark = netdev->mtu + ENET_HEADER_SIZE +
			ETHERNET_FCS_SIZE + 1;
		fc_low_water_mark = fc_high_water_mark + 8;
	}


	if(adapter->hw.mac_type == e1000_82547) {
		adapter->tx_fifo_head = 0;
		adapter->tx_head_addr = pba << E1000_TX_HEAD_ADDR_SHIFT;
		adapter->tx_fifo_size =
			(E1000_PBA_40K - pba) << E1000_PBA_BYTES_SHIFT;
		atomic_set(&adapter->tx_fifo_stall, 0);
	}

	E1000_WRITE_REG(&adapter->hw, PBA, pba);

	/* flow control settings */
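	/* the water marks are measured down from the top of the Rx packet
	 * buffer: pba is in kB, E1000_PBA_BYTES_SHIFT converts it to bytes */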
	adapter->hw.fc_high_water = (pba << E1000_PBA_BYTES_SHIFT) -
				    fc_high_water_mark;
	adapter->hw.fc_low_water = (pba << E1000_PBA_BYTES_SHIFT) -
				   fc_low_water_mark;
	adapter->hw.fc_pause_time = E1000_FC_PAUSE_TIME;
	adapter->hw.fc_send_xon = 1;
	adapter->hw.fc = adapter->hw.original_fc;

	/* Allow time for pending master requests to run */
	e1000_reset_hw(&adapter->hw);
	if(adapter->hw.mac_type >= e1000_82544)
		E1000_WRITE_REG(&adapter->hw, WUC, 0);
	if(e1000_init_hw(&adapter->hw))
		DPRINTK(PROBE, ERR, "Hardware Error\n");
	e1000_update_mng_vlan(adapter);
	/* Enable h/w to recognize an 802.1Q VLAN Ethernet packet */
	E1000_WRITE_REG(&adapter->hw, VET, ETHERNET_IEEE_VLAN_TYPE);

	e1000_reset_adaptive(&adapter->hw);
	e1000_phy_get_info(&adapter->hw, &adapter->phy_info);
	if (adapter->en_mng_pt) {
		manc = E1000_READ_REG(&adapter->hw, MANC);
		manc |= (E1000_MANC_ARP_EN | E1000_MANC_EN_MNG2HOST);
		E1000_WRITE_REG(&adapter->hw, MANC, manc);
	}
}

/**
 * e1000_probe - Device Initialization Routine
 * @pdev: PCI device information struct
 * @ent: entry in e1000_pci_tbl
 *
 * Returns 0 on success, negative on failure
 *
 * e1000_probe initializes an adapter identified by a pci_dev structure.
 * The OS initialization, configuring of the adapter private structure,
 * and a hardware reset occur.
 **/

static int __devinit
e1000_probe(struct pci_dev *pdev,
            const struct pci_device_id *ent)
{
	struct net_device *netdev;
	struct e1000_adapter *adapter;
	unsigned long mmio_start, mmio_len;
	uint32_t swsm;

	static int cards_found = 0;
	int i, err, pci_using_dac;
	uint16_t eeprom_data;
	uint16_t eeprom_apme_mask = E1000_EEPROM_APME;
	if((err = pci_enable_device(pdev)))
		return err;

	if(!(err = pci_set_dma_mask(pdev, DMA_64BIT_MASK))) {
		pci_using_dac = 1;
	} else {
		if((err = pci_set_dma_mask(pdev, DMA_32BIT_MASK))) {
			E1000_ERR("No usable DMA configuration, aborting\n");
			return err;
		}
		pci_using_dac = 0;
	}

	if((err = pci_request_regions(pdev, e1000_driver_name)))
		return err;

	pci_set_master(pdev);

	netdev = alloc_etherdev(sizeof(struct e1000_adapter));
	if(!netdev) {
		err = -ENOMEM;
		goto err_alloc_etherdev;
	}

	SET_MODULE_OWNER(netdev);
	SET_NETDEV_DEV(netdev, &pdev->dev);

	pci_set_drvdata(pdev, netdev);
	adapter = netdev_priv(netdev);
	adapter->netdev = netdev;
	adapter->pdev = pdev;
	adapter->hw.back = adapter;
	adapter->msg_enable = (1 << debug) - 1;

	mmio_start = pci_resource_start(pdev, BAR_0);
	mmio_len = pci_resource_len(pdev, BAR_0);

	adapter->hw.hw_addr = ioremap(mmio_start, mmio_len);
	if(!adapter->hw.hw_addr) {
		err = -EIO;
		goto err_ioremap;
	}

	for(i = BAR_1; i <= BAR_5; i++) {
		if(pci_resource_len(pdev, i) == 0)
			continue;
		if(pci_resource_flags(pdev, i) & IORESOURCE_IO) {
			adapter->hw.io_base = pci_resource_start(pdev, i);
			break;
		}
	}

	netdev->open = &e1000_open;
	netdev->stop = &e1000_close;
	netdev->hard_start_xmit = &e1000_xmit_frame;
	netdev->get_stats = &e1000_get_stats;
	netdev->set_multicast_list = &e1000_set_multi;
	netdev->set_mac_address = &e1000_set_mac;
	netdev->change_mtu = &e1000_change_mtu;
	netdev->do_ioctl = &e1000_ioctl;
	e1000_set_ethtool_ops(netdev);
	netdev->tx_timeout = &e1000_tx_timeout;
	netdev->watchdog_timeo = 5 * HZ;
#ifdef CONFIG_E1000_NAPI
	netdev->poll = &e1000_clean;
	netdev->weight = 64;
#endif
	netdev->vlan_rx_register = e1000_vlan_rx_register;
	netdev->vlan_rx_add_vid = e1000_vlan_rx_add_vid;
	netdev->vlan_rx_kill_vid = e1000_vlan_rx_kill_vid;
#ifdef CONFIG_NET_POLL_CONTROLLER
	netdev->poll_controller = e1000_netpoll;
#endif
	strcpy(netdev->name, pci_name(pdev));

	netdev->mem_start = mmio_start;
	netdev->mem_end = mmio_start + mmio_len;
	netdev->base_addr = adapter->hw.io_base;

	adapter->bd_number = cards_found;

	/* setup the private structure */

	if((err = e1000_sw_init(adapter)))
		goto err_sw_init;

	if((err = e1000_check_phy_reset_block(&adapter->hw)))
		DPRINTK(PROBE, INFO, "PHY reset is blocked due to SOL/IDER session.\n");

	if(adapter->hw.mac_type >= e1000_82543) {
		netdev->features = NETIF_F_SG |
				   NETIF_F_HW_CSUM |
				   NETIF_F_HW_VLAN_TX |
				   NETIF_F_HW_VLAN_RX |
				   NETIF_F_HW_VLAN_FILTER;
	}

#ifdef NETIF_F_TSO
	if((adapter->hw.mac_type >= e1000_82544) &&
	   (adapter->hw.mac_type != e1000_82547))
		netdev->features |= NETIF_F_TSO;

#ifdef NETIF_F_TSO_IPV6
	if(adapter->hw.mac_type > e1000_82547_rev_2)
		netdev->features |= NETIF_F_TSO_IPV6;
#endif
#endif
	if(pci_using_dac)
		netdev->features |= NETIF_F_HIGHDMA;

	/* hard_start_xmit is safe against parallel locking */
	netdev->features |= NETIF_F_LLTX;

	adapter->en_mng_pt = e1000_enable_mng_pass_thru(&adapter->hw);

	/* before reading the EEPROM, reset the controller to
	 * put the device in a known good starting state */

	e1000_reset_hw(&adapter->hw);

	/* make sure the EEPROM is good */

	if(e1000_validate_eeprom_checksum(&adapter->hw) < 0) {
		DPRINTK(PROBE, ERR, "The EEPROM Checksum Is Not Valid\n");
		err = -EIO;
		goto err_eeprom;
	}

	/* copy the MAC address out of the EEPROM */

	if(e1000_read_mac_addr(&adapter->hw))
		DPRINTK(PROBE, ERR, "EEPROM Read Error\n");
	memcpy(netdev->dev_addr, adapter->hw.mac_addr, netdev->addr_len);

	if(!is_valid_ether_addr(netdev->dev_addr)) {
		DPRINTK(PROBE, ERR, "Invalid MAC Address\n");
		err = -EIO;
		goto err_eeprom;
	}

	e1000_read_part_num(&adapter->hw, &(adapter->part_num));

	e1000_get_bus_info(&adapter->hw);

	init_timer(&adapter->tx_fifo_stall_timer);
	adapter->tx_fifo_stall_timer.function = &e1000_82547_tx_fifo_stall;
	adapter->tx_fifo_stall_timer.data = (unsigned long) adapter;

	init_timer(&adapter->watchdog_timer);
	adapter->watchdog_timer.function = &e1000_watchdog;
	adapter->watchdog_timer.data = (unsigned long) adapter;

	INIT_WORK(&adapter->watchdog_task,
		(void (*)(void *))e1000_watchdog_task, adapter);

	init_timer(&adapter->phy_info_timer);
	adapter->phy_info_timer.function = &e1000_update_phy_info;
	adapter->phy_info_timer.data = (unsigned long) adapter;

	INIT_WORK(&adapter->tx_timeout_task,
		(void (*)(void *))e1000_tx_timeout_task, netdev);

	/* we're going to reset, so assume we have no link for now */

	if (!adapter->ecdev) {
		netif_carrier_off(netdev);
		netif_stop_queue(netdev);
	}

	e1000_check_options(adapter);

	/* Initial Wake on LAN setting
	 * If APM wake is enabled in the EEPROM,
	 * enable the ACPI Magic Packet filter
	 */

	switch(adapter->hw.mac_type) {
	case e1000_82542_rev2_0:
	case e1000_82542_rev2_1:
	case e1000_82543:
		break;
	case e1000_82544:
		e1000_read_eeprom(&adapter->hw,
			EEPROM_INIT_CONTROL2_REG, 1, &eeprom_data);
		eeprom_apme_mask = E1000_EEPROM_82544_APM;
		break;
	case e1000_82546:
	case e1000_82546_rev_3:
		if((E1000_READ_REG(&adapter->hw, STATUS) & E1000_STATUS_FUNC_1)
		   && (adapter->hw.media_type == e1000_media_type_copper)) {
			e1000_read_eeprom(&adapter->hw,
				EEPROM_INIT_CONTROL3_PORT_B, 1, &eeprom_data);
			break;
		}
		/* Fall Through */
	default:
		e1000_read_eeprom(&adapter->hw,
			EEPROM_INIT_CONTROL3_PORT_A, 1, &eeprom_data);
		break;
	}
	if(eeprom_data & eeprom_apme_mask)
		adapter->wol |= E1000_WUFC_MAG;

	/* reset the hardware with the new settings */
	e1000_reset(adapter);

	/* Let firmware know the driver has taken over */
	switch(adapter->hw.mac_type) {
	case e1000_82573:
		swsm = E1000_READ_REG(&adapter->hw, SWSM);
		E1000_WRITE_REG(&adapter->hw, SWSM,
			swsm | E1000_SWSM_DRV_LOAD);
		break;
	default:
		break;
	}

	// offer device to EtherCAT master module
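	// (the master sets adapter->ecdev if it claims this port; otherwise the
	// device is registered with the kernel network stack below)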
	if (ecdev_offer(netdev, ec_poll, THIS_MODULE, &adapter->ecdev)) {
		DPRINTK(PROBE, ERR, "Failed to offer device.\n");
		goto err_register;
	}

	if (adapter->ecdev) {
		if (ecdev_open(adapter->ecdev)) {
			ecdev_withdraw(adapter->ecdev);
			goto err_register;
		}
	} else {
		strcpy(netdev->name, "eth%d");
		if ((err = register_netdev(netdev)))
			goto err_register;
	}

	DPRINTK(PROBE, INFO, "Intel(R) PRO/1000 Network Connection\n");

	cards_found++;
	return 0;

err_register:
err_sw_init:
err_eeprom:
	iounmap(adapter->hw.hw_addr);
err_ioremap:
	free_netdev(netdev);
err_alloc_etherdev:
	pci_release_regions(pdev);
	return err;
}

/**
 * e1000_remove - Device Removal Routine
 * @pdev: PCI device information struct
 *
 * e1000_remove is called by the PCI subsystem to alert the driver
 * that it should release a PCI device.  This could be caused by a
 * Hot-Plug event, or because the driver is going to be removed from
 * memory.
 **/

static void __devexit
e1000_remove(struct pci_dev *pdev)
{
	struct net_device *netdev = pci_get_drvdata(pdev);
	struct e1000_adapter *adapter = netdev_priv(netdev);
	uint32_t manc, swsm;

	flush_scheduled_work();

	if(adapter->hw.mac_type >= e1000_82540 &&
	   adapter->hw.media_type == e1000_media_type_copper) {
		manc = E1000_READ_REG(&adapter->hw, MANC);
		if(manc & E1000_MANC_SMBUS_EN) {
			manc |= E1000_MANC_ARP_EN;
			E1000_WRITE_REG(&adapter->hw, MANC, manc);
		}
	}

	switch(adapter->hw.mac_type) {
	case e1000_82573:
		swsm = E1000_READ_REG(&adapter->hw, SWSM);
		E1000_WRITE_REG(&adapter->hw, SWSM,
			swsm & ~E1000_SWSM_DRV_LOAD);
		break;

	default:
		break;
	}

	if (adapter->ecdev) {
		ecdev_close(adapter->ecdev);
		ecdev_withdraw(adapter->ecdev);
	} else {
		unregister_netdev(netdev);
	}

	if(!e1000_check_phy_reset_block(&adapter->hw))
		e1000_phy_hw_reset(&adapter->hw);

	iounmap(adapter->hw.hw_addr);
	pci_release_regions(pdev);

	free_netdev(netdev);

	pci_disable_device(pdev);
}

/**
 * e1000_sw_init - Initialize general software structures (struct e1000_adapter)
 * @adapter: board private structure to initialize
 *
 * e1000_sw_init initializes the Adapter private data structure.
 * Fields are initialized based on PCI device information and
 * OS network device settings (MTU size).
 **/

static int __devinit
e1000_sw_init(struct e1000_adapter *adapter)
{
	struct e1000_hw *hw = &adapter->hw;
	struct net_device *netdev = adapter->netdev;
	struct pci_dev *pdev = adapter->pdev;

	/* PCI config space info */

	hw->vendor_id = pdev->vendor;
	hw->device_id = pdev->device;
	hw->subsystem_vendor_id = pdev->subsystem_vendor;
	hw->subsystem_id = pdev->subsystem_device;

	pci_read_config_byte(pdev, PCI_REVISION_ID, &hw->revision_id);

	pci_read_config_word(pdev, PCI_COMMAND, &hw->pci_cmd_word);

	adapter->rx_buffer_len = E1000_RXBUFFER_2048;
	adapter->rx_ps_bsize0 = E1000_RXBUFFER_256;
	hw->max_frame_size = netdev->mtu +
			     ENET_HEADER_SIZE + ETHERNET_FCS_SIZE;
	hw->min_frame_size = MINIMUM_ETHERNET_FRAME_SIZE;

	/* identify the MAC */

	if(e1000_set_mac_type(hw)) {
		DPRINTK(PROBE, ERR, "Unknown MAC Type\n");
		return -EIO;
	}

	/* initialize eeprom parameters */

	if(e1000_init_eeprom_params(hw)) {
		E1000_ERR("EEPROM initialization failed\n");
		return -EIO;
	}

	switch(hw->mac_type) {
	default:
		break;
	case e1000_82541:
	case e1000_82547:
	case e1000_82541_rev_2:
	case e1000_82547_rev_2:
		hw->phy_init_script = 1;
		break;
	}

	e1000_set_media_type(hw);

	hw->wait_autoneg_complete = FALSE;
	hw->tbi_compatibility_en = TRUE;
	hw->adaptive_ifs = TRUE;

	/* Copper options */

	if(hw->media_type == e1000_media_type_copper) {
		hw->mdix = AUTO_ALL_MODES;
		hw->disable_polarity_correction = FALSE;
		hw->master_slave = E1000_MASTER_SLAVE;
	}

	atomic_set(&adapter->irq_sem, 1);
	spin_lock_init(&adapter->stats_lock);
	spin_lock_init(&adapter->tx_lock);

	return 0;
}

/**
 * e1000_open - Called when a network interface is made active
 * @netdev: network interface device structure
 *
 * Returns 0 on success, negative value on failure
 *
 * The open entry point is called when a network interface is made
 * active by the system (IFF_UP).  At this point all resources needed
 * for transmit and receive operations are allocated, the interrupt
 * handler is registered with the OS, the watchdog timer is started,
 * and the stack is notified that the interface is ready.
 **/

static int
e1000_open(struct net_device *netdev)
{
	struct e1000_adapter *adapter = netdev_priv(netdev);
	int err;

	/* allocate transmit descriptors */

	if((err = e1000_setup_tx_resources(adapter)))
		goto err_setup_tx;

	/* allocate receive descriptors */

	if((err = e1000_setup_rx_resources(adapter)))
		goto err_setup_rx;

	if((err = e1000_up(adapter)))
		goto err_up;
	adapter->mng_vlan_id = E1000_MNG_VLAN_NONE;
	if((adapter->hw.mng_cookie.status &
	    E1000_MNG_DHCP_COOKIE_STATUS_VLAN_SUPPORT)) {
		e1000_update_mng_vlan(adapter);
	}

	return E1000_SUCCESS;

err_up:
	e1000_free_rx_resources(adapter);
err_setup_rx:
	e1000_free_tx_resources(adapter);
err_setup_tx:
	e1000_reset(adapter);

	return err;
}

/**
 * e1000_close - Disables a network interface
 * @netdev: network interface device structure
 *
 * Returns 0, this is not allowed to fail
 *
 * The close entry point is called when an interface is de-activated
 * by the OS.  The hardware is still under the driver's control, but
 * needs to be disabled.  A global MAC reset is issued to stop the
 * hardware, and all transmit and receive resources are freed.
 **/

static int
e1000_close(struct net_device *netdev)
{
	struct e1000_adapter *adapter = netdev_priv(netdev);

	e1000_down(adapter);

	e1000_free_tx_resources(adapter);
	e1000_free_rx_resources(adapter);

	if((adapter->hw.mng_cookie.status &
	    E1000_MNG_DHCP_COOKIE_STATUS_VLAN_SUPPORT)) {
		e1000_vlan_rx_kill_vid(netdev, adapter->mng_vlan_id);
	}
	return 0;
}

/**
 * e1000_check_64k_bound - check that memory doesn't cross 64kB boundary
 * @adapter: address of board private structure
 * @start: address of beginning of memory
 * @len: length of memory
 **/
static inline boolean_t
e1000_check_64k_bound(struct e1000_adapter *adapter,
                      void *start, unsigned long len)
{
	unsigned long begin = (unsigned long) start;
	unsigned long end = begin + len;

	/* Early revisions of the 82545 and 82546 must not let any memory
	 * write span a 64 kB boundary (errata 23) */
	if (adapter->hw.mac_type == e1000_82545 ||
	    adapter->hw.mac_type == e1000_82546) {
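		/* begin and end-1 lie in the same 64 kB page iff their upper
		 * address bits agree */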
		return ((begin ^ (end - 1)) >> 16) != 0 ? FALSE : TRUE;
	}

	return TRUE;
}

/**
 * e1000_setup_tx_resources - allocate Tx resources (Descriptors)
 * @adapter: board private structure
 *
 * Return 0 on success, negative on failure
 **/

int
e1000_setup_tx_resources(struct e1000_adapter *adapter)
{
	struct e1000_desc_ring *txdr = &adapter->tx_ring;
	struct pci_dev *pdev = adapter->pdev;
	int size;

	size = sizeof(struct e1000_buffer) * txdr->count;
	txdr->buffer_info = vmalloc(size);
	if(!txdr->buffer_info) {
		DPRINTK(PROBE, ERR,
		"Unable to allocate memory for the transmit descriptor ring\n");
		return -ENOMEM;
	}
	memset(txdr->buffer_info, 0, size);

	/* round up to nearest 4K */

	txdr->size = txdr->count * sizeof(struct e1000_tx_desc);
	E1000_ROUNDUP(txdr->size, 4096);

	txdr->desc = pci_alloc_consistent(pdev, txdr->size, &txdr->dma);
	if(!txdr->desc) {
setup_tx_desc_die:
		vfree(txdr->buffer_info);
		DPRINTK(PROBE, ERR,
		"Unable to allocate memory for the transmit descriptor ring\n");
		return -ENOMEM;
	}

	/* Fix for errata 23, can't cross 64kB boundary */
	if (!e1000_check_64k_bound(adapter, txdr->desc, txdr->size)) {
		void *olddesc = txdr->desc;
		dma_addr_t olddma = txdr->dma;
		DPRINTK(TX_ERR, ERR, "txdr align check failed: %u bytes "
			"at %p\n", txdr->size, txdr->desc);
		/* Try again, without freeing the previous */
		txdr->desc = pci_alloc_consistent(pdev, txdr->size, &txdr->dma);
		if(!txdr->desc) {
			/* Failed allocation, critical failure */
			pci_free_consistent(pdev, txdr->size, olddesc, olddma);
			goto setup_tx_desc_die;
		}

		if (!e1000_check_64k_bound(adapter, txdr->desc, txdr->size)) {
			/* give up */
			pci_free_consistent(pdev, txdr->size, txdr->desc,
					    txdr->dma);
			pci_free_consistent(pdev, txdr->size, olddesc, olddma);
			DPRINTK(PROBE, ERR,
				"Unable to allocate aligned memory "
				"for the transmit descriptor ring\n");
			vfree(txdr->buffer_info);
			return -ENOMEM;
		} else {
			/* Free old allocation, new allocation was successful */
			pci_free_consistent(pdev, txdr->size, olddesc, olddma);
		}
	}
	memset(txdr->desc, 0, txdr->size);

	txdr->next_to_use = 0;
	txdr->next_to_clean = 0;

	return 0;
}

/**
 * e1000_configure_tx - Configure 8254x Transmit Unit after Reset
 * @adapter: board private structure
 *
 * Configure the Tx unit of the MAC after a reset.
 **/

static void
e1000_configure_tx(struct e1000_adapter *adapter)
{
	uint64_t tdba = adapter->tx_ring.dma;
	uint32_t tdlen = adapter->tx_ring.count * sizeof(struct e1000_tx_desc);
	uint32_t tctl, tipg;

	E1000_WRITE_REG(&adapter->hw, TDBAL, (tdba & 0x00000000ffffffffULL));
	E1000_WRITE_REG(&adapter->hw, TDBAH, (tdba >> 32));

	E1000_WRITE_REG(&adapter->hw, TDLEN, tdlen);

	/* Setup the HW Tx Head and Tail descriptor pointers */

	E1000_WRITE_REG(&adapter->hw, TDH, 0);
	E1000_WRITE_REG(&adapter->hw, TDT, 0);

	/* Set the default values for the Tx Inter Packet Gap timer */

	switch (adapter->hw.mac_type) {
	case e1000_82542_rev2_0:
	case e1000_82542_rev2_1:
		tipg = DEFAULT_82542_TIPG_IPGT;
		tipg |= DEFAULT_82542_TIPG_IPGR1 << E1000_TIPG_IPGR1_SHIFT;
		tipg |= DEFAULT_82542_TIPG_IPGR2 << E1000_TIPG_IPGR2_SHIFT;
		break;
	default:
		if(adapter->hw.media_type == e1000_media_type_fiber ||
		   adapter->hw.media_type == e1000_media_type_internal_serdes)
			tipg = DEFAULT_82543_TIPG_IPGT_FIBER;
		else
			tipg = DEFAULT_82543_TIPG_IPGT_COPPER;
		tipg |= DEFAULT_82543_TIPG_IPGR1 << E1000_TIPG_IPGR1_SHIFT;
		tipg |= DEFAULT_82543_TIPG_IPGR2 << E1000_TIPG_IPGR2_SHIFT;
	}
	E1000_WRITE_REG(&adapter->hw, TIPG, tipg);

	/* Set the Tx Interrupt Delay register */

	E1000_WRITE_REG(&adapter->hw, TIDV, adapter->tx_int_delay);
	if(adapter->hw.mac_type >= e1000_82540)
		E1000_WRITE_REG(&adapter->hw, TADV, adapter->tx_abs_int_delay);

	/* Program the Transmit Control Register */

	tctl = E1000_READ_REG(&adapter->hw, TCTL);

	tctl &= ~E1000_TCTL_CT;
	tctl |= E1000_TCTL_EN | E1000_TCTL_PSP |
		(E1000_COLLISION_THRESHOLD << E1000_CT_SHIFT);

	E1000_WRITE_REG(&adapter->hw, TCTL, tctl);

	e1000_config_collision_dist(&adapter->hw);

	/* Setup Transmit Descriptor Settings for eop descriptor */
	adapter->txd_cmd = E1000_TXD_CMD_IDE | E1000_TXD_CMD_EOP |
		E1000_TXD_CMD_IFCS;

	if(adapter->hw.mac_type < e1000_82543)
		adapter->txd_cmd |= E1000_TXD_CMD_RPS;
	else
		adapter->txd_cmd |= E1000_TXD_CMD_RS;

	/* Cache if we're 82544 running in PCI-X because we'll
	 * need this to apply a workaround later in the send path. */
	if(adapter->hw.mac_type == e1000_82544 &&
	   adapter->hw.bus_type == e1000_bus_type_pcix)
		adapter->pcix_82544 = 1;
}

/**
 * e1000_setup_rx_resources - allocate Rx resources (Descriptors)
 * @adapter: board private structure
 *
 * Returns 0 on success, negative on failure
 **/

int
e1000_setup_rx_resources(struct e1000_adapter *adapter)
{
	struct e1000_desc_ring *rxdr = &adapter->rx_ring;
	struct pci_dev *pdev = adapter->pdev;
	int size, desc_len;

	size = sizeof(struct e1000_buffer) * rxdr->count;
	rxdr->buffer_info = vmalloc(size);
	if(!rxdr->buffer_info) {
		DPRINTK(PROBE, ERR,
		"Unable to allocate memory for the receive descriptor ring\n");
		return -ENOMEM;
	}
	memset(rxdr->buffer_info, 0, size);

	size = sizeof(struct e1000_ps_page) * rxdr->count;
	rxdr->ps_page = kmalloc(size, GFP_KERNEL);
	if(!rxdr->ps_page) {
		vfree(rxdr->buffer_info);
		DPRINTK(PROBE, ERR,
		"Unable to allocate memory for the receive descriptor ring\n");
		return -ENOMEM;
	}
	memset(rxdr->ps_page, 0, size);

	size = sizeof(struct e1000_ps_page_dma) * rxdr->count;
	rxdr->ps_page_dma = kmalloc(size, GFP_KERNEL);
	if(!rxdr->ps_page_dma) {
		vfree(rxdr->buffer_info);
		kfree(rxdr->ps_page);
		DPRINTK(PROBE, ERR,
		"Unable to allocate memory for the receive descriptor ring\n");
		return -ENOMEM;
	}
	memset(rxdr->ps_page_dma, 0, size);

	if(adapter->hw.mac_type <= e1000_82547_rev_2)
		desc_len = sizeof(struct e1000_rx_desc);
	else
		desc_len = sizeof(union e1000_rx_desc_packet_split);

	/* Round up to nearest 4K */

	rxdr->size = rxdr->count * desc_len;
	E1000_ROUNDUP(rxdr->size, 4096);

	rxdr->desc = pci_alloc_consistent(pdev, rxdr->size, &rxdr->dma);

	if(!rxdr->desc) {
setup_rx_desc_die:
		vfree(rxdr->buffer_info);
		kfree(rxdr->ps_page);
		kfree(rxdr->ps_page_dma);
		DPRINTK(PROBE, ERR,
		"Unable to allocate memory for the receive descriptor ring\n");
		return -ENOMEM;
	}

	/* Fix for errata 23, can't cross 64kB boundary */
	if (!e1000_check_64k_bound(adapter, rxdr->desc, rxdr->size)) {
		void *olddesc = rxdr->desc;
		dma_addr_t olddma = rxdr->dma;
		DPRINTK(RX_ERR, ERR, "rxdr align check failed: %u bytes "
			"at %p\n", rxdr->size, rxdr->desc);
		/* Try again, without freeing the previous */
		rxdr->desc = pci_alloc_consistent(pdev, rxdr->size, &rxdr->dma);
		if(!rxdr->desc) {
			/* Failed allocation, critical failure */
			pci_free_consistent(pdev, rxdr->size, olddesc, olddma);
			goto setup_rx_desc_die;
		}

		if (!e1000_check_64k_bound(adapter, rxdr->desc, rxdr->size)) {
			/* give up */
			pci_free_consistent(pdev, rxdr->size, rxdr->desc,
					    rxdr->dma);
			pci_free_consistent(pdev, rxdr->size, olddesc, olddma);
			DPRINTK(PROBE, ERR,
				"Unable to allocate aligned memory "
				"for the receive descriptor ring\n");
			vfree(rxdr->buffer_info);
			kfree(rxdr->ps_page);
			kfree(rxdr->ps_page_dma);
			return -ENOMEM;
		} else {
			/* Free old allocation, new allocation was successful */
			pci_free_consistent(pdev, rxdr->size, olddesc, olddma);
		}
	}
	memset(rxdr->desc, 0, rxdr->size);

	rxdr->next_to_clean = 0;
	rxdr->next_to_use = 0;

	return 0;
}

/**
 * e1000_setup_rctl - configure the receive control registers
 * @adapter: Board private structure
 **/

static void
e1000_setup_rctl(struct e1000_adapter *adapter)
{
	uint32_t rctl, rfctl;
	uint32_t psrctl = 0;

	rctl = E1000_READ_REG(&adapter->hw, RCTL);

	rctl &= ~(3 << E1000_RCTL_MO_SHIFT);

	rctl |= E1000_RCTL_EN | E1000_RCTL_BAM |
		E1000_RCTL_LBM_NO | E1000_RCTL_RDMTS_HALF |
		(adapter->hw.mc_filter_type << E1000_RCTL_MO_SHIFT);

	if(adapter->hw.tbi_compatibility_on == 1)
		rctl |= E1000_RCTL_SBP;
	else
		rctl &= ~E1000_RCTL_SBP;

	if (adapter->netdev->mtu <= ETH_DATA_LEN)
		rctl &= ~E1000_RCTL_LPE;
	else
		rctl |= E1000_RCTL_LPE;

	/* Setup buffer sizes */
	if(adapter->hw.mac_type == e1000_82573) {
		/* We can now specify buffers in 1K increments.
		 * BSIZE and BSEX are ignored in this case. */
		rctl |= adapter->rx_buffer_len << 0x11;
	} else {
		rctl &= ~E1000_RCTL_SZ_4096;
		rctl |= E1000_RCTL_BSEX;
		switch (adapter->rx_buffer_len) {
		case E1000_RXBUFFER_2048:
		default:
			rctl |= E1000_RCTL_SZ_2048;
			rctl &= ~E1000_RCTL_BSEX;
			break;
		case E1000_RXBUFFER_4096:
			rctl |= E1000_RCTL_SZ_4096;
			break;
		case E1000_RXBUFFER_8192:
			rctl |= E1000_RCTL_SZ_8192;
			break;
		case E1000_RXBUFFER_16384:
			rctl |= E1000_RCTL_SZ_16384;
			break;
		}
	}

#ifdef CONFIG_E1000_PACKET_SPLIT
	/* 82571 and greater support packet-split where the protocol
	 * header is placed in skb->data and the packet data is
	 * placed in pages hanging off of skb_shinfo(skb)->nr_frags.
	 * In the case of a non-split, skb->data is linearly filled,
	 * followed by the page buffers.  Therefore, skb->data is
	 * sized to hold the largest protocol header.
	 */
	adapter->rx_ps = (adapter->hw.mac_type > e1000_82547_rev_2)
			  && (adapter->netdev->mtu
			      < ((3 * PAGE_SIZE) + adapter->rx_ps_bsize0));
#endif
	if(adapter->rx_ps) {
		/* Configure extra packet-split registers */
		rfctl = E1000_READ_REG(&adapter->hw, RFCTL);
		rfctl |= E1000_RFCTL_EXTEN;
		/* disable IPv6 packet split support */
		rfctl |= E1000_RFCTL_IPV6_DIS;
		E1000_WRITE_REG(&adapter->hw, RFCTL, rfctl);

		rctl |= E1000_RCTL_DTYP_PS | E1000_RCTL_SECRC;

		psrctl |= adapter->rx_ps_bsize0 >>
			E1000_PSRCTL_BSIZE0_SHIFT;
		psrctl |= PAGE_SIZE >>
			E1000_PSRCTL_BSIZE1_SHIFT;
		psrctl |= PAGE_SIZE <<
			E1000_PSRCTL_BSIZE2_SHIFT;
		psrctl |= PAGE_SIZE <<
			E1000_PSRCTL_BSIZE3_SHIFT;

		E1000_WRITE_REG(&adapter->hw, PSRCTL, psrctl);
	}

	E1000_WRITE_REG(&adapter->hw, RCTL, rctl);
}

/**
 * e1000_configure_rx - Configure 8254x Receive Unit after Reset
 * @adapter: board private structure
 *
 * Configure the Rx unit of the MAC after a reset.
 **/

static void
e1000_configure_rx(struct e1000_adapter *adapter)
{
	uint64_t rdba = adapter->rx_ring.dma;
	uint32_t rdlen, rctl, rxcsum;

	if(adapter->rx_ps) {
		rdlen = adapter->rx_ring.count *
			sizeof(union e1000_rx_desc_packet_split);
		adapter->clean_rx = e1000_clean_rx_irq_ps;
		adapter->alloc_rx_buf = e1000_alloc_rx_buffers_ps;
	} else {
		rdlen = adapter->rx_ring.count * sizeof(struct e1000_rx_desc);
		adapter->clean_rx = e1000_clean_rx_irq;
		adapter->alloc_rx_buf = e1000_alloc_rx_buffers;
	}

	/* disable receives while setting up the descriptors */
	rctl = E1000_READ_REG(&adapter->hw, RCTL);
	E1000_WRITE_REG(&adapter->hw, RCTL, rctl & ~E1000_RCTL_EN);

	/* set the Receive Delay Timer Register */
	E1000_WRITE_REG(&adapter->hw, RDTR, adapter->rx_int_delay);

	if(adapter->hw.mac_type >= e1000_82540) {
		E1000_WRITE_REG(&adapter->hw, RADV, adapter->rx_abs_int_delay);
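		/* ITR counts in 256 ns increments, so this programs roughly
		 * adapter->itr interrupts per second */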
		if(adapter->itr > 1)
			E1000_WRITE_REG(&adapter->hw, ITR,
				1000000000 / (adapter->itr * 256));
	}

	/* Setup the Base and Length of the Rx Descriptor Ring */
	E1000_WRITE_REG(&adapter->hw, RDBAL, (rdba & 0x00000000ffffffffULL));
	E1000_WRITE_REG(&adapter->hw, RDBAH, (rdba >> 32));

	E1000_WRITE_REG(&adapter->hw, RDLEN, rdlen);

	/* Setup the HW Rx Head and Tail Descriptor Pointers */
	E1000_WRITE_REG(&adapter->hw, RDH, 0);
	E1000_WRITE_REG(&adapter->hw, RDT, 0);

	/* Enable 82543 Receive Checksum Offload for TCP and UDP */
	if(adapter->hw.mac_type >= e1000_82543) {
		rxcsum = E1000_READ_REG(&adapter->hw, RXCSUM);
		if(adapter->rx_csum == TRUE) {
			rxcsum |= E1000_RXCSUM_TUOFL;

			/* Enable 82573 IPv4 payload checksum for UDP fragments
			 * Must be used in conjunction with packet-split. */
			if((adapter->hw.mac_type > e1000_82547_rev_2) &&
			   (adapter->rx_ps)) {
				rxcsum |= E1000_RXCSUM_IPPCSE;
			}
		} else {
			rxcsum &= ~E1000_RXCSUM_TUOFL;
			/* don't need to clear IPPCSE as it defaults to 0 */
		}
		E1000_WRITE_REG(&adapter->hw, RXCSUM, rxcsum);
	}

	if (adapter->hw.mac_type == e1000_82573)
		E1000_WRITE_REG(&adapter->hw, ERT, 0x0100);

	/* Enable Receives */
	E1000_WRITE_REG(&adapter->hw, RCTL, rctl);
}

/**
 * e1000_free_tx_resources - Free Tx Resources
 * @adapter: board private structure
 *
 * Free all transmit software resources
 **/

void
e1000_free_tx_resources(struct e1000_adapter *adapter)
{
	struct pci_dev *pdev = adapter->pdev;

	e1000_clean_tx_ring(adapter);

	vfree(adapter->tx_ring.buffer_info);
	adapter->tx_ring.buffer_info = NULL;

	pci_free_consistent(pdev, adapter->tx_ring.size,
			    adapter->tx_ring.desc, adapter->tx_ring.dma);

	adapter->tx_ring.desc = NULL;
}

static inline void
e1000_unmap_and_free_tx_resource(struct e1000_adapter *adapter,
			struct e1000_buffer *buffer_info)
{
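	/* in EtherCAT mode the Tx buffers are kept by the driver, so there is
	 * nothing to unmap or free per frame */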
	if (adapter->ecdev)
		return;

	if(buffer_info->dma) {
		pci_unmap_page(adapter->pdev,
				buffer_info->dma,
				buffer_info->length,
				PCI_DMA_TODEVICE);
		buffer_info->dma = 0;
	}
	if(buffer_info->skb) {
		dev_kfree_skb_any(buffer_info->skb);
		buffer_info->skb = NULL;
	}
}

/**
 * e1000_clean_tx_ring - Free Tx Buffers
 * @adapter: board private structure
 **/

static void
e1000_clean_tx_ring(struct e1000_adapter *adapter)
{
	struct e1000_desc_ring *tx_ring = &adapter->tx_ring;
	struct e1000_buffer *buffer_info;
	unsigned long size;
	unsigned int i;

	/* Free all the Tx ring sk_buffs */

	if (likely(adapter->previous_buffer_info.skb != NULL)) {
		e1000_unmap_and_free_tx_resource(adapter,
				&adapter->previous_buffer_info);
	}

	for(i = 0; i < tx_ring->count; i++) {
		buffer_info = &tx_ring->buffer_info[i];
		e1000_unmap_and_free_tx_resource(adapter, buffer_info);
	}

	size = sizeof(struct e1000_buffer) * tx_ring->count;
	memset(tx_ring->buffer_info, 0, size);

	/* Zero out the descriptor ring */

	memset(tx_ring->desc, 0, tx_ring->size);

	tx_ring->next_to_use = 0;
	tx_ring->next_to_clean = 0;

	E1000_WRITE_REG(&adapter->hw, TDH, 0);
	E1000_WRITE_REG(&adapter->hw, TDT, 0);
}

/**
 * e1000_free_rx_resources - Free Rx Resources
 * @adapter: board private structure
 *
 * Free all receive software resources
 **/

void
e1000_free_rx_resources(struct e1000_adapter *adapter)
{
	struct e1000_desc_ring *rx_ring = &adapter->rx_ring;
	struct pci_dev *pdev = adapter->pdev;

	e1000_clean_rx_ring(adapter);

	vfree(rx_ring->buffer_info);
	rx_ring->buffer_info = NULL;
	kfree(rx_ring->ps_page);
	rx_ring->ps_page = NULL;
	kfree(rx_ring->ps_page_dma);
	rx_ring->ps_page_dma = NULL;

	pci_free_consistent(pdev, rx_ring->size, rx_ring->desc, rx_ring->dma);

	rx_ring->desc = NULL;
}

/**
 * e1000_clean_rx_ring - Free Rx Buffers
 * @adapter: board private structure
 **/

static void
e1000_clean_rx_ring(struct e1000_adapter *adapter)
{
	struct e1000_desc_ring *rx_ring = &adapter->rx_ring;
	struct e1000_buffer *buffer_info;
	struct e1000_ps_page *ps_page;
	struct e1000_ps_page_dma *ps_page_dma;
	struct pci_dev *pdev = adapter->pdev;
	unsigned long size;
	unsigned int i, j;

	/* Free all the Rx ring sk_buffs */

	for(i = 0; i < rx_ring->count; i++) {
		buffer_info = &rx_ring->buffer_info[i];
		if(buffer_info->skb) {
			ps_page = &rx_ring->ps_page[i];
			ps_page_dma = &rx_ring->ps_page_dma[i];
			pci_unmap_single(pdev,
					 buffer_info->dma,
					 buffer_info->length,
					 PCI_DMA_FROMDEVICE);

			dev_kfree_skb(buffer_info->skb);
			buffer_info->skb = NULL;

			for(j = 0; j < PS_PAGE_BUFFERS; j++) {
				if(!ps_page->ps_page[j]) break;
				pci_unmap_single(pdev,
						 ps_page_dma->ps_page_dma[j],
						 PAGE_SIZE, PCI_DMA_FROMDEVICE);
				ps_page_dma->ps_page_dma[j] = 0;
				put_page(ps_page->ps_page[j]);
				ps_page->ps_page[j] = NULL;
			}
		}
	}

	size = sizeof(struct e1000_buffer) * rx_ring->count;
	memset(rx_ring->buffer_info, 0, size);
	size = sizeof(struct e1000_ps_page) * rx_ring->count;
	memset(rx_ring->ps_page, 0, size);
	size = sizeof(struct e1000_ps_page_dma) * rx_ring->count;
	memset(rx_ring->ps_page_dma, 0, size);

	/* Zero out the descriptor ring */

	memset(rx_ring->desc, 0, rx_ring->size);

	rx_ring->next_to_clean = 0;
	rx_ring->next_to_use = 0;

	E1000_WRITE_REG(&adapter->hw, RDH, 0);
	E1000_WRITE_REG(&adapter->hw, RDT, 0);
}

/* The 82542 2.0 (revision 2) needs to have the receive unit in reset
 * and memory write and invalidate disabled for certain operations
 */
static void
e1000_enter_82542_rst(struct e1000_adapter *adapter)
{
	struct net_device *netdev = adapter->netdev;
	uint32_t rctl;

	e1000_pci_clear_mwi(&adapter->hw);

	rctl = E1000_READ_REG(&adapter->hw, RCTL);
	rctl |= E1000_RCTL_RST;
	E1000_WRITE_REG(&adapter->hw, RCTL, rctl);
	E1000_WRITE_FLUSH(&adapter->hw);
	mdelay(5);

	if (!adapter->ecdev && netif_running(netdev))
		e1000_clean_rx_ring(adapter);
}

static void
e1000_leave_82542_rst(struct e1000_adapter *adapter)
{
	struct net_device *netdev = adapter->netdev;
	uint32_t rctl;

	rctl = E1000_READ_REG(&adapter->hw, RCTL);
	rctl &= ~E1000_RCTL_RST;
	E1000_WRITE_REG(&adapter->hw, RCTL, rctl);
	E1000_WRITE_FLUSH(&adapter->hw);
	mdelay(5);

	if(adapter->hw.pci_cmd_word & PCI_COMMAND_INVALIDATE)
		e1000_pci_set_mwi(&adapter->hw);

	if (!adapter->ecdev && netif_running(netdev)) {
		e1000_configure_rx(adapter);
		e1000_alloc_rx_buffers(adapter);
	}
}

/**
 * e1000_set_mac - Change the Ethernet Address of the NIC
 * @netdev: network interface device structure
 * @p: pointer to an address structure
 *
 * Returns 0 on success, negative on failure
 **/

static int
e1000_set_mac(struct net_device *netdev, void *p)
{
	struct e1000_adapter *adapter = netdev_priv(netdev);
	struct sockaddr *addr = p;

	if(!is_valid_ether_addr(addr->sa_data))
		return -EADDRNOTAVAIL;

	/* 82542 2.0 needs to be in reset to write receive address registers */

	if(adapter->hw.mac_type == e1000_82542_rev2_0)
		e1000_enter_82542_rst(adapter);

	memcpy(netdev->dev_addr, addr->sa_data, netdev->addr_len);
	memcpy(adapter->hw.mac_addr, addr->sa_data, netdev->addr_len);

	e1000_rar_set(&adapter->hw, adapter->hw.mac_addr, 0);

	if(adapter->hw.mac_type == e1000_82542_rev2_0)
		e1000_leave_82542_rst(adapter);

	return 0;
}

/**
 * e1000_set_multi - Multicast and Promiscuous mode set
 * @netdev: network interface device structure
 *
 * The set_multi entry point is called whenever the multicast address
 * list or the network interface flags are updated.  This routine is
 * responsible for configuring the hardware for proper multicast,
 * promiscuous mode, and all-multi behavior.
 **/

static void
e1000_set_multi(struct net_device *netdev)
{
	struct e1000_adapter *adapter = netdev_priv(netdev);
	struct e1000_hw *hw = &adapter->hw;
	struct dev_mc_list *mc_ptr;
	unsigned long flags = 0;
	uint32_t rctl;
	uint32_t hash_value;
	int i;

|
1674 if (!adapter->ecdev) |
|
1675 spin_lock_irqsave(&adapter->tx_lock, flags); |
|
1676 |
|
1677 /* Check for Promiscuous and All Multicast modes */ |
|
1678 |
|
1679 rctl = E1000_READ_REG(hw, RCTL); |
|
1680 |
|
1681 if(netdev->flags & IFF_PROMISC) { |
|
1682 rctl |= (E1000_RCTL_UPE | E1000_RCTL_MPE); |
|
1683 } else if(netdev->flags & IFF_ALLMULTI) { |
|
1684 rctl |= E1000_RCTL_MPE; |
|
1685 rctl &= ~E1000_RCTL_UPE; |
|
1686 } else { |
|
1687 rctl &= ~(E1000_RCTL_UPE | E1000_RCTL_MPE); |
|
1688 } |
|
1689 |
|
1690 E1000_WRITE_REG(hw, RCTL, rctl); |
|
1691 |
|
1692 /* 82542 2.0 needs to be in reset to write receive address registers */ |
|
1693 |
|
1694 if(hw->mac_type == e1000_82542_rev2_0) |
|
1695 e1000_enter_82542_rst(adapter); |
|
1696 |
|
1697 /* load the first 14 multicast addresses into the exact filters 1-14 |
|
1698 * RAR 0 is used for the station MAC address |
|
1699 * if there are fewer than 14 addresses, go ahead and clear the filters |
|
1700 */ |
|
1701 mc_ptr = netdev->mc_list; |
|
1702 |
|
1703 for(i = 1; i < E1000_RAR_ENTRIES; i++) { |
|
1704 if(mc_ptr) { |
|
1705 e1000_rar_set(hw, mc_ptr->dmi_addr, i); |
|
1706 mc_ptr = mc_ptr->next; |
|
1707 } else { |
|
1708 E1000_WRITE_REG_ARRAY(hw, RA, i << 1, 0); |
|
1709 E1000_WRITE_REG_ARRAY(hw, RA, (i << 1) + 1, 0); |
|
1710 } |
|
1711 } |
|
1712 |
|
1713 /* clear the old settings from the multicast hash table */ |
|
1714 |
|
1715 for(i = 0; i < E1000_NUM_MTA_REGISTERS; i++) |
|
1716 E1000_WRITE_REG_ARRAY(hw, MTA, i, 0); |
|
1717 |
|
1718 /* load any remaining addresses into the hash table */ |
|
1719 |
|
1720 for(; mc_ptr; mc_ptr = mc_ptr->next) { |
|
1721 hash_value = e1000_hash_mc_addr(hw, mc_ptr->dmi_addr); |
|
1722 e1000_mta_set(hw, hash_value); |
|
1723 } |
|
1724 |
|
1725 if(hw->mac_type == e1000_82542_rev2_0) |
|
1726 e1000_leave_82542_rst(adapter); |
|
1727 |
|
1728 if (!adapter->ecdev) |
|
1729 spin_unlock_irqrestore(&adapter->tx_lock, flags); |
|
1730 } |
|
1731 |
|
1732 /* Need to wait a few seconds after link up to get diagnostic information from |
|
1733 * the phy */ |
|
1734 |
|
1735 static void |
|
1736 e1000_update_phy_info(unsigned long data) |
|
1737 { |
|
1738 struct e1000_adapter *adapter = (struct e1000_adapter *) data; |
|
1739 e1000_phy_get_info(&adapter->hw, &adapter->phy_info); |
|
1740 } |
|
1741 |
|
1742 /** |
|
1743 * e1000_82547_tx_fifo_stall - Timer Call-back |
|
1744 * @data: pointer to adapter cast into an unsigned long |
|
1745 **/ |
|
1746 |
|
1747 static void |
|
1748 e1000_82547_tx_fifo_stall(unsigned long data) |
|
1749 { |
|
1750 struct e1000_adapter *adapter = (struct e1000_adapter *) data; |
|
1751 struct net_device *netdev = adapter->netdev; |
|
1752 uint32_t tctl; |
|
1753 |
|
1754 if(atomic_read(&adapter->tx_fifo_stall)) { |
|
1755 if((E1000_READ_REG(&adapter->hw, TDT) == |
|
1756 E1000_READ_REG(&adapter->hw, TDH)) && |
|
1757 (E1000_READ_REG(&adapter->hw, TDFT) == |
|
1758 E1000_READ_REG(&adapter->hw, TDFH)) && |
|
1759 (E1000_READ_REG(&adapter->hw, TDFTS) == |
|
1760 E1000_READ_REG(&adapter->hw, TDFHS))) { |
|
1761 tctl = E1000_READ_REG(&adapter->hw, TCTL); |
|
1762 E1000_WRITE_REG(&adapter->hw, TCTL, |
|
1763 tctl & ~E1000_TCTL_EN); |
|
1764 E1000_WRITE_REG(&adapter->hw, TDFT, |
|
1765 adapter->tx_head_addr); |
|
1766 E1000_WRITE_REG(&adapter->hw, TDFH, |
|
1767 adapter->tx_head_addr); |
|
1768 E1000_WRITE_REG(&adapter->hw, TDFTS, |
|
1769 adapter->tx_head_addr); |
|
1770 E1000_WRITE_REG(&adapter->hw, TDFHS, |
|
1771 adapter->tx_head_addr); |
|
1772 E1000_WRITE_REG(&adapter->hw, TCTL, tctl); |
|
1773 E1000_WRITE_FLUSH(&adapter->hw); |
|
1774 |
|
1775 adapter->tx_fifo_head = 0; |
|
1776 atomic_set(&adapter->tx_fifo_stall, 0); |
|
1777 if (!adapter->ecdev) netif_wake_queue(netdev); |
|
1778 } else { |
|
1779 mod_timer(&adapter->tx_fifo_stall_timer, jiffies + 1); |
|
1780 } |
|
1781 } |
|
1782 } |
|
1783 |
|
1784 /** |
|
1785 * e1000_watchdog - Timer Call-back |
|
1786 * @data: pointer to adapter cast into an unsigned long |
|
1787 **/ |
|
1788 static void |
|
1789 e1000_watchdog(unsigned long data) |
|
1790 { |
|
1791 struct e1000_adapter *adapter = (struct e1000_adapter *) data; |
|
1792 |
|
1793 /* Do the rest outside of interrupt context */ |
|
1794 schedule_work(&adapter->watchdog_task); |
|
1795 } |
|
1796 |
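/**
 * e1000_watchdog_task - check link and update statistics
 * @adapter: board private structure
 *
 * Runs from the watchdog timer (or from ec_poll() in EtherCAT mode):
 * detects link changes, refreshes statistics and the adaptive IFS and
 * ITR values, and schedules a controller reset if queued Tx work can
 * no longer complete because the link was lost.
 **/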
|
1797 static void |
|
1798 e1000_watchdog_task(struct e1000_adapter *adapter) |
|
1799 { |
|
1800 struct net_device *netdev = adapter->netdev; |
|
1801 struct e1000_desc_ring *txdr = &adapter->tx_ring; |
|
1802 uint32_t link; |
|
1803 |
|
1804 e1000_check_for_link(&adapter->hw); |
|
1805 if (adapter->hw.mac_type == e1000_82573) { |
|
1806 e1000_enable_tx_pkt_filtering(&adapter->hw); |
|
1807 if(adapter->mng_vlan_id != adapter->hw.mng_cookie.vlan_id) |
|
1808 e1000_update_mng_vlan(adapter); |
|
1809 } |
|
1810 |
|
1811 if((adapter->hw.media_type == e1000_media_type_internal_serdes) && |
|
1812 !(E1000_READ_REG(&adapter->hw, TXCW) & E1000_TXCW_ANE)) |
|
1813 link = !adapter->hw.serdes_link_down; |
|
1814 else |
|
1815 link = E1000_READ_REG(&adapter->hw, STATUS) & E1000_STATUS_LU; |
|
1816 |
|
1817 if (link) { |
|
1818 if ((adapter->ecdev && !ecdev_get_link(adapter->ecdev)) |
|
1819 || (!adapter->ecdev && !netif_carrier_ok(netdev))) { |
|
1820 e1000_get_speed_and_duplex(&adapter->hw, |
|
1821 &adapter->link_speed, |
|
1822 &adapter->link_duplex); |
|
1823 |
|
1824 DPRINTK(LINK, INFO, "NIC Link is Up %d Mbps %s\n", |
|
1825 adapter->link_speed, |
|
1826 adapter->link_duplex == FULL_DUPLEX ? |
|
1827 "Full Duplex" : "Half Duplex"); |
|
1828 |
|
1829 if (adapter->ecdev) { |
|
1830 ecdev_set_link(adapter->ecdev, 1); |
|
1831 } else { |
|
1832 netif_carrier_on(netdev); |
|
1833 netif_wake_queue(netdev); |
|
1834 mod_timer(&adapter->phy_info_timer, jiffies + 2 * HZ); |
|
1835 } |
|
1836 adapter->smartspeed = 0; |
|
1837 } |
|
1838 } else { |
|
1839 if ((adapter->ecdev && ecdev_get_link(adapter->ecdev)) |
|
1840 || (!adapter->ecdev && netif_carrier_ok(netdev))) { |
|
1841 adapter->link_speed = 0; |
|
1842 adapter->link_duplex = 0; |
|
1843 DPRINTK(LINK, INFO, "NIC Link is Down\n"); |
|
1844 if (adapter->ecdev) { |
|
1845 ecdev_set_link(adapter->ecdev, 0); |
|
1846 } else { |
|
1847 netif_carrier_off(netdev); |
|
1848 netif_stop_queue(netdev); |
|
1849 mod_timer(&adapter->phy_info_timer, jiffies + 2 * HZ); |
|
1850 } |
|
1851 } |
|
1852 |
|
1853 e1000_smartspeed(adapter); |
|
1854 } |
|
1855 |
|
1856 e1000_update_stats(adapter); |
|
1857 |
|
1858 adapter->hw.tx_packet_delta = adapter->stats.tpt - adapter->tpt_old; |
|
1859 adapter->tpt_old = adapter->stats.tpt; |
|
1860 adapter->hw.collision_delta = adapter->stats.colc - adapter->colc_old; |
|
1861 adapter->colc_old = adapter->stats.colc; |
|
1862 |
|
1863 adapter->gorcl = adapter->stats.gorcl - adapter->gorcl_old; |
|
1864 adapter->gorcl_old = adapter->stats.gorcl; |
|
1865 adapter->gotcl = adapter->stats.gotcl - adapter->gotcl_old; |
|
1866 adapter->gotcl_old = adapter->stats.gotcl; |
|
1867 |
|
1868 e1000_update_adaptive(&adapter->hw); |
|
1869 |
|
1870 if (!adapter->ecdev && !netif_carrier_ok(netdev)) { |
|
1871 if(E1000_DESC_UNUSED(txdr) + 1 < txdr->count) { |
|
1872 /* We've lost link, so the controller stops DMA, |
|
1873 * but we've got queued Tx work that's never going |
|
1874 * to get done, so reset controller to flush Tx. |
|
1875 * (Do the reset outside of interrupt context). */ |
|
1876 schedule_work(&adapter->tx_timeout_task); |
|
1877 } |
|
1878 } |
|
1879 |
|
1880 /* Dynamic mode for Interrupt Throttle Rate (ITR) */ |
|
1881 if(adapter->hw.mac_type >= e1000_82540 && adapter->itr == 1) { |
|
1882 /* Symmetric Tx/Rx gets a reduced ITR=2000; totally |
|
1883 * asymmetrical Tx or Rx gets ITR=8000; everything |
|
1884 * else is between 2000 and 8000. */ |
|
1885 uint32_t goc = (adapter->gotcl + adapter->gorcl) / 10000; |
|
1886 uint32_t dif = (adapter->gotcl > adapter->gorcl ? |
|
1887 adapter->gotcl - adapter->gorcl : |
|
1888 adapter->gorcl - adapter->gotcl) / 10000; |
|
1889 uint32_t itr = goc > 0 ? (dif * 6000 / goc + 2000) : 8000; |
|
1890 E1000_WRITE_REG(&adapter->hw, ITR, 1000000000 / (itr * 256)); |
|
1891 } |
|
1892 |
|
1893 /* Cause software interrupt to ensure rx ring is cleaned */ |
|
1894 E1000_WRITE_REG(&adapter->hw, ICS, E1000_ICS_RXDMT0); |
|
1895 |
|
1896 /* Force detection of hung controller every watchdog period */ |
|
1897 if (!adapter->ecdev) adapter->detect_tx_hung = TRUE; |
|
1898 |
|
1899 /* Reset the timer */ |
|
1900 if (!adapter->ecdev) |
|
1901 mod_timer(&adapter->watchdog_timer, jiffies + 2 * HZ); |
|
1902 } |
|
1903 |
|
1904 #define E1000_TX_FLAGS_CSUM 0x00000001 |
|
1905 #define E1000_TX_FLAGS_VLAN 0x00000002 |
|
1906 #define E1000_TX_FLAGS_TSO 0x00000004 |
|
1907 #define E1000_TX_FLAGS_IPV4 0x00000008 |
|
1908 #define E1000_TX_FLAGS_VLAN_MASK 0xffff0000 |
|
1909 #define E1000_TX_FLAGS_VLAN_SHIFT 16 |
|
1910 |
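/**
 * e1000_tso - set up a TSO context descriptor
 * @adapter: board private structure
 * @skb: buffer to transmit
 *
 * Returns 1 if a TSO context descriptor was queued, 0 if the frame
 * does not use TSO, or a negative errno if the cloned header could
 * not be expanded.
 **/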
|
1911 static inline int |
|
1912 e1000_tso(struct e1000_adapter *adapter, struct sk_buff *skb) |
|
1913 { |
|
1914 #ifdef NETIF_F_TSO |
|
1915 struct e1000_context_desc *context_desc; |
|
1916 unsigned int i; |
|
1917 uint32_t cmd_length = 0; |
|
1918 uint16_t ipcse = 0, tucse, mss; |
|
1919 uint8_t ipcss, ipcso, tucss, tucso, hdr_len; |
|
1920 int err; |
|
1921 |
|
1922 if(skb_shinfo(skb)->tso_size) { |
|
1923 if (skb_header_cloned(skb)) { |
|
1924 err = pskb_expand_head(skb, 0, 0, GFP_ATOMIC); |
|
1925 if (err) |
|
1926 return err; |
|
1927 } |
|
1928 |
|
1929 hdr_len = ((skb->h.raw - skb->data) + (skb->h.th->doff << 2)); |
|
1930 mss = skb_shinfo(skb)->tso_size; |
|
1931 if(skb->protocol == ntohs(ETH_P_IP)) { |
|
1932 skb->nh.iph->tot_len = 0; |
|
1933 skb->nh.iph->check = 0; |
|
1934 skb->h.th->check = |
|
1935 ~csum_tcpudp_magic(skb->nh.iph->saddr, |
|
1936 skb->nh.iph->daddr, |
|
1937 0, |
|
1938 IPPROTO_TCP, |
|
1939 0); |
|
1940 cmd_length = E1000_TXD_CMD_IP; |
|
1941 ipcse = skb->h.raw - skb->data - 1; |
|
1942 #ifdef NETIF_F_TSO_IPV6 |
|
1943 } else if(skb->protocol == ntohs(ETH_P_IPV6)) { |
|
1944 skb->nh.ipv6h->payload_len = 0; |
|
1945 skb->h.th->check = |
|
1946 ~csum_ipv6_magic(&skb->nh.ipv6h->saddr, |
|
1947 &skb->nh.ipv6h->daddr, |
|
1948 0, |
|
1949 IPPROTO_TCP, |
|
1950 0); |
|
1951 ipcse = 0; |
|
1952 #endif |
|
1953 } |
|
1954 ipcss = skb->nh.raw - skb->data; |
|
1955 ipcso = (void *)&(skb->nh.iph->check) - (void *)skb->data; |
|
1956 tucss = skb->h.raw - skb->data; |
|
1957 tucso = (void *)&(skb->h.th->check) - (void *)skb->data; |
|
1958 tucse = 0; |
|
1959 |
|
1960 cmd_length |= (E1000_TXD_CMD_DEXT | E1000_TXD_CMD_TSE | |
|
1961 E1000_TXD_CMD_TCP | (skb->len - (hdr_len))); |
|
1962 |
|
1963 i = adapter->tx_ring.next_to_use; |
|
1964 context_desc = E1000_CONTEXT_DESC(adapter->tx_ring, i); |
|
1965 |
|
1966 context_desc->lower_setup.ip_fields.ipcss = ipcss; |
|
1967 context_desc->lower_setup.ip_fields.ipcso = ipcso; |
|
1968 context_desc->lower_setup.ip_fields.ipcse = cpu_to_le16(ipcse); |
|
1969 context_desc->upper_setup.tcp_fields.tucss = tucss; |
|
1970 context_desc->upper_setup.tcp_fields.tucso = tucso; |
|
1971 context_desc->upper_setup.tcp_fields.tucse = cpu_to_le16(tucse); |
|
1972 context_desc->tcp_seg_setup.fields.mss = cpu_to_le16(mss); |
|
1973 context_desc->tcp_seg_setup.fields.hdr_len = hdr_len; |
|
1974 context_desc->cmd_and_length = cpu_to_le32(cmd_length); |
|
1975 |
|
1976 if(++i == adapter->tx_ring.count) i = 0; |
|
1977 adapter->tx_ring.next_to_use = i; |
|
1978 |
|
1979 return 1; |
|
1980 } |
|
1981 #endif |
|
1982 |
|
1983 return 0; |
|
1984 } |
|
1985 |
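/**
 * e1000_tx_csum - set up a checksum offload context descriptor
 * @adapter: board private structure
 * @skb: buffer to transmit
 *
 * Returns TRUE if a context descriptor for hardware checksum
 * insertion was queued, FALSE if the stack did not request offload.
 **/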
|
1986 static inline boolean_t |
|
1987 e1000_tx_csum(struct e1000_adapter *adapter, struct sk_buff *skb) |
|
1988 { |
|
1989 struct e1000_context_desc *context_desc; |
|
1990 unsigned int i; |
|
1991 uint8_t css; |
|
1992 |
|
1993 if(likely(skb->ip_summed == CHECKSUM_HW)) { |
|
1994 css = skb->h.raw - skb->data; |
|
1995 |
|
1996 i = adapter->tx_ring.next_to_use; |
|
1997 context_desc = E1000_CONTEXT_DESC(adapter->tx_ring, i); |
|
1998 |
|
1999 context_desc->upper_setup.tcp_fields.tucss = css; |
|
2000 context_desc->upper_setup.tcp_fields.tucso = css + skb->csum; |
|
2001 context_desc->upper_setup.tcp_fields.tucse = 0; |
|
2002 context_desc->tcp_seg_setup.data = 0; |
|
2003 context_desc->cmd_and_length = cpu_to_le32(E1000_TXD_CMD_DEXT); |
|
2004 |
|
2005 if(unlikely(++i == adapter->tx_ring.count)) i = 0; |
|
2006 adapter->tx_ring.next_to_use = i; |
|
2007 |
|
2008 return TRUE; |
|
2009 } |
|
2010 |
|
2011 return FALSE; |
|
2012 } |
|
2013 |
|
2014 #define E1000_MAX_TXD_PWR 12 |
|
2015 #define E1000_MAX_DATA_PER_TXD (1<<E1000_MAX_TXD_PWR) |
|
2016 |
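/**
 * e1000_tx_map - map skb data and fragments for DMA
 * @adapter: board private structure
 * @skb: buffer to transmit
 * @first: index of the first descriptor used by this packet
 * @max_per_txd: maximum number of data bytes per descriptor
 * @nr_frags: number of paged fragments in the skb
 * @mss: maximum segment size (non-zero when TSO is used)
 *
 * Splits the buffer according to the PCI-X and 82544 workarounds and
 * returns the number of descriptors consumed.
 **/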
|
2017 static inline int |
|
2018 e1000_tx_map(struct e1000_adapter *adapter, struct sk_buff *skb, |
|
2019 unsigned int first, unsigned int max_per_txd, |
|
2020 unsigned int nr_frags, unsigned int mss) |
|
2021 { |
|
2022 struct e1000_desc_ring *tx_ring = &adapter->tx_ring; |
|
2023 struct e1000_buffer *buffer_info; |
|
2024 unsigned int len = skb->len; |
|
2025 unsigned int offset = 0, size, count = 0, i; |
|
2026 unsigned int f; |
|
2027 len -= skb->data_len; |
|
2028 |
|
2029 i = tx_ring->next_to_use; |
|
2030 |
|
2031 while(len) { |
|
2032 buffer_info = &tx_ring->buffer_info[i]; |
|
2033 size = min(len, max_per_txd); |
|
2034 #ifdef NETIF_F_TSO |
|
2035 /* Workaround for premature desc write-backs |
|
2036 * in TSO mode. Append 4-byte sentinel desc */ |
|
2037 if(unlikely(mss && !nr_frags && size == len && size > 8)) |
|
2038 size -= 4; |
|
2039 #endif |
|
2040 /* Workaround for errata 10, which applies |
|
2041 * to all controllers in PCI-X mode. |
|
2042 * The fix is to make sure that the first descriptor of a |
|
2043 * packet is smaller than 2048 - 16 - 16 (i.e. 2016) bytes. |
|
2044 */ |
|
2045 if(unlikely((adapter->hw.bus_type == e1000_bus_type_pcix) && |
|
2046 (size > 2015) && count == 0)) |
|
2047 size = 2015; |
|
2048 |
|
2049 /* Workaround for potential 82544 hang in PCI-X. Avoid |
|
2050 * terminating buffers within evenly-aligned dwords. */ |
|
2051 if(unlikely(adapter->pcix_82544 && |
|
2052 !((unsigned long)(skb->data + offset + size - 1) & 4) && |
|
2053 size > 4)) |
|
2054 size -= 4; |
|
2055 |
|
2056 buffer_info->length = size; |
|
2057 buffer_info->dma = |
|
2058 pci_map_single(adapter->pdev, |
|
2059 skb->data + offset, |
|
2060 size, |
|
2061 PCI_DMA_TODEVICE); |
|
2062 buffer_info->time_stamp = jiffies; |
|
2063 |
|
2064 len -= size; |
|
2065 offset += size; |
|
2066 count++; |
|
2067 if(unlikely(++i == tx_ring->count)) i = 0; |
|
2068 } |
|
2069 |
|
2070 for(f = 0; f < nr_frags; f++) { |
|
2071 struct skb_frag_struct *frag; |
|
2072 |
|
2073 frag = &skb_shinfo(skb)->frags[f]; |
|
2074 len = frag->size; |
|
2075 offset = frag->page_offset; |
|
2076 |
|
2077 while(len) { |
|
2078 buffer_info = &tx_ring->buffer_info[i]; |
|
2079 size = min(len, max_per_txd); |
|
2080 #ifdef NETIF_F_TSO |
|
2081 /* Workaround for premature desc write-backs |
|
2082 * in TSO mode. Append 4-byte sentinel desc */ |
|
2083 if(unlikely(mss && f == (nr_frags-1) && size == len && size > 8)) |
|
2084 size -= 4; |
|
2085 #endif |
|
2086 /* Workaround for potential 82544 hang in PCI-X. |
|
2087 * Avoid terminating buffers within evenly-aligned |
|
2088 * dwords. */ |
|
2089 if(unlikely(adapter->pcix_82544 && |
|
2090 !((unsigned long)(frag->page+offset+size-1) & 4) && |
|
2091 size > 4)) |
|
2092 size -= 4; |
|
2093 |
|
2094 buffer_info->length = size; |
|
2095 buffer_info->dma = |
|
2096 pci_map_page(adapter->pdev, |
|
2097 frag->page, |
|
2098 offset, |
|
2099 size, |
|
2100 PCI_DMA_TODEVICE); |
|
2101 buffer_info->time_stamp = jiffies; |
|
2102 |
|
2103 len -= size; |
|
2104 offset += size; |
|
2105 count++; |
|
2106 if(unlikely(++i == tx_ring->count)) i = 0; |
|
2107 } |
|
2108 } |
|
2109 |
|
2110 i = (i == 0) ? tx_ring->count - 1 : i - 1; |
|
2111 tx_ring->buffer_info[i].skb = skb; |
|
2112 tx_ring->buffer_info[first].next_to_watch = i; |
|
2113 |
|
2114 return count; |
|
2115 } |
|
2116 |
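/**
 * e1000_tx_queue - write Tx descriptors and advance the tail pointer
 * @adapter: board private structure
 * @count: number of mapped buffers to post
 * @tx_flags: E1000_TX_FLAGS_* options for this packet
 **/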
|
2117 static inline void |
|
2118 e1000_tx_queue(struct e1000_adapter *adapter, int count, int tx_flags) |
|
2119 { |
|
2120 struct e1000_desc_ring *tx_ring = &adapter->tx_ring; |
|
2121 struct e1000_tx_desc *tx_desc = NULL; |
|
2122 struct e1000_buffer *buffer_info; |
|
2123 uint32_t txd_upper = 0, txd_lower = E1000_TXD_CMD_IFCS; |
|
2124 unsigned int i; |
|
2125 |
|
2126 if(likely(tx_flags & E1000_TX_FLAGS_TSO)) { |
|
2127 txd_lower |= E1000_TXD_CMD_DEXT | E1000_TXD_DTYP_D | |
|
2128 E1000_TXD_CMD_TSE; |
|
2129 txd_upper |= E1000_TXD_POPTS_TXSM << 8; |
|
2130 |
|
2131 if(likely(tx_flags & E1000_TX_FLAGS_IPV4)) |
|
2132 txd_upper |= E1000_TXD_POPTS_IXSM << 8; |
|
2133 } |
|
2134 |
|
2135 if(likely(tx_flags & E1000_TX_FLAGS_CSUM)) { |
|
2136 txd_lower |= E1000_TXD_CMD_DEXT | E1000_TXD_DTYP_D; |
|
2137 txd_upper |= E1000_TXD_POPTS_TXSM << 8; |
|
2138 } |
|
2139 |
|
2140 if(unlikely(tx_flags & E1000_TX_FLAGS_VLAN)) { |
|
2141 txd_lower |= E1000_TXD_CMD_VLE; |
|
2142 txd_upper |= (tx_flags & E1000_TX_FLAGS_VLAN_MASK); |
|
2143 } |
|
2144 |
|
2145 i = tx_ring->next_to_use; |
|
2146 |
|
2147 while(count--) { |
|
2148 buffer_info = &tx_ring->buffer_info[i]; |
|
2149 tx_desc = E1000_TX_DESC(*tx_ring, i); |
|
2150 tx_desc->buffer_addr = cpu_to_le64(buffer_info->dma); |
|
2151 tx_desc->lower.data = |
|
2152 cpu_to_le32(txd_lower | buffer_info->length); |
|
2153 tx_desc->upper.data = cpu_to_le32(txd_upper); |
|
2154 if(unlikely(++i == tx_ring->count)) i = 0; |
|
2155 } |
|
2156 |
|
2157 tx_desc->lower.data |= cpu_to_le32(adapter->txd_cmd); |
|
2158 |
|
2159 /* Force memory writes to complete before letting h/w |
|
2160 * know there are new descriptors to fetch. (Only |
|
2161 * applicable for weak-ordered memory model archs, |
|
2162 * such as IA-64). */ |
|
2163 wmb(); |
|
2164 |
|
2165 tx_ring->next_to_use = i; |
|
2166 E1000_WRITE_REG(&adapter->hw, TDT, i); |
|
2167 } |
|
2168 |
|
2169 /** |
|
2170 * 82547 workaround to avoid controller hang in half-duplex environment. |
|
2171 * The workaround is to avoid queuing a large packet that would span |
|
2172 * the internal Tx FIFO ring boundary by notifying the stack to resend |
|
2173 * the packet at a later time. This gives the Tx FIFO an opportunity to |
|
2174 * flush all packets. When that occurs, we reset the Tx FIFO pointers |
|
2175 * to the beginning of the Tx FIFO. |
|
2176 **/ |
|
2177 |
|
2178 #define E1000_FIFO_HDR 0x10 |
|
2179 #define E1000_82547_PAD_LEN 0x3E0 |
|
2180 |
|
2181 static inline int |
|
2182 e1000_82547_fifo_workaround(struct e1000_adapter *adapter, struct sk_buff *skb) |
|
2183 { |
|
2184 uint32_t fifo_space = adapter->tx_fifo_size - adapter->tx_fifo_head; |
|
2185 uint32_t skb_fifo_len = skb->len + E1000_FIFO_HDR; |
|
2186 |
|
2187 E1000_ROUNDUP(skb_fifo_len, E1000_FIFO_HDR); |
|
2188 |
|
2189 if(adapter->link_duplex != HALF_DUPLEX) |
|
2190 goto no_fifo_stall_required; |
|
2191 |
|
2192 if(atomic_read(&adapter->tx_fifo_stall)) |
|
2193 return 1; |
|
2194 |
|
2195 if(skb_fifo_len >= (E1000_82547_PAD_LEN + fifo_space)) { |
|
2196 atomic_set(&adapter->tx_fifo_stall, 1); |
|
2197 return 1; |
|
2198 } |
|
2199 |
|
2200 no_fifo_stall_required: |
|
2201 adapter->tx_fifo_head += skb_fifo_len; |
|
2202 if(adapter->tx_fifo_head >= adapter->tx_fifo_size) |
|
2203 adapter->tx_fifo_head -= adapter->tx_fifo_size; |
|
2204 return 0; |
|
2205 } |
|
2206 |
|
2207 #define MINIMUM_DHCP_PACKET_SIZE 282 |
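/**
 * e1000_transfer_dhcp_info - pass outgoing DHCP frames to manageability firmware
 * @adapter: board private structure
 * @skb: buffer to transmit
 *
 * Inspects the frame for DHCP traffic (UDP destination port 67) and, if
 * found, hands the payload to e1000_mng_write_dhcp_info(). Only called
 * when Tx packet filtering is enabled on 82573 controllers.
 **/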
|
2208 static inline int |
|
2209 e1000_transfer_dhcp_info(struct e1000_adapter *adapter, struct sk_buff *skb) |
|
2210 { |
|
2211 struct e1000_hw *hw = &adapter->hw; |
|
2212 uint16_t length, offset; |
|
2213 if(vlan_tx_tag_present(skb)) { |
|
2214 if(!((vlan_tx_tag_get(skb) == adapter->hw.mng_cookie.vlan_id) && |
|
2215 ( adapter->hw.mng_cookie.status & |
|
2216 E1000_MNG_DHCP_COOKIE_STATUS_VLAN_SUPPORT)) ) |
|
2217 return 0; |
|
2218 } |
|
2219 if(htons(ETH_P_IP) == skb->protocol) { |
|
2220 const struct iphdr *ip = skb->nh.iph; |
|
2221 if(IPPROTO_UDP == ip->protocol) { |
|
2222 struct udphdr *udp = (struct udphdr *)(skb->h.uh); |
|
2223 if(ntohs(udp->dest) == 67) { |
|
2224 offset = (uint8_t *)udp + 8 - skb->data; |
|
2225 length = skb->len - offset; |
|
2226 |
|
2227 return e1000_mng_write_dhcp_info(hw, |
|
2228 (uint8_t *)udp + 8, length); |
|
2229 } |
|
2230 } |
|
2231 } else if((skb->len > MINIMUM_DHCP_PACKET_SIZE) && (!skb->protocol)) { |
|
2232 struct ethhdr *eth = (struct ethhdr *) skb->data; |
|
2233 if((htons(ETH_P_IP) == eth->h_proto)) { |
|
2234 const struct iphdr *ip = |
|
2235 (struct iphdr *)((uint8_t *)skb->data+14); |
|
2236 if(IPPROTO_UDP == ip->protocol) { |
|
2237 struct udphdr *udp = |
|
2238 (struct udphdr *)((uint8_t *)ip + |
|
2239 (ip->ihl << 2)); |
|
2240 if(ntohs(udp->dest) == 67) { |
|
2241 offset = (uint8_t *)udp + 8 - skb->data; |
|
2242 length = skb->len - offset; |
|
2243 |
|
2244 return e1000_mng_write_dhcp_info(hw, |
|
2245 (uint8_t *)udp + 8, |
|
2246 length); |
|
2247 } |
|
2248 } |
|
2249 } |
|
2250 } |
|
2251 return 0; |
|
2252 } |
|
2253 |
|
2254 #define TXD_USE_COUNT(S, X) (((S) >> (X)) + 1 ) |
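/**
 * e1000_xmit_frame - transmit entry point
 * @skb: buffer to transmit
 * @netdev: network interface device structure
 *
 * Counts the descriptors the packet will need (including errata
 * workarounds), sets up TSO or checksum contexts, maps the buffers and
 * posts them to the hardware. In EtherCAT mode the Tx lock and the
 * netif queue handling are skipped.
 **/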
|
2255 static int |
|
2256 e1000_xmit_frame(struct sk_buff *skb, struct net_device *netdev) |
|
2257 { |
|
2258 struct e1000_adapter *adapter = netdev_priv(netdev); |
|
2259 unsigned int first, max_per_txd = E1000_MAX_DATA_PER_TXD; |
|
2260 unsigned int max_txd_pwr = E1000_MAX_TXD_PWR; |
|
2261 unsigned int tx_flags = 0; |
|
2262 unsigned int len = skb->len; |
|
2263 unsigned long flags = 0; |
|
2264 unsigned int nr_frags = 0; |
|
2265 unsigned int mss = 0; |
|
2266 int count = 0; |
|
2267 int tso; |
|
2268 unsigned int f; |
|
2269 len -= skb->data_len; |
|
2270 |
|
2271 if(unlikely(skb->len <= 0)) { |
|
2272 if (!adapter->ecdev) |
|
2273 dev_kfree_skb_any(skb); |
|
2274 return NETDEV_TX_OK; |
|
2275 } |
|
2276 |
|
2277 #ifdef NETIF_F_TSO |
|
2278 mss = skb_shinfo(skb)->tso_size; |
|
2279 /* The controller does a simple calculation to |
|
2280 * make sure there is enough room in the FIFO before |
|
2281 * initiating the DMA for each buffer. The calc is: |
|
2282 * 4 = ceil(buffer len/mss). To make sure we don't |
|
2283 * overrun the FIFO, adjust the max buffer len if mss |
|
2284 * drops. */ |
|
2285 if(mss) { |
|
2286 max_per_txd = min(mss << 2, max_per_txd); |
|
2287 max_txd_pwr = fls(max_per_txd) - 1; |
|
2288 } |
|
2289 |
|
2290 if((mss) || (skb->ip_summed == CHECKSUM_HW)) |
|
2291 count++; |
|
2292 count++; |
|
2293 #else |
|
2294 if(skb->ip_summed == CHECKSUM_HW) |
|
2295 count++; |
|
2296 #endif |
|
2297 count += TXD_USE_COUNT(len, max_txd_pwr); |
|
2298 |
|
2299 if(adapter->pcix_82544) |
|
2300 count++; |
|
2301 |
|
2302 /* Workaround for errata 10, which applies to all controllers |
|
2303 * in PCI-X mode, so add one more descriptor to the count |
|
2304 */ |
|
2305 if(unlikely((adapter->hw.bus_type == e1000_bus_type_pcix) && |
|
2306 (len > 2015))) |
|
2307 count++; |
|
2308 |
|
2309 nr_frags = skb_shinfo(skb)->nr_frags; |
|
2310 for(f = 0; f < nr_frags; f++) |
|
2311 count += TXD_USE_COUNT(skb_shinfo(skb)->frags[f].size, |
|
2312 max_txd_pwr); |
|
2313 if(adapter->pcix_82544) |
|
2314 count += nr_frags; |
|
2315 |
|
2316 if (!adapter->ecdev) { |
|
2317 local_irq_save(flags); |
|
2318 if (!spin_trylock(&adapter->tx_lock)) { |
|
2319 /* Collision - tell upper layer to requeue */ |
|
2320 local_irq_restore(flags); |
|
2321 return NETDEV_TX_LOCKED; |
|
2322 } |
|
2323 if(adapter->hw.tx_pkt_filtering && (adapter->hw.mac_type == e1000_82573) ) |
|
2324 e1000_transfer_dhcp_info(adapter, skb); |
|
2325 } |
|
2326 |
|
2327 |
|
2328 /* need: count + 2 desc gap to keep tail from touching |
|
2329 * head, otherwise try next time */ |
|
2330 if(unlikely(E1000_DESC_UNUSED(&adapter->tx_ring) < count + 2)) { |
|
2331 if (!adapter->ecdev) { |
|
2332 netif_stop_queue(netdev); |
|
2333 spin_unlock_irqrestore(&adapter->tx_lock, flags); |
|
2334 } |
|
2335 return NETDEV_TX_BUSY; |
|
2336 } |
|
2337 |
|
2338 if(unlikely(adapter->hw.mac_type == e1000_82547)) { |
|
2339 if(unlikely(e1000_82547_fifo_workaround(adapter, skb))) { |
|
2340 if (!adapter->ecdev) { |
|
2341 netif_stop_queue(netdev); |
|
2342 mod_timer(&adapter->tx_fifo_stall_timer, jiffies); |
|
2343 spin_unlock_irqrestore(&adapter->tx_lock, flags); |
|
2344 } |
|
2345 return NETDEV_TX_BUSY; |
|
2346 } |
|
2347 } |
|
2348 |
|
2349 if(unlikely(adapter->vlgrp && vlan_tx_tag_present(skb))) { |
|
2350 tx_flags |= E1000_TX_FLAGS_VLAN; |
|
2351 tx_flags |= (vlan_tx_tag_get(skb) << E1000_TX_FLAGS_VLAN_SHIFT); |
|
2352 } |
|
2353 |
|
2354 first = adapter->tx_ring.next_to_use; |
|
2355 |
|
2356 tso = e1000_tso(adapter, skb); |
|
2357 if (tso < 0) { |
|
2358 if (!adapter->ecdev) { |
|
2359 dev_kfree_skb_any(skb); |
|
2360 spin_unlock_irqrestore(&adapter->tx_lock, flags); |
|
2361 } |
|
2362 return NETDEV_TX_OK; |
|
2363 } |
|
2364 |
|
2365 if (likely(tso)) |
|
2366 tx_flags |= E1000_TX_FLAGS_TSO; |
|
2367 else if(likely(e1000_tx_csum(adapter, skb))) |
|
2368 tx_flags |= E1000_TX_FLAGS_CSUM; |
|
2369 |
|
2370 /* The old method assumed an IPv4 packet by default whenever TSO was enabled. |
|
2371 * 82573 hardware supports TSO for IPv6 as well, so we may no |
|
2372 * longer assume; check the protocol explicitly instead. */ |
|
2373 if(likely(skb->protocol == ntohs(ETH_P_IP))) |
|
2374 tx_flags |= E1000_TX_FLAGS_IPV4; |
|
2375 |
|
2376 e1000_tx_queue(adapter, |
|
2377 e1000_tx_map(adapter, skb, first, max_per_txd, nr_frags, mss), |
|
2378 tx_flags); |
|
2379 |
|
2380 netdev->trans_start = jiffies; |
|
2381 |
|
2382 /* Make sure there is space in the ring for the next send. */ |
|
2383 if (!adapter->ecdev) { |
|
2384 if(unlikely(E1000_DESC_UNUSED(&adapter->tx_ring) < MAX_SKB_FRAGS + 2)) |
|
2385 netif_stop_queue(netdev); |
|
2386 spin_unlock_irqrestore(&adapter->tx_lock, flags); |
|
2387 } |
|
2388 return NETDEV_TX_OK; |
|
2389 } |
|
2390 |
|
2391 /** |
|
2392 * e1000_tx_timeout - Respond to a Tx Hang |
|
2393 * @netdev: network interface device structure |
|
2394 **/ |
|
2395 |
|
2396 static void |
|
2397 e1000_tx_timeout(struct net_device *netdev) |
|
2398 { |
|
2399 struct e1000_adapter *adapter = netdev_priv(netdev); |
|
2400 |
|
2401 /* Do the reset outside of interrupt context */ |
|
2402 schedule_work(&adapter->tx_timeout_task); |
|
2403 } |
|
2404 |
|
2405 static void |
|
2406 e1000_tx_timeout_task(struct net_device *netdev) |
|
2407 { |
|
2408 struct e1000_adapter *adapter = netdev_priv(netdev); |
|
2409 |
|
2410 e1000_down(adapter); |
|
2411 e1000_up(adapter); |
|
2412 } |
|
2413 |
|
2414 /** |
|
2415 * e1000_get_stats - Get System Network Statistics |
|
2416 * @netdev: network interface device structure |
|
2417 * |
|
2418 * Returns the address of the device statistics structure. |
|
2419 * The statistics are actually updated from the timer callback. |
|
2420 **/ |
|
2421 |
|
2422 static struct net_device_stats * |
|
2423 e1000_get_stats(struct net_device *netdev) |
|
2424 { |
|
2425 struct e1000_adapter *adapter = netdev_priv(netdev); |
|
2426 |
|
2427 e1000_update_stats(adapter); |
|
2428 return &adapter->net_stats; |
|
2429 } |
|
2430 |
|
2431 /** |
|
2432 * e1000_change_mtu - Change the Maximum Transfer Unit |
|
2433 * @netdev: network interface device structure |
|
2434 * @new_mtu: new value for maximum frame size |
|
2435 * |
|
2436 * Returns 0 on success, negative on failure |
|
2437 **/ |
|
2438 |
|
2439 static int |
|
2440 e1000_change_mtu(struct net_device *netdev, int new_mtu) |
|
2441 { |
|
2442 struct e1000_adapter *adapter = netdev_priv(netdev); |
|
2443 int max_frame = new_mtu + ENET_HEADER_SIZE + ETHERNET_FCS_SIZE; |
|
2444 |
|
2445 if((max_frame < MINIMUM_ETHERNET_FRAME_SIZE) || |
|
2446 (max_frame > MAX_JUMBO_FRAME_SIZE)) { |
|
2447 DPRINTK(PROBE, ERR, "Invalid MTU setting\n"); |
|
2448 return -EINVAL; |
|
2449 } |
|
2450 |
|
2451 #define MAX_STD_JUMBO_FRAME_SIZE 9216 |
|
2452 /* might want to turn this into a broader mac_type enum check... */ |
|
2453 if (adapter->hw.mac_type == e1000_82573 && |
|
2454 max_frame > MAXIMUM_ETHERNET_FRAME_SIZE) { |
|
2455 DPRINTK(PROBE, ERR, "Jumbo Frames not supported " |
|
2456 "on 82573\n"); |
|
2457 return -EINVAL; |
|
2458 } |
|
2459 |
|
2460 if(adapter->hw.mac_type > e1000_82547_rev_2) { |
|
2461 adapter->rx_buffer_len = max_frame; |
|
2462 E1000_ROUNDUP(adapter->rx_buffer_len, 1024); |
|
2463 } else { |
|
2464 if(unlikely((adapter->hw.mac_type < e1000_82543) && |
|
2465 (max_frame > MAXIMUM_ETHERNET_FRAME_SIZE))) { |
|
2466 DPRINTK(PROBE, ERR, "Jumbo Frames not supported " |
|
2467 "on 82542\n"); |
|
2468 return -EINVAL; |
|
2469 |
|
2470 } else { |
|
2471 if(max_frame <= E1000_RXBUFFER_2048) { |
|
2472 adapter->rx_buffer_len = E1000_RXBUFFER_2048; |
|
2473 } else if(max_frame <= E1000_RXBUFFER_4096) { |
|
2474 adapter->rx_buffer_len = E1000_RXBUFFER_4096; |
|
2475 } else if(max_frame <= E1000_RXBUFFER_8192) { |
|
2476 adapter->rx_buffer_len = E1000_RXBUFFER_8192; |
|
2477 } else if(max_frame <= E1000_RXBUFFER_16384) { |
|
2478 adapter->rx_buffer_len = E1000_RXBUFFER_16384; |
|
2479 } |
|
2480 } |
|
2481 } |
|
2482 |
|
2483 netdev->mtu = new_mtu; |
|
2484 |
|
2485 if (adapter->ecdev || netif_running(netdev)) { |
|
2486 e1000_down(adapter); |
|
2487 e1000_up(adapter); |
|
2488 } |
|
2489 |
|
2490 adapter->hw.max_frame_size = max_frame; |
|
2491 |
|
2492 return 0; |
|
2493 } |
|
2494 |
|
2495 /** |
|
2496 * e1000_update_stats - Update the board statistics counters |
|
2497 * @adapter: board private structure |
|
2498 **/ |
|
2499 |
|
2500 void |
|
2501 e1000_update_stats(struct e1000_adapter *adapter) |
|
2502 { |
|
2503 struct e1000_hw *hw = &adapter->hw; |
|
2504 unsigned long flags = 0; |
|
2505 uint16_t phy_tmp; |
|
2506 |
|
2507 #define PHY_IDLE_ERROR_COUNT_MASK 0x00FF |
|
2508 |
|
2509 if (!adapter->ecdev) |
|
2510 spin_lock_irqsave(&adapter->stats_lock, flags); |
|
2511 |
|
2512 /* these counters are modified from e1000_adjust_tbi_stats, |
|
2513 * called from the interrupt context, so they must only |
|
2514 * be written while holding adapter->stats_lock |
|
2515 */ |
|
2516 |
|
2517 adapter->stats.crcerrs += E1000_READ_REG(hw, CRCERRS); |
|
2518 adapter->stats.gprc += E1000_READ_REG(hw, GPRC); |
|
2519 adapter->stats.gorcl += E1000_READ_REG(hw, GORCL); |
|
2520 adapter->stats.gorch += E1000_READ_REG(hw, GORCH); |
|
2521 adapter->stats.bprc += E1000_READ_REG(hw, BPRC); |
|
2522 adapter->stats.mprc += E1000_READ_REG(hw, MPRC); |
|
2523 adapter->stats.roc += E1000_READ_REG(hw, ROC); |
|
2524 adapter->stats.prc64 += E1000_READ_REG(hw, PRC64); |
|
2525 adapter->stats.prc127 += E1000_READ_REG(hw, PRC127); |
|
2526 adapter->stats.prc255 += E1000_READ_REG(hw, PRC255); |
|
2527 adapter->stats.prc511 += E1000_READ_REG(hw, PRC511); |
|
2528 adapter->stats.prc1023 += E1000_READ_REG(hw, PRC1023); |
|
2529 adapter->stats.prc1522 += E1000_READ_REG(hw, PRC1522); |
|
2530 |
|
2531 adapter->stats.symerrs += E1000_READ_REG(hw, SYMERRS); |
|
2532 adapter->stats.mpc += E1000_READ_REG(hw, MPC); |
|
2533 adapter->stats.scc += E1000_READ_REG(hw, SCC); |
|
2534 adapter->stats.ecol += E1000_READ_REG(hw, ECOL); |
|
2535 adapter->stats.mcc += E1000_READ_REG(hw, MCC); |
|
2536 adapter->stats.latecol += E1000_READ_REG(hw, LATECOL); |
|
2537 adapter->stats.dc += E1000_READ_REG(hw, DC); |
|
2538 adapter->stats.sec += E1000_READ_REG(hw, SEC); |
|
2539 adapter->stats.rlec += E1000_READ_REG(hw, RLEC); |
|
2540 adapter->stats.xonrxc += E1000_READ_REG(hw, XONRXC); |
|
2541 adapter->stats.xontxc += E1000_READ_REG(hw, XONTXC); |
|
2542 adapter->stats.xoffrxc += E1000_READ_REG(hw, XOFFRXC); |
|
2543 adapter->stats.xofftxc += E1000_READ_REG(hw, XOFFTXC); |
|
2544 adapter->stats.fcruc += E1000_READ_REG(hw, FCRUC); |
|
2545 adapter->stats.gptc += E1000_READ_REG(hw, GPTC); |
|
2546 adapter->stats.gotcl += E1000_READ_REG(hw, GOTCL); |
|
2547 adapter->stats.gotch += E1000_READ_REG(hw, GOTCH); |
|
2548 adapter->stats.rnbc += E1000_READ_REG(hw, RNBC); |
|
2549 adapter->stats.ruc += E1000_READ_REG(hw, RUC); |
|
2550 adapter->stats.rfc += E1000_READ_REG(hw, RFC); |
|
2551 adapter->stats.rjc += E1000_READ_REG(hw, RJC); |
|
2552 adapter->stats.torl += E1000_READ_REG(hw, TORL); |
|
2553 adapter->stats.torh += E1000_READ_REG(hw, TORH); |
|
2554 adapter->stats.totl += E1000_READ_REG(hw, TOTL); |
|
2555 adapter->stats.toth += E1000_READ_REG(hw, TOTH); |
|
2556 adapter->stats.tpr += E1000_READ_REG(hw, TPR); |
|
2557 adapter->stats.ptc64 += E1000_READ_REG(hw, PTC64); |
|
2558 adapter->stats.ptc127 += E1000_READ_REG(hw, PTC127); |
|
2559 adapter->stats.ptc255 += E1000_READ_REG(hw, PTC255); |
|
2560 adapter->stats.ptc511 += E1000_READ_REG(hw, PTC511); |
|
2561 adapter->stats.ptc1023 += E1000_READ_REG(hw, PTC1023); |
|
2562 adapter->stats.ptc1522 += E1000_READ_REG(hw, PTC1522); |
|
2563 adapter->stats.mptc += E1000_READ_REG(hw, MPTC); |
|
2564 adapter->stats.bptc += E1000_READ_REG(hw, BPTC); |
|
2565 |
|
2566 /* used for adaptive IFS */ |
|
2567 |
|
2568 hw->tx_packet_delta = E1000_READ_REG(hw, TPT); |
|
2569 adapter->stats.tpt += hw->tx_packet_delta; |
|
2570 hw->collision_delta = E1000_READ_REG(hw, COLC); |
|
2571 adapter->stats.colc += hw->collision_delta; |
|
2572 |
|
2573 if(hw->mac_type >= e1000_82543) { |
|
2574 adapter->stats.algnerrc += E1000_READ_REG(hw, ALGNERRC); |
|
2575 adapter->stats.rxerrc += E1000_READ_REG(hw, RXERRC); |
|
2576 adapter->stats.tncrs += E1000_READ_REG(hw, TNCRS); |
|
2577 adapter->stats.cexterr += E1000_READ_REG(hw, CEXTERR); |
|
2578 adapter->stats.tsctc += E1000_READ_REG(hw, TSCTC); |
|
2579 adapter->stats.tsctfc += E1000_READ_REG(hw, TSCTFC); |
|
2580 } |
|
2581 if(hw->mac_type > e1000_82547_rev_2) { |
|
2582 adapter->stats.iac += E1000_READ_REG(hw, IAC); |
|
2583 adapter->stats.icrxoc += E1000_READ_REG(hw, ICRXOC); |
|
2584 adapter->stats.icrxptc += E1000_READ_REG(hw, ICRXPTC); |
|
2585 adapter->stats.icrxatc += E1000_READ_REG(hw, ICRXATC); |
|
2586 adapter->stats.ictxptc += E1000_READ_REG(hw, ICTXPTC); |
|
2587 adapter->stats.ictxatc += E1000_READ_REG(hw, ICTXATC); |
|
2588 adapter->stats.ictxqec += E1000_READ_REG(hw, ICTXQEC); |
|
2589 adapter->stats.ictxqmtc += E1000_READ_REG(hw, ICTXQMTC); |
|
2590 adapter->stats.icrxdmtc += E1000_READ_REG(hw, ICRXDMTC); |
|
2591 } |
|
2592 |
|
2593 /* Fill out the OS statistics structure */ |
|
2594 |
|
2595 adapter->net_stats.rx_packets = adapter->stats.gprc; |
|
2596 adapter->net_stats.tx_packets = adapter->stats.gptc; |
|
2597 adapter->net_stats.rx_bytes = adapter->stats.gorcl; |
|
2598 adapter->net_stats.tx_bytes = adapter->stats.gotcl; |
|
2599 adapter->net_stats.multicast = adapter->stats.mprc; |
|
2600 adapter->net_stats.collisions = adapter->stats.colc; |
|
2601 |
|
2602 /* Rx Errors */ |
|
2603 |
|
2604 adapter->net_stats.rx_errors = adapter->stats.rxerrc + |
|
2605 adapter->stats.crcerrs + adapter->stats.algnerrc + |
|
2606 adapter->stats.rlec + adapter->stats.mpc + |
|
2607 adapter->stats.cexterr; |
|
2608 adapter->net_stats.rx_dropped = adapter->stats.mpc; |
|
2609 adapter->net_stats.rx_length_errors = adapter->stats.rlec; |
|
2610 adapter->net_stats.rx_crc_errors = adapter->stats.crcerrs; |
|
2611 adapter->net_stats.rx_frame_errors = adapter->stats.algnerrc; |
|
2612 adapter->net_stats.rx_fifo_errors = adapter->stats.mpc; |
|
2613 adapter->net_stats.rx_missed_errors = adapter->stats.mpc; |
|
2614 |
|
2615 /* Tx Errors */ |
|
2616 |
|
2617 adapter->net_stats.tx_errors = adapter->stats.ecol + |
|
2618 adapter->stats.latecol; |
|
2619 adapter->net_stats.tx_aborted_errors = adapter->stats.ecol; |
|
2620 adapter->net_stats.tx_window_errors = adapter->stats.latecol; |
|
2621 adapter->net_stats.tx_carrier_errors = adapter->stats.tncrs; |
|
2622 |
|
2623 /* Tx Dropped needs to be maintained elsewhere */ |
|
2624 |
|
2625 /* Phy Stats */ |
|
2626 |
|
2627 if(hw->media_type == e1000_media_type_copper) { |
|
2628 if((adapter->link_speed == SPEED_1000) && |
|
2629 (!e1000_read_phy_reg(hw, PHY_1000T_STATUS, &phy_tmp))) { |
|
2630 phy_tmp &= PHY_IDLE_ERROR_COUNT_MASK; |
|
2631 adapter->phy_stats.idle_errors += phy_tmp; |
|
2632 } |
|
2633 |
|
2634 if((hw->mac_type <= e1000_82546) && |
|
2635 (hw->phy_type == e1000_phy_m88) && |
|
2636 !e1000_read_phy_reg(hw, M88E1000_RX_ERR_CNTR, &phy_tmp)) |
|
2637 adapter->phy_stats.receive_errors += phy_tmp; |
|
2638 } |
|
2639 |
|
2640 if (!adapter->ecdev) |
|
2641 spin_unlock_irqrestore(&adapter->stats_lock, flags); |
|
2642 } |
|
2643 |
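/**
 * ec_poll - EtherCAT poll handler
 * @netdev: network interface device structure
 *
 * Called cyclically by the EtherCAT master instead of the interrupt
 * handler: runs the watchdog task roughly every two seconds and then
 * processes the interrupt sources directly via e1000_intr().
 **/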
|
2644 void ec_poll(struct net_device *netdev) |
|
2645 { |
|
2646 struct e1000_adapter *adapter = netdev_priv(netdev); |
|
2647 |
|
2648 if (jiffies - adapter->ec_watchdog_jiffies >= 2 * HZ) { |
|
2649 e1000_watchdog_task(adapter); |
|
2650 adapter->ec_watchdog_jiffies = jiffies; |
|
2651 } |
|
2652 |
|
2653 e1000_intr(0, netdev, NULL); |
|
2654 } |
|
2655 |
|
2656 /** |
|
2657 * e1000_intr - Interrupt Handler |
|
2658 * @irq: interrupt number |
|
2659 * @data: pointer to a network interface device structure |
|
2660 * @regs: CPU registers structure |
|
2661 **/ |
|
2662 |
|
2663 static irqreturn_t |
|
2664 e1000_intr(int irq, void *data, struct pt_regs *regs) |
|
2665 { |
|
2666 struct net_device *netdev = data; |
|
2667 struct e1000_adapter *adapter = netdev_priv(netdev); |
|
2668 struct e1000_hw *hw = &adapter->hw; |
|
2669 uint32_t icr = E1000_READ_REG(hw, ICR); |
|
2670 #ifndef CONFIG_E1000_NAPI |
|
2671 unsigned int i; |
|
2672 #endif |
|
2673 |
|
2674 if(unlikely(!icr)) |
|
2675 return IRQ_NONE; /* Not our interrupt */ |
|
2676 |
|
2677 if(unlikely(icr & (E1000_ICR_RXSEQ | E1000_ICR_LSC))) { |
|
2678 hw->get_link_status = 1; |
|
2679 if (!adapter->ecdev) |
|
2680 mod_timer(&adapter->watchdog_timer, jiffies); |
|
2681 } |
|
2682 |
|
2683 #ifdef CONFIG_E1000_NAPI |
|
2684 if(!adapter->ecdev && likely(netif_rx_schedule_prep(netdev))) { |
|
2685 |
|
2686 /* Disable interrupts and register for poll. The flush |
|
2687 of the posted write is intentionally left out. |
|
2688 */ |
|
2689 |
|
2690 atomic_inc(&adapter->irq_sem); |
|
2691 E1000_WRITE_REG(hw, IMC, ~0); |
|
2692 __netif_rx_schedule(netdev); |
|
2693 } |
|
2694 #else |
|
2695 /* Writing IMC and IMS is needed for 82547. |
|
2696 Due to Hub Link bus being occupied, an interrupt |
|
2697 de-assertion message is not able to be sent. |
|
2698 When an interrupt assertion message is generated later, |
|
2699 two messages are re-ordered and sent out. |
|
2700 That causes APIC to think 82547 is in de-assertion |
|
2701 state, while 82547 is in assertion state, resulting |
|
2702 in deadlock. Writing IMC forces 82547 into |
|
2703 de-assertion state. |
|
2704 */ |
|
2705 if(!adapter->ecdev && |
|
2706 (hw->mac_type == e1000_82547 || hw->mac_type == e1000_82547_rev_2)){ |
|
2707 atomic_inc(&adapter->irq_sem); |
|
2708 E1000_WRITE_REG(hw, IMC, ~0); |
|
2709 } |
|
2710 |
|
2711 for(i = 0; i < E1000_MAX_INTR; i++) |
|
2712 if(unlikely(!adapter->clean_rx(adapter) & |
|
2713 !e1000_clean_tx_irq(adapter))) |
|
2714 break; |
|
2715 |
|
2716 if(hw->mac_type == e1000_82547 || hw->mac_type == e1000_82547_rev_2) |
|
2717 e1000_irq_enable(adapter); |
|
2718 #endif |
|
2719 |
|
2720 return IRQ_HANDLED; |
|
2721 } |
|
2722 |
|
2723 #ifdef CONFIG_E1000_NAPI |
|
2724 /** |
|
2725 * e1000_clean - NAPI Rx polling callback |
|
2726 * @netdev: network interface device structure |
|
2727 **/ |
|
2728 |
|
2729 static int |
|
2730 e1000_clean(struct net_device *netdev, int *budget) // never called for EtherCAT |
|
2731 { |
|
2732 struct e1000_adapter *adapter = netdev_priv(netdev); |
|
2733 int work_to_do = min(*budget, netdev->quota); |
|
2734 int tx_cleaned; |
|
2735 int work_done = 0; |
|
2736 |
|
2737 tx_cleaned = e1000_clean_tx_irq(adapter); |
|
2738 adapter->clean_rx(adapter, &work_done, work_to_do); |
|
2739 |
|
2740 *budget -= work_done; |
|
2741 netdev->quota -= work_done; |
|
2742 |
|
2743 if ((!tx_cleaned && (work_done == 0)) || !netif_running(netdev)) { |
|
2744 /* If no Tx and not enough Rx work done, exit the polling mode */ |
|
2745 netif_rx_complete(netdev); |
|
2746 e1000_irq_enable(adapter); |
|
2747 return 0; |
|
2748 } |
|
2749 |
|
2750 return 1; |
|
2751 } |
|
2752 |
|
2753 #endif |
|
2754 /** |
|
2755 * e1000_clean_tx_irq - Reclaim resources after transmit completes |
|
2756 * @adapter: board private structure |
|
2757 **/ |
|
2758 |
|
2759 static boolean_t |
|
2760 e1000_clean_tx_irq(struct e1000_adapter *adapter) |
|
2761 { |
|
2762 struct e1000_desc_ring *tx_ring = &adapter->tx_ring; |
|
2763 struct net_device *netdev = adapter->netdev; |
|
2764 struct e1000_tx_desc *tx_desc, *eop_desc; |
|
2765 struct e1000_buffer *buffer_info; |
|
2766 unsigned int i, eop; |
|
2767 boolean_t cleaned = FALSE; |
|
2768 |
|
2769 i = tx_ring->next_to_clean; |
|
2770 eop = tx_ring->buffer_info[i].next_to_watch; |
|
2771 eop_desc = E1000_TX_DESC(*tx_ring, eop); |
|
2772 |
|
2773 while(eop_desc->upper.data & cpu_to_le32(E1000_TXD_STAT_DD)) { |
|
2774 /* Premature write-back of Tx descriptors: free the buffer and |
|
2775 * unmap the PCI mapping saved in previous_buffer_info */ |
|
2776 if (likely(adapter->previous_buffer_info.skb != NULL)) { |
|
2777 e1000_unmap_and_free_tx_resource(adapter, |
|
2778 &adapter->previous_buffer_info); |
|
2779 } |
|
2780 |
|
2781 for(cleaned = FALSE; !cleaned; ) { |
|
2782 tx_desc = E1000_TX_DESC(*tx_ring, i); |
|
2783 buffer_info = &tx_ring->buffer_info[i]; |
|
2784 cleaned = (i == eop); |
|
2785 |
|
2786 #ifdef NETIF_F_TSO |
|
2787 if (!(netdev->features & NETIF_F_TSO)) { |
|
2788 #endif |
|
2789 e1000_unmap_and_free_tx_resource(adapter, |
|
2790 buffer_info); |
|
2791 #ifdef NETIF_F_TSO |
|
2792 } else { |
|
2793 if (cleaned) { |
|
2794 memcpy(&adapter->previous_buffer_info, |
|
2795 buffer_info, |
|
2796 sizeof(struct e1000_buffer)); |
|
2797 memset(buffer_info, 0, |
|
2798 sizeof(struct e1000_buffer)); |
|
2799 } else { |
|
2800 e1000_unmap_and_free_tx_resource( |
|
2801 adapter, buffer_info); |
|
2802 } |
|
2803 } |
|
2804 #endif |
|
2805 |
|
2806 tx_desc->buffer_addr = 0; |
|
2807 tx_desc->lower.data = 0; |
|
2808 tx_desc->upper.data = 0; |
|
2809 |
|
2810 if(unlikely(++i == tx_ring->count)) i = 0; |
|
2811 } |
|
2812 |
|
2813 eop = tx_ring->buffer_info[i].next_to_watch; |
|
2814 eop_desc = E1000_TX_DESC(*tx_ring, eop); |
|
2815 } |
|
2816 |
|
2817 tx_ring->next_to_clean = i; |
|
2818 |
|
2819 if (!adapter->ecdev) { |
|
2820 spin_lock(&adapter->tx_lock); |
|
2821 |
|
2822 if(unlikely(cleaned && netif_queue_stopped(netdev) && |
|
2823 netif_carrier_ok(netdev))) |
|
2824 netif_wake_queue(netdev); |
|
2825 |
|
2826 spin_unlock(&adapter->tx_lock); |
|
2827 } |
|
2828 |
|
2829 if(!adapter->ecdev && adapter->detect_tx_hung) { |
|
2830 |
|
2831 /* Detect a transmit hang in hardware, this serializes the |
|
2832 * check with the clearing of time_stamp and movement of i */ |
|
2833 adapter->detect_tx_hung = FALSE; |
|
2834 if (tx_ring->buffer_info[i].dma && |
|
2835 time_after(jiffies, tx_ring->buffer_info[i].time_stamp + HZ) |
|
2836 && !(E1000_READ_REG(&adapter->hw, STATUS) & |
|
2837 E1000_STATUS_TXOFF)) { |
|
2838 |
|
2839 /* detected Tx unit hang */ |
|
2840 i = tx_ring->next_to_clean; |
|
2841 eop = tx_ring->buffer_info[i].next_to_watch; |
|
2842 eop_desc = E1000_TX_DESC(*tx_ring, eop); |
|
2843 DPRINTK(DRV, ERR, "Detected Tx Unit Hang\n" |
|
2844 " TDH <%x>\n" |
|
2845 " TDT <%x>\n" |
|
2846 " next_to_use <%x>\n" |
|
2847 " next_to_clean <%x>\n" |
|
2848 "buffer_info[next_to_clean]\n" |
|
2849 " dma <%zx>\n" |
|
2850 " time_stamp <%lx>\n" |
|
2851 " next_to_watch <%x>\n" |
|
2852 " jiffies <%lx>\n" |
|
2853 " next_to_watch.status <%x>\n", |
|
2854 E1000_READ_REG(&adapter->hw, TDH), |
|
2855 E1000_READ_REG(&adapter->hw, TDT), |
|
2856 tx_ring->next_to_use, |
|
2857 i, |
|
2858 tx_ring->buffer_info[i].dma, |
|
2859 tx_ring->buffer_info[i].time_stamp, |
|
2860 eop, |
|
2861 jiffies, |
|
2862 eop_desc->upper.fields.status); |
|
2863 netif_stop_queue(netdev); |
|
2864 } |
|
2865 } |
|
2866 #ifdef NETIF_F_TSO |
|
2867 |
|
2868 if( unlikely(!(eop_desc->upper.data & cpu_to_le32(E1000_TXD_STAT_DD)) && |
|
2869 time_after(jiffies, adapter->previous_buffer_info.time_stamp + HZ))) |
|
2870 e1000_unmap_and_free_tx_resource( |
|
2871 adapter, &adapter->previous_buffer_info); |
|
2872 |
|
2873 #endif |
|
2874 return cleaned; |
|
2875 } |
|
2876 |
|
2877 /** |
|
2878 * e1000_rx_checksum - Receive Checksum Offload for 82543 |
|
2879 * @adapter: board private structure |
|
2880 * @status_err: receive descriptor status and error fields |
|
2881 * @csum: receive descriptor csum field |
|
2882 * @skb: socket buffer with received data |
|
2883 **/ |
|
2884 |
|
2885 static inline void |
|
2886 e1000_rx_checksum(struct e1000_adapter *adapter, |
|
2887 uint32_t status_err, uint32_t csum, |
|
2888 struct sk_buff *skb) |
|
2889 { |
|
2890 uint16_t status = (uint16_t)status_err; |
|
2891 uint8_t errors = (uint8_t)(status_err >> 24); |
|
2892 skb->ip_summed = CHECKSUM_NONE; |
|
2893 |
|
2894 /* 82543 or newer only */ |
|
2895 if(unlikely(adapter->hw.mac_type < e1000_82543)) return; |
|
2896 /* Ignore Checksum bit is set */ |
|
2897 if(unlikely(status & E1000_RXD_STAT_IXSM)) return; |
|
2898 /* TCP/UDP checksum error bit is set */ |
|
2899 if(unlikely(errors & E1000_RXD_ERR_TCPE)) { |
|
2900 /* let the stack verify checksum errors */ |
|
2901 adapter->hw_csum_err++; |
|
2902 return; |
|
2903 } |
|
2904 /* TCP/UDP Checksum has not been calculated */ |
|
2905 if(adapter->hw.mac_type <= e1000_82547_rev_2) { |
|
2906 if(!(status & E1000_RXD_STAT_TCPCS)) |
|
2907 return; |
|
2908 } else { |
|
2909 if(!(status & (E1000_RXD_STAT_TCPCS | E1000_RXD_STAT_UDPCS))) |
|
2910 return; |
|
2911 } |
|
2912 /* It must be a TCP or UDP packet with a valid checksum */ |
|
2913 if (likely(status & E1000_RXD_STAT_TCPCS)) { |
|
2914 /* TCP checksum is good */ |
|
2915 skb->ip_summed = CHECKSUM_UNNECESSARY; |
|
2916 } else if (adapter->hw.mac_type > e1000_82547_rev_2) { |
|
2917 /* IP fragment with UDP payload */ |
|
2918 /* Hardware complements the payload checksum, so we undo it |
|
2919 * and then put the value in host order for further stack use. |
|
2920 */ |
|
2921 csum = ntohl(csum ^ 0xFFFF); |
|
2922 skb->csum = csum; |
|
2923 skb->ip_summed = CHECKSUM_HW; |
|
2924 } |
|
2925 adapter->hw_csum_good++; |
|
2926 } |
|
2927 |
|
2928 /** |
|
2929 * e1000_clean_rx_irq - Send received data up the network stack; legacy |
|
2930 * @adapter: board private structure |
|
2931 **/ |
|
2932 |
|
2933 static boolean_t |
|
2934 #ifdef CONFIG_E1000_NAPI |
|
2935 e1000_clean_rx_irq(struct e1000_adapter *adapter, int *work_done, |
|
2936 int work_to_do) |
|
2937 #else |
|
2938 e1000_clean_rx_irq(struct e1000_adapter *adapter) |
|
2939 #endif |
|
2940 { |
|
2941 struct e1000_desc_ring *rx_ring = &adapter->rx_ring; |
|
2942 struct net_device *netdev = adapter->netdev; |
|
2943 struct pci_dev *pdev = adapter->pdev; |
|
2944 struct e1000_rx_desc *rx_desc; |
|
2945 struct e1000_buffer *buffer_info; |
|
2946 struct sk_buff *skb; |
|
2947 unsigned long flags; |
|
2948 uint32_t length; |
|
2949 uint8_t last_byte; |
|
2950 unsigned int i; |
|
2951 boolean_t cleaned = FALSE; |
|
2952 |
|
2953 i = rx_ring->next_to_clean; |
|
2954 rx_desc = E1000_RX_DESC(*rx_ring, i); |
|
2955 |
|
2956 while(rx_desc->status & E1000_RXD_STAT_DD) { |
|
2957 buffer_info = &rx_ring->buffer_info[i]; |
|
2958 #ifdef CONFIG_E1000_NAPI |
|
2959 if(*work_done >= work_to_do) |
|
2960 break; |
|
2961 (*work_done)++; |
|
2962 #endif |
|
2963 cleaned = TRUE; |
|
2964 |
|
2965 pci_unmap_single(pdev, |
|
2966 buffer_info->dma, |
|
2967 buffer_info->length, |
|
2968 PCI_DMA_FROMDEVICE); |
|
2969 |
|
2970 skb = buffer_info->skb; |
|
2971 length = le16_to_cpu(rx_desc->length); |
|
2972 |
|
2973 if(unlikely(!(rx_desc->status & E1000_RXD_STAT_EOP))) { |
|
2974 /* All receives must fit into a single buffer */ |
|
2975 E1000_DBG("%s: Receive packet consumed multiple" |
|
2976 " buffers\n", netdev->name); |
|
2977 if (!adapter->ecdev) dev_kfree_skb_irq(skb); |
|
2978 goto next_desc; |
|
2979 } |
|
2980 |
|
2981 if(!adapter->ecdev && unlikely(rx_desc->errors & E1000_RXD_ERR_FRAME_ERR_MASK)) { |
|
2982 last_byte = *(skb->data + length - 1); |
|
2983 if(TBI_ACCEPT(&adapter->hw, rx_desc->status, |
|
2984 rx_desc->errors, length, last_byte)) { |
|
2985 spin_lock_irqsave(&adapter->stats_lock, flags); |
|
2986 e1000_tbi_adjust_stats(&adapter->hw, |
|
2987 &adapter->stats, |
|
2988 length, skb->data); |
|
2989 spin_unlock_irqrestore(&adapter->stats_lock, |
|
2990 flags); |
|
2991 length--; |
|
2992 } else { |
|
2993 dev_kfree_skb_irq(skb); |
|
2994 goto next_desc; |
|
2995 } |
|
2996 } |
|
2997 |
|
2998 /* Good Receive */ |
|
2999 skb_put(skb, length - ETHERNET_FCS_SIZE); |
|
3000 |
|
3001 /* Receive Checksum Offload */ |
|
3002 e1000_rx_checksum(adapter, |
|
3003 (uint32_t)(rx_desc->status) | |
|
3004 ((uint32_t)(rx_desc->errors) << 24), |
|
3005 rx_desc->csum, skb); |
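/* In EtherCAT mode the frame is handed directly to the master and
 * the skb is kept for reuse; otherwise it is passed up the stack. */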
|
3006 if (adapter->ecdev) { |
|
3007 ecdev_receive(adapter->ecdev, skb->data, length); |
|
3008 skb_trim(skb, 0); |
|
3009 |
|
3010 if(unlikely((i & ~(E1000_RX_BUFFER_WRITE - 1)) == i)) { |
|
3011 /* Force memory writes to complete before letting h/w |
|
3012 * know there are new descriptors to fetch. (Only |
|
3013 * applicable for weak-ordered memory model archs, |
|
3014 * such as IA-64). */ |
|
3015 wmb(); |
|
3016 E1000_WRITE_REG(&adapter->hw, RDT, i); |
|
3017 } |
|
3018 } else { |
|
3019 skb->protocol = eth_type_trans(skb, netdev); |
|
3020 #ifdef CONFIG_E1000_NAPI |
|
3021 if(unlikely(adapter->vlgrp && |
|
3022 (rx_desc->status & E1000_RXD_STAT_VP))) { |
|
3023 vlan_hwaccel_receive_skb(skb, adapter->vlgrp, |
|
3024 le16_to_cpu(rx_desc->special) & |
|
3025 E1000_RXD_SPC_VLAN_MASK); |
|
3026 } else { |
|
3027 netif_receive_skb(skb); |
|
3028 } |
|
3029 #else /* CONFIG_E1000_NAPI */ |
|
3030 if(unlikely(adapter->vlgrp && |
|
3031 (rx_desc->status & E1000_RXD_STAT_VP))) { |
|
3032 vlan_hwaccel_rx(skb, adapter->vlgrp, |
|
3033 le16_to_cpu(rx_desc->special) & |
|
3034 E1000_RXD_SPC_VLAN_MASK); |
|
3035 } else { |
|
3036 netif_rx(skb); |
|
3037 } |
|
3038 #endif /* CONFIG_E1000_NAPI */ |
|
3039 } |
|
3040 netdev->last_rx = jiffies; |
|
3041 |
|
3042 next_desc: |
|
3043 rx_desc->status = 0; |
|
3044 if (!adapter->ecdev) buffer_info->skb = NULL; |
|
3045 if(unlikely(++i == rx_ring->count)) i = 0; |
|
3046 |
|
3047 rx_desc = E1000_RX_DESC(*rx_ring, i); |
|
3048 } |
|
3049 rx_ring->next_to_clean = i; |
|
3050 if (adapter->ecdev) { |
|
3051 rx_ring->next_to_use = i; |
|
3052 } else { |
|
3053 adapter->alloc_rx_buf(adapter); |
|
3054 } |
|
3055 |
|
3056 return cleaned; |
|
3057 } |
|
3058 |
|
3059 /** |
|
3060 * e1000_clean_rx_irq_ps - Send received data up the network stack; packet split |
|
3061 * @adapter: board private structure |
|
3062 **/ |
|
3063 |
|
3064 static boolean_t |
|
3065 #ifdef CONFIG_E1000_NAPI |
|
3066 e1000_clean_rx_irq_ps(struct e1000_adapter *adapter, int *work_done, |
|
3067 int work_to_do) |
|
3068 #else |
|
3069 e1000_clean_rx_irq_ps(struct e1000_adapter *adapter) |
|
3070 #endif |
|
3071 { |
|
3072 struct e1000_desc_ring *rx_ring = &adapter->rx_ring; |
|
3073 union e1000_rx_desc_packet_split *rx_desc; |
|
3074 struct net_device *netdev = adapter->netdev; |
|
3075 struct pci_dev *pdev = adapter->pdev; |
|
3076 struct e1000_buffer *buffer_info; |
|
3077 struct e1000_ps_page *ps_page; |
|
3078 struct e1000_ps_page_dma *ps_page_dma; |
|
3079 struct sk_buff *skb; |
|
3080 unsigned int i, j; |
|
3081 uint32_t length, staterr; |
|
3082 boolean_t cleaned = FALSE; |
|
3083 |
|
3084 i = rx_ring->next_to_clean; |
|
3085 rx_desc = E1000_RX_DESC_PS(*rx_ring, i); |
|
3086 staterr = le32_to_cpu(rx_desc->wb.middle.status_error); |
|
3087 |
|
3088 while(staterr & E1000_RXD_STAT_DD) { |
|
3089 buffer_info = &rx_ring->buffer_info[i]; |
|
3090 ps_page = &rx_ring->ps_page[i]; |
|
3091 ps_page_dma = &rx_ring->ps_page_dma[i]; |
|
3092 #ifdef CONFIG_E1000_NAPI |
|
3093 if(unlikely(*work_done >= work_to_do)) |
|
3094 break; |
|
3095 (*work_done)++; |
|
3096 #endif |
|
3097 cleaned = TRUE; |
|
3098 pci_unmap_single(pdev, buffer_info->dma, |
|
3099 buffer_info->length, |
|
3100 PCI_DMA_FROMDEVICE); |
|
3101 |
|
3102 skb = buffer_info->skb; |
|
3103 |
|
3104 if(unlikely(!(staterr & E1000_RXD_STAT_EOP))) { |
|
3105 E1000_DBG("%s: Packet Split buffers didn't pick up" |
|
3106 " the full packet\n", netdev->name); |
|
3107 if (!adapter->ecdev) dev_kfree_skb_irq(skb); |
|
3108 goto next_desc; |
|
3109 } |
|
3110 |
|
3111 if(unlikely(staterr & E1000_RXDEXT_ERR_FRAME_ERR_MASK)) { |
|
3112 if (!adapter->ecdev) dev_kfree_skb_irq(skb); |
|
3113 goto next_desc; |
|
3114 } |
|
3115 |
|
3116 length = le16_to_cpu(rx_desc->wb.middle.length0); |
|
3117 |
|
3118 if(unlikely(!length)) { |
|
3119 E1000_DBG("%s: Last part of the packet spanning" |
|
3120 " multiple descriptors\n", netdev->name); |
|
3121 if (!adapter->ecdev) dev_kfree_skb_irq(skb); |
|
3122 goto next_desc; |
|
3123 } |
|
3124 |
|
3125 /* Good Receive */ |
|
3126 skb_put(skb, length); |
|
3127 |
|
3128 for(j = 0; j < PS_PAGE_BUFFERS; j++) { |
|
3129 if(!(length = le16_to_cpu(rx_desc->wb.upper.length[j]))) |
|
3130 break; |
|
3131 |
|
3132 pci_unmap_page(pdev, ps_page_dma->ps_page_dma[j], |
|
3133 PAGE_SIZE, PCI_DMA_FROMDEVICE); |
|
3134 ps_page_dma->ps_page_dma[j] = 0; |
|
3135 skb_shinfo(skb)->frags[j].page = |
|
3136 ps_page->ps_page[j]; |
|
3137 ps_page->ps_page[j] = NULL; |
|
3138 skb_shinfo(skb)->frags[j].page_offset = 0; |
|
3139 skb_shinfo(skb)->frags[j].size = length; |
|
3140 skb_shinfo(skb)->nr_frags++; |
|
3141 skb->len += length; |
|
3142 skb->data_len += length; |
|
3143 } |
|
3144 |
|
3145 e1000_rx_checksum(adapter, staterr, |
|
3146 rx_desc->wb.lower.hi_dword.csum_ip.csum, skb); |
|
3147 |
|
3148 #ifdef HAVE_RX_ZERO_COPY |
|
3149 if(likely(rx_desc->wb.upper.header_status & |
|
3150 E1000_RXDPS_HDRSTAT_HDRSP)) |
|
3151 skb_shinfo(skb)->zero_copy = TRUE; |
|
3152 #endif |
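/* As in the legacy Rx path, EtherCAT mode hands the frame to the
 * master and keeps the skb for reuse instead of passing it up. */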
|
		if (adapter->ecdev) {
			ecdev_receive(adapter->ecdev, skb->data, length);
			skb_trim(skb, 0);

			if(unlikely((i & ~(E1000_RX_BUFFER_WRITE - 1)) == i)) {
				/* Force memory writes to complete before letting h/w
				 * know there are new descriptors to fetch. (Only
				 * applicable for weak-ordered memory model archs,
				 * such as IA-64). */
				wmb();
				/* Hardware increments by 16 bytes, but packet split
				 * descriptors are 32 bytes...so we increment tail
				 * twice as much.
				 */
				E1000_WRITE_REG(&adapter->hw, RDT, i<<1);
			}
		} else {
			skb->protocol = eth_type_trans(skb, netdev);
#ifdef CONFIG_E1000_NAPI
			if(unlikely(adapter->vlgrp && (staterr & E1000_RXD_STAT_VP))) {
				vlan_hwaccel_receive_skb(skb, adapter->vlgrp,
					le16_to_cpu(rx_desc->wb.middle.vlan) &
					E1000_RXD_SPC_VLAN_MASK);
			} else {
				netif_receive_skb(skb);
			}
#else /* CONFIG_E1000_NAPI */
			if(unlikely(adapter->vlgrp && (staterr & E1000_RXD_STAT_VP))) {
				vlan_hwaccel_rx(skb, adapter->vlgrp,
					le16_to_cpu(rx_desc->wb.middle.vlan) &
					E1000_RXD_SPC_VLAN_MASK);
			} else {
				netif_rx(skb);
			}
#endif /* CONFIG_E1000_NAPI */
		}
		netdev->last_rx = jiffies;

next_desc:
		rx_desc->wb.middle.status_error &= ~0xFF;
		if (!adapter->ecdev) buffer_info->skb = NULL;
		if(unlikely(++i == rx_ring->count)) i = 0;

		rx_desc = E1000_RX_DESC_PS(*rx_ring, i);
		staterr = le32_to_cpu(rx_desc->wb.middle.status_error);
	}
	rx_ring->next_to_clean = i;
	if (adapter->ecdev) {
		rx_ring->next_to_use = i;
	} else {
		adapter->alloc_rx_buf(adapter);
	}

	return cleaned;
}
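
/* The allocator below refills the legacy/extended RX ring. Two details are
 * worth noting: to work around errata 23, an skb whose data would cross a
 * 64 kB boundary is allocated a second time (and the refill is abandoned if
 * the retry also fails the check), and the RDT tail register is only written
 * when (i & ~(E1000_RX_BUFFER_WRITE - 1)) == i, i.e. when i is a multiple of
 * E1000_RX_BUFFER_WRITE (assuming it is a power of two), so tail updates are
 * batched rather than issued per descriptor. */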
|
/**
 * e1000_alloc_rx_buffers - Replace used receive buffers; legacy & extended
 * @adapter: address of board private structure
 **/

static void
e1000_alloc_rx_buffers(struct e1000_adapter *adapter)
{
	struct e1000_desc_ring *rx_ring = &adapter->rx_ring;
	struct net_device *netdev = adapter->netdev;
	struct pci_dev *pdev = adapter->pdev;
	struct e1000_rx_desc *rx_desc;
	struct e1000_buffer *buffer_info;
	struct sk_buff *skb;
	unsigned int i;
	unsigned int bufsz = adapter->rx_buffer_len + NET_IP_ALIGN;

	i = rx_ring->next_to_use;
	buffer_info = &rx_ring->buffer_info[i];

	while(!buffer_info->skb) {
		skb = dev_alloc_skb(bufsz);

		if(unlikely(!skb)) {
			/* Better luck next round */
			break;
		}

		/* Fix for errata 23, can't cross 64kB boundary */
		if (!e1000_check_64k_bound(adapter, skb->data, bufsz)) {
			struct sk_buff *oldskb = skb;
			DPRINTK(RX_ERR, ERR, "skb align check failed: %u bytes "
					     "at %p\n", bufsz, skb->data);
			/* Try again, without freeing the previous */
			skb = dev_alloc_skb(bufsz);
			/* Failed allocation, critical failure */
			if (!skb) {
				dev_kfree_skb(oldskb);
				break;
			}

			if (!e1000_check_64k_bound(adapter, skb->data, bufsz)) {
				/* give up */
				dev_kfree_skb(skb);
				dev_kfree_skb(oldskb);
				break; /* while !buffer_info->skb */
			} else {
				/* Use new allocation */
				dev_kfree_skb(oldskb);
			}
		}
		/* Make buffer alignment 2 beyond a 16 byte boundary
		 * this will result in a 16 byte aligned IP header after
		 * the 14 byte MAC header is removed
		 */
		skb_reserve(skb, NET_IP_ALIGN);

		skb->dev = netdev;

		buffer_info->skb = skb;
		buffer_info->length = adapter->rx_buffer_len;
		buffer_info->dma = pci_map_single(pdev,
						  skb->data,
						  adapter->rx_buffer_len,
						  PCI_DMA_FROMDEVICE);

		/* Fix for errata 23, can't cross 64kB boundary */
		if (!e1000_check_64k_bound(adapter,
					   (void *)(unsigned long)buffer_info->dma,
					   adapter->rx_buffer_len)) {
			DPRINTK(RX_ERR, ERR,
				"dma align check failed: %u bytes at %p\n",
				adapter->rx_buffer_len,
				(void *)(unsigned long)buffer_info->dma);
			dev_kfree_skb(skb);
			buffer_info->skb = NULL;

			pci_unmap_single(pdev, buffer_info->dma,
					 adapter->rx_buffer_len,
					 PCI_DMA_FROMDEVICE);

			break; /* while !buffer_info->skb */
		}
		rx_desc = E1000_RX_DESC(*rx_ring, i);
		rx_desc->buffer_addr = cpu_to_le64(buffer_info->dma);

		if(unlikely((i & ~(E1000_RX_BUFFER_WRITE - 1)) == i)) {
			/* Force memory writes to complete before letting h/w
			 * know there are new descriptors to fetch. (Only
			 * applicable for weak-ordered memory model archs,
			 * such as IA-64). */
			wmb();
			E1000_WRITE_REG(&adapter->hw, RDT, i);
		}

		if(unlikely(++i == rx_ring->count)) i = 0;
		buffer_info = &rx_ring->buffer_info[i];
	}

	rx_ring->next_to_use = i;
}
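
/* Packet-split allocation: buffer_addr[0] of each descriptor points to a
 * small header buffer of rx_ps_bsize0 bytes, while buffer_addr[1..] point to
 * whole pages that receive the payload. The page addresses are rewritten on
 * every pass because the hardware's write-back overwrites the read-format
 * descriptor. Since a packet-split descriptor is twice the size of a legacy
 * one, the tail register is written as i << 1. */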
|
/**
 * e1000_alloc_rx_buffers_ps - Replace used receive buffers; packet split
 * @adapter: address of board private structure
 **/

static void
e1000_alloc_rx_buffers_ps(struct e1000_adapter *adapter)
{
	struct e1000_desc_ring *rx_ring = &adapter->rx_ring;
	struct net_device *netdev = adapter->netdev;
	struct pci_dev *pdev = adapter->pdev;
	union e1000_rx_desc_packet_split *rx_desc;
	struct e1000_buffer *buffer_info;
	struct e1000_ps_page *ps_page;
	struct e1000_ps_page_dma *ps_page_dma;
	struct sk_buff *skb;
	unsigned int i, j;

	i = rx_ring->next_to_use;
	buffer_info = &rx_ring->buffer_info[i];
	ps_page = &rx_ring->ps_page[i];
	ps_page_dma = &rx_ring->ps_page_dma[i];

	while(!buffer_info->skb) {
		rx_desc = E1000_RX_DESC_PS(*rx_ring, i);

		for(j = 0; j < PS_PAGE_BUFFERS; j++) {
			if(unlikely(!ps_page->ps_page[j])) {
				ps_page->ps_page[j] =
					alloc_page(GFP_ATOMIC);
				if(unlikely(!ps_page->ps_page[j]))
					goto no_buffers;
				ps_page_dma->ps_page_dma[j] =
					pci_map_page(pdev,
						     ps_page->ps_page[j],
						     0, PAGE_SIZE,
						     PCI_DMA_FROMDEVICE);
			}
			/* Refresh the desc even if buffer_addrs didn't
			 * change because each write-back erases this info.
			 */
			rx_desc->read.buffer_addr[j+1] =
				cpu_to_le64(ps_page_dma->ps_page_dma[j]);
		}

		skb = dev_alloc_skb(adapter->rx_ps_bsize0 + NET_IP_ALIGN);

		if(unlikely(!skb))
			break;

		/* Make buffer alignment 2 beyond a 16 byte boundary
		 * this will result in a 16 byte aligned IP header after
		 * the 14 byte MAC header is removed
		 */
		skb_reserve(skb, NET_IP_ALIGN);

		skb->dev = netdev;

		buffer_info->skb = skb;
		buffer_info->length = adapter->rx_ps_bsize0;
		buffer_info->dma = pci_map_single(pdev, skb->data,
						  adapter->rx_ps_bsize0,
						  PCI_DMA_FROMDEVICE);

		rx_desc->read.buffer_addr[0] = cpu_to_le64(buffer_info->dma);

		if(unlikely((i & ~(E1000_RX_BUFFER_WRITE - 1)) == i)) {
			/* Force memory writes to complete before letting h/w
			 * know there are new descriptors to fetch. (Only
			 * applicable for weak-ordered memory model archs,
			 * such as IA-64). */
			wmb();
			/* Hardware increments by 16 bytes, but packet split
			 * descriptors are 32 bytes...so we increment tail
			 * twice as much.
			 */
			E1000_WRITE_REG(&adapter->hw, RDT, i<<1);
		}

		if(unlikely(++i == rx_ring->count)) i = 0;
		buffer_info = &rx_ring->buffer_info[i];
		ps_page = &rx_ring->ps_page[i];
		ps_page_dma = &rx_ring->ps_page_dma[i];
	}

no_buffers:
	rx_ring->next_to_use = i;
}
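
/* SmartSpeed workaround: when gigabit negotiation repeatedly reports a
 * master/slave configuration fault, the routine below first clears manual
 * master/slave configuration and restarts autonegotiation; if there is still
 * no link after E1000_SMARTSPEED_DOWNSHIFT polls, it forces the PHY to
 * master (which can help on cables with only two or three usable pairs).
 * The counter wraps to zero after E1000_SMARTSPEED_MAX attempts so the whole
 * sequence starts over; the function is intended to be polled periodically
 * (typically from the link watchdog) while the link is down. */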
|
/**
 * e1000_smartspeed - Workaround for SmartSpeed on 82541 and 82547 controllers.
 * @adapter: address of board private structure
 **/

static void
e1000_smartspeed(struct e1000_adapter *adapter)
{
	uint16_t phy_status;
	uint16_t phy_ctrl;

	if((adapter->hw.phy_type != e1000_phy_igp) || !adapter->hw.autoneg ||
	   !(adapter->hw.autoneg_advertised & ADVERTISE_1000_FULL))
		return;

	if(adapter->smartspeed == 0) {
		/* If the Master/Slave config fault is asserted twice,
		 * we assume back-to-back cabling */
		e1000_read_phy_reg(&adapter->hw, PHY_1000T_STATUS, &phy_status);
		if(!(phy_status & SR_1000T_MS_CONFIG_FAULT)) return;
		e1000_read_phy_reg(&adapter->hw, PHY_1000T_STATUS, &phy_status);
		if(!(phy_status & SR_1000T_MS_CONFIG_FAULT)) return;
		e1000_read_phy_reg(&adapter->hw, PHY_1000T_CTRL, &phy_ctrl);
		if(phy_ctrl & CR_1000T_MS_ENABLE) {
			phy_ctrl &= ~CR_1000T_MS_ENABLE;
			e1000_write_phy_reg(&adapter->hw, PHY_1000T_CTRL,
					    phy_ctrl);
			adapter->smartspeed++;
			if(!e1000_phy_setup_autoneg(&adapter->hw) &&
			   !e1000_read_phy_reg(&adapter->hw, PHY_CTRL,
					       &phy_ctrl)) {
				phy_ctrl |= (MII_CR_AUTO_NEG_EN |
					     MII_CR_RESTART_AUTO_NEG);
				e1000_write_phy_reg(&adapter->hw, PHY_CTRL,
						    phy_ctrl);
			}
		}
		return;
	} else if(adapter->smartspeed == E1000_SMARTSPEED_DOWNSHIFT) {
		/* If still no link, perhaps using 2/3 pair cable */
		e1000_read_phy_reg(&adapter->hw, PHY_1000T_CTRL, &phy_ctrl);
		phy_ctrl |= CR_1000T_MS_ENABLE;
		e1000_write_phy_reg(&adapter->hw, PHY_1000T_CTRL, phy_ctrl);
		if(!e1000_phy_setup_autoneg(&adapter->hw) &&
		   !e1000_read_phy_reg(&adapter->hw, PHY_CTRL, &phy_ctrl)) {
			phy_ctrl |= (MII_CR_AUTO_NEG_EN |
				     MII_CR_RESTART_AUTO_NEG);
			e1000_write_phy_reg(&adapter->hw, PHY_CTRL, phy_ctrl);
		}
	}
	/* Restart the process after E1000_SMARTSPEED_MAX iterations */
	if(adapter->smartspeed++ == E1000_SMARTSPEED_MAX)
		adapter->smartspeed = 0;
}
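
/* Only the standard MII ioctls (SIOCGMIIPHY, SIOCGMIIREG, SIOCSMIIREG) are
 * supported; any other command is answered with -EOPNOTSUPP. */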
|
/**
 * e1000_ioctl - handle ioctl calls
 * @netdev: network interface device structure
 * @ifr: pointer to the interface request structure
 * @cmd: ioctl command to execute
 **/

static int
e1000_ioctl(struct net_device *netdev, struct ifreq *ifr, int cmd)
{
	switch (cmd) {
	case SIOCGMIIPHY:
	case SIOCGMIIREG:
	case SIOCSMIIREG:
		return e1000_mii_ioctl(netdev, ifr, cmd);
	default:
		return -EOPNOTSUPP;
	}
}
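
/* MII register access is refused with -EPERM when the port is attached to an
 * EtherCAT master (adapter->ecdev is set), since the master owns the PHY,
 * and likewise for callers without CAP_NET_ADMIN. Writes to PHY_CTRL are
 * mirrored into the adapter's autoneg/speed settings and followed by a
 * down/up cycle (or a reset) so the MAC picks up the new link configuration. */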
|
/**
 * e1000_mii_ioctl - perform MII ioctl operations
 * @netdev: network interface device structure
 * @ifr: pointer to the interface request structure
 * @cmd: ioctl command to execute
 **/

static int
e1000_mii_ioctl(struct net_device *netdev, struct ifreq *ifr, int cmd)
{
	struct e1000_adapter *adapter = netdev_priv(netdev);
	struct mii_ioctl_data *data = if_mii(ifr);
	int retval;
	uint16_t mii_reg;
	uint16_t spddplx;
	unsigned long flags;

	if(adapter->hw.media_type != e1000_media_type_copper)
		return -EOPNOTSUPP;

	switch (cmd) {
	case SIOCGMIIPHY:
		data->phy_id = adapter->hw.phy_addr;
		break;
	case SIOCGMIIREG:
		if(adapter->ecdev || !capable(CAP_NET_ADMIN))
			return -EPERM;
		spin_lock_irqsave(&adapter->stats_lock, flags);
		if(e1000_read_phy_reg(&adapter->hw, data->reg_num & 0x1F,
				      &data->val_out)) {
			spin_unlock_irqrestore(&adapter->stats_lock, flags);
			return -EIO;
		}
		spin_unlock_irqrestore(&adapter->stats_lock, flags);
		break;
	case SIOCSMIIREG:
		if(adapter->ecdev || !capable(CAP_NET_ADMIN))
			return -EPERM;
		if(data->reg_num & ~(0x1F))
			return -EFAULT;
		mii_reg = data->val_in;
		spin_lock_irqsave(&adapter->stats_lock, flags);
		if(e1000_write_phy_reg(&adapter->hw, data->reg_num,
				       mii_reg)) {
			spin_unlock_irqrestore(&adapter->stats_lock, flags);
			return -EIO;
		}
		if(adapter->hw.phy_type == e1000_phy_m88) {
			switch (data->reg_num) {
			case PHY_CTRL:
				if(mii_reg & MII_CR_POWER_DOWN)
					break;
				if(mii_reg & MII_CR_AUTO_NEG_EN) {
					adapter->hw.autoneg = 1;
					adapter->hw.autoneg_advertised = 0x2F;
				} else {
					if (mii_reg & 0x40)
						spddplx = SPEED_1000;
					else if (mii_reg & 0x2000)
						spddplx = SPEED_100;
					else
						spddplx = SPEED_10;
					spddplx += (mii_reg & 0x100)
						   ? FULL_DUPLEX :
						   HALF_DUPLEX;
					retval = e1000_set_spd_dplx(adapter,
								    spddplx);
					if(retval) {
						spin_unlock_irqrestore(
							&adapter->stats_lock,
							flags);
						return retval;
					}
				}
				if(adapter->ecdev || netif_running(adapter->netdev)) {
					e1000_down(adapter);
					e1000_up(adapter);
				} else
					e1000_reset(adapter);
				break;
			case M88E1000_PHY_SPEC_CTRL:
			case M88E1000_EXT_PHY_SPEC_CTRL:
				if(e1000_phy_reset(&adapter->hw)) {
					spin_unlock_irqrestore(
						&adapter->stats_lock, flags);
					return -EIO;
				}
				break;
			}
		} else {
			switch (data->reg_num) {
			case PHY_CTRL:
				if(mii_reg & MII_CR_POWER_DOWN)
					break;
				if(adapter->ecdev || netif_running(adapter->netdev)) {
					e1000_down(adapter);
					e1000_up(adapter);
				} else
					e1000_reset(adapter);
				break;
			}
		}
		spin_unlock_irqrestore(&adapter->stats_lock, flags);
		break;
	default:
		return -EOPNOTSUPP;
	}
	return E1000_SUCCESS;
}
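
/* The helpers below are thin callbacks used by the hardware-independent
 * e1000_hw layer; hw->back points back at the owning e1000_adapter, which
 * supplies the struct pci_dev needed for config-space, MWI and I/O-port
 * accesses. */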
|
void
e1000_pci_set_mwi(struct e1000_hw *hw)
{
	struct e1000_adapter *adapter = hw->back;
	int ret_val = pci_set_mwi(adapter->pdev);

	if(ret_val)
		DPRINTK(PROBE, ERR, "Error in setting MWI\n");
}

void
e1000_pci_clear_mwi(struct e1000_hw *hw)
{
	struct e1000_adapter *adapter = hw->back;

	pci_clear_mwi(adapter->pdev);
}

void
e1000_read_pci_cfg(struct e1000_hw *hw, uint32_t reg, uint16_t *value)
{
	struct e1000_adapter *adapter = hw->back;

	pci_read_config_word(adapter->pdev, reg, value);
}

void
e1000_write_pci_cfg(struct e1000_hw *hw, uint32_t reg, uint16_t *value)
{
	struct e1000_adapter *adapter = hw->back;

	pci_write_config_word(adapter->pdev, reg, *value);
}

uint32_t
e1000_io_read(struct e1000_hw *hw, unsigned long port)
{
	return inl(port);
}

void
e1000_io_write(struct e1000_hw *hw, unsigned long port, uint32_t value)
{
	outl(value, port);
}
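
/* VLAN offload control: CTRL.VME enables 802.1Q tag insertion/stripping in
 * the MAC, RCTL.VFE enables filtering against the VLAN filter table (VFTA),
 * and RCTL.CFIEN is cleared so frames are not dropped on the CFI bit.
 * Interrupts are held off while the configuration is switched. */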
|
static void
e1000_vlan_rx_register(struct net_device *netdev, struct vlan_group *grp)
{
	struct e1000_adapter *adapter = netdev_priv(netdev);
	uint32_t ctrl, rctl;

	e1000_irq_disable(adapter);
	adapter->vlgrp = grp;

	if(grp) {
		/* enable VLAN tag insert/strip */
		ctrl = E1000_READ_REG(&adapter->hw, CTRL);
		ctrl |= E1000_CTRL_VME;
		E1000_WRITE_REG(&adapter->hw, CTRL, ctrl);

		/* enable VLAN receive filtering */
		rctl = E1000_READ_REG(&adapter->hw, RCTL);
		rctl |= E1000_RCTL_VFE;
		rctl &= ~E1000_RCTL_CFIEN;
		E1000_WRITE_REG(&adapter->hw, RCTL, rctl);
		e1000_update_mng_vlan(adapter);
	} else {
		/* disable VLAN tag insert/strip */
		ctrl = E1000_READ_REG(&adapter->hw, CTRL);
		ctrl &= ~E1000_CTRL_VME;
		E1000_WRITE_REG(&adapter->hw, CTRL, ctrl);

		/* disable VLAN filtering */
		rctl = E1000_READ_REG(&adapter->hw, RCTL);
		rctl &= ~E1000_RCTL_VFE;
		E1000_WRITE_REG(&adapter->hw, RCTL, rctl);
		if(adapter->mng_vlan_id != (uint16_t)E1000_MNG_VLAN_NONE) {
			e1000_vlan_rx_kill_vid(netdev, adapter->mng_vlan_id);
			adapter->mng_vlan_id = E1000_MNG_VLAN_NONE;
		}
	}

	e1000_irq_enable(adapter);
}
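
/* The VLAN filter table (VFTA) is a 128-entry array of 32-bit registers, one
 * bit per VLAN ID (128 * 32 = 4096). A VID is added or removed by selecting
 * the register with (vid >> 5) & 0x7F and toggling bit (vid & 0x1F); for
 * example, VID 100 lives in register 3, bit 4. The management VLAN set up by
 * the firmware cookie is deliberately left untouched by both routines. */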
|
static void
e1000_vlan_rx_add_vid(struct net_device *netdev, uint16_t vid)
{
	struct e1000_adapter *adapter = netdev_priv(netdev);
	uint32_t vfta, index;

	if((adapter->hw.mng_cookie.status &
	    E1000_MNG_DHCP_COOKIE_STATUS_VLAN_SUPPORT) &&
	   (vid == adapter->mng_vlan_id))
		return;
	/* add VID to filter table */
	index = (vid >> 5) & 0x7F;
	vfta = E1000_READ_REG_ARRAY(&adapter->hw, VFTA, index);
	vfta |= (1 << (vid & 0x1F));
	e1000_write_vfta(&adapter->hw, index, vfta);
}

static void
e1000_vlan_rx_kill_vid(struct net_device *netdev, uint16_t vid)
{
	struct e1000_adapter *adapter = netdev_priv(netdev);
	uint32_t vfta, index;

	e1000_irq_disable(adapter);

	if(adapter->vlgrp)
		adapter->vlgrp->vlan_devices[vid] = NULL;

	e1000_irq_enable(adapter);

	if((adapter->hw.mng_cookie.status &
	    E1000_MNG_DHCP_COOKIE_STATUS_VLAN_SUPPORT) &&
	   (vid == adapter->mng_vlan_id))
		return;
	/* remove VID from filter table */
	index = (vid >> 5) & 0x7F;
	vfta = E1000_READ_REG_ARRAY(&adapter->hw, VFTA, index);
	vfta &= ~(1 << (vid & 0x1F));
	e1000_write_vfta(&adapter->hw, index, vfta);
}

static void
e1000_restore_vlan(struct e1000_adapter *adapter)
{
	e1000_vlan_rx_register(adapter->netdev, adapter->vlgrp);

	if(adapter->vlgrp) {
		uint16_t vid;
		for(vid = 0; vid < VLAN_GROUP_ARRAY_LEN; vid++) {
			if(!adapter->vlgrp->vlan_devices[vid])
				continue;
			e1000_vlan_rx_add_vid(adapter->netdev, vid);
		}
	}
}
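
/* Forced speed/duplex: callers pass SPEED_x + DUPLEX_y. Fiber links only
 * accept gigabit full duplex, and 1000/full on copper is configured by
 * advertising only 1000BASE-T full duplex through autonegotiation rather
 * than by forcing the link, presumably because gigabit cannot simply be
 * forced on these parts. */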
|
int
e1000_set_spd_dplx(struct e1000_adapter *adapter, uint16_t spddplx)
{
	adapter->hw.autoneg = 0;

	/* Fiber NICs only allow 1000 Mbps full duplex */
	if((adapter->hw.media_type == e1000_media_type_fiber) &&
	   spddplx != (SPEED_1000 + DUPLEX_FULL)) {
		DPRINTK(PROBE, ERR, "Unsupported Speed/Duplex configuration\n");
		return -EINVAL;
	}

	switch(spddplx) {
	case SPEED_10 + DUPLEX_HALF:
		adapter->hw.forced_speed_duplex = e1000_10_half;
		break;
	case SPEED_10 + DUPLEX_FULL:
		adapter->hw.forced_speed_duplex = e1000_10_full;
		break;
	case SPEED_100 + DUPLEX_HALF:
		adapter->hw.forced_speed_duplex = e1000_100_half;
		break;
	case SPEED_100 + DUPLEX_FULL:
		adapter->hw.forced_speed_duplex = e1000_100_full;
		break;
	case SPEED_1000 + DUPLEX_FULL:
		adapter->hw.autoneg = 1;
		adapter->hw.autoneg_advertised = ADVERTISE_1000_FULL;
		break;
	case SPEED_1000 + DUPLEX_HALF: /* not supported */
	default:
		DPRINTK(PROBE, ERR, "Unsupported Speed/Duplex configuration\n");
		return -EINVAL;
	}
	return 0;
}
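
/* Suspend path: the wake-up filter control (WUFC) is programmed from
 * adapter->wol; the link-change wake bit is dropped when the link is already
 * up, all-multi mode is turned on if wake-on-multicast is requested, the
 * laser is kept running in D3 on fiber/serdes parts, and PME is enabled
 * before the device is placed in D3. When SMBUS-based manageability is
 * active, the MANC ARP-enable bit is set and wake stays enabled as well. */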
|
static int
e1000_suspend(struct pci_dev *pdev, uint32_t state)
{
	struct net_device *netdev = pci_get_drvdata(pdev);
	struct e1000_adapter *adapter = netdev_priv(netdev);
	uint32_t ctrl, ctrl_ext, rctl, manc, status, swsm;
	uint32_t wufc = adapter->wol;

	if (!adapter->ecdev)
		netif_device_detach(netdev);

	if (adapter->ecdev || netif_running(netdev))
		e1000_down(adapter);

	status = E1000_READ_REG(&adapter->hw, STATUS);
	if(status & E1000_STATUS_LU)
		wufc &= ~E1000_WUFC_LNKC;

	if(wufc) {
		e1000_setup_rctl(adapter);
		e1000_set_multi(netdev);

		/* turn on all-multi mode if wake on multicast is enabled */
		if(adapter->wol & E1000_WUFC_MC) {
			rctl = E1000_READ_REG(&adapter->hw, RCTL);
			rctl |= E1000_RCTL_MPE;
			E1000_WRITE_REG(&adapter->hw, RCTL, rctl);
		}

		if(adapter->hw.mac_type >= e1000_82540) {
			ctrl = E1000_READ_REG(&adapter->hw, CTRL);
			/* advertise wake from D3Cold */
			#define E1000_CTRL_ADVD3WUC 0x00100000
			/* phy power management enable */
			#define E1000_CTRL_EN_PHY_PWR_MGMT 0x00200000
			ctrl |= E1000_CTRL_ADVD3WUC |
				E1000_CTRL_EN_PHY_PWR_MGMT;
			E1000_WRITE_REG(&adapter->hw, CTRL, ctrl);
		}

		if(adapter->hw.media_type == e1000_media_type_fiber ||
		   adapter->hw.media_type == e1000_media_type_internal_serdes) {
			/* keep the laser running in D3 */
			ctrl_ext = E1000_READ_REG(&adapter->hw, CTRL_EXT);
			ctrl_ext |= E1000_CTRL_EXT_SDP7_DATA;
			E1000_WRITE_REG(&adapter->hw, CTRL_EXT, ctrl_ext);
		}

		/* Allow time for pending master requests to run */
		e1000_disable_pciex_master(&adapter->hw);

		E1000_WRITE_REG(&adapter->hw, WUC, E1000_WUC_PME_EN);
		E1000_WRITE_REG(&adapter->hw, WUFC, wufc);
		pci_enable_wake(pdev, 3, 1);
		pci_enable_wake(pdev, 4, 1); /* 4 == D3 cold */
	} else {
		E1000_WRITE_REG(&adapter->hw, WUC, 0);
		E1000_WRITE_REG(&adapter->hw, WUFC, 0);
		pci_enable_wake(pdev, 3, 0);
		pci_enable_wake(pdev, 4, 0); /* 4 == D3 cold */
	}

	pci_save_state(pdev);

	if(adapter->hw.mac_type >= e1000_82540 &&
	   adapter->hw.media_type == e1000_media_type_copper) {
		manc = E1000_READ_REG(&adapter->hw, MANC);
		if(manc & E1000_MANC_SMBUS_EN) {
			manc |= E1000_MANC_ARP_EN;
			E1000_WRITE_REG(&adapter->hw, MANC, manc);
			pci_enable_wake(pdev, 3, 1);
			pci_enable_wake(pdev, 4, 1); /* 4 == D3 cold */
		}
	}

	switch(adapter->hw.mac_type) {
	case e1000_82573:
		swsm = E1000_READ_REG(&adapter->hw, SWSM);
		E1000_WRITE_REG(&adapter->hw, SWSM,
				swsm & ~E1000_SWSM_DRV_LOAD);
		break;
	default:
		break;
	}

	pci_disable_device(pdev);

	state = (state > 0) ? 3 : 0;
	pci_set_power_state(pdev, state);

	return 0;
}
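
/* Resume path (CONFIG_PM only): restore PCI state, clear the wake enables,
 * reset the MAC and clear the Wake Up Status register, bring the interface
 * back up if it was running (or is attached to EtherCAT), disable ARP
 * offload again, and re-assert the SWSM driver-load bit on 82573 so the
 * firmware knows the driver has taken over the hardware again. */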
|
#ifdef CONFIG_PM
static int
e1000_resume(struct pci_dev *pdev)
{
	struct net_device *netdev = pci_get_drvdata(pdev);
	struct e1000_adapter *adapter = netdev_priv(netdev);
	uint32_t manc, ret_val, swsm;

	pci_set_power_state(pdev, 0);
	pci_restore_state(pdev);
	ret_val = pci_enable_device(pdev);
	pci_set_master(pdev);

	pci_enable_wake(pdev, 3, 0);
	pci_enable_wake(pdev, 4, 0); /* 4 == D3 cold */

	e1000_reset(adapter);
	E1000_WRITE_REG(&adapter->hw, WUS, ~0);

	if(adapter->ecdev || netif_running(netdev))
		e1000_up(adapter);

	if (!adapter->ecdev) netif_device_attach(netdev);

	if(adapter->hw.mac_type >= e1000_82540 &&
	   adapter->hw.media_type == e1000_media_type_copper) {
		manc = E1000_READ_REG(&adapter->hw, MANC);
		manc &= ~(E1000_MANC_ARP_EN);
		E1000_WRITE_REG(&adapter->hw, MANC, manc);
	}

	switch(adapter->hw.mac_type) {
	case e1000_82573:
		swsm = E1000_READ_REG(&adapter->hw, SWSM);
		E1000_WRITE_REG(&adapter->hw, SWSM,
				swsm | E1000_SWSM_DRV_LOAD);
		break;
	default:
		break;
	}

	return 0;
}
#endif
|
#ifdef CONFIG_NET_POLL_CONTROLLER
/*
 * Polling 'interrupt' - used by things like netconsole to send skbs
 * without having to re-enable interrupts. It's not called while
 * the interrupt routine is executing.
 */
static void
e1000_netpoll(struct net_device *netdev)
{
	struct e1000_adapter *adapter = netdev_priv(netdev);
	disable_irq(adapter->pdev->irq);
	e1000_intr(adapter->pdev->irq, netdev, NULL);
	e1000_clean_tx_irq(adapter);
	enable_irq(adapter->pdev->irq);
}
#endif

/* e1000_main.c */