/*******************************************************************************

  Intel PRO/1000 Linux driver
  Copyright(c) 1999 - 2006 Intel Corporation.

  This program is free software; you can redistribute it and/or modify it
  under the terms and conditions of the GNU General Public License,
  version 2, as published by the Free Software Foundation.

  This program is distributed in the hope it will be useful, but WITHOUT
  ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
  FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
  more details.

  You should have received a copy of the GNU General Public License along with
  this program; if not, write to the Free Software Foundation, Inc.,
  51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.

  The full GNU General Public License is included in this distribution in
  the file called "COPYING".

  Contact Information:
  Linux NICS <linux.nics@intel.com>
  e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
  Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497

*******************************************************************************/

#include "e1000.h"
#include <net/ip6_checksum.h>

char e1000_driver_name[] = "e1000";
static char e1000_driver_string[] = "Intel(R) PRO/1000 Network Driver";
#define DRV_VERSION "7.3.21-k8-NAPI"
const char e1000_driver_version[] = DRV_VERSION;
static const char e1000_copyright[] = "Copyright (c) 1999-2006 Intel Corporation.";

/* e1000_pci_tbl - PCI Device ID Table
 *
 * Last entry must be all 0s
 *
 * Macro expands to...
 *   {PCI_DEVICE(PCI_VENDOR_ID_INTEL, device_id)}
 */
static DEFINE_PCI_DEVICE_TABLE(e1000_pci_tbl) = {
        INTEL_E1000_ETHERNET_DEVICE(0x1000),
        INTEL_E1000_ETHERNET_DEVICE(0x1001),
        INTEL_E1000_ETHERNET_DEVICE(0x1004),
        INTEL_E1000_ETHERNET_DEVICE(0x1008),
        INTEL_E1000_ETHERNET_DEVICE(0x1009),
        INTEL_E1000_ETHERNET_DEVICE(0x100C),
        INTEL_E1000_ETHERNET_DEVICE(0x100D),
        INTEL_E1000_ETHERNET_DEVICE(0x100E),
        INTEL_E1000_ETHERNET_DEVICE(0x100F),
        INTEL_E1000_ETHERNET_DEVICE(0x1010),
        INTEL_E1000_ETHERNET_DEVICE(0x1011),
        INTEL_E1000_ETHERNET_DEVICE(0x1012),
        INTEL_E1000_ETHERNET_DEVICE(0x1013),
        INTEL_E1000_ETHERNET_DEVICE(0x1014),
        INTEL_E1000_ETHERNET_DEVICE(0x1015),
        INTEL_E1000_ETHERNET_DEVICE(0x1016),
        INTEL_E1000_ETHERNET_DEVICE(0x1017),
        INTEL_E1000_ETHERNET_DEVICE(0x1018),
        INTEL_E1000_ETHERNET_DEVICE(0x1019),
        INTEL_E1000_ETHERNET_DEVICE(0x101A),
        INTEL_E1000_ETHERNET_DEVICE(0x101D),
        INTEL_E1000_ETHERNET_DEVICE(0x101E),
        INTEL_E1000_ETHERNET_DEVICE(0x1026),
        INTEL_E1000_ETHERNET_DEVICE(0x1027),
        INTEL_E1000_ETHERNET_DEVICE(0x1028),
        INTEL_E1000_ETHERNET_DEVICE(0x1075),
        INTEL_E1000_ETHERNET_DEVICE(0x1076),
        INTEL_E1000_ETHERNET_DEVICE(0x1077),
        INTEL_E1000_ETHERNET_DEVICE(0x1078),
        INTEL_E1000_ETHERNET_DEVICE(0x1079),
        INTEL_E1000_ETHERNET_DEVICE(0x107A),
        INTEL_E1000_ETHERNET_DEVICE(0x107B),
        INTEL_E1000_ETHERNET_DEVICE(0x107C),
        INTEL_E1000_ETHERNET_DEVICE(0x108A),
        INTEL_E1000_ETHERNET_DEVICE(0x1099),
        INTEL_E1000_ETHERNET_DEVICE(0x10B5),
        /* required last entry */
        {0,}
};

MODULE_DEVICE_TABLE(pci, e1000_pci_tbl);

int e1000_up(struct e1000_adapter *adapter);
void e1000_down(struct e1000_adapter *adapter);
void e1000_reinit_locked(struct e1000_adapter *adapter);
void e1000_reset(struct e1000_adapter *adapter);
int e1000_set_spd_dplx(struct e1000_adapter *adapter, u16 spddplx);
int e1000_setup_all_tx_resources(struct e1000_adapter *adapter);
int e1000_setup_all_rx_resources(struct e1000_adapter *adapter);
void e1000_free_all_tx_resources(struct e1000_adapter *adapter);
void e1000_free_all_rx_resources(struct e1000_adapter *adapter);
static int e1000_setup_tx_resources(struct e1000_adapter *adapter,
                                    struct e1000_tx_ring *txdr);
static int e1000_setup_rx_resources(struct e1000_adapter *adapter,
                                    struct e1000_rx_ring *rxdr);
static void e1000_free_tx_resources(struct e1000_adapter *adapter,
                                    struct e1000_tx_ring *tx_ring);
static void e1000_free_rx_resources(struct e1000_adapter *adapter,
                                    struct e1000_rx_ring *rx_ring);
void e1000_update_stats(struct e1000_adapter *adapter);

static int e1000_init_module(void);
static void e1000_exit_module(void);
static int e1000_probe(struct pci_dev *pdev, const struct pci_device_id *ent);
static void __devexit e1000_remove(struct pci_dev *pdev);
static int e1000_alloc_queues(struct e1000_adapter *adapter);
static int e1000_sw_init(struct e1000_adapter *adapter);
static int e1000_open(struct net_device *netdev);
static int e1000_close(struct net_device *netdev);
static void e1000_configure_tx(struct e1000_adapter *adapter);
static void e1000_configure_rx(struct e1000_adapter *adapter);
static void e1000_setup_rctl(struct e1000_adapter *adapter);
static void e1000_clean_all_tx_rings(struct e1000_adapter *adapter);
static void e1000_clean_all_rx_rings(struct e1000_adapter *adapter);
static void e1000_clean_tx_ring(struct e1000_adapter *adapter,
                                struct e1000_tx_ring *tx_ring);
static void e1000_clean_rx_ring(struct e1000_adapter *adapter,
                                struct e1000_rx_ring *rx_ring);
static void e1000_set_rx_mode(struct net_device *netdev);
static void e1000_update_phy_info(unsigned long data);
static void e1000_update_phy_info_task(struct work_struct *work);
static void e1000_watchdog(unsigned long data);
static void e1000_82547_tx_fifo_stall(unsigned long data);
static void e1000_82547_tx_fifo_stall_task(struct work_struct *work);
static netdev_tx_t e1000_xmit_frame(struct sk_buff *skb,
                                    struct net_device *netdev);
static struct net_device_stats *e1000_get_stats(struct net_device *netdev);
static int e1000_change_mtu(struct net_device *netdev, int new_mtu);
static int e1000_set_mac(struct net_device *netdev, void *p);
static irqreturn_t e1000_intr(int irq, void *data);
static bool e1000_clean_tx_irq(struct e1000_adapter *adapter,
                               struct e1000_tx_ring *tx_ring);
static int e1000_clean(struct napi_struct *napi, int budget);
static bool e1000_clean_rx_irq(struct e1000_adapter *adapter,
                               struct e1000_rx_ring *rx_ring,
                               int *work_done, int work_to_do);
static bool e1000_clean_jumbo_rx_irq(struct e1000_adapter *adapter,
                                     struct e1000_rx_ring *rx_ring,
                                     int *work_done, int work_to_do);
static void e1000_alloc_rx_buffers(struct e1000_adapter *adapter,
                                   struct e1000_rx_ring *rx_ring,
                                   int cleaned_count);
static void e1000_alloc_jumbo_rx_buffers(struct e1000_adapter *adapter,
                                         struct e1000_rx_ring *rx_ring,
                                         int cleaned_count);
static int e1000_ioctl(struct net_device *netdev, struct ifreq *ifr, int cmd);
static int e1000_mii_ioctl(struct net_device *netdev, struct ifreq *ifr,
                           int cmd);
static void e1000_enter_82542_rst(struct e1000_adapter *adapter);
static void e1000_leave_82542_rst(struct e1000_adapter *adapter);
static void e1000_tx_timeout(struct net_device *dev);
static void e1000_reset_task(struct work_struct *work);
static void e1000_smartspeed(struct e1000_adapter *adapter);
static int e1000_82547_fifo_workaround(struct e1000_adapter *adapter,
                                       struct sk_buff *skb);

static void e1000_vlan_rx_register(struct net_device *netdev,
                                   struct vlan_group *grp);
static void e1000_vlan_rx_add_vid(struct net_device *netdev, u16 vid);
static void e1000_vlan_rx_kill_vid(struct net_device *netdev, u16 vid);
static void e1000_restore_vlan(struct e1000_adapter *adapter);

#ifdef CONFIG_PM
static int e1000_suspend(struct pci_dev *pdev, pm_message_t state);
static int e1000_resume(struct pci_dev *pdev);
#endif
static void e1000_shutdown(struct pci_dev *pdev);

#ifdef CONFIG_NET_POLL_CONTROLLER
/* for netdump / net console */
static void e1000_netpoll(struct net_device *netdev);
#endif

#define COPYBREAK_DEFAULT 256
static unsigned int copybreak __read_mostly = COPYBREAK_DEFAULT;
module_param(copybreak, uint, 0644);
MODULE_PARM_DESC(copybreak,
        "Maximum size of packet that is copied to a new buffer on receive");
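/* Illustrative usage note (added; not from the original source): frames at
 * or below copybreak bytes are copied into a fresh skb on receive so the
 * larger DMA buffer can be recycled, e.g.:
 *
 *   modprobe e1000 copybreak=128   # copy only frames <= 128 bytes
 *   modprobe e1000 copybreak=0     # disable copying entirely
 */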

static pci_ers_result_t e1000_io_error_detected(struct pci_dev *pdev,
                                                pci_channel_state_t state);
static pci_ers_result_t e1000_io_slot_reset(struct pci_dev *pdev);
static void e1000_io_resume(struct pci_dev *pdev);

static struct pci_error_handlers e1000_err_handler = {
        .error_detected = e1000_io_error_detected,
        .slot_reset = e1000_io_slot_reset,
        .resume = e1000_io_resume,
};

static struct pci_driver e1000_driver = {
        .name     = e1000_driver_name,
        .id_table = e1000_pci_tbl,
        .probe    = e1000_probe,
        .remove   = __devexit_p(e1000_remove),
#ifdef CONFIG_PM
        /* Power Management Hooks */
        .suspend  = e1000_suspend,
        .resume   = e1000_resume,
#endif
        .shutdown = e1000_shutdown,
        .err_handler = &e1000_err_handler
};

MODULE_AUTHOR("Intel Corporation, <linux.nics@intel.com>");
MODULE_DESCRIPTION("Intel(R) PRO/1000 Network Driver");
MODULE_LICENSE("GPL");
MODULE_VERSION(DRV_VERSION);

static int debug = NETIF_MSG_DRV | NETIF_MSG_PROBE;
module_param(debug, int, 0);
MODULE_PARM_DESC(debug, "Debug level (0=none,...,16=all)");
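/* Illustrative note (added; not from the original source): probe converts
 * this level into a bitmask via adapter->msg_enable = (1 << debug) - 1, so
 * e.g. debug=3 yields 0x7 = NETIF_MSG_DRV | NETIF_MSG_PROBE | NETIF_MSG_LINK.
 */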

/**
 * e1000_get_hw_dev - return the net_device backing a hw struct
 * @hw: pointer to the HW struct
 *
 * Used by the hardware layer to print debugging information.
 **/
struct net_device *e1000_get_hw_dev(struct e1000_hw *hw)
{
        struct e1000_adapter *adapter = hw->back;
        return adapter->netdev;
}

/**
 * e1000_init_module - Driver Registration Routine
 *
 * e1000_init_module is the first routine called when the driver is
 * loaded. All it does is register with the PCI subsystem.
 **/
static int __init e1000_init_module(void)
{
        int ret;
        pr_info("%s - version %s\n", e1000_driver_string, e1000_driver_version);

        pr_info("%s\n", e1000_copyright);

        ret = pci_register_driver(&e1000_driver);
        if (copybreak != COPYBREAK_DEFAULT) {
                if (copybreak == 0)
                        pr_info("copybreak disabled\n");
                else
                        pr_info("copybreak enabled for "
                                "packets <= %u bytes\n", copybreak);
        }
        return ret;
}

module_init(e1000_init_module);

/**
 * e1000_exit_module - Driver Exit Cleanup Routine
 *
 * e1000_exit_module is called just before the driver is removed
 * from memory.
 **/
static void __exit e1000_exit_module(void)
{
        pci_unregister_driver(&e1000_driver);
}

module_exit(e1000_exit_module);

static int e1000_request_irq(struct e1000_adapter *adapter)
{
        struct net_device *netdev = adapter->netdev;
        irq_handler_t handler = e1000_intr;
        int irq_flags = IRQF_SHARED;
        int err;

        err = request_irq(adapter->pdev->irq, handler, irq_flags, netdev->name,
                          netdev);
        if (err) {
                e_err(probe, "Unable to allocate interrupt Error: %d\n", err);
        }

        return err;
}

static void e1000_free_irq(struct e1000_adapter *adapter)
{
        struct net_device *netdev = adapter->netdev;

        free_irq(adapter->pdev->irq, netdev);
}

/**
 * e1000_irq_disable - Mask off interrupt generation on the NIC
 * @adapter: board private structure
 **/
static void e1000_irq_disable(struct e1000_adapter *adapter)
{
        struct e1000_hw *hw = &adapter->hw;

        ew32(IMC, ~0);
        E1000_WRITE_FLUSH();
        synchronize_irq(adapter->pdev->irq);
}

/**
 * e1000_irq_enable - Enable default interrupt generation settings
 * @adapter: board private structure
 **/
static void e1000_irq_enable(struct e1000_adapter *adapter)
{
        struct e1000_hw *hw = &adapter->hw;

        ew32(IMS, IMS_ENABLE_MASK);
        E1000_WRITE_FLUSH();
}

static void e1000_update_mng_vlan(struct e1000_adapter *adapter)
{
        struct e1000_hw *hw = &adapter->hw;
        struct net_device *netdev = adapter->netdev;
        u16 vid = hw->mng_cookie.vlan_id;
        u16 old_vid = adapter->mng_vlan_id;

        if (adapter->vlgrp) {
                if (!vlan_group_get_device(adapter->vlgrp, vid)) {
                        if (hw->mng_cookie.status &
                            E1000_MNG_DHCP_COOKIE_STATUS_VLAN_SUPPORT) {
                                e1000_vlan_rx_add_vid(netdev, vid);
                                adapter->mng_vlan_id = vid;
                        } else
                                adapter->mng_vlan_id = E1000_MNG_VLAN_NONE;

                        if ((old_vid != (u16)E1000_MNG_VLAN_NONE) &&
                            (vid != old_vid) &&
                            !vlan_group_get_device(adapter->vlgrp, old_vid))
                                e1000_vlan_rx_kill_vid(netdev, old_vid);
                } else
                        adapter->mng_vlan_id = vid;
        }
}

static void e1000_init_manageability(struct e1000_adapter *adapter)
{
        struct e1000_hw *hw = &adapter->hw;

        if (adapter->en_mng_pt) {
                u32 manc = er32(MANC);

                /* disable hardware interception of ARP */
                manc &= ~(E1000_MANC_ARP_EN);

                ew32(MANC, manc);
        }
}

static void e1000_release_manageability(struct e1000_adapter *adapter)
{
        struct e1000_hw *hw = &adapter->hw;

        if (adapter->en_mng_pt) {
                u32 manc = er32(MANC);

                /* re-enable hardware interception of ARP */
                manc |= E1000_MANC_ARP_EN;

                ew32(MANC, manc);
        }
}
/**
 * e1000_configure - configure the hardware for RX and TX
 * @adapter: board private structure
 **/
static void e1000_configure(struct e1000_adapter *adapter)
{
        struct net_device *netdev = adapter->netdev;
        int i;

        e1000_set_rx_mode(netdev);

        e1000_restore_vlan(adapter);
        e1000_init_manageability(adapter);

        e1000_configure_tx(adapter);
        e1000_setup_rctl(adapter);
        e1000_configure_rx(adapter);
        /* call E1000_DESC_UNUSED which always leaves
         * at least 1 descriptor unused to make sure
         * next_to_use != next_to_clean */
        for (i = 0; i < adapter->num_rx_queues; i++) {
                struct e1000_rx_ring *ring = &adapter->rx_ring[i];
                adapter->alloc_rx_buf(adapter, ring,
                                      E1000_DESC_UNUSED(ring));
        }
}

int e1000_up(struct e1000_adapter *adapter)
{
        struct e1000_hw *hw = &adapter->hw;

        /* hardware has been reset, we need to reload some things */
        e1000_configure(adapter);

        clear_bit(__E1000_DOWN, &adapter->flags);

        napi_enable(&adapter->napi);

        e1000_irq_enable(adapter);

        netif_wake_queue(adapter->netdev);

        /* fire a link change interrupt to start the watchdog */
        ew32(ICS, E1000_ICS_LSC);
        return 0;
}

/**
 * e1000_power_up_phy - restore link in case the phy was powered down
 * @adapter: address of board private structure
 *
 * The phy may be powered down to save power and turn off link when the
 * driver is unloaded and wake on lan is not enabled (among others).
 * *** this routine MUST be followed by a call to e1000_reset ***
 **/
void e1000_power_up_phy(struct e1000_adapter *adapter)
{
        struct e1000_hw *hw = &adapter->hw;
        u16 mii_reg = 0;

        /* Just clear the power down bit to wake the phy back up */
        if (hw->media_type == e1000_media_type_copper) {
                /* according to the manual, the phy will retain its
                 * settings across a power-down/up cycle */
                e1000_read_phy_reg(hw, PHY_CTRL, &mii_reg);
                mii_reg &= ~MII_CR_POWER_DOWN;
                e1000_write_phy_reg(hw, PHY_CTRL, mii_reg);
        }
}

static void e1000_power_down_phy(struct e1000_adapter *adapter)
{
        struct e1000_hw *hw = &adapter->hw;

        /* Power down the PHY so no link is implied when interface is down.
         * The PHY cannot be powered down if any of the following is true:
         * (a) WoL is enabled
         * (b) AMT is active
         * (c) SoL/IDER session is active */
        if (!adapter->wol && hw->mac_type >= e1000_82540 &&
            hw->media_type == e1000_media_type_copper) {
                u16 mii_reg = 0;

                switch (hw->mac_type) {
                case e1000_82540:
                case e1000_82545:
                case e1000_82545_rev_3:
                case e1000_82546:
                case e1000_82546_rev_3:
                case e1000_82541:
                case e1000_82541_rev_2:
                case e1000_82547:
                case e1000_82547_rev_2:
                        if (er32(MANC) & E1000_MANC_SMBUS_EN)
                                goto out;
                        break;
                default:
                        goto out;
                }
                e1000_read_phy_reg(hw, PHY_CTRL, &mii_reg);
                mii_reg |= MII_CR_POWER_DOWN;
                e1000_write_phy_reg(hw, PHY_CTRL, mii_reg);
                mdelay(1);
        }
out:
        return;
}

void e1000_down(struct e1000_adapter *adapter)
{
        struct e1000_hw *hw = &adapter->hw;
        struct net_device *netdev = adapter->netdev;
        u32 rctl, tctl;

        /* disable receives in the hardware */
        rctl = er32(RCTL);
        ew32(RCTL, rctl & ~E1000_RCTL_EN);
        /* flush and sleep below */

        netif_tx_disable(netdev);

        /* disable transmits in the hardware */
        tctl = er32(TCTL);
        tctl &= ~E1000_TCTL_EN;
        ew32(TCTL, tctl);
        /* flush both disables and wait for them to finish */
        E1000_WRITE_FLUSH();
        msleep(10);

        napi_disable(&adapter->napi);

        e1000_irq_disable(adapter);

        /*
         * Setting DOWN must be after irq_disable to prevent
         * a screaming interrupt.  Setting DOWN also prevents
         * timers and tasks from rescheduling.
         */
        set_bit(__E1000_DOWN, &adapter->flags);

        del_timer_sync(&adapter->tx_fifo_stall_timer);
        del_timer_sync(&adapter->watchdog_timer);
        del_timer_sync(&adapter->phy_info_timer);

        adapter->link_speed = 0;
        adapter->link_duplex = 0;
        netif_carrier_off(netdev);

        e1000_reset(adapter);
        e1000_clean_all_tx_rings(adapter);
        e1000_clean_all_rx_rings(adapter);
}

static void e1000_reinit_safe(struct e1000_adapter *adapter)
{
        while (test_and_set_bit(__E1000_RESETTING, &adapter->flags))
                msleep(1);
        rtnl_lock();
        e1000_down(adapter);
        e1000_up(adapter);
        rtnl_unlock();
        clear_bit(__E1000_RESETTING, &adapter->flags);
}

void e1000_reinit_locked(struct e1000_adapter *adapter)
{
        /* if rtnl_lock is not held the call path is bogus */
        ASSERT_RTNL();
        WARN_ON(in_interrupt());
        while (test_and_set_bit(__E1000_RESETTING, &adapter->flags))
                msleep(1);
        e1000_down(adapter);
        e1000_up(adapter);
        clear_bit(__E1000_RESETTING, &adapter->flags);
}
void e1000_reset(struct e1000_adapter *adapter)
{
        struct e1000_hw *hw = &adapter->hw;
        u32 pba = 0, tx_space, min_tx_space, min_rx_space;
        bool legacy_pba_adjust = false;
        u16 hwm;

        /* Repartition the PBA for an MTU greater than 9k.
         * CTRL.RST is required for the change to take effect.
         */
        switch (hw->mac_type) {
        case e1000_82542_rev2_0:
        case e1000_82542_rev2_1:
        case e1000_82543:
        case e1000_82544:
        case e1000_82540:
        case e1000_82541:
        case e1000_82541_rev_2:
                legacy_pba_adjust = true;
                pba = E1000_PBA_48K;
                break;
        case e1000_82545:
        case e1000_82545_rev_3:
        case e1000_82546:
        case e1000_82546_rev_3:
                pba = E1000_PBA_48K;
                break;
        case e1000_82547:
        case e1000_82547_rev_2:
                legacy_pba_adjust = true;
                pba = E1000_PBA_30K;
                break;
        case e1000_undefined:
        case e1000_num_macs:
                break;
        }

        if (legacy_pba_adjust) {
                if (hw->max_frame_size > E1000_RXBUFFER_8192)
                        pba -= 8; /* allocate more FIFO for Tx */

                if (hw->mac_type == e1000_82547) {
                        adapter->tx_fifo_head = 0;
                        adapter->tx_head_addr = pba << E1000_TX_HEAD_ADDR_SHIFT;
                        adapter->tx_fifo_size =
                                (E1000_PBA_40K - pba) << E1000_PBA_BYTES_SHIFT;
                        atomic_set(&adapter->tx_fifo_stall, 0);
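                        /* Worked example (added; assumes the conventional
                         * values E1000_PBA_40K = 40 and
                         * E1000_PBA_BYTES_SHIFT = 10): with pba =
                         * E1000_PBA_30K (30) and no jumbo adjustment, the
                         * 82547 tracks (40 - 30) << 10 = 10 KB of on-chip
                         * Tx FIFO for the stall workaround.
                         */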
                }
        } else if (hw->max_frame_size > ETH_FRAME_LEN + ETH_FCS_LEN) {
                /* adjust PBA for jumbo frames */
                ew32(PBA, pba);

                /* To maintain wire speed transmits, the Tx FIFO should be
                 * large enough to accommodate two full transmit packets,
                 * rounded up to the next 1KB and expressed in KB.  Likewise,
                 * the Rx FIFO should be large enough to accommodate at least
                 * one full receive packet and is similarly rounded up and
                 * expressed in KB. */
                pba = er32(PBA);
                /* the upper 16 bits hold the Tx packet buffer allocation
                 * size in KB */
                tx_space = pba >> 16;
                /* the lower 16 bits hold the Rx packet buffer allocation
                 * size in KB */
                pba &= 0xffff;
                /* the Tx FIFO also stores 16 bytes of information per
                 * packet; don't count the Ethernet FCS because hardware
                 * appends it */
                min_tx_space = (hw->max_frame_size +
                                sizeof(struct e1000_tx_desc) -
                                ETH_FCS_LEN) * 2;
                min_tx_space = ALIGN(min_tx_space, 1024);
                min_tx_space >>= 10;
                /* software strips receive CRC, so leave room for it */
                min_rx_space = hw->max_frame_size;
                min_rx_space = ALIGN(min_rx_space, 1024);
                min_rx_space >>= 10;

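                /* Worked example (added; not in the original source): for a
                 * 9000-byte MTU, max_frame_size = 9018, so
                 * min_tx_space = (9018 + 16 - 4) * 2 = 18060 -> 18432 -> 18 KB
                 * and min_rx_space = 9018 -> 9216 -> 9 KB.
                 */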
                /* If current Tx allocation is less than the min Tx FIFO size,
                 * and the min Tx FIFO size is less than the current Rx FIFO
                 * allocation, take space away from current Rx allocation */
                if (tx_space < min_tx_space &&
                    ((min_tx_space - tx_space) < pba)) {
                        pba = pba - (min_tx_space - tx_space);

                        /* PCI/PCIx hardware has PBA alignment constraints */
                        switch (hw->mac_type) {
                        case e1000_82545 ... e1000_82546_rev_3:
                                pba &= ~(E1000_PBA_8K - 1);
                                break;
                        default:
                                break;
                        }

                        /* if short on rx space, rx wins and must trump tx
                         * adjustment or use Early Receive if available */
                        if (pba < min_rx_space)
                                pba = min_rx_space;
                }
        }

        ew32(PBA, pba);

        /*
         * flow control settings:
         * The high water mark must be low enough to fit one full frame
         * (or the size used for early receive) above it in the Rx FIFO.
         * Set it to the lower of:
         * - 90% of the Rx FIFO size, and
         * - the full Rx FIFO size minus the early receive size (for parts
         *   with ERT support assuming ERT set to E1000_ERT_2048), or
         * - the full Rx FIFO size minus one full frame
         */
        hwm = min(((pba << 10) * 9 / 10),
                  ((pba << 10) - hw->max_frame_size));

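        /* Illustrative check (added; not in the original source): with a
         * 40 KB Rx PBA and max_frame_size = 9018,
         * hwm = min(40960 * 9 / 10, 40960 - 9018) = min(36864, 31942)
         * = 31942, masked below to 31936 for 8-byte granularity.
         */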
        hw->fc_high_water = hwm & 0xFFF8;       /* 8-byte granularity */
        hw->fc_low_water = hw->fc_high_water - 8;
        hw->fc_pause_time = E1000_FC_PAUSE_TIME;
        hw->fc_send_xon = 1;
        hw->fc = hw->original_fc;

        /* Allow time for pending master requests to run */
        e1000_reset_hw(hw);
        if (hw->mac_type >= e1000_82544)
                ew32(WUC, 0);

        if (e1000_init_hw(hw))
                e_dev_err("Hardware Error\n");
        e1000_update_mng_vlan(adapter);

        /* if (adapter->hwflags & HWFLAGS_PHY_PWR_BIT) { */
        if (hw->mac_type >= e1000_82544 &&
            hw->autoneg == 1 &&
            hw->autoneg_advertised == ADVERTISE_1000_FULL) {
                u32 ctrl = er32(CTRL);
                /* clear phy power management bit if we are in gig only mode,
                 * which if enabled will attempt negotiation to 100Mb, which
                 * can cause a loss of link at power off or driver unload */
                ctrl &= ~E1000_CTRL_SWDPIN3;
                ew32(CTRL, ctrl);
        }

        /* Enable h/w to recognize an 802.1Q VLAN Ethernet packet */
        ew32(VET, ETHERNET_IEEE_VLAN_TYPE);

        e1000_reset_adaptive(hw);
        e1000_phy_get_info(hw, &adapter->phy_info);

        e1000_release_manageability(adapter);
}

/**
 * e1000_dump_eeprom - dump the EEPROM for users having checksum issues
 * @adapter: board private structure
 **/
static void e1000_dump_eeprom(struct e1000_adapter *adapter)
{
        struct net_device *netdev = adapter->netdev;
        struct ethtool_eeprom eeprom;
        const struct ethtool_ops *ops = netdev->ethtool_ops;
        u8 *data;
        int i;
        u16 csum_old, csum_new = 0;

        eeprom.len = ops->get_eeprom_len(netdev);
        eeprom.offset = 0;

        data = kmalloc(eeprom.len, GFP_KERNEL);
        if (!data) {
                pr_err("Unable to allocate memory to dump EEPROM data\n");
                return;
        }

        ops->get_eeprom(netdev, &eeprom, data);

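        /* Note (added for clarity, assuming the usual e1000 EEPROM layout):
         * the image is valid when the little-endian 16-bit words from word 0
         * through EEPROM_CHECKSUM_REG sum to EEPROM_SUM, so the expected
         * checksum computed below is EEPROM_SUM minus the sum of the words
         * that precede the checksum word.
         */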
        csum_old = (data[EEPROM_CHECKSUM_REG * 2]) +
                   (data[EEPROM_CHECKSUM_REG * 2 + 1] << 8);
        for (i = 0; i < EEPROM_CHECKSUM_REG * 2; i += 2)
                csum_new += data[i] + (data[i + 1] << 8);
        csum_new = EEPROM_SUM - csum_new;

        pr_err("/*********************/\n");
        pr_err("Current EEPROM Checksum : 0x%04x\n", csum_old);
        pr_err("Calculated              : 0x%04x\n", csum_new);

        pr_err("Offset    Values\n");
        pr_err("========  ======\n");
        print_hex_dump(KERN_ERR, "", DUMP_PREFIX_OFFSET, 16, 1, data, 128, 0);

        pr_err("Include this output when contacting your support provider.\n");
        pr_err("This is not a software error! Something bad happened to\n");
        pr_err("your hardware or EEPROM image. Ignoring this problem could\n");
        pr_err("result in further problems, possibly loss of data,\n");
        pr_err("corruption or system hangs!\n");
        pr_err("The MAC Address will be reset to 00:00:00:00:00:00,\n");
        pr_err("which is invalid and requires you to set the proper MAC\n");
        pr_err("address manually before continuing to enable this network\n");
        pr_err("device. Please inspect the EEPROM dump and report the\n");
        pr_err("issue to your hardware vendor or Intel Customer Support.\n");
        pr_err("/*********************/\n");

        kfree(data);
}

/**
 * e1000_is_need_ioport - determine if an adapter needs ioport resources or not
 * @pdev: PCI device information struct
 *
 * Return true if an adapter needs ioport resources
 **/
static int e1000_is_need_ioport(struct pci_dev *pdev)
{
        switch (pdev->device) {
        case E1000_DEV_ID_82540EM:
        case E1000_DEV_ID_82540EM_LOM:
        case E1000_DEV_ID_82540EP:
        case E1000_DEV_ID_82540EP_LOM:
        case E1000_DEV_ID_82540EP_LP:
        case E1000_DEV_ID_82541EI:
        case E1000_DEV_ID_82541EI_MOBILE:
        case E1000_DEV_ID_82541ER:
        case E1000_DEV_ID_82541ER_LOM:
        case E1000_DEV_ID_82541GI:
        case E1000_DEV_ID_82541GI_LF:
        case E1000_DEV_ID_82541GI_MOBILE:
        case E1000_DEV_ID_82544EI_COPPER:
        case E1000_DEV_ID_82544EI_FIBER:
        case E1000_DEV_ID_82544GC_COPPER:
        case E1000_DEV_ID_82544GC_LOM:
        case E1000_DEV_ID_82545EM_COPPER:
        case E1000_DEV_ID_82545EM_FIBER:
        case E1000_DEV_ID_82546EB_COPPER:
        case E1000_DEV_ID_82546EB_FIBER:
        case E1000_DEV_ID_82546EB_QUAD_COPPER:
                return true;
        default:
                return false;
        }
}

static const struct net_device_ops e1000_netdev_ops = {
        .ndo_open               = e1000_open,
        .ndo_stop               = e1000_close,
        .ndo_start_xmit         = e1000_xmit_frame,
        .ndo_get_stats          = e1000_get_stats,
        .ndo_set_rx_mode        = e1000_set_rx_mode,
        .ndo_set_mac_address    = e1000_set_mac,
        .ndo_tx_timeout         = e1000_tx_timeout,
        .ndo_change_mtu         = e1000_change_mtu,
        .ndo_do_ioctl           = e1000_ioctl,
        .ndo_validate_addr      = eth_validate_addr,
        .ndo_vlan_rx_register   = e1000_vlan_rx_register,
        .ndo_vlan_rx_add_vid    = e1000_vlan_rx_add_vid,
        .ndo_vlan_rx_kill_vid   = e1000_vlan_rx_kill_vid,
#ifdef CONFIG_NET_POLL_CONTROLLER
        .ndo_poll_controller    = e1000_netpoll,
#endif
};

/**
 * e1000_init_hw_struct - initialize members of hw struct
 * @adapter: board private struct
 * @hw: structure used by e1000_hw.c
 *
 * Factors out initialization of the e1000_hw struct to its own function
 * that can be called very early at init (just after struct allocation).
 * Fields are initialized based on PCI device information and
 * OS network device settings (MTU size).
 * Returns negative error codes if MAC type setup fails.
 */
static int e1000_init_hw_struct(struct e1000_adapter *adapter,
                                struct e1000_hw *hw)
{
        struct pci_dev *pdev = adapter->pdev;

        /* PCI config space info */
        hw->vendor_id = pdev->vendor;
        hw->device_id = pdev->device;
        hw->subsystem_vendor_id = pdev->subsystem_vendor;
        hw->subsystem_id = pdev->subsystem_device;
        hw->revision_id = pdev->revision;

        pci_read_config_word(pdev, PCI_COMMAND, &hw->pci_cmd_word);

        hw->max_frame_size = adapter->netdev->mtu +
                             ENET_HEADER_SIZE + ETHERNET_FCS_SIZE;
        hw->min_frame_size = MINIMUM_ETHERNET_FRAME_SIZE;

        /* identify the MAC */
        if (e1000_set_mac_type(hw)) {
                e_err(probe, "Unknown MAC Type\n");
                return -EIO;
        }

        switch (hw->mac_type) {
        default:
                break;
        case e1000_82541:
        case e1000_82547:
        case e1000_82541_rev_2:
        case e1000_82547_rev_2:
                hw->phy_init_script = 1;
                break;
        }

        e1000_set_media_type(hw);
        e1000_get_bus_info(hw);

        hw->wait_autoneg_complete = false;
        hw->tbi_compatibility_en = true;
        hw->adaptive_ifs = true;

        /* Copper options */

        if (hw->media_type == e1000_media_type_copper) {
                hw->mdix = AUTO_ALL_MODES;
                hw->disable_polarity_correction = false;
                hw->master_slave = E1000_MASTER_SLAVE;
        }

        return 0;
}

/**
 * e1000_probe - Device Initialization Routine
 * @pdev: PCI device information struct
 * @ent: entry in e1000_pci_tbl
 *
 * Returns 0 on success, negative on failure
 *
 * e1000_probe initializes an adapter identified by a pci_dev structure.
 * The OS initialization, configuring of the adapter private structure,
 * and a hardware reset occur.
 **/
static int __devinit e1000_probe(struct pci_dev *pdev,
                                 const struct pci_device_id *ent)
{
        struct net_device *netdev;
        struct e1000_adapter *adapter;
        struct e1000_hw *hw;

        static int cards_found = 0;
        static int global_quad_port_a = 0; /* global ksp3 port a indication */
        int i, err, pci_using_dac;
        u16 eeprom_data = 0;
        u16 eeprom_apme_mask = E1000_EEPROM_APME;
        int bars, need_ioport;

        /* do not allocate ioport bars when not needed */
        need_ioport = e1000_is_need_ioport(pdev);
        if (need_ioport) {
                bars = pci_select_bars(pdev, IORESOURCE_MEM | IORESOURCE_IO);
                err = pci_enable_device(pdev);
        } else {
                bars = pci_select_bars(pdev, IORESOURCE_MEM);
                err = pci_enable_device_mem(pdev);
        }
        if (err)
                return err;

        err = pci_request_selected_regions(pdev, bars, e1000_driver_name);
        if (err)
                goto err_pci_reg;

        pci_set_master(pdev);
        err = pci_save_state(pdev);
        if (err)
                goto err_alloc_etherdev;

        err = -ENOMEM;
        netdev = alloc_etherdev(sizeof(struct e1000_adapter));
        if (!netdev)
                goto err_alloc_etherdev;

        SET_NETDEV_DEV(netdev, &pdev->dev);

        pci_set_drvdata(pdev, netdev);
        adapter = netdev_priv(netdev);
        adapter->netdev = netdev;
        adapter->pdev = pdev;
        adapter->msg_enable = (1 << debug) - 1;
        adapter->bars = bars;
        adapter->need_ioport = need_ioport;

        hw = &adapter->hw;
        hw->back = adapter;

        err = -EIO;
        hw->hw_addr = pci_ioremap_bar(pdev, BAR_0);
        if (!hw->hw_addr)
                goto err_ioremap;

        if (adapter->need_ioport) {
                for (i = BAR_1; i <= BAR_5; i++) {
                        if (pci_resource_len(pdev, i) == 0)
                                continue;
                        if (pci_resource_flags(pdev, i) & IORESOURCE_IO) {
                                hw->io_base = pci_resource_start(pdev, i);
                                break;
                        }
                }
        }

        /* make ready for any if (hw->...) below */
        err = e1000_init_hw_struct(adapter, hw);
        if (err)
                goto err_sw_init;

        /*
         * there is a workaround being applied below that limits
         * 64-bit DMA addresses to 64-bit hardware.  There are some
         * 32-bit adapters that Tx hang when given 64-bit DMA addresses
         */
        pci_using_dac = 0;
        if ((hw->bus_type == e1000_bus_type_pcix) &&
            !dma_set_mask(&pdev->dev, DMA_BIT_MASK(64))) {
                /*
                 * according to DMA-API-HOWTO, coherent calls will always
                 * succeed if the set call did
                 */
                dma_set_coherent_mask(&pdev->dev, DMA_BIT_MASK(64));
                pci_using_dac = 1;
        } else {
                err = dma_set_mask(&pdev->dev, DMA_BIT_MASK(32));
                if (err) {
                        pr_err("No usable DMA config, aborting\n");
                        goto err_dma;
                }
                dma_set_coherent_mask(&pdev->dev, DMA_BIT_MASK(32));
        }

        netdev->netdev_ops = &e1000_netdev_ops;
        e1000_set_ethtool_ops(netdev);
        netdev->watchdog_timeo = 5 * HZ;
        netif_napi_add(netdev, &adapter->napi, e1000_clean, 64);

        strncpy(netdev->name, pci_name(pdev), sizeof(netdev->name) - 1);

        adapter->bd_number = cards_found;

        /* setup the private structure */
        err = e1000_sw_init(adapter);
        if (err)
                goto err_sw_init;

        err = -EIO;

        if (hw->mac_type >= e1000_82543) {
                netdev->features = NETIF_F_SG |
                                   NETIF_F_HW_CSUM |
                                   NETIF_F_HW_VLAN_TX |
                                   NETIF_F_HW_VLAN_RX |
                                   NETIF_F_HW_VLAN_FILTER;
        }

        if ((hw->mac_type >= e1000_82544) &&
            (hw->mac_type != e1000_82547))
                netdev->features |= NETIF_F_TSO;

        if (pci_using_dac) {
                netdev->features |= NETIF_F_HIGHDMA;
                netdev->vlan_features |= NETIF_F_HIGHDMA;
        }

        netdev->vlan_features |= NETIF_F_TSO;
        netdev->vlan_features |= NETIF_F_HW_CSUM;
        netdev->vlan_features |= NETIF_F_SG;

        adapter->en_mng_pt = e1000_enable_mng_pass_thru(hw);

        /* initialize eeprom parameters */
        if (e1000_init_eeprom_params(hw)) {
                e_err(probe, "EEPROM initialization failed\n");
                goto err_eeprom;
        }

        /* before reading the EEPROM, reset the controller to
         * put the device in a known good starting state */
        e1000_reset_hw(hw);

        /* make sure the EEPROM is good */
        if (e1000_validate_eeprom_checksum(hw) < 0) {
                e_err(probe, "The EEPROM Checksum Is Not Valid\n");
                e1000_dump_eeprom(adapter);
                /*
                 * set the MAC address to all zeroes to invalidate it and
                 * temporarily disable this device for the user.  This
                 * blocks regular traffic while still permitting ethtool
                 * ioctls from reaching the hardware, and allows the user
                 * to bring the interface up after manually setting a hw
                 * addr with `ip link set address`
                 */
                memset(hw->mac_addr, 0, netdev->addr_len);
        } else {
                /* copy the MAC address out of the EEPROM */
                if (e1000_read_mac_addr(hw))
                        e_err(probe, "EEPROM Read Error\n");
        }
        /* don't block initialization here due to a bad MAC address */
        memcpy(netdev->dev_addr, hw->mac_addr, netdev->addr_len);
        memcpy(netdev->perm_addr, hw->mac_addr, netdev->addr_len);

        if (!is_valid_ether_addr(netdev->perm_addr))
                e_err(probe, "Invalid MAC Address\n");

        init_timer(&adapter->tx_fifo_stall_timer);
        adapter->tx_fifo_stall_timer.function = e1000_82547_tx_fifo_stall;
        adapter->tx_fifo_stall_timer.data = (unsigned long)adapter;

        init_timer(&adapter->watchdog_timer);
        adapter->watchdog_timer.function = e1000_watchdog;
        adapter->watchdog_timer.data = (unsigned long)adapter;

        init_timer(&adapter->phy_info_timer);
        adapter->phy_info_timer.function = e1000_update_phy_info;
        adapter->phy_info_timer.data = (unsigned long)adapter;

        INIT_WORK(&adapter->fifo_stall_task, e1000_82547_tx_fifo_stall_task);
        INIT_WORK(&adapter->reset_task, e1000_reset_task);
        INIT_WORK(&adapter->phy_info_task, e1000_update_phy_info_task);

        e1000_check_options(adapter);

        /* Initial Wake on LAN setting
         * If APM wake is enabled in the EEPROM,
         * enable the ACPI Magic Packet filter
         */
        switch (hw->mac_type) {
        case e1000_82542_rev2_0:
        case e1000_82542_rev2_1:
        case e1000_82543:
                break;
        case e1000_82544:
                e1000_read_eeprom(hw,
                                  EEPROM_INIT_CONTROL2_REG, 1, &eeprom_data);
                eeprom_apme_mask = E1000_EEPROM_82544_APM;
                break;
        case e1000_82546:
        case e1000_82546_rev_3:
                if (er32(STATUS) & E1000_STATUS_FUNC_1) {
                        e1000_read_eeprom(hw,
                                          EEPROM_INIT_CONTROL3_PORT_B, 1,
                                          &eeprom_data);
                        break;
                }
                /* Fall Through */
        default:
                e1000_read_eeprom(hw,
                                  EEPROM_INIT_CONTROL3_PORT_A, 1, &eeprom_data);
                break;
        }
        if (eeprom_data & eeprom_apme_mask)
                adapter->eeprom_wol |= E1000_WUFC_MAG;

        /* now that we have the eeprom settings, apply the special cases
         * where the eeprom may be wrong or the board simply won't support
         * wake on lan on a particular port */
        switch (pdev->device) {
        case E1000_DEV_ID_82546GB_PCIE:
                adapter->eeprom_wol = 0;
                break;
        case E1000_DEV_ID_82546EB_FIBER:
        case E1000_DEV_ID_82546GB_FIBER:
                /* Wake events only supported on port A for dual fiber
                 * regardless of eeprom setting */
                if (er32(STATUS) & E1000_STATUS_FUNC_1)
                        adapter->eeprom_wol = 0;
                break;
        case E1000_DEV_ID_82546GB_QUAD_COPPER_KSP3:
                /* if quad port adapter, disable WoL on all but port A */
                if (global_quad_port_a != 0)
                        adapter->eeprom_wol = 0;
                else
                        adapter->quad_port_a = 1;
                /* Reset for multiple quad port adapters */
                if (++global_quad_port_a == 4)
                        global_quad_port_a = 0;
                break;
        }

        /* initialize the wol settings based on the eeprom settings */
        adapter->wol = adapter->eeprom_wol;
        device_set_wakeup_enable(&adapter->pdev->dev, adapter->wol);

        /* reset the hardware with the new settings */
        e1000_reset(adapter);

        strcpy(netdev->name, "eth%d");
        err = register_netdev(netdev);
        if (err)
                goto err_register;

        /* print bus type/speed/width info */
        e_info(probe, "(PCI%s:%dMHz:%d-bit) %pM\n",
               ((hw->bus_type == e1000_bus_type_pcix) ? "-X" : ""),
               ((hw->bus_speed == e1000_bus_speed_133) ? 133 :
                (hw->bus_speed == e1000_bus_speed_120) ? 120 :
                (hw->bus_speed == e1000_bus_speed_100) ? 100 :
                (hw->bus_speed == e1000_bus_speed_66) ? 66 : 33),
               ((hw->bus_width == e1000_bus_width_64) ? 64 : 32),
               netdev->dev_addr);

        /* carrier off reporting is important to ethtool even BEFORE open */
        netif_carrier_off(netdev);

        e_info(probe, "Intel(R) PRO/1000 Network Connection\n");

        cards_found++;
        return 0;

err_register:
err_eeprom:
        e1000_phy_hw_reset(hw);

        if (hw->flash_address)
                iounmap(hw->flash_address);
        kfree(adapter->tx_ring);
        kfree(adapter->rx_ring);
err_dma:
err_sw_init:
        iounmap(hw->hw_addr);
err_ioremap:
        free_netdev(netdev);
err_alloc_etherdev:
        pci_release_selected_regions(pdev, bars);
err_pci_reg:
        pci_disable_device(pdev);
        return err;
}

/**
 * e1000_remove - Device Removal Routine
 * @pdev: PCI device information struct
 *
 * e1000_remove is called by the PCI subsystem to alert the driver
 * that it should release a PCI device.  This could be caused by a
 * Hot-Plug event, or because the driver is going to be removed from
 * memory.
 **/

static void __devexit e1000_remove(struct pci_dev *pdev)
{
        struct net_device *netdev = pci_get_drvdata(pdev);
        struct e1000_adapter *adapter = netdev_priv(netdev);
        struct e1000_hw *hw = &adapter->hw;

        set_bit(__E1000_DOWN, &adapter->flags);
        del_timer_sync(&adapter->tx_fifo_stall_timer);
        del_timer_sync(&adapter->watchdog_timer);
        del_timer_sync(&adapter->phy_info_timer);

        cancel_work_sync(&adapter->reset_task);

        e1000_release_manageability(adapter);

        unregister_netdev(netdev);

        e1000_phy_hw_reset(hw);

        kfree(adapter->tx_ring);
        kfree(adapter->rx_ring);

        iounmap(hw->hw_addr);
        if (hw->flash_address)
                iounmap(hw->flash_address);
        pci_release_selected_regions(pdev, adapter->bars);

        free_netdev(netdev);

        pci_disable_device(pdev);
}

/**
 * e1000_sw_init - Initialize general software structures (struct e1000_adapter)
 * @adapter: board private structure to initialize
 *
 * e1000_sw_init initializes the Adapter private data structure.
 * e1000_init_hw_struct MUST be called before this function.
 **/
static int __devinit e1000_sw_init(struct e1000_adapter *adapter)
{
        adapter->rx_buffer_len = MAXIMUM_ETHERNET_VLAN_SIZE;

        adapter->num_tx_queues = 1;
        adapter->num_rx_queues = 1;

        if (e1000_alloc_queues(adapter)) {
                e_err(probe, "Unable to allocate memory for queues\n");
                return -ENOMEM;
        }

        /* Explicitly disable IRQ since the NIC can be in any state. */
        e1000_irq_disable(adapter);

        spin_lock_init(&adapter->stats_lock);

        set_bit(__E1000_DOWN, &adapter->flags);

        return 0;
}

/**
 * e1000_alloc_queues - Allocate memory for all rings
 * @adapter: board private structure to initialize
 *
 * We allocate one ring per queue at run-time since we don't know the
 * number of queues at compile-time.
 **/
static int __devinit e1000_alloc_queues(struct e1000_adapter *adapter)
{
        adapter->tx_ring = kcalloc(adapter->num_tx_queues,
                                   sizeof(struct e1000_tx_ring), GFP_KERNEL);
        if (!adapter->tx_ring)
                return -ENOMEM;

        adapter->rx_ring = kcalloc(adapter->num_rx_queues,
                                   sizeof(struct e1000_rx_ring), GFP_KERNEL);
        if (!adapter->rx_ring) {
                kfree(adapter->tx_ring);
                return -ENOMEM;
        }

        return E1000_SUCCESS;
}

/**
 * e1000_open - Called when a network interface is made active
 * @netdev: network interface device structure
 *
 * Returns 0 on success, negative value on failure
 *
 * The open entry point is called when a network interface is made
 * active by the system (IFF_UP).  At this point all resources needed
 * for transmit and receive operations are allocated, the interrupt
 * handler is registered with the OS, the watchdog timer is started,
 * and the stack is notified that the interface is ready.
 **/
static int e1000_open(struct net_device *netdev)
{
        struct e1000_adapter *adapter = netdev_priv(netdev);
        struct e1000_hw *hw = &adapter->hw;
        int err;

        /* disallow open during test */
        if (test_bit(__E1000_TESTING, &adapter->flags))
                return -EBUSY;

        netif_carrier_off(netdev);

        /* allocate transmit descriptors */
        err = e1000_setup_all_tx_resources(adapter);
        if (err)
                goto err_setup_tx;

        /* allocate receive descriptors */
        err = e1000_setup_all_rx_resources(adapter);
        if (err)
                goto err_setup_rx;

        e1000_power_up_phy(adapter);

        adapter->mng_vlan_id = E1000_MNG_VLAN_NONE;
        if ((hw->mng_cookie.status &
             E1000_MNG_DHCP_COOKIE_STATUS_VLAN_SUPPORT)) {
                e1000_update_mng_vlan(adapter);
        }

        /* before we allocate an interrupt, we must be ready to handle it.
         * Setting DEBUG_SHIRQ in the kernel makes it fire an interrupt
         * as soon as we call pci_request_irq, so we have to setup our
         * clean_rx handler before we do so. */
        e1000_configure(adapter);

        err = e1000_request_irq(adapter);
        if (err)
                goto err_req_irq;

        /* From here on the code is the same as e1000_up() */
        clear_bit(__E1000_DOWN, &adapter->flags);

        napi_enable(&adapter->napi);

        e1000_irq_enable(adapter);

        netif_start_queue(netdev);

        /* fire a link status change interrupt to start the watchdog */
        ew32(ICS, E1000_ICS_LSC);

        return E1000_SUCCESS;

err_req_irq:
        e1000_power_down_phy(adapter);
        e1000_free_all_rx_resources(adapter);
err_setup_rx:
        e1000_free_all_tx_resources(adapter);
err_setup_tx:
        e1000_reset(adapter);

        return err;
}

/**
 * e1000_close - Disables a network interface
 * @netdev: network interface device structure
 *
 * Returns 0, this is not allowed to fail
 *
 * The close entry point is called when an interface is de-activated
 * by the OS.  The hardware is still under the driver's control, but
 * needs to be disabled.  A global MAC reset is issued to stop the
 * hardware, and all transmit and receive resources are freed.
 **/
static int e1000_close(struct net_device *netdev)
{
        struct e1000_adapter *adapter = netdev_priv(netdev);
        struct e1000_hw *hw = &adapter->hw;

        WARN_ON(test_bit(__E1000_RESETTING, &adapter->flags));
        e1000_down(adapter);
        e1000_power_down_phy(adapter);
        e1000_free_irq(adapter);

        e1000_free_all_tx_resources(adapter);
        e1000_free_all_rx_resources(adapter);

        /* kill manageability vlan ID if supported, but not if a vlan with
         * the same ID is registered on the host OS (let 8021q kill it) */
        if ((hw->mng_cookie.status &
             E1000_MNG_DHCP_COOKIE_STATUS_VLAN_SUPPORT) &&
            !(adapter->vlgrp &&
              vlan_group_get_device(adapter->vlgrp, adapter->mng_vlan_id))) {
                e1000_vlan_rx_kill_vid(netdev, adapter->mng_vlan_id);
        }

        return 0;
}

/**
 * e1000_check_64k_bound - check that memory doesn't cross 64kB boundary
 * @adapter: address of board private structure
 * @start: address of beginning of memory
 * @len: length of memory
 **/
static bool e1000_check_64k_bound(struct e1000_adapter *adapter, void *start,
                                  unsigned long len)
{
        struct e1000_hw *hw = &adapter->hw;
        unsigned long begin = (unsigned long)start;
        unsigned long end = begin + len;

        /* First rev 82545 and 82546 need to not allow any memory
         * write location to cross 64k boundary due to errata 23 */
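        /* Clarifying note (added): begin ^ (end - 1) has a bit set at or
         * above bit 16 exactly when the first and last byte fall in
         * different 64 KB regions, i.e. the buffer crosses a boundary.
         */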
        if (hw->mac_type == e1000_82545 ||
            hw->mac_type == e1000_82546) {
                return ((begin ^ (end - 1)) >> 16) != 0 ? false : true;
        }

        return true;
}

/**
 * e1000_setup_tx_resources - allocate Tx resources (Descriptors)
 * @adapter: board private structure
 * @txdr: tx descriptor ring (for a specific queue) to setup
 *
 * Return 0 on success, negative on failure
 **/
static int e1000_setup_tx_resources(struct e1000_adapter *adapter,
                                    struct e1000_tx_ring *txdr)
{
        struct pci_dev *pdev = adapter->pdev;
        int size;

        size = sizeof(struct e1000_buffer) * txdr->count;
        txdr->buffer_info = vmalloc(size);
        if (!txdr->buffer_info) {
                e_err(probe, "Unable to allocate memory for the Tx descriptor "
                      "ring\n");
                return -ENOMEM;
        }
        memset(txdr->buffer_info, 0, size);

        /* round up to nearest 4K */
        txdr->size = txdr->count * sizeof(struct e1000_tx_desc);
        txdr->size = ALIGN(txdr->size, 4096);

        txdr->desc = dma_alloc_coherent(&pdev->dev, txdr->size, &txdr->dma,
                                        GFP_KERNEL);
        if (!txdr->desc) {
setup_tx_desc_die:
                vfree(txdr->buffer_info);
                e_err(probe, "Unable to allocate memory for the Tx descriptor "
                      "ring\n");
                return -ENOMEM;
        }

        /* Fix for errata 23, can't cross 64kB boundary */
        if (!e1000_check_64k_bound(adapter, txdr->desc, txdr->size)) {
                void *olddesc = txdr->desc;
                dma_addr_t olddma = txdr->dma;
                e_err(tx_err, "txdr align check failed: %u bytes at %p\n",
                      txdr->size, txdr->desc);
                /* Try again, without freeing the previous */
                txdr->desc = dma_alloc_coherent(&pdev->dev, txdr->size,
                                                &txdr->dma, GFP_KERNEL);
                /* Failed allocation, critical failure */
                if (!txdr->desc) {
                        dma_free_coherent(&pdev->dev, txdr->size, olddesc,
                                          olddma);
                        goto setup_tx_desc_die;
                }

                if (!e1000_check_64k_bound(adapter, txdr->desc, txdr->size)) {
                        /* give up */
                        dma_free_coherent(&pdev->dev, txdr->size, txdr->desc,
                                          txdr->dma);
                        dma_free_coherent(&pdev->dev, txdr->size, olddesc,
                                          olddma);
                        e_err(probe, "Unable to allocate aligned memory "
                              "for the transmit descriptor ring\n");
                        vfree(txdr->buffer_info);
                        return -ENOMEM;
                } else {
                        /* Free old allocation, new allocation was successful */
                        dma_free_coherent(&pdev->dev, txdr->size, olddesc,
                                          olddma);
                }
        }
        memset(txdr->desc, 0, txdr->size);

        txdr->next_to_use = 0;
        txdr->next_to_clean = 0;

        return 0;
}

/**
 * e1000_setup_all_tx_resources - wrapper to allocate Tx resources
 *                                (Descriptors) for all queues
 * @adapter: board private structure
 *
 * Return 0 on success, negative on failure
 **/
int e1000_setup_all_tx_resources(struct e1000_adapter *adapter)
{
        int i, err = 0;

        for (i = 0; i < adapter->num_tx_queues; i++) {
                err = e1000_setup_tx_resources(adapter, &adapter->tx_ring[i]);
                if (err) {
                        e_err(probe, "Allocation for Tx Queue %u failed\n", i);
                        for (i--; i >= 0; i--)
                                e1000_free_tx_resources(adapter,
                                                        &adapter->tx_ring[i]);
                        break;
                }
        }

        return err;
}

/**
 * e1000_configure_tx - Configure 8254x Transmit Unit after Reset
 * @adapter: board private structure
 *
 * Configure the Tx unit of the MAC after a reset.
 **/
static void e1000_configure_tx(struct e1000_adapter *adapter)
{
        u64 tdba;
        struct e1000_hw *hw = &adapter->hw;
        u32 tdlen, tctl, tipg;
        u32 ipgr1, ipgr2;

        /* Setup the HW Tx Head and Tail descriptor pointers */
        switch (adapter->num_tx_queues) {
        case 1:
        default:
                tdba = adapter->tx_ring[0].dma;
                tdlen = adapter->tx_ring[0].count *
                        sizeof(struct e1000_tx_desc);
                ew32(TDLEN, tdlen);
                ew32(TDBAH, (tdba >> 32));
                ew32(TDBAL, (tdba & 0x00000000ffffffffULL));
                ew32(TDT, 0);
                ew32(TDH, 0);
                adapter->tx_ring[0].tdh = ((hw->mac_type >= e1000_82543) ?
                                           E1000_TDH : E1000_82542_TDH);
                adapter->tx_ring[0].tdt = ((hw->mac_type >= e1000_82543) ?
                                           E1000_TDT : E1000_82542_TDT);
                break;
        }

        /* Set the default values for the Tx Inter Packet Gap timer */
        if ((hw->media_type == e1000_media_type_fiber ||
             hw->media_type == e1000_media_type_internal_serdes))
                tipg = DEFAULT_82543_TIPG_IPGT_FIBER;
        else
                tipg = DEFAULT_82543_TIPG_IPGT_COPPER;

        switch (hw->mac_type) {
        case e1000_82542_rev2_0:
        case e1000_82542_rev2_1:
                tipg = DEFAULT_82542_TIPG_IPGT;
                ipgr1 = DEFAULT_82542_TIPG_IPGR1;
                ipgr2 = DEFAULT_82542_TIPG_IPGR2;
                break;
        default:
                ipgr1 = DEFAULT_82543_TIPG_IPGR1;
                ipgr2 = DEFAULT_82543_TIPG_IPGR2;
                break;
        }
        tipg |= ipgr1 << E1000_TIPG_IPGR1_SHIFT;
        tipg |= ipgr2 << E1000_TIPG_IPGR2_SHIFT;
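        /* Layout note (added for clarity): TIPG holds three fields; IPGT
         * occupies the low bits, and the two OR-shifts above place IPGR1
         * and IPGR2 into their own bit ranges without disturbing IPGT.
         */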
1576 ew32(TIPG, tipg); |
|
1577 |
|
1578 /* Set the Tx Interrupt Delay register */ |
|
1579 |
|
1580 ew32(TIDV, adapter->tx_int_delay); |
|
1581 if (hw->mac_type >= e1000_82540) |
|
1582 ew32(TADV, adapter->tx_abs_int_delay); |
|
1583 |
|
1584 /* Program the Transmit Control Register */ |
|
1585 |
|
1586 tctl = er32(TCTL); |
|
1587 tctl &= ~E1000_TCTL_CT; |
|
1588 tctl |= E1000_TCTL_PSP | E1000_TCTL_RTLC | |
|
1589 (E1000_COLLISION_THRESHOLD << E1000_CT_SHIFT); |
|
1590 |
|
1591 e1000_config_collision_dist(hw); |
|
1592 |
|
1593 /* Setup Transmit Descriptor Settings for eop descriptor */ |
|
1594 adapter->txd_cmd = E1000_TXD_CMD_EOP | E1000_TXD_CMD_IFCS; |
|
1595 |
|
1596 /* only set IDE if we are delaying interrupts using the timers */ |
|
1597 if (adapter->tx_int_delay) |
|
1598 adapter->txd_cmd |= E1000_TXD_CMD_IDE; |
|
1599 |
|
1600 if (hw->mac_type < e1000_82543) |
|
1601 adapter->txd_cmd |= E1000_TXD_CMD_RPS; |
|
1602 else |
|
1603 adapter->txd_cmd |= E1000_TXD_CMD_RS; |
|
1604 |
|
1605 /* Cache if we're 82544 running in PCI-X because we'll |
|
1606 * need this to apply a workaround later in the send path. */ |
|
1607 if (hw->mac_type == e1000_82544 && |
|
1608 hw->bus_type == e1000_bus_type_pcix) |
|
1609 adapter->pcix_82544 = 1; |
|
1610 |
|
1611 ew32(TCTL, tctl); |
|
1612 |
|
1613 } |
|

/**
 * e1000_setup_rx_resources - allocate Rx resources (Descriptors)
 * @adapter: board private structure
 * @rxdr: rx descriptor ring (for a specific queue) to setup
 *
 * Returns 0 on success, negative on failure
 **/

static int e1000_setup_rx_resources(struct e1000_adapter *adapter,
				    struct e1000_rx_ring *rxdr)
{
	struct pci_dev *pdev = adapter->pdev;
	int size, desc_len;

	size = sizeof(struct e1000_buffer) * rxdr->count;
	rxdr->buffer_info = vmalloc(size);
	if (!rxdr->buffer_info) {
		e_err(probe, "Unable to allocate memory for the Rx descriptor "
		      "ring\n");
		return -ENOMEM;
	}
	memset(rxdr->buffer_info, 0, size);

	desc_len = sizeof(struct e1000_rx_desc);

	/* Round up to nearest 4K */

	rxdr->size = rxdr->count * desc_len;
	rxdr->size = ALIGN(rxdr->size, 4096);

	rxdr->desc = dma_alloc_coherent(&pdev->dev, rxdr->size, &rxdr->dma,
					GFP_KERNEL);

	if (!rxdr->desc) {
		e_err(probe, "Unable to allocate memory for the Rx descriptor "
		      "ring\n");
setup_rx_desc_die:
		vfree(rxdr->buffer_info);
		return -ENOMEM;
	}

	/* Fix for errata 23, can't cross 64kB boundary */
	if (!e1000_check_64k_bound(adapter, rxdr->desc, rxdr->size)) {
		void *olddesc = rxdr->desc;
		dma_addr_t olddma = rxdr->dma;
		e_err(rx_err, "rxdr align check failed: %u bytes at %p\n",
		      rxdr->size, rxdr->desc);
		/* Try again, without freeing the previous */
		rxdr->desc = dma_alloc_coherent(&pdev->dev, rxdr->size,
						&rxdr->dma, GFP_KERNEL);
		/* Failed allocation, critical failure */
		if (!rxdr->desc) {
			dma_free_coherent(&pdev->dev, rxdr->size, olddesc,
					  olddma);
			e_err(probe, "Unable to allocate memory for the Rx "
			      "descriptor ring\n");
			goto setup_rx_desc_die;
		}

		if (!e1000_check_64k_bound(adapter, rxdr->desc, rxdr->size)) {
			/* give up */
			dma_free_coherent(&pdev->dev, rxdr->size, rxdr->desc,
					  rxdr->dma);
			dma_free_coherent(&pdev->dev, rxdr->size, olddesc,
					  olddma);
			e_err(probe, "Unable to allocate aligned memory for "
			      "the Rx descriptor ring\n");
			goto setup_rx_desc_die;
		} else {
			/* Free old allocation, new allocation was successful */
			dma_free_coherent(&pdev->dev, rxdr->size, olddesc,
					  olddma);
		}
	}
	memset(rxdr->desc, 0, rxdr->size);

	rxdr->next_to_clean = 0;
	rxdr->next_to_use = 0;
	rxdr->rx_skb_top = NULL;

	return 0;
}

/**
 * e1000_setup_all_rx_resources - wrapper to allocate Rx resources
 * (Descriptors) for all queues
 * @adapter: board private structure
 *
 * Return 0 on success, negative on failure
 **/

int e1000_setup_all_rx_resources(struct e1000_adapter *adapter)
{
	int i, err = 0;

	for (i = 0; i < adapter->num_rx_queues; i++) {
		err = e1000_setup_rx_resources(adapter, &adapter->rx_ring[i]);
		if (err) {
			e_err(probe, "Allocation for Rx Queue %u failed\n", i);
			for (i--; i >= 0; i--)
				e1000_free_rx_resources(adapter,
							&adapter->rx_ring[i]);
			break;
		}
	}

	return err;
}

/**
 * e1000_setup_rctl - configure the receive control registers
 * @adapter: Board private structure
 **/
static void e1000_setup_rctl(struct e1000_adapter *adapter)
{
	struct e1000_hw *hw = &adapter->hw;
	u32 rctl;

	rctl = er32(RCTL);

	rctl &= ~(3 << E1000_RCTL_MO_SHIFT);

	rctl |= E1000_RCTL_EN | E1000_RCTL_BAM |
		E1000_RCTL_LBM_NO | E1000_RCTL_RDMTS_HALF |
		(hw->mc_filter_type << E1000_RCTL_MO_SHIFT);

	if (hw->tbi_compatibility_on == 1)
		rctl |= E1000_RCTL_SBP;
	else
		rctl &= ~E1000_RCTL_SBP;

	if (adapter->netdev->mtu <= ETH_DATA_LEN)
		rctl &= ~E1000_RCTL_LPE;
	else
		rctl |= E1000_RCTL_LPE;

	/* Setup buffer sizes */
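	/* BSEX selects the extended buffer-size encodings used by the
	 * 4096/8192/16384 cases below; the default 2048-byte case clears
	 * it again. */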
	rctl &= ~E1000_RCTL_SZ_4096;
	rctl |= E1000_RCTL_BSEX;
	switch (adapter->rx_buffer_len) {
	case E1000_RXBUFFER_2048:
	default:
		rctl |= E1000_RCTL_SZ_2048;
		rctl &= ~E1000_RCTL_BSEX;
		break;
	case E1000_RXBUFFER_4096:
		rctl |= E1000_RCTL_SZ_4096;
		break;
	case E1000_RXBUFFER_8192:
		rctl |= E1000_RCTL_SZ_8192;
		break;
	case E1000_RXBUFFER_16384:
		rctl |= E1000_RCTL_SZ_16384;
		break;
	}

	ew32(RCTL, rctl);
}

/**
 * e1000_configure_rx - Configure 8254x Receive Unit after Reset
 * @adapter: board private structure
 *
 * Configure the Rx unit of the MAC after a reset.
 **/

static void e1000_configure_rx(struct e1000_adapter *adapter)
{
	u64 rdba;
	struct e1000_hw *hw = &adapter->hw;
	u32 rdlen, rctl, rxcsum;

	if (adapter->netdev->mtu > ETH_DATA_LEN) {
		rdlen = adapter->rx_ring[0].count *
			sizeof(struct e1000_rx_desc);
		adapter->clean_rx = e1000_clean_jumbo_rx_irq;
		adapter->alloc_rx_buf = e1000_alloc_jumbo_rx_buffers;
	} else {
		rdlen = adapter->rx_ring[0].count *
			sizeof(struct e1000_rx_desc);
		adapter->clean_rx = e1000_clean_rx_irq;
		adapter->alloc_rx_buf = e1000_alloc_rx_buffers;
	}

	/* disable receives while setting up the descriptors */
	rctl = er32(RCTL);
	ew32(RCTL, rctl & ~E1000_RCTL_EN);

	/* set the Receive Delay Timer Register */
	ew32(RDTR, adapter->rx_int_delay);

	if (hw->mac_type >= e1000_82540) {
		ew32(RADV, adapter->rx_abs_int_delay);
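		/* the ITR interval is programmed in 256 ns units, so
		 * 1000000000 / (itr * 256) converts interrupts/sec into
		 * the register's native interval */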
		if (adapter->itr_setting != 0)
			ew32(ITR, 1000000000 / (adapter->itr * 256));
	}

	/* Setup the HW Rx Head and Tail Descriptor Pointers and
	 * the Base and Length of the Rx Descriptor Ring */
	switch (adapter->num_rx_queues) {
	case 1:
	default:
		rdba = adapter->rx_ring[0].dma;
		ew32(RDLEN, rdlen);
		ew32(RDBAH, (rdba >> 32));
		ew32(RDBAL, (rdba & 0x00000000ffffffffULL));
		ew32(RDT, 0);
		ew32(RDH, 0);
		adapter->rx_ring[0].rdh = ((hw->mac_type >= e1000_82543) ?
					   E1000_RDH : E1000_82542_RDH);
		adapter->rx_ring[0].rdt = ((hw->mac_type >= e1000_82543) ?
					   E1000_RDT : E1000_82542_RDT);
		break;
	}

	/* Enable 82543 Receive Checksum Offload for TCP and UDP */
	if (hw->mac_type >= e1000_82543) {
		rxcsum = er32(RXCSUM);
		if (adapter->rx_csum)
			rxcsum |= E1000_RXCSUM_TUOFL;
		else
			/* don't need to clear IPPCSE as it defaults to 0 */
			rxcsum &= ~E1000_RXCSUM_TUOFL;
		ew32(RXCSUM, rxcsum);
	}

	/* Enable Receives */
	ew32(RCTL, rctl);
}

/**
 * e1000_free_tx_resources - Free Tx Resources per Queue
 * @adapter: board private structure
 * @tx_ring: Tx descriptor ring for a specific queue
 *
 * Free all transmit software resources
 **/

static void e1000_free_tx_resources(struct e1000_adapter *adapter,
				    struct e1000_tx_ring *tx_ring)
{
	struct pci_dev *pdev = adapter->pdev;

	e1000_clean_tx_ring(adapter, tx_ring);

	vfree(tx_ring->buffer_info);
	tx_ring->buffer_info = NULL;

	dma_free_coherent(&pdev->dev, tx_ring->size, tx_ring->desc,
			  tx_ring->dma);

	tx_ring->desc = NULL;
}

/**
 * e1000_free_all_tx_resources - Free Tx Resources for All Queues
 * @adapter: board private structure
 *
 * Free all transmit software resources
 **/

void e1000_free_all_tx_resources(struct e1000_adapter *adapter)
{
	int i;

	for (i = 0; i < adapter->num_tx_queues; i++)
		e1000_free_tx_resources(adapter, &adapter->tx_ring[i]);
}

static void e1000_unmap_and_free_tx_resource(struct e1000_adapter *adapter,
					     struct e1000_buffer *buffer_info)
{
	if (buffer_info->dma) {
		if (buffer_info->mapped_as_page)
			dma_unmap_page(&adapter->pdev->dev, buffer_info->dma,
				       buffer_info->length, DMA_TO_DEVICE);
		else
			dma_unmap_single(&adapter->pdev->dev, buffer_info->dma,
					 buffer_info->length,
					 DMA_TO_DEVICE);
		buffer_info->dma = 0;
	}
	if (buffer_info->skb) {
		dev_kfree_skb_any(buffer_info->skb);
		buffer_info->skb = NULL;
	}
	buffer_info->time_stamp = 0;
	/* buffer_info must be completely set up in the transmit path */
}

/**
 * e1000_clean_tx_ring - Free Tx Buffers
 * @adapter: board private structure
 * @tx_ring: ring to be cleaned
 **/

static void e1000_clean_tx_ring(struct e1000_adapter *adapter,
				struct e1000_tx_ring *tx_ring)
{
	struct e1000_hw *hw = &adapter->hw;
	struct e1000_buffer *buffer_info;
	unsigned long size;
	unsigned int i;

	/* Free all the Tx ring sk_buffs */

	for (i = 0; i < tx_ring->count; i++) {
		buffer_info = &tx_ring->buffer_info[i];
		e1000_unmap_and_free_tx_resource(adapter, buffer_info);
	}

	size = sizeof(struct e1000_buffer) * tx_ring->count;
	memset(tx_ring->buffer_info, 0, size);

	/* Zero out the descriptor ring */

	memset(tx_ring->desc, 0, tx_ring->size);

	tx_ring->next_to_use = 0;
	tx_ring->next_to_clean = 0;
	tx_ring->last_tx_tso = 0;

	writel(0, hw->hw_addr + tx_ring->tdh);
	writel(0, hw->hw_addr + tx_ring->tdt);
}

/**
 * e1000_clean_all_tx_rings - Free Tx Buffers for all queues
 * @adapter: board private structure
 **/

static void e1000_clean_all_tx_rings(struct e1000_adapter *adapter)
{
	int i;

	for (i = 0; i < adapter->num_tx_queues; i++)
		e1000_clean_tx_ring(adapter, &adapter->tx_ring[i]);
}

1953 * e1000_free_rx_resources - Free Rx Resources |
|
1954 * @adapter: board private structure |
|
1955 * @rx_ring: ring to clean the resources from |
|
1956 * |
|
1957 * Free all receive software resources |
|
1958 **/ |
|
1959 |
|
1960 static void e1000_free_rx_resources(struct e1000_adapter *adapter, |
|
1961 struct e1000_rx_ring *rx_ring) |
|
1962 { |
|
1963 struct pci_dev *pdev = adapter->pdev; |
|
1964 |
|
1965 e1000_clean_rx_ring(adapter, rx_ring); |
|
1966 |
|
1967 vfree(rx_ring->buffer_info); |
|
1968 rx_ring->buffer_info = NULL; |
|
1969 |
|
1970 dma_free_coherent(&pdev->dev, rx_ring->size, rx_ring->desc, |
|
1971 rx_ring->dma); |
|
1972 |
|
1973 rx_ring->desc = NULL; |
|
1974 } |
|
1975 |
|
1976 /** |
|
1977 * e1000_free_all_rx_resources - Free Rx Resources for All Queues |
|
1978 * @adapter: board private structure |
|
1979 * |
|
1980 * Free all receive software resources |
|
1981 **/ |
|
1982 |
|
1983 void e1000_free_all_rx_resources(struct e1000_adapter *adapter) |
|
1984 { |
|
1985 int i; |
|
1986 |
|
1987 for (i = 0; i < adapter->num_rx_queues; i++) |
|
1988 e1000_free_rx_resources(adapter, &adapter->rx_ring[i]); |
|
1989 } |
|
1990 |
|
1991 /** |
|
1992 * e1000_clean_rx_ring - Free Rx Buffers per Queue |
|
1993 * @adapter: board private structure |
|
1994 * @rx_ring: ring to free buffers from |
|
1995 **/ |
|
1996 |
|
1997 static void e1000_clean_rx_ring(struct e1000_adapter *adapter, |
|
1998 struct e1000_rx_ring *rx_ring) |
|
1999 { |
|
2000 struct e1000_hw *hw = &adapter->hw; |
|
2001 struct e1000_buffer *buffer_info; |
|
2002 struct pci_dev *pdev = adapter->pdev; |
|
2003 unsigned long size; |
|
2004 unsigned int i; |
|
2005 |
|
2006 /* Free all the Rx ring sk_buffs */ |
|
2007 for (i = 0; i < rx_ring->count; i++) { |
|
2008 buffer_info = &rx_ring->buffer_info[i]; |
|
2009 if (buffer_info->dma && |
|
2010 adapter->clean_rx == e1000_clean_rx_irq) { |
|
2011 dma_unmap_single(&pdev->dev, buffer_info->dma, |
|
2012 buffer_info->length, |
|
2013 DMA_FROM_DEVICE); |
|
2014 } else if (buffer_info->dma && |
|
2015 adapter->clean_rx == e1000_clean_jumbo_rx_irq) { |
|
2016 dma_unmap_page(&pdev->dev, buffer_info->dma, |
|
2017 buffer_info->length, |
|
2018 DMA_FROM_DEVICE); |
|
2019 } |
|
2020 |
|
2021 buffer_info->dma = 0; |
|
2022 if (buffer_info->page) { |
|
2023 put_page(buffer_info->page); |
|
2024 buffer_info->page = NULL; |
|
2025 } |
|
2026 if (buffer_info->skb) { |
|
2027 dev_kfree_skb(buffer_info->skb); |
|
2028 buffer_info->skb = NULL; |
|
2029 } |
|
2030 } |
|
2031 |
|
2032 /* there also may be some cached data from a chained receive */ |
|
2033 if (rx_ring->rx_skb_top) { |
|
2034 dev_kfree_skb(rx_ring->rx_skb_top); |
|
2035 rx_ring->rx_skb_top = NULL; |
|
2036 } |
|
2037 |
|
2038 size = sizeof(struct e1000_buffer) * rx_ring->count; |
|
2039 memset(rx_ring->buffer_info, 0, size); |
|
2040 |
|
2041 /* Zero out the descriptor ring */ |
|
2042 memset(rx_ring->desc, 0, rx_ring->size); |
|
2043 |
|
2044 rx_ring->next_to_clean = 0; |
|
2045 rx_ring->next_to_use = 0; |
|
2046 |
|
2047 writel(0, hw->hw_addr + rx_ring->rdh); |
|
2048 writel(0, hw->hw_addr + rx_ring->rdt); |
|
2049 } |
|
2050 |
|
2051 /** |
|
2052 * e1000_clean_all_rx_rings - Free Rx Buffers for all queues |
|
2053 * @adapter: board private structure |
|
2054 **/ |
|
2055 |
|
2056 static void e1000_clean_all_rx_rings(struct e1000_adapter *adapter) |
|
2057 { |
|
2058 int i; |
|
2059 |
|
2060 for (i = 0; i < adapter->num_rx_queues; i++) |
|
2061 e1000_clean_rx_ring(adapter, &adapter->rx_ring[i]); |
|
2062 } |
|
2063 |
|
/* The 82542 2.0 (revision 2) needs to have the receive unit in reset
 * and memory write and invalidate disabled for certain operations
 */
static void e1000_enter_82542_rst(struct e1000_adapter *adapter)
{
	struct e1000_hw *hw = &adapter->hw;
	struct net_device *netdev = adapter->netdev;
	u32 rctl;

	e1000_pci_clear_mwi(hw);

	rctl = er32(RCTL);
	rctl |= E1000_RCTL_RST;
	ew32(RCTL, rctl);
	E1000_WRITE_FLUSH();
	mdelay(5);

	if (netif_running(netdev))
		e1000_clean_all_rx_rings(adapter);
}

static void e1000_leave_82542_rst(struct e1000_adapter *adapter)
{
	struct e1000_hw *hw = &adapter->hw;
	struct net_device *netdev = adapter->netdev;
	u32 rctl;

	rctl = er32(RCTL);
	rctl &= ~E1000_RCTL_RST;
	ew32(RCTL, rctl);
	E1000_WRITE_FLUSH();
	mdelay(5);

	if (hw->pci_cmd_word & PCI_COMMAND_INVALIDATE)
		e1000_pci_set_mwi(hw);

	if (netif_running(netdev)) {
		/* No need to loop, because 82542 supports only 1 queue */
		struct e1000_rx_ring *ring = &adapter->rx_ring[0];
		e1000_configure_rx(adapter);
		adapter->alloc_rx_buf(adapter, ring, E1000_DESC_UNUSED(ring));
	}
}

/**
 * e1000_set_mac - Change the Ethernet Address of the NIC
 * @netdev: network interface device structure
 * @p: pointer to an address structure
 *
 * Returns 0 on success, negative on failure
 **/

static int e1000_set_mac(struct net_device *netdev, void *p)
{
	struct e1000_adapter *adapter = netdev_priv(netdev);
	struct e1000_hw *hw = &adapter->hw;
	struct sockaddr *addr = p;

	if (!is_valid_ether_addr(addr->sa_data))
		return -EADDRNOTAVAIL;

	/* 82542 2.0 needs to be in reset to write receive address registers */

	if (hw->mac_type == e1000_82542_rev2_0)
		e1000_enter_82542_rst(adapter);

	memcpy(netdev->dev_addr, addr->sa_data, netdev->addr_len);
	memcpy(hw->mac_addr, addr->sa_data, netdev->addr_len);

	e1000_rar_set(hw, hw->mac_addr, 0);

	if (hw->mac_type == e1000_82542_rev2_0)
		e1000_leave_82542_rst(adapter);

	return 0;
}

/**
 * e1000_set_rx_mode - Secondary Unicast, Multicast and Promiscuous mode set
 * @netdev: network interface device structure
 *
 * The set_rx_mode entry point is called whenever the unicast or multicast
 * address lists or the network interface flags are updated. This routine is
 * responsible for configuring the hardware for proper unicast, multicast,
 * promiscuous mode, and all-multi behavior.
 **/

static void e1000_set_rx_mode(struct net_device *netdev)
{
	struct e1000_adapter *adapter = netdev_priv(netdev);
	struct e1000_hw *hw = &adapter->hw;
	struct netdev_hw_addr *ha;
	bool use_uc = false;
	u32 rctl;
	u32 hash_value;
	int i, rar_entries = E1000_RAR_ENTRIES;
	int mta_reg_count = E1000_NUM_MTA_REGISTERS;
	u32 *mcarray = kcalloc(mta_reg_count, sizeof(u32), GFP_ATOMIC);

	if (!mcarray) {
		e_err(probe, "memory allocation failed\n");
		return;
	}

	/* Check for Promiscuous and All Multicast modes */

	rctl = er32(RCTL);

	if (netdev->flags & IFF_PROMISC) {
		rctl |= (E1000_RCTL_UPE | E1000_RCTL_MPE);
		rctl &= ~E1000_RCTL_VFE;
	} else {
		if (netdev->flags & IFF_ALLMULTI)
			rctl |= E1000_RCTL_MPE;
		else
			rctl &= ~E1000_RCTL_MPE;
		/* Enable VLAN filter if there is a VLAN */
		if (adapter->vlgrp)
			rctl |= E1000_RCTL_VFE;
	}

	if (netdev_uc_count(netdev) > rar_entries - 1) {
		rctl |= E1000_RCTL_UPE;
	} else if (!(netdev->flags & IFF_PROMISC)) {
		rctl &= ~E1000_RCTL_UPE;
		use_uc = true;
	}

	ew32(RCTL, rctl);

	/* 82542 2.0 needs to be in reset to write receive address registers */

	if (hw->mac_type == e1000_82542_rev2_0)
		e1000_enter_82542_rst(adapter);

	/* load the first 14 addresses into the exact filters 1-14. Unicast
	 * addresses take precedence to avoid disabling unicast filtering
	 * when possible.
	 *
	 * RAR 0 is used for the station MAC address;
	 * if there are fewer than 14 addresses, clear the unused filters.
	 */
	i = 1;
	if (use_uc)
		netdev_for_each_uc_addr(ha, netdev) {
			if (i == rar_entries)
				break;
			e1000_rar_set(hw, ha->addr, i++);
		}

	netdev_for_each_mc_addr(ha, netdev) {
		if (i == rar_entries) {
			/* load any remaining addresses into the hash table */
			u32 hash_reg, hash_bit, mta;
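			/* the hash picks one of the 128 32-bit MTA
			 * registers (bits 11:5) and a bit within it
			 * (bits 4:0) */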
			hash_value = e1000_hash_mc_addr(hw, ha->addr);
			hash_reg = (hash_value >> 5) & 0x7F;
			hash_bit = hash_value & 0x1F;
			mta = (1 << hash_bit);
			mcarray[hash_reg] |= mta;
		} else {
			e1000_rar_set(hw, ha->addr, i++);
		}
	}

	for (; i < rar_entries; i++) {
		E1000_WRITE_REG_ARRAY(hw, RA, i << 1, 0);
		E1000_WRITE_FLUSH();
		E1000_WRITE_REG_ARRAY(hw, RA, (i << 1) + 1, 0);
		E1000_WRITE_FLUSH();
	}

	/* write the hash table completely, write from bottom to avoid
	 * both stupid write combining chipsets, and flushing each write */
	for (i = mta_reg_count - 1; i >= 0; i--) {
		/*
		 * The 82544 has an errata where writing an odd offset
		 * overwrites the previous even offset; writing the range
		 * backwards works around it by always writing the odd
		 * offset first.
		 */
		E1000_WRITE_REG_ARRAY(hw, MTA, i, mcarray[i]);
	}
	E1000_WRITE_FLUSH();

	if (hw->mac_type == e1000_82542_rev2_0)
		e1000_leave_82542_rst(adapter);

	kfree(mcarray);
}

/* Need to wait a few seconds after link up to get diagnostic information from
 * the phy */

static void e1000_update_phy_info(unsigned long data)
{
	struct e1000_adapter *adapter = (struct e1000_adapter *)data;
	schedule_work(&adapter->phy_info_task);
}

static void e1000_update_phy_info_task(struct work_struct *work)
{
	struct e1000_adapter *adapter = container_of(work,
						     struct e1000_adapter,
						     phy_info_task);
	struct e1000_hw *hw = &adapter->hw;

	rtnl_lock();
	e1000_phy_get_info(hw, &adapter->phy_info);
	rtnl_unlock();
}

/**
 * e1000_82547_tx_fifo_stall - Timer Call-back
 * @data: pointer to adapter cast into an unsigned long
 **/
static void e1000_82547_tx_fifo_stall(unsigned long data)
{
	struct e1000_adapter *adapter = (struct e1000_adapter *)data;
	schedule_work(&adapter->fifo_stall_task);
}

/**
 * e1000_82547_tx_fifo_stall_task - task to complete work
 * @work: work struct contained inside adapter struct
 **/
static void e1000_82547_tx_fifo_stall_task(struct work_struct *work)
{
	struct e1000_adapter *adapter = container_of(work,
						     struct e1000_adapter,
						     fifo_stall_task);
	struct e1000_hw *hw = &adapter->hw;
	struct net_device *netdev = adapter->netdev;
	u32 tctl;

	rtnl_lock();
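	/* only reset the FIFO pointers once both the descriptor ring
	 * (TDT == TDH) and the on-chip Tx data FIFO (head/tail and their
	 * saved copies) have fully drained */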
	if (atomic_read(&adapter->tx_fifo_stall)) {
		if ((er32(TDT) == er32(TDH)) &&
		    (er32(TDFT) == er32(TDFH)) &&
		    (er32(TDFTS) == er32(TDFHS))) {
			tctl = er32(TCTL);
			ew32(TCTL, tctl & ~E1000_TCTL_EN);
			ew32(TDFT, adapter->tx_head_addr);
			ew32(TDFH, adapter->tx_head_addr);
			ew32(TDFTS, adapter->tx_head_addr);
			ew32(TDFHS, adapter->tx_head_addr);
			ew32(TCTL, tctl);
			E1000_WRITE_FLUSH();

			adapter->tx_fifo_head = 0;
			atomic_set(&adapter->tx_fifo_stall, 0);
			netif_wake_queue(netdev);
		} else if (!test_bit(__E1000_DOWN, &adapter->flags)) {
			mod_timer(&adapter->tx_fifo_stall_timer, jiffies + 1);
		}
	}
	rtnl_unlock();
}

bool e1000_has_link(struct e1000_adapter *adapter)
{
	struct e1000_hw *hw = &adapter->hw;
	bool link_active = false;

	/* get_link_status is set on LSC (link status) interrupt or
	 * rx sequence error interrupt.  It stays true until
	 * e1000_check_for_link establishes link, for copper
	 * adapters ONLY
	 */
	switch (hw->media_type) {
	case e1000_media_type_copper:
		if (hw->get_link_status) {
			e1000_check_for_link(hw);
			link_active = !hw->get_link_status;
		} else {
			link_active = true;
		}
		break;
	case e1000_media_type_fiber:
		e1000_check_for_link(hw);
		link_active = !!(er32(STATUS) & E1000_STATUS_LU);
		break;
	case e1000_media_type_internal_serdes:
		e1000_check_for_link(hw);
		link_active = hw->serdes_has_link;
		break;
	default:
		break;
	}

	return link_active;
}

/**
 * e1000_watchdog - Timer Call-back
 * @data: pointer to adapter cast into an unsigned long
 **/
static void e1000_watchdog(unsigned long data)
{
	struct e1000_adapter *adapter = (struct e1000_adapter *)data;
	struct e1000_hw *hw = &adapter->hw;
	struct net_device *netdev = adapter->netdev;
	struct e1000_tx_ring *txdr = adapter->tx_ring;
	u32 link, tctl;

	link = e1000_has_link(adapter);
	if ((netif_carrier_ok(netdev)) && link)
		goto link_up;

	if (link) {
		if (!netif_carrier_ok(netdev)) {
			u32 ctrl;
			bool txb2b = true;
			/* update snapshot of PHY registers on LSC */
			e1000_get_speed_and_duplex(hw,
						   &adapter->link_speed,
						   &adapter->link_duplex);

			ctrl = er32(CTRL);
			pr_info("%s NIC Link is Up %d Mbps %s, "
				"Flow Control: %s\n",
				netdev->name,
				adapter->link_speed,
				adapter->link_duplex == FULL_DUPLEX ?
				"Full Duplex" : "Half Duplex",
				((ctrl & E1000_CTRL_TFCE) && (ctrl &
				E1000_CTRL_RFCE)) ? "RX/TX" : ((ctrl &
				E1000_CTRL_RFCE) ? "RX" : ((ctrl &
				E1000_CTRL_TFCE) ? "TX" : "None")));

			/* adjust timeout factor according to speed/duplex */
			adapter->tx_timeout_factor = 1;
			switch (adapter->link_speed) {
			case SPEED_10:
				txb2b = false;
				adapter->tx_timeout_factor = 16;
				break;
			case SPEED_100:
				txb2b = false;
				/* maybe add some timeout factor ? */
				break;
			}

			/* enable transmits in the hardware */
			tctl = er32(TCTL);
			tctl |= E1000_TCTL_EN;
			ew32(TCTL, tctl);

			netif_carrier_on(netdev);
			if (!test_bit(__E1000_DOWN, &adapter->flags))
				mod_timer(&adapter->phy_info_timer,
					  round_jiffies(jiffies + 2 * HZ));
			adapter->smartspeed = 0;
		}
	} else {
		if (netif_carrier_ok(netdev)) {
			adapter->link_speed = 0;
			adapter->link_duplex = 0;
			pr_info("%s NIC Link is Down\n",
				netdev->name);
			netif_carrier_off(netdev);

			if (!test_bit(__E1000_DOWN, &adapter->flags))
				mod_timer(&adapter->phy_info_timer,
					  round_jiffies(jiffies + 2 * HZ));
		}

		e1000_smartspeed(adapter);
	}

link_up:
	e1000_update_stats(adapter);

	hw->tx_packet_delta = adapter->stats.tpt - adapter->tpt_old;
	adapter->tpt_old = adapter->stats.tpt;
	hw->collision_delta = adapter->stats.colc - adapter->colc_old;
	adapter->colc_old = adapter->stats.colc;

	adapter->gorcl = adapter->stats.gorcl - adapter->gorcl_old;
	adapter->gorcl_old = adapter->stats.gorcl;
	adapter->gotcl = adapter->stats.gotcl - adapter->gotcl_old;
	adapter->gotcl_old = adapter->stats.gotcl;

	e1000_update_adaptive(hw);

	if (!netif_carrier_ok(netdev)) {
		if (E1000_DESC_UNUSED(txdr) + 1 < txdr->count) {
			/* We've lost link, so the controller stops DMA,
			 * but we've got queued Tx work that's never going
			 * to get done, so reset controller to flush Tx.
			 * (Do the reset outside of interrupt context). */
			adapter->tx_timeout_count++;
			schedule_work(&adapter->reset_task);
			/* return immediately since reset is imminent */
			return;
		}
	}

	/* Simple mode for Interrupt Throttle Rate (ITR) */
	if (hw->mac_type >= e1000_82540 && adapter->itr_setting == 4) {
		/*
		 * Symmetric Tx/Rx gets a reduced ITR=2000;
		 * Total asymmetrical Tx or Rx gets ITR=8000;
		 * everyone else is between 2000-8000.
		 */
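		/* e.g. perfectly symmetric traffic gives dif == 0 and
		 * itr == 2000, while completely one-sided traffic gives
		 * dif == goc and itr == 8000 */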
		u32 goc = (adapter->gotcl + adapter->gorcl) / 10000;
		u32 dif = (adapter->gotcl > adapter->gorcl ?
			   adapter->gotcl - adapter->gorcl :
			   adapter->gorcl - adapter->gotcl) / 10000;
		u32 itr = goc > 0 ? (dif * 6000 / goc + 2000) : 8000;

		ew32(ITR, 1000000000 / (itr * 256));
	}

	/* Cause software interrupt to ensure rx ring is cleaned */
	ew32(ICS, E1000_ICS_RXDMT0);

	/* Force detection of hung controller every watchdog period */
	adapter->detect_tx_hung = true;

	/* Reset the timer */
	if (!test_bit(__E1000_DOWN, &adapter->flags))
		mod_timer(&adapter->watchdog_timer,
			  round_jiffies(jiffies + 2 * HZ));
}

enum latency_range {
	lowest_latency = 0,
	low_latency = 1,
	bulk_latency = 2,
	latency_invalid = 255
};

/**
 * e1000_update_itr - update the dynamic ITR value based on statistics
 * @adapter: pointer to adapter
 * @itr_setting: current adapter->itr
 * @packets: the number of packets during this measurement interval
 * @bytes: the number of bytes during this measurement interval
 *
 * Stores a new ITR value based on packets and byte
 * counts during the last interrupt.  The advantage of per interrupt
 * computation is faster updates and more accurate ITR for the current
 * traffic pattern.  Constants in this function were computed
 * based on theoretical maximum wire speed and thresholds were set based
 * on testing data as well as attempting to minimize response time
 * while increasing bulk throughput.
 * This functionality is controlled by the InterruptThrottleRate module
 * parameter (see e1000_param.c)
 **/
static unsigned int e1000_update_itr(struct e1000_adapter *adapter,
				     u16 itr_setting, int packets, int bytes)
{
	unsigned int retval = itr_setting;
	struct e1000_hw *hw = &adapter->hw;

	if (unlikely(hw->mac_type < e1000_82540))
		goto update_itr_done;

	if (packets == 0)
		goto update_itr_done;

	switch (itr_setting) {
	case lowest_latency:
		/* jumbo frames get bulk treatment */
		if (bytes/packets > 8000)
			retval = bulk_latency;
		else if ((packets < 5) && (bytes > 512))
			retval = low_latency;
		break;
	case low_latency:  /* 50 usec aka 20000 ints/s */
		if (bytes > 10000) {
			/* jumbo frames need bulk latency setting */
			if (bytes/packets > 8000)
				retval = bulk_latency;
			else if ((packets < 10) || ((bytes/packets) > 1200))
				retval = bulk_latency;
			else if ((packets > 35))
				retval = lowest_latency;
		} else if (bytes/packets > 2000)
			retval = bulk_latency;
		else if (packets <= 2 && bytes < 512)
			retval = lowest_latency;
		break;
	case bulk_latency: /* 250 usec aka 4000 ints/s */
		if (bytes > 25000) {
			if (packets > 35)
				retval = low_latency;
		} else if (bytes < 6000) {
			retval = low_latency;
		}
		break;
	}

update_itr_done:
	return retval;
}

static void e1000_set_itr(struct e1000_adapter *adapter)
{
	struct e1000_hw *hw = &adapter->hw;
	u16 current_itr;
	u32 new_itr = adapter->itr;

	if (unlikely(hw->mac_type < e1000_82540))
		return;

	/* for non-gigabit speeds, just fix the interrupt rate at 4000 */
	if (unlikely(adapter->link_speed != SPEED_1000)) {
		current_itr = 0;
		new_itr = 4000;
		goto set_itr_now;
	}

	adapter->tx_itr = e1000_update_itr(adapter,
					   adapter->tx_itr,
					   adapter->total_tx_packets,
					   adapter->total_tx_bytes);
	/* conservative mode (itr 3) eliminates the lowest_latency setting */
	if (adapter->itr_setting == 3 && adapter->tx_itr == lowest_latency)
		adapter->tx_itr = low_latency;

	adapter->rx_itr = e1000_update_itr(adapter,
					   adapter->rx_itr,
					   adapter->total_rx_packets,
					   adapter->total_rx_bytes);
	/* conservative mode (itr 3) eliminates the lowest_latency setting */
	if (adapter->itr_setting == 3 && adapter->rx_itr == lowest_latency)
		adapter->rx_itr = low_latency;

	current_itr = max(adapter->rx_itr, adapter->tx_itr);

	switch (current_itr) {
	/* counts and packets in update_itr are dependent on these numbers */
	case lowest_latency:
		new_itr = 70000;
		break;
	case low_latency:
		new_itr = 20000; /* aka hwitr = ~200 */
		break;
	case bulk_latency:
		new_itr = 4000;
		break;
	default:
		break;
	}

set_itr_now:
	if (new_itr != adapter->itr) {
		/* this attempts to bias the interrupt rate towards Bulk
		 * by adding intermediate steps when interrupt rate is
		 * increasing */
		new_itr = new_itr > adapter->itr ?
			  min(adapter->itr + (new_itr >> 2), new_itr) :
			  new_itr;
		adapter->itr = new_itr;
		ew32(ITR, 1000000000 / (new_itr * 256));
	}
}

#define E1000_TX_FLAGS_CSUM		0x00000001
#define E1000_TX_FLAGS_VLAN		0x00000002
#define E1000_TX_FLAGS_TSO		0x00000004
#define E1000_TX_FLAGS_IPV4		0x00000008
#define E1000_TX_FLAGS_VLAN_MASK	0xffff0000
#define E1000_TX_FLAGS_VLAN_SHIFT	16

static int e1000_tso(struct e1000_adapter *adapter,
		     struct e1000_tx_ring *tx_ring, struct sk_buff *skb)
{
	struct e1000_context_desc *context_desc;
	struct e1000_buffer *buffer_info;
	unsigned int i;
	u32 cmd_length = 0;
	u16 ipcse = 0, tucse, mss;
	u8 ipcss, ipcso, tucss, tucso, hdr_len;
	int err;

	if (skb_is_gso(skb)) {
		if (skb_header_cloned(skb)) {
			err = pskb_expand_head(skb, 0, 0, GFP_ATOMIC);
			if (err)
				return err;
		}

		hdr_len = skb_transport_offset(skb) + tcp_hdrlen(skb);
		mss = skb_shinfo(skb)->gso_size;
		if (skb->protocol == htons(ETH_P_IP)) {
			struct iphdr *iph = ip_hdr(skb);
			iph->tot_len = 0;
			iph->check = 0;
			tcp_hdr(skb)->check = ~csum_tcpudp_magic(iph->saddr,
								 iph->daddr, 0,
								 IPPROTO_TCP,
								 0);
			cmd_length = E1000_TXD_CMD_IP;
			ipcse = skb_transport_offset(skb) - 1;
		} else if (skb->protocol == htons(ETH_P_IPV6)) {
			ipv6_hdr(skb)->payload_len = 0;
			tcp_hdr(skb)->check =
				~csum_ipv6_magic(&ipv6_hdr(skb)->saddr,
						 &ipv6_hdr(skb)->daddr,
						 0, IPPROTO_TCP, 0);
			ipcse = 0;
		}
		ipcss = skb_network_offset(skb);
		ipcso = (void *)&(ip_hdr(skb)->check) - (void *)skb->data;
		tucss = skb_transport_offset(skb);
		tucso = (void *)&(tcp_hdr(skb)->check) - (void *)skb->data;
		tucse = 0;

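		/* the low-order bits of cmd_and_length carry the TSO
		 * payload length (frame length minus the header) next to
		 * the DEXT/TSE/TCP command bits */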
		cmd_length |= (E1000_TXD_CMD_DEXT | E1000_TXD_CMD_TSE |
			       E1000_TXD_CMD_TCP | (skb->len - (hdr_len)));

		i = tx_ring->next_to_use;
		context_desc = E1000_CONTEXT_DESC(*tx_ring, i);
		buffer_info = &tx_ring->buffer_info[i];

		context_desc->lower_setup.ip_fields.ipcss = ipcss;
		context_desc->lower_setup.ip_fields.ipcso = ipcso;
		context_desc->lower_setup.ip_fields.ipcse = cpu_to_le16(ipcse);
		context_desc->upper_setup.tcp_fields.tucss = tucss;
		context_desc->upper_setup.tcp_fields.tucso = tucso;
		context_desc->upper_setup.tcp_fields.tucse = cpu_to_le16(tucse);
		context_desc->tcp_seg_setup.fields.mss = cpu_to_le16(mss);
		context_desc->tcp_seg_setup.fields.hdr_len = hdr_len;
		context_desc->cmd_and_length = cpu_to_le32(cmd_length);

		buffer_info->time_stamp = jiffies;
		buffer_info->next_to_watch = i;

		if (++i == tx_ring->count)
			i = 0;
		tx_ring->next_to_use = i;

		return true;
	}
	return false;
}

static bool e1000_tx_csum(struct e1000_adapter *adapter,
			  struct e1000_tx_ring *tx_ring, struct sk_buff *skb)
{
	struct e1000_context_desc *context_desc;
	struct e1000_buffer *buffer_info;
	unsigned int i;
	u8 css;
	u32 cmd_len = E1000_TXD_CMD_DEXT;

	if (skb->ip_summed != CHECKSUM_PARTIAL)
		return false;

	switch (skb->protocol) {
	case cpu_to_be16(ETH_P_IP):
		if (ip_hdr(skb)->protocol == IPPROTO_TCP)
			cmd_len |= E1000_TXD_CMD_TCP;
		break;
	case cpu_to_be16(ETH_P_IPV6):
		/* XXX not handling all IPV6 headers */
		if (ipv6_hdr(skb)->nexthdr == IPPROTO_TCP)
			cmd_len |= E1000_TXD_CMD_TCP;
		break;
	default:
		if (unlikely(net_ratelimit()))
			e_warn(drv, "checksum_partial proto=%x!\n",
			       skb->protocol);
		break;
	}

	css = skb_transport_offset(skb);

	i = tx_ring->next_to_use;
	buffer_info = &tx_ring->buffer_info[i];
	context_desc = E1000_CONTEXT_DESC(*tx_ring, i);

	context_desc->lower_setup.ip_config = 0;
	context_desc->upper_setup.tcp_fields.tucss = css;
	context_desc->upper_setup.tcp_fields.tucso =
		css + skb->csum_offset;
	context_desc->upper_setup.tcp_fields.tucse = 0;
	context_desc->tcp_seg_setup.data = 0;
	context_desc->cmd_and_length = cpu_to_le32(cmd_len);

	buffer_info->time_stamp = jiffies;
	buffer_info->next_to_watch = i;

	if (unlikely(++i == tx_ring->count))
		i = 0;
	tx_ring->next_to_use = i;

	return true;
}

#define E1000_MAX_TXD_PWR	12
#define E1000_MAX_DATA_PER_TXD	(1 << E1000_MAX_TXD_PWR)

static int e1000_tx_map(struct e1000_adapter *adapter,
			struct e1000_tx_ring *tx_ring,
			struct sk_buff *skb, unsigned int first,
			unsigned int max_per_txd, unsigned int nr_frags,
			unsigned int mss)
{
	struct e1000_hw *hw = &adapter->hw;
	struct pci_dev *pdev = adapter->pdev;
	struct e1000_buffer *buffer_info;
	unsigned int len = skb_headlen(skb);
	unsigned int offset = 0, size, count = 0, i;
	unsigned int f;

	i = tx_ring->next_to_use;

	while (len) {
		buffer_info = &tx_ring->buffer_info[i];
		size = min(len, max_per_txd);
		/* Workaround for Controller erratum --
		 * descriptor for non-tso packet in a linear SKB that follows a
		 * tso gets written back prematurely before the data is fully
		 * DMA'd to the controller */
		if (!skb->data_len && tx_ring->last_tx_tso &&
		    !skb_is_gso(skb)) {
			tx_ring->last_tx_tso = 0;
			size -= 4;
		}

		/* Workaround for premature desc write-backs
		 * in TSO mode.  Append 4-byte sentinel desc */
		if (unlikely(mss && !nr_frags && size == len && size > 8))
			size -= 4;
		/* work-around for errata 10 and it applies
		 * to all controllers in PCI-X mode
		 * The fix is to make sure that the first descriptor of a
		 * packet is smaller than 2048 - 16 - 16 (or 2016) bytes
		 */
		if (unlikely((hw->bus_type == e1000_bus_type_pcix) &&
			     (size > 2015) && count == 0))
			size = 2015;

		/* Workaround for potential 82544 hang in PCI-X.  Avoid
		 * terminating buffers within evenly-aligned dwords. */
		if (unlikely(adapter->pcix_82544 &&
			     !((unsigned long)(skb->data + offset + size - 1)
			       & 4) &&
			     size > 4))
			size -= 4;

		buffer_info->length = size;
		/* set time_stamp *before* dma to help avoid a possible race */
		buffer_info->time_stamp = jiffies;
		buffer_info->mapped_as_page = false;
		buffer_info->dma = dma_map_single(&pdev->dev,
						  skb->data + offset,
						  size, DMA_TO_DEVICE);
		if (dma_mapping_error(&pdev->dev, buffer_info->dma))
			goto dma_error;
		buffer_info->next_to_watch = i;

		len -= size;
		offset += size;
		count++;
		if (len) {
			i++;
			if (unlikely(i == tx_ring->count))
				i = 0;
		}
	}

	for (f = 0; f < nr_frags; f++) {
		struct skb_frag_struct *frag;

		frag = &skb_shinfo(skb)->frags[f];
		len = frag->size;
		offset = frag->page_offset;

		while (len) {
			i++;
			if (unlikely(i == tx_ring->count))
				i = 0;

			buffer_info = &tx_ring->buffer_info[i];
			size = min(len, max_per_txd);
			/* Workaround for premature desc write-backs
			 * in TSO mode.  Append 4-byte sentinel desc */
			if (unlikely(mss && f == (nr_frags-1) &&
				     size == len && size > 8))
				size -= 4;
			/* Workaround for potential 82544 hang in PCI-X.
			 * Avoid terminating buffers within evenly-aligned
			 * dwords. */
			if (unlikely(adapter->pcix_82544 &&
				     !((unsigned long)(page_to_phys(frag->page)
						       + offset + size - 1) & 4) &&
				     size > 4))
				size -= 4;

			buffer_info->length = size;
			buffer_info->time_stamp = jiffies;
			buffer_info->mapped_as_page = true;
			buffer_info->dma = dma_map_page(&pdev->dev, frag->page,
							offset, size,
							DMA_TO_DEVICE);
			if (dma_mapping_error(&pdev->dev, buffer_info->dma))
				goto dma_error;
			buffer_info->next_to_watch = i;

			len -= size;
			offset += size;
			count++;
		}
	}

	tx_ring->buffer_info[i].skb = skb;
	tx_ring->buffer_info[first].next_to_watch = i;

	return count;

dma_error:
	dev_err(&pdev->dev, "TX DMA map failed\n");
	buffer_info->dma = 0;
	if (count)
		count--;

	while (count--) {
		if (i == 0)
			i += tx_ring->count;
		i--;
		buffer_info = &tx_ring->buffer_info[i];
		e1000_unmap_and_free_tx_resource(adapter, buffer_info);
	}

	return 0;
}

static void e1000_tx_queue(struct e1000_adapter *adapter,
			   struct e1000_tx_ring *tx_ring, int tx_flags,
			   int count)
{
	struct e1000_hw *hw = &adapter->hw;
	struct e1000_tx_desc *tx_desc = NULL;
	struct e1000_buffer *buffer_info;
	u32 txd_upper = 0, txd_lower = E1000_TXD_CMD_IFCS;
	unsigned int i;

	if (likely(tx_flags & E1000_TX_FLAGS_TSO)) {
		txd_lower |= E1000_TXD_CMD_DEXT | E1000_TXD_DTYP_D |
			     E1000_TXD_CMD_TSE;
		txd_upper |= E1000_TXD_POPTS_TXSM << 8;

		if (likely(tx_flags & E1000_TX_FLAGS_IPV4))
			txd_upper |= E1000_TXD_POPTS_IXSM << 8;
	}

	if (likely(tx_flags & E1000_TX_FLAGS_CSUM)) {
		txd_lower |= E1000_TXD_CMD_DEXT | E1000_TXD_DTYP_D;
		txd_upper |= E1000_TXD_POPTS_TXSM << 8;
	}

	if (unlikely(tx_flags & E1000_TX_FLAGS_VLAN)) {
		txd_lower |= E1000_TXD_CMD_VLE;
		txd_upper |= (tx_flags & E1000_TX_FLAGS_VLAN_MASK);
	}

	i = tx_ring->next_to_use;

	while (count--) {
		buffer_info = &tx_ring->buffer_info[i];
		tx_desc = E1000_TX_DESC(*tx_ring, i);
		tx_desc->buffer_addr = cpu_to_le64(buffer_info->dma);
		tx_desc->lower.data =
			cpu_to_le32(txd_lower | buffer_info->length);
		tx_desc->upper.data = cpu_to_le32(txd_upper);
		if (unlikely(++i == tx_ring->count))
			i = 0;
	}

	tx_desc->lower.data |= cpu_to_le32(adapter->txd_cmd);

	/* Force memory writes to complete before letting h/w
	 * know there are new descriptors to fetch.  (Only
	 * applicable for weak-ordered memory model archs,
	 * such as IA-64). */
	wmb();

	tx_ring->next_to_use = i;
	writel(i, hw->hw_addr + tx_ring->tdt);
	/* we need this if more than one processor can write to our tail
	 * at a time, it synchronizes IO on IA64/Altix systems */
	mmiowb();
}

/**
 * 82547 workaround to avoid controller hang in half-duplex environment.
 * The workaround is to avoid queuing a large packet that would span
 * the internal Tx FIFO ring boundary by notifying the stack to resend
 * the packet at a later time. This gives the Tx FIFO an opportunity to
 * flush all packets. When that occurs, we reset the Tx FIFO pointers
 * to the beginning of the Tx FIFO.
 **/

#define E1000_FIFO_HDR		0x10
#define E1000_82547_PAD_LEN	0x3E0

static int e1000_82547_fifo_workaround(struct e1000_adapter *adapter,
				       struct sk_buff *skb)
{
	u32 fifo_space = adapter->tx_fifo_size - adapter->tx_fifo_head;
	u32 skb_fifo_len = skb->len + E1000_FIFO_HDR;

	skb_fifo_len = ALIGN(skb_fifo_len, E1000_FIFO_HDR);

	if (adapter->link_duplex != HALF_DUPLEX)
		goto no_fifo_stall_required;

	if (atomic_read(&adapter->tx_fifo_stall))
		return 1;

	if (skb_fifo_len >= (E1000_82547_PAD_LEN + fifo_space)) {
		atomic_set(&adapter->tx_fifo_stall, 1);
		return 1;
	}

no_fifo_stall_required:
	adapter->tx_fifo_head += skb_fifo_len;
	if (adapter->tx_fifo_head >= adapter->tx_fifo_size)
		adapter->tx_fifo_head -= adapter->tx_fifo_size;
	return 0;
}

static int __e1000_maybe_stop_tx(struct net_device *netdev, int size)
{
	struct e1000_adapter *adapter = netdev_priv(netdev);
	struct e1000_tx_ring *tx_ring = adapter->tx_ring;

	netif_stop_queue(netdev);
	/* Herbert's original patch had:
	 *  smp_mb__after_netif_stop_queue();
	 * but since that doesn't exist yet, just open code it. */
	smp_mb();

	/* We need to check again in a case another CPU has just
	 * made room available. */
	if (likely(E1000_DESC_UNUSED(tx_ring) < size))
		return -EBUSY;

	/* A reprieve! */
	netif_start_queue(netdev);
	++adapter->restart_queue;
	return 0;
}

static int e1000_maybe_stop_tx(struct net_device *netdev,
			       struct e1000_tx_ring *tx_ring, int size)
{
	if (likely(E1000_DESC_UNUSED(tx_ring) >= size))
		return 0;
	return __e1000_maybe_stop_tx(netdev, size);
}

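/* Conservative count of descriptors needed for S bytes when one descriptor
 * carries at most 2^X bytes: (S >> X) + 1, which over-counts by one
 * descriptor when S is an exact multiple of 2^X.
 */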
#define TXD_USE_COUNT(S, X) (((S) >> (X)) + 1)
static netdev_tx_t e1000_xmit_frame(struct sk_buff *skb,
				    struct net_device *netdev)
{
	struct e1000_adapter *adapter = netdev_priv(netdev);
	struct e1000_hw *hw = &adapter->hw;
	struct e1000_tx_ring *tx_ring;
	unsigned int first, max_per_txd = E1000_MAX_DATA_PER_TXD;
	unsigned int max_txd_pwr = E1000_MAX_TXD_PWR;
	unsigned int tx_flags = 0;
	unsigned int len = skb_headlen(skb);
	unsigned int nr_frags;
	unsigned int mss;
	int count = 0;
	int tso;
	unsigned int f;

	/* This goes back to the question of how to logically map a tx queue
	 * to a flow.  Right now, performance is impacted slightly negatively
	 * if using multiple tx queues.  If the stack breaks away from a
	 * single qdisc implementation, we can look at this again. */
	tx_ring = adapter->tx_ring;

	if (unlikely(skb->len <= 0)) {
		dev_kfree_skb_any(skb);
		return NETDEV_TX_OK;
	}

	mss = skb_shinfo(skb)->gso_size;
	/* The controller does a simple calculation to
	 * make sure there is enough room in the FIFO before
	 * initiating the DMA for each buffer.  The calc is:
	 * 4 = ceil(buffer len/mss).  To make sure we don't
	 * overrun the FIFO, adjust the max buffer len if mss
	 * drops. */
	if (mss) {
		u8 hdr_len;
		max_per_txd = min(mss << 2, max_per_txd);
		max_txd_pwr = fls(max_per_txd) - 1;

		hdr_len = skb_transport_offset(skb) + tcp_hdrlen(skb);
		if (skb->data_len && hdr_len == len) {
			switch (hw->mac_type) {
			unsigned int pull_size;
			case e1000_82544:
				/* Make sure we have room to chop off 4 bytes,
				 * and that the end alignment will work out to
				 * this hardware's requirements
				 * NOTE: this is a TSO only workaround
				 * if end byte alignment not correct move us
				 * into the next dword */
				if ((unsigned long)(skb_tail_pointer(skb) - 1)
				    & 4)
					break;
				pull_size = min((unsigned int)4,
						skb->data_len);
				if (!__pskb_pull_tail(skb, pull_size)) {
					e_err(drv,
					      "__pskb_pull_tail failed.\n");
					dev_kfree_skb_any(skb);
					return NETDEV_TX_OK;
				}
				len = skb_headlen(skb);
				break;
			default:
				/* do nothing */
				break;
			}
		}
	}

	/* reserve a descriptor for the offload context */
	if ((mss) || (skb->ip_summed == CHECKSUM_PARTIAL))
		count++;
	count++;

	/* Controller Erratum workaround */
	if (!skb->data_len && tx_ring->last_tx_tso && !skb_is_gso(skb))
		count++;

	count += TXD_USE_COUNT(len, max_txd_pwr);

	if (adapter->pcix_82544)
		count++;

	/* work-around for errata 10 and it applies to all controllers
	 * in PCI-X mode, so add one more descriptor to the count
	 */
	if (unlikely((hw->bus_type == e1000_bus_type_pcix) &&
		     (len > 2015)))
		count++;

	nr_frags = skb_shinfo(skb)->nr_frags;
	for (f = 0; f < nr_frags; f++)
		count += TXD_USE_COUNT(skb_shinfo(skb)->frags[f].size,
				       max_txd_pwr);
	if (adapter->pcix_82544)
		count += nr_frags;

	/* need: count + 2 desc gap to keep tail from touching
	 * head, otherwise try next time */
	if (unlikely(e1000_maybe_stop_tx(netdev, tx_ring, count + 2)))
		return NETDEV_TX_BUSY;

	if (unlikely(hw->mac_type == e1000_82547)) {
		if (unlikely(e1000_82547_fifo_workaround(adapter, skb))) {
			netif_stop_queue(netdev);
			if (!test_bit(__E1000_DOWN, &adapter->flags))
				mod_timer(&adapter->tx_fifo_stall_timer,
					  jiffies + 1);
			return NETDEV_TX_BUSY;
		}
	}

	if (unlikely(vlan_tx_tag_present(skb))) {
		tx_flags |= E1000_TX_FLAGS_VLAN;
		tx_flags |= (vlan_tx_tag_get(skb) << E1000_TX_FLAGS_VLAN_SHIFT);
	}

	first = tx_ring->next_to_use;

	tso = e1000_tso(adapter, tx_ring, skb);
	if (tso < 0) {
		dev_kfree_skb_any(skb);
		return NETDEV_TX_OK;
	}

	if (likely(tso)) {
		if (likely(hw->mac_type != e1000_82544))
			tx_ring->last_tx_tso = 1;
		tx_flags |= E1000_TX_FLAGS_TSO;
	} else if (likely(e1000_tx_csum(adapter, tx_ring, skb)))
		tx_flags |= E1000_TX_FLAGS_CSUM;

	if (likely(skb->protocol == htons(ETH_P_IP)))
		tx_flags |= E1000_TX_FLAGS_IPV4;

	count = e1000_tx_map(adapter, tx_ring, skb, first, max_per_txd,
			     nr_frags, mss);

	if (count) {
		e1000_tx_queue(adapter, tx_ring, tx_flags, count);
		/* Make sure there is space in the ring for the next send. */
		e1000_maybe_stop_tx(netdev, tx_ring, MAX_SKB_FRAGS + 2);
	} else {
		dev_kfree_skb_any(skb);
		tx_ring->buffer_info[first].time_stamp = 0;
		tx_ring->next_to_use = first;
	}

	return NETDEV_TX_OK;
}

/**
 * e1000_tx_timeout - Respond to a Tx Hang
 * @netdev: network interface device structure
 **/

static void e1000_tx_timeout(struct net_device *netdev)
{
	struct e1000_adapter *adapter = netdev_priv(netdev);

	/* Do the reset outside of interrupt context */
	adapter->tx_timeout_count++;
	schedule_work(&adapter->reset_task);
}

static void e1000_reset_task(struct work_struct *work)
{
	struct e1000_adapter *adapter =
		container_of(work, struct e1000_adapter, reset_task);

	e1000_reinit_safe(adapter);
}

/**
 * e1000_get_stats - Get System Network Statistics
 * @netdev: network interface device structure
 *
 * Returns the address of the device statistics structure.
 * The statistics are actually updated from the timer callback.
 **/

static struct net_device_stats *e1000_get_stats(struct net_device *netdev)
{
	/* only return the current stats */
	return &netdev->stats;
}

/**
 * e1000_change_mtu - Change the Maximum Transfer Unit
 * @netdev: network interface device structure
 * @new_mtu: new value for maximum frame size
 *
 * Returns 0 on success, negative on failure
 **/

static int e1000_change_mtu(struct net_device *netdev, int new_mtu)
{
	struct e1000_adapter *adapter = netdev_priv(netdev);
	struct e1000_hw *hw = &adapter->hw;
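	/* max_frame is the MTU plus the Ethernet header (14 bytes)
	 * and the frame check sequence (4 bytes) */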
3216 int max_frame = new_mtu + ENET_HEADER_SIZE + ETHERNET_FCS_SIZE; |
|
3217 |
|
	if ((max_frame < MINIMUM_ETHERNET_FRAME_SIZE) ||
	    (max_frame > MAX_JUMBO_FRAME_SIZE)) {
		e_err(probe, "Invalid MTU setting\n");
		return -EINVAL;
	}

	/* Adapter-specific max frame size limits. */
	switch (hw->mac_type) {
	case e1000_undefined ... e1000_82542_rev2_1:
		if (max_frame > (ETH_FRAME_LEN + ETH_FCS_LEN)) {
			e_err(probe, "Jumbo Frames not supported.\n");
			return -EINVAL;
		}
		break;
	default:
		/* Capable of supporting up to MAX_JUMBO_FRAME_SIZE limit. */
		break;
	}

	while (test_and_set_bit(__E1000_RESETTING, &adapter->flags))
		msleep(1);
	/* e1000_down has a dependency on max_frame_size */
	hw->max_frame_size = max_frame;
	if (netif_running(netdev))
		e1000_down(adapter);

	/* NOTE: netdev_alloc_skb reserves 16 bytes, and typically NET_IP_ALIGN
	 * means we reserve 2 more, this pushes us to allocate from the next
	 * larger slab size.
	 * i.e. RXBUFFER_2048 --> size-4096 slab
	 * however with the new *_jumbo_rx* routines, jumbo receives will use
	 * fragmented skbs
	 */
	if (max_frame <= E1000_RXBUFFER_2048)
		adapter->rx_buffer_len = E1000_RXBUFFER_2048;
	else
#if (PAGE_SIZE >= E1000_RXBUFFER_16384)
		adapter->rx_buffer_len = E1000_RXBUFFER_16384;
#elif (PAGE_SIZE >= E1000_RXBUFFER_4096)
		adapter->rx_buffer_len = PAGE_SIZE;
#endif

	/* adjust allocation if LPE protects us, and we aren't using SBP */
	if (!hw->tbi_compatibility_on &&
	    ((max_frame == (ETH_FRAME_LEN + ETH_FCS_LEN)) ||
	     (max_frame == MAXIMUM_ETHERNET_VLAN_SIZE)))
		adapter->rx_buffer_len = MAXIMUM_ETHERNET_VLAN_SIZE;

	pr_info("%s changing MTU from %d to %d\n",
		netdev->name, netdev->mtu, new_mtu);
	netdev->mtu = new_mtu;

	if (netif_running(netdev))
		e1000_up(adapter);
	else
		e1000_reset(adapter);

	clear_bit(__E1000_RESETTING, &adapter->flags);

	return 0;
}

/**
 * e1000_update_stats - Update the board statistics counters
 * @adapter: board private structure
 **/
void e1000_update_stats(struct e1000_adapter *adapter)
{
	struct net_device *netdev = adapter->netdev;
	struct e1000_hw *hw = &adapter->hw;
	struct pci_dev *pdev = adapter->pdev;
	unsigned long flags;
	u16 phy_tmp;

#define PHY_IDLE_ERROR_COUNT_MASK 0x00FF

	/* Prevent stats update while adapter is being reset, or if the pci
	 * connection is down.
	 */
	if (adapter->link_speed == 0)
		return;
	if (pci_channel_offline(pdev))
		return;

	spin_lock_irqsave(&adapter->stats_lock, flags);

	/* these counters are modified from e1000_tbi_adjust_stats,
	 * called from the interrupt context, so they must only
	 * be written while holding adapter->stats_lock
	 */

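	/* The hardware statistics registers are clear-on-read, so each
	 * read returns the delta since the last read; accumulate those
	 * deltas into the software counters. Registers such as
	 * GORCL/GORCH are the low and high halves of 64-bit octet counts.
	 */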
	adapter->stats.crcerrs += er32(CRCERRS);
	adapter->stats.gprc += er32(GPRC);
	adapter->stats.gorcl += er32(GORCL);
	adapter->stats.gorch += er32(GORCH);
	adapter->stats.bprc += er32(BPRC);
	adapter->stats.mprc += er32(MPRC);
	adapter->stats.roc += er32(ROC);

	adapter->stats.prc64 += er32(PRC64);
	adapter->stats.prc127 += er32(PRC127);
	adapter->stats.prc255 += er32(PRC255);
	adapter->stats.prc511 += er32(PRC511);
	adapter->stats.prc1023 += er32(PRC1023);
	adapter->stats.prc1522 += er32(PRC1522);

	adapter->stats.symerrs += er32(SYMERRS);
	adapter->stats.mpc += er32(MPC);
	adapter->stats.scc += er32(SCC);
	adapter->stats.ecol += er32(ECOL);
	adapter->stats.mcc += er32(MCC);
	adapter->stats.latecol += er32(LATECOL);
	adapter->stats.dc += er32(DC);
	adapter->stats.sec += er32(SEC);
	adapter->stats.rlec += er32(RLEC);
	adapter->stats.xonrxc += er32(XONRXC);
	adapter->stats.xontxc += er32(XONTXC);
	adapter->stats.xoffrxc += er32(XOFFRXC);
	adapter->stats.xofftxc += er32(XOFFTXC);
	adapter->stats.fcruc += er32(FCRUC);
	adapter->stats.gptc += er32(GPTC);
	adapter->stats.gotcl += er32(GOTCL);
	adapter->stats.gotch += er32(GOTCH);
	adapter->stats.rnbc += er32(RNBC);
	adapter->stats.ruc += er32(RUC);
	adapter->stats.rfc += er32(RFC);
	adapter->stats.rjc += er32(RJC);
	adapter->stats.torl += er32(TORL);
	adapter->stats.torh += er32(TORH);
	adapter->stats.totl += er32(TOTL);
	adapter->stats.toth += er32(TOTH);
	adapter->stats.tpr += er32(TPR);

	adapter->stats.ptc64 += er32(PTC64);
	adapter->stats.ptc127 += er32(PTC127);
	adapter->stats.ptc255 += er32(PTC255);
	adapter->stats.ptc511 += er32(PTC511);
	adapter->stats.ptc1023 += er32(PTC1023);
	adapter->stats.ptc1522 += er32(PTC1522);

	adapter->stats.mptc += er32(MPTC);
	adapter->stats.bptc += er32(BPTC);

	/* used for adaptive IFS */
	hw->tx_packet_delta = er32(TPT);
	adapter->stats.tpt += hw->tx_packet_delta;
	hw->collision_delta = er32(COLC);
	adapter->stats.colc += hw->collision_delta;

	if (hw->mac_type >= e1000_82543) {
		adapter->stats.algnerrc += er32(ALGNERRC);
		adapter->stats.rxerrc += er32(RXERRC);
		adapter->stats.tncrs += er32(TNCRS);
		adapter->stats.cexterr += er32(CEXTERR);
		adapter->stats.tsctc += er32(TSCTC);
		adapter->stats.tsctfc += er32(TSCTFC);
	}

	/* Fill out the OS statistics structure */
	netdev->stats.multicast = adapter->stats.mprc;
	netdev->stats.collisions = adapter->stats.colc;

	/* Rx Errors */

	/* RLEC on some newer hardware can be incorrect so build
	 * our own version based on RUC and ROC
	 */
	netdev->stats.rx_errors = adapter->stats.rxerrc +
		adapter->stats.crcerrs + adapter->stats.algnerrc +
		adapter->stats.ruc + adapter->stats.roc +
		adapter->stats.cexterr;
	adapter->stats.rlerrc = adapter->stats.ruc + adapter->stats.roc;
	netdev->stats.rx_length_errors = adapter->stats.rlerrc;
	netdev->stats.rx_crc_errors = adapter->stats.crcerrs;
	netdev->stats.rx_frame_errors = adapter->stats.algnerrc;
	netdev->stats.rx_missed_errors = adapter->stats.mpc;

	/* Tx Errors */
	adapter->stats.txerrc = adapter->stats.ecol + adapter->stats.latecol;
	netdev->stats.tx_errors = adapter->stats.txerrc;
	netdev->stats.tx_aborted_errors = adapter->stats.ecol;
	netdev->stats.tx_window_errors = adapter->stats.latecol;
	netdev->stats.tx_carrier_errors = adapter->stats.tncrs;
	if (hw->bad_tx_carr_stats_fd &&
	    adapter->link_duplex == FULL_DUPLEX) {
		netdev->stats.tx_carrier_errors = 0;
		adapter->stats.tncrs = 0;
	}

	/* Tx Dropped needs to be maintained elsewhere */

	/* Phy Stats */
	if (hw->media_type == e1000_media_type_copper) {
		if ((adapter->link_speed == SPEED_1000) &&
		    (!e1000_read_phy_reg(hw, PHY_1000T_STATUS, &phy_tmp))) {
			phy_tmp &= PHY_IDLE_ERROR_COUNT_MASK;
			adapter->phy_stats.idle_errors += phy_tmp;
		}

		if ((hw->mac_type <= e1000_82546) &&
		    (hw->phy_type == e1000_phy_m88) &&
		    !e1000_read_phy_reg(hw, M88E1000_RX_ERR_CNTR, &phy_tmp))
			adapter->phy_stats.receive_errors += phy_tmp;
	}

	/* Management Stats */
	if (hw->has_smbus) {
		adapter->stats.mgptc += er32(MGTPTC);
		adapter->stats.mgprc += er32(MGTPRC);
		adapter->stats.mgpdc += er32(MGTPDC);
	}

	spin_unlock_irqrestore(&adapter->stats_lock, flags);
}

/**
 * e1000_intr - Interrupt Handler
 * @irq: interrupt number
 * @data: pointer to a network interface device structure
 **/
static irqreturn_t e1000_intr(int irq, void *data)
{
	struct net_device *netdev = data;
	struct e1000_adapter *adapter = netdev_priv(netdev);
	struct e1000_hw *hw = &adapter->hw;
	u32 icr = er32(ICR);

	if (unlikely((!icr) || test_bit(__E1000_DOWN, &adapter->flags)))
		return IRQ_NONE;  /* Not our interrupt */

	if (unlikely(icr & (E1000_ICR_RXSEQ | E1000_ICR_LSC))) {
		hw->get_link_status = 1;
		/* guard against interrupt when we're going down */
		if (!test_bit(__E1000_DOWN, &adapter->flags))
			mod_timer(&adapter->watchdog_timer, jiffies + 1);
	}

	/* disable interrupts, without the synchronize_irq bit */
	ew32(IMC, ~0);
	E1000_WRITE_FLUSH();

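	/* Zero the per-poll byte/packet totals before handing off to NAPI;
	 * e1000_set_itr() uses what the poll accumulates to retune the
	 * interrupt throttling rate.
	 */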
	if (likely(napi_schedule_prep(&adapter->napi))) {
		adapter->total_tx_bytes = 0;
		adapter->total_tx_packets = 0;
		adapter->total_rx_bytes = 0;
		adapter->total_rx_packets = 0;
		__napi_schedule(&adapter->napi);
	} else {
		/* this really should not happen! if it does it is basically a
		 * bug, but not a hard error, so enable ints and continue
		 */
		if (!test_bit(__E1000_DOWN, &adapter->flags))
			e1000_irq_enable(adapter);
	}

	return IRQ_HANDLED;
}

/**
 * e1000_clean - NAPI Rx polling callback
 * @napi: napi struct containing our adapter
 * @budget: maximum amount of packet cleanup allowed this poll
 **/
static int e1000_clean(struct napi_struct *napi, int budget)
{
	struct e1000_adapter *adapter = container_of(napi, struct e1000_adapter,
						     napi);
	int tx_clean_complete = 0, work_done = 0;

	tx_clean_complete = e1000_clean_tx_irq(adapter, &adapter->tx_ring[0]);

	adapter->clean_rx(adapter, &adapter->rx_ring[0], &work_done, budget);

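	/* If Tx cleanup did not finish within one ring's worth of work,
	 * report the full budget as consumed so NAPI schedules us again.
	 */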
	if (!tx_clean_complete)
		work_done = budget;

	/* If budget not fully consumed, exit the polling mode */
	if (work_done < budget) {
		if (likely(adapter->itr_setting & 3))
			e1000_set_itr(adapter);
		napi_complete(napi);
		if (!test_bit(__E1000_DOWN, &adapter->flags))
			e1000_irq_enable(adapter);
	}

	return work_done;
}

/**
 * e1000_clean_tx_irq - Reclaim resources after transmit completes
 * @adapter: board private structure
 * @tx_ring: ring to clean
 **/
static bool e1000_clean_tx_irq(struct e1000_adapter *adapter,
			       struct e1000_tx_ring *tx_ring)
{
	struct e1000_hw *hw = &adapter->hw;
	struct net_device *netdev = adapter->netdev;
	struct e1000_tx_desc *tx_desc, *eop_desc;
	struct e1000_buffer *buffer_info;
	unsigned int i, eop;
	unsigned int count = 0;
	unsigned int total_tx_bytes = 0, total_tx_packets = 0;

	i = tx_ring->next_to_clean;
	eop = tx_ring->buffer_info[i].next_to_watch;
	eop_desc = E1000_TX_DESC(*tx_ring, eop);

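	/* Descriptors are reclaimed a packet at a time: next_to_watch marks
	 * the end-of-packet descriptor, and the DD (descriptor done) bit on
	 * it means hardware has finished with the whole packet.
	 */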
	while ((eop_desc->upper.data & cpu_to_le32(E1000_TXD_STAT_DD)) &&
	       (count < tx_ring->count)) {
		bool cleaned = false;
		rmb();	/* read buffer_info after eop_desc */
		for ( ; !cleaned; count++) {
			tx_desc = E1000_TX_DESC(*tx_ring, i);
			buffer_info = &tx_ring->buffer_info[i];
			cleaned = (i == eop);

			if (cleaned) {
				struct sk_buff *skb = buffer_info->skb;
				unsigned int segs, bytecount;
				segs = skb_shinfo(skb)->gso_segs ?: 1;
				/* multiply data chunks by size of headers */
				bytecount = ((segs - 1) * skb_headlen(skb)) +
					    skb->len;
				total_tx_packets += segs;
				total_tx_bytes += bytecount;
			}
			e1000_unmap_and_free_tx_resource(adapter, buffer_info);
			tx_desc->upper.data = 0;

			if (unlikely(++i == tx_ring->count))
				i = 0;
		}

		eop = tx_ring->buffer_info[i].next_to_watch;
		eop_desc = E1000_TX_DESC(*tx_ring, eop);
	}

	tx_ring->next_to_clean = i;

#define TX_WAKE_THRESHOLD 32
	if (unlikely(count && netif_carrier_ok(netdev) &&
		     E1000_DESC_UNUSED(tx_ring) >= TX_WAKE_THRESHOLD)) {
		/* Make sure that anybody stopping the queue after this
		 * sees the new next_to_clean.
		 */
		smp_mb();

		if (netif_queue_stopped(netdev) &&
		    !(test_bit(__E1000_DOWN, &adapter->flags))) {
			netif_wake_queue(netdev);
			++adapter->restart_queue;
		}
	}

	if (adapter->detect_tx_hung) {
		/* Detect a transmit hang in hardware, this serializes the
		 * check with the clearing of time_stamp and movement of i
		 */
		adapter->detect_tx_hung = false;
		if (tx_ring->buffer_info[eop].time_stamp &&
		    time_after(jiffies, tx_ring->buffer_info[eop].time_stamp +
			       (adapter->tx_timeout_factor * HZ)) &&
		    !(er32(STATUS) & E1000_STATUS_TXOFF)) {

			/* detected Tx unit hang; note that pointer
			 * subtraction already yields an element index,
			 * so no division by the struct size is needed
			 */
			e_err(drv, "Detected Tx Unit Hang\n"
			      "  Tx Queue             <%lu>\n"
			      "  TDH                  <%x>\n"
			      "  TDT                  <%x>\n"
			      "  next_to_use          <%x>\n"
			      "  next_to_clean        <%x>\n"
			      "buffer_info[next_to_clean]\n"
			      "  time_stamp           <%lx>\n"
			      "  next_to_watch        <%x>\n"
			      "  jiffies              <%lx>\n"
			      "  next_to_watch.status <%x>\n",
			      (unsigned long)(tx_ring - adapter->tx_ring),
			      readl(hw->hw_addr + tx_ring->tdh),
			      readl(hw->hw_addr + tx_ring->tdt),
			      tx_ring->next_to_use,
			      tx_ring->next_to_clean,
			      tx_ring->buffer_info[eop].time_stamp,
			      eop,
			      jiffies,
			      eop_desc->upper.fields.status);
			netif_stop_queue(netdev);
		}
	}
	adapter->total_tx_bytes += total_tx_bytes;
	adapter->total_tx_packets += total_tx_packets;
	netdev->stats.tx_bytes += total_tx_bytes;
	netdev->stats.tx_packets += total_tx_packets;
	return count < tx_ring->count;
}

/**
 * e1000_rx_checksum - Receive Checksum Offload for 82543
 * @adapter: board private structure
 * @status_err: receive descriptor status and error fields
 * @csum: receive descriptor csum field
 * @skb: socket buffer with received data
 **/
static void e1000_rx_checksum(struct e1000_adapter *adapter, u32 status_err,
			      u32 csum, struct sk_buff *skb)
{
	struct e1000_hw *hw = &adapter->hw;
	u16 status = (u16)status_err;
	u8 errors = (u8)(status_err >> 24);

	skb_checksum_none_assert(skb);

	/* 82543 or newer only */
	if (unlikely(hw->mac_type < e1000_82543))
		return;
	/* Ignore Checksum bit is set */
	if (unlikely(status & E1000_RXD_STAT_IXSM))
		return;
	/* TCP/UDP checksum error bit is set */
	if (unlikely(errors & E1000_RXD_ERR_TCPE)) {
		/* let the stack verify checksum errors */
		adapter->hw_csum_err++;
		return;
	}
	/* TCP/UDP Checksum has not been calculated */
	if (!(status & E1000_RXD_STAT_TCPCS))
		return;

	/* It must be a TCP or UDP packet with a valid checksum */
	if (likely(status & E1000_RXD_STAT_TCPCS)) {
		/* TCP checksum is good */
		skb->ip_summed = CHECKSUM_UNNECESSARY;
	}
	adapter->hw_csum_good++;
}

/**
 * e1000_consume_page - helper function to account a used page to an skb
 * @bi: buffer_info whose page has been consumed
 * @skb: skb the page data now belongs to
 * @length: number of bytes consumed from the page
 **/
static void e1000_consume_page(struct e1000_buffer *bi, struct sk_buff *skb,
			       u16 length)
{
	bi->page = NULL;
	skb->len += length;
	skb->data_len += length;
	skb->truesize += length;
}

/**
 * e1000_receive_skb - helper function to handle rx indications
 * @adapter: board private structure
 * @status: descriptor status field as written by hardware
 * @vlan: descriptor vlan field as written by hardware (no le/be conversion)
 * @skb: pointer to sk_buff to be indicated to stack
 **/
static void e1000_receive_skb(struct e1000_adapter *adapter, u8 status,
			      __le16 vlan, struct sk_buff *skb)
{
	skb->protocol = eth_type_trans(skb, adapter->netdev);

	if (unlikely(adapter->vlgrp && (status & E1000_RXD_STAT_VP)))
		vlan_gro_receive(&adapter->napi, adapter->vlgrp,
				 le16_to_cpu(vlan) & E1000_RXD_SPC_VLAN_MASK,
				 skb);
	else
		napi_gro_receive(&adapter->napi, skb);
}

/**
 * e1000_clean_jumbo_rx_irq - Send received data up the network stack; legacy
 * @adapter: board private structure
 * @rx_ring: ring to clean
 * @work_done: amount of napi work completed this call
 * @work_to_do: max amount of work allowed for this call to do
 *
 * the return value indicates whether actual cleaning was done, there
 * is no guarantee that everything was cleaned
 **/
static bool e1000_clean_jumbo_rx_irq(struct e1000_adapter *adapter,
				     struct e1000_rx_ring *rx_ring,
				     int *work_done, int work_to_do)
{
	struct e1000_hw *hw = &adapter->hw;
	struct net_device *netdev = adapter->netdev;
	struct pci_dev *pdev = adapter->pdev;
	struct e1000_rx_desc *rx_desc, *next_rxd;
	struct e1000_buffer *buffer_info, *next_buffer;
	unsigned long irq_flags;
	u32 length;
	unsigned int i;
	int cleaned_count = 0;
	bool cleaned = false;
	unsigned int total_rx_bytes = 0, total_rx_packets = 0;

	i = rx_ring->next_to_clean;
	rx_desc = E1000_RX_DESC(*rx_ring, i);
	buffer_info = &rx_ring->buffer_info[i];

	while (rx_desc->status & E1000_RXD_STAT_DD) {
		struct sk_buff *skb;
		u8 status;

		if (*work_done >= work_to_do)
			break;
		(*work_done)++;
		rmb();	/* read descriptor and rx_buffer_info after status DD */

		status = rx_desc->status;
		skb = buffer_info->skb;
		buffer_info->skb = NULL;

		if (++i == rx_ring->count)
			i = 0;
		next_rxd = E1000_RX_DESC(*rx_ring, i);
		prefetch(next_rxd);

		next_buffer = &rx_ring->buffer_info[i];

		cleaned = true;
		cleaned_count++;
		dma_unmap_page(&pdev->dev, buffer_info->dma,
			       buffer_info->length, DMA_FROM_DEVICE);
		buffer_info->dma = 0;

		length = le16_to_cpu(rx_desc->length);

		/* errors is only valid for DD + EOP descriptors */
		if (unlikely((status & E1000_RXD_STAT_EOP) &&
			     (rx_desc->errors & E1000_RXD_ERR_FRAME_ERR_MASK))) {
			u8 last_byte = *(skb->data + length - 1);
			if (TBI_ACCEPT(hw, status, rx_desc->errors, length,
				       last_byte)) {
				spin_lock_irqsave(&adapter->stats_lock,
						  irq_flags);
				e1000_tbi_adjust_stats(hw, &adapter->stats,
						       length, skb->data);
				spin_unlock_irqrestore(&adapter->stats_lock,
						       irq_flags);
				length--;
			} else {
				/* recycle both page and skb */
				buffer_info->skb = skb;
				/* an error means any chain goes out the window
				 * too
				 */
				if (rx_ring->rx_skb_top)
					dev_kfree_skb(rx_ring->rx_skb_top);
				rx_ring->rx_skb_top = NULL;
				goto next_desc;
			}
		}

#define rxtop rx_ring->rx_skb_top
		if (!(status & E1000_RXD_STAT_EOP)) {
			/* this descriptor is only the beginning (or middle) */
			if (!rxtop) {
				/* this is the beginning of a chain */
				rxtop = skb;
				skb_fill_page_desc(rxtop, 0, buffer_info->page,
						   0, length);
			} else {
				/* this is the middle of a chain */
				skb_fill_page_desc(rxtop,
						   skb_shinfo(rxtop)->nr_frags,
						   buffer_info->page, 0, length);
				/* re-use the skb, only consumed the page */
				buffer_info->skb = skb;
			}
			e1000_consume_page(buffer_info, rxtop, length);
			goto next_desc;
		} else {
			if (rxtop) {
				/* end of the chain */
				skb_fill_page_desc(rxtop,
						   skb_shinfo(rxtop)->nr_frags,
						   buffer_info->page, 0, length);
				/* re-use the current skb, we only consumed the
				 * page
				 */
				buffer_info->skb = skb;
				skb = rxtop;
				rxtop = NULL;
				e1000_consume_page(buffer_info, skb, length);
			} else {
				/* no chain, got EOP, this buf is the packet
				 * copybreak to save the put_page/alloc_page
				 */
				if (length <= copybreak &&
				    skb_tailroom(skb) >= length) {
					u8 *vaddr;
					vaddr = kmap_atomic(buffer_info->page,
							    KM_SKB_DATA_SOFTIRQ);
					memcpy(skb_tail_pointer(skb), vaddr,
					       length);
					kunmap_atomic(vaddr,
						      KM_SKB_DATA_SOFTIRQ);
					/* re-use the page, so don't erase
					 * buffer_info->page
					 */
					skb_put(skb, length);
				} else {
					skb_fill_page_desc(skb, 0,
							   buffer_info->page, 0,
							   length);
					e1000_consume_page(buffer_info, skb,
							   length);
				}
			}
		}

		/* Receive Checksum Offload XXX recompute due to CRC strip? */
		e1000_rx_checksum(adapter,
				  (u32)(status) |
				  ((u32)(rx_desc->errors) << 24),
				  le16_to_cpu(rx_desc->csum), skb);

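		/* hardware leaves the 4-byte Ethernet CRC on the end of the
		 * frame in this path; trim it before handing the skb up
		 */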
		pskb_trim(skb, skb->len - 4);

		/* probably a little skewed due to removing CRC */
		total_rx_bytes += skb->len;
		total_rx_packets++;

		/* eth type trans needs skb->data to point to something */
		if (!pskb_may_pull(skb, ETH_HLEN)) {
			e_err(drv, "pskb_may_pull failed.\n");
			dev_kfree_skb(skb);
			goto next_desc;
		}

		e1000_receive_skb(adapter, status, rx_desc->special, skb);

next_desc:
		rx_desc->status = 0;

		/* return some buffers to hardware, one at a time is too slow */
		if (unlikely(cleaned_count >= E1000_RX_BUFFER_WRITE)) {
			adapter->alloc_rx_buf(adapter, rx_ring, cleaned_count);
			cleaned_count = 0;
		}

		/* use prefetched values */
		rx_desc = next_rxd;
		buffer_info = next_buffer;
	}
	rx_ring->next_to_clean = i;

	cleaned_count = E1000_DESC_UNUSED(rx_ring);
	if (cleaned_count)
		adapter->alloc_rx_buf(adapter, rx_ring, cleaned_count);

	adapter->total_rx_packets += total_rx_packets;
	adapter->total_rx_bytes += total_rx_bytes;
	netdev->stats.rx_bytes += total_rx_bytes;
	netdev->stats.rx_packets += total_rx_packets;
	return cleaned;
}

/*
 * this should improve performance for small packets with large amounts
 * of reassembly being done in the stack
 */
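/* copybreak is the driver's module parameter (256 bytes by default):
 * frames no longer than this are copied into a freshly allocated small
 * skb so the original full-sized Rx buffer can be recycled in place.
 */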
static void e1000_check_copybreak(struct net_device *netdev,
				  struct e1000_buffer *buffer_info,
				  u32 length, struct sk_buff **skb)
{
	struct sk_buff *new_skb;

	if (length > copybreak)
		return;

	new_skb = netdev_alloc_skb_ip_align(netdev, length);
	if (!new_skb)
		return;

	skb_copy_to_linear_data_offset(new_skb, -NET_IP_ALIGN,
				       (*skb)->data - NET_IP_ALIGN,
				       length + NET_IP_ALIGN);
	/* save the skb in buffer_info as good */
	buffer_info->skb = *skb;
	*skb = new_skb;
}

/**
 * e1000_clean_rx_irq - Send received data up the network stack; legacy
 * @adapter: board private structure
 * @rx_ring: ring to clean
 * @work_done: amount of napi work completed this call
 * @work_to_do: max amount of work allowed for this call to do
 **/
static bool e1000_clean_rx_irq(struct e1000_adapter *adapter,
			       struct e1000_rx_ring *rx_ring,
			       int *work_done, int work_to_do)
{
	struct e1000_hw *hw = &adapter->hw;
	struct net_device *netdev = adapter->netdev;
	struct pci_dev *pdev = adapter->pdev;
	struct e1000_rx_desc *rx_desc, *next_rxd;
	struct e1000_buffer *buffer_info, *next_buffer;
	unsigned long flags;
	u32 length;
	unsigned int i;
	int cleaned_count = 0;
	bool cleaned = false;
	unsigned int total_rx_bytes = 0, total_rx_packets = 0;

	i = rx_ring->next_to_clean;
	rx_desc = E1000_RX_DESC(*rx_ring, i);
	buffer_info = &rx_ring->buffer_info[i];

	while (rx_desc->status & E1000_RXD_STAT_DD) {
		struct sk_buff *skb;
		u8 status;

		if (*work_done >= work_to_do)
			break;
		(*work_done)++;
		rmb();	/* read descriptor and rx_buffer_info after status DD */

		status = rx_desc->status;
		skb = buffer_info->skb;
		buffer_info->skb = NULL;

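		/* warm the cache with the start of the packet data before
		 * the stack touches the headers
		 */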
		prefetch(skb->data - NET_IP_ALIGN);

		if (++i == rx_ring->count)
			i = 0;
		next_rxd = E1000_RX_DESC(*rx_ring, i);
		prefetch(next_rxd);

		next_buffer = &rx_ring->buffer_info[i];

		cleaned = true;
		cleaned_count++;
		dma_unmap_single(&pdev->dev, buffer_info->dma,
				 buffer_info->length, DMA_FROM_DEVICE);
		buffer_info->dma = 0;

		length = le16_to_cpu(rx_desc->length);
		/* !EOP means multiple descriptors were used to store a single
		 * packet; if that's the case we need to toss it. In fact, we
		 * need to toss every packet with the EOP bit clear and the
		 * next frame that _does_ have the EOP bit set, as it is by
		 * definition only a frame fragment
		 */
		if (unlikely(!(status & E1000_RXD_STAT_EOP)))
			adapter->discarding = true;

		if (adapter->discarding) {
			/* All receives must fit into a single buffer */
			e_dbg("Receive packet consumed multiple buffers\n");
			/* recycle */
			buffer_info->skb = skb;
			if (status & E1000_RXD_STAT_EOP)
				adapter->discarding = false;
			goto next_desc;
		}

		if (unlikely(rx_desc->errors & E1000_RXD_ERR_FRAME_ERR_MASK)) {
			u8 last_byte = *(skb->data + length - 1);
			if (TBI_ACCEPT(hw, status, rx_desc->errors, length,
				       last_byte)) {
				spin_lock_irqsave(&adapter->stats_lock, flags);
				e1000_tbi_adjust_stats(hw, &adapter->stats,
						       length, skb->data);
				spin_unlock_irqrestore(&adapter->stats_lock,
						       flags);
				length--;
			} else {
				/* recycle */
				buffer_info->skb = skb;
				goto next_desc;
			}
		}

		/* adjust length to remove Ethernet CRC, this must be
		 * done after the TBI_ACCEPT workaround above
		 */
		length -= 4;

		/* probably a little skewed due to removing CRC */
		total_rx_bytes += length;
		total_rx_packets++;

		e1000_check_copybreak(netdev, buffer_info, length, &skb);

		skb_put(skb, length);

		/* Receive Checksum Offload */
		e1000_rx_checksum(adapter,
				  (u32)(status) |
				  ((u32)(rx_desc->errors) << 24),
				  le16_to_cpu(rx_desc->csum), skb);

		e1000_receive_skb(adapter, status, rx_desc->special, skb);

next_desc:
		rx_desc->status = 0;

		/* return some buffers to hardware, one at a time is too slow */
		if (unlikely(cleaned_count >= E1000_RX_BUFFER_WRITE)) {
			adapter->alloc_rx_buf(adapter, rx_ring, cleaned_count);
			cleaned_count = 0;
		}

		/* use prefetched values */
		rx_desc = next_rxd;
		buffer_info = next_buffer;
	}
	rx_ring->next_to_clean = i;

	cleaned_count = E1000_DESC_UNUSED(rx_ring);
	if (cleaned_count)
		adapter->alloc_rx_buf(adapter, rx_ring, cleaned_count);

	adapter->total_rx_packets += total_rx_packets;
	adapter->total_rx_bytes += total_rx_bytes;
	netdev->stats.rx_bytes += total_rx_bytes;
	netdev->stats.rx_packets += total_rx_packets;
	return cleaned;
}

/**
 * e1000_alloc_jumbo_rx_buffers - Replace used jumbo receive buffers
 * @adapter: address of board private structure
 * @rx_ring: pointer to receive ring structure
 * @cleaned_count: number of buffers to allocate this pass
 **/
static void
e1000_alloc_jumbo_rx_buffers(struct e1000_adapter *adapter,
			     struct e1000_rx_ring *rx_ring, int cleaned_count)
{
	struct net_device *netdev = adapter->netdev;
	struct pci_dev *pdev = adapter->pdev;
	struct e1000_rx_desc *rx_desc;
	struct e1000_buffer *buffer_info;
	struct sk_buff *skb;
	unsigned int i;
	unsigned int bufsz = 256 - 16; /* for skb_reserve */

	i = rx_ring->next_to_use;
	buffer_info = &rx_ring->buffer_info[i];

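	/* In the jumbo path the skb itself is only a small shell (hence
	 * the sub-256-byte bufsz above): packet data is DMA'd into the
	 * attached page and chained onto the skb as a page fragment.
	 */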
	while (cleaned_count--) {
		skb = buffer_info->skb;
		if (skb) {
			skb_trim(skb, 0);
			goto check_page;
		}

		skb = netdev_alloc_skb_ip_align(netdev, bufsz);
		if (unlikely(!skb)) {
			/* Better luck next round */
			adapter->alloc_rx_buff_failed++;
			break;
		}

		/* Fix for errata 23, can't cross 64kB boundary */
		if (!e1000_check_64k_bound(adapter, skb->data, bufsz)) {
			struct sk_buff *oldskb = skb;
			e_err(rx_err, "skb align check failed: %u bytes at "
			      "%p\n", bufsz, skb->data);
			/* Try again, without freeing the previous */
			skb = netdev_alloc_skb_ip_align(netdev, bufsz);
			/* Failed allocation, critical failure */
			if (!skb) {
				dev_kfree_skb(oldskb);
				adapter->alloc_rx_buff_failed++;
				break;
			}

			if (!e1000_check_64k_bound(adapter, skb->data, bufsz)) {
				/* give up */
				dev_kfree_skb(skb);
				dev_kfree_skb(oldskb);
				break; /* while (cleaned_count--) */
			}

			/* Use new allocation */
			dev_kfree_skb(oldskb);
		}
		buffer_info->skb = skb;
		buffer_info->length = adapter->rx_buffer_len;
check_page:
		/* allocate a new page if necessary */
		if (!buffer_info->page) {
			buffer_info->page = alloc_page(GFP_ATOMIC);
			if (unlikely(!buffer_info->page)) {
				adapter->alloc_rx_buff_failed++;
				break;
			}
		}

		if (!buffer_info->dma) {
			buffer_info->dma = dma_map_page(&pdev->dev,
							buffer_info->page, 0,
							buffer_info->length,
							DMA_FROM_DEVICE);
			if (dma_mapping_error(&pdev->dev, buffer_info->dma)) {
				put_page(buffer_info->page);
				dev_kfree_skb(skb);
				buffer_info->page = NULL;
				buffer_info->skb = NULL;
				buffer_info->dma = 0;
				adapter->alloc_rx_buff_failed++;
				break; /* while !buffer_info->skb */
			}
		}

		rx_desc = E1000_RX_DESC(*rx_ring, i);
		rx_desc->buffer_addr = cpu_to_le64(buffer_info->dma);

		if (unlikely(++i == rx_ring->count))
			i = 0;
		buffer_info = &rx_ring->buffer_info[i];
	}

	if (likely(rx_ring->next_to_use != i)) {
		rx_ring->next_to_use = i;
		if (unlikely(i-- == 0))
			i = (rx_ring->count - 1);

		/* Force memory writes to complete before letting h/w
		 * know there are new descriptors to fetch. (Only
		 * applicable for weak-ordered memory model archs,
		 * such as IA-64).
		 */
		wmb();
		writel(i, adapter->hw.hw_addr + rx_ring->rdt);
	}
}

/**
 * e1000_alloc_rx_buffers - Replace used receive buffers; legacy & extended
 * @adapter: address of board private structure
 * @rx_ring: pointer to receive ring structure
 * @cleaned_count: number of buffers to allocate this pass
 **/
static void e1000_alloc_rx_buffers(struct e1000_adapter *adapter,
				   struct e1000_rx_ring *rx_ring,
				   int cleaned_count)
{
	struct e1000_hw *hw = &adapter->hw;
	struct net_device *netdev = adapter->netdev;
	struct pci_dev *pdev = adapter->pdev;
	struct e1000_rx_desc *rx_desc;
	struct e1000_buffer *buffer_info;
	struct sk_buff *skb;
	unsigned int i;
	unsigned int bufsz = adapter->rx_buffer_len;

	i = rx_ring->next_to_use;
	buffer_info = &rx_ring->buffer_info[i];

	while (cleaned_count--) {
		skb = buffer_info->skb;
		if (skb) {
			skb_trim(skb, 0);
			goto map_skb;
		}

		skb = netdev_alloc_skb_ip_align(netdev, bufsz);
		if (unlikely(!skb)) {
			/* Better luck next round */
			adapter->alloc_rx_buff_failed++;
			break;
		}

		/* Fix for errata 23, can't cross 64kB boundary */
		if (!e1000_check_64k_bound(adapter, skb->data, bufsz)) {
			struct sk_buff *oldskb = skb;
			e_err(rx_err, "skb align check failed: %u bytes at "
			      "%p\n", bufsz, skb->data);
			/* Try again, without freeing the previous */
			skb = netdev_alloc_skb_ip_align(netdev, bufsz);
			/* Failed allocation, critical failure */
			if (!skb) {
				dev_kfree_skb(oldskb);
				adapter->alloc_rx_buff_failed++;
				break;
			}

			if (!e1000_check_64k_bound(adapter, skb->data, bufsz)) {
				/* give up */
				dev_kfree_skb(skb);
				dev_kfree_skb(oldskb);
				adapter->alloc_rx_buff_failed++;
				break; /* while !buffer_info->skb */
			}

			/* Use new allocation */
			dev_kfree_skb(oldskb);
		}
		buffer_info->skb = skb;
		buffer_info->length = adapter->rx_buffer_len;
map_skb:
		buffer_info->dma = dma_map_single(&pdev->dev,
						  skb->data,
						  buffer_info->length,
						  DMA_FROM_DEVICE);
		if (dma_mapping_error(&pdev->dev, buffer_info->dma)) {
			dev_kfree_skb(skb);
			buffer_info->skb = NULL;
			buffer_info->dma = 0;
			adapter->alloc_rx_buff_failed++;
			break; /* while !buffer_info->skb */
		}

		/* XXX if it was allocated cleanly it will never map to a
		 * boundary crossing
		 */

		/* Fix for errata 23, can't cross 64kB boundary */
		if (!e1000_check_64k_bound(adapter,
					   (void *)(unsigned long)buffer_info->dma,
					   adapter->rx_buffer_len)) {
			e_err(rx_err, "dma align check failed: %u bytes at "
			      "%p\n", adapter->rx_buffer_len,
			      (void *)(unsigned long)buffer_info->dma);
			dev_kfree_skb(skb);
			buffer_info->skb = NULL;

			dma_unmap_single(&pdev->dev, buffer_info->dma,
					 adapter->rx_buffer_len,
					 DMA_FROM_DEVICE);
			buffer_info->dma = 0;

			adapter->alloc_rx_buff_failed++;
			break; /* while !buffer_info->skb */
		}
		rx_desc = E1000_RX_DESC(*rx_ring, i);
		rx_desc->buffer_addr = cpu_to_le64(buffer_info->dma);

		if (unlikely(++i == rx_ring->count))
			i = 0;
		buffer_info = &rx_ring->buffer_info[i];
	}

	if (likely(rx_ring->next_to_use != i)) {
		rx_ring->next_to_use = i;
		if (unlikely(i-- == 0))
			i = (rx_ring->count - 1);

		/* Force memory writes to complete before letting h/w
		 * know there are new descriptors to fetch. (Only
		 * applicable for weak-ordered memory model archs,
		 * such as IA-64).
		 */
		wmb();
		writel(i, hw->hw_addr + rx_ring->rdt);
	}
}

/**
 * e1000_smartspeed - Workaround for SmartSpeed on 82541 and 82547 controllers.
 * @adapter: board private structure
 **/
static void e1000_smartspeed(struct e1000_adapter *adapter)
{
	struct e1000_hw *hw = &adapter->hw;
	u16 phy_status;
	u16 phy_ctrl;

	if ((hw->phy_type != e1000_phy_igp) || !hw->autoneg ||
	    !(hw->autoneg_advertised & ADVERTISE_1000_FULL))
		return;

	if (adapter->smartspeed == 0) {
		/* If Master/Slave config fault is asserted twice,
		 * we assume back-to-back
		 */
		e1000_read_phy_reg(hw, PHY_1000T_STATUS, &phy_status);
		if (!(phy_status & SR_1000T_MS_CONFIG_FAULT))
			return;
		e1000_read_phy_reg(hw, PHY_1000T_STATUS, &phy_status);
		if (!(phy_status & SR_1000T_MS_CONFIG_FAULT))
			return;
		e1000_read_phy_reg(hw, PHY_1000T_CTRL, &phy_ctrl);
		if (phy_ctrl & CR_1000T_MS_ENABLE) {
			phy_ctrl &= ~CR_1000T_MS_ENABLE;
			e1000_write_phy_reg(hw, PHY_1000T_CTRL, phy_ctrl);
			adapter->smartspeed++;
			if (!e1000_phy_setup_autoneg(hw) &&
			    !e1000_read_phy_reg(hw, PHY_CTRL, &phy_ctrl)) {
				phy_ctrl |= (MII_CR_AUTO_NEG_EN |
					     MII_CR_RESTART_AUTO_NEG);
				e1000_write_phy_reg(hw, PHY_CTRL, phy_ctrl);
			}
		}
		return;
	} else if (adapter->smartspeed == E1000_SMARTSPEED_DOWNSHIFT) {
		/* If still no link, perhaps using 2/3 pair cable */
		e1000_read_phy_reg(hw, PHY_1000T_CTRL, &phy_ctrl);
		phy_ctrl |= CR_1000T_MS_ENABLE;
		e1000_write_phy_reg(hw, PHY_1000T_CTRL, phy_ctrl);
		if (!e1000_phy_setup_autoneg(hw) &&
		    !e1000_read_phy_reg(hw, PHY_CTRL, &phy_ctrl)) {
			phy_ctrl |= (MII_CR_AUTO_NEG_EN |
				     MII_CR_RESTART_AUTO_NEG);
			e1000_write_phy_reg(hw, PHY_CTRL, phy_ctrl);
		}
	}
	/* Restart process after E1000_SMARTSPEED_MAX iterations */
	if (adapter->smartspeed++ == E1000_SMARTSPEED_MAX)
		adapter->smartspeed = 0;
}

/**
 * e1000_ioctl - handle device-specific ioctls
 * @netdev: network interface device structure
 * @ifr: interface request data
 * @cmd: ioctl command number
 **/
static int e1000_ioctl(struct net_device *netdev, struct ifreq *ifr, int cmd)
{
	switch (cmd) {
	case SIOCGMIIPHY:
	case SIOCGMIIREG:
	case SIOCSMIIREG:
		return e1000_mii_ioctl(netdev, ifr, cmd);
	default:
		return -EOPNOTSUPP;
	}
}

/**
 * e1000_mii_ioctl - handle MII register ioctls
 * @netdev: network interface device structure
 * @ifr: interface request data
 * @cmd: ioctl command number
 **/
static int e1000_mii_ioctl(struct net_device *netdev, struct ifreq *ifr,
			   int cmd)
{
	struct e1000_adapter *adapter = netdev_priv(netdev);
	struct e1000_hw *hw = &adapter->hw;
	struct mii_ioctl_data *data = if_mii(ifr);
	int retval;
	u16 mii_reg;
	u16 spddplx;
	unsigned long flags;

	if (hw->media_type != e1000_media_type_copper)
		return -EOPNOTSUPP;

	switch (cmd) {
	case SIOCGMIIPHY:
		data->phy_id = hw->phy_addr;
		break;
	case SIOCGMIIREG:
		spin_lock_irqsave(&adapter->stats_lock, flags);
		if (e1000_read_phy_reg(hw, data->reg_num & 0x1F,
				       &data->val_out)) {
			spin_unlock_irqrestore(&adapter->stats_lock, flags);
			return -EIO;
		}
		spin_unlock_irqrestore(&adapter->stats_lock, flags);
		break;
	case SIOCSMIIREG:
		if (data->reg_num & ~(0x1F))
			return -EFAULT;
		mii_reg = data->val_in;
		spin_lock_irqsave(&adapter->stats_lock, flags);
		if (e1000_write_phy_reg(hw, data->reg_num, mii_reg)) {
			spin_unlock_irqrestore(&adapter->stats_lock, flags);
			return -EIO;
		}
		spin_unlock_irqrestore(&adapter->stats_lock, flags);
		if (hw->media_type == e1000_media_type_copper) {
			switch (data->reg_num) {
			case PHY_CTRL:
				if (mii_reg & MII_CR_POWER_DOWN)
					break;
				if (mii_reg & MII_CR_AUTO_NEG_EN) {
					hw->autoneg = 1;
					hw->autoneg_advertised = 0x2F;
				} else {
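					/* Autoneg is off: decode the forced
					 * speed/duplex straight from the raw
					 * MII control register bits (0x40 =
					 * 1000 Mb/s, 0x2000 = 100 Mb/s,
					 * 0x100 = full duplex).
					 */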
					if (mii_reg & 0x40)
						spddplx = SPEED_1000;
					else if (mii_reg & 0x2000)
						spddplx = SPEED_100;
					else
						spddplx = SPEED_10;
					spddplx += (mii_reg & 0x100)
						   ? DUPLEX_FULL :
						   DUPLEX_HALF;
					retval = e1000_set_spd_dplx(adapter,
								    spddplx);
					if (retval)
						return retval;
				}
				if (netif_running(adapter->netdev))
					e1000_reinit_locked(adapter);
				else
					e1000_reset(adapter);
				break;
			case M88E1000_PHY_SPEC_CTRL:
			case M88E1000_EXT_PHY_SPEC_CTRL:
				if (e1000_phy_reset(hw))
					return -EIO;
				break;
			}
		} else {
			switch (data->reg_num) {
			case PHY_CTRL:
				if (mii_reg & MII_CR_POWER_DOWN)
					break;
				if (netif_running(adapter->netdev))
					e1000_reinit_locked(adapter);
				else
					e1000_reset(adapter);
				break;
			}
		}
		break;
	default:
		return -EOPNOTSUPP;
	}
	return E1000_SUCCESS;
}

void e1000_pci_set_mwi(struct e1000_hw *hw)
{
	struct e1000_adapter *adapter = hw->back;
	int ret_val = pci_set_mwi(adapter->pdev);

	if (ret_val)
		e_err(probe, "Error in setting MWI\n");
}

void e1000_pci_clear_mwi(struct e1000_hw *hw)
{
	struct e1000_adapter *adapter = hw->back;

	pci_clear_mwi(adapter->pdev);
}

int e1000_pcix_get_mmrbc(struct e1000_hw *hw)
{
	struct e1000_adapter *adapter = hw->back;

	return pcix_get_mmrbc(adapter->pdev);
}

void e1000_pcix_set_mmrbc(struct e1000_hw *hw, int mmrbc)
{
	struct e1000_adapter *adapter = hw->back;

	pcix_set_mmrbc(adapter->pdev, mmrbc);
}

void e1000_io_write(struct e1000_hw *hw, unsigned long port, u32 value)
{
	outl(value, port);
}

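/**
 * e1000_vlan_rx_register - enable or disable hardware VLAN offload
 * @netdev: network interface device structure
 * @grp: VLAN group from the 8021q layer, or NULL to disable
 *
 * Sets CTRL.VME (tag insert/strip) and RCTL.VFE (receive filtering)
 * to match whether a VLAN group is attached.
 **/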
static void e1000_vlan_rx_register(struct net_device *netdev,
				   struct vlan_group *grp)
{
	struct e1000_adapter *adapter = netdev_priv(netdev);
	struct e1000_hw *hw = &adapter->hw;
	u32 ctrl, rctl;

	if (!test_bit(__E1000_DOWN, &adapter->flags))
		e1000_irq_disable(adapter);
	adapter->vlgrp = grp;

	if (grp) {
		/* enable VLAN tag insert/strip */
		ctrl = er32(CTRL);
		ctrl |= E1000_CTRL_VME;
		ew32(CTRL, ctrl);

		/* enable VLAN receive filtering */
		rctl = er32(RCTL);
		rctl &= ~E1000_RCTL_CFIEN;
		if (!(netdev->flags & IFF_PROMISC))
			rctl |= E1000_RCTL_VFE;
		ew32(RCTL, rctl);
		e1000_update_mng_vlan(adapter);
	} else {
		/* disable VLAN tag insert/strip */
		ctrl = er32(CTRL);
		ctrl &= ~E1000_CTRL_VME;
		ew32(CTRL, ctrl);

		/* disable VLAN receive filtering */
		rctl = er32(RCTL);
		rctl &= ~E1000_RCTL_VFE;
		ew32(RCTL, rctl);

		if (adapter->mng_vlan_id != (u16)E1000_MNG_VLAN_NONE) {
			e1000_vlan_rx_kill_vid(netdev, adapter->mng_vlan_id);
			adapter->mng_vlan_id = E1000_MNG_VLAN_NONE;
		}
	}

	if (!test_bit(__E1000_DOWN, &adapter->flags))
		e1000_irq_enable(adapter);
}

static void e1000_vlan_rx_add_vid(struct net_device *netdev, u16 vid)
{
	struct e1000_adapter *adapter = netdev_priv(netdev);
	struct e1000_hw *hw = &adapter->hw;
	u32 vfta, index;

	if ((hw->mng_cookie.status &
	     E1000_MNG_DHCP_COOKIE_STATUS_VLAN_SUPPORT) &&
	    (vid == adapter->mng_vlan_id))
		return;
	/* add VID to filter table */
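	/* The VLAN filter table (VFTA) is an array of 128 32-bit bitmaps
	 * covering all 4096 VIDs: bits [11:5] of the VID select the
	 * register, bits [4:0] select the bit within it.
	 */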
	index = (vid >> 5) & 0x7F;
	vfta = E1000_READ_REG_ARRAY(hw, VFTA, index);
	vfta |= (1 << (vid & 0x1F));
	e1000_write_vfta(hw, index, vfta);
}

static void e1000_vlan_rx_kill_vid(struct net_device *netdev, u16 vid)
{
	struct e1000_adapter *adapter = netdev_priv(netdev);
	struct e1000_hw *hw = &adapter->hw;
	u32 vfta, index;

	if (!test_bit(__E1000_DOWN, &adapter->flags))
		e1000_irq_disable(adapter);
	vlan_group_set_device(adapter->vlgrp, vid, NULL);
	if (!test_bit(__E1000_DOWN, &adapter->flags))
		e1000_irq_enable(adapter);

	/* remove VID from filter table */
	index = (vid >> 5) & 0x7F;
	vfta = E1000_READ_REG_ARRAY(hw, VFTA, index);
	vfta &= ~(1 << (vid & 0x1F));
	e1000_write_vfta(hw, index, vfta);
}

static void e1000_restore_vlan(struct e1000_adapter *adapter)
{
	e1000_vlan_rx_register(adapter->netdev, adapter->vlgrp);

	if (adapter->vlgrp) {
		u16 vid;
		for (vid = 0; vid < VLAN_N_VID; vid++) {
			if (!vlan_group_get_device(adapter->vlgrp, vid))
				continue;
			e1000_vlan_rx_add_vid(adapter->netdev, vid);
		}
	}
}

int e1000_set_spd_dplx(struct e1000_adapter *adapter, u16 spddplx)
{
	struct e1000_hw *hw = &adapter->hw;

	hw->autoneg = 0;

	/* Fiber NICs only allow 1000 Mbps full duplex */
	if ((hw->media_type == e1000_media_type_fiber) &&
	    spddplx != (SPEED_1000 + DUPLEX_FULL)) {
		e_err(probe, "Unsupported Speed/Duplex configuration\n");
		return -EINVAL;
	}

	switch (spddplx) {
	case SPEED_10 + DUPLEX_HALF:
		hw->forced_speed_duplex = e1000_10_half;
		break;
	case SPEED_10 + DUPLEX_FULL:
		hw->forced_speed_duplex = e1000_10_full;
		break;
	case SPEED_100 + DUPLEX_HALF:
		hw->forced_speed_duplex = e1000_100_half;
		break;
	case SPEED_100 + DUPLEX_FULL:
		hw->forced_speed_duplex = e1000_100_full;
		break;
	case SPEED_1000 + DUPLEX_FULL:
		hw->autoneg = 1;
		hw->autoneg_advertised = ADVERTISE_1000_FULL;
		break;
	case SPEED_1000 + DUPLEX_HALF: /* not supported */
	default:
		e_err(probe, "Unsupported Speed/Duplex configuration\n");
		return -EINVAL;
	}
	return 0;
}

static int __e1000_shutdown(struct pci_dev *pdev, bool *enable_wake)
{
	struct net_device *netdev = pci_get_drvdata(pdev);
	struct e1000_adapter *adapter = netdev_priv(netdev);
	struct e1000_hw *hw = &adapter->hw;
	u32 ctrl, ctrl_ext, rctl, status;
	u32 wufc = adapter->wol;
#ifdef CONFIG_PM
	int retval = 0;
#endif

	netif_device_detach(netdev);

	if (netif_running(netdev)) {
		WARN_ON(test_bit(__E1000_RESETTING, &adapter->flags));
		e1000_down(adapter);
	}

#ifdef CONFIG_PM
	retval = pci_save_state(pdev);
	if (retval)
		return retval;
#endif

	status = er32(STATUS);
	if (status & E1000_STATUS_LU)
		wufc &= ~E1000_WUFC_LNKC;

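	/* Program the wake-up filters only if some wake source remains;
	 * otherwise turn wake-up off entirely (WUC/WUFC cleared).
	 */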
	if (wufc) {
		e1000_setup_rctl(adapter);
		e1000_set_rx_mode(netdev);

		/* turn on all-multi mode if wake on multicast is enabled */
		if (wufc & E1000_WUFC_MC) {
			rctl = er32(RCTL);
			rctl |= E1000_RCTL_MPE;
			ew32(RCTL, rctl);
		}

		if (hw->mac_type >= e1000_82540) {
			ctrl = er32(CTRL);
			/* advertise wake from D3Cold */
#define E1000_CTRL_ADVD3WUC 0x00100000
			/* phy power management enable */
#define E1000_CTRL_EN_PHY_PWR_MGMT 0x00200000
			ctrl |= E1000_CTRL_ADVD3WUC |
				E1000_CTRL_EN_PHY_PWR_MGMT;
			ew32(CTRL, ctrl);
		}

		if (hw->media_type == e1000_media_type_fiber ||
		    hw->media_type == e1000_media_type_internal_serdes) {
			/* keep the laser running in D3 */
			ctrl_ext = er32(CTRL_EXT);
			ctrl_ext |= E1000_CTRL_EXT_SDP7_DATA;
			ew32(CTRL_EXT, ctrl_ext);
		}

		ew32(WUC, E1000_WUC_PME_EN);
		ew32(WUFC, wufc);
	} else {
		ew32(WUC, 0);
		ew32(WUFC, 0);
	}

	e1000_release_manageability(adapter);

	*enable_wake = !!wufc;

	/* make sure adapter isn't asleep if manageability is enabled */
	if (adapter->en_mng_pt)
		*enable_wake = true;

	if (netif_running(netdev))
		e1000_free_irq(adapter);

	pci_disable_device(pdev);

	return 0;
}

#ifdef CONFIG_PM
static int e1000_suspend(struct pci_dev *pdev, pm_message_t state)
{
	int retval;
	bool wake;

	retval = __e1000_shutdown(pdev, &wake);
	if (retval)
		return retval;

	if (wake) {
		pci_prepare_to_sleep(pdev);
	} else {
		pci_wake_from_d3(pdev, false);
		pci_set_power_state(pdev, PCI_D3hot);
	}

	return 0;
}

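/**
 * e1000_resume - restore device state after a suspend
 * @pdev: Pointer to PCI device
 **/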
static int e1000_resume(struct pci_dev *pdev)
{
	struct net_device *netdev = pci_get_drvdata(pdev);
	struct e1000_adapter *adapter = netdev_priv(netdev);
	struct e1000_hw *hw = &adapter->hw;
	u32 err;

	pci_set_power_state(pdev, PCI_D0);
	pci_restore_state(pdev);
	pci_save_state(pdev);

	if (adapter->need_ioport)
		err = pci_enable_device(pdev);
	else
		err = pci_enable_device_mem(pdev);
	if (err) {
		pr_err("Cannot enable PCI device from suspend\n");
		return err;
	}
	pci_set_master(pdev);

	pci_enable_wake(pdev, PCI_D3hot, 0);
	pci_enable_wake(pdev, PCI_D3cold, 0);

	if (netif_running(netdev)) {
		err = e1000_request_irq(adapter);
		if (err)
			return err;
	}

	e1000_power_up_phy(adapter);
	e1000_reset(adapter);
	ew32(WUS, ~0);

	e1000_init_manageability(adapter);

	if (netif_running(netdev))
		e1000_up(adapter);

	netif_device_attach(netdev);

	return 0;
}
#endif

static void e1000_shutdown(struct pci_dev *pdev)
{
	bool wake;

	__e1000_shutdown(pdev, &wake);

	if (system_state == SYSTEM_POWER_OFF) {
		pci_wake_from_d3(pdev, wake);
		pci_set_power_state(pdev, PCI_D3hot);
	}
}

#ifdef CONFIG_NET_POLL_CONTROLLER
/*
 * Polling 'interrupt' - used by things like netconsole to send skbs
 * without having to re-enable interrupts. It's not called while
 * the interrupt routine is executing.
 */
static void e1000_netpoll(struct net_device *netdev)
{
	struct e1000_adapter *adapter = netdev_priv(netdev);

	disable_irq(adapter->pdev->irq);
	e1000_intr(adapter->pdev->irq, netdev);
	enable_irq(adapter->pdev->irq);
}
#endif

/**
 * e1000_io_error_detected - called when PCI error is detected
 * @pdev: Pointer to PCI device
 * @state: The current pci connection state
 *
 * This function is called after a PCI bus error affecting
 * this device has been detected.
 */
static pci_ers_result_t e1000_io_error_detected(struct pci_dev *pdev,
						pci_channel_state_t state)
{
	struct net_device *netdev = pci_get_drvdata(pdev);
	struct e1000_adapter *adapter = netdev_priv(netdev);

	netif_device_detach(netdev);

	if (state == pci_channel_io_perm_failure)
		return PCI_ERS_RESULT_DISCONNECT;

	if (netif_running(netdev))
		e1000_down(adapter);
	pci_disable_device(pdev);

	/* Request a slot reset. */
	return PCI_ERS_RESULT_NEED_RESET;
}

/**
 * e1000_io_slot_reset - called after the pci bus has been reset.
 * @pdev: Pointer to PCI device
 *
 * Restart the card from scratch, as if from a cold-boot. Implementation
 * resembles the first-half of the e1000_resume routine.
 */
static pci_ers_result_t e1000_io_slot_reset(struct pci_dev *pdev)
{
	struct net_device *netdev = pci_get_drvdata(pdev);
	struct e1000_adapter *adapter = netdev_priv(netdev);
	struct e1000_hw *hw = &adapter->hw;
	int err;

	if (adapter->need_ioport)
		err = pci_enable_device(pdev);
	else
		err = pci_enable_device_mem(pdev);
	if (err) {
		pr_err("Cannot re-enable PCI device after reset.\n");
		return PCI_ERS_RESULT_DISCONNECT;
	}
	pci_set_master(pdev);

	pci_enable_wake(pdev, PCI_D3hot, 0);
	pci_enable_wake(pdev, PCI_D3cold, 0);

	e1000_reset(adapter);
	ew32(WUS, ~0);

	return PCI_ERS_RESULT_RECOVERED;
}

/**
 * e1000_io_resume - called when traffic can start flowing again.
 * @pdev: Pointer to PCI device
 *
 * This callback is called when the error recovery driver tells us that
 * it's OK to resume normal operation. Implementation resembles the
 * second-half of the e1000_resume routine.
 */
static void e1000_io_resume(struct pci_dev *pdev)
{
	struct net_device *netdev = pci_get_drvdata(pdev);
	struct e1000_adapter *adapter = netdev_priv(netdev);

	e1000_init_manageability(adapter);

	if (netif_running(netdev)) {
		if (e1000_up(adapter)) {
			pr_info("can't bring device back up after reset\n");
			return;
		}
	}

	netif_device_attach(netdev);
}

/* e1000_main.c */