|
1 /******************************************************************************* |
|
2 |
|
3 Intel PRO/1000 Linux driver |
|
4 Copyright(c) 1999 - 2010 Intel Corporation. |
|
5 |
|
6 This program is free software; you can redistribute it and/or modify it |
|
7 under the terms and conditions of the GNU General Public License, |
|
8 version 2, as published by the Free Software Foundation. |
|
9 |
|
10 This program is distributed in the hope it will be useful, but WITHOUT |
|
11 ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or |
|
12 FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for |
|
13 more details. |
|
14 |
|
15 You should have received a copy of the GNU General Public License along with |
|
16 this program; if not, write to the Free Software Foundation, Inc., |
|
17 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA. |
|
18 |
|
19 The full GNU General Public License is included in this distribution in |
|
20 the file called "COPYING". |
|
21 |
|
22 Contact Information: |
|
23 Linux NICS <linux.nics@intel.com> |
|
24 e1000-devel Mailing List <e1000-devel@lists.sourceforge.net> |
|
25 Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497 |
|
26 |
|
27 *******************************************************************************/ |
|
28 |
|
29 #include "e1000.h" |
|
30 |
|
/*
 * Manageability (firmware) operating modes reported by the hardware.
 * Values beyond "none" name the firmware protocol in use; exact
 * semantics are defined by the manageability firmware, not visible here.
 */
enum e1000_mng_mode {
	e1000_mng_mode_none = 0,	/* no manageability firmware active */
	e1000_mng_mode_asf,		/* ASF mode */
	e1000_mng_mode_pt,		/* pass-through mode */
	e1000_mng_mode_ipmi,		/* IPMI mode */
	e1000_mng_mode_host_if_only	/* host-interface access only */
};
|
38 |
|
/*
 * FACTPS register bit 29 -- per its name, manageability clock gating
 * status.  NOTE(review): exact semantics not visible in this file;
 * confirm against the controller datasheet.
 */
#define E1000_FACTPS_MNGCG 0x20000000

/*
 * Intel(R) Active Management Technology signature; the little-endian
 * dword 0x544D4149 spells the ASCII string "IAMT".
 */
#define E1000_IAMT_SIGNATURE 0x544D4149
|
43 |
|
44 /** |
|
45 * e1000e_get_bus_info_pcie - Get PCIe bus information |
|
46 * @hw: pointer to the HW structure |
|
47 * |
|
48 * Determines and stores the system bus information for a particular |
|
49 * network interface. The following bus information is determined and stored: |
|
50 * bus speed, bus width, type (PCIe), and PCIe function. |
|
51 **/ |
|
52 s32 e1000e_get_bus_info_pcie(struct e1000_hw *hw) |
|
53 { |
|
54 struct e1000_mac_info *mac = &hw->mac; |
|
55 struct e1000_bus_info *bus = &hw->bus; |
|
56 struct e1000_adapter *adapter = hw->adapter; |
|
57 u16 pcie_link_status, cap_offset; |
|
58 |
|
59 cap_offset = pci_find_capability(adapter->pdev, PCI_CAP_ID_EXP); |
|
60 if (!cap_offset) { |
|
61 bus->width = e1000_bus_width_unknown; |
|
62 } else { |
|
63 pci_read_config_word(adapter->pdev, |
|
64 cap_offset + PCIE_LINK_STATUS, |
|
65 &pcie_link_status); |
|
66 bus->width = (enum e1000_bus_width)((pcie_link_status & |
|
67 PCIE_LINK_WIDTH_MASK) >> |
|
68 PCIE_LINK_WIDTH_SHIFT); |
|
69 } |
|
70 |
|
71 mac->ops.set_lan_id(hw); |
|
72 |
|
73 return 0; |
|
74 } |
|
75 |
|
76 /** |
|
77 * e1000_set_lan_id_multi_port_pcie - Set LAN id for PCIe multiple port devices |
|
78 * |
|
79 * @hw: pointer to the HW structure |
|
80 * |
|
81 * Determines the LAN function id by reading memory-mapped registers |
|
82 * and swaps the port value if requested. |
|
83 **/ |
|
84 void e1000_set_lan_id_multi_port_pcie(struct e1000_hw *hw) |
|
85 { |
|
86 struct e1000_bus_info *bus = &hw->bus; |
|
87 u32 reg; |
|
88 |
|
89 /* |
|
90 * The status register reports the correct function number |
|
91 * for the device regardless of function swap state. |
|
92 */ |
|
93 reg = er32(STATUS); |
|
94 bus->func = (reg & E1000_STATUS_FUNC_MASK) >> E1000_STATUS_FUNC_SHIFT; |
|
95 } |
|
96 |
|
97 /** |
|
98 * e1000_set_lan_id_single_port - Set LAN id for a single port device |
|
99 * @hw: pointer to the HW structure |
|
100 * |
|
101 * Sets the LAN function id to zero for a single port device. |
|
102 **/ |
|
103 void e1000_set_lan_id_single_port(struct e1000_hw *hw) |
|
104 { |
|
105 struct e1000_bus_info *bus = &hw->bus; |
|
106 |
|
107 bus->func = 0; |
|
108 } |
|
109 |
|
110 /** |
|
111 * e1000_clear_vfta_generic - Clear VLAN filter table |
|
112 * @hw: pointer to the HW structure |
|
113 * |
|
114 * Clears the register array which contains the VLAN filter table by |
|
115 * setting all the values to 0. |
|
116 **/ |
|
117 void e1000_clear_vfta_generic(struct e1000_hw *hw) |
|
118 { |
|
119 u32 offset; |
|
120 |
|
121 for (offset = 0; offset < E1000_VLAN_FILTER_TBL_SIZE; offset++) { |
|
122 E1000_WRITE_REG_ARRAY(hw, E1000_VFTA, offset, 0); |
|
123 e1e_flush(); |
|
124 } |
|
125 } |
|
126 |
|
127 /** |
|
128 * e1000_write_vfta_generic - Write value to VLAN filter table |
|
129 * @hw: pointer to the HW structure |
|
130 * @offset: register offset in VLAN filter table |
|
131 * @value: register value written to VLAN filter table |
|
132 * |
|
133 * Writes value at the given offset in the register array which stores |
|
134 * the VLAN filter table. |
|
135 **/ |
|
136 void e1000_write_vfta_generic(struct e1000_hw *hw, u32 offset, u32 value) |
|
137 { |
|
138 E1000_WRITE_REG_ARRAY(hw, E1000_VFTA, offset, value); |
|
139 e1e_flush(); |
|
140 } |
|
141 |
|
142 /** |
|
143 * e1000e_init_rx_addrs - Initialize receive address's |
|
144 * @hw: pointer to the HW structure |
|
145 * @rar_count: receive address registers |
|
146 * |
|
147 * Setups the receive address registers by setting the base receive address |
|
148 * register to the devices MAC address and clearing all the other receive |
|
149 * address registers to 0. |
|
150 **/ |
|
151 void e1000e_init_rx_addrs(struct e1000_hw *hw, u16 rar_count) |
|
152 { |
|
153 u32 i; |
|
154 u8 mac_addr[ETH_ALEN] = {0}; |
|
155 |
|
156 /* Setup the receive address */ |
|
157 e_dbg("Programming MAC Address into RAR[0]\n"); |
|
158 |
|
159 e1000e_rar_set(hw, hw->mac.addr, 0); |
|
160 |
|
161 /* Zero out the other (rar_entry_count - 1) receive addresses */ |
|
162 e_dbg("Clearing RAR[1-%u]\n", rar_count-1); |
|
163 for (i = 1; i < rar_count; i++) |
|
164 e1000e_rar_set(hw, mac_addr, i); |
|
165 } |
|
166 |
|
/**
 * e1000_check_alt_mac_addr_generic - Check for alternate MAC addr
 * @hw: pointer to the HW structure
 *
 * Checks the nvm for an alternate MAC address.  An alternate MAC address
 * can be setup by pre-boot software and must be treated like a permanent
 * address and must override the actual permanent MAC address.  If an
 * alternate MAC address is found it is programmed into RAR0, replacing
 * the permanent address that was installed into RAR0 by the Si on reset.
 * This function will return SUCCESS unless it encounters an error while
 * reading the EEPROM.
 **/
s32 e1000_check_alt_mac_addr_generic(struct e1000_hw *hw)
{
	u32 i;
	s32 ret_val = 0;
	u16 offset, nvm_alt_mac_addr_offset, nvm_data;
	u8 alt_mac_addr[ETH_ALEN];

	/* NVM compatibility word gates whether an alternate address applies */
	ret_val = e1000_read_nvm(hw, NVM_COMPAT, 1, &nvm_data);
	if (ret_val)
		goto out;

	/* Check for LOM (vs. NIC) or one of two valid mezzanine cards */
	if (!((nvm_data & NVM_COMPAT_LOM) ||
	      (hw->adapter->pdev->device == E1000_DEV_ID_82571EB_SERDES_DUAL) ||
	      (hw->adapter->pdev->device == E1000_DEV_ID_82571EB_SERDES_QUAD)))
		goto out;

	/* Fetch the NVM word offset where the alternate address is stored */
	ret_val = e1000_read_nvm(hw, NVM_ALT_MAC_ADDR_PTR, 1,
				 &nvm_alt_mac_addr_offset);
	if (ret_val) {
		e_dbg("NVM Read Error\n");
		goto out;
	}

	if (nvm_alt_mac_addr_offset == 0xFFFF) {
		/* There is no Alternate MAC Address */
		goto out;
	}

	/* LAN function 1 stores its alternate address at a fixed offset
	 * past function 0's */
	if (hw->bus.func == E1000_FUNC_1)
		nvm_alt_mac_addr_offset += E1000_ALT_MAC_ADDRESS_OFFSET_LAN1;
	/* Each 16-bit NVM word supplies two bytes of the 6-byte address */
	for (i = 0; i < ETH_ALEN; i += 2) {
		offset = nvm_alt_mac_addr_offset + (i >> 1);
		ret_val = e1000_read_nvm(hw, offset, 1, &nvm_data);
		if (ret_val) {
			e_dbg("NVM Read Error\n");
			goto out;
		}

		/* NVM word is little endian: low byte first */
		alt_mac_addr[i] = (u8)(nvm_data & 0xFF);
		alt_mac_addr[i + 1] = (u8)(nvm_data >> 8);
	}

	/* if multicast bit is set, the alternate address will not be used */
	if (alt_mac_addr[0] & 0x01) {
		e_dbg("Ignoring Alternate Mac Address with MC bit set\n");
		goto out;
	}

	/*
	 * We have a valid alternate MAC address, and we want to treat it the
	 * same as the normal permanent MAC address stored by the HW into the
	 * RAR. Do this by mapping this address into RAR0.
	 */
	e1000e_rar_set(hw, alt_mac_addr, 0);

out:
	return ret_val;
}
|
238 |
|
239 /** |
|
240 * e1000e_rar_set - Set receive address register |
|
241 * @hw: pointer to the HW structure |
|
242 * @addr: pointer to the receive address |
|
243 * @index: receive address array register |
|
244 * |
|
245 * Sets the receive address array register at index to the address passed |
|
246 * in by addr. |
|
247 **/ |
|
248 void e1000e_rar_set(struct e1000_hw *hw, u8 *addr, u32 index) |
|
249 { |
|
250 u32 rar_low, rar_high; |
|
251 |
|
252 /* |
|
253 * HW expects these in little endian so we reverse the byte order |
|
254 * from network order (big endian) to little endian |
|
255 */ |
|
256 rar_low = ((u32) addr[0] | |
|
257 ((u32) addr[1] << 8) | |
|
258 ((u32) addr[2] << 16) | ((u32) addr[3] << 24)); |
|
259 |
|
260 rar_high = ((u32) addr[4] | ((u32) addr[5] << 8)); |
|
261 |
|
262 /* If MAC address zero, no need to set the AV bit */ |
|
263 if (rar_low || rar_high) |
|
264 rar_high |= E1000_RAH_AV; |
|
265 |
|
266 /* |
|
267 * Some bridges will combine consecutive 32-bit writes into |
|
268 * a single burst write, which will malfunction on some parts. |
|
269 * The flushes avoid this. |
|
270 */ |
|
271 ew32(RAL(index), rar_low); |
|
272 e1e_flush(); |
|
273 ew32(RAH(index), rar_high); |
|
274 e1e_flush(); |
|
275 } |
|
276 |
|
277 /** |
|
278 * e1000_hash_mc_addr - Generate a multicast hash value |
|
279 * @hw: pointer to the HW structure |
|
280 * @mc_addr: pointer to a multicast address |
|
281 * |
|
282 * Generates a multicast address hash value which is used to determine |
|
283 * the multicast filter table array address and new table value. See |
|
284 * e1000_mta_set_generic() |
|
285 **/ |
|
286 static u32 e1000_hash_mc_addr(struct e1000_hw *hw, u8 *mc_addr) |
|
287 { |
|
288 u32 hash_value, hash_mask; |
|
289 u8 bit_shift = 0; |
|
290 |
|
291 /* Register count multiplied by bits per register */ |
|
292 hash_mask = (hw->mac.mta_reg_count * 32) - 1; |
|
293 |
|
294 /* |
|
295 * For a mc_filter_type of 0, bit_shift is the number of left-shifts |
|
296 * where 0xFF would still fall within the hash mask. |
|
297 */ |
|
298 while (hash_mask >> bit_shift != 0xFF) |
|
299 bit_shift++; |
|
300 |
|
301 /* |
|
302 * The portion of the address that is used for the hash table |
|
303 * is determined by the mc_filter_type setting. |
|
304 * The algorithm is such that there is a total of 8 bits of shifting. |
|
305 * The bit_shift for a mc_filter_type of 0 represents the number of |
|
306 * left-shifts where the MSB of mc_addr[5] would still fall within |
|
307 * the hash_mask. Case 0 does this exactly. Since there are a total |
|
308 * of 8 bits of shifting, then mc_addr[4] will shift right the |
|
309 * remaining number of bits. Thus 8 - bit_shift. The rest of the |
|
310 * cases are a variation of this algorithm...essentially raising the |
|
311 * number of bits to shift mc_addr[5] left, while still keeping the |
|
312 * 8-bit shifting total. |
|
313 * |
|
314 * For example, given the following Destination MAC Address and an |
|
315 * mta register count of 128 (thus a 4096-bit vector and 0xFFF mask), |
|
316 * we can see that the bit_shift for case 0 is 4. These are the hash |
|
317 * values resulting from each mc_filter_type... |
|
318 * [0] [1] [2] [3] [4] [5] |
|
319 * 01 AA 00 12 34 56 |
|
320 * LSB MSB |
|
321 * |
|
322 * case 0: hash_value = ((0x34 >> 4) | (0x56 << 4)) & 0xFFF = 0x563 |
|
323 * case 1: hash_value = ((0x34 >> 3) | (0x56 << 5)) & 0xFFF = 0xAC6 |
|
324 * case 2: hash_value = ((0x34 >> 2) | (0x56 << 6)) & 0xFFF = 0x163 |
|
325 * case 3: hash_value = ((0x34 >> 0) | (0x56 << 8)) & 0xFFF = 0x634 |
|
326 */ |
|
327 switch (hw->mac.mc_filter_type) { |
|
328 default: |
|
329 case 0: |
|
330 break; |
|
331 case 1: |
|
332 bit_shift += 1; |
|
333 break; |
|
334 case 2: |
|
335 bit_shift += 2; |
|
336 break; |
|
337 case 3: |
|
338 bit_shift += 4; |
|
339 break; |
|
340 } |
|
341 |
|
342 hash_value = hash_mask & (((mc_addr[4] >> (8 - bit_shift)) | |
|
343 (((u16) mc_addr[5]) << bit_shift))); |
|
344 |
|
345 return hash_value; |
|
346 } |
|
347 |
|
348 /** |
|
349 * e1000e_update_mc_addr_list_generic - Update Multicast addresses |
|
350 * @hw: pointer to the HW structure |
|
351 * @mc_addr_list: array of multicast addresses to program |
|
352 * @mc_addr_count: number of multicast addresses to program |
|
353 * |
|
354 * Updates entire Multicast Table Array. |
|
355 * The caller must have a packed mc_addr_list of multicast addresses. |
|
356 **/ |
|
357 void e1000e_update_mc_addr_list_generic(struct e1000_hw *hw, |
|
358 u8 *mc_addr_list, u32 mc_addr_count) |
|
359 { |
|
360 u32 hash_value, hash_bit, hash_reg; |
|
361 int i; |
|
362 |
|
363 /* clear mta_shadow */ |
|
364 memset(&hw->mac.mta_shadow, 0, sizeof(hw->mac.mta_shadow)); |
|
365 |
|
366 /* update mta_shadow from mc_addr_list */ |
|
367 for (i = 0; (u32) i < mc_addr_count; i++) { |
|
368 hash_value = e1000_hash_mc_addr(hw, mc_addr_list); |
|
369 |
|
370 hash_reg = (hash_value >> 5) & (hw->mac.mta_reg_count - 1); |
|
371 hash_bit = hash_value & 0x1F; |
|
372 |
|
373 hw->mac.mta_shadow[hash_reg] |= (1 << hash_bit); |
|
374 mc_addr_list += (ETH_ALEN); |
|
375 } |
|
376 |
|
377 /* replace the entire MTA table */ |
|
378 for (i = hw->mac.mta_reg_count - 1; i >= 0; i--) |
|
379 E1000_WRITE_REG_ARRAY(hw, E1000_MTA, i, hw->mac.mta_shadow[i]); |
|
380 e1e_flush(); |
|
381 } |
|
382 |
|
/**
 * e1000e_clear_hw_cntrs_base - Clear base hardware counters
 * @hw: pointer to the HW structure
 *
 * Clears the base hardware counters by reading the counter registers;
 * the read values are intentionally discarded.
 **/
void e1000e_clear_hw_cntrs_base(struct e1000_hw *hw)
{
	/* CRC errors, symbol errors, missed packets */
	er32(CRCERRS);
	er32(SYMERRS);
	er32(MPC);
	/* collision statistics */
	er32(SCC);
	er32(ECOL);
	er32(MCC);
	er32(LATECOL);
	er32(COLC);
	er32(DC);
	er32(SEC);
	er32(RLEC);
	/* flow control (XON/XOFF) frame counters */
	er32(XONRXC);
	er32(XONTXC);
	er32(XOFFRXC);
	er32(XOFFTXC);
	er32(FCRUC);
	/* good rx/tx packet and octet counters */
	er32(GPRC);
	er32(BPRC);
	er32(MPRC);
	er32(GPTC);
	er32(GORCL);
	er32(GORCH);
	er32(GOTCL);
	er32(GOTCH);
	/* rx error/size counters */
	er32(RNBC);
	er32(RUC);
	er32(RFC);
	er32(ROC);
	er32(RJC);
	/* total octets/packets rx and tx */
	er32(TORL);
	er32(TORH);
	er32(TOTL);
	er32(TOTH);
	er32(TPR);
	er32(TPT);
	er32(MPTC);
	er32(BPTC);
}
|
429 |
|
/**
 * e1000e_check_for_copper_link - Check for link (Copper)
 * @hw: pointer to the HW structure
 *
 * Checks to see of the link status of the hardware has changed.  If a
 * change in link status has been detected, then we read the PHY registers
 * to get the current speed/duplex if link exists.
 *
 * Returns 0 when there is nothing to do or link was handled, a PHY read
 * error code, or -E1000_ERR_CONFIG when speed/duplex are forced.
 **/
s32 e1000e_check_for_copper_link(struct e1000_hw *hw)
{
	struct e1000_mac_info *mac = &hw->mac;
	s32 ret_val;
	bool link;

	/*
	 * We only want to go out to the PHY registers to see if Auto-Neg
	 * has completed and/or if our link status has changed.  The
	 * get_link_status flag is set upon receiving a Link Status
	 * Change or Rx Sequence Error interrupt.
	 */
	if (!mac->get_link_status)
		return 0;

	/*
	 * First we want to see if the MII Status Register reports
	 * link.  If so, then we want to get the current speed/duplex
	 * of the PHY.
	 */
	ret_val = e1000e_phy_has_link_generic(hw, 1, 0, &link);
	if (ret_val)
		return ret_val;

	if (!link)
		return ret_val; /* No link detected */

	mac->get_link_status = false;

	/*
	 * Check if there was DownShift, must be checked
	 * immediately after link-up
	 */
	e1000e_check_downshift(hw);

	/*
	 * If we are forcing speed/duplex, then we simply return since
	 * we have already determined whether we have link or not.
	 */
	if (!mac->autoneg) {
		ret_val = -E1000_ERR_CONFIG;
		return ret_val;
	}

	/*
	 * Auto-Neg is enabled.  Auto Speed Detection takes care
	 * of MAC speed/duplex configuration.  So we only need to
	 * configure Collision Distance in the MAC.
	 */
	e1000e_config_collision_dist(hw);

	/*
	 * Configure Flow Control now that Auto-Neg has completed.
	 * First, we need to restore the desired flow control
	 * settings because we may have had to re-autoneg with a
	 * different link partner.
	 */
	ret_val = e1000e_config_fc_after_link_up(hw);
	if (ret_val) {
		e_dbg("Error configuring flow control\n");
	}

	return ret_val;
}
|
502 |
|
/**
 * e1000e_check_for_fiber_link - Check for link (Fiber)
 * @hw: pointer to the HW structure
 *
 * Checks for link up on the hardware.  If link is not up and we have
 * a signal, then we need to force link up.  Conversely, if link is
 * being forced and /C/ ordered sets are received, auto-negotiation is
 * re-enabled.
 **/
s32 e1000e_check_for_fiber_link(struct e1000_hw *hw)
{
	struct e1000_mac_info *mac = &hw->mac;
	u32 rxcw;
	u32 ctrl;
	u32 status;
	s32 ret_val;

	ctrl = er32(CTRL);
	status = er32(STATUS);
	rxcw = er32(RXCW);

	/*
	 * If we don't have link (auto-negotiation failed or link partner
	 * cannot auto-negotiate), the cable is plugged in (we have signal),
	 * and our link partner is not trying to auto-negotiate with us (we
	 * are receiving idles or data), we need to force link up.  We also
	 * need to give auto-negotiation time to complete, in case the cable
	 * was just plugged in.  The autoneg_failed flag does this.
	 */
	/* (ctrl & E1000_CTRL_SWDPIN1) == 1 == have signal */
	if ((ctrl & E1000_CTRL_SWDPIN1) && (!(status & E1000_STATUS_LU)) &&
	    (!(rxcw & E1000_RXCW_C))) {
		if (mac->autoneg_failed == 0) {
			/* first failure: allow autoneg more time */
			mac->autoneg_failed = 1;
			return 0;
		}
		e_dbg("NOT RXing /C/, disable AutoNeg and force link.\n");

		/* Disable auto-negotiation in the TXCW register */
		ew32(TXCW, (mac->txcw & ~E1000_TXCW_ANE));

		/* Force link-up and also force full-duplex. */
		ctrl = er32(CTRL);
		ctrl |= (E1000_CTRL_SLU | E1000_CTRL_FD);
		ew32(CTRL, ctrl);

		/* Configure Flow Control after forcing link up. */
		ret_val = e1000e_config_fc_after_link_up(hw);
		if (ret_val) {
			e_dbg("Error configuring flow control\n");
			return ret_val;
		}
	} else if ((ctrl & E1000_CTRL_SLU) && (rxcw & E1000_RXCW_C)) {
		/*
		 * If we are forcing link and we are receiving /C/ ordered
		 * sets, re-enable auto-negotiation in the TXCW register
		 * and disable forced link in the Device Control register
		 * in an attempt to auto-negotiate with our link partner.
		 */
		e_dbg("RXing /C/, enable AutoNeg and stop forcing link.\n");
		ew32(TXCW, mac->txcw);
		ew32(CTRL, (ctrl & ~E1000_CTRL_SLU));

		mac->serdes_has_link = true;
	}

	return 0;
}
|
569 |
|
570 /** |
|
571 * e1000e_check_for_serdes_link - Check for link (Serdes) |
|
572 * @hw: pointer to the HW structure |
|
573 * |
|
574 * Checks for link up on the hardware. If link is not up and we have |
|
575 * a signal, then we need to force link up. |
|
576 **/ |
|
577 s32 e1000e_check_for_serdes_link(struct e1000_hw *hw) |
|
578 { |
|
579 struct e1000_mac_info *mac = &hw->mac; |
|
580 u32 rxcw; |
|
581 u32 ctrl; |
|
582 u32 status; |
|
583 s32 ret_val; |
|
584 |
|
585 ctrl = er32(CTRL); |
|
586 status = er32(STATUS); |
|
587 rxcw = er32(RXCW); |
|
588 |
|
589 /* |
|
590 * If we don't have link (auto-negotiation failed or link partner |
|
591 * cannot auto-negotiate), and our link partner is not trying to |
|
592 * auto-negotiate with us (we are receiving idles or data), |
|
593 * we need to force link up. We also need to give auto-negotiation |
|
594 * time to complete. |
|
595 */ |
|
596 /* (ctrl & E1000_CTRL_SWDPIN1) == 1 == have signal */ |
|
597 if ((!(status & E1000_STATUS_LU)) && (!(rxcw & E1000_RXCW_C))) { |
|
598 if (mac->autoneg_failed == 0) { |
|
599 mac->autoneg_failed = 1; |
|
600 return 0; |
|
601 } |
|
602 e_dbg("NOT RXing /C/, disable AutoNeg and force link.\n"); |
|
603 |
|
604 /* Disable auto-negotiation in the TXCW register */ |
|
605 ew32(TXCW, (mac->txcw & ~E1000_TXCW_ANE)); |
|
606 |
|
607 /* Force link-up and also force full-duplex. */ |
|
608 ctrl = er32(CTRL); |
|
609 ctrl |= (E1000_CTRL_SLU | E1000_CTRL_FD); |
|
610 ew32(CTRL, ctrl); |
|
611 |
|
612 /* Configure Flow Control after forcing link up. */ |
|
613 ret_val = e1000e_config_fc_after_link_up(hw); |
|
614 if (ret_val) { |
|
615 e_dbg("Error configuring flow control\n"); |
|
616 return ret_val; |
|
617 } |
|
618 } else if ((ctrl & E1000_CTRL_SLU) && (rxcw & E1000_RXCW_C)) { |
|
619 /* |
|
620 * If we are forcing link and we are receiving /C/ ordered |
|
621 * sets, re-enable auto-negotiation in the TXCW register |
|
622 * and disable forced link in the Device Control register |
|
623 * in an attempt to auto-negotiate with our link partner. |
|
624 */ |
|
625 e_dbg("RXing /C/, enable AutoNeg and stop forcing link.\n"); |
|
626 ew32(TXCW, mac->txcw); |
|
627 ew32(CTRL, (ctrl & ~E1000_CTRL_SLU)); |
|
628 |
|
629 mac->serdes_has_link = true; |
|
630 } else if (!(E1000_TXCW_ANE & er32(TXCW))) { |
|
631 /* |
|
632 * If we force link for non-auto-negotiation switch, check |
|
633 * link status based on MAC synchronization for internal |
|
634 * serdes media type. |
|
635 */ |
|
636 /* SYNCH bit and IV bit are sticky. */ |
|
637 udelay(10); |
|
638 rxcw = er32(RXCW); |
|
639 if (rxcw & E1000_RXCW_SYNCH) { |
|
640 if (!(rxcw & E1000_RXCW_IV)) { |
|
641 mac->serdes_has_link = true; |
|
642 e_dbg("SERDES: Link up - forced.\n"); |
|
643 } |
|
644 } else { |
|
645 mac->serdes_has_link = false; |
|
646 e_dbg("SERDES: Link down - force failed.\n"); |
|
647 } |
|
648 } |
|
649 |
|
650 if (E1000_TXCW_ANE & er32(TXCW)) { |
|
651 status = er32(STATUS); |
|
652 if (status & E1000_STATUS_LU) { |
|
653 /* SYNCH bit and IV bit are sticky, so reread rxcw. */ |
|
654 udelay(10); |
|
655 rxcw = er32(RXCW); |
|
656 if (rxcw & E1000_RXCW_SYNCH) { |
|
657 if (!(rxcw & E1000_RXCW_IV)) { |
|
658 mac->serdes_has_link = true; |
|
659 e_dbg("SERDES: Link up - autoneg " |
|
660 "completed successfully.\n"); |
|
661 } else { |
|
662 mac->serdes_has_link = false; |
|
663 e_dbg("SERDES: Link down - invalid" |
|
664 "codewords detected in autoneg.\n"); |
|
665 } |
|
666 } else { |
|
667 mac->serdes_has_link = false; |
|
668 e_dbg("SERDES: Link down - no sync.\n"); |
|
669 } |
|
670 } else { |
|
671 mac->serdes_has_link = false; |
|
672 e_dbg("SERDES: Link down - autoneg failed\n"); |
|
673 } |
|
674 } |
|
675 |
|
676 return 0; |
|
677 } |
|
678 |
|
679 /** |
|
680 * e1000_set_default_fc_generic - Set flow control default values |
|
681 * @hw: pointer to the HW structure |
|
682 * |
|
683 * Read the EEPROM for the default values for flow control and store the |
|
684 * values. |
|
685 **/ |
|
686 static s32 e1000_set_default_fc_generic(struct e1000_hw *hw) |
|
687 { |
|
688 s32 ret_val; |
|
689 u16 nvm_data; |
|
690 |
|
691 /* |
|
692 * Read and store word 0x0F of the EEPROM. This word contains bits |
|
693 * that determine the hardware's default PAUSE (flow control) mode, |
|
694 * a bit that determines whether the HW defaults to enabling or |
|
695 * disabling auto-negotiation, and the direction of the |
|
696 * SW defined pins. If there is no SW over-ride of the flow |
|
697 * control setting, then the variable hw->fc will |
|
698 * be initialized based on a value in the EEPROM. |
|
699 */ |
|
700 ret_val = e1000_read_nvm(hw, NVM_INIT_CONTROL2_REG, 1, &nvm_data); |
|
701 |
|
702 if (ret_val) { |
|
703 e_dbg("NVM Read Error\n"); |
|
704 return ret_val; |
|
705 } |
|
706 |
|
707 if ((nvm_data & NVM_WORD0F_PAUSE_MASK) == 0) |
|
708 hw->fc.requested_mode = e1000_fc_none; |
|
709 else if ((nvm_data & NVM_WORD0F_PAUSE_MASK) == |
|
710 NVM_WORD0F_ASM_DIR) |
|
711 hw->fc.requested_mode = e1000_fc_tx_pause; |
|
712 else |
|
713 hw->fc.requested_mode = e1000_fc_full; |
|
714 |
|
715 return 0; |
|
716 } |
|
717 |
|
/**
 * e1000e_setup_link - Setup flow control and link settings
 * @hw: pointer to the HW structure
 *
 * Determines which flow control settings to use, then configures flow
 * control.  Calls the appropriate media-specific link configuration
 * function.  Assuming the adapter has a valid link partner, a valid link
 * should be established.  Assumes the hardware has previously been reset
 * and the transmitter and receiver are not enabled.
 *
 * Returns 0 on success, or the error from the default-FC read, the
 * physical-interface setup, or the final watermark programming.
 **/
s32 e1000e_setup_link(struct e1000_hw *hw)
{
	struct e1000_mac_info *mac = &hw->mac;
	s32 ret_val;

	/*
	 * In the case of the phy reset being blocked, we already have a link.
	 * We do not need to set it up again.
	 */
	if (e1000_check_reset_block(hw))
		return 0;

	/*
	 * If requested flow control is set to default, set flow control
	 * based on the EEPROM flow control settings.
	 */
	if (hw->fc.requested_mode == e1000_fc_default) {
		ret_val = e1000_set_default_fc_generic(hw);
		if (ret_val)
			return ret_val;
	}

	/*
	 * Save off the requested flow control mode for use later.  Depending
	 * on the link partner's capabilities, we may or may not use this mode.
	 */
	hw->fc.current_mode = hw->fc.requested_mode;

	e_dbg("After fix-ups FlowControl is now = %x\n",
	      hw->fc.current_mode);

	/* Call the necessary media_type subroutine to configure the link. */
	ret_val = mac->ops.setup_physical_interface(hw);
	if (ret_val)
		return ret_val;

	/*
	 * Initialize the flow control address, type, and PAUSE timer
	 * registers to their default values.  This is done even if flow
	 * control is disabled, because it does not hurt anything to
	 * initialize these registers.
	 */
	e_dbg("Initializing the Flow Control address, type and timer regs\n");
	ew32(FCT, FLOW_CONTROL_TYPE);
	ew32(FCAH, FLOW_CONTROL_ADDRESS_HIGH);
	ew32(FCAL, FLOW_CONTROL_ADDRESS_LOW);

	ew32(FCTTV, hw->fc.pause_time);

	return e1000e_set_fc_watermarks(hw);
}
|
779 |
|
780 /** |
|
781 * e1000_commit_fc_settings_generic - Configure flow control |
|
782 * @hw: pointer to the HW structure |
|
783 * |
|
784 * Write the flow control settings to the Transmit Config Word Register (TXCW) |
|
785 * base on the flow control settings in e1000_mac_info. |
|
786 **/ |
|
787 static s32 e1000_commit_fc_settings_generic(struct e1000_hw *hw) |
|
788 { |
|
789 struct e1000_mac_info *mac = &hw->mac; |
|
790 u32 txcw; |
|
791 |
|
792 /* |
|
793 * Check for a software override of the flow control settings, and |
|
794 * setup the device accordingly. If auto-negotiation is enabled, then |
|
795 * software will have to set the "PAUSE" bits to the correct value in |
|
796 * the Transmit Config Word Register (TXCW) and re-start auto- |
|
797 * negotiation. However, if auto-negotiation is disabled, then |
|
798 * software will have to manually configure the two flow control enable |
|
799 * bits in the CTRL register. |
|
800 * |
|
801 * The possible values of the "fc" parameter are: |
|
802 * 0: Flow control is completely disabled |
|
803 * 1: Rx flow control is enabled (we can receive pause frames, |
|
804 * but not send pause frames). |
|
805 * 2: Tx flow control is enabled (we can send pause frames but we |
|
806 * do not support receiving pause frames). |
|
807 * 3: Both Rx and Tx flow control (symmetric) are enabled. |
|
808 */ |
|
809 switch (hw->fc.current_mode) { |
|
810 case e1000_fc_none: |
|
811 /* Flow control completely disabled by a software over-ride. */ |
|
812 txcw = (E1000_TXCW_ANE | E1000_TXCW_FD); |
|
813 break; |
|
814 case e1000_fc_rx_pause: |
|
815 /* |
|
816 * Rx Flow control is enabled and Tx Flow control is disabled |
|
817 * by a software over-ride. Since there really isn't a way to |
|
818 * advertise that we are capable of Rx Pause ONLY, we will |
|
819 * advertise that we support both symmetric and asymmetric Rx |
|
820 * PAUSE. Later, we will disable the adapter's ability to send |
|
821 * PAUSE frames. |
|
822 */ |
|
823 txcw = (E1000_TXCW_ANE | E1000_TXCW_FD | E1000_TXCW_PAUSE_MASK); |
|
824 break; |
|
825 case e1000_fc_tx_pause: |
|
826 /* |
|
827 * Tx Flow control is enabled, and Rx Flow control is disabled, |
|
828 * by a software over-ride. |
|
829 */ |
|
830 txcw = (E1000_TXCW_ANE | E1000_TXCW_FD | E1000_TXCW_ASM_DIR); |
|
831 break; |
|
832 case e1000_fc_full: |
|
833 /* |
|
834 * Flow control (both Rx and Tx) is enabled by a software |
|
835 * over-ride. |
|
836 */ |
|
837 txcw = (E1000_TXCW_ANE | E1000_TXCW_FD | E1000_TXCW_PAUSE_MASK); |
|
838 break; |
|
839 default: |
|
840 e_dbg("Flow control param set incorrectly\n"); |
|
841 return -E1000_ERR_CONFIG; |
|
842 break; |
|
843 } |
|
844 |
|
845 ew32(TXCW, txcw); |
|
846 mac->txcw = txcw; |
|
847 |
|
848 return 0; |
|
849 } |
|
850 |
|
851 /** |
|
852 * e1000_poll_fiber_serdes_link_generic - Poll for link up |
|
853 * @hw: pointer to the HW structure |
|
854 * |
|
855 * Polls for link up by reading the status register, if link fails to come |
|
856 * up with auto-negotiation, then the link is forced if a signal is detected. |
|
857 **/ |
|
858 static s32 e1000_poll_fiber_serdes_link_generic(struct e1000_hw *hw) |
|
859 { |
|
860 struct e1000_mac_info *mac = &hw->mac; |
|
861 u32 i, status; |
|
862 s32 ret_val; |
|
863 |
|
864 /* |
|
865 * If we have a signal (the cable is plugged in, or assumed true for |
|
866 * serdes media) then poll for a "Link-Up" indication in the Device |
|
867 * Status Register. Time-out if a link isn't seen in 500 milliseconds |
|
868 * seconds (Auto-negotiation should complete in less than 500 |
|
869 * milliseconds even if the other end is doing it in SW). |
|
870 */ |
|
871 for (i = 0; i < FIBER_LINK_UP_LIMIT; i++) { |
|
872 msleep(10); |
|
873 status = er32(STATUS); |
|
874 if (status & E1000_STATUS_LU) |
|
875 break; |
|
876 } |
|
877 if (i == FIBER_LINK_UP_LIMIT) { |
|
878 e_dbg("Never got a valid link from auto-neg!!!\n"); |
|
879 mac->autoneg_failed = 1; |
|
880 /* |
|
881 * AutoNeg failed to achieve a link, so we'll call |
|
882 * mac->check_for_link. This routine will force the |
|
883 * link up if we detect a signal. This will allow us to |
|
884 * communicate with non-autonegotiating link partners. |
|
885 */ |
|
886 ret_val = mac->ops.check_for_link(hw); |
|
887 if (ret_val) { |
|
888 e_dbg("Error while checking for link\n"); |
|
889 return ret_val; |
|
890 } |
|
891 mac->autoneg_failed = 0; |
|
892 } else { |
|
893 mac->autoneg_failed = 0; |
|
894 e_dbg("Valid Link Found\n"); |
|
895 } |
|
896 |
|
897 return 0; |
|
898 } |
|
899 |
|
900 /** |
|
901 * e1000e_setup_fiber_serdes_link - Setup link for fiber/serdes |
|
902 * @hw: pointer to the HW structure |
|
903 * |
|
904 * Configures collision distance and flow control for fiber and serdes |
|
905 * links. Upon successful setup, poll for link. |
|
906 **/ |
|
907 s32 e1000e_setup_fiber_serdes_link(struct e1000_hw *hw) |
|
908 { |
|
909 u32 ctrl; |
|
910 s32 ret_val; |
|
911 |
|
912 ctrl = er32(CTRL); |
|
913 |
|
914 /* Take the link out of reset */ |
|
915 ctrl &= ~E1000_CTRL_LRST; |
|
916 |
|
917 e1000e_config_collision_dist(hw); |
|
918 |
|
919 ret_val = e1000_commit_fc_settings_generic(hw); |
|
920 if (ret_val) |
|
921 return ret_val; |
|
922 |
|
923 /* |
|
924 * Since auto-negotiation is enabled, take the link out of reset (the |
|
925 * link will be in reset, because we previously reset the chip). This |
|
926 * will restart auto-negotiation. If auto-negotiation is successful |
|
927 * then the link-up status bit will be set and the flow control enable |
|
928 * bits (RFCE and TFCE) will be set according to their negotiated value. |
|
929 */ |
|
930 e_dbg("Auto-negotiation enabled\n"); |
|
931 |
|
932 ew32(CTRL, ctrl); |
|
933 e1e_flush(); |
|
934 msleep(1); |
|
935 |
|
936 /* |
|
937 * For these adapters, the SW definable pin 1 is set when the optics |
|
938 * detect a signal. If we have a signal, then poll for a "Link-Up" |
|
939 * indication. |
|
940 */ |
|
941 if (hw->phy.media_type == e1000_media_type_internal_serdes || |
|
942 (er32(CTRL) & E1000_CTRL_SWDPIN1)) { |
|
943 ret_val = e1000_poll_fiber_serdes_link_generic(hw); |
|
944 } else { |
|
945 e_dbg("No signal detected\n"); |
|
946 } |
|
947 |
|
948 return 0; |
|
949 } |
|
950 |
|
951 /** |
|
952 * e1000e_config_collision_dist - Configure collision distance |
|
953 * @hw: pointer to the HW structure |
|
954 * |
|
955 * Configures the collision distance to the default value and is used |
|
956 * during link setup. Currently no func pointer exists and all |
|
957 * implementations are handled in the generic version of this function. |
|
958 **/ |
|
959 void e1000e_config_collision_dist(struct e1000_hw *hw) |
|
960 { |
|
961 u32 tctl; |
|
962 |
|
963 tctl = er32(TCTL); |
|
964 |
|
965 tctl &= ~E1000_TCTL_COLD; |
|
966 tctl |= E1000_COLLISION_DISTANCE << E1000_COLD_SHIFT; |
|
967 |
|
968 ew32(TCTL, tctl); |
|
969 e1e_flush(); |
|
970 } |
|
971 |
|
972 /** |
|
973 * e1000e_set_fc_watermarks - Set flow control high/low watermarks |
|
974 * @hw: pointer to the HW structure |
|
975 * |
|
976 * Sets the flow control high/low threshold (watermark) registers. If |
|
977 * flow control XON frame transmission is enabled, then set XON frame |
|
978 * transmission as well. |
|
979 **/ |
|
980 s32 e1000e_set_fc_watermarks(struct e1000_hw *hw) |
|
981 { |
|
982 u32 fcrtl = 0, fcrth = 0; |
|
983 |
|
984 /* |
|
985 * Set the flow control receive threshold registers. Normally, |
|
986 * these registers will be set to a default threshold that may be |
|
987 * adjusted later by the driver's runtime code. However, if the |
|
988 * ability to transmit pause frames is not enabled, then these |
|
989 * registers will be set to 0. |
|
990 */ |
|
991 if (hw->fc.current_mode & e1000_fc_tx_pause) { |
|
992 /* |
|
993 * We need to set up the Receive Threshold high and low water |
|
994 * marks as well as (optionally) enabling the transmission of |
|
995 * XON frames. |
|
996 */ |
|
997 fcrtl = hw->fc.low_water; |
|
998 fcrtl |= E1000_FCRTL_XONE; |
|
999 fcrth = hw->fc.high_water; |
|
1000 } |
|
1001 ew32(FCRTL, fcrtl); |
|
1002 ew32(FCRTH, fcrth); |
|
1003 |
|
1004 return 0; |
|
1005 } |
|
1006 |
|
1007 /** |
|
1008 * e1000e_force_mac_fc - Force the MAC's flow control settings |
|
1009 * @hw: pointer to the HW structure |
|
1010 * |
|
1011 * Force the MAC's flow control settings. Sets the TFCE and RFCE bits in the |
|
1012 * device control register to reflect the adapter settings. TFCE and RFCE |
|
1013 * need to be explicitly set by software when a copper PHY is used because |
|
1014 * autonegotiation is managed by the PHY rather than the MAC. Software must |
|
1015 * also configure these bits when link is forced on a fiber connection. |
|
1016 **/ |
|
1017 s32 e1000e_force_mac_fc(struct e1000_hw *hw) |
|
1018 { |
|
1019 u32 ctrl; |
|
1020 |
|
1021 ctrl = er32(CTRL); |
|
1022 |
|
1023 /* |
|
1024 * Because we didn't get link via the internal auto-negotiation |
|
1025 * mechanism (we either forced link or we got link via PHY |
|
1026 * auto-neg), we have to manually enable/disable transmit an |
|
1027 * receive flow control. |
|
1028 * |
|
1029 * The "Case" statement below enables/disable flow control |
|
1030 * according to the "hw->fc.current_mode" parameter. |
|
1031 * |
|
1032 * The possible values of the "fc" parameter are: |
|
1033 * 0: Flow control is completely disabled |
|
1034 * 1: Rx flow control is enabled (we can receive pause |
|
1035 * frames but not send pause frames). |
|
1036 * 2: Tx flow control is enabled (we can send pause frames |
|
1037 * frames but we do not receive pause frames). |
|
1038 * 3: Both Rx and Tx flow control (symmetric) is enabled. |
|
1039 * other: No other values should be possible at this point. |
|
1040 */ |
|
1041 e_dbg("hw->fc.current_mode = %u\n", hw->fc.current_mode); |
|
1042 |
|
1043 switch (hw->fc.current_mode) { |
|
1044 case e1000_fc_none: |
|
1045 ctrl &= (~(E1000_CTRL_TFCE | E1000_CTRL_RFCE)); |
|
1046 break; |
|
1047 case e1000_fc_rx_pause: |
|
1048 ctrl &= (~E1000_CTRL_TFCE); |
|
1049 ctrl |= E1000_CTRL_RFCE; |
|
1050 break; |
|
1051 case e1000_fc_tx_pause: |
|
1052 ctrl &= (~E1000_CTRL_RFCE); |
|
1053 ctrl |= E1000_CTRL_TFCE; |
|
1054 break; |
|
1055 case e1000_fc_full: |
|
1056 ctrl |= (E1000_CTRL_TFCE | E1000_CTRL_RFCE); |
|
1057 break; |
|
1058 default: |
|
1059 e_dbg("Flow control param set incorrectly\n"); |
|
1060 return -E1000_ERR_CONFIG; |
|
1061 } |
|
1062 |
|
1063 ew32(CTRL, ctrl); |
|
1064 |
|
1065 return 0; |
|
1066 } |
|
1067 |
|
/**
 * e1000e_config_fc_after_link_up - Configures flow control after link
 * @hw: pointer to the HW structure
 *
 * Checks the status of auto-negotiation after link up to ensure that the
 * speed and duplex were not forced.  If the link needed to be forced, then
 * flow control needs to be forced also.  If auto-negotiation is enabled
 * and did not fail, then we configure flow control based on our link
 * partner.
 *
 * Returns 0 on success (including the case where copper auto-negotiation
 * has not yet completed, which is treated as "nothing to do"), or a
 * negative error code from a PHY read, speed/duplex query, or the forced
 * flow-control write.
 **/
s32 e1000e_config_fc_after_link_up(struct e1000_hw *hw)
{
	struct e1000_mac_info *mac = &hw->mac;
	s32 ret_val = 0;
	u16 mii_status_reg, mii_nway_adv_reg, mii_nway_lp_ability_reg;
	u16 speed, duplex;

	/*
	 * Check for the case where we have fiber media and auto-neg failed
	 * so we had to force link.  In this case, we need to force the
	 * configuration of the MAC to match the "fc" parameter.  For copper,
	 * the PHY (not the MAC) negotiated, so the MAC bits must always be
	 * forced to match.
	 */
	if (mac->autoneg_failed) {
		if (hw->phy.media_type == e1000_media_type_fiber ||
		    hw->phy.media_type == e1000_media_type_internal_serdes)
			ret_val = e1000e_force_mac_fc(hw);
	} else {
		if (hw->phy.media_type == e1000_media_type_copper)
			ret_val = e1000e_force_mac_fc(hw);
	}

	if (ret_val) {
		e_dbg("Error forcing flow control settings\n");
		return ret_val;
	}

	/*
	 * Check for the case where we have copper media and auto-neg is
	 * enabled.  In this case, we need to check and see if Auto-Neg
	 * has completed, and if so, how the PHY and link partner has
	 * flow control configured.
	 */
	if ((hw->phy.media_type == e1000_media_type_copper) && mac->autoneg) {
		/*
		 * Read the MII Status Register and check to see if AutoNeg
		 * has completed.  We read this twice because this reg has
		 * some "sticky" (latched) bits; the second read returns the
		 * current state.
		 */
		ret_val = e1e_rphy(hw, PHY_STATUS, &mii_status_reg);
		if (ret_val)
			return ret_val;
		ret_val = e1e_rphy(hw, PHY_STATUS, &mii_status_reg);
		if (ret_val)
			return ret_val;

		/* Not complete yet: return 0 (ret_val is 0 here) and let a
		 * later link-up pass finish the configuration. */
		if (!(mii_status_reg & MII_SR_AUTONEG_COMPLETE)) {
			e_dbg("Copper PHY and Auto Neg "
			       "has not completed.\n");
			return ret_val;
		}

		/*
		 * The AutoNeg process has completed, so we now need to
		 * read both the Auto Negotiation Advertisement
		 * Register (Address 4) and the Auto_Negotiation Base
		 * Page Ability Register (Address 5) to determine how
		 * flow control was negotiated.
		 */
		ret_val = e1e_rphy(hw, PHY_AUTONEG_ADV, &mii_nway_adv_reg);
		if (ret_val)
			return ret_val;
		ret_val = e1e_rphy(hw, PHY_LP_ABILITY, &mii_nway_lp_ability_reg);
		if (ret_val)
			return ret_val;

		/*
		 * Two bits in the Auto Negotiation Advertisement Register
		 * (Address 4) and two bits in the Auto Negotiation Base
		 * Page Ability Register (Address 5) determine flow control
		 * for both the PHY and the link partner.  The following
		 * table, taken out of the IEEE 802.3ab/D6.0 dated March 25,
		 * 1999, describes these PAUSE resolution bits and how flow
		 * control is determined based upon these settings.
		 * NOTE:  DC = Don't Care
		 *
		 *   LOCAL DEVICE  |   LINK PARTNER
		 * PAUSE | ASM_DIR | PAUSE | ASM_DIR | NIC Resolution
		 *-------|---------|-------|---------|--------------------
		 *   0   |    0    |  DC   |   DC    | e1000_fc_none
		 *   0   |    1    |   0   |   DC    | e1000_fc_none
		 *   0   |    1    |   1   |    0    | e1000_fc_none
		 *   0   |    1    |   1   |    1    | e1000_fc_tx_pause
		 *   1   |    0    |   0   |   DC    | e1000_fc_none
		 *   1   |   DC    |   1   |   DC    | e1000_fc_full
		 *   1   |    1    |   0   |    0    | e1000_fc_none
		 *   1   |    1    |   0   |    1    | e1000_fc_rx_pause
		 *
		 * Are both PAUSE bits set to 1?  If so, this implies
		 * Symmetric Flow Control is enabled at both ends.  The
		 * ASM_DIR bits are irrelevant per the spec.
		 *
		 * For Symmetric Flow Control:
		 *
		 *   LOCAL DEVICE  |   LINK PARTNER
		 * PAUSE | ASM_DIR | PAUSE | ASM_DIR | Result
		 *-------|---------|-------|---------|--------------------
		 *   1   |   DC    |   1   |   DC    | E1000_fc_full
		 *
		 */
		if ((mii_nway_adv_reg & NWAY_AR_PAUSE) &&
		    (mii_nway_lp_ability_reg & NWAY_LPAR_PAUSE)) {
			/*
			 * Now we need to check if the user selected Rx ONLY
			 * of pause frames.  In this case, we had to advertise
			 * FULL flow control because we could not advertise Rx
			 * ONLY.  Hence, we must now check to see if we need to
			 * turn OFF the TRANSMISSION of PAUSE frames.
			 */
			if (hw->fc.requested_mode == e1000_fc_full) {
				hw->fc.current_mode = e1000_fc_full;
				e_dbg("Flow Control = FULL.\r\n");
			} else {
				hw->fc.current_mode = e1000_fc_rx_pause;
				e_dbg("Flow Control = "
				      "RX PAUSE frames only.\r\n");
			}
		}
		/*
		 * For receiving PAUSE frames ONLY.
		 *
		 *   LOCAL DEVICE  |   LINK PARTNER
		 * PAUSE | ASM_DIR | PAUSE | ASM_DIR | Result
		 *-------|---------|-------|---------|--------------------
		 *   0   |    1    |   1   |    1    | e1000_fc_tx_pause
		 */
		else if (!(mii_nway_adv_reg & NWAY_AR_PAUSE) &&
			  (mii_nway_adv_reg & NWAY_AR_ASM_DIR) &&
			  (mii_nway_lp_ability_reg & NWAY_LPAR_PAUSE) &&
			  (mii_nway_lp_ability_reg & NWAY_LPAR_ASM_DIR)) {
			hw->fc.current_mode = e1000_fc_tx_pause;
			e_dbg("Flow Control = Tx PAUSE frames only.\r\n");
		}
		/*
		 * For transmitting PAUSE frames ONLY.
		 *
		 *   LOCAL DEVICE  |   LINK PARTNER
		 * PAUSE | ASM_DIR | PAUSE | ASM_DIR | Result
		 *-------|---------|-------|---------|--------------------
		 *   1   |    1    |   0   |    1    | e1000_fc_rx_pause
		 */
		else if ((mii_nway_adv_reg & NWAY_AR_PAUSE) &&
			 (mii_nway_adv_reg & NWAY_AR_ASM_DIR) &&
			 !(mii_nway_lp_ability_reg & NWAY_LPAR_PAUSE) &&
			 (mii_nway_lp_ability_reg & NWAY_LPAR_ASM_DIR)) {
			hw->fc.current_mode = e1000_fc_rx_pause;
			e_dbg("Flow Control = Rx PAUSE frames only.\r\n");
		} else {
			/*
			 * Per the IEEE spec, at this point flow control
			 * should be disabled.
			 */
			hw->fc.current_mode = e1000_fc_none;
			e_dbg("Flow Control = NONE.\r\n");
		}

		/*
		 * Now we need to do one last check...  If we auto-
		 * negotiated to HALF DUPLEX, flow control should not be
		 * enabled per IEEE 802.3 spec.
		 */
		ret_val = mac->ops.get_link_up_info(hw, &speed, &duplex);
		if (ret_val) {
			e_dbg("Error getting link speed and duplex\n");
			return ret_val;
		}

		if (duplex == HALF_DUPLEX)
			hw->fc.current_mode = e1000_fc_none;

		/*
		 * Now we call a subroutine to actually force the MAC
		 * controller to use the correct flow control settings.
		 */
		ret_val = e1000e_force_mac_fc(hw);
		if (ret_val) {
			e_dbg("Error forcing flow control settings\n");
			return ret_val;
		}
	}

	return 0;
}
|
1260 |
|
1261 /** |
|
1262 * e1000e_get_speed_and_duplex_copper - Retrieve current speed/duplex |
|
1263 * @hw: pointer to the HW structure |
|
1264 * @speed: stores the current speed |
|
1265 * @duplex: stores the current duplex |
|
1266 * |
|
1267 * Read the status register for the current speed/duplex and store the current |
|
1268 * speed and duplex for copper connections. |
|
1269 **/ |
|
1270 s32 e1000e_get_speed_and_duplex_copper(struct e1000_hw *hw, u16 *speed, u16 *duplex) |
|
1271 { |
|
1272 u32 status; |
|
1273 |
|
1274 status = er32(STATUS); |
|
1275 if (status & E1000_STATUS_SPEED_1000) |
|
1276 *speed = SPEED_1000; |
|
1277 else if (status & E1000_STATUS_SPEED_100) |
|
1278 *speed = SPEED_100; |
|
1279 else |
|
1280 *speed = SPEED_10; |
|
1281 |
|
1282 if (status & E1000_STATUS_FD) |
|
1283 *duplex = FULL_DUPLEX; |
|
1284 else |
|
1285 *duplex = HALF_DUPLEX; |
|
1286 |
|
1287 e_dbg("%u Mbps, %s Duplex\n", |
|
1288 *speed == SPEED_1000 ? 1000 : *speed == SPEED_100 ? 100 : 10, |
|
1289 *duplex == FULL_DUPLEX ? "Full" : "Half"); |
|
1290 |
|
1291 return 0; |
|
1292 } |
|
1293 |
|
1294 /** |
|
1295 * e1000e_get_speed_and_duplex_fiber_serdes - Retrieve current speed/duplex |
|
1296 * @hw: pointer to the HW structure |
|
1297 * @speed: stores the current speed |
|
1298 * @duplex: stores the current duplex |
|
1299 * |
|
1300 * Sets the speed and duplex to gigabit full duplex (the only possible option) |
|
1301 * for fiber/serdes links. |
|
1302 **/ |
|
1303 s32 e1000e_get_speed_and_duplex_fiber_serdes(struct e1000_hw *hw, u16 *speed, u16 *duplex) |
|
1304 { |
|
1305 *speed = SPEED_1000; |
|
1306 *duplex = FULL_DUPLEX; |
|
1307 |
|
1308 return 0; |
|
1309 } |
|
1310 |
|
1311 /** |
|
1312 * e1000e_get_hw_semaphore - Acquire hardware semaphore |
|
1313 * @hw: pointer to the HW structure |
|
1314 * |
|
1315 * Acquire the HW semaphore to access the PHY or NVM |
|
1316 **/ |
|
1317 s32 e1000e_get_hw_semaphore(struct e1000_hw *hw) |
|
1318 { |
|
1319 u32 swsm; |
|
1320 s32 timeout = hw->nvm.word_size + 1; |
|
1321 s32 i = 0; |
|
1322 |
|
1323 /* Get the SW semaphore */ |
|
1324 while (i < timeout) { |
|
1325 swsm = er32(SWSM); |
|
1326 if (!(swsm & E1000_SWSM_SMBI)) |
|
1327 break; |
|
1328 |
|
1329 udelay(50); |
|
1330 i++; |
|
1331 } |
|
1332 |
|
1333 if (i == timeout) { |
|
1334 e_dbg("Driver can't access device - SMBI bit is set.\n"); |
|
1335 return -E1000_ERR_NVM; |
|
1336 } |
|
1337 |
|
1338 /* Get the FW semaphore. */ |
|
1339 for (i = 0; i < timeout; i++) { |
|
1340 swsm = er32(SWSM); |
|
1341 ew32(SWSM, swsm | E1000_SWSM_SWESMBI); |
|
1342 |
|
1343 /* Semaphore acquired if bit latched */ |
|
1344 if (er32(SWSM) & E1000_SWSM_SWESMBI) |
|
1345 break; |
|
1346 |
|
1347 udelay(50); |
|
1348 } |
|
1349 |
|
1350 if (i == timeout) { |
|
1351 /* Release semaphores */ |
|
1352 e1000e_put_hw_semaphore(hw); |
|
1353 e_dbg("Driver can't access the NVM\n"); |
|
1354 return -E1000_ERR_NVM; |
|
1355 } |
|
1356 |
|
1357 return 0; |
|
1358 } |
|
1359 |
|
1360 /** |
|
1361 * e1000e_put_hw_semaphore - Release hardware semaphore |
|
1362 * @hw: pointer to the HW structure |
|
1363 * |
|
1364 * Release hardware semaphore used to access the PHY or NVM |
|
1365 **/ |
|
1366 void e1000e_put_hw_semaphore(struct e1000_hw *hw) |
|
1367 { |
|
1368 u32 swsm; |
|
1369 |
|
1370 swsm = er32(SWSM); |
|
1371 swsm &= ~(E1000_SWSM_SMBI | E1000_SWSM_SWESMBI); |
|
1372 ew32(SWSM, swsm); |
|
1373 } |
|
1374 |
|
1375 /** |
|
1376 * e1000e_get_auto_rd_done - Check for auto read completion |
|
1377 * @hw: pointer to the HW structure |
|
1378 * |
|
1379 * Check EEPROM for Auto Read done bit. |
|
1380 **/ |
|
1381 s32 e1000e_get_auto_rd_done(struct e1000_hw *hw) |
|
1382 { |
|
1383 s32 i = 0; |
|
1384 |
|
1385 while (i < AUTO_READ_DONE_TIMEOUT) { |
|
1386 if (er32(EECD) & E1000_EECD_AUTO_RD) |
|
1387 break; |
|
1388 msleep(1); |
|
1389 i++; |
|
1390 } |
|
1391 |
|
1392 if (i == AUTO_READ_DONE_TIMEOUT) { |
|
1393 e_dbg("Auto read by HW from NVM has not completed.\n"); |
|
1394 return -E1000_ERR_RESET; |
|
1395 } |
|
1396 |
|
1397 return 0; |
|
1398 } |
|
1399 |
|
1400 /** |
|
1401 * e1000e_valid_led_default - Verify a valid default LED config |
|
1402 * @hw: pointer to the HW structure |
|
1403 * @data: pointer to the NVM (EEPROM) |
|
1404 * |
|
1405 * Read the EEPROM for the current default LED configuration. If the |
|
1406 * LED configuration is not valid, set to a valid LED configuration. |
|
1407 **/ |
|
1408 s32 e1000e_valid_led_default(struct e1000_hw *hw, u16 *data) |
|
1409 { |
|
1410 s32 ret_val; |
|
1411 |
|
1412 ret_val = e1000_read_nvm(hw, NVM_ID_LED_SETTINGS, 1, data); |
|
1413 if (ret_val) { |
|
1414 e_dbg("NVM Read Error\n"); |
|
1415 return ret_val; |
|
1416 } |
|
1417 |
|
1418 if (*data == ID_LED_RESERVED_0000 || *data == ID_LED_RESERVED_FFFF) |
|
1419 *data = ID_LED_DEFAULT; |
|
1420 |
|
1421 return 0; |
|
1422 } |
|
1423 |
|
/**
 * e1000e_id_led_init - Initialize ID LED control values
 * @hw: pointer to the HW structure
 *
 * Reads the ID LED configuration word from the NVM and uses it to build
 * the two cached LEDCTL images, ledctl_mode1 ("LED off"/setup state) and
 * ledctl_mode2 ("LED on" state), starting from the current LEDCTL
 * register value saved in ledctl_default.  Each of the four LEDs is
 * described by one nibble of the NVM word; each LED occupies one byte of
 * the LEDCTL register.
 **/
s32 e1000e_id_led_init(struct e1000_hw *hw)
{
	struct e1000_mac_info *mac = &hw->mac;
	s32 ret_val;
	const u32 ledctl_mask = 0x000000FF;	/* one LED's byte in LEDCTL */
	const u32 ledctl_on = E1000_LEDCTL_MODE_LED_ON;
	const u32 ledctl_off = E1000_LEDCTL_MODE_LED_OFF;
	u16 data, i, temp;
	const u16 led_mask = 0x0F;		/* one LED's nibble in the NVM word */

	ret_val = hw->nvm.ops.valid_led_default(hw, &data);
	if (ret_val)
		return ret_val;

	/* Both cached modes start from the current hardware state */
	mac->ledctl_default = er32(LEDCTL);
	mac->ledctl_mode1 = mac->ledctl_default;
	mac->ledctl_mode2 = mac->ledctl_default;

	for (i = 0; i < 4; i++) {
		/* i << 2: select LED i's 4-bit config from the NVM word */
		temp = (data >> (i << 2)) & led_mask;
		/* First switch: what LED i does in mode1 (ON1/OFF1 half).
		 * i << 3: LED i's byte offset within LEDCTL. */
		switch (temp) {
		case ID_LED_ON1_DEF2:
		case ID_LED_ON1_ON2:
		case ID_LED_ON1_OFF2:
			mac->ledctl_mode1 &= ~(ledctl_mask << (i << 3));
			mac->ledctl_mode1 |= ledctl_on << (i << 3);
			break;
		case ID_LED_OFF1_DEF2:
		case ID_LED_OFF1_ON2:
		case ID_LED_OFF1_OFF2:
			mac->ledctl_mode1 &= ~(ledctl_mask << (i << 3));
			mac->ledctl_mode1 |= ledctl_off << (i << 3);
			break;
		default:
			/* DEF1: leave LED i at its hardware default in mode1 */
			break;
		}
		/* Second switch: what LED i does in mode2 (ON2/OFF2 half) */
		switch (temp) {
		case ID_LED_DEF1_ON2:
		case ID_LED_ON1_ON2:
		case ID_LED_OFF1_ON2:
			mac->ledctl_mode2 &= ~(ledctl_mask << (i << 3));
			mac->ledctl_mode2 |= ledctl_on << (i << 3);
			break;
		case ID_LED_DEF1_OFF2:
		case ID_LED_ON1_OFF2:
		case ID_LED_OFF1_OFF2:
			mac->ledctl_mode2 &= ~(ledctl_mask << (i << 3));
			mac->ledctl_mode2 |= ledctl_off << (i << 3);
			break;
		default:
			/* DEF2: leave LED i at its hardware default in mode2 */
			break;
		}
	}

	return 0;
}
|
1487 |
|
1488 /** |
|
1489 * e1000e_setup_led_generic - Configures SW controllable LED |
|
1490 * @hw: pointer to the HW structure |
|
1491 * |
|
1492 * This prepares the SW controllable LED for use and saves the current state |
|
1493 * of the LED so it can be later restored. |
|
1494 **/ |
|
1495 s32 e1000e_setup_led_generic(struct e1000_hw *hw) |
|
1496 { |
|
1497 u32 ledctl; |
|
1498 |
|
1499 if (hw->mac.ops.setup_led != e1000e_setup_led_generic) { |
|
1500 return -E1000_ERR_CONFIG; |
|
1501 } |
|
1502 |
|
1503 if (hw->phy.media_type == e1000_media_type_fiber) { |
|
1504 ledctl = er32(LEDCTL); |
|
1505 hw->mac.ledctl_default = ledctl; |
|
1506 /* Turn off LED0 */ |
|
1507 ledctl &= ~(E1000_LEDCTL_LED0_IVRT | |
|
1508 E1000_LEDCTL_LED0_BLINK | |
|
1509 E1000_LEDCTL_LED0_MODE_MASK); |
|
1510 ledctl |= (E1000_LEDCTL_MODE_LED_OFF << |
|
1511 E1000_LEDCTL_LED0_MODE_SHIFT); |
|
1512 ew32(LEDCTL, ledctl); |
|
1513 } else if (hw->phy.media_type == e1000_media_type_copper) { |
|
1514 ew32(LEDCTL, hw->mac.ledctl_mode1); |
|
1515 } |
|
1516 |
|
1517 return 0; |
|
1518 } |
|
1519 |
|
1520 /** |
|
1521 * e1000e_cleanup_led_generic - Set LED config to default operation |
|
1522 * @hw: pointer to the HW structure |
|
1523 * |
|
1524 * Remove the current LED configuration and set the LED configuration |
|
1525 * to the default value, saved from the EEPROM. |
|
1526 **/ |
|
1527 s32 e1000e_cleanup_led_generic(struct e1000_hw *hw) |
|
1528 { |
|
1529 ew32(LEDCTL, hw->mac.ledctl_default); |
|
1530 return 0; |
|
1531 } |
|
1532 |
|
1533 /** |
|
1534 * e1000e_blink_led - Blink LED |
|
1535 * @hw: pointer to the HW structure |
|
1536 * |
|
1537 * Blink the LEDs which are set to be on. |
|
1538 **/ |
|
1539 s32 e1000e_blink_led(struct e1000_hw *hw) |
|
1540 { |
|
1541 u32 ledctl_blink = 0; |
|
1542 u32 i; |
|
1543 |
|
1544 if (hw->phy.media_type == e1000_media_type_fiber) { |
|
1545 /* always blink LED0 for PCI-E fiber */ |
|
1546 ledctl_blink = E1000_LEDCTL_LED0_BLINK | |
|
1547 (E1000_LEDCTL_MODE_LED_ON << E1000_LEDCTL_LED0_MODE_SHIFT); |
|
1548 } else { |
|
1549 /* |
|
1550 * set the blink bit for each LED that's "on" (0x0E) |
|
1551 * in ledctl_mode2 |
|
1552 */ |
|
1553 ledctl_blink = hw->mac.ledctl_mode2; |
|
1554 for (i = 0; i < 4; i++) |
|
1555 if (((hw->mac.ledctl_mode2 >> (i * 8)) & 0xFF) == |
|
1556 E1000_LEDCTL_MODE_LED_ON) |
|
1557 ledctl_blink |= (E1000_LEDCTL_LED0_BLINK << |
|
1558 (i * 8)); |
|
1559 } |
|
1560 |
|
1561 ew32(LEDCTL, ledctl_blink); |
|
1562 |
|
1563 return 0; |
|
1564 } |
|
1565 |
|
1566 /** |
|
1567 * e1000e_led_on_generic - Turn LED on |
|
1568 * @hw: pointer to the HW structure |
|
1569 * |
|
1570 * Turn LED on. |
|
1571 **/ |
|
1572 s32 e1000e_led_on_generic(struct e1000_hw *hw) |
|
1573 { |
|
1574 u32 ctrl; |
|
1575 |
|
1576 switch (hw->phy.media_type) { |
|
1577 case e1000_media_type_fiber: |
|
1578 ctrl = er32(CTRL); |
|
1579 ctrl &= ~E1000_CTRL_SWDPIN0; |
|
1580 ctrl |= E1000_CTRL_SWDPIO0; |
|
1581 ew32(CTRL, ctrl); |
|
1582 break; |
|
1583 case e1000_media_type_copper: |
|
1584 ew32(LEDCTL, hw->mac.ledctl_mode2); |
|
1585 break; |
|
1586 default: |
|
1587 break; |
|
1588 } |
|
1589 |
|
1590 return 0; |
|
1591 } |
|
1592 |
|
1593 /** |
|
1594 * e1000e_led_off_generic - Turn LED off |
|
1595 * @hw: pointer to the HW structure |
|
1596 * |
|
1597 * Turn LED off. |
|
1598 **/ |
|
1599 s32 e1000e_led_off_generic(struct e1000_hw *hw) |
|
1600 { |
|
1601 u32 ctrl; |
|
1602 |
|
1603 switch (hw->phy.media_type) { |
|
1604 case e1000_media_type_fiber: |
|
1605 ctrl = er32(CTRL); |
|
1606 ctrl |= E1000_CTRL_SWDPIN0; |
|
1607 ctrl |= E1000_CTRL_SWDPIO0; |
|
1608 ew32(CTRL, ctrl); |
|
1609 break; |
|
1610 case e1000_media_type_copper: |
|
1611 ew32(LEDCTL, hw->mac.ledctl_mode1); |
|
1612 break; |
|
1613 default: |
|
1614 break; |
|
1615 } |
|
1616 |
|
1617 return 0; |
|
1618 } |
|
1619 |
|
1620 /** |
|
1621 * e1000e_set_pcie_no_snoop - Set PCI-express capabilities |
|
1622 * @hw: pointer to the HW structure |
|
1623 * @no_snoop: bitmap of snoop events |
|
1624 * |
|
1625 * Set the PCI-express register to snoop for events enabled in 'no_snoop'. |
|
1626 **/ |
|
1627 void e1000e_set_pcie_no_snoop(struct e1000_hw *hw, u32 no_snoop) |
|
1628 { |
|
1629 u32 gcr; |
|
1630 |
|
1631 if (no_snoop) { |
|
1632 gcr = er32(GCR); |
|
1633 gcr &= ~(PCIE_NO_SNOOP_ALL); |
|
1634 gcr |= no_snoop; |
|
1635 ew32(GCR, gcr); |
|
1636 } |
|
1637 } |
|
1638 |
|
1639 /** |
|
1640 * e1000e_disable_pcie_master - Disables PCI-express master access |
|
1641 * @hw: pointer to the HW structure |
|
1642 * |
|
1643 * Returns 0 if successful, else returns -10 |
|
1644 * (-E1000_ERR_MASTER_REQUESTS_PENDING) if master disable bit has not caused |
|
1645 * the master requests to be disabled. |
|
1646 * |
|
1647 * Disables PCI-Express master access and verifies there are no pending |
|
1648 * requests. |
|
1649 **/ |
|
1650 s32 e1000e_disable_pcie_master(struct e1000_hw *hw) |
|
1651 { |
|
1652 u32 ctrl; |
|
1653 s32 timeout = MASTER_DISABLE_TIMEOUT; |
|
1654 |
|
1655 ctrl = er32(CTRL); |
|
1656 ctrl |= E1000_CTRL_GIO_MASTER_DISABLE; |
|
1657 ew32(CTRL, ctrl); |
|
1658 |
|
1659 while (timeout) { |
|
1660 if (!(er32(STATUS) & |
|
1661 E1000_STATUS_GIO_MASTER_ENABLE)) |
|
1662 break; |
|
1663 udelay(100); |
|
1664 timeout--; |
|
1665 } |
|
1666 |
|
1667 if (!timeout) { |
|
1668 e_dbg("Master requests are pending.\n"); |
|
1669 return -E1000_ERR_MASTER_REQUESTS_PENDING; |
|
1670 } |
|
1671 |
|
1672 return 0; |
|
1673 } |
|
1674 |
|
1675 /** |
|
1676 * e1000e_reset_adaptive - Reset Adaptive Interframe Spacing |
|
1677 * @hw: pointer to the HW structure |
|
1678 * |
|
1679 * Reset the Adaptive Interframe Spacing throttle to default values. |
|
1680 **/ |
|
1681 void e1000e_reset_adaptive(struct e1000_hw *hw) |
|
1682 { |
|
1683 struct e1000_mac_info *mac = &hw->mac; |
|
1684 |
|
1685 if (!mac->adaptive_ifs) { |
|
1686 e_dbg("Not in Adaptive IFS mode!\n"); |
|
1687 goto out; |
|
1688 } |
|
1689 |
|
1690 mac->current_ifs_val = 0; |
|
1691 mac->ifs_min_val = IFS_MIN; |
|
1692 mac->ifs_max_val = IFS_MAX; |
|
1693 mac->ifs_step_size = IFS_STEP; |
|
1694 mac->ifs_ratio = IFS_RATIO; |
|
1695 |
|
1696 mac->in_ifs_mode = false; |
|
1697 ew32(AIT, 0); |
|
1698 out: |
|
1699 return; |
|
1700 } |
|
1701 |
|
1702 /** |
|
1703 * e1000e_update_adaptive - Update Adaptive Interframe Spacing |
|
1704 * @hw: pointer to the HW structure |
|
1705 * |
|
1706 * Update the Adaptive Interframe Spacing Throttle value based on the |
|
1707 * time between transmitted packets and time between collisions. |
|
1708 **/ |
|
1709 void e1000e_update_adaptive(struct e1000_hw *hw) |
|
1710 { |
|
1711 struct e1000_mac_info *mac = &hw->mac; |
|
1712 |
|
1713 if (!mac->adaptive_ifs) { |
|
1714 e_dbg("Not in Adaptive IFS mode!\n"); |
|
1715 goto out; |
|
1716 } |
|
1717 |
|
1718 if ((mac->collision_delta * mac->ifs_ratio) > mac->tx_packet_delta) { |
|
1719 if (mac->tx_packet_delta > MIN_NUM_XMITS) { |
|
1720 mac->in_ifs_mode = true; |
|
1721 if (mac->current_ifs_val < mac->ifs_max_val) { |
|
1722 if (!mac->current_ifs_val) |
|
1723 mac->current_ifs_val = mac->ifs_min_val; |
|
1724 else |
|
1725 mac->current_ifs_val += |
|
1726 mac->ifs_step_size; |
|
1727 ew32(AIT, mac->current_ifs_val); |
|
1728 } |
|
1729 } |
|
1730 } else { |
|
1731 if (mac->in_ifs_mode && |
|
1732 (mac->tx_packet_delta <= MIN_NUM_XMITS)) { |
|
1733 mac->current_ifs_val = 0; |
|
1734 mac->in_ifs_mode = false; |
|
1735 ew32(AIT, 0); |
|
1736 } |
|
1737 } |
|
1738 out: |
|
1739 return; |
|
1740 } |
|
1741 |
|
1742 /** |
|
1743 * e1000_raise_eec_clk - Raise EEPROM clock |
|
1744 * @hw: pointer to the HW structure |
|
1745 * @eecd: pointer to the EEPROM |
|
1746 * |
|
1747 * Enable/Raise the EEPROM clock bit. |
|
1748 **/ |
|
1749 static void e1000_raise_eec_clk(struct e1000_hw *hw, u32 *eecd) |
|
1750 { |
|
1751 *eecd = *eecd | E1000_EECD_SK; |
|
1752 ew32(EECD, *eecd); |
|
1753 e1e_flush(); |
|
1754 udelay(hw->nvm.delay_usec); |
|
1755 } |
|
1756 |
|
1757 /** |
|
1758 * e1000_lower_eec_clk - Lower EEPROM clock |
|
1759 * @hw: pointer to the HW structure |
|
1760 * @eecd: pointer to the EEPROM |
|
1761 * |
|
1762 * Clear/Lower the EEPROM clock bit. |
|
1763 **/ |
|
1764 static void e1000_lower_eec_clk(struct e1000_hw *hw, u32 *eecd) |
|
1765 { |
|
1766 *eecd = *eecd & ~E1000_EECD_SK; |
|
1767 ew32(EECD, *eecd); |
|
1768 e1e_flush(); |
|
1769 udelay(hw->nvm.delay_usec); |
|
1770 } |
|
1771 |
|
/**
 * e1000_shift_out_eec_bits - Shift data bits out to the EEPROM
 * @hw: pointer to the HW structure
 * @data: data to send to the EEPROM
 * @count: number of bits to shift out
 *
 * Bit-bangs 'count' bits of "data" out to the EEPROM, most significant
 * bit first, by presenting each bit on the DI line and pulsing the clock
 * (raise then lower).  DI is cleared when done.
 **/
static void e1000_shift_out_eec_bits(struct e1000_hw *hw, u16 data, u16 count)
{
	struct e1000_nvm_info *nvm = &hw->nvm;
	u32 eecd = er32(EECD);
	u32 mask;

	/* Start at the most significant of the 'count' bits */
	mask = 0x01 << (count - 1);
	/* NOTE(review): DO is set here only for SPI parts; presumably
	 * required by the SPI interface — confirm against the datasheet. */
	if (nvm->type == e1000_nvm_eeprom_spi)
		eecd |= E1000_EECD_DO;

	do {
		/* Present the current data bit on DI */
		eecd &= ~E1000_EECD_DI;

		if (data & mask)
			eecd |= E1000_EECD_DI;

		ew32(EECD, eecd);
		e1e_flush();

		/* Let DI settle before clocking it in */
		udelay(nvm->delay_usec);

		/* One full clock pulse shifts the bit into the EEPROM */
		e1000_raise_eec_clk(hw, &eecd);
		e1000_lower_eec_clk(hw, &eecd);

		mask >>= 1;
	} while (mask);

	/* Leave DI deasserted after the last bit */
	eecd &= ~E1000_EECD_DI;
	ew32(EECD, eecd);
}
|
1812 |
|
1813 /** |
|
1814 * e1000_shift_in_eec_bits - Shift data bits in from the EEPROM |
|
1815 * @hw: pointer to the HW structure |
|
1816 * @count: number of bits to shift in |
|
1817 * |
|
1818 * In order to read a register from the EEPROM, we need to shift 'count' bits |
|
1819 * in from the EEPROM. Bits are "shifted in" by raising the clock input to |
|
1820 * the EEPROM (setting the SK bit), and then reading the value of the data out |
|
1821 * "DO" bit. During this "shifting in" process the data in "DI" bit should |
|
1822 * always be clear. |
|
1823 **/ |
|
1824 static u16 e1000_shift_in_eec_bits(struct e1000_hw *hw, u16 count) |
|
1825 { |
|
1826 u32 eecd; |
|
1827 u32 i; |
|
1828 u16 data; |
|
1829 |
|
1830 eecd = er32(EECD); |
|
1831 |
|
1832 eecd &= ~(E1000_EECD_DO | E1000_EECD_DI); |
|
1833 data = 0; |
|
1834 |
|
1835 for (i = 0; i < count; i++) { |
|
1836 data <<= 1; |
|
1837 e1000_raise_eec_clk(hw, &eecd); |
|
1838 |
|
1839 eecd = er32(EECD); |
|
1840 |
|
1841 eecd &= ~E1000_EECD_DI; |
|
1842 if (eecd & E1000_EECD_DO) |
|
1843 data |= 1; |
|
1844 |
|
1845 e1000_lower_eec_clk(hw, &eecd); |
|
1846 } |
|
1847 |
|
1848 return data; |
|
1849 } |
|
1850 |
|
1851 /** |
|
1852 * e1000e_poll_eerd_eewr_done - Poll for EEPROM read/write completion |
|
1853 * @hw: pointer to the HW structure |
|
1854 * @ee_reg: EEPROM flag for polling |
|
1855 * |
|
1856 * Polls the EEPROM status bit for either read or write completion based |
|
1857 * upon the value of 'ee_reg'. |
|
1858 **/ |
|
1859 s32 e1000e_poll_eerd_eewr_done(struct e1000_hw *hw, int ee_reg) |
|
1860 { |
|
1861 u32 attempts = 100000; |
|
1862 u32 i, reg = 0; |
|
1863 |
|
1864 for (i = 0; i < attempts; i++) { |
|
1865 if (ee_reg == E1000_NVM_POLL_READ) |
|
1866 reg = er32(EERD); |
|
1867 else |
|
1868 reg = er32(EEWR); |
|
1869 |
|
1870 if (reg & E1000_NVM_RW_REG_DONE) |
|
1871 return 0; |
|
1872 |
|
1873 udelay(5); |
|
1874 } |
|
1875 |
|
1876 return -E1000_ERR_NVM; |
|
1877 } |
|
1878 |
|
1879 /** |
|
1880 * e1000e_acquire_nvm - Generic request for access to EEPROM |
|
1881 * @hw: pointer to the HW structure |
|
1882 * |
|
1883 * Set the EEPROM access request bit and wait for EEPROM access grant bit. |
|
1884 * Return successful if access grant bit set, else clear the request for |
|
1885 * EEPROM access and return -E1000_ERR_NVM (-1). |
|
1886 **/ |
|
1887 s32 e1000e_acquire_nvm(struct e1000_hw *hw) |
|
1888 { |
|
1889 u32 eecd = er32(EECD); |
|
1890 s32 timeout = E1000_NVM_GRANT_ATTEMPTS; |
|
1891 |
|
1892 ew32(EECD, eecd | E1000_EECD_REQ); |
|
1893 eecd = er32(EECD); |
|
1894 |
|
1895 while (timeout) { |
|
1896 if (eecd & E1000_EECD_GNT) |
|
1897 break; |
|
1898 udelay(5); |
|
1899 eecd = er32(EECD); |
|
1900 timeout--; |
|
1901 } |
|
1902 |
|
1903 if (!timeout) { |
|
1904 eecd &= ~E1000_EECD_REQ; |
|
1905 ew32(EECD, eecd); |
|
1906 e_dbg("Could not acquire NVM grant\n"); |
|
1907 return -E1000_ERR_NVM; |
|
1908 } |
|
1909 |
|
1910 return 0; |
|
1911 } |
|
1912 |
|
/**
 * e1000_standby_nvm - Return EEPROM to standby state
 * @hw: pointer to the HW structure
 *
 * Return the EEPROM to a standby state.  For SPI parts this is done by
 * raising and then lowering chip select, which terminates any command
 * in progress; a part-specific delay follows each edge.
 **/
static void e1000_standby_nvm(struct e1000_hw *hw)
{
	struct e1000_nvm_info *nvm = &hw->nvm;
	u32 eecd = er32(EECD);

	if (nvm->type == e1000_nvm_eeprom_spi) {
		/* Toggle CS to flush commands */
		eecd |= E1000_EECD_CS;
		ew32(EECD, eecd);
		e1e_flush();
		/* delay_usec is the per-part settle time after each CS edge */
		udelay(nvm->delay_usec);
		eecd &= ~E1000_EECD_CS;
		ew32(EECD, eecd);
		e1e_flush();
		udelay(nvm->delay_usec);
	}
}
|
1936 |
|
1937 /** |
|
1938 * e1000_stop_nvm - Terminate EEPROM command |
|
1939 * @hw: pointer to the HW structure |
|
1940 * |
|
1941 * Terminates the current command by inverting the EEPROM's chip select pin. |
|
1942 **/ |
|
1943 static void e1000_stop_nvm(struct e1000_hw *hw) |
|
1944 { |
|
1945 u32 eecd; |
|
1946 |
|
1947 eecd = er32(EECD); |
|
1948 if (hw->nvm.type == e1000_nvm_eeprom_spi) { |
|
1949 /* Pull CS high */ |
|
1950 eecd |= E1000_EECD_CS; |
|
1951 e1000_lower_eec_clk(hw, &eecd); |
|
1952 } |
|
1953 } |
|
1954 |
|
1955 /** |
|
1956 * e1000e_release_nvm - Release exclusive access to EEPROM |
|
1957 * @hw: pointer to the HW structure |
|
1958 * |
|
1959 * Stop any current commands to the EEPROM and clear the EEPROM request bit. |
|
1960 **/ |
|
1961 void e1000e_release_nvm(struct e1000_hw *hw) |
|
1962 { |
|
1963 u32 eecd; |
|
1964 |
|
1965 e1000_stop_nvm(hw); |
|
1966 |
|
1967 eecd = er32(EECD); |
|
1968 eecd &= ~E1000_EECD_REQ; |
|
1969 ew32(EECD, eecd); |
|
1970 } |
|
1971 |
|
1972 /** |
|
1973 * e1000_ready_nvm_eeprom - Prepares EEPROM for read/write |
|
1974 * @hw: pointer to the HW structure |
|
1975 * |
|
1976 * Setups the EEPROM for reading and writing. |
|
1977 **/ |
|
1978 static s32 e1000_ready_nvm_eeprom(struct e1000_hw *hw) |
|
1979 { |
|
1980 struct e1000_nvm_info *nvm = &hw->nvm; |
|
1981 u32 eecd = er32(EECD); |
|
1982 u16 timeout = 0; |
|
1983 u8 spi_stat_reg; |
|
1984 |
|
1985 if (nvm->type == e1000_nvm_eeprom_spi) { |
|
1986 /* Clear SK and CS */ |
|
1987 eecd &= ~(E1000_EECD_CS | E1000_EECD_SK); |
|
1988 ew32(EECD, eecd); |
|
1989 udelay(1); |
|
1990 timeout = NVM_MAX_RETRY_SPI; |
|
1991 |
|
1992 /* |
|
1993 * Read "Status Register" repeatedly until the LSB is cleared. |
|
1994 * The EEPROM will signal that the command has been completed |
|
1995 * by clearing bit 0 of the internal status register. If it's |
|
1996 * not cleared within 'timeout', then error out. |
|
1997 */ |
|
1998 while (timeout) { |
|
1999 e1000_shift_out_eec_bits(hw, NVM_RDSR_OPCODE_SPI, |
|
2000 hw->nvm.opcode_bits); |
|
2001 spi_stat_reg = (u8)e1000_shift_in_eec_bits(hw, 8); |
|
2002 if (!(spi_stat_reg & NVM_STATUS_RDY_SPI)) |
|
2003 break; |
|
2004 |
|
2005 udelay(5); |
|
2006 e1000_standby_nvm(hw); |
|
2007 timeout--; |
|
2008 } |
|
2009 |
|
2010 if (!timeout) { |
|
2011 e_dbg("SPI NVM Status error\n"); |
|
2012 return -E1000_ERR_NVM; |
|
2013 } |
|
2014 } |
|
2015 |
|
2016 return 0; |
|
2017 } |
|
2018 |
|
2019 /** |
|
2020 * e1000e_read_nvm_eerd - Reads EEPROM using EERD register |
|
2021 * @hw: pointer to the HW structure |
|
2022 * @offset: offset of word in the EEPROM to read |
|
2023 * @words: number of words to read |
|
2024 * @data: word read from the EEPROM |
|
2025 * |
|
2026 * Reads a 16 bit word from the EEPROM using the EERD register. |
|
2027 **/ |
|
2028 s32 e1000e_read_nvm_eerd(struct e1000_hw *hw, u16 offset, u16 words, u16 *data) |
|
2029 { |
|
2030 struct e1000_nvm_info *nvm = &hw->nvm; |
|
2031 u32 i, eerd = 0; |
|
2032 s32 ret_val = 0; |
|
2033 |
|
2034 /* |
|
2035 * A check for invalid values: offset too large, too many words, |
|
2036 * too many words for the offset, and not enough words. |
|
2037 */ |
|
2038 if ((offset >= nvm->word_size) || (words > (nvm->word_size - offset)) || |
|
2039 (words == 0)) { |
|
2040 e_dbg("nvm parameter(s) out of bounds\n"); |
|
2041 return -E1000_ERR_NVM; |
|
2042 } |
|
2043 |
|
2044 for (i = 0; i < words; i++) { |
|
2045 eerd = ((offset+i) << E1000_NVM_RW_ADDR_SHIFT) + |
|
2046 E1000_NVM_RW_REG_START; |
|
2047 |
|
2048 ew32(EERD, eerd); |
|
2049 ret_val = e1000e_poll_eerd_eewr_done(hw, E1000_NVM_POLL_READ); |
|
2050 if (ret_val) |
|
2051 break; |
|
2052 |
|
2053 data[i] = (er32(EERD) >> E1000_NVM_RW_REG_DATA); |
|
2054 } |
|
2055 |
|
2056 return ret_val; |
|
2057 } |
|
2058 |
|
/**
 * e1000e_write_nvm_spi - Write to EEPROM using SPI
 * @hw: pointer to the HW structure
 * @offset: offset within the EEPROM to be written to
 * @words: number of words to write
 * @data: 16 bit word(s) to be written to the EEPROM
 *
 * Writes data to EEPROM at offset using SPI interface.  Each outer-loop
 * iteration issues WREN plus a WRITE command, then streams words until a
 * page boundary is crossed, at which point the device must be returned
 * to standby before the next page.
 *
 * If e1000e_update_nvm_checksum is not called after this function, the
 * EEPROM will most likely contain an invalid checksum.
 **/
s32 e1000e_write_nvm_spi(struct e1000_hw *hw, u16 offset, u16 words, u16 *data)
{
	struct e1000_nvm_info *nvm = &hw->nvm;
	s32 ret_val;
	u16 widx = 0;	/* index of the next word to write */

	/*
	 * A check for invalid values: offset too large, too many words,
	 * and not enough words.
	 */
	if ((offset >= nvm->word_size) || (words > (nvm->word_size - offset)) ||
	    (words == 0)) {
		e_dbg("nvm parameter(s) out of bounds\n");
		return -E1000_ERR_NVM;
	}

	ret_val = nvm->ops.acquire(hw);
	if (ret_val)
		return ret_val;

	msleep(10);

	while (widx < words) {
		u8 write_opcode = NVM_WRITE_OPCODE_SPI;

		/* Wait until the part reports ready before each page */
		ret_val = e1000_ready_nvm_eeprom(hw);
		if (ret_val) {
			nvm->ops.release(hw);
			return ret_val;
		}

		e1000_standby_nvm(hw);

		/* Send the WRITE ENABLE command (8 bit opcode) */
		e1000_shift_out_eec_bits(hw, NVM_WREN_OPCODE_SPI,
					 nvm->opcode_bits);

		e1000_standby_nvm(hw);

		/*
		 * Some SPI eeproms use the 8th address bit embedded in the
		 * opcode
		 */
		if ((nvm->address_bits == 8) && (offset >= 128))
			write_opcode |= NVM_A8_OPCODE_SPI;

		/* Send the Write command (8-bit opcode + addr) */
		e1000_shift_out_eec_bits(hw, write_opcode, nvm->opcode_bits);
		/* Byte address: word offset * 2 */
		e1000_shift_out_eec_bits(hw, (u16)((offset + widx) * 2),
					 nvm->address_bits);

		/* Loop to allow for up to whole page write of eeprom */
		while (widx < words) {
			/* Words are sent big-endian: swap the byte order */
			u16 word_out = data[widx];
			word_out = (word_out >> 8) | (word_out << 8);
			e1000_shift_out_eec_bits(hw, word_out, 16);
			widx++;

			/* At a page boundary, restart with a new WRITE */
			if ((((offset + widx) * 2) % nvm->page_size) == 0) {
				e1000_standby_nvm(hw);
				break;
			}
		}
	}

	msleep(10);
	nvm->ops.release(hw);
	return 0;
}
|
2140 |
|
/**
 * e1000_read_mac_addr_generic - Read device MAC address
 * @hw: pointer to the HW structure
 *
 * Reads the device MAC address from receive address registers RAL(0)/
 * RAH(0) and stores it in both hw->mac.perm_addr and hw->mac.addr.
 * NOTE(review): despite earlier driver versions, this path does not
 * read the EEPROM or increment the last bit for a second port - the
 * address is taken as programmed in RAR[0].
 **/
s32 e1000_read_mac_addr_generic(struct e1000_hw *hw)
{
	u32 rar_high;
	u32 rar_low;
	u16 i;

	rar_high = er32(RAH(0));
	rar_low = er32(RAL(0));

	/* Low dword holds the first four bytes, least significant first */
	for (i = 0; i < E1000_RAL_MAC_ADDR_LEN; i++)
		hw->mac.perm_addr[i] = (u8)(rar_low >> (i*8));

	/* High dword holds the remaining two bytes */
	for (i = 0; i < E1000_RAH_MAC_ADDR_LEN; i++)
		hw->mac.perm_addr[i+4] = (u8)(rar_high >> (i*8));

	/* Current address starts out equal to the permanent address */
	for (i = 0; i < ETH_ALEN; i++)
		hw->mac.addr[i] = hw->mac.perm_addr[i];

	return 0;
}
|
2169 |
|
2170 /** |
|
2171 * e1000e_validate_nvm_checksum_generic - Validate EEPROM checksum |
|
2172 * @hw: pointer to the HW structure |
|
2173 * |
|
2174 * Calculates the EEPROM checksum by reading/adding each word of the EEPROM |
|
2175 * and then verifies that the sum of the EEPROM is equal to 0xBABA. |
|
2176 **/ |
|
2177 s32 e1000e_validate_nvm_checksum_generic(struct e1000_hw *hw) |
|
2178 { |
|
2179 s32 ret_val; |
|
2180 u16 checksum = 0; |
|
2181 u16 i, nvm_data; |
|
2182 |
|
2183 for (i = 0; i < (NVM_CHECKSUM_REG + 1); i++) { |
|
2184 ret_val = e1000_read_nvm(hw, i, 1, &nvm_data); |
|
2185 if (ret_val) { |
|
2186 e_dbg("NVM Read Error\n"); |
|
2187 return ret_val; |
|
2188 } |
|
2189 checksum += nvm_data; |
|
2190 } |
|
2191 |
|
2192 if (checksum != (u16) NVM_SUM) { |
|
2193 e_dbg("NVM Checksum Invalid\n"); |
|
2194 return -E1000_ERR_NVM; |
|
2195 } |
|
2196 |
|
2197 return 0; |
|
2198 } |
|
2199 |
|
2200 /** |
|
2201 * e1000e_update_nvm_checksum_generic - Update EEPROM checksum |
|
2202 * @hw: pointer to the HW structure |
|
2203 * |
|
2204 * Updates the EEPROM checksum by reading/adding each word of the EEPROM |
|
2205 * up to the checksum. Then calculates the EEPROM checksum and writes the |
|
2206 * value to the EEPROM. |
|
2207 **/ |
|
2208 s32 e1000e_update_nvm_checksum_generic(struct e1000_hw *hw) |
|
2209 { |
|
2210 s32 ret_val; |
|
2211 u16 checksum = 0; |
|
2212 u16 i, nvm_data; |
|
2213 |
|
2214 for (i = 0; i < NVM_CHECKSUM_REG; i++) { |
|
2215 ret_val = e1000_read_nvm(hw, i, 1, &nvm_data); |
|
2216 if (ret_val) { |
|
2217 e_dbg("NVM Read Error while updating checksum.\n"); |
|
2218 return ret_val; |
|
2219 } |
|
2220 checksum += nvm_data; |
|
2221 } |
|
2222 checksum = (u16) NVM_SUM - checksum; |
|
2223 ret_val = e1000_write_nvm(hw, NVM_CHECKSUM_REG, 1, &checksum); |
|
2224 if (ret_val) |
|
2225 e_dbg("NVM Write Error while updating checksum.\n"); |
|
2226 |
|
2227 return ret_val; |
|
2228 } |
|
2229 |
|
2230 /** |
|
2231 * e1000e_reload_nvm - Reloads EEPROM |
|
2232 * @hw: pointer to the HW structure |
|
2233 * |
|
2234 * Reloads the EEPROM by setting the "Reinitialize from EEPROM" bit in the |
|
2235 * extended control register. |
|
2236 **/ |
|
2237 void e1000e_reload_nvm(struct e1000_hw *hw) |
|
2238 { |
|
2239 u32 ctrl_ext; |
|
2240 |
|
2241 udelay(10); |
|
2242 ctrl_ext = er32(CTRL_EXT); |
|
2243 ctrl_ext |= E1000_CTRL_EXT_EE_RST; |
|
2244 ew32(CTRL_EXT, ctrl_ext); |
|
2245 e1e_flush(); |
|
2246 } |
|
2247 |
|
2248 /** |
|
2249 * e1000_calculate_checksum - Calculate checksum for buffer |
|
2250 * @buffer: pointer to EEPROM |
|
2251 * @length: size of EEPROM to calculate a checksum for |
|
2252 * |
|
2253 * Calculates the checksum for some buffer on a specified length. The |
|
2254 * checksum calculated is returned. |
|
2255 **/ |
|
2256 static u8 e1000_calculate_checksum(u8 *buffer, u32 length) |
|
2257 { |
|
2258 u32 i; |
|
2259 u8 sum = 0; |
|
2260 |
|
2261 if (!buffer) |
|
2262 return 0; |
|
2263 |
|
2264 for (i = 0; i < length; i++) |
|
2265 sum += buffer[i]; |
|
2266 |
|
2267 return (u8) (0 - sum); |
|
2268 } |
|
2269 |
|
/**
 * e1000_mng_enable_host_if - Checks host interface is enabled
 * @hw: pointer to the HW structure
 *
 * Returns 0 upon success, else -E1000_ERR_HOST_INTERFACE_COMMAND.
 *
 * This function checks whether the HOST IF is enabled for command operation
 * and also checks whether the previous command is completed.  It busy waits
 * (up to E1000_MNG_DHCP_COMMAND_TIMEOUT ms) in case the previous command
 * is not completed.
 **/
static s32 e1000_mng_enable_host_if(struct e1000_hw *hw)
{
	u32 hicr;
	u8 i;

	/* Without a valid ARC subsystem there is no firmware to talk to */
	if (!(hw->mac.arc_subsystem_valid)) {
		e_dbg("ARC subsystem not valid.\n");
		return -E1000_ERR_HOST_INTERFACE_COMMAND;
	}

	/* Check that the host interface is enabled. */
	hicr = er32(HICR);
	if ((hicr & E1000_HICR_EN) == 0) {
		e_dbg("E1000_HOST_EN bit disabled.\n");
		return -E1000_ERR_HOST_INTERFACE_COMMAND;
	}
	/* check the previous command is completed */
	for (i = 0; i < E1000_MNG_DHCP_COMMAND_TIMEOUT; i++) {
		hicr = er32(HICR);
		/* HICR_C clears when the firmware consumes the command */
		if (!(hicr & E1000_HICR_C))
			break;
		mdelay(1);
	}

	if (i == E1000_MNG_DHCP_COMMAND_TIMEOUT) {
		e_dbg("Previous command timeout failed .\n");
		return -E1000_ERR_HOST_INTERFACE_COMMAND;
	}

	return 0;
}
|
2311 |
|
2312 /** |
|
2313 * e1000e_check_mng_mode_generic - check management mode |
|
2314 * @hw: pointer to the HW structure |
|
2315 * |
|
2316 * Reads the firmware semaphore register and returns true (>0) if |
|
2317 * manageability is enabled, else false (0). |
|
2318 **/ |
|
2319 bool e1000e_check_mng_mode_generic(struct e1000_hw *hw) |
|
2320 { |
|
2321 u32 fwsm = er32(FWSM); |
|
2322 |
|
2323 return (fwsm & E1000_FWSM_MODE_MASK) == |
|
2324 (E1000_MNG_IAMT_MODE << E1000_FWSM_MODE_SHIFT); |
|
2325 } |
|
2326 |
|
/**
 * e1000e_enable_tx_pkt_filtering - Enable packet filtering on Tx
 * @hw: pointer to the HW structure
 *
 * Enables packet filtering on transmit packets if manageability is enabled
 * and host interface is enabled.  The decision is cached in
 * hw->mac.tx_pkt_filtering and also returned.
 **/
bool e1000e_enable_tx_pkt_filtering(struct e1000_hw *hw)
{
	struct e1000_host_mng_dhcp_cookie *hdr = &hw->mng_cookie;
	/* The cookie is read dword-at-a-time through this aliased view.
	 * NOTE(review): struct/u32 aliasing - relies on kernel build flags
	 * (no strict aliasing). */
	u32 *buffer = (u32 *)&hw->mng_cookie;
	u32 offset;
	s32 ret_val, hdr_csum, csum;
	u8 i, len;

	hw->mac.tx_pkt_filtering = true;

	/* No manageability, no filtering */
	if (!e1000e_check_mng_mode(hw)) {
		hw->mac.tx_pkt_filtering = false;
		goto out;
	}

	/*
	 * If we can't read from the host interface for whatever
	 * reason, disable filtering.
	 */
	ret_val = e1000_mng_enable_host_if(hw);
	if (ret_val) {
		hw->mac.tx_pkt_filtering = false;
		goto out;
	}

	/* Read in the header.  Length and offset are in dwords. */
	len = E1000_MNG_DHCP_COOKIE_LENGTH >> 2;
	offset = E1000_MNG_DHCP_COOKIE_OFFSET >> 2;
	for (i = 0; i < len; i++)
		*(buffer + i) = E1000_READ_REG_ARRAY(hw, E1000_HOST_IF, offset + i);
	/* Checksum must be computed with the checksum field zeroed */
	hdr_csum = hdr->checksum;
	hdr->checksum = 0;
	csum = e1000_calculate_checksum((u8 *)hdr,
					E1000_MNG_DHCP_COOKIE_LENGTH);
	/*
	 * If either the checksums or signature don't match, then
	 * the cookie area isn't considered valid, in which case we
	 * take the safe route of assuming Tx filtering is enabled.
	 */
	if ((hdr_csum != csum) || (hdr->signature != E1000_IAMT_SIGNATURE)) {
		hw->mac.tx_pkt_filtering = true;
		goto out;
	}

	/* Cookie area is valid, make the final check for filtering. */
	if (!(hdr->status & E1000_MNG_DHCP_COOKIE_STATUS_PARSING)) {
		hw->mac.tx_pkt_filtering = false;
		goto out;
	}

out:
	return hw->mac.tx_pkt_filtering;
}
|
2388 |
|
/**
 * e1000_mng_write_cmd_header - Writes manageability command header
 * @hw: pointer to the HW structure
 * @hdr: pointer to the host interface command header
 *
 * Computes the header checksum, then writes the whole command header
 * into the host interface RAM, one dword at a time, flushing each write.
 * Always returns 0.
 **/
static s32 e1000_mng_write_cmd_header(struct e1000_hw *hw,
				      struct e1000_host_mng_command_header *hdr)
{
	u16 i, length = sizeof(struct e1000_host_mng_command_header);

	/* Write the whole command header structure with new checksum. */

	hdr->checksum = e1000_calculate_checksum((u8 *)hdr, length);

	/* Convert byte length to dword count for the register writes */
	length >>= 2;
	/* Write the relevant command block into the ram area. */
	for (i = 0; i < length; i++) {
		E1000_WRITE_REG_ARRAY(hw, E1000_HOST_IF, i,
				      *((u32 *) hdr + i));
		e1e_flush();
	}

	return 0;
}
|
2415 |
|
/**
 * e1000_mng_host_if_write - Write to the manageability host interface
 * @hw: pointer to the HW structure
 * @buffer: pointer to the host interface buffer
 * @length: size of the buffer
 * @offset: location in the buffer to write to
 * @sum: sum of the data (not checksum)
 *
 * This function writes the buffer content at the offset given on the host if.
 * It also does alignment considerations to do the writes in most efficient
 * way.  Also fills up the sum of the buffer in *buffer parameter.
 * The host interface is addressed in dwords, so an unaligned start, the
 * aligned middle, and an unaligned tail are each handled separately.
 **/
static s32 e1000_mng_host_if_write(struct e1000_hw *hw, u8 *buffer,
				   u16 length, u16 offset, u8 *sum)
{
	u8 *tmp;		/* byte view of the staging dword */
	u8 *bufptr = buffer;
	u32 data = 0;
	u16 remaining, i, j, prev_bytes;

	/* sum = only sum of the data and it is not checksum */

	if (length == 0 || offset + length > E1000_HI_MAX_MNG_DATA_LENGTH)
		return -E1000_ERR_PARAM;

	tmp = (u8 *)&data;
	/* Bytes already occupied in the first (partial) dword */
	prev_bytes = offset & 0x3;
	offset >>= 2;

	if (prev_bytes) {
		/* Read-modify-write the partial leading dword */
		data = E1000_READ_REG_ARRAY(hw, E1000_HOST_IF, offset);
		for (j = prev_bytes; j < sizeof(u32); j++) {
			*(tmp + j) = *bufptr++;
			*sum += *(tmp + j);
		}
		E1000_WRITE_REG_ARRAY(hw, E1000_HOST_IF, offset, data);
		length -= j - prev_bytes;
		offset++;
	}

	/* Bytes left over after the last full dword */
	remaining = length & 0x3;
	length -= remaining;

	/* Calculate length in DWORDs */
	length >>= 2;

	/*
	 * The device driver writes the relevant command block into the
	 * ram area.
	 */
	for (i = 0; i < length; i++) {
		for (j = 0; j < sizeof(u32); j++) {
			*(tmp + j) = *bufptr++;
			*sum += *(tmp + j);
		}

		E1000_WRITE_REG_ARRAY(hw, E1000_HOST_IF, offset + i, data);
	}
	if (remaining) {
		/* Zero-pad the trailing partial dword */
		for (j = 0; j < sizeof(u32); j++) {
			if (j < remaining)
				*(tmp + j) = *bufptr++;
			else
				*(tmp + j) = 0;

			*sum += *(tmp + j);
		}
		E1000_WRITE_REG_ARRAY(hw, E1000_HOST_IF, offset + i, data);
	}

	return 0;
}
|
2488 |
|
/**
 * e1000e_mng_write_dhcp_info - Writes DHCP info to host interface
 * @hw: pointer to the HW structure
 * @buffer: pointer to the host interface
 * @length: size of the buffer
 *
 * Writes the DHCP information to the host interface: payload first (its
 * byte sum is accumulated into the header checksum field), then the
 * command header, and finally sets HICR_C to notify the ARC firmware.
 **/
s32 e1000e_mng_write_dhcp_info(struct e1000_hw *hw, u8 *buffer, u16 length)
{
	struct e1000_host_mng_command_header hdr;
	s32 ret_val;
	u32 hicr;

	hdr.command_id = E1000_MNG_DHCP_TX_PAYLOAD_CMD;
	hdr.command_length = length;
	hdr.reserved1 = 0;
	hdr.reserved2 = 0;
	hdr.checksum = 0;

	/* Enable the host interface */
	ret_val = e1000_mng_enable_host_if(hw);
	if (ret_val)
		return ret_val;

	/* Populate the host interface with the contents of "buffer". */
	ret_val = e1000_mng_host_if_write(hw, buffer, length,
					  sizeof(hdr), &(hdr.checksum));
	if (ret_val)
		return ret_val;

	/* Write the manageability command header */
	ret_val = e1000_mng_write_cmd_header(hw, &hdr);
	if (ret_val)
		return ret_val;

	/* Tell the ARC a new command is pending. */
	hicr = er32(HICR);
	ew32(HICR, hicr | E1000_HICR_C);

	return 0;
}
|
2531 |
|
2532 /** |
|
2533 * e1000e_enable_mng_pass_thru - Check if management passthrough is needed |
|
2534 * @hw: pointer to the HW structure |
|
2535 * |
|
2536 * Verifies the hardware needs to leave interface enabled so that frames can |
|
2537 * be directed to and from the management interface. |
|
2538 **/ |
|
2539 bool e1000e_enable_mng_pass_thru(struct e1000_hw *hw) |
|
2540 { |
|
2541 u32 manc; |
|
2542 u32 fwsm, factps; |
|
2543 bool ret_val = false; |
|
2544 |
|
2545 manc = er32(MANC); |
|
2546 |
|
2547 if (!(manc & E1000_MANC_RCV_TCO_EN)) |
|
2548 goto out; |
|
2549 |
|
2550 if (hw->mac.has_fwsm) { |
|
2551 fwsm = er32(FWSM); |
|
2552 factps = er32(FACTPS); |
|
2553 |
|
2554 if (!(factps & E1000_FACTPS_MNGCG) && |
|
2555 ((fwsm & E1000_FWSM_MODE_MASK) == |
|
2556 (e1000_mng_mode_pt << E1000_FWSM_MODE_SHIFT))) { |
|
2557 ret_val = true; |
|
2558 goto out; |
|
2559 } |
|
2560 } else if ((hw->mac.type == e1000_82574) || |
|
2561 (hw->mac.type == e1000_82583)) { |
|
2562 u16 data; |
|
2563 |
|
2564 factps = er32(FACTPS); |
|
2565 e1000_read_nvm(hw, NVM_INIT_CONTROL2_REG, 1, &data); |
|
2566 |
|
2567 if (!(factps & E1000_FACTPS_MNGCG) && |
|
2568 ((data & E1000_NVM_INIT_CTRL2_MNGM) == |
|
2569 (e1000_mng_mode_pt << 13))) { |
|
2570 ret_val = true; |
|
2571 goto out; |
|
2572 } |
|
2573 } else if ((manc & E1000_MANC_SMBUS_EN) && |
|
2574 !(manc & E1000_MANC_ASF_EN)) { |
|
2575 ret_val = true; |
|
2576 goto out; |
|
2577 } |
|
2578 |
|
2579 out: |
|
2580 return ret_val; |
|
2581 } |
|
2582 |
|
2583 s32 e1000e_read_pba_num(struct e1000_hw *hw, u32 *pba_num) |
|
2584 { |
|
2585 s32 ret_val; |
|
2586 u16 nvm_data; |
|
2587 |
|
2588 ret_val = e1000_read_nvm(hw, NVM_PBA_OFFSET_0, 1, &nvm_data); |
|
2589 if (ret_val) { |
|
2590 e_dbg("NVM Read Error\n"); |
|
2591 return ret_val; |
|
2592 } |
|
2593 *pba_num = (u32)(nvm_data << 16); |
|
2594 |
|
2595 ret_val = e1000_read_nvm(hw, NVM_PBA_OFFSET_1, 1, &nvm_data); |
|
2596 if (ret_val) { |
|
2597 e_dbg("NVM Read Error\n"); |
|
2598 return ret_val; |
|
2599 } |
|
2600 *pba_num |= nvm_data; |
|
2601 |
|
2602 return 0; |
|
2603 } |