|
1 /** |
|
2 Network Driver for Beckhoff CCAT communication controller |
|
3 Copyright (C) 2014 Beckhoff Automation GmbH |
|
4 Author: Patrick Bruenn <p.bruenn@beckhoff.com> |
|
5 |
|
6 This program is free software; you can redistribute it and/or modify |
|
7 it under the terms of the GNU General Public License as published by |
|
8 the Free Software Foundation; either version 2 of the License, or |
|
9 (at your option) any later version. |
|
10 |
|
11 This program is distributed in the hope that it will be useful, |
|
12 but WITHOUT ANY WARRANTY; without even the implied warranty of |
|
13 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the |
|
14 GNU General Public License for more details. |
|
15 |
|
16 You should have received a copy of the GNU General Public License along |
|
17 with this program; if not, write to the Free Software Foundation, Inc., |
|
18 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA. |
|
19 */ |
|
20 |
|
21 #include <linux/etherdevice.h> |
|
22 #include <linux/init.h> |
|
23 #include <linux/kernel.h> |
|
24 #include <linux/kfifo.h> |
|
25 #include <linux/kthread.h> |
|
26 #include <linux/module.h> |
|
27 #include <linux/netdevice.h> |
|
28 #include <linux/spinlock.h> |
|
29 |
|
30 #include "compat.h" |
|
31 #include "module.h" |
|
32 #include "netdev.h" |
|
33 #include "print.h" |
|
34 |
|
/**
 * EtherCAT frame to enable forwarding on EtherCAT Terminals
 * Raw frame bytes sent once on link up (see ccat_eth_link_up()).
 * NOTE(review): byte layout presumably is dst MAC, src MAC, EtherType
 * 0x88a4 (EtherCAT) followed by a datagram header/payload — confirm
 * against the EtherCAT frame specification before relying on it.
 */
static const UINT8 frameForwardEthernetFrames[] = {
	0x01, 0x01, 0x05, 0x01, 0x00, 0x00,
	0x00, 0x1b, 0x21, 0x36, 0x1b, 0xce,
	0x88, 0xa4, 0x0e, 0x10,
	0x08,
	0x00,
	0x00, 0x00,
	0x00, 0x01,
	0x02, 0x00,
	0x00, 0x00,
	0x00, 0x00,
	0x00, 0x00
};
|
51 |
|
/* number of rx/tx frame buffers per DMA fifo */
#define FIFO_LENGTH 64
/* usleep_range() argument pairs (min, max in microseconds) */
#define DMA_POLL_DELAY_RANGE_USECS 100, 100	/* time to sleep between rx/tx DMA polls */
#define POLL_DELAY_RANGE_USECS 500, 1000	/* time to sleep between link state polls */

/* forward declarations for the EtherCAT poll handler, the kernel
 * threads, and the net_device_ops callbacks defined below */
static void ec_poll(struct net_device *dev);
static int run_poll_thread(void *data);
static int run_rx_thread(void *data);
static int run_tx_thread(void *data);

#if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 35)
static struct rtnl_link_stats64 *ccat_eth_get_stats64(struct net_device *dev, struct rtnl_link_stats64
						      *storage);
#endif
static int ccat_eth_open(struct net_device *dev);
static netdev_tx_t ccat_eth_start_xmit(struct sk_buff *skb,
				       struct net_device *dev);
static int ccat_eth_stop(struct net_device *dev);
static void ccat_eth_xmit_raw(struct net_device *dev, const char *data,
			      size_t len);
|
71 |
|
/* callbacks used by the linux network stack in normal ethernet mode
 * (EtherCAT mode bypasses the stack and polls via ec_poll()) */
static const struct net_device_ops ccat_eth_netdev_ops = {
#if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 35)
	.ndo_get_stats64 = ccat_eth_get_stats64,
#endif
	.ndo_open = ccat_eth_open,
	.ndo_start_xmit = ccat_eth_start_xmit,
	.ndo_stop = ccat_eth_stop,
};
|
80 |
|
/* drop-in replacement for dev_kfree_skb_any() in EtherCAT mode: the skb
 * is owned by the EtherCAT master stack and must not be released here */
static void ecdev_kfree_skb_any(struct sk_buff *skb)
{
	/* never release a skb in EtherCAT mode */
}
|
85 |
|
86 static void ecdev_carrier_on(struct net_device *const netdev) |
|
87 { |
|
88 struct ccat_eth_priv *const priv = netdev_priv(netdev); |
|
89 ecdev_set_link(priv->ecdev, 1); |
|
90 } |
|
91 |
|
92 static void ecdev_carrier_off(struct net_device *const netdev) |
|
93 { |
|
94 struct ccat_eth_priv *const priv = netdev_priv(netdev); |
|
95 ecdev_set_link(priv->ecdev, 0); |
|
96 } |
|
97 |
|
/* placeholder for start_queue/stop_queue in EtherCAT mode: the stack's
 * queue is not used, so there is nothing to do */
static void ecdev_nop(struct net_device *const netdev)
{
	/* dummy called if nothing has to be done in EtherCAT operation mode */
}
|
102 |
|
/* tx_fifo_full handler for EtherCAT mode: the master polls us, so there
 * is no queue to stop and no thread to wake */
static void ecdev_tx_fifo_full(struct net_device *const dev,
			       const struct ccat_eth_frame *const frame)
{
	/* we are polled -> there is nothing we can do in EtherCAT mode */
}
|
108 |
|
109 static void unregister_ecdev(struct net_device *const netdev) |
|
110 { |
|
111 struct ccat_eth_priv *const priv = netdev_priv(netdev); |
|
112 ecdev_close(priv->ecdev); |
|
113 ecdev_withdraw(priv->ecdev); |
|
114 } |
|
115 |
|
/* callback used by ccat_eth_dma_fifo_reset() to (re)queue one frame
 * buffer into a DMA fifo after the hardware fifo was reset */
typedef void (*fifo_add_function) (struct ccat_eth_frame *,
				   struct ccat_eth_dma_fifo *);
|
118 |
|
119 static void ccat_eth_rx_fifo_add(struct ccat_eth_frame *frame, |
|
120 struct ccat_eth_dma_fifo *fifo) |
|
121 { |
|
122 const size_t offset = ((void *)(frame) - fifo->dma.virt); |
|
123 const uint32_t addr_and_length = (1 << 31) | offset; |
|
124 frame->received = 0; |
|
125 iowrite32(addr_and_length, fifo->reg); |
|
126 } |
|
127 |
|
/* fifo_add_function for the Tx fifo: Tx buffers are not handed to the
 * hardware up front, they are only flagged as free for the next xmit */
static void ccat_eth_tx_fifo_add_free(struct ccat_eth_frame *frame,
				      struct ccat_eth_dma_fifo *fifo)
{
	/* mark frame as ready to use for tx */
	frame->sent = 1;
}
|
134 |
|
135 static void ccat_eth_tx_fifo_full(struct net_device *const dev, |
|
136 const struct ccat_eth_frame *const frame) |
|
137 { |
|
138 struct ccat_eth_priv *const priv = netdev_priv(dev); |
|
139 netif_stop_queue(dev); |
|
140 priv->next_tx_frame = frame; |
|
141 wake_up_process(priv->tx_thread); |
|
142 } |
|
143 |
|
144 static void ccat_eth_dma_fifo_reset(struct ccat_eth_dma_fifo *fifo) |
|
145 { |
|
146 struct ccat_eth_frame *frame = fifo->dma.virt; |
|
147 const struct ccat_eth_frame *const end = frame + FIFO_LENGTH; |
|
148 |
|
149 /* reset hw fifo */ |
|
150 iowrite32(0, fifo->reg + 0x8); |
|
151 wmb(); |
|
152 |
|
153 if (fifo->add) { |
|
154 while (frame < end) { |
|
155 fifo->add(frame, fifo); |
|
156 ++frame; |
|
157 } |
|
158 } |
|
159 } |
|
160 |
|
161 static int ccat_eth_dma_fifo_init(struct ccat_eth_dma_fifo *fifo, |
|
162 void __iomem * const fifo_reg, |
|
163 fifo_add_function add, size_t channel, |
|
164 struct ccat_eth_priv *const priv) |
|
165 { |
|
166 if (0 != |
|
167 ccat_dma_init(&fifo->dma, channel, priv->ccatdev->bar[2].ioaddr, |
|
168 &priv->ccatdev->pdev->dev)) { |
|
169 pr_info("init DMA%llu memory failed.\n", (uint64_t) channel); |
|
170 return -1; |
|
171 } |
|
172 fifo->add = add; |
|
173 fifo->reg = fifo_reg; |
|
174 return 0; |
|
175 } |
|
176 |
|
177 /** |
|
178 * Stop both (Rx/Tx) DMA fifo's and free related management structures |
|
179 */ |
|
180 static void ccat_eth_priv_free_dma(struct ccat_eth_priv *priv) |
|
181 { |
|
182 /* reset hw fifo's */ |
|
183 iowrite32(0, priv->rx_fifo.reg + 0x8); |
|
184 iowrite32(0, priv->tx_fifo.reg + 0x8); |
|
185 wmb(); |
|
186 |
|
187 /* release dma */ |
|
188 ccat_dma_free(&priv->rx_fifo.dma); |
|
189 ccat_dma_free(&priv->tx_fifo.dma); |
|
190 pr_debug("DMA fifo's stopped.\n"); |
|
191 } |
|
192 |
|
193 /** |
|
194 * Initalizes both (Rx/Tx) DMA fifo's and related management structures |
|
195 */ |
|
196 static int ccat_eth_priv_init_dma(struct ccat_eth_priv *priv) |
|
197 { |
|
198 if (ccat_eth_dma_fifo_init |
|
199 (&priv->rx_fifo, priv->reg.rx_fifo, ccat_eth_rx_fifo_add, |
|
200 priv->info.rxDmaChn, priv)) { |
|
201 pr_warn("init Rx DMA fifo failed.\n"); |
|
202 return -1; |
|
203 } |
|
204 |
|
205 if (ccat_eth_dma_fifo_init |
|
206 (&priv->tx_fifo, priv->reg.tx_fifo, ccat_eth_tx_fifo_add_free, |
|
207 priv->info.txDmaChn, priv)) { |
|
208 pr_warn("init Tx DMA fifo failed.\n"); |
|
209 ccat_dma_free(&priv->rx_fifo.dma); |
|
210 return -1; |
|
211 } |
|
212 |
|
213 /* disable MAC filter */ |
|
214 iowrite8(0, priv->reg.mii + 0x8 + 6); |
|
215 wmb(); |
|
216 return 0; |
|
217 } |
|
218 |
|
/**
 * Initializes the CCat... members of the ccat_eth_priv structure.
 * Call this function only if info and ioaddr are already initialized!
 */
static void ccat_eth_priv_init_mappings(struct ccat_eth_priv *priv)
{
	CCatInfoBlockOffs offsets;
	void __iomem *const func_base =
	    priv->ccatdev->bar[0].ioaddr + priv->info.nAddr;
	/* the function block starts with a table of register offsets */
	memcpy_fromio(&offsets, func_base, sizeof(offsets));
	priv->reg.mii = func_base + offsets.nMMIOffs;
	priv->reg.tx_fifo = func_base + offsets.nTxFifoOffs;
	/* NOTE(review): the rx fifo register is derived from the TX fifo
	 * offset + 0x10 rather than a dedicated rx offset — presumably by
	 * hardware design; confirm against the CCAT register map */
	priv->reg.rx_fifo = func_base + offsets.nTxFifoOffs + 0x10;
	priv->reg.mac = func_base + offsets.nMacRegOffs;
	priv->reg.rx_mem = func_base + offsets.nRxMemOffs;
	priv->reg.tx_mem = func_base + offsets.nTxMemOffs;
	priv->reg.misc = func_base + offsets.nMiscOffs;
}
|
237 |
|
238 /** |
|
239 * Read link state from CCAT hardware |
|
240 * @return 1 if link is up, 0 if not |
|
241 */ |
|
242 inline static size_t ccat_eth_priv_read_link_state(const struct ccat_eth_priv |
|
243 *const priv) |
|
244 { |
|
245 return (1 << 24) == (ioread32(priv->reg.mii + 0x8 + 4) & (1 << 24)); |
|
246 } |
|
247 |
|
#if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 35)
/**
 * ndo_get_stats64 callback: fill @storage from a snapshot of the CCAT
 * MAC counter registers combined with the byte/drop counters this
 * driver maintains in software (priv->rx_bytes etc.).
 * @return storage, as the ndo_get_stats64 contract requires
 */
static struct rtnl_link_stats64 *ccat_eth_get_stats64(struct net_device *dev, struct rtnl_link_stats64
						      *storage)
{
	struct ccat_eth_priv *const priv = netdev_priv(dev);
	CCatMacRegs mac;
	/* snapshot the whole MAC register block in one read */
	memcpy_fromio(&mac, priv->reg.mac, sizeof(mac));
	storage->rx_packets = mac.rxFrameCnt;	/* total packets received */
	storage->tx_packets = mac.txFrameCnt;	/* total packets transmitted */
	storage->rx_bytes = atomic64_read(&priv->rx_bytes);	/* total bytes received */
	storage->tx_bytes = atomic64_read(&priv->tx_bytes);	/* total bytes transmitted */
	storage->rx_errors = mac.frameLenErrCnt + mac.dropFrameErrCnt + mac.crcErrCnt + mac.rxErrCnt;	/* bad packets received */
	//TODO __u64 tx_errors; /* packet transmit problems */
	storage->rx_dropped = atomic64_read(&priv->rx_dropped);	/* no space in linux buffers */
	storage->tx_dropped = atomic64_read(&priv->tx_dropped);	/* no space available in linux */
	//TODO __u64 multicast; /* multicast packets received */
	//TODO __u64 collisions;

	/* detailed rx_errors: */
	storage->rx_length_errors = mac.frameLenErrCnt;
	storage->rx_over_errors = mac.dropFrameErrCnt;	/* receiver ring buff overflow */
	storage->rx_crc_errors = mac.crcErrCnt;	/* recved pkt with crc error */
	storage->rx_frame_errors = mac.rxErrCnt;	/* recv'd frame alignment error */
	storage->rx_fifo_errors = mac.dropFrameErrCnt;	/* recv'r fifo overrun */
	//TODO __u64 rx_missed_errors; /* receiver missed packet */

	/* detailed tx_errors */
	//TODO __u64 tx_aborted_errors;
	//TODO __u64 tx_carrier_errors;
	//TODO __u64 tx_fifo_errors;
	//TODO __u64 tx_heartbeat_errors;
	//TODO __u64 tx_window_errors;

	/* for cslip etc */
	//TODO __u64 rx_compressed;
	//TODO __u64 tx_compressed;
	return storage;
}
#endif
|
287 |
|
288 struct ccat_eth_priv *ccat_eth_init(const struct ccat_device *const ccatdev, |
|
289 const void __iomem * const addr) |
|
290 { |
|
291 struct ccat_eth_priv *priv; |
|
292 struct net_device *const netdev = alloc_etherdev(sizeof(*priv)); |
|
293 priv = netdev_priv(netdev); |
|
294 priv->netdev = netdev; |
|
295 priv->ccatdev = ccatdev; |
|
296 |
|
297 /* ccat register mappings */ |
|
298 memcpy_fromio(&priv->info, addr, sizeof(priv->info)); |
|
299 ccat_eth_priv_init_mappings(priv); |
|
300 ccat_print_function_info(priv); |
|
301 |
|
302 if (ccat_eth_priv_init_dma(priv)) { |
|
303 pr_warn("%s(): DMA initialization failed.\n", __FUNCTION__); |
|
304 free_netdev(netdev); |
|
305 return NULL; |
|
306 } |
|
307 |
|
308 /* init netdev with MAC and stack callbacks */ |
|
309 memcpy_fromio(netdev->dev_addr, priv->reg.mii + 8, 6); |
|
310 netdev->netdev_ops = &ccat_eth_netdev_ops; |
|
311 |
|
312 /* use as EtherCAT device? */ |
|
313 priv->ecdev = ecdev_offer(netdev, ec_poll, THIS_MODULE); |
|
314 if (priv->ecdev) { |
|
315 priv->carrier_off = ecdev_carrier_off; |
|
316 priv->carrier_on = ecdev_carrier_on; |
|
317 priv->kfree_skb_any = ecdev_kfree_skb_any; |
|
318 priv->start_queue = ecdev_nop; |
|
319 priv->stop_queue = ecdev_nop; |
|
320 priv->tx_fifo_full = ecdev_tx_fifo_full; |
|
321 priv->unregister = unregister_ecdev; |
|
322 if (ecdev_open(priv->ecdev)) { |
|
323 pr_info("unable to register network device.\n"); |
|
324 ecdev_withdraw(priv->ecdev); |
|
325 ccat_eth_priv_free_dma(priv); |
|
326 free_netdev(netdev); |
|
327 return NULL; |
|
328 } |
|
329 return priv; |
|
330 } |
|
331 |
|
332 /* EtherCAT disabled -> prepare normal ethernet mode */ |
|
333 priv->carrier_off = netif_carrier_off; |
|
334 priv->carrier_on = netif_carrier_on; |
|
335 priv->kfree_skb_any = dev_kfree_skb_any; |
|
336 priv->start_queue = netif_start_queue; |
|
337 priv->stop_queue = netif_stop_queue; |
|
338 priv->tx_fifo_full = ccat_eth_tx_fifo_full; |
|
339 priv->unregister = unregister_netdev; |
|
340 if (register_netdev(netdev)) { |
|
341 pr_info("unable to register network device.\n"); |
|
342 ccat_eth_priv_free_dma(priv); |
|
343 free_netdev(netdev); |
|
344 return NULL; |
|
345 } |
|
346 pr_info("registered %s as network device.\n", netdev->name); |
|
347 priv->rx_thread = kthread_run(run_rx_thread, netdev, "%s_rx", DRV_NAME); |
|
348 priv->tx_thread = kthread_run(run_tx_thread, netdev, "%s_tx", DRV_NAME); |
|
349 return priv; |
|
350 } |
|
351 |
|
352 void ccat_eth_remove(struct ccat_eth_priv *const priv) |
|
353 { |
|
354 if (priv->rx_thread) { |
|
355 kthread_stop(priv->rx_thread); |
|
356 } |
|
357 if (priv->tx_thread) { |
|
358 kthread_stop(priv->tx_thread); |
|
359 } |
|
360 priv->unregister(priv->netdev); |
|
361 ccat_eth_priv_free_dma(priv); |
|
362 free_netdev(priv->netdev); |
|
363 pr_debug("%s(): done\n", __FUNCTION__); |
|
364 } |
|
365 |
|
366 static int ccat_eth_open(struct net_device *dev) |
|
367 { |
|
368 struct ccat_eth_priv *const priv = netdev_priv(dev); |
|
369 priv->carrier_off(dev); |
|
370 priv->poll_thread = |
|
371 kthread_run(run_poll_thread, dev, "%s_poll", DRV_NAME); |
|
372 |
|
373 //TODO |
|
374 return 0; |
|
375 } |
|
376 |
|
377 static const size_t CCATRXDESC_HEADER_LEN = 20; |
|
378 static void ccat_eth_receive(struct net_device *const dev, |
|
379 const struct ccat_eth_frame *const frame) |
|
380 { |
|
381 struct ccat_eth_priv *const priv = netdev_priv(dev); |
|
382 const size_t len = frame->length - CCATRXDESC_HEADER_LEN; |
|
383 struct sk_buff *skb = dev_alloc_skb(len + NET_IP_ALIGN); |
|
384 if (!skb) { |
|
385 pr_info("%s() out of memory :-(\n", __FUNCTION__); |
|
386 atomic64_inc(&priv->rx_dropped); |
|
387 return; |
|
388 } |
|
389 skb->dev = dev; |
|
390 skb_reserve(skb, NET_IP_ALIGN); |
|
391 skb_copy_to_linear_data(skb, frame->data, len); |
|
392 skb_put(skb, len); |
|
393 skb->protocol = eth_type_trans(skb, dev); |
|
394 skb->ip_summed = CHECKSUM_UNNECESSARY; |
|
395 atomic64_add(len, &priv->rx_bytes); |
|
396 netif_rx(skb); |
|
397 } |
|
398 |
|
/**
 * Rx handler in EtherCAT operation mode
 * priv->ecdev should always be valid!
 */
static void ec_poll(struct net_device *dev)
{
	/* NOTE(review): 'next' is function-static, so this handler only
	 * works correctly with a single CCAT ethernet device — confirm
	 * multiple instances are impossible, or move the ring index into
	 * ccat_eth_priv */
	static size_t next = 0;
	struct ccat_eth_priv *const priv = netdev_priv(dev);
	struct ccat_eth_frame *frame =
	    ((struct ccat_eth_frame *)priv->rx_fifo.dma.virt) + next;
	if (frame->received) {
		/* forward the payload (without the CCAT rx header) to the
		 * EtherCAT master, then recycle the buffer for rx DMA */
		ecdev_receive(priv->ecdev, frame->data,
			      frame->length - CCATRXDESC_HEADER_LEN);
		frame->received = 0;
		ccat_eth_rx_fifo_add(frame, &priv->rx_fifo);
		next = (next + 1) % FIFO_LENGTH;
	} else {
		/* no frame pending this cycle; the master simply polls again */
		//TODO dev_warn(&dev->dev, "%s(): frame was not ready\n", __FUNCTION__);
	}
}
|
419 |
|
/*
 * ndo_start_xmit callback: copy the skb into the next free tx DMA buffer
 * and push a descriptor into the hardware fifo.
 *
 * NOTE(review): 'next' is function-static, so transmission only works
 * correctly with a single CCAT ethernet device — confirm multiple
 * instances are impossible, or move the ring index into ccat_eth_priv.
 */
static netdev_tx_t ccat_eth_start_xmit(struct sk_buff *skb,
				       struct net_device *dev)
{
	static size_t next = 0;
	struct ccat_eth_priv *const priv = netdev_priv(dev);
	struct ccat_eth_frame *const frame =
	    ((struct ccat_eth_frame *)priv->tx_fifo.dma.virt);
	uint32_t addr_and_length;

	/* DMA buffers are contiguous; fragmented skbs are not supported */
	if (skb_is_nonlinear(skb)) {
		pr_warn("Non linear skb not supported -> drop frame.\n");
		atomic64_inc(&priv->tx_dropped);
		priv->kfree_skb_any(skb);
		return NETDEV_TX_OK;
	}

	if (skb->len > sizeof(frame->data)) {
		pr_warn("skb.len %llu exceeds dma buffer %llu -> drop frame.\n",
			(uint64_t) skb->len, (uint64_t) sizeof(frame->data));
		atomic64_inc(&priv->tx_dropped);
		priv->kfree_skb_any(skb);
		return NETDEV_TX_OK;
	}

	/* the queue should have been stopped before the ring filled up */
	if (!frame[next].sent) {
		netdev_err(dev, "BUG! Tx Ring full when queue awake!\n");
		ccat_eth_tx_fifo_full(dev, &frame[next]);
		return NETDEV_TX_BUSY;
	}

	/* prepare frame in DMA memory */
	frame[next].sent = 0;
	frame[next].length = skb->len;
	memcpy(frame[next].data, skb->data, skb->len);

	priv->kfree_skb_any(skb);

	/* descriptor word: low bits carry the buffer offset (8 + slot
	 * offset), bits 24.. the length in 8-byte units including the DMA
	 * frame header */
	addr_and_length = 8 + (next * sizeof(*frame));
	addr_and_length +=
	    ((frame[next].length + CCAT_DMA_FRAME_HEADER_LENGTH) / 8) << 24;
	iowrite32(addr_and_length, priv->reg.tx_fifo);	/* add to DMA fifo */
	atomic64_add(frame[next].length, &priv->tx_bytes);	/* update stats */

	next = (next + 1) % FIFO_LENGTH;
	/* stop queue if tx ring is full */
	if (!frame[next].sent) {
		ccat_eth_tx_fifo_full(dev, &frame[next]);
	}
	return NETDEV_TX_OK;
}
|
470 |
|
471 static int ccat_eth_stop(struct net_device *dev) |
|
472 { |
|
473 struct ccat_eth_priv *const priv = netdev_priv(dev); |
|
474 priv->stop_queue(dev); |
|
475 if (priv->poll_thread) { |
|
476 /* TODO care about smp context? */ |
|
477 kthread_stop(priv->poll_thread); |
|
478 priv->poll_thread = NULL; |
|
479 } |
|
480 netdev_info(dev, "stopped.\n"); |
|
481 return 0; |
|
482 } |
|
483 |
|
484 static void ccat_eth_link_down(struct net_device *dev) |
|
485 { |
|
486 struct ccat_eth_priv *const priv = netdev_priv(dev); |
|
487 priv->stop_queue(dev); |
|
488 priv->carrier_off(dev); |
|
489 netdev_info(dev, "NIC Link is Down\n"); |
|
490 } |
|
491 |
|
492 static void ccat_eth_link_up(struct net_device *const dev) |
|
493 { |
|
494 struct ccat_eth_priv *const priv = netdev_priv(dev); |
|
495 netdev_info(dev, "NIC Link is Up\n"); |
|
496 /* TODO netdev_info(dev, "NIC Link is Up %u Mbps %s Duplex\n", |
|
497 speed == SPEED_100 ? 100 : 10, |
|
498 cmd.duplex == DUPLEX_FULL ? "Full" : "Half"); */ |
|
499 |
|
500 ccat_eth_dma_fifo_reset(&priv->rx_fifo); |
|
501 ccat_eth_dma_fifo_reset(&priv->tx_fifo); |
|
502 ccat_eth_xmit_raw(dev, frameForwardEthernetFrames, |
|
503 sizeof(frameForwardEthernetFrames)); |
|
504 priv->carrier_on(dev); |
|
505 priv->start_queue(dev); |
|
506 } |
|
507 |
|
508 /** |
|
509 * Function to transmit a raw buffer to the network (f.e. frameForwardEthernetFrames) |
|
510 * @dev a valid net_device |
|
511 * @data pointer to your raw buffer |
|
512 * @len number of bytes in the raw buffer to transmit |
|
513 */ |
|
514 static void ccat_eth_xmit_raw(struct net_device *dev, const char *const data, |
|
515 size_t len) |
|
516 { |
|
517 struct sk_buff *skb = dev_alloc_skb(len); |
|
518 skb->dev = dev; |
|
519 skb_copy_to_linear_data(skb, data, len); |
|
520 skb_put(skb, len); |
|
521 ccat_eth_start_xmit(skb, dev); |
|
522 } |
|
523 |
|
524 /** |
|
525 * Since CCAT doesn't support interrupts until now, we have to poll |
|
526 * some status bits to recognize things like link change etc. |
|
527 */ |
|
528 static int run_poll_thread(void *data) |
|
529 { |
|
530 struct net_device *const dev = (struct net_device *)data; |
|
531 struct ccat_eth_priv *const priv = netdev_priv(dev); |
|
532 size_t link = 0; |
|
533 |
|
534 while (!kthread_should_stop()) { |
|
535 if (ccat_eth_priv_read_link_state(priv) != link) { |
|
536 link = !link; |
|
537 link ? ccat_eth_link_up(dev) : ccat_eth_link_down(dev); |
|
538 } |
|
539 usleep_range(POLL_DELAY_RANGE_USECS); |
|
540 } |
|
541 pr_debug("%s() stopped.\n", __FUNCTION__); |
|
542 return 0; |
|
543 } |
|
544 |
|
/*
 * Rx polling thread for normal ethernet operation: walks the rx ring,
 * waits for the DMA engine to flag each frame as received, hands it to
 * the stack via ccat_eth_receive() and recycles the buffer.
 */
static int run_rx_thread(void *data)
{
	struct net_device *const dev = (struct net_device *)data;
	struct ccat_eth_priv *const priv = netdev_priv(dev);
	struct ccat_eth_frame *frame = priv->rx_fifo.dma.virt;
	const struct ccat_eth_frame *const end = frame + FIFO_LENGTH;

	while (!kthread_should_stop()) {
		/* wait until frame was used by DMA for Rx */
		while (!kthread_should_stop() && !frame->received) {
			usleep_range(DMA_POLL_DELAY_RANGE_USECS);
		}

		/* the inner loop can also end on kthread_should_stop(),
		 * so re-check that the frame really arrived */
		if (frame->received) {
			ccat_eth_receive(dev, frame);
			frame->received = 0;
			ccat_eth_rx_fifo_add(frame, &priv->rx_fifo);
		}
		/* advance through the ring, wrapping at the end */
		if (++frame >= end) {
			frame = priv->rx_fifo.dma.virt;
		}
	}
	pr_debug("%s() stopped.\n", __FUNCTION__);
	return 0;
}
|
571 |
|
/**
 * Polling of tx dma descriptors in ethernet operating mode
 *
 * Sleeps until woken by ccat_eth_tx_fifo_full(); then waits until the
 * frame that blocked the queue was sent by the DMA engine and restarts
 * the stack's tx queue.
 */
static int run_tx_thread(void *data)
{
	struct net_device *const dev = (struct net_device *)data;
	struct ccat_eth_priv *const priv = netdev_priv(dev);

	set_current_state(TASK_INTERRUPTIBLE);
	while (!kthread_should_stop()) {
		const struct ccat_eth_frame *const frame = priv->next_tx_frame;
		if (frame) {
			/* poll until hardware marks the blocking frame sent */
			while (!kthread_should_stop() && !frame->sent) {
				usleep_range(DMA_POLL_DELAY_RANGE_USECS);
			}
		}
		netif_wake_queue(dev);
		schedule();
		/* re-arm TASK_INTERRUPTIBLE before the next should_stop
		 * check so a wake_up_process() cannot be missed */
		set_current_state(TASK_INTERRUPTIBLE);
	}
	set_current_state(TASK_RUNNING);
	pr_debug("%s() stopped.\n", __FUNCTION__);
	return 0;
}