/* ../../../ccat/netdev.c */
1 /** |
|
2 Network Driver for Beckhoff CCAT communication controller |
|
3 Copyright (C) 2014 Beckhoff Automation GmbH |
|
4 Author: Patrick Bruenn <p.bruenn@beckhoff.com> |
|
5 |
|
6 This program is free software; you can redistribute it and/or modify |
|
7 it under the terms of the GNU General Public License as published by |
|
8 the Free Software Foundation; either version 2 of the License, or |
|
9 (at your option) any later version. |
|
10 |
|
11 This program is distributed in the hope that it will be useful, |
|
12 but WITHOUT ANY WARRANTY; without even the implied warranty of |
|
13 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the |
|
14 GNU General Public License for more details. |
|
15 |
|
16 You should have received a copy of the GNU General Public License along |
|
17 with this program; if not, write to the Free Software Foundation, Inc., |
|
18 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA. |
|
19 */ |
|
20 |
|
21 #include <linux/etherdevice.h> |
|
22 #include <linux/init.h> |
|
23 #include <linux/kernel.h> |
|
24 #include <linux/kfifo.h> |
|
25 #include <linux/kthread.h> |
|
26 #include <linux/module.h> |
|
27 #include <linux/netdevice.h> |
|
28 #include <linux/spinlock.h> |
|
29 |
|
30 #include "module.h" |
|
31 #include "netdev.h" |
|
32 #include "print.h" |
|
33 |
|
/**
 * EtherCAT frame to enable forwarding on EtherCAT Terminals
 *
 * Transmitted once on link-up (see ccat_eth_link_up()). Field notes below
 * are assumptions from the standard Ethernet/EtherCAT layout —
 * NOTE(review): confirm against the EtherCAT datagram specification.
 */
static const UINT8 frameForwardEthernetFrames[] = {
	0x01, 0x01, 0x05, 0x01, 0x00, 0x00,	/* destination MAC */
	0x00, 0x1b, 0x21, 0x36, 0x1b, 0xce,	/* source MAC */
	0x88, 0xa4, 0x0e, 0x10,	/* EtherType 0x88a4 (EtherCAT) + frame header */
	0x08,			/* presumably the datagram command — confirm */
	0x00,
	0x00, 0x00,
	0x00, 0x01,
	0x02, 0x00,
	0x00, 0x00,
	0x00, 0x00,
	0x00, 0x00
};
|
50 |
|
/* number of frame slots in each rx/tx DMA fifo ring */
#define FIFO_LENGTH 64
#define DMA_POLL_DELAY_RANGE_USECS 100, 100	/* time to sleep between rx/tx DMA polls */
#define POLL_DELAY_RANGE_USECS 500, 1000	/* time to sleep between link state polls */
|
54 |
|
/* worker/poll entry points, implemented below */
static void ec_poll(struct net_device *dev);
static int run_poll_thread(void *data);
static int run_rx_thread(void *data);
static int run_tx_thread(void *data);

/* net_device_ops callbacks and raw-tx helper, implemented below */
static struct rtnl_link_stats64 *ccat_eth_get_stats64(struct net_device *dev, struct rtnl_link_stats64
						      *storage);
static int ccat_eth_open(struct net_device *dev);
static netdev_tx_t ccat_eth_start_xmit(struct sk_buff *skb,
				       struct net_device *dev);
static int ccat_eth_stop(struct net_device *dev);
static void ccat_eth_xmit_raw(struct net_device *dev, const char *data,
			      size_t len);
|
68 |
|
/* net_device_ops for the CCAT ethernet device (installed in ccat_eth_init()) */
static const struct net_device_ops ccat_eth_netdev_ops = {
	.ndo_get_stats64 = ccat_eth_get_stats64,
	.ndo_open = ccat_eth_open,
	.ndo_start_xmit = ccat_eth_start_xmit,
	.ndo_stop = ccat_eth_stop,
};
|
75 |
|
static void ecdev_kfree_skb_any(struct sk_buff *unused_skb)
{
	/* intentionally a no-op: an skb must never be released in
	 * EtherCAT operation mode */
}
|
80 |
|
81 static void ecdev_carrier_on(struct net_device *const netdev) |
|
82 { |
|
83 struct ccat_eth_priv *const priv = netdev_priv(netdev); |
|
84 ecdev_set_link(priv->ecdev, 1); |
|
85 } |
|
86 |
|
87 static void ecdev_carrier_off(struct net_device *const netdev) |
|
88 { |
|
89 struct ccat_eth_priv *const priv = netdev_priv(netdev); |
|
90 ecdev_set_link(priv->ecdev, 0); |
|
91 } |
|
92 |
|
static void ecdev_nop(struct net_device *const unused_netdev)
{
	/* placeholder callback for operations that need no action in
	 * EtherCAT operation mode */
}
|
97 |
|
static void ecdev_tx_fifo_full(struct net_device *const netdev,
			       const struct ccat_eth_frame *const unused_frame)
{
	/* no-op: in EtherCAT mode we are polled by the master, so there
	 * is nothing we can do about a full tx fifo here */
}
|
103 |
|
104 static void unregister_ecdev(struct net_device *const netdev) |
|
105 { |
|
106 struct ccat_eth_priv *const priv = netdev_priv(netdev); |
|
107 ecdev_close(priv->ecdev); |
|
108 ecdev_withdraw(priv->ecdev); |
|
109 } |
|
110 |
|
/** Callback used to (re)register one DMA frame slot with a rx/tx fifo. */
typedef void (*fifo_add_function) (struct ccat_eth_frame *,
				   struct ccat_eth_dma_fifo *);
|
113 |
|
114 static void ccat_eth_rx_fifo_add(struct ccat_eth_frame *frame, |
|
115 struct ccat_eth_dma_fifo *fifo) |
|
116 { |
|
117 const size_t offset = ((void *)(frame) - fifo->dma.virt); |
|
118 const uint32_t addr_and_length = (1 << 31) | offset; |
|
119 frame->received = 0; |
|
120 iowrite32(addr_and_length, fifo->reg); |
|
121 } |
|
122 |
|
123 static void ccat_eth_tx_fifo_add_free(struct ccat_eth_frame *frame, |
|
124 struct ccat_eth_dma_fifo *fifo) |
|
125 { |
|
126 /* mark frame as ready to use for tx */ |
|
127 frame->sent = 1; |
|
128 } |
|
129 |
|
/**
 * Ethernet-mode handler for a full tx ring: stop the stack's queue,
 * remember which frame is blocking and wake the tx thread, which waits
 * for that frame's completion and re-enables the queue.
 */
static void ccat_eth_tx_fifo_full(struct net_device *const dev,
				  const struct ccat_eth_frame *const frame)
{
	struct ccat_eth_priv *const priv = netdev_priv(dev);
	netif_stop_queue(dev);
	/* publish the blocking frame before waking the tx thread */
	priv->next_tx_frame = frame;
	wake_up_process(priv->tx_thread);
}
|
138 |
|
139 static void ccat_eth_dma_fifo_reset(struct ccat_eth_dma_fifo *fifo) |
|
140 { |
|
141 struct ccat_eth_frame *frame = fifo->dma.virt; |
|
142 const struct ccat_eth_frame *const end = frame + FIFO_LENGTH; |
|
143 |
|
144 /* reset hw fifo */ |
|
145 iowrite32(0, fifo->reg + 0x8); |
|
146 wmb(); |
|
147 |
|
148 if (fifo->add) { |
|
149 while (frame < end) { |
|
150 fifo->add(frame, fifo); |
|
151 ++frame; |
|
152 } |
|
153 } |
|
154 } |
|
155 |
|
156 static int ccat_eth_dma_fifo_init(struct ccat_eth_dma_fifo *fifo, |
|
157 void __iomem * const fifo_reg, |
|
158 fifo_add_function add, size_t channel, |
|
159 struct ccat_eth_priv *const priv) |
|
160 { |
|
161 if (0 != |
|
162 ccat_dma_init(&fifo->dma, channel, priv->ccatdev->bar[2].ioaddr, |
|
163 &priv->ccatdev->pdev->dev)) { |
|
164 pr_info("init DMA%llu memory failed.\n", (uint64_t) channel); |
|
165 return -1; |
|
166 } |
|
167 fifo->add = add; |
|
168 fifo->reg = fifo_reg; |
|
169 return 0; |
|
170 } |
|
171 |
|
172 /** |
|
173 * Stop both (Rx/Tx) DMA fifo's and free related management structures |
|
174 */ |
|
175 static void ccat_eth_priv_free_dma(struct ccat_eth_priv *priv) |
|
176 { |
|
177 /* reset hw fifo's */ |
|
178 iowrite32(0, priv->rx_fifo.reg + 0x8); |
|
179 iowrite32(0, priv->tx_fifo.reg + 0x8); |
|
180 wmb(); |
|
181 |
|
182 /* release dma */ |
|
183 ccat_dma_free(&priv->rx_fifo.dma); |
|
184 ccat_dma_free(&priv->tx_fifo.dma); |
|
185 pr_debug("DMA fifo's stopped.\n"); |
|
186 } |
|
187 |
|
188 /** |
|
189 * Initalizes both (Rx/Tx) DMA fifo's and related management structures |
|
190 */ |
|
191 static int ccat_eth_priv_init_dma(struct ccat_eth_priv *priv) |
|
192 { |
|
193 if (ccat_eth_dma_fifo_init |
|
194 (&priv->rx_fifo, priv->reg.rx_fifo, ccat_eth_rx_fifo_add, |
|
195 priv->info.rxDmaChn, priv)) { |
|
196 pr_warn("init Rx DMA fifo failed.\n"); |
|
197 return -1; |
|
198 } |
|
199 |
|
200 if (ccat_eth_dma_fifo_init |
|
201 (&priv->tx_fifo, priv->reg.tx_fifo, ccat_eth_tx_fifo_add_free, |
|
202 priv->info.txDmaChn, priv)) { |
|
203 pr_warn("init Tx DMA fifo failed.\n"); |
|
204 ccat_dma_free(&priv->rx_fifo.dma); |
|
205 return -1; |
|
206 } |
|
207 |
|
208 /* disable MAC filter */ |
|
209 iowrite8(0, priv->reg.mii + 0x8 + 6); |
|
210 wmb(); |
|
211 return 0; |
|
212 } |
|
213 |
|
/**
 * Initializes the CCat... members of the ccat_eth_priv structure.
 * Call this function only if info and ioaddr are already initialized!
 *
 * Reads the per-function offset block from BAR0 io memory and derives
 * pointers to the MII, fifo, MAC and memory regions from it.
 */
static void ccat_eth_priv_init_mappings(struct ccat_eth_priv *priv)
{
	CCatInfoBlockOffs offsets;
	void __iomem *const func_base =
	    priv->ccatdev->bar[0].ioaddr + priv->info.nAddr;
	memcpy_fromio(&offsets, func_base, sizeof(offsets));
	priv->reg.mii = func_base + offsets.nMMIOffs;
	priv->reg.tx_fifo = func_base + offsets.nTxFifoOffs;
	/* NOTE(review): the rx fifo register is derived from the *Tx* fifo
	 * offset (+0x10); presumably fixed hardware layout — confirm
	 * against the CCAT register documentation */
	priv->reg.rx_fifo = func_base + offsets.nTxFifoOffs + 0x10;
	priv->reg.mac = func_base + offsets.nMacRegOffs;
	priv->reg.rx_mem = func_base + offsets.nRxMemOffs;
	priv->reg.tx_mem = func_base + offsets.nTxMemOffs;
	priv->reg.misc = func_base + offsets.nMiscOffs;
}
|
232 |
|
233 /** |
|
234 * Read link state from CCAT hardware |
|
235 * @return 1 if link is up, 0 if not |
|
236 */ |
|
237 inline static size_t ccat_eth_priv_read_link_state(const struct ccat_eth_priv |
|
238 *const priv) |
|
239 { |
|
240 return (1 << 24) == (ioread32(priv->reg.mii + 0x8 + 4) & (1 << 24)); |
|
241 } |
|
242 |
|
/**
 * ndo_get_stats64 callback: fill @storage from the hardware MAC counter
 * block plus the software byte/drop counters kept in ccat_eth_priv.
 * Counters marked TODO are not provided by this driver yet.
 */
static struct rtnl_link_stats64 *ccat_eth_get_stats64(struct net_device *dev, struct rtnl_link_stats64
						      *storage)
{
	struct ccat_eth_priv *const priv = netdev_priv(dev);
	CCatMacRegs mac;
	/* snapshot the whole MAC register block in one io read */
	memcpy_fromio(&mac, priv->reg.mac, sizeof(mac));
	storage->rx_packets = mac.rxFrameCnt;	/* total packets received */
	storage->tx_packets = mac.txFrameCnt;	/* total packets transmitted */
	storage->rx_bytes = atomic64_read(&priv->rx_bytes);	/* total bytes received */
	storage->tx_bytes = atomic64_read(&priv->tx_bytes);	/* total bytes transmitted */
	storage->rx_errors = mac.frameLenErrCnt + mac.dropFrameErrCnt + mac.crcErrCnt + mac.rxErrCnt;	/* bad packets received */
	//TODO __u64 tx_errors; /* packet transmit problems */
	storage->rx_dropped = atomic64_read(&priv->rx_dropped);	/* no space in linux buffers */
	storage->tx_dropped = atomic64_read(&priv->tx_dropped);	/* no space available in linux */
	//TODO __u64 multicast; /* multicast packets received */
	//TODO __u64 collisions;

	/* detailed rx_errors: */
	storage->rx_length_errors = mac.frameLenErrCnt;
	storage->rx_over_errors = mac.dropFrameErrCnt;	/* receiver ring buff overflow */
	storage->rx_crc_errors = mac.crcErrCnt;	/* recved pkt with crc error */
	storage->rx_frame_errors = mac.rxErrCnt;	/* recv'd frame alignment error */
	storage->rx_fifo_errors = mac.dropFrameErrCnt;	/* recv'r fifo overrun */
	//TODO __u64 rx_missed_errors; /* receiver missed packet */

	/* detailed tx_errors */
	//TODO __u64 tx_aborted_errors;
	//TODO __u64 tx_carrier_errors;
	//TODO __u64 tx_fifo_errors;
	//TODO __u64 tx_heartbeat_errors;
	//TODO __u64 tx_window_errors;

	/* for cslip etc */
	//TODO __u64 rx_compressed;
	//TODO __u64 tx_compressed;
	return storage;
}
|
280 |
|
281 struct ccat_eth_priv *ccat_eth_init(const struct ccat_device *const ccatdev, |
|
282 const void __iomem * const addr) |
|
283 { |
|
284 struct ccat_eth_priv *priv; |
|
285 struct net_device *const netdev = alloc_etherdev(sizeof(*priv)); |
|
286 priv = netdev_priv(netdev); |
|
287 priv->netdev = netdev; |
|
288 priv->ccatdev = ccatdev; |
|
289 |
|
290 /* ccat register mappings */ |
|
291 memcpy_fromio(&priv->info, addr, sizeof(priv->info)); |
|
292 ccat_eth_priv_init_mappings(priv); |
|
293 ccat_print_function_info(priv); |
|
294 |
|
295 if (ccat_eth_priv_init_dma(priv)) { |
|
296 pr_warn("%s(): DMA initialization failed.\n", __FUNCTION__); |
|
297 free_netdev(netdev); |
|
298 return NULL; |
|
299 } |
|
300 |
|
301 /* init netdev with MAC and stack callbacks */ |
|
302 memcpy_fromio(netdev->dev_addr, priv->reg.mii + 8, 6); |
|
303 netdev->netdev_ops = &ccat_eth_netdev_ops; |
|
304 |
|
305 /* use as EtherCAT device? */ |
|
306 priv->ecdev = ecdev_offer(netdev, ec_poll, THIS_MODULE); |
|
307 if (priv->ecdev) { |
|
308 priv->carrier_off = ecdev_carrier_off; |
|
309 priv->carrier_on = ecdev_carrier_on; |
|
310 priv->kfree_skb_any = ecdev_kfree_skb_any; |
|
311 priv->start_queue = ecdev_nop; |
|
312 priv->stop_queue = ecdev_nop; |
|
313 priv->tx_fifo_full = ecdev_tx_fifo_full; |
|
314 priv->unregister = unregister_ecdev; |
|
315 if (ecdev_open(priv->ecdev)) { |
|
316 pr_info("unable to register network device.\n"); |
|
317 ecdev_withdraw(priv->ecdev); |
|
318 ccat_eth_priv_free_dma(priv); |
|
319 free_netdev(netdev); |
|
320 return NULL; |
|
321 } |
|
322 return priv; |
|
323 } |
|
324 |
|
325 /* EtherCAT disabled -> prepare normal ethernet mode */ |
|
326 priv->carrier_off = netif_carrier_off; |
|
327 priv->carrier_on = netif_carrier_on; |
|
328 priv->kfree_skb_any = dev_kfree_skb_any; |
|
329 priv->start_queue = netif_start_queue; |
|
330 priv->stop_queue = netif_stop_queue; |
|
331 priv->tx_fifo_full = ccat_eth_tx_fifo_full; |
|
332 priv->unregister = unregister_netdev; |
|
333 if (register_netdev(netdev)) { |
|
334 pr_info("unable to register network device.\n"); |
|
335 ccat_eth_priv_free_dma(priv); |
|
336 free_netdev(netdev); |
|
337 return NULL; |
|
338 } |
|
339 pr_info("registered %s as network device.\n", netdev->name); |
|
340 priv->rx_thread = kthread_run(run_rx_thread, netdev, "%s_rx", DRV_NAME); |
|
341 priv->tx_thread = kthread_run(run_tx_thread, netdev, "%s_tx", DRV_NAME); |
|
342 return priv; |
|
343 } |
|
344 |
|
345 void ccat_eth_remove(struct ccat_eth_priv *const priv) |
|
346 { |
|
347 if (priv->rx_thread) { |
|
348 kthread_stop(priv->rx_thread); |
|
349 } |
|
350 if (priv->tx_thread) { |
|
351 kthread_stop(priv->tx_thread); |
|
352 } |
|
353 priv->unregister(priv->netdev); |
|
354 ccat_eth_priv_free_dma(priv); |
|
355 free_netdev(priv->netdev); |
|
356 pr_debug("%s(): done\n", __FUNCTION__); |
|
357 } |
|
358 |
|
359 static int ccat_eth_open(struct net_device *dev) |
|
360 { |
|
361 struct ccat_eth_priv *const priv = netdev_priv(dev); |
|
362 priv->carrier_off(dev); |
|
363 priv->poll_thread = |
|
364 kthread_run(run_poll_thread, dev, "%s_poll", DRV_NAME); |
|
365 |
|
366 //TODO |
|
367 return 0; |
|
368 } |
|
369 |
|
370 static const size_t CCATRXDESC_HEADER_LEN = 20; |
|
371 static void ccat_eth_receive(struct net_device *const dev, |
|
372 const struct ccat_eth_frame *const frame) |
|
373 { |
|
374 struct ccat_eth_priv *const priv = netdev_priv(dev); |
|
375 const size_t len = frame->length - CCATRXDESC_HEADER_LEN; |
|
376 struct sk_buff *skb = dev_alloc_skb(len + NET_IP_ALIGN); |
|
377 if (!skb) { |
|
378 pr_info("%s() out of memory :-(\n", __FUNCTION__); |
|
379 atomic64_inc(&priv->rx_dropped); |
|
380 return; |
|
381 } |
|
382 skb->dev = dev; |
|
383 skb_reserve(skb, NET_IP_ALIGN); |
|
384 skb_copy_to_linear_data(skb, frame->data, len); |
|
385 skb_put(skb, len); |
|
386 skb->protocol = eth_type_trans(skb, dev); |
|
387 skb->ip_summed = CHECKSUM_UNNECESSARY; |
|
388 atomic64_add(len, &priv->rx_bytes); |
|
389 netif_rx(skb); |
|
390 } |
|
391 |
|
/**
 * Rx handler in EtherCAT operation mode
 * priv->ecdev should always be valid!
 *
 * Called by the EtherCAT master; checks the next rx DMA slot and, if a
 * frame arrived, forwards its payload (without the 20 byte CCAT rx
 * descriptor header) to the master and re-arms the slot.
 */
static void ec_poll(struct net_device *dev)
{
	/* ring index of the next rx slot to inspect; static, so this
	 * assumes a single CCAT ethernet device — NOTE(review): confirm */
	static size_t next = 0;
	struct ccat_eth_priv *const priv = netdev_priv(dev);
	struct ccat_eth_frame *frame =
	    ((struct ccat_eth_frame *)priv->rx_fifo.dma.virt) + next;
	if (frame->received) {
		ecdev_receive(priv->ecdev, frame->data,
			      frame->length - CCATRXDESC_HEADER_LEN);
		frame->received = 0;
		/* give the slot back to the hardware rx fifo */
		ccat_eth_rx_fifo_add(frame, &priv->rx_fifo);
		next = (next + 1) % FIFO_LENGTH;
	} else {
		//TODO dev_warn(&dev->dev, "%s(): frame was not ready\n", __FUNCTION__);
	}
}
|
412 |
|
/**
 * ndo_start_xmit callback: copy the skb into the next free tx DMA slot
 * and push the slot's offset/length word into the hardware tx fifo.
 * Non-linear or oversized skbs are dropped (counted in tx_dropped).
 * Returns NETDEV_TX_BUSY only when the ring is unexpectedly full.
 */
static netdev_tx_t ccat_eth_start_xmit(struct sk_buff *skb,
				       struct net_device *dev)
{
	/* ring index of the next tx slot; static, so this assumes a single
	 * CCAT ethernet device — NOTE(review): confirm */
	static size_t next = 0;
	struct ccat_eth_priv *const priv = netdev_priv(dev);
	struct ccat_eth_frame *const frame =
	    ((struct ccat_eth_frame *)priv->tx_fifo.dma.virt);
	uint32_t addr_and_length;

	if (skb_is_nonlinear(skb)) {
		pr_warn("Non linear skb not supported -> drop frame.\n");
		atomic64_inc(&priv->tx_dropped);
		priv->kfree_skb_any(skb);
		return NETDEV_TX_OK;
	}

	if (skb->len > sizeof(frame->data)) {
		pr_warn("skb.len %llu exceeds dma buffer %llu -> drop frame.\n",
			(uint64_t) skb->len, (uint64_t) sizeof(frame->data));
		atomic64_inc(&priv->tx_dropped);
		priv->kfree_skb_any(skb);
		return NETDEV_TX_OK;
	}

	/* slot still owned by hardware: queue should have been stopped */
	if (!frame[next].sent) {
		netdev_err(dev, "BUG! Tx Ring full when queue awake!\n");
		ccat_eth_tx_fifo_full(dev, &frame[next]);
		return NETDEV_TX_BUSY;
	}

	/* prepare frame in DMA memory */
	frame[next].sent = 0;
	frame[next].length = skb->len;
	memcpy(frame[next].data, skb->data, skb->len);

	priv->kfree_skb_any(skb);

	/* low bits: slot offset (+8 descriptor bytes); bits 24+: length in
	 * 8-byte units — NOTE(review): confirm against CCAT fifo docs */
	addr_and_length = 8 + (next * sizeof(*frame));
	addr_and_length +=
	    ((frame[next].length + sizeof(CCAT_HEADER_TAG) + 8) / 8) << 24;
	iowrite32(addr_and_length, priv->reg.tx_fifo);	/* add to DMA fifo */
	atomic64_add(frame[next].length, &priv->tx_bytes);	/* update stats */

	next = (next + 1) % FIFO_LENGTH;
	/* stop queue if tx ring is full */
	if (!frame[next].sent) {
		ccat_eth_tx_fifo_full(dev, &frame[next]);
	}
	return NETDEV_TX_OK;
}
|
463 |
|
464 static int ccat_eth_stop(struct net_device *dev) |
|
465 { |
|
466 struct ccat_eth_priv *const priv = netdev_priv(dev); |
|
467 priv->stop_queue(dev); |
|
468 if (priv->poll_thread) { |
|
469 /* TODO care about smp context? */ |
|
470 kthread_stop(priv->poll_thread); |
|
471 priv->poll_thread = NULL; |
|
472 } |
|
473 netdev_info(dev, "stopped.\n"); |
|
474 return 0; |
|
475 } |
|
476 |
|
477 static void ccat_eth_link_down(struct net_device *dev) |
|
478 { |
|
479 struct ccat_eth_priv *const priv = netdev_priv(dev); |
|
480 priv->stop_queue(dev); |
|
481 priv->carrier_off(dev); |
|
482 netdev_info(dev, "NIC Link is Down\n"); |
|
483 } |
|
484 |
|
485 static void ccat_eth_link_up(struct net_device *const dev) |
|
486 { |
|
487 struct ccat_eth_priv *const priv = netdev_priv(dev); |
|
488 netdev_info(dev, "NIC Link is Up\n"); |
|
489 /* TODO netdev_info(dev, "NIC Link is Up %u Mbps %s Duplex\n", |
|
490 speed == SPEED_100 ? 100 : 10, |
|
491 cmd.duplex == DUPLEX_FULL ? "Full" : "Half"); */ |
|
492 |
|
493 ccat_eth_dma_fifo_reset(&priv->rx_fifo); |
|
494 ccat_eth_dma_fifo_reset(&priv->tx_fifo); |
|
495 ccat_eth_xmit_raw(dev, frameForwardEthernetFrames, |
|
496 sizeof(frameForwardEthernetFrames)); |
|
497 priv->carrier_on(dev); |
|
498 priv->start_queue(dev); |
|
499 } |
|
500 |
|
501 /** |
|
502 * Function to transmit a raw buffer to the network (f.e. frameForwardEthernetFrames) |
|
503 * @dev a valid net_device |
|
504 * @data pointer to your raw buffer |
|
505 * @len number of bytes in the raw buffer to transmit |
|
506 */ |
|
507 static void ccat_eth_xmit_raw(struct net_device *dev, const char *const data, |
|
508 size_t len) |
|
509 { |
|
510 struct sk_buff *skb = dev_alloc_skb(len); |
|
511 skb->dev = dev; |
|
512 skb_copy_to_linear_data(skb, data, len); |
|
513 skb_put(skb, len); |
|
514 ccat_eth_start_xmit(skb, dev); |
|
515 } |
|
516 |
|
517 /** |
|
518 * Since CCAT doesn't support interrupts until now, we have to poll |
|
519 * some status bits to recognize things like link change etc. |
|
520 */ |
|
521 static int run_poll_thread(void *data) |
|
522 { |
|
523 struct net_device *const dev = (struct net_device *)data; |
|
524 struct ccat_eth_priv *const priv = netdev_priv(dev); |
|
525 size_t link = 0; |
|
526 |
|
527 while (!kthread_should_stop()) { |
|
528 if (ccat_eth_priv_read_link_state(priv) != link) { |
|
529 link = !link; |
|
530 link ? ccat_eth_link_up(dev) : ccat_eth_link_down(dev); |
|
531 } |
|
532 usleep_range(POLL_DELAY_RANGE_USECS); |
|
533 } |
|
534 pr_debug("%s() stopped.\n", __FUNCTION__); |
|
535 return 0; |
|
536 } |
|
537 |
|
538 static int run_rx_thread(void *data) |
|
539 { |
|
540 struct net_device *const dev = (struct net_device *)data; |
|
541 struct ccat_eth_priv *const priv = netdev_priv(dev); |
|
542 struct ccat_eth_frame *frame = priv->rx_fifo.dma.virt; |
|
543 const struct ccat_eth_frame *const end = frame + FIFO_LENGTH; |
|
544 |
|
545 while (!kthread_should_stop()) { |
|
546 /* wait until frame was used by DMA for Rx */ |
|
547 while (!kthread_should_stop() && !frame->received) { |
|
548 usleep_range(DMA_POLL_DELAY_RANGE_USECS); |
|
549 } |
|
550 |
|
551 /* can be NULL, if we are asked to stop! */ |
|
552 if (frame->received) { |
|
553 ccat_eth_receive(dev, frame); |
|
554 frame->received = 0; |
|
555 ccat_eth_rx_fifo_add(frame, &priv->rx_fifo); |
|
556 } |
|
557 if (++frame >= end) { |
|
558 frame = priv->rx_fifo.dma.virt; |
|
559 } |
|
560 } |
|
561 pr_debug("%s() stopped.\n", __FUNCTION__); |
|
562 return 0; |
|
563 } |
|
564 |
|
/**
 * Polling of tx dma descriptors in ethernet operating mode
 *
 * Sleeps until woken by ccat_eth_tx_fifo_full(); then waits for the
 * blocking frame to be sent by the hardware and re-wakes the tx queue.
 * The TASK_INTERRUPTIBLE/schedule() pairing must stay in this order so
 * a wake_up_process() between them is not lost.
 */
static int run_tx_thread(void *data)
{
	struct net_device *const dev = (struct net_device *)data;
	struct ccat_eth_priv *const priv = netdev_priv(dev);

	set_current_state(TASK_INTERRUPTIBLE);
	while (!kthread_should_stop()) {
		const struct ccat_eth_frame *const frame = priv->next_tx_frame;
		if (frame) {
			/* sleep-poll until the hardware sent the frame */
			while (!kthread_should_stop() && !frame->sent) {
				usleep_range(DMA_POLL_DELAY_RANGE_USECS);
			}
		}
		netif_wake_queue(dev);
		schedule();
		set_current_state(TASK_INTERRUPTIBLE);
	}
	set_current_state(TASK_RUNNING);
	pr_debug("%s() stopped.\n", __FUNCTION__);
	return 0;
}