@@ -17,15 +17,13 @@
     with this program; if not, write to the Free Software Foundation, Inc.,
     51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
 */
 
 #include <linux/etherdevice.h>
-#include <linux/init.h>
 #include <linux/kernel.h>
 #include <linux/module.h>
 #include <linux/netdevice.h>
-#include <linux/spinlock.h>
 
 #include "module.h"
 #include "netdev.h"
 
 /**
@@ -71,66 +87,57 @@
 }
 
 static void ecdev_nop(struct net_device *const netdev)
 {
 	/* dummy called if nothing has to be done in EtherCAT operation mode */
-}
-
-static void ecdev_tx_fifo_full(struct ccat_eth_priv *const priv,
-			       const struct ccat_eth_frame *const frame)
-{
-	/* we are polled -> there is nothing we can do in EtherCAT mode */
 }
 
 static void unregister_ecdev(struct net_device *const netdev)
 {
 	struct ccat_eth_priv *const priv = netdev_priv(netdev);
 	ecdev_close(priv->ecdev);
 	ecdev_withdraw(priv->ecdev);
 }
 
-typedef void (*fifo_add_function) (struct ccat_eth_frame *,
-				   struct ccat_eth_dma_fifo *);
-
-static void ccat_eth_rx_fifo_add(struct ccat_eth_frame *frame,
-				 struct ccat_eth_dma_fifo *fifo)
+static void ccat_eth_fifo_inc(struct ccat_eth_dma_fifo *fifo)
+{
+	if (++fifo->next >= fifo->end)
+		fifo->next = fifo->dma.virt;
+}
+
+typedef void (*fifo_add_function) (struct ccat_eth_dma_fifo *,
+				   struct ccat_eth_frame *);
+
+static void ccat_eth_rx_fifo_add(struct ccat_eth_dma_fifo *fifo,
+				 struct ccat_eth_frame *frame)
 {
 	const size_t offset = ((void *)(frame) - fifo->dma.virt);
 	const u32 addr_and_length = (1 << 31) | offset;
 
-	frame->received = 0;
+	frame->rx_flags = cpu_to_le32(0);
 	iowrite32(addr_and_length, fifo->reg);
 }
 
-static void ccat_eth_tx_fifo_add_free(struct ccat_eth_frame *frame,
-				      struct ccat_eth_dma_fifo *fifo)
+static void ccat_eth_tx_fifo_add_free(struct ccat_eth_dma_fifo *fifo,
				      struct ccat_eth_frame *frame)
 {
 	/* mark frame as ready to use for tx */
-	frame->sent = 1;
-}
-
-static void ccat_eth_tx_fifo_full(struct ccat_eth_priv *const priv,
-				  const struct ccat_eth_frame *const frame)
-{
-	priv->stop_queue(priv->netdev);
-	priv->next_tx_frame = frame;
+	frame->tx_flags = cpu_to_le32(CCAT_FRAME_SENT);
 }
 
 static void ccat_eth_dma_fifo_reset(struct ccat_eth_dma_fifo *fifo)
 {
-	struct ccat_eth_frame *frame = fifo->dma.virt;
-	const struct ccat_eth_frame *const end = frame + FIFO_LENGTH;
-
 	/* reset hw fifo */
 	iowrite32(0, fifo->reg + 0x8);
 	wmb();
 
 	if (fifo->add) {
-		while (frame < end) {
-			fifo->add(frame, fifo);
-			++frame;
-		}
+		fifo->next = fifo->dma.virt;
+		do {
+			fifo->add(fifo, fifo->next);
+			ccat_eth_fifo_inc(fifo);
+		} while (fifo->next != fifo->dma.virt);
 	}
 }
 
 static int ccat_eth_dma_fifo_init(struct ccat_eth_dma_fifo *fifo,
 				  void __iomem * const fifo_reg,
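
Annotation: the new ccat_eth_fifo_inc() replaces the per-function "static size_t next" counters with a cursor (fifo->next) kept in the fifo itself, and the reset loop becomes a do/while that hands every descriptor to fifo->add() exactly once before the cursor is back at the start. Below is a minimal user-space model of that cursor arithmetic; struct ring, ring_inc and the size 8 are illustrative stand-ins, not driver code:

#include <assert.h>
#include <stddef.h>

struct frame {
	int payload;		/* stand-in for the real descriptor */
};

struct ring {
	struct frame buf[8];	/* plays the role of fifo->dma.virt */
	struct frame *next;	/* fifo->next: the current cursor */
	struct frame *end;	/* fifo->end: one past the last frame */
};

/* same wrap rule as ccat_eth_fifo_inc(): advance, wrap to start */
static void ring_inc(struct ring *r)
{
	if (++r->next >= r->end)
		r->next = r->buf;
}

int main(void)
{
	struct ring r = { .next = r.buf, .end = r.buf + 8 };
	size_t visited = 0;

	/* mirrors the do/while in ccat_eth_dma_fifo_reset(): each
	 * descriptor is visited exactly once per full pass */
	do {
		++visited;
		ring_inc(&r);
	} while (r.next != r.buf);
	assert(visited == 8);
	return 0;
}

The >= (rather than ==) in the wrap test is a cheap defensive choice: the cursor is reset even if it should ever end up past end.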
@@ -210,54 +218,55 @@
 }
 
 static netdev_tx_t ccat_eth_start_xmit(struct sk_buff *skb,
 				       struct net_device *dev)
 {
-	static size_t next = 0;
 	struct ccat_eth_priv *const priv = netdev_priv(dev);
-	struct ccat_eth_frame *const frame =
-	    ((struct ccat_eth_frame *)priv->tx_fifo.dma.virt);
+	struct ccat_eth_dma_fifo *const fifo = &priv->tx_fifo;
 	u32 addr_and_length;
 
 	if (skb_is_nonlinear(skb)) {
 		pr_warn("Non linear skb not supported -> drop frame.\n");
 		atomic64_inc(&priv->tx_dropped);
 		priv->kfree_skb_any(skb);
 		return NETDEV_TX_OK;
 	}
 
-	if (skb->len > sizeof(frame->data)) {
+	if (skb->len > sizeof(fifo->next->data)) {
 		pr_warn("skb.len %llu exceeds dma buffer %llu -> drop frame.\n",
-			(u64) skb->len, (u64) sizeof(frame->data));
+			(u64) skb->len, (u64) sizeof(fifo->next->data));
 		atomic64_inc(&priv->tx_dropped);
 		priv->kfree_skb_any(skb);
 		return NETDEV_TX_OK;
 	}
 
-	if (!frame[next].sent) {
+	if (!ccat_eth_frame_sent(fifo->next)) {
 		netdev_err(dev, "BUG! Tx Ring full when queue awake!\n");
-		ccat_eth_tx_fifo_full(priv, &frame[next]);
+		priv->stop_queue(priv->netdev);
 		return NETDEV_TX_BUSY;
 	}
 
 	/* prepare frame in DMA memory */
-	frame[next].sent = 0;
-	frame[next].length = skb->len;
-	memcpy(frame[next].data, skb->data, skb->len);
+	fifo->next->tx_flags = cpu_to_le32(0);
+	fifo->next->length = cpu_to_le16(skb->len);
+	memcpy(fifo->next->data, skb->data, skb->len);
+
+	/* Queue frame into CCAT TX-FIFO, CCAT ignores the first 8 bytes of the tx descriptor */
+	addr_and_length = offsetof(struct ccat_eth_frame, length);
+	addr_and_length += ((void*)fifo->next - fifo->dma.virt);
+	addr_and_length += ((skb->len + CCAT_ETH_FRAME_HEAD_LEN) / 8) << 24;
+	iowrite32(addr_and_length, priv->reg.tx_fifo);
+
+	/* update stats */
+	atomic64_add(skb->len, &priv->tx_bytes);
 
 	priv->kfree_skb_any(skb);
 
-	addr_and_length = 8 + (next * sizeof(*frame));
-	addr_and_length +=
-	    ((frame[next].length + CCAT_ETH_FRAME_HEAD_LEN) / 8) << 24;
-	iowrite32(addr_and_length, priv->reg.tx_fifo);	/* add to DMA fifo */
-	atomic64_add(frame[next].length, &priv->tx_bytes);	/* update stats */
-
-	next = (next + 1) % FIFO_LENGTH;
+	ccat_eth_fifo_inc(fifo);
 	/* stop queue if tx ring is full */
-	if (!frame[next].sent) {
-		ccat_eth_tx_fifo_full(priv, &frame[next]);
+	if (!ccat_eth_frame_sent(fifo->next)) {
+		priv->stop_queue(priv->netdev);
 	}
 	return NETDEV_TX_OK;
 }
 
 /**
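
Annotation: the queueing step now packs everything the hardware needs into one 32-bit word. The low bits carry the byte offset of the descriptor's length field inside the DMA region (hence the offsetof() start value, since CCAT ignores the first 8 bytes of the descriptor), and bits 24-31 carry the frame size in 8-byte units. A worked example that compiles in user space; the field layout, the 2 KiB descriptor stride and the FRAME_HEAD_LEN value are assumptions for illustration, not the driver's real struct ccat_eth_frame:

#include <stddef.h>
#include <stdint.h>
#include <stdio.h>

struct frame {
	uint32_t head[2];		/* 8 ignored bytes before 'length' */
	uint16_t length;
	uint8_t data[0x800 - 10];	/* pad to an assumed 2 KiB stride */
};

#define FRAME_HEAD_LEN 4	/* stand-in for CCAT_ETH_FRAME_HEAD_LEN */

int main(void)
{
	const size_t idx = 3;		/* fourth descriptor in the ring */
	const uint32_t skb_len = 60;	/* a minimal Ethernet frame */

	uint32_t addr_and_length = offsetof(struct frame, length);
	addr_and_length += idx * sizeof(struct frame);	/* 3 * 0x800 */
	addr_and_length += ((skb_len + FRAME_HEAD_LEN) / 8) << 24;

	/* offset 0x1808 in the low bits, 8 quadwords in the top byte */
	printf("0x%08x\n", (unsigned)addr_and_length);	/* 0x08001808 */
	return 0;
}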
@@ -275,34 +284,32 @@
 	skb_copy_to_linear_data(skb, data, len);
 	skb_put(skb, len);
 	ccat_eth_start_xmit(skb, dev);
 }
 
-static const size_t CCATRXDESC_HEADER_LEN = 20;
 static void ccat_eth_receive(struct net_device *const dev,
-			     const struct ccat_eth_frame *const frame)
+			     const void *const data, const size_t len)
 {
-	struct ccat_eth_priv *const priv = netdev_priv(dev);
-	const size_t len = frame->length - CCATRXDESC_HEADER_LEN;
-	struct sk_buff *skb = dev_alloc_skb(len + NET_IP_ALIGN);
+	struct sk_buff *const skb = dev_alloc_skb(len + NET_IP_ALIGN);
+	struct ccat_eth_priv *const priv = netdev_priv(dev);
 
 	if (!skb) {
 		pr_info("%s() out of memory :-(\n", __FUNCTION__);
 		atomic64_inc(&priv->rx_dropped);
 		return;
 	}
 	skb->dev = dev;
 	skb_reserve(skb, NET_IP_ALIGN);
-	skb_copy_to_linear_data(skb, frame->data, len);
+	skb_copy_to_linear_data(skb, data, len);
 	skb_put(skb, len);
 	skb->protocol = eth_type_trans(skb, dev);
 	skb->ip_summed = CHECKSUM_UNNECESSARY;
 	atomic64_add(len, &priv->rx_bytes);
 	netif_rx(skb);
 }
 
-static void ccat_eth_link_down(struct net_device *dev)
+static void ccat_eth_link_down(struct net_device *const dev)
 {
 	struct ccat_eth_priv *const priv = netdev_priv(dev);
 
 	priv->stop_queue(dev);
 	priv->carrier_off(dev);
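
Annotation: dev_alloc_skb(len + NET_IP_ALIGN) followed by skb_reserve(skb, NET_IP_ALIGN) is the standard alignment trick: shifting the 14-byte Ethernet header by two bytes puts the IP header on a 4-byte boundary. A one-line sanity check (NET_IP_ALIGN defaults to 2 on most architectures):

#include <assert.h>

#define NET_IP_ALIGN 2	/* the kernel's default value */
#define ETH_HLEN 14	/* Ethernet header length */

int main(void)
{
	/* 2 + 14 = 16: the IP header starts 4-byte aligned */
	assert((NET_IP_ALIGN + ETH_HLEN) % 4 == 0);
	return 0;
}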
@@ -353,72 +360,61 @@
 		ccat_eth_link_down(priv->netdev);
 	}
 }
 
 /**
- * Rx handler in EtherCAT operation mode
- * priv->ecdev should always be valid!
+ * Poll for available rx dma descriptors in ethernet operating mode
  */
+static void poll_rx(struct ccat_eth_priv *const priv)
+{
+	static const size_t overhead = CCAT_ETH_FRAME_HEAD_LEN - 4;
+	struct ccat_eth_dma_fifo *const fifo = &priv->rx_fifo;
+
+	/* TODO omit possible deadlock in situations with heavy traffic */
+	while (ccat_eth_frame_received(fifo->next)) {
+		const size_t len = le16_to_cpu(fifo->next->length) - overhead;
+		if (priv->ecdev) {
+			ecdev_receive(priv->ecdev, fifo->next->data, len);
+		} else {
+			ccat_eth_receive(priv->netdev, fifo->next->data, len);
+		}
+		ccat_eth_rx_fifo_add(fifo, fifo->next);
+		ccat_eth_fifo_inc(fifo);
+	}
+}
+
 static void ec_poll_rx(struct net_device *dev)
 {
-	static size_t next = 0;
 	struct ccat_eth_priv *const priv = netdev_priv(dev);
-	struct ccat_eth_frame *frame =
-	    ((struct ccat_eth_frame *)priv->rx_fifo.dma.virt) + next;
-	if (frame->received) {
-		ecdev_receive(priv->ecdev, frame->data,
-			      frame->length - CCATRXDESC_HEADER_LEN);
-		frame->received = 0;
-		ccat_eth_rx_fifo_add(frame, &priv->rx_fifo);
-		next = (next + 1) % FIFO_LENGTH;
-	} else {
-		//TODO dev_warn(&dev->dev, "%s(): frame was not ready\n", __FUNCTION__);
-	}
-}
-
-/**
- * Poll for available rx dma descriptors in ethernet operating mode
- */
-static void poll_rx(struct ccat_eth_priv *const priv)
-{
-	struct ccat_eth_frame *const frame = priv->rx_fifo.dma.virt;
-	static size_t next = 0;
-
-	/* TODO omit possible deadlock in situations with heavy traffic */
-	while (frame[next].received) {
-		ccat_eth_receive(priv->netdev, frame + next);
-		frame[next].received = 0;
-		ccat_eth_rx_fifo_add(frame + next, &priv->rx_fifo);
-		next = (next + 1) % FIFO_LENGTH;
-	}
+	poll_rx(priv);
 }
 
 /**
  * Poll for available tx dma descriptors in ethernet operating mode
  */
 static void poll_tx(struct ccat_eth_priv *const priv)
 {
-	if (priv->next_tx_frame && priv->next_tx_frame->sent) {
-		priv->next_tx_frame = NULL;
+	if (ccat_eth_frame_sent(priv->tx_fifo.next)) {
 		netif_wake_queue(priv->netdev);
 	}
 }
 
 /**
  * Since CCAT doesn't support interrupts until now, we have to poll
  * some status bits to recognize things like link change etc.
  */
 static enum hrtimer_restart poll_timer_callback(struct hrtimer *timer)
 {
-	struct ccat_eth_priv *priv = container_of(timer, struct ccat_eth_priv,
-						  poll_timer);
+	struct ccat_eth_priv *const priv =
+	    container_of(timer, struct ccat_eth_priv, poll_timer);
 
 	poll_link(priv);
-	if(!priv->ecdev)
+	if(!priv->ecdev) {
 		poll_rx(priv);
-	poll_tx(priv);
-	hrtimer_forward_now(timer, ktime_set(0, 100 * NSEC_PER_USEC));
+		poll_tx(priv);
+	}
+	hrtimer_forward_now(timer, POLL_TIME);
 	return HRTIMER_RESTART;
 }
 
 static struct rtnl_link_stats64 *ccat_eth_get_stats64(struct net_device *dev, struct rtnl_link_stats64
 						      *storage)
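
Annotation: the TODO kept in poll_rx() flags a real hazard: if frames arrive at least as fast as the loop recycles them, the while loop never exits and the timer callback monopolizes the CPU. A conventional mitigation is a NAPI-style budget that caps the work done per tick; the sketch below bounds one poll to a single pass over the ring. The helpers and fields are the driver's own, but the budget logic is an assumption, not code from this patch:

static void poll_rx_budget(struct ccat_eth_priv *const priv)
{
	static const size_t overhead = CCAT_ETH_FRAME_HEAD_LEN - 4;
	struct ccat_eth_dma_fifo *const fifo = &priv->rx_fifo;
	size_t budget = FIFO_LENGTH;	/* at most one full ring per tick */

	while (budget-- && ccat_eth_frame_received(fifo->next)) {
		const size_t len = le16_to_cpu(fifo->next->length) - overhead;

		if (priv->ecdev)
			ecdev_receive(priv->ecdev, fifo->next->data, len);
		else
			ccat_eth_receive(priv->netdev, fifo->next->data, len);
		ccat_eth_rx_fifo_add(fifo, fifo->next);
		ccat_eth_fifo_inc(fifo);
	}
}

Frames left over simply wait for the next timer tick.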
@@ -462,22 +458,20 @@
 {
 	struct ccat_eth_priv *const priv = netdev_priv(dev);
 
 	hrtimer_init(&priv->poll_timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
 	priv->poll_timer.function = poll_timer_callback;
-	hrtimer_start(&priv->poll_timer, ktime_set(0, 100000),
-		      HRTIMER_MODE_REL);
+	hrtimer_start(&priv->poll_timer, POLL_TIME, HRTIMER_MODE_REL);
 	return 0;
 }
 
 static int ccat_eth_stop(struct net_device *dev)
 {
 	struct ccat_eth_priv *const priv = netdev_priv(dev);
 
 	priv->stop_queue(dev);
 	hrtimer_cancel(&priv->poll_timer);
-	netdev_info(dev, "stopped.\n");
 	return 0;
 }
 
 static const struct net_device_ops ccat_eth_netdev_ops = {
 	.ndo_get_stats64 = ccat_eth_get_stats64,
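
Annotation: open/stop complete the periodic-hrtimer pattern whose callback appeared above: the callback re-arms itself with hrtimer_forward_now() and returns HRTIMER_RESTART, open starts the timer, stop cancels it. Collected into a minimal self-contained module sketch for reference; the 100 µs period matches the literal ktime_set(0, 100 * NSEC_PER_USEC) that this patch replaces with POLL_TIME (defined elsewhere in the driver):

#include <linux/hrtimer.h>
#include <linux/ktime.h>
#include <linux/module.h>

static struct hrtimer demo_timer;

static enum hrtimer_restart demo_cb(struct hrtimer *timer)
{
	/* ...poll hardware status here... */
	hrtimer_forward_now(timer, ktime_set(0, 100 * NSEC_PER_USEC));
	return HRTIMER_RESTART;	/* keep the timer periodic */
}

static int __init demo_init(void)
{
	hrtimer_init(&demo_timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
	demo_timer.function = demo_cb;
	hrtimer_start(&demo_timer, ktime_set(0, 100 * NSEC_PER_USEC),
		      HRTIMER_MODE_REL);
	return 0;
}

static void __exit demo_exit(void)
{
	hrtimer_cancel(&demo_timer);	/* as in ccat_eth_stop() */
}

module_init(demo_init);
module_exit(demo_exit);
MODULE_LICENSE("GPL");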
@@ -517,11 +511,10 @@
 	priv->carrier_ok = ecdev_carrier_ok;
 	priv->carrier_on = ecdev_carrier_on;
 	priv->kfree_skb_any = ecdev_kfree_skb_any;
 	priv->start_queue = ecdev_nop;
 	priv->stop_queue = ecdev_nop;
-	priv->tx_fifo_full = ecdev_tx_fifo_full;
 	priv->unregister = unregister_ecdev;
 
 	priv->carrier_off(netdev);
 	if (ecdev_open(priv->ecdev)) {
 		pr_info("unable to register network device.\n");
@@ -538,11 +531,10 @@
 	priv->carrier_ok = netif_carrier_ok;
 	priv->carrier_on = netif_carrier_on;
 	priv->kfree_skb_any = dev_kfree_skb_any;
 	priv->start_queue = netif_start_queue;
 	priv->stop_queue = netif_stop_queue;
-	priv->tx_fifo_full = ccat_eth_tx_fifo_full;
 	priv->unregister = unregister_netdev;
 
 	priv->carrier_off(netdev);
 	if (register_netdev(netdev)) {
 		pr_info("unable to register network device.\n");
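
Annotation: both registration paths bind the same slots (carrier_ok/carrier_on, kfree_skb_any, start_queue/stop_queue, unregister) either to the ecdev_* helpers or to the regular netif_*/netdev ones, so hot paths such as ccat_eth_start_xmit() call through priv-> pointers without ever branching on the operating mode; dropping the tx_fifo_full slot from both lists is what lets the patch delete ecdev_tx_fifo_full() and ccat_eth_tx_fifo_full() above. A user-space sketch of the binding pattern (all names illustrative, not from the driver):

#include <stdio.h>

struct dev_ops {
	void (*stop_queue)(const char *name);
};

static void netif_stop(const char *name)
{
	printf("%s: queue stopped\n", name);
}

static void ecdev_noop(const char *name)
{
	/* nothing to do: the EtherCAT master polls us */
	(void)name;
}

int main(void)
{
	/* bound once at probe time, mode-free ever after */
	struct dev_ops eth = { .stop_queue = netif_stop };
	struct dev_ops ec = { .stop_queue = ecdev_noop };

	eth.stop_queue("eth0");	/* Linux netdev path */
	ec.stop_queue("ec0");	/* EtherCAT path */
	return 0;
}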