devices/ccat/netdev.c
branch       stable-1.5
changeset    2567:d70aad2f131f
parent       2565:f7b06b264646
child        2568:2f3078ec9ffb
  */
 
 #include <linux/etherdevice.h>
 #include <linux/init.h>
 #include <linux/kernel.h>
-#include <linux/kfifo.h>
-#include <linux/kthread.h>
 #include <linux/module.h>
 #include <linux/netdevice.h>
 #include <linux/spinlock.h>
 
 #include "compat.h"
[...]
 #include "print.h"
 
 /**
  * EtherCAT frame to enable forwarding on EtherCAT Terminals
  */
-static const UINT8 frameForwardEthernetFrames[] = {
+static const u8 frameForwardEthernetFrames[] = {
 	0x01, 0x01, 0x05, 0x01, 0x00, 0x00,
 	0x00, 0x1b, 0x21, 0x36, 0x1b, 0xce,
 	0x88, 0xa4, 0x0e, 0x10,
 	0x08,
 	0x00,
[...]
 	0x00, 0x00,
 	0x00, 0x00
 };
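For orientation (this annotation is not part of the changeset): the array is a complete Ethernet frame stored byte for byte. The first twelve bytes are the destination and source MAC addresses, followed by the EtherType 0x88a4 registered for EtherCAT; everything after that is the EtherCAT telegram itself. The command-byte reading below is inferred from the EtherCAT specification, not from this diff, and the bytes elided by the comparison view stay elided here as well.

static const u8 annotated_frame_example[] = {
	0x01, 0x01, 0x05, 0x01, 0x00, 0x00,	/* destination MAC (multicast)        */
	0x00, 0x1b, 0x21, 0x36, 0x1b, 0xce,	/* source MAC                         */
	0x88, 0xa4,				/* EtherType 0x88a4 = EtherCAT        */
	0x0e, 0x10,				/* EtherCAT frame header (length/type, little endian) */
	0x08,					/* datagram command, reads as BWR (broadcast write), assumed */
	0x00,					/* datagram index                     */
	/* ... address/length/data bytes elided in the comparison view ... */
	0x00, 0x00,				/* trailing bytes as shown above      */
	0x00, 0x00
};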
 
 #define FIFO_LENGTH 64
-#define DMA_POLL_DELAY_RANGE_USECS 100, 100	/* time to sleep between rx/tx DMA polls */
-#define POLL_DELAY_RANGE_USECS 500, 1000	/* time to sleep between link state polls */
-
 static void ec_poll(struct net_device *dev);
-static int run_poll_thread(void *data);
-static int run_rx_thread(void *data);
-static int run_tx_thread(void *data);
+static enum hrtimer_restart poll_timer_callback(struct hrtimer *timer);
 
 #if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 35)
 static struct rtnl_link_stats64 *ccat_eth_get_stats64(struct net_device *dev, struct rtnl_link_stats64
 						      *storage);
 #endif
[...]
 static void ecdev_kfree_skb_any(struct sk_buff *skb)
 {
 	/* never release a skb in EtherCAT mode */
 }
 
+static bool ecdev_carrier_ok(struct net_device *const netdev)
+{
+	struct ccat_eth_priv *const priv = netdev_priv(netdev);
+	return ecdev_get_link(priv->ecdev);
+}
+
 static void ecdev_carrier_on(struct net_device *const netdev)
 {
 	struct ccat_eth_priv *const priv = netdev_priv(netdev);
 	ecdev_set_link(priv->ecdev, 1);
 }
[...]
 static void ecdev_nop(struct net_device *const netdev)
 {
 	/* dummy called if nothing has to be done in EtherCAT operation mode */
 }
 
-static void ecdev_tx_fifo_full(struct net_device *const dev,
+static void ecdev_tx_fifo_full(struct ccat_eth_priv *const priv,
 			       const struct ccat_eth_frame *const frame)
 {
 	/* we are polled -> there is nothing we can do in EtherCAT mode */
 }
 
[...]
 
 static void ccat_eth_rx_fifo_add(struct ccat_eth_frame *frame,
 				 struct ccat_eth_dma_fifo *fifo)
 {
 	const size_t offset = ((void *)(frame) - fifo->dma.virt);
-	const uint32_t addr_and_length = (1 << 31) | offset;
+	const u32 addr_and_length = (1 << 31) | offset;
+
 	frame->received = 0;
 	iowrite32(addr_and_length, fifo->reg);
 }
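A note on the descriptor word above (not part of the changeset): the value written to the FIFO register packs the frame's byte offset within the DMA region into the low bits and sets bit 31. Judging from the code alone, the high bit appears to hand the buffer back to the hardware; that interpretation is an assumption, since the CCAT register documentation is not included here.

/* Hypothetical helper restating the word built in ccat_eth_rx_fifo_add();
 * treating bit 31 as a "buffer available to hardware" flag is an assumption. */
static u32 example_rx_fifo_word(size_t offset_in_dma_region)
{
	return (1u << 31) | (u32)offset_in_dma_region;
}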
 
 static void ccat_eth_tx_fifo_add_free(struct ccat_eth_frame *frame,
[...]
 {
 	/* mark frame as ready to use for tx */
 	frame->sent = 1;
 }
 
-static void ccat_eth_tx_fifo_full(struct net_device *const dev,
+static void ccat_eth_tx_fifo_full(struct ccat_eth_priv *const priv,
 				  const struct ccat_eth_frame *const frame)
 {
-	struct ccat_eth_priv *const priv = netdev_priv(dev);
-	netif_stop_queue(dev);
+	priv->stop_queue(priv->netdev);
 	priv->next_tx_frame = frame;
-	wake_up_process(priv->tx_thread);
 }
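ccat_eth_tx_fifo_full() now receives the private struct directly and stops the queue through the mode-dependent priv->stop_queue callback; the stalled frame is remembered in next_tx_frame so that poll_tx(), added further down in this changeset, can wake the queue once the hardware marks the frame as sent. A sketch of the callback wiring behind that indirection (the real assignments appear in ccat_eth_init() below):

/* Sketch only; the actual assignments are in ccat_eth_init() further down. */
static void example_select_queue_callbacks(struct ccat_eth_priv *priv)
{
	if (priv->ecdev) {		/* EtherCAT mode: the master polls us */
		priv->start_queue = ecdev_nop;
		priv->stop_queue = ecdev_nop;
		priv->tx_fifo_full = ecdev_tx_fifo_full;
	} else {			/* normal network device mode */
		priv->start_queue = netif_start_queue;
		priv->stop_queue = netif_stop_queue;
		priv->tx_fifo_full = ccat_eth_tx_fifo_full;
	}
}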
 
 static void ccat_eth_dma_fifo_reset(struct ccat_eth_dma_fifo *fifo)
 {
 	struct ccat_eth_frame *frame = fifo->dma.virt;
[...]
 				  struct ccat_eth_priv *const priv)
 {
 	if (0 !=
 	    ccat_dma_init(&fifo->dma, channel, priv->ccatdev->bar[2].ioaddr,
 			  &priv->ccatdev->pdev->dev)) {
-		pr_info("init DMA%llu memory failed.\n", (uint64_t) channel);
+		pr_info("init DMA%llu memory failed.\n", (u64) channel);
 		return -1;
 	}
 	fifo->add = add;
 	fifo->reg = fifo_reg;
 	return 0;
[...]
 	wmb();
 
 	/* release dma */
 	ccat_dma_free(&priv->rx_fifo.dma);
 	ccat_dma_free(&priv->tx_fifo.dma);
-	pr_debug("DMA fifo's stopped.\n");
 }
 
 /**
  * Initializes both (Rx/Tx) DMA fifo's and related management structures
  */
[...]
 static void ccat_eth_priv_init_mappings(struct ccat_eth_priv *priv)
 {
 	CCatInfoBlockOffs offsets;
 	void __iomem *const func_base =
 	    priv->ccatdev->bar[0].ioaddr + priv->info.nAddr;
+
 	memcpy_fromio(&offsets, func_base, sizeof(offsets));
 	priv->reg.mii = func_base + offsets.nMMIOffs;
 	priv->reg.tx_fifo = func_base + offsets.nTxFifoOffs;
 	priv->reg.rx_fifo = func_base + offsets.nTxFifoOffs + 0x10;
 	priv->reg.mac = func_base + offsets.nMacRegOffs;
[...]
 static struct rtnl_link_stats64 *ccat_eth_get_stats64(struct net_device *dev, struct rtnl_link_stats64
 						      *storage)
 {
 	struct ccat_eth_priv *const priv = netdev_priv(dev);
 	CCatMacRegs mac;
+
 	memcpy_fromio(&mac, priv->reg.mac, sizeof(mac));
 	storage->rx_packets = mac.rxFrameCnt;	/* total packets received       */
 	storage->tx_packets = mac.txFrameCnt;	/* total packets transmitted    */
 	storage->rx_bytes = atomic64_read(&priv->rx_bytes);	/* total bytes received         */
 	storage->tx_bytes = atomic64_read(&priv->tx_bytes);	/* total bytes transmitted      */
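The counters above are read straight out of the MAC register block via memcpy_fromio(); the byte counters are kept in software because the hardware apparently only counts frames. How the function is hooked up is not visible in this excerpt; on kernels new enough for .ndo_get_stats64 it would be wired into the driver's net_device_ops roughly like the hypothetical sketch below.

/* Hypothetical wiring; the driver's actual net_device_ops table lives
 * outside the lines shown in this comparison. */
static const struct net_device_ops example_ccat_netdev_ops = {
	.ndo_open	= ccat_eth_open,
	.ndo_stop	= ccat_eth_stop,
	.ndo_start_xmit	= ccat_eth_start_xmit,
#if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 35)
	.ndo_get_stats64 = ccat_eth_get_stats64,
#endif
};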
 struct ccat_eth_priv *ccat_eth_init(const struct ccat_device *const ccatdev,
 				    const void __iomem * const addr)
 {
 	struct ccat_eth_priv *priv;
 	struct net_device *const netdev = alloc_etherdev(sizeof(*priv));
+
 	priv = netdev_priv(netdev);
 	priv->netdev = netdev;
 	priv->ccatdev = ccatdev;
 
 	/* ccat register mappings */
[...]
 
 	/* use as EtherCAT device? */
 	priv->ecdev = ecdev_offer(netdev, ec_poll, THIS_MODULE);
 	if (priv->ecdev) {
 		priv->carrier_off = ecdev_carrier_off;
+		priv->carrier_ok = ecdev_carrier_ok;
 		priv->carrier_on = ecdev_carrier_on;
 		priv->kfree_skb_any = ecdev_kfree_skb_any;
 		priv->start_queue = ecdev_nop;
 		priv->stop_queue = ecdev_nop;
 		priv->tx_fifo_full = ecdev_tx_fifo_full;
[...]
 		return priv;
 	}
 
 	/* EtherCAT disabled -> prepare normal ethernet mode */
 	priv->carrier_off = netif_carrier_off;
+	priv->carrier_ok = netif_carrier_ok;
 	priv->carrier_on = netif_carrier_on;
 	priv->kfree_skb_any = dev_kfree_skb_any;
 	priv->start_queue = netif_start_queue;
 	priv->stop_queue = netif_stop_queue;
 	priv->tx_fifo_full = ccat_eth_tx_fifo_full;
[...]
 		ccat_eth_priv_free_dma(priv);
 		free_netdev(netdev);
 		return NULL;
 	}
 	pr_info("registered %s as network device.\n", netdev->name);
-	priv->rx_thread = kthread_run(run_rx_thread, netdev, "%s_rx", KBUILD_MODNAME);
-	priv->tx_thread = kthread_run(run_tx_thread, netdev, "%s_tx", KBUILD_MODNAME);
 	return priv;
 }
 
 void ccat_eth_remove(struct ccat_eth_priv *const priv)
 {
-	if (priv->rx_thread) {
-		kthread_stop(priv->rx_thread);
-	}
-	if (priv->tx_thread) {
-		kthread_stop(priv->tx_thread);
-	}
 	priv->unregister(priv->netdev);
 	ccat_eth_priv_free_dma(priv);
 	free_netdev(priv->netdev);
 	pr_debug("%s(): done\n", __FUNCTION__);
 }
 
 static int ccat_eth_open(struct net_device *dev)
 {
 	struct ccat_eth_priv *const priv = netdev_priv(dev);
+
 	priv->carrier_off(dev);
-	priv->poll_thread =
-	    kthread_run(run_poll_thread, dev, "%s_poll", KBUILD_MODNAME);
-
-	//TODO
+	hrtimer_init(&priv->poll_timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
+	priv->poll_timer.function = poll_timer_callback;
+	hrtimer_start(&priv->poll_timer, ktime_set(0, 100000),
+		      HRTIMER_MODE_REL);
 	return 0;
 }
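ccat_eth_open() now arms a 100 µs high-resolution timer instead of spawning the polling kthread, and ccat_eth_stop() (further down) tears it down with hrtimer_cancel(). A minimal sketch of that lifecycle, using the field and callback names from this diff; ktime_set(0, 100 * NSEC_PER_USEC) is the same 100000 ns interval written literally in the hunk above.

#include <linux/hrtimer.h>
#include <linux/ktime.h>

/* sketch: poll_timer is the hrtimer embedded in struct ccat_eth_priv */
static void example_poll_start(struct ccat_eth_priv *priv)
{
	hrtimer_init(&priv->poll_timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
	priv->poll_timer.function = poll_timer_callback;
	/* first expiry 100 us from now, relative to the current time */
	hrtimer_start(&priv->poll_timer, ktime_set(0, 100 * NSEC_PER_USEC),
		      HRTIMER_MODE_REL);
}

static void example_poll_stop(struct ccat_eth_priv *priv)
{
	/* waits for a running callback to finish before returning */
	hrtimer_cancel(&priv->poll_timer);
}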
 
 static const size_t CCATRXDESC_HEADER_LEN = 20;
 static void ccat_eth_receive(struct net_device *const dev,
 			     const struct ccat_eth_frame *const frame)
 {
 	struct ccat_eth_priv *const priv = netdev_priv(dev);
 	const size_t len = frame->length - CCATRXDESC_HEADER_LEN;
 	struct sk_buff *skb = dev_alloc_skb(len + NET_IP_ALIGN);
+
 	if (!skb) {
 		pr_info("%s() out of memory :-(\n", __FUNCTION__);
 		atomic64_inc(&priv->rx_dropped);
 		return;
 	}
[...]
 {
 	static size_t next = 0;
 	struct ccat_eth_priv *const priv = netdev_priv(dev);
 	struct ccat_eth_frame *const frame =
 	    ((struct ccat_eth_frame *)priv->tx_fifo.dma.virt);
-	uint32_t addr_and_length;
+	u32 addr_and_length;
 
 	if (skb_is_nonlinear(skb)) {
 		pr_warn("Non linear skb not supported -> drop frame.\n");
 		atomic64_inc(&priv->tx_dropped);
 		priv->kfree_skb_any(skb);
 		return NETDEV_TX_OK;
 	}
 
 	if (skb->len > sizeof(frame->data)) {
 		pr_warn("skb.len %llu exceeds dma buffer %llu -> drop frame.\n",
-			(uint64_t) skb->len, (uint64_t) sizeof(frame->data));
+			(u64) skb->len, (u64) sizeof(frame->data));
 		atomic64_inc(&priv->tx_dropped);
 		priv->kfree_skb_any(skb);
 		return NETDEV_TX_OK;
 	}
 
 	if (!frame[next].sent) {
 		netdev_err(dev, "BUG! Tx Ring full when queue awake!\n");
-		ccat_eth_tx_fifo_full(dev, &frame[next]);
+		ccat_eth_tx_fifo_full(priv, &frame[next]);
 		return NETDEV_TX_BUSY;
 	}
 
 	/* prepare frame in DMA memory */
 	frame[next].sent = 0;
[...]
 	atomic64_add(frame[next].length, &priv->tx_bytes);	/* update stats */
 
 	next = (next + 1) % FIFO_LENGTH;
 	/* stop queue if tx ring is full */
 	if (!frame[next].sent) {
-		ccat_eth_tx_fifo_full(dev, &frame[next]);
+		ccat_eth_tx_fifo_full(priv, &frame[next]);
 	}
 	return NETDEV_TX_OK;
 }
 
 static int ccat_eth_stop(struct net_device *dev)
 {
 	struct ccat_eth_priv *const priv = netdev_priv(dev);
+
 	priv->stop_queue(dev);
-	if (priv->poll_thread) {
-		/* TODO care about smp context? */
-		kthread_stop(priv->poll_thread);
-		priv->poll_thread = NULL;
-	}
+	hrtimer_cancel(&priv->poll_timer);
 	netdev_info(dev, "stopped.\n");
 	return 0;
 }
 
 static void ccat_eth_link_down(struct net_device *dev)
 {
 	struct ccat_eth_priv *const priv = netdev_priv(dev);
+
 	priv->stop_queue(dev);
 	priv->carrier_off(dev);
 	netdev_info(dev, "NIC Link is Down\n");
 }
 
 static void ccat_eth_link_up(struct net_device *const dev)
 {
 	struct ccat_eth_priv *const priv = netdev_priv(dev);
+
 	netdev_info(dev, "NIC Link is Up\n");
 	/* TODO netdev_info(dev, "NIC Link is Up %u Mbps %s Duplex\n",
 	   speed == SPEED_100 ? 100 : 10,
 	   cmd.duplex == DUPLEX_FULL ? "Full" : "Half"); */
 
[...]
  */
 static void ccat_eth_xmit_raw(struct net_device *dev, const char *const data,
 			      size_t len)
 {
 	struct sk_buff *skb = dev_alloc_skb(len);
+
 	skb->dev = dev;
 	skb_copy_to_linear_data(skb, data, len);
 	skb_put(skb, len);
 	ccat_eth_start_xmit(skb, dev);
 }
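ccat_eth_xmit_raw() wraps an arbitrary byte buffer in a freshly allocated skb and pushes it through the normal transmit path. The call site is outside the lines shown here, but presumably this is how the forwarding-enable frame defined at the top of the file gets sent once the link comes up; a hypothetical caller would look like this:

/* Hypothetical call site; the real one is not part of this excerpt. */
static void example_send_forwarding_frame(struct net_device *dev)
{
	ccat_eth_xmit_raw(dev, (const char *)frameForwardEthernetFrames,
			  sizeof(frameForwardEthernetFrames));
}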
 
 /**
+ * Poll for link state changes
+ */
+static void poll_link(struct ccat_eth_priv *const priv)
+{
+	const size_t link = ccat_eth_priv_read_link_state(priv);
+
+	if (link != priv->carrier_ok(priv->netdev)) {
+		if (link)
+			ccat_eth_link_up(priv->netdev);
+		else
+			ccat_eth_link_down(priv->netdev);
+	}
+}
+
+/**
+ * Poll for available rx dma descriptors in ethernet operating mode
+ */
+static void poll_rx(struct ccat_eth_priv *const priv)
+{
+	struct ccat_eth_frame *const frame = priv->rx_fifo.dma.virt;
+	static size_t next = 0;
+
+	/* TODO omit possible deadlock in situations with heavy traffic */
+	while (frame[next].received) {
+		ccat_eth_receive(priv->netdev, frame + next);
+		frame[next].received = 0;
+		ccat_eth_rx_fifo_add(frame + next, &priv->rx_fifo);
+		next = (next + 1) % FIFO_LENGTH;
+	}
+}
+
+/**
+ * Poll for available tx dma descriptors in ethernet operating mode
+ */
+static void poll_tx(struct ccat_eth_priv *const priv)
+{
+	if (priv->next_tx_frame && priv->next_tx_frame->sent) {
+		priv->next_tx_frame = NULL;
+		netif_wake_queue(priv->netdev);
+	}
+}
+
+/**
  * Since CCAT doesn't support interrupts until now, we have to poll
  * some status bits to recognize things like link change etc.
  */
-static int run_poll_thread(void *data)
-{
-	struct net_device *const dev = (struct net_device *)data;
-	struct ccat_eth_priv *const priv = netdev_priv(dev);
-	size_t link = 0;
-
-	while (!kthread_should_stop()) {
-		if (ccat_eth_priv_read_link_state(priv) != link) {
-			link = !link;
-			link ? ccat_eth_link_up(dev) : ccat_eth_link_down(dev);
-		}
-		usleep_range(POLL_DELAY_RANGE_USECS);
-	}
-	pr_debug("%s() stopped.\n", __FUNCTION__);
-	return 0;
-}
-
-static int run_rx_thread(void *data)
-{
-	struct net_device *const dev = (struct net_device *)data;
-	struct ccat_eth_priv *const priv = netdev_priv(dev);
-	struct ccat_eth_frame *frame = priv->rx_fifo.dma.virt;
-	const struct ccat_eth_frame *const end = frame + FIFO_LENGTH;
-
-	while (!kthread_should_stop()) {
-		/* wait until frame was used by DMA for Rx */
-		while (!kthread_should_stop() && !frame->received) {
-			usleep_range(DMA_POLL_DELAY_RANGE_USECS);
-		}
-
-		/* can be NULL, if we are asked to stop! */
-		if (frame->received) {
-			ccat_eth_receive(dev, frame);
-			frame->received = 0;
-			ccat_eth_rx_fifo_add(frame, &priv->rx_fifo);
-		}
-		if (++frame >= end) {
-			frame = priv->rx_fifo.dma.virt;
-		}
-	}
-	pr_debug("%s() stopped.\n", __FUNCTION__);
-	return 0;
-}
-
-/**
- * Polling of tx dma descriptors in ethernet operating mode
- */
-static int run_tx_thread(void *data)
-{
-	struct net_device *const dev = (struct net_device *)data;
-	struct ccat_eth_priv *const priv = netdev_priv(dev);
-
-	set_current_state(TASK_INTERRUPTIBLE);
-	while (!kthread_should_stop()) {
-		const struct ccat_eth_frame *const frame = priv->next_tx_frame;
-		if (frame) {
-			while (!kthread_should_stop() && !frame->sent) {
-				usleep_range(DMA_POLL_DELAY_RANGE_USECS);
-			}
-		}
-		netif_wake_queue(dev);
-		schedule();
-		set_current_state(TASK_INTERRUPTIBLE);
-	}
-	set_current_state(TASK_RUNNING);
-	pr_debug("%s() stopped.\n", __FUNCTION__);
-	return 0;
-}
+static enum hrtimer_restart poll_timer_callback(struct hrtimer *timer)
+{
+	struct ccat_eth_priv *priv = container_of(timer, struct ccat_eth_priv,
+						  poll_timer);
+
+	poll_link(priv);
+	if(!priv->ecdev)
+		poll_rx(priv);
+	poll_tx(priv);
+	hrtimer_forward_now(timer, ktime_set(0, 100 * NSEC_PER_USEC));
+	return HRTIMER_RESTART;
+}
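Unlike the removed kthread loops, the hrtimer callback runs in interrupt context and must not sleep, which is why the usleep_range()-based waits and their delay macros are gone; poll_link(), poll_rx() and poll_tx() are all non-blocking. poll_rx() is skipped when an EtherCAT master has claimed the device, presumably because received frames are then fetched through ec_poll() instead. One detail worth noting, shown in the hypothetical sketch below: hrtimer_forward_now() returns the number of periods it had to skip, so a future revision could detect overruns of the 100 µs poll cycle.

/* Hypothetical variant of poll_timer_callback() that logs overruns;
 * not part of the changeset. */
static enum hrtimer_restart example_poll_timer_callback(struct hrtimer *timer)
{
	struct ccat_eth_priv *priv = container_of(timer, struct ccat_eth_priv,
						  poll_timer);
	u64 missed;

	poll_link(priv);
	if (!priv->ecdev)
		poll_rx(priv);
	poll_tx(priv);

	missed = hrtimer_forward_now(timer, ktime_set(0, 100 * NSEC_PER_USEC));
	if (missed > 1)
		pr_debug("poll cycle overrun: %llu periods skipped\n",
			 (unsigned long long)missed);
	return HRTIMER_RESTART;
}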