	0x00, 0x00,
	0x00, 0x00
};

#define FIFO_LENGTH 64
#define POLL_TIME ktime_set(0, 50 * NSEC_PER_USEC)
#define CCAT_ALIGNMENT ((size_t)(128 * 1024))
#define CCAT_ALIGN_CHANNEL(x, c) ((typeof(x))(ALIGN((size_t)((x) + ((c) * CCAT_ALIGNMENT)), CCAT_ALIGNMENT)))
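
/*
 * Each DMA channel owns a 128 KiB window; CCAT_ALIGN_CHANNEL() yields
 * the aligned start address of channel c's window within the shared
 * coherent buffer (see ccat_dma_init() below).
 */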

struct ccat_dma_frame_hdr {
	__le32 reserved1;
	__le32 rx_flags;
#define CCAT_FRAME_RECEIVED 0x1
	__le16 length;
	__le16 reserved3;
	__le32 tx_flags;
#define CCAT_FRAME_SENT 0x1
	__le64 timestamp;
};

struct ccat_eim_frame_hdr {
	__le16 length;
	__le16 reserved3;
	__le32 tx_flags;
	__le64 timestamp;
};

struct ccat_eth_frame {
	u8 placeholder[0x800];
};

struct ccat_dma_frame {
	struct ccat_dma_frame_hdr hdr;
	u8 data[sizeof(struct ccat_eth_frame) -
		sizeof(struct ccat_dma_frame_hdr)];
};

struct ccat_eim_frame {
	struct ccat_eim_frame_hdr hdr;
	u8 data[sizeof(struct ccat_eth_frame) -
		sizeof(struct ccat_eim_frame_hdr)];
};

#define MAX_PAYLOAD_SIZE \
	(sizeof(struct ccat_eth_frame) - max(sizeof(struct ccat_dma_frame_hdr), sizeof(struct ccat_eim_frame_hdr)))

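/*
 * Every frame slot is 0x800 bytes; the usable payload is whatever
 * remains after the larger of the two header variants, so the same
 * MAX_PAYLOAD_SIZE limit holds for DMA and EIM mode alike.
 */
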
/**
 * struct ccat_eth_register - CCAT register addresses in the PCI BAR
 * @mii: address of the CCAT management interface register
 * @mac: address of the CCAT media access control register
 * @rx_mem: address of the CCAT register holding the RX DMA address
 * @tx_mem: address of the CCAT register holding the TX DMA address
 * @misc: address of a CCAT register holding miscellaneous information
 */
struct ccat_eth_register {
	void __iomem *mii;
	void __iomem *mac;
	void __iomem *rx_mem;
	void __iomem *tx_mem;
	void __iomem *misc;
};

/**
 * struct ccat_dma_mem - CCAT DMA channel configuration
 * @size: number of bytes in the associated DMA memory
 * @phys: device-viewed (physical) address of the associated DMA memory
 * @channel: CCAT DMA channel number
 * @dev: valid struct device pointer
 * @base: CPU-viewed (virtual) address of the associated DMA memory
 */
struct ccat_dma_mem {
	size_t size;
	dma_addr_t phys;
	size_t channel;
	struct device *dev;
	void *base;
};

/**
 * struct ccat_dma, struct ccat_eim, struct ccat_mem - FIFO ring memory
 * @next: pointer to the next frame in the FIFO ring buffer
 * @start: aligned CPU-viewed (virtual) address of the associated memory
 */
struct ccat_dma {
	struct ccat_dma_frame *next;
	void *start;
};

struct ccat_eim {
	struct ccat_eim_frame __iomem *next;
	void __iomem *start;
};

struct ccat_mem {
	struct ccat_eth_frame *next;
	void *start;
};

/**
 * struct ccat_eth_fifo - CCAT RX or TX FIFO
 * @ops: function pointer table for DMA/EIM and RX/TX specific FIFO functions
 * @end: address of the last frame slot in the ring
 * @reg: PCI register address of this FIFO
 * @bytes: number of bytes processed -> reported with ndo_get_stats64()
 * @dropped: number of dropped frames -> reported with ndo_get_stats64()
 * @mem/dma/eim: information about the associated memory
 */
struct ccat_eth_fifo {
	const struct ccat_eth_fifo_operations *ops;
	const struct ccat_eth_frame *end;
	void __iomem *reg;
	atomic64_t bytes;
	atomic64_t dropped;
	union {
		struct ccat_mem mem;
		struct ccat_dma dma;
		struct ccat_eim eim;
	};
};

/**
 * struct ccat_eth_fifo_operations
 * @ready: callback used to test the next frame's ready bit; RX variants
 *	return the payload length of a pending frame (0 if none), TX
 *	variants return non-zero when the next slot can be used
 * @add: callback used to add a frame to this FIFO
 * @queue.copy_to_skb: callback used to copy from RX FIFOs to skbs
 * @queue.skb: callback used to queue skbs into TX FIFOs
 */
struct ccat_eth_fifo_operations {
	size_t (*ready)(struct ccat_eth_fifo *);
	void (*add)(struct ccat_eth_fifo *);
	union {
		void (*copy_to_skb)(struct ccat_eth_fifo *, struct sk_buff *,
				    size_t);
		void (*skb)(struct ccat_eth_fifo *, struct sk_buff *);
	} queue;
};

/**
 * same as: typedef struct _CCatInfoBlockOffs from CCatDefinitions.h
 */
struct ccat_mac_infoblock {
	u32 reserved;
	u32 mii;
	u32 tx_fifo;
	u32 mac;
	u32 rx_mem;
	u32 tx_mem;
	u32 misc;
};

/**
 * struct ccat_eth_priv - CCAT Ethernet/EtherCAT Master function (netdev)
 * @func: pointer to the parent struct ccat_function
 * @netdev: the net_device structure used by the kernel networking stack
 * @reg: register addresses in the PCI BAR of the Ethernet/EtherCAT Master function
 * @rx_fifo: fifo used for RX descriptors
 * @tx_fifo: fifo used for TX descriptors
 * @poll_timer: interval timer used to poll CCAT for events like link changed, rx done, tx done
 * @dma_mem: DMA memory shared by the RX and TX channels
 * @ecdev: EtherCAT device handle; non-NULL when claimed by an EtherCAT master
 */
struct ccat_eth_priv {
	struct ccat_function *func;
	struct net_device *netdev;
	struct ccat_eth_register reg;
	struct ccat_eth_fifo rx_fifo;
	struct ccat_eth_fifo tx_fifo;
	struct hrtimer poll_timer;
	struct ccat_dma_mem dma_mem;
	ec_device_t *ecdev;
	void (*carrier_off)(struct net_device *netdev);
	bool (*carrier_ok)(const struct net_device *netdev);
	void (*carrier_on)(struct net_device *netdev);
	void (*kfree_skb_any)(struct sk_buff *skb);
	void (*receive)(struct ccat_eth_priv *, size_t);
	void (*start_queue)(struct net_device *netdev);
	void (*stop_queue)(struct net_device *netdev);
	void (*unregister)(struct net_device *netdev);
};

struct ccat_mac_register {
	/** MAC error register @+0x0 */
	u8 frame_len_err;
	u8 rx_err;
	u8 crc_err;
	u8 link_lost_err;
	u32 reserved1;
	/** Buffer overflow errors @+0x8 */
	u8 rx_mem_full;
	u8 reserved2[7];
	/** MAC frame counter @+0x10 */
	u32 tx_frames;
	u32 rx_frames;
	u64 reserved3;
	/** MAC fifo level @+0x20 */
	u8 tx_fifo_level:7;
	u8 reserved4:1;
	u8 reserved5[7];
	/** TX memory full error @+0x28 */
	u8 tx_mem_full;
	u8 reserved6[7];
	u64 reserved8[9];
	/** Connection @+0x78 */
	u8 mii_connected;
};

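/**
 * fifo_set_end() - remember the last valid frame slot of a FIFO ring
 * @size: total number of bytes of the ring memory; the last slot
 *	starts sizeof(struct ccat_eth_frame) before its end
 */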
static void fifo_set_end(struct ccat_eth_fifo *const fifo, size_t size)
{
	fifo->end = fifo->mem.start + size - sizeof(struct ccat_eth_frame);
}

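/**
 * ccat_dma_free() - release the coherent DMA memory and both DMA channels
 *
 * The bookkeeping data is copied and cleared first, so no stale
 * pointers survive in priv->dma_mem after the memory is handed back.
 */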
static void ccat_dma_free(struct ccat_eth_priv *const priv)
{
	if (priv->dma_mem.base) {
		const struct ccat_dma_mem tmp = priv->dma_mem;

		memset(&priv->dma_mem, 0, sizeof(priv->dma_mem));
		dma_free_coherent(tmp.dev, tmp.size, tmp.base, tmp.phys);
		free_dma(priv->func->info.tx_dma_chan);
		free_dma(priv->func->info.rx_dma_chan);
	}
}

/**
 * ccat_dma_init() - Initialize CCAT and host memory for DMA transfer
 * @dma: object for management data which will be initialized
 * @channel: number of the DMA channel
 * @bar2: ioaddr of the PCI BAR2 config space, used to calculate the address of the PCI DMA configuration
 * @fifo: FIFO which should be configured for DMA
 */
static int ccat_dma_init(struct ccat_dma_mem *const dma, size_t channel,
			 void __iomem *const bar2,
			 struct ccat_eth_fifo *const fifo)
{
	void __iomem *const ioaddr = bar2 + 0x1000 + (sizeof(u64) * channel);
	const dma_addr_t phys = CCAT_ALIGN_CHANNEL(dma->phys, channel);
	const u32 phys_hi = (sizeof(phys) > sizeof(u32)) ? phys >> 32 : 0;

	fifo->dma.start = CCAT_ALIGN_CHANNEL(dma->base, channel);
	fifo_set_end(fifo, CCAT_ALIGNMENT);
	if (request_dma(channel, KBUILD_MODNAME)) {
		pr_info("request dma channel %llu failed\n", (u64) channel);
		return -EINVAL;
	}

	/* bit 0 enables 64 bit mode on CCAT */
	iowrite32((u32) phys | (phys_hi > 0), ioaddr);
	iowrite32(phys_hi, ioaddr + 4);

	pr_debug
	    ("DMA%llu mem initialized\n base: 0x%p\n start: 0x%p\n phys: 0x%09llx\n pci addr: 0x%01x%08x\n size: %llu |%llx bytes.\n",
	     (u64) channel, dma->base, fifo->dma.start, (u64) dma->phys,
	     ioread32(ioaddr + 4), ioread32(ioaddr),
	     (u64) dma->size, (u64) dma->size);
	return 0;
}

static void ecdev_kfree_skb_any(struct sk_buff *skb)
{
	/* never release a skb in EtherCAT mode */
}

/* ... elided: presumably the ecdev carrier helpers referenced below ... */

static void ecdev_nop(struct net_device *const netdev)
{
	/* dummy called if nothing has to be done in EtherCAT operation mode */
}

static void ecdev_receive_dma(struct ccat_eth_priv *const priv, size_t len)
{
	ecdev_receive(priv->ecdev, priv->rx_fifo.dma.next->data, len);
}

static void ecdev_receive_eim(struct ccat_eth_priv *const priv, size_t len)
{
	ecdev_receive(priv->ecdev, priv->rx_fifo.eim.next->data, len);
}

static void unregister_ecdev(struct net_device *const netdev)
{
	struct ccat_eth_priv *const priv = netdev_priv(netdev);

	ecdev_close(priv->ecdev);
	ecdev_withdraw(priv->ecdev);
}

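/**
 * fifo_eim_tx_ready() - check whether the hardware TX FIFO is drained
 *
 * Reads the MAC's tx_fifo_level field (offset 0x20, masked with
 * TX_FIFO_LEVEL_MASK, see struct ccat_mac_register); a level of zero
 * means the next frame may be queued.
 */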
static inline size_t fifo_eim_tx_ready(struct ccat_eth_fifo *const fifo)
{
	struct ccat_eth_priv *const priv =
	    container_of(fifo, struct ccat_eth_priv, tx_fifo);
	static const size_t TX_FIFO_LEVEL_OFFSET = 0x20;
	static const u8 TX_FIFO_LEVEL_MASK = 0x3F;
	void __iomem *addr = priv->reg.mac + TX_FIFO_LEVEL_OFFSET;

	return !(ioread8(addr) & TX_FIFO_LEVEL_MASK);
}

static inline size_t fifo_eim_rx_ready(struct ccat_eth_fifo *const fifo)
{
	static const size_t OVERHEAD = sizeof(struct ccat_eim_frame_hdr);
	const size_t len = ioread16(&fifo->eim.next->hdr.length);

	return (len < OVERHEAD) ? 0 : len - OVERHEAD;
}

static void ccat_eth_fifo_inc(struct ccat_eth_fifo *fifo)
{
	if (++fifo->mem.next > fifo->end)
		fifo->mem.next = fifo->mem.start;
}

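/*
 * fifo_eim_rx_add() arms an RX slot by clearing its in-memory length
 * field (the first u16 of the header); the CCAT announces a received
 * EIM frame by storing a non-zero length there, which
 * fifo_eim_rx_ready() above picks up.
 */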
static void fifo_eim_rx_add(struct ccat_eth_fifo *const fifo)
{
	struct ccat_eim_frame __iomem *frame = fifo->eim.next;

	iowrite16(0, frame);
	wmb();
}

static void fifo_eim_tx_add(struct ccat_eth_fifo *const fifo)
{
	/* nothing to do, EIM TX slots need no rearming */
}

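/*
 * EIM frames live in I/O memory; these helpers cast the __iomem
 * qualifier away for bulk copies (presumably safe here because the
 * EIM window behaves like ordinary memory).
 */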
#define memcpy_from_ccat(DEST, SRC, LEN) memcpy(DEST, (__force void *)(SRC), LEN)
#define memcpy_to_ccat(DEST, SRC, LEN) memcpy((__force void *)(DEST), SRC, LEN)

static void fifo_eim_copy_to_linear_skb(struct ccat_eth_fifo *const fifo,
					struct sk_buff *skb, const size_t len)
{
	memcpy_from_ccat(skb->data, fifo->eim.next->data, len);
}

static void fifo_eim_queue_skb(struct ccat_eth_fifo *const fifo,
			       struct sk_buff *skb)
{
	struct ccat_eim_frame __iomem *frame = fifo->eim.next;
	const u32 addr_and_length =
	    (void __iomem *)frame - (void __iomem *)fifo->eim.start;
	const __le16 length = cpu_to_le16(skb->len);

	memcpy_to_ccat(&frame->hdr.length, &length, sizeof(length));
	memcpy_to_ccat(frame->data, skb->data, skb->len);
	iowrite32(addr_and_length, fifo->reg);
}

static void ccat_eth_fifo_hw_reset(struct ccat_eth_fifo *const fifo)
{
	if (fifo->reg) {
		iowrite32(0, fifo->reg + 0x8);
		wmb();
	}
}

static void ccat_eth_fifo_reset(struct ccat_eth_fifo *const fifo)
{
	ccat_eth_fifo_hw_reset(fifo);

	if (fifo->ops->add) {
		fifo->mem.next = fifo->mem.start;
		do {
			fifo->ops->add(fifo);
			ccat_eth_fifo_inc(fifo);
		} while (fifo->mem.next != fifo->mem.start);
	}
}

static inline size_t fifo_dma_tx_ready(struct ccat_eth_fifo *const fifo)
{
	const struct ccat_dma_frame *frame = fifo->dma.next;

	return le32_to_cpu(frame->hdr.tx_flags) & CCAT_FRAME_SENT;
}

static inline size_t fifo_dma_rx_ready(struct ccat_eth_fifo *const fifo)
{
	static const size_t OVERHEAD =
	    offsetof(struct ccat_dma_frame_hdr, rx_flags);
	const struct ccat_dma_frame *const frame = fifo->dma.next;

	if (le32_to_cpu(frame->hdr.rx_flags) & CCAT_FRAME_RECEIVED) {
		const size_t len = le16_to_cpu(frame->hdr.length);

		return (len < OVERHEAD) ? 0 : len - OVERHEAD;
	}
	return 0;
}

static void ccat_eth_rx_fifo_dma_add(struct ccat_eth_fifo *const fifo)
{
	struct ccat_dma_frame *const frame = fifo->dma.next;
	const size_t offset = (void *)frame - fifo->dma.start;
	const u32 addr_and_length = (1 << 31) | offset;

	frame->hdr.rx_flags = cpu_to_le32(0);
	iowrite32(addr_and_length, fifo->reg);
}

static void ccat_eth_tx_fifo_dma_add_free(struct ccat_eth_fifo *const fifo)
{
	/* mark frame as ready to use for tx */
	fifo->dma.next->hdr.tx_flags = cpu_to_le32(CCAT_FRAME_SENT);
}

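/*
 * fifo_dma_queue_skb() below packs its TX descriptor as follows: the
 * low bits carry the frame's offset inside the DMA window (skipping
 * the first 8 header bytes, which the CCAT ignores), the top byte
 * carries the frame length in 8-byte units including the header.
 */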
static void fifo_dma_copy_to_linear_skb(struct ccat_eth_fifo *const fifo,
					struct sk_buff *skb, const size_t len)
{
	skb_copy_to_linear_data(skb, fifo->dma.next->data, len);
}

static void fifo_dma_queue_skb(struct ccat_eth_fifo *const fifo,
			       struct sk_buff *skb)
{
	struct ccat_dma_frame *frame = fifo->dma.next;
	u32 addr_and_length;

	frame->hdr.tx_flags = cpu_to_le32(0);
	frame->hdr.length = cpu_to_le16(skb->len);

	memcpy(frame->data, skb->data, skb->len);

	/* Queue frame into CCAT TX-FIFO, CCAT ignores the first 8 bytes of the tx descriptor */
	addr_and_length = offsetof(struct ccat_dma_frame_hdr, length);
	addr_and_length += ((void *)frame - fifo->dma.start);
	addr_and_length +=
	    ((skb->len + sizeof(struct ccat_dma_frame_hdr)) / 8) << 24;
	iowrite32(addr_and_length, fifo->reg);
}

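/*
 * One ops table per transport/direction pair; generic paths such as
 * ccat_eth_start_xmit() only ever dispatch through fifo->ops, so DMA
 * and EIM mode share the same hot path.
 */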
static const struct ccat_eth_fifo_operations dma_rx_fifo_ops = {
	.add = ccat_eth_rx_fifo_dma_add,
	.ready = fifo_dma_rx_ready,
	.queue.copy_to_skb = fifo_dma_copy_to_linear_skb,
};

static const struct ccat_eth_fifo_operations dma_tx_fifo_ops = {
	.add = ccat_eth_tx_fifo_dma_add_free,
	.ready = fifo_dma_tx_ready,
	.queue.skb = fifo_dma_queue_skb,
};

static const struct ccat_eth_fifo_operations eim_rx_fifo_ops = {
	.add = fifo_eim_rx_add,
	.queue.copy_to_skb = fifo_eim_copy_to_linear_skb,
	.ready = fifo_eim_rx_ready,
};

static const struct ccat_eth_fifo_operations eim_tx_fifo_ops = {
	.add = fifo_eim_tx_add,
	.queue.skb = fifo_eim_queue_skb,
	.ready = fifo_eim_tx_ready,
};

static void ccat_eth_priv_free(struct ccat_eth_priv *priv)
{
	/* reset the HW FIFOs */
	ccat_eth_fifo_hw_reset(&priv->rx_fifo);
	ccat_eth_fifo_hw_reset(&priv->tx_fifo);

	/* release dma */
	ccat_dma_free(priv);
}

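/**
 * ccat_hw_disable_mac_filter() - switch off hardware MAC address filtering
 *
 * Presumably required so the stack sees every frame on the wire, not
 * just those addressed to the adapter's own MAC.
 */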
static int ccat_hw_disable_mac_filter(struct ccat_eth_priv *priv)
{
	iowrite8(0, priv->reg.mii + 0x8 + 6);
	wmb();
	return 0;
}
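
/*
 * The coherent buffer below spans three alignment windows: presumably
 * one 128 KiB window per DMA channel (RX and TX) plus the slack needed
 * to realign the unaligned allocation base.
 */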

/**
 * Initializes both (RX/TX) DMA FIFOs and related management structures
 */
static int ccat_eth_priv_init_dma(struct ccat_eth_priv *priv)
{
	struct ccat_dma_mem *const dma = &priv->dma_mem;
	struct pci_dev *const pdev = priv->func->ccat->pdev;
	void __iomem *const bar_2 = priv->func->ccat->bar_2;
	const u8 rx_chan = priv->func->info.rx_dma_chan;
	const u8 tx_chan = priv->func->info.tx_dma_chan;
	int status = 0;

	dma->dev = &pdev->dev;
	dma->size = CCAT_ALIGNMENT * 3;
	dma->base =
	    dma_zalloc_coherent(dma->dev, dma->size, &dma->phys, GFP_KERNEL);
	if (!dma->base || !dma->phys) {
		pr_err("init DMA memory failed.\n");
		return -ENOMEM;
	}

	priv->rx_fifo.ops = &dma_rx_fifo_ops;
	status = ccat_dma_init(dma, rx_chan, bar_2, &priv->rx_fifo);
	if (status) {
		pr_info("init RX DMA memory failed.\n");
		ccat_dma_free(priv);
		return status;
	}

	priv->tx_fifo.ops = &dma_tx_fifo_ops;
	status = ccat_dma_init(dma, tx_chan, bar_2, &priv->tx_fifo);
	if (status) {
		pr_info("init TX DMA memory failed.\n");
		ccat_dma_free(priv);
		return status;
	}
	return ccat_hw_disable_mac_filter(priv);
}

static int ccat_eth_priv_init_eim(struct ccat_eth_priv *priv)
{
	priv->rx_fifo.eim.start = priv->reg.rx_mem;
	priv->rx_fifo.ops = &eim_rx_fifo_ops;
	fifo_set_end(&priv->rx_fifo, sizeof(struct ccat_eth_frame));

	priv->tx_fifo.eim.start = priv->reg.tx_mem;
	priv->tx_fifo.ops = &eim_tx_fifo_ops;
	fifo_set_end(&priv->tx_fifo, priv->func->info.tx_size);

	return ccat_hw_disable_mac_filter(priv);
}
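
/*
 * Note the asymmetry above: the EIM RX ring holds a single frame slot
 * (fifo_set_end() with one frame's size), while the TX ring spans the
 * whole tx_size window reported by the function's info block.
 */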

/**
 * Initializes a struct ccat_eth_register with data from a corresponding
 * CCAT function.
 */
static void ccat_eth_priv_init_reg(struct ccat_eth_priv *const priv)
{
	struct ccat_mac_infoblock offsets;
	struct ccat_eth_register *const reg = &priv->reg;
	const struct ccat_function *const func = priv->func;
	void __iomem *const func_base = func->ccat->bar_0 + func->info.addr;

	/* struct ccat_eth_fifo contains a union of ccat_dma, ccat_eim and
	 * ccat_mem; the members next and start have to overlay the exact
	 * same memory to support 'polymorphic' usage of them */
	BUILD_BUG_ON(offsetof(struct ccat_dma, next) !=
		     offsetof(struct ccat_mem, next));
	BUILD_BUG_ON(offsetof(struct ccat_dma, start) !=
		     offsetof(struct ccat_mem, start));
	BUILD_BUG_ON(offsetof(struct ccat_dma, next) !=
		     offsetof(struct ccat_eim, next));
	BUILD_BUG_ON(offsetof(struct ccat_dma, start) !=
		     offsetof(struct ccat_eim, start));

	memcpy_fromio(&offsets, func_base, sizeof(offsets));
	reg->mii = func_base + offsets.mii;
	priv->tx_fifo.reg = func_base + offsets.tx_fifo;
	priv->rx_fifo.reg = func_base + offsets.tx_fifo + 0x10;
	reg->mac = func_base + offsets.mac;
	reg->rx_mem = func_base + offsets.rx_mem;
	reg->tx_mem = func_base + offsets.tx_mem;
	reg->misc = func_base + offsets.misc;
}

static netdev_tx_t ccat_eth_start_xmit(struct sk_buff *skb,
				       struct net_device *dev)
{
	struct ccat_eth_priv *const priv = netdev_priv(dev);
	struct ccat_eth_fifo *const fifo = &priv->tx_fifo;

	if (skb_is_nonlinear(skb)) {
		pr_warn("Non-linear skb not supported -> drop frame.\n");
		atomic64_inc(&fifo->dropped);
		priv->kfree_skb_any(skb);
		return NETDEV_TX_OK;
	}

	if (skb->len > MAX_PAYLOAD_SIZE) {
		pr_warn("skb.len %llu exceeds dma buffer %llu -> drop frame.\n",
			(u64) skb->len, (u64) MAX_PAYLOAD_SIZE);
		atomic64_inc(&fifo->dropped);
		priv->kfree_skb_any(skb);
		return NETDEV_TX_OK;
	}

	if (!fifo->ops->ready(fifo)) {
		netdev_err(dev, "BUG! Tx Ring full when queue awake!\n");
		priv->stop_queue(priv->netdev);
		return NETDEV_TX_BUSY;
	}

	/* prepare frame in DMA memory */
	fifo->ops->queue.skb(fifo, skb);

	/* update stats */
	atomic64_add(skb->len, &fifo->bytes);

	priv->kfree_skb_any(skb);

	ccat_eth_fifo_inc(fifo);
	/* stop queue if tx ring is full */
	if (!fifo->ops->ready(fifo)) {
		priv->stop_queue(priv->netdev);
	}
	return NETDEV_TX_OK;
}

/* ... elided: receive path, poll timer callback, link and stats helpers ... */

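/*
 * The driver appears to be purely polling-based: in normal ethernet
 * mode a 50 us hrtimer (POLL_TIME) drives link, RX and TX handling,
 * while in EtherCAT mode the master's cyclic ec_poll() takes over and
 * no timer is armed.
 */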
static int ccat_eth_open(struct net_device *dev)
{
	struct ccat_eth_priv *const priv = netdev_priv(dev);

	if (!priv->ecdev) {
		hrtimer_init(&priv->poll_timer, CLOCK_MONOTONIC,
			     HRTIMER_MODE_REL);
		priv->poll_timer.function = poll_timer_callback;
		hrtimer_start(&priv->poll_timer, POLL_TIME, HRTIMER_MODE_REL);
	}
	return 0;
}

static int ccat_eth_stop(struct net_device *dev)
{
	struct ccat_eth_priv *const priv = netdev_priv(dev);

	priv->stop_queue(dev);
	if (!priv->ecdev) {
		hrtimer_cancel(&priv->poll_timer);
	}
	return 0;
}

static const struct net_device_ops ccat_eth_netdev_ops = {
	.ndo_get_stats64 = ccat_eth_get_stats64,
	.ndo_open = ccat_eth_open,
	.ndo_start_xmit = ccat_eth_start_xmit,
	.ndo_stop = ccat_eth_stop,
};

static struct ccat_eth_priv *ccat_eth_alloc_netdev(struct ccat_function *func)
{
	struct ccat_eth_priv *priv = NULL;
	struct net_device *const netdev = alloc_etherdev(sizeof(*priv));

	if (netdev) {
		priv = netdev_priv(netdev);
		memset(priv, 0, sizeof(*priv));
		priv->netdev = netdev;
		priv->func = func;
		ccat_eth_priv_init_reg(priv);
	}
	return priv;
}

static int ccat_eth_init_netdev(struct ccat_eth_priv *priv)
{
	int status;

	/* init netdev with MAC and stack callbacks */
	memcpy_fromio(priv->netdev->dev_addr, priv->reg.mii + 8,
		      priv->netdev->addr_len);
	priv->netdev->netdev_ops = &ccat_eth_netdev_ops;

	/* use as EtherCAT device? */
	priv->carrier_off = ecdev_carrier_off;
	priv->carrier_ok = ecdev_carrier_ok;
	priv->carrier_on = ecdev_carrier_on;
	priv->kfree_skb_any = ecdev_kfree_skb_any;

	/* It would be more intuitive to check for:
	 * if (priv->func->drv->type == CCATINFO_ETHERCAT_MASTER_DMA) {
	 * unfortunately priv->func->drv is not initialized until probe()
	 * returns, so we check which RX fifo ops are registered to
	 * determine dma/io mode. */
	if (&dma_rx_fifo_ops == priv->rx_fifo.ops) {
		priv->receive = ecdev_receive_dma;
	} else {
		priv->receive = ecdev_receive_eim;
	}
	priv->start_queue = ecdev_nop;
	priv->stop_queue = ecdev_nop;
	priv->unregister = unregister_ecdev;
	priv->ecdev = ecdev_offer(priv->netdev, ec_poll, THIS_MODULE);
	if (priv->ecdev) {
		priv->carrier_off(priv->netdev);
		if (ecdev_open(priv->ecdev)) {
			pr_info("unable to register network device.\n");
			ecdev_withdraw(priv->ecdev);
			ccat_eth_priv_free(priv);
			free_netdev(priv->netdev);
			return -1;	/* TODO: return better error code */
		}
		priv->func->private_data = priv;
		return 0;
	}

	/* EtherCAT disabled -> prepare normal ethernet mode */
	priv->carrier_off = netif_carrier_off;
	priv->carrier_ok = netif_carrier_ok;
	priv->carrier_on = netif_carrier_on;
	priv->kfree_skb_any = dev_kfree_skb_any;
	priv->receive = ccat_eth_receive;
	priv->start_queue = netif_start_queue;
	priv->stop_queue = netif_stop_queue;
	priv->unregister = unregister_netdev;
	priv->carrier_off(priv->netdev);

	status = register_netdev(priv->netdev);
	if (status) {
		pr_info("unable to register network device.\n");
		ccat_eth_priv_free(priv);
		free_netdev(priv->netdev);
		return status;
	}
	pr_info("registered %s as network device.\n", priv->netdev->name);
	priv->func->private_data = priv;
	return 0;
}

static int ccat_eth_dma_probe(struct ccat_function *func)
{
	struct ccat_eth_priv *priv = ccat_eth_alloc_netdev(func);
	int status;

	if (!priv)
		return -ENOMEM;

	status = ccat_eth_priv_init_dma(priv);
	if (status) {
		pr_warn("%s(): DMA initialization failed.\n", __FUNCTION__);
		free_netdev(priv->netdev);
		return status;
	}
	return ccat_eth_init_netdev(priv);
}

static void ccat_eth_dma_remove(struct ccat_function *func)
{
	struct ccat_eth_priv *const eth = func->private_data;

	eth->unregister(eth->netdev);
	ccat_eth_priv_free(eth);
	free_netdev(eth->netdev);
}

const struct ccat_driver eth_dma_driver = {
	.type = CCATINFO_ETHERCAT_MASTER_DMA,
	.probe = ccat_eth_dma_probe,
	.remove = ccat_eth_dma_remove,
};
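
/*
 * The generic CCAT bus code presumably matches these driver tables
 * against the function type found in each CCAT info block and calls
 * the corresponding probe()/remove() pair.
 */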

static int ccat_eth_eim_probe(struct ccat_function *func)
{
	struct ccat_eth_priv *priv = ccat_eth_alloc_netdev(func);
	int status;

	if (!priv)
		return -ENOMEM;

	status = ccat_eth_priv_init_eim(priv);
	if (status) {
		pr_warn("%s(): memory initialization failed.\n", __FUNCTION__);
		free_netdev(priv->netdev);
		return status;
	}
	return ccat_eth_init_netdev(priv);
}

static void ccat_eth_eim_remove(struct ccat_function *func)
{
	struct ccat_eth_priv *const eth = func->private_data;

	eth->unregister(eth->netdev);
	ccat_eth_priv_free(eth);
	free_netdev(eth->netdev);
}

const struct ccat_driver eth_eim_driver = {
	.type = CCATINFO_ETHERCAT_NODMA,
	.probe = ccat_eth_eim_probe,
	.remove = ccat_eth_eim_remove,
};