54 */ |
53 */ |
55 static int ccat_bar_init(struct ccat_bar *bar, size_t index, |
54 static int ccat_bar_init(struct ccat_bar *bar, size_t index, |
56 struct pci_dev *pdev) |
55 struct pci_dev *pdev) |
57 { |
56 { |
58 struct resource *res; |
57 struct resource *res; |
|
58 |
59 bar->start = pci_resource_start(pdev, index); |
59 bar->start = pci_resource_start(pdev, index); |
60 bar->end = pci_resource_end(pdev, index); |
60 bar->end = pci_resource_end(pdev, index); |
61 bar->len = pci_resource_len(pdev, index); |
61 bar->len = pci_resource_len(pdev, index); |
62 bar->flags = pci_resource_flags(pdev, index); |
62 bar->flags = pci_resource_flags(pdev, index); |
63 if (!(IORESOURCE_MEM & bar->flags)) { |
63 if (!(IORESOURCE_MEM & bar->flags)) { |
64 pr_info("bar%llu is no mem_region -> abort.\n", |
64 pr_info("bar%llu is no mem_region -> abort.\n", (u64) index); |
65 (uint64_t) index); |
65 return -EIO; |
66 return -EIO; |
66 } |
67 } |
67 |
68 |
68 res = request_mem_region(bar->start, bar->len, KBUILD_MODNAME); |
69 res = request_mem_region(bar->start, bar->len, DRV_NAME); |
|
70 if (!res) { |
69 if (!res) { |
71 pr_info("allocate mem_region failed.\n"); |
70 pr_info("allocate mem_region failed.\n"); |
72 return -EIO; |
71 return -EIO; |
73 } |
72 } |
74 pr_debug("bar%llu at [%lx,%lx] len=%lu res: %p.\n", (uint64_t) index, |
73 pr_debug("bar%llu at [%lx,%lx] len=%lu res: %p.\n", (u64) index, |
75 bar->start, bar->end, bar->len, res); |
74 bar->start, bar->end, bar->len, res); |
76 |
75 |
77 bar->ioaddr = ioremap(bar->start, bar->len); |
76 bar->ioaddr = ioremap(bar->start, bar->len); |
78 if (!bar->ioaddr) { |
77 if (!bar->ioaddr) { |
79 pr_info("bar%llu ioremap failed.\n", (uint64_t) index); |
78 pr_info("bar%llu ioremap failed.\n", (u64) index); |
80 release_mem_region(bar->start, bar->len); |
79 release_mem_region(bar->start, bar->len); |
81 return -EIO; |
80 return -EIO; |
82 } |
81 } |
83 pr_debug("bar%llu I/O mem mapped to %p.\n", (uint64_t) index, |
82 pr_debug("bar%llu I/O mem mapped to %p.\n", (u64) index, bar->ioaddr); |
84 bar->ioaddr); |
|
85 return 0; |
83 return 0; |
86 } |
84 } |
87 |
85 |
88 void ccat_dma_free(struct ccat_dma *const dma) |
86 void ccat_dma_free(struct ccat_dma *const dma) |
89 { |
87 { |
90 const struct ccat_dma tmp = *dma; |
88 const struct ccat_dma tmp = *dma; |
|
89 |
91 free_dma(dma->channel); |
90 free_dma(dma->channel); |
92 memset(dma, 0, sizeof(*dma)); |
91 memset(dma, 0, sizeof(*dma)); |
93 dma_free_coherent(tmp.dev, tmp.size, tmp.virt, tmp.phys); |
92 dma_free_coherent(tmp.dev, tmp.size, tmp.virt, tmp.phys); |
94 } |
93 } |
95 |
94 |
102 */ |
101 */ |
103 int ccat_dma_init(struct ccat_dma *const dma, size_t channel, |
102 int ccat_dma_init(struct ccat_dma *const dma, size_t channel, |
104 void __iomem * const ioaddr, struct device *const dev) |
103 void __iomem * const ioaddr, struct device *const dev) |
105 { |
104 { |
106 void *frame; |
105 void *frame; |
107 uint64_t addr; |
106 u64 addr; |
108 uint32_t translateAddr; |
107 u32 translateAddr; |
109 uint32_t memTranslate; |
108 u32 memTranslate; |
110 uint32_t memSize; |
109 u32 memSize; |
111 uint32_t data = 0xffffffff; |
110 u32 data = 0xffffffff; |
112 uint32_t offset = (sizeof(uint64_t) * channel) + 0x1000; |
111 u32 offset = (sizeof(u64) * channel) + 0x1000; |
113 |
112 |
114 dma->channel = channel; |
113 dma->channel = channel; |
115 dma->dev = dev; |
114 dma->dev = dev; |
116 |
115 |
117 /* calculate size and alignments */ |
116 /* calculate size and alignments */ |
121 memTranslate = data & 0xfffffffc; |
120 memTranslate = data & 0xfffffffc; |
122 memSize = (~memTranslate) + 1; |
121 memSize = (~memTranslate) + 1; |
123 dma->size = 2 * memSize - PAGE_SIZE; |
122 dma->size = 2 * memSize - PAGE_SIZE; |
124 dma->virt = dma_zalloc_coherent(dev, dma->size, &dma->phys, GFP_KERNEL); |
123 dma->virt = dma_zalloc_coherent(dev, dma->size, &dma->phys, GFP_KERNEL); |
125 if (!dma->virt || !dma->phys) { |
124 if (!dma->virt || !dma->phys) { |
126 pr_info("init DMA%llu memory failed.\n", (uint64_t) channel); |
125 pr_info("init DMA%llu memory failed.\n", (u64) channel); |
127 return -1; |
126 return -1; |
128 } |
127 } |
129 |
128 |
130 if (request_dma(channel, DRV_NAME)) { |
129 if (request_dma(channel, KBUILD_MODNAME)) { |
131 pr_info("request dma channel %llu failed\n", |
130 pr_info("request dma channel %llu failed\n", (u64) channel); |
132 (uint64_t) channel); |
|
133 ccat_dma_free(dma); |
131 ccat_dma_free(dma); |
134 return -1; |
132 return -1; |
135 } |
133 } |
136 |
134 |
137 translateAddr = (dma->phys + memSize - PAGE_SIZE) & memTranslate; |
135 translateAddr = (dma->phys + memSize - PAGE_SIZE) & memTranslate; |
138 addr = translateAddr; |
136 addr = translateAddr; |
139 memcpy_toio(ioaddr + offset, &addr, sizeof(addr)); |
137 memcpy_toio(ioaddr + offset, &addr, sizeof(addr)); |
140 frame = dma->virt + translateAddr - dma->phys; |
138 frame = dma->virt + translateAddr - dma->phys; |
141 pr_debug |
139 pr_debug |
142 ("DMA%llu mem initialized\n virt: 0x%p\n phys: 0x%llx\n translated: 0x%llx\n pci addr: 0x%08x%x\n memTranslate: 0x%x\n size: %llu bytes.\n", |
140 ("DMA%llu mem initialized\n virt: 0x%p\n phys: 0x%llx\n translated: 0x%llx\n pci addr: 0x%08x%x\n memTranslate: 0x%x\n size: %llu bytes.\n", |
143 (uint64_t) channel, dma->virt, (uint64_t) (dma->phys), addr, |
141 (u64) channel, dma->virt, (u64) (dma->phys), addr, |
144 ioread32(ioaddr + offset + 4), ioread32(ioaddr + offset), |
142 ioread32(ioaddr + offset + 4), ioread32(ioaddr + offset), |
145 memTranslate, (uint64_t) dma->size); |
143 memTranslate, (u64) dma->size); |
146 return 0; |
144 return 0; |
147 } |
145 } |
148 |
146 |
149 /** |
147 /** |
150 * Initialize all available CCAT functions. |
148 * Initialize all available CCAT functions. |
151 * |
149 * |
152 * Return: count of failed functions |
150 * Return: count of failed functions |
153 */ |
151 */ |
154 static int ccat_functions_init(struct ccat_device *const ccatdev) |
152 static int ccat_functions_init(struct ccat_device *const ccatdev) |
155 { |
153 { |
156 /* read CCatInfoBlock.nMaxEntries from ccat */ |
154 static const size_t block_size = sizeof(struct ccat_info_block); |
157 const uint8_t num_func = ioread8(ccatdev->bar[0].ioaddr + 4); |
155 void __iomem *addr = ccatdev->bar[0].ioaddr; /** first block is the CCAT information block entry */ |
158 void __iomem *addr = ccatdev->bar[0].ioaddr; |
156 const u8 num_func = ioread8(addr + 4); /** number of CCAT function blocks is at offset 0x4 */ |
159 const void __iomem *end = addr + (sizeof(CCatInfoBlock) * num_func); |
157 const void __iomem *end = addr + (block_size * num_func); |
160 int status = 0; //count init function failures |
158 int status = 0; /** count init function failures */ |
161 |
159 |
162 while (addr < end) { |
160 while (addr < end) { |
163 const uint8_t type = ioread16(addr); |
161 const u8 type = ioread16(addr); |
164 switch (type) { |
162 switch (type) { |
165 case CCATINFO_NOTUSED: |
163 case CCATINFO_NOTUSED: |
166 break; |
164 break; |
167 case CCATINFO_EPCS_PROM: |
165 case CCATINFO_EPCS_PROM: |
168 pr_info("Found: CCAT update(EPCS_PROM) -> init()\n"); |
166 pr_info("Found: CCAT update(EPCS_PROM) -> init()\n"); |
227 if (status) { |
226 if (status) { |
228 pr_warn("read CCAT pci revision failed with %d\n", status); |
227 pr_warn("read CCAT pci revision failed with %d\n", status); |
229 return status; |
228 return status; |
230 } |
229 } |
231 |
230 |
232 /* FIXME upgrade to a newer kernel to get support of dma_set_mask_and_coherent() |
231 if (!dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64))) { |
233 * (!dma_set_mask_and_coherent(&dev->dev, DMA_BIT_MASK(64))) { |
|
234 */ |
|
235 if (!dma_set_mask(&pdev->dev, DMA_BIT_MASK(64))) { |
|
236 pr_debug("64 bit DMA supported, pci rev: %u\n", revision); |
232 pr_debug("64 bit DMA supported, pci rev: %u\n", revision); |
237 /*} else if (!dma_set_mask_and_coherent(&dev->dev, DMA_BIT_MASK(32))) { */ |
233 } else if (!dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32))) { |
238 } else if (!dma_set_mask(&pdev->dev, DMA_BIT_MASK(32))) { |
|
239 pr_debug("32 bit DMA supported, pci rev: %u\n", revision); |
234 pr_debug("32 bit DMA supported, pci rev: %u\n", revision); |
240 } else { |
235 } else { |
241 pr_warn("No suitable DMA available, pci rev: %u\n", revision); |
236 pr_warn("No suitable DMA available, pci rev: %u\n", revision); |
242 } |
237 } |
243 |
238 |
/* MODULE_DEVICE_TABLE is intentionally compiled out to prevent
 * auto-loading of this driver by the module alias mechanism. */
#if 0
MODULE_DEVICE_TABLE(pci, pci_ids);
#endif
286 |
282 |
287 static struct pci_driver pci_driver = { |
283 static struct pci_driver pci_driver = { |
288 .name = DRV_NAME, |
284 .name = KBUILD_MODNAME, |
289 .id_table = pci_ids, |
285 .id_table = pci_ids, |
290 .probe = ccat_probe, |
286 .probe = ccat_probe, |
291 .remove = ccat_remove, |
287 .remove = ccat_remove, |
292 }; |
288 }; |
293 |
289 |
294 static void ccat_exit_module(void) |
290 static void __exit ccat_exit_module(void) |
295 { |
291 { |
296 pci_unregister_driver(&pci_driver); |
292 pci_unregister_driver(&pci_driver); |
297 } |
293 } |
298 |
294 |
299 static int ccat_init_module(void) |
295 static int __init ccat_init_module(void) |
300 { |
296 { |
301 BUILD_BUG_ON(offsetof(struct ccat_eth_frame, data) != |
|
302 CCAT_DMA_FRAME_HEADER_LENGTH); |
|
303 pr_info("%s, %s\n", DRV_DESCRIPTION, DRV_VERSION); |
297 pr_info("%s, %s\n", DRV_DESCRIPTION, DRV_VERSION); |
304 return pci_register_driver(&pci_driver); |
298 return pci_register_driver(&pci_driver); |
305 } |
299 } |
306 |
300 |
module_exit(ccat_exit_module);