You should have received a copy of the GNU General Public License along
with this program; if not, write to the Free Software Foundation, Inc.,
51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
*/
20 |
20 |
#include <linux/err.h>
#include <linux/etherdevice.h>
#include <linux/module.h>
#include <linux/netdevice.h>
#include <linux/platform_device.h>
#include <linux/version.h>
#include <asm/dma.h>

#include "module.h"
#include "netdev.h"
#include "update.h"
|
29 |
28 |
30 MODULE_DESCRIPTION(DRV_DESCRIPTION); |
29 MODULE_DESCRIPTION(DRV_DESCRIPTION); |
31 MODULE_AUTHOR("Patrick Bruenn <p.bruenn@beckhoff.com>"); |
30 MODULE_AUTHOR("Patrick Bruenn <p.bruenn@beckhoff.com>"); |
32 MODULE_LICENSE("GPL"); |
31 MODULE_LICENSE("GPL"); |
33 MODULE_VERSION(DRV_VERSION); |
32 MODULE_VERSION(DRV_VERSION); |
34 |
33 |
35 static void ccat_bar_free(struct ccat_bar *bar) |
34 #if (LINUX_VERSION_CODE < KERNEL_VERSION(3,12,27)) |
36 { |
35 /* |
37 if (bar->ioaddr) { |
36 * Set both the DMA mask and the coherent DMA mask to the same thing. |
38 const struct ccat_bar tmp = *bar; |
37 * Note that we don't check the return value from dma_set_coherent_mask() |
39 memset(bar, 0, sizeof(*bar)); |
38 * as the DMA API guarantees that the coherent DMA mask can be set to |
40 iounmap(tmp.ioaddr); |
39 * the same or smaller than the streaming DMA mask. |
41 release_mem_region(tmp.start, tmp.len); |
40 */ |
42 } else { |
41 static inline int dma_set_mask_and_coherent(struct device *dev, u64 mask) |
43 pr_warn("%s(): %p was already done.\n", __FUNCTION__, bar); |
42 { |
44 } |
43 int rc = dma_set_mask(dev, mask); |
45 } |
44 if (rc == 0) |
|
45 dma_set_coherent_mask(dev, mask); |
|
46 return rc; |
|
47 } |
|
48 #endif |
46 |
49 |
47 /** |
50 /** |
48 * ccat_bar_init() - Initialize a CCAT pci bar |
51 * configure the drivers capabilities here |
49 * @bar object which should be initialized |
|
50 * @index 0 and 2 are valid for CCAT, meaning pci bar0 or pci bar2 |
|
51 * @pdev the pci device as which the CCAT was recognized before |
|
52 * |
|
53 * Reading PCI config space; request and map memory region. |
|
54 */ |
52 */ |
55 static int ccat_bar_init(struct ccat_bar *bar, size_t index, |
53 static const struct ccat_driver *const drivers[] = { |
56 struct pci_dev *pdev) |
54 #ifdef CONFIG_PCI |
57 { |
55 ð_dma_driver, /* load Ethernet MAC/EtherCAT Master driver with DMA support from netdev.c */ |
58 struct resource *res; |
56 #endif |
59 |
57 ð_eim_driver, /* load Ethernet MAC/EtherCAT Master driver without DMA support from */ |
60 bar->start = pci_resource_start(pdev, index); |
58 &gpio_driver, /* load GPIO driver from gpio.c */ |
61 bar->end = pci_resource_end(pdev, index); |
59 &sram_driver, /* load SRAM driver from sram.c */ |
62 bar->len = pci_resource_len(pdev, index); |
60 &update_driver, /* load Update driver from update.c */ |
63 bar->flags = pci_resource_flags(pdev, index); |
61 }; |
64 if (!(IORESOURCE_MEM & bar->flags)) { |
62 |
65 pr_info("bar%llu is no mem_region -> abort.\n", (u64) index); |
63 static int __init ccat_class_init(struct ccat_class *base) |
66 return -EIO; |
64 { |
67 } |
65 if (1 == atomic_inc_return(&base->instances)) { |
68 |
66 if (alloc_chrdev_region |
69 res = request_mem_region(bar->start, bar->len, KBUILD_MODNAME); |
67 (&base->dev, 0, base->count, KBUILD_MODNAME)) { |
70 if (!res) { |
68 pr_warn("alloc_chrdev_region() for '%s' failed\n", |
71 pr_info("allocate mem_region failed.\n"); |
69 base->name); |
72 return -EIO; |
70 return -1; |
73 } |
71 } |
74 pr_debug("bar%llu at [%lx,%lx] len=%lu res: %p.\n", (u64) index, |
72 |
75 bar->start, bar->end, bar->len, res); |
73 base->class = class_create(THIS_MODULE, base->name); |
76 |
74 if (!base->class) { |
77 bar->ioaddr = ioremap(bar->start, bar->len); |
75 pr_warn("Create device class '%s' failed\n", |
78 if (!bar->ioaddr) { |
76 base->name); |
79 pr_info("bar%llu ioremap failed.\n", (u64) index); |
77 unregister_chrdev_region(base->dev, base->count); |
80 release_mem_region(bar->start, bar->len); |
78 return -1; |
81 return -EIO; |
79 } |
82 } |
80 } |
83 pr_debug("bar%llu I/O mem mapped to %p.\n", (u64) index, bar->ioaddr); |
81 return 0; |
84 return 0; |
82 } |
85 } |
83 |
86 |
84 static void ccat_class_exit(struct ccat_class *base) |
87 void ccat_dma_free(struct ccat_dma *const dma) |
85 { |
88 { |
86 if (!atomic_dec_return(&base->instances)) { |
89 const struct ccat_dma tmp = *dma; |
87 class_destroy(base->class); |
90 |
88 unregister_chrdev_region(base->dev, base->count); |
91 free_dma(dma->channel); |
89 } |
92 memset(dma, 0, sizeof(*dma)); |
90 } |
93 dma_free_coherent(tmp.dev, tmp.size, tmp.virt, tmp.phys); |
91 |
94 } |
92 static void free_ccat_cdev(struct ccat_cdev *ccdev) |
95 |
93 { |
96 /** |
94 ccat_class_exit(ccdev->class); |
97 * ccat_dma_init() - Initialize CCAT and host memory for DMA transfer |
95 ccdev->dev = 0; |
98 * @dma object for management data which will be initialized |
96 } |
99 * @channel number of the DMA channel |
97 |
100 * @ioaddr of the pci bar2 configspace used to calculate the address of the pci dma configuration |
98 static struct ccat_cdev *alloc_ccat_cdev(struct ccat_class *base) |
101 * @dev which should be configured for DMA |
99 { |
102 */ |
100 int i = 0; |
103 int ccat_dma_init(struct ccat_dma *const dma, size_t channel, |
101 |
104 void __iomem * const ioaddr, struct device *const dev) |
102 ccat_class_init(base); |
105 { |
103 for (i = 0; i < base->count; ++i) { |
106 void *frame; |
104 if (base->devices[i].dev == 0) { |
107 u64 addr; |
105 base->devices[i].dev = MKDEV(MAJOR(base->dev), i); |
108 u32 translateAddr; |
106 return &base->devices[i]; |
109 u32 memTranslate; |
107 } |
110 u32 memSize; |
108 } |
111 u32 data = 0xffffffff; |
109 pr_warn("exceeding max. number of '%s' devices (%d)\n", |
112 u32 offset = (sizeof(u64) * channel) + 0x1000; |
110 base->class->name, base->count); |
113 |
111 atomic_dec_return(&base->instances); |
114 dma->channel = channel; |
112 return NULL; |
115 dma->dev = dev; |
113 } |
116 |
114 |
117 /* calculate size and alignments */ |
115 static int ccat_cdev_init(struct cdev *cdev, dev_t dev, struct class *class, |
118 iowrite32(data, ioaddr + offset); |
116 struct file_operations *fops) |
119 wmb(); |
117 { |
120 data = ioread32(ioaddr + offset); |
118 if (!device_create |
121 memTranslate = data & 0xfffffffc; |
119 (class, NULL, dev, NULL, "%s%d", class->name, MINOR(dev))) { |
122 memSize = (~memTranslate) + 1; |
120 pr_warn("device_create() failed\n"); |
123 dma->size = 2 * memSize - PAGE_SIZE; |
|
124 dma->virt = dma_zalloc_coherent(dev, dma->size, &dma->phys, GFP_KERNEL); |
|
125 if (!dma->virt || !dma->phys) { |
|
126 pr_info("init DMA%llu memory failed.\n", (u64) channel); |
|
127 return -1; |
121 return -1; |
128 } |
122 } |
129 |
123 |
130 if (request_dma(channel, KBUILD_MODNAME)) { |
124 cdev_init(cdev, fops); |
131 pr_info("request dma channel %llu failed\n", (u64) channel); |
125 cdev->owner = fops->owner; |
132 ccat_dma_free(dma); |
126 if (cdev_add(cdev, dev, 1)) { |
|
127 pr_warn("add update device failed\n"); |
|
128 device_destroy(class, dev); |
133 return -1; |
129 return -1; |
134 } |
130 } |
135 |
131 |
136 translateAddr = (dma->phys + memSize - PAGE_SIZE) & memTranslate; |
132 pr_info("registered %s%d.\n", class->name, MINOR(dev)); |
137 addr = translateAddr; |
133 return 0; |
138 memcpy_toio(ioaddr + offset, &addr, sizeof(addr)); |
134 } |
139 frame = dma->virt + translateAddr - dma->phys; |
135 |
140 pr_debug |
136 int ccat_cdev_open(struct inode *const i, struct file *const f) |
141 ("DMA%llu mem initialized\n virt: 0x%p\n phys: 0x%llx\n translated: 0x%llx\n pci addr: 0x%08x%x\n memTranslate: 0x%x\n size: %llu bytes.\n", |
137 { |
142 (u64) channel, dma->virt, (u64) (dma->phys), addr, |
138 struct ccat_cdev *ccdev = |
143 ioread32(ioaddr + offset + 4), ioread32(ioaddr + offset), |
139 container_of(i->i_cdev, struct ccat_cdev, cdev); |
144 memTranslate, (u64) dma->size); |
140 struct cdev_buffer *buf; |
145 return 0; |
141 |
|
142 if (!atomic_dec_and_test(&ccdev->in_use)) { |
|
143 atomic_inc(&ccdev->in_use); |
|
144 return -EBUSY; |
|
145 } |
|
146 |
|
147 buf = kzalloc(sizeof(*buf) + ccdev->iosize, GFP_KERNEL); |
|
148 if (!buf) { |
|
149 atomic_inc(&ccdev->in_use); |
|
150 return -ENOMEM; |
|
151 } |
|
152 |
|
153 buf->ccdev = ccdev; |
|
154 f->private_data = buf; |
|
155 return 0; |
|
156 } |
|
157 |
|
158 int ccat_cdev_probe(struct ccat_function *func, struct ccat_class *cdev_class, |
|
159 size_t iosize) |
|
160 { |
|
161 struct ccat_cdev *const ccdev = alloc_ccat_cdev(cdev_class); |
|
162 if (!ccdev) { |
|
163 return -ENOMEM; |
|
164 } |
|
165 |
|
166 ccdev->ioaddr = func->ccat->bar_0 + func->info.addr; |
|
167 ccdev->iosize = iosize; |
|
168 atomic_set(&ccdev->in_use, 1); |
|
169 |
|
170 if (ccat_cdev_init |
|
171 (&ccdev->cdev, ccdev->dev, cdev_class->class, &cdev_class->fops)) { |
|
172 pr_warn("ccat_cdev_probe() failed\n"); |
|
173 free_ccat_cdev(ccdev); |
|
174 return -1; |
|
175 } |
|
176 ccdev->class = cdev_class; |
|
177 func->private_data = ccdev; |
|
178 return 0; |
|
179 } |
|
180 |
|
181 int ccat_cdev_release(struct inode *const i, struct file *const f) |
|
182 { |
|
183 const struct cdev_buffer *const buf = f->private_data; |
|
184 struct ccat_cdev *const ccdev = buf->ccdev; |
|
185 |
|
186 kfree(f->private_data); |
|
187 atomic_inc(&ccdev->in_use); |
|
188 return 0; |
|
189 } |
|
190 |
|
191 void ccat_cdev_remove(struct ccat_function *func) |
|
192 { |
|
193 struct ccat_cdev *const ccdev = func->private_data; |
|
194 |
|
195 cdev_del(&ccdev->cdev); |
|
196 device_destroy(ccdev->class->class, ccdev->dev); |
|
197 free_ccat_cdev(ccdev); |
|
198 } |
|
199 |
|
200 static const struct ccat_driver *ccat_function_connect(struct ccat_function |
|
201 *const func) |
|
202 { |
|
203 int i; |
|
204 |
|
205 for (i = 0; i < ARRAY_SIZE(drivers); ++i) { |
|
206 if (func->info.type == drivers[i]->type) { |
|
207 return drivers[i]->probe(func) ? NULL : drivers[i]; |
|
208 } |
|
209 } |
|
210 return NULL; |
146 } |
211 } |
147 |
212 |
148 /** |
213 /** |
149 * Initialize all available CCAT functions. |
214 * Initialize all available CCAT functions. |
150 * |
215 * |
151 * Return: count of failed functions |
216 * Return: count of failed functions |
152 */ |
217 */ |
153 static int ccat_functions_init(struct ccat_device *const ccatdev) |
218 static int ccat_functions_init(struct ccat_device *const ccatdev) |
154 { |
219 { |
155 static const size_t block_size = sizeof(struct ccat_info_block); |
220 static const size_t block_size = sizeof(struct ccat_info_block); |
156 void __iomem *addr = ccatdev->bar[0].ioaddr; /** first block is the CCAT information block entry */ |
221 struct ccat_function *next = kzalloc(sizeof(*next), GFP_KERNEL); |
|
222 void __iomem *addr = ccatdev->bar_0; /** first block is the CCAT information block entry */ |
157 const u8 num_func = ioread8(addr + 4); /** number of CCAT function blocks is at offset 0x4 */ |
223 const u8 num_func = ioread8(addr + 4); /** number of CCAT function blocks is at offset 0x4 */ |
158 const void __iomem *end = addr + (block_size * num_func); |
224 const void __iomem *end = addr + (block_size * num_func); |
159 int status = 0; /** count init function failures */ |
225 |
160 |
226 INIT_LIST_HEAD(&ccatdev->functions); |
161 while (addr < end) { |
227 for (; addr < end && next; addr += block_size) { |
162 const u8 type = ioread16(addr); |
228 memcpy_fromio(&next->info, addr, sizeof(next->info)); |
163 switch (type) { |
229 if (CCATINFO_NOTUSED != next->info.type) { |
164 case CCATINFO_NOTUSED: |
230 next->ccat = ccatdev; |
165 break; |
231 next->drv = ccat_function_connect(next); |
166 case CCATINFO_EPCS_PROM: |
232 if (next->drv) { |
167 pr_info("Found: CCAT update(EPCS_PROM) -> init()\n"); |
233 list_add(&next->list, &ccatdev->functions); |
168 ccatdev->update = ccat_update_init(ccatdev, addr); |
234 next = kzalloc(sizeof(*next), GFP_KERNEL); |
169 status += (NULL == ccatdev->update); |
235 } |
170 break; |
236 } |
171 case CCATINFO_ETHERCAT_MASTER_DMA: |
237 } |
172 pr_info("Found: ETHERCAT_MASTER_DMA -> init()\n"); |
238 kfree(next); |
173 ccatdev->ethdev = ccat_eth_init(ccatdev, addr); |
239 return list_empty(&ccatdev->functions); |
174 status += (NULL == ccatdev->ethdev); |
|
175 break; |
|
176 default: |
|
177 pr_info("Found: 0x%04x not supported\n", type); |
|
178 break; |
|
179 } |
|
180 addr += block_size; |
|
181 } |
|
182 return status; |
|
183 } |
240 } |
184 |
241 |
185 /** |
242 /** |
186 * Destroy all previously initialized CCAT functions |
243 * Destroy all previously initialized CCAT functions |
187 */ |
244 */ |
188 static void ccat_functions_remove(struct ccat_device *const ccatdev) |
245 static void ccat_functions_remove(struct ccat_device *const dev) |
189 { |
246 { |
190 if (!ccatdev->ethdev) { |
247 struct ccat_function *func; |
191 pr_warn("%s(): 'ethdev' was not initialized.\n", __FUNCTION__); |
248 struct ccat_function *tmp; |
192 } else { |
249 list_for_each_entry_safe(func, tmp, &dev->functions, list) { |
193 struct ccat_eth_priv *const ethdev = ccatdev->ethdev; |
250 if (func->drv) { |
194 ccatdev->ethdev = NULL; |
251 func->drv->remove(func); |
195 ccat_eth_remove(ethdev); |
252 func->drv = NULL; |
196 } |
253 } |
197 if (!ccatdev->update) { |
254 list_del(&func->list); |
198 pr_warn("%s(): 'update' was not initialized.\n", __FUNCTION__); |
255 kfree(func); |
199 } else { |
256 } |
200 struct ccat_update *const update = ccatdev->update; |
257 } |
201 ccatdev->update = NULL; |
258 |
202 ccat_update_remove(update); |
259 #ifdef CONFIG_PCI |
203 } |
260 static int ccat_pci_probe(struct pci_dev *pdev, const struct pci_device_id *id) |
204 } |
261 { |
205 |
262 struct ccat_device *ccatdev; |
206 static int ccat_probe(struct pci_dev *pdev, const struct pci_device_id *id) |
263 u8 revision; |
207 { |
|
208 int status; |
264 int status; |
209 u8 revision; |
265 |
210 struct ccat_device *ccatdev = kmalloc(sizeof(*ccatdev), GFP_KERNEL); |
266 ccatdev = devm_kzalloc(&pdev->dev, sizeof(*ccatdev), GFP_KERNEL); |
211 |
|
212 if (!ccatdev) { |
267 if (!ccatdev) { |
213 pr_err("%s() out of memory.\n", __FUNCTION__); |
268 pr_err("%s() out of memory.\n", __FUNCTION__); |
214 return -ENOMEM; |
269 return -ENOMEM; |
215 } |
270 } |
216 memset(ccatdev, 0, sizeof(*ccatdev)); |
|
217 ccatdev->pdev = pdev; |
271 ccatdev->pdev = pdev; |
218 pci_set_drvdata(pdev, ccatdev); |
272 pci_set_drvdata(pdev, ccatdev); |
219 |
273 |
220 status = pci_enable_device_mem(pdev); |
274 status = pci_enable_device_mem(pdev); |
221 if (status) { |
275 if (status) { |
222 pr_info("enable %s failed: %d\n", pdev->dev.kobj.name, status); |
276 pr_info("enable %s failed: %d\n", pdev->dev.kobj.name, status); |
223 return status; |
277 goto cleanup_pci_device; |
224 } |
278 } |
225 |
279 |
226 status = pci_read_config_byte(pdev, PCI_REVISION_ID, &revision); |
280 status = pci_read_config_byte(pdev, PCI_REVISION_ID, &revision); |
227 if (status) { |
281 if (status) { |
228 pr_warn("read CCAT pci revision failed with %d\n", status); |
282 pr_warn("read CCAT pci revision failed with %d\n", status); |
229 return status; |
283 goto cleanup_pci_device; |
230 } |
284 } |
231 |
285 |
232 #if LINUX_VERSION_CODE >= KERNEL_VERSION(3, 14, 0) |
286 if ((status = pci_request_regions(pdev, KBUILD_MODNAME))) { |
|
287 pr_info("allocate mem_regions failed.\n"); |
|
288 goto cleanup_pci_device; |
|
289 } |
|
290 |
233 if (!dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64))) { |
291 if (!dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64))) { |
234 pr_debug("64 bit DMA supported, pci rev: %u\n", revision); |
292 pr_debug("64 bit DMA supported, pci rev: %u\n", revision); |
235 } else if (!dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32))) { |
293 } else if (!dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32))) { |
236 pr_debug("32 bit DMA supported, pci rev: %u\n", revision); |
294 pr_debug("32 bit DMA supported, pci rev: %u\n", revision); |
237 } else { |
295 } else { |
238 pr_warn("No suitable DMA available, pci rev: %u\n", revision); |
296 pr_warn("No suitable DMA available, pci rev: %u\n", revision); |
239 } |
297 } |
240 #else |
298 |
241 if (!dma_set_mask(&pdev->dev, DMA_BIT_MASK(64))) { |
299 if (!(ccatdev->bar_0 = pci_iomap(pdev, 0, 0))) { |
242 pr_debug("64 bit DMA supported, pci rev: %u\n", revision); |
|
243 } else if (!dma_set_mask(&pdev->dev, DMA_BIT_MASK(32))) { |
|
244 pr_debug("32 bit DMA supported, pci rev: %u\n", revision); |
|
245 } else { |
|
246 pr_warn("No suitable DMA available, pci rev: %u\n", revision); |
|
247 } |
|
248 #endif |
|
249 |
|
250 if (ccat_bar_init(&ccatdev->bar[0], 0, pdev)) { |
|
251 pr_warn("initialization of bar0 failed.\n"); |
300 pr_warn("initialization of bar0 failed.\n"); |
252 return -EIO; |
301 status = -EIO; |
253 } |
302 goto cleanup_pci_device; |
254 |
303 } |
255 if (ccat_bar_init(&ccatdev->bar[2], 2, pdev)) { |
304 |
256 pr_warn("initialization of bar2 failed.\n"); |
305 if (!(ccatdev->bar_2 = pci_iomap(pdev, 2, 0))) { |
257 return -EIO; |
306 pr_warn("initialization of optional bar2 failed.\n"); |
258 } |
307 } |
259 |
308 |
260 pci_set_master(pdev); |
309 pci_set_master(pdev); |
261 if (ccat_functions_init(ccatdev)) { |
310 if (ccat_functions_init(ccatdev)) { |
262 pr_warn("some functions couldn't be initialized\n"); |
311 pr_warn("some functions couldn't be initialized\n"); |
263 } |
312 } |
264 return 0; |
313 return 0; |
265 } |
314 cleanup_pci_device: |
266 |
315 pci_disable_device(pdev); |
267 static void ccat_remove(struct pci_dev *pdev) |
316 return status; |
|
317 } |
|
318 |
|
319 static void ccat_pci_remove(struct pci_dev *pdev) |
268 { |
320 { |
269 struct ccat_device *ccatdev = pci_get_drvdata(pdev); |
321 struct ccat_device *ccatdev = pci_get_drvdata(pdev); |
270 |
322 |
271 if (ccatdev) { |
323 if (ccatdev) { |
272 ccat_functions_remove(ccatdev); |
324 ccat_functions_remove(ccatdev); |
273 ccat_bar_free(&ccatdev->bar[2]); |
325 if (ccatdev->bar_2) |
274 ccat_bar_free(&ccatdev->bar[0]); |
326 pci_iounmap(pdev, ccatdev->bar_2); |
|
327 pci_iounmap(pdev, ccatdev->bar_0); |
|
328 pci_release_regions(pdev); |
275 pci_disable_device(pdev); |
329 pci_disable_device(pdev); |
276 pci_set_drvdata(pdev, NULL); |
330 } |
277 kfree(ccatdev); |
|
278 } |
|
279 pr_debug("%s() done.\n", __FUNCTION__); |
|
280 } |
331 } |
281 |
332 |
282 #define PCI_DEVICE_ID_BECKHOFF_CCAT 0x5000 |
333 #define PCI_DEVICE_ID_BECKHOFF_CCAT 0x5000 |
283 #define PCI_VENDOR_ID_BECKHOFF 0x15EC |
334 #define PCI_VENDOR_ID_BECKHOFF 0x15EC |
284 |
335 |
285 static const struct pci_device_id pci_ids[] = { |
336 static const struct pci_device_id pci_ids[] = { |
286 {PCI_DEVICE(PCI_VENDOR_ID_BECKHOFF, PCI_DEVICE_ID_BECKHOFF_CCAT)}, |
337 {PCI_DEVICE(PCI_VENDOR_ID_BECKHOFF, PCI_DEVICE_ID_BECKHOFF_CCAT)}, |
287 {0,}, |
338 {0,}, |
288 }; |
339 }; |
289 |
340 |
290 #if 0 /* prevent auto-loading */ |
|
291 MODULE_DEVICE_TABLE(pci, pci_ids); |
341 MODULE_DEVICE_TABLE(pci, pci_ids); |
292 #endif |
342 |
293 |
343 static struct pci_driver ccat_pci_driver = { |
294 static struct pci_driver pci_driver = { |
|
295 .name = KBUILD_MODNAME, |
344 .name = KBUILD_MODNAME, |
296 .id_table = pci_ids, |
345 .id_table = pci_ids, |
297 .probe = ccat_probe, |
346 .probe = ccat_pci_probe, |
298 .remove = ccat_remove, |
347 .remove = ccat_pci_remove, |
299 }; |
348 }; |
300 |
349 |
301 static void __exit ccat_exit_module(void) |
350 module_pci_driver(ccat_pci_driver); |
302 { |
351 |
303 pci_unregister_driver(&pci_driver); |
352 #else /* #ifdef CONFIG_PCI */ |
304 } |
353 |
305 |
354 static int ccat_eim_probe(struct platform_device *pdev) |
306 static int __init ccat_init_module(void) |
355 { |
307 { |
356 struct ccat_device *ccatdev; |
308 pr_info("%s, %s\n", DRV_DESCRIPTION, DRV_VERSION); |
357 |
309 return pci_register_driver(&pci_driver); |
358 ccatdev = devm_kzalloc(&pdev->dev, sizeof(*ccatdev), GFP_KERNEL); |
310 } |
359 if (!ccatdev) { |
311 |
360 pr_err("%s() out of memory.\n", __FUNCTION__); |
312 module_exit(ccat_exit_module); |
361 return -ENOMEM; |
313 module_init(ccat_init_module); |
362 } |
|
363 ccatdev->pdev = pdev; |
|
364 platform_set_drvdata(pdev, ccatdev); |
|
365 |
|
366 if (!request_mem_region(0xf0000000, 0x02000000, pdev->name)) { |
|
367 pr_warn("request mem region failed.\n"); |
|
368 return -EIO; |
|
369 } |
|
370 |
|
371 if (!(ccatdev->bar_0 = ioremap(0xf0000000, 0x02000000))) { |
|
372 pr_warn("initialization of bar0 failed.\n"); |
|
373 return -EIO; |
|
374 } |
|
375 |
|
376 ccatdev->bar_2 = NULL; |
|
377 |
|
378 if (ccat_functions_init(ccatdev)) { |
|
379 pr_warn("some functions couldn't be initialized\n"); |
|
380 } |
|
381 return 0; |
|
382 } |
|
383 |
|
384 static int ccat_eim_remove(struct platform_device *pdev) |
|
385 { |
|
386 struct ccat_device *ccatdev = platform_get_drvdata(pdev); |
|
387 |
|
388 if (ccatdev) { |
|
389 ccat_functions_remove(ccatdev); |
|
390 iounmap(ccatdev->bar_0); |
|
391 release_mem_region(0xf0000000, 0x02000000); |
|
392 } |
|
393 return 0; |
|
394 } |
|
395 |
|
396 static const struct of_device_id bhf_eim_ccat_ids[] = { |
|
397 {.compatible = "bhf,emi-ccat",}, |
|
398 {} |
|
399 }; |
|
400 |
|
401 MODULE_DEVICE_TABLE(of, bhf_eim_ccat_ids); |
|
402 |
|
403 static struct platform_driver ccat_eim_driver = { |
|
404 .driver = { |
|
405 .name = KBUILD_MODNAME, |
|
406 .of_match_table = bhf_eim_ccat_ids, |
|
407 }, |
|
408 .probe = ccat_eim_probe, |
|
409 .remove = ccat_eim_remove, |
|
410 }; |
|
411 |
|
412 module_platform_driver(ccat_eim_driver); |
|
413 #endif /* #ifdef CONFIG_PCI */ |