/* xeon-phi-kernel-module: host/linux.c */

/*
 * Copyright 2010-2017 Intel Corporation.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License, version 2,
 * as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License for more details.
 *
 * Disclaimer: The codes contained in these modules may be specific to
 * the Intel Software Development Platform codenamed Knights Ferry,
 * and the Intel product codenamed Knights Corner, and are not backward
 * compatible with other Intel products. Additionally, Intel will NOT
 * support the codes or instruction set in future products.
 *
 * Intel offers no warranty of any kind regarding the code. This code is
 * licensed on an "AS IS" basis and Intel is not obligated to provide
 * any support, assistance, installation, training, or other services
 * of any kind. Intel is also not obligated to provide any updates,
 * enhancements or extensions. Intel specifically disclaims any warranty
 * of merchantability, non-infringement, fitness for any particular
 * purpose, and any other warranty.
 *
 * Further, Intel disclaims all liability of any kind, including but
 * not limited to liability for infringement of any proprietary rights,
 * relating to the use of the code, even if Intel is notified of the
 * possibility of such liability. Except as expressly stated in an Intel
 * license agreement provided with this code and agreed upon with Intel,
 * no license, express or implied, by estoppel or otherwise, to any
 * intellectual property rights is granted herein.
 */

#include <linux/string.h>

#include "mic/micscif_kmem_cache.h"
#include "micint.h"
#include "mic_common.h"
#include "mic/io_interface.h"
#include "mic/mic_pm.h"
#include "mic/micveth.h"

MODULE_LICENSE("GPL");
MODULE_INFO(build_number, BUILD_NUMBER);
MODULE_INFO(build_bywhom, BUILD_BYWHOM);
MODULE_INFO(build_ondate, BUILD_ONDATE);
MODULE_INFO(build_scmver, BUILD_SCMVER);

#if (LINUX_VERSION_CODE <= KERNEL_VERSION(2,6,34))
#include <linux/pm_qos_params.h>
#endif

struct kmem_cache *unaligned_cache;
mic_lindata_t mic_lindata;

module_param_named(ulimit, mic_ulimit_check, bool, 0600);
MODULE_PARM_DESC(ulimit, "SCIF ulimit check");

module_param_named(reg_cache, mic_reg_cache_enable, bool, 0600);
MODULE_PARM_DESC(reg_cache, "SCIF registration caching");

module_param_named(huge_page, mic_huge_page_enable, bool, 0600);
MODULE_PARM_DESC(huge_page, "SCIF Huge Page Support");

extern bool mic_p2p_enable;
module_param_named(p2p, mic_p2p_enable, bool, 0600);
MODULE_PARM_DESC(p2p, "SCIF peer-to-peer");

extern bool mic_p2p_proxy_enable;
module_param_named(p2p_proxy, mic_p2p_proxy_enable, bool, 0600);
MODULE_PARM_DESC(p2p_proxy, "SCIF peer-to-peer proxy DMA support");

extern bool mic_watchdog_enable;
module_param_named(watchdog, mic_watchdog_enable, bool, 0600);
MODULE_PARM_DESC(watchdog, "SCIF Watchdog");

extern bool mic_watchdog_auto_reboot;
module_param_named(watchdog_auto_reboot, mic_watchdog_auto_reboot, bool, 0600);
MODULE_PARM_DESC(watchdog_auto_reboot, "SCIF Watchdog auto reboot");

bool mic_msi_enable = 1;
module_param_named(msi, mic_msi_enable, bool, 0600);
MODULE_PARM_DESC(msi, "Enable MSI-X interrupts in the driver.");

int mic_pm_qos_cpu_dma_lat = -1;
module_param_named(pm_qos_cpu_dma_lat, mic_pm_qos_cpu_dma_lat, int, 0600);
MODULE_PARM_DESC(pm_qos_cpu_dma_lat, "PM QoS CPU DMA latency in usecs.");

extern int ramoops_count;
module_param_named(ramoops_count, ramoops_count, int, 0600);
MODULE_PARM_DESC(ramoops_count, "Maximum frame count for the ramoops driver.");

extern bool mic_crash_dump_enabled;
module_param_named(crash_dump, mic_crash_dump_enabled, bool, 0600);
MODULE_PARM_DESC(crash_dump, "MIC Crash Dump enabled.");

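/*
 * Illustrative usage (added for clarity, not part of the original source):
 * assuming the host driver is built and installed as mic.ko, the parameters
 * above can be set at load time, e.g.
 *
 *   modprobe mic msi=1 watchdog=0 pm_qos_cpu_dma_lat=75
 *
 * Parameters registered with mode 0600 also appear under
 * /sys/module/mic/parameters/ and can be written by root at runtime, although
 * the driver may only consult some of them during initialization.
 */
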
#define GET_FILE_SIZE_FROM_INODE(fp) i_size_read((fp)->f_path.dentry->d_inode)

int usagemode_param = 0;

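/*
 * Character device minor numbers (comment added for clarity; inferred from
 * the dispatch below and the device_create() calls in mic_init()/mic_probe()):
 *
 *   minor 0   - host control device ("ctrl"), adapter ioctls
 *   minor 1   - SCIF device ("scif"), handled by the micscif_* entry points
 *   minor 2+N - per-board device "micN"; the open handler below currently
 *               only services minor 2, which provides PSMI access
 *
 * The resulting /dev paths depend on the mic_devnode() callback further down
 * in this file.
 */
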
static int
mic_open(struct inode *inode, struct file *filp)
{
	dev_t dev = inode->i_rdev;

	switch (MINOR(dev)) {
	case 0:
		return 0;
	case 1:
		return scif_fdopen(filp);
	case 2:
		return mic_psmi_open(filp);
	}

	return -EINVAL;
}

static int
mic_release(struct inode *inode, struct file *filp)
{
	dev_t dev = inode->i_rdev;
	int rc = 0;

	switch (MINOR(dev)) {
	case 0:
		if (filp->private_data == filp) {
			// Fasync is set
			rc = fasync_helper(-1, filp, 0, &mic_data.dd_fasync);
			mic_data.dd_fasync = NULL;
		}
		return rc;
	case 1:
		return scif_fdclose(filp);
	case 2:
		// psmi access to device
		return 0;
	}

	return -EINVAL;
}

extern ssize_t mic_psmi_read(struct file *filp, char __user *buf,
			     size_t count, loff_t *pos);
static ssize_t
mic_read(struct file *filp, char __user *buf,
	 size_t count, loff_t *pos)
{
	dev_t dev = filp->f_path.dentry->d_inode->i_rdev;
	if (MINOR(dev) == 2)
		return mic_psmi_read(filp, buf, count, pos);

	return -EINVAL;
}

static long
mic_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
{
	dev_t dev;
	int status = 0;

	dev = filp->f_path.dentry->d_inode->i_rdev;
	if (MINOR(dev) == 1)
		return scif_process_ioctl(filp, cmd, arg);

	if (MINOR(dev) == 2)
		return -EINVAL;

	status = adapter_do_ioctl(cmd, arg);
	return status;
}

static int
mic_fasync(int fd, struct file *filp, int on)
{
	int rc;

	if ((rc = fasync_helper(fd, filp, on, &mic_data.dd_fasync)) < 0) {
		return rc;
	}

	if (on) {
#if (LINUX_VERSION_CODE < KERNEL_VERSION(3, 18, 0))
		rc = __f_setown(filp, task_pid(current), PIDTYPE_PID, 0);
#else
		__f_setown(filp, task_pid(current), PIDTYPE_PID, 0);
#endif
		filp->private_data = filp;
	} else {
		filp->private_data = NULL;
	}

	return rc;
}
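
/*
 * Illustrative userspace usage (not part of the original source): a process
 * that wants SIGIO notifications from the control device would typically do
 * something like the following; the device path assumes the mic_devnode()
 * naming further below.
 *
 *   int fd = open("/dev/mic/ctrl", O_RDWR);
 *   fcntl(fd, F_SETOWN, getpid());
 *   fcntl(fd, F_SETFL, fcntl(fd, F_GETFL) | FASYNC);
 *
 * The FASYNC flag change invokes mic_fasync() above, which records the file
 * in mic_data.dd_fasync; mic_release() tears this down on close when fasync
 * was set.
 */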

int
mic_mmap(struct file *f, struct vm_area_struct *vma)
{
	dev_t dev = f->f_path.dentry->d_inode->i_rdev;
	if (MINOR(dev) == 1)
		return micscif_mmap(f, vma);

	return -EINVAL;
}

unsigned int
mic_poll(struct file *f, poll_table *wait)
{
	dev_t dev = f->f_path.dentry->d_inode->i_rdev;
	if (MINOR(dev) == 1)
		return micscif_poll(f, wait);

	return -EINVAL;
}

int
mic_flush(struct file *f, fl_owner_t id)
{
	dev_t dev = f->f_path.dentry->d_inode->i_rdev;
	if (MINOR(dev) == 1)
		return micscif_flush(f, id);

	return -EINVAL;
}

irqreturn_t
mic_irq_isr(int irq, void *data)
{
	if (((mic_ctx_t *)data)->msie)
		adapter_imsr((mic_ctx_t *)data);
	else if (adapter_isr((mic_ctx_t *)data) < 0) {
		return IRQ_NONE;
	}

	return IRQ_HANDLED;
}

extern struct attribute_group bd_attr_group;
extern struct attribute_group host_attr_group;
extern struct attribute_group scif_attr_group;
extern struct attribute_group psmi_attr_group;
extern struct bin_attribute mic_psmi_ptes_attr;

static int
mic_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
{
	int brdnum = mic_data.dd_numdevs;
	int err = 0;
	bd_info_t *bd_info;
	mic_ctx_t *mic_ctx;
#ifdef CONFIG_PCI_MSI
	int i = 0;
#endif
	if ((bd_info = (bd_info_t *)kzalloc(sizeof(bd_info_t), GFP_KERNEL)) == NULL) {
		printk("MIC: probe failed allocating memory for bd_info\n");
		return -ENOMEM;
	}

	mic_ctx = &bd_info->bi_ctx;
	mic_ctx->bd_info = bd_info;
	mic_ctx->bi_id = brdnum;
	mic_ctx->bi_pdev = pdev;
	mic_ctx->msie = 0;
	mic_data.dd_bi[brdnum] = bd_info;

	if ((err = pci_enable_device(pdev))) {
		printk("pci_enable failed board #%d\n", brdnum);
		goto probe_freebd;
	}

	pci_set_master(pdev);
	err = pci_reenable_device(pdev);
	err = pci_set_dma_mask(pdev, DMA_BIT_MASK(64));
	if (err) {
		printk("mic %d: ERROR DMA not available\n", brdnum);
		goto probe_freebd;
	}
	err = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64));
	if (err) {
		printk("mic %d: ERROR pci_set_consistent_dma_mask(64) %d\n", brdnum, err);
		err = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(32));
		if (err) {
			printk("mic %d: ERROR pci_set_consistent_dma_mask(32) %d\n", brdnum, err);
			goto probe_freebd;
		}
	}

	// Allocate bar 4 for MMIO and GTT
	bd_info->bi_ctx.mmio.pa = pci_resource_start(pdev, DLDR_MMIO_BAR);
	bd_info->bi_ctx.mmio.len = pci_resource_len(pdev, DLDR_MMIO_BAR);
	if (request_mem_region(bd_info->bi_ctx.mmio.pa,
			       bd_info->bi_ctx.mmio.len, "mic") == NULL) {
		printk("mic %d: failed to reserve mmio space\n", brdnum);
		goto probe_freebd;
	}

	// Allocate bar 0 for access Aperture
	bd_info->bi_ctx.aper.pa = pci_resource_start(pdev, DLDR_APT_BAR);
	bd_info->bi_ctx.aper.len = pci_resource_len(pdev, DLDR_APT_BAR);
	if (request_mem_region(bd_info->bi_ctx.aper.pa,
			       bd_info->bi_ctx.aper.len, "mic") == NULL) {
		printk("mic %d: failed to reserve aperture space\n", brdnum);
		goto probe_relmmio;
	}

#ifdef CONFIG_PCI_MSI
	if (mic_msi_enable) {
		for (i = 0; i < MIC_NUM_MSIX_ENTRIES; i++)
			bd_info->bi_msix_entries[i].entry = i;
		err = pci_enable_msix_exact(mic_ctx->bi_pdev, bd_info->bi_msix_entries,
					    MIC_NUM_MSIX_ENTRIES);
		if (err == 0) { // Error code of zero means success.
			// Only support 1 MSIx for now
			err = request_irq(bd_info->bi_msix_entries[0].vector,
					  mic_irq_isr, 0, "mic", mic_ctx);
			if (err != 0) {
				printk("MIC: Error in request_irq %d\n", err);
				goto probe_relaper;
			}
			mic_ctx->msie = 1;
		}
	}
#endif

	// TODO: this needs to be hardened and actually return errors
	if ((err = adapter_init_device(mic_ctx)) != 0) {
		printk("MIC: Adapter init device failed %d\n", err);
		goto probe_relaper;
	}

	// Adding sysfs entries
	set_sysfs_entries(mic_ctx);

	bd_info->bi_sysfsdev = device_create(mic_lindata.dd_class, &pdev->dev,
				mic_lindata.dd_dev + 2 + mic_ctx->bd_info->bi_ctx.bi_id,
				NULL, "mic%d", mic_ctx->bd_info->bi_ctx.bi_id);
	err = sysfs_create_group(&mic_ctx->bd_info->bi_sysfsdev->kobj, &bd_attr_group);
	mic_ctx->sysfs_state = sysfs_get_dirent(mic_ctx->bd_info->bi_sysfsdev->kobj.sd,
#if (LINUX_VERSION_CODE > KERNEL_VERSION(2,6,35) && LINUX_VERSION_CODE < KERNEL_VERSION(3,14,0))
						NULL,
#endif
						"state");

	dev_set_drvdata(mic_ctx->bd_info->bi_sysfsdev, mic_ctx);

	if (!mic_ctx->msie)
		if ((err = request_irq(mic_ctx->bi_pdev->irq, mic_irq_isr,
				       IRQF_SHARED, "mic", mic_ctx)) != 0) {
			printk("MIC: Error in request_irq %d\n", err);
			goto probe_unmapaper;
		}

	adapter_probe(&bd_info->bi_ctx);

	if (mic_ctx->bi_psmi.enabled) {
		err = sysfs_create_group(&mic_ctx->bd_info->bi_sysfsdev->kobj,
					 &psmi_attr_group);
		err = device_create_bin_file(mic_ctx->bd_info->bi_sysfsdev,
					     &mic_psmi_ptes_attr);
	}

	adapter_wait_reset(mic_ctx);

	// Adding a board instance so increment the total number of MICs in the system.
	list_add_tail(&bd_info->bi_list, &mic_data.dd_bdlist);
	mic_data.dd_numdevs++;
	printk("mic_probe %d:%d:%d as board #%d\n", pdev->bus->number,
	       PCI_SLOT(pdev->devfn), PCI_FUNC(pdev->devfn), brdnum);
	return 0;

probe_unmapaper:
	wait_event(mic_ctx->ioremapwq, mic_ctx->aper.va || mic_ctx->state == MIC_RESETFAIL);
	if (mic_ctx->aper.va)
		iounmap((void *)bd_info->bi_ctx.aper.va);
	iounmap((void *)bd_info->bi_ctx.mmio.va);

probe_relaper:
	release_mem_region(bd_info->bi_ctx.aper.pa, bd_info->bi_ctx.aper.len);

probe_relmmio:
	release_mem_region(bd_info->bi_ctx.mmio.pa, bd_info->bi_ctx.mmio.len);

probe_freebd:
	kfree(bd_info);
	return err;
}

static void
mic_remove(struct pci_dev *pdev)
{
	int32_t brdnum;
	bd_info_t *bd_info;

	if (mic_data.dd_numdevs - 1 < 0)
		return;
	mic_data.dd_numdevs--;
	brdnum = mic_data.dd_numdevs;

	/* Make sure boards are shutdown and not available. */
	bd_info = mic_data.dd_bi[brdnum];

	spin_lock_bh(&bd_info->bi_ctx.sysfs_lock);
	sysfs_put(bd_info->bi_ctx.sysfs_state);
	bd_info->bi_ctx.sysfs_state = NULL;
	spin_unlock_bh(&bd_info->bi_ctx.sysfs_lock);

	if (bd_info->bi_ctx.bi_psmi.enabled) {
		device_remove_bin_file(bd_info->bi_sysfsdev, &mic_psmi_ptes_attr);
		sysfs_remove_group(&bd_info->bi_sysfsdev->kobj, &psmi_attr_group);
	}
	sysfs_remove_group(&bd_info->bi_sysfsdev->kobj, &bd_attr_group);

	free_sysfs_entries(&bd_info->bi_ctx);
	device_destroy(mic_lindata.dd_class,
		       mic_lindata.dd_dev + 2 + bd_info->bi_ctx.bi_id);

	adapter_stop_device(&bd_info->bi_ctx, 1, 0);
	/*
	 * Need to wait for the reset to finish: accessing the card (for
	 * example from adapter_remove() below) while GDDR training is still
	 * in progress can be fatal.
	 */
	wait_for_reset(&bd_info->bi_ctx);

	mic_disable_interrupts(&bd_info->bi_ctx);

	if (!bd_info->bi_ctx.msie) {
		free_irq(bd_info->bi_ctx.bi_pdev->irq, &bd_info->bi_ctx);
#ifdef CONFIG_PCI_MSI
	} else {
		free_irq(bd_info->bi_msix_entries[0].vector, &bd_info->bi_ctx);
		pci_disable_msix(bd_info->bi_ctx.bi_pdev);
#endif
	}
	adapter_remove(&bd_info->bi_ctx);
	release_mem_region(bd_info->bi_ctx.aper.pa, bd_info->bi_ctx.aper.len);
	release_mem_region(bd_info->bi_ctx.mmio.pa, bd_info->bi_ctx.mmio.len);
	pci_disable_device(bd_info->bi_ctx.bi_pdev);
	kfree(bd_info);
}

static void
mic_shutdown(struct pci_dev *pdev)
{
	mic_ctx_t *mic_ctx;

	mic_ctx = get_device_context(pdev);
	if (!mic_ctx)
		return;

	adapter_stop_device(mic_ctx, !RESET_WAIT, !RESET_REATTEMPT);
	return;
}

static const struct file_operations mic_fops = {
	.open = mic_open,
	.release = mic_release,
	.read = mic_read,
	.unlocked_ioctl = mic_ioctl,
	.fasync = mic_fasync,
	.mmap = mic_mmap,
	.poll = mic_poll,
	.flush = mic_flush,
	.owner = THIS_MODULE,
};

static const struct dev_pm_ops pci_dev_pm_ops = {
	.suspend = micpm_suspend,
	.resume = micpm_resume,
	.freeze = micpm_suspend,
	.restore = micpm_resume,
	.suspend_noirq = micpm_suspend_noirq,
	.resume_noirq = micpm_resume_noirq,
	.freeze_noirq = micpm_suspend_noirq,
	.restore_noirq = micpm_resume_noirq,
};

static struct notifier_block mic_pm_notifer = {
	.notifier_call = micpm_notifier_block,
};

static struct pci_device_id mic_pci_tbl[] = {
#ifdef CONFIG_ML1OM
	{ PCI_VENDOR_ID_INTEL, PCI_DEVICE_ABR_2249, PCI_ANY_ID, PCI_ANY_ID,
	  0, 0, 0 },
	{ PCI_VENDOR_ID_INTEL, PCI_DEVICE_ABR_224a, PCI_ANY_ID, PCI_ANY_ID,
	  0, 0, 0 },
#endif
#ifdef CONFIG_MK1OM
	{ PCI_VENDOR_ID_INTEL, PCI_DEVICE_KNC_2250, PCI_ANY_ID, PCI_ANY_ID,
	  0, 0, 0 },
	{ PCI_VENDOR_ID_INTEL, PCI_DEVICE_KNC_2251, PCI_ANY_ID, PCI_ANY_ID,
	  0, 0, 0 },
	{ PCI_VENDOR_ID_INTEL, PCI_DEVICE_KNC_2252, PCI_ANY_ID, PCI_ANY_ID,
	  0, 0, 0 },
	{ PCI_VENDOR_ID_INTEL, PCI_DEVICE_KNC_2253, PCI_ANY_ID, PCI_ANY_ID,
	  0, 0, 0 },
	{ PCI_VENDOR_ID_INTEL, PCI_DEVICE_KNC_2254, PCI_ANY_ID, PCI_ANY_ID,
	  0, 0, 0 },
	{ PCI_VENDOR_ID_INTEL, PCI_DEVICE_KNC_2255, PCI_ANY_ID, PCI_ANY_ID,
	  0, 0, 0 },
	{ PCI_VENDOR_ID_INTEL, PCI_DEVICE_KNC_2256, PCI_ANY_ID, PCI_ANY_ID,
	  0, 0, 0 },
	{ PCI_VENDOR_ID_INTEL, PCI_DEVICE_KNC_2257, PCI_ANY_ID, PCI_ANY_ID,
	  0, 0, 0 },
	{ PCI_VENDOR_ID_INTEL, PCI_DEVICE_KNC_2258, PCI_ANY_ID, PCI_ANY_ID,
	  0, 0, 0 },
	{ PCI_VENDOR_ID_INTEL, PCI_DEVICE_KNC_2259, PCI_ANY_ID, PCI_ANY_ID,
	  0, 0, 0 },
	{ PCI_VENDOR_ID_INTEL, PCI_DEVICE_KNC_225a, PCI_ANY_ID, PCI_ANY_ID,
	  0, 0, 0 },
	{ PCI_VENDOR_ID_INTEL, PCI_DEVICE_KNC_225b, PCI_ANY_ID, PCI_ANY_ID,
	  0, 0, 0 },
	{ PCI_VENDOR_ID_INTEL, PCI_DEVICE_KNC_225c, PCI_ANY_ID, PCI_ANY_ID,
	  0, 0, 0 },
	{ PCI_VENDOR_ID_INTEL, PCI_DEVICE_KNC_225d, PCI_ANY_ID, PCI_ANY_ID,
	  0, 0, 0 },
	{ PCI_VENDOR_ID_INTEL, PCI_DEVICE_KNC_225e, PCI_ANY_ID, PCI_ANY_ID,
	  0, 0, 0 },
#endif
	{ 0, }
};
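
/*
 * Note added for clarity: this file does not export the ID table with
 * MODULE_DEVICE_TABLE(pci, mic_pci_tbl), so udev/modprobe will not auto-load
 * the module when a matching device is discovered; the driver is expected to
 * be loaded explicitly. Exporting the table would look like:
 *
 *   MODULE_DEVICE_TABLE(pci, mic_pci_tbl);
 */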

#if LINUX_VERSION_CODE > KERNEL_VERSION(2,6,31)
#if LINUX_VERSION_CODE >= KERNEL_VERSION(3,3,0)
#define MODE_T umode_t
#else
#define MODE_T mode_t
#endif
static char *
mic_devnode(struct device *dev, MODE_T *mode)
{
	return kasprintf(GFP_KERNEL, "mic/%s", dev_name(dev));
}
#undef MODE_T
#endif

static int __init
mic_init(void)
{
	int ret, i;

	adapter_init();

	unaligned_cache = micscif_kmem_cache_create();
	if (!unaligned_cache) {
		ret = -ENOMEM;
		goto init_free_ports;
	}

	mic_lindata.dd_pcidriver.name = "mic";
	mic_lindata.dd_pcidriver.id_table = mic_pci_tbl;
	mic_lindata.dd_pcidriver.probe = mic_probe;
	mic_lindata.dd_pcidriver.remove = mic_remove;
	mic_lindata.dd_pcidriver.driver.pm = &pci_dev_pm_ops;
	mic_lindata.dd_pcidriver.shutdown = mic_shutdown;

	if ((ret = alloc_chrdev_region(&mic_lindata.dd_dev,
				       0, MAX_DLDR_MINORS, "mic")) != 0) {
		printk("Error allocating device nodes: %d\n", ret);
		goto init_free_ports;
	}

	cdev_init(&mic_lindata.dd_cdev, &mic_fops);
	mic_lindata.dd_cdev.owner = THIS_MODULE;
	mic_lindata.dd_cdev.ops = &mic_fops;

	if ((ret = cdev_add(&mic_lindata.dd_cdev,
			    mic_lindata.dd_dev, MAX_DLDR_MINORS)) != 0) {
		kobject_put(&mic_lindata.dd_cdev.kobj);
		goto init_free_region;
	}

	mic_lindata.dd_class = class_create(THIS_MODULE, "mic");
	if (IS_ERR(mic_lindata.dd_class)) {
		printk("MICDLDR: Error creating mic class\n");
		cdev_del(&mic_lindata.dd_cdev);
		ret = PTR_ERR(mic_lindata.dd_class);
		goto init_free_region;
	}

#if LINUX_VERSION_CODE > KERNEL_VERSION(2,6,31)
	mic_lindata.dd_class->devnode = mic_devnode;
#endif

	mic_lindata.dd_hostdev = device_create(mic_lindata.dd_class, NULL,
					       mic_lindata.dd_dev, NULL, "ctrl");
	mic_lindata.dd_scifdev = device_create(mic_lindata.dd_class, NULL,
					       mic_lindata.dd_dev + 1, NULL, "scif");
	ret = sysfs_create_group(&mic_lindata.dd_hostdev->kobj, &host_attr_group);
	ret = sysfs_create_group(&mic_lindata.dd_scifdev->kobj, &scif_attr_group);

#if LINUX_VERSION_CODE > KERNEL_VERSION(2,6,31)
	mic_lindata.dd_class->devnode = NULL;
#endif

	if (micveth_init(mic_lindata.dd_hostdev))
		printk(KERN_ERR "%s: micveth_init failed\n", __func__);

	ret = pci_register_driver(&mic_lindata.dd_pcidriver);
	if (ret) {
		micscif_destroy();
		printk("mic: failed to register pci driver %d\n", ret);
		goto clean_unregister;
	}

	if (!mic_data.dd_numdevs) {
		printk("mic: No MIC boards present. SCIF available in loopback mode\n");
	} else {
		printk("mic: number of devices detected %d\n", mic_data.dd_numdevs);
	}

	for (i = 0; i < mic_data.dd_numdevs; i++) {
		mic_ctx_t *mic_ctx = get_per_dev_ctx(i);
		wait_event(mic_ctx->ioremapwq,
			   mic_ctx->aper.va || mic_ctx->state == MIC_RESETFAIL);
		destroy_workqueue(mic_ctx->ioremapworkq);
	}

	micveth_init_legacy(mic_data.dd_numdevs, mic_lindata.dd_hostdev);

	ret = acptboot_init();

#ifdef USE_VCONSOLE
	micvcons_create(mic_data.dd_numdevs);
#endif

	/* Initialize data structures for PM Disconnect */
	ret = micpm_disconn_init(mic_data.dd_numdevs + 1);
	if (ret)
		printk(KERN_ERR "%s: Failed to initialize PM disconnect"
				" data structures. PM may not work as expected."
				" ret = %d\n", __func__, ret);
	register_pm_notifier(&mic_pm_notifer);
#if (LINUX_VERSION_CODE <= KERNEL_VERSION(2,6,34))
	ret = pm_qos_add_requirement(PM_QOS_CPU_DMA_LATENCY, "mic", mic_pm_qos_cpu_dma_lat);
	if (ret) {
		printk(KERN_ERR "%s %d mic_pm_qos_cpu_dma_lat %d ret %d\n",
		       __func__, __LINE__, mic_pm_qos_cpu_dma_lat, ret);
		ret = 0;
		/* Don't fail driver load due to the PM QoS API. Fall through. */
	}
#endif
	return 0;

clean_unregister:
	device_destroy(mic_lindata.dd_class, mic_lindata.dd_dev + 1);
	device_destroy(mic_lindata.dd_class, mic_lindata.dd_dev);
	class_destroy(mic_lindata.dd_class);
	cdev_del(&mic_lindata.dd_cdev);
	unregister_pm_notifier(&mic_pm_notifer);
init_free_region:
	unregister_chrdev_region(mic_lindata.dd_dev, MAX_DLDR_MINORS);
init_free_ports:
	micpm_uninit();
	return ret;
}

static void __exit
mic_exit(void)
{
	/* Close endpoints related to reverse registration */
	acptboot_exit();

#ifdef USE_VCONSOLE
	micvcons_destroy(mic_data.dd_numdevs);
#endif

	pci_unregister_driver(&mic_lindata.dd_pcidriver);
	micpm_uninit();

	/* Uninit data structures for PM disconnect */
	micpm_disconn_uninit(mic_data.dd_numdevs + 1);

#if (LINUX_VERSION_CODE <= KERNEL_VERSION(2,6,34))
	pm_qos_remove_requirement(PM_QOS_CPU_DMA_LATENCY, "mic");
#endif
	micscif_kmem_cache_destroy();
	vmcore_exit();
	micveth_exit();
	micscif_destroy();
	ramoops_exit();

	device_destroy(mic_lindata.dd_class, mic_lindata.dd_dev + 1);
	device_destroy(mic_lindata.dd_class, mic_lindata.dd_dev);
	class_destroy(mic_lindata.dd_class);
	cdev_del(&mic_lindata.dd_cdev);
	unregister_chrdev_region(mic_lindata.dd_dev, MAX_DLDR_MINORS);
	unregister_pm_notifier(&mic_pm_notifer);
	return;
}

void
set_sysfs_entries(mic_ctx_t *mic_ctx)
{
	memset(&mic_ctx->sysfs_info, 0, sizeof(mic_ctx->sysfs_info));
}

void
free_sysfs_entries(mic_ctx_t *mic_ctx)
{
	if (mic_ctx->image != NULL)
		kfree(mic_ctx->image); /* mic_ctx->initramfs points into this buffer */
	if (mic_ctx->sysfs_info.cmdline != NULL)
		kfree(mic_ctx->sysfs_info.cmdline);
	if (mic_ctx->sysfs_info.kernel_cmdline != NULL)
		kfree(mic_ctx->sysfs_info.kernel_cmdline);
}

mic_ctx_t *
get_per_dev_ctx(uint16_t node)
{
	/* TODO: It is important to check the upper bound of the dd_bi array as
	 * well. That cannot be done currently because not all callers of
	 * get_per_dev_ctx() have dd_numdevs set correctly. (See the
	 * mic_ctx_map_single() call in adapter_init_device(), which is called
	 * even before dd_numdevs is incremented.) */
	return &mic_data.dd_bi[node]->bi_ctx;
}

int
get_num_devs(mic_ctx_t *mic_ctx, uint32_t *num_devs)
{
	if (num_devs == NULL)
		return -EINVAL;
	if (copy_to_user(num_devs, &mic_data.dd_numdevs, sizeof(uint32_t)))
		return -EFAULT;
	return 0;
}

int
mic_get_file_size(const char *fn, uint32_t *file_len)
{
	struct file *filp;
	loff_t filp_size;
	uint32_t status = 0;
	mm_segment_t fs = get_fs();

	set_fs(get_ds());

	if (!fn || IS_ERR(filp = filp_open(fn, 0, 0))) {
		status = EINVAL;
		goto cleanup_fs;
	}

	filp_size = GET_FILE_SIZE_FROM_INODE(filp);
	if (filp_size <= 0) {
		status = EINVAL;
		goto cleanup_filp;
	}

	*file_len = filp_size;
cleanup_filp:
	filp_close(filp, current->files);
cleanup_fs:
	set_fs(fs);
	return status;
}

// loads file from hdd into pci physical memory
int
mic_load_file(const char *fn, uint8_t *buffer, uint32_t max_size)
{
	long c;
	int status = 0;
	struct file *filp;
	loff_t filp_size, pos = 0;

	mm_segment_t fs = get_fs();
	set_fs(get_ds());

	if (!fn || IS_ERR(filp = filp_open(fn, 0, 0))) {
		status = EINVAL;
		goto cleanup_fs;
	}

	filp_size = GET_FILE_SIZE_FROM_INODE(filp);
	if (filp_size <= 0) {
		status = EINVAL;
		goto cleanup_filp;
	}

	c = vfs_read(filp, buffer, filp_size, &pos);
	if (c != (long)filp_size) {
		status = -1; //FIXME
		goto cleanup_filp;
	}

cleanup_filp:
	filp_close(filp, current->files);
cleanup_fs:
	set_fs(fs);

	return status;
}
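
/*
 * Illustrative usage of the two helpers above (not part of the original
 * source; the firmware path shown is hypothetical):
 *
 *   uint32_t len;
 *
 *   if (mic_get_file_size("/lib/firmware/mic/uos.img", &len) == 0 &&
 *       len <= max_size)
 *           err = mic_load_file("/lib/firmware/mic/uos.img", buffer, max_size);
 *
 * Both helpers return 0 on success; failures are reported as non-zero values
 * (a positive EINVAL for open/size errors, -1 for a short read), so callers
 * should test for non-zero rather than for negative values. Note also that
 * mic_load_file() does not itself bound the read by max_size, so the caller
 * must check the size first, as above.
 */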

module_init(mic_init);
module_exit(mic_exit);