Updated micscif/micscif_api.c for the new get_user_pages() signature.
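The updated call itself lives in micscif_api.c, which is not part of this file. As a rough, hypothetical sketch of what such an update usually amounts to (the helper name and the version cutoffs below are assumptions, not the driver's actual code), built around the 4.6 and 4.9 changes to get_user_pages():

/*
 * Hypothetical compatibility helper; scif_get_user_pages_compat is not a
 * real symbol in this driver. The caller is assumed to hold mmap_sem for
 * read around the call, as all three variants require.
 */
#include <linux/mm.h>
#include <linux/sched.h>
#include <linux/version.h>

static inline long scif_get_user_pages_compat(unsigned long start,
                                              unsigned long nr_pages,
                                              int write, struct page **pages)
{
#if (LINUX_VERSION_CODE >= KERNEL_VERSION(4,9,0))
        /* 4.9+: separate write/force replaced by a single gup_flags argument */
        return get_user_pages(start, nr_pages,
                              write ? FOLL_WRITE : 0, pages, NULL);
#elif (LINUX_VERSION_CODE >= KERNEL_VERSION(4,6,0))
        /* 4.6 - 4.8: operates on current/current->mm implicitly */
        return get_user_pages(start, nr_pages, write, 0, pages, NULL);
#else
        /* older kernels: explicit task_struct and mm_struct arguments */
        return get_user_pages(current, current->mm, start, nr_pages,
                              write, 0, pages, NULL);
#endif
}
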
[xeon-phi-kernel-module] / micscif / micscif_main.c
/*
 * Copyright 2010-2017 Intel Corporation.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License, version 2,
 * as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * General Public License for more details.
 *
 * Disclaimer: The codes contained in these modules may be specific to
 * the Intel Software Development Platform codenamed Knights Ferry,
 * and the Intel product codenamed Knights Corner, and are not backward
 * compatible with other Intel products. Additionally, Intel will NOT
 * support the codes or instruction set in future products.
 *
 * Intel offers no warranty of any kind regarding the code. This code is
 * licensed on an "AS IS" basis and Intel is not obligated to provide
 * any support, assistance, installation, training, or other services
 * of any kind. Intel is also not obligated to provide any updates,
 * enhancements or extensions. Intel specifically disclaims any warranty
 * of merchantability, non-infringement, fitness for any particular
 * purpose, and any other warranty.
 *
 * Further, Intel disclaims all liability of any kind, including but
 * not limited to liability for infringement of any proprietary rights,
 * relating to the use of the code, even if Intel is notified of the
 * possibility of such liability. Except as expressly stated in an Intel
 * license agreement provided with this code and agreed upon with Intel,
 * no license, express or implied, by estoppel or otherwise, to any
 * intellectual property rights is granted herein.
 */

#include <linux/cdev.h>
#include <linux/reboot.h>
#include <linux/version.h>
#if (LINUX_VERSION_CODE <= KERNEL_VERSION(2,6,34))
#include <linux/pm_qos_params.h>
#endif

#include <mic/micscif.h>
#include <mic/micscif_smpt.h>
#include <mic/micscif_rb.h>
#include <mic/micscif_intr.h>
//#include <micscif_test.h>
#include <mic/micscif_nodeqp.h>
#include <mic/mic_dma_api.h>
#include <mic/micscif_kmem_cache.h>
/* Include this for suspend/resume notifications from pm driver */
#include <mic/micscif_nm.h>

#ifdef CONFIG_MK1OM
#define MICPM_DEVEVENT_SUSPEND		1
#define MICPM_DEVEVENT_RESUME		2
#define MICPM_DEVEVENT_FAIL_SUSPEND	3
extern void micpm_device_register(struct notifier_block *n);
extern void micpm_device_unregister(struct notifier_block *n);
#endif

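/*
 * The node ID and host queue address are passed in by the host-side driver
 * when it loads this module; micscif_init() refuses to load without both.
 */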
int scif_id = 0;
module_param(scif_id, int, 0400);
MODULE_PARM_DESC(scif_id, "Set scif driver node ID");

ulong scif_addr = 0;
module_param(scif_addr, ulong, 0400);
MODULE_PARM_DESC(scif_addr, "Set scif driver host address");

struct kmem_cache *unaligned_cache;

struct mic_info {
        dev_t m_dev;
        struct cdev m_cdev;
        struct class *m_class;
        struct device *m_scifdev;
} micinfo;

int micscif_major = SCIF_MAJOR;
int micscif_minor = 0;

struct micscif_info ms_info;

// MAX MIC cards + 1 for the Host
struct micscif_dev scif_dev[MAX_BOARD_SUPPORTED + 1];

extern mic_dma_handle_t mic_dma_handle;

static int mic_pm_qos_cpu_dma_lat = -1;
static int mic_host_numa_node = -1;
static unsigned long mic_p2p_proxy_thresh = -1;

#ifdef CONFIG_MK1OM
static int micscif_devevent_handler(struct notifier_block *nb,
                                    unsigned long event,
                                    void *msg)
{
        if (event == MICPM_DEVEVENT_SUSPEND)
                return micscif_suspend_handler(nb, event, msg);
        else if (event == MICPM_DEVEVENT_RESUME)
                return micscif_resume_handler(nb, event, msg);
        else if (event == MICPM_DEVEVENT_FAIL_SUSPEND)
                return micscif_fail_suspend_handler(nb, event, msg);
        return 0;
}

static struct notifier_block mic_deviceevent = {
        .notifier_call = micscif_devevent_handler,
};
#endif

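/*
 * Two minor numbers share the "micscif" character device: minor 0 is the
 * bare "mic" node used for base device access/testing, minor 1 is the
 * "scif" node backing SCIF endpoints (both are created in
 * micscif_setup_base() below).
 */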
static int micscif_open(struct inode *in, struct file *f)
{
        dev_t dev = in->i_rdev;

        switch (MINOR(dev)) {
        case 0:
                /* base mic device access for testing */
                return 0;
        case 1:
                return scif_fdopen(f);
        }

        return -EINVAL;
}

static int micscif_ioctl(struct inode *in, struct file *f,
                         unsigned int cmd, unsigned long arg)
{
        dev_t dev = in->i_rdev;

        if (MINOR(dev) == 1) {
                /* SCIF device */
                return scif_process_ioctl(f, cmd, arg);
        }
        return -EINVAL;
}

static long micscif_unlocked_ioctl(struct file *f,
                                   unsigned int cmd, unsigned long arg)
{
        return (long) micscif_ioctl(f->f_path.dentry->d_inode, f, cmd, arg);
}

static int micscif_release(struct inode *in, struct file *f)
{
        dev_t dev = in->i_rdev;

        switch (MINOR(dev)) {
        case 0:
                /* base mic device access for testing */
                return 0;
        case 1:
                return scif_fdclose(f);
        }

        return -EINVAL;
}

/* TODO: Need to flush the queue, grab some lock, and probably
 * notify the remote node we're going down ... right now, we're
 * just freeing things, which is probably a bad idea :-)
 */
static int micscif_uninit_qp(struct micscif_dev *scifdev)
{
        int i;
        /* first, iounmap/unmap/free any memory we mapped */
        for (i = 0; i < scifdev->n_qpairs; i++) {
                iounmap(scifdev->qpairs[i].remote_qp);
                iounmap(scifdev->qpairs[i].outbound_q.rb_base);
                kfree((void *)scifdev->qpairs[i].inbound_q.rb_base);
        }
        kfree(scifdev->qpairs);
        scifdev->n_qpairs = 0;

        return 0;
}

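/*
 * On reboot we run the same teardown as on module unload (_micscif_exit())
 * so the host is told this node is going away before it disappears.
 */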
static int micscif_reboot(struct notifier_block *notifier, unsigned long unused1, void *unused2);

static struct notifier_block micscif_reboot_notifier = {
        .notifier_call = micscif_reboot,
        .priority = 0,
};

extern struct attribute_group scif_attr_group;

void micscif_destroy_base(void)
{
#ifdef CONFIG_MMU_NOTIFIER
        destroy_workqueue(ms_info.mi_mmu_notif_wq);
#endif
        destroy_workqueue(ms_info.mi_misc_wq);
        destroy_workqueue(ms_info.mi_conn_wq);

        sysfs_remove_group(&micinfo.m_scifdev->kobj, &scif_attr_group);
        device_destroy(micinfo.m_class, micinfo.m_dev + 1);
        device_destroy(micinfo.m_class, micinfo.m_dev);
        class_destroy(micinfo.m_class);
        cdev_del(&(micinfo.m_cdev));
        unregister_chrdev_region(micinfo.m_dev, 2);
}

static void _micscif_exit(void)
{
        struct list_head *pos, *unused;
        struct scif_callback *temp;
        struct micscif_dev *dev;
        int i;

        pr_debug("Goodbye SCIF!\n");
        /* Cleanup P2P Node Qp/ Interrupt Handlers */
        for (i = SCIF_HOST_NODE + 1; i <= MAX_BOARD_SUPPORTED; i++) {
                dev = &scif_dev[i];

                if (is_self_scifdev(dev))
                        continue;

                micscif_cleanup_scifdev(dev, DESTROY_WQ);
        }

        list_for_each_safe(pos, unused, &ms_info.mi_event_cb) {
                temp = list_entry(pos, struct scif_callback, list_member);
                list_del(pos);
                kfree(temp);
        }
        mutex_destroy(&ms_info.mi_event_cblock);

#ifdef CONFIG_MK1OM
        micpm_device_unregister(&mic_deviceevent);
#endif

        scif_dev[ms_info.mi_nodeid].sd_state = SCIFDEV_STOPPING;
        scif_dev[SCIF_HOST_NODE].sd_state = SCIFDEV_STOPPING;

        /* The EXIT message is the last message from MIC to the Host */
        micscif_send_exit();

        /*
         * Deliberately wait forever for a host response during driver
         * unload: the host must first inform the other SCIF nodes that
         * this node is going away and only then send its response, so
         * that this node's host shutdown handler does not race with
         * disconnection from the SCIF network. The host enforces a
         * timeout on sending that response, so a reply will arrive
         * unless the host itself has crashed.
         */
        wait_event(ms_info.mi_exitwq,
                   scif_dev[ms_info.mi_nodeid].sd_state == SCIFDEV_STOPPED);
        scif_proc_cleanup();
        mic_debug_uninit();
        micscif_kmem_cache_destroy();

        micscif_destroy_base();

        /* Disable interrupts */
        deregister_scif_intr_handler(&scif_dev[SCIF_HOST_NODE]);
        destroy_workqueue(scif_dev[SCIF_HOST_NODE].sd_intr_wq);
        micscif_destroy_loopback_qp(&scif_dev[ms_info.mi_nodeid]);

        /* Close DMA device */
        close_dma_device(0, &mic_dma_handle);

        micscif_uninit_qp(&scif_dev[SCIF_HOST_NODE]);
        iounmap(scif_dev[SCIF_HOST_NODE].mm_sbox);
#if (LINUX_VERSION_CODE <= KERNEL_VERSION(2,6,34))
        pm_qos_remove_requirement(PM_QOS_CPU_DMA_LATENCY, "micscif");
#endif
}

static void micscif_exit(void)
{
        unregister_reboot_notifier(&micscif_reboot_notifier);
        _micscif_exit();
}

static int micscif_reboot(struct notifier_block *notifier, unsigned long unused1, void *unused2)
{
        _micscif_exit();
        return NOTIFY_OK;
}

struct file_operations micscif_ops = {
        .owner = THIS_MODULE,
        .unlocked_ioctl = micscif_unlocked_ioctl,
        .mmap = micscif_mmap,
        .poll = micscif_poll,
        .flush = micscif_flush,
        .open = micscif_open,
        .release = micscif_release,
};

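/* devnode callback: both nodes are created under /dev/mic/, e.g. /dev/mic/scif. */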
static char *scif_devnode(struct device *dev, mode_t *mode)
{
        return kasprintf(GFP_KERNEL, "mic/%s", dev_name(dev));
}

// Setup the base information for the driver. No interface-specific code.
static int micscif_setup_base(void)
{
        long int result;

        if (micscif_major) {
                micinfo.m_dev = MKDEV(micscif_major, micscif_minor);
                result = register_chrdev_region(micinfo.m_dev, 2, "micscif");
        } else {
                result = alloc_chrdev_region(&micinfo.m_dev, micscif_minor, 2, "micscif");
                micscif_major = MAJOR(micinfo.m_dev);
        }

        if (result >= 0) {
                cdev_init(&(micinfo.m_cdev), &micscif_ops);
                micinfo.m_cdev.owner = THIS_MODULE;
                if ((result = cdev_add(&(micinfo.m_cdev), micinfo.m_dev, 2)))
                        goto unreg_chrdev;
        } else {
                goto unreg_chrdev;
        }

        micinfo.m_class = class_create(THIS_MODULE, "micscif");
        if (IS_ERR(micinfo.m_class)) {
                result = PTR_ERR(micinfo.m_class);
                goto del_m_dev;
        }

        micinfo.m_class->devnode = scif_devnode;
        if (IS_ERR((int *)(result =
                (long int)device_create(micinfo.m_class, NULL, micinfo.m_dev, NULL, "mic")))) {
                result = PTR_ERR((int *)result);
                goto class_destroy;
        }
        if (IS_ERR(micinfo.m_scifdev =
                device_create(micinfo.m_class, NULL, micinfo.m_dev + 1, NULL, "scif"))) {
                result = PTR_ERR(micinfo.m_scifdev);
                goto device_destroy;
        }
        if ((result = sysfs_create_group(&micinfo.m_scifdev->kobj, &scif_attr_group)))
                goto device_destroy1;

        spin_lock_init(&ms_info.mi_eplock);
        spin_lock_init(&ms_info.mi_connlock);
        spin_lock_init(&ms_info.mi_rmalock);
        mutex_init(&ms_info.mi_fencelock);
        spin_lock_init(&ms_info.mi_nb_connect_lock);
        INIT_LIST_HEAD(&ms_info.mi_uaccept);
        INIT_LIST_HEAD(&ms_info.mi_listen);
        INIT_LIST_HEAD(&ms_info.mi_zombie);
        INIT_LIST_HEAD(&ms_info.mi_connected);
        INIT_LIST_HEAD(&ms_info.mi_disconnected);
        INIT_LIST_HEAD(&ms_info.mi_rma);
        INIT_LIST_HEAD(&ms_info.mi_rma_tc);
        INIT_LIST_HEAD(&ms_info.mi_nb_connect_list);

#ifdef CONFIG_MMU_NOTIFIER
        INIT_LIST_HEAD(&ms_info.mi_mmu_notif_cleanup);
#endif
        INIT_LIST_HEAD(&ms_info.mi_fence);
        if (!(ms_info.mi_misc_wq = create_singlethread_workqueue("SCIF_MISC"))) {
                result = -ENOMEM;
                goto remove_group;
        }
        INIT_WORK(&ms_info.mi_misc_work, micscif_misc_handler);
        if (!(ms_info.mi_conn_wq = create_singlethread_workqueue("SCIF_NB_CONN"))) {
                result = -ENOMEM;
                goto destroy_misc_wq;
        }
        INIT_WORK(&ms_info.mi_conn_work, micscif_conn_handler);
#ifdef CONFIG_MMU_NOTIFIER
        if (!(ms_info.mi_mmu_notif_wq = create_singlethread_workqueue("SCIF_MMU"))) {
                result = -ENOMEM;
                goto destroy_conn_wq;
        }
        INIT_WORK(&ms_info.mi_mmu_notif_work, micscif_mmu_notif_handler);
#endif
        ms_info.mi_watchdog_to = DEFAULT_WATCHDOG_TO;
#ifdef MIC_IS_EMULATION
        ms_info.mi_watchdog_enabled = 0;
#else
        ms_info.mi_watchdog_enabled = 1;
#endif
        ms_info.mi_rma_tc_limit = SCIF_RMA_TEMP_CACHE_LIMIT;
        ms_info.mi_proxy_dma_threshold = mic_p2p_proxy_thresh;
        ms_info.en_msg_log = 0;
        return result;
#ifdef CONFIG_MMU_NOTIFIER
destroy_conn_wq:
        destroy_workqueue(ms_info.mi_conn_wq);
#endif
destroy_misc_wq:
        destroy_workqueue(ms_info.mi_misc_wq);
remove_group:
        sysfs_remove_group(&micinfo.m_scifdev->kobj, &scif_attr_group);
device_destroy1:
        device_destroy(micinfo.m_class, micinfo.m_dev + 1);
device_destroy:
        device_destroy(micinfo.m_class, micinfo.m_dev);
class_destroy:
        class_destroy(micinfo.m_class);
del_m_dev:
        cdev_del(&(micinfo.m_cdev));
unreg_chrdev:
        unregister_chrdev_region(micinfo.m_dev, 2);
//error:
        return result;
}

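/* Size of the SBOX register window mapped below via ioremap_nocache(SBOX_BASE, ...). */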
#define SBOX_MMIO_LENGTH 0x10000

static int micscif_init(void)
{
        int result = 0;
        int i;
        phys_addr_t host_queue_phys;
        phys_addr_t gtt_phys_base;

        pr_debug("HELLO SCIF!\n");

#if defined(CONFIG_ML1OM)
        pr_debug("micscif_init(): Hello KNF!\n");
#elif defined(CONFIG_MK1OM)
        pr_debug("micscif_init(): Hello KNC!\n");
#endif

        if (!scif_id || !scif_addr) {
                printk(KERN_ERR "%s %d scif_id 0x%x scif_addr 0x%lx "
                        "not provided as module parameters. Failing module load.\n",
                        __func__, __LINE__, scif_id, scif_addr);
                return -EINVAL;
        }

        for (i = 1; i <= MAX_BOARD_SUPPORTED; i++) {
                scif_dev[i].sd_state = SCIFDEV_INIT;
                scif_dev[i].sd_node = i;
                scif_dev[i].sd_numa_node = -1;
                mutex_init(&scif_dev[i].sd_lock);
                init_waitqueue_head(&scif_dev[i].sd_mmap_wq);
                init_waitqueue_head(&scif_dev[i].sd_wq);
                init_waitqueue_head(&scif_dev[i].sd_p2p_wq);
                INIT_DELAYED_WORK(&scif_dev[i].sd_p2p_dwork,
                                  scif_poll_qp_state);
                scif_dev[i].sd_p2p_retry = 0;
        }

        // Setup the host node access information
        // Initially only talks to the host => node 0
        scif_dev[SCIF_HOST_NODE].sd_node = SCIF_HOST_NODE;
        scif_dev[SCIF_HOST_NODE].sd_state = SCIFDEV_RUNNING;
        if (!(scif_dev[SCIF_HOST_NODE].mm_sbox =
                ioremap_nocache(SBOX_BASE, SBOX_MMIO_LENGTH))) {
                result = -ENOMEM;
                goto error;
        }
        scif_dev[SCIF_HOST_NODE].scif_ref_cnt = (atomic_long_t) ATOMIC_LONG_INIT(0);
        scif_dev[SCIF_HOST_NODE].scif_map_ref_cnt = 0;
        init_waitqueue_head(&scif_dev[SCIF_HOST_NODE].sd_wq);
        init_waitqueue_head(&scif_dev[SCIF_HOST_NODE].sd_mmap_wq);
        mutex_init(&scif_dev[SCIF_HOST_NODE].sd_lock);
        gtt_phys_base = readl(scif_dev[SCIF_HOST_NODE].mm_sbox + SBOX_GTT_PHY_BASE);
        gtt_phys_base *= ((4) * 1024);
        pr_debug("GTT PHY BASE in GDDR 0x%llx\n", gtt_phys_base);
        pr_debug("micscif_init(): gtt_phy_base 0x%llx\n", gtt_phys_base);

        /* Get handle to DMA device */
        if ((result = open_dma_device(0, 0, &mic_dma_handle)))
                goto unmap_sbox;

        ms_info.mi_nodeid = scif_id;
        ms_info.mi_maxid = scif_id;
        ms_info.mi_total = 2; // Host plus this card

#ifdef RMA_DEBUG
        ms_info.rma_unaligned_cpu_cnt = (atomic_long_t) ATOMIC_LONG_INIT(0);
        ms_info.rma_alloc_cnt = (atomic_long_t) ATOMIC_LONG_INIT(0);
        ms_info.rma_pin_cnt = (atomic_long_t) ATOMIC_LONG_INIT(0);
#ifdef CONFIG_MMU_NOTIFIER
        ms_info.mmu_notif_cnt = (atomic_long_t) ATOMIC_LONG_INIT(0);
#endif
#endif

        pr_debug("micscif_init(): setup_card_qp\n");
        host_queue_phys = scif_addr;
        mutex_init(&ms_info.mi_event_cblock);
        mutex_init(&ms_info.mi_conflock);
        INIT_LIST_HEAD(&ms_info.mi_event_cb);

        pr_debug("micscif_init(): setup_interrupts\n");
        /*
         * Set up the workqueue thread for interrupt handling
         */
        if ((result = micscif_setup_interrupts(&scif_dev[SCIF_HOST_NODE])))
                goto close_dma;

        pr_debug("micscif_init(): host_intr_handler\n");
        if ((result = micscif_setup_card_qp(host_queue_phys, &scif_dev[SCIF_HOST_NODE]))) {
                if (result == -ENXIO)
                        goto uninit_qp;
                else
                        goto destroy_intr_wq;
        }
        /* need to do this last -- as soon as the dev is setup, userspace
         * can try to use the device
         */
        pr_debug("micscif_init(): setup_base\n");
        if ((result = micscif_setup_base()))
                goto uninit_qp;
        /*
         * Register the interrupt
         */
        if ((result = register_scif_intr_handler(&scif_dev[SCIF_HOST_NODE])))
                goto destroy_base;

        // Setup information for self aka loopback.
        scif_dev[ms_info.mi_nodeid].sd_node = ms_info.mi_nodeid;
        scif_dev[ms_info.mi_nodeid].sd_numa_node = mic_host_numa_node;
        scif_dev[ms_info.mi_nodeid].mm_sbox = scif_dev[SCIF_HOST_NODE].mm_sbox;
        scif_dev[ms_info.mi_nodeid].scif_ref_cnt = (atomic_long_t) ATOMIC_LONG_INIT(0);
        scif_dev[ms_info.mi_nodeid].scif_map_ref_cnt = 0;
        init_waitqueue_head(&scif_dev[ms_info.mi_nodeid].sd_wq);
        init_waitqueue_head(&scif_dev[ms_info.mi_nodeid].sd_mmap_wq);
        mutex_init(&scif_dev[ms_info.mi_nodeid].sd_lock);
        if ((result = micscif_setup_loopback_qp(&scif_dev[ms_info.mi_nodeid])))
                goto dereg_intr_handle;
        scif_dev[ms_info.mi_nodeid].sd_state = SCIFDEV_RUNNING;

        unaligned_cache = micscif_kmem_cache_create();
        if (!unaligned_cache) {
                result = -ENOMEM;
                goto destroy_loopb;
        }
        scif_proc_init();
        mic_debug_init();

        pr_debug("micscif_init(): Setup successful: 0x%llx\n", host_queue_phys);

#ifdef CONFIG_MK1OM
        micpm_device_register(&mic_deviceevent);
#endif
        if ((result = register_reboot_notifier(&micscif_reboot_notifier)))
                goto cache_destroy;

#if (LINUX_VERSION_CODE <= KERNEL_VERSION(2,6,34))
        result = pm_qos_add_requirement(PM_QOS_CPU_DMA_LATENCY, "micscif", mic_pm_qos_cpu_dma_lat);
        if (result) {
                printk("%s %d mic_pm_qos_cpu_dma_lat %d result %d\n",
                        __func__, __LINE__, mic_pm_qos_cpu_dma_lat, result);
                result = 0;
                /* Don't fail driver load due to PM QoS API. Fall through */
        }
#endif

        return result;
cache_destroy:
#ifdef CONFIG_MK1OM
        micpm_device_unregister(&mic_deviceevent);
#endif
        micscif_kmem_cache_destroy();
destroy_loopb:
        micscif_destroy_loopback_qp(&scif_dev[ms_info.mi_nodeid]);
dereg_intr_handle:
        deregister_scif_intr_handler(&scif_dev[SCIF_HOST_NODE]);
destroy_base:
        pr_debug("Unable to finish scif setup for some reason: %d\n", result);
        micscif_destroy_base();
uninit_qp:
        micscif_uninit_qp(&scif_dev[SCIF_HOST_NODE]);
destroy_intr_wq:
        micscif_destroy_interrupts(&scif_dev[SCIF_HOST_NODE]);
close_dma:
        close_dma_device(0, &mic_dma_handle);
unmap_sbox:
        iounmap(scif_dev[SCIF_HOST_NODE].mm_sbox);
error:
        return result;
}

module_init(micscif_init);
module_exit(micscif_exit);

module_param_named(huge_page, mic_huge_page_enable, bool, 0600);
MODULE_PARM_DESC(huge_page, "SCIF Huge Page Support");

module_param_named(ulimit, mic_ulimit_check, bool, 0600);
MODULE_PARM_DESC(ulimit, "SCIF ulimit check");

module_param_named(reg_cache, mic_reg_cache_enable, bool, 0600);
MODULE_PARM_DESC(reg_cache, "SCIF registration caching");

module_param_named(p2p, mic_p2p_enable, bool, 0600);
MODULE_PARM_DESC(p2p, "SCIF peer-to-peer");

module_param_named(p2p_proxy, mic_p2p_proxy_enable, bool, 0600);
MODULE_PARM_DESC(p2p_proxy, "SCIF peer-to-peer proxy DMA support");

module_param_named(pm_qos_cpu_dma_lat, mic_pm_qos_cpu_dma_lat, int, 0600);
MODULE_PARM_DESC(pm_qos_cpu_dma_lat, "PM QoS CPU DMA latency in usecs.");

module_param_named(numa_node, mic_host_numa_node, int, 0600);
MODULE_PARM_DESC(numa_node, "Host Numa node to which MIC is attached");

module_param_named(p2p_proxy_thresh, mic_p2p_proxy_thresh, ulong, 0600);
MODULE_PARM_DESC(p2p_proxy_thresh, "Transfer size after which Proxy DMA helps DMA perf");

MODULE_LICENSE("GPL");
MODULE_INFO(build_number, BUILD_NUMBER);
MODULE_INFO(build_bywhom, BUILD_BYWHOM);
MODULE_INFO(build_ondate, BUILD_ONDATE);
MODULE_INFO(build_scmver, BUILD_SCMVER);