* Copyright 2010-2017 Intel Corporation.
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License, version 2,
* as published by the Free Software Foundation.
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* General Public License for more details.
* Disclaimer: The codes contained in these modules may be specific to
* the Intel Software Development Platform codenamed Knights Ferry,
* and the Intel product codenamed Knights Corner, and are not backward
* compatible with other Intel products. Additionally, Intel will NOT
* support the codes or instruction set in future products.
* Intel offers no warranty of any kind regarding the code. This code is
* licensed on an "AS IS" basis and Intel is not obligated to provide
* any support, assistance, installation, training, or other services
* of any kind. Intel is also not obligated to provide any updates,
* enhancements or extensions. Intel specifically disclaims any warranty
* of merchantability, non-infringement, fitness for any particular
* purpose, and any other warranty.
* Further, Intel disclaims all liability of any kind, including but
* not limited to liability for infringement of any proprietary rights,
* relating to the use of the code, even if Intel is notified of the
* possibility of such liability. Except as expressly stated in an Intel
* license agreement provided with this code and agreed upon with Intel,
* no license, express or implied, by estoppel or otherwise, to any
* intellectual property rights is granted herein.
#include <linux/string.h>
#include "mic/micscif_kmem_cache.h"
#include "mic/io_interface.h"
/* Record build provenance (number, author, date, SCM revision) in the
 * module's .modinfo section; values are injected by the build system. */
MODULE_INFO(build_number, BUILD_NUMBER);
MODULE_INFO(build_bywhom, BUILD_BYWHOM);
MODULE_INFO(build_ondate, BUILD_ONDATE);
MODULE_INFO(build_scmver, BUILD_SCMVER);
/* Kernels <= 2.6.34 expose the PM QoS API via pm_qos_params.h. */
#if (LINUX_VERSION_CODE <= KERNEL_VERSION(2,6,34))
#include <linux/pm_qos_params.h>
/* NOTE(review): the matching #endif is not visible in this extract — confirm
 * it exists in the full source. */
/* Slab cache created via micscif_kmem_cache_create() in module init below. */
struct kmem_cache *unaligned_cache;
/* Global Linux-side driver state (pci_driver, cdev, class, dev numbers,
 * ctrl/scif devices) — populated during module init below. */
mic_lindata_t mic_lindata;
/* SCIF tunables exposed as writable (0600) module parameters. */
module_param_named(ulimit, mic_ulimit_check, bool, 0600);
MODULE_PARM_DESC(ulimit, "SCIF ulimit check");
module_param_named(reg_cache, mic_reg_cache_enable, bool, 0600);
MODULE_PARM_DESC(reg_cache, "SCIF registration caching");
module_param_named(huge_page, mic_huge_page_enable, bool, 0600);
MODULE_PARM_DESC(huge_page, "SCIF Huge Page Support");
/* Peer-to-peer and watchdog switches; the backing variables are defined in
 * other translation units, hence the extern declarations. */
extern bool mic_p2p_enable;
module_param_named(p2p, mic_p2p_enable, bool, 0600);
MODULE_PARM_DESC(p2p, "SCIF peer-to-peer");

extern bool mic_p2p_proxy_enable;
module_param_named(p2p_proxy, mic_p2p_proxy_enable, bool, 0600);
MODULE_PARM_DESC(p2p_proxy, "SCIF peer-to-peer proxy DMA support");

extern bool mic_watchdog_enable;
module_param_named(watchdog, mic_watchdog_enable, bool, 0600);
MODULE_PARM_DESC(watchdog, "SCIF Watchdog");

extern bool mic_watchdog_auto_reboot;
module_param_named(watchdog_auto_reboot, mic_watchdog_auto_reboot, bool, 0600);
MODULE_PARM_DESC(watchdog_auto_reboot, "SCIF Watchdog auto reboot");
/* MSI/MSI-x interrupt enable switch. */
module_param_named(msi, mic_msi_enable, bool, 0600);
/* Fix: MODULE_PARM_DESC must name the user-visible parameter ("msi", as
 * registered by module_param_named above), not the backing C variable —
 * otherwise the description attaches to a nonexistent parameter in modinfo. */
MODULE_PARM_DESC(msi, "To enable MSIx in the driver.");
/* Requested CPU DMA latency bound for PM QoS; -1 means "no requirement"
 * (consumed by pm_qos_add_requirement() in module init). */
int mic_pm_qos_cpu_dma_lat = -1;
module_param_named(pm_qos_cpu_dma_lat, mic_pm_qos_cpu_dma_lat, int, 0600);
/* Fix: describe the registered parameter name ("pm_qos_cpu_dma_lat"),
 * not the backing variable, so modinfo shows the description. */
MODULE_PARM_DESC(pm_qos_cpu_dma_lat, "PM QoS CPU DMA latency in usecs.");
/* Frame-count limit consumed by the ramoops component (defined elsewhere). */
extern int ramoops_count;
module_param_named(ramoops_count, ramoops_count, int, 0600);
MODULE_PARM_DESC(ramoops_count, "Maximum frame count for the ramoops driver.");
/* Crash-dump enable switch (backing variable defined elsewhere). */
extern bool mic_crash_dump_enabled;
module_param_named(crash_dump, mic_crash_dump_enabled, bool, 0600);
/* Fix: MODULE_PARM_DESC must use the registered parameter name
 * ("crash_dump"), not the backing variable name. */
MODULE_PARM_DESC(crash_dump, "MIC Crash Dump enabled.");
/* Size in bytes of the file backing @fp, read from its inode via
 * i_size_read(). @fp is evaluated once but must be a valid struct file *. */
#define GET_FILE_SIZE_FROM_INODE(fp) i_size_read((fp)->f_path.dentry->d_inode)
/*
 * mic_open - char-device open entry point.
 * Reads the device number from the inode and hands the open off to either
 * the SCIF layer (scif_fdopen) or the PSMI layer (mic_psmi_open).
 * NOTE(review): the return type and the dispatch logic (presumably a switch
 * on the minor of @dev) are missing from this extract — restore before build.
 */
mic_open(struct inode *inode, struct file *filp)
dev_t dev = inode->i_rdev;
return scif_fdopen(filp);
return mic_psmi_open(filp);
/*
 * mic_release - char-device release (close) entry point.
 * Tears down fasync notification state when this filp registered it
 * (private_data == filp is the marker set in mic_fasync), then defers to
 * scif_fdclose() for SCIF descriptors.
 * NOTE(review): return type, braces and minor-number dispatch are missing
 * from this extract — restore before build.
 */
mic_release(struct inode *inode, struct file *filp)
dev_t dev = inode->i_rdev;
/* Only the filp that owns the fasync registration removes it. */
if (filp->private_data == filp) {
rc = fasync_helper(-1, filp, 0, &mic_data.dd_fasync);
mic_data.dd_fasync = NULL;
return scif_fdclose(filp);
/* Defined in the PSMI component; copies PSMI data into the user buffer.
 * Declared here so mic_read() below can forward to it. */
extern ssize_t
mic_psmi_read(struct file *filp, char __user *buf, size_t count, loff_t *pos);
/*
 * mic_read - char-device read entry point; forwards to mic_psmi_read().
 * NOTE(review): return type and any dispatch on the minor of @dev are
 * missing from this extract — restore before build.
 */
mic_read(struct file * filp, char __user *buf,
	 size_t count, loff_t *pos)
dev_t dev = filp->f_path.dentry->d_inode->i_rdev;
return mic_psmi_read(filp, buf, count, pos);
/*
 * mic_ioctl - unlocked_ioctl entry point (wired into mic_fops below).
 * Routes SCIF descriptors to scif_process_ioctl() and other requests to
 * adapter_do_ioctl().
 * NOTE(review): return type, local declarations and the dispatch between
 * the two calls are missing from this extract — restore before build.
 */
mic_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
dev = filp->f_path.dentry->d_inode->i_rdev;
return scif_process_ioctl(filp, cmd, arg);
status = adapter_do_ioctl(cmd, arg);
/*
 * mic_fasync - fasync entry point: (de)register async notification.
 * On registration, sets SIGIO ownership to the current task and marks this
 * filp as the fasync owner by pointing private_data at itself (checked in
 * mic_release); on removal, clears private_data.
 * NOTE(review): return type and the on/off branching are missing from this
 * extract — restore before build.
 */
mic_fasync(int fd, struct file *filp, int on)
if ((rc = fasync_helper(fd, filp, on, &mic_data.dd_fasync)) < 0) {
/* __f_setown() returned int before 3.18 and void afterwards, hence the
 * two call forms below (the #else/#endif are not visible in this extract). */
#if (LINUX_VERSION_CODE < KERNEL_VERSION(3, 18, 0))
rc = __f_setown(filp, task_pid(current), PIDTYPE_PID, 0);
__f_setown(filp, task_pid(current), PIDTYPE_PID, 0);
filp->private_data = filp;
filp->private_data = NULL;
/*
 * mic_mmap - mmap entry point; forwards to the SCIF layer.
 * NOTE(review): return type and any minor-number dispatch are missing from
 * this extract — restore before build.
 */
mic_mmap(struct file *f, struct vm_area_struct *vma)
dev_t dev = f->f_path.dentry->d_inode->i_rdev;
return micscif_mmap(f, vma);
/*
 * mic_poll - poll entry point; forwards to the SCIF layer.
 * NOTE(review): return type and any minor-number dispatch are missing from
 * this extract — restore before build.
 */
mic_poll(struct file *f, poll_table *wait)
dev_t dev = f->f_path.dentry->d_inode->i_rdev;
return micscif_poll(f, wait);
/*
 * mic_flush - flush entry point; forwards to the SCIF layer.
 * NOTE(review): return type and any minor-number dispatch are missing from
 * this extract — restore before build.
 */
mic_flush(struct file *f, fl_owner_t id)
dev_t dev = f->f_path.dentry->d_inode->i_rdev;
return micscif_flush(f, id);
/*
 * mic_irq_isr - interrupt handler registered via request_irq() in mic_probe.
 * @data is the board's mic_ctx_t. When MSI(-x) is enabled (ctx->msie) the
 * MSI service routine runs; otherwise the legacy ISR, whose negative return
 * presumably means "not our interrupt".
 * NOTE(review): return type, IRQ_HANDLED/IRQ_NONE returns and the body of
 * the else-if are missing from this extract — restore before build.
 */
mic_irq_isr(int irq, void *data)
if (((mic_ctx_t *)data)->msie)
adapter_imsr((mic_ctx_t *)data);
else if (adapter_isr((mic_ctx_t *)data) < 0 ){
/* sysfs attribute groups and the PSMI page-table binary attribute; all are
 * defined in sibling translation units and attached in mic_probe()/init. */
extern struct attribute_group bd_attr_group;
extern struct attribute_group host_attr_group;
extern struct attribute_group scif_attr_group;
extern struct attribute_group psmi_attr_group;
extern struct bin_attribute mic_psmi_ptes_attr;
/*
 * mic_probe - PCI probe callback (wired into mic_lindata.dd_pcidriver).
 * Allocates per-board state, enables the PCI device, sets 64-bit DMA masks
 * (falling back to a 32-bit coherent mask), reserves the MMIO and aperture
 * BARs, sets up MSI-x or legacy interrupts, initializes the adapter, creates
 * the per-board sysfs device and attribute groups, and links the board into
 * the global board list.
 * NOTE(review): return type, braces, error-path control flow (the iounmap/
 * release_mem_region calls at the end are presumably unwind labels) and
 * several call arguments are missing from this extract — restore before build.
 */
mic_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
int brdnum = mic_data.dd_numdevs;
if ((bd_info = (bd_info_t *)kzalloc(sizeof(bd_info_t), GFP_KERNEL)) == NULL) {
printk("MIC: probe failed allocating memory for bd_info\n");
mic_ctx = &bd_info->bi_ctx;
mic_ctx->bd_info = bd_info;
mic_data.dd_bi[brdnum] = bd_info;
if ((err = pci_enable_device(pdev))) {
printk("pci_enable failed board #%d\n", brdnum);
err = pci_reenable_device(pdev);
/* 64-bit streaming DMA; coherent mask falls back from 64 to 32 bits. */
err = pci_set_dma_mask(pdev, DMA_BIT_MASK(64));
printk("mic %d: ERROR DMA not available\n", brdnum);
err = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64));
printk("mic %d: ERROR pci_set_consistent_dma_mask(64) %d\n", brdnum, err);
err = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(32));
printk("mic %d: ERROR pci_set_consistent_dma_mask(32) %d\n", brdnum, err);
// Allocate bar 4 for MMIO and GTT
bd_info->bi_ctx.mmio.pa = pci_resource_start(pdev, DLDR_MMIO_BAR);
bd_info->bi_ctx.mmio.len = pci_resource_len(pdev, DLDR_MMIO_BAR);
if (request_mem_region(bd_info->bi_ctx.mmio.pa,
bd_info->bi_ctx.mmio.len, "mic") == NULL) {
printk("mic %d: failed to reserve mmio space\n", brdnum);
// Allocate bar 0 for access Aperture
bd_info->bi_ctx.aper.pa = pci_resource_start(pdev, DLDR_APT_BAR);
bd_info->bi_ctx.aper.len = pci_resource_len(pdev, DLDR_APT_BAR);
if (request_mem_region(bd_info->bi_ctx.aper.pa,
bd_info->bi_ctx.aper.len, "mic") == NULL) {
printk("mic %d: failed to reserve aperture space\n", brdnum);
for (i = 0; i < MIC_NUM_MSIX_ENTRIES; i++)
bd_info->bi_msix_entries[i].entry = i;
/* NOTE(review): pci_enable_msix()'s vector-count argument is missing
 * from this extract. */
err = pci_enable_msix(mic_ctx->bi_pdev, bd_info->bi_msix_entries,
// Only support 1 MSIx for now
err = request_irq(bd_info->bi_msix_entries[0].vector,
mic_irq_isr, 0, "mic", mic_ctx);
printk("MIC: Error in request_irq %d\n", err);
// TODO: this needs to be hardened and actually return errors
if ((err = adapter_init_device(mic_ctx)) != 0) {
printk("MIC: Adapter init device failed %d\n", err);
set_sysfs_entries(mic_ctx);
/* Minors 0 ("ctrl") and 1 ("scif") are global; boards start at +2. */
bd_info->bi_sysfsdev = device_create(mic_lindata.dd_class, &pdev->dev,
mic_lindata.dd_dev + 2 + mic_ctx->bd_info->bi_ctx.bi_id,
NULL, "mic%d", mic_ctx->bd_info->bi_ctx.bi_id);
err = sysfs_create_group(&mic_ctx->bd_info->bi_sysfsdev->kobj, &bd_attr_group);
/* NOTE(review): sysfs_get_dirent()'s name argument is missing from this
 * extract (the signature changed across the 2.6.35..3.14 range guarded
 * below). */
mic_ctx->sysfs_state = sysfs_get_dirent(mic_ctx->bd_info->bi_sysfsdev->kobj.sd,
#if (LINUX_VERSION_CODE > KERNEL_VERSION(2,6,35) && LINUX_VERSION_CODE < KERNEL_VERSION(3,14,0))
dev_set_drvdata(mic_ctx->bd_info->bi_sysfsdev, mic_ctx);
/* Legacy (non-MSI-x) shared interrupt path. */
if ((err = request_irq(mic_ctx->bi_pdev->irq, mic_irq_isr,
IRQF_SHARED, "mic", mic_ctx)) != 0) {
printk("MIC: Error in request_irq %d\n", err);
adapter_probe(&bd_info->bi_ctx);
if (mic_ctx->bi_psmi.enabled) {
/* NOTE(review): trailing arguments (presumably &psmi_attr_group and
 * &mic_psmi_ptes_attr) are missing from this extract. */
err = sysfs_create_group(&mic_ctx->bd_info->bi_sysfsdev->kobj,
err = device_create_bin_file(mic_ctx->bd_info->bi_sysfsdev,
adapter_wait_reset(mic_ctx);
// Adding a board instance so increment the total number of MICs in the system.
list_add_tail(&bd_info->bi_list, &mic_data.dd_bdlist);
printk("mic_probe %d:%d:%d as board #%d\n", pdev->bus->number,
PCI_SLOT(pdev->devfn), PCI_FUNC(pdev->devfn), brdnum);
/* Block until the aperture ioremap worker finishes or reset fails. */
wait_event(mic_ctx->ioremapwq, mic_ctx->aper.va
|| mic_ctx->state == MIC_RESETFAIL);
/* Unwind: unmap and release both BAR regions. */
iounmap((void *)bd_info->bi_ctx.aper.va);
iounmap((void *)bd_info->bi_ctx.mmio.va);
release_mem_region(bd_info->bi_ctx.aper.pa, bd_info->bi_ctx.aper.len);
release_mem_region(bd_info->bi_ctx.mmio.pa, bd_info->bi_ctx.mmio.len);
/*
 * mic_remove - PCI remove callback; undoes mic_probe() in reverse order:
 * sysfs teardown, device_destroy, adapter stop + wait-for-reset, interrupt
 * teardown (legacy or MSI-x), adapter removal, BAR release, and PCI disable.
 * NOTE(review): return type, braces, how brdnum maps to the board being
 * removed, and the body of the numdevs guard are missing from this extract —
 * restore before build.
 */
mic_remove(struct pci_dev *pdev)
if (mic_data.dd_numdevs - 1 < 0)
brdnum = mic_data.dd_numdevs;
/* Make sure boards are shutdown and not available. */
bd_info = mic_data.dd_bi[brdnum];
/* Drop the cached sysfs dirent under the lock so concurrent sysfs users
 * can't see a stale pointer. */
spin_lock_bh(&bd_info->bi_ctx.sysfs_lock);
sysfs_put(bd_info->bi_ctx.sysfs_state);
bd_info->bi_ctx.sysfs_state = NULL;
spin_unlock_bh(&bd_info->bi_ctx.sysfs_lock);
if (bd_info->bi_ctx.bi_psmi.enabled) {
device_remove_bin_file(bd_info->bi_sysfsdev, &mic_psmi_ptes_attr);
sysfs_remove_group(&bd_info->bi_sysfsdev->kobj, &psmi_attr_group);
sysfs_remove_group(&bd_info->bi_sysfsdev->kobj, &bd_attr_group);
free_sysfs_entries(&bd_info->bi_ctx);
device_destroy(mic_lindata.dd_class,
mic_lindata.dd_dev + 2 + bd_info->bi_ctx.bi_id);
adapter_stop_device(&bd_info->bi_ctx, 1, 0);
/*
 * Need to wait for reset since accessing the card while GDDR training
 * is ongoing by adapter_remove(..) below for example can be fatal.
 */
wait_for_reset(&bd_info->bi_ctx);
mic_disable_interrupts(&bd_info->bi_ctx);
/* Legacy IRQ vs MSI-x teardown (else-branch scaffolding not visible). */
if (!bd_info->bi_ctx.msie) {
free_irq(bd_info->bi_ctx.bi_pdev->irq, &bd_info->bi_ctx);
free_irq(bd_info->bi_msix_entries[0].vector, &bd_info->bi_ctx);
pci_disable_msix(bd_info->bi_ctx.bi_pdev);
adapter_remove(&bd_info->bi_ctx);
release_mem_region(bd_info->bi_ctx.aper.pa, bd_info->bi_ctx.aper.len);
release_mem_region(bd_info->bi_ctx.mmio.pa, bd_info->bi_ctx.mmio.len);
pci_disable_device(bd_info->bi_ctx.bi_pdev);
/*
 * mic_shutdown - PCI shutdown callback (wired into dd_pcidriver.shutdown):
 * stops the adapter without waiting for reset or reattempting.
 * NOTE(review): return type and any null-check of mic_ctx are missing from
 * this extract — restore before build.
 */
mic_shutdown(struct pci_dev *pdev) {
mic_ctx = get_device_context(pdev);
adapter_stop_device(mic_ctx, !RESET_WAIT, !RESET_REATTEMPT);
/* Char-device file operations; registered via cdev_init() in module init.
 * NOTE(review): only .unlocked_ioctl is visible — the remaining members
 * (.open/.release/.read/.mmap/.poll/.flush/.fasync, matching the handlers
 * above) and the closing brace are missing from this extract. */
static const struct file_operations mic_fops = {
.unlocked_ioctl = mic_ioctl,
/* PM callbacks for the PCI driver (dd_pcidriver.driver.pm). freeze/restore
 * (hibernate) reuse the suspend/resume _noirq handlers.
 * NOTE(review): closing brace and any further members are missing from this
 * extract. */
static const struct dev_pm_ops pci_dev_pm_ops = {
.suspend = micpm_suspend,
.suspend_noirq = micpm_suspend_noirq,
.resume_noirq = micpm_resume_noirq,
.freeze_noirq = micpm_suspend_noirq,
.restore_noirq = micpm_resume_noirq,
/* PM notifier registered/unregistered in module init/exit below.
 * NOTE(review): "notifer" is a typo for "notifier" but is referenced by
 * name elsewhere in this file — rename all uses together or leave as-is.
 * Closing brace is missing from this extract. */
static struct notifier_block mic_pm_notifer = {
.notifier_call = micpm_notifier_block,
/* PCI IDs this driver binds to: KNF (ABR) and KNC device IDs, any
 * subvendor/subdevice.
 * NOTE(review): each entry's trailing fields (class, class_mask,
 * driver_data), the terminating all-zero sentinel entry, and the closing
 * brace are missing from this extract — restore before build. */
static struct pci_device_id mic_pci_tbl[] = {
{ PCI_VENDOR_ID_INTEL, PCI_DEVICE_ABR_2249, PCI_ANY_ID, PCI_ANY_ID,
{ PCI_VENDOR_ID_INTEL, PCI_DEVICE_ABR_224a, PCI_ANY_ID, PCI_ANY_ID,
{ PCI_VENDOR_ID_INTEL, PCI_DEVICE_KNC_2250, PCI_ANY_ID, PCI_ANY_ID,
{ PCI_VENDOR_ID_INTEL, PCI_DEVICE_KNC_2251, PCI_ANY_ID, PCI_ANY_ID,
{ PCI_VENDOR_ID_INTEL, PCI_DEVICE_KNC_2252, PCI_ANY_ID, PCI_ANY_ID,
{ PCI_VENDOR_ID_INTEL, PCI_DEVICE_KNC_2253, PCI_ANY_ID, PCI_ANY_ID,
{ PCI_VENDOR_ID_INTEL, PCI_DEVICE_KNC_2254, PCI_ANY_ID, PCI_ANY_ID,
{ PCI_VENDOR_ID_INTEL, PCI_DEVICE_KNC_2255, PCI_ANY_ID, PCI_ANY_ID,
{ PCI_VENDOR_ID_INTEL, PCI_DEVICE_KNC_2256, PCI_ANY_ID, PCI_ANY_ID,
{ PCI_VENDOR_ID_INTEL, PCI_DEVICE_KNC_2257, PCI_ANY_ID, PCI_ANY_ID,
{ PCI_VENDOR_ID_INTEL, PCI_DEVICE_KNC_2258, PCI_ANY_ID, PCI_ANY_ID,
{ PCI_VENDOR_ID_INTEL, PCI_DEVICE_KNC_2259, PCI_ANY_ID, PCI_ANY_ID,
{ PCI_VENDOR_ID_INTEL, PCI_DEVICE_KNC_225a, PCI_ANY_ID, PCI_ANY_ID,
{ PCI_VENDOR_ID_INTEL, PCI_DEVICE_KNC_225b, PCI_ANY_ID, PCI_ANY_ID,
{ PCI_VENDOR_ID_INTEL, PCI_DEVICE_KNC_225c, PCI_ANY_ID, PCI_ANY_ID,
{ PCI_VENDOR_ID_INTEL, PCI_DEVICE_KNC_225d, PCI_ANY_ID, PCI_ANY_ID,
{ PCI_VENDOR_ID_INTEL, PCI_DEVICE_KNC_225e, PCI_ANY_ID, PCI_ANY_ID,
/* devnode callback: places device nodes under /dev/mic/<name>. Compiled
 * only on kernels > 2.6.31; MODE_T abstracts the mode_t/umode_t signature
 * change at 3.3.0.
 * NOTE(review): return type (char *) and the #endif pair for these two #if
 * guards are missing from this extract — restore before build. */
#if LINUX_VERSION_CODE > KERNEL_VERSION(2,6,31)
#if LINUX_VERSION_CODE >= KERNEL_VERSION(3,3,0)
mic_devnode(struct device *dev, MODE_T *mode)
return kasprintf(GFP_KERNEL, "mic/%s", dev_name(dev));
/*
 * Module initialization sequence (the enclosing function's header is not
 * visible in this extract — presumably the module_init entry point):
 * create the SCIF slab cache, fill in the pci_driver, allocate the char-dev
 * region and cdev, create the "mic" class and the global "ctrl"/"scif"
 * devices with their sysfs groups, bring up virtual ethernet and the PCI
 * driver, then per-device post-probe work, vcons, PM disconnect/notifier,
 * and (on old kernels) a PM QoS requirement. The device_destroy/class_
 * destroy/cdev_del tail is presumably the error-unwind path.
 */
unaligned_cache = micscif_kmem_cache_create();
mic_lindata.dd_pcidriver.name = "mic";
mic_lindata.dd_pcidriver.id_table = mic_pci_tbl;
mic_lindata.dd_pcidriver.probe = mic_probe;
mic_lindata.dd_pcidriver.remove = mic_remove;
mic_lindata.dd_pcidriver.driver.pm = &pci_dev_pm_ops;
mic_lindata.dd_pcidriver.shutdown = mic_shutdown;
/* NOTE(review): the "!= 0" binds inside the assignment, so ret receives
 * the comparison result (0/1) rather than the errno — the printk below
 * reports 1, not the real error. Should be ((ret = ...) != 0). */
if ((ret = alloc_chrdev_region(&mic_lindata.dd_dev,
0, MAX_DLDR_MINORS, "mic") != 0)) {
printk("Error allocating device nodes: %d\n", ret);
cdev_init(&mic_lindata.dd_cdev, &mic_fops);
mic_lindata.dd_cdev.owner = THIS_MODULE;
mic_lindata.dd_cdev.ops = &mic_fops;
/* NOTE(review): same "!= 0" precedence problem as above. */
if ((ret = cdev_add(&mic_lindata.dd_cdev,
mic_lindata.dd_dev, MAX_DLDR_MINORS) != 0)) {
kobject_put(&mic_lindata.dd_cdev.kobj);
mic_lindata.dd_class = class_create(THIS_MODULE, "mic");
if (IS_ERR(mic_lindata.dd_class)) {
printk("MICDLDR: Error createing mic class\n");
cdev_del(&mic_lindata.dd_cdev);
ret = PTR_ERR(mic_lindata.dd_class);
#if LINUX_VERSION_CODE > KERNEL_VERSION(2,6,31)
/* Route the "ctrl"/"scif" nodes through /dev/mic/ via mic_devnode. */
mic_lindata.dd_class->devnode = mic_devnode;
mic_lindata.dd_hostdev = device_create(mic_lindata.dd_class, NULL,
mic_lindata.dd_dev, NULL, "ctrl");
mic_lindata.dd_scifdev = device_create(mic_lindata.dd_class, NULL,
mic_lindata.dd_dev + 1, NULL, "scif");
ret = sysfs_create_group(&mic_lindata.dd_hostdev->kobj, &host_attr_group);
ret = sysfs_create_group(&mic_lindata.dd_scifdev->kobj, &scif_attr_group);
#if LINUX_VERSION_CODE > KERNEL_VERSION(2,6,31)
/* Subsequent per-board nodes are created directly as "mic%d". */
mic_lindata.dd_class->devnode = NULL;
if (micveth_init(mic_lindata.dd_hostdev))
printk(KERN_ERR "%s: micveth_init failed\n", __func__);
ret = pci_register_driver(&mic_lindata.dd_pcidriver);
printk("mic: failed to register pci driver %d\n", ret);
if (!mic_data.dd_numdevs) {
printk("mic: No MIC boards present. SCIF available in loopback mode\n");
printk("mic: number of devices detected %d \n", mic_data.dd_numdevs);
/* Wait for each board's aperture ioremap worker, then retire the
 * workqueue. */
for (i = 0; i < mic_data.dd_numdevs; i++) {
mic_ctx_t *mic_ctx = get_per_dev_ctx(i);
wait_event(mic_ctx->ioremapwq,
mic_ctx->aper.va || mic_ctx->state == MIC_RESETFAIL);
destroy_workqueue(mic_ctx->ioremapworkq);
micveth_init_legacy(mic_data.dd_numdevs, mic_lindata.dd_hostdev);
micvcons_create(mic_data.dd_numdevs);
/* Initialize Data structures for PM Disconnect */
ret = micpm_disconn_init(mic_data.dd_numdevs + 1);
printk(KERN_ERR "%s: Failed to initialize PM disconnect"
" data structures. PM may not work as expected."
" ret = %d\n", __func__, ret);
register_pm_notifier(&mic_pm_notifer);
/* Init Power Management Framework */
#if (LINUX_VERSION_CODE <= KERNEL_VERSION(2,6,34))
ret = pm_qos_add_requirement(PM_QOS_CPU_DMA_LATENCY,
"mic", mic_pm_qos_cpu_dma_lat);
printk(KERN_ERR "%s %d mic_pm_qos_cpu_dma_lat %d ret %d\n",
__func__, __LINE__, mic_pm_qos_cpu_dma_lat, ret);
/* Dont fail driver load due to PM QoS API. Fall through */
/* Error-unwind tail (labels not visible in this extract). */
device_destroy(mic_lindata.dd_class, mic_lindata.dd_dev + 1);
device_destroy(mic_lindata.dd_class, mic_lindata.dd_dev);
class_destroy(mic_lindata.dd_class);
cdev_del(&mic_lindata.dd_cdev);
unregister_pm_notifier(&mic_pm_notifer);
unregister_chrdev_region(mic_lindata.dd_dev, MAX_DLDR_MINORS);
/*
 * Module exit sequence (the enclosing function's header is not visible in
 * this extract — presumably the module_exit entry point): tear down vcons,
 * unregister the PCI driver and PM state, destroy the slab cache, then
 * remove the global devices, class, cdev, chardev region, and PM notifier —
 * the reverse of the init sequence above.
 */
/* Close endpoints related to reverse registration */
micvcons_destroy(mic_data.dd_numdevs);
pci_unregister_driver(&mic_lindata.dd_pcidriver);
/* Uninit data structures for PM disconnect */
micpm_disconn_uninit(mic_data.dd_numdevs + 1);
/* Uninit Power Management Framework (old-kernel PM QoS API only). */
#if (LINUX_VERSION_CODE <= KERNEL_VERSION(2,6,34))
pm_qos_remove_requirement(PM_QOS_CPU_DMA_LATENCY, "mic");
micscif_kmem_cache_destroy();
device_destroy(mic_lindata.dd_class, mic_lindata.dd_dev + 1);
device_destroy(mic_lindata.dd_class, mic_lindata.dd_dev);
class_destroy(mic_lindata.dd_class);
cdev_del(&mic_lindata.dd_cdev);
unregister_chrdev_region(mic_lindata.dd_dev, MAX_DLDR_MINORS);
unregister_pm_notifier(&mic_pm_notifer);
/*
 * set_sysfs_entries - reset the board's sysfs_info block to all-zeros
 * (called from mic_probe before the sysfs device is created).
 * NOTE(review): return type and braces are missing from this extract.
 */
set_sysfs_entries(mic_ctx_t *mic_ctx)
memset(&mic_ctx->sysfs_info, 0, sizeof(mic_ctx->sysfs_info));
/*
 * free_sysfs_entries - release buffers referenced from the board context:
 * the boot image and the two cmdline strings (called from mic_remove).
 * NOTE(review): return type and braces are missing from this extract; the
 * "!= NULL" guards are redundant since kfree(NULL) is a no-op, and the
 * pointers are not NULLed after free — consider hardening in the full file.
 */
free_sysfs_entries(mic_ctx_t *mic_ctx)
if (mic_ctx->image != NULL)
kfree(mic_ctx->image); /* mic_ctx->initramfs points into this buffer */
if (mic_ctx->sysfs_info.cmdline != NULL)
kfree(mic_ctx->sysfs_info.cmdline);
if (mic_ctx->sysfs_info.kernel_cmdline != NULL)
kfree(mic_ctx->sysfs_info.kernel_cmdline);
/*
 * get_per_dev_ctx - return the board context for board index @node.
 * No bounds check on @node (see TODO below); callers must pass a valid
 * index even though dd_numdevs may not yet be settled.
 * NOTE(review): return type (mic_ctx_t *) and braces are missing from this
 * extract.
 */
get_per_dev_ctx(uint16_t node)
/* TODO: Its important to check the upper bound of the dd_bi array as well.
 * Cannot be done currently since not all calling functions to get_per_dev_ctx
 * has the dd_numdevs set correctly. (See mic_ctx_map_single call in adapter_init_device
 * thats callled even before dd_numdevs is incremented. */
return &mic_data.dd_bi[node]->bi_ctx;
/*
 * get_num_devs - copy the global device count to the user pointer
 * @num_devs (ioctl helper); @mic_ctx is unused in the visible code.
 * NOTE(review): return type, braces and the error return taken when
 * copy_to_user() fails are missing from this extract.
 */
get_num_devs(mic_ctx_t *mic_ctx, uint32_t *num_devs)
if (copy_to_user(num_devs, &mic_data.dd_numdevs, sizeof(uint32_t)))
/*
 * mic_get_file_size - open @fn, read its size from the inode, and
 * presumably store it through @file_len (the store is not visible here).
 * NOTE(review): return type, braces, the set_fs/get_fs dance around the
 * saved fs segment, the *file_len assignment, and error returns are missing
 * from this extract — restore before build.
 */
mic_get_file_size(const char* fn, uint32_t* file_len)
mm_segment_t fs = get_fs();
if (!fn || IS_ERR(filp = filp_open(fn, 0, 0))) {
filp_size = GET_FILE_SIZE_FROM_INODE(filp);
filp_close(filp, current->files);
/* Loads the file @fn from disk into @buffer (PCI-visible memory per the
 * original comment); reads the whole file and checks for a short read.
 * NOTE(review): return type, braces, the set_fs() save/restore around
 * vfs_read, and error returns are missing from this extract. Also, the
 * visible code reads filp_size bytes without comparing against @max_size —
 * confirm the bounds check exists in the stripped portion. */
mic_load_file(const char* fn, uint8_t* buffer, uint32_t max_size)
loff_t filp_size, pos = 0;
mm_segment_t fs = get_fs();
if (!fn || IS_ERR(filp = filp_open(fn, 0, 0))) {
filp_size = GET_FILE_SIZE_FROM_INODE(filp);
c = vfs_read(filp, buffer, filp_size, &pos);
/* Short read => treat as failure. */
if(c != (long)filp_size) {
filp_close(filp, current->files);