| 1 | /* |
| 2 | * Copyright 2010-2017 Intel Corporation. |
| 3 | * |
| 4 | * This program is free software; you can redistribute it and/or modify |
| 5 | * it under the terms of the GNU General Public License, version 2, |
| 6 | * as published by the Free Software Foundation. |
| 7 | * |
| 8 | * This program is distributed in the hope that it will be useful, |
| 9 | * but WITHOUT ANY WARRANTY; without even the implied warranty of |
| 10 | * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU |
| 11 | * General Public License for more details. |
| 12 | * |
| 13 | * Disclaimer: The codes contained in these modules may be specific to |
| 14 | * the Intel Software Development Platform codenamed Knights Ferry, |
| 15 | * and the Intel product codenamed Knights Corner, and are not backward |
| 16 | * compatible with other Intel products. Additionally, Intel will NOT |
| 17 | * support the codes or instruction set in future products. |
| 18 | * |
| 19 | * Intel offers no warranty of any kind regarding the code. This code is |
| 20 | * licensed on an "AS IS" basis and Intel is not obligated to provide |
| 21 | * any support, assistance, installation, training, or other services |
| 22 | * of any kind. Intel is also not obligated to provide any updates, |
| 23 | * enhancements or extensions. Intel specifically disclaims any warranty |
| 24 | * of merchantability, non-infringement, fitness for any particular |
| 25 | * purpose, and any other warranty. |
| 26 | * |
| 27 | * Further, Intel disclaims all liability of any kind, including but |
| 28 | * not limited to liability for infringement of any proprietary rights, |
| 29 | * relating to the use of the code, even if Intel is notified of the |
| 30 | * possibility of such liability. Except as expressly stated in an Intel |
| 31 | * license agreement provided with this code and agreed upon with Intel, |
| 32 | * no license, express or implied, by estoppel or otherwise, to any |
| 33 | * intellectual property rights is granted herein. |
| 34 | */ |
| 35 | |
| 36 | #ifndef MICSCIF_NM_H |
| 37 | #define MICSCIF_NM_H |
| 38 | |
| 39 | #include <scif.h> |
| 40 | |
| 41 | #ifdef MIC_IS_EMULATION |
| 42 | #define DEFAULT_WATCHDOG_TO (INT_MAX) |
| 43 | #define NODE_ALIVE_TIMEOUT (INT_MAX) |
| 44 | #define NODE_QP_TIMEOUT (INT_MAX) |
| 45 | #define NODE_ACCEPT_TIMEOUT (INT_MAX) |
| 46 | #define NODEQP_SEND_TO_MSEC (INT_MAX) |
| 47 | #else |
| 48 | #define DEFAULT_WATCHDOG_TO (30) |
| 49 | #define NODE_ALIVE_TIMEOUT (ms_info.mi_watchdog_to * HZ) |
| 50 | #define NODE_QP_TIMEOUT (100) |
| 51 | #define NODE_ACCEPT_TIMEOUT (3 * HZ) |
| 52 | #define NODEQP_SEND_TO_MSEC (3 * 1000) |
| 53 | #endif |
| 54 | |
| 55 | #define SCIF_ENABLE_PM 1 |
| 56 | |
| 57 | #define DESTROY_WQ (true) |
| 58 | |
/*
 * Reason codes passed to micscif_disconnect_node() when a node is
 * removed from the SCIF network.
 */
enum disconn_type {
	DISCONN_TYPE_POWER_MGMT = 0,
	DISCONN_TYPE_LOST_NODE = 1,
	DISCONN_TYPE_MAINTENANCE_MODE = 2,
};
| 64 | |
| 65 | /* |
| 66 | * Notify the host about a new dependency with the remote SCIF device. |
| 67 | * Dependencies are created during scif_mmap()/scif_get_pages(). |
| 68 | */ |
| 69 | void micscif_create_node_dep(struct micscif_dev *dev, int nr_pages); |
| 70 | |
| 71 | /* |
| 72 | * Notify the host that an existing dependency with the remote SCIF |
| 73 | * device no longer exists. |
| 74 | */ |
| 75 | void micscif_destroy_node_dep(struct micscif_dev *dev, int nr_pages); |
| 76 | |
| 77 | /** |
| 78 | * micscif_inc_node_refcnt: |
| 79 | * |
| 80 | * @dev: Remote SCIF device. |
| 81 | * @count: ref count |
| 82 | * |
| 83 | * Increment the global activity ref count for the remote SCIF device. |
| 84 | * If the remote SCIF device is idle, then notify the host to wake up |
| 85 | * the remote SCIF device and then wait for an ACK. |
| 86 | */ |
| 87 | static __always_inline void |
| 88 | micscif_inc_node_refcnt(struct micscif_dev *dev, long cnt) |
| 89 | { |
| 90 | #ifdef SCIF_ENABLE_PM |
| 91 | if (unlikely(dev && !atomic_long_add_unless(&dev->scif_ref_cnt, |
| 92 | cnt, SCIF_NODE_IDLE))) { |
| 93 | /* |
| 94 | * This code path would not be entered unless the remote |
| 95 | * SCIF device has actually been put to sleep by the host. |
| 96 | */ |
| 97 | mutex_lock(&dev->sd_lock); |
| 98 | if (SCIFDEV_STOPPED == dev->sd_state || |
| 99 | SCIFDEV_STOPPING == dev->sd_state || |
| 100 | SCIFDEV_INIT == dev->sd_state) |
| 101 | goto bail_out; |
| 102 | if (test_bit(SCIF_NODE_MAGIC_BIT, |
| 103 | &dev->scif_ref_cnt.counter)) { |
| 104 | /* Notify host that the remote node must be woken */ |
| 105 | struct nodemsg notif_msg; |
| 106 | |
| 107 | dev->sd_wait_status = OP_IN_PROGRESS; |
| 108 | notif_msg.uop = SCIF_NODE_WAKE_UP; |
| 109 | notif_msg.src.node = ms_info.mi_nodeid; |
| 110 | notif_msg.dst.node = SCIF_HOST_NODE; |
| 111 | notif_msg.payload[0] = dev->sd_node; |
| 112 | /* No error handling for Host SCIF device */ |
| 113 | micscif_nodeqp_send(&scif_dev[SCIF_HOST_NODE], |
| 114 | ¬if_msg, NULL); |
| 115 | /* |
| 116 | * A timeout is not required since only the cards can |
| 117 | * initiate this message. The Host is expected to be alive. |
| 118 | * If the host has crashed then so will the cards. |
| 119 | */ |
| 120 | wait_event(dev->sd_wq, |
| 121 | dev->sd_wait_status != OP_IN_PROGRESS); |
| 122 | /* |
| 123 | * Aieee! The host could not wake up the remote node. |
| 124 | * Bail out for now. |
| 125 | */ |
| 126 | if (dev->sd_wait_status == OP_COMPLETED) { |
| 127 | dev->sd_state = SCIFDEV_RUNNING; |
| 128 | clear_bit(SCIF_NODE_MAGIC_BIT, |
| 129 | &dev->scif_ref_cnt.counter); |
| 130 | } |
| 131 | } |
| 132 | /* The ref count was not added if the node was idle. */ |
| 133 | atomic_long_add(cnt, &dev->scif_ref_cnt); |
| 134 | bail_out: |
| 135 | mutex_unlock(&dev->sd_lock); |
| 136 | } |
| 137 | #endif |
| 138 | } |
| 139 | |
| 140 | /** |
| 141 | * micscif_dec_node_refcnt: |
| 142 | * |
| 143 | * @dev: Remote SCIF device. |
| 144 | * @nr_pages: number of pages |
| 145 | * |
| 146 | * Decrement the global activity ref count for the remote SCIF device. |
| 147 | * Assert if the ref count drops to negative. |
| 148 | */ |
| 149 | static __always_inline void |
| 150 | micscif_dec_node_refcnt(struct micscif_dev *dev, long cnt) |
| 151 | { |
| 152 | #ifdef SCIF_ENABLE_PM |
| 153 | if (dev) { |
| 154 | if (unlikely((atomic_long_sub_return(cnt, |
| 155 | &dev->scif_ref_cnt)) < 0)) { |
| 156 | printk(KERN_ERR "%s %d dec dev %p node %d ref %ld " |
| 157 | " caller %p Lost Node?? \n", |
| 158 | __func__, __LINE__, dev, dev->sd_node, |
| 159 | atomic_long_read(&dev->scif_ref_cnt), |
| 160 | __builtin_return_address(0)); |
| 161 | atomic_long_add_unless(&dev->scif_ref_cnt, cnt, |
| 162 | SCIF_NODE_IDLE); |
| 163 | } |
| 164 | } |
| 165 | #endif |
| 166 | } |
| 167 | |
| 168 | /* Handle a SCIF_NODE_REMOVE message */ |
| 169 | uint64_t micscif_handle_remove_node(uint64_t mask, uint64_t flags); |
| 170 | void micscif_cleanup_scifdev(struct micscif_dev *dev, bool destroy_wq); |
| 171 | |
| 172 | void micscif_node_add_callback(int node); |
| 173 | |
| 174 | void set_nodemask_bit(uint8_t* nodemask, uint32_t node_id, int val); |
| 175 | int get_nodemask_bit(uint8_t* nodemask, uint32_t node_id); |
| 176 | |
| 177 | #ifndef _MIC_SCIF_ |
| 178 | |
/* definition of stack node used in activation/deactivation set algorithms */
struct stack_node {
	struct list_head next;	/* linkage into the stack's list */
	uint32_t node_id;	/* SCIF node id carried by this entry */
};
| 184 | |
/*
 * Progression of a dependency between two SCIF nodes, as tracked by
 * micscif_set_nodedep()/micscif_get_nodedep().
 */
enum dependency_state {
	DEP_STATE_NOT_DEPENDENT = 0,
	DEP_STATE_DEPENDENT = 1,
	DEP_STATE_DISCONNECT_READY = 2,
	DEP_STATE_DISCONNECTED = 3
};
| 191 | |
| 192 | |
| 193 | uint64_t micscif_send_pm_rmnode_msg(int node, uint64_t nodemask_addr, |
| 194 | uint64_t nodemask_size, int orig_node); |
| 195 | uint64_t micscif_send_lost_node_rmnode_msg(int node, int orig_node); |
| 196 | |
| 197 | /* definitions of stack methods used in activation/deactivation set algorithms */ |
| 198 | int init_depgraph_stack(struct list_head *stack_ptr); |
| 199 | int uninit_depgraph_stack(struct list_head *stack_ptr); |
| 200 | int is_stack_empty(struct list_head *stack_ptr); |
| 201 | int stack_push_node(struct list_head *stack_ptr, uint32_t node_id); |
| 202 | int stack_pop_node(struct list_head *stack_ptr, uint32_t *node_id); |
| 203 | int micscif_get_activeset(uint32_t node_id, uint8_t *nodemask); |
| 204 | int micscif_get_minimal_deactiveset(uint32_t node_id, uint8_t *nodemask, uint8_t *visited); |
| 205 | int micscif_get_deactiveset(uint32_t node_id, uint8_t *nodemask, int max_possible); |
| 206 | void micscif_update_p2p_state(uint32_t node_id, uint32_t peer_id, enum scif_state state); |
| 207 | |
| 208 | /* Method responsible for disconnecting node from the scif network */ |
| 209 | int micscif_disconnect_node(uint32_t node_id, uint8_t *nodemask, enum disconn_type type); |
| 210 | int micscif_connect_node(uint32_t node_id, bool get_ref); |
| 211 | |
| 212 | void micscif_set_nodedep(uint32_t src_node, uint32_t dst_node, enum dependency_state state); |
| 213 | enum dependency_state micscif_get_nodedep(uint32_t src_node, uint32_t dst_node); |
| 214 | uint64_t micscif_send_node_alive(int node); |
| 215 | void micscif_watchdog_handler(struct work_struct *work); |
| 216 | int micscif_handle_lostnode(uint32_t nodeid); |
| 217 | #endif /*_MIC_SCIF_*/ |
| 218 | |
| 219 | /* SCIF tasks before transition to low power state */ |
| 220 | int micscif_suspend_handler(struct notifier_block *notif, |
| 221 | unsigned long event, void *ptr); |
| 222 | |
| 223 | /* |
| 224 | * SCIF tasks if a previous low power state transition |
| 225 | * has failed after a suspend call. |
| 226 | */ |
| 227 | int micscif_fail_suspend_handler(struct notifier_block *notif, |
| 228 | unsigned long event, void *ptr); |
| 229 | |
/* SCIF tasks after wake up from low power state */
| 231 | int micscif_resume_handler(struct notifier_block *notif, |
| 232 | unsigned long event, void *ptr); |
| 233 | |
| 234 | #endif /* MICSCIF_NM_H */ |