Commit | Line | Data |
---|---|---|
800f879a AT |
1 | /* |
2 | * Copyright 2010-2017 Intel Corporation. | |
3 | * | |
4 | * This program is free software; you can redistribute it and/or modify | |
5 | * it under the terms of the GNU General Public License, version 2, | |
6 | * as published by the Free Software Foundation. | |
7 | * | |
8 | * This program is distributed in the hope that it will be useful, | |
9 | * but WITHOUT ANY WARRANTY; without even the implied warranty of | |
10 | * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU | |
11 | * General Public License for more details. | |
12 | * | |
13 | * Disclaimer: The codes contained in these modules may be specific to | |
14 | * the Intel Software Development Platform codenamed Knights Ferry, | |
15 | * and the Intel product codenamed Knights Corner, and are not backward | |
16 | * compatible with other Intel products. Additionally, Intel will NOT | |
17 | * support the codes or instruction set in future products. | |
18 | * | |
19 | * Intel offers no warranty of any kind regarding the code. This code is | |
20 | * licensed on an "AS IS" basis and Intel is not obligated to provide | |
21 | * any support, assistance, installation, training, or other services | |
22 | * of any kind. Intel is also not obligated to provide any updates, | |
23 | * enhancements or extensions. Intel specifically disclaims any warranty | |
24 | * of merchantability, non-infringement, fitness for any particular | |
25 | * purpose, and any other warranty. | |
26 | * | |
27 | * Further, Intel disclaims all liability of any kind, including but | |
28 | * not limited to liability for infringement of any proprietary rights, | |
29 | * relating to the use of the code, even if Intel is notified of the | |
30 | * possibility of such liability. Except as expressly stated in an Intel | |
31 | * license agreement provided with this code and agreed upon with Intel, | |
32 | * no license, express or implied, by estoppel or otherwise, to any | |
33 | * intellectual property rights is granted herein. | |
34 | */ | |
35 | ||
36 | #include "mic_common.h" | |
37 | #include "scif.h" | |
38 | #include "mic/micscif.h" | |
39 | #include "mic/mic_pm.h" | |
40 | #include "mic/micveth_dma.h" | |
41 | #include <linux/virtio_ring.h> | |
42 | #include "linux/virtio_blk.h" | |
43 | #include "mic/mic_virtio.h" | |
44 | ||
45 | //few helper functions | |
46 | int pm_reg_read(mic_ctx_t *mic_ctx, uint32_t regoffset) { | |
47 | uint32_t regval = 0; | |
fb5e53b5 AT |
48 | if (mic_ctx->bi_family == FAMILY_ABR) |
49 | regval = DBOX_READ(mic_ctx->mmio.va, regoffset); | |
50 | else if (mic_ctx->bi_family == FAMILY_KNC) | |
51 | regval = SBOX_READ(mic_ctx->mmio.va, regoffset); | |
800f879a AT |
52 | |
53 | return regval; | |
54 | } | |
55 | ||
56 | int pm_reg_write(uint32_t value, mic_ctx_t *mic_ctx, uint32_t regoffset) { | |
57 | int err = 0; | |
58 | if (mic_ctx->bi_family == FAMILY_ABR) | |
59 | DBOX_WRITE(value, mic_ctx->mmio.va, regoffset); | |
60 | else if (mic_ctx->bi_family == FAMILY_KNC) | |
61 | SBOX_WRITE(value, mic_ctx->mmio.va, regoffset); | |
62 | ||
63 | return err; | |
64 | } | |
65 | ||
66 | int hw_idle(mic_ctx_t *mic_ctx) { | |
67 | ||
68 | uint8_t is_ring_active; | |
69 | sbox_pcu_ctrl_t ctrl_regval = {0}; | |
70 | uint32_t idle_wait_cnt; | |
71 | ||
72 | for(idle_wait_cnt = 0; idle_wait_cnt <= MAX_HW_IDLE_WAIT_COUNT; | |
73 | idle_wait_cnt++) { | |
74 | ctrl_regval.value = pm_reg_read(mic_ctx,SBOX_PCU_CONTROL); | |
75 | is_ring_active = ctrl_regval.bits.mclk_enabled; | |
76 | if(likely(!is_ring_active)) | |
77 | return !is_ring_active; | |
78 | msleep(1); | |
79 | } | |
80 | ||
81 | PM_DEBUG("Timing out waiting for HW to become idle\n"); | |
82 | return !is_ring_active; | |
83 | } | |
84 | ||
85 | int hw_active(mic_ctx_t *mic_ctx) { | |
86 | uint8_t is_ring_active; | |
87 | sbox_pcu_ctrl_t ctrl_regval; | |
88 | uint32_t idle_wait_cnt; | |
89 | ||
90 | for(idle_wait_cnt = 0; idle_wait_cnt <= MAX_HW_IDLE_WAIT_COUNT; | |
91 | idle_wait_cnt++) { | |
92 | ctrl_regval.value = pm_reg_read(mic_ctx,SBOX_PCU_CONTROL); | |
93 | is_ring_active = ctrl_regval.bits.mclk_enabled; | |
94 | if (likely(is_ring_active)) | |
95 | return is_ring_active; | |
96 | msleep(10); | |
97 | } | |
98 | ||
99 | PM_DEBUG("Timing out waiting for HW to become active\n"); | |
100 | return is_ring_active; | |
101 | ||
102 | } | |
103 | ||
104 | PM_IDLE_STATE get_card_state(mic_ctx_t *mic_ctx) { | |
105 | ||
106 | PM_IDLE_STATE state; | |
107 | sbox_uos_pm_state_t upmstate_regval = {0}; | |
108 | upmstate_regval.value = pm_reg_read(mic_ctx, SBOX_UOS_PMSTATE); | |
109 | state = (PM_IDLE_STATE)(upmstate_regval.bits.uos_pm_state); | |
110 | return state; | |
111 | ||
112 | } | |
113 | ||
114 | PM_IDLE_STATE get_host_state(mic_ctx_t *mic_ctx) { | |
115 | ||
116 | PM_IDLE_STATE state; | |
117 | sbox_host_pm_state_t hpmstate_regval = {0}; | |
118 | hpmstate_regval.value = pm_reg_read(mic_ctx, SBOX_HOST_PMSTATE); | |
119 | state = (PM_IDLE_STATE)(hpmstate_regval.bits.host_pm_state); | |
120 | return state; | |
121 | ||
122 | } | |
123 | ||
124 | int set_host_state(mic_ctx_t *mic_ctx, PM_IDLE_STATE state) { | |
125 | ||
126 | int err = 0; | |
127 | sbox_host_pm_state_t hpmstate_regval = {0}; | |
128 | hpmstate_regval.value = pm_reg_read(mic_ctx, SBOX_HOST_PMSTATE); | |
129 | hpmstate_regval.bits.host_pm_state = 0; | |
130 | hpmstate_regval.bits.host_pm_state = state; | |
131 | pm_reg_write(hpmstate_regval.value, mic_ctx, SBOX_HOST_PMSTATE); | |
132 | return err; | |
133 | } | |
134 | ||
135 | int check_card_state(mic_ctx_t *mic_ctx, PM_IDLE_STATE state) { | |
136 | PM_IDLE_STATE card_state = get_card_state(mic_ctx); | |
137 | return (state == card_state) ? 1 : 0; | |
138 | } | |
139 | ||
140 | int check_host_state(mic_ctx_t *mic_ctx, PM_IDLE_STATE state) { | |
141 | PM_IDLE_STATE host_state = get_host_state(mic_ctx); | |
142 | return (state == host_state) ? 1 : 0; | |
143 | } | |
144 | ||
145 | uint32_t svid_cmd_fmt(unsigned int bits) | |
146 | { | |
147 | unsigned int bits_set,bmask; | |
148 | ||
149 | bmask = bits; | |
150 | ||
151 | for (bits_set = 0; bmask; bits_set++) { | |
152 | /* Zero the least significant bit that is set */ | |
153 | bmask &= (bmask - 1); | |
154 | } | |
155 | bits <<= 1; /* Make way for the parity bit */ | |
156 | if (bits_set & 1) { /* odd number of 1s */ | |
157 | bits |= 1; | |
158 | } | |
159 | ||
160 | return bits; | |
161 | } | |
162 | ||
163 | void set_vid(mic_ctx_t *mic_ctx, sbox_svid_control svidctrl_regval, unsigned int vidcode) { | |
164 | ||
165 | uint32_t temp; | |
166 | uint32_t svid_cmd = 0; | |
167 | uint32_t svid_dout = 0; | |
168 | temp = svid_cmd_fmt((KNC_SVID_ADDR << 13) | | |
169 | (KNC_SETVID_SLOW << 8) | vidcode); | |
170 | svid_cmd = (KNC_SVID_ADDR << 5) | KNC_SETVID_SLOW; | |
171 | svidctrl_regval.bits.svid_cmd = 0x0e0; | |
172 | svidctrl_regval.bits.svid_cmd = svid_cmd; | |
173 | ||
174 | svid_dout = temp & 0x1ff; | |
175 | svidctrl_regval.bits.svid_dout = 0; | |
176 | svidctrl_regval.bits.svid_dout = svid_dout; | |
177 | ||
178 | svidctrl_regval.bits.cmd_start = 0x1; | |
179 | pm_reg_write(svidctrl_regval.value, mic_ctx, | |
180 | SBOX_SVID_CONTROL); | |
181 | ||
182 | msleep(10); | |
183 | ||
184 | return; | |
185 | } | |
186 | ||
/*
 * set_vid_knc - program the core voltage on a KNC card via SVID.
 * @mic_ctx: driver context of the node.
 * @vidcode: VID code to program (PC6 entry passes 0).
 *
 * B0 and later steppings: wait for the SVID interface to go idle, issue
 * the command (up to KNC_SETVID_ATTEMPTS tries when idle does not
 * reassert), check the ack/parity status, then poll SBOX_COREVOLT until
 * it reads back @vidcode. The whole sequence is retried up to
 * SET_VID_RETRY_COUNT times. Pre-B0 (A-step): fire the command blind
 * and sleep, because SBOX_COREVOLT does not report the real VID there.
 *
 * Returns 0 on success, -EINVAL on SVID protocol error or idle timeout,
 * -ENODEV when the voltage never reads back as set.
 */
int set_vid_knc(mic_ctx_t *mic_ctx, unsigned int vidcode)
{
	uint32_t status = 0;

	sbox_svid_control svidctrl_regval = {0};
	uint32_t svid_idle = 0;
	uint32_t svid_error = 0;
	int i = 0;
	uint32_t wait_cnt = 0;
	sbox_core_volt_t core_volt_regval = {0};
	int retry = 0;

	if (mic_ctx->bi_stepping >= KNC_B0_STEP) {
		for (retry = 0; retry < SET_VID_RETRY_COUNT; retry++) {
			status = 0;
			for (i = 0; i < KNC_SETVID_ATTEMPTS; i++) {
				/* The interface must be idle before a new command. */
				svidctrl_regval.value = pm_reg_read(mic_ctx,SBOX_SVID_CONTROL);
				svid_idle = svidctrl_regval.bits.svid_idle;

				if (svid_idle) {
					set_vid(mic_ctx, svidctrl_regval, vidcode);
					/* Re-read to see how the command completed. */
					svidctrl_regval.value =
						pm_reg_read(mic_ctx,SBOX_SVID_CONTROL);
					svid_idle = svidctrl_regval.bits.svid_idle;
					svid_error = svidctrl_regval.bits.svid_error;

					if (!svid_idle) {
						/* Command did not complete; retry the inner loop. */
						printk(KERN_ERR "%s SVID command failed - Idle not set\n",
								__func__);
						msleep(10);
						continue;
					}

					if (svid_error) {
						/* ACK bits 0b10 indicate the VR saw a parity
						 * error on receive; anything else is tx-side. */
						if (SBOX_SVIDCTRL_ACK1ACK0(svidctrl_regval.value) == 0x2) {
							printk(KERN_ERR "%s SVID command failed - rx parity error\n",
								__func__);
						} else {
							printk(KERN_ERR "%s SVID command failed - tx parity error\n",
								__func__);
						}
						status = -EINVAL;
						goto exit;
					} else {
						PM_DEBUG("SVID Command Successful - VID set to %d\n",vidcode);
						break;
					}
				}
			}

			/* Loop ran out without ever finding the interface idle
			 * (or without a completed command). */
			if (i == KNC_SETVID_ATTEMPTS) {
				printk(KERN_ERR "%s Timed out waiting for SVID idle\n", __func__);
				status = -EINVAL;
				goto exit;
			}

			/* Verify that the voltage is set */
			for(wait_cnt = 0; wait_cnt <= 100; wait_cnt++) {
				core_volt_regval.value = pm_reg_read(mic_ctx, SBOX_COREVOLT);
				if(vidcode == core_volt_regval.bits.vid) {
					return status;
				}
				msleep(10);
				PM_DEBUG("Retry: %d Voltage not set yet. vidcode = 0x%x Current vid = 0x%x\n",
						retry, vidcode, core_volt_regval.bits.vid);
			}

			/* Voltage never confirmed; outer loop retries the command. */
			PM_PRINT("Retry: %d Failed to set vid for node %d. vid code = 0x%x Current vid = 0x%x.\n",
				retry, mic_get_scifnode_id(mic_ctx), vidcode, core_volt_regval.bits.vid);
			status = -ENODEV;
		}
	} else {
		set_vid(mic_ctx, svidctrl_regval, vidcode);

		/* SBOX_COREVOLT does not reflect the correct vid
		 * value on A0. Just wait here for sometime to
		 * allow for the vid to be set.
		 */
		msleep(20);
	}

exit:
	return status;
}
271 | ||
272 | /* @print_nodemaskbuf | |
273 | * | |
274 | * @param - buf - the nodemask buffer | |
275 | * | |
276 | * prints the nodes in the nodemask. | |
277 | * | |
278 | * @returns - none | |
279 | */ | |
280 | void print_nodemaskbuf(uint8_t* buf) { | |
281 | ||
282 | uint8_t *temp_buf_ptr; | |
283 | uint32_t i,j; | |
284 | ||
285 | temp_buf_ptr = buf; | |
286 | PM_DEBUG("Nodes in nodemask: "); | |
287 | for(i = 0; i <= ms_info.mi_maxid; i++) { | |
288 | temp_buf_ptr = buf + i; | |
289 | for (j = 0; j < 8; j++) { | |
290 | if (get_nodemask_bit(temp_buf_ptr, j)) | |
291 | pr_debug("%d ", j + (i * 8)); | |
292 | } | |
293 | } | |
294 | } | |
295 | ||
/*
 * restore_pc6_registers - restore registers programmed during PC6 entry.
 * @mic_ctx:   driver context of the node.
 * @from_dpc3: true when aborting a partial PC3->PC6 entry from within
 *             the entry path itself; false on a normal PC6 exit.
 *
 * Always clears the GBOX "in package C6" flag and the group-B power-good
 * mask, and re-asserts the "booted" bit in SBOX_COREFREQ. When
 * @from_dpc3 is false it additionally clears the MCLK PLL shutdown
 * enable and the prevent-auto-C3-exit bit.
 */
void restore_pc6_registers(mic_ctx_t *mic_ctx, bool from_dpc3) {
	sbox_pcu_ctrl_t ctrl_regval = {0};
	sbox_uos_pcu_ctrl_t uos_ctrl_regval = {0};
	gbox_pm_control pmctrl_reg = {0};
	sbox_core_freq_t core_freq_reg = {0};

	if (!from_dpc3) {
		/* The MCLK PLL shutdown control lives in SBOX_PCU_CONTROL on
		 * A-step parts and moved to SBOX_UOS_PCUCONTROL afterwards. */
		if(KNC_A_STEP == mic_ctx->bi_stepping) {
			ctrl_regval.value = pm_reg_read(mic_ctx, SBOX_PCU_CONTROL);
			ctrl_regval.bits.enable_mclk_pl_shutdown = 0;
			pm_reg_write(ctrl_regval.value, mic_ctx, SBOX_PCU_CONTROL);
		} else {
			uos_ctrl_regval.value = pm_reg_read(mic_ctx,SBOX_UOS_PCUCONTROL);
			uos_ctrl_regval.bits.enable_mclk_pll_shutdown = 0;
			pm_reg_write(uos_ctrl_regval.value, mic_ctx, SBOX_UOS_PCUCONTROL);
		}


		/* Allow the card to auto-exit C3 again. */
		ctrl_regval.value = pm_reg_read(mic_ctx, SBOX_PCU_CONTROL);
		ctrl_regval.bits.prevent_auto_c3_exit = 0;
		pm_reg_write(ctrl_regval.value, mic_ctx, SBOX_PCU_CONTROL);
	}

	/* Tell the GBOX the package is no longer in C6. */
	pmctrl_reg.value = pm_reg_read(mic_ctx, GBOX_PM_CTRL);
	pmctrl_reg.bits.in_pckgc6 = 0;
	pm_reg_write(pmctrl_reg.value, mic_ctx, GBOX_PM_CTRL);

	/* Unmask group-B power good. */
	ctrl_regval.value = pm_reg_read(mic_ctx, SBOX_PCU_CONTROL);
	ctrl_regval.bits.grpB_pwrgood_mask = 0;
	pm_reg_write(ctrl_regval.value, mic_ctx, SBOX_PCU_CONTROL);

	/* Re-advertise the card as booted. */
	core_freq_reg.value = pm_reg_read(mic_ctx, SBOX_COREFREQ);
	core_freq_reg.bits.booted = 1;
	pm_reg_write(core_freq_reg.value, mic_ctx, SBOX_COREFREQ);
}
331 | ||
332 | void program_mclk_shutdown(mic_ctx_t *mic_ctx, bool set) | |
333 | { | |
334 | sbox_uos_pcu_ctrl_t uos_ctrl_regval; | |
335 | sbox_pcu_ctrl_t ctrl_regval; | |
336 | ||
337 | if(KNC_A_STEP == mic_ctx->bi_stepping) { | |
338 | ctrl_regval.value = pm_reg_read(mic_ctx,SBOX_PCU_CONTROL); | |
339 | ctrl_regval.bits.enable_mclk_pl_shutdown = (set ? 1: 0); | |
340 | pm_reg_write(ctrl_regval.value, mic_ctx, SBOX_PCU_CONTROL); | |
341 | } else { | |
342 | uos_ctrl_regval.value = pm_reg_read(mic_ctx, | |
343 | SBOX_UOS_PCUCONTROL); | |
344 | uos_ctrl_regval.bits.enable_mclk_pll_shutdown = (set ? 1: 0); | |
345 | pm_reg_write(uos_ctrl_regval.value, | |
346 | mic_ctx, SBOX_UOS_PCUCONTROL); | |
347 | } | |
348 | } | |
349 | ||
350 | void program_prevent_C3Exit(mic_ctx_t *mic_ctx, bool set) | |
351 | { | |
352 | sbox_pcu_ctrl_t ctrl_regval; | |
353 | ctrl_regval.value = pm_reg_read(mic_ctx,SBOX_PCU_CONTROL); | |
354 | ctrl_regval.bits.prevent_auto_c3_exit = (set ? 1: 0); | |
355 | pm_reg_write(ctrl_regval.value, mic_ctx, SBOX_PCU_CONTROL); | |
356 | ||
357 | } | |
358 | ||
/*
 * pm_pc3_to_pc6_entry - transition a node already in PC3 into package C6.
 * @mic_ctx: driver context of the node.
 *
 * Gates the interrupt handler, programs the shutdown/prevent-C3-exit
 * bits, waits for the hardware to idle, marks package C6 in the GBOX,
 * drops the core voltage to 0 via SVID, and records PC6 in both the
 * driver state and the host PM register. On failure the interrupt gate
 * is released and the bottom half rescheduled.
 *
 * Returns 0 on success, -EAGAIN when the card/hardware is not ready,
 * -EFAULT when the interrupt handler is in use, or the set_vid_knc error.
 */
int pm_pc3_to_pc6_entry(mic_ctx_t *mic_ctx)
{
	int err;
	sbox_pcu_ctrl_t ctrl_regval;
	gbox_pm_control pmctrl_reg;
	sbox_core_freq_t core_freq_reg;

	/* PC6 may only be entered from PC3. */
	if ((get_card_state(mic_ctx)) != PM_IDLE_STATE_PC3) {
		PM_DEBUG("Card not ready to go to PC6. \n");
		err = -EAGAIN;
		goto exit;
	}

	/* Claim the interrupt gate; bail if the handler is currently active. */
	if (atomic_cmpxchg(&mic_ctx->gate_interrupt, 0, 1) == 1) {
		PM_DEBUG("Cannot gate interrupt handler while it is in use\n");
		err = -EFAULT;
		goto exit;
	}

	program_prevent_C3Exit(mic_ctx, true);
	program_mclk_shutdown(mic_ctx, true);

	/* Wait for uos to become idle. */
	if (!hw_idle(mic_ctx)) {
		/* Back out the PLL shutdown and give the hardware one more
		 * chance to settle before aborting. */
		program_mclk_shutdown(mic_ctx, false);
		if (!hw_idle(mic_ctx)) {
			program_prevent_C3Exit(mic_ctx, false);
			PM_DEBUG("Card not ready to go to PC6. \n");
			err = -EAGAIN;
			goto intr_ungate;
		} else {
			/* Idle after all: re-enable the shutdown and proceed. */
			program_mclk_shutdown(mic_ctx, true);
		}
	}

	/* Flag package C6 in the GBOX. */
	pmctrl_reg.value = pm_reg_read(mic_ctx, GBOX_PM_CTRL);
	pmctrl_reg.bits.in_pckgc6 = 1;
	pm_reg_write(pmctrl_reg.value, mic_ctx, GBOX_PM_CTRL);

	/* Drop the "booted" indication while the package is down. */
	core_freq_reg.value = pm_reg_read(mic_ctx, SBOX_COREFREQ);
	core_freq_reg.bits.booted = 0;
	pm_reg_write(core_freq_reg.value, mic_ctx, SBOX_COREFREQ);

	udelay(500);

	/* Mask group-B power good before removing core voltage. */
	ctrl_regval.value = pm_reg_read(mic_ctx, SBOX_PCU_CONTROL);
	ctrl_regval.bits.grpB_pwrgood_mask = 1;
	pm_reg_write(ctrl_regval.value, mic_ctx, SBOX_PCU_CONTROL);

	/* VID 0 powers the core rail down. */
	err = set_vid_knc(mic_ctx, 0);
	if (err != 0) {
		PM_DEBUG("Aborting PC6 entry...Failed to set VID\n");
		restore_pc6_registers(mic_ctx, true);
		goto intr_ungate;
	}

	mic_ctx->micpm_ctx.idle_state = PM_IDLE_STATE_PC6;
	set_host_state(mic_ctx, PM_IDLE_STATE_PC6);

	dma_prep_suspend(mic_ctx->dma_handle);

	PM_PRINT("Node %d entered PC6\n",
		mic_get_scifnode_id(mic_ctx));

	return err;

intr_ungate:
	/* Release the interrupt gate and rerun any deferred work. */
	atomic_set(&mic_ctx->gate_interrupt, 0);
	tasklet_schedule(&mic_ctx->bi_dpc);
exit:
	return err;
}
431 | ||
432 | /* | |
433 | * pm_pc6_exit: | |
434 | * | |
435 | * Execute pc6 exit for a node. | |
436 | * mic_ctx: The driver context of the node. | |
437 | */ | |
438 | int pm_pc6_exit(mic_ctx_t *mic_ctx) | |
439 | { | |
440 | ||
441 | int err = 0; | |
442 | ||
443 | sbox_host_pm_state_t hpmstate_regval; | |
444 | sbox_pcu_ctrl_t ctrl_regval; | |
445 | uint8_t tdp_vid = 0; | |
446 | uint8_t is_pll_locked; | |
447 | uint32_t wait_cnt; | |
448 | int i; | |
449 | ||
450 | ||
451 | if (!check_host_state(mic_ctx, PM_IDLE_STATE_PC6)) { | |
452 | PM_DEBUG("Wrong Host PM state. State = %d\n", | |
453 | get_host_state(mic_ctx)); | |
454 | err = -EINVAL; | |
455 | goto restore_registers; | |
456 | } | |
457 | ||
458 | hpmstate_regval.value = pm_reg_read(mic_ctx, SBOX_HOST_PMSTATE); | |
459 | tdp_vid = hpmstate_regval.bits.tdp_vid; | |
460 | PM_DEBUG("TDP_VID value obtained from Host PM Register = %d",tdp_vid); | |
461 | ||
462 | PM_DEBUG("Setting voltage to %dV using SVID Control\n",tdp_vid); | |
463 | err = set_vid_knc(mic_ctx, tdp_vid); | |
464 | if (err != 0) { | |
465 | printk(KERN_ERR "%s Failed PC6 entry...error in setting VID\n", | |
466 | __func__); | |
467 | goto restore_registers; | |
468 | } | |
469 | ||
470 | ctrl_regval.value = pm_reg_read(mic_ctx, SBOX_PCU_CONTROL); | |
471 | ||
472 | program_mclk_shutdown(mic_ctx, false); | |
473 | program_prevent_C3Exit(mic_ctx, false); | |
474 | ||
475 | for(wait_cnt = 0; wait_cnt < 200; wait_cnt++) { | |
476 | ctrl_regval.value = pm_reg_read(mic_ctx,SBOX_PCU_CONTROL); | |
477 | is_pll_locked = ctrl_regval.bits.mclk_pll_lock; | |
478 | if(likely(is_pll_locked)) | |
479 | break; | |
480 | msleep(10); | |
481 | } | |
482 | ||
483 | if(wait_cnt >= 200) { | |
484 | PM_DEBUG("mclk_pll_locked bit is not set.\n"); | |
485 | err = -EAGAIN; | |
486 | goto restore_registers; | |
487 | } | |
488 | ||
489 | ctrl_regval.bits.grpB_pwrgood_mask = 0; | |
490 | pm_reg_write(ctrl_regval.value, mic_ctx, SBOX_PCU_CONTROL); | |
491 | ||
492 | if (!hw_active(mic_ctx)) { | |
493 | PM_DEBUG("Timing out waiting for hw to become active"); | |
494 | goto restore_registers; | |
495 | } | |
496 | ||
497 | for(wait_cnt = 0; wait_cnt < 200; wait_cnt++) { | |
498 | if ((get_card_state(mic_ctx)) == PM_IDLE_STATE_PC0) | |
499 | break; | |
500 | msleep(10); | |
501 | } | |
502 | ||
503 | if(wait_cnt >= 200) { | |
504 | PM_DEBUG("PC6 Exit not complete.\n"); | |
505 | err = -EFAULT; | |
506 | goto restore_registers; | |
507 | } | |
508 | ||
509 | mic_ctx->micpm_ctx.idle_state = PM_IDLE_STATE_PC0; | |
510 | ||
511 | for (i = 0; i <= mic_data.dd_numdevs; i++) { | |
512 | if (micscif_get_nodedep(mic_get_scifnode_id(mic_ctx), i) == | |
513 | DEP_STATE_DISCONNECTED) { | |
514 | micscif_set_nodedep(mic_get_scifnode_id(mic_ctx), i, | |
515 | DEP_STATE_DEPENDENT); | |
516 | } | |
517 | } | |
518 | ||
519 | PM_PRINT("Node %d exited PC6\n", | |
520 | mic_get_scifnode_id(mic_ctx)); | |
521 | goto exit; | |
522 | ||
523 | restore_registers: | |
524 | restore_pc6_registers(mic_ctx, false); | |
525 | exit: | |
526 | atomic_set(&mic_ctx->gate_interrupt, 0); | |
527 | tasklet_schedule(&mic_ctx->bi_dpc); | |
528 | return err; | |
529 | } | |
530 | ||
531 | /* | |
532 | * setup_pm_dependency: | |
533 | * | |
534 | * Function sets up the dependency matrix by populating | |
535 | * the matrix with node depency information. | |
536 | * | |
537 | * Returns 0 on success. Appropriate error on failure. | |
538 | */ | |
539 | int setup_pm_dependency(void){ | |
540 | int err = 0; | |
541 | uint16_t i; | |
542 | uint16_t j; | |
543 | mic_ctx_t *mic_ctx; | |
544 | ||
545 | for (i = 0; i < mic_data.dd_numdevs; i++) { | |
546 | mic_ctx = get_per_dev_ctx(i); | |
547 | if (!mic_ctx) { | |
548 | PM_DEBUG("Failed to retrieve driver context\n"); | |
549 | return -EFAULT; | |
550 | } | |
551 | if (mic_ctx->micpm_ctx.idle_state == | |
552 | PM_IDLE_STATE_PC3_READY) { | |
553 | for (j = 0; j < mic_data.dd_numdevs; j++) { | |
554 | if (micscif_get_nodedep(mic_get_scifnode_id(mic_ctx),j+1) == | |
555 | DEP_STATE_DEPENDENT) { | |
556 | micscif_set_nodedep(mic_get_scifnode_id(mic_ctx),j+1, | |
557 | DEP_STATE_DISCONNECT_READY); | |
558 | } | |
559 | } | |
560 | } | |
561 | } | |
562 | return err; | |
563 | } | |
564 | ||
565 | /* | |
566 | * teardown_pm_dependency | |
567 | * | |
568 | * Function resets dependency matrix by removing all depenendy info | |
569 | * from it. | |
570 | * | |
571 | * Returns 0 on success. Appropriate error on failure. | |
572 | */ | |
573 | int teardown_pm_dependency(void) { | |
574 | int err = 0; | |
575 | int i; | |
576 | int j; | |
577 | ||
578 | for (i = 0; i < mic_data.dd_numdevs; i++) { | |
579 | for (j = 0; j < mic_data.dd_numdevs; j++) { | |
580 | ||
581 | if (micscif_get_nodedep(i+1,j+1) == DEP_STATE_DISCONNECT_READY) { | |
582 | micscif_set_nodedep(i+1,j+1, DEP_STATE_DEPENDENT); | |
583 | } | |
584 | } | |
585 | } | |
586 | return err; | |
587 | } | |
588 | ||
589 | /* | |
590 | * revert_idle_entry_trasaction: | |
591 | * | |
592 | * @node_disc_bitmask: Bitmask of nodes which were involved in the | |
593 | * transaction | |
594 | * | |
595 | * Function Reverts idle state changes made to nodes when an idle | |
596 | * state trasaction fails. | |
597 | */ | |
598 | int revert_idle_entry_trasaction(uint8_t *node_disc_bitmask) { | |
599 | int err = 0; | |
600 | mic_ctx_t *node_ctx; | |
601 | uint32_t node_id = 0; | |
602 | ||
603 | for(node_id = 0; node_id <= ms_info.mi_maxid; node_id++) { | |
604 | if (node_id == SCIF_HOST_NODE) | |
605 | continue; | |
606 | ||
607 | if (!get_nodemask_bit(node_disc_bitmask, node_id)) | |
608 | continue; | |
609 | ||
610 | node_ctx = get_per_dev_ctx(node_id - 1); | |
611 | if (!node_ctx) { | |
612 | PM_DEBUG("Failed to retrieve node context."); | |
613 | err = -EINVAL; | |
614 | goto exit; | |
615 | } | |
616 | ||
617 | if (node_ctx->micpm_ctx.idle_state == PM_IDLE_STATE_PC3) { | |
618 | err = pm_pc3_exit(node_ctx); | |
619 | if (err) { | |
620 | PM_DEBUG("Wakeup of Node %d failed. Node is lost" | |
621 | " and is to be disconnected",node_id); | |
622 | node_ctx->micpm_ctx.idle_state = PM_IDLE_STATE_LOST; | |
623 | /* Since node is lost, ref_cnt increment(decement) through the | |
624 | * pm_get(put)_reference interface is prevented by idle_state. | |
625 | * We still need to ensure the ref_cnt iself is reset | |
626 | * back to 0 so that pm_get(put)_reference will work after the | |
627 | * lost node interface recovers the node. */ | |
628 | atomic_set(&node_ctx->micpm_ctx.pm_ref_cnt, 0); | |
629 | } | |
630 | } | |
631 | } | |
632 | exit: | |
633 | return err; | |
634 | } | |
635 | ||
636 | /* pm_node_disconnect | |
637 | * | |
638 | * Called during idlestate entry. | |
639 | * | |
640 | * Function checks the pm_ref_cnt and returns ACK | |
641 | * or NACK depending on the pm_ref_cnt value. | |
642 | */ | |
643 | int pm_node_disconnect(uint8_t *nodemask) { | |
644 | ||
645 | uint32_t node_id; | |
646 | mic_ctx_t *mic_ctx; | |
647 | int ret = 0; | |
648 | int err = 0; | |
649 | ||
650 | for (node_id = 0; node_id <= ms_info.mi_maxid; node_id++) { | |
651 | if (node_id == SCIF_HOST_NODE) | |
652 | continue; | |
653 | ||
654 | if (!get_nodemask_bit(nodemask, node_id)) | |
655 | continue; | |
656 | ||
657 | mic_ctx = get_per_dev_ctx(node_id - 1); | |
658 | if (!mic_ctx) { | |
659 | set_nodemask_bit(nodemask, node_id, 0); | |
660 | return -EAGAIN; | |
661 | } | |
662 | ||
663 | if (mic_ctx->state != MIC_ONLINE) { | |
664 | set_nodemask_bit(nodemask, node_id, 0); | |
665 | return -EAGAIN; | |
666 | } | |
667 | ||
668 | ret = atomic_cmpxchg(&mic_ctx->micpm_ctx.pm_ref_cnt, | |
669 | 0, PM_NODE_IDLE); | |
670 | if (((ret != 0) && (ret != PM_NODE_IDLE)) | |
671 | || atomic_read(&mic_data.dd_pm.wakeup_in_progress)) { | |
672 | set_nodemask_bit(nodemask, node_id, 0); | |
673 | return -EAGAIN; | |
674 | } | |
675 | } | |
676 | ||
677 | return err; | |
678 | } | |
679 | ||
/*
 * pm_pc3_entry:
 *
 * Execute pc3 entry for a node.
 * mic_ctx: The driver context of the node.
 *
 * Verifies that both the host register state and the cached idle_state
 * are PC0 and that the node is connected with PC3 enabled, then runs
 * the idle-state entry transaction. When PC6 is supported by the
 * stepping, deferred PC6 entry work is queued. Any precondition failure
 * wakes the card back up (pm_pc3_exit) under the idle mutex.
 * Returns 0, -EINVAL on a NULL context, or the do_idlestate_entry error.
 */
int pm_pc3_entry(mic_ctx_t *mic_ctx)
{
	int err = 0;
	if (mic_ctx == NULL) {
		err = -EINVAL;
		goto exit;
	}

	/* Both the host register and the driver's cached state must be PC0. */
	if (((!check_host_state(mic_ctx, PM_IDLE_STATE_PC0))) ||
		(mic_ctx->micpm_ctx.idle_state != PM_IDLE_STATE_PC0)) {
		PM_DEBUG("Wrong host state. register state = %d"
			" idle state = %d\n", get_host_state(mic_ctx),
			mic_ctx->micpm_ctx.idle_state);
		goto send_wakeup;
	}

	/* cancel pc6 entry work that may be scheduled. We need to
	 * do this either here or after a previous pc3 exit */
	cancel_delayed_work_sync(&mic_ctx->micpm_ctx.pc6_entry_work);

	if ((mic_ctx->micpm_ctx.con_state != PM_CONNECTED) ||
		(!mic_ctx->micpm_ctx.pc3_enabled))
		goto send_wakeup;

	mic_ctx->micpm_ctx.idle_state = PM_IDLE_STATE_PC3_READY;
	err = do_idlestate_entry(mic_ctx);
	if (err)
		goto exit;
	/* PC6 requires pc6_enabled, stepping >= KNC_C_STEP and not B1. */
	if ((mic_ctx->micpm_ctx.pc6_enabled) &&
		(KNC_C_STEP <= mic_ctx->bi_stepping) &&
		(KNC_B1_STEP != mic_ctx->bi_stepping)) {
		queue_delayed_work(mic_ctx->micpm_ctx.pc6_entry_wq,
			&mic_ctx->micpm_ctx.pc6_entry_work,
			mic_ctx->micpm_ctx.pc6_timeout*HZ);
	}

	goto exit;

send_wakeup:
	/* pm_pc3_exit requires the idle mutex to be held by the caller. */
	mutex_lock(&mic_data.dd_pm.pm_idle_mutex);
	pm_pc3_exit(mic_ctx);
	mutex_unlock(&mic_data.dd_pm.pm_idle_mutex);
exit:
	return err;
}
731 | ||
732 | /* | |
733 | * pm_pc3_exit: | |
734 | * Calling function needs to grab idle_state mutex. | |
735 | * | |
736 | * Execute pc3 exit for a node. | |
737 | * mic_ctx: The driver context of the node. | |
738 | */ | |
739 | int pm_pc3_exit(mic_ctx_t *mic_ctx) | |
740 | { | |
741 | int err; | |
742 | int wait_cnt; | |
743 | ||
744 | WARN_ON(!mutex_is_locked(&mic_data.dd_pm.pm_idle_mutex)); | |
745 | mic_send_pm_intr(mic_ctx); | |
746 | for (wait_cnt = 0; wait_cnt < PC3_EXIT_WAIT_COUNT; wait_cnt++) { | |
747 | if (check_card_state(mic_ctx, PM_IDLE_STATE_PC0)) | |
748 | break; | |
749 | msleep(1); | |
750 | } | |
751 | ||
752 | ||
753 | if(wait_cnt >= PC3_EXIT_WAIT_COUNT) { | |
754 | PM_DEBUG("Syncronization with card failed." | |
755 | " Node is lost\n"); | |
756 | err = -EFAULT; | |
757 | goto exit; | |
758 | } | |
759 | ||
760 | set_host_state(mic_ctx, PM_IDLE_STATE_PC0); | |
761 | mic_ctx->micpm_ctx.idle_state = PM_IDLE_STATE_PC0; | |
762 | PM_DEBUG("Node %d exited PC3\n", mic_get_scifnode_id(mic_ctx)); | |
763 | ||
764 | return 0; | |
765 | exit: | |
766 | return err; | |
767 | } | |
768 | ||
769 | /* | |
770 | * do_idlestate_entry: | |
771 | * | |
772 | * Function to start the idle state entry transaction for a node. Puts a node | |
773 | * and all the nodes that are dependent on this node to idle state if | |
774 | * it is possible. | |
775 | * | |
776 | * mic_ctx: The device context of node that needs to be put in idle state | |
777 | * Returs 0 in success. Appropriate error code on failure | |
778 | */ | |
779 | int do_idlestate_entry(mic_ctx_t *mic_ctx) | |
780 | { | |
781 | int err = 0; | |
782 | uint32_t node_id = 0; | |
783 | mic_ctx_t *node_ctx; | |
784 | uint8_t *nodemask_buf; | |
785 | ||
786 | if(!mic_ctx) | |
787 | return -EINVAL; | |
788 | ||
789 | mutex_lock(&mic_data.dd_pm.pm_idle_mutex); | |
790 | ||
791 | if ((err = setup_pm_dependency())) { | |
792 | PM_DEBUG("Failed to set up PM specific dependencies"); | |
793 | goto unlock; | |
794 | } | |
795 | ||
796 | nodemask_buf = (uint8_t *) | |
797 | kzalloc(mic_ctx->micpm_ctx.nodemask.len, GFP_KERNEL); | |
798 | if(!nodemask_buf) { | |
799 | PM_DEBUG("Error allocating nodemask buffer\n"); | |
800 | err = ENOMEM; | |
801 | goto dep_teardown; | |
802 | } | |
803 | ||
804 | err = micscif_get_deactiveset(mic_get_scifnode_id(mic_ctx), | |
805 | nodemask_buf, 1); | |
806 | if (err) { | |
807 | PM_DEBUG("Node disconnection failed " | |
808 | "during deactivation set calculation"); | |
809 | goto free_buf; | |
810 | } | |
811 | ||
812 | print_nodemaskbuf(nodemask_buf); | |
813 | ||
814 | if ((err = micscif_disconnect_node(mic_get_scifnode_id(mic_ctx), | |
815 | nodemask_buf, DISCONN_TYPE_POWER_MGMT))) { | |
816 | PM_DEBUG("SCIF Node disconnect failed. err: %d", err); | |
817 | goto free_buf; | |
818 | } | |
819 | ||
820 | if ((err = pm_node_disconnect(nodemask_buf))) { | |
821 | PM_DEBUG("PM Node disconnect failed. err = %d\n", err); | |
822 | goto free_buf; | |
823 | } | |
824 | ||
825 | if ((err = micvcons_pm_disconnect_node(nodemask_buf, | |
826 | DISCONN_TYPE_POWER_MGMT))) { | |
827 | PM_DEBUG("VCONS Node disconnect failed. err = %d\n", err); | |
828 | goto free_buf; | |
829 | } | |
830 | ||
831 | for(node_id = 0; node_id <= ms_info.mi_maxid; node_id++) { | |
832 | if (node_id == SCIF_HOST_NODE) | |
833 | continue; | |
834 | if (!get_nodemask_bit(nodemask_buf, node_id)) | |
835 | continue; | |
836 | node_ctx = get_per_dev_ctx(node_id - 1); | |
837 | if (!node_ctx) { | |
838 | PM_DEBUG("Failed to retrieve node context."); | |
839 | err = -EINVAL; | |
840 | goto revert; | |
841 | } | |
842 | ||
843 | if (node_ctx->micpm_ctx.idle_state == | |
844 | PM_IDLE_STATE_PC3_READY) { | |
845 | set_host_state(node_ctx, PM_IDLE_STATE_PC3); | |
846 | node_ctx->micpm_ctx.idle_state = | |
847 | PM_IDLE_STATE_PC3; | |
848 | PM_DEBUG("Node %d entered PC3\n", | |
849 | mic_get_scifnode_id(node_ctx)); | |
850 | } else { | |
851 | PM_DEBUG("Invalid idle state \n"); | |
852 | err = -EINVAL; | |
853 | goto revert; | |
854 | } | |
855 | } | |
856 | ||
857 | revert: | |
858 | if (err) | |
859 | revert_idle_entry_trasaction(nodemask_buf); | |
860 | free_buf: | |
861 | kfree(nodemask_buf); | |
862 | dep_teardown: | |
863 | teardown_pm_dependency(); | |
864 | unlock: | |
865 | if (err && (mic_ctx->micpm_ctx.idle_state != PM_IDLE_STATE_PC0)) | |
866 | pm_pc3_exit(mic_ctx); | |
867 | ||
868 | mutex_unlock(&mic_data.dd_pm.pm_idle_mutex); | |
869 | return err; | |
870 | } | |
871 | ||
872 | /** | |
873 | * is_idlestate_exit_needed: | |
874 | * | |
875 | * @node_id[in]: node to wakeup. | |
876 | * | |
877 | * Method responsible for checking if idle state exit is required | |
878 | * In some situation we would like to know whether node is idle or not before | |
879 | * making decision to bring the node out of idle state. | |
880 | * For example - Lost node detection. | |
881 | * returns false if the node is not in IDLE state, returns true otherwise | |
882 | */ | |
883 | int | |
884 | is_idlestate_exit_needed(mic_ctx_t *mic_ctx) | |
885 | { | |
886 | int ret = 0; | |
887 | mutex_lock(&mic_data.dd_pm.pm_idle_mutex); | |
888 | ||
889 | switch (mic_ctx->micpm_ctx.idle_state) | |
890 | { | |
891 | case PM_IDLE_STATE_PC0: | |
892 | case PM_IDLE_STATE_LOST: | |
893 | break; | |
894 | case PM_IDLE_STATE_PC3: | |
895 | case PM_IDLE_STATE_PC3_READY: | |
896 | case PM_IDLE_STATE_PC6: | |
897 | { | |
898 | ret = 1; | |
899 | break; | |
900 | } | |
901 | default: | |
902 | ret = 1; | |
903 | } | |
904 | ||
905 | mutex_unlock(&mic_data.dd_pm.pm_idle_mutex); | |
906 | return ret; | |
907 | } | |
908 | ||
909 | /* do_idlestate_exit: | |
910 | * | |
911 | * Initiate idle state exits for nodes specified | |
912 | * by the bitmask. | |
913 | * | |
914 | * mic_ctx: The device context. | |
915 | * get_ref: Set to true if the entity that wants to wake | |
916 | * a node up also wantes to get a reference to the node. | |
917 | * | |
918 | * Returs 0 on success. Appropriate error on failure. | |
919 | * | |
920 | */ | |
921 | int do_idlestate_exit(mic_ctx_t *mic_ctx, bool get_ref) { | |
922 | int err = 0; | |
923 | uint32_t node_id = 0; | |
924 | mic_ctx_t *node_ctx; | |
925 | uint8_t *nodemask_buf; | |
926 | ||
927 | if(!mic_ctx) | |
928 | return -EINVAL; | |
929 | ||
930 | might_sleep(); | |
931 | /* If the idle_state_mutex is already obtained by another thread | |
932 | * try to wakeup the thread which MAY be waiting for REMOVE_NODE | |
933 | * responses. This way, we give priority to idle state exits than | |
934 | * idle state entries. | |
935 | */ | |
936 | if (!mutex_trylock(&mic_data.dd_pm.pm_idle_mutex)) { | |
937 | atomic_inc(&mic_data.dd_pm.wakeup_in_progress); | |
938 | wake_up(&ms_info.mi_disconn_wq); | |
939 | mutex_lock(&mic_data.dd_pm.pm_idle_mutex); | |
940 | atomic_dec(&mic_data.dd_pm.wakeup_in_progress); | |
941 | } | |
942 | ||
943 | nodemask_buf = (uint8_t *)kzalloc(mic_ctx->micpm_ctx.nodemask.len, GFP_KERNEL); | |
944 | if(!nodemask_buf) { | |
945 | PM_DEBUG("Error allocating nodemask buffer\n"); | |
946 | mutex_unlock(&mic_data.dd_pm.pm_idle_mutex); | |
947 | err = ENOMEM; | |
948 | goto abort_node_wake; | |
949 | } | |
950 | ||
951 | if ((err = micscif_get_activeset(mic_get_scifnode_id(mic_ctx), nodemask_buf))) { | |
952 | PM_DEBUG("Node connect failed during Activation set calculation for node\n"); | |
953 | mutex_unlock(&mic_data.dd_pm.pm_idle_mutex); | |
954 | err = -EINVAL; | |
955 | goto free_buf; | |
956 | } | |
957 | ||
958 | print_nodemaskbuf(nodemask_buf); | |
959 | ||
960 | for(node_id = 0; node_id <= ms_info.mi_maxid; node_id++) { | |
961 | if (node_id == SCIF_HOST_NODE) | |
962 | continue; | |
963 | ||
964 | if (!get_nodemask_bit(nodemask_buf, node_id)) | |
965 | continue; | |
966 | ||
967 | node_ctx = get_per_dev_ctx(node_id - 1); | |
968 | if (!node_ctx) { | |
969 | mutex_unlock(&mic_data.dd_pm.pm_idle_mutex); | |
970 | goto free_buf; | |
971 | } | |
972 | ||
973 | switch (node_ctx->micpm_ctx.idle_state) { | |
974 | case PM_IDLE_STATE_PC3: | |
975 | case PM_IDLE_STATE_PC3_READY: | |
976 | if ((err = pm_pc3_exit(node_ctx))) { | |
977 | PM_DEBUG("Wakeup of Node %d failed." | |
978 | "Node to be disconnected",node_id); | |
979 | set_nodemask_bit(nodemask_buf, node_id, 0); | |
980 | node_ctx->micpm_ctx.idle_state = PM_IDLE_STATE_LOST; | |
981 | /* Since node is lost, ref_cnt increment(decement) through the | |
982 | * pm_get(put)_reference interface is prevented by idle_state. | |
983 | * We still need to ensure the ref_cnt iself is reset | |
984 | * back to 0 so that pm_get(put)_reference will work after the | |
985 | * lost node interface recovers the node. */ | |
986 | atomic_set(&node_ctx->micpm_ctx.pm_ref_cnt, 0); | |
987 | } else { | |
988 | if ((mic_ctx == node_ctx) && get_ref) | |
989 | if (atomic_cmpxchg(&mic_ctx->micpm_ctx.pm_ref_cnt, PM_NODE_IDLE, 1) != | |
990 | PM_NODE_IDLE) | |
991 | atomic_inc(&mic_ctx->micpm_ctx.pm_ref_cnt); | |
992 | } | |
993 | break; | |
994 | case PM_IDLE_STATE_PC6: | |
995 | if ((err = pm_pc6_exit(node_ctx))) { | |
996 | PM_DEBUG("Wakeup of Node %d failed." | |
997 | "Node to be disconnected",node_id); | |
998 | set_nodemask_bit(nodemask_buf, node_id, 0); | |
999 | node_ctx->micpm_ctx.idle_state = PM_IDLE_STATE_LOST; | |
1000 | /* Since node is lost, ref_cnt increment(decement) through the | |
1001 | * pm_get(put)_reference interface is prevented by idle_state. | |
1002 | * We still need to ensure the ref_cnt iself is reset | |
1003 | * back to 0 so that pm_get(put)_reference will work after the | |
1004 | * lost node interface recovers the node. */ | |
1005 | atomic_set(&node_ctx->micpm_ctx.pm_ref_cnt, 0); | |
1006 | } else { | |
1007 | if ((mic_ctx == node_ctx) && get_ref) | |
1008 | if (atomic_cmpxchg(&mic_ctx->micpm_ctx.pm_ref_cnt, PM_NODE_IDLE, 1) != | |
1009 | PM_NODE_IDLE) | |
1010 | atomic_inc(&mic_ctx->micpm_ctx.pm_ref_cnt); | |
1011 | } | |
1012 | break; | |
1013 | case PM_IDLE_STATE_PC0: | |
1014 | PM_DEBUG("Node %d is in state %d " | |
1015 | "and already out of package state.\n",node_id, | |
1016 | node_ctx->micpm_ctx.idle_state); | |
1017 | if ((mic_ctx == node_ctx) && get_ref) | |
1018 | if (atomic_cmpxchg(&mic_ctx->micpm_ctx.pm_ref_cnt, PM_NODE_IDLE, 1) != | |
1019 | PM_NODE_IDLE) | |
1020 | atomic_inc(&mic_ctx->micpm_ctx.pm_ref_cnt); | |
1021 | break; | |
1022 | default: | |
1023 | PM_DEBUG("Invalid idle state of node %d." | |
1024 | " State = %d \n", node_id, | |
1025 | node_ctx->micpm_ctx.idle_state); | |
1026 | mutex_unlock(&mic_data.dd_pm.pm_idle_mutex); | |
1027 | err = -ENODEV; | |
1028 | goto free_buf; | |
1029 | } | |
1030 | } | |
1031 | ||
1032 | /* Idle state exit of nodes are complete. | |
1033 | * Set the register state now for those nodes | |
1034 | * that are successfully up. | |
1035 | */ | |
1036 | for(node_id = 0; node_id <= ms_info.mi_maxid; node_id++) { | |
1037 | if (node_id == SCIF_HOST_NODE) | |
1038 | continue; | |
1039 | ||
1040 | if (!get_nodemask_bit(nodemask_buf, node_id)) | |
1041 | continue; | |
1042 | ||
1043 | node_ctx = get_per_dev_ctx(node_id - 1); | |
1044 | if (!node_ctx) { | |
1045 | PM_DEBUG("Failed to retrieve node context."); | |
1046 | continue; | |
1047 | } | |
1048 | ||
1049 | ||
1050 | if (node_ctx->micpm_ctx.idle_state == PM_IDLE_STATE_PC0) | |
1051 | set_host_state(node_ctx, PM_IDLE_STATE_PC0); | |
1052 | } | |
1053 | ||
1054 | mutex_unlock(&mic_data.dd_pm.pm_idle_mutex); | |
1055 | free_buf: | |
1056 | kfree(nodemask_buf); | |
1057 | abort_node_wake: | |
1058 | return err; | |
1059 | } | |
1060 | ||
1061 | int pc6_entry_start(mic_ctx_t *mic_ctx) { | |
1062 | ||
1063 | int err = 0; | |
1064 | ||
1065 | if (mic_ctx->micpm_ctx.idle_state == PM_IDLE_STATE_PC0) { | |
1066 | PM_DEBUG("Node not in PC3\n"); | |
1067 | err = -EFAULT; | |
1068 | goto exit; | |
1069 | } | |
1070 | ||
1071 | mutex_lock(&mic_data.dd_pm.pm_idle_mutex); | |
1072 | ||
1073 | if (mic_ctx->micpm_ctx.idle_state != PM_IDLE_STATE_PC3) { | |
1074 | PM_DEBUG("PC6 transition failed. Node not in PC3\n"); | |
1075 | mutex_unlock(&mic_data.dd_pm.pm_idle_mutex); | |
1076 | err = -EINVAL; | |
1077 | goto exit; | |
1078 | } | |
1079 | ||
1080 | if ((err = pm_pc3_to_pc6_entry(mic_ctx))) { | |
1081 | PM_DEBUG("PC6 transition from PC3 failed for node %d\n", | |
1082 | mic_get_scifnode_id(mic_ctx)); | |
1083 | mutex_unlock(&mic_data.dd_pm.pm_idle_mutex); | |
1084 | goto exit; | |
1085 | } | |
1086 | mutex_unlock(&mic_data.dd_pm.pm_idle_mutex); | |
1087 | exit: | |
1088 | return err; | |
1089 | ||
1090 | } | |
1091 | ||
1092 | /* | |
1093 | * mic_get_scifnode_id: | |
1094 | * | |
1095 | * Function to retrieve node id of a scif node. | |
1096 | * | |
1097 | * mic_ctx: The driver context of the specified node. | |
1098 | * Returns the scif node_id of the specified node. | |
1099 | */ | |
1100 | uint32_t mic_get_scifnode_id(mic_ctx_t *mic_ctx) { | |
1101 | /* NOTE: scif node_id cannot assumed to be a simple increment | |
1102 | * of the bi_id of the driver context. This function is really | |
1103 | * a placeholder for the board_id to node_id conversion that | |
1104 | * we need to do in the host driver. | |
1105 | */ | |
1106 | return (uint32_t)mic_ctx->bi_id + 1; | |
1107 | } |