/*
 * xeon-phi-kernel-module / host / micscif_pm.c
 * (provenance note; see README.md for build/usage instructions)
 */
1/*
2 * Copyright 2010-2017 Intel Corporation.
3 *
4 * This program is free software; you can redistribute it and/or modify
5 * it under the terms of the GNU General Public License, version 2,
6 * as published by the Free Software Foundation.
7 *
8 * This program is distributed in the hope that it will be useful,
9 * but WITHOUT ANY WARRANTY; without even the implied warranty of
10 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
11 * General Public License for more details.
12 *
13 * Disclaimer: The codes contained in these modules may be specific to
14 * the Intel Software Development Platform codenamed Knights Ferry,
15 * and the Intel product codenamed Knights Corner, and are not backward
16 * compatible with other Intel products. Additionally, Intel will NOT
17 * support the codes or instruction set in future products.
18 *
19 * Intel offers no warranty of any kind regarding the code. This code is
20 * licensed on an "AS IS" basis and Intel is not obligated to provide
21 * any support, assistance, installation, training, or other services
22 * of any kind. Intel is also not obligated to provide any updates,
23 * enhancements or extensions. Intel specifically disclaims any warranty
24 * of merchantability, non-infringement, fitness for any particular
25 * purpose, and any other warranty.
26 *
27 * Further, Intel disclaims all liability of any kind, including but
28 * not limited to liability for infringement of any proprietary rights,
29 * relating to the use of the code, even if Intel is notified of the
30 * possibility of such liability. Except as expressly stated in an Intel
31 * license agreement provided with this code and agreed upon with Intel,
32 * no license, express or implied, by estoppel or otherwise, to any
33 * intellectual property rights is granted herein.
34 */
35
36#include "mic_common.h"
37#include "scif.h"
38#include "mic/micscif.h"
39#include "mic/mic_pm.h"
40#include "mic/micveth.h"
41
42extern int set_host_state(mic_ctx_t *mic_ctx, PM_IDLE_STATE state);
43extern int pc6_entry_start(mic_ctx_t *mic_ctx);
44
45/* Function that decrements the count of number of PM clients connected
46 * to the host.
47 */
48void
49micpm_decrement_clients(void)
50{
51 if(unlikely(atomic_dec_return(&mic_data.dd_pm.connected_clients) < 0)) {
52 PM_DEBUG("connected_clients is negative (%d)\n",
53 atomic_read(&mic_data.dd_pm.connected_clients));
54 }
55 return;
56}
57
/* Human-readable names for PM message opcodes, used by
 * micpm_display_message() when PM logging is enabled.
 * NOTE(review): entries are positional -- the order must match the
 * PM_MESSAGE enum (see mic_pm.h); verify when adding opcodes. */
static char *pm_message_types[PM_MESSAGE_MAX+1] = {"PM_MESSAGE_PC3READY",
				"PM_MESSAGE_OPEN",
				"PM_MESSAGE_OPEN_ACK",
				"PM_MESSAGE_CLOSE",
				"PM_MESSAGE_CLOSE_ACK",
				"PM_MESSAGE_TEST",
				"PM_MESSAGE_MAX"};
65void
66micpm_display_message(mic_ctx_t *mic_ctx, void *header, void *msg, const char* label) {
67 pm_msg_header *header_ref;
68 int msg_len;
69 int i=0;
70 char *payload;
71 scif_epd_t epd = mic_ctx->micpm_ctx.pm_epd;
72 header_ref = (pm_msg_header *)header;
73 msg_len = header_ref->len;
74
75 if(!epd)
76 return;
77
78 if(0 <= header_ref->opcode && header_ref->opcode < PM_MESSAGE_MAX) {
79 if(strcmp(label,"SENT")==0) {
80 printk("%s: Msg type %s, SrcNode:SrcPort %d:%d, DestNode:DestPort %d:%d", label,
81 pm_message_types[header_ref->opcode], epd->port.node, epd->port.port,
82 epd->peer.node, epd->peer.port);
83 }
84 else
85 printk("%s: Msg type %s, DestNode:DestPort %d:%d, SrcNode:SrcPort %d:%d", label,
86 pm_message_types[header_ref->opcode], epd->port.node, epd->port.port,
87 epd->peer.node, epd->peer.port);
88 }
89
90
91 if(msg != NULL) {
92 payload = (char *)msg;
93 printk(" Payload");
94 for(i=0;i<msg_len;i++){
95 printk("0x%02x:", payload[i]);
96 }
97 }
98}
99
100int micpm_update_pc6(mic_ctx_t *mic_ctx, bool set)
101{
102
103 int err = 0;
104 if (mic_ctx->micpm_ctx.pm_options.pc6_enabled) {
105 if (set && !mic_ctx->micpm_ctx.pc6_enabled) {
106 mic_ctx->micpm_ctx.pc6_enabled = set;
107 queue_delayed_work(mic_ctx->micpm_ctx.pc6_entry_wq,
108 &mic_ctx->micpm_ctx.pc6_entry_work,
109 mic_ctx->micpm_ctx.pc6_timeout*HZ);
110 }
111 if (set == false) {
112 mic_ctx->micpm_ctx.pc6_enabled = set;
113 micpm_get_reference(mic_ctx, true);
114 micpm_put_reference(mic_ctx);
115 }
116 } else {
117 if (set)
118 err = -EINVAL;
119 else
120 mic_ctx->micpm_ctx.pc6_enabled = set;
121 }
122 return err;
123}
124
125int micpm_update_pc3(mic_ctx_t *mic_ctx, bool set)
126{
127 int err = 0;
128 if (mic_ctx->micpm_ctx.pm_options.pc3_enabled) {
129 if (set) {
130 mic_ctx->micpm_ctx.pc3_enabled = set;
131 } else {
132 mic_ctx->micpm_ctx.pc3_enabled = set;
133 micpm_get_reference(mic_ctx, true);
134 micpm_put_reference(mic_ctx);
135 }
136 } else {
137 if (set)
138 err = -EINVAL;
139 else
140 mic_ctx->micpm_ctx.pc3_enabled = set;
141 }
142 return err;
143}
144
145/*
146 * Wraper to scif_send that takes in the buffer to be sent
147 * as input.
148 */
149int
150mic_pm_send(mic_ctx_t *mic_ctx, void *msg, uint32_t len)
151{
152 int err;
153 scif_epd_t epd;
154
155 if(mic_ctx == NULL) {
156 PM_DEBUG("Mic context not Initialized\n");
157 return -EINVAL;
158 }
159
160 if((msg == NULL) || (len == 0)) {
161 PM_DEBUG("Invalid Parameters\n");
162 return -EINVAL;
163 }
164
165 epd = mic_ctx->micpm_ctx.pm_epd;
166 if(epd == NULL) {
167 PM_DEBUG("Scif Endpoint Undefined\n");
168 return -EINVAL;
169 }
170
171 if ((mic_ctx->micpm_ctx.con_state != PM_CONNECTING) &&
172 (mic_ctx->micpm_ctx.con_state != PM_CONNECTED)) {
173 PM_DEBUG("Endpoint not in connected state\n");
174 return -EINVAL;
175 }
176
177 err = scif_send(epd, msg, len, PM_SEND_MODE);
178 /*scif_send returns the number of bytes returned on success */
179 if(err <= 0) {
180 PM_DEBUG("scif_send to node: %d port: %d failed with error %d\n",
181 epd->peer.node, epd->peer.port, err);
182 } else {
183 PM_DEBUG("Bytes sent = %d\n",err);
184 err = 0;
185 }
186
187 return err;
188}
189
190/*
191 * Wrapper to scif_recv.
192 */
193int
194mic_pm_recv(mic_ctx_t *mic_ctx, void *msg, uint32_t len)
195{
196 int err;
197 scif_epd_t epd;
198
199 if(mic_ctx == NULL) {
200 PM_DEBUG("Mic context not Initialized\n");
201 return -EINVAL;
202 }
203
204 if((msg == NULL) || (len == 0)) {
205 PM_DEBUG("Invalid Parameters\n");
206 return -EINVAL;
207 }
208
209 epd = mic_ctx->micpm_ctx.pm_epd;
210 if(epd == NULL) {
211 PM_DEBUG("Scif Endpoint Undefined\n");
212 return -EINVAL;
213 }
214
215 if ((mic_ctx->micpm_ctx.con_state != PM_CONNECTING) &&
216 (mic_ctx->micpm_ctx.con_state != PM_CONNECTED)) {
217 PM_DEBUG("Endpoint not in connected state\n");
218 return -EINVAL;
219 }
220
221 err = scif_recv(epd, msg, len, PM_RECV_MODE);
222
223 if(err <= 0) {
224 pr_debug("scif_recv failed with error %d\n", err);
225 if(err == 0) {
226 /*0 bytes were sent */
227 err = -ENXIO;
228 }
229 } else {
230 PM_DEBUG("Bytes received = %d\n",err);
231 err = 0;
232 }
233 return err;
234}
235
236/*
237 * Function to send a Power Management message over scif. Gets the message type
238 * as input and builds a message header. It then creates a single message buffer
239 * with this header and body and sends it to the receiving node.
240 */
241int
242mic_pm_send_msg(mic_ctx_t *mic_ctx, PM_MESSAGE type, void *msg, uint32_t len)
243{
244 pm_msg_header header;
245 char *send_msg = NULL;
246 int err = 0;
247
248 header.opcode = type;
249 header.len = len;
250
251 send_msg = kmalloc(len + sizeof(pm_msg_header), GFP_KERNEL);
252 if(send_msg == NULL) {
253 PM_DEBUG("error allocating memory");
254 err = -ENOMEM;
255 return err;
256 }
257 memcpy(send_msg , &header, sizeof(pm_msg_header));
258 if((len != 0) && (msg != NULL)) {
259 memcpy((send_msg + sizeof(pm_msg_header)), msg, len);
260 }
261
262 if(mic_data.dd_pm.enable_pm_logging) {
263 if((len != 0) && (msg != NULL))
264 micpm_display_message(mic_ctx,send_msg,send_msg+sizeof(pm_msg_header),"SENT");
265 else
266 micpm_display_message(mic_ctx,send_msg,NULL,"SENT");
267 }
268 err = mic_pm_send(mic_ctx, send_msg, len + sizeof(pm_msg_header));
269 kfree(send_msg);
270 return err;
271}
272
273/*
274 * Handler invoked when receiving a PC3 ready message.
275 */
276int
277handle_pc3_ready(mic_ctx_t *mic_ctx)
278{
279 int err = 0;
280 PM_ENTRY;
281 err = pm_pc3_entry(mic_ctx);
282 PM_EXIT;
283 return err;
284}
285
286/*
287 * Handler invoked when receiving the latency response message
288 */
289int
290handle_open_ack(mic_ctx_t *mic_ctx, pm_msg_pm_options *msg)
291{
292 int err = 0;
293 PM_ENTRY;
294
295 if ((mic_ctx == NULL) || (msg == NULL)) {
296 err = EINVAL;
297 goto inval;
298 }
299
300 if ((msg->version.major_version != PM_MAJOR_VERSION) ||
301 (msg->version.minor_version != PM_MINOR_VERSION)) {
302 printk(KERN_ERR "PM Driver version mismatch. "
303 "Expected version: %d.%d Received version %d.%d\n",
304 PM_MAJOR_VERSION, PM_MINOR_VERSION,
305 msg->version.major_version, msg->version.minor_version);
306 schedule_work(&mic_ctx->micpm_ctx.pm_close);
307 goto inval;
308 }
309
310 mic_ctx->micpm_ctx.pm_options.pc3_enabled = msg->pc3_enabled;
311 mic_ctx->micpm_ctx.pm_options.pc6_enabled = msg->pc6_enabled;
312
313 mic_ctx->micpm_ctx.pc3_enabled =
314 (mic_ctx->micpm_ctx.pm_options.pc3_enabled)? true : false;
315 mic_ctx->micpm_ctx.pc6_enabled =
316 (mic_ctx->micpm_ctx.pm_options.pc6_enabled)? true : false;
317
318 mic_ctx->micpm_ctx.con_state = PM_CONNECTED;
319
320inval:
321 PM_EXIT;
322 return err;
323}
324
325/*
326 * Message handler invoked by the per device receive workqueue when it receives
327 * a message from the device.
328 */
329int
330mic_pm_handle_message(mic_ctx_t *mic_ctx, pm_recv_msg_t *recv_msg)
331{
332 int res = 0;
333
334 if(mic_ctx == NULL) {
335 return -EINVAL;
336 }
337
338 if(recv_msg == NULL) {
339 PM_DEBUG("Undefined message\n");
340 return -EINVAL;
341 }
342
343 switch(recv_msg->msg_header.opcode) {
344 case PM_MESSAGE_PC3READY:
345 res = handle_pc3_ready(mic_ctx);
346 break;
347 case PM_MESSAGE_OPEN_ACK:
348 /*Size of the payload needs to be equal to what the
349 * host is trying to cast it to
350 */
351 if (sizeof(pm_msg_pm_options) != recv_msg->msg_header.len) {
352 printk(KERN_ERR "Incompatible PM message. Opcode = %d\n",
353 recv_msg->msg_header.opcode);
354 return -EINVAL;
355 }
356 res = handle_open_ack(mic_ctx,
357 ((pm_msg_pm_options *) recv_msg->msg_body));
358 break;
359 default:
360 printk(KERN_ERR "Unknown PM message. Opcode = %d\n",
361 recv_msg->msg_header.opcode);
362 break;
363 }
364 return res;
365}
366
367/*
368 * retrieve_msg:
369 *
370 * Retrieve message from the head of list.
371 * @mic_ctx: The device context
372 * Returns the retrieved message.
373 */
374pm_recv_msg_t *
375pm_retrieve_msg(mic_ctx_t *mic_ctx) {
376
377 pm_recv_msg_t *recv_msg = NULL;
378 struct list_head *pos, *tmpq;
379 bool msg_found = false;
380
381 mutex_lock(&mic_ctx->micpm_ctx.msg_mutex);
382 if (!list_empty_careful(&mic_ctx->micpm_ctx.msg_list))
383 {
384 list_for_each_safe(pos, tmpq, &mic_ctx->micpm_ctx.msg_list) {
385 recv_msg = list_entry(pos, pm_recv_msg_t, msg);
386 /*Do not touch the message if its a test message */
387 if (recv_msg->msg_header.opcode != PM_MESSAGE_TEST) {
388 list_del(&recv_msg->msg);
389 msg_found = true;
390 break;
391 }
392 }
393 }
394
395 if (msg_found == false)
396 recv_msg = NULL;
397
398 mutex_unlock(&mic_ctx->micpm_ctx.msg_mutex);
399 return recv_msg;
400}
401
402/*
403 * pm_process_msg_list:
404 *
405 * Process the message list of a node and handle each message in the list.
406 * @mic_ctx[in]: The deive context whose message list is to be processed
407 * Returns: None
408 */
409void
410pm_process_msg_list(mic_ctx_t *mic_ctx) {
411
412 pm_recv_msg_t *process_msg = NULL;
413 int ret = 0;
414
415 if(mic_ctx == NULL) {
416 PM_DEBUG("Cannot get device handle \n");
417 return;
418 }
419
420 while(!list_empty(&mic_ctx->micpm_ctx.msg_list)) {
421 process_msg = pm_retrieve_msg(mic_ctx);
422 if(!process_msg) {
423 PM_DEBUG("No Message to process.\n");
424 return;
425 }
426
427 ret = mic_pm_handle_message(mic_ctx, process_msg);
428 if(ret) {
429 PM_DEBUG("Power Management message not processed"
430 " successfully.\n");
431 }
432
433 if(process_msg->msg_body != NULL) {
434 kfree(process_msg->msg_body);
435 }
436 kfree(process_msg);
437 }
438}
439
440/*
441 * Retrieves each message from the message list and calls the handler
442 * for the same. After the handler returns, the message is removed
443 * from the list and deleted.
444 */
445static void
446mic_pm_msg_handle_work(struct work_struct *msg_handle_work)
447{
448 pm_wq_t *pm_wq = container_of(msg_handle_work, pm_wq_t, work);
449 micpm_ctx_t *pm_ctx = container_of(pm_wq, micpm_ctx_t, handle_msg);
450 mic_ctx_t *mic_ctx = container_of(pm_ctx, mic_ctx_t, micpm_ctx);
451 pm_process_msg_list(mic_ctx);
452 return;
453}
454
455static void
456pc6_entry_work(struct work_struct *work)
457{
458 int err;
459 micpm_ctx_t *pm_ctx =
460 container_of(to_delayed_work(work),
461 micpm_ctx_t, pc6_entry_work);
462 mic_ctx_t *mic_ctx = container_of(pm_ctx, mic_ctx_t, micpm_ctx);
463
464 err = pc6_entry_start(mic_ctx);
465 if (err == -EAGAIN)
466 queue_delayed_work(mic_ctx->micpm_ctx.pc6_entry_wq,
467 &mic_ctx->micpm_ctx.pc6_entry_work,
468 mic_ctx->micpm_ctx.pc6_timeout*HZ);
469 return;
470}
471
472/*
473 * Called when a device creates a PM connection to Host. There can be
474 * only one PM connection between Host and a device. The function checks
475 * for an existing connection and rejects this new request if present.
476 */
477static void
478mic_pm_accept_work(struct work_struct *work)
479{
480 scif_epd_t newepd;
481 struct scif_portID portID;
482 int err;
483 uint16_t i;
484 mic_ctx_t *mic_ctx;
485 mic_data_t *mic_data_p = &mic_data;
486
487 PM_DEBUG("Accept thread waiting for new PM connections\n");
488 err = scif_accept(mic_data.dd_pm.epd, &portID, &newepd, SCIF_ACCEPT_SYNC);
489 if (err == -EBUSY || err == -ENODEV) {
490 PM_DEBUG("scif_accept error %d\n", err);
491 goto continue_accepting;
492 }
493 else if (err < 0) {
494 PM_DEBUG("scif_accept failed with errno %d\n", err);
495 goto exit;
496
497 }
498 PM_DEBUG("Connection request received. \n");
499
500 mutex_lock(&mic_data.dd_pm.pm_accept_mutex);
501
502 if (newepd->peer.node == SCIF_HOST_NODE) {
503 /* Reject connection request from HOST itself */
504 PM_DEBUG("PM: Peer node cannot be HOST. Peer Node = %d Peer Port = %d",
505 newepd->peer.node, newepd->peer.port);
506 scif_close(newepd);
507 mutex_unlock(&mic_data.dd_pm.pm_accept_mutex);
508 goto continue_accepting;
509 }
510
511 /*Only one Power Management connection per node. */
512 for (i = 0; i < mic_data_p->dd_numdevs; i++) {
513 mic_ctx = get_per_dev_ctx(i);
514 if (mic_ctx != NULL) {
515 if (mic_ctx->micpm_ctx.pm_epd != NULL) {
516 if (mic_ctx->micpm_ctx.pm_epd->peer.node == newepd->peer.node) {
517 PM_DEBUG("There is already Power Management connection"
518 " established from this node. Rejecting request.\n");
519 PM_DEBUG("Peer Node = %d, Peer Port = %d\n",
520 mic_ctx->micpm_ctx.pm_epd->peer.node,
521 mic_ctx->micpm_ctx.pm_epd->peer.port);
522 scif_close(newepd);
523 mutex_unlock(&mic_data.dd_pm.pm_accept_mutex);
524 goto continue_accepting;
525 }
526 }
527 }
528
529 }
530 mutex_unlock(&mic_data.dd_pm.pm_accept_mutex);
531 mic_ctx = get_per_dev_ctx(newepd->peer.node -1);
532 mic_ctx->micpm_ctx.pm_epd = newepd;
533 micpm_start(mic_ctx);
534
535
536continue_accepting:
537 mutex_lock(&mic_data.dd_pm.pm_accept_mutex);
538 queue_work(mic_data.dd_pm.accept.wq,
539 &mic_data.dd_pm.accept.work);
540 mutex_unlock(&mic_data.dd_pm.pm_accept_mutex);
541exit:
542 return;
543}
544
545/*
546 * Work item function that waits for incoming PM messages from
547 * a node. The function adds the message to a per device message
548 * list that is later processed by the message handler.
549 */
550static void
551mic_pm_recv_work(struct work_struct *recv_work)
552{
553 int err = 0;
554 int size = 0;
555
556 pm_wq_t *pm_wq = container_of(recv_work, pm_wq_t, work);
557 micpm_ctx_t *pm_ctx = container_of(pm_wq, micpm_ctx_t, recv);
558 mic_ctx_t *mic_ctx = container_of(pm_ctx, mic_ctx_t, micpm_ctx);
559 pm_recv_msg_t *recv_msg = NULL;
560
561 if (mic_ctx == NULL || pm_ctx == NULL) {
562 PM_DEBUG("Error retrieving driver context \n");
563 goto unqueue;
564 }
565
566 size = sizeof(pm_msg_header);
567 recv_msg = (void *)kmalloc(sizeof(pm_recv_msg_t), GFP_KERNEL);
568
569 if (recv_msg == NULL) {
570 PM_DEBUG("Error allocating memory to save receive message.\n");
571 goto unqueue;
572 }
573 INIT_LIST_HEAD(&recv_msg->msg);
574 recv_msg->msg_body = NULL;
575
576 /*Get the header */
577 err = mic_pm_recv(mic_ctx, &recv_msg->msg_header, size);
578 if (err < 0) {
579 PM_DEBUG("Error in scif_recv while waiting for PM header message.\n");
580 if (err == -ECONNRESET) {
581 /*Remote node is not in a connected state. */
582 schedule_work(&mic_ctx->micpm_ctx.pm_close);
583 }
584 goto unqueue;
585
586 }
587
588 if(recv_msg->msg_header.len != 0) {
589 PM_DEBUG("Retrieving %d bytes of message body\n", recv_msg->msg_header.len);
590 recv_msg->msg_body = (void *)kmalloc((sizeof(char) * recv_msg->msg_header.len), GFP_KERNEL);
591 if (recv_msg->msg_body == NULL) {
592 PM_DEBUG("Error allocating memory to receive PM Message\n");
593 goto unqueue;
594 }
595 err = mic_pm_recv(mic_ctx, recv_msg->msg_body, recv_msg->msg_header.len);
596 if (err < 0) {
597 PM_DEBUG("Error in scif_recv while waiting for PM message body\n");
598 if (err == -ECONNRESET) {
599 /*Remote node is not in a connected state. */
600 schedule_work(&mic_ctx->micpm_ctx.pm_close);
601 }
602 goto unqueue;
603 }
604 }
605
606 if(mic_data.dd_pm.enable_pm_logging) {
607 micpm_display_message(mic_ctx,&recv_msg->msg_header,
608 recv_msg->msg_body,"RECV");
609 }
610
611 if ((recv_msg->msg_header.opcode != PM_MESSAGE_CLOSE) &&
612 ((recv_msg->msg_header.opcode != PM_MESSAGE_CLOSE_ACK))){
613 PM_DEBUG("Adding received message from node %d to list.\n",
614 mic_ctx->bi_id+1);
615 mutex_lock(&mic_ctx->micpm_ctx.msg_mutex);
616 list_add_tail(&recv_msg->msg , &mic_ctx->micpm_ctx.msg_list);
617 mutex_unlock(&mic_ctx->micpm_ctx.msg_mutex);
618
619 if(likely(recv_msg->msg_header.opcode != PM_MESSAGE_TEST)) {
620 PM_DEBUG("Queue message handler work for node: %d\n",mic_ctx->bi_id+1);
621 queue_work(mic_ctx->micpm_ctx.handle_msg.wq,
622 &mic_ctx->micpm_ctx.handle_msg.work);
623 }
624
625 queue_work(mic_ctx->micpm_ctx.recv.wq,
626 &mic_ctx->micpm_ctx.recv.work);
627 } else {
628
629 if (recv_msg->msg_header.opcode == PM_MESSAGE_CLOSE) {
630 mic_pm_send_msg(mic_ctx , PM_MESSAGE_CLOSE_ACK, NULL, 0);
631 mic_ctx->micpm_ctx.con_state = PM_DISCONNECTING;
632 schedule_work(&mic_ctx->micpm_ctx.pm_close);
633 } else {
634 mic_ctx->micpm_ctx.con_state = PM_DISCONNECTING;
635 wake_up(&mic_ctx->micpm_ctx.disc_wq);
636 }
637 goto unqueue;
638 }
639 return;
640unqueue:
641 if (recv_msg) {
642 if (recv_msg->msg_body)
643 kfree(recv_msg->msg_body);
644 kfree(recv_msg);
645 }
646 return;
647}
648
649/*
650 * Work item to handle closing of PM end point to a device and all the
651 * related receive workqueues.
652 */
653static void
654mic_pm_close_work(struct work_struct *work)
655{
656 micpm_ctx_t *pm_ctx = container_of(work, micpm_ctx_t, pm_close);
657 mic_ctx_t *mic_ctx = container_of(pm_ctx, mic_ctx_t, micpm_ctx);
658 micpm_stop(mic_ctx);
659 return;
660}
661
662static void
663mic_pm_resume_work(struct work_struct *resume_work)
664{
665 int err;
666 pm_wq_t *pm_wq = container_of(resume_work, pm_wq_t, work);
667 micpm_ctx_t *pm_ctx = container_of(pm_wq, micpm_ctx_t, resume);
668 mic_ctx_t *mic_ctx = container_of(pm_ctx, mic_ctx_t, micpm_ctx);
669
670 if (mic_ctx != NULL) {
671 err = pm_start_device(mic_ctx);
672 if (err) {
673 PM_DEBUG("Failed to start device %d after resume\n",
674 mic_ctx->bi_id);
675 }
676 } else {
677 PM_DEBUG("Error retrieving node context.\n");
678 }
679}
680
681/* Create PM specific workqueues during driver probe.
682 *
683 * Receive workqueue will store the received message and kick-off
684 * a message handler workqueue which will process them.
685 *
686 * Resume workqueue handles the task of booting uOS rduring
687 * OSPM resume/restore phase.
688 */
689int
690setup_pm_workqueues(mic_ctx_t *mic_ctx)
691{
692 int err = 0;
693
694 if(!mic_ctx) {
695 PM_DEBUG("Failed to retrieve device context\n");
696 err = -EINVAL;
697 goto err;
698 }
699
700 /* setup resume wq */
701 snprintf(mic_ctx->micpm_ctx.resume.wq_name,
702 sizeof(mic_ctx->micpm_ctx.resume.wq_name),
703 "PM_RESUME_WQ %d", mic_get_scifnode_id(mic_ctx));
704
705 if (!(mic_ctx->micpm_ctx.resume.wq
706 = __mic_create_singlethread_workqueue(
707 mic_ctx->micpm_ctx.resume.wq_name))) {
708 err = -ENOMEM;
709 goto err;
710 }
711
712 /* Setup Receive wq */
713 snprintf(mic_ctx->micpm_ctx.recv.wq_name,
714 sizeof(mic_ctx->micpm_ctx.recv.wq_name),
715 "RECV_WORK_Q %d", mic_get_scifnode_id(mic_ctx));
716
717 if (!(mic_ctx->micpm_ctx.recv.wq
718 = __mic_create_singlethread_workqueue(
719 mic_ctx->micpm_ctx.recv.wq_name))) {
720 err = -ENOMEM;
721 goto err;
722 }
723
724 /* Setup Msg handler wq */
725 snprintf(mic_ctx->micpm_ctx.handle_msg.wq_name,
726 sizeof(mic_ctx->micpm_ctx.handle_msg.wq_name),
727 "MSG_HANDLER_WQ %d", mic_get_scifnode_id(mic_ctx));
728
729 if (!(mic_ctx->micpm_ctx.handle_msg.wq
730 = __mic_create_singlethread_workqueue(
731 mic_ctx->micpm_ctx.handle_msg.wq_name))) {
732 err = -ENOMEM;
733 goto err;
734 }
735
736 /* Setup pc6 entry wq */
737 snprintf(mic_ctx->micpm_ctx.pc6_wq_name,
738 sizeof(mic_ctx->micpm_ctx.pc6_wq_name),
739 "PC6_WORK_Q %d", mic_get_scifnode_id(mic_ctx));
740
741 if (!(mic_ctx->micpm_ctx.pc6_entry_wq
742 = __mic_create_singlethread_workqueue(
743 mic_ctx->micpm_ctx.pc6_wq_name))) {
744 err = -ENOMEM;
745 goto err;
746 }
747 INIT_WORK(&mic_ctx->micpm_ctx.recv.work, mic_pm_recv_work);
748 INIT_WORK(&mic_ctx->micpm_ctx.handle_msg.work, mic_pm_msg_handle_work);
749 INIT_WORK(&mic_ctx->micpm_ctx.pm_close, mic_pm_close_work);
750 INIT_WORK(&mic_ctx->micpm_ctx.resume.work, mic_pm_resume_work);
751 INIT_DELAYED_WORK(&mic_ctx->micpm_ctx.pc6_entry_work, pc6_entry_work);
752
753err:
754 return err;
755}
756/*Power Management Initialization function. Sets up SCIF
757 * end points and accept threads.
758 */
759int micpm_init()
760{
761 scif_epd_t epd;
762 int con_port;
763 int err = 0;
764
765 epd = scif_open();
766 if (epd == SCIF_OPEN_FAILED || epd == NULL) {
767 PM_DEBUG("scif_open failed\n");
768 return -1;
769 }
770
771 if ((con_port = scif_bind(epd, SCIF_PM_PORT_0)) < 0) {
772 PM_DEBUG("scif_bind to port failed with error %d\n", con_port);
773 err = con_port;
774 goto exit_close;
775 }
776
777 /*No real upper limit on number of connections.
778 Once scif_listen accepts 0 as an acceptable parameter for max
779 connections(to mean tht there is no upper limit), change this. */
780 if ((err = scif_listen(epd, 100)) < 0) {
781 PM_DEBUG("Listen ioctl failed with error %d\n", err);
782 goto exit_close;
783 }
784 mic_data.dd_pm.epd = epd;
785
786 snprintf(mic_data.dd_pm.accept.wq_name,
787 sizeof(mic_data.dd_pm.accept.wq_name),"PM ACCEPT");
788
789 mic_data.dd_pm.accept.wq =
790 __mic_create_singlethread_workqueue(mic_data.dd_pm.accept.wq_name);
791 if (!mic_data.dd_pm.accept.wq){
792 err = -ENOMEM;
793 PM_DEBUG("create workqueue returned null\n");
794 goto exit_close;
795 }
796 INIT_WORK(&mic_data.dd_pm.accept.work, mic_pm_accept_work);
797 mutex_init (&mic_data.dd_pm.pm_accept_mutex);
798 mutex_init (&mic_data.dd_pm.pm_idle_mutex);
799 atomic_set(&mic_data.dd_pm.connected_clients, 0);
800
801 /*Add work to the work queue */
802 queue_work(mic_data.dd_pm.accept.wq,
803 &mic_data.dd_pm.accept.work);
804 mic_data.dd_pm.enable_pm_logging = 0;
805 atomic_set(&mic_data.dd_pm.wakeup_in_progress, 0);
806
807 micpm_dbg_parent_init();
808
809 return err;
810
811exit_close:
812 scif_close(epd);
813 return err;
814}
815
816/*
817 * Close the SCIF acceptor endpoint and uninit a lot of driver level
818 * data structures including accept threads,
819 */
820void
821micpm_uninit(void)
822{
823 int err;
824 scif_epd_t epd = mic_data.dd_pm.epd;
825
826 if(atomic_read(&mic_data.dd_pm.connected_clients) > 0) {
827 PM_DEBUG("connected_clients is nonzero (%d)\n",
828 atomic_read(&mic_data.dd_pm.connected_clients));
829 }
830 err = scif_close(epd);
831 if (err != 0) {
832 PM_DEBUG("Scif_close failed with error %d\n",err);
833 }
834
835 if (mic_data.dd_pm.accept.wq != NULL) {
836 PM_DEBUG("Flushing accept workqueue\n");
837 flush_workqueue(mic_data.dd_pm.accept.wq);
838 destroy_workqueue(mic_data.dd_pm.accept.wq);
839 mic_data.dd_pm.accept.wq = NULL;
840 }
841
842 mutex_destroy(&mic_data.dd_pm.pm_accept_mutex);
843 mutex_destroy(&mic_data.dd_pm.pm_idle_mutex);
844
845 debugfs_remove_recursive(mic_data.dd_pm.pmdbgparent_dir);
846
847}
848
849/*
850 * Open the Per device Power Management context.
851 */
852int
853micpm_probe(mic_ctx_t * mic_ctx) {
854
855 int err = 0;
856
857 mic_ctx->micpm_ctx.pm_epd = NULL;
858 mic_ctx->micpm_ctx.idle_state = PM_IDLE_STATE_PC0;
859 mic_ctx->micpm_ctx.recv.wq = NULL;
860 mic_ctx->micpm_ctx.handle_msg.wq = NULL;
861 mic_ctx->micpm_ctx.mic_suspend_state = MIC_RESET;
862 mic_ctx->micpm_ctx.pc3_enabled = true;
863 mic_ctx->micpm_ctx.pc6_enabled = true;
864 mic_ctx->micpm_ctx.pm_options.pc3_enabled = 0;
865 mic_ctx->micpm_ctx.pm_options.pc6_enabled = 0;
866
867 if ((err = setup_pm_workqueues(mic_ctx)))
868 goto err;
869
870 mutex_init (&mic_ctx->micpm_ctx.msg_mutex);
871 INIT_LIST_HEAD(&mic_ctx->micpm_ctx.msg_list);
872 init_waitqueue_head(&mic_ctx->micpm_ctx.disc_wq);
873 atomic_set(&mic_ctx->micpm_ctx.pm_ref_cnt, 0);
874 mic_ctx->micpm_ctx.pc6_timeout = PC6_TIMER;
875
876 /* create debugfs entries*/
877 micpm_dbg_init(mic_ctx);
878
879err:
880 return err;
881}
882
883int
884micpm_remove(mic_ctx_t * mic_ctx) {
885
886 debugfs_remove_recursive(mic_ctx->micpm_ctx.pmdbg_dir);
887
888 if (mic_ctx->micpm_ctx.resume.wq != NULL) {
889 destroy_workqueue(mic_ctx->micpm_ctx.resume.wq);
890 mic_ctx->micpm_ctx.resume.wq = NULL;
891 }
892
893 if(mic_ctx->micpm_ctx.pc6_entry_wq != NULL) {
894 destroy_workqueue(mic_ctx->micpm_ctx.pc6_entry_wq);
895 mic_ctx->micpm_ctx.pc6_entry_wq = NULL;
896 }
897
898 if(mic_ctx->micpm_ctx.recv.wq != NULL) {
899 destroy_workqueue(mic_ctx->micpm_ctx.recv.wq);
900 mic_ctx->micpm_ctx.recv.wq = NULL;
901 }
902
903 if(mic_ctx->micpm_ctx.handle_msg.wq != NULL) {
904 destroy_workqueue(mic_ctx->micpm_ctx.handle_msg.wq);
905 mic_ctx->micpm_ctx.handle_msg.wq = NULL;
906 }
907
908 micpm_nodemask_uninit(mic_ctx);
909
910 mutex_destroy(&mic_ctx->micpm_ctx.msg_mutex);
911 return 0;
912}
913
914int
915micpm_start(mic_ctx_t *mic_ctx) {
916
917 int ref_cnt;
918 mic_ctx->micpm_ctx.con_state = PM_CONNECTING;
919
920 /* queue receiver */
921 queue_work(mic_ctx->micpm_ctx.recv.wq,
922 &mic_ctx->micpm_ctx.recv.work);
923
924 atomic_inc(&mic_data.dd_pm.connected_clients);
925 if ((ref_cnt = atomic_read(&mic_ctx->micpm_ctx.pm_ref_cnt)))
926 printk("Warning: PM ref_cnt is non-zero during start. "
927 "ref_cnt = %d PM features may not work as expected\n",
928 ref_cnt);
929 mic_ctx->micpm_ctx.idle_state = PM_IDLE_STATE_PC0;
930 set_host_state(mic_ctx, PM_IDLE_STATE_PC0);
931 return mic_pm_send_msg(mic_ctx , PM_MESSAGE_OPEN, NULL, 0);
932}
933
/*
 * Close the per device Power management context here.
 * It does various things such as: closing scif endpoints,
 * delete pending work items and wait for those that are
 * executing to complete, delete pending messages in the
 * message list, delete pending timers and wait for running
 * timers to complete. The function can block.
 */
int
micpm_stop(mic_ctx_t *mic_ctx) {

	int err = 0;
	int node_lost = 0;
	if(mic_ctx == NULL) {
		PM_DEBUG("Mic context not Initialized\n");
		return -EINVAL;
	}

	/* Take a PM reference so the card cannot (re-)enter a package
	 * idle state while we tear the connection down. Failure here
	 * typically means the node is already lost; remember that so we
	 * do not drop a reference we never obtained. */
	if ((micpm_get_reference(mic_ctx, true))) {
		PM_DEBUG("get_reference failed. Node may be lost\n");
		node_lost = 1;
	}

	/* pm_accept_mutex also serializes against the acceptor work so a
	 * new connection cannot race with this teardown. */
	mutex_lock(&mic_data.dd_pm.pm_accept_mutex);
	/* If still connected (and the card alive), attempt an orderly
	 * CLOSE/CLOSE_ACK handshake before dropping the endpoint. */
	if ((mic_ctx->micpm_ctx.con_state == PM_CONNECTED) &&
		(mic_ctx->state != MIC_LOST)) {
		if (!mic_pm_send_msg(mic_ctx, PM_MESSAGE_CLOSE, NULL, 0)) {
			/* wait_event_timeout() returns 0 on timeout; the
			 * recv work sets PM_DISCONNECTING on CLOSE_ACK. */
			err = wait_event_timeout(
				mic_ctx->micpm_ctx.disc_wq,
				mic_ctx->micpm_ctx.con_state == PM_DISCONNECTING,
				NODE_ALIVE_TIMEOUT);
			if (!err) {
				PM_DEBUG("Timed out waiting CLOSE ACK"
					" from node.\n");
			}
		}
	}

	if(mic_ctx->micpm_ctx.pm_epd != NULL) {
		PM_DEBUG("Power Management: Closing connection to"
			" node: %d port:%d\n", mic_ctx->micpm_ctx.pm_epd->peer.node,
			mic_ctx->micpm_ctx.pm_epd->peer.port);
		/* NOTE: err is reused here; the final return value is the
		 * scif_close() result, not the handshake wait result. */
		err = scif_close(mic_ctx->micpm_ctx.pm_epd);
		if(err!= 0)
			PM_DEBUG("Scif_close failed with error %d\n",err);
		mic_ctx->micpm_ctx.pm_epd = NULL;
		micpm_decrement_clients();
	}
	mic_ctx->micpm_ctx.con_state = PM_DISCONNECTED;
	mic_ctx->micpm_ctx.idle_state = PM_IDLE_STATE_PC0;
	/* Drain all PM work so nothing touches the endpoint after this
	 * point, and stop any pending PC6 entry attempt. */
	flush_workqueue(mic_ctx->micpm_ctx.resume.wq);
	flush_workqueue(mic_ctx->micpm_ctx.recv.wq);
	flush_workqueue(mic_ctx->micpm_ctx.handle_msg.wq);
	cancel_delayed_work_sync(&mic_ctx->micpm_ctx.pc6_entry_work);

	/* Process messages in message queue */
	pm_process_msg_list(mic_ctx);

	if (!node_lost)
		micpm_put_reference(mic_ctx);
	mutex_unlock(&mic_data.dd_pm.pm_accept_mutex);
	return err;
}
997
998/*
999 * Function to load the uOS and start all the driver components
1000 * after a resume/restore operation
1001 */
1002int
1003pm_start_device(mic_ctx_t *mic_ctx)
1004{
1005 if (!mic_ctx) {
1006 PM_DEBUG("Error retreving driver context\n");
1007 return 0;
1008 }
1009
1010 PM_DEBUG("Resume MIC device:%d\n", mic_ctx->bi_id);
1011 /* Make sure the Power reset during Resume/Restore is complete*/
1012 adapter_wait_reset(mic_ctx);
1013 wait_for_reset(mic_ctx);
1014
1015 /*Perform software reset */
1016 adapter_reset(mic_ctx, RESET_WAIT, !RESET_REATTEMPT);
1017 wait_for_reset(mic_ctx);
1018
1019 /* Boot uOS only if it was online before suspend */
1020 if (MIC_ONLINE == mic_ctx->micpm_ctx.mic_suspend_state) {
1021 if(adapter_start_device(mic_ctx)) {
1022 PM_DEBUG("booting uos... failed\n");
1023 }
1024 }
1025
1026 return 0;
1027}
1028
1029/*
1030 * Function to stop all the driver components and unload the uOS
1031 * during a suspend/hibernate operation
1032 */
1033int
1034pm_stop_device(mic_ctx_t *mic_ctx)
1035{
1036 if (!mic_ctx) {
1037 PM_DEBUG("Error retreving driver context\n");
1038 return 0;
1039 }
1040
1041 mic_ctx->micpm_ctx.mic_suspend_state = mic_ctx->state;
1042
1043 PM_DEBUG("Suspend MIC device:#%d\n", mic_ctx->bi_id);
1044 if (MIC_ONLINE == mic_ctx->micpm_ctx.mic_suspend_state) {
1045 adapter_shutdown_device(mic_ctx);
1046 if (!wait_for_shutdown_and_reset(mic_ctx)) {
1047 /* Shutdown failed. Fall back on forced reset */
1048 adapter_stop_device(mic_ctx, RESET_WAIT, !RESET_REATTEMPT);
1049 wait_for_reset(mic_ctx);
1050 }
1051 }
1052 else {
1053 /* If card is in any state but ONLINE, make sure card stops */
1054 adapter_stop_device(mic_ctx, RESET_WAIT, !RESET_REATTEMPT);
1055 wait_for_reset(mic_ctx);
1056 }
1057
1058 mutex_lock(&mic_ctx->state_lock);
1059 mic_ctx->state = MIC_RESET;
1060 mutex_unlock(&mic_ctx->state_lock);
1061 return 0;
1062}