Initial commit of files contained in `mpss-modules-3.8.6.tar.bz2` for Intel Xeon...
[xeon-phi-kernel-module] / vnet / micveth_dma.c
/*
 * Copyright 2010-2017 Intel Corporation.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License, version 2,
 * as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * General Public License for more details.
 *
 * Disclaimer: The codes contained in these modules may be specific to
 * the Intel Software Development Platform codenamed Knights Ferry,
 * and the Intel product codenamed Knights Corner, and are not backward
 * compatible with other Intel products. Additionally, Intel will NOT
 * support the codes or instruction set in future products.
 *
 * Intel offers no warranty of any kind regarding the code. This code is
 * licensed on an "AS IS" basis and Intel is not obligated to provide
 * any support, assistance, installation, training, or other services
 * of any kind. Intel is also not obligated to provide any updates,
 * enhancements or extensions. Intel specifically disclaims any warranty
 * of merchantability, non-infringement, fitness for any particular
 * purpose, and any other warranty.
 *
 * Further, Intel disclaims all liability of any kind, including but
 * not limited to liability for infringement of any proprietary rights,
 * relating to the use of the code, even if Intel is notified of the
 * possibility of such liability. Except as expressly stated in an Intel
 * license agreement provided with this code and agreed upon with Intel,
 * no license, express or implied, by estoppel or otherwise, to any
 * intellectual property rights is granted herein.
 */

#include <linux/ip.h>
#include <linux/tcp.h>
#include <linux/list.h>
#include <linux/circ_buf.h>
#include <linux/reboot.h>
#include "mic_common.h"
#include "mic/micveth_dma.h"
#include "mic/mic_macaddr.h"

/* TODO: Clean up shutdown, let DMAs drain */

#ifndef HOST
#define SBOX_SDBIC0_DBREQ_BIT 0x80000000
#define SBOX_MMIO_LENGTH (64 * 1024)
#endif
#define STOP_WAIT_TIMEOUT (4 * HZ)

#ifndef HOST
static mic_ctx_t mic_ctx_g;
#endif

struct micvnet micvnet;


static void micvnet_send_intr(struct micvnet_info *vnet_info);
static int micvnet_init_msg_rings(struct micvnet_info *vnet_info);
static int micvnet_init_rx_skb_send_msg(struct micvnet_info *vnet_info);
static void micvnet_send_add_dma_buffer_messages(struct micvnet_info *vnet_info);
static void micvnet_stop_ws(struct work_struct *work);
static void micvnet_start_ws(struct work_struct *work);
int get_sbox_irq(int index);

static __always_inline mic_ctx_t *
vnet_to_ctx(struct micvnet_info *vnet_info)
{
	return vnet_info->mic_ctx;
}

static __always_inline void
micvnet_wake_queue(struct micvnet_info *vnet_info)
{
	if (atomic_read(&vnet_info->vi_state) == MICVNET_STATE_LINKUP)
		netif_wake_queue(vnet_info->vi_netdev);
}

static __always_inline void
micvnet_dec_cnt_tx_pending(struct micvnet_info *vnet_info)
{
	if (atomic_dec_and_test(&vnet_info->cnt_tx_pending) &&
	    (atomic_read(&vnet_info->vi_state) == MICVNET_STATE_LINK_DOWN))
		wake_up_interruptible(&vnet_info->stop_waitq);
}


/***********************************************************
 Pre-allocated "list" of objects which are allocated and deallocated in FIFO
 sequence. Allows reservation of memory at init time to prevent memory
 allocation failures at run time. */
static int
list_obj_list_init(int num_obj, size_t obj_size, struct obj_list *list)
{
	list->size = num_obj + 1;
	list->obj_size = obj_size;
	list->head = list->tail = 0;

	if (!(list->buf = kmalloc(list->size * list->obj_size, GFP_KERNEL))) {
		printk(KERN_ERR "%s: list alloc failed\n", __func__);
		return -ENOMEM;
	}
	return 0;
}

static void
list_obj_list_deinit(struct obj_list *list)
{
	if (list->buf) {
		kfree(list->buf);
		list->buf = NULL;
	}
}

static void *
list_obj_alloc(struct obj_list *list)
{
	char *obj;

	/* Removed bug_on() here to handle VNET OOO messages. In OOO conditions
	 * requests to allocate more objects than list->size are possible. */
	if (((list->head + 1) % list->size) == list->tail) {
		printk(KERN_ERR "%s: BUG: no free objects in obj list\n", __func__);
		return NULL;
	}

	obj = list->buf + list->head * list->obj_size;
	wmb();
	list->head = (list->head + 1) % list->size;

	return obj;
}

void
list_obj_free(struct obj_list *list)
{
	/* Removed bug_on() here to handle VNET OOO messages */
	if (list->tail == list->head) {
		printk(KERN_ERR "%s: BUG: free too many list objects\n", __func__);
		return;
	}

	list->tail = (list->tail + 1) % list->size;
}
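
/*
 * Illustrative sketch (not part of the driver): the obj_list above is a
 * fixed-capacity FIFO allocator. One slot is sacrificed so that
 * head == tail means empty and (head + 1) % size == tail means full.
 * Assuming a hypothetical caller-defined struct foo, typical usage is:
 *
 *	struct obj_list list;
 *	struct foo *f;
 *
 *	if (list_obj_list_init(128, sizeof(struct foo), &list))
 *		return -ENOMEM;			// reserves 129 slots up front
 *	f = list_obj_alloc(&list);		// take the slot at head
 *	if (f) {
 *		// ... use *f ...
 *		list_obj_free(&list);		// release the oldest slot (tail)
 *	}
 *	list_obj_list_deinit(&list);
 *
 * Note that list_obj_free() takes no object pointer: frees must occur in
 * the same FIFO order as allocations, which the message protocol below
 * guarantees in the absence of OOO messages.
 */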

/***********************************************************
 * Vnet message functions
 */
#ifdef HOST
static void
micvnet_msg_rb_init(struct micvnet_msg_rb *rb)
{
	rb->head = rb->tail = 0;
	rb->size = MICVNET_MSG_RB_SIZE;
	rb->prev_head = rb->prev_tail = rb->size - 1;
}

static void
micvnet_reset_msg_rings(struct micvnet_info *vnet_info)
{
	micvnet_msg_rb_init(vnet_info->vi_qp.tx);
	micvnet_msg_rb_init(vnet_info->vi_qp.rx);
}
#endif

static void
micvnet_msg_rb_write_msg(struct micvnet_info *vnet_info, struct micvnet_msg *msg)
{
	struct micvnet_msg_rb *rb = vnet_info->vi_qp.tx;

	/* The condition below should never occur under normal conditions,
	   because the VNET message ring buffer size is at least 1 greater than
	   the maximum total number of outstanding messages possible in the
	   system. However, all bets are off if VNET OOO messages are seen.
	   Therefore the previous bug_on() was removed; busy wait instead. */
	while (((rb->head + 1) % rb->size) == rb->tail)
		cpu_relax();

	if (!(rb->head == (rb->prev_head + 1) % rb->size))
		printk(KERN_ERR "BUG: head not equal to prev_head + 1:\n \
			head %d prev_head %d\n", rb->head, rb->prev_head);

	smp_mb();
#ifdef HOST
	rb->buf[rb->head] = *msg;
#else
	memcpy_toio(&rb->buf[rb->head], msg, sizeof(*msg));
#endif
	smp_mb();
	serializing_request(&rb->buf[rb->head]);

	rb->prev_head = rb->head;
	rb->head = (rb->head + 1) % rb->size;
#ifndef HOST
	rb->head = rb->head;	/* self-assignment kept from the original
				   source, presumably a write-back hint */
#endif
	smp_mb();
	serializing_request(&rb->head);
}

static int
micvnet_msg_rb_read_msg(struct micvnet_info *vnet_info, struct micvnet_msg *msg)
{
	struct micvnet_msg_rb *rb = vnet_info->vi_qp.rx;

	if (rb->tail == rb->head)
		return 1;

	if (!(rb->tail == (rb->prev_tail + 1) % rb->size))
		printk(KERN_ERR "BUG: tail not equal to prev_tail + 1:\n \
			tail %d prev_tail %d\n", rb->tail, rb->prev_tail);

	smp_mb();
#ifdef HOST
	*msg = rb->buf[rb->tail];
#else
	memcpy_fromio(msg, &rb->buf[rb->tail], sizeof(*msg));
#endif
	smp_mb();
	serializing_request(&rb->buf[rb->tail]);

	rb->prev_tail = rb->tail;
	rb->tail = (rb->tail + 1) % rb->size;
#ifndef HOST
	rb->tail = rb->tail;	/* see write-side note above */
#endif
	smp_mb();
	serializing_request(&rb->tail);

	return 0;
}
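
/*
 * Illustrative sketch (not part of the driver): vi_qp.tx/vi_qp.rx form a
 * single-producer, single-consumer circular buffer per direction. Only the
 * producer moves head and only the consumer moves tail, so no lock is
 * needed; the barriers above order the payload copy against the index
 * update. A minimal model of the index arithmetic, assuming a hypothetical
 * ring of size 4:
 *
 *	// empty:            head == tail
 *	// full:             (head + 1) % size == tail  (one slot unused)
 *	// occupied entries: (head - tail + size) % size
 *
 * so with head = 3, tail = 1, size = 4 there are (3 - 1 + 4) % 4 = 2
 * messages waiting to be read.
 */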

void
micvnet_msg_send_msg(struct micvnet_info *vnet_info, struct micvnet_msg *msg)
{
	micvnet_msg_rb_write_msg(vnet_info, msg);
#ifdef HOST
	if (micpm_get_reference(vnet_to_ctx(vnet_info), true))
		return;
#endif
	micvnet_send_intr(vnet_info);
#ifdef HOST
	micpm_put_reference(vnet_to_ctx(vnet_info));
#endif
}

static void
micvnet_msg_send_add_dma_buffer_msg(struct micvnet_info *vnet_info,
				    struct rx_node *rnode)
{
	struct micvnet_msg msg;
	struct micvnet_msg_add_dma_buffer
		*body = &msg.body.micvnet_msg_add_dma_buffer;

	msg.msg_id = MICVNET_MSG_ADD_DMA_BUFFER;
	body->buf_phys = rnode->phys;
	body->buf_size = rnode->size;
	micvnet_msg_send_msg(vnet_info, &msg);
}

static void
micvnet_msg_recv_add_dma_buffer(struct micvnet_info *vnet_info,
				struct micvnet_msg_add_dma_buffer *msg)
{
	struct dma_node *dnode;

	/* Removed bug_on() here to handle VNET OOO messages */
	if (!(dnode = list_obj_alloc(&vnet_info->dnode_list)))
		return;

	dnode->phys = msg->buf_phys;
	dnode->size = msg->buf_size;

	spin_lock(&vnet_info->vi_rxlock);
	list_add_tail(&dnode->list, &vnet_info->vi_dma_buf);
	spin_unlock(&vnet_info->vi_rxlock);

	atomic_inc(&vnet_info->cnt_dma_buf_avail);
	micvnet_wake_queue(vnet_info);
}

static void
micvnet_msg_send_dma_complete_msg(struct micvnet_info *vnet_info,
				  struct sched_node *snode)
{
	struct micvnet_msg msg;
	struct micvnet_msg_dma_complete
		*body = &msg.body.micvnet_msg_dma_complete;

	msg.msg_id = MICVNET_MSG_DMA_COMPLETE;
	body->dst_phys = snode->dst_phys;
	body->size = snode->skb->len;
	body->dma_offset = snode->dma_offset;
	micvnet_msg_send_msg(vnet_info, &msg);
}

/* Handle an unexpected out-of-order message */
static int
micvnet_msg_handle_ooo_msg(struct micvnet_info *vnet_info,
			   struct micvnet_msg_dma_complete *msg)
{
	struct micvnet_msg_rb *rb = vnet_info->vi_qp.rx;
	struct rx_node *rnode;
	struct list_head *pos, *tmpl;
	bool found = false;

	rnode = list_entry((&vnet_info->vi_rx_skb)->next, struct rx_node, list);

	/* Normal operation */
	if (rnode->phys == msg->dst_phys
	    && msg->size <= (rnode->size - 3 * DMA_ALIGNMENT)
	    && msg->dma_offset < 2 * DMA_ALIGNMENT)
		return 0;

	/* Flag that something unexpected is going on */
	printk(KERN_ERR "BUG: Unexpected vnet dma_complete message parameters:\n \
		rnode->phys %p, msg->dst_phys %p\n \
		rnode->size %lld, msg->size %lld, msg->dma_offset %lld\n \
		rx rb head %d tail %d size %d\n",
	       (char *) rnode->phys, (char *) msg->dst_phys,
	       rnode->size, msg->size, msg->dma_offset,
	       rb->head, rb->tail, rb->size);

	/* If the message is received in order but with incorrect parameters
	   (size/dma_offset), drop it, but re-add the rnode at the back of the
	   rx_skb list, as well as at tx, similar to what is done below for the
	   OOO case. */
	if (rnode->phys == msg->dst_phys) {
		list_del(&rnode->list);
		list_add_tail(&rnode->list, &vnet_info->vi_rx_skb);
		micvnet_msg_send_add_dma_buffer_msg(vnet_info, rnode);
		vnet_info->vi_netdev->stats.rx_dropped++;
		return 1;
	}

	/* Start of OOO message processing. First check if the message has
	 * really been received OOO. If it is completely unknown to us we just
	 * drop it and go on. */
	list_for_each(pos, &vnet_info->vi_rx_skb) {
		rnode = list_entry(pos, struct rx_node, list);
		if (rnode->phys == msg->dst_phys) {
			found = true;
			break;
		}
	}

	if (!found) {
		vnet_info->vi_netdev->stats.rx_dropped++;
		return 1;
	}

	vnet_info->vi_netdev->stats.rx_errors++;

	/* Skip all the rnodes till we find the one we are looking for. Rather
	 * than free rnode skbs and reallocate them, and thereby risk
	 * allocation failures, we simply delete the rnodes from their current
	 * position on the rnode list and re-add them at the back of the list,
	 * as well as add them back at tx (see the rotation sketch below). */
	list_for_each_safe(pos, tmpl, &vnet_info->vi_rx_skb) {
		rnode = list_entry(pos, struct rx_node, list);
		if (rnode->phys == msg->dst_phys)
			break;

		list_del(&rnode->list);
		list_add_tail(&rnode->list, &vnet_info->vi_rx_skb);
		micvnet_msg_send_add_dma_buffer_msg(vnet_info, rnode);
	}

	return 0;
}
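
/*
 * Illustrative sketch (not part of the driver): the loop above rotates the
 * rx_skb list rather than freeing buffers. If the list holds buffers
 * A B C D and a dma_complete arrives for C, then A and B are moved to the
 * back (and re-advertised to the peer via ADD_DMA_BUFFER), leaving
 * C D A B with C at the front, where the normal completion path expects
 * the matching buffer to be.
 */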

static void
micvnet_msg_recv_dma_complete(struct micvnet_info *vnet_info,
			      struct micvnet_msg_dma_complete *msg)
{
	struct rx_node *rnode;
	struct sk_buff *skb;

	vnet_info->vi_netdev->stats.rx_packets++;

	if (micvnet_msg_handle_ooo_msg(vnet_info, msg))
		return;

	rnode = list_entry((&vnet_info->vi_rx_skb)->next, struct rx_node, list);
	/* Our OOO message handling guarantees that rnode->phys == msg->dst_phys */

	vnet_info->vi_netdev->stats.rx_bytes += msg->size;
	list_del(&rnode->list);

	spin_lock_bh(&vnet_info->vi_txlock);
	if (atomic_read(&vnet_info->vi_state) != MICVNET_STATE_LINKUP) {
		spin_unlock_bh(&vnet_info->vi_txlock);
		goto skip_adding_new_buffers;
	}
	atomic_inc(&vnet_info->cnt_tx_pending);
	spin_unlock_bh(&vnet_info->vi_txlock);

	/* OOM handling: check if a new SKB can be allocated. If not, re-add
	   the old SKB to TX and do not give it to the network stack, i.e.
	   drop it. */
	if (micvnet_init_rx_skb_send_msg(vnet_info)) {
		list_add_tail(&rnode->list, &vnet_info->vi_rx_skb);
		micvnet_msg_send_add_dma_buffer_msg(vnet_info, rnode);
		micvnet_dec_cnt_tx_pending(vnet_info);
		vnet_info->vi_netdev->stats.rx_dropped++;
		return;
	}
	micvnet_dec_cnt_tx_pending(vnet_info);

skip_adding_new_buffers:
	skb = rnode->skb;
	skb_reserve(skb, msg->dma_offset);
	skb_put(skb, msg->size);
	skb->dev = vnet_info->vi_netdev;
	skb->protocol = eth_type_trans(skb, skb->dev);
	skb->ip_summed = CHECKSUM_NONE;

	local_bh_disable();
	netif_receive_skb(skb);
	local_bh_enable();

#ifdef HOST
	mic_ctx_unmap_single(vnet_to_ctx(vnet_info), rnode->phys, rnode->size);
#endif
	kfree(rnode);
}
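
/*
 * Illustrative sketch (not part of the driver): skb_reserve()/skb_put()
 * above carve the DMA'd payload out of the over-sized receive buffer.
 * With a hypothetical dma_offset of 70 and size of 1500:
 *
 *	skb_reserve(skb, 70);	// skb->data += 70: skip the alignment pad
 *	skb_put(skb, 1500);	// skb->len = 1500: expose the payload
 *
 * so the packet handed to netif_receive_skb() starts exactly at the
 * (unaligned) frame the sender placed after the aligned DMA destination.
 */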

static void
micvnet_msg_send_link_down_msg(struct work_struct *work)
{
	struct micvnet_info *vnet_info
		= container_of(work, struct micvnet_info, vi_ws_link_down);
	struct micvnet_msg msg;

	msg.msg_id = MICVNET_MSG_LINK_DOWN;
	micvnet_msg_send_msg(vnet_info, &msg);
}

static void
micvnet_msg_recv_msg_link_down(struct micvnet_info *vnet_info)
{
	atomic_set(&vnet_info->vi_state, MICVNET_STATE_BEGIN_UNINIT);

	if (vnet_info->link_down_initiator)
		wake_up_interruptible(&vnet_info->stop_waitq);
	else
		schedule_work(&vnet_info->vi_ws_stop);
}

static void
micvnet_msg_send_link_up_msg(struct micvnet_info *vnet_info)
{
	struct micvnet_msg msg;
	struct micvnet_msg_link_up
		*body = &msg.body.micvnet_msg_link_up;

	msg.msg_id = MICVNET_MSG_LINK_UP;
	body->vnet_driver_version = VNET_DRIVER_VERSION;
	micvnet_msg_send_msg(vnet_info, &msg);
}

static void
micvnet_msg_recv_msg_link_up(struct micvnet_info *vnet_info,
			     struct micvnet_msg_link_up *msg)
{
	if (msg->vnet_driver_version != VNET_DRIVER_VERSION) {
		printk(KERN_ERR "%s: Error: vnet driver version mismatch: "
		       "expected %d actual %lld\n"
		       "Ensure that host and card modules are "
		       "from the same build.\n",
		       __func__, VNET_DRIVER_VERSION,
		       msg->vnet_driver_version);
		return;
	}
#ifdef HOST
	schedule_work(&vnet_info->vi_ws_start);
#else
	micvnet_send_add_dma_buffer_messages(vnet_info);
#endif
}

static void
micvnet_msg_process_messages(struct micvnet_info *vnet_info)
{
	struct micvnet_msg msg;

#ifdef HOST
	micpm_get_reference(vnet_to_ctx(vnet_info), true);
#endif
	while (!micvnet_msg_rb_read_msg(vnet_info, &msg)) {
		switch (msg.msg_id) {
		case MICVNET_MSG_ADD_DMA_BUFFER:
			micvnet_msg_recv_add_dma_buffer
				(vnet_info,
				 &msg.body.micvnet_msg_add_dma_buffer);
			break;

		case MICVNET_MSG_DMA_COMPLETE:
			micvnet_msg_recv_dma_complete
				(vnet_info,
				 &msg.body.micvnet_msg_dma_complete);
			break;

		case MICVNET_MSG_LINK_DOWN:
			micvnet_msg_recv_msg_link_down(vnet_info);
			break;

		case MICVNET_MSG_LINK_UP:
			micvnet_msg_recv_msg_link_up(vnet_info,
				&msg.body.micvnet_msg_link_up);
			break;

		default:
			printk(KERN_ERR "BUG: unknown vnet msg id: %lld\n", msg.msg_id);
			break;
		}
	}
#ifdef HOST
	micpm_put_reference(vnet_to_ctx(vnet_info));
#endif
}

/***********************************************************
 * Interrupts
 */
#ifdef HOST
static int
micvnet_host_doorbell_intr_handler(mic_ctx_t *mic_ctx, int doorbell)
{
	struct micvnet_info *vnet_info;
	vnet_info = mic_ctx->bi_vethinfo;

	queue_work(vnet_info->vi_wq, &vnet_info->vi_ws_bh);
	return 0;
}
#else
static irqreturn_t
micvnet_host_intr_handler(int irq, void *data)
{
	struct micvnet_info *vnet_info = data;
	queue_work(vnet_info->vi_wq, &vnet_info->vi_ws_bh);
	return IRQ_HANDLED;
}
#endif

static void
micvnet_intr_bh_handler(struct work_struct *work)
{
	struct micvnet_info *vnet_info
		= container_of(work, struct micvnet_info, vi_ws_bh);

	micvnet_msg_process_messages(vnet_info);
}

#ifdef HOST
static void
micvnet_send_intr(struct micvnet_info *vnet_info)
{
	mic_ctx_t *mic_ctx = vnet_info->mic_ctx;
	mic_send_vnet_intr(mic_ctx);
}
#else
/* Ring host doorbell 3 interrupt */
static void
micvnet_send_intr(struct micvnet_info *vnet_info)
{
	uint32_t db_reg;

	db_reg = readl(vnet_info->vi_sbox + SBOX_SDBIC3)
		 | SBOX_SDBIC0_DBREQ_BIT;
	writel(db_reg, vnet_info->vi_sbox + SBOX_SDBIC3);
}
#endif

/***********************************************************
 * Net device ops and rtnl link ops
 */
/*
  Do nothing in ndo_open and ndo_stop. There are two reasons for this:
  1. Since the host and card side drivers are a driver pair, an ifconfig up
     or ifconfig down on one side needs to be communicated to the other
     side; otherwise, in the current implementation, this can bring down the
     system. Ignoring ifconfig up or down avoids this issue.
  2. For now, micvnet_init is called before the DMA can be initialized.
     However, as soon as micvnet_init has been called and the netdev has
     been created, the OS can invoke .ndo_open, which requires the DMA to
     have been initialized. But the DMA cannot be initialized until later
     (at present, after the card has booted).
  Therefore we ourselves call micvnet_start and micvnet_stop at appropriate
  times, when we are ready for them. The only consequence is that all
  packets are dropped in ndo_start_xmit until micvnet_start has been
  invoked.
 */

/* Start callback */
static int
micvnet_start_dev(struct net_device *dev)
{
	struct micvnet_info *vnet_info = dev->ml_priv;

	/* Stop the queue till the state becomes LINKUP. The queue will be
	   started when dma buffers are added in
	   micvnet_msg_recv_add_dma_buffer(). Not doing this results in
	   packets getting dropped till the state is LINKUP. */
	if (atomic_read(&vnet_info->vi_state) != MICVNET_STATE_LINKUP)
		netif_stop_queue(vnet_info->vi_netdev);

	return 0;
}

/* Stop callback */
static int
micvnet_stop_dev(struct net_device *dev)
{
	return 0;
}

static void
micvnet_dma_cb_bh(struct work_struct *work)
{
	struct micvnet_info
		*vnet_info = container_of(work, struct micvnet_info, vi_ws_dmacb);
	struct sched_node *snode;

	if (!atomic_read(&vnet_info->cnt_dma_complete))
		return;

	do {
		spin_lock_bh(&vnet_info->vi_txlock);
		snode = list_entry((&vnet_info->vi_sched_skb)->next,
				   struct sched_node, list);
		list_del(&snode->list);
		spin_unlock_bh(&vnet_info->vi_txlock);

		micvnet_msg_send_dma_complete_msg(vnet_info, snode);

		micvnet_dec_cnt_tx_pending(vnet_info);
#ifdef HOST
		mic_ctx_unmap_single(vnet_to_ctx(vnet_info),
				     snode->dma_src_phys, snode->dma_size);
		micpm_put_reference(vnet_to_ctx(vnet_info));
#endif
		kfree_skb(snode->skb);
		kfree(snode);

	} while (!atomic_dec_and_test(&vnet_info->cnt_dma_complete));
}

static void
micvnet_dma_completion_callback(uint64_t data)
{
	struct micvnet_info *vnet_info = (struct micvnet_info *) data;

	atomic_inc(&vnet_info->cnt_dma_complete);

	queue_work(vnet_info->vi_wq, &vnet_info->vi_ws_dmacb);
}

static int
micvnet_do_dma(struct micvnet_info *vnet_info, struct sched_node *snode)
{
	uint64_t dma_src, dma_dst;
	int ret = 0;

	dma_src = snode->dma_src_phys;
	dma_dst = ALIGN(snode->dst_phys, DMA_ALIGNMENT);
	snode->dma_offset = (snode->skb->data - snode->skb_data_aligned)
			    + (dma_dst - snode->dst_phys);
	if ((ret = request_dma_channel(vnet_info->dma_chan)))
		goto err_exit;

	ret = do_dma(vnet_info->dma_chan,
		     DO_DMA_INTR,
		     dma_src,
		     dma_dst,
		     snode->dma_size,
		     &vnet_info->dma_cb);

	free_dma_channel(vnet_info->dma_chan);

err_exit:
	return ret;
}
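
/*
 * Illustrative sketch (not part of the driver): worked example of the
 * offset arithmetic above, assuming DMA_ALIGNMENT = 64. If skb->data sits
 * 10 bytes past its rounded-down source base and dst_phys is 0x20 short of
 * a 64-byte boundary:
 *
 *	dma_src    = skb_data_aligned                  (source rounded down)
 *	dma_dst    = ALIGN(dst_phys, 64) = dst_phys + 0x20
 *	dma_offset = (skb->data - skb_data_aligned) + (dma_dst - dst_phys)
 *	           = 10 + 0x20 = 42
 *
 * The receiver then skb_reserve()s dma_offset bytes so the frame is
 * recovered at the right address even though both endpoints were rounded
 * to DMA-legal boundaries.
 */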

static int
micvnet_schedule_dma(struct micvnet_info *vnet_info)
{
	struct tx_node *tnode;
	struct sched_node *snode;
	struct dma_node *dnode;
	struct sk_buff *skb;
	int ret = 0;

	/* tnode */
	spin_lock_bh(&vnet_info->vi_txlock);
	BUG_ON(list_empty(&vnet_info->vi_tx_skb));
	tnode = list_entry((&vnet_info->vi_tx_skb)->next,
			   struct tx_node, list);
	list_del(&tnode->list);
	spin_unlock_bh(&vnet_info->vi_txlock);
	skb = tnode->skb;
	kfree(tnode);

#ifdef HOST
	if ((ret = micpm_get_reference(vnet_to_ctx(vnet_info), true)))
		goto err_exit_no_dec_node_refcnt;
#endif

	/* dnode */
	spin_lock(&vnet_info->vi_rxlock);
	BUG_ON(list_empty(&vnet_info->vi_dma_buf));
	dnode = list_entry((&vnet_info->vi_dma_buf)->next,
			   struct dma_node, list);
	spin_unlock(&vnet_info->vi_rxlock);
	if (dnode->size < skb->len + 3 * DMA_ALIGNMENT) {
		ret = -ENOMEM;
		goto err_exit;
	}

	/* snode */
	if (!(snode = kmalloc(sizeof(*snode), GFP_KERNEL))) {
		ret = -ENOMEM;
		goto err_exit;
	}
	snode->skb = skb;
	snode->dst_phys = dnode->phys;
	snode->skb_data_aligned
		= (unsigned char *) ((uint64_t) skb->data & ~(DMA_ALIGNMENT - 1));
	snode->dma_size
		= ALIGN((skb->len + (skb->data - snode->skb_data_aligned)),
			DMA_ALIGNMENT);
#ifdef HOST
	snode->dma_src_phys = mic_ctx_map_single(vnet_to_ctx(vnet_info),
						 snode->skb_data_aligned,
						 snode->dma_size);
	if (mic_map_error(snode->dma_src_phys)) {
		kfree(snode);
		ret = -ENOMEM;
		goto err_exit;
	}
#else
	snode->dma_src_phys = virt_to_phys(snode->skb_data_aligned);
#endif

	if ((ret = micvnet_do_dma(vnet_info, snode))) {
#ifdef HOST
		mic_ctx_unmap_single(vnet_to_ctx(vnet_info),
				     snode->dma_src_phys, snode->dma_size);
#endif
		kfree(snode);
		goto err_exit;
	}

	/* Update snode/dnode lists only after all operations have successfully
	   completed and no further errors are possible */
	spin_lock_bh(&vnet_info->vi_txlock);
	list_add_tail(&snode->list, &vnet_info->vi_sched_skb);
	spin_unlock_bh(&vnet_info->vi_txlock);

	spin_lock(&vnet_info->vi_rxlock);
	list_del(&dnode->list);
	spin_unlock(&vnet_info->vi_rxlock);
	list_obj_free(&vnet_info->dnode_list);

	vnet_info->vi_netdev->stats.tx_packets++;
	vnet_info->vi_netdev->stats.tx_bytes += skb->len;

	return ret;

err_exit:
#ifdef HOST
	micpm_put_reference(vnet_to_ctx(vnet_info));
err_exit_no_dec_node_refcnt:
#endif
	micvnet_dec_cnt_tx_pending(vnet_info);
	atomic_inc(&vnet_info->cnt_dma_buf_avail);
	micvnet_wake_queue(vnet_info);
	skb->dev->stats.tx_dropped++;
	kfree_skb(skb);
	return ret;
}

static void
micvnet_schedule_dmas(struct work_struct *work)
{
	struct micvnet_info *vnet_info
		= container_of(work, struct micvnet_info, vi_ws_tx);
	bool tx_skb_list_empty;

	while (1) {
		spin_lock_bh(&vnet_info->vi_txlock);
		tx_skb_list_empty = list_empty(&vnet_info->vi_tx_skb);
		spin_unlock_bh(&vnet_info->vi_txlock);
		if (tx_skb_list_empty)
			break;

		micvnet_schedule_dma(vnet_info);
	}
}

int
micvnet_xmit(struct sk_buff *skb, struct net_device *dev)
{
	struct micvnet_info *vnet_info = (struct micvnet_info *) dev->ml_priv;
	struct tx_node *tnode;

	if (!vnet_info || !atomic_read(&vnet_info->cnt_dma_buf_avail))
		goto err_exit;

	if (!(tnode = kmalloc(sizeof(*tnode), GFP_ATOMIC)))
		goto err_exit;
	tnode->skb = skb;

	spin_lock(&vnet_info->vi_txlock);
	if (atomic_read(&vnet_info->vi_state) != MICVNET_STATE_LINKUP)
		goto err_exit_unlock;
	list_add_tail(&tnode->list, &vnet_info->vi_tx_skb);
	atomic_inc(&vnet_info->cnt_tx_pending);
	spin_unlock(&vnet_info->vi_txlock);

	queue_work(vnet_info->vi_wq, &vnet_info->vi_ws_tx);

	if (atomic_dec_and_test(&vnet_info->cnt_dma_buf_avail))
		netif_stop_queue(vnet_info->vi_netdev);

	return NETDEV_TX_OK;

err_exit_unlock:
	kfree(tnode);
	spin_unlock(&vnet_info->vi_txlock);
err_exit:
	kfree_skb(skb);
	dev->stats.tx_dropped++;
	return NETDEV_TX_OK;
}
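
/*
 * Illustrative sketch (not part of the driver): cnt_dma_buf_avail
 * implements credit-based flow control. Each transmit consumes one credit
 * (a remote DMA buffer); each MICVNET_MSG_ADD_DMA_BUFFER from the peer
 * returns one. In outline:
 *
 *	xmit:    if (atomic_dec_and_test(&credits)) netif_stop_queue(dev);
 *	credit:  atomic_inc(&credits); netif_wake_queue(dev);
 *
 * The queue is stopped when the last credit is taken and restarted as the
 * peer re-advertises buffers, so the stack never queues more packets than
 * the receiver has buffers to accept.
 */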

#if LINUX_VERSION_CODE < KERNEL_VERSION(3,2,0)
static void
micvnet_multicast_list(struct net_device *dev)
{
}
#endif

static int
micvnet_set_address(struct net_device *dev, void *p)
{
	struct sockaddr *sa = p;

	if (!is_valid_ether_addr(sa->sa_data))
		return -EADDRNOTAVAIL;

	memcpy(dev->dev_addr, sa->sa_data, ETH_ALEN);
	return 0;
}

#define MIN_MTU 68
#define MAX_MTU MICVNET_MAX_MTU

static int
micvnet_change_mtu(struct net_device *dev, int new_mtu)
{
	if (new_mtu < MIN_MTU || new_mtu > MAX_MTU)
		return -EINVAL;
	dev->mtu = new_mtu;
	return 0;
}

union serial {
	uint32_t regs[3];
	char string[13];
};

void
mic_get_serial_from_dbox(struct micvnet_info *vni, char *serialnum)
{
	union serial serial;
#ifdef HOST
	serial.regs[0] = DBOX_READ(vni->mic_ctx->mmio.va, DBOX_SWF1X0);
	serial.regs[1] = DBOX_READ(vni->mic_ctx->mmio.va, DBOX_SWF1X1);
	serial.regs[2] = DBOX_READ(vni->mic_ctx->mmio.va, DBOX_SWF1X2);
#else
	serial.regs[0] = readl(vni->vi_dbox + DBOX_SWF1X0);
	serial.regs[1] = readl(vni->vi_dbox + DBOX_SWF1X1);
	serial.regs[2] = readl(vni->vi_dbox + DBOX_SWF1X2);
#endif
	serial.string[12] = '\0';
	strcpy(serialnum, serial.string);
}
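
/*
 * Illustrative sketch (not part of the driver): the union above reads the
 * 12-character serial number out of three 32-bit scratch registers without
 * any byte shuffling. Assuming the registers hold the ASCII of "ABCD",
 * "EFGH", "IJKL" packed little-endian:
 *
 *	serial.regs[0] = 0x44434241;	// bytes 41 42 43 44 -> "ABCD"
 *	serial.regs[1] = 0x48474645;	// "EFGH"
 *	serial.regs[2] = 0x4c4b4a49;	// "IJKL"
 *	serial.string[12] = '\0';	// string now reads "ABCDEFGHIJKL"
 *
 * This layout is only valid on a little-endian CPU, which holds for both
 * the x86 host and the x86-based MIC card.
 */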

int
micvnet_setmac_from_serial(struct net_device *dev)
{
	struct micvnet_info *vni = (struct micvnet_info *) dev->ml_priv;
	char serialnum[17];
	int err;

	mic_get_serial_from_dbox(vni, serialnum);
#ifdef HOST
	err = mic_get_mac_from_serial(serialnum, dev->dev_addr, 1);
#else
	err = mic_get_mac_from_serial(serialnum, dev->dev_addr, 0);
#endif
	return err;
}

static const struct net_device_ops micvnet_netdev_ops = {
	.ndo_open		= micvnet_start_dev,
	.ndo_stop		= micvnet_stop_dev,
	.ndo_start_xmit		= micvnet_xmit,
	.ndo_validate_addr	= eth_validate_addr,
#if LINUX_VERSION_CODE < KERNEL_VERSION(3,2,0)
	.ndo_set_multicast_list	= micvnet_multicast_list,
#endif
	.ndo_set_mac_address	= micvnet_set_address,
	.ndo_change_mtu		= micvnet_change_mtu,
};

static void
micvnet_setup(struct net_device *dev)
{
	ether_setup(dev);

	/* Initialize the device structure. */
	dev->netdev_ops = &micvnet_netdev_ops;
	dev->destructor = free_netdev;

	/* Fill in device structure with ethernet-generic values. */
	dev->mtu = MICVNET_MAX_MTU;
	dev->flags &= ~IFF_MULTICAST;
}

static struct rtnl_link_ops micvnet_link_ops __read_mostly = {
	.kind	= "micvnet",
	.setup	= micvnet_setup,
};

/***********************************************************
 * Vnet init/deinit
 */
static int
micvnet_init_hw_regs(struct micvnet_info *vnet_info)
{
#ifdef HOST
	mic_ctx_t *mic_ctx = vnet_info->mic_ctx;

	vnet_info->vi_pdev = mic_ctx->bi_pdev;
	vnet_info->vi_sbox = (uint8_t *) ((unsigned long) mic_ctx->mmio.va +
					  HOST_SBOX_BASE_ADDRESS);
	vnet_info->vi_scratch14
		= (uint32_t *) ((unsigned long) mic_ctx->mmio.va +
				HOST_SBOX_BASE_ADDRESS + SBOX_SCRATCH14);
#else
	vnet_info->vi_sbox = ioremap_nocache(SBOX_BASE, SBOX_MMIO_LENGTH);
	vnet_info->vi_dbox = ioremap_nocache(DBOX_BASE, SBOX_MMIO_LENGTH);
	/* Check both mappings (the original only checked vi_sbox) and do not
	   leak a partial mapping on failure */
	if (!vnet_info->vi_sbox || !vnet_info->vi_dbox) {
		printk(KERN_ERR "%s: NULL SBOX/DBOX ptr\n", __func__);
		if (vnet_info->vi_sbox)
			iounmap(vnet_info->vi_sbox);
		if (vnet_info->vi_dbox)
			iounmap(vnet_info->vi_dbox);
		return -ENOMEM;
	}
	vnet_info->vi_scratch14
		= (uint32_t *) (vnet_info->vi_sbox + SBOX_SCRATCH14);
#endif
	return 0;
}

static void
micvnet_deinit_hw_regs(struct micvnet_info *vnet_info)
{
#ifndef HOST
	iounmap(vnet_info->vi_sbox);
	iounmap(vnet_info->vi_dbox);
#endif
}

static int
micvnet_init_interrupts(struct micvnet_info *vnet_info)
{
	mic_ctx_t *mic_ctx = vnet_info->mic_ctx;
	int ret = 0;

	spin_lock_init(&vnet_info->vi_txlock);
	spin_lock_init(&vnet_info->vi_rxlock);

	snprintf(vnet_info->vi_wqname, sizeof(vnet_info->vi_wqname),
		 "VNET WQ %d", mic_ctx->bi_id);

	if (!(vnet_info->vi_wq =
	      __mic_create_singlethread_workqueue(vnet_info->vi_wqname))) {
		printk(KERN_ERR "%s: create_singlethread_workqueue\n", __func__);
		return -ENOMEM;
	}
	init_waitqueue_head(&vnet_info->stop_waitq);

	INIT_WORK(&vnet_info->vi_ws_bh, micvnet_intr_bh_handler);
	INIT_WORK(&vnet_info->vi_ws_tx, micvnet_schedule_dmas);
	INIT_WORK(&vnet_info->vi_ws_dmacb, micvnet_dma_cb_bh);
	INIT_WORK(&vnet_info->vi_ws_link_down, micvnet_msg_send_link_down_msg);
	INIT_WORK(&vnet_info->vi_ws_stop, micvnet_stop_ws);
	INIT_WORK(&vnet_info->vi_ws_start, micvnet_start_ws);
#ifdef HOST
	if ((ret = mic_reg_irqhandler(mic_ctx, 3, "Host DoorBell 3",
				      micvnet_host_doorbell_intr_handler))) {
#else
	if ((ret = request_irq(get_sbox_irq(VNET_SBOX_INT_IDX),
			       micvnet_host_intr_handler, IRQF_DISABLED,
			       "vnet intr", vnet_info))) {
#endif
		printk(KERN_ERR "%s: interrupt registration failed\n", __func__);
		goto err_exit_destroy_workqueue;
	}
	return 0;

err_exit_destroy_workqueue:
	destroy_workqueue(vnet_info->vi_wq);
	return ret;
}

static void
micvnet_deinit_interrupts(struct micvnet_info *vnet_info)
{
#ifdef HOST
	mic_unreg_irqhandler(vnet_info->mic_ctx, 3, "Host DoorBell 3");
#else
	free_irq(get_sbox_irq(VNET_SBOX_INT_IDX), vnet_info);
#endif
	destroy_workqueue(vnet_info->vi_wq);
}

static int
micvnet_init_netdev(struct micvnet_info *vnet_info)
{
	struct net_device *dev_vnet;
	int ret = 0;

#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3,17,0))
	if ((dev_vnet = alloc_netdev(sizeof(struct micvnet_info), "mic%d",
				     NET_NAME_UNKNOWN, micvnet_setup)) == NULL) {
#else
	if ((dev_vnet = alloc_netdev(sizeof(struct micvnet_info), "mic%d",
				     micvnet_setup)) == NULL) {
#endif
		printk(KERN_ERR "%s: alloc_netdev failed\n", __func__);
		return -ENOMEM;
	}

	vnet_info->vi_netdev = dev_vnet;
	dev_vnet->ml_priv = vnet_info;

	if (micvnet_setmac_from_serial(dev_vnet))
		random_ether_addr(dev_vnet->dev_addr);

	dev_vnet->rtnl_link_ops = &micvnet_link_ops;

	if ((ret = register_netdev(dev_vnet)) < 0) {
		printk(KERN_ERR "%s: register_netdev failed %d\n", __func__, ret);
		free_netdev(dev_vnet);
		return ret;
	}

	return 0;
}

static int
micvnet_init_msg_rings(struct micvnet_info *vnet_info)
{
#ifdef HOST
	vnet_info->vi_qp.tx = &vnet_info->vi_rp.rb_tx;
	vnet_info->vi_qp.rx = &vnet_info->vi_rp.rb_rx;
	micvnet_reset_msg_rings(vnet_info);

	vnet_info->vi_rp_phys = mic_ctx_map_single(vnet_to_ctx(vnet_info),
						   &vnet_info->vi_rp,
						   sizeof(vnet_info->vi_rp));
	if (mic_map_error(vnet_info->vi_rp_phys)) {
		printk(KERN_ERR "%s: mic_map_error failed\n", __func__);
		return -ENOMEM;
	}
#else
	if (!(vnet_info->vi_rp_phys = vnet_addr)) {
		printk(KERN_ERR "%s: null vnet_addr\n", __func__);
		return -ENOMEM;
	}
	vnet_info->ring_ptr
		= ioremap_nocache(vnet_info->vi_rp_phys,
				  sizeof(struct micvnet_msg_ring_pair));
	if (!vnet_info->ring_ptr) {
		printk(KERN_ERR "%s: NULL ring ptr\n", __func__);
		return -ENOMEM;
	}
	vnet_info->vi_qp.tx = &vnet_info->ring_ptr->rb_rx;
	vnet_info->vi_qp.rx = &vnet_info->ring_ptr->rb_tx;
#endif
	return 0;
}
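
/*
 * Illustrative sketch (not part of the driver): the ring pair lives once in
 * host memory and is cross-wired, so each side's transmit ring is the other
 * side's receive ring:
 *
 *	host:	vi_qp.tx = &rp->rb_tx;	vi_qp.rx = &rp->rb_rx;
 *	card:	vi_qp.tx = &rp->rb_rx;	vi_qp.rx = &rp->rb_tx;
 *
 * The card learns the host-physical address of the pair through the
 * externally supplied vnet_addr and ioremaps it, which is why the card side
 * uses memcpy_toio()/memcpy_fromio() on the same structures the host
 * touches directly.
 */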

static void
micvnet_deinit_msg_rings(struct micvnet_info *vnet_info)
{
#ifdef HOST
	mic_ctx_unmap_single(vnet_to_ctx(vnet_info),
			     vnet_info->vi_rp_phys, sizeof(vnet_info->vi_rp));
#else
	iounmap(vnet_info->ring_ptr);
#endif
}

static int
micvnet_init_lists(struct micvnet_info *vnet_info)
{
	int ret;

	if ((ret = list_obj_list_init(VNET_MAX_SKBS, sizeof(struct dma_node),
				      &vnet_info->dnode_list)))
		return ret;

	INIT_LIST_HEAD(&vnet_info->vi_rx_skb);
	INIT_LIST_HEAD(&vnet_info->vi_dma_buf);
	INIT_LIST_HEAD(&vnet_info->vi_tx_skb);
	INIT_LIST_HEAD(&vnet_info->vi_sched_skb);
	return 0;
}

static void
micvnet_deinit_lists(struct micvnet_info *vnet_info)
{
	struct list_head *pos, *tmpq;
	struct rx_node *rnode;
	struct tx_node *tnode;
	struct dma_node *dnode;
	struct sched_node *snode;

	list_for_each_safe(pos, tmpq, &vnet_info->vi_rx_skb) {
		rnode = list_entry(pos, struct rx_node, list);
		list_del(&rnode->list);
#ifdef HOST
		mic_ctx_unmap_single(vnet_to_ctx(vnet_info),
				     rnode->phys, rnode->size);
#endif
		kfree_skb(rnode->skb);
		kfree(rnode);
	}

	list_for_each_safe(pos, tmpq, &vnet_info->vi_dma_buf) {
		dnode = list_entry(pos, struct dma_node, list);
		list_del(&dnode->list);
		list_obj_free(&vnet_info->dnode_list);
	}

	list_for_each_safe(pos, tmpq, &vnet_info->vi_tx_skb) {
		tnode = list_entry(pos, struct tx_node, list);
		list_del(&tnode->list);
		kfree_skb(tnode->skb);
		kfree(tnode);
	}

	list_for_each_safe(pos, tmpq, &vnet_info->vi_sched_skb) {
		snode = list_entry(pos, struct sched_node, list);
		list_del(&snode->list);
#ifdef HOST
		mic_ctx_unmap_single(vnet_to_ctx(vnet_info), snode->dma_src_phys,
				     snode->dma_size);
		micpm_put_reference(vnet_to_ctx(vnet_info));
#endif
		kfree_skb(snode->skb);
		kfree(snode);
	}

	list_obj_list_deinit(&vnet_info->dnode_list);
}

static int
micvnet_init_dma(struct micvnet_info *vnet_info)
{
	mic_ctx_t *mic_ctx = vnet_info->mic_ctx;
	int ret;

	/* Note: open_dma_device must use mic_ctx->dma_handle since that is
	   used in the ISR */
#ifdef HOST
	if (micpm_get_reference(mic_ctx, true) != 0) {
		printk(KERN_ERR "%s: micpm_get_reference failed\n", __func__);
		return -ENODEV;
	}

	if ((ret = open_dma_device(mic_ctx->bi_id + 1,
				   mic_ctx->mmio.va + HOST_SBOX_BASE_ADDRESS,
				   &mic_ctx->dma_handle))) {
		printk(KERN_ERR "%s: open_dma_device failed\n", __func__);
		micpm_put_reference(mic_ctx);
		return ret;
	}
	micpm_put_reference(mic_ctx);
#else
	if ((ret = open_dma_device(0, 0, &mic_ctx->dma_handle))) {
		printk(KERN_ERR "%s: open_dma_device failed\n", __func__);
		return ret;
	}
#endif

	vnet_info->dma_handle = mic_ctx->dma_handle;

	if ((ret = allocate_dma_channel(vnet_info->dma_handle,
					&vnet_info->dma_chan))) {
		printk(KERN_ERR "%s: allocate_dma_channel failed\n", __func__);
		goto err_exit_close_dma;
	}
	free_dma_channel(vnet_info->dma_chan);

	vnet_info->dma_cb.dma_completion_func = micvnet_dma_completion_callback;
	vnet_info->dma_cb.cb_cookie = (uint64_t) vnet_info;
	atomic_set(&vnet_info->cnt_dma_complete, 0);
	atomic_set(&vnet_info->cnt_dma_buf_avail, 0);
	vnet_info->link_down_initiator = false;
	atomic_set(&vnet_info->cnt_tx_pending, 0);
	return 0;

err_exit_close_dma:
	close_dma_device(mic_ctx->bi_id + 1, &vnet_info->dma_handle);
	return ret;
}

static void
micvnet_deinit_dma(struct micvnet_info *vnet_info)
{
	mic_ctx_t *mic_ctx = vnet_info->mic_ctx;

	close_dma_device(mic_ctx->bi_id + 1, &vnet_info->dma_handle);
}

static int
micvnet_alloc_rx_node(struct micvnet_info *vnet_info, struct rx_node **node)
{
	struct rx_node *rnode;

	if (!(rnode = kmalloc(sizeof(*rnode), GFP_KERNEL)))
		return -ENOMEM;

	rnode->size = vnet_info->vi_netdev->mtu + 3 * DMA_ALIGNMENT + ETH_HLEN;

	if (!(rnode->skb = dev_alloc_skb(rnode->size))) {
		kfree(rnode);
		return -ENOMEM;
	}

#ifdef HOST
	rnode->phys = mic_ctx_map_single(vnet_to_ctx(vnet_info),
					 rnode->skb->data, rnode->size);
	if (mic_map_error(rnode->phys)) {
		kfree_skb(rnode->skb);
		kfree(rnode);
		return -ENOMEM;
	}
#else
	rnode->phys = virt_to_phys(rnode->skb->data);
#endif

	*node = rnode;

	return 0;
}
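
/*
 * Illustrative sketch (not part of the driver): worked example of the
 * sizing above, assuming MTU = 1500, ETH_HLEN = 14 and DMA_ALIGNMENT = 64:
 *
 *	size = 1500 + 3 * 64 + 14 = 1706 bytes
 *
 * The 3 * DMA_ALIGNMENT slack plausibly covers rounding the destination up
 * to an aligned address, rounding the transfer length up to an aligned
 * size, and the sender-side offset of skb->data within its aligned block,
 * which is also why micvnet_msg_handle_ooo_msg() sanity-checks
 * msg->size <= rnode->size - 3 * DMA_ALIGNMENT.
 */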

static int
micvnet_init_rx_skb_send_msg(struct micvnet_info *vnet_info)
{
	struct rx_node *rnode;
	int ret = 0;

	if (unlikely(ret = micvnet_alloc_rx_node(vnet_info, &rnode)))
		return ret;

	list_add_tail(&rnode->list, &vnet_info->vi_rx_skb);

	micvnet_msg_send_add_dma_buffer_msg(vnet_info, rnode);

	return 0;
}

static int
micvnet_init_rx_skbs(struct micvnet_info *vnet_info)
{
	struct rx_node *rnode;
	int i, ret = 0;

	if ((vnet_num_buffers <= 0) || (vnet_num_buffers > VNET_MAX_SKBS))
		vnet_num_buffers = VNET_MAX_SKBS;

	for (i = 0; i < vnet_num_buffers; i++) {
		if (unlikely(ret = micvnet_alloc_rx_node(vnet_info, &rnode)))
			return ret;

		list_add_tail(&rnode->list, &vnet_info->vi_rx_skb);
	}

	return ret;
}

static void
micvnet_send_add_dma_buffer_messages(struct micvnet_info *vnet_info)
{
	struct rx_node *rnode;
	struct list_head *pos;

	list_for_each(pos, &vnet_info->vi_rx_skb) {
		rnode = list_entry(pos, struct rx_node, list);
		micvnet_msg_send_add_dma_buffer_msg(vnet_info, rnode);
	}
}

static void
micvnet_initiate_link_down(struct micvnet_info *vnet_info)
{
	int ret;

	netif_tx_disable(vnet_info->vi_netdev);
	spin_lock_bh(&vnet_info->vi_txlock);
	atomic_set(&vnet_info->vi_state, MICVNET_STATE_LINK_DOWN);
	spin_unlock_bh(&vnet_info->vi_txlock);

	/* This wait precludes calling this function from the context of the
	 * vnet wq thread */
	ret = wait_event_interruptible_timeout(
		vnet_info->stop_waitq,
		(atomic_read(&vnet_info->cnt_tx_pending) == 0),
		STOP_WAIT_TIMEOUT);
	if (!ret)
		printk(KERN_ERR "%s: timeout waiting for Tx dma buffers to drain\n", __func__);
	/* To avoid introducing a lock in micvnet_msg_send_msg(), send the
	 * LINK_DOWN message from vnet wq thread context. LINK_DOWN will be the
	 * LAST message sent. */
	queue_work(vnet_info->vi_wq, &vnet_info->vi_ws_link_down);
}

static void
micvnet_stop_deinit(struct micvnet_info *vnet_info)
{
	flush_workqueue(vnet_info->vi_wq);
	atomic_set(&vnet_info->vi_state, MICVNET_STATE_UNINITIALIZED);

	micvnet_deinit_dma(vnet_info);
	micvnet_deinit_lists(vnet_info);
#ifdef HOST
	micvnet_reset_msg_rings(vnet_info);
#endif
	atomic_dec(&micvnet.lv_active_clients);
}

int
micvnet_probe(mic_ctx_t *mic_ctx)
{
	struct micvnet_info *vnet_info;
	int ret = 0;

	mic_ctx->bi_vethinfo = NULL;

	if (!micvnet.created)
		return 1;

	if (!(vnet_info = kzalloc(sizeof(struct micvnet_info), GFP_KERNEL))) {
		printk(KERN_ERR "%s: vnet_info alloc failed\n", __func__);
		return -ENOMEM;
	}

	mic_ctx->bi_vethinfo = vnet_info;
	vnet_info->mic_ctx = mic_ctx;

	if ((ret = micvnet_init_hw_regs(vnet_info)))
		goto err_exit_free_vnet_info;
	if ((ret = micvnet_init_msg_rings(vnet_info)))
		goto err_exit_deinit_hw_regs;
	if ((ret = micvnet_init_interrupts(vnet_info)))
		goto err_exit_deinit_msg_rings;
	if ((ret = micvnet_init_netdev(vnet_info)))
		goto err_exit_deinit_interrupts;

	atomic_set(&vnet_info->vi_state, MICVNET_STATE_UNINITIALIZED);
	return 0;

err_exit_deinit_interrupts:
	micvnet_deinit_interrupts(vnet_info);
err_exit_deinit_msg_rings:
	micvnet_deinit_msg_rings(vnet_info);
err_exit_deinit_hw_regs:
	micvnet_deinit_hw_regs(vnet_info);
err_exit_free_vnet_info:
	kfree(vnet_info);

	return ret;
}

void
micvnet_remove(mic_ctx_t *mic_ctx)
{
	struct micvnet_info
		*vnet_info = (struct micvnet_info *) mic_ctx->bi_vethinfo;

	if (!vnet_info)
		return;

	micvnet_stop(mic_ctx);

	vnet_info->vi_netdev->ml_priv = NULL;

	micvnet_deinit_interrupts(vnet_info);
	micvnet_deinit_msg_rings(vnet_info);
	micvnet_deinit_hw_regs(vnet_info);

	mic_ctx->bi_vethinfo = NULL;

	kfree(vnet_info);
}

int
micvnet_execute_start(struct micvnet_info *vnet_info)
{
	int ret = 0;

	if (!vnet_info) {
		printk(KERN_ERR "%s: vnet_info is NULL\n", __func__);
		return 1;
	}

	if (atomic_cmpxchg(&vnet_info->vi_state, MICVNET_STATE_UNINITIALIZED,
			   MICVNET_STATE_TRANSITIONING) != MICVNET_STATE_UNINITIALIZED) {
		printk(KERN_ERR "%s: wrong vnet state %d\n", __func__,
		       atomic_read(&vnet_info->vi_state));
		return 1;
	}

	if ((ret = micvnet_init_lists(vnet_info)))
		goto err_exit;
	if ((ret = micvnet_init_dma(vnet_info)))
		goto err_exit_deinit_lists;
	if ((ret = micvnet_init_rx_skbs(vnet_info))) {
		printk(KERN_ERR "%s: micvnet_init_rx_skbs failed\n", __func__);
		goto err_exit_deinit_dma;
	}

	memset(&vnet_info->vi_netdev->stats, 0, sizeof(vnet_info->vi_netdev->stats));
	atomic_inc(&micvnet.lv_active_clients);
	atomic_set(&vnet_info->vi_state, MICVNET_STATE_LINKUP);

	micvnet_msg_send_link_up_msg(vnet_info);
#ifdef HOST
	micvnet_send_add_dma_buffer_messages(vnet_info);
#else
	writel(MICVNET_CARD_UP_MAGIC, vnet_info->vi_scratch14);
	/* Card adds DMA buffers to host after receiving MICVNET_MSG_LINK_UP */
#endif
	return 0;

err_exit_deinit_dma:
	micvnet_deinit_dma(vnet_info);
err_exit_deinit_lists:
	/* RX SKBs are deallocated in micvnet_deinit_lists() */
	micvnet_deinit_lists(vnet_info);
err_exit:
	atomic_set(&vnet_info->vi_state, MICVNET_STATE_UNINITIALIZED);
	return ret;
}

static void
micvnet_start_ws(struct work_struct *work)
{
	struct micvnet_info *vnet_info
		= container_of(work, struct micvnet_info, vi_ws_start);

	micvnet_execute_start(vnet_info);
}

int
micvnet_start(mic_ctx_t *mic_ctx)
{
#ifndef HOST
	struct micvnet_info *vnet_info = (struct micvnet_info *) mic_ctx->bi_vethinfo;
	micvnet_execute_start(vnet_info);
#endif
	return 0;
}

void
micvnet_execute_stop(struct micvnet_info *vnet_info)
{
	int ret;

	if (!vnet_info)
		return;

	switch (atomic_read(&vnet_info->vi_state)) {
	case MICVNET_STATE_LINKUP:
	case MICVNET_STATE_BEGIN_UNINIT:
		break;
	default:
		return;
	}

#ifdef HOST
	if ((micpm_get_reference(vnet_to_ctx(vnet_info), true)) != 0)
		goto exit;
#endif
	micvnet_initiate_link_down(vnet_info);
	if (vnet_info->link_down_initiator &&
	    !(vnet_info->mic_ctx->state == MIC_SHUTDOWN &&
	      vnet_info->mic_ctx->sdbic1)) {
		ret = wait_event_interruptible_timeout(
			vnet_info->stop_waitq,
			(atomic_read(&vnet_info->vi_state) == MICVNET_STATE_BEGIN_UNINIT),
			STOP_WAIT_TIMEOUT);
		if (!ret)
			printk(KERN_ERR "%s: timeout waiting for link down message response\n", __func__);
	}

#ifdef HOST
	micpm_put_reference(vnet_to_ctx(vnet_info));
exit:
#endif
	micvnet_stop_deinit(vnet_info);
}

void
micvnet_stop(mic_ctx_t *mic_ctx)
{
	struct micvnet_info *vnet_info = (struct micvnet_info *) mic_ctx->bi_vethinfo;

	vnet_info->link_down_initiator = true;
	micvnet_execute_stop(vnet_info);
}

static void
micvnet_stop_ws(struct work_struct *work)
{
	struct micvnet_info *vnet_info
		= container_of(work, struct micvnet_info, vi_ws_stop);

	vnet_info->link_down_initiator = false;
	micvnet_execute_stop(vnet_info);
}

#if !defined(WINDOWS) && defined(HOST)
static ssize_t
show_vnet(struct device *dev, struct device_attribute *attr, char *buf);
DEVICE_ATTR(vnet, S_IRUGO, show_vnet, NULL);

static ssize_t
show_vnet(struct device *dev, struct device_attribute *attr, char *buf)
{
	return snprintf(buf, PAGE_SIZE, "Number of active vnet clients: %d\n",
			atomic_read(&micvnet.lv_active_clients));
}
#endif

int
micvnet_init(struct device *dev)
{
	int ret = 0;

	micvnet.created = 0;
	atomic_set(&micvnet.lv_active_clients, 0);

	if ((ret = rtnl_link_register(&micvnet_link_ops))) {
		printk(KERN_ERR "%s: rtnl_link_register failed\n", __func__);
		return ret;
	}

#ifdef HOST
	if ((ret = device_create_file(dev, &dev_attr_vnet))) {
		printk(KERN_ERR "%s: device_create_file failed\n", __func__);
		rtnl_link_unregister(&micvnet_link_ops);
		return ret;
	}
#endif
	micvnet.created = 1;
	return 0;
}

void
micvnet_exit(void)
{
	rtnl_link_unregister(&micvnet_link_ops);
}

#ifndef HOST
static void __exit
_micvnet_module_exit(void)
{
	mic_ctx_t *mic_ctx = &mic_ctx_g;

	micvnet_stop(mic_ctx);
	micvnet_remove(mic_ctx);
	micvnet_exit();
}

static int
micvnet_reboot(struct notifier_block *notifier, unsigned long unused1, void *unused2)
{
	/* Calling _micvnet_module_exit() here will hang the uOS during
	 * shutdown in the NFS-root case */
	return NOTIFY_OK;
}

static struct notifier_block micvnet_reboot_notifier = {
	.notifier_call	= micvnet_reboot,
	.priority	= 0,
};

void __exit
micvnet_module_exit(void)
{
	unregister_reboot_notifier(&micvnet_reboot_notifier);
	_micvnet_module_exit();
}

int __init
micvnet_module_init(void)
{
	mic_ctx_t *mic_ctx = &mic_ctx_g;
	int ret = 0;

	if ((ret = register_reboot_notifier(&micvnet_reboot_notifier))) {
		printk(KERN_ERR "register_reboot_notifier failed: error %d\n", ret);
		goto err_exit;
	}

	memset(mic_ctx, 0, sizeof(*mic_ctx));
	mic_ctx->bi_id = 0;

	if ((ret = micvnet_init(NULL)))
		goto err_exit_unregister_reboot_notifier;
	if ((ret = micvnet_probe(mic_ctx)))
		goto err_exit_micvnet_exit;
	if ((ret = micvnet_start(mic_ctx)))
		goto err_exit_micvnet_remove;

	return 0;

err_exit_micvnet_remove:
	micvnet_remove(mic_ctx);
err_exit_micvnet_exit:
	micvnet_exit();
err_exit_unregister_reboot_notifier:
	unregister_reboot_notifier(&micvnet_reboot_notifier);
err_exit:
	printk(KERN_ERR "%s failed: error %d\n", __func__, ret);
	return ret;
}

#ifdef STANDALONE_VNET_DMA
module_init(micvnet_module_init);
module_exit(micvnet_module_exit);
#endif

MODULE_LICENSE("GPL");
#endif