/* xeon-phi-kernel-module — virtio/mic_virtblk.c (commit 800f879a) */
1/*
2 virtio block device adapted for MIC.
3 copied from drivers/block/virtio_blk.c of Linux kernel
  It is initially committed by
5 Rusty Russell <rusty@rustcorp.com.au> 2007-10-21 18:03:38
6 with SHA1 ID, e467cde238184d1b0923db2cd61ae1c5a6dc15aa
7
8 drivers/block/virtio_blk.c of Linux kernel does not have copyright notice.
9
10 * For adapting to MIC
11 * (C) Copyright 2012 Intel Corporation
12 * Author: Caz Yokoyama <Caz.Yokoyama@intel.com>
13 *
14 * This program is free software; you can redistribute it and/or modify it
15 * under the terms and conditions of the GNU General Public License,
16 * version 2, as published by the Free Software Foundation.
17 *
18 * You should have received a copy of the GNU General Public License along with
19 * this program; if not, write to the Free Software Foundation, Inc.,
20 * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
21 *
22 * The full GNU General Public License is included in this distribution in
23 * the file called "COPYING".
24 */
25//#define DEBUG
26#include <linux/spinlock.h>
27#include <linux/slab.h>
28#include <linux/blkdev.h>
29#include <linux/hdreg.h>
30#include <linux/virtio.h>
31#include <linux/virtio_ring.h>
32#include <linux/virtio_blk.h>
33#include <linux/scatterlist.h>
34#include <linux/list.h>
35#include "mic_common.h"
36#include "mic/micveth_dma.h"
37#include "mic/micscif_intr.h"
38#include "mic/mic_virtio.h"
39
#define SBOX_MMIO_LENGTH (64 * 1024)	/* size of the SBOX MMIO window mapped in probe */

#define PART_BITS 4	/* minor-number bits reserved per disk (1 << 4 partitions) */

#define VIRTQUEUE_LENGTH 128		/* descriptor count of the single vring */
#define MIC_VRING_ALIGN PAGE_SIZE	/* vring alignment shared with the host */

#define INTERRUPT_ID_FOR_VIRTBLK 3	/* sbox interrupt index used by this driver */

/* Provided by MIC platform code: maps an sbox interrupt index to a Linux IRQ. */
extern int get_sbox_irq(int index);

/* major: block major allocated in init(); index: next vdX disk index. */
static int major, index = 0;
/* Physical address of the host-shared vb_shared area (module parameter). */
static long virtio_addr = 0;
/* Per-board bookkeeping; only dd_bi[0] is used (single device). */
static mic_data_t virtblk_mic_data;
54
/* Per-device state for one MIC virtio block device. */
struct virtio_blk
{
	spinlock_t lock;	/* protects vq, reqs and serves as the request-queue lock */

	struct virtio_device *vdev;
	struct virtqueue *vq;	/* the single "requests" virtqueue */

	/* The disk structure for the kernel. */
	struct gendisk *disk;

	/* Request tracking. */
	struct list_head reqs;	/* in-flight struct virtblk_req entries */

	mempool_t *pool;	/* allocation pool for struct virtblk_req */

	/* virtual address of blk_config */
	void __iomem *ioaddr;

	/* What host tells us, plus 2 for header & trailer. */
	unsigned int sg_elems;

	/* sbox va */
	u8 *sbox;

	/* Scatterlist: can be too big for stack. */
	struct scatterlist sg[/*sg_elems*/];
};
82
/* One in-flight block request; lives on virtio_blk.reqs until completed. */
struct virtblk_req
{
	struct list_head list;		/* link in virtio_blk.reqs */
	struct request *req;		/* the block-layer request being serviced */
	struct virtio_blk_outhdr out_hdr;	/* header the host reads (type/sector/prio) */
	struct virtio_scsi_inhdr in_hdr;	/* SCSI status returned for BLOCK_PC requests */
	u8 status;			/* VIRTIO_BLK_S_* completion status from the host */
};
91
/* True when the request is a SCSI pass-through (packet) command. */
#define blk_pc_request(rq) ((rq)->cmd_type == REQ_TYPE_BLOCK_PC)

/* The following vring_virtqueue and to_vvq() are copied from virtio_ring.c.
   Please make sure you have the same structure as in virtio_ring.c.  The
   reason why they are copied is that I don't want to change virtio_ring.c
   which is a symbolic link.
*/
struct vring_virtqueue
{
	struct virtqueue vq;

	/* Actual memory layout for this queue */
	struct vring vring;

	/* Other side has made a mess, don't try any more. */
	bool broken;

	/* Host supports indirect buffers */
	bool indirect;

	/* Number of free buffers */
	unsigned int num_free;
	/* Head of free buffer list. */
	unsigned int free_head;
	/* Number we've added since last sync. */
	unsigned int num_added;

	/* Last used index we've seen. */
	u16 last_used_idx;

	/* How to notify other side. FIXME: commonalize hcalls! */
	void (*notify)(struct virtqueue *vq);

#ifdef DEBUG
	/* They're supposed to lock for us. */
	unsigned int in_use;
#endif

	struct _mic_ctx_t *mic_ctx;
	/* Tokens for callbacks. */
	void *data[];
};

/* Recover the containing vring_virtqueue from its embedded struct virtqueue. */
#define to_vvq(_vq) container_of(_vq, struct vring_virtqueue, vq)
134
135static void blk_done(struct virtqueue *vq)
136{
137 struct virtio_blk *vblk = vq->vdev->priv;
138 struct virtblk_req *vbr;
139 unsigned int len;
140 unsigned long flags;
141
142 spin_lock_irqsave(&vblk->lock, flags);
143 while ((vbr = virtqueue_get_buf(vblk->vq, &len)) != NULL) {
144 int error;
145
146 switch (vbr->status) {
147 case VIRTIO_BLK_S_OK:
148 error = 0;
149 break;
150 case VIRTIO_BLK_S_UNSUPP:
151 error = -ENOTTY;
152 break;
153 default:
154 error = -EIO;
155 break;
156 }
157
158 if (blk_pc_request(vbr->req)) {
159 vbr->req->resid_len = vbr->in_hdr.residual;
160 vbr->req->sense_len = vbr->in_hdr.sense_len;
161 vbr->req->errors = vbr->in_hdr.errors;
162 }
163
164 __blk_end_request_all(vbr->req, error);
165 list_del(&vbr->list);
166 mempool_free(vbr, vblk->pool);
167 }
168 /* In case queue is stopped waiting for more buffers. */
169 blk_start_queue(vblk->disk->queue);
170 spin_unlock_irqrestore(&vblk->lock, flags);
171}
172
/*
 * Build the scatterlist for one block request and post it to the virtqueue.
 *
 * Scatterlist layout (order matters — the host expects exactly this):
 *   [out_hdr] [scsi cmd, PC only] [data pages...] [sense + in_hdr, PC only] [status]
 * Entries before the data are host-readable ("out"), entries after are
 * host-writable ("in").
 *
 * Returns false (without consuming the request) when either the mempool or
 * the virtqueue is exhausted; the caller stops the queue and retries after
 * a completion.  Called with the queue lock held.
 */
static bool do_req(struct request_queue *q, struct virtio_blk *vblk,
		   struct request *req)
{
	unsigned long num, out = 0, in = 0;
	struct virtblk_req *vbr;

	vbr = mempool_alloc(vblk->pool, GFP_ATOMIC);
	if (!vbr)
		/* When another request finishes we'll try again. */
		return false;

	vbr->req = req;

	/* Fill out_hdr from the request type; sector only matters for FS I/O. */
	if (req->cmd_flags & REQ_FLUSH) {
		vbr->out_hdr.type = VIRTIO_BLK_T_FLUSH;
		vbr->out_hdr.sector = 0;
		vbr->out_hdr.ioprio = req_get_ioprio(vbr->req);
	} else {
		switch (req->cmd_type) {
		case REQ_TYPE_FS:
			vbr->out_hdr.type = 0;
			vbr->out_hdr.sector = blk_rq_pos(vbr->req);
			vbr->out_hdr.ioprio = req_get_ioprio(vbr->req);
			break;
		case REQ_TYPE_BLOCK_PC:
			vbr->out_hdr.type = VIRTIO_BLK_T_SCSI_CMD;
			vbr->out_hdr.sector = 0;
			vbr->out_hdr.ioprio = req_get_ioprio(vbr->req);
			break;
		case REQ_TYPE_SPECIAL:
			vbr->out_hdr.type = VIRTIO_BLK_T_GET_ID;
			vbr->out_hdr.sector = 0;
			vbr->out_hdr.ioprio = req_get_ioprio(vbr->req);
			break;
		default:
			/* We don't put anything else in the queue. */
			BUG();
		}
	}

	sg_set_buf(&vblk->sg[out++], &vbr->out_hdr, sizeof(vbr->out_hdr));

	/*
	 * If this is a packet command we need a couple of additional headers.
	 * Behind the normal outhdr we put a segment with the scsi command
	 * block, and before the normal inhdr we put the sense data and the
	 * inhdr with additional status information before the normal inhdr.
	 */
	if (blk_pc_request(vbr->req))
		sg_set_buf(&vblk->sg[out++], vbr->req->cmd, vbr->req->cmd_len);

	/* Map the data payload after the out headers. */
	num = blk_rq_map_sg(q, vbr->req, vblk->sg + out);

	if (blk_pc_request(vbr->req)) {
		/* 96 bytes is the sense-buffer size the host expects.
		   NOTE(review): magic constant — presumably SCSI_SENSE_BUFFERSIZE; confirm. */
		sg_set_buf(&vblk->sg[num + out + in++], vbr->req->sense, 96);
		sg_set_buf(&vblk->sg[num + out + in++], &vbr->in_hdr,
			   sizeof(vbr->in_hdr));
	}

	/* Status byte is always the final, host-writable element. */
	sg_set_buf(&vblk->sg[num + out + in++], &vbr->status,
		   sizeof(vbr->status));

	/* Data pages count as "out" for writes, "in" for reads. */
	if (num) {
		if (rq_data_dir(vbr->req) == WRITE) {
			vbr->out_hdr.type |= VIRTIO_BLK_T_OUT;
			out += num;
		} else {
			vbr->out_hdr.type |= VIRTIO_BLK_T_IN;
			in += num;
		}
	}

	if (virtqueue_add_buf(vblk->vq, vblk->sg, out, in, vbr) < 0) {
		mempool_free(vbr, vblk->pool);
		return false;
	}

	list_add_tail(&vbr->list, &vblk->reqs);
	return true;
}
253
254static void do_virtblk_request(struct request_queue *q)
255{
256 struct virtio_blk *vblk = q->queuedata;
257 struct request *req;
258 unsigned int issued = 0;
259
260 while ((req = blk_peek_request(q)) != NULL) {
261 BUG_ON(req->nr_phys_segments + 2 > vblk->sg_elems);
262
263 /* If this request fails, stop queue and wait for something to
264 finish to restart it. */
265 if (!do_req(q, vblk, req)) {
266 blk_stop_queue(q);
267 break;
268 }
269 blk_start_request(req);
270 issued++;
271 }
272
273 if (issued)
274 virtqueue_kick(vblk->vq);
275}
276
277static int
278set_capacity_from_host(struct virtio_blk *vblk)
279{
280 struct virtio_device *vdev = vblk->vdev;
281 u64 cap;
282
283 /* Host must always specify the capacity. */
284 vdev->config->get(vdev, offsetof(struct virtio_blk_config, capacity),
285 &cap, sizeof(cap));
286 if (cap == 0) {
287 printk(KERN_ERR "Have you set virtblk file?\n");
288 return -ENXIO;
289 }
290
291 /* If capacity is too big, truncate with warning. */
292 if ((sector_t)cap != cap) {
293 dev_warn(&vdev->dev, "Capacity %llu too large: truncating\n",
294 (unsigned long long)cap);
295 cap = (sector_t)-1;
296 }
297 set_capacity(vblk->disk, cap);
298
299 return 0;
300}
301
302static int
303virtblk_open(struct block_device *bdev, fmode_t mode)
304{
305 struct gendisk *disk = bdev->bd_disk;
306 struct virtio_blk *vblk = disk->private_data;
307
308 return set_capacity_from_host(vblk);
309}
310
311static int virtblk_ioctl(struct block_device *bdev, fmode_t mode,
312 unsigned cmd, unsigned long data)
313{
314 struct gendisk *disk = bdev->bd_disk;
315 struct virtio_blk *vblk = disk->private_data;
316
317 /*
318 * Only allow the generic SCSI ioctls if the host can support it.
319 */
320 if (!virtio_has_feature(vblk->vdev, VIRTIO_BLK_F_SCSI))
321 return -ENOTTY;
322
323 return scsi_cmd_ioctl(disk->queue, disk, mode, cmd,
324 (void __user *)data);
325}
326
327/* We provide getgeo only to please some old bootloader/partitioning tools */
328static int virtblk_getgeo(struct block_device *bd, struct hd_geometry *geo)
329{
330 struct virtio_blk *vblk = bd->bd_disk->private_data;
331 struct virtio_blk_geometry vgeo;
332 int err;
333
334 /* see if the host passed in geometry config */
335 err = virtio_config_val(vblk->vdev, VIRTIO_BLK_F_GEOMETRY,
336 offsetof(struct virtio_blk_config, geometry),
337 &vgeo);
338
339 if (!err) {
340 geo->heads = vgeo.heads;
341 geo->sectors = vgeo.sectors;
342 geo->cylinders = vgeo.cylinders;
343 } else {
344 /* some standard values, similar to sd */
345 geo->heads = 1 << 6;
346 geo->sectors = 1 << 5;
347 geo->cylinders = get_capacity(bd->bd_disk) >> 11;
348 }
349 return 0;
350}
351
/* Block device operations exposed to the block layer for the vd* disks. */
static const struct block_device_operations virtblk_fops = {
	.open = virtblk_open,
	.ioctl = virtblk_ioctl,
	.owner = THIS_MODULE,
	.getgeo = virtblk_getgeo,
};
358
359static int index_to_minor(int index)
360{
361 return index << PART_BITS;
362}
363
364static inline bool more_used(const struct vring_virtqueue *vq)
365{
366 return vq->last_used_idx != vq->vring.used->idx;
367}
368
369static irqreturn_t
370mic_virtblk_intr_handler(int irq, void *_vq)
371{
372 struct vring_virtqueue *vq = to_vvq(_vq);
373
374 if (!more_used(vq)) {
375 pr_debug("virtqueue interrupt with no work for %p\n", vq);
376 goto _exit_;
377 }
378
379 if (unlikely(vq->broken))
380 goto _exit_;
381
382 pr_debug("virtqueue callback for %p (%p)\n", vq, vq->vq.callback);
383 if (vq->vq.callback)
384 vq->vq.callback(&vq->vq);
385
386 _exit_:
387 return IRQ_HANDLED;
388}
389
/*
 * Probe one virtio block device: read the host's feature bits, allocate the
 * virtio_blk state (with a trailing scatterlist sized from seg_max), map the
 * SBOX MMIO window, create the single "requests" virtqueue plus its
 * interrupt, then build and register the gendisk/request queue configured
 * from the host-provided virtio_blk_config fields.
 * Returns 0 on success or a negative errno (resources unwound via gotos).
 */
static int __devinit virtblk_probe(struct virtio_device *vdev)
{
	struct virtio_blk *vblk;
	struct request_queue *q;
	int err;
	u32 v, blk_size, sg_elems, opt_io_size;
	u16 min_io_size;
	u8 physical_block_exp, alignment_offset;
	struct board_info *bd_info = virtblk_mic_data.dd_bi[0];
	struct vb_shared *vb_shared;

	/* Out of minor numbers for another disk? */
	if (index_to_minor(index) >= 1 << MINORBITS)
		return -ENOSPC;

	/* Adopt the host's feature bits from the shared area. */
	vb_shared = ((struct mic_virtblk *)bd_info->bi_virtio)->vb_shared;
	vdev->features[0] = readl(&vb_shared->host_features);

	/* We need to know how many segments before we allocate. */
	err = virtio_config_val(vdev, VIRTIO_BLK_F_SEG_MAX,
				offsetof(struct virtio_blk_config, seg_max),
				&sg_elems);
	if (err)
		sg_elems = 1;

	/* We need an extra sg elements at head and tail. */
	sg_elems += 2;
	vdev->priv = vblk = kmalloc(sizeof(*vblk) +
		sizeof(vblk->sg[0]) * sg_elems, GFP_KERNEL);
	if (!vblk) {
		err = -ENOMEM;
		goto out;
	}

	INIT_LIST_HEAD(&vblk->reqs);
	spin_lock_init(&vblk->lock);
	vblk->vdev = vdev;
	vblk->sg_elems = sg_elems;
	sg_init_table(vblk->sg, vblk->sg_elems);

	/* map sbox */
	vblk->sbox = ioremap_nocache(SBOX_BASE, SBOX_MMIO_LENGTH);
	if (!vblk->sbox) {
		printk(KERN_ERR "%s: NULL SBOX ptr\n", __func__);
		err = -ENOMEM;
		goto out_free_vblk;
	}

	/* We expect one virtqueue, for output. */
	vblk->vq = virtio_find_single_vq(vdev, blk_done, "requests");
	if (IS_ERR(vblk->vq)) {
		err = PTR_ERR(vblk->vq);
		goto out_unmap_sbox;
	}

	/* Route the host's doorbell interrupt to our handler. */
	if ((err = request_irq(get_sbox_irq(VIRTIO_SBOX_INT_IDX),
			       mic_virtblk_intr_handler, IRQF_DISABLED,
			       "virtio intr", vblk->vq))) {
		printk(KERN_ERR "%s: can't register interrupt: %d\n", __func__, err);
		goto out_free_vq;
	}

	/* Min-size pool so completion can always free/alloc one request. */
	vblk->pool = mempool_create_kmalloc_pool(1,sizeof(struct virtblk_req));
	if (!vblk->pool) {
		err = -ENOMEM;
		goto out_free_irq;
	}

	/* FIXME: How many partitions?  How long is a piece of string? */
	vblk->disk = alloc_disk(1 << PART_BITS);
	if (!vblk->disk) {
		err = -ENOMEM;
		goto out_mempool;
	}

	/* NOTE(review): error paths from here on only put_disk(); the queue
	   allocated by blk_init_queue() is never blk_cleanup_queue()d —
	   confirm this leak. */
	q = vblk->disk->queue = blk_init_queue(do_virtblk_request, &vblk->lock);
	if (!q) {
		err = -ENOMEM;
		goto out_put_disk;
	}

	q->queuedata = vblk;

	/* vda..vdz, vdaa..vdzz, vdaaa.. naming, same scheme as virtio_blk. */
	if (index < 26) {
		sprintf(vblk->disk->disk_name, "vd%c", 'a' + index % 26);
	} else if (index < (26 + 1) * 26) {
		sprintf(vblk->disk->disk_name, "vd%c%c",
			'a' + index / 26 - 1, 'a' + index % 26);
	} else {
		const unsigned int m1 = (index / 26 - 1) / 26 - 1;
		const unsigned int m2 = (index / 26 - 1) % 26;
		const unsigned int m3 = index % 26;
		sprintf(vblk->disk->disk_name, "vd%c%c%c",
			'a' + m1, 'a' + m2, 'a' + m3);
	}

	vblk->disk->major = major;
	vblk->disk->first_minor = index_to_minor(index);
	vblk->disk->private_data = vblk;
	vblk->disk->fops = &virtblk_fops;
	vblk->disk->driverfs_dev = NULL; // There is no parent device.
	/* NOTE(review): index is consumed before the remaining steps can still
	   fail, so a failed probe burns a disk index — confirm acceptable. */
	index++;

	/* configure queue flush support */
	if (virtio_has_feature(vdev, VIRTIO_BLK_F_FLUSH))
		blk_queue_flush(q, REQ_FLUSH);

	/* If disk is read-only in the host, the guest should obey */
	if (virtio_has_feature(vdev, VIRTIO_BLK_F_RO)) {
		if (vdev->config->get_features(vdev) & (1U << VIRTIO_BLK_F_RO)) {
			set_disk_ro(vblk->disk, 1);
		}
	}

	err = set_capacity_from_host(vblk);
	if (err)
		goto out_put_disk;

	/* We can handle whatever the host told us to handle. */
	blk_queue_max_segments(q, vblk->sg_elems-2);

	/* No need to bounce any requests */
	blk_queue_bounce_limit(q, BLK_BOUNCE_ANY);

	/* No real sector limit. */
	blk_queue_max_hw_sectors(q, -1U);

	/* Host can optionally specify maximum segment size and number of
	 * segments. */
	err = virtio_config_val(vdev, VIRTIO_BLK_F_SIZE_MAX,
				offsetof(struct virtio_blk_config, size_max),
				&v);
	if (!err)
		blk_queue_max_segment_size(q, v);
	else
		blk_queue_max_segment_size(q, -1U);

	/* Host can optionally specify the block size of the device */
	err = virtio_config_val(vdev, VIRTIO_BLK_F_BLK_SIZE,
				offsetof(struct virtio_blk_config, blk_size),
				&blk_size);
	if (!err)
		blk_queue_logical_block_size(q, blk_size);
	else
		blk_size = queue_logical_block_size(q);

	/* Use topology information if available */
	err = virtio_config_val(vdev, VIRTIO_BLK_F_TOPOLOGY,
			offsetof(struct virtio_blk_config, physical_block_exp),
			&physical_block_exp);
	if (!err && physical_block_exp)
		blk_queue_physical_block_size(q,
				blk_size * (1 << physical_block_exp));

	err = virtio_config_val(vdev, VIRTIO_BLK_F_TOPOLOGY,
			offsetof(struct virtio_blk_config, alignment_offset),
			&alignment_offset);
	if (!err && alignment_offset)
		blk_queue_alignment_offset(q, blk_size * alignment_offset);

	err = virtio_config_val(vdev, VIRTIO_BLK_F_TOPOLOGY,
			offsetof(struct virtio_blk_config, min_io_size),
			&min_io_size);
	if (!err && min_io_size)
		blk_queue_io_min(q, blk_size * min_io_size);

	err = virtio_config_val(vdev, VIRTIO_BLK_F_TOPOLOGY,
			offsetof(struct virtio_blk_config, opt_io_size),
			&opt_io_size);
	if (!err && opt_io_size)
		blk_queue_io_opt(q, blk_size * opt_io_size);

	/* Everything is configured — make the disk visible. */
	add_disk(vblk->disk);
	return 0;

out_put_disk:
	put_disk(vblk->disk);
out_mempool:
	mempool_destroy(vblk->pool);
out_free_irq:
	free_irq(get_sbox_irq(VIRTIO_SBOX_INT_IDX), vblk->vq);
out_free_vq:
	vdev->config->del_vqs(vdev);
out_unmap_sbox:
	iounmap(vblk->sbox);
out_free_vblk:
	kfree(vblk);
out:
	return err;
}
579
/*
 * Tear down a device created by virtblk_probe(), in reverse order of
 * construction: IRQ, device reset, gendisk + queue, mempool, virtqueue,
 * sbox mapping, and finally the virtio_blk state itself.
 */
static void __devexit virtblk_remove(struct virtio_device *vdev)
{
	struct virtio_blk *vblk = vdev->priv;

	/* Nothing should be pending. */
	BUG_ON(!list_empty(&vblk->reqs));

	free_irq(get_sbox_irq(VIRTIO_SBOX_INT_IDX), vblk->vq);

	/* Stop all the virtqueues.  (virtblk_reset is currently a no-op.) */
	vdev->config->reset(vdev);

	del_gendisk(vblk->disk);
	blk_cleanup_queue(vblk->disk->queue);
	put_disk(vblk->disk);
	mempool_destroy(vblk->pool);
	vdev->config->del_vqs(vdev);
	iounmap(vblk->sbox);
	kfree(vblk);
}
600
601/* config->get_features() implementation */
602static u32 virtblk_get_features(struct virtio_device *vdev)
603{
604 /* When someone needs more than 32 feature bits, we'll need to
605 * steal a bit to indicate that the rest are somewhere else. */
606 struct board_info *bd_info = virtblk_mic_data.dd_bi[0];
607 struct vb_shared *vb_shared;
608
609 vb_shared = ((struct mic_virtblk *)bd_info->bi_virtio)->vb_shared;
610 return readl(&vb_shared->host_features);
611}
612
/* virtio config->finalize_features() implementation */
static void virtblk_finalize_features(struct virtio_device *vdev)
{
	struct board_info *bd_info = virtblk_mic_data.dd_bi[0];
	struct vb_shared *vb_shared;

	/* Give virtio_ring a chance to accept features. */
	vring_transport_features(vdev);

	/* We only support 32 feature bits. */
	BUILD_BUG_ON(ARRAY_SIZE(vdev->features) != 1);

	/* Publish the accepted feature set back to the host. */
	vb_shared = ((struct mic_virtblk *)bd_info->bi_virtio)->vb_shared;
	writel(vdev->features[0], &vb_shared->client_features);
}
628
629/* config->get() implementation */
630static void virtblk_get(struct virtio_device *vdev, unsigned offset,
631 void *buf, unsigned len)
632{
633 struct board_info *bd_info = virtblk_mic_data.dd_bi[0];
634 struct vb_shared *vb_shared;
635 void *ioaddr;
636 u8 *ptr = buf;
637 int i;
638
639 vb_shared = ((struct mic_virtblk *)bd_info->bi_virtio)->vb_shared;
640 ioaddr = (void *)&vb_shared->blk_config + offset;
641 for (i = 0; i < len; i++)
642 ptr[i] = readb(ioaddr + i);
643}
644
/* config->reset() implementation — intentionally empty: no reset handshake
   with the host is implemented.  NOTE(review): virtblk_remove() calls this
   to "stop all the virtqueues"; confirm the host needs no notification. */
static void virtblk_reset(struct virtio_device *vdev)
{
}
648
649/* the notify function used when creating a virt queue */
650static void virtblk_notify(struct virtqueue *vq)
651{
652 const int doorbell = 2;
653 struct virtio_blk *vblk = vq->vdev->priv;
654 uint32_t db_reg;
655
656 /* Ring host doorbell interrupt */
657 db_reg = readl(vblk->sbox + (SBOX_SDBIC0 + (4 * doorbell)))
658 | SBOX_SDBIC0_DBREQ_BIT;
659 writel(db_reg, vblk->sbox + (SBOX_SDBIC0 + (4 * doorbell)));
660}
661
662/* the config->del_vqs() implementation */
663static void virtblk_del_vqs(struct virtio_device *vdev)
664{
665 struct virtio_blk *vblk = vdev->priv;
666 unsigned long size;
667
668 size = PAGE_ALIGN(vring_size(VIRTQUEUE_LENGTH, MIC_VRING_ALIGN));
669 free_pages_exact(vblk->vq->priv, size);
670
671 vring_del_virtqueue(vblk->vq);
672 vblk->vq = NULL;
673}
674
/* the config->find_vqs() implementation
 *
 * Allocates the ring memory, wraps it in a vring virtqueue whose notify
 * hook rings the host doorbell, and publishes the ring's physical addresses
 * to the host through the shared vb_shared->vring so the host can attach.
 * Only a single queue is supported.
 */
static int virtblk_find_vqs(struct virtio_device *vdev, unsigned nvqs,
			    struct virtqueue *vqs[],
			    vq_callback_t *callbacks[],
			    const char *names[])
{
	struct virtio_blk *vblk = vdev->priv;
	struct virtqueue *vq;
	int err;
	unsigned long size;
	void *queue; /* the virtual address of the ring queue */
	struct vring_virtqueue *vvq;
	struct vring *vring;
	struct board_info *bd_info = virtblk_mic_data.dd_bi[0];

	BUG_ON(nvqs != 1);
	BUG_ON(vblk == NULL);

	/* Ring memory must be page-aligned and zeroed for the host. */
	size = PAGE_ALIGN(vring_size(VIRTQUEUE_LENGTH, MIC_VRING_ALIGN));
	queue = alloc_pages_exact(size, GFP_KERNEL|__GFP_ZERO);
	if (queue == NULL) {
		err = -ENOMEM;
		goto out_info;
	}

	/* create the vring */
	vq = vring_new_virtqueue(VIRTQUEUE_LENGTH, MIC_VRING_ALIGN,
				 vdev, queue, virtblk_notify, callbacks[0], names[0]);
	if (vq == NULL) {
		err = -ENOMEM;
		goto out_activate_queue;
	}
	/* Stash the ring pages so del_vqs can free them. */
	vq->priv = queue;

	vqs[0] = vblk->vq = vq;

	/* Tell the host where the ring lives (physical addresses). */
	vvq = to_vvq(vq);
	vring = &((struct mic_virtblk *)bd_info->bi_virtio)->vb_shared->vring;
	writel(vvq->vring.num, &vring->num);
	writeq(virt_to_phys(vvq->vring.desc), &vring->desc);
	writeq(virt_to_phys(vvq->vring.avail), &vring->avail);
	writeq(virt_to_phys(vvq->vring.used), &vring->used);

	return 0;

out_activate_queue:
	free_pages_exact(queue, size);
out_info:
	return err;
}
725
/* virtio config operations, all backed by the host-shared vb_shared area.
   Unimplemented hooks are left commented out. */
static struct virtio_config_ops virtio_blk_config_ops = {
	.get = virtblk_get,
	// .set = vp_set,
	// .get_status = vp_get_status,
	// .set_status = vp_set_status,
	.reset = virtblk_reset,
	.find_vqs = virtblk_find_vqs,
	.del_vqs = virtblk_del_vqs,
	.get_features = virtblk_get_features,
	.finalize_features = virtblk_finalize_features,
};
737
/* Feature bits this driver understands; intersected with the host's offer. */
static unsigned int features[] = {
	VIRTIO_BLK_F_SEG_MAX, VIRTIO_BLK_F_SIZE_MAX, VIRTIO_BLK_F_GEOMETRY,
	VIRTIO_BLK_F_RO, VIRTIO_BLK_F_BLK_SIZE, VIRTIO_BLK_F_SCSI,
	VIRTIO_BLK_F_FLUSH, VIRTIO_BLK_F_TOPOLOGY
};
743
/*
 * virtio_blk causes spurious section mismatch warning by
 * simultaneously referring to a __devinit and a __devexit function.
 * Use __refdata to avoid this warning.
 *
 * Note: no .probe/.remove are wired here — init() calls virtblk_probe()
 * directly rather than registering with the virtio bus.
 */
static struct virtio_driver __refdata virtio_blk = {
	.feature_table = features,
	.feature_table_size = ARRAY_SIZE(features),
	.driver.name = KBUILD_MODNAME,
	.driver.owner = THIS_MODULE,
};
755
/* Minimal class/device-type stand-ins so the hand-rolled virtio_device in
   init() can be device_initialize()d without the driver-core block class.
   NOTE(review): `block_class` is non-static and shares its name with the
   kernel's own block_class symbol — confirm there is no clash at link time. */
struct class block_class = {
	.name = "block",
};

static struct device_type disk_type = {
	.name = "disk",
	/*
	.groups = disk_attr_groups,
	.release = disk_release,
	.devnode = block_devnode,
	*/
};
768
769static int __init init(void)
770{
771 bd_info_t *bd_info;
772 struct virtio_device *vdev;
773 struct mic_virtblk *mic_virtblk;
774 int ret;
775 struct vb_shared *vb_shared;
776
777#ifdef CONFIG_ML1OM
778 printk(KERN_ERR "virtio block device is not available on KNF\n");
779 ret = -ENODEV;
780 goto error_return;
781#endif
782 major = register_blkdev(0, "virtblk");
783 if (major < 0) {
784 ret = major;
785 goto error_return;
786 }
787
788 bd_info = kmalloc(sizeof(bd_info_t), GFP_KERNEL);
789 if (bd_info == NULL) {
790 ret = -ENOMEM;
791 goto error_return;
792 }
793 memset(bd_info, 0, sizeof(*bd_info));
794 virtblk_mic_data.dd_numdevs = 1;
795 index = 0;
796 virtblk_mic_data.dd_bi[0] = bd_info;
797 bd_info->bi_ctx.bi_id = 0;
798
799 mic_virtblk = kmalloc(sizeof(*mic_virtblk), GFP_KERNEL);
800 if (mic_virtblk == NULL) {
801 ret = -ENOMEM;
802 goto free_bd_info;
803 }
804 memset(mic_virtblk, 0, sizeof(*mic_virtblk));
805 bd_info->bi_virtio = (void *)mic_virtblk;
806
807 if (virtio_addr == 0) {
808 printk(KERN_ERR "virtio address is not passed from host\n");
809 return -ENODEV;
810 goto free_mic_virtblk;
811 }
812 vb_shared = ioremap_nocache(virtio_addr, sizeof(*vb_shared));
813 if (vb_shared == NULL) {
814 ret = -ENODEV;
815 goto free_mic_virtblk;
816 }
817 vb_shared->update = true;
818 mic_virtblk->vb_shared = vb_shared;
819
820 vdev = kmalloc(sizeof(*vdev), GFP_KERNEL);
821 if (vdev == NULL) {
822 ret = -ENOMEM;
823 goto free_mic_virtblk;
824 }
825 memset(vdev, 0, sizeof(*vdev));
826 vdev->config = &virtio_blk_config_ops;
827 INIT_LIST_HEAD(&vdev->vqs);
828 vdev->dev.driver = &virtio_blk.driver;
829 vdev->dev.class = &block_class;
830 vdev->dev.type = &disk_type;
831 device_initialize(&vdev->dev);
832 mic_virtblk->vdev = (void *)vdev;
833
834 return virtblk_probe(vdev);
835
836 free_mic_virtblk:
837 kfree(bd_info->bi_virtio);
838 free_bd_info:
839 kfree(bd_info);
840 error_return:
841 return ret;
842}
843
/*
 * Module exit: undo init() — unregister the major, tear down the probed
 * device, unmap the shared area and free the bookkeeping structures.
 * NOTE(review): unregister_blkdev() runs before virtblk_remove(), i.e. the
 * major is released while the disk is still registered — confirm this
 * ordering is intended (the reverse of init()'s order would be safer).
 */
static void __exit fini(void)
{
	bd_info_t *bd_info = virtblk_mic_data.dd_bi[0];
	struct mic_virtblk *mic_virtblk = (struct mic_virtblk *)bd_info->bi_virtio;

	unregister_blkdev(major, "virtblk");
	virtblk_remove(mic_virtblk->vdev);
	iounmap(mic_virtblk->vb_shared);
	kfree(mic_virtblk->vdev);
	kfree(bd_info->bi_virtio);
	kfree(bd_info);
}
module_init(init);
module_exit(fini);

MODULE_DESCRIPTION("Virtio block driver");
MODULE_LICENSE("GPL");
/* virtio_addr: physical address of the host-allocated vb_shared structure,
   read-only after load (S_IRUGO). */
MODULE_PARM_DESC(virtio_addr, "address of virtio related structure");
module_param(virtio_addr, long, S_IRUGO);