+/*
+ * NOTE(review): fragment of the vnode pager I/O path.  Two cases are
+ * visible here: (1) a regular-file transfer done through VOP_READ/VOP_WRITE
+ * on a pager-private kva mapping, and (2) a direct-device read that tries
+ * the buffer cache first and otherwise builds a raw strategy request.
+ * The enclosing function header is outside this hunk — foff, vnp, size,
+ * kva, error, count, reqpage, paging_offset, bsize, mapsize etc. are
+ * declared there; confirm their types against the full file.
+ */
+ if (foff >= vnp->vnp_size) {
+ errtype = 1;
+ error = VM_PAGER_BAD;
+ } else {
+ /* clip the transfer so it does not run past end-of-file */
+ if (foff + NBPG > vnp->vnp_size)
+ size = vnp->vnp_size - foff;
+ else
+ size = NBPG;
+/*
+ * Allocate a kernel virtual address and initialize so that
+ * we can use VOP_READ/WRITE routines.
+ */
+ kva = vm_pager_map_page(m[0]);
+ aiov.iov_base = (caddr_t)kva;
+ aiov.iov_len = size;
+ auio.uio_iov = &aiov;
+ auio.uio_iovcnt = 1;
+ auio.uio_offset = foff;
+ auio.uio_segflg = UIO_SYSSPACE;
+ auio.uio_rw = rw;
+ auio.uio_resid = size;
+ auio.uio_procp = (struct proc *)0;
+ /* rw selects the direction; both go through the vnode op vector */
+ if (rw == UIO_READ) {
+ error = VOP_READ(vp, &auio, IO_PAGER, p->p_ucred);
+ } else {
+ error = VOP_WRITE(vp, &auio, IO_PAGER, p->p_ucred);
+ }
+ /*
+ * A zero-length transfer with no error is treated as failure;
+ * a short read must zero-fill the remainder of the page so no
+ * stale kernel memory is exposed to the mapping.
+ */
+ if (!error) {
+ register int count = size - auio.uio_resid;
+
+ if (count == 0)
+ error = EINVAL;
+ else if (count != NBPG && rw == UIO_READ)
+ bzero((caddr_t)kva + count, NBPG - count);
+ }
+ vm_pager_unmap_page(kva);
+ }
+ } else {
+
+ /*
+ * here on direct device I/O
+ */
+ int first=0, last=count;
+ int reqaddr, firstaddr;
+ int block, offset;
+
+ struct buf *bp;
+ int s;
+ int failflag;
+
+ foff = m[reqpage]->offset + paging_offset;
+
+ /*
+ * This pathetic hack gets data from the buffer cache, if it's there.
+ * I believe that this is not really necessary, and the ends can
+ * be gotten by defaulting to the normal vfs read behavior, but this
+ * might be more efficient, because the will NOT invoke read-aheads
+ * and one of the purposes of this code is to bypass the buffer
+ * cache and keep from flushing it by reading in a program.
+ */
+ /*
+ * calculate logical block and offset
+ */
+ block = foff / bsize;
+ offset = foff % bsize;
+ s = splbio();
+
+ /*
+ * if we have a buffer in core, then try to use it
+ */
+ while (bp = incore(vp, block)) {
+ int amount;
+
+ /*
+ * wait until the buffer is avail or gone
+ */
+ if (bp->b_flags & B_BUSY) {
+ bp->b_flags |= B_WANTED;
+ tsleep ((caddr_t)bp, PVM, "vnwblk", 0);
+ /* re-lookup: the buffer may have been reused while we slept */
+ continue;
+ }
+
+ amount = NBPG;
+ if ((foff + amount) > vnp->vnp_size)
+ amount = vnp->vnp_size - foff;
+
+ /*
+ * make sure that this page is in the buffer
+ * (amount > 0 also guards against foff at/past EOF, which
+ * would make amount non-positive above)
+ */
+ if ((amount > 0) && (offset + amount) <= bp->b_bcount) {
+ /* mark the buffer busy before dropping splbio */
+ bp->b_flags |= B_BUSY;
+ splx(s);
+
+ /*
+ * map the requested page
+ */
+ pmap_enter(vm_map_pmap(pager_map),
+ kva, VM_PAGE_TO_PHYS(m[reqpage]),
+ VM_PROT_DEFAULT, TRUE);
+ /*
+ * copy the data from the buffer
+ */
+ bcopy(bp->b_un.b_addr + offset, (caddr_t)kva, amount);
+ /* zero-fill past EOF so no stale data leaks into the page */
+ if (amount < NBPG) {
+ bzero((caddr_t)kva + amount, NBPG - amount);
+ }
+ /*
+ * unmap the page and free the kva
+ */
+ pmap_remove(vm_map_pmap(pager_map), kva, kva + NBPG);
+ kmem_free_wakeup(pager_map, kva, mapsize);
+ /*
+ * release the buffer back to the block subsystem
+ * (honor the B_WANTED handshake used by the sleep above)
+ */
+ bp->b_flags &= ~B_BUSY;
+ wakeup((caddr_t)bp);
+ /*
+ * we did not have to do any work to get the requested
+ * page, the read behind/ahead does not justify a read
+ */
+ for (i = 0; i < count; i++) {
+ if (i != reqpage) {
+ vnode_pager_freepage(m[i]);
+ m[i] = 0;
+ }
+ }
+ /*
+ * sorry for the goto
+ */
+ goto finishup;
+ }
+ /*
+ * buffer is nowhere to be found, read from the disk
+ * (found a buffer, but it does not cover the whole page)
+ */
+ break;
+ }
+
+ foff = m[reqpage]->offset + paging_offset;
+ /*
+ * NOTE(review): vnode_pager_addr's failure/hole return value is not
+ * checked here or below — confirm against the full file whether a
+ * hole (e.g. -1) can reach the strategy call with a bogus b_blkno.
+ */
+ reqaddr = vnode_pager_addr(vp, foff);
+ /*
+ * Make sure that our I/O request is contiguous.
+ * Scan backward and stop for the first discontiguous
+ * entry or stop for a page being in buffer cache.
+ */
+ failflag = 0;
+ for (i = reqpage - 1; i >= 0; --i) {
+ int myaddr;
+ if (failflag ||
+ incore(vp, (foff + (i - reqpage) * NBPG) / bsize) ||
+ (myaddr = vnode_pager_addr(vp, m[i]->offset + paging_offset))
+ != reqaddr + (i - reqpage) * NBPG) {
+ vnode_pager_freepage(m[i]);
+ m[i] = 0;
+ /* remember the first (highest-index) failure going backward */
+ if (first == 0)
+ first = i + 1;
+ failflag = 1;
+ }
+ }
+
+ /*
+ * Scan forward and stop for the first non-contiguous
+ * entry or stop for a page being in buffer cache.
+ */
+ failflag = 0;
+ for (i = reqpage + 1; i < count; i++) {
+ int myaddr;
+ if (failflag ||
+ incore(vp, (foff + (i - reqpage) * NBPG) / bsize) ||
+ (myaddr = vnode_pager_addr(vp, m[i]->offset + paging_offset))
+ != reqaddr + (i - reqpage) * NBPG) {
+ vnode_pager_freepage(m[i]);
+ m[i] = 0;
+ if (last == count)
+ last = i;
+ failflag = 1;
+ }
+ }
+
+ /*
+ * the first and last page have been calculated now, move input
+ * pages to be zero based...
+ */
+ count = last;
+ if (first != 0) {
+ for (i = first; i < count; i++) {
+ m[i - first] = m[i];
+ }
+ count -= first;
+ reqpage -= first;
+ }
+
+
+ /*
+ * calculate the file virtual address for the transfer
+ */
+ foff = m[0]->offset + paging_offset;
+ /*
+ * and get the disk physical address (in bytes)
+ */
+ firstaddr = vnode_pager_addr(vp, foff);
+
+ /*
+ * calculate the size of the transfer
+ * (clip at end-of-file when the last page straddles it)
+ */
+ if ((m[count - 1]->offset + paging_offset) + NBPG > vnp->vnp_size)
+ size = vnp->vnp_size - foff;
+ else
+ size = count * NBPG;
+
+
+ /*
+ * and map the pages to be read into the kva
+ */
+ for (i = 0; i < count; i++)
+ pmap_enter(vm_map_pmap(pager_map),
+ kva + NBPG * i, VM_PAGE_TO_PHYS(m[i]),
+ VM_PROT_DEFAULT, TRUE);
+ /* hold the vnode across the async-style I/O; released below */
+ VHOLD(vp);
+ bp = getpbuf();
+
+ /* build a minimal buffer header */
+ bzero((caddr_t)bp, sizeof(struct buf));
+ bp->b_flags = B_BUSY | B_READ | B_CALL;
+ bp->b_iodone = vnode_pager_iodone;
+ /* B_PHYS is not set, but it is nice to fill this in */
+ /* bp->b_proc = &proc0; */
+ bp->b_proc = curproc;
+ bp->b_rcred = bp->b_wcred = bp->b_proc->p_ucred;
+ bp->b_un.b_addr = (caddr_t) kva;
+ /* firstaddr is in bytes; b_blkno wants DEV_BSIZE device blocks */
+ bp->b_blkno = firstaddr / DEV_BSIZE;
+ bp->b_vp = dp;
+
+ /* Should be a BLOCK or character DEVICE if we get here */
+ bp->b_dev = dp->v_rdev;
+ /* request the full page run; any tail past EOF is zeroed after I/O */
+ bp->b_bcount = NBPG * count;
+
+ /* do the input */
+ VOP_STRATEGY(bp);
+
+ /* we definitely need to be at splbio here */
+
+ /*
+ * NOTE(review): correctness of this sleep loop relies on still
+ * holding the splbio taken above (only the buffer-cache hit path
+ * did splx) so the iodone wakeup cannot be lost — confirm no
+ * other path drops spl before this point.
+ */
+ while ((bp->b_flags & B_DONE) == 0) {
+ tsleep((caddr_t)bp, PVM, "vnread", 0);
+ }
+ splx(s);
+ if ((bp->b_flags & B_ERROR) != 0)
+ error = EIO;
+
+ /* zero-fill the portion of the last page beyond end-of-file */
+ if (!error) {
+ if (size != count * NBPG)
+ bzero((caddr_t)kva + size, NBPG * count - size);
+ }
+ HOLDRELE(vp);
+
+ /* tear down the transfer mapping and release the pager kva */
+ pmap_remove(vm_map_pmap(pager_map), kva, kva + NBPG * count);
+ kmem_free_wakeup(pager_map, kva, mapsize);
+
+ /*
+ * free the buffer header back to the swap buffer pool
+ */
+ relpbuf(bp);
+