* Copyright (c) 1990 University of Utah.
* Copyright (c) 1991 The Regents of the University of California.
* Copyright (c) 1993,1994 John S. Dyson
* This code is derived from software contributed to Berkeley by
* the Systems Programming Group of the University of Utah Computer
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* 1. Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
* 3. All advertising materials mentioning features or use of this software
* must display the following acknowledgement:
* This product includes software developed by the University of
* California, Berkeley and its contributors.
* 4. Neither the name of the University nor the names of its contributors
* may be used to endorse or promote products derived from this software
* without specific prior written permission.
* THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
* ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
* OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
* HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
* LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
* OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
* SUCH DAMAGE.
* from: @(#)vnode_pager.c 7.5 (Berkeley) 4/20/91
* $Id: vnode_pager.c,v 1.16 1994/03/30 02:22:00 davidg Exp $
* Page to/from files (vnodes).
* fix credential use (uses current process credentials now)
* John S. Dyson 08 Dec 93
* This file in conjunction with some vm_fault mods, eliminate the performance
* advantage for using the buffer cache and minimize memory copies.
* 1) Supports multiple - block reads
* 2) Bypasses buffer cache for reads
* 1) Totally bypass buffer cache for reads
* (Currently will still sometimes use buffer cache for reads)
* 2) Bypass buffer cache for writes
* (Code does not support it, but mods are simple)
/*
 * File-scope declarations for the vnode pager: the pagerops dispatch
 * table, static prototypes for the generic input/output routines, and
 * the list of all vnode-backed pagers in the system.
 *
 * NOTE(review): this region is a mangled fragment.  The vnodepagerops
 * initializer is truncated -- its member list and closing brace are not
 * visible here; confirm against the original file.
 */
int vnode_pager_putmulti();
struct pagerops vnodepagerops
= {
static int vnode_pager_input(vn_pager_t vnp
, vm_page_t
*m
, int count
, int reqpage
);
static int vnode_pager_output(vn_pager_t vnp
, vm_page_t
*m
, int count
, int *rtvals
);
void relpbuf(struct buf
*bp
) ;
/* pager_map is the kernel submap used for pager KVA allocations */
extern vm_map_t pager_map
;
queue_head_t vnode_pager_list
; /* list of managed vnodes */
/*
 * NOTE(review): the trailing ';' in this macro will be pasted into any
 * expression that expands MAXBP.  The macro appears unused in the
 * visible code; verify before relying on it (or drop the semicolon).
 */
#define MAXBP (NBPG/DEV_BSIZE);
/*
 * Initialize the global list of vnode pagers.
 * NOTE(review): this looks like the body of vnode_pager_init(); the
 * enclosing function header is missing from this fragment.
 */
queue_init(&vnode_pager_list
);
/*
 * Allocate (or lookup) pager for a vnode.
 * Handle is a vnode pointer.
 *
 * NOTE(review): mangled fragment -- control flow (braces, early returns,
 * the vp == NULL / existing-pager paths) is partially missing.  The
 * visible logic: reuse an existing pager hung off vp->v_vmdata if one
 * exists, otherwise allocate pager + vn_pager structures, size the VM
 * object from VOP_GETATTR's va_size, and link everything together.
 */
vnode_pager_alloc(handle
, size
, prot
, offset
)
register vm_pager_t pager
;
struct proc
*p
= curproc
; /* XXX */
* Pageout to vnode, no can do yet.
* Vnodes keep a pointer to any associated pager so no need to
* lookup with vm_pager_lookup.
vp
= (struct vnode
*)handle
;
pager
= (vm_pager_t
)vp
->v_vmdata
;
* Allocate pager structures
pager
= (vm_pager_t
)malloc(sizeof *pager
, M_VMPAGER
, M_WAITOK
);
vnp
= (vn_pager_t
)malloc(sizeof *vnp
, M_VMPGDATA
, M_WAITOK
);
/* vn_pager allocation failed: release the pager structure */
free((caddr_t
)pager
, M_VMPAGER
);
* And an object of the appropriate size
if (VOP_GETATTR(vp
, &vattr
, p
->p_ucred
, p
) == 0) {
object
= vm_object_allocate(round_page(vattr
.va_size
));
vm_object_enter(object
, pager
);
vm_object_setpager(object
, pager
, 0, TRUE
);
/* GETATTR failed: undo both allocations */
free((caddr_t
)vnp
, M_VMPGDATA
);
free((caddr_t
)pager
, M_VMPAGER
);
* Hold a reference to the vnode and initialize pager data.
vnp
->vnp_size
= vattr
.va_size
;
queue_enter(&vnode_pager_list
, pager
, vm_pager_t
, pg_list
);
pager
->pg_handle
= handle
;
pager
->pg_type
= PG_VNODE
;
pager
->pg_ops
= &vnodepagerops
;
pager
->pg_data
= (caddr_t
)vnp
;
/* back-pointer so later lookups on this vnode find the pager */
vp
->v_vmdata
= (caddr_t
)pager
;
* vm_object_lookup() will remove the object from the
* cache if found and also gain a reference to the object.
object
= vm_object_lookup(pager
);
/*
 * Tear down a vnode pager: fsync the vnode, remove the pager from the
 * global list, and free both the vn_pager data and the pager structure.
 *
 * NOTE(review): mangled fragment -- the assignment of vp (presumably
 * vnp->vnp_vp), the vrele/vp->v_vmdata clearing, and the function's
 * braces are missing from the visible text.
 */
vnode_pager_dealloc(pager
)
register vn_pager_t vnp
= (vn_pager_t
)pager
->pg_data
;
register struct vnode
*vp
;
struct proc
*p
= curproc
; /* XXX */
/* can hang if done at reboot on NFS FS */
(void) VOP_FSYNC(vp
, p
->p_ucred
, p
);
queue_remove(&vnode_pager_list
, pager
, vm_pager_t
, pg_list
);
free((caddr_t
)vnp
, M_VMPGDATA
);
free((caddr_t
)pager
, M_VMPAGER
);
vnode_pager_getmulti(pager
, m
, count
, reqpage
, sync
)
return vnode_pager_input((vn_pager_t
) pager
->pg_data
, m
, count
, reqpage
);
/*
 * Single-page pager input entry point.
 * NOTE(review): mangled fragment -- "marray" is used but its declaration
 * (presumably a one-element vm_page_t array holding m) is missing, as
 * are the braces and parameter declarations.  Confirm against the
 * original before restoring.
 */
vnode_pager_getpage(pager
, m
, sync
)
return vnode_pager_input((vn_pager_t
)pager
->pg_data
, marray
, 1, 0);
/*
 * Single-page pager output entry point.
 * NOTE(review): mangled fragment -- "marray" and "rtvals" are used but
 * their declarations are missing, as is the return of rtvals[0] that a
 * wrapper of this shape would normally perform.  Confirm before use.
 */
vnode_pager_putpage(pager
, m
, sync
)
vnode_pager_output((vn_pager_t
)pager
->pg_data
, marray
, 1, rtvals
);
vnode_pager_putmulti(pager
, m
, c
, sync
, rtvals
)
return vnode_pager_output((vn_pager_t
)pager
->pg_data
, m
, c
, rtvals
);
/*
 * Report whether the pager has backing store for the page at "offset".
 * Beyond EOF the answer is no; otherwise VOP_BMAP is consulted and a
 * negative block number (a hole) means FALSE.
 *
 * NOTE(review): mangled fragment -- the return in the beyond-EOF branch,
 * the declarations of err/bn, and the handling of a VOP_BMAP error are
 * missing from the visible text.
 */
vnode_pager_haspage(pager
, offset
)
register vn_pager_t vnp
= (vn_pager_t
)pager
->pg_data
;
* Offset beyond end of file, do not have the page
if (offset
>= vnp
->vnp_size
) {
* Read the index to find the disk block to read
* from. If there is no block, report that we don't
* Assumes that the vnode has whole page or nothing.
err
= VOP_BMAP(vnp
->vnp_vp
,
offset
/ vnp
->vnp_vp
->v_mount
->mnt_stat
.f_bsize
,
(struct vnode
**)0, &bn
);
/* bn < 0 means a hole (no on-disk block) for this range */
return((long)bn
< 0 ? FALSE
: TRUE
);
/*
 * Lets the VM system know about a change in size for a file.
 * If this vnode is mapped into some address space (i.e. we have a pager
 * for it) we adjust our own internal size and flush any cached pages in
 * the associated object that are affected by the size change.
 * Note: this routine may be invoked as a result of a pager put
 * operation (possibly at object termination time), so we must be careful.
 *
 * NOTE(review): mangled fragment -- early "return"s after the guard
 * conditions, the object == NULL check after vm_object_lookup, and the
 * vm_object_lock pairing for the unlock below are missing here.
 */
vnode_pager_setsize(vp
, nsize
)
register vm_object_t object
;
/* ignore non-regular vnodes and vnodes with no pager attached */
if (vp
== NULL
|| vp
->v_type
!= VREG
|| vp
->v_vmdata
== NULL
)
pager
= (vm_pager_t
)vp
->v_vmdata
;
vnp
= (vn_pager_t
)pager
->pg_data
;
/* nothing to do if the size did not actually change */
if (nsize
== vnp
->vnp_size
)
* This can happen during object termination since
* vm_object_page_clean is called after the object
* has been removed from the hash table, and clean
* may cause vnode write operations which can wind
object
= vm_object_lookup(pager
);
* Toss any cached pages beyond the new EOF.
if (round_page(nsize
) < round_page(vnp
->vnp_size
)) {
vm_object_page_remove(object
,
(vm_offset_t
)round_page(nsize
), round_page(vnp
->vnp_size
));
vm_object_unlock(object
);
vnp
->vnp_size
= (vm_offset_t
)nsize
;
/* drop the reference gained by vm_object_lookup() above */
vm_object_deallocate(object
);
/*
 * Walk every pager on vnode_pager_list and uncache the vnodes belonging
 * to the given mount point (mp == NULL matches all mounts).
 *
 * NOTE(review): mangled fragment -- the enclosing function header
 * (presumably vnode_pager_umount(mp)) and the loop-advance assignment
 * "pager = npager" are missing from the visible text.
 */
register struct mount
*mp
;
register vm_pager_t pager
, npager
;
pager
= (vm_pager_t
) queue_first(&vnode_pager_list
);
while (!queue_end(&vnode_pager_list
, (queue_entry_t
)pager
)) {
* Save the next pointer now since uncaching may
* terminate the object and render pager invalid
vp
= ((vn_pager_t
)pager
->pg_data
)->vnp_vp
;
npager
= (vm_pager_t
) queue_next(&pager
->pg_list
);
if (mp
== (struct mount
*)0 || vp
->v_mount
== mp
)
(void) vnode_pager_uncache(vp
);
/*
 * Remove vnode associated object from the object cache.
 * Note: this routine may be invoked as a result of a pager put
 * operation (possibly at object termination time), so we must be careful.
 *
 * NOTE(review): mangled fragment -- the function header, the VOP_UNLOCK /
 * re-lock sequence implied by "locked", the object == NULL check, and
 * the return of "uncached" are missing from the visible text.
 */
register struct vnode
*vp
;
register vm_object_t object
;
boolean_t uncached
, locked
;
pager
= (vm_pager_t
)vp
->v_vmdata
;
* Unlock the vnode if it is currently locked.
* We do this since uncaching the object may result
* in its destruction which may initiate paging
* activity which may necessitate locking the vnode.
locked
= VOP_ISLOCKED(vp
);
* Must use vm_object_lookup() as it actually removes
* the object from the cache list.
object
= vm_object_lookup(pager
);
/* only "uncached" if we held the last cache reference */
uncached
= (object
->ref_count
<= 1);
pager_cache(object
, FALSE
);
/*
 * calculate the linear (byte) disk address of specified virtual
 * file address: split the file offset into a logical block and an
 * in-block offset, bmap the block, and rebuild a byte address in
 * DEV_BSIZE units.
 *
 * NOTE(review): mangled fragment -- the function header's parameter
 * declarations, local declarations (bsize, vblock, voffset, err, rtvp,
 * block, rtaddress), and the return of rtaddress are missing.
 */
vnode_pager_addr(vp
, address
)
bsize
= vp
->v_mount
->mnt_stat
.f_bsize
;
vblock
= address
/ bsize
;
voffset
= address
% bsize
;
err
= VOP_BMAP(vp
,vblock
,&rtvp
,&block
);
/* block is in DEV_BSIZE units; add back the in-block byte offset */
rtaddress
= block
* DEV_BSIZE
+ voffset
;
* interrupt routine for I/O completion
/*
 * NOTE(review): the comment line above appears to describe
 * vnode_pager_iodone, whose body is missing from this fragment.
 */
* small block file system vnode pager input
/*
 * Read one page from a filesystem whose block size is smaller than a
 * page: iterate over the NBPG/bsize file blocks covering the page,
 * satisfying each either from an in-core buffer or with a raw device
 * read through a minimal buffer header.
 *
 * NOTE(review): mangled fragment -- declarations (vp, foff, kva, bp,
 * amount, fileaddr, error, i, block), the getpbuf/relpbuf calls implied
 * by "free the buffer header back to the swap buffer pool", the
 * VOP_STRATEGY call, splbio/splx bracketing, and several closing braces
 * are missing from the visible text.
 */
vnode_pager_input_smlfs(vnp
, m
)
vm_offset_t paging_offset
;
paging_offset
= m
->object
->paging_offset
;
bsize
= vp
->v_mount
->mnt_stat
.f_bsize
;
foff
= m
->offset
+ paging_offset
;
/* find the underlying device vnode for the file */
VOP_BMAP(vp
, foff
, &dp
, 0);
kva
= vm_pager_map_page(m
);
for(i
=0;i
<NBPG
/bsize
;i
++) {
* calculate logical block and offset
block
= foff
/ bsize
+ i
;
/* if the block is in the buffer cache, copy it from there */
while (bp
= incore(vp
, block
)) {
* wait until the buffer is avail or gone
if (bp
->b_flags
& B_BUSY
) {
tsleep ((caddr_t
)bp
, PVM
, "vnwblk", 0);
/* clamp the copy to the file's current size */
if ((foff
+ bsize
) > vnp
->vnp_size
)
amount
= vnp
->vnp_size
- foff
;
* make sure that this page is in the buffer
if ((amount
> 0) && amount
<= bp
->b_bcount
) {
* copy the data from the buffer
bcopy(bp
->b_un
.b_addr
, (caddr_t
)kva
+ i
* bsize
, amount
);
/* zero the tail of a partially valid block */
bzero((caddr_t
)kva
+ amount
, bsize
- amount
);
/* not in core: compute the on-disk byte address and read it */
fileaddr
= vnode_pager_addr(vp
, foff
+ i
* bsize
);
/* build a minimal buffer header */
bp
->b_flags
= B_BUSY
| B_READ
| B_CALL
;
bp
->b_iodone
= vnode_pager_iodone
;
bp
->b_rcred
= bp
->b_wcred
= bp
->b_proc
->p_ucred
;
bp
->b_un
.b_addr
= (caddr_t
) kva
+ i
* bsize
;
bp
->b_blkno
= fileaddr
/ DEV_BSIZE
;
/* Should be a BLOCK or character DEVICE if we get here */
/* we definitely need to be at splbio here */
while ((bp
->b_flags
& B_DONE
) == 0) {
tsleep((caddr_t
)bp
, PVM
, "vnsrd", 0);
if ((bp
->b_flags
& B_ERROR
) != 0)
* free the buffer header back to the swap buffer pool
/* block has no backing store: zero-fill it */
bzero((caddr_t
) kva
+ i
* bsize
, bsize
);
vm_pager_unmap_page(kva
);
pmap_clear_modify(VM_PAGE_TO_PHYS(m
));
* old style vnode pager input routine
/*
 * (The original heading said "output", but this routine performs input
 * via VOP_READ -- corrected.)
 *
 * Read one page through the normal VFS read path: map the page into
 * kernel VA, build a uio, and VOP_READ into it, zero-filling whatever
 * the read did not cover.
 *
 * NOTE(review): mangled fragment -- declarations (foff, size, kva, aiov,
 * auio, error), most uio field initialization (iov_len, uio_iov,
 * uio_offset, uio_resid, uio_rw), the error returns for the EOF checks,
 * and several braces are missing from the visible text.
 */
vnode_pager_input_old(vnp
, m
)
foff
= m
->offset
+ m
->object
->paging_offset
;
* Return failure if beyond current EOF
if (foff
>= vnp
->vnp_size
) {
/* clamp the transfer to the file's current size */
if (foff
+ size
> vnp
->vnp_size
)
size
= vnp
->vnp_size
- foff
;
* Allocate a kernel virtual address and initialize so that
* we can use VOP_READ/WRITE routines.
kva
= vm_pager_map_page(m
);
aiov
.iov_base
= (caddr_t
)kva
;
auio
.uio_segflg
= UIO_SYSSPACE
;
auio
.uio_procp
= (struct proc
*)0;
error
= VOP_READ(vnp
->vnp_vp
, &auio
, IO_PAGER
, curproc
->p_ucred
);
/* zero the remainder of the page past what was actually read */
register int count
= size
- auio
.uio_resid
;
bzero((caddr_t
)kva
+ count
, NBPG
- count
);
vm_pager_unmap_page(kva
);
pmap_clear_modify(VM_PAGE_TO_PHYS(m
));
return error
?VM_PAGER_FAIL
:VM_PAGER_OK
;
* generic vnode pager input routine
/*
 * Multi-page vnode input.  Strategy, as visible in this fragment:
 *   1. If the fs has no usable VOP_BMAP (or no KVA is available), free
 *      the extra pages and fall back to vnode_pager_input_old().
 *   2. If the fs block size is smaller than a page (and it is not NFS),
 *      free the extra pages and use vnode_pager_input_smlfs().
 *   3. Otherwise try to satisfy the request page from the buffer cache,
 *      and failing that, trim the page run to a disk-contiguous span
 *      and issue one raw device read for the whole run.
 *
 * NOTE(review): severely mangled fragment -- declarations, many
 * conditionals/braces, the "first"/"last" run-trimming arithmetic, the
 * buffer-header allocation (getpbuf), VOP_STRATEGY, splbio/splx, and
 * brelse calls are all missing from the visible text.  Do not treat the
 * line sequence below as complete control flow.
 */
vnode_pager_input(vnp
, m
, count
, reqpage
)
struct proc
*p
= curproc
; /* XXX */
vm_offset_t paging_offset
;
int errtype
=0; /* 0 is file type otherwise vm type */
object
= m
[reqpage
]->object
; /* all vm_page_t items are in same object */
paging_offset
= object
->paging_offset
;
bsize
= vp
->v_mount
->mnt_stat
.f_bsize
;
/* get the UNDERLYING device for the file with VOP_BMAP() */
* originally, we did not check for an error return
* value -- assuming an fs always has a bmap entry point
* -- that assumption is wrong!!!
foff
= m
[reqpage
]->offset
+ paging_offset
;
if (!VOP_BMAP(vp
, foff
, &dp
, 0)) {
* we do not block for a kva, notice we default to a kva
kva
= kmem_alloc_pageable(pager_map
, (mapsize
= count
*NBPG
));
/* no KVA for the full run: drop the read-ahead/behind pages */
for (i
= 0; i
< count
; i
++) {
vnode_pager_freepage(m
[i
]);
/* retry with a single-page KVA, blocking if necessary */
kva
= kmem_alloc_wait(pager_map
, mapsize
= NBPG
);
* if we can't get a kva or we can't bmap, use old VOP code
for (i
= 0; i
< count
; i
++) {
vnode_pager_freepage(m
[i
]);
return vnode_pager_input_old(vnp
, m
[reqpage
]);
* if the blocksize is smaller than a page size, then use
* special small filesystem code. NFS sometimes has a small
* blocksize, but it can handle large reads itself.
} else if( (NBPG
/ bsize
) > 1 &&
(vp
->v_mount
->mnt_stat
.f_type
!= MOUNT_NFS
)) {
kmem_free_wakeup(pager_map
, kva
, mapsize
);
for (i
= 0; i
< count
; i
++) {
vnode_pager_freepage(m
[i
]);
return vnode_pager_input_smlfs(vnp
, m
[reqpage
]);
* here on direct device I/O
* This pathetic hack gets data from the buffer cache, if it's there.
* I believe that this is not really necessary, and the ends can
* be gotten by defaulting to the normal vfs read behavior, but this
* might be more efficient, because the will NOT invoke read-aheads
* and one of the purposes of this code is to bypass the buffer
* cache and keep from flushing it by reading in a program.
* calculate logical block and offset
* if we have a buffer in core, then try to use it
while (bp
= incore(vp
, block
)) {
* wait until the buffer is avail or gone
if (bp
->b_flags
& B_BUSY
) {
tsleep ((caddr_t
)bp
, PVM
, "vnwblk", 0);
/* clamp to current file size */
if ((foff
+ amount
) > vnp
->vnp_size
)
amount
= vnp
->vnp_size
- foff
;
* make sure that this page is in the buffer
if ((amount
> 0) && (offset
+ amount
) <= bp
->b_bcount
) {
/* map just the requested page and copy from the buffer */
pmap_kenter(kva
, VM_PAGE_TO_PHYS(m
[reqpage
]));
* copy the data from the buffer
bcopy(bp
->b_un
.b_addr
+ offset
, (caddr_t
)kva
, amount
);
bzero((caddr_t
)kva
+ amount
, NBPG
- amount
);
* unmap the page and free the kva
pmap_remove(vm_map_pmap(pager_map
), kva
, kva
+ NBPG
);
kmem_free_wakeup(pager_map
, kva
, mapsize
);
* release the buffer back to the block subsystem
* we did not have to do any work to get the requested
* page, the read behind/ahead does not justify a read
for (i
= 0; i
< count
; i
++) {
vnode_pager_freepage(m
[i
]);
* buffer is nowhere to be found, read from the disk
reqaddr
= vnode_pager_addr(vp
, foff
);
* Make sure that our I/O request is contiguous.
* Scan backward and stop for the first discontiguous
* entry or stop for a page being in buffer cache.
for (i
= reqpage
- 1; i
>= 0; --i
) {
incore(vp
, (foff
+ (i
- reqpage
) * NBPG
) / bsize
) ||
(vnode_pager_addr(vp
, m
[i
]->offset
+ paging_offset
))
!= reqaddr
+ (i
- reqpage
) * NBPG
) {
vnode_pager_freepage(m
[i
]);
* Scan forward and stop for the first non-contiguous
* entry or stop for a page being in buffer cache.
for (i
= reqpage
+ 1; i
< count
; i
++) {
incore(vp
, (foff
+ (i
- reqpage
) * NBPG
) / bsize
) ||
(vnode_pager_addr(vp
, m
[i
]->offset
+ paging_offset
))
!= reqaddr
+ (i
- reqpage
) * NBPG
) {
vnode_pager_freepage(m
[i
]);
* the first and last page have been calculated now, move input
* pages to be zero based...
for (i
= first
; i
< count
; i
++) {
* calculate the file virtual address for the transfer
foff
= m
[0]->offset
+ paging_offset
;
* and get the disk physical address (in bytes)
firstaddr
= vnode_pager_addr(vp
, foff
);
* calculate the size of the transfer
if ((foff
+ size
) > vnp
->vnp_size
)
size
= vnp
->vnp_size
- foff
;
* round up physical size for real devices
if( dp
->v_type
== VBLK
|| dp
->v_type
== VCHR
)
size
= (size
+ DEV_BSIZE
- 1) & ~(DEV_BSIZE
- 1);
* and map the pages to be read into the kva
for (i
= 0; i
< count
; i
++)
pmap_kenter( kva
+ NBPG
* i
, VM_PAGE_TO_PHYS(m
[i
]));
/* build a minimal buffer header */
bp
->b_flags
= B_BUSY
| B_READ
| B_CALL
;
bp
->b_iodone
= vnode_pager_iodone
;
/* B_PHYS is not set, but it is nice to fill this in */
bp
->b_rcred
= bp
->b_wcred
= bp
->b_proc
->p_ucred
;
bp
->b_un
.b_addr
= (caddr_t
) kva
;
bp
->b_blkno
= firstaddr
/ DEV_BSIZE
;
/* Should be a BLOCK or character DEVICE if we get here */
/* we definitely need to be at splbio here */
while ((bp
->b_flags
& B_DONE
) == 0) {
tsleep((caddr_t
)bp
, PVM
, "vnread", 0);
if ((bp
->b_flags
& B_ERROR
) != 0)
/* zero-fill any tail of the KVA the (clamped) read did not cover */
if (size
!= count
* NBPG
)
bzero((caddr_t
)kva
+ size
, NBPG
* count
- size
);
pmap_remove(vm_map_pmap(pager_map
), kva
, kva
+ NBPG
* count
);
kmem_free_wakeup(pager_map
, kva
, mapsize
);
* free the buffer header back to the swap buffer pool
for (i
= 0; i
< count
; i
++) {
pmap_clear_modify(VM_PAGE_TO_PHYS(m
[i
]));
m
[i
]->flags
&= ~PG_LAUNDRY
;
* whether or not to leave the page activated
* is up in the air, but we should put the page
* on a page queue somewhere. (it already is in
* Result: It appears that emperical results show
* that deactivating pages is best.
* just in case someone was asking for this
* page we now tell them that it is ok to use
vm_page_deactivate(m
[i
]);
vnode_pager_freepage(m
[i
]);
printf("vnode pager read error: %d\n", error
);
return (error
? VM_PAGER_FAIL
: VM_PAGER_OK
);
* old-style vnode pager output routine
/*
 * Write one page through the normal VFS path: map it into kernel VA,
 * build a uio, and VOP_WRITE it out.
 *
 * NOTE(review): mangled fragment -- declarations (vp, foff, size, kva,
 * aiov, auio, error), most uio field initialization, the beyond-EOF
 * return, and the short-write diagnostic implied by the residual check
 * near the end are missing from the visible text.
 */
vnode_pager_output_old(vnp
, m
)
foff
= m
->offset
+ m
->object
->paging_offset
;
* Return failure if beyond current EOF
if (foff
>= vnp
->vnp_size
) {
/* clamp the transfer to the file's current size */
if (foff
+ size
> vnp
->vnp_size
)
size
= vnp
->vnp_size
- foff
;
* Allocate a kernel virtual address and initialize so that
* we can use VOP_WRITE routines.
kva
= vm_pager_map_page(m
);
aiov
.iov_base
= (caddr_t
)kva
;
auio
.uio_segflg
= UIO_SYSSPACE
;
auio
.uio_procp
= (struct proc
*)0;
error
= VOP_WRITE(vp
, &auio
, IO_PAGER
, curproc
->p_ucred
);
/* residual equal to size means nothing was written */
if ((size
- auio
.uio_resid
) == 0) {
vm_pager_unmap_page(kva
);
return error
?VM_PAGER_FAIL
:VM_PAGER_OK
;
* vnode pager output on a small-block file system
/*
 * Write one page to a filesystem whose block size is smaller than a
 * page: for each file block covering the page, invalidate any stale
 * in-core buffer, then write the block through a minimal buffer header
 * directly to the underlying device.
 *
 * NOTE(review): mangled fragment -- declarations (vp, i, foff, kva, bp,
 * fileaddr, error), the buffer invalidation details around the incore/
 * getblk pair, getpbuf/relpbuf, VOP_STRATEGY, splbio/splx, and the
 * return value are missing from the visible text.
 */
vnode_pager_output_smlfs(vnp
, m
)
vm_offset_t paging_offset
;
paging_offset
= m
->object
->paging_offset
;
bsize
= vp
->v_mount
->mnt_stat
.f_bsize
;
foff
= m
->offset
+ paging_offset
;
/* find the underlying device vnode for the file */
VOP_BMAP(vp
, foff
, &dp
, 0);
kva
= vm_pager_map_page(m
);
for(i
= 0; !error
&& i
< (NBPG
/bsize
); i
++) {
* calculate logical block and offset
fileaddr
= vnode_pager_addr(vp
, foff
+ i
* bsize
);
/* a stale copy in the buffer cache must be invalidated */
if( bp
= incore( vp
, (foff
/bsize
) + i
)) {
bp
= getblk(vp
, (foff
/bsize
) + i
, bp
->b_bufsize
);
/* build a minimal buffer header */
bp
->b_flags
= B_BUSY
| B_CALL
| B_WRITE
;
bp
->b_iodone
= vnode_pager_iodone
;
bp
->b_rcred
= bp
->b_wcred
= bp
->b_proc
->p_ucred
;
bp
->b_un
.b_addr
= (caddr_t
) kva
+ i
* bsize
;
bp
->b_blkno
= fileaddr
/ DEV_BSIZE
;
/* Should be a BLOCK or character DEVICE if we get here */
/* we definitely need to be at splbio here */
while ((bp
->b_flags
& B_DONE
) == 0) {
tsleep((caddr_t
)bp
, PVM
, "vnswrt", 0);
if ((bp
->b_flags
& B_ERROR
) != 0)
* free the buffer header back to the swap buffer pool
vm_pager_unmap_page(kva
);
* generic vnode pager output routine
/*
 * Multi-page vnode output.  Strategy, as visible in this fragment:
 *   1. If the fs has no usable VOP_BMAP, write the first page via
 *      vnode_pager_output_old().
 *   2. If the fs block size is smaller than a page (and not NFS), write
 *      each page via vnode_pager_output_smlfs().
 *   3. Otherwise trim the page run to a disk-contiguous span,
 *      invalidate any overlapping in-core buffers, and issue one raw
 *      device write for the whole run.
 *
 * NOTE(review): severely mangled fragment -- declarations, the loop
 * initializing all rtvals[] to VM_PAGER_TRYAGAIN, the contiguity-trim
 * logic following the forward scan, getpbuf, VOP_STRATEGY, splbio/splx,
 * relpbuf, and most braces are missing from the visible text.  The
 * rtvals[i]-assignment loop also stores the final status per page; do
 * not treat the line sequence below as complete control flow.
 */
vnode_pager_output(vnp
, m
, count
, rtvals
)
struct proc
*p
= curproc
; /* XXX */
vm_offset_t paging_offset
;
object
= m
[0]->object
; /* all vm_page_t items are in same object */
paging_offset
= object
->paging_offset
;
bsize
= vp
->v_mount
->mnt_stat
.f_bsize
;
/* default every page's status until it is actually written */
rtvals
[i
] = VM_PAGER_TRYAGAIN
;
* if the filesystem does not have a bmap, then use the
if (VOP_BMAP(vp
, m
[0]->offset
+paging_offset
, &dp
, 0)) {
rtvals
[0] = vnode_pager_output_old(vnp
, m
[0]);
pmap_clear_modify(VM_PAGE_TO_PHYS(m
[0]));
m
[0]->flags
&= ~PG_LAUNDRY
;
* if the filesystem has a small blocksize, then use
* the small block filesystem output code
(vp
->v_mount
->mnt_stat
.f_type
!= MOUNT_NFS
)) {
rtvals
[i
] = vnode_pager_output_smlfs(vnp
, m
[i
]);
if( rtvals
[i
] == VM_PAGER_OK
) {
pmap_clear_modify(VM_PAGE_TO_PHYS(m
[i
]));
m
[i
]->flags
&= ~PG_LAUNDRY
;
* get some kva for the output
kva
= kmem_alloc_pageable(pager_map
, (mapsize
= count
*NBPG
));
/* fall back to a single-page KVA if the full run cannot be mapped */
kva
= kmem_alloc_pageable(pager_map
, (mapsize
= NBPG
));
foff
= m
[i
]->offset
+ paging_offset
;
/* pages wholly beyond EOF cannot be written */
if (foff
>= vnp
->vnp_size
) {
rtvals
[j
] = VM_PAGER_BAD
;
foff
= m
[0]->offset
+ paging_offset
;
reqaddr
= vnode_pager_addr(vp
, foff
);
* Scan forward and stop for the first non-contiguous
* entry or stop for a page being in buffer cache.
for (i
= 1; i
< count
; i
++) {
if ( vnode_pager_addr(vp
, m
[i
]->offset
+ paging_offset
)
* calculate the size of the transfer
if ((foff
+ size
) > vnp
->vnp_size
)
size
= vnp
->vnp_size
- foff
;
* round up physical size for real devices
if( dp
->v_type
== VBLK
|| dp
->v_type
== VCHR
)
size
= (size
+ DEV_BSIZE
- 1) & ~(DEV_BSIZE
- 1);
* and map the pages to be read into the kva
for (i
= 0; i
< count
; i
++)
pmap_kenter( kva
+ NBPG
* i
, VM_PAGE_TO_PHYS(m
[i
]));
printf("vnode: writing foff: %d, devoff: %d, size: %d\n",
* next invalidate the incore vfs_bio data
for (i
= 0; i
< count
; i
++) {
int filblock
= (foff
+ i
* NBPG
) / bsize
;
if( fbp
= incore( vp
, filblock
)) {
/* printf("invalidating: %d\n", filblock); */
fbp
= getblk(vp
, filblock
, fbp
->b_bufsize
);
/* build a minimal buffer header */
bp
->b_flags
= B_BUSY
| B_WRITE
| B_CALL
;
bp
->b_iodone
= vnode_pager_iodone
;
/* B_PHYS is not set, but it is nice to fill this in */
/* bp->b_proc = &proc0; */
bp
->b_rcred
= bp
->b_wcred
= bp
->b_proc
->p_ucred
;
bp
->b_un
.b_addr
= (caddr_t
) kva
;
bp
->b_blkno
= reqaddr
/ DEV_BSIZE
;
/* Should be a BLOCK or character DEVICE if we get here */
/* we definitely need to be at splbio here */
while ((bp
->b_flags
& B_DONE
) == 0) {
tsleep((caddr_t
)bp
, PVM
, "vnwrite", 0);
if ((bp
->b_flags
& B_ERROR
) != 0)
pmap_remove(vm_map_pmap(pager_map
), kva
, kva
+ NBPG
* count
);
kmem_free_wakeup(pager_map
, kva
, mapsize
);
* free the buffer header back to the swap buffer pool
pmap_clear_modify(VM_PAGE_TO_PHYS(m
[i
]));
m
[i
]->flags
&= ~PG_LAUNDRY
;
printf("vnode pager write error: %d\n", error
);
return (error
? VM_PAGER_FAIL
: VM_PAGER_OK
);