/*
 * Copyright (c) 1989, 1991 The Regents of the University of California.
 * This code is derived from software contributed to Berkeley by
 * Rick Macklem at The University of Guelph.
 *
 * %sccs.include.redist.c%
 *
 *	@(#)nfs_socket.c	7.26 (Berkeley) %G%
 */

/*
 * Socket operations for use by nfs
 */
#include "machine/endian.h"
#include "ufs/ufs/quota.h"
#include "ufs/ufs/ufsmount.h"
/*
 * Size of an export-address hash record (see the netaddrhash code at
 * the end of this file).
 * NOTE(review): the identifier "netnetnet" looks garbled in this copy
 * -- confirm the intended name against the original source.
 */
int netnetnet
= sizeof (struct netaddrhash
);
/*
 * Estimate rto for an nfs rpc sent via an unreliable datagram.
 * Use the mean and mean deviation of rtt for the appropriate type of rpc
 * for the frequent rpcs and a default for the others.
 * The justification for doing "other" this way is that these rpcs
 * happen so infrequently that timer est. would probably be stale.
 * Also, since many of these rpcs are
 * non-idempotent, a conservative timeout is desired.
 */
/*
 * Retransmit timeout for an rpc: timer class 0 (untimed procedures)
 * uses the mount's default timeout; timed classes use the smoothed
 * rtt plus mean deviation (Jacobson-style estimator).
 * NOTE(review): the "#define NFS_RTO" line and the class-selection
 * condition were missing from this copy and have been reconstructed
 * -- verify against the original source.
 */
#define	NFS_RTO(n, t) \
	((t) == 0 ? (n)->nm_timeo : \
	 ((t) < 3 ? \
	  (((((n)->nm_srtt[t-1] + 3) >> 2) + (n)->nm_sdrtt[t-1] + 1) >> 1) : \
	  ((((n)->nm_srtt[t-1] + 7) >> 3) + (n)->nm_sdrtt[t-1] + 1)))
/*
 * Smoothed rtt and mean-deviation slots for a request's timer class.
 * proct[] maps an rpc procedure number to a 1-based timer class
 * (0 = untimed), hence the "- 1" when indexing nm_srtt/nm_sdrtt.
 */
#define NFS_SRTT(r) (r)->r_nmp->nm_srtt[proct[(r)->r_procnum] - 1]
#define NFS_SDRTT(r) (r)->r_nmp->nm_sdrtt[proct[(r)->r_procnum] - 1]
* External data, mostly RPC constants in XDR form
extern u_long rpc_reply
, rpc_msgdenied
, rpc_mismatch
, rpc_vers
, rpc_auth_unix
,
rpc_msgaccepted
, rpc_call
, rpc_autherr
, rpc_rejectedcred
,
/*
 * RPC program/version numbers for NFS and NQNFS (the lease protocol),
 * the nqnfs server start time, and a per-procedure table marking
 * non-idempotent rpcs (which get conservative timeouts -- see the
 * rto discussion above).
 */
extern u_long nfs_prog
, nfs_vers
, nqnfs_prog
, nqnfs_vers
;
extern time_t nqnfsstarttime
;
extern int nonidempotent
[NFS_NPROCS
];
* Maps errno values to nfs error numbers.
* Use NFSERR_IO as the catch all for ones not specifically defined in
static int nfsrv_errmap
[ELAST
] = {
NFSERR_PERM
, NFSERR_NOENT
, NFSERR_IO
, NFSERR_IO
, NFSERR_IO
,
NFSERR_NXIO
, NFSERR_IO
, NFSERR_IO
, NFSERR_IO
, NFSERR_IO
,
NFSERR_IO
, NFSERR_IO
, NFSERR_ACCES
, NFSERR_IO
, NFSERR_IO
,
NFSERR_IO
, NFSERR_EXIST
, NFSERR_IO
, NFSERR_NODEV
, NFSERR_NOTDIR
,
NFSERR_ISDIR
, NFSERR_IO
, NFSERR_IO
, NFSERR_IO
, NFSERR_IO
,
NFSERR_IO
, NFSERR_FBIG
, NFSERR_NOSPC
, NFSERR_IO
, NFSERR_ROFS
,
NFSERR_IO
, NFSERR_IO
, NFSERR_IO
, NFSERR_IO
, NFSERR_IO
,
NFSERR_IO
, NFSERR_IO
, NFSERR_IO
, NFSERR_IO
, NFSERR_IO
,
NFSERR_IO
, NFSERR_IO
, NFSERR_IO
, NFSERR_IO
, NFSERR_IO
,
NFSERR_IO
, NFSERR_IO
, NFSERR_IO
, NFSERR_IO
, NFSERR_IO
,
NFSERR_IO
, NFSERR_IO
, NFSERR_IO
, NFSERR_IO
, NFSERR_IO
,
NFSERR_IO
, NFSERR_IO
, NFSERR_IO
, NFSERR_IO
, NFSERR_IO
,
NFSERR_IO
, NFSERR_IO
, NFSERR_NAMETOL
, NFSERR_IO
, NFSERR_IO
,
NFSERR_NOTEMPTY
, NFSERR_IO
, NFSERR_IO
, NFSERR_DQUOT
, NFSERR_STALE
,
NFSERR_IO
, NFSERR_IO
, NFSERR_IO
, NFSERR_IO
, NFSERR_IO
,
NFSERR_IO
, NFSERR_IO
, NFSERR_IO
, NFSERR_IO
, NFSERR_IO
,
* Defines which timer to use for the procnum.
static int proct
[NFS_NPROCS
] = {
0, 1, 0, 0, 2, 3, 3, 0, 4, 0, 0, 0, 0, 0, 0, 0, 3, 0, 3, 0, 0, 0,
/*
 * There is a congestion window for outstanding rpcs maintained per mount
 * point.  The cwnd size is adjusted in roughly the way that:
 * Van Jacobson, Congestion avoidance and Control, In "Proceedings of
 * SIGCOMM '88".  ACM, August 1988.
 * describes for TCP.  The cwnd size is chopped in half on a retransmit
 * timeout and incremented by 1/cwnd when each rpc reply is received and
 * a full cwnd of rpcs is in progress.
 * (The sent count and cwnd are scaled for integer arith.)
 * Variants of "slow start" were tried and were found to be too much of a
 * performance hit (ave. rtt 3 times larger),
 * I suspect due to the large rtt that nfs rpcs have.
 */
/*
 * Congestion window constants: the window is kept scaled by
 * NFS_CWNDSCALE for integer arithmetic and is capped at NFS_MAXCWND
 * (32 outstanding rpcs worth).  nfs_backoff[] gives the timeout
 * multiplier applied per retransmission (powers of two).
 */
#define	NFS_CWNDSCALE	256
#define	NFS_MAXCWND	(NFS_CWNDSCALE * 32)
static int nfs_backoff[8] = { 2, 4, 8, 16, 32, 64, 128, 256 };
/* Old-style (K&R) forward declarations -- no parameter prototypes. */
void nfs_disconnect(), nfs_realign(), nfsrv_wakenfsd(), nfs_sndunlock();
void nfs_rcvunlock(), nqnfs_serverd();
struct mbuf
*nfsm_rpchead();
int (*nfsrv_procs
[NFS_NPROCS
])() = {
* Initialize sockets and congestion for a new NFS connection.
* We do not free the sockaddr if error.
register struct nfsmount
*nmp
;
register struct socket
*so
;
int s
, error
, rcvreserve
, sndreserve
;
nmp
->nm_so
= (struct socket
*)0;
if (error
= socreate(mtod(nmp
->nm_nam
, struct sockaddr
*)->sa_family
,
&nmp
->nm_so
, nmp
->nm_sotype
, nmp
->nm_soproto
))
nmp
->nm_soflags
= so
->so_proto
->pr_flags
;
* Protocols that do not require connections may be optionally left
* unconnected for servers that reply from a port other than NFS_PORT.
if (nmp
->nm_flag
& NFSMNT_NOCONN
) {
if (nmp
->nm_soflags
& PR_CONNREQUIRED
) {
if (error
= soconnect(so
, nmp
->nm_nam
))
* Wait for the connection to complete. Cribbed from the
* connect system call but with the wait timing out so
* that interruptible mounts don't hang here for a long time.
while ((so
->so_state
& SS_ISCONNECTING
) && so
->so_error
== 0) {
(void) tsleep((caddr_t
)&so
->so_timeo
, PSOCK
,
if ((so
->so_state
& SS_ISCONNECTING
) &&
so
->so_error
== 0 && rep
&&
(error
= nfs_sigintr(nmp
, rep
, rep
->r_procp
))) {
so
->so_state
&= ~SS_ISCONNECTING
;
if (nmp
->nm_flag
& (NFSMNT_SOFT
| NFSMNT_INT
)) {
so
->so_rcv
.sb_timeo
= (5 * hz
);
so
->so_snd
.sb_timeo
= (5 * hz
);
if (nmp
->nm_sotype
== SOCK_DGRAM
) {
sndreserve
= nmp
->nm_wsize
+ NFS_MAXPKTHDR
;
rcvreserve
= nmp
->nm_rsize
+ NFS_MAXPKTHDR
;
} else if (nmp
->nm_sotype
== SOCK_SEQPACKET
) {
sndreserve
= (nmp
->nm_wsize
+ NFS_MAXPKTHDR
) * 2;
rcvreserve
= (nmp
->nm_rsize
+ NFS_MAXPKTHDR
) * 2;
if (nmp
->nm_sotype
!= SOCK_STREAM
)
if (so
->so_proto
->pr_flags
& PR_CONNREQUIRED
) {
MGET(m
, M_WAIT
, MT_SOOPTS
);
sosetopt(so
, SOL_SOCKET
, SO_KEEPALIVE
, m
);
if (so
->so_proto
->pr_protocol
== IPPROTO_TCP
) {
MGET(m
, M_WAIT
, MT_SOOPTS
);
sosetopt(so
, IPPROTO_TCP
, TCP_NODELAY
, m
);
sndreserve
= (nmp
->nm_wsize
+ NFS_MAXPKTHDR
+ sizeof (u_long
))
rcvreserve
= (nmp
->nm_rsize
+ NFS_MAXPKTHDR
+ sizeof (u_long
))
if (error
= soreserve(so
, sndreserve
, rcvreserve
))
so
->so_rcv
.sb_flags
|= SB_NOINTR
;
so
->so_snd
.sb_flags
|= SB_NOINTR
;
/* Initialize other non-zero congestion variables */
nmp
->nm_srtt
[0] = nmp
->nm_srtt
[1] = nmp
->nm_srtt
[2] = nmp
->nm_srtt
[3] =
nmp
->nm_srtt
[4] = (NFS_TIMEO
<< 3);
nmp
->nm_sdrtt
[0] = nmp
->nm_sdrtt
[1] = nmp
->nm_sdrtt
[2] =
nmp
->nm_sdrtt
[3] = nmp
->nm_sdrtt
[4] = 0;
nmp
->nm_cwnd
= NFS_MAXCWND
/ 2; /* Initial send window */
/*
 * Called when a connection is broken on a reliable protocol.
 * - clean up the old socket
 * - set R_MUSTRESEND for all outstanding requests on mount point
 * If this fails the mount point is DEAD!
 * nb: Must be called with the nfs_sndlock() set on the mount point.
 */
register struct nfsreq
*rep
;
register struct nfsreq
*rp
;
register struct nfsmount
*nmp
= rep
->r_nmp
;
while (error
= nfs_connect(nmp
, rep
)) {
if (error
== EINTR
|| error
== ERESTART
)
(void) tsleep((caddr_t
)&lbolt
, PSOCK
, "nfscon", 0);
* Loop through outstanding request list and fix up all requests
rp
->r_flags
|= R_MUSTRESEND
;
* NFS disconnect. Clean up and unlink.
register struct nfsmount
*nmp
;
register struct socket
*so
;
nmp
->nm_so
= (struct socket
*)0;
/*
 * This is the nfs send routine.  For connection based socket types, it
 * must be called with an nfs_sndlock() on the socket.
 * "rep == NULL" indicates that it has been called from a server.
 * For the client side:
 * - return EINTR if the RPC is terminated, 0 otherwise
 * - set R_MUSTRESEND if the send fails for any reason
 * - do any cleanup required by recoverable socket errors (???)
 * For the server side:
 * - return EINTR or ERESTART if interrupted by a signal
 * - return EPIPE if a connection is lost for connection based sockets (TCP...)
 * - do any cleanup required by recoverable socket errors (???)
 */
nfs_send(so
, nam
, top
, rep
)
register struct socket
*so
;
register struct mbuf
*top
;
int error
, soflags
, flags
;
if (rep
->r_flags
& R_SOFTTERM
) {
if ((so
= rep
->r_nmp
->nm_so
) == NULL
) {
rep
->r_flags
|= R_MUSTRESEND
;
rep
->r_flags
&= ~R_MUSTRESEND
;
soflags
= rep
->r_nmp
->nm_soflags
;
soflags
= so
->so_proto
->pr_flags
;
if ((soflags
& PR_CONNREQUIRED
) || (so
->so_state
& SS_ISCONNECTED
))
sendnam
= (struct mbuf
*)0;
if (so
->so_type
== SOCK_SEQPACKET
)
error
= sosend(so
, sendnam
, (struct uio
*)0, top
,
(struct mbuf
*)0, flags
);
if(error
) printf("nfssnd err=%d\n",error
);
* Deal with errors for the client side.
if (rep
->r_flags
& R_SOFTTERM
)
rep
->r_flags
|= R_MUSTRESEND
;
* Handle any recoverable (soft) socket errors here. (???)
if (error
!= EINTR
&& error
!= ERESTART
&&
error
!= EWOULDBLOCK
&& error
!= EPIPE
)
/*
 * Receive a Sun RPC Request/Reply.  For SOCK_DGRAM, the work is all
 * done by soreceive(), but for SOCK_STREAM we must deal with the Record
 * Mark and consolidate the data into a new mbuf list.
 * nb: Sometimes TCP passes the data up to soreceive() in long lists of
 *     small mbufs.
 * For SOCK_STREAM we must be very careful to read an entire record once
 * we have read any of it, even if the system call has been interrupted.
 */
nfs_receive(rep
, aname
, mp
)
register struct nfsreq
*rep
;
register struct socket
*so
;
int error
, sotype
, rcvflg
;
struct proc
*p
= curproc
; /* XXX */
* Set up arguments for soreceive()
*aname
= (struct mbuf
*)0;
sotype
= rep
->r_nmp
->nm_sotype
;
* For reliable protocols, lock against other senders/receivers
* in case a reconnect is necessary.
* For SOCK_STREAM, first get the Record Mark to find out how much
* We must lock the socket against other receivers
* until we have an entire rpc request/reply.
if (sotype
!= SOCK_DGRAM
) {
if (error
= nfs_sndlock(&rep
->r_nmp
->nm_flag
, rep
))
* Check for fatal errors and resending request.
* Ugh: If a reconnect attempt just happened, nm_so
* would have changed. NULL indicates a failed
* attempt that has essentially shut down this
if (rep
->r_mrep
|| (rep
->r_flags
& R_SOFTTERM
)) {
nfs_sndunlock(&rep
->r_nmp
->nm_flag
);
if ((so
= rep
->r_nmp
->nm_so
) == NULL
) {
if (error
= nfs_reconnect(rep
)) {
nfs_sndunlock(&rep
->r_nmp
->nm_flag
);
while (rep
->r_flags
& R_MUSTRESEND
) {
m
= m_copym(rep
->r_mreq
, 0, M_COPYALL
, M_WAIT
);
if (error
= nfs_send(so
, rep
->r_nmp
->nm_nam
, m
, rep
)) {
if (error
== EINTR
|| error
== ERESTART
||
(error
= nfs_reconnect(rep
))) {
nfs_sndunlock(&rep
->r_nmp
->nm_flag
);
nfs_sndunlock(&rep
->r_nmp
->nm_flag
);
if (sotype
== SOCK_STREAM
) {
aio
.iov_base
= (caddr_t
) &len
;
aio
.iov_len
= sizeof(u_long
);
auio
.uio_segflg
= UIO_SYSSPACE
;
auio
.uio_resid
= sizeof(u_long
);
error
= soreceive(so
, (struct mbuf
**)0, &auio
,
(struct mbuf
**)0, (struct mbuf
**)0, &rcvflg
);
if (error
== EWOULDBLOCK
&& rep
) {
if (rep
->r_flags
& R_SOFTTERM
)
} while (error
== EWOULDBLOCK
);
if (!error
&& auio
.uio_resid
> 0) {
"short receive (%d/%d) from nfs server %s\n",
sizeof(u_long
) - auio
.uio_resid
,
rep
->r_nmp
->nm_mountp
->mnt_stat
.f_mntfromname
);
len
= ntohl(len
) & ~0x80000000;
* This is SERIOUS! We are out of sync with the sender
* and forcing a disconnect/reconnect is all I can do.
if (len
> NFS_MAXPACKET
) {
log(LOG_ERR
, "%s (%d) from nfs server %s\n",
"impossible packet length",
rep
->r_nmp
->nm_mountp
->mnt_stat
.f_mntfromname
);
error
= soreceive(so
, (struct mbuf
**)0,
&auio
, mp
, (struct mbuf
**)0, &rcvflg
);
} while (error
== EWOULDBLOCK
|| error
== EINTR
||
if (!error
&& auio
.uio_resid
> 0) {
"short receive (%d/%d) from nfs server %s\n",
len
- auio
.uio_resid
, len
,
rep
->r_nmp
->nm_mountp
->mnt_stat
.f_mntfromname
);
* NB: Since uio_resid is big, MSG_WAITALL is ignored
* and soreceive() will return when it has either a
* control msg or a data msg.
* We have no use for control msg., but must grab them
* and then throw them away so we know what is going
auio
.uio_resid
= len
= 100000000; /* Anything Big */
error
= soreceive(so
, (struct mbuf
**)0,
&auio
, mp
, &control
, &rcvflg
);
if (error
== EWOULDBLOCK
&& rep
) {
if (rep
->r_flags
& R_SOFTTERM
)
} while (error
== EWOULDBLOCK
||
(!error
&& *mp
== NULL
&& control
));
if ((rcvflg
& MSG_EOR
) == 0)
if (!error
&& *mp
== NULL
)
if (error
&& error
!= EINTR
&& error
!= ERESTART
) {
if (error
!= EPIPE
&& rep
)
"receive error %d from nfs server %s\n",
rep
->r_nmp
->nm_mountp
->mnt_stat
.f_mntfromname
);
error
= nfs_sndlock(&rep
->r_nmp
->nm_flag
, rep
);
error
= nfs_reconnect(rep
);
if ((so
= rep
->r_nmp
->nm_so
) == NULL
)
if (so
->so_state
& SS_ISCONNECTED
)
getnam
= (struct mbuf
**)0;
auio
.uio_resid
= len
= 1000000;
error
= soreceive(so
, getnam
, &auio
, mp
,
(struct mbuf
**)0, &rcvflg
);
if (error
== EWOULDBLOCK
&&
(rep
->r_flags
& R_SOFTTERM
))
} while (error
== EWOULDBLOCK
);
* Search for any mbufs that are not a multiple of 4 bytes long
* or with m_data not longword aligned.
* These could cause pointer alignment problems, so copy them to
nfs_realign(*mp
, 5 * NFSX_UNSIGNED
);
/*
 * Implement receipt of reply on a socket.
 * We must search through the list of received datagrams matching them
 * with outstanding requests using the xid, until ours is found.
 */
register struct nfsreq
*rep
;
register struct nfsmount
*nmp
= myrep
->r_nmp
;
struct mbuf
*mrep
, *nam
, *md
;
* Loop around until we get our own reply
* Lock against other receivers so that I don't get stuck in
* sbwait() after someone else has received my reply for me.
* Also necessary for connection based protocols to avoid
* race conditions during a reconnect.
if (error
= nfs_rcvlock(myrep
))
/* Already received, bye bye */
if (myrep
->r_mrep
!= NULL
) {
nfs_rcvunlock(&nmp
->nm_flag
);
* Get the next Rpc reply off the socket
error
= nfs_receive(myrep
, &nam
, &mrep
);
nfs_rcvunlock(&nmp
->nm_flag
);
if (error
) printf("rcv err=%d\n",error
);
* Ignore routing errors on connectionless protocols??
if (NFSIGNORE_SOERROR(nmp
->nm_soflags
, error
)) {
nmp
->nm_so
->so_error
= 0;
* Get the xid and check that it is an rpc reply
dpos
= mtod(md
, caddr_t
);
nfsm_dissect(tl
, u_long
*, 2*NFSX_UNSIGNED
);
if (nmp
->nm_flag
& NFSMNT_NQNFS
) {
if (nqnfs_callback(nmp
, mrep
, md
, dpos
))
* Loop through the request list to match up the reply
* Iff no match, just drop the datagram
while (rep
!= &nfsreqh
) {
if (rep
->r_mrep
== NULL
&& rxid
== rep
->r_xid
) {
rt
= &nfsrtt
.rttl
[nfsrtt
.pos
];
rt
->proc
= rep
->r_procnum
;
rt
->rto
= NFS_RTO(nmp
, proct
[rep
->r_procnum
]);
rt
->srtt
= nmp
->nm_srtt
[proct
[rep
->r_procnum
] - 1];
rt
->sdrtt
= nmp
->nm_sdrtt
[proct
[rep
->r_procnum
] - 1];
rt
->fsid
= nmp
->nm_mountp
->mnt_stat
.f_fsid
;
if (rep
->r_flags
& R_TIMING
)
nfsrtt
.pos
= (nfsrtt
.pos
+ 1) % NFSRTTLOGSIZ
;
* Update congestion window.
* Do the additive increase of
if (nmp
->nm_cwnd
<= nmp
->nm_sent
) {
(NFS_CWNDSCALE
* NFS_CWNDSCALE
+
(nmp
->nm_cwnd
>> 1)) / nmp
->nm_cwnd
;
if (nmp
->nm_cwnd
> NFS_MAXCWND
)
nmp
->nm_cwnd
= NFS_MAXCWND
;
nmp
->nm_sent
-= NFS_CWNDSCALE
;
* Update rtt using a gain of 0.125 on the mean
* and a gain of 0.25 on the deviation.
if (rep
->r_flags
& R_TIMING
) {
* Since the timer resolution of
* NFS_HZ is so course, it can often
* result in r_rtt == 0. Since
* r_rtt == N means that the actual
* rtt is between N+dt and N+2-dt ticks,
t1
-= (NFS_SRTT(rep
) >> 3);
t1
-= (NFS_SDRTT(rep
) >> 2);
* If not matched to a request, drop it.
nfsstats
.rpcunexpected
++;
/*
 * nfs_request - goes something like this
 *	- fill in request struct
 *	- calls nfs_send() for first transmit
 *	- calls nfs_receive() to get reply
 *	- break down rpc header and return with nfs reply pointed to
 * nb: always frees up mreq mbuf list
 */
nfs_request(vp
, mrest
, procnum
, procp
, cred
, mrp
, mdp
, dposp
)
register struct mbuf
*m
, *mrep
;
register struct nfsreq
*rep
;
struct mbuf
*md
, *mheadend
;
time_t reqtime
, waituntil
;
int t1
, nqlflag
, cachable
, s
, error
= 0, mrest_len
, auth_len
, auth_type
;
int trylater_delay
= NQ_TRYLATERDEL
, trylater_cnt
= 0, failed_auth
= 0;
nmp
= VFSTONFS(vp
->v_mount
);
MALLOC(rep
, struct nfsreq
*, sizeof(struct nfsreq
), M_NFSREQ
, M_WAITOK
);
rep
->r_procnum
= procnum
;
* Get the RPC header with authorization.
if (nmp
->nm_flag
& NFSMNT_KERB
) {
error
= nfs_getauth(nmp
, rep
, cred
, &auth_type
,
free((caddr_t
)rep
, M_NFSREQ
);
auth_type
= RPCAUTH_UNIX
;
auth_len
= 5 * NFSX_UNSIGNED
;
auth_type
= RPCAUTH_UNIX
;
auth_len
= ((((cred
->cr_ngroups
- 1) > nmp
->nm_numgrps
) ?
nmp
->nm_numgrps
: (cred
->cr_ngroups
- 1)) << 2) +
m
= nfsm_rpchead(cred
, (nmp
->nm_flag
& NFSMNT_NQNFS
), procnum
,
auth_type
, auth_len
, auth_str
, mrest
, mrest_len
, &mheadend
, &xid
);
* For stream protocols, insert a Sun RPC Record Mark.
if (nmp
->nm_sotype
== SOCK_STREAM
) {
M_PREPEND(m
, NFSX_UNSIGNED
, M_WAIT
);
*mtod(m
, u_long
*) = htonl(0x80000000 |
(m
->m_pkthdr
.len
- NFSX_UNSIGNED
));
if (nmp
->nm_flag
& NFSMNT_SOFT
)
rep
->r_retry
= nmp
->nm_retry
;
rep
->r_retry
= NFS_MAXREXMIT
+ 1; /* past clip limit */
rep
->r_rtt
= rep
->r_rexmit
= 0;
* Do the client side RPC.
* Chain request into list of outstanding requests. Be sure
* to put it LAST so timer finds oldest requests first.
reph
->r_prev
->r_next
= rep
;
rep
->r_prev
= reph
->r_prev
;
/* Get send time for nqnfs */
* If backing off another request or avoiding congestion, don't
* send this one now but let timer do it. If not timing a request,
if (nmp
->nm_so
&& (nmp
->nm_sotype
!= SOCK_DGRAM
||
(nmp
->nm_flag
& NFSMNT_DUMBTIMR
) ||
nmp
->nm_sent
< nmp
->nm_cwnd
)) {
if (nmp
->nm_soflags
& PR_CONNREQUIRED
)
error
= nfs_sndlock(&nmp
->nm_flag
, rep
);
m
= m_copym(m
, 0, M_COPYALL
, M_WAIT
);
error
= nfs_send(nmp
->nm_so
, nmp
->nm_nam
, m
, rep
);
if (nmp
->nm_soflags
& PR_CONNREQUIRED
)
nfs_sndunlock(&nmp
->nm_flag
);
if (!error
&& (rep
->r_flags
& R_MUSTRESEND
) == 0) {
nmp
->nm_sent
+= NFS_CWNDSCALE
;
* Wait for the reply from our send or the timer's.
* RPC done, unlink the request.
rep
->r_prev
->r_next
= rep
->r_next
;
rep
->r_next
->r_prev
= rep
->r_prev
;
* If there was a successful reply and a tprintf msg.
if (!error
&& (rep
->r_flags
& R_TPRINTFMSG
))
nfs_msg(rep
->r_procp
, nmp
->nm_mountp
->mnt_stat
.f_mntfromname
,
free((caddr_t
)rep
, M_NFSREQ
);
* break down the rpc header and check if ok
nfsm_dissect(tl
, u_long
*, 3*NFSX_UNSIGNED
);
if (*tl
++ == rpc_msgdenied
) {
else if ((nmp
->nm_flag
& NFSMNT_KERB
) && *tl
++ == rpc_autherr
) {
if (*tl
== rpc_rejectedcred
&& failed_auth
== 0) {
mheadend
->m_next
= (struct mbuf
*)0;
free((caddr_t
)rep
, M_NFSREQ
);
* skip over the auth_verf, someday we may want to cache auth_short's
* for nfs_reqhead(), but for now just dump it
i
= nfsm_rndup(fxdr_unsigned(long, *tl
));
nfsm_dissect(tl
, u_long
*, NFSX_UNSIGNED
);
nfsm_dissect(tl
, u_long
*, NFSX_UNSIGNED
);
error
= fxdr_unsigned(int, *tl
);
if ((nmp
->nm_flag
& NFSMNT_NQNFS
) &&
error
== NQNFS_TRYLATER
) {
waituntil
= time
.tv_sec
+ trylater_delay
;
while (time
.tv_sec
< waituntil
)
(void) tsleep((caddr_t
)&lbolt
,
trylater_delay
*= nfs_backoff
[trylater_cnt
];
free((caddr_t
)rep
, M_NFSREQ
);
* For nqnfs, get any lease in reply
if (nmp
->nm_flag
& NFSMNT_NQNFS
) {
nfsm_dissect(tl
, u_long
*, NFSX_UNSIGNED
);
nqlflag
= fxdr_unsigned(int, *tl
);
nfsm_dissect(tl
, u_long
*, 4*NFSX_UNSIGNED
);
cachable
= fxdr_unsigned(int, *tl
++);
reqtime
+= fxdr_unsigned(int, *tl
++);
if (reqtime
> time
.tv_sec
) {
if (np
->n_tnext
== (struct nfsnode
*)nmp
)
nmp
->nm_tprev
= np
->n_tprev
;
np
->n_tnext
->n_tprev
= np
->n_tprev
;
if (np
->n_tprev
== (struct nfsnode
*)nmp
)
nmp
->nm_tnext
= np
->n_tnext
;
np
->n_tprev
->n_tnext
= np
->n_tnext
;
if (nqlflag
== NQL_WRITE
)
np
->n_flag
|= NQNFSWRITE
;
} else if (nqlflag
== NQL_READ
)
np
->n_flag
&= ~NQNFSWRITE
;
np
->n_flag
|= NQNFSWRITE
;
np
->n_flag
&= ~NQNFSNONCACHE
;
np
->n_flag
|= NQNFSNONCACHE
;
fxdr_hyper(tl
, &np
->n_lrev
);
while (tp
!= (struct nfsnode
*)nmp
&&
tp
->n_expiry
> np
->n_expiry
)
if (tp
== (struct nfsnode
*)nmp
) {
np
->n_tnext
= nmp
->nm_tnext
;
np
->n_tnext
= tp
->n_tnext
;
if (np
->n_tnext
== (struct nfsnode
*)nmp
)
np
->n_tnext
->n_tprev
= np
;
FREE((caddr_t
)rep
, M_NFSREQ
);
free((caddr_t
)rep
, M_NFSREQ
);
* Generate the rpc reply header
* siz arg. is used to decide if adding a cluster is worthwhile
nfs_rephead(siz
, nd
, err
, cache
, frev
, mrq
, mbp
, bposp
)
register struct mbuf
*mreq
;
MGETHDR(mreq
, M_WAIT
, MT_DATA
);
* If this is a big reply, use a cluster else
* try and leave leading space for the lower level headers.
tl
= mtod(mreq
, u_long
*);
mreq
->m_len
= 6*NFSX_UNSIGNED
;
bpos
= ((caddr_t
)tl
)+mreq
->m_len
;
if (err
== ERPCMISMATCH
|| err
== NQNFS_AUTHERR
) {
if (err
== NQNFS_AUTHERR
) {
mreq
->m_len
-= NFSX_UNSIGNED
;
*tl
++ = txdr_unsigned(2);
*tl
= txdr_unsigned(RPC_PROGUNAVAIL
);
*tl
= txdr_unsigned(RPC_PROGMISMATCH
);
nfsm_build(tl
, u_long
*, 2*NFSX_UNSIGNED
);
*tl
++ = txdr_unsigned(2);
*tl
= txdr_unsigned(2); /* someday 3 */
*tl
= txdr_unsigned(RPC_PROCUNAVAIL
);
nfsm_build(tl
, u_long
*, NFSX_UNSIGNED
);
*tl
= txdr_unsigned(nfsrv_errmap
[err
- 1]);
* For nqnfs, piggyback lease as requested.
if (nd
->nd_nqlflag
!= NQL_NOVAL
&& err
== 0) {
nfsm_build(tl
, u_long
*, 5*NFSX_UNSIGNED
);
*tl
++ = txdr_unsigned(nd
->nd_nqlflag
);
*tl
++ = txdr_unsigned(cache
);
*tl
++ = txdr_unsigned(nd
->nd_duration
);
nfsm_build(tl
, u_long
*, NFSX_UNSIGNED
);
if (err
!= 0 && err
!= VNOVAL
)
/*
 * Scan the nfsreq list and retransmit any requests that have timed out.
 * To avoid retransmission attempts on STREAM sockets (in the future) make
 * sure to set the r_retry field to 0 (implies nm_retry == 0).
 */
register struct nfsreq
*rep
;
register struct socket
*so
;
register struct nfsmount
*nmp
;
static long lasttime
= 0;
for (rep
= nfsreqh
.r_next
; rep
!= &nfsreqh
; rep
= rep
->r_next
) {
if (rep
->r_mrep
|| (rep
->r_flags
& R_SOFTTERM
))
if (nfs_sigintr(nmp
, rep
, rep
->r_procp
)) {
rep
->r_flags
|= R_SOFTTERM
;
if (nmp
->nm_flag
& NFSMNT_DUMBTIMR
)
timeo
= NFS_RTO(nmp
, proct
[rep
->r_procnum
]);
if (nmp
->nm_timeouts
> 0)
timeo
*= nfs_backoff
[nmp
->nm_timeouts
- 1];
if (nmp
->nm_timeouts
< 8)
* Check for server not responding
if ((rep
->r_flags
& R_TPRINTFMSG
) == 0 &&
rep
->r_rexmit
> nmp
->nm_deadthresh
) {
nmp
->nm_mountp
->mnt_stat
.f_mntfromname
,
rep
->r_flags
|= R_TPRINTFMSG
;
if (rep
->r_rexmit
>= rep
->r_retry
) { /* too many */
rep
->r_flags
|= R_SOFTTERM
;
if (nmp
->nm_sotype
!= SOCK_DGRAM
) {
if (++rep
->r_rexmit
> NFS_MAXREXMIT
)
rep
->r_rexmit
= NFS_MAXREXMIT
;
if ((so
= nmp
->nm_so
) == NULL
)
* If there is enough space and the window allows..
* Set r_rtt to -1 in case we fail to send it now.
if (sbspace(&so
->so_snd
) >= rep
->r_mreq
->m_pkthdr
.len
&&
((nmp
->nm_flag
& NFSMNT_DUMBTIMR
) ||
(rep
->r_flags
& R_SENT
) ||
nmp
->nm_sent
< nmp
->nm_cwnd
) &&
(m
= m_copym(rep
->r_mreq
, 0, M_COPYALL
, M_DONTWAIT
))){
if ((nmp
->nm_flag
& NFSMNT_NOCONN
) == 0)
error
= (*so
->so_proto
->pr_usrreq
)(so
, PRU_SEND
, m
,
(struct mbuf
*)0, (struct mbuf
*)0);
error
= (*so
->so_proto
->pr_usrreq
)(so
, PRU_SEND
, m
,
nmp
->nm_nam
, (struct mbuf
*)0);
if (NFSIGNORE_SOERROR(nmp
->nm_soflags
, error
))
* Iff first send, start timing
* else turn timing off, backoff timer
* and divide congestion window by 2.
if (rep
->r_flags
& R_SENT
) {
rep
->r_flags
&= ~R_TIMING
;
if (++rep
->r_rexmit
> NFS_MAXREXMIT
)
rep
->r_rexmit
= NFS_MAXREXMIT
;
if (nmp
->nm_cwnd
< NFS_CWNDSCALE
)
nmp
->nm_cwnd
= NFS_CWNDSCALE
;
nmp
->nm_sent
+= NFS_CWNDSCALE
;
* Call the nqnfs server timer once a second to handle leases.
if (lasttime
!= time
.tv_sec
) {
timeout(nfs_timer
, (caddr_t
)0, hz
/NFS_HZ
);
* Test for a termination condition pending on the process.
* This is used for NFSMNT_INT mounts.
if (rep
&& (rep
->r_flags
& R_SOFTTERM
))
if (!(nmp
->nm_flag
& NFSMNT_INT
))
if (p
&& p
->p_sig
&& (((p
->p_sig
&~ p
->p_sigmask
) &~ p
->p_sigignore
) &
* Lock a socket against others.
* Necessary for STREAM sockets to ensure you get an entire rpc request/reply
* and also to avoid race conditions between the processes with nfs requests
* in progress when a reconnect is necessary.
while (*flagp
& NFSMNT_SNDLOCK
) {
if (nfs_sigintr(rep
->r_nmp
, rep
, p
))
*flagp
|= NFSMNT_WANTSND
;
(void) tsleep((caddr_t
)flagp
, PZERO
-1, "nfsndlck", 0);
*flagp
|= NFSMNT_SNDLOCK
;
* Unlock the stream socket for others.
if ((*flagp
& NFSMNT_SNDLOCK
) == 0)
*flagp
&= ~NFSMNT_SNDLOCK
;
if (*flagp
& NFSMNT_WANTSND
) {
*flagp
&= ~NFSMNT_WANTSND
;
register struct nfsreq
*rep
;
register int *flagp
= &rep
->r_nmp
->nm_flag
;
while (*flagp
& NFSMNT_RCVLOCK
) {
if (nfs_sigintr(rep
->r_nmp
, rep
, rep
->r_procp
))
*flagp
|= NFSMNT_WANTRCV
;
(void) tsleep((caddr_t
)flagp
, PZERO
-1, "nfsrcvlck", 0);
*flagp
|= NFSMNT_RCVLOCK
;
* Unlock the stream socket for others.
if ((*flagp
& NFSMNT_RCVLOCK
) == 0)
*flagp
&= ~NFSMNT_RCVLOCK
;
if (*flagp
& NFSMNT_WANTRCV
) {
*flagp
&= ~NFSMNT_WANTRCV
;
/*
 * This function compares two net addresses by family and returns TRUE
 * if they are the same host.
 * If there is any doubt, return FALSE.
 * The AF_INET family is handled as a special case so that address mbufs
 * don't need to be saved to store "struct in_addr", which is only 4 bytes.
 */
nfs_netaddr_match(family
, haddr
, hmask
, nam
)
union nethostaddr
*haddr
;
union nethostaddr
*hmask
;
register struct sockaddr_in
*inetaddr
;
register struct sockaddr_iso
*isoaddr1
, *isoaddr2
;
inetaddr
= mtod(nam
, struct sockaddr_in
*);
if (inetaddr
->sin_family
!= AF_INET
)
if ((inetaddr
->sin_addr
.s_addr
& hmask
->had_inetaddr
) ==
(haddr
->had_inetaddr
& hmask
->had_inetaddr
))
} else if (inetaddr
->sin_addr
.s_addr
== haddr
->had_inetaddr
)
isoaddr1
= mtod(nam
, struct sockaddr_iso
*);
if (isoaddr1
->siso_family
!= AF_ISO
)
isoaddr2
= mtod(haddr
->had_nam
, struct sockaddr_iso
*);
if (isoaddr1
->siso_nlen
> 0 &&
isoaddr1
->siso_nlen
== isoaddr2
->siso_nlen
&&
SAME_ISOADDR(isoaddr1
, isoaddr2
))
* Build hash lists of net addresses and hang them off the mount point.
* Called by ufs_mount() to set up the lists of export addresses.
register struct netaddrhash
*np
, **hnp
;
struct mbuf
*nam
, *msk
= (struct mbuf
*)0;
union nethostaddr netmsk
;
if (error
= sockargs(&nam
, (caddr_t
)argp
->saddr
, argp
->slen
,
saddr
= mtod(nam
, struct sockaddr
*);
if (saddr
->sa_family
== AF_INET
&&
((struct sockaddr_in
*)saddr
)->sin_addr
.s_addr
== INADDR_ANY
) {
if (mp
->mnt_flag
& MNT_DEFEXPORTED
)
np
= &ump
->um_defexported
;
np
->neth_exflags
= argp
->exflags
;
np
->neth_anon
= argp
->anon
;
np
->neth_anon
.cr_ref
= 1;
mp
->mnt_flag
|= MNT_DEFEXPORTED
;
if (error
= sockargs(&msk
, (caddr_t
)argp
->smask
, argp
->msklen
,
* Scan all the hash lists to check against duplications.
* For the net list, try both masks to catch a subnet
hnp
= &ump
->um_netaddr
[NETMASK_HASH
];
if (saddr
->sa_family
== AF_INET
)
mtod(msk
, struct sockaddr_in
*)->sin_addr
.s_addr
;
if (nfs_netaddr_match(np
->neth_family
, &np
->neth_haddr
,
nfs_netaddr_match(np
->neth_family
, &np
->neth_haddr
,
for (i
= 0; i
< NETHASHSZ
; i
++) {
if (nfs_netaddr_match(np
->neth_family
, &np
->neth_haddr
,
hnp
= &ump
->um_netaddr
[NETADDRHASH(saddr
)];
np
= ump
->um_netaddr
[NETMASK_HASH
];
if (nfs_netaddr_match(np
->neth_family
, &np
->neth_haddr
,
if (nfs_netaddr_match(np
->neth_family
, &np
->neth_haddr
,
(union nethostaddr
*)0, nam
)) {
np
= (struct netaddrhash
*) malloc(sizeof(struct netaddrhash
), M_NETADDR
,
np
->neth_family
= saddr
->sa_family
;
if (saddr
->sa_family
== AF_INET
) {
np
->neth_inetaddr
= ((struct sockaddr_in
*)saddr
)->sin_addr
.s_addr
;
np
->neth_inetmask
= netmsk
.had_inetaddr
;
if (np
->neth_inetaddr
&~ np
->neth_inetmask
)
np
->neth_inetmask
= 0xffffffff;
np
->neth_exflags
= argp
->exflags
;
np
->neth_anon
= argp
->anon
;
np
->neth_anon
.cr_ref
= 1;
* Free the net address hash lists that are hanging off the mount points.
register struct netaddrhash
*np
, *onp
;
for (i
= 0; i
<= NETHASHSZ
; i
++) {
ump
->um_netaddr
[i
] = (struct netaddrhash
*)0;
if (onp
->neth_family
!= AF_INET
) {
free((caddr_t
)onp
, M_NETADDR
);
* Generate a hash code for an iso host address. Used by NETADDRHASH() for
register struct sockaddr_iso
*siso
;
for (i
= 0; i
< siso
->siso_nlen
; i
++)
sum
+= siso
->siso_data
[i
];
return (sum
& (NETHASHSZ
- 1));
* Check for badly aligned mbuf data areas and
* realign data in an mbuf list by copying the data areas up, as required.
register struct mbuf
*m2
;
register int siz
, mlen
, olen
;
register caddr_t tcp
, fcp
;
* This never happens for UDP, rarely happens for TCP
* but frequently happens for iso transport.
if ((m
->m_len
& 0x3) || (mtod(m
, int) & 0x3)) {
m
->m_data
= m
->m_ext
.ext_buf
;
* If possible, only put the first invariant part
* of the RPC header in the first mbuf.
mlen
= M_TRAILINGSPACE(m
);
* Loop through the mbuf list consolidating data.
m2
->m_flags
&= ~M_PKTHDR
;
m2
->m_data
= m2
->m_ext
.ext_buf
;
mlen
= M_TRAILINGSPACE(m2
);
* Finally, set m_len == 0 for any trailing mbufs that have
/*
 * Socket upcall routine for the nfsd sockets.
 * The caddr_t arg is a pointer to the "struct nfssvc_sock".
 * Essentially do as much as possible non-blocking, else punt and it will
 * be called with M_WAIT from an nfsd.
 */
nfsrv_rcv(so
, arg
, waitflag
)
register struct nfssvc_sock
*slp
= (struct nfssvc_sock
*)arg
;
if ((slp
->ns_flag
& SLP_VALID
) == 0)
* Define this to test for nfsds handling this under heavy load.
if (waitflag
== M_DONTWAIT
) {
slp
->ns_flag
|= SLP_NEEDQ
; goto dorecs
;
if (so
->so_type
== SOCK_STREAM
) {
* If there are already records on the queue, defer soreceive()
* to an nfsd so that there is feedback to the TCP layer that
* the nfs servers are heavily loaded.
if (slp
->ns_rec
&& waitflag
== M_DONTWAIT
) {
slp
->ns_flag
|= SLP_NEEDQ
;
auio
.uio_resid
= 1000000000;
error
= soreceive(so
, &nam
, &auio
, &mp
, (struct mbuf
**)0, &flags
);
if (error
|| mp
== (struct mbuf
*)0) {
if (error
== EWOULDBLOCK
)
slp
->ns_flag
|= SLP_NEEDQ
;
slp
->ns_flag
|= SLP_DISCONN
;
slp
->ns_rawend
->m_next
= m
;
slp
->ns_cc
+= 1000000000 - auio
.uio_resid
;
slp
->ns_cc
= 1000000000 - auio
.uio_resid
;
* Now try and parse record(s) out of the raw stream data.
if (error
= nfsrv_getstream(slp
, waitflag
)) {
slp
->ns_flag
|= SLP_DISCONN
;
slp
->ns_flag
|= SLP_NEEDQ
;
auio
.uio_resid
= 1000000000;
error
= soreceive(so
, &nam
, &auio
, &mp
,
(struct mbuf
**)0, &flags
);
nfs_realign(mp
, 10 * NFSX_UNSIGNED
);
slp
->ns_recend
->m_nextpkt
= m
;
m
->m_nextpkt
= (struct mbuf
*)0;
if ((so
->so_proto
->pr_flags
& PR_CONNREQUIRED
)
&& error
!= EWOULDBLOCK
) {
slp
->ns_flag
|= SLP_DISCONN
;
* Now try and process the request records, non-blocking.
if (waitflag
== M_DONTWAIT
&&
(slp
->ns_rec
|| (slp
->ns_flag
& (SLP_NEEDQ
| SLP_DISCONN
))))
* Try and extract an RPC request from the mbuf data list received on a
* stream socket. The "waitflag" argument indicates whether or not it
nfsrv_getstream(slp
, waitflag
)
register struct nfssvc_sock
*slp
;
register char *cp1
, *cp2
;
struct mbuf
*om
, *m2
, *recm
;
if (slp
->ns_flag
& SLP_GETSTREAM
)
slp
->ns_flag
|= SLP_GETSTREAM
;
if (slp
->ns_reclen
== 0) {
if (slp
->ns_cc
< NFSX_UNSIGNED
) {
slp
->ns_flag
&= ~SLP_GETSTREAM
;
if (m
->m_len
>= NFSX_UNSIGNED
) {
bcopy(mtod(m
, caddr_t
), (caddr_t
)&recmark
, NFSX_UNSIGNED
);
m
->m_data
+= NFSX_UNSIGNED
;
m
->m_len
-= NFSX_UNSIGNED
;
while (cp1
< ((caddr_t
)&recmark
) + NFSX_UNSIGNED
) {
slp
->ns_cc
-= NFSX_UNSIGNED
;
slp
->ns_reclen
= ntohl(recmark
) & ~0x80000000;
if (slp
->ns_reclen
< NFS_MINPACKET
|| slp
->ns_reclen
> NFS_MAXPACKET
) {
slp
->ns_flag
&= ~SLP_GETSTREAM
;
* Now get the record part.
if (slp
->ns_cc
== slp
->ns_reclen
) {
slp
->ns_raw
= slp
->ns_rawend
= (struct mbuf
*)0;
slp
->ns_cc
= slp
->ns_reclen
= 0;
} else if (slp
->ns_cc
> slp
->ns_reclen
) {
while (len
< slp
->ns_reclen
) {
if ((len
+ m
->m_len
) > slp
->ns_reclen
) {
m2
= m_copym(m
, 0, slp
->ns_reclen
- len
,
m
->m_data
+= slp
->ns_reclen
- len
;
m
->m_len
-= slp
->ns_reclen
- len
;
slp
->ns_flag
&= ~SLP_GETSTREAM
;
} else if ((len
+ m
->m_len
) == slp
->ns_reclen
) {
om
->m_next
= (struct mbuf
*)0;
slp
->ns_flag
&= ~SLP_GETSTREAM
;
nfs_realign(recm
, 10 * NFSX_UNSIGNED
);
slp
->ns_recend
->m_nextpkt
= recm
;
register struct nfssvc_sock
*slp
;
register struct nfsd
*nd
;
if ((slp
->ns_flag
& SLP_VALID
) == 0 ||
(m
= slp
->ns_rec
) == (struct mbuf
*)0)
if (slp
->ns_rec
= m
->m_nextpkt
)
m
->m_nextpkt
= (struct mbuf
*)0;
slp
->ns_recend
= (struct mbuf
*)0;
if (m
->m_type
== MT_SONAME
) {
nd
->nd_md
= nd
->nd_mrep
= m
->m_next
;
m
->m_next
= (struct mbuf
*)0;
nd
->nd_nam
= (struct mbuf
*)0;
nd
->nd_md
= nd
->nd_mrep
= m
;
nd
->nd_dpos
= mtod(nd
->nd_md
, caddr_t
);
if (error
= nfs_getreq(nd
, TRUE
)) {
* - fill in the cred struct.
nfs_getreq(nd
, has_header
)
register struct nfsd
*nd
;
u_long nfsvers
, auth_type
;
int error
= 0, nqnfs
= 0;
nfsm_dissect(tl
, u_long
*, 10*NFSX_UNSIGNED
);
nfsm_dissect(tl
, u_long
*, 8*NFSX_UNSIGNED
);
nd
->nd_repstat
= ERPCMISMATCH
;
nd
->nd_procnum
= NFSPROC_NOOP
;
nd
->nd_repstat
= EPROGUNAVAIL
;
nd
->nd_procnum
= NFSPROC_NOOP
;
nd
->nd_repstat
= EPROGMISMATCH
;
nd
->nd_procnum
= NFSPROC_NOOP
;
nd
->nd_procnum
= fxdr_unsigned(u_long
, *tl
++);
if (nd
->nd_procnum
== NFSPROC_NULL
)
if (nd
->nd_procnum
>= NFS_NPROCS
||
(!nqnfs
&& nd
->nd_procnum
> NFSPROC_STATFS
) ||
(*tl
!= rpc_auth_unix
&& *tl
!= rpc_auth_kerb
)) {
nd
->nd_repstat
= EPROCUNAVAIL
;
nd
->nd_procnum
= NFSPROC_NOOP
;
len
= fxdr_unsigned(int, *tl
++);
if (len
< 0 || len
> RPCAUTH_MAXSIZ
) {
* Handle auth_unix or auth_kerb.
if (auth_type
== rpc_auth_unix
) {
len
= fxdr_unsigned(int, *++tl
);
if (len
< 0 || len
> NFS_MAXNAMLEN
) {
nfsm_adv(nfsm_rndup(len
));
nfsm_dissect(tl
, u_long
*, 3*NFSX_UNSIGNED
);
nd
->nd_cr
.cr_uid
= fxdr_unsigned(uid_t
, *tl
++);
nd
->nd_cr
.cr_gid
= fxdr_unsigned(gid_t
, *tl
++);
len
= fxdr_unsigned(int, *tl
);
if (len
< 0 || len
> RPCAUTH_UNIXGIDS
) {
nfsm_dissect(tl
, u_long
*, (len
+ 2)*NFSX_UNSIGNED
);
for (i
= 1; i
<= len
; i
++)
nd
->nd_cr
.cr_groups
[i
] = fxdr_unsigned(gid_t
, *tl
++);
nd
->nd_cr
.cr_ngroups
= (len
>= NGROUPS
) ? NGROUPS
: (len
+ 1);
} else if (auth_type
== rpc_auth_kerb
) {
nd
->nd_cr
.cr_uid
= fxdr_unsigned(uid_t
, *tl
++);
nd
->nd_authlen
= fxdr_unsigned(int, *tl
);
iov
.iov_len
= uio
.uio_resid
= nfsm_rndup(nd
->nd_authlen
);
if (uio
.uio_resid
> (len
- 2*NFSX_UNSIGNED
)) {
uio
.uio_segflg
= UIO_SYSSPACE
;
iov
.iov_base
= (caddr_t
)nd
->nd_authstr
;
nfsm_mtouio(&uio
, uio
.uio_resid
);
nfsm_dissect(tl
, u_long
*, 2*NFSX_UNSIGNED
);
nd
->nd_flag
|= NFSD_NEEDAUTH
;
* Do we have any use for the verifier.
* According to the "Remote Procedure Call Protocol Spec." it
* should be AUTH_NULL, but some clients make it AUTH_UNIX?
* For now, just skip over it
len
= fxdr_unsigned(int, *++tl
);
if (len
< 0 || len
> RPCAUTH_MAXSIZ
) {
nfsm_adv(nfsm_rndup(len
));
* For nqnfs, get piggybacked lease request.
if (nqnfs
&& nd
->nd_procnum
!= NQNFSPROC_EVICTED
) {
nfsm_dissect(tl
, u_long
*, NFSX_UNSIGNED
);
nd
->nd_nqlflag
= fxdr_unsigned(int, *tl
);
nfsm_dissect(tl
, u_long
*, NFSX_UNSIGNED
);
nd
->nd_duration
= fxdr_unsigned(int, *tl
);
nd
->nd_duration
= NQ_MINLEASE
;
nd
->nd_nqlflag
= NQL_NOVAL
;
nd
->nd_duration
= NQ_MINLEASE
;
* Search for a sleeping nfsd and wake it up.
* SIDE EFFECT: If none found, set NFSD_CHECKSLP flag, so that one of the
* running nfsds will go look for the work in the nfssvc_sock list.
register struct nfsd
*nd
= nfsd_head
.nd_next
;
if ((slp
->ns_flag
& SLP_VALID
) == 0)
while (nd
!= (struct nfsd
*)&nfsd_head
) {
if (nd
->nd_flag
& NFSD_WAITING
) {
nd
->nd_flag
&= ~NFSD_WAITING
;
slp
->ns_flag
|= SLP_DOREC
;
nfsd_head
.nd_flag
|= NFSD_CHECKSLP
;
tprintf(tpr
, "nfs server %s: %s\n", server
, msg
);