* Copyright (c) 1989 The Regents of the University of California.
* This code is derived from software contributed to Berkeley by
* Rick Macklem at The University of Guelph.
* Redistribution and use in source and binary forms are permitted
* provided that the above copyright notice and this paragraph are
* duplicated in all such forms and that any documentation,
* advertising materials, and other materials related to such
* distribution and use acknowledge that the software was developed
* by the University of California, Berkeley. The name of the
* University may not be used to endorse or promote products derived
* from this software without specific prior written permission.
* THIS SOFTWARE IS PROVIDED ``AS IS'' AND WITHOUT ANY EXPRESS OR
* IMPLIED WARRANTIES, INCLUDING, WITHOUT LIMITATION, THE IMPLIED
* WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE.
* @(#)nfs_socket.c 7.6 (Berkeley) %G%
* Socket operations for use by nfs (similar to uipc_socket.c, but never
* with copies to/from a uio vector)
* NB: For now, they only work for datagram sockets.
* (Use on stream sockets would require some record boundary mark in the
* stream as defined by "RPC: Remote Procedure Call Protocol
* Specification" RFC1057 Section 10)
* and different versions of send, receive and reply that do not assume
/* Log an NFS error message (printf-style, one host-name argument) at LOG_ERR. */
#define nfs_log(message, host) log(LOG_ERR, message, host)
/*
 * Set the lock on sockbuf sb, sleeping at negative priority (PZERO-1)
 * so the wait cannot be interrupted by a signal.  Callers spin on
 * SB_LOCK, announce interest with SB_WANT, then take the lock.
 * NB: the macro as extracted was missing its closing braces; restored
 * so the lock is taken after the wait loop exits.
 */
#define nfs_sblock(sb) { \
	while ((sb)->sb_flags & SB_LOCK) { \
		(sb)->sb_flags |= SB_WANT; \
		sleep((caddr_t)&(sb)->sb_flags, PZERO-1); \
	} \
	(sb)->sb_flags |= SB_LOCK; \
}
* nfs_sbwait() is simply sbwait() but at a negative priority so that it
* can not be interrupted by a signal.
sleep((caddr_t
)&sb
->sb_cc
, PZERO
-2);
/*
 * External data, mostly RPC constants in XDR (network) form.
 */
extern u_long rpc_reply, rpc_msgdenied, rpc_mismatch, rpc_vers,
	rpc_auth_unix, rpc_msgaccepted, rpc_call;
extern u_long nfs_prog, nfs_vers;
int (*nfsrv_procs
[NFS_NPROCS
])() = {
struct nfshost *nfshosth;	/* head of the shared per-server nfshost chain */
int nfsrexmtthresh = NFS_FISHY;	/* retransmit count at which "not responding" is logged */
* Initialize sockets and per-host congestion for a new NFS connection.
* We do not free the sockaddr if error.
register struct nfsmount
*nmp
;
int s
, error
, srvaddrlen
;
register struct nfshost
*nfshp
;
if (error
= socreate(mtod(saddr
, struct sockaddr
*)->sa_family
,
&nmp
->nm_so
, SOCK_DGRAM
, 0))
/* Unix sockets do not provide a local bind for server reply */
if (mtod(saddr
, struct sockaddr
*)->sa_family
== AF_UNIX
) {
static char client
[] = "/tmp/.nfs/nfsclient##";
m
= m_getclr(M_WAIT
, MT_SONAME
);
m
->m_len
= sizeof (client
) + 2;
sa
= mtod(m
, struct sockaddr
*);
#ifdef MSG_TRUNC /* Have sa_len to set? */
bcopy(client
, sa
->sa_data
, sizeof(client
));
if (++serial
>= 100) serial
= 0;
sa
->sa_data
[19] = (serial
/ 10) + '0';
sa
->sa_data
[20] = (serial
% 10) + '0';
error
= sobind(nmp
->nm_so
, m
);
if (firstserial
== serial
) break;
} while (error
== EADDRINUSE
);
if (error
= soconnect(nmp
->nm_so
, saddr
))
error
= soreserve(nmp
->nm_so
, /* get space ! */
nmp
->nm_wsize
+ 1024, /* one out */
(nmp
->nm_rsize
+ 1024) * 4); /* four in */
* Search mount list for existing server entry.
* Note, even though we have a sockaddr, it is not quite reliable
* enough to bcmp against. For instance, a sockaddr_in has a
* sin_zero field which is not reliably zeroed by user code (e.g.
* mount). So what we do as an attempt at transport independence
* is to get the peeraddr of our connected socket into a zeroed
* sockaddr. Then we cache that and compare against it. This is
* not exactly perfect. However it is not critical that it be, if
* we cannot match the sockaddr we will simply allocate a new nfshp
* per mount, which will disable the per-host congestion but
* everything else will work as normal.
m
= m_getclr(M_WAIT
, MT_SONAME
);
if (m
&& (*(nmp
->nm_so
->so_proto
->pr_usrreq
))(nmp
->nm_so
, PRU_PEERADDR
,
(struct mbuf
*)0, m
, (struct mbuf
*)0) == 0) {
srvaddrlen
= saddr
->m_len
;
for (nfshp
= nfshosth
; nfshp
; nfshp
= nfshp
->nh_next
) {
if (srvaddrlen
!= nfshp
->nh_salen
)
if (!bcmp(mtod(saddr
,caddr_t
),mtod(nfshp
->nh_sockaddr
,caddr_t
),
if (nfshp
) /* Have an existing mount host */
MALLOC(nfshp
,struct nfshost
*,sizeof *nfshp
,M_NFSMNT
,M_WAITOK
);
bzero((caddr_t
)nfshp
, sizeof *nfshp
);
nfshp
->nh_sockaddr
= saddr
;
nfshp
->nh_salen
= srvaddrlen
;
/* Initialize other non-zero congestion variables */
nfshp
->nh_currto
= NFS_TIMEO
;
nfshp
->nh_window
= 1; /* Initial send window */
nfshp
->nh_ssthresh
= NFS_MAXWINDOW
; /* Slowstart threshold */
if (nfshosth
) nfshosth
->nh_prev
= nfshp
; /* Chain in */
nfshp
->nh_next
= nfshosth
;
nmp
->nm_hostinfo
= nfshp
;
if (nmp
->nm_rto
== NFS_TIMEO
) {
nmp
->nm_rto
= nfshp
->nh_currto
;
nmp
->nm_rttvar
= nmp
->nm_rto
<< 1;
if (nmp
->nm_so
) (void) soclose(nmp
->nm_so
);
* NFS disconnect. Clean up and unlink.
register struct nfsmount
*nmp
;
register struct nfshost
*nfshp
;
if (nfshp
= nmp
->nm_hostinfo
) {
if (--nfshp
->nh_refcnt
<= 0) {
nfshp
->nh_next
->nh_prev
= nfshp
->nh_prev
;
nfshp
->nh_prev
->nh_next
= nfshp
->nh_next
;
nfshosth
= nfshp
->nh_next
;
/* If unix family, remove the nfsclient from /tmp */
if (mtod(nfshp
->nh_sockaddr
,
struct sockaddr
*)->sa_family
== AF_UNIX
) {
/* Lookup sa_data, do VOP_REMOVE... */
m_freem(nfshp
->nh_sockaddr
);
* This is a stripped down non-interruptible version of sosend().
nfs_send(so
, nam
, top
, flags
, siz
)
register struct socket
*so
;
if (error
= nfs_sockerr(so
, 1)) {
if (sbspace(&so
->so_snd
) < siz
) {
error
= (*so
->so_proto
->pr_usrreq
)(so
, PRU_SEND
, top
,
(struct mbuf
*)nam
, (struct mbuf
*)0, (struct mbuf
*)0);
* This is a stripped down datagram specific version of soreceive()
nfs_dgreceive(so
, msk
, mtch
, aname
, mp
)
register struct socket
*so
;
if (so
->so_rcv
.sb_cc
== 0) {
if (error
= nfs_sockerr(so
, 0)) {
panic("nfs_dgreceive 1");
nextrecord
= m
->m_nextpkt
;
/* Save sender's address */
if (m
->m_type
!= MT_SONAME
)
panic("nfs_dgreceive 1a");
so
->so_rcv
.sb_mb
= m
->m_next
;
MFREE(m
, so
->so_rcv
.sb_mb
);
/* Drop control mbuf's */
if (m
&& m
->m_type
== MT_RIGHTS
)
panic("nfs_dgreceive 2");
if (m
&& m
->m_type
== MT_CONTROL
) {
MFREE(m
, so
->so_rcv
.sb_mb
);
/* Dequeue packet from sockbuf */
if (m
->m_type
!= MT_DATA
&& m
->m_type
!= MT_HEADER
)
panic("nfs_dgreceive 3");
m
= so
->so_rcv
.sb_mb
= m
->m_next
;
so
->so_rcv
.sb_mb
= nextrecord
;
* Implement NFS client side datagram receive.
* We depend on the way that records are added to the sockbuf
* by sbappend*. In particular, each record (mbufs linked through m_next)
* must begin with an address, followed by optional MT_CONTROL mbuf
* and then zero or more mbufs of data.
* We must search through the list of received datagrams matching them
* with outstanding requests using the xid, until ours is found.
nfs_dgreply(so
, mntp
, myrep
)
register struct socket
*so
;
register struct nfsreq
*rep
;
register int error
= 0, s
;
struct rpc_replyhead replyh
;
/* Already received and queued for us, bye bye */
if (myrep
->r_mrep
!= NULL
) {
/* If we have run out of retries (hard mounts have bogus count) */
if (myrep
->r_rexmit
> myrep
->r_retry
) {
if (myrep
->r_flags
& R_TIMING
) {
myrep
->r_flags
&= ~R_TIMING
;
if (myrep
->r_flags
& R_SENT
) {
myrep
->r_flags
&= ~R_SENT
;
--mntp
->nm_hostinfo
->nh_sent
;
/* If count now 0, want to initiate new req */
if (error
= nfs_sockerr(so
, 0)) {
/* Allow signals to interrupt request? (nfs_timer wakes up) */
if ((mntp
->nm_flag
& NFSMNT_INT
) &&
u
.u_procp
->p_sig
& ~u
.u_procp
->p_sigmask
) {
if (mntp
->nm_rexmit
>= nfsrexmtthresh
&& logged
++ == 0)
uprintf("NFS server %s not responding, retrying\n",
* Take off the address, check for rights and ditch any control
nextrecord
= m
->m_nextpkt
;
if (m
->m_type
!= MT_SONAME
)
panic("nfs reply SONAME");
MFREE(m
, so
->so_rcv
.sb_mb
);
if (m
&& m
->m_type
== MT_RIGHTS
)
panic("nfs reply RIGHTS");
if (m
&& m
->m_type
== MT_CONTROL
) {
MFREE(m
, so
->so_rcv
.sb_mb
);
m
->m_nextpkt
= nextrecord
;
so
->so_rcv
.sb_mb
= nextrecord
;
* Get the xid and check that it is an rpc reply
if (m
->m_len
>= sizeof replyh
)
bcopy(mtod(m
, caddr_t
), (caddr_t
)&replyh
, sizeof replyh
);
caddr_t cp
= (caddr_t
)&replyh
;
int xfer
= (mp
->m_len
>= cnt
) ? cnt
: mp
->m_len
;
bcopy(mtod(mp
, caddr_t
), cp
, xfer
);
if (mp
== NULL
) { /* Insufficient length */
if (replyh
.r_rep
!= rpc_reply
) { /* Not a reply */
* Loop through the request list to match up the reply
* If no match, just drop the datagram
if (rep
= nfsreqh
.r_next
) {
while (rep
!= &nfsreqh
) {
/* The socket, being connected, will only queue matches */
if (replyh
.r_xid
== rep
->r_xid
&& so
== rep
->r_mntp
->nm_so
) {
if (rep
->r_mrep
) /* Already there - duplicate */
if (m
->m_type
!= MT_DATA
&& m
->m_type
!= MT_HEADER
)
m
= so
->so_rcv
.sb_mb
= m
->m_next
;
so
->so_rcv
.sb_mb
= nextrecord
;
if (rep
->r_flags
& R_TIMING
) {
rep
->r_flags
&= ~R_TIMING
;
mntp
->nm_rtt
= -1; /* re-arm timer */
if (rep
->r_flags
& R_SENT
) {
--mntp
->nm_hostinfo
->nh_sent
;
/* If count now 0, want to initiate new req */
if (rep
== myrep
) { /* This is success */
uprintf("NFS server %s responded\n",
/* Else wake up other sleeper and wait for next */
/* If not matched to request, drop it */
nfsstats
.rpcunexpected
++;
sbdroprecord(&so
->so_rcv
);
* nfs_request - goes something like this
* - fill in request struct
* - calls nfs_sosend() for first transmit
* - calls nfs_soreceive() to get reply
* - break down rpc header and return with nfs reply pointed to
* nb: always frees up mreq mbuf list
nfs_request(vp
, mreq
, xid
, idem
, mp
, mrp
, mdp
, dposp
)
register struct mbuf
*m
, *mrep
;
register struct nfsreq
*rep
;
MALLOC(rep
, struct nfsreq
*, sizeof(struct nfsreq
), M_NFSREQ
, M_WAITOK
);
if (mntp
->nm_flag
& NFSMNT_SOFT
)
rep
->r_retry
= mntp
->nm_retry
;
rep
->r_retry
= NFS_MAXREXMIT
+ 1; /* past clip limit */
rep
->r_flags
= rep
->r_rexmit
= 0;
/* Idempotency: add N * MINTIMEO to requests if not, else use 0 */
rep
->r_timer
= rep
->r_timerinit
= -(idem
* NFS_MINTIMEO
);
* Do the client side RPC.
/* Chain request into list of outstanding requests. Be sure
* to put it LAST so timer finds oldest requests first. */
if (reph
->r_prev
== NULL
) {
reph
->r_prev
->r_next
= rep
;
rep
->r_prev
= reph
->r_prev
;
* If backing off another request or avoiding congestion, don't
* send this one now but let timer do it. If not timing a request,
if (mntp
->nm_hostinfo
->nh_sent
> 0 &&
(mntp
->nm_hostinfo
->nh_currexmit
!= 0 ||
mntp
->nm_hostinfo
->nh_sent
>= mntp
->nm_hostinfo
->nh_window
)) {
++mntp
->nm_hostinfo
->nh_sent
; /* Inconsistent if can't NFSMCOPY */
rep
->r_flags
|= R_SENT
; /* But not a catastrophe */
if (mntp
->nm_rtt
== -1) {
rep
->r_flags
|= R_TIMING
;
* If we can get a packet to send, send it off...
* otherwise the timer will retransmit later
m
= NFSMCOPY(mreq
, 0, M_COPYALL
, M_WAIT
);
(void) nfs_send(mntp
->nm_so
, (struct mbuf
*)0, m
, 0, len
);
* Wait for the reply from our send or the timer's.
error
= nfs_dgreply(mntp
->nm_so
, mntp
, rep
);
* RPC done, unlink the request.
rep
->r_prev
->r_next
= rep
->r_next
;
rep
->r_next
->r_prev
= rep
->r_prev
;
FREE((caddr_t
)rep
, M_NFSREQ
);
* break down the rpc header and check if ok
dpos
= mtod(md
, caddr_t
);
nfsm_disect(p
, u_long
*, 5*NFSX_UNSIGNED
);
if (*p
++ == rpc_msgdenied
) {
* skip over the auth_verf, someday we may want to cache auth_short's
* for nfs_reqhead(), but for now just dump it
len
= nfsm_rndup(fxdr_unsigned(long, *p
));
nfsm_disect(p
, u_long
*, NFSX_UNSIGNED
);
nfsm_disect(p
, u_long
*, NFSX_UNSIGNED
);
error
= fxdr_unsigned(int, *p
);
return (EPROTONOSUPPORT
);
* Get a request for the server main loop
* - receive a request via. nfs_soreceive()
* - fill in the cred struct.
nfs_getreq(so
, prog
, vers
, maxproc
, nam
, mrp
, mdp
, dposp
, retxid
, proc
, cr
,
register struct ucred
*cr
;
if (error
= nfs_dgreceive(so
, msk
, mtch
, nam
, &mrep
))
dpos
= mtod(mrep
, caddr_t
);
nfsm_disect(p
, u_long
*, 10*NFSX_UNSIGNED
);
*proc
= fxdr_unsigned(u_long
, *p
++);
if (*proc
== NFSPROC_NULL
) {
if (*proc
> maxproc
|| *p
++ != rpc_auth_unix
) {
(void) fxdr_unsigned(int, *p
++);
len
= fxdr_unsigned(int, *++p
);
nfsm_adv(nfsm_rndup(len
));
nfsm_disect(p
, u_long
*, 3*NFSX_UNSIGNED
);
cr
->cr_uid
= fxdr_unsigned(uid_t
, *p
++);
cr
->cr_gid
= fxdr_unsigned(gid_t
, *p
++);
len
= fxdr_unsigned(int, *p
);
nfsm_disect(p
, u_long
*, (len
+ 2)*NFSX_UNSIGNED
);
for (i
= 1; i
<= len
; i
++)
cr
->cr_groups
[i
] = fxdr_unsigned(gid_t
, *p
++);
cr
->cr_ngroups
= len
+ 1;
* Do we have any use for the verifier.
* According to the "Remote Procedure Call Protocol Spec." it
* should be AUTH_NULL, but some clients make it AUTH_UNIX?
* For now, just skip over it
len
= fxdr_unsigned(int, *++p
);
nfsm_adv(nfsm_rndup(len
));
* Generate the rpc reply header
* siz arg. is used to decide if adding a cluster is worthwhile
nfs_rephead(siz
, retxid
, err
, mrq
, mbp
, bposp
)
struct mbuf
*mreq
, *mb
, *mb2
;
if ((siz
+RPC_REPLYSIZ
) > MHLEN
)
p
= mtod(mreq
, u_long
*);
mreq
->m_len
= 6*NFSX_UNSIGNED
;
bpos
= ((caddr_t
)p
)+mreq
->m_len
;
if (err
== ERPCMISMATCH
) {
*p
= txdr_unsigned(RPC_PROGUNAVAIL
);
*p
= txdr_unsigned(RPC_PROGMISMATCH
);
nfsm_build(p
, u_long
*, 2*NFSX_UNSIGNED
);
*p
= txdr_unsigned(2); /* someday 3 */
*p
= txdr_unsigned(RPC_PROCUNAVAIL
);
nfsm_build(p
, u_long
*, NFSX_UNSIGNED
);
if (err
!= 0 && err
!= VNOVAL
)
* Scan the nfsreq list and retranmit any requests that have timed out
* To avoid retransmission attempts on STREAM sockets (in the future) make
* sure to set the r_retry field to 0 (implies nm_retry == 0).
register struct nfsreq
*rep
;
register struct socket
*so
;
register struct nfsmount
*mntp
;
if (rep
) for ( ; rep
!= &nfsreqh
; rep
= rep
->r_next
) {
if (rep
->r_flags
& R_TIMING
) /* update rtt in mount */
/* If not timed out or reply already received, skip */
if (++rep
->r_timer
< mntp
->nm_rto
|| rep
->r_mrep
)
/* Do backoff and save new timeout in mount */
if (rep
->r_flags
& R_TIMING
) {
rep
->r_flags
&= ~R_TIMING
;
if (rep
->r_flags
& R_SENT
) {
--mntp
->nm_hostinfo
->nh_sent
;
/* Check state of socket, cf nfs_send */
if (error
= nfs_sockerr(so
, 1))
if (sbspace(&so
->so_snd
) < rep
->r_msiz
)
/* Check for too many retries, cf nfs_dgreply */
if (++rep
->r_rexmit
> NFS_MAXREXMIT
) /* clip */
rep
->r_rexmit
= NFS_MAXREXMIT
;
if (rep
->r_rexmit
> rep
->r_retry
) /* too many */
/* Check for congestion control, cf nfs_request */
if (mntp
->nm_hostinfo
->nh_sent
>= mntp
->nm_hostinfo
->nh_window
)
m
= NFSMCOPY(rep
->r_mreq
, 0, M_COPYALL
, M_DONTWAIT
);
m
->m_pkthdr
.len
= rep
->r_msiz
;
(void)(*so
->so_proto
->pr_usrreq
)(so
, PRU_SEND
, m
,
(struct mbuf
*)0, (struct mbuf
*)0, (struct mbuf
*)0);
/* We need to time the request even though we're
* retransmitting, in order to maintain backoff. */
++mntp
->nm_hostinfo
->nh_sent
;
rep
->r_flags
|= (R_SENT
|R_TIMING
);
rep
->r_timer
= rep
->r_timerinit
;
/* If error or interruptible mount, give user a look */
if (error
|| (mntp
->nm_flag
& NFSMNT_INT
))
timeout(nfs_timer
, (caddr_t
)0, hz
/NFS_HZ
);
* NFS timer update and backoff. The "Jacobson/Karels/Karn" scheme is
* used here. The timer state is held in the nfsmount structure and
* a single request is used to clock the response. When successful
* the rtt smoothing in nfs_updatetimer is used, when failed the backoff
* is done by nfs_backofftimer. We also log failure messages in these
* Congestion variables are held in the nfshost structure which
* is referenced by nfsmounts and shared per-server. This separation
* makes it possible to do per-mount timing which allows varying disk
* access times to be dealt with, while preserving a network oriented
* congestion control scheme.
* The windowing implements the Jacobson/Karels slowstart algorithm
* with adjusted scaling factors. We start with one request, then send
* 4 more after each success until the ssthresh limit is reached, then
* we increment at a rate proportional to the window. On failure, we
* remember 3/4 the current window and clamp the send limit to 1. Note
* ICMP source quench is not reflected in so->so_error so we ignore that
* NFS behaves much more like a transport protocol with these changes,
* shedding the teenage pedal-to-the-metal tendencies of "other"
* Timers and congestion avoidance by Tom Talpey, Open Software Foundation.
* The TCP algorithm was not forgiving enough. Because the NFS server
* responds only after performing lookups/diskio/etc, we have to be
* more prepared to accept a spiky variance. The TCP algorithm is:
* TCP_RTO(mntp) ((((mntp)->nm_srtt >> 2) + (mntp)->nm_rttvar) >> 1)
/*
 * Retransmission timeout estimate: smoothed rtt plus variance.
 * nm_srtt is kept scaled by 8 (see the nm_srtt >> 3 update in the rtt
 * smoothing code), hence the shift; compare the TCP form noted above.
 */
#define NFS_RTO(mntp) (((mntp)->nm_srtt >> 3) + (mntp)->nm_rttvar)
register struct nfsmount
*mntp
;
register struct nfshost
*nfshp
= mntp
->nm_hostinfo
;
/* If retransmitted, clear and return */
if (mntp
->nm_rexmit
|| nfshp
->nh_currexmit
) {
if (nfshp
->nh_currexmit
>= nfsrexmtthresh
)
nfs_log("NFS server %s OK\n", mntp
->nm_host
);
mntp
->nm_rexmit
= nfshp
->nh_currexmit
= 0;
/* If have a measurement, do smoothing */
delta
= mntp
->nm_rtt
- (mntp
->nm_srtt
>> 3);
if ((mntp
->nm_srtt
+= delta
) <= 0)
delta
-= (mntp
->nm_rttvar
>> 2);
if ((mntp
->nm_rttvar
+= delta
) <= 0)
mntp
->nm_rttvar
= mntp
->nm_rtt
<< 1;
if (mntp
->nm_rttvar
== 0) mntp
->nm_rttvar
= 2;
mntp
->nm_srtt
= mntp
->nm_rttvar
<< 2;
/* Compute new Retransmission TimeOut and clip */
mntp
->nm_rto
= NFS_RTO(mntp
);
if (mntp
->nm_rto
< NFS_MINTIMEO
)
mntp
->nm_rto
= NFS_MINTIMEO
;
else if (mntp
->nm_rto
> NFS_MAXTIMEO
)
mntp
->nm_rto
= NFS_MAXTIMEO
;
nfshp
->nh_currto
= mntp
->nm_rto
;
/* Update window estimate */
if (nfshp
->nh_window
< nfshp
->nh_ssthresh
) /* quickly */
register long incr
= ++nfshp
->nh_winext
;
incr
= (incr
* incr
) / nfshp
->nh_window
;
if (nfshp
->nh_window
> NFS_MAXWINDOW
)
nfshp
->nh_window
= NFS_MAXWINDOW
;
register struct nfsmount
*mntp
;
register struct nfshost
*nfshp
= mntp
->nm_hostinfo
;
register unsigned long newrto
;
if (++mntp
->nm_rexmit
> 8 * sizeof mntp
->nm_rto
)
mntp
->nm_rexmit
= 8 * sizeof mntp
->nm_rto
;
/* Back off RTO exponentially */
newrto
<<= (mntp
->nm_rexmit
- 1);
if (newrto
== 0 || newrto
> NFS_MAXTIMEO
)
mntp
->nm_rto
= nfshp
->nh_currto
= newrto
;
/* If too many retries, message, assume a bogus RTT and re-measure */
if (nfshp
->nh_currexmit
< mntp
->nm_rexmit
) {
nfshp
->nh_currexmit
= mntp
->nm_rexmit
;
if (nfshp
->nh_currexmit
>= nfsrexmtthresh
) {
if (nfshp
->nh_currexmit
== nfsrexmtthresh
) {
nfs_log("NFS server %s not responding\n",
mntp
->nm_rttvar
+= (mntp
->nm_srtt
>> 2);
/* The routing invalidation should be a usrreq PRU */
if (mtod(nfshp
->nh_sockaddr
,
struct sockaddr
*)->sa_family
== AF_INET
)
in_losing(mntp
->nm_so
->so_pcb
);
/* Close down window but remember this point (3/4 current) for later */
nfshp
->nh_ssthresh
= ((nfshp
->nh_window
<< 1) + nfshp
->nh_window
) >> 2;
* Not all errors are fatal. The closed checks deal
* with errors a little strangely.
if (sending
&& (so
->so_state
& SS_CANTSENDMORE
)) {
switch (so
->so_error
) { /* inhibit certain errors */
default: /* return all others */
printf("nfs_sockerr: error %d on %s\n", so
->so_error
,
sending
?"send":"receive");
if (!sending
&& (so
->so_state
& SS_CANTRCVMORE
)) {
so
->so_error
= 0; /* (no error) */