2 * Copyright (c) 1989, 1991, 1993, 1995
3 * The Regents of the University of California. All rights reserved.
5 * This code is derived from software contributed to Berkeley by
6 * Rick Macklem at The University of Guelph.
8 * Redistribution and use in source and binary forms, with or without
9 * modification, are permitted provided that the following conditions
11 * 1. Redistributions of source code must retain the above copyright
12 * notice, this list of conditions and the following disclaimer.
13 * 2. Redistributions in binary form must reproduce the above copyright
14 * notice, this list of conditions and the following disclaimer in the
15 * documentation and/or other materials provided with the distribution.
16 * 3. All advertising materials mentioning features or use of this software
17 * must display the following acknowledgement:
18 * This product includes software developed by the University of
19 * California, Berkeley and its contributors.
20 * 4. Neither the name of the University nor the names of its contributors
21 * may be used to endorse or promote products derived from this software
22 * without specific prior written permission.
24 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
25 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
26 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
27 * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
28 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
29 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
30 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
31 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
32 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
33 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
36 * @(#)nfs_socket.c 8.5 (Berkeley) 3/30/95
37 * $FreeBSD: src/sys/nfs/nfs_socket.c,v 1.60.2.6 2003/03/26 01:44:46 alfred Exp $
38 * $DragonFly: src/sys/vfs/nfs/nfs_socket.c,v 1.45 2007/05/18 17:05:13 dillon Exp $
42 * Socket operations for use by nfs
45 #include <sys/param.h>
46 #include <sys/systm.h>
48 #include <sys/malloc.h>
49 #include <sys/mount.h>
50 #include <sys/kernel.h>
52 #include <sys/vnode.h>
53 #include <sys/fcntl.h>
54 #include <sys/protosw.h>
55 #include <sys/resourcevar.h>
56 #include <sys/socket.h>
57 #include <sys/socketvar.h>
58 #include <sys/socketops.h>
59 #include <sys/syslog.h>
60 #include <sys/thread.h>
61 #include <sys/tprintf.h>
62 #include <sys/sysctl.h>
63 #include <sys/signalvar.h>
64 #include <sys/mutex.h>
66 #include <sys/signal2.h>
67 #include <sys/mutex2.h>
69 #include <netinet/in.h>
70 #include <netinet/tcp.h>
71 #include <sys/thread2.h>
77 #include "nfsm_subs.h"
86 * Estimate rto for an nfs rpc sent via an unreliable datagram.
87 * Use the mean and mean deviation of rtt for the appropriate type of rpc
88 * for the frequent rpcs and a default for the others.
89 * The justification for doing "other" this way is that these rpcs
90 * happen so infrequently that timer estimates would probably be stale.
91 * Also, since many of these rpcs are
92 * non-idempotent, a conservative timeout is desired.
93 * getattr, lookup - A+2D
97 #define NFS_RTO(n, t) \
98 ((t) == 0 ? (n)->nm_timeo : \
99 ((t) < 3 ? \
100 (((((n)->nm_srtt[t-1] + 3) >> 2) + (n)->nm_sdrtt[t-1] + 1) >> 1) : \
101 ((((n)->nm_srtt[t-1] + 7) >> 3) + (n)->nm_sdrtt[t-1] + 1)))
102 #define NFS_SRTT(r) (r)->r_nmp->nm_srtt[proct[(r)->r_procnum] - 1]
103 #define NFS_SDRTT(r) (r)->r_nmp->nm_sdrtt[proct[(r)->r_procnum] - 1]
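/*
 * Illustrative arithmetic, not part of the original code: nm_srtt[] is
 * kept scaled by 8 and nm_sdrtt[] by 4 (see the initialization in
 * nfs_connect() and the update in nfs_reply()).  Assuming a smoothed
 * rtt of 2 ticks (srtt = 16) and a deviation of 1 tick (sdrtt = 4),
 * NFS_RTO() gives roughly A+2D for the fast classes and A+4D for the
 * slow ones:
 *
 *	(((16 + 3) >> 2) + 4 + 1) >> 1 = 4 ticks   (timer classes 1-2)
 *	((16 + 7) >> 3) + 4 + 1        = 7 ticks   (timer classes 3-4)
 */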
106 * Defines which timer to use for the procnum.
113 static int proct[NFS_NPROCS] = {
114 0, 1, 0, 2, 1, 3, 3, 4, 0, 0, 0, 0, 0, 0, 0, 0, 3, 3, 0, 0, 0, 0, 0,
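/*
 * For illustration (not in the original source): proct[1] (getattr)
 * and proct[3] (lookup) select timer classes 1 and 2, proct[6] and
 * proct[7] (read, write) select classes 3 and 4, and a zero entry
 * means NFS_RTO() falls back to the static nm_timeo.
 */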
118 static int nfs_realign_test;
119 static int nfs_realign_count;
120 static int nfs_bufpackets = 4;
121 static int nfs_timer_raced;
123 SYSCTL_DECL(_vfs_nfs);
125 SYSCTL_INT(_vfs_nfs, OID_AUTO, realign_test, CTLFLAG_RW, &nfs_realign_test, 0, "");
126 SYSCTL_INT(_vfs_nfs, OID_AUTO, realign_count, CTLFLAG_RW, &nfs_realign_count, 0, "");
127 SYSCTL_INT(_vfs_nfs, OID_AUTO, bufpackets, CTLFLAG_RW, &nfs_bufpackets, 0, "");
129 static int nfs_request_setup(nfsm_info_t info);
130 static int nfs_request_auth(struct nfsreq *rep);
131 static int nfs_request_try(struct nfsreq *rep);
132 static int nfs_request_waitreply(struct nfsreq *rep);
133 static int nfs_request_processreply(nfsm_info_t info, int);
136 * There is a congestion window for outstanding rpcs maintained per mount
137 * point. The cwnd size is adjusted in roughly the way that:
138 * Van Jacobson, Congestion Avoidance and Control, in "Proceedings of
139 * SIGCOMM '88". ACM, August 1988.
140 * describes for TCP. The cwnd size is chopped in half on a retransmit timeout
141 * and incremented by 1/cwnd when each rpc reply is received and a full cwnd
142 * of rpcs is in progress.
143 * (The sent count and cwnd are scaled for integer arith.)
144 * Variants of "slow start" were tried and were found to be too much of a
145 * performance hit (ave. rtt 3 times larger), which I suspect is
146 * due to the large rtt that nfs rpcs have.
148 #define NFS_CWNDSCALE 256
149 #define NFS_MAXCWND (NFS_CWNDSCALE * 32)
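/*
 * Illustrative numbers, not part of the original code: with
 * NFS_CWNDSCALE 256, the initial window of NFS_MAXCWND / 2 = 4096
 * corresponds to 16 outstanding rpcs.  When a full window is in use,
 * each reply grows the window in nfs_reply() by roughly
 * CWNDSCALE/cwnd, e.g. at cwnd = 4096:
 *
 *	cwnd += (256 * 256 + (4096 >> 1)) / 4096;   becomes +16
 *
 * so about 16 replies admit one more outstanding rpc, while a
 * retransmit timeout halves the window (see nfs_timer_req()).
 */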
150 static int nfs_backoff[8] = { 2, 4, 8, 16, 32, 64, 128, 256, };
152 struct nfsrtt nfsrtt;
153 struct callout nfs_timer_handle;
155 static int nfs_msg (struct thread *,char *,char *);
156 static int nfs_rcvlock (struct nfsreq *);
157 static void nfs_rcvunlock (struct nfsreq *);
158 static void nfs_realign (struct mbuf **pm, int hsiz);
159 static int nfs_receive (struct nfsreq *rep, struct sockaddr **aname,
161 static void nfs_softterm (struct nfsreq *rep);
162 static int nfs_reconnect (struct nfsreq *rep);
164 static int nfsrv_getstream (struct nfssvc_sock *, int, int *);
165 static void nfs_timer_req(struct nfsreq *req);
167 int (*nfsrv3_procs[NFS_NPROCS]) (struct nfsrv_descript *nd,
168 struct nfssvc_sock *slp,
170 struct mbuf **mreqp) = {
198 #endif /* NFS_NOSERVER */
201 * Initialize sockets and congestion for a new NFS connection.
202 * We do not free the sockaddr on error.
205 nfs_connect(struct nfsmount *nmp, struct nfsreq *rep)
208 int error, rcvreserve, sndreserve;
210 struct sockaddr *saddr;
211 struct sockaddr_in *sin;
212 struct thread *td = &thread0; /* only used for socreate and sobind */
216 error = socreate(saddr->sa_family, &nmp->nm_so, nmp->nm_sotype,
217 nmp->nm_soproto, td);
221 nmp->nm_soflags = so->so_proto->pr_flags;
224 * Some servers require that the client port be a reserved port number.
226 if (saddr->sa_family == AF_INET && (nmp->nm_flag & NFSMNT_RESVPORT)) {
229 struct sockaddr_in ssin;
231 bzero(&sopt, sizeof sopt);
232 ip = IP_PORTRANGE_LOW;
233 sopt.sopt_level = IPPROTO_IP;
234 sopt.sopt_name = IP_PORTRANGE;
235 sopt.sopt_val = (void *)&ip;
236 sopt.sopt_valsize = sizeof(ip);
238 error = sosetopt(so, &sopt);
241 bzero(&ssin, sizeof ssin);
243 sin->sin_len = sizeof (struct sockaddr_in);
244 sin->sin_family = AF_INET;
245 sin->sin_addr.s_addr = INADDR_ANY;
246 sin->sin_port = htons(0);
247 error = sobind(so, (struct sockaddr *)sin, td);
250 bzero(&sopt, sizeof sopt);
251 ip = IP_PORTRANGE_DEFAULT;
252 sopt.sopt_level = IPPROTO_IP;
253 sopt.sopt_name = IP_PORTRANGE;
254 sopt.sopt_val = (void *)&ip;
255 sopt.sopt_valsize = sizeof(ip);
257 error = sosetopt(so, &sopt);
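/*
 * Illustrative note, not in the original source: with IP_PORTRANGE_LOW
 * in effect the bind to port 0 above is satisfied from the range below
 * IPPORT_RESERVED (1024), which is what servers that insist on
 * reserved client ports check for; the option is then restored to
 * IP_PORTRANGE_DEFAULT so later port allocations are unaffected.
 */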
263 * Protocols that do not require connections may be optionally left
264 * unconnected for servers that reply from a port other than NFS_PORT.
266 if (nmp->nm_flag & NFSMNT_NOCONN) {
267 if (nmp->nm_soflags & PR_CONNREQUIRED) {
272 error = soconnect(so, nmp->nm_nam, td);
277 * Wait for the connection to complete. Cribbed from the
278 * connect system call but with the wait timing out so
279 * that interruptible mounts don't hang here for a long time.
282 while ((so->so_state & SS_ISCONNECTING) && so->so_error == 0) {
283 (void) tsleep((caddr_t)&so->so_timeo, 0,
285 if ((so->so_state & SS_ISCONNECTING) &&
286 so->so_error == 0 && rep &&
287 (error = nfs_sigintr(nmp, rep, rep->r_td)) != 0){
288 so->so_state &= ~SS_ISCONNECTING;
294 error = so->so_error;
301 so->so_rcv.ssb_timeo = (5 * hz);
302 so->so_snd.ssb_timeo = (5 * hz);
305 * Get buffer reservation size from sysctl, but impose reasonable
308 pktscale = nfs_bufpackets;
314 if (nmp->nm_sotype == SOCK_DGRAM) {
315 sndreserve = (nmp->nm_wsize + NFS_MAXPKTHDR) * pktscale;
316 rcvreserve = (max(nmp->nm_rsize, nmp->nm_readdirsize) +
317 NFS_MAXPKTHDR) * pktscale;
318 } else if (nmp->nm_sotype == SOCK_SEQPACKET) {
319 sndreserve = (nmp->nm_wsize + NFS_MAXPKTHDR) * pktscale;
320 rcvreserve = (max(nmp->nm_rsize, nmp->nm_readdirsize) +
321 NFS_MAXPKTHDR) * pktscale;
323 if (nmp->nm_sotype != SOCK_STREAM)
324 panic("nfscon sotype");
325 if (so->so_proto->pr_flags & PR_CONNREQUIRED) {
329 bzero(&sopt, sizeof sopt);
330 sopt.sopt_level = SOL_SOCKET;
331 sopt.sopt_name = SO_KEEPALIVE;
332 sopt.sopt_val = &val;
333 sopt.sopt_valsize = sizeof val;
337 if (so->so_proto->pr_protocol == IPPROTO_TCP) {
341 bzero(&sopt, sizeof sopt);
342 sopt.sopt_level = IPPROTO_TCP;
343 sopt.sopt_name = TCP_NODELAY;
344 sopt.sopt_val = &val;
345 sopt.sopt_valsize = sizeof val;
349 sndreserve = (nmp->nm_wsize + NFS_MAXPKTHDR +
350 sizeof (u_int32_t)) * pktscale;
351 rcvreserve = (nmp->nm_rsize + NFS_MAXPKTHDR +
352 sizeof (u_int32_t)) * pktscale;
354 error = soreserve(so, sndreserve, rcvreserve,
355 &td->td_proc->p_rlimit[RLIMIT_SBSIZE]);
358 so->so_rcv.ssb_flags |= SSB_NOINTR;
359 so->so_snd.ssb_flags |= SSB_NOINTR;
361 /* Initialize other non-zero congestion variables */
362 nmp->nm_srtt[0] = nmp->nm_srtt[1] = nmp->nm_srtt[2] =
363 nmp->nm_srtt[3] = (NFS_TIMEO << 3);
364 nmp->nm_sdrtt[0] = nmp->nm_sdrtt[1] = nmp->nm_sdrtt[2] =
365 nmp->nm_sdrtt[3] = 0;
366 nmp->nm_cwnd = NFS_MAXCWND / 2; /* Initial send window */
368 nmp->nm_timeouts = 0;
378 * Called when a connection is broken on a reliable protocol.
379 * - clean up the old socket
380 * - nfs_connect() again
381 * - set R_MUSTRESEND for all outstanding requests on mount point
382 * If this fails the mount point is DEAD!
383 * nb: Must be called with the nfs_sndlock() set on the mount point.
386 nfs_reconnect(struct nfsreq *rep)
389 struct nfsmount *nmp = rep->r_nmp;
393 while ((error = nfs_connect(nmp, rep)) != 0) {
394 if (error == EINTR || error == ERESTART)
396 (void) tsleep((caddr_t)&lbolt, 0, "nfscon", 0);
400 * Loop through outstanding request list and fix up all requests
404 TAILQ_FOREACH(req, &nmp->nm_reqq, r_chain) {
405 KKASSERT(req->r_nmp == nmp);
406 req->r_flags |= R_MUSTRESEND;
413 * NFS disconnect. Clean up and unlink.
416 nfs_disconnect(struct nfsmount *nmp)
423 soshutdown(so, SHUT_RDWR);
424 soclose(so, FNONBLOCK);
429 nfs_safedisconnect(struct nfsmount *nmp)
431 struct nfsreq dummyreq;
433 bzero(&dummyreq, sizeof(dummyreq));
434 dummyreq.r_nmp = nmp;
435 dummyreq.r_td = NULL;
436 mtx_link_init(&dummyreq.r_link);
437 nfs_rcvlock(&dummyreq);
439 nfs_rcvunlock(&dummyreq);
443 * This is the nfs send routine. For connection based socket types, it
444 * must be called with an nfs_sndlock() on the socket.
445 * "rep == NULL" indicates that it has been called from a server.
446 * For the client side:
447 * - return EINTR if the RPC is terminated, 0 otherwise
448 * - set R_MUSTRESEND if the send fails for any reason
449 * - do any cleanup required by recoverable socket errors (?)
450 * For the server side:
451 * - return EINTR or ERESTART if interrupted by a signal
452 * - return EPIPE if a connection is lost for connection based sockets (TCP...)
453 * - do any cleanup required by recoverable socket errors (?)
456 nfs_send(struct socket *so, struct sockaddr *nam, struct mbuf *top,
459 struct sockaddr *sendnam;
460 int error, soflags, flags;
463 if (rep->r_flags & R_SOFTTERM) {
467 if ((so = rep->r_nmp->nm_so) == NULL) {
468 rep->r_flags |= R_MUSTRESEND;
472 rep->r_flags &= ~R_MUSTRESEND;
473 soflags = rep->r_nmp->nm_soflags;
475 soflags = so->so_proto->pr_flags;
476 if ((soflags & PR_CONNREQUIRED) || (so->so_state & SS_ISCONNECTED))
480 if (so->so_type == SOCK_SEQPACKET)
485 error = so_pru_sosend(so, sendnam, NULL, top, NULL, flags,
488 * ENOBUFS for dgram sockets is transient and non fatal.
489 * No need to log, and no need to break a soft mount.
491 if (error == ENOBUFS && so->so_type == SOCK_DGRAM) {
493 if (rep) /* do backoff retransmit on client */
494 rep->r_flags |= R_MUSTRESEND;
499 log(LOG_INFO, "nfs send error %d for server %s\n",error,
500 rep->r_nmp->nm_mountp->mnt_stat.f_mntfromname);
502 * Deal with errors for the client side.
504 if (rep->r_flags & R_SOFTTERM)
507 rep->r_flags |= R_MUSTRESEND;
509 log(LOG_INFO, "nfsd send error %d\n", error);
512 * Handle any recoverable (soft) socket errors here. (?)
514 if (error != EINTR && error != ERESTART &&
515 error != EWOULDBLOCK && error != EPIPE)
522 * Receive a Sun RPC Request/Reply. For SOCK_DGRAM, the work is all
523 * done by soreceive(), but for SOCK_STREAM we must deal with the Record
524 * Mark and consolidate the data into a new mbuf list.
525 * nb: Sometimes TCP passes the data up to soreceive() in long lists of
527 * For SOCK_STREAM we must be very careful to read an entire record once
528 * we have read any of it, even if the system call has been interrupted.
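/*
 * Record mark sketch (illustration only, not from the original
 * source): on a stream socket every RPC record is preceded by a
 * 4-byte marker in network byte order; the high bit flags the last
 * fragment of a record and the low 31 bits give the fragment length:
 *
 *	u_int32_t mark = ntohl(rawmark);
 *	int lastfrag = (mark & 0x80000000) != 0;
 *	u_int32_t fraglen = mark & 0x7fffffff;
 *
 * The code below reads the marker with MSG_WAITALL and then reads
 * exactly that many more bytes to reassemble the record.
 */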
531 nfs_receive(struct nfsreq *rep, struct sockaddr **aname, struct mbuf **mp)
538 struct mbuf *control;
540 struct sockaddr **getnam;
541 int error, sotype, rcvflg;
542 struct thread *td = curthread; /* XXX */
545 * Set up arguments for soreceive()
549 sotype = rep->r_nmp->nm_sotype;
552 * For reliable protocols, lock against other senders/receivers
553 * in case a reconnect is necessary.
554 * For SOCK_STREAM, first get the Record Mark to find out how much
555 * more there is to get.
556 * We must lock the socket against other receivers
557 * until we have an entire rpc request/reply.
559 if (sotype != SOCK_DGRAM) {
560 error = nfs_sndlock(rep);
565 * Check for fatal errors and resending request.
568 * Ugh: If a reconnect attempt just happened, nm_so
569 * would have changed. NULL indicates a failed
570 * attempt that has essentially shut down this
573 if (rep->r_mrep || (rep->r_flags & R_SOFTTERM)) {
577 so = rep->r_nmp->nm_so;
579 error = nfs_reconnect(rep);
586 while (rep->r_flags & R_MUSTRESEND) {
587 m = m_copym(rep->r_mreq, 0, M_COPYALL, MB_WAIT);
588 nfsstats.rpcretries++;
589 error = nfs_send(so, rep->r_nmp->nm_nam, m, rep);
591 if (error == EINTR || error == ERESTART ||
592 (error = nfs_reconnect(rep)) != 0) {
600 if (sotype == SOCK_STREAM) {
602 * Get the length marker from the stream
604 aio.iov_base = (caddr_t)&len;
605 aio.iov_len = sizeof(u_int32_t);
608 auio.uio_segflg = UIO_SYSSPACE;
609 auio.uio_rw = UIO_READ;
611 auio.uio_resid = sizeof(u_int32_t);
614 rcvflg = MSG_WAITALL;
615 error = so_pru_soreceive(so, NULL, &auio, NULL,
617 if (error == EWOULDBLOCK && rep) {
618 if (rep->r_flags & R_SOFTTERM)
621 } while (error == EWOULDBLOCK);
623 if (error == 0 && auio.uio_resid > 0) {
625 * Only log short packets if not EOF
627 if (auio.uio_resid != sizeof(u_int32_t))
629 "short receive (%d/%d) from nfs server %s\n",
630 (int)(sizeof(u_int32_t) - auio.uio_resid),
631 (int)sizeof(u_int32_t),
632 rep->r_nmp->nm_mountp->mnt_stat.f_mntfromname);
637 len = ntohl(len) & ~0x80000000;
639 * This is SERIOUS! We are out of sync with the sender
640 * and forcing a disconnect/reconnect is all I can do.
642 if (len > NFS_MAXPACKET) {
643 log(LOG_ERR, "%s (%d) from nfs server %s\n",
644 "impossible packet length",
646 rep->r_nmp->nm_mountp->mnt_stat.f_mntfromname);
652 * Get the rest of the packet as an mbuf chain
656 rcvflg = MSG_WAITALL;
657 error = so_pru_soreceive(so, NULL, NULL, &sio,
659 } while (error == EWOULDBLOCK || error == EINTR ||
661 if (error == 0 && sio.sb_cc != len) {
664 "short receive (%d/%d) from nfs server %s\n",
665 len - auio.uio_resid, len,
666 rep->r_nmp->nm_mountp->mnt_stat.f_mntfromname);
672 * Non-stream, so get the whole packet by not
673 * specifying MSG_WAITALL and by specifying a large
676 * We have no use for control messages, but must grab them
677 * and then throw them away so we know what is going
680 sbinit(&sio, 100000000);
683 error = so_pru_soreceive(so, NULL, NULL, &sio,
687 if (error == EWOULDBLOCK && rep) {
688 if (rep->r_flags & R_SOFTTERM) {
693 } while (error == EWOULDBLOCK ||
694 (error == 0 && sio.sb_mb == NULL && control));
695 if ((rcvflg & MSG_EOR) == 0)
697 if (error == 0 && sio.sb_mb == NULL)
703 if (error && error != EINTR && error != ERESTART) {
706 if (error != EPIPE) {
708 "receive error %d from nfs server %s\n",
710 rep->r_nmp->nm_mountp->mnt_stat.f_mntfromname);
712 error = nfs_sndlock(rep);
714 error = nfs_reconnect(rep);
722 if ((so = rep->r_nmp->nm_so) == NULL)
724 if (so->so_state & SS_ISCONNECTED)
728 sbinit(&sio, 100000000);
731 error = so_pru_soreceive(so, getnam, NULL, &sio,
733 if (error == EWOULDBLOCK &&
734 (rep->r_flags & R_SOFTTERM)) {
738 } while (error == EWOULDBLOCK);
747 * Search for any mbufs that are not a multiple of 4 bytes long
748 * or with m_data not longword aligned.
749 * These could cause pointer alignment problems, so copy them to
750 * well aligned mbufs.
752 nfs_realign(mp, 5 * NFSX_UNSIGNED);
757 * Implement receipt of reply on a socket.
758 * We must search through the list of received datagrams matching them
759 * with outstanding requests using the xid, until ours is found.
763 nfs_reply(struct nfsreq *myrep)
766 struct nfsmount *nmp = myrep->r_nmp;
767 struct sockaddr *nam;
771 struct nfsm_info info;
775 * Loop around until we get our own reply
779 * Lock against other receivers so that I don't get stuck in
780 * sbwait() after someone else has received my reply for me.
781 * Also necessary for connection based protocols to avoid
782 * race conditions during a reconnect.
784 * If nfs_rcvlock() returns EALREADY, that means that
785 * the reply has already been received by another
786 * process and we can return immediately. In this
787 * case, the lock is not taken to avoid races with
792 error = nfs_rcvlock(myrep);
793 if (error == EALREADY)
798 * Get the next Rpc reply off the socket
800 error = nfs_receive(myrep, &nam, &info.mrep);
801 nfs_rcvunlock(myrep);
804 * Ignore routing errors on connectionless protocols??
806 if (NFSIGNORE_SOERROR(nmp->nm_soflags, error)) {
807 nmp->nm_so->so_error = 0;
808 if (myrep->r_flags & R_GETONEREP)
818 * Get the xid and check that it is an rpc reply
821 info.dpos = mtod(info.md, caddr_t);
822 NULLOUT(tl = nfsm_dissect(&info, 2*NFSX_UNSIGNED));
824 if (*tl != rpc_reply) {
825 nfsstats.rpcinvalid++;
829 if (myrep->r_flags & R_GETONEREP)
835 * Loop through the request list to match up the reply
836 * Iff no match, just drop the datagram. On match, set
837 * r_mrep atomically to prevent the timer from messing
838 * around with the request after we have exited the critical
842 TAILQ_FOREACH(rep, &nmp->nm_reqq, r_chain) {
843 if (rep->r_mrep == NULL && rxid == rep->r_xid)
849 * Fill in the rest of the reply if we found a match.
853 rep->r_dpos = info.dpos;
857 rt = &nfsrtt.rttl[nfsrtt.pos];
858 rt->proc = rep->r_procnum;
859 rt->rto = NFS_RTO(nmp, proct[rep->r_procnum]);
860 rt->sent = nmp->nm_sent;
861 rt->cwnd = nmp->nm_cwnd;
862 rt->srtt = nmp->nm_srtt[proct[rep->r_procnum] - 1];
863 rt->sdrtt = nmp->nm_sdrtt[proct[rep->r_procnum] - 1];
864 rt->fsid = nmp->nm_mountp->mnt_stat.f_fsid;
865 getmicrotime(&rt->tstamp);
866 if (rep->r_flags & R_TIMING)
867 rt->rtt = rep->r_rtt;
870 nfsrtt.pos = (nfsrtt.pos + 1) % NFSRTTLOGSIZ;
873 * Update congestion window.
874 * Do the additive increase of
877 if (nmp->nm_cwnd <= nmp->nm_sent) {
879 (NFS_CWNDSCALE * NFS_CWNDSCALE +
880 (nmp->nm_cwnd >> 1)) / nmp->nm_cwnd;
881 if (nmp->nm_cwnd > NFS_MAXCWND)
882 nmp->nm_cwnd = NFS_MAXCWND;
884 crit_enter(); /* nfs_timer interlock for nm_sent */
885 if (rep->r_flags & R_SENT) {
886 rep->r_flags &= ~R_SENT;
887 nmp->nm_sent -= NFS_CWNDSCALE;
891 * Update rtt using a gain of 0.125 on the mean
892 * and a gain of 0.25 on the deviation.
894 if (rep->r_flags & R_TIMING) {
896 * Since the timer resolution of
897 * NFS_HZ is so coarse, it can often
898 * result in r_rtt == 0. Since
899 * r_rtt == N means that the actual
900 * rtt is between N+dt and N+2-dt ticks,
904 t1 -= (NFS_SRTT(rep) >> 3);
908 t1 -= (NFS_SDRTT(rep) >> 2);
909 NFS_SDRTT(rep) += t1;
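/*
 * Worked example with illustrative values (not part of the original
 * code): take t1 = 6 (measured rtt plus rounding), srtt = 16 (mean 2)
 * and sdrtt = 4 (deviation 1):
 *
 *	t1 = 6 - (16 >> 3) = 4;    srtt  += 4  ->  20  (mean ~2.5)
 *	t1 = 4 - (4 >> 2)  = 3;    sdrtt += 3  ->   7  (dev  ~1.75)
 *
 * i.e. the 1/8 gain on the mean and 1/4 gain on the deviation
 * described above.
 */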
911 nmp->nm_timeouts = 0;
912 rep->r_mrep = info.mrep;
913 mtx_abort_ex_link(&rep->r_nmp->nm_rxlock, &rep->r_link);
916 * If not matched to a request, drop it.
917 * If it's mine, get out.
920 nfsstats.rpcunexpected++;
923 } else if (rep == myrep) {
924 if (rep->r_mrep == NULL)
925 panic("nfsreply nil");
928 if (myrep->r_flags & R_GETONEREP)
934 * Run the request state machine until the target state is reached
935 * or a fatal error occurs. The target state is not run. Specifying
936 * a target of NFSM_STATE_DONE runs the state machine until the rpc
939 * EINPROGRESS is returned for all states other than the DONE state,
940 * indicating that the rpc is still in progress.
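/*
 * Usage sketch (illustrative, not taken from this file): a caller
 * prepares an nfsm_info with the request mbufs and runs the machine
 * to completion:
 *
 *	info.state = NFSM_STATE_SETUP;
 *	error = nfs_request(&info, NFSM_STATE_DONE);
 *
 * Passing an intermediate target state instead makes nfs_request()
 * return EINPROGRESS as soon as that state is reached, so the caller
 * can resume the machine later from where it left off.
 */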
943 nfs_request(struct nfsm_info *info, nfsm_state_t target)
947 while (info->state == NFSM_STATE_DONE || info->state != target) {
948 switch(info->state) {
949 case NFSM_STATE_SETUP:
951 * Setup the nfsreq. Any error which occurs during
952 * this state is fatal.
954 info->error = nfs_request_setup(info);
956 info->state = NFSM_STATE_DONE;
957 return (info->error);
960 req->r_mrp = &info->mrep;
961 req->r_mdp = &info->md;
962 req->r_dposp = &info->dpos;
963 info->state = NFSM_STATE_AUTH;
966 case NFSM_STATE_AUTH:
968 * Authenticate the nfsreq. Any error which occurs
969 * during this state is fatal.
971 info->error = nfs_request_auth(info->req);
973 info->state = NFSM_STATE_DONE;
974 return (info->error);
976 info->state = NFSM_STATE_TRY;
981 * Transmit or retransmit attempt. An error in this
982 * state is ignored and we always move on to the
985 info->error = nfs_request_try(info->req);
986 info->state = NFSM_STATE_WAITREPLY;
988 case NFSM_STATE_WAITREPLY:
990 * Wait for a reply or timeout and move on to the
991 * next state. The error returned by this state
992 * is passed to the processing code in the next
995 info->error = nfs_request_waitreply(info->req);
996 info->state = NFSM_STATE_PROCESSREPLY;
998 case NFSM_STATE_PROCESSREPLY:
1000 * Process the reply or timeout. Errors which occur
1001 * in this state may cause the state machine to
1002 * go back to an earlier state, and are fatal
1005 info->error = nfs_request_processreply(info,
1007 switch(info->error) {
1009 info->state = NFSM_STATE_AUTH;
1012 info->state = NFSM_STATE_TRY;
1016 * Operation complete, with or without an
1017 * error. We are done.
1020 info->state = NFSM_STATE_DONE;
1021 return (info->error);
1024 case NFSM_STATE_DONE:
1026 * If the caller happens to re-call the state
1027 * machine after it returned completion, just
1028 * re-return the completion.
1030 return (info->error);
1036 * The target state (other than NFSM_STATE_DONE) was reached.
1037 * Return EINPROGRESS.
1039 return (EINPROGRESS);
1043 * nfs_request - goes something like this
1044 * - fill in request struct
1045 * - links it into list
1046 * - calls nfs_send() for first transmit
1047 * - calls nfs_receive() to get reply
1048 * - break down rpc header and return with nfs reply pointed to
1050 * nb: always frees up mreq mbuf list
1053 nfs_request_setup(nfsm_info_t info)
1056 struct nfsmount *nmp;
1061 * Reject requests while attempting a forced unmount.
1063 if (info->vp->v_mount->mnt_kern_flag & MNTK_UNMOUNTF) {
1064 m_freem(info->mreq);
1068 nmp = VFSTONFS(info->vp->v_mount);
1069 req = kmalloc(sizeof(struct nfsreq), M_NFSREQ, M_WAITOK);
1071 req->r_vp = info->vp;
1072 req->r_td = info->td;
1073 req->r_procnum = info->procnum;
1081 req->r_mrest = info->mreq;
1082 req->r_mrest_len = i;
1083 req->r_cred = info->cred;
1089 nfs_request_auth(struct nfsreq *rep)
1091 struct nfsmount *nmp = rep->r_nmp;
1093 char nickv[RPCX_NICKVERF];
1094 int error = 0, auth_len, auth_type;
1097 char *auth_str, *verf_str;
1101 rep->r_failed_auth = 0;
1104 * Get the RPC header with authorization.
1106 verf_str = auth_str = NULL;
1107 if (nmp->nm_flag & NFSMNT_KERB) {
1109 verf_len = sizeof (nickv);
1110 auth_type = RPCAUTH_KERB4;
1111 bzero((caddr_t)rep->r_key, sizeof(rep->r_key));
1112 if (rep->r_failed_auth ||
1113 nfs_getnickauth(nmp, cred, &auth_str, &auth_len,
1114 verf_str, verf_len)) {
1115 error = nfs_getauth(nmp, rep, cred, &auth_str,
1116 &auth_len, verf_str, &verf_len, rep->r_key);
1118 m_freem(rep->r_mrest);
1119 rep->r_mrest = NULL;
1120 kfree((caddr_t)rep, M_NFSREQ);
1125 auth_type = RPCAUTH_UNIX;
1126 if (cred->cr_ngroups < 1)
1127 panic("nfsreq nogrps");
1128 auth_len = ((((cred->cr_ngroups - 1) > nmp->nm_numgrps) ?
1129 nmp->nm_numgrps : (cred->cr_ngroups - 1)) << 2) +
1132 m = nfsm_rpchead(cred, nmp->nm_flag, rep->r_procnum, auth_type,
1133 auth_len, auth_str, verf_len, verf_str,
1134 rep->r_mrest, rep->r_mrest_len, &rep->r_mheadend, &xid);
1135 rep->r_mrest = NULL;
1137 kfree(auth_str, M_TEMP);
1140 * For stream protocols, insert a Sun RPC Record Mark.
1142 if (nmp->nm_sotype == SOCK_STREAM) {
1143 M_PREPEND(m, NFSX_UNSIGNED, MB_WAIT);
1145 kfree(rep, M_NFSREQ);
1148 *mtod(m, u_int32_t *) = htonl(0x80000000 |
1149 (m->m_pkthdr.len - NFSX_UNSIGNED));
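/*
 * Marker example (illustrative only): an RPC whose mbuf chain totals
 * 128 bytes including the 4-byte mark above goes out with the marker
 * htonl(0x80000000 | 124), i.e. 0x8000007c on the wire, announcing a
 * complete (last-fragment) 124-byte record to the server.
 */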
1157 nfs_request_try(struct nfsreq *rep)
1159 struct nfsmount *nmp = rep->r_nmp;
1163 if (nmp->nm_flag & NFSMNT_SOFT)
1164 rep->r_retry = nmp->nm_retry;
1166 rep->r_retry = NFS_MAXREXMIT + 1; /* past clip limit */
1167 rep->r_rtt = rep->r_rexmit = 0;
1168 if (proct[rep->r_procnum] > 0)
1169 rep->r_flags = R_TIMING | R_MASKTIMER;
1171 rep->r_flags = R_MASKTIMER;
1175 * Do the client side RPC.
1177 nfsstats.rpcrequests++;
1180 * Chain request into list of outstanding requests. Be sure
1181 * to put it LAST so timer finds oldest requests first. Note
1182 * that R_MASKTIMER is set at the moment to prevent any timer
1183 * action on this request while we are still doing processing on
1184 * it below. splsoftclock() primarily protects nm_sent. Note
1185 * that we may block in this code so there is no atomicity guarantee.
1188 TAILQ_INSERT_TAIL(&nmp->nm_reqq, rep, r_chain);
1189 mtx_link_init(&rep->r_link);
1194 * If backing off another request or avoiding congestion, don't
1195 * send this one now but let timer do it. If not timing a request,
1198 * Even though the timer will not mess with our request there is
1199 * still the possibility that we will race a reply (which clears
1200 * R_SENT), especially on localhost connections, so be very careful
1201 * when setting R_SENT. We could set R_SENT prior to calling
1202 * nfs_send() but why bother if the response occurs that quickly?
1204 if (nmp->nm_so && (nmp->nm_sotype != SOCK_DGRAM ||
1205 (nmp->nm_flag & NFSMNT_DUMBTIMR) ||
1206 nmp->nm_sent < nmp->nm_cwnd)) {
1207 if (nmp->nm_soflags & PR_CONNREQUIRED)
1208 error = nfs_sndlock(rep);
1210 m2 = m_copym(rep->r_mreq, 0, M_COPYALL, MB_WAIT);
1211 error = nfs_send(nmp->nm_so, nmp->nm_nam, m2, rep);
1212 if (nmp->nm_soflags & PR_CONNREQUIRED)
1215 if (!error && (rep->r_flags & R_MUSTRESEND) == 0 &&
1216 rep->r_mrep == NULL) {
1217 KASSERT((rep->r_flags & R_SENT) == 0,
1218 ("R_SENT ASSERT %p", rep));
1219 nmp->nm_sent += NFS_CWNDSCALE;
1220 rep->r_flags |= R_SENT;
1228 * Let the timer do what it will with the request, then
1229 * wait for the reply from our send or the timer's.
1232 rep->r_flags &= ~R_MASKTIMER;
1238 nfs_request_waitreply(struct nfsreq *rep)
1240 struct nfsmount *nmp = rep->r_nmp;
1244 error = nfs_reply(rep);
1248 * RPC done, unlink the request, but don't rip it out from under
1249 * the callout timer.
1251 while (rep->r_flags & R_LOCKED) {
1252 nfs_timer_raced = 1;
1253 tsleep(&nfs_timer_raced, 0, "nfstrac", 0);
1255 TAILQ_REMOVE(&nmp->nm_reqq, rep, r_chain);
1258 * Decrement the outstanding request count.
1260 if (rep->r_flags & R_SENT) {
1261 rep->r_flags &= ~R_SENT;
1262 nmp->nm_sent -= NFS_CWNDSCALE;
1270 * Process reply with error returned from nfs_request_waitreply().
1272 * Returns EAGAIN if it wants us to loop up to nfs_request_try() again.
1273 * Returns ENEEDAUTH if it wants us to loop up to nfs_request_auth() again.
1276 nfs_request_processreply(nfsm_info_t info, int error)
1278 struct nfsreq *req = info->req;
1279 struct nfsmount *nmp = req->r_nmp;
1282 int trylater_delay = 15, trylater_cnt = 0;
1287 * If there was a successful reply and a tprintf msg was printed,
1288 * tprintf a response.
1290 if (error == 0 && (req->r_flags & R_TPRINTFMSG)) {
1291 nfs_msg(req->r_td, nmp->nm_mountp->mnt_stat.f_mntfromname,
1294 info->mrep = req->r_mrep;
1295 info->md = req->r_md;
1296 info->dpos = req->r_dpos;
1298 m_freem(req->r_mreq);
1300 kfree(req, M_NFSREQ);
1306 * break down the rpc header and check if ok
1308 NULLOUT(tl = nfsm_dissect(info, 3 * NFSX_UNSIGNED));
1309 if (*tl++ == rpc_msgdenied) {
1310 if (*tl == rpc_mismatch) {
1312 } else if ((nmp->nm_flag & NFSMNT_KERB) &&
1313 *tl++ == rpc_autherr) {
1314 if (req->r_failed_auth == 0) {
1315 req->r_failed_auth++;
1316 req->r_mheadend->m_next = NULL;
1317 m_freem(info->mrep);
1319 m_freem(req->r_mreq);
1327 m_freem(info->mrep);
1329 m_freem(req->r_mreq);
1331 kfree(req, M_NFSREQ);
1337 * Grab any Kerberos verifier, otherwise just throw it away.
1339 verf_type = fxdr_unsigned(int, *tl++);
1340 i = fxdr_unsigned(int32_t, *tl);
1341 if ((nmp->nm_flag & NFSMNT_KERB) && verf_type == RPCAUTH_KERB4) {
1342 error = nfs_savenickauth(nmp, req->r_cred, i, req->r_key,
1343 &info->md, &info->dpos, info->mrep);
1347 ERROROUT(nfsm_adv(info, nfsm_rndup(i)));
1349 NULLOUT(tl = nfsm_dissect(info, NFSX_UNSIGNED));
1352 NULLOUT(tl = nfsm_dissect(info, NFSX_UNSIGNED));
1354 error = fxdr_unsigned(int, *tl);
1355 if ((nmp->nm_flag & NFSMNT_NFSV3) &&
1356 error == NFSERR_TRYLATER) {
1357 m_freem(info->mrep);
1360 waituntil = time_second + trylater_delay;
1361 while (time_second < waituntil)
1362 (void) tsleep((caddr_t)&lbolt,
1364 trylater_delay *= nfs_backoff[trylater_cnt];
1365 if (trylater_cnt < 7)
1367 req->r_flags &= ~R_MASKTIMER;
1368 return (EAGAIN); /* goto tryagain */
1372 * If the File Handle was stale, invalidate the
1373 * lookup cache, just in case.
1375 * To avoid namecache<->vnode deadlocks we must
1376 * release the vnode lock if we hold it.
1378 if (error == ESTALE) {
1379 struct vnode *vp = req->r_vp;
1382 ltype = lockstatus(&vp->v_lock, curthread);
1383 if (ltype == LK_EXCLUSIVE || ltype == LK_SHARED)
1384 lockmgr(&vp->v_lock, LK_RELEASE);
1385 cache_inval_vp(vp, CINV_CHILDREN);
1386 if (ltype == LK_EXCLUSIVE || ltype == LK_SHARED)
1387 lockmgr(&vp->v_lock, ltype);
1389 if (nmp->nm_flag & NFSMNT_NFSV3) {
1390 KKASSERT(*req->r_mrp == info->mrep);
1391 KKASSERT(*req->r_mdp == info->md);
1392 KKASSERT(*req->r_dposp == info->dpos);
1393 error |= NFSERR_RETERR;
1395 m_freem(info->mrep);
1398 m_freem(req->r_mreq);
1400 kfree(req, M_NFSREQ);
1405 KKASSERT(*req->r_mrp == info->mrep);
1406 KKASSERT(*req->r_mdp == info->md);
1407 KKASSERT(*req->r_dposp == info->dpos);
1408 m_freem(req->r_mreq);
1410 FREE(req, M_NFSREQ);
1413 m_freem(info->mrep);
1415 error = EPROTONOSUPPORT;
1417 m_freem(req->r_mreq);
1419 kfree(req, M_NFSREQ);
1424 #ifndef NFS_NOSERVER
1426 * Generate the rpc reply header
1427 * siz arg. is used to decide if adding a cluster is worthwhile
1430 nfs_rephead(int siz, struct nfsrv_descript *nd, struct nfssvc_sock *slp,
1431 int err, struct mbuf **mrq, struct mbuf **mbp, caddr_t *bposp)
1434 struct nfsm_info info;
1436 siz += RPC_REPLYSIZ;
1437 info.mb = m_getl(max_hdr + siz, MB_WAIT, MT_DATA, M_PKTHDR, NULL);
1438 info.mreq = info.mb;
1439 info.mreq->m_pkthdr.len = 0;
1441 * If this is not a cluster, try and leave leading space
1442 * for the lower level headers.
1444 if ((max_hdr + siz) < MINCLSIZE)
1445 info.mreq->m_data += max_hdr;
1446 tl = mtod(info.mreq, u_int32_t *);
1447 info.mreq->m_len = 6 * NFSX_UNSIGNED;
1448 info.bpos = ((caddr_t)tl) + info.mreq->m_len;
1449 *tl++ = txdr_unsigned(nd->nd_retxid);
1451 if (err == ERPCMISMATCH || (err & NFSERR_AUTHERR)) {
1452 *tl++ = rpc_msgdenied;
1453 if (err & NFSERR_AUTHERR) {
1454 *tl++ = rpc_autherr;
1455 *tl = txdr_unsigned(err & ~NFSERR_AUTHERR);
1456 info.mreq->m_len -= NFSX_UNSIGNED;
1457 info.bpos -= NFSX_UNSIGNED;
1459 *tl++ = rpc_mismatch;
1460 *tl++ = txdr_unsigned(RPC_VER2);
1461 *tl = txdr_unsigned(RPC_VER2);
1464 *tl++ = rpc_msgaccepted;
1467 * For Kerberos authentication, we must send the nickname
1468 * verifier back, otherwise just RPCAUTH_NULL.
1470 if (nd->nd_flag & ND_KERBFULL) {
1471 struct nfsuid *nuidp;
1472 struct timeval ktvin, ktvout;
1474 for (nuidp = NUIDHASH(slp, nd->nd_cr.cr_uid)->lh_first;
1475 nuidp != 0; nuidp = nuidp->nu_hash.le_next) {
1476 if (nuidp->nu_cr.cr_uid == nd->nd_cr.cr_uid &&
1477 (!nd->nd_nam2 || netaddr_match(NU_NETFAM(nuidp),
1478 &nuidp->nu_haddr, nd->nd_nam2)))
1483 txdr_unsigned(nuidp->nu_timestamp.tv_sec - 1);
1485 txdr_unsigned(nuidp->nu_timestamp.tv_usec);
1488 * Encrypt the timestamp in ecb mode using the
1495 *tl++ = rpc_auth_kerb;
1496 *tl++ = txdr_unsigned(3 * NFSX_UNSIGNED);
1497 *tl = ktvout.tv_sec;
1498 tl = nfsm_build(&info, 3 * NFSX_UNSIGNED);
1499 *tl++ = ktvout.tv_usec;
1500 *tl++ = txdr_unsigned(nuidp->nu_cr.cr_uid);
1511 *tl = txdr_unsigned(RPC_PROGUNAVAIL);
1514 *tl = txdr_unsigned(RPC_PROGMISMATCH);
1515 tl = nfsm_build(&info, 2 * NFSX_UNSIGNED);
1516 *tl++ = txdr_unsigned(2);
1517 *tl = txdr_unsigned(3);
1520 *tl = txdr_unsigned(RPC_PROCUNAVAIL);
1523 *tl = txdr_unsigned(RPC_GARBAGE);
1527 if (err != NFSERR_RETVOID) {
1528 tl = nfsm_build(&info, NFSX_UNSIGNED);
1530 *tl = txdr_unsigned(nfsrv_errmap(nd, err));
1542 if (err != 0 && err != NFSERR_RETVOID)
1543 nfsstats.srvrpc_errs++;
1548 #endif /* NFS_NOSERVER */
1551 * Scan the nfsreq list and retransmit any requests that have timed out.
1552 * To avoid retransmission attempts on STREAM sockets (in the future) make
1553 * sure to set the r_retry field to 0 (implies nm_retry == 0).
1556 nfs_timer(void *arg /* never used */)
1558 struct nfsmount *nmp;
1560 #ifndef NFS_NOSERVER
1561 struct nfssvc_sock *slp;
1563 #endif /* NFS_NOSERVER */
1566 TAILQ_FOREACH(nmp, &nfs_mountq, nm_entry) {
1567 TAILQ_FOREACH(req, &nmp->nm_reqq, r_chain) {
1568 KKASSERT(nmp == req->r_nmp);
1570 (req->r_flags & (R_SOFTTERM|R_MASKTIMER))) {
1573 req->r_flags |= R_LOCKED;
1574 if (nfs_sigintr(nmp, req, req->r_td)) {
1579 req->r_flags &= ~R_LOCKED;
1582 #ifndef NFS_NOSERVER
1585 * Scan the write gathering queues for writes that need to be
1588 cur_usec = nfs_curusec();
1589 TAILQ_FOREACH(slp, &nfssvc_sockhead, ns_chain) {
1590 if (slp->ns_tq.lh_first && slp->ns_tq.lh_first->nd_time<=cur_usec)
1591 nfsrv_wakenfsd(slp, 1);
1593 #endif /* NFS_NOSERVER */
1596 * Due to possible blocking, a client operation may be waiting for
1597 * us to finish processing this request so it can remove it.
1599 if (nfs_timer_raced) {
1600 nfs_timer_raced = 0;
1601 wakeup(&nfs_timer_raced);
1604 callout_reset(&nfs_timer_handle, nfs_ticks, nfs_timer, NULL);
1609 nfs_timer_req(struct nfsreq *req)
1611 struct thread *td = &thread0; /* XXX for creds, will break if sleep */
1612 struct nfsmount *nmp = req->r_nmp;
1618 if (req->r_rtt >= 0) {
1620 if (nmp->nm_flag & NFSMNT_DUMBTIMR)
1621 timeo = nmp->nm_timeo;
1623 timeo = NFS_RTO(nmp, proct[req->r_procnum]);
1624 if (nmp->nm_timeouts > 0)
1625 timeo *= nfs_backoff[nmp->nm_timeouts - 1];
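/*
 * Example with illustrative numbers: a base timeout of 4 ticks and
 * nm_timeouts == 3 picks nfs_backoff[2] == 8, stretching the wait to
 * 32 ticks before the next retransmit is considered.
 */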
1626 if (req->r_rtt <= timeo)
1628 if (nmp->nm_timeouts < 8)
1632 * Check for server not responding
1634 if ((req->r_flags & R_TPRINTFMSG) == 0 &&
1635 req->r_rexmit > nmp->nm_deadthresh) {
1637 nmp->nm_mountp->mnt_stat.f_mntfromname,
1639 req->r_flags |= R_TPRINTFMSG;
1641 if (req->r_rexmit >= req->r_retry) { /* too many */
1642 nfsstats.rpctimeouts++;
1646 if (nmp->nm_sotype != SOCK_DGRAM) {
1647 if (++req->r_rexmit > NFS_MAXREXMIT)
1648 req->r_rexmit = NFS_MAXREXMIT;
1651 if ((so = nmp->nm_so) == NULL)
1655 * If there is enough space and the window allows..
1657 * Set r_rtt to -1 in case we fail to send it now.
1660 if (ssb_space(&so->so_snd) >= req->r_mreq->m_pkthdr.len &&
1661 ((nmp->nm_flag & NFSMNT_DUMBTIMR) ||
1662 (req->r_flags & R_SENT) ||
1663 nmp->nm_sent < nmp->nm_cwnd) &&
1664 (m = m_copym(req->r_mreq, 0, M_COPYALL, MB_DONTWAIT))){
1665 if ((nmp->nm_flag & NFSMNT_NOCONN) == 0)
1666 error = so_pru_send(so, 0, m, NULL, NULL, td);
1668 error = so_pru_send(so, 0, m, nmp->nm_nam,
1671 if (NFSIGNORE_SOERROR(nmp->nm_soflags, error))
1673 } else if (req->r_mrep == NULL) {
1675 * Iff first send, start timing
1676 * else turn timing off, backoff timer
1677 * and divide congestion window by 2.
1679 * It is possible for the so_pru_send() to
1680 * block and for us to race a reply so we
1681 * only do this if the reply field has not
1682 * been filled in. R_LOCKED will prevent
1683 * the request from being ripped out from under
1686 if (req->r_flags & R_SENT) {
1687 req->r_flags &= ~R_TIMING;
1688 if (++req->r_rexmit > NFS_MAXREXMIT)
1689 req->r_rexmit = NFS_MAXREXMIT;
1691 if (nmp->nm_cwnd < NFS_CWNDSCALE)
1692 nmp->nm_cwnd = NFS_CWNDSCALE;
1693 nfsstats.rpcretries++;
1695 req->r_flags |= R_SENT;
1696 nmp->nm_sent += NFS_CWNDSCALE;
1704 * Mark all of an nfs mount's outstanding requests with R_SOFTTERM and
1705 * wait for all requests to complete. This is used by forced unmounts
1706 * to terminate any outstanding RPCs.
1709 nfs_nmcancelreqs(struct nfsmount *nmp)
1715 TAILQ_FOREACH(req, &nmp->nm_reqq, r_chain) {
1716 if (nmp != req->r_nmp || req->r_mrep != NULL ||
1717 (req->r_flags & R_SOFTTERM)) {
1724 for (i = 0; i < 30; i++) {
1726 TAILQ_FOREACH(req, &nmp->nm_reqq, r_chain) {
1727 if (nmp == req->r_nmp)
1733 tsleep(&lbolt, 0, "nfscancel", 0);
1739 * Flag a request as being about to terminate (due to NFSMNT_INT/NFSMNT_SOFT).
1740 * The nm_sent count is decremented now to avoid deadlocks when the process in
1741 * soreceive() hasn't yet managed to send its own request.
1743 * This routine must be called at splsoftclock() to protect r_flags and
1748 nfs_softterm(struct nfsreq *rep)
1750 rep->r_flags |= R_SOFTTERM;
1752 if (rep->r_flags & R_SENT) {
1753 rep->r_nmp->nm_sent -= NFS_CWNDSCALE;
1754 rep->r_flags &= ~R_SENT;
1759 * Test for a termination condition pending on the process.
1760 * This is used for NFSMNT_INT mounts.
1763 nfs_sigintr(struct nfsmount *nmp, struct nfsreq *rep, struct thread *td)
1769 if (rep && (rep->r_flags & R_SOFTTERM))
1771 /* Terminate all requests while attempting a forced unmount. */
1772 if (nmp->nm_mountp->mnt_kern_flag & MNTK_UNMOUNTF)
1774 if (!(nmp->nm_flag & NFSMNT_INT))
1776 /* td might be NULL YYY */
1777 if (td == NULL || (p = td->td_proc) == NULL)
1781 tmpset = lwp_sigpend(lp);
1782 SIGSETNAND(tmpset, lp->lwp_sigmask);
1783 SIGSETNAND(tmpset, p->p_sigignore);
1784 if (SIGNOTEMPTY(tmpset) && NFSINT_SIGMASK(tmpset))
1791 * Lock a socket against others.
1792 * Necessary for STREAM sockets to ensure you get an entire rpc request/reply
1793 * and also to avoid race conditions between the processes with nfs requests
1794 * in progress when a reconnect is necessary.
1797 nfs_sndlock(struct nfsreq *rep)
1799 mtx_t mtx = &rep->r_nmp->nm_txlock;
1808 if (rep->r_nmp->nm_flag & NFSMNT_INT)
1811 while ((error = mtx_lock_ex_try(mtx)) != 0) {
1812 if (nfs_sigintr(rep->r_nmp, rep, td)) {
1816 error = mtx_lock_ex(mtx, "nfsndlck", slpflag, slptimeo);
1819 if (slpflag == PCATCH) {
1824 /* Always fail if our request has been cancelled. */
1825 if (rep->r_flags & R_SOFTTERM) {
1834 * Unlock the stream socket for others.
1837 nfs_sndunlock(struct nfsreq *rep)
1839 mtx_t mtx = &rep->r_nmp->nm_txlock;
1845 nfs_rcvlock(struct nfsreq *rep)
1847 mtx_t mtx = &rep->r_nmp->nm_rxlock;
1853 * Unconditionally check for completion in case another nfsiod
1854 * got the packet while the caller was blocked, before the caller
1855 * called us. Packet reception is handled by mainline code which
1856 * is protected by the BGL at the moment.
1858 * We do not strictly need the second check just before the
1859 * tsleep(), but it's good defensive programming.
1861 if (rep->r_mrep != NULL)
1864 if (rep->r_nmp->nm_flag & NFSMNT_INT)
1870 while ((error = mtx_lock_ex_try(mtx)) != 0) {
1871 if (nfs_sigintr(rep->r_nmp, rep, rep->r_td)) {
1875 if (rep->r_mrep != NULL) {
1881 * NOTE: can return ENOLCK, but in that case rep->r_mrep
1882 * will already be set.
1884 error = mtx_lock_ex_link(mtx, &rep->r_link, "nfsrcvlk",
1890 * If our reply was received while we were sleeping,
1891 * then just return without taking the lock to avoid a
1892 * situation where a single iod could 'capture' the
1895 if (rep->r_mrep != NULL) {
1899 if (slpflag == PCATCH) {
1905 if (rep->r_mrep != NULL) {
1914 * Unlock the stream socket for others.
1917 nfs_rcvunlock(struct nfsreq *rep)
1919 mtx_t mtx = &rep->r_nmp->nm_rxlock;
1927 * Check for badly aligned mbuf data and realign by copying the unaligned
1928 * portion of the data into a new mbuf chain and freeing the portions
1929 * of the old chain that were replaced.
1931 * We cannot simply realign the data within the existing mbuf chain
1932 * because the underlying buffers may contain other rpc commands and
1933 * we cannot afford to overwrite them.
1935 * We would prefer to avoid this situation entirely. The situation does
1936 * not occur with NFS/UDP and is supposed to only occasionally occur
1937 * with TCP. Use vfs.nfs.realign_count and realign_test to check this.
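/*
 * Illustration (not from the original source): an mbuf whose data
 * pointer ends in 0x...2, or whose length is, say, 5 bytes, trips the
 * check below
 *
 *	(m->m_len & 0x3) || (mtod(m, intptr_t) & 0x3)
 *
 * because the XDR words that nfsm_dissect() casts to u_int32_t *
 * must start on 4-byte boundaries.
 */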
1940 nfs_realign(struct mbuf **pm, int hsiz)
1943 struct mbuf *n = NULL;
1948 while ((m = *pm) != NULL) {
1949 if ((m->m_len & 0x3) || (mtod(m, intptr_t) & 0x3)) {
1950 n = m_getl(m->m_len, MB_WAIT, MT_DATA, 0, NULL);
1958 * If n is non-NULL, loop on m copying data, then replace the
1959 * portion of the chain that had to be realigned.
1962 ++nfs_realign_count;
1964 m_copyback(n, off, m->m_len, mtod(m, caddr_t));
1973 #ifndef NFS_NOSERVER
1976 * Parse an RPC request
1978 * - fill in the cred struct.
1981 nfs_getreq(struct nfsrv_descript *nd, struct nfsd *nfsd, int has_header)
1988 u_int32_t nfsvers, auth_type;
1990 int error = 0, ticklen;
1991 struct nfsuid *nuidp;
1992 struct timeval tvin, tvout;
1993 struct nfsm_info info;
1994 #if 0 /* until encrypted keys are implemented */
1995 NFSKERBKEYSCHED_T keys; /* stores key schedule */
1998 info.mrep = nd->nd_mrep;
1999 info.md = nd->nd_md;
2000 info.dpos = nd->nd_dpos;
2003 NULLOUT(tl = nfsm_dissect(&info, 10 * NFSX_UNSIGNED));
2004 nd->nd_retxid = fxdr_unsigned(u_int32_t, *tl++);
2005 if (*tl++ != rpc_call) {
2010 NULLOUT(tl = nfsm_dissect(&info, 8 * NFSX_UNSIGNED));
2014 if (*tl++ != rpc_vers) {
2015 nd->nd_repstat = ERPCMISMATCH;
2016 nd->nd_procnum = NFSPROC_NOOP;
2019 if (*tl != nfs_prog) {
2020 nd->nd_repstat = EPROGUNAVAIL;
2021 nd->nd_procnum = NFSPROC_NOOP;
2025 nfsvers = fxdr_unsigned(u_int32_t, *tl++);
2026 if (nfsvers < NFS_VER2 || nfsvers > NFS_VER3) {
2027 nd->nd_repstat = EPROGMISMATCH;
2028 nd->nd_procnum = NFSPROC_NOOP;
2031 if (nfsvers == NFS_VER3)
2032 nd->nd_flag = ND_NFSV3;
2033 nd->nd_procnum = fxdr_unsigned(u_int32_t, *tl++);
2034 if (nd->nd_procnum == NFSPROC_NULL)
2036 if (nd->nd_procnum >= NFS_NPROCS ||
2037 (nd->nd_procnum >= NQNFSPROC_GETLEASE) ||
2038 (!nd->nd_flag && nd->nd_procnum > NFSV2PROC_STATFS)) {
2039 nd->nd_repstat = EPROCUNAVAIL;
2040 nd->nd_procnum = NFSPROC_NOOP;
2043 if ((nd->nd_flag & ND_NFSV3) == 0)
2044 nd->nd_procnum = nfsv3_procid[nd->nd_procnum];
2046 len = fxdr_unsigned(int, *tl++);
2047 if (len < 0 || len > RPCAUTH_MAXSIZ) {
2052 nd->nd_flag &= ~ND_KERBAUTH;
2054 * Handle auth_unix or auth_kerb.
2056 if (auth_type == rpc_auth_unix) {
2057 len = fxdr_unsigned(int, *++tl);
2058 if (len < 0 || len > NFS_MAXNAMLEN) {
2062 ERROROUT(nfsm_adv(&info, nfsm_rndup(len)));
2063 NULLOUT(tl = nfsm_dissect(&info, 3 * NFSX_UNSIGNED));
2064 bzero((caddr_t)&nd->nd_cr, sizeof (struct ucred));
2065 nd->nd_cr.cr_ref = 1;
2066 nd->nd_cr.cr_uid = fxdr_unsigned(uid_t, *tl++);
2067 nd->nd_cr.cr_gid = fxdr_unsigned(gid_t, *tl++);
2068 len = fxdr_unsigned(int, *tl);
2069 if (len < 0 || len > RPCAUTH_UNIXGIDS) {
2073 NULLOUT(tl = nfsm_dissect(&info, (len + 2) * NFSX_UNSIGNED));
2074 for (i = 1; i <= len; i++)
2076 nd->nd_cr.cr_groups[i] = fxdr_unsigned(gid_t, *tl++);
2079 nd->nd_cr.cr_ngroups = (len >= NGROUPS) ? NGROUPS : (len + 1);
2080 if (nd->nd_cr.cr_ngroups > 1)
2081 nfsrvw_sort(nd->nd_cr.cr_groups, nd->nd_cr.cr_ngroups);
2082 len = fxdr_unsigned(int, *++tl);
2083 if (len < 0 || len > RPCAUTH_MAXSIZ) {
2088 ERROROUT(nfsm_adv(&info, nfsm_rndup(len)));
2090 } else if (auth_type == rpc_auth_kerb) {
2091 switch (fxdr_unsigned(int, *tl++)) {
2092 case RPCAKN_FULLNAME:
2093 ticklen = fxdr_unsigned(int, *tl);
2094 *((u_int32_t *)nfsd->nfsd_authstr) = *tl;
2095 uio.uio_resid = nfsm_rndup(ticklen) + NFSX_UNSIGNED;
2096 nfsd->nfsd_authlen = uio.uio_resid + NFSX_UNSIGNED;
2097 if (uio.uio_resid > (len - 2 * NFSX_UNSIGNED)) {
2104 uio.uio_segflg = UIO_SYSSPACE;
2105 iov.iov_base = (caddr_t)&nfsd->nfsd_authstr[4];
2106 iov.iov_len = RPCAUTH_MAXSIZ - 4;
2107 ERROROUT(nfsm_mtouio(&info, &uio, uio.uio_resid));
2108 NULLOUT(tl = nfsm_dissect(&info, 2 * NFSX_UNSIGNED));
2109 if (*tl++ != rpc_auth_kerb ||
2110 fxdr_unsigned(int, *tl) != 4 * NFSX_UNSIGNED) {
2111 kprintf("Bad kerb verifier\n");
2112 nd->nd_repstat = (NFSERR_AUTHERR|AUTH_BADVERF);
2113 nd->nd_procnum = NFSPROC_NOOP;
2116 NULLOUT(cp = nfsm_dissect(&info, 4 * NFSX_UNSIGNED));
2117 tl = (u_int32_t *)cp;
2118 if (fxdr_unsigned(int, *tl) != RPCAKN_FULLNAME) {
2119 kprintf("Not fullname kerb verifier\n");
2120 nd->nd_repstat = (NFSERR_AUTHERR|AUTH_BADVERF);
2121 nd->nd_procnum = NFSPROC_NOOP;
2124 cp += NFSX_UNSIGNED;
2125 bcopy(cp, nfsd->nfsd_verfstr, 3 * NFSX_UNSIGNED);
2126 nfsd->nfsd_verflen = 3 * NFSX_UNSIGNED;
2127 nd->nd_flag |= ND_KERBFULL;
2128 nfsd->nfsd_flag |= NFSD_NEEDAUTH;
2130 case RPCAKN_NICKNAME:
2131 if (len != 2 * NFSX_UNSIGNED) {
2132 kprintf("Kerb nickname short\n");
2133 nd->nd_repstat = (NFSERR_AUTHERR|AUTH_BADCRED);
2134 nd->nd_procnum = NFSPROC_NOOP;
2137 nickuid = fxdr_unsigned(uid_t, *tl);
2138 NULLOUT(tl = nfsm_dissect(&info, 2 * NFSX_UNSIGNED));
2139 if (*tl++ != rpc_auth_kerb ||
2140 fxdr_unsigned(int, *tl) != 3 * NFSX_UNSIGNED) {
2141 kprintf("Kerb nick verifier bad\n");
2142 nd->nd_repstat = (NFSERR_AUTHERR|AUTH_BADVERF);
2143 nd->nd_procnum = NFSPROC_NOOP;
2146 NULLOUT(tl = nfsm_dissect(&info, 3 * NFSX_UNSIGNED));
2147 tvin.tv_sec = *tl++;
2150 for (nuidp = NUIDHASH(nfsd->nfsd_slp,nickuid)->lh_first;
2151 nuidp != 0; nuidp = nuidp->nu_hash.le_next) {
2152 if (nuidp->nu_cr.cr_uid == nickuid &&
2154 netaddr_match(NU_NETFAM(nuidp),
2155 &nuidp->nu_haddr, nd->nd_nam2)))
2160 (NFSERR_AUTHERR|AUTH_REJECTCRED);
2161 nd->nd_procnum = NFSPROC_NOOP;
2166 * Now, decrypt the timestamp using the session key
2173 tvout.tv_sec = fxdr_unsigned(long, tvout.tv_sec);
2174 tvout.tv_usec = fxdr_unsigned(long, tvout.tv_usec);
2175 if (nuidp->nu_expire < time_second ||
2176 nuidp->nu_timestamp.tv_sec > tvout.tv_sec ||
2177 (nuidp->nu_timestamp.tv_sec == tvout.tv_sec &&
2178 nuidp->nu_timestamp.tv_usec > tvout.tv_usec)) {
2179 nuidp->nu_expire = 0;
2181 (NFSERR_AUTHERR|AUTH_REJECTVERF);
2182 nd->nd_procnum = NFSPROC_NOOP;
2185 nfsrv_setcred(&nuidp->nu_cr, &nd->nd_cr);
2186 nd->nd_flag |= ND_KERBNICK;
2189 nd->nd_repstat = (NFSERR_AUTHERR | AUTH_REJECTCRED);
2190 nd->nd_procnum = NFSPROC_NOOP;
2194 nd->nd_md = info.md;
2195 nd->nd_dpos = info.dpos;
2204 * Send a message to the originating process's terminal. The thread and/or
2205 * process may be NULL. YYY the thread should not be NULL but there may
2206 * still be some uio_td's being passed as NULL through to
2210 nfs_msg(struct thread *td, char *server, char *msg)
2214 if (td && td->td_proc)
2215 tpr = tprintf_open(td->td_proc);
2218 tprintf(tpr, "nfs server %s: %s\n", server, msg);
2223 #ifndef NFS_NOSERVER
2225 * Socket upcall routine for the nfsd sockets.
2226 * The caddr_t arg is a pointer to the "struct nfssvc_sock".
2227 * Essentially do as much as possible non-blocking, else punt and it will
2228 * be called with MB_WAIT from an nfsd.
2231 nfsrv_rcv(struct socket *so, void *arg, int waitflag)
2233 struct nfssvc_sock *slp = (struct nfssvc_sock *)arg;
2235 struct sockaddr *nam;
2238 int nparallel_wakeup = 0;
2240 if ((slp->ns_flag & SLP_VALID) == 0)
2244 * Do not allow an infinite number of completed RPC records to build
2245 * up before we stop reading data from the socket. Otherwise we could
2246 * end up holding onto an unreasonable number of mbufs for requests
2247 * waiting for service.
2249 * This should give pretty good feedback to the TCP
2250 * layer and prevents a memory crunch for other protocols.
2252 * Note that the same service socket can be dispatched to several
2253 * nfs servers simultaneously.
2255 * the tcp protocol callback calls us with MB_DONTWAIT.
2256 * nfsd calls us with MB_WAIT (typically).
2258 if (waitflag == MB_DONTWAIT && slp->ns_numrec >= nfsd_waiting / 2 + 1) {
2259 slp->ns_flag |= SLP_NEEDQ;
2264 * Handle protocol specifics to parse an RPC request. We always
2265 * pull from the socket using non-blocking I/O.
2267 if (so->so_type == SOCK_STREAM) {
2269 * The data has to be read in an orderly fashion from a TCP
2270 * stream, unlike a UDP socket. It is possible for soreceive
2271 * and/or nfsrv_getstream() to block, so make sure only one
2272 * entity is messing around with the TCP stream at any given
2273 * moment. The receive sockbuf's lock in soreceive is not
2276 * Note that this procedure can be called from any number of
2277 * NFS servers *OR* can be upcalled directly from a TCP
2280 if (slp->ns_flag & SLP_GETSTREAM) {
2281 slp->ns_flag |= SLP_NEEDQ;
2284 slp->ns_flag |= SLP_GETSTREAM;
2287 * Do soreceive(). Pull out as much data as possible without
2290 sbinit(&sio, 1000000000);
2291 flags = MSG_DONTWAIT;
2292 error = so_pru_soreceive(so, &nam, NULL, &sio, NULL, &flags);
2293 if (error || sio.sb_mb == NULL) {
2294 if (error == EWOULDBLOCK)
2295 slp->ns_flag |= SLP_NEEDQ;
2297 slp->ns_flag |= SLP_DISCONN;
2298 slp->ns_flag &= ~SLP_GETSTREAM;
2302 if (slp->ns_rawend) {
2303 slp->ns_rawend->m_next = m;
2304 slp->ns_cc += sio.sb_cc;
2307 slp->ns_cc = sio.sb_cc;
2314 * Now try and parse as many record(s) as we can out of the
2317 error = nfsrv_getstream(slp, waitflag, &nparallel_wakeup);
2320 slp->ns_flag |= SLP_DISCONN;
2322 slp->ns_flag |= SLP_NEEDQ;
2324 slp->ns_flag &= ~SLP_GETSTREAM;
2327 * For UDP soreceive typically pulls just one packet, loop
2328 * to get the whole batch.
2331 sbinit(&sio, 1000000000);
2332 flags = MSG_DONTWAIT;
2333 error = so_pru_soreceive(so, &nam, NULL, &sio,
2336 struct nfsrv_rec *rec;
2337 int mf = (waitflag & MB_DONTWAIT) ?
2338 M_NOWAIT : M_WAITOK;
2339 rec = kmalloc(sizeof(struct nfsrv_rec),
2343 FREE(nam, M_SONAME);
2347 nfs_realign(&sio.sb_mb, 10 * NFSX_UNSIGNED);
2348 rec->nr_address = nam;
2349 rec->nr_packet = sio.sb_mb;
2350 STAILQ_INSERT_TAIL(&slp->ns_rec, rec, nr_link);
2355 if ((so->so_proto->pr_flags & PR_CONNREQUIRED)
2356 && error != EWOULDBLOCK) {
2357 slp->ns_flag |= SLP_DISCONN;
2361 } while (sio.sb_mb);
2365 * If we were upcalled from the tcp protocol layer and we have
2366 * fully parsed records ready to go, or there is new data pending,
2367 * or something went wrong, try to wake up an nfsd thread to deal
2371 if (waitflag == MB_DONTWAIT && (slp->ns_numrec > 0
2372 || (slp->ns_flag & (SLP_NEEDQ | SLP_DISCONN)))) {
2373 nfsrv_wakenfsd(slp, nparallel_wakeup);
2378 * Try and extract an RPC request from the mbuf data list received on a
2379 * stream socket. The "waitflag" argument indicates whether or not it
2383 nfsrv_getstream(struct nfssvc_sock *slp, int waitflag, int *countp)
2385 struct mbuf *m, **mpp;
2388 struct mbuf *om, *m2, *recm;
2392 if (slp->ns_reclen == 0) {
2393 if (slp->ns_cc < NFSX_UNSIGNED)
2396 if (m->m_len >= NFSX_UNSIGNED) {
2397 bcopy(mtod(m, caddr_t), (caddr_t)&recmark, NFSX_UNSIGNED);
2398 m->m_data += NFSX_UNSIGNED;
2399 m->m_len -= NFSX_UNSIGNED;
2401 cp1 = (caddr_t)&recmark;
2402 cp2 = mtod(m, caddr_t);
2403 while (cp1 < ((caddr_t)&recmark) + NFSX_UNSIGNED) {
2404 while (m->m_len == 0) {
2406 cp2 = mtod(m, caddr_t);
2413 slp->ns_cc -= NFSX_UNSIGNED;
2414 recmark = ntohl(recmark);
2415 slp->ns_reclen = recmark & ~0x80000000;
2416 if (recmark & 0x80000000)
2417 slp->ns_flag |= SLP_LASTFRAG;
2419 slp->ns_flag &= ~SLP_LASTFRAG;
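/*
 * Example marker values (illustrative only): 0x80000200 announces a
 * final 512-byte fragment, while 0x00000400 announces a 1024-byte
 * fragment with more fragments of the same record still to come.
 */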
2420 if (slp->ns_reclen > NFS_MAXPACKET || slp->ns_reclen <= 0) {
2421 log(LOG_ERR, "%s (%d) from nfs client\n",
2422 "impossible packet length",
2429 * Now get the record part.
2431 * Note that slp->ns_reclen may be 0. Linux sometimes
2432 * generates 0-length RPCs
2435 if (slp->ns_cc == slp->ns_reclen) {
2437 slp->ns_raw = slp->ns_rawend = NULL;
2438 slp->ns_cc = slp->ns_reclen = 0;
2439 } else if (slp->ns_cc > slp->ns_reclen) {
2444 while (len < slp->ns_reclen) {
2445 if ((len + m->m_len) > slp->ns_reclen) {
2446 m2 = m_copym(m, 0, slp->ns_reclen - len,
2454 m->m_data += slp->ns_reclen - len;
2455 m->m_len -= slp->ns_reclen - len;
2456 len = slp->ns_reclen;
2458 return (EWOULDBLOCK);
2460 } else if ((len + m->m_len) == slp->ns_reclen) {
2480 * Accumulate the fragments into a record.
2482 mpp = &slp->ns_frag;
2484 mpp = &((*mpp)->m_next);
2486 if (slp->ns_flag & SLP_LASTFRAG) {
2487 struct nfsrv_rec *rec;
2488 int mf = (waitflag & MB_DONTWAIT) ? M_NOWAIT : M_WAITOK;
2489 rec = kmalloc(sizeof(struct nfsrv_rec), M_NFSRVDESC, mf);
2491 m_freem(slp->ns_frag);
2493 nfs_realign(&slp->ns_frag, 10 * NFSX_UNSIGNED);
2494 rec->nr_address = NULL;
2495 rec->nr_packet = slp->ns_frag;
2496 STAILQ_INSERT_TAIL(&slp->ns_rec, rec, nr_link);
2500 slp->ns_frag = NULL;
2506 * Parse an RPC header.
2509 nfsrv_dorec(struct nfssvc_sock *slp, struct nfsd *nfsd,
2510 struct nfsrv_descript **ndp)
2512 struct nfsrv_rec *rec;
2514 struct sockaddr *nam;
2515 struct nfsrv_descript *nd;
2519 if ((slp->ns_flag & SLP_VALID) == 0 || !STAILQ_FIRST(&slp->ns_rec))
2521 rec = STAILQ_FIRST(&slp->ns_rec);
2522 STAILQ_REMOVE_HEAD(&slp->ns_rec, nr_link);
2523 KKASSERT(slp->ns_numrec > 0);
2525 nam = rec->nr_address;
2527 kfree(rec, M_NFSRVDESC);
2528 MALLOC(nd, struct nfsrv_descript *, sizeof (struct nfsrv_descript),
2529 M_NFSRVDESC, M_WAITOK);
2530 nd->nd_md = nd->nd_mrep = m;
2532 nd->nd_dpos = mtod(m, caddr_t);
2533 error = nfs_getreq(nd, nfsd, TRUE);
2536 FREE(nam, M_SONAME);
2538 kfree((caddr_t)nd, M_NFSRVDESC);
2547 * Try to assign service sockets to nfsd threads based on the number
2548 * of new rpc requests that have been queued on the service socket.
2550 * If no nfsd's are available or additional requests are pending, set the
2551 * NFSD_CHECKSLP flag so that one of the running nfsds will go look for
2552 * the work in the nfssvc_sock list when it is finished processing its
2553 * current work. This flag is only cleared when an nfsd can not find
2554 * any new work to perform.
2557 nfsrv_wakenfsd(struct nfssvc_sock *slp, int nparallel)
2561 if ((slp->ns_flag & SLP_VALID) == 0)
2565 TAILQ_FOREACH(nd, &nfsd_head, nfsd_chain) {
2566 if (nd->nfsd_flag & NFSD_WAITING) {
2567 nd->nfsd_flag &= ~NFSD_WAITING;
2569 panic("nfsd wakeup");
2572 wakeup((caddr_t)nd);
2573 if (--nparallel == 0)
2578 slp->ns_flag |= SLP_DOREC;
2579 nfsd_head_flag |= NFSD_CHECKSLP;
2582 #endif /* NFS_NOSERVER */