 * Copyright (c) 1989, 1991, 1993, 1995
 *	The Regents of the University of California.  All rights reserved.
 *
 * This code is derived from software contributed to Berkeley by
 * Rick Macklem at The University of Guelph.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by the University of
 *	California, Berkeley and its contributors.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	@(#)nfs_socket.c	8.5 (Berkeley) 3/30/95
 * $FreeBSD: src/sys/nfs/nfs_socket.c,v 1.60.2.6 2003/03/26 01:44:46 alfred Exp $
 * $DragonFly: src/sys/vfs/nfs/nfs_socket.c,v 1.29 2005/06/09 18:39:05 hsu Exp $
 */

/*
 * Socket operations for use by nfs
 */
#include <sys/param.h>
#include <sys/systm.h>
#include <sys/proc.h>
#include <sys/malloc.h>
#include <sys/mount.h>
#include <sys/kernel.h>
#include <sys/mbuf.h>
#include <sys/vnode.h>
#include <sys/protosw.h>
#include <sys/resourcevar.h>
#include <sys/socket.h>
#include <sys/socketvar.h>
#include <sys/socketops.h>
#include <sys/syslog.h>
#include <sys/thread.h>
#include <sys/tprintf.h>
#include <sys/sysctl.h>
#include <sys/signalvar.h>

#include <netinet/in.h>
#include <netinet/tcp.h>
#include <sys/thread2.h>

#include "nfsm_subs.h"
/*
 * Estimate rto for an nfs rpc sent via an unreliable datagram.
 * Use the mean and mean deviation of rtt for the appropriate type of rpc
 * for the frequent rpcs and a default for the others.
 * The justification for doing "other" this way is that these rpcs
 * happen so infrequently that timer est. would probably be stale.
 * Also, since many of these rpcs are
 * non-idempotent, a conservative timeout is desired.
 * getattr, lookup - A+2D
 * read, write     - A+4D
 * other           - nm_timeo
 */
#define	NFS_RTO(n, t) \
	((t) == 0 ? (n)->nm_timeo : \
	 ((t) < 3 ? \
	  (((((n)->nm_srtt[t-1] + 3) >> 2) + (n)->nm_sdrtt[t-1] + 1) >> 1) : \
	  ((((n)->nm_srtt[t-1] + 7) >> 3) + (n)->nm_sdrtt[t-1] + 1)))
#define	NFS_SRTT(r)	(r)->r_nmp->nm_srtt[proct[(r)->r_procnum] - 1]
#define	NFS_SDRTT(r)	(r)->r_nmp->nm_sdrtt[proct[(r)->r_procnum] - 1]
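
/*
 * Worked example of the fixed-point scaling above (illustrative only,
 * not an original comment): nfs_reply() appears to keep nm_srtt[] scaled
 * by 8 (gain 0.125 on the mean) and nm_sdrtt[] scaled by 4 (gain 0.25 on
 * the deviation).  With a smoothed rtt A of 4 ticks and a deviation D of
 * 2 ticks for a read/write rpc (t == 3), nm_srtt[2] holds about 32 and
 * nm_sdrtt[2] about 8, so NFS_RTO() yields ((32 + 7) >> 3) + 8 + 1 == 13
 * ticks, i.e. roughly A + 4D as listed above.
 */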

/*
 * External data, mostly RPC constants in XDR form
 */
extern u_int32_t rpc_reply, rpc_msgdenied, rpc_mismatch, rpc_vers,
	rpc_auth_unix, rpc_msgaccepted, rpc_call, rpc_autherr,
	rpc_auth_kerb;
extern u_int32_t nfs_prog, nqnfs_prog;
extern time_t nqnfsstarttime;
extern struct nfsstats nfsstats;
extern int nfsv3_procid[NFS_NPROCS];
extern int nfs_ticks;

/*
 * Defines which timer to use for the procnum.
 */
static int proct[NFS_NPROCS] = {
	0, 1, 0, 2, 1, 3, 3, 4, 0, 0, 0, 0, 0, 0, 0, 0, 3, 3, 0, 0, 0, 0, 0,
	0, 0, 0,
};

static int nfs_realign_test;
static int nfs_realign_count;
static int nfs_bufpackets = 4;

SYSCTL_DECL(_vfs_nfs);

SYSCTL_INT(_vfs_nfs, OID_AUTO, realign_test, CTLFLAG_RW, &nfs_realign_test, 0, "");
SYSCTL_INT(_vfs_nfs, OID_AUTO, realign_count, CTLFLAG_RW, &nfs_realign_count, 0, "");
SYSCTL_INT(_vfs_nfs, OID_AUTO, bufpackets, CTLFLAG_RW, &nfs_bufpackets, 0, "");

/*
 * There is a congestion window for outstanding rpcs maintained per mount
 * point. The cwnd size is adjusted in roughly the way that:
 * Van Jacobson, Congestion Avoidance and Control, In "Proceedings of
 * SIGCOMM '88". ACM, August 1988.
 * describes for TCP. The cwnd size is chopped in half on a retransmit timeout
 * and incremented by 1/cwnd when each rpc reply is received and a full cwnd
 * of rpcs is in progress.
 * (The sent count and cwnd are scaled for integer arith.)
 * Variants of "slow start" were tried and were found to be too much of a
 * performance hit (ave. rtt 3 times larger),
 * I suspect due to the large rtt that nfs rpcs have.
 */
#define	NFS_CWNDSCALE	256
#define	NFS_MAXCWND	(NFS_CWNDSCALE * 32)
static int nfs_backoff[8] = { 2, 4, 8, 16, 32, 64, 128, 256, };
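
/*
 * Illustrative note (not an original comment): nm_sent and nm_cwnd are
 * kept in units of NFS_CWNDSCALE, so one outstanding rpc "slot" is 256
 * and NFS_MAXCWND allows 32 slots.  The additive increase done in
 * nfs_reply(), roughly cwnd += (256 * 256 + cwnd / 2) / cwnd, adds about
 * one slot per full window of replies; e.g. with cwnd == 1024 (4 slots)
 * each reply adds about 64, so four replies grow the window by one rpc.
 */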

struct nfsrtt nfsrtt;
struct callout	nfs_timer_handle;

static int	nfs_msg (struct thread *, char *, char *);
static int	nfs_rcvlock (struct nfsreq *);
static void	nfs_rcvunlock (struct nfsreq *);
static void	nfs_realign (struct mbuf **pm, int hsiz);
static int	nfs_receive (struct nfsreq *rep, struct sockaddr **aname,
				struct mbuf **mp);
static void	nfs_softterm (struct nfsreq *rep);
static int	nfs_reconnect (struct nfsreq *rep);
#ifndef NFS_NOSERVER
static int	nfsrv_getstream (struct nfssvc_sock *, int, int *);

int (*nfsrv3_procs[NFS_NPROCS]) (struct nfsrv_descript *nd,
				    struct nfssvc_sock *slp,
				    struct thread *td,
				    struct mbuf **mreqp) = {
#endif /* NFS_NOSERVER */

/*
 * Initialize sockets and congestion for a new NFS connection.
 * We do not free the sockaddr if error.
 */
int
nfs_connect(struct nfsmount *nmp, struct nfsreq *rep)
{
	struct socket *so;
	int error, rcvreserve, sndreserve;
	int pktscale;
	struct sockaddr *saddr;
	struct sockaddr_in *sin;
	struct thread *td = &thread0; /* only used for socreate and sobind */

	nmp->nm_so = (struct socket *)0;
	saddr = nmp->nm_nam;
	error = socreate(saddr->sa_family, &nmp->nm_so, nmp->nm_sotype,
		nmp->nm_soproto, td);
	so = nmp->nm_so;
	nmp->nm_soflags = so->so_proto->pr_flags;

	/*
	 * Some servers require that the client port be a reserved port number.
	 */
	if (saddr->sa_family == AF_INET && (nmp->nm_flag & NFSMNT_RESVPORT)) {
		struct sockopt sopt;
		int ip;
		struct sockaddr_in ssin;

		bzero(&sopt, sizeof sopt);
		ip = IP_PORTRANGE_LOW;
		sopt.sopt_level = IPPROTO_IP;
		sopt.sopt_name = IP_PORTRANGE;
		sopt.sopt_val = (void *)&ip;
		sopt.sopt_valsize = sizeof(ip);
		error = sosetopt(so, &sopt);
		bzero(&ssin, sizeof ssin);
		sin = &ssin;
		sin->sin_len = sizeof (struct sockaddr_in);
		sin->sin_family = AF_INET;
		sin->sin_addr.s_addr = INADDR_ANY;
		sin->sin_port = htons(0);
		error = sobind(so, (struct sockaddr *)sin, td);
		bzero(&sopt, sizeof sopt);
		ip = IP_PORTRANGE_DEFAULT;
		sopt.sopt_level = IPPROTO_IP;
		sopt.sopt_name = IP_PORTRANGE;
		sopt.sopt_val = (void *)&ip;
		sopt.sopt_valsize = sizeof(ip);
		error = sosetopt(so, &sopt);
	}
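
	/*
	 * Summary of the sequence above (explanatory note, not an original
	 * comment): IP_PORTRANGE_LOW is set so that binding to port 0 makes
	 * the kernel pick a privileged (reserved) local port, the socket is
	 * bound to INADDR_ANY:0 to trigger that allocation, and
	 * IP_PORTRANGE_DEFAULT is then restored so later implicit binds
	 * behave normally.
	 */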

	/*
	 * Protocols that do not require connections may be optionally left
	 * unconnected for servers that reply from a port other than NFS_PORT.
	 */
	if (nmp->nm_flag & NFSMNT_NOCONN) {
		if (nmp->nm_soflags & PR_CONNREQUIRED) {
	} else {
		error = soconnect(so, nmp->nm_nam, td);

		/*
		 * Wait for the connection to complete. Cribbed from the
		 * connect system call but with the wait timing out so
		 * that interruptible mounts don't hang here for a long time.
		 */
		while ((so->so_state & SS_ISCONNECTING) && so->so_error == 0) {
			(void) tsleep((caddr_t)&so->so_timeo, 0,
			if ((so->so_state & SS_ISCONNECTING) &&
			    so->so_error == 0 && rep &&
			    (error = nfs_sigintr(nmp, rep, rep->r_td)) != 0){
				so->so_state &= ~SS_ISCONNECTING;
		error = so->so_error;
	}
	so->so_rcv.sb_timeo = (5 * hz);
	so->so_snd.sb_timeo = (5 * hz);

	/*
	 * Get buffer reservation size from sysctl, but impose reasonable
	 * bounds.
	 */
	pktscale = nfs_bufpackets;
	if (nmp->nm_sotype == SOCK_DGRAM) {
		sndreserve = (nmp->nm_wsize + NFS_MAXPKTHDR) * pktscale;
		rcvreserve = (max(nmp->nm_rsize, nmp->nm_readdirsize) +
		    NFS_MAXPKTHDR) * pktscale;
	} else if (nmp->nm_sotype == SOCK_SEQPACKET) {
		sndreserve = (nmp->nm_wsize + NFS_MAXPKTHDR) * pktscale;
		rcvreserve = (max(nmp->nm_rsize, nmp->nm_readdirsize) +
		    NFS_MAXPKTHDR) * pktscale;
	} else {
		if (nmp->nm_sotype != SOCK_STREAM)
			panic("nfscon sotype");
		if (so->so_proto->pr_flags & PR_CONNREQUIRED) {
			bzero(&sopt, sizeof sopt);
			sopt.sopt_level = SOL_SOCKET;
			sopt.sopt_name = SO_KEEPALIVE;
			sopt.sopt_val = &val;
			sopt.sopt_valsize = sizeof val;
			sosetopt(so, &sopt);
		}
		if (so->so_proto->pr_protocol == IPPROTO_TCP) {
			bzero(&sopt, sizeof sopt);
			sopt.sopt_level = IPPROTO_TCP;
			sopt.sopt_name = TCP_NODELAY;
			sopt.sopt_val = &val;
			sopt.sopt_valsize = sizeof val;
			sosetopt(so, &sopt);
		}
		sndreserve = (nmp->nm_wsize + NFS_MAXPKTHDR +
		    sizeof (u_int32_t)) * pktscale;
		rcvreserve = (nmp->nm_rsize + NFS_MAXPKTHDR +
		    sizeof (u_int32_t)) * pktscale;
	}
	error = soreserve(so, sndreserve, rcvreserve,
	    &td->td_proc->p_rlimit[RLIMIT_SBSIZE]);
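
	/*
	 * Sizing note (illustrative, not an original comment): the
	 * reservations amount to pktscale full-sized requests/replies.
	 * With the default nfs_bufpackets of 4, an 8 KB write size and an
	 * NFS_MAXPKTHDR of a few hundred bytes, the UDP case above asks
	 * for roughly 34 KB of send buffer space.
	 */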

	so->so_rcv.sb_flags |= SB_NOINTR;
	so->so_snd.sb_flags |= SB_NOINTR;

	/* Initialize other non-zero congestion variables */
	nmp->nm_srtt[0] = nmp->nm_srtt[1] = nmp->nm_srtt[2] =
		nmp->nm_srtt[3] = (NFS_TIMEO << 3);
	nmp->nm_sdrtt[0] = nmp->nm_sdrtt[1] = nmp->nm_sdrtt[2] =
		nmp->nm_sdrtt[3] = 0;
	nmp->nm_cwnd = NFS_MAXCWND / 2;	    /* Initial send window */
	nmp->nm_timeouts = 0;

/*
 * Called when a connection is broken on a reliable protocol.
 * - clean up the old socket
 * - nfs_connect() again
 * - set R_MUSTRESEND for all outstanding requests on mount point
 * If this fails the mount point is DEAD!
 * nb: Must be called with the nfs_sndlock() set on the mount point.
 */
static int
nfs_reconnect(struct nfsreq *rep)
{
	struct nfsreq *rp;
	struct nfsmount *nmp = rep->r_nmp;
	int error;

	while ((error = nfs_connect(nmp, rep)) != 0) {
		if (error == EINTR || error == ERESTART)
		(void) tsleep((caddr_t)&lbolt, 0, "nfscon", 0);
	}

	/*
	 * Loop through outstanding request list and fix up all requests
	 * on old socket.
	 */
	TAILQ_FOREACH(rp, &nfs_reqq, r_chain) {
		if (rp->r_nmp == nmp)
			rp->r_flags |= R_MUSTRESEND;
	}

/*
 * NFS disconnect. Clean up and unlink.
 */
void
nfs_disconnect(struct nfsmount *nmp)
{
	nmp->nm_so = (struct socket *)0;
}

void
nfs_safedisconnect(struct nfsmount *nmp)
{
	struct nfsreq dummyreq;

	bzero(&dummyreq, sizeof(dummyreq));
	dummyreq.r_nmp = nmp;
	dummyreq.r_td = NULL;
	nfs_rcvlock(&dummyreq);
	nfs_rcvunlock(&dummyreq);
}

/*
 * This is the nfs send routine. For connection based socket types, it
 * must be called with an nfs_sndlock() on the socket.
 * "rep == NULL" indicates that it has been called from a server.
 * For the client side:
 * - return EINTR if the RPC is terminated, 0 otherwise
 * - set R_MUSTRESEND if the send fails for any reason
 * - do any cleanup required by recoverable socket errors (?)
 * For the server side:
 * - return EINTR or ERESTART if interrupted by a signal
 * - return EPIPE if a connection is lost for connection based sockets (TCP...)
 * - do any cleanup required by recoverable socket errors (?)
 */
int
nfs_send(struct socket *so, struct sockaddr *nam, struct mbuf *top,
	 struct nfsreq *rep)
{
	struct sockaddr *sendnam;
	int error, soflags, flags;

	if (rep) {
		if (rep->r_flags & R_SOFTTERM) {
		if ((so = rep->r_nmp->nm_so) == NULL) {
			rep->r_flags |= R_MUSTRESEND;
		rep->r_flags &= ~R_MUSTRESEND;
		soflags = rep->r_nmp->nm_soflags;
	} else
		soflags = so->so_proto->pr_flags;
	if ((soflags & PR_CONNREQUIRED) || (so->so_state & SS_ISCONNECTED))
		sendnam = (struct sockaddr *)0;
	if (so->so_type == SOCK_SEQPACKET)
	error = so_pru_sosend(so, sendnam, NULL, top, NULL, flags,
	/*
	 * ENOBUFS for dgram sockets is transient and non fatal.
	 * No need to log, and no need to break a soft mount.
	 */
	if (error == ENOBUFS && so->so_type == SOCK_DGRAM) {
		if (rep)	/* do backoff retransmit on client */
			rep->r_flags |= R_MUSTRESEND;
		log(LOG_INFO, "nfs send error %d for server %s\n", error,
		    rep->r_nmp->nm_mountp->mnt_stat.f_mntfromname);
		/*
		 * Deal with errors for the client side.
		 */
		if (rep->r_flags & R_SOFTTERM)
			rep->r_flags |= R_MUSTRESEND;
		log(LOG_INFO, "nfsd send error %d\n", error);
		/*
		 * Handle any recoverable (soft) socket errors here. (?)
		 */
		if (error != EINTR && error != ERESTART &&
		    error != EWOULDBLOCK && error != EPIPE)

/*
 * Receive a Sun RPC Request/Reply. For SOCK_DGRAM, the work is all
 * done by soreceive(), but for SOCK_STREAM we must deal with the Record
 * Mark and consolidate the data into a new mbuf list.
 * nb: Sometimes TCP passes the data up to soreceive() in long lists of
 *     small mbufs.
 * For SOCK_STREAM we must be very careful to read an entire record once
 * we have read any of it, even if the system call has been interrupted.
 */
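/*
 * Record mark format note (assumed from the handling below, not an
 * original comment): each TCP record fragment is preceded by a 4 byte
 * big-endian word whose high bit marks the last fragment of an RPC
 * record and whose low 31 bits give the fragment length, which is why
 * the code masks the length with ~0x80000000 after ntohl().
 */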
static int
nfs_receive(struct nfsreq *rep, struct sockaddr **aname, struct mbuf **mp)
{
	struct socket *so;
	struct uio auio;
	struct iovec aio;
	struct mbuf *m;
	struct mbuf *control;
	u_int32_t len;
	struct sockaddr **getnam;
	int error, sotype, rcvflg;
	struct thread *td = curthread;	/* XXX */

	/*
	 * Set up arguments for soreceive()
	 */
	*mp = (struct mbuf *)0;
	*aname = (struct sockaddr *)0;
	sotype = rep->r_nmp->nm_sotype;

	/*
	 * For reliable protocols, lock against other senders/receivers
	 * in case a reconnect is necessary.
	 * For SOCK_STREAM, first get the Record Mark to find out how much
	 * more there is to get.
	 * We must lock the socket against other receivers
	 * until we have an entire rpc request/reply.
	 */
	if (sotype != SOCK_DGRAM) {
		error = nfs_sndlock(rep);
		/*
		 * Check for fatal errors and resending request.
		 *
		 * Ugh: If a reconnect attempt just happened, nm_so
		 * would have changed. NULL indicates a failed
		 * attempt that has essentially shut down this
		 * mount point.
		 */
		if (rep->r_mrep || (rep->r_flags & R_SOFTTERM)) {
		so = rep->r_nmp->nm_so;
			error = nfs_reconnect(rep);
		while (rep->r_flags & R_MUSTRESEND) {
			m = m_copym(rep->r_mreq, 0, M_COPYALL, MB_WAIT);
			nfsstats.rpcretries++;
			error = nfs_send(so, rep->r_nmp->nm_nam, m, rep);
			if (error == EINTR || error == ERESTART ||
			    (error = nfs_reconnect(rep)) != 0) {
		if (sotype == SOCK_STREAM) {
			aio.iov_base = (caddr_t) &len;
			aio.iov_len = sizeof(u_int32_t);
			auio.uio_segflg = UIO_SYSSPACE;
			auio.uio_rw = UIO_READ;
			auio.uio_resid = sizeof(u_int32_t);
				rcvflg = MSG_WAITALL;
				error = so_pru_soreceive(so, NULL, &auio, NULL,
				if (error == EWOULDBLOCK && rep) {
					if (rep->r_flags & R_SOFTTERM)
			} while (error == EWOULDBLOCK);
			if (!error && auio.uio_resid > 0) {
				/*
				 * Don't log a 0 byte receive; it means
				 * that the socket has been closed, and
				 * can happen during normal operation
				 * (forcible unmount or Solaris server).
				 */
				if (auio.uio_resid != sizeof (u_int32_t))
					"short receive (%d/%d) from nfs server %s\n",
					(int)(sizeof(u_int32_t) - auio.uio_resid),
					(int)sizeof(u_int32_t),
					rep->r_nmp->nm_mountp->mnt_stat.f_mntfromname);
			len = ntohl(len) & ~0x80000000;
			/*
			 * This is SERIOUS! We are out of sync with the sender
			 * and forcing a disconnect/reconnect is all I can do.
			 */
			if (len > NFS_MAXPACKET) {
				log(LOG_ERR, "%s (%d) from nfs server %s\n",
				    "impossible packet length",
				    rep->r_nmp->nm_mountp->mnt_stat.f_mntfromname);
			auio.uio_resid = len;
				rcvflg = MSG_WAITALL;
				error = so_pru_soreceive(so, NULL, &auio, mp,
			} while (error == EWOULDBLOCK || error == EINTR ||
			if (!error && auio.uio_resid > 0) {
				if (len != auio.uio_resid)
				    "short receive (%d/%d) from nfs server %s\n",
				    len - auio.uio_resid, len,
				    rep->r_nmp->nm_mountp->mnt_stat.f_mntfromname);
			/*
			 * NB: Since uio_resid is big, MSG_WAITALL is ignored
			 * and soreceive() will return when it has either a
			 * control msg or a data msg.
			 * We have no use for control msg., but must grab them
			 * and then throw them away so we know what is going
			 * on.
			 */
			auio.uio_resid = len = 100000000; /* Anything Big */
				error = so_pru_soreceive(so, NULL, &auio, mp,
				if (error == EWOULDBLOCK && rep) {
					if (rep->r_flags & R_SOFTTERM)
			} while (error == EWOULDBLOCK ||
				 (!error && *mp == NULL && control));
			if ((rcvflg & MSG_EOR) == 0)
			if (!error && *mp == NULL)
			len -= auio.uio_resid;
		if (error && error != EINTR && error != ERESTART) {
			*mp = (struct mbuf *)0;
				    "receive error %d from nfs server %s\n",
				    rep->r_nmp->nm_mountp->mnt_stat.f_mntfromname);
			error = nfs_sndlock(rep);
				error = nfs_reconnect(rep);
		if ((so = rep->r_nmp->nm_so) == NULL)
		if (so->so_state & SS_ISCONNECTED)
			getnam = (struct sockaddr **)0;
		auio.uio_resid = len = 1000000;
			error = so_pru_soreceive(so, getnam, &auio, mp, NULL,
			if (error == EWOULDBLOCK &&
			    (rep->r_flags & R_SOFTTERM))
		} while (error == EWOULDBLOCK);
		len -= auio.uio_resid;
		*mp = (struct mbuf *)0;
	/*
	 * Search for any mbufs that are not a multiple of 4 bytes long
	 * or with m_data not longword aligned.
	 * These could cause pointer alignment problems, so copy them to
	 * well aligned mbufs.
	 */
	nfs_realign(mp, 5 * NFSX_UNSIGNED);

/*
 * Implement receipt of reply on a socket.
 * We must search through the list of received datagrams matching them
 * with outstanding requests using the xid, until ours is found.
 */
int
nfs_reply(struct nfsreq *myrep)
{
	struct nfsreq *rep;
	struct nfsmount *nmp = myrep->r_nmp;
	struct mbuf *mrep, *md;
	struct sockaddr *nam;

	/*
	 * Loop around until we get our own reply
	 */
		/*
		 * Lock against other receivers so that I don't get stuck in
		 * sbwait() after someone else has received my reply for me.
		 * Also necessary for connection based protocols to avoid
		 * race conditions during a reconnect.
		 * If nfs_rcvlock() returns EALREADY, that means that
		 * the reply has already been received by another
		 * process and we can return immediately. In this
		 * case, the lock is not taken to avoid races with
		 */
		error = nfs_rcvlock(myrep);
		if (error == EALREADY)
		/*
		 * Get the next Rpc reply off the socket
		 */
		error = nfs_receive(myrep, &nam, &mrep);
		nfs_rcvunlock(myrep);
			/*
			 * Ignore routing errors on connectionless protocols??
			 */
			if (NFSIGNORE_SOERROR(nmp->nm_soflags, error)) {
				nmp->nm_so->so_error = 0;
				if (myrep->r_flags & R_GETONEREP)
		/*
		 * Get the xid and check that it is an rpc reply
		 */
		dpos = mtod(md, caddr_t);
		nfsm_dissect(tl, u_int32_t *, 2*NFSX_UNSIGNED);
		if (*tl != rpc_reply) {
			if (nmp->nm_flag & NFSMNT_NQNFS) {
				if (nqnfs_callback(nmp, mrep, md, dpos))
					nfsstats.rpcinvalid++;
				nfsstats.rpcinvalid++;
			nfsstats.rpcinvalid++;
			if (myrep->r_flags & R_GETONEREP)
		/*
		 * Loop through the request list to match up the reply
		 * Iff no match, just drop the datagram
		 */
		TAILQ_FOREACH(rep, &nfs_reqq, r_chain) {
			if (rep->r_mrep == NULL && rxid == rep->r_xid) {
					rt = &nfsrtt.rttl[nfsrtt.pos];
					rt->proc = rep->r_procnum;
					rt->rto = NFS_RTO(nmp, proct[rep->r_procnum]);
					rt->sent = nmp->nm_sent;
					rt->cwnd = nmp->nm_cwnd;
					rt->srtt = nmp->nm_srtt[proct[rep->r_procnum] - 1];
					rt->sdrtt = nmp->nm_sdrtt[proct[rep->r_procnum] - 1];
					rt->fsid = nmp->nm_mountp->mnt_stat.f_fsid;
					getmicrotime(&rt->tstamp);
					if (rep->r_flags & R_TIMING)
						rt->rtt = rep->r_rtt;
					nfsrtt.pos = (nfsrtt.pos + 1) % NFSRTTLOGSIZ;
				/*
				 * Update congestion window.
				 * Do the additive increase of
				 * one rpc/rtt.
				 */
				if (nmp->nm_cwnd <= nmp->nm_sent) {
					    (NFS_CWNDSCALE * NFS_CWNDSCALE +
					    (nmp->nm_cwnd >> 1)) / nmp->nm_cwnd;
					if (nmp->nm_cwnd > NFS_MAXCWND)
						nmp->nm_cwnd = NFS_MAXCWND;
				crit_enter();	/* nfs_timer interlock */
				if (rep->r_flags & R_SENT) {
					rep->r_flags &= ~R_SENT;
					nmp->nm_sent -= NFS_CWNDSCALE;
				/*
				 * Update rtt using a gain of 0.125 on the mean
				 * and a gain of 0.25 on the deviation.
				 */
				if (rep->r_flags & R_TIMING) {
					/*
					 * Since the timer resolution of
					 * NFS_HZ is so coarse, it can often
					 * result in r_rtt == 0. Since
					 * r_rtt == N means that the actual
					 * rtt is between N+dt and N+2-dt ticks,
					 * add 1.
					 */
					t1 -= (NFS_SRTT(rep) >> 3);
					t1 -= (NFS_SDRTT(rep) >> 2);
					NFS_SDRTT(rep) += t1;
				nmp->nm_timeouts = 0;
		/*
		 * If not matched to a request, drop it.
		 * If it's mine, get out.
		 */
			nfsstats.rpcunexpected++;
		} else if (rep == myrep) {
			if (rep->r_mrep == NULL)
				panic("nfsreply nil");
		if (myrep->r_flags & R_GETONEREP)

/*
 * nfs_request - goes something like this
 *	- fill in request struct
 *	- links it into list
 *	- calls nfs_send() for first transmit
 *	- calls nfs_receive() to get reply
 *	- break down rpc header and return with nfs reply pointed to
 * nb: always frees up mreq mbuf list
 */
int
nfs_request(struct vnode *vp, struct mbuf *mrest, int procnum,
	    struct thread *td, struct ucred *cred, struct mbuf **mrp,
	    struct mbuf **mdp, caddr_t *dposp)
{
	struct mbuf *mrep, *m2;
	struct nfsmount *nmp;
	struct mbuf *m, *md, *mheadend;
	char nickv[RPCX_NICKVERF];
	time_t reqtime, waituntil;
	int t1, nqlflag, cachable, error = 0, mrest_len, auth_len, auth_type;
	int trylater_delay = NQ_TRYLATERDEL, trylater_cnt = 0, failed_auth = 0;
	int verf_len, verf_type;
	char *auth_str, *verf_str;
	NFSKERBKEY_T key;		/* save session key */

	/* Reject requests while attempting a forced unmount. */
	if (vp->v_mount->mnt_kern_flag & MNTK_UNMOUNTF) {
	nmp = VFSTONFS(vp->v_mount);
	MALLOC(rep, struct nfsreq *, sizeof(struct nfsreq), M_NFSREQ, M_WAITOK);
	rep->r_procnum = procnum;

	/*
	 * Get the RPC header with authorization.
	 */
	verf_str = auth_str = (char *)0;
	if (nmp->nm_flag & NFSMNT_KERB) {
		verf_len = sizeof (nickv);
		auth_type = RPCAUTH_KERB4;
		bzero((caddr_t)key, sizeof (key));
		if (failed_auth || nfs_getnickauth(nmp, cred, &auth_str,
		    &auth_len, verf_str, verf_len)) {
			error = nfs_getauth(nmp, rep, cred, &auth_str,
			    &auth_len, verf_str, &verf_len, key);
				free((caddr_t)rep, M_NFSREQ);
		auth_type = RPCAUTH_UNIX;
		if (cred->cr_ngroups < 1)
			panic("nfsreq nogrps");
		auth_len = ((((cred->cr_ngroups - 1) > nmp->nm_numgrps) ?
			nmp->nm_numgrps : (cred->cr_ngroups - 1)) << 2) +
	m = nfsm_rpchead(cred, nmp->nm_flag, procnum, auth_type, auth_len,
	     auth_str, verf_len, verf_str, mrest, mrest_len, &mheadend, &xid);
		free(auth_str, M_TEMP);

	/*
	 * For stream protocols, insert a Sun RPC Record Mark.
	 */
	if (nmp->nm_sotype == SOCK_STREAM) {
		M_PREPEND(m, NFSX_UNSIGNED, MB_WAIT);
			free(rep, M_NFSREQ);
		*mtod(m, u_int32_t *) = htonl(0x80000000 |
			 (m->m_pkthdr.len - NFSX_UNSIGNED));
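
	/*
	 * Example of the record mark just written (illustrative, not an
	 * original comment): for a request whose RPC message is 244 bytes,
	 * the prepended word is htonl(0x80000000 | 244) == 0x800000f4,
	 * i.e. "last fragment" plus the fragment length.
	 */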
	if (nmp->nm_flag & NFSMNT_SOFT)
		rep->r_retry = nmp->nm_retry;
		rep->r_retry = NFS_MAXREXMIT + 1;	/* past clip limit */
	rep->r_rtt = rep->r_rexmit = 0;
	if (proct[procnum] > 0)
		rep->r_flags = R_TIMING | R_MASKTIMER;
		rep->r_flags = R_MASKTIMER;

	/*
	 * Do the client side RPC.
	 */
	nfsstats.rpcrequests++;

	/*
	 * Chain request into list of outstanding requests. Be sure
	 * to put it LAST so timer finds oldest requests first. Note
	 * that R_MASKTIMER is set at the moment to prevent any timer
	 * action on this request while we are still doing processing on
	 * it below. splsoftclock() primarily protects nm_sent. Note
	 * that we may block in this code so there is no atomicity guarantee.
	 */
	TAILQ_INSERT_TAIL(&nfs_reqq, rep, r_chain);

	/* Get send time for nqnfs */
	reqtime = time_second;

	/*
	 * If backing off another request or avoiding congestion, don't
	 * send this one now but let timer do it. If not timing a request,
	 */
	if (nmp->nm_so && (nmp->nm_sotype != SOCK_DGRAM ||
		(nmp->nm_flag & NFSMNT_DUMBTIMR) ||
		nmp->nm_sent < nmp->nm_cwnd)) {
		if (nmp->nm_soflags & PR_CONNREQUIRED)
			error = nfs_sndlock(rep);
			m2 = m_copym(m, 0, M_COPYALL, MB_WAIT);
			error = nfs_send(nmp->nm_so, nmp->nm_nam, m2, rep);
			if (nmp->nm_soflags & PR_CONNREQUIRED)
		if (!error && (rep->r_flags & R_MUSTRESEND) == 0) {
			nmp->nm_sent += NFS_CWNDSCALE;
			rep->r_flags |= R_SENT;

	/*
	 * Let the timer do what it will with the request, then
	 * wait for the reply from our send or the timer's.
	 */
	rep->r_flags &= ~R_MASKTIMER;
	if (!error || error == EPIPE)
		error = nfs_reply(rep);

	/*
	 * RPC done, unlink the request.
	 */
	TAILQ_REMOVE(&nfs_reqq, rep, r_chain);

	/*
	 * Decrement the outstanding request count.
	 */
	if (rep->r_flags & R_SENT) {
		rep->r_flags &= ~R_SENT;
		nmp->nm_sent -= NFS_CWNDSCALE;

	/*
	 * If there was a successful reply and a tprintf msg,
	 * tprintf a response.
	 */
	if (!error && (rep->r_flags & R_TPRINTFMSG))
		nfs_msg(rep->r_td, nmp->nm_mountp->mnt_stat.f_mntfromname,
		m_freem(rep->r_mreq);
		free((caddr_t)rep, M_NFSREQ);

	/*
	 * break down the rpc header and check if ok
	 */
	nfsm_dissect(tl, u_int32_t *, 3 * NFSX_UNSIGNED);
	if (*tl++ == rpc_msgdenied) {
		if (*tl == rpc_mismatch)
		else if ((nmp->nm_flag & NFSMNT_KERB) && *tl++ == rpc_autherr) {
				mheadend->m_next = (struct mbuf *)0;
				m_freem(rep->r_mreq);
		m_freem(rep->r_mreq);
		free((caddr_t)rep, M_NFSREQ);

	/*
	 * Grab any Kerberos verifier, otherwise just throw it away.
	 */
	verf_type = fxdr_unsigned(int, *tl++);
	i = fxdr_unsigned(int32_t, *tl);
	if ((nmp->nm_flag & NFSMNT_KERB) && verf_type == RPCAUTH_KERB4) {
		error = nfs_savenickauth(nmp, cred, i, key, &md, &dpos, mrep);
		nfsm_adv(nfsm_rndup(i));
	nfsm_dissect(tl, u_int32_t *, NFSX_UNSIGNED);
	nfsm_dissect(tl, u_int32_t *, NFSX_UNSIGNED);
		error = fxdr_unsigned(int, *tl);
		if ((nmp->nm_flag & NFSMNT_NFSV3) &&
			error == NFSERR_TRYLATER) {
			waituntil = time_second + trylater_delay;
			while (time_second < waituntil)
				(void) tsleep((caddr_t)&lbolt,
			trylater_delay *= nfs_backoff[trylater_cnt];
			if (trylater_cnt < 7)

		/*
		 * If the File Handle was stale, invalidate the
		 * lookup cache, just in case.
		 */
		if (error == ESTALE)
			cache_inval_vp(vp, CINV_CHILDREN);
		if (nmp->nm_flag & NFSMNT_NFSV3) {
			error |= NFSERR_RETERR;
		m_freem(rep->r_mreq);
		free((caddr_t)rep, M_NFSREQ);

	/*
	 * For nqnfs, get any lease in reply
	 */
	if (nmp->nm_flag & NFSMNT_NQNFS) {
		nfsm_dissect(tl, u_int32_t *, NFSX_UNSIGNED);
			nqlflag = fxdr_unsigned(int, *tl);
			nfsm_dissect(tl, u_int32_t *, 4*NFSX_UNSIGNED);
			cachable = fxdr_unsigned(int, *tl++);
			reqtime += fxdr_unsigned(int, *tl++);
			if (reqtime > time_second) {
				frev = fxdr_hyper(tl);
				nqnfs_clientlease(nmp, np, nqlflag,
				    cachable, reqtime, frev);
	m_freem(rep->r_mreq);
	FREE((caddr_t)rep, M_NFSREQ);
	error = EPROTONOSUPPORT;
	m_freem(rep->r_mreq);
	free((caddr_t)rep, M_NFSREQ);

#ifndef NFS_NOSERVER
/*
 * Generate the rpc reply header
 * siz arg. is used to decide if adding a cluster is worthwhile
 */
int
nfs_rephead(int siz, struct nfsrv_descript *nd, struct nfssvc_sock *slp,
	    int err, int cache, u_quad_t *frev, struct mbuf **mrq,
	    struct mbuf **mbp, caddr_t *bposp)
{
	u_int32_t *tl;
	struct mbuf *mreq;
	caddr_t bpos;
	struct mbuf *mb, *mb2;

	siz += RPC_REPLYSIZ;
	mb = mreq = m_getl(max_hdr + siz, MB_WAIT, MT_DATA, M_PKTHDR, NULL);
	mreq->m_pkthdr.len = 0;
	/*
	 * If this is not a cluster, try and leave leading space
	 * for the lower level headers.
	 */
	if ((max_hdr + siz) < MINCLSIZE)
		mreq->m_data += max_hdr;
	tl = mtod(mreq, u_int32_t *);
	mreq->m_len = 6 * NFSX_UNSIGNED;
	bpos = ((caddr_t)tl) + mreq->m_len;
	*tl++ = txdr_unsigned(nd->nd_retxid);
	if (err == ERPCMISMATCH || (err & NFSERR_AUTHERR)) {
		*tl++ = rpc_msgdenied;
		if (err & NFSERR_AUTHERR) {
			*tl++ = rpc_autherr;
			*tl = txdr_unsigned(err & ~NFSERR_AUTHERR);
			mreq->m_len -= NFSX_UNSIGNED;
			bpos -= NFSX_UNSIGNED;
			*tl++ = rpc_mismatch;
			*tl++ = txdr_unsigned(RPC_VER2);
			*tl = txdr_unsigned(RPC_VER2);
		*tl++ = rpc_msgaccepted;

		/*
		 * For Kerberos authentication, we must send the nickname
		 * verifier back, otherwise just RPCAUTH_NULL.
		 */
		if (nd->nd_flag & ND_KERBFULL) {
			struct nfsuid *nuidp;
			struct timeval ktvin, ktvout;

			for (nuidp = NUIDHASH(slp, nd->nd_cr.cr_uid)->lh_first;
			    nuidp != 0; nuidp = nuidp->nu_hash.le_next) {
				if (nuidp->nu_cr.cr_uid == nd->nd_cr.cr_uid &&
				    (!nd->nd_nam2 || netaddr_match(NU_NETFAM(nuidp),
				     &nuidp->nu_haddr, nd->nd_nam2)))
				    txdr_unsigned(nuidp->nu_timestamp.tv_sec - 1);
				    txdr_unsigned(nuidp->nu_timestamp.tv_usec);

				/*
				 * Encrypt the timestamp in ecb mode using the
				 */
				*tl++ = rpc_auth_kerb;
				*tl++ = txdr_unsigned(3 * NFSX_UNSIGNED);
				*tl = ktvout.tv_sec;
				nfsm_build(tl, u_int32_t *, 3 * NFSX_UNSIGNED);
				*tl++ = ktvout.tv_usec;
				*tl++ = txdr_unsigned(nuidp->nu_cr.cr_uid);
			*tl = txdr_unsigned(RPC_PROGUNAVAIL);
			*tl = txdr_unsigned(RPC_PROGMISMATCH);
			nfsm_build(tl, u_int32_t *, 2 * NFSX_UNSIGNED);
			if (nd->nd_flag & ND_NQNFS) {
				*tl++ = txdr_unsigned(3);
				*tl = txdr_unsigned(3);
				*tl++ = txdr_unsigned(2);
				*tl = txdr_unsigned(3);
			*tl = txdr_unsigned(RPC_PROCUNAVAIL);
			*tl = txdr_unsigned(RPC_GARBAGE);
			if (err != NFSERR_RETVOID) {
				nfsm_build(tl, u_int32_t *, NFSX_UNSIGNED);
					*tl = txdr_unsigned(nfsrv_errmap(nd, err));

	/*
	 * For nqnfs, piggyback lease as requested.
	 */
	if ((nd->nd_flag & ND_NQNFS) && err == 0) {
		if (nd->nd_flag & ND_LEASE) {
			nfsm_build(tl, u_int32_t *, 5 * NFSX_UNSIGNED);
			*tl++ = txdr_unsigned(nd->nd_flag & ND_LEASE);
			*tl++ = txdr_unsigned(cache);
			*tl++ = txdr_unsigned(nd->nd_duration);
			txdr_hyper(*frev, tl);
			nfsm_build(tl, u_int32_t *, NFSX_UNSIGNED);
	if (err != 0 && err != NFSERR_RETVOID)
		nfsstats.srvrpc_errs++;
#endif /* NFS_NOSERVER */

/*
 * Scan the nfsreq list and retransmit any requests that have timed out
 * To avoid retransmission attempts on STREAM sockets (in the future) make
 * sure to set the r_retry field to 0 (implies nm_retry == 0).
 */
void
nfs_timer(void *arg /* never used */)
{
	struct nfsreq *rep;
	struct socket *so;
	struct nfsmount *nmp;
	int timeo;
	int error;
#ifndef NFS_NOSERVER
	static long lasttime = 0;
	struct nfssvc_sock *slp;
	u_quad_t cur_usec;
#endif /* NFS_NOSERVER */
	struct thread *td = &thread0; /* XXX for credentials, will break if sleep */

	TAILQ_FOREACH(rep, &nfs_reqq, r_chain) {
		if (rep->r_mrep || (rep->r_flags & (R_SOFTTERM|R_MASKTIMER)))
		if (nfs_sigintr(nmp, rep, rep->r_td)) {
		if (rep->r_rtt >= 0) {
			if (nmp->nm_flag & NFSMNT_DUMBTIMR)
				timeo = nmp->nm_timeo;
				timeo = NFS_RTO(nmp, proct[rep->r_procnum]);
			if (nmp->nm_timeouts > 0)
				timeo *= nfs_backoff[nmp->nm_timeouts - 1];
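			/*
			 * Backoff example (illustrative, not an original
			 * comment): after three timeouts on this mount,
			 * nm_timeouts == 3 and the computed timeo is
			 * multiplied by nfs_backoff[2] == 8, so the wait
			 * grows exponentially, capping at nfs_backoff[7],
			 * i.e. 256 times the base timeout.
			 */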
			if (rep->r_rtt <= timeo)
			if (nmp->nm_timeouts < 8)
		/*
		 * Check for server not responding
		 */
		if ((rep->r_flags & R_TPRINTFMSG) == 0 &&
		     rep->r_rexmit > nmp->nm_deadthresh) {
				nmp->nm_mountp->mnt_stat.f_mntfromname,
			rep->r_flags |= R_TPRINTFMSG;
		if (rep->r_rexmit >= rep->r_retry) {	/* too many */
			nfsstats.rpctimeouts++;
		if (nmp->nm_sotype != SOCK_DGRAM) {
			if (++rep->r_rexmit > NFS_MAXREXMIT)
				rep->r_rexmit = NFS_MAXREXMIT;
		if ((so = nmp->nm_so) == NULL)

		/*
		 * If there is enough space and the window allows..
		 *	Resend it
		 * Set r_rtt to -1 in case we fail to send it now.
		 */
		if (sbspace(&so->so_snd) >= rep->r_mreq->m_pkthdr.len &&
		   ((nmp->nm_flag & NFSMNT_DUMBTIMR) ||
		    (rep->r_flags & R_SENT) ||
		    nmp->nm_sent < nmp->nm_cwnd) &&
		   (m = m_copym(rep->r_mreq, 0, M_COPYALL, MB_DONTWAIT))){
			if ((nmp->nm_flag & NFSMNT_NOCONN) == 0)
			    error = so_pru_send(so, 0, m, (struct sockaddr *)0,
				     (struct mbuf *)0, td);
			    error = so_pru_send(so, 0, m, nmp->nm_nam,
				     (struct mbuf *)0, td);
				if (NFSIGNORE_SOERROR(nmp->nm_soflags, error))
				/*
				 * Iff first send, start timing
				 * else turn timing off, backoff timer
				 * and divide congestion window by 2.
				 */
				if (rep->r_flags & R_SENT) {
					rep->r_flags &= ~R_TIMING;
					if (++rep->r_rexmit > NFS_MAXREXMIT)
						rep->r_rexmit = NFS_MAXREXMIT;
					if (nmp->nm_cwnd < NFS_CWNDSCALE)
						nmp->nm_cwnd = NFS_CWNDSCALE;
					nfsstats.rpcretries++;
					rep->r_flags |= R_SENT;
					nmp->nm_sent += NFS_CWNDSCALE;
#ifndef NFS_NOSERVER
	/*
	 * Call the nqnfs server timer once a second to handle leases.
	 */
	if (lasttime != time_second) {
		lasttime = time_second;

	/*
	 * Scan the write gathering queues for writes that need to be
	 * completed now.
	 */
	cur_usec = nfs_curusec();
	TAILQ_FOREACH(slp, &nfssvc_sockhead, ns_chain) {
		if (slp->ns_tq.lh_first && slp->ns_tq.lh_first->nd_time <= cur_usec)
			nfsrv_wakenfsd(slp, 1);
	}
#endif /* NFS_NOSERVER */
	callout_reset(&nfs_timer_handle, nfs_ticks, nfs_timer, NULL);

/*
 * Mark all of an nfs mount's outstanding requests with R_SOFTTERM and
 * wait for all requests to complete. This is used by forced unmounts
 * to terminate any outstanding RPCs.
 */
int
nfs_nmcancelreqs(struct nfsmount *nmp)
{
	struct nfsreq *req;
	int i;

	TAILQ_FOREACH(req, &nfs_reqq, r_chain) {
		if (nmp != req->r_nmp || req->r_mrep != NULL ||
		    (req->r_flags & R_SOFTTERM))
	for (i = 0; i < 30; i++) {
		TAILQ_FOREACH(req, &nfs_reqq, r_chain) {
			if (nmp == req->r_nmp)
		tsleep(&lbolt, 0, "nfscancel", 0);

/*
 * Flag a request as being about to terminate (due to NFSMNT_INT/NFSMNT_SOFT).
 * The nm_sent count is decremented now to avoid deadlocks when the process in
 * soreceive() hasn't yet managed to send its own request.
 *
 * This routine must be called at splsoftclock() to protect r_flags and
 * nm_sent.
 */
static void
nfs_softterm(struct nfsreq *rep)
{
	rep->r_flags |= R_SOFTTERM;

	if (rep->r_flags & R_SENT) {
		rep->r_nmp->nm_sent -= NFS_CWNDSCALE;
		rep->r_flags &= ~R_SENT;
	}
}

/*
 * Test for a termination condition pending on the process.
 * This is used for NFSMNT_INT mounts.
 */
int
nfs_sigintr(struct nfsmount *nmp, struct nfsreq *rep, struct thread *td)
{
	sigset_t tmpset;
	struct proc *p;

	if (rep && (rep->r_flags & R_SOFTTERM))
	/* Terminate all requests while attempting a forced unmount. */
	if (nmp->nm_mountp->mnt_kern_flag & MNTK_UNMOUNTF)
	if (!(nmp->nm_flag & NFSMNT_INT))
	/* td might be NULL YYY */
	if (td == NULL || (p = td->td_proc) == NULL)

	tmpset = p->p_siglist;
	SIGSETNAND(tmpset, p->p_sigmask);
	SIGSETNAND(tmpset, p->p_sigignore);
	if (SIGNOTEMPTY(p->p_siglist) && NFSINT_SIGMASK(tmpset))

/*
 * Lock a socket against others.
 * Necessary for STREAM sockets to ensure you get an entire rpc request/reply
 * and also to avoid race conditions between the processes with nfs requests
 * in progress when a reconnect is necessary.
 */
int
nfs_sndlock(struct nfsreq *rep)
{
	int *statep = &rep->r_nmp->nm_state;

	if (rep->r_nmp->nm_flag & NFSMNT_INT)
	while (*statep & NFSSTA_SNDLOCK) {
		*statep |= NFSSTA_WANTSND;
		if (nfs_sigintr(rep->r_nmp, rep, td)) {
		tsleep((caddr_t)statep, slpflag, "nfsndlck", slptimeo);
		if (slpflag == PCATCH) {
	/* Always fail if our request has been cancelled. */
	if ((rep->r_flags & R_SOFTTERM))
	*statep |= NFSSTA_SNDLOCK;

/*
 * Unlock the stream socket for others.
 */
void
nfs_sndunlock(struct nfsreq *rep)
{
	int *statep = &rep->r_nmp->nm_state;

	if ((*statep & NFSSTA_SNDLOCK) == 0)
		panic("nfs sndunlock");
	*statep &= ~NFSSTA_SNDLOCK;
	if (*statep & NFSSTA_WANTSND) {
		*statep &= ~NFSSTA_WANTSND;
		wakeup((caddr_t)statep);

static int
nfs_rcvlock(struct nfsreq *rep)
{
	int *statep = &rep->r_nmp->nm_state;

	/*
	 * Unconditionally check for completion in case another nfsiod
	 * got the packet while the caller was blocked, before the caller
	 * called us. Packet reception is handled by mainline code which
	 * is protected by the BGL at the moment.
	 *
	 * We do not strictly need the second check just before the
	 * tsleep(), but it's good defensive programming.
	 */
	if (rep->r_mrep != NULL)
	if (rep->r_nmp->nm_flag & NFSMNT_INT)
	while (*statep & NFSSTA_RCVLOCK) {
		if (nfs_sigintr(rep->r_nmp, rep, rep->r_td)) {
		if (rep->r_mrep != NULL) {
		*statep |= NFSSTA_WANTRCV;
		tsleep((caddr_t)statep, slpflag, "nfsrcvlk", slptimeo);
		/*
		 * If our reply was received while we were sleeping,
		 * then just return without taking the lock to avoid a
		 * situation where a single iod could 'capture' the
		 */
		if (rep->r_mrep != NULL) {
		if (slpflag == PCATCH) {
	*statep |= NFSSTA_RCVLOCK;
	rep->r_nmp->nm_rcvlock_td = curthread;	/* DEBUGGING */

/*
 * Unlock the stream socket for others.
 */
static void
nfs_rcvunlock(struct nfsreq *rep)
{
	int *statep = &rep->r_nmp->nm_state;

	if ((*statep & NFSSTA_RCVLOCK) == 0)
		panic("nfs rcvunlock");
	rep->r_nmp->nm_rcvlock_td = (void *)-1;	/* DEBUGGING */
	*statep &= ~NFSSTA_RCVLOCK;
	if (*statep & NFSSTA_WANTRCV) {
		*statep &= ~NFSSTA_WANTRCV;
		wakeup((caddr_t)statep);

/*
 * Check for badly aligned mbuf data and realign by copying the unaligned
 * portion of the data into a new mbuf chain and freeing the portions
 * of the old chain that were replaced.
 *
 * We cannot simply realign the data within the existing mbuf chain
 * because the underlying buffers may contain other rpc commands and
 * we cannot afford to overwrite them.
 *
 * We would prefer to avoid this situation entirely. The situation does
 * not occur with NFS/UDP and is supposed to only occasionally occur
 * with TCP. Use vfs.nfs.realign_count and realign_test to check this.
 */
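/*
 * Alignment note (assumed, not an original comment): XDR encodes
 * everything in 4 byte units, so the dissect macros read 32 bit words
 * straight out of mbuf data.  An mbuf whose m_data is not longword
 * aligned, or whose length is not a multiple of 4, would make those word
 * accesses misaligned, which is why such chains are copied below.
 */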
static void
nfs_realign(struct mbuf **pm, int hsiz)
{
	struct mbuf *m;
	struct mbuf *n = NULL;

	while ((m = *pm) != NULL) {
		if ((m->m_len & 0x3) || (mtod(m, intptr_t) & 0x3)) {
			n = m_getl(m->m_len, MB_WAIT, MT_DATA, 0, NULL);

	/*
	 * If n is non-NULL, loop on m copying data, then replace the
	 * portion of the chain that had to be realigned.
	 */
		++nfs_realign_count;
			m_copyback(n, off, m->m_len, mtod(m, caddr_t));

#ifndef NFS_NOSERVER

/*
 * Parse an RPC request
 * - fill in the cred struct.
 */
int
nfs_getreq(struct nfsrv_descript *nd, struct nfsd *nfsd, int has_header)
{
	caddr_t dpos, cp2, cp;
	u_int32_t nfsvers, auth_type;
	int error = 0, nqnfs = 0, ticklen;
	struct mbuf *mrep, *md;
	struct nfsuid *nuidp;
	struct timeval tvin, tvout;
#if 0				/* until encrypted keys are implemented */
	NFSKERBKEYSCHED_T keys;	/* stores key schedule */
#endif

	nfsm_dissect(tl, u_int32_t *, 10 * NFSX_UNSIGNED);
	nd->nd_retxid = fxdr_unsigned(u_int32_t, *tl++);
	if (*tl++ != rpc_call) {
		nfsm_dissect(tl, u_int32_t *, 8 * NFSX_UNSIGNED);
	if (*tl++ != rpc_vers) {
		nd->nd_repstat = ERPCMISMATCH;
		nd->nd_procnum = NFSPROC_NOOP;
	if (*tl != nfs_prog) {
		if (*tl == nqnfs_prog)
			nd->nd_repstat = EPROGUNAVAIL;
			nd->nd_procnum = NFSPROC_NOOP;
	nfsvers = fxdr_unsigned(u_int32_t, *tl++);
	if (((nfsvers < NFS_VER2 || nfsvers > NFS_VER3) && !nqnfs) ||
		(nfsvers != NQNFS_VER3 && nqnfs)) {
		nd->nd_repstat = EPROGMISMATCH;
		nd->nd_procnum = NFSPROC_NOOP;
		nd->nd_flag = (ND_NFSV3 | ND_NQNFS);
	else if (nfsvers == NFS_VER3)
		nd->nd_flag = ND_NFSV3;
	nd->nd_procnum = fxdr_unsigned(u_int32_t, *tl++);
	if (nd->nd_procnum == NFSPROC_NULL)
	if (nd->nd_procnum >= NFS_NPROCS ||
		(!nqnfs && nd->nd_procnum >= NQNFSPROC_GETLEASE) ||
		(!nd->nd_flag && nd->nd_procnum > NFSV2PROC_STATFS)) {
		nd->nd_repstat = EPROCUNAVAIL;
		nd->nd_procnum = NFSPROC_NOOP;
	if ((nd->nd_flag & ND_NFSV3) == 0)
		nd->nd_procnum = nfsv3_procid[nd->nd_procnum];
	len = fxdr_unsigned(int, *tl++);
	if (len < 0 || len > RPCAUTH_MAXSIZ) {
	nd->nd_flag &= ~ND_KERBAUTH;
	/*
	 * Handle auth_unix or auth_kerb.
	 */
	if (auth_type == rpc_auth_unix) {
		len = fxdr_unsigned(int, *++tl);
		if (len < 0 || len > NFS_MAXNAMLEN) {
		nfsm_adv(nfsm_rndup(len));
		nfsm_dissect(tl, u_int32_t *, 3 * NFSX_UNSIGNED);
		bzero((caddr_t)&nd->nd_cr, sizeof (struct ucred));
		nd->nd_cr.cr_ref = 1;
		nd->nd_cr.cr_uid = fxdr_unsigned(uid_t, *tl++);
		nd->nd_cr.cr_gid = fxdr_unsigned(gid_t, *tl++);
		len = fxdr_unsigned(int, *tl);
		if (len < 0 || len > RPCAUTH_UNIXGIDS) {
		nfsm_dissect(tl, u_int32_t *, (len + 2) * NFSX_UNSIGNED);
		for (i = 1; i <= len; i++)
				nd->nd_cr.cr_groups[i] = fxdr_unsigned(gid_t, *tl++);
		nd->nd_cr.cr_ngroups = (len >= NGROUPS) ? NGROUPS : (len + 1);
		if (nd->nd_cr.cr_ngroups > 1)
			nfsrvw_sort(nd->nd_cr.cr_groups, nd->nd_cr.cr_ngroups);
		len = fxdr_unsigned(int, *++tl);
		if (len < 0 || len > RPCAUTH_MAXSIZ) {
			nfsm_adv(nfsm_rndup(len));
	} else if (auth_type == rpc_auth_kerb) {
		switch (fxdr_unsigned(int, *tl++)) {
		case RPCAKN_FULLNAME:
			ticklen = fxdr_unsigned(int, *tl);
			*((u_int32_t *)nfsd->nfsd_authstr) = *tl;
			uio.uio_resid = nfsm_rndup(ticklen) + NFSX_UNSIGNED;
			nfsd->nfsd_authlen = uio.uio_resid + NFSX_UNSIGNED;
			if (uio.uio_resid > (len - 2 * NFSX_UNSIGNED)) {
			uio.uio_segflg = UIO_SYSSPACE;
			iov.iov_base = (caddr_t)&nfsd->nfsd_authstr[4];
			iov.iov_len = RPCAUTH_MAXSIZ - 4;
			nfsm_mtouio(&uio, uio.uio_resid);
			nfsm_dissect(tl, u_int32_t *, 2 * NFSX_UNSIGNED);
			if (*tl++ != rpc_auth_kerb ||
				fxdr_unsigned(int, *tl) != 4 * NFSX_UNSIGNED) {
				printf("Bad kerb verifier\n");
				nd->nd_repstat = (NFSERR_AUTHERR|AUTH_BADVERF);
				nd->nd_procnum = NFSPROC_NOOP;
			nfsm_dissect(cp, caddr_t, 4 * NFSX_UNSIGNED);
			tl = (u_int32_t *)cp;
			if (fxdr_unsigned(int, *tl) != RPCAKN_FULLNAME) {
				printf("Not fullname kerb verifier\n");
				nd->nd_repstat = (NFSERR_AUTHERR|AUTH_BADVERF);
				nd->nd_procnum = NFSPROC_NOOP;
			cp += NFSX_UNSIGNED;
			bcopy(cp, nfsd->nfsd_verfstr, 3 * NFSX_UNSIGNED);
			nfsd->nfsd_verflen = 3 * NFSX_UNSIGNED;
			nd->nd_flag |= ND_KERBFULL;
			nfsd->nfsd_flag |= NFSD_NEEDAUTH;
		case RPCAKN_NICKNAME:
			if (len != 2 * NFSX_UNSIGNED) {
				printf("Kerb nickname short\n");
				nd->nd_repstat = (NFSERR_AUTHERR|AUTH_BADCRED);
				nd->nd_procnum = NFSPROC_NOOP;
			nickuid = fxdr_unsigned(uid_t, *tl);
			nfsm_dissect(tl, u_int32_t *, 2 * NFSX_UNSIGNED);
			if (*tl++ != rpc_auth_kerb ||
				fxdr_unsigned(int, *tl) != 3 * NFSX_UNSIGNED) {
				printf("Kerb nick verifier bad\n");
				nd->nd_repstat = (NFSERR_AUTHERR|AUTH_BADVERF);
				nd->nd_procnum = NFSPROC_NOOP;
			nfsm_dissect(tl, u_int32_t *, 3 * NFSX_UNSIGNED);
			tvin.tv_sec = *tl++;

			for (nuidp = NUIDHASH(nfsd->nfsd_slp,nickuid)->lh_first;
			    nuidp != 0; nuidp = nuidp->nu_hash.le_next) {
				if (nuidp->nu_cr.cr_uid == nickuid &&
				     netaddr_match(NU_NETFAM(nuidp),
				      &nuidp->nu_haddr, nd->nd_nam2)))
					(NFSERR_AUTHERR|AUTH_REJECTCRED);
				nd->nd_procnum = NFSPROC_NOOP;

			/*
			 * Now, decrypt the timestamp using the session key
			 */
			tvout.tv_sec = fxdr_unsigned(long, tvout.tv_sec);
			tvout.tv_usec = fxdr_unsigned(long, tvout.tv_usec);
			if (nuidp->nu_expire < time_second ||
			    nuidp->nu_timestamp.tv_sec > tvout.tv_sec ||
			    (nuidp->nu_timestamp.tv_sec == tvout.tv_sec &&
			     nuidp->nu_timestamp.tv_usec > tvout.tv_usec)) {
				nuidp->nu_expire = 0;
					(NFSERR_AUTHERR|AUTH_REJECTVERF);
				nd->nd_procnum = NFSPROC_NOOP;
			nfsrv_setcred(&nuidp->nu_cr, &nd->nd_cr);
			nd->nd_flag |= ND_KERBNICK;
		nd->nd_repstat = (NFSERR_AUTHERR | AUTH_REJECTCRED);
		nd->nd_procnum = NFSPROC_NOOP;

	/*
	 * For nqnfs, get piggybacked lease request.
	 */
	if (nqnfs && nd->nd_procnum != NQNFSPROC_EVICTED) {
		nfsm_dissect(tl, u_int32_t *, NFSX_UNSIGNED);
		nd->nd_flag |= fxdr_unsigned(int, *tl);
		if (nd->nd_flag & ND_LEASE) {
			nfsm_dissect(tl, u_int32_t *, NFSX_UNSIGNED);
			nd->nd_duration = fxdr_unsigned(int32_t, *tl);
			nd->nd_duration = NQ_MINLEASE;
		nd->nd_duration = NQ_MINLEASE;

/*
 * Send a message to the originating process's terminal. The thread and/or
 * process may be NULL. YYY the thread should not be NULL but there may
 * still be some uio_td's that are being passed as NULL through to here.
 */
static int
nfs_msg(struct thread *td, char *server, char *msg)
{
	tpr_t tpr;

	if (td && td->td_proc)
		tpr = tprintf_open(td->td_proc);
	tprintf(tpr, "nfs server %s: %s\n", server, msg);

#ifndef NFS_NOSERVER
/*
 * Socket upcall routine for the nfsd sockets.
 * The caddr_t arg is a pointer to the "struct nfssvc_sock".
 * Essentially do as much as possible non-blocking, else punt and it will
 * be called with MB_WAIT from an nfsd.
 */
void
nfsrv_rcv(struct socket *so, void *arg, int waitflag)
{
	struct nfssvc_sock *slp = (struct nfssvc_sock *)arg;
	struct sockaddr *nam;
	int nparallel_wakeup = 0;

	if ((slp->ns_flag & SLP_VALID) == 0)

	/*
	 * Do not allow an infinite number of completed RPC records to build
	 * up before we stop reading data from the socket. Otherwise we could
	 * end up holding onto an unreasonable number of mbufs for requests
	 * waiting for service.
	 *
	 * This should give pretty good feedback to the TCP
	 * layer and prevents a memory crunch for other protocols.
	 *
	 * Note that the same service socket can be dispatched to several
	 * nfs servers simultaneously.
	 *
	 * The tcp protocol callback calls us with MB_DONTWAIT.
	 * nfsd calls us with MB_WAIT (typically).
	 */
	if (waitflag == MB_DONTWAIT && slp->ns_numrec >= nfsd_waiting / 2 + 1) {
		slp->ns_flag |= SLP_NEEDQ;

	/*
	 * Handle protocol specifics to parse an RPC request. We always
	 * pull from the socket using non-blocking I/O.
	 */
	if (so->so_type == SOCK_STREAM) {
		/*
		 * The data has to be read in an orderly fashion from a TCP
		 * stream, unlike a UDP socket. It is possible for soreceive
		 * and/or nfsrv_getstream() to block, so make sure only one
		 * entity is messing around with the TCP stream at any given
		 * moment. The receive sockbuf's lock in soreceive is not
		 * sufficient.
		 *
		 * Note that this procedure can be called from any number of
		 * NFS servers *OR* can be upcalled directly from a TCP
		 * protocol thread.
		 */
		if (slp->ns_flag & SLP_GETSTREAM) {
			slp->ns_flag |= SLP_NEEDQ;
		slp->ns_flag |= SLP_GETSTREAM;

		auio.uio_resid = 1000000000;
		flags = MSG_DONTWAIT;
		error = so_pru_soreceive(so, &nam, &auio, &mp, NULL, &flags);
		if (error || mp == (struct mbuf *)0) {
			if (error == EWOULDBLOCK)
				slp->ns_flag |= SLP_NEEDQ;
				slp->ns_flag |= SLP_DISCONN;
			slp->ns_flag &= ~SLP_GETSTREAM;
		if (slp->ns_rawend) {
			slp->ns_rawend->m_next = m;
			slp->ns_cc += 1000000000 - auio.uio_resid;
			slp->ns_cc = 1000000000 - auio.uio_resid;

		/*
		 * Now try and parse as many record(s) as we can out of the
		 * raw stream data.
		 */
		error = nfsrv_getstream(slp, waitflag, &nparallel_wakeup);
				slp->ns_flag |= SLP_DISCONN;
				slp->ns_flag |= SLP_NEEDQ;
		slp->ns_flag &= ~SLP_GETSTREAM;
		/*
		 * For UDP soreceive typically pulls just one packet, loop
		 * to get the whole batch.
		 */
			auio.uio_resid = 1000000000;
			flags = MSG_DONTWAIT;
			error = so_pru_soreceive(so, &nam, &auio, &mp, NULL,
				struct nfsrv_rec *rec;
				int mf = (waitflag & MB_DONTWAIT) ?
					M_NOWAIT : M_WAITOK;
				rec = malloc(sizeof(struct nfsrv_rec),
						FREE(nam, M_SONAME);
				nfs_realign(&mp, 10 * NFSX_UNSIGNED);
				rec->nr_address = nam;
				rec->nr_packet = mp;
				STAILQ_INSERT_TAIL(&slp->ns_rec, rec, nr_link);
			if ((so->so_proto->pr_flags & PR_CONNREQUIRED)
			    && error != EWOULDBLOCK) {
				slp->ns_flag |= SLP_DISCONN;

	/*
	 * If we were upcalled from the tcp protocol layer and we have
	 * fully parsed records ready to go, or there is new data pending,
	 * or something went wrong, try to wake up an nfsd thread to deal
	 * with it.
	 */
	if (waitflag == MB_DONTWAIT && (slp->ns_numrec > 0
	    || (slp->ns_flag & (SLP_NEEDQ | SLP_DISCONN)))) {
		nfsrv_wakenfsd(slp, nparallel_wakeup);

/*
 * Try and extract an RPC request from the mbuf data list received on a
 * stream socket. The "waitflag" argument indicates whether or not it
 * can sleep.
 */
static int
nfsrv_getstream(struct nfssvc_sock *slp, int waitflag, int *countp)
{
	struct mbuf *m, **mpp;
	char *cp1, *cp2;
	int len;
	struct mbuf *om, *m2, *recm;
	u_int32_t recmark;

	if (slp->ns_reclen == 0) {
		if (slp->ns_cc < NFSX_UNSIGNED)
		if (m->m_len >= NFSX_UNSIGNED) {
			bcopy(mtod(m, caddr_t), (caddr_t)&recmark, NFSX_UNSIGNED);
			m->m_data += NFSX_UNSIGNED;
			m->m_len -= NFSX_UNSIGNED;
			cp1 = (caddr_t)&recmark;
			cp2 = mtod(m, caddr_t);
			while (cp1 < ((caddr_t)&recmark) + NFSX_UNSIGNED) {
				while (m->m_len == 0) {
					cp2 = mtod(m, caddr_t);
		slp->ns_cc -= NFSX_UNSIGNED;
		recmark = ntohl(recmark);
		slp->ns_reclen = recmark & ~0x80000000;
		if (recmark & 0x80000000)
			slp->ns_flag |= SLP_LASTFRAG;
			slp->ns_flag &= ~SLP_LASTFRAG;
		if (slp->ns_reclen > NFS_MAXPACKET) {
			log(LOG_ERR, "%s (%d) from nfs client\n",
			    "impossible packet length",

	/*
	 * Now get the record part.
	 *
	 * Note that slp->ns_reclen may be 0. Linux sometimes
	 * generates 0-length RPCs
	 */
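	/*
	 * Summary of the cases below (added note, not an original comment):
	 * if ns_cc == ns_reclen the raw mbuf list is exactly one record and
	 * is taken whole; if ns_cc > ns_reclen the first ns_reclen bytes
	 * are carved off and the rest stays on the raw queue; if less than
	 * a full record has arrived the routine simply returns and is
	 * called again when more stream data shows up.
	 */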
	if (slp->ns_cc == slp->ns_reclen) {
		slp->ns_raw = slp->ns_rawend = (struct mbuf *)0;
		slp->ns_cc = slp->ns_reclen = 0;
	} else if (slp->ns_cc > slp->ns_reclen) {
		om = (struct mbuf *)0;
		while (len < slp->ns_reclen) {
			if ((len + m->m_len) > slp->ns_reclen) {
				m2 = m_copym(m, 0, slp->ns_reclen - len,
					m->m_data += slp->ns_reclen - len;
					m->m_len -= slp->ns_reclen - len;
					len = slp->ns_reclen;
					return (EWOULDBLOCK);
			} else if ((len + m->m_len) == slp->ns_reclen) {
				om->m_next = (struct mbuf *)0;

	/*
	 * Accumulate the fragments into a record.
	 */
	mpp = &slp->ns_frag;
		mpp = &((*mpp)->m_next);
	if (slp->ns_flag & SLP_LASTFRAG) {
		struct nfsrv_rec *rec;
		int mf = (waitflag & MB_DONTWAIT) ? M_NOWAIT : M_WAITOK;
		rec = malloc(sizeof(struct nfsrv_rec), M_NFSRVDESC, mf);
			m_freem(slp->ns_frag);
			nfs_realign(&slp->ns_frag, 10 * NFSX_UNSIGNED);
			rec->nr_address = (struct sockaddr *)0;
			rec->nr_packet = slp->ns_frag;
			STAILQ_INSERT_TAIL(&slp->ns_rec, rec, nr_link);
		slp->ns_frag = (struct mbuf *)0;

/*
 * Parse an RPC header.
 */
int
nfsrv_dorec(struct nfssvc_sock *slp, struct nfsd *nfsd,
	    struct nfsrv_descript **ndp)
{
	struct nfsrv_rec *rec;
	struct mbuf *m;
	struct sockaddr *nam;
	struct nfsrv_descript *nd;
	int error;

	if ((slp->ns_flag & SLP_VALID) == 0 || !STAILQ_FIRST(&slp->ns_rec))
	rec = STAILQ_FIRST(&slp->ns_rec);
	STAILQ_REMOVE_HEAD(&slp->ns_rec, nr_link);
	KKASSERT(slp->ns_numrec > 0);
	nam = rec->nr_address;
	free(rec, M_NFSRVDESC);
	MALLOC(nd, struct nfsrv_descript *, sizeof (struct nfsrv_descript),
		M_NFSRVDESC, M_WAITOK);
	nd->nd_md = nd->nd_mrep = m;
	nd->nd_dpos = mtod(m, caddr_t);
	error = nfs_getreq(nd, nfsd, TRUE);
			FREE(nam, M_SONAME);
		free((caddr_t)nd, M_NFSRVDESC);

/*
 * Try to assign service sockets to nfsd threads based on the number
 * of new rpc requests that have been queued on the service socket.
 *
 * If no nfsd's are available or additional requests are pending, set the
 * NFSD_CHECKSLP flag so that one of the running nfsds will go look for
 * the work in the nfssvc_sock list when it is finished processing its
 * current work. This flag is only cleared when an nfsd cannot find
 * any new work to perform.
 */
void
nfsrv_wakenfsd(struct nfssvc_sock *slp, int nparallel)
{
	struct nfsd *nd;

	if ((slp->ns_flag & SLP_VALID) == 0)
	TAILQ_FOREACH(nd, &nfsd_head, nfsd_chain) {
		if (nd->nfsd_flag & NFSD_WAITING) {
			nd->nfsd_flag &= ~NFSD_WAITING;
				panic("nfsd wakeup");
			wakeup((caddr_t)nd);
			if (--nparallel == 0)
		slp->ns_flag |= SLP_DOREC;
		nfsd_head_flag |= NFSD_CHECKSLP;
#endif /* NFS_NOSERVER */