/*
 * Copyright (c) 1989, 1991, 1993, 1995
 *	The Regents of the University of California.  All rights reserved.
 *
 * This code is derived from software contributed to Berkeley by
 * Rick Macklem at The University of Guelph.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by the University of
 *	California, Berkeley and its contributors.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	@(#)nfs_socket.c	8.5 (Berkeley) 3/30/95
 * $FreeBSD: src/sys/nfs/nfs_socket.c,v 1.60.2.6 2003/03/26 01:44:46 alfred Exp $
 * $DragonFly: src/sys/vfs/nfs/nfs_socket.c,v 1.37 2006/09/05 03:48:13 dillon Exp $
 */
/*
 * Socket operations for use by nfs
 */
#include <sys/param.h>
#include <sys/systm.h>
#include <sys/proc.h>
#include <sys/malloc.h>
#include <sys/mount.h>
#include <sys/kernel.h>
#include <sys/mbuf.h>
#include <sys/vnode.h>
#include <sys/fcntl.h>
#include <sys/protosw.h>
#include <sys/resourcevar.h>
#include <sys/socket.h>
#include <sys/socketvar.h>
#include <sys/socketops.h>
#include <sys/syslog.h>
#include <sys/thread.h>
#include <sys/tprintf.h>
#include <sys/sysctl.h>
#include <sys/signalvar.h>

#include <netinet/in.h>
#include <netinet/tcp.h>
#include <sys/thread2.h>

#include "rpcv2.h"
#include "nfsproto.h"
#include "nfs.h"
#include "xdr_subs.h"
#include "nfsm_subs.h"
/*
 * Estimate rto for an nfs rpc sent via an unreliable datagram.
 * Use the mean and mean deviation of rtt for the appropriate type of rpc
 * for the frequent rpcs and a default for the others.
 * The justification for doing "other" this way is that these rpcs
 * happen so infrequently that timer estimates would probably be stale.
 * Also, since many of these rpcs are
 * non-idempotent, a conservative timeout is desired.
 * getattr, lookup - A+2D
 * read, write - A+4D
 * other - nm_timeo
 */
#define	NFS_RTO(n, t) \
	((t) == 0 ? (n)->nm_timeo : \
	 ((t) < 3 ? \
	  (((((n)->nm_srtt[t-1] + 3) >> 2) + (n)->nm_sdrtt[t-1] + 1) >> 1) : \
	  ((((n)->nm_srtt[t-1] + 7) >> 3) + (n)->nm_sdrtt[t-1] + 1)))
#define	NFS_SRTT(r)	(r)->r_nmp->nm_srtt[proct[(r)->r_procnum] - 1]
#define	NFS_SDRTT(r)	(r)->r_nmp->nm_sdrtt[proct[(r)->r_procnum] - 1]
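/*
 * Worked example (added for illustration; the numbers are hypothetical):
 * for a getattr rpc (timer class 1) with nm_srtt[0] == 40 and
 * nm_sdrtt[0] == 8, NFS_RTO evaluates
 *
 *	(((40 + 3) >> 2) + 8 + 1) >> 1  ==  (10 + 9) >> 1  ==  9 ticks
 *
 * i.e. roughly a quarter of the scaled smoothed rtt plus the smoothed
 * deviation, halved.  The shifts keep everything in integer arithmetic.
 */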
/*
 * External data, mostly RPC constants in XDR form
 */
extern u_int32_t rpc_reply, rpc_msgdenied, rpc_mismatch, rpc_vers,
	rpc_auth_unix, rpc_msgaccepted, rpc_call, rpc_autherr,
	rpc_auth_kerb;
extern u_int32_t nfs_prog;
extern struct nfsstats nfsstats;
extern int nfsv3_procid[NFS_NPROCS];
extern int nfs_ticks;
/*
 * Defines which timer to use for the procnum.
 * 0 - default
 * 1 - getattr
 * 2 - lookup
 * 3 - read
 * 4 - write
 */
static int proct[NFS_NPROCS] = {
	0, 1, 0, 2, 1, 3, 3, 4, 0, 0, 0, 0, 0, 0, 0, 0, 3, 3, 0, 0, 0, 0, 0,
	0, 0, 0,
};
static int nfs_realign_test;
static int nfs_realign_count;
static int nfs_bufpackets = 4;
static int nfs_timer_raced;

SYSCTL_DECL(_vfs_nfs);

SYSCTL_INT(_vfs_nfs, OID_AUTO, realign_test, CTLFLAG_RW, &nfs_realign_test, 0, "");
SYSCTL_INT(_vfs_nfs, OID_AUTO, realign_count, CTLFLAG_RW, &nfs_realign_count, 0, "");
SYSCTL_INT(_vfs_nfs, OID_AUTO, bufpackets, CTLFLAG_RW, &nfs_bufpackets, 0, "");
/*
 * There is a congestion window for outstanding rpcs maintained per mount
 * point. The cwnd size is adjusted in roughly the way that:
 * Van Jacobson, "Congestion Avoidance and Control", In "Proceedings of
 * SIGCOMM '88". ACM, August 1988.
 * describes for TCP. The cwnd size is chopped in half on a retransmit timeout
 * and incremented by 1/cwnd when each rpc reply is received and a full cwnd
 * of rpcs is in progress.
 * (The sent count and cwnd are scaled for integer arithmetic.)
 * Variants of "slow start" were tried and were found to be too much of a
 * performance hit (ave. rtt 3 times larger),
 * I suspect due to the large rtt that nfs rpcs have.
 */
#define	NFS_CWNDSCALE	256
#define	NFS_MAXCWND	(NFS_CWNDSCALE * 32)
static int nfs_backoff[8] = { 2, 4, 8, 16, 32, 64, 128, 256, };
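/*
 * Scaling example (added for illustration; not from the original code):
 * with NFS_CWNDSCALE 256, a window of 4 outstanding rpcs is stored as
 * nm_cwnd == 1024.  The additive-increase step in nfs_reply() then adds
 * (256 * 256 + 512) / 1024 == 64 per reply, i.e. 1/4 of an rpc, so four
 * acked rpcs open the window by one.  nfs_backoff[] above bounds the
 * multiplicative retransmit backoff at 256x.
 */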
struct nfsrtt nfsrtt;
struct callout	nfs_timer_handle;

static int	nfs_msg (struct thread *, char *, char *);
static int	nfs_rcvlock (struct nfsreq *);
static void	nfs_rcvunlock (struct nfsreq *);
static void	nfs_realign (struct mbuf **pm, int hsiz);
static int	nfs_receive (struct nfsreq *rep, struct sockaddr **aname,
				 struct mbuf **mp);
static void	nfs_softterm (struct nfsreq *rep);
static int	nfs_reconnect (struct nfsreq *rep);
#ifndef NFS_NOSERVER
static int	nfsrv_getstream (struct nfssvc_sock *, int, int *);

int (*nfsrv3_procs[NFS_NPROCS]) (struct nfsrv_descript *nd,
				    struct nfssvc_sock *slp,
				    struct thread *td,
				    struct mbuf **mreqp) = {
	nfsrv_null,
	nfsrv_getattr,
	nfsrv_setattr,
	nfsrv_lookup,
	nfsrv3_access,
	nfsrv_readlink,
	nfsrv_read,
	nfsrv_write,
	nfsrv_create,
	nfsrv_mknod,
	nfsrv_remove,
	nfsrv_rename,
	nfsrv_link,
	nfsrv_symlink,
	nfsrv_mkdir,
	nfsrv_rmdir,
	nfsrv_readdir,
	nfsrv_readdirplus,
	nfsrv_statfs,
	nfsrv_fsinfo,
	nfsrv_pathconf,
	nfsrv_commit,
	nfsrv_noop,
	nfsrv_noop,
	nfsrv_noop,
	nfsrv_noop
};
#endif /* NFS_NOSERVER */
/*
 * Initialize sockets and congestion for a new NFS connection.
 * We do not free the sockaddr if error.
 */
int
nfs_connect(struct nfsmount *nmp, struct nfsreq *rep)
{
	struct socket *so;
	int error, rcvreserve, sndreserve;
	int pktscale;
	struct sockaddr *saddr;
	struct sockaddr_in *sin;
	struct thread *td = &thread0; /* only used for socreate and sobind */

	nmp->nm_so = (struct socket *)0;
	saddr = nmp->nm_nam;
	error = socreate(saddr->sa_family, &nmp->nm_so, nmp->nm_sotype,
		nmp->nm_soproto, td);
	if (error)
		goto bad;
	so = nmp->nm_so;
	nmp->nm_soflags = so->so_proto->pr_flags;
	/*
	 * Some servers require that the client port be a reserved port number.
	 */
	if (saddr->sa_family == AF_INET && (nmp->nm_flag & NFSMNT_RESVPORT)) {
		struct sockopt sopt;
		int ip;
		struct sockaddr_in ssin;

		bzero(&sopt, sizeof sopt);
		ip = IP_PORTRANGE_LOW;
		sopt.sopt_level = IPPROTO_IP;
		sopt.sopt_name = IP_PORTRANGE;
		sopt.sopt_val = (void *)&ip;
		sopt.sopt_valsize = sizeof(ip);
		error = sosetopt(so, &sopt);
		if (error)
			goto bad;
		bzero(&ssin, sizeof ssin);
		sin = &ssin;
		sin->sin_len = sizeof (struct sockaddr_in);
		sin->sin_family = AF_INET;
		sin->sin_addr.s_addr = INADDR_ANY;
		sin->sin_port = htons(0);
		error = sobind(so, (struct sockaddr *)sin, td);
		if (error)
			goto bad;
		bzero(&sopt, sizeof sopt);
		ip = IP_PORTRANGE_DEFAULT;
		sopt.sopt_level = IPPROTO_IP;
		sopt.sopt_name = IP_PORTRANGE;
		sopt.sopt_val = (void *)&ip;
		sopt.sopt_valsize = sizeof(ip);
		error = sosetopt(so, &sopt);
		if (error)
			goto bad;
	}
	/*
	 * Protocols that do not require connections may be optionally left
	 * unconnected for servers that reply from a port other than NFS_PORT.
	 */
	if (nmp->nm_flag & NFSMNT_NOCONN) {
		if (nmp->nm_soflags & PR_CONNREQUIRED) {
			error = ENOTCONN;
			goto bad;
		}
	} else {
		error = soconnect(so, nmp->nm_nam, td);
		if (error)
			goto bad;

		/*
		 * Wait for the connection to complete. Cribbed from the
		 * connect system call but with the wait timing out so
		 * that interruptible mounts don't hang here for a long time.
		 */
		while ((so->so_state & SS_ISCONNECTING) && so->so_error == 0) {
			(void) tsleep((caddr_t)&so->so_timeo, 0,
				      "nfscon", 2 * hz);
			if ((so->so_state & SS_ISCONNECTING) &&
			    so->so_error == 0 && rep &&
			    (error = nfs_sigintr(nmp, rep, rep->r_td)) != 0){
				so->so_state &= ~SS_ISCONNECTING;
				goto bad;
			}
		}
		if (so->so_error) {
			error = so->so_error;
			so->so_error = 0;
			goto bad;
		}
	}
	so->so_rcv.sb_timeo = (5 * hz);
	so->so_snd.sb_timeo = (5 * hz);
	/*
	 * Get buffer reservation size from sysctl, but impose reasonable
	 * bounds.
	 */
	pktscale = nfs_bufpackets;
	if (pktscale < 2)
		pktscale = 2;
	if (pktscale > 64)
		pktscale = 64;

	if (nmp->nm_sotype == SOCK_DGRAM) {
		sndreserve = (nmp->nm_wsize + NFS_MAXPKTHDR) * pktscale;
		rcvreserve = (max(nmp->nm_rsize, nmp->nm_readdirsize) +
		    NFS_MAXPKTHDR) * pktscale;
	} else if (nmp->nm_sotype == SOCK_SEQPACKET) {
		sndreserve = (nmp->nm_wsize + NFS_MAXPKTHDR) * pktscale;
		rcvreserve = (max(nmp->nm_rsize, nmp->nm_readdirsize) +
		    NFS_MAXPKTHDR) * pktscale;
	} else {
		if (nmp->nm_sotype != SOCK_STREAM)
			panic("nfscon sotype");
		if (so->so_proto->pr_flags & PR_CONNREQUIRED) {
			struct sockopt sopt;
			int val;

			bzero(&sopt, sizeof sopt);
			sopt.sopt_level = SOL_SOCKET;
			sopt.sopt_name = SO_KEEPALIVE;
			sopt.sopt_val = &val;
			sopt.sopt_valsize = sizeof val;
			val = 1;
			sosetopt(so, &sopt);
		}
		if (so->so_proto->pr_protocol == IPPROTO_TCP) {
			struct sockopt sopt;
			int val;

			bzero(&sopt, sizeof sopt);
			sopt.sopt_level = IPPROTO_TCP;
			sopt.sopt_name = TCP_NODELAY;
			sopt.sopt_val = &val;
			sopt.sopt_valsize = sizeof val;
			val = 1;
			sosetopt(so, &sopt);
		}
		sndreserve = (nmp->nm_wsize + NFS_MAXPKTHDR +
		    sizeof (u_int32_t)) * pktscale;
		rcvreserve = (nmp->nm_rsize + NFS_MAXPKTHDR +
		    sizeof (u_int32_t)) * pktscale;
	}
	error = soreserve(so, sndreserve, rcvreserve,
			  &td->td_proc->p_rlimit[RLIMIT_SBSIZE]);
	if (error)
		goto bad;
	so->so_rcv.sb_flags |= SB_NOINTR;
	so->so_snd.sb_flags |= SB_NOINTR;

	/* Initialize other non-zero congestion variables */
	nmp->nm_srtt[0] = nmp->nm_srtt[1] = nmp->nm_srtt[2] =
		nmp->nm_srtt[3] = (NFS_TIMEO << 3);
	nmp->nm_sdrtt[0] = nmp->nm_sdrtt[1] = nmp->nm_sdrtt[2] =
		nmp->nm_sdrtt[3] = 0;
	nmp->nm_cwnd = NFS_MAXCWND / 2;	    /* Initial send window */
	nmp->nm_sent = 0;
	nmp->nm_timeouts = 0;
	return (0);

bad:
	nfs_disconnect(nmp);
	return (error);
}
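/*
 * Sizing note (illustrative figures, not part of the original source):
 * for a UDP mount with nm_wsize == 8192 and the default nfs_bufpackets
 * of 4, nfs_connect() reserves (8192 + NFS_MAXPKTHDR) * 4 bytes of send
 * space, i.e. room for four fully-formed write requests in flight.
 * Stream mounts add sizeof(u_int32_t) per packet for the record mark.
 */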
/*
 * Called when a connection is broken on a reliable protocol.
 * - clean up the old socket
 * - nfs_connect() again
 * - set R_MUSTRESEND for all outstanding requests on mount point
 * If this fails the mount point is DEAD!
 * nb: Must be called with the nfs_sndlock() set on the mount point.
 */
static int
nfs_reconnect(struct nfsreq *rep)
{
	struct nfsreq *rp;
	struct nfsmount *nmp = rep->r_nmp;
	int error;

	nfs_disconnect(nmp);
	while ((error = nfs_connect(nmp, rep)) != 0) {
		if (error == EINTR || error == ERESTART)
			return (EINTR);
		(void) tsleep((caddr_t)&lbolt, 0, "nfscon", 0);
	}

	/*
	 * Loop through outstanding request list and fix up all requests
	 * on old socket.
	 */
	TAILQ_FOREACH(rp, &nfs_reqq, r_chain) {
		if (rp->r_nmp == nmp)
			rp->r_flags |= R_MUSTRESEND;
	}
	return (0);
}
/*
 * NFS disconnect. Clean up and unlink.
 */
void
nfs_disconnect(struct nfsmount *nmp)
{
	struct socket *so;

	if (nmp->nm_so) {
		so = nmp->nm_so;
		nmp->nm_so = (struct socket *)0;
		soshutdown(so, SHUT_RDWR);
		soclose(so, FNONBLOCK);
	}
}

void
nfs_safedisconnect(struct nfsmount *nmp)
{
	struct nfsreq dummyreq;

	bzero(&dummyreq, sizeof(dummyreq));
	dummyreq.r_nmp = nmp;
	dummyreq.r_td = NULL;
	nfs_rcvlock(&dummyreq);
	nfs_disconnect(nmp);
	nfs_rcvunlock(&dummyreq);
}
/*
 * This is the nfs send routine. For connection based socket types, it
 * must be called with an nfs_sndlock() on the socket.
 * "rep == NULL" indicates that it has been called from a server.
 * For the client side:
 * - return EINTR if the RPC is terminated, 0 otherwise
 * - set R_MUSTRESEND if the send fails for any reason
 * - do any cleanup required by recoverable socket errors (?)
 * For the server side:
 * - return EINTR or ERESTART if interrupted by a signal
 * - return EPIPE if a connection is lost for connection based sockets (TCP...)
 * - do any cleanup required by recoverable socket errors (?)
 */
int
nfs_send(struct socket *so, struct sockaddr *nam, struct mbuf *top,
	 struct nfsreq *rep)
{
	struct sockaddr *sendnam;
	int error, soflags, flags;

	if (rep) {
		if (rep->r_flags & R_SOFTTERM) {
			m_freem(top);
			return (EINTR);
		}
		if ((so = rep->r_nmp->nm_so) == NULL) {
			rep->r_flags |= R_MUSTRESEND;
			m_freem(top);
			return (0);
		}
		rep->r_flags &= ~R_MUSTRESEND;
		soflags = rep->r_nmp->nm_soflags;
	} else {
		soflags = so->so_proto->pr_flags;
	}
	if ((soflags & PR_CONNREQUIRED) || (so->so_state & SS_ISCONNECTED))
		sendnam = (struct sockaddr *)0;
	else
		sendnam = nam;
	if (so->so_type == SOCK_SEQPACKET)
		flags = MSG_EOR;
	else
		flags = 0;

	error = so_pru_sosend(so, sendnam, NULL, top, NULL, flags,
			      curthread);
	/*
	 * ENOBUFS for dgram sockets is transient and non fatal.
	 * No need to log, and no need to break a soft mount.
	 */
	if (error == ENOBUFS && so->so_type == SOCK_DGRAM) {
		error = 0;
		if (rep)		/* do backoff retransmit on client */
			rep->r_flags |= R_MUSTRESEND;
	}

	if (error) {
		if (rep) {
			log(LOG_INFO, "nfs send error %d for server %s\n",error,
			    rep->r_nmp->nm_mountp->mnt_stat.f_mntfromname);
			/*
			 * Deal with errors for the client side.
			 */
			if (rep->r_flags & R_SOFTTERM)
				error = EINTR;
			else
				rep->r_flags |= R_MUSTRESEND;
		} else {
			log(LOG_INFO, "nfsd send error %d\n", error);
		}

		/*
		 * Handle any recoverable (soft) socket errors here. (?)
		 */
		if (error != EINTR && error != ERESTART &&
		    error != EWOULDBLOCK && error != EPIPE)
			error = 0;
	}
	return (error);
}
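/*
 * Caller-side sketch (hypothetical helper, kept disabled; it only
 * illustrates the contract described above): nfs_send() consumes the
 * mbuf chain, so callers transmit a copy and rely on R_MUSTRESEND to
 * trigger a later retransmit rather than treating the error as fatal.
 */
#if 0
static int
example_xmit(struct nfsmount *nmp, struct nfsreq *rep)
{
	struct mbuf *m;

	m = m_copym(rep->r_mreq, 0, M_COPYALL, MB_WAIT);
	return (nfs_send(nmp->nm_so, nmp->nm_nam, m, rep));
	/* on failure nfs_send() has set R_MUSTRESEND for nfs_timer() */
}
#endif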
/*
 * Receive a Sun RPC Request/Reply. For SOCK_DGRAM, the work is all
 * done by soreceive(), but for SOCK_STREAM we must deal with the Record
 * Mark and consolidate the data into a new mbuf list.
 * nb: Sometimes TCP passes the data up to soreceive() in long lists of
 *     small mbufs.
 * For SOCK_STREAM we must be very careful to read an entire record once
 * we have read any of it, even if the system call has been interrupted.
 */
static int
nfs_receive(struct nfsreq *rep, struct sockaddr **aname, struct mbuf **mp)
{
	struct socket *so;
	struct uio auio;
	struct iovec aio;
	struct mbuf *m;
	struct mbuf *control;
	u_int32_t len;
	struct sockaddr **getnam;
	int error, sotype, rcvflg;
	struct thread *td = curthread;	/* XXX */

	/*
	 * Set up arguments for soreceive()
	 */
	*mp = (struct mbuf *)0;
	*aname = (struct sockaddr *)0;
	sotype = rep->r_nmp->nm_sotype;

	/*
	 * For reliable protocols, lock against other senders/receivers
	 * in case a reconnect is necessary.
	 * For SOCK_STREAM, first get the Record Mark to find out how much
	 * more there is to get.
	 * We must lock the socket against other receivers
	 * until we have an entire rpc request/reply.
	 */
	if (sotype != SOCK_DGRAM) {
		error = nfs_sndlock(rep);
		if (error)
			return (error);
tryagain:
		/*
		 * Check for fatal errors and resending request.
		 *
		 * Ugh: If a reconnect attempt just happened, nm_so
		 * would have changed. NULL indicates a failed
		 * attempt that has essentially shut down this
		 * mount point.
		 */
		if (rep->r_mrep || (rep->r_flags & R_SOFTTERM)) {
			nfs_sndunlock(rep);
			return (EINTR);
		}
		so = rep->r_nmp->nm_so;
		if (!so) {
			error = nfs_reconnect(rep);
			if (error) {
				nfs_sndunlock(rep);
				return (error);
			}
			goto tryagain;
		}
		while (rep->r_flags & R_MUSTRESEND) {
			m = m_copym(rep->r_mreq, 0, M_COPYALL, MB_WAIT);
			nfsstats.rpcretries++;
			error = nfs_send(so, rep->r_nmp->nm_nam, m, rep);
			if (error) {
				if (error == EINTR || error == ERESTART ||
				    (error = nfs_reconnect(rep)) != 0) {
					nfs_sndunlock(rep);
					return (error);
				}
				goto tryagain;
			}
		}
		nfs_sndunlock(rep);
		if (sotype == SOCK_STREAM) {
			aio.iov_base = (caddr_t) &len;
			aio.iov_len = sizeof(u_int32_t);
			auio.uio_iov = &aio;
			auio.uio_iovcnt = 1;
			auio.uio_segflg = UIO_SYSSPACE;
			auio.uio_rw = UIO_READ;
			auio.uio_offset = 0;
			auio.uio_resid = sizeof(u_int32_t);
			auio.uio_td = td;
			do {
				rcvflg = MSG_WAITALL;
				error = so_pru_soreceive(so, NULL, &auio, NULL,
							 NULL, &rcvflg);
				if (error == EWOULDBLOCK && rep) {
					if (rep->r_flags & R_SOFTTERM)
						return (EINTR);
				}
			} while (error == EWOULDBLOCK);
			if (!error && auio.uio_resid > 0) {
				/*
				 * Don't log a 0 byte receive; it means
				 * that the socket has been closed, and
				 * can happen during normal operation
				 * (forcible unmount or Solaris server).
				 */
				if (auio.uio_resid != sizeof (u_int32_t))
					log(LOG_INFO,
					    "short receive (%d/%d) from nfs server %s\n",
					    (int)(sizeof(u_int32_t) - auio.uio_resid),
					    (int)sizeof(u_int32_t),
					    rep->r_nmp->nm_mountp->mnt_stat.f_mntfromname);
				error = EPIPE;
			}
			if (error)
				goto errout;
			len = ntohl(len) & ~0x80000000;
			/*
			 * This is SERIOUS! We are out of sync with the sender
			 * and forcing a disconnect/reconnect is all I can do.
			 */
			if (len > NFS_MAXPACKET) {
				log(LOG_ERR, "%s (%d) from nfs server %s\n",
				    "impossible packet length",
				    len,
				    rep->r_nmp->nm_mountp->mnt_stat.f_mntfromname);
				error = EFBIG;
				goto errout;
			}
			auio.uio_resid = len;
			do {
				rcvflg = MSG_WAITALL;
				error = so_pru_soreceive(so, NULL, &auio, mp,
							 NULL, &rcvflg);
			} while (error == EWOULDBLOCK || error == EINTR ||
				 error == ERESTART);
			if (!error && auio.uio_resid > 0) {
				if (len != auio.uio_resid)
					log(LOG_INFO,
					    "short receive (%d/%d) from nfs server %s\n",
					    len - auio.uio_resid, len,
					    rep->r_nmp->nm_mountp->mnt_stat.f_mntfromname);
				error = EPIPE;
			}
		} else {
			/*
			 * NB: Since uio_resid is big, MSG_WAITALL is ignored
			 * and soreceive() will return when it has either a
			 * control msg or a data msg.
			 * We have no use for control msgs, but must grab them
			 * and then throw them away so we know what is going
			 * on.
			 */
			auio.uio_resid = len = 100000000; /* Anything Big */
			auio.uio_td = td;
			do {
				rcvflg = 0;
				error = so_pru_soreceive(so, NULL, &auio, mp,
							 &control, &rcvflg);
				if (control)
					m_freem(control);
				if (error == EWOULDBLOCK && rep) {
					if (rep->r_flags & R_SOFTTERM)
						return (EINTR);
				}
			} while (error == EWOULDBLOCK ||
				 (!error && *mp == NULL && control));
			if ((rcvflg & MSG_EOR) == 0)
				printf("Egad!!\n");
			if (!error && *mp == NULL)
				error = EPIPE;
			len -= auio.uio_resid;
		}
errout:
		if (error && error != EINTR && error != ERESTART) {
			m_freem(*mp);
			*mp = (struct mbuf *)0;
			if (error != EPIPE)
				log(LOG_INFO,
				    "receive error %d from nfs server %s\n",
				    error,
				    rep->r_nmp->nm_mountp->mnt_stat.f_mntfromname);
			error = nfs_sndlock(rep);
			if (!error) {
				error = nfs_reconnect(rep);
				if (!error)
					goto tryagain;
				else
					nfs_sndunlock(rep);
			}
		}
	} else {
		if ((so = rep->r_nmp->nm_so) == NULL)
			return (EACCES);
		if (so->so_state & SS_ISCONNECTED)
			getnam = (struct sockaddr **)0;
		else
			getnam = aname;
		auio.uio_resid = len = 1000000;
		auio.uio_td = td;
		do {
			rcvflg = 0;
			error = so_pru_soreceive(so, getnam, &auio, mp, NULL,
						 &rcvflg);
			if (error == EWOULDBLOCK &&
			    (rep->r_flags & R_SOFTTERM))
				return (EINTR);
		} while (error == EWOULDBLOCK);
		len -= auio.uio_resid;
	}
	if (error) {
		m_freem(*mp);
		*mp = (struct mbuf *)0;
	}
	/*
	 * Search for any mbufs that are not a multiple of 4 bytes long
	 * or with m_data not longword aligned.
	 * These could cause pointer alignment problems, so copy them to
	 * well aligned mbufs.
	 */
	nfs_realign(mp, 5 * NFSX_UNSIGNED);
	return (error);
}
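/*
 * Record mark sketch (added for illustration): each TCP record is
 * preceded by a 4-byte mark, htonl(len | 0x80000000) on the final
 * fragment.  Decoding it is just:
 */
#if 0
	u_int32_t mark = ntohl(rm);		 /* rm: raw 4-byte mark */
	int lastfrag = (mark & 0x80000000) != 0; /* final fragment? */
	u_int32_t fraglen = mark & ~0x80000000;	 /* payload length */
#endif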
/*
 * Implement receipt of reply on a socket.
 * We must search through the list of received datagrams matching them
 * with outstanding requests using the xid, until ours is found.
 */
/* ARGSUSED */
int
nfs_reply(struct nfsreq *myrep)
{
	struct nfsreq *rep;
	struct nfsmount *nmp = myrep->r_nmp;
	int32_t t1;
	struct mbuf *mrep, *md;
	struct sockaddr *nam;
	u_int32_t rxid, *tl;
	caddr_t dpos, cp2;
	int error;

	/*
	 * Loop around until we get our own reply
	 */
	for (;;) {
		/*
		 * Lock against other receivers so that I don't get stuck in
		 * sbwait() after someone else has received my reply for me.
		 * Also necessary for connection based protocols to avoid
		 * race conditions during a reconnect.
		 * If nfs_rcvlock() returns EALREADY, that means that
		 * the reply has already been received by another
		 * process and we can return immediately. In this
		 * case, the lock is not taken to avoid races with
		 * other processes.
		 */
		error = nfs_rcvlock(myrep);
		if (error == EALREADY)
			return (0);
		if (error)
			return (error);
		/*
		 * Get the next Rpc reply off the socket
		 */
		error = nfs_receive(myrep, &nam, &mrep);
		nfs_rcvunlock(myrep);
		if (error) {
			/*
			 * Ignore routing errors on connectionless protocols??
			 */
			if (NFSIGNORE_SOERROR(nmp->nm_soflags, error)) {
				nmp->nm_so->so_error = 0;
				if (myrep->r_flags & R_GETONEREP)
					return (0);
				continue;
			}
			return (error);
		}
		if (nam)
			FREE(nam, M_SONAME);

		/*
		 * Get the xid and check that it is an rpc reply
		 */
		md = mrep;
		dpos = mtod(md, caddr_t);
		nfsm_dissect(tl, u_int32_t *, 2*NFSX_UNSIGNED);
		rxid = *tl++;
		if (*tl != rpc_reply) {
			nfsstats.rpcinvalid++;
			m_freem(mrep);
nfsmout:
			if (myrep->r_flags & R_GETONEREP)
				return (0);
			continue;
		}

		/*
		 * Loop through the request list to match up the reply
		 * Iff no match, just drop the datagram. On match, set
		 * r_mrep atomically to prevent the timer from messing
		 * around with the request after we have exited the critical
		 * section.
		 */
		crit_enter();
		TAILQ_FOREACH(rep, &nfs_reqq, r_chain) {
			if (rep->r_mrep == NULL && rxid == rep->r_xid) {
				rep->r_mrep = mrep;
				break;
			}
		}
		crit_exit();

		/*
		 * Fill in the rest of the reply if we found a match.
		 */
		if (rep) {
			rep->r_md = md;
			rep->r_dpos = dpos;
			if (nfsrtton) {
				struct rttl *rt;

				rt = &nfsrtt.rttl[nfsrtt.pos];
				rt->proc = rep->r_procnum;
				rt->rto = NFS_RTO(nmp, proct[rep->r_procnum]);
				rt->sent = nmp->nm_sent;
				rt->cwnd = nmp->nm_cwnd;
				rt->srtt = nmp->nm_srtt[proct[rep->r_procnum] - 1];
				rt->sdrtt = nmp->nm_sdrtt[proct[rep->r_procnum] - 1];
				rt->fsid = nmp->nm_mountp->mnt_stat.f_fsid;
				getmicrotime(&rt->tstamp);
				if (rep->r_flags & R_TIMING)
					rt->rtt = rep->r_rtt;
				else
					rt->rtt = 1000000;
				nfsrtt.pos = (nfsrtt.pos + 1) % NFSRTTLOGSIZ;
			}
			/*
			 * Update congestion window.
			 * Do the additive increase of
			 * one rpc/rtt.
			 */
			if (nmp->nm_cwnd <= nmp->nm_sent) {
				nmp->nm_cwnd +=
				   (NFS_CWNDSCALE * NFS_CWNDSCALE +
				   (nmp->nm_cwnd >> 1)) / nmp->nm_cwnd;
				if (nmp->nm_cwnd > NFS_MAXCWND)
					nmp->nm_cwnd = NFS_MAXCWND;
			}
			crit_enter();	/* nfs_timer interlock for nm_sent */
			if (rep->r_flags & R_SENT) {
				rep->r_flags &= ~R_SENT;
				nmp->nm_sent -= NFS_CWNDSCALE;
			}
			crit_exit();
			/*
			 * Update rtt using a gain of 0.125 on the mean
			 * and a gain of 0.25 on the deviation.
			 */
			if (rep->r_flags & R_TIMING) {
				/*
				 * Since the timer resolution of
				 * NFS_HZ is so coarse, it can often
				 * result in r_rtt == 0. Since
				 * r_rtt == N means that the actual
				 * rtt is between N+dt and N+2-dt ticks,
				 * add 1.
				 */
				t1 = rep->r_rtt + 1;
				t1 -= (NFS_SRTT(rep) >> 3);
				NFS_SRTT(rep) += t1;
				if (t1 < 0)
					t1 = -t1;
				t1 -= (NFS_SDRTT(rep) >> 2);
				NFS_SDRTT(rep) += t1;
			}
			nmp->nm_timeouts = 0;
		}
		/*
		 * If not matched to a request, drop it.
		 * If it's mine, get out.
		 */
		if (rep == NULL) {
			nfsstats.rpcunexpected++;
			m_freem(mrep);
		} else if (rep == myrep) {
			if (rep->r_mrep == NULL)
				panic("nfsreply nil");
			return (0);
		}
		if (myrep->r_flags & R_GETONEREP)
			return (0);
	}
}
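/*
 * Filter example (illustrative numbers only): the smoothed mean is kept
 * scaled by 8, so with NFS_SRTT == 32 (4 ticks) and r_rtt == 7 (counted
 * as t1 = 8), the update above computes t1 = 8 - (32 >> 3) = 4 and adds
 * it to the scaled mean, moving it to 36 (4.5 ticks): a gain of 1/8 on
 * the error.  The deviation filter works the same way with a 1/4 gain.
 */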
/*
 * nfs_request - goes something like this
 *	- fill in request struct
 *	- links it into list
 *	- calls nfs_send() for first transmit
 *	- calls nfs_receive() to get reply
 *	- break down rpc header and return with nfs reply pointed to
 *	  by mrep or error
 * nb: always frees up mreq mbuf list
 */
int
nfs_request(struct vnode *vp, struct mbuf *mrest, int procnum,
	struct thread *td, struct ucred *cred, struct mbuf **mrp,
	struct mbuf **mdp, caddr_t *dposp)
{
	struct mbuf *mrep, *m2;
	u_int32_t *tl;
	int i;
	struct nfsreq *rep;
	struct nfsmount *nmp;
	struct mbuf *m, *md, *mheadend;
	char nickv[RPCX_NICKVERF];
	time_t waituntil;
	caddr_t dpos, cp2;
	int t1, error = 0, mrest_len, auth_len, auth_type;
	int trylater_delay = 15, trylater_cnt = 0, failed_auth = 0;
	int verf_len, verf_type;
	u_int32_t xid;
	char *auth_str, *verf_str;
	NFSKERBKEY_T key;		/* save session key */

	/* Reject requests while attempting a forced unmount. */
	if (vp->v_mount->mnt_kern_flag & MNTK_UNMOUNTF) {
		m_freem(mrest);
		return (ESTALE);
	}
	nmp = VFSTONFS(vp->v_mount);
	MALLOC(rep, struct nfsreq *, sizeof(struct nfsreq), M_NFSREQ, M_WAITOK);
	rep->r_nmp = nmp;
	rep->r_vp = vp;
	rep->r_td = td;
	rep->r_procnum = procnum;
	rep->r_mreq = NULL;
	i = 0;
	m = mrest;
	while (m) {
		i += m->m_len;
		m = m->m_next;
	}
	mrest_len = i;

	/*
	 * Get the RPC header with authorization.
	 */
kerbauth:
	verf_str = auth_str = (char *)0;
	if (nmp->nm_flag & NFSMNT_KERB) {
		verf_str = nickv;
		verf_len = sizeof (nickv);
		auth_type = RPCAUTH_KERB4;
		bzero((caddr_t)key, sizeof (key));
		if (failed_auth || nfs_getnickauth(nmp, cred, &auth_str,
			&auth_len, verf_str, verf_len)) {
			error = nfs_getauth(nmp, rep, cred, &auth_str,
				&auth_len, verf_str, &verf_len, key);
			if (error) {
				kfree((caddr_t)rep, M_NFSREQ);
				m_freem(mrest);
				return (error);
			}
		}
	} else {
		auth_type = RPCAUTH_UNIX;
		if (cred->cr_ngroups < 1)
			panic("nfsreq nogrps");
		auth_len = ((((cred->cr_ngroups - 1) > nmp->nm_numgrps) ?
			nmp->nm_numgrps : (cred->cr_ngroups - 1)) << 2) +
			5 * NFSX_UNSIGNED;
	}
	m = nfsm_rpchead(cred, nmp->nm_flag, procnum, auth_type, auth_len,
	     auth_str, verf_len, verf_str, mrest, mrest_len, &mheadend, &xid);
	if (auth_str)
		kfree(auth_str, M_TEMP);

	/*
	 * For stream protocols, insert a Sun RPC Record Mark.
	 */
	if (nmp->nm_sotype == SOCK_STREAM) {
		M_PREPEND(m, NFSX_UNSIGNED, MB_WAIT);
		if (m == NULL) {
			kfree(rep, M_NFSREQ);
			return (ENOBUFS);
		}
		*mtod(m, u_int32_t *) = htonl(0x80000000 |
			 (m->m_pkthdr.len - NFSX_UNSIGNED));
	}
	rep->r_mreq = m;
	rep->r_xid = xid;
tryagain:
	if (nmp->nm_flag & NFSMNT_SOFT)
		rep->r_retry = nmp->nm_retry;
	else
		rep->r_retry = NFS_MAXREXMIT + 1;	/* past clip limit */
	rep->r_rtt = rep->r_rexmit = 0;
	if (proct[procnum] > 0)
		rep->r_flags = R_TIMING | R_MASKTIMER;
	else
		rep->r_flags = R_MASKTIMER;
	rep->r_mrep = NULL;

	/*
	 * Do the client side RPC.
	 */
	nfsstats.rpcrequests++;

	/*
	 * Chain request into list of outstanding requests. Be sure
	 * to put it LAST so timer finds oldest requests first. Note
	 * that R_MASKTIMER is set at the moment to prevent any timer
	 * action on this request while we are still doing processing on
	 * it below. splsoftclock() primarily protects nm_sent. Note
	 * that we may block in this code so there is no atomicity guarantee.
	 */
	crit_enter();
	TAILQ_INSERT_TAIL(&nfs_reqq, rep, r_chain);

	/*
	 * If backing off another request or avoiding congestion, don't
	 * send this one now but let timer do it. If not timing a request,
	 * do it now.
	 *
	 * Even though the timer will not mess with our request there is
	 * still the possibility that we will race a reply (which clears
	 * R_SENT), especially on localhost connections, so be very careful
	 * when setting R_SENT. We could set R_SENT prior to calling
	 * nfs_send() but why bother if the response occurs that quickly?
	 */
	if (nmp->nm_so && (nmp->nm_sotype != SOCK_DGRAM ||
	    (nmp->nm_flag & NFSMNT_DUMBTIMR) ||
	    nmp->nm_sent < nmp->nm_cwnd)) {
		if (nmp->nm_soflags & PR_CONNREQUIRED)
			error = nfs_sndlock(rep);
		if (!error) {
			m2 = m_copym(m, 0, M_COPYALL, MB_WAIT);
			error = nfs_send(nmp->nm_so, nmp->nm_nam, m2, rep);
			if (nmp->nm_soflags & PR_CONNREQUIRED)
				nfs_sndunlock(rep);
		}
		if (!error && (rep->r_flags & R_MUSTRESEND) == 0 &&
		    rep->r_mrep == NULL) {
			KASSERT((rep->r_flags & R_SENT) == 0,
				("R_SENT ASSERT %p", rep));
			nmp->nm_sent += NFS_CWNDSCALE;
			rep->r_flags |= R_SENT;
		}
	} else {
		rep->r_rtt = -1;
	}

	/*
	 * Let the timer do what it will with the request, then
	 * wait for the reply from our send or the timer's.
	 */
	if (!error || error == EPIPE) {
		rep->r_flags &= ~R_MASKTIMER;
		crit_exit();
		error = nfs_reply(rep);
		crit_enter();
	}

	/*
	 * RPC done, unlink the request, but don't rip it out from under
	 * the callout timer.
	 */
	while (rep->r_flags & R_LOCKED) {
		nfs_timer_raced = 1;
		tsleep(&nfs_timer_raced, 0, "nfstrac", 0);
	}
	TAILQ_REMOVE(&nfs_reqq, rep, r_chain);

	/*
	 * Decrement the outstanding request count.
	 */
	if (rep->r_flags & R_SENT) {
		rep->r_flags &= ~R_SENT;
		nmp->nm_sent -= NFS_CWNDSCALE;
	}
	crit_exit();

	/*
	 * If there was a successful reply and a tprintf msg.
	 * tprintf a response.
	 */
	if (!error && (rep->r_flags & R_TPRINTFMSG))
		nfs_msg(rep->r_td, nmp->nm_mountp->mnt_stat.f_mntfromname,
		    "is alive again");
	mrep = rep->r_mrep;
	md = rep->r_md;
	dpos = rep->r_dpos;
	if (error) {
		m_freem(rep->r_mreq);
		kfree((caddr_t)rep, M_NFSREQ);
		return (error);
	}

	/*
	 * break down the rpc header and check if ok
	 */
	nfsm_dissect(tl, u_int32_t *, 3 * NFSX_UNSIGNED);
	if (*tl++ == rpc_msgdenied) {
		if (*tl == rpc_mismatch)
			error = EOPNOTSUPP;
		else if ((nmp->nm_flag & NFSMNT_KERB) && *tl++ == rpc_autherr) {
			if (!failed_auth) {
				failed_auth++;
				mheadend->m_next = (struct mbuf *)0;
				m_freem(mrep);
				m_freem(rep->r_mreq);
				goto kerbauth;
			} else
				error = EAUTH;
		} else
			error = EACCES;
		m_freem(mrep);
		m_freem(rep->r_mreq);
		kfree((caddr_t)rep, M_NFSREQ);
		return (error);
	}

	/*
	 * Grab any Kerberos verifier, otherwise just throw it away.
	 */
	verf_type = fxdr_unsigned(int, *tl++);
	i = fxdr_unsigned(int32_t, *tl);
	if ((nmp->nm_flag & NFSMNT_KERB) && verf_type == RPCAUTH_KERB4) {
		error = nfs_savenickauth(nmp, cred, i, key, &md, &dpos, mrep);
		if (error)
			goto nfsmout;
	} else if (i > 0)
		nfsm_adv(nfsm_rndup(i));
	nfsm_dissect(tl, u_int32_t *, NFSX_UNSIGNED);
	/* 0 == ok */
	if (*tl == 0) {
		nfsm_dissect(tl, u_int32_t *, NFSX_UNSIGNED);
		if (*tl != 0) {
			error = fxdr_unsigned(int, *tl);
			if ((nmp->nm_flag & NFSMNT_NFSV3) &&
				error == NFSERR_TRYLATER) {
				m_freem(mrep);
				error = 0;
				waituntil = time_second + trylater_delay;
				while (time_second < waituntil)
					(void) tsleep((caddr_t)&lbolt,
						0, "nqnfstry", 0);
				trylater_delay *= nfs_backoff[trylater_cnt];
				if (trylater_cnt < 7)
					trylater_cnt++;
				goto tryagain;
			}

			/*
			 * If the File Handle was stale, invalidate the
			 * lookup cache, just in case.
			 */
			if (error == ESTALE) {
				cache_inval_vp(vp, CINV_CHILDREN);
			}
			if (nmp->nm_flag & NFSMNT_NFSV3) {
				*mrp = mrep;
				*mdp = md;
				*dposp = dpos;
				error |= NFSERR_RETERR;
			} else
				m_freem(mrep);
			m_freem(rep->r_mreq);
			kfree((caddr_t)rep, M_NFSREQ);
			return (error);
		}

		*mrp = mrep;
		*mdp = md;
		*dposp = dpos;
		m_freem(rep->r_mreq);
		FREE((caddr_t)rep, M_NFSREQ);
		return (0);
	}
	m_freem(mrep);
	error = EPROTONOSUPPORT;
nfsmout:
	m_freem(rep->r_mreq);
	kfree((caddr_t)rep, M_NFSREQ);
	return (error);
}
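/*
 * NFSERR_TRYLATER example (added for illustration): starting from
 * trylater_delay == 15, successive jukebox replies sleep 15s, then
 * 15 * nfs_backoff[0] == 30s, then 30 * nfs_backoff[1] == 120s, and
 * so on, with trylater_cnt capped at 7 so the index never runs off
 * the 8-entry nfs_backoff[] table.
 */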
#ifndef NFS_NOSERVER
/*
 * Generate the rpc reply header
 * siz arg. is used to decide if adding a cluster is worthwhile
 */
int
nfs_rephead(int siz, struct nfsrv_descript *nd, struct nfssvc_sock *slp,
	    int err, struct mbuf **mrq, struct mbuf **mbp, caddr_t *bposp)
{
	u_int32_t *tl;
	struct mbuf *mreq;
	caddr_t bpos;
	struct mbuf *mb, *mb2;

	siz += RPC_REPLYSIZ;
	mb = mreq = m_getl(max_hdr + siz, MB_WAIT, MT_DATA, M_PKTHDR, NULL);
	mreq->m_pkthdr.len = 0;
	/*
	 * If this is not a cluster, try and leave leading space
	 * for the lower level headers.
	 */
	if ((max_hdr + siz) < MINCLSIZE)
		mreq->m_data += max_hdr;
	tl = mtod(mreq, u_int32_t *);
	mreq->m_len = 6 * NFSX_UNSIGNED;
	bpos = ((caddr_t)tl) + mreq->m_len;
	*tl++ = txdr_unsigned(nd->nd_retxid);
	*tl++ = rpc_reply;
	if (err == ERPCMISMATCH || (err & NFSERR_AUTHERR)) {
		*tl++ = rpc_msgdenied;
		if (err & NFSERR_AUTHERR) {
			*tl++ = rpc_autherr;
			*tl = txdr_unsigned(err & ~NFSERR_AUTHERR);
			mreq->m_len -= NFSX_UNSIGNED;
			bpos -= NFSX_UNSIGNED;
		} else {
			*tl++ = rpc_mismatch;
			*tl++ = txdr_unsigned(RPC_VER2);
			*tl = txdr_unsigned(RPC_VER2);
		}
	} else {
		*tl++ = rpc_msgaccepted;

		/*
		 * For Kerberos authentication, we must send the nickname
		 * verifier back, otherwise just RPCAUTH_NULL.
		 */
		if (nd->nd_flag & ND_KERBFULL) {
			struct nfsuid *nuidp;
			struct timeval ktvin, ktvout;

			for (nuidp = NUIDHASH(slp, nd->nd_cr.cr_uid)->lh_first;
			    nuidp != 0; nuidp = nuidp->nu_hash.le_next) {
				if (nuidp->nu_cr.cr_uid == nd->nd_cr.cr_uid &&
				    (!nd->nd_nam2 || netaddr_match(NU_NETFAM(nuidp),
				     &nuidp->nu_haddr, nd->nd_nam2)))
					break;
			}
			if (nuidp) {
				ktvin.tv_sec =
				    txdr_unsigned(nuidp->nu_timestamp.tv_sec - 1);
				ktvin.tv_usec =
				    txdr_unsigned(nuidp->nu_timestamp.tv_usec);

				/*
				 * Encrypt the timestamp in ecb mode using the
				 * session key.
				 */
#ifdef NFSKERB
				XXX
#endif

				*tl++ = rpc_auth_kerb;
				*tl++ = txdr_unsigned(3 * NFSX_UNSIGNED);
				*tl = ktvout.tv_sec;
				nfsm_build(tl, u_int32_t *, 3 * NFSX_UNSIGNED);
				*tl++ = ktvout.tv_usec;
				*tl++ = txdr_unsigned(nuidp->nu_cr.cr_uid);
			} else {
				*tl++ = 0;
				*tl++ = 0;
			}
		} else {
			*tl++ = 0;
			*tl++ = 0;
		}
		switch (err) {
		case EPROGUNAVAIL:
			*tl = txdr_unsigned(RPC_PROGUNAVAIL);
			break;
		case EPROGMISMATCH:
			*tl = txdr_unsigned(RPC_PROGMISMATCH);
			nfsm_build(tl, u_int32_t *, 2 * NFSX_UNSIGNED);
			*tl++ = txdr_unsigned(2);
			*tl = txdr_unsigned(3);
			break;
		case EPROCUNAVAIL:
			*tl = txdr_unsigned(RPC_PROCUNAVAIL);
			break;
		case EBADRPC:
			*tl = txdr_unsigned(RPC_GARBAGE);
			break;
		default:
			*tl = 0;
			if (err != NFSERR_RETVOID) {
				nfsm_build(tl, u_int32_t *, NFSX_UNSIGNED);
				if (err)
					*tl = txdr_unsigned(nfsrv_errmap(nd, err));
				else
					*tl = 0;
			}
			break;
		}
	}

	if (mrq != NULL)
		*mrq = mreq;
	*mbp = mb;
	*bposp = bpos;
	if (err != 0 && err != NFSERR_RETVOID)
		nfsstats.srvrpc_errs++;
	return (0);
}

#endif /* NFS_NOSERVER */
/*
 * Scan the nfsreq list and retransmit any requests that have timed out.
 * To avoid retransmission attempts on STREAM sockets (in the future) make
 * sure to set the r_retry field to 0 (implies nm_retry == 0).
 */
void
nfs_timer(void *arg /* never used */)
{
	struct nfsreq *rep;
	struct mbuf *m;
	struct socket *so;
	struct nfsmount *nmp;
	int timeo;
	int error;
#ifndef NFS_NOSERVER
	struct nfssvc_sock *slp;
	u_quad_t cur_usec;
#endif /* NFS_NOSERVER */
	struct thread *td = &thread0; /* XXX for credentials, will break if sleep */

	crit_enter();
	TAILQ_FOREACH(rep, &nfs_reqq, r_chain) {
		nmp = rep->r_nmp;
		if (rep->r_mrep || (rep->r_flags & (R_SOFTTERM|R_MASKTIMER)))
			continue;
		rep->r_flags |= R_LOCKED;
		if (nfs_sigintr(nmp, rep, rep->r_td)) {
			nfs_softterm(rep);
			goto skip;
		}
		if (rep->r_rtt >= 0) {
			rep->r_rtt++;
			if (nmp->nm_flag & NFSMNT_DUMBTIMR)
				timeo = nmp->nm_timeo;
			else
				timeo = NFS_RTO(nmp, proct[rep->r_procnum]);
			if (nmp->nm_timeouts > 0)
				timeo *= nfs_backoff[nmp->nm_timeouts - 1];
			if (rep->r_rtt <= timeo)
				goto skip;
			if (nmp->nm_timeouts < 8)
				nmp->nm_timeouts++;
		}
		/*
		 * Check for server not responding
		 */
		if ((rep->r_flags & R_TPRINTFMSG) == 0 &&
		    rep->r_rexmit > nmp->nm_deadthresh) {
			nfs_msg(rep->r_td,
			    nmp->nm_mountp->mnt_stat.f_mntfromname,
			    "not responding");
			rep->r_flags |= R_TPRINTFMSG;
		}
		if (rep->r_rexmit >= rep->r_retry) {	/* too many */
			nfsstats.rpctimeouts++;
			nfs_softterm(rep);
			goto skip;
		}
		if (nmp->nm_sotype != SOCK_DGRAM) {
			if (++rep->r_rexmit > NFS_MAXREXMIT)
				rep->r_rexmit = NFS_MAXREXMIT;
			goto skip;
		}
		if ((so = nmp->nm_so) == NULL)
			goto skip;

		/*
		 * If there is enough space and the window allows..
		 *	Resend it
		 * Set r_rtt to -1 in case we fail to send it now.
		 */
		rep->r_rtt = -1;
		if (sbspace(&so->so_snd) >= rep->r_mreq->m_pkthdr.len &&
		   ((nmp->nm_flag & NFSMNT_DUMBTIMR) ||
		    (rep->r_flags & R_SENT) ||
		    nmp->nm_sent < nmp->nm_cwnd) &&
		   (m = m_copym(rep->r_mreq, 0, M_COPYALL, MB_DONTWAIT))){
			if ((nmp->nm_flag & NFSMNT_NOCONN) == 0)
			    error = so_pru_send(so, 0, m, (struct sockaddr *)0,
				 (struct mbuf *)0, td);
			else
			    error = so_pru_send(so, 0, m, nmp->nm_nam,
				(struct mbuf *)0, td);
			if (error) {
				if (NFSIGNORE_SOERROR(nmp->nm_soflags, error))
					so->so_error = 0;
			} else if (rep->r_mrep == NULL) {
				/*
				 * Iff first send, start timing
				 * else turn timing off, backoff timer
				 * and divide congestion window by 2.
				 *
				 * It is possible for the so_pru_send() to
				 * block and for us to race a reply so we
				 * only do this if the reply field has not
				 * been filled in. R_LOCKED will prevent
				 * the request from being ripped out from under
				 * us entirely.
				 */
				if (rep->r_flags & R_SENT) {
					rep->r_flags &= ~R_TIMING;
					if (++rep->r_rexmit > NFS_MAXREXMIT)
						rep->r_rexmit = NFS_MAXREXMIT;
					nmp->nm_cwnd >>= 1;
					if (nmp->nm_cwnd < NFS_CWNDSCALE)
						nmp->nm_cwnd = NFS_CWNDSCALE;
					nfsstats.rpcretries++;
				} else {
					rep->r_flags |= R_SENT;
					nmp->nm_sent += NFS_CWNDSCALE;
				}
				rep->r_rtt = 0;
			}
		}
skip:
		rep->r_flags &= ~R_LOCKED;
	}
#ifndef NFS_NOSERVER

	/*
	 * Scan the write gathering queues for writes that need to be
	 * completed now.
	 */
	cur_usec = nfs_curusec();
	TAILQ_FOREACH(slp, &nfssvc_sockhead, ns_chain) {
		if (slp->ns_tq.lh_first && slp->ns_tq.lh_first->nd_time <= cur_usec)
			nfsrv_wakenfsd(slp, 1);
	}
#endif /* NFS_NOSERVER */

	/*
	 * Due to possible blocking, a client operation may be waiting for
	 * us to finish processing this request so it can remove it.
	 */
	if (nfs_timer_raced) {
		nfs_timer_raced = 0;
		wakeup(&nfs_timer_raced);
	}
	callout_reset(&nfs_timer_handle, nfs_ticks, nfs_timer, NULL);
	crit_exit();
}
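/*
 * Timeout example (illustrative): with NFS_RTO returning 6 ticks and
 * nm_timeouts == 3 on the mount, the effective retransmit timeout above
 * is 6 * nfs_backoff[2] == 48 ticks.  On an actual retransmit the
 * congestion window is halved (never below NFS_CWNDSCALE), mirroring
 * the Van Jacobson scheme referenced at the top of this file.
 */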
/*
 * Mark all of an nfs mount's outstanding requests with R_SOFTTERM and
 * wait for all requests to complete. This is used by forced unmounts
 * to terminate any outstanding RPCs.
 */
int
nfs_nmcancelreqs(struct nfsmount *nmp)
{
	struct nfsreq *req;
	int i;

	crit_enter();
	TAILQ_FOREACH(req, &nfs_reqq, r_chain) {
		if (nmp != req->r_nmp || req->r_mrep != NULL ||
		    (req->r_flags & R_SOFTTERM)) {
			continue;
		}
		nfs_softterm(req);
	}
	crit_exit();

	for (i = 0; i < 30; i++) {
		crit_enter();
		TAILQ_FOREACH(req, &nfs_reqq, r_chain) {
			if (nmp == req->r_nmp)
				break;
		}
		crit_exit();
		if (req == NULL)
			return (0);
		tsleep(&lbolt, 0, "nfscancel", 0);
	}
	return (EBUSY);
}
/*
 * Flag a request as being about to terminate (due to NFSMNT_INT/NFSMNT_SOFT).
 * The nm_send count is decremented now to avoid deadlocks when the process in
 * soreceive() hasn't yet managed to send its own request.
 *
 * This routine must be called at splsoftclock() to protect r_flags and
 * nm_sent.
 */
static void
nfs_softterm(struct nfsreq *rep)
{
	rep->r_flags |= R_SOFTTERM;

	if (rep->r_flags & R_SENT) {
		rep->r_nmp->nm_sent -= NFS_CWNDSCALE;
		rep->r_flags &= ~R_SENT;
	}
}
/*
 * Test for a termination condition pending on the process.
 * This is used for NFSMNT_INT mounts.
 */
int
nfs_sigintr(struct nfsmount *nmp, struct nfsreq *rep, struct thread *td)
{
	sigset_t tmpset;
	struct proc *p;

	if (rep && (rep->r_flags & R_SOFTTERM))
		return (EINTR);
	/* Terminate all requests while attempting a forced unmount. */
	if (nmp->nm_mountp->mnt_kern_flag & MNTK_UNMOUNTF)
		return (EINTR);
	if (!(nmp->nm_flag & NFSMNT_INT))
		return (0);
	/* td might be NULL YYY */
	if (td == NULL || (p = td->td_proc) == NULL)
		return (0);

	tmpset = p->p_siglist;
	SIGSETNAND(tmpset, p->p_sigmask);
	SIGSETNAND(tmpset, p->p_sigignore);
	if (SIGNOTEMPTY(p->p_siglist) && NFSINT_SIGMASK(tmpset))
		return (EINTR);

	return (0);
}
/*
 * Lock a socket against others.
 * Necessary for STREAM sockets to ensure you get an entire rpc request/reply
 * and also to avoid race conditions between the processes with nfs requests
 * in progress when a reconnect is necessary.
 */
int
nfs_sndlock(struct nfsreq *rep)
{
	int *statep = &rep->r_nmp->nm_state;
	struct thread *td;
	int slpflag = 0, slptimeo = 0;

	td = rep->r_td;
	if (rep->r_nmp->nm_flag & NFSMNT_INT)
		slpflag = PCATCH;

	while (*statep & NFSSTA_SNDLOCK) {
		*statep |= NFSSTA_WANTSND;
		if (nfs_sigintr(rep->r_nmp, rep, td)) {
			return (EINTR);
		}
		tsleep((caddr_t)statep, slpflag, "nfsndlck", slptimeo);
		if (slpflag == PCATCH) {
			slpflag = 0;
			slptimeo = 2 * hz;
		}
	}
	/* Always fail if our request has been cancelled. */
	if ((rep->r_flags & R_SOFTTERM))
		return (EINTR);
	*statep |= NFSSTA_SNDLOCK;
	return (0);
}

/*
 * Unlock the stream socket for others.
 */
void
nfs_sndunlock(struct nfsreq *rep)
{
	int *statep = &rep->r_nmp->nm_state;

	if ((*statep & NFSSTA_SNDLOCK) == 0)
		panic("nfs sndunlock");
	*statep &= ~NFSSTA_SNDLOCK;
	if (*statep & NFSSTA_WANTSND) {
		*statep &= ~NFSSTA_WANTSND;
		wakeup((caddr_t)statep);
	}
}
static int
nfs_rcvlock(struct nfsreq *rep)
{
	int *statep = &rep->r_nmp->nm_state;
	int slpflag;
	int slptimeo = 0;

	/*
	 * Unconditionally check for completion in case another nfsiod
	 * gets the packet while the caller was blocked, before the caller
	 * called us. Packet reception is handled by mainline code which
	 * is protected by the BGL at the moment.
	 *
	 * We do not strictly need the second check just before the
	 * tsleep(), but it's good defensive programming.
	 */
	if (rep->r_mrep != NULL)
		return (EALREADY);

	if (rep->r_nmp->nm_flag & NFSMNT_INT)
		slpflag = PCATCH;
	else
		slpflag = 0;

	while (*statep & NFSSTA_RCVLOCK) {
		if (nfs_sigintr(rep->r_nmp, rep, rep->r_td)) {
			return (EINTR);
		}
		if (rep->r_mrep != NULL) {
			return (EALREADY);
		}
		*statep |= NFSSTA_WANTRCV;
		tsleep((caddr_t)statep, slpflag, "nfsrcvlk", slptimeo);
		/*
		 * If our reply was received while we were sleeping,
		 * then just return without taking the lock to avoid a
		 * situation where a single iod could 'capture' the
		 * receive lock.
		 */
		if (rep->r_mrep != NULL) {
			return (EALREADY);
		}
		if (slpflag == PCATCH) {
			slpflag = 0;
			slptimeo = 2 * hz;
		}
	}
	*statep |= NFSSTA_RCVLOCK;
	rep->r_nmp->nm_rcvlock_td = curthread;	/* DEBUGGING */
	return (0);
}

/*
 * Unlock the stream socket for others.
 */
static void
nfs_rcvunlock(struct nfsreq *rep)
{
	int *statep = &rep->r_nmp->nm_state;

	if ((*statep & NFSSTA_RCVLOCK) == 0)
		panic("nfs rcvunlock");
	rep->r_nmp->nm_rcvlock_td = (void *)-1; /* DEBUGGING */
	*statep &= ~NFSSTA_RCVLOCK;
	if (*statep & NFSSTA_WANTRCV) {
		*statep &= ~NFSSTA_WANTRCV;
		wakeup((caddr_t)statep);
	}
}
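/*
 * Lock pairing sketch (hypothetical caller, kept disabled): every
 * successful nfs_rcvlock() must be matched by nfs_rcvunlock() on the
 * same request, the same way nfs_reply() and nfs_safedisconnect() do.
 */
#if 0
	error = nfs_rcvlock(rep);
	if (error == 0) {
		error = nfs_receive(rep, &nam, &mrep);
		nfs_rcvunlock(rep);
	}
#endif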
/*
 * Check for badly aligned mbuf data and realign by copying the unaligned
 * portion of the data into a new mbuf chain and freeing the portions
 * of the old chain that were replaced.
 *
 * We cannot simply realign the data within the existing mbuf chain
 * because the underlying buffers may contain other rpc commands and
 * we cannot afford to overwrite them.
 *
 * We would prefer to avoid this situation entirely. The situation does
 * not occur with NFS/UDP and is supposed to only occasionally occur
 * with TCP. Use vfs.nfs.realign_count and realign_test to check this.
 */
static void
nfs_realign(struct mbuf **pm, int hsiz)
{
	struct mbuf *m;
	struct mbuf *n = NULL;
	int off = 0;

	++nfs_realign_test;
	while ((m = *pm) != NULL) {
		if ((m->m_len & 0x3) || (mtod(m, intptr_t) & 0x3)) {
			n = m_getl(m->m_len, MB_WAIT, MT_DATA, 0, NULL);
			n->m_len = 0;
			break;
		}
		pm = &m->m_next;
	}

	/*
	 * If n is non-NULL, loop on m copying data, then replace the
	 * portion of the chain that had to be realigned.
	 */
	if (n != NULL) {
		++nfs_realign_count;
		while (m) {
			m_copyback(n, off, m->m_len, mtod(m, caddr_t));
			off += m->m_len;
			m = m->m_next;
		}
		m_freem(*pm);
		*pm = n;
	}
}
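/*
 * Alignment test sketch (restating the check above for clarity): an
 * mbuf triggers realignment when its length or its data pointer is not
 * a multiple of 4, since XDR words are decoded with 32 bit loads.
 */
#if 0
	int misaligned = (m->m_len & 0x3) || (mtod(m, intptr_t) & 0x3);
#endif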
#ifndef NFS_NOSERVER

/*
 * Parse an RPC request
 * - verify it
 * - fill in the cred struct.
 */
int
nfs_getreq(struct nfsrv_descript *nd, struct nfsd *nfsd, int has_header)
{
	int len, i;
	u_int32_t *tl;
	int32_t t1;
	struct uio uio;
	struct iovec iov;
	caddr_t dpos, cp2, cp;
	u_int32_t nfsvers, auth_type;
	uid_t nickuid;
	int error = 0, ticklen;
	struct mbuf *mrep, *md;
	struct nfsuid *nuidp;
	struct timeval tvin, tvout;
#if 0				/* until encrypted keys are implemented */
	NFSKERBKEYSCHED_T keys;	/* stores key schedule */
#endif

	mrep = nd->nd_mrep;
	md = nd->nd_md;
	dpos = nd->nd_dpos;
	if (has_header) {
		nfsm_dissect(tl, u_int32_t *, 10 * NFSX_UNSIGNED);
		nd->nd_retxid = fxdr_unsigned(u_int32_t, *tl++);
		if (*tl++ != rpc_call) {
			m_freem(mrep);
			return (EBADRPC);
		}
	} else
		nfsm_dissect(tl, u_int32_t *, 8 * NFSX_UNSIGNED);
	nd->nd_repstat = 0;
	nd->nd_flag = 0;
	if (*tl++ != rpc_vers) {
		nd->nd_repstat = ERPCMISMATCH;
		nd->nd_procnum = NFSPROC_NOOP;
		return (0);
	}
	if (*tl != nfs_prog) {
		nd->nd_repstat = EPROGUNAVAIL;
		nd->nd_procnum = NFSPROC_NOOP;
		return (0);
	}
	tl++;
	nfsvers = fxdr_unsigned(u_int32_t, *tl++);
	if (nfsvers < NFS_VER2 || nfsvers > NFS_VER3) {
		nd->nd_repstat = EPROGMISMATCH;
		nd->nd_procnum = NFSPROC_NOOP;
		return (0);
	}
	if (nfsvers == NFS_VER3)
		nd->nd_flag = ND_NFSV3;
	nd->nd_procnum = fxdr_unsigned(u_int32_t, *tl++);
	if (nd->nd_procnum == NFSPROC_NULL)
		return (0);
	if (nd->nd_procnum >= NFS_NPROCS ||
		(nd->nd_procnum >= NQNFSPROC_GETLEASE) ||
		(!nd->nd_flag && nd->nd_procnum > NFSV2PROC_STATFS)) {
		nd->nd_repstat = EPROCUNAVAIL;
		nd->nd_procnum = NFSPROC_NOOP;
		return (0);
	}
	if ((nd->nd_flag & ND_NFSV3) == 0)
		nd->nd_procnum = nfsv3_procid[nd->nd_procnum];
	auth_type = *tl++;
	len = fxdr_unsigned(int, *tl++);
	if (len < 0 || len > RPCAUTH_MAXSIZ) {
		m_freem(mrep);
		return (EBADRPC);
	}

	nd->nd_flag &= ~ND_KERBAUTH;
	/*
	 * Handle auth_unix or auth_kerb.
	 */
	if (auth_type == rpc_auth_unix) {
		len = fxdr_unsigned(int, *++tl);
		if (len < 0 || len > NFS_MAXNAMLEN) {
			m_freem(mrep);
			return (EBADRPC);
		}
		nfsm_adv(nfsm_rndup(len));
		nfsm_dissect(tl, u_int32_t *, 3 * NFSX_UNSIGNED);
		bzero((caddr_t)&nd->nd_cr, sizeof (struct ucred));
		nd->nd_cr.cr_ref = 1;
		nd->nd_cr.cr_uid = fxdr_unsigned(uid_t, *tl++);
		nd->nd_cr.cr_gid = fxdr_unsigned(gid_t, *tl++);
		len = fxdr_unsigned(int, *tl);
		if (len < 0 || len > RPCAUTH_UNIXGIDS) {
			m_freem(mrep);
			return (EBADRPC);
		}
		nfsm_dissect(tl, u_int32_t *, (len + 2) * NFSX_UNSIGNED);
		for (i = 1; i <= len; i++)
			if (i < NGROUPS)
				nd->nd_cr.cr_groups[i] = fxdr_unsigned(gid_t, *tl++);
			else
				tl++;
		nd->nd_cr.cr_ngroups = (len >= NGROUPS) ? NGROUPS : (len + 1);
		if (nd->nd_cr.cr_ngroups > 1)
			nfsrvw_sort(nd->nd_cr.cr_groups, nd->nd_cr.cr_ngroups);
		len = fxdr_unsigned(int, *++tl);
		if (len < 0 || len > RPCAUTH_MAXSIZ) {
			m_freem(mrep);
			return (EBADRPC);
		}
		if (len > 0)
			nfsm_adv(nfsm_rndup(len));
	} else if (auth_type == rpc_auth_kerb) {
		switch (fxdr_unsigned(int, *tl++)) {
		case RPCAKN_FULLNAME:
			ticklen = fxdr_unsigned(int, *tl);
			*((u_int32_t *)nfsd->nfsd_authstr) = *tl;
			uio.uio_resid = nfsm_rndup(ticklen) + NFSX_UNSIGNED;
			nfsd->nfsd_authlen = uio.uio_resid + NFSX_UNSIGNED;
			if (uio.uio_resid > (len - 2 * NFSX_UNSIGNED)) {
				m_freem(mrep);
				return (EBADRPC);
			}
			uio.uio_offset = 0;
			uio.uio_iov = &iov;
			uio.uio_iovcnt = 1;
			uio.uio_segflg = UIO_SYSSPACE;
			iov.iov_base = (caddr_t)&nfsd->nfsd_authstr[4];
			iov.iov_len = RPCAUTH_MAXSIZ - 4;
			nfsm_mtouio(&uio, uio.uio_resid);
			nfsm_dissect(tl, u_int32_t *, 2 * NFSX_UNSIGNED);
			if (*tl++ != rpc_auth_kerb ||
				fxdr_unsigned(int, *tl) != 4 * NFSX_UNSIGNED) {
				printf("Bad kerb verifier\n");
				nd->nd_repstat = (NFSERR_AUTHERR|AUTH_BADVERF);
				nd->nd_procnum = NFSPROC_NOOP;
				return (0);
			}
			nfsm_dissect(cp, caddr_t, 4 * NFSX_UNSIGNED);
			tl = (u_int32_t *)cp;
			if (fxdr_unsigned(int, *tl) != RPCAKN_FULLNAME) {
				printf("Not fullname kerb verifier\n");
				nd->nd_repstat = (NFSERR_AUTHERR|AUTH_BADVERF);
				nd->nd_procnum = NFSPROC_NOOP;
				return (0);
			}
			cp += NFSX_UNSIGNED;
			bcopy(cp, nfsd->nfsd_verfstr, 3 * NFSX_UNSIGNED);
			nfsd->nfsd_verflen = 3 * NFSX_UNSIGNED;
			nd->nd_flag |= ND_KERBFULL;
			nfsd->nfsd_flag |= NFSD_NEEDAUTH;
			break;
		case RPCAKN_NICKNAME:
			if (len != 2 * NFSX_UNSIGNED) {
				printf("Kerb nickname short\n");
				nd->nd_repstat = (NFSERR_AUTHERR|AUTH_BADCRED);
				nd->nd_procnum = NFSPROC_NOOP;
				return (0);
			}
			nickuid = fxdr_unsigned(uid_t, *tl);
			nfsm_dissect(tl, u_int32_t *, 2 * NFSX_UNSIGNED);
			if (*tl++ != rpc_auth_kerb ||
				fxdr_unsigned(int, *tl) != 3 * NFSX_UNSIGNED) {
				printf("Kerb nick verifier bad\n");
				nd->nd_repstat = (NFSERR_AUTHERR|AUTH_BADVERF);
				nd->nd_procnum = NFSPROC_NOOP;
				return (0);
			}
			nfsm_dissect(tl, u_int32_t *, 3 * NFSX_UNSIGNED);
			tvin.tv_sec = *tl++;
			tvin.tv_usec = *tl;

			for (nuidp = NUIDHASH(nfsd->nfsd_slp,nickuid)->lh_first;
			    nuidp != 0; nuidp = nuidp->nu_hash.le_next) {
				if (nuidp->nu_cr.cr_uid == nickuid &&
				    (!nd->nd_nam2 ||
				     netaddr_match(NU_NETFAM(nuidp),
				      &nuidp->nu_haddr, nd->nd_nam2)))
					break;
			}
			if (!nuidp) {
				nd->nd_repstat =
					(NFSERR_AUTHERR|AUTH_REJECTCRED);
				nd->nd_procnum = NFSPROC_NOOP;
				return (0);
			}

			/*
			 * Now, decrypt the timestamp using the session key
			 * and validate it.
			 */
#ifdef NFSKERB
			XXX
#endif

			tvout.tv_sec = fxdr_unsigned(long, tvout.tv_sec);
			tvout.tv_usec = fxdr_unsigned(long, tvout.tv_usec);
			if (nuidp->nu_expire < time_second ||
			    nuidp->nu_timestamp.tv_sec > tvout.tv_sec ||
			    (nuidp->nu_timestamp.tv_sec == tvout.tv_sec &&
			     nuidp->nu_timestamp.tv_usec > tvout.tv_usec)) {
				nuidp->nu_expire = 0;
				nd->nd_repstat =
				    (NFSERR_AUTHERR|AUTH_REJECTVERF);
				nd->nd_procnum = NFSPROC_NOOP;
				return (0);
			}
			nfsrv_setcred(&nuidp->nu_cr, &nd->nd_cr);
			nd->nd_flag |= ND_KERBNICK;
			break;
		}
	} else {
		nd->nd_repstat = (NFSERR_AUTHERR | AUTH_REJECTCRED);
		nd->nd_procnum = NFSPROC_NOOP;
		return (0);
	}

	nd->nd_md = md;
	nd->nd_dpos = dpos;
	return (0);
nfsmout:
	return (error);
}
/*
 * Send a message to the originating process's terminal. The thread and/or
 * process may be NULL. YYY the thread should not be NULL but there may
 * still be some uio_td's that are still being passed as NULL through to
 * nfsm_request().
 */
static int
nfs_msg(struct thread *td, char *server, char *msg)
{
	tpr_t tpr;

	if (td && td->td_proc)
		tpr = tprintf_open(td->td_proc);
	else
		tpr = NULL;
	tprintf(tpr, "nfs server %s: %s\n", server, msg);
	tprintf_close(tpr);
	return (0);
}
#ifndef NFS_NOSERVER
/*
 * Socket upcall routine for the nfsd sockets.
 * The caddr_t arg is a pointer to the "struct nfssvc_sock".
 * Essentially do as much as possible non-blocking, else punt and it will
 * be called with MB_WAIT from an nfsd.
 */
void
nfsrv_rcv(struct socket *so, void *arg, int waitflag)
{
	struct nfssvc_sock *slp = (struct nfssvc_sock *)arg;
	struct mbuf *m;
	struct mbuf *mp;
	struct sockaddr *nam;
	struct uio auio;
	int flags, error;
	int nparallel_wakeup = 0;

	if ((slp->ns_flag & SLP_VALID) == 0)
		return;

	/*
	 * Do not allow an infinite number of completed RPC records to build
	 * up before we stop reading data from the socket. Otherwise we could
	 * end up holding onto an unreasonable number of mbufs for requests
	 * waiting for service.
	 *
	 * This should give pretty good feedback to the TCP
	 * layer and prevents a memory crunch for other protocols.
	 *
	 * Note that the same service socket can be dispatched to several
	 * nfs servers simultaneously.
	 *
	 * the tcp protocol callback calls us with MB_DONTWAIT.
	 * nfsd calls us with MB_WAIT (typically).
	 */
	if (waitflag == MB_DONTWAIT && slp->ns_numrec >= nfsd_waiting / 2 + 1) {
		slp->ns_flag |= SLP_NEEDQ;
		goto dorecs;
	}

	/*
	 * Handle protocol specifics to parse an RPC request. We always
	 * pull from the socket using non-blocking I/O.
	 */
	auio.uio_td = NULL;
	if (so->so_type == SOCK_STREAM) {
		/*
		 * The data has to be read in an orderly fashion from a TCP
		 * stream, unlike a UDP socket. It is possible for soreceive
		 * and/or nfsrv_getstream() to block, so make sure only one
		 * entity is messing around with the TCP stream at any given
		 * moment. The receive sockbuf's lock in soreceive is not
		 * sufficient.
		 *
		 * Note that this procedure can be called from any number of
		 * NFS servers *OR* can be upcalled directly from a TCP
		 * protocol thread.
		 */
		if (slp->ns_flag & SLP_GETSTREAM) {
			slp->ns_flag |= SLP_NEEDQ;
			goto dorecs;
		}
		slp->ns_flag |= SLP_GETSTREAM;

		/*
		 * Do soreceive().
		 */
		auio.uio_resid = 1000000000;
		flags = MSG_DONTWAIT;
		error = so_pru_soreceive(so, &nam, &auio, &mp, NULL, &flags);
		if (error || mp == (struct mbuf *)0) {
			if (error == EWOULDBLOCK)
				slp->ns_flag |= SLP_NEEDQ;
			else
				slp->ns_flag |= SLP_DISCONN;
			slp->ns_flag &= ~SLP_GETSTREAM;
			goto dorecs;
		}
		m = mp;
		if (slp->ns_rawend) {
			slp->ns_rawend->m_next = m;
			slp->ns_cc += 1000000000 - auio.uio_resid;
		} else {
			slp->ns_raw = m;
			slp->ns_cc = 1000000000 - auio.uio_resid;
		}
		while (m->m_next)
			m = m->m_next;
		slp->ns_rawend = m;

		/*
		 * Now try and parse as many record(s) as we can out of the
		 * raw stream data.
		 */
		error = nfsrv_getstream(slp, waitflag, &nparallel_wakeup);
		if (error) {
			if (error == EPERM)
				slp->ns_flag |= SLP_DISCONN;
			else
				slp->ns_flag |= SLP_NEEDQ;
		}
		slp->ns_flag &= ~SLP_GETSTREAM;
	} else {
		/*
		 * For UDP soreceive typically pulls just one packet, loop
		 * to get the whole batch.
		 */
		do {
			auio.uio_resid = 1000000000;
			flags = MSG_DONTWAIT;
			error = so_pru_soreceive(so, &nam, &auio, &mp, NULL,
						 &flags);
			if (mp) {
				struct nfsrv_rec *rec;
				int mf = (waitflag & MB_DONTWAIT) ?
					    M_NOWAIT : M_WAITOK;
				rec = kmalloc(sizeof(struct nfsrv_rec),
					     M_NFSRVDESC, mf);
				if (!rec) {
					if (nam)
						FREE(nam, M_SONAME);
					m_freem(mp);
					continue;
				}
				nfs_realign(&mp, 10 * NFSX_UNSIGNED);
				rec->nr_address = nam;
				rec->nr_packet = mp;
				STAILQ_INSERT_TAIL(&slp->ns_rec, rec, nr_link);
				++slp->ns_numrec;
				++nparallel_wakeup;
			}
			if (error) {
				if ((so->so_proto->pr_flags & PR_CONNREQUIRED)
				    && error != EWOULDBLOCK) {
					slp->ns_flag |= SLP_DISCONN;
					goto dorecs;
				}
			}
		} while (mp);
	}

	/*
	 * If we were upcalled from the tcp protocol layer and we have
	 * fully parsed records ready to go, or there is new data pending,
	 * or something went wrong, try to wake up an nfsd thread to deal
	 * with it.
	 */
dorecs:
	if (waitflag == MB_DONTWAIT && (slp->ns_numrec > 0
	     || (slp->ns_flag & (SLP_NEEDQ | SLP_DISCONN)))) {
		nfsrv_wakenfsd(slp, nparallel_wakeup);
	}
}
/*
 * Try and extract an RPC request from the mbuf data list received on a
 * stream socket. The "waitflag" argument indicates whether or not it
 * can sleep.
 */
static int
nfsrv_getstream(struct nfssvc_sock *slp, int waitflag, int *countp)
{
	struct mbuf *m, **mpp;
	char *cp1, *cp2;
	int len;
	struct mbuf *om, *m2, *recm;
	u_int32_t recmark;

	for (;;) {
		if (slp->ns_reclen == 0) {
			if (slp->ns_cc < NFSX_UNSIGNED)
				return (0);
			m = slp->ns_raw;
			if (m->m_len >= NFSX_UNSIGNED) {
				bcopy(mtod(m, caddr_t), (caddr_t)&recmark, NFSX_UNSIGNED);
				m->m_data += NFSX_UNSIGNED;
				m->m_len -= NFSX_UNSIGNED;
			} else {
				cp1 = (caddr_t)&recmark;
				cp2 = mtod(m, caddr_t);
				while (cp1 < ((caddr_t)&recmark) + NFSX_UNSIGNED) {
					while (m->m_len == 0) {
						m = m->m_next;
						cp2 = mtod(m, caddr_t);
					}
					*cp1++ = *cp2++;
					m->m_data++;
					m->m_len--;
				}
			}
			slp->ns_cc -= NFSX_UNSIGNED;
			recmark = ntohl(recmark);
			slp->ns_reclen = recmark & ~0x80000000;
			if (recmark & 0x80000000)
				slp->ns_flag |= SLP_LASTFRAG;
			else
				slp->ns_flag &= ~SLP_LASTFRAG;
			if (slp->ns_reclen > NFS_MAXPACKET || slp->ns_reclen <= 0) {
				log(LOG_ERR, "%s (%d) from nfs client\n",
				    "impossible packet length",
				    slp->ns_reclen);
				return (EPERM);
			}
		}

		/*
		 * Now get the record part.
		 *
		 * Note that slp->ns_reclen may be 0. Linux sometimes
		 * generates 0-length RPCs
		 */
		recm = NULL;
		if (slp->ns_cc == slp->ns_reclen) {
			recm = slp->ns_raw;
			slp->ns_raw = slp->ns_rawend = (struct mbuf *)0;
			slp->ns_cc = slp->ns_reclen = 0;
		} else if (slp->ns_cc > slp->ns_reclen) {
			len = 0;
			m = slp->ns_raw;
			om = (struct mbuf *)0;

			while (len < slp->ns_reclen) {
				if ((len + m->m_len) > slp->ns_reclen) {
					m2 = m_copym(m, 0, slp->ns_reclen - len,
						waitflag);
					if (m2) {
						if (om) {
							om->m_next = m2;
							recm = slp->ns_raw;
						} else
							recm = m2;
						m->m_data += slp->ns_reclen - len;
						m->m_len -= slp->ns_reclen - len;
						len = slp->ns_reclen;
					} else {
						return (EWOULDBLOCK);
					}
				} else if ((len + m->m_len) == slp->ns_reclen) {
					om = m;
					len += m->m_len;
					m = m->m_next;
					recm = slp->ns_raw;
					om->m_next = (struct mbuf *)0;
				} else {
					om = m;
					len += m->m_len;
					m = m->m_next;
				}
			}
			slp->ns_raw = m;
			slp->ns_cc -= len;
			slp->ns_reclen = 0;
		} else {
			return (0);
		}

		/*
		 * Accumulate the fragments into a record.
		 */
		mpp = &slp->ns_frag;
		while (*mpp)
			mpp = &((*mpp)->m_next);
		*mpp = recm;
		if (slp->ns_flag & SLP_LASTFRAG) {
			struct nfsrv_rec *rec;
			int mf = (waitflag & MB_DONTWAIT) ? M_NOWAIT : M_WAITOK;
			rec = kmalloc(sizeof(struct nfsrv_rec), M_NFSRVDESC, mf);
			if (!rec) {
				m_freem(slp->ns_frag);
			} else {
				nfs_realign(&slp->ns_frag, 10 * NFSX_UNSIGNED);
				rec->nr_address = (struct sockaddr *)0;
				rec->nr_packet = slp->ns_frag;
				STAILQ_INSERT_TAIL(&slp->ns_rec, rec, nr_link);
				++slp->ns_numrec;
				++*countp;
			}
			slp->ns_frag = (struct mbuf *)0;
		}
	}
}
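/*
 * Reassembly example (illustrative): a 3000 byte RPC sent as two TCP
 * fragments arrives with marks htonl(2000) and htonl(1000 | 0x80000000).
 * Both payloads accumulate on ns_frag; only when the final-fragment bit
 * is seen is the completed record realigned and queued on ns_rec for an
 * nfsd to pick up via nfsrv_dorec().
 */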
/*
 * Parse an RPC header.
 */
int
nfsrv_dorec(struct nfssvc_sock *slp, struct nfsd *nfsd,
	    struct nfsrv_descript **ndp)
{
	struct nfsrv_rec *rec;
	struct mbuf *m;
	struct sockaddr *nam;
	struct nfsrv_descript *nd;
	int error;

	*ndp = NULL;
	if ((slp->ns_flag & SLP_VALID) == 0 || !STAILQ_FIRST(&slp->ns_rec))
		return (ENOBUFS);
	rec = STAILQ_FIRST(&slp->ns_rec);
	STAILQ_REMOVE_HEAD(&slp->ns_rec, nr_link);
	KKASSERT(slp->ns_numrec > 0);
	--slp->ns_numrec;
	nam = rec->nr_address;
	m = rec->nr_packet;
	kfree(rec, M_NFSRVDESC);
	MALLOC(nd, struct nfsrv_descript *, sizeof (struct nfsrv_descript),
		M_NFSRVDESC, M_WAITOK);
	nd->nd_md = nd->nd_mrep = m;
	nd->nd_nam2 = nam;
	nd->nd_dpos = mtod(m, caddr_t);
	error = nfs_getreq(nd, nfsd, TRUE);
	if (error) {
		if (nam) {
			FREE(nam, M_SONAME);
		}
		kfree((caddr_t)nd, M_NFSRVDESC);
		return (error);
	}
	*ndp = nd;
	nfsd->nfsd_nd = nd;
	return (0);
}
/*
 * Try to assign service sockets to nfsd threads based on the number
 * of new rpc requests that have been queued on the service socket.
 *
 * If no nfsd's are available or additional requests are pending, set the
 * NFSD_CHECKSLP flag so that one of the running nfsds will go look for
 * the work in the nfssvc_sock list when it is finished processing its
 * current work. This flag is only cleared when an nfsd can not find
 * any new work to perform.
 */
void
nfsrv_wakenfsd(struct nfssvc_sock *slp, int nparallel)
{
	struct nfsd *nd;

	if ((slp->ns_flag & SLP_VALID) == 0)
		return;
	if (nparallel <= 1)
		nparallel = 1;
	TAILQ_FOREACH(nd, &nfsd_head, nfsd_chain) {
		if (nd->nfsd_flag & NFSD_WAITING) {
			nd->nfsd_flag &= ~NFSD_WAITING;
			if (nd->nfsd_slp)
				panic("nfsd wakeup");
			slp->ns_sref++;
			nd->nfsd_slp = slp;
			wakeup((caddr_t)nd);
			if (--nparallel == 0)
				break;
		}
	}
	if (nparallel) {
		slp->ns_flag |= SLP_DOREC;
		nfsd_head_flag |= NFSD_CHECKSLP;
	}
}

#endif /* NFS_NOSERVER */