/*
 * Copyright (c) 1989, 1991, 1993, 1995
 *	The Regents of the University of California.  All rights reserved.
 *
 * This code is derived from software contributed to Berkeley by
 * Rick Macklem at The University of Guelph.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by the University of
 *	California, Berkeley and its contributors.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	@(#)nfs_socket.c	8.5 (Berkeley) 3/30/95
 * $FreeBSD: src/sys/nfs/nfs_socket.c,v 1.60.2.6 2003/03/26 01:44:46 alfred Exp $
 * $DragonFly: src/sys/vfs/nfs/nfs_socket.c,v 1.33 2006/03/27 16:18:39 dillon Exp $
 */
/*
 * Socket operations for use by nfs
 */
#include <sys/param.h>
#include <sys/systm.h>
#include <sys/malloc.h>
#include <sys/mount.h>
#include <sys/kernel.h>
#include <sys/vnode.h>
#include <sys/protosw.h>
#include <sys/resourcevar.h>
#include <sys/socket.h>
#include <sys/socketvar.h>
#include <sys/socketops.h>
#include <sys/syslog.h>
#include <sys/thread.h>
#include <sys/tprintf.h>
#include <sys/sysctl.h>
#include <sys/signalvar.h>

#include <netinet/in.h>
#include <netinet/tcp.h>
#include <sys/thread2.h>

#include "nfsm_subs.h"
/*
 * Estimate rto for an nfs rpc sent via an unreliable datagram.
 * Use the mean and mean deviation of rtt for the appropriate type of rpc
 * for the frequent rpcs and a default for the others.
 * The justification for doing "other" this way is that these rpcs
 * happen so infrequently that the timer estimate would probably be stale.
 * Also, since many of these rpcs are
 * non-idempotent, a conservative timeout is desired.
 * getattr, lookup - A+2D
 * read, write     - A+4D
 * other           - nm_timeo
 */
#define	NFS_RTO(n, t) \
	((t) == 0 ? (n)->nm_timeo : \
	 ((t) < 3 ? \
	  (((((n)->nm_srtt[t-1] + 3) >> 2) + (n)->nm_sdrtt[t-1] + 1) >> 1) : \
	  ((((n)->nm_srtt[t-1] + 7) >> 3) + (n)->nm_sdrtt[t-1] + 1)))

#define	NFS_SRTT(r)	(r)->r_nmp->nm_srtt[proct[(r)->r_procnum] - 1]
#define	NFS_SDRTT(r)	(r)->r_nmp->nm_sdrtt[proct[(r)->r_procnum] - 1]
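
/*
 * Worked example (illustrative values, added commentary not in the
 * original source): nm_srtt[] holds the smoothed rtt scaled by 8 and
 * nm_sdrtt[] the smoothed deviation scaled by 4.  With nm_srtt[2] == 32
 * (mean A of 4 ticks) and nm_sdrtt[2] == 8 (deviation D of 2 ticks), a
 * read rpc (timer type 3) gets
 *
 *	NFS_RTO = ((32 + 7) >> 3) + 8 + 1 = 13 ticks
 *
 * which is roughly A + 4D; types 1 and 2 take the branch with the
 * extra >> 1 and come out at roughly A + 2D, matching the table above.
 */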
/*
 * External data, mostly RPC constants in XDR form
 */
extern u_int32_t rpc_reply, rpc_msgdenied, rpc_mismatch, rpc_vers,
	rpc_auth_unix, rpc_msgaccepted, rpc_call, rpc_autherr,
	rpc_auth_kerb;
extern u_int32_t nfs_prog;
extern struct nfsstats nfsstats;
extern int nfsv3_procid[NFS_NPROCS];
extern int nfs_ticks;
/*
 * Defines which timer to use for the procnum.
 * 0 - default
 * 1 - getattr
 * 2 - lookup
 * 3 - read
 * 4 - write
 */
static int proct[NFS_NPROCS] = {
	0, 1, 0, 2, 1, 3, 3, 4, 0, 0, 0, 0, 0, 0, 0, 0, 3, 3, 0, 0, 0, 0, 0,
	0, 0, 0,
};
static int nfs_realign_test;
static int nfs_realign_count;
static int nfs_bufpackets = 4;
static int nfs_timer_raced;

SYSCTL_DECL(_vfs_nfs);

SYSCTL_INT(_vfs_nfs, OID_AUTO, realign_test, CTLFLAG_RW, &nfs_realign_test, 0, "");
SYSCTL_INT(_vfs_nfs, OID_AUTO, realign_count, CTLFLAG_RW, &nfs_realign_count, 0, "");
SYSCTL_INT(_vfs_nfs, OID_AUTO, bufpackets, CTLFLAG_RW, &nfs_bufpackets, 0, "");
/*
 * There is a congestion window for outstanding rpcs maintained per mount
 * point. The cwnd size is adjusted in roughly the way that:
 * Van Jacobson, "Congestion Avoidance and Control", in "Proceedings of
 * SIGCOMM '88", ACM, August 1988,
 * describes for TCP. The cwnd size is chopped in half on a retransmit timeout
 * and incremented by 1/cwnd when each rpc reply is received and a full cwnd
 * of rpcs is in progress.
 * (The sent count and cwnd are scaled for integer arith.)
 * Variants of "slow start" were tried and were found to be too much of a
 * performance hit (average rtt 3 times larger),
 * I suspect due to the large rtt that nfs rpcs have.
 */
#define	NFS_CWNDSCALE	256
#define	NFS_MAXCWND	(NFS_CWNDSCALE * 32)
static int nfs_backoff[8] = { 2, 4, 8, 16, 32, 64, 128, 256, };
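
/*
 * Illustrative arithmetic (added commentary, not in the original
 * source): with NFS_CWNDSCALE == 256, one rpc corresponds to a cwnd of
 * 256 and NFS_MAXCWND caps the window at 32 outstanding rpcs.  The
 * additive increase performed in nfs_reply(),
 *
 *	nm_cwnd += (256 * 256 + (nm_cwnd >> 1)) / nm_cwnd;
 *
 * grows the window by about one rpc per round trip, e.g. a cwnd of
 * 1024 (4 rpcs) gains (65536 + 512) / 1024 == 64, i.e. 1/4 rpc per
 * reply, so a full window of 4 replies adds one rpc.
 */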
struct nfsrtt nfsrtt;
struct callout	nfs_timer_handle;
static int	nfs_msg (struct thread *, char *, char *);
static int	nfs_rcvlock (struct nfsreq *);
static void	nfs_rcvunlock (struct nfsreq *);
static void	nfs_realign (struct mbuf **pm, int hsiz);
static int	nfs_receive (struct nfsreq *rep, struct sockaddr **aname,
				struct mbuf **mp);
static void	nfs_softterm (struct nfsreq *rep);
static int	nfs_reconnect (struct nfsreq *rep);
#ifndef NFS_NOSERVER
static int	nfsrv_getstream (struct nfssvc_sock *, int, int *);

int (*nfsrv3_procs[NFS_NPROCS]) (struct nfsrv_descript *nd,
				    struct nfssvc_sock *slp,
				    struct thread *td,
				    struct mbuf **mreqp) = {
	nfsrv_null,
	nfsrv_getattr,
	nfsrv_setattr,
	nfsrv_lookup,
	nfsrv3_access,
	nfsrv_readlink,
	nfsrv_read,
	nfsrv_write,
	nfsrv_create,
	nfsrv_mkdir,
	nfsrv_symlink,
	nfsrv_mknod,
	nfsrv_remove,
	nfsrv_rmdir,
	nfsrv_rename,
	nfsrv_link,
	nfsrv_readdir,
	nfsrv_readdirplus,
	nfsrv_statfs,
	nfsrv_fsinfo,
	nfsrv_pathconf,
	nfsrv_commit,
	nfsrv_noop,
	nfsrv_noop,
	nfsrv_noop,
	nfsrv_noop
};
#endif /* NFS_NOSERVER */
/*
 * Initialize sockets and congestion for a new NFS connection.
 * We do not free the sockaddr if error.
 */
int
nfs_connect(struct nfsmount *nmp, struct nfsreq *rep)
{
	struct socket *so;
	int error, rcvreserve, sndreserve;
	int pktscale;
	struct sockaddr *saddr;
	struct sockaddr_in *sin;
	struct thread *td = &thread0; /* only used for socreate and sobind */

	nmp->nm_so = (struct socket *)0;
	saddr = nmp->nm_nam;
	error = socreate(saddr->sa_family, &nmp->nm_so, nmp->nm_sotype,
	    nmp->nm_soproto, td);
	if (error)
		goto bad;
	so = nmp->nm_so;
	nmp->nm_soflags = so->so_proto->pr_flags;

	/*
	 * Some servers require that the client port be a reserved port
	 * number.
	 */
	if (saddr->sa_family == AF_INET && (nmp->nm_flag & NFSMNT_RESVPORT)) {
		struct sockopt sopt;
		int ip;
		struct sockaddr_in ssin;

		bzero(&sopt, sizeof sopt);
		ip = IP_PORTRANGE_LOW;
		sopt.sopt_level = IPPROTO_IP;
		sopt.sopt_name = IP_PORTRANGE;
		sopt.sopt_val = (void *)&ip;
		sopt.sopt_valsize = sizeof(ip);
		error = sosetopt(so, &sopt);
		if (error)
			goto bad;
		bzero(&ssin, sizeof ssin);
		sin = &ssin;
		sin->sin_len = sizeof (struct sockaddr_in);
		sin->sin_family = AF_INET;
		sin->sin_addr.s_addr = INADDR_ANY;
		sin->sin_port = htons(0);
		error = sobind(so, (struct sockaddr *)sin, td);
		if (error)
			goto bad;
		bzero(&sopt, sizeof sopt);
		ip = IP_PORTRANGE_DEFAULT;
		sopt.sopt_level = IPPROTO_IP;
		sopt.sopt_name = IP_PORTRANGE;
		sopt.sopt_val = (void *)&ip;
		sopt.sopt_valsize = sizeof(ip);
		error = sosetopt(so, &sopt);
		if (error)
			goto bad;
	}

	/*
	 * Protocols that do not require connections may be optionally left
	 * unconnected for servers that reply from a port other than NFS_PORT.
	 */
	if (nmp->nm_flag & NFSMNT_NOCONN) {
		if (nmp->nm_soflags & PR_CONNREQUIRED) {
			error = ENOTCONN;
			goto bad;
		}
	} else {
		error = soconnect(so, nmp->nm_nam, td);
		if (error)
			goto bad;

		/*
		 * Wait for the connection to complete. Cribbed from the
		 * connect system call but with the wait timing out so
		 * that interruptible mounts don't hang here for a long time.
		 */
		crit_enter();
		while ((so->so_state & SS_ISCONNECTING) && so->so_error == 0) {
			(void) tsleep((caddr_t)&so->so_timeo, 0,
			    "nfscon", 2 * hz);
			if ((so->so_state & SS_ISCONNECTING) &&
			    so->so_error == 0 && rep &&
			    (error = nfs_sigintr(nmp, rep, rep->r_td)) != 0){
				so->so_state &= ~SS_ISCONNECTING;
				crit_exit();
				goto bad;
			}
		}
		if (so->so_error) {
			error = so->so_error;
			so->so_error = 0;
			crit_exit();
			goto bad;
		}
		crit_exit();
	}
	so->so_rcv.sb_timeo = (5 * hz);
	so->so_snd.sb_timeo = (5 * hz);

	/*
	 * Get buffer reservation size from sysctl, but impose reasonable
	 * bounds.
	 */
	pktscale = nfs_bufpackets;
	if (pktscale < 2)
		pktscale = 2;
	if (pktscale > 64)
		pktscale = 64;

	if (nmp->nm_sotype == SOCK_DGRAM) {
		sndreserve = (nmp->nm_wsize + NFS_MAXPKTHDR) * pktscale;
		rcvreserve = (max(nmp->nm_rsize, nmp->nm_readdirsize) +
		    NFS_MAXPKTHDR) * pktscale;
	} else if (nmp->nm_sotype == SOCK_SEQPACKET) {
		sndreserve = (nmp->nm_wsize + NFS_MAXPKTHDR) * pktscale;
		rcvreserve = (max(nmp->nm_rsize, nmp->nm_readdirsize) +
		    NFS_MAXPKTHDR) * pktscale;
	} else {
		if (nmp->nm_sotype != SOCK_STREAM)
			panic("nfscon sotype");
		if (so->so_proto->pr_flags & PR_CONNREQUIRED) {
			struct sockopt sopt;
			int val;

			bzero(&sopt, sizeof sopt);
			sopt.sopt_level = SOL_SOCKET;
			sopt.sopt_name = SO_KEEPALIVE;
			sopt.sopt_val = &val;
			sopt.sopt_valsize = sizeof val;
			val = 1;
			sosetopt(so, &sopt);
		}
		if (so->so_proto->pr_protocol == IPPROTO_TCP) {
			struct sockopt sopt;
			int val;

			bzero(&sopt, sizeof sopt);
			sopt.sopt_level = IPPROTO_TCP;
			sopt.sopt_name = TCP_NODELAY;
			sopt.sopt_val = &val;
			sopt.sopt_valsize = sizeof val;
			val = 1;
			sosetopt(so, &sopt);
		}
		sndreserve = (nmp->nm_wsize + NFS_MAXPKTHDR +
		    sizeof (u_int32_t)) * pktscale;
		rcvreserve = (nmp->nm_rsize + NFS_MAXPKTHDR +
		    sizeof (u_int32_t)) * pktscale;
	}
	error = soreserve(so, sndreserve, rcvreserve,
	    &td->td_proc->p_rlimit[RLIMIT_SBSIZE]);
	if (error)
		goto bad;
	so->so_rcv.sb_flags |= SB_NOINTR;
	so->so_snd.sb_flags |= SB_NOINTR;

	/* Initialize other non-zero congestion variables */
	nmp->nm_srtt[0] = nmp->nm_srtt[1] = nmp->nm_srtt[2] =
		nmp->nm_srtt[3] = (NFS_TIMEO << 3);
	nmp->nm_sdrtt[0] = nmp->nm_sdrtt[1] = nmp->nm_sdrtt[2] =
		nmp->nm_sdrtt[3] = 0;
	nmp->nm_cwnd = NFS_MAXCWND / 2;	    /* Initial send window */
	nmp->nm_sent = 0;
	nmp->nm_timeouts = 0;
	return (0);

bad:
	nfs_disconnect(nmp);
	return (error);
}
/*
 * Reconnect routine:
 * Called when a connection is broken on a reliable protocol.
 * - clean up the old socket
 * - nfs_connect() again
 * - set R_MUSTRESEND for all outstanding requests on mount point
 * If this fails the mount point is DEAD!
 * nb: Must be called with the nfs_sndlock() set on the mount point.
 */
static int
nfs_reconnect(struct nfsreq *rep)
{
	struct nfsreq *rp;
	struct nfsmount *nmp = rep->r_nmp;
	int error;

	nfs_disconnect(nmp);
	while ((error = nfs_connect(nmp, rep)) != 0) {
		if (error == EINTR || error == ERESTART)
			return (EINTR);
		(void) tsleep((caddr_t)&lbolt, 0, "nfscon", 0);
	}

	/*
	 * Loop through outstanding request list and fix up all requests
	 * on old socket.
	 */
	crit_enter();
	TAILQ_FOREACH(rp, &nfs_reqq, r_chain) {
		if (rp->r_nmp == nmp)
			rp->r_flags |= R_MUSTRESEND;
	}
	crit_exit();
	return (0);
}
/*
 * NFS disconnect. Clean up and unlink.
 */
void
nfs_disconnect(struct nfsmount *nmp)
{
	struct socket *so;

	if (nmp->nm_so) {
		so = nmp->nm_so;
		nmp->nm_so = (struct socket *)0;
		soshutdown(so, 2);
		soclose(so);
	}
}
void
nfs_safedisconnect(struct nfsmount *nmp)
{
	struct nfsreq dummyreq;

	bzero(&dummyreq, sizeof(dummyreq));
	dummyreq.r_nmp = nmp;
	dummyreq.r_td = NULL;
	nfs_rcvlock(&dummyreq);
	nfs_disconnect(nmp);
	nfs_rcvunlock(&dummyreq);
}
/*
 * This is the nfs send routine. For connection based socket types, it
 * must be called with an nfs_sndlock() on the socket.
 * "rep == NULL" indicates that it has been called from a server.
 * For the client side:
 * - return EINTR if the RPC is terminated, 0 otherwise
 * - set R_MUSTRESEND if the send fails for any reason
 * - do any cleanup required by recoverable socket errors (?)
 * For the server side:
 * - return EINTR or ERESTART if interrupted by a signal
 * - return EPIPE if a connection is lost for connection based sockets (TCP...)
 * - do any cleanup required by recoverable socket errors (?)
 */
int
nfs_send(struct socket *so, struct sockaddr *nam, struct mbuf *top,
	 struct nfsreq *rep)
{
	struct sockaddr *sendnam;
	int error, soflags, flags;

	if (rep) {
		if (rep->r_flags & R_SOFTTERM) {
			m_freem(top);
			return (EINTR);
		}
		if ((so = rep->r_nmp->nm_so) == NULL) {
			rep->r_flags |= R_MUSTRESEND;
			m_freem(top);
			return (0);
		}
		rep->r_flags &= ~R_MUSTRESEND;
		soflags = rep->r_nmp->nm_soflags;
	} else {
		soflags = so->so_proto->pr_flags;
	}
	if ((soflags & PR_CONNREQUIRED) || (so->so_state & SS_ISCONNECTED))
		sendnam = (struct sockaddr *)0;
	else
		sendnam = nam;
	if (so->so_type == SOCK_SEQPACKET)
		flags = MSG_EOR;
	else
		flags = 0;

	error = so_pru_sosend(so, sendnam, NULL, top, NULL, flags,
	    curthread /* XXX */);
	/*
	 * ENOBUFS for dgram sockets is transient and non fatal.
	 * No need to log, and no need to break a soft mount.
	 */
	if (error == ENOBUFS && so->so_type == SOCK_DGRAM) {
		error = 0;
		if (rep)		/* do backoff retransmit on client */
			rep->r_flags |= R_MUSTRESEND;
	}

	if (error) {
		if (rep) {
			log(LOG_INFO, "nfs send error %d for server %s\n", error,
			    rep->r_nmp->nm_mountp->mnt_stat.f_mntfromname);
			/*
			 * Deal with errors for the client side.
			 */
			if (rep->r_flags & R_SOFTTERM)
				error = EINTR;
			else
				rep->r_flags |= R_MUSTRESEND;
		} else {
			log(LOG_INFO, "nfsd send error %d\n", error);
		}

		/*
		 * Handle any recoverable (soft) socket errors here. (?)
		 */
		if (error != EINTR && error != ERESTART &&
		    error != EWOULDBLOCK && error != EPIPE)
			error = 0;
	}
	return (error);
}
/*
 * Receive a Sun RPC Request/Reply. For SOCK_DGRAM, the work is all
 * done by soreceive(), but for SOCK_STREAM we must deal with the Record
 * Mark and consolidate the data into a new mbuf list.
 * nb: Sometimes TCP passes the data up to soreceive() in long lists of
 *     small mbufs.
 * For SOCK_STREAM we must be very careful to read an entire record once
 * we have read any of it, even if the system call has been interrupted.
 */
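
/*
 * Sun RPC record marking over stream sockets (RFC 1831 framing; this
 * summary is added commentary, not part of the original file): each
 * record fragment is preceded by a 32-bit big-endian mark whose high
 * bit flags the last fragment and whose low 31 bits give the fragment
 * length, e.g.
 *
 *	mark = htonl(0x80000000 | len);		(sender, final fragment)
 *	len  = ntohl(mark) & ~0x80000000;	(receiver)
 *
 * which matches the mark constructed in nfs_request() and the mask
 * applied below.
 */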
static int
nfs_receive(struct nfsreq *rep, struct sockaddr **aname, struct mbuf **mp)
{
	struct socket *so;
	struct uio auio;
	struct iovec aio;
	struct mbuf *m;
	struct mbuf *control;
	u_int32_t len;
	struct sockaddr **getnam;
	int error, sotype, rcvflg;
	struct thread *td = curthread;	/* XXX */

	/*
	 * Set up arguments for soreceive()
	 */
	*mp = (struct mbuf *)0;
	*aname = (struct sockaddr *)0;
	sotype = rep->r_nmp->nm_sotype;

	/*
	 * For reliable protocols, lock against other senders/receivers
	 * in case a reconnect is necessary.
	 * For SOCK_STREAM, first get the Record Mark to find out how much
	 * more there is to get.
	 * We must lock the socket against other receivers
	 * until we have an entire rpc request/reply.
	 */
	if (sotype != SOCK_DGRAM) {
		error = nfs_sndlock(rep);
		if (error)
			return (error);
tryagain:
		/*
		 * Check for fatal errors and resending request.
		 *
		 * Ugh: If a reconnect attempt just happened, nm_so
		 * would have changed. NULL indicates a failed
		 * attempt that has essentially shut down this
		 * mount point.
		 */
		if (rep->r_mrep || (rep->r_flags & R_SOFTTERM)) {
			nfs_sndunlock(rep);
			return (EINTR);
		}
		so = rep->r_nmp->nm_so;
		if (so == NULL) {
			error = nfs_reconnect(rep);
			if (error) {
				nfs_sndunlock(rep);
				return (error);
			}
			goto tryagain;
		}
		while (rep->r_flags & R_MUSTRESEND) {
			m = m_copym(rep->r_mreq, 0, M_COPYALL, MB_WAIT);
			nfsstats.rpcretries++;
			error = nfs_send(so, rep->r_nmp->nm_nam, m, rep);
			if (error) {
				if (error == EINTR || error == ERESTART ||
				    (error = nfs_reconnect(rep)) != 0) {
					nfs_sndunlock(rep);
					return (error);
				}
				goto tryagain;
			}
		}
		nfs_sndunlock(rep);
		if (sotype == SOCK_STREAM) {
			aio.iov_base = (caddr_t) &len;
			aio.iov_len = sizeof(u_int32_t);
			auio.uio_iov = &aio;
			auio.uio_iovcnt = 1;
			auio.uio_segflg = UIO_SYSSPACE;
			auio.uio_rw = UIO_READ;
			auio.uio_offset = 0;
			auio.uio_resid = sizeof(u_int32_t);
			auio.uio_td = td;
			do {
				rcvflg = MSG_WAITALL;
				error = so_pru_soreceive(so, NULL, &auio, NULL,
				    NULL, &rcvflg);
				if (error == EWOULDBLOCK && rep) {
					if (rep->r_flags & R_SOFTTERM)
						return (EINTR);
				}
			} while (error == EWOULDBLOCK);
			if (!error && auio.uio_resid > 0) {
				/*
				 * Don't log a 0 byte receive; it means
				 * that the socket has been closed, and
				 * can happen during normal operation
				 * (forcible unmount or Solaris server).
				 */
				if (auio.uio_resid != sizeof (u_int32_t))
					log(LOG_INFO,
					    "short receive (%d/%d) from nfs server %s\n",
					    (int)(sizeof(u_int32_t) - auio.uio_resid),
					    (int)sizeof(u_int32_t),
					    rep->r_nmp->nm_mountp->mnt_stat.f_mntfromname);
				error = EPIPE;
			}
			if (error)
				goto errout;
			len = ntohl(len) & ~0x80000000;
			/*
			 * This is SERIOUS! We are out of sync with the sender
			 * and forcing a disconnect/reconnect is all I can do.
			 */
			if (len > NFS_MAXPACKET) {
				log(LOG_ERR, "%s (%d) from nfs server %s\n",
				    "impossible packet length",
				    len,
				    rep->r_nmp->nm_mountp->mnt_stat.f_mntfromname);
				error = EFBIG;
				goto errout;
			}
			auio.uio_resid = len;
			do {
				rcvflg = MSG_WAITALL;
				error = so_pru_soreceive(so, NULL, &auio, mp,
				    NULL, &rcvflg);
			} while (error == EWOULDBLOCK || error == EINTR ||
				 error == ERESTART);
			if (!error && auio.uio_resid > 0) {
				if (len != auio.uio_resid)
					log(LOG_INFO,
					    "short receive (%d/%d) from nfs server %s\n",
					    len - auio.uio_resid, len,
					    rep->r_nmp->nm_mountp->mnt_stat.f_mntfromname);
				error = EPIPE;
			}
		} else {
			/*
			 * NB: Since uio_resid is big, MSG_WAITALL is ignored
			 * and soreceive() will return when it has either a
			 * control msg or a data msg.
			 * We have no use for control msg., but must grab them
			 * and then throw them away so we know what is going
			 * on.
			 */
			auio.uio_resid = len = 100000000; /* Anything Big */
			auio.uio_td = td;
			do {
				rcvflg = 0;
				error = so_pru_soreceive(so, NULL, &auio, mp,
				    &control, &rcvflg);
				if (control)
					m_freem(control);
				if (error == EWOULDBLOCK && rep) {
					if (rep->r_flags & R_SOFTTERM)
						return (EINTR);
				}
			} while (error == EWOULDBLOCK ||
				 (!error && *mp == NULL && control));
			if ((rcvflg & MSG_EOR) == 0)
				printf("Egad!!\n");
			if (!error && *mp == NULL)
				error = EPIPE;
			len -= auio.uio_resid;
		}
errout:
		if (error && error != EINTR && error != ERESTART) {
			m_freem(*mp);
			*mp = (struct mbuf *)0;
			if (error != EPIPE)
				log(LOG_INFO,
				    "receive error %d from nfs server %s\n",
				    error,
				    rep->r_nmp->nm_mountp->mnt_stat.f_mntfromname);
			error = nfs_sndlock(rep);
			if (!error) {
				error = nfs_reconnect(rep);
				if (!error)
					goto tryagain;
				else
					nfs_sndunlock(rep);
			}
		}
	} else {
		if ((so = rep->r_nmp->nm_so) == NULL)
			return (EACCES);
		if (so->so_state & SS_ISCONNECTED)
			getnam = (struct sockaddr **)0;
		else
			getnam = aname;
		auio.uio_resid = len = 1000000;
		auio.uio_td = td;
		do {
			rcvflg = 0;
			error = so_pru_soreceive(so, getnam, &auio, mp, NULL,
			    &rcvflg);
			if (error == EWOULDBLOCK &&
			    (rep->r_flags & R_SOFTTERM))
				return (EINTR);
		} while (error == EWOULDBLOCK);
		len -= auio.uio_resid;
	}
	if (error) {
		m_freem(*mp);
		*mp = (struct mbuf *)0;
	}
	/*
	 * Search for any mbufs that are not a multiple of 4 bytes long
	 * or with m_data not longword aligned.
	 * These could cause pointer alignment problems, so copy them to
	 * well aligned mbufs.
	 */
	nfs_realign(mp, 5 * NFSX_UNSIGNED);
	return (error);
}
/*
 * Implement receipt of reply on a socket.
 * We must search through the list of received datagrams matching them
 * with outstanding requests using the xid, until ours is found.
 */
/* ARGSUSED */
int
nfs_reply(struct nfsreq *myrep)
{
	struct nfsreq *rep;
	struct nfsmount *nmp = myrep->r_nmp;
	u_int32_t rxid, *tl;
	struct mbuf *mrep, *md;
	struct sockaddr *nam;
	caddr_t dpos, cp2;
	int t1;
	int error;

	/*
	 * Loop around until we get our own reply
	 */
	for (;;) {
		/*
		 * Lock against other receivers so that I don't get stuck in
		 * sbwait() after someone else has received my reply for me.
		 * Also necessary for connection based protocols to avoid
		 * race conditions during a reconnect.
		 * If nfs_rcvlock() returns EALREADY, that means that
		 * the reply has already been received by another
		 * process and we can return immediately. In this
		 * case, the lock is not taken to avoid races with
		 * anyone else.
		 */
		error = nfs_rcvlock(myrep);
		if (error == EALREADY)
			return (0);
		if (error)
			return (error);

		/*
		 * Get the next Rpc reply off the socket
		 */
		error = nfs_receive(myrep, &nam, &mrep);
		nfs_rcvunlock(myrep);
		if (error) {
			/*
			 * Ignore routing errors on connectionless protocols??
			 */
			if (NFSIGNORE_SOERROR(nmp->nm_soflags, error)) {
				nmp->nm_so->so_error = 0;
				if (myrep->r_flags & R_GETONEREP)
					return (0);
				continue;
			}
			return (error);
		}
		if (nam)
			FREE(nam, M_SONAME);

		/*
		 * Get the xid and check that it is an rpc reply
		 */
		md = mrep;
		dpos = mtod(md, caddr_t);
		nfsm_dissect(tl, u_int32_t *, 2*NFSX_UNSIGNED);
		rxid = *tl++;
		if (*tl != rpc_reply) {
			nfsstats.rpcinvalid++;
			m_freem(mrep);
nfsmout:
			if (myrep->r_flags & R_GETONEREP)
				return (0);
			continue;
		}

		/*
		 * Loop through the request list to match up the reply
		 * Iff no match, just drop the datagram. On match, set
		 * r_mrep atomically to prevent the timer from messing
		 * around with the request after we have exited the critical
		 * section.
		 */
		crit_enter();
		TAILQ_FOREACH(rep, &nfs_reqq, r_chain) {
			if (rep->r_mrep == NULL && rxid == rep->r_xid) {
				rep->r_mrep = mrep;
				break;
			}
		}
		crit_exit();

		/*
		 * Fill in the rest of the reply if we found a match.
		 */
		if (rep) {
			rep->r_md = md;
			rep->r_dpos = dpos;
			if (nfsrtton) {
				struct rttl *rt;

				rt = &nfsrtt.rttl[nfsrtt.pos];
				rt->proc = rep->r_procnum;
				rt->rto = NFS_RTO(nmp, proct[rep->r_procnum]);
				rt->sent = nmp->nm_sent;
				rt->cwnd = nmp->nm_cwnd;
				rt->srtt = nmp->nm_srtt[proct[rep->r_procnum] - 1];
				rt->sdrtt = nmp->nm_sdrtt[proct[rep->r_procnum] - 1];
				rt->fsid = nmp->nm_mountp->mnt_stat.f_fsid;
				getmicrotime(&rt->tstamp);
				if (rep->r_flags & R_TIMING)
					rt->rtt = rep->r_rtt;
				else
					rt->rtt = 1000000;
				nfsrtt.pos = (nfsrtt.pos + 1) % NFSRTTLOGSIZ;
			}

			/*
			 * Update congestion window.
			 * Do the additive increase of
			 * one rpc/rtt.
			 */
			if (nmp->nm_cwnd <= nmp->nm_sent) {
				nmp->nm_cwnd +=
				   (NFS_CWNDSCALE * NFS_CWNDSCALE +
				   (nmp->nm_cwnd >> 1)) / nmp->nm_cwnd;
				if (nmp->nm_cwnd > NFS_MAXCWND)
					nmp->nm_cwnd = NFS_MAXCWND;
			}
			crit_enter();	/* nfs_timer interlock for nm_sent */
			if (rep->r_flags & R_SENT) {
				rep->r_flags &= ~R_SENT;
				nmp->nm_sent -= NFS_CWNDSCALE;
			}
			crit_exit();
			/*
			 * Update rtt using a gain of 0.125 on the mean
			 * and a gain of 0.25 on the deviation.
			 */
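			/*
			 * Worked example (illustrative, added commentary):
			 * NFS_SRTT() stores the mean scaled by 8 and
			 * NFS_SDRTT() the deviation scaled by 4.  With
			 * NFS_SRTT(rep) == 32 (mean 4) and a measured r_rtt
			 * of 7, t1 = (7 + 1) - (32 >> 3) = 4, so the stored
			 * srtt becomes 36 and the mean moves 1/8 of the way
			 * toward the sample, the 0.125 gain noted above.
			 */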
			if (rep->r_flags & R_TIMING) {
				/*
				 * Since the timer resolution of
				 * NFS_HZ is so coarse, it can often
				 * result in r_rtt == 0. Since
				 * r_rtt == N means that the actual
				 * rtt is between N+dt and N+2-dt ticks,
				 * add 1.
				 */
				t1 = rep->r_rtt + 1;
				t1 -= (NFS_SRTT(rep) >> 3);
				NFS_SRTT(rep) += t1;
				if (t1 < 0)
					t1 = -t1;
				t1 -= (NFS_SDRTT(rep) >> 2);
				NFS_SDRTT(rep) += t1;
			}
			nmp->nm_timeouts = 0;
		}

		/*
		 * If not matched to a request, drop it.
		 * If it's mine, get out.
		 */
		if (rep == NULL) {
			nfsstats.rpcunexpected++;
			m_freem(mrep);
		} else if (rep == myrep) {
			if (rep->r_mrep == NULL)
				panic("nfsreply nil");
			return (0);
		}
		if (myrep->r_flags & R_GETONEREP)
			return (0);
	}
}
/*
 * nfs_request - goes something like this
 *	- fill in request struct
 *	- links it into list
 *	- calls nfs_send() for first transmit
 *	- calls nfs_receive() to get reply
 *	- break down rpc header and return with nfs reply pointed to
 *	  in mreq
 * nb: always frees up mreq mbuf list
 */
int
nfs_request(struct vnode *vp, struct mbuf *mrest, int procnum,
	struct thread *td, struct ucred *cred, struct mbuf **mrp,
	struct mbuf **mdp, caddr_t *dposp)
{
	struct mbuf *mrep, *m2;
	struct nfsreq *rep;
	u_int32_t *tl;
	int i;
	struct nfsmount *nmp;
	struct mbuf *m, *md, *mheadend;
	char nickv[RPCX_NICKVERF];
	time_t waituntil;
	caddr_t dpos, cp2;
	int t1, error = 0, mrest_len, auth_len, auth_type;
	int trylater_delay = 15, trylater_cnt = 0, failed_auth = 0;
	int verf_len, verf_type;
	u_int32_t xid;
	char *auth_str, *verf_str;
	NFSKERBKEY_T key;		/* save session key */

	/* Reject requests while attempting a forced unmount. */
	if (vp->v_mount->mnt_kern_flag & MNTK_UNMOUNTF) {
		m_freem(mrest);
		return (ESTALE);
	}
	nmp = VFSTONFS(vp->v_mount);
	MALLOC(rep, struct nfsreq *, sizeof(struct nfsreq), M_NFSREQ, M_WAITOK);
	rep->r_nmp = nmp;
	rep->r_vp = vp;
	rep->r_td = td;
	rep->r_procnum = procnum;
	i = 0;
	m = mrest;
	while (m) {
		i += m->m_len;
		m = m->m_next;
	}
	mrest_len = i;

	/*
	 * Get the RPC header with authorization.
	 */
kerbauth:
	verf_str = auth_str = (char *)0;
	if (nmp->nm_flag & NFSMNT_KERB) {
		verf_str = nickv;
		verf_len = sizeof (nickv);
		auth_type = RPCAUTH_KERB4;
		bzero((caddr_t)key, sizeof (key));
		if (failed_auth || nfs_getnickauth(nmp, cred, &auth_str,
			&auth_len, verf_str, verf_len)) {
			error = nfs_getauth(nmp, rep, cred, &auth_str,
				&auth_len, verf_str, &verf_len, key);
			if (error) {
				free((caddr_t)rep, M_NFSREQ);
				m_freem(mrest);
				return (error);
			}
		}
	} else {
		auth_type = RPCAUTH_UNIX;
		if (cred->cr_ngroups < 1)
			panic("nfsreq nogrps");
		auth_len = ((((cred->cr_ngroups - 1) > nmp->nm_numgrps) ?
			nmp->nm_numgrps : (cred->cr_ngroups - 1)) << 2) +
			5 * NFSX_UNSIGNED;
	}
	m = nfsm_rpchead(cred, nmp->nm_flag, procnum, auth_type, auth_len,
	     auth_str, verf_len, verf_str, mrest, mrest_len, &mheadend, &xid);
	if (auth_str)
		free(auth_str, M_TEMP);

	/*
	 * For stream protocols, insert a Sun RPC Record Mark.
	 */
	if (nmp->nm_sotype == SOCK_STREAM) {
		M_PREPEND(m, NFSX_UNSIGNED, MB_WAIT);
		if (m == NULL) {
			free(rep, M_NFSREQ);
			return (ENOBUFS);
		}
		*mtod(m, u_int32_t *) = htonl(0x80000000 |
			 (m->m_pkthdr.len - NFSX_UNSIGNED));
	}
	rep->r_mreq = m;
	rep->r_xid = xid;
tryagain:
	if (nmp->nm_flag & NFSMNT_SOFT)
		rep->r_retry = nmp->nm_retry;
	else
		rep->r_retry = NFS_MAXREXMIT + 1;	/* past clip limit */
	rep->r_rtt = rep->r_rexmit = 0;
	if (proct[procnum] > 0)
		rep->r_flags = R_TIMING | R_MASKTIMER;
	else
		rep->r_flags = R_MASKTIMER;
	rep->r_mrep = NULL;
	/*
	 * Do the client side RPC.
	 */
	nfsstats.rpcrequests++;

	/*
	 * Chain request into list of outstanding requests. Be sure
	 * to put it LAST so timer finds oldest requests first. Note
	 * that R_MASKTIMER is set at the moment to prevent any timer
	 * action on this request while we are still doing processing on
	 * it below. splsoftclock() primarily protects nm_sent. Note
	 * that we may block in this code so there is no atomicity guarantee.
	 */
	crit_enter();
	TAILQ_INSERT_TAIL(&nfs_reqq, rep, r_chain);

	/*
	 * If backing off another request or avoiding congestion, don't
	 * send this one now but let timer do it. If not timing a request,
	 * do it now.
	 *
	 * Even though the timer will not mess with our request there is
	 * still the possibility that we will race a reply (which clears
	 * R_SENT), especially on localhost connections, so be very careful
	 * when setting R_SENT. We could set R_SENT prior to calling
	 * nfs_send() but why bother if the response occurs that quickly?
	 */
	if (nmp->nm_so && (nmp->nm_sotype != SOCK_DGRAM ||
		(nmp->nm_flag & NFSMNT_DUMBTIMR) ||
		nmp->nm_sent < nmp->nm_cwnd)) {
		if (nmp->nm_soflags & PR_CONNREQUIRED)
			error = nfs_sndlock(rep);
		if (!error) {
			m2 = m_copym(m, 0, M_COPYALL, MB_WAIT);
			error = nfs_send(nmp->nm_so, nmp->nm_nam, m2, rep);
			if (nmp->nm_soflags & PR_CONNREQUIRED)
				nfs_sndunlock(rep);
		}
		if (!error && (rep->r_flags & R_MUSTRESEND) == 0 &&
		    rep->r_mrep == NULL) {
			KASSERT((rep->r_flags & R_SENT) == 0,
				("R_SENT ASSERT %p", rep));
			nmp->nm_sent += NFS_CWNDSCALE;
			rep->r_flags |= R_SENT;
		}
	} else {
		rep->r_rtt = -1;
	}

	/*
	 * Let the timer do what it will with the request, then
	 * wait for the reply from our send or the timer's.
	 */
	if (!error || error == EPIPE) {
		rep->r_flags &= ~R_MASKTIMER;
		crit_exit();
		error = nfs_reply(rep);
		crit_enter();
	}

	/*
	 * RPC done, unlink the request, but don't rip it out from under
	 * the callout timer.
	 */
	while (rep->r_flags & R_LOCKED) {
		nfs_timer_raced = 1;
		tsleep(&nfs_timer_raced, 0, "nfstrac", 0);
	}
	TAILQ_REMOVE(&nfs_reqq, rep, r_chain);
	crit_exit();

	/*
	 * Decrement the outstanding request count.
	 */
	if (rep->r_flags & R_SENT) {
		rep->r_flags &= ~R_SENT;
		nmp->nm_sent -= NFS_CWNDSCALE;
	}

	/*
	 * If there was a successful reply and a tprintf msg.
	 * tprintf a response.
	 */
	if (!error && (rep->r_flags & R_TPRINTFMSG))
		nfs_msg(rep->r_td, nmp->nm_mountp->mnt_stat.f_mntfromname,
		    "is alive again");
	mrep = rep->r_mrep;
	md = rep->r_md;
	dpos = rep->r_dpos;
	if (error) {
		m_freem(rep->r_mreq);
		free((caddr_t)rep, M_NFSREQ);
		return (error);
	}
	/*
	 * break down the rpc header and check if ok
	 */
	nfsm_dissect(tl, u_int32_t *, 3 * NFSX_UNSIGNED);
	if (*tl++ == rpc_msgdenied) {
		if (*tl == rpc_mismatch)
			error = EOPNOTSUPP;
		else if ((nmp->nm_flag & NFSMNT_KERB) && *tl++ == rpc_autherr) {
			if (!failed_auth) {
				failed_auth++;
				mheadend->m_next = (struct mbuf *)0;
				m_freem(mrep);
				m_freem(rep->r_mreq);
				goto kerbauth;
			} else
				error = EAUTH;
		} else
			error = EACCES;
		m_freem(mrep);
		m_freem(rep->r_mreq);
		free((caddr_t)rep, M_NFSREQ);
		return (error);
	}

	/*
	 * Grab any Kerberos verifier, otherwise just throw it away.
	 */
	verf_type = fxdr_unsigned(int, *tl++);
	i = fxdr_unsigned(int32_t, *tl);
	if ((nmp->nm_flag & NFSMNT_KERB) && verf_type == RPCAUTH_KERB4) {
		error = nfs_savenickauth(nmp, cred, i, key, &md, &dpos, mrep);
		if (error)
			goto nfsmout;
	} else if (i > 0)
		nfsm_adv(nfsm_rndup(i));
	nfsm_dissect(tl, u_int32_t *, NFSX_UNSIGNED);
	/* 0 == ok */
	if (*tl == 0) {
		nfsm_dissect(tl, u_int32_t *, NFSX_UNSIGNED);
		if (*tl != 0) {
			error = fxdr_unsigned(int, *tl);
			if ((nmp->nm_flag & NFSMNT_NFSV3) &&
				error == NFSERR_TRYLATER) {
				m_freem(mrep);
				error = 0;
				waituntil = time_second + trylater_delay;
				while (time_second < waituntil)
					(void) tsleep((caddr_t)&lbolt,
						0, "nqnfstry", 0);
				trylater_delay *= nfs_backoff[trylater_cnt];
				if (trylater_cnt < 7)
					trylater_cnt++;
				goto tryagain;
			}
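			/*
			 * Illustrative backoff arithmetic (added
			 * commentary): trylater_delay starts at 15 seconds
			 * and scales by nfs_backoff[] on each successive
			 * NFSERR_TRYLATER reply: 15, 30, 120, 960, ...,
			 * with trylater_cnt capped at 7.
			 */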
			/*
			 * If the File Handle was stale, invalidate the
			 * lookup cache, just in case.
			 */
			if (error == ESTALE) {
				struct vnode *retdummy;
				cache_inval_vp(vp, CINV_CHILDREN, &retdummy);
			}
			if (nmp->nm_flag & NFSMNT_NFSV3) {
				*mrp = mrep;
				*mdp = md;
				*dposp = dpos;
				error |= NFSERR_RETERR;
			} else
				m_freem(mrep);
			m_freem(rep->r_mreq);
			free((caddr_t)rep, M_NFSREQ);
			return (error);
		}

		*mrp = mrep;
		*mdp = md;
		*dposp = dpos;
		m_freem(rep->r_mreq);
		FREE((caddr_t)rep, M_NFSREQ);
		return (0);
	}
	m_freem(mrep);
	error = EPROTONOSUPPORT;
nfsmout:
	m_freem(rep->r_mreq);
	free((caddr_t)rep, M_NFSREQ);
	return (error);
}
#ifndef NFS_NOSERVER

/*
 * Generate the rpc reply header
 * siz arg. is used to decide if adding a cluster is worthwhile
 */
int
nfs_rephead(int siz, struct nfsrv_descript *nd, struct nfssvc_sock *slp,
	    int err, struct mbuf **mrq, struct mbuf **mbp, caddr_t *bposp)
{
	u_int32_t *tl;
	struct mbuf *mreq;
	caddr_t bpos;
	struct mbuf *mb, *mb2;

	siz += RPC_REPLYSIZ;
	mb = mreq = m_getl(max_hdr + siz, MB_WAIT, MT_DATA, M_PKTHDR, NULL);
	mreq->m_pkthdr.len = 0;
	/*
	 * If this is not a cluster, try and leave leading space
	 * for the lower level headers.
	 */
	if ((max_hdr + siz) < MINCLSIZE)
		mreq->m_data += max_hdr;
	tl = mtod(mreq, u_int32_t *);
	mreq->m_len = 6 * NFSX_UNSIGNED;
	bpos = ((caddr_t)tl) + mreq->m_len;
	*tl++ = txdr_unsigned(nd->nd_retxid);
	*tl++ = rpc_reply;
	if (err == ERPCMISMATCH || (err & NFSERR_AUTHERR)) {
		*tl++ = rpc_msgdenied;
		if (err & NFSERR_AUTHERR) {
			*tl++ = rpc_autherr;
			*tl = txdr_unsigned(err & ~NFSERR_AUTHERR);
			mreq->m_len -= NFSX_UNSIGNED;
			bpos -= NFSX_UNSIGNED;
		} else {
			*tl++ = rpc_mismatch;
			*tl++ = txdr_unsigned(RPC_VER2);
			*tl = txdr_unsigned(RPC_VER2);
		}
	} else {
		*tl++ = rpc_msgaccepted;

		/*
		 * For Kerberos authentication, we must send the nickname
		 * verifier back, otherwise just RPCAUTH_NULL.
		 */
		if (nd->nd_flag & ND_KERBFULL) {
			struct nfsuid *nuidp;
			struct timeval ktvin, ktvout;

			for (nuidp = NUIDHASH(slp, nd->nd_cr.cr_uid)->lh_first;
			    nuidp != 0; nuidp = nuidp->nu_hash.le_next) {
				if (nuidp->nu_cr.cr_uid == nd->nd_cr.cr_uid &&
				    (!nd->nd_nam2 || netaddr_match(NU_NETFAM(nuidp),
				     &nuidp->nu_haddr, nd->nd_nam2)))
					break;
			}
			if (nuidp) {
				ktvin.tv_sec =
				    txdr_unsigned(nuidp->nu_timestamp.tv_sec - 1);
				ktvin.tv_usec =
				    txdr_unsigned(nuidp->nu_timestamp.tv_usec);

				/*
				 * Encrypt the timestamp in ecb mode using the
				 * session key.
				 */
#ifdef NFSKERB
				XXX
#endif

				*tl++ = rpc_auth_kerb;
				*tl++ = txdr_unsigned(3 * NFSX_UNSIGNED);
				*tl = ktvout.tv_sec;
				nfsm_build(tl, u_int32_t *, 3 * NFSX_UNSIGNED);
				*tl++ = ktvout.tv_usec;
				*tl++ = txdr_unsigned(nuidp->nu_cr.cr_uid);
			} else {
				*tl++ = 0;
				*tl++ = 0;
			}
		} else {
			*tl++ = 0;
			*tl++ = 0;
		}
		switch (err) {
		case EPROGUNAVAIL:
			*tl = txdr_unsigned(RPC_PROGUNAVAIL);
			break;
		case EPROGMISMATCH:
			*tl = txdr_unsigned(RPC_PROGMISMATCH);
			nfsm_build(tl, u_int32_t *, 2 * NFSX_UNSIGNED);
			*tl++ = txdr_unsigned(2);
			*tl = txdr_unsigned(3);
			break;
		case EPROCUNAVAIL:
			*tl = txdr_unsigned(RPC_PROCUNAVAIL);
			break;
		case EBADRPC:
			*tl = txdr_unsigned(RPC_GARBAGE);
			break;
		default:
			*tl = 0;
			if (err != NFSERR_RETVOID) {
				nfsm_build(tl, u_int32_t *, NFSX_UNSIGNED);
				if (err)
					*tl = txdr_unsigned(nfsrv_errmap(nd, err));
				else
					*tl = 0;
			}
			break;
		}
	}

	if (mrq != NULL)
		*mrq = mreq;
	*mbp = mb;
	*bposp = bpos;
	if (err != 0 && err != NFSERR_RETVOID)
		nfsstats.srvrpc_errs++;
	return (0);
}
#endif /* NFS_NOSERVER */
/*
 * Nfs timer routine
 * Scan the nfsreq list and retransmit any requests that have timed out.
 * To avoid retransmission attempts on STREAM sockets (in the future) make
 * sure to set the r_retry field to 0 (implies nm_retry == 0).
 */
void
nfs_timer(void *arg /* never used */)
{
	struct nfsreq *rep;
	struct mbuf *m;
	struct socket *so;
	struct nfsmount *nmp;
	int timeo;
	int error;
#ifndef NFS_NOSERVER
	struct nfssvc_sock *slp;
	u_quad_t cur_usec;
#endif /* NFS_NOSERVER */
	struct thread *td = &thread0; /* XXX for credentials, will break if sleep */

	crit_enter();
	TAILQ_FOREACH(rep, &nfs_reqq, r_chain) {
		nmp = rep->r_nmp;
		if (rep->r_mrep || (rep->r_flags & (R_SOFTTERM|R_MASKTIMER)))
			continue;
		rep->r_flags |= R_LOCKED;
		if (nfs_sigintr(nmp, rep, rep->r_td)) {
			nfs_softterm(rep);
			goto skip;
		}
		if (rep->r_rtt >= 0) {
			rep->r_rtt++;
			if (nmp->nm_flag & NFSMNT_DUMBTIMR)
				timeo = nmp->nm_timeo;
			else
				timeo = NFS_RTO(nmp, proct[rep->r_procnum]);
			if (nmp->nm_timeouts > 0)
				timeo *= nfs_backoff[nmp->nm_timeouts - 1];
			if (rep->r_rtt <= timeo)
				goto skip;
			if (nmp->nm_timeouts < 8)
				nmp->nm_timeouts++;
		}
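		/*
		 * Illustrative (added commentary): with NFS_RTO returning
		 * 10 ticks and two prior timeouts on the mount, the
		 * effective timeout is 10 * nfs_backoff[1] == 40 ticks,
		 * doubling with each additional timeout up to
		 * nfs_backoff[7] == 256.
		 */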
		/*
		 * Check for server not responding
		 */
		if ((rep->r_flags & R_TPRINTFMSG) == 0 &&
		     rep->r_rexmit > nmp->nm_deadthresh) {
			nfs_msg(rep->r_td,
			    nmp->nm_mountp->mnt_stat.f_mntfromname,
			    "not responding");
			rep->r_flags |= R_TPRINTFMSG;
		}
		if (rep->r_rexmit >= rep->r_retry) {	/* too many */
			nfsstats.rpctimeouts++;
			nfs_softterm(rep);
			goto skip;
		}
		if (nmp->nm_sotype != SOCK_DGRAM) {
			if (++rep->r_rexmit > NFS_MAXREXMIT)
				rep->r_rexmit = NFS_MAXREXMIT;
			goto skip;
		}
		if ((so = nmp->nm_so) == NULL)
			goto skip;

		/*
		 * If there is enough space and the window allows..
		 *	Resend it
		 * Set r_rtt to -1 in case we fail to send it now.
		 */
		rep->r_rtt = -1;
		if (sbspace(&so->so_snd) >= rep->r_mreq->m_pkthdr.len &&
		   ((nmp->nm_flag & NFSMNT_DUMBTIMR) ||
		    (rep->r_flags & R_SENT) ||
		    nmp->nm_sent < nmp->nm_cwnd) &&
		   (m = m_copym(rep->r_mreq, 0, M_COPYALL, MB_DONTWAIT))){
			if ((nmp->nm_flag & NFSMNT_NOCONN) == 0)
			    error = so_pru_send(so, 0, m, (struct sockaddr *)0,
				(struct mbuf *)0, td);
			else
			    error = so_pru_send(so, 0, m, nmp->nm_nam,
				(struct mbuf *)0, td);
			if (error) {
				if (NFSIGNORE_SOERROR(nmp->nm_soflags, error))
					so->so_error = 0;
			} else if (rep->r_mrep == NULL) {
				/*
				 * Iff first send, start timing
				 * else turn timing off, backoff timer
				 * and divide congestion window by 2.
				 *
				 * It is possible for the so_pru_send() to
				 * block and for us to race a reply so we
				 * only do this if the reply field has not
				 * been filled in.  R_LOCKED will prevent
				 * the request from being ripped out from under
				 * us entirely.
				 */
				if (rep->r_flags & R_SENT) {
					rep->r_flags &= ~R_TIMING;
					if (++rep->r_rexmit > NFS_MAXREXMIT)
						rep->r_rexmit = NFS_MAXREXMIT;
					nmp->nm_cwnd >>= 1;
					if (nmp->nm_cwnd < NFS_CWNDSCALE)
						nmp->nm_cwnd = NFS_CWNDSCALE;
					nfsstats.rpcretries++;
				} else {
					rep->r_flags |= R_SENT;
					nmp->nm_sent += NFS_CWNDSCALE;
				}
				rep->r_rtt = 0;
			}
		}
skip:
		rep->r_flags &= ~R_LOCKED;
	}
#ifndef NFS_NOSERVER
	/*
	 * Scan the write gathering queues for writes that need to be
	 * completed now.
	 */
	cur_usec = nfs_curusec();
	TAILQ_FOREACH(slp, &nfssvc_sockhead, ns_chain) {
		if (slp->ns_tq.lh_first &&
		    slp->ns_tq.lh_first->nd_time <= cur_usec)
			nfsrv_wakenfsd(slp, 1);
	}
#endif /* NFS_NOSERVER */

	/*
	 * Due to possible blocking, a client operation may be waiting for
	 * us to finish processing this request so it can remove it.
	 */
	if (nfs_timer_raced) {
		nfs_timer_raced = 0;
		wakeup(&nfs_timer_raced);
	}
	callout_reset(&nfs_timer_handle, nfs_ticks, nfs_timer, NULL);
	crit_exit();
}
/*
 * Mark all of an nfs mount's outstanding requests with R_SOFTTERM and
 * wait for all requests to complete. This is used by forced unmounts
 * to terminate any outstanding RPCs.
 */
int
nfs_nmcancelreqs(struct nfsmount *nmp)
{
	struct nfsreq *req;
	int i;

	crit_enter();
	TAILQ_FOREACH(req, &nfs_reqq, r_chain) {
		if (nmp != req->r_nmp || req->r_mrep != NULL ||
		    (req->r_flags & R_SOFTTERM)) {
			continue;
		}
		nfs_softterm(req);
	}
	crit_exit();

	for (i = 0; i < 30; i++) {
		crit_enter();
		TAILQ_FOREACH(req, &nfs_reqq, r_chain) {
			if (nmp == req->r_nmp)
				break;
		}
		crit_exit();
		if (req == NULL)
			return (0);
		tsleep(&lbolt, 0, "nfscancel", 0);
	}
	return (EBUSY);
}
/*
 * Flag a request as being about to terminate (due to NFSMNT_INT/NFSMNT_SOFT).
 * The nm_send count is decremented now to avoid deadlocks when the process in
 * soreceive() hasn't yet managed to send its own request.
 *
 * This routine must be called at splsoftclock() to protect r_flags and
 * nm_sent.
 */
static void
nfs_softterm(struct nfsreq *rep)
{
	rep->r_flags |= R_SOFTTERM;
	if (rep->r_flags & R_SENT) {
		rep->r_nmp->nm_sent -= NFS_CWNDSCALE;
		rep->r_flags &= ~R_SENT;
	}
}
/*
 * Test for a termination condition pending on the process.
 * This is used for NFSMNT_INT mounts.
 */
int
nfs_sigintr(struct nfsmount *nmp, struct nfsreq *rep, struct thread *td)
{
	sigset_t tmpset;
	struct proc *p;

	if (rep && (rep->r_flags & R_SOFTTERM))
		return (EINTR);
	/* Terminate all requests while attempting a forced unmount. */
	if (nmp->nm_mountp->mnt_kern_flag & MNTK_UNMOUNTF)
		return (EINTR);
	if (!(nmp->nm_flag & NFSMNT_INT))
		return (0);
	/* td might be NULL YYY */
	if (td == NULL || (p = td->td_proc) == NULL)
		return (0);

	tmpset = p->p_siglist;
	SIGSETNAND(tmpset, p->p_sigmask);
	SIGSETNAND(tmpset, p->p_sigignore);
	if (SIGNOTEMPTY(p->p_siglist) && NFSINT_SIGMASK(tmpset))
		return (EINTR);

	return (0);
}
/*
 * Lock a socket against others.
 * Necessary for STREAM sockets to ensure you get an entire rpc request/reply
 * and also to avoid race conditions between the processes with nfs requests
 * in progress when a reconnect is necessary.
 */
int
nfs_sndlock(struct nfsreq *rep)
{
	int *statep = &rep->r_nmp->nm_state;
	struct thread *td;
	int slptimeo;
	int slpflag;
	int error;

	slpflag = 0;
	slptimeo = 0;
	td = rep->r_td;
	if (rep->r_nmp->nm_flag & NFSMNT_INT)
		slpflag = PCATCH;

	error = 0;
	crit_enter();
	while (*statep & NFSSTA_SNDLOCK) {
		*statep |= NFSSTA_WANTSND;
		if (nfs_sigintr(rep->r_nmp, rep, td)) {
			error = EINTR;
			break;
		}
		tsleep((caddr_t)statep, slpflag, "nfsndlck", slptimeo);
		if (slpflag == PCATCH) {
			slpflag = 0;
			slptimeo = 2 * hz;
		}
	}
	/* Always fail if our request has been cancelled. */
	if ((rep->r_flags & R_SOFTTERM))
		error = EINTR;
	if (error == 0)
		*statep |= NFSSTA_SNDLOCK;
	crit_exit();
	return (error);
}
/*
 * Unlock the stream socket for others.
 */
void
nfs_sndunlock(struct nfsreq *rep)
{
	int *statep = &rep->r_nmp->nm_state;

	if ((*statep & NFSSTA_SNDLOCK) == 0)
		panic("nfs sndunlock");
	*statep &= ~NFSSTA_SNDLOCK;
	if (*statep & NFSSTA_WANTSND) {
		*statep &= ~NFSSTA_WANTSND;
		wakeup((caddr_t)statep);
	}
}
static int
nfs_rcvlock(struct nfsreq *rep)
{
	int *statep = &rep->r_nmp->nm_state;
	int slpflag;
	int slptimeo;
	int error;

	/*
	 * Unconditionally check for completion in case another nfsiod
	 * got the packet while the caller was blocked, before the caller
	 * called us. Packet reception is handled by mainline code which
	 * is protected by the BGL at the moment.
	 *
	 * We do not strictly need the second check just before the
	 * tsleep(), but it's good defensive programming.
	 */
	if (rep->r_mrep != NULL)
		return (EALREADY);

	if (rep->r_nmp->nm_flag & NFSMNT_INT)
		slpflag = PCATCH;
	else
		slpflag = 0;
	slptimeo = 0;
	error = 0;
	crit_enter();
	while (*statep & NFSSTA_RCVLOCK) {
		if (nfs_sigintr(rep->r_nmp, rep, rep->r_td)) {
			error = EINTR;
			break;
		}
		if (rep->r_mrep != NULL) {
			error = EALREADY;
			break;
		}
		*statep |= NFSSTA_WANTRCV;
		tsleep((caddr_t)statep, slpflag, "nfsrcvlk", slptimeo);
		/*
		 * If our reply was received while we were sleeping,
		 * then just return without taking the lock to avoid a
		 * situation where a single iod could 'capture' the
		 * receive lock.
		 */
		if (rep->r_mrep != NULL) {
			error = EALREADY;
			break;
		}
		if (slpflag == PCATCH) {
			slpflag = 0;
			slptimeo = 2 * hz;
		}
	}
	if (error == 0) {
		*statep |= NFSSTA_RCVLOCK;
		rep->r_nmp->nm_rcvlock_td = curthread;	/* DEBUGGING */
	}
	crit_exit();
	return (error);
}
/*
 * Unlock the stream socket for others.
 */
static void
nfs_rcvunlock(struct nfsreq *rep)
{
	int *statep = &rep->r_nmp->nm_state;

	if ((*statep & NFSSTA_RCVLOCK) == 0)
		panic("nfs rcvunlock");
	crit_enter();
	rep->r_nmp->nm_rcvlock_td = (void *)-1; /* DEBUGGING */
	*statep &= ~NFSSTA_RCVLOCK;
	if (*statep & NFSSTA_WANTRCV) {
		*statep &= ~NFSSTA_WANTRCV;
		wakeup((caddr_t)statep);
	}
	crit_exit();
}
/*
 *	nfs_realign:
 *
 * Check for badly aligned mbuf data and realign by copying the unaligned
 * portion of the data into a new mbuf chain and freeing the portions
 * of the old chain that were replaced.
 *
 * We cannot simply realign the data within the existing mbuf chain
 * because the underlying buffers may contain other rpc commands and
 * we cannot afford to overwrite them.
 *
 * We would prefer to avoid this situation entirely. The situation does
 * not occur with NFS/UDP and is supposed to only occasionally occur
 * with TCP. Use vfs.nfs.realign_count and realign_test to check this.
 */
static void
nfs_realign(struct mbuf **pm, int hsiz)
{
	struct mbuf *m;
	struct mbuf *n = NULL;
	int off = 0;

	++nfs_realign_test;
	while ((m = *pm) != NULL) {
		if ((m->m_len & 0x3) || (mtod(m, intptr_t) & 0x3)) {
			n = m_getl(m->m_len, MB_WAIT, MT_DATA, 0, NULL);
			n->m_len = 0;
			break;
		}
		pm = &m->m_next;
	}

	/*
	 * If n is non-NULL, loop on m copying data, then replace the
	 * portion of the chain that had to be realigned.
	 */
	if (n != NULL) {
		++nfs_realign_count;
		while (m) {
			m_copyback(n, off, m->m_len, mtod(m, caddr_t));
			off += m->m_len;
			m = m->m_next;
		}
		m_freem(*pm);
		*pm = n;
	}
}
#ifndef NFS_NOSERVER

/*
 * Parse an RPC request
 * - verify it
 * - fill in the cred struct.
 */
int
nfs_getreq(struct nfsrv_descript *nd, struct nfsd *nfsd, int has_header)
{
	int len, i;
	u_int32_t *tl;
	int32_t t1;
	struct uio uio;
	struct iovec iov;
	caddr_t dpos, cp2, cp;
	u_int32_t nfsvers, auth_type;
	uid_t nickuid;
	int error = 0, ticklen;
	struct mbuf *mrep, *md;
	struct nfsuid *nuidp;
	struct timeval tvin, tvout;
#if 0				/* until encrypted keys are implemented */
	NFSKERBKEYSCHED_T keys;	/* stores key schedule */
#endif

	mrep = nd->nd_mrep;
	md = nd->nd_md;
	dpos = nd->nd_dpos;
	if (has_header) {
		nfsm_dissect(tl, u_int32_t *, 10 * NFSX_UNSIGNED);
		nd->nd_retxid = fxdr_unsigned(u_int32_t, *tl++);
		if (*tl++ != rpc_call) {
			m_freem(mrep);
			return (EBADRPC);
		}
	} else
		nfsm_dissect(tl, u_int32_t *, 8 * NFSX_UNSIGNED);
	nd->nd_repstat = 0;
	nd->nd_flag = 0;
	if (*tl++ != rpc_vers) {
		nd->nd_repstat = ERPCMISMATCH;
		nd->nd_procnum = NFSPROC_NOOP;
		return (0);
	}
	if (*tl != nfs_prog) {
		nd->nd_repstat = EPROGUNAVAIL;
		nd->nd_procnum = NFSPROC_NOOP;
		return (0);
	}
	tl++;
	nfsvers = fxdr_unsigned(u_int32_t, *tl++);
	if (nfsvers < NFS_VER2 || nfsvers > NFS_VER3) {
		nd->nd_repstat = EPROGMISMATCH;
		nd->nd_procnum = NFSPROC_NOOP;
		return (0);
	}
	if (nfsvers == NFS_VER3)
		nd->nd_flag = ND_NFSV3;
	nd->nd_procnum = fxdr_unsigned(u_int32_t, *tl++);
	if (nd->nd_procnum == NFSPROC_NULL)
		return (0);
	if (nd->nd_procnum >= NFS_NPROCS ||
	    (nd->nd_procnum >= NQNFSPROC_GETLEASE) ||
	    (!nd->nd_flag && nd->nd_procnum > NFSV2PROC_STATFS)) {
		nd->nd_repstat = EPROCUNAVAIL;
		nd->nd_procnum = NFSPROC_NOOP;
		return (0);
	}
	if ((nd->nd_flag & ND_NFSV3) == 0)
		nd->nd_procnum = nfsv3_procid[nd->nd_procnum];
	auth_type = *tl++;
	len = fxdr_unsigned(int, *tl++);
	if (len < 0 || len > RPCAUTH_MAXSIZ) {
		m_freem(mrep);
		return (EBADRPC);
	}
	nd->nd_flag &= ~ND_KERBAUTH;
	/*
	 * Handle auth_unix or auth_kerb.
	 */
	if (auth_type == rpc_auth_unix) {
		len = fxdr_unsigned(int, *++tl);
		if (len < 0 || len > NFS_MAXNAMLEN) {
			m_freem(mrep);
			return (EBADRPC);
		}
		nfsm_adv(nfsm_rndup(len));
		nfsm_dissect(tl, u_int32_t *, 3 * NFSX_UNSIGNED);
		bzero((caddr_t)&nd->nd_cr, sizeof (struct ucred));
		nd->nd_cr.cr_ref = 1;
		nd->nd_cr.cr_uid = fxdr_unsigned(uid_t, *tl++);
		nd->nd_cr.cr_gid = fxdr_unsigned(gid_t, *tl++);
		len = fxdr_unsigned(int, *tl);
		if (len < 0 || len > RPCAUTH_UNIXGIDS) {
			m_freem(mrep);
			return (EBADRPC);
		}
		nfsm_dissect(tl, u_int32_t *, (len + 2) * NFSX_UNSIGNED);
		for (i = 1; i <= len; i++)
			if (i < NGROUPS)
				nd->nd_cr.cr_groups[i] = fxdr_unsigned(gid_t, *tl++);
			else
				tl++;
		nd->nd_cr.cr_ngroups = (len >= NGROUPS) ? NGROUPS : (len + 1);
		if (nd->nd_cr.cr_ngroups > 1)
			nfsrvw_sort(nd->nd_cr.cr_groups, nd->nd_cr.cr_ngroups);
		len = fxdr_unsigned(int, *++tl);
		if (len < 0 || len > RPCAUTH_MAXSIZ) {
			m_freem(mrep);
			return (EBADRPC);
		}
		if (len > 0)
			nfsm_adv(nfsm_rndup(len));
	} else if (auth_type == rpc_auth_kerb) {
		switch (fxdr_unsigned(int, *tl++)) {
		case RPCAKN_FULLNAME:
			ticklen = fxdr_unsigned(int, *tl);
			*((u_int32_t *)nfsd->nfsd_authstr) = *tl;
			uio.uio_resid = nfsm_rndup(ticklen) + NFSX_UNSIGNED;
			nfsd->nfsd_authlen = uio.uio_resid + NFSX_UNSIGNED;
			if (uio.uio_resid > (len - 2 * NFSX_UNSIGNED)) {
				m_freem(mrep);
				return (EBADRPC);
			}
			uio.uio_offset = 0;
			uio.uio_iov = &iov;
			uio.uio_iovcnt = 1;
			uio.uio_segflg = UIO_SYSSPACE;
			iov.iov_base = (caddr_t)&nfsd->nfsd_authstr[4];
			iov.iov_len = RPCAUTH_MAXSIZ - 4;
			nfsm_mtouio(&uio, uio.uio_resid);
			nfsm_dissect(tl, u_int32_t *, 2 * NFSX_UNSIGNED);
			if (*tl++ != rpc_auth_kerb ||
			    fxdr_unsigned(int, *tl) != 4 * NFSX_UNSIGNED) {
				printf("Bad kerb verifier\n");
				nd->nd_repstat = (NFSERR_AUTHERR|AUTH_BADVERF);
				nd->nd_procnum = NFSPROC_NOOP;
				return (0);
			}
			nfsm_dissect(cp, caddr_t, 4 * NFSX_UNSIGNED);
			tl = (u_int32_t *)cp;
			if (fxdr_unsigned(int, *tl) != RPCAKN_FULLNAME) {
				printf("Not fullname kerb verifier\n");
				nd->nd_repstat = (NFSERR_AUTHERR|AUTH_BADVERF);
				nd->nd_procnum = NFSPROC_NOOP;
				return (0);
			}
			cp += NFSX_UNSIGNED;
			bcopy(cp, nfsd->nfsd_verfstr, 3 * NFSX_UNSIGNED);
			nfsd->nfsd_verflen = 3 * NFSX_UNSIGNED;
			nd->nd_flag |= ND_KERBFULL;
			nfsd->nfsd_flag |= NFSD_NEEDAUTH;
			break;
		case RPCAKN_NICKNAME:
			if (len != 2 * NFSX_UNSIGNED) {
				printf("Kerb nickname short\n");
				nd->nd_repstat = (NFSERR_AUTHERR|AUTH_BADCRED);
				nd->nd_procnum = NFSPROC_NOOP;
				return (0);
			}
			nickuid = fxdr_unsigned(uid_t, *tl);
			nfsm_dissect(tl, u_int32_t *, 2 * NFSX_UNSIGNED);
			if (*tl++ != rpc_auth_kerb ||
			    fxdr_unsigned(int, *tl) != 3 * NFSX_UNSIGNED) {
				printf("Kerb nick verifier bad\n");
				nd->nd_repstat = (NFSERR_AUTHERR|AUTH_BADVERF);
				nd->nd_procnum = NFSPROC_NOOP;
				return (0);
			}
			nfsm_dissect(tl, u_int32_t *, 3 * NFSX_UNSIGNED);
			tvin.tv_sec = *tl++;
			tvin.tv_usec = *tl;
			for (nuidp = NUIDHASH(nfsd->nfsd_slp,nickuid)->lh_first;
			    nuidp != 0; nuidp = nuidp->nu_hash.le_next) {
				if (nuidp->nu_cr.cr_uid == nickuid &&
				    (!nd->nd_nam2 ||
				     netaddr_match(NU_NETFAM(nuidp),
				      &nuidp->nu_haddr, nd->nd_nam2)))
					break;
			}
			if (!nuidp) {
				nd->nd_repstat =
					(NFSERR_AUTHERR|AUTH_REJECTCRED);
				nd->nd_procnum = NFSPROC_NOOP;
				return (0);
			}
			/*
			 * Now, decrypt the timestamp using the session key
			 * and validate it.
			 */
#ifdef NFSKERB
			XXX
#endif

			tvout.tv_sec = fxdr_unsigned(long, tvout.tv_sec);
			tvout.tv_usec = fxdr_unsigned(long, tvout.tv_usec);
			if (nuidp->nu_expire < time_second ||
			    nuidp->nu_timestamp.tv_sec > tvout.tv_sec ||
			    (nuidp->nu_timestamp.tv_sec == tvout.tv_sec &&
			     nuidp->nu_timestamp.tv_usec > tvout.tv_usec)) {
				nuidp->nu_expire = 0;
				nd->nd_repstat =
				    (NFSERR_AUTHERR|AUTH_REJECTVERF);
				nd->nd_procnum = NFSPROC_NOOP;
				return (0);
			}
			nfsrv_setcred(&nuidp->nu_cr, &nd->nd_cr);
			nd->nd_flag |= ND_KERBNICK;
			break;
		}
	} else {
		nd->nd_repstat = (NFSERR_AUTHERR | AUTH_REJECTCRED);
		nd->nd_procnum = NFSPROC_NOOP;
		return (0);
	}

	nd->nd_md = md;
	nd->nd_dpos = dpos;
	return (0);
nfsmout:
	return (error);
}
/*
 * Send a message to the originating process's terminal. The thread and/or
 * process may be NULL. YYY the thread should not be NULL but there may
 * still be some uio_td's that are passed as NULL through to
 * nfsm_request().
 */
static int
nfs_msg(struct thread *td, char *server, char *msg)
{
	tpr_t tpr;

	if (td && td->td_proc)
		tpr = tprintf_open(td->td_proc);
	else
		tpr = NULL;
	tprintf(tpr, "nfs server %s: %s\n", server, msg);
	tprintf_close(tpr);
	return (0);
}
#ifndef NFS_NOSERVER

/*
 * Socket upcall routine for the nfsd sockets.
 * The caddr_t arg is a pointer to the "struct nfssvc_sock".
 * Essentially do as much as possible non-blocking, else punt and it will
 * be called with MB_WAIT from an nfsd.
 */
void
nfsrv_rcv(struct socket *so, void *arg, int waitflag)
{
	struct nfssvc_sock *slp = (struct nfssvc_sock *)arg;
	struct mbuf *m;
	struct mbuf *mp;
	struct sockaddr *nam;
	struct uio auio;
	int flags, error;
	int nparallel_wakeup = 0;

	if ((slp->ns_flag & SLP_VALID) == 0)
		return;
	/*
	 * Do not allow an infinite number of completed RPC records to build
	 * up before we stop reading data from the socket. Otherwise we could
	 * end up holding onto an unreasonable number of mbufs for requests
	 * waiting for service.
	 *
	 * This should give pretty good feedback to the TCP
	 * layer and prevents a memory crunch for other protocols.
	 *
	 * Note that the same service socket can be dispatched to several
	 * nfs servers simultaneously.
	 *
	 * The tcp protocol callback calls us with MB_DONTWAIT.
	 * nfsd calls us with MB_WAIT (typically).
	 */
	if (waitflag == MB_DONTWAIT && slp->ns_numrec >= nfsd_waiting / 2 + 1) {
		slp->ns_flag |= SLP_NEEDQ;
		goto dorecs;
	}

	/*
	 * Handle protocol specifics to parse an RPC request. We always
	 * pull from the socket using non-blocking I/O.
	 */
	if (so->so_type == SOCK_STREAM) {
		/*
		 * The data has to be read in an orderly fashion from a TCP
		 * stream, unlike a UDP socket. It is possible for soreceive
		 * and/or nfsrv_getstream() to block, so make sure only one
		 * entity is messing around with the TCP stream at any given
		 * moment. The receive sockbuf's lock in soreceive is not
		 * sufficient.
		 *
		 * Note that this procedure can be called from any number of
		 * NFS servers *OR* can be upcalled directly from a TCP
		 * protocol thread.
		 */
		if (slp->ns_flag & SLP_GETSTREAM) {
			slp->ns_flag |= SLP_NEEDQ;
			goto dorecs;
		}
		slp->ns_flag |= SLP_GETSTREAM;
		/*
		 * Do soreceive().
		 */
		auio.uio_resid = 1000000000;
		flags = MSG_DONTWAIT;
		error = so_pru_soreceive(so, &nam, &auio, &mp, NULL, &flags);
		if (error || mp == (struct mbuf *)0) {
			if (error == EWOULDBLOCK)
				slp->ns_flag |= SLP_NEEDQ;
			else
				slp->ns_flag |= SLP_DISCONN;
			slp->ns_flag &= ~SLP_GETSTREAM;
			goto dorecs;
		}
		m = mp;
		if (slp->ns_rawend) {
			slp->ns_rawend->m_next = m;
			slp->ns_cc += 1000000000 - auio.uio_resid;
		} else {
			slp->ns_raw = m;
			slp->ns_cc = 1000000000 - auio.uio_resid;
		}
		while (m->m_next)
			m = m->m_next;
		slp->ns_rawend = m;

		/*
		 * Now try and parse as many record(s) as we can out of the
		 * raw stream data.
		 */
		error = nfsrv_getstream(slp, waitflag, &nparallel_wakeup);
		if (error) {
			if (error == EPERM)
				slp->ns_flag |= SLP_DISCONN;
			else
				slp->ns_flag |= SLP_NEEDQ;
		}
		slp->ns_flag &= ~SLP_GETSTREAM;
	} else {
		/*
		 * For UDP soreceive typically pulls just one packet, loop
		 * to get the whole batch.
		 */
		do {
			auio.uio_resid = 1000000000;
			flags = MSG_DONTWAIT;
			error = so_pru_soreceive(so, &nam, &auio, &mp, NULL,
			    &flags);
			if (mp) {
				struct nfsrv_rec *rec;
				int mf = (waitflag & MB_DONTWAIT) ?
					    M_NOWAIT : M_WAITOK;
				rec = malloc(sizeof(struct nfsrv_rec),
					     M_NFSRVDESC, mf);
				if (!rec) {
					if (nam)
						FREE(nam, M_SONAME);
					m_freem(mp);
					continue;
				}
				nfs_realign(&mp, 10 * NFSX_UNSIGNED);
				rec->nr_address = nam;
				rec->nr_packet = mp;
				STAILQ_INSERT_TAIL(&slp->ns_rec, rec, nr_link);
				++slp->ns_numrec;
				++nparallel_wakeup;
			}
			if (error) {
				if ((so->so_proto->pr_flags & PR_CONNREQUIRED)
				    && error != EWOULDBLOCK) {
					slp->ns_flag |= SLP_DISCONN;
					goto dorecs;
				}
			}
		} while (mp);
	}
	/*
	 * If we were upcalled from the tcp protocol layer and we have
	 * fully parsed records ready to go, or there is new data pending,
	 * or something went wrong, try to wake up an nfsd thread to deal
	 * with it.
	 */
dorecs:
	if (waitflag == MB_DONTWAIT && (slp->ns_numrec > 0
	     || (slp->ns_flag & (SLP_NEEDQ | SLP_DISCONN)))) {
		nfsrv_wakenfsd(slp, nparallel_wakeup);
	}
}
/*
 * Try and extract an RPC request from the mbuf data list received on a
 * stream socket. The "waitflag" argument indicates whether or not it
 * can sleep.
 */
static int
nfsrv_getstream(struct nfssvc_sock *slp, int waitflag, int *countp)
{
	struct mbuf *m, **mpp;
	char *cp1, *cp2;
	int len;
	struct mbuf *om, *m2, *recm;
	u_int32_t recmark;

	if (slp->ns_reclen == 0) {
		if (slp->ns_cc < NFSX_UNSIGNED)
			return (0);
		m = slp->ns_raw;
		if (m->m_len >= NFSX_UNSIGNED) {
			bcopy(mtod(m, caddr_t), (caddr_t)&recmark, NFSX_UNSIGNED);
			m->m_data += NFSX_UNSIGNED;
			m->m_len -= NFSX_UNSIGNED;
		} else {
			cp1 = (caddr_t)&recmark;
			cp2 = mtod(m, caddr_t);
			while (cp1 < ((caddr_t)&recmark) + NFSX_UNSIGNED) {
				while (m->m_len == 0) {
					m = m->m_next;
					cp2 = mtod(m, caddr_t);
				}
				*cp1++ = *cp2++;
				m->m_data++;
				m->m_len--;
			}
		}
		slp->ns_cc -= NFSX_UNSIGNED;
		recmark = ntohl(recmark);
		slp->ns_reclen = recmark & ~0x80000000;
		if (recmark & 0x80000000)
			slp->ns_flag |= SLP_LASTFRAG;
		else
			slp->ns_flag &= ~SLP_LASTFRAG;
		if (slp->ns_reclen > NFS_MAXPACKET || slp->ns_reclen <= 0) {
			log(LOG_ERR, "%s (%d) from nfs client\n",
			    "impossible packet length",
			    slp->ns_reclen);
			return (EPERM);
		}
	}
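	/*
	 * Added note: the record mark parsed here uses the same RFC 1831
	 * framing sketched above nfs_receive(); e.g. a mark of 0x80000064
	 * is a final fragment of 0x64 (100) bytes, so ns_reclen becomes
	 * 100 and SLP_LASTFRAG is set.
	 */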
	/*
	 * Now get the record part.
	 *
	 * Note that slp->ns_reclen may be 0. Linux sometimes
	 * generates 0-length RPCs.
	 */
	recm = NULL;
	if (slp->ns_cc == slp->ns_reclen) {
		recm = slp->ns_raw;
		slp->ns_raw = slp->ns_rawend = (struct mbuf *)0;
		slp->ns_cc = slp->ns_reclen = 0;
	} else if (slp->ns_cc > slp->ns_reclen) {
		len = 0;
		m = slp->ns_raw;
		om = (struct mbuf *)0;

		while (len < slp->ns_reclen) {
			if ((len + m->m_len) > slp->ns_reclen) {
				m2 = m_copym(m, 0, slp->ns_reclen - len,
				    waitflag);
				if (m2) {
					if (om) {
						om->m_next = m2;
						recm = slp->ns_raw;
					} else
						recm = m2;
					m->m_data += slp->ns_reclen - len;
					m->m_len -= slp->ns_reclen - len;
					len = slp->ns_reclen;
				} else {
					return (EWOULDBLOCK);
				}
			} else if ((len + m->m_len) == slp->ns_reclen) {
				om = m;
				len += m->m_len;
				m = m->m_next;
				recm = slp->ns_raw;
				om->m_next = (struct mbuf *)0;
			} else {
				om = m;
				len += m->m_len;
				m = m->m_next;
			}
		}
		slp->ns_raw = m;
		slp->ns_cc -= len;
		slp->ns_reclen = 0;
	} else {
		return (0);
	}

	/*
	 * Accumulate the fragments into a record.
	 */
	mpp = &slp->ns_frag;
	while (*mpp)
		mpp = &((*mpp)->m_next);
	*mpp = recm;
	if (slp->ns_flag & SLP_LASTFRAG) {
		struct nfsrv_rec *rec;
		int mf = (waitflag & MB_DONTWAIT) ? M_NOWAIT : M_WAITOK;
		rec = malloc(sizeof(struct nfsrv_rec), M_NFSRVDESC, mf);
		if (!rec) {
			m_freem(slp->ns_frag);
		} else {
			nfs_realign(&slp->ns_frag, 10 * NFSX_UNSIGNED);
			rec->nr_address = (struct sockaddr *)0;
			rec->nr_packet = slp->ns_frag;
			STAILQ_INSERT_TAIL(&slp->ns_rec, rec, nr_link);
			++slp->ns_numrec;
			++*countp;
		}
		slp->ns_frag = (struct mbuf *)0;
	}
	return (0);
}
/*
 * Parse an RPC header.
 */
int
nfsrv_dorec(struct nfssvc_sock *slp, struct nfsd *nfsd,
	    struct nfsrv_descript **ndp)
{
	struct nfsrv_rec *rec;
	struct mbuf *m;
	struct sockaddr *nam;
	struct nfsrv_descript *nd;
	int error;

	*ndp = NULL;
	if ((slp->ns_flag & SLP_VALID) == 0 || !STAILQ_FIRST(&slp->ns_rec))
		return (ENOBUFS);
	rec = STAILQ_FIRST(&slp->ns_rec);
	STAILQ_REMOVE_HEAD(&slp->ns_rec, nr_link);
	KKASSERT(slp->ns_numrec > 0);
	--slp->ns_numrec;
	nam = rec->nr_address;
	m = rec->nr_packet;
	free(rec, M_NFSRVDESC);
	MALLOC(nd, struct nfsrv_descript *, sizeof (struct nfsrv_descript),
		M_NFSRVDESC, M_WAITOK);
	nd->nd_md = nd->nd_mrep = m;
	nd->nd_nam2 = nam;
	nd->nd_dpos = mtod(m, caddr_t);
	error = nfs_getreq(nd, nfsd, TRUE);
	if (error) {
		if (nam)
			FREE(nam, M_SONAME);
		free((caddr_t)nd, M_NFSRVDESC);
		return (error);
	}
	*ndp = nd;
	nfsd->nfsd_nd = nd;
	return (0);
}
/*
 * Try to assign service sockets to nfsd threads based on the number
 * of new rpc requests that have been queued on the service socket.
 *
 * If no nfsd's are available or additional requests are pending, set the
 * NFSD_CHECKSLP flag so that one of the running nfsds will go look for
 * the work in the nfssvc_sock list when it is finished processing its
 * current work. This flag is only cleared when an nfsd can not find
 * any new work to perform.
 */
void
nfsrv_wakenfsd(struct nfssvc_sock *slp, int nparallel)
{
	struct nfsd *nd;

	if ((slp->ns_flag & SLP_VALID) == 0)
		return;
	if (nparallel <= 1)
		nparallel = 1;
	TAILQ_FOREACH(nd, &nfsd_head, nfsd_chain) {
		if (nd->nfsd_flag & NFSD_WAITING) {
			nd->nfsd_flag &= ~NFSD_WAITING;
			if (nd->nfsd_slp)
				panic("nfsd wakeup");
			slp->ns_sref++;
			nd->nfsd_slp = slp;
			wakeup((caddr_t)nd);
			if (--nparallel == 0)
				break;
		}
	}
	if (nparallel) {
		slp->ns_flag |= SLP_DOREC;
		nfsd_head_flag |= NFSD_CHECKSLP;
	}
}
#endif /* NFS_NOSERVER */