/*
 * Copyright (c) 1989, 1991, 1993, 1995
 *	The Regents of the University of California.  All rights reserved.
 *
 * This code is derived from software contributed to Berkeley by
 * Rick Macklem at The University of Guelph.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by the University of
 *	California, Berkeley and its contributors.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	@(#)nfs_socket.c	8.5 (Berkeley) 3/30/95
 * $FreeBSD: src/sys/nfs/nfs_socket.c,v 1.60.2.6 2003/03/26 01:44:46 alfred Exp $
 */

/*
 * Socket operations for use by nfs
 */
#include <sys/param.h>
#include <sys/systm.h>
#include <sys/malloc.h>
#include <sys/mount.h>
#include <sys/kernel.h>
#include <sys/vnode.h>
#include <sys/fcntl.h>
#include <sys/protosw.h>
#include <sys/resourcevar.h>
#include <sys/socket.h>
#include <sys/socketvar.h>
#include <sys/socketops.h>
#include <sys/syslog.h>
#include <sys/thread.h>
#include <sys/tprintf.h>
#include <sys/sysctl.h>
#include <sys/signalvar.h>

#include <sys/signal2.h>
#include <sys/mutex2.h>
#include <sys/socketvar2.h>

#include <netinet/in.h>
#include <netinet/tcp.h>
#include <sys/thread2.h>

#include "nfsm_subs.h"
/*
 * RTT calculations are scaled by 256 (8 bits).  A proper fractional
 * RTT will still be calculated even with a slow NFS timer.
 */
#define	NFS_SRTT(r)	(r)->r_nmp->nm_srtt[proct[(r)->r_procnum]]
#define	NFS_SDRTT(r)	(r)->r_nmp->nm_sdrtt[proct[(r)->r_procnum]]
#define	NFS_RTT_SCALE_BITS	8	/* bits */
#define	NFS_RTT_SCALE		256	/* value */
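/*
 * Illustrative note (not from the original source): with 8 bits of
 * fixed-point fraction, an RTT of 1.5 timer ticks is stored as
 * 1.5 * 256 = 384.  A minimal sketch of moving in and out of the
 * scaled domain:
 *
 *	int scaled = (3 * NFS_RTT_SCALE) / 2;		/- 1.5 ticks -> 384 -/
 *	int ticks  = scaled >> NFS_RTT_SCALE_BITS;	/- truncates back to 1 -/
 *
 * Keeping the fraction is what lets a coarse (slow) NFS timer still
 * accumulate a meaningful smoothed RTT over many samples.
 */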
/*
 * Defines which timer to use for the procnum.
 */
static int proct[NFS_NPROCS] = {
	0, 1, 0, 2, 1, 3, 3, 4, 0, 0,	/* 00-09 */
	0, 0, 0, 0, 0, 0, 3, 3, 0, 0,	/* 10-19 */
	0, 5, 0, 0, 0, 0,		/* 20-29 */
};

static int multt[NFS_NPROCS] = {
	1, 1, 1, 1, 1, 1, 1, 1, 1, 1,	/* 00-09 */
	1, 1, 1, 1, 1, 1, 1, 1, 1, 1,	/* 10-19 */
	1, 2, 1, 1, 1, 1,		/* 20-29 */
};
static int nfs_backoff[8] = { 2, 3, 5, 8, 13, 21, 34, 55 };
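/*
 * Illustrative note (not from the original source): nfs_backoff[] is a
 * Fibonacci-like multiplier table.  After the Nth consecutive timeout
 * the computed timeout is multiplied by nfs_backoff[N - 1], so e.g. a
 * 2-tick base timeout grows to 4, 6, 10, 16, ... ticks, clamped at
 * NFS_MAXTIMEO.  See the use in nfs_timer_req() below.
 */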
static int nfs_realign_test;
static int nfs_realign_count;
static int nfs_showrtt;
static int nfs_showrexmit;
int nfs_maxasyncbio = NFS_MAXASYNCBIO;

SYSCTL_DECL(_vfs_nfs);

SYSCTL_INT(_vfs_nfs, OID_AUTO, realign_test, CTLFLAG_RW, &nfs_realign_test, 0,
    "Number of times mbufs have been tested for bad alignment");
SYSCTL_INT(_vfs_nfs, OID_AUTO, realign_count, CTLFLAG_RW, &nfs_realign_count, 0,
    "Number of realignments for badly aligned mbuf data");
SYSCTL_INT(_vfs_nfs, OID_AUTO, showrtt, CTLFLAG_RW, &nfs_showrtt, 0,
    "Show round trip time output");
SYSCTL_INT(_vfs_nfs, OID_AUTO, showrexmit, CTLFLAG_RW, &nfs_showrexmit, 0,
    "Show retransmit info");
SYSCTL_INT(_vfs_nfs, OID_AUTO, maxasyncbio, CTLFLAG_RW, &nfs_maxasyncbio, 0,
    "Max number of asynchronous bio's");
static int nfs_request_setup(nfsm_info_t info);
static int nfs_request_auth(struct nfsreq *rep);
static int nfs_request_try(struct nfsreq *rep);
static int nfs_request_waitreply(struct nfsreq *rep);
static int nfs_request_processreply(nfsm_info_t info, int);

struct nfsrtt nfsrtt;
struct callout nfs_timer_handle;

static int nfs_msg (struct thread *, char *, char *);
static int nfs_rcvlock (struct nfsmount *nmp, struct nfsreq *myreq);
static void nfs_rcvunlock (struct nfsmount *nmp);
static void nfs_realign (struct mbuf **pm, int hsiz);
static int nfs_receive (struct nfsmount *nmp, struct nfsreq *rep,
			struct sockaddr **aname, struct mbuf **mp);
static void nfs_softterm (struct nfsreq *rep, int islocked);
static void nfs_hardterm (struct nfsreq *rep, int islocked);
static int nfs_reconnect (struct nfsmount *nmp, struct nfsreq *rep);
static int nfsrv_getstream (struct nfssvc_sock *, int, int *);
static void nfs_timer_req(struct nfsreq *req);
static void nfs_checkpkt(struct mbuf *m, int len);

int (*nfsrv3_procs[NFS_NPROCS]) (struct nfsrv_descript *nd,
				 struct nfssvc_sock *slp,
				 struct mbuf **mreqp) = {
#endif /* NFS_NOSERVER */
/*
 * Initialize sockets and congestion for a new NFS connection.
 * We do not free the sockaddr on error.
 */
nfs_connect(struct nfsmount *nmp, struct nfsreq *rep)
	struct sockaddr *saddr;
	struct sockaddr_in *sin;
	struct thread *td = &thread0;	/* only used for socreate and sobind */

	nmp->nm_so = so = NULL;
	if (nmp->nm_flag & NFSMNT_FORCE)
	error = socreate(saddr->sa_family, &so, nmp->nm_sotype,
			 nmp->nm_soproto, td);
	nmp->nm_soflags = so->so_proto->pr_flags;

	/*
	 * Some servers require that the client port be a reserved port
	 * number.
	 */
	if (saddr->sa_family == AF_INET && (nmp->nm_flag & NFSMNT_RESVPORT)) {
		struct sockaddr_in ssin;

		bzero(&sopt, sizeof sopt);
		ip = IP_PORTRANGE_LOW;
		sopt.sopt_level = IPPROTO_IP;
		sopt.sopt_name = IP_PORTRANGE;
		sopt.sopt_val = (void *)&ip;
		sopt.sopt_valsize = sizeof(ip);
		error = sosetopt(so, &sopt);

		bzero(&ssin, sizeof ssin);
		sin->sin_len = sizeof (struct sockaddr_in);
		sin->sin_family = AF_INET;
		sin->sin_addr.s_addr = INADDR_ANY;
		sin->sin_port = htons(0);
		error = sobind(so, (struct sockaddr *)sin, td);

		bzero(&sopt, sizeof sopt);
		ip = IP_PORTRANGE_DEFAULT;
		sopt.sopt_level = IPPROTO_IP;
		sopt.sopt_name = IP_PORTRANGE;
		sopt.sopt_val = (void *)&ip;
		sopt.sopt_valsize = sizeof(ip);
		error = sosetopt(so, &sopt);
	/*
	 * Protocols that do not require connections may be optionally left
	 * unconnected for servers that reply from a port other than NFS_PORT.
	 */
	if (nmp->nm_flag & NFSMNT_NOCONN) {
		if (nmp->nm_soflags & PR_CONNREQUIRED) {
		error = soconnect(so, nmp->nm_nam, td);

		/*
		 * Wait for the connection to complete.  Cribbed from the
		 * connect system call but with the wait timing out so
		 * that interruptible mounts don't hang here for a long time.
		 */
		while ((so->so_state & SS_ISCONNECTING) && so->so_error == 0) {
			(void) tsleep((caddr_t)&so->so_timeo, 0,
			if ((so->so_state & SS_ISCONNECTING) &&
			    so->so_error == 0 && rep &&
			    (error = nfs_sigintr(nmp, rep, rep->r_td)) != 0) {
				soclrstate(so, SS_ISCONNECTING);
		error = so->so_error;
	so->so_rcv.ssb_timeo = (5 * hz);
	so->so_snd.ssb_timeo = (5 * hz);
	/*
	 * Get buffer reservation size from sysctl, but impose reasonable
	 * limits.
	 */
	if (nmp->nm_sotype == SOCK_STREAM) {
		if (so->so_proto->pr_flags & PR_CONNREQUIRED) {
			bzero(&sopt, sizeof sopt);
			sopt.sopt_level = SOL_SOCKET;
			sopt.sopt_name = SO_KEEPALIVE;
			sopt.sopt_val = &val;
			sopt.sopt_valsize = sizeof val;
		if (so->so_proto->pr_protocol == IPPROTO_TCP) {
			bzero(&sopt, sizeof sopt);
			sopt.sopt_level = IPPROTO_TCP;
			sopt.sopt_name = TCP_NODELAY;
			sopt.sopt_val = &val;
			sopt.sopt_valsize = sizeof val;

			bzero(&sopt, sizeof sopt);
			sopt.sopt_level = IPPROTO_TCP;
			sopt.sopt_name = TCP_FASTKEEP;
			sopt.sopt_val = &val;
			sopt.sopt_valsize = sizeof val;
	error = soreserve(so, nfs_soreserve, nfs_soreserve, NULL);
	atomic_set_int(&so->so_rcv.ssb_flags, SSB_NOINTR);
	atomic_set_int(&so->so_snd.ssb_flags, SSB_NOINTR);

	/* Initialize other non-zero congestion variables */
	nmp->nm_srtt[0] = nmp->nm_srtt[1] = nmp->nm_srtt[2] =
	    nmp->nm_srtt[3] = (NFS_TIMEO << NFS_RTT_SCALE_BITS);
	nmp->nm_sdrtt[0] = nmp->nm_sdrtt[1] = nmp->nm_sdrtt[2] =
	    nmp->nm_sdrtt[3] = 0;
	nmp->nm_maxasync_scaled = NFS_MINASYNC_SCALED;
	nmp->nm_timeouts = 0;

	/*
	 * Assign nm_so last.  The moment nm_so is assigned the nfs_timer()
	 * can mess with the socket.
	 */
	soshutdown(so, SHUT_RDWR);
	soclose(so, FNONBLOCK);
/*
 * Called when a connection is broken on a reliable protocol.
 * - clean up the old socket
 * - nfs_connect() again
 * - set R_NEEDSXMIT for all outstanding requests on the mount point
 * If this fails the mount point is DEAD!
 * nb: Must be called with the nfs_sndlock() set on the mount point.
 */
nfs_reconnect(struct nfsmount *nmp, struct nfsreq *rep)
	if (nmp->nm_rxstate >= NFSSVC_STOPPING)
	while ((error = nfs_connect(nmp, rep)) != 0) {
		if (error == EINTR || error == ERESTART)
		if (nmp->nm_rxstate >= NFSSVC_STOPPING)
		(void) tsleep((caddr_t)&lbolt, 0, "nfscon", 0);

	/*
	 * Loop through the outstanding request list and fix up all
	 * requests on the old socket.
	 */
	TAILQ_FOREACH(req, &nmp->nm_reqq, r_chain) {
		KKASSERT(req->r_nmp == nmp);
		req->r_flags |= R_NEEDSXMIT;
/*
 * NFS disconnect.  Clean up and unlink.
 */
nfs_disconnect(struct nfsmount *nmp)
	soshutdown(so, SHUT_RDWR);
	soclose(so, FNONBLOCK);

nfs_safedisconnect(struct nfsmount *nmp)
	nfs_rcvlock(nmp, NULL);
/*
 * This is the nfs send routine.  For connection based socket types, it
 * must be called with an nfs_sndlock() on the socket.
 * "rep == NULL" indicates that it has been called from a server.
 * For the client side:
 * - return EINTR if the RPC is terminated, 0 otherwise
 * - set R_NEEDSXMIT if the send fails for any reason
 * - do any cleanup required by recoverable socket errors (?)
 * For the server side:
 * - return EINTR or ERESTART if interrupted by a signal
 * - return EPIPE if a connection is lost for connection based sockets (TCP...)
 * - do any cleanup required by recoverable socket errors (?)
 */
nfs_send(struct socket *so, struct sockaddr *nam, struct mbuf *top,
	struct sockaddr *sendnam;
	int error, soflags, flags;

	if (rep->r_flags & R_SOFTTERM) {
	if ((so = rep->r_nmp->nm_so) == NULL) {
		rep->r_flags |= R_NEEDSXMIT;
	rep->r_flags &= ~R_NEEDSXMIT;
	soflags = rep->r_nmp->nm_soflags;
	soflags = so->so_proto->pr_flags;
	if ((soflags & PR_CONNREQUIRED) || (so->so_state & SS_ISCONNECTED))
	if (so->so_type == SOCK_SEQPACKET)

	/*
	 * calls pru_sosend -> sosend -> so_pru_send -> netrpc
	 */
	error = so_pru_sosend(so, sendnam, NULL, top, NULL, flags,

	/*
	 * ENOBUFS for dgram sockets is transient and non-fatal.
	 * No need to log, and no need to break a soft mount.
	 */
	if (error == ENOBUFS && so->so_type == SOCK_DGRAM) {
		/*
		 * do backoff retransmit on client
		 */
		if ((rep->r_nmp->nm_state & NFSSTA_SENDSPACE) == 0) {
			rep->r_nmp->nm_state |= NFSSTA_SENDSPACE;
			kprintf("Warning: NFS: Insufficient sendspace "
				"\t You must increase vfs.nfs.soreserve "
				"or decrease vfs.nfs.maxasyncbio\n",
				so->so_snd.ssb_hiwat);
		rep->r_flags |= R_NEEDSXMIT;
	log(LOG_INFO, "nfs send error %d for server %s\n", error,
	    rep->r_nmp->nm_mountp->mnt_stat.f_mntfromname);

	/*
	 * Deal with errors for the client side.
	 */
	if (rep->r_flags & R_SOFTTERM)
	rep->r_flags |= R_NEEDSXMIT;
	log(LOG_INFO, "nfsd send error %d\n", error);

	/*
	 * Handle any recoverable (soft) socket errors here. (?)
	 */
	if (error != EINTR && error != ERESTART &&
	    error != EWOULDBLOCK && error != EPIPE)
/*
 * Receive a Sun RPC Request/Reply.  For SOCK_DGRAM, the work is all
 * done by soreceive(), but for SOCK_STREAM we must deal with the Record
 * Mark and consolidate the data into a new mbuf list.
 * nb: Sometimes TCP passes the data up to soreceive() in long lists of
 *     small mbufs.
 * For SOCK_STREAM we must be very careful to read an entire record once
 * we have read any of it, even if the system call has been interrupted.
 */
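/*
 * Illustrative note (not from the original source): the Record Mark
 * used on stream transports is the 4-byte big-endian word defined by
 * ONC RPC record marking (RFC 1831).  The high bit flags the last
 * fragment of a record and the low 31 bits give the fragment length:
 *
 *	u_int32_t mark = ntohl(rawmark);
 *	int last = (mark & 0x80000000) != 0;
 *	u_int32_t len = mark & 0x7fffffff;
 *
 * nfs_receive() strips the mark when reading a reply and
 * nfs_request_auth() prepends one when building a request.
 */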
nfs_receive(struct nfsmount *nmp, struct nfsreq *rep,
	    struct sockaddr **aname, struct mbuf **mp)
	struct mbuf *control;
	struct sockaddr **getnam;
	int error, sotype, rcvflg;
	struct thread *td = curthread;	/* XXX */

	/*
	 * Set up arguments for soreceive()
	 */
	sotype = nmp->nm_sotype;

	/*
	 * For reliable protocols, lock against other senders/receivers
	 * in case a reconnect is necessary.
	 * For SOCK_STREAM, first get the Record Mark to find out how much
	 * more there is to get.
	 * We must lock the socket against other receivers
	 * until we have an entire rpc request/reply.
	 */
	if (sotype != SOCK_DGRAM) {
		error = nfs_sndlock(nmp, rep);

		/*
		 * Check for fatal errors and resending request.
		 *
		 * Ugh: If a reconnect attempt just happened, nm_so
		 * would have changed.  NULL indicates a failed
		 * attempt that has essentially shut down this
		 * mount point.
		 */
		if (rep && (rep->r_mrep || (rep->r_flags & R_SOFTTERM))) {
		error = nfs_reconnect(nmp, rep);
		while (rep && (rep->r_flags & R_NEEDSXMIT)) {
			m = m_copym(rep->r_mreq, 0, M_COPYALL, MB_WAIT);
			nfsstats.rpcretries++;
			error = nfs_send(so, rep->r_nmp->nm_nam, m, rep);
			if (error == EINTR || error == ERESTART ||
			    (error = nfs_reconnect(nmp, rep)) != 0) {
	if (sotype == SOCK_STREAM) {
		/*
		 * Get the length marker from the stream
		 */
		aio.iov_base = (caddr_t)&len;
		aio.iov_len = sizeof(u_int32_t);
		auio.uio_segflg = UIO_SYSSPACE;
		auio.uio_rw = UIO_READ;
		auio.uio_resid = sizeof(u_int32_t);
		do {
			rcvflg = MSG_WAITALL;
			error = so_pru_soreceive(so, NULL, &auio, NULL,
			if (error == EWOULDBLOCK && rep) {
				if (rep->r_flags & R_SOFTTERM)
		} while (error == EWOULDBLOCK);

		if (error == 0 && auio.uio_resid > 0) {
			/*
			 * Only log short packets if not EOF
			 */
			if (auio.uio_resid != sizeof(u_int32_t))
				log(LOG_INFO,
				    "short receive (%d/%d) from nfs server %s\n",
				    (int)(sizeof(u_int32_t) - auio.uio_resid),
				    (int)sizeof(u_int32_t),
				    nmp->nm_mountp->mnt_stat.f_mntfromname);
		len = ntohl(len) & ~0x80000000;

		/*
		 * This is SERIOUS! We are out of sync with the sender
		 * and forcing a disconnect/reconnect is all I can do.
		 */
		if (len > NFS_MAXPACKET) {
			log(LOG_ERR, "%s (%d) from nfs server %s\n",
			    "impossible packet length",
			    nmp->nm_mountp->mnt_stat.f_mntfromname);

		/*
		 * Get the rest of the packet as an mbuf chain
		 */
		do {
			rcvflg = MSG_WAITALL;
			error = so_pru_soreceive(so, NULL, NULL, &sio,
		} while (error == EWOULDBLOCK || error == EINTR ||
		if (error == 0 && sio.sb_cc != len) {
			log(LOG_INFO,
			    "short receive (%zu/%d) from nfs server %s\n",
			    (size_t)len - auio.uio_resid, len,
			    nmp->nm_mountp->mnt_stat.f_mntfromname);
		/*
		 * Non-stream, so get the whole packet by not
		 * specifying MSG_WAITALL and by specifying a large
		 * enough length.
		 *
		 * We have no use for control msg., but must grab them
		 * and then throw them away so we know what is going
		 * on.
		 */
		sbinit(&sio, 100000000);
		do {
			error = so_pru_soreceive(so, NULL, NULL, &sio,
			if (error == EWOULDBLOCK && rep) {
				if (rep->r_flags & R_SOFTTERM) {
		} while (error == EWOULDBLOCK ||
			 (error == 0 && sio.sb_mb == NULL && control));
		if ((rcvflg & MSG_EOR) == 0)
		if (error == 0 && sio.sb_mb == NULL)
	if (error && error != EINTR && error != ERESTART) {
		if (error != EPIPE) {
			log(LOG_INFO,
			    "receive error %d from nfs server %s\n",
			    nmp->nm_mountp->mnt_stat.f_mntfromname);
		error = nfs_sndlock(nmp, rep);
			error = nfs_reconnect(nmp, rep);
	if ((so = nmp->nm_so) == NULL)
	if (so->so_state & SS_ISCONNECTED)
	sbinit(&sio, 100000000);
	do {
		error = so_pru_soreceive(so, getnam, NULL, &sio,
		if (error == EWOULDBLOCK && rep &&
		    (rep->r_flags & R_SOFTTERM)) {
	} while (error == EWOULDBLOCK);

	/*
	 * A shutdown may result in no error and no mbuf.
	 */
	if (*mp == NULL && error == 0)

	/*
	 * Search for any mbufs that are not a multiple of 4 bytes long
	 * or with m_data not longword aligned.
	 * These could cause pointer alignment problems, so copy them to
	 * well aligned mbufs.
	 */
	nfs_realign(mp, 5 * NFSX_UNSIGNED);
/*
 * Implement receipt of reply on a socket.
 *
 * We must search through the list of received datagrams matching them
 * with outstanding requests using the xid, until ours is found.
 *
 * If myrep is NULL we process packets on the socket until
 * interrupted or until nm_reqrxq is non-empty.
 */
nfs_reply(struct nfsmount *nmp, struct nfsreq *myrep)
	struct sockaddr *nam;
	struct nfsm_info info;

	/*
	 * Loop around until we get our own reply
	 */
		/*
		 * Lock against other receivers so that I don't get stuck in
		 * sbwait() after someone else has received my reply for me.
		 * Also necessary for connection based protocols to avoid
		 * race conditions during a reconnect.
		 *
		 * If nfs_rcvlock() returns EALREADY, that means that
		 * the reply has already been received by another
		 * process and we can return immediately.  In this
		 * case, the lock is not taken to avoid races with
		 * other processes.
		 */
		error = nfs_rcvlock(nmp, myrep);
		if (error == EALREADY)
		/*
		 * If myrep is NULL we are the receiver helper thread.
		 * Stop waiting for incoming replies if there are
		 * messages sitting on reqrxq that we need to process,
		 * or if a shutdown request is pending.
		 */
		if (myrep == NULL && (TAILQ_FIRST(&nmp->nm_reqrxq) ||
		    nmp->nm_rxstate > NFSSVC_PENDING)) {

		/*
		 * Get the next Rpc reply off the socket.
		 *
		 * We cannot release the receive lock until we've
		 * filled in rep->r_mrep, otherwise a waiting
		 * thread may deadlock in soreceive with no incoming
		 * reply.
		 */
		error = nfs_receive(nmp, myrep, &nam, &info.mrep);

		/*
		 * Ignore routing errors on connectionless protocols??
		 */
		if (NFSIGNORE_SOERROR(nmp->nm_soflags, error)) {
			if (nmp->nm_so == NULL)
			nmp->nm_so->so_error = 0;
		kfree(nam, M_SONAME);

		/*
		 * Get the xid and check that it is an rpc reply
		 */
		info.dpos = mtod(info.md, caddr_t);
		NULLOUT(tl = nfsm_dissect(&info, 2*NFSX_UNSIGNED));
		if (*tl != rpc_reply) {
			nfsstats.rpcinvalid++;
		/*
		 * Loop through the request list to match up the reply.
		 * Iff no match, just drop the datagram.  On match, set
		 * r_mrep atomically to prevent the timer from messing
		 * around with the request after we have exited the
		 * critical section.
		 */
		TAILQ_FOREACH(rep, &nmp->nm_reqq, r_chain) {
			if (rep->r_mrep == NULL && rxid == rep->r_xid)

		/*
		 * Fill in the rest of the reply if we found a match.
		 *
		 * Deal with duplicate responses if there was no match.
		 */
			rep->r_dpos = info.dpos;
			rt = &nfsrtt.rttl[nfsrtt.pos];
			rt->proc = rep->r_procnum;
			rt->cwnd = nmp->nm_maxasync_scaled;
			rt->srtt = nmp->nm_srtt[proct[rep->r_procnum] - 1];
			rt->sdrtt = nmp->nm_sdrtt[proct[rep->r_procnum] - 1];
			rt->fsid = nmp->nm_mountp->mnt_stat.f_fsid;
			getmicrotime(&rt->tstamp);
			if (rep->r_flags & R_TIMING)
				rt->rtt = rep->r_rtt;
			nfsrtt.pos = (nfsrtt.pos + 1) % NFSRTTLOGSIZ;
		/*
		 * New congestion control is based only on async
		 * requests.
		 */
		if (nmp->nm_maxasync_scaled < NFS_MAXASYNC_SCALED)
			++nmp->nm_maxasync_scaled;
		if (rep->r_flags & R_SENT) {
			rep->r_flags &= ~R_SENT;

		/*
		 * Update rtt using a gain of 0.125 on the mean
		 * and a gain of 0.25 on the deviation.
		 *
		 * NOTE SRTT/SDRTT are only good if R_TIMING is set.
		 */
		if ((rep->r_flags & R_TIMING) && rep->r_rexmit == 0) {
			/*
			 * Since the timer resolution of
			 * NFS_HZ is so coarse, it can often
			 * result in r_rtt == 0.  Since
			 * r_rtt == N means that the actual
			 * rtt is between N+dt and N+2-dt ticks,
			 */
#define NFSRSB	NFS_RTT_SCALE_BITS
			n = ((NFS_SRTT(rep) * 7) +
			     (rep->r_rtt << NFSRSB)) >> 3;
			d = n - NFS_SRTT(rep);

			/*
			 * Don't let the jitter calculation decay
			 * too quickly, but we want a fast rampup.
			 */
			if (d < NFS_SDRTT(rep))
				n = ((NFS_SDRTT(rep) * 15) + d) >> 4;
			else
				n = ((NFS_SDRTT(rep) * 3) + d) >> 2;
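			/*
			 * Illustrative worked example (not from the
			 * original source): with a gain of 1/8 the
			 * smoothed mean moves an eighth of the way
			 * toward each new sample.  If SRTT is 8 ticks
			 * scaled (8 << 8 = 2048) and a 16-tick sample
			 * (4096) arrives:
			 *
			 *	n = (2048 * 7 + 4096) >> 3 = 2304
			 *
			 * i.e. 9 ticks scaled, so one slow reply nudges
			 * the estimate up by a tick instead of doubling
			 * it.  The deviation uses the faster 1/4 gain
			 * when growing but only a 1/16 step when
			 * shrinking, giving the fast rampup and slow
			 * decay described above.
			 */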
		nmp->nm_timeouts = 0;
		rep->r_mrep = info.mrep;
		nfs_hardterm(rep, 0);

		/*
		 * Extract vers, prog, nfsver, procnum.  A duplicate
		 * response means we didn't wait long enough so
		 * we increase the SRTT to avoid future spurious
		 * retransmits.
		 */
		u_int procnum = nmp->nm_lastreprocnum;

		if (procnum < NFS_NPROCS && proct[procnum]) {
			n = nmp->nm_srtt[proct[procnum]];
			n += NFS_ASYSCALE * NFS_HZ;
			if (n < NFS_ASYSCALE * NFS_HZ * 10)
				n = NFS_ASYSCALE * NFS_HZ * 10;
			nmp->nm_srtt[proct[procnum]] = n;

		/*
		 * If not matched to a request, drop it.
		 * If it's mine, get out.
		 */
			nfsstats.rpcunexpected++;
		} else if (rep == myrep) {
			if (rep->r_mrep == NULL)
				panic("nfsreply nil");
/*
 * Run the request state machine until the target state is reached
 * or a fatal error occurs.  The target state is not run.  Specifying
 * a target of NFSM_STATE_DONE runs the state machine until the rpc
 * is complete.
 *
 * EINPROGRESS is returned for all states other than the DONE state,
 * indicating that the rpc is still in progress.
 */
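/*
 * Illustrative note (not from the original source): the normal state
 * progression for a synchronous RPC is
 *
 *	SETUP -> AUTH -> TRY -> WAITREPLY -> PROCESSREPLY -> DONE
 *
 * PROCESSREPLY can loop back to AUTH (ENEEDAUTH) or to TRY (EAGAIN),
 * as handled below.  Async requests return EINPROGRESS from the TRY
 * state and are completed by the nfsiod helper threads.
 */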
nfs_request(struct nfsm_info *info, nfsm_state_t bstate, nfsm_state_t estate)
	while (info->state >= bstate && info->state < estate) {
		switch(info->state) {
		case NFSM_STATE_SETUP:
			/*
			 * Setup the nfsreq.  Any error which occurs during
			 * this state is fatal.
			 */
			info->error = nfs_request_setup(info);
				info->state = NFSM_STATE_DONE;
				return (info->error);
			req->r_mrp = &info->mrep;
			req->r_mdp = &info->md;
			req->r_dposp = &info->dpos;
			info->state = NFSM_STATE_AUTH;
		case NFSM_STATE_AUTH:
			/*
			 * Authenticate the nfsreq.  Any error which occurs
			 * during this state is fatal.
			 */
			info->error = nfs_request_auth(info->req);
				info->state = NFSM_STATE_DONE;
				return (info->error);
			info->state = NFSM_STATE_TRY;
		case NFSM_STATE_TRY:
			/*
			 * Transmit or retransmit attempt.  An error in this
			 * state is ignored and we always move on to the
			 * next state.
			 *
			 * This can trivially race the receiver if the
			 * request is asynchronous.  nfs_request_try()
			 * will thus set the state for us and we
			 * must also return immediately if we are
			 * running an async state machine, because
			 * info can become invalid due to races after
			 * this point.
			 */
			if (info->req->r_flags & R_ASYNC) {
				nfs_request_try(info->req);
				if (estate == NFSM_STATE_WAITREPLY)
					return (EINPROGRESS);
			nfs_request_try(info->req);
			info->state = NFSM_STATE_WAITREPLY;
		case NFSM_STATE_WAITREPLY:
			/*
			 * Wait for a reply or timeout and move on to the
			 * next state.  The error returned by this state
			 * is passed to the processing code in the next
			 * state.
			 */
			info->error = nfs_request_waitreply(info->req);
			info->state = NFSM_STATE_PROCESSREPLY;
		case NFSM_STATE_PROCESSREPLY:
			/*
			 * Process the reply or timeout.  Errors which occur
			 * in this state may cause the state machine to
			 * go back to an earlier state, and are fatal
			 * otherwise.
			 */
			info->error = nfs_request_processreply(info,
			switch(info->error) {
				info->state = NFSM_STATE_AUTH;
				info->state = NFSM_STATE_TRY;
				/*
				 * Operation complete, with or without an
				 * error.  We are done.
				 */
				info->state = NFSM_STATE_DONE;
				return (info->error);
		case NFSM_STATE_DONE:
			/*
			 * Shouldn't be reached
			 */
			return (info->error);

	/*
	 * If we are done return the error code (if any).
	 * Otherwise return EINPROGRESS.
	 */
	if (info->state == NFSM_STATE_DONE)
		return (info->error);
	return (EINPROGRESS);
/*
 * nfs_request - goes something like this
 *	- fill in request struct
 *	- links it into list
 *	- calls nfs_send() for first transmit
 *	- calls nfs_receive() to get reply
 *	- break down rpc header and return with nfs reply pointed to
 *	  by mrep or error
 * nb: always frees up mreq mbuf list
 */
nfs_request_setup(nfsm_info_t info)
	struct nfsmount *nmp;

	/*
	 * Reject requests while attempting a forced unmount.
	 */
	if (info->vp->v_mount->mnt_kern_flag & MNTK_UNMOUNTF) {
		m_freem(info->mreq);
	nmp = VFSTONFS(info->vp->v_mount);
	req = kmalloc(sizeof(struct nfsreq), M_NFSREQ, M_WAITOK);
	req->r_vp = info->vp;
	req->r_td = info->td;
	req->r_procnum = info->procnum;
	req->r_cred = info->cred;
	req->r_mrest = info->mreq;
	req->r_mrest_len = i;

	/*
	 * The presence of a non-NULL r_info in req indicates
	 * async completion via our helper threads.  See the receiver
	 * code.
	 */
		req->r_flags = R_ASYNC;
nfs_request_auth(struct nfsreq *rep)
	struct nfsmount *nmp = rep->r_nmp;
	char nickv[RPCX_NICKVERF];
	int error = 0, auth_len, auth_type;
	char *auth_str, *verf_str;

	rep->r_failed_auth = 0;

	/*
	 * Get the RPC header with authorization.
	 */
	verf_str = auth_str = NULL;
	if (nmp->nm_flag & NFSMNT_KERB) {
		verf_len = sizeof (nickv);
		auth_type = RPCAUTH_KERB4;
		bzero((caddr_t)rep->r_key, sizeof(rep->r_key));
		if (rep->r_failed_auth ||
		    nfs_getnickauth(nmp, cred, &auth_str, &auth_len,
				    verf_str, verf_len)) {
			error = nfs_getauth(nmp, rep, cred, &auth_str,
				&auth_len, verf_str, &verf_len, rep->r_key);
				m_freem(rep->r_mrest);
				rep->r_mrest = NULL;
				kfree((caddr_t)rep, M_NFSREQ);
		auth_type = RPCAUTH_UNIX;
		if (cred->cr_ngroups < 1)
			panic("nfsreq nogrps");
		auth_len = ((((cred->cr_ngroups - 1) > nmp->nm_numgrps) ?
			    nmp->nm_numgrps : (cred->cr_ngroups - 1)) << 2) +
	nfs_checkpkt(rep->r_mrest, rep->r_mrest_len);
	m = nfsm_rpchead(cred, nmp->nm_flag, rep->r_procnum, auth_type,
			 auth_len, auth_str, verf_len, verf_str,
			 rep->r_mrest, rep->r_mrest_len, &rep->r_mheadend, &xid);
	rep->r_mrest = NULL;
	kfree(auth_str, M_TEMP);

	/*
	 * For stream protocols, insert a Sun RPC Record Mark.
	 */
	if (nmp->nm_sotype == SOCK_STREAM) {
		M_PREPEND(m, NFSX_UNSIGNED, MB_WAIT);
			kfree(rep, M_NFSREQ);
		*mtod(m, u_int32_t *) = htonl(0x80000000 |
		    (m->m_pkthdr.len - NFSX_UNSIGNED));

	nfs_checkpkt(m, m->m_pkthdr.len);
nfs_request_try(struct nfsreq *rep)
	struct nfsmount *nmp = rep->r_nmp;

	/*
	 * Request is not on any queue, only the owner has access to it
	 * so it should not be locked by anyone atm.
	 *
	 * Interlock to prevent races.  While locked the only remote
	 * action possible is for r_mrep to be set (once we enqueue it).
	 */
	if (rep->r_flags == 0xdeadc0de) {
		print_backtrace(-1);
		panic("flags nbad");
	KKASSERT((rep->r_flags & (R_LOCKED | R_ONREQQ)) == 0);
	if (nmp->nm_flag & NFSMNT_SOFT)
		rep->r_retry = nmp->nm_retry;
		rep->r_retry = NFS_MAXREXMIT + 1;	/* past clip limit */
	rep->r_rtt = rep->r_rexmit = 0;
	if (proct[rep->r_procnum] > 0)
		rep->r_flags |= R_TIMING | R_LOCKED;
		rep->r_flags |= R_LOCKED;
	nfsstats.rpcrequests++;

	if (nmp->nm_flag & NFSMNT_FORCE) {
		rep->r_flags |= R_SOFTTERM;
		rep->r_flags &= ~R_LOCKED;
	rep->r_flags |= R_NEEDSXMIT;	/* in case send lock races us */

	/*
	 * Do the client side RPC.
	 *
	 * Chain request into list of outstanding requests.  Be sure
	 * to put it LAST so timer finds oldest requests first.  Note
	 * that our control of R_LOCKED prevents the request from
	 * getting ripped out from under us or transmitted by the
	 * timer code.
	 *
	 * For requests with info structures we must atomically set the
	 * info's state because the structure could become invalid upon
	 * return due to races (i.e., if async)
	 */
	mtx_link_init(&rep->r_link);
	KKASSERT((rep->r_flags & R_ONREQQ) == 0);
	TAILQ_INSERT_TAIL(&nmp->nm_reqq, rep, r_chain);
	rep->r_flags |= R_ONREQQ;
	if (rep->r_flags & R_ASYNC)
		rep->r_info->state = NFSM_STATE_WAITREPLY;

	/*
	 * Send if we can.  Congestion control is not handled here any more
	 * because trying to defer the initial send based on the nfs_timer
	 * requires having a very fast nfs_timer, which is silly.
	 */
	if (nmp->nm_soflags & PR_CONNREQUIRED)
		error = nfs_sndlock(nmp, rep);
	if (error == 0 && (rep->r_flags & R_NEEDSXMIT)) {
		m2 = m_copym(rep->r_mreq, 0, M_COPYALL, MB_WAIT);
		error = nfs_send(nmp->nm_so, nmp->nm_nam, m2, rep);
		rep->r_flags &= ~R_NEEDSXMIT;
		if ((rep->r_flags & R_SENT) == 0) {
			rep->r_flags |= R_SENT;
	if (nmp->nm_soflags & PR_CONNREQUIRED)

	/*
	 * Release the lock.  The only remote action that may have occurred
	 * would have been the setting of rep->r_mrep.  If this occurred
	 * and the request was async we have to move it to the reader
	 * thread's queue for action.
	 *
	 * For async requests also make sure the reader is woken up so
	 * it gets on the socket to read responses.
	 */
	if (rep->r_flags & R_ASYNC) {
		nfs_hardterm(rep, 1);
		rep->r_flags &= ~R_LOCKED;
		nfssvc_iod_reader_wakeup(nmp);
		rep->r_flags &= ~R_LOCKED;
	if (rep->r_flags & R_WANTED) {
		rep->r_flags &= ~R_WANTED;
/*
 * This code is only called for synchronous requests.  Completed synchronous
 * requests are left on reqq and we remove them before moving on to the
 * processing state.
 */
nfs_request_waitreply(struct nfsreq *rep)
	struct nfsmount *nmp = rep->r_nmp;

	KKASSERT((rep->r_flags & R_ASYNC) == 0);

	/*
	 * Wait until the request is finished.
	 */
	error = nfs_reply(nmp, rep);

	/*
	 * RPC done, unlink the request, but don't rip it out from under
	 * the callout timer.
	 *
	 * Once unlinked no other receiver or the timer will have
	 * visibility, so we do not have to set R_LOCKED.
	 */
	while (rep->r_flags & R_LOCKED) {
		rep->r_flags |= R_WANTED;
		tsleep(rep, 0, "nfstrac", 0);
	KKASSERT(rep->r_flags & R_ONREQQ);
	TAILQ_REMOVE(&nmp->nm_reqq, rep, r_chain);
	rep->r_flags &= ~R_ONREQQ;
	if (TAILQ_FIRST(&nmp->nm_bioq) &&
	    nmp->nm_reqqlen <= nfs_maxasyncbio * 2 / 3) {
		nfssvc_iod_writer_wakeup(nmp);

	/*
	 * Decrement the outstanding request count.
	 */
	if (rep->r_flags & R_SENT) {
		rep->r_flags &= ~R_SENT;
/*
 * Process reply with error returned from nfs_request_waitreply().
 *
 * Returns EAGAIN if it wants us to loop up to nfs_request_try() again.
 * Returns ENEEDAUTH if it wants us to loop up to nfs_request_auth() again.
 */
nfs_request_processreply(nfsm_info_t info, int error)
	struct nfsreq *req = info->req;
	struct nfsmount *nmp = req->r_nmp;

	/*
	 * If there was a successful reply and a tprintf msg,
	 * tprintf a response.
	 */
	if (error == 0 && (req->r_flags & R_TPRINTFMSG)) {
		nfs_msg(req->r_td, nmp->nm_mountp->mnt_stat.f_mntfromname,
	info->mrep = req->r_mrep;
	info->md = req->r_md;
	info->dpos = req->r_dpos;
		m_freem(req->r_mreq);
		kfree(req, M_NFSREQ);

	/*
	 * break down the rpc header and check if ok
	 */
	NULLOUT(tl = nfsm_dissect(info, 3 * NFSX_UNSIGNED));
	if (*tl++ == rpc_msgdenied) {
		if (*tl == rpc_mismatch) {
		} else if ((nmp->nm_flag & NFSMNT_KERB) &&
			   *tl++ == rpc_autherr) {
			if (req->r_failed_auth == 0) {
				req->r_failed_auth++;
				req->r_mheadend->m_next = NULL;
				m_freem(info->mrep);
				m_freem(req->r_mreq);
		m_freem(info->mrep);
		m_freem(req->r_mreq);
		kfree(req, M_NFSREQ);

	/*
	 * Grab any Kerberos verifier, otherwise just throw it away.
	 */
	verf_type = fxdr_unsigned(int, *tl++);
	i = fxdr_unsigned(int32_t, *tl);
	if ((nmp->nm_flag & NFSMNT_KERB) && verf_type == RPCAUTH_KERB4) {
		error = nfs_savenickauth(nmp, req->r_cred, i, req->r_key,
					 &info->md, &info->dpos, info->mrep);
		ERROROUT(nfsm_adv(info, nfsm_rndup(i)));
	NULLOUT(tl = nfsm_dissect(info, NFSX_UNSIGNED));
	NULLOUT(tl = nfsm_dissect(info, NFSX_UNSIGNED));
	error = fxdr_unsigned(int, *tl);

		/*
		 * Does anyone even implement this?  Just impose
		 */
		if ((nmp->nm_flag & NFSMNT_NFSV3) &&
		    error == NFSERR_TRYLATER) {
			m_freem(info->mrep);
			tsleep((caddr_t)&lbolt, 0, "nqnfstry", 0);
			return (EAGAIN);	/* goto tryagain */

		/*
		 * If the File Handle was stale, invalidate the
		 * lookup cache, just in case.
		 *
		 * To avoid namecache<->vnode deadlocks we must
		 * release the vnode lock if we hold it.
		 */
		if (error == ESTALE) {
			struct vnode *vp = req->r_vp;

			ltype = lockstatus(&vp->v_lock, curthread);
			if (ltype == LK_EXCLUSIVE || ltype == LK_SHARED)
				lockmgr(&vp->v_lock, LK_RELEASE);
			cache_inval_vp(vp, CINV_CHILDREN);
			if (ltype == LK_EXCLUSIVE || ltype == LK_SHARED)
				lockmgr(&vp->v_lock, ltype);
		if (nmp->nm_flag & NFSMNT_NFSV3) {
			KKASSERT(*req->r_mrp == info->mrep);
			KKASSERT(*req->r_mdp == info->md);
			KKASSERT(*req->r_dposp == info->dpos);
			error |= NFSERR_RETERR;
			m_freem(info->mrep);
		m_freem(req->r_mreq);
		kfree(req, M_NFSREQ);

	KKASSERT(*req->r_mrp == info->mrep);
	KKASSERT(*req->r_mdp == info->md);
	KKASSERT(*req->r_dposp == info->dpos);
	m_freem(req->r_mreq);
	kfree(req, M_NFSREQ);
	m_freem(info->mrep);
	error = EPROTONOSUPPORT;
	m_freem(req->r_mreq);
	kfree(req, M_NFSREQ);
#ifndef NFS_NOSERVER

/*
 * Generate the rpc reply header
 * siz arg. is used to decide if adding a cluster is worthwhile
 */
nfs_rephead(int siz, struct nfsrv_descript *nd, struct nfssvc_sock *slp,
	    int err, struct mbuf **mrq, struct mbuf **mbp, caddr_t *bposp)
	struct nfsm_info info;

	siz += RPC_REPLYSIZ;
	info.mb = m_getl(max_hdr + siz, MB_WAIT, MT_DATA, M_PKTHDR, NULL);
	info.mreq = info.mb;
	info.mreq->m_pkthdr.len = 0;

	/*
	 * If this is not a cluster, try and leave leading space
	 * for the lower level headers.
	 */
	if ((max_hdr + siz) < MINCLSIZE)
		info.mreq->m_data += max_hdr;
	tl = mtod(info.mreq, u_int32_t *);
	info.mreq->m_len = 6 * NFSX_UNSIGNED;
	info.bpos = ((caddr_t)tl) + info.mreq->m_len;
	*tl++ = txdr_unsigned(nd->nd_retxid);
	if (err == ERPCMISMATCH || (err & NFSERR_AUTHERR)) {
		*tl++ = rpc_msgdenied;
		if (err & NFSERR_AUTHERR) {
			*tl++ = rpc_autherr;
			*tl = txdr_unsigned(err & ~NFSERR_AUTHERR);
			info.mreq->m_len -= NFSX_UNSIGNED;
			info.bpos -= NFSX_UNSIGNED;
			*tl++ = rpc_mismatch;
			*tl++ = txdr_unsigned(RPC_VER2);
			*tl = txdr_unsigned(RPC_VER2);
		*tl++ = rpc_msgaccepted;

		/*
		 * For Kerberos authentication, we must send the nickname
		 * verifier back, otherwise just RPCAUTH_NULL.
		 */
		if (nd->nd_flag & ND_KERBFULL) {
			struct nfsuid *nuidp;
			struct timeval ktvin, ktvout;

			for (nuidp = NUIDHASH(slp, nd->nd_cr.cr_uid)->lh_first;
			     nuidp != NULL; nuidp = nuidp->nu_hash.le_next) {
				if (nuidp->nu_cr.cr_uid == nd->nd_cr.cr_uid &&
				    (!nd->nd_nam2 || netaddr_match(NU_NETFAM(nuidp),
				     &nuidp->nu_haddr, nd->nd_nam2)))
				    txdr_unsigned(nuidp->nu_timestamp.tv_sec - 1);
				    txdr_unsigned(nuidp->nu_timestamp.tv_usec);

				/*
				 * Encrypt the timestamp in ecb mode using the
				 * session key.
				 */
				*tl++ = rpc_auth_kerb;
				*tl++ = txdr_unsigned(3 * NFSX_UNSIGNED);
				*tl = ktvout.tv_sec;
				tl = nfsm_build(&info, 3 * NFSX_UNSIGNED);
				*tl++ = ktvout.tv_usec;
				*tl++ = txdr_unsigned(nuidp->nu_cr.cr_uid);
		*tl = txdr_unsigned(RPC_PROGUNAVAIL);
		*tl = txdr_unsigned(RPC_PROGMISMATCH);
		tl = nfsm_build(&info, 2 * NFSX_UNSIGNED);
		*tl++ = txdr_unsigned(2);
		*tl = txdr_unsigned(3);
		*tl = txdr_unsigned(RPC_PROCUNAVAIL);
		*tl = txdr_unsigned(RPC_GARBAGE);

	if (err != NFSERR_RETVOID) {
		tl = nfsm_build(&info, NFSX_UNSIGNED);
		*tl = txdr_unsigned(nfsrv_errmap(nd, err));

	if (err != 0 && err != NFSERR_RETVOID)
		nfsstats.srvrpc_errs++;
#endif /* NFS_NOSERVER */

/*
 * Nfs timer routine.
 *
 * Scan the nfsreq list and retransmit any requests that have timed out.
 * To avoid retransmission attempts on STREAM sockets (in the future) make
 * sure to set the r_retry field to 0 (implies nm_retry == 0).
 *
 * Requests with attached responses, terminated requests, and
 * locked requests are ignored.  Locked requests will be picked up
 * in a later timer call.
 */
nfs_timer_callout(void *arg /* never used */)
	struct nfsmount *nmp;
#ifndef NFS_NOSERVER
	struct nfssvc_sock *slp;
#endif /* NFS_NOSERVER */

	lwkt_gettoken(&nfs_token);
	TAILQ_FOREACH(nmp, &nfs_mountq, nm_entry) {
		lwkt_gettoken(&nmp->nm_token);
		TAILQ_FOREACH(req, &nmp->nm_reqq, r_chain) {
			KKASSERT(nmp == req->r_nmp);
			if (req->r_flags & (R_SOFTTERM | R_LOCKED))

			/*
			 * Handle timeout/retry.  Be sure to process r_mrep
			 * for async requests that completed while we had
			 * the request locked or they will hang in the reqq
			 * forever.
			 */
			req->r_flags |= R_LOCKED;
			if (nfs_sigintr(nmp, req, req->r_td)) {
				nfs_softterm(req, 1);
				req->r_flags &= ~R_LOCKED;
			if (req->r_flags & R_ASYNC) {
				nfs_hardterm(req, 1);
				req->r_flags &= ~R_LOCKED;
				nfssvc_iod_reader_wakeup(nmp);
				req->r_flags &= ~R_LOCKED;
			if (req->r_flags & R_WANTED) {
				req->r_flags &= ~R_WANTED;
		lwkt_reltoken(&nmp->nm_token);
#ifndef NFS_NOSERVER

	/*
	 * Scan the write gathering queues for writes that need to be
	 * completed now.
	 */
	cur_usec = nfs_curusec();

	TAILQ_FOREACH(slp, &nfssvc_sockhead, ns_chain) {
		/* XXX race against removal */
		if (lwkt_trytoken(&slp->ns_token)) {
			if (slp->ns_tq.lh_first &&
			    (slp->ns_tq.lh_first->nd_time <= cur_usec)) {
				nfsrv_wakenfsd(slp, 1);
			lwkt_reltoken(&slp->ns_token);
#endif /* NFS_NOSERVER */

	callout_reset(&nfs_timer_handle, nfs_ticks, nfs_timer_callout, NULL);
	lwkt_reltoken(&nfs_token);
nfs_timer_req(struct nfsreq *req)
	struct thread *td = &thread0;	/* XXX for creds, will break if sleep */
	struct nfsmount *nmp = req->r_nmp;

	/*
	 * rtt ticks and timeout calculation.  Return if the timeout
	 * has not been reached yet, unless the packet is flagged
	 * for an immediate send.
	 *
	 * The mean rtt doesn't help when we get random I/Os, we have
	 * to multiply by fairly large numbers.
	 */
	if (req->r_rtt >= 0) {
		/*
		 * Calculate the timeout to test against.
		 */
		if (nmp->nm_flag & NFSMNT_DUMBTIMR) {
			timeo = nmp->nm_timeo << NFS_RTT_SCALE_BITS;
		} else if (req->r_flags & R_TIMING) {
			timeo = NFS_SRTT(req) + NFS_SDRTT(req);
			timeo = nmp->nm_timeo << NFS_RTT_SCALE_BITS;
		timeo *= multt[req->r_procnum];
		/* timeo is still scaled by SCALE_BITS */

#define NFSFS	(NFS_RTT_SCALE * NFS_HZ)
		if (req->r_flags & R_TIMING) {
			static long last_time;
			if (nfs_showrtt && last_time != time_second) {
				kprintf("rpccmd %d NFS SRTT %d SDRTT %d "
					proct[req->r_procnum],
					NFS_SRTT(req), NFS_SDRTT(req),
					timeo % NFSFS * 1000 / NFSFS);
				last_time = time_second;

		/*
		 * deal with nfs_timer jitter.
		 */
		timeo = (timeo >> NFS_RTT_SCALE_BITS) + 1;

		if (nmp->nm_timeouts > 0)
			timeo *= nfs_backoff[nmp->nm_timeouts - 1];
		if (timeo > NFS_MAXTIMEO)
			timeo = NFS_MAXTIMEO;
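		/*
		 * Illustrative worked example (not from the original
		 * source): suppose SRTT is 2304 and SDRTT is 512 (both
		 * scaled), multt is 1, and three timeouts have already
		 * been recorded:
		 *
		 *	timeo = 2304 + 512 = 2816		(scaled)
		 *	timeo = (2816 >> 8) + 1 = 12		(ticks)
		 *	timeo = 12 * nfs_backoff[2] = 60	(ticks)
		 *
		 * clamped at NFS_MAXTIMEO; the request is retransmitted
		 * once r_rtt exceeds this value.
		 */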
		if (req->r_rtt <= timeo) {
			if ((req->r_flags & R_NEEDSXMIT) == 0)
		} else if (nmp->nm_timeouts < 8) {

	/*
	 * Check for server not responding
	 */
	if ((req->r_flags & R_TPRINTFMSG) == 0 &&
	    req->r_rexmit > nmp->nm_deadthresh) {
		nfs_msg(req->r_td, nmp->nm_mountp->mnt_stat.f_mntfromname,
		req->r_flags |= R_TPRINTFMSG;
	if (req->r_rexmit >= req->r_retry) {	/* too many */
		nfsstats.rpctimeouts++;
		nfs_softterm(req, 1);

	/*
	 * Generally disable retransmission on reliable sockets,
	 * unless the request is flagged for immediate send.
	 */
	if (nmp->nm_sotype != SOCK_DGRAM) {
		if (++req->r_rexmit > NFS_MAXREXMIT)
			req->r_rexmit = NFS_MAXREXMIT;
		if ((req->r_flags & R_NEEDSXMIT) == 0)

	/*
	 * Stop here if we do not have a socket!
	 */
	if ((so = nmp->nm_so) == NULL)

	/*
	 * If there is enough space and the window allows, resend it.
	 *
	 * r_rtt is left intact in case we get an answer after the
	 * retry that was a reply to the original packet.
	 *
	 * NOTE: so_pru_send()
	 */
	if (ssb_space(&so->so_snd) >= req->r_mreq->m_pkthdr.len &&
	    (req->r_flags & (R_SENT | R_NEEDSXMIT)) &&
	    (m = m_copym(req->r_mreq, 0, M_COPYALL, MB_DONTWAIT))) {
		if ((nmp->nm_flag & NFSMNT_NOCONN) == 0)
			error = so_pru_send(so, 0, m, NULL, NULL, td);
			error = so_pru_send(so, 0, m, nmp->nm_nam, NULL, td);
		if (NFSIGNORE_SOERROR(nmp->nm_soflags, error))
			req->r_flags |= R_NEEDSXMIT;
	} else if (req->r_mrep == NULL) {
		/*
		 * Iff first send, start timing
		 * else turn timing off, backoff timer
		 * and divide congestion window by 2.
		 *
		 * It is possible for the so_pru_send() to
		 * block and for us to race a reply so we
		 * only do this if the reply field has not
		 * been filled in.  R_LOCKED will prevent
		 * the request from being ripped out from under
		 * us.
		 *
		 * Record the last resent procnum to aid us
		 * in duplicate detection on receive.
		 */
		if ((req->r_flags & R_NEEDSXMIT) == 0) {
			if (++req->r_rexmit > NFS_MAXREXMIT)
				req->r_rexmit = NFS_MAXREXMIT;
			nmp->nm_maxasync_scaled >>= 1;
			if (nmp->nm_maxasync_scaled < NFS_MINASYNC_SCALED)
				nmp->nm_maxasync_scaled = NFS_MINASYNC_SCALED;
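			/*
			 * Illustrative note (not from the original
			 * source): together with the ++nm_maxasync_scaled
			 * performed on each good reply in nfs_reply(),
			 * this halving implements an additive-increase/
			 * multiplicative-decrease (AIMD) policy on the
			 * async request window, bounded below by
			 * NFS_MINASYNC_SCALED and above by
			 * NFS_MAXASYNC_SCALED.
			 */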
			nfsstats.rpcretries++;
			nmp->nm_lastreprocnum = req->r_procnum;
		req->r_flags |= R_SENT;
		req->r_flags &= ~R_NEEDSXMIT;

/*
 * Mark all of an nfs mount's outstanding requests with R_SOFTTERM and
 * wait for all requests to complete.  This is used by forced unmounts
 * to terminate any outstanding RPCs.
 *
 * Locked requests cannot be canceled but will be marked for
 * termination.
 */
nfs_nmcancelreqs(struct nfsmount *nmp)
	TAILQ_FOREACH(req, &nmp->nm_reqq, r_chain) {
		if (req->r_mrep != NULL || (req->r_flags & R_SOFTTERM))
		nfs_softterm(req, 0);
	/* XXX the other two queues as well */

	for (i = 0; i < 30; i++) {
		TAILQ_FOREACH(req, &nmp->nm_reqq, r_chain) {
			if (nmp == req->r_nmp)
		tsleep(&lbolt, 0, "nfscancel", 0);
/*
 * Soft-terminate a request, effectively marking it as failed.
 *
 * Must be called from within a critical section.
 */
nfs_softterm(struct nfsreq *rep, int islocked)
	rep->r_flags |= R_SOFTTERM;
	nfs_hardterm(rep, islocked);

/*
 * Hard-terminate a request, typically after getting a response.
 *
 * The state machine can still decide to re-issue it later if necessary.
 *
 * Must be called from within a critical section.
 */
nfs_hardterm(struct nfsreq *rep, int islocked)
	struct nfsmount *nmp = rep->r_nmp;

	/*
	 * The nm_send count is decremented now to avoid deadlocks
	 * when the process in soreceive() hasn't yet managed to send
	 * its own request.
	 */
	if (rep->r_flags & R_SENT) {
		rep->r_flags &= ~R_SENT;

	/*
	 * If we locked the request or nobody else has locked the request,
	 * and the request is async, we can move it to the reader thread's
	 * queue now and fix up the state.
	 *
	 * If we locked the request or nobody else has locked the request,
	 * we can wake up anyone blocked waiting for a response on the
	 * request.
	 */
	if (islocked || (rep->r_flags & R_LOCKED) == 0) {
		if ((rep->r_flags & (R_ONREQQ | R_ASYNC)) ==
		    (R_ONREQQ | R_ASYNC)) {
			rep->r_flags &= ~R_ONREQQ;
			TAILQ_REMOVE(&nmp->nm_reqq, rep, r_chain);
			TAILQ_INSERT_TAIL(&nmp->nm_reqrxq, rep, r_chain);
			KKASSERT(rep->r_info->state == NFSM_STATE_TRY ||
				 rep->r_info->state == NFSM_STATE_WAITREPLY);
			rep->r_info->state = NFSM_STATE_PROCESSREPLY;
			nfssvc_iod_reader_wakeup(nmp);
			if (TAILQ_FIRST(&nmp->nm_bioq) &&
			    nmp->nm_reqqlen <= nfs_maxasyncbio * 2 / 3) {
				nfssvc_iod_writer_wakeup(nmp);
		mtx_abort_ex_link(&nmp->nm_rxlock, &rep->r_link);
/*
 * Test for a termination condition pending on the process.
 * This is used for NFSMNT_INT mounts.
 */
nfs_sigintr(struct nfsmount *nmp, struct nfsreq *rep, struct thread *td)
	if (rep && (rep->r_flags & R_SOFTTERM))
	/* Terminate all requests while attempting a forced unmount. */
	if (nmp->nm_mountp->mnt_kern_flag & MNTK_UNMOUNTF)
	if (!(nmp->nm_flag & NFSMNT_INT))
	/* td might be NULL YYY */
	if (td == NULL || (p = td->td_proc) == NULL)

	tmpset = lwp_sigpend(lp);
	SIGSETNAND(tmpset, lp->lwp_sigmask);
	SIGSETNAND(tmpset, p->p_sigignore);
	if (SIGNOTEMPTY(tmpset) && NFSINT_SIGMASK(tmpset))
/*
 * Lock a socket against others.
 * Necessary for STREAM sockets to ensure you get an entire rpc request/reply
 * and also to avoid race conditions between the processes with nfs requests
 * in progress when a reconnect is necessary.
 */
nfs_sndlock(struct nfsmount *nmp, struct nfsreq *rep)
	mtx_t mtx = &nmp->nm_txlock;

	td = rep ? rep->r_td : NULL;
	if (nmp->nm_flag & NFSMNT_INT)

	while ((error = mtx_lock_ex_try(mtx)) != 0) {
		if (nfs_sigintr(nmp, rep, td)) {
		error = mtx_lock_ex(mtx, "nfsndlck", slpflag, slptimeo);
		if (slpflag == PCATCH) {

	/* Always fail if our request has been cancelled. */
	if (rep && (rep->r_flags & R_SOFTTERM)) {

/*
 * Unlock the stream socket for others.
 */
nfs_sndunlock(struct nfsmount *nmp)
	mtx_unlock(&nmp->nm_txlock);
/*
 * Lock the receiver side of the socket.
 */
nfs_rcvlock(struct nfsmount *nmp, struct nfsreq *rep)
	mtx_t mtx = &nmp->nm_rxlock;

	/*
	 * Unconditionally check for completion in case another nfsiod
	 * gets the packet while the caller was blocked, before the caller
	 * called us.  Packet reception is handled by mainline code which
	 * is protected by the BGL at the moment.
	 *
	 * We do not strictly need the second check just before the
	 * tsleep(), but it's good defensive programming.
	 */
	if (rep && rep->r_mrep != NULL)

	if (nmp->nm_flag & NFSMNT_INT)

	while ((error = mtx_lock_ex_try(mtx)) != 0) {
		if (nfs_sigintr(nmp, rep, (rep ? rep->r_td : NULL))) {
		if (rep && rep->r_mrep != NULL) {

		/*
		 * NOTE: can return ENOLCK, but in that case rep->r_mrep
		 *	 will already be set.
		 */
			error = mtx_lock_ex_link(mtx, &rep->r_link,
			error = mtx_lock_ex(mtx, "nfsrcvlk", slpflag, slptimeo);

		/*
		 * If our reply was received while we were sleeping,
		 * then just return without taking the lock to avoid a
		 * situation where a single iod could 'capture' the
		 * receive lock.
		 */
		if (rep && rep->r_mrep != NULL) {
		if (slpflag == PCATCH) {
	if (rep && rep->r_mrep != NULL) {

/*
 * Unlock the stream socket for others.
 */
nfs_rcvunlock(struct nfsmount *nmp)
	mtx_unlock(&nmp->nm_rxlock);
/*
 * Check for badly aligned mbuf data and realign by copying the unaligned
 * portion of the data into a new mbuf chain and freeing the portions
 * of the old chain that were replaced.
 *
 * We cannot simply realign the data within the existing mbuf chain
 * because the underlying buffers may contain other rpc commands and
 * we cannot afford to overwrite them.
 *
 * We would prefer to avoid this situation entirely.  The situation does
 * not occur with NFS/UDP and is supposed to only occasionally occur
 * with TCP.  Use vfs.nfs.realign_count and realign_test to check this.
 *
 * NOTE!  MB_DONTWAIT cannot be used here.  The mbufs must be acquired
 * because the rpc request OR reply cannot be thrown away.  TCP NFS
 * mounts do not retry their RPCs unless the TCP connection itself
 * is dropped so throwing away a RPC will basically cause the NFS
 * operation to lockup indefinitely.
 */
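/*
 * Illustrative note (not from the original source): the check below
 * flags a chain containing e.g. an mbuf with m_len == 6, or one whose
 * m_data pointer is not 4-byte aligned, because a later nfsm_dissect()
 * would then read u_int32_t XDR fields through a misaligned pointer.
 * That faults on strict-alignment machines and is undefined behavior
 * in C even where the hardware happens to tolerate it.
 */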
nfs_realign(struct mbuf **pm, int hsiz)
	struct mbuf *n = NULL;

	/*
	 * Check for misalignment
	 */
	while ((m = *pm) != NULL) {
		if ((m->m_len & 0x3) || (mtod(m, intptr_t) & 0x3))

	/*
	 * If misalignment found make a completely new copy.
	 */
		++nfs_realign_count;
		n = m_dup_data(m, MB_WAIT);
#ifndef NFS_NOSERVER

/*
 * Parse an RPC request
 * - fill in the cred struct.
 */
nfs_getreq(struct nfsrv_descript *nd, struct nfsd *nfsd, int has_header)
	u_int32_t nfsvers, auth_type;
	int error = 0, ticklen;
	struct nfsuid *nuidp;
	struct timeval tvin, tvout;
	struct nfsm_info info;
#if 0	/* until encrypted keys are implemented */
	NFSKERBKEYSCHED_T keys;	/* stores key schedule */
#endif
	info.mrep = nd->nd_mrep;
	info.md = nd->nd_md;
	info.dpos = nd->nd_dpos;

	NULLOUT(tl = nfsm_dissect(&info, 10 * NFSX_UNSIGNED));
	nd->nd_retxid = fxdr_unsigned(u_int32_t, *tl++);
	if (*tl++ != rpc_call) {
	NULLOUT(tl = nfsm_dissect(&info, 8 * NFSX_UNSIGNED));
	if (*tl++ != rpc_vers) {
		nd->nd_repstat = ERPCMISMATCH;
		nd->nd_procnum = NFSPROC_NOOP;
	if (*tl != nfs_prog) {
		nd->nd_repstat = EPROGUNAVAIL;
		nd->nd_procnum = NFSPROC_NOOP;
	nfsvers = fxdr_unsigned(u_int32_t, *tl++);
	if (nfsvers < NFS_VER2 || nfsvers > NFS_VER3) {
		nd->nd_repstat = EPROGMISMATCH;
		nd->nd_procnum = NFSPROC_NOOP;
	if (nfsvers == NFS_VER3)
		nd->nd_flag = ND_NFSV3;
	nd->nd_procnum = fxdr_unsigned(u_int32_t, *tl++);
	if (nd->nd_procnum == NFSPROC_NULL)
	if (nd->nd_procnum >= NFS_NPROCS ||
	    (nd->nd_procnum >= NQNFSPROC_GETLEASE) ||
	    (!nd->nd_flag && nd->nd_procnum > NFSV2PROC_STATFS)) {
		nd->nd_repstat = EPROCUNAVAIL;
		nd->nd_procnum = NFSPROC_NOOP;
	if ((nd->nd_flag & ND_NFSV3) == 0)
		nd->nd_procnum = nfsv3_procid[nd->nd_procnum];
	len = fxdr_unsigned(int, *tl++);
	if (len < 0 || len > RPCAUTH_MAXSIZ) {
	nd->nd_flag &= ~ND_KERBAUTH;

	/*
	 * Handle auth_unix or auth_kerb.
	 */
	if (auth_type == rpc_auth_unix) {
		len = fxdr_unsigned(int, *++tl);
		if (len < 0 || len > NFS_MAXNAMLEN) {
		ERROROUT(nfsm_adv(&info, nfsm_rndup(len)));
		NULLOUT(tl = nfsm_dissect(&info, 3 * NFSX_UNSIGNED));
		bzero((caddr_t)&nd->nd_cr, sizeof (struct ucred));
		nd->nd_cr.cr_ref = 1;
		nd->nd_cr.cr_uid = fxdr_unsigned(uid_t, *tl++);
		nd->nd_cr.cr_ruid = nd->nd_cr.cr_svuid = nd->nd_cr.cr_uid;
		nd->nd_cr.cr_gid = fxdr_unsigned(gid_t, *tl++);
		nd->nd_cr.cr_rgid = nd->nd_cr.cr_svgid = nd->nd_cr.cr_gid;
		len = fxdr_unsigned(int, *tl);
		if (len < 0 || len > RPCAUTH_UNIXGIDS) {
		NULLOUT(tl = nfsm_dissect(&info, (len + 2) * NFSX_UNSIGNED));
		for (i = 1; i <= len; i++)
			nd->nd_cr.cr_groups[i] = fxdr_unsigned(gid_t, *tl++);
		nd->nd_cr.cr_ngroups = (len >= NGROUPS) ? NGROUPS : (len + 1);
		if (nd->nd_cr.cr_ngroups > 1)
			nfsrvw_sort(nd->nd_cr.cr_groups, nd->nd_cr.cr_ngroups);
		len = fxdr_unsigned(int, *++tl);
		if (len < 0 || len > RPCAUTH_MAXSIZ) {
		ERROROUT(nfsm_adv(&info, nfsm_rndup(len)));
	} else if (auth_type == rpc_auth_kerb) {
		switch (fxdr_unsigned(int, *tl++)) {
		case RPCAKN_FULLNAME:
			ticklen = fxdr_unsigned(int, *tl);
			*((u_int32_t *)nfsd->nfsd_authstr) = *tl;
			uio.uio_resid = nfsm_rndup(ticklen) + NFSX_UNSIGNED;
			nfsd->nfsd_authlen = uio.uio_resid + NFSX_UNSIGNED;
			if (uio.uio_resid > (len - 2 * NFSX_UNSIGNED)) {
			uio.uio_segflg = UIO_SYSSPACE;
			iov.iov_base = (caddr_t)&nfsd->nfsd_authstr[4];
			iov.iov_len = RPCAUTH_MAXSIZ - 4;
			ERROROUT(nfsm_mtouio(&info, &uio, uio.uio_resid));
			NULLOUT(tl = nfsm_dissect(&info, 2 * NFSX_UNSIGNED));
			if (*tl++ != rpc_auth_kerb ||
			    fxdr_unsigned(int, *tl) != 4 * NFSX_UNSIGNED) {
				kprintf("Bad kerb verifier\n");
				nd->nd_repstat = (NFSERR_AUTHERR|AUTH_BADVERF);
				nd->nd_procnum = NFSPROC_NOOP;
			NULLOUT(cp = nfsm_dissect(&info, 4 * NFSX_UNSIGNED));
			tl = (u_int32_t *)cp;
			if (fxdr_unsigned(int, *tl) != RPCAKN_FULLNAME) {
				kprintf("Not fullname kerb verifier\n");
				nd->nd_repstat = (NFSERR_AUTHERR|AUTH_BADVERF);
				nd->nd_procnum = NFSPROC_NOOP;
			cp += NFSX_UNSIGNED;
			bcopy(cp, nfsd->nfsd_verfstr, 3 * NFSX_UNSIGNED);
			nfsd->nfsd_verflen = 3 * NFSX_UNSIGNED;
			nd->nd_flag |= ND_KERBFULL;
			nfsd->nfsd_flag |= NFSD_NEEDAUTH;
		case RPCAKN_NICKNAME:
			if (len != 2 * NFSX_UNSIGNED) {
				kprintf("Kerb nickname short\n");
				nd->nd_repstat = (NFSERR_AUTHERR|AUTH_BADCRED);
				nd->nd_procnum = NFSPROC_NOOP;
			nickuid = fxdr_unsigned(uid_t, *tl);
			NULLOUT(tl = nfsm_dissect(&info, 2 * NFSX_UNSIGNED));
			if (*tl++ != rpc_auth_kerb ||
			    fxdr_unsigned(int, *tl) != 3 * NFSX_UNSIGNED) {
				kprintf("Kerb nick verifier bad\n");
				nd->nd_repstat = (NFSERR_AUTHERR|AUTH_BADVERF);
				nd->nd_procnum = NFSPROC_NOOP;
			NULLOUT(tl = nfsm_dissect(&info, 3 * NFSX_UNSIGNED));
			tvin.tv_sec = *tl++;

			for (nuidp = NUIDHASH(nfsd->nfsd_slp, nickuid)->lh_first;
			     nuidp != NULL; nuidp = nuidp->nu_hash.le_next) {
				if (nuidp->nu_cr.cr_uid == nickuid &&
				    netaddr_match(NU_NETFAM(nuidp),
				    &nuidp->nu_haddr, nd->nd_nam2)))
					(NFSERR_AUTHERR|AUTH_REJECTCRED);
				nd->nd_procnum = NFSPROC_NOOP;

			/*
			 * Now, decrypt the timestamp using the session key
			 * and convert to a struct timeval.
			 */
			tvout.tv_sec = fxdr_unsigned(long, tvout.tv_sec);
			tvout.tv_usec = fxdr_unsigned(long, tvout.tv_usec);
			if (nuidp->nu_expire < time_second ||
			    nuidp->nu_timestamp.tv_sec > tvout.tv_sec ||
			    (nuidp->nu_timestamp.tv_sec == tvout.tv_sec &&
			     nuidp->nu_timestamp.tv_usec > tvout.tv_usec)) {
				nuidp->nu_expire = 0;
					(NFSERR_AUTHERR|AUTH_REJECTVERF);
				nd->nd_procnum = NFSPROC_NOOP;
			nfsrv_setcred(&nuidp->nu_cr, &nd->nd_cr);
			nd->nd_flag |= ND_KERBNICK;
			nd->nd_repstat = (NFSERR_AUTHERR | AUTH_REJECTCRED);
			nd->nd_procnum = NFSPROC_NOOP;
	nd->nd_md = info.md;
	nd->nd_dpos = info.dpos;
/*
 * Send a message to the originating process's terminal.  The thread and/or
 * process may be NULL.  YYY the thread should not be NULL but there may
 * still be some uio_td's that are still being passed as NULL through to
 * here.
 */
nfs_msg(struct thread *td, char *server, char *msg)
	if (td && td->td_proc)
		tpr = tprintf_open(td->td_proc);
	tprintf(tpr, "nfs server %s: %s\n", server, msg);
#ifndef NFS_NOSERVER

/*
 * Socket upcall routine for nfsd sockets.  This runs in the protocol
 * thread and passes waitflag == MB_DONTWAIT.
 */
nfsrv_rcv_upcall(struct socket *so, void *arg, int waitflag)
	struct nfssvc_sock *slp = (struct nfssvc_sock *)arg;

	if (slp->ns_needq_upcall == 0) {
		slp->ns_needq_upcall = 1;	/* ok to race */
		lwkt_gettoken(&nfs_token);
		nfsrv_wakenfsd(slp, 1);
		lwkt_reltoken(&nfs_token);

	lwkt_gettoken(&slp->ns_token);
	slp->ns_flag |= SLP_NEEDQ;
	nfsrv_rcv(so, arg, waitflag);
	lwkt_reltoken(&slp->ns_token);
/*
 * Process new data on a receive socket.  Essentially do as much as we can
 * non-blocking, else punt and it will be called with MB_WAIT from an nfsd.
 *
 * slp->ns_token is held on call
 */
nfsrv_rcv(struct socket *so, void *arg, int waitflag)
	struct nfssvc_sock *slp = (struct nfssvc_sock *)arg;
	struct sockaddr *nam;
	int nparallel_wakeup = 0;

	ASSERT_LWKT_TOKEN_HELD(&slp->ns_token);
	if ((slp->ns_flag & SLP_VALID) == 0)

	/*
	 * Do not allow an infinite number of completed RPC records to build
	 * up before we stop reading data from the socket.  Otherwise we could
	 * end up holding onto an unreasonable number of mbufs for requests
	 * waiting for service.
	 *
	 * This should give pretty good feedback to the TCP layer and
	 * prevents a memory crunch for other protocols.
	 *
	 * Note that the same service socket can be dispatched to several
	 * nfs servers simultaneously.  The tcp protocol callback calls us
	 * with MB_DONTWAIT.  nfsd calls us with MB_WAIT (typically).
	 */
	if (NFSRV_RECLIMIT(slp))
	/*
	 * Handle protocol specifics to parse an RPC request.  We always
	 * pull from the socket using non-blocking I/O.
	 */
	if (so->so_type == SOCK_STREAM) {
		/*
		 * The data has to be read in an orderly fashion from a TCP
		 * stream, unlike a UDP socket.  It is possible for soreceive
		 * and/or nfsrv_getstream() to block, so make sure only one
		 * entity is messing around with the TCP stream at any given
		 * moment.  The receive sockbuf's lock in soreceive is not
		 * sufficient.
		 */
		if (slp->ns_flag & SLP_GETSTREAM)
		slp->ns_flag |= SLP_GETSTREAM;

		/*
		 * Do soreceive().  Pull out as much data as possible without
		 * blocking.
		 */
		sbinit(&sio, 1000000000);
		flags = MSG_DONTWAIT;
		error = so_pru_soreceive(so, &nam, NULL, &sio, NULL, &flags);
		if (error || sio.sb_mb == NULL) {
			if (error != EWOULDBLOCK)
				slp->ns_flag |= SLP_DISCONN;
			slp->ns_flag &= ~(SLP_GETSTREAM | SLP_NEEDQ);
		if (slp->ns_rawend) {
			slp->ns_rawend->m_next = m;
			slp->ns_cc += sio.sb_cc;
			slp->ns_cc = sio.sb_cc;

		/*
		 * Now try and parse as many record(s) as we can out of the
		 * raw stream data.  This will set SLP_DOREC.
		 */
		error = nfsrv_getstream(slp, waitflag, &nparallel_wakeup);
		if (error && error != EWOULDBLOCK)
			slp->ns_flag |= SLP_DISCONN;
		slp->ns_flag &= ~SLP_GETSTREAM;
	} else {
		/*
		 * For UDP soreceive typically pulls just one packet, loop
		 * to get the whole batch.
		 */
		do {
			sbinit(&sio, 1000000000);
			flags = MSG_DONTWAIT;
			error = so_pru_soreceive(so, &nam, NULL, &sio,
				struct nfsrv_rec *rec;
				int mf = (waitflag & MB_DONTWAIT) ?
					 M_NOWAIT : M_WAITOK;
				rec = kmalloc(sizeof(struct nfsrv_rec),
					kfree(nam, M_SONAME);
				nfs_realign(&sio.sb_mb, 10 * NFSX_UNSIGNED);
				rec->nr_address = nam;
				rec->nr_packet = sio.sb_mb;
				STAILQ_INSERT_TAIL(&slp->ns_rec, rec, nr_link);
				slp->ns_flag |= SLP_DOREC;
				slp->ns_flag &= ~SLP_NEEDQ;
			if ((so->so_proto->pr_flags & PR_CONNREQUIRED)
			    && error != EWOULDBLOCK) {
				slp->ns_flag |= SLP_DISCONN;
			if (NFSRV_RECLIMIT(slp))
		} while (sio.sb_mb);

	/*
	 * If we were upcalled from the tcp protocol layer and we have
	 * fully parsed records ready to go, or there is new data pending,
	 * or something went wrong, try to wake up a nfsd thread to deal
	 * with it.
	 */
	/* XXX this code is currently not executed (nfsrv_rcv_upcall) */
	if (waitflag == MB_DONTWAIT && (slp->ns_flag & SLP_ACTION_MASK)) {
		lwkt_gettoken(&nfs_token);
		nfsrv_wakenfsd(slp, nparallel_wakeup);
		lwkt_reltoken(&nfs_token);
/*
 * Try and extract an RPC request from the mbuf data list received on a
 * stream socket.  The "waitflag" argument indicates whether or not it
 * can sleep.
 */
nfsrv_getstream(struct nfssvc_sock *slp, int waitflag, int *countp)
	struct mbuf *m, **mpp;
	struct mbuf *om, *m2, *recm;

	if (slp->ns_reclen == 0) {
		if (slp->ns_cc < NFSX_UNSIGNED)
		if (m->m_len >= NFSX_UNSIGNED) {
			bcopy(mtod(m, caddr_t), (caddr_t)&recmark, NFSX_UNSIGNED);
			m->m_data += NFSX_UNSIGNED;
			m->m_len -= NFSX_UNSIGNED;
		} else {
			cp1 = (caddr_t)&recmark;
			cp2 = mtod(m, caddr_t);
			while (cp1 < ((caddr_t)&recmark) + NFSX_UNSIGNED) {
				while (m->m_len == 0) {
					cp2 = mtod(m, caddr_t);
		slp->ns_cc -= NFSX_UNSIGNED;
		recmark = ntohl(recmark);
		slp->ns_reclen = recmark & ~0x80000000;
		if (recmark & 0x80000000)
			slp->ns_flag |= SLP_LASTFRAG;
		else
			slp->ns_flag &= ~SLP_LASTFRAG;
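		/*
		 * Illustrative note (not from the original source): a
		 * single RPC record may arrive as several fragments,
		 * each preceded by its own record mark, and only the
		 * final fragment has the high bit set.  The fragments
		 * are accumulated on slp->ns_frag below and are queued
		 * as one complete record once SLP_LASTFRAG is observed.
		 */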
		if (slp->ns_reclen > NFS_MAXPACKET || slp->ns_reclen <= 0) {
			log(LOG_ERR, "%s (%d) from nfs client\n",
			    "impossible packet length",

	/*
	 * Now get the record part.
	 *
	 * Note that slp->ns_reclen may be 0.  Linux sometimes
	 * generates 0-length RPCs.
	 */
	if (slp->ns_cc == slp->ns_reclen) {
		slp->ns_raw = slp->ns_rawend = NULL;
		slp->ns_cc = slp->ns_reclen = 0;
	} else if (slp->ns_cc > slp->ns_reclen) {
		while (len < slp->ns_reclen) {
			if ((len + m->m_len) > slp->ns_reclen) {
				m2 = m_copym(m, 0, slp->ns_reclen - len,
				m->m_data += slp->ns_reclen - len;
				m->m_len -= slp->ns_reclen - len;
				len = slp->ns_reclen;
					return (EWOULDBLOCK);
			} else if ((len + m->m_len) == slp->ns_reclen) {

	/*
	 * Accumulate the fragments into a record.
	 */
	mpp = &slp->ns_frag;
		mpp = &((*mpp)->m_next);
	if (slp->ns_flag & SLP_LASTFRAG) {
		struct nfsrv_rec *rec;
		int mf = (waitflag & MB_DONTWAIT) ? M_NOWAIT : M_WAITOK;
		rec = kmalloc(sizeof(struct nfsrv_rec), M_NFSRVDESC, mf);
			m_freem(slp->ns_frag);
		nfs_realign(&slp->ns_frag, 10 * NFSX_UNSIGNED);
		rec->nr_address = NULL;
		rec->nr_packet = slp->ns_frag;
		STAILQ_INSERT_TAIL(&slp->ns_rec, rec, nr_link);
		slp->ns_flag |= SLP_DOREC;
		slp->ns_frag = NULL;
/*
 * Sanity check our mbuf chain.
 */
nfs_checkpkt(struct mbuf *m, int len)
		panic("nfs_checkpkt: len mismatch %d/%d mbuf %p",

nfs_checkpkt(struct mbuf *m __unused, int len __unused)
/*
 * Parse an RPC header.
 *
 * If the socket is invalid or no records are pending we return ENOBUFS.
 * The caller must deal with NEEDQ races.
 */
nfsrv_dorec(struct nfssvc_sock *slp, struct nfsd *nfsd,
	    struct nfsrv_descript **ndp)
	struct nfsrv_rec *rec;
	struct sockaddr *nam;
	struct nfsrv_descript *nd;

	if ((slp->ns_flag & SLP_VALID) == 0 || !STAILQ_FIRST(&slp->ns_rec))
	rec = STAILQ_FIRST(&slp->ns_rec);
	STAILQ_REMOVE_HEAD(&slp->ns_rec, nr_link);
	KKASSERT(slp->ns_numrec > 0);
	if (--slp->ns_numrec == 0)
		slp->ns_flag &= ~SLP_DOREC;
	nam = rec->nr_address;
	kfree(rec, M_NFSRVDESC);
	nd = kmalloc(sizeof(struct nfsrv_descript), M_NFSRVDESC, M_WAITOK);
	nd->nd_md = nd->nd_mrep = m;
	nd->nd_dpos = mtod(m, caddr_t);
	error = nfs_getreq(nd, nfsd, TRUE);
		kfree(nam, M_SONAME);
		kfree((caddr_t)nd, M_NFSRVDESC);
/*
 * Try to assign service sockets to nfsd threads based on the number
 * of new rpc requests that have been queued on the service socket.
 *
 * If no nfsd's are available or additional requests are pending, set the
 * NFSD_CHECKSLP flag so that one of the running nfsds will go look for
 * the work in the nfssvc_sock list when it is finished processing its
 * current work.  This flag is only cleared when an nfsd can not find
 * any new work to perform.
 */
nfsrv_wakenfsd(struct nfssvc_sock *slp, int nparallel)
	if ((slp->ns_flag & SLP_VALID) == 0)

	TAILQ_FOREACH(nd, &nfsd_head, nfsd_chain) {
		if (nd->nfsd_flag & NFSD_WAITING) {
			nd->nfsd_flag &= ~NFSD_WAITING;
				panic("nfsd wakeup");
			wakeup((caddr_t)nd);
			if (--nparallel == 0)

	/*
	 * If we couldn't assign slp then the NFSDs are all busy and
	 * we set a flag indicating that there is pending work.
	 */
	nfsd_head_flag |= NFSD_CHECKSLP;

#endif /* NFS_NOSERVER */