2 * Copyright (c) 1989, 1991, 1993, 1995
3 * The Regents of the University of California. All rights reserved.
5 * This code is derived from software contributed to Berkeley by
6 * Rick Macklem at The University of Guelph.
8 * Redistribution and use in source and binary forms, with or without
9 * modification, are permitted provided that the following conditions
11 * 1. Redistributions of source code must retain the above copyright
12 * notice, this list of conditions and the following disclaimer.
13 * 2. Redistributions in binary form must reproduce the above copyright
14 * notice, this list of conditions and the following disclaimer in the
15 * documentation and/or other materials provided with the distribution.
16 * 3. All advertising materials mentioning features or use of this software
17 * must display the following acknowledgement:
18 * This product includes software developed by the University of
19 * California, Berkeley and its contributors.
20 * 4. Neither the name of the University nor the names of its contributors
21 * may be used to endorse or promote products derived from this software
22 * without specific prior written permission.
24 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
25 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
26 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
27 * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
28 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
29 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
30 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
31 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
32 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
33 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
36 * @(#)nfs_socket.c 8.5 (Berkeley) 3/30/95
37 * $FreeBSD: src/sys/nfs/nfs_socket.c,v 1.60.2.6 2003/03/26 01:44:46 alfred Exp $
38 * $DragonFly: src/sys/vfs/nfs/nfs_socket.c,v 1.45 2007/05/18 17:05:13 dillon Exp $
42 * Socket operations for use by nfs
45 #include <sys/param.h>
46 #include <sys/systm.h>
48 #include <sys/malloc.h>
49 #include <sys/mount.h>
50 #include <sys/kernel.h>
52 #include <sys/vnode.h>
53 #include <sys/fcntl.h>
54 #include <sys/protosw.h>
55 #include <sys/resourcevar.h>
56 #include <sys/socket.h>
57 #include <sys/socketvar.h>
58 #include <sys/socketops.h>
59 #include <sys/syslog.h>
60 #include <sys/thread.h>
61 #include <sys/tprintf.h>
62 #include <sys/sysctl.h>
63 #include <sys/signalvar.h>
64 #include <sys/mutex.h>
66 #include <sys/signal2.h>
67 #include <sys/mutex2.h>
69 #include <netinet/in.h>
70 #include <netinet/tcp.h>
71 #include <sys/thread2.h>
77 #include "nfsm_subs.h"
86 * RTT calculations are scaled by 256 (8 bits). A proper fractional
87 * RTT will still be calculated even with a slow NFS timer.
89 #define NFS_SRTT(r) (r)->r_nmp->nm_srtt[proct[(r)->r_procnum]]
90 #define NFS_SDRTT(r) (r)->r_nmp->nm_sdrtt[proct[(r)->r_procnum]]
91 #define NFS_RTT_SCALE_BITS 8 /* bits */
92 #define NFS_RTT_SCALE 256 /* value */
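/*
 * Illustrative sketch (not part of the original source): with 8 bits of
 * fixed-point scaling, a smoothed RTT of 3.5 timer ticks is stored as
 * 3.5 * 256 = 896.  Converting a scaled value back to whole ticks is a
 * shift plus one, as nfs_timer_req() later does to absorb timer jitter:
 *
 *	int srtt_scaled = 896;
 *	int ticks = (srtt_scaled >> NFS_RTT_SCALE_BITS) + 1;	(yields 4)
 */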
95 * Defines which timer to use for the procnum.
102 static int proct[NFS_NPROCS] = {
103 0, 1, 0, 2, 1, 3, 3, 4, 0, 0, /* 00-09 */
104 0, 0, 0, 0, 0, 0, 3, 3, 0, 0, /* 10-19 */
105 0, 5, 0, 0, 0, 0, /* 20-29 */
108 static int multt[NFS_NPROCS] = {
109 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, /* 00-09 */
110 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, /* 10-19 */
111 1, 2, 1, 1, 1, 1, /* 20-29 */
114 static int nfs_backoff[8] = { 2, 3, 5, 8, 13, 21, 34, 55 };
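/*
 * Hedged example (not from the original source): once a mount has begun
 * timing out, nfs_timer_req() multiplies the computed timeout (in ticks)
 * by nfs_backoff[nm_timeouts - 1], so a base timeout of 2 ticks grows to
 * 4, 6, 10, 16, 26, ... on successive timeouts, clipped at NFS_MAXTIMEO.
 */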
115 static int nfs_realign_test;
116 static int nfs_realign_count;
117 static int nfs_showrtt;
118 static int nfs_showrexmit;
119 int nfs_maxasyncbio = NFS_MAXASYNCBIO;
121 SYSCTL_DECL(_vfs_nfs);
123 SYSCTL_INT(_vfs_nfs, OID_AUTO, realign_test, CTLFLAG_RW, &nfs_realign_test, 0, "");
124 SYSCTL_INT(_vfs_nfs, OID_AUTO, realign_count, CTLFLAG_RW, &nfs_realign_count, 0, "");
125 SYSCTL_INT(_vfs_nfs, OID_AUTO, showrtt, CTLFLAG_RW, &nfs_showrtt, 0, "");
126 SYSCTL_INT(_vfs_nfs, OID_AUTO, showrexmit, CTLFLAG_RW, &nfs_showrexmit, 0, "");
127 SYSCTL_INT(_vfs_nfs, OID_AUTO, maxasyncbio, CTLFLAG_RW, &nfs_maxasyncbio, 0, "");
129 static int nfs_request_setup(nfsm_info_t info);
130 static int nfs_request_auth(struct nfsreq *rep);
131 static int nfs_request_try(struct nfsreq *rep);
132 static int nfs_request_waitreply(struct nfsreq *rep);
133 static int nfs_request_processreply(nfsm_info_t info, int);
136 struct nfsrtt nfsrtt;
137 struct callout nfs_timer_handle;
139 static int nfs_msg (struct thread *,char *,char *);
140 static int nfs_rcvlock (struct nfsmount *nmp, struct nfsreq *myreq);
141 static void nfs_rcvunlock (struct nfsmount *nmp);
142 static void nfs_realign (struct mbuf **pm, int hsiz);
143 static int nfs_receive (struct nfsmount *nmp, struct nfsreq *rep,
144 struct sockaddr **aname, struct mbuf **mp);
145 static void nfs_softterm (struct nfsreq *rep, int islocked);
146 static void nfs_hardterm (struct nfsreq *rep, int islocked);
147 static int nfs_reconnect (struct nfsmount *nmp, struct nfsreq *rep);
149 static int nfsrv_getstream (struct nfssvc_sock *, int, int *);
150 static void nfs_timer_req(struct nfsreq *req);
151 static void nfs_checkpkt(struct mbuf *m, int len);
153 int (*nfsrv3_procs[NFS_NPROCS]) (struct nfsrv_descript *nd,
154 struct nfssvc_sock *slp,
156 struct mbuf **mreqp) = {
184 #endif /* NFS_NOSERVER */
187 * Initialize sockets and congestion for a new NFS connection.
188 * We do not free the sockaddr if error.
191 nfs_connect(struct nfsmount *nmp, struct nfsreq *rep)
195 struct sockaddr *saddr;
196 struct sockaddr_in *sin;
197 struct thread *td = &thread0; /* only used for socreate and sobind */
199 nmp->nm_so = so = NULL;
200 if (nmp->nm_flag & NFSMNT_FORCE)
203 error = socreate(saddr->sa_family, &so, nmp->nm_sotype,
204 nmp->nm_soproto, td);
207 nmp->nm_soflags = so->so_proto->pr_flags;
210 * Some servers require that the client port be a reserved port number.
212 if (saddr->sa_family == AF_INET && (nmp->nm_flag & NFSMNT_RESVPORT)) {
215 struct sockaddr_in ssin;
217 bzero(&sopt, sizeof sopt);
218 ip = IP_PORTRANGE_LOW;
219 sopt.sopt_level = IPPROTO_IP;
220 sopt.sopt_name = IP_PORTRANGE;
221 sopt.sopt_val = (void *)&ip;
222 sopt.sopt_valsize = sizeof(ip);
224 error = sosetopt(so, &sopt);
227 bzero(&ssin, sizeof ssin);
229 sin->sin_len = sizeof (struct sockaddr_in);
230 sin->sin_family = AF_INET;
231 sin->sin_addr.s_addr = INADDR_ANY;
232 sin->sin_port = htons(0);
233 error = sobind(so, (struct sockaddr *)sin, td);
236 bzero(&sopt, sizeof sopt);
237 ip = IP_PORTRANGE_DEFAULT;
238 sopt.sopt_level = IPPROTO_IP;
239 sopt.sopt_name = IP_PORTRANGE;
240 sopt.sopt_val = (void *)&ip;
241 sopt.sopt_valsize = sizeof(ip);
243 error = sosetopt(so, &sopt);
249 * Protocols that do not require connections may be optionally left
250 * unconnected for servers that reply from a port other than NFS_PORT.
252 if (nmp->nm_flag & NFSMNT_NOCONN) {
253 if (nmp->nm_soflags & PR_CONNREQUIRED) {
258 error = soconnect(so, nmp->nm_nam, td);
263 * Wait for the connection to complete. Cribbed from the
264 * connect system call but with the wait timing out so
265 * that interruptible mounts don't hang here for a long time.
268 while ((so->so_state & SS_ISCONNECTING) && so->so_error == 0) {
269 (void) tsleep((caddr_t)&so->so_timeo, 0,
271 if ((so->so_state & SS_ISCONNECTING) &&
272 so->so_error == 0 && rep &&
273 (error = nfs_sigintr(nmp, rep, rep->r_td)) != 0){
274 so->so_state &= ~SS_ISCONNECTING;
280 error = so->so_error;
287 so->so_rcv.ssb_timeo = (5 * hz);
288 so->so_snd.ssb_timeo = (5 * hz);
291 * Get buffer reservation size from sysctl, but impose reasonable
294 if (nmp->nm_sotype == SOCK_STREAM) {
295 if (so->so_proto->pr_flags & PR_CONNREQUIRED) {
299 bzero(&sopt, sizeof sopt);
300 sopt.sopt_level = SOL_SOCKET;
301 sopt.sopt_name = SO_KEEPALIVE;
302 sopt.sopt_val = &val;
303 sopt.sopt_valsize = sizeof val;
307 if (so->so_proto->pr_protocol == IPPROTO_TCP) {
311 bzero(&sopt, sizeof sopt);
312 sopt.sopt_level = IPPROTO_TCP;
313 sopt.sopt_name = TCP_NODELAY;
314 sopt.sopt_val = &val;
315 sopt.sopt_valsize = sizeof val;
320 error = soreserve(so, nfs_soreserve, nfs_soreserve, NULL);
323 atomic_set_int(&so->so_rcv.ssb_flags, SSB_NOINTR);
324 atomic_set_int(&so->so_snd.ssb_flags, SSB_NOINTR);
326 /* Initialize other non-zero congestion variables */
327 nmp->nm_srtt[0] = nmp->nm_srtt[1] = nmp->nm_srtt[2] =
328 nmp->nm_srtt[3] = (NFS_TIMEO << NFS_RTT_SCALE_BITS);
329 nmp->nm_sdrtt[0] = nmp->nm_sdrtt[1] = nmp->nm_sdrtt[2] =
330 nmp->nm_sdrtt[3] = 0;
331 nmp->nm_maxasync_scaled = NFS_MINASYNC_SCALED;
332 nmp->nm_timeouts = 0;
335 * Assign nm_so last. The moment nm_so is assigned the nfs_timer()
336 * can mess with the socket.
343 soshutdown(so, SHUT_RDWR);
344 soclose(so, FNONBLOCK);
351 * Called when a connection is broken on a reliable protocol.
352 * - clean up the old socket
353 * - nfs_connect() again
354 * - set R_NEEDSXMIT for all outstanding requests on mount point
355 * If this fails the mount point is DEAD!
356 * nb: Must be called with the nfs_sndlock() set on the mount point.
359 nfs_reconnect(struct nfsmount *nmp, struct nfsreq *rep)
365 if (nmp->nm_rxstate >= NFSSVC_STOPPING)
367 while ((error = nfs_connect(nmp, rep)) != 0) {
368 if (error == EINTR || error == ERESTART)
372 if (nmp->nm_rxstate >= NFSSVC_STOPPING)
374 (void) tsleep((caddr_t)&lbolt, 0, "nfscon", 0);
378 * Loop through outstanding request list and fix up all requests
382 TAILQ_FOREACH(req, &nmp->nm_reqq, r_chain) {
383 KKASSERT(req->r_nmp == nmp);
384 req->r_flags |= R_NEEDSXMIT;
391 * NFS disconnect. Clean up and unlink.
394 nfs_disconnect(struct nfsmount *nmp)
401 soshutdown(so, SHUT_RDWR);
402 soclose(so, FNONBLOCK);
407 nfs_safedisconnect(struct nfsmount *nmp)
409 nfs_rcvlock(nmp, NULL);
415 * This is the nfs send routine. For connection based socket types, it
416 * must be called with an nfs_sndlock() on the socket.
417 * "rep == NULL" indicates that it has been called from a server.
418 * For the client side:
419 * - return EINTR if the RPC is terminated, 0 otherwise
420 * - set R_NEEDSXMIT if the send fails for any reason
421 * - do any cleanup required by recoverable socket errors (?)
422 * For the server side:
423 * - return EINTR or ERESTART if interrupted by a signal
424 * - return EPIPE if a connection is lost for connection based sockets (TCP...)
425 * - do any cleanup required by recoverable socket errors (?)
428 nfs_send(struct socket *so, struct sockaddr *nam, struct mbuf *top,
431 struct sockaddr *sendnam;
432 int error, soflags, flags;
435 if (rep->r_flags & R_SOFTTERM) {
439 if ((so = rep->r_nmp->nm_so) == NULL) {
440 rep->r_flags |= R_NEEDSXMIT;
444 rep->r_flags &= ~R_NEEDSXMIT;
445 soflags = rep->r_nmp->nm_soflags;
447 soflags = so->so_proto->pr_flags;
449 if ((soflags & PR_CONNREQUIRED) || (so->so_state & SS_ISCONNECTED))
453 if (so->so_type == SOCK_SEQPACKET)
459 * calls pru_sosend -> sosend -> so_pru_send -> netrpc
461 error = so_pru_sosend(so, sendnam, NULL, top, NULL, flags,
464 * ENOBUFS for dgram sockets is transient and non fatal.
465 * No need to log, and no need to break a soft mount.
467 if (error == ENOBUFS && so->so_type == SOCK_DGRAM) {
470 * do backoff retransmit on client
473 if ((rep->r_nmp->nm_state & NFSSTA_SENDSPACE) == 0) {
474 rep->r_nmp->nm_state |= NFSSTA_SENDSPACE;
475 kprintf("Warning: NFS: Insufficient sendspace "
477 "\t You must increase vfs.nfs.soreserve"
478 " or decrease vfs.nfs.maxasyncbio\n",
479 so->so_snd.ssb_hiwat);
481 rep->r_flags |= R_NEEDSXMIT;
487 log(LOG_INFO, "nfs send error %d for server %s\n",error,
488 rep->r_nmp->nm_mountp->mnt_stat.f_mntfromname);
490 * Deal with errors for the client side.
492 if (rep->r_flags & R_SOFTTERM)
495 rep->r_flags |= R_NEEDSXMIT;
497 log(LOG_INFO, "nfsd send error %d\n", error);
501 * Handle any recoverable (soft) socket errors here. (?)
503 if (error != EINTR && error != ERESTART &&
504 error != EWOULDBLOCK && error != EPIPE)
511 * Receive a Sun RPC Request/Reply. For SOCK_DGRAM, the work is all
512 * done by soreceive(), but for SOCK_STREAM we must deal with the Record
513 * Mark and consolidate the data into a new mbuf list.
514 * nb: Sometimes TCP passes the data up to soreceive() in long lists of
516 * For SOCK_STREAM we must be very careful to read an entire record once
517 * we have read any of it, even if the system call has been interrupted.
520 nfs_receive(struct nfsmount *nmp, struct nfsreq *rep,
521 struct sockaddr **aname, struct mbuf **mp)
528 struct mbuf *control;
530 struct sockaddr **getnam;
531 int error, sotype, rcvflg;
532 struct thread *td = curthread; /* XXX */
535 * Set up arguments for soreceive()
539 sotype = nmp->nm_sotype;
542 * For reliable protocols, lock against other senders/receivers
543 * in case a reconnect is necessary.
544 * For SOCK_STREAM, first get the Record Mark to find out how much
545 * more there is to get.
546 * We must lock the socket against other receivers
547 * until we have an entire rpc request/reply.
549 if (sotype != SOCK_DGRAM) {
550 error = nfs_sndlock(nmp, rep);
555 * Check for fatal errors and resending request.
558 * Ugh: If a reconnect attempt just happened, nm_so
559 * would have changed. NULL indicates a failed
560 * attempt that has essentially shut down this
563 if (rep && (rep->r_mrep || (rep->r_flags & R_SOFTTERM))) {
569 error = nfs_reconnect(nmp, rep);
576 while (rep && (rep->r_flags & R_NEEDSXMIT)) {
577 m = m_copym(rep->r_mreq, 0, M_COPYALL, MB_WAIT);
578 nfsstats.rpcretries++;
579 error = nfs_send(so, rep->r_nmp->nm_nam, m, rep);
581 if (error == EINTR || error == ERESTART ||
582 (error = nfs_reconnect(nmp, rep)) != 0) {
590 if (sotype == SOCK_STREAM) {
592 * Get the length marker from the stream
594 aio.iov_base = (caddr_t)&len;
595 aio.iov_len = sizeof(u_int32_t);
598 auio.uio_segflg = UIO_SYSSPACE;
599 auio.uio_rw = UIO_READ;
601 auio.uio_resid = sizeof(u_int32_t);
604 rcvflg = MSG_WAITALL;
605 error = so_pru_soreceive(so, NULL, &auio, NULL,
607 if (error == EWOULDBLOCK && rep) {
608 if (rep->r_flags & R_SOFTTERM)
611 } while (error == EWOULDBLOCK);
613 if (error == 0 && auio.uio_resid > 0) {
615 * Only log short packets if not EOF
617 if (auio.uio_resid != sizeof(u_int32_t))
619 "short receive (%d/%d) from nfs server %s\n",
620 (int)(sizeof(u_int32_t) - auio.uio_resid),
621 (int)sizeof(u_int32_t),
622 nmp->nm_mountp->mnt_stat.f_mntfromname);
627 len = ntohl(len) & ~0x80000000;
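		/*
		 * Hedged illustration (not in the original source): the Sun
		 * RPC record mark just read is a 32-bit big-endian word whose
		 * high bit flags the last fragment of a record and whose low
		 * 31 bits carry the fragment length.  The sender builds it as
		 *
		 *	mark = htonl(0x80000000 | fraglen);
		 *
		 * (see nfs_request_auth()) and the statement above strips the
		 * flag bit to recover the length.
		 */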
629 * This is SERIOUS! We are out of sync with the sender
630 * and forcing a disconnect/reconnect is all I can do.
632 if (len > NFS_MAXPACKET) {
633 log(LOG_ERR, "%s (%d) from nfs server %s\n",
634 "impossible packet length",
636 nmp->nm_mountp->mnt_stat.f_mntfromname);
642 * Get the rest of the packet as an mbuf chain
646 rcvflg = MSG_WAITALL;
647 error = so_pru_soreceive(so, NULL, NULL, &sio,
649 } while (error == EWOULDBLOCK || error == EINTR ||
651 if (error == 0 && sio.sb_cc != len) {
654 "short receive (%zu/%d) from nfs server %s\n",
655 (size_t)len - auio.uio_resid, len,
656 nmp->nm_mountp->mnt_stat.f_mntfromname);
662 * Non-stream, so get the whole packet by not
663 * specifying MSG_WAITALL and by specifying a large
666 * We have no use for control msg., but must grab them
667 * and then throw them away so we know what is going
670 sbinit(&sio, 100000000);
673 error = so_pru_soreceive(so, NULL, NULL, &sio,
677 if (error == EWOULDBLOCK && rep) {
678 if (rep->r_flags & R_SOFTTERM) {
683 } while (error == EWOULDBLOCK ||
684 (error == 0 && sio.sb_mb == NULL && control));
685 if ((rcvflg & MSG_EOR) == 0)
687 if (error == 0 && sio.sb_mb == NULL)
693 if (error && error != EINTR && error != ERESTART) {
696 if (error != EPIPE) {
698 "receive error %d from nfs server %s\n",
700 nmp->nm_mountp->mnt_stat.f_mntfromname);
702 error = nfs_sndlock(nmp, rep);
704 error = nfs_reconnect(nmp, rep);
712 if ((so = nmp->nm_so) == NULL)
714 if (so->so_state & SS_ISCONNECTED)
718 sbinit(&sio, 100000000);
721 error = so_pru_soreceive(so, getnam, NULL, &sio,
723 if (error == EWOULDBLOCK && rep &&
724 (rep->r_flags & R_SOFTTERM)) {
728 } while (error == EWOULDBLOCK);
734 * A shutdown may result in no error and no mbuf.
737 if (*mp == NULL && error == 0)
746 * Search for any mbufs that are not a multiple of 4 bytes long
747 * or with m_data not longword aligned.
748 * These could cause pointer alignment problems, so copy them to
749 * well aligned mbufs.
751 nfs_realign(mp, 5 * NFSX_UNSIGNED);
756 * Implement receipt of reply on a socket.
758 * We must search through the list of received datagrams matching them
759 * with outstanding requests using the xid, until ours is found.
761 * If myrep is NULL we process packets on the socket until
762 * interrupted or until nm_reqrxq is non-empty.
766 nfs_reply(struct nfsmount *nmp, struct nfsreq *myrep)
769 struct sockaddr *nam;
773 struct nfsm_info info;
776 * Loop around until we get our own reply
780 * Lock against other receivers so that I don't get stuck in
781 * sbwait() after someone else has received my reply for me.
782 * Also necessary for connection based protocols to avoid
783 * race conditions during a reconnect.
785 * If nfs_rcvlock() returns EALREADY, that means that
786 * the reply has already been received by another
787 * process and we can return immediately. In this
788 * case, the lock is not taken to avoid races with
793 error = nfs_rcvlock(nmp, myrep);
794 if (error == EALREADY)
800 * If myrep is NULL we are the receiver helper thread.
801 * Stop waiting for incoming replies if there are
802 * messages sitting on reqrxq that we need to process,
803 * or if a shutdown request is pending.
805 if (myrep == NULL && (TAILQ_FIRST(&nmp->nm_reqrxq) ||
806 nmp->nm_rxstate > NFSSVC_PENDING)) {
812 * Get the next Rpc reply off the socket
814 * We cannot release the receive lock until we've
815 * filled in rep->r_mrep, otherwise a waiting
816 * thread may deadlock in soreceive with no incoming
819 error = nfs_receive(nmp, myrep, &nam, &info.mrep);
822 * Ignore routing errors on connectionless protocols??
825 if (NFSIGNORE_SOERROR(nmp->nm_soflags, error)) {
826 if (nmp->nm_so == NULL)
828 nmp->nm_so->so_error = 0;
837 * Get the xid and check that it is an rpc reply
840 info.dpos = mtod(info.md, caddr_t);
841 NULLOUT(tl = nfsm_dissect(&info, 2*NFSX_UNSIGNED));
843 if (*tl != rpc_reply) {
844 nfsstats.rpcinvalid++;
853 * Loop through the request list to match up the reply
854 * Iff no match, just drop the datagram. On match, set
855 * r_mrep atomically to prevent the timer from messing
856 * around with the request after we have exited the critical
860 TAILQ_FOREACH(rep, &nmp->nm_reqq, r_chain) {
861 if (rep->r_mrep == NULL && rxid == rep->r_xid)
866 * Fill in the rest of the reply if we found a match.
868 * Deal with duplicate responses if there was no match.
872 rep->r_dpos = info.dpos;
876 rt = &nfsrtt.rttl[nfsrtt.pos];
877 rt->proc = rep->r_procnum;
880 rt->cwnd = nmp->nm_maxasync_scaled;
881 rt->srtt = nmp->nm_srtt[proct[rep->r_procnum] - 1];
882 rt->sdrtt = nmp->nm_sdrtt[proct[rep->r_procnum] - 1];
883 rt->fsid = nmp->nm_mountp->mnt_stat.f_fsid;
884 getmicrotime(&rt->tstamp);
885 if (rep->r_flags & R_TIMING)
886 rt->rtt = rep->r_rtt;
889 nfsrtt.pos = (nfsrtt.pos + 1) % NFSRTTLOGSIZ;
893 * New congestion control is based only on async
896 if (nmp->nm_maxasync_scaled < NFS_MAXASYNC_SCALED)
897 ++nmp->nm_maxasync_scaled;
898 if (rep->r_flags & R_SENT) {
899 rep->r_flags &= ~R_SENT;
902 * Update rtt using a gain of 0.125 on the mean
903 * and a gain of 0.25 on the deviation.
905 * NOTE SRTT/SDRTT are only good if R_TIMING is set.
907 if ((rep->r_flags & R_TIMING) && rep->r_rexmit == 0) {
909 * Since the timer resolution of
910 * NFS_HZ is so coarse, it can often
911 * result in r_rtt == 0. Since
912 * r_rtt == N means that the actual
913 * rtt is between N+dt and N+2-dt ticks,
919 #define NFSRSB NFS_RTT_SCALE_BITS
920 n = ((NFS_SRTT(rep) * 7) +
921 (rep->r_rtt << NFSRSB)) >> 3;
922 d = n - NFS_SRTT(rep);
926 * Don't let the jitter calculation decay
927 * too quickly, but we want a fast rampup.
932 if (d < NFS_SDRTT(rep))
933 n = ((NFS_SDRTT(rep) * 15) + d) >> 4;
935 n = ((NFS_SDRTT(rep) * 3) + d) >> 2;
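			/*
			 * Worked example (hedged, not in the original source):
			 * with a scaled SRTT of 800 and a new sample of 1024
			 * (4 ticks << NFSRSB), the mean moves by 1/8 of the
			 * error: (800 * 7 + 1024) >> 3 == 828.  The deviation
			 * filter above ramps up with a gain of 1/4 and decays
			 * with a gain of 1/16.
			 */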
939 nmp->nm_timeouts = 0;
940 rep->r_mrep = info.mrep;
941 nfs_hardterm(rep, 0);
944 * Extract vers, prog, nfsver, procnum. A duplicate
945 * response means we didn't wait long enough so
946 * we increase the SRTT to avoid future spurious
949 u_int procnum = nmp->nm_lastreprocnum;
952 if (procnum < NFS_NPROCS && proct[procnum]) {
955 n = nmp->nm_srtt[proct[procnum]];
956 n += NFS_ASYSCALE * NFS_HZ;
957 if (n < NFS_ASYSCALE * NFS_HZ * 10)
958 n = NFS_ASYSCALE * NFS_HZ * 10;
959 nmp->nm_srtt[proct[procnum]] = n;
966 * If not matched to a request, drop it.
967 * If it's mine, get out.
970 nfsstats.rpcunexpected++;
973 } else if (rep == myrep) {
974 if (rep->r_mrep == NULL)
975 panic("nfsreply nil");
982 * Run the request state machine until the target state is reached
983 * or a fatal error occurs. The target state is not run. Specifying
984 * a target of NFSM_STATE_DONE runs the state machine until the rpc
987 * EINPROGRESS is returned for all states other than the DONE state,
988 * indicating that the rpc is still in progress.
991 nfs_request(struct nfsm_info *info, nfsm_state_t bstate, nfsm_state_t estate)
995 while (info->state >= bstate && info->state < estate) {
996 switch(info->state) {
997 case NFSM_STATE_SETUP:
999 * Setup the nfsreq. Any error which occurs during
1000 * this state is fatal.
1002 info->error = nfs_request_setup(info);
1004 info->state = NFSM_STATE_DONE;
1005 return (info->error);
1008 req->r_mrp = &info->mrep;
1009 req->r_mdp = &info->md;
1010 req->r_dposp = &info->dpos;
1011 info->state = NFSM_STATE_AUTH;
1014 case NFSM_STATE_AUTH:
1016 * Authenticate the nfsreq. Any error which occurs
1017 * during this state is fatal.
1019 info->error = nfs_request_auth(info->req);
1021 info->state = NFSM_STATE_DONE;
1022 return (info->error);
1024 info->state = NFSM_STATE_TRY;
1027 case NFSM_STATE_TRY:
1029 * Transmit or retransmit attempt. An error in this
1030 * state is ignored and we always move on to the
1033 * This can trivially race the receiver if the
1034 * request is asynchronous. nfs_request_try()
1035 * will thus set the state for us and we
1036 * must also return immediately if we are
1037 * running an async state machine, because
1038 * info can become invalid due to races after
1041 if (info->req->r_flags & R_ASYNC) {
1042 nfs_request_try(info->req);
1043 if (estate == NFSM_STATE_WAITREPLY)
1044 return (EINPROGRESS);
1046 nfs_request_try(info->req);
1047 info->state = NFSM_STATE_WAITREPLY;
1050 case NFSM_STATE_WAITREPLY:
1052 * Wait for a reply or timeout and move on to the
1053 * next state. The error returned by this state
1054 * is passed to the processing code in the next
1057 info->error = nfs_request_waitreply(info->req);
1058 info->state = NFSM_STATE_PROCESSREPLY;
1060 case NFSM_STATE_PROCESSREPLY:
1062 * Process the reply or timeout. Errors which occur
1063 * in this state may cause the state machine to
1064 * go back to an earlier state, and are fatal
1067 info->error = nfs_request_processreply(info,
1069 switch(info->error) {
1071 info->state = NFSM_STATE_AUTH;
1074 info->state = NFSM_STATE_TRY;
1078 * Operation complete, with or without an
1079 * error. We are done.
1082 info->state = NFSM_STATE_DONE;
1083 return (info->error);
1086 case NFSM_STATE_DONE:
1088 * Shouldn't be reached
1090 return (info->error);
1096 * If we are done return the error code (if any).
1097 * Otherwise return EINPROGRESS.
1099 if (info->state == NFSM_STATE_DONE)
1100 return (info->error);
1101 return (EINPROGRESS);
1105 * nfs_request - goes something like this
1106 * - fill in request struct
1107 * - links it into list
1108 * - calls nfs_send() for first transmit
1109 * - calls nfs_receive() to get reply
1110 * - break down rpc header and return with nfs reply pointed to
1112 * nb: always frees up mreq mbuf list
1115 nfs_request_setup(nfsm_info_t info)
1118 struct nfsmount *nmp;
1123 * Reject requests while attempting a forced unmount.
1125 if (info->vp->v_mount->mnt_kern_flag & MNTK_UNMOUNTF) {
1126 m_freem(info->mreq);
1130 nmp = VFSTONFS(info->vp->v_mount);
1131 req = kmalloc(sizeof(struct nfsreq), M_NFSREQ, M_WAITOK);
1133 req->r_vp = info->vp;
1134 req->r_td = info->td;
1135 req->r_procnum = info->procnum;
1137 req->r_cred = info->cred;
1145 req->r_mrest = info->mreq;
1146 req->r_mrest_len = i;
1149 * The presence of a non-NULL r_info in req indicates
1150 * async completion via our helper threads. See the receiver
1155 req->r_flags = R_ASYNC;
1165 nfs_request_auth(struct nfsreq *rep)
1167 struct nfsmount *nmp = rep->r_nmp;
1169 char nickv[RPCX_NICKVERF];
1170 int error = 0, auth_len, auth_type;
1173 char *auth_str, *verf_str;
1177 rep->r_failed_auth = 0;
1180 * Get the RPC header with authorization.
1182 verf_str = auth_str = NULL;
1183 if (nmp->nm_flag & NFSMNT_KERB) {
1185 verf_len = sizeof (nickv);
1186 auth_type = RPCAUTH_KERB4;
1187 bzero((caddr_t)rep->r_key, sizeof(rep->r_key));
1188 if (rep->r_failed_auth ||
1189 nfs_getnickauth(nmp, cred, &auth_str, &auth_len,
1190 verf_str, verf_len)) {
1191 error = nfs_getauth(nmp, rep, cred, &auth_str,
1192 &auth_len, verf_str, &verf_len, rep->r_key);
1194 m_freem(rep->r_mrest);
1195 rep->r_mrest = NULL;
1196 kfree((caddr_t)rep, M_NFSREQ);
1201 auth_type = RPCAUTH_UNIX;
1202 if (cred->cr_ngroups < 1)
1203 panic("nfsreq nogrps");
1204 auth_len = ((((cred->cr_ngroups - 1) > nmp->nm_numgrps) ?
1205 nmp->nm_numgrps : (cred->cr_ngroups - 1)) << 2) +
1209 nfs_checkpkt(rep->r_mrest, rep->r_mrest_len);
1210 m = nfsm_rpchead(cred, nmp->nm_flag, rep->r_procnum, auth_type,
1211 auth_len, auth_str, verf_len, verf_str,
1212 rep->r_mrest, rep->r_mrest_len, &rep->r_mheadend, &xid);
1213 rep->r_mrest = NULL;
1215 kfree(auth_str, M_TEMP);
1218 * For stream protocols, insert a Sun RPC Record Mark.
1220 if (nmp->nm_sotype == SOCK_STREAM) {
1221 M_PREPEND(m, NFSX_UNSIGNED, MB_WAIT);
1223 kfree(rep, M_NFSREQ);
1226 *mtod(m, u_int32_t *) = htonl(0x80000000 |
1227 (m->m_pkthdr.len - NFSX_UNSIGNED));
1230 nfs_checkpkt(m, m->m_pkthdr.len);
1238 nfs_request_try(struct nfsreq *rep)
1240 struct nfsmount *nmp = rep->r_nmp;
1245 * Request is not on any queue, only the owner has access to it
1246 * so it should not be locked by anyone atm.
1248 * Interlock to prevent races. While locked the only remote
1249 * action possible is for r_mrep to be set (once we enqueue it).
1251 if (rep->r_flags == 0xdeadc0de) {
1252 print_backtrace(-1);
1253 panic("flags nbad\n");
1255 KKASSERT((rep->r_flags & (R_LOCKED | R_ONREQQ)) == 0);
1256 if (nmp->nm_flag & NFSMNT_SOFT)
1257 rep->r_retry = nmp->nm_retry;
1259 rep->r_retry = NFS_MAXREXMIT + 1; /* past clip limit */
1260 rep->r_rtt = rep->r_rexmit = 0;
1261 if (proct[rep->r_procnum] > 0)
1262 rep->r_flags |= R_TIMING | R_LOCKED;
1264 rep->r_flags |= R_LOCKED;
1268 * Do the client side RPC.
1270 nfsstats.rpcrequests++;
1272 if (nmp->nm_flag & NFSMNT_FORCE) {
1273 rep->r_flags |= R_SOFTTERM;
1274 rep->r_flags &= ~R_LOCKED;
1279 * Chain request into list of outstanding requests. Be sure
1280 * to put it LAST so timer finds oldest requests first. Note
1281 * that our control of R_LOCKED prevents the request from
1282 * getting ripped out from under us or transmitted by the
1285 * For requests with info structures we must atomically set the
1286 * info's state because the structure could become invalid upon
1287 * return due to races (i.e., if async)
1290 mtx_link_init(&rep->r_link);
1291 TAILQ_INSERT_TAIL(&nmp->nm_reqq, rep, r_chain);
1292 rep->r_flags |= R_ONREQQ;
1294 if (rep->r_flags & R_ASYNC)
1295 rep->r_info->state = NFSM_STATE_WAITREPLY;
1301 * Send if we can. Congestion control is not handled here any more
1302 * because trying to defer the initial send based on the nfs_timer
1303 * requires having a very fast nfs_timer, which is silly.
1306 if (nmp->nm_soflags & PR_CONNREQUIRED)
1307 error = nfs_sndlock(nmp, rep);
1309 m2 = m_copym(rep->r_mreq, 0, M_COPYALL, MB_WAIT);
1310 error = nfs_send(nmp->nm_so, nmp->nm_nam, m2, rep);
1311 if (nmp->nm_soflags & PR_CONNREQUIRED)
1313 rep->r_flags &= ~R_NEEDSXMIT;
1314 if ((rep->r_flags & R_SENT) == 0) {
1315 rep->r_flags |= R_SENT;
1318 rep->r_flags |= R_NEEDSXMIT;
1321 rep->r_flags |= R_NEEDSXMIT;
1328 * Release the lock. The only remote action that may have occurred
1329 * would have been the setting of rep->r_mrep. If this occurred
1330 * and the request was async we have to move it to the reader
1331 * thread's queue for action.
1333 * For async requests also make sure the reader is woken up so
1334 * it gets on the socket to read responses.
1337 if (rep->r_flags & R_ASYNC) {
1339 nfs_hardterm(rep, 1);
1340 rep->r_flags &= ~R_LOCKED;
1341 nfssvc_iod_reader_wakeup(nmp);
1343 rep->r_flags &= ~R_LOCKED;
1345 if (rep->r_flags & R_WANTED) {
1346 rep->r_flags &= ~R_WANTED;
1354 * This code is only called for synchronous requests. Completed synchronous
1355 * requests are left on reqq and we remove them before moving on to the
1359 nfs_request_waitreply(struct nfsreq *rep)
1361 struct nfsmount *nmp = rep->r_nmp;
1364 KKASSERT((rep->r_flags & R_ASYNC) == 0);
1367 * Wait until the request is finished.
1369 error = nfs_reply(nmp, rep);
1372 * RPC done, unlink the request, but don't rip it out from under
1373 * the callout timer.
1375 * Once unlinked no other receiver or the timer will have
1376 * visibility, so we do not have to set R_LOCKED.
1379 while (rep->r_flags & R_LOCKED) {
1380 rep->r_flags |= R_WANTED;
1381 tsleep(rep, 0, "nfstrac", 0);
1383 KKASSERT(rep->r_flags & R_ONREQQ);
1384 TAILQ_REMOVE(&nmp->nm_reqq, rep, r_chain);
1385 rep->r_flags &= ~R_ONREQQ;
1387 if (TAILQ_FIRST(&nmp->nm_bioq) &&
1388 nmp->nm_reqqlen <= nfs_maxasyncbio * 2 / 3) {
1389 nfssvc_iod_writer_wakeup(nmp);
1394 * Decrement the outstanding request count.
1396 if (rep->r_flags & R_SENT) {
1397 rep->r_flags &= ~R_SENT;
1403 * Process reply with error returned from nfs_request_waitreply().
1405 * Returns EAGAIN if it wants us to loop up to nfs_request_try() again.
1406 * Returns ENEEDAUTH if it wants us to loop up to nfs_request_auth() again.
1409 nfs_request_processreply(nfsm_info_t info, int error)
1411 struct nfsreq *req = info->req;
1412 struct nfsmount *nmp = req->r_nmp;
1418 * If there was a successful reply and a tprintf msg,
1419 * tprintf a response.
1421 if (error == 0 && (req->r_flags & R_TPRINTFMSG)) {
1422 nfs_msg(req->r_td, nmp->nm_mountp->mnt_stat.f_mntfromname,
1425 info->mrep = req->r_mrep;
1426 info->md = req->r_md;
1427 info->dpos = req->r_dpos;
1429 m_freem(req->r_mreq);
1431 kfree(req, M_NFSREQ);
1437 * break down the rpc header and check if ok
1439 NULLOUT(tl = nfsm_dissect(info, 3 * NFSX_UNSIGNED));
1440 if (*tl++ == rpc_msgdenied) {
1441 if (*tl == rpc_mismatch) {
1443 } else if ((nmp->nm_flag & NFSMNT_KERB) &&
1444 *tl++ == rpc_autherr) {
1445 if (req->r_failed_auth == 0) {
1446 req->r_failed_auth++;
1447 req->r_mheadend->m_next = NULL;
1448 m_freem(info->mrep);
1450 m_freem(req->r_mreq);
1459 m_freem(info->mrep);
1461 m_freem(req->r_mreq);
1463 kfree(req, M_NFSREQ);
1469 * Grab any Kerberos verifier, otherwise just throw it away.
1471 verf_type = fxdr_unsigned(int, *tl++);
1472 i = fxdr_unsigned(int32_t, *tl);
1473 if ((nmp->nm_flag & NFSMNT_KERB) && verf_type == RPCAUTH_KERB4) {
1474 error = nfs_savenickauth(nmp, req->r_cred, i, req->r_key,
1475 &info->md, &info->dpos, info->mrep);
1479 ERROROUT(nfsm_adv(info, nfsm_rndup(i)));
1481 NULLOUT(tl = nfsm_dissect(info, NFSX_UNSIGNED));
1484 NULLOUT(tl = nfsm_dissect(info, NFSX_UNSIGNED));
1486 error = fxdr_unsigned(int, *tl);
1489 * Does anyone even implement this? Just impose
1492 if ((nmp->nm_flag & NFSMNT_NFSV3) &&
1493 error == NFSERR_TRYLATER) {
1494 m_freem(info->mrep);
1498 tsleep((caddr_t)&lbolt, 0, "nqnfstry", 0);
1499 return (EAGAIN); /* goto tryagain */
1503 * If the File Handle was stale, invalidate the
1504 * lookup cache, just in case.
1506 * To avoid namecache<->vnode deadlocks we must
1507 * release the vnode lock if we hold it.
1509 if (error == ESTALE) {
1510 struct vnode *vp = req->r_vp;
1513 ltype = lockstatus(&vp->v_lock, curthread);
1514 if (ltype == LK_EXCLUSIVE || ltype == LK_SHARED)
1515 lockmgr(&vp->v_lock, LK_RELEASE);
1516 cache_inval_vp(vp, CINV_CHILDREN);
1517 if (ltype == LK_EXCLUSIVE || ltype == LK_SHARED)
1518 lockmgr(&vp->v_lock, ltype);
1520 if (nmp->nm_flag & NFSMNT_NFSV3) {
1521 KKASSERT(*req->r_mrp == info->mrep);
1522 KKASSERT(*req->r_mdp == info->md);
1523 KKASSERT(*req->r_dposp == info->dpos);
1524 error |= NFSERR_RETERR;
1526 m_freem(info->mrep);
1529 m_freem(req->r_mreq);
1531 kfree(req, M_NFSREQ);
1536 KKASSERT(*req->r_mrp == info->mrep);
1537 KKASSERT(*req->r_mdp == info->md);
1538 KKASSERT(*req->r_dposp == info->dpos);
1539 m_freem(req->r_mreq);
1541 FREE(req, M_NFSREQ);
1544 m_freem(info->mrep);
1546 error = EPROTONOSUPPORT;
1548 m_freem(req->r_mreq);
1550 kfree(req, M_NFSREQ);
1555 #ifndef NFS_NOSERVER
1557 * Generate the rpc reply header
1558 * siz arg. is used to decide if adding a cluster is worthwhile
1561 nfs_rephead(int siz, struct nfsrv_descript *nd, struct nfssvc_sock *slp,
1562 int err, struct mbuf **mrq, struct mbuf **mbp, caddr_t *bposp)
1565 struct nfsm_info info;
1567 siz += RPC_REPLYSIZ;
1568 info.mb = m_getl(max_hdr + siz, MB_WAIT, MT_DATA, M_PKTHDR, NULL);
1569 info.mreq = info.mb;
1570 info.mreq->m_pkthdr.len = 0;
1572 * If this is not a cluster, try and leave leading space
1573 * for the lower level headers.
1575 if ((max_hdr + siz) < MINCLSIZE)
1576 info.mreq->m_data += max_hdr;
1577 tl = mtod(info.mreq, u_int32_t *);
1578 info.mreq->m_len = 6 * NFSX_UNSIGNED;
1579 info.bpos = ((caddr_t)tl) + info.mreq->m_len;
1580 *tl++ = txdr_unsigned(nd->nd_retxid);
1582 if (err == ERPCMISMATCH || (err & NFSERR_AUTHERR)) {
1583 *tl++ = rpc_msgdenied;
1584 if (err & NFSERR_AUTHERR) {
1585 *tl++ = rpc_autherr;
1586 *tl = txdr_unsigned(err & ~NFSERR_AUTHERR);
1587 info.mreq->m_len -= NFSX_UNSIGNED;
1588 info.bpos -= NFSX_UNSIGNED;
1590 *tl++ = rpc_mismatch;
1591 *tl++ = txdr_unsigned(RPC_VER2);
1592 *tl = txdr_unsigned(RPC_VER2);
1595 *tl++ = rpc_msgaccepted;
1598 * For Kerberos authentication, we must send the nickname
1599 * verifier back, otherwise just RPCAUTH_NULL.
1601 if (nd->nd_flag & ND_KERBFULL) {
1602 struct nfsuid *nuidp;
1603 struct timeval ktvin, ktvout;
1605 for (nuidp = NUIDHASH(slp, nd->nd_cr.cr_uid)->lh_first;
1606 nuidp != 0; nuidp = nuidp->nu_hash.le_next) {
1607 if (nuidp->nu_cr.cr_uid == nd->nd_cr.cr_uid &&
1608 (!nd->nd_nam2 || netaddr_match(NU_NETFAM(nuidp),
1609 &nuidp->nu_haddr, nd->nd_nam2)))
1614 txdr_unsigned(nuidp->nu_timestamp.tv_sec - 1);
1616 txdr_unsigned(nuidp->nu_timestamp.tv_usec);
1619 * Encrypt the timestamp in ecb mode using the
1629 *tl++ = rpc_auth_kerb;
1630 *tl++ = txdr_unsigned(3 * NFSX_UNSIGNED);
1631 *tl = ktvout.tv_sec;
1632 tl = nfsm_build(&info, 3 * NFSX_UNSIGNED);
1633 *tl++ = ktvout.tv_usec;
1634 *tl++ = txdr_unsigned(nuidp->nu_cr.cr_uid);
1645 *tl = txdr_unsigned(RPC_PROGUNAVAIL);
1648 *tl = txdr_unsigned(RPC_PROGMISMATCH);
1649 tl = nfsm_build(&info, 2 * NFSX_UNSIGNED);
1650 *tl++ = txdr_unsigned(2);
1651 *tl = txdr_unsigned(3);
1654 *tl = txdr_unsigned(RPC_PROCUNAVAIL);
1657 *tl = txdr_unsigned(RPC_GARBAGE);
1661 if (err != NFSERR_RETVOID) {
1662 tl = nfsm_build(&info, NFSX_UNSIGNED);
1664 *tl = txdr_unsigned(nfsrv_errmap(nd, err));
1676 if (err != 0 && err != NFSERR_RETVOID)
1677 nfsstats.srvrpc_errs++;
1682 #endif /* NFS_NOSERVER */
1685 * Nfs timer routine.
1687 * Scan the nfsreq list and retransmit any requests that have timed out.
1688 * To avoid retransmission attempts on STREAM sockets (in the future) make
1689 * sure to set the r_retry field to 0 (implies nm_retry == 0).
1691 * Requests with attached responses, terminated requests, and
1692 * locked requests are ignored. Locked requests will be picked up
1693 * in a later timer call.
1696 nfs_timer(void *arg /* never used */)
1698 struct nfsmount *nmp;
1700 #ifndef NFS_NOSERVER
1701 struct nfssvc_sock *slp;
1703 #endif /* NFS_NOSERVER */
1706 TAILQ_FOREACH(nmp, &nfs_mountq, nm_entry) {
1707 TAILQ_FOREACH(req, &nmp->nm_reqq, r_chain) {
1708 KKASSERT(nmp == req->r_nmp);
1711 if (req->r_flags & (R_SOFTTERM | R_LOCKED))
1713 req->r_flags |= R_LOCKED;
1714 if (nfs_sigintr(nmp, req, req->r_td)) {
1715 nfs_softterm(req, 1);
1719 req->r_flags &= ~R_LOCKED;
1720 if (req->r_flags & R_WANTED) {
1721 req->r_flags &= ~R_WANTED;
1726 #ifndef NFS_NOSERVER
1729 * Scan the write gathering queues for writes that need to be
1732 cur_usec = nfs_curusec();
1733 TAILQ_FOREACH(slp, &nfssvc_sockhead, ns_chain) {
1734 if (slp->ns_tq.lh_first && slp->ns_tq.lh_first->nd_time <= cur_usec)
1735 nfsrv_wakenfsd(slp, 1);
1737 #endif /* NFS_NOSERVER */
1739 callout_reset(&nfs_timer_handle, nfs_ticks, nfs_timer, NULL);
1744 nfs_timer_req(struct nfsreq *req)
1746 struct thread *td = &thread0; /* XXX for creds, will break if sleep */
1747 struct nfsmount *nmp = req->r_nmp;
1754 * rtt ticks and timeout calculation. Return if the timeout
1755 * has not been reached yet, unless the packet is flagged
1756 * for an immediate send.
1758 * The mean rtt doesn't help when we get random I/Os, we have
1759 * to multiply by fairly large numbers.
1761 if (req->r_rtt >= 0) {
1763 * Calculate the timeout to test against.
1766 if (nmp->nm_flag & NFSMNT_DUMBTIMR) {
1767 timeo = nmp->nm_timeo << NFS_RTT_SCALE_BITS;
1768 } else if (req->r_flags & R_TIMING) {
1769 timeo = NFS_SRTT(req) + NFS_SDRTT(req);
1771 timeo = nmp->nm_timeo << NFS_RTT_SCALE_BITS;
1773 timeo *= multt[req->r_procnum];
1774 /* timeo is still scaled by SCALE_BITS */
1776 #define NFSFS (NFS_RTT_SCALE * NFS_HZ)
1777 if (req->r_flags & R_TIMING) {
1778 static long last_time;
1779 if (nfs_showrtt && last_time != time_second) {
1780 kprintf("rpccmd %d NFS SRTT %d SDRTT %d "
1782 proct[req->r_procnum],
1783 NFS_SRTT(req), NFS_SDRTT(req),
1785 timeo % NFSFS * 1000 / NFSFS);
1786 last_time = time_second;
1792 * deal with nfs_timer jitter.
1794 timeo = (timeo >> NFS_RTT_SCALE_BITS) + 1;
1798 if (nmp->nm_timeouts > 0)
1799 timeo *= nfs_backoff[nmp->nm_timeouts - 1];
1800 if (timeo > NFS_MAXTIMEO)
1801 timeo = NFS_MAXTIMEO;
1802 if (req->r_rtt <= timeo) {
1803 if ((req->r_flags & R_NEEDSXMIT) == 0)
1805 } else if (nmp->nm_timeouts < 8) {
1811 * Check for server not responding
1813 if ((req->r_flags & R_TPRINTFMSG) == 0 &&
1814 req->r_rexmit > nmp->nm_deadthresh) {
1815 nfs_msg(req->r_td, nmp->nm_mountp->mnt_stat.f_mntfromname,
1817 req->r_flags |= R_TPRINTFMSG;
1819 if (req->r_rexmit >= req->r_retry) { /* too many */
1820 nfsstats.rpctimeouts++;
1821 nfs_softterm(req, 1);
1826 * Generally disable retransmission on reliable sockets,
1827 * unless the request is flagged for immediate send.
1829 if (nmp->nm_sotype != SOCK_DGRAM) {
1830 if (++req->r_rexmit > NFS_MAXREXMIT)
1831 req->r_rexmit = NFS_MAXREXMIT;
1832 if ((req->r_flags & R_NEEDSXMIT) == 0)
1837 * Stop here if we do not have a socket!
1839 if ((so = nmp->nm_so) == NULL)
1843 * If there is enough space and the window allows.. resend it.
1845 * r_rtt is left intact in case we get an answer after the
1846 * retry that was a reply to the original packet.
1848 * NOTE: so_pru_send()
1850 if (ssb_space(&so->so_snd) >= req->r_mreq->m_pkthdr.len &&
1851 (req->r_flags & (R_SENT | R_NEEDSXMIT)) &&
1852 (m = m_copym(req->r_mreq, 0, M_COPYALL, MB_DONTWAIT))){
1853 if ((nmp->nm_flag & NFSMNT_NOCONN) == 0)
1854 error = so_pru_send(so, 0, m, NULL, NULL, td);
1856 error = so_pru_send(so, 0, m, nmp->nm_nam, NULL, td);
1858 if (NFSIGNORE_SOERROR(nmp->nm_soflags, error))
1860 req->r_flags |= R_NEEDSXMIT;
1861 } else if (req->r_mrep == NULL) {
1863 * Iff first send, start timing
1864 * else turn timing off, backoff timer
1865 * and divide congestion window by 2.
1867 * It is possible for the so_pru_send() to
1868 * block and for us to race a reply so we
1869 * only do this if the reply field has not
1870 * been filled in. R_LOCKED will prevent
1871 * the request from being ripped out from under
1874 * Record the last resent procnum to aid us
1875 * in duplicate detection on receive.
1877 if ((req->r_flags & R_NEEDSXMIT) == 0) {
1880 if (++req->r_rexmit > NFS_MAXREXMIT)
1881 req->r_rexmit = NFS_MAXREXMIT;
1882 nmp->nm_maxasync_scaled >>= 1;
1883 if (nmp->nm_maxasync_scaled < NFS_MINASYNC_SCALED)
1884 nmp->nm_maxasync_scaled = NFS_MINASYNC_SCALED;
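			/*
			 * Hedged note (not in the original source): this
			 * complements the increment in nfs_reply() in an
			 * AIMD-style scheme: each retransmit halves
			 * nm_maxasync_scaled (floored at NFS_MINASYNC_SCALED),
			 * while each reply bumps it by one up to
			 * NFS_MAXASYNC_SCALED.
			 */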
1885 nfsstats.rpcretries++;
1886 nmp->nm_lastreprocnum = req->r_procnum;
1888 req->r_flags |= R_SENT;
1889 req->r_flags &= ~R_NEEDSXMIT;
1896 * Mark all of an nfs mount's outstanding requests with R_SOFTTERM and
1897 * wait for all requests to complete. This is used by forced unmounts
1898 * to terminate any outstanding RPCs.
1900 * Locked requests cannot be canceled but will be marked for
1904 nfs_nmcancelreqs(struct nfsmount *nmp)
1910 TAILQ_FOREACH(req, &nmp->nm_reqq, r_chain) {
1911 if (req->r_mrep != NULL || (req->r_flags & R_SOFTTERM))
1913 nfs_softterm(req, 0);
1915 /* XXX the other two queues as well */
1918 for (i = 0; i < 30; i++) {
1920 TAILQ_FOREACH(req, &nmp->nm_reqq, r_chain) {
1921 if (nmp == req->r_nmp)
1927 tsleep(&lbolt, 0, "nfscancel", 0);
1933 * Soft-terminate a request, effectively marking it as failed.
1935 * Must be called from within a critical section.
1938 nfs_softterm(struct nfsreq *rep, int islocked)
1940 rep->r_flags |= R_SOFTTERM;
1941 nfs_hardterm(rep, islocked);
1945 * Hard-terminate a request, typically after getting a response.
1947 * The state machine can still decide to re-issue it later if necessary.
1949 * Must be called from within a critical section.
1952 nfs_hardterm(struct nfsreq *rep, int islocked)
1954 struct nfsmount *nmp = rep->r_nmp;
1957 * The nm_send count is decremented now to avoid deadlocks
1958 * when the process in soreceive() hasn't yet managed to send
1961 if (rep->r_flags & R_SENT) {
1962 rep->r_flags &= ~R_SENT;
1966 * If we locked the request or nobody else has locked the request,
1967 * and the request is async, we can move it to the reader thread's
1968 * queue now and fix up the state.
1970 * If we locked the request or nobody else has locked the request,
1971 * we can wake up anyone blocked waiting for a response on the
1974 if (islocked || (rep->r_flags & R_LOCKED) == 0) {
1975 if ((rep->r_flags & (R_ONREQQ | R_ASYNC)) ==
1976 (R_ONREQQ | R_ASYNC)) {
1977 rep->r_flags &= ~R_ONREQQ;
1978 TAILQ_REMOVE(&nmp->nm_reqq, rep, r_chain);
1980 TAILQ_INSERT_TAIL(&nmp->nm_reqrxq, rep, r_chain);
1981 KKASSERT(rep->r_info->state == NFSM_STATE_TRY ||
1982 rep->r_info->state == NFSM_STATE_WAITREPLY);
1983 rep->r_info->state = NFSM_STATE_PROCESSREPLY;
1984 nfssvc_iod_reader_wakeup(nmp);
1985 if (TAILQ_FIRST(&nmp->nm_bioq) &&
1986 nmp->nm_reqqlen <= nfs_maxasyncbio * 2 / 3) {
1987 nfssvc_iod_writer_wakeup(nmp);
1990 mtx_abort_ex_link(&nmp->nm_rxlock, &rep->r_link);
1995 * Test for a termination condition pending on the process.
1996 * This is used for NFSMNT_INT mounts.
1999 nfs_sigintr(struct nfsmount *nmp, struct nfsreq *rep, struct thread *td)
2005 if (rep && (rep->r_flags & R_SOFTTERM))
2007 /* Terminate all requests while attempting a forced unmount. */
2008 if (nmp->nm_mountp->mnt_kern_flag & MNTK_UNMOUNTF)
2010 if (!(nmp->nm_flag & NFSMNT_INT))
2012 /* td might be NULL YYY */
2013 if (td == NULL || (p = td->td_proc) == NULL)
2017 tmpset = lwp_sigpend(lp);
2018 SIGSETNAND(tmpset, lp->lwp_sigmask);
2019 SIGSETNAND(tmpset, p->p_sigignore);
2020 if (SIGNOTEMPTY(tmpset) && NFSINT_SIGMASK(tmpset))
2027 * Lock a socket against others.
2028 * Necessary for STREAM sockets to ensure you get an entire rpc request/reply
2029 * and also to avoid race conditions between the processes with nfs requests
2030 * in progress when a reconnect is necessary.
2033 nfs_sndlock(struct nfsmount *nmp, struct nfsreq *rep)
2035 mtx_t mtx = &nmp->nm_txlock;
2043 td = rep ? rep->r_td : NULL;
2044 if (nmp->nm_flag & NFSMNT_INT)
2047 while ((error = mtx_lock_ex_try(mtx)) != 0) {
2048 if (nfs_sigintr(nmp, rep, td)) {
2052 error = mtx_lock_ex(mtx, "nfsndlck", slpflag, slptimeo);
2055 if (slpflag == PCATCH) {
2060 /* Always fail if our request has been cancelled. */
2061 if (rep && (rep->r_flags & R_SOFTTERM)) {
2070 * Unlock the stream socket for others.
2073 nfs_sndunlock(struct nfsmount *nmp)
2075 mtx_unlock(&nmp->nm_txlock);
2079 * Lock the receiver side of the socket.
2084 nfs_rcvlock(struct nfsmount *nmp, struct nfsreq *rep)
2086 mtx_t mtx = &nmp->nm_rxlock;
2092 * Unconditionally check for completion in case another nfsiod
2093 * got the packet while the caller was blocked, before the caller
2094 * called us. Packet reception is handled by mainline code which
2095 * is protected by the BGL at the moment.
2097 * We do not strictly need the second check just before the
2098 * tsleep(), but it's good defensive programming.
2100 if (rep && rep->r_mrep != NULL)
2103 if (nmp->nm_flag & NFSMNT_INT)
2109 while ((error = mtx_lock_ex_try(mtx)) != 0) {
2110 if (nfs_sigintr(nmp, rep, (rep ? rep->r_td : NULL))) {
2114 if (rep && rep->r_mrep != NULL) {
2120 * NOTE: can return ENOLCK, but in that case rep->r_mrep
2121 * will already be set.
2124 error = mtx_lock_ex_link(mtx, &rep->r_link,
2128 error = mtx_lock_ex(mtx, "nfsrcvlk", slpflag, slptimeo);
2134 * If our reply was received while we were sleeping,
2135 * then just return without taking the lock to avoid a
2136 * situation where a single iod could 'capture' the
2139 if (rep && rep->r_mrep != NULL) {
2143 if (slpflag == PCATCH) {
2149 if (rep && rep->r_mrep != NULL) {
2158 * Unlock the stream socket for others.
2161 nfs_rcvunlock(struct nfsmount *nmp)
2163 mtx_unlock(&nmp->nm_rxlock);
2169 * Check for badly aligned mbuf data and realign by copying the unaligned
2170 * portion of the data into a new mbuf chain and freeing the portions
2171 * of the old chain that were replaced.
2173 * We cannot simply realign the data within the existing mbuf chain
2174 * because the underlying buffers may contain other rpc commands and
2175 * we cannot afford to overwrite them.
2177 * We would prefer to avoid this situation entirely. The situation does
2178 * not occur with NFS/UDP and is supposed to only occasionally occur
2179 * with TCP. Use vfs.nfs.realign_count and realign_test to check this.
2181 * NOTE! MB_DONTWAIT cannot be used here. The mbufs must be acquired
2182 * because the rpc request OR reply cannot be thrown away. TCP NFS
2183 * mounts do not retry their RPCs unless the TCP connection itself
2184 * is dropped, so throwing away an RPC will basically cause the NFS
2185 * operation to lock up indefinitely.
2188 nfs_realign(struct mbuf **pm, int hsiz)
2191 struct mbuf *n = NULL;
2194 * Check for misalignment
2197 while ((m = *pm) != NULL) {
2198 if ((m->m_len & 0x3) || (mtod(m, intptr_t) & 0x3))
2204 * If misalignment found make a completely new copy.
2207 ++nfs_realign_count;
2208 n = m_dup_data(m, MB_WAIT);
2214 #ifndef NFS_NOSERVER
2217 * Parse an RPC request
2219 * - fill in the cred struct.
2222 nfs_getreq(struct nfsrv_descript *nd, struct nfsd *nfsd, int has_header)
2229 u_int32_t nfsvers, auth_type;
2231 int error = 0, ticklen;
2232 struct nfsuid *nuidp;
2233 struct timeval tvin, tvout;
2234 struct nfsm_info info;
2235 #if 0 /* until encrypted keys are implemented */
2236 NFSKERBKEYSCHED_T keys; /* stores key schedule */
2239 info.mrep = nd->nd_mrep;
2240 info.md = nd->nd_md;
2241 info.dpos = nd->nd_dpos;
2244 NULLOUT(tl = nfsm_dissect(&info, 10 * NFSX_UNSIGNED));
2245 nd->nd_retxid = fxdr_unsigned(u_int32_t, *tl++);
2246 if (*tl++ != rpc_call) {
2251 NULLOUT(tl = nfsm_dissect(&info, 8 * NFSX_UNSIGNED));
2255 if (*tl++ != rpc_vers) {
2256 nd->nd_repstat = ERPCMISMATCH;
2257 nd->nd_procnum = NFSPROC_NOOP;
2260 if (*tl != nfs_prog) {
2261 nd->nd_repstat = EPROGUNAVAIL;
2262 nd->nd_procnum = NFSPROC_NOOP;
2266 nfsvers = fxdr_unsigned(u_int32_t, *tl++);
2267 if (nfsvers < NFS_VER2 || nfsvers > NFS_VER3) {
2268 nd->nd_repstat = EPROGMISMATCH;
2269 nd->nd_procnum = NFSPROC_NOOP;
2272 if (nfsvers == NFS_VER3)
2273 nd->nd_flag = ND_NFSV3;
2274 nd->nd_procnum = fxdr_unsigned(u_int32_t, *tl++);
2275 if (nd->nd_procnum == NFSPROC_NULL)
2277 if (nd->nd_procnum >= NFS_NPROCS ||
2278 (nd->nd_procnum >= NQNFSPROC_GETLEASE) ||
2279 (!nd->nd_flag && nd->nd_procnum > NFSV2PROC_STATFS)) {
2280 nd->nd_repstat = EPROCUNAVAIL;
2281 nd->nd_procnum = NFSPROC_NOOP;
2284 if ((nd->nd_flag & ND_NFSV3) == 0)
2285 nd->nd_procnum = nfsv3_procid[nd->nd_procnum];
2287 len = fxdr_unsigned(int, *tl++);
2288 if (len < 0 || len > RPCAUTH_MAXSIZ) {
2293 nd->nd_flag &= ~ND_KERBAUTH;
2295 * Handle auth_unix or auth_kerb.
2297 if (auth_type == rpc_auth_unix) {
2298 len = fxdr_unsigned(int, *++tl);
2299 if (len < 0 || len > NFS_MAXNAMLEN) {
2303 ERROROUT(nfsm_adv(&info, nfsm_rndup(len)));
2304 NULLOUT(tl = nfsm_dissect(&info, 3 * NFSX_UNSIGNED));
2305 bzero((caddr_t)&nd->nd_cr, sizeof (struct ucred));
2306 nd->nd_cr.cr_ref = 1;
2307 nd->nd_cr.cr_uid = fxdr_unsigned(uid_t, *tl++);
2308 nd->nd_cr.cr_ruid = nd->nd_cr.cr_svuid = nd->nd_cr.cr_uid;
2309 nd->nd_cr.cr_gid = fxdr_unsigned(gid_t, *tl++);
2310 nd->nd_cr.cr_rgid = nd->nd_cr.cr_svgid = nd->nd_cr.cr_gid;
2311 len = fxdr_unsigned(int, *tl);
2312 if (len < 0 || len > RPCAUTH_UNIXGIDS) {
2316 NULLOUT(tl = nfsm_dissect(&info, (len + 2) * NFSX_UNSIGNED));
2317 for (i = 1; i <= len; i++)
2319 nd->nd_cr.cr_groups[i] = fxdr_unsigned(gid_t, *tl++);
2322 nd->nd_cr.cr_ngroups = (len >= NGROUPS) ? NGROUPS : (len + 1);
2323 if (nd->nd_cr.cr_ngroups > 1)
2324 nfsrvw_sort(nd->nd_cr.cr_groups, nd->nd_cr.cr_ngroups);
2325 len = fxdr_unsigned(int, *++tl);
2326 if (len < 0 || len > RPCAUTH_MAXSIZ) {
2331 ERROROUT(nfsm_adv(&info, nfsm_rndup(len)));
2333 } else if (auth_type == rpc_auth_kerb) {
2334 switch (fxdr_unsigned(int, *tl++)) {
2335 case RPCAKN_FULLNAME:
2336 ticklen = fxdr_unsigned(int, *tl);
2337 *((u_int32_t *)nfsd->nfsd_authstr) = *tl;
2338 uio.uio_resid = nfsm_rndup(ticklen) + NFSX_UNSIGNED;
2339 nfsd->nfsd_authlen = uio.uio_resid + NFSX_UNSIGNED;
2340 if (uio.uio_resid > (len - 2 * NFSX_UNSIGNED)) {
2347 uio.uio_segflg = UIO_SYSSPACE;
2348 iov.iov_base = (caddr_t)&nfsd->nfsd_authstr[4];
2349 iov.iov_len = RPCAUTH_MAXSIZ - 4;
2350 ERROROUT(nfsm_mtouio(&info, &uio, uio.uio_resid));
2351 NULLOUT(tl = nfsm_dissect(&info, 2 * NFSX_UNSIGNED));
2352 if (*tl++ != rpc_auth_kerb ||
2353 fxdr_unsigned(int, *tl) != 4 * NFSX_UNSIGNED) {
2354 kprintf("Bad kerb verifier\n");
2355 nd->nd_repstat = (NFSERR_AUTHERR|AUTH_BADVERF);
2356 nd->nd_procnum = NFSPROC_NOOP;
2359 NULLOUT(cp = nfsm_dissect(&info, 4 * NFSX_UNSIGNED));
2360 tl = (u_int32_t *)cp;
2361 if (fxdr_unsigned(int, *tl) != RPCAKN_FULLNAME) {
2362 kprintf("Not fullname kerb verifier\n");
2363 nd->nd_repstat = (NFSERR_AUTHERR|AUTH_BADVERF);
2364 nd->nd_procnum = NFSPROC_NOOP;
2367 cp += NFSX_UNSIGNED;
2368 bcopy(cp, nfsd->nfsd_verfstr, 3 * NFSX_UNSIGNED);
2369 nfsd->nfsd_verflen = 3 * NFSX_UNSIGNED;
2370 nd->nd_flag |= ND_KERBFULL;
2371 nfsd->nfsd_flag |= NFSD_NEEDAUTH;
2373 case RPCAKN_NICKNAME:
2374 if (len != 2 * NFSX_UNSIGNED) {
2375 kprintf("Kerb nickname short\n");
2376 nd->nd_repstat = (NFSERR_AUTHERR|AUTH_BADCRED);
2377 nd->nd_procnum = NFSPROC_NOOP;
2380 nickuid = fxdr_unsigned(uid_t, *tl);
2381 NULLOUT(tl = nfsm_dissect(&info, 2 * NFSX_UNSIGNED));
2382 if (*tl++ != rpc_auth_kerb ||
2383 fxdr_unsigned(int, *tl) != 3 * NFSX_UNSIGNED) {
2384 kprintf("Kerb nick verifier bad\n");
2385 nd->nd_repstat = (NFSERR_AUTHERR|AUTH_BADVERF);
2386 nd->nd_procnum = NFSPROC_NOOP;
2389 NULLOUT(tl = nfsm_dissect(&info, 3 * NFSX_UNSIGNED));
2390 tvin.tv_sec = *tl++;
2393 for (nuidp = NUIDHASH(nfsd->nfsd_slp,nickuid)->lh_first;
2394 nuidp != 0; nuidp = nuidp->nu_hash.le_next) {
2395 if (nuidp->nu_cr.cr_uid == nickuid &&
2397 netaddr_match(NU_NETFAM(nuidp),
2398 &nuidp->nu_haddr, nd->nd_nam2)))
2403 (NFSERR_AUTHERR|AUTH_REJECTCRED);
2404 nd->nd_procnum = NFSPROC_NOOP;
2409 * Now, decrypt the timestamp using the session key
2419 tvout.tv_sec = fxdr_unsigned(long, tvout.tv_sec);
2420 tvout.tv_usec = fxdr_unsigned(long, tvout.tv_usec);
2421 if (nuidp->nu_expire < time_second ||
2422 nuidp->nu_timestamp.tv_sec > tvout.tv_sec ||
2423 (nuidp->nu_timestamp.tv_sec == tvout.tv_sec &&
2424 nuidp->nu_timestamp.tv_usec > tvout.tv_usec)) {
2425 nuidp->nu_expire = 0;
2427 (NFSERR_AUTHERR|AUTH_REJECTVERF);
2428 nd->nd_procnum = NFSPROC_NOOP;
2431 nfsrv_setcred(&nuidp->nu_cr, &nd->nd_cr);
2432 nd->nd_flag |= ND_KERBNICK;
2435 nd->nd_repstat = (NFSERR_AUTHERR | AUTH_REJECTCRED);
2436 nd->nd_procnum = NFSPROC_NOOP;
2440 nd->nd_md = info.md;
2441 nd->nd_dpos = info.dpos;
2450 * Send a message to the originating process's terminal. The thread and/or
2451 * process may be NULL. YYY the thread should not be NULL but there may
2452 * still be some uio_td's that are still being passed as NULL through to
2456 nfs_msg(struct thread *td, char *server, char *msg)
2460 if (td && td->td_proc)
2461 tpr = tprintf_open(td->td_proc);
2464 tprintf(tpr, "nfs server %s: %s\n", server, msg);
2469 #ifndef NFS_NOSERVER
2471 * Socket upcall routine for the nfsd sockets.
2472 * The caddr_t arg is a pointer to the "struct nfssvc_sock".
2473 * Essentially do as much as possible non-blocking, else punt and it will
2474 * be called with MB_WAIT from an nfsd.
2477 nfsrv_rcv(struct socket *so, void *arg, int waitflag)
2479 struct nfssvc_sock *slp = (struct nfssvc_sock *)arg;
2481 struct sockaddr *nam;
2484 int nparallel_wakeup = 0;
2486 if ((slp->ns_flag & SLP_VALID) == 0)
2490 * Do not allow an infinite number of completed RPC records to build
2491 * up before we stop reading data from the socket. Otherwise we could
2492 * end up holding onto an unreasonable number of mbufs for requests
2493 * waiting for service.
2495 * This should give pretty good feedback to the TCP
2496 * layer and prevents a memory crunch for other protocols.
2498 * Note that the same service socket can be dispatched to several
2499 * nfs servers simultaneously.
2501 * the tcp protocol callback calls us with MB_DONTWAIT.
2502 * nfsd calls us with MB_WAIT (typically).
2504 if (waitflag == MB_DONTWAIT && slp->ns_numrec >= nfsd_waiting / 2 + 1) {
2505 slp->ns_flag |= SLP_NEEDQ;
2510 * Handle protocol specifics to parse an RPC request. We always
2511 * pull from the socket using non-blocking I/O.
2513 if (so->so_type == SOCK_STREAM) {
2515 * The data has to be read in an orderly fashion from a TCP
2516 * stream, unlike a UDP socket. It is possible for soreceive
2517 * and/or nfsrv_getstream() to block, so make sure only one
2518 * entity is messing around with the TCP stream at any given
2519 * moment. The receive sockbuf's lock in soreceive is not
2522 * Note that this procedure can be called from any number of
2523 * NFS servers *OR* can be upcalled directly from a TCP
2526 if (slp->ns_flag & SLP_GETSTREAM) {
2527 slp->ns_flag |= SLP_NEEDQ;
2530 slp->ns_flag |= SLP_GETSTREAM;
2533 * Do soreceive(). Pull out as much data as possible without
2536 sbinit(&sio, 1000000000);
2537 flags = MSG_DONTWAIT;
2538 error = so_pru_soreceive(so, &nam, NULL, &sio, NULL, &flags);
2539 if (error || sio.sb_mb == NULL) {
2540 if (error == EWOULDBLOCK)
2541 slp->ns_flag |= SLP_NEEDQ;
2543 slp->ns_flag |= SLP_DISCONN;
2544 slp->ns_flag &= ~SLP_GETSTREAM;
2548 if (slp->ns_rawend) {
2549 slp->ns_rawend->m_next = m;
2550 slp->ns_cc += sio.sb_cc;
2553 slp->ns_cc = sio.sb_cc;
2560 * Now try and parse as many record(s) as we can out of the
2563 error = nfsrv_getstream(slp, waitflag, &nparallel_wakeup);
2566 slp->ns_flag |= SLP_DISCONN;
2568 slp->ns_flag |= SLP_NEEDQ;
2570 slp->ns_flag &= ~SLP_GETSTREAM;
2573 * For UDP soreceive typically pulls just one packet, loop
2574 * to get the whole batch.
2577 sbinit(&sio, 1000000000);
2578 flags = MSG_DONTWAIT;
2579 error = so_pru_soreceive(so, &nam, NULL, &sio,
2582 struct nfsrv_rec *rec;
2583 int mf = (waitflag & MB_DONTWAIT) ?
2584 M_NOWAIT : M_WAITOK;
2585 rec = kmalloc(sizeof(struct nfsrv_rec),
2589 FREE(nam, M_SONAME);
2593 nfs_realign(&sio.sb_mb, 10 * NFSX_UNSIGNED);
2594 rec->nr_address = nam;
2595 rec->nr_packet = sio.sb_mb;
2596 STAILQ_INSERT_TAIL(&slp->ns_rec, rec, nr_link);
2601 if ((so->so_proto->pr_flags & PR_CONNREQUIRED)
2602 && error != EWOULDBLOCK) {
2603 slp->ns_flag |= SLP_DISCONN;
2607 } while (sio.sb_mb);
2611 * If we were upcalled from the tcp protocol layer and we have
2612 * fully parsed records ready to go, or there is new data pending,
2613 * or something went wrong, try to wake up an nfsd thread to deal
2617 if (waitflag == MB_DONTWAIT && (slp->ns_numrec > 0
2618 || (slp->ns_flag & (SLP_NEEDQ | SLP_DISCONN)))) {
2619 nfsrv_wakenfsd(slp, nparallel_wakeup);
2624 * Try and extract an RPC request from the mbuf data list received on a
2625 * stream socket. The "waitflag" argument indicates whether or not it
2629 nfsrv_getstream(struct nfssvc_sock *slp, int waitflag, int *countp)
2631 struct mbuf *m, **mpp;
2634 struct mbuf *om, *m2, *recm;
2638 if (slp->ns_reclen == 0) {
2639 if (slp->ns_cc < NFSX_UNSIGNED)
2642 if (m->m_len >= NFSX_UNSIGNED) {
2643 bcopy(mtod(m, caddr_t), (caddr_t)&recmark, NFSX_UNSIGNED);
2644 m->m_data += NFSX_UNSIGNED;
2645 m->m_len -= NFSX_UNSIGNED;
2647 cp1 = (caddr_t)&recmark;
2648 cp2 = mtod(m, caddr_t);
2649 while (cp1 < ((caddr_t)&recmark) + NFSX_UNSIGNED) {
2650 while (m->m_len == 0) {
2652 cp2 = mtod(m, caddr_t);
2659 slp->ns_cc -= NFSX_UNSIGNED;
2660 recmark = ntohl(recmark);
2661 slp->ns_reclen = recmark & ~0x80000000;
2662 if (recmark & 0x80000000)
2663 slp->ns_flag |= SLP_LASTFRAG;
2665 slp->ns_flag &= ~SLP_LASTFRAG;
2666 if (slp->ns_reclen > NFS_MAXPACKET || slp->ns_reclen <= 0) {
2667 log(LOG_ERR, "%s (%d) from nfs client\n",
2668 "impossible packet length",
2675 * Now get the record part.
2677 * Note that slp->ns_reclen may be 0. Linux sometimes
2678 * generates 0-length RPCs.
2681 if (slp->ns_cc == slp->ns_reclen) {
2683 slp->ns_raw = slp->ns_rawend = NULL;
2684 slp->ns_cc = slp->ns_reclen = 0;
2685 } else if (slp->ns_cc > slp->ns_reclen) {
2690 while (len < slp->ns_reclen) {
2691 if ((len + m->m_len) > slp->ns_reclen) {
2692 m2 = m_copym(m, 0, slp->ns_reclen - len,
2700 m->m_data += slp->ns_reclen - len;
2701 m->m_len -= slp->ns_reclen - len;
2702 len = slp->ns_reclen;
2704 return (EWOULDBLOCK);
2706 } else if ((len + m->m_len) == slp->ns_reclen) {
2726 * Accumulate the fragments into a record.
2728 mpp = &slp->ns_frag;
2730 mpp = &((*mpp)->m_next);
2732 if (slp->ns_flag & SLP_LASTFRAG) {
2733 struct nfsrv_rec *rec;
2734 int mf = (waitflag & MB_DONTWAIT) ? M_NOWAIT : M_WAITOK;
2735 rec = kmalloc(sizeof(struct nfsrv_rec), M_NFSRVDESC, mf);
2737 m_freem(slp->ns_frag);
2739 nfs_realign(&slp->ns_frag, 10 * NFSX_UNSIGNED);
2740 rec->nr_address = NULL;
2741 rec->nr_packet = slp->ns_frag;
2742 STAILQ_INSERT_TAIL(&slp->ns_rec, rec, nr_link);
2746 slp->ns_frag = NULL;
2754 * Sanity check our mbuf chain.
2757 nfs_checkpkt(struct mbuf *m, int len)
2765 panic("nfs_checkpkt: len mismatch %d/%d mbuf %p\n",
2773 nfs_checkpkt(struct mbuf *m __unused, int len __unused)
2780 * Parse an RPC header.
2783 nfsrv_dorec(struct nfssvc_sock *slp, struct nfsd *nfsd,
2784 struct nfsrv_descript **ndp)
2786 struct nfsrv_rec *rec;
2788 struct sockaddr *nam;
2789 struct nfsrv_descript *nd;
2793 if ((slp->ns_flag & SLP_VALID) == 0 || !STAILQ_FIRST(&slp->ns_rec))
2795 rec = STAILQ_FIRST(&slp->ns_rec);
2796 STAILQ_REMOVE_HEAD(&slp->ns_rec, nr_link);
2797 KKASSERT(slp->ns_numrec > 0);
2799 nam = rec->nr_address;
2801 kfree(rec, M_NFSRVDESC);
2802 MALLOC(nd, struct nfsrv_descript *, sizeof (struct nfsrv_descript),
2803 M_NFSRVDESC, M_WAITOK);
2804 nd->nd_md = nd->nd_mrep = m;
2806 nd->nd_dpos = mtod(m, caddr_t);
2807 error = nfs_getreq(nd, nfsd, TRUE);
2810 FREE(nam, M_SONAME);
2812 kfree((caddr_t)nd, M_NFSRVDESC);
2821 * Try to assign service sockets to nfsd threads based on the number
2822 * of new rpc requests that have been queued on the service socket.
2824 * If no nfsd's are available or additional requests are pending, set the
2825 * NFSD_CHECKSLP flag so that one of the running nfsds will go look for
2826 * the work in the nfssvc_sock list when it is finished processing its
2827 * current work. This flag is only cleared when an nfsd can not find
2828 * any new work to perform.
2831 nfsrv_wakenfsd(struct nfssvc_sock *slp, int nparallel)
2835 if ((slp->ns_flag & SLP_VALID) == 0)
2839 TAILQ_FOREACH(nd, &nfsd_head, nfsd_chain) {
2840 if (nd->nfsd_flag & NFSD_WAITING) {
2841 nd->nfsd_flag &= ~NFSD_WAITING;
2843 panic("nfsd wakeup");
2846 wakeup((caddr_t)nd);
2847 if (--nparallel == 0)
2852 slp->ns_flag |= SLP_DOREC;
2853 nfsd_head_flag |= NFSD_CHECKSLP;
2856 #endif /* NFS_NOSERVER */