/*
 * Copyright (c) 1989, 1991, 1993, 1995
 *	The Regents of the University of California.  All rights reserved.
 *
 * This code is derived from software contributed to Berkeley by
 * Rick Macklem at The University of Guelph.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by the University of
 *	California, Berkeley and its contributors.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	@(#)nfs_socket.c	8.5 (Berkeley) 3/30/95
 * $FreeBSD: src/sys/nfs/nfs_socket.c,v 1.60.2.6 2003/03/26 01:44:46 alfred Exp $
 */
/*
 * Socket operations for use by nfs
 */

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/malloc.h>
#include <sys/mount.h>
#include <sys/kernel.h>
#include <sys/vnode.h>
#include <sys/fcntl.h>
#include <sys/protosw.h>
#include <sys/resourcevar.h>
#include <sys/socket.h>
#include <sys/socketvar.h>
#include <sys/socketops.h>
#include <sys/syslog.h>
#include <sys/thread.h>
#include <sys/tprintf.h>
#include <sys/sysctl.h>
#include <sys/signalvar.h>

#include <sys/signal2.h>
#include <sys/mutex2.h>
#include <sys/socketvar2.h>

#include <netinet/in.h>
#include <netinet/tcp.h>
#include <sys/thread2.h>

#include "nfsm_subs.h"
/*
 * RTT calculations are scaled by 256 (8 bits).  A proper fractional
 * RTT will still be calculated even with a slow NFS timer.
 */
#define	NFS_SRTT(r)	(r)->r_nmp->nm_srtt[proct[(r)->r_procnum]]
#define	NFS_SDRTT(r)	(r)->r_nmp->nm_sdrtt[proct[(r)->r_procnum]]
#define	NFS_RTT_SCALE_BITS	8	/* bits */
#define	NFS_RTT_SCALE		256	/* value */
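
/*
 * Illustrative note (not part of the original source): with 8 scale bits
 * a smoothed RTT of 2.5 ticks is stored as 2.5 * 256 == 640, so the
 * fraction survives integer arithmetic.  Unscaling is a right shift,
 * e.g. (640 >> NFS_RTT_SCALE_BITS) == 2 ticks.
 */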
/*
 * Defines which timer to use for the procnum.
 */
static int proct[NFS_NPROCS] = {
	0, 1, 0, 2, 1, 3, 3, 4, 0, 0,	/* 00-09 */
	0, 0, 0, 0, 0, 0, 3, 3, 0, 0,	/* 10-19 */
	0, 5, 0, 0, 0, 0,		/* 20-29 */
};

static int multt[NFS_NPROCS] = {
	1, 1, 1, 1, 1, 1, 1, 1, 1, 1,	/* 00-09 */
	1, 1, 1, 1, 1, 1, 1, 1, 1, 1,	/* 10-19 */
	1, 2, 1, 1, 1, 1,		/* 20-29 */
};

static int nfs_backoff[8] = { 2, 3, 5, 8, 13, 21, 34, 55 };
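
/*
 * Illustrative note (not part of the original source): nfs_backoff[] is a
 * Fibonacci-like multiplier indexed by nm_timeouts.  With a base timeout
 * of 2 ticks, the third consecutive timeout waits 2 * nfs_backoff[2] == 10
 * ticks; nfs_timer_req() later clamps the product at NFS_MAXTIMEO.
 */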
static int nfs_realign_test;
static int nfs_realign_count;
static int nfs_showrtt;
static int nfs_showrexmit;

int nfs_maxasyncbio = NFS_MAXASYNCBIO;

SYSCTL_DECL(_vfs_nfs);

SYSCTL_INT(_vfs_nfs, OID_AUTO, realign_test, CTLFLAG_RW, &nfs_realign_test, 0,
    "Number of times mbufs have been tested for bad alignment");
SYSCTL_INT(_vfs_nfs, OID_AUTO, realign_count, CTLFLAG_RW, &nfs_realign_count, 0,
    "Number of realignments for badly aligned mbuf data");
SYSCTL_INT(_vfs_nfs, OID_AUTO, showrtt, CTLFLAG_RW, &nfs_showrtt, 0,
    "Show round trip time output");
SYSCTL_INT(_vfs_nfs, OID_AUTO, showrexmit, CTLFLAG_RW, &nfs_showrexmit, 0,
    "Show retransmits info");
SYSCTL_INT(_vfs_nfs, OID_AUTO, maxasyncbio, CTLFLAG_RW, &nfs_maxasyncbio, 0,
    "Max number of asynchronous bio's");

static int nfs_request_setup(nfsm_info_t info);
static int nfs_request_auth(struct nfsreq *rep);
static int nfs_request_try(struct nfsreq *rep);
static int nfs_request_waitreply(struct nfsreq *rep);
static int nfs_request_processreply(nfsm_info_t info, int);

struct nfsrtt nfsrtt;
struct callout	nfs_timer_handle;

static int	nfs_msg (struct thread *, char *, char *);
static int	nfs_rcvlock (struct nfsmount *nmp, struct nfsreq *myreq);
static void	nfs_rcvunlock (struct nfsmount *nmp);
static void	nfs_realign (struct mbuf **pm, int hsiz);
static int	nfs_receive (struct nfsmount *nmp, struct nfsreq *rep,
				struct sockaddr **aname, struct mbuf **mp);
static void	nfs_softterm (struct nfsreq *rep, int islocked);
static void	nfs_hardterm (struct nfsreq *rep, int islocked);
static int	nfs_reconnect (struct nfsmount *nmp, struct nfsreq *rep);
static int	nfsrv_getstream (struct nfssvc_sock *, int, int *);
static void	nfs_timer_req(struct nfsreq *req);
static void	nfs_checkpkt(struct mbuf *m, int len);

int (*nfsrv3_procs[NFS_NPROCS]) (struct nfsrv_descript *nd,
				 struct nfssvc_sock *slp,
				 struct thread *td,
				 struct mbuf **mreqp) = {
#endif /* NFS_NOSERVER */
/*
 * Initialize sockets and congestion for a new NFS connection.
 * We do not free the sockaddr if error.
 */
nfs_connect(struct nfsmount *nmp, struct nfsreq *rep)
{
	struct socket *so;
	struct sockaddr *saddr;
	struct sockaddr_in *sin;
	struct thread *td = &thread0; /* only used for socreate and sobind */
	int error;

	nmp->nm_so = so = NULL;
	if (nmp->nm_flag & NFSMNT_FORCE)
		return (EINVAL);
	saddr = nmp->nm_nam;
	error = socreate(saddr->sa_family, &so, nmp->nm_sotype,
			 nmp->nm_soproto, td);
	if (error)
		goto bad;
	nmp->nm_soflags = so->so_proto->pr_flags;

	/*
	 * Some servers require that the client port be a reserved port number.
	 */
	if (saddr->sa_family == AF_INET && (nmp->nm_flag & NFSMNT_RESVPORT)) {
		struct sockopt sopt;
		int ip;
		struct sockaddr_in ssin;

		bzero(&sopt, sizeof sopt);
		ip = IP_PORTRANGE_LOW;
		sopt.sopt_level = IPPROTO_IP;
		sopt.sopt_name = IP_PORTRANGE;
		sopt.sopt_val = (void *)&ip;
		sopt.sopt_valsize = sizeof(ip);
		error = sosetopt(so, &sopt);
		if (error)
			goto bad;
		bzero(&ssin, sizeof ssin);
		sin = &ssin;
		sin->sin_len = sizeof (struct sockaddr_in);
		sin->sin_family = AF_INET;
		sin->sin_addr.s_addr = INADDR_ANY;
		sin->sin_port = htons(0);
		error = sobind(so, (struct sockaddr *)sin, td);
		if (error)
			goto bad;
		bzero(&sopt, sizeof sopt);
		ip = IP_PORTRANGE_DEFAULT;
		sopt.sopt_level = IPPROTO_IP;
		sopt.sopt_name = IP_PORTRANGE;
		sopt.sopt_val = (void *)&ip;
		sopt.sopt_valsize = sizeof(ip);
		error = sosetopt(so, &sopt);
		if (error)
			goto bad;
	}

	/*
	 * Protocols that do not require connections may be optionally left
	 * unconnected for servers that reply from a port other than NFS_PORT.
	 */
	if (nmp->nm_flag & NFSMNT_NOCONN) {
		if (nmp->nm_soflags & PR_CONNREQUIRED) {
			error = ENOTCONN;
			goto bad;
		}
	} else {
		error = soconnect(so, nmp->nm_nam, td);
		if (error)
			goto bad;

		/*
		 * Wait for the connection to complete. Cribbed from the
		 * connect system call but with the wait timing out so
		 * that interruptible mounts don't hang here for a long time.
		 */
		while ((so->so_state & SS_ISCONNECTING) && so->so_error == 0) {
			(void) tsleep((caddr_t)&so->so_timeo, 0,
				      "nfscon", 2 * hz);
			if ((so->so_state & SS_ISCONNECTING) &&
			    so->so_error == 0 && rep &&
			    (error = nfs_sigintr(nmp, rep, rep->r_td)) != 0) {
				soclrstate(so, SS_ISCONNECTING);
				goto bad;
			}
		}
		if (so->so_error) {
			error = so->so_error;
			so->so_error = 0;
			goto bad;
		}
	}
	so->so_rcv.ssb_timeo = (5 * hz);
	so->so_snd.ssb_timeo = (5 * hz);

	/*
	 * Get buffer reservation size from sysctl, but impose reasonable
	 * bounds.
	 */
	if (nmp->nm_sotype == SOCK_STREAM) {
		if (so->so_proto->pr_flags & PR_CONNREQUIRED) {
			struct sockopt sopt;
			int val;

			bzero(&sopt, sizeof sopt);
			sopt.sopt_level = SOL_SOCKET;
			sopt.sopt_name = SO_KEEPALIVE;
			sopt.sopt_val = &val;
			sopt.sopt_valsize = sizeof val;
			val = 1;
			sosetopt(so, &sopt);
		}
		if (so->so_proto->pr_protocol == IPPROTO_TCP) {
			struct sockopt sopt;
			int val;

			bzero(&sopt, sizeof sopt);
			sopt.sopt_level = IPPROTO_TCP;
			sopt.sopt_name = TCP_NODELAY;
			sopt.sopt_val = &val;
			sopt.sopt_valsize = sizeof val;
			val = 1;
			sosetopt(so, &sopt);

			bzero(&sopt, sizeof sopt);
			sopt.sopt_level = IPPROTO_TCP;
			sopt.sopt_name = TCP_FASTKEEP;
			sopt.sopt_val = &val;
			sopt.sopt_valsize = sizeof val;
			val = 1;
			sosetopt(so, &sopt);
		}
	}
	error = soreserve(so, nfs_soreserve, nfs_soreserve, NULL);
	if (error)
		goto bad;
	atomic_set_int(&so->so_rcv.ssb_flags, SSB_NOINTR);
	atomic_set_int(&so->so_snd.ssb_flags, SSB_NOINTR);

	/* Initialize other non-zero congestion variables */
	nmp->nm_srtt[0] = nmp->nm_srtt[1] = nmp->nm_srtt[2] =
		nmp->nm_srtt[3] = (NFS_TIMEO << NFS_RTT_SCALE_BITS);
	nmp->nm_sdrtt[0] = nmp->nm_sdrtt[1] = nmp->nm_sdrtt[2] =
		nmp->nm_sdrtt[3] = 0;
	nmp->nm_maxasync_scaled = NFS_MINASYNC_SCALED;
	nmp->nm_timeouts = 0;

	/*
	 * Assign nm_so last.  The moment nm_so is assigned the nfs_timer()
	 * can mess with the socket.
	 */
	nmp->nm_so = so;
	return (0);
bad:
	if (so) {
		soshutdown(so, SHUT_RDWR);
		soclose(so, FNONBLOCK);
	}
	return (error);
}
/*
 * Called when a connection is broken on a reliable protocol.
 * - clean up the old socket
 * - nfs_connect() again
 * - set R_NEEDSXMIT for all outstanding requests on mount point
 * If this fails the mount point is DEAD!
 * nb: Must be called with the nfs_sndlock() set on the mount point.
 */
nfs_reconnect(struct nfsmount *nmp, struct nfsreq *rep)
{
	struct nfsreq *req;
	int error;

	nfs_disconnect(nmp);
	if (nmp->nm_rxstate >= NFSSVC_STOPPING)
		return (EINTR);
	while ((error = nfs_connect(nmp, rep)) != 0) {
		if (error == EINTR || error == ERESTART)
			return (EINTR);
		if (nmp->nm_rxstate >= NFSSVC_STOPPING)
			return (EINTR);
		(void) tsleep((caddr_t)&lbolt, 0, "nfscon", 0);
	}

	/*
	 * Loop through outstanding request list and fix up all requests
	 * on old socket.
	 */
	TAILQ_FOREACH(req, &nmp->nm_reqq, r_chain) {
		KKASSERT(req->r_nmp == nmp);
		req->r_flags |= R_NEEDSXMIT;
	}
	return (0);
}

/*
 * NFS disconnect.  Clean up and unlink.
 */
nfs_disconnect(struct nfsmount *nmp)
{
	struct socket *so;

	if (nmp->nm_so) {
		so = nmp->nm_so;
		nmp->nm_so = NULL;
		soshutdown(so, SHUT_RDWR);
		soclose(so, FNONBLOCK);
	}
}

nfs_safedisconnect(struct nfsmount *nmp)
{
	nfs_rcvlock(nmp, NULL);
	nfs_disconnect(nmp);
	nfs_rcvunlock(nmp);
}
/*
 * This is the nfs send routine. For connection based socket types, it
 * must be called with an nfs_sndlock() on the socket.
 * "rep == NULL" indicates that it has been called from a server.
 * For the client side:
 * - return EINTR if the RPC is terminated, 0 otherwise
 * - set R_NEEDSXMIT if the send fails for any reason
 * - do any cleanup required by recoverable socket errors (?)
 * For the server side:
 * - return EINTR or ERESTART if interrupted by a signal
 * - return EPIPE if a connection is lost for connection based sockets (TCP...)
 * - do any cleanup required by recoverable socket errors (?)
 */
nfs_send(struct socket *so, struct sockaddr *nam, struct mbuf *top,
	 struct nfsreq *rep)
{
	struct sockaddr *sendnam;
	int error, soflags, flags;

	if (rep) {
		if (rep->r_flags & R_SOFTTERM) {
			m_freem(top);
			return (EINTR);
		}
		if ((so = rep->r_nmp->nm_so) == NULL) {
			rep->r_flags |= R_NEEDSXMIT;
			m_freem(top);
			return (0);
		}
		rep->r_flags &= ~R_NEEDSXMIT;
		soflags = rep->r_nmp->nm_soflags;
	} else {
		soflags = so->so_proto->pr_flags;
	}
	if ((soflags & PR_CONNREQUIRED) || (so->so_state & SS_ISCONNECTED))
		sendnam = NULL;
	else
		sendnam = nam;
	if (so->so_type == SOCK_SEQPACKET)
		flags = MSG_EOR;
	else
		flags = 0;

	/*
	 * calls pru_sosend -> sosend -> so_pru_send -> netrpc
	 */
	error = so_pru_sosend(so, sendnam, NULL, top, NULL, flags,
			      curthread);

	/*
	 * ENOBUFS for dgram sockets is transient and non-fatal.
	 * No need to log, and no need to break a soft mount.
	 */
	if (error == ENOBUFS && so->so_type == SOCK_DGRAM) {
		error = 0;
		/*
		 * do backoff retransmit on client
		 */
		if (rep) {
			if ((rep->r_nmp->nm_state & NFSSTA_SENDSPACE) == 0) {
				rep->r_nmp->nm_state |= NFSSTA_SENDSPACE;
				kprintf("Warning: NFS: Insufficient sendspace "
					"(%lu),\n"
					"\t You must increase vfs.nfs.soreserve "
					"or decrease vfs.nfs.maxasyncbio\n",
					so->so_snd.ssb_hiwat);
			}
			rep->r_flags |= R_NEEDSXMIT;
		}
	}

	if (error) {
		if (rep) {
			log(LOG_INFO, "nfs send error %d for server %s\n", error,
			    rep->r_nmp->nm_mountp->mnt_stat.f_mntfromname);
			/*
			 * Deal with errors for the client side.
			 */
			if (rep->r_flags & R_SOFTTERM)
				error = EINTR;
			else
				rep->r_flags |= R_NEEDSXMIT;
		} else {
			log(LOG_INFO, "nfsd send error %d\n", error);
		}

		/*
		 * Handle any recoverable (soft) socket errors here. (?)
		 */
		if (error != EINTR && error != ERESTART &&
		    error != EWOULDBLOCK && error != EPIPE)
			error = 0;
	}
	return (error);
}
/*
 * Receive a Sun RPC Request/Reply.  For SOCK_DGRAM, the work is all
 * done by soreceive(), but for SOCK_STREAM we must deal with the Record
 * Mark and consolidate the data into a new mbuf list.
 * nb: Sometimes TCP passes the data up to soreceive() in long lists of
 *     small mbufs.
 * For SOCK_STREAM we must be very careful to read an entire record once
 * we have read any of it, even if the system call has been interrupted.
 */
nfs_receive(struct nfsmount *nmp, struct nfsreq *rep,
	    struct sockaddr **aname, struct mbuf **mp)
{
	struct socket *so;
	struct sockbuf sio;
	struct uio auio;
	struct iovec aio;
	struct mbuf *m;
	struct mbuf *control;
	u_int32_t len;
	struct sockaddr **getnam;
	int error, sotype, rcvflg;
	struct thread *td = curthread; /* XXX */

	/*
	 * Set up arguments for soreceive()
	 */
	*mp = NULL;
	*aname = NULL;
	sotype = nmp->nm_sotype;

	/*
	 * For reliable protocols, lock against other senders/receivers
	 * in case a reconnect is necessary.
	 * For SOCK_STREAM, first get the Record Mark to find out how much
	 * more there is to get.
	 * We must lock the socket against other receivers
	 * until we have an entire rpc request/reply.
	 */
	if (sotype != SOCK_DGRAM) {
		error = nfs_sndlock(nmp, rep);
		if (error)
			return (error);
tryagain:
		/*
		 * Check for fatal errors and resending request.
		 *
		 * Ugh: If a reconnect attempt just happened, nm_so
		 * would have changed. NULL indicates a failed
		 * attempt that has essentially shut down this
		 * mount point.
		 */
		if (rep && (rep->r_mrep || (rep->r_flags & R_SOFTTERM))) {
			nfs_sndunlock(nmp);
			return (EINTR);
		}
		if ((so = nmp->nm_so) == NULL) {
			error = nfs_reconnect(nmp, rep);
			if (error) {
				nfs_sndunlock(nmp);
				return (error);
			}
			goto tryagain;
		}
		while (rep && (rep->r_flags & R_NEEDSXMIT)) {
			m = m_copym(rep->r_mreq, 0, M_COPYALL, MB_WAIT);
			nfsstats.rpcretries++;
			error = nfs_send(so, rep->r_nmp->nm_nam, m, rep);
			if (error) {
				if (error == EINTR || error == ERESTART ||
				    (error = nfs_reconnect(nmp, rep)) != 0) {
					nfs_sndunlock(nmp);
					return (error);
				}
				goto tryagain;
			}
		}
		if (sotype == SOCK_STREAM) {
			/*
			 * Get the length marker from the stream
			 */
			aio.iov_base = (caddr_t)&len;
			aio.iov_len = sizeof(u_int32_t);
			auio.uio_iov = &aio;
			auio.uio_iovcnt = 1;
			auio.uio_segflg = UIO_SYSSPACE;
			auio.uio_rw = UIO_READ;
			auio.uio_offset = 0;
			auio.uio_resid = sizeof(u_int32_t);
			auio.uio_td = td;
			do {
				rcvflg = MSG_WAITALL;
				error = so_pru_soreceive(so, NULL, &auio, NULL,
							 NULL, &rcvflg);
				if (error == EWOULDBLOCK && rep) {
					if (rep->r_flags & R_SOFTTERM)
						return (EINTR);
				}
			} while (error == EWOULDBLOCK);

			if (error == 0 && auio.uio_resid > 0) {
				/*
				 * Only log short packets if not EOF
				 */
				if (auio.uio_resid != sizeof(u_int32_t)) {
					log(LOG_INFO,
					    "short receive (%d/%d) from nfs server %s\n",
					    (int)(sizeof(u_int32_t) - auio.uio_resid),
					    (int)sizeof(u_int32_t),
					    nmp->nm_mountp->mnt_stat.f_mntfromname);
				}
				error = EPIPE;
			}
			if (error)
				goto errout;
			len = ntohl(len) & ~0x80000000;
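			/*
			 * Illustrative note (not part of the original
			 * source): the record mark's high bit flags the
			 * last fragment and the low 31 bits carry the
			 * fragment length, so a marker of 0x8000012c
			 * decodes as a final fragment of 0x12c == 300
			 * bytes.
			 */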
			/*
			 * This is SERIOUS! We are out of sync with the sender
			 * and forcing a disconnect/reconnect is all I can do.
			 */
			if (len > NFS_MAXPACKET) {
				log(LOG_ERR, "%s (%d) from nfs server %s\n",
				    "impossible packet length",
				    len,
				    nmp->nm_mountp->mnt_stat.f_mntfromname);
				error = EFBIG;
				goto errout;
			}

			/*
			 * Get the rest of the packet as an mbuf chain
			 */
			sbinit(&sio, len);
			do {
				rcvflg = MSG_WAITALL;
				error = so_pru_soreceive(so, NULL, NULL, &sio,
							 NULL, &rcvflg);
			} while (error == EWOULDBLOCK || error == EINTR ||
				 error == ERESTART);
			if (error == 0 && sio.sb_cc != len) {
				if (sio.sb_cc != 0) {
					log(LOG_INFO,
					    "short receive (%zu/%d) from nfs server %s\n",
					    (size_t)len - auio.uio_resid, len,
					    nmp->nm_mountp->mnt_stat.f_mntfromname);
				}
				error = EPIPE;
			}
			*mp = sio.sb_mb;
		} else {
			/*
			 * Non-stream, so get the whole packet by not
			 * specifying MSG_WAITALL and by specifying a large
			 * length.
			 *
			 * We have no use for control msg., but must grab them
			 * and then throw them away so we know what is going
			 * on.
			 */
			sbinit(&sio, 100000000);
			do {
				rcvflg = 0;
				error = so_pru_soreceive(so, NULL, NULL, &sio,
							 &control, &rcvflg);
				if (control)
					m_freem(control);
				if (error == EWOULDBLOCK && rep) {
					if (rep->r_flags & R_SOFTTERM) {
						m_freem(sio.sb_mb);
						return (EINTR);
					}
				}
			} while (error == EWOULDBLOCK ||
				 (error == 0 && sio.sb_mb == NULL && control));
			if ((rcvflg & MSG_EOR) == 0)
				kprintf("Egad!!\n");
			if (error == 0 && sio.sb_mb == NULL)
				error = EPIPE;
			*mp = sio.sb_mb;
		}
errout:
		if (error && error != EINTR && error != ERESTART) {
			m_freem(*mp);
			*mp = NULL;
			if (error != EPIPE) {
				log(LOG_INFO,
				    "receive error %d from nfs server %s\n",
				    error,
				    nmp->nm_mountp->mnt_stat.f_mntfromname);
			}
			error = nfs_sndlock(nmp, rep);
			if (!error) {
				error = nfs_reconnect(nmp, rep);
				if (!error)
					goto tryagain;
				else
					nfs_sndunlock(nmp);
			}
		}
	} else {
		if ((so = nmp->nm_so) == NULL)
			return (EACCES);
		if (so->so_state & SS_ISCONNECTED)
			getnam = NULL;
		else
			getnam = aname;
		sbinit(&sio, 100000000);
		do {
			rcvflg = 0;
			error = so_pru_soreceive(so, getnam, NULL, &sio,
						 NULL, &rcvflg);
			if (error == EWOULDBLOCK && rep &&
			    (rep->r_flags & R_SOFTTERM)) {
				m_freem(sio.sb_mb);
				return (EINTR);
			}
		} while (error == EWOULDBLOCK);
		*mp = sio.sb_mb;
	}

	/*
	 * A shutdown may result in no error and no mbuf.
	 * Translate this into EPIPE.
	 */
	if (*mp == NULL && error == 0)
		error = EPIPE;

	/*
	 * Search for any mbufs that are not a multiple of 4 bytes long
	 * or with m_data not longword aligned.
	 * These could cause pointer alignment problems, so copy them to
	 * well aligned mbufs.
	 */
	nfs_realign(mp, 5 * NFSX_UNSIGNED);
	return (error);
}
/*
 * Implement receipt of reply on a socket.
 *
 * We must search through the list of received datagrams matching them
 * with outstanding requests using the xid, until ours is found.
 *
 * If myrep is NULL we process packets on the socket until
 * interrupted or until nm_reqrxq is non-empty.
 */
nfs_reply(struct nfsmount *nmp, struct nfsreq *myrep)
{
	struct nfsreq *rep;
	struct sockaddr *nam;
	u_int32_t rxid;
	u_int32_t *tl;
	int error;
	struct nfsm_info info;

	/*
	 * Loop around until we get our own reply
	 */
	for (;;) {
		/*
		 * Lock against other receivers so that I don't get stuck in
		 * sbwait() after someone else has received my reply for me.
		 * Also necessary for connection based protocols to avoid
		 * race conditions during a reconnect.
		 *
		 * If nfs_rcvlock() returns EALREADY, that means that
		 * the reply has already been received by another
		 * process and we can return immediately.  In this
		 * case, the lock is not taken to avoid races with
		 * anyone else.
		 */
		error = nfs_rcvlock(nmp, myrep);
		if (error == EALREADY)
			return (0);
		if (error)
			return (error);

		/*
		 * If myrep is NULL we are the receiver helper thread.
		 * Stop waiting for incoming replies if there are
		 * messages sitting on reqrxq that we need to process,
		 * or if a shutdown request is pending.
		 */
		if (myrep == NULL && (TAILQ_FIRST(&nmp->nm_reqrxq) ||
		    nmp->nm_rxstate > NFSSVC_PENDING)) {
			nfs_rcvunlock(nmp);
			return (EWOULDBLOCK);
		}

		/*
		 * Get the next Rpc reply off the socket
		 *
		 * We cannot release the receive lock until we've
		 * filled in rep->r_mrep, otherwise a waiting
		 * thread may deadlock in soreceive with no incoming
		 * replies expected.
		 */
		error = nfs_receive(nmp, myrep, &nam, &info.mrep);
		if (error) {
			/*
			 * Ignore routing errors on connectionless protocols??
			 */
			nfs_rcvunlock(nmp);
			if (NFSIGNORE_SOERROR(nmp->nm_soflags, error)) {
				if (nmp->nm_so == NULL)
					return (error);
				nmp->nm_so->so_error = 0;
				continue;
			}
			return (error);
		}
		if (nam)
			kfree(nam, M_SONAME);

		/*
		 * Get the xid and check that it is an rpc reply
		 */
		info.md = info.mrep;
		info.dpos = mtod(info.md, caddr_t);
		NULLOUT(tl = nfsm_dissect(&info, 2 * NFSX_UNSIGNED));
		rxid = *tl++;
		if (*tl != rpc_reply) {
			nfsstats.rpcinvalid++;
			m_freem(info.mrep);
			info.mrep = NULL;
nfsmout:
			continue;
		}

		/*
		 * Loop through the request list to match up the reply
		 * Iff no match, just drop the datagram.  On match, set
		 * r_mrep atomically to prevent the timer from messing
		 * around with the request after we have exited the critical
		 * section.
		 */
		TAILQ_FOREACH(rep, &nmp->nm_reqq, r_chain) {
			if (rep->r_mrep == NULL && rxid == rep->r_xid)
				break;
		}

		/*
		 * Fill in the rest of the reply if we found a match.
		 *
		 * Deal with duplicate responses if there was no match.
		 */
		if (rep) {
			rep->r_md = info.md;
			rep->r_dpos = info.dpos;
			if (nfsrtton) {
				struct rttl *rt;

				rt = &nfsrtt.rttl[nfsrtt.pos];
				rt->proc = rep->r_procnum;
				rt->cwnd = nmp->nm_maxasync_scaled;
				rt->srtt = nmp->nm_srtt[proct[rep->r_procnum] - 1];
				rt->sdrtt = nmp->nm_sdrtt[proct[rep->r_procnum] - 1];
				rt->fsid = nmp->nm_mountp->mnt_stat.f_fsid;
				getmicrotime(&rt->tstamp);
				if (rep->r_flags & R_TIMING)
					rt->rtt = rep->r_rtt;
				nfsrtt.pos = (nfsrtt.pos + 1) % NFSRTTLOGSIZ;
			}

			/*
			 * New congestion control is based only on async
			 * requests.
			 */
			if (nmp->nm_maxasync_scaled < NFS_MAXASYNC_SCALED)
				++nmp->nm_maxasync_scaled;
			if (rep->r_flags & R_SENT) {
				rep->r_flags &= ~R_SENT;
			}
			/*
			 * Update rtt using a gain of 0.125 on the mean
			 * and a gain of 0.25 on the deviation.
			 *
			 * NOTE SRTT/SDRTT are only good if R_TIMING is set.
			 */
			if ((rep->r_flags & R_TIMING) && rep->r_rexmit == 0) {
				/*
				 * Since the timer resolution of
				 * NFS_HZ is so coarse, it can often
				 * result in r_rtt == 0.  Since
				 * r_rtt == N means that the actual
				 * rtt is between N+dt and N+2-dt ticks,
				 * add 1.
				 */
				int n;
				int d;

#define NFSRSB	NFS_RTT_SCALE_BITS
				n = ((NFS_SRTT(rep) * 7) +
				     (rep->r_rtt << NFSRSB)) >> 3;
				d = n - NFS_SRTT(rep);
				NFS_SRTT(rep) = n;

				/*
				 * Don't let the jitter calculation decay
				 * too quickly, but we want a fast rampup.
				 */
				if (d < 0)
					d = -d;
				if (d < NFS_SDRTT(rep))
					n = ((NFS_SDRTT(rep) * 15) + d) >> 4;
				else
					n = ((NFS_SDRTT(rep) * 3) + d) >> 2;
				NFS_SDRTT(rep) = n;
			}
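			/*
			 * Illustrative example (not part of the original
			 * source): the SRTT update above moves the mean
			 * 1/8 of the way to the sample.  With a scaled
			 * SRTT of 640 and a new sample of 4 ticks
			 * (4 << 8 == 1024), it yields
			 * (640 * 7 + 1024) >> 3 == 688, i.e. 640 plus
			 * (1024 - 640) / 8.
			 */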
			nmp->nm_timeouts = 0;
			rep->r_mrep = info.mrep;
			nfs_hardterm(rep, 0);
		} else {
			/*
			 * Extract vers, prog, nfsver, procnum.  A duplicate
			 * response means we didn't wait long enough so
			 * we increase the SRTT to avoid future spurious
			 * timeouts.
			 */
			u_int procnum = nmp->nm_lastreprocnum;
			int n;

			if (procnum < NFS_NPROCS && proct[procnum]) {
				n = nmp->nm_srtt[proct[procnum]];
				n += NFS_ASYSCALE * NFS_HZ;
				if (n < NFS_ASYSCALE * NFS_HZ * 10)
					n = NFS_ASYSCALE * NFS_HZ * 10;
				nmp->nm_srtt[proct[procnum]] = n;
			}
		}

		/*
		 * If not matched to a request, drop it.
		 * If it's mine, get out.
		 */
		if (rep == NULL) {
			nfsstats.rpcunexpected++;
			m_freem(info.mrep);
			info.mrep = NULL;
		} else if (rep == myrep) {
			if (rep->r_mrep == NULL)
				panic("nfsreply nil");
			return (0);
		}
	}
}

/*
 * Run the request state machine until the target state is reached
 * or a fatal error occurs.  The target state is not run.  Specifying
 * a target of NFSM_STATE_DONE runs the state machine until the rpc
 * is complete.
 *
 * EINPROGRESS is returned for all states other than the DONE state,
 * indicating that the rpc is still in progress.
 */
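/*
 * Usage sketch (illustrative, not part of the original source): a typical
 * synchronous caller drives the whole machine in one call,
 *
 *	info.state = NFSM_STATE_SETUP;
 *	error = nfs_request(&info, NFSM_STATE_SETUP, NFSM_STATE_DONE);
 *
 * while async callers stop at NFSM_STATE_WAITREPLY and get EINPROGRESS,
 * letting the nfsiod helper threads finish the remaining states.
 */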
nfs_request(struct nfsm_info *info, nfsm_state_t bstate, nfsm_state_t estate)
{
	struct nfsreq *req;

	while (info->state >= bstate && info->state < estate) {
		switch (info->state) {
		case NFSM_STATE_SETUP:
			/*
			 * Setup the nfsreq.  Any error which occurs during
			 * this state is fatal.
			 */
			info->error = nfs_request_setup(info);
			if (info->error) {
				info->state = NFSM_STATE_DONE;
				return (info->error);
			} else {
				req = info->req;
				req->r_mrp = &info->mrep;
				req->r_mdp = &info->md;
				req->r_dposp = &info->dpos;
				info->state = NFSM_STATE_AUTH;
			}
			break;
		case NFSM_STATE_AUTH:
			/*
			 * Authenticate the nfsreq.  Any error which occurs
			 * during this state is fatal.
			 */
			info->error = nfs_request_auth(info->req);
			if (info->error) {
				info->state = NFSM_STATE_DONE;
				return (info->error);
			} else {
				info->state = NFSM_STATE_TRY;
			}
			break;
		case NFSM_STATE_TRY:
			/*
			 * Transmit or retransmit attempt.  An error in this
			 * state is ignored and we always move on to the
			 * next state.
			 *
			 * This can trivially race the receiver if the
			 * request is asynchronous.  nfs_request_try()
			 * will thus set the state for us and we
			 * must also return immediately if we are
			 * running an async state machine, because
			 * info can become invalid due to races after
			 * this point.
			 */
			if (info->req->r_flags & R_ASYNC) {
				nfs_request_try(info->req);
				if (estate == NFSM_STATE_WAITREPLY)
					return (EINPROGRESS);
			} else {
				nfs_request_try(info->req);
				info->state = NFSM_STATE_WAITREPLY;
			}
			break;
		case NFSM_STATE_WAITREPLY:
			/*
			 * Wait for a reply or timeout and move on to the
			 * next state.  The error returned by this state
			 * is passed to the processing code in the next
			 * state.
			 */
			info->error = nfs_request_waitreply(info->req);
			info->state = NFSM_STATE_PROCESSREPLY;
			break;
		case NFSM_STATE_PROCESSREPLY:
			/*
			 * Process the reply or timeout.  Errors which occur
			 * in this state may cause the state machine to
			 * go back to an earlier state, and are fatal
			 * otherwise.
			 */
			info->error = nfs_request_processreply(info,
							       info->error);
			switch (info->error) {
			case ENEEDAUTH:
				info->state = NFSM_STATE_AUTH;
				break;
			case EAGAIN:
				info->state = NFSM_STATE_TRY;
				break;
			default:
				/*
				 * Operation complete, with or without an
				 * error.  We are done.
				 */
				info->state = NFSM_STATE_DONE;
				return (info->error);
			}
			break;
		case NFSM_STATE_DONE:
			/*
			 * Shouldn't be reached
			 */
			return (info->error);
		}
	}

	/*
	 * If we are done return the error code (if any).
	 * Otherwise return EINPROGRESS.
	 */
	if (info->state == NFSM_STATE_DONE)
		return (info->error);
	return (EINPROGRESS);
}
/*
 * nfs_request - goes something like this
 *	- fill in request struct
 *	- links it into list
 *	- calls nfs_send() for first transmit
 *	- calls nfs_receive() to get reply
 *	- break down rpc header and return with nfs reply pointed to
 *	  in mreq.
 * nb: always frees up mreq mbuf list
 */
nfs_request_setup(nfsm_info_t info)
{
	struct nfsreq *req;
	struct nfsmount *nmp;
	struct mbuf *m;
	int i;

	/*
	 * Reject requests while attempting a forced unmount.
	 */
	if (info->vp->v_mount->mnt_kern_flag & MNTK_UNMOUNTF) {
		m_freem(info->mreq);
		info->mreq = NULL;
		return (ESTALE);
	}
	nmp = VFSTONFS(info->vp->v_mount);
	req = kmalloc(sizeof(struct nfsreq), M_NFSREQ, M_WAITOK);
	req->r_nmp = nmp;
	req->r_vp = info->vp;
	req->r_td = info->td;
	req->r_procnum = info->procnum;
	req->r_mreq = NULL;
	req->r_cred = info->cred;
	i = 0;
	m = info->mreq;
	while (m) {
		i += m->m_len;
		m = m->m_next;
	}
	req->r_mrest = info->mreq;
	req->r_mrest_len = i;

	/*
	 * The presence of a non-NULL r_info in req indicates
	 * async completion via our helper threads.  See the receiver
	 * loop.
	 */
	if (info->async) {
		req->r_info = info;
		req->r_flags = R_ASYNC;
	} else {
		req->r_info = NULL;
		req->r_flags = 0;
	}
	info->req = req;
	return (0);
}

nfs_request_auth(struct nfsreq *rep)
{
	struct nfsmount *nmp = rep->r_nmp;
	struct mbuf *m;
	char nickv[RPCX_NICKVERF];
	int error = 0, auth_len, auth_type;
	int verf_len;
	u_int32_t xid;
	char *auth_str, *verf_str;
	struct ucred *cred;

	cred = rep->r_cred;
	rep->r_failed_auth = 0;

	/*
	 * Get the RPC header with authorization.
	 */
	verf_str = auth_str = NULL;
	if (nmp->nm_flag & NFSMNT_KERB) {
		verf_str = nickv;
		verf_len = sizeof (nickv);
		auth_type = RPCAUTH_KERB4;
		bzero((caddr_t)rep->r_key, sizeof(rep->r_key));
		if (rep->r_failed_auth ||
		    nfs_getnickauth(nmp, cred, &auth_str, &auth_len,
				    verf_str, verf_len)) {
			error = nfs_getauth(nmp, rep, cred, &auth_str,
				&auth_len, verf_str, &verf_len, rep->r_key);
			if (error) {
				m_freem(rep->r_mrest);
				rep->r_mrest = NULL;
				kfree((caddr_t)rep, M_NFSREQ);
				return (error);
			}
		}
	} else {
		auth_type = RPCAUTH_UNIX;
		if (cred->cr_ngroups < 1)
			panic("nfsreq nogrps");
		auth_len = ((((cred->cr_ngroups - 1) > nmp->nm_numgrps) ?
			nmp->nm_numgrps : (cred->cr_ngroups - 1)) << 2) +
			5 * NFSX_UNSIGNED;
	}
	nfs_checkpkt(rep->r_mrest, rep->r_mrest_len);
	m = nfsm_rpchead(cred, nmp->nm_flag, rep->r_procnum, auth_type,
			 auth_len, auth_str, verf_len, verf_str,
			 rep->r_mrest, rep->r_mrest_len, &rep->r_mheadend, &xid);
	rep->r_mrest = NULL;
	if (auth_str)
		kfree(auth_str, M_TEMP);

	/*
	 * For stream protocols, insert a Sun RPC Record Mark.
	 */
	if (nmp->nm_sotype == SOCK_STREAM) {
		M_PREPEND(m, NFSX_UNSIGNED, MB_WAIT);
		if (m == NULL) {
			kfree(rep, M_NFSREQ);
			return (ENOBUFS);
		}
		*mtod(m, u_int32_t *) = htonl(0x80000000 |
			 (m->m_pkthdr.len - NFSX_UNSIGNED));
	}

	nfs_checkpkt(m, m->m_pkthdr.len);

	rep->r_mreq = m;
	rep->r_xid = xid;
	return (0);
}
nfs_request_try(struct nfsreq *rep)
{
	struct nfsmount *nmp = rep->r_nmp;
	struct mbuf *m2;
	int error;

	/*
	 * Request is not on any queue, only the owner has access to it
	 * so it should not be locked by anyone atm.
	 *
	 * Interlock to prevent races.  While locked the only remote
	 * action possible is for r_mrep to be set (once we enqueue it).
	 */
	if (rep->r_flags == 0xdeadc0de) {
		print_backtrace(-1);
		panic("flags nbad");
	}
	KKASSERT((rep->r_flags & (R_LOCKED | R_ONREQQ)) == 0);
	if (nmp->nm_flag & NFSMNT_SOFT)
		rep->r_retry = nmp->nm_retry;
	else
		rep->r_retry = NFS_MAXREXMIT + 1;	/* past clip limit */
	rep->r_rtt = rep->r_rexmit = 0;
	if (proct[rep->r_procnum] > 0)
		rep->r_flags |= R_TIMING | R_LOCKED;
	else
		rep->r_flags |= R_LOCKED;

	nfsstats.rpcrequests++;

	if (nmp->nm_flag & NFSMNT_FORCE) {
		rep->r_flags |= R_SOFTTERM;
		rep->r_flags &= ~R_LOCKED;
		return (0);
	}
	rep->r_flags |= R_NEEDSXMIT;	/* in case send lock races us */

	/*
	 * Do the client side RPC.
	 *
	 * Chain request into list of outstanding requests. Be sure
	 * to put it LAST so timer finds oldest requests first.  Note
	 * that our control of R_LOCKED prevents the request from
	 * getting ripped out from under us or transmitted by the
	 * timer code.
	 *
	 * For requests with info structures we must atomically set the
	 * info's state because the structure could become invalid upon
	 * return due to races (i.e., if async)
	 */
	mtx_link_init(&rep->r_link);
	KKASSERT((rep->r_flags & R_ONREQQ) == 0);
	TAILQ_INSERT_TAIL(&nmp->nm_reqq, rep, r_chain);
	rep->r_flags |= R_ONREQQ;
	++nmp->nm_reqqlen;
	if (rep->r_flags & R_ASYNC)
		rep->r_info->state = NFSM_STATE_WAITREPLY;

	error = 0;

	/*
	 * Send if we can.  Congestion control is not handled here any more
	 * because trying to defer the initial send based on the nfs_timer
	 * requires having a very fast nfs_timer, which is silly.
	 */
	if (nmp->nm_so) {
		if (nmp->nm_soflags & PR_CONNREQUIRED)
			error = nfs_sndlock(nmp, rep);
		if (error == 0 && (rep->r_flags & R_NEEDSXMIT)) {
			m2 = m_copym(rep->r_mreq, 0, M_COPYALL, MB_WAIT);
			error = nfs_send(nmp->nm_so, nmp->nm_nam, m2, rep);
			rep->r_flags &= ~R_NEEDSXMIT;
			if ((rep->r_flags & R_SENT) == 0) {
				rep->r_flags |= R_SENT;
			}
			if (nmp->nm_soflags & PR_CONNREQUIRED)
				nfs_sndunlock(nmp);
		}
	}

	/*
	 * Release the lock.  The only remote action that may have occurred
	 * would have been the setting of rep->r_mrep.  If this occurred
	 * and the request was async we have to move it to the reader
	 * thread's queue for action.
	 *
	 * For async requests also make sure the reader is woken up so
	 * it gets on the socket to read responses.
	 */
	if (rep->r_flags & R_ASYNC) {
		if (rep->r_mrep)
			nfs_hardterm(rep, 1);
		rep->r_flags &= ~R_LOCKED;
		nfssvc_iod_reader_wakeup(nmp);
	} else {
		rep->r_flags &= ~R_LOCKED;
	}
	if (rep->r_flags & R_WANTED) {
		rep->r_flags &= ~R_WANTED;
		wakeup(rep);
	}
	return (error);
}
/*
 * This code is only called for synchronous requests.  Completed synchronous
 * requests are left on reqq and we remove them before moving on to the
 * processing state.
 */
nfs_request_waitreply(struct nfsreq *rep)
{
	struct nfsmount *nmp = rep->r_nmp;
	int error;

	KKASSERT((rep->r_flags & R_ASYNC) == 0);

	/*
	 * Wait until the request is finished.
	 */
	error = nfs_reply(nmp, rep);

	/*
	 * RPC done, unlink the request, but don't rip it out from under
	 * the callout timer.
	 *
	 * Once unlinked no other receiver or the timer will have
	 * visibility, so we do not have to set R_LOCKED.
	 */
	while (rep->r_flags & R_LOCKED) {
		rep->r_flags |= R_WANTED;
		tsleep(rep, 0, "nfstrac", 0);
	}
	KKASSERT(rep->r_flags & R_ONREQQ);
	TAILQ_REMOVE(&nmp->nm_reqq, rep, r_chain);
	rep->r_flags &= ~R_ONREQQ;
	--nmp->nm_reqqlen;
	if (TAILQ_FIRST(&nmp->nm_bioq) &&
	    nmp->nm_reqqlen <= nfs_maxasyncbio * 2 / 3) {
		nfssvc_iod_writer_wakeup(nmp);
	}

	/*
	 * Decrement the outstanding request count.
	 */
	if (rep->r_flags & R_SENT) {
		rep->r_flags &= ~R_SENT;
	}
	return (error);
}
/*
 * Process reply with error returned from nfs_request_waitreply().
 *
 * Returns EAGAIN if it wants us to loop up to nfs_request_try() again.
 * Returns ENEEDAUTH if it wants us to loop up to nfs_request_auth() again.
 */
nfs_request_processreply(nfsm_info_t info, int error)
{
	struct nfsreq *req = info->req;
	struct nfsmount *nmp = req->r_nmp;
	u_int32_t *tl;
	int verf_type;
	int i;

	/*
	 * If there was a successful reply and a tprintf msg.
	 * tprintf a response.
	 */
	if (error == 0 && (req->r_flags & R_TPRINTFMSG)) {
		nfs_msg(req->r_td, nmp->nm_mountp->mnt_stat.f_mntfromname,
			"is alive again");
	}
	info->mrep = req->r_mrep;
	info->md = req->r_md;
	info->dpos = req->r_dpos;
	if (error) {
		m_freem(req->r_mreq);
		req->r_mreq = NULL;
		kfree(req, M_NFSREQ);
		info->req = NULL;
		return (error);
	}

	/*
	 * break down the rpc header and check if ok
	 */
	NULLOUT(tl = nfsm_dissect(info, 3 * NFSX_UNSIGNED));
	if (*tl++ == rpc_msgdenied) {
		if (*tl == rpc_mismatch) {
			error = EOPNOTSUPP;
		} else if ((nmp->nm_flag & NFSMNT_KERB) &&
			   *tl++ == rpc_autherr) {
			if (req->r_failed_auth == 0) {
				req->r_failed_auth++;
				req->r_mheadend->m_next = NULL;
				m_freem(info->mrep);
				info->mrep = NULL;
				m_freem(req->r_mreq);
				req->r_mreq = NULL;
				return (ENEEDAUTH);
			} else {
				error = EAUTH;
			}
		} else {
			error = EACCES;
		}
		m_freem(info->mrep);
		info->mrep = NULL;
		m_freem(req->r_mreq);
		req->r_mreq = NULL;
		kfree(req, M_NFSREQ);
		info->req = NULL;
		return (error);
	}

	/*
	 * Grab any Kerberos verifier, otherwise just throw it away.
	 */
	verf_type = fxdr_unsigned(int, *tl++);
	i = fxdr_unsigned(int32_t, *tl);
	if ((nmp->nm_flag & NFSMNT_KERB) && verf_type == RPCAUTH_KERB4) {
		error = nfs_savenickauth(nmp, req->r_cred, i, req->r_key,
					 &info->md, &info->dpos, info->mrep);
		if (error)
			goto nfsmout;
	} else if (i > 0) {
		ERROROUT(nfsm_adv(info, nfsm_rndup(i)));
	}
	NULLOUT(tl = nfsm_dissect(info, NFSX_UNSIGNED));
	/* 0 == ok */
	if (*tl == 0) {
		NULLOUT(tl = nfsm_dissect(info, NFSX_UNSIGNED));
		if (*tl != 0) {
			error = fxdr_unsigned(int, *tl);

			/*
			 * Does anyone even implement this?  Just impose
			 * a delay.
			 */
			if ((nmp->nm_flag & NFSMNT_NFSV3) &&
			    error == NFSERR_TRYLATER) {
				m_freem(info->mrep);
				info->mrep = NULL;
				error = 0;
				tsleep((caddr_t)&lbolt, 0, "nqnfstry", 0);
				return (EAGAIN);	/* goto tryagain */
			}

			/*
			 * If the File Handle was stale, invalidate the
			 * lookup cache, just in case.
			 *
			 * To avoid namecache<->vnode deadlocks we must
			 * release the vnode lock if we hold it.
			 */
			if (error == ESTALE) {
				struct vnode *vp = req->r_vp;
				int ltype;

				ltype = lockstatus(&vp->v_lock, curthread);
				if (ltype == LK_EXCLUSIVE || ltype == LK_SHARED)
					lockmgr(&vp->v_lock, LK_RELEASE);
				cache_inval_vp(vp, CINV_CHILDREN);
				if (ltype == LK_EXCLUSIVE || ltype == LK_SHARED)
					lockmgr(&vp->v_lock, ltype);
			}
			if (nmp->nm_flag & NFSMNT_NFSV3) {
				KKASSERT(*req->r_mrp == info->mrep);
				KKASSERT(*req->r_mdp == info->md);
				KKASSERT(*req->r_dposp == info->dpos);
				error |= NFSERR_RETERR;
			} else {
				m_freem(info->mrep);
				info->mrep = NULL;
			}
			m_freem(req->r_mreq);
			req->r_mreq = NULL;
			kfree(req, M_NFSREQ);
			info->req = NULL;
			return (error);
		}

		KKASSERT(*req->r_mrp == info->mrep);
		KKASSERT(*req->r_mdp == info->md);
		KKASSERT(*req->r_dposp == info->dpos);
		m_freem(req->r_mreq);
		req->r_mreq = NULL;
		kfree(req, M_NFSREQ);
		info->req = NULL;
		return (0);
	}
	m_freem(info->mrep);
	info->mrep = NULL;
	error = EPROTONOSUPPORT;
nfsmout:
	m_freem(req->r_mreq);
	req->r_mreq = NULL;
	kfree(req, M_NFSREQ);
	info->req = NULL;
	return (error);
}
#ifndef NFS_NOSERVER
/*
 * Generate the rpc reply header
 * siz arg. is used to decide if adding a cluster is worthwhile
 */
nfs_rephead(int siz, struct nfsrv_descript *nd, struct nfssvc_sock *slp,
	    int err, struct mbuf **mrq, struct mbuf **mbp, caddr_t *bposp)
{
	u_int32_t *tl;
	struct nfsm_info info;

	siz += RPC_REPLYSIZ;
	info.mb = m_getl(max_hdr + siz, MB_WAIT, MT_DATA, M_PKTHDR, NULL);
	info.mreq = info.mb;
	info.mreq->m_pkthdr.len = 0;
	/*
	 * If this is not a cluster, try and leave leading space
	 * for the lower level headers.
	 */
	if ((max_hdr + siz) < MINCLSIZE)
		info.mreq->m_data += max_hdr;
	tl = mtod(info.mreq, u_int32_t *);
	info.mreq->m_len = 6 * NFSX_UNSIGNED;
	info.bpos = ((caddr_t)tl) + info.mreq->m_len;
	*tl++ = txdr_unsigned(nd->nd_retxid);
	*tl++ = rpc_reply;
	if (err == ERPCMISMATCH || (err & NFSERR_AUTHERR)) {
		*tl++ = rpc_msgdenied;
		if (err & NFSERR_AUTHERR) {
			*tl++ = rpc_autherr;
			*tl = txdr_unsigned(err & ~NFSERR_AUTHERR);
			info.mreq->m_len -= NFSX_UNSIGNED;
			info.bpos -= NFSX_UNSIGNED;
		} else {
			*tl++ = rpc_mismatch;
			*tl++ = txdr_unsigned(RPC_VER2);
			*tl = txdr_unsigned(RPC_VER2);
		}
	} else {
		*tl++ = rpc_msgaccepted;

		/*
		 * For Kerberos authentication, we must send the nickname
		 * verifier back, otherwise just RPCAUTH_NULL.
		 */
		if (nd->nd_flag & ND_KERBFULL) {
			struct nfsuid *nuidp;
			struct timeval ktvout;

			for (nuidp = NUIDHASH(slp, nd->nd_cr.cr_uid)->lh_first;
			     nuidp != NULL; nuidp = nuidp->nu_hash.le_next) {
				if (nuidp->nu_cr.cr_uid == nd->nd_cr.cr_uid &&
				    (!nd->nd_nam2 || netaddr_match(NU_NETFAM(nuidp),
				     &nuidp->nu_haddr, nd->nd_nam2)))
					break;
			}
			if (nuidp) {
				/*
				 * Encrypt the timestamp in ecb mode using the
				 * session key.
				 */
				*tl++ = rpc_auth_kerb;
				*tl++ = txdr_unsigned(3 * NFSX_UNSIGNED);
				*tl = ktvout.tv_sec;
				tl = nfsm_build(&info, 3 * NFSX_UNSIGNED);
				*tl++ = ktvout.tv_usec;
				*tl++ = txdr_unsigned(nuidp->nu_cr.cr_uid);
			} else {
				*tl++ = 0;
				*tl++ = 0;
			}
		} else {
			*tl++ = 0;
			*tl++ = 0;
		}
		switch (err) {
		case EPROGUNAVAIL:
			*tl = txdr_unsigned(RPC_PROGUNAVAIL);
			break;
		case EPROGMISMATCH:
			*tl = txdr_unsigned(RPC_PROGMISMATCH);
			tl = nfsm_build(&info, 2 * NFSX_UNSIGNED);
			*tl++ = txdr_unsigned(2);
			*tl = txdr_unsigned(3);
			break;
		case EPROCUNAVAIL:
			*tl = txdr_unsigned(RPC_PROCUNAVAIL);
			break;
		case EBADRPC:
			*tl = txdr_unsigned(RPC_GARBAGE);
			break;
		default:
			*tl = 0;
			if (err != NFSERR_RETVOID) {
				tl = nfsm_build(&info, NFSX_UNSIGNED);
				if (err)
					*tl = txdr_unsigned(nfsrv_errmap(nd, err));
				else
					*tl = 0;
			}
			break;
		}
	}

	if (err != 0 && err != NFSERR_RETVOID)
		nfsstats.srvrpc_errs++;
	return (0);
}
#endif /* NFS_NOSERVER */
/*
 * Nfs timer routine.
 *
 * Scan the nfsreq list and retransmit any requests that have timed out.
 * To avoid retransmission attempts on STREAM sockets (in the future) make
 * sure to set the r_retry field to 0 (implies nm_retry == 0).
 *
 * Requests with attached responses, terminated requests, and
 * locked requests are ignored.  Locked requests will be picked up
 * in a later timer call.
 */
nfs_timer_callout(void *arg /* never used */)
{
	struct nfsmount *nmp;
	struct nfsreq *req;
#ifndef NFS_NOSERVER
	struct nfssvc_sock *slp;
	u_quad_t cur_usec;
#endif /* NFS_NOSERVER */

	lwkt_gettoken(&nfs_token);
	TAILQ_FOREACH(nmp, &nfs_mountq, nm_entry) {
		lwkt_gettoken(&nmp->nm_token);
		TAILQ_FOREACH(req, &nmp->nm_reqq, r_chain) {
			KKASSERT(nmp == req->r_nmp);
			if (req->r_mrep)
				continue;
			if (req->r_flags & (R_SOFTTERM | R_LOCKED))
				continue;

			/*
			 * Handle timeout/retry.  Be sure to process r_mrep
			 * for async requests that completed while we had
			 * the request locked or they will hang in the reqq
			 * forever.
			 */
			req->r_flags |= R_LOCKED;
			if (nfs_sigintr(nmp, req, req->r_td)) {
				nfs_softterm(req, 1);
				req->r_flags &= ~R_LOCKED;
			} else {
				nfs_timer_req(req);
				if (req->r_flags & R_ASYNC) {
					if (req->r_mrep)
						nfs_hardterm(req, 1);
					req->r_flags &= ~R_LOCKED;
					nfssvc_iod_reader_wakeup(nmp);
				} else {
					req->r_flags &= ~R_LOCKED;
				}
			}
			if (req->r_flags & R_WANTED) {
				req->r_flags &= ~R_WANTED;
				wakeup(req);
			}
		}
		lwkt_reltoken(&nmp->nm_token);
	}
#ifndef NFS_NOSERVER
	/*
	 * Scan the write gathering queues for writes that need to be
	 * completed now.
	 */
	cur_usec = nfs_curusec();

	TAILQ_FOREACH(slp, &nfssvc_sockhead, ns_chain) {
		/* XXX race against removal */
		if (lwkt_trytoken(&slp->ns_token)) {
			if (slp->ns_tq.lh_first &&
			    (slp->ns_tq.lh_first->nd_time <= cur_usec)) {
				nfsrv_wakenfsd(slp, 1);
			}
			lwkt_reltoken(&slp->ns_token);
		}
	}
#endif /* NFS_NOSERVER */

	callout_reset(&nfs_timer_handle, nfs_ticks, nfs_timer_callout, NULL);
	lwkt_reltoken(&nfs_token);
}
nfs_timer_req(struct nfsreq *req)
{
	struct thread *td = &thread0; /* XXX for creds, will break if sleep */
	struct nfsmount *nmp = req->r_nmp;
	struct socket *so;
	struct mbuf *m;
	int timeo;
	int error;

	/*
	 * rtt ticks and timeout calculation.  Return if the timeout
	 * has not been reached yet, unless the packet is flagged
	 * for an immediate send.
	 *
	 * The mean rtt doesn't help when we get random I/Os, we have
	 * to multiply by fairly large numbers.
	 */
	if (req->r_rtt >= 0) {
		/*
		 * Calculate the timeout to test against.
		 */
		req->r_rtt++;
		if (nmp->nm_flag & NFSMNT_DUMBTIMR) {
			timeo = nmp->nm_timeo << NFS_RTT_SCALE_BITS;
		} else if (req->r_flags & R_TIMING) {
			timeo = NFS_SRTT(req) + NFS_SDRTT(req);
		} else {
			timeo = nmp->nm_timeo << NFS_RTT_SCALE_BITS;
		}
		timeo *= multt[req->r_procnum];
		/* timeo is still scaled by SCALE_BITS */

#define NFSFS	(NFS_RTT_SCALE * NFS_HZ)
		if (req->r_flags & R_TIMING) {
			static long last_time;
			if (nfs_showrtt && last_time != time_second) {
				kprintf("rpccmd %d NFS SRTT %d SDRTT %d "
					"timeo %d.%03d\n",
					proct[req->r_procnum],
					NFS_SRTT(req), NFS_SDRTT(req),
					timeo / NFSFS,
					timeo % NFSFS * 1000 / NFSFS);
				last_time = time_second;
			}
		}

		/*
		 * deal with nfs_timer jitter.
		 */
		timeo = (timeo >> NFS_RTT_SCALE_BITS) + 1;
		if (timeo < 2)
			timeo = 2;

		if (nmp->nm_timeouts > 0)
			timeo *= nfs_backoff[nmp->nm_timeouts - 1];
		if (timeo > NFS_MAXTIMEO)
			timeo = NFS_MAXTIMEO;

		if (req->r_rtt <= timeo) {
			if ((req->r_flags & R_NEEDSXMIT) == 0)
				return;
		} else if (nmp->nm_timeouts < 8) {
			nmp->nm_timeouts++;
		}
	}

	/*
	 * Check for server not responding
	 */
	if ((req->r_flags & R_TPRINTFMSG) == 0 &&
	    req->r_rexmit > nmp->nm_deadthresh) {
		nfs_msg(req->r_td, nmp->nm_mountp->mnt_stat.f_mntfromname,
			"not responding");
		req->r_flags |= R_TPRINTFMSG;
	}
	if (req->r_rexmit >= req->r_retry) {	/* too many */
		nfsstats.rpctimeouts++;
		nfs_softterm(req, 1);
		return;
	}

	/*
	 * Generally disable retransmission on reliable sockets,
	 * unless the request is flagged for immediate send.
	 */
	if (nmp->nm_sotype != SOCK_DGRAM) {
		if (++req->r_rexmit > NFS_MAXREXMIT)
			req->r_rexmit = NFS_MAXREXMIT;
		if ((req->r_flags & R_NEEDSXMIT) == 0)
			return;
	}

	/*
	 * Stop here if we do not have a socket!
	 */
	if ((so = nmp->nm_so) == NULL)
		return;

	/*
	 * If there is enough space and the window allows.. resend it.
	 *
	 * r_rtt is left intact in case we get an answer after the
	 * retry that was a reply to the original packet.
	 *
	 * NOTE: so_pru_send()
	 */
	if (ssb_space(&so->so_snd) >= req->r_mreq->m_pkthdr.len &&
	    (req->r_flags & (R_SENT | R_NEEDSXMIT)) &&
	    (m = m_copym(req->r_mreq, 0, M_COPYALL, MB_DONTWAIT))){
		if ((nmp->nm_flag & NFSMNT_NOCONN) == 0)
			error = so_pru_send(so, 0, m, NULL, NULL, td);
		else
			error = so_pru_send(so, 0, m, nmp->nm_nam, NULL, td);
		if (error) {
			if (NFSIGNORE_SOERROR(nmp->nm_soflags, error))
				so->so_error = 0;
			req->r_flags |= R_NEEDSXMIT;
		} else if (req->r_mrep == NULL) {
			/*
			 * Iff first send, start timing
			 * else turn timing off, backoff timer
			 * and divide congestion window by 2.
			 *
			 * It is possible for the so_pru_send() to
			 * block and for us to race a reply so we
			 * only do this if the reply field has not
			 * been filled in.  R_LOCKED will prevent
			 * the request from being ripped out from under
			 * us entirely.
			 *
			 * Record the last resent procnum to aid us
			 * in duplicate detection on receive.
			 */
			if ((req->r_flags & R_NEEDSXMIT) == 0) {
				if (nfs_showrexmit)
					kprintf("X");
				if (++req->r_rexmit > NFS_MAXREXMIT)
					req->r_rexmit = NFS_MAXREXMIT;
				nmp->nm_maxasync_scaled >>= 1;
				if (nmp->nm_maxasync_scaled < NFS_MINASYNC_SCALED)
					nmp->nm_maxasync_scaled = NFS_MINASYNC_SCALED;
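				/*
				 * Illustrative note (not part of the
				 * original source): together with the
				 * receive path, which bumps
				 * nm_maxasync_scaled by one per reply,
				 * this halving gives classic AIMD
				 * behavior: additive increase per
				 * response, multiplicative decrease per
				 * timeout, bounded below by
				 * NFS_MINASYNC_SCALED.
				 */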
				nfsstats.rpcretries++;
				nmp->nm_lastreprocnum = req->r_procnum;
			} else {
				req->r_flags |= R_SENT;
			}
			req->r_flags &= ~R_NEEDSXMIT;
		}
	}
}

/*
 * Mark all of an nfs mount's outstanding requests with R_SOFTTERM and
 * wait for all requests to complete.  This is used by forced unmounts
 * to terminate any outstanding RPCs.
 *
 * Locked requests cannot be canceled but will be marked for
 * termination.
 */
nfs_nmcancelreqs(struct nfsmount *nmp)
{
	struct nfsreq *req;
	int i;

	TAILQ_FOREACH(req, &nmp->nm_reqq, r_chain) {
		if (req->r_mrep != NULL || (req->r_flags & R_SOFTTERM))
			continue;
		nfs_softterm(req, 0);
	}
	/* XXX the other two queues as well */

	for (i = 0; i < 30; i++) {
		TAILQ_FOREACH(req, &nmp->nm_reqq, r_chain) {
			if (nmp == req->r_nmp)
				break;
		}
		if (req == NULL)
			return (0);
		tsleep(&lbolt, 0, "nfscancel", 0);
	}
	return (EBUSY);
}

/*
 * Soft-terminate a request, effectively marking it as failed.
 *
 * Must be called from within a critical section.
 */
nfs_softterm(struct nfsreq *rep, int islocked)
{
	rep->r_flags |= R_SOFTTERM;
	nfs_hardterm(rep, islocked);
}

/*
 * Hard-terminate a request, typically after getting a response.
 *
 * The state machine can still decide to re-issue it later if necessary.
 *
 * Must be called from within a critical section.
 */
nfs_hardterm(struct nfsreq *rep, int islocked)
{
	struct nfsmount *nmp = rep->r_nmp;

	/*
	 * The nm_send count is decremented now to avoid deadlocks
	 * when the process in soreceive() hasn't yet managed to send
	 * its own request.
	 */
	if (rep->r_flags & R_SENT) {
		rep->r_flags &= ~R_SENT;
	}

	/*
	 * If we locked the request or nobody else has locked the request,
	 * and the request is async, we can move it to the reader thread's
	 * queue now and fix up the state.
	 *
	 * If we locked the request or nobody else has locked the request,
	 * we can wake up anyone blocked waiting for a response on the
	 * request.
	 */
	if (islocked || (rep->r_flags & R_LOCKED) == 0) {
		if ((rep->r_flags & (R_ONREQQ | R_ASYNC)) ==
		    (R_ONREQQ | R_ASYNC)) {
			rep->r_flags &= ~R_ONREQQ;
			TAILQ_REMOVE(&nmp->nm_reqq, rep, r_chain);
			--nmp->nm_reqqlen;
			TAILQ_INSERT_TAIL(&nmp->nm_reqrxq, rep, r_chain);
			KKASSERT(rep->r_info->state == NFSM_STATE_TRY ||
				 rep->r_info->state == NFSM_STATE_WAITREPLY);
			rep->r_info->state = NFSM_STATE_PROCESSREPLY;
			nfssvc_iod_reader_wakeup(nmp);
			if (TAILQ_FIRST(&nmp->nm_bioq) &&
			    nmp->nm_reqqlen <= nfs_maxasyncbio * 2 / 3) {
				nfssvc_iod_writer_wakeup(nmp);
			}
		}
		mtx_abort_ex_link(&nmp->nm_rxlock, &rep->r_link);
	}
}

/*
 * Test for a termination condition pending on the process.
 * This is used for NFSMNT_INT mounts.
 */
nfs_sigintr(struct nfsmount *nmp, struct nfsreq *rep, struct thread *td)
{
	sigset_t tmpset;
	struct proc *p;
	struct lwp *lp;

	if (rep && (rep->r_flags & R_SOFTTERM))
		return (EINTR);
	/* Terminate all requests while attempting a forced unmount. */
	if (nmp->nm_mountp->mnt_kern_flag & MNTK_UNMOUNTF)
		return (EINTR);
	if (!(nmp->nm_flag & NFSMNT_INT))
		return (0);
	/* td might be NULL YYY */
	if (td == NULL || (p = td->td_proc) == NULL)
		return (0);
	lp = td->td_lwp;
	tmpset = lwp_sigpend(lp);
	SIGSETNAND(tmpset, lp->lwp_sigmask);
	SIGSETNAND(tmpset, p->p_sigignore);
	if (SIGNOTEMPTY(tmpset) && NFSINT_SIGMASK(tmpset))
		return (EINTR);
	return (0);
}

/*
 * Lock a socket against others.
 * Necessary for STREAM sockets to ensure you get an entire rpc request/reply
 * and also to avoid race conditions between the processes with nfs requests
 * in progress when a reconnect is necessary.
 */
nfs_sndlock(struct nfsmount *nmp, struct nfsreq *rep)
{
	mtx_t mtx = &nmp->nm_txlock;
	struct thread *td;
	int slptimeo;
	int slpflag;
	int error;

	slpflag = 0;
	slptimeo = 0;
	td = rep ? rep->r_td : NULL;
	if (nmp->nm_flag & NFSMNT_INT)
		slpflag = PCATCH;

	while ((error = mtx_lock_ex_try(mtx)) != 0) {
		if (nfs_sigintr(nmp, rep, td)) {
			error = EINTR;
			break;
		}
		error = mtx_lock_ex(mtx, "nfsndlck", slpflag, slptimeo);
		if (error == 0)
			break;
		if (slpflag == PCATCH) {
			slpflag = 0;
			slptimeo = 2 * hz;
		}
	}
	/* Always fail if our request has been cancelled. */
	if (rep && (rep->r_flags & R_SOFTTERM)) {
		if (error == 0)
			mtx_unlock(mtx);
		error = EINTR;
	}
	return (error);
}

/*
 * Unlock the stream socket for others.
 */
nfs_sndunlock(struct nfsmount *nmp)
{
	mtx_unlock(&nmp->nm_txlock);
}

/*
 * Lock the receiver side of the socket.
 */
nfs_rcvlock(struct nfsmount *nmp, struct nfsreq *rep)
{
	mtx_t mtx = &nmp->nm_rxlock;
	int slpflag;
	int slptimeo;
	int error;

	/*
	 * Unconditionally check for completion in case another nfsiod
	 * gets the packet while the caller was blocked, before the caller
	 * called us.  Packet reception is handled by mainline code which
	 * is protected by the BGL at the moment.
	 *
	 * We do not strictly need the second check just before the
	 * tsleep(), but it's good defensive programming.
	 */
	if (rep && rep->r_mrep != NULL)
		return (EALREADY);

	if (nmp->nm_flag & NFSMNT_INT)
		slpflag = PCATCH;
	else
		slpflag = 0;
	slptimeo = 0;

	while ((error = mtx_lock_ex_try(mtx)) != 0) {
		if (nfs_sigintr(nmp, rep, (rep ? rep->r_td : NULL))) {
			error = EINTR;
			break;
		}
		if (rep && rep->r_mrep != NULL) {
			error = EALREADY;
			break;
		}

		/*
		 * NOTE: can return ENOLCK, but in that case rep->r_mrep
		 *	 will already be set.
		 */
		if (rep) {
			error = mtx_lock_ex_link(mtx, &rep->r_link,
						 "nfsrcvlk",
						 slpflag, slptimeo);
		} else {
			error = mtx_lock_ex(mtx, "nfsrcvlk", slpflag, slptimeo);
		}
		if (error == 0)
			break;

		/*
		 * If our reply was received while we were sleeping,
		 * then just return without taking the lock to avoid a
		 * situation where a single iod could 'capture' the
		 * receive lock.
		 */
		if (rep && rep->r_mrep != NULL) {
			error = EALREADY;
			break;
		}
		if (slpflag == PCATCH) {
			slpflag = 0;
			slptimeo = 2 * hz;
		}
	}
	if (error == 0) {
		if (rep && rep->r_mrep != NULL) {
			error = EALREADY;
			mtx_unlock(mtx);
		}
	}
	return (error);
}

/*
 * Unlock the stream socket for others.
 */
nfs_rcvunlock(struct nfsmount *nmp)
{
	mtx_unlock(&nmp->nm_rxlock);
}

/*
 * Check for badly aligned mbuf data and realign by copying the unaligned
 * portion of the data into a new mbuf chain and freeing the portions
 * of the old chain that were replaced.
 *
 * We cannot simply realign the data within the existing mbuf chain
 * because the underlying buffers may contain other rpc commands and
 * we cannot afford to overwrite them.
 *
 * We would prefer to avoid this situation entirely.  The situation does
 * not occur with NFS/UDP and is supposed to only occasionally occur
 * with TCP.  Use vfs.nfs.realign_count and realign_test to check this.
 *
 * NOTE!  MB_DONTWAIT cannot be used here.  The mbufs must be acquired
 *	  because the rpc request OR reply cannot be thrown away.  TCP NFS
 *	  mounts do not retry their RPCs unless the TCP connection itself
 *	  is dropped so throwing away a RPC will basically cause the NFS
 *	  operation to lockup indefinitely.
 */
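/*
 * Illustrative example (not part of the original source): an mbuf whose
 * m_len is 6, or whose m_data sits at an odd offset, trips the
 * (m->m_len & 0x3) || (mtod(m, intptr_t) & 0x3) test below and forces the
 * whole chain to be copied into fresh, longword-aligned mbufs.
 */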
nfs_realign(struct mbuf **pm, int hsiz)
{
	struct mbuf *m;
	struct mbuf *n = NULL;

	/*
	 * Check for misalignment
	 */
	++nfs_realign_test;
	while ((m = *pm) != NULL) {
		if ((m->m_len & 0x3) || (mtod(m, intptr_t) & 0x3))
			break;
		pm = &m->m_next;
	}

	/*
	 * If misalignment found make a completely new copy.
	 */
	if (m) {
		++nfs_realign_count;
		n = m_dup_data(m, MB_WAIT);
		m_freem(*pm);
		*pm = n;
	}
}

#ifndef NFS_NOSERVER
/*
 * Parse an RPC request
 * - verify it
 * - fill in the cred struct.
 */
nfs_getreq(struct nfsrv_descript *nd, struct nfsd *nfsd, int has_header)
{
	u_int32_t *tl;
	int i, len;
	caddr_t cp;
	u_int32_t nfsvers, auth_type;
	uid_t nickuid;
	int error = 0, ticklen;
	struct nfsuid *nuidp;
	struct timeval tvin, tvout;
	struct uio uio;
	struct iovec iov;
	struct nfsm_info info;
#if 0				/* until encrypted keys are implemented */
	NFSKERBKEYSCHED_T keys;	/* stores key schedule */
#endif

	info.mrep = nd->nd_mrep;
	info.md = nd->nd_md;
	info.dpos = nd->nd_dpos;

	NULLOUT(tl = nfsm_dissect(&info, 10 * NFSX_UNSIGNED));
	nd->nd_retxid = fxdr_unsigned(u_int32_t, *tl++);
	if (*tl++ != rpc_call) {
		m_freem(info.mrep);
		info.mrep = NULL;
		return (EBADRPC);
	}
	NULLOUT(tl = nfsm_dissect(&info, 8 * NFSX_UNSIGNED));
	if (*tl++ != rpc_vers) {
		nd->nd_repstat = ERPCMISMATCH;
		nd->nd_procnum = NFSPROC_NOOP;
		return (0);
	}
	if (*tl != nfs_prog) {
		nd->nd_repstat = EPROGUNAVAIL;
		nd->nd_procnum = NFSPROC_NOOP;
		return (0);
	}
	tl++;
	nfsvers = fxdr_unsigned(u_int32_t, *tl++);
	if (nfsvers < NFS_VER2 || nfsvers > NFS_VER3) {
		nd->nd_repstat = EPROGMISMATCH;
		nd->nd_procnum = NFSPROC_NOOP;
		return (0);
	}
	nd->nd_flag = 0;
	if (nfsvers == NFS_VER3)
		nd->nd_flag = ND_NFSV3;
	nd->nd_procnum = fxdr_unsigned(u_int32_t, *tl++);
	if (nd->nd_procnum == NFSPROC_NULL)
		return (0);
	if (nd->nd_procnum >= NFS_NPROCS ||
	    (nd->nd_procnum >= NQNFSPROC_GETLEASE) ||
	    (!nd->nd_flag && nd->nd_procnum > NFSV2PROC_STATFS)) {
		nd->nd_repstat = EPROCUNAVAIL;
		nd->nd_procnum = NFSPROC_NOOP;
		return (0);
	}
	if ((nd->nd_flag & ND_NFSV3) == 0)
		nd->nd_procnum = nfsv3_procid[nd->nd_procnum];
	auth_type = *tl++;
	len = fxdr_unsigned(int, *tl++);
	if (len < 0 || len > RPCAUTH_MAXSIZ) {
		m_freem(info.mrep);
		info.mrep = NULL;
		return (EBADRPC);
	}

	nd->nd_flag &= ~ND_KERBAUTH;
	/*
	 * Handle auth_unix or auth_kerb.
	 */
	if (auth_type == rpc_auth_unix) {
		len = fxdr_unsigned(int, *++tl);
		if (len < 0 || len > NFS_MAXNAMLEN) {
			m_freem(info.mrep);
			info.mrep = NULL;
			return (EBADRPC);
		}
		ERROROUT(nfsm_adv(&info, nfsm_rndup(len)));
		NULLOUT(tl = nfsm_dissect(&info, 3 * NFSX_UNSIGNED));
		bzero((caddr_t)&nd->nd_cr, sizeof (struct ucred));
		nd->nd_cr.cr_ref = 1;
		nd->nd_cr.cr_uid = fxdr_unsigned(uid_t, *tl++);
		nd->nd_cr.cr_ruid = nd->nd_cr.cr_svuid = nd->nd_cr.cr_uid;
		nd->nd_cr.cr_gid = fxdr_unsigned(gid_t, *tl++);
		nd->nd_cr.cr_rgid = nd->nd_cr.cr_svgid = nd->nd_cr.cr_gid;
		len = fxdr_unsigned(int, *tl);
		if (len < 0 || len > RPCAUTH_UNIXGIDS) {
			m_freem(info.mrep);
			info.mrep = NULL;
			return (EBADRPC);
		}
		NULLOUT(tl = nfsm_dissect(&info, (len + 2) * NFSX_UNSIGNED));
		for (i = 1; i <= len; i++)
			if (i < NGROUPS)
				nd->nd_cr.cr_groups[i] = fxdr_unsigned(gid_t, *tl++);
			else
				tl++;
		nd->nd_cr.cr_ngroups = (len >= NGROUPS) ? NGROUPS : (len + 1);
		if (nd->nd_cr.cr_ngroups > 1)
			nfsrvw_sort(nd->nd_cr.cr_groups, nd->nd_cr.cr_ngroups);
		len = fxdr_unsigned(int, *++tl);
		if (len < 0 || len > RPCAUTH_MAXSIZ) {
			m_freem(info.mrep);
			info.mrep = NULL;
			return (EBADRPC);
		}
		ERROROUT(nfsm_adv(&info, nfsm_rndup(len)));
	} else if (auth_type == rpc_auth_kerb) {
		switch (fxdr_unsigned(int, *tl++)) {
		case RPCAKN_FULLNAME:
			ticklen = fxdr_unsigned(int, *tl);
			*((u_int32_t *)nfsd->nfsd_authstr) = *tl;
			uio.uio_resid = nfsm_rndup(ticklen) + NFSX_UNSIGNED;
			nfsd->nfsd_authlen = uio.uio_resid + NFSX_UNSIGNED;
			if (uio.uio_resid > (len - 2 * NFSX_UNSIGNED)) {
				m_freem(info.mrep);
				info.mrep = NULL;
				return (EBADRPC);
			}
			uio.uio_offset = 0;
			uio.uio_iov = &iov;
			uio.uio_iovcnt = 1;
			uio.uio_segflg = UIO_SYSSPACE;
			iov.iov_base = (caddr_t)&nfsd->nfsd_authstr[4];
			iov.iov_len = RPCAUTH_MAXSIZ - 4;
			ERROROUT(nfsm_mtouio(&info, &uio, uio.uio_resid));
			NULLOUT(tl = nfsm_dissect(&info, 2 * NFSX_UNSIGNED));
			if (*tl++ != rpc_auth_kerb ||
			    fxdr_unsigned(int, *tl) != 4 * NFSX_UNSIGNED) {
				kprintf("Bad kerb verifier\n");
				nd->nd_repstat = (NFSERR_AUTHERR|AUTH_BADVERF);
				nd->nd_procnum = NFSPROC_NOOP;
				return (0);
			}
			NULLOUT(cp = nfsm_dissect(&info, 4 * NFSX_UNSIGNED));
			tl = (u_int32_t *)cp;
			if (fxdr_unsigned(int, *tl) != RPCAKN_FULLNAME) {
				kprintf("Not fullname kerb verifier\n");
				nd->nd_repstat = (NFSERR_AUTHERR|AUTH_BADVERF);
				nd->nd_procnum = NFSPROC_NOOP;
				return (0);
			}
			cp += NFSX_UNSIGNED;
			bcopy(cp, nfsd->nfsd_verfstr, 3 * NFSX_UNSIGNED);
			nfsd->nfsd_verflen = 3 * NFSX_UNSIGNED;
			nd->nd_flag |= ND_KERBFULL;
			nfsd->nfsd_flag |= NFSD_NEEDAUTH;
			break;
		case RPCAKN_NICKNAME:
			if (len != 2 * NFSX_UNSIGNED) {
				kprintf("Kerb nickname short\n");
				nd->nd_repstat = (NFSERR_AUTHERR|AUTH_BADCRED);
				nd->nd_procnum = NFSPROC_NOOP;
				return (0);
			}
			nickuid = fxdr_unsigned(uid_t, *tl);
			NULLOUT(tl = nfsm_dissect(&info, 2 * NFSX_UNSIGNED));
			if (*tl++ != rpc_auth_kerb ||
			    fxdr_unsigned(int, *tl) != 3 * NFSX_UNSIGNED) {
				kprintf("Kerb nick verifier bad\n");
				nd->nd_repstat = (NFSERR_AUTHERR|AUTH_BADVERF);
				nd->nd_procnum = NFSPROC_NOOP;
				return (0);
			}
			NULLOUT(tl = nfsm_dissect(&info, 3 * NFSX_UNSIGNED));
			tvin.tv_sec = *tl++;
			tvin.tv_usec = *tl;

			for (nuidp = NUIDHASH(nfsd->nfsd_slp, nickuid)->lh_first;
			     nuidp != NULL; nuidp = nuidp->nu_hash.le_next) {
				if (nuidp->nu_cr.cr_uid == nickuid &&
				    (!nd->nd_nam2 ||
				     netaddr_match(NU_NETFAM(nuidp),
					&nuidp->nu_haddr, nd->nd_nam2)))
					break;
			}
			if (nuidp == NULL) {
				nd->nd_repstat =
					(NFSERR_AUTHERR|AUTH_REJECTCRED);
				nd->nd_procnum = NFSPROC_NOOP;
				return (0);
			}

			/*
			 * Now, decrypt the timestamp using the session key
			 * and return the result.
			 */
			tvout.tv_sec = fxdr_unsigned(long, tvout.tv_sec);
			tvout.tv_usec = fxdr_unsigned(long, tvout.tv_usec);
			if (nuidp->nu_expire < time_second ||
			    nuidp->nu_timestamp.tv_sec > tvout.tv_sec ||
			    (nuidp->nu_timestamp.tv_sec == tvout.tv_sec &&
			     nuidp->nu_timestamp.tv_usec > tvout.tv_usec)) {
				nuidp->nu_expire = 0;
				nd->nd_repstat =
					(NFSERR_AUTHERR|AUTH_REJECTVERF);
				nd->nd_procnum = NFSPROC_NOOP;
				return (0);
			}
			nfsrv_setcred(&nuidp->nu_cr, &nd->nd_cr);
			nd->nd_flag |= ND_KERBNICK;
			break;
		}
	} else {
		nd->nd_repstat = (NFSERR_AUTHERR | AUTH_REJECTCRED);
		nd->nd_procnum = NFSPROC_NOOP;
		return (0);
	}

	nd->nd_md = info.md;
	nd->nd_dpos = info.dpos;
	return (0);
nfsmout:
	return (error);
}
/*
 * Send a message to the originating process's terminal.  The thread and/or
 * process may be NULL.  YYY the thread should not be NULL but there may
 * still be some uio_td's that are still being passed as NULL through to
 * here.
 */
nfs_msg(struct thread *td, char *server, char *msg)
{
	tpr_t tpr;

	if (td && td->td_proc)
		tpr = tprintf_open(td->td_proc);
	else
		tpr = NULL;
	tprintf(tpr, "nfs server %s: %s\n", server, msg);
	tprintf_close(tpr);
	return (0);
}
#ifndef NFS_NOSERVER
/*
 * Socket upcall routine for nfsd sockets.  This runs in the protocol
 * thread and passes waitflag == MB_DONTWAIT.
 */
nfsrv_rcv_upcall(struct socket *so, void *arg, int waitflag)
{
	struct nfssvc_sock *slp = (struct nfssvc_sock *)arg;

	if (slp->ns_needq_upcall == 0) {
		slp->ns_needq_upcall = 1;	/* ok to race */
		lwkt_gettoken(&nfs_token);
		nfsrv_wakenfsd(slp, 1);
		lwkt_reltoken(&nfs_token);
	}

	lwkt_gettoken(&slp->ns_token);
	slp->ns_flag |= SLP_NEEDQ;
	nfsrv_rcv(so, arg, waitflag);
	lwkt_reltoken(&slp->ns_token);
}

/*
 * Process new data on a receive socket.  Essentially do as much as we can
 * non-blocking, else punt and it will be called with MB_WAIT from an nfsd.
 *
 * slp->ns_token is held on call
 */
nfsrv_rcv(struct socket *so, void *arg, int waitflag)
{
	struct nfssvc_sock *slp = (struct nfssvc_sock *)arg;
	struct mbuf *m;
	struct sockaddr *nam;
	struct sockbuf sio;
	int flags, error;
	int nparallel_wakeup = 0;

	ASSERT_LWKT_TOKEN_HELD(&slp->ns_token);

	if ((slp->ns_flag & SLP_VALID) == 0)
		return;

	/*
	 * Do not allow an infinite number of completed RPC records to build
	 * up before we stop reading data from the socket.  Otherwise we could
	 * end up holding onto an unreasonable number of mbufs for requests
	 * waiting for service.
	 *
	 * This should give pretty good feedback to the TCP layer and
	 * prevents a memory crunch for other protocols.
	 *
	 * Note that the same service socket can be dispatched to several
	 * nfs servers simultaneously.  The tcp protocol callback calls us
	 * with MB_DONTWAIT.  nfsd calls us with MB_WAIT (typically).
	 */
	if (NFSRV_RECLIMIT(slp))
		return;

	/*
	 * Handle protocol specifics to parse an RPC request.  We always
	 * pull from the socket using non-blocking I/O.
	 */
	if (so->so_type == SOCK_STREAM) {
		/*
		 * The data has to be read in an orderly fashion from a TCP
		 * stream, unlike a UDP socket.  It is possible for soreceive
		 * and/or nfsrv_getstream() to block, so make sure only one
		 * entity is messing around with the TCP stream at any given
		 * moment.  The receive sockbuf's lock in soreceive is not
		 * sufficient.
		 */
		if (slp->ns_flag & SLP_GETSTREAM)
			return;
		slp->ns_flag |= SLP_GETSTREAM;

		/*
		 * Do soreceive().  Pull out as much data as possible without
		 * blocking.
		 */
		sbinit(&sio, 1000000000);
		flags = MSG_DONTWAIT;
		error = so_pru_soreceive(so, &nam, NULL, &sio, NULL, &flags);
		if (error || sio.sb_mb == NULL) {
			if (error != EWOULDBLOCK)
				slp->ns_flag |= SLP_DISCONN;
			slp->ns_flag &= ~(SLP_GETSTREAM | SLP_NEEDQ);
			return;
		}
		m = sio.sb_mb;
		if (slp->ns_rawend) {
			slp->ns_rawend->m_next = m;
			slp->ns_cc += sio.sb_cc;
		} else {
			slp->ns_raw = m;
			slp->ns_cc = sio.sb_cc;
		}
		while (m->m_next)
			m = m->m_next;
		slp->ns_rawend = m;

		/*
		 * Now try and parse as many record(s) as we can out of the
		 * raw stream data.  This will set SLP_DOREC.
		 */
		error = nfsrv_getstream(slp, waitflag, &nparallel_wakeup);
		if (error && error != EWOULDBLOCK)
			slp->ns_flag |= SLP_DISCONN;
		slp->ns_flag &= ~SLP_GETSTREAM;
	} else {
		/*
		 * For UDP soreceive typically pulls just one packet, loop
		 * to get the whole batch.
		 */
		do {
			sbinit(&sio, 1000000000);
			flags = MSG_DONTWAIT;
			error = so_pru_soreceive(so, &nam, NULL, &sio,
						 NULL, &flags);
			if (sio.sb_mb) {
				struct nfsrv_rec *rec;
				int mf = (waitflag & MB_DONTWAIT) ?
					    M_NOWAIT : M_WAITOK;
				rec = kmalloc(sizeof(struct nfsrv_rec),
					      M_NFSRVDESC, mf);
				if (!rec) {
					if (nam)
						kfree(nam, M_SONAME);
					m_freem(sio.sb_mb);
					continue;
				}
				nfs_realign(&sio.sb_mb, 10 * NFSX_UNSIGNED);
				rec->nr_address = nam;
				rec->nr_packet = sio.sb_mb;
				STAILQ_INSERT_TAIL(&slp->ns_rec, rec, nr_link);
				++slp->ns_numrec;
				slp->ns_flag |= SLP_DOREC;
				++nparallel_wakeup;
			} else {
				slp->ns_flag &= ~SLP_NEEDQ;
			}
			if (error) {
				if ((so->so_proto->pr_flags & PR_CONNREQUIRED)
				    && error != EWOULDBLOCK) {
					slp->ns_flag |= SLP_DISCONN;
					break;
				}
			}
			if (NFSRV_RECLIMIT(slp))
				break;
		} while (sio.sb_mb);
	}

	/*
	 * If we were upcalled from the tcp protocol layer and we have
	 * fully parsed records ready to go, or there is new data pending,
	 * or something went wrong, try to wake up a nfsd thread to deal
	 * with it.
	 */
	/* XXX this code is currently not executed (nfsrv_rcv_upcall) */
	if (waitflag == MB_DONTWAIT && (slp->ns_flag & SLP_ACTION_MASK)) {
		lwkt_gettoken(&nfs_token);
		nfsrv_wakenfsd(slp, nparallel_wakeup);
		lwkt_reltoken(&nfs_token);
	}
}
/*
 * Try and extract an RPC request from the mbuf data list received on a
 * stream socket.  The "waitflag" argument indicates whether or not it
 * can sleep.
 */
nfsrv_getstream(struct nfssvc_sock *slp, int waitflag, int *countp)
{
	struct mbuf *m, **mpp;
	char *cp1, *cp2;
	int len;
	struct mbuf *om, *m2, *recm;
	u_int32_t recmark;

	if (slp->ns_reclen == 0) {
		if (slp->ns_cc < NFSX_UNSIGNED)
			return (0);
		m = slp->ns_raw;
		if (m->m_len >= NFSX_UNSIGNED) {
			bcopy(mtod(m, caddr_t), (caddr_t)&recmark, NFSX_UNSIGNED);
			m->m_data += NFSX_UNSIGNED;
			m->m_len -= NFSX_UNSIGNED;
		} else {
			cp1 = (caddr_t)&recmark;
			cp2 = mtod(m, caddr_t);
			while (cp1 < ((caddr_t)&recmark) + NFSX_UNSIGNED) {
				while (m->m_len == 0) {
					m = m->m_next;
					cp2 = mtod(m, caddr_t);
				}
				*cp1++ = *cp2++;
				m->m_data++;
				m->m_len--;
			}
		}
		slp->ns_cc -= NFSX_UNSIGNED;
		recmark = ntohl(recmark);
		slp->ns_reclen = recmark & ~0x80000000;
		if (recmark & 0x80000000)
			slp->ns_flag |= SLP_LASTFRAG;
		else
			slp->ns_flag &= ~SLP_LASTFRAG;
		if (slp->ns_reclen > NFS_MAXPACKET || slp->ns_reclen <= 0) {
			log(LOG_ERR, "%s (%d) from nfs client\n",
			    "impossible packet length",
			    slp->ns_reclen);
			return (EPERM);
		}
	}

	/*
	 * Now get the record part.
	 *
	 * Note that slp->ns_reclen may be 0.  Linux sometimes
	 * generates 0-length RPCs
	 */
	recm = NULL;
	if (slp->ns_cc == slp->ns_reclen) {
		recm = slp->ns_raw;
		slp->ns_raw = slp->ns_rawend = NULL;
		slp->ns_cc = slp->ns_reclen = 0;
	} else if (slp->ns_cc > slp->ns_reclen) {
		len = 0;
		m = slp->ns_raw;
		om = NULL;

		while (len < slp->ns_reclen) {
			if ((len + m->m_len) > slp->ns_reclen) {
				m2 = m_copym(m, 0, slp->ns_reclen - len,
					     waitflag);
				if (m2) {
					if (om) {
						om->m_next = m2;
						recm = slp->ns_raw;
					} else {
						recm = m2;
					}
					m->m_data += slp->ns_reclen - len;
					m->m_len -= slp->ns_reclen - len;
					len = slp->ns_reclen;
				} else {
					return (EWOULDBLOCK);
				}
			} else if ((len + m->m_len) == slp->ns_reclen) {
				om = m;
				len += m->m_len;
				m = m->m_next;
				om->m_next = NULL;
				recm = slp->ns_raw;
			} else {
				om = m;
				len += m->m_len;
				m = m->m_next;
			}
		}
		slp->ns_raw = m;
		slp->ns_cc -= len;
		slp->ns_reclen = 0;
	} else {
		return (0);
	}

	/*
	 * Accumulate the fragments into a record.
	 */
	mpp = &slp->ns_frag;
	while (*mpp)
		mpp = &((*mpp)->m_next);
	*mpp = recm;
	if (slp->ns_flag & SLP_LASTFRAG) {
		struct nfsrv_rec *rec;
		int mf = (waitflag & MB_DONTWAIT) ? M_NOWAIT : M_WAITOK;
		rec = kmalloc(sizeof(struct nfsrv_rec), M_NFSRVDESC, mf);
		if (!rec) {
			m_freem(slp->ns_frag);
		} else {
			nfs_realign(&slp->ns_frag, 10 * NFSX_UNSIGNED);
			rec->nr_address = NULL;
			rec->nr_packet = slp->ns_frag;
			STAILQ_INSERT_TAIL(&slp->ns_rec, rec, nr_link);
			++slp->ns_numrec;
			slp->ns_flag |= SLP_DOREC;
			++*countp;
		}
		slp->ns_frag = NULL;
	}
/*
 * Sanity check our mbuf chain.
 */
static void
nfs_checkpkt(struct mbuf *m, int len)
{
	int xlen = 0;

	while (m) {
		xlen += m->m_len;
		m = m->m_next;
	}
	if (xlen != len) {
		panic("nfs_checkpkt: len mismatch %d/%d mbuf %p",
		      xlen, len, m);
	}
}

#else

static void
nfs_checkpkt(struct mbuf *m __unused, int len __unused)
{
}

#endif

/*
 * Parse an RPC header.
 *
 * If the socket is invalid or no records are pending we return ENOBUFS.
 * The caller must deal with NEEDQ races.
 */
nfsrv_dorec(struct nfssvc_sock *slp, struct nfsd *nfsd,
	    struct nfsrv_descript **ndp)
{
	struct nfsrv_rec *rec;
	struct mbuf *m;
	struct sockaddr *nam;
	struct nfsrv_descript *nd;
	int error;

	*ndp = NULL;
	if ((slp->ns_flag & SLP_VALID) == 0 || !STAILQ_FIRST(&slp->ns_rec))
		return (ENOBUFS);
	rec = STAILQ_FIRST(&slp->ns_rec);
	STAILQ_REMOVE_HEAD(&slp->ns_rec, nr_link);
	KKASSERT(slp->ns_numrec > 0);
	if (--slp->ns_numrec == 0)
		slp->ns_flag &= ~SLP_DOREC;
	nam = rec->nr_address;
	m = rec->nr_packet;
	kfree(rec, M_NFSRVDESC);
	nd = kmalloc(sizeof(struct nfsrv_descript), M_NFSRVDESC, M_WAITOK);
	nd->nd_md = nd->nd_mrep = m;
	nd->nd_nam2 = nam;
	nd->nd_dpos = mtod(m, caddr_t);
	error = nfs_getreq(nd, nfsd, TRUE);
	if (error) {
		if (nam)
			kfree(nam, M_SONAME);
		kfree((caddr_t)nd, M_NFSRVDESC);
		return (error);
	}
	*ndp = nd;
	nfsd->nfsd_nd = nd;
	return (0);
}
/*
 * Try to assign service sockets to nfsd threads based on the number
 * of new rpc requests that have been queued on the service socket.
 *
 * If no nfsd's are available or additional requests are pending, set the
 * NFSD_CHECKSLP flag so that one of the running nfsds will go look for
 * the work in the nfssvc_sock list when it is finished processing its
 * current work.  This flag is only cleared when an nfsd can not find
 * any new work to perform.
 */
nfsrv_wakenfsd(struct nfssvc_sock *slp, int nparallel)
{
	struct nfsd *nd;

	if ((slp->ns_flag & SLP_VALID) == 0)
		return;
	TAILQ_FOREACH(nd, &nfsd_head, nfsd_chain) {
		if (nd->nfsd_flag & NFSD_WAITING) {
			nd->nfsd_flag &= ~NFSD_WAITING;
			if (nd->nfsd_slp)
				panic("nfsd wakeup");
			slp->ns_sref++;
			nd->nfsd_slp = slp;
			wakeup((caddr_t)nd);
			if (--nparallel == 0)
				break;
		}
	}

	/*
	 * If we couldn't assign slp then the NFSDs are all busy and
	 * we set a flag indicating that there is pending work.
	 */
	if (nparallel > 0)
		nfsd_head_flag |= NFSD_CHECKSLP;
}
#endif /* NFS_NOSERVER */