2 * Copyright (c) 1989, 1991, 1993, 1995
3 * The Regents of the University of California. All rights reserved.
5 * This code is derived from software contributed to Berkeley by
6 * Rick Macklem at The University of Guelph.
8 * Redistribution and use in source and binary forms, with or without
9 * modification, are permitted provided that the following conditions
11 * 1. Redistributions of source code must retain the above copyright
12 * notice, this list of conditions and the following disclaimer.
13 * 2. Redistributions in binary form must reproduce the above copyright
14 * notice, this list of conditions and the following disclaimer in the
15 * documentation and/or other materials provided with the distribution.
16 * 3. All advertising materials mentioning features or use of this software
17 * must display the following acknowledgement:
18 * This product includes software developed by the University of
19 * California, Berkeley and its contributors.
20 * 4. Neither the name of the University nor the names of its contributors
21 * may be used to endorse or promote products derived from this software
22 * without specific prior written permission.
24 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
25 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
26 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
27 * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
28 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
29 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
30 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
31 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
32 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
33 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
36 * @(#)nfs_socket.c 8.5 (Berkeley) 3/30/95
37 * $FreeBSD: src/sys/nfs/nfs_socket.c,v 1.60.2.6 2003/03/26 01:44:46 alfred Exp $
38 * $DragonFly: src/sys/vfs/nfs/nfs_socket.c,v 1.45 2007/05/18 17:05:13 dillon Exp $
42 * Socket operations for use by nfs
45 #include <sys/param.h>
46 #include <sys/systm.h>
48 #include <sys/malloc.h>
49 #include <sys/mount.h>
50 #include <sys/kernel.h>
52 #include <sys/vnode.h>
53 #include <sys/fcntl.h>
54 #include <sys/protosw.h>
55 #include <sys/resourcevar.h>
56 #include <sys/socket.h>
57 #include <sys/socketvar.h>
58 #include <sys/socketops.h>
59 #include <sys/syslog.h>
60 #include <sys/thread.h>
61 #include <sys/tprintf.h>
62 #include <sys/sysctl.h>
63 #include <sys/signalvar.h>
64 #include <sys/mutex.h>
66 #include <sys/signal2.h>
67 #include <sys/mutex2.h>
69 #include <netinet/in.h>
70 #include <netinet/tcp.h>
71 #include <sys/thread2.h>
77 #include "nfsm_subs.h"
86 * RTT calculations are scaled by 256 (8 bits). A proper fractional
87 * RTT will still be calculated even with a slow NFS timer.
89 #define NFS_SRTT(r) (r)->r_nmp->nm_srtt[proct[(r)->r_procnum]]
90 #define NFS_SDRTT(r) (r)->r_nmp->nm_sdrtt[proct[(r)->r_procnum]]
91 #define NFS_RTT_SCALE_BITS 8 /* bits */
92 #define NFS_RTT_SCALE 256 /* value */
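/*
 * Worked example of the fixed-point RTT arithmetic (illustrative only):
 * with 8 bits of fractional scaling, a smoothed RTT of 1.5 ticks is
 * stored as 1.5 * 256 = 384.  When a request measured at r_rtt == 2
 * ticks completes, the 7/8-gain update in nfs_reply(),
 *
 *	n = ((NFS_SRTT(rep) * 7) + (rep->r_rtt << NFS_RTT_SCALE_BITS)) >> 3;
 *
 * yields (384 * 7 + 512) >> 3 == 400, i.e. 1.5625 ticks.  nfs_timer_req()
 * converts back to whole ticks with (timeo >> NFS_RTT_SCALE_BITS) + 1,
 * so a fractional estimate still produces at least a one-tick timeout.
 */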
95 * Defines which timer to use for the procnum.
102 static int proct[NFS_NPROCS] = {
103 0, 1, 0, 2, 1, 3, 3, 4, 0, 0, 0, 0, 0, 0, 0, 0, 3, 3, 0, 0, 0, 0, 0,
107 static int nfs_backoff[8] = { 2, 3, 5, 8, 13, 21, 34, 55 };
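/*
 * Illustrative note: nfs_backoff[] holds Fibonacci-like multipliers that
 * nfs_timer_req() applies to the base timeout.  After the third
 * consecutive timeout on a mount (nm_timeouts == 3) the timeout is
 * multiplied by nfs_backoff[2] == 5, and the result is always clamped
 * to NFS_MAXTIMEO.  nm_timeouts stops growing at 8, so the multiplier
 * never exceeds 55.
 */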
108 static int nfs_realign_test;
109 static int nfs_realign_count;
110 static int nfs_bufpackets = 4;
111 static int nfs_showrtt;
112 static int nfs_showrexmit;
114 SYSCTL_DECL(_vfs_nfs);
116 SYSCTL_INT(_vfs_nfs, OID_AUTO, realign_test, CTLFLAG_RW, &nfs_realign_test, 0, "");
117 SYSCTL_INT(_vfs_nfs, OID_AUTO, realign_count, CTLFLAG_RW, &nfs_realign_count, 0, "");
118 SYSCTL_INT(_vfs_nfs, OID_AUTO, bufpackets, CTLFLAG_RW, &nfs_bufpackets, 0, "");
119 SYSCTL_INT(_vfs_nfs, OID_AUTO, showrtt, CTLFLAG_RW, &nfs_showrtt, 0, "");
120 SYSCTL_INT(_vfs_nfs, OID_AUTO, showrexmit, CTLFLAG_RW, &nfs_showrexmit, 0, "");
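/*
 * These knobs appear under vfs.nfs; illustrative usage (assumed shell
 * invocations, shown only as examples):
 *
 *	sysctl vfs.nfs.showrtt=1     # log per-RPC SRTT/SDRTT from nfs_timer_req()
 *	sysctl vfs.nfs.bufpackets=8  # scale the socket buffer reservations in nfs_connect()
 */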
122 static int nfs_request_setup(nfsm_info_t info);
123 static int nfs_request_auth(struct nfsreq *rep);
124 static int nfs_request_try(struct nfsreq *rep);
125 static int nfs_request_waitreply(struct nfsreq *rep);
126 static int nfs_request_processreply(nfsm_info_t info, int);
129 struct nfsrtt nfsrtt;
130 struct callout nfs_timer_handle;
132 static int nfs_msg (struct thread *,char *,char *);
133 static int nfs_rcvlock (struct nfsmount *nmp, struct nfsreq *myreq);
134 static void nfs_rcvunlock (struct nfsmount *nmp);
135 static void nfs_realign (struct mbuf **pm, int hsiz);
136 static int nfs_receive (struct nfsmount *nmp, struct nfsreq *rep,
137 struct sockaddr **aname, struct mbuf **mp);
138 static void nfs_softterm (struct nfsreq *rep, int islocked);
139 static void nfs_hardterm (struct nfsreq *rep, int islocked);
140 static int nfs_reconnect (struct nfsmount *nmp, struct nfsreq *rep);
142 static int nfsrv_getstream (struct nfssvc_sock *, int, int *);
143 static void nfs_timer_req(struct nfsreq *req);
145 int (*nfsrv3_procs[NFS_NPROCS]) (struct nfsrv_descript *nd,
146 struct nfssvc_sock *slp,
148 struct mbuf **mreqp) = {
176 #endif /* NFS_NOSERVER */
179 * Initialize sockets and congestion for a new NFS connection.
180 * We do not free the sockaddr if error.
183 nfs_connect(struct nfsmount *nmp, struct nfsreq *rep)
186 int error, rcvreserve, sndreserve;
188 struct sockaddr *saddr;
189 struct sockaddr_in *sin;
190 struct thread *td = &thread0; /* only used for socreate and sobind */
194 error = socreate(saddr->sa_family, &nmp->nm_so, nmp->nm_sotype,
195 nmp->nm_soproto, td);
199 nmp->nm_soflags = so->so_proto->pr_flags;
202 * Some servers require that the client port be a reserved port number.
204 if (saddr->sa_family == AF_INET && (nmp->nm_flag & NFSMNT_RESVPORT)) {
207 struct sockaddr_in ssin;
209 bzero(&sopt, sizeof sopt);
210 ip = IP_PORTRANGE_LOW;
211 sopt.sopt_level = IPPROTO_IP;
212 sopt.sopt_name = IP_PORTRANGE;
213 sopt.sopt_val = (void *)&ip;
214 sopt.sopt_valsize = sizeof(ip);
216 error = sosetopt(so, &sopt);
219 bzero(&ssin, sizeof ssin);
221 sin->sin_len = sizeof (struct sockaddr_in);
222 sin->sin_family = AF_INET;
223 sin->sin_addr.s_addr = INADDR_ANY;
224 sin->sin_port = htons(0);
225 error = sobind(so, (struct sockaddr *)sin, td);
228 bzero(&sopt, sizeof sopt);
229 ip = IP_PORTRANGE_DEFAULT;
230 sopt.sopt_level = IPPROTO_IP;
231 sopt.sopt_name = IP_PORTRANGE;
232 sopt.sopt_val = (void *)&ip;
233 sopt.sopt_valsize = sizeof(ip);
235 error = sosetopt(so, &sopt);
241 * Protocols that do not require connections may be optionally left
242 * unconnected for servers that reply from a port other than NFS_PORT.
244 if (nmp->nm_flag & NFSMNT_NOCONN) {
245 if (nmp->nm_soflags & PR_CONNREQUIRED) {
250 error = soconnect(so, nmp->nm_nam, td);
255 * Wait for the connection to complete. Cribbed from the
256 * connect system call but with the wait timing out so
257 * that interruptible mounts don't hang here for a long time.
260 while ((so->so_state & SS_ISCONNECTING) && so->so_error == 0) {
261 (void) tsleep((caddr_t)&so->so_timeo, 0,
263 if ((so->so_state & SS_ISCONNECTING) &&
264 so->so_error == 0 && rep &&
265 (error = nfs_sigintr(nmp, rep, rep->r_td)) != 0){
266 so->so_state &= ~SS_ISCONNECTING;
272 error = so->so_error;
279 so->so_rcv.ssb_timeo = (5 * hz);
280 so->so_snd.ssb_timeo = (5 * hz);
283 * Get buffer reservation size from sysctl, but impose reasonable
286 pktscale = nfs_bufpackets;
292 if (nmp->nm_sotype == SOCK_DGRAM) {
293 sndreserve = (nmp->nm_wsize + NFS_MAXPKTHDR) * pktscale;
294 rcvreserve = (max(nmp->nm_rsize, nmp->nm_readdirsize) +
295 NFS_MAXPKTHDR) * pktscale;
296 } else if (nmp->nm_sotype == SOCK_SEQPACKET) {
297 sndreserve = (nmp->nm_wsize + NFS_MAXPKTHDR) * pktscale;
298 rcvreserve = (max(nmp->nm_rsize, nmp->nm_readdirsize) +
299 NFS_MAXPKTHDR) * pktscale;
301 if (nmp->nm_sotype != SOCK_STREAM)
302 panic("nfscon sotype");
303 if (so->so_proto->pr_flags & PR_CONNREQUIRED) {
307 bzero(&sopt, sizeof sopt);
308 sopt.sopt_level = SOL_SOCKET;
309 sopt.sopt_name = SO_KEEPALIVE;
310 sopt.sopt_val = &val;
311 sopt.sopt_valsize = sizeof val;
315 if (so->so_proto->pr_protocol == IPPROTO_TCP) {
319 bzero(&sopt, sizeof sopt);
320 sopt.sopt_level = IPPROTO_TCP;
321 sopt.sopt_name = TCP_NODELAY;
322 sopt.sopt_val = &val;
323 sopt.sopt_valsize = sizeof val;
327 sndreserve = (nmp->nm_wsize + NFS_MAXPKTHDR +
328 sizeof (u_int32_t)) * pktscale;
329 rcvreserve = (nmp->nm_rsize + NFS_MAXPKTHDR +
330 sizeof (u_int32_t)) * pktscale;
332 error = soreserve(so, sndreserve, rcvreserve,
333 &td->td_proc->p_rlimit[RLIMIT_SBSIZE]);
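/*
 * Sizing example (illustrative only): with the default nfs_bufpackets
 * of 4, a 32K write size, and assuming NFS_MAXPKTHDR is 604, the TCP
 * send reservation computed above would be
 * 4 * (32768 + 604 + sizeof(u_int32_t)) == 133504 bytes, roughly 130K.
 */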
336 so->so_rcv.ssb_flags |= SSB_NOINTR;
337 so->so_snd.ssb_flags |= SSB_NOINTR;
339 /* Initialize other non-zero congestion variables */
340 nmp->nm_srtt[0] = nmp->nm_srtt[1] = nmp->nm_srtt[2] =
341 nmp->nm_srtt[3] = (NFS_TIMEO << NFS_RTT_SCALE_BITS);
342 nmp->nm_sdrtt[0] = nmp->nm_sdrtt[1] = nmp->nm_sdrtt[2] =
343 nmp->nm_sdrtt[3] = 0;
344 nmp->nm_maxasync_scaled = NFS_MINASYNC_SCALED;
345 nmp->nm_timeouts = 0;
355 * Called when a connection is broken on a reliable protocol.
356 * - clean up the old socket
357 * - nfs_connect() again
358 * - set R_NEEDSXMIT for all outstanding requests on mount point
359 * If this fails the mount point is DEAD!
360 * nb: Must be called with the nfs_sndlock() set on the mount point.
363 nfs_reconnect(struct nfsmount *nmp, struct nfsreq *rep)
369 while ((error = nfs_connect(nmp, rep)) != 0) {
370 if (error == EINTR || error == ERESTART)
372 (void) tsleep((caddr_t)&lbolt, 0, "nfscon", 0);
376 * Loop through outstanding request list and fix up all requests
380 TAILQ_FOREACH(req, &nmp->nm_reqq, r_chain) {
381 KKASSERT(req->r_nmp == nmp);
382 req->r_flags |= R_NEEDSXMIT;
389 * NFS disconnect. Clean up and unlink.
392 nfs_disconnect(struct nfsmount *nmp)
399 soshutdown(so, SHUT_RDWR);
400 soclose(so, FNONBLOCK);
405 nfs_safedisconnect(struct nfsmount *nmp)
407 nfs_rcvlock(nmp, NULL);
413 * This is the nfs send routine. For connection based socket types, it
414 * must be called with an nfs_sndlock() on the socket.
415 * "rep == NULL" indicates that it has been called from a server.
416 * For the client side:
417 * - return EINTR if the RPC is terminated, 0 otherwise
418 * - set R_NEEDSXMIT if the send fails for any reason
419 * - do any cleanup required by recoverable socket errors (?)
420 * For the server side:
421 * - return EINTR or ERESTART if interrupted by a signal
422 * - return EPIPE if a connection is lost for connection based sockets (TCP...)
423 * - do any cleanup required by recoverable socket errors (?)
426 nfs_send(struct socket *so, struct sockaddr *nam, struct mbuf *top,
429 struct sockaddr *sendnam;
430 int error, soflags, flags;
433 if (rep->r_flags & R_SOFTTERM) {
437 if ((so = rep->r_nmp->nm_so) == NULL) {
438 rep->r_flags |= R_NEEDSXMIT;
442 rep->r_flags &= ~R_NEEDSXMIT;
443 soflags = rep->r_nmp->nm_soflags;
445 soflags = so->so_proto->pr_flags;
447 if ((soflags & PR_CONNREQUIRED) || (so->so_state & SS_ISCONNECTED))
451 if (so->so_type == SOCK_SEQPACKET)
456 error = so_pru_sosend(so, sendnam, NULL, top, NULL, flags,
459 * ENOBUFS for dgram sockets is transient and non-fatal.
460 * No need to log, and no need to break a soft mount.
462 if (error == ENOBUFS && so->so_type == SOCK_DGRAM) {
465 * do backoff retransmit on client
468 rep->r_flags |= R_NEEDSXMIT;
473 log(LOG_INFO, "nfs send error %d for server %s\n",error,
474 rep->r_nmp->nm_mountp->mnt_stat.f_mntfromname);
476 * Deal with errors for the client side.
478 if (rep->r_flags & R_SOFTTERM)
481 rep->r_flags |= R_NEEDSXMIT;
483 log(LOG_INFO, "nfsd send error %d\n", error);
487 * Handle any recoverable (soft) socket errors here. (?)
489 if (error != EINTR && error != ERESTART &&
490 error != EWOULDBLOCK && error != EPIPE)
497 * Receive a Sun RPC Request/Reply. For SOCK_DGRAM, the work is all
498 * done by soreceive(), but for SOCK_STREAM we must deal with the Record
499 * Mark and consolidate the data into a new mbuf list.
500 * nb: Sometimes TCP passes the data up to soreceive() in long lists of
502 * For SOCK_STREAM we must be very careful to read an entire record once
503 * we have read any of it, even if the system call has been interrupted.
506 nfs_receive(struct nfsmount *nmp, struct nfsreq *rep,
507 struct sockaddr **aname, struct mbuf **mp)
514 struct mbuf *control;
516 struct sockaddr **getnam;
517 int error, sotype, rcvflg;
518 struct thread *td = curthread; /* XXX */
521 * Set up arguments for soreceive()
525 sotype = nmp->nm_sotype;
528 * For reliable protocols, lock against other senders/receivers
529 * in case a reconnect is necessary.
530 * For SOCK_STREAM, first get the Record Mark to find out how much
531 * more there is to get.
532 * We must lock the socket against other receivers
533 * until we have an entire rpc request/reply.
535 if (sotype != SOCK_DGRAM) {
536 error = nfs_sndlock(nmp, rep);
541 * Check for fatal errors and resending request.
544 * Ugh: If a reconnect attempt just happened, nm_so
545 * would have changed. NULL indicates a failed
546 * attempt that has essentially shut down this
549 if (rep && (rep->r_mrep || (rep->r_flags & R_SOFTTERM))) {
555 error = nfs_reconnect(nmp, rep);
562 while (rep && (rep->r_flags & R_NEEDSXMIT)) {
563 m = m_copym(rep->r_mreq, 0, M_COPYALL, MB_WAIT);
564 nfsstats.rpcretries++;
565 error = nfs_send(so, rep->r_nmp->nm_nam, m, rep);
567 if (error == EINTR || error == ERESTART ||
568 (error = nfs_reconnect(nmp, rep)) != 0) {
576 if (sotype == SOCK_STREAM) {
578 * Get the length marker from the stream
580 aio.iov_base = (caddr_t)&len;
581 aio.iov_len = sizeof(u_int32_t);
584 auio.uio_segflg = UIO_SYSSPACE;
585 auio.uio_rw = UIO_READ;
587 auio.uio_resid = sizeof(u_int32_t);
590 rcvflg = MSG_WAITALL;
591 error = so_pru_soreceive(so, NULL, &auio, NULL,
593 if (error == EWOULDBLOCK && rep) {
594 if (rep->r_flags & R_SOFTTERM)
597 } while (error == EWOULDBLOCK);
599 if (error == 0 && auio.uio_resid > 0) {
601 * Only log short packets if not EOF
603 if (auio.uio_resid != sizeof(u_int32_t))
605 "short receive (%d/%d) from nfs server %s\n",
606 (int)(sizeof(u_int32_t) - auio.uio_resid),
607 (int)sizeof(u_int32_t),
608 nmp->nm_mountp->mnt_stat.f_mntfromname);
613 len = ntohl(len) & ~0x80000000;
615 * This is SERIOUS! We are out of sync with the sender
616 * and forcing a disconnect/reconnect is all I can do.
618 if (len > NFS_MAXPACKET) {
619 log(LOG_ERR, "%s (%d) from nfs server %s\n",
620 "impossible packet length",
622 nmp->nm_mountp->mnt_stat.f_mntfromname);
628 * Get the rest of the packet as an mbuf chain
632 rcvflg = MSG_WAITALL;
633 error = so_pru_soreceive(so, NULL, NULL, &sio,
635 } while (error == EWOULDBLOCK || error == EINTR ||
637 if (error == 0 && sio.sb_cc != len) {
640 "short receive (%d/%d) from nfs server %s\n",
641 len - auio.uio_resid, len,
642 nmp->nm_mountp->mnt_stat.f_mntfromname);
648 * Non-stream, so get the whole packet by not
649 * specifying MSG_WAITALL and by specifying a large
652 * We have no use for control msg., but must grab them
653 * and then throw them away so we know what is going
656 sbinit(&sio, 100000000);
659 error = so_pru_soreceive(so, NULL, NULL, &sio,
663 if (error == EWOULDBLOCK && rep) {
664 if (rep->r_flags & R_SOFTTERM) {
669 } while (error == EWOULDBLOCK ||
670 (error == 0 && sio.sb_mb == NULL && control));
671 if ((rcvflg & MSG_EOR) == 0)
673 if (error == 0 && sio.sb_mb == NULL)
679 if (error && error != EINTR && error != ERESTART) {
682 if (error != EPIPE) {
684 "receive error %d from nfs server %s\n",
686 nmp->nm_mountp->mnt_stat.f_mntfromname);
688 error = nfs_sndlock(nmp, rep);
690 error = nfs_reconnect(nmp, rep);
698 if ((so = nmp->nm_so) == NULL)
700 if (so->so_state & SS_ISCONNECTED)
704 sbinit(&sio, 100000000);
707 error = so_pru_soreceive(so, getnam, NULL, &sio,
709 if (error == EWOULDBLOCK && rep &&
710 (rep->r_flags & R_SOFTTERM)) {
714 } while (error == EWOULDBLOCK);
723 * Search for any mbufs that are not a multiple of 4 bytes long
724 * or with m_data not longword aligned.
725 * These could cause pointer alignment problems, so copy them to
726 * well aligned mbufs.
728 nfs_realign(mp, 5 * NFSX_UNSIGNED);
733 * Implement receipt of reply on a socket.
735 * We must search through the list of received datagrams matching them
736 * with outstanding requests using the xid, until ours is found.
738 * If myrep is NULL we process packets on the socket until
739 * interrupted or until nm_reqrxq is non-empty.
743 nfs_reply(struct nfsmount *nmp, struct nfsreq *myrep)
746 struct sockaddr *nam;
750 struct nfsm_info info;
753 * Loop around until we get our own reply
757 * Lock against other receivers so that I don't get stuck in
758 * sbwait() after someone else has received my reply for me.
759 * Also necessary for connection based protocols to avoid
760 * race conditions during a reconnect.
762 * If nfs_rcvlock() returns EALREADY, that means that
763 * the reply has already been received by another
764 * process and we can return immediately. In this
765 * case, the lock is not taken to avoid races with
770 error = nfs_rcvlock(nmp, myrep);
771 if (error == EALREADY)
777 * If myrep is NULL we are the receiver helper thread.
778 * Stop waiting for incoming replies if there are
779 * messages sitting on reqrxq that we need to process,
780 * or if a shutdown request is pending.
782 if (myrep == NULL && (TAILQ_FIRST(&nmp->nm_reqrxq) ||
783 nmp->nm_rxstate > NFSSVC_PENDING)) {
789 * Get the next Rpc reply off the socket
791 * We cannot release the receive lock until we've
792 * filled in rep->r_mrep, otherwise a waiting
793 * thread may deadlock in soreceive with no incoming
796 error = nfs_receive(nmp, myrep, &nam, &info.mrep);
799 * Ignore routing errors on connectionless protocols??
802 if (NFSIGNORE_SOERROR(nmp->nm_soflags, error)) {
803 if (nmp->nm_so == NULL)
805 nmp->nm_so->so_error = 0;
814 * Get the xid and check that it is an rpc reply
817 info.dpos = mtod(info.md, caddr_t);
818 NULLOUT(tl = nfsm_dissect(&info, 2*NFSX_UNSIGNED));
820 if (*tl != rpc_reply) {
821 nfsstats.rpcinvalid++;
830 * Loop through the request list to match up the reply
831 * Iff no match, just drop the datagram. On match, set
832 * r_mrep atomically to prevent the timer from messing
833 * around with the request after we have exited the critical
837 TAILQ_FOREACH(rep, &nmp->nm_reqq, r_chain) {
838 if (rep->r_mrep == NULL && rxid == rep->r_xid)
843 * Fill in the rest of the reply if we found a match.
847 rep->r_dpos = info.dpos;
851 rt = &nfsrtt.rttl[nfsrtt.pos];
852 rt->proc = rep->r_procnum;
855 rt->cwnd = nmp->nm_maxasync_scaled;
856 rt->srtt = nmp->nm_srtt[proct[rep->r_procnum] - 1];
857 rt->sdrtt = nmp->nm_sdrtt[proct[rep->r_procnum] - 1];
858 rt->fsid = nmp->nm_mountp->mnt_stat.f_fsid;
859 getmicrotime(&rt->tstamp);
860 if (rep->r_flags & R_TIMING)
861 rt->rtt = rep->r_rtt;
864 nfsrtt.pos = (nfsrtt.pos + 1) % NFSRTTLOGSIZ;
868 * New congestion control is based only on async
871 if (nmp->nm_maxasync_scaled < NFS_MAXASYNC_SCALED)
872 ++nmp->nm_maxasync_scaled;
873 if (rep->r_flags & R_SENT) {
874 rep->r_flags &= ~R_SENT;
877 * Update rtt using a gain of 0.125 on the mean
878 * and a gain of 0.25 on the deviation.
880 * NOTE SRTT/SDRTT are only good if R_TIMING is set.
882 if (rep->r_flags & R_TIMING) {
884 * Since the timer resolution of
885 * NFS_HZ is so coarse, it can often
886 * result in r_rtt == 0. Since
887 * r_rtt == N means that the actual
888 * rtt is between N+dt and N+2-dt ticks,
894 #define NFSRSB NFS_RTT_SCALE_BITS
895 n = ((NFS_SRTT(rep) * 7) +
896 (rep->r_rtt << NFSRSB)) >> 3;
897 d = n - NFS_SRTT(rep);
901 * Don't let the jitter calculation decay
902 * too quickly, but we want a fast ramp-up.
907 if (d < NFS_SDRTT(rep))
908 n = ((NFS_SDRTT(rep) * 15) + d) >> 4;
910 n = ((NFS_SDRTT(rep) * 3) + d) >> 2;
914 nmp->nm_timeouts = 0;
915 rep->r_mrep = info.mrep;
916 nfs_hardterm(rep, 0);
922 * If not matched to a request, drop it.
923 * If it's mine, get out.
926 nfsstats.rpcunexpected++;
929 } else if (rep == myrep) {
930 if (rep->r_mrep == NULL)
931 panic("nfsreply nil");
938 * Run the request state machine until the target state is reached
939 * or a fatal error occurs. The target state is not run. Specifying
940 * a target of NFSM_STATE_DONE runs the state machine until the rpc
943 * EINPROGRESS is returned for all states other than the DONE state,
944 * indicating that the rpc is still in progress.
947 nfs_request(struct nfsm_info *info, nfsm_state_t bstate, nfsm_state_t estate)
951 while (info->state >= bstate && info->state < estate) {
952 switch(info->state) {
953 case NFSM_STATE_SETUP:
955 * Setup the nfsreq. Any error which occurs during
956 * this state is fatal.
958 info->error = nfs_request_setup(info);
960 info->state = NFSM_STATE_DONE;
961 return (info->error);
964 req->r_mrp = &info->mrep;
965 req->r_mdp = &info->md;
966 req->r_dposp = &info->dpos;
967 info->state = NFSM_STATE_AUTH;
970 case NFSM_STATE_AUTH:
972 * Authenticate the nfsreq. Any error which occurs
973 * during this state is fatal.
975 info->error = nfs_request_auth(info->req);
977 info->state = NFSM_STATE_DONE;
978 return (info->error);
980 info->state = NFSM_STATE_TRY;
985 * Transmit or retransmit attempt. An error in this
986 * state is ignored and we always move on to the
989 * This can trivially race the receiver if the
990 * request is asynchronous. nfs_request_try()
991 * will thus set the state for us and we
992 * must also return immediately if we are
993 * running an async state machine, because
994 * info can become invalid due to races after
997 if (info->req->r_flags & R_ASYNC) {
998 nfs_request_try(info->req);
999 if (estate == NFSM_STATE_WAITREPLY)
1000 return (EINPROGRESS);
1002 nfs_request_try(info->req);
1003 info->state = NFSM_STATE_WAITREPLY;
1006 case NFSM_STATE_WAITREPLY:
1008 * Wait for a reply or timeout and move on to the
1009 * next state. The error returned by this state
1010 * is passed to the processing code in the next
1013 info->error = nfs_request_waitreply(info->req);
1014 info->state = NFSM_STATE_PROCESSREPLY;
1016 case NFSM_STATE_PROCESSREPLY:
1018 * Process the reply or timeout. Errors which occur
1019 * in this state may cause the state machine to
1020 * go back to an earlier state, and are fatal
1023 info->error = nfs_request_processreply(info,
1025 switch(info->error) {
1027 info->state = NFSM_STATE_AUTH;
1030 info->state = NFSM_STATE_TRY;
1034 * Operation complete, with or without an
1035 * error. We are done.
1038 info->state = NFSM_STATE_DONE;
1039 return (info->error);
1042 case NFSM_STATE_DONE:
1044 * Shouldn't be reached
1046 return (info->error);
1052 * If we are done return the error code (if any).
1053 * Otherwise return EINPROGRESS.
1055 if (info->state == NFSM_STATE_DONE)
1056 return (info->error);
1057 return (EINPROGRESS);
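/*
 * Illustrative usage sketch (assumed caller, not part of this file):
 * a synchronous caller runs the whole state machine in one shot,
 *
 *	error = nfs_request(&info, NFSM_STATE_SETUP, NFSM_STATE_DONE);
 *
 * while an async caller stops at NFSM_STATE_WAITREPLY and lets the
 * nfsiod reader thread drive the remaining states to completion.
 */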
1061 * nfs_request - goes something like this
1062 * - fill in request struct
1063 * - links it into list
1064 * - calls nfs_send() for first transmit
1065 * - calls nfs_receive() to get reply
1066 * - break down rpc header and return with nfs reply pointed to
1068 * nb: always frees up mreq mbuf list
1071 nfs_request_setup(nfsm_info_t info)
1074 struct nfsmount *nmp;
1079 * Reject requests while attempting a forced unmount.
1081 if (info->vp->v_mount->mnt_kern_flag & MNTK_UNMOUNTF) {
1082 m_freem(info->mreq);
1086 nmp = VFSTONFS(info->vp->v_mount);
1087 req = kmalloc(sizeof(struct nfsreq), M_NFSREQ, M_WAITOK);
1089 req->r_vp = info->vp;
1090 req->r_td = info->td;
1091 req->r_procnum = info->procnum;
1093 req->r_cred = info->cred;
1101 req->r_mrest = info->mreq;
1102 req->r_mrest_len = i;
1105 * The presence of a non-NULL r_info in req indicates
1106 * async completion via our helper threads. See the receiver
1111 req->r_flags = R_ASYNC;
1121 nfs_request_auth(struct nfsreq *rep)
1123 struct nfsmount *nmp = rep->r_nmp;
1125 char nickv[RPCX_NICKVERF];
1126 int error = 0, auth_len, auth_type;
1129 char *auth_str, *verf_str;
1133 rep->r_failed_auth = 0;
1136 * Get the RPC header with authorization.
1138 verf_str = auth_str = NULL;
1139 if (nmp->nm_flag & NFSMNT_KERB) {
1141 verf_len = sizeof (nickv);
1142 auth_type = RPCAUTH_KERB4;
1143 bzero((caddr_t)rep->r_key, sizeof(rep->r_key));
1144 if (rep->r_failed_auth ||
1145 nfs_getnickauth(nmp, cred, &auth_str, &auth_len,
1146 verf_str, verf_len)) {
1147 error = nfs_getauth(nmp, rep, cred, &auth_str,
1148 &auth_len, verf_str, &verf_len, rep->r_key);
1150 m_freem(rep->r_mrest);
1151 rep->r_mrest = NULL;
1152 kfree((caddr_t)rep, M_NFSREQ);
1157 auth_type = RPCAUTH_UNIX;
1158 if (cred->cr_ngroups < 1)
1159 panic("nfsreq nogrps");
1160 auth_len = ((((cred->cr_ngroups - 1) > nmp->nm_numgrps) ?
1161 nmp->nm_numgrps : (cred->cr_ngroups - 1)) << 2) +
1164 m = nfsm_rpchead(cred, nmp->nm_flag, rep->r_procnum, auth_type,
1165 auth_len, auth_str, verf_len, verf_str,
1166 rep->r_mrest, rep->r_mrest_len, &rep->r_mheadend, &xid);
1167 rep->r_mrest = NULL;
1169 kfree(auth_str, M_TEMP);
1172 * For stream protocols, insert a Sun RPC Record Mark.
1174 if (nmp->nm_sotype == SOCK_STREAM) {
1175 M_PREPEND(m, NFSX_UNSIGNED, MB_WAIT);
1177 kfree(rep, M_NFSREQ);
1180 *mtod(m, u_int32_t *) = htonl(0x80000000 |
1181 (m->m_pkthdr.len - NFSX_UNSIGNED));
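/*
 * Worked example of the record mark (illustrative only): for an RPC
 * whose mbuf chain is 128 bytes including the 4-byte mark prepended
 * above, the mark written here is htonl(0x80000000 | 124).  The high
 * bit flags the last (here, only) fragment; the receiving side strips
 * it again with ntohl(len) & ~0x80000000 in nfs_receive() and
 * nfsrv_getstream().
 */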
1189 nfs_request_try(struct nfsreq *rep)
1191 struct nfsmount *nmp = rep->r_nmp;
1196 * Request is not on any queue, only the owner has access to it
1197 * so it should not be locked by anyone atm.
1199 * Interlock to prevent races. While locked the only remote
1200 * action possible is for r_mrep to be set (once we enqueue it).
1202 if (rep->r_flags == 0xdeadc0de) {
1204 panic("flags nbad\n");
1206 KKASSERT((rep->r_flags & (R_LOCKED | R_ONREQQ)) == 0);
1207 if (nmp->nm_flag & NFSMNT_SOFT)
1208 rep->r_retry = nmp->nm_retry;
1210 rep->r_retry = NFS_MAXREXMIT + 1; /* past clip limit */
1211 rep->r_rtt = rep->r_rexmit = 0;
1212 if (proct[rep->r_procnum] > 0)
1213 rep->r_flags |= R_TIMING | R_LOCKED;
1215 rep->r_flags |= R_LOCKED;
1219 * Do the client side RPC.
1221 nfsstats.rpcrequests++;
1224 * Chain request into list of outstanding requests. Be sure
1225 * to put it LAST so timer finds oldest requests first. Note
1226 * that our control of R_LOCKED prevents the request from
1227 * getting ripped out from under us or transmitted by the
1230 * For requests with info structures we must atomically set the
1231 * info's state because the structure could become invalid upon
1232 * return due to races (i.e., if async)
1235 mtx_link_init(&rep->r_link);
1236 TAILQ_INSERT_TAIL(&nmp->nm_reqq, rep, r_chain);
1237 rep->r_flags |= R_ONREQQ;
1239 if (rep->r_flags & R_ASYNC)
1240 rep->r_info->state = NFSM_STATE_WAITREPLY;
1246 * Send if we can. Congestion control is not handled here any more
1247 * because trying to defer the initial send based on the nfs_timer
1248 * requires having a very fast nfs_timer, which is silly.
1251 if (nmp->nm_soflags & PR_CONNREQUIRED)
1252 error = nfs_sndlock(nmp, rep);
1254 m2 = m_copym(rep->r_mreq, 0, M_COPYALL, MB_WAIT);
1255 error = nfs_send(nmp->nm_so, nmp->nm_nam, m2, rep);
1256 if (nmp->nm_soflags & PR_CONNREQUIRED)
1258 rep->r_flags &= ~R_NEEDSXMIT;
1259 if ((rep->r_flags & R_SENT) == 0) {
1260 rep->r_flags |= R_SENT;
1263 rep->r_flags |= R_NEEDSXMIT;
1266 rep->r_flags |= R_NEEDSXMIT;
1273 * Release the lock. The only remote action that may have occurred
1274 * would have been the setting of rep->r_mrep. If this occurred
1275 * and the request was async we have to move it to the reader
1276 * thread's queue for action.
1278 * For async requests also make sure the reader is woken up so
1279 * it gets on the socket to read responses.
1282 if (rep->r_flags & R_ASYNC) {
1284 nfs_hardterm(rep, 1);
1285 rep->r_flags &= ~R_LOCKED;
1286 nfssvc_iod_reader_wakeup(nmp);
1288 rep->r_flags &= ~R_LOCKED;
1290 if (rep->r_flags & R_WANTED) {
1291 rep->r_flags &= ~R_WANTED;
1299 * This code is only called for synchronous requests. Completed synchronous
1300 * requests are left on reqq and we remove them before moving on to the
1304 nfs_request_waitreply(struct nfsreq *rep)
1306 struct nfsmount *nmp = rep->r_nmp;
1309 KKASSERT((rep->r_flags & R_ASYNC) == 0);
1312 * Wait until the request is finished.
1314 error = nfs_reply(nmp, rep);
1317 * RPC done, unlink the request, but don't rip it out from under
1318 * the callout timer.
1320 * Once unlinked no other receiver or the timer will have
1321 * visibility, so we do not have to set R_LOCKED.
1324 while (rep->r_flags & R_LOCKED) {
1325 rep->r_flags |= R_WANTED;
1326 tsleep(rep, 0, "nfstrac", 0);
1328 KKASSERT(rep->r_flags & R_ONREQQ);
1329 TAILQ_REMOVE(&nmp->nm_reqq, rep, r_chain);
1330 rep->r_flags &= ~R_ONREQQ;
1335 * Decrement the outstanding request count.
1337 if (rep->r_flags & R_SENT) {
1338 rep->r_flags &= ~R_SENT;
1344 * Process reply with error returned from nfs_request_waitreply().
1346 * Returns EAGAIN if it wants us to loop up to nfs_request_try() again.
1347 * Returns ENEEDAUTH if it wants us to loop up to nfs_request_auth() again.
1350 nfs_request_processreply(nfsm_info_t info, int error)
1352 struct nfsreq *req = info->req;
1353 struct nfsmount *nmp = req->r_nmp;
1359 * If there was a successful reply and a tprintf msg had been
1360 * printed, tprintf a response saying the server is alive again.
1362 if (error == 0 && (req->r_flags & R_TPRINTFMSG)) {
1363 nfs_msg(req->r_td, nmp->nm_mountp->mnt_stat.f_mntfromname,
1366 info->mrep = req->r_mrep;
1367 info->md = req->r_md;
1368 info->dpos = req->r_dpos;
1370 m_freem(req->r_mreq);
1372 kfree(req, M_NFSREQ);
1378 * break down the rpc header and check if ok
1380 NULLOUT(tl = nfsm_dissect(info, 3 * NFSX_UNSIGNED));
1381 if (*tl++ == rpc_msgdenied) {
1382 if (*tl == rpc_mismatch) {
1384 } else if ((nmp->nm_flag & NFSMNT_KERB) &&
1385 *tl++ == rpc_autherr) {
1386 if (req->r_failed_auth == 0) {
1387 req->r_failed_auth++;
1388 req->r_mheadend->m_next = NULL;
1389 m_freem(info->mrep);
1391 m_freem(req->r_mreq);
1399 m_freem(info->mrep);
1401 m_freem(req->r_mreq);
1403 kfree(req, M_NFSREQ);
1409 * Grab any Kerberos verifier, otherwise just throw it away.
1411 verf_type = fxdr_unsigned(int, *tl++);
1412 i = fxdr_unsigned(int32_t, *tl);
1413 if ((nmp->nm_flag & NFSMNT_KERB) && verf_type == RPCAUTH_KERB4) {
1414 error = nfs_savenickauth(nmp, req->r_cred, i, req->r_key,
1415 &info->md, &info->dpos, info->mrep);
1419 ERROROUT(nfsm_adv(info, nfsm_rndup(i)));
1421 NULLOUT(tl = nfsm_dissect(info, NFSX_UNSIGNED));
1424 NULLOUT(tl = nfsm_dissect(info, NFSX_UNSIGNED));
1426 error = fxdr_unsigned(int, *tl);
1429 * Does anyone even implement this? Just impose
1432 if ((nmp->nm_flag & NFSMNT_NFSV3) &&
1433 error == NFSERR_TRYLATER) {
1434 m_freem(info->mrep);
1438 tsleep((caddr_t)&lbolt, 0, "nqnfstry", 0);
1439 return (EAGAIN); /* goto tryagain */
1443 * If the File Handle was stale, invalidate the
1444 * lookup cache, just in case.
1446 * To avoid namecache<->vnode deadlocks we must
1447 * release the vnode lock if we hold it.
1449 if (error == ESTALE) {
1450 struct vnode *vp = req->r_vp;
1453 ltype = lockstatus(&vp->v_lock, curthread);
1454 if (ltype == LK_EXCLUSIVE || ltype == LK_SHARED)
1455 lockmgr(&vp->v_lock, LK_RELEASE);
1456 cache_inval_vp(vp, CINV_CHILDREN);
1457 if (ltype == LK_EXCLUSIVE || ltype == LK_SHARED)
1458 lockmgr(&vp->v_lock, ltype);
1460 if (nmp->nm_flag & NFSMNT_NFSV3) {
1461 KKASSERT(*req->r_mrp == info->mrep);
1462 KKASSERT(*req->r_mdp == info->md);
1463 KKASSERT(*req->r_dposp == info->dpos);
1464 error |= NFSERR_RETERR;
1466 m_freem(info->mrep);
1469 m_freem(req->r_mreq);
1471 kfree(req, M_NFSREQ);
1476 KKASSERT(*req->r_mrp == info->mrep);
1477 KKASSERT(*req->r_mdp == info->md);
1478 KKASSERT(*req->r_dposp == info->dpos);
1479 m_freem(req->r_mreq);
1481 FREE(req, M_NFSREQ);
1484 m_freem(info->mrep);
1486 error = EPROTONOSUPPORT;
1488 m_freem(req->r_mreq);
1490 kfree(req, M_NFSREQ);
1495 #ifndef NFS_NOSERVER
1497 * Generate the rpc reply header
1498 * siz arg. is used to decide if adding a cluster is worthwhile
1501 nfs_rephead(int siz, struct nfsrv_descript *nd, struct nfssvc_sock *slp,
1502 int err, struct mbuf **mrq, struct mbuf **mbp, caddr_t *bposp)
1505 struct nfsm_info info;
1507 siz += RPC_REPLYSIZ;
1508 info.mb = m_getl(max_hdr + siz, MB_WAIT, MT_DATA, M_PKTHDR, NULL);
1509 info.mreq = info.mb;
1510 info.mreq->m_pkthdr.len = 0;
1512 * If this is not a cluster, try and leave leading space
1513 * for the lower level headers.
1515 if ((max_hdr + siz) < MINCLSIZE)
1516 info.mreq->m_data += max_hdr;
1517 tl = mtod(info.mreq, u_int32_t *);
1518 info.mreq->m_len = 6 * NFSX_UNSIGNED;
1519 info.bpos = ((caddr_t)tl) + info.mreq->m_len;
1520 *tl++ = txdr_unsigned(nd->nd_retxid);
1522 if (err == ERPCMISMATCH || (err & NFSERR_AUTHERR)) {
1523 *tl++ = rpc_msgdenied;
1524 if (err & NFSERR_AUTHERR) {
1525 *tl++ = rpc_autherr;
1526 *tl = txdr_unsigned(err & ~NFSERR_AUTHERR);
1527 info.mreq->m_len -= NFSX_UNSIGNED;
1528 info.bpos -= NFSX_UNSIGNED;
1530 *tl++ = rpc_mismatch;
1531 *tl++ = txdr_unsigned(RPC_VER2);
1532 *tl = txdr_unsigned(RPC_VER2);
1535 *tl++ = rpc_msgaccepted;
1538 * For Kerberos authentication, we must send the nickname
1539 * verifier back, otherwise just RPCAUTH_NULL.
1541 if (nd->nd_flag & ND_KERBFULL) {
1542 struct nfsuid *nuidp;
1543 struct timeval ktvin, ktvout;
1545 for (nuidp = NUIDHASH(slp, nd->nd_cr.cr_uid)->lh_first;
1546 nuidp != 0; nuidp = nuidp->nu_hash.le_next) {
1547 if (nuidp->nu_cr.cr_uid == nd->nd_cr.cr_uid &&
1548 (!nd->nd_nam2 || netaddr_match(NU_NETFAM(nuidp),
1549 &nuidp->nu_haddr, nd->nd_nam2)))
1554 txdr_unsigned(nuidp->nu_timestamp.tv_sec - 1);
1556 txdr_unsigned(nuidp->nu_timestamp.tv_usec);
1559 * Encrypt the timestamp in ecb mode using the
1566 *tl++ = rpc_auth_kerb;
1567 *tl++ = txdr_unsigned(3 * NFSX_UNSIGNED);
1568 *tl = ktvout.tv_sec;
1569 tl = nfsm_build(&info, 3 * NFSX_UNSIGNED);
1570 *tl++ = ktvout.tv_usec;
1571 *tl++ = txdr_unsigned(nuidp->nu_cr.cr_uid);
1582 *tl = txdr_unsigned(RPC_PROGUNAVAIL);
1585 *tl = txdr_unsigned(RPC_PROGMISMATCH);
1586 tl = nfsm_build(&info, 2 * NFSX_UNSIGNED);
1587 *tl++ = txdr_unsigned(2);
1588 *tl = txdr_unsigned(3);
1591 *tl = txdr_unsigned(RPC_PROCUNAVAIL);
1594 *tl = txdr_unsigned(RPC_GARBAGE);
1598 if (err != NFSERR_RETVOID) {
1599 tl = nfsm_build(&info, NFSX_UNSIGNED);
1601 *tl = txdr_unsigned(nfsrv_errmap(nd, err));
1613 if (err != 0 && err != NFSERR_RETVOID)
1614 nfsstats.srvrpc_errs++;
1619 #endif /* NFS_NOSERVER */
1622 * Nfs timer routine.
1624 * Scan the nfsreq list and retransmit any requests that have timed out.
1625 * To avoid retransmission attempts on STREAM sockets (in the future) make
1626 * sure to set the r_retry field to 0 (implies nm_retry == 0).
1628 * Requests with attached responses, terminated requests, and
1629 * locked requests are ignored. Locked requests will be picked up
1630 * in a later timer call.
1633 nfs_timer(void *arg /* never used */)
1635 struct nfsmount *nmp;
1637 #ifndef NFS_NOSERVER
1638 struct nfssvc_sock *slp;
1640 #endif /* NFS_NOSERVER */
1643 TAILQ_FOREACH(nmp, &nfs_mountq, nm_entry) {
1644 TAILQ_FOREACH(req, &nmp->nm_reqq, r_chain) {
1645 KKASSERT(nmp == req->r_nmp);
1648 if (req->r_flags & (R_SOFTTERM | R_LOCKED))
1650 req->r_flags |= R_LOCKED;
1651 if (nfs_sigintr(nmp, req, req->r_td)) {
1652 nfs_softterm(req, 1);
1656 req->r_flags &= ~R_LOCKED;
1657 if (req->r_flags & R_WANTED) {
1658 req->r_flags &= ~R_WANTED;
1663 #ifndef NFS_NOSERVER
1666 * Scan the write gathering queues for writes that need to be
1669 cur_usec = nfs_curusec();
1670 TAILQ_FOREACH(slp, &nfssvc_sockhead, ns_chain) {
1671 if (slp->ns_tq.lh_first && slp->ns_tq.lh_first->nd_time<=cur_usec)
1672 nfsrv_wakenfsd(slp, 1);
1674 #endif /* NFS_NOSERVER */
1676 callout_reset(&nfs_timer_handle, nfs_ticks, nfs_timer, NULL);
1681 nfs_timer_req(struct nfsreq *req)
1683 struct thread *td = &thread0; /* XXX for creds, will break if sleep */
1684 struct nfsmount *nmp = req->r_nmp;
1691 * rtt ticks and timeout calculation. Return if the timeout
1692 * has not been reached yet, unless the packet is flagged
1693 * for an immediate send.
1695 * The mean rtt doesn't help when we get random I/Os, we have
1696 * to multiply by fairly large numbers.
1698 if (req->r_rtt >= 0) {
1700 if (nmp->nm_flag & NFSMNT_DUMBTIMR) {
1701 timeo = nmp->nm_timeo << NFS_RTT_SCALE_BITS;
1702 } else if (req->r_flags & R_TIMING) {
1703 timeo = NFS_SRTT(req) + NFS_SDRTT(req);
1705 timeo = nmp->nm_timeo << NFS_RTT_SCALE_BITS;
1707 /* timeo is still scaled by SCALE_BITS */
1709 #define NFSFS (NFS_RTT_SCALE * NFS_HZ)
1710 if (req->r_flags & R_TIMING) {
1711 static long last_time;
1712 if (nfs_showrtt && last_time != time_second) {
1713 kprintf("rpccmd %d NFS SRTT %d SDRTT %d "
1715 proct[req->r_procnum],
1716 NFS_SRTT(req), NFS_SDRTT(req),
1718 timeo % NFSFS * 1000 / NFSFS);
1719 last_time = time_second;
1725 * deal with nfs_timer jitter.
1727 timeo = (timeo >> NFS_RTT_SCALE_BITS) + 1;
1731 if (nmp->nm_timeouts > 0)
1732 timeo *= nfs_backoff[nmp->nm_timeouts - 1];
1733 if (timeo > NFS_MAXTIMEO)
1734 timeo = NFS_MAXTIMEO;
1735 if (req->r_rtt <= timeo) {
1736 if ((req->r_flags & R_NEEDSXMIT) == 0)
1738 } else if (nmp->nm_timeouts < 8) {
1744 * Check for server not responding
1746 if ((req->r_flags & R_TPRINTFMSG) == 0 &&
1747 req->r_rexmit > nmp->nm_deadthresh) {
1748 nfs_msg(req->r_td, nmp->nm_mountp->mnt_stat.f_mntfromname,
1750 req->r_flags |= R_TPRINTFMSG;
1752 if (req->r_rexmit >= req->r_retry) { /* too many */
1753 nfsstats.rpctimeouts++;
1754 nfs_softterm(req, 1);
1759 * Generally disable retransmission on reliable sockets,
1760 * unless the request is flagged for immediate send.
1762 if (nmp->nm_sotype != SOCK_DGRAM) {
1763 if (++req->r_rexmit > NFS_MAXREXMIT)
1764 req->r_rexmit = NFS_MAXREXMIT;
1765 if ((req->r_flags & R_NEEDSXMIT) == 0)
1770 * Stop here if we do not have a socket!
1772 if ((so = nmp->nm_so) == NULL)
1776 * If there is enough space and the window allows.. resend it.
1778 * Set r_rtt to -1 in case we fail to send it now.
1781 if (ssb_space(&so->so_snd) >= req->r_mreq->m_pkthdr.len &&
1782 (req->r_flags & (R_SENT | R_NEEDSXMIT)) &&
1783 (m = m_copym(req->r_mreq, 0, M_COPYALL, MB_DONTWAIT))){
1784 req->r_flags &= ~R_NEEDSXMIT;
1785 if ((nmp->nm_flag & NFSMNT_NOCONN) == 0)
1786 error = so_pru_send(so, 0, m, NULL, NULL, td);
1788 error = so_pru_send(so, 0, m, nmp->nm_nam,
1791 if (NFSIGNORE_SOERROR(nmp->nm_soflags, error))
1793 req->r_flags |= R_NEEDSXMIT;
1794 } else if (req->r_mrep == NULL) {
1796 * Iff first send, start timing
1797 * else turn timing off, backoff timer
1798 * and divide congestion window by 2.
1800 * It is possible for the so_pru_send() to
1801 * block and for us to race a reply so we
1802 * only do this if the reply field has not
1803 * been filled in. R_LOCKED will prevent
1804 * the request from being ripped out from under
1807 if (req->r_flags & R_SENT) {
1810 req->r_flags &= ~R_TIMING;
1811 if (++req->r_rexmit > NFS_MAXREXMIT)
1812 req->r_rexmit = NFS_MAXREXMIT;
1813 nmp->nm_maxasync_scaled >>= 1;
1814 if (nmp->nm_maxasync_scaled < NFS_MINASYNC_SCALED)
1815 nmp->nm_maxasync_scaled = NFS_MINASYNC_SCALED;
1816 nfsstats.rpcretries++;
1818 req->r_flags |= R_SENT;
1826 * Mark all of an nfs mount's outstanding requests with R_SOFTTERM and
1827 * wait for all requests to complete. This is used by forced unmounts
1828 * to terminate any outstanding RPCs.
1830 * Locked requests cannot be canceled but will be marked for
1834 nfs_nmcancelreqs(struct nfsmount *nmp)
1840 TAILQ_FOREACH(req, &nmp->nm_reqq, r_chain) {
1841 if (req->r_mrep != NULL || (req->r_flags & R_SOFTTERM))
1843 nfs_softterm(req, 0);
1845 /* XXX the other two queues as well */
1848 for (i = 0; i < 30; i++) {
1850 TAILQ_FOREACH(req, &nmp->nm_reqq, r_chain) {
1851 if (nmp == req->r_nmp)
1857 tsleep(&lbolt, 0, "nfscancel", 0);
1863 * Soft-terminate a request, effectively marking it as failed.
1865 * Must be called from within a critical section.
1868 nfs_softterm(struct nfsreq *rep, int islocked)
1870 rep->r_flags |= R_SOFTTERM;
1871 nfs_hardterm(rep, islocked);
1875 * Hard-terminate a request, typically after getting a response.
1877 * The state machine can still decide to re-issue it later if necessary.
1879 * Must be called from within a critical section.
1882 nfs_hardterm(struct nfsreq *rep, int islocked)
1884 struct nfsmount *nmp = rep->r_nmp;
1887 * The nm_send count is decremented now to avoid deadlocks
1888 * when the process in soreceive() hasn't yet managed to send
1891 if (rep->r_flags & R_SENT) {
1892 rep->r_flags &= ~R_SENT;
1896 * If we locked the request or nobody else has locked the request,
1897 * and the request is async, we can move it to the reader thread's
1898 * queue now and fix up the state.
1900 * If we locked the request or nobody else has locked the request,
1901 * we can wake up anyone blocked waiting for a response on the
1904 if (islocked || (rep->r_flags & R_LOCKED) == 0) {
1905 if ((rep->r_flags & (R_ONREQQ | R_ASYNC)) ==
1906 (R_ONREQQ | R_ASYNC)) {
1907 rep->r_flags &= ~R_ONREQQ;
1908 TAILQ_REMOVE(&nmp->nm_reqq, rep, r_chain);
1910 TAILQ_INSERT_TAIL(&nmp->nm_reqrxq, rep, r_chain);
1911 KKASSERT(rep->r_info->state == NFSM_STATE_TRY ||
1912 rep->r_info->state == NFSM_STATE_WAITREPLY);
1913 rep->r_info->state = NFSM_STATE_PROCESSREPLY;
1914 nfssvc_iod_reader_wakeup(nmp);
1916 mtx_abort_ex_link(&nmp->nm_rxlock, &rep->r_link);
1921 * Test for a termination condition pending on the process.
1922 * This is used for NFSMNT_INT mounts.
1925 nfs_sigintr(struct nfsmount *nmp, struct nfsreq *rep, struct thread *td)
1931 if (rep && (rep->r_flags & R_SOFTTERM))
1933 /* Terminate all requests while attempting a forced unmount. */
1934 if (nmp->nm_mountp->mnt_kern_flag & MNTK_UNMOUNTF)
1936 if (!(nmp->nm_flag & NFSMNT_INT))
1938 /* td might be NULL YYY */
1939 if (td == NULL || (p = td->td_proc) == NULL)
1943 tmpset = lwp_sigpend(lp);
1944 SIGSETNAND(tmpset, lp->lwp_sigmask);
1945 SIGSETNAND(tmpset, p->p_sigignore);
1946 if (SIGNOTEMPTY(tmpset) && NFSINT_SIGMASK(tmpset))
1953 * Lock a socket against others.
1954 * Necessary for STREAM sockets to ensure you get an entire rpc request/reply
1955 * and also to avoid race conditions between the processes with nfs requests
1956 * in progress when a reconnect is necessary.
1959 nfs_sndlock(struct nfsmount *nmp, struct nfsreq *rep)
1961 mtx_t mtx = &nmp->nm_txlock;
1969 td = rep ? rep->r_td : NULL;
1970 if (nmp->nm_flag & NFSMNT_INT)
1973 while ((error = mtx_lock_ex_try(mtx)) != 0) {
1974 if (nfs_sigintr(nmp, rep, td)) {
1978 error = mtx_lock_ex(mtx, "nfsndlck", slpflag, slptimeo);
1981 if (slpflag == PCATCH) {
1986 /* Always fail if our request has been cancelled. */
1987 if (rep && (rep->r_flags & R_SOFTTERM)) {
1996 * Unlock the stream socket for others.
1999 nfs_sndunlock(struct nfsmount *nmp)
2001 mtx_unlock(&nmp->nm_txlock);
2005 * Lock the receiver side of the socket.
2010 nfs_rcvlock(struct nfsmount *nmp, struct nfsreq *rep)
2012 mtx_t mtx = &nmp->nm_rxlock;
2018 * Unconditionally check for completion in case another nfsiod
2019 * got the packet while the caller was blocked, before the caller
2020 * called us. Packet reception is handled by mainline code which
2021 * is protected by the BGL at the moment.
2023 * We do not strictly need the second check just before the
2024 * tsleep(), but it's good defensive programming.
2026 if (rep && rep->r_mrep != NULL)
2029 if (nmp->nm_flag & NFSMNT_INT)
2035 while ((error = mtx_lock_ex_try(mtx)) != 0) {
2036 if (nfs_sigintr(nmp, rep, (rep ? rep->r_td : NULL))) {
2040 if (rep && rep->r_mrep != NULL) {
2046 * NOTE: can return ENOLCK, but in that case rep->r_mrep
2047 * will already be set.
2050 error = mtx_lock_ex_link(mtx, &rep->r_link,
2054 error = mtx_lock_ex(mtx, "nfsrcvlk", slpflag, slptimeo);
2060 * If our reply was received while we were sleeping,
2061 * then just return without taking the lock to avoid a
2062 * situation where a single iod could 'capture' the
2065 if (rep && rep->r_mrep != NULL) {
2069 if (slpflag == PCATCH) {
2075 if (rep && rep->r_mrep != NULL) {
2084 * Unlock the stream socket for others.
2087 nfs_rcvunlock(struct nfsmount *nmp)
2089 mtx_unlock(&nmp->nm_rxlock);
2095 * Check for badly aligned mbuf data and realign by copying the unaligned
2096 * portion of the data into a new mbuf chain and freeing the portions
2097 * of the old chain that were replaced.
2099 * We cannot simply realign the data within the existing mbuf chain
2100 * because the underlying buffers may contain other rpc commands and
2101 * we cannot afford to overwrite them.
2103 * We would prefer to avoid this situation entirely. The situation does
2104 * not occur with NFS/UDP and is supposed to only occasionally occur
2105 * with TCP. Use vfs.nfs.realign_count and realign_test to check this.
2108 nfs_realign(struct mbuf **pm, int hsiz)
2111 struct mbuf *n = NULL;
2116 while ((m = *pm) != NULL) {
2117 if ((m->m_len & 0x3) || (mtod(m, intptr_t) & 0x3)) {
2118 n = m_getl(m->m_len, MB_WAIT, MT_DATA, 0, NULL);
2126 * If n is non-NULL, loop on m copying data, then replace the
2127 * portion of the chain that had to be realigned.
2130 ++nfs_realign_count;
2132 m_copyback(n, off, m->m_len, mtod(m, caddr_t));
2141 #ifndef NFS_NOSERVER
2144 * Parse an RPC request
2146 * - fill in the cred struct.
2149 nfs_getreq(struct nfsrv_descript *nd, struct nfsd *nfsd, int has_header)
2156 u_int32_t nfsvers, auth_type;
2158 int error = 0, ticklen;
2159 struct nfsuid *nuidp;
2160 struct timeval tvin, tvout;
2161 struct nfsm_info info;
2162 #if 0 /* until encrypted keys are implemented */
2163 NFSKERBKEYSCHED_T keys; /* stores key schedule */
2166 info.mrep = nd->nd_mrep;
2167 info.md = nd->nd_md;
2168 info.dpos = nd->nd_dpos;
2171 NULLOUT(tl = nfsm_dissect(&info, 10 * NFSX_UNSIGNED));
2172 nd->nd_retxid = fxdr_unsigned(u_int32_t, *tl++);
2173 if (*tl++ != rpc_call) {
2178 NULLOUT(tl = nfsm_dissect(&info, 8 * NFSX_UNSIGNED));
2182 if (*tl++ != rpc_vers) {
2183 nd->nd_repstat = ERPCMISMATCH;
2184 nd->nd_procnum = NFSPROC_NOOP;
2187 if (*tl != nfs_prog) {
2188 nd->nd_repstat = EPROGUNAVAIL;
2189 nd->nd_procnum = NFSPROC_NOOP;
2193 nfsvers = fxdr_unsigned(u_int32_t, *tl++);
2194 if (nfsvers < NFS_VER2 || nfsvers > NFS_VER3) {
2195 nd->nd_repstat = EPROGMISMATCH;
2196 nd->nd_procnum = NFSPROC_NOOP;
2199 if (nfsvers == NFS_VER3)
2200 nd->nd_flag = ND_NFSV3;
2201 nd->nd_procnum = fxdr_unsigned(u_int32_t, *tl++);
2202 if (nd->nd_procnum == NFSPROC_NULL)
2204 if (nd->nd_procnum >= NFS_NPROCS ||
2205 (nd->nd_procnum >= NQNFSPROC_GETLEASE) ||
2206 (!nd->nd_flag && nd->nd_procnum > NFSV2PROC_STATFS)) {
2207 nd->nd_repstat = EPROCUNAVAIL;
2208 nd->nd_procnum = NFSPROC_NOOP;
2211 if ((nd->nd_flag & ND_NFSV3) == 0)
2212 nd->nd_procnum = nfsv3_procid[nd->nd_procnum];
2214 len = fxdr_unsigned(int, *tl++);
2215 if (len < 0 || len > RPCAUTH_MAXSIZ) {
2220 nd->nd_flag &= ~ND_KERBAUTH;
2222 * Handle auth_unix or auth_kerb.
2224 if (auth_type == rpc_auth_unix) {
2225 len = fxdr_unsigned(int, *++tl);
2226 if (len < 0 || len > NFS_MAXNAMLEN) {
2230 ERROROUT(nfsm_adv(&info, nfsm_rndup(len)));
2231 NULLOUT(tl = nfsm_dissect(&info, 3 * NFSX_UNSIGNED));
2232 bzero((caddr_t)&nd->nd_cr, sizeof (struct ucred));
2233 nd->nd_cr.cr_ref = 1;
2234 nd->nd_cr.cr_uid = fxdr_unsigned(uid_t, *tl++);
2235 nd->nd_cr.cr_gid = fxdr_unsigned(gid_t, *tl++);
2236 len = fxdr_unsigned(int, *tl);
2237 if (len < 0 || len > RPCAUTH_UNIXGIDS) {
2241 NULLOUT(tl = nfsm_dissect(&info, (len + 2) * NFSX_UNSIGNED));
2242 for (i = 1; i <= len; i++)
2244 nd->nd_cr.cr_groups[i] = fxdr_unsigned(gid_t, *tl++);
2247 nd->nd_cr.cr_ngroups = (len >= NGROUPS) ? NGROUPS : (len + 1);
2248 if (nd->nd_cr.cr_ngroups > 1)
2249 nfsrvw_sort(nd->nd_cr.cr_groups, nd->nd_cr.cr_ngroups);
2250 len = fxdr_unsigned(int, *++tl);
2251 if (len < 0 || len > RPCAUTH_MAXSIZ) {
2256 ERROROUT(nfsm_adv(&info, nfsm_rndup(len)));
2258 } else if (auth_type == rpc_auth_kerb) {
2259 switch (fxdr_unsigned(int, *tl++)) {
2260 case RPCAKN_FULLNAME:
2261 ticklen = fxdr_unsigned(int, *tl);
2262 *((u_int32_t *)nfsd->nfsd_authstr) = *tl;
2263 uio.uio_resid = nfsm_rndup(ticklen) + NFSX_UNSIGNED;
2264 nfsd->nfsd_authlen = uio.uio_resid + NFSX_UNSIGNED;
2265 if (uio.uio_resid > (len - 2 * NFSX_UNSIGNED)) {
2272 uio.uio_segflg = UIO_SYSSPACE;
2273 iov.iov_base = (caddr_t)&nfsd->nfsd_authstr[4];
2274 iov.iov_len = RPCAUTH_MAXSIZ - 4;
2275 ERROROUT(nfsm_mtouio(&info, &uio, uio.uio_resid));
2276 NULLOUT(tl = nfsm_dissect(&info, 2 * NFSX_UNSIGNED));
2277 if (*tl++ != rpc_auth_kerb ||
2278 fxdr_unsigned(int, *tl) != 4 * NFSX_UNSIGNED) {
2279 kprintf("Bad kerb verifier\n");
2280 nd->nd_repstat = (NFSERR_AUTHERR|AUTH_BADVERF);
2281 nd->nd_procnum = NFSPROC_NOOP;
2284 NULLOUT(cp = nfsm_dissect(&info, 4 * NFSX_UNSIGNED));
2285 tl = (u_int32_t *)cp;
2286 if (fxdr_unsigned(int, *tl) != RPCAKN_FULLNAME) {
2287 kprintf("Not fullname kerb verifier\n");
2288 nd->nd_repstat = (NFSERR_AUTHERR|AUTH_BADVERF);
2289 nd->nd_procnum = NFSPROC_NOOP;
2292 cp += NFSX_UNSIGNED;
2293 bcopy(cp, nfsd->nfsd_verfstr, 3 * NFSX_UNSIGNED);
2294 nfsd->nfsd_verflen = 3 * NFSX_UNSIGNED;
2295 nd->nd_flag |= ND_KERBFULL;
2296 nfsd->nfsd_flag |= NFSD_NEEDAUTH;
2298 case RPCAKN_NICKNAME:
2299 if (len != 2 * NFSX_UNSIGNED) {
2300 kprintf("Kerb nickname short\n");
2301 nd->nd_repstat = (NFSERR_AUTHERR|AUTH_BADCRED);
2302 nd->nd_procnum = NFSPROC_NOOP;
2305 nickuid = fxdr_unsigned(uid_t, *tl);
2306 NULLOUT(tl = nfsm_dissect(&info, 2 * NFSX_UNSIGNED));
2307 if (*tl++ != rpc_auth_kerb ||
2308 fxdr_unsigned(int, *tl) != 3 * NFSX_UNSIGNED) {
2309 kprintf("Kerb nick verifier bad\n");
2310 nd->nd_repstat = (NFSERR_AUTHERR|AUTH_BADVERF);
2311 nd->nd_procnum = NFSPROC_NOOP;
2314 NULLOUT(tl = nfsm_dissect(&info, 3 * NFSX_UNSIGNED));
2315 tvin.tv_sec = *tl++;
2318 for (nuidp = NUIDHASH(nfsd->nfsd_slp,nickuid)->lh_first;
2319 nuidp != 0; nuidp = nuidp->nu_hash.le_next) {
2320 if (nuidp->nu_cr.cr_uid == nickuid &&
2322 netaddr_match(NU_NETFAM(nuidp),
2323 &nuidp->nu_haddr, nd->nd_nam2)))
2328 (NFSERR_AUTHERR|AUTH_REJECTCRED);
2329 nd->nd_procnum = NFSPROC_NOOP;
2334 * Now, decrypt the timestamp using the session key
2341 tvout.tv_sec = fxdr_unsigned(long, tvout.tv_sec);
2342 tvout.tv_usec = fxdr_unsigned(long, tvout.tv_usec);
2343 if (nuidp->nu_expire < time_second ||
2344 nuidp->nu_timestamp.tv_sec > tvout.tv_sec ||
2345 (nuidp->nu_timestamp.tv_sec == tvout.tv_sec &&
2346 nuidp->nu_timestamp.tv_usec > tvout.tv_usec)) {
2347 nuidp->nu_expire = 0;
2349 (NFSERR_AUTHERR|AUTH_REJECTVERF);
2350 nd->nd_procnum = NFSPROC_NOOP;
2353 nfsrv_setcred(&nuidp->nu_cr, &nd->nd_cr);
2354 nd->nd_flag |= ND_KERBNICK;
2357 nd->nd_repstat = (NFSERR_AUTHERR | AUTH_REJECTCRED);
2358 nd->nd_procnum = NFSPROC_NOOP;
2362 nd->nd_md = info.md;
2363 nd->nd_dpos = info.dpos;
2372 * Send a message to the originating process's terminal. The thread and/or
2373 * process may be NULL. YYY the thread should not be NULL but there may
2374 * still be some uio_td's that are still being passed as NULL through to
2378 nfs_msg(struct thread *td, char *server, char *msg)
2382 if (td && td->td_proc)
2383 tpr = tprintf_open(td->td_proc);
2386 tprintf(tpr, "nfs server %s: %s\n", server, msg);
2391 #ifndef NFS_NOSERVER
2393 * Socket upcall routine for the nfsd sockets.
2394 * The caddr_t arg is a pointer to the "struct nfssvc_sock".
2395 * Essentially do as much as possible non-blocking, else punt and it will
2396 * be called with MB_WAIT from an nfsd.
2399 nfsrv_rcv(struct socket *so, void *arg, int waitflag)
2401 struct nfssvc_sock *slp = (struct nfssvc_sock *)arg;
2403 struct sockaddr *nam;
2406 int nparallel_wakeup = 0;
2408 if ((slp->ns_flag & SLP_VALID) == 0)
2412 * Do not allow an infinite number of completed RPC records to build
2413 * up before we stop reading data from the socket. Otherwise we could
2414 * end up holding onto an unreasonable number of mbufs for requests
2415 * waiting for service.
2417 * This should give pretty good feedback to the TCP
2418 * layer and prevents a memory crunch for other protocols.
2420 * Note that the same service socket can be dispatched to several
2421 * nfs servers simultaneously.
2423 * the tcp protocol callback calls us with MB_DONTWAIT.
2424 * nfsd calls us with MB_WAIT (typically).
2426 if (waitflag == MB_DONTWAIT && slp->ns_numrec >= nfsd_waiting / 2 + 1) {
2427 slp->ns_flag |= SLP_NEEDQ;
2432 * Handle protocol specifics to parse an RPC request. We always
2433 * pull from the socket using non-blocking I/O.
2435 if (so->so_type == SOCK_STREAM) {
2437 * The data has to be read in an orderly fashion from a TCP
2438 * stream, unlike a UDP socket. It is possible for soreceive
2439 * and/or nfsrv_getstream() to block, so make sure only one
2440 * entity is messing around with the TCP stream at any given
2441 * moment. The receive sockbuf's lock in soreceive is not
2444 * Note that this procedure can be called from any number of
2445 * NFS servers *OR* can be upcalled directly from a TCP
2448 if (slp->ns_flag & SLP_GETSTREAM) {
2449 slp->ns_flag |= SLP_NEEDQ;
2452 slp->ns_flag |= SLP_GETSTREAM;
2455 * Do soreceive(). Pull out as much data as possible without
2458 sbinit(&sio, 1000000000);
2459 flags = MSG_DONTWAIT;
2460 error = so_pru_soreceive(so, &nam, NULL, &sio, NULL, &flags);
2461 if (error || sio.sb_mb == NULL) {
2462 if (error == EWOULDBLOCK)
2463 slp->ns_flag |= SLP_NEEDQ;
2465 slp->ns_flag |= SLP_DISCONN;
2466 slp->ns_flag &= ~SLP_GETSTREAM;
2470 if (slp->ns_rawend) {
2471 slp->ns_rawend->m_next = m;
2472 slp->ns_cc += sio.sb_cc;
2475 slp->ns_cc = sio.sb_cc;
2482 * Now try and parse as many record(s) as we can out of the
2485 error = nfsrv_getstream(slp, waitflag, &nparallel_wakeup);
2488 slp->ns_flag |= SLP_DISCONN;
2490 slp->ns_flag |= SLP_NEEDQ;
2492 slp->ns_flag &= ~SLP_GETSTREAM;
2495 * For UDP soreceive typically pulls just one packet, loop
2496 * to get the whole batch.
2499 sbinit(&sio, 1000000000);
2500 flags = MSG_DONTWAIT;
2501 error = so_pru_soreceive(so, &nam, NULL, &sio,
2504 struct nfsrv_rec *rec;
2505 int mf = (waitflag & MB_DONTWAIT) ?
2506 M_NOWAIT : M_WAITOK;
2507 rec = kmalloc(sizeof(struct nfsrv_rec),
2511 FREE(nam, M_SONAME);
2515 nfs_realign(&sio.sb_mb, 10 * NFSX_UNSIGNED);
2516 rec->nr_address = nam;
2517 rec->nr_packet = sio.sb_mb;
2518 STAILQ_INSERT_TAIL(&slp->ns_rec, rec, nr_link);
2523 if ((so->so_proto->pr_flags & PR_CONNREQUIRED)
2524 && error != EWOULDBLOCK) {
2525 slp->ns_flag |= SLP_DISCONN;
2529 } while (sio.sb_mb);
2533 * If we were upcalled from the tcp protocol layer and we have
2534 * fully parsed records ready to go, or there is new data pending,
2535 * or something went wrong, try to wake up an nfsd thread to deal
2539 if (waitflag == MB_DONTWAIT && (slp->ns_numrec > 0
2540 || (slp->ns_flag & (SLP_NEEDQ | SLP_DISCONN)))) {
2541 nfsrv_wakenfsd(slp, nparallel_wakeup);
2546 * Try and extract an RPC request from the mbuf data list received on a
2547 * stream socket. The "waitflag" argument indicates whether or not it
2551 nfsrv_getstream(struct nfssvc_sock *slp, int waitflag, int *countp)
2553 struct mbuf *m, **mpp;
2556 struct mbuf *om, *m2, *recm;
2560 if (slp->ns_reclen == 0) {
2561 if (slp->ns_cc < NFSX_UNSIGNED)
2564 if (m->m_len >= NFSX_UNSIGNED) {
2565 bcopy(mtod(m, caddr_t), (caddr_t)&recmark, NFSX_UNSIGNED);
2566 m->m_data += NFSX_UNSIGNED;
2567 m->m_len -= NFSX_UNSIGNED;
2569 cp1 = (caddr_t)&recmark;
2570 cp2 = mtod(m, caddr_t);
2571 while (cp1 < ((caddr_t)&recmark) + NFSX_UNSIGNED) {
2572 while (m->m_len == 0) {
2574 cp2 = mtod(m, caddr_t);
2581 slp->ns_cc -= NFSX_UNSIGNED;
2582 recmark = ntohl(recmark);
2583 slp->ns_reclen = recmark & ~0x80000000;
2584 if (recmark & 0x80000000)
2585 slp->ns_flag |= SLP_LASTFRAG;
2587 slp->ns_flag &= ~SLP_LASTFRAG;
2588 if (slp->ns_reclen > NFS_MAXPACKET || slp->ns_reclen <= 0) {
2589 log(LOG_ERR, "%s (%d) from nfs client\n",
2590 "impossible packet length",
2597 * Now get the record part.
2599 * Note that slp->ns_reclen may be 0. Linux sometimes
2600 * generates 0-length RPCs
2603 if (slp->ns_cc == slp->ns_reclen) {
2605 slp->ns_raw = slp->ns_rawend = NULL;
2606 slp->ns_cc = slp->ns_reclen = 0;
2607 } else if (slp->ns_cc > slp->ns_reclen) {
2612 while (len < slp->ns_reclen) {
2613 if ((len + m->m_len) > slp->ns_reclen) {
2614 m2 = m_copym(m, 0, slp->ns_reclen - len,
2622 m->m_data += slp->ns_reclen - len;
2623 m->m_len -= slp->ns_reclen - len;
2624 len = slp->ns_reclen;
2626 return (EWOULDBLOCK);
2628 } else if ((len + m->m_len) == slp->ns_reclen) {
2648 * Accumulate the fragments into a record.
2650 mpp = &slp->ns_frag;
2652 mpp = &((*mpp)->m_next);
2654 if (slp->ns_flag & SLP_LASTFRAG) {
2655 struct nfsrv_rec *rec;
2656 int mf = (waitflag & MB_DONTWAIT) ? M_NOWAIT : M_WAITOK;
2657 rec = kmalloc(sizeof(struct nfsrv_rec), M_NFSRVDESC, mf);
2659 m_freem(slp->ns_frag);
2661 nfs_realign(&slp->ns_frag, 10 * NFSX_UNSIGNED);
2662 rec->nr_address = NULL;
2663 rec->nr_packet = slp->ns_frag;
2664 STAILQ_INSERT_TAIL(&slp->ns_rec, rec, nr_link);
2668 slp->ns_frag = NULL;
2674 * Parse an RPC header.
2677 nfsrv_dorec(struct nfssvc_sock *slp, struct nfsd *nfsd,
2678 struct nfsrv_descript **ndp)
2680 struct nfsrv_rec *rec;
2682 struct sockaddr *nam;
2683 struct nfsrv_descript *nd;
2687 if ((slp->ns_flag & SLP_VALID) == 0 || !STAILQ_FIRST(&slp->ns_rec))
2689 rec = STAILQ_FIRST(&slp->ns_rec);
2690 STAILQ_REMOVE_HEAD(&slp->ns_rec, nr_link);
2691 KKASSERT(slp->ns_numrec > 0);
2693 nam = rec->nr_address;
2695 kfree(rec, M_NFSRVDESC);
2696 MALLOC(nd, struct nfsrv_descript *, sizeof (struct nfsrv_descript),
2697 M_NFSRVDESC, M_WAITOK);
2698 nd->nd_md = nd->nd_mrep = m;
2700 nd->nd_dpos = mtod(m, caddr_t);
2701 error = nfs_getreq(nd, nfsd, TRUE);
2704 FREE(nam, M_SONAME);
2706 kfree((caddr_t)nd, M_NFSRVDESC);
2715 * Try to assign service sockets to nfsd threads based on the number
2716 * of new rpc requests that have been queued on the service socket.
2718 * If no nfsd's are available or additional requests are pending, set the
2719 * NFSD_CHECKSLP flag so that one of the running nfsds will go look for
2720 * the work in the nfssvc_sock list when it is finished processing its
2721 * current work. This flag is only cleared when an nfsd cannot find
2722 * any new work to perform.
2725 nfsrv_wakenfsd(struct nfssvc_sock *slp, int nparallel)
2729 if ((slp->ns_flag & SLP_VALID) == 0)
2733 TAILQ_FOREACH(nd, &nfsd_head, nfsd_chain) {
2734 if (nd->nfsd_flag & NFSD_WAITING) {
2735 nd->nfsd_flag &= ~NFSD_WAITING;
2737 panic("nfsd wakeup");
2740 wakeup((caddr_t)nd);
2741 if (--nparallel == 0)
2746 slp->ns_flag |= SLP_DOREC;
2747 nfsd_head_flag |= NFSD_CHECKSLP;
2750 #endif /* NFS_NOSERVER */