2 * Copyright (c) 1989, 1991, 1993, 1995
3 * The Regents of the University of California. All rights reserved.
5 * This code is derived from software contributed to Berkeley by
6 * Rick Macklem at The University of Guelph.
8 * Redistribution and use in source and binary forms, with or without
9 * modification, are permitted provided that the following conditions
11 * 1. Redistributions of source code must retain the above copyright
12 * notice, this list of conditions and the following disclaimer.
13 * 2. Redistributions in binary form must reproduce the above copyright
14 * notice, this list of conditions and the following disclaimer in the
15 * documentation and/or other materials provided with the distribution.
16 * 3. All advertising materials mentioning features or use of this software
17 * must display the following acknowledgement:
18 * This product includes software developed by the University of
19 * California, Berkeley and its contributors.
20 * 4. Neither the name of the University nor the names of its contributors
21 * may be used to endorse or promote products derived from this software
22 * without specific prior written permission.
24 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
25 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
26 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
27 * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
28 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
29 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
30 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
31 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
32 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
33 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
36 * @(#)nfs_socket.c 8.5 (Berkeley) 3/30/95
37 * $FreeBSD: src/sys/nfs/nfs_socket.c,v 1.60.2.6 2003/03/26 01:44:46 alfred Exp $
38 * $DragonFly: src/sys/vfs/nfs/nfs_socket.c,v 1.45 2007/05/18 17:05:13 dillon Exp $
42 * Socket operations for use by nfs
45 #include <sys/param.h>
46 #include <sys/systm.h>
48 #include <sys/malloc.h>
49 #include <sys/mount.h>
50 #include <sys/kernel.h>
52 #include <sys/vnode.h>
53 #include <sys/fcntl.h>
54 #include <sys/protosw.h>
55 #include <sys/resourcevar.h>
56 #include <sys/socket.h>
57 #include <sys/socketvar.h>
58 #include <sys/socketops.h>
59 #include <sys/syslog.h>
60 #include <sys/thread.h>
61 #include <sys/tprintf.h>
62 #include <sys/sysctl.h>
63 #include <sys/signalvar.h>
64 #include <sys/mutex.h>
66 #include <sys/signal2.h>
67 #include <sys/mutex2.h>
68 #include <sys/socketvar2.h>
70 #include <netinet/in.h>
71 #include <netinet/tcp.h>
72 #include <sys/thread2.h>
78 #include "nfsm_subs.h"
87 * RTT calculations are scaled by 256 (8 bits). A proper fractional
88 * RTT will still be calculated even with a slow NFS timer.
90 #define NFS_SRTT(r) (r)->r_nmp->nm_srtt[proct[(r)->r_procnum]]
91 #define NFS_SDRTT(r) (r)->r_nmp->nm_sdrtt[proct[(r)->r_procnum]]
92 #define NFS_RTT_SCALE_BITS 8 /* bits */
93 #define NFS_RTT_SCALE 256 /* value */
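/*
 * Illustrative sketch (guarded out, not part of the driver): one smoothed
 * RTT update in the 8-bit fixed-point representation defined above.  The
 * locals srtt_scaled and rtt_ticks are hypothetical; the 7/8 gain matches
 * the update performed in nfs_reply() below.
 */
#if 0
static int
nfs_srtt_example(void)
{
	int srtt_scaled = 4 << NFS_RTT_SCALE_BITS; /* mean of 4 ticks, scaled by 256 */
	int rtt_ticks = 1;			   /* newly measured sample, in ticks */

	/* new mean = 7/8 old mean + 1/8 sample, all in scaled units */
	srtt_scaled = ((srtt_scaled * 7) + (rtt_ticks << NFS_RTT_SCALE_BITS)) >> 3;

	/* (4*256*7 + 1*256) / 8 = 928, i.e. roughly 3.6 ticks */
	return (srtt_scaled);
}
#endif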
96 * Defines which timer to use for the procnum.
103 static int proct[NFS_NPROCS] = {
104 0, 1, 0, 2, 1, 3, 3, 4, 0, 0, /* 00-09 */
105 0, 0, 0, 0, 0, 0, 3, 3, 0, 0, /* 10-19 */
106 0, 5, 0, 0, 0, 0, /* 20-29 */
109 static int multt[NFS_NPROCS] = {
110 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, /* 00-09 */
111 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, /* 10-19 */
112 1, 2, 1, 1, 1, 1, /* 20-29 */
115 static int nfs_backoff[8] = { 2, 3, 5, 8, 13, 21, 34, 55 };
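/*
 * Illustrative sketch (guarded out): how the Fibonacci-style backoff table
 * above is applied by nfs_timer_req().  base_ticks and ntimeouts are
 * hypothetical stand-ins for the per-request timeout and the mount's
 * nm_timeouts counter.
 */
#if 0
static int
nfs_backoff_example(int base_ticks, int ntimeouts)
{
	int timeo = base_ticks;

	if (ntimeouts > 0)
		timeo *= nfs_backoff[ntimeouts - 1];	/* 2, 3, 5, 8, 13, ... */
	if (timeo > NFS_MAXTIMEO)
		timeo = NFS_MAXTIMEO;			/* hard upper clamp */
	return (timeo);
}
#endif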
116 static int nfs_realign_test;
117 static int nfs_realign_count;
118 static int nfs_showrtt;
119 static int nfs_showrexmit;
120 int nfs_maxasyncbio = NFS_MAXASYNCBIO;
122 SYSCTL_DECL(_vfs_nfs);
124 SYSCTL_INT(_vfs_nfs, OID_AUTO, realign_test, CTLFLAG_RW, &nfs_realign_test, 0, "");
125 SYSCTL_INT(_vfs_nfs, OID_AUTO, realign_count, CTLFLAG_RW, &nfs_realign_count, 0, "");
126 SYSCTL_INT(_vfs_nfs, OID_AUTO, showrtt, CTLFLAG_RW, &nfs_showrtt, 0, "");
127 SYSCTL_INT(_vfs_nfs, OID_AUTO, showrexmit, CTLFLAG_RW, &nfs_showrexmit, 0, "");
128 SYSCTL_INT(_vfs_nfs, OID_AUTO, maxasyncbio, CTLFLAG_RW, &nfs_maxasyncbio, 0, "");
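/*
 * Illustrative userland sketch (guarded out, never built into the kernel):
 * reading one of the counters exported above through the standard
 * sysctlbyname(3) interface, e.g. to watch vfs.nfs.realign_count as
 * suggested in the nfs_realign() comment.
 */
#if 0
#include <sys/types.h>
#include <sys/sysctl.h>
#include <stdio.h>

int
main(void)
{
	int count;
	size_t len = sizeof(count);

	if (sysctlbyname("vfs.nfs.realign_count", &count, &len, NULL, 0) == 0)
		printf("realigned mbuf chains: %d\n", count);
	return (0);
}
#endif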
130 static int nfs_request_setup(nfsm_info_t info);
131 static int nfs_request_auth(struct nfsreq *rep);
132 static int nfs_request_try(struct nfsreq *rep);
133 static int nfs_request_waitreply(struct nfsreq *rep);
134 static int nfs_request_processreply(nfsm_info_t info, int);
137 struct nfsrtt nfsrtt;
138 struct callout nfs_timer_handle;
140 static int nfs_msg (struct thread *,char *,char *);
141 static int nfs_rcvlock (struct nfsmount *nmp, struct nfsreq *myreq);
142 static void nfs_rcvunlock (struct nfsmount *nmp);
143 static void nfs_realign (struct mbuf **pm, int hsiz);
144 static int nfs_receive (struct nfsmount *nmp, struct nfsreq *rep,
145 struct sockaddr **aname, struct mbuf **mp);
146 static void nfs_softterm (struct nfsreq *rep, int islocked);
147 static void nfs_hardterm (struct nfsreq *rep, int islocked);
148 static int nfs_reconnect (struct nfsmount *nmp, struct nfsreq *rep);
150 static int nfsrv_getstream (struct nfssvc_sock *, int, int *);
151 static void nfs_timer_req(struct nfsreq *req);
152 static void nfs_checkpkt(struct mbuf *m, int len);
154 int (*nfsrv3_procs[NFS_NPROCS]) (struct nfsrv_descript *nd,
155 struct nfssvc_sock *slp,
157 struct mbuf **mreqp) = {
185 #endif /* NFS_NOSERVER */
188 * Initialize sockets and congestion for a new NFS connection.
189 * We do not free the sockaddr if error.
192 nfs_connect(struct nfsmount *nmp, struct nfsreq *rep)
196 struct sockaddr *saddr;
197 struct sockaddr_in *sin;
198 struct thread *td = &thread0; /* only used for socreate and sobind */
200 nmp->nm_so = so = NULL;
201 if (nmp->nm_flag & NFSMNT_FORCE)
204 error = socreate(saddr->sa_family, &so, nmp->nm_sotype,
205 nmp->nm_soproto, td);
208 nmp->nm_soflags = so->so_proto->pr_flags;
211 * Some servers require that the client port be a reserved port number.
213 if (saddr->sa_family == AF_INET && (nmp->nm_flag & NFSMNT_RESVPORT)) {
216 struct sockaddr_in ssin;
218 bzero(&sopt, sizeof sopt);
219 ip = IP_PORTRANGE_LOW;
220 sopt.sopt_level = IPPROTO_IP;
221 sopt.sopt_name = IP_PORTRANGE;
222 sopt.sopt_val = (void *)&ip;
223 sopt.sopt_valsize = sizeof(ip);
225 error = sosetopt(so, &sopt);
228 bzero(&ssin, sizeof ssin);
230 sin->sin_len = sizeof (struct sockaddr_in);
231 sin->sin_family = AF_INET;
232 sin->sin_addr.s_addr = INADDR_ANY;
233 sin->sin_port = htons(0);
234 error = sobind(so, (struct sockaddr *)sin, td);
237 bzero(&sopt, sizeof sopt);
238 ip = IP_PORTRANGE_DEFAULT;
239 sopt.sopt_level = IPPROTO_IP;
240 sopt.sopt_name = IP_PORTRANGE;
241 sopt.sopt_val = (void *)&ip;
242 sopt.sopt_valsize = sizeof(ip);
244 error = sosetopt(so, &sopt);
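/*
 * Note on the block above: the socket is switched to the low (reserved,
 * < 1024) port range via IP_PORTRANGE_LOW, bound to an anonymous port so
 * the system picks a reserved one, and then switched back to
 * IP_PORTRANGE_DEFAULT.
 */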
250 * Protocols that do not require connections may be optionally left
251 * unconnected for servers that reply from a port other than NFS_PORT.
253 if (nmp->nm_flag & NFSMNT_NOCONN) {
254 if (nmp->nm_soflags & PR_CONNREQUIRED) {
259 error = soconnect(so, nmp->nm_nam, td);
264 * Wait for the connection to complete. Cribbed from the
265 * connect system call but with the wait timing out so
266 * that interruptible mounts don't hang here for a long time.
269 while ((so->so_state & SS_ISCONNECTING) && so->so_error == 0) {
270 (void) tsleep((caddr_t)&so->so_timeo, 0,
272 if ((so->so_state & SS_ISCONNECTING) &&
273 so->so_error == 0 && rep &&
274 (error = nfs_sigintr(nmp, rep, rep->r_td)) != 0){
275 soclrstate(so, SS_ISCONNECTING);
281 error = so->so_error;
288 so->so_rcv.ssb_timeo = (5 * hz);
289 so->so_snd.ssb_timeo = (5 * hz);
292 * Get buffer reservation size from sysctl, but impose reasonable
295 if (nmp->nm_sotype == SOCK_STREAM) {
296 if (so->so_proto->pr_flags & PR_CONNREQUIRED) {
300 bzero(&sopt, sizeof sopt);
301 sopt.sopt_level = SOL_SOCKET;
302 sopt.sopt_name = SO_KEEPALIVE;
303 sopt.sopt_val = &val;
304 sopt.sopt_valsize = sizeof val;
308 if (so->so_proto->pr_protocol == IPPROTO_TCP) {
312 bzero(&sopt, sizeof sopt);
313 sopt.sopt_level = IPPROTO_TCP;
314 sopt.sopt_name = TCP_NODELAY;
315 sopt.sopt_val = &val;
316 sopt.sopt_valsize = sizeof val;
321 error = soreserve(so, nfs_soreserve, nfs_soreserve, NULL);
324 atomic_set_int(&so->so_rcv.ssb_flags, SSB_NOINTR);
325 atomic_set_int(&so->so_snd.ssb_flags, SSB_NOINTR);
327 /* Initialize other non-zero congestion variables */
328 nmp->nm_srtt[0] = nmp->nm_srtt[1] = nmp->nm_srtt[2] =
329 nmp->nm_srtt[3] = (NFS_TIMEO << NFS_RTT_SCALE_BITS);
330 nmp->nm_sdrtt[0] = nmp->nm_sdrtt[1] = nmp->nm_sdrtt[2] =
331 nmp->nm_sdrtt[3] = 0;
332 nmp->nm_maxasync_scaled = NFS_MINASYNC_SCALED;
333 nmp->nm_timeouts = 0;
336 * Assign nm_so last. The moment nm_so is assigned the nfs_timer()
337 * can mess with the socket.
344 soshutdown(so, SHUT_RDWR);
345 soclose(so, FNONBLOCK);
352 * Called when a connection is broken on a reliable protocol.
353 * - clean up the old socket
354 * - nfs_connect() again
355 * - set R_NEEDSXMIT for all outstanding requests on mount point
356 * If this fails the mount point is DEAD!
357 * nb: Must be called with the nfs_sndlock() set on the mount point.
360 nfs_reconnect(struct nfsmount *nmp, struct nfsreq *rep)
366 if (nmp->nm_rxstate >= NFSSVC_STOPPING)
368 while ((error = nfs_connect(nmp, rep)) != 0) {
369 if (error == EINTR || error == ERESTART)
373 if (nmp->nm_rxstate >= NFSSVC_STOPPING)
375 (void) tsleep((caddr_t)&lbolt, 0, "nfscon", 0);
379 * Loop through outstanding request list and fix up all requests
383 TAILQ_FOREACH(req, &nmp->nm_reqq, r_chain) {
384 KKASSERT(req->r_nmp == nmp);
385 req->r_flags |= R_NEEDSXMIT;
392 * NFS disconnect. Clean up and unlink.
395 nfs_disconnect(struct nfsmount *nmp)
402 soshutdown(so, SHUT_RDWR);
403 soclose(so, FNONBLOCK);
408 nfs_safedisconnect(struct nfsmount *nmp)
410 nfs_rcvlock(nmp, NULL);
416 * This is the nfs send routine. For connection based socket types, it
417 * must be called with an nfs_sndlock() on the socket.
418 * "rep == NULL" indicates that it has been called from a server.
419 * For the client side:
420 * - return EINTR if the RPC is terminated, 0 otherwise
421 * - set R_NEEDSXMIT if the send fails for any reason
422 * - do any cleanup required by recoverable socket errors (?)
423 * For the server side:
424 * - return EINTR or ERESTART if interrupted by a signal
425 * - return EPIPE if a connection is lost for connection based sockets (TCP...)
426 * - do any cleanup required by recoverable socket errors (?)
429 nfs_send(struct socket *so, struct sockaddr *nam, struct mbuf *top,
432 struct sockaddr *sendnam;
433 int error, soflags, flags;
436 if (rep->r_flags & R_SOFTTERM) {
440 if ((so = rep->r_nmp->nm_so) == NULL) {
441 rep->r_flags |= R_NEEDSXMIT;
445 rep->r_flags &= ~R_NEEDSXMIT;
446 soflags = rep->r_nmp->nm_soflags;
448 soflags = so->so_proto->pr_flags;
450 if ((soflags & PR_CONNREQUIRED) || (so->so_state & SS_ISCONNECTED))
454 if (so->so_type == SOCK_SEQPACKET)
460 * calls pru_sosend -> sosend -> so_pru_send -> netrpc
462 error = so_pru_sosend(so, sendnam, NULL, top, NULL, flags,
465 * ENOBUFS for dgram sockets is transient and non fatal.
466 * No need to log, and no need to break a soft mount.
468 if (error == ENOBUFS && so->so_type == SOCK_DGRAM) {
471 * do backoff retransmit on client
474 if ((rep->r_nmp->nm_state & NFSSTA_SENDSPACE) == 0) {
475 rep->r_nmp->nm_state |= NFSSTA_SENDSPACE;
476 kprintf("Warning: NFS: Insufficient sendspace "
478 "\t You must increase vfs.nfs.soreserve"
479 "or decrease vfs.nfs.maxasyncbio\n",
480 so->so_snd.ssb_hiwat);
482 rep->r_flags |= R_NEEDSXMIT;
488 log(LOG_INFO, "nfs send error %d for server %s\n",error,
489 rep->r_nmp->nm_mountp->mnt_stat.f_mntfromname);
491 * Deal with errors for the client side.
493 if (rep->r_flags & R_SOFTTERM)
496 rep->r_flags |= R_NEEDSXMIT;
498 log(LOG_INFO, "nfsd send error %d\n", error);
502 * Handle any recoverable (soft) socket errors here. (?)
504 if (error != EINTR && error != ERESTART &&
505 error != EWOULDBLOCK && error != EPIPE)
512 * Receive a Sun RPC Request/Reply. For SOCK_DGRAM, the work is all
513 * done by soreceive(), but for SOCK_STREAM we must deal with the Record
514 * Mark and consolidate the data into a new mbuf list.
515 * nb: Sometimes TCP passes the data up to soreceive() in long lists of
517 * For SOCK_STREAM we must be very careful to read an entire record once
518 * we have read any of it, even if the system call has been interrupted.
521 nfs_receive(struct nfsmount *nmp, struct nfsreq *rep,
522 struct sockaddr **aname, struct mbuf **mp)
529 struct mbuf *control;
531 struct sockaddr **getnam;
532 int error, sotype, rcvflg;
533 struct thread *td = curthread; /* XXX */
536 * Set up arguments for soreceive()
540 sotype = nmp->nm_sotype;
543 * For reliable protocols, lock against other senders/receivers
544 * in case a reconnect is necessary.
545 * For SOCK_STREAM, first get the Record Mark to find out how much
546 * more there is to get.
547 * We must lock the socket against other receivers
548 * until we have an entire rpc request/reply.
550 if (sotype != SOCK_DGRAM) {
551 error = nfs_sndlock(nmp, rep);
556 * Check for fatal errors and resending request.
559 * Ugh: If a reconnect attempt just happened, nm_so
560 * would have changed. NULL indicates a failed
561 * attempt that has essentially shut down this
564 if (rep && (rep->r_mrep || (rep->r_flags & R_SOFTTERM))) {
570 error = nfs_reconnect(nmp, rep);
577 while (rep && (rep->r_flags & R_NEEDSXMIT)) {
578 m = m_copym(rep->r_mreq, 0, M_COPYALL, MB_WAIT);
579 nfsstats.rpcretries++;
580 error = nfs_send(so, rep->r_nmp->nm_nam, m, rep);
582 if (error == EINTR || error == ERESTART ||
583 (error = nfs_reconnect(nmp, rep)) != 0) {
591 if (sotype == SOCK_STREAM) {
593 * Get the length marker from the stream
595 aio.iov_base = (caddr_t)&len;
596 aio.iov_len = sizeof(u_int32_t);
599 auio.uio_segflg = UIO_SYSSPACE;
600 auio.uio_rw = UIO_READ;
602 auio.uio_resid = sizeof(u_int32_t);
605 rcvflg = MSG_WAITALL;
606 error = so_pru_soreceive(so, NULL, &auio, NULL,
608 if (error == EWOULDBLOCK && rep) {
609 if (rep->r_flags & R_SOFTTERM)
612 } while (error == EWOULDBLOCK);
614 if (error == 0 && auio.uio_resid > 0) {
616 * Only log short packets if not EOF
618 if (auio.uio_resid != sizeof(u_int32_t))
620 "short receive (%d/%d) from nfs server %s\n",
621 (int)(sizeof(u_int32_t) - auio.uio_resid),
622 (int)sizeof(u_int32_t),
623 nmp->nm_mountp->mnt_stat.f_mntfromname);
628 len = ntohl(len) & ~0x80000000;
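/*
 * The 32-bit value just read is the Sun RPC record mark: the high bit
 * flags the final fragment of a record and the low 31 bits carry the
 * fragment length.  A mark of 0x80000064 (hypothetical) would therefore
 * mean "last fragment, 100 bytes follow".  Requests built by
 * nfs_request_auth() are sent as single-fragment records with the high
 * bit set.
 */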
630 * This is SERIOUS! We are out of sync with the sender
631 * and forcing a disconnect/reconnect is all I can do.
633 if (len > NFS_MAXPACKET) {
634 log(LOG_ERR, "%s (%d) from nfs server %s\n",
635 "impossible packet length",
637 nmp->nm_mountp->mnt_stat.f_mntfromname);
643 * Get the rest of the packet as an mbuf chain
647 rcvflg = MSG_WAITALL;
648 error = so_pru_soreceive(so, NULL, NULL, &sio,
650 } while (error == EWOULDBLOCK || error == EINTR ||
652 if (error == 0 && sio.sb_cc != len) {
655 "short receive (%zu/%d) from nfs server %s\n",
656 (size_t)len - auio.uio_resid, len,
657 nmp->nm_mountp->mnt_stat.f_mntfromname);
663 * Non-stream, so get the whole packet by not
664 * specifying MSG_WAITALL and by specifying a large
667 * We have no use for control msg., but must grab them
668 * and then throw them away so we know what is going
671 sbinit(&sio, 100000000);
674 error = so_pru_soreceive(so, NULL, NULL, &sio,
678 if (error == EWOULDBLOCK && rep) {
679 if (rep->r_flags & R_SOFTTERM) {
684 } while (error == EWOULDBLOCK ||
685 (error == 0 && sio.sb_mb == NULL && control));
686 if ((rcvflg & MSG_EOR) == 0)
688 if (error == 0 && sio.sb_mb == NULL)
694 if (error && error != EINTR && error != ERESTART) {
697 if (error != EPIPE) {
699 "receive error %d from nfs server %s\n",
701 nmp->nm_mountp->mnt_stat.f_mntfromname);
703 error = nfs_sndlock(nmp, rep);
705 error = nfs_reconnect(nmp, rep);
713 if ((so = nmp->nm_so) == NULL)
715 if (so->so_state & SS_ISCONNECTED)
719 sbinit(&sio, 100000000);
722 error = so_pru_soreceive(so, getnam, NULL, &sio,
724 if (error == EWOULDBLOCK && rep &&
725 (rep->r_flags & R_SOFTTERM)) {
729 } while (error == EWOULDBLOCK);
735 * A shutdown may result in no error and no mbuf.
738 if (*mp == NULL && error == 0)
747 * Search for any mbufs that are not a multiple of 4 bytes long
748 * or with m_data not longword aligned.
749 * These could cause pointer alignment problems, so copy them to
750 * well aligned mbufs.
752 nfs_realign(mp, 5 * NFSX_UNSIGNED);
757 * Implement receipt of reply on a socket.
759 * We must search through the list of received datagrams matching them
760 * with outstanding requests using the xid, until ours is found.
762 * If myrep is NULL we process packets on the socket until
763 * interrupted or until nm_reqrxq is non-empty.
767 nfs_reply(struct nfsmount *nmp, struct nfsreq *myrep)
770 struct sockaddr *nam;
774 struct nfsm_info info;
777 * Loop around until we get our own reply
781 * Lock against other receivers so that I don't get stuck in
782 * sbwait() after someone else has received my reply for me.
783 * Also necessary for connection based protocols to avoid
784 * race conditions during a reconnect.
786 * If nfs_rcvlock() returns EALREADY, that means that
787 * the reply has already been received by another
788 * process and we can return immediately. In this
789 * case, the lock is not taken to avoid races with
794 error = nfs_rcvlock(nmp, myrep);
795 if (error == EALREADY)
801 * If myrep is NULL we are the receiver helper thread.
802 * Stop waiting for incoming replies if there are
803 * messages sitting on reqrxq that we need to process,
804 * or if a shutdown request is pending.
806 if (myrep == NULL && (TAILQ_FIRST(&nmp->nm_reqrxq) ||
807 nmp->nm_rxstate > NFSSVC_PENDING)) {
813 * Get the next Rpc reply off the socket
815 * We cannot release the receive lock until we've
816 * filled in rep->r_mrep, otherwise a waiting
817 * thread may deadlock in soreceive with no incoming
820 error = nfs_receive(nmp, myrep, &nam, &info.mrep);
823 * Ignore routing errors on connectionless protocols??
826 if (NFSIGNORE_SOERROR(nmp->nm_soflags, error)) {
827 if (nmp->nm_so == NULL)
829 nmp->nm_so->so_error = 0;
838 * Get the xid and check that it is an rpc reply
841 info.dpos = mtod(info.md, caddr_t);
842 NULLOUT(tl = nfsm_dissect(&info, 2*NFSX_UNSIGNED));
844 if (*tl != rpc_reply) {
845 nfsstats.rpcinvalid++;
854 * Loop through the request list to match up the reply
855 * Iff no match, just drop the datagram. On match, set
856 * r_mrep atomically to prevent the timer from messing
857 * around with the request after we have exited the critical
861 TAILQ_FOREACH(rep, &nmp->nm_reqq, r_chain) {
862 if (rep->r_mrep == NULL && rxid == rep->r_xid)
867 * Fill in the rest of the reply if we found a match.
869 * Deal with duplicate responses if there was no match.
873 rep->r_dpos = info.dpos;
877 rt = &nfsrtt.rttl[nfsrtt.pos];
878 rt->proc = rep->r_procnum;
881 rt->cwnd = nmp->nm_maxasync_scaled;
882 rt->srtt = nmp->nm_srtt[proct[rep->r_procnum] - 1];
883 rt->sdrtt = nmp->nm_sdrtt[proct[rep->r_procnum] - 1];
884 rt->fsid = nmp->nm_mountp->mnt_stat.f_fsid;
885 getmicrotime(&rt->tstamp);
886 if (rep->r_flags & R_TIMING)
887 rt->rtt = rep->r_rtt;
890 nfsrtt.pos = (nfsrtt.pos + 1) % NFSRTTLOGSIZ;
894 * New congestion control is based only on async
897 if (nmp->nm_maxasync_scaled < NFS_MAXASYNC_SCALED)
898 ++nmp->nm_maxasync_scaled;
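/*
 * This additive increase pairs with the multiplicative decrease
 * (nm_maxasync_scaled >>= 1) applied on retransmit in nfs_timer_req(),
 * giving AIMD-style control over the async request window.
 */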
899 if (rep->r_flags & R_SENT) {
900 rep->r_flags &= ~R_SENT;
903 * Update rtt using a gain of 0.125 on the mean
904 * and a gain of 0.25 on the deviation.
906 * NOTE SRTT/SDRTT are only good if R_TIMING is set.
908 if ((rep->r_flags & R_TIMING) && rep->r_rexmit == 0) {
910 * Since the timer resolution of
911 * NFS_HZ is so coarse, it can often
912 * result in r_rtt == 0. Since
913 * r_rtt == N means that the actual
914 * rtt is between N+dt and N+2-dt ticks,
920 #define NFSRSB NFS_RTT_SCALE_BITS
921 n = ((NFS_SRTT(rep) * 7) +
922 (rep->r_rtt << NFSRSB)) >> 3;
923 d = n - NFS_SRTT(rep);
927 * Don't let the jitter calculation decay
928 * too quickly, but we want a fast rampup.
933 if (d < NFS_SDRTT(rep))
934 n = ((NFS_SDRTT(rep) * 15) + d) >> 4;
936 n = ((NFS_SDRTT(rep) * 3) + d) >> 2;
940 nmp->nm_timeouts = 0;
941 rep->r_mrep = info.mrep;
942 nfs_hardterm(rep, 0);
945 * Extract vers, prog, nfsver, procnum. A duplicate
946 * response means we didn't wait long enough so
947 * we increase the SRTT to avoid future spurious
950 u_int procnum = nmp->nm_lastreprocnum;
953 if (procnum < NFS_NPROCS && proct[procnum]) {
956 n = nmp->nm_srtt[proct[procnum]];
957 n += NFS_ASYSCALE * NFS_HZ;
958 if (n < NFS_ASYSCALE * NFS_HZ * 10)
959 n = NFS_ASYSCALE * NFS_HZ * 10;
960 nmp->nm_srtt[proct[procnum]] = n;
967 * If not matched to a request, drop it.
968 * If it's mine, get out.
971 nfsstats.rpcunexpected++;
974 } else if (rep == myrep) {
975 if (rep->r_mrep == NULL)
976 panic("nfsreply nil");
983 * Run the request state machine until the target state is reached
984 * or a fatal error occurs. The target state is not run. Specifying
985 * a target of NFSM_STATE_DONE runs the state machine until the rpc
988 * EINPROGRESS is returned for all states other than the DONE state,
989 * indicating that the rpc is still in progress.
992 nfs_request(struct nfsm_info *info, nfsm_state_t bstate, nfsm_state_t estate)
996 while (info->state >= bstate && info->state < estate) {
997 switch(info->state) {
998 case NFSM_STATE_SETUP:
1000 * Setup the nfsreq. Any error which occurs during
1001 * this state is fatal.
1003 info->error = nfs_request_setup(info);
1005 info->state = NFSM_STATE_DONE;
1006 return (info->error);
1009 req->r_mrp = &info->mrep;
1010 req->r_mdp = &info->md;
1011 req->r_dposp = &info->dpos;
1012 info->state = NFSM_STATE_AUTH;
1015 case NFSM_STATE_AUTH:
1017 * Authenticate the nfsreq. Any error which occurs
1018 * during this state is fatal.
1020 info->error = nfs_request_auth(info->req);
1022 info->state = NFSM_STATE_DONE;
1023 return (info->error);
1025 info->state = NFSM_STATE_TRY;
1028 case NFSM_STATE_TRY:
1030 * Transmit or retransmit attempt. An error in this
1031 * state is ignored and we always move on to the
1034 * This can trivially race the receiver if the
1035 * request is asynchronous. nfs_request_try()
1036 * will thus set the state for us and we
1037 * must also return immediately if we are
1038 * running an async state machine, because
1039 * info can become invalid due to races after
1042 if (info->req->r_flags & R_ASYNC) {
1043 nfs_request_try(info->req);
1044 if (estate == NFSM_STATE_WAITREPLY)
1045 return (EINPROGRESS);
1047 nfs_request_try(info->req);
1048 info->state = NFSM_STATE_WAITREPLY;
1051 case NFSM_STATE_WAITREPLY:
1053 * Wait for a reply or timeout and move on to the
1054 * next state. The error returned by this state
1055 * is passed to the processing code in the next
1058 info->error = nfs_request_waitreply(info->req);
1059 info->state = NFSM_STATE_PROCESSREPLY;
1061 case NFSM_STATE_PROCESSREPLY:
1063 * Process the reply or timeout. Errors which occur
1064 * in this state may cause the state machine to
1065 * go back to an earlier state, and are fatal
1068 info->error = nfs_request_processreply(info,
1070 switch(info->error) {
1072 info->state = NFSM_STATE_AUTH;
1075 info->state = NFSM_STATE_TRY;
1079 * Operation complete, with or without an
1080 * error. We are done.
1083 info->state = NFSM_STATE_DONE;
1084 return (info->error);
1087 case NFSM_STATE_DONE:
1089 * Shouldn't be reached
1091 return (info->error);
1097 * If we are done return the error code (if any).
1098 * Otherwise return EINPROGRESS.
1100 if (info->state == NFSM_STATE_DONE)
1101 return (info->error);
1102 return (EINPROGRESS);
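/*
 * Illustrative sketch (guarded out): the usual synchronous invocation of
 * the state machine above.  The caller prepares an nfsm_info and runs the
 * machine from SETUP all the way to DONE; an async caller would instead
 * stop at NFSM_STATE_WAITREPLY and let the nfsiod reader thread finish
 * the request.  The wrapper name is hypothetical and the info setup is
 * assumed to have been done by the usual nfsm_* helpers.
 */
#if 0
static int
nfs_request_sync_example(struct nfsm_info *info)
{
	/* run every state; returns only once the RPC has completed */
	return (nfs_request(info, NFSM_STATE_SETUP, NFSM_STATE_DONE));
}
#endif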
1106 * nfs_request - goes something like this
1107 * - fill in request struct
1108 * - links it into list
1109 * - calls nfs_send() for first transmit
1110 * - calls nfs_receive() to get reply
1111 * - break down rpc header and return with nfs reply pointed to
1113 * nb: always frees up mreq mbuf list
1116 nfs_request_setup(nfsm_info_t info)
1119 struct nfsmount *nmp;
1124 * Reject requests while attempting a forced unmount.
1126 if (info->vp->v_mount->mnt_kern_flag & MNTK_UNMOUNTF) {
1127 m_freem(info->mreq);
1131 nmp = VFSTONFS(info->vp->v_mount);
1132 req = kmalloc(sizeof(struct nfsreq), M_NFSREQ, M_WAITOK);
1134 req->r_vp = info->vp;
1135 req->r_td = info->td;
1136 req->r_procnum = info->procnum;
1138 req->r_cred = info->cred;
1146 req->r_mrest = info->mreq;
1147 req->r_mrest_len = i;
1150 * The presence of a non-NULL r_info in req indicates
1151 * async completion via our helper threads. See the receiver
1156 req->r_flags = R_ASYNC;
1166 nfs_request_auth(struct nfsreq *rep)
1168 struct nfsmount *nmp = rep->r_nmp;
1170 char nickv[RPCX_NICKVERF];
1171 int error = 0, auth_len, auth_type;
1174 char *auth_str, *verf_str;
1178 rep->r_failed_auth = 0;
1181 * Get the RPC header with authorization.
1183 verf_str = auth_str = NULL;
1184 if (nmp->nm_flag & NFSMNT_KERB) {
1186 verf_len = sizeof (nickv);
1187 auth_type = RPCAUTH_KERB4;
1188 bzero((caddr_t)rep->r_key, sizeof(rep->r_key));
1189 if (rep->r_failed_auth ||
1190 nfs_getnickauth(nmp, cred, &auth_str, &auth_len,
1191 verf_str, verf_len)) {
1192 error = nfs_getauth(nmp, rep, cred, &auth_str,
1193 &auth_len, verf_str, &verf_len, rep->r_key);
1195 m_freem(rep->r_mrest);
1196 rep->r_mrest = NULL;
1197 kfree((caddr_t)rep, M_NFSREQ);
1202 auth_type = RPCAUTH_UNIX;
1203 if (cred->cr_ngroups < 1)
1204 panic("nfsreq nogrps");
1205 auth_len = ((((cred->cr_ngroups - 1) > nmp->nm_numgrps) ?
1206 nmp->nm_numgrps : (cred->cr_ngroups - 1)) << 2) +
1210 nfs_checkpkt(rep->r_mrest, rep->r_mrest_len);
1211 m = nfsm_rpchead(cred, nmp->nm_flag, rep->r_procnum, auth_type,
1212 auth_len, auth_str, verf_len, verf_str,
1213 rep->r_mrest, rep->r_mrest_len, &rep->r_mheadend, &xid);
1214 rep->r_mrest = NULL;
1216 kfree(auth_str, M_TEMP);
1219 * For stream protocols, insert a Sun RPC Record Mark.
1221 if (nmp->nm_sotype == SOCK_STREAM) {
1222 M_PREPEND(m, NFSX_UNSIGNED, MB_WAIT);
1224 kfree(rep, M_NFSREQ);
1227 *mtod(m, u_int32_t *) = htonl(0x80000000 |
1228 (m->m_pkthdr.len - NFSX_UNSIGNED));
1231 nfs_checkpkt(m, m->m_pkthdr.len);
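/*
 * With the record mark prepended above, the request is self-framing on a
 * TCP stream: the peer reads the 4-byte mark (high bit set, since the
 * whole RPC is sent as a single fragment) followed by exactly
 * m_pkthdr.len - NFSX_UNSIGNED bytes of RPC data.
 */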
1239 nfs_request_try(struct nfsreq *rep)
1241 struct nfsmount *nmp = rep->r_nmp;
1246 * Request is not on any queue, only the owner has access to it
1247 * so it should not be locked by anyone atm.
1249 * Interlock to prevent races. While locked the only remote
1250 * action possible is for r_mrep to be set (once we enqueue it).
1252 if (rep->r_flags == 0xdeadc0de) {
1253 print_backtrace(-1);
1254 panic("flags nbad\n");
1256 KKASSERT((rep->r_flags & (R_LOCKED | R_ONREQQ)) == 0);
1257 if (nmp->nm_flag & NFSMNT_SOFT)
1258 rep->r_retry = nmp->nm_retry;
1260 rep->r_retry = NFS_MAXREXMIT + 1; /* past clip limit */
1261 rep->r_rtt = rep->r_rexmit = 0;
1262 if (proct[rep->r_procnum] > 0)
1263 rep->r_flags |= R_TIMING | R_LOCKED;
1265 rep->r_flags |= R_LOCKED;
1269 * Do the client side RPC.
1271 nfsstats.rpcrequests++;
1273 if (nmp->nm_flag & NFSMNT_FORCE) {
1274 rep->r_flags |= R_SOFTTERM;
1275 rep->r_flags &= ~R_LOCKED;
1280 * Chain request into list of outstanding requests. Be sure
1281 * to put it LAST so timer finds oldest requests first. Note
1282 * that our control of R_LOCKED prevents the request from
1283 * getting ripped out from under us or transmitted by the
1286 * For requests with info structures we must atomically set the
1287 * info's state because the structure could become invalid upon
1288 * return due to races (i.e., if async)
1291 mtx_link_init(&rep->r_link);
1292 TAILQ_INSERT_TAIL(&nmp->nm_reqq, rep, r_chain);
1293 rep->r_flags |= R_ONREQQ;
1295 if (rep->r_flags & R_ASYNC)
1296 rep->r_info->state = NFSM_STATE_WAITREPLY;
1302 * Send if we can. Congestion control is not handled here any more
1303 * because trying to defer the initial send based on the nfs_timer
1304 * requires having a very fast nfs_timer, which is silly.
1307 if (nmp->nm_soflags & PR_CONNREQUIRED)
1308 error = nfs_sndlock(nmp, rep);
1310 m2 = m_copym(rep->r_mreq, 0, M_COPYALL, MB_WAIT);
1311 error = nfs_send(nmp->nm_so, nmp->nm_nam, m2, rep);
1312 if (nmp->nm_soflags & PR_CONNREQUIRED)
1314 rep->r_flags &= ~R_NEEDSXMIT;
1315 if ((rep->r_flags & R_SENT) == 0) {
1316 rep->r_flags |= R_SENT;
1319 rep->r_flags |= R_NEEDSXMIT;
1322 rep->r_flags |= R_NEEDSXMIT;
1329 * Release the lock. The only remote action that may have occurred
1330 * would have been the setting of rep->r_mrep. If this occurred
1331 * and the request was async we have to move it to the reader
1332 * thread's queue for action.
1334 * For async requests also make sure the reader is woken up so
1335 * it gets on the socket to read responses.
1338 if (rep->r_flags & R_ASYNC) {
1340 nfs_hardterm(rep, 1);
1341 rep->r_flags &= ~R_LOCKED;
1342 nfssvc_iod_reader_wakeup(nmp);
1344 rep->r_flags &= ~R_LOCKED;
1346 if (rep->r_flags & R_WANTED) {
1347 rep->r_flags &= ~R_WANTED;
1355 * This code is only called for synchronous requests. Completed synchronous
1356 * requests are left on reqq and we remove them before moving on to the
1360 nfs_request_waitreply(struct nfsreq *rep)
1362 struct nfsmount *nmp = rep->r_nmp;
1365 KKASSERT((rep->r_flags & R_ASYNC) == 0);
1368 * Wait until the request is finished.
1370 error = nfs_reply(nmp, rep);
1373 * RPC done, unlink the request, but don't rip it out from under
1374 * the callout timer.
1376 * Once unlinked no other receiver or the timer will have
1377 * visibility, so we do not have to set R_LOCKED.
1380 while (rep->r_flags & R_LOCKED) {
1381 rep->r_flags |= R_WANTED;
1382 tsleep(rep, 0, "nfstrac", 0);
1384 KKASSERT(rep->r_flags & R_ONREQQ);
1385 TAILQ_REMOVE(&nmp->nm_reqq, rep, r_chain);
1386 rep->r_flags &= ~R_ONREQQ;
1388 if (TAILQ_FIRST(&nmp->nm_bioq) &&
1389 nmp->nm_reqqlen <= nfs_maxasyncbio * 2 / 3) {
1390 nfssvc_iod_writer_wakeup(nmp);
1395 * Decrement the outstanding request count.
1397 if (rep->r_flags & R_SENT) {
1398 rep->r_flags &= ~R_SENT;
1404 * Process reply with error returned from nfs_request_waitreply().
1406 * Returns EAGAIN if it wants us to loop up to nfs_request_try() again.
1407 * Returns ENEEDAUTH if it wants us to loop up to nfs_request_auth() again.
1410 nfs_request_processreply(nfsm_info_t info, int error)
1412 struct nfsreq *req = info->req;
1413 struct nfsmount *nmp = req->r_nmp;
1419 * If there was a successful reply and a tprintf msg has been printed,
1420 * tprintf a response.
1422 if (error == 0 && (req->r_flags & R_TPRINTFMSG)) {
1423 nfs_msg(req->r_td, nmp->nm_mountp->mnt_stat.f_mntfromname,
1426 info->mrep = req->r_mrep;
1427 info->md = req->r_md;
1428 info->dpos = req->r_dpos;
1430 m_freem(req->r_mreq);
1432 kfree(req, M_NFSREQ);
1438 * break down the rpc header and check if ok
1440 NULLOUT(tl = nfsm_dissect(info, 3 * NFSX_UNSIGNED));
1441 if (*tl++ == rpc_msgdenied) {
1442 if (*tl == rpc_mismatch) {
1444 } else if ((nmp->nm_flag & NFSMNT_KERB) &&
1445 *tl++ == rpc_autherr) {
1446 if (req->r_failed_auth == 0) {
1447 req->r_failed_auth++;
1448 req->r_mheadend->m_next = NULL;
1449 m_freem(info->mrep);
1451 m_freem(req->r_mreq);
1460 m_freem(info->mrep);
1462 m_freem(req->r_mreq);
1464 kfree(req, M_NFSREQ);
1470 * Grab any Kerberos verifier, otherwise just throw it away.
1472 verf_type = fxdr_unsigned(int, *tl++);
1473 i = fxdr_unsigned(int32_t, *tl);
1474 if ((nmp->nm_flag & NFSMNT_KERB) && verf_type == RPCAUTH_KERB4) {
1475 error = nfs_savenickauth(nmp, req->r_cred, i, req->r_key,
1476 &info->md, &info->dpos, info->mrep);
1480 ERROROUT(nfsm_adv(info, nfsm_rndup(i)));
1482 NULLOUT(tl = nfsm_dissect(info, NFSX_UNSIGNED));
1485 NULLOUT(tl = nfsm_dissect(info, NFSX_UNSIGNED));
1487 error = fxdr_unsigned(int, *tl);
1490 * Does anyone even implement this? Just impose
1493 if ((nmp->nm_flag & NFSMNT_NFSV3) &&
1494 error == NFSERR_TRYLATER) {
1495 m_freem(info->mrep);
1499 tsleep((caddr_t)&lbolt, 0, "nqnfstry", 0);
1500 return (EAGAIN); /* goto tryagain */
1504 * If the File Handle was stale, invalidate the
1505 * lookup cache, just in case.
1507 * To avoid namecache<->vnode deadlocks we must
1508 * release the vnode lock if we hold it.
1510 if (error == ESTALE) {
1511 struct vnode *vp = req->r_vp;
1514 ltype = lockstatus(&vp->v_lock, curthread);
1515 if (ltype == LK_EXCLUSIVE || ltype == LK_SHARED)
1516 lockmgr(&vp->v_lock, LK_RELEASE);
1517 cache_inval_vp(vp, CINV_CHILDREN);
1518 if (ltype == LK_EXCLUSIVE || ltype == LK_SHARED)
1519 lockmgr(&vp->v_lock, ltype);
1521 if (nmp->nm_flag & NFSMNT_NFSV3) {
1522 KKASSERT(*req->r_mrp == info->mrep);
1523 KKASSERT(*req->r_mdp == info->md);
1524 KKASSERT(*req->r_dposp == info->dpos);
1525 error |= NFSERR_RETERR;
1527 m_freem(info->mrep);
1530 m_freem(req->r_mreq);
1532 kfree(req, M_NFSREQ);
1537 KKASSERT(*req->r_mrp == info->mrep);
1538 KKASSERT(*req->r_mdp == info->md);
1539 KKASSERT(*req->r_dposp == info->dpos);
1540 m_freem(req->r_mreq);
1542 FREE(req, M_NFSREQ);
1545 m_freem(info->mrep);
1547 error = EPROTONOSUPPORT;
1549 m_freem(req->r_mreq);
1551 kfree(req, M_NFSREQ);
1556 #ifndef NFS_NOSERVER
1558 * Generate the rpc reply header
1559 * siz arg. is used to decide if adding a cluster is worthwhile
1562 nfs_rephead(int siz, struct nfsrv_descript *nd, struct nfssvc_sock *slp,
1563 int err, struct mbuf **mrq, struct mbuf **mbp, caddr_t *bposp)
1566 struct nfsm_info info;
1568 siz += RPC_REPLYSIZ;
1569 info.mb = m_getl(max_hdr + siz, MB_WAIT, MT_DATA, M_PKTHDR, NULL);
1570 info.mreq = info.mb;
1571 info.mreq->m_pkthdr.len = 0;
1573 * If this is not a cluster, try and leave leading space
1574 * for the lower level headers.
1576 if ((max_hdr + siz) < MINCLSIZE)
1577 info.mreq->m_data += max_hdr;
1578 tl = mtod(info.mreq, u_int32_t *);
1579 info.mreq->m_len = 6 * NFSX_UNSIGNED;
1580 info.bpos = ((caddr_t)tl) + info.mreq->m_len;
1581 *tl++ = txdr_unsigned(nd->nd_retxid);
1583 if (err == ERPCMISMATCH || (err & NFSERR_AUTHERR)) {
1584 *tl++ = rpc_msgdenied;
1585 if (err & NFSERR_AUTHERR) {
1586 *tl++ = rpc_autherr;
1587 *tl = txdr_unsigned(err & ~NFSERR_AUTHERR);
1588 info.mreq->m_len -= NFSX_UNSIGNED;
1589 info.bpos -= NFSX_UNSIGNED;
1591 *tl++ = rpc_mismatch;
1592 *tl++ = txdr_unsigned(RPC_VER2);
1593 *tl = txdr_unsigned(RPC_VER2);
1596 *tl++ = rpc_msgaccepted;
1599 * For Kerberos authentication, we must send the nickname
1600 * verifier back, otherwise just RPCAUTH_NULL.
1602 if (nd->nd_flag & ND_KERBFULL) {
1603 struct nfsuid *nuidp;
1604 struct timeval ktvin, ktvout;
1606 for (nuidp = NUIDHASH(slp, nd->nd_cr.cr_uid)->lh_first;
1607 nuidp != 0; nuidp = nuidp->nu_hash.le_next) {
1608 if (nuidp->nu_cr.cr_uid == nd->nd_cr.cr_uid &&
1609 (!nd->nd_nam2 || netaddr_match(NU_NETFAM(nuidp),
1610 &nuidp->nu_haddr, nd->nd_nam2)))
1615 txdr_unsigned(nuidp->nu_timestamp.tv_sec - 1);
1617 txdr_unsigned(nuidp->nu_timestamp.tv_usec);
1620 * Encrypt the timestamp in ecb mode using the
1630 *tl++ = rpc_auth_kerb;
1631 *tl++ = txdr_unsigned(3 * NFSX_UNSIGNED);
1632 *tl = ktvout.tv_sec;
1633 tl = nfsm_build(&info, 3 * NFSX_UNSIGNED);
1634 *tl++ = ktvout.tv_usec;
1635 *tl++ = txdr_unsigned(nuidp->nu_cr.cr_uid);
1646 *tl = txdr_unsigned(RPC_PROGUNAVAIL);
1649 *tl = txdr_unsigned(RPC_PROGMISMATCH);
1650 tl = nfsm_build(&info, 2 * NFSX_UNSIGNED);
1651 *tl++ = txdr_unsigned(2);
1652 *tl = txdr_unsigned(3);
1655 *tl = txdr_unsigned(RPC_PROCUNAVAIL);
1658 *tl = txdr_unsigned(RPC_GARBAGE);
1662 if (err != NFSERR_RETVOID) {
1663 tl = nfsm_build(&info, NFSX_UNSIGNED);
1665 *tl = txdr_unsigned(nfsrv_errmap(nd, err));
1677 if (err != 0 && err != NFSERR_RETVOID)
1678 nfsstats.srvrpc_errs++;
1683 #endif /* NFS_NOSERVER */
1686 * Nfs timer routine.
1688 * Scan the nfsreq list and retransmit any requests that have timed out.
1689 * To avoid retransmission attempts on STREAM sockets (in the future) make
1690 * sure to set the r_retry field to 0 (implies nm_retry == 0).
1692 * Requests with attached responses, terminated requests, and
1693 * locked requests are ignored. Locked requests will be picked up
1694 * in a later timer call.
1697 nfs_timer(void *arg /* never used */)
1699 struct nfsmount *nmp;
1701 #ifndef NFS_NOSERVER
1702 struct nfssvc_sock *slp;
1704 #endif /* NFS_NOSERVER */
1707 TAILQ_FOREACH(nmp, &nfs_mountq, nm_entry) {
1708 TAILQ_FOREACH(req, &nmp->nm_reqq, r_chain) {
1709 KKASSERT(nmp == req->r_nmp);
1712 if (req->r_flags & (R_SOFTTERM | R_LOCKED))
1714 req->r_flags |= R_LOCKED;
1715 if (nfs_sigintr(nmp, req, req->r_td)) {
1716 nfs_softterm(req, 1);
1720 req->r_flags &= ~R_LOCKED;
1721 if (req->r_flags & R_WANTED) {
1722 req->r_flags &= ~R_WANTED;
1727 #ifndef NFS_NOSERVER
1730 * Scan the write gathering queues for writes that need to be
1733 cur_usec = nfs_curusec();
1734 TAILQ_FOREACH(slp, &nfssvc_sockhead, ns_chain) {
1735 if (slp->ns_tq.lh_first && slp->ns_tq.lh_first->nd_time<=cur_usec)
1736 nfsrv_wakenfsd(slp, 1);
1738 #endif /* NFS_NOSERVER */
1740 callout_reset(&nfs_timer_handle, nfs_ticks, nfs_timer, NULL);
1745 nfs_timer_req(struct nfsreq *req)
1747 struct thread *td = &thread0; /* XXX for creds, will break if sleep */
1748 struct nfsmount *nmp = req->r_nmp;
1755 * rtt ticks and timeout calculation. Return if the timeout
1756 * has not been reached yet, unless the packet is flagged
1757 * for an immediate send.
1759 * The mean rtt doesn't help when we get random I/Os, we have
1760 * to multiply by fairly large numbers.
1762 if (req->r_rtt >= 0) {
1764 * Calculate the timeout to test against.
1767 if (nmp->nm_flag & NFSMNT_DUMBTIMR) {
1768 timeo = nmp->nm_timeo << NFS_RTT_SCALE_BITS;
1769 } else if (req->r_flags & R_TIMING) {
1770 timeo = NFS_SRTT(req) + NFS_SDRTT(req);
1772 timeo = nmp->nm_timeo << NFS_RTT_SCALE_BITS;
1774 timeo *= multt[req->r_procnum];
1775 /* timeo is still scaled by SCALE_BITS */
1777 #define NFSFS (NFS_RTT_SCALE * NFS_HZ)
1778 if (req->r_flags & R_TIMING) {
1779 static long last_time;
1780 if (nfs_showrtt && last_time != time_second) {
1781 kprintf("rpccmd %d NFS SRTT %d SDRTT %d "
1783 proct[req->r_procnum],
1784 NFS_SRTT(req), NFS_SDRTT(req),
1786 timeo % NFSFS * 1000 / NFSFS);
1787 last_time = time_second;
1793 * deal with nfs_timer jitter.
1795 timeo = (timeo >> NFS_RTT_SCALE_BITS) + 1;
1799 if (nmp->nm_timeouts > 0)
1800 timeo *= nfs_backoff[nmp->nm_timeouts - 1];
1801 if (timeo > NFS_MAXTIMEO)
1802 timeo = NFS_MAXTIMEO;
1803 if (req->r_rtt <= timeo) {
1804 if ((req->r_flags & R_NEEDSXMIT) == 0)
1806 } else if (nmp->nm_timeouts < 8) {
1812 * Check for server not responding
1814 if ((req->r_flags & R_TPRINTFMSG) == 0 &&
1815 req->r_rexmit > nmp->nm_deadthresh) {
1816 nfs_msg(req->r_td, nmp->nm_mountp->mnt_stat.f_mntfromname,
1818 req->r_flags |= R_TPRINTFMSG;
1820 if (req->r_rexmit >= req->r_retry) { /* too many */
1821 nfsstats.rpctimeouts++;
1822 nfs_softterm(req, 1);
1827 * Generally disable retransmission on reliable sockets,
1828 * unless the request is flagged for immediate send.
1830 if (nmp->nm_sotype != SOCK_DGRAM) {
1831 if (++req->r_rexmit > NFS_MAXREXMIT)
1832 req->r_rexmit = NFS_MAXREXMIT;
1833 if ((req->r_flags & R_NEEDSXMIT) == 0)
1838 * Stop here if we do not have a socket!
1840 if ((so = nmp->nm_so) == NULL)
1844 * If there is enough space and the window allows.. resend it.
1846 * r_rtt is left intact in case we get an answer after the
1847 * retry that was a reply to the original packet.
1849 * NOTE: so_pru_send()
1851 if (ssb_space(&so->so_snd) >= req->r_mreq->m_pkthdr.len &&
1852 (req->r_flags & (R_SENT | R_NEEDSXMIT)) &&
1853 (m = m_copym(req->r_mreq, 0, M_COPYALL, MB_DONTWAIT))){
1854 if ((nmp->nm_flag & NFSMNT_NOCONN) == 0)
1855 error = so_pru_send(so, 0, m, NULL, NULL, td);
1857 error = so_pru_send(so, 0, m, nmp->nm_nam, NULL, td);
1859 if (NFSIGNORE_SOERROR(nmp->nm_soflags, error))
1861 req->r_flags |= R_NEEDSXMIT;
1862 } else if (req->r_mrep == NULL) {
1864 * Iff first send, start timing
1865 * else turn timing off, backoff timer
1866 * and divide congestion window by 2.
1868 * It is possible for the so_pru_send() to
1869 * block and for us to race a reply so we
1870 * only do this if the reply field has not
1871 * been filled in. R_LOCKED will prevent
1872 * the request from being ripped out from under
1875 * Record the last resent procnum to aid us
1876 * in duplicate detection on receive.
1878 if ((req->r_flags & R_NEEDSXMIT) == 0) {
1881 if (++req->r_rexmit > NFS_MAXREXMIT)
1882 req->r_rexmit = NFS_MAXREXMIT;
1883 nmp->nm_maxasync_scaled >>= 1;
1884 if (nmp->nm_maxasync_scaled < NFS_MINASYNC_SCALED)
1885 nmp->nm_maxasync_scaled = NFS_MINASYNC_SCALED;
1886 nfsstats.rpcretries++;
1887 nmp->nm_lastreprocnum = req->r_procnum;
1889 req->r_flags |= R_SENT;
1890 req->r_flags &= ~R_NEEDSXMIT;
1897 * Mark all of an nfs mount's outstanding requests with R_SOFTTERM and
1898 * wait for all requests to complete. This is used by forced unmounts
1899 * to terminate any outstanding RPCs.
1901 * Locked requests cannot be canceled but will be marked for
1905 nfs_nmcancelreqs(struct nfsmount *nmp)
1911 TAILQ_FOREACH(req, &nmp->nm_reqq, r_chain) {
1912 if (req->r_mrep != NULL || (req->r_flags & R_SOFTTERM))
1914 nfs_softterm(req, 0);
1916 /* XXX the other two queues as well */
1919 for (i = 0; i < 30; i++) {
1921 TAILQ_FOREACH(req, &nmp->nm_reqq, r_chain) {
1922 if (nmp == req->r_nmp)
1928 tsleep(&lbolt, 0, "nfscancel", 0);
1934 * Soft-terminate a request, effectively marking it as failed.
1936 * Must be called from within a critical section.
1939 nfs_softterm(struct nfsreq *rep, int islocked)
1941 rep->r_flags |= R_SOFTTERM;
1942 nfs_hardterm(rep, islocked);
1946 * Hard-terminate a request, typically after getting a response.
1948 * The state machine can still decide to re-issue it later if necessary.
1950 * Must be called from within a critical section.
1953 nfs_hardterm(struct nfsreq *rep, int islocked)
1955 struct nfsmount *nmp = rep->r_nmp;
1958 * The nm_send count is decremented now to avoid deadlocks
1959 * when the process in soreceive() hasn't yet managed to send
1962 if (rep->r_flags & R_SENT) {
1963 rep->r_flags &= ~R_SENT;
1967 * If we locked the request or nobody else has locked the request,
1968 * and the request is async, we can move it to the reader thread's
1969 * queue now and fix up the state.
1971 * If we locked the request or nobody else has locked the request,
1972 * we can wake up anyone blocked waiting for a response on the
1975 if (islocked || (rep->r_flags & R_LOCKED) == 0) {
1976 if ((rep->r_flags & (R_ONREQQ | R_ASYNC)) ==
1977 (R_ONREQQ | R_ASYNC)) {
1978 rep->r_flags &= ~R_ONREQQ;
1979 TAILQ_REMOVE(&nmp->nm_reqq, rep, r_chain);
1981 TAILQ_INSERT_TAIL(&nmp->nm_reqrxq, rep, r_chain);
1982 KKASSERT(rep->r_info->state == NFSM_STATE_TRY ||
1983 rep->r_info->state == NFSM_STATE_WAITREPLY);
1984 rep->r_info->state = NFSM_STATE_PROCESSREPLY;
1985 nfssvc_iod_reader_wakeup(nmp);
1986 if (TAILQ_FIRST(&nmp->nm_bioq) &&
1987 nmp->nm_reqqlen <= nfs_maxasyncbio * 2 / 3) {
1988 nfssvc_iod_writer_wakeup(nmp);
1991 mtx_abort_ex_link(&nmp->nm_rxlock, &rep->r_link);
1996 * Test for a termination condition pending on the process.
1997 * This is used for NFSMNT_INT mounts.
2000 nfs_sigintr(struct nfsmount *nmp, struct nfsreq *rep, struct thread *td)
2006 if (rep && (rep->r_flags & R_SOFTTERM))
2008 /* Terminate all requests while attempting a forced unmount. */
2009 if (nmp->nm_mountp->mnt_kern_flag & MNTK_UNMOUNTF)
2011 if (!(nmp->nm_flag & NFSMNT_INT))
2013 /* td might be NULL YYY */
2014 if (td == NULL || (p = td->td_proc) == NULL)
2018 tmpset = lwp_sigpend(lp);
2019 SIGSETNAND(tmpset, lp->lwp_sigmask);
2020 SIGSETNAND(tmpset, p->p_sigignore);
2021 if (SIGNOTEMPTY(tmpset) && NFSINT_SIGMASK(tmpset))
2028 * Lock a socket against others.
2029 * Necessary for STREAM sockets to ensure you get an entire rpc request/reply
2030 * and also to avoid race conditions between the processes with nfs requests
2031 * in progress when a reconnect is necessary.
2034 nfs_sndlock(struct nfsmount *nmp, struct nfsreq *rep)
2036 mtx_t mtx = &nmp->nm_txlock;
2044 td = rep ? rep->r_td : NULL;
2045 if (nmp->nm_flag & NFSMNT_INT)
2048 while ((error = mtx_lock_ex_try(mtx)) != 0) {
2049 if (nfs_sigintr(nmp, rep, td)) {
2053 error = mtx_lock_ex(mtx, "nfsndlck", slpflag, slptimeo);
2056 if (slpflag == PCATCH) {
2061 /* Always fail if our request has been cancelled. */
2062 if (rep && (rep->r_flags & R_SOFTTERM)) {
2071 * Unlock the stream socket for others.
2074 nfs_sndunlock(struct nfsmount *nmp)
2076 mtx_unlock(&nmp->nm_txlock);
2080 * Lock the receiver side of the socket.
2085 nfs_rcvlock(struct nfsmount *nmp, struct nfsreq *rep)
2087 mtx_t mtx = &nmp->nm_rxlock;
2093 * Unconditionally check for completion in case another nfsiod
2094 * got the packet while the caller was blocked, before the caller
2095 * called us. Packet reception is handled by mainline code which
2096 * is protected by the BGL at the moment.
2098 * We do not strictly need the second check just before the
2099 * tsleep(), but it's good defensive programming.
2101 if (rep && rep->r_mrep != NULL)
2104 if (nmp->nm_flag & NFSMNT_INT)
2110 while ((error = mtx_lock_ex_try(mtx)) != 0) {
2111 if (nfs_sigintr(nmp, rep, (rep ? rep->r_td : NULL))) {
2115 if (rep && rep->r_mrep != NULL) {
2121 * NOTE: can return ENOLCK, but in that case rep->r_mrep
2122 * will already be set.
2125 error = mtx_lock_ex_link(mtx, &rep->r_link,
2129 error = mtx_lock_ex(mtx, "nfsrcvlk", slpflag, slptimeo);
2135 * If our reply was received while we were sleeping,
2136 * then just return without taking the lock to avoid a
2137 * situation where a single iod could 'capture' the
2140 if (rep && rep->r_mrep != NULL) {
2144 if (slpflag == PCATCH) {
2150 if (rep && rep->r_mrep != NULL) {
2159 * Unlock the stream socket for others.
2162 nfs_rcvunlock(struct nfsmount *nmp)
2164 mtx_unlock(&nmp->nm_rxlock);
2170 * Check for badly aligned mbuf data and realign by copying the unaligned
2171 * portion of the data into a new mbuf chain and freeing the portions
2172 * of the old chain that were replaced.
2174 * We cannot simply realign the data within the existing mbuf chain
2175 * because the underlying buffers may contain other rpc commands and
2176 * we cannot afford to overwrite them.
2178 * We would prefer to avoid this situation entirely. The situation does
2179 * not occur with NFS/UDP and is supposed to only occasionally occur
2180 * with TCP. Use vfs.nfs.realign_count and realign_test to check this.
2182 * NOTE! MB_DONTWAIT cannot be used here. The mbufs must be acquired
2183 * because the rpc request OR reply cannot be thrown away. TCP NFS
2184 * mounts do not retry their RPCs unless the TCP connection itself
2185 * is dropped so throwing away a RPC will basically cause the NFS
2186 * operation to lockup indefinitely.
2189 nfs_realign(struct mbuf **pm, int hsiz)
2192 struct mbuf *n = NULL;
2195 * Check for misalignment
2198 while ((m = *pm) != NULL) {
2199 if ((m->m_len & 0x3) || (mtod(m, intptr_t) & 0x3))
2205 * If misalignment found make a completely new copy.
2208 ++nfs_realign_count;
2209 n = m_dup_data(m, MB_WAIT);
2215 #ifndef NFS_NOSERVER
2218 * Parse an RPC request
2220 * - fill in the cred struct.
2223 nfs_getreq(struct nfsrv_descript *nd, struct nfsd *nfsd, int has_header)
2230 u_int32_t nfsvers, auth_type;
2232 int error = 0, ticklen;
2233 struct nfsuid *nuidp;
2234 struct timeval tvin, tvout;
2235 struct nfsm_info info;
2236 #if 0 /* until encrypted keys are implemented */
2237 NFSKERBKEYSCHED_T keys; /* stores key schedule */
2240 info.mrep = nd->nd_mrep;
2241 info.md = nd->nd_md;
2242 info.dpos = nd->nd_dpos;
2245 NULLOUT(tl = nfsm_dissect(&info, 10 * NFSX_UNSIGNED));
2246 nd->nd_retxid = fxdr_unsigned(u_int32_t, *tl++);
2247 if (*tl++ != rpc_call) {
2252 NULLOUT(tl = nfsm_dissect(&info, 8 * NFSX_UNSIGNED));
2256 if (*tl++ != rpc_vers) {
2257 nd->nd_repstat = ERPCMISMATCH;
2258 nd->nd_procnum = NFSPROC_NOOP;
2261 if (*tl != nfs_prog) {
2262 nd->nd_repstat = EPROGUNAVAIL;
2263 nd->nd_procnum = NFSPROC_NOOP;
2267 nfsvers = fxdr_unsigned(u_int32_t, *tl++);
2268 if (nfsvers < NFS_VER2 || nfsvers > NFS_VER3) {
2269 nd->nd_repstat = EPROGMISMATCH;
2270 nd->nd_procnum = NFSPROC_NOOP;
2273 if (nfsvers == NFS_VER3)
2274 nd->nd_flag = ND_NFSV3;
2275 nd->nd_procnum = fxdr_unsigned(u_int32_t, *tl++);
2276 if (nd->nd_procnum == NFSPROC_NULL)
2278 if (nd->nd_procnum >= NFS_NPROCS ||
2279 (nd->nd_procnum >= NQNFSPROC_GETLEASE) ||
2280 (!nd->nd_flag && nd->nd_procnum > NFSV2PROC_STATFS)) {
2281 nd->nd_repstat = EPROCUNAVAIL;
2282 nd->nd_procnum = NFSPROC_NOOP;
2285 if ((nd->nd_flag & ND_NFSV3) == 0)
2286 nd->nd_procnum = nfsv3_procid[nd->nd_procnum];
2288 len = fxdr_unsigned(int, *tl++);
2289 if (len < 0 || len > RPCAUTH_MAXSIZ) {
2294 nd->nd_flag &= ~ND_KERBAUTH;
2296 * Handle auth_unix or auth_kerb.
2298 if (auth_type == rpc_auth_unix) {
2299 len = fxdr_unsigned(int, *++tl);
2300 if (len < 0 || len > NFS_MAXNAMLEN) {
2304 ERROROUT(nfsm_adv(&info, nfsm_rndup(len)));
2305 NULLOUT(tl = nfsm_dissect(&info, 3 * NFSX_UNSIGNED));
2306 bzero((caddr_t)&nd->nd_cr, sizeof (struct ucred));
2307 nd->nd_cr.cr_ref = 1;
2308 nd->nd_cr.cr_uid = fxdr_unsigned(uid_t, *tl++);
2309 nd->nd_cr.cr_ruid = nd->nd_cr.cr_svuid = nd->nd_cr.cr_uid;
2310 nd->nd_cr.cr_gid = fxdr_unsigned(gid_t, *tl++);
2311 nd->nd_cr.cr_rgid = nd->nd_cr.cr_svgid = nd->nd_cr.cr_gid;
2312 len = fxdr_unsigned(int, *tl);
2313 if (len < 0 || len > RPCAUTH_UNIXGIDS) {
2317 NULLOUT(tl = nfsm_dissect(&info, (len + 2) * NFSX_UNSIGNED));
2318 for (i = 1; i <= len; i++)
2320 nd->nd_cr.cr_groups[i] = fxdr_unsigned(gid_t, *tl++);
2323 nd->nd_cr.cr_ngroups = (len >= NGROUPS) ? NGROUPS : (len + 1);
2324 if (nd->nd_cr.cr_ngroups > 1)
2325 nfsrvw_sort(nd->nd_cr.cr_groups, nd->nd_cr.cr_ngroups);
2326 len = fxdr_unsigned(int, *++tl);
2327 if (len < 0 || len > RPCAUTH_MAXSIZ) {
2332 ERROROUT(nfsm_adv(&info, nfsm_rndup(len)));
2334 } else if (auth_type == rpc_auth_kerb) {
2335 switch (fxdr_unsigned(int, *tl++)) {
2336 case RPCAKN_FULLNAME:
2337 ticklen = fxdr_unsigned(int, *tl);
2338 *((u_int32_t *)nfsd->nfsd_authstr) = *tl;
2339 uio.uio_resid = nfsm_rndup(ticklen) + NFSX_UNSIGNED;
2340 nfsd->nfsd_authlen = uio.uio_resid + NFSX_UNSIGNED;
2341 if (uio.uio_resid > (len - 2 * NFSX_UNSIGNED)) {
2348 uio.uio_segflg = UIO_SYSSPACE;
2349 iov.iov_base = (caddr_t)&nfsd->nfsd_authstr[4];
2350 iov.iov_len = RPCAUTH_MAXSIZ - 4;
2351 ERROROUT(nfsm_mtouio(&info, &uio, uio.uio_resid));
2352 NULLOUT(tl = nfsm_dissect(&info, 2 * NFSX_UNSIGNED));
2353 if (*tl++ != rpc_auth_kerb ||
2354 fxdr_unsigned(int, *tl) != 4 * NFSX_UNSIGNED) {
2355 kprintf("Bad kerb verifier\n");
2356 nd->nd_repstat = (NFSERR_AUTHERR|AUTH_BADVERF);
2357 nd->nd_procnum = NFSPROC_NOOP;
2360 NULLOUT(cp = nfsm_dissect(&info, 4 * NFSX_UNSIGNED));
2361 tl = (u_int32_t *)cp;
2362 if (fxdr_unsigned(int, *tl) != RPCAKN_FULLNAME) {
2363 kprintf("Not fullname kerb verifier\n");
2364 nd->nd_repstat = (NFSERR_AUTHERR|AUTH_BADVERF);
2365 nd->nd_procnum = NFSPROC_NOOP;
2368 cp += NFSX_UNSIGNED;
2369 bcopy(cp, nfsd->nfsd_verfstr, 3 * NFSX_UNSIGNED);
2370 nfsd->nfsd_verflen = 3 * NFSX_UNSIGNED;
2371 nd->nd_flag |= ND_KERBFULL;
2372 nfsd->nfsd_flag |= NFSD_NEEDAUTH;
2374 case RPCAKN_NICKNAME:
2375 if (len != 2 * NFSX_UNSIGNED) {
2376 kprintf("Kerb nickname short\n");
2377 nd->nd_repstat = (NFSERR_AUTHERR|AUTH_BADCRED);
2378 nd->nd_procnum = NFSPROC_NOOP;
2381 nickuid = fxdr_unsigned(uid_t, *tl);
2382 NULLOUT(tl = nfsm_dissect(&info, 2 * NFSX_UNSIGNED));
2383 if (*tl++ != rpc_auth_kerb ||
2384 fxdr_unsigned(int, *tl) != 3 * NFSX_UNSIGNED) {
2385 kprintf("Kerb nick verifier bad\n");
2386 nd->nd_repstat = (NFSERR_AUTHERR|AUTH_BADVERF);
2387 nd->nd_procnum = NFSPROC_NOOP;
2390 NULLOUT(tl = nfsm_dissect(&info, 3 * NFSX_UNSIGNED));
2391 tvin.tv_sec = *tl++;
2394 for (nuidp = NUIDHASH(nfsd->nfsd_slp,nickuid)->lh_first;
2395 nuidp != 0; nuidp = nuidp->nu_hash.le_next) {
2396 if (nuidp->nu_cr.cr_uid == nickuid &&
2398 netaddr_match(NU_NETFAM(nuidp),
2399 &nuidp->nu_haddr, nd->nd_nam2)))
2404 (NFSERR_AUTHERR|AUTH_REJECTCRED);
2405 nd->nd_procnum = NFSPROC_NOOP;
2410 * Now, decrypt the timestamp using the session key
2420 tvout.tv_sec = fxdr_unsigned(long, tvout.tv_sec);
2421 tvout.tv_usec = fxdr_unsigned(long, tvout.tv_usec);
2422 if (nuidp->nu_expire < time_second ||
2423 nuidp->nu_timestamp.tv_sec > tvout.tv_sec ||
2424 (nuidp->nu_timestamp.tv_sec == tvout.tv_sec &&
2425 nuidp->nu_timestamp.tv_usec > tvout.tv_usec)) {
2426 nuidp->nu_expire = 0;
2428 (NFSERR_AUTHERR|AUTH_REJECTVERF);
2429 nd->nd_procnum = NFSPROC_NOOP;
2432 nfsrv_setcred(&nuidp->nu_cr, &nd->nd_cr);
2433 nd->nd_flag |= ND_KERBNICK;
2436 nd->nd_repstat = (NFSERR_AUTHERR | AUTH_REJECTCRED);
2437 nd->nd_procnum = NFSPROC_NOOP;
2441 nd->nd_md = info.md;
2442 nd->nd_dpos = info.dpos;
2451 * Send a message to the originating process's terminal. The thread and/or
2452 * process may be NULL. YYY the thread should not be NULL but there may
2453 * still be some uio_td's being passed as NULL through to
2457 nfs_msg(struct thread *td, char *server, char *msg)
2461 if (td && td->td_proc)
2462 tpr = tprintf_open(td->td_proc);
2465 tprintf(tpr, "nfs server %s: %s\n", server, msg);
2470 #ifndef NFS_NOSERVER
2472 * Socket upcall routine for the nfsd sockets.
2473 * The caddr_t arg is a pointer to the "struct nfssvc_sock".
2474 * Essentially do as much as possible non-blocking, else punt and it will
2475 * be called with MB_WAIT from an nfsd.
2478 nfsrv_rcv(struct socket *so, void *arg, int waitflag)
2480 struct nfssvc_sock *slp = (struct nfssvc_sock *)arg;
2482 struct sockaddr *nam;
2485 int nparallel_wakeup = 0;
2487 if ((slp->ns_flag & SLP_VALID) == 0)
2491 * Do not allow an infinite number of completed RPC records to build
2492 * up before we stop reading data from the socket. Otherwise we could
2493 * end up holding onto an unreasonable number of mbufs for requests
2494 * waiting for service.
2496 * This should give pretty good feedback to the TCP
2497 * layer and prevent a memory crunch for other protocols.
2499 * Note that the same service socket can be dispatched to several
2500 * nfs servers simultaneously.
2502 * the tcp protocol callback calls us with MB_DONTWAIT.
2503 * nfsd calls us with MB_WAIT (typically).
2505 if (waitflag == MB_DONTWAIT && slp->ns_numrec >= nfsd_waiting / 2 + 1) {
2506 slp->ns_flag |= SLP_NEEDQ;
2511 * Handle protocol specifics to parse an RPC request. We always
2512 * pull from the socket using non-blocking I/O.
2514 if (so->so_type == SOCK_STREAM) {
2516 * The data has to be read in an orderly fashion from a TCP
2517 * stream, unlike a UDP socket. It is possible for soreceive
2518 * and/or nfsrv_getstream() to block, so make sure only one
2519 * entity is messing around with the TCP stream at any given
2520 * moment. The receive sockbuf's lock in soreceive is not
2523 * Note that this procedure can be called from any number of
2524 * NFS servers *OR* can be upcalled directly from a TCP
2527 if (slp->ns_flag & SLP_GETSTREAM) {
2528 slp->ns_flag |= SLP_NEEDQ;
2531 slp->ns_flag |= SLP_GETSTREAM;
2534 * Do soreceive(). Pull out as much data as possible without
2537 sbinit(&sio, 1000000000);
2538 flags = MSG_DONTWAIT;
2539 error = so_pru_soreceive(so, &nam, NULL, &sio, NULL, &flags);
2540 if (error || sio.sb_mb == NULL) {
2541 if (error == EWOULDBLOCK)
2542 slp->ns_flag |= SLP_NEEDQ;
2544 slp->ns_flag |= SLP_DISCONN;
2545 slp->ns_flag &= ~SLP_GETSTREAM;
2549 if (slp->ns_rawend) {
2550 slp->ns_rawend->m_next = m;
2551 slp->ns_cc += sio.sb_cc;
2554 slp->ns_cc = sio.sb_cc;
2561 * Now try and parse as many record(s) as we can out of the
2564 error = nfsrv_getstream(slp, waitflag, &nparallel_wakeup);
2567 slp->ns_flag |= SLP_DISCONN;
2569 slp->ns_flag |= SLP_NEEDQ;
2571 slp->ns_flag &= ~SLP_GETSTREAM;
2574 * For UDP soreceive typically pulls just one packet, loop
2575 * to get the whole batch.
2578 sbinit(&sio, 1000000000);
2579 flags = MSG_DONTWAIT;
2580 error = so_pru_soreceive(so, &nam, NULL, &sio,
2583 struct nfsrv_rec *rec;
2584 int mf = (waitflag & MB_DONTWAIT) ?
2585 M_NOWAIT : M_WAITOK;
2586 rec = kmalloc(sizeof(struct nfsrv_rec),
2590 FREE(nam, M_SONAME);
2594 nfs_realign(&sio.sb_mb, 10 * NFSX_UNSIGNED);
2595 rec->nr_address = nam;
2596 rec->nr_packet = sio.sb_mb;
2597 STAILQ_INSERT_TAIL(&slp->ns_rec, rec, nr_link);
2602 if ((so->so_proto->pr_flags & PR_CONNREQUIRED)
2603 && error != EWOULDBLOCK) {
2604 slp->ns_flag |= SLP_DISCONN;
2608 } while (sio.sb_mb);
2612 * If we were upcalled from the tcp protocol layer and we have
2613 * fully parsed records ready to go, or there is new data pending,
2614 * or something went wrong, try to wake up an nfsd thread to deal
2618 if (waitflag == MB_DONTWAIT && (slp->ns_numrec > 0
2619 || (slp->ns_flag & (SLP_NEEDQ | SLP_DISCONN)))) {
2620 nfsrv_wakenfsd(slp, nparallel_wakeup);
2625 * Try and extract an RPC request from the mbuf data list received on a
2626 * stream socket. The "waitflag" argument indicates whether or not it
2630 nfsrv_getstream(struct nfssvc_sock *slp, int waitflag, int *countp)
2632 struct mbuf *m, **mpp;
2635 struct mbuf *om, *m2, *recm;
2639 if (slp->ns_reclen == 0) {
2640 if (slp->ns_cc < NFSX_UNSIGNED)
2643 if (m->m_len >= NFSX_UNSIGNED) {
2644 bcopy(mtod(m, caddr_t), (caddr_t)&recmark, NFSX_UNSIGNED);
2645 m->m_data += NFSX_UNSIGNED;
2646 m->m_len -= NFSX_UNSIGNED;
2648 cp1 = (caddr_t)&recmark;
2649 cp2 = mtod(m, caddr_t);
2650 while (cp1 < ((caddr_t)&recmark) + NFSX_UNSIGNED) {
2651 while (m->m_len == 0) {
2653 cp2 = mtod(m, caddr_t);
2660 slp->ns_cc -= NFSX_UNSIGNED;
2661 recmark = ntohl(recmark);
2662 slp->ns_reclen = recmark & ~0x80000000;
2663 if (recmark & 0x80000000)
2664 slp->ns_flag |= SLP_LASTFRAG;
2666 slp->ns_flag &= ~SLP_LASTFRAG;
2667 if (slp->ns_reclen > NFS_MAXPACKET || slp->ns_reclen <= 0) {
2668 log(LOG_ERR, "%s (%d) from nfs client\n",
2669 "impossible packet length",
2676 * Now get the record part.
2678 * Note that slp->ns_reclen may be 0. Linux sometimes
2679 * generates 0-length RPCs
2682 if (slp->ns_cc == slp->ns_reclen) {
2684 slp->ns_raw = slp->ns_rawend = NULL;
2685 slp->ns_cc = slp->ns_reclen = 0;
2686 } else if (slp->ns_cc > slp->ns_reclen) {
2691 while (len < slp->ns_reclen) {
2692 if ((len + m->m_len) > slp->ns_reclen) {
2693 m2 = m_copym(m, 0, slp->ns_reclen - len,
2701 m->m_data += slp->ns_reclen - len;
2702 m->m_len -= slp->ns_reclen - len;
2703 len = slp->ns_reclen;
2705 return (EWOULDBLOCK);
2707 } else if ((len + m->m_len) == slp->ns_reclen) {
2727 * Accumulate the fragments into a record.
2729 mpp = &slp->ns_frag;
2731 mpp = &((*mpp)->m_next);
2733 if (slp->ns_flag & SLP_LASTFRAG) {
2734 struct nfsrv_rec *rec;
2735 int mf = (waitflag & MB_DONTWAIT) ? M_NOWAIT : M_WAITOK;
2736 rec = kmalloc(sizeof(struct nfsrv_rec), M_NFSRVDESC, mf);
2738 m_freem(slp->ns_frag);
2740 nfs_realign(&slp->ns_frag, 10 * NFSX_UNSIGNED);
2741 rec->nr_address = NULL;
2742 rec->nr_packet = slp->ns_frag;
2743 STAILQ_INSERT_TAIL(&slp->ns_rec, rec, nr_link);
2747 slp->ns_frag = NULL;
2755 * Sanity check our mbuf chain.
2758 nfs_checkpkt(struct mbuf *m, int len)
2766 panic("nfs_checkpkt: len mismatch %d/%d mbuf %p\n",
2774 nfs_checkpkt(struct mbuf *m __unused, int len __unused)
2781 * Parse an RPC header.
2784 nfsrv_dorec(struct nfssvc_sock *slp, struct nfsd *nfsd,
2785 struct nfsrv_descript **ndp)
2787 struct nfsrv_rec *rec;
2789 struct sockaddr *nam;
2790 struct nfsrv_descript *nd;
2794 if ((slp->ns_flag & SLP_VALID) == 0 || !STAILQ_FIRST(&slp->ns_rec))
2796 rec = STAILQ_FIRST(&slp->ns_rec);
2797 STAILQ_REMOVE_HEAD(&slp->ns_rec, nr_link);
2798 KKASSERT(slp->ns_numrec > 0);
2800 nam = rec->nr_address;
2802 kfree(rec, M_NFSRVDESC);
2803 MALLOC(nd, struct nfsrv_descript *, sizeof (struct nfsrv_descript),
2804 M_NFSRVDESC, M_WAITOK);
2805 nd->nd_md = nd->nd_mrep = m;
2807 nd->nd_dpos = mtod(m, caddr_t);
2808 error = nfs_getreq(nd, nfsd, TRUE);
2811 FREE(nam, M_SONAME);
2813 kfree((caddr_t)nd, M_NFSRVDESC);
2822 * Try to assign service sockets to nfsd threads based on the number
2823 * of new rpc requests that have been queued on the service socket.
2825 * If no nfsd's are available or additional requests are pending, set the
2826 * NFSD_CHECKSLP flag so that one of the running nfsds will go look for
2827 * the work in the nfssvc_sock list when it is finished processing its
2828 * current work. This flag is only cleared when an nfsd can not find
2829 * any new work to perform.
2832 nfsrv_wakenfsd(struct nfssvc_sock *slp, int nparallel)
2836 if ((slp->ns_flag & SLP_VALID) == 0)
2840 TAILQ_FOREACH(nd, &nfsd_head, nfsd_chain) {
2841 if (nd->nfsd_flag & NFSD_WAITING) {
2842 nd->nfsd_flag &= ~NFSD_WAITING;
2844 panic("nfsd wakeup");
2847 wakeup((caddr_t)nd);
2848 if (--nparallel == 0)
2853 slp->ns_flag |= SLP_DOREC;
2854 nfsd_head_flag |= NFSD_CHECKSLP;
2857 #endif /* NFS_NOSERVER */