2 * Copyright (c) 2000-2001 Boris Popov
5 * Redistribution and use in source and binary forms, with or without
6 * modification, are permitted provided that the following conditions
8 * 1. Redistributions of source code must retain the above copyright
9 * notice, this list of conditions and the following disclaimer.
10 * 2. Redistributions in binary form must reproduce the above copyright
11 * notice, this list of conditions and the following disclaimer in the
12 * documentation and/or other materials provided with the distribution.
13 * 3. All advertising materials mentioning features or use of this software
14 * must display the following acknowledgement:
15 * This product includes software developed by Boris Popov.
16 * 4. Neither the name of the author nor the names of any co-contributors
17 * may be used to endorse or promote products derived from this software
18 * without specific prior written permission.
20 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
21 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
22 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
23 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
24 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
25 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
26 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
27 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
28 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
29 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
32 * $FreeBSD: src/sys/netsmb/smb_iod.c,v 1.1.2.2 2002/04/23 03:45:01 bp Exp $
33 * $DragonFly: src/sys/netproto/smb/smb_iod.c,v 1.2 2003/06/17 04:28:54 dillon Exp $
36 #include <sys/param.h>
37 #include <sys/systm.h>
39 #include <sys/kernel.h>
40 #include <sys/kthread.h>
41 #include <sys/malloc.h>
43 #include <sys/unistd.h>
45 #include <netsmb/smb.h>
46 #include <netsmb/smb_conn.h>
47 #include <netsmb/smb_rq.h>
48 #include <netsmb/smb_tran.h>
49 #include <netsmb/smb_trantcp.h>
/*
 * Tunables and lock-accessor macros for the SMB I/O daemon (iod).
 * NOTE(review): lines are elided from this view of the file.
 */
/* Idle-loop sleep period in seconds (scaled by hz in smb_iod_create). */
52 #define SMBIOD_SLEEP_TIMO 2
/* Idle time before an echo/ping is sent to the server (see smb_iod_main). */
53 #define SMBIOD_PING_TIMO 60 /* seconds */
/* Spinlock accessors for the event list (iod_evlist). */
55 #define SMB_IOD_EVLOCKPTR(iod) (&((iod)->iod_evlock))
56 #define SMB_IOD_EVLOCK(iod) smb_sl_lock(&((iod)->iod_evlock))
57 #define SMB_IOD_EVUNLOCK(iod) smb_sl_unlock(&((iod)->iod_evlock))
/* Spinlock accessors for the outstanding-request list (iod_rqlist). */
59 #define SMB_IOD_RQLOCKPTR(iod) (&((iod)->iod_rqlock))
60 #define SMB_IOD_RQLOCK(iod) smb_sl_lock(&((iod)->iod_rqlock))
61 #define SMB_IOD_RQUNLOCK(iod) smb_sl_unlock(&((iod)->iod_rqlock))
/* Wake the iod thread; it sleeps on &iod->iod_flags (see the tsleep calls). */
63 #define smb_iod_wakeup(iod) wakeup(&(iod)->iod_flags)
/* Malloc tag for all iod allocations (iod structs, queued events). */
66 static MALLOC_DEFINE(M_SMBIOD, "SMBIOD", "SMB network io daemon");
/* Monotonically increasing id assigned to each new iod (see smb_iod_create). */
68 static int smb_iod_next;
/* Forward declarations for routines referenced before their definitions. */
70 static int smb_iod_sendall(struct smbiod *iod);
71 static int smb_iod_disconnect(struct smbiod *iod);
72 static void smb_iod_thread(void *);
/*
 * Complete a request: record the final error code, mark the request
 * SMBRQ_NOTIFIED and wake whoever sleeps on &rqp->sr_state (the msleep
 * in smb_iod_waitrq sleeps on that address).
 * NOTE(review): lines are elided from this view; the full source
 * presumably holds the per-request lock around these updates -- confirm.
 */
75 smb_iod_rqprocessed(struct smb_rq *rqp, int error)
78 rqp->sr_lerror = error;
80 rqp->sr_state = SMBRQ_NOTIFIED;
81 wakeup(&rqp->sr_state);
/*
 * Invalidate every outstanding request on this connection: flag each for
 * restart (SMBR_RESTART) and complete it with ENOTCONN.  Requests marked
 * SMBR_INTERNAL get different treatment, but that branch body is elided
 * from this view.
 * NOTE(review): the matching SMB_IOD_RQLOCK() acquisition is on an elided
 * line; only the unlock is visible here.
 */
86 smb_iod_invrq(struct smbiod *iod)
91 * Invalidate all outstanding requests for this connection
94 TAILQ_FOREACH(rqp, &iod->iod_rqlist, sr_link) {
95 if (rqp->sr_flags & SMBR_INTERNAL)
97 rqp->sr_flags |= SMBR_RESTART;
98 smb_iod_rqprocessed(rqp, ENOTCONN);
100 SMB_IOD_RQUNLOCK(iod);
/*
 * Tear down the transport layer of the virtual circuit: disconnect,
 * release transport resources, and clear vc_tdata so later calls see the
 * transport as gone.  A NULL vc_tdata means there is nothing to do (the
 * early-return statement after the check is elided from this view).
 */
104 smb_iod_closetran(struct smbiod *iod)
106 struct smb_vc *vcp = iod->iod_vc;
107 struct proc *p = iod->iod_p;
109 if (vcp->vc_tdata == NULL)
111 SMB_TRAN_DISCONNECT(vcp, p);
112 SMB_TRAN_DONE(vcp, p);
113 vcp->vc_tdata = NULL;
/*
 * Declare the connection dead: move the iod to SMBIOD_ST_DEAD and close
 * the transport.  NOTE(review): lines are elided here; the full source
 * presumably also invalidates queued requests -- confirm before relying
 * on that.
 */
117 smb_iod_dead(struct smbiod *iod)
119 iod->iod_state = SMBIOD_ST_DEAD;
120 smb_iod_closetran(iod);
/*
 * Bring a virtual circuit up from scratch.  Sequence, as visible here:
 * create/bind/connect the transport (state -> SMBIOD_ST_TRANACTIVE),
 * then run SMB protocol negotiation and session setup
 * (state -> SMBIOD_ST_VCACTIVE).  Calling this on an already-active
 * connection is an error (ENOTCONN, flagged as dubious by the original
 * author's XXX).
 * NOTE(review): ithrow() is an error-bailout macro whose definition and
 * failure path are on elided lines -- confirm its cleanup behavior
 * against the full source.
 */
125 smb_iod_connect(struct smbiod *iod)
127 struct smb_vc *vcp = iod->iod_vc;
128 struct proc *p = iod->iod_p;
131 SMBIODEBUG("%d\n", iod->iod_state);
132 switch(iod->iod_state) {
133 case SMBIOD_ST_VCACTIVE:
134 SMBERROR("called for already opened connection\n");
137 return ENOTCONN; /* XXX: last error code ? */
/* Transport bring-up: create, bind local address, connect to peer. */
144 ithrow(SMB_TRAN_CREATE(vcp, p));
145 SMBIODEBUG("tcreate\n");
147 ithrow(SMB_TRAN_BIND(vcp, vcp->vc_laddr, p));
149 SMBIODEBUG("tbind\n");
150 ithrow(SMB_TRAN_CONNECT(vcp, vcp->vc_paddr, p));
/* Let the transport identify this connection by the iod's flags word. */
151 SMB_TRAN_SETPARAM(vcp, SMBTP_SELECTID, &iod->iod_flags);
152 iod->iod_state = SMBIOD_ST_TRANACTIVE;
153 SMBIODEBUG("tconnect\n");
154 /* vcp->vc_mid = 0;*/
/* SMB-level handshake: dialect negotiation, then session setup. */
155 ithrow(smb_smb_negotiate(vcp, &iod->iod_scred));
156 SMBIODEBUG("snegotiate\n");
157 ithrow(smb_smb_ssnsetup(vcp, &iod->iod_scred));
158 iod->iod_state = SMBIOD_ST_VCACTIVE;
159 SMBIODEBUG("completed\n");
/*
 * Orderly shutdown of the virtual circuit: close the SMB session if one
 * is active (dropping back to SMBIOD_ST_TRANACTIVE), forget the server-
 * assigned UID, close the transport, and end in SMBIOD_ST_NOTCONN.
 */
169 smb_iod_disconnect(struct smbiod *iod)
171 struct smb_vc *vcp = iod->iod_vc;
174 if (iod->iod_state == SMBIOD_ST_VCACTIVE) {
175 smb_smb_ssnclose(vcp, &iod->iod_scred);
176 iod->iod_state = SMBIOD_ST_TRANACTIVE;
178 vcp->vc_smbuid = SMB_UID_UNKNOWN;
179 smb_iod_closetran(iod);
180 iod->iod_state = SMBIOD_ST_NOTCONN;
/*
 * (Re)establish a tree connection to the share ssp.  If the VC is not
 * active, first force a reconnect via smb_iod_connect().  The share is
 * flagged SMBS_RECONNECTING for the duration of the tree connect, and
 * waiters on ss_vcgenid are woken when it completes.
 * NOTE(review): error-path returns between these steps are on elided
 * lines -- confirm against the full source.
 */
185 smb_iod_treeconnect(struct smbiod *iod, struct smb_share *ssp)
189 if (iod->iod_state != SMBIOD_ST_VCACTIVE) {
190 if (iod->iod_state != SMBIOD_ST_DEAD)
192 iod->iod_state = SMBIOD_ST_RECONNECT;
193 error = smb_iod_connect(iod);
197 SMBIODEBUG("tree reconnect\n");
199 ssp->ss_flags |= SMBS_RECONNECTING;
201 error = smb_smb_treeconnect(ssp, &iod->iod_scred);
203 ssp->ss_flags &= ~SMBS_RECONNECTING;
205 wakeup(&ssp->ss_vcgenid);
/*
 * Try to transmit one request over the VC's transport.
 * Visible behavior:
 *  - SMBIOD_ST_NOTCONN: fail the request immediately with ENOTCONN.
 *  - On the first attempt (sr_sendcnt == 0) patch the tree id and user
 *    id into the pre-built SMB header (little-endian via htoles) and
 *    recompute the header with mb_fixhdr.
 *  - After more than 5 attempts, give up: flag SMBR_RESTART and complete
 *    the request with its last error.
 *  - A *copy* of the mbuf chain is sent (m_copym) so the original
 *    remains available for retransmission; ENOBUFS if the copy fails.
 *  - On success, stamp sr_timesent (used by the timeout check in
 *    smb_iod_sendall) and iod_lastrqsent (used by the ping logic).
 * NOTE(review): the #endif matching the "movedtoanotherplace" #ifdef and
 * several returns/breaks are on elided lines.
 */
210 smb_iod_sendrq(struct smbiod *iod, struct smb_rq *rqp)
212 struct proc *p = iod->iod_p;
213 struct smb_vc *vcp = iod->iod_vc;
214 struct smb_share *ssp = rqp->sr_share;
218 SMBIODEBUG("iod_state = %d\n", iod->iod_state);
219 switch (iod->iod_state) {
220 case SMBIOD_ST_NOTCONN:
221 smb_iod_rqprocessed(rqp, ENOTCONN);
224 iod->iod_state = SMBIOD_ST_RECONNECT;
226 case SMBIOD_ST_RECONNECT:
/* First transmission: finalize TID/UID in the request header. */
231 if (rqp->sr_sendcnt == 0) {
232 #ifdef movedtoanotherplace
233 if (vcp->vc_maxmux != 0 && iod->iod_muxcnt >= vcp->vc_maxmux)
236 *rqp->sr_rqtid = htoles(ssp ? ssp->ss_tid : SMB_TID_UNKNOWN);
237 *rqp->sr_rquid = htoles(vcp ? vcp->vc_smbuid : 0);
238 mb_fixhdr(&rqp->sr_rq);
/* Too many failed attempts: stop retrying and report the last error. */
240 if (rqp->sr_sendcnt++ > 5) {
241 rqp->sr_flags |= SMBR_RESTART;
242 smb_iod_rqprocessed(rqp, rqp->sr_lerror);
244 * If all attempts to send a request failed, then
245 * something is seriously hosed.
249 SMBSDEBUG("M:%04x, P:%04x, U:%04x, T:%04x\n", rqp->sr_mid, 0, 0, 0);
250 m_dumpm(rqp->sr_rq.mb_top);
/* Send a copy; keep the original chain for possible retransmit. */
251 m = m_copym(rqp->sr_rq.mb_top, 0, M_COPYALL, M_WAIT);
252 error = rqp->sr_lerror = m ? SMB_TRAN_SEND(vcp, m, p) : ENOBUFS;
254 getnanotime(&rqp->sr_timesent);
255 iod->iod_lastrqsent = rqp->sr_timesent;
256 rqp->sr_flags |= SMBR_SENT;
257 rqp->sr_state = SMBRQ_SENT;
261 * Check for fatal errors
263 if (SMB_TRAN_FATAL(vcp, error)) {
265 * No further attempts should be made
/* Request was interrupted by a signal: complete it with EINTR. */
269 if (smb_rq_intr(rqp))
270 smb_iod_rqprocessed(rqp, EINTR);
/*
 * Drain the transport of incoming SMB packets and dispatch responses.
 * Visible behavior, per packet:
 *  - SMB_TRAN_RECV until EWOULDBLOCK (queue empty) or a fatal error.
 *  - m_pullup the fixed-size SMB header so it is contiguous; on failure
 *    the packet is skipped ("wait for a good packet").
 *  - Verify the 4-byte SMB signature, extract the multiplex id (MID),
 *    and search iod_rqlist for the matching outstanding request.
 *  - First response packet becomes the reply (md_initm); additional
 *    packets are appended only for SMBR_MULTIPACKET requests; anything
 *    else is logged as a duplicate and ignored.
 *  - The matched request is completed with error 0; unmatched responses
 *    are logged and dropped.
 * A final sweep completes any request whose owning process has a pending
 * signal (smb_proc_intr) with EINTR.
 * NOTE(review): loop constructs, m_freem calls and the RQLOCK
 * acquisitions are on elided lines -- only the unlocks are visible.
 */
275 * Process incoming packets
278 smb_iod_recvall(struct smbiod *iod)
280 struct smb_vc *vcp = iod->iod_vc;
281 struct proc *p = iod->iod_p;
/* Nothing to receive unless the transport is up. */
288 switch (iod->iod_state) {
289 case SMBIOD_ST_NOTCONN:
291 case SMBIOD_ST_RECONNECT:
298 error = SMB_TRAN_RECV(vcp, &m, p);
299 if (error == EWOULDBLOCK)
301 if (SMB_TRAN_FATAL(vcp, error)) {
308 SMBERROR("tran return NULL without error\n");
/* Make the SMB header contiguous before touching it. */
312 m = m_pullup(m, SMB_HDRLEN);
314 continue; /* wait for a good packet */
316 * Now we got an entire and possibly invalid SMB packet.
317 * Be careful while parsing it.
320 hp = mtod(m, u_char*);
321 if (bcmp(hp, SMB_SIGNATURE, SMB_SIGLEN) != 0) {
325 mid = SMB_HDRMID(hp);
326 SMBSDEBUG("mid %04x\n", (u_int)mid);
/* Match the response MID against the outstanding-request list. */
328 TAILQ_FOREACH(rqp, &iod->iod_rqlist, sr_link) {
329 if (rqp->sr_mid != mid)
332 if (rqp->sr_rp.md_top == NULL) {
333 md_initm(&rqp->sr_rp, m);
335 if (rqp->sr_flags & SMBR_MULTIPACKET) {
336 md_append_record(&rqp->sr_rp, m);
339 SMBERROR("duplicate response %d (ignored)\n", mid);
344 smb_iod_rqprocessed(rqp, 0);
347 SMB_IOD_RQUNLOCK(iod);
349 SMBERROR("drop resp with mid %d\n", (u_int)mid);
350 /* smb_printrqlist(vcp);*/
/* Fail any request whose owner has been interrupted by a signal. */
355 * check for interrupts
358 TAILQ_FOREACH(rqp, &iod->iod_rqlist, sr_link) {
359 if (smb_proc_intr(rqp->sr_cred->scr_p)) {
360 smb_iod_rqprocessed(rqp, EINTR);
363 SMB_IOD_RQUNLOCK(iod);
/*
 * Queue an event for the iod thread.  Asynchronous events (no
 * SMBIOD_EV_SYNC bit) return right after queueing; synchronous ones
 * sleep on the event structure until smb_iod_main has processed it
 * (PDROP releases the event lock atomically with the sleep), then
 * return the event's error code.
 * NOTE(review): the EVLOCK acquisition, the iod wakeup, and the freeing
 * of evp are on elided lines -- confirm ownership of evp against the
 * full source.
 */
368 smb_iod_request(struct smbiod *iod, int event, void *ident)
370 struct smbiod_event *evp;
374 evp = smb_zmalloc(sizeof(*evp), M_SMBIOD, M_WAITOK);
375 evp->ev_type = event;
376 evp->ev_ident = ident;
378 STAILQ_INSERT_TAIL(&iod->iod_evlist, evp, ev_link);
379 if ((event & SMBIOD_EV_SYNC) == 0) {
380 SMB_IOD_EVUNLOCK(iod);
385 msleep(evp, SMB_IOD_EVLOCKPTR(iod), PWAIT | PDROP, "90evw", 0);
386 error = evp->ev_error;
/*
 * Place a request on the iod's queue.
 * Requests issued by the iod thread itself (sr_cred's process equals
 * iod_p) are "internal": flagged SMBR_INTERNAL, inserted at the *head*
 * of the queue, sent immediately, and polled to completion with a
 * bounded tsleep loop (the comment about not needing to lock sr_state
 * is the original author's).
 * All other requests may first kick off a synchronous reconnect event,
 * then honor the server's multiplex limit (vc_maxmux) by sleeping on
 * iod_muxwant before being tail-inserted.
 * NOTE(review): the RQLOCK acquisitions, the muxcnt increment and the
 * final return are on elided lines.
 */
392 * Place request in the queue.
393 * Request from smbiod have a high priority.
396 smb_iod_addrq(struct smb_rq *rqp)
398 struct smb_vc *vcp = rqp->sr_vc;
399 struct smbiod *iod = vcp->vc_iod;
/* Internal (iod-originated) request: jump the queue and send now. */
403 if (rqp->sr_cred->scr_p == iod->iod_p) {
404 rqp->sr_flags |= SMBR_INTERNAL;
406 TAILQ_INSERT_HEAD(&iod->iod_rqlist, rqp, sr_link);
407 SMB_IOD_RQUNLOCK(iod);
409 if (smb_iod_sendrq(iod, rqp) != 0) {
414 * we don't need to lock state field here
416 if (rqp->sr_state != SMBRQ_NOTSENT)
418 tsleep(&iod->iod_flags, PWAIT, "90sndw", hz);
421 smb_iod_removerq(rqp);
422 return rqp->sr_lerror;
/* Normal request: connection may need to be (re)established first. */
425 switch (iod->iod_state) {
426 case SMBIOD_ST_NOTCONN:
429 error = smb_iod_request(vcp->vc_iod, SMBIOD_EV_CONNECT | SMBIOD_EV_SYNC, NULL);
/* Respect the negotiated multiplex limit before queueing. */
439 if (vcp->vc_maxmux == 0) {
440 SMBERROR("maxmux == 0\n");
443 if (iod->iod_muxcnt < vcp->vc_maxmux)
446 msleep(&iod->iod_muxwant, SMB_IOD_RQLOCKPTR(iod),
450 TAILQ_INSERT_TAIL(&iod->iod_rqlist, rqp, sr_link);
451 SMB_IOD_RQUNLOCK(iod);
/*
 * Unlink a request from the iod's queue.  Internal requests are removed
 * directly; normal ones first wait for any exclusive lock holder
 * (SMBR_XLOCK, set by smb_iod_sendall while it transmits) to finish,
 * then wake a thread blocked on the multiplex limit, if any.
 * NOTE(review): the RQLOCK acquisitions and the muxcnt bookkeeping are
 * on elided lines.
 */
457 smb_iod_removerq(struct smb_rq *rqp)
459 struct smb_vc *vcp = rqp->sr_vc;
460 struct smbiod *iod = vcp->vc_iod;
463 if (rqp->sr_flags & SMBR_INTERNAL) {
465 TAILQ_REMOVE(&iod->iod_rqlist, rqp, sr_link);
466 SMB_IOD_RQUNLOCK(iod);
/* Wait until smb_iod_sendall is done touching this request. */
470 while (rqp->sr_flags & SMBR_XLOCK) {
471 rqp->sr_flags |= SMBR_XLOCKWANT;
472 msleep(rqp, SMB_IOD_RQLOCKPTR(iod), PWAIT, "90xrm", 0);
474 TAILQ_REMOVE(&iod->iod_rqlist, rqp, sr_link);
476 if (iod->iod_muxwant) {
478 wakeup(&iod->iod_muxwant);
480 SMB_IOD_RQUNLOCK(iod);
/*
 * Wait for a request to complete and return its error code.
 * Internal requests are actively pumped: this thread itself drives
 * smb_iod_sendall/smb_iod_recvall and polls the reply generation
 * counters (sr_rpgen vs sr_rplast).  Normal requests sleep on
 * &rqp->sr_state until smb_iod_rqprocessed wakes them.
 * SMBR_MULTIPACKET requests stay queued for further reply packets and
 * are rotated to the tail so other waiters get a chance; all others are
 * removed from the queue here.
 * NOTE(review): the SMBRQ_SLOCK/SUNLOCK pairs and rplast update visible
 * in the full source are on elided lines.
 */
485 smb_iod_waitrq(struct smb_rq *rqp)
487 struct smbiod *iod = rqp->sr_vc->vc_iod;
/* Internal request: we are the iod, so pump the queues ourselves. */
491 if (rqp->sr_flags & SMBR_INTERNAL) {
493 smb_iod_sendall(iod);
494 smb_iod_recvall(iod);
495 if (rqp->sr_rpgen != rqp->sr_rplast)
497 tsleep(&iod->iod_flags, PWAIT, "90irq", hz);
499 smb_iod_removerq(rqp);
500 return rqp->sr_lerror;
504 if (rqp->sr_rpgen == rqp->sr_rplast)
505 msleep(&rqp->sr_state, SMBRQ_SLOCKPTR(rqp), PWAIT, "90wrq", 0);
508 error = rqp->sr_lerror;
509 if (rqp->sr_flags & SMBR_MULTIPACKET) {
511 * If request should stay in the list, then reinsert it
512 * at the end of queue so other waiters have chance to concur
515 TAILQ_REMOVE(&iod->iod_rqlist, rqp, sr_link);
516 TAILQ_INSERT_TAIL(&iod->iod_rqlist, rqp, sr_link);
517 SMB_IOD_RQUNLOCK(iod);
519 smb_iod_removerq(rqp);
/*
 * Walk the request queue, transmitting unsent requests and timing out
 * stale ones.  While a request is being sent the queue lock is dropped,
 * so the request is pinned with SMBR_XLOCK and waiters (see
 * smb_iod_removerq) are woken once the flag clears.  The timeout
 * threshold is twice the transport's SMBTP_TIMEOUT (timespecadd of the
 * value with itself); requests older than that are completed with
 * ETIMEDOUT.  An ENOTCONN send result triggers the dead-connection
 * path (the call after the check is elided from this view).
 */
525 smb_iod_sendall(struct smbiod *iod)
527 struct smb_vc *vcp = iod->iod_vc;
529 struct timespec ts, tstimeout;
534 * Loop through the list of requests and send them if possible
537 TAILQ_FOREACH(rqp, &iod->iod_rqlist, sr_link) {
538 switch (rqp->sr_state) {
/* Pin the request, drop the queue lock, transmit, unpin. */
540 rqp->sr_flags |= SMBR_XLOCK;
541 SMB_IOD_RQUNLOCK(iod);
542 herror = smb_iod_sendrq(iod, rqp);
544 rqp->sr_flags &= ~SMBR_XLOCK;
545 if (rqp->sr_flags & SMBR_XLOCKWANT) {
546 rqp->sr_flags &= ~SMBR_XLOCKWANT;
/* Timeout check: allow twice the transport timeout since send. */
551 SMB_TRAN_GETPARAM(vcp, SMBTP_TIMEOUT, &tstimeout);
552 timespecadd(&tstimeout, &tstimeout);
554 timespecsub(&ts, &tstimeout);
555 if (timespeccmp(&ts, &rqp->sr_timesent, >)) {
556 smb_iod_rqprocessed(rqp, ETIMEDOUT);
564 SMB_IOD_RQUNLOCK(iod);
565 if (herror == ENOTCONN)
/*
 * One iteration of the iod thread's work loop (called repeatedly from
 * smb_iod_thread).  Visible behavior:
 *  1. Drain iod_evlist: dequeue each event, mark it
 *     SMBIOD_EV_PROCESSING, dispatch on its type (connect, disconnect,
 *     tree connect, shutdown, new request), and for SMBIOD_EV_SYNC
 *     events wake the requester (the wakeup/free on the sync path is
 *     partly elided).
 *  2. If the VC is active and no request has been sent within
 *     iod_pingtimo, send an SMB echo to keep the session alive.
 *  3. Pump outgoing and incoming traffic via sendall/recvall.
 * NOTE(review): the EVLOCK acquisitions and several break statements
 * are on elided lines.
 */
571 * "main" function for smbiod daemon
574 smb_iod_main(struct smbiod *iod)
576 /* struct smb_vc *vcp = iod->iod_vc;*/
577 struct smbiod_event *evp;
578 /* struct timespec tsnow;*/
585 * Check all interesting events
589 evp = STAILQ_FIRST(&iod->iod_evlist);
591 SMB_IOD_EVUNLOCK(iod);
594 STAILQ_REMOVE_HEAD(&iod->iod_evlist, ev_link);
595 evp->ev_type |= SMBIOD_EV_PROCESSING;
596 SMB_IOD_EVUNLOCK(iod);
597 switch (evp->ev_type & SMBIOD_EV_MASK) {
598 case SMBIOD_EV_CONNECT:
599 iod->iod_state = SMBIOD_ST_RECONNECT;
600 evp->ev_error = smb_iod_connect(iod);
602 case SMBIOD_EV_DISCONNECT:
603 evp->ev_error = smb_iod_disconnect(iod);
605 case SMBIOD_EV_TREECONNECT:
606 evp->ev_error = smb_iod_treeconnect(iod, evp->ev_ident);
608 case SMBIOD_EV_SHUTDOWN:
609 iod->iod_flags |= SMBIOD_SHUTDOWN;
611 case SMBIOD_EV_NEWRQ:
614 if (evp->ev_type & SMBIOD_EV_SYNC) {
617 SMB_IOD_EVUNLOCK(iod);
/* Keep-alive: ping the server if idle longer than iod_pingtimo. */
622 if (iod->iod_state == SMBIOD_ST_VCACTIVE) {
624 timespecsub(&tsnow, &iod->iod_pingtimo);
625 if (timespeccmp(&tsnow, &iod->iod_lastrqsent, >)) {
626 smb_smb_echo(vcp, &iod->iod_scred);
630 smb_iod_sendall(iod);
631 smb_iod_recvall(iod);
/* DragonFly compatibility: map the create call to kthread_create2. */
635 #define kthread_create_compat kthread_create2
/*
 * Kernel thread body for one iod.  Builds the iod's credentials, then
 * loops until SMBIOD_SHUTDOWN is set, sleeping iod_sleeptimo ticks
 * between iterations.  NOTE(review): the call into smb_iod_main and the
 * thread-exit sequence are on elided lines.
 */
639 smb_iod_thread(void *arg)
641 struct smbiod *iod = arg;
643 smb_makescred(&iod->iod_scred, iod->iod_p, NULL);
644 while ((iod->iod_flags & SMBIOD_SHUTDOWN) == 0) {
646 SMBIODEBUG("going to sleep for %d ticks\n", iod->iod_sleeptimo);
647 if (iod->iod_flags & SMBIOD_SHUTDOWN)
649 tsleep(&iod->iod_flags, PWAIT, "90idle", iod->iod_sleeptimo);
/*
 * Allocate and start an I/O daemon for the virtual circuit vcp:
 * zero-allocate the smbiod, assign the next id, initialize state,
 * timeouts, both spinlocks and both queues, then spawn the kernel
 * thread running smb_iod_thread.  On thread-creation failure the error
 * is logged (cleanup/return lines are elided from this view).
 * NOTE(review): the linkage vcp->vc_iod / iod->iod_vc is presumably set
 * on an elided line -- confirm against the full source.
 */
655 smb_iod_create(struct smb_vc *vcp)
660 iod = smb_zmalloc(sizeof(*iod), M_SMBIOD, M_WAITOK);
661 iod->iod_id = smb_iod_next++;
662 iod->iod_state = SMBIOD_ST_NOTCONN;
664 iod->iod_sleeptimo = hz * SMBIOD_SLEEP_TIMO;
665 iod->iod_pingtimo.tv_sec = SMBIOD_PING_TIMO;
666 getnanotime(&iod->iod_lastrqsent);
668 smb_sl_init(&iod->iod_rqlock, "90rql");
669 TAILQ_INIT(&iod->iod_rqlist);
670 smb_sl_init(&iod->iod_evlock, "90evl");
671 STAILQ_INIT(&iod->iod_evlist);
672 error = kthread_create_compat(smb_iod_thread, iod, &iod->iod_p,
673 RFNOWAIT, "smbiod%d", iod->iod_id);
675 SMBERROR("can't start smbiod: %d", error);
/*
 * Tear down an iod: synchronously ask the thread to shut itself down,
 * then destroy its locks.  NOTE(review): the final free of the iod
 * structure is on an elided line -- confirm ownership in the full
 * source.
 */
683 smb_iod_destroy(struct smbiod *iod)
685 smb_iod_request(iod, SMBIOD_EV_SHUTDOWN | SMBIOD_EV_SYNC, NULL);
686 smb_sl_destroy(&iod->iod_rqlock);
687 smb_sl_destroy(&iod->iod_evlock);