2 * Copyright (c) 2000-2001 Boris Popov
5 * Redistribution and use in source and binary forms, with or without
6 * modification, are permitted provided that the following conditions
8 * 1. Redistributions of source code must retain the above copyright
9 * notice, this list of conditions and the following disclaimer.
10 * 2. Redistributions in binary form must reproduce the above copyright
11 * notice, this list of conditions and the following disclaimer in the
12 * documentation and/or other materials provided with the distribution.
13 * 3. All advertising materials mentioning features or use of this software
14 * must display the following acknowledgement:
15 * This product includes software developed by Boris Popov.
16 * 4. Neither the name of the author nor the names of any co-contributors
17 * may be used to endorse or promote products derived from this software
18 * without specific prior written permission.
20 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
21 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
22 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
23 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
24 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
25 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
26 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
27 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
28 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
29 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
32 * $FreeBSD: src/sys/netsmb/smb_iod.c,v 1.1.2.2 2002/04/23 03:45:01 bp Exp $
33 * $DragonFly: src/sys/netproto/smb/smb_iod.c,v 1.8 2004/03/01 06:33:18 dillon Exp $
36 #include <sys/param.h>
37 #include <sys/systm.h>
39 #include <sys/kernel.h>
40 #include <sys/kthread.h>
41 #include <sys/malloc.h>
43 #include <sys/unistd.h>
49 #include "smb_trantcp.h"
/*
 * Tunables for the iod thread: idle nap length (multiplied by hz in
 * smb_iod_create) and how often an idle VC is pinged with an SMB echo.
 */
52 #define SMBIOD_SLEEP_TIMO 2
53 #define SMBIOD_PING_TIMO 60 /* seconds */
/* Lock helpers for the event list embedded in struct smbiod. */
55 #define SMB_IOD_EVLOCKPTR(iod) (&((iod)->iod_evlock))
56 #define SMB_IOD_EVLOCK(ilock, iod) smb_sl_lock(ilock, &((iod)->iod_evlock))
57 #define SMB_IOD_EVUNLOCK(ilock) smb_sl_unlock(ilock)
/* Lock helpers for the outstanding-request list embedded in struct smbiod. */
59 #define SMB_IOD_RQLOCKPTR(iod) (&((iod)->iod_rqlock))
60 #define SMB_IOD_RQLOCK(ilock, iod) smb_sl_lock(ilock, &((iod)->iod_rqlock))
61 #define SMB_IOD_RQUNLOCK(ilock) smb_sl_unlock(ilock)
/* Kick the iod thread; it sleeps on &iod->iod_flags (see smb_iod_thread). */
63 #define smb_iod_wakeup(iod) wakeup(&(iod)->iod_flags)
66 static MALLOC_DEFINE(M_SMBIOD, "SMBIOD", "SMB network io daemon");
/* Monotonically increasing id assigned to each new iod (smb_iod_create). */
68 static int smb_iod_next;
/* Forward declarations for routines used before their definitions below. */
70 static int smb_iod_sendall(struct smbiod *iod);
71 static int smb_iod_disconnect(struct smbiod *iod);
72 static void smb_iod_thread(void *);
/*
 * Mark a request as finished with the given error code and wake any
 * thread sleeping on sr_state (see smb_iod_waitrq).  The state change
 * is made under the per-request lock so waiters observe it atomically.
 */
75 smb_iod_rqprocessed(struct smb_rq *rqp, int error)
79 SMBRQ_SLOCK(&ilock, rqp);
80 rqp->sr_lerror = error;
82 rqp->sr_state = SMBRQ_NOTIFIED;
83 wakeup(&rqp->sr_state);
84 SMBRQ_SUNLOCK(&ilock);
/*
 * Invalidate every request still queued on this iod: flag each for
 * restart and complete it with ENOTCONN.  Used when the connection to
 * the server is lost.  SMBR_INTERNAL requests get special treatment
 * (the elided branch here — intent unclear, as the original XXX notes).
 */
88 smb_iod_invrq(struct smbiod *iod)
94 * Invalidate all outstanding requests for this connection
96 SMB_IOD_RQLOCK(&ilock, iod);
97 TAILQ_FOREACH(rqp, &iod->iod_rqlist, sr_link) {
99 /* this makes no sense whatsoever XXX */
100 if (rqp->sr_flags & SMBR_INTERNAL)
103 rqp->sr_flags |= SMBR_RESTART;
104 smb_iod_rqprocessed(rqp, ENOTCONN);
106 SMB_IOD_RQUNLOCK(&ilock);
/*
 * Tear down the transport layer attached to this iod's VC.
 * No-op when no transport is attached (vc_tdata == NULL); otherwise
 * disconnect, release the transport, and clear the handle so a later
 * call is harmless.
 */
110 smb_iod_closetran(struct smbiod *iod)
112 struct smb_vc *vcp = iod->iod_vc;
113 struct thread *td = iod->iod_td;
115 if (vcp->vc_tdata == NULL)
117 SMB_TRAN_DISCONNECT(vcp, td);
118 SMB_TRAN_DONE(vcp, td);
119 vcp->vc_tdata = NULL;
/*
 * Declare the connection dead: move to SMBIOD_ST_DEAD and drop the
 * transport.  (Further cleanup, if any, is elided from this view.)
 */
123 smb_iod_dead(struct smbiod *iod)
125 iod->iod_state = SMBIOD_ST_DEAD;
126 smb_iod_closetran(iod);
/*
 * Establish a full SMB session for this iod's VC:
 * create + bind + connect the transport, then run the SMB protocol
 * negotiate and session-setup exchanges.  State moves NOTCONN/RECONNECT
 * -> TRANACTIVE -> VCACTIVE on success.  ithrow() appears to abort the
 * sequence on a non-zero return — its definition and the error-handling
 * tail are not visible in this view; confirm before relying on it.
 */
131 smb_iod_connect(struct smbiod *iod)
133 struct smb_vc *vcp = iod->iod_vc;
134 struct thread *td = iod->iod_td;
137 SMBIODEBUG("%d\n", iod->iod_state);
138 switch(iod->iod_state) {
139 case SMBIOD_ST_VCACTIVE:
140 SMBERROR("called for already opened connection\n");
143 return ENOTCONN; /* XXX: last error code ? */
150 ithrow(SMB_TRAN_CREATE(vcp, td));
151 SMBIODEBUG("tcreate\n");
153 ithrow(SMB_TRAN_BIND(vcp, vcp->vc_laddr, td));
155 SMBIODEBUG("tbind\n");
156 ithrow(SMB_TRAN_CONNECT(vcp, vcp->vc_paddr, td));
/* Let the transport select/wakeup against this iod's flag word. */
157 SMB_TRAN_SETPARAM(vcp, SMBTP_SELECTID, &iod->iod_flags);
158 iod->iod_state = SMBIOD_ST_TRANACTIVE;
159 SMBIODEBUG("tconnect\n");
160 /* vcp->vc_mid = 0;*/
161 ithrow(smb_smb_negotiate(vcp, &iod->iod_scred));
162 SMBIODEBUG("snegotiate\n");
163 ithrow(smb_smb_ssnsetup(vcp, &iod->iod_scred));
164 iod->iod_state = SMBIOD_ST_VCACTIVE;
165 SMBIODEBUG("completed\n");
/*
 * Orderly teardown of the session: if the VC is fully active, send the
 * SMB session-close first, then forget the assigned UID, drop the
 * transport, and return the iod to SMBIOD_ST_NOTCONN.
 */
175 smb_iod_disconnect(struct smbiod *iod)
177 struct smb_vc *vcp = iod->iod_vc;
180 if (iod->iod_state == SMBIOD_ST_VCACTIVE) {
181 smb_smb_ssnclose(vcp, &iod->iod_scred);
182 iod->iod_state = SMBIOD_ST_TRANACTIVE;
184 vcp->vc_smbuid = SMB_UID_UNKNOWN;
185 smb_iod_closetran(iod);
186 iod->iod_state = SMBIOD_ST_NOTCONN;
191 smb_iod_treeconnect(struct smbiod *iod, struct smb_share *ssp)
196 if (iod->iod_state != SMBIOD_ST_VCACTIVE) {
197 if (iod->iod_state != SMBIOD_ST_DEAD)
199 iod->iod_state = SMBIOD_ST_RECONNECT;
200 error = smb_iod_connect(iod);
204 SMBIODEBUG("tree reconnect\n");
205 SMBS_ST_LOCK(&ilock, ssp);
206 ssp->ss_flags |= SMBS_RECONNECTING;
207 SMBS_ST_UNLOCK(&ilock);
208 error = smb_smb_treeconnect(ssp, &iod->iod_scred);
209 SMBS_ST_LOCK(&ilock, ssp);
210 ssp->ss_flags &= ~SMBS_RECONNECTING;
211 SMBS_ST_UNLOCK(&ilock);
212 wakeup(&ssp->ss_vcgenid);
217 smb_iod_sendrq(struct smbiod *iod, struct smb_rq *rqp)
219 struct thread *td = iod->iod_td;
220 struct smb_vc *vcp = iod->iod_vc;
221 struct smb_share *ssp = rqp->sr_share;
225 SMBIODEBUG("iod_state = %d\n", iod->iod_state);
226 switch (iod->iod_state) {
227 case SMBIOD_ST_NOTCONN:
228 smb_iod_rqprocessed(rqp, ENOTCONN);
231 iod->iod_state = SMBIOD_ST_RECONNECT;
233 case SMBIOD_ST_RECONNECT:
238 if (rqp->sr_sendcnt == 0) {
239 #ifdef movedtoanotherplace
240 if (vcp->vc_maxmux != 0 && iod->iod_muxcnt >= vcp->vc_maxmux)
243 *rqp->sr_rqtid = htoles(ssp ? ssp->ss_tid : SMB_TID_UNKNOWN);
244 *rqp->sr_rquid = htoles(vcp ? vcp->vc_smbuid : 0);
245 mb_fixhdr(&rqp->sr_rq);
247 if (rqp->sr_sendcnt++ > 5) {
248 rqp->sr_flags |= SMBR_RESTART;
249 smb_iod_rqprocessed(rqp, rqp->sr_lerror);
251 * If all attempts to send a request failed, then
252 * something is seriously hosed.
256 SMBSDEBUG("M:%04x, P:%04x, U:%04x, T:%04x\n", rqp->sr_mid, 0, 0, 0);
257 m_dumpm(rqp->sr_rq.mb_top);
258 m = m_copym(rqp->sr_rq.mb_top, 0, M_COPYALL, M_WAIT);
259 error = rqp->sr_lerror = m ? SMB_TRAN_SEND(vcp, m, td) : ENOBUFS;
261 getnanotime(&rqp->sr_timesent);
262 iod->iod_lastrqsent = rqp->sr_timesent;
263 rqp->sr_flags |= SMBR_SENT;
264 rqp->sr_state = SMBRQ_SENT;
268 * Check for fatal errors
270 if (SMB_TRAN_FATAL(vcp, error)) {
272 * No further attempts should be made
276 if (smb_rq_intr(rqp))
277 smb_iod_rqprocessed(rqp, EINTR);
282 * Process incoming packets
/*
 * Drain the transport of received SMB responses and match each one to
 * its outstanding request by MID.
 *
 * The receive loop pulls whole packets until the transport would
 * block; fatal transport errors are handled (body elided).  Each
 * packet is pulled up to at least the SMB header, its signature is
 * verified, and the MID extracted.  A matching request gets the mbuf
 * chain attached: first response initializes sr_rp; for
 * SMBR_MULTIPACKET requests additional records are appended; a second
 * response to a non-multipacket request is logged and dropped as a
 * duplicate.  Responses with no matching request are logged and
 * discarded.  A final pass completes any request whose owning thread
 * has a pending signal with EINTR.
 */
285 smb_iod_recvall(struct smbiod *iod)
287 struct smb_vc *vcp = iod->iod_vc;
288 struct thread *td = iod->iod_td;
297 switch (iod->iod_state) {
298 case SMBIOD_ST_NOTCONN:
300 case SMBIOD_ST_RECONNECT:
307 error = SMB_TRAN_RECV(vcp, &m, td);
308 if (error == EWOULDBLOCK)
310 if (SMB_TRAN_FATAL(vcp, error)) {
317 SMBERROR("tran return NULL without error\n");
321 m = m_pullup(m, SMB_HDRLEN);
323 continue; /* wait for a good packet */
325 * Now we got an entire and possibly invalid SMB packet.
326 * Be careful while parsing it.
329 hp = mtod(m, u_char*);
330 if (bcmp(hp, SMB_SIGNATURE, SMB_SIGLEN) != 0) {
334 mid = SMB_HDRMID(hp);
335 SMBSDEBUG("mid %04x\n", (u_int)mid);
336 SMB_IOD_RQLOCK(&ilock, iod);
337 TAILQ_FOREACH(rqp, &iod->iod_rqlist, sr_link) {
338 if (rqp->sr_mid != mid)
340 SMBRQ_SLOCK(&jlock, rqp);
341 if (rqp->sr_rp.md_top == NULL) {
342 md_initm(&rqp->sr_rp, m);
344 if (rqp->sr_flags & SMBR_MULTIPACKET) {
345 md_append_record(&rqp->sr_rp, m);
347 SMBRQ_SUNLOCK(&jlock);
348 SMBERROR("duplicate response %d (ignored)\n", mid);
352 SMBRQ_SUNLOCK(&jlock);
353 smb_iod_rqprocessed(rqp, 0);
356 SMB_IOD_RQUNLOCK(&ilock);
358 SMBERROR("drop resp with mid %d\n", (u_int)mid);
359 /* smb_printrqlist(vcp);*/
364 * check for interrupts
366 SMB_IOD_RQLOCK(&ilock, iod);
367 TAILQ_FOREACH(rqp, &iod->iod_rqlist, sr_link) {
368 if (smb_proc_intr(rqp->sr_cred->scr_td)) {
369 smb_iod_rqprocessed(rqp, EINTR);
372 SMB_IOD_RQUNLOCK(&ilock);
377 smb_iod_request(struct smbiod *iod, int event, void *ident)
379 struct smbiod_event *evp;
384 evp = smb_zmalloc(sizeof(*evp), M_SMBIOD, M_WAITOK);
385 evp->ev_type = event;
386 evp->ev_ident = ident;
387 SMB_IOD_EVLOCK(&ilock, iod);
388 STAILQ_INSERT_TAIL(&iod->iod_evlist, evp, ev_link);
389 if ((event & SMBIOD_EV_SYNC) == 0) {
390 SMB_IOD_EVUNLOCK(&ilock);
395 smb_sleep(evp, &ilock, PDROP, "90evw", 0);
396 error = evp->ev_error;
402 * Place request in the queue.
403 * Request from smbiod have a high priority.
/*
 * Queue a request for transmission.  Requests issued by the iod thread
 * itself (scr_td == iod_td) are flagged SMBR_INTERNAL, inserted at the
 * head of the list, and sent synchronously right here — sleeping in
 * one-second slices until the send happens — since the iod thread
 * cannot wait on itself.  All other requests may first trigger a
 * synchronous connect event if the VC is down, then honor the server's
 * multiplex limit (vc_maxmux) by sleeping on iod_muxwant before being
 * appended at the tail for smb_iod_sendall to pick up.
 */
406 smb_iod_addrq(struct smb_rq *rqp)
408 struct smb_vc *vcp = rqp->sr_vc;
409 struct smbiod *iod = vcp->vc_iod;
414 if (rqp->sr_cred->scr_td == iod->iod_td) {
415 rqp->sr_flags |= SMBR_INTERNAL;
416 SMB_IOD_RQLOCK(&ilock, iod);
417 TAILQ_INSERT_HEAD(&iod->iod_rqlist, rqp, sr_link);
418 SMB_IOD_RQUNLOCK(&ilock);
420 if (smb_iod_sendrq(iod, rqp) != 0) {
425 * we don't need to lock state field here
427 if (rqp->sr_state != SMBRQ_NOTSENT)
429 tsleep(&iod->iod_flags, 0, "90sndw", hz);
432 smb_iod_removerq(rqp);
433 return rqp->sr_lerror;
436 switch (iod->iod_state) {
437 case SMBIOD_ST_NOTCONN:
440 error = smb_iod_request(vcp->vc_iod, SMBIOD_EV_CONNECT | SMBIOD_EV_SYNC, NULL);
448 SMB_IOD_RQLOCK(&ilock, iod);
450 if (vcp->vc_maxmux == 0) {
451 SMBERROR("maxmux == 0\n");
454 if (iod->iod_muxcnt < vcp->vc_maxmux)
457 smb_sleep(&iod->iod_muxwant, &ilock, 0, "90mux", 0);
460 TAILQ_INSERT_TAIL(&iod->iod_rqlist, rqp, sr_link);
461 SMB_IOD_RQUNLOCK(&ilock);
/*
 * Remove a request from the iod's outstanding list.  Internal requests
 * take the fast path (simple unlink).  Others must first wait for any
 * transient exclusive lock (SMBR_XLOCK, held by smb_iod_sendall while
 * it transmits without the list lock) to clear, then unlink and wake a
 * thread waiting on the multiplex limit, if any.
 */
467 smb_iod_removerq(struct smb_rq *rqp)
469 struct smb_vc *vcp = rqp->sr_vc;
470 struct smbiod *iod = vcp->vc_iod;
474 if (rqp->sr_flags & SMBR_INTERNAL) {
475 SMB_IOD_RQLOCK(&ilock, iod);
476 TAILQ_REMOVE(&iod->iod_rqlist, rqp, sr_link);
477 SMB_IOD_RQUNLOCK(&ilock);
480 SMB_IOD_RQLOCK(&ilock, iod);
481 while (rqp->sr_flags & SMBR_XLOCK) {
482 rqp->sr_flags |= SMBR_XLOCKWANT;
483 smb_sleep(rqp, &ilock, 0, "90xrm", 0);
485 TAILQ_REMOVE(&iod->iod_rqlist, rqp, sr_link);
487 if (iod->iod_muxwant) {
489 wakeup(&iod->iod_muxwant);
491 SMB_IOD_RQUNLOCK(&ilock);
/*
 * Wait for a queued request to complete and return its error code.
 * Internal (iod-thread) requests cannot block on the iod, so they pump
 * smb_iod_sendall/smb_iod_recvall themselves until a new response
 * generation arrives (sr_rpgen != sr_rplast), then dequeue.  Normal
 * requests sleep on sr_state until smb_iod_rqprocessed wakes them.
 * Multipacket requests stay on the list (more responses may follow)
 * but are moved to the tail so other waiters get a fair turn;
 * everything else is removed.
 */
496 smb_iod_waitrq(struct smb_rq *rqp)
498 struct smbiod *iod = rqp->sr_vc->vc_iod;
503 if (rqp->sr_flags & SMBR_INTERNAL) {
505 smb_iod_sendall(iod);
506 smb_iod_recvall(iod);
507 if (rqp->sr_rpgen != rqp->sr_rplast)
509 tsleep(&iod->iod_flags, 0, "90irq", hz);
511 smb_iod_removerq(rqp);
512 return rqp->sr_lerror;
515 SMBRQ_SLOCK(&ilock, rqp);
516 if (rqp->sr_rpgen == rqp->sr_rplast)
517 smb_sleep(&rqp->sr_state, &ilock, 0, "90wrq", 0);
519 SMBRQ_SUNLOCK(&ilock);
520 error = rqp->sr_lerror;
521 if (rqp->sr_flags & SMBR_MULTIPACKET) {
523 * If request should stay in the list, then reinsert it
524 * at the end of queue so other waiters have chance to concur
526 SMB_IOD_RQLOCK(&ilock, iod);
527 TAILQ_REMOVE(&iod->iod_rqlist, rqp, sr_link);
528 TAILQ_INSERT_TAIL(&iod->iod_rqlist, rqp, sr_link);
529 SMB_IOD_RQUNLOCK(&ilock);
531 smb_iod_removerq(rqp);
/*
 * Walk the outstanding-request list: transmit anything not yet sent
 * and time out anything sent too long ago.
 *
 * To call smb_iod_sendrq without holding the list lock, a request is
 * pinned with SMBR_XLOCK for the duration of the send; waiters on that
 * flag (smb_iod_removerq) are woken afterwards.  Already-sent requests
 * are expired with ETIMEDOUT once twice the transport's timeout has
 * elapsed since sr_timesent.  If a send reported ENOTCONN the
 * connection is presumably declared dead (tail of the function elided
 * in this view — confirm).
 */
537 smb_iod_sendall(struct smbiod *iod)
539 struct smb_vc *vcp = iod->iod_vc;
541 struct timespec ts, tstimeout;
547 * Loop through the list of requests and send them if possible
549 SMB_IOD_RQLOCK(&ilock, iod);
550 TAILQ_FOREACH(rqp, &iod->iod_rqlist, sr_link) {
551 switch (rqp->sr_state) {
553 rqp->sr_flags |= SMBR_XLOCK;
554 SMB_IOD_RQUNLOCK(&ilock);
555 herror = smb_iod_sendrq(iod, rqp);
556 SMB_IOD_RQLOCK(&ilock, iod);
557 rqp->sr_flags &= ~SMBR_XLOCK;
558 if (rqp->sr_flags & SMBR_XLOCKWANT) {
559 rqp->sr_flags &= ~SMBR_XLOCKWANT;
/* Timeout check: expire after 2x the transport timeout. */
564 SMB_TRAN_GETPARAM(vcp, SMBTP_TIMEOUT, &tstimeout);
565 timespecadd(&tstimeout, &tstimeout);
567 timespecsub(&ts, &tstimeout);
568 if (timespeccmp(&ts, &rqp->sr_timesent, >)) {
569 smb_iod_rqprocessed(rqp, ETIMEDOUT);
577 SMB_IOD_RQUNLOCK(&ilock);
578 if (herror == ENOTCONN)
584 * "main" function for smbiod daemon
/*
 * One pass of the iod thread's work loop (called repeatedly from
 * smb_iod_thread):
 *   1. Drain the event queue, dispatching on the event type — connect,
 *      disconnect, tree connect, shutdown, new request.  Each event is
 *      marked SMBIOD_EV_PROCESSING while handled; synchronous posters
 *      are woken (wakeup body elided) and read ev_error afterwards.
 *   2. If the VC is active and idle past the ping timeout, send an SMB
 *      echo to keep the session alive.
 *   3. Push pending requests out and pull responses in.
 */
587 smb_iod_main(struct smbiod *iod)
589 /* struct smb_vc *vcp = iod->iod_vc;*/
590 struct smbiod_event *evp;
591 /* struct timespec tsnow;*/
599 * Check all interesting events
602 SMB_IOD_EVLOCK(&ilock, iod);
603 evp = STAILQ_FIRST(&iod->iod_evlist);
605 SMB_IOD_EVUNLOCK(&ilock);
608 STAILQ_REMOVE_HEAD(&iod->iod_evlist, ev_link);
609 evp->ev_type |= SMBIOD_EV_PROCESSING;
610 SMB_IOD_EVUNLOCK(&ilock);
611 switch (evp->ev_type & SMBIOD_EV_MASK) {
612 case SMBIOD_EV_CONNECT:
613 iod->iod_state = SMBIOD_ST_RECONNECT;
614 evp->ev_error = smb_iod_connect(iod);
616 case SMBIOD_EV_DISCONNECT:
617 evp->ev_error = smb_iod_disconnect(iod);
619 case SMBIOD_EV_TREECONNECT:
620 evp->ev_error = smb_iod_treeconnect(iod, evp->ev_ident);
622 case SMBIOD_EV_SHUTDOWN:
623 iod->iod_flags |= SMBIOD_SHUTDOWN;
625 case SMBIOD_EV_NEWRQ:
628 if (evp->ev_type & SMBIOD_EV_SYNC) {
629 SMB_IOD_EVLOCK(&ilock, iod);
631 SMB_IOD_EVUNLOCK(&ilock);
/* Keep an idle but active VC alive with a periodic echo. */
636 if (iod->iod_state == SMBIOD_ST_VCACTIVE) {
638 timespecsub(&tsnow, &iod->iod_pingtimo);
639 if (timespeccmp(&tsnow, &iod->iod_lastrqsent, >)) {
640 smb_smb_echo(vcp, &iod->iod_scred);
644 smb_iod_sendall(iod);
645 smb_iod_recvall(iod);
/* Map the FreeBSD-style name onto DragonFly's kthread_create2. */
649 #define kthread_create_compat kthread_create2
/*
 * Kernel thread body for one iod: initialize the thread's credentials,
 * then loop — run one smb_iod_main pass, and nap on iod_flags for
 * iod_sleeptimo ticks (or until smb_iod_wakeup) — until the
 * SMBIOD_SHUTDOWN flag is raised.
 */
653 smb_iod_thread(void *arg)
655 struct smbiod *iod = arg;
657 smb_makescred(&iod->iod_scred, iod->iod_td, NULL);
658 while ((iod->iod_flags & SMBIOD_SHUTDOWN) == 0) {
660 SMBIODEBUG("going to sleep for %d ticks\n", iod->iod_sleeptimo);
661 if (iod->iod_flags & SMBIOD_SHUTDOWN)
663 tsleep(&iod->iod_flags, 0, "90idle", iod->iod_sleeptimo);
/*
 * Allocate and start an iod for a new VC: zero-alloc the structure,
 * assign the next id, initialize timeouts, locks, and the request and
 * event queues, then spawn the smbiod kernel thread.  On kthread
 * failure an error is logged (cleanup path elided in this view).
 */
669 smb_iod_create(struct smb_vc *vcp)
672 struct proc *newp = NULL;
675 iod = smb_zmalloc(sizeof(*iod), M_SMBIOD, M_WAITOK);
676 iod->iod_id = smb_iod_next++;
677 iod->iod_state = SMBIOD_ST_NOTCONN;
679 iod->iod_sleeptimo = hz * SMBIOD_SLEEP_TIMO;
680 iod->iod_pingtimo.tv_sec = SMBIOD_PING_TIMO;
/* Pretend a request was just sent so the ping timer starts fresh. */
681 getnanotime(&iod->iod_lastrqsent);
683 smb_sl_init(&iod->iod_rqlock, "90rql");
684 TAILQ_INIT(&iod->iod_rqlist);
685 smb_sl_init(&iod->iod_evlock, "90evl");
686 STAILQ_INIT(&iod->iod_evlist);
687 error = kthread_create_compat(smb_iod_thread, iod, &newp,
688 RFNOWAIT, "smbiod%d", iod->iod_id);
690 SMBERROR("can't start smbiod: %d", error);
694 iod->iod_td = newp->p_thread;
/*
 * Shut down and dismantle an iod: post a synchronous SHUTDOWN event
 * (returns once the thread has processed it), then destroy the locks.
 * Freeing of the iod itself is not visible in this view.
 */
699 smb_iod_destroy(struct smbiod *iod)
701 smb_iod_request(iod, SMBIOD_EV_SHUTDOWN | SMBIOD_EV_SYNC, NULL);
702 smb_sl_destroy(&iod->iod_rqlock);
703 smb_sl_destroy(&iod->iod_evlock);