/*
 * Copyright (c) 2000-2001 Boris Popov
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *    This product includes software developed by Boris Popov.
 * 4. Neither the name of the author nor the names of any co-contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * $FreeBSD: src/sys/netsmb/smb_iod.c,v 1.1.2.2 2002/04/23 03:45:01 bp Exp $
 * $DragonFly: src/sys/netproto/smb/smb_iod.c,v 1.15 2007/02/03 17:05:58 corecode Exp $
 */

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/proc.h>
#include <sys/kernel.h>
#include <sys/kthread.h>
#include <sys/malloc.h>
#include <sys/mbuf.h>
#include <sys/unistd.h>

#include <sys/mplock2.h>

#include "smb.h"
#include "smb_conn.h"
#include "smb_rq.h"
#include "smb_tran.h"
#include "smb_trantcp.h"


#define SMBIOD_SLEEP_TIMO       2
#define SMBIOD_PING_TIMO        60      /* seconds */

#define SMB_IOD_EVLOCKPTR(iod)  (&(iod)->iod_evlock)
#define SMB_IOD_EVLOCK(iod)     smb_sl_lock(&(iod)->iod_evlock)
#define SMB_IOD_EVUNLOCK(iod)   smb_sl_unlock(&(iod)->iod_evlock)
#define SMB_IOD_EVINTERLOCK(iod) (&(iod)->iod_evlock)

#define SMB_IOD_RQLOCKPTR(iod)  (&(iod)->iod_rqlock)
#define SMB_IOD_RQLOCK(iod)     smb_sl_lock(&((iod)->iod_rqlock))
#define SMB_IOD_RQUNLOCK(iod)   smb_sl_unlock(&(iod)->iod_rqlock)
#define SMB_IOD_RQINTERLOCK(iod) (&(iod)->iod_rqlock)

#define smb_iod_wakeup(iod)     wakeup(&(iod)->iod_flags)


static MALLOC_DEFINE(M_SMBIOD, "SMBIOD", "SMB network io daemon");

static int smb_iod_next;

static int  smb_iod_sendall(struct smbiod *iod);
static int  smb_iod_disconnect(struct smbiod *iod);
static void smb_iod_thread(void *);

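/*
 * Mark a request as processed: record the final error, bump the
 * response generation, flag the request NOTIFIED and wake any waiter.
 */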
static __inline void
smb_iod_rqprocessed(struct smb_rq *rqp, int error)
{
        SMBRQ_SLOCK(rqp);
        rqp->sr_lerror = error;
        rqp->sr_rpgen++;
        rqp->sr_state = SMBRQ_NOTIFIED;
        wakeup(&rqp->sr_state);
        SMBRQ_SUNLOCK(rqp);
}

static void
smb_iod_invrq(struct smbiod *iod)
{
        struct smb_rq *rqp;

        /*
         * Invalidate all outstanding requests for this connection.
         */
        SMB_IOD_RQLOCK(iod);
        TAILQ_FOREACH(rqp, &iod->iod_rqlist, sr_link) {
#if 0
                /* this makes no sense whatsoever XXX */
                if (rqp->sr_flags & SMBR_INTERNAL)
                        SMBRQ_SUNLOCK(rqp);
#endif
                rqp->sr_flags |= SMBR_RESTART;
                smb_iod_rqprocessed(rqp, ENOTCONN);
        }
        SMB_IOD_RQUNLOCK(iod);
}

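/*
 * Tear down the VC's transport, if one is attached.
 */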
static void
smb_iod_closetran(struct smbiod *iod)
{
        struct smb_vc *vcp = iod->iod_vc;
        struct thread *td = iod->iod_td;

        if (vcp->vc_tdata == NULL)
                return;
        SMB_TRAN_DISCONNECT(vcp, td);
        SMB_TRAN_DONE(vcp, td);
        vcp->vc_tdata = NULL;
}

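/*
 * Declare the connection dead: drop the transport and fail all
 * outstanding requests with ENOTCONN.
 */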
static void
smb_iod_dead(struct smbiod *iod)
{
        iod->iod_state = SMBIOD_ST_DEAD;
        smb_iod_closetran(iod);
        smb_iod_invrq(iod);
}

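/*
 * (Re)establish the connection: create, bind and connect the transport,
 * then run the SMB negotiate and session setup exchanges.  Any failure
 * marks the connection dead.
 */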
static int
smb_iod_connect(struct smbiod *iod)
{
        struct smb_vc *vcp = iod->iod_vc;
        struct thread *td = iod->iod_td;
        int error;

        SMBIODEBUG("%d\n", iod->iod_state);
        switch (iod->iod_state) {
            case SMBIOD_ST_VCACTIVE:
                SMBERROR("called for already opened connection\n");
                return EISCONN;
            case SMBIOD_ST_DEAD:
                return ENOTCONN;        /* XXX: last error code ? */
            default:
                break;
        }
        vcp->vc_genid++;
        error = 0;
        itry {
                ithrow(SMB_TRAN_CREATE(vcp, td));
                SMBIODEBUG("tcreate\n");
                if (vcp->vc_laddr) {
                        ithrow(SMB_TRAN_BIND(vcp, vcp->vc_laddr, td));
                }
                SMBIODEBUG("tbind\n");
                ithrow(SMB_TRAN_CONNECT(vcp, vcp->vc_paddr, td));
                SMB_TRAN_SETPARAM(vcp, SMBTP_SELECTID, &iod->iod_flags);
                iod->iod_state = SMBIOD_ST_TRANACTIVE;
                SMBIODEBUG("tconnect\n");
/*              vcp->vc_mid = 0;*/
                ithrow(smb_smb_negotiate(vcp, &iod->iod_scred));
                SMBIODEBUG("snegotiate\n");
                ithrow(smb_smb_ssnsetup(vcp, &iod->iod_scred));
                iod->iod_state = SMBIOD_ST_VCACTIVE;
                SMBIODEBUG("completed\n");
                smb_iod_invrq(iod);
        } icatch(error) {
                smb_iod_dead(iod);
        } ifinally {
        } iendtry;
        return error;
}

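/*
 * Close the SMB session, if active, and shut down the transport.
 */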
static int
smb_iod_disconnect(struct smbiod *iod)
{
        struct smb_vc *vcp = iod->iod_vc;

        SMBIODEBUG("\n");
        if (iod->iod_state == SMBIOD_ST_VCACTIVE) {
                smb_smb_ssnclose(vcp, &iod->iod_scred);
                iod->iod_state = SMBIOD_ST_TRANACTIVE;
        }
        vcp->vc_smbuid = SMB_UID_UNKNOWN;
        smb_iod_closetran(iod);
        iod->iod_state = SMBIOD_ST_NOTCONN;
        return 0;
}

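/*
 * (Re)connect a share, reconnecting the VC first if it went away.
 */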
static int
smb_iod_treeconnect(struct smbiod *iod, struct smb_share *ssp)
{
        int error;

        if (iod->iod_state != SMBIOD_ST_VCACTIVE) {
                if (iod->iod_state != SMBIOD_ST_DEAD)
                        return ENOTCONN;
                iod->iod_state = SMBIOD_ST_RECONNECT;
                error = smb_iod_connect(iod);
                if (error)
                        return error;
        }
        SMBIODEBUG("tree reconnect\n");
        SMBS_ST_LOCK(ssp);
        ssp->ss_flags |= SMBS_RECONNECTING;
        SMBS_ST_UNLOCK(ssp);
        error = smb_smb_treeconnect(ssp, &iod->iod_scred);
        SMBS_ST_LOCK(ssp);
        ssp->ss_flags &= ~SMBS_RECONNECTING;
        SMBS_ST_UNLOCK(ssp);
        wakeup(&ssp->ss_vcgenid);
        return error;
}

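/*
 * Transmit a single request.  Gives up and notifies the request with
 * its last error after several send attempts (sr_sendcnt), and returns
 * ENOTCONN on fatal transport errors so the caller can declare the
 * connection dead.
 */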
static int
smb_iod_sendrq(struct smbiod *iod, struct smb_rq *rqp)
{
        struct thread *td = iod->iod_td;
        struct smb_vc *vcp = iod->iod_vc;
        struct smb_share *ssp = rqp->sr_share;
        struct mbuf *m;
        int error;

        SMBIODEBUG("iod_state = %d\n", iod->iod_state);
        switch (iod->iod_state) {
            case SMBIOD_ST_NOTCONN:
                smb_iod_rqprocessed(rqp, ENOTCONN);
                return 0;
            case SMBIOD_ST_DEAD:
                iod->iod_state = SMBIOD_ST_RECONNECT;
                return 0;
            case SMBIOD_ST_RECONNECT:
                return 0;
            default:
                break;
        }
        if (rqp->sr_sendcnt == 0) {
#ifdef movedtoanotherplace
                if (vcp->vc_maxmux != 0 && iod->iod_muxcnt >= vcp->vc_maxmux)
                        return 0;
#endif
                *rqp->sr_rqtid = htoles(ssp ? ssp->ss_tid : SMB_TID_UNKNOWN);
                *rqp->sr_rquid = htoles(vcp ? vcp->vc_smbuid : 0);
                mb_fixhdr(&rqp->sr_rq);
        }
        if (rqp->sr_sendcnt++ > 5) {
                rqp->sr_flags |= SMBR_RESTART;
                smb_iod_rqprocessed(rqp, rqp->sr_lerror);
                /*
                 * If all attempts to send the request failed, then
                 * something is seriously hosed.
                 */
                return ENOTCONN;
        }
        SMBSDEBUG("M:%04x, P:%04x, U:%04x, T:%04x\n", rqp->sr_mid, 0, 0, 0);
        m_dumpm(rqp->sr_rq.mb_top);
        m = m_copym(rqp->sr_rq.mb_top, 0, M_COPYALL, MB_WAIT);
        error = rqp->sr_lerror = m ? SMB_TRAN_SEND(vcp, m, td) : ENOBUFS;
        if (error == 0) {
                getnanotime(&rqp->sr_timesent);
                iod->iod_lastrqsent = rqp->sr_timesent;
                rqp->sr_flags |= SMBR_SENT;
                rqp->sr_state = SMBRQ_SENT;
                return 0;
        }
        /*
         * Check for fatal errors.
         */
        if (SMB_TRAN_FATAL(vcp, error)) {
                /*
                 * No further attempts should be made.
                 */
                return ENOTCONN;
        }
        if (smb_rq_intr(rqp))
                smb_iod_rqprocessed(rqp, EINTR);
        return 0;
}

/*
 * Process incoming packets.
 */
static int
smb_iod_recvall(struct smbiod *iod)
{
        struct smb_vc *vcp = iod->iod_vc;
        struct thread *td = iod->iod_td;
        struct smb_rq *rqp;
        struct mbuf *m;
        u_char *hp;
        u_short mid;
        int error;

        switch (iod->iod_state) {
            case SMBIOD_ST_NOTCONN:
            case SMBIOD_ST_DEAD:
            case SMBIOD_ST_RECONNECT:
                return 0;
            default:
                break;
        }
        for (;;) {
                m = NULL;
                error = SMB_TRAN_RECV(vcp, &m, td);
                if (error == EWOULDBLOCK)
                        break;
                if (SMB_TRAN_FATAL(vcp, error)) {
                        smb_iod_dead(iod);
                        break;
                }
                if (error)
                        break;
                if (m == NULL) {
                        SMBERROR("transport returned NULL mbuf without error\n");
                        error = EPIPE;
                        continue;
                }
                m = m_pullup(m, SMB_HDRLEN);
                if (m == NULL)
                        continue;       /* wait for a good packet */
                /*
                 * We now have an entire, possibly invalid, SMB packet.
                 * Be careful while parsing it.
                 */
                m_dumpm(m);
                hp = mtod(m, u_char*);
                if (bcmp(hp, SMB_SIGNATURE, SMB_SIGLEN) != 0) {
                        m_freem(m);
                        continue;
                }
                mid = SMB_HDRMID(hp);
                SMBSDEBUG("mid %04x\n", (u_int)mid);
                SMB_IOD_RQLOCK(iod);
                TAILQ_FOREACH(rqp, &iod->iod_rqlist, sr_link) {
                        if (rqp->sr_mid != mid)
                                continue;
                        SMBRQ_SLOCK(rqp);
                        if (rqp->sr_rp.md_top == NULL) {
                                md_initm(&rqp->sr_rp, m);
                        } else {
                                if (rqp->sr_flags & SMBR_MULTIPACKET) {
                                        md_append_record(&rqp->sr_rp, m);
                                } else {
                                        SMBRQ_SUNLOCK(rqp);
                                        SMBERROR("duplicate response %d (ignored)\n", mid);
                                        break;
                                }
                        }
                        SMBRQ_SUNLOCK(rqp);
                        smb_iod_rqprocessed(rqp, 0);
                        break;
                }
                SMB_IOD_RQUNLOCK(iod);
                if (rqp == NULL) {
                        SMBERROR("dropping response with mid %d\n", (u_int)mid);
/*                      smb_printrqlist(vcp);*/
                        m_freem(m);
                }
        }
        /*
         * Check for interrupts.
         */
        SMB_IOD_RQLOCK(iod);
        TAILQ_FOREACH(rqp, &iod->iod_rqlist, sr_link) {
                if (smb_proc_intr(rqp->sr_cred->scr_td)) {
                        smb_iod_rqprocessed(rqp, EINTR);
                }
        }
        SMB_IOD_RQUNLOCK(iod);
        return 0;
}

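/*
 * Post an event to the iod thread.  SMBIOD_EV_SYNC requests sleep
 * until the event has been processed and return its error code.
 */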
int
smb_iod_request(struct smbiod *iod, int event, void *ident)
{
        struct smbiod_event *evp;
        int error;

        SMBIODEBUG("\n");
        evp = smb_zmalloc(sizeof(*evp), M_SMBIOD, M_WAITOK);
        evp->ev_type = event;
        evp->ev_ident = ident;
        SMB_IOD_EVLOCK(iod);
        STAILQ_INSERT_TAIL(&iod->iod_evlist, evp, ev_link);
        if ((event & SMBIOD_EV_SYNC) == 0) {
                SMB_IOD_EVUNLOCK(iod);
                smb_iod_wakeup(iod);
                return 0;
        }
        smb_iod_wakeup(iod);
        smb_sleep(evp, SMB_IOD_EVINTERLOCK(iod), PDROP, "90evw", 0);
        error = evp->ev_error;
        kfree(evp, M_SMBIOD);
        return error;
}

/*
 * Place a request in the queue.  Requests from smbiod itself have
 * high priority.
 */
int
smb_iod_addrq(struct smb_rq *rqp)
{
        struct smb_vc *vcp = rqp->sr_vc;
        struct smbiod *iod = vcp->vc_iod;
        int error;

        SMBIODEBUG("\n");
        if (rqp->sr_cred->scr_td == iod->iod_td) {
                rqp->sr_flags |= SMBR_INTERNAL;
                SMB_IOD_RQLOCK(iod);
                TAILQ_INSERT_HEAD(&iod->iod_rqlist, rqp, sr_link);
                SMB_IOD_RQUNLOCK(iod);
                for (;;) {
                        if (smb_iod_sendrq(iod, rqp) != 0) {
                                smb_iod_dead(iod);
                                break;
                        }
                        /*
                         * We don't need to lock the state field here.
                         */
                        if (rqp->sr_state != SMBRQ_NOTSENT)
                                break;
                        tsleep(&iod->iod_flags, 0, "90sndw", hz);
                }
                if (rqp->sr_lerror)
                        smb_iod_removerq(rqp);
                return rqp->sr_lerror;
        }

        switch (iod->iod_state) {
            case SMBIOD_ST_NOTCONN:
                return ENOTCONN;
            case SMBIOD_ST_DEAD:
                error = smb_iod_request(vcp->vc_iod, SMBIOD_EV_CONNECT | SMBIOD_EV_SYNC, NULL);
                if (error)
                        return error;
                return EXDEV;
            default:
                break;
        }

        SMB_IOD_RQLOCK(iod);
        for (;;) {
                if (vcp->vc_maxmux == 0) {
                        SMBERROR("maxmux == 0\n");
                        break;
                }
                if (iod->iod_muxcnt < vcp->vc_maxmux)
                        break;
                iod->iod_muxwant++;
                smb_sleep(&iod->iod_muxwant, SMB_IOD_RQINTERLOCK(iod), 0, "90mux", 0);
        }
        iod->iod_muxcnt++;
        TAILQ_INSERT_TAIL(&iod->iod_rqlist, rqp, sr_link);
        SMB_IOD_RQUNLOCK(iod);
        smb_iod_wakeup(iod);
        return 0;
}

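/*
 * Remove a request from the queue and release its mux slot,
 * waking anyone waiting for one.
 */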
int
smb_iod_removerq(struct smb_rq *rqp)
{
        struct smb_vc *vcp = rqp->sr_vc;
        struct smbiod *iod = vcp->vc_iod;

        SMBIODEBUG("\n");
        if (rqp->sr_flags & SMBR_INTERNAL) {
                SMB_IOD_RQLOCK(iod);
                TAILQ_REMOVE(&iod->iod_rqlist, rqp, sr_link);
                SMB_IOD_RQUNLOCK(iod);
                return 0;
        }
        SMB_IOD_RQLOCK(iod);
        while (rqp->sr_flags & SMBR_XLOCK) {
                rqp->sr_flags |= SMBR_XLOCKWANT;
                smb_sleep(rqp, SMB_IOD_RQINTERLOCK(iod), 0, "90xrm", 0);
        }
        TAILQ_REMOVE(&iod->iod_rqlist, rqp, sr_link);
        iod->iod_muxcnt--;
        if (iod->iod_muxwant) {
                iod->iod_muxwant--;
                wakeup(&iod->iod_muxwant);
        }
        SMB_IOD_RQUNLOCK(iod);
        return 0;
}

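/*
 * Wait for a request to complete.  Internal requests are driven
 * directly from this context; ordinary requests sleep until the iod
 * thread notifies them.
 */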
int
smb_iod_waitrq(struct smb_rq *rqp)
{
        struct smbiod *iod = rqp->sr_vc->vc_iod;
        int error;

        SMBIODEBUG("\n");
        if (rqp->sr_flags & SMBR_INTERNAL) {
                for (;;) {
                        smb_iod_sendall(iod);
                        smb_iod_recvall(iod);
                        if (rqp->sr_rpgen != rqp->sr_rplast)
                                break;
                        tsleep(&iod->iod_flags, 0, "90irq", hz);
                }
                smb_iod_removerq(rqp);
                return rqp->sr_lerror;
        }
        SMBRQ_SLOCK(rqp);
        if (rqp->sr_rpgen == rqp->sr_rplast)
                smb_sleep(&rqp->sr_state, SMBRQ_INTERLOCK(rqp), 0, "90wrq", 0);
        rqp->sr_rplast++;
        SMBRQ_SUNLOCK(rqp);
        error = rqp->sr_lerror;
        if (rqp->sr_flags & SMBR_MULTIPACKET) {
                /*
                 * If the request should stay on the list, reinsert it
                 * at the tail of the queue so other waiters get a
                 * chance to proceed.
                 */
                SMB_IOD_RQLOCK(iod);
                TAILQ_REMOVE(&iod->iod_rqlist, rqp, sr_link);
                TAILQ_INSERT_TAIL(&iod->iod_rqlist, rqp, sr_link);
                SMB_IOD_RQUNLOCK(iod);
        } else
                smb_iod_removerq(rqp);
        return error;
}

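/*
 * Push out any unsent requests and time out those that have been
 * waiting longer than twice the transport timeout.
 */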
static int
smb_iod_sendall(struct smbiod *iod)
{
        struct smb_vc *vcp = iod->iod_vc;
        struct smb_rq *rqp;
        struct timespec ts, tstimeout;
        int herror;

        herror = 0;
        /*
         * Loop through the list of requests and send them if possible.
         */
        SMB_IOD_RQLOCK(iod);
        TAILQ_FOREACH(rqp, &iod->iod_rqlist, sr_link) {
                switch (rqp->sr_state) {
                    case SMBRQ_NOTSENT:
                        rqp->sr_flags |= SMBR_XLOCK;
                        SMB_IOD_RQUNLOCK(iod);
                        herror = smb_iod_sendrq(iod, rqp);
                        SMB_IOD_RQLOCK(iod);
                        rqp->sr_flags &= ~SMBR_XLOCK;
                        if (rqp->sr_flags & SMBR_XLOCKWANT) {
                                rqp->sr_flags &= ~SMBR_XLOCKWANT;
                                wakeup(rqp);
                        }
                        break;
                    case SMBRQ_SENT:
                        SMB_TRAN_GETPARAM(vcp, SMBTP_TIMEOUT, &tstimeout);
                        timespecadd(&tstimeout, &tstimeout);
                        getnanotime(&ts);
                        timespecsub(&ts, &tstimeout);
                        if (timespeccmp(&ts, &rqp->sr_timesent, >)) {
                                smb_iod_rqprocessed(rqp, ETIMEDOUT);
                        }
                        break;
                    default:
                        break;
                }
                if (herror)
                        break;
        }
        SMB_IOD_RQUNLOCK(iod);
        if (herror == ENOTCONN)
                smb_iod_dead(iod);
        return 0;
}

/*
 * "main" function for the smbiod daemon.
 */
static __inline void
smb_iod_main(struct smbiod *iod)
{
/*      struct smb_vc *vcp = iod->iod_vc;*/
        struct smbiod_event *evp;
/*      struct timespec tsnow;*/
        int error;

        SMBIODEBUG("\n");
        error = 0;

        /*
         * Check all interesting events.
         */
        for (;;) {
                SMB_IOD_EVLOCK(iod);
                evp = STAILQ_FIRST(&iod->iod_evlist);
                if (evp == NULL) {
                        SMB_IOD_EVUNLOCK(iod);
                        break;
                }
                STAILQ_REMOVE_HEAD(&iod->iod_evlist, ev_link);
                evp->ev_type |= SMBIOD_EV_PROCESSING;
                SMB_IOD_EVUNLOCK(iod);
                switch (evp->ev_type & SMBIOD_EV_MASK) {
                    case SMBIOD_EV_CONNECT:
                        iod->iod_state = SMBIOD_ST_RECONNECT;
                        evp->ev_error = smb_iod_connect(iod);
                        break;
                    case SMBIOD_EV_DISCONNECT:
                        evp->ev_error = smb_iod_disconnect(iod);
                        break;
                    case SMBIOD_EV_TREECONNECT:
                        evp->ev_error = smb_iod_treeconnect(iod, evp->ev_ident);
                        break;
                    case SMBIOD_EV_SHUTDOWN:
                        iod->iod_flags |= SMBIOD_SHUTDOWN;
                        break;
                    case SMBIOD_EV_NEWRQ:
                        break;
                }
                if (evp->ev_type & SMBIOD_EV_SYNC) {
                        SMB_IOD_EVLOCK(iod);
                        wakeup(evp);
                        SMB_IOD_EVUNLOCK(iod);
                } else
                        kfree(evp, M_SMBIOD);
        }
#if 0
        if (iod->iod_state == SMBIOD_ST_VCACTIVE) {
                getnanotime(&tsnow);
                timespecsub(&tsnow, &iod->iod_pingtimo);
                if (timespeccmp(&tsnow, &iod->iod_lastrqsent, >)) {
                        smb_smb_echo(vcp, &iod->iod_scred);
                }
        }
#endif
        smb_iod_sendall(iod);
        smb_iod_recvall(iod);
}

#define kthread_create_compat   smb_kthread_create
#define kthread_exit_compat     smb_kthread_exit

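/*
 * The iod kernel thread: process events and move requests until
 * told to shut down.
 */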
static void
smb_iod_thread(void *arg)
{
        struct smbiod *iod = arg;

        /*
         * The mplock is not held on entry, but we aren't mpsafe yet.
         */
        get_mplock();

        smb_makescred(&iod->iod_scred, iod->iod_td, NULL);
        while ((iod->iod_flags & SMBIOD_SHUTDOWN) == 0) {
                smb_iod_main(iod);
                SMBIODEBUG("going to sleep for %d ticks\n", iod->iod_sleeptimo);
                if (iod->iod_flags & SMBIOD_SHUTDOWN)
                        break;
                tsleep(&iod->iod_flags, 0, "90idle", iod->iod_sleeptimo);
        }
        kthread_exit_compat();
}

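/*
 * Allocate and initialize an iod for a VC and start its kernel thread.
 */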
int
smb_iod_create(struct smb_vc *vcp)
{
        struct smbiod *iod;
        struct proc *newp = NULL;
        int error;

        iod = smb_zmalloc(sizeof(*iod), M_SMBIOD, M_WAITOK);
        iod->iod_id = smb_iod_next++;
        iod->iod_state = SMBIOD_ST_NOTCONN;
        iod->iod_vc = vcp;
        iod->iod_sleeptimo = hz * SMBIOD_SLEEP_TIMO;
        iod->iod_pingtimo.tv_sec = SMBIOD_PING_TIMO;
        getnanotime(&iod->iod_lastrqsent);
        vcp->vc_iod = iod;
        smb_sl_init(&iod->iod_rqlock, "90rql");
        TAILQ_INIT(&iod->iod_rqlist);
        smb_sl_init(&iod->iod_evlock, "90evl");
        STAILQ_INIT(&iod->iod_evlist);
        error = kthread_create_compat(smb_iod_thread, iod, &newp,
            RFNOWAIT, "smbiod%d", iod->iod_id);
        if (error) {
                SMBERROR("can't start smbiod: %d\n", error);
                vcp->vc_iod = NULL;     /* don't leave a dangling pointer */
                kfree(iod, M_SMBIOD);
                return error;
        }
        /* XXX lwp */
        iod->iod_td = ONLY_LWP_IN_PROC(newp)->lwp_thread;
        return 0;
}

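/*
 * Synchronously shut down the iod thread and release its resources.
 */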
int
smb_iod_destroy(struct smbiod *iod)
{
        smb_iod_request(iod, SMBIOD_EV_SHUTDOWN | SMBIOD_EV_SYNC, NULL);
        smb_sl_destroy(&iod->iod_rqlock);
        smb_sl_destroy(&iod->iod_evlock);
        kfree(iod, M_SMBIOD);
        return 0;
}

int
smb_iod_init(void)
{
        return 0;
}

int
smb_iod_done(void)
{
        return 0;
}