/*
 * Copyright (c) 2000-2001, Boris Popov
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *    This product includes software developed by Boris Popov.
 * 4. Neither the name of the author nor the names of any co-contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * $FreeBSD: src/sys/netsmb/smb_rq.c,v 1.1.2.2 2002/04/23 03:45:01 bp Exp $
 * $DragonFly: src/sys/netproto/smb/smb_rq.c,v 1.3 2003/06/25 03:56:06 dillon Exp $
 */
#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/malloc.h>
#include <sys/proc.h>
#include <sys/lock.h>
#include <sys/sysctl.h>
#include <sys/socket.h>
#include <sys/socketvar.h>
#include <sys/mbuf.h>

#include <netsmb/smb.h>
#include <netsmb/smb_conn.h>
#include <netsmb/smb_rq.h>
#include <netsmb/smb_subr.h>
#include <netsmb/smb_tran.h>

MALLOC_DEFINE(M_SMBRQ, "SMBRQ", "SMB request");

MODULE_DEPEND(netsmb, libmchain, 1, 1, 1);

static int smb_rq_reply(struct smb_rq *rqp);
static int smb_rq_enqueue(struct smb_rq *rqp);
static int smb_rq_getenv(struct smb_connobj *layer,
	struct smb_vc **vcpp, struct smb_share **sspp);
static int smb_rq_new(struct smb_rq *rqp, u_char cmd);
static int smb_t2_reply(struct smb_t2rq *t2p);

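/*
 * Allocate a request structure from M_SMBRQ and initialize it for the
 * given connection object and SMB command.  SMBR_ALLOCED is set so that
 * smb_rq_done() will free the structure again.
 */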
int
smb_rq_alloc(struct smb_connobj *layer, u_char cmd, struct smb_cred *scred,
	struct smb_rq **rqpp)
{
	struct smb_rq *rqp;
	int error;

	MALLOC(rqp, struct smb_rq *, sizeof(*rqp), M_SMBRQ, M_WAITOK);
	if (rqp == NULL)
		return ENOMEM;
	error = smb_rq_init(rqp, layer, cmd, scred);
	rqp->sr_flags |= SMBR_ALLOCED;
	if (error) {
		smb_rq_done(rqp);
		return error;
	}
	*rqpp = rqp;
	return 0;
}

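/* 12 zero bytes used to fill the reserved part of the SMB header */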
static char tzero[12];

int
smb_rq_init(struct smb_rq *rqp, struct smb_connobj *layer, u_char cmd,
	struct smb_cred *scred)
{
	int error;

	bzero(rqp, sizeof(*rqp));
	smb_sl_init(&rqp->sr_slock, "srslock");
	error = smb_rq_getenv(layer, &rqp->sr_vc, &rqp->sr_share);
	if (error)
		return error;
	error = smb_vc_access(rqp->sr_vc, scred, SMBM_EXEC);
	if (error)
		return error;
	if (rqp->sr_share) {
		error = smb_share_access(rqp->sr_share, scred, SMBM_EXEC);
		if (error)
			return error;
	}
	rqp->sr_cred = scred;
	rqp->sr_mid = smb_vc_nextmid(rqp->sr_vc);
	return smb_rq_new(rqp, cmd);
}

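/*
 * (Re)initialize the request and build the fixed SMB header.  The TID and
 * UID slots are only reserved here; they are filled in later, when the
 * request is actually sent.
 */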
static int
smb_rq_new(struct smb_rq *rqp, u_char cmd)
{
	struct smb_vc *vcp = rqp->sr_vc;
	struct mbchain *mbp = &rqp->sr_rq;
	int error;

	rqp->sr_sendcnt = 0;
	mb_done(mbp);
	md_done(&rqp->sr_rp);
	error = mb_init(mbp);
	if (error)
		return error;
	mb_put_mem(mbp, SMB_SIGNATURE, SMB_SIGLEN, MB_MSYSTEM);
	mb_put_uint8(mbp, cmd);
	mb_put_uint32le(mbp, 0);		/* DosError */
	mb_put_uint8(mbp, vcp->vc_hflags);	/* flags */
	mb_put_uint16le(mbp, vcp->vc_hflags2);	/* flags2 */
	mb_put_mem(mbp, tzero, 12, MB_MSYSTEM);	/* pid-high, signature, reserved */
	rqp->sr_rqtid = (u_int16_t*)mb_reserve(mbp, sizeof(u_int16_t));
	mb_put_uint16le(mbp, 1 /*scred->sc_p->p_pid & 0xffff*/);
	rqp->sr_rquid = (u_int16_t*)mb_reserve(mbp, sizeof(u_int16_t));
	mb_put_uint16le(mbp, rqp->sr_mid);
	return 0;
}

void
smb_rq_done(struct smb_rq *rqp)
{
	mb_done(&rqp->sr_rq);
	md_done(&rqp->sr_rp);
	smb_sl_destroy(&rqp->sr_slock);
	if (rqp->sr_flags & SMBR_ALLOCED)
		free(rqp, M_SMBRQ);
}

/*
 * Simple request-reply exchange
 */
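/*
 * A rough sketch of the typical calling sequence (cf. the callers in
 * smb_smb.c); variable names and the command constant are illustrative only:
 *
 *	struct smb_rq *rqp;
 *	struct mbchain *mbp;
 *	struct mdchain *mdp;
 *
 *	smb_rq_alloc(SSTOCP(ssp), SMB_COM_something, scred, &rqp);
 *	smb_rq_getrequest(rqp, &mbp);
 *	smb_rq_wstart(rqp);
 *	...put parameter words with mb_put_*()...
 *	smb_rq_wend(rqp);
 *	smb_rq_bstart(rqp);
 *	...put data bytes...
 *	smb_rq_bend(rqp);
 *	error = smb_rq_simple(rqp);
 *	if (error == 0) {
 *		smb_rq_getreply(rqp, &mdp);
 *		...parse the reply with md_get_*()...
 *	}
 *	smb_rq_done(rqp);
 */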
int
smb_rq_simple(struct smb_rq *rqp)
{
	struct smb_vc *vcp = rqp->sr_vc;
	int error = EINVAL, i;

	for (i = 0; i < SMB_MAXRCN; i++) {
		rqp->sr_flags &= ~SMBR_RESTART;
		rqp->sr_timo = vcp->vc_timo;
		rqp->sr_state = SMBRQ_NOTSENT;
		error = smb_rq_enqueue(rqp);
		if (error)
			return error;
		error = smb_rq_reply(rqp);
		if (error == 0)
			break;
		if ((rqp->sr_flags & (SMBR_RESTART | SMBR_NORESTART)) != SMBR_RESTART)
			break;
	}
	return error;
}

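/*
 * Hand the request to the iod thread.  If the share is being reconnected,
 * wait for the reconnect to finish; if the share is connected but no longer
 * valid, ask the iod for a synchronous tree connect first.  smb_iod_addrq()
 * returning EXDEV appears to mean the connection had to be reestablished
 * underneath us, so simply retry.
 */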
static int
smb_rq_enqueue(struct smb_rq *rqp)
{
	struct smb_share *ssp = rqp->sr_share;
	int error;

	if (ssp == NULL || rqp->sr_cred == &rqp->sr_vc->vc_iod->iod_scred) {
		return smb_iod_addrq(rqp);
	}
	for (;;) {
		SMBS_ST_LOCK(ssp);
		if (ssp->ss_flags & SMBS_RECONNECTING) {
			msleep(&ssp->ss_vcgenid, SMBS_ST_LOCKPTR(ssp),
			    PWAIT | PDROP, "90trcn", hz);
			if (smb_proc_intr(rqp->sr_cred->scr_td))
				return EINTR;
			continue;
		}
		if (smb_share_valid(ssp) || (ssp->ss_flags & SMBS_CONNECTED) == 0) {
			SMBS_ST_UNLOCK(ssp);
		} else {
			SMBS_ST_UNLOCK(ssp);
			error = smb_iod_request(rqp->sr_vc->vc_iod,
			    SMBIOD_EV_TREECONNECT | SMBIOD_EV_SYNC, ssp);
			if (error)
				return error;
		}
		error = smb_iod_addrq(rqp);
		if (error != EXDEV)
			break;
	}
	return error;
}

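/*
 * The four helpers below bracket the variable portion of an SMB request:
 * smb_rq_wstart() reserves the byte that will hold the parameter word count
 * and resets the running length, smb_rq_wend() stores that count (in 16-bit
 * words); smb_rq_bstart()/smb_rq_bend() do the same for the 16-bit byte
 * count that follows the parameter words.
 */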
void
smb_rq_wstart(struct smb_rq *rqp)
{
	rqp->sr_wcount = mb_reserve(&rqp->sr_rq, sizeof(u_int8_t));
	rqp->sr_rq.mb_count = 0;
}

void
smb_rq_wend(struct smb_rq *rqp)
{
	if (rqp->sr_wcount == NULL) {
		SMBERROR("no wcount\n");	/* actually panic */
		return;
	}
	if (rqp->sr_rq.mb_count & 1)
		SMBERROR("odd word count\n");
	*rqp->sr_wcount = rqp->sr_rq.mb_count / 2;
}

void
smb_rq_bstart(struct smb_rq *rqp)
{
	rqp->sr_bcount = (u_short*)mb_reserve(&rqp->sr_rq, sizeof(u_short));
	rqp->sr_rq.mb_count = 0;
}

void
smb_rq_bend(struct smb_rq *rqp)
{
	int bcnt;

	if (rqp->sr_bcount == NULL) {
		SMBERROR("no bcount\n");	/* actually panic */
		return;
	}
	bcnt = rqp->sr_rq.mb_count;
	if (bcnt > 0xffff)
		SMBERROR("byte count too large (%d)\n", bcnt);
	*rqp->sr_bcount = bcnt;
}

int
smb_rq_intr(struct smb_rq *rqp)
{
	struct thread *td = rqp->sr_cred->scr_td;

	if (rqp->sr_flags & SMBR_INTR)
		return EINTR;
	return smb_proc_intr(td);
}

int
smb_rq_getrequest(struct smb_rq *rqp, struct mbchain **mbpp)
{
	*mbpp = &rqp->sr_rq;
	return 0;
}

int
smb_rq_getreply(struct smb_rq *rqp, struct mdchain **mbpp)
{
	*mbpp = &rqp->sr_rp;
	return 0;
}

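/*
 * Walk up from a connection object (a VC or a share) and return the VC and,
 * if applicable, the share it belongs to.
 */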
static int
smb_rq_getenv(struct smb_connobj *layer,
	struct smb_vc **vcpp, struct smb_share **sspp)
{
	struct smb_vc *vcp = NULL;
	struct smb_share *ssp = NULL;
	struct smb_connobj *cp;
	int error = 0;

	switch (layer->co_level) {
	    case SMBL_VC:
		vcp = CPTOVC(layer);
		if (layer->co_parent == NULL) {
			SMBERROR("zombie VC %s\n", vcp->vc_srvname);
			error = EINVAL;
			break;
		}
		break;
	    case SMBL_SHARE:
		ssp = CPTOSS(layer);
		cp = layer->co_parent;
		if (cp == NULL) {
			SMBERROR("zombie share %s\n", ssp->ss_name);
			error = EINVAL;
			break;
		}
		error = smb_rq_getenv(cp, &vcp, NULL);
		if (error)
			break;
		break;
	    default:
		SMBERROR("invalid layer %d passed\n", layer->co_level);
		error = EINVAL;
	}
	if (vcpp)
		*vcpp = vcp;
	if (sspp)
		*sspp = ssp;
	return error;
}

/*
 * Wait for reply on the request
 */
static int
smb_rq_reply(struct smb_rq *rqp)
{
	struct mdchain *mdp = &rqp->sr_rp;
	u_int32_t tdw;
	u_int8_t tb;
	int error, rperror = 0;

	error = smb_iod_waitrq(rqp);
	if (error)
		return error;
	error = md_get_uint32(mdp, &tdw);	/* 0xff,'S','M','B' signature */
	if (error)
		return error;
	error = md_get_uint8(mdp, &tb);		/* command */
	if (rqp->sr_vc->vc_hflags2 & SMB_FLAGS2_ERR_STATUS) {
		error = md_get_uint32le(mdp, &rqp->sr_error);	/* 32-bit status */
	} else {
		error = md_get_uint8(mdp, &rqp->sr_errclass);	/* DOS error class */
		error = md_get_uint8(mdp, &tb);			/* reserved */
		error = md_get_uint16le(mdp, &rqp->sr_serror);	/* DOS error code */
		if (!error)
			rperror = smb_maperror(rqp->sr_errclass, rqp->sr_serror);
	}
	error = md_get_uint8(mdp, &rqp->sr_rpflags);
	error = md_get_uint16le(mdp, &rqp->sr_rpflags2);

	/* skip the 12 reserved header bytes */
	error = md_get_uint32(mdp, &tdw);
	error = md_get_uint32(mdp, &tdw);
	error = md_get_uint32(mdp, &tdw);

	error = md_get_uint16le(mdp, &rqp->sr_rptid);
	error = md_get_uint16le(mdp, &rqp->sr_rppid);
	error = md_get_uint16le(mdp, &rqp->sr_rpuid);
	error = md_get_uint16le(mdp, &rqp->sr_rpmid);

	SMBSDEBUG("M:%04x, P:%04x, U:%04x, T:%04x, E: %d:%d\n",
	    rqp->sr_rpmid, rqp->sr_rppid, rqp->sr_rpuid, rqp->sr_rptid,
	    rqp->sr_errclass, rqp->sr_serror);
	return error ? error : rperror;
}

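/* round a length up to the next 4-byte boundary */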
#define ALIGN4(a)	(((a) + 3) & ~3)

/*
 * TRANS2 request implementation
 */
int
smb_t2_alloc(struct smb_connobj *layer, u_short setup, struct smb_cred *scred,
	struct smb_t2rq **t2pp)
{
	struct smb_t2rq *t2p;
	int error;

	MALLOC(t2p, struct smb_t2rq *, sizeof(*t2p), M_SMBRQ, M_WAITOK);
	if (t2p == NULL)
		return ENOMEM;
	error = smb_t2_init(t2p, layer, setup, scred);
	t2p->t2_flags |= SMBT2_ALLOCED;
	if (error) {
		smb_t2_done(t2p);
		return error;
	}
	*t2pp = t2p;
	return 0;
}

int
smb_t2_init(struct smb_t2rq *t2p, struct smb_connobj *source, u_short setup,
	struct smb_cred *scred)
{
	int error;

	bzero(t2p, sizeof(*t2p));
	t2p->t2_source = source;
	t2p->t2_setupcount = 1;
	t2p->t2_setupdata = t2p->t2_setup;
	t2p->t2_setup[0] = setup;
	t2p->t2_fid = 0xffff;
	t2p->t2_cred = scred;
	error = smb_rq_getenv(source, &t2p->t2_vc, NULL);
	if (error)
		return error;
	return 0;
}

void
smb_t2_done(struct smb_t2rq *t2p)
{
	mb_done(&t2p->t2_tparam);
	mb_done(&t2p->t2_tdata);
	md_done(&t2p->t2_rparam);
	md_done(&t2p->t2_rdata);
	if (t2p->t2_flags & SMBT2_ALLOCED)
		free(t2p, M_SMBRQ);
}

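/*
 * Take 'count' bytes starting at 'offset' from the received reply and
 * append them to the given parameter/data mdchain.
 */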
static int
smb_t2_placedata(struct mbuf *mtop, u_int16_t offset, u_int16_t count,
	struct mdchain *mdp)
{
	struct mbuf *m, *m0;
	int len;

	m0 = m_split(mtop, offset, M_WAIT);
	if (m0 == NULL)
		return EBADRPC;
	for(len = 0, m = m0; m->m_next; m = m->m_next)
		len += m->m_len;
	len += m->m_len;
	m->m_len -= len - count;
	if (mdp->md_top == NULL) {
		md_initm(mdp, m0);
	} else
		m_cat(mdp->md_top, m0);
	return 0;
}

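/*
 * Collect the response(s) to a TRANS/TRANS2 request.  A reply may be split
 * over several SMB responses; keep reading until the announced total
 * parameter and data counts have been received, gluing the pieces into
 * t2_rparam and t2_rdata.
 */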
static int
smb_t2_reply(struct smb_t2rq *t2p)
{
	struct mdchain *mdp;
	struct smb_rq *rqp = t2p->t2_rq;
	int error, totpgot, totdgot;
	u_int16_t totpcount, totdcount, pcount, poff, doff, pdisp, ddisp;
	u_int16_t tmp, bc, dcount;
	u_int8_t wc;

	error = smb_rq_reply(rqp);
	if (error)
		return error;
	if ((t2p->t2_flags & SMBT2_ALLSENT) == 0) {
		/*
		 * this is an interim response, ignore it.
		 */
		SMBRQ_SLOCK(rqp);
		md_next_record(&rqp->sr_rp);
		SMBRQ_SUNLOCK(rqp);
		return 0;
	}
	/*
	 * Now we have to collect all subsequent responses.  The CIFS
	 * specification allows them to arrive out of order, which makes
	 * this awkward.
	 * TODO: timo
	 */
	totpgot = totdgot = 0;
	totpcount = totdcount = 0xffff;
	mdp = &rqp->sr_rp;
	for (;;) {
		m_dumpm(mdp->md_top);
		if ((error = md_get_uint8(mdp, &wc)) != 0)	/* word count */
			break;
		if (wc < 10) {
			error = ENOENT;
			break;
		}
		if ((error = md_get_uint16le(mdp, &tmp)) != 0)	/* total param count */
			break;
		if (totpcount > tmp)
			totpcount = tmp;
		md_get_uint16le(mdp, &tmp);			/* total data count */
		if (totdcount > tmp)
			totdcount = tmp;
		if ((error = md_get_uint16le(mdp, &tmp)) != 0 || /* reserved */
		    (error = md_get_uint16le(mdp, &pcount)) != 0 ||
		    (error = md_get_uint16le(mdp, &poff)) != 0 ||
		    (error = md_get_uint16le(mdp, &pdisp)) != 0)
			break;
		if (pcount != 0 && pdisp != totpgot) {
			SMBERROR("Can't handle disordered parameters %d:%d\n",
			    pdisp, totpgot);
			error = EINVAL;
			break;
		}
		if ((error = md_get_uint16le(mdp, &dcount)) != 0 ||
		    (error = md_get_uint16le(mdp, &doff)) != 0 ||
		    (error = md_get_uint16le(mdp, &ddisp)) != 0)
			break;
		if (dcount != 0 && ddisp != totdgot) {
			SMBERROR("Can't handle disordered data\n");
			error = EINVAL;
			break;
		}
		md_get_uint8(mdp, &wc);				/* setup count */
		md_get_uint8(mdp, NULL);			/* reserved */
		tmp = wc;
		while (tmp--)					/* skip setup words */
			md_get_uint16(mdp, NULL);
		if ((error = md_get_uint16le(mdp, &bc)) != 0)	/* byte count */
			break;
/*		tmp = SMB_HDRLEN + 1 + 10 * 2 + 2 * wc + 2;*/
		if (dcount) {
			error = smb_t2_placedata(mdp->md_top, doff, dcount,
			    &t2p->t2_rdata);
			if (error)
				break;
		}
		if (pcount) {
			error = smb_t2_placedata(mdp->md_top, poff, pcount,
			    &t2p->t2_rparam);
			if (error)
				break;
		}
		totpgot += pcount;
		totdgot += dcount;
		if (totpgot >= totpcount && totdgot >= totdcount) {
			error = 0;
			t2p->t2_flags |= SMBT2_ALLRECV;
			break;
		}
		/*
		 * We're done with this reply, look for the next one.
		 */
		SMBRQ_SLOCK(rqp);
		md_next_record(&rqp->sr_rp);
		SMBRQ_SUNLOCK(rqp);
		error = smb_rq_reply(rqp);
		if (error)
			break;
	}
	return error;
}

/*
 * Perform a full round of TRANS2 request
 */
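/*
 * Roughly, a caller (cf. smb_usr.c or the smbfs code) is expected to do
 * something like the following; names and the setup constant are
 * illustrative only:
 *
 *	struct smb_t2rq *t2p;
 *
 *	smb_t2_alloc(SSTOCP(ssp), SMB_TRANS2_something, scred, &t2p);
 *	t2p->t2_maxpcount = ...;	(max reply parameter bytes expected)
 *	t2p->t2_maxdcount = ...;	(max reply data bytes expected)
 *	...mb_init() and fill t2p->t2_tparam / t2p->t2_tdata as needed...
 *	error = smb_t2_request(t2p);
 *	if (error == 0)
 *		...parse t2p->t2_rparam / t2p->t2_rdata with md_get_*()...
 *	smb_t2_done(t2p);
 */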
static int
smb_t2_request_int(struct smb_t2rq *t2p)
{
	struct smb_vc *vcp = t2p->t2_vc;
	struct smb_cred *scred = t2p->t2_cred;
	struct mbchain *mbp;
	struct mdchain *mdp, mbparam, mbdata;
	struct mbuf *m;
	struct smb_rq *rqp;
	int totpcount, leftpcount, totdcount, leftdcount, len, txmax, i;
	int error, doff, poff, txdcount, txpcount, nmlen;

	m = t2p->t2_tparam.mb_top;
	if (m) {
		md_initm(&mbparam, m);	/* do not free it! */
		totpcount = m_fixhdr(m);
		if (totpcount > 0xffff)	/* maxvalue for u_short */
			return EINVAL;
	} else
		totpcount = 0;
	m = t2p->t2_tdata.mb_top;
	if (m) {
		md_initm(&mbdata, m);	/* do not free it! */
		totdcount = m_fixhdr(m);
		if (totdcount > 0xffff)
			return EINVAL;
	} else
		totdcount = 0;
	leftdcount = totdcount;
	leftpcount = totpcount;
	txmax = vcp->vc_txmax;
	error = smb_rq_alloc(t2p->t2_source, t2p->t_name ?
	    SMB_COM_TRANSACTION : SMB_COM_TRANSACTION2, scred, &rqp);
	if (error)
		return error;
	rqp->sr_flags |= SMBR_MULTIPACKET;
	t2p->t2_rq = rqp;
	mbp = &rqp->sr_rq;
	smb_rq_wstart(rqp);
	mb_put_uint16le(mbp, totpcount);
	mb_put_uint16le(mbp, totdcount);
	mb_put_uint16le(mbp, t2p->t2_maxpcount);
	mb_put_uint16le(mbp, t2p->t2_maxdcount);
	mb_put_uint8(mbp, t2p->t2_maxscount);
	mb_put_uint8(mbp, 0);		/* reserved */
	mb_put_uint16le(mbp, 0);	/* flags */
	mb_put_uint32le(mbp, 0);	/* Timeout */
	mb_put_uint16le(mbp, 0);	/* reserved 2 */
	len = mb_fixhdr(mbp);
	/*
	 * The packet size is now known:
	 * ALIGN4(len + 5 * 2 + setupcount * 2 + 2 + strlen(name) + 1).
	 * Decide which parts go into the first request.
	 */
	nmlen = t2p->t_name ? strlen(t2p->t_name) : 0;
	len = ALIGN4(len + 5 * 2 + t2p->t2_setupcount * 2 + 2 + nmlen + 1);
	if (len + leftpcount > txmax) {
		txpcount = min(leftpcount, txmax - len);
		poff = len;
		txdcount = 0;
		doff = 0;
	} else {
		txpcount = leftpcount;
		poff = txpcount ? len : 0;
		len = ALIGN4(len + txpcount);
		txdcount = min(leftdcount, txmax - len);
		doff = txdcount ? len : 0;
	}
	leftpcount -= txpcount;
	leftdcount -= txdcount;
	mb_put_uint16le(mbp, txpcount);
	mb_put_uint16le(mbp, poff);
	mb_put_uint16le(mbp, txdcount);
	mb_put_uint16le(mbp, doff);
	mb_put_uint8(mbp, t2p->t2_setupcount);
	mb_put_uint8(mbp, 0);
	for (i = 0; i < t2p->t2_setupcount; i++)
		mb_put_uint16le(mbp, t2p->t2_setupdata[i]);
	smb_rq_wend(rqp);
	smb_rq_bstart(rqp);
	/* TDUNICODE */
	if (t2p->t_name)
		mb_put_mem(mbp, t2p->t_name, nmlen, MB_MSYSTEM);
	mb_put_uint8(mbp, 0);	/* terminating zero */
	len = mb_fixhdr(mbp);
	if (txpcount) {
		mb_put_mem(mbp, NULL, ALIGN4(len) - len, MB_MZERO);
		error = md_get_mbuf(&mbparam, txpcount, &m);
		SMBSDEBUG("%d:%d:%d\n", error, txpcount, txmax);
		if (error)
			goto freerq;
		mb_put_mbuf(mbp, m);
	}
	len = mb_fixhdr(mbp);
	if (txdcount) {
		mb_put_mem(mbp, NULL, ALIGN4(len) - len, MB_MZERO);
		error = md_get_mbuf(&mbdata, txdcount, &m);
		if (error)
			goto freerq;
		mb_put_mbuf(mbp, m);
	}
	smb_rq_bend(rqp);	/* incredible, but that's it... */
	error = smb_rq_enqueue(rqp);
	if (error)
		goto freerq;
	if (leftpcount == 0 && leftdcount == 0)
		t2p->t2_flags |= SMBT2_ALLSENT;
	error = smb_t2_reply(t2p);
	if (error)
		goto bad;
	while (leftpcount || leftdcount) {
		error = smb_rq_new(rqp, t2p->t_name ?
		    SMB_COM_TRANSACTION_SECONDARY : SMB_COM_TRANSACTION2_SECONDARY);
		if (error)
			goto bad;
		mbp = &rqp->sr_rq;
		smb_rq_wstart(rqp);
		mb_put_uint16le(mbp, totpcount);
		mb_put_uint16le(mbp, totdcount);
		len = mb_fixhdr(mbp);
		/*
		 * The packet size is now known: ALIGN4(len + 7 * 2 + 2) for a
		 * TRANS2 request, two bytes less for a TRANS request.
		 * Decide which parts go into this request.
		 */
		len = ALIGN4(len + 6 * 2 + 2);
		if (t2p->t_name == NULL)
			len += 2;
		if (len + leftpcount > txmax) {
			txpcount = min(leftpcount, txmax - len);
			poff = len;
			txdcount = 0;
			doff = 0;
		} else {
			txpcount = leftpcount;
			poff = txpcount ? len : 0;
			len = ALIGN4(len + txpcount);
			txdcount = min(leftdcount, txmax - len);
			doff = txdcount ? len : 0;
		}
		mb_put_uint16le(mbp, txpcount);
		mb_put_uint16le(mbp, poff);
		mb_put_uint16le(mbp, totpcount - leftpcount);
		mb_put_uint16le(mbp, txdcount);
		mb_put_uint16le(mbp, doff);
		mb_put_uint16le(mbp, totdcount - leftdcount);
		leftpcount -= txpcount;
		leftdcount -= txdcount;
		if (t2p->t_name == NULL)
			mb_put_uint16le(mbp, t2p->t2_fid);
		smb_rq_wend(rqp);
		smb_rq_bstart(rqp);
		mb_put_uint8(mbp, 0);	/* name */
		len = mb_fixhdr(mbp);
		if (txpcount) {
			mb_put_mem(mbp, NULL, ALIGN4(len) - len, MB_MZERO);
			error = md_get_mbuf(&mbparam, txpcount, &m);
			if (error)
				goto bad;
			mb_put_mbuf(mbp, m);
		}
		len = mb_fixhdr(mbp);
		if (txdcount) {
			mb_put_mem(mbp, NULL, ALIGN4(len) - len, MB_MZERO);
			error = md_get_mbuf(&mbdata, txdcount, &m);
			if (error)
				goto bad;
			mb_put_mbuf(mbp, m);
		}
		smb_rq_bend(rqp);
		rqp->sr_state = SMBRQ_NOTSENT;
		error = smb_iod_request(vcp->vc_iod, SMBIOD_EV_NEWRQ, NULL);
		if (error)
			goto bad;
	}	/* while left params or data */
	t2p->t2_flags |= SMBT2_ALLSENT;
	mdp = &t2p->t2_rdata;
	if (mdp->md_top) {
		m_fixhdr(mdp->md_top);
		md_initm(mdp, mdp->md_top);
	}
	mdp = &t2p->t2_rparam;
	if (mdp->md_top) {
		m_fixhdr(mdp->md_top);
		md_initm(mdp, mdp->md_top);
	}
bad:
	smb_iod_removerq(rqp);
freerq:
	if (error) {
		/* check the restart flag before smb_rq_done() can free rqp */
		if (rqp->sr_flags & SMBR_RESTART)
			t2p->t2_flags |= SMBT2_RESTART;
		md_done(&t2p->t2_rparam);
		md_done(&t2p->t2_rdata);
	}
	smb_rq_done(rqp);
	return error;
}

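/*
 * Run the TRANS2 exchange, retrying up to SMB_MAXRCN times when the lower
 * layer flags the transaction as restartable (e.g. after a reconnect).
 */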
int
smb_t2_request(struct smb_t2rq *t2p)
{
	int error = EINVAL, i;

	for (i = 0; i < SMB_MAXRCN; i++) {
		t2p->t2_flags &= ~SMBT2_RESTART;	/* clear the restart flag before each attempt */
		error = smb_t2_request_int(t2p);
		if (error == 0)
			break;
		if ((t2p->t2_flags & (SMBT2_RESTART | SMBT2_NORESTART)) != SMBT2_RESTART)
			break;
	}
	return error;
}