/*-
 * SPDX-License-Identifier: BSD-2-Clause
 *
 * Copyright (c) 2000-2001 Boris Popov
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/endian.h>
#include <sys/kernel.h>
#include <sys/malloc.h>
#include <sys/module.h>
#include <sys/proc.h>
#include <sys/lock.h>
#include <sys/sysctl.h>
#include <sys/socket.h>
#include <sys/socketvar.h>
#include <sys/mbuf.h>

#include <netsmb/smb.h>
#include <netsmb/smb_conn.h>
#include <netsmb/smb_rq.h>
#include <netsmb/smb_subr.h>
#include <netsmb/smb_tran.h>

static MALLOC_DEFINE(M_SMBRQ, "SMBRQ", "SMB request");

MODULE_DEPEND(netsmb, libmchain, 1, 1, 1);

static int  smb_rq_reply(struct smb_rq *rqp);
static int  smb_rq_enqueue(struct smb_rq *rqp);
static int  smb_rq_getenv(struct smb_connobj *layer,
		struct smb_vc **vcpp, struct smb_share **sspp);
static int  smb_rq_new(struct smb_rq *rqp, u_char cmd);
static int  smb_t2_reply(struct smb_t2rq *t2p);

int
smb_rq_alloc(struct smb_connobj *layer, u_char cmd, struct smb_cred *scred,
	struct smb_rq **rqpp)
{
	struct smb_rq *rqp;
	int error;

	rqp = malloc(sizeof(*rqp), M_SMBRQ, M_WAITOK);
	if (rqp == NULL)
		return ENOMEM;
	error = smb_rq_init(rqp, layer, cmd, scred);
	rqp->sr_flags |= SMBR_ALLOCED;
	if (error) {
		smb_rq_done(rqp);
		return error;
	}
	*rqpp = rqp;
	return 0;
}

static char tzero[12];

int
smb_rq_init(struct smb_rq *rqp, struct smb_connobj *layer, u_char cmd,
	struct smb_cred *scred)
{
	int error;

	bzero(rqp, sizeof(*rqp));
	smb_sl_init(&rqp->sr_slock, "srslock");
	error = smb_rq_getenv(layer, &rqp->sr_vc, &rqp->sr_share);
	if (error)
		return error;
	error = smb_vc_access(rqp->sr_vc, scred, SMBM_EXEC);
	if (error)
		return error;
	if (rqp->sr_share) {
		error = smb_share_access(rqp->sr_share, scred, SMBM_EXEC);
		if (error)
			return error;
	}
	rqp->sr_cred = scred;
	rqp->sr_mid = smb_vc_nextmid(rqp->sr_vc);
	return smb_rq_new(rqp, cmd);
}

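/*
 * Start a fresh SMB request header in rqp->sr_rq.  The fields marshalled
 * below follow the standard 32-byte SMB1/CIFS header: protocol signature
 * (0xFF "SMB"), command, 32-bit status, flags, flags2, PID-high, the
 * 8-byte security signature (zeroed when signing is off), a reserved
 * word, TID, PID-low, UID and MID.  The TID and UID slots are only
 * reserved here; the iod thread fills them in when the request is sent.
 */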
static int
smb_rq_new(struct smb_rq *rqp, u_char cmd)
{
	struct smb_vc *vcp = rqp->sr_vc;
	struct mbchain *mbp = &rqp->sr_rq;
	int error;
	u_int16_t flags2;

	rqp->sr_sendcnt = 0;
	mb_done(mbp);
	md_done(&rqp->sr_rp);
	error = mb_init(mbp);
	if (error)
		return error;
	mb_put_mem(mbp, SMB_SIGNATURE, SMB_SIGLEN, MB_MSYSTEM);
	mb_put_uint8(mbp, cmd);
	mb_put_uint32le(mbp, 0);		/* DosError */
	mb_put_uint8(mbp, vcp->vc_hflags);
	flags2 = vcp->vc_hflags2;
	if (cmd == SMB_COM_TRANSACTION || cmd == SMB_COM_TRANSACTION_SECONDARY)
		flags2 &= ~SMB_FLAGS2_UNICODE;
	if (cmd == SMB_COM_NEGOTIATE)
		flags2 &= ~SMB_FLAGS2_SECURITY_SIGNATURE;
	mb_put_uint16le(mbp, flags2);
	if ((flags2 & SMB_FLAGS2_SECURITY_SIGNATURE) == 0) {
		mb_put_mem(mbp, tzero, 12, MB_MSYSTEM);
		rqp->sr_rqsig = NULL;
	} else {
		mb_put_uint16le(mbp, 0 /*scred->sc_p->p_pid >> 16*/);
		rqp->sr_rqsig = (u_int8_t *)mb_reserve(mbp, 8);
		mb_put_uint16le(mbp, 0);
	}
	rqp->sr_rqtid = mb_reserve(mbp, sizeof(u_int16_t));
	mb_put_uint16le(mbp, 1 /*scred->sc_p->p_pid & 0xffff*/);
	rqp->sr_rquid = mb_reserve(mbp, sizeof(u_int16_t));
	mb_put_uint16le(mbp, rqp->sr_mid);
	return 0;
}

void
smb_rq_done(struct smb_rq *rqp)
{
	mb_done(&rqp->sr_rq);
	md_done(&rqp->sr_rp);
	smb_sl_destroy(&rqp->sr_slock);
	if (rqp->sr_flags & SMBR_ALLOCED)
		free(rqp, M_SMBRQ);
}
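
/*
 * Typical request life cycle, sketched for reference (illustrative only;
 * real callers live in smb_smb.c and the SMBFS code, and the command and
 * payload below are placeholders):
 *
 *	struct smb_rq *rqp;
 *	struct mbchain *mbp;
 *
 *	error = smb_rq_alloc(SSTOCP(ssp), SMB_COM_SOMETHING, scred, &rqp);
 *	smb_rq_getrequest(rqp, &mbp);
 *	smb_rq_wstart(rqp);
 *	... mb_put_*() parameter words ...
 *	smb_rq_wend(rqp);
 *	smb_rq_bstart(rqp);
 *	... mb_put_*() data bytes ...
 *	smb_rq_bend(rqp);
 *	error = smb_rq_simple(rqp);
 *	... parse the reply via smb_rq_getreply() ...
 *	smb_rq_done(rqp);
 */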

/*
 * Simple request-reply exchange
 */
int
smb_rq_simple(struct smb_rq *rqp)
{
	struct smb_vc *vcp = rqp->sr_vc;
	int error = EINVAL, i;

	for (i = 0; i < SMB_MAXRCN; i++) {
		rqp->sr_flags &= ~SMBR_RESTART;
		rqp->sr_timo = vcp->vc_timo;
		rqp->sr_state = SMBRQ_NOTSENT;
		error = smb_rq_enqueue(rqp);
		if (error)
			return error;
		error = smb_rq_reply(rqp);
		if (error == 0)
			break;
		if ((rqp->sr_flags & (SMBR_RESTART | SMBR_NORESTART)) != SMBR_RESTART)
			break;
	}
	return error;
}

static int
smb_rq_enqueue(struct smb_rq *rqp)
{
	struct smb_share *ssp = rqp->sr_share;
	int error;

	if (ssp == NULL || rqp->sr_cred == &rqp->sr_vc->vc_iod->iod_scred) {
		return smb_iod_addrq(rqp);
	}
	for (;;) {
		SMBS_ST_LOCK(ssp);
		if (ssp->ss_flags & SMBS_RECONNECTING) {
			msleep(&ssp->ss_vcgenid, SMBS_ST_LOCKPTR(ssp),
			    PWAIT | PDROP, "90trcn", hz);
			if (smb_td_intr(rqp->sr_cred->scr_td))
				return EINTR;
			continue;
		}
		if (smb_share_valid(ssp) || (ssp->ss_flags & SMBS_CONNECTED) == 0) {
			SMBS_ST_UNLOCK(ssp);
		} else {
			SMBS_ST_UNLOCK(ssp);
			error = smb_iod_request(rqp->sr_vc->vc_iod,
			    SMBIOD_EV_TREECONNECT | SMBIOD_EV_SYNC, ssp);
			if (error)
				return error;
		}
		error = smb_iod_addrq(rqp);
		if (error != EXDEV)
			break;
	}
	return error;
}

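/*
 * smb_rq_wstart()/smb_rq_wend() bracket the parameter words of a request
 * and smb_rq_bstart()/smb_rq_bend() bracket the data bytes: wstart
 * reserves the one-byte word count field and resets the mbchain byte
 * counter, wend stores the number of 16-bit words put in between; bstart
 * reserves the two-byte byte count field and bend stores the byte total.
 */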
void
smb_rq_wstart(struct smb_rq *rqp)
{
	rqp->sr_wcount = mb_reserve(&rqp->sr_rq, sizeof(u_int8_t));
	rqp->sr_rq.mb_count = 0;
}

void
smb_rq_wend(struct smb_rq *rqp)
{
	if (rqp->sr_wcount == NULL) {
		SMBERROR("no wcount\n");	/* actually panic */
		return;
	}
	if (rqp->sr_rq.mb_count & 1)
		SMBERROR("odd word count\n");
	*rqp->sr_wcount = rqp->sr_rq.mb_count / 2;
}

void
smb_rq_bstart(struct smb_rq *rqp)
{
	rqp->sr_bcount = mb_reserve(&rqp->sr_rq, sizeof(u_short));
	rqp->sr_rq.mb_count = 0;
}

void
smb_rq_bend(struct smb_rq *rqp)
{
	int bcnt;

	if (rqp->sr_bcount == NULL) {
		SMBERROR("no bcount\n");	/* actually panic */
		return;
	}
	bcnt = rqp->sr_rq.mb_count;
	if (bcnt > 0xffff)
		SMBERROR("byte count too large (%d)\n", bcnt);
	le16enc(rqp->sr_bcount, bcnt);
}

int
smb_rq_intr(struct smb_rq *rqp)
{
	if (rqp->sr_flags & SMBR_INTR)
		return EINTR;
	return smb_td_intr(rqp->sr_cred->scr_td);
}

int
smb_rq_getrequest(struct smb_rq *rqp, struct mbchain **mbpp)
{
	*mbpp = &rqp->sr_rq;
	return 0;
}

int
smb_rq_getreply(struct smb_rq *rqp, struct mdchain **mbpp)
{
	*mbpp = &rqp->sr_rp;
	return 0;
}

static int
smb_rq_getenv(struct smb_connobj *layer,
	struct smb_vc **vcpp, struct smb_share **sspp)
{
	struct smb_vc *vcp = NULL;
	struct smb_share *ssp = NULL;
	struct smb_connobj *cp;
	int error = 0;

	switch (layer->co_level) {
	    case SMBL_VC:
		vcp = CPTOVC(layer);
		if (layer->co_parent == NULL) {
			SMBERROR("zombie VC %s\n", vcp->vc_srvname);
			error = EINVAL;
			break;
		}
		break;
	    case SMBL_SHARE:
		ssp = CPTOSS(layer);
		cp = layer->co_parent;
		if (cp == NULL) {
			SMBERROR("zombie share %s\n", ssp->ss_name);
			error = EINVAL;
			break;
		}
		error = smb_rq_getenv(cp, &vcp, NULL);
		if (error)
			break;
		break;
	    default:
		SMBERROR("invalid layer %d passed\n", layer->co_level);
		error = EINVAL;
	}
	if (vcpp)
		*vcpp = vcp;
	if (sspp)
		*sspp = ssp;
	return error;
}

/*
 * Wait for reply on the request
 */
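/*
 * The reply header is parsed field by field below: the error (either a
 * 32-bit NT status or a DOS error class/code pair, depending on
 * SMB_FLAGS2_ERR_STATUS), flags, flags2 and the echoed TID/PID/UID/MID.
 * DOS-style errors are mapped to errno values via smb_maperror().
 */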
static int
smb_rq_reply(struct smb_rq *rqp)
{
	struct mdchain *mdp = &rqp->sr_rp;
	u_int32_t tdw;
	u_int8_t tb;
	int error, rperror = 0;

	error = smb_iod_waitrq(rqp);
	if (error)
		return error;
	error = md_get_uint32(mdp, &tdw);
	if (error)
		return error;
	error = md_get_uint8(mdp, &tb);
	if (rqp->sr_vc->vc_hflags2 & SMB_FLAGS2_ERR_STATUS) {
		error = md_get_uint32le(mdp, &rqp->sr_error);
	} else {
		error = md_get_uint8(mdp, &rqp->sr_errclass);
		error = md_get_uint8(mdp, &tb);
		error = md_get_uint16le(mdp, &rqp->sr_serror);
		if (!error)
			rperror = smb_maperror(rqp->sr_errclass, rqp->sr_serror);
	}
	error = md_get_uint8(mdp, &rqp->sr_rpflags);
	error = md_get_uint16le(mdp, &rqp->sr_rpflags2);

	error = md_get_uint32(mdp, &tdw);
	error = md_get_uint32(mdp, &tdw);
	error = md_get_uint32(mdp, &tdw);

	error = md_get_uint16le(mdp, &rqp->sr_rptid);
	error = md_get_uint16le(mdp, &rqp->sr_rppid);
	error = md_get_uint16le(mdp, &rqp->sr_rpuid);
	error = md_get_uint16le(mdp, &rqp->sr_rpmid);

	if (error == 0 &&
	    (rqp->sr_vc->vc_hflags2 & SMB_FLAGS2_SECURITY_SIGNATURE))
		error = smb_rq_verify(rqp);

	SMBSDEBUG("M:%04x, P:%04x, U:%04x, T:%04x, E: %d:%d\n",
	    rqp->sr_rpmid, rqp->sr_rppid, rqp->sr_rpuid, rqp->sr_rptid,
	    rqp->sr_errclass, rqp->sr_serror);
	return error ? error : rperror;
}

#define ALIGN4(a)	(((a) + 3) & ~3)	/* round up to a multiple of 4, e.g. ALIGN4(13) == 16 */

/*
 * TRANS2 request implementation
 */
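/*
 * A single SMB_COM_TRANSACTION(2) exchange can carry more parameter and
 * data bytes than fit in one packet.  smb_t2_request_int() sends a
 * primary request and, if needed, SECONDARY requests with the remainder;
 * the reply may likewise arrive in several pieces, which smb_t2_reply()
 * reassembles into t2_rparam and t2_rdata.
 */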
int
smb_t2_alloc(struct smb_connobj *layer, u_short setup, struct smb_cred *scred,
	struct smb_t2rq **t2pp)
{
	struct smb_t2rq *t2p;
	int error;

	t2p = malloc(sizeof(*t2p), M_SMBRQ, M_WAITOK);
	if (t2p == NULL)
		return ENOMEM;
	error = smb_t2_init(t2p, layer, setup, scred);
	t2p->t2_flags |= SMBT2_ALLOCED;
	if (error) {
		smb_t2_done(t2p);
		return error;
	}
	*t2pp = t2p;
	return 0;
}

int
smb_t2_init(struct smb_t2rq *t2p, struct smb_connobj *source, u_short setup,
	struct smb_cred *scred)
{
	int error;

	bzero(t2p, sizeof(*t2p));
	t2p->t2_source = source;
	t2p->t2_setupcount = 1;
	t2p->t2_setupdata = t2p->t2_setup;
	t2p->t2_setup[0] = setup;
	t2p->t2_fid = 0xffff;
	t2p->t2_cred = scred;
	error = smb_rq_getenv(source, &t2p->t2_vc, NULL);
	if (error)
		return error;
	return 0;
}

void
smb_t2_done(struct smb_t2rq *t2p)
{
	mb_done(&t2p->t2_tparam);
	mb_done(&t2p->t2_tdata);
	md_done(&t2p->t2_rparam);
	md_done(&t2p->t2_rdata);
	if (t2p->t2_flags & SMBT2_ALLOCED)
		free(t2p, M_SMBRQ);
}

static int
smb_t2_placedata(struct mbuf *mtop, u_int16_t offset, u_int16_t count,
	struct mdchain *mdp)
{
	struct mbuf *m0;
	int len;

	len = m_length(mtop, NULL);
	if (offset + count > len)
		return (EPROTO);

	m0 = m_split(mtop, offset, M_WAITOK);
	if (len != offset + count) {
		len -= offset + count;
		m_adj(m0, -len);
	}
	if (mdp->md_top == NULL) {
		md_initm(mdp, m0);
	} else
		m_cat(mdp->md_top, m0);
	return 0;
}

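/*
 * Each TRANSACTION(2) response parsed below carries TotalParameterCount,
 * TotalDataCount, a reserved word, ParameterCount/Offset/Displacement,
 * DataCount/Offset/Displacement and SetupCount in its parameter words.
 * The offsets are measured from the start of the SMB header, which is
 * why smb_t2_placedata() splits the raw reply mbuf chain at those byte
 * positions.
 */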
static int
smb_t2_reply(struct smb_t2rq *t2p)
{
	struct mdchain *mdp;
	struct smb_rq *rqp = t2p->t2_rq;
	int error, totpgot, totdgot;
	u_int16_t totpcount, totdcount, pcount, poff, doff, pdisp, ddisp;
	u_int16_t tmp, bc, dcount;
	u_int8_t wc;

	error = smb_rq_reply(rqp);
	if (error)
		return error;
	if ((t2p->t2_flags & SMBT2_ALLSENT) == 0) {
		/*
		 * This is an interim response; ignore it.
		 */
		SMBRQ_SLOCK(rqp);
		md_next_record(&rqp->sr_rp);
		SMBRQ_SUNLOCK(rqp);
		return 0;
	}
	/*
	 * Now we have to get all subsequent responses. The CIFS specification
	 * says that they can arrive out of order, which makes reassembly
	 * awkward.
	 * TODO: timo
	 */
	totpgot = totdgot = 0;
	totpcount = totdcount = 0xffff;
	mdp = &rqp->sr_rp;
	for (;;) {
		m_dumpm(mdp->md_top);
		if ((error = md_get_uint8(mdp, &wc)) != 0)
			break;
		if (wc < 10) {
			error = ENOENT;
			break;
		}
		if ((error = md_get_uint16le(mdp, &tmp)) != 0)
			break;
		if (totpcount > tmp)
			totpcount = tmp;
		md_get_uint16le(mdp, &tmp);
		if (totdcount > tmp)
			totdcount = tmp;
		if ((error = md_get_uint16le(mdp, &tmp)) != 0 || /* reserved */
		    (error = md_get_uint16le(mdp, &pcount)) != 0 ||
		    (error = md_get_uint16le(mdp, &poff)) != 0 ||
		    (error = md_get_uint16le(mdp, &pdisp)) != 0)
			break;
		if (pcount != 0 && pdisp != totpgot) {
			SMBERROR("Can't handle disordered parameters %d:%d\n",
			    pdisp, totpgot);
			error = EINVAL;
			break;
		}
		if ((error = md_get_uint16le(mdp, &dcount)) != 0 ||
		    (error = md_get_uint16le(mdp, &doff)) != 0 ||
		    (error = md_get_uint16le(mdp, &ddisp)) != 0)
			break;
		if (dcount != 0 && ddisp != totdgot) {
			SMBERROR("Can't handle disordered data\n");
			error = EINVAL;
			break;
		}
		md_get_uint8(mdp, &wc);
		md_get_uint8(mdp, NULL);
		tmp = wc;
		while (tmp--)
			md_get_uint16(mdp, NULL);
		if ((error = md_get_uint16le(mdp, &bc)) != 0)
			break;
/*		tmp = SMB_HDRLEN + 1 + 10 * 2 + 2 * wc + 2;*/
		if (dcount) {
			error = smb_t2_placedata(mdp->md_top, doff, dcount,
			    &t2p->t2_rdata);
			if (error)
				break;
		}
		if (pcount) {
			error = smb_t2_placedata(mdp->md_top, poff, pcount,
			    &t2p->t2_rparam);
			if (error)
				break;
		}
		totpgot += pcount;
		totdgot += dcount;
		if (totpgot >= totpcount && totdgot >= totdcount) {
			error = 0;
			t2p->t2_flags |= SMBT2_ALLRECV;
			break;
		}
		/*
		 * We're done with this reply, look for the next one.
		 */
		SMBRQ_SLOCK(rqp);
		md_next_record(&rqp->sr_rp);
		SMBRQ_SUNLOCK(rqp);
		error = smb_rq_reply(rqp);
		if (error)
			break;
	}
	return error;
}

/*
 * Perform a full round of a TRANS2 request
 */
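/*
 * The primary request is filled up to the transport limit (vc_txmax);
 * parameter and data bytes that do not fit are carried by the secondary
 * requests built in the loop further down.
 */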
static int
smb_t2_request_int(struct smb_t2rq *t2p)
{
	struct smb_vc *vcp = t2p->t2_vc;
	struct smb_cred *scred = t2p->t2_cred;
	struct mbchain *mbp;
	struct mdchain *mdp, mbparam, mbdata;
	struct mbuf *m;
	struct smb_rq *rqp;
	int totpcount, leftpcount, totdcount, leftdcount, len, txmax, i;
	int error, doff, poff, txdcount, txpcount, nmlen;

	m = t2p->t2_tparam.mb_top;
	if (m) {
		md_initm(&mbparam, m);	/* do not free it! */
		totpcount = m_fixhdr(m);
		if (totpcount > 0xffff)		/* maxvalue for u_short */
			return EINVAL;
	} else
		totpcount = 0;
	m = t2p->t2_tdata.mb_top;
	if (m) {
		md_initm(&mbdata, m);	/* do not free it! */
		totdcount = m_fixhdr(m);
		if (totdcount > 0xffff)
			return EINVAL;
	} else
		totdcount = 0;
	leftdcount = totdcount;
	leftpcount = totpcount;
	txmax = vcp->vc_txmax;
	error = smb_rq_alloc(t2p->t2_source, t2p->t_name ?
	    SMB_COM_TRANSACTION : SMB_COM_TRANSACTION2, scred, &rqp);
	if (error)
		return error;
	rqp->sr_flags |= SMBR_MULTIPACKET;
	t2p->t2_rq = rqp;
	rqp->sr_t2 = t2p;
	mbp = &rqp->sr_rq;
	smb_rq_wstart(rqp);
	mb_put_uint16le(mbp, totpcount);
	mb_put_uint16le(mbp, totdcount);
	mb_put_uint16le(mbp, t2p->t2_maxpcount);
	mb_put_uint16le(mbp, t2p->t2_maxdcount);
	mb_put_uint8(mbp, t2p->t2_maxscount);
	mb_put_uint8(mbp, 0);			/* reserved */
	mb_put_uint16le(mbp, 0);		/* flags */
	mb_put_uint32le(mbp, 0);		/* Timeout */
	mb_put_uint16le(mbp, 0);		/* reserved 2 */
	len = mb_fixhdr(mbp);
	/*
	 * Now we know the packet size so far:
	 * ALIGN4(len + 5 * 2 + setupcount * 2 + 2 + strlen(name) + 1),
	 * and we need to decide which parts should go into the first request.
	 */
	nmlen = t2p->t_name ? strlen(t2p->t_name) : 0;
	len = ALIGN4(len + 5 * 2 + t2p->t2_setupcount * 2 + 2 + nmlen + 1);
	if (len + leftpcount > txmax) {
		txpcount = min(leftpcount, txmax - len);
		poff = len;
		txdcount = 0;
		doff = 0;
	} else {
		txpcount = leftpcount;
		poff = txpcount ? len : 0;
		len = ALIGN4(len + txpcount);
		txdcount = min(leftdcount, txmax - len);
		doff = txdcount ? len : 0;
	}
	leftpcount -= txpcount;
	leftdcount -= txdcount;
	mb_put_uint16le(mbp, txpcount);
	mb_put_uint16le(mbp, poff);
	mb_put_uint16le(mbp, txdcount);
	mb_put_uint16le(mbp, doff);
	mb_put_uint8(mbp, t2p->t2_setupcount);
	mb_put_uint8(mbp, 0);
	for (i = 0; i < t2p->t2_setupcount; i++)
		mb_put_uint16le(mbp, t2p->t2_setupdata[i]);
	smb_rq_wend(rqp);
	smb_rq_bstart(rqp);
	/* TDUNICODE */
	if (t2p->t_name)
		mb_put_mem(mbp, t2p->t_name, nmlen, MB_MSYSTEM);
	mb_put_uint8(mbp, 0);	/* terminating zero */
	len = mb_fixhdr(mbp);
	if (txpcount) {
		mb_put_mem(mbp, NULL, ALIGN4(len) - len, MB_MZERO);
		error = md_get_mbuf(&mbparam, txpcount, &m);
		SMBSDEBUG("%d:%d:%d\n", error, txpcount, txmax);
		if (error)
			goto freerq;
		mb_put_mbuf(mbp, m);
	}
	len = mb_fixhdr(mbp);
	if (txdcount) {
		mb_put_mem(mbp, NULL, ALIGN4(len) - len, MB_MZERO);
		error = md_get_mbuf(&mbdata, txdcount, &m);
		if (error)
			goto freerq;
		mb_put_mbuf(mbp, m);
	}
	smb_rq_bend(rqp);	/* incredible, but that's it... */
	error = smb_rq_enqueue(rqp);
	if (error)
		goto freerq;
	if (leftpcount == 0 && leftdcount == 0)
		t2p->t2_flags |= SMBT2_ALLSENT;
	error = smb_t2_reply(t2p);
	if (error)
		goto bad;
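	/*
	 * Push out whatever parameter and data bytes did not fit into the
	 * primary request as TRANSACTION(2)_SECONDARY requests, kicking the
	 * iod thread with SMBIOD_EV_NEWRQ for each one.
	 */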
	while (leftpcount || leftdcount) {
		t2p->t2_flags |= SMBT2_SECONDARY;
		error = smb_rq_new(rqp, t2p->t_name ?
		    SMB_COM_TRANSACTION_SECONDARY : SMB_COM_TRANSACTION2_SECONDARY);
		if (error)
			goto bad;
		mbp = &rqp->sr_rq;
		smb_rq_wstart(rqp);
		mb_put_uint16le(mbp, totpcount);
		mb_put_uint16le(mbp, totdcount);
		len = mb_fixhdr(mbp);
		/*
		 * Now we know the packet size so far:
		 * ALIGN4(len + 7 * 2 + 2) for a T2 request, and 2 bytes less
		 * for a T one, and we need to decide which parts should go
		 * into this request.
		 */
		len = ALIGN4(len + 6 * 2 + 2);
		if (t2p->t_name == NULL)
			len += 2;
		if (len + leftpcount > txmax) {
			txpcount = min(leftpcount, txmax - len);
			poff = len;
			txdcount = 0;
			doff = 0;
		} else {
			txpcount = leftpcount;
			poff = txpcount ? len : 0;
			len = ALIGN4(len + txpcount);
			txdcount = min(leftdcount, txmax - len);
			doff = txdcount ? len : 0;
		}
		mb_put_uint16le(mbp, txpcount);
		mb_put_uint16le(mbp, poff);
		mb_put_uint16le(mbp, totpcount - leftpcount);
		mb_put_uint16le(mbp, txdcount);
		mb_put_uint16le(mbp, doff);
		mb_put_uint16le(mbp, totdcount - leftdcount);
		leftpcount -= txpcount;
		leftdcount -= txdcount;
		if (t2p->t_name == NULL)
			mb_put_uint16le(mbp, t2p->t2_fid);
		smb_rq_wend(rqp);
		smb_rq_bstart(rqp);
		mb_put_uint8(mbp, 0);	/* name */
		len = mb_fixhdr(mbp);
		if (txpcount) {
			mb_put_mem(mbp, NULL, ALIGN4(len) - len, MB_MZERO);
			error = md_get_mbuf(&mbparam, txpcount, &m);
			if (error)
				goto bad;
			mb_put_mbuf(mbp, m);
		}
		len = mb_fixhdr(mbp);
		if (txdcount) {
			mb_put_mem(mbp, NULL, ALIGN4(len) - len, MB_MZERO);
			error = md_get_mbuf(&mbdata, txdcount, &m);
			if (error)
				goto bad;
			mb_put_mbuf(mbp, m);
		}
		smb_rq_bend(rqp);
		rqp->sr_state = SMBRQ_NOTSENT;
		error = smb_iod_request(vcp->vc_iod, SMBIOD_EV_NEWRQ, NULL);
		if (error)
			goto bad;
	}	/* while left params or data */
	t2p->t2_flags |= SMBT2_ALLSENT;
	mdp = &t2p->t2_rdata;
	if (mdp->md_top) {
		m_fixhdr(mdp->md_top);
		md_initm(mdp, mdp->md_top);
	}
	mdp = &t2p->t2_rparam;
	if (mdp->md_top) {
		m_fixhdr(mdp->md_top);
		md_initm(mdp, mdp->md_top);
	}
bad:
	smb_iod_removerq(rqp);
freerq:
	if (error) {
		if (rqp->sr_flags & SMBR_RESTART)
			t2p->t2_flags |= SMBT2_RESTART;
		md_done(&t2p->t2_rparam);
		md_done(&t2p->t2_rdata);
	}
	smb_rq_done(rqp);
	return error;
}

int
smb_t2_request(struct smb_t2rq *t2p)
{
	int error = EINVAL, i;

	for (i = 0; i < SMB_MAXRCN; i++) {
		t2p->t2_flags &= ~SMBT2_RESTART;
		error = smb_t2_request_int(t2p);
		if (error == 0)
			break;
		if ((t2p->t2_flags & (SMBT2_RESTART | SMBT2_NORESTART)) != SMBT2_RESTART)
			break;
	}
	return error;
}
