/*-
 * Copyright (c) 2000-2001 Boris Popov
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/endian.h>
#include <sys/proc.h>
#include <sys/kernel.h>
#include <sys/kthread.h>
#include <sys/malloc.h>
#include <sys/mbuf.h>
#include <sys/unistd.h>

#include <netsmb/smb.h>
#include <netsmb/smb_conn.h>
#include <netsmb/smb_rq.h>
#include <netsmb/smb_tran.h>
#include <netsmb/smb_trantcp.h>


#define SMBIOD_SLEEP_TIMO	2
#define	SMBIOD_PING_TIMO	60	/* seconds */

#define	SMB_IOD_EVLOCKPTR(iod)	(&((iod)->iod_evlock))
#define	SMB_IOD_EVLOCK(iod)	smb_sl_lock(&((iod)->iod_evlock))
#define	SMB_IOD_EVUNLOCK(iod)	smb_sl_unlock(&((iod)->iod_evlock))

#define	SMB_IOD_RQLOCKPTR(iod)	(&((iod)->iod_rqlock))
#define	SMB_IOD_RQLOCK(iod)	smb_sl_lock(&((iod)->iod_rqlock))
#define	SMB_IOD_RQUNLOCK(iod)	smb_sl_unlock(&((iod)->iod_rqlock))

#define	smb_iod_wakeup(iod)	wakeup(&(iod)->iod_flags)


static MALLOC_DEFINE(M_SMBIOD, "SMBIOD", "SMB network io daemon");

static int smb_iod_next;

static int  smb_iod_sendall(struct smbiod *iod);
static int  smb_iod_disconnect(struct smbiod *iod);
static void smb_iod_thread(void *);

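/*
 * Mark a request as processed: record the error code, advance the
 * response generation and wake up anyone sleeping in smb_iod_waitrq().
 */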
static __inline void
smb_iod_rqprocessed(struct smb_rq *rqp, int error)
{
	SMBRQ_SLOCK(rqp);
	rqp->sr_lerror = error;
	rqp->sr_rpgen++;
	rqp->sr_state = SMBRQ_NOTIFIED;
	wakeup(&rqp->sr_state);
	SMBRQ_SUNLOCK(rqp);
}

static void
smb_iod_invrq(struct smbiod *iod)
{
	struct smb_rq *rqp;

	/*
	 * Invalidate all outstanding requests for this connection
	 */
	SMB_IOD_RQLOCK(iod);
	TAILQ_FOREACH(rqp, &iod->iod_rqlist, sr_link) {
		rqp->sr_flags |= SMBR_RESTART;
		smb_iod_rqprocessed(rqp, ENOTCONN);
	}
	SMB_IOD_RQUNLOCK(iod);
}

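/*
 * Tear down the transport attached to the VC, if any.
 */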
static void
smb_iod_closetran(struct smbiod *iod)
{
	struct smb_vc *vcp = iod->iod_vc;
	struct thread *td = iod->iod_td;

	if (vcp->vc_tdata == NULL)
		return;
	SMB_TRAN_DISCONNECT(vcp, td);
	SMB_TRAN_DONE(vcp, td);
	vcp->vc_tdata = NULL;
}

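/*
 * Declare the connection dead: drop the transport and fail all
 * outstanding requests with ENOTCONN.
 */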
static void
smb_iod_dead(struct smbiod *iod)
{
	iod->iod_state = SMBIOD_ST_DEAD;
	smb_iod_closetran(iod);
	smb_iod_invrq(iod);
}

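/*
 * Establish a session: create the transport, bind it to the local
 * address if one is set, connect to the server, then run the SMB
 * negotiate and session setup exchanges.  Any failure kills the
 * connection via smb_iod_dead().
 */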
static int
smb_iod_connect(struct smbiod *iod)
{
	struct smb_vc *vcp = iod->iod_vc;
	struct thread *td = iod->iod_td;
	int error;

	SMBIODEBUG("%d\n", iod->iod_state);
	switch(iod->iod_state) {
	    case SMBIOD_ST_VCACTIVE:
		SMBERROR("called for already opened connection\n");
		return EISCONN;
	    case SMBIOD_ST_DEAD:
		return ENOTCONN;	/* XXX: last error code ? */
	    default:
		break;
	}
	vcp->vc_genid++;

	error = (int)SMB_TRAN_CREATE(vcp, td);
	if (error)
		goto fail;
	SMBIODEBUG("tcreate\n");
	if (vcp->vc_laddr) {
		error = (int)SMB_TRAN_BIND(vcp, vcp->vc_laddr, td);
		if (error)
			goto fail;
	}
	SMBIODEBUG("tbind\n");
	error = (int)SMB_TRAN_CONNECT(vcp, vcp->vc_paddr, td);
	if (error)
		goto fail;
	SMB_TRAN_SETPARAM(vcp, SMBTP_SELECTID, &iod->iod_flags);
	iod->iod_state = SMBIOD_ST_TRANACTIVE;
	SMBIODEBUG("tconnect\n");
	/* vcp->vc_mid = 0;*/
	error = (int)smb_smb_negotiate(vcp, &iod->iod_scred);
	if (error)
		goto fail;
	SMBIODEBUG("snegotiate\n");
	error = (int)smb_smb_ssnsetup(vcp, &iod->iod_scred);
	if (error)
		goto fail;
	iod->iod_state = SMBIOD_ST_VCACTIVE;
	SMBIODEBUG("completed\n");
	smb_iod_invrq(iod);
	return (0);

 fail:
	smb_iod_dead(iod);
	return (error);
}

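/*
 * Close the SMB session (if one is active) and shut down the transport.
 */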
static int
smb_iod_disconnect(struct smbiod *iod)
{
	struct smb_vc *vcp = iod->iod_vc;

	SMBIODEBUG("\n");
	if (iod->iod_state == SMBIOD_ST_VCACTIVE) {
		smb_smb_ssnclose(vcp, &iod->iod_scred);
		iod->iod_state = SMBIOD_ST_TRANACTIVE;
	}
	vcp->vc_smbuid = SMB_UID_UNKNOWN;
	smb_iod_closetran(iod);
	iod->iod_state = SMBIOD_ST_NOTCONN;
	return 0;
}

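/*
 * (Re)connect a share, first reconnecting the VC itself if it has
 * gone dead.  Waiters sleeping on ss_vcgenid are woken afterwards.
 */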
static int
smb_iod_treeconnect(struct smbiod *iod, struct smb_share *ssp)
{
	int error;

	if (iod->iod_state != SMBIOD_ST_VCACTIVE) {
		if (iod->iod_state != SMBIOD_ST_DEAD)
			return ENOTCONN;
		iod->iod_state = SMBIOD_ST_RECONNECT;
		error = smb_iod_connect(iod);
		if (error)
			return error;
	}
	SMBIODEBUG("tree reconnect\n");
	SMBS_ST_LOCK(ssp);
	ssp->ss_flags |= SMBS_RECONNECTING;
	SMBS_ST_UNLOCK(ssp);
	error = smb_smb_treeconnect(ssp, &iod->iod_scred);
	SMBS_ST_LOCK(ssp);
	ssp->ss_flags &= ~SMBS_RECONNECTING;
	SMBS_ST_UNLOCK(ssp);
	wakeup(&ssp->ss_vcgenid);
	return error;
}

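/*
 * Try to transmit a single request.  On the first attempt the TID and
 * UID fields are filled in and the packet is signed if required; after
 * more than five attempts the request is marked for restart and
 * ENOTCONN is returned.  A copy of the mbuf chain is handed to the
 * transport so the original can be resent later.
 */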
static int
smb_iod_sendrq(struct smbiod *iod, struct smb_rq *rqp)
{
	struct thread *td = iod->iod_td;
	struct smb_vc *vcp = iod->iod_vc;
	struct smb_share *ssp = rqp->sr_share;
	struct mbuf *m;
	int error;

	SMBIODEBUG("iod_state = %d\n", iod->iod_state);
	switch (iod->iod_state) {
	    case SMBIOD_ST_NOTCONN:
		smb_iod_rqprocessed(rqp, ENOTCONN);
		return 0;
	    case SMBIOD_ST_DEAD:
		iod->iod_state = SMBIOD_ST_RECONNECT;
		return 0;
	    case SMBIOD_ST_RECONNECT:
		return 0;
	    default:
		break;
	}
	if (rqp->sr_sendcnt == 0) {
#ifdef movedtoanotherplace
		if (vcp->vc_maxmux != 0 && iod->iod_muxcnt >= vcp->vc_maxmux)
			return 0;
#endif
		le16enc(rqp->sr_rqtid, ssp ? ssp->ss_tid : SMB_TID_UNKNOWN);
		le16enc(rqp->sr_rquid, vcp ? vcp->vc_smbuid : 0);
		mb_fixhdr(&rqp->sr_rq);
		if (vcp->vc_hflags2 & SMB_FLAGS2_SECURITY_SIGNATURE)
			smb_rq_sign(rqp);
	}
	if (rqp->sr_sendcnt++ > 5) {
		rqp->sr_flags |= SMBR_RESTART;
		smb_iod_rqprocessed(rqp, rqp->sr_lerror);
		/*
		 * If all attempts to send a request failed, then
		 * something is seriously hosed.
		 */
		return ENOTCONN;
	}
	SMBSDEBUG("M:%04x, P:%04x, U:%04x, T:%04x\n", rqp->sr_mid, 0, 0, 0);
	m_dumpm(rqp->sr_rq.mb_top);
	m = m_copym(rqp->sr_rq.mb_top, 0, M_COPYALL, M_WAITOK);
	error = rqp->sr_lerror = SMB_TRAN_SEND(vcp, m, td);
	if (error == 0) {
		getnanotime(&rqp->sr_timesent);
		iod->iod_lastrqsent = rqp->sr_timesent;
		rqp->sr_flags |= SMBR_SENT;
		rqp->sr_state = SMBRQ_SENT;
		return 0;
	}
	/*
	 * Check for fatal errors
	 */
	if (SMB_TRAN_FATAL(vcp, error)) {
		/*
		 * No further attempts should be made
		 */
		return ENOTCONN;
	}
	if (smb_rq_intr(rqp))
		smb_iod_rqprocessed(rqp, EINTR);
	return 0;
}

/*
 * Process incoming packets
 */
static int
smb_iod_recvall(struct smbiod *iod)
{
	struct smb_vc *vcp = iod->iod_vc;
	struct thread *td = iod->iod_td;
	struct smb_rq *rqp;
	struct mbuf *m;
	u_char *hp;
	u_short mid;
	int error;

	switch (iod->iod_state) {
	    case SMBIOD_ST_NOTCONN:
	    case SMBIOD_ST_DEAD:
	    case SMBIOD_ST_RECONNECT:
		return 0;
	    default:
		break;
	}
	for (;;) {
		m = NULL;
		error = SMB_TRAN_RECV(vcp, &m, td);
		if (error == EWOULDBLOCK)
			break;
		if (SMB_TRAN_FATAL(vcp, error)) {
			smb_iod_dead(iod);
			break;
		}
		if (error)
			break;
		if (m == NULL) {
			SMBERROR("tran returned NULL without error\n");
			error = EPIPE;
			continue;
		}
		m = m_pullup(m, SMB_HDRLEN);
		if (m == NULL)
			continue;	/* wait for a good packet */
		/*
		 * We now have a complete, but possibly invalid, SMB packet.
		 * Be careful while parsing it.
		 */
		m_dumpm(m);
		hp = mtod(m, u_char*);
		if (bcmp(hp, SMB_SIGNATURE, SMB_SIGLEN) != 0) {
			m_freem(m);
			continue;
		}
		mid = SMB_HDRMID(hp);
		SMBSDEBUG("mid %04x\n", (u_int)mid);
		SMB_IOD_RQLOCK(iod);
		TAILQ_FOREACH(rqp, &iod->iod_rqlist, sr_link) {
			if (rqp->sr_mid != mid)
				continue;
			SMBRQ_SLOCK(rqp);
			if (rqp->sr_rp.md_top == NULL) {
				md_initm(&rqp->sr_rp, m);
			} else {
				if (rqp->sr_flags & SMBR_MULTIPACKET) {
					md_append_record(&rqp->sr_rp, m);
				} else {
					SMBRQ_SUNLOCK(rqp);
					SMBERROR("duplicate response %d (ignored)\n", mid);
					break;
				}
			}
			SMBRQ_SUNLOCK(rqp);
			smb_iod_rqprocessed(rqp, 0);
			break;
		}
		SMB_IOD_RQUNLOCK(iod);
		if (rqp == NULL) {
			SMBERROR("drop resp with mid %u\n", (u_int)mid);
/*			smb_printrqlist(vcp);*/
			m_freem(m);
		}
	}
	/*
	 * check for interrupts
	 */
	SMB_IOD_RQLOCK(iod);
	TAILQ_FOREACH(rqp, &iod->iod_rqlist, sr_link) {
		if (smb_td_intr(rqp->sr_cred->scr_td)) {
			smb_iod_rqprocessed(rqp, EINTR);
		}
	}
	SMB_IOD_RQUNLOCK(iod);
	return 0;
}

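/*
 * Post an event to the iod thread.  Synchronous events
 * (SMBIOD_EV_SYNC) sleep until the event has been processed and
 * return its error code; asynchronous events are freed by the iod
 * thread itself.
 */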
int
smb_iod_request(struct smbiod *iod, int event, void *ident)
{
	struct smbiod_event *evp;
	int error;

	SMBIODEBUG("\n");
	evp = smb_zmalloc(sizeof(*evp), M_SMBIOD, M_WAITOK);
	evp->ev_type = event;
	evp->ev_ident = ident;
	SMB_IOD_EVLOCK(iod);
	STAILQ_INSERT_TAIL(&iod->iod_evlist, evp, ev_link);
	if ((event & SMBIOD_EV_SYNC) == 0) {
		SMB_IOD_EVUNLOCK(iod);
		smb_iod_wakeup(iod);
		return 0;
	}
	smb_iod_wakeup(iod);
	msleep(evp, SMB_IOD_EVLOCKPTR(iod), PWAIT | PDROP, "90evw", 0);
	error = evp->ev_error;
	free(evp, M_SMBIOD);
	return error;
}

/*
 * Place the request in the queue.  Requests from the smbiod itself
 * have a high priority.
 */
int
smb_iod_addrq(struct smb_rq *rqp)
{
	struct smb_vc *vcp = rqp->sr_vc;
	struct smbiod *iod = vcp->vc_iod;
	int error;

	SMBIODEBUG("\n");
	if (rqp->sr_cred->scr_td != NULL &&
	    rqp->sr_cred->scr_td->td_proc == iod->iod_p) {
		rqp->sr_flags |= SMBR_INTERNAL;
		SMB_IOD_RQLOCK(iod);
		TAILQ_INSERT_HEAD(&iod->iod_rqlist, rqp, sr_link);
		SMB_IOD_RQUNLOCK(iod);
		for (;;) {
			if (smb_iod_sendrq(iod, rqp) != 0) {
				smb_iod_dead(iod);
				break;
			}
			/*
			 * we don't need to lock state field here
			 */
			if (rqp->sr_state != SMBRQ_NOTSENT)
				break;
			tsleep(&iod->iod_flags, PWAIT, "90sndw", hz);
		}
		if (rqp->sr_lerror)
			smb_iod_removerq(rqp);
		return rqp->sr_lerror;
	}

	switch (iod->iod_state) {
	    case SMBIOD_ST_NOTCONN:
		return ENOTCONN;
	    case SMBIOD_ST_DEAD:
		error = smb_iod_request(vcp->vc_iod, SMBIOD_EV_CONNECT | SMBIOD_EV_SYNC, NULL);
		if (error)
			return error;
		return EXDEV;
	    default:
		break;
	}

	SMB_IOD_RQLOCK(iod);
	for (;;) {
		if (vcp->vc_maxmux == 0) {
			SMBERROR("maxmux == 0\n");
			break;
		}
		if (iod->iod_muxcnt < vcp->vc_maxmux)
			break;
		iod->iod_muxwant++;
		msleep(&iod->iod_muxwant, SMB_IOD_RQLOCKPTR(iod),
		    PWAIT, "90mux", 0);
	}
	iod->iod_muxcnt++;
	TAILQ_INSERT_TAIL(&iod->iod_rqlist, rqp, sr_link);
	SMB_IOD_RQUNLOCK(iod);
	smb_iod_wakeup(iod);
	return 0;
}

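/*
 * Remove a request from the queue and release its mux slot, waiting
 * for any transmission lock (SMBR_XLOCK) to clear first.
 */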
int
smb_iod_removerq(struct smb_rq *rqp)
{
	struct smb_vc *vcp = rqp->sr_vc;
	struct smbiod *iod = vcp->vc_iod;

	SMBIODEBUG("\n");
	if (rqp->sr_flags & SMBR_INTERNAL) {
		SMB_IOD_RQLOCK(iod);
		TAILQ_REMOVE(&iod->iod_rqlist, rqp, sr_link);
		SMB_IOD_RQUNLOCK(iod);
		return 0;
	}
	SMB_IOD_RQLOCK(iod);
	while (rqp->sr_flags & SMBR_XLOCK) {
		rqp->sr_flags |= SMBR_XLOCKWANT;
		msleep(rqp, SMB_IOD_RQLOCKPTR(iod), PWAIT, "90xrm", 0);
	}
	TAILQ_REMOVE(&iod->iod_rqlist, rqp, sr_link);
	iod->iod_muxcnt--;
	if (iod->iod_muxwant) {
		iod->iod_muxwant--;
		wakeup(&iod->iod_muxwant);
	}
	SMB_IOD_RQUNLOCK(iod);
	return 0;
}

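/*
 * Wait for a response to the request.  Internal requests (issued by
 * the iod process itself) are driven directly from here, since the
 * iod thread cannot service its own queue while it is the caller.
 */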
int
smb_iod_waitrq(struct smb_rq *rqp)
{
	struct smbiod *iod = rqp->sr_vc->vc_iod;
	int error;

	SMBIODEBUG("\n");
	if (rqp->sr_flags & SMBR_INTERNAL) {
		for (;;) {
			smb_iod_sendall(iod);
			smb_iod_recvall(iod);
			if (rqp->sr_rpgen != rqp->sr_rplast)
				break;
			tsleep(&iod->iod_flags, PWAIT, "90irq", hz);
		}
		smb_iod_removerq(rqp);
		return rqp->sr_lerror;
	}
	SMBRQ_SLOCK(rqp);
	if (rqp->sr_rpgen == rqp->sr_rplast)
		msleep(&rqp->sr_state, SMBRQ_SLOCKPTR(rqp), PWAIT, "90wrq", 0);
	rqp->sr_rplast++;
	SMBRQ_SUNLOCK(rqp);
	error = rqp->sr_lerror;
	if (rqp->sr_flags & SMBR_MULTIPACKET) {
		/*
		 * If the request should stay in the list, reinsert it
		 * at the end of the queue so other waiters get a chance
		 * to be serviced.
		 */
		SMB_IOD_RQLOCK(iod);
		TAILQ_REMOVE(&iod->iod_rqlist, rqp, sr_link);
		TAILQ_INSERT_TAIL(&iod->iod_rqlist, rqp, sr_link);
		SMB_IOD_RQUNLOCK(iod);
	} else
		smb_iod_removerq(rqp);
	return error;
}


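/*
 * Walk the request list: transmit requests that have not been sent
 * yet and time out those that have been outstanding for longer than
 * twice the transport timeout.
 */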
static int
smb_iod_sendall(struct smbiod *iod)
{
	struct smb_vc *vcp = iod->iod_vc;
	struct smb_rq *rqp;
	struct timespec ts, tstimeout;
	int herror;

	herror = 0;
	/*
	 * Loop through the list of requests and send them if possible
	 */
	SMB_IOD_RQLOCK(iod);
	TAILQ_FOREACH(rqp, &iod->iod_rqlist, sr_link) {
		switch (rqp->sr_state) {
		    case SMBRQ_NOTSENT:
			rqp->sr_flags |= SMBR_XLOCK;
			SMB_IOD_RQUNLOCK(iod);
			herror = smb_iod_sendrq(iod, rqp);
			SMB_IOD_RQLOCK(iod);
			rqp->sr_flags &= ~SMBR_XLOCK;
			if (rqp->sr_flags & SMBR_XLOCKWANT) {
				rqp->sr_flags &= ~SMBR_XLOCKWANT;
				wakeup(rqp);
			}
			break;
		    case SMBRQ_SENT:
			SMB_TRAN_GETPARAM(vcp, SMBTP_TIMEOUT, &tstimeout);
			timespecadd(&tstimeout, &tstimeout);
			getnanotime(&ts);
			timespecsub(&ts, &tstimeout);
			if (timespeccmp(&ts, &rqp->sr_timesent, >)) {
				smb_iod_rqprocessed(rqp, ETIMEDOUT);
			}
			break;
		    default:
			break;
		}
		if (herror)
			break;
	}
	SMB_IOD_RQUNLOCK(iod);
	if (herror == ENOTCONN)
		smb_iod_dead(iod);
	return 0;
}

/*
 * "main" function for smbiod daemon
 */
static __inline void
smb_iod_main(struct smbiod *iod)
{
/*	struct smb_vc *vcp = iod->iod_vc;*/
	struct smbiod_event *evp;
/*	struct timespec tsnow;*/

	SMBIODEBUG("\n");

	/*
	 * Check all interesting events
	 */
	for (;;) {
		SMB_IOD_EVLOCK(iod);
		evp = STAILQ_FIRST(&iod->iod_evlist);
		if (evp == NULL) {
			SMB_IOD_EVUNLOCK(iod);
			break;
		}
		STAILQ_REMOVE_HEAD(&iod->iod_evlist, ev_link);
		evp->ev_type |= SMBIOD_EV_PROCESSING;
		SMB_IOD_EVUNLOCK(iod);
		switch (evp->ev_type & SMBIOD_EV_MASK) {
		    case SMBIOD_EV_CONNECT:
			iod->iod_state = SMBIOD_ST_RECONNECT;
			evp->ev_error = smb_iod_connect(iod);
			break;
		    case SMBIOD_EV_DISCONNECT:
			evp->ev_error = smb_iod_disconnect(iod);
			break;
		    case SMBIOD_EV_TREECONNECT:
			evp->ev_error = smb_iod_treeconnect(iod, evp->ev_ident);
			break;
		    case SMBIOD_EV_SHUTDOWN:
			iod->iod_flags |= SMBIOD_SHUTDOWN;
			break;
		    case SMBIOD_EV_NEWRQ:
			break;
		}
		if (evp->ev_type & SMBIOD_EV_SYNC) {
			SMB_IOD_EVLOCK(iod);
			wakeup(evp);
			SMB_IOD_EVUNLOCK(iod);
		} else
			free(evp, M_SMBIOD);
	}
#if 0
	if (iod->iod_state == SMBIOD_ST_VCACTIVE) {
		getnanotime(&tsnow);
		timespecsub(&tsnow, &iod->iod_pingtimo);
		if (timespeccmp(&tsnow, &iod->iod_lastrqsent, >)) {
			smb_smb_echo(vcp, &iod->iod_scred);
		}
	}
#endif
	smb_iod_sendall(iod);
	smb_iod_recvall(iod);
	return;
}

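/*
 * Body of the smbiod kernel process: run the main loop until a
 * shutdown event arrives, then release the iod's resources and exit.
 */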
static void
smb_iod_thread(void *arg)
{
	struct smbiod *iod = arg;

	mtx_lock(&Giant);

	/*
	 * Here we assume that the thread structure will stay the same
	 * for the entire life of the kthread (kproc, to be more precise).
	 */
	iod->iod_td = curthread;
	smb_makescred(&iod->iod_scred, iod->iod_td, NULL);
	while ((iod->iod_flags & SMBIOD_SHUTDOWN) == 0) {
		smb_iod_main(iod);
		SMBIODEBUG("going to sleep for %d ticks\n", iod->iod_sleeptimo);
		if (iod->iod_flags & SMBIOD_SHUTDOWN)
			break;
		tsleep(&iod->iod_flags, PWAIT, "90idle", iod->iod_sleeptimo);
	}

	/* We can now safely destroy the mutexes and free the iod structure. */
	smb_sl_destroy(&iod->iod_rqlock);
	smb_sl_destroy(&iod->iod_evlock);
	free(iod, M_SMBIOD);
	mtx_unlock(&Giant);
	kproc_exit(0);
}

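/*
 * Allocate and initialize an iod for the given VC and start its
 * kernel process.
 */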
int
smb_iod_create(struct smb_vc *vcp)
{
	struct smbiod *iod;
	int error;

	iod = smb_zmalloc(sizeof(*iod), M_SMBIOD, M_WAITOK);
	iod->iod_id = smb_iod_next++;
	iod->iod_state = SMBIOD_ST_NOTCONN;
	iod->iod_vc = vcp;
	iod->iod_sleeptimo = hz * SMBIOD_SLEEP_TIMO;
	iod->iod_pingtimo.tv_sec = SMBIOD_PING_TIMO;
	getnanotime(&iod->iod_lastrqsent);
	vcp->vc_iod = iod;
	smb_sl_init(&iod->iod_rqlock, "90rql");
	TAILQ_INIT(&iod->iod_rqlist);
	smb_sl_init(&iod->iod_evlock, "90evl");
	STAILQ_INIT(&iod->iod_evlist);
	error = kproc_create(smb_iod_thread, iod, &iod->iod_p,
	    RFNOWAIT, 0, "smbiod%d", iod->iod_id);
	if (error) {
		SMBERROR("can't start smbiod: %d\n", error);
		vcp->vc_iod = NULL;
		smb_sl_destroy(&iod->iod_rqlock);
		smb_sl_destroy(&iod->iod_evlock);
		free(iod, M_SMBIOD);
		return error;
	}
	return 0;
}

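/*
 * Ask the iod thread to shut down and wait for it to do so; the
 * thread frees the iod structure itself on exit.
 */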
int
smb_iod_destroy(struct smbiod *iod)
{
	smb_iod_request(iod, SMBIOD_EV_SHUTDOWN | SMBIOD_EV_SYNC, NULL);
	return 0;
}

int
smb_iod_init(void)
{
	return 0;
}

int
smb_iod_done(void)
{
	return 0;
}