1/*
2 * Copyright (c) 1999-2011 Apple Inc. All rights reserved.
3 *
4 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
5 *
6 * This file contains Original Code and/or Modifications of Original Code
7 * as defined in and that are subject to the Apple Public Source License
8 * Version 2.0 (the 'License'). You may not use this file except in
9 * compliance with the License. The rights granted to you under the License
10 * may not be used to create, or enable the creation or redistribution of,
11 * unlawful or unlicensed copies of an Apple operating system, or to
12 * circumvent, violate, or enable the circumvention or violation of, any
13 * terms of an Apple operating system software license agreement.
14 *
15 * Please obtain a copy of the License at
16 * http://www.opensource.apple.com/apsl/ and read it before using this file.
17 *
18 * The Original Code and all software distributed under the License are
19 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
20 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
21 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
22 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
23 * Please see the License for the specific language governing rights and
24 * limitations under the License.
25 *
26 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
27 */
28
29/*
 * Kernel Control domain - allows control connections to kernel
 * controllers and to read/write data.
32 *
33 * Vincent Lubet, 040506
34 * Christophe Allie, 010928
35 * Justin C. Walker, 990319
36 */
37
38#include <sys/types.h>
39#include <sys/param.h>
40#include <sys/systm.h>
41#include <sys/syslog.h>
42#include <sys/socket.h>
43#include <sys/socketvar.h>
44#include <sys/protosw.h>
45#include <sys/domain.h>
46#include <sys/malloc.h>
47#include <sys/mbuf.h>
48#include <sys/sys_domain.h>
49#include <sys/kern_event.h>
50#include <sys/kern_control.h>
51#include <sys/kauth.h>
52#include <net/if_var.h>
53
54#include <mach/vm_types.h>
55
56#include <kern/thread.h>
57
/*
 * Definitions and defaults for the kernel controls we support
 */
61
62#define CTL_SENDSIZE	(2 * 1024)	/* default buffer size */
63#define CTL_RECVSIZE 	(8 * 1024)	/* default buffer size */
64
/*
 * Global state and locks for the kernel controls we support
 */
68
69static u_int32_t		ctl_maxunit = 65536;
70static lck_grp_attr_t	*ctl_lck_grp_attr = 0;
71static lck_attr_t		*ctl_lck_attr = 0;
72static lck_grp_t		*ctl_lck_grp = 0;
73static lck_mtx_t 		*ctl_mtx;
74
75
76/* all the controllers are chained */
77TAILQ_HEAD(kctl_list, kctl) 	ctl_head;
78
79static int ctl_attach(struct socket *, int, struct proc *);
80static int ctl_detach(struct socket *);
81static int ctl_sofreelastref(struct socket *so);
82static int ctl_connect(struct socket *, struct sockaddr *, struct proc *);
83static int ctl_disconnect(struct socket *);
84static int ctl_ioctl(struct socket *so, u_long cmd, caddr_t data,
85                  struct ifnet *ifp, struct proc *p);
86static int ctl_send(struct socket *, int, struct mbuf *,
87            struct sockaddr *, struct mbuf *, struct proc *);
88static int ctl_ctloutput(struct socket *, struct sockopt *);
89static int ctl_peeraddr(struct socket *so, struct sockaddr **nam);
90
91static struct kctl *ctl_find_by_name(const char *);
92static struct kctl *ctl_find_by_id_unit(u_int32_t id, u_int32_t unit);
93
94static struct socket *kcb_find_socket(struct kctl *, u_int32_t unit);
95static struct ctl_cb *kcb_find(struct kctl *, u_int32_t unit);
96static void ctl_post_msg(u_int32_t event_code, u_int32_t id);
97
98static int ctl_lock(struct socket *, int, void *);
99static int ctl_unlock(struct socket *, int, void *);
100static lck_mtx_t * ctl_getlock(struct socket *, int);
101
/*
 * User-request dispatch table for kernel control sockets.
 * Operations this domain does not support are routed to the generic
 * pru_*_notsupp stubs; send/receive go through the standard
 * sosend/soreceive paths.
 */
static struct pr_usrreqs ctl_usrreqs =
{
	pru_abort_notsupp, pru_accept_notsupp, ctl_attach, pru_bind_notsupp,
	ctl_connect, pru_connect2_notsupp, ctl_ioctl, ctl_detach,
	ctl_disconnect, pru_listen_notsupp, ctl_peeraddr,
	pru_rcvd_notsupp, pru_rcvoob_notsupp, ctl_send,
	pru_sense_null, pru_shutdown_notsupp, pru_sockaddr_notsupp,
	sosend, soreceive, pru_sopoll_notsupp
};
111
/*
 * Protocol switch entry for datagram-style kernel control sockets
 * (SOCK_DGRAM over SYSPROTO_CONTROL).  Uses per-pcb locking via
 * ctl_lock/ctl_unlock/ctl_getlock.
 */
static struct protosw kctlswk_dgram =
{
	SOCK_DGRAM, &systemdomain, SYSPROTO_CONTROL,
	PR_ATOMIC|PR_CONNREQUIRED|PR_PCBLOCK,
	NULL, NULL, NULL, ctl_ctloutput,
	NULL, NULL,
	NULL, NULL, NULL, NULL, &ctl_usrreqs,
	ctl_lock, ctl_unlock, ctl_getlock, { 0, 0 } , 0, { 0 }
};
121
/*
 * Protocol switch entry for stream-style kernel control sockets
 * (SOCK_STREAM over SYSPROTO_CONTROL).  Identical to the dgram entry
 * except for the socket type and the absence of PR_ATOMIC.
 */
static struct protosw kctlswk_stream =
{
	SOCK_STREAM, &systemdomain, SYSPROTO_CONTROL,
	PR_CONNREQUIRED|PR_PCBLOCK,
	NULL, NULL, NULL, ctl_ctloutput,
	NULL, NULL,
	NULL, NULL, NULL, NULL, &ctl_usrreqs,
	ctl_lock, ctl_unlock, ctl_getlock, { 0, 0 } , 0, { 0 }
};
131
132
133/*
134 * Install the protosw's for the Kernel Control manager.
135 */
136__private_extern__ int
137kern_control_init(void)
138{
139	int error = 0;
140
141	ctl_lck_grp_attr = lck_grp_attr_alloc_init();
142	if (ctl_lck_grp_attr == 0) {
143			printf(": lck_grp_attr_alloc_init failed\n");
144			error = ENOMEM;
145			goto done;
146	}
147
148	ctl_lck_grp = lck_grp_alloc_init("Kernel Control Protocol", ctl_lck_grp_attr);
149	if (ctl_lck_grp == 0) {
150			printf("kern_control_init: lck_grp_alloc_init failed\n");
151			error = ENOMEM;
152			goto done;
153	}
154
155	ctl_lck_attr = lck_attr_alloc_init();
156	if (ctl_lck_attr == 0) {
157			printf("kern_control_init: lck_attr_alloc_init failed\n");
158			error = ENOMEM;
159			goto done;
160	}
161
162	ctl_mtx = lck_mtx_alloc_init(ctl_lck_grp, ctl_lck_attr);
163	if (ctl_mtx == 0) {
164			printf("kern_control_init: lck_mtx_alloc_init failed\n");
165			error = ENOMEM;
166			goto done;
167	}
168	TAILQ_INIT(&ctl_head);
169
170	error = net_add_proto(&kctlswk_dgram, &systemdomain);
171	if (error) {
172		log(LOG_WARNING, "kern_control_init: net_add_proto dgram failed (%d)\n", error);
173	}
174	error = net_add_proto(&kctlswk_stream, &systemdomain);
175	if (error) {
176		log(LOG_WARNING, "kern_control_init: net_add_proto stream failed (%d)\n", error);
177	}
178
179	done:
180	if (error != 0) {
181		if (ctl_mtx) {
182				lck_mtx_free(ctl_mtx, ctl_lck_grp);
183				ctl_mtx = 0;
184		}
185		if (ctl_lck_grp) {
186				lck_grp_free(ctl_lck_grp);
187				ctl_lck_grp = 0;
188		}
189		if (ctl_lck_grp_attr) {
190				lck_grp_attr_free(ctl_lck_grp_attr);
191				ctl_lck_grp_attr = 0;
192		}
193		if (ctl_lck_attr) {
194				lck_attr_free(ctl_lck_attr);
195				ctl_lck_attr = 0;
196		}
197	}
198	return error;
199}
200
201static void
202kcb_delete(struct ctl_cb *kcb)
203{
204	if (kcb != 0) {
205		if (kcb->mtx != 0)
206			lck_mtx_free(kcb->mtx, ctl_lck_grp);
207		FREE(kcb, M_TEMP);
208	}
209}
210
211
212/*
213 * Kernel Controller user-request functions
214 * attach function must exist and succeed
215 * detach not necessary
216 * we need a pcb for the per socket mutex
217 */
218static int
219ctl_attach(__unused struct socket *so, __unused int proto, __unused struct proc *p)
220{
221	int error = 0;
222	struct ctl_cb			*kcb = 0;
223
224	MALLOC(kcb, struct ctl_cb *, sizeof(struct ctl_cb), M_TEMP, M_WAITOK);
225	if (kcb == NULL) {
226		error = ENOMEM;
227		goto quit;
228	}
229	bzero(kcb, sizeof(struct ctl_cb));
230
231	kcb->mtx = lck_mtx_alloc_init(ctl_lck_grp, ctl_lck_attr);
232	if (kcb->mtx == NULL) {
233		error = ENOMEM;
234		goto quit;
235	}
236	kcb->so = so;
237	so->so_pcb = (caddr_t)kcb;
238
239quit:
240	if (error != 0) {
241		kcb_delete(kcb);
242		kcb = 0;
243	}
244	return error;
245}
246
/*
 * Last-reference teardown, invoked from ctl_unlock() when the socket
 * use count drops to zero.  Unlinks the control block from its
 * controller's list (if still attached), frees it, and releases the
 * socket itself.
 */
static int
ctl_sofreelastref(struct socket *so)
{
    struct ctl_cb 	*kcb = (struct ctl_cb *)so->so_pcb;

    /* Detach the pcb first so no other path can reach the kcb. */
    so->so_pcb = 0;

    if (kcb != 0) {
        struct kctl		*kctl;
        if ((kctl = kcb->kctl) != 0) {
            /* Still on a controller's list: unlink under the global lock. */
            lck_mtx_lock(ctl_mtx);
            TAILQ_REMOVE(&kctl->kcb_head, kcb, next);
            lck_mtx_unlock(ctl_mtx);
    	}
    	kcb_delete(kcb);
    }
    sofreelastref(so, 1);
    return 0;
}
266
267static int
268ctl_detach(struct socket *so)
269{
270    struct ctl_cb 	*kcb = (struct ctl_cb *)so->so_pcb;
271
272    if (kcb == 0)
273    	return 0;
274
275    soisdisconnected(so);
276    so->so_flags |= SOF_PCBCLEARING;
277    return 0;
278}
279
280
/*
 * pru_connect: attach this socket to the controller identified by
 * (sc_id, sc_unit) in the supplied sockaddr_ctl.  Validates socket
 * type and privilege, allocates or validates the unit number, links
 * the kcb onto the controller's list, reserves socket buffers, then
 * invokes the controller's connect callback with the socket lock
 * dropped.  On any failure the kcb is unlinked again.
 */
static int
ctl_connect(struct socket *so, struct sockaddr *nam, __unused struct proc *p)
{
    struct kctl			*kctl;
    int					error = 0;
    struct sockaddr_ctl	sa;
    struct ctl_cb		*kcb = (struct ctl_cb *)so->so_pcb;
    struct ctl_cb		*kcb_next = NULL;

    if (kcb == 0)
    	panic("ctl_connect so_pcb null\n");

    if (nam->sa_len !=  sizeof(struct sockaddr_ctl))
    	return(EINVAL);

    /* Work on a private copy so the caller's buffer can't change under us. */
    bcopy(nam, &sa, sizeof(struct sockaddr_ctl));

    lck_mtx_lock(ctl_mtx);
    kctl = ctl_find_by_id_unit(sa.sc_id, sa.sc_unit);
    if (kctl == NULL) {
        lck_mtx_unlock(ctl_mtx);
        return ENOENT;
    }

	/* The socket type must match how the controller registered. */
	if (((kctl->flags & CTL_FLAG_REG_SOCK_STREAM) && (so->so_type != SOCK_STREAM)) ||
		(!(kctl->flags & CTL_FLAG_REG_SOCK_STREAM) && (so->so_type != SOCK_DGRAM))) {
        lck_mtx_unlock(ctl_mtx);
        return EPROTOTYPE;
	}

    /* Privileged controllers require a superuser credential.
     * NOTE(review): p is declared __unused yet tested here — the
     * attribute only suppresses the unused warning; confirm intent. */
    if (kctl->flags & CTL_FLAG_PRIVILEGED) {
        if (p == 0) {
            lck_mtx_unlock(ctl_mtx);
            return(EINVAL);
        }
        if (kauth_cred_issuser(kauth_cred_get()) == 0) {
            lck_mtx_unlock(ctl_mtx);
            return EPERM;
        }
    }

	/* A fixed unit (or an explicit request) must not already be in use;
	 * otherwise dynamically assign the first free unit number. */
	if ((kctl->flags & CTL_FLAG_REG_ID_UNIT) || sa.sc_unit != 0) {
		if (kcb_find(kctl, sa.sc_unit) != NULL) {
			lck_mtx_unlock(ctl_mtx);
			return EBUSY;
		}
	} else {
		/* Find an unused ID, assumes control IDs are listed in order */
    	u_int32_t	unit = 1;

    	TAILQ_FOREACH(kcb_next, &kctl->kcb_head, next) {
    		if (kcb_next->unit > unit) {
    			/* Found a gap, lets fill it in */
    			break;
    		}
    		unit = kcb_next->unit + 1;
    		if (unit == ctl_maxunit)
    			break;
    	}

		if (unit == ctl_maxunit) {
			lck_mtx_unlock(ctl_mtx);
			return EBUSY;
		}

		sa.sc_unit = unit;
    }

	/* Link into the controller's list, keeping it sorted by unit. */
	kcb->unit = sa.sc_unit;
    kcb->kctl = kctl;
    if (kcb_next != NULL) {
    	TAILQ_INSERT_BEFORE(kcb_next, kcb, next);
    }
    else {
		TAILQ_INSERT_TAIL(&kctl->kcb_head, kcb, next);
	}
    lck_mtx_unlock(ctl_mtx);

    error = soreserve(so, kctl->sendbufsize, kctl->recvbufsize);
    if (error)
		goto done;
    soisconnecting(so);

	/* Drop the socket lock across the controller callback to avoid
	 * lock-order problems with controller-held locks. */
	socket_unlock(so, 0);
    error = (*kctl->connect)(kctl, &sa, &kcb->userdata);
	socket_lock(so, 0);
    if (error)
		goto end;

    soisconnected(so);

end:
	/* connect failed after succeeding partially: let the controller undo. */
	if (error && kctl->disconnect) {
		socket_unlock(so, 0);
		(*kctl->disconnect)(kctl, kcb->unit, kcb->userdata);
		socket_lock(so, 0);
	}
done:
    if (error) {
        /* Unwind the list insertion performed above. */
        soisdisconnected(so);
        lck_mtx_lock(ctl_mtx);
        kcb->kctl = 0;
        kcb->unit = 0;
        TAILQ_REMOVE(&kctl->kcb_head, kcb, next);
        lck_mtx_unlock(ctl_mtx);
    }
    return error;
}
389
390static int
391ctl_disconnect(struct socket *so)
392{
393    struct ctl_cb 	*kcb = (struct ctl_cb *)so->so_pcb;
394
395    if ((kcb = (struct ctl_cb *)so->so_pcb)) {
396        struct kctl		*kctl = kcb->kctl;
397
398        if (kctl && kctl->disconnect) {
399            socket_unlock(so, 0);
400            (*kctl->disconnect)(kctl, kcb->unit, kcb->userdata);
401            socket_lock(so, 0);
402        }
403
404        soisdisconnected(so);
405
406		socket_unlock(so, 0);
407        lck_mtx_lock(ctl_mtx);
408        kcb->kctl = 0;
409    	kcb->unit = 0;
410    	while (kcb->usecount != 0) {
411    		msleep(&kcb->usecount, ctl_mtx, 0, "kcb->usecount", 0);
412    	}
413        TAILQ_REMOVE(&kctl->kcb_head, kcb, next);
414        lck_mtx_unlock(ctl_mtx);
415		socket_lock(so, 0);
416    }
417    return 0;
418}
419
420static int
421ctl_peeraddr(struct socket *so, struct sockaddr **nam)
422{
423	struct ctl_cb 		*kcb = (struct ctl_cb *)so->so_pcb;
424	struct kctl			*kctl;
425	struct sockaddr_ctl	sc;
426
427	if (kcb == NULL)	/* sanity check */
428		return(ENOTCONN);
429
430	if ((kctl = kcb->kctl) == NULL)
431		return(EINVAL);
432
433	bzero(&sc, sizeof(struct sockaddr_ctl));
434	sc.sc_len = sizeof(struct sockaddr_ctl);
435	sc.sc_family = AF_SYSTEM;
436	sc.ss_sysaddr = AF_SYS_CONTROL;
437	sc.sc_id =  kctl->id;
438	sc.sc_unit = kcb->unit;
439
440	*nam = dup_sockaddr((struct sockaddr *)&sc, 1);
441
442	return 0;
443}
444
/*
 * pru_send: pass outgoing data to the controller's send callback.
 * Ancillary (control) mbufs are not supported and are freed up front.
 * On the callback path, ownership of m transfers to the controller;
 * on every other path m is freed here.
 */
static int
ctl_send(struct socket *so, int flags, struct mbuf *m,
            __unused struct sockaddr *addr, struct mbuf *control,
            __unused struct proc *p)
{
	int	 	error = 0;
	struct ctl_cb 	*kcb = (struct ctl_cb *)so->so_pcb;
	struct kctl		*kctl;

	if (control) m_freem(control);

	if (kcb == NULL)	/* sanity check */
		error = ENOTCONN;

	/* Short-circuit keeps kcb from being dereferenced when NULL. */
	if (error == 0 && (kctl = kcb->kctl) == NULL)
		error = EINVAL;

	if (error == 0 && kctl->send) {
		/* Drop the socket lock across the controller callback. */
		socket_unlock(so, 0);
		error = (*kctl->send)(kctl, kcb->unit, kcb->userdata, m, flags);
		socket_lock(so, 0);
	} else {
		/* No callback or earlier failure: we still own m. */
		m_freem(m);
		if (error == 0)
			error = ENOTSUP;
	}
	return error;
}
473
/*
 * Enqueue an mbuf chain on the receive buffer of the socket attached
 * to (kctlref, unit).  Returns EINVAL if there is no such connected
 * socket, ENOBUFS if the receive buffer lacks space for the packet.
 * On success the socket layer owns m; on ENOBUFS the caller keeps
 * ownership (m is not freed here).
 */
errno_t
ctl_enqueuembuf(void *kctlref, u_int32_t unit, struct mbuf *m, u_int32_t flags)
{
	struct socket 	*so;
	errno_t 		error = 0;
	struct kctl		*kctl = (struct kctl *)kctlref;

	if (kctl == NULL)
		return EINVAL;

	/* Returns the socket locked; we must socket_unlock(so, 1) on exit. */
	so = kcb_find_socket(kctl, unit);

	if (so == NULL)
		return EINVAL;

	if (sbspace(&so->so_rcv) < m->m_pkthdr.len) {
		error = ENOBUFS;
		goto bye;
	}
	if ((flags & CTL_DATA_EOR))
		m->m_flags |= M_EOR;
	/* sbappend consumes m; wake the reader unless suppressed. */
	if (sbappend(&so->so_rcv, m) && (flags & CTL_DATA_NOWAKEUP) == 0)
		sorwakeup(so);
bye:
	socket_unlock(so, 1);
	return error;
}
501
/*
 * Copy len bytes from data into a freshly allocated mbuf chain and
 * enqueue it on the receive buffer of the socket attached to
 * (kctlref, unit).  Unlike ctl_enqueuembuf(), the caller retains
 * ownership of data.  Returns EINVAL for a bad reference or unit,
 * ENOBUFS when the receive buffer is full or mbuf allocation fails.
 */
errno_t
ctl_enqueuedata(void *kctlref, u_int32_t unit, void *data, size_t len, u_int32_t flags)
{
	struct socket 	*so;
	struct mbuf 	*m;
	errno_t			error = 0;
	struct kctl		*kctl = (struct kctl *)kctlref;
	unsigned int 	num_needed;
	struct mbuf 	*n;
	size_t			curlen = 0;

	if (kctlref == NULL)
		return EINVAL;

	/* Returns the socket locked; we must socket_unlock(so, 1) on exit. */
	so = kcb_find_socket(kctl, unit);
	if (so == NULL)
		return EINVAL;

	if (sbspace(&so->so_rcv) < (int)len) {
		error = ENOBUFS;
		goto bye;
	}

	num_needed = 1;
	m = m_allocpacket_internal(&num_needed, len, NULL, M_NOWAIT, 1, 0);
	if (m == NULL) {
		printf("ctl_enqueuedata: m_allocpacket_internal(%lu) failed\n", len);
		error = ENOBUFS;
		goto bye;
	}

	/* Scatter the caller's buffer across the allocated chain. */
	for (n = m; n != NULL; n = n->m_next) {
		size_t mlen = mbuf_maxlen(n);

		if (mlen + curlen > len)
			mlen = len - curlen;
		n->m_len = mlen;
		bcopy((char *)data + curlen, n->m_data, mlen);
		curlen += mlen;
	}
	mbuf_pkthdr_setlen(m, curlen);

	if ((flags & CTL_DATA_EOR))
		m->m_flags |= M_EOR;
	/* sbappend consumes m; wake the reader unless suppressed. */
	if (sbappend(&so->so_rcv, m) && (flags & CTL_DATA_NOWAKEUP) == 0)
		sorwakeup(so);
bye:
	socket_unlock(so, 1);
	return error;
}
552
553
554errno_t
555ctl_getenqueuespace(kern_ctl_ref kctlref, u_int32_t unit, size_t *space)
556{
557	struct kctl		*kctl = (struct kctl *)kctlref;
558	struct socket 	*so;
559	long avail;
560
561	if (kctlref == NULL || space == NULL)
562		return EINVAL;
563
564	so = kcb_find_socket(kctl, unit);
565	if (so == NULL)
566		return EINVAL;
567
568	avail = sbspace(&so->so_rcv);
569	*space = (avail < 0) ? 0 : avail;
570	socket_unlock(so, 1);
571
572	return 0;
573}
574
575static int
576ctl_ctloutput(struct socket *so, struct sockopt *sopt)
577{
578	struct ctl_cb 	*kcb = (struct ctl_cb *)so->so_pcb;
579	struct kctl	*kctl;
580	int 	error = 0;
581	void 	*data;
582	size_t	len;
583
584	if (sopt->sopt_level != SYSPROTO_CONTROL) {
585		return(EINVAL);
586	}
587
588	if (kcb == NULL)	/* sanity check */
589		return(ENOTCONN);
590
591	if ((kctl = kcb->kctl) == NULL)
592		return(EINVAL);
593
594	switch (sopt->sopt_dir) {
595		case SOPT_SET:
596			if (kctl->setopt == NULL)
597				return(ENOTSUP);
598			if (sopt->sopt_valsize == 0) {
599				data = NULL;
600			} else {
601				MALLOC(data, void *, sopt->sopt_valsize, M_TEMP, M_WAITOK);
602				if (data == NULL)
603					return(ENOMEM);
604				error = sooptcopyin(sopt, data, sopt->sopt_valsize, sopt->sopt_valsize);
605			}
606			if (error == 0) {
607				socket_unlock(so, 0);
608				error = (*kctl->setopt)(kcb->kctl, kcb->unit, kcb->userdata, sopt->sopt_name,
609							data, sopt->sopt_valsize);
610				socket_lock(so, 0);
611			}
612			FREE(data, M_TEMP);
613			break;
614
615		case SOPT_GET:
616			if (kctl->getopt == NULL)
617				return(ENOTSUP);
618			data = NULL;
619			if (sopt->sopt_valsize && sopt->sopt_val) {
620				MALLOC(data, void *, sopt->sopt_valsize, M_TEMP, M_WAITOK);
621				if (data == NULL)
622					return(ENOMEM);
623				/* 4108337 - copy in data for get socket option */
624				error = sooptcopyin(sopt, data, sopt->sopt_valsize, sopt->sopt_valsize);
625			}
626			len = sopt->sopt_valsize;
627			socket_unlock(so, 0);
628			error = (*kctl->getopt)(kcb->kctl, kcb->unit, kcb->userdata, sopt->sopt_name,
629						data, &len);
630			if (data != NULL && len > sopt->sopt_valsize)
631				panic_plain("ctl_ctloutput: ctl %s returned len (%lu) > sopt_valsize (%lu)\n",
632					kcb->kctl->name, len, sopt->sopt_valsize);
633			socket_lock(so, 0);
634			if (error == 0) {
635				if (data != NULL)
636					error = sooptcopyout(sopt, data, len);
637				else
638					sopt->sopt_valsize = len;
639			}
640			if (data != NULL)
641				FREE(data, M_TEMP);
642			break;
643	}
644	return error;
645}
646
647static int
648ctl_ioctl(__unused struct socket *so, u_long cmd, caddr_t data,
649			__unused struct ifnet *ifp, __unused struct proc *p)
650{
651	int 	error = ENOTSUP;
652
653	switch (cmd) {
654		/* get the number of controllers */
655		case CTLIOCGCOUNT: {
656			struct kctl	*kctl;
657			u_int32_t n = 0;
658
659			lck_mtx_lock(ctl_mtx);
660			TAILQ_FOREACH(kctl, &ctl_head, next)
661				n++;
662			lck_mtx_unlock(ctl_mtx);
663
664			bcopy(&n, data, sizeof (n));
665			error = 0;
666			break;
667		}
668		case CTLIOCGINFO: {
669			struct ctl_info ctl_info;
670			struct kctl 	*kctl = 0;
671			size_t name_len;
672
673			bcopy(data, &ctl_info, sizeof (ctl_info));
674			name_len = strnlen(ctl_info.ctl_name, MAX_KCTL_NAME);
675
676			if (name_len == 0 || name_len + 1 > MAX_KCTL_NAME) {
677				error = EINVAL;
678				break;
679			}
680			lck_mtx_lock(ctl_mtx);
681			kctl = ctl_find_by_name(ctl_info.ctl_name);
682			lck_mtx_unlock(ctl_mtx);
683			if (kctl == 0) {
684				error = ENOENT;
685				break;
686			}
687			ctl_info.ctl_id = kctl->id;
688			bcopy(&ctl_info, data, sizeof (ctl_info));
689			error = 0;
690			break;
691		}
692
693		/* add controls to get list of NKEs */
694
695	}
696
697	return error;
698}
699
700/*
701 * Register/unregister a NKE
702 */
/*
 * Register a kernel controller.  Assigns (or validates) the control
 * id/unit, applies default buffer sizes, copies the callbacks, links
 * the new kctl into ctl_head in id order, and returns the opaque
 * reference through kctlref.  Posts KEV_CTL_REGISTERED on success.
 */
errno_t
ctl_register(struct kern_ctl_reg *userkctl, kern_ctl_ref *kctlref)
{
	struct kctl 	*kctl = NULL;
	struct kctl 	*kctl_next = NULL;
	u_int32_t		id = 1;
	size_t			name_len;

	if (userkctl == NULL)	/* sanity check */
		return(EINVAL);
	if (userkctl->ctl_connect == NULL)
		return(EINVAL);
	name_len = strlen(userkctl->ctl_name);
	if (name_len == 0 || name_len + 1 > MAX_KCTL_NAME)
		return(EINVAL);

	MALLOC(kctl, struct kctl *, sizeof(*kctl), M_TEMP, M_WAITOK);
	if (kctl == NULL)
		return(ENOMEM);
	bzero((char *)kctl, sizeof(*kctl));

	lck_mtx_lock(ctl_mtx);

	/*
	 * Kernel Control IDs
	 *
	 * CTL_FLAG_REG_ID_UNIT indicates the control ID and unit number are
	 * static. If they do not exist, add them to the list in order. If the
	 * flag is not set, we must find a new unique value. We assume the
	 * list is in order. We find the last item in the list and add one. If
	 * this leads to wrapping the id around, we start at the front of the
	 * list and look for a gap.
	 */

	if ((userkctl->ctl_flags & CTL_FLAG_REG_ID_UNIT) == 0) {
		/* Must dynamically assign an unused ID */

		/* Verify the same name isn't already registered */
		if (ctl_find_by_name(userkctl->ctl_name) != NULL) {
			lck_mtx_unlock(ctl_mtx);
			FREE(kctl, M_TEMP);
			return(EEXIST);
		}

		/* Start with 1 in case the list is empty */
		id = 1;
		kctl_next = TAILQ_LAST(&ctl_head, kctl_list);

		if (kctl_next != NULL) {
			/* List was not empty, add one to the last item in the list */
			id = kctl_next->id + 1;
			kctl_next = NULL;

			/*
			 * If this wrapped the id number, start looking at the front
			 * of the list for an unused id.
			 */
			if (id == 0) {
				/* Find the next unused ID */
				id = 1;

				TAILQ_FOREACH(kctl_next, &ctl_head, next) {
					if (kctl_next->id > id) {
						/* We found a gap */
						break;
					}

					id = kctl_next->id + 1;
				}
			}
		}

		userkctl->ctl_id = id;
		kctl->id = id;
		kctl->reg_unit = -1;
	} else {
		/* Static id/unit: find the insertion point keeping id order. */
		TAILQ_FOREACH(kctl_next, &ctl_head, next) {
			if (kctl_next->id > userkctl->ctl_id)
				break;
		}

		if (ctl_find_by_id_unit(userkctl->ctl_id, userkctl->ctl_unit) != NULL) {
			lck_mtx_unlock(ctl_mtx);
			FREE(kctl, M_TEMP);
			return(EEXIST);
		}
		kctl->id = userkctl->ctl_id;
		kctl->reg_unit = userkctl->ctl_unit;
	}
	strlcpy(kctl->name, userkctl->ctl_name, MAX_KCTL_NAME);
	kctl->flags = userkctl->ctl_flags;

	/* Let the caller know the default send and receive sizes */
	if (userkctl->ctl_sendsize == 0)
		userkctl->ctl_sendsize = CTL_SENDSIZE;
	kctl->sendbufsize = userkctl->ctl_sendsize;

	if (userkctl->ctl_recvsize == 0)
		userkctl->ctl_recvsize = CTL_RECVSIZE;
	kctl->recvbufsize = userkctl->ctl_recvsize;

	kctl->connect = userkctl->ctl_connect;
	kctl->disconnect = userkctl->ctl_disconnect;
	kctl->send = userkctl->ctl_send;
	kctl->setopt = userkctl->ctl_setopt;
	kctl->getopt = userkctl->ctl_getopt;

	TAILQ_INIT(&kctl->kcb_head);

	/* Insert in id order (kctl_next is NULL when appending). */
	if (kctl_next)
		TAILQ_INSERT_BEFORE(kctl_next, kctl, next);
	else
		TAILQ_INSERT_TAIL(&ctl_head, kctl, next);

	lck_mtx_unlock(ctl_mtx);

	*kctlref = kctl;

	ctl_post_msg(KEV_CTL_REGISTERED, kctl->id);
	return(0);
}
824
825errno_t
826ctl_deregister(void *kctlref)
827{
828    struct kctl		*kctl;
829
830    if (kctlref == NULL)	/* sanity check */
831        return(EINVAL);
832
833    lck_mtx_lock(ctl_mtx);
834    TAILQ_FOREACH(kctl, &ctl_head, next) {
835    	if (kctl == (struct kctl *)kctlref)
836    		break;
837    }
838    if (kctl != (struct kctl *)kctlref) {
839        lck_mtx_unlock(ctl_mtx);
840        return EINVAL;
841    }
842	if (!TAILQ_EMPTY(&kctl->kcb_head)) {
843        lck_mtx_unlock(ctl_mtx);
844		return EBUSY;
845	}
846
847    TAILQ_REMOVE(&ctl_head, kctl, next);
848
849    lck_mtx_unlock(ctl_mtx);
850
851    ctl_post_msg(KEV_CTL_DEREGISTERED, kctl->id);
852    FREE(kctl, M_TEMP);
853    return(0);
854}
855
/*
 * Must be called with the global ctl_mtx lock taken
 */
859static struct kctl *
860ctl_find_by_name(const char *name)
861{
862    struct kctl 	*kctl;
863
864    TAILQ_FOREACH(kctl, &ctl_head, next)
865        if (strncmp(kctl->name, name, sizeof(kctl->name)) == 0)
866            return kctl;
867
868    return NULL;
869}
870
871u_int32_t
872ctl_id_by_name(const char *name)
873{
874	u_int32_t	ctl_id = 0;
875
876	lck_mtx_lock(ctl_mtx);
877	struct kctl *kctl = ctl_find_by_name(name);
878	if (kctl) ctl_id = kctl->id;
879	lck_mtx_unlock(ctl_mtx);
880
881	return ctl_id;
882}
883
884errno_t
885ctl_name_by_id(
886	u_int32_t id,
887	char	*out_name,
888	size_t	maxsize)
889{
890	int 		found = 0;
891
892	lck_mtx_lock(ctl_mtx);
893	struct kctl *kctl;
894    TAILQ_FOREACH(kctl, &ctl_head, next) {
895        if (kctl->id == id)
896            break;
897    }
898
899    if (kctl && kctl->name)
900    {
901    	if (maxsize > MAX_KCTL_NAME)
902    		maxsize = MAX_KCTL_NAME;
903    	strlcpy(out_name, kctl->name, maxsize);
904    	found = 1;
905    }
906	lck_mtx_unlock(ctl_mtx);
907
908	return found ? 0 : ENOENT;
909}
910
/*
 * Must be called with the global ctl_mtx lock taken
 *
 */
915static struct kctl *
916ctl_find_by_id_unit(u_int32_t id, u_int32_t unit)
917{
918    struct kctl 	*kctl;
919
920    TAILQ_FOREACH(kctl, &ctl_head, next) {
921        if (kctl->id == id && (kctl->flags & CTL_FLAG_REG_ID_UNIT) == 0)
922            return kctl;
923        else if (kctl->id == id && kctl->reg_unit == unit)
924            return kctl;
925    }
926    return NULL;
927}
928
929/*
930 * Must be called with kernel controller lock taken
931 */
932static struct ctl_cb *
933kcb_find(struct kctl *kctl, u_int32_t unit)
934{
935    struct ctl_cb 	*kcb;
936
937    TAILQ_FOREACH(kcb, &kctl->kcb_head, next)
938        if (kcb->unit == unit)
939            return kcb;
940
941    return NULL;
942}
943
/*
 * Resolve (kctl, unit) to its attached socket and return it locked
 * (caller must socket_unlock(so, 1)), or NULL if the unit is not
 * connected.  Because ctl_mtx must be dropped before taking the
 * socket lock, a usecount reference pins the kcb across the gap;
 * ctl_disconnect() sleeps on kcb->usecount until such references
 * drain.
 */
static struct socket *
kcb_find_socket(struct kctl *kctl, u_int32_t unit)
{
	struct socket *so = NULL;

	lck_mtx_lock(ctl_mtx);
	struct ctl_cb	*kcb = kcb_find(kctl, unit);
	if (kcb && kcb->kctl == kctl) {
		so = kcb->so;
		if (so) {
			/* Pin the kcb across the ctl_mtx drop below. */
			kcb->usecount++;
		}
	}
	lck_mtx_unlock(ctl_mtx);

	if (so == NULL) {
		return NULL;
	}

	socket_lock(so, 1);

	lck_mtx_lock(ctl_mtx);
	if (kcb->kctl == NULL)
	{
		/* Raced with ctl_disconnect(): give the socket back.  The
		 * socket lock cannot be released while ctl_mtx is held,
		 * hence the unlock/relock dance before dropping usecount. */
		lck_mtx_unlock(ctl_mtx);
		socket_unlock(so, 1);
		so = NULL;
		lck_mtx_lock(ctl_mtx);
	}
	kcb->usecount--;
	if (kcb->usecount == 0)
		wakeup((event_t)&kcb->usecount);
	lck_mtx_unlock(ctl_mtx);

	return so;
}
980
981static void
982ctl_post_msg(u_int32_t event_code, u_int32_t id)
983{
984    struct ctl_event_data  	ctl_ev_data;
985    struct kev_msg  		ev_msg;
986
987    lck_mtx_assert(ctl_mtx, LCK_MTX_ASSERT_NOTOWNED);
988
989    bzero(&ev_msg, sizeof(struct kev_msg));
990    ev_msg.vendor_code    = KEV_VENDOR_APPLE;
991
992    ev_msg.kev_class      = KEV_SYSTEM_CLASS;
993    ev_msg.kev_subclass   = KEV_CTL_SUBCLASS;
994    ev_msg.event_code 	  = event_code;
995
996    /* common nke subclass data */
997    bzero(&ctl_ev_data, sizeof(ctl_ev_data));
998    ctl_ev_data.ctl_id = id;
999    ev_msg.dv[0].data_ptr    = &ctl_ev_data;
1000    ev_msg.dv[0].data_length = sizeof(ctl_ev_data);
1001
1002    ev_msg.dv[1].data_length = 0;
1003
1004    kev_post_msg(&ev_msg);
1005}
1006
1007static int
1008ctl_lock(struct socket *so, int refcount, void *lr)
1009{
1010	void *lr_saved;
1011
1012	if (lr == NULL)
1013		lr_saved = __builtin_return_address(0);
1014	else
1015		lr_saved = lr;
1016
1017	if (so->so_pcb != NULL) {
1018		lck_mtx_lock(((struct ctl_cb *)so->so_pcb)->mtx);
1019	} else  {
1020		panic("ctl_lock: so=%p NO PCB! lr=%p lrh= %s\n",
1021		    so, lr_saved, solockhistory_nr(so));
1022		/* NOTREACHED */
1023	}
1024
1025	if (so->so_usecount < 0) {
1026		panic("ctl_lock: so=%p so_pcb=%p lr=%p ref=%x lrh= %s\n",
1027		    so, so->so_pcb, lr_saved, so->so_usecount, solockhistory_nr(so));
1028		/* NOTREACHED */
1029	}
1030
1031	if (refcount)
1032		so->so_usecount++;
1033
1034	so->lock_lr[so->next_lock_lr] = lr_saved;
1035	so->next_lock_lr = (so->next_lock_lr+1) % SO_LCKDBG_MAX;
1036	return (0);
1037}
1038
/*
 * Socket-layer unlock callback: optionally drop a use-count
 * reference, record the caller for lock debugging, release the
 * per-pcb mutex, and tear everything down via ctl_sofreelastref()
 * once the use count reaches zero.
 */
static int
ctl_unlock(struct socket *so, int refcount, void *lr)
{
	void *lr_saved;
	lck_mtx_t *mutex_held;

	if (lr == NULL)
		lr_saved = __builtin_return_address(0);
	else
		lr_saved = lr;

#ifdef MORE_KCTLLOCK_DEBUG
	printf("ctl_unlock: so=%x sopcb=%x lock=%x ref=%x lr=%p\n",
	    so, so->so_pcb, ((struct ctl_cb *)so->so_pcb)->mtx,
	    so->so_usecount, lr_saved);
#endif
	if (refcount)
		so->so_usecount--;

	if (so->so_usecount < 0) {
		panic("ctl_unlock: so=%p usecount=%x lrh= %s\n",
		    so, so->so_usecount, solockhistory_nr(so));
		/* NOTREACHED */
	}
	if (so->so_pcb == NULL) {
		panic("ctl_unlock: so=%p NO PCB usecount=%x lr=%p lrh= %s\n",
		    so, so->so_usecount, (void *)lr_saved, solockhistory_nr(so));
		/* NOTREACHED */
	}
	/* Read the mutex pointer before unlocking: ctl_sofreelastref()
	 * below may free the pcb that holds it. */
	mutex_held = ((struct ctl_cb *)so->so_pcb)->mtx;

	lck_mtx_assert(mutex_held, LCK_MTX_ASSERT_OWNED);
	so->unlock_lr[so->next_unlock_lr] = lr_saved;
	so->next_unlock_lr = (so->next_unlock_lr+1) % SO_LCKDBG_MAX;
	lck_mtx_unlock(mutex_held);

	if (so->so_usecount == 0)
		ctl_sofreelastref(so);

	return (0);
}
1080
1081static lck_mtx_t *
1082ctl_getlock(struct socket *so, __unused int locktype)
1083{
1084	struct ctl_cb *kcb = (struct ctl_cb *)so->so_pcb;
1085
1086	if (so->so_pcb)  {
1087		if (so->so_usecount < 0)
1088			panic("ctl_getlock: so=%p usecount=%x lrh= %s\n",
1089			    so, so->so_usecount, solockhistory_nr(so));
1090		return(kcb->mtx);
1091	} else {
1092		panic("ctl_getlock: so=%p NULL NO so_pcb %s\n",
1093		    so, solockhistory_nr(so));
1094		return (so->so_proto->pr_domain->dom_mtx);
1095	}
1096}
1097