libunbound.c revision 285206
1/*
2 * unbound.c - unbound validating resolver public API implementation
3 *
4 * Copyright (c) 2007, NLnet Labs. All rights reserved.
5 *
6 * This software is open source.
7 *
8 * Redistribution and use in source and binary forms, with or without
9 * modification, are permitted provided that the following conditions
10 * are met:
11 *
12 * Redistributions of source code must retain the above copyright notice,
13 * this list of conditions and the following disclaimer.
14 *
15 * Redistributions in binary form must reproduce the above copyright notice,
16 * this list of conditions and the following disclaimer in the documentation
17 * and/or other materials provided with the distribution.
18 *
19 * Neither the name of the NLNET LABS nor the names of its contributors may
20 * be used to endorse or promote products derived from this software without
21 * specific prior written permission.
22 *
23 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
24 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
25 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
26 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
27 * HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
28 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED
29 * TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
30 * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
31 * LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
32 * NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
33 * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
34 */
35
36/**
37 * \file
38 *
39 * This file contains functions to resolve DNS queries and
40 * validate the answers, synchronously and asynchronously.
41 *
42 */
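
/*
 * The implementation below has three modes of operation:
 *  - synchronous: ub_resolve() performs the lookup in the calling
 *    thread, via libworker_fg().
 *  - asynchronous: ub_resolve_async() starts a background worker
 *    (thread or forked process) on first use; queries and answers are
 *    serialized over the qq_pipe/rr_pipe tubes and results are picked
 *    up with ub_process(), ub_wait(), or ub_fd() plus select/poll.
 *  - event-driven: ub_ctx_create_event() and ub_resolve_event() attach
 *    queries to a caller-provided event_base, without pipes or a
 *    background worker.
 */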
43
44/* include the public api first, it should be able to stand alone */
45#include "libunbound/unbound.h"
46#include "libunbound/unbound-event.h"
47#include "config.h"
48#include <ctype.h>
49#include "libunbound/context.h"
50#include "libunbound/libworker.h"
51#include "util/locks.h"
52#include "util/config_file.h"
53#include "util/alloc.h"
54#include "util/module.h"
55#include "util/regional.h"
56#include "util/log.h"
57#include "util/random.h"
58#include "util/net_help.h"
59#include "util/tube.h"
60#include "services/modstack.h"
61#include "services/localzone.h"
62#include "services/cache/infra.h"
63#include "services/cache/rrset.h"
64#include "ldns/sbuffer.h"
65#ifdef HAVE_PTHREAD
66#include <signal.h>
67#endif
68
69#if defined(UB_ON_WINDOWS) && defined (HAVE_WINDOWS_H)
70#include <windows.h>
71#include <iphlpapi.h>
72#endif /* UB_ON_WINDOWS */
73
74/** create context functionality, but no pipes */
75static struct ub_ctx* ub_ctx_create_nopipe(void)
76{
77	struct ub_ctx* ctx;
78	unsigned int seed;
79#ifdef USE_WINSOCK
80	int r;
81	WSADATA wsa_data;
82#endif
83
84	log_init(NULL, 0, NULL); /* logs to stderr */
85	log_ident_set("libunbound");
86#ifdef USE_WINSOCK
87	if((r = WSAStartup(MAKEWORD(2,2), &wsa_data)) != 0) {
88		log_err("could not init winsock. WSAStartup: %s",
89			wsa_strerror(r));
90		return NULL;
91	}
92#endif
93	verbosity = 0; /* errors only */
94	checklock_start();
95	ctx = (struct ub_ctx*)calloc(1, sizeof(*ctx));
96	if(!ctx) {
97		errno = ENOMEM;
98		return NULL;
99	}
100	alloc_init(&ctx->superalloc, NULL, 0);
101	seed = (unsigned int)time(NULL) ^ (unsigned int)getpid();
102	if(!(ctx->seed_rnd = ub_initstate(seed, NULL))) {
103		seed = 0;
104		ub_randfree(ctx->seed_rnd);
105		free(ctx);
106		errno = ENOMEM;
107		return NULL;
108	}
109	seed = 0;
110	lock_basic_init(&ctx->qqpipe_lock);
111	lock_basic_init(&ctx->rrpipe_lock);
112	lock_basic_init(&ctx->cfglock);
113	ctx->env = (struct module_env*)calloc(1, sizeof(*ctx->env));
114	if(!ctx->env) {
115		ub_randfree(ctx->seed_rnd);
116		free(ctx);
117		errno = ENOMEM;
118		return NULL;
119	}
120	ctx->env->cfg = config_create_forlib();
121	if(!ctx->env->cfg) {
122		free(ctx->env);
123		ub_randfree(ctx->seed_rnd);
124		free(ctx);
125		errno = ENOMEM;
126		return NULL;
127	}
128	ctx->env->alloc = &ctx->superalloc;
129	ctx->env->worker = NULL;
130	ctx->env->need_to_validate = 0;
131	modstack_init(&ctx->mods);
132	rbtree_init(&ctx->queries, &context_query_cmp);
133	return ctx;
134}
135
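/*
 * The public constructor adds the two tubes on top of the pipe-less
 * context: qq_pipe carries serialized queries to the background worker
 * and rr_pipe carries the results back.
 */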
136struct ub_ctx*
137ub_ctx_create(void)
138{
139	struct ub_ctx* ctx = ub_ctx_create_nopipe();
140	if(!ctx)
141		return NULL;
142	if((ctx->qq_pipe = tube_create()) == NULL) {
143		int e = errno;
144		ub_randfree(ctx->seed_rnd);
145		config_delete(ctx->env->cfg);
146		modstack_desetup(&ctx->mods, ctx->env);
147		free(ctx->env);
148		free(ctx);
149		errno = e;
150		return NULL;
151	}
152	if((ctx->rr_pipe = tube_create()) == NULL) {
153		int e = errno;
154		tube_delete(ctx->qq_pipe);
155		ub_randfree(ctx->seed_rnd);
156		config_delete(ctx->env->cfg);
157		modstack_desetup(&ctx->mods, ctx->env);
158		free(ctx->env);
159		free(ctx);
160		errno = e;
161		return NULL;
162	}
163	return ctx;
164}
165
166struct ub_ctx*
167ub_ctx_create_event(struct event_base* eb)
168{
169	struct ub_ctx* ctx = ub_ctx_create_nopipe();
170	if(!ctx)
171		return NULL;
172	/* no pipes, but we have the locks to make sure everything works */
173	ctx->created_bg = 0;
174	ctx->dothread = 1; /* the processing is in the same process,
175		makes ub_cancel and ub_ctx_delete do the right thing */
176	ctx->event_base = eb;
177	return ctx;
178}
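
/*
 * For an event-based context the worker is created lazily:
 * ub_resolve_event() builds the event worker on first use and attaches
 * queries to the caller's event_base, so no background thread or
 * process and no pipes are involved.
 */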
179
180/** delete q */
181static void
182delq(rbnode_t* n, void* ATTR_UNUSED(arg))
183{
184	struct ctx_query* q = (struct ctx_query*)n;
185	context_query_delete(q);
186}
187
188/** stop the bg thread */
189static void ub_stop_bg(struct ub_ctx* ctx)
190{
191	/* stop the bg thread */
192	lock_basic_lock(&ctx->cfglock);
193	if(ctx->created_bg) {
194		uint8_t* msg;
195		uint32_t len;
196		uint32_t cmd = UB_LIBCMD_QUIT;
197		lock_basic_unlock(&ctx->cfglock);
198		lock_basic_lock(&ctx->qqpipe_lock);
199		(void)tube_write_msg(ctx->qq_pipe, (uint8_t*)&cmd,
200			(uint32_t)sizeof(cmd), 0);
201		lock_basic_unlock(&ctx->qqpipe_lock);
202		lock_basic_lock(&ctx->rrpipe_lock);
203		while(tube_read_msg(ctx->rr_pipe, &msg, &len, 0)) {
204			/* discard all results except a quit confirm */
205			if(context_serial_getcmd(msg, len) == UB_LIBCMD_QUIT) {
206				free(msg);
207				break;
208			}
209			free(msg);
210		}
211		lock_basic_unlock(&ctx->rrpipe_lock);
212
213		/* if bg worker is a thread, wait for it to exit, so that all
214		 * resources are really gone. */
215		lock_basic_lock(&ctx->cfglock);
216		if(ctx->dothread) {
217			lock_basic_unlock(&ctx->cfglock);
218			ub_thread_join(ctx->bg_tid);
219		} else {
220			lock_basic_unlock(&ctx->cfglock);
221		}
222	}
223	else {
224		lock_basic_unlock(&ctx->cfglock);
225	}
226}
227
228void
229ub_ctx_delete(struct ub_ctx* ctx)
230{
231	struct alloc_cache* a, *na;
232	int do_stop = 1;
233	if(!ctx) return;
234
235	/* see if bg thread is created and if threads have been killed */
236	/* no locks, because those may be held by terminated threads */
237	/* for processes the read pipe is closed and we see that on read */
238#ifdef HAVE_PTHREAD
239	if(ctx->created_bg && ctx->dothread) {
240		if(pthread_kill(ctx->bg_tid, 0) == ESRCH) {
241			/* thread has been killed */
242			do_stop = 0;
243		}
244	}
245#endif /* HAVE_PTHREAD */
246	if(do_stop)
247		ub_stop_bg(ctx);
248	libworker_delete_event(ctx->event_worker);
249
250	modstack_desetup(&ctx->mods, ctx->env);
251	a = ctx->alloc_list;
252	while(a) {
253		na = a->super;
254		a->super = &ctx->superalloc;
255		alloc_clear(a);
256		free(a);
257		a = na;
258	}
259	local_zones_delete(ctx->local_zones);
260	lock_basic_destroy(&ctx->qqpipe_lock);
261	lock_basic_destroy(&ctx->rrpipe_lock);
262	lock_basic_destroy(&ctx->cfglock);
263	tube_delete(ctx->qq_pipe);
264	tube_delete(ctx->rr_pipe);
265	if(ctx->env) {
266		slabhash_delete(ctx->env->msg_cache);
267		rrset_cache_delete(ctx->env->rrset_cache);
268		infra_delete(ctx->env->infra_cache);
269		config_delete(ctx->env->cfg);
270		free(ctx->env);
271	}
272	ub_randfree(ctx->seed_rnd);
273	alloc_clear(&ctx->superalloc);
274	traverse_postorder(&ctx->queries, delq, NULL);
275	free(ctx);
276#ifdef USE_WINSOCK
277	WSACleanup();
278#endif
279}
280
281int
282ub_ctx_set_option(struct ub_ctx* ctx, const char* opt, const char* val)
283{
284	lock_basic_lock(&ctx->cfglock);
285	if(ctx->finalized) {
286		lock_basic_unlock(&ctx->cfglock);
287		return UB_AFTERFINAL;
288	}
289	if(!config_set_option(ctx->env->cfg, opt, val)) {
290		lock_basic_unlock(&ctx->cfglock);
291		return UB_SYNTAX;
292	}
293	lock_basic_unlock(&ctx->cfglock);
294	return UB_NOERROR;
295}
296
297int
298ub_ctx_get_option(struct ub_ctx* ctx, const char* opt, char** str)
299{
300	int r;
301	lock_basic_lock(&ctx->cfglock);
302	r = config_get_option_collate(ctx->env->cfg, opt, str);
303	lock_basic_unlock(&ctx->cfglock);
304	if(r == 0) r = UB_NOERROR;
305	else if(r == 1) r = UB_SYNTAX;
306	else if(r == 2) r = UB_NOMEM;
307	return r;
308}
309
310int
311ub_ctx_config(struct ub_ctx* ctx, const char* fname)
312{
313	lock_basic_lock(&ctx->cfglock);
314	if(ctx->finalized) {
315		lock_basic_unlock(&ctx->cfglock);
316		return UB_AFTERFINAL;
317	}
318	if(!config_read(ctx->env->cfg, fname, NULL)) {
319		lock_basic_unlock(&ctx->cfglock);
320		return UB_SYNTAX;
321	}
322	lock_basic_unlock(&ctx->cfglock);
323	return UB_NOERROR;
324}
325
326int
327ub_ctx_add_ta(struct ub_ctx* ctx, const char* ta)
328{
329	char* dup = strdup(ta);
330	if(!dup) return UB_NOMEM;
331	lock_basic_lock(&ctx->cfglock);
332	if(ctx->finalized) {
333		lock_basic_unlock(&ctx->cfglock);
334		free(dup);
335		return UB_AFTERFINAL;
336	}
337	if(!cfg_strlist_insert(&ctx->env->cfg->trust_anchor_list, dup)) {
338		lock_basic_unlock(&ctx->cfglock);
339		free(dup);
340		return UB_NOMEM;
341	}
342	lock_basic_unlock(&ctx->cfglock);
343	return UB_NOERROR;
344}
345
346int
347ub_ctx_add_ta_file(struct ub_ctx* ctx, const char* fname)
348{
349	char* dup = strdup(fname);
350	if(!dup) return UB_NOMEM;
351	lock_basic_lock(&ctx->cfglock);
352	if(ctx->finalized) {
353		lock_basic_unlock(&ctx->cfglock);
354		free(dup);
355		return UB_AFTERFINAL;
356	}
357	if(!cfg_strlist_insert(&ctx->env->cfg->trust_anchor_file_list, dup)) {
358		lock_basic_unlock(&ctx->cfglock);
359		free(dup);
360		return UB_NOMEM;
361	}
362	lock_basic_unlock(&ctx->cfglock);
363	return UB_NOERROR;
364}
365
366int ub_ctx_add_ta_autr(struct ub_ctx* ctx, const char* fname)
367{
368	char* dup = strdup(fname);
369	if(!dup) return UB_NOMEM;
370	lock_basic_lock(&ctx->cfglock);
371	if(ctx->finalized) {
372		lock_basic_unlock(&ctx->cfglock);
373		free(dup);
374		return UB_AFTERFINAL;
375	}
376	if(!cfg_strlist_insert(&ctx->env->cfg->auto_trust_anchor_file_list,
377		dup)) {
378		lock_basic_unlock(&ctx->cfglock);
379		free(dup);
380		return UB_NOMEM;
381	}
382	lock_basic_unlock(&ctx->cfglock);
383	return UB_NOERROR;
384}
385
386int
387ub_ctx_trustedkeys(struct ub_ctx* ctx, const char* fname)
388{
389	char* dup = strdup(fname);
390	if(!dup) return UB_NOMEM;
391	lock_basic_lock(&ctx->cfglock);
392	if(ctx->finalized) {
393		lock_basic_unlock(&ctx->cfglock);
394		free(dup);
395		return UB_AFTERFINAL;
396	}
397	if(!cfg_strlist_insert(&ctx->env->cfg->trusted_keys_file_list, dup)) {
398		lock_basic_unlock(&ctx->cfglock);
399		free(dup);
400		return UB_NOMEM;
401	}
402	lock_basic_unlock(&ctx->cfglock);
403	return UB_NOERROR;
404}
405
406int
407ub_ctx_debuglevel(struct ub_ctx* ctx, int d)
408{
409	lock_basic_lock(&ctx->cfglock);
410	verbosity = d;
411	ctx->env->cfg->verbosity = d;
412	lock_basic_unlock(&ctx->cfglock);
413	return UB_NOERROR;
414}
415
416int ub_ctx_debugout(struct ub_ctx* ctx, void* out)
417{
418	lock_basic_lock(&ctx->cfglock);
419	log_file((FILE*)out);
420	ctx->logfile_override = 1;
421	ctx->log_out = out;
422	lock_basic_unlock(&ctx->cfglock);
423	return UB_NOERROR;
424}
425
426int
427ub_ctx_async(struct ub_ctx* ctx, int dothread)
428{
429#ifdef THREADS_DISABLED
430	if(dothread) /* cannot do threading */
431		return UB_NOERROR;
432#endif
433	lock_basic_lock(&ctx->cfglock);
434	if(ctx->finalized) {
435		lock_basic_unlock(&ctx->cfglock);
436		return UB_AFTERFINAL;
437	}
438	ctx->dothread = dothread;
439	lock_basic_unlock(&ctx->cfglock);
440	return UB_NOERROR;
441}
442
443int
444ub_poll(struct ub_ctx* ctx)
445{
446	/* no need to hold lock while testing for readability. */
447	return tube_poll(ctx->rr_pipe);
448}
449
450int
451ub_fd(struct ub_ctx* ctx)
452{
453	return tube_read_fd(ctx->rr_pipe);
454}
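
/*
 * A small sketch of how a caller might drive the async results with
 * select(); the variable names and the blocking wait are illustrative
 * assumptions, not part of this file:
 *
 *	fd_set rfds;
 *	int fd = ub_fd(ctx);
 *	FD_ZERO(&rfds);
 *	FD_SET(fd, &rfds);
 *	if(select(fd+1, &rfds, NULL, NULL, NULL) > 0)
 *		ub_process(ctx);
 *
 * ub_process() reads the waiting answers and runs their callbacks.
 */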
455
456/** process answer from bg worker */
457static int
458process_answer_detail(struct ub_ctx* ctx, uint8_t* msg, uint32_t len,
459	ub_callback_t* cb, void** cbarg, int* err,
460	struct ub_result** res)
461{
462	struct ctx_query* q;
463	if(context_serial_getcmd(msg, len) != UB_LIBCMD_ANSWER) {
464		log_err("error: bad data from bg worker %d",
465			(int)context_serial_getcmd(msg, len));
466		return 0;
467	}
468
469	lock_basic_lock(&ctx->cfglock);
470	q = context_deserialize_answer(ctx, msg, len, err);
471	if(!q) {
472		lock_basic_unlock(&ctx->cfglock);
473		/* most likely the query was already cancelled and deleted, and
474		 * its answer arrived before the cancel took effect; not an error */
475		return 1;
476	}
477	log_assert(q->async);
478
479	/* grab cb while locked */
480	if(q->cancelled) {
481		*cb = NULL;
482		*cbarg = NULL;
483	} else {
484		*cb = q->cb;
485		*cbarg = q->cb_arg;
486	}
487	if(*err) {
488		*res = NULL;
489		ub_resolve_free(q->res);
490	} else {
491		/* parse the message, extract rcode, fill result */
492		sldns_buffer* buf = sldns_buffer_new(q->msg_len);
493		struct regional* region = regional_create();
494		*res = q->res;
495		(*res)->rcode = LDNS_RCODE_SERVFAIL;
496		if(region && buf) {
497			sldns_buffer_clear(buf);
498			sldns_buffer_write(buf, q->msg, q->msg_len);
499			sldns_buffer_flip(buf);
500			libworker_enter_result(*res, buf, region,
501				q->msg_security);
502		}
503		(*res)->answer_packet = q->msg;
504		(*res)->answer_len = (int)q->msg_len;
505		q->msg = NULL;
506		sldns_buffer_free(buf);
507		regional_destroy(region);
508	}
509	q->res = NULL;
510	/* delete the q from list */
511	(void)rbtree_delete(&ctx->queries, q->node.key);
512	ctx->num_async--;
513	context_query_delete(q);
514	lock_basic_unlock(&ctx->cfglock);
515
516	if(*cb) return 2;
517	ub_resolve_free(*res);
518	return 1;
519}
520
521/** process answer from bg worker */
522static int
523process_answer(struct ub_ctx* ctx, uint8_t* msg, uint32_t len)
524{
525	int err;
526	ub_callback_t cb;
527	void* cbarg;
528	struct ub_result* res;
529	int r;
530
531	r = process_answer_detail(ctx, msg, len, &cb, &cbarg, &err, &res);
532
533	/* no locks held while calling callback, so that library is
534	 * re-entrant. */
535	if(r == 2)
536		(*cb)(cbarg, err, res);
537
538	return r;
539}
540
541int
542ub_process(struct ub_ctx* ctx)
543{
544	int r;
545	uint8_t* msg;
546	uint32_t len;
547	while(1) {
548		msg = NULL;
549		lock_basic_lock(&ctx->rrpipe_lock);
550		r = tube_read_msg(ctx->rr_pipe, &msg, &len, 1);
551		lock_basic_unlock(&ctx->rrpipe_lock);
552		if(r == 0)
553			return UB_PIPE;
554		else if(r == -1)
555			break;
556		if(!process_answer(ctx, msg, len)) {
557			free(msg);
558			return UB_PIPE;
559		}
560		free(msg);
561	}
562	return UB_NOERROR;
563}
564
565int
566ub_wait(struct ub_ctx* ctx)
567{
568	int err;
569	ub_callback_t cb;
570	void* cbarg;
571	struct ub_result* res;
572	int r;
573	uint8_t* msg;
574	uint32_t len;
575	/* this is basically the same loop as ub_process(), except that it
576	 * holds the rrpipe lock and waits on the pipe with tube_wait */
577	while(1) {
578		lock_basic_lock(&ctx->rrpipe_lock);
579		lock_basic_lock(&ctx->cfglock);
580		if(ctx->num_async == 0) {
581			lock_basic_unlock(&ctx->cfglock);
582			lock_basic_unlock(&ctx->rrpipe_lock);
583			break;
584		}
585		lock_basic_unlock(&ctx->cfglock);
586
587		/* keep rrpipe locked, while
588		 * 	o waiting for pipe readable
589		 * 	o parsing message
590		 * 	o possibly decrementing num_async
591		 * do callback without lock
592		 */
593		r = tube_wait(ctx->rr_pipe);
594		if(r) {
595			r = tube_read_msg(ctx->rr_pipe, &msg, &len, 1);
596			if(r == 0) {
597				lock_basic_unlock(&ctx->rrpipe_lock);
598				return UB_PIPE;
599			}
600			if(r == -1) {
601				lock_basic_unlock(&ctx->rrpipe_lock);
602				continue;
603			}
604			r = process_answer_detail(ctx, msg, len,
605				&cb, &cbarg, &err, &res);
606			lock_basic_unlock(&ctx->rrpipe_lock);
607			free(msg);
608			if(r == 0)
609				return UB_PIPE;
610			if(r == 2)
611				(*cb)(cbarg, err, res);
612		} else {
613			lock_basic_unlock(&ctx->rrpipe_lock);
614		}
615	}
616	return UB_NOERROR;
617}
618
619int
620ub_resolve(struct ub_ctx* ctx, const char* name, int rrtype,
621	int rrclass, struct ub_result** result)
622{
623	struct ctx_query* q;
624	int r;
625	*result = NULL;
626
627	lock_basic_lock(&ctx->cfglock);
628	if(!ctx->finalized) {
629		r = context_finalize(ctx);
630		if(r) {
631			lock_basic_unlock(&ctx->cfglock);
632			return r;
633		}
634	}
635	/* create new ctx_query and attempt to add to the list */
636	lock_basic_unlock(&ctx->cfglock);
637	q = context_new(ctx, name, rrtype, rrclass, NULL, NULL);
638	if(!q)
639		return UB_NOMEM;
640	/* become a resolver thread for a bit */
641
642	r = libworker_fg(ctx, q);
643	if(r) {
644		lock_basic_lock(&ctx->cfglock);
645		(void)rbtree_delete(&ctx->queries, q->node.key);
646		context_query_delete(q);
647		lock_basic_unlock(&ctx->cfglock);
648		return r;
649	}
650	q->res->answer_packet = q->msg;
651	q->res->answer_len = (int)q->msg_len;
652	q->msg = NULL;
653	*result = q->res;
654	q->res = NULL;
655
656	lock_basic_lock(&ctx->cfglock);
657	(void)rbtree_delete(&ctx->queries, q->node.key);
658	context_query_delete(q);
659	lock_basic_unlock(&ctx->cfglock);
660	return UB_NOERROR;
661}
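
/*
 * Typical synchronous use of ub_resolve() above, as a sketch; the
 * context setup, query name and result handling are illustrative
 * assumptions:
 *
 *	struct ub_ctx* ctx = ub_ctx_create();
 *	struct ub_result* result = NULL;
 *	int r = ub_resolve(ctx, "www.example.com", 1, 1, &result);
 *	if(r == 0 && result && result->havedata) {
 *		... result->data[] holds the binary RDATA, result->len[]
 *		the sizes, result->canonname the canonical name ...
 *	}
 *	ub_resolve_free(result);
 *	ub_ctx_delete(ctx);
 *
 * rrtype 1 is A and rrclass 1 is IN.
 */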
662
663int
664ub_resolve_event(struct ub_ctx* ctx, const char* name, int rrtype,
665	int rrclass, void* mydata, ub_event_callback_t callback, int* async_id)
666{
667	struct ctx_query* q;
668	int r;
669
670	if(async_id)
671		*async_id = 0;
672	lock_basic_lock(&ctx->cfglock);
673	if(!ctx->finalized) {
674		int r = context_finalize(ctx);
675		if(r) {
676			lock_basic_unlock(&ctx->cfglock);
677			return r;
678		}
679	}
680	lock_basic_unlock(&ctx->cfglock);
681	if(!ctx->event_worker) {
682		ctx->event_worker = libworker_create_event(ctx,
683			ctx->event_base);
684		if(!ctx->event_worker) {
685			return UB_INITFAIL;
686		}
687	}
688
689	/* create new ctx_query and attempt to add to the list */
690	q = context_new(ctx, name, rrtype, rrclass, (ub_callback_t)callback,
691		mydata);
692	if(!q)
693		return UB_NOMEM;
694
695	/* attach to mesh */
696	if((r=libworker_attach_mesh(ctx, q, async_id)) != 0)
697		return r;
698	return UB_NOERROR;
699}
700
701
702int
703ub_resolve_async(struct ub_ctx* ctx, const char* name, int rrtype,
704	int rrclass, void* mydata, ub_callback_t callback, int* async_id)
705{
706	struct ctx_query* q;
707	uint8_t* msg = NULL;
708	uint32_t len = 0;
709
710	if(async_id)
711		*async_id = 0;
712	lock_basic_lock(&ctx->cfglock);
713	if(!ctx->finalized) {
714		int r = context_finalize(ctx);
715		if(r) {
716			lock_basic_unlock(&ctx->cfglock);
717			return r;
718		}
719	}
720	if(!ctx->created_bg) {
721		int r;
722		ctx->created_bg = 1;
723		lock_basic_unlock(&ctx->cfglock);
724		r = libworker_bg(ctx);
725		if(r) {
726			lock_basic_lock(&ctx->cfglock);
727			ctx->created_bg = 0;
728			lock_basic_unlock(&ctx->cfglock);
729			return r;
730		}
731	} else {
732		lock_basic_unlock(&ctx->cfglock);
733	}
734
735	/* create new ctx_query and attempt to add to the list */
736	q = context_new(ctx, name, rrtype, rrclass, callback, mydata);
737	if(!q)
738		return UB_NOMEM;
739
740	/* write over pipe to background worker */
741	lock_basic_lock(&ctx->cfglock);
742	msg = context_serialize_new_query(q, &len);
743	if(!msg) {
744		(void)rbtree_delete(&ctx->queries, q->node.key);
745		ctx->num_async--;
746		context_query_delete(q);
747		lock_basic_unlock(&ctx->cfglock);
748		return UB_NOMEM;
749	}
750	if(async_id)
751		*async_id = q->querynum;
752	lock_basic_unlock(&ctx->cfglock);
753
754	lock_basic_lock(&ctx->qqpipe_lock);
755	if(!tube_write_msg(ctx->qq_pipe, msg, len, 0)) {
756		lock_basic_unlock(&ctx->qqpipe_lock);
757		free(msg);
758		return UB_PIPE;
759	}
760	lock_basic_unlock(&ctx->qqpipe_lock);
761	free(msg);
762	return UB_NOERROR;
763}
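
/*
 * Sketch of asynchronous use of ub_resolve_async() above; the callback,
 * its data pointer and the query are illustrative assumptions:
 *
 *	void mycallback(void* mydata, int err, struct ub_result* result)
 *	{
 *		if(err == 0) {
 *			... inspect result ...
 *			ub_resolve_free(result);
 *		}
 *	}
 *
 *	int async_id = 0;
 *	ub_resolve_async(ctx, "www.example.com", 1, 1, NULL, mycallback,
 *		&async_id);
 *	ub_wait(ctx);
 *
 * The callback runs from inside ub_wait()/ub_process() and owns the
 * result, so it frees it with ub_resolve_free().
 */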
764
765int
766ub_cancel(struct ub_ctx* ctx, int async_id)
767{
768	struct ctx_query* q;
769	uint8_t* msg = NULL;
770	uint32_t len = 0;
771	lock_basic_lock(&ctx->cfglock);
772	q = (struct ctx_query*)rbtree_search(&ctx->queries, &async_id);
773	if(!q || !q->async) {
774		/* it is not there, so nothing to do */
775		lock_basic_unlock(&ctx->cfglock);
776		return UB_NOID;
777	}
778	log_assert(q->async);
779	q->cancelled = 1;
780
781	/* delete it */
782	if(!ctx->dothread) { /* if forked */
783		(void)rbtree_delete(&ctx->queries, q->node.key);
784		ctx->num_async--;
785		msg = context_serialize_cancel(q, &len);
786		context_query_delete(q);
787		lock_basic_unlock(&ctx->cfglock);
788		if(!msg) {
789			return UB_NOMEM;
790		}
791		/* send cancel to background worker */
792		lock_basic_lock(&ctx->qqpipe_lock);
793		if(!tube_write_msg(ctx->qq_pipe, msg, len, 0)) {
794			lock_basic_unlock(&ctx->qqpipe_lock);
795			free(msg);
796			return UB_PIPE;
797		}
798		lock_basic_unlock(&ctx->qqpipe_lock);
799		free(msg);
800	} else {
801		lock_basic_unlock(&ctx->cfglock);
802	}
803	return UB_NOERROR;
804}
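
/*
 * Note on the cancel logic above: in threaded mode the query stays in
 * the tree with q->cancelled set and process_answer_detail() drops the
 * callback when the now unwanted answer arrives; in forked mode the
 * query is removed here and a serialized cancel command is sent to the
 * background process.
 */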
805
806void
807ub_resolve_free(struct ub_result* result)
808{
809	char** p;
810	if(!result) return;
811	free(result->qname);
812	if(result->canonname != result->qname)
813		free(result->canonname);
814	if(result->data)
815		for(p = result->data; *p; p++)
816			free(*p);
817	free(result->data);
818	free(result->len);
819	free(result->answer_packet);
820	free(result->why_bogus);
821	free(result);
822}
823
824const char*
825ub_strerror(int err)
826{
827	switch(err) {
828		case UB_NOERROR: return "no error";
829		case UB_SOCKET: return "socket io error";
830		case UB_NOMEM: return "out of memory";
831		case UB_SYNTAX: return "syntax error";
832		case UB_SERVFAIL: return "server failure";
833		case UB_FORKFAIL: return "could not fork";
834		case UB_INITFAIL: return "initialization failure";
835		case UB_AFTERFINAL: return "setting change after finalize";
836		case UB_PIPE: return "error in pipe communication with async";
837		case UB_READFILE: return "error reading file";
838		case UB_NOID: return "error async_id does not exist";
839		default: return "unknown error";
840	}
841}
842
843int
844ub_ctx_set_fwd(struct ub_ctx* ctx, const char* addr)
845{
846	struct sockaddr_storage storage;
847	socklen_t stlen;
848	struct config_stub* s;
849	char* dupl;
850	lock_basic_lock(&ctx->cfglock);
851	if(ctx->finalized) {
852		lock_basic_unlock(&ctx->cfglock);
853		errno=EINVAL;
854		return UB_AFTERFINAL;
855	}
856	if(!addr) {
857		/* disable fwd mode - the root stub should be first. */
858		if(ctx->env->cfg->forwards &&
859			strcmp(ctx->env->cfg->forwards->name, ".") == 0) {
860			s = ctx->env->cfg->forwards;
861			ctx->env->cfg->forwards = s->next;
862			s->next = NULL;
863			config_delstubs(s);
864		}
865		lock_basic_unlock(&ctx->cfglock);
866		return UB_NOERROR;
867	}
868	lock_basic_unlock(&ctx->cfglock);
869
870	/* check syntax for addr */
871	if(!extstrtoaddr(addr, &storage, &stlen)) {
872		errno=EINVAL;
873		return UB_SYNTAX;
874	}
875
876	/* it parses, add root stub in front of list */
877	lock_basic_lock(&ctx->cfglock);
878	if(!ctx->env->cfg->forwards ||
879		strcmp(ctx->env->cfg->forwards->name, ".") != 0) {
880		s = calloc(1, sizeof(*s));
881		if(!s) {
882			lock_basic_unlock(&ctx->cfglock);
883			errno=ENOMEM;
884			return UB_NOMEM;
885		}
886		s->name = strdup(".");
887		if(!s->name) {
888			free(s);
889			lock_basic_unlock(&ctx->cfglock);
890			errno=ENOMEM;
891			return UB_NOMEM;
892		}
893		s->next = ctx->env->cfg->forwards;
894		ctx->env->cfg->forwards = s;
895	} else {
896		log_assert(ctx->env->cfg->forwards);
897		s = ctx->env->cfg->forwards;
898	}
899	dupl = strdup(addr);
900	if(!dupl) {
901		lock_basic_unlock(&ctx->cfglock);
902		errno=ENOMEM;
903		return UB_NOMEM;
904	}
905	if(!cfg_strlist_insert(&s->addrs, dupl)) {
906		free(dupl);
907		lock_basic_unlock(&ctx->cfglock);
908		errno=ENOMEM;
909		return UB_NOMEM;
910	}
911	lock_basic_unlock(&ctx->cfglock);
912	return UB_NOERROR;
913}
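
/*
 * Example use of ub_ctx_set_fwd() above; the address is a documentation
 * placeholder:
 *
 *	ub_ctx_set_fwd(ctx, "192.0.2.1");   forward all queries there
 *	ub_ctx_set_fwd(ctx, NULL);          remove the forward again
 */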
914
915int
916ub_ctx_resolvconf(struct ub_ctx* ctx, const char* fname)
917{
918	FILE* in;
919	int numserv = 0;
920	char buf[1024];
921	char* parse, *addr;
922	int r;
923
924	if(fname == NULL) {
925#if !defined(UB_ON_WINDOWS) || !defined(HAVE_WINDOWS_H)
926		fname = "/etc/resolv.conf";
927#else
928		FIXED_INFO *info;
929		ULONG buflen = sizeof(*info);
930		IP_ADDR_STRING *ptr;
931
932		info = (FIXED_INFO *) malloc(sizeof (FIXED_INFO));
933		if (info == NULL)
934			return UB_READFILE;
935
936		if (GetNetworkParams(info, &buflen) == ERROR_BUFFER_OVERFLOW) {
937			free(info);
938			info = (FIXED_INFO *) malloc(buflen);
939			if (info == NULL)
940				return UB_READFILE;
941		}
942
943		if (GetNetworkParams(info, &buflen) == NO_ERROR) {
944			int retval=0;
945			ptr = &(info->DnsServerList);
946			while (ptr) {
947				numserv++;
948				if((retval=ub_ctx_set_fwd(ctx,
949					ptr->IpAddress.String)) != 0) {
950					free(info);
951					return retval;
952				}
953				ptr = ptr->Next;
954			}
955			free(info);
956			if (numserv==0)
957				return UB_READFILE;
958			return UB_NOERROR;
959		}
960		free(info);
961		return UB_READFILE;
962#endif /* WINDOWS */
963	}
964	in = fopen(fname, "r");
965	if(!in) {
966		/* error in errno! perror(fname) */
967		return UB_READFILE;
968	}
969	while(fgets(buf, (int)sizeof(buf), in)) {
970		buf[sizeof(buf)-1] = 0;
971		parse=buf;
972		while(*parse == ' ' || *parse == '\t')
973			parse++;
974		if(strncmp(parse, "nameserver", 10) == 0) {
975			numserv++;
976			parse += 10; /* skip 'nameserver' */
977			/* skip whitespace */
978			while(*parse == ' ' || *parse == '\t')
979				parse++;
980			addr = parse;
981			/* skip [0-9a-fA-F.:]*, i.e. IP4 and IP6 address */
982			while(isxdigit((unsigned char)*parse) || *parse=='.' || *parse==':')
983				parse++;
984			/* terminate after the address, remove newline */
985			*parse = 0;
986
987			if((r = ub_ctx_set_fwd(ctx, addr)) != UB_NOERROR) {
988				fclose(in);
989				return r;
990			}
991		}
992	}
993	fclose(in);
994	if(numserv == 0) {
995		/* from resolv.conf(5) if none given, use localhost */
996		return ub_ctx_set_fwd(ctx, "127.0.0.1");
997	}
998	return UB_NOERROR;
999}
1000
1001int
1002ub_ctx_hosts(struct ub_ctx* ctx, const char* fname)
1003{
1004	FILE* in;
1005	char buf[1024], ldata[1024];
1006	char* parse, *addr, *name, *ins;
1007	lock_basic_lock(&ctx->cfglock);
1008	if(ctx->finalized) {
1009		lock_basic_unlock(&ctx->cfglock);
1010		errno=EINVAL;
1011		return UB_AFTERFINAL;
1012	}
1013	lock_basic_unlock(&ctx->cfglock);
1014	if(fname == NULL) {
1015#if defined(UB_ON_WINDOWS) && defined(HAVE_WINDOWS_H)
1016		/*
1017		 * If this is Windows NT/XP/2K it's in
1018		 * %WINDIR%\system32\drivers\etc\hosts.
1019		 * If this is Windows 95/98/Me it's in %WINDIR%\hosts.
1020		 */
1021		name = getenv("WINDIR");
1022		if (name != NULL) {
1023			int retval=0;
1024			snprintf(buf, sizeof(buf), "%s%s", name,
1025				"\\system32\\drivers\\etc\\hosts");
1026			if((retval=ub_ctx_hosts(ctx, buf)) !=0 ) {
1027				snprintf(buf, sizeof(buf), "%s%s", name,
1028					"\\hosts");
1029				retval=ub_ctx_hosts(ctx, buf);
1030			}
1031			/* name from getenv() must not be freed */
1032			return retval;
1033		}
1034		return UB_READFILE;
1035#else
1036		fname = "/etc/hosts";
1037#endif /* WIN32 */
1038	}
1039	in = fopen(fname, "r");
1040	if(!in) {
1041		/* error in errno! perror(fname) */
1042		return UB_READFILE;
1043	}
1044	while(fgets(buf, (int)sizeof(buf), in)) {
1045		buf[sizeof(buf)-1] = 0;
1046		parse=buf;
1047		while(*parse == ' ' || *parse == '\t')
1048			parse++;
1049		if(*parse == '#')
1050			continue; /* skip comment */
1051		/* format: <addr> spaces <name> spaces <name> ... */
1052		addr = parse;
1053		/* skip addr */
1054		while(isxdigit((unsigned char)*parse) || *parse == '.' || *parse == ':')
1055			parse++;
1056		if(*parse == '\n' || *parse == 0)
1057			continue;
1058		if(*parse == '%')
1059			continue; /* ignore Mac OS X fe80::1%lo0 localhost entry */
1060		if(*parse != ' ' && *parse != '\t') {
1061			/* must have whitespace after address */
1062			fclose(in);
1063			errno=EINVAL;
1064			return UB_SYNTAX;
1065		}
1066		*parse++ = 0; /* end delimiter for addr ... */
1067		/* go to names and add them */
1068		while(*parse) {
1069			while(*parse == ' ' || *parse == '\t' || *parse=='\n')
1070				parse++;
1071			if(*parse == 0 || *parse == '#')
1072				break;
1073			/* skip name, allows (too) many printable characters */
1074			name = parse;
1075			while('!' <= *parse && *parse <= '~')
1076				parse++;
1077			if(*parse)
1078				*parse++ = 0; /* end delimiter for name */
1079			snprintf(ldata, sizeof(ldata), "%s %s %s",
1080				name, str_is_ip6(addr)?"AAAA":"A", addr);
1081			ins = strdup(ldata);
1082			if(!ins) {
1083				/* out of memory */
1084				fclose(in);
1085				errno=ENOMEM;
1086				return UB_NOMEM;
1087			}
1088			lock_basic_lock(&ctx->cfglock);
1089			if(!cfg_strlist_insert(&ctx->env->cfg->local_data,
1090				ins)) {
1091				lock_basic_unlock(&ctx->cfglock);
1092				fclose(in);
1093				free(ins);
1094				errno=ENOMEM;
1095				return UB_NOMEM;
1096			}
1097			lock_basic_unlock(&ctx->cfglock);
1098		}
1099	}
1100	fclose(in);
1101	return UB_NOERROR;
1102}
1103
1104/** finalize the context, if not already finalized */
1105static int ub_ctx_finalize(struct ub_ctx* ctx)
1106{
1107	int res = 0;
1108	lock_basic_lock(&ctx->cfglock);
1109	if (!ctx->finalized) {
1110		res = context_finalize(ctx);
1111	}
1112	lock_basic_unlock(&ctx->cfglock);
1113	return res;
1114}
1115
1116/* Print local zones and RR data */
1117int ub_ctx_print_local_zones(struct ub_ctx* ctx)
1118{
1119	int res = ub_ctx_finalize(ctx);
1120	if (res) return res;
1121
1122	local_zones_print(ctx->local_zones);
1123
1124	return UB_NOERROR;
1125}
1126
1127/* Add a new zone */
1128int ub_ctx_zone_add(struct ub_ctx* ctx, const char *zone_name,
1129	const char *zone_type)
1130{
1131	enum localzone_type t;
1132	struct local_zone* z;
1133	uint8_t* nm;
1134	int nmlabs;
1135	size_t nmlen;
1136
1137	int res = ub_ctx_finalize(ctx);
1138	if (res) return res;
1139
1140	if(!local_zone_str2type(zone_type, &t)) {
1141		return UB_SYNTAX;
1142	}
1143
1144	if(!parse_dname(zone_name, &nm, &nmlen, &nmlabs)) {
1145		return UB_SYNTAX;
1146	}
1147
1148	lock_rw_wrlock(&ctx->local_zones->lock);
1149	if((z=local_zones_find(ctx->local_zones, nm, nmlen, nmlabs,
1150		LDNS_RR_CLASS_IN))) {
1151		/* already present in tree */
1152		lock_rw_wrlock(&z->lock);
1153		z->type = t; /* update type anyway */
1154		lock_rw_unlock(&z->lock);
1155		lock_rw_unlock(&ctx->local_zones->lock);
1156		free(nm);
1157		return UB_NOERROR;
1158	}
1159	if(!local_zones_add_zone(ctx->local_zones, nm, nmlen, nmlabs,
1160		LDNS_RR_CLASS_IN, t)) {
1161		lock_rw_unlock(&ctx->local_zones->lock);
1162		return UB_NOMEM;
1163	}
1164	lock_rw_unlock(&ctx->local_zones->lock);
1165	return UB_NOERROR;
1166}
1167
1168/* Remove zone */
1169int ub_ctx_zone_remove(struct ub_ctx* ctx, const char *zone_name)
1170{
1171	struct local_zone* z;
1172	uint8_t* nm;
1173	int nmlabs;
1174	size_t nmlen;
1175
1176	int res = ub_ctx_finalize(ctx);
1177	if (res) return res;
1178
1179	if(!parse_dname(zone_name, &nm, &nmlen, &nmlabs)) {
1180		return UB_SYNTAX;
1181	}
1182
1183	lock_rw_wrlock(&ctx->local_zones->lock);
1184	if((z=local_zones_find(ctx->local_zones, nm, nmlen, nmlabs,
1185		LDNS_RR_CLASS_IN))) {
1186		/* present in tree */
1187		local_zones_del_zone(ctx->local_zones, z);
1188	}
1189	lock_rw_unlock(&ctx->local_zones->lock);
1190	free(nm);
1191	return UB_NOERROR;
1192}
1193
1194/* Add new RR data */
1195int ub_ctx_data_add(struct ub_ctx* ctx, const char *data)
1196{
1197	int res = ub_ctx_finalize(ctx);
1198	if (res) return res;
1199
1200	res = local_zones_add_RR(ctx->local_zones, data);
1201	return (!res) ? UB_NOMEM : UB_NOERROR;
1202}
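
/*
 * Sketch of how the local zone and local data calls combine; the zone
 * name, type and record text are illustrative assumptions:
 *
 *	ub_ctx_zone_add(ctx, "example.local.", "static");
 *	ub_ctx_data_add(ctx, "host.example.local. IN A 192.0.2.1");
 *	...
 *	ub_ctx_data_remove(ctx, "host.example.local.");
 *	ub_ctx_zone_remove(ctx, "example.local.");
 */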
1203
1204/* Remove RR data */
1205int ub_ctx_data_remove(struct ub_ctx* ctx, const char *data)
1206{
1207	uint8_t* nm;
1208	int nmlabs;
1209	size_t nmlen;
1210	int res = ub_ctx_finalize(ctx);
1211	if (res) return res;
1212
1213	if(!parse_dname(data, &nm, &nmlen, &nmlabs))
1214		return UB_SYNTAX;
1215
1216	local_zones_del_data(ctx->local_zones, nm, nmlen, nmlabs,
1217		LDNS_RR_CLASS_IN);
1218
1219	free(nm);
1220	return UB_NOERROR;
1221}
1222
1223const char* ub_version(void)
1224{
1225	return PACKAGE_VERSION;
1226}
1227
1228int
1229ub_ctx_set_event(struct ub_ctx* ctx, struct event_base* base) {
1230	if (!ctx || !ctx->event_base || !base) {
1231		return UB_INITFAIL;
1232	}
1233	if (ctx->event_base == base) {
1234		/* already set */
1235		return UB_NOERROR;
1236	}
1237
1238	lock_basic_lock(&ctx->cfglock);
1239	/* destroy the current worker - safe to pass in NULL */
1240	libworker_delete_event(ctx->event_worker);
1241	ctx->event_worker = NULL;
1242	ctx->event_base = base;
1243	ctx->created_bg = 0;
1244	ctx->dothread = 1;
1245	lock_basic_unlock(&ctx->cfglock);
1246	return UB_NOERROR;
1247}
1248