/*
 * ntp_intres.c - Implements a generic blocking worker child or thread,
 *		  initially to provide a nonblocking solution for DNS
 *		  name to address lookups available with getaddrinfo().
 *
 * This is a new implementation as of 2009 sharing the filename and
 * very little else with the prior implementation, which used a
 * temporary file to receive a single set of requests from the parent,
 * and an NTP mode 7 authenticated request to push back responses.
 *
 * A primary goal in rewriting this code was the need to support the
 * pool configuration directive's requirement to retrieve multiple
 * addresses resolving a single name, which had previously been
 * satisfied with blocking resolver calls from the ntpd mainline code.
 *
 * A secondary goal is to provide a generic mechanism for other
 * blocking operations to be delegated to a worker using a common
 * model for both Unix and Windows ntpd.  ntp_worker.c, work_fork.c,
 * and work_thread.c implement the generic mechanism.  This file
 * implements the two current consumers, getaddrinfo_sometime() and the
 * presently unused getnameinfo_sometime().
 *
 * Both routines deliver results to a callback and manage memory
 * allocation, meaning there is no freeaddrinfo_sometime().
 *
 * The initial implementation for Unix uses a pair of unidirectional
 * pipes, one each for requests and responses, connecting the forked
 * blocking child worker with the ntpd mainline.  The threaded code
 * uses arrays of pointers to queue requests and responses.
 *
 * The parent drives the process, including scheduling sleeps between
 * retries.
 *
 * Memory is managed differently for a child process, which mallocs
 * request buffers to read from the pipe into, whereas the threaded
 * code mallocs a copy of the request to hand off to the worker via
 * the queueing array.  The resulting request buffer is free()d by
 * platform-independent code.  A wrinkle is that the request needs to
 * be available to the requestor during response processing.
 *
 * Response memory allocation is also platform-dependent.  With a
 * separate process and pipes, the response is free()d after being
 * written to the pipe.  With threads, the same memory is handed
 * over and the requestor frees it after processing is completed.
 *
 * The code should be generalized to support threads on Unix using
 * much of the same code used for Windows initially.
 *
 */
#ifdef HAVE_CONFIG_H
# include <config.h>
#endif

#include "ntp_workimpl.h"

#ifdef WORKER

#include <stdio.h>
#include <ctype.h>
#include <signal.h>

/**/
#ifdef HAVE_SYS_TYPES_H
# include <sys/types.h>
#endif
#ifdef HAVE_NETINET_IN_H
#include <netinet/in.h>
#endif
#include <arpa/inet.h>
/**/
#ifdef HAVE_SYS_PARAM_H
# include <sys/param.h>
#endif

#if !defined(HAVE_RES_INIT) && defined(HAVE___RES_INIT)
# define HAVE_RES_INIT
#endif

#if defined(HAVE_RESOLV_H) && defined(HAVE_RES_INIT)
# ifdef HAVE_ARPA_NAMESER_H
#  include <arpa/nameser.h> /* DNS HEADER struct */
# endif
# ifdef HAVE_NETDB_H
#  include <netdb.h>
# endif
# include <resolv.h>
# ifdef HAVE_INT32_ONLY_WITH_DNS
#  define HAVE_INT32
# endif
# ifdef HAVE_U_INT32_ONLY_WITH_DNS
#  define HAVE_U_INT32
# endif
#endif

#include "ntp.h"
#include "ntp_debug.h"
#include "ntp_malloc.h"
#include "ntp_syslog.h"
#include "ntp_unixtime.h"
#include "ntp_intres.h"
#include "intreswork.h"


/*
 * Following are implementations of getaddrinfo_sometime() and
 * getnameinfo_sometime().  Each is implemented in three routines:
 *
 * getaddrinfo_sometime()		getnameinfo_sometime()
 * blocking_getaddrinfo()		blocking_getnameinfo()
 * getaddrinfo_sometime_complete()	getnameinfo_sometime_complete()
 *
 * The first runs in the parent and marshals (or serializes) request
 * parameters into a request blob, which is processed in the child by
 * the second routine, blocking_*(), which serializes the results into
 * a response blob unpacked by the third routine, *_complete(), which
 * calls the callback routine provided with the request and frees
 * _request_ memory allocated by the first routine.  Response memory
 * is managed by the code which calls the *_complete routines.
 */

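/*
 * Illustrative only: a minimal sketch of how a caller might drive
 * getaddrinfo_sometime().  The callback example_dns_done(), the
 * caller example_resolve(), and the host name used are hypothetical
 * and not part of ntpd; see gai_test_callback() further below for
 * the callback actually exercised under TEST_BLOCKING_WORKER.
 */
#if 0	/* example sketch, not compiled */
static void
example_dns_done(
	int			rescode,
	int			gai_errno,
	void *			context,
	const char *		name,
	const char *		service,
	const struct addrinfo *	hints,
	const struct addrinfo *	ai_res
	)
{
	/* on success, walk the reassembled addrinfo chain; do not
	 * freeaddrinfo() it, the response buffer is freed for us. */
	if (0 == rescode)
		for ( ; NULL != ai_res; ai_res = ai_res->ai_next)
			/* use ai_res->ai_addr, ai_res->ai_addrlen */ ;
}

static void
example_resolve(void)
{
	struct addrinfo hints;

	ZERO(hints);
	hints.ai_family = AF_UNSPEC;
	hints.ai_socktype = SOCK_DGRAM;
	/* queue the lookup; example_dns_done() runs from the
	 * mainline once the worker delivers a response. */
	getaddrinfo_sometime("ntp.example.com", "ntp", &hints,
			     INITIAL_DNS_RETRY, &example_dns_done,
			     NULL);
}
#endif	/* 0 */
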
/* === typedefs === */
typedef struct blocking_gai_req_tag {	/* marshalled args */
	size_t			octets;
	u_int			dns_idx;
	time_t			scheduled;
	time_t			earliest;
	struct addrinfo		hints;
	int			retry;
	gai_sometime_callback	callback;
	void *			context;
	size_t			nodesize;
	size_t			servsize;
} blocking_gai_req;

typedef struct blocking_gai_resp_tag {
	size_t			octets;
	int			retcode;
	int			retry;
	int			gai_errno; /* for EAI_SYSTEM case */
	int			ai_count;
	/*
	 * Followed by ai_count struct addrinfo and then ai_count
	 * sockaddr_u and finally the canonical name strings.
	 */
} blocking_gai_resp;

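/*
 * Layout sketch of the response blob described above (illustrative,
 * not a separate on-the-wire definition): the blocking_gai_resp
 * header is followed by ai_count addrinfo copies (with ai_canonname
 * rewritten as an offset into the name area and ai_addr left for the
 * receiver to fix up), then ai_count sockaddr_u copies in the same
 * order, then the NUL-terminated canonical names concatenated.
 * getaddrinfo_sometime_complete() converts the offsets back into
 * pointers before invoking the caller's callback.
 */
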
typedef struct blocking_gni_req_tag {
	size_t			octets;
	u_int			dns_idx;
	time_t			scheduled;
	time_t			earliest;
	int			retry;
	size_t			hostoctets;
	size_t			servoctets;
	int			flags;
	gni_sometime_callback	callback;
	void *			context;
	sockaddr_u		socku;
} blocking_gni_req;

typedef struct blocking_gni_resp_tag {
	size_t			octets;
	int			retcode;
	int			gni_errno; /* for EAI_SYSTEM case */
	int			retry;
	size_t			hostoctets;
	size_t			servoctets;
	/*
	 * Followed by hostoctets bytes of null-terminated host,
	 * then servoctets bytes of null-terminated service.
	 */
} blocking_gni_resp;

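/*
 * Corresponding sketch for the getnameinfo response: the
 * blocking_gni_resp header is followed by hostoctets bytes of
 * NUL-terminated host name and then servoctets bytes of
 * NUL-terminated service name; blocking_getnameinfo() sets both
 * counts to zero when the lookup fails, so nothing follows the
 * header in that case.
 */
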
/* per-DNS-worker state in parent */
typedef struct dnschild_ctx_tag {
	u_int	index;
	time_t	next_dns_timeslot;
} dnschild_ctx;

/* per-DNS-worker state in worker */
typedef struct dnsworker_ctx_tag {
	blocking_child *	c;
	time_t			ignore_scheduled_before;
#ifdef HAVE_RES_INIT
	time_t	next_res_init;
#endif
} dnsworker_ctx;


/* === variables === */
dnschild_ctx **		dnschild_contexts;		/* parent */
u_int			dnschild_contexts_alloc;
dnsworker_ctx **	dnsworker_contexts;		/* child */
u_int			dnsworker_contexts_alloc;

#ifdef HAVE_RES_INIT
static	time_t		next_res_init;
#endif


/* === forward declarations === */
static	u_int		reserve_dnschild_ctx(void);
static	u_int		get_dnschild_ctx(void);
static	dnsworker_ctx *	get_worker_context(blocking_child *, u_int);
static	void		scheduled_sleep(time_t, time_t,
					dnsworker_ctx *);
static	void		manage_dns_retry_interval(time_t *, time_t *,
						  int *,
						  time_t *);
static	int		should_retry_dns(int, int);
#ifdef HAVE_RES_INIT
static	void		reload_resolv_conf(dnsworker_ctx *);
#else
# define		reload_resolv_conf(wc)		\
	do {						\
		(void)(wc);				\
	} while (FALSE)
#endif
static	void		getaddrinfo_sometime_complete(blocking_work_req,
						      void *, size_t,
						      void *);
static	void		getnameinfo_sometime_complete(blocking_work_req,
						      void *, size_t,
						      void *);


/* === functions === */
/*
 * getaddrinfo_sometime - uses blocking child to call getaddrinfo then
 *			  invokes provided callback completion function.
 */
int
getaddrinfo_sometime(
	const char *		node,
	const char *		service,
	const struct addrinfo *	hints,
	int			retry,
	gai_sometime_callback	callback,
	void *			context
	)
{
	blocking_gai_req *	gai_req;
	u_int			idx;
	dnschild_ctx *		child_ctx;
	size_t			req_size;
	size_t			nodesize;
	size_t			servsize;
	time_t			now;

	REQUIRE(NULL != node);
	if (NULL != hints) {
		REQUIRE(0 == hints->ai_addrlen);
		REQUIRE(NULL == hints->ai_addr);
		REQUIRE(NULL == hints->ai_canonname);
		REQUIRE(NULL == hints->ai_next);
	}

	idx = get_dnschild_ctx();
	child_ctx = dnschild_contexts[idx];

	nodesize = strlen(node) + 1;
	servsize = strlen(service) + 1;
	req_size = sizeof(*gai_req) + nodesize + servsize;

	gai_req = emalloc_zero(req_size);

	gai_req->octets = req_size;
	gai_req->dns_idx = idx;
	now = time(NULL);
	gai_req->scheduled = now;
	gai_req->earliest = max(now, child_ctx->next_dns_timeslot);
	child_ctx->next_dns_timeslot = gai_req->earliest;
	if (hints != NULL)
		gai_req->hints = *hints;
	gai_req->retry = retry;
	gai_req->callback = callback;
	gai_req->context = context;
	gai_req->nodesize = nodesize;
	gai_req->servsize = servsize;

	memcpy((char *)gai_req + sizeof(*gai_req), node, nodesize);
	memcpy((char *)gai_req + sizeof(*gai_req) + nodesize, service,
	       servsize);

	if (queue_blocking_request(
		BLOCKING_GETADDRINFO,
		gai_req,
		req_size,
		&getaddrinfo_sometime_complete,
		gai_req)) {

		msyslog(LOG_ERR, "unable to queue getaddrinfo request");
		errno = EFAULT;
		return -1;
	}

	return 0;
}

int
blocking_getaddrinfo(
	blocking_child *	c,
	blocking_pipe_header *	req
	)
{
	blocking_gai_req *	gai_req;
	dnsworker_ctx *		worker_ctx;
	blocking_pipe_header *	resp;
	blocking_gai_resp *	gai_resp;
	char *			node;
	char *			service;
	struct addrinfo *	ai_res;
	struct addrinfo *	ai;
	struct addrinfo *	serialized_ai;
	size_t			canons_octets;
	size_t			this_octets;
	size_t			resp_octets;
	char *			cp;
	time_t			time_now;

	gai_req = (void *)((char *)req + sizeof(*req));
	node = (char *)gai_req + sizeof(*gai_req);
	service = node + gai_req->nodesize;

	worker_ctx = get_worker_context(c, gai_req->dns_idx);
	scheduled_sleep(gai_req->scheduled, gai_req->earliest,
			worker_ctx);
	reload_resolv_conf(worker_ctx);

	/*
	 * Take a shot at the final size, better to overestimate
	 * at first and then realloc to a smaller size.
	 */

	resp_octets = sizeof(*resp) + sizeof(*gai_resp) +
		      16 * (sizeof(struct addrinfo) +
			    sizeof(sockaddr_u)) +
		      256;
	resp = emalloc_zero(resp_octets);
	gai_resp = (void *)(resp + 1);

	TRACE(2, ("blocking_getaddrinfo given node %s serv %s fam %d flags %x\n",
		  node, service, gai_req->hints.ai_family,
		  gai_req->hints.ai_flags));
#ifdef DEBUG
	if (debug >= 2)
		fflush(stdout);
#endif
	ai_res = NULL;
	gai_resp->retcode = getaddrinfo(node, service, &gai_req->hints,
					&ai_res);
	gai_resp->retry = gai_req->retry;
#ifdef EAI_SYSTEM
	if (EAI_SYSTEM == gai_resp->retcode)
		gai_resp->gai_errno = errno;
#endif
	canons_octets = 0;

	if (0 == gai_resp->retcode) {
		ai = ai_res;
		while (NULL != ai) {
			gai_resp->ai_count++;
			if (ai->ai_canonname)
				canons_octets += strlen(ai->ai_canonname) + 1;
			ai = ai->ai_next;
		}
		/*
		 * If this query succeeded only after retrying, DNS may have
		 * just become responsive.  Ignore previously-scheduled
		 * retry sleeps once for each pending request, similar to
		 * the way scheduled_sleep() does when its worker_sleep()
		 * is interrupted.
		 */
		if (gai_resp->retry > INITIAL_DNS_RETRY) {
			time_now = time(NULL);
			worker_ctx->ignore_scheduled_before = time_now;
			TRACE(1, ("DNS success after retry, ignoring sleeps scheduled before now (%s)\n",
				  humantime(time_now)));
		}
	}

	/*
	 * Our response consists of a header, followed by ai_count
	 * addrinfo structs, followed by ai_count sockaddr_u
	 * structs, followed by the canonical names.
	 */
	gai_resp->octets = sizeof(*gai_resp)
			    + gai_resp->ai_count
				* (sizeof(gai_req->hints)
				   + sizeof(sockaddr_u))
			    + canons_octets;

	resp_octets = sizeof(*resp) + gai_resp->octets;
	resp = erealloc(resp, resp_octets);
	gai_resp = (void *)(resp + 1);

	/* cp serves as our current pointer while serializing */
	cp = (void *)(gai_resp + 1);
	canons_octets = 0;

	if (0 == gai_resp->retcode) {
		ai = ai_res;
		while (NULL != ai) {
			memcpy(cp, ai, sizeof(*ai));
			serialized_ai = (void *)cp;
			cp += sizeof(*ai);

			/* transform ai_canonname into offset */
			if (NULL != serialized_ai->ai_canonname) {
				serialized_ai->ai_canonname = (char *)canons_octets;
				canons_octets += strlen(ai->ai_canonname) + 1;
			}

			/* leave fixup of ai_addr pointer for receiver */

			ai = ai->ai_next;
		}

		ai = ai_res;
		while (NULL != ai) {
			INSIST(ai->ai_addrlen <= sizeof(sockaddr_u));
			memcpy(cp, ai->ai_addr, ai->ai_addrlen);
			cp += sizeof(sockaddr_u);

			ai = ai->ai_next;
		}

		ai = ai_res;
		while (NULL != ai) {
			if (NULL != ai->ai_canonname) {
				this_octets = strlen(ai->ai_canonname) + 1;
				memcpy(cp, ai->ai_canonname, this_octets);
				cp += this_octets;
			}

			ai = ai->ai_next;
		}
		freeaddrinfo(ai_res);
	}

	/*
	 * make sure our walk and earlier calc match
	 */
	DEBUG_INSIST((size_t)(cp - (char *)resp) == resp_octets);

	if (queue_blocking_response(c, resp, resp_octets, req)) {
		msyslog(LOG_ERR, "blocking_getaddrinfo can not queue response");
		return -1;
	}

	return 0;
}


static void
getaddrinfo_sometime_complete(
	blocking_work_req	rtype,
	void *			context,
	size_t			respsize,
	void *			resp
	)
{
	blocking_gai_req *	gai_req;
	blocking_gai_resp *	gai_resp;
	dnschild_ctx *		child_ctx;
	struct addrinfo *	ai;
	struct addrinfo *	next_ai;
	sockaddr_u *		psau;
	char *			node;
	char *			service;
	char *			canon_start;
	time_t			time_now;
	int			again;
	int			af;
	const char *		fam_spec;
	int			i;

	gai_req = context;
	gai_resp = resp;

	DEBUG_REQUIRE(BLOCKING_GETADDRINFO == rtype);
	DEBUG_REQUIRE(respsize == gai_resp->octets);

	node = (char *)gai_req + sizeof(*gai_req);
	service = node + gai_req->nodesize;

	child_ctx = dnschild_contexts[gai_req->dns_idx];

	if (0 == gai_resp->retcode) {
		/*
		 * If this query succeeded only after retrying, DNS may have
		 * just become responsive.
		 */
		if (gai_resp->retry > INITIAL_DNS_RETRY) {
			time_now = time(NULL);
			child_ctx->next_dns_timeslot = time_now;
			TRACE(1, ("DNS success after retry, %u next_dns_timeslot reset (%s)\n",
				  gai_req->dns_idx, humantime(time_now)));
		}
	} else {
		again = should_retry_dns(gai_resp->retcode,
					 gai_resp->gai_errno);
		/*
		 * exponential backoff of DNS retries to 64s
		 */
		if (gai_req->retry > 0 && again) {
			/* log the first retry only */
			if (INITIAL_DNS_RETRY == gai_req->retry)
				NLOG(NLOG_SYSINFO) {
					af = gai_req->hints.ai_family;
					fam_spec = (AF_INET6 == af)
						       ? " (AAAA)"
						       : (AF_INET == af)
							     ? " (A)"
							     : "";
#ifdef EAI_SYSTEM
					if (EAI_SYSTEM == gai_resp->retcode) {
						errno = gai_resp->gai_errno;
						msyslog(LOG_INFO,
							"retrying DNS %s%s: EAI_SYSTEM %d: %m",
							node, fam_spec,
							gai_resp->gai_errno);
					} else
#endif
						msyslog(LOG_INFO,
							"retrying DNS %s%s: %s (%d)",
							node, fam_spec,
							gai_strerror(gai_resp->retcode),
							gai_resp->retcode);
				}
			manage_dns_retry_interval(&gai_req->scheduled,
			    &gai_req->earliest, &gai_req->retry,
			    &child_ctx->next_dns_timeslot);
			if (!queue_blocking_request(
					BLOCKING_GETADDRINFO,
					gai_req,
					gai_req->octets,
					&getaddrinfo_sometime_complete,
					gai_req))
				return;
			else
				msyslog(LOG_ERR,
					"unable to retry hostname %s",
					node);
		}
	}

	/*
	 * fixup pointers in returned addrinfo array
	 */
	ai = (void *)((char *)gai_resp + sizeof(*gai_resp));
	next_ai = NULL;
	for (i = gai_resp->ai_count - 1; i >= 0; i--) {
		ai[i].ai_next = next_ai;
		next_ai = &ai[i];
	}

	psau = (void *)((char *)ai + gai_resp->ai_count * sizeof(*ai));
	canon_start = (char *)psau + gai_resp->ai_count * sizeof(*psau);

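	/*
	 * The child serialized ai_canonname as an offset into the
	 * canonical-name area and left ai_addr pointing into its own
	 * memory (see blocking_getaddrinfo() above), so rebase both
	 * onto the copies that arrived in this response blob.
	 */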
	for (i = 0; i < gai_resp->ai_count; i++) {
		if (NULL != ai[i].ai_addr)
			ai[i].ai_addr = &psau->sa;
		psau++;
		if (NULL != ai[i].ai_canonname)
			ai[i].ai_canonname += (size_t)canon_start;
	}

	ENSURE((char *)psau == canon_start);

	if (!gai_resp->ai_count)
		ai = NULL;

	(*gai_req->callback)(gai_resp->retcode, gai_resp->gai_errno,
			     gai_req->context, node, service,
			     &gai_req->hints, ai);

	free(gai_req);
	/* gai_resp is part of block freed by process_blocking_resp() */
}


#ifdef TEST_BLOCKING_WORKER
void gai_test_callback(int rescode, int gai_errno, void *context, const char *name, const char *service, const struct addrinfo *hints, const struct addrinfo *ai_res)
{
	sockaddr_u addr;

	if (rescode) {
		TRACE(1, ("gai_test_callback context %p error rescode %d %s serv %s\n",
			  context, rescode, name, service));
		return;
	}
	while (!rescode && NULL != ai_res) {
		ZERO_SOCK(&addr);
		memcpy(&addr, ai_res->ai_addr, ai_res->ai_addrlen);
		TRACE(1, ("ctx %p fam %d addr %s canon '%s' type %s at %p ai_addr %p ai_next %p\n",
			  context,
			  AF(&addr),
			  stoa(&addr),
			  (ai_res->ai_canonname)
			      ? ai_res->ai_canonname
			      : "",
			  (SOCK_DGRAM == ai_res->ai_socktype)
			      ? "DGRAM"
			      : (SOCK_STREAM == ai_res->ai_socktype)
				    ? "STREAM"
				    : "(other)",
			  ai_res,
			  ai_res->ai_addr,
			  ai_res->ai_next));

		getnameinfo_sometime((sockaddr_u *)ai_res->ai_addr, 128, 32, 0, gni_test_callback, context);

		ai_res = ai_res->ai_next;
	}
}
#endif	/* TEST_BLOCKING_WORKER */


int
getnameinfo_sometime(
	sockaddr_u *		psau,
	size_t			hostoctets,
	size_t			servoctets,
	int			flags,
	gni_sometime_callback	callback,
	void *			context
	)
{
	blocking_gni_req *	gni_req;
	u_int			idx;
	dnschild_ctx *		child_ctx;
	time_t			time_now;

	REQUIRE(hostoctets);
	REQUIRE(hostoctets + servoctets < 1024);

	idx = get_dnschild_ctx();
	child_ctx = dnschild_contexts[idx];

	gni_req = emalloc_zero(sizeof(*gni_req));

	gni_req->octets = sizeof(*gni_req);
	gni_req->dns_idx = idx;
	time_now = time(NULL);
	gni_req->scheduled = time_now;
	gni_req->earliest = max(time_now, child_ctx->next_dns_timeslot);
	child_ctx->next_dns_timeslot = gni_req->earliest;
	memcpy(&gni_req->socku, psau, SOCKLEN(psau));
	gni_req->hostoctets = hostoctets;
	gni_req->servoctets = servoctets;
	gni_req->flags = flags;
	gni_req->retry = INITIAL_DNS_RETRY;
	gni_req->callback = callback;
	gni_req->context = context;

	if (queue_blocking_request(
		BLOCKING_GETNAMEINFO,
		gni_req,
		sizeof(*gni_req),
		&getnameinfo_sometime_complete,
		gni_req)) {

		msyslog(LOG_ERR, "unable to queue getnameinfo request");
		errno = EFAULT;
		return -1;
	}

	return 0;
}


int
blocking_getnameinfo(
	blocking_child *	c,
	blocking_pipe_header *	req
	)
{
	blocking_gni_req *	gni_req;
	dnsworker_ctx *		worker_ctx;
	blocking_pipe_header *	resp;
	blocking_gni_resp *	gni_resp;
	size_t			octets;
	size_t			resp_octets;
	char *			service;
	char *			cp;
	int			rc;
	time_t			time_now;
	char			host[1024];

	gni_req = (void *)((char *)req + sizeof(*req));

	octets = gni_req->hostoctets + gni_req->servoctets;

	/*
	 * Some alloca() implementations are fragile with large
	 * allocations, so a fixed stack buffer is used here; we
	 * only need room for the host and service names.
	 */
	REQUIRE(octets < sizeof(host));
	service = host + gni_req->hostoctets;

	worker_ctx = get_worker_context(c, gni_req->dns_idx);
	scheduled_sleep(gni_req->scheduled, gni_req->earliest,
			worker_ctx);
	reload_resolv_conf(worker_ctx);

	/*
	 * Take a shot at the final size, better to overestimate
	 * at first and then realloc to a smaller size.
	 */
	resp_octets = sizeof(*resp) + sizeof(*gni_resp) + octets;
	resp = emalloc_zero(resp_octets);
	gni_resp = (void *)((char *)resp + sizeof(*resp));

	TRACE(2, ("blocking_getnameinfo given addr %s flags 0x%x hostlen %lu servlen %lu\n",
		  stoa(&gni_req->socku), gni_req->flags,
		  (u_long)gni_req->hostoctets, (u_long)gni_req->servoctets));

	gni_resp->retcode = getnameinfo(&gni_req->socku.sa,
					SOCKLEN(&gni_req->socku),
					host,
					gni_req->hostoctets,
					service,
					gni_req->servoctets,
					gni_req->flags);
	gni_resp->retry = gni_req->retry;
#ifdef EAI_SYSTEM
	if (EAI_SYSTEM == gni_resp->retcode)
		gni_resp->gni_errno = errno;
#endif

	if (0 != gni_resp->retcode) {
		gni_resp->hostoctets = 0;
		gni_resp->servoctets = 0;
	} else {
		gni_resp->hostoctets = strlen(host) + 1;
		gni_resp->servoctets = strlen(service) + 1;
		/*
		 * If this query succeeded only after retrying, DNS may have
		 * just become responsive.  Ignore previously-scheduled
		 * retry sleeps once for each pending request, similar to
		 * the way scheduled_sleep() does when its worker_sleep()
		 * is interrupted.
		 */
		if (gni_req->retry > INITIAL_DNS_RETRY) {
			time_now = time(NULL);
			worker_ctx->ignore_scheduled_before = time_now;
			TRACE(1, ("DNS success after retrying, ignoring sleeps scheduled before now (%s)\n",
				humantime(time_now)));
		}
	}
	octets = gni_resp->hostoctets + gni_resp->servoctets;
	/*
	 * Our response consists of a header, followed by the host and
	 * service strings, each null-terminated.
	 */
	resp_octets = sizeof(*resp) + sizeof(*gni_resp) + octets;

	resp = erealloc(resp, resp_octets);
	gni_resp = (void *)(resp + 1);

	gni_resp->octets = sizeof(*gni_resp) + octets;

	/* cp serves as our current pointer while serializing */
	cp = (void *)(gni_resp + 1);

	if (0 == gni_resp->retcode) {
		memcpy(cp, host, gni_resp->hostoctets);
		cp += gni_resp->hostoctets;
		memcpy(cp, service, gni_resp->servoctets);
		cp += gni_resp->servoctets;
	}

	INSIST((size_t)(cp - (char *)resp) == resp_octets);
	INSIST(resp_octets - sizeof(*resp) == gni_resp->octets);

	rc = queue_blocking_response(c, resp, resp_octets, req);
	if (rc)
		msyslog(LOG_ERR, "blocking_getnameinfo unable to queue response");
	return rc;
}


static void
getnameinfo_sometime_complete(
	blocking_work_req	rtype,
	void *			context,
	size_t			respsize,
	void *			resp
	)
{
	blocking_gni_req *	gni_req;
	blocking_gni_resp *	gni_resp;
	dnschild_ctx *		child_ctx;
	char *			host;
	char *			service;
	time_t			time_now;
	int			again;

	gni_req = context;
	gni_resp = resp;

	DEBUG_REQUIRE(BLOCKING_GETNAMEINFO == rtype);
	DEBUG_REQUIRE(respsize == gni_resp->octets);

	child_ctx = dnschild_contexts[gni_req->dns_idx];

	if (0 == gni_resp->retcode) {
		/*
		 * If this query succeeded only after retrying, DNS may have
		 * just become responsive.
		 */
		if (gni_resp->retry > INITIAL_DNS_RETRY) {
			time_now = time(NULL);
			child_ctx->next_dns_timeslot = time_now;
			TRACE(1, ("DNS success after retry, %u next_dns_timeslot reset (%s)\n",
				  gni_req->dns_idx, humantime(time_now)));
		}
	} else {
		again = should_retry_dns(gni_resp->retcode, gni_resp->gni_errno);
		/*
		 * exponential backoff of DNS retries to 64s
		 */
		if (gni_req->retry > 0)
			manage_dns_retry_interval(&gni_req->scheduled,
			    &gni_req->earliest, &gni_req->retry,
			    &child_ctx->next_dns_timeslot);

		if (gni_req->retry > 0 && again) {
			if (!queue_blocking_request(
				BLOCKING_GETNAMEINFO,
				gni_req,
				gni_req->octets,
				&getnameinfo_sometime_complete,
				gni_req))
				return;

			msyslog(LOG_ERR, "unable to retry reverse lookup of %s", stoa(&gni_req->socku));
		}
	}

	if (!gni_resp->hostoctets) {
		host = NULL;
		service = NULL;
	} else {
		host = (char *)gni_resp + sizeof(*gni_resp);
		service = (gni_resp->servoctets)
			      ? host + gni_resp->hostoctets
			      : NULL;
	}

	(*gni_req->callback)(gni_resp->retcode, gni_resp->gni_errno,
			     &gni_req->socku, gni_req->flags, host,
			     service, gni_req->context);

	free(gni_req);
	/* gni_resp is part of block freed by process_blocking_resp() */
}


#ifdef TEST_BLOCKING_WORKER
void gni_test_callback(int rescode, int gni_errno, sockaddr_u *psau, int flags, const char *host, const char *service, void *context)
{
	if (!rescode)
		TRACE(1, ("gni_test_callback got host '%s' serv '%s' for addr %s context %p\n",
			  host, service, stoa(psau), context));
	else
		TRACE(1, ("gni_test_callback context %p rescode %d gni_errno %d flags 0x%x addr %s\n",
			  context, rescode, gni_errno, flags, stoa(psau)));
}
#endif	/* TEST_BLOCKING_WORKER */


#ifdef HAVE_RES_INIT
static void
reload_resolv_conf(
	dnsworker_ctx *	worker_ctx
	)
{
	time_t	time_now;

	/*
	 * This is ad-hoc.  Reload /etc/resolv.conf once per minute
	 * to pick up on changes from the DHCP client.  [Bug 1226]
	 * When using threads for the workers, this needs to happen
	 * only once per minute process-wide.
	 */
	time_now = time(NULL);
# ifdef WORK_THREAD
	worker_ctx->next_res_init = next_res_init;
# endif
	if (worker_ctx->next_res_init <= time_now) {
		if (worker_ctx->next_res_init != 0)
			res_init();
		worker_ctx->next_res_init = time_now + 60;
# ifdef WORK_THREAD
		next_res_init = worker_ctx->next_res_init;
# endif
	}
}
#endif	/* HAVE_RES_INIT */


static u_int
reserve_dnschild_ctx(void)
{
	const size_t	ps = sizeof(dnschild_contexts[0]);
	const size_t	cs = sizeof(*dnschild_contexts[0]);
	u_int		c;
	u_int		new_alloc;
	size_t		octets;
	size_t		new_octets;

	c = 0;
	while (TRUE) {
		for ( ; c < dnschild_contexts_alloc; c++) {
			if (NULL == dnschild_contexts[c]) {
				dnschild_contexts[c] = emalloc_zero(cs);

				return c;
			}
		}
		new_alloc = dnschild_contexts_alloc + 20;
		new_octets = new_alloc * ps;
		octets = dnschild_contexts_alloc * ps;
		dnschild_contexts = erealloc_zero(dnschild_contexts,
						  new_octets, octets);
		dnschild_contexts_alloc = new_alloc;
	}
}


static u_int
get_dnschild_ctx(void)
{
	static u_int	shared_ctx = UINT_MAX;

	if (worker_per_query)
		return reserve_dnschild_ctx();

	if (UINT_MAX == shared_ctx)
		shared_ctx = reserve_dnschild_ctx();

	return shared_ctx;
}


static dnsworker_ctx *
get_worker_context(
	blocking_child *	c,
	u_int			idx
	)
{
	u_int		min_new_alloc;
	u_int		new_alloc;
	size_t		octets;
	size_t		new_octets;
	dnsworker_ctx *	retv;

	worker_global_lock(TRUE);

	if (dnsworker_contexts_alloc <= idx) {
		min_new_alloc = 1 + idx;
		/* round new_alloc up to nearest multiple of 4 */
		new_alloc = (min_new_alloc + 4) & ~(4 - 1);
		new_octets = new_alloc * sizeof(dnsworker_ctx*);
		octets = dnsworker_contexts_alloc * sizeof(dnsworker_ctx*);
		dnsworker_contexts = erealloc_zero(dnsworker_contexts,
						   new_octets, octets);
		dnsworker_contexts_alloc = new_alloc;
		retv = emalloc_zero(sizeof(dnsworker_ctx));
		dnsworker_contexts[idx] = retv;
	} else if (NULL == (retv = dnsworker_contexts[idx])) {
		retv = emalloc_zero(sizeof(dnsworker_ctx));
		dnsworker_contexts[idx] = retv;
	}

	worker_global_lock(FALSE);

	ZERO(*retv);
	retv->c = c;
	return retv;
}


static void
scheduled_sleep(
	time_t		scheduled,
	time_t		earliest,
	dnsworker_ctx *	worker_ctx
	)
{
	time_t now;

	if (scheduled < worker_ctx->ignore_scheduled_before) {
		TRACE(1, ("ignoring sleep until %s scheduled at %s (before %s)\n",
			  humantime(earliest), humantime(scheduled),
			  humantime(worker_ctx->ignore_scheduled_before)));
		return;
	}

	now = time(NULL);

	if (now < earliest) {
		TRACE(1, ("sleep until %s scheduled at %s (>= %s)\n",
			  humantime(earliest), humantime(scheduled),
			  humantime(worker_ctx->ignore_scheduled_before)));
		if (-1 == worker_sleep(worker_ctx->c, earliest - now)) {
			/* our sleep was interrupted */
			now = time(NULL);
			worker_ctx->ignore_scheduled_before = now;
#ifdef HAVE_RES_INIT
			worker_ctx->next_res_init = now + 60;
			next_res_init = worker_ctx->next_res_init;
			res_init();
#endif
			TRACE(1, ("sleep interrupted by daemon, ignoring sleeps scheduled before now (%s)\n",
				  humantime(worker_ctx->ignore_scheduled_before)));
		}
	}
}

/*
 * manage_dns_retry_interval is a helper used by
 * getaddrinfo_sometime_complete and getnameinfo_sometime_complete
 * to calculate the new retry interval and schedule the next query.
 */
static void
manage_dns_retry_interval(
	time_t *	pscheduled,
	time_t *	pwhen,
	int *		pretry,
	time_t *	pnext_timeslot
	)
{
	time_t	now;
	time_t	when;
	int	retry;

	now = time(NULL);
	retry = *pretry;
	when = max(now + retry, *pnext_timeslot);
	*pnext_timeslot = when;
	retry = min(64, retry << 1);

	*pscheduled = now;
	*pwhen = when;
	*pretry = retry;
}

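/*
 * Worked example (a sketch, assuming the caller starts from an
 * initial retry interval of 4 seconds): successive failures are
 * rescheduled roughly 4, 8, 16, 32, and then 64 seconds apart,
 * after which the interval stays pinned at 64 seconds.  Each new
 * attempt is also scheduled no earlier than *pnext_timeslot, so
 * requests sharing a dnschild_ctx do not land in the same timeslot.
 */
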
/*
 * should_retry_dns is a helper used by getaddrinfo_sometime_complete
 * and getnameinfo_sometime_complete which implements ntpd's DNS retry
 * policy.
 */
static int
should_retry_dns(
	int	rescode,
	int	res_errno
	)
{
	static int	eai_again_seen;
	int		again;
#if defined (EAI_SYSTEM) && defined(DEBUG)
	char		msg[256];
#endif

	/*
	 * If the resolver failed, see if the failure looks
	 * temporary.  If so, return nonzero so the caller retries.
	 */
	again = 0;

	switch (rescode) {

	case EAI_FAIL:
		again = 1;
		break;

	case EAI_AGAIN:
		again = 1;
		eai_again_seen = 1;		/* [Bug 1178] */
		break;

	case EAI_NONAME:
#if defined(EAI_NODATA) && (EAI_NODATA != EAI_NONAME)
	case EAI_NODATA:
#endif
		again = !eai_again_seen;	/* [Bug 1178] */
		break;

#ifdef EAI_SYSTEM
	case EAI_SYSTEM:
		/*
		 * EAI_SYSTEM means the real error is in errno.  We should be more
		 * discriminating about which errno values require retrying, but
		 * this matches existing behavior.
		 */
		again = 1;
# ifdef DEBUG
		errno_to_str(res_errno, msg, sizeof(msg));
		TRACE(1, ("intres: EAI_SYSTEM errno %d (%s) means try again, right?\n",
			  res_errno, msg));
# endif
		break;
#endif
	}

	TRACE(2, ("intres: resolver returned: %s (%d), %sretrying\n",
		  gai_strerror(rescode), rescode, again ? "" : "not "));

	return again;
}

#else	/* !WORKER follows */
int ntp_intres_nonempty_compilation_unit;
#endif
