/*
 * ntp_restrict.c - determine host restrictions
 */
#ifdef HAVE_CONFIG_H
#include <config.h>
#endif

#include <stdio.h>
#include <sys/types.h>

#include "ntpd.h"
#include "ntp_if.h"
#include "ntp_lists.h"
#include "ntp_stdlib.h"
#include "ntp_assert.h"

/*
 * This code keeps a simple address-and-mask list of hosts we want
 * to place restrictions on (or remove them from). The restrictions
 * are implemented as a set of flags which tell you what the host
 * can't do. There is a subroutine entry to return the flags. The
 * list is kept sorted to reduce the average number of comparisons
 * and make sure you get the set of restrictions most specific to
 * the address.
 *
 * The algorithm is that, when looking up a host, it is first assumed
 * that the default set of restrictions will apply. It then searches
 * down through the list. Whenever it finds a match it adopts the
 * match's flags instead. When you hit the point where the sorted
 * address is greater than the target, you return with the last set of
 * flags you found. Because of the ordering of the list, the most
 * specific match will provide the final set of flags.
 *
 * This was originally intended to keep you from sync'ing to your own
 * broadcasts when you are broadcasting, by restricting yourself from
 * your own interfaces. It was also thought it would sometimes be
 * useful to keep a misbehaving host or two from abusing your primary
 * clock. It has been expanded, however, to suit the needs of those
 * with more restrictive access policies.
 */
/*
 * We will use two lists, one for IPv4 addresses and one for IPv6
 * addresses. This is not protocol-independent, but for now I can't
 * find a way around that. We'll check this later... JFB 07/2001
 */
#define MASK_IPV6_ADDR(dst, src, msk)					\
	do {								\
		int idx;						\
		for (idx = 0; idx < (int)COUNTOF((dst)->s6_addr); idx++) { \
			(dst)->s6_addr[idx] = (src)->s6_addr[idx]	\
					      & (msk)->s6_addr[idx];	\
		}							\
	} while (0)
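
/*
 * Illustrative note (added commentary, not from the original source):
 * the macro ANDs the two addresses byte by byte, so with a /64 mask
 * (ffff:ffff:ffff:ffff::) dst keeps the upper eight bytes of src and
 * has its lower eight bytes zeroed.
 */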

/*
 * We allocate INC_RESLIST{4|6} entries to the free list whenever empty.
 * Auto-tune these to be just less than 1KB (leaving at least 16 bytes
 * for allocator overhead).
 */
#define	INC_RESLIST4	((1024 - 16) / V4_SIZEOF_RESTRICT_U)
#define	INC_RESLIST6	((1024 - 16) / V6_SIZEOF_RESTRICT_U)

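/*
 * Worked example (added for illustration; the real value depends on
 * the restrict_u layout on a given platform): if V4_SIZEOF_RESTRICT_U
 * happened to be 48 bytes, INC_RESLIST4 would be (1024 - 16) / 48 = 21
 * entries, i.e. 1008 bytes per refill, just under the 1KB target.
 */
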
/*
 * The restriction list
 */
restrict_u *restrictlist4;
restrict_u *restrictlist6;
static int restrictcount;	/* count in the restrict lists */

/*
 * The free list and associated counters.  Also some uninteresting
 * stat counters.
 */
static restrict_u *resfree4;	/* available entries (free list) */
static restrict_u *resfree6;

static u_long res_calls;
static u_long res_found;
static u_long res_not_found;

/*
 * Count number of restriction entries referring to RES_LIMITED, to
 * control implicit activation/deactivation of the MRU monlist.
 */
static	u_long	res_limited_refcnt;

/*
 * Our default entries.
 */
static	restrict_u	restrict_def4;
static	restrict_u	restrict_def6;

/*
 * "restrict source ..." enabled knob and restriction bits.
 */
static	int		restrict_source_enabled;
static	u_short		restrict_source_flags;
static	u_short		restrict_source_mflags;

/*
 * private functions
 */
static restrict_u *	alloc_res4(void);
static restrict_u *	alloc_res6(void);
static void		free_res(restrict_u *, int);
static void		inc_res_limited(void);
static void		dec_res_limited(void);
static restrict_u *	match_restrict4_addr(u_int32, u_short);
static restrict_u *	match_restrict6_addr(const struct in6_addr *,
					     u_short);
static restrict_u *	match_restrict_entry(const restrict_u *, int);
static int		res_sorts_before4(restrict_u *, restrict_u *);
static int		res_sorts_before6(restrict_u *, restrict_u *);


/*
 * init_restrict - initialize the restriction data structures
 */
void
init_restrict(void)
{
	/*
	 * The restriction lists begin with a default entry with address
	 * and mask 0, which will match any address.  The lists are kept
	 * sorted by descending address followed by descending mask:
	 *
	 *   address	  mask		restrictions
	 * 192.168.0.0	255.255.255.0	kod limited noquery nopeer
	 * 192.168.0.0	255.255.0.0	kod limited
	 * 0.0.0.0	0.0.0.0		kod limited noquery
	 *
	 * The first entry which matches an address is used.  With the
	 * example restrictions above, 192.168.0.0/24 matches the first
	 * entry, the rest of 192.168.0.0/16 matches the second, and
	 * everything else matches the third (default).
	 *
	 * Note this achieves the same result a little more efficiently
	 * than the documented behavior, which is to keep the lists
	 * sorted by ascending address followed by ascending mask, with
	 * the _last_ matching entry used.
	 *
	 * An additional wrinkle is we may have multiple entries with
	 * the same address and mask but differing match flags (mflags).
	 * At present there is only one, RESM_NTPONLY.  Entries with
	 * RESM_NTPONLY are sorted earlier so they take precedence over
	 * any otherwise similar entry without it.  Again, the behavior
	 * matches the documentation, but the implementation is
	 * reversed.
	 */
	LINK_SLIST(restrictlist4, &restrict_def4, link);
	LINK_SLIST(restrictlist6, &restrict_def6, link);
	restrictcount = 2;
}


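/*
 * alloc_res4 / alloc_res6 - hand out a free restrict_u entry.
 *
 * (Added commentary.)  When the matching free list is empty, the
 * allocator refills it by carving INC_RESLIST{4|6} fixed-size slots
 * out of a single emalloc_zero() block, pushing all but the first
 * slot onto the free list and returning the first slot (rl) directly
 * to the caller.
 */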
static restrict_u *
alloc_res4(void)
{
	const size_t	cb = V4_SIZEOF_RESTRICT_U;
	const size_t	count = INC_RESLIST4;
	restrict_u *	rl;
	restrict_u *	res;
	size_t		i;

	UNLINK_HEAD_SLIST(res, resfree4, link);
	if (res != NULL)
		return res;

	rl = emalloc_zero(count * cb);
	/* link all but the first onto free list */
	res = (void *)((char *)rl + (count - 1) * cb);
	for (i = count - 1; i > 0; i--) {
		LINK_SLIST(resfree4, res, link);
		res = (void *)((char *)res - cb);
	}
	INSIST(rl == res);
	/* allocate the first */
	return res;
}


static restrict_u *
alloc_res6(void)
{
	const size_t	cb = V6_SIZEOF_RESTRICT_U;
	const size_t	count = INC_RESLIST6;
	restrict_u *	rl;
	restrict_u *	res;
	size_t		i;

	UNLINK_HEAD_SLIST(res, resfree6, link);
	if (res != NULL)
		return res;

	rl = emalloc_zero(count * cb);
	/* link all but the first onto free list */
	res = (void *)((char *)rl + (count - 1) * cb);
	for (i = count - 1; i > 0; i--) {
		LINK_SLIST(resfree6, res, link);
		res = (void *)((char *)res - cb);
	}
	INSIST(rl == res);
	/* allocate the first */
	return res;
}


static void
free_res(
	restrict_u *	res,
	int		v6
	)
{
	restrict_u **	plisthead;
	restrict_u *	unlinked;

	restrictcount--;
	if (RES_LIMITED & res->flags)
		dec_res_limited();

	if (v6)
		plisthead = &restrictlist6;
	else
		plisthead = &restrictlist4;
	UNLINK_SLIST(unlinked, *plisthead, res, link, restrict_u);
	INSIST(unlinked == res);

	if (v6) {
		zero_mem(res, V6_SIZEOF_RESTRICT_U);
		plisthead = &resfree6;
	} else {
		zero_mem(res, V4_SIZEOF_RESTRICT_U);
		plisthead = &resfree4;
	}
	LINK_SLIST(*plisthead, res, link);
}


static void
inc_res_limited(void)
{
	if (!res_limited_refcnt)
		mon_start(MON_RES);
	res_limited_refcnt++;
}


static void
dec_res_limited(void)
{
	res_limited_refcnt--;
	if (!res_limited_refcnt)
		mon_stop(MON_RES);
}


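/*
 * match_restrict4_addr / match_restrict6_addr - find the first
 * (i.e. most specific) entry matching a source address and port,
 * expiring stale entries encountered along the way.  (Added
 * commentary; the default entry guarantees a non-NULL result.)
 */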
static restrict_u *
match_restrict4_addr(
	u_int32	addr,
	u_short	port
	)
{
	const int	v6 = 0;
	restrict_u *	res;
	restrict_u *	next;

	for (res = restrictlist4; res != NULL; res = next) {
		next = res->link;
		if (res->expire &&
		    res->expire <= current_time)
			free_res(res, v6);
		if (res->u.v4.addr == (addr & res->u.v4.mask)
		    && (!(RESM_NTPONLY & res->mflags)
			|| NTP_PORT == port))
			break;
	}
	return res;
}


static restrict_u *
match_restrict6_addr(
	const struct in6_addr *	addr,
	u_short			port
	)
{
	const int	v6 = 1;
	restrict_u *	res;
	restrict_u *	next;
	struct in6_addr	masked;

	for (res = restrictlist6; res != NULL; res = next) {
		next = res->link;
		INSIST(next != res);
		if (res->expire &&
		    res->expire <= current_time)
			free_res(res, v6);
		MASK_IPV6_ADDR(&masked, addr, &res->u.v6.mask);
		if (ADDR6_EQ(&masked, &res->u.v6.addr)
		    && (!(RESM_NTPONLY & res->mflags)
			|| NTP_PORT == (int)port))
			break;
	}
	return res;
}


/*
 * match_restrict_entry - find an exact match on a restrict list.
 *
 * Exact match is addr, mask, and mflags all equal.
 * In order to use more common code for IPv4 and IPv6, this routine
 * requires the caller to populate a restrict_u with mflags and either
 * the v4 or v6 address and mask as appropriate.  Other fields in the
 * input restrict_u are ignored.
 */
static restrict_u *
match_restrict_entry(
	const restrict_u *	pmatch,
	int			v6
	)
{
	restrict_u *res;
	restrict_u *rlist;
	size_t cb;

	if (v6) {
		rlist = restrictlist6;
		cb = sizeof(pmatch->u.v6);
	} else {
		rlist = restrictlist4;
		cb = sizeof(pmatch->u.v4);
	}

	for (res = rlist; res != NULL; res = res->link)
		if (res->mflags == pmatch->mflags &&
		    !memcmp(&res->u, &pmatch->u, cb))
			break;
	return res;
}


/*
 * res_sorts_before4 - compare two restrict4 entries
 *
 * Returns nonzero if r1 sorts before r2.  We sort by descending
 * address, then descending mask, then descending mflags, so sorting
 * before means having a higher value.
 */
static int
res_sorts_before4(
	restrict_u *r1,
	restrict_u *r2
	)
{
	int r1_before_r2;

	if (r1->u.v4.addr > r2->u.v4.addr)
		r1_before_r2 = 1;
	else if (r1->u.v4.addr < r2->u.v4.addr)
		r1_before_r2 = 0;
	else if (r1->u.v4.mask > r2->u.v4.mask)
		r1_before_r2 = 1;
	else if (r1->u.v4.mask < r2->u.v4.mask)
		r1_before_r2 = 0;
	else if (r1->mflags > r2->mflags)
		r1_before_r2 = 1;
	else
		r1_before_r2 = 0;

	return r1_before_r2;
}

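/*
 * Worked example (added for illustration): comparing as host-order
 * u_int32 values, 192.168.1.0/255.255.255.0 sorts before
 * 192.168.0.0/255.255.0.0 because its address is higher, and
 * 192.168.0.0/255.255.255.0 sorts before 192.168.0.0/255.255.0.0
 * because, with equal addresses, its mask is higher (more specific).
 * The all-zero default entry therefore always sorts last.  The same
 * ordering applies to res_sorts_before6() below, using byte-wise
 * comparison via ADDR6_CMP().
 */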

/*
 * res_sorts_before6 - compare two restrict6 entries
 *
 * Returns nonzero if r1 sorts before r2.  We sort by descending
 * address, then descending mask, then descending mflags, so sorting
 * before means having a higher value.
 */
static int
res_sorts_before6(
	restrict_u *r1,
	restrict_u *r2
	)
{
	int r1_before_r2;
	int cmp;

	cmp = ADDR6_CMP(&r1->u.v6.addr, &r2->u.v6.addr);
	if (cmp > 0)		/* r1->addr > r2->addr */
		r1_before_r2 = 1;
	else if (cmp < 0)	/* r2->addr > r1->addr */
		r1_before_r2 = 0;
	else {
		cmp = ADDR6_CMP(&r1->u.v6.mask, &r2->u.v6.mask);
		if (cmp > 0)		/* r1->mask > r2->mask */
			r1_before_r2 = 1;
		else if (cmp < 0)	/* r2->mask > r1->mask */
			r1_before_r2 = 0;
		else if (r1->mflags > r2->mflags)
			r1_before_r2 = 1;
		else
			r1_before_r2 = 0;
	}

	return r1_before_r2;
}


/*
 * restrictions - return restrictions for this host
 */
u_short
restrictions(
	sockaddr_u *srcadr
	)
{
	restrict_u *match;
	struct in6_addr *pin6;
	u_short flags;

	res_calls++;
	flags = 0;
	/* IPv4 source address */
	if (IS_IPV4(srcadr)) {
		/*
		 * Ignore any packets with a multicast source address
		 * (this should be done early in the receive process,
		 * not later!)
		 */
		if (IN_CLASSD(SRCADR(srcadr)))
			return (int)RES_IGNORE;

		match = match_restrict4_addr(SRCADR(srcadr),
					     SRCPORT(srcadr));

		INSIST(match != NULL);

		match->count++;
		/*
		 * res_not_found counts only use of the final default
		 * entry, not any "restrict default ntpport ...", which
		 * would be just before the final default.
		 */
		if (&restrict_def4 == match)
			res_not_found++;
		else
			res_found++;
		flags = match->flags;
	}

	/* IPv6 source address */
	if (IS_IPV6(srcadr)) {
		pin6 = PSOCK_ADDR6(srcadr);

		/*
		 * Ignore any packets with a multicast source address
		 * (this should be done early in the receive process,
		 * not later!)
		 */
		if (IN6_IS_ADDR_MULTICAST(pin6))
			return (int)RES_IGNORE;

		match = match_restrict6_addr(pin6, SRCPORT(srcadr));
		INSIST(match != NULL);
		match->count++;
		if (&restrict_def6 == match)
			res_not_found++;
		else
			res_found++;
		flags = match->flags;
	}
	return (flags);
}

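/*
 * Typical call site (an illustrative sketch; the receive-buffer and
 * field names below are assumptions about the caller, not taken from
 * this file):
 *
 *	u_short restrict_mask;
 *
 *	restrict_mask = restrictions(&rbufp->recv_srcadr);
 *	if (RES_IGNORE & restrict_mask)
 *		return;		(drop the packet silently)
 */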

/*
 * hack_restrict - add/subtract/manipulate entries on the restrict list
 */
void
hack_restrict(
	int		op,
	sockaddr_u *	resaddr,
	sockaddr_u *	resmask,
	u_short		mflags,
	u_short		flags,
	u_long		expire
	)
{
	int		v6;
	restrict_u	match;
	restrict_u *	res;
	restrict_u **	plisthead;

	DPRINTF(1, ("restrict: op %d addr %s mask %s mflags %08x flags %08x\n",
		    op, stoa(resaddr), stoa(resmask), mflags, flags));

	if (NULL == resaddr) {
		REQUIRE(NULL == resmask);
		REQUIRE(RESTRICT_FLAGS == op);
		restrict_source_flags = flags;
		restrict_source_mflags = mflags;
		restrict_source_enabled = 1;
		return;
	}

	ZERO(match);

#if 0
	/* silence VC9 potentially uninit warnings */
	// HMS: let's use a compiler-specific "enable" for this.
	res = NULL;
	v6 = 0;
#endif

	if (IS_IPV4(resaddr)) {
		v6 = 0;
		/*
		 * Get address and mask in host byte order for easy
		 * comparison as u_int32
		 */
		match.u.v4.addr = SRCADR(resaddr);
		match.u.v4.mask = SRCADR(resmask);
		match.u.v4.addr &= match.u.v4.mask;

	} else if (IS_IPV6(resaddr)) {
		v6 = 1;
		/*
		 * Get address and mask in network byte order for easy
		 * comparison as byte sequences (e.g. memcmp())
		 */
		match.u.v6.mask = SOCK_ADDR6(resmask);
		MASK_IPV6_ADDR(&match.u.v6.addr, PSOCK_ADDR6(resaddr),
			       &match.u.v6.mask);

	} else	/* not IPv4 nor IPv6 */
		REQUIRE(0);

	match.flags = flags;
	match.mflags = mflags;
	match.expire = expire;
	res = match_restrict_entry(&match, v6);

	switch (op) {

	case RESTRICT_FLAGS:
		/*
		 * Here we add bits to the flags. If this is a
		 * new restriction add it.
		 */
		if (NULL == res) {
			if (v6) {
				res = alloc_res6();
				memcpy(res, &match,
				       V6_SIZEOF_RESTRICT_U);
				plisthead = &restrictlist6;
			} else {
				res = alloc_res4();
				memcpy(res, &match,
				       V4_SIZEOF_RESTRICT_U);
				plisthead = &restrictlist4;
			}
			LINK_SORT_SLIST(
				*plisthead, res,
				(v6)
				  ? res_sorts_before6(res, L_S_S_CUR())
				  : res_sorts_before4(res, L_S_S_CUR()),
				link, restrict_u);
			restrictcount++;
			if (RES_LIMITED & flags)
				inc_res_limited();
		} else {
			if ((RES_LIMITED & flags) &&
			    !(RES_LIMITED & res->flags))
				inc_res_limited();
			res->flags |= flags;
		}
		break;

	case RESTRICT_UNFLAG:
		/*
		 * Remove some bits from the flags. If we didn't
		 * find this one, just return.
		 */
		if (res != NULL) {
			if ((RES_LIMITED & res->flags)
			    && (RES_LIMITED & flags))
				dec_res_limited();
			res->flags &= ~flags;
		}
		break;

	case RESTRICT_REMOVE:
	case RESTRICT_REMOVEIF:
		/*
		 * Remove an entry from the table entirely if we
		 * found one. Don't remove the default entry and
		 * don't remove an interface entry.
		 */
		if (res != NULL
		    && (RESTRICT_REMOVEIF == op
			|| !(RESM_INTERFACE & res->mflags))
		    && res != &restrict_def4
		    && res != &restrict_def6)
			free_res(res, v6);
		break;

	default:	/* unknown op */
		INSIST(0);
		break;
	}

}

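/*
 * Illustrative mapping from configuration to this routine (a sketch,
 * not a quote from the config parser): a line such as
 *
 *	restrict 192.168.0.0 mask 255.255.0.0 kod limited
 *
 * would arrive here roughly as
 *
 *	hack_restrict(RESTRICT_FLAGS, &addr, &mask, 0,
 *		      RES_KOD | RES_LIMITED, 0);
 *
 * with addr and mask holding the parsed sockaddr_u values.
 */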

/*
 * restrict_source - maintains dynamic "restrict source ..." entries as
 *		     peers come and go.
 */
void
restrict_source(
	sockaddr_u *	addr,
	int		farewell,	/* 0 to add, 1 to remove */
	u_long		expire		/* 0 is infinite, valid until */
	)
{
	sockaddr_u	onesmask;
	restrict_u *	res;
	int		found_specific;

	if (!restrict_source_enabled || SOCK_UNSPEC(addr) ||
	    IS_MCAST(addr) || ISREFCLOCKADR(addr))
		return;

	REQUIRE(AF_INET == AF(addr) || AF_INET6 == AF(addr));

	SET_HOSTMASK(&onesmask, AF(addr));
	if (farewell) {
		hack_restrict(RESTRICT_REMOVE, addr, &onesmask,
			      0, 0, 0);
		DPRINTF(1, ("restrict_source: %s removed\n", stoa(addr)));
		return;
	}

	/*
	 * If there is a specific entry for this address, hands
	 * off, as it is considered more specific than "restrict
	 * server ...".
	 * However, if the specific entry found is a fleeting one
	 * added by pool_xmit() before soliciting, replace it
	 * immediately regardless of the expire value to make way
	 * for the more persistent entry.
	 */
	if (IS_IPV4(addr)) {
		res = match_restrict4_addr(SRCADR(addr), SRCPORT(addr));
		INSIST(res != NULL);
		found_specific = (SRCADR(&onesmask) == res->u.v4.mask);
	} else {
		res = match_restrict6_addr(&SOCK_ADDR6(addr),
					   SRCPORT(addr));
		INSIST(res != NULL);
		found_specific = ADDR6_EQ(&res->u.v6.mask,
					  &SOCK_ADDR6(&onesmask));
	}
	if (!expire && found_specific && res->expire) {
		found_specific = 0;
		free_res(res, IS_IPV6(addr));
	}
	if (found_specific)
		return;

	hack_restrict(RESTRICT_FLAGS, addr, &onesmask,
		      restrict_source_mflags, restrict_source_flags,
		      expire);
	DPRINTF(1, ("restrict_source: %s host restriction added\n",
		    stoa(addr)));
}