ntp_restrict.c revision 358659
/*
 * ntp_restrict.c - determine host restrictions
 */
#ifdef HAVE_CONFIG_H
#include <config.h>
#endif

#include <stdio.h>
#include <sys/types.h>

#include "ntpd.h"
#include "ntp_if.h"
#include "ntp_lists.h"
#include "ntp_stdlib.h"
#include "ntp_assert.h"

/*
 * This code keeps a simple address-and-mask list of hosts we want
 * to place restrictions on (or remove them from). The restrictions
 * are implemented as a set of flags which tell you what the host
 * can't do. There is a subroutine entry to return the flags. The
 * list is kept sorted to reduce the average number of comparisons
 * and make sure you get the set of restrictions most specific to
 * the address.
 *
 * The algorithm is that, when looking up a host, it is first assumed
 * that the default set of restrictions will apply. It then searches
 * down through the list. Whenever it finds a match it adopts the
 * match's flags instead. When you hit the point where the sorted
 * address is greater than the target, you return with the last set of
 * flags you found. Because of the ordering of the list, the most
 * specific match will provide the final set of flags.
 *
 * This was originally intended to restrict you from sync'ing to your
 * own broadcasts when you are doing that, by restricting yourself from
 * your own interfaces. It was also thought it would sometimes be useful
 * to keep a misbehaving host or two from abusing your primary clock. It
 * has been expanded, however, to suit the needs of those with more
 * restrictive access policies.
 */
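/*
 * Illustrative sketch (not part of the build): a caller looks up the
 * restrictions for a packet's source address with restrictions() and
 * then tests the returned rflags bits, along the lines of
 *
 *	r4addr	r4a;
 *
 *	restrictions(&srcadr, &r4a);
 *	if (RES_IGNORE & r4a.rflags)
 *		return;			// drop the packet silently
 *
 * Here srcadr stands in for the packet's source sockaddr_u; the real
 * checks live in the protocol code.  This only shows how the flags
 * produced by this module are meant to be consumed.
 */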
/*
 * We will use two lists, one for IPv4 addresses and one for IPv6
 * addresses. This is not protocol-independent, but for now I can't
 * see a way around it. We'll check this later... JFB 07/2001
 */
#define MASK_IPV6_ADDR(dst, src, msk)					\
	do {								\
		int idx;						\
		for (idx = 0; idx < (int)COUNTOF((dst)->s6_addr); idx++) { \
			(dst)->s6_addr[idx] = (src)->s6_addr[idx]	\
					      & (msk)->s6_addr[idx];	\
		}							\
	} while (0)
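/*
 * Illustrative use of MASK_IPV6_ADDR (this mirrors how
 * match_restrict6_addr() below applies it): given a packet source
 * address addr and a list entry res, the masked form compared against
 * the entry is
 *
 *	struct in6_addr	masked;
 *
 *	MASK_IPV6_ADDR(&masked, addr, &res->u.v6.mask);
 *	if (ADDR6_EQ(&masked, &res->u.v6.addr))
 *		...
 *
 * i.e. a byte-wise AND, mirroring the (addr & mask) test used for the
 * IPv4 entries.
 */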

/*
 * We allocate INC_RESLIST{4|6} entries to the free list whenever empty.
 * Auto-tune these to be just less than 1KB (leaving at least 16 bytes
 * for allocator overhead).
 */
#define	INC_RESLIST4	((1024 - 16) / V4_SIZEOF_RESTRICT_U)
#define	INC_RESLIST6	((1024 - 16) / V6_SIZEOF_RESTRICT_U)
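/*
 * Worked example with a hypothetical size (the real value depends on
 * the platform): if V4_SIZEOF_RESTRICT_U were 48 bytes, INC_RESLIST4
 * would evaluate to (1024 - 16) / 48 == 21 entries per slab, using
 * 1008 bytes and leaving 16 for allocator overhead within 1KB.
 */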

/*
 * The restriction list
 */
restrict_u *restrictlist4;
restrict_u *restrictlist6;
static int restrictcount;	/* count in the restrict lists */

/*
 * The free list and associated counters.  Also some uninteresting
 * stat counters.
 */
static restrict_u *resfree4;	/* available entries (free list) */
static restrict_u *resfree6;

static u_long res_calls;
static u_long res_found;
static u_long res_not_found;

/*
 * Count number of restriction entries referring to RES_LIMITED, to
 * control implicit activation/deactivation of the MRU monlist.
 */
static	u_long	res_limited_refcnt;
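/*
 * inc_res_limited() and dec_res_limited() below maintain this count:
 * adding the first RES_LIMITED entry starts the MRU monitor via
 * mon_start(MON_RES), and removing the last one stops it again via
 * mon_stop(MON_RES).
 */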

/*
 * Our default entries.
 *
 * We can make this cleaner with C99 support: see init_restrict().
 */
static	restrict_u	restrict_def4;
static	restrict_u	restrict_def6;

/*
 * "restrict source ..." enabled knob and restriction bits.
 */
static	int		restrict_source_enabled;
static	u_int32		restrict_source_rflags;
static	u_short		restrict_source_mflags;
static	short		restrict_source_ippeerlimit;

/*
 * private functions
 */
static restrict_u *	alloc_res4(void);
static restrict_u *	alloc_res6(void);
static void		free_res(restrict_u *, int);
static void		inc_res_limited(void);
static void		dec_res_limited(void);
static restrict_u *	match_restrict4_addr(u_int32, u_short);
static restrict_u *	match_restrict6_addr(const struct in6_addr *,
					     u_short);
static restrict_u *	match_restrict_entry(const restrict_u *, int);
static int		res_sorts_before4(restrict_u *, restrict_u *);
static int		res_sorts_before6(restrict_u *, restrict_u *);
static char *		roptoa(restrict_op op);


void	dump_restricts(void);

/*
 * dump_restrict - spit out a restrict_u
 */
static void
dump_restrict(
	restrict_u *	res,
	int		is_ipv6
	)
{
	char as[INET6_ADDRSTRLEN];
	char ms[INET6_ADDRSTRLEN];

	if (is_ipv6) {
		inet_ntop(AF_INET6, &res->u.v6.addr, as, sizeof as);
		inet_ntop(AF_INET6, &res->u.v6.mask, ms, sizeof ms);
	} else {
		struct in_addr	sia = { htonl(res->u.v4.addr) };
		struct in_addr	sim = { htonl(res->u.v4.mask) };

		inet_ntop(AF_INET, &sia, as, sizeof as);
		inet_ntop(AF_INET, &sim, ms, sizeof ms);
	}
	mprintf("restrict node at %p: %s/%s count %d, rflags %08x, mflags %04x, ippeerlimit %d, expire %lu, next %p\n",
		res, as, ms, res->count, res->rflags, res->mflags,
		res->ippeerlimit, res->expire, res->link);
	return;
}


/*
 * dump_restricts - spit out the 'restrict' lines
 */
void
dump_restricts(void)
{
	restrict_u *	res;
	restrict_u *	next;

	mprintf("dump_restrict: restrict_def4: %p\n", &restrict_def4);
	/* Spit out 'restrict {,-4,-6} default ...' lines, if needed */
	for (res = &restrict_def4; res != NULL; res = next) {
		dump_restrict(res, 0);
		next = res->link;
	}

	mprintf("dump_restrict: restrict_def6: %p\n", &restrict_def6);
	for (res = &restrict_def6; res != NULL; res = next) {
		dump_restrict(res, 1);
		next = res->link;
	}

	/* Spit out the IPv4 list */
	mprintf("dump_restrict: restrictlist4: %p\n", &restrictlist4);
	for (res = restrictlist4; res != NULL; res = next) {
		dump_restrict(res, 0);
		next = res->link;
	}

	/* Spit out the IPv6 list */
	mprintf("dump_restrict: restrictlist6: %p\n", &restrictlist6);
	for (res = restrictlist6; res != NULL; res = next) {
		dump_restrict(res, 1);
		next = res->link;
	}

	return;
}

/*
 * init_restrict - initialize the restriction data structures
 */
void
init_restrict(void)
{
	/*
	 * The restriction lists begin with a default entry with address
	 * and mask 0, which will match any address.  The lists are kept
	 * sorted by descending address followed by descending mask:
	 *
	 *   address	  mask
	 * 192.168.0.0	255.255.255.0	kod limited noquery nopeer
	 * 192.168.0.0	255.255.0.0	kod limited
	 * 0.0.0.0	0.0.0.0		kod limited noquery
	 *
	 * The first entry which matches an address is used.  With the
	 * example restrictions above, 192.168.0.0/24 matches the first
	 * entry, the rest of 192.168.0.0/16 matches the second, and
	 * everything else matches the third (default).
	 *
	 * Note this achieves the same result a little more efficiently
	 * than the documented behavior, which is to keep the lists
	 * sorted by ascending address followed by ascending mask, with
	 * the _last_ matching entry used.
	 *
	 * An additional wrinkle is that we may have multiple entries
	 * with the same address and mask but differing match flags
	 * (mflags).  At present there is only one, RESM_NTPONLY.
	 * Entries with RESM_NTPONLY are sorted earlier so they take
	 * precedence over any otherwise similar entry without it.
	 * Again, this is the same behavior the documentation describes,
	 * just implemented in reverse.
	 */

	restrict_def4.ippeerlimit = -1;		/* Cleaner if we have C99 */
	restrict_def6.ippeerlimit = -1;		/* Cleaner if we have C99 */

	LINK_SLIST(restrictlist4, &restrict_def4, link);
	LINK_SLIST(restrictlist6, &restrict_def6, link);
	restrictcount = 2;
}


static restrict_u *
alloc_res4(void)
{
	const size_t	cb = V4_SIZEOF_RESTRICT_U;
	const size_t	count = INC_RESLIST4;
	restrict_u *	rl;
	restrict_u *	res;
	size_t		i;

	UNLINK_HEAD_SLIST(res, resfree4, link);
	if (res != NULL)
		return res;

	rl = eallocarray(count, cb);
	/* link all but the first onto free list */
	res = (void *)((char *)rl + (count - 1) * cb);
	for (i = count - 1; i > 0; i--) {
		LINK_SLIST(resfree4, res, link);
		res = (void *)((char *)res - cb);
	}
	INSIST(rl == res);
	/* allocate the first */
	return res;
}


static restrict_u *
alloc_res6(void)
{
	const size_t	cb = V6_SIZEOF_RESTRICT_U;
	const size_t	count = INC_RESLIST6;
	restrict_u *	rl;
	restrict_u *	res;
	size_t		i;

	UNLINK_HEAD_SLIST(res, resfree6, link);
	if (res != NULL)
		return res;

	rl = eallocarray(count, cb);
	/* link all but the first onto free list */
	res = (void *)((char *)rl + (count - 1) * cb);
	for (i = count - 1; i > 0; i--) {
		LINK_SLIST(resfree6, res, link);
		res = (void *)((char *)res - cb);
	}
	INSIST(rl == res);
	/* allocate the first */
	return res;
}


static void
free_res(
	restrict_u *	res,
	int		v6
	)
{
	restrict_u **	plisthead;
	restrict_u *	unlinked;

	restrictcount--;
	if (RES_LIMITED & res->rflags)
		dec_res_limited();

	if (v6)
		plisthead = &restrictlist6;
	else
		plisthead = &restrictlist4;
	UNLINK_SLIST(unlinked, *plisthead, res, link, restrict_u);
	INSIST(unlinked == res);

	if (v6) {
		zero_mem(res, V6_SIZEOF_RESTRICT_U);
		plisthead = &resfree6;
	} else {
		zero_mem(res, V4_SIZEOF_RESTRICT_U);
		plisthead = &resfree4;
	}
	LINK_SLIST(*plisthead, res, link);
}


static void
inc_res_limited(void)
{
	if (!res_limited_refcnt)
		mon_start(MON_RES);
	res_limited_refcnt++;
}


static void
dec_res_limited(void)
{
	res_limited_refcnt--;
	if (!res_limited_refcnt)
		mon_stop(MON_RES);
}

static restrict_u *
match_restrict4_addr(
	u_int32	addr,
	u_short	port
	)
{
	const int	v6 = 0;
	restrict_u *	res;
	restrict_u *	next;

	for (res = restrictlist4; res != NULL; res = next) {
		struct in_addr	sia = { htonl(res->u.v4.addr) };

		next = res->link;
		DPRINTF(2, ("match_restrict4_addr: Checking %s, port %d ... ",
			    inet_ntoa(sia), port));
		if (   res->expire
		    && res->expire <= current_time) {
			free_res(res, v6);	/* zeroes the contents */
			continue;		/* do not match the freed entry */
		}
		if (   res->u.v4.addr == (addr & res->u.v4.mask)
		    && (   !(RESM_NTPONLY & res->mflags)
			|| NTP_PORT == port)) {
			DPRINTF(2, ("MATCH: ippeerlimit %d\n", res->ippeerlimit));
			break;
		}
		DPRINTF(2, ("doesn't match: ippeerlimit %d\n", res->ippeerlimit));
	}
	return res;
}


static restrict_u *
match_restrict6_addr(
	const struct in6_addr *	addr,
	u_short			port
	)
{
	const int	v6 = 1;
	restrict_u *	res;
	restrict_u *	next;
	struct in6_addr	masked;

	for (res = restrictlist6; res != NULL; res = next) {
		next = res->link;
		INSIST(next != res);
		if (res->expire &&
		    res->expire <= current_time) {
			free_res(res, v6);	/* zeroes the contents */
			continue;		/* do not match the freed entry */
		}
		MASK_IPV6_ADDR(&masked, addr, &res->u.v6.mask);
		if (ADDR6_EQ(&masked, &res->u.v6.addr)
		    && (!(RESM_NTPONLY & res->mflags)
			|| NTP_PORT == (int)port))
			break;
	}
	return res;
}


/*
 * match_restrict_entry - find an exact match on a restrict list.
 *
 * Exact match is addr, mask, and mflags all equal.
 * In order to use more common code for IPv4 and IPv6, this routine
 * requires the caller to populate a restrict_u with mflags and either
 * the v4 or v6 address and mask as appropriate.  Other fields in the
 * input restrict_u are ignored.
 */
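/*
 * For example (roughly how hack_restrict() below builds its template
 * on the stack before calling this routine for an IPv4 restriction):
 *
 *	restrict_u	match;
 *
 *	ZERO(match);
 *	match.u.v4.mask = SRCADR(resmask);
 *	match.u.v4.addr = SRCADR(resaddr) & match.u.v4.mask;
 *	match.mflags = mflags;
 *	res = match_restrict_entry(&match, 0);	// nonzero for IPv6
 *
 * IPv6 callers fill u.v6.addr/u.v6.mask in network byte order instead.
 */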
static restrict_u *
match_restrict_entry(
	const restrict_u *	pmatch,
	int			v6
	)
{
	restrict_u *res;
	restrict_u *rlist;
	size_t cb;

	if (v6) {
		rlist = restrictlist6;
		cb = sizeof(pmatch->u.v6);
	} else {
		rlist = restrictlist4;
		cb = sizeof(pmatch->u.v4);
	}

	for (res = rlist; res != NULL; res = res->link)
		if (res->mflags == pmatch->mflags &&
		    !memcmp(&res->u, &pmatch->u, cb))
			break;
	return res;
}


/*
 * res_sorts_before4 - compare two restrict4 entries
 *
 * Returns nonzero if r1 sorts before r2.  We sort by descending
 * address, then descending mask, then descending mflags, so sorting
 * before means having a higher value.
 */
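/*
 * Worked example: an entry for 192.168.0.0 mask 255.255.255.0 sorts
 * before one for 192.168.0.0 mask 255.255.0.0, because the addresses
 * compare equal and 0xffffff00 > 0xffff0000; both sort before the
 * default entry (address and mask 0).  match_restrict4_addr() above
 * therefore encounters the most specific matching entry first.
 */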
static int
res_sorts_before4(
	restrict_u *r1,
	restrict_u *r2
	)
{
	int r1_before_r2;

	if (r1->u.v4.addr > r2->u.v4.addr)
		r1_before_r2 = 1;
	else if (r1->u.v4.addr < r2->u.v4.addr)
		r1_before_r2 = 0;
	else if (r1->u.v4.mask > r2->u.v4.mask)
		r1_before_r2 = 1;
	else if (r1->u.v4.mask < r2->u.v4.mask)
		r1_before_r2 = 0;
	else if (r1->mflags > r2->mflags)
		r1_before_r2 = 1;
	else
		r1_before_r2 = 0;

	return r1_before_r2;
}


/*
 * res_sorts_before6 - compare two restrict6 entries
 *
 * Returns nonzero if r1 sorts before r2.  We sort by descending
 * address, then descending mask, then descending mflags, so sorting
 * before means having a higher value.
 */
static int
res_sorts_before6(
	restrict_u *r1,
	restrict_u *r2
	)
{
	int r1_before_r2;
	int cmp;

	cmp = ADDR6_CMP(&r1->u.v6.addr, &r2->u.v6.addr);
	if (cmp > 0)		/* r1->addr > r2->addr */
		r1_before_r2 = 1;
	else if (cmp < 0)	/* r2->addr > r1->addr */
		r1_before_r2 = 0;
	else {
		cmp = ADDR6_CMP(&r1->u.v6.mask, &r2->u.v6.mask);
		if (cmp > 0)		/* r1->mask > r2->mask */
			r1_before_r2 = 1;
		else if (cmp < 0)	/* r2->mask > r1->mask */
			r1_before_r2 = 0;
		else if (r1->mflags > r2->mflags)
			r1_before_r2 = 1;
		else
			r1_before_r2 = 0;
	}

	return r1_before_r2;
}


/*
 * restrictions - return restrictions for this host in *r4a
 */
void
restrictions(
	sockaddr_u *srcadr,
	r4addr *r4a
	)
{
	restrict_u *match;
	struct in6_addr *pin6;

	REQUIRE(NULL != r4a);

	res_calls++;
	r4a->rflags = RES_IGNORE;
	r4a->ippeerlimit = 0;

	DPRINTF(1, ("restrictions: looking up %s\n", stoa(srcadr)));

	/* IPv4 source address */
	if (IS_IPV4(srcadr)) {
		/*
		 * Ignore any packets with a multicast source address
		 * (this should be done early in the receive process,
		 * not later!)
		 */
		if (IN_CLASSD(SRCADR(srcadr))) {
			DPRINTF(1, ("restrictions: srcadr %s is multicast\n", stoa(srcadr)));
			r4a->ippeerlimit = 2;	/* XXX: we should use a better value */
			return;
		}

		match = match_restrict4_addr(SRCADR(srcadr),
					     SRCPORT(srcadr));

		INSIST(match != NULL);

		match->count++;
		/*
		 * res_not_found counts only use of the final default
		 * entry, not any "restrict default ntpport ...", which
		 * would be just before the final default.
		 */
		if (&restrict_def4 == match)
			res_not_found++;
		else
			res_found++;
		r4a->rflags = match->rflags;
		r4a->ippeerlimit = match->ippeerlimit;
	}

	/* IPv6 source address */
	if (IS_IPV6(srcadr)) {
		pin6 = PSOCK_ADDR6(srcadr);

		/*
		 * Ignore any packets with a multicast source address
		 * (this should be done early in the receive process,
		 * not later!)
		 */
		if (IN6_IS_ADDR_MULTICAST(pin6))
			return;

		match = match_restrict6_addr(pin6, SRCPORT(srcadr));
		INSIST(match != NULL);
		match->count++;
		if (&restrict_def6 == match)
			res_not_found++;
		else
			res_found++;
		r4a->rflags = match->rflags;
		r4a->ippeerlimit = match->ippeerlimit;
	}

	return;
}


/*
 * roptoa - convert a restrict_op to a string
 */
static char *
roptoa(restrict_op op)
{
	static char sb[30];

	switch (op) {
	    case RESTRICT_FLAGS:	return "RESTRICT_FLAGS";
	    case RESTRICT_UNFLAG:	return "RESTRICT_UNFLAG";
	    case RESTRICT_REMOVE:	return "RESTRICT_REMOVE";
	    case RESTRICT_REMOVEIF:	return "RESTRICT_REMOVEIF";
	    default:
		snprintf(sb, sizeof sb, "**RESTRICT_#%d**", op);
		return sb;
	}
}


/*
 * hack_restrict - add/subtract/manipulate entries on the restrict list
 */
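/*
 * Illustrative call (a sketch; the real calls come from the
 * configuration code and from restrict_source() below): a line such as
 *
 *	restrict 192.168.0.0 mask 255.255.0.0 kod limited
 *
 * arrives here roughly as
 *
 *	hack_restrict(RESTRICT_FLAGS, &resaddr, &resmask, -1,
 *		      0, RES_KOD | RES_LIMITED, 0);
 *
 * with resaddr/resmask holding the parsed address and mask, while
 * "restrict source ..." is signaled with a NULL resaddr, which only
 * records the flags for later use by restrict_source().
 */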
void
hack_restrict(
	restrict_op	op,
	sockaddr_u *	resaddr,
	sockaddr_u *	resmask,
	short		ippeerlimit,
	u_short		mflags,
	u_short		rflags,
	u_long		expire
	)
{
	int		v6;
	restrict_u	match;
	restrict_u *	res;
	restrict_u **	plisthead;

	DPRINTF(1, ("hack_restrict: op %s addr %s mask %s ippeerlimit %d mflags %08x rflags %08x\n",
		    roptoa(op), stoa(resaddr), stoa(resmask), ippeerlimit, mflags, rflags));

	if (NULL == resaddr) {
		REQUIRE(NULL == resmask);
		REQUIRE(RESTRICT_FLAGS == op);
		restrict_source_rflags = rflags;
		restrict_source_mflags = mflags;
		restrict_source_ippeerlimit = ippeerlimit;
		restrict_source_enabled = 1;
		return;
	}

	ZERO(match);

#if 0
	/* silence VC9 potentially uninit warnings */
	// HMS: let's use a compiler-specific "enable" for this.
	res = NULL;
	v6 = 0;
#endif

	if (IS_IPV4(resaddr)) {
		v6 = 0;
		/*
		 * Get address and mask in host byte order for easy
		 * comparison as u_int32
		 */
		match.u.v4.addr = SRCADR(resaddr);
		match.u.v4.mask = SRCADR(resmask);
		match.u.v4.addr &= match.u.v4.mask;

	} else if (IS_IPV6(resaddr)) {
		v6 = 1;
		/*
		 * Get address and mask in network byte order for easy
		 * comparison as byte sequences (e.g. memcmp())
		 */
		match.u.v6.mask = SOCK_ADDR6(resmask);
		MASK_IPV6_ADDR(&match.u.v6.addr, PSOCK_ADDR6(resaddr),
			       &match.u.v6.mask);

	} else	/* not IPv4 nor IPv6 */
		REQUIRE(0);

	match.rflags = rflags;
	match.mflags = mflags;
	match.ippeerlimit = ippeerlimit;
	match.expire = expire;
	res = match_restrict_entry(&match, v6);

	switch (op) {

	case RESTRICT_FLAGS:
		/*
		 * Here we add bits to the rflags. If this is a
		 * new restriction add it.
		 */
		if (NULL == res) {
			if (v6) {
				res = alloc_res6();
				memcpy(res, &match,
				       V6_SIZEOF_RESTRICT_U);
				plisthead = &restrictlist6;
			} else {
				res = alloc_res4();
				memcpy(res, &match,
				       V4_SIZEOF_RESTRICT_U);
				plisthead = &restrictlist4;
			}
			LINK_SORT_SLIST(
				*plisthead, res,
				(v6)
				  ? res_sorts_before6(res, L_S_S_CUR())
				  : res_sorts_before4(res, L_S_S_CUR()),
				link, restrict_u);
			restrictcount++;
			if (RES_LIMITED & rflags)
				inc_res_limited();
		} else {
			if (   (RES_LIMITED & rflags)
			    && !(RES_LIMITED & res->rflags))
				inc_res_limited();
			res->rflags |= rflags;
		}

		res->ippeerlimit = match.ippeerlimit;

		break;

	case RESTRICT_UNFLAG:
		/*
		 * Remove some bits from the rflags. If we didn't
		 * find this one, just return.
		 */
		if (res != NULL) {
			if (   (RES_LIMITED & res->rflags)
			    && (RES_LIMITED & rflags))
				dec_res_limited();
			res->rflags &= ~rflags;
		}
		break;

	case RESTRICT_REMOVE:
	case RESTRICT_REMOVEIF:
		/*
		 * Remove an entry from the table entirely if we
		 * found one. Don't remove the default entry and
		 * don't remove an interface entry.
		 */
		if (res != NULL
		    && (RESTRICT_REMOVEIF == op
			|| !(RESM_INTERFACE & res->mflags))
		    && res != &restrict_def4
		    && res != &restrict_def6)
			free_res(res, v6);
		break;

	default:	/* unknown op */
		INSIST(0);
		break;
	}

}


/*
 * restrict_source - maintains dynamic "restrict source ..." entries as
 *		     peers come and go.
 */
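/*
 * Rough lifecycle (the callers live in the peer management code): when
 * a peer is mobilized, restrict_source(addr, 0, expire) installs a
 * host-specific (all-ones mask) entry carrying the flags saved by
 * "restrict source ..."; when the peer is demobilized the entry is
 * dropped again via restrict_source(addr, 1, 0).  Entries added with a
 * nonzero expire (pool solicitations) age out on their own through the
 * expire checks in the match routines above.
 */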
void
restrict_source(
	sockaddr_u *	addr,
	int		farewell,	/* 0 to add, 1 to remove */
	u_long		expire		/* 0 is infinite, else valid until */
	)
{
	sockaddr_u	onesmask;
	restrict_u *	res;
	int		found_specific;

	if (!restrict_source_enabled || SOCK_UNSPEC(addr) ||
	    IS_MCAST(addr) || ISREFCLOCKADR(addr))
		return;

	REQUIRE(AF_INET == AF(addr) || AF_INET6 == AF(addr));

	SET_HOSTMASK(&onesmask, AF(addr));
	if (farewell) {
		hack_restrict(RESTRICT_REMOVE, addr, &onesmask,
			      -2, 0, 0, 0);
		DPRINTF(1, ("restrict_source: %s removed\n", stoa(addr)));
		return;
	}

	/*
	 * If there is a specific entry for this address, hands
	 * off, as it is considered more specific than "restrict
	 * source ...".
	 * However, if the specific entry found is a fleeting one
	 * added by pool_xmit() before soliciting, replace it
	 * immediately regardless of the expire value to make way
	 * for the more persistent entry.
	 */
	if (IS_IPV4(addr)) {
		res = match_restrict4_addr(SRCADR(addr), SRCPORT(addr));
		INSIST(res != NULL);
		found_specific = (SRCADR(&onesmask) == res->u.v4.mask);
	} else {
		res = match_restrict6_addr(&SOCK_ADDR6(addr),
					   SRCPORT(addr));
		INSIST(res != NULL);
		found_specific = ADDR6_EQ(&res->u.v6.mask,
					  &SOCK_ADDR6(&onesmask));
	}
	if (!expire && found_specific && res->expire) {
		found_specific = 0;
		free_res(res, IS_IPV6(addr));
	}
	if (found_specific)
		return;

	hack_restrict(RESTRICT_FLAGS, addr, &onesmask,
		      restrict_source_ippeerlimit,
		      restrict_source_mflags, restrict_source_rflags, expire);
	DPRINTF(1, ("restrict_source: %s host restriction added\n",
		    stoa(addr)));
}
