1/*
2 * ntp_restrict.c - determine host restrictions
3 */
4#ifdef HAVE_CONFIG_H
5#include <config.h>
6#endif
7
8#include <stdio.h>
9#include <sys/types.h>
10
11#include "ntpd.h"
12#include "ntp_if.h"
13#include "ntp_lists.h"
14#include "ntp_stdlib.h"
15#include "ntp_assert.h"
16
17/*
18 * This code keeps a simple address-and-mask list of hosts we want
19 * to place restrictions on (or remove them from). The restrictions
20 * are implemented as a set of flags which tell you what the host
21 * can't do. There is a subroutine entry to return the flags. The
22 * list is kept sorted to reduce the average number of comparisons
23 * and make sure you get the set of restrictions most specific to
24 * the address.
25 *
26 * The algorithm is that, when looking up a host, it is first assumed
27 * that the default set of restrictions will apply. It then searches
28 * down through the list. Whenever it finds a match it adopts the
29 * match's flags instead. When you hit the point where the sorted
30 * address is greater than the target, you return with the last set of
31 * flags you found. Because of the ordering of the list, the most
32 * specific match will provide the final set of flags.
33 *
34 * This was originally intended to restrict you from sync'ing to your
35 * own broadcasts when you are doing that, by restricting yourself from
36 * your own interfaces. It was also thought it would sometimes be useful
37 * to keep a misbehaving host or two from abusing your primary clock. It
38 * has been expanded, however, to suit the needs of those with more
39 * restrictive access policies.
40 */
41/*
42 * We will use two lists, one for IPv4 addresses and one for IPv6
 * addresses. This is not protocol-independent but for now I can't
44 * find a way to respect this. We'll check this later... JFB 07/2001
45 */
/*
 * MASK_IPV6_ADDR - byte-wise AND of an IPv6 address with a mask.
 * dst, src, msk point to struct in6_addr; dst may alias src because
 * each byte of dst is written only after the same byte of src is read.
 */
#define MASK_IPV6_ADDR(dst, src, msk)					\
	do {								\
		int idx;						\
		for (idx = 0; idx < (int)COUNTOF((dst)->s6_addr); idx++) { \
			(dst)->s6_addr[idx] = (src)->s6_addr[idx]	\
					      & (msk)->s6_addr[idx];	\
		}							\
	} while (0)
54
55/*
56 * We allocate INC_RESLIST{4|6} entries to the free list whenever empty.
57 * Auto-tune these to be just less than 1KB (leaving at least 16 bytes
58 * for allocator overhead).
59 */
60#define	INC_RESLIST4	((1024 - 16) / V4_SIZEOF_RESTRICT_U)
61#define	INC_RESLIST6	((1024 - 16) / V6_SIZEOF_RESTRICT_U)
62
63/*
64 * The restriction list
65 */
66restrict_u *restrictlist4;
67restrict_u *restrictlist6;
68static int restrictcount;	/* count in the restrict lists */
69
70/*
71 * The free list and associated counters.  Also some uninteresting
72 * stat counters.
73 */
74static restrict_u *resfree4;	/* available entries (free list) */
75static restrict_u *resfree6;
76
77static u_long res_calls;
78static u_long res_found;
79static u_long res_not_found;
80
81/*
82 * Count number of restriction entries referring to RES_LIMITED, to
83 * control implicit activation/deactivation of the MRU monlist.
84 */
85static	u_long res_limited_refcnt;
86
87/*
88 * Our default entries.
89 *
90 * We can make this cleaner with c99 support: see init_restrict().
91 */
92static	restrict_u	restrict_def4;
93static	restrict_u	restrict_def6;
94
95/*
96 * "restrict source ..." enabled knob and restriction bits.
97 */
98static	int		restrict_source_enabled;
99static	u_short		restrict_source_rflags;
100static	u_short		restrict_source_mflags;
101static	short		restrict_source_ippeerlimit;
102
103/*
104 * private functions
105 */
106static restrict_u *	alloc_res4(void);
107static restrict_u *	alloc_res6(void);
108static void		free_res(restrict_u *, int);
109static void		inc_res_limited(void);
110static void		dec_res_limited(void);
111static restrict_u *	match_restrict4_addr(u_int32, u_short);
112static restrict_u *	match_restrict6_addr(const struct in6_addr *,
113					     u_short);
114static restrict_u *	match_restrict_entry(const restrict_u *, int);
115static int		res_sorts_before4(restrict_u *, restrict_u *);
116static int		res_sorts_before6(restrict_u *, restrict_u *);
117static char *		roptoa(restrict_op op);
118
119
120void	dump_restricts(void);
121
122/*
123 * dump_restrict - spit out a restrict_u
124 */
125static void
126dump_restrict(
127	restrict_u *	res,
128	int		is_ipv6
129	)
130{
131	char as[INET6_ADDRSTRLEN];
132	char ms[INET6_ADDRSTRLEN];
133
134	if (is_ipv6) {
135		inet_ntop(AF_INET6, &res->u.v6.addr, as, sizeof as);
136		inet_ntop(AF_INET6, &res->u.v6.mask, ms, sizeof ms);
137	} else {
138		struct in_addr	sia = { htonl(res->u.v4.addr) };
139		struct in_addr	sim = { htonl(res->u.v4.mask) };
140
141		inet_ntop(AF_INET, &sia, as, sizeof as);
142		inet_ntop(AF_INET, &sim, ms, sizeof ms);
143	}
144	mprintf("restrict node at %p: %s/%s count %d, rflags %05x, mflags %05x, ippeerlimit %d, expire %lu, next %p\n",
145		res, as, ms, res->count, res->rflags, res->mflags,
146		res->ippeerlimit, res->expire, res->link);
147	return;
148}
149
150
151/*
152 * dump_restricts - spit out the 'restrict' lines
153 */
154void
155dump_restricts(void)
156{
157	int		defaultv4_done = 0;
158	int		defaultv6_done = 0;
159	restrict_u *	res;
160	restrict_u *	next;
161
162	mprintf("dump_restrict: restrict_def4: %p\n", &restrict_def4);
163	/* Spit out 'restrict {,-4,-6} default ...' lines, if needed */
164	for (res = &restrict_def4; res != NULL; res = next) {
165		dump_restrict(res, 0);
166		next = res->link;
167	}
168
169	mprintf("dump_restrict: restrict_def6: %p\n", &restrict_def6);
170	for (res = &restrict_def6; res != NULL; res = next) {
171		dump_restrict(res, 1);
172		next = res->link;
173	}
174
175	/* Spit out the IPv4 list */
176	mprintf("dump_restrict: restrictlist4: %p\n", &restrictlist4);
177	for (res = restrictlist4; res != NULL; res = next) {
178		dump_restrict(res, 0);
179		next = res->link;
180	}
181
182	/* Spit out the IPv6 list */
183	mprintf("dump_restrict: restrictlist6: %p\n", &restrictlist6);
184	for (res = restrictlist6; res != NULL; res = next) {
185		dump_restrict(res, 1);
186		next = res->link;
187	}
188
189	return;
190}
191
192/*
193 * init_restrict - initialize the restriction data structures
194 */
195void
196init_restrict(void)
197{
198	/*
199	 * The restriction lists begin with a default entry with address
200	 * and mask 0, which will match any entry.  The lists are kept
201	 * sorted by descending address followed by descending mask:
202	 *
203	 *   address	  mask
204	 * 192.168.0.0	255.255.255.0	kod limited noquery nopeer
205	 * 192.168.0.0	255.255.0.0	kod limited
206	 * 0.0.0.0	0.0.0.0		kod limited noquery
207	 *
208	 * The first entry which matches an address is used.  With the
209	 * example restrictions above, 192.168.0.0/24 matches the first
210	 * entry, the rest of 192.168.0.0/16 matches the second, and
211	 * everything else matches the third (default).
212	 *
213	 * Note this achieves the same result a little more efficiently
214	 * than the documented behavior, which is to keep the lists
215	 * sorted by ascending address followed by ascending mask, with
216	 * the _last_ matching entry used.
217	 *
218	 * An additional wrinkle is we may have multiple entries with
219	 * the same address and mask but differing match flags (mflags).
220	 * At present there is only one, RESM_NTPONLY.  Entries with
221	 * RESM_NTPONLY are sorted earlier so they take precedence over
222	 * any otherwise similar entry without.  Again, this is the same
223	 * behavior as but reversed implementation compared to the docs.
224	 *
225	 */
226
227	restrict_def4.ippeerlimit = -1;		/* Cleaner if we have C99 */
228	restrict_def6.ippeerlimit = -1;		/* Cleaner if we have C99 */
229
230	LINK_SLIST(restrictlist4, &restrict_def4, link);
231	LINK_SLIST(restrictlist6, &restrict_def6, link);
232	restrictcount = 2;
233}
234
235
236static restrict_u *
237alloc_res4(void)
238{
239	const size_t	cb = V4_SIZEOF_RESTRICT_U;
240	const size_t	count = INC_RESLIST4;
241	restrict_u *	rl;
242	restrict_u *	res;
243	size_t		i;
244
245	UNLINK_HEAD_SLIST(res, resfree4, link);
246	if (res != NULL)
247		return res;
248
249	rl = eallocarray(count, cb);
250	/* link all but the first onto free list */
251	res = (void *)((char *)rl + (count - 1) * cb);
252	for (i = count - 1; i > 0; i--) {
253		LINK_SLIST(resfree4, res, link);
254		res = (void *)((char *)res - cb);
255	}
256	INSIST(rl == res);
257	/* allocate the first */
258	return res;
259}
260
261
262static restrict_u *
263alloc_res6(void)
264{
265	const size_t	cb = V6_SIZEOF_RESTRICT_U;
266	const size_t	count = INC_RESLIST6;
267	restrict_u *	rl;
268	restrict_u *	res;
269	size_t		i;
270
271	UNLINK_HEAD_SLIST(res, resfree6, link);
272	if (res != NULL)
273		return res;
274
275	rl = eallocarray(count, cb);
276	/* link all but the first onto free list */
277	res = (void *)((char *)rl + (count - 1) * cb);
278	for (i = count - 1; i > 0; i--) {
279		LINK_SLIST(resfree6, res, link);
280		res = (void *)((char *)res - cb);
281	}
282	INSIST(rl == res);
283	/* allocate the first */
284	return res;
285}
286
287
288static void
289free_res(
290	restrict_u *	res,
291	int		v6
292	)
293{
294	restrict_u **	plisthead;
295	restrict_u *	unlinked;
296
297	restrictcount--;
298	if (RES_LIMITED & res->rflags)
299		dec_res_limited();
300
301	if (v6)
302		plisthead = &restrictlist6;
303	else
304		plisthead = &restrictlist4;
305	UNLINK_SLIST(unlinked, *plisthead, res, link, restrict_u);
306	INSIST(unlinked == res);
307
308	if (v6) {
309		zero_mem(res, V6_SIZEOF_RESTRICT_U);
310		plisthead = &resfree6;
311	} else {
312		zero_mem(res, V4_SIZEOF_RESTRICT_U);
313		plisthead = &resfree4;
314	}
315	LINK_SLIST(*plisthead, res, link);
316}
317
318
319static void
320inc_res_limited(void)
321{
322	if (!res_limited_refcnt)
323		mon_start(MON_RES);
324	res_limited_refcnt++;
325}
326
327
328static void
329dec_res_limited(void)
330{
331	res_limited_refcnt--;
332	if (!res_limited_refcnt)
333		mon_stop(MON_RES);
334}
335
336
337static restrict_u *
338match_restrict4_addr(
339	u_int32	addr,
340	u_short	port
341	)
342{
343	const int	v6 = 0;
344	restrict_u *	res;
345	restrict_u *	next;
346
347	for (res = restrictlist4; res != NULL; res = next) {
348		struct in_addr	sia = { htonl(res->u.v4.addr) };
349
350		next = res->link;
351		DPRINTF(2, ("match_restrict4_addr: Checking %s, port %d ... ",
352			    inet_ntoa(sia), port));
353		if (   res->expire
354		    && res->expire <= current_time)
355			free_res(res, v6);	/* zeroes the contents */
356		if (   res->u.v4.addr == (addr & res->u.v4.mask)
357		    && (   !(RESM_NTPONLY & res->mflags)
358			|| NTP_PORT == port)) {
359			DPRINTF(2, ("MATCH: ippeerlimit %d\n", res->ippeerlimit));
360			break;
361		}
362		DPRINTF(2, ("doesn't match: ippeerlimit %d\n", res->ippeerlimit));
363	}
364	return res;
365}
366
367
368static restrict_u *
369match_restrict6_addr(
370	const struct in6_addr *	addr,
371	u_short			port
372	)
373{
374	const int	v6 = 1;
375	restrict_u *	res;
376	restrict_u *	next;
377	struct in6_addr	masked;
378
379	for (res = restrictlist6; res != NULL; res = next) {
380		next = res->link;
381		INSIST(next != res);
382		if (res->expire &&
383		    res->expire <= current_time)
384			free_res(res, v6);
385		MASK_IPV6_ADDR(&masked, addr, &res->u.v6.mask);
386		if (ADDR6_EQ(&masked, &res->u.v6.addr)
387		    && (!(RESM_NTPONLY & res->mflags)
388			|| NTP_PORT == (int)port))
389			break;
390	}
391	return res;
392}
393
394
395/*
396 * match_restrict_entry - find an exact match on a restrict list.
397 *
398 * Exact match is addr, mask, and mflags all equal.
399 * In order to use more common code for IPv4 and IPv6, this routine
400 * requires the caller to populate a restrict_u with mflags and either
401 * the v4 or v6 address and mask as appropriate.  Other fields in the
402 * input restrict_u are ignored.
403 */
404static restrict_u *
405match_restrict_entry(
406	const restrict_u *	pmatch,
407	int			v6
408	)
409{
410	restrict_u *res;
411	restrict_u *rlist;
412	size_t cb;
413
414	if (v6) {
415		rlist = restrictlist6;
416		cb = sizeof(pmatch->u.v6);
417	} else {
418		rlist = restrictlist4;
419		cb = sizeof(pmatch->u.v4);
420	}
421
422	for (res = rlist; res != NULL; res = res->link)
423		if (res->mflags == pmatch->mflags &&
424		    !memcmp(&res->u, &pmatch->u, cb))
425			break;
426	return res;
427}
428
429
430/*
431 * res_sorts_before4 - compare two restrict4 entries
432 *
433 * Returns nonzero if r1 sorts before r2.  We sort by descending
434 * address, then descending mask, then descending mflags, so sorting
435 * before means having a higher value.
436 */
437static int
438res_sorts_before4(
439	restrict_u *r1,
440	restrict_u *r2
441	)
442{
443	int r1_before_r2;
444
445	if (r1->u.v4.addr > r2->u.v4.addr)
446		r1_before_r2 = 1;
447	else if (r1->u.v4.addr < r2->u.v4.addr)
448		r1_before_r2 = 0;
449	else if (r1->u.v4.mask > r2->u.v4.mask)
450		r1_before_r2 = 1;
451	else if (r1->u.v4.mask < r2->u.v4.mask)
452		r1_before_r2 = 0;
453	else if (r1->mflags > r2->mflags)
454		r1_before_r2 = 1;
455	else
456		r1_before_r2 = 0;
457
458	return r1_before_r2;
459}
460
461
462/*
463 * res_sorts_before6 - compare two restrict6 entries
464 *
465 * Returns nonzero if r1 sorts before r2.  We sort by descending
466 * address, then descending mask, then descending mflags, so sorting
467 * before means having a higher value.
468 */
469static int
470res_sorts_before6(
471	restrict_u *r1,
472	restrict_u *r2
473	)
474{
475	int r1_before_r2;
476	int cmp;
477
478	cmp = ADDR6_CMP(&r1->u.v6.addr, &r2->u.v6.addr);
479	if (cmp > 0)		/* r1->addr > r2->addr */
480		r1_before_r2 = 1;
481	else if (cmp < 0)	/* r2->addr > r1->addr */
482		r1_before_r2 = 0;
483	else {
484		cmp = ADDR6_CMP(&r1->u.v6.mask, &r2->u.v6.mask);
485		if (cmp > 0)		/* r1->mask > r2->mask*/
486			r1_before_r2 = 1;
487		else if (cmp < 0)	/* r2->mask > r1->mask */
488			r1_before_r2 = 0;
489		else if (r1->mflags > r2->mflags)
490			r1_before_r2 = 1;
491		else
492			r1_before_r2 = 0;
493	}
494
495	return r1_before_r2;
496}
497
498
499/*
500 * restrictions - return restrictions for this host in *r4a
501 */
502void
503restrictions(
504	sockaddr_u *srcadr,
505	r4addr *r4a
506	)
507{
508	restrict_u *match;
509	struct in6_addr *pin6;
510
511	REQUIRE(NULL != r4a);
512
513	res_calls++;
514	r4a->rflags = RES_IGNORE;
515	r4a->ippeerlimit = 0;
516
517	DPRINTF(1, ("restrictions: looking up %s\n", stoa(srcadr)));
518
519	/* IPv4 source address */
520	if (IS_IPV4(srcadr)) {
521		/*
522		 * Ignore any packets with a multicast source address
523		 * (this should be done early in the receive process,
524		 * not later!)
525		 */
526		if (IN_CLASSD(SRCADR(srcadr))) {
527			DPRINTF(1, ("restrictions: srcadr %s is multicast\n", stoa(srcadr)));
528			r4a->ippeerlimit = 2;	/* XXX: we should use a better value */
529			return;
530		}
531
532		match = match_restrict4_addr(SRCADR(srcadr),
533					     SRCPORT(srcadr));
534
535		INSIST(match != NULL);
536
537		match->count++;
538		/*
539		 * res_not_found counts only use of the final default
540		 * entry, not any "restrict default ntpport ...", which
541		 * would be just before the final default.
542		 */
543		if (&restrict_def4 == match)
544			res_not_found++;
545		else
546			res_found++;
547		r4a->rflags = match->rflags;
548		r4a->ippeerlimit = match->ippeerlimit;
549	}
550
551	/* IPv6 source address */
552	if (IS_IPV6(srcadr)) {
553		pin6 = PSOCK_ADDR6(srcadr);
554
555		/*
556		 * Ignore any packets with a multicast source address
557		 * (this should be done early in the receive process,
558		 * not later!)
559		 */
560		if (IN6_IS_ADDR_MULTICAST(pin6))
561			return;
562
563		match = match_restrict6_addr(pin6, SRCPORT(srcadr));
564		INSIST(match != NULL);
565		match->count++;
566		if (&restrict_def6 == match)
567			res_not_found++;
568		else
569			res_found++;
570		r4a->rflags = match->rflags;
571		r4a->ippeerlimit = match->ippeerlimit;
572	}
573	return;
574}
575
576
577/*
578 * roptoa - convert a restrict_op to a string
579 */
580char *
581roptoa(restrict_op op) {
582	static char sb[30];
583
584	switch(op) {
585	    case RESTRICT_FLAGS:	return "RESTRICT_FLAGS";
586	    case RESTRICT_UNFLAG:	return "RESTRICT_UNFLAGS";
587	    case RESTRICT_REMOVE:	return "RESTRICT_REMOVE";
588	    case RESTRICT_REMOVEIF:	return "RESTRICT_REMOVEIF";
589	    default:
590		snprintf(sb, sizeof sb, "**RESTRICT_#%d**", op);
591		return sb;
592	}
593}
594
595
596/*
597 * hack_restrict - add/subtract/manipulate entries on the restrict list
598 */
599void
600hack_restrict(
601	restrict_op	op,
602	sockaddr_u *	resaddr,
603	sockaddr_u *	resmask,
604	short		ippeerlimit,
605	u_short		mflags,
606	u_short		rflags,
607	u_long		expire
608	)
609{
610	int		v6;
611	restrict_u	match;
612	restrict_u *	res;
613	restrict_u **	plisthead;
614
615	DPRINTF(1, ("hack_restrict: op %s addr %s mask %s ippeerlimit %d mflags %08x rflags %08x\n",
616		    roptoa(op), stoa(resaddr), stoa(resmask), ippeerlimit, mflags, rflags));
617
618	if (NULL == resaddr) {
619		REQUIRE(NULL == resmask);
620		REQUIRE(RESTRICT_FLAGS == op);
621		restrict_source_rflags = rflags;
622		restrict_source_mflags = mflags;
623		restrict_source_ippeerlimit = ippeerlimit;
624		restrict_source_enabled = 1;
625		return;
626	}
627
628	ZERO(match);
629
630#if 0
631	/* silence VC9 potentially uninit warnings */
632	// HMS: let's use a compiler-specific "enable" for this.
633	res = NULL;
634	v6 = 0;
635#endif
636
637	if (IS_IPV4(resaddr)) {
638		v6 = 0;
639		/*
640		 * Get address and mask in host byte order for easy
641		 * comparison as u_int32
642		 */
643		match.u.v4.addr = SRCADR(resaddr);
644		match.u.v4.mask = SRCADR(resmask);
645		match.u.v4.addr &= match.u.v4.mask;
646
647	} else if (IS_IPV6(resaddr)) {
648		v6 = 1;
649		/*
650		 * Get address and mask in network byte order for easy
651		 * comparison as byte sequences (e.g. memcmp())
652		 */
653		match.u.v6.mask = SOCK_ADDR6(resmask);
654		MASK_IPV6_ADDR(&match.u.v6.addr, PSOCK_ADDR6(resaddr),
655			       &match.u.v6.mask);
656
657	} else	/* not IPv4 nor IPv6 */
658		REQUIRE(0);
659
660	match.rflags = rflags;
661	match.mflags = mflags;
662	match.ippeerlimit = ippeerlimit;
663	match.expire = expire;
664	res = match_restrict_entry(&match, v6);
665
666	switch (op) {
667
668	case RESTRICT_FLAGS:
669		/*
670		 * Here we add bits to the rflags. If this is a
671		 * new restriction add it.
672		 */
673		if (NULL == res) {
674			if (v6) {
675				res = alloc_res6();
676				memcpy(res, &match,
677				       V6_SIZEOF_RESTRICT_U);
678				plisthead = &restrictlist6;
679			} else {
680				res = alloc_res4();
681				memcpy(res, &match,
682				       V4_SIZEOF_RESTRICT_U);
683				plisthead = &restrictlist4;
684			}
685			LINK_SORT_SLIST(
686				*plisthead, res,
687				(v6)
688				  ? res_sorts_before6(res, L_S_S_CUR())
689				  : res_sorts_before4(res, L_S_S_CUR()),
690				link, restrict_u);
691			restrictcount++;
692			if (RES_LIMITED & rflags)
693				inc_res_limited();
694		} else {
695			if (   (RES_LIMITED & rflags)
696			    && !(RES_LIMITED & res->rflags))
697				inc_res_limited();
698			res->rflags |= rflags;
699		}
700
701		res->ippeerlimit = match.ippeerlimit;
702
703		break;
704
705	case RESTRICT_UNFLAG:
706		/*
707		 * Remove some bits from the rflags. If we didn't
708		 * find this one, just return.
709		 */
710		if (res != NULL) {
711			if (   (RES_LIMITED & res->rflags)
712			    && (RES_LIMITED & rflags))
713				dec_res_limited();
714			res->rflags &= ~rflags;
715		}
716		break;
717
718	case RESTRICT_REMOVE:
719	case RESTRICT_REMOVEIF:
720		/*
721		 * Remove an entry from the table entirely if we
722		 * found one. Don't remove the default entry and
723		 * don't remove an interface entry.
724		 */
725		if (res != NULL
726		    && (RESTRICT_REMOVEIF == op
727			|| !(RESM_INTERFACE & res->mflags))
728		    && res != &restrict_def4
729		    && res != &restrict_def6)
730			free_res(res, v6);
731		break;
732
733	default:	/* unknown op */
734		INSIST(0);
735		break;
736	}
737
738}
739
740
741/*
742 * restrict_source - maintains dynamic "restrict source ..." entries as
743 *		     peers come and go.
744 */
745void
746restrict_source(
747	sockaddr_u *	addr,
748	int		farewell,	/* 0 to add, 1 to remove */
749	u_long		expire		/* 0 is infinite, valid until */
750	)
751{
752	sockaddr_u	onesmask;
753	restrict_u *	res;
754	int		found_specific;
755
756	if (!restrict_source_enabled || SOCK_UNSPEC(addr) ||
757	    IS_MCAST(addr) || ISREFCLOCKADR(addr))
758		return;
759
760	REQUIRE(AF_INET == AF(addr) || AF_INET6 == AF(addr));
761
762	SET_HOSTMASK(&onesmask, AF(addr));
763	if (farewell) {
764		hack_restrict(RESTRICT_REMOVE, addr, &onesmask,
765			      -2, 0, 0, 0);
766		DPRINTF(1, ("restrict_source: %s removed", stoa(addr)));
767		return;
768	}
769
770	/*
771	 * If there is a specific entry for this address, hands
772	 * off, as it is condidered more specific than "restrict
773	 * server ...".
774	 * However, if the specific entry found is a fleeting one
775	 * added by pool_xmit() before soliciting, replace it
776	 * immediately regardless of the expire value to make way
777	 * for the more persistent entry.
778	 */
779	if (IS_IPV4(addr)) {
780		res = match_restrict4_addr(SRCADR(addr), SRCPORT(addr));
781		INSIST(res != NULL);
782		found_specific = (SRCADR(&onesmask) == res->u.v4.mask);
783	} else {
784		res = match_restrict6_addr(&SOCK_ADDR6(addr),
785					   SRCPORT(addr));
786		INSIST(res != NULL);
787		found_specific = ADDR6_EQ(&res->u.v6.mask,
788					  &SOCK_ADDR6(&onesmask));
789	}
790	if (!expire && found_specific && res->expire) {
791		found_specific = 0;
792		free_res(res, IS_IPV6(addr));
793	}
794	if (found_specific)
795		return;
796
797	hack_restrict(RESTRICT_FLAGS, addr, &onesmask,
798		      restrict_source_ippeerlimit, restrict_source_mflags,
799		      restrict_source_rflags, expire);
800	DPRINTF(1, ("restrict_source: %s host restriction added\n",
801		    stoa(addr)));
802}
803