/*
 * ntp_restrict.c - determine host restrictions
 */
#ifdef HAVE_CONFIG_H
#include <config.h>
#endif

#include <stdio.h>
#include <sys/types.h>

#include "ntpd.h"
#include "ntp_if.h"
#include "ntp_lists.h"
#include "ntp_stdlib.h"
#include "ntp_assert.h"

/*
 * This code keeps a simple address-and-mask list of hosts we want
 * to place restrictions on (or remove them from). The restrictions
 * are implemented as a set of flags which tell you what the host
 * can't do. There is a subroutine entry to return the flags. The
 * list is kept sorted to reduce the average number of comparisons
 * and make sure you get the set of restrictions most specific to
 * the address.
 *
 * The algorithm is that, when looking up a host, it is first assumed
 * that the default set of restrictions will apply. It then searches
 * down through the list. Whenever it finds a match it adopts the
 * match's flags instead. When you hit the point where the sorted
 * address is greater than the target, you return with the last set of
 * flags you found. Because of the ordering of the list, the most
 * specific match will provide the final set of flags.
 *
 * This was originally intended to keep you from sync'ing to your own
 * broadcasts when you are broadcasting, by restricting yourself from
 * your own interfaces. It was also thought it would sometimes be
 * useful to keep a misbehaving host or two from abusing your primary
 * clock. It has been expanded, however, to suit the needs of those
 * with more restrictive access policies.
 */
/*
 * We will use two lists, one for IPv4 addresses and one for IPv6
 * addresses. This is not protocol-independent but for now I can't
 * find a way to respect this. We'll check this later... JFB 07/2001
 */
#define MASK_IPV6_ADDR(dst, src, msk)					\
	do {								\
		int idx;						\
		for (idx = 0; idx < (int)COUNTOF((dst)->s6_addr); idx++) { \
			(dst)->s6_addr[idx] = (src)->s6_addr[idx]	\
					      & (msk)->s6_addr[idx];	\
		}							\
	} while (0)
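
/*
 * Illustrative sketch only (not part of the build): masking an IPv6
 * address down to its prefix before comparison.  The addresses are
 * documentation examples, not values the daemon uses.
 *
 *	struct in6_addr addr, mask, masked;
 *
 *	inet_pton(AF_INET6, "2001:db8::1234", &addr);
 *	inet_pton(AF_INET6, "ffff:ffff::", &mask);
 *	MASK_IPV6_ADDR(&masked, &addr, &mask);
 *	(masked now holds 2001:db8::)
 */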

/*
 * We allocate INC_RESLIST{4|6} entries to the free list whenever empty.
 * Auto-tune these to be just less than 1KB (leaving at least 16 bytes
 * for allocator overhead).
 */
#define	INC_RESLIST4	((1024 - 16) / V4_SIZEOF_RESTRICT_U)
#define	INC_RESLIST6	((1024 - 16) / V6_SIZEOF_RESTRICT_U)
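
/*
 * For example (entry sizes here are purely illustrative): if
 * V4_SIZEOF_RESTRICT_U were 56 bytes, INC_RESLIST4 would evaluate to
 * (1024 - 16) / 56 == 18 entries per allocation; a hypothetical
 * 104-byte V6_SIZEOF_RESTRICT_U would give (1024 - 16) / 104 == 9.
 */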

/*
 * The restriction list
 */
restrict_u *restrictlist4;
restrict_u *restrictlist6;
static int restrictcount;	/* count in the restrict lists */

/*
 * The free list and associated counters.  Also some uninteresting
 * stat counters.
 */
static restrict_u *resfree4;	/* available entries (free list) */
static restrict_u *resfree6;

static u_long res_calls;
static u_long res_found;
static u_long res_not_found;

/*
 * Count number of restriction entries referring to RES_LIMITED, to
 * control implicit activation/deactivation of the MRU monlist.
 */
static	u_long	res_limited_refcnt;

/*
 * Our default entries.
 */
static	restrict_u	restrict_def4;
static	restrict_u	restrict_def6;

/*
 * "restrict source ..." enabled knob and restriction bits.
 */
static	int		restrict_source_enabled;
static	u_short		restrict_source_flags;
static	u_short		restrict_source_mflags;

/*
 * private functions
 */
static restrict_u *	alloc_res4(void);
static restrict_u *	alloc_res6(void);
static void		free_res(restrict_u *, int);
static void		inc_res_limited(void);
static void		dec_res_limited(void);
static restrict_u *	match_restrict4_addr(u_int32, u_short);
static restrict_u *	match_restrict6_addr(const struct in6_addr *,
					     u_short);
static restrict_u *	match_restrict_entry(const restrict_u *, int);
static int		res_sorts_before4(restrict_u *, restrict_u *);
static int		res_sorts_before6(restrict_u *, restrict_u *);


/*
 * init_restrict - initialize the restriction data structures
 */
void
init_restrict(void)
{
	/*
	 * The restriction lists begin with a default entry with address
	 * and mask 0, which will match any address.  The lists are kept
	 * sorted by descending address followed by descending mask:
	 *
	 *   address	  mask		  restrictions
	 * 192.168.0.0	255.255.255.0	kod limited noquery nopeer
	 * 192.168.0.0	255.255.0.0	kod limited
	 * 0.0.0.0	0.0.0.0		kod limited noquery
	 *
	 * The first entry which matches an address is used.  With the
	 * example restrictions above, 192.168.0.0/24 matches the first
	 * entry, the rest of 192.168.0.0/16 matches the second, and
	 * everything else matches the third (default).
	 *
	 * Note this achieves the same result a little more efficiently
	 * than the documented behavior, which is to keep the lists
	 * sorted by ascending address followed by ascending mask, with
	 * the _last_ matching entry used.
	 *
	 * An additional wrinkle is we may have multiple entries with
	 * the same address and mask but differing match flags (mflags).
	 * At present there is only one, RESM_NTPONLY.  Entries with
	 * RESM_NTPONLY are sorted earlier so they take precedence over
	 * any otherwise similar entry without it.  Again, this is the
	 * same behavior the documentation describes, just implemented
	 * in reverse.
	 */
	LINK_SLIST(restrictlist4, &restrict_def4, link);
	LINK_SLIST(restrictlist6, &restrict_def6, link);
	restrictcount = 2;
}
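
/*
 * After init_restrict() the lists hold only the two default entries,
 * conceptually:
 *
 *	restrictlist4 -> { 0.0.0.0 mask 0.0.0.0, flags 0 } -> NULL
 *	restrictlist6 -> { ::      mask ::,      flags 0 } -> NULL
 *
 * Subsequent "restrict" configuration is merged in by hack_restrict()
 * below, which maintains the descending sort described above.
 */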


static restrict_u *
alloc_res4(void)
{
	const size_t	cb = V4_SIZEOF_RESTRICT_U;
	const size_t	count = INC_RESLIST4;
	restrict_u *	rl;
	restrict_u *	res;
	int		i;

	UNLINK_HEAD_SLIST(res, resfree4, link);
	if (res != NULL)
		return res;

	rl = emalloc_zero(count * cb);
	/* link all but the first onto free list */
	res = (void *)((char *)rl + (count - 1) * cb);
	for (i = count - 1; i > 0; i--) {
		LINK_SLIST(resfree4, res, link);
		res = (void *)((char *)res - cb);
	}
	INSIST(rl == res);
	/* allocate the first */
	return res;
}


static restrict_u *
alloc_res6(void)
{
	const size_t	cb = V6_SIZEOF_RESTRICT_U;
	const size_t	count = INC_RESLIST6;
	restrict_u *	rl;
	restrict_u *	res;
	int		i;

	UNLINK_HEAD_SLIST(res, resfree6, link);
	if (res != NULL)
		return res;

	rl = emalloc_zero(count * cb);
	/* link all but the first onto free list */
	res = (void *)((char *)rl + (count - 1) * cb);
	for (i = count - 1; i > 0; i--) {
		LINK_SLIST(resfree6, res, link);
		res = (void *)((char *)res - cb);
	}
	INSIST(rl == res);
	/* allocate the first */
	return res;
}


static void
free_res(
	restrict_u *	res,
	int		v6
	)
{
	restrict_u **	plisthead;
	restrict_u *	unlinked;

	restrictcount--;
	if (RES_LIMITED & res->flags)
		dec_res_limited();

	if (v6)
		plisthead = &restrictlist6;
	else
		plisthead = &restrictlist4;
	UNLINK_SLIST(unlinked, *plisthead, res, link, restrict_u);
	INSIST(unlinked == res);

	if (v6) {
		zero_mem(res, V6_SIZEOF_RESTRICT_U);
		plisthead = &resfree6;
	} else {
		zero_mem(res, V4_SIZEOF_RESTRICT_U);
		plisthead = &resfree4;
	}
	LINK_SLIST(*plisthead, res, link);
}


static void
inc_res_limited(void)
{
	if (!res_limited_refcnt)
		mon_start(MON_RES);
	res_limited_refcnt++;
}


static void
dec_res_limited(void)
{
	res_limited_refcnt--;
	if (!res_limited_refcnt)
		mon_stop(MON_RES);
}


static restrict_u *
match_restrict4_addr(
	u_int32	addr,
	u_short	port
	)
{
	const int	v6 = 0;
	restrict_u *	res;
	restrict_u *	next;

	for (res = restrictlist4; res != NULL; res = next) {
		next = res->link;
		if (res->expire &&
		    res->expire <= current_time) {
			free_res(res, v6);
			continue;	/* entry is gone; check the next */
		}
		if (res->u.v4.addr == (addr & res->u.v4.mask)
		    && (!(RESM_NTPONLY & res->mflags)
			|| NTP_PORT == port))
			break;
	}
	return res;
}


static restrict_u *
match_restrict6_addr(
	const struct in6_addr *	addr,
	u_short			port
	)
{
	const int	v6 = 1;
	restrict_u *	res;
	restrict_u *	next;
	struct in6_addr	masked;

	for (res = restrictlist6; res != NULL; res = next) {
		next = res->link;
		INSIST(next != res);
		if (res->expire &&
		    res->expire <= current_time) {
			free_res(res, v6);
			continue;	/* entry is gone; check the next */
		}
		MASK_IPV6_ADDR(&masked, addr, &res->u.v6.mask);
		if (ADDR6_EQ(&masked, &res->u.v6.addr)
		    && (!(RESM_NTPONLY & res->mflags)
			|| NTP_PORT == (int)port))
			break;
	}
	return res;
}


/*
 * match_restrict_entry - find an exact match on a restrict list.
 *
 * Exact match is addr, mask, and mflags all equal.
 * In order to use more common code for IPv4 and IPv6, this routine
 * requires the caller to populate a restrict_u with mflags and either
 * the v4 or v6 address and mask as appropriate.  Other fields in the
 * input restrict_u are ignored.
 */
static restrict_u *
match_restrict_entry(
	const restrict_u *	pmatch,
	int			v6
	)
{
	restrict_u *res;
	restrict_u *rlist;
	size_t cb;

	if (v6) {
		rlist = restrictlist6;
		cb = sizeof(pmatch->u.v6);
	} else {
		rlist = restrictlist4;
		cb = sizeof(pmatch->u.v4);
	}

	for (res = rlist; res != NULL; res = res->link)
		if (res->mflags == pmatch->mflags &&
		    !memcmp(&res->u, &pmatch->u, cb))
			break;
	return res;
}
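
/*
 * Usage sketch (illustrative, not compiled): an exact-match lookup for
 * the IPv4 network 192.168.0.0/255.255.0.0 with no match flags, along
 * the same lines as the lookups hack_restrict() performs below.
 *
 *	restrict_u	hunt;
 *	restrict_u *	res;
 *
 *	ZERO(hunt);
 *	hunt.u.v4.addr = 0xc0a80000;	(192.168.0.0, host byte order)
 *	hunt.u.v4.mask = 0xffff0000;	(255.255.0.0)
 *	hunt.mflags = 0;
 *	res = match_restrict_entry(&hunt, 0);	(0 selects the IPv4 list)
 */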


/*
 * res_sorts_before4 - compare two restrict4 entries
 *
 * Returns nonzero if r1 sorts before r2.  We sort by descending
 * address, then descending mask, then descending mflags, so sorting
 * before means having a higher value.
 */
static int
res_sorts_before4(
	restrict_u *r1,
	restrict_u *r2
	)
{
	int r1_before_r2;

	if (r1->u.v4.addr > r2->u.v4.addr)
		r1_before_r2 = 1;
	else if (r1->u.v4.addr < r2->u.v4.addr)
		r1_before_r2 = 0;
	else if (r1->u.v4.mask > r2->u.v4.mask)
		r1_before_r2 = 1;
	else if (r1->u.v4.mask < r2->u.v4.mask)
		r1_before_r2 = 0;
	else if (r1->mflags > r2->mflags)
		r1_before_r2 = 1;
	else
		r1_before_r2 = 0;

	return r1_before_r2;
}
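
/*
 * With the example entries from the init_restrict() comment,
 * res_sorts_before4() places 192.168.0.0/255.255.255.0 ahead of
 * 192.168.0.0/255.255.0.0 (same address, larger mask), and both ahead
 * of the 0.0.0.0/0.0.0.0 default (smaller address), which is exactly
 * the descending order shown there.
 */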


/*
 * res_sorts_before6 - compare two restrict6 entries
 *
 * Returns nonzero if r1 sorts before r2.  We sort by descending
 * address, then descending mask, then descending mflags, so sorting
 * before means having a higher value.
 */
static int
res_sorts_before6(
	restrict_u *r1,
	restrict_u *r2
	)
{
	int r1_before_r2;
	int cmp;

	cmp = ADDR6_CMP(&r1->u.v6.addr, &r2->u.v6.addr);
	if (cmp > 0)		/* r1->addr > r2->addr */
		r1_before_r2 = 1;
	else if (cmp < 0)	/* r2->addr > r1->addr */
		r1_before_r2 = 0;
	else {
		cmp = ADDR6_CMP(&r1->u.v6.mask, &r2->u.v6.mask);
		if (cmp > 0)		/* r1->mask > r2->mask */
			r1_before_r2 = 1;
		else if (cmp < 0)	/* r2->mask > r1->mask */
			r1_before_r2 = 0;
		else if (r1->mflags > r2->mflags)
			r1_before_r2 = 1;
		else
			r1_before_r2 = 0;
	}

	return r1_before_r2;
}


/*
 * restrictions - return restrictions for this host
 */
u_short
restrictions(
	sockaddr_u *srcadr
	)
{
	restrict_u *match;
	struct in6_addr *pin6;
	u_short flags;

	res_calls++;
	flags = 0;
	/* IPv4 source address */
	if (IS_IPV4(srcadr)) {
		/*
		 * Ignore any packets with a multicast source address
		 * (this should be done early in the receive process,
		 * not later!)
		 */
		if (IN_CLASSD(SRCADR(srcadr)))
			return (int)RES_IGNORE;

		match = match_restrict4_addr(SRCADR(srcadr),
					     SRCPORT(srcadr));

		INSIST(match != NULL);

		match->count++;
		/*
		 * res_not_found counts only use of the final default
		 * entry, not any "restrict default ntpport ...", which
		 * would be just before the final default.
		 */
		if (&restrict_def4 == match)
			res_not_found++;
		else
			res_found++;
		flags = match->flags;
	}

	/* IPv6 source address */
	if (IS_IPV6(srcadr)) {
		pin6 = PSOCK_ADDR6(srcadr);

		/*
		 * Ignore any packets with a multicast source address
		 * (this should be done early in the receive process,
		 * not later!)
		 */
		if (IN6_IS_ADDR_MULTICAST(pin6))
			return (int)RES_IGNORE;

		match = match_restrict6_addr(pin6, SRCPORT(srcadr));
		INSIST(match != NULL);
		match->count++;
		if (&restrict_def6 == match)
			res_not_found++;
		else
			res_found++;
		flags = match->flags;
	}
	return (flags);
}
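
/*
 * Typical use (sketch; the receive-path variable names are assumed,
 * not defined here): look up the flags for a packet's source address
 * and drop it early if RES_IGNORE is set.
 *
 *	u_short restrict_mask;
 *
 *	restrict_mask = restrictions(&rbufp->recv_srcadr);
 *	if (RES_IGNORE & restrict_mask)
 *		return;		(drop the packet silently)
 */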


/*
 * hack_restrict - add/subtract/manipulate entries on the restrict list
 */
void
hack_restrict(
	int		op,
	sockaddr_u *	resaddr,
	sockaddr_u *	resmask,
	u_short		mflags,
	u_short		flags,
	u_long		expire
	)
{
	int		v6;
	restrict_u	match;
	restrict_u *	res;
	restrict_u **	plisthead;

	DPRINTF(1, ("restrict: op %d addr %s mask %s mflags %08x flags %08x\n",
		    op, stoa(resaddr), stoa(resmask), mflags, flags));

	if (NULL == resaddr) {
		REQUIRE(NULL == resmask);
		REQUIRE(RESTRICT_FLAGS == op);
		restrict_source_flags = flags;
		restrict_source_mflags = mflags;
		restrict_source_enabled = 1;
		return;
	}

	ZERO(match);

#if 0
	/* silence VC9 potentially uninit warnings */
	// HMS: let's use a compiler-specific "enable" for this.
	res = NULL;
	v6 = 0;
#endif

	if (IS_IPV4(resaddr)) {
		v6 = 0;
		/*
		 * Get address and mask in host byte order for easy
		 * comparison as u_int32
		 */
		match.u.v4.addr = SRCADR(resaddr);
		match.u.v4.mask = SRCADR(resmask);
		match.u.v4.addr &= match.u.v4.mask;

	} else if (IS_IPV6(resaddr)) {
		v6 = 1;
		/*
		 * Get address and mask in network byte order for easy
		 * comparison as byte sequences (e.g. memcmp())
		 */
		match.u.v6.mask = SOCK_ADDR6(resmask);
		MASK_IPV6_ADDR(&match.u.v6.addr, PSOCK_ADDR6(resaddr),
			       &match.u.v6.mask);

	} else	/* not IPv4 nor IPv6 */
		REQUIRE(0);

	match.flags = flags;
	match.mflags = mflags;
	match.expire = expire;
	res = match_restrict_entry(&match, v6);

	switch (op) {

	case RESTRICT_FLAGS:
		/*
		 * Here we add bits to the flags. If this is a
		 * new restriction add it.
		 */
		if (NULL == res) {
			if (v6) {
				res = alloc_res6();
				memcpy(res, &match,
				       V6_SIZEOF_RESTRICT_U);
				plisthead = &restrictlist6;
			} else {
				res = alloc_res4();
				memcpy(res, &match,
				       V4_SIZEOF_RESTRICT_U);
				plisthead = &restrictlist4;
			}
			LINK_SORT_SLIST(
				*plisthead, res,
				(v6)
				  ? res_sorts_before6(res, L_S_S_CUR())
				  : res_sorts_before4(res, L_S_S_CUR()),
				link, restrict_u);
			restrictcount++;
			if (RES_LIMITED & flags)
				inc_res_limited();
		} else {
			if ((RES_LIMITED & flags) &&
			    !(RES_LIMITED & res->flags))
				inc_res_limited();
			res->flags |= flags;
		}
		break;

	case RESTRICT_UNFLAG:
		/*
		 * Remove some bits from the flags. If we didn't
		 * find this one, just return.
		 */
		if (res != NULL) {
			if ((RES_LIMITED & res->flags)
			    && (RES_LIMITED & flags))
				dec_res_limited();
			res->flags &= ~flags;
		}
		break;

	case RESTRICT_REMOVE:
	case RESTRICT_REMOVEIF:
		/*
		 * Remove an entry from the table entirely if we
		 * found one. Don't remove the default entry and
		 * don't remove an interface entry.
		 */
		if (res != NULL
		    && (RESTRICT_REMOVEIF == op
			|| !(RESM_INTERFACE & res->mflags))
		    && res != &restrict_def4
		    && res != &restrict_def6)
			free_res(res, v6);
		break;

	default:	/* unknown op */
		INSIST(0);
		break;
	}

}
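
/*
 * Sketch of how a configuration line typically arrives here (flag
 * names are from ntp.h; the parser plumbing is omitted):
 *
 *	restrict 192.168.0.0 mask 255.255.0.0 kod limited
 *
 * becomes, roughly,
 *
 *	hack_restrict(RESTRICT_FLAGS, &resaddr, &resmask,
 *		      0, RES_KOD | RES_LIMITED, 0);
 *
 * where resaddr and resmask hold 192.168.0.0 and 255.255.0.0.
 */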


/*
 * restrict_source - maintains dynamic "restrict source ..." entries as
 *		     peers come and go.
 */
void
restrict_source(
	sockaddr_u *	addr,
	int		farewell,	/* 0 to add, 1 to remove */
	u_long		expire		/* 0 is infinite, else valid-until time */
	)
{
	sockaddr_u	onesmask;
	restrict_u *	res;
	int		found_specific;

	if (!restrict_source_enabled || SOCK_UNSPEC(addr) ||
	    IS_MCAST(addr) || ISREFCLOCKADR(addr))
		return;

	REQUIRE(AF_INET == AF(addr) || AF_INET6 == AF(addr));

	SET_HOSTMASK(&onesmask, AF(addr));
	if (farewell) {
		hack_restrict(RESTRICT_REMOVE, addr, &onesmask,
			      0, 0, 0);
		DPRINTF(1, ("restrict_source: %s removed\n", stoa(addr)));
		return;
	}

	/*
	 * If there is a specific entry for this address, hands
	 * off, as it is considered more specific than "restrict
	 * server ...".
	 * However, if the specific entry found is a fleeting one
	 * added by pool_xmit() before soliciting, replace it
	 * immediately regardless of the expire value to make way
	 * for the more persistent entry.
	 */
	if (IS_IPV4(addr)) {
		res = match_restrict4_addr(SRCADR(addr), SRCPORT(addr));
		INSIST(res != NULL);
		found_specific = (SRCADR(&onesmask) == res->u.v4.mask);
	} else {
		res = match_restrict6_addr(&SOCK_ADDR6(addr),
					   SRCPORT(addr));
		INSIST(res != NULL);
		found_specific = ADDR6_EQ(&res->u.v6.mask,
					  &SOCK_ADDR6(&onesmask));
	}
	if (!expire && found_specific && res->expire) {
		found_specific = 0;
		free_res(res, IS_IPV6(addr));
	}
	if (found_specific)
		return;

	hack_restrict(RESTRICT_FLAGS, addr, &onesmask,
		      restrict_source_mflags, restrict_source_flags,
		      expire);
	DPRINTF(1, ("restrict_source: %s host restriction added\n",
		    stoa(addr)));
}
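
/*
 * Callers (sketch; the exact call sites live elsewhere in ntpd): a
 * dynamic entry is typically added when an association is mobilized
 * and removed when it goes away, e.g.
 *
 *	restrict_source(&peer->srcadr, 0, 0);	(add, no expiration)
 *	...
 *	restrict_source(&peer->srcadr, 1, 0);	(remove on demobilize)
 */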