/*
 * Copyright (c) 1983, 1988, 1993
 *	The Regents of the University of California.  All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * $FreeBSD$
 */

#include "defs.h"

#ifdef __NetBSD__
__RCSID("$NetBSD$");
#elif defined(__FreeBSD__)
__RCSID("$FreeBSD$");
#else
__RCSID("$Revision: 2.27 $");
#ident "$Revision: 2.27 $"
#endif


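/* Sequence number of the routing table.  rip_bcast() advances it once a
 * full set of advertisements has been sent; during flash updates,
 * supply_out() skips aggregated routes whose ag_seqno predates it.
 */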
u_int update_seqno;


/* walk the tree of routes with this for output
 */
static struct {
	struct sockaddr_in to;
	naddr	to_mask;
	naddr	to_net;
	naddr	to_std_mask;
	naddr	to_std_net;
	struct interface *ifp;		/* usually output interface */
	struct auth *a;
	char	metric;			/* adjust metrics by interface */
	int	npackets;
	int	gen_limit;
	u_int	state;
#define	    WS_ST_FLASH	    0x001	/* send only changed routes */
#define	    WS_ST_RIP2_ALL  0x002	/* send full featured RIPv2 */
#define	    WS_ST_AG	    0x004	/* ok to aggregate subnets */
#define	    WS_ST_SUPER_AG  0x008	/* ok to aggregate networks */
#define	    WS_ST_QUERY	    0x010	/* responding to a query */
#define	    WS_ST_TO_ON_NET 0x020	/* sending onto one of our nets */
#define	    WS_ST_DEFAULT   0x040	/* faking a default */
} ws;

/* A buffer for what can be heard by both RIPv1 and RIPv2 listeners */
struct ws_buf v12buf;
static union pkt_buf ripv12_buf;

/* Another for only RIPv2 listeners */
static struct ws_buf v2buf;
static union pkt_buf rip_v2_buf;



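/* Prepare the two response buffers: one usable by both RIPv1 and RIPv2
 * listeners and one for RIPv2-only output.
 */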
void
bufinit(void)
{
	ripv12_buf.rip.rip_cmd = RIPCMD_RESPONSE;
	v12buf.buf = &ripv12_buf.rip;
	v12buf.base = &v12buf.buf->rip_nets[0];

	rip_v2_buf.rip.rip_cmd = RIPCMD_RESPONSE;
	rip_v2_buf.rip.rip_vers = RIPv2;
	v2buf.buf = &rip_v2_buf.rip;
	v2buf.base = &v2buf.buf->rip_nets[0];
}


/* Send the contents of the global buffer via the non-multicast socket
 */
int					/* <0 on failure */
output(enum output_type type,
       struct sockaddr_in *dst,		/* send to here */
       struct interface *ifp,
       struct rip *buf,
       int size)			/* this many bytes */
{
	struct sockaddr_in osin;
	int flags;
	const char *msg;
	int res;
	int soc;
	int serrno;

	assert(ifp != NULL);
	osin = *dst;
	if (osin.sin_port == 0)
		osin.sin_port = htons(RIP_PORT);
#ifdef _HAVE_SIN_LEN
	if (osin.sin_len == 0)
		osin.sin_len = sizeof(osin);
#endif

	soc = rip_sock;
	flags = 0;

	switch (type) {
	case OUT_QUERY:
		msg = "Answer Query";
		if (soc < 0)
			soc = ifp->int_rip_sock;
		break;
	case OUT_UNICAST:
		msg = "Send";
		if (soc < 0)
			soc = ifp->int_rip_sock;
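		/* MSG_DONTROUTE keeps these packets on the directly
		 * attached network instead of letting them be forwarded.
		 */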
		flags = MSG_DONTROUTE;
		break;
	case OUT_BROADCAST:
		if (ifp->int_if_flags & IFF_POINTOPOINT) {
			msg = "Send";
		} else {
			msg = "Send bcast";
		}
		flags = MSG_DONTROUTE;
		break;
	case OUT_MULTICAST:
		if ((ifp->int_if_flags & (IFF_POINTOPOINT|IFF_MULTICAST)) ==
		    IFF_POINTOPOINT) {
			msg = "Send pt-to-pt";
		} else if (ifp->int_state & IS_DUP) {
			trace_act("abort multicast output via %s"
				  " with duplicate address",
				  ifp->int_name);
			return 0;
		} else {
			msg = "Send mcast";
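			/* Aim the multicast socket at this interface only
			 * when it changes; rip_sock_mcast remembers the
			 * current setting so we avoid a setsockopt() per
			 * packet.
			 */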
			if (rip_sock_mcast != ifp) {
				struct ip_mreqn mreqn;

				memset(&mreqn, 0, sizeof(struct ip_mreqn));
				mreqn.imr_ifindex = ifp->int_index;
				if (0 > setsockopt(rip_sock,
						   IPPROTO_IP,
						   IP_MULTICAST_IF,
						   &mreqn,
						   sizeof(mreqn))) {
					serrno = errno;
					LOGERR("setsockopt(rip_sock, "
					       "IP_MULTICAST_IF)");
					errno = serrno;
					ifp = 0;
					return -1;
				}
				rip_sock_mcast = ifp;
			}
			osin.sin_addr.s_addr = htonl(INADDR_RIP_GROUP);
		}
		break;

	case NO_OUT_MULTICAST:
	case NO_OUT_RIPV2:
	default:
#ifdef DEBUG
		abort();
#endif
		return -1;
	}

	trace_rip(msg, "to", &osin, ifp, buf, size);

	res = sendto(soc, buf, size, flags,
		     (struct sockaddr *)&osin, sizeof(osin));
	if (res < 0
	    && (ifp == 0 || !(ifp->int_state & IS_BROKE))) {
		serrno = errno;
		msglog("%s sendto(%s%s%s.%d): %s", msg,
		       ifp != 0 ? ifp->int_name : "",
		       ifp != 0 ? ", " : "",
		       inet_ntoa(osin.sin_addr),
		       ntohs(osin.sin_port),
		       strerror(errno));
		errno = serrno;
	}

	return res;
}


/* Find the first key for a packet to send.
 * Try for a key that is eligible and has not expired, but settle for
 * the key that expired most recently if they have all expired.
 * If no key is ready yet, give up.
 */
struct auth *
find_auth(struct interface *ifp)
{
	struct auth *ap, *res;
	int i;


	if (ifp == 0)
		return 0;

	res = 0;
	ap = ifp->int_auth;
	for (i = 0; i < MAX_AUTH_KEYS; i++, ap++) {
		/* stop looking after the last key */
		if (ap->type == RIP_AUTH_NONE)
			break;

		/* ignore keys that are not ready yet */
		if ((u_long)ap->start > (u_long)clk.tv_sec)
			continue;

		if ((u_long)ap->end < (u_long)clk.tv_sec) {
			/* note best expired password as a fall-back */
			if (res == 0 || (u_long)ap->end > (u_long)res->end)
				res = ap;
			continue;
		}

		/* note key with the best future */
		if (res == 0 || (u_long)res->end < (u_long)ap->end)
			res = ap;
	}
	return res;
}


void
clr_ws_buf(struct ws_buf *wb,
	   struct auth *ap)
{
	struct netauth *na;

	wb->lim = wb->base + NETS_LEN;
	wb->n = wb->base;
	memset(wb->n, 0, NETS_LEN*sizeof(*wb->n));

	/* (start to) install authentication if appropriate
	 */
	if (ap == 0)
		return;

	na = (struct netauth*)wb->n;
	if (ap->type == RIP_AUTH_PW) {
		na->a_family = RIP_AF_AUTH;
		na->a_type = RIP_AUTH_PW;
		memcpy(na->au.au_pw, ap->key, sizeof(na->au.au_pw));
		wb->n++;

	} else if (ap->type == RIP_AUTH_MD5) {
		na->a_family = RIP_AF_AUTH;
		na->a_type = RIP_AUTH_MD5;
		na->au.a_md5.md5_keyid = ap->keyid;
		na->au.a_md5.md5_auth_len = RIP_AUTH_MD5_KEY_LEN;
		na->au.a_md5.md5_seqno = htonl(clk.tv_sec);
		wb->n++;
		wb->lim--;		/* make room for trailer */
	}
}


void
end_md5_auth(struct ws_buf *wb,
	     struct auth *ap)
{
	struct netauth *na, *na2;
	MD5_CTX md5_ctx;
	int len;


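	/* Append the authentication trailer: a final entry of family
	 * RIP_AF_AUTH and type 1 whose password field carries the MD5
	 * digest computed over the packet followed by the secret key.
	 */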
	na = (struct netauth*)wb->base;
	na2 = (struct netauth*)wb->n;
	len = (char *)na2-(char *)wb->buf;
	na2->a_family = RIP_AF_AUTH;
	na2->a_type = htons(1);
	na->au.a_md5.md5_pkt_len = htons(len);
	MD5Init(&md5_ctx);
	MD5Update(&md5_ctx, (u_char *)wb->buf, len + RIP_AUTH_MD5_HASH_XTRA);
	MD5Update(&md5_ctx, ap->key, RIP_AUTH_MD5_KEY_LEN);
	MD5Final(na2->au.au_pw, &md5_ctx);
	wb->n++;
}


/* Send the buffer
 */
static void
supply_write(struct ws_buf *wb)
{
	/* Output multicast only if legal.
	 * If we would multicast and it would be illegal, then discard the
	 * packet.
	 */
	switch (wb->type) {
	case NO_OUT_MULTICAST:
		trace_pkt("skip multicast to %s because impossible",
			  naddr_ntoa(ws.to.sin_addr.s_addr));
		break;
	case NO_OUT_RIPV2:
		break;
	default:
		if (ws.a != 0 && ws.a->type == RIP_AUTH_MD5)
			end_md5_auth(wb,ws.a);
		if (output(wb->type, &ws.to, ws.ifp, wb->buf,
			   ((char *)wb->n - (char*)wb->buf)) < 0
		    && ws.ifp != 0)
			if_sick(ws.ifp);
		ws.npackets++;
		break;
	}

	clr_ws_buf(wb,ws.a);
}


/* put an entry into the packet
 */
static void
supply_out(struct ag_info *ag)
{
	int i;
	naddr mask, v1_mask, dst_h, ddst_h = 0;
	struct ws_buf *wb;


	/* Skip this route if doing a flash update and it and the routes
	 * it aggregates have not changed recently.
	 */
	if (ag->ag_seqno < update_seqno
	    && (ws.state & WS_ST_FLASH))
		return;

	dst_h = ag->ag_dst_h;
	mask = ag->ag_mask;
	v1_mask = ripv1_mask_host(htonl(dst_h),
				  (ws.state & WS_ST_TO_ON_NET) ? ws.ifp : 0);
	i = 0;

	/* If we are sending RIPv2 packets that cannot (or must not) be
	 * heard by RIPv1 listeners, do not worry about sub- or supernets.
	 * Subnets (from other networks) can only be sent via multicast.
	 * A pair of subnet routes might have been promoted so that they
	 * are legal to send by RIPv1.
	 * If RIPv1 is off, use the multicast buffer.
	 */
	if ((ws.state & WS_ST_RIP2_ALL)
	    || ((ag->ag_state & AGS_RIPV2) && v1_mask != mask)) {
		/* use the RIPv2-only buffer */
		wb = &v2buf;

	} else {
		/* use the RIPv1-or-RIPv2 buffer */
		wb = &v12buf;

		/* Convert supernet route into corresponding set of network
		 * routes for RIPv1, but leave non-contiguous netmasks
		 * to ag_check().
		 */
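		/* For example (hypothetical numbers), a supernet such as
		 * 10.0.0.0/7 seen where the natural RIPv1 mask is /8 gives
		 * ddst_h = 0x01000000 and i = 1, so the loop below emits
		 * the two class-A routes 10.0.0.0 and 11.0.0.0.
		 */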
		if (v1_mask > mask
		    && mask + (mask & -mask) == 0) {
			ddst_h = v1_mask & -v1_mask;
			i = (v1_mask & ~mask)/ddst_h;

			if (i > ws.gen_limit) {
				/* Punt if we would have to generate an
				 * unreasonable number of routes.
				 */
				if (TRACECONTENTS)
					trace_misc("sending %s-->%s as 1"
						   " instead of %d routes",
						   addrname(htonl(dst_h), mask,
							1),
						   naddr_ntoa(ws.to.sin_addr
							.s_addr),
						   i+1);
				i = 0;

			} else {
				mask = v1_mask;
				ws.gen_limit -= i;
			}
		}
	}

	do {
		wb->n->n_family = RIP_AF_INET;
		wb->n->n_dst = htonl(dst_h);
		/* If the route is from router-discovery or we are
		 * shutting down, admit only a bad metric.
		 */
		wb->n->n_metric = ((stopint || ag->ag_metric < 1)
				   ? HOPCNT_INFINITY
				   : ag->ag_metric);
		wb->n->n_metric = htonl(wb->n->n_metric);
		/* Any non-zero bits in the supposedly unused RIPv1 fields
		 * cause the old `routed` to ignore the route.
		 * That means the mask and so forth cannot be sent
		 * in the hybrid RIPv1/RIPv2 mode.
		 */
		if (ws.state & WS_ST_RIP2_ALL) {
			if (ag->ag_nhop != 0
			    && ((ws.state & WS_ST_QUERY)
				|| (ag->ag_nhop != ws.ifp->int_addr
				    && on_net(ag->ag_nhop,
					      ws.ifp->int_net,
					      ws.ifp->int_mask))))
				wb->n->n_nhop = ag->ag_nhop;
			wb->n->n_mask = htonl(mask);
			wb->n->n_tag = ag->ag_tag;
		}
		dst_h += ddst_h;

		if (++wb->n >= wb->lim)
			supply_write(wb);
	} while (i-- != 0);
}


/* supply one route from the table
 */
/* ARGSUSED */
static int
walk_supply(struct radix_node *rn,
	    struct walkarg *argp UNUSED)
{
#define RT ((struct rt_entry *)rn)
	u_short ags;
	char metric, pref;
	naddr dst, nhop;
	struct rt_spare *rts;
	int i;


	/* Do not advertise external remote interfaces or passive interfaces.
	 */
	if ((RT->rt_state & RS_IF)
	    && RT->rt_ifp != 0
	    && (RT->rt_ifp->int_state & IS_PASSIVE)
	    && !(RT->rt_state & RS_MHOME))
		return 0;

	/* If being quiet about our ability to forward, then
	 * do not say anything unless responding to a query,
	 * except about our main interface.
	 */
	if (!supplier && !(ws.state & WS_ST_QUERY)
	    && !(RT->rt_state & RS_MHOME))
		return 0;

	dst = RT->rt_dst;

	/* do not collide with the fake default route */
	if (dst == RIP_DEFAULT
	    && (ws.state & WS_ST_DEFAULT))
		return 0;

	if (RT->rt_state & RS_NET_SYN) {
		if (RT->rt_state & RS_NET_INT) {
			/* Do not send manual synthetic network routes
			 * into the subnet.
			 */
			if (on_net(ws.to.sin_addr.s_addr,
				   ntohl(dst), RT->rt_mask))
				return 0;

		} else {
			/* Do not send automatic synthetic network routes
			 * if they are not needed because no RIPv1 listeners
			 * can hear them.
			 */
			if (ws.state & WS_ST_RIP2_ALL)
				return 0;

			/* Do not send automatic synthetic network routes to
			 * the real subnet.
			 */
			if (on_net(ws.to.sin_addr.s_addr,
				   ntohl(dst), RT->rt_mask))
				return 0;
		}
		nhop = 0;

	} else {
		/* Advertise the next hop if this is not a route for one
		 * of our interfaces and the next hop is on the same
		 * network as the target.
		 * The final determination is made by supply_out().
		 */
		if (!(RT->rt_state & RS_IF)
		    && RT->rt_gate != myaddr
		    && RT->rt_gate != loopaddr)
			nhop = RT->rt_gate;
		else
			nhop = 0;
	}

	metric = RT->rt_metric;
	ags = 0;

	if (RT->rt_state & RS_MHOME) {
		/* retain host route of multi-homed servers */
		;

	} else if (RT_ISHOST(RT)) {
		/* We should always suppress (into existing network routes)
		 * the host routes for the local end of our point-to-point
		 * links.
		 * If we are suppressing host routes in general, then do so.
		 * Avoid advertising host routes onto their own network,
		 * where they should be handled by proxy-ARP.
		 */
		if ((RT->rt_state & RS_LOCAL)
		    || ridhosts
		    || on_net(dst, ws.to_net, ws.to_mask))
			ags |= AGS_SUPPRESS;

		/* Aggregate stray host routes into network routes if allowed.
		 * We cannot aggregate host routes into small network routes
		 * without confusing RIPv1 listeners into thinking the
		 * network routes are host routes.
		 */
		if ((ws.state & WS_ST_AG) && (ws.state & WS_ST_RIP2_ALL))
			ags |= AGS_AGGREGATE;

	} else {
		/* Always suppress network routes into other, existing
		 * network routes
		 */
		ags |= AGS_SUPPRESS;

		/* Generate supernets if allowed.
		 * If we can be heard by RIPv1 systems, we will
		 * later convert back to ordinary nets.
		 * This unifies dealing with received supernets.
		 */
		if ((ws.state & WS_ST_AG)
		    && ((RT->rt_state & RS_SUBNET)
			|| (ws.state & WS_ST_SUPER_AG)))
			ags |= AGS_AGGREGATE;
	}

	/* Do not send RIPv1 advertisements of subnets to other
	 * networks. If possible, multicast them by RIPv2.
	 */
	if ((RT->rt_state & RS_SUBNET)
	    && !(ws.state & WS_ST_RIP2_ALL)
	    && !on_net(dst, ws.to_std_net, ws.to_std_mask))
		ags |= AGS_RIPV2 | AGS_AGGREGATE;


	/* Do not send a route back to where it came from, except in
	 * response to a query.  This is "split-horizon".  That means not
	 * advertising back to the same network, and so via the same interface.
	 *
	 * We want to suppress routes that might have been fragmented
	 * from this route by a RIPv1 router and sent back to us, and so we
	 * cannot forget this route here.  Let the split-horizon route
	 * suppress the fragmented routes and then itself be forgotten.
	 *
	 * Include the routes for both ends of point-to-point interfaces
	 * among those suppressed by split-horizon, since the other side
	 * should know them as well as we do.
	 *
	 * Notice spare routes with the same metric that we are about to
	 * advertise, to split the horizon on redundant, inactive paths.
	 *
	 * Do not suppress advertisements of interface-related addresses on
	 * non-point-to-point interfaces.  This ensures that we have something
	 * to say every 30 seconds to help detect broken Ethernets or
	 * other interfaces where one packet every 30 seconds costs nothing.
	 */
	if (ws.ifp != 0
	    && !(ws.state & WS_ST_QUERY)
	    && (ws.state & WS_ST_TO_ON_NET)
	    && (!(RT->rt_state & RS_IF)
		|| ws.ifp->int_if_flags & IFF_POINTOPOINT)) {
		for (rts = RT->rt_spares, i = NUM_SPARES; i != 0; i--, rts++) {
			if (rts->rts_metric > metric
			    || rts->rts_ifp != ws.ifp)
				continue;

			/* If we do not mark the route with AGS_SPLIT_HZ here,
			 * it will be poisoned-reverse, or advertised back
			 * toward its source with an infinite metric.
			 * If we have recently advertised the route with a
			 * better metric than we now have, then we should
			 * poison-reverse the route before suppressing it for
			 * split-horizon.
			 *
			 * In almost all cases, if there is no spare for the
			 * route then it is either old and dead or a brand
			 * new route. If it is brand new, there is no need
			 * for poison-reverse. If it is old and dead, it
			 * is already poisoned.
			 */
			if (RT->rt_poison_time < now_expire
			    || RT->rt_poison_metric >= metric
			    || RT->rt_spares[1].rts_gate == 0) {
				ags |= AGS_SPLIT_HZ;
				ags &= ~AGS_SUPPRESS;
			}
			metric = HOPCNT_INFINITY;
			break;
		}
	}

	/* Keep track of the best metric with which the
	 * route has been advertised recently.
	 */
	if (RT->rt_poison_metric >= metric
	    || RT->rt_poison_time < now_expire) {
		RT->rt_poison_time = now.tv_sec;
		RT->rt_poison_metric = metric;
	}

	/* Adjust the outgoing metric by the cost of the link.
	 * Avoid aggregation when a route is counting to infinity.
	 */
	pref = RT->rt_poison_metric + ws.metric;
	metric += ws.metric;

	/* Do not advertise stable routes that will be ignored,
	 * unless we are answering a query.
	 * If the route recently was advertised with a metric that
	 * would have been less than infinity through this interface,
	 * we need to continue to advertise it in order to poison it.
	 */
	if (metric >= HOPCNT_INFINITY) {
		if (!(ws.state & WS_ST_QUERY)
		    && (pref >= HOPCNT_INFINITY
			|| RT->rt_poison_time < now_garbage))
			return 0;

		metric = HOPCNT_INFINITY;
	}

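	/* Hand the route to the aggregation machinery; supply_out() is
	 * invoked, possibly later, to place the resulting entries in the
	 * outgoing packet.
	 */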
	ag_check(dst, RT->rt_mask, 0, nhop, metric, pref,
		 RT->rt_seqno, RT->rt_tag, ags, supply_out);
	return 0;
#undef RT
}


/* Supply dst with the contents of the routing tables.
 * If this won't fit in one packet, chop it up into several.
 */
void
supply(struct sockaddr_in *dst,
       struct interface *ifp,		/* output interface */
       enum output_type type,
       int flash,			/* 1=flash update */
       int vers,			/* RIP version */
       int passwd_ok)			/* OK to include cleartext password */
{
	struct rt_entry *rt;
	int def_metric;

	assert(ifp != NULL);

	ws.state = 0;
	ws.gen_limit = 1024;

	ws.to = *dst;
	ws.to_std_mask = std_mask(ws.to.sin_addr.s_addr);
	ws.to_std_net = ntohl(ws.to.sin_addr.s_addr) & ws.to_std_mask;

	if (ifp != 0) {
		ws.to_mask = ifp->int_mask;
		ws.to_net = ifp->int_net;
		if (on_net(ws.to.sin_addr.s_addr, ws.to_net, ws.to_mask))
			ws.state |= WS_ST_TO_ON_NET;

	} else {
		ws.to_mask = ripv1_mask_net(ws.to.sin_addr.s_addr, 0);
		ws.to_net = ntohl(ws.to.sin_addr.s_addr) & ws.to_mask;
		rt = rtfind(dst->sin_addr.s_addr);
		if (rt)
			ifp = rt->rt_ifp;
	}

	ws.npackets = 0;
	if (flash)
		ws.state |= WS_ST_FLASH;

	if ((ws.ifp = ifp) == 0) {
		ws.metric = 1;
	} else {
		/* Adjust the advertised metric by the outgoing interface
		 * metric.
		 */
		ws.metric = ifp->int_metric + 1 + ifp->int_adj_outmetric;
	}

	ripv12_buf.rip.rip_vers = vers;

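	/* Decide how (or whether) each output buffer may be transmitted
	 * for this kind of request.
	 */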
	switch (type) {
	case OUT_MULTICAST:
		if (ifp->int_if_flags & IFF_MULTICAST)
			v2buf.type = OUT_MULTICAST;
		else
			v2buf.type = NO_OUT_MULTICAST;
		v12buf.type = OUT_BROADCAST;
		break;

	case OUT_QUERY:
		ws.state |= WS_ST_QUERY;
		/* FALLTHROUGH */
	case OUT_BROADCAST:
	case OUT_UNICAST:
		v2buf.type = (vers == RIPv2) ? type : NO_OUT_RIPV2;
		v12buf.type = type;
		break;

	case NO_OUT_MULTICAST:
	case NO_OUT_RIPV2:
		break;			/* no output */
	}

	if (vers == RIPv2) {
		/* full RIPv2 only if cannot be heard by RIPv1 listeners */
		if (type != OUT_BROADCAST)
			ws.state |= WS_ST_RIP2_ALL;
		if ((ws.state & WS_ST_QUERY)
		    || !(ws.state & WS_ST_TO_ON_NET)) {
			ws.state |= (WS_ST_AG | WS_ST_SUPER_AG);
		} else if (ifp == 0 || !(ifp->int_state & IS_NO_AG)) {
			ws.state |= WS_ST_AG;
			if (type != OUT_BROADCAST
			    && (ifp == 0
				|| !(ifp->int_state & IS_NO_SUPER_AG)))
				ws.state |= WS_ST_SUPER_AG;
		}
	}

	ws.a = (vers == RIPv2) ? find_auth(ifp) : 0;
	if (!passwd_ok && ws.a != 0 && ws.a->type == RIP_AUTH_PW)
		ws.a = 0;
	clr_ws_buf(&v12buf,ws.a);
	clr_ws_buf(&v2buf,ws.a);

	/* Fake a default route if asked and if there is not already
	 * a better, real default route.
	 */
	if (supplier && (def_metric = ifp->int_d_metric) != 0) {
		if (0 == (rt = rtget(RIP_DEFAULT, 0))
		    || rt->rt_metric+ws.metric >= def_metric) {
			ws.state |= WS_ST_DEFAULT;
			ag_check(0, 0, 0, 0, def_metric, def_metric,
				 0, 0, 0, supply_out);
		} else {
			def_metric = rt->rt_metric+ws.metric;
		}

		/* If both RIPv2 and the poor-man's router discovery
		 * kludge are on, arrange to advertise an extra
		 * default route via RIPv1.
		 */
		if ((ws.state & WS_ST_RIP2_ALL)
		    && (ifp->int_state & IS_PM_RDISC)) {
			ripv12_buf.rip.rip_vers = RIPv1;
			v12buf.n->n_family = RIP_AF_INET;
			v12buf.n->n_dst = htonl(RIP_DEFAULT);
			v12buf.n->n_metric = htonl(def_metric);
			v12buf.n++;
		}
	}

	(void)rn_walktree(rhead, walk_supply, 0);
	ag_flush(0,0,supply_out);

	/* Flush the packet buffers, provided they are not empty and
	 * do not contain only the password.
	 */
	if (v12buf.n != v12buf.base
	    && (v12buf.n > v12buf.base+1
		|| v12buf.base->n_family != RIP_AF_AUTH))
		supply_write(&v12buf);
	if (v2buf.n != v2buf.base
	    && (v2buf.n > v2buf.base+1
		|| v2buf.base->n_family != RIP_AF_AUTH))
		supply_write(&v2buf);

	/* If we sent nothing and this is an answer to a query, send
	 * an empty buffer.
	 */
	if (ws.npackets == 0
	    && (ws.state & WS_ST_QUERY))
		supply_write(&v12buf);
}


/* send all of the routing table or just do a flash update
 */
void
rip_bcast(int flash)
{
#ifdef _HAVE_SIN_LEN
	static struct sockaddr_in dst = {sizeof(dst), AF_INET, 0, {0}, {0}};
#else
	static struct sockaddr_in dst = {AF_INET};
#endif
	struct interface *ifp;
	enum output_type type;
	int vers;
	struct timeval rtime;


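	/* Pick a random interval during which dynamic (flash) updates
	 * are inhibited; the trace message below reports it.
	 */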
	need_flash = 0;
	intvl_random(&rtime, MIN_WAITTIME, MAX_WAITTIME);
	no_flash = rtime;
	timevaladd(&no_flash, &now);

	if (rip_sock < 0)
		return;

	trace_act("send %s and inhibit dynamic updates for %.3f sec",
		  flash ? "dynamic update" : "all routes",
		  rtime.tv_sec + ((float)rtime.tv_usec)/1000000.0);

	LIST_FOREACH(ifp, &ifnet, int_list) {
		/* Skip interfaces not doing RIP.
		 * Do try broken interfaces to see if they have healed.
		 */
		if (IS_RIP_OUT_OFF(ifp->int_state))
			continue;

		/* skip turned off interfaces */
		if (!iff_up(ifp->int_if_flags))
			continue;

		vers = (ifp->int_state & IS_NO_RIPV1_OUT) ? RIPv2 : RIPv1;

		if (ifp->int_if_flags & IFF_BROADCAST) {
			/* ordinary, hardware interface */
			dst.sin_addr.s_addr = ifp->int_brdaddr;

			if (vers == RIPv2
			    && !(ifp->int_state & IS_NO_RIP_MCAST)) {
				type = OUT_MULTICAST;
			} else {
				type = OUT_BROADCAST;
			}

		} else if (ifp->int_if_flags & IFF_POINTOPOINT) {
			/* point-to-point hardware interface */
			dst.sin_addr.s_addr = ifp->int_dstaddr;
			if (vers == RIPv2 &&
			    ifp->int_if_flags & IFF_MULTICAST &&
			    !(ifp->int_state & IS_NO_RIP_MCAST)) {
				type = OUT_MULTICAST;
			} else {
				type = OUT_UNICAST;
			}

		} else if (ifp->int_state & IS_REMOTE) {
			/* remote interface */
			dst.sin_addr.s_addr = ifp->int_addr;
			type = OUT_UNICAST;

		} else {
			/* ATM, HIPPI, etc. */
			continue;
		}

		supply(&dst, ifp, type, flash, vers, 1);
	}

	update_seqno++;			/* all routes are up to date */
}


/* Ask for routes
 * Do it only once to an interface, and not even after the interface
 * was broken and recovered.
 */
void
rip_query(void)
{
#ifdef _HAVE_SIN_LEN
	static struct sockaddr_in dst = {sizeof(dst), AF_INET, 0, {0}, {0}};
#else
	static struct sockaddr_in dst = {AF_INET};
#endif
	struct interface *ifp;
	struct rip buf;
	enum output_type type;


	if (rip_sock < 0)
		return;

	memset(&buf, 0, sizeof(buf));

	LIST_FOREACH(ifp, &ifnet, int_list) {
		/* Skip interfaces already queried.
		 * Do not ask via interfaces through which we don't
		 * accept input.  Do not ask via interfaces that cannot
		 * send RIP packets.
		 * Do try broken interfaces to see if they have healed.
		 */
		if (IS_RIP_IN_OFF(ifp->int_state)
		    || ifp->int_query_time != NEVER)
			continue;

		/* skip turned off interfaces */
		if (!iff_up(ifp->int_if_flags))
			continue;

		buf.rip_vers = (ifp->int_state&IS_NO_RIPV1_OUT) ? RIPv2:RIPv1;
		buf.rip_cmd = RIPCMD_REQUEST;
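		/* A single entry with family AF_UNSPEC and an infinite
		 * metric is the RIP convention for requesting the peer's
		 * entire routing table.
		 */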
		buf.rip_nets[0].n_family = RIP_AF_UNSPEC;
		buf.rip_nets[0].n_metric = htonl(HOPCNT_INFINITY);

		/* Send a RIPv1 query only if allowed and if we will
		 * listen to RIPv1 routers.
		 */
		if ((ifp->int_state & IS_NO_RIPV1_OUT)
		    || (ifp->int_state & IS_NO_RIPV1_IN)) {
			buf.rip_vers = RIPv2;
		} else {
			buf.rip_vers = RIPv1;
		}

		if (ifp->int_if_flags & IFF_BROADCAST) {
			/* ordinary, hardware interface */
			dst.sin_addr.s_addr = ifp->int_brdaddr;

			/* Broadcast RIPv1 queries and RIPv2 queries
			 * when the hardware cannot multicast.
			 */
			if (buf.rip_vers == RIPv2
			    && (ifp->int_if_flags & IFF_MULTICAST)
			    && !(ifp->int_state & IS_NO_RIP_MCAST)) {
				type = OUT_MULTICAST;
			} else {
				type = OUT_BROADCAST;
			}

		} else if (ifp->int_if_flags & IFF_POINTOPOINT) {
			/* point-to-point hardware interface */
			dst.sin_addr.s_addr = ifp->int_dstaddr;
			type = OUT_UNICAST;

		} else if (ifp->int_state & IS_REMOTE) {
			/* remote interface */
			dst.sin_addr.s_addr = ifp->int_addr;
			type = OUT_UNICAST;

		} else {
			/* ATM, HIPPI, etc. */
			continue;
		}

		ifp->int_query_time = now.tv_sec+SUPPLY_INTERVAL;
		if (output(type, &dst, ifp, &buf, sizeof(buf)) < 0)
			if_sick(ifp);
	}
}
