/* * Copyright 2007 Sun Microsystems, Inc. All rights reserved. * Use is subject to license terms. * * Copyright (c) 1983, 1988, 1993 * The Regents of the University of California. All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * 3. All advertising materials mentioning features or use of this software * must display the following acknowledgment: * This product includes software developed by the University of * California, Berkeley and its contributors. * 4. Neither the name of the University nor the names of its contributors * may be used to endorse or promote products derived from this software * without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. 
* * $FreeBSD: src/sbin/routed/output.c,v 1.7 2000/08/11 08:24:38 sheldonh Exp $ */ #include "defs.h" #include #include uint_t update_seqno; /* * walk the tree of routes with this for output */ static struct { struct sockaddr_in to; in_addr_t to_mask; in_addr_t to_net; in_addr_t to_std_mask; in_addr_t to_std_net; struct interface *ifp; /* usually output interface */ struct auth *a; uint8_t metric; /* adjust metrics by interface */ uint32_t npackets; uint32_t gen_limit; #define WS_GEN_LIMIT_MAX 1024 uint16_t state; #define WS_ST_FLASH 0x001 /* send only changed routes */ #define WS_ST_RIP2_ALL 0x002 /* send full featured RIPv2 */ #define WS_ST_AG 0x004 /* ok to aggregate subnets */ #define WS_ST_SUPER_AG 0x008 /* ok to aggregate networks */ #define WS_ST_QUERY 0x010 /* responding to a query */ #define WS_ST_TO_ON_NET 0x020 /* sending onto one of our nets */ #define WS_ST_DEFAULT 0x040 /* faking a default */ } ws; /* A buffer for what can be heard by both RIPv1 and RIPv2 listeners */ struct ws_buf v12buf; static union pkt_buf ripv12_buf; /* Another for only RIPv2 listeners */ static struct ws_buf v2buf; static union pkt_buf rip_v2_buf; void bufinit(void) { ripv12_buf.rip.rip_cmd = RIPCMD_RESPONSE; v12buf.buf = &ripv12_buf.rip; v12buf.base = &v12buf.buf->rip_nets[0]; rip_v2_buf.rip.rip_cmd = RIPCMD_RESPONSE; rip_v2_buf.rip.rip_vers = RIPv2; v2buf.buf = &rip_v2_buf.rip; v2buf.base = &v2buf.buf->rip_nets[0]; } /* * Send the contents of the global buffer via the non-multicast socket */ int /* <0 on failure */ output(enum output_type type, struct sockaddr_in *dst, /* send to here */ struct interface *ifp, struct rip *buf, int size) /* this many bytes */ { struct sockaddr_in sin; int flags; const char *msg; int res; int ifindex; struct in_addr addr; sin = *dst; if (sin.sin_port == 0) sin.sin_port = htons(RIP_PORT); flags = 0; if (ifp == NULL && type == OUT_MULTICAST) { msglog("Cannot send RIP message to %s", inet_ntoa(sin.sin_addr)); return (-1); } switch (type) { case 
OUT_QUERY: msg = "Answer Query"; break; case OUT_UNICAST: msg = "Send"; flags = MSG_DONTROUTE; break; case OUT_BROADCAST: msg = "Send bcast"; break; case OUT_MULTICAST: msg = "Send mcast"; break; case NO_OUT_MULTICAST: case NO_OUT_RIPV2: default: #ifdef DEBUG abort(); #endif return (-1); } /* * IP_PKTINFO overrides IP_MULTICAST_IF, so we don't set ifindex * for multicast traffic. */ ifindex = (type != OUT_MULTICAST && type != OUT_QUERY && ifp != NULL && ifp->int_phys != NULL) ? ifp->int_phys->phyi_index : 0; if (rip_sock_interface != ifp) { /* * For multicast, we have to choose the source * address. This is either the local address * (non-point-to-point) or the remote address. */ if (ifp != NULL) { addr.s_addr = (ifp->int_if_flags & IFF_POINTOPOINT) ? ifp->int_dstaddr : ifp->int_addr; if (type == OUT_MULTICAST && setsockopt(rip_sock, IPPROTO_IP, IP_MULTICAST_IF, &addr, sizeof (addr)) == -1) { LOGERR("setsockopt(rip_sock, IP_MULTICAST_IF)"); return (-1); } } rip_sock_interface = ifp; } trace_rip(msg, "to", &sin, ifp, buf, size); res = sendtoif(rip_sock, buf, size, flags, &sin, ifindex); if (res < 0 && (ifp == NULL || !(ifp->int_state & IS_BROKE))) { writelog(LOG_WARNING, "%s sendto(%s%s%s.%d): %s", msg, ifp != NULL ? ifp->int_name : "", ifp != NULL ? ", " : "", inet_ntoa(sin.sin_addr), ntohs(sin.sin_port), rip_strerror(errno)); } return (res); } /* * Semantically identical to sendto(), but sends the message through a * specific interface (if ifindex is non-zero) using IP_PKTINFO. 
 */
int
sendtoif(int fd, const void *buf, uint_t bufsize, uint_t flags,
    struct sockaddr_in *sinp, uint_t ifindex)
{
	struct iovec iov;
	struct msghdr msg;
	struct cmsghdr *cmsgp;
	struct in_pktinfo *ipip;

	/* Single-element scatter/gather: the whole RIP packet at once. */
	iov.iov_base = (void *)buf;
	iov.iov_len = bufsize;

	(void) memset(&msg, 0, sizeof (struct msghdr));
	msg.msg_name = (struct sockaddr *)sinp;
	msg.msg_namelen = sizeof (struct sockaddr_in);
	msg.msg_iov = &iov;
	msg.msg_iovlen = 1;

	if (ifindex != 0) {
		/*
		 * We can't precisely predict the alignment padding we'll
		 * need, so we allocate the maximum alignment and then
		 * use CMSG_NXTHDR() to fix it up at the end.
		 */
		msg.msg_controllen = sizeof (*cmsgp) + _MAX_ALIGNMENT +
		    sizeof (*ipip) + _MAX_ALIGNMENT + sizeof (*cmsgp);
		/* Stack allocation: control buffer lives only for sendmsg. */
		msg.msg_control = alloca(msg.msg_controllen);
		cmsgp = CMSG_FIRSTHDR(&msg);
		ipip = (void *)CMSG_DATA(cmsgp);
		(void) memset(ipip, 0, sizeof (struct in_pktinfo));
		/* Only the ifindex is set; source address is left to IP. */
		ipip->ipi_ifindex = ifindex;
		cmsgp->cmsg_len = (caddr_t)(ipip + 1) - (caddr_t)cmsgp;
		cmsgp->cmsg_type = IP_PKTINFO;
		cmsgp->cmsg_level = IPPROTO_IP;

		/*
		 * Correct the control message length.
		 */
		cmsgp = CMSG_NXTHDR(&msg, cmsgp);
		msg.msg_controllen = (caddr_t)cmsgp - (caddr_t)msg.msg_control;
	}

	return (sendmsg(fd, &msg, flags));
}

/*
 * Find the first key for a packet to send.
 * Try for a key that is eligible and has not expired, but settle for
 * the last key if they have all expired.
 * If no key is ready yet, give up.
 */
struct auth *
find_auth(struct interface *ifp)
{
	struct auth *ap, *res = NULL;
	int i;

	if (ifp == NULL)
		return (NULL);

	if ((ap = ifp->int_auth) == NULL)
		return (NULL);

	for (i = 0; i < MAX_AUTH_KEYS; i++, ap++) {
		/* stop looking after the last key */
		if (ap->type == RIP_AUTH_NONE)
			break;

		/* ignore keys that are not ready yet */
		if ((ulong_t)ap->start > (ulong_t)clk.tv_sec)
			continue;

		if ((ulong_t)ap->end < (ulong_t)clk.tv_sec) {
			/*
			 * note best expired password as a fall-back
			 *
			 * NOTE(review): && binds tighter than ||, so this
			 * reads "res is NULL, OR (ap expired later than res
			 * AND res itself is expired)" — confirm that is the
			 * intended grouping; the extra parens suggest it is.
			 */
			if (res == NULL ||
			    (((ulong_t)ap->end > (ulong_t)res->end)) &&
			    ((ulong_t)res->end < (ulong_t)clk.tv_sec))
				res = ap;
			continue;
		}

		/* note key with the best future */
		if (res == NULL ||
		    (ulong_t)res->end < (ulong_t)ap->end)
			res = ap;
	}
	return (res);
}

/*
 * Reset a ws_buf to empty and, when an authentication key is supplied,
 * pre-install the leading authentication entry (cleartext password or
 * MD5 header).  For MD5, one slot is reserved at the end for the trailer
 * that end_md5_auth() appends.
 */
void
clr_ws_buf(struct ws_buf *wb, struct auth *ap)
{
	struct netauth *na;

	wb->lim = wb->base + NETS_LEN;
	wb->n = wb->base;
	(void) memset(wb->n, 0, NETS_LEN*sizeof (*wb->n));

	/*
	 * (start to) install authentication if appropriate
	 */
	if (ap == NULL)
		return;

	na = (struct netauth *)wb->n;
	if (ap->type == RIP_AUTH_PW) {
		/* Cleartext password occupies the first route entry. */
		na->a_family = RIP_AF_AUTH;
		na->a_type = RIP_AUTH_PW;
		(void) memcpy(na->au.au_pw, ap->key, sizeof (na->au.au_pw));
		wb->n++;

	} else if (ap->type == RIP_AUTH_MD5) {
		na->a_family = RIP_AF_AUTH;
		na->a_type = RIP_AUTH_MD5;
		na->au.a_md5.md5_keyid = ap->keyid;
		na->au.a_md5.md5_auth_len = RIP_AUTH_MD5_LEN;
		/* Current time serves as the anti-replay sequence number. */
		na->au.a_md5.md5_seqno = htonl(clk.tv_sec);
		wb->n++;
		wb->lim--;		/* make room for trailer */
	}
}

/*
 * Finish an MD5-authenticated packet: fill in the packet length in the
 * leading auth header, append the trailer entry, and store the MD5 digest
 * of (packet so far + key) in the trailer.  Must be called before the
 * buffer is transmitted (see supply_write()).
 */
void
end_md5_auth(struct ws_buf *wb, struct auth *ap)
{
	struct netauth *na, *na2;
	MD5_CTX md5_ctx;
	int len;

	na = (struct netauth *)wb->base;	/* leading MD5 header */
	na2 = (struct netauth *)wb->n;		/* trailer slot */
	len = (char *)na2-(char *)wb->buf;
	na2->a_family = RIP_AF_AUTH;
	na2->a_type = RIP_AUTH_TRAILER;
	na->au.a_md5.md5_pkt_len = htons(len);
	MD5Init(&md5_ctx);
	/* len+4 to include auth trailer's family/type in MD5 sum */
	MD5Update(&md5_ctx, (uchar_t *)wb->buf, len + 4);
	MD5Update(&md5_ctx, ap->key, RIP_AUTH_MD5_LEN);
	MD5Final(na2->au.au_pw, &md5_ctx);
	wb->n++;
}

/*
 * Send the buffer
 */
/*
 * Flush one output buffer onto the wire (unless its type marks it as
 * illegal to send), then reset it via clr_ws_buf() for the next fill.
 * Failed sends mark the interface sick.
 */
static void
supply_write(struct ws_buf *wb)
{
	/*
	 * Output multicast only if legal.
	 * If we would multicast and it would be illegal, then discard the
	 * packet.
	 */
	switch (wb->type) {
	case NO_OUT_MULTICAST:
		trace_pkt("skip multicast to %s because impossible",
		    naddr_ntoa(ws.to.sin_addr.s_addr));
		break;
	case NO_OUT_RIPV2:
		break;
	default:
		/* Seal the MD5 trailer before the packet leaves. */
		if (ws.a != NULL && ws.a->type == RIP_AUTH_MD5)
			end_md5_auth(wb, ws.a);
		if (output(wb->type, &ws.to, ws.ifp, wb->buf,
		    ((char *)wb->n - (char *)wb->buf)) < 0 &&
		    ws.ifp != NULL)
			if_sick(ws.ifp, _B_FALSE);
		ws.npackets++;
		break;
	}

	clr_ws_buf(wb, ws.a);
}

/*
 * Put an entry into the packet
 *
 * Callback invoked by ag_check()/ag_flush() with one aggregated route.
 * Chooses the RIPv2-only or RIPv1/v2 buffer, possibly expands a supernet
 * into multiple classful RIPv1 routes, and flushes the buffer when full.
 */
static void
supply_out(struct ag_info *ag)
{
	uint32_t dstcount;
	in_addr_t mask, v1_mask, dst_h, ddst_h = 0;
	struct ws_buf *wb;

	/*
	 * Skip this route if doing a flash update and it and the routes
	 * it aggregates have not changed recently.
	 */
	if (ag->ag_seqno < update_seqno && (ws.state & WS_ST_FLASH))
		return;

	dst_h = ag->ag_dst_h;
	mask = ag->ag_mask;
	v1_mask = ripv1_mask_host(htonl(dst_h),
	    (ws.state & WS_ST_TO_ON_NET) ? ws.ifp : NULL);
	dstcount = 0;

	/*
	 * If we are sending RIPv2 packets that cannot (or must not) be
	 * heard by RIPv1 listeners, do not worry about sub- or supernets.
	 * Subnets (from other networks) can only be sent via multicast.
	 * A pair of subnet routes might have been promoted so that they
	 * are legal to send by RIPv1.
	 * If RIPv1 is off, use the multicast buffer.
	 */
	if ((ws.state & WS_ST_RIP2_ALL) ||
	    ((ag->ag_state & AGS_RIPV2) && v1_mask != mask)) {
		/* use the RIPv2-only buffer */
		wb = &v2buf;

	} else {
		/*
		 * use the RIPv1-or-RIPv2 buffer
		 */
		wb = &v12buf;

		/*
		 * Convert supernet route into corresponding set of network
		 * routes for RIPv1, but leave non-contiguous netmasks
		 * to ag_check().
		 */
		if (v1_mask > mask &&
		    mask + (mask & -mask) == 0) {
			/* step between generated nets; count of extras */
			ddst_h = v1_mask & -v1_mask;
			dstcount = (v1_mask & ~mask)/ddst_h;

			if (dstcount > ws.gen_limit) {
				/*
				 * Punt if we would have to generate an
				 * unreasonable number of routes.
				 */
				if (TRACECONTENTS)
					trace_misc("sending %s-->%s as 1"
					    " instead of %d routes",
					    addrname(htonl(dst_h), mask, 1),
					    naddr_ntoa(ws.to.sin_addr.s_addr),
					    dstcount + 1);
				dstcount = 0;

			} else {
				mask = v1_mask;
				ws.gen_limit -= dstcount;
			}
		}
	}

	/* Emit one entry per generated network (dstcount+1 total). */
	do {
		wb->n->n_family = RIP_AF_INET;
		wb->n->n_dst = htonl(dst_h);

		/*
		 * If the route is from router-discovery or we are
		 * shutting down, or this is a broken/sick interface,
		 * admit only a bad metric.
		 */
		wb->n->n_metric = ((stopint || ag->ag_metric < 1 ||
		    (ag->ag_ifp &&
		    (ag->ag_ifp->int_state & (IS_BROKE|IS_SICK)))) ?
		    HOPCNT_INFINITY : ag->ag_metric);
		wb->n->n_metric = htonl(wb->n->n_metric);

		/*
		 * Any non-zero bits in the supposedly unused RIPv1 fields
		 * cause the old `routed` to ignore the route.
		 * That means the mask and so forth cannot be sent
		 * in the hybrid RIPv1/RIPv2 mode.
		 */
		if (ws.state & WS_ST_RIP2_ALL) {
			if (ag->ag_nhop != 0 &&
			    ((ws.state & WS_ST_QUERY) ||
			    (ag->ag_nhop != ws.ifp->int_addr &&
			    on_net(ag->ag_nhop, ws.ifp->int_net,
			    ws.ifp->int_mask)) &&
			    ifwithaddr(ag->ag_nhop, _B_FALSE, _B_FALSE) ==
			    NULL))
				wb->n->n_nhop = ag->ag_nhop;
			wb->n->n_mask = htonl(mask);
			wb->n->n_tag = ag->ag_tag;
		}
		dst_h += ddst_h;

		if (++wb->n >= wb->lim)
			supply_write(wb);
	} while (dstcount-- > 0);
}

/*
 * Supply one route from the table
 *
 * rn_walktree() callback (see supply()): decides whether and how the
 * route in `rn' should be advertised — applies split-horizon,
 * poison-reverse, aggregation flags, and metric adjustment — then hands
 * it to ag_check(), which eventually calls supply_out().
 */
/* ARGSUSED */
static int
walk_supply(struct radix_node *rn, void *argp)
{
#define	RT ((struct rt_entry *)rn)
	ushort_t ags;
	uint8_t metric, pref;
	in_addr_t dst, nhop;
	struct rt_spare *rts;
	uint_t sparecount;

	/*
	 * Do not advertise external remote interfaces or passive interfaces.
	 */
	if ((RT->rt_state & RS_IF) &&
	    RT->rt_ifp != NULL &&
	    (RT->rt_ifp->int_state & IS_PASSIVE) &&
	    !(RT->rt_state & RS_MHOME))
		return (0);

	/*
	 * Do not advertise routes learnt from /etc/gateways.
	 */
	if (RT->rt_spares[0].rts_origin == RO_FILE)
		return (0);

	/*
	 * Do not advertise routes which would lead to forwarding on a
	 * non-forwarding interface.
	 */
	if (RT->rt_state & RS_NOPROPAGATE)
		return (0);

	/*
	 * If being quiet about our ability to forward, then
	 * do not say anything unless responding to a query,
	 * except about our main interface.
	 */
	if (!should_supply(NULL) && !(ws.state & WS_ST_QUERY) &&
	    !(RT->rt_state & RS_MHOME))
		return (0);

	dst = RT->rt_dst;

	/*
	 * do not collide with the fake default route
	 */
	if (dst == RIP_DEFAULT &&
	    (ws.state & WS_ST_DEFAULT))
		return (0);

	if (RT->rt_state & RS_NET_SYN) {
		if (RT->rt_state & RS_NET_INT) {
			/*
			 * Do not send manual synthetic network routes
			 * into the subnet.
			 */
			if (on_net(ws.to.sin_addr.s_addr,
			    ntohl(dst), RT->rt_mask))
				return (0);

		} else {
			/*
			 * Do not send automatic synthetic network routes
			 * if they are not needed because no RIPv1 listeners
			 * can hear them.
			 */
			if (ws.state & WS_ST_RIP2_ALL)
				return (0);

			/*
			 * Do not send automatic synthetic network routes to
			 * the real subnet.
			 */
			if (on_net(ws.to.sin_addr.s_addr,
			    ntohl(dst), RT->rt_mask))
				return (0);
		}
		nhop = 0;

	} else {
		/*
		 * Advertise the next hop if this is not a route for one
		 * of our interfaces and the next hop is on the same
		 * network as the target.
		 * The final determination is made by supply_out().
		 */
		if (!(RT->rt_state & RS_IF) &&
		    !(RT->rt_state & RS_MHOME) &&
		    RT->rt_gate != loopaddr)
			nhop = RT->rt_gate;
		else
			nhop = 0;
	}

	metric = RT->rt_metric;
	ags = 0;

	if (!RT_ISHOST(RT)) {
		/*
		 * Always suppress network routes into other, existing
		 * network routes
		 */
		ags |= AGS_SUPPRESS;

		/*
		 * Generate supernets if allowed.
		 * If we can be heard by RIPv1 systems, we will
		 * later convert back to ordinary nets.
		 * This unifies dealing with received supernets.
		 */
		if ((ws.state & WS_ST_AG) &&
		    ((RT->rt_state & RS_SUBNET) ||
		    (ws.state & WS_ST_SUPER_AG)))
			ags |= AGS_AGGREGATE;

	} else if (!(RT->rt_state & RS_MHOME)) {
		/*
		 * We should always suppress (into existing network routes)
		 * the host routes for the local end of our point-to-point
		 * links.
		 * If we are suppressing host routes in general, then do so.
		 * Avoid advertising host routes onto their own network,
		 * where they should be handled by proxy-ARP.
		 */
		if ((RT->rt_state & RS_LOCAL) ||
		    ridhosts ||
		    on_net(dst, ws.to_net, ws.to_mask))
			ags |= AGS_SUPPRESS;

		/*
		 * Aggregate stray host routes into network routes if allowed.
		 * We cannot aggregate host routes into small network routes
		 * without confusing RIPv1 listeners into thinking the
		 * network routes are host routes.
		 */
		if ((ws.state & WS_ST_AG) && (ws.state & WS_ST_RIP2_ALL))
			ags |= AGS_AGGREGATE;
	}

	/*
	 * Do not send RIPv1 advertisements of subnets to other
	 * networks. If possible, multicast them by RIPv2.
	 */
	if ((RT->rt_state & RS_SUBNET) &&
	    !(ws.state & WS_ST_RIP2_ALL) &&
	    !on_net(dst, ws.to_std_net, ws.to_std_mask))
		ags |= AGS_RIPV2 | AGS_AGGREGATE;

	/*
	 * Do not send a route back to where it came from, except in
	 * response to a query. This is "split-horizon". That means not
	 * advertising back to the same network and so via the same interface.
	 *
	 * We want to suppress routes that might have been fragmented
	 * from this route by a RIPv1 router and sent back to us, and so we
	 * cannot forget this route here. Let the split-horizon route
	 * suppress the fragmented routes and then itself be forgotten.
	 *
	 * Include the routes for both ends of point-to-point interfaces
	 * among those suppressed by split-horizon, since the other side
	 * should knows them as well as we do.
	 *
	 * Notice spare routes with the same metric that we are about to
	 * advertise, to split the horizon on redundant, inactive paths.
	 */
	if (ws.ifp != NULL && !(ws.state & WS_ST_QUERY) &&
	    (ws.state & WS_ST_TO_ON_NET) &&
	    (!(RT->rt_state & RS_IF) ||
	    (ws.ifp->int_if_flags & IFF_POINTOPOINT))) {
		for (rts = RT->rt_spares, sparecount = 0;
		    sparecount < RT->rt_num_spares; sparecount++, rts++) {
			if (rts->rts_metric > metric ||
			    rts->rts_ifp != ws.ifp)
				continue;

			/*
			 * If we do not mark the route with AGS_SPLIT_HZ here,
			 * it will be poisoned-reverse, or advertised back
			 * toward its source with an infinite metric.
			 * If we have recently advertised the route with a
			 * better metric than we now have, then we should
			 * poison-reverse the route before suppressing it for
			 * split-horizon.
			 *
			 * In almost all cases, if there is no spare for the
			 * route then it is either old and dead or a brand
			 * new route. If it is brand new, there is no need
			 * for poison-reverse. If it is old and dead, it
			 * is already poisoned.
			 */
			if (RT->rt_poison_time < now_expire ||
			    RT->rt_poison_metric >= metric ||
			    RT->rt_spares[1].rts_gate == 0) {
				ags |= AGS_SPLIT_HZ;
				ags &= ~AGS_SUPPRESS;
			}
			metric = HOPCNT_INFINITY;
			break;
		}
	}

	/*
	 * Keep track of the best metric with which the
	 * route has been advertised recently.
	 */
	if (RT->rt_poison_metric >= metric ||
	    RT->rt_poison_time < now_expire) {
		RT->rt_poison_time = now.tv_sec;
		RT->rt_poison_metric = metric;
	}

	/*
	 * Adjust the outgoing metric by the cost of the link.
	 * Avoid aggregation when a route is counting to infinity.
	 */
	pref = RT->rt_poison_metric + ws.metric;
	metric += ws.metric;

	/*
	 * If this is a static route pointing to the same interface
	 * upon which we are sending out the RIP RESPONSE
	 * adjust the preference so that we don't aggregate into this
	 * route. Note that the maximum possible hop count on a route
	 * per RFC 2453 is 16 (HOPCNT_INFINITY)
	 */
	if ((RT->rt_state & RS_STATIC) && (ws.ifp == RT->rt_ifp))
		pref = (HOPCNT_INFINITY+1);

	/*
	 * Do not advertise stable routes that will be ignored,
	 * unless we are answering a query.
	 * If the route recently was advertised with a metric that
	 * would have been less than infinity through this interface,
	 * we need to continue to advertise it in order to poison it.
	 */
	if (metric >= HOPCNT_INFINITY) {
		if (!(ws.state & WS_ST_QUERY) && (pref >= HOPCNT_INFINITY ||
		    RT->rt_poison_time < now_garbage))
			return (0);

		metric = HOPCNT_INFINITY;
	}

	/*
	 * supply this route out on the wire- we only care about dest/mask
	 * and so can ignore all rt_spares[i] with i > 0
	 */
	ag_check(dst, RT->rt_mask, 0, RT->rt_ifp, nhop, metric, pref,
	    RT->rt_seqno, RT->rt_tag, ags, supply_out);

	return (0);
#undef RT
}

/*
 * Supply dst with the contents of the routing tables.
 * If this won't fit in one packet, chop it up into several.
 */
void
supply(struct sockaddr_in *dst,
    struct interface *ifp,		/* output interface */
    enum output_type type,
    int flash,				/* 1=flash update */
    int vers,				/* RIP version */
    boolean_t passwd_ok)		/* OK to include cleartext password */
{
	struct rt_entry *rt;
	uint8_t def_metric;

	ws.state = 0;
	ws.gen_limit = WS_GEN_LIMIT_MAX;

	ws.to = *dst;
	ws.to_std_mask = std_mask(ws.to.sin_addr.s_addr);
	ws.to_std_net = ntohl(ws.to.sin_addr.s_addr) & ws.to_std_mask;

	if (ifp != NULL) {
		ws.to_mask = ifp->int_mask;
		ws.to_net = ifp->int_net;
		if (on_net(ws.to.sin_addr.s_addr, ws.to_net, ws.to_mask) ||
		    type == OUT_MULTICAST)
			ws.state |= WS_ST_TO_ON_NET;

	} else {
		/* No interface given: derive mask and route to the target. */
		ws.to_mask = ripv1_mask_net(ws.to.sin_addr.s_addr, NULL);
		ws.to_net = ntohl(ws.to.sin_addr.s_addr) & ws.to_mask;
		rt = rtfind(dst->sin_addr.s_addr);
		if (rt != NULL)
			ifp = rt->rt_ifp;
		else
			return;
	}

	ws.npackets = 0;
	if (flash)
		ws.state |= WS_ST_FLASH;
	ws.ifp = ifp;

	/*
	 * Routes in the table were already adjusted by their respective
	 * destination interface costs (which are zero by default) on
	 * input. The following is the value by which each route's metric
	 * will be bumped up on output.
	 */
	ws.metric = 1;

	ripv12_buf.rip.rip_vers = vers;

	switch (type) {
	case OUT_MULTICAST:
		if (ifp->int_if_flags & IFF_MULTICAST)
			v2buf.type = OUT_MULTICAST;
		else
			v2buf.type = NO_OUT_MULTICAST;
		v12buf.type = OUT_BROADCAST;
		break;

	case OUT_QUERY:
		ws.state |= WS_ST_QUERY;
		/* FALLTHROUGH */
	case OUT_BROADCAST:
	case OUT_UNICAST:
		v2buf.type = (vers == RIPv2) ? type : NO_OUT_RIPV2;
		v12buf.type = type;
		break;

	case NO_OUT_MULTICAST:
	case NO_OUT_RIPV2:
		return;			/* no output */
	}

	if (vers == RIPv2) {
		/* full RIPv2 only if cannot be heard by RIPv1 listeners */
		if (type != OUT_BROADCAST)
			ws.state |= WS_ST_RIP2_ALL;
		if ((ws.state & WS_ST_QUERY) ||
		    !(ws.state & WS_ST_TO_ON_NET)) {
			ws.state |= (WS_ST_AG | WS_ST_SUPER_AG);
		} else if (ifp == NULL || !(ifp->int_state & IS_NO_AG)) {
			ws.state |= WS_ST_AG;
			if (type != OUT_BROADCAST &&
			    (ifp == NULL ||
			    !(ifp->int_state & IS_NO_SUPER_AG)))
				ws.state |= WS_ST_SUPER_AG;
		}

		/* See if this packet needs authenticating */
		ws.a = find_auth(ifp);
		if (!passwd_ok && ws.a != NULL && ws.a->type == RIP_AUTH_PW)
			ws.a = NULL;
		if (ws.a != NULL && (ulong_t)ws.a->end < (ulong_t)clk.tv_sec &&
		    !ws.a->warnedflag) {
			/*
			 * If the best key is an expired one, we may as
			 * well use it. Log this event.
			 */
			writelog(LOG_WARNING,
			    "Using expired auth while transmitting to %s",
			    naddr_ntoa(ws.to.sin_addr.s_addr));
			ws.a->warnedflag = 1;
		}
	} else {
		ws.a = NULL;
	}

	clr_ws_buf(&v12buf, ws.a);
	clr_ws_buf(&v2buf, ws.a);

	/*
	 * Fake a default route if asked and if there is not already
	 * a better, real default route.
	 */
	if (should_supply(NULL) && (def_metric = ifp->int_d_metric) != 0) {
		if (NULL == (rt = rtget(RIP_DEFAULT, 0)) ||
		    rt->rt_metric+ws.metric >= def_metric) {
			ws.state |= WS_ST_DEFAULT;
			ag_check(0, 0, 0, NULL, 0, def_metric, def_metric,
			    0, 0, 0, supply_out);
		} else {
			def_metric = rt->rt_metric+ws.metric;
		}

		/*
		 * If both RIPv2 and the poor-man's router discovery
		 * kludge are on, arrange to advertise an extra
		 * default route via RIPv1.
		 */
		if ((ws.state & WS_ST_RIP2_ALL) &&
		    (ifp->int_state & IS_PM_RDISC)) {
			ripv12_buf.rip.rip_vers = RIPv1;
			v12buf.n->n_family = RIP_AF_INET;
			v12buf.n->n_dst = htonl(RIP_DEFAULT);
			v12buf.n->n_metric = htonl(def_metric);
			v12buf.n++;
		}
	}

	/* Walk the whole table; each route flows through walk_supply(). */
	(void) rn_walktree(rhead, walk_supply, NULL);
	ag_flush(0, 0, supply_out);

	/*
	 * Flush the packet buffers, provided they are not empty and
	 * do not contain only the password.
	 */
	if (v12buf.n != v12buf.base &&
	    (v12buf.n > v12buf.base+1 ||
	    v12buf.base->n_family != RIP_AF_AUTH))
		supply_write(&v12buf);
	if (v2buf.n != v2buf.base &&
	    (v2buf.n > v2buf.base+1 ||
	    v2buf.base->n_family != RIP_AF_AUTH))
		supply_write(&v2buf);

	/*
	 * If we sent nothing and this is an answer to a query, send
	 * an empty buffer.
	 */
	if (ws.npackets == 0 && (ws.state & WS_ST_QUERY)) {
		supply_write(&v2buf);
		if (ws.npackets == 0)
			supply_write(&v12buf);
	}
}

/*
 * send all of the routing table or just do a flash update
 */
void
rip_bcast(int flash)
{
	static struct sockaddr_in dst = {AF_INET};
	struct interface *ifp;
	enum output_type type;
	int vers;
	struct timeval rtime;

	/* Randomized hold-down before the next dynamic update. */
	need_flash = _B_FALSE;
	intvl_random(&rtime, MIN_WAITTIME, MAX_WAITTIME);
	no_flash = rtime;
	timevaladd(&no_flash, &now);

	if (!rip_enabled)
		return;

	trace_act("send %s and inhibit dynamic updates for %.3f sec",
	    flash ? "dynamic update" : "all routes",
	    rtime.tv_sec + ((double)rtime.tv_usec)/1000000.0);

	for (ifp = ifnet; ifp != NULL; ifp = ifp->int_next) {
		/*
		 * Skip interfaces not doing RIP or for which IP
		 * forwarding isn't turned on. Skip duplicate
		 * interfaces, we don't want to generate duplicate
		 * packets. Do try broken interfaces to see if they
		 * have healed.
		 */
		if (IS_RIP_OUT_OFF(ifp->int_state) ||
		    (ifp->int_state & IS_DUP) ||
		    !IS_IFF_ROUTING(ifp->int_if_flags))
			continue;

		/* skip turned off interfaces */
		if (!IS_IFF_UP(ifp->int_if_flags))
			continue;

		/* skip interfaces we shouldn't use */
		if (IS_IFF_QUIET(ifp->int_if_flags))
			continue;

		vers = (ifp->int_state & IS_NO_RIPV1_OUT) ?
		    RIPv2 : RIPv1;

		dst.sin_addr.s_addr = ifp->int_ripout_addr;

		/*
		 * Ignore the interface if it's not broadcast,
		 * point-to-point, or remote. It must be non-broadcast
		 * multiaccess, and therefore unsupported.
		 */
		if (!(ifp->int_if_flags & (IFF_BROADCAST |
		    IFF_POINTOPOINT)) && !(ifp->int_state & IS_REMOTE))
			continue;

		type = (ifp->int_if_flags & IFF_BROADCAST) ?
		    OUT_BROADCAST : OUT_UNICAST;

		/* Prefer multicast for RIPv2-capable multicast links. */
		if (vers == RIPv2 &&
		    (ifp->int_if_flags & IFF_MULTICAST) &&
		    !(ifp->int_state & IS_NO_RIP_MCAST))
			type = OUT_MULTICAST;

		supply(&dst, ifp, type, flash, vers, _B_TRUE);
	}

	update_seqno++;			/* all routes are up to date */
}

/*
 * Ask for routes
 * Do it only once to an interface, and not even after the interface
 * was broken and recovered.
 */
void
rip_query(void)
{
	static struct sockaddr_in dst = {AF_INET};
	struct interface *ifp;
	struct rip buf;
	enum output_type type;

	if (!rip_enabled)
		return;

	(void) memset(&buf, 0, sizeof (buf));

	for (ifp = ifnet; ifp; ifp = ifp->int_next) {
		/*
		 * Skip interfaces those already queried. Do not ask
		 * via interfaces through which we don't accept input.
		 * Do not ask via interfaces that cannot send RIP
		 * packets. Don't send queries on duplicate
		 * interfaces, that would generate duplicate packets
		 * on link. Do try broken interfaces to see if they
		 * have healed.
		 */
		if (IS_RIP_IN_OFF(ifp->int_state) ||
		    (ifp->int_state & IS_DUP) ||
		    ifp->int_query_time != NEVER)
			continue;

		/* skip turned off interfaces */
		if (!IS_IFF_UP(ifp->int_if_flags))
			continue;

		/* skip interfaces we shouldn't use */
		if (IS_IFF_QUIET(ifp->int_if_flags))
			continue;

		/*
		 * Ignore the interface if it's not broadcast,
		 * point-to-point, or remote. It must be non-broadcast
		 * multiaccess, and therefore unsupported.
		 */
		if (!(ifp->int_if_flags & (IFF_BROADCAST |
		    IFF_POINTOPOINT)) && !(ifp->int_state & IS_REMOTE))
			continue;

		/* "Send me everything" request per the RIP protocol. */
		buf.rip_cmd = RIPCMD_REQUEST;
		buf.rip_nets[0].n_family = RIP_AF_UNSPEC;
		buf.rip_nets[0].n_metric = htonl(HOPCNT_INFINITY);

		/*
		 * Send a RIPv1 query only if allowed and if we will
		 * listen to RIPv1 routers.
		 */
		if ((ifp->int_state & IS_NO_RIPV1_OUT) ||
		    (ifp->int_state & IS_NO_RIPV1_IN)) {
			buf.rip_vers = RIPv2;
		} else {
			buf.rip_vers = RIPv1;
		}

		dst.sin_addr.s_addr = ifp->int_ripout_addr;

		type = (ifp->int_if_flags & IFF_BROADCAST) ?
		    OUT_BROADCAST : OUT_UNICAST;

		if (buf.rip_vers == RIPv2 &&
		    (ifp->int_if_flags & IFF_MULTICAST) &&
		    !(ifp->int_state & IS_NO_RIP_MCAST))
			type = OUT_MULTICAST;

		/* Mark queried so we never ask this interface again. */
		ifp->int_query_time = now.tv_sec+SUPPLY_INTERVAL;
		if (output(type, &dst, ifp, &buf, sizeof (buf)) < 0)
			if_sick(ifp, _B_FALSE);
	}
}