author    wollman <wollman@FreeBSD.org>	1996-05-30 16:19:14 +0000
committer wollman <wollman@FreeBSD.org>	1996-05-30 16:19:14 +0000
commit    59e35a5328276d072ddf5acf29f540c53f5465ad (patch)
tree      eae818d6f76de45324f750edb384a7bd84aed0ff /usr.sbin/routed
parent    b0eeb8b7adc1cb8efe14a4984afae460eb700257 (diff)
Initial revision
Diffstat (limited to 'usr.sbin/routed')
-rw-r--r--   usr.sbin/routed/Makefile.inc          1
-rw-r--r--   usr.sbin/routed/parms.c             563
-rw-r--r--   usr.sbin/routed/radix.c             894
-rw-r--r--   usr.sbin/routed/rdisc.c             965
-rw-r--r--   usr.sbin/routed/rtquery/Makefile      6
-rw-r--r--   usr.sbin/routed/rtquery/rtquery.8    79
-rw-r--r--   usr.sbin/routed/rtquery/rtquery.c   516
-rw-r--r--   usr.sbin/routed/rttrace/Makefile      6
-rw-r--r--   usr.sbin/routed/rttrace/rttrace.c   146
-rw-r--r--   usr.sbin/routed/table.c            1888
10 files changed, 5064 insertions, 0 deletions
diff --git a/usr.sbin/routed/Makefile.inc b/usr.sbin/routed/Makefile.inc
new file mode 100644
index 0000000..10fa13f
--- /dev/null
+++ b/usr.sbin/routed/Makefile.inc
@@ -0,0 +1 @@
+.include "../../Makefile.inc"
diff --git a/usr.sbin/routed/parms.c b/usr.sbin/routed/parms.c
new file mode 100644
index 0000000..1fc0b85
--- /dev/null
+++ b/usr.sbin/routed/parms.c
@@ -0,0 +1,563 @@
+/*
+ * Copyright (c) 1983, 1993
+ * The Regents of the University of California. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 3. All advertising materials mentioning features or use of this software
+ * must display the following acknowledgement:
+ * This product includes software developed by the University of
+ * California, Berkeley and its contributors.
+ * 4. Neither the name of the University nor the names of its contributors
+ * may be used to endorse or promote products derived from this software
+ * without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ */
+
+#ifndef lint
+static char sccsid[] = "@(#)if.c 8.1 (Berkeley) 6/5/93";
+#endif /* not lint */
+
+#ident "$Revision: 1.1 $"
+
+#include "defs.h"
+#include "pathnames.h"
+
+
+struct parm *parms;
+struct intnet *intnets;
+
+
+/* parse a set of parameters for an interface
+ */
+char * /* error message */
+parse_parms(char *line)
+{
+#define PARS(str) (0 == (tgt = str, strcasecmp(tok, tgt)))
+#define PARSE(str) (0 == (tgt = str, strncasecmp(tok, str "=", sizeof(str))))
+#define CKF(g,b) {if (0 != (parm.parm_int_state & ((g) & ~(b)))) break; \
+ parm.parm_int_state |= (b);}
+#define DELIMS " ,\t\n"
+ struct parm parm, *parmp;
+ struct intnet *intnetp;
+ char *tok, *tgt, *p;
+
+
+ /* "subnet=x.y.z.u/mask" must be alone on the line */
+ if (!strncasecmp("subnet=",line,7)) {
+ intnetp = (struct intnet*)malloc(sizeof(*intnetp));
+ if (!getnet(&line[7], &intnetp->intnet_addr,
+ &intnetp->intnet_mask)) {
+ free(intnetp);
+ return line;
+ }
+ HTONL(intnetp->intnet_addr);
+ intnetp->intnet_next = intnets;
+ intnets = intnetp;
+ return 0;
+ }
+
+ bzero(&parm, sizeof(parm));
+
+ tgt = "null";
+ for (tok = strtok(line, DELIMS);
+ tok != 0 && tok[0] != '\0';
+ tgt = 0, tok = strtok(0,DELIMS)) {
+ if (PARSE("if")) {
+ if (parm.parm_name[0] != '\0'
+ || tok[3] == '\0'
+ || strlen(tok) > IFNAMSIZ+3)
+ break;
+ strcpy(parm.parm_name, tok+3);
+
+ } else if (PARSE("passwd")) {
+ if (tok[7] == '\0'
+ || strlen(tok) > RIP_AUTH_PW_LEN+7)
+ break;
+ strcpy(parm.parm_passwd, tok+7);
+
+ } else if (PARS("no_ag")) {
+ parm.parm_int_state |= IS_NO_AG;
+
+ } else if (PARS("no_super_ag")) {
+ parm.parm_int_state |= IS_NO_SUPER_AG;
+
+ } else if (PARS("no_rip")) {
+ parm.parm_int_state |= (IS_NO_RIPV1_IN
+ | IS_NO_RIPV2_IN
+ | IS_NO_RIPV1_OUT
+ | IS_NO_RIPV2_OUT);
+
+ } else if (PARS("no_ripv1_in")) {
+ parm.parm_int_state |= IS_NO_RIPV1_IN;
+
+ } else if (PARS("no_ripv2_in")) {
+ parm.parm_int_state |= IS_NO_RIPV2_IN;
+
+ } else if (PARS("no_ripv2_out")) {
+ parm.parm_int_state |= IS_NO_RIPV2_OUT;
+
+ } else if (PARS("ripv2_out")) {
+ if (parm.parm_int_state & IS_NO_RIPV2_OUT)
+ break;
+ parm.parm_int_state |= IS_NO_RIPV1_OUT;
+
+ } else if (PARS("no_rdisc")) {
+ CKF((GROUP_IS_SOL|GROUP_IS_ADV),
+ IS_NO_ADV_IN | IS_NO_SOL_OUT | IS_NO_ADV_OUT);
+
+ } else if (PARS("no_solicit")) {
+ CKF(GROUP_IS_SOL, IS_NO_SOL_OUT);
+
+ } else if (PARS("send_solicit")) {
+ CKF(GROUP_IS_SOL, IS_SOL_OUT);
+
+ } else if (PARS("no_rdisc_adv")) {
+ CKF(GROUP_IS_ADV, IS_NO_ADV_OUT);
+
+ } else if (PARS("rdisc_adv")) {
+ CKF(GROUP_IS_ADV, IS_ADV_OUT);
+
+ } else if (PARS("bcast_rdisc")) {
+ parm.parm_int_state |= IS_BCAST_RDISC;
+
+ } else if (PARSE("rdisc_pref")) {
+ if (parm.parm_rdisc_pref != 0
+ || tok[11] == '\0'
+ || (parm.parm_rdisc_pref = (int)strtol(&tok[11],
+ &p,0),
+ *p != '\0'))
+ break;
+
+ } else if (PARSE("rdisc_interval")) {
+ if (parm.parm_rdisc_int != 0
+ || tok[15] == '\0'
+ || (parm.parm_rdisc_int = (int)strtol(&tok[15],
+ &p,0),
+ *p != '\0')
+ || parm.parm_rdisc_int < MinMaxAdvertiseInterval
+ || parm.parm_rdisc_int > MaxMaxAdvertiseInterval)
+ break;
+
+ } else if (PARSE("fake_default")) {
+ if (parm.parm_d_metric != 0
+ || tok[13] == '\0'
+ || (parm.parm_d_metric=(int)strtol(&tok[13],&p,0),
+ *p != '\0')
+ || parm.parm_d_metric >= HOPCNT_INFINITY-2)
+ break;
+
+ } else {
+ tgt = tok;
+ break;
+ }
+ }
+ if (tgt != 0)
+ return tgt;
+
+ if (parm.parm_int_state & IS_NO_ADV_IN)
+ parm.parm_int_state |= IS_NO_SOL_OUT;
+
+ /* check for duplicate specification */
+ for (parmp = parms; parmp != 0; parmp = parmp->parm_next) {
+ if (strcmp(parm.parm_name, parmp->parm_name))
+ continue;
+ if (parmp->parm_a_h != (parm.parm_a_h & parmp->parm_m)
+ && parm.parm_a_h != (parmp->parm_a_h & parm.parm_m))
+ continue;
+
+ if (strcmp(parmp->parm_passwd, parm.parm_passwd)
+ || (0 != (parm.parm_int_state & GROUP_IS_SOL)
+ && 0 != (parmp->parm_int_state & GROUP_IS_SOL)
+ && 0 != ((parm.parm_int_state ^ parmp->parm_int_state)
+				 & GROUP_IS_SOL))
+ || (0 != (parm.parm_int_state & GROUP_IS_ADV)
+ && 0 != (parmp->parm_int_state & GROUP_IS_ADV)
+ && 0 != ((parm.parm_int_state ^ parmp->parm_int_state)
+				 & GROUP_IS_ADV))
+ || (parm.parm_rdisc_pref != 0
+ && parmp->parm_rdisc_pref != 0
+ && parm.parm_rdisc_pref != parmp->parm_rdisc_pref)
+ || (parm.parm_rdisc_int != 0
+ && parmp->parm_rdisc_int != 0
+ && parm.parm_rdisc_int != parmp->parm_rdisc_int)
+ || (parm.parm_d_metric != 0
+ && parmp->parm_d_metric != 0
+ && parm.parm_d_metric != parmp->parm_d_metric))
+ return "duplicate";
+ }
+
+ parmp = (struct parm*)malloc(sizeof(*parmp));
+ bcopy(&parm, parmp, sizeof(*parmp));
+ parmp->parm_next = parms;
+ parms = parmp;
+
+ return 0;
+#undef DELIMS
+#undef PARS
+#undef PARSE
+}
+
+
+/* use configured parameters
+ */
+void
+get_parms(struct interface *ifp)
+{
+ struct parm *parmp;
+
+ for (parmp = parms; parmp != 0; parmp = parmp->parm_next) {
+ if ((parmp->parm_a_h == (ntohl(ifp->int_addr)
+ & parmp->parm_m)
+ && parmp->parm_name[0] == '\0')
+ || (parmp->parm_name[0] != '\0'
+ && !strcmp(ifp->int_name, parmp->parm_name))) {
+ ifp->int_state |= parmp->parm_int_state;
+ bcopy(parmp->parm_passwd, ifp->int_passwd,
+ sizeof(ifp->int_passwd));
+ ifp->int_rdisc_pref = parmp->parm_rdisc_pref;
+ ifp->int_rdisc_int = parmp->parm_rdisc_int;
+ ifp->int_d_metric = parmp->parm_d_metric;
+ }
+ }
+
+ if ((ifp->int_state & IS_NO_RIP_IN) == IS_NO_RIP_IN)
+ ifp->int_state |= IS_NO_RIP_OUT;
+
+ if (ifp->int_rdisc_int == 0)
+ ifp->int_rdisc_int = DefMaxAdvertiseInterval;
+
+ if ((ifp->int_state & IS_PASSIVE)
+ || (ifp->int_state & IS_REMOTE))
+ ifp->int_state |= IS_NO_ADV_IN|IS_NO_SOL_OUT|IS_NO_ADV_OUT;
+
+
+ if (!(ifp->int_state & IS_PASSIVE)) {
+ if (!(ifp->int_if_flags & IFF_MULTICAST)
+ && !(ifp->int_if_flags & IFF_POINTOPOINT))
+ ifp->int_state |= IS_NO_RIPV2_OUT;
+ }
+
+ if (!(ifp->int_if_flags & IFF_MULTICAST))
+ ifp->int_state |= IS_BCAST_RDISC;
+
+ if (ifp->int_if_flags & IFF_POINTOPOINT) {
+ ifp->int_state |= IS_BCAST_RDISC;
+ /* point-to-point links should be passive for the sake
+ * of demand-dialing
+ */
+ if (0 == (ifp->int_state & GROUP_IS_SOL))
+ ifp->int_state |= IS_NO_SOL_OUT;
+ if (0 == (ifp->int_state & GROUP_IS_ADV))
+ ifp->int_state |= IS_NO_ADV_OUT;
+ }
+}
+
+
+/* Read a list of gateways from /etc/gateways and add them to our tables.
+ *
+ * This file contains a list of "remote" gateways.  A remote gateway is
+ * usually one whose presence we cannot determine directly, as we can
+ * for those provided by directly connected hardware.
+ *
+ * If a gateway is marked "passive" in the file, then we assume it
+ * does not understand RIP and assume it is always present. Those
+ * not marked passive are treated as if they were directly connected
+ * and assumed to be broken if they do not send us advertisements.
+ * All remote interfaces are added to our list, and those not marked
+ * passive are sent routing updates.
+ *
+ * A passive interface can also be a local, hardware interface that is
+ * exempt from RIP.
+ */
+void
+gwkludge(void)
+{
+ FILE *fp;
+ char *p, *lptr;
+ char lbuf[200], net_host[5], dname[64+1+64+1], gname[64+1], qual[9];
+ struct interface *ifp;
+ naddr dst, netmask, gate;
+ int metric, n;
+ u_int state;
+ char *type;
+ struct parm *parmp;
+
+
+ fp = fopen(_PATH_GATEWAYS, "r");
+ if (fp == 0)
+ return;
+
+ for (;;) {
+ if (0 == fgets(lbuf, sizeof(lbuf)-1, fp))
+ break;
+ lptr = lbuf;
+ while (*lptr == ' ')
+ lptr++;
+ if (*lptr == '\n' /* ignore null and comment lines */
+ || *lptr == '#')
+ continue;
+
+ /* notice parameter lines */
+ if (strncasecmp("net", lptr, 3)
+ && strncasecmp("host", lptr, 4)) {
+ p = parse_parms(lptr);
+ if (p != 0)
+ msglog("bad \"%s\" in "_PATH_GATEWAYS
+ " entry %s", lptr, p);
+ continue;
+ }
+
+/* {net | host} XX[/M] XX gateway XX metric DD [passive | external]\n */
+ n = sscanf(lptr, "%4s %129[^ ] gateway"
+ " %64[^ / ] metric %d %8s\n",
+ net_host, dname, gname, &metric, qual);
+ if (n != 5) {
+ msglog("bad "_PATH_GATEWAYS" entry %s", lptr);
+ continue;
+ }
+ if (metric < 0 || metric >= HOPCNT_INFINITY) {
+ msglog("bad metric in "_PATH_GATEWAYS" entry %s",
+ lptr);
+ continue;
+ }
+ if (!strcmp(net_host, "host")) {
+ if (!gethost(dname, &dst)) {
+ msglog("bad host %s in "_PATH_GATEWAYS
+ " entry %s", dname, lptr);
+ continue;
+ }
+ netmask = HOST_MASK;
+ } else if (!strcmp(net_host, "net")) {
+ if (!getnet(dname, &dst, &netmask)) {
+ msglog("bad net %s in "_PATH_GATEWAYS
+ " entry %s", dname, lptr);
+ continue;
+ }
+ HTONL(dst);
+ } else {
+			msglog("bad \"%s\" in "_PATH_GATEWAYS
+			       " entry %s", net_host, lptr);
+ continue;
+ }
+
+ if (!gethost(gname, &gate)) {
+ msglog("bad gateway %s in "_PATH_GATEWAYS
+ " entry %s", gname, lptr);
+ continue;
+ }
+
+ if (strcmp(qual, type = "passive") == 0) {
+ /* Passive entries are not placed in our tables,
+ * only the kernel's, so we don't copy all of the
+ * external routing information within a net.
+ * Internal machines should use the default
+ * route to a suitable gateway (like us).
+ */
+ state = IS_REMOTE | IS_PASSIVE;
+ if (metric == 0)
+ metric = 1;
+
+ } else if (strcmp(qual, type = "external") == 0) {
+ /* External entries are handled by other means
+ * such as EGP, and are placed only in the daemon
+ * tables to prevent overriding them with something
+ * else.
+ */
+ state = IS_REMOTE | IS_PASSIVE | IS_EXTERNAL;
+ if (metric == 0)
+ metric = 1;
+
+ } else if (qual[0] == '\0') {
+ if (metric != 0) {
+ /* Entries that are neither "passive" nor
+ * "external" are "remote" and must behave
+ * like physical interfaces. If they are not
+ * heard from regularly, they are deleted.
+ */
+ state = IS_REMOTE;
+ type = "remote";
+ } else {
+ /* "remote" entries with a metric of 0
+ * are aliases for our own interfaces
+ */
+ state = IS_REMOTE | IS_PASSIVE;
+ type = "alias";
+ }
+
+ } else {
+ msglog("bad "_PATH_GATEWAYS" entry %s", lptr);
+ continue;
+ }
+
+ if (!(state & IS_EXTERNAL)) {
+ /* If we are going to send packets to the gateway,
+ * it must be reachable using our physical interfaces
+ */
+ if (!rtfind(gate)) {
+ msglog("unreachable gateway %s in "
+ _PATH_GATEWAYS" entry %s",
+ gname, lptr);
+ continue;
+ }
+
+ /* Remember to advertise the corresponding logical
+ * network.
+ */
+ if (netmask != std_mask(dst))
+ state |= IS_SUBNET;
+ }
+
+ parmp = (struct parm*)malloc(sizeof(*parmp));
+ bzero(parmp, sizeof(*parmp));
+ parmp->parm_next = parms;
+ parms = parmp;
+ parmp->parm_a_h = ntohl(dst);
+ parmp->parm_m = -1;
+ parmp->parm_d_metric = 0;
+ parmp->parm_int_state = state;
+
+ /* See if this new interface duplicates an existing
+ * interface.
+ */
+ for (ifp = ifnet; 0 != ifp; ifp = ifp->int_next) {
+ if (ifp->int_addr == dst
+ && ifp->int_mask == netmask)
+ break;
+ }
+ if (ifp != 0) {
+ /* Let one of our real interfaces be marked passive.
+ */
+ if ((state & IS_PASSIVE) && !(state & IS_EXTERNAL)) {
+ ifp->int_state |= state;
+ } else {
+ msglog("%s is duplicated in "_PATH_GATEWAYS
+ " by %s",
+ ifp->int_name, lptr);
+ }
+ continue;
+ }
+
+ tot_interfaces++;
+
+ ifp = (struct interface *)malloc(sizeof(*ifp));
+ bzero(ifp, sizeof(*ifp));
+ if (ifnet != 0) {
+ ifp->int_next = ifnet;
+ ifnet->int_prev = ifp;
+ }
+ ifnet = ifp;
+
+ ifp->int_state = state;
+ ifp->int_net = ntohl(dst) & netmask;
+ ifp->int_mask = netmask;
+ if (netmask == HOST_MASK)
+ ifp->int_if_flags |= IFF_POINTOPOINT;
+ ifp->int_dstaddr = dst;
+ ifp->int_addr = gate;
+ ifp->int_metric = metric;
+ (void)sprintf(ifp->int_name, "%s-%s", type, naddr_ntoa(dst));
+ ifp->int_index = -1;
+
+ get_parms(ifp);
+
+ if (TRACEACTIONS)
+ trace_if("Add", ifp);
+ }
+}
+
+
+/* get a network number as a name or a number, with an optional "/xx"
+ * netmask.
+ */
+int /* 0=bad */
+getnet(char *name,
+ naddr *addr_hp,
+ naddr *maskp)
+{
+ int i;
+ struct netent *nentp;
+ naddr mask;
+ struct in_addr in;
+ char hname[MAXHOSTNAMELEN+1];
+ char *mname, *p;
+
+
+ /* Detect and separate "1.2.3.4/24"
+ */
+ if (0 != (mname = rindex(name,'/'))) {
+ i = (int)(mname - name);
+ if (i > sizeof(hname)-1) /* name too long */
+ return 0;
+ bcopy(name, hname, i);
+ hname[i] = '\0';
+ mname++;
+ name = hname;
+ }
+
+ nentp = getnetbyname(name);
+ if (nentp != 0) {
+ in.s_addr = (naddr)nentp->n_net;
+ } else if (inet_aton(name, &in) == 1) {
+ NTOHL(in.s_addr);
+ } else {
+ return 0;
+ }
+
+ if (mname == 0) {
+ mask = std_mask(in.s_addr);
+ } else {
+ mask = (naddr)strtoul(mname, &p, 0);
+ if (*p != '\0' || mask > 32)
+ return 0;
+ mask = HOST_MASK << (32-mask);
+ }
+
+ *addr_hp = in.s_addr;
+ *maskp = mask;
+ return 1;
+}
+
+
+int /* 0=bad */
+gethost(char *name,
+ naddr *addrp)
+{
+ struct hostent *hp;
+ struct in_addr in;
+
+
+ /* Try for a number first, even in IRIX where gethostbyname()
+ * is smart. This avoids hitting the name server which
+ * might be sick because routing is.
+ */
+ if (inet_aton(name, &in) == 1) {
+ *addrp = in.s_addr;
+ return 1;
+ }
+
+ hp = gethostbyname(name);
+ if (hp) {
+ bcopy(hp->h_addr, addrp, sizeof(*addrp));
+ return 1;
+ }
+
+ return 0;
+}
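
The getnet() routine above accepts either a network name (resolved through getnetbyname()) or a dotted-quad address with an optional "/len" suffix, turning the prefix length into a host-order netmask by shifting HOST_MASK. Below is a minimal standalone sketch of just that address/length split; sketch_getnet and its fixed 64-byte buffer are invented for illustration and use only standard types, not routed's naddr or the defs.h macros.

#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <netinet/in.h>
#include <arpa/inet.h>

/* Split an optional "/len" suffix off a dotted-quad and build the
 * corresponding host-order netmask, 0xffffffff << (32 - len).  Unlike
 * getnet() above, this sketch does not consult getnetbyname() and
 * simply uses an all-ones mask when no length is given.
 */
static int
sketch_getnet(const char *spec, in_addr_t *addrp, in_addr_t *maskp)
{
	char buf[64], *slash, *end;
	struct in_addr in;
	unsigned long len;

	if (strlen(spec) >= sizeof(buf))
		return 0;			/* name too long */
	strcpy(buf, spec);

	slash = strrchr(buf, '/');
	if (slash != NULL)
		*slash++ = '\0';

	if (inet_aton(buf, &in) == 0)
		return 0;			/* not a dotted quad */
	*addrp = ntohl(in.s_addr);		/* host byte order, as in getnet() */

	if (slash == NULL) {
		*maskp = 0xffffffffu;
	} else {
		len = strtoul(slash, &end, 0);
		if (*end != '\0' || len > 32)
			return 0;		/* bad prefix length */
		*maskp = (len == 0) ? 0 : 0xffffffffu << (32 - len);
	}
	return 1;
}

int
main(void)
{
	in_addr_t a, m;

	if (sketch_getnet("10.1.2.0/24", &a, &m))
		printf("addr=%#x mask=%#x\n", a, m);	/* addr=0xa010200 mask=0xffffff00 */
	return 0;
}

Fed "10.1.2.0/24", the sketch prints addr=0xa010200 mask=0xffffff00, the same host-order address and mask that getnet() hands back to parse_parms() or gwkludge().
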
diff --git a/usr.sbin/routed/radix.c b/usr.sbin/routed/radix.c
new file mode 100644
index 0000000..7552e08
--- /dev/null
+++ b/usr.sbin/routed/radix.c
@@ -0,0 +1,894 @@
+/*
+ * Copyright (c) 1988, 1989, 1993
+ * The Regents of the University of California. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 3. All advertising materials mentioning features or use of this software
+ * must display the following acknowledgement:
+ * This product includes software developed by the University of
+ * California, Berkeley and its contributors.
+ * 4. Neither the name of the University nor the names of its contributors
+ * may be used to endorse or promote products derived from this software
+ * without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ *
+ * @(#)radix.c 8.4 (Berkeley) 11/2/94
+ */
+
+/*
+ * Routines to build and maintain radix trees for routing lookups.
+ */
+#include <sys/param.h>
+#include <sys/malloc.h>
+#include <sys/domain.h>
+#include <sys/syslog.h>
+#include <net/radix.h>
+#include <stdlib.h>
+#define min(a,b) (((a)<(b))?(a):(b))
+#define log(x, msg) syslog(x, msg)
+#define panic(s) {log(LOG_ERR,s); exit(1);}
+
+
+int max_keylen;
+struct radix_mask *rn_mkfreelist;
+struct radix_node_head *mask_rnhead;
+static char *addmask_key;
+static char normal_chars[] = {0, 0x80, 0xc0, 0xe0, 0xf0, 0xf8, 0xfc, 0xfe, -1};
+static char *rn_zeros, *rn_ones;
+
+#define rn_masktop (mask_rnhead->rnh_treetop)
+#undef Bcmp
+#define Bcmp(a, b, l) (l == 0 ? 0 : bcmp((caddr_t)(a), (caddr_t)(b), (u_long)l))
+
+static int rn_satsifies_leaf(char *, struct radix_node *, int);
+
+/*
+ * The data structure for the keys is a radix tree with one way
+ * branching removed. The index rn_b at an internal node n represents a bit
+ * position to be tested. The tree is arranged so that all descendants
+ * of a node n have keys whose bits all agree up to position rn_b - 1.
+ * (We say the index of n is rn_b.)
+ *
+ * There is at least one descendant which has a one bit at position rn_b,
+ * and at least one with a zero there.
+ *
+ * A route is determined by a pair of key and mask. We require that the
+ * bit-wise logical and of the key and mask to be the key.
+ * We define the index of a route to associated with the mask to be
+ * the first bit number in the mask where 0 occurs (with bit number 0
+ * representing the highest order bit).
+ *
+ * We say a mask is normal if every bit is 0, past the index of the mask.
+ * If a node n has a descendant (k, m) with index(m) == index(n) == rn_b,
+ * and m is a normal mask, then the route applies to every descendant of n.
+ * If the index(m) < rn_b, this implies the trailing last few bits of k
+ * before bit b are all 0, (and hence consequently true of every descendant
+ * of n), so the route applies to all descendants of the node as well.
+ *
+ * Similar logic shows that a non-normal mask m such that
+ * index(m) <= index(n) could potentially apply to many children of n.
+ * Thus, for each non-host route, we attach its mask to a list at an internal
+ * node as high in the tree as we can go.
+ *
+ * The present version of the code makes use of normal routes in short-
+ * circuiting an explicit mask and compare operation when testing whether
+ * a key satisfies a normal route, and also in remembering the unique leaf
+ * that governs a subtree.
+ */
+
+struct radix_node *
+rn_search(v_arg, head)
+ void *v_arg;
+ struct radix_node *head;
+{
+ register struct radix_node *x;
+ register caddr_t v;
+
+ for (x = head, v = v_arg; x->rn_b >= 0;) {
+ if (x->rn_bmask & v[x->rn_off])
+ x = x->rn_r;
+ else
+ x = x->rn_l;
+ }
+ return (x);
+}
+
+struct radix_node *
+rn_search_m(v_arg, head, m_arg)
+ struct radix_node *head;
+ void *v_arg, *m_arg;
+{
+ register struct radix_node *x;
+ register caddr_t v = v_arg, m = m_arg;
+
+ for (x = head; x->rn_b >= 0;) {
+ if ((x->rn_bmask & m[x->rn_off]) &&
+ (x->rn_bmask & v[x->rn_off]))
+ x = x->rn_r;
+ else
+ x = x->rn_l;
+ }
+ return x;
+}
+
+int
+rn_refines(m_arg, n_arg)
+ void *m_arg, *n_arg;
+{
+ register caddr_t m = m_arg, n = n_arg;
+ register caddr_t lim, lim2 = lim = n + *(u_char *)n;
+ int longer = (*(u_char *)n++) - (int)(*(u_char *)m++);
+ int masks_are_equal = 1;
+
+ if (longer > 0)
+ lim -= longer;
+ while (n < lim) {
+ if (*n & ~(*m))
+ return 0;
+ if (*n++ != *m++)
+ masks_are_equal = 0;
+ }
+ while (n < lim2)
+ if (*n++)
+ return 0;
+ if (masks_are_equal && (longer < 0))
+ for (lim2 = m - longer; m < lim2; )
+ if (*m++)
+ return 1;
+ return (!masks_are_equal);
+}
+
+struct radix_node *
+rn_lookup(v_arg, m_arg, head)
+ void *v_arg, *m_arg;
+ struct radix_node_head *head;
+{
+ register struct radix_node *x;
+ caddr_t netmask = 0;
+
+ if (m_arg) {
+ if ((x = rn_addmask(m_arg, 1, head->rnh_treetop->rn_off)) == 0)
+ return (0);
+ netmask = x->rn_key;
+ }
+ x = rn_match(v_arg, head);
+ if (x && netmask) {
+ while (x && x->rn_mask != netmask)
+ x = x->rn_dupedkey;
+ }
+ return x;
+}
+
+static int
+rn_satsifies_leaf(char *trial,
+ register struct radix_node *leaf,
+ int skip)
+{
+ register char *cp = trial, *cp2 = leaf->rn_key, *cp3 = leaf->rn_mask;
+ char *cplim;
+ int length = min(*(u_char *)cp, *(u_char *)cp2);
+
+ if (cp3 == 0)
+ cp3 = rn_ones;
+ else
+ length = min(length, *(u_char *)cp3);
+ cplim = cp + length; cp3 += skip; cp2 += skip;
+ for (cp += skip; cp < cplim; cp++, cp2++, cp3++)
+ if ((*cp ^ *cp2) & *cp3)
+ return 0;
+ return 1;
+}
+
+struct radix_node *
+rn_match(v_arg, head)
+ void *v_arg;
+ struct radix_node_head *head;
+{
+ caddr_t v = v_arg;
+ register struct radix_node *t = head->rnh_treetop, *x;
+ register caddr_t cp = v, cp2;
+ caddr_t cplim;
+ struct radix_node *saved_t, *top = t;
+ int off = t->rn_off, vlen = *(u_char *)cp, matched_off;
+ register int test, b, rn_b;
+
+ /*
+ * Open code rn_search(v, top) to avoid overhead of extra
+ * subroutine call.
+ */
+ for (; t->rn_b >= 0; ) {
+ if (t->rn_bmask & cp[t->rn_off])
+ t = t->rn_r;
+ else
+ t = t->rn_l;
+ }
+ /*
+ * See if we match exactly as a host destination
+ * or at least learn how many bits match, for normal mask finesse.
+ *
+ * It doesn't hurt us to limit how many bytes to check
+ * to the length of the mask, since if it matches we had a genuine
+ * match and the leaf we have is the most specific one anyway;
+ * if it didn't match with a shorter length it would fail
+ * with a long one. This wins big for class B&C netmasks which
+ * are probably the most common case...
+ */
+ if (t->rn_mask)
+ vlen = *(u_char *)t->rn_mask;
+ cp += off; cp2 = t->rn_key + off; cplim = v + vlen;
+ for (; cp < cplim; cp++, cp2++)
+ if (*cp != *cp2)
+ goto on1;
+ /*
+ * This extra grot is in case we are explicitly asked
+ * to look up the default. Ugh!
+ */
+ if ((t->rn_flags & RNF_ROOT) && t->rn_dupedkey)
+ t = t->rn_dupedkey;
+ return t;
+on1:
+ test = (*cp ^ *cp2) & 0xff; /* find first bit that differs */
+ for (b = 7; (test >>= 1) > 0;)
+ b--;
+ matched_off = cp - v;
+ b += matched_off << 3;
+ rn_b = -1 - b;
+ /*
+ * If there is a host route in a duped-key chain, it will be first.
+ */
+ if ((saved_t = t)->rn_mask == 0)
+ t = t->rn_dupedkey;
+ for (; t; t = t->rn_dupedkey)
+ /*
+ * Even if we don't match exactly as a host,
+ * we may match if the leaf we wound up at is
+ * a route to a net.
+ */
+ if (t->rn_flags & RNF_NORMAL) {
+ if (rn_b <= t->rn_b)
+ return t;
+ } else if (rn_satsifies_leaf(v, t, matched_off))
+ return t;
+ t = saved_t;
+ /* start searching up the tree */
+ do {
+ register struct radix_mask *m;
+ t = t->rn_p;
+ if (m = t->rn_mklist) {
+ /*
+ * If non-contiguous masks ever become important
+ * we can restore the masking and open coding of
+ * the search and satisfaction test and put the
+ * calculation of "off" back before the "do".
+ */
+ do {
+ if (m->rm_flags & RNF_NORMAL) {
+ if (rn_b <= m->rm_b)
+ return (m->rm_leaf);
+ } else {
+ off = min(t->rn_off, matched_off);
+ x = rn_search_m(v, t, m->rm_mask);
+ while (x && x->rn_mask != m->rm_mask)
+ x = x->rn_dupedkey;
+ if (x && rn_satsifies_leaf(v, x, off))
+ return x;
+ }
+ } while (m = m->rm_mklist);
+ }
+ } while (t != top);
+ return 0;
+}
+
+#ifdef RN_DEBUG
+int rn_nodenum;
+struct radix_node *rn_clist;
+int rn_saveinfo;
+int rn_debug = 1;
+#endif
+
+struct radix_node *
+rn_newpair(v, b, nodes)
+ void *v;
+ int b;
+ struct radix_node nodes[2];
+{
+ register struct radix_node *tt = nodes, *t = tt + 1;
+ t->rn_b = b; t->rn_bmask = 0x80 >> (b & 7);
+ t->rn_l = tt; t->rn_off = b >> 3;
+ tt->rn_b = -1; tt->rn_key = (caddr_t)v; tt->rn_p = t;
+ tt->rn_flags = t->rn_flags = RNF_ACTIVE;
+#ifdef RN_DEBUG
+ tt->rn_info = rn_nodenum++; t->rn_info = rn_nodenum++;
+ tt->rn_twin = t; tt->rn_ybro = rn_clist; rn_clist = tt;
+#endif
+ return t;
+}
+
+struct radix_node *
+rn_insert(v_arg, head, dupentry, nodes)
+ void *v_arg;
+ struct radix_node_head *head;
+ int *dupentry;
+ struct radix_node nodes[2];
+{
+ caddr_t v = v_arg;
+ struct radix_node *top = head->rnh_treetop;
+ int head_off = top->rn_off, vlen = (int)*((u_char *)v);
+ register struct radix_node *t = rn_search(v_arg, top);
+ register caddr_t cp = v + head_off;
+ register int b;
+ struct radix_node *tt;
+ /*
+ * Find first bit at which v and t->rn_key differ
+ */
+ {
+ register caddr_t cp2 = t->rn_key + head_off;
+ register int cmp_res;
+ caddr_t cplim = v + vlen;
+
+ while (cp < cplim)
+ if (*cp2++ != *cp++)
+ goto on1;
+ *dupentry = 1;
+ return t;
+on1:
+ *dupentry = 0;
+ cmp_res = (cp[-1] ^ cp2[-1]) & 0xff;
+ for (b = (cp - v) << 3; cmp_res; b--)
+ cmp_res >>= 1;
+ }
+ {
+ register struct radix_node *p, *x = top;
+ cp = v;
+ do {
+ p = x;
+ if (cp[x->rn_off] & x->rn_bmask)
+ x = x->rn_r;
+ else x = x->rn_l;
+ } while (b > (unsigned) x->rn_b); /* x->rn_b < b && x->rn_b >= 0 */
+#ifdef RN_DEBUG
+ if (rn_debug)
+ log(LOG_DEBUG, "rn_insert: Going In:\n"), traverse(p);
+#endif
+ t = rn_newpair(v_arg, b, nodes); tt = t->rn_l;
+ if ((cp[p->rn_off] & p->rn_bmask) == 0)
+ p->rn_l = t;
+ else
+ p->rn_r = t;
+ x->rn_p = t; t->rn_p = p; /* frees x, p as temp vars below */
+ if ((cp[t->rn_off] & t->rn_bmask) == 0) {
+ t->rn_r = x;
+ } else {
+ t->rn_r = tt; t->rn_l = x;
+ }
+#ifdef RN_DEBUG
+ if (rn_debug)
+ log(LOG_DEBUG, "rn_insert: Coming Out:\n"), traverse(p);
+#endif
+ }
+ return (tt);
+}
+
+struct radix_node *
+rn_addmask(n_arg, search, skip)
+ int search, skip;
+ void *n_arg;
+{
+ caddr_t netmask = (caddr_t)n_arg;
+ register struct radix_node *x;
+ register caddr_t cp, cplim;
+ register int b = 0, mlen, j;
+ int maskduplicated, m0, isnormal;
+ struct radix_node *saved_x;
+ static int last_zeroed = 0;
+
+ if ((mlen = *(u_char *)netmask) > max_keylen)
+ mlen = max_keylen;
+ if (skip == 0)
+ skip = 1;
+ if (mlen <= skip)
+ return (mask_rnhead->rnh_nodes);
+ if (skip > 1)
+ Bcopy(rn_ones + 1, addmask_key + 1, skip - 1);
+ if ((m0 = mlen) > skip)
+ Bcopy(netmask + skip, addmask_key + skip, mlen - skip);
+ /*
+ * Trim trailing zeroes.
+ */
+ for (cp = addmask_key + mlen; (cp > addmask_key) && cp[-1] == 0;)
+ cp--;
+ mlen = cp - addmask_key;
+ if (mlen <= skip) {
+ if (m0 >= last_zeroed)
+ last_zeroed = mlen;
+ return (mask_rnhead->rnh_nodes);
+ }
+ if (m0 < last_zeroed)
+ Bzero(addmask_key + m0, last_zeroed - m0);
+ *addmask_key = last_zeroed = mlen;
+ x = rn_search(addmask_key, rn_masktop);
+ if (Bcmp(addmask_key, x->rn_key, mlen) != 0)
+ x = 0;
+ if (x || search)
+ return (x);
+ R_Malloc(x, struct radix_node *, max_keylen + 2 * sizeof (*x));
+ if ((saved_x = x) == 0)
+ return (0);
+ Bzero(x, max_keylen + 2 * sizeof (*x));
+ netmask = cp = (caddr_t)(x + 2);
+ Bcopy(addmask_key, cp, mlen);
+ x = rn_insert(cp, mask_rnhead, &maskduplicated, x);
+ if (maskduplicated) {
+ log(LOG_ERR, "rn_addmask: mask impossibly already in tree");
+ Free(saved_x);
+ return (x);
+ }
+ /*
+ * Calculate index of mask, and check for normalcy.
+ */
+ cplim = netmask + mlen; isnormal = 1;
+ for (cp = netmask + skip; (cp < cplim) && *(u_char *)cp == 0xff;)
+ cp++;
+ if (cp != cplim) {
+ for (j = 0x80; (j & *cp) != 0; j >>= 1)
+ b++;
+ if (*cp != normal_chars[b] || cp != (cplim - 1))
+ isnormal = 0;
+ }
+ b += (cp - netmask) << 3;
+ x->rn_b = -1 - b;
+ if (isnormal)
+ x->rn_flags |= RNF_NORMAL;
+ return (x);
+}
+
+static int /* XXX: arbitrary ordering for non-contiguous masks */
+rn_lexobetter(void *m_arg, void *n_arg)
+{
+ register u_char *mp = m_arg, *np = n_arg, *lim;
+
+ if (*mp > *np)
+ return 1; /* not really, but need to check longer one first */
+ if (*mp == *np)
+ for (lim = mp + *mp; mp < lim;)
+ if (*mp++ > *np++)
+ return 1;
+ return 0;
+}
+
+static struct radix_mask *
+rn_new_radix_mask(register struct radix_node *tt,
+ register struct radix_mask *next)
+{
+ register struct radix_mask *m;
+
+ MKGet(m);
+ if (m == 0) {
+ log(LOG_ERR, "Mask for route not entered\n");
+ return (0);
+ }
+ Bzero(m, sizeof *m);
+ m->rm_b = tt->rn_b;
+ m->rm_flags = tt->rn_flags;
+ if (tt->rn_flags & RNF_NORMAL)
+ m->rm_leaf = tt;
+ else
+ m->rm_mask = tt->rn_mask;
+ m->rm_mklist = next;
+ tt->rn_mklist = m;
+ return m;
+}
+
+struct radix_node *
+rn_addroute(v_arg, n_arg, head, treenodes)
+ void *v_arg, *n_arg;
+ struct radix_node_head *head;
+ struct radix_node treenodes[2];
+{
+ caddr_t v = (caddr_t)v_arg, netmask = (caddr_t)n_arg;
+ register struct radix_node *t, *x, *tt;
+ struct radix_node *saved_tt, *top = head->rnh_treetop;
+ short b = 0, b_leaf;
+ int keyduplicated;
+ caddr_t mmask;
+ struct radix_mask *m, **mp;
+
+ /*
+ * In dealing with non-contiguous masks, there may be
+ * many different routes which have the same mask.
+ * We will find it useful to have a unique pointer to
+ * the mask to speed avoiding duplicate references at
+ * nodes and possibly save time in calculating indices.
+ */
+ if (netmask) {
+ if ((x = rn_addmask(netmask, 0, top->rn_off)) == 0)
+ return (0);
+ b_leaf = x->rn_b;
+ b = -1 - x->rn_b;
+ netmask = x->rn_key;
+ }
+ /*
+ * Deal with duplicated keys: attach node to previous instance
+ */
+ saved_tt = tt = rn_insert(v, head, &keyduplicated, treenodes);
+ if (keyduplicated) {
+ for (t = tt; tt; t = tt, tt = tt->rn_dupedkey) {
+ if (tt->rn_mask == netmask)
+ return (0);
+ if (netmask == 0 ||
+ (tt->rn_mask &&
+ ((b_leaf < tt->rn_b) || /* index(netmask) > node */
+ rn_refines(netmask, tt->rn_mask) ||
+ rn_lexobetter(netmask, tt->rn_mask))))
+ break;
+ }
+ /*
+ * If the mask is not duplicated, we wouldn't
+ * find it among possible duplicate key entries
+ * anyway, so the above test doesn't hurt.
+ *
+ * We sort the masks for a duplicated key the same way as
+ * in a masklist -- most specific to least specific.
+ * This may require the unfortunate nuisance of relocating
+ * the head of the list.
+ */
+ if (tt == saved_tt) {
+ struct radix_node *xx = x;
+ /* link in at head of list */
+ (tt = treenodes)->rn_dupedkey = t;
+ tt->rn_flags = t->rn_flags;
+ tt->rn_p = x = t->rn_p;
+ if (x->rn_l == t) x->rn_l = tt; else x->rn_r = tt;
+ saved_tt = tt; x = xx;
+ } else {
+ (tt = treenodes)->rn_dupedkey = t->rn_dupedkey;
+ t->rn_dupedkey = tt;
+ }
+#ifdef RN_DEBUG
+ t=tt+1; tt->rn_info = rn_nodenum++; t->rn_info = rn_nodenum++;
+ tt->rn_twin = t; tt->rn_ybro = rn_clist; rn_clist = tt;
+#endif
+ tt->rn_key = (caddr_t) v;
+ tt->rn_b = -1;
+ tt->rn_flags = RNF_ACTIVE;
+ }
+ /*
+ * Put mask in tree.
+ */
+ if (netmask) {
+ tt->rn_mask = netmask;
+ tt->rn_b = x->rn_b;
+ tt->rn_flags |= x->rn_flags & RNF_NORMAL;
+ }
+ t = saved_tt->rn_p;
+ if (keyduplicated)
+ goto on2;
+ b_leaf = -1 - t->rn_b;
+ if (t->rn_r == saved_tt) x = t->rn_l; else x = t->rn_r;
+ /* Promote general routes from below */
+ if (x->rn_b < 0) {
+ for (mp = &t->rn_mklist; x; x = x->rn_dupedkey)
+ if (x->rn_mask && (x->rn_b >= b_leaf) && x->rn_mklist == 0) {
+ if (*mp = m = rn_new_radix_mask(x, 0))
+ mp = &m->rm_mklist;
+ }
+ } else if (x->rn_mklist) {
+ /*
+ * Skip over masks whose index is > that of new node
+ */
+ for (mp = &x->rn_mklist; m = *mp; mp = &m->rm_mklist)
+ if (m->rm_b >= b_leaf)
+ break;
+ t->rn_mklist = m; *mp = 0;
+ }
+on2:
+ /* Add new route to highest possible ancestor's list */
+ if ((netmask == 0) || (b > t->rn_b ))
+ return tt; /* can't lift at all */
+ b_leaf = tt->rn_b;
+ do {
+ x = t;
+ t = t->rn_p;
+ } while (b <= t->rn_b && x != top);
+ /*
+ * Search through routes associated with node to
+ * insert new route according to index.
+ * Need same criteria as when sorting dupedkeys to avoid
+ * double loop on deletion.
+ */
+ for (mp = &x->rn_mklist; m = *mp; mp = &m->rm_mklist) {
+ if (m->rm_b < b_leaf)
+ continue;
+ if (m->rm_b > b_leaf)
+ break;
+ if (m->rm_flags & RNF_NORMAL) {
+ mmask = m->rm_leaf->rn_mask;
+ if (tt->rn_flags & RNF_NORMAL) {
+ log(LOG_ERR,
+ "Non-unique normal route, mask not entered");
+ return tt;
+ }
+ } else
+ mmask = m->rm_mask;
+ if (mmask == netmask) {
+ m->rm_refs++;
+ tt->rn_mklist = m;
+ return tt;
+ }
+ if (rn_refines(netmask, mmask) || rn_lexobetter(netmask, mmask))
+ break;
+ }
+ *mp = rn_new_radix_mask(tt, *mp);
+ return tt;
+}
+
+struct radix_node *
+rn_delete(v_arg, netmask_arg, head)
+ void *v_arg, *netmask_arg;
+ struct radix_node_head *head;
+{
+ register struct radix_node *t, *p, *x, *tt;
+ struct radix_mask *m, *saved_m, **mp;
+ struct radix_node *dupedkey, *saved_tt, *top;
+ caddr_t v, netmask;
+ int b, head_off, vlen;
+
+ v = v_arg;
+ netmask = netmask_arg;
+ x = head->rnh_treetop;
+ tt = rn_search(v, x);
+ head_off = x->rn_off;
+ vlen = *(u_char *)v;
+ saved_tt = tt;
+ top = x;
+ if (tt == 0 ||
+ Bcmp(v + head_off, tt->rn_key + head_off, vlen - head_off))
+ return (0);
+ /*
+ * Delete our route from mask lists.
+ */
+ if (netmask) {
+ if ((x = rn_addmask(netmask, 1, head_off)) == 0)
+ return (0);
+ netmask = x->rn_key;
+ while (tt->rn_mask != netmask)
+ if ((tt = tt->rn_dupedkey) == 0)
+ return (0);
+ }
+ if (tt->rn_mask == 0 || (saved_m = m = tt->rn_mklist) == 0)
+ goto on1;
+ if (tt->rn_flags & RNF_NORMAL) {
+ if (m->rm_leaf != tt || m->rm_refs > 0) {
+ log(LOG_ERR, "rn_delete: inconsistent annotation\n");
+ return 0; /* dangling ref could cause disaster */
+ }
+ } else {
+ if (m->rm_mask != tt->rn_mask) {
+ log(LOG_ERR, "rn_delete: inconsistent annotation\n");
+ goto on1;
+ }
+ if (--m->rm_refs >= 0)
+ goto on1;
+ }
+ b = -1 - tt->rn_b;
+ t = saved_tt->rn_p;
+ if (b > t->rn_b)
+ goto on1; /* Wasn't lifted at all */
+ do {
+ x = t;
+ t = t->rn_p;
+ } while (b <= t->rn_b && x != top);
+ for (mp = &x->rn_mklist; m = *mp; mp = &m->rm_mklist)
+ if (m == saved_m) {
+ *mp = m->rm_mklist;
+ MKFree(m);
+ break;
+ }
+ if (m == 0) {
+ log(LOG_ERR, "rn_delete: couldn't find our annotation\n");
+ if (tt->rn_flags & RNF_NORMAL)
+ return (0); /* Dangling ref to us */
+ }
+on1:
+ /*
+ * Eliminate us from tree
+ */
+ if (tt->rn_flags & RNF_ROOT)
+ return (0);
+#ifdef RN_DEBUG
+ /* Get us out of the creation list */
+ for (t = rn_clist; t && t->rn_ybro != tt; t = t->rn_ybro) {}
+ if (t) t->rn_ybro = tt->rn_ybro;
+#endif
+ t = tt->rn_p;
+ if (dupedkey = saved_tt->rn_dupedkey) {
+ if (tt == saved_tt) {
+ x = dupedkey; x->rn_p = t;
+ if (t->rn_l == tt) t->rn_l = x; else t->rn_r = x;
+ } else {
+ for (x = p = saved_tt; p && p->rn_dupedkey != tt;)
+ p = p->rn_dupedkey;
+ if (p) p->rn_dupedkey = tt->rn_dupedkey;
+ else log(LOG_ERR, "rn_delete: couldn't find us\n");
+ }
+ t = tt + 1;
+ if (t->rn_flags & RNF_ACTIVE) {
+#ifndef RN_DEBUG
+ *++x = *t; p = t->rn_p;
+#else
+ b = t->rn_info; *++x = *t; t->rn_info = b; p = t->rn_p;
+#endif
+ if (p->rn_l == t) p->rn_l = x; else p->rn_r = x;
+ x->rn_l->rn_p = x; x->rn_r->rn_p = x;
+ }
+ goto out;
+ }
+ if (t->rn_l == tt) x = t->rn_r; else x = t->rn_l;
+ p = t->rn_p;
+ if (p->rn_r == t) p->rn_r = x; else p->rn_l = x;
+ x->rn_p = p;
+ /*
+ * Demote routes attached to us.
+ */
+ if (t->rn_mklist) {
+ if (x->rn_b >= 0) {
+ for (mp = &x->rn_mklist; m = *mp;)
+ mp = &m->rm_mklist;
+ *mp = t->rn_mklist;
+ } else {
+ /* If there are any key,mask pairs in a sibling
+ duped-key chain, some subset will appear sorted
+ in the same order attached to our mklist */
+ for (m = t->rn_mklist; m && x; x = x->rn_dupedkey)
+ if (m == x->rn_mklist) {
+ struct radix_mask *mm = m->rm_mklist;
+ x->rn_mklist = 0;
+ if (--(m->rm_refs) < 0)
+ MKFree(m);
+ m = mm;
+ }
+ if (m)
+#ifdef _KERNEL
+ printf("%s %x at %x\n",
+ "rn_delete: Orphaned Mask", m, x);
+#else
+ syslog(LOG_ERR, "%s %x at %x\n",
+ "rn_delete: Orphaned Mask", m, x);
+#endif
+ }
+ }
+ /*
+ * We may be holding an active internal node in the tree.
+ */
+ x = tt + 1;
+ if (t != x) {
+#ifndef RN_DEBUG
+ *t = *x;
+#else
+ b = t->rn_info; *t = *x; t->rn_info = b;
+#endif
+ t->rn_l->rn_p = t; t->rn_r->rn_p = t;
+ p = x->rn_p;
+ if (p->rn_l == x) p->rn_l = t; else p->rn_r = t;
+ }
+out:
+ tt->rn_flags &= ~RNF_ACTIVE;
+ tt[1].rn_flags &= ~RNF_ACTIVE;
+ return (tt);
+}
+
+int
+rn_walktree(h, f, w)
+ struct radix_node_head *h;
+ register int (*f)();
+ void *w;
+{
+ int error;
+ struct radix_node *base, *next;
+ register struct radix_node *rn = h->rnh_treetop;
+ /*
+ * This gets complicated because we may delete the node
+ * while applying the function f to it, so we need to calculate
+ * the successor node in advance.
+ */
+ /* First time through node, go left */
+ while (rn->rn_b >= 0)
+ rn = rn->rn_l;
+ for (;;) {
+ base = rn;
+ /* If at right child go back up, otherwise, go right */
+ while (rn->rn_p->rn_r == rn && (rn->rn_flags & RNF_ROOT) == 0)
+ rn = rn->rn_p;
+ /* Find the next *leaf* since next node might vanish, too */
+ for (rn = rn->rn_p->rn_r; rn->rn_b >= 0;)
+ rn = rn->rn_l;
+ next = rn;
+ /* Process leaves */
+ while (rn = base) {
+ base = rn->rn_dupedkey;
+ if (!(rn->rn_flags & RNF_ROOT) && (error = (*f)(rn, w)))
+ return (error);
+ }
+ rn = next;
+ if (rn->rn_flags & RNF_ROOT)
+ return (0);
+ }
+ /* NOTREACHED */
+}
+
+int
+rn_inithead(head, off)
+ void **head;
+ int off;
+{
+ register struct radix_node_head *rnh;
+ register struct radix_node *t, *tt, *ttt;
+ if (*head)
+ return (1);
+ R_Malloc(rnh, struct radix_node_head *, sizeof (*rnh));
+ if (rnh == 0)
+ return (0);
+ Bzero(rnh, sizeof (*rnh));
+ *head = rnh;
+ t = rn_newpair(rn_zeros, off, rnh->rnh_nodes);
+ ttt = rnh->rnh_nodes + 2;
+ t->rn_r = ttt;
+ t->rn_p = t;
+ tt = t->rn_l;
+ tt->rn_flags = t->rn_flags = RNF_ROOT | RNF_ACTIVE;
+ tt->rn_b = -1 - off;
+ *ttt = *tt;
+ ttt->rn_key = rn_ones;
+ rnh->rnh_addaddr = rn_addroute;
+ rnh->rnh_deladdr = rn_delete;
+ rnh->rnh_matchaddr = rn_match;
+ rnh->rnh_lookup = rn_lookup;
+ rnh->rnh_walktree = rn_walktree;
+ rnh->rnh_treetop = t;
+ return (1);
+}
+
+void
+rn_init()
+{
+ char *cp, *cplim;
+#ifdef KERNEL
+ struct domain *dom;
+
+ for (dom = domains; dom; dom = dom->dom_next)
+ if (dom->dom_maxrtkey > max_keylen)
+ max_keylen = dom->dom_maxrtkey;
+#endif
+ if (max_keylen == 0) {
+ printf("rn_init: radix functions require max_keylen be set\n");
+ return;
+ }
+ R_Malloc(rn_zeros, char *, 3 * max_keylen);
+ if (rn_zeros == NULL)
+ panic("rn_init");
+ Bzero(rn_zeros, 3 * max_keylen);
+ rn_ones = cp = rn_zeros + max_keylen;
+ addmask_key = cplim = rn_ones + max_keylen;
+ while (cp < cplim)
+ *cp++ = -1;
+ if (rn_inithead((void **)&mask_rnhead, 0) == 0)
+ panic("rn_init 2");
+}
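
The long comment at the top of radix.c describes the lookup as a walk that tests one key bit per internal node; rn_search() implements it by caching, in each node, the byte offset (rn_off) and single-bit mask (rn_bmask) for bit rn_b. The sketch below restates that descent with invented toy_* names in plain standard C, under simplifying assumptions (no leading length byte on keys, no duped-key chains); a real lookup such as rn_match() still compares the full key and masks after reaching the leaf.

#include <stddef.h>

struct toy_rnode {
	int			 rn_b;		/* bit index; < 0 marks a leaf */
	unsigned char		 rn_bmask;	/* 0x80 >> (rn_b & 7) */
	int			 rn_off;	/* rn_b >> 3: byte holding that bit */
	struct toy_rnode	*rn_l, *rn_r;	/* children of an internal node */
	const unsigned char	*rn_key;	/* key stored at a leaf */
};

/* Walk from the top of the tree to the leaf selected by "key". */
static struct toy_rnode *
toy_rn_search(const unsigned char *key, struct toy_rnode *top)
{
	struct toy_rnode *x = top;

	while (x->rn_b >= 0) {			/* internal node: test one bit */
		if (x->rn_bmask & key[x->rn_off])
			x = x->rn_r;		/* bit is 1: go right */
		else
			x = x->rn_l;		/* bit is 0: go left */
	}
	return x;	/* a leaf; the caller still compares against x->rn_key */
}

int
main(void)
{
	static const unsigned char ka[1] = { 0x00 };	/* top bit clear */
	static const unsigned char kb[1] = { 0x80 };	/* top bit set */
	struct toy_rnode la = { -1, 0, 0, NULL, NULL, ka };
	struct toy_rnode lb = { -1, 0, 0, NULL, NULL, kb };
	struct toy_rnode top = { 0, 0x80, 0, &la, &lb, NULL };

	return (toy_rn_search(kb, &top) == &lb) ? 0 : 1;	/* expect the right leaf */
}
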
diff --git a/usr.sbin/routed/rdisc.c b/usr.sbin/routed/rdisc.c
new file mode 100644
index 0000000..93c5f8b
--- /dev/null
+++ b/usr.sbin/routed/rdisc.c
@@ -0,0 +1,965 @@
+/*
+ * Copyright (c) 1995
+ * The Regents of the University of California. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 3. All advertising materials mentioning features or use of this software
+ * must display the following acknowledgement:
+ * This product includes software developed by the University of
+ * California, Berkeley and its contributors.
+ * 4. Neither the name of the University nor the names of its contributors
+ * may be used to endorse or promote products derived from this software
+ * without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ */
+
+#ifndef lint
+static char sccsid[] = "@(#)rdisc.c 8.1 (Berkeley) x/y/95";
+#endif /* not lint */
+
+#ident "$Revision: 1.1 $"
+
+#include "defs.h"
+#include <netinet/in_systm.h>
+#include <netinet/ip.h>
+#include <netinet/ip_icmp.h>
+
+/* router advertisement ICMP packet */
+struct icmp_ad {
+ u_char icmp_type; /* type of message */
+ u_char icmp_code; /* type sub code */
+ u_short icmp_cksum; /* ones complement cksum of struct */
+ u_char icmp_ad_num; /* # of following router addresses */
+ u_char icmp_ad_asize; /* 2--words in each advertisement */
+ u_short icmp_ad_life; /* seconds of validity */
+ struct icmp_ad_info {
+ n_long icmp_ad_addr;
+ n_long icmp_ad_pref;
+ } icmp_ad_info[1];
+};
+
+/* router solicitation ICMP packet */
+struct icmp_so {
+ u_char icmp_type; /* type of message */
+ u_char icmp_code; /* type sub code */
+ u_short icmp_cksum; /* ones complement cksum of struct */
+ n_long icmp_so_rsvd;
+};
+
+union ad_u {
+ struct icmp icmp;
+ struct icmp_ad ad;
+ struct icmp_so so;
+};
+
+
+int rdisc_sock = -1; /* router-discovery raw socket */
+struct interface *rdisc_sock_mcast; /* current multicast interface */
+
+struct timeval rdisc_timer;
+int rdisc_ok; /* using solicited route */
+
+
+#define MAX_ADS 5
+struct dr { /* accumulated advertisements */
+ struct interface *dr_ifp;
+ naddr dr_gate; /* gateway */
+ time_t dr_ts; /* when received */
+ time_t dr_life; /* lifetime */
+ n_long dr_recv_pref; /* received but biased preference */
+ n_long dr_pref; /* preference adjusted by metric */
+} *cur_drp, drs[MAX_ADS];
+
+/* adjust preference by interface metric without driving it to infinity */
+#define PREF(p, ifp) ((p) < (ifp)->int_metric ? ((p) != 0 ? 1 : 0) \
+ : (p) - ((ifp)->int_metric-1))
+
+static void rdisc_sort(void);
+
+
+/* dump an ICMP Router Discovery Advertisement Message
+ */
+static void
+trace_rdisc(char *act,
+ naddr from,
+ naddr to,
+ struct interface *ifp,
+ union ad_u *p,
+ u_int len)
+{
+ int i;
+ n_long *wp, *lim;
+
+
+ if (ftrace == 0)
+ return;
+
+ lastlog();
+
+ if (p->icmp.icmp_type == ICMP_ROUTERADVERT) {
+ (void)fprintf(ftrace, "%s Router Ad"
+ " from %s to %s via %s life=%d\n",
+ act, naddr_ntoa(from), naddr_ntoa(to),
+ ifp ? ifp->int_name : "?",
+ p->ad.icmp_ad_life);
+ if (!TRACECONTENTS)
+ return;
+
+ wp = &p->ad.icmp_ad_info[0].icmp_ad_addr;
+ lim = &wp[(len - sizeof(p->ad)) / sizeof(*wp)];
+ for (i = 0; i < p->ad.icmp_ad_num && wp <= lim; i++) {
+ (void)fprintf(ftrace, "\t%s preference=%#x",
+ naddr_ntoa(wp[0]), ntohl(wp[1]));
+ wp += p->ad.icmp_ad_asize;
+ }
+ (void)fputc('\n',ftrace);
+
+ } else {
+ trace_msg("%s Router Solic. from %s to %s via %s"
+ " value=%#x\n",
+ act, naddr_ntoa(from), naddr_ntoa(to),
+ ifp ? ifp->int_name : "?",
+ ntohl(p->so.icmp_so_rsvd));
+ }
+}
+
+
+/* Pick multicast group for router-discovery socket
+ */
+void
+set_rdisc_mg(struct interface *ifp,
+ int on) { /* 0=turn it off */
+ struct ip_mreq m;
+
+ if (rdisc_sock == -1
+ || !(ifp->int_if_flags & IFF_MULTICAST)
+ || (ifp->int_state & IS_ALIAS)) {
+ ifp->int_state &= ~(IS_ALL_HOSTS | IS_ALL_ROUTERS);
+ return;
+ }
+
+#ifdef MCAST_PPP_BUG
+ if (ifp->int_if_flags & IFF_POINTOPOINT)
+ return;
+#endif
+ bzero(&m, sizeof(m));
+ m.imr_interface.s_addr = ((ifp->int_if_flags & IFF_POINTOPOINT)
+ ? ifp->int_dstaddr
+ : ifp->int_addr);
+ if (supplier
+ || (ifp->int_state & IS_NO_ADV_IN)
+ || !on) {
+ /* stop listening to advertisements */
+ if (ifp->int_state & IS_ALL_HOSTS) {
+ m.imr_multiaddr.s_addr = htonl(INADDR_ALLHOSTS_GROUP);
+ if (setsockopt(rdisc_sock, IPPROTO_IP,
+ IP_DROP_MEMBERSHIP,
+ &m, sizeof(m)) < 0)
+ DBGERR(1,"IP_DROP_MEMBERSHIP ALLHOSTS");
+ ifp->int_state &= ~IS_ALL_HOSTS;
+ }
+
+ } else if (!(ifp->int_state & IS_ALL_HOSTS)) {
+ /* start listening to advertisements */
+ m.imr_multiaddr.s_addr = htonl(INADDR_ALLHOSTS_GROUP);
+ if (setsockopt(rdisc_sock, IPPROTO_IP, IP_ADD_MEMBERSHIP,
+ &m, sizeof(m)) < 0)
+ DBGERR(1,"IP_ADD_MEMBERSHIP ALLHOSTS");
+ ifp->int_state |= IS_ALL_HOSTS;
+ }
+
+ if (!supplier
+ || (ifp->int_state & IS_NO_ADV_OUT)
+ || !on) {
+ /* stop listening to solicitations */
+ if (ifp->int_state & IS_ALL_ROUTERS) {
+ m.imr_multiaddr.s_addr=htonl(INADDR_ALLROUTERS_GROUP);
+ if (setsockopt(rdisc_sock, IPPROTO_IP,
+ IP_DROP_MEMBERSHIP,
+ &m, sizeof(m)) < 0)
+ DBGERR(1,"IP_DROP_MEMBERSHIP ALLROUTERS");
+ ifp->int_state &= ~IS_ALL_ROUTERS;
+ }
+
+ } else if (!(ifp->int_state & IS_ALL_ROUTERS)) {
+ /* start hearing solicitations */
+ m.imr_multiaddr.s_addr=htonl(INADDR_ALLROUTERS_GROUP);
+ if (setsockopt(rdisc_sock, IPPROTO_IP, IP_ADD_MEMBERSHIP,
+ &m, sizeof(m)) < 0)
+ DBGERR(1,"IP_ADD_MEMBERSHIP ALLROUTERS");
+ ifp->int_state |= IS_ALL_ROUTERS;
+ }
+}
+
+
+/* start supplying routes
+ */
+void
+set_supplier(void)
+{
+ struct interface *ifp;
+ struct dr *drp;
+
+ if (supplier_set)
+ return;
+
+	trace_msg("start supplying routes\n");
+
+ /* Forget discovered routes.
+ */
+ for (drp = drs; drp < &drs[MAX_ADS]; drp++) {
+ drp->dr_recv_pref = 0;
+ drp->dr_life = 0;
+ }
+ rdisc_age(0);
+
+ supplier_set = 1;
+ supplier = 1;
+
+ /* Do not start advertising until we have heard some RIP routes */
+ LIM_SEC(rdisc_timer, now.tv_sec+MIN_WAITTIME);
+
+ /* Switch router discovery multicast groups from soliciting
+ * to advertising.
+ */
+ for (ifp = ifnet; ifp; ifp = ifp->int_next) {
+ if (ifp->int_state & IS_BROKE)
+ continue;
+ ifp->int_rdisc_cnt = 0;
+ ifp->int_rdisc_timer.tv_usec = rdisc_timer.tv_usec;
+ ifp->int_rdisc_timer.tv_sec = now.tv_sec+MIN_WAITTIME;
+ set_rdisc_mg(ifp, 1);
+ }
+}
+
+
+/* age discovered routes and find the best one
+ */
+void
+rdisc_age(naddr bad_gate)
+{
+ time_t sec;
+ struct dr *drp;
+
+
+ if (supplier) {
+		/* If only advertising, then do only that. */
+ rdisc_adv();
+ return;
+ }
+
+ /* If we are being told about a bad router,
+ * then age the discovered default route, and if there is
+	 * no alternative, solicit a replacement.
+ */
+ if (bad_gate != 0) {
+ /* Look for the bad discovered default route.
+ * Age it and note its interface.
+ */
+ for (drp = drs; drp < &drs[MAX_ADS]; drp++) {
+ if (drp->dr_ts == 0)
+ continue;
+
+ /* When we find the bad router, then age the route
+ * to at most SUPPLY_INTERVAL.
+ * This is contrary to RFC 1256, but defends against
+ * black holes.
+ */
+ if (drp->dr_gate == bad_gate) {
+ sec = (now.tv_sec - drp->dr_life
+ + SUPPLY_INTERVAL);
+ if (drp->dr_ts > sec) {
+ trace_msg("age 0.0.0.0 --> %s"
+ " via %s\n",
+ naddr_ntoa(drp->dr_gate),
+ drp->dr_ifp->int_name);
+ drp->dr_ts = sec;
+ }
+ break;
+ }
+ }
+ }
+
+ /* delete old redirected routes to keep the kernel table small
+ */
+ sec = (cur_drp == 0) ? MaxMaxAdvertiseInterval : cur_drp->dr_life;
+ del_redirects(bad_gate, now.tv_sec-sec);
+
+ rdisc_sol();
+
+ rdisc_sort();
+}
+
+
+/* zap all routes discovered via an interface that has gone bad
+ */
+void
+ifbad_rdisc(struct interface *ifp)
+{
+ struct dr *drp;
+
+ for (drp = drs; drp < &drs[MAX_ADS]; drp++) {
+ if (drp->dr_ifp != ifp)
+ continue;
+ drp->dr_recv_pref = 0;
+ drp->dr_life = 0;
+ }
+
+ rdisc_sort();
+}
+
+
+/* mark an interface ok for router discovering.
+ */
+void
+ifok_rdisc(struct interface *ifp)
+{
+ set_rdisc_mg(ifp, 1);
+
+ ifp->int_rdisc_cnt = 0;
+ ifp->int_rdisc_timer.tv_sec = now.tv_sec + (supplier
+ ? MIN_WAITTIME
+ : MAX_SOLICITATION_DELAY);
+ if (timercmp(&rdisc_timer, &ifp->int_rdisc_timer, >))
+ rdisc_timer = ifp->int_rdisc_timer;
+}
+
+
+/* get rid of a dead discovered router
+ */
+static void
+del_rdisc(struct dr *drp)
+{
+ struct interface *ifp;
+ int i;
+
+
+ del_redirects(drp->dr_gate, 0);
+ drp->dr_ts = 0;
+ drp->dr_life = 0;
+
+
+ /* Count the other discovered routes on the interface.
+ */
+ i = 0;
+ ifp = drp->dr_ifp;
+ for (drp = drs; drp < &drs[MAX_ADS]; drp++) {
+ if (drp->dr_ts != 0
+ && drp->dr_ifp == ifp)
+ i++;
+ }
+
+ /* If that was the last good discovered router on the interface,
+ * then solicit a new one.
+ * This is contrary to RFC 1256, but defends against black holes.
+ */
+ if (i == 0
+ && ifp->int_rdisc_cnt >= MAX_SOLICITATIONS) {
+ trace_msg("re-solicit routers via %s\n", ifp->int_name);
+ ifp->int_rdisc_cnt = 0;
+ ifp->int_rdisc_timer.tv_sec = 0;
+ rdisc_sol();
+ }
+}
+
+
+/* Find the best discovered route,
+ * and discard stale routers.
+ */
+static void
+rdisc_sort(void)
+{
+ struct dr *drp, *new_drp;
+ struct rt_entry *rt;
+ struct interface *ifp;
+ time_t sec;
+
+
+ /* find the best discovered route
+ */
+ new_drp = 0;
+ for (drp = drs; drp < &drs[MAX_ADS]; drp++) {
+ if (drp->dr_ts == 0)
+ continue;
+ ifp = drp->dr_ifp;
+
+ /* Get rid of expired discovered routes.
+ * Routes received over PPP links do not die until
+ * the link has been active long enough to be certain
+ * we should have heard from the router.
+ */
+ if (drp->dr_ts + drp->dr_life <= now.tv_sec) {
+ if (drp->dr_recv_pref == 0
+ || !ppp_noage
+ || !(ifp->int_if_flags & IFF_POINTOPOINT)
+ || !(ifp->int_state & IS_QUIET)
+ || (ifp->int_quiet_time
+ + (sec = MIN(MaxMaxAdvertiseInterval,
+ drp->dr_life)) <= now.tv_sec)) {
+ del_rdisc(drp);
+ continue;
+ }
+
+ /* If the PPP link is quiet, keep checking
+ * in case the link becomes active.
+ * After the link is active, the timer on the
+ * discovered route might force its deletion.
+ */
+ sec += now.tv_sec+1;
+ } else {
+ sec = drp->dr_ts+drp->dr_life+1;
+ }
+ LIM_SEC(rdisc_timer, sec);
+
+ /* Update preference with possibly changed interface
+ * metric.
+ */
+ drp->dr_pref = PREF(drp->dr_recv_pref, ifp);
+
+ /* Prefer the current route to prevent thrashing.
+ * Prefer shorter lifetimes to speed the detection of
+ * bad routers.
+ */
+ if (new_drp == 0
+ || new_drp->dr_pref < drp->dr_pref
+ || (new_drp->dr_pref == drp->dr_pref
+ && (drp == cur_drp
+ || (new_drp != cur_drp
+ && new_drp->dr_life > drp->dr_life))))
+ new_drp = drp;
+ }
+
+ /* switch to a better default route
+ */
+ if (new_drp != cur_drp) {
+ rt = rtget(RIP_DEFAULT, 0);
+
+ /* Stop using discovered routes if they are all bad
+ */
+ if (new_drp == 0) {
+ trace_msg("turn off Router Discovery\n");
+ rdisc_ok = 0;
+
+ if (rt != 0
+ && (rt->rt_state & RS_RDISC)) {
+ rtchange(rt, rt->rt_state,
+ rt->rt_gate, rt->rt_router,
+ HOPCNT_INFINITY, 0, rt->rt_ifp,
+ now.tv_sec - GARBAGE_TIME, 0);
+ rtswitch(rt, 0);
+ }
+
+ /* turn on RIP if permitted */
+ rip_on(0);
+
+ } else {
+ if (cur_drp == 0) {
+ trace_msg("turn on Router Discovery using"
+ " %s via %s\n",
+ naddr_ntoa(new_drp->dr_gate),
+ new_drp->dr_ifp->int_name);
+
+ rdisc_ok = 1;
+ rip_off();
+
+ } else {
+ trace_msg("switch Router Discovery from"
+ " %s via %s to %s via %s\n",
+ naddr_ntoa(cur_drp->dr_gate),
+ cur_drp->dr_ifp->int_name,
+ naddr_ntoa(new_drp->dr_gate),
+ new_drp->dr_ifp->int_name);
+ }
+
+ if (rt != 0) {
+ rtchange(rt, rt->rt_state | RS_RDISC,
+ new_drp->dr_gate, new_drp->dr_gate,
+ 0,0, new_drp->dr_ifp,
+ now.tv_sec, 0);
+ } else {
+ rtadd(RIP_DEFAULT, 0,
+ new_drp->dr_gate, new_drp->dr_gate,
+ 0, 0, RS_RDISC, new_drp->dr_ifp);
+ }
+ }
+
+ cur_drp = new_drp;
+ }
+}
+
+
+/* handle a single address in an advertisement
+ */
+static void
+parse_ad(naddr from,
+ naddr gate,
+ n_long pref,
+ int life,
+ struct interface *ifp)
+{
+ static naddr bad_gate;
+ struct dr *drp, *new_drp;
+
+
+ NTOHL(gate);
+ if (gate == RIP_DEFAULT
+ || !check_dst(gate)) {
+ if (bad_gate != from) {
+ msglog("router %s advertising bad gateway %s",
+ naddr_ntoa(from),
+ naddr_ntoa(gate));
+ bad_gate = from;
+ }
+ return;
+ }
+
+ /* ignore pointers to ourself and routes via unreachable networks
+ */
+ if (ifwithaddr(gate, 1, 0) != 0) {
+ if (TRACEPACKETS)
+ trace_msg("discard our own packet\n");
+ return;
+ }
+ if (!on_net(gate, ifp->int_net, ifp->int_mask)) {
+ if (TRACEPACKETS)
+ trace_msg("discard packet from unreachable net\n");
+ return;
+ }
+
+ /* Convert preference to an unsigned value
+ * and bias it by the metric of the interface.
+ */
+ pref = ntohl(pref) ^ MIN_PreferenceLevel;
+
+ for (new_drp = drs, drp = drs; drp < &drs[MAX_ADS]; drp++) {
+ if (drp->dr_ts == 0) {
+ new_drp = drp;
+ continue;
+ }
+
+ if (drp->dr_gate == gate) {
+ /* Zap an entry we are being told is kaput */
+ if (pref == 0 || life == 0) {
+ drp->dr_recv_pref = 0;
+ drp->dr_life = 0;
+ return;
+ }
+ new_drp = drp;
+ break;
+ }
+
+		/* look for the least valuable entry */
+ if (new_drp->dr_pref > drp->dr_pref)
+ new_drp = drp;
+ }
+
+ /* ignore zap of an entry we do not know about. */
+ if (pref == 0 || life == 0)
+ return;
+
+ new_drp->dr_ifp = ifp;
+ new_drp->dr_gate = gate;
+ new_drp->dr_ts = now.tv_sec;
+ new_drp->dr_life = ntohl(life);
+ new_drp->dr_recv_pref = pref;
+ new_drp->dr_pref = PREF(pref,ifp);
+
+ ifp->int_rdisc_cnt = MAX_SOLICITATIONS;
+}
+
+
+/* Compute the IP checksum
+ * This assumes the packet is less than 32K long.
+ */
+static u_short
+in_cksum(u_short *p,
+ u_int len)
+{
+ u_int sum = 0;
+ int nwords = len >> 1;
+
+ while (nwords-- != 0)
+ sum += *p++;
+
+ if (len & 1)
+ sum += *(u_char *)p;
+
+ /* end-around-carry */
+ sum = (sum >> 16) + (sum & 0xffff);
+ sum += (sum >> 16);
+ return (~sum);
+}
+
+
+/* Send a router discovery advertisement or solicitation ICMP packet.
+ */
+static void
+send_rdisc(union ad_u *p,
+ int p_size,
+ struct interface *ifp,
+ naddr dst, /* 0 or unicast destination */
+ int type) /* 0=unicast, 1=bcast, 2=mcast */
+{
+ struct sockaddr_in sin;
+ int flags;
+ char *msg;
+ naddr tgt_mcast;
+
+
+ bzero(&sin, sizeof(sin));
+ sin.sin_addr.s_addr = dst;
+ flags = MSG_DONTROUTE;
+
+ switch (type) {
+ case 0: /* unicast */
+ msg = "Send";
+ break;
+
+ case 1: /* broadcast */
+ if (ifp->int_if_flags & IFF_POINTOPOINT) {
+ msg = "Send pt-to-pt";
+ sin.sin_addr.s_addr = ifp->int_dstaddr;
+ } else {
+ msg = "Broadcast";
+ sin.sin_addr.s_addr = ifp->int_brdaddr;
+ }
+ break;
+
+ case 2: /* multicast */
+ msg = "Multicast";
+ if (rdisc_sock_mcast != ifp) {
+ /* select the right interface. */
+#ifdef MCAST_PPP_BUG
+			/* Do not specify the primary interface explicitly
+ * if we have the multicast point-to-point kernel
+ * bug, since the kernel will do the wrong thing
+ * if the local address of a point-to-point link
+ * is the same as the address of an ordinary
+ * interface.
+ */
+ if (ifp->int_addr == myaddr) {
+ tgt_mcast = 0;
+ } else
+#endif
+ tgt_mcast = ifp->int_addr;
+ if (setsockopt(rdisc_sock,
+ IPPROTO_IP, IP_MULTICAST_IF,
+ &tgt_mcast, sizeof(tgt_mcast))) {
+ DBGERR(1,"setsockopt(rdisc_sock,"
+ "IP_MULTICAST_IF)");
+ return;
+ }
+ rdisc_sock_mcast = ifp;
+ }
+ flags = 0;
+ break;
+ }
+
+ if (TRACEPACKETS)
+ trace_rdisc(msg, ifp->int_addr, sin.sin_addr.s_addr, ifp,
+ p, p_size);
+
+ if (0 > sendto(rdisc_sock, p, p_size, flags,
+ (struct sockaddr *)&sin, sizeof(sin))) {
+ msglog("sendto(%s%s%s): %s",
+ ifp != 0 ? ifp->int_name : "",
+ ifp != 0 ? ", " : "",
+ inet_ntoa(sin.sin_addr),
+ strerror(errno));
+ if (ifp != 0)
+ ifbad(ifp, 0);
+ }
+}
+
+
+/* Send an advertisement
+ */
+static void
+send_adv(struct interface *ifp,
+ naddr dst, /* 0 or unicast destination */
+ int type) /* 0=unicast, 1=bcast, 2=mcast */
+{
+ union ad_u u;
+ n_long pref;
+
+
+ bzero(&u,sizeof(u.ad));
+
+ u.ad.icmp_type = ICMP_ROUTERADVERT;
+ u.ad.icmp_ad_num = 1;
+ u.ad.icmp_ad_asize = sizeof(u.ad.icmp_ad_info[0])/4;
+
+	u.ad.icmp_ad_life = stopint ? 0 : htonl(ifp->int_rdisc_int*3);
+
+ pref = ifp->int_rdisc_pref ^ MIN_PreferenceLevel;
+ pref = PREF(pref, ifp) ^ MIN_PreferenceLevel;
+ u.ad.icmp_ad_info[0].icmp_ad_pref = htonl(pref);
+
+ u.ad.icmp_ad_info[0].icmp_ad_addr = ifp->int_addr;
+
+ u.ad.icmp_cksum = in_cksum((u_short*)&u.ad, sizeof(u.ad));
+
+ send_rdisc(&u, sizeof(u.ad), ifp, dst, type);
+}
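+/* Note (not from the imported source): the advertised lifetime of three
+ * times the advertising interval matches the RFC 1256 default, so a
+ * default route learned from these advertisements survives a couple of
+ * missed advertisements before it expires.
+ */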
+
+
+/* Advertise for Router Discovery
+ */
+void
+rdisc_adv(void)
+{
+ struct interface *ifp;
+
+
+ rdisc_timer.tv_sec = now.tv_sec + NEVER;
+
+ for (ifp = ifnet; ifp; ifp = ifp->int_next) {
+ if (0 != (ifp->int_state & (IS_NO_ADV_OUT
+ | IS_PASSIVE
+ | IS_ALIAS
+ | IS_BROKE)))
+ continue;
+
+ if (!timercmp(&ifp->int_rdisc_timer, &now, >)
+ || stopint) {
+ send_adv(ifp, INADDR_ALLHOSTS_GROUP,
+				 (ifp->int_state & IS_BCAST_RDISC) ? 1 : 2);
+ ifp->int_rdisc_cnt++;
+
+ intvl_random(&ifp->int_rdisc_timer,
+ (ifp->int_rdisc_int*3)/4,
+ ifp->int_rdisc_int);
+ if (ifp->int_rdisc_cnt < MAX_INITIAL_ADVERTS
+ && (ifp->int_rdisc_timer.tv_sec
+ > MAX_INITIAL_ADVERT_INTERVAL)) {
+ ifp->int_rdisc_timer.tv_sec
+ = MAX_INITIAL_ADVERT_INTERVAL;
+ }
+ timevaladd(&ifp->int_rdisc_timer, &now);
+ }
+
+ if (timercmp(&rdisc_timer, &ifp->int_rdisc_timer, >))
+ rdisc_timer = ifp->int_rdisc_timer;
+ }
+}
+
+
+/* Solicit for Router Discovery
+ */
+void
+rdisc_sol(void)
+{
+ struct interface *ifp;
+ union ad_u u;
+
+
+ rdisc_timer.tv_sec = now.tv_sec + NEVER;
+
+ for (ifp = ifnet; ifp; ifp = ifp->int_next) {
+ if (0 != (ifp->int_state & (IS_NO_SOL_OUT
+ | IS_PASSIVE
+ | IS_ALIAS
+ | IS_BROKE))
+ || ifp->int_rdisc_cnt >= MAX_SOLICITATIONS)
+ continue;
+
+ if (!timercmp(&ifp->int_rdisc_timer, &now, >)) {
+ bzero(&u,sizeof(u.so));
+ u.so.icmp_type = ICMP_ROUTERSOLICIT;
+ u.so.icmp_cksum = in_cksum((u_short*)&u.so,
+ sizeof(u.so));
+ send_rdisc(&u, sizeof(u.so), ifp,
+ INADDR_ALLROUTERS_GROUP,
+				   ((ifp->int_state & IS_BCAST_RDISC)
+ ? 1 : 2));
+
+ if (++ifp->int_rdisc_cnt >= MAX_SOLICITATIONS)
+ continue;
+
+ ifp->int_rdisc_timer.tv_sec = SOLICITATION_INTERVAL;
+ ifp->int_rdisc_timer.tv_usec = 0;
+ timevaladd(&ifp->int_rdisc_timer, &now);
+ }
+
+ if (timercmp(&rdisc_timer, &ifp->int_rdisc_timer, >))
+ rdisc_timer = ifp->int_rdisc_timer;
+ }
+}
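+/* Note (not from the imported source): per RFC 1256 a host sends at most
+ * MAX_SOLICITATIONS solicitations, SOLICITATION_INTERVAL seconds apart,
+ * when it starts listening; once an interface has sent that many, the
+ * loop above stops scheduling more and simply waits for unsolicited
+ * advertisements.
+ */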
+
+
+/* check the IP header of a possible Router Discovery ICMP packet */
+static struct interface * /* 0 if bad */
+ck_icmp(char *act,
+ naddr from,
+ naddr to,
+ union ad_u *p,
+ u_int len)
+{
+ struct interface *ifp;
+ char *type;
+
+
+ /* If we could tell the interface on which a packet from address 0
+ * arrived, we could deal with such solicitations.
+ */
+
+ ifp = ((from == 0) ? 0 : iflookup(from));
+
+ if (p->icmp.icmp_type == ICMP_ROUTERADVERT) {
+ type = "advertisement";
+ } else if (p->icmp.icmp_type == ICMP_ROUTERSOLICIT) {
+ type = "solicitation";
+ } else {
+ return 0;
+ }
+
+ if (p->icmp.icmp_code != 0) {
+ if (TRACEPACKETS)
+ msglog("unrecognized ICMP Router"
+ " %s code=%d from %s to %s\n",
+ type, p->icmp.icmp_code,
+ naddr_ntoa(from), naddr_ntoa(to));
+ return 0;
+ }
+
+ if (TRACEPACKETS)
+ trace_rdisc(act, from, to, ifp, p, len);
+
+ if (ifp == 0 && TRACEPACKETS)
+ msglog("unknown interface for router-discovery %s"
+ " from %s to %s",
+ type, naddr_ntoa(from), naddr_ntoa(to));
+
+ return ifp;
+}
+
+
+/* read packets from the router discovery socket
+ */
+void
+read_d(void)
+{
+ static naddr bad_asize, bad_len;
+ struct sockaddr_in from;
+ int n, fromlen, cc, hlen;
+ union {
+ struct ip ip;
+ u_short s[512/2];
+ u_char b[512];
+ } pkt;
+ union ad_u *p;
+ n_long *wp;
+ struct interface *ifp;
+
+
+ for (;;) {
+ fromlen = sizeof(from);
+ cc = recvfrom(rdisc_sock, &pkt, sizeof(pkt), 0,
+ (struct sockaddr*)&from,
+ &fromlen);
+ if (cc <= 0) {
+ if (cc < 0 && errno != EWOULDBLOCK)
+ LOGERR("recvfrom(rdisc_sock)");
+ break;
+ }
+ if (fromlen != sizeof(struct sockaddr_in))
+ logbad(1,"impossible recvfrom(rdisc_sock) fromlen=%d",
+ fromlen);
+
+ hlen = pkt.ip.ip_hl << 2;
+ if (cc < hlen + ICMP_MINLEN)
+ continue;
+ p = (union ad_u *)&pkt.b[hlen];
+ cc -= hlen;
+
+ ifp = ck_icmp("Recv",
+ from.sin_addr.s_addr, pkt.ip.ip_dst.s_addr,
+ p, cc);
+ if (ifp == 0)
+ continue;
+ if (ifwithaddr(from.sin_addr.s_addr, 0, 0)) {
+ trace_msg("\tdiscard our own packet\n");
+ continue;
+ }
+
+ switch (p->icmp.icmp_type) {
+ case ICMP_ROUTERADVERT:
+ if (p->ad.icmp_ad_asize*4
+ < sizeof(p->ad.icmp_ad_info[0])) {
+ if (bad_asize != from.sin_addr.s_addr) {
+ msglog("intolerable rdisc address"
+ " size=%d",
+ p->ad.icmp_ad_asize);
+ bad_asize = from.sin_addr.s_addr;
+ }
+ continue;
+ }
+ if (p->ad.icmp_ad_num == 0) {
+ if (TRACEPACKETS)
+ trace_msg("\tempty?\n");
+ continue;
+ }
+ if (cc != (sizeof(p->ad) - sizeof(p->ad.icmp_ad_info)
+ + (p->ad.icmp_ad_num
+ * sizeof(p->ad.icmp_ad_info[0])))) {
+ if (bad_len != from.sin_addr.s_addr) {
+ msglog("rdisc length %d does not"
+ " match ad_num %d",
+ cc, p->ad.icmp_ad_num);
+ bad_len = from.sin_addr.s_addr;
+ }
+ continue;
+ }
+ if (supplier)
+ continue;
+ if (ifp->int_state & IS_NO_ADV_IN)
+ continue;
+
+ wp = &p->ad.icmp_ad_info[0].icmp_ad_addr;
+ for (n = 0; n < p->ad.icmp_ad_num; n++) {
+ parse_ad(from.sin_addr.s_addr,
+ wp[0], wp[1],
+ p->ad.icmp_ad_life,
+ ifp);
+ wp += p->ad.icmp_ad_asize;
+ }
+ break;
+
+
+ case ICMP_ROUTERSOLICIT:
+ if (!supplier)
+ continue;
+ if (ifp->int_state & IS_NO_ADV_OUT)
+ continue;
+
+ /* XXX
+ * We should handle messages from address 0.
+ */
+
+ /* Respond with a point-to-point advertisement */
+ send_adv(ifp, from.sin_addr.s_addr, 0);
+ break;
+ }
+ }
+
+ rdisc_sort();
+}
diff --git a/usr.sbin/routed/rtquery/Makefile b/usr.sbin/routed/rtquery/Makefile
new file mode 100644
index 0000000..dd2419b
--- /dev/null
+++ b/usr.sbin/routed/rtquery/Makefile
@@ -0,0 +1,6 @@
+# @(#)Makefile 8.1 (Berkeley) 6/5/93
+
+PROG= rtquery
+MAN8= rtquery.8
+
+.include <bsd.prog.mk>
diff --git a/usr.sbin/routed/rtquery/rtquery.8 b/usr.sbin/routed/rtquery/rtquery.8
new file mode 100644
index 0000000..3c59efd
--- /dev/null
+++ b/usr.sbin/routed/rtquery/rtquery.8
@@ -0,0 +1,79 @@
+.Dd April 9, 1996
+.Dt RTQUERY 8
+.Os BSD 4.4
+.Sh NAME
+.Nm rtquery
+.Nd query routing daemons for their routing tables
+.Sh SYNOPSIS
+.Nm
+.Op Fl np1
+.Op Fl w Ar timeout
+.Op Fl r Ar addr
+.Ar host ...
+.Sh DESCRIPTION
+.Nm Rtquery
+is used to query a network routing daemon,
+.Xr routed 8
+or
+.Xr gated 8 ,
+for its routing table by sending a
+.Em request
+or
+.Em poll
+command. The routing information in any routing
+.Em response
+packets returned is displayed numerically and symbolically.
+.Pp
+.Nm Rtquery
+by default uses the
+.Em request
+command.
+When the
+.Fl p
+option is specified,
+.Nm rtquery
+uses the
+.Em poll
+command, which is an
+undocumented extension to the RIP specification supported by
+.Xr gated 8 .
+When querying
+.Xr gated 8 ,
+the
+.Em poll
+command is preferred over the
+.Em request
+command because the response is not subject to Split Horizon and/or
+Poisoned Reverse.
+.Pp
+Options supported by
+.Nm rtquery :
+.Bl -tag -width Ds
+.It Fl n
+Normally network and host numbers are displayed both symbolically
+and numerically.
+The
+.Fl n
+option displays only the numeric network and host numbers.
+.It Fl p
+Uses the
+.Em poll
+command to request full routing information from
+.Xr gated 8 .
+This is an undocumented extension supported only by
+.Xr gated 8 .
+.It Fl 1
+Queries using RIP version 1 instead of RIP version 2.
+.It Fl w Ar timeout
+Changes the delay for an answer from each host.
+By default, each host is given 15 seconds to respond.
+.It Fl r Ar addr
+Asks about the route to the destination
+.Ar addr .
+.El
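+.\" The following example section is illustrative and was not part of
+.\" the original manual page.
+.Sh EXAMPLES
+The command
+.Pp
+.Dl rtquery -n localhost
+.Pp
+asks the RIP daemon on the local host for its routing table and displays
+the result using numeric addresses only.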
+.Sh SEE ALSO
+.Xr routed 8 ,
+.Xr gated 8
+.br
+RFC\ 1058 - Routing Information Protocol, RIPv1
+.br
+RFC\ 1723 - Routing Information Protocol, RIPv2
diff --git a/usr.sbin/routed/rtquery/rtquery.c b/usr.sbin/routed/rtquery/rtquery.c
new file mode 100644
index 0000000..97899c9
--- /dev/null
+++ b/usr.sbin/routed/rtquery/rtquery.c
@@ -0,0 +1,516 @@
+/*-
+ * Copyright (c) 1982, 1986, 1993
+ * The Regents of the University of California. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 3. All advertising materials mentioning features or use of this software
+ * must display the following acknowledgement:
+ * This product includes software developed by the University of
+ * California, Berkeley and its contributors.
+ * 4. Neither the name of the University nor the names of its contributors
+ * may be used to endorse or promote products derived from this software
+ * without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ */
+
+#ifndef lint
+static char copyright[] =
+"@(#) Copyright (c) 1982, 1986, 1993\n\
+ The Regents of the University of California. All rights reserved.\n";
+#endif /* not lint */
+
+#ifndef lint
+static char sccsid[] = "@(#)query.c 8.1 (Berkeley) 6/5/93";
+#endif /* not lint */
+
+#include <sys/param.h>
+#include <sys/protosw.h>
+#include <sys/socket.h>
+#include <sys/time.h>
+#include <netinet/in.h>
+#define RIPVERSION RIPv2
+#include <protocols/routed.h>
+#include <arpa/inet.h>
+#include <netdb.h>
+#include <errno.h>
+#include <unistd.h>
+#include <stdio.h>
+#include <stdlib.h>
+#include <string.h>
+#ifdef sgi
+#include <strings.h>
+#include <bstring.h>
+#endif
+
+#ifndef sgi
+#define _HAVE_SIN_LEN
+#endif
+
+#define WTIME 15 /* Time to wait for all responses */
+#define STIME (250*1000) /* usec to wait for another response */
+
+int s;
+
+char *pgmname;
+
+union pkt_buf {
+ char packet[MAXPACKETSIZE+4096];
+ struct rip rip;
+} msg_buf;
+#define MSG msg_buf.rip
+#define MSG_LIM ((struct rip*)(&msg_buf.packet[MAXPACKETSIZE \
+ - sizeof(struct netinfo)]))
+
+int nflag; /* numbers, no names */
+int pflag; /* play the `gated` game */
+int ripv2 = 1; /* use RIP version 2 */
+int wtime = WTIME;
+int rflag; /* 1=ask about a particular route */
+
+struct timeval start; /* when query sent */
+
+static void rip_input(struct sockaddr_in*, int);
+static int query(char *, struct netinfo *);
+static int getnet(char *, struct netinfo *);
+static u_int std_mask(u_int);
+
+
+int
+main(int argc,
+ char *argv[])
+{
+ char *p;
+ struct seen {
+ struct seen *next;
+ struct in_addr addr;
+ } *seen, *sp;
+ int answered = 0;
+ int ch, cc, bsize;
+ fd_set bits;
+ struct timeval now, delay;
+ struct sockaddr_in from;
+ int fromlen;
+ struct netinfo rt;
+
+
+ bzero(&rt, sizeof(rt));
+
+ pgmname = argv[0];
+ while ((ch = getopt(argc, argv, "np1w:r:")) != EOF)
+ switch (ch) {
+ case 'n':
+ nflag = 1;
+ break;
+ case 'p':
+ pflag = 1;
+ break;
+ case '1':
+ ripv2 = 0;
+ break;
+ case 'w':
+ wtime = (int)strtoul(optarg, &p, 0);
+ if (*p != '\0'
+ || wtime <= 0)
+ goto usage;
+ break;
+ case 'r':
+ if (rflag)
+ goto usage;
+ rflag = getnet(optarg, &rt);
+ break;
+ case '?':
+ default:
+ goto usage;
+ }
+ argv += optind;
+ argc -= optind;
+ if (argc == 0) {
+usage:	printf("usage: rtquery [-np1] [-w timeout] [-r addr] host1 [host2 ...]\n");
+ exit(1);
+ }
+
+ if (!rflag) {
+ rt.n_dst = RIP_DEFAULT;
+ rt.n_family = RIP_AF_UNSPEC;
+ rt.n_metric = htonl(HOPCNT_INFINITY);
+ }
+
+ s = socket(AF_INET, SOCK_DGRAM, 0);
+ if (s < 0) {
+ perror("socket");
+ exit(2);
+ }
+ for (bsize = 127*1024; ; bsize -= 1024) {
+ if (setsockopt(s, SOL_SOCKET, SO_RCVBUF,
+ &bsize, sizeof(bsize)) == 0)
+ break;
+ if (bsize <= 4*1024) {
+ perror("setsockopt SO_RCVBUF");
+ break;
+ }
+ }
+
+ /* ask the first host */
+ seen = 0;
+ while (0 > query(*argv++, &rt) && *argv != 0)
+ answered++;
+
+ FD_ZERO(&bits);
+ for (;;) {
+ FD_SET(s, &bits);
+ delay.tv_sec = 0;
+ delay.tv_usec = STIME;
+ cc = select(s+1, &bits, 0,0, &delay);
+ if (cc > 0) {
+ fromlen = sizeof(from);
+ cc = recvfrom(s, msg_buf.packet,
+ sizeof(msg_buf.packet), 0,
+ (struct sockaddr *)&from, &fromlen);
+ if (cc < 0) {
+ perror("recvfrom");
+ exit(1);
+ }
+ /* count the distinct responding hosts.
+ * You cannot match responding hosts with
+ * addresses to which queries were transmitted,
+ * because a router might respond with a
+ * different source address.
+ */
+ for (sp = seen; sp != 0; sp = sp->next) {
+ if (sp->addr.s_addr == from.sin_addr.s_addr)
+ break;
+ }
+ if (sp == 0) {
+ sp = malloc(sizeof(*sp));
+ sp->addr = from.sin_addr;
+ sp->next = seen;
+ seen = sp;
+ answered++;
+ }
+
+ rip_input(&from, cc);
+ continue;
+ }
+
+ if (cc < 0) {
+ if ( errno == EINTR)
+ continue;
+ perror("select");
+ exit(1);
+ }
+
+ /* After a pause in responses, probe another host.
+ * This reduces the intermingling of answers.
+ */
+ while (*argv != 0 && 0 > query(*argv++, &rt))
+ answered++;
+
+ /* continue until no more packets arrive
+ * or we have heard from all hosts
+ */
+ if (answered >= argc)
+ break;
+
+ /* or until we have waited a long time
+ */
+ if (gettimeofday(&now, 0) < 0) {
+ perror("gettimeofday(now)");
+ exit(1);
+ }
+ if (start.tv_sec + wtime <= now.tv_sec)
+ break;
+ }
+
+ /* fail if there was no answer */
+ exit (answered >= argc ? 0 : 1);
+ /* NOTREACHED */
+}
+
+
+/*
+ * Poll one host.
+ */
+static int
+query(char *host,
+ struct netinfo *rt)
+{
+ struct sockaddr_in router;
+ struct hostent *hp;
+
+ if (gettimeofday(&start, 0) < 0) {
+ perror("gettimeofday(start)");
+ return -1;
+ }
+
+ bzero(&router, sizeof(router));
+ router.sin_family = AF_INET;
+#ifdef _HAVE_SIN_LEN
+ router.sin_len = sizeof(router);
+#endif
+ router.sin_addr.s_addr = inet_addr(host);
+ if (router.sin_addr.s_addr == -1) {
+ hp = gethostbyname(host);
+ if (hp == 0) {
+ fprintf(stderr,"%s: %s:", pgmname, host);
+ herror(0);
+ return -1;
+ }
+ bcopy(hp->h_addr, &router.sin_addr, hp->h_length);
+ }
+
+ router.sin_port = htons(RIP_PORT);
+
+ MSG.rip_cmd = (pflag)? RIPCMD_POLL : RIPCMD_REQUEST;
+ MSG.rip_nets[0] = *rt;
+ if (ripv2) {
+ MSG.rip_vers = RIPv2;
+ } else {
+ MSG.rip_vers = RIPv1;
+ MSG.rip_nets[0].n_mask = 0;
+ }
+
+ if (sendto(s, msg_buf.packet, sizeof(struct rip), 0,
+ (struct sockaddr *)&router, sizeof(router)) < 0) {
+ perror(host);
+ return -1;
+ }
+
+ return 0;
+}
+
+
+/*
+ * Handle an incoming RIP packet.
+ */
+static void
+rip_input(struct sockaddr_in *from,
+ int size)
+{
+ struct netinfo *n, *lim;
+ struct in_addr in;
+ char *name;
+ char net_buf[80];
+ u_int mask, dmask;
+ char *sp;
+ int i;
+ struct hostent *hp;
+ struct netent *np;
+ struct netauth *a;
+
+
+ if (nflag) {
+ printf("%s:", inet_ntoa(from->sin_addr));
+ } else {
+ hp = gethostbyaddr((char*)&from->sin_addr,
+ sizeof(struct in_addr), AF_INET);
+ if (hp == 0) {
+ printf("%s:",
+ inet_ntoa(from->sin_addr));
+ } else {
+ printf("%s (%s):", hp->h_name,
+ inet_ntoa(from->sin_addr));
+ }
+ }
+ if (MSG.rip_cmd != RIPCMD_RESPONSE) {
+ printf("\n unexpected response type %d\n", MSG.rip_cmd);
+ return;
+ }
+ printf(" RIPv%d%s %d bytes\n", MSG.rip_vers,
+ (MSG.rip_vers != RIPv1 && MSG.rip_vers != RIPv2) ? " ?" : "",
+ size);
+ if (size > MAXPACKETSIZE) {
+ if (size > sizeof(msg_buf) - sizeof(*n)) {
+ printf(" at least %d bytes too long\n",
+ size-MAXPACKETSIZE);
+ size = sizeof(msg_buf) - sizeof(*n);
+ } else {
+ printf(" %d bytes too long\n",
+ size-MAXPACKETSIZE);
+ }
+ } else if (size%sizeof(*n) != sizeof(struct rip)%sizeof(*n)) {
+ printf(" response of bad length=%d\n", size);
+ }
+
+ n = MSG.rip_nets;
+ lim = (struct netinfo *)((char*)n + size) - 1;
+ for (; n <= lim; n++) {
+ name = "";
+ if (n->n_family == RIP_AF_INET) {
+ in.s_addr = n->n_dst;
+ (void)strcpy(net_buf, inet_ntoa(in));
+
+ mask = ntohl(n->n_mask);
+ dmask = mask & -mask;
+ if (mask != 0) {
+ sp = &net_buf[strlen(net_buf)];
+ if (MSG.rip_vers == RIPv1) {
+ (void)sprintf(sp," mask=%#x ? ",mask);
+ mask = 0;
+ } else if (mask + dmask == 0) {
+ for (i = 0;
+ (i != 32
+ && ((1<<i)&mask) == 0);
+ i++)
+ continue;
+ (void)sprintf(sp, "/%d",32-i);
+ } else {
+ (void)sprintf(sp," (mask %#x)", mask);
+ }
+ }
+
+ if (!nflag) {
+ if (mask == 0) {
+ mask = std_mask(in.s_addr);
+ if ((ntohl(in.s_addr) & ~mask) != 0)
+ mask = 0;
+ }
+ /* Without a netmask, do not worry about
+ * whether the destination is a host or a
+ * network. Try both and use the first name
+ * we get.
+ *
+ * If we have a netmask we can make a
+ * good guess.
+ */
+ if ((in.s_addr & ~mask) == 0) {
+ np = getnetbyaddr(in.s_addr, AF_INET);
+ if (np != 0)
+ name = np->n_name;
+ else if (in.s_addr == 0)
+ name = "default";
+ }
+ if (name[0] == '\0'
+ && (in.s_addr & ~mask) != 0) {
+ hp = gethostbyaddr((char*)&in,
+ sizeof(in),
+ AF_INET);
+ if (hp != 0)
+ name = hp->h_name;
+ }
+ }
+
+ } else if (n->n_family == RIP_AF_AUTH) {
+ a = (struct netauth*)n;
+ (void)printf(" authentication type %d: ",
+ a->a_type);
+ for (i = 0; i < sizeof(a->au.au_pw); i++)
+ (void)printf("%02x ", a->au.au_pw[i]);
+ putc('\n', stdout);
+ continue;
+
+ } else {
+ (void)sprintf(net_buf, "(af %#x) %d.%d.%d.%d",
+ n->n_family,
+ (char)(n->n_dst >> 24),
+ (char)(n->n_dst >> 16),
+ (char)(n->n_dst >> 8),
+ (char)n->n_dst);
+ }
+
+ (void)printf(" %-18s metric %2d %8s",
+ net_buf, ntohl(n->n_metric), name);
+
+ if (n->n_nhop != 0) {
+ in.s_addr = n->n_nhop;
+ if (nflag)
+ hp = 0;
+ else
+ hp = gethostbyaddr((char*)&in, sizeof(in),
+ AF_INET);
+ (void)printf(" nhop=%-15s%s",
+ (hp != 0) ? hp->h_name : inet_ntoa(in),
+ (MSG.rip_vers == RIPv1) ? " ?" : "");
+ }
+ if (n->n_tag != 0)
+ (void)printf(" tag=%#x%s", n->n_tag,
+ (MSG.rip_vers == RIPv1) ? " ?" : "");
+ putc('\n', stdout);
+ }
+}
+
+
+/* Return the classical netmask for an IP address.
+ */
+static u_int
+std_mask(u_int addr)
+{
+ NTOHL(addr);
+
+ if (addr == 0)
+ return 0;
+ if (IN_CLASSA(addr))
+ return IN_CLASSA_NET;
+ if (IN_CLASSB(addr))
+ return IN_CLASSB_NET;
+ return IN_CLASSC_NET;
+}
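+/* Example (not from the imported source): 10.0.0.1 is a class A address,
+ * so std_mask() returns IN_CLASSA_NET (0xff000000); a class B address
+ * such as 172.16.0.1 yields 0xffff0000 and a class C address such as
+ * 192.168.1.1 yields 0xffffff00.
+ */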
+
+
+/* get a network number as a name or a number, with an optional "/xx"
+ * netmask.
+ */
+static int /* 0=bad */
+getnet(char *name,
+ struct netinfo *rt)
+{
+ int i;
+ struct netent *nentp;
+ u_int mask;
+ struct in_addr in;
+ char hname[MAXHOSTNAMELEN+1];
+ char *mname, *p;
+
+
+ /* Detect and separate "1.2.3.4/24"
+ */
+ if (0 != (mname = rindex(name,'/'))) {
+ i = (int)(mname - name);
+ if (i > sizeof(hname)-1) /* name too long */
+ return 0;
+ bcopy(name, hname, i);
+ hname[i] = '\0';
+ mname++;
+ name = hname;
+ }
+
+ nentp = getnetbyname(name);
+ if (nentp != 0) {
+ in.s_addr = nentp->n_net;
+ } else if (inet_aton(name, &in) == 1) {
+ NTOHL(in.s_addr);
+ } else {
+ return 0;
+ }
+
+ if (mname == 0) {
+ mask = std_mask(in.s_addr);
+ } else {
+ mask = (u_int)strtoul(mname, &p, 0);
+ if (*p != '\0' || mask > 32)
+ return 0;
+ mask = 0xffffffff << (32-mask);
+ }
+
+ rt->n_dst = in.s_addr;
+	rt->n_family = RIP_AF_INET;
+ rt->n_mask = htonl(mask);
+ return 1;
+}
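+/* Example (not from the imported source): getnet("10.1.0.0/16", &rt)
+ * splits off the "/16", resolves the network part with getnetbyname()
+ * or inet_aton(), and fills in rt->n_dst and rt->n_mask (htonl of
+ * 0xffff0000), returning 1; a string that cannot be resolved makes it
+ * return 0.
+ */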
diff --git a/usr.sbin/routed/rttrace/Makefile b/usr.sbin/routed/rttrace/Makefile
new file mode 100644
index 0000000..df19d5c
--- /dev/null
+++ b/usr.sbin/routed/rttrace/Makefile
@@ -0,0 +1,6 @@
+# @(#)Makefile 8.1 (Berkeley) 6/5/93
+
+PROG= rttrace
+NOMAN= noman
+
+.include <bsd.prog.mk>
diff --git a/usr.sbin/routed/rttrace/rttrace.c b/usr.sbin/routed/rttrace/rttrace.c
new file mode 100644
index 0000000..0d8ac62
--- /dev/null
+++ b/usr.sbin/routed/rttrace/rttrace.c
@@ -0,0 +1,146 @@
+/*-
+ * Copyright (c) 1983, 1988, 1993
+ * The Regents of the University of California. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 3. All advertising materials mentioning features or use of this software
+ * must display the following acknowledgement:
+ * This product includes software developed by the University of
+ * California, Berkeley and its contributors.
+ * 4. Neither the name of the University nor the names of its contributors
+ * may be used to endorse or promote products derived from this software
+ * without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ */
+
+#ifndef lint
+static char copyright[] =
+"@(#) Copyright (c) 1983, 1988, 1993\n\
+ The Regents of the University of California. All rights reserved.\n";
+#endif /* not lint */
+
+#ifndef lint
+static char sccsid[] = "@(#)trace.c 8.1 (Berkeley) 6/5/93";
+#endif /* not lint */
+
+#include <netdb.h>
+#include <stdio.h>
+#include <stdlib.h>
+#include <unistd.h>
+#include <string.h>
+#include <errno.h>
+#ifdef sgi
+#include <bstring.h>
+#endif
+#include <sys/param.h>
+#include <sys/protosw.h>
+#include <sys/socket.h>
+#include <netinet/in.h>
+#include <protocols/routed.h>
+#include <arpa/inet.h>
+
+#ifndef sgi
+#define _HAVE_SIN_LEN
+#endif
+
+struct sockaddr_in myaddr;
+char packet[MAXPACKETSIZE];
+
+int
+main(int argc,
+ char **argv)
+{
+ int size, s;
+ struct sockaddr_in router;
+ char *tgt;
+ register struct rip *msg = (struct rip *)packet;
+ struct hostent *hp;
+
+ if (argc < 2) {
+usage:
+ printf("usage: on filename host1 host2 ...\n"
+ " or: off host1 host2 ...\n");
+ exit(1);
+ }
+ s = socket(AF_INET, SOCK_DGRAM, 0);
+ if (s < 0) {
+ perror("socket");
+ exit(2);
+ }
+ myaddr.sin_family = AF_INET;
+#ifdef _HAVE_SIN_LEN
+ myaddr.sin_len = sizeof(myaddr);
+#endif
+ myaddr.sin_port = htons(IPPORT_RESERVED-1);
+ while (bind(s, (struct sockaddr *)&myaddr, sizeof(myaddr)) < 0) {
+ if (errno != EADDRINUSE
+ || myaddr.sin_port == 0) {
+ perror("bind");
+ exit(2);
+ }
+ myaddr.sin_port = htons(ntohs(myaddr.sin_port)-1);
+ }
+
+ msg->rip_vers = RIPVERSION;
+ size = sizeof(int);
+
+ argv++, argc--;
+ if (!strcmp(*argv, "on")) {
+ msg->rip_cmd = RIPCMD_TRACEON;
+ if (--argc <= 1)
+ goto usage;
+ strcpy(msg->rip_tracefile, *++argv);
+ size += strlen(msg->rip_tracefile);
+
+ } else if (!strcmp(*argv, "off")) {
+ msg->rip_cmd = RIPCMD_TRACEOFF;
+
+ } else {
+ goto usage;
+ }
+ argv++, argc--;
+
+ bzero(&router, sizeof(router));
+ router.sin_family = AF_INET;
+#ifdef _HAVE_SIN_LEN
+ router.sin_len = sizeof(router);
+#endif
+ router.sin_port = htons(RIP_PORT);
+
+ do {
+ tgt = argc > 0 ? *argv++ : "localhost";
+ router.sin_family = AF_INET;
+ router.sin_addr.s_addr = inet_addr(tgt);
+ if (router.sin_addr.s_addr == -1) {
+ hp = gethostbyname(tgt);
+ if (hp == 0) {
+ herror(tgt);
+ continue;
+ }
+ bcopy(hp->h_addr, &router.sin_addr, hp->h_length);
+ }
+ if (sendto(s, packet, size, 0,
+ (struct sockaddr *)&router, sizeof(router)) < 0)
+			perror(tgt);
+ } while (--argc > 0);
+
+ return 0;
+}
diff --git a/usr.sbin/routed/table.c b/usr.sbin/routed/table.c
new file mode 100644
index 0000000..1106563
--- /dev/null
+++ b/usr.sbin/routed/table.c
@@ -0,0 +1,1888 @@
+/*
+ * Copyright (c) 1983, 1988, 1993
+ * The Regents of the University of California. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 3. All advertising materials mentioning features or use of this software
+ * must display the following acknowledgement:
+ * This product includes software developed by the University of
+ * California, Berkeley and its contributors.
+ * 4. Neither the name of the University nor the names of its contributors
+ * may be used to endorse or promote products derived from this software
+ * without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ */
+
+#ifndef lint
+static char sccsid[] = "@(#)tables.c 8.1 (Berkeley) 6/5/93";
+#endif /* not lint */
+
+#ident "$Revision: 1.2 $"
+
+#include "defs.h"
+
+
+struct radix_node_head *rhead; /* root of the radix tree */
+
+int need_flash = 1; /* flash update needed
+ * start =1 to suppress the 1st
+ */
+
+struct timeval age_timer; /* next check of old routes */
+struct timeval need_kern = { /* need to update kernel table */
+ EPOCH+MIN_WAITTIME-1
+};
+
+int stopint;
+
+naddr age_bad_gate;
+
+
+/* It is desirable to "aggregate" routes, to combine differing routes of
+ * the same metric and next hop into a common route with a smaller netmask
+ * or to suppress redundant routes, routes that add no information to
+ * routes with smaller netmasks.
+ *
+ * A route is redundant if and only if any and all routes with smaller
+ * but matching netmasks and nets are the same. Since routes are
+ * kept sorted in the radix tree, redundant routes always come second.
+ *
+ * There are two kinds of aggregations. First, two routes of the same bit
+ * mask and differing only in the least significant bit of the network
+ * number can be combined into a single route with a coarser mask.
+ *
+ * Second, a route can be suppressed in favor of another route with a more
+ * coarse mask provided no incompatible routes with intermediate masks
+ * are present. The second kind of aggregation involves suppressing routes.
+ * A route must not be suppressed if an incompatible route exists with
+ * an intermediate mask, since the suppressed route would be covered
+ * by the intermediate.
+ *
+ * This code relies on the radix tree walk encountering routes
+ * sorted first by address, with the smallest address first.
+ */
+
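+/* Illustrative example (not from the imported source): the even/odd twin
+ * routes 10.1.2.0/25 and 10.1.2.128/25, differing only in the lowest
+ * network bit and sharing the same gateway and metric, can be replaced
+ * by the single aggregate 10.1.2.0/24.  Conversely, 10.1.2.0/24 via
+ * gateway B must not be suppressed in favor of 10.0.0.0/8 via gateway A,
+ * since packets for 10.1.2.x would then be forwarded to the wrong place.
+ */
+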
+struct ag_info ag_slots[NUM_AG_SLOTS], *ag_avail, *ag_corsest, *ag_finest;
+
+/* #define DEBUG_AG */
+#ifdef DEBUG_AG
+#define CHECK_AG() {int acnt = 0; struct ag_info *cag; \
+ for (cag = ag_avail; cag != 0; cag = cag->ag_fine) \
+ acnt++; \
+ for (cag = ag_corsest; cag != 0; cag = cag->ag_fine) \
+ acnt++; \
+ if (acnt != NUM_AG_SLOTS) { \
+ (void)fflush(stderr); \
+ abort(); \
+ } \
+}
+#else
+#define CHECK_AG()
+#endif
+
+
+/* Output the contents of an aggregation table slot.
+ * This function must always be immediately followed with the deletion
+ * of the target slot.
+ */
+static void
+ag_out(struct ag_info *ag,
+ void (*out)(struct ag_info *))
+{
+ struct ag_info *ag_cors;
+ naddr bit;
+
+
+ /* If we have both the even and odd twins, then the immediate parent,
+ * if it is present is redundant, unless it manages to aggregate
+ * something. On successive calls, this code detects the
+ * even and odd twins, and marks the parent.
+ *
+ * Note that the order in which the radix tree code emits routes
+ * ensures that the twins are seen before the parent is emitted.
+ */
+ ag_cors = ag->ag_cors;
+ if (ag_cors != 0
+ && ag_cors->ag_mask == ag->ag_mask<<1
+ && ag_cors->ag_dst_h == (ag->ag_dst_h & ag_cors->ag_mask)) {
+ ag_cors->ag_state |= ((ag_cors->ag_dst_h == ag->ag_dst_h)
+ ? AGS_REDUN0
+ : AGS_REDUN1);
+ }
+
+ /* Skip it if this route is itself redundant.
+ *
+ * It is ok to change the contents of the slot here, since it is
+ * always deleted next.
+ */
+ if (ag->ag_state & AGS_REDUN0) {
+ if (ag->ag_state & AGS_REDUN1)
+ return;
+ bit = (-ag->ag_mask) >> 1;
+ ag->ag_dst_h |= bit;
+ ag->ag_mask |= bit;
+
+ } else if (ag->ag_state & AGS_REDUN1) {
+ bit = (-ag->ag_mask) >> 1;
+ ag->ag_mask |= bit;
+ }
+ out(ag);
+}
+
+
+static void
+ag_del(struct ag_info *ag)
+{
+ CHECK_AG();
+
+ if (ag->ag_cors == 0)
+ ag_corsest = ag->ag_fine;
+ else
+ ag->ag_cors->ag_fine = ag->ag_fine;
+
+ if (ag->ag_fine == 0)
+ ag_finest = ag->ag_cors;
+ else
+ ag->ag_fine->ag_cors = ag->ag_cors;
+
+ ag->ag_fine = ag_avail;
+ ag_avail = ag;
+
+ CHECK_AG();
+}
+
+
+/* Flush routes waiting for aggregation.
+ * This must not suppress a route unless it is known that among all
+ * routes with coarser masks that match it, the one with the longest
+ * mask is appropriate.  This is ensured by scanning the routes
+ * in lexical order, and with the most restrictive mask first
+ * among routes to the same destination.
+ */
+void
+ag_flush(naddr lim_dst_h, /* flush routes to here */
+ naddr lim_mask, /* matching this mask */
+ void (*out)(struct ag_info *))
+{
+ struct ag_info *ag, *ag_cors;
+ naddr dst_h;
+
+
+ for (ag = ag_finest;
+ ag != 0 && ag->ag_mask >= lim_mask;
+ ag = ag_cors) {
+ ag_cors = ag->ag_cors;
+
+ /* work on only the specified routes */
+ dst_h = ag->ag_dst_h;
+ if ((dst_h & lim_mask) != lim_dst_h)
+ continue;
+
+ if (!(ag->ag_state & AGS_SUPPRESS))
+ ag_out(ag, out);
+
+ else for ( ; ; ag_cors = ag_cors->ag_cors) {
+ /* Look for a route that can suppress the
+ * current route */
+ if (ag_cors == 0) {
+ /* failed, so output it and look for
+ * another route to work on
+ */
+ ag_out(ag, out);
+ break;
+ }
+
+ if ((dst_h & ag_cors->ag_mask) == ag_cors->ag_dst_h) {
+ /* We found a route with a coarser mask that
+ * aggregates the current target.
+ *
+ * If it has a different next hop, it
+ * cannot replace the target, so output
+ * the target.
+ */
+ if (ag->ag_gate != ag_cors->ag_gate
+ && !(ag->ag_state & AGS_DEAD)
+ && !(ag_cors->ag_state & AGS_RDISC)) {
+ ag_out(ag, out);
+ break;
+ }
+
+ /* If it has a good enough metric, it replaces
+ * the target.
+ */
+ if (ag_cors->ag_pref <= ag->ag_pref) {
+ if (ag_cors->ag_seqno > ag->ag_seqno)
+ ag_cors->ag_seqno = ag->ag_seqno;
+ if (AG_IS_REDUN(ag->ag_state)
+ && ag_cors->ag_mask==ag->ag_mask<<1) {
+ if (ag_cors->ag_dst_h == dst_h)
+ ag_cors->ag_state |= AGS_REDUN0;
+ else
+ ag_cors->ag_state |= AGS_REDUN1;
+ }
+ break;
+ }
+ }
+ }
+
+ /* That route has either been output or suppressed */
+ ag_cors = ag->ag_cors;
+ ag_del(ag);
+ }
+
+ CHECK_AG();
+}
+
+
+/* Try to aggregate a route with previous routes.
+ */
+void
+ag_check(naddr dst,
+ naddr mask,
+ naddr gate,
+ char metric,
+ char pref,
+ u_int seqno,
+ u_short tag,
+ u_short state,
+ void (*out)(struct ag_info *)) /* output using this */
+{
+ struct ag_info *ag, *nag, *ag_cors;
+ naddr xaddr;
+ int x;
+
+ NTOHL(dst);
+
+	/* Punt non-contiguous subnet masks.
+	 *
+	 * (X & -X) isolates the least significant set bit of X.
+	 * (X + (X & -X)) == 0 (in 32-bit arithmetic) if and only if X is
+	 * zero or a contiguous run of 1-bits starting at the most
+	 * significant bit, i.e. a conventional netmask.  For example,
+	 * 0xffffff00 + 0x00000100 overflows to 0, while the broken mask
+	 * 0xff00ff00 + 0x00000100 does not.
+	 */
+ if ((mask & -mask) + mask != 0) {
+ struct ag_info nc_ag;
+
+ nc_ag.ag_dst_h = dst;
+ nc_ag.ag_mask = mask;
+ nc_ag.ag_gate = gate;
+ nc_ag.ag_metric = metric;
+ nc_ag.ag_pref = pref;
+ nc_ag.ag_tag = tag;
+ nc_ag.ag_state = state;
+ nc_ag.ag_seqno = seqno;
+ out(&nc_ag);
+ return;
+ }
+
+ /* Search for the right slot in the aggregation table.
+ */
+ ag_cors = 0;
+ ag = ag_corsest;
+ while (ag != 0) {
+ if (ag->ag_mask >= mask)
+ break;
+ /* Suppress routes as we look.
+ * A route to an address less than the current destination
+ * will not be affected by the current route or any route
+ * seen hereafter. That means it is safe to suppress it.
+		 * This check keeps poor routes (e.g. those with large hop
+		 * counts) from preventing suppression of finer routes.
+ */
+ if (ag_cors != 0
+ && ag->ag_dst_h < dst
+ && (ag->ag_state & AGS_SUPPRESS)
+ && ag_cors->ag_pref <= ag->ag_pref
+ && (ag->ag_dst_h & ag_cors->ag_mask) == ag_cors->ag_dst_h
+ && (ag_cors->ag_gate == ag->ag_gate
+ || (ag->ag_state & AGS_DEAD)
+ || (ag_cors->ag_state & AGS_RDISC))) {
+ if (ag_cors->ag_seqno > ag->ag_seqno)
+ ag_cors->ag_seqno = ag->ag_seqno;
+ if (AG_IS_REDUN(ag->ag_state)
+ && ag_cors->ag_mask==ag->ag_mask<<1) {
+ if (ag_cors->ag_dst_h == dst)
+ ag_cors->ag_state |= AGS_REDUN0;
+ else
+ ag_cors->ag_state |= AGS_REDUN1;
+ }
+ ag_del(ag);
+ CHECK_AG();
+ } else {
+ ag_cors = ag;
+ }
+ ag = ag_cors->ag_fine;
+ }
+
+ /* If we find the even/odd twin of the new route, and if the
+ * masks and so forth are equal, we can aggregate them.
+ * We can probably promote one of the pair.
+ *
+ * Since the routes are encountered in lexical order,
+ * the new route must be odd. However, the second or later
+ * times around this loop, it could be the even twin promoted
+ * from the even/odd pair of twins of the finer route.
+ */
+ while (ag != 0
+ && ag->ag_mask == mask
+ && ((ag->ag_dst_h ^ dst) & (mask<<1)) == 0) {
+
+ /* When a promoted route encounters the same but explicit
+ * route, assume the new one has been promoted, and
+ * so its gateway, metric and tag are right.
+ *
+ * Routes are encountered in lexical order, so an even/odd
+ * pair is never promoted until the parent route is
+ * already present. So we know that the new route
+ * is a promoted pair and the route already in the slot
+ * is the explicit route that was made redundant by
+ * the pair.
+ *
+ * The sequence number only controls flash updating, and
+ * so should be the smaller of the two.
+ */
+ if (ag->ag_dst_h == dst) {
+ ag->ag_metric = metric;
+ ag->ag_pref = pref;
+ ag->ag_gate = gate;
+ ag->ag_tag = tag;
+ if (ag->ag_seqno > seqno)
+ ag->ag_seqno = seqno;
+
+ /* some bits are set only if both routes have them */
+ ag->ag_state &= ~(~state & (AGS_PROMOTE | AGS_RIPV2));
+ /* others are set if they are set on either route */
+ ag->ag_state |= (state & (AGS_REDUN0 | AGS_REDUN1
+ | AGS_GATEWAY
+ | AGS_SUPPRESS));
+ return;
+ }
+
+ /* If one of the routes can be promoted and suppressed
+ * and the other can at least be suppressed, they
+ * can be combined.
+ * Note that any route that can be promoted is always
+ * marked to be eligible to be suppressed.
+ */
+ if (!((state & AGS_PROMOTE)
+ && (ag->ag_state & AGS_SUPPRESS))
+ && !((ag->ag_state & AGS_PROMOTE)
+ && (state & AGS_SUPPRESS)))
+ break;
+
+ /* A pair of even/odd twin routes can be combined
+ * if either is redundant, or if they are via the
+ * same gateway and have the same metric.
+ * Except that the kernel does not care about the
+ * metric.
+ */
+ if (AG_IS_REDUN(ag->ag_state)
+ || AG_IS_REDUN(state)
+ || (ag->ag_gate == gate
+ && ag->ag_pref == pref
+ && (state & ag->ag_state & AGS_PROMOTE) != 0
+ && ag->ag_tag == tag)) {
+
+ /* We have both the even and odd pairs.
+ * Since the routes are encountered in order,
+ * the route in the slot must be the even twin.
+ *
+ * Combine and promote the pair of routes.
+ */
+ if (seqno > ag->ag_seqno)
+ seqno = ag->ag_seqno;
+ if (!AG_IS_REDUN(state))
+ state &= ~AGS_REDUN1;
+ if (AG_IS_REDUN(ag->ag_state))
+ state |= AGS_REDUN0;
+ else
+ state &= ~AGS_REDUN0;
+ state |= (ag->ag_state & AGS_RIPV2);
+
+ /* Get rid of the even twin that was already
+ * in the slot.
+ */
+ ag_del(ag);
+
+ } else if (ag->ag_pref >= pref
+ && (ag->ag_state & AGS_PROMOTE)) {
+ /* If we cannot combine the pair, maybe the route
+ * with the worse metric can be promoted.
+ *
+ * Promote the old, even twin, by giving its slot
+ * in the table to the new, odd twin.
+ */
+ ag->ag_dst_h = dst;
+
+ xaddr = ag->ag_gate;
+ ag->ag_gate = gate;
+ gate = xaddr;
+
+ x = ag->ag_tag;
+ ag->ag_tag = tag;
+ tag = x;
+
+ x = ag->ag_state;
+ ag->ag_state = state;
+ state = x;
+ if (!AG_IS_REDUN(state))
+ state &= ~AGS_REDUN0;
+
+ x = ag->ag_metric;
+ ag->ag_metric = metric;
+ metric = x;
+
+ x = ag->ag_pref;
+ ag->ag_pref = pref;
+ pref = x;
+
+ if (seqno >= ag->ag_seqno)
+ seqno = ag->ag_seqno;
+ else
+ ag->ag_seqno = seqno;
+
+ } else {
+ if (!(state & AGS_PROMOTE))
+ break; /* cannot promote either twin */
+
+ /* promote the new, odd twin by shaving its
+ * mask and address.
+ */
+ if (seqno > ag->ag_seqno)
+ seqno = ag->ag_seqno;
+ else
+ ag->ag_seqno = seqno;
+ if (!AG_IS_REDUN(state))
+ state &= ~AGS_REDUN1;
+ }
+
+ mask <<= 1;
+ dst &= mask;
+
+ if (ag_cors == 0) {
+ ag = ag_corsest;
+ break;
+ }
+ ag = ag_cors;
+ ag_cors = ag->ag_cors;
+ }
+
+ /* When we can no longer promote and combine routes,
+ * flush the old route in the target slot. Also flush
+ * any finer routes that we know will never be aggregated by
+ * the new route.
+ *
+ * In case we moved toward coarser masks,
+ * get back where we belong
+ */
+ if (ag != 0
+ && ag->ag_mask < mask) {
+ ag_cors = ag;
+ ag = ag->ag_fine;
+ }
+
+ /* Empty the target slot
+ */
+ if (ag != 0 && ag->ag_mask == mask) {
+ ag_flush(ag->ag_dst_h, ag->ag_mask, out);
+ ag = (ag_cors == 0) ? ag_corsest : ag_cors->ag_fine;
+ }
+
+#ifdef DEBUG_AG
+ (void)fflush(stderr);
+ if (ag == 0 && ag_cors != ag_finest)
+ abort();
+ if (ag_cors == 0 && ag != ag_corsest)
+ abort();
+ if (ag != 0 && ag->ag_cors != ag_cors)
+ abort();
+ if (ag_cors != 0 && ag_cors->ag_fine != ag)
+ abort();
+ CHECK_AG();
+#endif
+
+ /* Save the new route on the end of the table.
+ */
+ nag = ag_avail;
+ ag_avail = nag->ag_fine;
+
+ nag->ag_dst_h = dst;
+ nag->ag_mask = mask;
+ nag->ag_gate = gate;
+ nag->ag_metric = metric;
+ nag->ag_pref = pref;
+ nag->ag_tag = tag;
+ nag->ag_state = state;
+ nag->ag_seqno = seqno;
+
+ nag->ag_fine = ag;
+ if (ag != 0)
+ ag->ag_cors = nag;
+ else
+ ag_finest = nag;
+ nag->ag_cors = ag_cors;
+ if (ag_cors == 0)
+ ag_corsest = nag;
+ else
+ ag_cors->ag_fine = nag;
+ CHECK_AG();
+}
+
+
+static char *
+rtm_type_name(u_char type)
+{
+ static char *rtm_types[] = {
+ "RTM_ADD",
+ "RTM_DELETE",
+ "RTM_CHANGE",
+ "RTM_GET",
+ "RTM_LOSING",
+ "RTM_REDIRECT",
+ "RTM_MISS",
+ "RTM_LOCK",
+ "RTM_OLDADD",
+ "RTM_OLDDEL",
+ "RTM_RESOLVE",
+ "RTM_NEWADDR",
+ "RTM_DELADDR",
+ "RTM_IFINFO"
+ };
+	static char name0[16];		/* room for "RTM type 0xff" */
+
+
+ if (type > sizeof(rtm_types)/sizeof(rtm_types[0])
+ || type == 0) {
+ sprintf(name0, "RTM type %#x", type);
+ return name0;
+ } else {
+ return rtm_types[type-1];
+ }
+}
+
+
+/* Trim a mask in a sockaddr
+ * Produce a length of 0 for an address of 0.
+ * Otherwise produce the index of the first zero byte.
+ */
+void
+#ifdef _HAVE_SIN_LEN
+masktrim(struct sockaddr_in *ap)
+#else
+masktrim(struct sockaddr_in_new *ap)
+#endif
+{
+ register char *cp;
+
+ if (ap->sin_addr.s_addr == 0) {
+ ap->sin_len = 0;
+ return;
+ }
+ cp = (char *)(&ap->sin_addr.s_addr+1);
+	while (*--cp == 0)
+ continue;
+ ap->sin_len = cp - (char*)ap + 1;
+}
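+/* Worked example (not from the imported source): for the mask
+ * 255.255.255.0 the bytes of sin_addr are ff ff ff 00, the scan stops at
+ * the last nonzero byte, and sin_len becomes 7, trimming the trailing
+ * zero byte; an all-ones mask keeps the full length of 8, and a zero
+ * mask gets length 0.
+ */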
+
+
+/* Tell the kernel to add, delete or change a route
+ */
+static void
+rtioctl(int action, /* RTM_DELETE, etc */
+ naddr dst,
+ naddr gate,
+ naddr mask,
+ int metric,
+ int flags)
+{
+ struct {
+ struct rt_msghdr w_rtm;
+ struct sockaddr_in w_dst;
+ struct sockaddr_in w_gate;
+#ifdef _HAVE_SA_LEN
+ struct sockaddr_in w_mask;
+#else
+ struct sockaddr_in_new w_mask;
+#endif
+ } w;
+ long cc;
+
+again:
+ bzero(&w, sizeof(w));
+ w.w_rtm.rtm_msglen = sizeof(w);
+ w.w_rtm.rtm_version = RTM_VERSION;
+ w.w_rtm.rtm_type = action;
+ w.w_rtm.rtm_flags = flags;
+ w.w_rtm.rtm_seq = ++rt_sock_seqno;
+ w.w_rtm.rtm_addrs = RTA_DST|RTA_GATEWAY;
+ if (metric != 0) {
+ w.w_rtm.rtm_rmx.rmx_hopcount = metric;
+ w.w_rtm.rtm_inits |= RTV_HOPCOUNT;
+ }
+ w.w_dst.sin_family = AF_INET;
+ w.w_dst.sin_addr.s_addr = dst;
+ w.w_gate.sin_family = AF_INET;
+ w.w_gate.sin_addr.s_addr = gate;
+#ifdef _HAVE_SA_LEN
+ w.w_dst.sin_len = sizeof(w.w_dst);
+ w.w_gate.sin_len = sizeof(w.w_gate);
+#endif
+ if (mask == HOST_MASK) {
+ w.w_rtm.rtm_flags |= RTF_HOST;
+ w.w_rtm.rtm_msglen -= sizeof(w.w_mask);
+ } else {
+ w.w_rtm.rtm_addrs |= RTA_NETMASK;
+ w.w_mask.sin_addr.s_addr = htonl(mask);
+#ifdef _HAVE_SA_LEN
+ masktrim(&w.w_mask);
+ if (w.w_mask.sin_len == 0)
+ w.w_mask.sin_len = sizeof(long);
+ w.w_rtm.rtm_msglen -= (sizeof(w.w_mask) - w.w_mask.sin_len);
+#endif
+ }
+#ifndef NO_INSTALL
+ cc = write(rt_sock, &w, w.w_rtm.rtm_msglen);
+ if (cc == w.w_rtm.rtm_msglen)
+ return;
+ if (cc < 0) {
+ if (errno == ESRCH && action == RTM_CHANGE) {
+ trace_msg("route to %s disappeared before CHANGE",
+ addrname(dst, mask, 0));
+ action = RTM_ADD;
+ goto again;
+ }
+ msglog("write(rt_sock) %s %s: %s",
+ rtm_type_name(action), addrname(dst, mask, 0),
+ strerror(errno));
+ } else {
+ msglog("write(rt_sock) wrote %d instead of %d",
+ cc, w.w_rtm.rtm_msglen);
+ }
+#endif
+}
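+/* Note (not from the imported source): the message written above is a
+ * struct rt_msghdr followed by sockaddr's in RTA_* bit order
+ * (destination, gateway and, for net routes, a length-trimmed netmask),
+ * with rtm_msglen shortened so the kernel parses only the sockaddr's
+ * that rtm_addrs declares.
+ */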
+
+
+#define KHASH_SIZE 71 /* should be prime */
+#define KHASH(a,m) khash_bins[((a) ^ (m)) % KHASH_SIZE]
+static struct khash {
+ struct khash *k_next;
+ naddr k_dst;
+ naddr k_mask;
+ naddr k_gate;
+ short k_metric;
+ u_short k_state;
+#define KS_NEW 0x001
+#define KS_DELETE 0x002
+#define KS_ADD 0x004
+#define KS_CHANGE 0x008
+#define KS_DEL_ADD 0x010
+#define KS_STATIC 0x020
+#define KS_GATEWAY 0x040
+#define KS_DYNAMIC 0x080
+#define KS_DELETED 0x100 /* already deleted */
+ time_t k_hold;
+ time_t k_time;
+#define K_HOLD_LIM 30
+} *khash_bins[KHASH_SIZE];
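+/* Illustrative note (not from the imported source): kern_find() below
+ * walks the chain selected by KHASH(dst, mask) and matches on both k_dst
+ * and k_mask; kern_add() appends a zeroed entry in the KS_NEW state when
+ * no match exists.  With KHASH_SIZE prime, XORing the destination with
+ * its mask spreads host routes and net routes across the bins.
+ */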
+
+
+static struct khash*
+kern_find(naddr dst, naddr mask, struct khash ***ppk)
+{
+ struct khash *k, **pk;
+
+ for (pk = &KHASH(dst,mask); (k = *pk) != 0; pk = &k->k_next) {
+ if (k->k_dst == dst && k->k_mask == mask)
+ break;
+ }
+ if (ppk != 0)
+ *ppk = pk;
+ return k;
+}
+
+
+static struct khash*
+kern_add(naddr dst, naddr mask)
+{
+ struct khash *k, **pk;
+
+ k = kern_find(dst, mask, &pk);
+ if (k != 0)
+ return k;
+
+ k = (struct khash *)malloc(sizeof(*k));
+
+ bzero(k, sizeof(*k));
+ k->k_dst = dst;
+ k->k_mask = mask;
+ k->k_state = KS_NEW;
+ k->k_time = now.tv_sec;
+ k->k_hold = now.tv_sec;
+ *pk = k;
+
+ return k;
+}
+
+
+/* add a route the kernel told us
+ * rt_xaddrs() must have already been called.
+ */
+static void
+rtm_add(struct rt_msghdr *rtm)
+{
+ struct khash *k;
+ struct interface *ifp;
+ struct rt_entry *rt;
+ naddr mask;
+
+
+ if (rtm->rtm_flags & RTF_HOST) {
+ mask = HOST_MASK;
+ } else if (RTINFO_NETMASK != 0) {
+ mask = ntohl(S_ADDR(RTINFO_NETMASK));
+ } else {
+ msglog("punt %s without mask",
+ rtm_type_name(rtm->rtm_type));
+ return;
+ }
+
+ if (RTINFO_GATE == 0
+ || RTINFO_GATE->sa_family != AF_INET) {
+ msglog("punt %s without gateway",
+ rtm_type_name(rtm->rtm_type));
+ return;
+ }
+
+ k = kern_add(S_ADDR(RTINFO_DST), mask);
+ k->k_gate = S_ADDR(RTINFO_GATE);
+ k->k_metric = rtm->rtm_rmx.rmx_hopcount;
+ if (k->k_metric < 0)
+ k->k_metric = 0;
+ else if (k->k_metric > HOPCNT_INFINITY)
+ k->k_metric = HOPCNT_INFINITY;
+ k->k_state &= ~(KS_NEW | KS_DELETED | KS_GATEWAY | KS_STATIC);
+ if (rtm->rtm_flags & RTF_GATEWAY)
+ k->k_state |= KS_GATEWAY;
+ if (rtm->rtm_flags & RTF_STATIC)
+ k->k_state |= KS_STATIC;
+ if (rtm->rtm_flags & RTF_DYNAMIC)
+ k->k_state |= KS_DYNAMIC;
+ k->k_time = now.tv_sec;
+ k->k_hold = now.tv_sec;
+
+ /* Put static routes with real metrics into the daemon table so
+ * they can be advertised.
+ */
+ if (!(k->k_state & KS_STATIC))
+ return;
+
+ if (RTINFO_IFP != 0
+ && RTINFO_IFP->sdl_nlen != 0) {
+ RTINFO_IFP->sdl_data[RTINFO_IFP->sdl_nlen] = '\0';
+ ifp = ifwithname(RTINFO_IFP->sdl_data, k->k_gate);
+ } else {
+ ifp = iflookup(k->k_gate);
+ }
+ if (ifp == 0) {
+ msglog("static route %s --> %s impossibly lacks ifp",
+ addrname(S_ADDR(RTINFO_DST), mask, 0),
+ naddr_ntoa(k->k_gate));
+ return;
+ }
+ if (k->k_metric == 0)
+ return;
+
+ rt = rtget(k->k_dst, k->k_mask);
+ if (rt != 0) {
+ if (rt->rt_ifp != ifp
+ || 0 != (rt->rt_state & RS_NET_S)) {
+ rtdelete(rt);
+ rt = 0;
+ } else if (!(rt->rt_state & (RS_IF
+ | RS_LOCAL
+ | RS_MHOME
+ | RS_GW))) {
+ rtchange(rt, RS_STATIC,
+ k->k_gate, ifp->int_addr,
+ k->k_metric, 0, ifp,
+ now.tv_sec, 0);
+ }
+ }
+ if (rt == 0)
+ rtadd(k->k_dst, k->k_mask, k->k_gate,
+ ifp->int_addr, k->k_metric,
+ 0, RS_STATIC, ifp);
+}
+
+
+/* deal with packet loss
+ */
+static void
+rtm_lose(struct rt_msghdr *rtm)
+{
+ if (RTINFO_GATE == 0
+ || RTINFO_GATE->sa_family != AF_INET) {
+ msglog("punt %s without gateway",
+ rtm_type_name(rtm->rtm_type));
+ return;
+ }
+
+ if (!supplier)
+ rdisc_age(S_ADDR(RTINFO_GATE));
+
+ age(S_ADDR(RTINFO_GATE));
+}
+
+
+/* Clean the kernel table by copying it to the daemon image.
+ * Eventually the daemon will delete any extra routes.
+ */
+void
+flush_kern(void)
+{
+ size_t needed;
+ int mib[6];
+ char *buf, *next, *lim;
+ struct rt_msghdr *rtm;
+ struct interface *ifp;
+ static struct sockaddr_in gate_sa;
+
+
+ mib[0] = CTL_NET;
+ mib[1] = PF_ROUTE;
+ mib[2] = 0; /* protocol */
+ mib[3] = 0; /* wildcard address family */
+ mib[4] = NET_RT_DUMP;
+ mib[5] = 0; /* no flags */
+ if (sysctl(mib, 6, 0, &needed, 0, 0) < 0) {
+ DBGERR(1,"RT_DUMP-sysctl-estimate");
+ return;
+ }
+ buf = malloc(needed);
+ if (sysctl(mib, 6, buf, &needed, 0, 0) < 0)
+ BADERR(1,"RT_DUMP");
+ lim = buf + needed;
+ for (next = buf; next < lim; next += rtm->rtm_msglen) {
+ rtm = (struct rt_msghdr *)next;
+
+ rt_xaddrs((struct sockaddr *)(rtm+1),
+ (struct sockaddr *)(next + rtm->rtm_msglen),
+ rtm->rtm_addrs);
+
+ if (RTINFO_DST == 0
+ || RTINFO_DST->sa_family != AF_INET)
+ continue;
+
+ if (RTINFO_GATE == 0)
+ continue;
+ if (RTINFO_GATE->sa_family != AF_INET) {
+ if (RTINFO_GATE->sa_family != AF_LINK)
+ continue;
+ ifp = ifwithindex(((struct sockaddr_dl *)
+ RTINFO_GATE)->sdl_index);
+ if (ifp == 0)
+ continue;
+ gate_sa.sin_addr.s_addr = ifp->int_addr;
+#ifdef _HAVE_SA_LEN
+ gate_sa.sin_len = sizeof(gate_sa);
+#endif
+ gate_sa.sin_family = AF_INET;
+ RTINFO_GATE = (struct sockaddr *)&gate_sa;
+ }
+
+ /* ignore multicast addresses
+ */
+ if (IN_MULTICAST(ntohl(S_ADDR(RTINFO_DST))))
+ continue;
+
+ /* Note static routes and interface routes.
+ */
+ rtm_add(rtm);
+ }
+ free(buf);
+}
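+/* Note (not from the imported source): the usual two-step sysctl pattern
+ * is used above, first a call with a null buffer to learn the required
+ * size, then a second call to fetch the NET_RT_DUMP snapshot, which is
+ * then fed through rtm_add() one rt_msghdr at a time.
+ */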
+
+
+/* Listen to announcements from the kernel
+ */
+void
+read_rt(void)
+{
+ long cc;
+ struct interface *ifp;
+ naddr mask;
+ union {
+ struct {
+ struct rt_msghdr rtm;
+ struct sockaddr addrs[RTAX_MAX];
+ } r;
+ struct if_msghdr ifm;
+ } m;
+ char pid_str[10+19+1];
+
+
+ for (;;) {
+ cc = read(rt_sock, &m, sizeof(m));
+ if (cc <= 0) {
+ if (cc < 0 && errno != EWOULDBLOCK)
+ LOGERR("read(rt_sock)");
+ return;
+ }
+
+ if (m.r.rtm.rtm_version != RTM_VERSION) {
+ msglog("bogus routing message version %d",
+ m.r.rtm.rtm_version);
+ continue;
+ }
+
+ /* Ignore our own results.
+ */
+ if (m.r.rtm.rtm_type <= RTM_CHANGE
+ && m.r.rtm.rtm_pid == mypid) {
+ static int complained = 0;
+ if (!complained) {
+ msglog("receiving our own change messages");
+ complained = 1;
+ }
+ continue;
+ }
+
+ if (m.r.rtm.rtm_type == RTM_IFINFO) {
+ ifp = ifwithindex(m.ifm.ifm_index);
+ if (ifp == 0)
+ trace_msg("note %s with flags %#x"
+ " for index #%d\n",
+ rtm_type_name(m.r.rtm.rtm_type),
+ m.ifm.ifm_flags,
+ m.ifm.ifm_index);
+ else
+ trace_msg("note %s with flags %#x for %s\n",
+ rtm_type_name(m.r.rtm.rtm_type),
+ m.ifm.ifm_flags,
+ ifp->int_name);
+
+ /* After being informed of a change to an interface,
+ * check them all now if the check would otherwise
+ * be a long time from now, if the interface is
+ * not known, or if the interface has been turned
+ * off or on.
+ */
+ if (ifinit_timer.tv_sec-now.tv_sec>=CHECK_BAD_INTERVAL
+ || ifp == 0
+ || ((ifp->int_if_flags ^ m.ifm.ifm_flags)
+ & IFF_UP_RUNNING) != 0)
+ ifinit_timer.tv_sec = now.tv_sec;
+ continue;
+ }
+
+ if (m.r.rtm.rtm_type <= RTM_CHANGE)
+ (void)sprintf(pid_str," from pid %d",m.r.rtm.rtm_pid);
+ else
+ pid_str[0] = '\0';
+
+ rt_xaddrs(m.r.addrs, &m.r.addrs[RTAX_MAX],
+ m.r.rtm.rtm_addrs);
+
+ if (RTINFO_DST == 0) {
+ trace_msg("ignore %s%s without dst\n",
+ rtm_type_name(m.r.rtm.rtm_type), pid_str);
+ continue;
+ }
+
+ if (RTINFO_DST->sa_family != AF_INET) {
+ trace_msg("ignore %s%s for AF %d\n",
+ rtm_type_name(m.r.rtm.rtm_type), pid_str,
+ RTINFO_DST->sa_family);
+ continue;
+ }
+
+ mask = ((RTINFO_NETMASK != 0)
+ ? ntohl(S_ADDR(RTINFO_NETMASK))
+ : (m.r.rtm.rtm_flags & RTF_HOST)
+ ? HOST_MASK
+ : std_mask(S_ADDR(RTINFO_DST)));
+
+ if (RTINFO_GATE == 0
+ || RTINFO_GATE->sa_family != AF_INET) {
+ trace_msg("%s for %s%s\n",
+ rtm_type_name(m.r.rtm.rtm_type),
+ addrname(S_ADDR(RTINFO_DST), mask, 0),
+ pid_str);
+ } else {
+ trace_msg("%s %s --> %s%s\n",
+ rtm_type_name(m.r.rtm.rtm_type),
+ addrname(S_ADDR(RTINFO_DST), mask, 0),
+ saddr_ntoa(RTINFO_GATE),
+ pid_str);
+ }
+
+ switch (m.r.rtm.rtm_type) {
+ case RTM_ADD:
+ case RTM_CHANGE:
+ if (m.r.rtm.rtm_errno != 0) {
+ trace_msg("ignore %s%s with \"%s\" error\n",
+ rtm_type_name(m.r.rtm.rtm_type),
+ pid_str,
+ strerror(m.r.rtm.rtm_errno));
+ } else {
+ rtm_add(&m.r.rtm);
+ }
+ break;
+
+ case RTM_REDIRECT:
+ if (m.r.rtm.rtm_errno != 0) {
+ trace_msg("ignore %s with \"%s\" from %s"
+ " for %s-->%s\n",
+ rtm_type_name(m.r.rtm.rtm_type),
+ strerror(m.r.rtm.rtm_errno),
+ saddr_ntoa(RTINFO_AUTHOR),
+ saddr_ntoa(RTINFO_GATE),
+ addrname(S_ADDR(RTINFO_DST),
+ mask, 0));
+ } else {
+ rtm_add(&m.r.rtm);
+ }
+ break;
+
+ case RTM_DELETE:
+ if (m.r.rtm.rtm_errno != 0) {
+ trace_msg("ignore %s%s with \"%s\" error\n",
+ rtm_type_name(m.r.rtm.rtm_type),
+ pid_str,
+ strerror(m.r.rtm.rtm_errno));
+ } else {
+ del_static(S_ADDR(RTINFO_DST), mask, 1);
+ }
+ break;
+
+ case RTM_LOSING:
+ rtm_lose(&m.r.rtm);
+ break;
+ default:
+ break;
+ }
+ }
+}
+
+
+/* after aggregating, note routes that belong in the kernel
+ */
+static void
+kern_out(struct ag_info *ag)
+{
+ struct khash *k;
+
+
+ /* Do not install bad routes if they are not already present.
+ * This includes routes that had RS_NET_S for interfaces that
+ * recently died.
+ */
+ if (ag->ag_metric == HOPCNT_INFINITY
+ && 0 == kern_find(htonl(ag->ag_dst_h), ag->ag_mask, 0))
+ return;
+
+ k = kern_add(htonl(ag->ag_dst_h), ag->ag_mask);
+
+ /* will need to add new entry */
+ if (k->k_state & KS_NEW) {
+ k->k_state = KS_ADD;
+ if (ag->ag_state & AGS_GATEWAY)
+ k->k_state |= KS_GATEWAY;
+ k->k_gate = ag->ag_gate;
+ k->k_metric = ag->ag_metric;
+ return;
+ }
+
+ /* modify existing kernel entry if necessary */
+ k->k_state &= ~(KS_DELETE | KS_DYNAMIC);
+ if (k->k_gate != ag->ag_gate
+ || k->k_metric != ag->ag_metric) {
+ k->k_gate = ag->ag_gate;
+ k->k_metric = ag->ag_metric;
+ k->k_state |= KS_CHANGE;
+ }
+
+ if ((k->k_state & KS_GATEWAY)
+ && !(ag->ag_state & AGS_GATEWAY)) {
+ k->k_state &= ~KS_GATEWAY;
+ k->k_state |= (KS_ADD | KS_DEL_ADD);
+ } else if (!(k->k_state & KS_GATEWAY)
+ && (ag->ag_state & AGS_GATEWAY)) {
+ k->k_state |= KS_GATEWAY;
+ k->k_state |= (KS_ADD | KS_DEL_ADD);
+ }
+#undef RT
+}
+
+
+/* ARGSUSED */
+static int
+walk_kern(struct radix_node *rn,
+ struct walkarg *w)
+{
+#define RT ((struct rt_entry *)rn)
+ char pref;
+ u_int ags = 0;
+
+ /* Do not install synthetic routes */
+ if (0 != (RT->rt_state & RS_NET_S))
+ return 0;
+
+ /* Do not install routes for "external" remote interfaces.
+ */
+ if ((RT->rt_state & RS_IF)
+ && RT->rt_ifp != 0
+ && (RT->rt_ifp->int_state & IS_EXTERNAL))
+ return 0;
+
+ /* If it is not an interface, or an alias for an interface,
+ * it must be a "gateway."
+ *
+ * If it is a "remote" interface, it is also a "gateway" to
+	 * the kernel if it is not an alias.
+ */
+ if (!(RT->rt_state & RS_IF)
+ || RT->rt_ifp == 0
+ || ((RT->rt_ifp->int_state & IS_REMOTE)
+ && RT->rt_ifp->int_metric == 0))
+ ags |= (AGS_GATEWAY | AGS_SUPPRESS | AGS_PROMOTE);
+
+ if (RT->rt_metric == HOPCNT_INFINITY) {
+ pref = HOPCNT_INFINITY;
+ ags |= (AGS_DEAD | AGS_SUPPRESS);
+ } else {
+ pref = 1;
+ }
+
+ if (RT->rt_state & RS_RDISC)
+ ags |= AGS_RDISC;
+
+ ag_check(RT->rt_dst, RT->rt_mask, RT->rt_gate,
+ RT->rt_metric, pref,
+ 0, 0, ags, kern_out);
+ return 0;
+#undef RT
+}
+
+
+/* Update the kernel table to match the daemon table.
+ */
+void
+fix_kern(void)
+{
+ int i, flags;
+ struct khash *k, **pk;
+
+
+ need_kern = age_timer;
+
+ /* Walk daemon table, updating the copy of the kernel table.
+ */
+ (void)rn_walktree(rhead, walk_kern, 0);
+ ag_flush(0,0,kern_out);
+
+ for (i = 0; i < KHASH_SIZE; i++) {
+ for (pk = &khash_bins[i]; (k = *pk) != 0; ) {
+ /* Do not touch static routes */
+ if (k->k_state & KS_STATIC) {
+ pk = &k->k_next;
+ continue;
+ }
+
+ /* check hold on routes deleted by the operator */
+ if (k->k_hold > now.tv_sec) {
+ LIM_SEC(need_kern, k->k_hold);
+ pk = &k->k_next;
+ continue;
+ }
+
+ if (k->k_state & KS_DELETE) {
+ if (!(k->k_state & KS_DELETED))
+ rtioctl(RTM_DELETE,
+ k->k_dst,k->k_gate,
+ k->k_mask, 0, 0);
+ *pk = k->k_next;
+ free(k);
+ continue;
+ }
+
+ if (k->k_state & KS_DEL_ADD)
+ rtioctl(RTM_DELETE,
+ k->k_dst,k->k_gate,k->k_mask, 0, 0);
+
+ flags = (k->k_state & KS_GATEWAY) ? RTF_GATEWAY : 0;
+ if (k->k_state & KS_ADD) {
+ rtioctl(RTM_ADD,
+ k->k_dst, k->k_gate, k->k_mask,
+ k->k_metric, flags);
+ } else if (k->k_state & KS_CHANGE) {
+ rtioctl(RTM_CHANGE,
+ k->k_dst,k->k_gate,k->k_mask,
+ k->k_metric, flags);
+ }
+ k->k_state &= ~(KS_ADD | KS_CHANGE | KS_DEL_ADD);
+
+ /* Unless it seems something else is handling the
+ * routes in the kernel, mark this route to be
+ * deleted in the next cycle.
+ * This deletes routes that disappear from the
+ * daemon table, since the normal aging code
+ * will clear the bit for routes that have not
+			 * disappeared from the daemon table.
+ */
+ if (now.tv_sec >= EPOCH+MIN_WAITTIME-1
+ && (rip_interfaces != 0 || !supplier))
+ k->k_state |= KS_DELETE;
+ pk = &k->k_next;
+ }
+ }
+}
+
+
+/* Delete a static route in the image of the kernel table.
+ */
+void
+del_static(naddr dst,
+ naddr mask,
+ int gone)
+{
+ struct khash *k;
+ struct rt_entry *rt;
+
+ /* Just mark it in the table to be deleted next time the kernel
+ * table is updated.
+ * If it has already been deleted, mark it as such, and set its
+ * hold timer so that it will not be deleted again for a while.
+ * This lets the operator delete a route added by the daemon
+ * and add a replacement.
+ */
+ k = kern_find(dst, mask, 0);
+ if (k != 0) {
+ k->k_state &= ~KS_STATIC;
+ k->k_state |= KS_DELETE;
+ if (gone) {
+ k->k_state |= KS_DELETED;
+ k->k_hold = now.tv_sec + K_HOLD_LIM;
+ }
+ }
+
+ rt = rtget(dst, mask);
+ if (rt != 0 && (rt->rt_state & RS_STATIC))
+ rtbad(rt);
+}
+
+
+/* Delete all routes generated from ICMP Redirects that use a given
+ * gateway.
+ */
+void
+del_redirects(naddr bad_gate,
+ time_t old)
+{
+ int i;
+ struct khash *k;
+
+
+ for (i = 0; i < KHASH_SIZE; i++) {
+ for (k = khash_bins[i]; k != 0; k = k->k_next) {
+ if (!(k->k_state & KS_DYNAMIC)
+ || 0 != (k->k_state & (KS_STATIC | KS_DELETE)))
+ continue;
+
+ if (k->k_gate != bad_gate
+ && k->k_time > old)
+ continue;
+
+ k->k_state |= KS_DELETE;
+ need_kern.tv_sec = now.tv_sec;
+ if (TRACEACTIONS)
+ trace_msg("mark redirected %s --> %s"
+ " for deletion\n",
+ addrname(k->k_dst, k->k_mask, 0),
+ naddr_ntoa(k->k_gate));
+ }
+ }
+}
+
+
+/* Start the daemon tables.
+ */
+void
+rtinit(void)
+{
+ extern int max_keylen;
+ int i;
+ struct ag_info *ag;
+
+ /* Initialize the radix trees */
+ max_keylen = sizeof(struct sockaddr_in);
+ rn_init();
+ rn_inithead((void**)&rhead, 32);
+
+ /* mark all of the slots in the table free */
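+	/* ag_avail seemingly heads a free list linked through ag_fine;
+	 * the loop links all but the last slot, whose ag_fine pointer is
+	 * evidently left zero by static initialization to end the list.
+	 */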
+ ag_avail = ag_slots;
+ for (ag = ag_slots, i = 1; i < NUM_AG_SLOTS; i++) {
+ ag->ag_fine = ag+1;
+ ag++;
+ }
+}
+
+
+#ifdef _HAVE_SIN_LEN
+static struct sockaddr_in dst_sock = {sizeof(dst_sock), AF_INET};
+static struct sockaddr_in mask_sock = {sizeof(mask_sock), AF_INET};
+#else
+static struct sockaddr_in_new dst_sock = {_SIN_ADDR_SIZE, AF_INET};
+static struct sockaddr_in_new mask_sock = {_SIN_ADDR_SIZE, AF_INET};
+#endif
+
+
+void
+set_need_flash(void)
+{
+ if (!need_flash) {
+ need_flash = 1;
+ /* Do not send the flash update immediately. Wait a little
+ * while to hear from other routers.
+ */
+ no_flash.tv_sec = now.tv_sec + MIN_WAITTIME;
+ }
+}
+
+
+/* Get a particular routing table entry
+ */
+struct rt_entry *
+rtget(naddr dst, naddr mask)
+{
+ struct rt_entry *rt;
+
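+	/* Copy dst and mask into the scratch sockaddrs; masktrim()
+	 * evidently shortens the mask sockaddr to the form stored in the
+	 * radix tree.  rnh_lookup() then does an exact dst/mask match,
+	 * unlike the longest-match rnh_matchaddr() used by rtfind() below.
+	 */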
+ dst_sock.sin_addr.s_addr = dst;
+ mask_sock.sin_addr.s_addr = mask;
+ masktrim(&mask_sock);
+ rt = (struct rt_entry *)rhead->rnh_lookup(&dst_sock,&mask_sock,rhead);
+ if (!rt
+ || rt->rt_dst != dst
+ || rt->rt_mask != mask)
+ return 0;
+
+ return rt;
+}
+
+
+/* Find a route to dst as the kernel would.
+ */
+struct rt_entry *
+rtfind(naddr dst)
+{
+ dst_sock.sin_addr.s_addr = dst;
+ return (struct rt_entry *)rhead->rnh_matchaddr(&dst_sock, rhead);
+}
+
+
+/* add a route to the table
+ */
+void
+rtadd(naddr dst,
+ naddr mask,
+ naddr gate, /* forward packets here */
+ naddr router, /* on the authority of this router */
+ int metric,
+ u_short tag,
+ u_int state, /* RS_ for our table */
+ struct interface *ifp)
+{
+ struct rt_entry *rt;
+ naddr smask;
+ int i;
+ struct rt_spare *rts;
+
+ rt = (struct rt_entry *)malloc(sizeof (*rt));
+ if (rt == 0) {
+ BADERR(1,"rtadd malloc");
+ return;
+ }
+ bzero(rt, sizeof(*rt));
+ for (rts = rt->rt_spares, i = NUM_SPARES; i != 0; i--, rts++)
+ rts->rts_metric = HOPCNT_INFINITY;
+
+ rt->rt_nodes->rn_key = (caddr_t)&rt->rt_dst_sock;
+ rt->rt_dst = dst;
+ rt->rt_dst_sock.sin_family = AF_INET;
+#ifdef _HAVE_SIN_LEN
+ rt->rt_dst_sock.sin_len = dst_sock.sin_len;
+#endif
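+	/* std_mask() yields the natural (classful) mask for dst.  For
+	 * example (illustrative addresses), a class-C destination such as
+	 * 192.0.2.64 has a natural mask of 255.255.255.0; a supplied mask
+	 * of 255.255.255.192 covers all of those bits and is longer, so
+	 * such a route would be flagged RS_SUBNET.
+	 */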
+ if (mask != HOST_MASK) {
+ smask = std_mask(dst);
+ if ((smask & ~mask) == 0 && mask > smask)
+ state |= RS_SUBNET;
+ }
+ mask_sock.sin_addr.s_addr = mask;
+ masktrim(&mask_sock);
+ rt->rt_mask = mask;
+ rt->rt_state = state;
+ rt->rt_gate = gate;
+ rt->rt_router = router;
+ rt->rt_time = now.tv_sec;
+ if (metric == HOPCNT_INFINITY) {
+ rt->rt_time -= POISON_SECS;
+ rt->rt_hold_down = now.tv_sec+HOLD_TIME;
+ }
+ rt->rt_metric = metric;
+ if ((rt->rt_state & RS_NET_S) == 0)
+ rt->rt_hold_metric = metric;
+ else
+ rt->rt_hold_metric = HOPCNT_INFINITY;
+ rt->rt_tag = tag;
+ rt->rt_ifp = ifp;
+ rt->rt_seqno = update_seqno+1;
+
+ if (TRACEACTIONS)
+ trace_add_del("Add", rt);
+
+ need_kern.tv_sec = now.tv_sec;
+ set_need_flash();
+
+ if (0 == rhead->rnh_addaddr(&rt->rt_dst_sock, &mask_sock,
+ rhead, rt->rt_nodes)) {
+ msglog("rnh_addaddr() failed for %s mask=%#x",
+ naddr_ntoa(dst), mask);
+ }
+}
+
+
+/* notice a changed route
+ */
+void
+rtchange(struct rt_entry *rt,
+ u_int state, /* new state bits */
+ naddr gate, /* now forward packets here */
+ naddr router, /* on the authority of this router */
+ int metric, /* new metric */
+ u_short tag,
+ struct interface *ifp,
+ time_t new_time,
+ char *label)
+{
+ if (rt->rt_metric != metric) {
+ /* Hold down the route if it is bad, but only long enough
+		 * for neighbors that do not implement poison-reverse or
+ * split horizon to hear the bad news.
+ */
+ if (metric == HOPCNT_INFINITY) {
+ if (new_time > now.tv_sec - POISON_SECS)
+ new_time = now.tv_sec - POISON_SECS;
+ if (!(rt->rt_state & RS_RDISC)
+ && rt->rt_hold_down < now.tv_sec+HOLD_TIME)
+ rt->rt_hold_down = now.tv_sec+HOLD_TIME;
+ if (now.tv_sec < rt->rt_hold_down)
+ LIM_SEC(age_timer, rt->rt_hold_down+1);
+ } else {
+ rt->rt_hold_down = 0;
+ if ((rt->rt_state & RS_NET_S) == 0)
+ rt->rt_hold_metric = metric;
+ }
+
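+		/* Tagging the route with the next sequence number marks it
+		 * as changed since the last regular update, and
+		 * set_need_flash() schedules a RIP triggered ("flash")
+		 * update to advertise the change promptly.
+		 */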
+ rt->rt_seqno = update_seqno+1;
+ set_need_flash();
+ }
+
+ if (rt->rt_gate != gate) {
+ need_kern.tv_sec = now.tv_sec;
+ rt->rt_seqno = update_seqno+1;
+ set_need_flash();
+ }
+
+ state |= (rt->rt_state & RS_SUBNET);
+
+ if (TRACEACTIONS)
+ trace_change(rt, state, gate, router, metric, tag, ifp,
+ new_time,
+ label ? label : "Chg ");
+
+ rt->rt_state = state;
+ rt->rt_gate = gate;
+ rt->rt_router = router;
+ rt->rt_metric = metric;
+ rt->rt_tag = tag;
+ rt->rt_ifp = ifp;
+ rt->rt_time = new_time;
+}
+
+
+/* switch to a backup route
+ */
+void
+rtswitch(struct rt_entry *rt,
+ struct rt_spare *rts)
+{
+ struct rt_spare *rts1, swap;
+ char label[10];
+ int i;
+
+
+ /* Do not change permanent routes */
+ if (0 != (rt->rt_state & (RS_GW | RS_MHOME | RS_STATIC | RS_IF)))
+ return;
+
+ /* Do not discard synthetic routes until they go bad */
+ if (0 != (rt->rt_state & RS_NET_S)
+ && rt->rt_metric < HOPCNT_INFINITY)
+ return;
+
+ if (rts == 0) {
+ /* find the best alternative among the spares */
+ rts = rt->rt_spares+1;
+ for (i = NUM_SPARES, rts1 = rts+1; i > 2; i--, rts1++) {
+ if (BETTER_LINK(rts1,rts))
+ rts = rts1;
+ }
+ }
+
+ /* Do not bother if it is not worthwhile.
+ */
+ if (!BETTER_LINK(rts, rt->rt_spares))
+ return;
+
+ /* Do not change the route if it is being held down.
+	 * The hold-down guards against systems that do not implement
+	 * split horizon and against other causes of counting to infinity,
+	 * so apply it only to alternatives worse than our last good route.
+ */
+ if (now.tv_sec < rt->rt_hold_down
+ && rts->rts_metric > rt->rt_hold_metric) {
+ LIM_SEC(age_timer, rt->rt_hold_down+1);
+ return;
+ }
+
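+	/* The primary route evidently lives in rt_spares[0] (rt_gate and
+	 * friends alias it), so save it here; once rtchange() installs the
+	 * chosen spare as the primary, the old primary is written back
+	 * into the vacated spare slot below.
+	 */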
+ swap = rt->rt_spares[0];
+
+ (void)sprintf(label, "Use #%d", rts - rt->rt_spares);
+ rtchange(rt, rt->rt_state & ~(RS_NET_S | RS_RDISC),
+ rts->rts_gate, rts->rts_router, rts->rts_metric,
+ rts->rts_tag, rts->rts_ifp, rts->rts_time, label);
+
+ *rts = swap;
+}
+
+
+void
+rtdelete(struct rt_entry *rt)
+{
+ struct khash *k;
+
+
+ if (TRACEACTIONS)
+ trace_add_del("Del", rt);
+
+ k = kern_find(rt->rt_dst, rt->rt_mask, 0);
+ if (k != 0) {
+ k->k_state |= KS_DELETE;
+ need_kern.tv_sec = now.tv_sec;
+ }
+
+ dst_sock.sin_addr.s_addr = rt->rt_dst;
+ mask_sock.sin_addr.s_addr = rt->rt_mask;
+ masktrim(&mask_sock);
+ if (rt != (struct rt_entry *)rhead->rnh_deladdr(&dst_sock, &mask_sock,
+ rhead)) {
+ msglog("rnh_deladdr() failed");
+ } else {
+ free(rt);
+ }
+}
+
+
+/* Get rid of a bad route, and try to switch to a replacement.
+ */
+void
+rtbad(struct rt_entry *rt)
+{
+ /* Poison the route */
+ rtchange(rt, rt->rt_state & ~(RS_IF | RS_LOCAL | RS_STATIC),
+ rt->rt_gate, rt->rt_router, HOPCNT_INFINITY, rt->rt_tag,
+ 0, rt->rt_time, 0);
+
+ rtswitch(rt, 0);
+}
+
+
+/* Junk a RS_NET_S route, but save it if it is needed by another interface.
+ */
+void
+rtbad_sub(struct rt_entry *rt)
+{
+ struct interface *ifp, *ifp1;
+ struct intnet *intnetp;
+ u_int state;
+
+
+ ifp1 = 0;
+ state = 0;
+
+ if (rt->rt_state & RS_LOCAL) {
+ /* Is this the route through loopback for the interface?
+	 * If so, see if it is used by any other interface, such as a
+	 * point-to-point interface with the same local address.
+ */
+ for (ifp = ifnet; ifp != 0; ifp = ifp->int_next) {
+ if (ifp->int_metric == HOPCNT_INFINITY)
+ continue;
+
+ /* Save it if another interface needs it
+ */
+ if (ifp->int_addr == rt->rt_ifp->int_addr) {
+ state |= RS_LOCAL;
+ ifp1 = ifp;
+ break;
+ }
+ }
+
+ }
+
+ if (!(state & RS_LOCAL)
+ && (rt->rt_state & RS_NET_S)) {
+ for (ifp = ifnet; ifp != 0; ifp = ifp->int_next) {
+ if (ifp->int_metric == HOPCNT_INFINITY)
+ continue;
+
+ /* Retain RIPv1 logical network route if
+ * there is another interface that justifies
+ * it.
+ */
+ if ((ifp->int_state & IS_NEED_NET_SUB)
+ && rt->rt_mask == ifp->int_std_mask
+ && rt->rt_dst == ifp->int_std_addr) {
+ state |= RS_NET_SUB;
+ ifp1 = ifp;
+
+ } else if ((ifp->int_if_flags & IFF_POINTOPOINT)
+ && rt->rt_mask == ifp->int_host_mask
+ && rt->rt_dst == ifp->int_host_addr
+ && ridhosts) {
+ state |= RS_NET_HOST;
+ ifp1 = ifp;
+ }
+ }
+
+ if (ifp1 == 0) {
+ for (intnetp = intnets;
+ intnetp != 0;
+ intnetp = intnetp->intnet_next) {
+ if (intnetp->intnet_addr == rt->rt_dst
+ && intnetp->intnet_mask == rt->rt_mask) {
+ state |= RS_NET_SUB;
+ break;
+ }
+ }
+ }
+ }
+
+
+ if (ifp1 != 0) {
+ rtchange(rt, (rt->rt_state & ~(RS_NET_S | RS_LOCAL)) | state,
+ rt->rt_gate, rt->rt_router, NET_S_METRIC,
+ rt->rt_tag, ifp1, rt->rt_time, 0);
+ } else {
+ rtbad(rt);
+ }
+}
+
+
+/* Called while walking the table looking for sick interfaces
+ * or after a time change.
+ */
+/* ARGSUSED */
+int
+walk_bad(struct radix_node *rn,
+ struct walkarg *w)
+{
+#define RT ((struct rt_entry *)rn)
+ struct rt_spare *rts;
+ int i;
+ time_t new_time;
+
+
+ /* fix any spare routes through the interface
+ */
+ rts = RT->rt_spares;
+ for (i = NUM_SPARES; i != 1; i--) {
+ rts++;
+
+ if (rts->rts_ifp != 0
+ && (rts->rts_ifp->int_state & IS_BROKE)) {
+ new_time = rts->rts_time;
+ if (new_time >= now_garbage)
+ new_time = now_garbage-1;
+ if (TRACEACTIONS)
+ trace_upslot(RT, rts, rts->rts_gate,
+ rts->rts_router, 0,
+ HOPCNT_INFINITY, rts->rts_tag,
+ new_time);
+ rts->rts_ifp = 0;
+ rts->rts_metric = HOPCNT_INFINITY;
+ rts->rts_time = new_time;
+ }
+ }
+
+ /* Deal with the main route
+ */
+ /* finished if it has been handled before or if its interface is ok
+ */
+ if (RT->rt_ifp == 0 || !(RT->rt_ifp->int_state & IS_BROKE))
+ return 0;
+
+	/* Bad routes that are not interface routes are easy.
+ */
+ if (!(RT->rt_state & RS_IF)) {
+ rtbad(RT);
+ return 0;
+ }
+
+ rtbad_sub(RT);
+ return 0;
+#undef RT
+}
+
+
+/* Check the age of an individual route.
+ */
+/* ARGSUSED */
+static int
+walk_age(struct radix_node *rn,
+ struct walkarg *w)
+{
+#define RT ((struct rt_entry *)rn)
+ struct interface *ifp;
+ struct rt_spare *rts;
+ int i;
+
+
+ /* age the spare routes */
+ rts = RT->rt_spares;
+ for (i = NUM_SPARES; i != 0; i--, rts++) {
+
+ ifp = rts->rts_ifp;
+ if (i == NUM_SPARES) {
+ if (!AGE_RT(RT, ifp)) {
+ /* Keep various things from deciding ageless
+ * routes are stale */
+ rts->rts_time = now.tv_sec;
+ continue;
+ }
+
+ /* forget RIP routes after RIP has been turned off.
+ */
+ if (rip_sock < 0 && !(RT->rt_state & RS_RDISC)) {
+ rtdelete(RT);
+ return 0;
+ }
+ }
+
+ if (age_bad_gate == rts->rts_gate
+ && rts->rts_time >= now_stale) {
+ /* age failing routes
+ */
+ rts->rts_time -= SUPPLY_INTERVAL;
+
+ } else if (ppp_noage
+ && ifp != 0
+ && (ifp->int_if_flags & IFF_POINTOPOINT)
+ && (ifp->int_state & IS_QUIET)) {
+ /* optionally do not age routes through quiet
+ * point-to-point interfaces
+ */
+ rts->rts_time = now.tv_sec;
+ continue;
+ }
+
+ /* trash the spare routes when they go bad */
+ if (rts->rts_metric < HOPCNT_INFINITY
+ && now_garbage > rts->rts_time) {
+ if (TRACEACTIONS)
+ trace_upslot(RT, rts, rts->rts_gate,
+ rts->rts_router, rts->rts_ifp,
+ HOPCNT_INFINITY, rts->rts_tag,
+ rts->rts_time);
+ rts->rts_metric = HOPCNT_INFINITY;
+ }
+ }
+
+
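+	/* now_stale and now_garbage are the current time minus the
+	 * expiration and garbage-collection intervals.  A route refreshed
+	 * at time T stays fresh while T >= now_stale; once stale the
+	 * daemon tries a spare, advertises the route with HOPCNT_INFINITY
+	 * after EXPIRE_TIME, and deletes it when T falls below now_garbage.
+	 */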
+ /* finished if the active route is still fresh */
+ if (now_stale <= RT->rt_time)
+ return 0;
+
+ /* try to switch to an alternative */
+ if (now.tv_sec < RT->rt_hold_down) {
+ LIM_SEC(age_timer, RT->rt_hold_down+1);
+ return 0;
+ } else {
+ rtswitch(RT, 0);
+ }
+
+	/* Delete a dead route after it has been publicly mourned. */
+ if (now_garbage > RT->rt_time) {
+ rtdelete(RT);
+ return 0;
+ }
+
+ /* Start poisoning a bad route before deleting it. */
+ if (now.tv_sec - RT->rt_time > EXPIRE_TIME)
+ rtchange(RT, RT->rt_state, RT->rt_gate, RT->rt_router,
+ HOPCNT_INFINITY, RT->rt_tag, RT->rt_ifp,
+ RT->rt_time, 0);
+ return 0;
+}
+
+
+/* Watch for dead routes and interfaces.
+ */
+void
+age(naddr bad_gate)
+{
+ struct interface *ifp;
+
+
+ age_timer.tv_sec = now.tv_sec + (rip_sock < 0
+ ? NEVER
+ : SUPPLY_INTERVAL);
+
+ for (ifp = ifnet; ifp; ifp = ifp->int_next) {
+ /* Check for dead IS_REMOTE interfaces by timing their
+ * transmissions.
+ */
+ if ((ifp->int_state & IS_REMOTE)
+ && !(ifp->int_state & IS_PASSIVE)
+ && (ifp->int_state & IS_ACTIVE)) {
+
+ LIM_SEC(age_timer, now.tv_sec+SUPPLY_INTERVAL);
+ if (now.tv_sec - ifp->int_act_time > EXPIRE_TIME)
+ ifbad(ifp,
+ "remote interface %s to %s timed out");
+ }
+ }
+
+ /* Age routes. */
+ age_bad_gate = bad_gate;
+ (void)rn_walktree(rhead, walk_age, 0);
+
+ /* Update the kernel routing table. */
+ fix_kern();
+}